aboutsummaryrefslogtreecommitdiffstats
path: root/hicn-plugin/src
diff options
context:
space:
mode:
Diffstat (limited to 'hicn-plugin/src')
-rwxr-xr-xhicn-plugin/src/cache_policies/cs_lru.c213
-rwxr-xr-xhicn-plugin/src/cache_policies/cs_lru.h67
-rwxr-xr-xhicn-plugin/src/cache_policies/cs_policy.h81
-rwxr-xr-xhicn-plugin/src/cli.c1247
-rwxr-xr-xhicn-plugin/src/data_fwd.h209
-rwxr-xr-xhicn-plugin/src/data_fwd_node.c541
-rwxr-xr-xhicn-plugin/src/data_pcslookup.h55
-rwxr-xr-xhicn-plugin/src/data_pcslookup_node.c246
-rwxr-xr-xhicn-plugin/src/data_push_node.c349
-rwxr-xr-xhicn-plugin/src/error.c28
-rwxr-xr-xhicn-plugin/src/error.h100
-rwxr-xr-xhicn-plugin/src/face_db.h153
-rwxr-xr-xhicn-plugin/src/faces/app/address_mgr.c243
-rwxr-xr-xhicn-plugin/src/faces/app/address_mgr.h76
-rwxr-xr-xhicn-plugin/src/faces/app/face_app_cli.c203
-rwxr-xr-xhicn-plugin/src/faces/app/face_cons.c126
-rwxr-xr-xhicn-plugin/src/faces/app/face_cons.h75
-rwxr-xr-xhicn-plugin/src/faces/app/face_prod.c375
-rwxr-xr-xhicn-plugin/src/faces/app/face_prod.h113
-rwxr-xr-xhicn-plugin/src/faces/app/face_prod_node.c341
-rwxr-xr-xhicn-plugin/src/faces/face.c141
-rwxr-xr-xhicn-plugin/src/faces/face.h240
-rwxr-xr-xhicn-plugin/src/faces/face_cli.c131
-rwxr-xr-xhicn-plugin/src/faces/ip/dpo_ip.c187
-rwxr-xr-xhicn-plugin/src/faces/ip/dpo_ip.h255
-rwxr-xr-xhicn-plugin/src/faces/ip/face_ip.c326
-rwxr-xr-xhicn-plugin/src/faces/ip/face_ip.h241
-rwxr-xr-xhicn-plugin/src/faces/ip/face_ip_cli.c158
-rwxr-xr-xhicn-plugin/src/faces/ip/face_ip_node.c761
-rwxr-xr-xhicn-plugin/src/faces/ip/face_ip_node.h40
-rwxr-xr-xhicn-plugin/src/faces/ip/iface_ip_node.c845
-rwxr-xr-xhicn-plugin/src/faces/ip/iface_ip_node.h35
-rwxr-xr-xhicn-plugin/src/faces/udp/dpo_udp.c158
-rwxr-xr-xhicn-plugin/src/faces/udp/dpo_udp.h312
-rwxr-xr-xhicn-plugin/src/faces/udp/face_udp.c371
-rwxr-xr-xhicn-plugin/src/faces/udp/face_udp.h248
-rwxr-xr-xhicn-plugin/src/faces/udp/face_udp_cli.c164
-rwxr-xr-xhicn-plugin/src/faces/udp/face_udp_node.c864
-rwxr-xr-xhicn-plugin/src/faces/udp/face_udp_node.h35
-rwxr-xr-xhicn-plugin/src/faces/udp/iface_udp_node.c894
-rwxr-xr-xhicn-plugin/src/faces/udp/iface_udp_node.h36
-rwxr-xr-xhicn-plugin/src/hashtb.c1008
-rwxr-xr-xhicn-plugin/src/hashtb.h550
-rwxr-xr-xhicn-plugin/src/hicn.api538
-rwxr-xr-xhicn-plugin/src/hicn.c253
-rwxr-xr-xhicn-plugin/src/hicn.h86
-rwxr-xr-xhicn-plugin/src/hicn_all_api_h.h22
-rwxr-xr-xhicn-plugin/src/hicn_api.c570
-rwxr-xr-xhicn-plugin/src/hicn_api.h32
-rwxr-xr-xhicn-plugin/src/hicn_api_test.c1046
-rwxr-xr-xhicn-plugin/src/hicn_msg_enum.h36
-rwxr-xr-xhicn-plugin/src/infra.h101
-rwxr-xr-xhicn-plugin/src/interest_hitcs.h55
-rwxr-xr-xhicn-plugin/src/interest_hitcs_node.c300
-rwxr-xr-xhicn-plugin/src/interest_hitpit.h56
-rwxr-xr-xhicn-plugin/src/interest_hitpit_node.c313
-rwxr-xr-xhicn-plugin/src/interest_pcslookup.h57
-rwxr-xr-xhicn-plugin/src/interest_pcslookup_node.c240
-rwxr-xr-xhicn-plugin/src/mapme.h307
-rwxr-xr-xhicn-plugin/src/mapme_ack.h53
-rwxr-xr-xhicn-plugin/src/mapme_ack_node.c224
-rwxr-xr-xhicn-plugin/src/mapme_ctrl.h92
-rwxr-xr-xhicn-plugin/src/mapme_ctrl_node.c333
-rwxr-xr-xhicn-plugin/src/mapme_eventmgr.c559
-rwxr-xr-xhicn-plugin/src/mapme_eventmgr.h48
-rwxr-xr-xhicn-plugin/src/mgmt.c100
-rwxr-xr-xhicn-plugin/src/mgmt.h132
-rwxr-xr-xhicn-plugin/src/params.h104
-rwxr-xr-xhicn-plugin/src/parser.h102
-rwxr-xr-xhicn-plugin/src/pcs.c53
-rwxr-xr-xhicn-plugin/src/pcs.h836
-rwxr-xr-xhicn-plugin/src/pg.c1147
-rwxr-xr-xhicn-plugin/src/pg.h56
-rwxr-xr-xhicn-plugin/src/punt.c1005
-rwxr-xr-xhicn-plugin/src/punt.h338
-rwxr-xr-xhicn-plugin/src/route.c392
-rwxr-xr-xhicn-plugin/src/route.h61
-rwxr-xr-xhicn-plugin/src/state.h102
-rwxr-xr-xhicn-plugin/src/strategies/dpo_mw.c305
-rwxr-xr-xhicn-plugin/src/strategies/dpo_mw.h131
-rwxr-xr-xhicn-plugin/src/strategies/strategy_mw.c171
-rwxr-xr-xhicn-plugin/src/strategies/strategy_mw.h31
-rwxr-xr-xhicn-plugin/src/strategies/strategy_mw_cli.c148
-rwxr-xr-xhicn-plugin/src/strategy.c265
-rwxr-xr-xhicn-plugin/src/strategy.h82
-rwxr-xr-xhicn-plugin/src/strategy_dpo_ctx.h69
-rwxr-xr-xhicn-plugin/src/strategy_dpo_manager.c159
-rwxr-xr-xhicn-plugin/src/strategy_dpo_manager.h186
-rwxr-xr-xhicn-plugin/src/utils.h66
-rwxr-xr-xhicn-plugin/src/vface_db.h155
90 files changed, 24358 insertions, 0 deletions
diff --git a/hicn-plugin/src/cache_policies/cs_lru.c b/hicn-plugin/src/cache_policies/cs_lru.c
new file mode 100755
index 000000000..f35bee3c9
--- /dev/null
+++ b/hicn-plugin/src/cache_policies/cs_lru.c
@@ -0,0 +1,213 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "../hashtb.h"
+#include "../strategy_dpo_manager.h"
+#include "../error.h"
+#include "cs_lru.h"
+#include "cs_policy.h"
+
+/*
+ * LRU cache-policy virtual function table. Plugged into the generic CS
+ * policy framework declared in cs_policy.h; each slot maps to the matching
+ * hicn_cs_lru_* implementation below.
+ */
+hicn_cs_policy_vft_t hicn_cs_lru = {
+ .hicn_cs_insert = &hicn_cs_lru_insert,
+ .hicn_cs_update = &hicn_cs_lru_update_head,
+ .hicn_cs_dequeue = &hicn_cs_lru_dequeue,
+ .hicn_cs_delete_get = &hicn_cs_lru_delete_get,
+ .hicn_cs_trim = &hicn_cs_lru_trim,
+};
+
+/*
+ * Insert a new CS element at the head of the CS LRU
+ *
+ * The LRU is a doubly-linked list threaded through the PCS entries via
+ * u.cs.cs_lru_prev / cs_lru_next, storing hashtable node indexes. Index 0
+ * is used as the "no node" sentinel, so a valid node index is never 0.
+ *
+ * @param p            PIT/CS table the node belongs to
+ * @param node         hashtable node being inserted
+ * @param pcs          PCS entry stored in @node
+ * @param policy_state LRU head/tail/count state to update
+ */
+void
+hicn_cs_lru_insert (hicn_pit_cs_t * p, hicn_hash_node_t * node,
+ hicn_pcs_entry_t * pcs, hicn_cs_policy_t * policy_state)
+{
+ hicn_hash_node_t *lrunode;
+ hicn_pcs_entry_t *lrupcs;
+ u32 idx;
+
+ idx = hicn_hashtb_node_idx_from_node (p->pcs_table, node);
+
+ if (policy_state->head != 0)
+ {
+ /* Non-empty list: link new node in front of the current head */
+ lrunode = hicn_hashtb_node_from_idx (p->pcs_table, policy_state->head);
+ lrupcs = hicn_pit_get_data (lrunode);
+
+ ASSERT (lrupcs->u.cs.cs_lru_prev == 0);
+ lrupcs->u.cs.cs_lru_prev = idx;
+
+ pcs->u.cs.cs_lru_prev = 0;
+ pcs->u.cs.cs_lru_next = policy_state->head;
+
+ policy_state->head = idx;
+ }
+ else
+ {
+ ASSERT (policy_state->tail == 0);	/* We think the list is
+					 * empty */
+
+ /* Empty list: the new node becomes both head and tail */
+ policy_state->head = policy_state->tail = idx;
+
+ pcs->u.cs.cs_lru_next = pcs->u.cs.cs_lru_prev = 0;
+ }
+
+ policy_state->count++;
+}
+
+/*
+ * Return (without unlinking) the node, PCS entry and hash entry of the LRU
+ * tail, i.e. the least-recently-used element — the eviction candidate.
+ * The caller is responsible for actually removing it from the LRU.
+ *
+ * NOTE(review): assumes the LRU is non-empty (policy_state->tail != 0);
+ * no guard here — confirm callers check the count first.
+ */
+void
+hicn_cs_lru_delete_get (hicn_pit_cs_t * p, hicn_cs_policy_t * policy_state,
+ hicn_hash_node_t ** nodep,
+ hicn_pcs_entry_t ** pcs_entry,
+ hicn_hash_entry_t ** hash_entry)
+{
+ *nodep = hicn_hashtb_node_from_idx (p->pcs_table, policy_state->tail);
+ *pcs_entry = hicn_pit_get_data (*nodep);
+
+ /* The hash entry lookup needs to know whether the node lives in an
+  * overflow bucket */
+ *hash_entry = hicn_hashtb_get_entry (p->pcs_table, (*nodep)->entry_idx,
+ (*nodep)->bucket_id,
+ (*nodep)->hn_flags &
+ HICN_HASH_NODE_OVERFLOW_BUCKET);
+}
+
+/*
+ * Dequeue an LRU element, for example when it has expired.
+ *
+ * Unlinks @pnode/@pcs from the doubly-linked LRU list, fixing up the
+ * neighbours (or the list head/tail when the node is at either end),
+ * clears the node's own links and decrements the element count.
+ */
+void
+hicn_cs_lru_dequeue (hicn_pit_cs_t * pit, hicn_hash_node_t * pnode,
+ hicn_pcs_entry_t * pcs, hicn_cs_policy_t * lru)
+{
+ hicn_hash_node_t *lrunode;
+ hicn_pcs_entry_t *lrupcs;
+
+ if (pcs->u.cs.cs_lru_prev != 0)
+ {
+ /* Not already on the head of the LRU */
+ lrunode = hicn_hashtb_node_from_idx (pit->pcs_table,
+ pcs->u.cs.cs_lru_prev);
+ lrupcs = hicn_pit_get_data (lrunode);
+
+ lrupcs->u.cs.cs_lru_next = pcs->u.cs.cs_lru_next;
+ }
+ else
+ {
+ /* Head removal: the successor becomes the new head */
+ ASSERT (lru->head ==
+ hicn_hashtb_node_idx_from_node (pit->pcs_table, pnode));
+ lru->head = pcs->u.cs.cs_lru_next;
+ }
+
+ if (pcs->u.cs.cs_lru_next != 0)
+ {
+ /* Not already the end of the LRU */
+ lrunode = hicn_hashtb_node_from_idx (pit->pcs_table,
+ pcs->u.cs.cs_lru_next);
+ lrupcs = hicn_pit_get_data (lrunode);
+
+ lrupcs->u.cs.cs_lru_prev = pcs->u.cs.cs_lru_prev;
+ }
+ else
+ {
+ /* This was the last LRU element */
+ ASSERT (lru->tail ==
+ hicn_hashtb_node_idx_from_node (pit->pcs_table, pnode));
+ lru->tail = pcs->u.cs.cs_lru_prev;
+ }
+
+ pcs->u.cs.cs_lru_next = pcs->u.cs.cs_lru_prev = 0;
+ lru->count--;
+}
+
+/*
+ * Move a CS LRU element to the head, probably after it's been used.
+ *
+ * Implemented as dequeue + re-insert; a no-op when the element is already
+ * at the head (cs_lru_prev == 0).
+ */
+void
+hicn_cs_lru_update_head (hicn_pit_cs_t * pit, hicn_hash_node_t * pnode,
+ hicn_pcs_entry_t * pcs, hicn_cs_policy_t * lru)
+{
+ if (pcs->u.cs.cs_lru_prev != 0)
+ {
+ /*
+ * Not already on the head of the LRU, detach it from its
+ * current position
+ */
+ hicn_cs_lru_dequeue (pit, pnode, pcs, lru);
+
+ /* Now detached from the list; attach at head */
+ hicn_cs_lru_insert (pit, pnode, pcs, lru);
+
+ }
+ else
+ {
+ /* Already the head — nothing to move */
+ ASSERT (lru->head ==
+ hicn_hashtb_node_idx_from_node (pit->pcs_table, pnode));
+ }
+}
+
+/*
+ * Remove a batch of nodes from the CS LRU, copying their node indexes into
+ * the caller's array. We expect this is done when the LRU size exceeds the
+ * CS's limit. Return the number of removed nodes.
+ *
+ * Eviction walks backwards from the tail (least-recently-used end) via the
+ * cs_lru_prev links, removing up to @sz elements. The caller must free the
+ * returned nodes; this only unlinks them from the LRU bookkeeping.
+ */
+int
+hicn_cs_lru_trim (hicn_pit_cs_t * pit, u32 * node_list, int sz,
+ hicn_cs_policy_t * lru)
+{
+ hicn_hash_node_t *lrunode;
+ hicn_pcs_entry_t *lrupcs;
+ u32 idx;
+ int i;
+
+ idx = lru->tail;
+
+ for (i = 0; i < sz; i++)
+ {
+
+ if (idx == 0)
+ {
+ /* Walked past the head: list exhausted before reaching sz */
+ break;
+ }
+ lrunode = hicn_hashtb_node_from_idx (pit->pcs_table, idx);
+ lrupcs = hicn_pit_get_data (lrunode);
+
+ node_list[i] = idx;
+
+ idx = lrupcs->u.cs.cs_lru_prev;
+ lrupcs->u.cs.cs_lru_prev = 0;
+ lrupcs->u.cs.cs_lru_next = 0;
+ }
+
+ lru->count -= i;
+
+ lru->tail = idx;
+ if (idx != 0)
+ {
+ /* The surviving element nearest the tail becomes the new tail */
+ lrunode = hicn_hashtb_node_from_idx (pit->pcs_table, idx);
+ lrupcs = hicn_pit_get_data (lrunode);
+
+ lrupcs->u.cs.cs_lru_next = 0;
+ }
+ else
+ {
+ /* If the tail is empty, the whole lru is empty */
+ lru->head = 0;
+ }
+
+ return (i);
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/cache_policies/cs_lru.h b/hicn-plugin/src/cache_policies/cs_lru.h
new file mode 100755
index 000000000..94320f7f9
--- /dev/null
+++ b/hicn-plugin/src/cache_policies/cs_lru.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LRU_H__
+#define __LRU_H__
+
+#include "../pcs.h"
+#include "../hashtb.h"
+#include "cs_policy.h"
+
+/* LRU policy vft instance, defined in cs_lru.c */
+extern hicn_cs_policy_vft_t hicn_cs_lru;
+
+/*
+ * Insert a new CS element at the head of the CS LRU
+ */
+void
+hicn_cs_lru_insert (hicn_pit_cs_t * pcs, hicn_hash_node_t * pnode,
+ hicn_pcs_entry_t * entry, hicn_cs_policy_t * lru);
+
+
+/*
+ * Dequeue an LRU element, for example when it has expired.
+ */
+void
+hicn_cs_lru_dequeue (hicn_pit_cs_t * pcs, hicn_hash_node_t * pnode,
+ hicn_pcs_entry_t * entry, hicn_cs_policy_t * lru);
+
+/*
+ * Move a CS LRU element to the head, probably after it's been used.
+ */
+void
+hicn_cs_lru_update_head (hicn_pit_cs_t * pcs, hicn_hash_node_t * pnode,
+ hicn_pcs_entry_t * entry, hicn_cs_policy_t * lru);
+
+/*
+ * Retrieve (without unlinking) the LRU tail — the eviction candidate.
+ */
+void
+hicn_cs_lru_delete_get (hicn_pit_cs_t * p, hicn_cs_policy_t * policy,
+ hicn_hash_node_t ** node, hicn_pcs_entry_t ** pcs,
+ hicn_hash_entry_t ** hash_entry);
+
+/*
+ * Remove a batch of nodes from the CS LRU, copying their node indexes into
+ * the caller's array. We expect this is done when the LRU size exceeds the
+ * CS's limit. Return the number of removed nodes.
+ */
+int hicn_cs_lru_trim (hicn_pit_cs_t * pcs, u32 * node_list, int sz,
+ hicn_cs_policy_t * lru);
+
+
+#endif /* __LRU_H__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/cache_policies/cs_policy.h b/hicn-plugin/src/cache_policies/cs_policy.h
new file mode 100755
index 000000000..08817de18
--- /dev/null
+++ b/hicn-plugin/src/cache_policies/cs_policy.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HICN_CS_POLICY_H__
+#define __HICN_CS_POLICY_H__
+
+#include "../hashtb.h"
+
+/*
+ * Structure
+ */
+/* Per-policy bookkeeping state. Head/tail/count describe a doubly-linked
+ * list threaded through hashtable nodes; index 0 is the "empty" sentinel. */
+typedef struct hicn_cs_policy_s
+{
+ u32 max;	/* maximum number of elements the policy may hold */
+ u32 count;	/* current number of elements in the list */
+
+ /* Indexes to hashtable nodes forming CS LRU */
+ u32 head;	/* most-recently-used end (0 = empty) */
+ u32 tail;	/* least-recently-used end (0 = empty) */
+
+} hicn_cs_policy_t;
+
+/* Forward declaration */
+struct hicn_pit_cs_s;
+struct hicn_hash_node_s;
+struct hicn_pcs_entry_s;
+struct hicn_cs_policy_s;
+
+/**
+ * @brief Definition of the virtual function table for a cache policy.
+ *
+ * A cache policy must implement five functions: insert, update, dequeue,
+ * delete_get and trim.
+ */
+typedef struct hicn_cs_policy_vft_s
+{
+ /* Add a new entry to the policy's tracking structure */
+ void (*hicn_cs_insert) (struct hicn_pit_cs_s * p,
+ struct hicn_hash_node_s * node,
+ struct hicn_pcs_entry_s * pcs,
+ hicn_cs_policy_t * policy);
+
+ /* Refresh an entry's position after a hit (e.g. move-to-front) */
+ void (*hicn_cs_update) (struct hicn_pit_cs_s * p,
+ struct hicn_hash_node_s * node,
+ struct hicn_pcs_entry_s * pcs,
+ hicn_cs_policy_t * policy);
+
+ /* Unlink an entry from the policy's tracking structure */
+ void (*hicn_cs_dequeue) (struct hicn_pit_cs_s * p,
+ struct hicn_hash_node_s * node,
+ struct hicn_pcs_entry_s * pcs,
+ hicn_cs_policy_t * policy);
+
+ /* Return (without unlinking) the next eviction candidate */
+ void (*hicn_cs_delete_get) (struct hicn_pit_cs_s * p,
+ hicn_cs_policy_t * policy,
+ struct hicn_hash_node_s ** node,
+ struct hicn_pcs_entry_s ** pcs,
+ struct hicn_hash_entry_s ** hash_entry);
+
+ /* Batch-evict up to sz entries; returns the number removed */
+ int (*hicn_cs_trim) (struct hicn_pit_cs_s * p, u32 * node_list, int sz,
+ hicn_cs_policy_t * policy);
+} hicn_cs_policy_vft_t;
+
+
+
+#endif /* __HICN_CS_POLICY_H__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/cli.c b/hicn-plugin/src/cli.c
new file mode 100755
index 000000000..c8c0be4ff
--- /dev/null
+++ b/hicn-plugin/src/cli.c
@@ -0,0 +1,1247 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vlib/vlib.h>
+#include <vppinfra/error.h>
+#include <vlibapi/api.h>
+#include <vlibmemory/api.h>
+
+#include <vnet/udp/udp.h> // port registration
+#include <vnet/ip/ip6_packet.h> // ip46_address_t
+#include <vnet/ip/format.h>
+
+#include "hicn.h"
+#include "infra.h"
+#include "parser.h"
+#include "mgmt.h"
+#include "strategy_dpo_manager.h"
+#include "strategy.h"
+#include "pg.h"
+#include "error.h"
+#include "faces/face.h"
+#include "route.h"
+#include "punt.h"
+#include "hicn_api.h"
+#include "mapme.h"
+
+extern ip_version_t ipv4;
+extern ip_version_t ipv6;
+
+/* Parameters staged by 'hicn control param' CLI commands before the plugin
+ * is enabled; -1 / -1.0f means "not set, use the built-in default". */
+static vl_api_hicn_api_node_params_set_t node_ctl_params = {
+ .pit_max_size = -1,
+ .pit_dflt_lifetime_sec = -1.0f,
+ .pit_min_lifetime_sec = -1.0f,
+ .pit_max_lifetime_sec = -1.0f,
+ .cs_max_size = -1,
+ .cs_reserved_app = -1,
+};
+
+/* Coarse interface classification used by hicn_cli_is_ip_interface() */
+typedef enum
+{
+ IP,
+ ETHERNET,
+} interface_type_t;
+
+/*
+ * Supporting function that return if the interface is IP or ethernet
+ *
+ * Heuristic: only interfaces whose VPP device class is named "Loopback"
+ * are treated as plain-IP; everything else is assumed to be ethernet.
+ */
+static interface_type_t
+hicn_cli_is_ip_interface (vlib_main_t * vm,
+ vnet_main_t * vnm, u32 sw_if_index)
+{
+
+ vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, sw_if_index);
+
+ vnet_device_class_t *dev_class =
+ vnet_get_device_class (vnm, hi->dev_class_index);
+ if (!strcmp (dev_class->name, "Loopback"))
+ {
+ return IP;
+ }
+ return ETHERNET;
+
+}
+
+/*
+ * cli handler for 'control start'
+ *
+ * Enables the forwarder with whatever parameters were staged in
+ * node_ctl_params (unset fields are -1 and fall back to defaults).
+ */
+static clib_error_t *
+hicn_cli_node_ctl_start_set_command_fn (vlib_main_t * vm,
+ unformat_input_t * main_input,
+ vlib_cli_command_t * cmd)
+{
+ int ret;
+
+ ret = hicn_infra_plugin_enable_disable (1 /* enable */ ,
+ node_ctl_params.pit_max_size,
+ node_ctl_params.pit_dflt_lifetime_sec,
+ node_ctl_params.pit_min_lifetime_sec,
+ node_ctl_params.pit_max_lifetime_sec,
+ node_ctl_params.cs_max_size,
+ node_ctl_params.cs_reserved_app);
+
+ vlib_cli_output (vm, "hicn: fwdr initialize => %s\n",
+ get_error_string (ret));
+
+ return (ret == HICN_ERROR_NONE) ? 0 : clib_error_return (0,
+ get_error_string
+ (ret));
+}
+
+/*
+ * cli handler for 'control stop'
+ *
+ * Disables the forwarder. Rejects any trailing arguments on the line.
+ */
+static clib_error_t *
+hicn_cli_node_ctl_stop_set_command_fn (vlib_main_t * vm,
+ unformat_input_t * main_input,
+ vlib_cli_command_t * cmd)
+{
+ int ret;
+
+ /*
+ * Catch unexpected extra arguments on this line. See comment on
+ * hicn_cli_node_ctrl_start_set_command_fn
+ */
+ if (main_input->index > 0 &&
+ main_input->buffer[main_input->index - 1] != '\n')
+ {
+ unformat_input_t _line_input, *line_input = &_line_input;
+ if (!unformat_user (main_input, unformat_line_input, line_input))
+ {
+ return (0);
+ }
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ return clib_error_return (0, "%s '%U'",
+ get_error_string (HICN_ERROR_CLI_INVAL),
+ format_unformat_error, line_input);
+ }
+ }
+ ret = hicn_infra_plugin_enable_disable (0 /* !enable */ ,
+ node_ctl_params.pit_max_size,
+ node_ctl_params.pit_dflt_lifetime_sec,
+ node_ctl_params.pit_min_lifetime_sec,
+ node_ctl_params.pit_max_lifetime_sec,
+ node_ctl_params.cs_max_size,
+ node_ctl_params.cs_reserved_app);
+
+ return (ret == HICN_ERROR_NONE) ? 0 : clib_error_return (0,
+ get_error_string
+ (ret));
+}
+
+/* Range check that also accepts -1 as the "unset / use default" sentinel.
+ * Uses a GCC statement expression so each argument is evaluated once. */
+#define DFLTD_RANGE_OK(val, min, max) \
+({ \
+ __typeof__ (val) _val = (val); \
+ __typeof__ (min) _min = (min); \
+ __typeof__ (max) _max = (max); \
+ (_val == -1) || \
+ (_val >= _min && _val <= _max); \
+})
+
+/*
+ * cli handler for 'control param'
+ *
+ * Stages PIT/CS sizing and lifetime parameters into node_ctl_params.
+ * Only allowed before the plugin is enabled; values are validated against
+ * the HICN_PARAM_* bounds (with -1 meaning "unset").
+ */
+static clib_error_t *
+hicn_cli_node_ctl_param_set_command_fn (vlib_main_t * vm,
+ unformat_input_t * main_input,
+ vlib_cli_command_t * cmd)
+{
+ int rv = 0;
+
+ int table_size;
+ f64 lifetime;
+ int cs_reserved_app;
+
+ if (hicn_main.is_enabled)
+ {
+ return (clib_error_return
+ (0, "params cannot be altered once hicn started"));
+ }
+ /* Get a line of input. */
+ unformat_input_t _line_input, *line_input = &_line_input;
+ if (!unformat_user (main_input, unformat_line_input, line_input))
+ {
+ return clib_error_return (0,
+ get_error_string
+ (HICN_ERROR_FWD_ALREADY_ENABLED));
+ }
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "pit"))
+ {
+ if (unformat (line_input, "size %d", &table_size))
+ {
+ if (!DFLTD_RANGE_OK (table_size, HICN_PARAM_PIT_ENTRIES_MIN,
+ HICN_PARAM_PIT_ENTRIES_MAX))
+ {
+ rv = HICN_ERROR_PIT_CONFIG_SIZE_OOB;
+ break;
+ }
+ node_ctl_params.pit_max_size = table_size;
+ }
+ else if (unformat (line_input, "dfltlife %f", &lifetime))
+ {
+ if (!DFLTD_RANGE_OK
+ (lifetime, HICN_PARAM_PIT_LIFETIME_BOUND_MIN_SEC,
+ HICN_PARAM_PIT_LIFETIME_BOUND_MAX_SEC))
+ {
+ rv = HICN_ERROR_PIT_CONFIG_DFTLT_OOB;
+ break;
+ }
+ node_ctl_params.pit_dflt_lifetime_sec = lifetime;
+ }
+ else if (unformat (line_input, "minlife %f", &lifetime))
+ {
+ if (!DFLTD_RANGE_OK
+ (lifetime, HICN_PARAM_PIT_LIFETIME_BOUND_MIN_SEC,
+ HICN_PARAM_PIT_LIFETIME_BOUND_MAX_SEC))
+ {
+ rv = HICN_ERROR_PIT_CONFIG_MINLT_OOB;
+ break;
+ }
+ node_ctl_params.pit_min_lifetime_sec = lifetime;
+ }
+ else if (unformat (line_input, "maxlife %f", &lifetime))
+ {
+ if (!DFLTD_RANGE_OK
+ (lifetime, HICN_PARAM_PIT_LIFETIME_BOUND_MIN_SEC,
+ HICN_PARAM_PIT_LIFETIME_BOUND_MAX_SEC))
+ {
+ rv = HICN_ERROR_PIT_CONFIG_MAXLT_OOB;
+ break;
+ }
+ node_ctl_params.pit_max_lifetime_sec = lifetime;
+ }
+ else
+ {
+ rv = HICN_ERROR_CLI_INVAL;
+ break;
+ }
+ }
+ else if (unformat (line_input, "cs"))
+ {
+ if (unformat (line_input, "size %d", &table_size))
+ {
+ if (!DFLTD_RANGE_OK (table_size, HICN_PARAM_CS_ENTRIES_MIN,
+ HICN_PARAM_CS_ENTRIES_MAX))
+ {
+ rv = HICN_ERROR_CS_CONFIG_SIZE_OOB;
+ break;
+ }
+ node_ctl_params.cs_max_size = table_size;
+ }
+ else if (unformat (line_input, "app %d", &cs_reserved_app))
+ {
+ /* app share is a percentage of the CS, hence 0..100 */
+ if (!DFLTD_RANGE_OK (cs_reserved_app, 0, 100))
+ {
+ rv = HICN_ERROR_CS_CONFIG_SIZE_OOB;
+ break;
+ }
+ node_ctl_params.cs_reserved_app = cs_reserved_app;
+ }
+ else
+ {
+ rv = HICN_ERROR_CLI_INVAL;
+ break;
+ }
+ }
+ else
+ {
+ rv = HICN_ERROR_CLI_INVAL;
+ break;
+ }
+ }
+
+ if (node_ctl_params.cs_max_size == 0)
+ vlib_cli_output (vm,
+ "CS size set to 0. Consider disable CS at compilation time for better performances\n");
+
+ return (rv == HICN_ERROR_NONE) ? 0 : clib_error_return (0, "%s '%U'",
+ get_error_string
+ (rv),
+ format_unformat_error,
+ line_input);
+}
+
+/*
+ * cli handler for 'hicn show'
+ *
+ * Prints global forwarder state, PIT/CS configuration and statistics.
+ * When the forwarder is not yet enabled only the staged pre-configuration
+ * is shown. Optional selectors: 'face all', 'internal', 'strategies'.
+ */
+static clib_error_t *
+hicn_cli_show_command_fn (vlib_main_t * vm, unformat_input_t * main_input,
+ vlib_cli_command_t * cmd)
+{
+ /* NOTE(review): all_p is only assigned when no selector is given; the
+  * reads below (face/strategies/internal paths) can see it uninitialized
+  * when a selector IS given — looks like it should be initialized to 0. */
+ int face_p = 0, fib_p = 0, all_p, internal_p = 0, strategies_p = 0, ret =
+ HICN_ERROR_NONE;
+
+ /* Get a line of input. */
+ unformat_input_t _line_input, *line_input = &_line_input;
+ if (unformat_user (main_input, unformat_line_input, line_input))
+ {
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "face all"))
+ {
+ face_p = 1;
+ }
+ else if (unformat (line_input, "internal"))
+ {
+ /*
+ * We consider 'internal' a superset, so
+ * include 'detail' too
+ */
+ internal_p = 1;
+ }
+ else if (unformat (line_input, "strategies"))
+ {
+ /*
+ * We consider 'internal' a superset, so
+ * include 'detail' too
+ */
+ strategies_p = 1;
+ }
+ else
+ {
+ ret = HICN_ERROR_CLI_INVAL;
+ goto done;
+ }
+ }
+ }
+ /* If nothing specified, show everything */
+ if ((face_p == 0) && (fib_p == 0) && (strategies_p == 0))
+ {
+ all_p = 1;
+ }
+ if (!hicn_main.is_enabled)
+ {
+ /* Forwarder not started: report staged pre-configuration, if any */
+ if (node_ctl_params.pit_max_size == -1 &&
+ node_ctl_params.pit_dflt_lifetime_sec == -1 &&
+ node_ctl_params.pit_min_lifetime_sec == -1 &&
+ node_ctl_params.pit_max_lifetime_sec == -1 &&
+ node_ctl_params.cs_max_size == -1 &&
+ node_ctl_params.cs_reserved_app == -1)
+ {
+ ret = HICN_ERROR_FWD_NOT_ENABLED;
+ goto done;
+ }
+ vlib_cli_output (vm, "Forwarder: %sabled\nPreconfiguration:\n",
+ hicn_main.is_enabled ? "en" : "dis");
+
+ if (node_ctl_params.pit_max_size != -1)
+ {
+ vlib_cli_output (vm, "  PIT:: max entries:%d\n",
+ node_ctl_params.pit_max_size);
+ }
+ if (node_ctl_params.pit_dflt_lifetime_sec != -1)
+ {
+ vlib_cli_output (vm, "  PIT:: dflt lifetime: %05.3f seconds\n",
+ node_ctl_params.pit_dflt_lifetime_sec);
+ }
+ if (node_ctl_params.pit_min_lifetime_sec != -1)
+ {
+ vlib_cli_output (vm, "  PIT:: min lifetime: %05.3f seconds\n",
+ node_ctl_params.pit_min_lifetime_sec);
+ }
+ if (node_ctl_params.pit_max_lifetime_sec != -1)
+ {
+ vlib_cli_output (vm, "  PIT:: max lifetime: %05.3f seconds\n",
+ node_ctl_params.pit_max_lifetime_sec);
+ }
+ if (node_ctl_params.cs_max_size != -1)
+ {
+ vlib_cli_output (vm, "  CS:: max entries:%d\n",
+ node_ctl_params.cs_max_size);
+ }
+ if (node_ctl_params.cs_reserved_app != -1)
+ {
+ vlib_cli_output (vm, "  CS:: reserved to app:%d\n",
+ node_ctl_params.cs_reserved_app);
+ }
+ goto done;
+ }
+ /* Globals */
+ vlib_cli_output (vm,
+ "Forwarder: %sabled\n"
+ "  PIT:: max entries:%d,"
+ " lifetime default: %05.3f sec (min:%05.3f, max:%05.3f)\n"
+ "  CS:: max entries:%d, network entries:%d, app entries:%d (allocated %d, free %d)\n",
+ hicn_main.is_enabled ? "en" : "dis",
+ hicn_infra_pit_size,
+ ((f64) hicn_main.pit_lifetime_dflt_ms) / SEC_MS,
+ ((f64) hicn_main.pit_lifetime_min_ms) / SEC_MS,
+ ((f64) hicn_main.pit_lifetime_max_ms) / SEC_MS,
+ hicn_infra_cs_size,
+ hicn_infra_cs_size - hicn_main.pitcs.pcs_app_max,
+ hicn_main.pitcs.pcs_app_max,
+ hicn_main.pitcs.pcs_app_count,
+ hicn_main.pitcs.pcs_app_max -
+ hicn_main.pitcs.pcs_app_count);
+
+ vl_api_hicn_api_node_stats_get_reply_t rm = { 0, }
+ , *rmp = &rm;
+ if (hicn_mgmt_node_stats_get (&rm) == HICN_ERROR_NONE)
+ {
+ vlib_cli_output (vm,	//compare vl_api_hicn_api_node_stats_get_reply_t_handler block
+ "  PIT entries (now): %d\n"
+ "  CS total entries (now): %d, network entries (now): %d\n"
+ "  Forwarding statistics:\n"
+ "    pkts_processed: %d\n"
+ "    pkts_interest_count: %d\n"
+ "    pkts_data_count: %d\n"
+ "    pkts_from_cache_count: %d\n"
+ "    interests_aggregated: %d\n"
+ "    interests_retransmitted: %d\n",
+ clib_net_to_host_u64 (rmp->pit_entries_count),
+ clib_net_to_host_u64 (rmp->cs_entries_count),
+ clib_net_to_host_u64 (rmp->cs_entries_ntw_count),
+ clib_net_to_host_u64 (rmp->pkts_processed),
+ clib_net_to_host_u64 (rmp->pkts_interest_count),
+ clib_net_to_host_u64 (rmp->pkts_data_count),
+ clib_net_to_host_u64 (rmp->pkts_from_cache_count),
+ clib_net_to_host_u64 (rmp->interests_aggregated),
+ clib_net_to_host_u64 (rmp->interests_retx));
+ }
+ if (face_p || all_p)
+ {
+ u8 *strbuf = NULL;
+
+ strbuf = format_hicn_face_all (strbuf, 1, 0);
+ vlib_cli_output (vm, "%s", strbuf);
+
+ }
+ if (strategies_p || all_p)
+ {
+ u8 *strbuf = NULL;
+
+ strbuf = format_hicn_strategy_list (strbuf, 1, 0);
+ vlib_cli_output (vm, (char *) strbuf);
+ }
+done:
+ if (all_p && internal_p && ret == HICN_ERROR_NONE)
+ {
+ vlib_cli_output (vm, "Plugin features: cs:%d\n", HICN_FEATURE_CS);
+ vlib_cli_output (vm,
+ "Removed CS entries (and freed vlib buffers) %d, Removed PIT entries %d",
+ hicn_main.pitcs.pcs_cs_dealloc,
+ hicn_main.pitcs.pcs_pit_dealloc);
+
+ }
+ return (ret == HICN_ERROR_NONE) ? 0 : clib_error_return (0, "%s\n",
+ get_error_string
+ (ret));
+}
+
+/*
+ * cli handler for 'fib'
+ *
+ * Parses 'add'/'delete'/'set strategy' sub-commands and dispatches to the
+ * route add/del/strategy helpers. addpfx encodes the operation:
+ * -1 = unset, 0 = delete, 1 = add, 2 = set strategy.
+ */
+static clib_error_t *
+hicn_cli_fib_set_command_fn (vlib_main_t * vm, unformat_input_t * main_input,
+ vlib_cli_command_t * cmd)
+{
+ clib_error_t *cl_err = 0;
+
+ int rv = HICN_ERROR_NONE;
+ int addpfx = -1;
+ /* NOTE(review): prefix is read via ip46_address_is_zero() below even when
+  * no "prefix" token was parsed — presumably it should be zero-initialized
+  * here; confirm. */
+ ip46_address_t prefix;
+ hicn_face_id_t faceid = HICN_FACE_NULL;
+ u32 strategy_id;
+ u8 plen = 0;
+
+ /* Get a line of input. */
+ unformat_input_t _line_input, *line_input = &_line_input;
+ if (!unformat_user (main_input, unformat_line_input, line_input))
+ {
+ return (0);
+ }
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (addpfx == -1 && unformat (line_input, "add"))
+ {
+ addpfx = 1;
+ }
+ else if (addpfx == -1 && unformat (line_input, "delete"))
+ {
+ addpfx = 0;
+ }
+ else if (unformat (line_input, "set strategy %d", &strategy_id))
+ {
+ addpfx = 2;
+ }
+ else if (addpfx != -1
+ && unformat (line_input, "prefix %U/%d", unformat_ip46_address,
+ &prefix, IP46_TYPE_ANY, &plen))
+ {;
+ }
+ else if (addpfx <= 1 && unformat (line_input, "face %u", &faceid))
+ {;
+ }
+ else
+ {
+ cl_err = clib_error_return (0, "%s '%U'",
+ get_error_string (HICN_ERROR_CLI_INVAL),
+ format_unformat_error, line_input);
+ goto done;
+ }
+ }
+
+ /* Check parse */
+ if (addpfx <= 1
+ && ((ip46_address_is_zero (&prefix)) || faceid == HICN_FACE_NULL))
+ {
+ cl_err =
+ clib_error_return (0, "Please specify prefix and a valid faceid...");
+ goto done;
+ }
+ /* Check parse */
+ /* NOTE(review): the second operand rejects strategies for which
+  * hicn_dpo_strategy_id_is_valid() is truthy — verify the intended
+  * polarity of that helper's return value. */
+ if ((ip46_address_is_zero (&prefix))
+ || (addpfx == 2 && hicn_dpo_strategy_id_is_valid (strategy_id)))
+ {
+ cl_err = clib_error_return (0,
+ "Please specify prefix and strategy_id...");
+ goto done;
+ }
+ if (addpfx == 0)
+ {
+ /* delete: whole route when no face given, otherwise one next hop */
+ if (ip46_address_is_zero (&prefix))
+ {
+ cl_err = clib_error_return (0, "Please specify prefix");
+ goto done;
+ }
+ if (faceid == HICN_FACE_NULL)
+ {
+ rv = hicn_route_del (&prefix, plen);
+ }
+ else
+ {
+ rv = hicn_route_del_nhop (&prefix, plen, faceid);
+ }
+ cl_err =
+ (rv == HICN_ERROR_NONE) ? NULL : clib_error_return (0,
+ get_error_string
+ (rv));
+
+ }
+ else if (addpfx == 1)
+ {
+ /* add: create the route, or append a next hop if it already exists */
+ rv = hicn_route_add (&faceid, 1, &prefix, plen);
+ if (rv == HICN_ERROR_ROUTE_ALREADY_EXISTS)
+ {
+ rv = hicn_route_add_nhops (&faceid, 1, &prefix, plen);
+ }
+ cl_err =
+ (rv == HICN_ERROR_NONE) ? NULL : clib_error_return (0,
+ get_error_string
+ (rv));
+ }
+ else if (addpfx == 2)
+ {
+ rv = hicn_route_set_strategy (&prefix, plen, strategy_id);
+ cl_err =
+ (rv == HICN_ERROR_NONE) ? NULL : clib_error_return (0,
+ get_error_string
+ (rv));
+ }
+done:
+
+ return (cl_err);
+}
+
+/*
+ * cli handler for 'punting': installs or removes classifier rules that
+ * punt hICN interest/data packets (plain IP or UDP-encapsulated) from an
+ * interface into the plugin.
+ */
+static clib_error_t *
+hicn_cli_punting_command_fn (vlib_main_t * vm, unformat_input_t * main_input,
+ vlib_cli_command_t * cmd)
+{
+ hicn_mgmt_punting_op_e punting_op = HICN_MGMT_PUNTING_OP_NONE;
+ unsigned int subnet_mask = 0;
+ /* NOTE(review): prefix is tested with ip46_address_is_zero() below but is
+  * never zero-initialized — if no "prefix" token is parsed the check reads
+  * indeterminate stack data; confirm. */
+ ip46_address_t prefix;
+ u32 sw_if_index = ~0;
+ int ret = 0;
+ vnet_main_t *vnm = NULL;
+ u8 type = HICN_PUNT_IP_TYPE;
+ u32 src_port = 0, dst_port = 0;
+ vnm = vnet_get_main ();
+
+ unformat_input_t _line_input, *line_input = &_line_input;
+ if (!unformat_user (main_input, unformat_line_input, line_input))
+ {
+ return (0);
+ }
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "add"))
+ {
+ punting_op = HICN_MGMT_PUNTING_OP_CREATE;
+ }
+ else if (unformat (line_input, "delete"))
+ {
+ punting_op = HICN_MGMT_PUNTING_OP_DELETE;
+ }
+ else if (unformat (line_input, "intfc %U",
+ unformat_vnet_sw_interface, vnm, &sw_if_index))
+ {;
+ }
+ else if (unformat
+ (line_input, "prefix %U/%d", unformat_ip46_address,
+ &prefix, IP46_TYPE_ANY, &subnet_mask))
+ {;
+ }
+ else if (unformat (line_input, "type ip"))
+ {
+ type = HICN_PUNT_IP_TYPE;
+ }
+ else if (unformat (line_input, "type"))
+ {
+ if (unformat (line_input, "udp4"))
+ {
+ type = HICN_PUNT_UDP4_TYPE;
+ }
+ else if (unformat (line_input, "udp6"))
+ {
+ type = HICN_PUNT_UDP6_TYPE;
+ }
+
+ /* optional UDP ports; both must be given for a UDP punt rule */
+ if (unformat (line_input, "src_port %u", &src_port))
+ ;
+ if (unformat (line_input, "dst_port %u", &dst_port))
+ ;
+ }
+ else
+ {
+ return (clib_error_return (0, "invalid option"));
+ }
+ }
+
+ if (punting_op == HICN_MGMT_PUNTING_OP_CREATE
+ && (ip46_address_is_zero (&prefix) || sw_if_index == ~0))
+ {
+ return (clib_error_return
+ (0, "Please specify valid prefix and interface"));
+ }
+ else if ((punting_op == HICN_MGMT_PUNTING_OP_DELETE) &&
+ ip46_address_is_zero (&prefix))
+ {
+ return (clib_error_return
+ (0, "Please specify valid prefix and optionally an interface"));
+ }
+ else if (punting_op == HICN_MGMT_PUNTING_OP_NONE)
+ {
+ return (clib_error_return
+ (0, "Please specify valid operation, add or delete"));
+ }
+ switch (punting_op)
+ {
+ case HICN_MGMT_PUNTING_OP_CREATE:
+ {
+ if (type == HICN_PUNT_UDP4_TYPE || type == HICN_PUNT_UDP6_TYPE)
+ {
+ if (src_port != 0 && dst_port != 0)
+ ret =
+ hicn_punt_interest_data_for_udp (vm, &prefix, subnet_mask,
+ sw_if_index, type,
+ clib_host_to_net_u16
+ (src_port),
+ clib_host_to_net_u16
+ (dst_port));
+ else
+ return (clib_error_return
+ (0,
+ "Please specify valid source and destination udp port"));
+ }
+ else
+ {
+ ret =
+ hicn_punt_interest_data_for_ethernet (vm, &prefix, subnet_mask,
+ sw_if_index, type);
+ }
+ }
+ break;
+ case HICN_MGMT_PUNTING_OP_DELETE:
+ {
+ /* with an interface: disable the whole punt table on it;
+  * otherwise: remove the per-prefix punt rule */
+ if (sw_if_index != ~0)
+ {
+ ip46_address_is_ip4 (&prefix) ?
+ hicn_punt_enable_disable_vnet_ip4_table_on_intf (vm,
+ sw_if_index,
+ 0) :
+ hicn_punt_enable_disable_vnet_ip6_table_on_intf (vm,
+ sw_if_index,
+ 0);
+ }
+ else if (!(ip46_address_is_zero (&prefix)))
+ {
+ ret = ip46_address_is_ip4 (&prefix) ?
+ hicn_punt_remove_ip4_address (vm, &(prefix.ip4), subnet_mask, 1,
+ sw_if_index,
+ 0) :
+ hicn_punt_remove_ip6_address (vm, (ip6_address_t *) & prefix,
+ subnet_mask, 1, sw_if_index, 0);
+ }
+ }
+ break;
+ default:
+ break;
+ }
+
+ return (ret == HICN_ERROR_NONE) ? 0 : clib_error_return (0,
+ get_error_string
+ (ret));
+}
+
+/*
+ * cli handler for 'hicn mapme'.
+ *
+ * Parses an add/delete operation together with a prefix and an
+ * interface and validates the combination.  NOTE(review): no mapme
+ * state is actually modified here yet (ret is never set by any
+ * operation); the handler currently only validates its input.
+ */
+static clib_error_t *
+hicn_cli_mapme_command_fn (vlib_main_t * vm, unformat_input_t * main_input,
+			   vlib_cli_command_t * cmd)
+{
+  hicn_mgmt_mapme_op_e mapme_op = HICN_MGMT_MAPME_OP_NONE;
+  unsigned int subnet_mask = 0;
+  /*
+   * Zero-initialize: prefix is tested with ip46_address_is_zero()
+   * below and stays unset when no "prefix" option is given, so
+   * reading it uninitialized would be undefined behavior.
+   */
+  ip46_address_t prefix = { 0 };
+  u32 sw_if_index = ~0;
+  int ret = 0;
+  vnet_main_t *vnm = NULL;
+
+  vnm = vnet_get_main ();
+
+  unformat_input_t _line_input, *line_input = &_line_input;
+  if (!unformat_user (main_input, unformat_line_input, line_input))
+    {
+      return (0);
+    }
+  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+    {
+      if (unformat (line_input, "add"))
+	{
+	  mapme_op = HICN_MGMT_MAPME_OP_CREATE;
+	}
+      else if (unformat (line_input, "delete"))
+	{
+	  mapme_op = HICN_MGMT_MAPME_OP_DELETE;
+	}
+      else if (unformat (line_input, "intfc %U",
+			 unformat_vnet_sw_interface, vnm, &sw_if_index))
+	{;
+	}
+      else if (unformat
+	       (line_input, "prefix %U/%d", unformat_ip46_address,
+		&prefix, IP46_TYPE_ANY, &subnet_mask))
+	{;
+	}
+      else
+	{
+	  return (clib_error_return (0, "invalid option"));
+	}
+    }
+
+  /* Validate the parsed operation/prefix/interface combination. */
+  if (mapme_op == HICN_MGMT_MAPME_OP_CREATE
+      && (ip46_address_is_zero (&prefix) || sw_if_index == ~0))
+    {
+      return (clib_error_return
+	      (0, "Please specify valid prefix and interface"));
+    }
+  else if ((mapme_op == HICN_MGMT_MAPME_OP_DELETE) &&
+	   ip46_address_is_zero (&prefix))
+    {
+      return (clib_error_return
+	      (0, "Please specify valid prefix and optionally an interface"));
+    }
+  else if (mapme_op == HICN_MGMT_MAPME_OP_NONE)
+    {
+      return (clib_error_return
+	      (0, "Please specify valid operation, add or delete"));
+    }
+  /*
+   * A NULL return is how a vlib CLI handler reports success.  The
+   * original code returned a clib_error ("Punting %s", copy/pasted
+   * from the punting handler) even when ret == HICN_ERROR_NONE, so
+   * every successful invocation was reported as an error.
+   */
+  return (ret == HICN_ERROR_NONE) ? 0 :
+    clib_error_return (0, get_error_string (ret));
+}
+
+/*
+ * cli handler for 'hicn pgen client'.
+ *
+ * Configures the packet generator in client mode: sets up punting of
+ * Data packets matching the pg source address / hicn name towards the
+ * pg data node, and chains the pg input node to the interest
+ * generator node.
+ */
+static clib_error_t *
+hicn_cli_pgen_client_set_command_fn (vlib_main_t * vm,
+				     unformat_input_t * main_input,
+				     vlib_cli_command_t * cmd)
+{
+  hicn_main_t *sm = &hicn_main;
+  hicnpg_main_t *hpgm = &hicnpg_main;
+  /*
+   * Zero-initialize: both addresses are inspected with
+   * ip46_address_is_ip4() below even when the corresponding option is
+   * missing, which would otherwise read uninitialized stack memory.
+   */
+  ip46_address_t src_addr = { 0 }, hicn_name = { 0 };
+  vnet_main_t *vnm = vnet_get_main ();
+  u32 sw_if_index = ~0;
+  u16 lifetime = 4000;
+  int rv = VNET_API_ERROR_UNIMPLEMENTED;
+  u32 max_seq = ~0;
+  u32 n_flows = ~0;
+  u32 mask = 0;
+  u32 n_ifaces = 1;
+  u32 hicn_underneath = ~0;
+
+  /* Get a line of input. */
+  unformat_input_t _line_input, *line_input = &_line_input;
+  if (unformat_user (main_input, unformat_line_input, line_input))
+    {
+      while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+	{
+	  if (unformat (line_input, "fwd"))
+	    {
+	      if (unformat (line_input, "ip"))
+		hicn_underneath = 0;
+	      else if (unformat (line_input, "hicn"))
+		hicn_underneath = 1;
+	      /*
+	       * Restart the option loop: falling through to the chain
+	       * below with the input exhausted would hit the "Unknown
+	       * input" branch and turn a valid command into an error.
+	       */
+	      continue;
+	    }
+	  if (unformat
+	      (line_input, "intfc %U", unformat_vnet_sw_interface, vnm,
+	       &sw_if_index))
+	    {
+	      ;
+	    }
+	  else if (unformat (line_input, "src %U",
+			     unformat_ip46_address, &src_addr))
+	    {
+	      ;
+	    }
+	  else if (unformat (line_input, "n_ifaces %d", &n_ifaces))
+	    {
+	      ;
+	    }
+	  else if (unformat (line_input, "name %U/%d",
+			     unformat_ip46_address, &hicn_name, IP46_TYPE_ANY,
+			     &mask))
+	    {
+	      ;
+	    }
+	  else if (unformat (line_input, "lifetime %d", &lifetime))
+	    {
+	      ;
+	    }
+	  else if (unformat (line_input, "max_seq %d", &max_seq))
+	    {
+	      ;
+	    }
+	  else if (unformat (line_input, "n_flows %d", &n_flows))
+	    {
+	      ;
+	    }
+	  else
+	    {
+	      /* Unreachable "break" after "return" removed. */
+	      return (clib_error_return
+		      (0, "Unknown input '%U'", format_unformat_error,
+		       line_input));
+	    }
+	}
+    }
+  hpgm->interest_lifetime = lifetime;
+
+  if (sw_if_index == ~0)
+    {
+      return (clib_error_return (0, "Packet generator interface missing"));
+    }
+  if (hicn_underneath == ~0)
+    {
+      return (clib_error_return
+	      (0, "Choose the underlying forwarder type ip|hicn"));
+    }
+  else if (hicn_underneath && !sm->is_enabled)
+    {
+      return (clib_error_return (0, "hICN not enabled in VPP"));
+    }
+  else if (!hicn_underneath && sm->is_enabled)
+    {
+      return (clib_error_return (0, "hICN enabled in VPP"));
+    }
+  /*
+   * Both addresses are required below to pick the v4/v6 punting path;
+   * reject the command explicitly instead of programming zero
+   * addresses into the classifier tables.
+   */
+  if (ip46_address_is_zero (&src_addr) || ip46_address_is_zero (&hicn_name))
+    {
+      return (clib_error_return
+	      (0, "Please specify src address and hicn name"));
+    }
+
+  int skip = 1;
+  int base_offset = ETH_L2;
+  u8 use_current_data = HICN_CLASSIFY_NO_CURRENT_DATA_FLAG;
+
+  /* On a pure IP interface there is no L2 header to skip. */
+  if (hicn_cli_is_ip_interface (vm, vnm, sw_if_index) == IP)
+    {
+      skip = 0;
+      base_offset = NO_L2;
+      use_current_data = HICN_CLASSIFY_CURRENT_DATA_FLAG;
+    }
+  /*
+   * Register punting on src address generated by pg and data punting
+   * on the name
+   */
+  if (ip46_address_is_ip4 (&src_addr) && ip46_address_is_ip4 (&hicn_name))
+    {
+      /* Add data node to the vpp graph */
+      u32 next_hit_node = vlib_node_add_next (vm,
+					      hicn_punt_glb.
+					      hicn_node_info.ip4_inacl_node_index,
+					      hicn_pg_data_node.index);
+
+      /* Add pgen_client node to the vpp graph */
+      vlib_node_add_next (vm,
+			  pg_input_node.index, hicn_pg_interest_node.index);
+
+      /* Create the punting table if it does not exist */
+      hicn_punt_add_vnettbl (&ipv4, &ipv4_src, mask, ~0, sw_if_index,
+			     base_offset, use_current_data);
+      hicn_punt_add_vnettbl (&ipv4, &ipv4_dst, mask,
+			     hicn_punt_glb.ip4_vnet_tbl_idx[sw_if_index][skip]
+			     [HICN_PUNT_SRC][mask], sw_if_index, base_offset,
+			     use_current_data);
+
+      /* Add a session to the table */
+      hicn_punt_add_vnetssn (&ipv4, &ipv4_src,
+			     &hicn_name, mask,
+			     next_hit_node, sw_if_index, base_offset);
+
+      /*
+       * NOTE(review): this call is identical to the one above (same
+       * &ipv4_src); the server handler programs the _dst field at this
+       * point.  Kept as-is to preserve behavior - verify whether
+       * &ipv4_dst was intended here.
+       */
+      hicn_punt_add_vnetssn (&ipv4, &ipv4_src,
+			     &hicn_name, mask,
+			     next_hit_node, sw_if_index, base_offset);
+
+      hicn_punt_enable_disable_vnet_ip4_table_on_intf (vm, sw_if_index,
+						       OP_ENABLE);
+
+      pg_node_t *pn;
+      pn = pg_get_node (hicn_pg_interest_node.index);
+      pn->unformat_edit = unformat_pg_ip4_header;
+
+    }
+  else if (!ip46_address_is_ip4 (&src_addr)
+	   && !ip46_address_is_ip4 (&hicn_name))
+    {
+      /* Add node to the vpp graph */
+      u32 next_hit_node = vlib_node_add_next (vm,
+					      hicn_punt_glb.hicn_node_info.
+					      ip6_inacl_node_index,
+					      hicn_pg_data_node.index);
+
+      /* Add pgen_client node to the vpp graph */
+      vlib_node_add_next (vm, pg_input_node.index,
+			  hicn_pg_interest_node.index);
+
+      /* Create the punting table if it does not exist */
+      hicn_punt_add_vnettbl (&ipv6, &ipv6_src, mask, ~0, sw_if_index,
+			     base_offset, use_current_data);
+      hicn_punt_add_vnettbl (&ipv6, &ipv6_dst, mask,
+			     hicn_punt_glb.ip6_vnet_tbl_idx[sw_if_index][skip]
+			     [HICN_PUNT_SRC][mask], sw_if_index, base_offset,
+			     use_current_data);
+
+      /* Add a session to the table */
+      hicn_punt_add_vnetssn (&ipv6, &ipv6_src,
+			     &hicn_name, mask,
+			     next_hit_node, sw_if_index, base_offset);
+
+      /*
+       * NOTE(review): duplicated call on &ipv6_src, see the v4 branch
+       * above - verify whether &ipv6_dst was intended.
+       */
+      hicn_punt_add_vnetssn (&ipv6, &ipv6_src,
+			     &hicn_name, mask,
+			     next_hit_node, sw_if_index, base_offset);
+
+      hicn_punt_enable_disable_vnet_ip6_table_on_intf (vm, sw_if_index,
+						       OP_ENABLE);
+
+      pg_node_t *pn;
+      pn = pg_get_node (hicn_pg_interest_node.index);
+      pn->unformat_edit = unformat_pg_ip6_header;
+    }
+  else
+    {
+      return (clib_error_return
+	      (0,
+	       "pg interface source address, source address and hicn name must be of the same type IPv4 or IPv6"));
+    }
+
+
+  /* Persist the parsed configuration for the generator nodes. */
+  hpgm->pgen_clt_src_addr = src_addr;
+  hpgm->pgen_clt_hicn_name = hicn_name;
+  hpgm->max_seq_number = max_seq;
+  hpgm->n_flows = n_flows;
+  hpgm->n_ifaces = n_ifaces;
+  hpgm->hicn_underneath = hicn_underneath;
+  vlib_cli_output (vm, "ifaces %d", hpgm->n_ifaces);
+  rv = 0;
+
+  /* rv is always 0 here; the switch is kept for symmetry with the
+   * server handler. */
+  switch (rv)
+    {
+    case 0:
+      break;
+
+    case VNET_API_ERROR_UNIMPLEMENTED:
+      return clib_error_return (0, "Unimplemented, NYI");
+      break;
+
+    default:
+      return clib_error_return (0, "hicn enable_disable returned %d", rv);
+    }
+
+  return 0;
+}
+
+/*
+ * cli handler for 'hicn pgen server'.
+ *
+ * Configures the packet generator in server mode: allocates the
+ * payload buffer and sets up punting of Interests matching the hicn
+ * name towards the pg server node.
+ */
+static clib_error_t *
+hicn_cli_pgen_server_set_command_fn (vlib_main_t * vm,
+				     unformat_input_t * main_input,
+				     vlib_cli_command_t * cmd)
+{
+  clib_error_t *cl_err;
+  int rv = HICN_ERROR_NONE;
+  hicnpg_server_main_t *pg_main = &hicnpg_server_main;
+  hicn_main_t *sm = &hicn_main;
+  /*
+   * Zero-initialize: both are read below even when the "name" option
+   * is missing, which would otherwise be undefined behavior.
+   */
+  ip46_address_t hicn_name = { 0 };
+  u32 subnet_mask = 0;
+  int payload_size = 0;
+  u32 sw_if_index = ~0;
+  vnet_main_t *vnm = vnet_get_main ();
+  u32 hicn_underneath = ~0;
+
+  /* Get a line of input. */
+  unformat_input_t _line_input, *line_input = &_line_input;
+  if (unformat_user (main_input, unformat_line_input, line_input))
+    {
+      /* Parse the arguments */
+      while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+	{
+	  if (unformat (line_input, "fwd"))
+	    {
+	      if (unformat (line_input, "ip"))
+		hicn_underneath = 0;
+	      else if (unformat (line_input, "hicn"))
+		hicn_underneath = 1;
+	      /*
+	       * Restart the option loop: falling through to the chain
+	       * below with the input exhausted would hit the "Unknown
+	       * input" branch and turn a valid command into an error.
+	       */
+	      continue;
+	    }
+	  if (unformat (line_input, "name %U/%d",
+			unformat_ip46_address, &hicn_name, IP46_TYPE_ANY,
+			&subnet_mask))
+	    {;
+	    }
+	  else if (unformat (line_input, "size %d", &payload_size))
+	    {
+	      if (payload_size > 1440)
+		{
+		  return (clib_error_return (0,
+					     "Payload size must be <= 1440 bytes..."));
+		}
+	    }
+	  else
+	    if (unformat
+		(line_input, "intfc %U", unformat_vnet_sw_interface, vnm,
+		 &sw_if_index))
+	    {
+	      ;
+	    }
+	  else
+	    {
+	      /* Unreachable "break" after "return" removed. */
+	      return (clib_error_return
+		      (0, "Unknown input '%U'", format_unformat_error,
+		       line_input));
+	    }
+	}
+    }
+  /*
+   * Attach our packet-gen node for ip4 udp local traffic.
+   * "<= 0" (not "== 0"): a negative size parsed by %d would pass the
+   * old check and then be handed to memset() as a huge size_t.
+   */
+  if (payload_size <= 0 || sw_if_index == ~0)
+    {
+      return clib_error_return (0,
+				"Error: must supply local port, payload size and incoming interface");
+    }
+  if (hicn_underneath == ~0)
+    {
+      return (clib_error_return
+	      (0, "Choose the underlying forwarder type ip|hicn"));
+    }
+  else if (hicn_underneath && !sm->is_enabled)
+    {
+      return (clib_error_return (0, "hICN not enabled in VPP"));
+    }
+  else if (!hicn_underneath && sm->is_enabled)
+    {
+      return (clib_error_return (0, "hICN enabled in VPP"));
+    }
+  /*
+   * A name is mandatory; this also protects the
+   * ...[subnet_mask - 1] table index below from underflowing when
+   * subnet_mask is 0.
+   */
+  if (ip46_address_is_zero (&hicn_name) || subnet_mask == 0)
+    {
+      return (clib_error_return
+	      (0, "Please specify a valid hicn name prefix"));
+    }
+  pg_main->hicn_underneath = hicn_underneath;
+
+  /* Allocate the buffer with the actual content payload TLV */
+  vlib_buffer_alloc (vm, &pg_main->pgen_svr_buffer_idx, 1);
+  vlib_buffer_t *rb = NULL;
+  rb = vlib_get_buffer (vm, pg_main->pgen_svr_buffer_idx);
+
+  /* Initialize the buffer data with zeros */
+  memset (rb->data, 0, payload_size);
+  rb->current_length = payload_size;
+
+  int skip = 2;
+  int base_offset = ETH_L2;
+  u8 use_current_data = HICN_CLASSIFY_NO_CURRENT_DATA_FLAG;
+
+  /* On a pure IP interface there is no L2 header to skip. */
+  if (hicn_cli_is_ip_interface (vm, vnm, sw_if_index) == IP)
+    {
+      skip = 1;
+      base_offset = NO_L2;
+      use_current_data = HICN_CLASSIFY_CURRENT_DATA_FLAG;
+    }
+  if (ip46_address_is_ip4 (&hicn_name))
+    {
+      /* Add node to the vpp graph */
+      u32 next_hit_node = vlib_node_add_next (vm,
+					      hicn_punt_glb.
+					      hicn_node_info.ip4_inacl_node_index,
+					      hicn_pg_server_node.index);
+
+      /* Create the punting table if it does not exist */
+      hicn_punt_add_vnettbl (&ipv4, &ipv4_src, subnet_mask, ~0, sw_if_index,
+			     base_offset, use_current_data);
+      hicn_punt_add_vnettbl (&ipv4, &ipv4_dst, subnet_mask,
+			     hicn_punt_glb.ip4_vnet_tbl_idx[sw_if_index][skip]
+			     [HICN_PUNT_SRC][subnet_mask - 1], sw_if_index,
+			     base_offset, use_current_data);
+
+
+      /* Add a session to the table */
+      hicn_punt_add_vnetssn (&ipv4, &ipv4_dst,
+			     (ip46_address_t *) & (hicn_name.ip4),
+			     subnet_mask, next_hit_node, sw_if_index,
+			     base_offset);
+
+      hicn_punt_enable_disable_vnet_ip4_table_on_intf (vm, sw_if_index,
+						       OP_ENABLE);
+
+    }
+  else
+    {
+      /* Add node to the vpp graph */
+      u32 next_hit_node = vlib_node_add_next (vm,
+					      hicn_punt_glb.hicn_node_info.
+					      ip6_inacl_node_index,
+					      hicn_pg_server_node.index);
+
+      /* Create the punting table if it does not exist */
+      hicn_punt_add_vnettbl (&ipv6, &ipv6_src, subnet_mask, ~0, sw_if_index,
+			     base_offset, use_current_data);
+      hicn_punt_add_vnettbl (&ipv6, &ipv6_dst, subnet_mask,
+			     hicn_punt_glb.ip6_vnet_tbl_idx[sw_if_index][skip]
+			     [HICN_PUNT_SRC][subnet_mask - 1], sw_if_index,
+			     base_offset, use_current_data);
+
+
+      /* Add a session to the table */
+      hicn_punt_add_vnetssn (&ipv6, &ipv6_dst,
+			     (ip46_address_t *) & (hicn_name.ip6),
+			     subnet_mask, next_hit_node, sw_if_index,
+			     base_offset);
+
+      hicn_punt_enable_disable_vnet_ip6_table_on_intf (vm, sw_if_index,
+						       OP_ENABLE);
+    }
+
+  /* rv is always HICN_ERROR_NONE here; kept for future error paths. */
+  switch (rv)
+    {
+    case 0:
+      cl_err = 0;
+      break;
+
+    case VNET_API_ERROR_UNIMPLEMENTED:
+      cl_err = clib_error_return (0, "Unimplemented, NYI");
+      break;
+
+    default:
+      cl_err = clib_error_return (0, "hicn pgen server returned %d", rv);
+    }
+
+  return cl_err;
+}
+
+/* cli declaration for 'control start' */
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND(hicn_cli_node_ctl_start_set_command, static)=
+{
+  .path = "hicn control start",
+  .short_help = "hicn control start",
+  .function = hicn_cli_node_ctl_start_set_command_fn,
+};
+
+
+/* cli declaration for 'control stop' */
+VLIB_CLI_COMMAND(hicn_cli_node_ctl_stop_set_command, static)=
+{
+  .path = "hicn control stop",
+  .short_help = "hicn control stop",
+  .function = hicn_cli_node_ctl_stop_set_command_fn,
+};
+
+
+/* cli declaration for 'control param' */
+VLIB_CLI_COMMAND(hicn_cli_node_ctl_param_set_command, static)=
+{
+  .path = "hicn control param",
+  .short_help = "hicn control param { pit { size <entries> | { dfltlife | minlife | maxlife } <seconds> } | fib size <entries> | cs {size <entries> | app <portion to reserved to app>} }\n",
+  .function = hicn_cli_node_ctl_param_set_command_fn,
+};
+
+/* cli declaration for 'control' (root path of multiple commands, for help) */
+VLIB_CLI_COMMAND(hicn_cli_node_ctl_command, static)=
+{
+  .path = "hicn control",
+  .short_help = "hicn control"
+};
+
+/* cli declaration for 'fib' */
+VLIB_CLI_COMMAND(hicn_cli_fib_set_command, static)=
+{
+  .path = "hicn fib",
+  .short_help = "hicn fib {{add | delete } prefix <prefix> face <faceid> }"
+  " | set strategy <strategy_id> prefix <prefix>",
+  .function = hicn_cli_fib_set_command_fn,
+};
+
+/* cli declaration for 'show' */
+VLIB_CLI_COMMAND(hicn_cli_show_command, static)=
+{
+  .path = "hicn show",
+  .short_help = "hicn show "
+  "[detail] [internal]"
+  "[strategies]",
+  .function = hicn_cli_show_command_fn,
+};
+
+/* cli declaration for 'punting' */
+VLIB_CLI_COMMAND(hicn_cli_punting_command, static)=
+{
+  .path = "hicn punting",
+  .short_help = "hicn punting {add|delete} prefix <ip_address/mask> intfc <interface> type <ip/udp>",
+  .function = hicn_cli_punting_command_fn,
+};
+
+/*
+ * cli declaration for 'mapme'.
+ * NOTE(review): the short_help advertises {enable|disable|set}, but the
+ * handler (hicn_cli_mapme_command_fn) parses {add|delete} with prefix
+ * and intfc options - confirm which interface is intended.
+ */
+VLIB_CLI_COMMAND(hicn_cli_mapme_command, static)=
+{
+  .path = "hicn mapme",
+  .short_help = "hicn mapme {enable|disable|set <param> <value>}",
+  .function = hicn_cli_mapme_command_fn,
+};
+
+/* cli declaration for 'hicn pgen client' */
+VLIB_CLI_COMMAND(hicn_cli_pgen_client_set_command, static)=
+{
+  .path = "hicn pgen client",
+  .short_help = "hicn pgen client fwd <ip|hicn> src <addr> n_ifaces <n_ifaces> name <addr/subnet> lifetime <interest-lifetime> intfc <data in-interface> max_seq <max sequence number> n_flows <number of flows>",
+  .long_help = "Run hicn in packet-gen client mode\n",
+  .function = hicn_cli_pgen_client_set_command_fn,
+};
+
+/* cli declaration for 'hicn pgen server' */
+VLIB_CLI_COMMAND(hicn_cli_pgen_server_set_command, static)=
+{
+  .path = "hicn pgen server",
+  .short_help = "hicn pgen server fwd <ip|hicn> name <addr/subnet> intfc <interest in-interface> size <payload_size>",
+  .long_help = "Run hicn in packet-gen server mode\n",
+  .function = hicn_cli_pgen_server_set_command_fn,
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/data_fwd.h b/hicn-plugin/src/data_fwd.h
new file mode 100755
index 000000000..7390382ef
--- /dev/null
+++ b/hicn-plugin/src/data_fwd.h
@@ -0,0 +1,209 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HICN_DATA_FWD_H__
+#define __HICN_DATA_FWD_H__
+
+#include <vlib/buffer.h>
+
+#include "pcs.h"
+
+/*
+ * Node context data; presumably one instance per thread/worker
+ * (NOTE(review): not confirmed from this file).
+ */
+typedef struct hicn_data_fwd_runtime_s
+{
+  /* Combined (packets+bytes) counters for this node. */
+  vlib_combined_counter_main_t repm_counters;
+
+  /* per-cpu vector of cloned packets */
+  u32 **clones;
+} hicn_data_fwd_runtime_t;
+
+/* Trace context struct captured per traced packet. */
+typedef struct
+{
+  u32 next_index;		/* next node the packet was enqueued to */
+  u32 sw_if_index;		/* RX software interface index */
+  u8 pkt_type;			/* HICN_PKT_TYPE_* */
+  u8 packet_data[64];		/* first bytes of the packet for display */
+} hicn_data_fwd_trace_t;
+
+/* Next-node indices for the data forwarding node. */
+typedef enum
+{
+  HICN_DATA_FWD_NEXT_V4_LOOKUP,
+  HICN_DATA_FWD_NEXT_V6_LOOKUP,
+  HICN_DATA_FWD_NEXT_ERROR_DROP,
+  HICN_DATA_FWD_N_NEXT,
+} hicn_data_fwd_next_t;
+
+/**
+ *@brief Create a maximum of 256 clones of buffer and store them
+ * in the supplied array. Unlike the original function in the vlib
+ * library, we don't prevent cloning if n_buffer==1 and if
+ * s->current_length <= head_end_offset + CLIB_CACHE_LINE_BYTES * 2.
+ *
+ * @param vm - (vlib_main_t *) vlib main data structure pointer
+ * @param src_buffer - (u32) source buffer index
+ * @param buffers - (u32 * ) buffer index array
+ * @param n_buffers - (u16) number of buffer clones requested (<=256)
+ * @param head_end_offset - (u16) offset relative to current position
+ * where packet head ends
+ * @return - (u16) number of buffers actually cloned, may be
+ * less than the number requested or zero
+ */
+always_inline u16
+vlib_buffer_clone_256_2 (vlib_main_t * vm, u32 src_buffer, u32 * buffers,
+			 u16 n_buffers, u16 head_end_offset)
+{
+  u16 i;
+  vlib_buffer_t *s = vlib_get_buffer (vm, src_buffer);
+
+  ASSERT (n_buffers);
+  ASSERT (n_buffers <= 256);
+
+  if (s->current_length <= CLIB_CACHE_LINE_BYTES * 2)
+    {
+      /* Source is small: make full copies instead of head clones. */
+      for (i = 0; i < n_buffers; i++)
+	{
+	  vlib_buffer_t *d;
+	  d = vlib_buffer_copy (vm, s);
+	  /*
+	   * Check the copy result BEFORE touching d: the original code
+	   * dereferenced d in clib_memcpy() ahead of the NULL test,
+	   * which is undefined behavior when the copy fails.
+	   */
+	  if (d == 0)
+	    return i;
+	  clib_memcpy (d->opaque2, s->opaque2, sizeof (s->opaque2));
+	  buffers[i] = vlib_get_buffer_index (vm, d);
+	}
+      /* Advance the source past its head, mirroring the clone path. */
+      s->current_data += head_end_offset;
+      s->current_length -= head_end_offset;
+      return n_buffers;
+    }
+  /* Allocate one head buffer per requested clone. */
+  n_buffers = vlib_buffer_alloc_from_free_list (vm, buffers, n_buffers,
+						vlib_buffer_get_free_list_index
+						(s));
+
+  for (i = 0; i < n_buffers; i++)
+    {
+      vlib_buffer_t *d = vlib_get_buffer (vm, buffers[i]);
+      d->current_data = s->current_data;
+      d->current_length = head_end_offset;
+      d->trace_index = s->trace_index;
+      vlib_buffer_set_free_list_index (d,
+				       vlib_buffer_get_free_list_index (s));
+
+      d->total_length_not_including_first_buffer = s->current_length -
+	head_end_offset;
+      if (PREDICT_FALSE (s->flags & VLIB_BUFFER_NEXT_PRESENT))
+	{
+	  d->total_length_not_including_first_buffer +=
+	    s->total_length_not_including_first_buffer;
+	}
+      /* Each clone chains the shared source tail after its own head. */
+      d->flags = s->flags | VLIB_BUFFER_NEXT_PRESENT;
+      d->flags &= ~VLIB_BUFFER_EXT_HDR_VALID;
+      clib_memcpy (d->opaque, s->opaque, sizeof (s->opaque));
+      clib_memcpy (d->opaque2, s->opaque2, sizeof (s->opaque2));
+      clib_memcpy (vlib_buffer_get_current (d), vlib_buffer_get_current (s),
+		   head_end_offset);
+      d->next_buffer = src_buffer;
+    }
+  /* The shared tail now starts after the copied head. */
+  vlib_buffer_advance (s, head_end_offset);
+  /* Every buffer in the shared chain holds n_buffers - 1 extra refs. */
+  s->n_add_refs = n_buffers - 1;
+  while (s->flags & VLIB_BUFFER_NEXT_PRESENT)
+    {
+      s = vlib_get_buffer (vm, s->next_buffer);
+      s->n_add_refs = n_buffers - 1;
+    }
+
+  return n_buffers;
+}
+
+/**
+ * @brief Create multiple clones of buffer and store them
+ * in the supplied array. Unlike the function in the vlib library,
+ * we allow src_buffer to have n_add_refs != 0.
+ *
+ * @param vm - (vlib_main_t *) vlib main data structure pointer
+ * @param src_buffer - (u32) source buffer index
+ * @param buffers - (u32 * ) buffer index array
+ * @param n_buffers - (u16) number of buffer clones requested (<=256)
+ * @param head_end_offset - (u16) offset relative to current position
+ * where packet head ends
+ * @return - (u16) number of buffers actually cloned, may be
+ * less than the number requested or zero
+ */
+always_inline u16
+vlib_buffer_clone2 (vlib_main_t * vm, u32 src_buffer, u32 * buffers,
+		    u16 n_buffers, u16 head_end_offset)
+{
+  ASSERT (head_end_offset >= VLIB_BUFFER_MIN_CHAIN_SEG_SIZE);
+
+  vlib_buffer_t *s = vlib_get_buffer (vm, src_buffer);
+
+  /*
+   * total_length_not_including_first_buffer is not initialized to 0
+   * when a buffer is used.
+   * NOTE(review): next_buffer is tested without checking
+   * VLIB_BUFFER_NEXT_PRESENT - confirm that 0 reliably means "no
+   * chained buffer" here.
+   */
+  if (PREDICT_TRUE (s->next_buffer == 0))
+    s->total_length_not_including_first_buffer = 0;
+
+  u16 n_cloned = 0;
+  /* Remaining reference headroom on the source (n_add_refs is a u8,
+   * so at most 255 additional references fit). */
+  u8 n_clone_src = 255 - s->n_add_refs;
+
+  /*
+   * We need to copy src for all the clones that cannot be chained in
+   * the src_buffer
+   */
+  if (n_buffers > n_clone_src)
+    {
+      vlib_buffer_t *copy;
+      /* Ok to call the original vlib_buffer_copy. */
+      copy = vlib_buffer_copy (vm, s);
+      /* Clone the overflow from the fresh copy, which has no refs yet. */
+      n_cloned += vlib_buffer_clone (vm,
+				     vlib_get_buffer_index (vm, copy),
+				     buffers,
+				     n_buffers - n_clone_src,
+				     head_end_offset);
+      n_buffers -= n_cloned;
+    }
+  /*
+   * vlib_buffer_clone_256 checks if n_add_refs is 0. We force it to be
+   * 0 before calling the function and we restore it to the right value
+   * after the function has been called
+   */
+  u8 tmp_n_add_refs = s->n_add_refs;
+
+  s->n_add_refs = 0;
+  /*
+   * The regular vlib_buffer_clone_256 does copy if we need to clone
+   * only one packet. While this is not a problem per se, it adds
+   * complexity to the code, especially because we need to add 1 to
+   * n_add_refs when the packet is cloned.
+   */
+  n_cloned += vlib_buffer_clone_256_2 (vm,
+				       src_buffer,
+				       (buffers + n_cloned),
+				       n_buffers, head_end_offset);
+
+  /* Restore the saved reference count on top of what cloning added. */
+  s->n_add_refs += tmp_n_add_refs;
+
+  return n_cloned;
+}
+
+#endif /* //__HICN_DATA_FWD_H__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/data_fwd_node.c b/hicn-plugin/src/data_fwd_node.c
new file mode 100755
index 000000000..088683fe0
--- /dev/null
+++ b/hicn-plugin/src/data_fwd_node.c
@@ -0,0 +1,541 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/dpo/dpo.h>
+
+#include "data_fwd.h"
+#include "mgmt.h"
+#include "parser.h"
+#include "infra.h"
+#include "strategy.h"
+#include "strategy_dpo_manager.h"
+#include "state.h"
+#include "error.h"
+
+/* Stats string values, expanded from the foreach_hicnfwd_error X-macro
+ * so they stay in sync with the error enum. */
+static char *hicn_data_fwd_error_strings[] = {
+#define _(sym, string) string,
+  foreach_hicnfwd_error
+#undef _
+};
+
+/* Declarations (definitions follow the node function below) */
+
+/* Enqueue bi0 on the error-drop next and validate the enqueue. */
+always_inline void
+drop_packet (vlib_main_t * vm, u32 bi0,
+	     u32 * n_left_to_next, u32 * next0, u32 ** to_next,
+	     u32 * next_index, vlib_node_runtime_t * node);
+
+/* Clone the data buffer to every face of the PIT entry and enqueue. */
+always_inline int
+hicn_satisfy_faces (vlib_main_t * vm, u32 b0,
+		    hicn_pcs_entry_t * pitp, u32 * n_left_to_next,
+		    u32 ** to_next, u32 * next_index,
+		    vlib_node_runtime_t * node, u8 isv6,
+		    vl_api_hicn_api_node_stats_get_reply_t * stats);
+
+/* Store the data packet in the CS, turning the PIT entry into a CS entry. */
+always_inline void
+clone_data_to_cs (vlib_main_t * vm, hicn_pit_cs_t * pitcs,
+		  hicn_pcs_entry_t * pitp, hicn_header_t * hicn0, f64 tnow,
+		  hicn_hash_node_t * nodep, vlib_buffer_t * b0,
+		  hicn_hash_entry_t * hash_entry, u64 name_hash,
+		  hicn_buffer_t * hicnb, const hicn_dpo_vft_t * dpo_vft,
+		  dpo_id_t * hicn_dpo_id);
+
+
+/* packet trace format function */
+always_inline u8 *hicn_data_fwd_format_trace (u8 * s, va_list * args);
+
+vlib_node_registration_t hicn_data_fwd_node;
+
+/*
+ * hICN Data forwarding node: for each received Data packet, looks up
+ * the PCS state prepared by the data_pcslookup node, satisfies the
+ * pending interest faces and (when the CS feature is on) stores the
+ * packet in the content store.
+ * - 1 packet at a time - ipv4/tcp ipv6/tcp
+ */
+static uword
+hicn_data_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
+		   vlib_frame_t * frame)
+{
+
+  u32 n_left_from, *from, *to_next;
+  hicn_data_fwd_next_t next_index;
+  hicn_pit_cs_t *pitcs = &hicn_main.pitcs;
+  vl_api_hicn_api_node_stats_get_reply_t stats = { 0 };
+  f64 tnow;
+  /* Starts at 1 so the aggregation-ratio division at the end of the
+   * function never divides by zero. */
+  u32 data_received = 1;
+
+  from = vlib_frame_vector_args (frame);
+  n_left_from = frame->n_vectors;
+  next_index = node->cached_next_index;
+
+  /* Capture time in vpp terms */
+  tnow = vlib_time_now (vm);
+
+  while (n_left_from > 0)
+    {
+      u32 n_left_to_next;
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+      while (n_left_from > 0 && n_left_to_next > 0)
+	{
+	  vlib_buffer_t *b0;
+	  u8 isv6;
+	  u8 *nameptr;
+	  u16 namelen;
+	  u32 bi0;
+	  u32 next0 = HICN_DATA_FWD_NEXT_ERROR_DROP;
+	  hicn_name_t name;
+	  hicn_header_t *hicn0;
+	  hicn_buffer_t *hicnb0;
+	  hicn_hash_node_t *node0;
+	  const hicn_strategy_vft_t *strategy_vft0;
+	  const hicn_dpo_vft_t *dpo_vft0;
+	  u8 dpo_ctx_id0;
+	  hicn_pcs_entry_t *pitp;
+	  hicn_hash_entry_t *hash_entry0;
+	  int ret = HICN_ERROR_NONE;
+
+	  /* Prefetch for next iteration. */
+	  if (n_left_from > 1)
+	    {
+	      vlib_buffer_t *b1;
+	      b1 = vlib_get_buffer (vm, from[1]);
+	      CLIB_PREFETCH (b1, 2 * CLIB_CACHE_LINE_BYTES, STORE);
+	      CLIB_PREFETCH (b1->data, CLIB_CACHE_LINE_BYTES, STORE);
+
+	      /* HICN PREFETCH: warm up the PCS entry of the next packet */
+	      hicn_buffer_t *hicnb1 = hicn_get_buffer (b1);
+	      hicn_prefetch_pcs_entry (hicnb1, pitcs);
+	    }
+	  /* Dequeue a packet buffer */
+	  /*
+	   * Do not copy the index in the next buffer, we'll do
+	   * it later. The packet might be cloned, so the buffer to move
+	   * to next must be the cloned one
+	   */
+	  bi0 = from[0];
+	  from += 1;
+	  n_left_from -= 1;
+
+	  b0 = vlib_get_buffer (vm, bi0);
+
+	  /* Get hicn buffer and state */
+	  hicnb0 = hicn_get_buffer (b0);
+	  hicn_get_internal_state (hicnb0, pitcs, &node0, &strategy_vft0,
+				   &dpo_vft0, &dpo_ctx_id0, &hash_entry0);
+
+	  ret = hicn_data_parse_pkt (b0, &name, &namelen, &hicn0, &isv6);
+	  pitp = hicn_pit_get_data (node0);
+	  nameptr = (u8 *) (&name);
+
+	  /*
+	   * Drop on parse failure, on a hash collision (name does not
+	   * match the stored node) or when the entry is already a CS
+	   * entry.
+	   */
+	  if (PREDICT_FALSE
+	      (ret != HICN_ERROR_NONE
+	       || !hicn_node_compare (nameptr, namelen, node0)
+	       || (hash_entry0->he_flags & HICN_HASH_ENTRY_FLAG_CS_ENTRY)))
+	    {
+	      /*
+	       * Remove the lock acquired from
+	       * data_pcslookup node
+	       */
+	      dpo_id_t hicn_dpo_id0 = { dpo_vft0->hicn_dpo_get_type (), 0, 0,
+		dpo_ctx_id0
+	      };
+	      hicn_pcs_remove_lock (pitcs, &pitp, &node0, vm,
+				    hash_entry0, dpo_vft0, &hicn_dpo_id0);
+
+	      drop_packet (vm, bi0, &n_left_to_next, &next0, &to_next,
+			   &next_index, node);
+
+	      goto end_processing;
+	    }
+	  /*
+	   * Check if the hit is instead a collision in the
+	   * hash table. Unlikely to happen.
+	   */
+	  /*
+	   * there is no guarantee that the type of entry has
+	   * not changed from the lookup.
+	   */
+
+	  /* Expired PIT entry: delete it and drop the data. */
+	  if (tnow > pitp->shared.expire_time)
+	    {
+	      dpo_id_t hicn_dpo_id0 =
+		{ dpo_vft0->hicn_dpo_get_type (), 0, 0, dpo_ctx_id0 };
+	      hicn_pcs_delete (pitcs, &pitp, &node0, vm, hash_entry0,
+			       dpo_vft0, &hicn_dpo_id0);
+
+	      drop_packet (vm, bi0, &n_left_to_next, &next0, &to_next,
+			   &next_index, node);
+	      stats.pit_expired_count++;
+	    }
+	  else
+	    {
+	      ASSERT ((hash_entry0->he_flags & HICN_HASH_ENTRY_FLAG_DELETED)
+		      == 0);
+
+	      data_received++;
+	      /*
+	       * We do not check if the data is coming from
+	       * the outgoing interest face.
+	       */
+
+	      /* Prepare the buffer for the cloning */
+	      ret = hicn_satisfy_faces (vm, bi0, pitp, &n_left_to_next,
+					&to_next, &next_index, node,
+					isv6, &stats);
+
+	      dpo_id_t hicn_dpo_id0 = { dpo_vft0->hicn_dpo_get_type (), 0, 0,
+		dpo_ctx_id0
+	      };
+
+	      /*
+	       * On failure hicn_satisfy_faces already enqueued b0 to
+	       * the drop next; just tear down the PIT entry.  Note that
+	       * this path also skips the pkts_processed increment below.
+	       */
+	      if (PREDICT_FALSE (ret != HICN_ERROR_NONE))
+		{
+		  hicn_pcs_pit_delete (pitcs, &pitp, &node0, vm,
+				       hash_entry0, dpo_vft0, &hicn_dpo_id0);
+		  continue;
+		}
+	      /*
+	       * Call the strategy callback since the
+	       * interest has been satisfied
+	       */
+	      strategy_vft0->hicn_receive_data (dpo_ctx_id0,
+						pitp->u.pit.pe_txnh);
+
+#if HICN_FEATURE_CS
+	      /*
+	       * Clone data packet in the content store and
+	       * convert the PIT entry into a CS entry
+	       */
+	      clone_data_to_cs (vm, pitcs, pitp, hicn0, tnow, node0,
+				b0, hash_entry0, hicnb0->name_hash, hicnb0,
+				dpo_vft0, &hicn_dpo_id0);
+
+	      hicn_pcs_remove_lock (pitcs, &pitp, &node0, vm,
+				    hash_entry0, NULL, NULL);
+#else
+	      ASSERT (pitp == hicn_pit_get_data (node0));
+	      /*
+	       * Remove one reference as the buffer is no
+	       * longer in any frame
+	       */
+	      b0->n_add_refs--;
+	      /* If not enabled, delete the PIT entry */
+	      hicn_pcs_pit_delete (pitcs, &pitp, &node0, vm,
+				   hash_entry0, dpo_vft0, &hicn_dpo_id0);
+#endif
+	    }
+	end_processing:
+
+	  /* Incr packet counter */
+	  stats.pkts_processed += 1;
+	}
+
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+    }
+  u32 pit_int_count = hicn_pit_get_int_count (pitcs);
+  u32 pit_cs_count = hicn_pit_get_cs_count (pitcs);
+
+  vlib_node_increment_counter (vm, hicn_data_fwd_node.index,
+			       HICNFWD_ERROR_DATAS, stats.pkts_data_count);
+
+
+  update_node_counter (vm, hicn_data_fwd_node.index,
+		       HICNFWD_ERROR_INT_COUNT, pit_int_count);
+  update_node_counter (vm, hicn_data_fwd_node.index,
+		       HICNFWD_ERROR_CS_COUNT, pit_cs_count);
+  /* Average number of faces satisfied per data packet received. */
+  update_node_counter (vm, hicn_data_fwd_node.index,
+		       HICNFWD_ERROR_INTEREST_AGG_ENTRY,
+		       stats.pkts_data_count / data_received);
+
+  return (frame->n_vectors);
+}
+
+/*
+ * Enqueue buffer bi0 on the error-drop next of the data forwarding
+ * node and validate the speculative enqueue.  All frame-cursor state
+ * (to_next, n_left_to_next, next_index) is owned by the caller and is
+ * updated in place; next0 is set to the drop next for tracing.
+ */
+always_inline void
+drop_packet (vlib_main_t * vm, u32 bi0,
+	     u32 * n_left_to_next, u32 * next0, u32 ** to_next,
+	     u32 * next_index, vlib_node_runtime_t * node)
+{
+  *next0 = HICN_DATA_FWD_NEXT_ERROR_DROP;
+
+  /* Write the buffer index into the current frame slot and advance. */
+  **to_next = bi0;
+  *to_next += 1;
+  *n_left_to_next -= 1;
+
+  vlib_validate_buffer_enqueue_x1 (vm, node, *next_index,
+				   *to_next, *n_left_to_next, bi0, *next0);
+}
+
+/*
+ * Clone the data buffer bi0 once per face recorded in the PIT entry
+ * and enqueue each clone towards its face's next node.  Returns
+ * HICN_ERROR_NONE on success, HICN_ERROR_FACE_NOT_FOUND when no clone
+ * could be produced (in which case bi0 is enqueued to the drop next).
+ */
+always_inline int
+hicn_satisfy_faces (vlib_main_t * vm, u32 bi0,
+		    hicn_pcs_entry_t * pitp, u32 * n_left_to_next,
+		    u32 ** to_next, u32 * next_index,
+		    vlib_node_runtime_t * node, u8 isv6,
+		    vl_api_hicn_api_node_stats_get_reply_t * stats)
+{
+  int found = 0;
+  int ret = HICN_ERROR_NONE;
+  u32 *clones = NULL, *header = NULL;
+  u32 n_left_from = 0;
+  u32 next0 = HICN_DATA_FWD_NEXT_ERROR_DROP, next1 =
+    HICN_DATA_FWD_NEXT_ERROR_DROP;
+
+  /*
+   * We have a hard limit on the number of vlib_buffer that we can
+   * chain (no more than 256)
+   */
+  /*
+   * The first group of vlib_buffer can be directly cloned from b0. We
+   * need to be careful to clone it only 254 times as the buffer
+   * already has n_add_refs=1.
+   */
+  /* vec_alloc reserves capacity; header keeps the base pointer for
+   * vec_free since clones itself is advanced while iterating. */
+  vec_alloc (clones, pitp->u.pit.faces.n_faces);
+  header = clones;
+
+  /* Clone bi0 */
+  vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
+
+  /* Add one reference to maintain the buffer in the CS */
+  b0->n_add_refs++;
+  found = n_left_from =
+    vlib_buffer_clone2 (vm, bi0, clones, pitp->u.pit.faces.n_faces,
+			VLIB_BUFFER_MIN_CHAIN_SEG_SIZE);
+
+  ASSERT (n_left_from == pitp->u.pit.faces.n_faces);
+
+  /* Index to iterate over the faces */
+  int i = 0;
+
+  while (n_left_from > 0)
+    {
+
+      //Dual loop, X2
+      while (n_left_from >= 4 && *n_left_to_next >= 2)
+	{
+	  vlib_buffer_t *h0, *h1;
+	  u32 hi0, hi1;
+	  dpo_id_t *face0, *face1;
+
+	  /* Prefetch for next iteration. */
+	  {
+	    vlib_buffer_t *h2, *h3;
+	    h2 = vlib_get_buffer (vm, clones[2]);
+	    h3 = vlib_get_buffer (vm, clones[3]);
+	    CLIB_PREFETCH (h2, 2 * CLIB_CACHE_LINE_BYTES, STORE);
+	    CLIB_PREFETCH (h3, 2 * CLIB_CACHE_LINE_BYTES, STORE);
+	  }
+
+	  face0 = hicn_face_db_get_dpo_face (i++, &pitp->u.pit.faces);
+	  face1 = hicn_face_db_get_dpo_face (i++, &pitp->u.pit.faces);
+
+	  h0 = vlib_get_buffer (vm, clones[0]);
+	  h1 = vlib_get_buffer (vm, clones[1]);
+
+	  (*to_next)[0] = hi0 = clones[0];
+	  (*to_next)[1] = hi1 = clones[1];
+	  *to_next += 2;
+	  *n_left_to_next -= 2;
+	  n_left_from -= 2;
+	  clones += 2;
+
+	  /* Route each clone to its face's DPO next node. */
+	  next0 = face0->dpoi_next_node;
+	  next1 = face1->dpoi_next_node;
+	  vnet_buffer (h0)->ip.adj_index[VLIB_TX] = face0->dpoi_index;
+	  vnet_buffer (h1)->ip.adj_index[VLIB_TX] = face1->dpoi_index;
+
+	  stats->pkts_data_count += 2;
+
+	  if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) &&
+			     (h0->flags & VLIB_BUFFER_IS_TRACED)))
+	    {
+	      hicn_data_fwd_trace_t *t =
+		vlib_add_trace (vm, node, h0, sizeof (*t));
+	      t->pkt_type = HICN_PKT_TYPE_CONTENT;
+	      t->sw_if_index = vnet_buffer (h0)->sw_if_index[VLIB_RX];
+	      t->next_index = next0;
+	      clib_memcpy (t->packet_data,
+			   vlib_buffer_get_current (h0),
+			   sizeof (t->packet_data));
+	    }
+	  if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) &&
+			     (h1->flags & VLIB_BUFFER_IS_TRACED)))
+	    {
+	      hicn_data_fwd_trace_t *t =
+		vlib_add_trace (vm, node, h1, sizeof (*t));
+	      t->pkt_type = HICN_PKT_TYPE_CONTENT;
+	      t->sw_if_index = vnet_buffer (h1)->sw_if_index[VLIB_RX];
+	      t->next_index = next1;
+	      clib_memcpy (t->packet_data,
+			   vlib_buffer_get_current (h1),
+			   sizeof (t->packet_data));
+	    }
+	  vlib_validate_buffer_enqueue_x2 (vm, node, *next_index,
+					   *to_next, *n_left_to_next,
+					   hi0, hi1, next0, next1);
+	}
+
+
+      /* Single loop: remaining clones, one at a time. */
+      while (n_left_from > 0 && *n_left_to_next > 0)
+	{
+	  vlib_buffer_t *h0;
+	  u32 hi0;
+	  dpo_id_t *face0;
+
+	  face0 = hicn_face_db_get_dpo_face (i++, &pitp->u.pit.faces);
+
+	  h0 = vlib_get_buffer (vm, clones[0]);
+
+	  (*to_next)[0] = hi0 = clones[0];
+	  *to_next += 1;
+	  *n_left_to_next -= 1;
+	  n_left_from -= 1;
+	  clones += 1;
+	  next0 = face0->dpoi_next_node;
+	  vnet_buffer (h0)->ip.adj_index[VLIB_TX] = face0->dpoi_index;
+
+	  stats->pkts_data_count++;
+
+	  if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) &&
+			     (h0->flags & VLIB_BUFFER_IS_TRACED)))
+	    {
+	      hicn_data_fwd_trace_t *t =
+		vlib_add_trace (vm, node, h0, sizeof (*t));
+	      t->pkt_type = HICN_PKT_TYPE_CONTENT;
+	      t->sw_if_index = vnet_buffer (h0)->sw_if_index[VLIB_RX];
+	      t->next_index = next0;
+	      clib_memcpy (t->packet_data,
+			   vlib_buffer_get_current (h0),
+			   sizeof (t->packet_data));
+	    }
+	  /*
+	   * Verify speculative enqueue, maybe switch current
+	   * next frame
+	   */
+	  /*
+	   * Fix in case of a wrong speculation. Needed to
+	   * clone the data in the right frame
+	   */
+	  vlib_validate_buffer_enqueue_x1 (vm, node, *next_index,
+					   *to_next, *n_left_to_next,
+					   hi0, next0);
+
+	}
+
+      /* Ensure that there is space for the next clone (if any) */
+      if (PREDICT_FALSE (*n_left_to_next == 0))
+	{
+	  vlib_put_next_frame (vm, node, *next_index, *n_left_to_next);
+
+	  vlib_get_next_frame (vm, node, *next_index, *to_next,
+			       *n_left_to_next);
+	}
+    }
+
+
+  vec_free (header);
+
+  /* No clone produced: should not happen (hence the ASSERT); drop the
+   * original data and report the error to the caller. */
+  if (PREDICT_FALSE (!found))
+    {
+      ASSERT (0);
+      drop_packet (vm, bi0, n_left_to_next, &next0, to_next, next_index,
+		   node);
+      ret = HICN_ERROR_FACE_NOT_FOUND;
+    }
+  return ret;
+}
+
+always_inline void
+clone_data_to_cs (vlib_main_t * vm, hicn_pit_cs_t * pitcs,
+ hicn_pcs_entry_t * pitp, hicn_header_t * hicn0, f64 tnow,
+ hicn_hash_node_t * nodep, vlib_buffer_t * b0,
+ hicn_hash_entry_t * hash_entry, u64 name_hash,
+ hicn_buffer_t * hicnb, const hicn_dpo_vft_t * dpo_vft,
+ dpo_id_t * hicn_dpo_id)
+{
+ hicn_lifetime_t dmsg_lifetime;
+ /*
+ * At this point we think we're safe to proceed. Store the CS buf in
+ * the PIT/CS hashtable entry
+ */
+
+ /*
+ * Start turning the PIT into a CS. Note that we may be stepping on
+ * the PIT part of the union as we update the CS part, so don't
+ * expect the PIT part to be valid after this point.
+ */
+ hicn_buffer_t *hicnb0 = hicn_get_buffer (b0);
+ hicn_pit_to_cs (vm, pitcs, pitp, hash_entry, nodep, dpo_vft, hicn_dpo_id,
+ &hicnb->face_dpo_id, hicnb0->is_appface);
+
+ pitp->shared.create_time = tnow;
+
+ hicn_type_t type = hicnb0->type;
+ hicn_ops_vft[type.l1]->get_lifetime (type, &hicn0->protocol,
+ &dmsg_lifetime);
+
+ if (dmsg_lifetime < HICN_PARAM_CS_LIFETIME_MIN
+ || dmsg_lifetime > HICN_PARAM_CS_LIFETIME_MAX)
+ {
+ dmsg_lifetime = HICN_PARAM_CS_LIFETIME_DFLT;
+ }
+ pitp->shared.expire_time = hicn_pcs_get_exp_time (tnow, dmsg_lifetime);
+
+ /* Store the original packet buffer in the CS node */
+ pitp->u.cs.cs_pkt_buf = vlib_get_buffer_index (vm, b0);
+}
+
+/* packet trace format function */
+always_inline u8 *
+hicn_data_fwd_format_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ hicn_data_fwd_trace_t *t = va_arg (*args, hicn_data_fwd_trace_t *);
+ u32 indent = format_get_indent (s);
+
+ s = format (s, "DATAFWD: pkt: %d, sw_if_index %d, next index %d\n",
+ (int) t->pkt_type, t->sw_if_index, t->next_index);
+
+ s = format (s, "%U%U", format_white_space, indent,
+ format_ip6_header, t->packet_data, sizeof (t->packet_data));
+ return (s);
+}
+
+/*
+ * Node registration for the data forwarder node
+ */
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE(hicn_data_fwd_node) =
+{
+ .function = hicn_data_node_fn,
+ .name = "hicn-data-fwd",
+ .vector_size = sizeof(u32),
+ .runtime_data_bytes = sizeof(hicn_data_fwd_runtime_t),
+ .format_trace = hicn_data_fwd_format_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+ .n_errors = ARRAY_LEN(hicn_data_fwd_error_strings),
+ .error_strings = hicn_data_fwd_error_strings,
+ .n_next_nodes = HICN_DATA_FWD_N_NEXT,
+ /* edit / add dispositions here */
+ .next_nodes = {
+ [HICN_DATA_FWD_NEXT_V4_LOOKUP] = "ip4-lookup",
+ [HICN_DATA_FWD_NEXT_V6_LOOKUP] = "ip6-lookup",
+ [HICN_DATA_FWD_NEXT_ERROR_DROP] = "error-drop",
+ },
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/data_pcslookup.h b/hicn-plugin/src/data_pcslookup.h
new file mode 100755
index 000000000..fa75c3ac3
--- /dev/null
+++ b/hicn-plugin/src/data_pcslookup.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HICN_DATA_PCSLOOKUP_H__
+#define __HICN_DATA_PCSLOOKUP_H__
+
+#include "pcs.h"
+
+/*
+ * Node context data; we think this is per-thread/instance
+ */
+typedef struct hicn_data_pcslookup_runtime_s
+{
+ int id;
+ hicn_pit_cs_t *pitcs;
+} hicn_data_pcslookup_runtime_t;
+
+/* Trace context struct */
+typedef struct
+{
+ u32 next_index;
+ u32 sw_if_index;
+ u8 pkt_type;
+} hicn_data_pcslookup_trace_t;
+
+typedef enum
+{
+ HICN_DATA_PCSLOOKUP_NEXT_V4_LOOKUP,
+ HICN_DATA_PCSLOOKUP_NEXT_V6_LOOKUP,
+ HICN_DATA_PCSLOOKUP_NEXT_STORE_DATA,
+ HICN_DATA_PCSLOOKUP_NEXT_DATA_FWD, /* This must be one position
+ * before the error drop!! */
+ HICN_DATA_PCSLOOKUP_NEXT_ERROR_DROP,
+ HICN_DATA_PCSLOOKUP_N_NEXT,
+} hicn_data_pcslookup_next_t;
+
+#endif /* //__HICN_DATA_PCSLOOKUP_H__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/data_pcslookup_node.c b/hicn-plugin/src/data_pcslookup_node.c
new file mode 100755
index 000000000..222545106
--- /dev/null
+++ b/hicn-plugin/src/data_pcslookup_node.c
@@ -0,0 +1,246 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "data_pcslookup.h"
+#include "mgmt.h"
+#include "parser.h"
+#include "infra.h"
+#include "strategy.h"
+#include "strategy_dpo_manager.h"
+#include "state.h"
+
+/* Stats string values */
+static char *hicn_data_pcslookup_error_strings[] = {
+#define _(sym, string) string,
+ foreach_hicnfwd_error
+#undef _
+};
+
+/* packet trace format function */
+always_inline u8 *hicn_data_pcslookup_format_trace (u8 * s, va_list * args);
+
+vlib_node_registration_t hicn_data_pcslookup_node;
+
+/*
+ * hICN node for handling data. It performs a lookup in the PIT.
+ */
+static uword
+hicn_data_pcslookup_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+
+ u32 n_left_from, *from, *to_next;
+ hicn_data_pcslookup_next_t next_index;
+ hicn_data_pcslookup_runtime_t *rt;
+ vl_api_hicn_api_node_stats_get_reply_t stats = { 0 };
+
+ rt = vlib_node_get_runtime_data (vm, node->node_index);
+
+ if (PREDICT_FALSE (rt->pitcs == NULL))
+ {
+ rt->pitcs = &hicn_main.pitcs;
+ }
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ vlib_buffer_t *b0;
+ hicn_buffer_t *hb0;
+ u8 isv6;
+ u8 *nameptr;
+ u16 namelen;
+ u32 bi0;
+ u32 next0 = HICN_DATA_PCSLOOKUP_NEXT_ERROR_DROP;
+ u64 name_hash = 0;
+ hicn_name_t name;
+ hicn_header_t *hicn0;
+ u32 node_id0 = 0;
+ u8 dpo_ctx_id0 = 0;
+ int ret0;
+ u8 vft_id0;
+ u8 is_cs0;
+ u8 hash_entry_id = 0;
+ u8 bucket_is_overflown = 0;
+ u32 bucket_id = ~0;
+
+ /* Prefetch for next iteration. */
+ if (n_left_from > 1)
+ {
+ vlib_buffer_t *b1;
+ b1 = vlib_get_buffer (vm, from[1]);
+	      //Prefetch one cache line -- 64 bytes -- so that we load the hicn_buffer_t as well
+ CLIB_PREFETCH (b1, 2 * CLIB_CACHE_LINE_BYTES, STORE);
+ CLIB_PREFETCH (b1->data, CLIB_CACHE_LINE_BYTES, LOAD);
+ }
+ /* Dequeue a packet buffer */
+ bi0 = from[0];
+ from += 1;
+ n_left_from -= 1;
+ to_next[0] = bi0;
+ to_next += 1;
+ n_left_to_next -= 1;
+
+
+ b0 = vlib_get_buffer (vm, bi0);
+ hb0 = hicn_get_buffer (b0);
+
+ /* Incr packet counter */
+ stats.pkts_processed += 1;
+
+ ret0 = hicn_data_parse_pkt (b0, &name, &namelen, &hicn0, &isv6);
+
+ if (PREDICT_TRUE (ret0 == HICN_ERROR_NONE))
+ {
+ next0 =
+ isv6 ? HICN_DATA_PCSLOOKUP_NEXT_V6_LOOKUP :
+ HICN_DATA_PCSLOOKUP_NEXT_V4_LOOKUP;
+ }
+ nameptr = (u8 *) (&name);
+ if (PREDICT_FALSE
+ (ret0 != HICN_ERROR_NONE
+ || hicn_hashtb_fullhash (nameptr, namelen,
+ &name_hash) != HICN_ERROR_NONE))
+ {
+ next0 = HICN_DATA_PCSLOOKUP_NEXT_ERROR_DROP;
+ }
+ else
+ {
+ int res =
+ hicn_hashtb_lookup_node (rt->pitcs->pcs_table, nameptr,
+ namelen, name_hash,
+ !(hb0->is_appface) /* take lock */ ,
+ &node_id0, &dpo_ctx_id0, &vft_id0,
+ &is_cs0,
+ &hash_entry_id, &bucket_id,
+ &bucket_is_overflown);
+
+ stats.pkts_data_count += 1;
+
+ if ((res == HICN_ERROR_HASHTB_HASH_NOT_FOUND
+ || (res == HICN_ERROR_NONE && is_cs0))
+ && (hb0->is_appface))
+ {
+ next0 = HICN_DATA_PCSLOOKUP_NEXT_STORE_DATA;
+ }
+ else if (res == HICN_ERROR_NONE)
+ {
+ /*
+ * In case the result of the lookup
+ * is a CS entry, the packet is
+ * dropped
+ */
+ next0 = HICN_DATA_PCSLOOKUP_NEXT_DATA_FWD + is_cs0;
+ }
+ }
+
+ hicn_store_internal_state (b0, name_hash, node_id0, dpo_ctx_id0,
+ vft_id0, hash_entry_id, bucket_id,
+ bucket_is_overflown);
+
+ /*
+ * Verify speculative enqueue, maybe switch current
+ * next frame
+ */
+ /*
+ * Fix in case of a wrong speculation. Needed to
+ * clone the data in the right frame
+ */
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+
+ /* Maybe trace */
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) &&
+ (b0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ hicn_data_pcslookup_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->pkt_type = HICN_PKT_TYPE_CONTENT;
+ t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+ t->next_index = next0;
+ }
+ }
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+ /* Check the CS LRU, and trim if necessary. */
+ u32 pit_int_count = hicn_pit_get_int_count (rt->pitcs);
+ u32 pit_cs_count = hicn_pit_get_cs_count (rt->pitcs);
+
+ vlib_node_increment_counter (vm, hicn_data_pcslookup_node.index,
+ HICNFWD_ERROR_PROCESSED, stats.pkts_processed);
+
+ vlib_node_increment_counter (vm, hicn_data_pcslookup_node.index,
+ HICNFWD_ERROR_DATAS, stats.pkts_data_count);
+
+ update_node_counter (vm, hicn_data_pcslookup_node.index,
+ HICNFWD_ERROR_INT_COUNT, pit_int_count);
+ update_node_counter (vm, hicn_data_pcslookup_node.index,
+ HICNFWD_ERROR_CS_COUNT, pit_cs_count);
+ return (frame->n_vectors);
+}
+
+/* packet trace format function */
+static u8 *
+hicn_data_pcslookup_format_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ hicn_data_pcslookup_trace_t *t =
+ va_arg (*args, hicn_data_pcslookup_trace_t *);
+
+ s = format (s, "DATA-PCSLOOKUP: pkt: %d, sw_if_index %d, next index %d",
+ (int) t->pkt_type, t->sw_if_index, t->next_index);
+ return (s);
+}
+
+
+/*
+ * Node registration for the data forwarder node
+ */
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (hicn_data_pcslookup_node) =
+{
+ .function = hicn_data_pcslookup_node_fn,
+ .name = "hicn-data-pcslookup",
+ .vector_size = sizeof (u32),
+ .runtime_data_bytes = sizeof (hicn_data_pcslookup_runtime_t),
+ .format_trace = hicn_data_pcslookup_format_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+ .n_errors = ARRAY_LEN (hicn_data_pcslookup_error_strings),
+ .error_strings = hicn_data_pcslookup_error_strings,
+ .n_next_nodes = HICN_DATA_PCSLOOKUP_N_NEXT,
+ .next_nodes =
+ {
+ [HICN_DATA_PCSLOOKUP_NEXT_V4_LOOKUP] = "ip4-lookup",
+ [HICN_DATA_PCSLOOKUP_NEXT_V6_LOOKUP] = "ip6-lookup",
+ [HICN_DATA_PCSLOOKUP_NEXT_STORE_DATA] = "hicn-data-push",
+ [HICN_DATA_PCSLOOKUP_NEXT_DATA_FWD] = "hicn-data-fwd",
+ [HICN_DATA_PCSLOOKUP_NEXT_ERROR_DROP] = "error-drop",
+ },
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/data_push_node.c b/hicn-plugin/src/data_push_node.c
new file mode 100755
index 000000000..a4a25e29b
--- /dev/null
+++ b/hicn-plugin/src/data_push_node.c
@@ -0,0 +1,349 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+
+#include "hicn.h"
+#include "parser.h"
+#include "strategy_dpo_ctx.h"
+#include "infra.h"
+#include "mgmt.h"
+#include "pcs.h"
+
+/*
+ * Node context data (to be used in all the strategy nodes); we think this is
+ * per-thread/instance
+ */
+typedef struct hicn_data_push_runtime_s
+{
+ int id;
+ hicn_pit_cs_t *pitcs;
+} hicn_data_push_runtime_t;
+
+/* Stats string values */
+static char *hicn_data_push_error_strings[] = {
+#define _(sym, string) string,
+ foreach_hicnfwd_error
+#undef _
+};
+
+typedef enum
+{
+ HICN_DATA_PUSH_NEXT_ERROR_DROP,
+ HICN_DATA_PUSH_N_NEXT,
+} hicn_data_push_next_t;
+
+/* Trace context struct */
+typedef struct
+{
+ u32 next_index;
+ u32 sw_if_index;
+ u8 pkt_type;
+ u8 packet_data[40];
+} hicn_data_push_trace_t;
+
+vlib_node_registration_t hicn_data_push_node;
+
+always_inline void
+prep_buffer_for_cs (vlib_main_t * vm, vlib_buffer_t * b0, u8 isv6)
+{
+ if (isv6)
+ {
+ /* Advance the vlib buffer to the beginning of the TCP header */
+ vlib_buffer_advance (b0, sizeof (ip6_header_t) + sizeof (tcp_header_t));
+ b0->total_length_not_including_first_buffer = 0;
+ }
+ else
+ {
+ /* Advance the vlib buffer to the beginning of the TCP header */
+ vlib_buffer_advance (b0, sizeof (ip4_header_t) + sizeof (tcp_header_t));
+ b0->total_length_not_including_first_buffer = 0;
+ }
+}
+
+always_inline int
+hicn_new_data (vlib_main_t * vm, hicn_data_push_runtime_t * rt,
+ vlib_buffer_t * b0, u32 * next, f64 tnow, u8 * nameptr,
+ u16 namelen, u8 isv6)
+{
+ int ret;
+ hicn_hash_node_t *nodep;
+ hicn_pcs_entry_t *pitp;
+ hicn_header_t *hicn0;
+ hicn_buffer_t *hicnb0 = hicn_get_buffer (b0);
+ u32 node_id0 = 0;
+ u8 dpo_ctx_id0 = 0;
+ u8 vft_id0 = 0;
+ u8 is_cs0 = 0;
+ u8 hash_entry_id = 0;
+ u32 bucket_id = ~0;
+ u8 bucket_is_overflow = 0;
+ hicn_lifetime_t dmsg_lifetime;
+
+ /* Create PIT node and init PIT entry */
+ nodep = hicn_hashtb_alloc_node (rt->pitcs->pcs_table);
+ if (PREDICT_FALSE (nodep == NULL))
+ {
+ /* Nothing we can do - no mem */
+ *next = HICN_DATA_PUSH_NEXT_ERROR_DROP;
+ return HICN_ERROR_HASHTB_NOMEM;
+ }
+ pitp = hicn_pit_get_data (nodep);
+ hicn_pit_init_data (pitp);
+ pitp->shared.create_time = tnow;
+
+ hicn0 = vlib_buffer_get_current (b0);
+
+ hicn_type_t type = hicnb0->type;
+ hicn_ops_vft[type.l1]->get_lifetime (type, &hicn0->protocol,
+ &dmsg_lifetime);
+
+ if (dmsg_lifetime < HICN_PARAM_CS_LIFETIME_MIN
+ || dmsg_lifetime > HICN_PARAM_CS_LIFETIME_MAX)
+ {
+ dmsg_lifetime = HICN_PARAM_CS_LIFETIME_DFLT;
+ }
+ pitp->shared.expire_time = hicn_pcs_get_exp_time (tnow, dmsg_lifetime);
+ prep_buffer_for_cs (vm, b0, isv6);
+
+ /* Store the original packet buffer in the CS node */
+ pitp->u.cs.cs_pkt_buf = vlib_get_buffer_index (vm, b0);
+
+ pitp->u.cs.cs_rxface = hicnb0->face_dpo_id;
+
+ /* Set up the hash node and insert it */
+ hicn_hashtb_init_node (rt->pitcs->pcs_table, nodep, nameptr, namelen);
+
+
+ nodep->hn_flags |= HICN_HASH_NODE_CS_FLAGS;
+ pitp->shared.entry_flags |= HICN_PCS_ENTRY_CS_FLAG;
+
+ hicn_hash_entry_t *hash_entry;
+ ret =
+ hicn_pcs_cs_insert_update (vm, rt->pitcs, pitp, nodep, &hash_entry,
+ hicnb0->name_hash, &node_id0, &dpo_ctx_id0,
+ &vft_id0, &is_cs0, &hash_entry_id, &bucket_id,
+ &bucket_is_overflow);
+
+ hash_entry->he_flags |= HICN_HASH_ENTRY_FLAG_CS_ENTRY;
+ if (ret != HICN_ERROR_NONE)
+ {
+ hicn_hashtb_free_node (rt->pitcs->pcs_table, nodep);
+ }
+ return (ret);
+
+}
+
+/*
+ * ICN strategy layer node for interests: - 1 packet at a time - ipv4/tcp
+ * ipv6/tcp
+ */
+uword
+hicn_data_push_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+
+ u32 n_left_from, *from, *to_next, n_left_to_next;
+ hicn_data_push_next_t next_index;
+ hicn_data_push_runtime_t *rt;
+ vl_api_hicn_api_node_stats_get_reply_t stats = { 0 };
+ f64 tnow;
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = (hicn_data_push_next_t) node->cached_next_index;
+ rt = vlib_node_get_runtime_data (vm, hicn_data_push_node.index);
+ rt->pitcs = &hicn_main.pitcs;
+ /* Capture time in vpp terms */
+ tnow = vlib_time_now (vm);
+
+ while (n_left_from > 0)
+ {
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ /* Dual loop, X2 */
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ u8 isv6_0, isv6_1;
+ u8 *nameptr0, *nameptr1;
+ u16 namelen0, namelen1;
+ hicn_name_t name0, name1;
+ hicn_header_t *hicn0, *hicn1;
+ vlib_buffer_t *b0, *b1;
+ u32 bi0, bi1;
+ u32 next0 = next_index, next1 = next_index;
+ int ret0, ret1;
+
+ /* Prefetch for next iteration. */
+ {
+ vlib_buffer_t *b2, *b3;
+ b2 = vlib_get_buffer (vm, from[2]);
+ b3 = vlib_get_buffer (vm, from[3]);
+ CLIB_PREFETCH (b2, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
+ CLIB_PREFETCH (b3, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
+ CLIB_PREFETCH (b2->data, CLIB_CACHE_LINE_BYTES, STORE);
+ CLIB_PREFETCH (b3->data, CLIB_CACHE_LINE_BYTES, STORE);
+ }
+
+ /* Dequeue a packet buffer */
+ bi0 = from[0];
+ bi1 = from[1];
+ from += 2;
+ n_left_from -= 2;
+ /* to_next[0] = bi0; */
+ /* to_next[1] = bi1; */
+ /* to_next += 2; */
+ /* n_left_to_next -= 2; */
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+ next0 = next1 = HICN_DATA_PUSH_NEXT_ERROR_DROP;
+
+ ret0 = hicn_data_parse_pkt (b0, &name0, &namelen0, &hicn0, &isv6_0);
+ ret1 = hicn_data_parse_pkt (b1, &name1, &namelen1, &hicn1, &isv6_1);
+
+ nameptr0 = (u8 *) (&name0);
+ nameptr1 = (u8 *) (&name1);
+ if (PREDICT_TRUE (ret0 == HICN_ERROR_NONE))
+ hicn_new_data (vm, rt, b0, &next0, tnow, nameptr0, namelen0,
+ isv6_0);
+
+ if (PREDICT_TRUE (ret1 == HICN_ERROR_NONE))
+ hicn_new_data (vm, rt, b1, &next1, tnow, nameptr1, namelen1,
+ isv6_1);
+ stats.pkts_data_count += 2;
+
+ /* Maybe trace */
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) &&
+ (b0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ hicn_data_push_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->pkt_type = HICN_PKT_TYPE_CONTENT;
+ t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];;
+ t->next_index = next0;
+ }
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) &&
+ (b1->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ hicn_data_push_trace_t *t =
+ vlib_add_trace (vm, node, b1, sizeof (*t));
+ t->pkt_type = HICN_PKT_TYPE_CONTENT;
+ t->sw_if_index = vnet_buffer (b1)->sw_if_index[VLIB_RX];;
+ t->next_index = next0;
+ }
+ }
+
+ /* Dual loop, X1 */
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u8 isv6;
+ u8 *nameptr;
+ u16 namelen;
+ hicn_name_t name;
+ hicn_header_t *hicn0;
+ vlib_buffer_t *b0;
+ u32 bi0;
+ u32 next0 = next_index;
+ int ret0;
+
+ /* Prefetch for next iteration. */
+ if (n_left_from > 1)
+ {
+ vlib_buffer_t *b1;
+ //hicn_buffer_t * hicnb1;
+ b1 = vlib_get_buffer (vm, from[1]);
+ CLIB_PREFETCH (b1, CLIB_CACHE_LINE_BYTES, LOAD);
+ CLIB_PREFETCH (b1->data, CLIB_CACHE_LINE_BYTES, STORE);
+ }
+ /* Dequeue a packet buffer */
+ bi0 = from[0];
+ from += 1;
+ n_left_from -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ next0 = HICN_DATA_PUSH_NEXT_ERROR_DROP;
+
+ ret0 = hicn_data_parse_pkt (b0, &name, &namelen, &hicn0, &isv6);
+ nameptr = (u8 *) (&name);
+
+ if (PREDICT_TRUE (ret0 == HICN_ERROR_NONE))
+ hicn_new_data (vm, rt, b0, &next0, tnow, nameptr, namelen, isv6);
+ stats.pkts_data_count++;
+
+ /* Maybe trace */
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) &&
+ (b0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ hicn_data_push_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->pkt_type = HICN_PKT_TYPE_CONTENT;
+ t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];;
+ t->next_index = next0;
+ }
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ vlib_node_increment_counter (vm, hicn_data_push_node.index,
+ HICNFWD_ERROR_CACHED, stats.pkts_data_count);
+
+ return (frame->n_vectors);
+}
+
+/* packet trace format function */
+always_inline u8 *
+hicn_data_push_format_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ hicn_data_push_trace_t *t = va_arg (*args, hicn_data_push_trace_t *);
+
+ s = format (s, "DATA-STORE: pkt: %d, sw_if_index %d, next index %d\n",
+ (int) t->pkt_type, t->sw_if_index, t->next_index);
+
+ return (s);
+}
+
+
+/*
+ * Node registration for the data forwarder node
+ */
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE(hicn_data_push_node) =
+{
+ .function = hicn_data_push_fn,
+ .name = "hicn-data-push",
+ .vector_size = sizeof(u32),
+ .runtime_data_bytes = sizeof(hicn_data_push_runtime_t),
+ .format_trace = hicn_data_push_format_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+ .n_errors = ARRAY_LEN(hicn_data_push_error_strings),
+ .error_strings = hicn_data_push_error_strings,
+ .n_next_nodes = HICN_DATA_PUSH_N_NEXT,
+ /* edit / add dispositions here */
+ .next_nodes = {
+ [HICN_DATA_PUSH_NEXT_ERROR_DROP] = "error-drop",
+ },
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/error.c b/hicn-plugin/src/error.c
new file mode 100755
index 000000000..588ae2398
--- /dev/null
+++ b/hicn-plugin/src/error.c
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "error.h"
+
+const char *HICN_ERROR_STRING[] = {
+#define _(a,b,c) c,
+ foreach_hicn_error
+#undef _
+};
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/error.h b/hicn-plugin/src/error.h
new file mode 100755
index 000000000..978c7f2ca
--- /dev/null
+++ b/hicn-plugin/src/error.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HICN_ERROR_H__
+#define __HICN_ERROR_H__
+
+#define foreach_hicn_error \
+ _(NONE, 0, "Ok") \
+ _(UNSPECIFIED, -128, "Unspecified Error") \
+ _(FACE_NOT_FOUND, -129, "Face not found in Face table") \
+ _(FACE_NULL, -130, "Face null") \
+ _(FACE_IP_ADJ_NOT_FOUND, -131, "Ip adjacecny for face not found") \
+ _(FACE_HW_INT_NOT_FOUND, -132, "Hardware interface not found") \
+ _(FACE_NOMEM, -133, "Face table is full") \
+ _(FACE_NO_GLOBAL_IP, -134, "No global ip address for face") \
+ _(FACE_NOT_FOUND_IN_ENTRY, -135, "Face not found in entry") \
+ _(FACE_ALREADY_DELETED, -136, "Face alredy deleted") \
+ _(FACE_ALREADY_CREATED, -137, "Face alredy created") \
+ _(FWD_NOT_ENABLED, -138, "hICN forwarder not enabled") \
+ _(FWD_ALREADY_ENABLED, -139, "hICN forwarder alredy enabled") \
+ _(PARSER_UNSUPPORTED_PROTO, -140, "Unsupported protocol") \
+ _(PARSER_PKT_INVAL, -141, "Packet null") \
+ _(PIT_CONFIG_MINLT_OOB, -142, "Min lifetime ouf of bounds") \
+ _(PIT_CONFIG_MAXLT_OOB, -143, "Max lifetime ouf of bounds") \
+ _(PIT_CONFIG_MINMAXLT, -144, "Min lifetime grater than max lifetime") \
+ _(PIT_CONFIG_DFTLT_OOB, -145, "Default lifetime ouf of bounds") \
+ _(PIT_CONFIG_SIZE_OOB, -146, "Pit size ouf of bounds") \
+ _(CS_CONFIG_SIZE_OOB, -147, "CS size ouf of bounds") \
+ _(CS_CONFIG_RESERVED_OOB, -148, "Reseved CS must be between 0 and 100 (excluded)") \
+ _(DPO_CTX_NHOPS_NS, -149, "No space for additional next hop") \
+ _(DPO_CTX_NHOPS_EXISTS, -150, "Next hop already in the route") \
+ _(DPO_CTX_NOT_FOUND, -151, "Dpo context not found") \
+ _(DPO_MGR_ID_NOT_VALID, -152, "Dpo id for strategy and context not valid") \
+ _(HASHTB_HASH_NOT_FOUND, -153, "Hash not found in hash table") \
+ _(HASHTB_HASH_INVAL, -154, "Error while calculating the hash") \
+ _(HASHTB_NOMEM, -155, "Unable to allocate new buckets or nodes") \
+ _(HASHTB_INVAL, -156, "Invalid argument") \
+ _(HASHTB_KEY_INVAL, -157, "Invalid hashtb key") \
+ _(HASHTB_EXIST, -158, "Hash already in hashtable") \
+ _(ROUTE_INVAL, -159, "Invalid face id and weight") \
+ _(ROUTE_NO_LD, -160, "Expected load balance dpo") \
+ _(ROUTE_MLT_LD, -161, "Unexpected mulitple buckets in load balance dpo") \
+ _(ROUTE_NO_INSERT, -162, "Unable to insert a new FIB entry") \
+ _(ROUTE_DPO_NO_HICN, -163, "Dpo is not of type hICN") \
+ _(ROUTE_NOT_FOUND, -164, "Route not found in FIB") \
+ _(ROUTE_NOT_UPDATED, -165, "Unable to update route") \
+ _(ROUTE_ALREADY_EXISTS, -166, "Route already in FIB") \
+ _(CLI_INVAL, -167, "Invalid input") \
+ _(PUNT_INVAL, -168, "Invalid prefix or subnet or interface") \
+ _(PUNT_TBL_NOT_FOUND, -169, "Vnet table not found") \
+ _(PUNT_TBL_EXIST, -170, "Vnet table already created") \
+ _(PUNT_SSN_NOT_FOUND, -171, "Vnet session not found") \
+ _(PUNT_SSN_EXIST, -172, "Vnet session already created") \
+ _(PUNT_SKIP_NOT_SUPPORTED, -173, "Skip size not supported. Skip must be <= 1") \
+ _(PUNT_NOMEM, -174, "Unable to allocate skip_mask") \
+ _(IPS_ADDR_TYPE_NONUNIFORM, -175, "Src and dst addr have different ip types") \
+ _(FACE_TYPE_EXISTS, -176, "Face type already registered") \
+ _(NO_BUFFERS, -177, "No vlib_buffer available for packet cloning.") \
+ _(NOT_IMPLEMENTED, -178, "Function not yet implemented") \
+ _(IFACE_IP_ADJ_NOT_FOUND, -179, "IP adjacency on incomplete face not available") \
+ _(APPFACE_ALREADY_ENABLED, -180, "Application face already enabled on interface") \
+ _(APPFACE_FEATURE, -181, "Error while enabling app face feature") \
+ _(APPFACE_NOT_FOUND, -182, "Application face not found") \
+ _(APPFACE_PROD_PREFIX_NULL, -183, "Prefix must not be null for producer face") \
+ _(MW_STRATEGY_NH_NOT_FOUND, -184, "Next hop not found") \
+ _(MW_STRATEGY_SET, -185, "Error while setting weight for next hop") \
+ _(STRATEGY_NOT_FOUND, -186, "Strategy not found")
+
+
+typedef enum
+{
+#define _(a,b,c) HICN_ERROR_##a = (b),
+ foreach_hicn_error
+#undef _
+ HICN_N_ERROR,
+} hicn_error_t;
+
+extern const char *HICN_ERROR_STRING[];
+
+#define get_error_string(errno) (char *)(errno ? HICN_ERROR_STRING[(-errno) - 127] : HICN_ERROR_STRING[errno])
+
+#endif /* //__HICN_ERROR_H__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/face_db.h b/hicn-plugin/src/face_db.h
new file mode 100755
index 000000000..7b8a08879
--- /dev/null
+++ b/hicn-plugin/src/face_db.h
@@ -0,0 +1,153 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HICN_FACE_DB_H__
+#define __HICN_FACE_DB_H__
+
+#include <vnet/dpo/dpo.h>
+#include "faces/face.h"
+
+/**
+ * @File
+ *
+ * Define a face db that is stored in every PIT entry. A face db contains a list
+ * of incoming faces for interest packets that are used to forward data packets
+ * on the interests' reverse path.
+ */
+
+/* Must be power of two */
+#define HICN_FACE_DB_INLINE_FACES 4
+
+#define HICN_PIT_N_HOP_BITMAP_SIZE HICN_PARAM_PIT_ENTRY_PHOPS_MAX
+
+#define HICN_PIT_N_HOP_BUCKET (HICN_PARAM_PIT_ENTRY_PHOPS_MAX - HICN_FACE_DB_INLINE_FACES)
+
+STATIC_ASSERT ((HICN_PIT_N_HOP_BUCKET & (HICN_PIT_N_HOP_BUCKET - 1)) == 0,
+ "HICN_PARAM_PIT_ENTRY_PHOP_MAX must be a power of 2 + 4");
+
+/* Takes 2 cache lines */
+typedef struct __attribute__ ((packed)) hicn_face_bucket_s
+{
+ /* Array of indexes of virtual faces */
+ dpo_id_t faces[HICN_PIT_N_HOP_BUCKET];
+
+ CLIB_CACHE_LINE_ALIGN_MARK (cache_line1);
+
+ /* Used to check if interests are retransmission */
+ u8 bitmap[HICN_PIT_N_HOP_BITMAP_SIZE];
+
+} hicn_face_bucket_t;
+
+extern hicn_face_bucket_t *hicn_face_bucket_pool;
+
+typedef struct __attribute__ ((packed)) hicn_face_db_s
+{
+ /* 19B + 1B = 20B */
+ /* Equal to one or zero */
+ u8 is_overflow;
+
+ /* Number of faces in the last bucket */
+  /* Or next available entry for storing a dpo_id_t */
+ /* 20B + 4B = 24B */
+ u32 n_faces;
+
+ /* 24B + 32B (8*4) = 56B */
+ /* Array of indexes of virtual faces */
+ dpo_id_t inline_faces[HICN_FACE_DB_INLINE_FACES];
+
+ /* 56B + 4B = 60B */
+ u32 next_bucket;
+
+ /* 60B + 4B = 64B */
+ u32 align;
+ //align back to 64
+
+} hicn_face_db_t;
+
+always_inline dpo_id_t *
+hicn_face_db_get_dpo_face (u32 index, hicn_face_db_t * face_db)
+{
+ ASSERT (index < face_db->n_faces);
+
+ return index < HICN_FACE_DB_INLINE_FACES ? &(face_db->inline_faces[index]) :
+ &(pool_elt_at_index (hicn_face_bucket_pool, face_db->next_bucket)->faces
+ [(index - HICN_FACE_DB_INLINE_FACES) & (HICN_PIT_N_HOP_BUCKET - 1)]);
+}
+
+always_inline void
+hicn_face_db_init (int max_element)
+{
+ pool_init_fixed (hicn_face_bucket_pool, max_element);
+}
+
+always_inline hicn_face_bucket_t *
+hicn_face_db_get_bucket (u32 bucket_index)
+{
+ return pool_elt_at_index (hicn_face_bucket_pool, bucket_index);
+}
+
+always_inline void
+hicn_face_db_add_face_dpo (dpo_id_t * dpo, hicn_face_db_t * face_db)
+{
+ ASSERT (dpo->dpoi_index != ~0);
+
+ hicn_face_bucket_t *faces_bkt =
+ pool_elt_at_index (hicn_face_bucket_pool, face_db->next_bucket);
+
+ dpo_id_t *face =
+ face_db->n_faces <
+ HICN_FACE_DB_INLINE_FACES ? &(face_db->inline_faces[face_db->n_faces]) :
+ &(faces_bkt->faces
+ [(face_db->n_faces -
+ HICN_FACE_DB_INLINE_FACES) & (HICN_PIT_N_HOP_BUCKET - 1)]);
+
+ clib_memcpy (face, dpo, sizeof (dpo_id_t));
+
+ /* This access the dpoi to increase the lock */
+ dpo_lock (dpo);
+
+ u32 bitmap_index = dpo->dpoi_index % HICN_PIT_N_HOP_BITMAP_SIZE;
+ faces_bkt->bitmap[bitmap_index] |= 0x01;
+ face_db->n_faces++;
+}
+
+always_inline u8
+hicn_face_search (dpo_id_t * dpo, hicn_face_db_t * face_db)
+{
+ hicn_face_bucket_t *faces_bkt =
+ pool_elt_at_index (hicn_face_bucket_pool, face_db->next_bucket);
+ u32 bitmap_index = dpo->dpoi_index % HICN_PIT_N_HOP_BITMAP_SIZE;
+
+ return faces_bkt->bitmap[bitmap_index] & 0x01;
+}
+
+always_inline void
+hicn_faces_flush (hicn_face_db_t * face_db)
+{
+ hicn_face_bucket_t *faces_bkt =
+ pool_elt_at_index (hicn_face_bucket_pool, face_db->next_bucket);
+ clib_memset_u64 (&(faces_bkt->bitmap), 0, HICN_PIT_N_HOP_BITMAP_SIZE / 8);
+ face_db->n_faces = 0;
+ pool_put_index (hicn_face_bucket_pool, face_db->next_bucket);
+}
+
+
+#endif /* // __HICN_FACE_DB_H__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/faces/app/address_mgr.c b/hicn-plugin/src/faces/app/address_mgr.c
new file mode 100755
index 000000000..76a7e0f6d
--- /dev/null
+++ b/hicn-plugin/src/faces/app/address_mgr.c
@@ -0,0 +1,243 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Copyright (c) 2017-2019 by cisco systems inc. All rights reserved.
+ *
+ */
+
+#include <dlfcn.h>
+
+#include <vlib/vlib.h>
+#include <vnet/ip/ip6_packet.h>
+#include <vnet/ip/ip4.h> //ip4_add_del_ip_address
+#include <vnet/ip/ip6.h> //ip6_add_del_ip_address
+#include <vnet/fib/fib_types.h> //FIB_PROTOCOL_IP4/6, FIB_NODE_INDEX_INVALID
+#include <vnet/fib/fib_entry.h> //FIB_SOURCE_PLUGIN_HI
+#include <vnet/fib/fib_table.h>
+#include <vppinfra/format.h>
+#include <vnet/interface.h> //appif_flags
+#include <vnet/interface_funcs.h> //vnet_sw_interface_set_flags
+
+#include "address_mgr.h"
+#include "../../hicn.h"
+#include "../../infra.h"
+#include "../../error.h"
+#include "../face.h"
+#include "../ip/face_ip.h"
+#include "../../strategy_dpo_ctx.h"
+#include "../../route.h"
+
+typedef struct address_mgr_main_s
+{
+ ip4_address_t next_ip4_local_addr;
+ ip6_address_t next_ip6_local_addr;
+} address_mgr_main_t;
+
+address_mgr_main_t address_mgr_main;
+
+/* Add val to an IPv4 address, doing the arithmetic in host order. */
+static void
+increment_v4_address (ip4_address_t * a, u32 val)
+{
+ a->as_u32 =
+ clib_host_to_net_u32 (clib_net_to_host_u32 (a->as_u32) + val);
+}
+
+/*
+ * Add val to the low 64 bits of an IPv6 address (host-order math).
+ * No carry is propagated into the upper 64 bits.
+ */
+static void
+increment_v6_address (ip6_address_t * a, u64 val)
+{
+ a->as_u64[1] =
+ clib_host_to_net_u64 (clib_net_to_host_u64 (a->as_u64[1]) + val);
+}
+
+/*
+ * Return two consecutive free IPv4 addresses forming a /31 pair
+ * (appif_addr even, nh_addr odd).  Starting from the allocator
+ * cursor, probe the hICN FIB table until neither address has an
+ * exact-match route, then advance the cursor past the pair.
+ */
+void
+get_two_ip4_addresses (ip4_address_t * appif_addr, ip4_address_t * nh_addr)
+{
+ /* We want two consecutives address that fall into a /31 mask */
+ if (address_mgr_main.next_ip4_local_addr.as_u8[3] & 0x01)
+ increment_v4_address (&(address_mgr_main.next_ip4_local_addr), 1);
+
+ *appif_addr = address_mgr_main.next_ip4_local_addr;
+ increment_v4_address (&(address_mgr_main.next_ip4_local_addr), 1);
+ *nh_addr = address_mgr_main.next_ip4_local_addr;
+ fib_prefix_t fib_pfx;
+ fib_node_index_t fib_entry_index = FIB_NODE_INDEX_INVALID;
+ u32 fib_index;
+
+ fib_pfx.fp_proto = FIB_PROTOCOL_IP4;
+ fib_pfx.fp_len = ADDR_MGR_IP4_LEN;
+ /* At this point the face exists in the face table */
+ do
+ {
+ /* Check if the route already exist in the fib */
+ fib_pfx.fp_addr = to_ip46 ( /* is_v6 */ 0, appif_addr->as_u8);
+ fib_index = fib_table_find_or_create_and_lock (fib_pfx.fp_proto,
+ HICN_FIB_TABLE,
+ FIB_SOURCE_PLUGIN_HI);
+ fib_entry_index = fib_table_lookup_exact_match (fib_index, &fib_pfx);
+ fib_table_unlock (fib_index, fib_pfx.fp_proto, FIB_SOURCE_PLUGIN_HI);
+ if (fib_entry_index != FIB_NODE_INDEX_INVALID)
+ {
+ /* First address taken: check the second one as well. */
+ fib_pfx.fp_addr = to_ip46 ( /* is_v6 */ 0, nh_addr->as_u8);
+ fib_index = fib_table_find_or_create_and_lock (fib_pfx.fp_proto,
+ HICN_FIB_TABLE,
+ FIB_SOURCE_PLUGIN_HI);
+ fib_entry_index =
+ fib_table_lookup_exact_match (fib_index, &fib_pfx);
+ fib_table_unlock (fib_index, fib_pfx.fp_proto,
+ FIB_SOURCE_PLUGIN_HI);
+ }
+ if (fib_entry_index != FIB_NODE_INDEX_INVALID)
+ {
+ /* Pair in use: skip to the next /31 pair and retry. */
+ increment_v4_address (appif_addr, 2);
+ increment_v4_address (nh_addr, 2);
+ }
+ }
+ while (fib_entry_index != FIB_NODE_INDEX_INVALID);
+
+ /* Advance the shared cursor past the pair just handed out. */
+ address_mgr_main.next_ip4_local_addr = *nh_addr;
+ increment_v4_address (&(address_mgr_main.next_ip4_local_addr), 1);
+}
+
+/*
+ * Return two consecutive free IPv6 addresses forming a /127 pair
+ * (appif_addr even, nh_addr odd).  Starting from the allocator
+ * cursor, probe the hICN FIB table until neither address has an
+ * exact-match route, then advance the cursor past the pair.
+ */
+void
+get_two_ip6_addresses (ip6_address_t * appif_addr, ip6_address_t * nh_addr)
+{
+
+ /* We want two consecutives address that fall into a /127 mask */
+ if (address_mgr_main.next_ip6_local_addr.as_u8[15] & 0x01)
+ increment_v6_address (&(address_mgr_main.next_ip6_local_addr), 1);
+
+ *appif_addr = address_mgr_main.next_ip6_local_addr;
+ increment_v6_address (&(address_mgr_main.next_ip6_local_addr), 1);
+ *nh_addr = address_mgr_main.next_ip6_local_addr;
+
+
+ fib_prefix_t fib_pfx;
+ fib_node_index_t fib_entry_index = FIB_NODE_INDEX_INVALID;
+ u32 fib_index;
+
+ fib_pfx.fp_proto = FIB_PROTOCOL_IP6;
+ fib_pfx.fp_len = ADDR_MGR_IP6_LEN;
+ /* At this point the face exists in the face table */
+ do
+ {
+ /* Check if the route already exist in the fib */
+ fib_pfx.fp_addr = to_ip46 ( /* is_v6 */ 1, appif_addr->as_u8);
+ fib_index = fib_table_find_or_create_and_lock (fib_pfx.fp_proto,
+ HICN_FIB_TABLE,
+ FIB_SOURCE_PLUGIN_HI);
+ fib_entry_index = fib_table_lookup_exact_match (fib_index, &fib_pfx);
+ fib_table_unlock (fib_index, fib_pfx.fp_proto, FIB_SOURCE_PLUGIN_HI);
+ if (fib_entry_index != FIB_NODE_INDEX_INVALID)
+ {
+ /* BUGFIX: nh_addr is an IPv6 address; the original passed
+ * is_v6 = 0 here, building a bogus ip46 from v6 bytes. */
+ fib_pfx.fp_addr = to_ip46 ( /* is_v6 */ 1, nh_addr->as_u8);
+ fib_index = fib_table_find_or_create_and_lock (fib_pfx.fp_proto,
+ HICN_FIB_TABLE,
+ FIB_SOURCE_PLUGIN_HI);
+ fib_entry_index =
+ fib_table_lookup_exact_match (fib_index, &fib_pfx);
+ fib_table_unlock (fib_index, fib_pfx.fp_proto,
+ FIB_SOURCE_PLUGIN_HI);
+ }
+ if (fib_entry_index != FIB_NODE_INDEX_INVALID)
+ {
+ /* Pair in use: skip to the next /127 pair and retry. */
+ increment_v6_address (appif_addr, 2);
+ increment_v6_address (nh_addr, 2);
+ }
+ }
+ while (fib_entry_index != FIB_NODE_INDEX_INVALID);
+
+ /* Advance the shared cursor past the pair just handed out. */
+ address_mgr_main.next_ip6_local_addr = *nh_addr;
+ increment_v6_address (&(address_mgr_main.next_ip6_local_addr), 1);
+}
+
+/*
+ * Return one free IPv4 address: probe the hICN FIB table starting at
+ * the allocator cursor until an address with no exact-match route is
+ * found.  The cursor is incremented on every probe, so it ends one
+ * past the returned address.
+ */
+ip4_address_t
+get_ip4_address ()
+{
+ ip4_address_t *prefix = &address_mgr_main.next_ip4_local_addr;
+ fib_prefix_t fib_pfx;
+ fib_node_index_t fib_entry_index = FIB_NODE_INDEX_INVALID;
+ u32 fib_index;
+
+ fib_pfx.fp_proto = FIB_PROTOCOL_IP4;
+ fib_pfx.fp_len = ADDR_MGR_IP4_LEN;
+ /* At this point the face exists in the face table */
+ do
+ {
+ /* Check if the route already exist in the fib */
+ fib_pfx.fp_addr = to_ip46 ( /* is_v6 */ 0, prefix->as_u8);
+ fib_index = fib_table_find_or_create_and_lock (fib_pfx.fp_proto,
+ HICN_FIB_TABLE,
+ FIB_SOURCE_PLUGIN_HI);
+ fib_entry_index = fib_table_lookup_exact_match (fib_index, &fib_pfx);
+ fib_table_unlock (fib_index, fib_pfx.fp_proto, FIB_SOURCE_PLUGIN_HI);
+ increment_v4_address (prefix, 1);
+ }
+ while (fib_entry_index != FIB_NODE_INDEX_INVALID);
+
+ /* fib_pfx still holds the last (free) address probed. */
+ return fib_pfx.fp_addr.ip4;
+}
+
+/*
+ * Return one free IPv6 address: probe the hICN FIB table starting at
+ * the allocator cursor until an address with no exact-match route is
+ * found.  The cursor is incremented on every probe, so it ends one
+ * past the returned address.
+ */
+ip6_address_t
+get_ip6_address ()
+{
+ ip6_address_t *prefix = &address_mgr_main.next_ip6_local_addr;
+ fib_prefix_t fib_pfx;
+ fib_node_index_t fib_entry_index = FIB_NODE_INDEX_INVALID;
+ u32 fib_index;
+
+ fib_pfx.fp_proto = FIB_PROTOCOL_IP6;
+ fib_pfx.fp_len = ADDR_MGR_IP6_LEN;
+ /* At this point the face exists in the face table */
+ do
+ {
+ /* Check if the route already exist in the fib */
+ fib_pfx.fp_addr = to_ip46 ( /* is_v6 */ 1, prefix->as_u8);
+ fib_index = fib_table_find_or_create_and_lock (fib_pfx.fp_proto,
+ HICN_FIB_TABLE,
+ FIB_SOURCE_PLUGIN_HI);
+ fib_entry_index = fib_table_lookup_exact_match (fib_index, &fib_pfx);
+ fib_table_unlock (fib_index, fib_pfx.fp_proto, FIB_SOURCE_PLUGIN_HI);
+ increment_v6_address (prefix, 1);
+ }
+ while (fib_entry_index != FIB_NODE_INDEX_INVALID);
+
+ /* fib_pfx still holds the last (free) address probed. */
+ return fib_pfx.fp_addr.ip6;
+}
+
+/*
+ * Initialize the allocator cursors: IPv4 allocation starts at the
+ * link-local style address 169.254.1.1, IPv6 at fc00::1 (ULA range).
+ */
+void
+address_mgr_init ()
+{
+ address_mgr_main_t *amm = &address_mgr_main;
+
+ amm->next_ip4_local_addr.as_u8[0] = 169;
+ amm->next_ip4_local_addr.as_u8[1] = 254;
+ amm->next_ip4_local_addr.as_u8[2] = 1;
+ amm->next_ip4_local_addr.as_u8[3] = 1;
+
+ ip6_address_set_zero (&amm->next_ip6_local_addr);
+ amm->next_ip6_local_addr.as_u16[0] = clib_host_to_net_u16 (0xfc00);
+ amm->next_ip6_local_addr.as_u8[15] = 1;
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/faces/app/address_mgr.h b/hicn-plugin/src/faces/app/address_mgr.h
new file mode 100755
index 000000000..99450dcdd
--- /dev/null
+++ b/hicn-plugin/src/faces/app/address_mgr.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _ADDRESS_MGR_H_
+#define _ADDRESS_MGR_H_
+
+/**
+ * @file
+ *
+ * @brief Address manager.
+ *
+ * Address manager that maintains a pool of ip4 and ip6 addresses to assign to
+ * an interface.
+ */
+
+#define ADDR_MGR_IP4_LEN 32
+#define ADDR_MGR_IP4_CONS_LEN 31
+#define ADDR_MGR_IP6_LEN 128
+#define ADDR_MGR_IP6_CONS_LEN 127
+
+/**
+ * @brief Get two consecutive IP v4 addresses from the same /31 subnet
+ *
+ * @param addr1 first ip address with the least significant bit set to 0
+ * @param addr2 second ip address with the least significant bit set to 1
+ */
+void get_two_ip4_addresses (ip4_address_t * addr1, ip4_address_t * addr2);
+
+/**
+ * @brief Get two consecutive IP v6 addresses from the same /127 subnet
+ *
+ * @param addr1 first ip address with the least significant bit set to 0
+ * @param addr2 second ip address with the least significant bit set to 1
+ */
+void get_two_ip6_addresses (ip6_address_t * addr1, ip6_address_t * addr2);
+
+/**
+ * @brief Get one IP v4 address
+ *
+ * @return ip address
+ */
+ip4_address_t get_ip4_address (void);
+
+/**
+ * @brief Get one IP v6 address
+ *
+ * @return ip address
+ */
+ip6_address_t get_ip6_address (void);
+
+/**
+ * @brief Init the address manager
+ */
+void address_mgr_init (void);
+
+#endif /* _ADDRESS_MGR_ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/hicn-plugin/src/faces/app/face_app_cli.c b/hicn-plugin/src/faces/app/face_app_cli.c
new file mode 100755
index 000000000..d55e990de
--- /dev/null
+++ b/hicn-plugin/src/faces/app/face_app_cli.c
@@ -0,0 +1,203 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/vnet.h>
+#include <vnet/dpo/dpo.h>
+#include <vlib/vlib.h>
+#include <vnet/ip/ip6_packet.h>
+
+#include "../ip/face_ip.h"
+#include "../ip/dpo_ip.h"
+#include "../face.h"
+#include "face_prod.h"
+#include "face_cons.h"
+
+#define HICN_FACE_NONE 0
+#define HICN_FACE_DELETE 1
+#define HICN_FACE_ADD 2
+
+/*
+ * CLI handler for 'hicn face app': adds or deletes application
+ * (producer/consumer) faces.
+ *
+ * Fixes over the original:
+ * - sw_if and len are zero-initialized: both were read uninitialized
+ *   (UB) when 'add' was given without 'intfc' / 'prod prefix ...'.
+ * - output buffers built with format() are vectors, which are not
+ *   NUL-terminated; print them with %v and free them afterwards.
+ */
+static clib_error_t *
+hicn_face_app_cli_set_command_fn (vlib_main_t * vm,
+ unformat_input_t * main_input,
+ vlib_cli_command_t * cmd)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ ip46_address_t prefix;
+ hicn_face_id_t face_id = HICN_FACE_NULL;
+ u32 cs_reserved = HICN_PARAM_FACE_DFT_CS_RESERVED;
+ int ret = HICN_ERROR_NONE;
+ int sw_if = 0; /* was read uninitialized when 'intfc' missing */
+ int face_op = HICN_FACE_NONE;
+ int prod = 0;
+ int len = 0; /* was read uninitialized when 'prod prefix' missing */
+
+ /* Get a line of input. */
+ unformat_input_t _line_input, *line_input = &_line_input;
+ if (!unformat_user (main_input, unformat_line_input, line_input))
+ {
+ return (0);
+ }
+
+ /* NOTE(review): the early returns below never unformat_free
+ * line_input; consider a common exit path. */
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "del"))
+ {
+ face_op = HICN_FACE_DELETE;
+ }
+ else if (face_op == HICN_FACE_DELETE
+ && unformat (line_input, "id %d", &face_id))
+ ;
+ else if (unformat (line_input, "add"))
+ {
+ face_op = HICN_FACE_ADD;
+ }
+ else if (face_op == HICN_FACE_ADD)
+ {
+ if (unformat (line_input, "intfc %U",
+ unformat_vnet_sw_interface, vnm, &sw_if))
+ ;
+ else
+ if (unformat
+ (line_input, "prod prefix %U/%d", unformat_ip46_address,
+ &prefix, IP46_TYPE_ANY, &len))
+ {
+ prod = 1;
+ }
+ else if (prod && unformat (line_input, "cs_size %d", &cs_reserved))
+ ;
+ else if (unformat (line_input, "cons"))
+ ;
+ else
+ {
+ return clib_error_return (0, "%s '%U'",
+ get_error_string
+ (HICN_ERROR_CLI_INVAL),
+ format_unformat_error, line_input);
+ }
+ }
+ else
+ {
+ return clib_error_return (0, "%s '%U'",
+ get_error_string (HICN_ERROR_CLI_INVAL),
+ format_unformat_error, line_input);
+ }
+ }
+
+ /* A face id was supplied (delete path): validate it up front. */
+ if (face_id != HICN_FACE_NULL)
+ {
+
+ if (!hicn_dpoi_idx_is_valid (face_id))
+ {
+ return clib_error_return (0, "%s, face_id %d not valid",
+ get_error_string (ret), face_id);
+ }
+ }
+
+ int rv;
+ switch (face_op)
+ {
+ case HICN_FACE_ADD:
+ {
+ ip46_address_t prod_addr;
+ ip4_address_t cons_addr4;
+ ip6_address_t cons_addr6;
+
+ hicn_prefix_t name_prefix = {
+ .name = prefix,
+ .len = len,
+ };
+ if (prod)
+ {
+ rv =
+ hicn_face_prod_add (&name_prefix, sw_if, &cs_reserved,
+ &prod_addr, &face_id);
+ if (rv == HICN_ERROR_NONE)
+ {
+ u8 *sbuf = NULL;
+ sbuf =
+ format (sbuf, "Face id: %d, producer address %U", face_id,
+ format_ip46_address, &prod_addr,
+ 0 /*IP46_ANY_TYPE */ );
+ /* BUGFIX: sbuf is a non-NUL-terminated vector; use %v
+ * and free it (was "%s" and leaked). */
+ vlib_cli_output (vm, "%v", sbuf);
+ vec_free (sbuf);
+ }
+ else
+ {
+ return clib_error_return (0, get_error_string (rv));
+ }
+ }
+ else
+ {
+ rv =
+ hicn_face_cons_add (&cons_addr4, &cons_addr6, sw_if, &face_id);
+ if (rv == HICN_ERROR_NONE)
+ {
+ u8 *sbuf = NULL;
+ sbuf =
+ format (sbuf, "Face id: %d, consumer addresses v4 %U v6 %U",
+ face_id, format_ip4_address, &cons_addr4,
+ format_ip6_address, &cons_addr6);
+ /* BUGFIX: see above -- %v + vec_free. */
+ vlib_cli_output (vm, "%v", sbuf);
+ vec_free (sbuf);
+ }
+ else
+ {
+ return clib_error_return (0, get_error_string (rv));
+ }
+ }
+ break;
+ }
+ case HICN_FACE_DELETE:
+ {
+ hicn_face_t *face = hicn_dpoi_get_from_idx (face_id);
+
+ /* Dispatch on the face type recorded at creation time. */
+ if (face->shared.flags & HICN_FACE_FLAGS_APPFACE_CONS)
+ rv = hicn_face_cons_del (face_id);
+ else
+ rv = hicn_face_prod_del (face_id);
+ if (rv == HICN_ERROR_NONE)
+ {
+ vlib_cli_output (vm, "Face %d deleted", face_id);
+ }
+ else
+ {
+ return clib_error_return (0, get_error_string (rv));
+ }
+ break;
+ }
+ default:
+ return clib_error_return (0, "Operation (%d) not implemented", face_op);
+ break;
+ }
+ return (rv == HICN_ERROR_NONE) ? 0 : clib_error_return (0, "%s\n",
+ get_error_string
+ (rv));
+}
+
+/* cli declaration for 'hicn face app' */
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (hicn_face_app_cli_set_command, static) =
+{
+  .path = "hicn face app",
+  /* Fixed: balanced braces and the 'id' keyword the parser requires. */
+  .short_help = "hicn face app {add intfc <sw_if> {prod prefix <hicn_prefix> cs_size <size_in_packets>} {cons}} | {del id <face_id>}",
+  .function = hicn_face_app_cli_set_command_fn,
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/hicn-plugin/src/faces/app/face_cons.c b/hicn-plugin/src/faces/app/face_cons.c
new file mode 100755
index 000000000..8278b6ab3
--- /dev/null
+++ b/hicn-plugin/src/faces/app/face_cons.c
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/ip/ip6_packet.h>
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+
+#include "face_cons.h"
+#include "address_mgr.h"
+#include "../../infra.h"
+
+/*
+ * Create a consumer application face on interface swif: bring the
+ * interface up, assign one IPv4 /31 pair and one IPv6 /127 pair
+ * (interface side + next-hop side), create an hICN iface for each
+ * family and enable the hicn-iface-ip6-input feature on the interface.
+ *
+ * @param nh_addr4 out: IPv4 address the application should use
+ * @param nh_addr6 out: IPv6 address the application should use
+ * @param swif interface to attach the face to
+ * @param faceid out: id of the created face (last one created)
+ */
+int
+hicn_face_cons_add (ip4_address_t * nh_addr4, ip6_address_t * nh_addr6,
+ u32 swif, hicn_face_id_t * faceid)
+{
+ /* Create the corresponding appif if */
+ /* Retrieve a valid local ip address to assign to the appif */
+ /* Set the ip address and create the face in the face db */
+
+ vlib_main_t *vm = vlib_get_main ();
+ vnet_main_t *vnm = vnet_get_main ();
+
+ hicn_main_t *hm = &hicn_main;
+
+ ip46_address_t if_ip;
+ ip46_address_reset (&if_ip);
+ nh_addr4->as_u32 = 0;
+ nh_addr6->as_u64[0] = 0;
+ nh_addr6->as_u64[1] = 0;
+ u32 if_flags = 0;
+
+ if (!hm->is_enabled)
+ {
+ return HICN_ERROR_FWD_NOT_ENABLED;
+ }
+ if_flags |= VNET_SW_INTERFACE_FLAG_ADMIN_UP;
+ vnet_sw_interface_set_flags (vnm, swif, if_flags);
+
+ get_two_ip4_addresses (&(if_ip.ip4), nh_addr4);
+ ip4_add_del_interface_address (vm,
+ swif,
+ &(if_ip.ip4),
+ ADDR_MGR_IP4_CONS_LEN, 0 /* is_del */ );
+
+ ip46_address_t nh_addr = to_ip46 (0, (u8 *) nh_addr4);
+
+ hicn_iface_ip_add (&if_ip, &nh_addr, swif, faceid);
+
+ hicn_face_t *face = hicn_dpoi_get_from_idx (*faceid);
+ face->shared.flags |= HICN_FACE_FLAGS_APPFACE_CONS;
+
+ get_two_ip6_addresses (&(if_ip.ip6), nh_addr6);
+ ip6_add_del_interface_address (vm,
+ swif,
+ &(if_ip.ip6),
+ ADDR_MGR_IP6_CONS_LEN, 0 /* is_del */ );
+
+ /* NOTE(review): the cast assumes ip6_address_t overlays the ip46
+ * union exactly -- confirm against the ip46_address_t layout. */
+ hicn_iface_ip_add (&if_ip, (ip46_address_t *) nh_addr6, swif, faceid);
+
+ face = hicn_dpoi_get_from_idx (*faceid);
+ face->shared.flags |= HICN_FACE_FLAGS_APPFACE_CONS;
+
+ /* NOTE(review): only the ip6-unicast feature is enabled although an
+ * IPv4 iface was also created above -- confirm the ip4 input path is
+ * handled elsewhere. */
+ return vnet_feature_enable_disable ("ip6-unicast",
+ "hicn-iface-ip6-input", swif, 1, 0,
+ 0) ==
+ 0 ? HICN_ERROR_NONE : HICN_ERROR_APPFACE_FEATURE;
+}
+
+/*
+ * Delete a consumer application face: remove the ip face and, on
+ * success, disable the hicn-iface-ip6-input feature on its interface.
+ * Returns HICN_ERROR_APPFACE_NOT_FOUND if face_id is not a consumer
+ * application face.
+ */
+int
+hicn_face_cons_del (hicn_face_id_t face_id)
+{
+ hicn_face_t *face = hicn_dpoi_get_from_idx (face_id);
+
+ if (face->shared.flags & HICN_FACE_FLAGS_APPFACE_CONS)
+ {
+ int ret = hicn_face_ip_del (face_id);
+
+ /* NOTE(review): face->shared.sw_if is read after
+ * hicn_face_ip_del; confirm the face memory is still valid at
+ * this point (potential use-after-free otherwise). */
+ return ret ==
+ HICN_ERROR_NONE
+ ? (vnet_feature_enable_disable
+ ("ip6-unicast", "hicn-iface-ip6-input", face->shared.sw_if, 0,
+ 0, 0) == 0 ? HICN_ERROR_NONE : HICN_ERROR_APPFACE_FEATURE) : ret;
+ }
+ else
+ {
+ return HICN_ERROR_APPFACE_NOT_FOUND;
+ }
+}
+
+/* Append the consumer-face tag to s (standard face format callback). */
+u8 *
+format_hicn_face_cons (u8 * s, va_list * args)
+{
+ /* Consume (and ignore) the standard face-format arguments. */
+ CLIB_UNUSED (index_t index) = va_arg (*args, index_t);
+ CLIB_UNUSED (u32 indent) = va_arg (*args, u32);
+
+ return format (s, " (consumer face)");
+}
+
+/* Register hicn-iface-ip6-input on the ip6-unicast feature arc so
+ * consumer-face traffic is intercepted before ip6-inacl. */
+/* *INDENT-OFF* */
+VNET_FEATURE_INIT(hicn_cons_app, static)=
+{
+  .arc_name = "ip6-unicast",
+  .node_name = "hicn-iface-ip6-input",
+  .runs_before = VNET_FEATURES("ip6-inacl"),
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/faces/app/face_cons.h b/hicn-plugin/src/faces/app/face_cons.h
new file mode 100755
index 000000000..067b45a1f
--- /dev/null
+++ b/hicn-plugin/src/faces/app/face_cons.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _FACE_CONSUMER_H_
+#define _FACE_CONSUMER_H_
+
+#include <vnet/vnet.h>
+#include "../face.h"
+
+/**
+ * @file
+ *
+ * @brief Consumer application face.
+ *
+ * A consumer application face is built upon an ip face and identifies a local
+ * consumer application (co-located with the forwarder) that acts as a
+ * consumer. The interface used by the consumer application face is
+ * assumed to be reserved only for hICN traffic (e.g., dedicated memif that
+ * connects the application to the forwarder). Only one application face can be
+ * assigned to an interface.
+ *
+ * In the vlib graph a consumer application face directly connect the
+ * device-input node to the hicn-vface-ip node.
+ */
+
+/**
+ * @brief Add a new consumer application face
+ *
+ * The method creates the internal ip face and set the ip address to the interface.
+ * @param nh_addr4 ipv4 address to assign to interface used by the application to
+ * send interest to the consumer face
+ * @param nh_addr6 ipv6 address to assign to interface used by the application to
+ * send interest to the consumer face
+ * @param swif interface associated to the face
+ */
+int
+hicn_face_cons_add (ip4_address_t * nh_addr4, ip6_address_t * nh_addr6,
+ u32 swif, hicn_face_id_t * faceid);
+
+/**
+ * @brief Delete an existing consumer application face
+ *
+ * @param face_id Id of the consumer application face
+ */
+int hicn_face_cons_del (hicn_face_id_t face_id);
+
+/**
+ * @brief Format an application consumer face
+ *
+ * @param s Pointer to a previous string. If null it will be initialize
+ * @param args Array storing input values. Expected u32 face_id and u32 indent
+ * @return String with the formatted face
+ */
+u8 *format_hicn_face_cons (u8 * s, va_list * args);
+
+
+#endif /* _FACE_CONSUMER_H_ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/faces/app/face_prod.c b/hicn-plugin/src/faces/app/face_prod.c
new file mode 100755
index 000000000..d06fe2ff3
--- /dev/null
+++ b/hicn-plugin/src/faces/app/face_prod.c
@@ -0,0 +1,375 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/ip/ip6_packet.h>
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+
+#include "face_prod.h"
+#include "address_mgr.h"
+#include "../../infra.h"
+#include "../../route.h"
+#include "../../cache_policies/cs_lru.h"
+
+hicn_face_prod_state_t *face_state_vec;
+
+/* used to check if an interface is already in the vector */
+u32 *face_state_pool;
+
+/*
+ * Register the producer-face state for interface swif (stores the
+ * prefix in face_state_vec, marks swif busy in face_state_pool) and
+ * enable the hicn-face-prod-input feature on the matching unicast arc.
+ * Returns HICN_ERROR_APPFACE_ALREADY_ENABLED if swif is already
+ * registered.
+ */
+static int
+hicn_app_state_create (u32 swif, hicn_prefix_t * prefix)
+{
+ /* Make sure that the pool is not empty */
+ /* NOTE(review): pool_validate_index asserts that index 0 exists --
+ * confirm the pool is seeded before the first call. */
+ pool_validate_index (face_state_pool, 0);
+
+ u32 *swif_app;
+ u8 found = 0;
+ /* *INDENT-OFF* */
+ pool_foreach (swif_app, face_state_pool,{
+ if (*swif_app == swif)
+ {
+ found = 1;
+ }
+ }
+ );
+ /* *INDENT-ON* */
+
+
+ if (found)
+ return HICN_ERROR_APPFACE_ALREADY_ENABLED;
+
+
+ /* Create the appif and store in the vector */
+ /* face_state_vec is indexed directly by swif (sparse vector). */
+ vec_validate (face_state_vec, swif);
+ clib_memcpy (&(face_state_vec[swif].prefix), prefix,
+ sizeof (hicn_prefix_t));
+
+ /* Set as busy the element in the vector */
+ pool_get (face_state_pool, swif_app);
+ *swif_app = swif;
+
+ int ret = HICN_ERROR_NONE;
+ if (ip46_address_is_ip4 (&(prefix->name)))
+ {
+ ret =
+ vnet_feature_enable_disable ("ip4-unicast", "hicn-face-prod-input",
+ swif, 1, 0, 0);
+ }
+ else
+ {
+ ret =
+ vnet_feature_enable_disable ("ip6-unicast", "hicn-face-prod-input",
+ swif, 1, 0, 0);
+ }
+
+ return ret == 0 ? HICN_ERROR_NONE : HICN_ERROR_APPFACE_FEATURE;
+}
+
+/*
+ * Remove the producer-face state for interface swif and disable the
+ * hicn-face-prod-input feature on the matching unicast arc.
+ * Returns HICN_ERROR_APPFACE_NOT_FOUND if swif was never registered.
+ */
+static int
+hicn_app_state_del (u32 swif)
+{
+ /* Make sure that the pool is not empty */
+ pool_validate_index (face_state_pool, 0);
+
+ u32 *temp;
+ u32 *swif_app = NULL;
+ u8 found = 0;
+ ip46_address_t *prefix_addr;
+ /* *INDENT-OFF* */
+ pool_foreach (temp, face_state_pool,{
+ if (*temp == swif)
+ {
+ found = 1;
+ swif_app = temp;
+ }
+ }
+ );
+ /* *INDENT-ON* */
+
+ if (!found)
+ return HICN_ERROR_APPFACE_NOT_FOUND;
+
+ /* BUGFIX: index face_state_vec only after confirming swif is
+ * tracked; the vector may be shorter than swif otherwise
+ * (out-of-bounds read in the original). */
+ prefix_addr = &(face_state_vec[swif].prefix.name);
+
+ int ret = HICN_ERROR_NONE;
+ if (ip46_address_is_ip4 (prefix_addr))
+ {
+ ret =
+ vnet_feature_enable_disable ("ip4-unicast", "hicn-face-prod-input",
+ swif, 0, 0, 0);
+ }
+ else
+ {
+ ret =
+ vnet_feature_enable_disable ("ip6-unicast", "hicn-face-prod-input",
+ swif, 0, 0, 0);
+ }
+
+ pool_put (face_state_pool, swif_app);
+ memset (&face_state_vec[swif], 0, sizeof (hicn_face_prod_state_t));
+
+ return ret == 0 ? HICN_ERROR_NONE : HICN_ERROR_APPFACE_FEATURE;
+}
+
+/*
+ * Create a producer application face on interface sw_if for prefix.
+ * Brings the interface up, assigns a local address of the matching
+ * family, creates (or undeletes) the ip face, installs the LRU cache
+ * policy callbacks, reserves CS space and adds the route.
+ *
+ * @param prefix name prefix served by the producer
+ * @param sw_if interface to attach the face to
+ * @param cs_reserved in/out: requested CS reservation, clamped to
+ *        what is available
+ * @param prod_addr out: local address assigned to the interface
+ * @param faceid out: id of the created face
+ */
+int
+hicn_face_prod_add (hicn_prefix_t * prefix, u32 sw_if, u32 * cs_reserved,
+ ip46_address_t * prod_addr, hicn_face_id_t * faceid)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ vnet_main_t *vnm = vnet_get_main ();
+
+ hicn_main_t *hm = &hicn_main;
+
+ ip46_address_t app_ip;
+ u32 if_flags = 0;
+
+ if (!hm->is_enabled)
+ {
+ return HICN_ERROR_FWD_NOT_ENABLED;
+ }
+ int ret = HICN_ERROR_NONE;
+ hicn_face_t *face = NULL;
+
+ if_flags |= VNET_SW_INTERFACE_FLAG_ADMIN_UP;
+ vnet_sw_interface_set_flags (vnm, sw_if, if_flags);
+
+ if (ip46_address_is_zero (&prefix->name))
+ {
+ return HICN_ERROR_APPFACE_PROD_PREFIX_NULL;
+ }
+ /*
+ * Check if a producer face is already existing for the same prefix
+ * and sw_if
+ */
+ if (ip46_address_is_ip4 (&prefix->name))
+ {
+ face =
+ hicn_face_ip4_get (&(prefix->name.ip4), sw_if,
+ &hicn_face_ip_remote_hashtb);
+ }
+ else
+ {
+ face =
+ hicn_face_ip6_get (&(prefix->name.ip6), sw_if,
+ &hicn_face_ip_remote_hashtb);
+ /* NOTE(review): the ip6 branch rejects even DELETED faces here,
+ * so the undelete path below is ip4-only -- confirm intended. */
+ if (face != NULL)
+ return HICN_ERROR_FACE_ALREADY_CREATED;
+ }
+
+ if (face != NULL)
+ {
+ if (!(face->shared.flags & HICN_FACE_FLAGS_DELETED))
+ return HICN_ERROR_FACE_ALREADY_CREATED;
+
+ /*
+ * Something went worng, a consumer face exists for the
+ * producer's prefix.
+ */
+ /* It should never happens, this is a safety check. */
+ if (face->shared.flags & HICN_FACE_FLAGS_APPFACE_CONS)
+ return HICN_ERROR_FACE_ALREADY_CREATED;
+
+ /* If the face exists but is marked as deleted, undelete it */
+ if (face->shared.flags & HICN_FACE_FLAGS_DELETED)
+ {
+ /*
+ * remove the deleted flag and retrieve the face
+ * local addr
+ */
+ /* BUGFIX: the original did 'flags &= HICN_FACE_FLAGS_DELETED',
+ * which clears every flag EXCEPT deleted; clear only the
+ * deleted bit instead. */
+ face->shared.flags &= ~HICN_FACE_FLAGS_DELETED;
+ hicn_face_prod_t *prod_face = (hicn_face_prod_t *) face->data;
+ app_ip = prod_face->ip_face.local_addr;
+ /* NOTE(review): *faceid is not written on this path before it
+ * is used below -- confirm the caller pre-populates it. */
+ }
+ }
+ else
+ {
+ /* Otherwise create the face */
+ if (ip46_address_is_ip4 (&prefix->name))
+ {
+ /*
+ * Otherwise retrieve an ip address to assign as a
+ * local ip addr.
+ */
+ ip4_address_t app_ip4 = get_ip4_address ();
+ ip4_add_del_interface_address (vm,
+ sw_if,
+ &app_ip4,
+ ADDR_MGR_IP4_CONS_LEN,
+ 0 /* is_del */ );
+ app_ip = to_ip46 ( /* isv6 */ 0, app_ip4.as_u8);
+ }
+ else
+ {
+ ip6_address_t app_ip6 = get_ip6_address ();
+ ip6_add_del_interface_address (vm,
+ sw_if,
+ &app_ip6,
+ ADDR_MGR_IP6_CONS_LEN,
+ 0 /* is_del */ );
+ app_ip = to_ip46 ( /* isv6 */ 1, app_ip6.as_u8);
+ }
+
+ /*
+ * Special case: the nh_addr in the face is the appif ip
+ * address
+ */
+ ret = hicn_face_ip_add (&app_ip, &(prefix->name), sw_if, faceid);
+
+ face = hicn_dpoi_get_from_idx (*faceid);
+
+ face->shared.flags |= HICN_FACE_FLAGS_APPFACE_PROD;
+
+ hicn_face_prod_t *prod_face = (hicn_face_prod_t *) face->data;
+
+ /*
+ * For the moment we keep them here although it would be good
+ * to create a different face for appface
+ */
+ prod_face->policy_vft.hicn_cs_insert = hicn_cs_lru.hicn_cs_insert;
+ prod_face->policy_vft.hicn_cs_update = hicn_cs_lru.hicn_cs_update;
+ prod_face->policy_vft.hicn_cs_dequeue = hicn_cs_lru.hicn_cs_dequeue;
+ prod_face->policy_vft.hicn_cs_delete_get =
+ hicn_cs_lru.hicn_cs_delete_get;
+ prod_face->policy_vft.hicn_cs_trim = hicn_cs_lru.hicn_cs_trim;
+
+ }
+
+ if (ret == HICN_ERROR_NONE
+ && hicn_face_prod_set_lru_max (*faceid, cs_reserved) == HICN_ERROR_NONE)
+ {
+ /* NOTE(review): hicn_app_state_create's return value is
+ * discarded -- a failure here leaves the feature disabled. */
+ hicn_app_state_create (sw_if, prefix);
+ ret = hicn_route_add (faceid, 1, &(prefix->name), prefix->len);
+ }
+
+ *prod_addr = app_ip;
+
+ /* Cleanup in case of something went wrong. */
+ if (ret)
+ {
+ hicn_app_state_del (sw_if);
+
+ if (*faceid != HICN_FACE_NULL)
+ hicn_face_ip_del (*faceid);
+ }
+ return ret;
+}
+
+/*
+ * Delete a producer application face: release its CS reservation,
+ * remove it from the FIB and delete the underlying ip face.
+ * Returns HICN_ERROR_APPFACE_NOT_FOUND if face_id is not a producer
+ * application face.
+ */
+int
+hicn_face_prod_del (hicn_face_id_t face_id)
+{
+ hicn_face_t *face = hicn_dpoi_get_from_idx (face_id);
+
+ if (face->shared.flags & HICN_FACE_FLAGS_APPFACE_PROD)
+ {
+ hicn_face_prod_t *prod_face = (hicn_face_prod_t *) face->data;
+ /* Free the CS reserved for the face */
+ /* NOTE(review): this is asymmetric with set_lru_max, which only
+ * increments pcs_app_count -- verify pcs_app_max should grow
+ * here rather than stay fixed. */
+ hicn_main.pitcs.pcs_app_max += prod_face->policy.max;
+ hicn_main.pitcs.pcs_app_count -= prod_face->policy.max;
+ prod_face->policy.max = 0;
+
+ /* Remove the face from the fib */
+ hicn_route_del_nhop (&(face_state_vec[face->shared.sw_if].prefix.name),
+ (face_state_vec[face->shared.sw_if].prefix.len),
+ face_id);
+
+ /* NOTE(review): face->shared.sw_if is read after
+ * hicn_face_ip_del; confirm the face memory is still valid. */
+ int ret = hicn_face_ip_del (face_id);
+ return ret ==
+ HICN_ERROR_NONE ? hicn_app_state_del (face->shared.sw_if) : ret;
+ }
+ else
+ {
+ return HICN_ERROR_APPFACE_NOT_FOUND;
+ }
+}
+
+/*
+ * Reserve CS space for a producer face.  The requested size is
+ * clamped to HICN_PARAM_FACE_MAX_CS_RESERVED and to what is still
+ * available in the application CS budget; *requested_size is updated
+ * with the size actually granted.
+ */
+int
+hicn_face_prod_set_lru_max (hicn_face_id_t face_id, u32 * requested_size)
+{
+ int ret = HICN_ERROR_NONE;
+ vlib_main_t *vm = vlib_get_main ();
+ hicn_face_t *face;
+ hicn_face_prod_t *face_prod;
+
+ if (!hicn_infra_fwdr_initialized)
+ {
+ ret = HICN_ERROR_FWD_NOT_ENABLED;
+ vlib_cli_output (vm, "hicn: %s\n", get_error_string (ret));
+ return ret;
+ }
+ face = hicn_dpoi_get_from_idx (face_id);
+
+ /* BUGFIX: the original read face->data before this NULL check,
+ * dereferencing a NULL pointer on the path the check guards. */
+ if (face == NULL)
+ return HICN_ERROR_FACE_NOT_FOUND;
+
+ face_prod = (hicn_face_prod_t *) face->data;
+
+ if (*requested_size > HICN_PARAM_FACE_MAX_CS_RESERVED)
+ *requested_size = HICN_PARAM_FACE_MAX_CS_RESERVED;
+
+ uint32_t available =
+ hicn_main.pitcs.pcs_app_max - hicn_main.pitcs.pcs_app_count;
+
+ if (*requested_size > available)
+ *requested_size = available;
+
+ /* Initialize the per-face LRU policy with the granted budget. */
+ face_prod->policy.max = *requested_size;
+ face_prod->policy.count = 0;
+ face_prod->policy.head = face_prod->policy.tail = 0;
+
+ hicn_main.pitcs.pcs_app_count += *requested_size;
+
+ return ret;
+}
+
+/* Append the producer-face CS statistics to s (face format callback). */
+u8 *
+format_hicn_face_prod (u8 * s, va_list * args)
+{
+ index_t index = va_arg (*args, index_t);
+ CLIB_UNUSED (u32 indent) = va_arg (*args, u32);
+
+ hicn_face_t *face = hicn_dpoi_get_from_idx (index);
+ hicn_face_prod_t *prod_face = (hicn_face_prod_t *) face->data;
+
+ return format (s, " (producer face: CS size %d, data cached %d)",
+ prod_face->policy.max, prod_face->policy.count);
+}
+
+/* Register hicn-face-prod-input on the ip6-unicast feature arc so
+ * producer-face traffic is intercepted before ip6-inacl. */
+/* *INDENT-OFF* */
+VNET_FEATURE_INIT(hicn_prod_app_input_ip6, static)=
+{
+  .arc_name = "ip6-unicast",
+  .node_name = "hicn-face-prod-input",
+  .runs_before = VNET_FEATURES("ip6-inacl"),
+};
+/* *INDENT-ON* */
+
+/* Same registration for the ip4-unicast arc. */
+/* *INDENT-OFF* */
+VNET_FEATURE_INIT(hicn_prod_app_input_ip4, static)=
+{
+  .arc_name = "ip4-unicast",
+  .node_name = "hicn-face-prod-input",
+  .runs_before = VNET_FEATURES("ip4-inacl"),
+};
+/* *INDENT-ON* */
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/faces/app/face_prod.h b/hicn-plugin/src/faces/app/face_prod.h
new file mode 100755
index 000000000..89b74680b
--- /dev/null
+++ b/hicn-plugin/src/faces/app/face_prod.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _FACE_PRODUCER_H_
+#define _FACE_PRODUCER_H_
+
+#include "../../cache_policies/cs_policy.h"
+#include "../ip/face_ip.h"
+
+/**
+ * @file
+ *
+ * @brief Producer application face.
+ *
+ * A producer application face is built upon an IP face and identifies a local
+ * producer application (co-located with the forwarder). In the current design
+ * an application face is either a face towards a consumer or towards a
+ * producer. The interface used by the producer application face is assumed to
+ * be reserved only for hICN traffic (e.g., a dedicated memif that connects
+ * the application to the forwarder). Only one application face can be
+ * assigned to an interface.
+ *
+ * Each producer application face is assigned a portion of the CS. Every data
+ * packet arriving at a producer application face is stored in the portion of
+ * the CS assigned to that face. The eviction policy is defined per face.
+ * Available eviction policies are listed in the /cache_policies folder.
+ *
+ * In the vlib graph a producer application face is directly connected to the
+ * device-input node (with the node hicn-face-prod-input) and passes every packet to
+ * the hicn-face-ip node.
+ */
+
+/**
+ * @brief Producer application face state that refers to the hICN producer
+ * socket created by the application.
+ */
+typedef struct
+{
+  /* Prefix served by the producer; incoming data must fall inside it */
+  hicn_prefix_t prefix;
+} hicn_face_prod_state_t;
+
+/* Per-interface producer state, indexed by sw_if_index
+ * (see face_prod_node.c) */
+extern hicn_face_prod_state_t *face_state_vec;
+
+/**
+ * @brief Producer face: the underlying ip face plus the CS eviction policy
+ * state and its virtual function table.
+ */
+typedef struct __attribute__ ((packed)) hicn_face_prod_t_
+{
+  hicn_face_ip_t ip_face;
+
+  /* State and vft of the eviction policy for this face's CS portion */
+  hicn_cs_policy_t policy;
+  hicn_cs_policy_vft_t policy_vft;
+
+} hicn_face_prod_t;
+
+/**
+ * @brief Add a new producer application face
+ *
+ * The method creates the internal ip face and the state specific to the
+ * producer application face. This method setups a route in the FIB for the
+ * producer's prefix.
+ * @param prefix hicn prefix name assigned to the producer face
+ * @param len length of the prefix
+ * @param swif interface associated to the face
+ * @param cs_reserved return the amount of cs assigned to the face
+ * @param prod_addr address to assign to the interface used by the application to
+ * send data to the producer face
+ */
+int
+hicn_face_prod_add (hicn_prefix_t * prefix, u32 swif, u32 * cs_reserved,
+ ip46_address_t * prod_addr, hicn_face_id_t * faceid);
+
+/**
+ * @brief Delete an existing application face
+ *
+ * @param faceid id of the face to remove
+ */
+int hicn_face_prod_del (hicn_face_id_t faceid);
+
+/**
+ * @brief Set lru queue size for an app face
+ *
+ * @param face_id Id of the producer application face
+ */
+int hicn_face_prod_set_lru_max (hicn_face_id_t face_id, u32 * requested_size);
+
+/**
+ * @brief Format an application producer face
+ *
+ * @param s Pointer to a previous string. If null it will be initialize
+ * @param args Array storing input values. Expected u32 face_id and u32 indent
+ * @return String with the formatted face
+ */
+u8 *format_hicn_face_prod (u8 * s, va_list * args);
+
+
+#endif /* _FACE_PRODUCER_H_ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/faces/app/face_prod_node.c b/hicn-plugin/src/faces/app/face_prod_node.c
new file mode 100755
index 000000000..2e746a703
--- /dev/null
+++ b/hicn-plugin/src/faces/app/face_prod_node.c
@@ -0,0 +1,341 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file
+ *
+ * @brief Application interface node
+ *
+ * This node runs after the device-input node and performs some safety checks
+ * in order to avoid unexpected interests and data (i.e., hICN packets whose
+ * name does not contain the prefix associated to the application face)
+ */
+
+#include "face_prod.h"
+#include "../../hicn_api.h"
+#include "../../mgmt.h"
+
+/* Per-node error table: counts packets whose name is outside the prefix
+ * bound to the producer socket */
+#define foreach_face_prod_input_error \
+ _(NOT_SOCK_PREFIX, "name not in the socket prefix")
+
+typedef enum
+{
+#define _(f,s) FACE_PROD_INPUT_ERROR_##f,
+  foreach_face_prod_input_error
+#undef _
+  FACE_PROD_INPUT_N_ERROR,
+} face_prod_input_error_t;
+
+static __clib_unused char *face_prod_input_error_strings[] = {
+#define _(n,s) s,
+  foreach_face_prod_input_error
+#undef _
+};
+
+/* Node context data */
+typedef struct hicn_face_prod_runtime_s
+{
+  int id;
+} hicn_face_prod_runtime_t;
+
+/* Trace record captured per packet when tracing is enabled */
+typedef struct
+{
+  u32 next_index;
+  u32 sw_if_index;
+} hicn_face_prod_input_trace_t;
+
+/* Next nodes: data matching the prefix continues to the ip4/ip6 face input
+ * node; everything else is dropped */
+typedef enum
+{
+  HICN_FACE_PROD_NEXT_DATA_IP4,
+  HICN_FACE_PROD_NEXT_DATA_IP6,
+  HICN_FACE_PROD_NEXT_ERROR_DROP,
+  HICN_FACE_PROD_N_NEXT,
+} hicn_face_prod_next_t;
+
+vlib_node_registration_t hicn_face_prod_input_node;
+
+/* Render a trace record produced by this node */
+static __clib_unused u8 *
+format_face_prod_input_trace (u8 * s, va_list * args)
+{
+  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+  hicn_face_prod_input_trace_t *trace =
+    va_arg (*args, hicn_face_prod_input_trace_t *);
+  CLIB_UNUSED (u32 indent) = format_get_indent (s);
+
+  return format (s, "prod-face: sw_if_index %d next-index %d",
+		 trace->sw_if_index, trace->next_index);
+}
+
+/* Check whether an IPv4 name belongs to the socket prefix. */
+static_always_inline int
+match_ip4_name (u32 * name, hicn_prefix_t * prefix)
+{
+  /* NOTE(review): this is an AND-based containment test (every bit set in
+   * the prefix address must also be set in the name), not a masked prefix
+   * compare — confirm this matches the intended hicn_prefix_t semantics. */
+  u32 masked = *name & prefix->name.ip4.data_u32;
+
+  return (masked == prefix->name.ip4.data_u32);
+}
+
+/* Check whether an IPv6 name belongs to the socket prefix.
+ * NOTE(review): like match_ip4_name this is an AND-based containment test,
+ * not an XOR/masked prefix compare, despite the accumulator's name. */
+static_always_inline int
+match_ip6_name (u32x4 * name, hicn_prefix_t * prefix)
+{
+  /* Accumulator viewed either as one 128-bit vector or as two u64 halves */
+  union
+  {
+    u32x4 as_u32x4;
+    u64 as_u64[2];
+    u32 as_u32[4];
+  } xor_sum __attribute__ ((aligned (sizeof (u32x4))));
+
+#ifdef CLIB_HAVE_VEC128
+  if (U32X4_ALIGNED (name))
+    {				//SSE can't handle unaligned data
+      xor_sum.as_u32x4 = *((u32x4 *) name) &
+	UNION_CAST (prefix->name.ip6.as_u64[0], u32x4);
+    }
+  else
+#endif /* CLIB_HAVE_VEC128 */
+    {
+      /* Scalar fallback for unaligned input */
+      xor_sum.as_u64[0] = ((u64 *) name)[0] & prefix->name.ip6.as_u64[0];
+      xor_sum.as_u64[1] = ((u64 *) name)[1] & prefix->name.ip6.as_u64[1];
+    }
+
+  return (xor_sum.as_u64[0] == prefix->name.ip6.as_u64[0]) &&
+    (xor_sum.as_u64[1] == prefix->name.ip6.as_u64[1]);
+}
+
+/**
+ * @brief Choose the next node for a data packet from a producer.
+ *
+ * Inspects the IP version nibble, verifies the packet's name (taken from the
+ * IP source-address field: offset 12 for IPv4, 8 for IPv6) against the
+ * socket prefix, and routes matching data to the ip4/ip6 face input node.
+ * Non-matching data is dropped with the NOT_SOCK_PREFIX error.
+ */
+static_always_inline u32
+hicn_face_prod_next_from_data_hdr (vlib_node_runtime_t * node,
+				   vlib_buffer_t * b, hicn_prefix_t * prefix)
+{
+  u8 *ptr = vlib_buffer_get_current (b);
+  u8 v = *ptr & 0xf0;		/* IP version nibble */
+  int match_res = 1;
+
+  if (PREDICT_TRUE (v == 0x40 && ip46_address_is_ip4 (&prefix->name)))
+    {
+      match_res = match_ip4_name ((u32 *) & (ptr[12]), prefix);
+    }
+  else if (PREDICT_TRUE (v == 0x60 && !ip46_address_is_ip4 (&prefix->name)))
+    {
+      match_res = match_ip6_name ((u32x4 *) & (ptr[8]), prefix);
+    }
+
+  /* Fix: attach the NOT_SOCK_PREFIX error only when the name does NOT
+   * match. The original expression
+   *   b->error = 0*(1-match_res) + match_res*(node->errors[...])
+   * flagged the error on MATCHING packets and left the dropped ones
+   * uncounted. */
+  b->error =
+    match_res ? 0 : node->errors[FACE_PROD_INPUT_ERROR_NOT_SOCK_PREFIX];
+
+  return match_res ? HICN_FACE_PROD_NEXT_DATA_IP4 + (v == 0x60) :
+    HICN_FACE_PROD_NEXT_ERROR_DROP;
+}
+
+/**
+ * @brief Record a trace entry for @p b when tracing is enabled on both the
+ * node and the buffer.
+ */
+static_always_inline void
+hicn_face_prod_trace_buffer (vlib_main_t * vm, vlib_node_runtime_t * node,
+			     u32 swif, vlib_buffer_t * b, u32 next)
+{
+  int node_traced = (node->flags & VLIB_NODE_FLAG_TRACE) != 0;
+  int buffer_traced = (b->flags & VLIB_BUFFER_IS_TRACED) != 0;
+
+  if (PREDICT_FALSE (node_traced && buffer_traced))
+    {
+      hicn_face_prod_input_trace_t *trace =
+	vlib_add_trace (vm, node, b, sizeof (*trace));
+      trace->next_index = next;
+      trace->sw_if_index = swif;
+    }
+}
+
+/**
+ * @brief Main loop of the producer application input node.
+ *
+ * For each packet coming from a producer application, look up the producer
+ * state of the receiving interface (face_state_vec[sw_if_index]), verify the
+ * packet name against the socket prefix, and hand the packet to the ip4/ip6
+ * face input node or to error-drop. Note: only the data counter is
+ * incremented here; the interest counter stays zero in this node.
+ */
+static uword
+hicn_face_prod_input_node_fn (vlib_main_t * vm,
+			      vlib_node_runtime_t * node,
+			      vlib_frame_t * frame)
+{
+  u32 n_left_from, *from, *to_next;
+  hicn_face_prod_next_t next_index;
+  /* Frame-local stats, flushed to node counters at the end */
+  vl_api_hicn_api_node_stats_get_reply_t stats = { 0 };
+
+  from = vlib_frame_vector_args (frame);
+  n_left_from = frame->n_vectors;
+  next_index = node->cached_next_index;
+
+  while (n_left_from > 0)
+    {
+      u32 n_left_to_next;
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+      /* Quad loop: process 4 packets per iteration while prefetching the
+       * following 4 buffers */
+      while (n_left_from >= 8 && n_left_to_next >= 4)
+	{
+	  vlib_buffer_t *b0, *b1, *b2, *b3;
+	  u32 bi0, bi1, bi2, bi3;
+	  hicn_face_prod_state_t *prod_face0 = NULL;
+	  hicn_face_prod_state_t *prod_face1 = NULL;
+	  hicn_face_prod_state_t *prod_face2 = NULL;
+	  hicn_face_prod_state_t *prod_face3 = NULL;
+	  u32 next0, next1, next2, next3;
+
+	  /* Prefetch the next four buffers */
+	  {
+	    vlib_buffer_t *b4, *b5, *b6, *b7;
+	    b4 = vlib_get_buffer (vm, from[4]);
+	    b5 = vlib_get_buffer (vm, from[5]);
+	    b6 = vlib_get_buffer (vm, from[6]);
+	    b7 = vlib_get_buffer (vm, from[7]);
+	    CLIB_PREFETCH (b4, CLIB_CACHE_LINE_BYTES, STORE);
+	    CLIB_PREFETCH (b5, CLIB_CACHE_LINE_BYTES, STORE);
+	    CLIB_PREFETCH (b6, CLIB_CACHE_LINE_BYTES, STORE);
+	    CLIB_PREFETCH (b7, CLIB_CACHE_LINE_BYTES, STORE);
+	  }
+
+	  bi0 = from[0];
+	  bi1 = from[1];
+	  bi2 = from[2];
+	  bi3 = from[3];
+
+	  from += 4;
+	  n_left_from -= 4;
+	  to_next[0] = bi0;
+	  to_next[1] = bi1;
+	  to_next[2] = bi2;
+	  to_next[3] = bi3;
+
+	  to_next += 4;
+	  n_left_to_next -= 4;
+
+	  b0 = vlib_get_buffer (vm, bi0);
+	  b1 = vlib_get_buffer (vm, bi1);
+	  b2 = vlib_get_buffer (vm, bi2);
+	  b3 = vlib_get_buffer (vm, bi3);
+
+	  /* Producer state is indexed by the receiving sw interface */
+	  prod_face0 =
+	    &face_state_vec[vnet_buffer (b0)->sw_if_index[VLIB_RX]];
+	  prod_face1 =
+	    &face_state_vec[vnet_buffer (b1)->sw_if_index[VLIB_RX]];
+	  prod_face2 =
+	    &face_state_vec[vnet_buffer (b2)->sw_if_index[VLIB_RX]];
+	  prod_face3 =
+	    &face_state_vec[vnet_buffer (b3)->sw_if_index[VLIB_RX]];
+
+	  /* Pick next node from the IP version and the prefix match */
+	  next0 =
+	    hicn_face_prod_next_from_data_hdr (node, b0, &prod_face0->prefix);
+	  next1 =
+	    hicn_face_prod_next_from_data_hdr (node, b1, &prod_face1->prefix);
+	  next2 =
+	    hicn_face_prod_next_from_data_hdr (node, b2, &prod_face2->prefix);
+	  next3 =
+	    hicn_face_prod_next_from_data_hdr (node, b3, &prod_face3->prefix);
+	  stats.pkts_data_count += 4;
+
+	  /* trace */
+	  hicn_face_prod_trace_buffer (vm, node,
+				       vnet_buffer (b0)->sw_if_index[VLIB_RX],
+				       b0, next0);
+	  hicn_face_prod_trace_buffer (vm, node,
+				       vnet_buffer (b1)->sw_if_index[VLIB_RX],
+				       b1, next1);
+	  hicn_face_prod_trace_buffer (vm, node,
+				       vnet_buffer (b2)->sw_if_index[VLIB_RX],
+				       b2, next2);
+	  hicn_face_prod_trace_buffer (vm, node,
+				       vnet_buffer (b3)->sw_if_index[VLIB_RX],
+				       b3, next3);
+
+	  /* enqueue */
+	  vlib_validate_buffer_enqueue_x4 (vm, node, next_index, to_next,
+					   n_left_to_next, bi0, bi1, bi2, bi3,
+					   next0, next1, next2, next3);
+
+	  stats.pkts_processed += 4;
+
+	}
+
+      /* Single loop: remaining packets, one at a time */
+      while (n_left_from > 0 && n_left_to_next > 0)
+	{
+	  vlib_buffer_t *b0;
+	  u32 bi0, swif;
+	  hicn_face_prod_state_t *prod_face = NULL;
+	  u32 next0;
+
+	  if (n_left_from > 1)
+	    {
+	      vlib_buffer_t *b1;
+	      b1 = vlib_get_buffer (vm, from[1]);
+	      CLIB_PREFETCH (b1, CLIB_CACHE_LINE_BYTES, STORE);
+	    }
+
+	  bi0 = from[0];
+	  from += 1;
+	  n_left_from -= 1;
+	  to_next[0] = bi0;
+	  to_next += 1;
+	  n_left_to_next -= 1;
+
+	  b0 = vlib_get_buffer (vm, bi0);
+	  swif = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+	  prod_face = &face_state_vec[swif];
+
+	  next0 =
+	    hicn_face_prod_next_from_data_hdr (node, b0, &prod_face->prefix);
+	  stats.pkts_data_count++;
+
+	  /* trace */
+	  hicn_face_prod_trace_buffer (vm, node,
+				       vnet_buffer (b0)->sw_if_index[VLIB_RX],
+				       b0, next0);
+
+	  /* enqueue */
+	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+					   n_left_to_next, bi0, next0);
+
+	  stats.pkts_processed += 1;
+
+	}
+
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+    }
+
+  /* Flush frame-local stats into the node counters */
+  vlib_node_increment_counter (vm, node->node_index,
+			       HICNFWD_ERROR_PROCESSED, stats.pkts_processed);
+  vlib_node_increment_counter (vm, node->node_index,
+			       HICNFWD_ERROR_INTERESTS,
+			       stats.pkts_interest_count);
+  vlib_node_increment_counter (vm, node->node_index, HICNFWD_ERROR_DATAS,
+			       stats.pkts_data_count);
+
+  return (frame->n_vectors);
+}
+
+/* Node registration: sits on the ip4/ip6 unicast feature arcs (see the
+ * VNET_FEATURE_INIT entries in face_prod.c) and feeds the face input
+ * nodes or error-drop. */
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE(hicn_face_prod_input_node) =
+{
+  .function = hicn_face_prod_input_node_fn,
+  .name = "hicn-face-prod-input",
+  .vector_size = sizeof(u32),
+  .format_trace = format_face_prod_input_trace,
+  .type = VLIB_NODE_TYPE_INTERNAL,
+  .n_errors = ARRAY_LEN(face_prod_input_error_strings),
+  .error_strings = face_prod_input_error_strings,
+  .n_next_nodes = HICN_FACE_PROD_N_NEXT,
+  .next_nodes =
+  {
+    [HICN_FACE_PROD_NEXT_DATA_IP4] = "hicn-face-ip4-input",
+    [HICN_FACE_PROD_NEXT_DATA_IP6] = "hicn-face-ip6-input",
+    [HICN_FACE_PROD_NEXT_ERROR_DROP] = "error-drop",
+  },
+};
+/* *INDENT-ON* */
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/faces/face.c b/hicn-plugin/src/faces/face.c
new file mode 100755
index 000000000..f0559bb98
--- /dev/null
+++ b/hicn-plugin/src/faces/face.c
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "face.h"
+#include "ip/face_ip.h"
+#include "ip/face_ip_node.h"
+#include "ip/iface_ip_node.h"
+#include "ip/dpo_ip.h"
+#include "udp/face_udp.h"
+#include "udp/face_udp_node.h"
+#include "udp/iface_udp_node.h"
+#include "udp/dpo_udp.h"
+
+/* Vector of dpo ids, one slot per registered face type (see face_show) */
+dpo_id_t *face_dpo_vec;
+/* Virtual function table per registered face type */
+hicn_face_vft_t *face_vft_vec;
+/* Printable name per registered face type */
+char **face_type_names_vec;
+
+/* Pool holding the state of every face, shared by all face types */
+hicn_face_t *hicn_dpoi_face_pool;
+
+/* dpo type of the first face type registered in the system; anchors the
+ * face_type -> vector index mapping */
+dpo_type_t first_type = DPO_FIRST;
+
+/**
+ * @brief Format the faces registered in face_dpo_vec.
+ *
+ * @param s String to append to
+ * @param face_id Unused (kept for interface compatibility)
+ * @param indent Indentation passed to each face formatter
+ */
+u8 *
+face_show (u8 * s, int face_id, u32 indent)
+{
+  s = format (s, "Faces:\n");
+  indent += 4;
+  int i;
+  vec_foreach_index (i, face_dpo_vec)
+  {
+    /* Fix: format entry i, not always entry face_id — the original printed
+     * the same face once per registered type. */
+    s =
+      format (s, "%U", face_vft_vec[i].format_face,
+	      face_dpo_vec[i].dpoi_index, indent);
+  }
+
+  return (s);
+
+}
+
+/**
+ * @brief Register a new face type with its vft and printable name.
+ *
+ * The first type registered anchors the face_type -> vector index mapping
+ * used by hicn_face_get_vft. The name is duplicated; ownership of the copy
+ * stays with this module.
+ */
+void
+register_face_type (hicn_face_type_t face_type, hicn_face_vft_t * vft,
+		    char *name)
+{
+  if (first_type == DPO_FIRST)
+    first_type = face_type;
+
+  int idx = face_type - first_type;
+  ASSERT (idx >= 0);
+  vec_validate (face_vft_vec, idx);
+  vec_validate (face_type_names_vec, idx);
+
+  /* Fix: duplicate the name (NUL included) and fail loudly on allocation
+   * failure instead of crashing inside strcpy on a NULL pointer. */
+  char *name_str = strdup (name);
+  ASSERT (name_str != NULL);
+
+  face_vft_vec[idx] = *vft;
+  face_type_names_vec[idx] = name_str;
+}
+
+// Make this more flexible for future types face
+/* Initialize the face pool and every built-in face type (ip and udp, face
+ * and iface flavors). Must be called before any packet is processed. */
+void
+hicn_face_module_init (vlib_main_t * vm)
+{
+  pool_validate (hicn_dpoi_face_pool);
+
+  hicn_face_ip_init (vm);
+  hicn_iface_ip_init (vm);
+  hicn_face_udp_init (vm);
+  hicn_iface_udp_init (vm);
+}
+
+/**
+ * @brief Format all the existing faces.
+ *
+ * The variadic tail carries a single u32 indent value.
+ */
+u8 *
+format_hicn_face_all (u8 * s, int n, ...)
+{
+  va_list ap;
+  va_start (ap, n);
+  u32 indent = va_arg (ap, u32);
+  /* Fix: the original never called va_end (required by C). */
+  va_end (ap);
+
+  /* Fix: print the number of faces; the original printed the indent value
+   * as if it were the count. */
+  s = format (s, "Faces: %d\n", pool_elts (hicn_dpoi_face_pool));
+
+  hicn_face_t *face;
+
+  /* *INDENT-OFF* */
+  pool_foreach ( face, hicn_dpoi_face_pool,
+  {
+    hicn_face_vft_t * vft = hicn_face_get_vft(face->shared.face_type);
+    hicn_face_id_t face_id = hicn_dpoi_get_index(face);
+    s = format(s, "%U\n", vft->format_face, face_id, indent);
+  });
+  /* *INDENT-ON* */
+
+  return s;
+}
+
+/**
+ * @brief Return the vft for @p face_type, or NULL when the type was never
+ * registered.
+ */
+hicn_face_vft_t *
+hicn_face_get_vft (hicn_face_type_t face_type)
+{
+  int idx = face_type - first_type;
+
+  /* Fix: also reject indexes beyond the registered-type vector, which the
+   * original would read out of bounds. */
+  if (idx >= 0 && idx < (int) vec_len (face_vft_vec))
+    return &face_vft_vec[idx];
+
+  return NULL;
+}
+
+/**
+ * @brief Delete a face, or mark it deleted while references remain.
+ *
+ * @param face_id Id of the face to delete
+ * @return HICN_ERROR_FACE_NOT_FOUND if face_id is not a live face,
+ *         otherwise HICN_ERROR_NONE
+ */
+int
+hicn_face_del (hicn_face_id_t face_id)
+{
+  int ret = HICN_ERROR_NONE;
+
+  /* Fix: the original only checked pool_len, so an already-freed index
+   * passed the test and could be pool_put twice; hicn_dpoi_idx_is_valid
+   * also rejects free slots. */
+  if (hicn_dpoi_idx_is_valid (face_id))
+    {
+      hicn_face_t *face = hicn_dpoi_get_from_idx (face_id);
+      if (face->shared.locks == 0)
+	pool_put_index (hicn_dpoi_face_pool, face_id);
+      else
+	/* Still referenced: defer the removal */
+	face->shared.flags |= HICN_FACE_FLAGS_DELETED;
+    }
+  else
+    ret = HICN_ERROR_FACE_NOT_FOUND;
+
+  return ret;
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/hicn-plugin/src/faces/face.h b/hicn-plugin/src/faces/face.h
new file mode 100755
index 000000000..2774d9a2e
--- /dev/null
+++ b/hicn-plugin/src/faces/face.h
@@ -0,0 +1,240 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HICN_FACE_H__
+#define __HICN_FACE_H__
+
+#include <vnet/vnet.h>
+#include <vlib/vlib.h>
+#include <vnet/dpo/dpo.h>
+#include <vnet/adj/adj_types.h>
+
+typedef u8 hicn_face_flags_t;
+typedef index_t hicn_face_id_t;
+typedef dpo_type_t hicn_face_type_t;
+
+/**
+ * @file
+ *
+ * @brief Face
+ *
+ * This file implements a general face type. A face is carried through nodes as a
+ * dpo. The face state (hicn_face_t) is the object pointed by the
+ * dpoi_index in the dpo_id_t (see
+ * https://docs.fd.io/vpp/18.07/d0/d37/dpo_8h_source.html).
+ * A face state that does not contain the indication of the l2 adjacency is an
+ * incomplete face (iface), otherwise it is considered to be complete. Each
+ * face type provides specific nodes for processing packets in input or output
+ * of complete and incomplete faces.
+ */
+
+/**
+ * @brief Fields shared among all the different types of faces
+ */
+typedef struct __attribute__ ((packed)) hicn_face_shared_s
+{
+  /* Flags to identify if the face is incomplete (iface), complete (face) */
+  /* and a network or application face (1B) */
+  hicn_face_flags_t flags;
+
+  /* Path label (2B) */
+  u16 pl_id;
+
+  /* Number of dpo holding a reference to the dpoi (4B) */
+  u32 locks;
+
+  /* Adjacency for the neighbor (4B) */
+  adj_index_t adj;
+
+  /* local interface for the local ip address */
+  u32 sw_if;
+
+  /* Face type (4B); the union forces the field to occupy 4 bytes */
+  union
+  {
+    hicn_face_type_t face_type;
+    u32 int_face_type;		//To force the face_type_t to be 4B
+  };
+
+} hicn_face_shared_t;
+
+/**
+ * @brief Structure holding the face state. It contains the fields shared
+ * among all the types of faces and leaves some space for storing additional
+ * information specific to each type. The whole structure is sized to span
+ * exactly two cache lines.
+ */
+typedef struct __attribute__ ((packed)) hicn_face_s
+{
+  /* Additional space to fill with face_type specific information */
+  u8 data[2 * CLIB_CACHE_LINE_BYTES - sizeof (hicn_face_shared_t)];
+  hicn_face_shared_t shared;
+
+}
+
+hicn_face_t;
+
+/* Pool of faces */
+extern hicn_face_t *hicn_dpoi_face_pool;
+
+/* Flags */
+/* A face is complete and it stores all the information. A iface lacks of the
+   adj index, therefore sending a packet through a iface require a lookup in
+   the FIB. */
+#define HICN_FACE_FLAGS_DEFAULT 0x00
+#define HICN_FACE_FLAGS_FACE 0x01
+#define HICN_FACE_FLAGS_IFACE 0x02
+#define HICN_FACE_FLAGS_APPFACE_PROD 0x04 /* Currently only IP face can be appface */
+#define HICN_FACE_FLAGS_APPFACE_CONS 0x08 /* Currently only IP face can be appface */
+#define HICN_FACE_FLAGS_DELETED 0x10
+
+/* Sentinel meaning "no face" */
+#define HICN_FACE_NULL (hicn_face_id_t) ~0
+
+/**
+ * @brief Definition of the virtual function table for an hICN FACE DPO.
+ *
+ * An hICN dpo is a combination of a dpo context (hicn_dpo_ctx or struct that
+ * extends a hicn_dpo_ctx) and a strategy node. The following virtual function
+ * table is the template that glues together the functions to interact with
+ * the context and to create the dpo.
+ */
+typedef struct hicn_face_vft_s
+{
+  u8 *(*format_face) (u8 * s, va_list * args);
+  /**< Format an hICN face dpo*/
+  int (*hicn_face_del) (hicn_face_id_t face_id);
+  void (*hicn_face_get_dpo) (hicn_face_t * face, dpo_id_t * dpo);
+} hicn_face_vft_t;
+
+
+/* Vector maintaining a dpo per face */
+extern dpo_id_t *face_dpo_vec;
+extern hicn_face_vft_t *face_vft_vec;
+
+/* Vector holding the set of face names */
+extern char **face_type_names_vec;
+
+/* First face type registered in the system. */
+extern dpo_type_t first_type;
+
+/**
+ * @brief Return the face id from the face state
+ *
+ * @param face_dpoi Pointer to the face state (must live in the face pool)
+ * @return face id
+ */
+always_inline hicn_face_id_t
+hicn_dpoi_get_index (hicn_face_t * face_dpoi)
+{
+  /* Pool elements are contiguous: the id is the offset from the pool base */
+  return (face_dpoi - hicn_dpoi_face_pool);
+}
+
+/**
+ * @brief Return the face from the face id. Face id must be valid.
+ *
+ * @param dpoi_index Face identifier
+ * @return Pointer to the face
+ */
+always_inline hicn_face_t *
+hicn_dpoi_get_from_idx (hicn_face_id_t dpoi_index)
+{
+  hicn_face_t *face = pool_elt_at_index (hicn_dpoi_face_pool, dpoi_index);
+  return face;
+}
+
+/**
+ * @brief Return true if the face id belongs to an existing face
+ */
+always_inline int
+hicn_dpoi_idx_is_valid (hicn_face_id_t face_id)
+{
+  if (face_id >= pool_len (hicn_dpoi_face_pool))
+    return 0;
+  /* In range: the slot must also be occupied */
+  return !pool_is_free_index (hicn_dpoi_face_pool, face_id);
+}
+
+/**
+ * @brief Add a lock to the face dpo
+ *
+ * @param dpo Pointer to the face dpo
+ */
+always_inline void
+hicn_face_lock (dpo_id_t * dpo)
+{
+  hicn_face_t *face = hicn_dpoi_get_from_idx (dpo->dpoi_index);
+  face->shared.locks += 1;
+}
+
+/**
+ * @brief Remove a lock from the face dpo.
+ *
+ * NOTE(review): despite the original comment, the face is NOT deallocated
+ * here when locks reaches 0 — deallocation only happens through
+ * hicn_face_del.
+ *
+ * @param dpo Pointer to the face dpo
+ */
+always_inline void
+hicn_face_unlock (dpo_id_t * dpo)
+{
+  hicn_face_t *face;
+  face = hicn_dpoi_get_from_idx (dpo->dpoi_index);
+  face->shared.locks--;
+}
+
+/**
+ * @brief Init the internal structures of the face module
+ *
+ * Must be called before processing any packet
+ */
+void hicn_face_module_init (vlib_main_t * vm);
+
+/**
+ * @brief Format all the existing faces
+ *
+ * @param s Pointer to a previous string. If null it will be initialize
+ * @param n Number of input parameters
+ * @return String with the faces formatted
+ */
+u8 *format_hicn_face_all (u8 * s, int n, ...);
+
+/**
+ * @brief Delete a face
+ *
+ * @param face_id Id of the face to delete
+ * @return HICN_ERROR_FACE_NOT_FOUND if the face does not exist, otherwise
+ * HICN_ERROR_NONE
+ */
+int hicn_face_del (hicn_face_id_t face_id);
+
+/**
+ * @brief Return the virtual function table corresponding to the face type
+ *
+ * @param face_type Type of the face
+ * @return NULL if the face type does not exist
+ */
+hicn_face_vft_t *hicn_face_get_vft (hicn_face_type_t face_type);
+
+/**
+ * @brief Register a new face type
+ *
+ * @param face_type Type of the face
+ * @param vft Virtual Function table for the new face type
+ */
+void register_face_type (hicn_face_type_t face_type, hicn_face_vft_t * vft,
+ char *name);
+#endif // __HICN_FACE_H__
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/hicn-plugin/src/faces/face_cli.c b/hicn-plugin/src/faces/face_cli.c
new file mode 100755
index 000000000..3ddf96beb
--- /dev/null
+++ b/hicn-plugin/src/faces/face_cli.c
@@ -0,0 +1,131 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vlib/vlib.h>
+#include <vppinfra/error.h>
+#include "face.h"
+#include "../error.h"
+
+/**
+ * @brief CLI handler for "hicn face show".
+ *
+ * Shows one face by id, all faces of a given type, or every face. With the
+ * "deleted" keyword, faces marked HICN_FACE_FLAGS_DELETED are shown too.
+ */
+static clib_error_t *
+hicn_face_cli_show_command_fn (vlib_main_t * vm,
+			       unformat_input_t * main_input,
+			       vlib_cli_command_t * cmd)
+{
+
+  hicn_face_id_t face_id = HICN_FACE_NULL;
+  char *face_type_name = NULL;
+  int found = ~0;
+  int deleted = 0;
+  clib_error_t *error = 0;
+
+  /* Get a line of input. */
+  unformat_input_t _line_input, *line_input = &_line_input;
+  if (unformat_user (main_input, unformat_line_input, line_input))
+    {
+      while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+	{
+	  if (unformat (line_input, "%u", &face_id))
+	    ;
+	  else if (unformat (line_input, "type %s", &face_type_name))
+	    ;
+	  else if (unformat (line_input, "deleted"))
+	    deleted = 1;
+	  else
+	    {
+	      error = clib_error_return (0, "%s",
+					 get_error_string
+					 (HICN_ERROR_CLI_INVAL));
+	      break;
+	    }
+	}
+
+      /* Fix: release the line input; the original leaked it. */
+      unformat_free (line_input);
+
+      if (!error && face_type_name != NULL)
+	{
+	  int idx = 0;
+	  vec_foreach_index (idx, face_type_names_vec)
+	  {
+	    if (!strcmp (face_type_names_vec[idx], face_type_name))
+	      found = idx;
+	  }
+	  if (found == ~0)
+	    error = clib_error_return (0, "Face type unknown");
+	}
+
+      /* Fix: free the unformat-allocated type name; the original leaked
+       * it on every invocation that used "type". */
+      vec_free (face_type_name);
+
+      if (error)
+	return error;
+    }
+
+  if (face_id != HICN_FACE_NULL)
+    {
+      /* Show a single face */
+      if (!hicn_dpoi_idx_is_valid (face_id))
+	return clib_error_return (0, "%s",
+				  get_error_string
+				  (HICN_ERROR_FACE_NOT_FOUND));
+
+      hicn_face_t *face = hicn_dpoi_get_from_idx (face_id);
+      hicn_face_vft_t *vft = hicn_face_get_vft (face->shared.face_type);
+      vlib_cli_output (vm, "%U\n", vft->format_face, face_id, 0 /*indent */ );
+    }
+  else
+    {
+      if (found != ~0)
+	{
+	  /* Show all faces of the requested type */
+	  hicn_face_t *face;
+	  dpo_type_t type = (dpo_type_t) (found + first_type);
+	  hicn_face_vft_t *vft = hicn_face_get_vft (type);
+	  /* *INDENT-OFF* */
+	  pool_foreach(face, hicn_dpoi_face_pool,
+	  {
+	    if (!((face->shared.flags & HICN_FACE_FLAGS_DELETED) && !deleted))
+	      {
+		if ((face->shared.face_type == type) && (face->shared.flags))
+		  vlib_cli_output(vm, "%U\n", vft->format_face, hicn_dpoi_get_index(face), 0);
+	      }
+	  });
+	  /* *INDENT-ON* */
+	}
+      else
+	{
+	  /* Show every face */
+	  hicn_face_t *face;
+	  /* *INDENT-OFF* */
+	  pool_foreach(face, hicn_dpoi_face_pool,
+	  {
+	    if (!((face->shared.flags & HICN_FACE_FLAGS_DELETED) && !deleted))
+	      {
+		hicn_face_vft_t * vft = hicn_face_get_vft(face->shared.face_type);
+		vlib_cli_output(vm, "%U\n", vft->format_face, hicn_dpoi_get_index(face), 0);
+	      }
+	  });
+	  /* *INDENT-ON* */
+	}
+    }
+
+  return 0;
+}
+
+/* cli declaration for 'show faces' */
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (hicn_face_cli_show_command, static) =
+{
+  .path = "hicn face show",
+  /* Fix: document the supported "deleted" keyword in the help text. */
+  .short_help = "hicn face show [<face_id> | type <ip/udp>] [deleted]",
+  .function = hicn_face_cli_show_command_fn,
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/hicn-plugin/src/faces/ip/dpo_ip.c b/hicn-plugin/src/faces/ip/dpo_ip.c
new file mode 100755
index 000000000..1b2dbcff9
--- /dev/null
+++ b/hicn-plugin/src/faces/ip/dpo_ip.c
@@ -0,0 +1,187 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "dpo_ip.h"
+
+/* Hash tables mapping an (address, sw_if) key to a face id: one keyed by
+ * the local address (faces), one by the remote address (ifaces) */
+mhash_t hicn_face_ip_local_hashtb;
+mhash_t hicn_face_ip_remote_hashtb;
+/* dpo type assigned to ip faces when the type is registered */
+dpo_type_t hicn_face_ip_type;
+
+/* Candidate next nodes for a packet carrying an ip4 face dpo */
+const static char *const hicn_face_ip4dpoi_nodes[] = {
+  "hicn-face-ip4-input",
+  "hicn-face-ip4-output",
+  "hicn-iface-ip4-input",
+  "hicn-iface-ip4-output",
+  NULL,
+};
+
+/* Candidate next nodes for a packet carrying an ip6 face dpo */
+const static char *const hicn_face_ip6dpoi_nodes[] = {
+  "hicn-face-ip6-input",
+  "hicn-face-ip6-output",
+  "hicn-iface-ip6-input",
+  "hicn-iface-ip6-output",
+  NULL,
+};
+
+/* Per-protocol node lists handed to dpo_register_new_type */
+const static char *const *const hicn_ip_nodes[DPO_PROTO_NUM] = {
+  [DPO_PROTO_IP4] = hicn_face_ip4dpoi_nodes,
+  [DPO_PROTO_IP6] = hicn_face_ip6dpoi_nodes
+};
+
+/* dpo vft: lock/unlock manipulate the face's reference count */
+const static dpo_vft_t hicn_face_ip_vft = {
+  .dv_lock = hicn_face_lock,
+  .dv_unlock = hicn_face_unlock,
+  .dv_format = format_hicn_face_ip,
+};
+
+/* Must be executed after all the strategy nodes are created */
+void
+hicn_dpo_ip_module_init (void)
+{
+  /* Both tables map a hicn_face_ip_key_t to a face id */
+  mhash_init (&hicn_face_ip_local_hashtb,
+	      sizeof (hicn_face_id_t) /* value */ ,
+	      sizeof (hicn_face_ip_key_t) /* key */ );
+  mhash_init (&hicn_face_ip_remote_hashtb,
+	      sizeof (hicn_face_id_t) /* value */ ,
+	      sizeof (hicn_face_ip_key_t) /* key */ );
+
+  /*
+   * How much useful is the following registration?
+   * So far it seems that we need it only for setting the dpo_type.
+   */
+  hicn_face_ip_type =
+    dpo_register_new_type (&hicn_face_ip_vft, hicn_ip_nodes);
+}
+
+
+/**
+ * @brief Create (or reuse) an ip4 face keyed by its local address.
+ *
+ * Fails if a face with the same local address and interface already exists;
+ * reuses the entry keyed by the remote address (an iface) when present.
+ */
+int
+hicn_dpo_ip4_create (dpo_id_t * dpo,
+		     const ip4_address_t * local_addr,
+		     const ip4_address_t * remote_addr,
+		     u32 sw_if,
+		     adj_index_t adj,
+		     u32 node_index,
+		     hicn_face_flags_t flags, hicn_face_id_t * face_id)
+{
+  /* If local matches the dpoi is a face */
+  hicn_face_t *face =
+    hicn_face_ip4_get (local_addr, sw_if, &hicn_face_ip_local_hashtb);
+  u8 is_appface;
+
+  if (face != NULL)
+    return HICN_ERROR_FACE_ALREADY_CREATED;
+
+  /* If remote matches the dpoi is a iface */
+  face = hicn_face_ip4_get (remote_addr, sw_if, &hicn_face_ip_remote_hashtb);
+
+  if (face == NULL)
+    {
+      /* Allocate a new entry keyed by the remote address */
+      hicn_dpo_ip4_add_and_lock_from_remote (dpo, &is_appface, local_addr,
+					     remote_addr, sw_if, node_index);
+      *face_id = (hicn_face_id_t) dpo->dpoi_index;
+      face = hicn_dpoi_get_from_idx (*face_id);
+    }
+  else
+    {
+      /* Reuse the existing iface and point the dpo at it */
+      *face_id = hicn_dpoi_get_index (face);
+      dpo_set (dpo, hicn_face_ip_type, DPO_PROTO_IP4, *face_id);
+      dpo->dpoi_next_node = node_index;
+    }
+
+  /* Index the face by its local address as well */
+  hicn_face_ip_key_t key;
+  hicn_face_ip4_get_key (local_addr, sw_if, &key);
+
+  mhash_set_mem (&hicn_face_ip_local_hashtb, &key, (uword *) face_id, 0);
+
+  hicn_face_ip_t *ip_face = (hicn_face_ip_t *) face->data;
+  ip46_address_set_ip4 (&ip_face->local_addr, local_addr);
+  ip46_address_set_ip4 (&ip_face->remote_addr, remote_addr);
+  /* Fix: record the interface on the shared state, as the ip6 variant
+   * does — the original left shared.sw_if unset for reused ip4 ifaces. */
+  face->shared.sw_if = sw_if;
+  face->shared.flags = flags;
+  face->shared.adj = adj;
+
+  return HICN_ERROR_NONE;
+}
+
+/**
+ * @brief Create (or reuse) an ip6 face keyed by its local address.
+ *
+ * Fails if a face with the same local address and interface already exists;
+ * reuses the entry keyed by the remote address (an iface) when present.
+ */
+int
+hicn_dpo_ip6_create (dpo_id_t * dpo,
+		     const ip6_address_t * local_addr,
+		     const ip6_address_t * remote_addr,
+		     u32 sw_if,
+		     adj_index_t adj,
+		     u32 node_index,
+		     hicn_face_flags_t flags, hicn_face_id_t * face_id)
+{
+  /* If local matches the dpoi is a face */
+  hicn_face_t *face =
+    hicn_face_ip6_get (local_addr, sw_if, &hicn_face_ip_local_hashtb);
+
+  u8 is_appface;
+
+  if (face != NULL)
+    return HICN_ERROR_FACE_ALREADY_CREATED;
+
+  face = hicn_face_ip6_get (remote_addr, sw_if, &hicn_face_ip_remote_hashtb);
+
+  /* If remote matches the dpoi is a iface */
+  if (face == NULL)
+    {
+      /* Allocate a new entry keyed by the remote address */
+      hicn_dpo_ip6_add_and_lock_from_remote (dpo, &is_appface, local_addr,
+					     remote_addr, sw_if, node_index);
+      *face_id = (hicn_face_id_t) dpo->dpoi_index;
+      face = hicn_dpoi_get_from_idx (*face_id);
+    }
+  else
+    {
+      /* Reuse the existing iface and point the dpo at it */
+      *face_id = hicn_dpoi_get_index (face);
+      dpo_set (dpo, hicn_face_ip_type, DPO_PROTO_IP6, *face_id);
+      dpo->dpoi_next_node = node_index;
+    }
+
+  /* Index the face by its local address as well */
+  hicn_face_ip_key_t key;
+  hicn_face_ip6_get_key (local_addr, sw_if, &key);
+
+  mhash_set_mem (&hicn_face_ip_local_hashtb, &key, (uword *) face_id, 0);
+
+  hicn_face_ip_t *ip_face = (hicn_face_ip_t *) face->data;
+  clib_memcpy (&ip_face->local_addr, local_addr, sizeof (ip6_address_t));
+  clib_memcpy (&ip_face->remote_addr, remote_addr, sizeof (ip6_address_t));
+  face->shared.sw_if = sw_if;
+  face->shared.flags = flags;
+  face->shared.adj = adj;
+
+
+  return HICN_ERROR_NONE;
+}
+
+/**
+ * @brief Initialize @p dpo from an existing face, deriving the dpo protocol
+ * from the face's local address family.
+ */
+void
+hicn_dpo_ip_create_from_face (hicn_face_t * face, dpo_id_t * dpo,
+			      u16 dpoi_next_node)
+{
+  hicn_face_id_t face_dpoi_id = hicn_dpoi_get_index (face);
+  hicn_face_ip_t *ip_face = (hicn_face_ip_t *) face->data;
+  dpo_proto_t proto =
+    ip46_address_is_ip4 (&ip_face->local_addr) ? DPO_PROTO_IP4 :
+    DPO_PROTO_IP6;
+
+  dpo_set (dpo, face->shared.face_type, proto, face_dpoi_id);
+  dpo->dpoi_next_node = dpoi_next_node;
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/hicn-plugin/src/faces/ip/dpo_ip.h b/hicn-plugin/src/faces/ip/dpo_ip.h
new file mode 100755
index 000000000..675443277
--- /dev/null
+++ b/hicn-plugin/src/faces/ip/dpo_ip.h
@@ -0,0 +1,255 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HICN_DPO_IP_H__
+#define __HICN_DPO_IP_H__
+
+#include <vnet/vnet.h>
+#include <vnet/ip/ip4_packet.h>
+
+#include "face_ip.h"
+#include "../face.h"
+
+/**
+ * @brief Initialize the internal structures of the dpo ip face module.
+ */
+void hicn_dpo_ip_module_init (void);
+
+
+/**
+ * @brief Retrieve a face from the ip4 local address and returns its dpo. This
+ * method adds a lock on the face state.
+ *
+ * @param dpo: Result of the lookup. If the face doesn't exist dpo = NULL
+ * @param is_appface: Boolean that indicates whether the face is an application
+ * face or not
+ * @param local_addr: Ip v4 local address of the face
+ * @param sw_if: software interface id of the face
+ *
+ * @result HICN_ERROR_FACE_NOT_FOUND if the face does not exist, otherwise HICN_ERROR_NONE.
+ */
+always_inline int
+hicn_dpo_ip4_lock_from_local (dpo_id_t * dpo,
+ u8 * is_appface,
+ const ip4_address_t * local_addr, u32 sw_if)
+{
+ hicn_face_t *face =
+ hicn_face_ip4_get (local_addr, sw_if, &hicn_face_ip_local_hashtb);
+
+ if (PREDICT_FALSE (face == NULL))
+ return HICN_ERROR_FACE_NOT_FOUND;
+
+ *is_appface = face->shared.flags & HICN_FACE_FLAGS_APPFACE_PROD;
+
+ hicn_face_id_t dpoi_index = hicn_dpoi_get_index (face);
+ dpo_set (dpo, hicn_face_ip_type, DPO_PROTO_IP4, dpoi_index);
+ dpo->dpoi_next_node = ~0;
+ dpo_lock (dpo);
+
+ return HICN_ERROR_NONE;
+}
+
+/**
+ * @brief Retrieve a face from the ip6 local address and returns its dpo. This
+ * method adds a lock on the face state.
+ *
+ * @param dpo: Result of the lookup. If the face doesn't exist dpo = NULL
+ * @param is_appface: Boolean that indicates whether the face is an application
+ * face or not
+ * @param local_addr: Ip v6 local address of the face
+ * @param sw_if: software interface id of the face
+ *
+ * @result HICN_ERROR_FACE_NOT_FOUND if the face does not exist, otherwise HICN_ERROR_NONE.
+ */
+always_inline int
+hicn_dpo_ip6_lock_from_local (dpo_id_t * dpo,
+ u8 * is_appface,
+ const ip6_address_t * local_addr, u32 sw_if)
+{
+ hicn_face_t *face =
+ hicn_face_ip6_get (local_addr, sw_if, &hicn_face_ip_local_hashtb);
+
+ if (PREDICT_FALSE (face == NULL))
+ return HICN_ERROR_FACE_NOT_FOUND;
+
+ *is_appface = face->shared.flags & HICN_FACE_FLAGS_APPFACE_PROD;
+
+ hicn_face_id_t dpoi_index = hicn_dpoi_get_index (face);
+ dpo_set (dpo, hicn_face_ip_type, DPO_PROTO_IP6, dpoi_index);
+ dpo->dpoi_next_node = ~0;
+ dpo_lock (dpo);
+
+ return HICN_ERROR_NONE;
+}
+
+
+/**
+ * @brief Retrieve, or create if it doesn't exist, a face from the ip4 remote
+ * address and returns its dpo. This method adds a lock on the face state.
+ *
+ * @param dpo: Result of the lookup
+ * @param is_appface: Boolean that indicates whether the face is an application
+ * face or not
+ * @param local_addr: Ip v4 local address of the face
+ * @param remote_addr: Ip v4 remote address of the face
+ * @param sw_if: software interface id of the face
+ * @param node_index: vlib edge index to use in the packet processing
+ */
+always_inline void
+hicn_dpo_ip4_add_and_lock_from_remote (dpo_id_t * dpo,
+ u8 * is_appface,
+ const ip4_address_t * local_addr,
+ const ip4_address_t * remote_addr,
+ u32 sw_if, u32 node_index)
+{
+  /* All (complete) faces are indexed by remote address as well */
+ hicn_face_t *face =
+ hicn_face_ip4_get (remote_addr, sw_if, &hicn_face_ip_remote_hashtb);
+
+ if (face == NULL)
+ {
+ hicn_face_id_t dpoi_index;
+ ip46_address_t local_addr46 = to_ip46 (0, (u8 *) local_addr);
+ ip46_address_t remote_addr46 = to_ip46 (0, (u8 *) remote_addr);
+ hicn_iface_ip_add (&local_addr46, &remote_addr46, sw_if, &dpoi_index);
+
+ *is_appface = 0;
+
+ dpo_set (dpo, hicn_face_ip_type, DPO_PROTO_IP4, dpoi_index);
+ dpo->dpoi_next_node = node_index;
+ dpo_lock (dpo);
+
+ return;
+ }
+
+ /* Code replicated on purpose */
+ *is_appface = face->shared.flags & HICN_FACE_FLAGS_APPFACE_PROD;
+
+ hicn_face_id_t dpoi_index = hicn_dpoi_get_index (face);
+ dpo_set (dpo, hicn_face_ip_type, DPO_PROTO_IP4, dpoi_index);
+ dpo->dpoi_next_node = node_index;
+ dpo_lock (dpo);
+}
+
+/**
+ * @brief Retrieve, or create if it doesn't exist, a face from the ip6 remote
+ * address and returns its dpo. This method adds a lock on the face state.
+ *
+ * @param dpo: Result of the lookup
+ * @param is_appface: Boolean that indicates whether the face is an application
+ * face or not
+ * @param local_addr: Ip v6 local address of the face
+ * @param remote_addr: Ip v6 remote address of the face
+ * @param sw_if: software interface id of the face
+ * @param node_index: vlib edge index to use in the packet processing
+ */
+always_inline void
+hicn_dpo_ip6_add_and_lock_from_remote (dpo_id_t * dpo,
+				       u8 * is_appface,
+				       const ip6_address_t * local_addr,
+				       const ip6_address_t * remote_addr,
+				       u32 sw_if, u32 node_index)
+{
+  /* All (complete) faces are indexed by remote address as well */
+  hicn_face_t *face =
+    hicn_face_ip6_get (remote_addr, sw_if, &hicn_face_ip_remote_hashtb);
+
+  if (face == NULL)
+    {
+      hicn_face_id_t dpoi_index;
+      hicn_iface_ip_add ((ip46_address_t *) local_addr,
+			 (ip46_address_t *) remote_addr, sw_if, &dpoi_index);
+
+      *is_appface = 0;
+
+      dpo_set (dpo, hicn_face_ip_type, DPO_PROTO_IP6, dpoi_index);	/* IPv6 face: was DPO_PROTO_IP4 (copy-paste bug) */
+      dpo->dpoi_next_node = node_index;
+      dpo_lock (dpo);
+
+      return;
+    }
+  /* Code replicated on purpose */
+  *is_appface = face->shared.flags & HICN_FACE_FLAGS_APPFACE_PROD;
+
+  hicn_face_id_t dpoi_index = hicn_dpoi_get_index (face);	/* hicn_face_id_t for consistency with the ip4 variant */
+  dpo_set (dpo, hicn_face_ip_type, DPO_PROTO_IP6, dpoi_index);
+  dpo->dpoi_next_node = node_index;
+  dpo_lock (dpo);
+}
+
+
+/**
+ * @brief Create an ip face and its corresponding dpo. Meant to be used for the
+ * control plane.
+ *
+ * @param dpo: Data plane object that point to the face created.
+ * @param local_addr: Ip v4 local address of the face
+ * @param remote_addr: Ip v4 remote address of the face
+ * @param sw_if: software interface id of the face
+ * @param adj: Ip adjacency corresponding to the remote address in the face
+ * @param node_index: vlib edge index to use in the packet processing
+ * @param flags: Flags of the face
+ * @param face_id: Identifier for the face (dpoi_index)
+ * @return HICN_ERROR_FACE_ALREADY_CREATED if the face exists, otherwise HICN_ERROR_NONE
+ */
+int hicn_dpo_ip4_create (dpo_id_t * dpo,
+ const ip4_address_t * local_addr,
+ const ip4_address_t * remote_addr,
+ u32 sw_if,
+ adj_index_t adj,
+ u32 node_index,
+ hicn_face_flags_t flags, hicn_face_id_t * face_id);
+
+/**
+ * @brief Create an ip face and its corresponding dpo. Meant to be used for the
+ * control plane.
+ *
+ * @param dpo: Data plane object that point to the face created.
+ * @param local_addr: Ip v6 local address of the face
+ * @param remote_addr: Ip v6 remote address of the face
+ * @param sw_if: software interface id of the face
+ * @param adj: Ip adjacency corresponding to the remote address in the face
+ * @param node_index: vlib edge index to use in the packet processing
+ * @param flags: Flags of the face
+ * @param face_id: Identifier for the face (dpoi_index)
+ * @return HICN_ERROR_FACE_ALREADY_CREATED if the face exists, otherwise HICN_ERROR_NONE
+ */
+int hicn_dpo_ip6_create (dpo_id_t * dpo,
+ const ip6_address_t * local_addr,
+ const ip6_address_t * remote_addr,
+ u32 sw_if,
+ adj_index_t adj,
+ u32 node_index,
+ hicn_face_flags_t flags, hicn_face_id_t * face_id);
+
+/**
+ * @brief Create a dpo from an ip face
+ *
+ * @param face Face from which to create the dpo
+ * @param dpoi_next_node Edge index that connects a node to the iface or face nodes
+ * @return the dpo
+ */
+void hicn_dpo_ip_create_from_face (hicn_face_t * face, dpo_id_t * dpo,
+ u16 dpoi_next_node);
+
+#endif // __HICN_DPO_IP_H__
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/hicn-plugin/src/faces/ip/face_ip.c b/hicn-plugin/src/faces/ip/face_ip.c
new file mode 100755
index 000000000..c7f6a1ba1
--- /dev/null
+++ b/hicn-plugin/src/faces/ip/face_ip.c
@@ -0,0 +1,326 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "face_ip.h"
+#include "face_ip_node.h"
+#include "dpo_ip.h"
+#include "../../strategy_dpo_manager.h"
+#include "../face.h"
+#include "../../cache_policies/cs_lru.h"
+#include "../../infra.h"
+#include "../../hicn.h"
+#include "../app/face_prod.h"
+#include "../app/face_cons.h"
+
+#include "../../mapme.h" // HICN_MAPME_EVENT_*
+#include "../../mapme_eventmgr.h" // hicn_mapme_eventmgr_process_node
+
+extern vlib_node_registration_t hicn_mapme_eventmgr_process_node;
+
+u32 strategy_face_ip4_vlib_edge;
+u32 strategy_face_ip6_vlib_edge;
+
+void
+hicn_face_ip_init (vlib_main_t * vm)
+{
+ int strategy_nodes_n = hicn_strategy_get_all_available ();
+
+ /* Default Strategy has index 0 and it always exists */
+ strategy_face_ip4_vlib_edge = vlib_node_add_next (vm,
+ hicn_dpo_get_strategy_vft
+ (default_dpo.
+ hicn_dpo_get_type ())->
+ get_strategy_node_index
+ (),
+ hicn_face_ip4_output_node.
+ index);
+
+ strategy_face_ip6_vlib_edge = vlib_node_add_next (vm,
+ hicn_dpo_get_strategy_vft
+ (default_dpo.
+ hicn_dpo_get_type ())->
+ get_strategy_node_index
+ (),
+ hicn_face_ip6_output_node.
+ index);
+ /*
+   * Create an edge between all the other strategy nodes
+ * and the ip_encap nodes.
+ */
+ for (int i = 1; i < strategy_nodes_n; i++)
+ {
+ u32 temp_index4 = vlib_node_add_next (vm,
+ hicn_dpo_get_strategy_vft_from_id
+ (i)->get_strategy_node_index (),
+ hicn_face_ip4_output_node.index);
+ u32 temp_index6 = vlib_node_add_next (vm,
+ hicn_dpo_get_strategy_vft_from_id
+ (i)->get_strategy_node_index (),
+ hicn_face_ip6_output_node.index);
+ ASSERT (temp_index4 == strategy_face_ip4_vlib_edge);
+ ASSERT (temp_index6 == strategy_face_ip6_vlib_edge);
+ }
+
+ hicn_dpo_ip_module_init ();
+
+ register_face_type (hicn_face_ip_type, &ip_vft, "ip");
+}
+
+int
+hicn_face_ip_del (hicn_face_id_t face_id)
+{
+ hicn_face_t *face = hicn_dpoi_get_from_idx (face_id);
+ hicn_face_ip_t *face_ip = (hicn_face_ip_t *) face->data;
+ hicn_face_ip_key_t key;
+ hicn_face_ip_key_t old_key;
+
+ if (ip46_address_is_ip4 (&face_ip->local_addr))
+ {
+ hicn_face_ip4_get_key (&(face_ip->local_addr.ip4), face->shared.sw_if,
+ &key);
+ mhash_unset (&hicn_face_ip_local_hashtb, &key, (uword *) & old_key);
+ hicn_face_ip4_get_key (&(face_ip->remote_addr.ip4), face->shared.sw_if,
+ &key);
+ mhash_unset (&hicn_face_ip_remote_hashtb, &key, (uword *) & old_key);
+ }
+ else
+ {
+ hicn_face_ip6_get_key (&(face_ip->local_addr.ip6), face->shared.sw_if,
+ &key);
+ mhash_unset (&hicn_face_ip_local_hashtb, &key, (uword *) & old_key);
+ hicn_face_ip6_get_key (&(face_ip->remote_addr.ip6), face->shared.sw_if,
+ &key);
+ mhash_unset (&hicn_face_ip_remote_hashtb, &key, (uword *) & old_key);
+ }
+ return hicn_face_del (face_id);
+}
+
+
+/*
+ * Utility that adds a new face cache entry. For the moment we assume that the
+ * ip_adjacency has already been set up.
+ */
+int
+hicn_face_ip_add (const ip46_address_t * local_addr,
+		  const ip46_address_t * remote_addr,
+		  int sw_if, hicn_face_id_t * pfaceid)
+{
+  fib_protocol_t fib_type;
+  vnet_link_t link_type;
+  adj_index_t adj;
+  dpo_proto_t dpo_proto;
+
+  /* Check if we found at least one ip address */
+  if (ip46_address_is_zero (local_addr) || ip46_address_is_zero (remote_addr))
+    return HICN_ERROR_FACE_NO_GLOBAL_IP;
+
+  if (ip46_address_is_ip4 (local_addr) && ip46_address_is_ip4 (remote_addr))
+    {
+      link_type = VNET_LINK_IP4;
+      fib_type = FIB_PROTOCOL_IP4;
+    }
+  else
+    {
+      link_type = VNET_LINK_IP6;
+      fib_type = FIB_PROTOCOL_IP6;
+    }
+
+
+  adj = adj_nbr_add_or_lock (fib_type, link_type, remote_addr, sw_if);
+
+  hicn_face_flags_t flags = (hicn_face_flags_t) 0;
+  flags |= HICN_FACE_FLAGS_FACE;
+
+  hicn_face_t *face;
+  if (ip46_address_is_ip4 (local_addr))
+    {
+      face =
+	hicn_face_ip4_get (&(local_addr->ip4), sw_if,
+			   &hicn_face_ip_local_hashtb);
+
+      if (face != NULL)
+	return HICN_ERROR_FACE_ALREADY_CREATED;
+
+      face =
+	hicn_face_ip4_get (&(remote_addr->ip4), sw_if,
+			   &hicn_face_ip_remote_hashtb);
+
+      /* If remote matches the face is an iface */
+      if (face == NULL)
+	{
+	  hicn_iface_ip_add (local_addr, remote_addr, sw_if, pfaceid);
+	  face = hicn_dpoi_get_from_idx (*pfaceid);
+	}
+      else
+	{
+	  *pfaceid = hicn_dpoi_get_index (face);
+	}
+
+      hicn_face_ip_key_t key;
+      hicn_face_ip4_get_key (&(local_addr->ip4), sw_if, &key);
+
+      mhash_set_mem (&hicn_face_ip_local_hashtb, &key, (uword *) pfaceid, 0);
+
+      hicn_face_ip_t *ip_face = (hicn_face_ip_t *) face->data;
+      /* Copy the whole ip46 union: in VPP's ip46_address_t the ip4 address
+       * lives after a 12-byte pad, so copying sizeof (ip4_address_t) bytes
+       * from the base would only copy pad, never the address itself. */
+      clib_memcpy (&ip_face->local_addr, local_addr, sizeof (ip46_address_t));
+      clib_memcpy (&ip_face->remote_addr, remote_addr,
+		   sizeof (ip46_address_t));
+      face->shared.sw_if = sw_if;
+      face->shared.flags = flags;
+      face->shared.adj = adj;
+
+      dpo_proto = DPO_PROTO_IP4;
+    }
+  else
+    {
+      face =
+	hicn_face_ip6_get (&(local_addr->ip6), sw_if,
+			   &hicn_face_ip_local_hashtb);
+
+      if (face != NULL)
+	return HICN_ERROR_FACE_ALREADY_CREATED;
+
+      face =
+	hicn_face_ip6_get (&(remote_addr->ip6), sw_if,
+			   &hicn_face_ip_remote_hashtb);
+
+      /* If remote matches the face is an iface */
+      if (face == NULL)
+	{
+	  hicn_iface_ip_add (local_addr, remote_addr, sw_if, pfaceid);
+	  face = hicn_dpoi_get_from_idx (*pfaceid);
+	}
+      else
+	{
+	  *pfaceid = hicn_dpoi_get_index (face);
+	}
+
+      hicn_face_ip_key_t key;
+      hicn_face_ip6_get_key (&(local_addr->ip6), sw_if, &key);
+
+      mhash_set_mem (&hicn_face_ip_local_hashtb, &key, (uword *) pfaceid, 0);
+
+      hicn_face_ip_t *ip_face = (hicn_face_ip_t *) face->data;
+      clib_memcpy (&ip_face->local_addr, local_addr, sizeof (ip6_address_t));
+      clib_memcpy (&ip_face->remote_addr, remote_addr,
+		   sizeof (ip6_address_t));
+      face->shared.sw_if = sw_if;
+      face->shared.flags = flags;
+      face->shared.adj = adj;
+
+      dpo_proto = DPO_PROTO_IP6;
+    }
+
+  retx_t *retx = vlib_process_signal_event_data (vlib_get_main (),
+						 hicn_mapme_eventmgr_process_node.
+						 index,
+						 HICN_MAPME_EVENT_FACE_ADD, 1,
+						 sizeof (retx_t));
+  *retx = (retx_t)
+  {
+    .prefix = 0,.dpo = (dpo_id_t)
+    {
+      .dpoi_type = hicn_face_ip_type,.dpoi_proto = dpo_proto,.dpoi_next_node =
+	0,.dpoi_index = *pfaceid,}
+  };
+
+  return HICN_ERROR_NONE;
+}
+
+u8 *
+format_hicn_face_ip (u8 * s, va_list * args)
+{
+ index_t index = va_arg (*args, index_t);
+ CLIB_UNUSED (u32 indent) = va_arg (*args, u32);
+ hicn_face_t *face;
+ hicn_face_ip_t *ip_face;
+ ip_adjacency_t *adj;
+ vnet_main_t *vnm = vnet_get_main ();
+
+ face = hicn_dpoi_get_from_idx (index);
+ ip_face = (hicn_face_ip_t *) face->data;
+
+ if (face->shared.flags & HICN_FACE_FLAGS_FACE)
+ {
+ ASSERT (face->shared.adj != (adj_index_t) ~ 0);
+ adj = adj_get (face->shared.adj);
+
+ hicn_face_id_t face_id = hicn_dpoi_get_index (face);
+ s = format (s, "%U Face %d: ", format_white_space, indent, face_id);
+ s = format (s, "type IP local %U ",
+ format_ip46_address, &ip_face->local_addr, IP46_TYPE_ANY);
+ s =
+ format (s, "remote %U ", format_ip46_address, &ip_face->remote_addr,
+ IP46_TYPE_ANY);
+ s = format (s, "%U", format_vnet_link, adj->ia_link);
+ s = format (s, " dev %U", format_vnet_sw_interface_name, vnm,
+ vnet_get_sw_interface (vnm, face->shared.sw_if));
+
+ if ((face->shared.flags & HICN_FACE_FLAGS_APPFACE_PROD))
+ s = format (s, " %U", format_hicn_face_prod, face_id, 0);
+ else if ((face->shared.flags & HICN_FACE_FLAGS_APPFACE_CONS))
+ s = format (s, " %U", format_hicn_face_cons, face_id, 0);
+
+ if ((face->shared.flags & HICN_FACE_FLAGS_DELETED))
+ s = format (s, " (deleted)");
+ }
+ else
+ {
+ hicn_face_id_t face_id = hicn_dpoi_get_index (face);
+ s = format (s, "%U iFace %d: ", format_white_space, indent, face_id);
+ s = format (s, "type IP local %U remote %U",
+ format_ip46_address, &ip_face->local_addr, IP46_TYPE_ANY,
+ format_ip46_address, &ip_face->remote_addr, IP46_TYPE_ANY);
+ s =
+ format (s, " dev %U", format_vnet_sw_interface_name, vnm,
+ vnet_get_sw_interface (vnm, face->shared.sw_if));
+
+ if ((face->shared.flags & HICN_FACE_FLAGS_APPFACE_PROD))
+ s = format (s, " %U", format_hicn_face_prod, face_id, 0);
+ else if ((face->shared.flags & HICN_FACE_FLAGS_APPFACE_CONS))
+ s = format (s, " %U", format_hicn_face_cons, face_id, 0);
+
+ if ((face->shared.flags & HICN_FACE_FLAGS_DELETED))
+ s = format (s, " (deleted)");
+ }
+
+ return s;
+}
+
+void
+hicn_face_ip_get_dpo (hicn_face_t * face, dpo_id_t * dpo)
+{
+
+ hicn_face_ip_t *face_ip = (hicn_face_ip_t *) face->data;
+ return hicn_dpo_ip_create_from_face (face, dpo,
+ ip46_address_is_ip4 (&face_ip->
+ remote_addr) ?
+ strategy_face_ip4_vlib_edge :
+ strategy_face_ip6_vlib_edge);
+}
+
+hicn_face_vft_t ip_vft = {
+ .format_face = format_hicn_face_ip,
+ .hicn_face_del = hicn_face_ip_del,
+ .hicn_face_get_dpo = hicn_face_ip_get_dpo,
+};
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/hicn-plugin/src/faces/ip/face_ip.h b/hicn-plugin/src/faces/ip/face_ip.h
new file mode 100755
index 000000000..8c31f6dd3
--- /dev/null
+++ b/hicn-plugin/src/faces/ip/face_ip.h
@@ -0,0 +1,241 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HICN_FACE_IP_H__
+#define __HICN_FACE_IP_H__
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include "../face.h"
+#include "../../cache_policies/cs_policy.h"
+
+/**
+ * @file
+ *
+ * @brief IP face
+ *
+ * A face is carried through nodes as a dpo. The face state is the object
+ * pointed by the dpoi_index in the dpo_id_t (see
+ * https://docs.fd.io/vpp/18.07/d0/d37/dpo_8h_source.html)
+ */
+typedef struct hicn_ip_face_t_
+{
+ /**
+ * The headers to paint, in packet painting order
+ */
+ /* Local address of the interface sw_if */
+ ip46_address_t local_addr;
+
+ /* Remote address of neighbor */
+ ip46_address_t remote_addr;
+
+} hicn_face_ip_t;
+
+
+/**
+ * Hash table that indexes faces by local address. For fast lookup when a
+ * data packet arrives.
+ */
+extern mhash_t hicn_face_ip_local_hashtb;
+
+/**
+ * Hash table that indexes faces by remote address. For fast lookup when an
+ * interest arrives.
+ */
+extern mhash_t hicn_face_ip_remote_hashtb;
+
+/**
+ * Key definition for the mhash table. An ip face is uniquely identified by ip
+ * address and the interface id. The ip address can correspond to the remote ip
+ * address of the next hicn hop, or to the local address of the receiving
+ * interface. The former is used to retrieve the incoming face when an interest
+ * is received, the latter when the arriving packet is a data packet.
+ */
+typedef struct hicn_face_ip_key_s
+{
+ ip46_address_t addr;
+ u32 sw_if;
+} hicn_face_ip_key_t;
+
+
+extern hicn_face_type_t hicn_face_ip_type;
+extern hicn_face_vft_t ip_vft;
+
+/**
+ * @brief Create the key object for the mhash. Fill in the key object with the
+ * expected values.
+ *
+ * @param addr Local or remote ip v6 address of the face
+ * @param sw_if interface associated to the face
+ * @param key Pointer to an allocated hicn_face_ip_key_t object
+ */
+always_inline void
+hicn_face_ip6_get_key (const ip6_address_t * addr,
+ u32 sw_if, hicn_face_ip_key_t * key)
+{
+ key->addr.ip6 = *addr;
+ key->sw_if = sw_if;
+}
+
+
+/**
+ * @brief Create the key object for the mhash. Fill in the key object with the
+ * expected values.
+ *
+ * @param addr Local or remote ip v4 address of the face
+ * @param sw_if interface associated to the face
+ * @param key Pointer to an allocated hicn_face_ip_key_t object
+ */
+always_inline void
+hicn_face_ip4_get_key (const ip4_address_t * addr,
+ u32 sw_if, hicn_face_ip_key_t * key)
+{
+ ip46_address_set_ip4 (&(key->addr), addr);
+ key->sw_if = sw_if;
+}
+
+/**
+ * @brief Get the dpoi from the ip v4 address. Does not add any lock.
+ *
+ * @param addr Ip v4 address used to create the key for the hash table.
+ * @param sw_if Software interface id used to create the key for the hash table.
+ * @param hashtb Hash table (remote or local) where to perform the lookup.
+ *
+ * @result Pointer to the face.
+ */
+always_inline hicn_face_t *
+hicn_face_ip4_get (const ip4_address_t * addr, u32 sw_if, mhash_t * hashtb)
+{
+ hicn_face_ip_key_t key;
+
+ hicn_face_ip4_get_key (addr, sw_if, &key);
+
+ hicn_face_id_t *dpoi_index = (hicn_face_id_t *) mhash_get (hashtb,
+ &key);
+
+ return dpoi_index == NULL ? NULL : hicn_dpoi_get_from_idx (*dpoi_index);
+}
+
+/**
+ * @brief Get the dpoi from the ip v6 address. Does not add any lock.
+ *
+ * @param addr Ip v6 address used to create the key for the hash table.
+ * @param sw_if Software interface id used to create the key for the hash table.
+ * @param hashtb Hash table (remote or local) where to perform the lookup.
+ *
+ * @result Pointer to the face.
+ */
+always_inline hicn_face_t *
+hicn_face_ip6_get (const ip6_address_t * addr, u32 sw_if, mhash_t * hashtb)
+{
+ hicn_face_ip_key_t key;
+
+ hicn_face_ip6_get_key (addr, sw_if, &key);
+
+ hicn_face_id_t *dpoi_index = (hicn_face_id_t *) mhash_get (hashtb,
+ &key);
+
+ return dpoi_index == NULL ? NULL : hicn_dpoi_get_from_idx (*dpoi_index);
+}
+
+/**
+ * @brief Create a new face ip. API for other modules (e.g., routing)
+ *
+ * @param local_addr Local ip v4 or v6 address of the face
+ * @param remote_addr Remote ip v4 or v6 address of the face
+ * @param sw_if interface associated to the face
+ * @param is_app_face Boolean to set the face as an application face
+ * @param pfaceid Pointer to return the face id
+ * @return HICN_ERROR_FACE_NO_GLOBAL_IP if the face does not have a globally
+ * reachable ip address, otherwise HICN_ERROR_NONE
+ */
+int hicn_face_ip_add (const ip46_address_t * local_addr,
+ const ip46_address_t * remote_addr,
+ int swif, hicn_face_id_t * pfaceid);
+
+/**
+ * @brief Create a new incomplete face ip. (Meant to be used by the data plane)
+ *
+ * @param local_addr Local ip v4 or v6 address of the face
+ * @param remote_addr Remote ip v4 or v6 address of the face
+ * @param sw_if interface associated to the face
+ * @param pfaceid Pointer to return the face id
+ * @return HICN_ERROR_FACE_NO_GLOBAL_IP if the face does not have a globally
+ * reachable ip address, otherwise HICN_ERROR_NONE
+ */
+always_inline void
+hicn_iface_ip_add (const ip46_address_t * local_addr,
+ const ip46_address_t * remote_addr,
+ int sw_if, hicn_face_id_t * pfaceid)
+{
+ hicn_face_t *face;
+ pool_get (hicn_dpoi_face_pool, face);
+
+ hicn_face_ip_t *ip_face = (hicn_face_ip_t *) (face->data);
+
+ clib_memcpy (&(ip_face->local_addr.ip6), local_addr,
+ sizeof (ip6_address_t));
+ clib_memcpy (&(ip_face->remote_addr.ip6), remote_addr,
+ sizeof (ip6_address_t));
+ face->shared.sw_if = sw_if;
+
+ face->shared.adj = ADJ_INDEX_INVALID;
+ face->shared.pl_id = (u16) 0;
+ face->shared.face_type = hicn_face_ip_type;
+ face->shared.flags = HICN_FACE_FLAGS_IFACE;
+ face->shared.locks = 0;
+
+ hicn_face_ip_key_t key;
+ hicn_face_ip6_get_key (&(remote_addr->ip6), sw_if, &key);
+ *pfaceid = hicn_dpoi_get_index (face);
+
+ mhash_set_mem (&hicn_face_ip_remote_hashtb, &key, (uword *) pfaceid, 0);
+}
+
+/**
+ * @brief Delete an ip face
+ *
+ * @param face_id Id of the face to delete
+ * @return HICN_ERROR_FACE_NOT_FOUND if the face does not exist, otherwise
+ * HICN_ERROR_NONE
+ */
+int hicn_face_ip_del (hicn_face_id_t face_id);
+
+/**
+ * @brief Format a IP face
+ *
+ * @param s Pointer to a previous string. If null it will be initialize
+ * @param args Array storing input values. Expected u32 face_id and u32 indent
+ * @return String with the formatted face
+ */
+u8 *format_hicn_face_ip (u8 * s, va_list * args);
+
+/**
+ * @brief Create a dpo from an ip face
+ *
+ * @param face Face from which to create the dpo
+ * @return the dpo
+ */
+void hicn_face_ip_get_dpo (hicn_face_t * face, dpo_id_t * dpo);
+
+#endif // __HICN_FACE_IP_H__
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/hicn-plugin/src/faces/ip/face_ip_cli.c b/hicn-plugin/src/faces/ip/face_ip_cli.c
new file mode 100755
index 000000000..1558c82cb
--- /dev/null
+++ b/hicn-plugin/src/faces/ip/face_ip_cli.c
@@ -0,0 +1,158 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/vnet.h>
+#include <vnet/dpo/dpo.h>
+#include <vlib/vlib.h>
+
+#include "face_ip.h"
+#include "dpo_ip.h"
+#include "../face.h"
+
+#define HICN_FACE_NONE 0
+#define HICN_FACE_DELETE 1
+#define HICN_FACE_ADD 2
+
+static clib_error_t *
+hicn_face_ip_cli_set_command_fn (vlib_main_t * vm,
+ unformat_input_t * main_input,
+ vlib_cli_command_t * cmd)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ ip46_address_t local_addr;
+ ip46_address_t remote_addr;
+ hicn_face_id_t face_id = HICN_FACE_NULL;
+ int app_face = 0;
+ u32 cs_reserved = HICN_PARAM_FACE_DFT_CS_RESERVED;
+ int ret = HICN_ERROR_NONE;
+ int sw_if;
+ int face_op = HICN_FACE_NONE;
+
+ ip46_address_reset (&local_addr);
+ /* Get a line of input. */
+ unformat_input_t _line_input, *line_input = &_line_input;
+ if (!unformat_user (main_input, unformat_line_input, line_input))
+ {
+ return (0);
+ }
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "del"))
+ {
+ if (unformat (line_input, "id %d", &face_id))
+ face_op = HICN_FACE_DELETE;
+ else
+ {
+ return clib_error_return (0, "missing face id");
+ }
+ }
+ else if (unformat (line_input, "add"))
+ {
+ face_op = HICN_FACE_ADD;
+ if (unformat (line_input, "local %U remote %U intfc %U",
+ unformat_ip46_address, &local_addr, IP46_TYPE_ANY,
+ unformat_ip46_address, &remote_addr, IP46_TYPE_ANY,
+ unformat_vnet_sw_interface, vnm, &sw_if));
+ else
+ {
+ return clib_error_return (0, "%s '%U'",
+ get_error_string
+ (HICN_ERROR_CLI_INVAL),
+ format_unformat_error, line_input);
+ }
+ }
+ else if (unformat (line_input, "app_face %d", &app_face))
+ {
+ if (unformat (line_input, "cs_size %d", &cs_reserved));
+ }
+ else
+ {
+ return clib_error_return (0, "%s '%U'",
+ get_error_string (HICN_ERROR_CLI_INVAL),
+ format_unformat_error, line_input);
+ }
+ }
+
+ if (face_id != HICN_FACE_NULL)
+ {
+
+ if (!hicn_dpoi_idx_is_valid (face_id))
+ {
+ return clib_error_return (0, "%s, face_id %d not valid",
+ get_error_string (ret), face_id);
+ }
+ }
+
+ int rv;
+ switch (face_op)
+ {
+ case HICN_FACE_ADD:
+
+ /* Check for presence of next hop address */
+ if ((remote_addr.as_u64[0] == (u64) 0)
+ && (remote_addr.as_u64[1] == (u64) 0))
+ {
+ return clib_error_return (0, "next hop address not specified");
+ }
+
+ rv = hicn_face_ip_add (&local_addr, &remote_addr, sw_if, &face_id);
+
+ if (rv == HICN_ERROR_NONE)
+ {
+ vlib_cli_output (vm, "Face id: %d", face_id);
+ }
+ else
+ {
+ return clib_error_return (0, get_error_string (rv));
+ }
+ break;
+ case HICN_FACE_DELETE:
+ rv = hicn_face_ip_del (face_id);
+ if (rv == HICN_ERROR_NONE)
+ {
+ vlib_cli_output (vm, "Face %d deleted", face_id);
+ }
+ else
+ {
+ return clib_error_return (0, get_error_string (rv));
+ }
+ break;
+ default:
+ return clib_error_return (0, "Operation (%d) not implemented", face_op);
+ break;
+ }
+ return (rv == HICN_ERROR_NONE) ? 0 : clib_error_return (0, "%s\n",
+ get_error_string
+ (rv));
+}
+
+/* cli declaration for 'cfg face' */
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (hicn_face_ip_cli_set_command, static) =
+{
+ .path = "hicn face ip",
+ .short_help = "hicn face ip {add local <local_address> remote <remote_address> intfc <sw_if>} {app_face <0/1>} {cs_size <size_in_packets>} | {del id <face_id>}",
+ .function = hicn_face_ip_cli_set_command_fn,
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/hicn-plugin/src/faces/ip/face_ip_node.c b/hicn-plugin/src/faces/ip/face_ip_node.c
new file mode 100755
index 000000000..6081e4737
--- /dev/null
+++ b/hicn-plugin/src/faces/ip/face_ip_node.c
@@ -0,0 +1,761 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/adj/adj.h>
+
+#include "face_ip.h"
+#include "face_ip_node.h"
+#include "dpo_ip.h"
+#include "../../strategy_dpo_manager.h"
+#include "../face.h"
+#include "../../cache_policies/cs_lru.h"
+#include "../../infra.h"
+#include "../../hicn.h"
+
+/**
+ * @File
+ *
+ * Definition of the input/output nodes for ip faces (fully established
+ * faces; the incomplete-face counterparts live in iface_ip_node.c).
+ */
+
+/* Forward declarations of the four graph nodes defined in this file. */
+vlib_node_registration_t hicn_face_ip4_input_node;
+vlib_node_registration_t hicn_face_ip4_output_node;
+vlib_node_registration_t hicn_face_ip6_input_node;
+vlib_node_registration_t hicn_face_ip6_output_node;
+
+/* IP version tags. NOTE(review): apparently unused in this file — confirm. */
+#define ip_v4 4
+#define ip_v6 6
+
+/* Per-node error/counter strings, expanded from foreach_hicnfwd_error. */
+static char *hicn_face_ip4_input_error_strings[] = {
+#define _(sym, string) string,
+  foreach_hicnfwd_error
+#undef _
+};
+
+static char *hicn_face_ip6_input_error_strings[] = {
+#define _(sym, string) string,
+  foreach_hicnfwd_error
+#undef _
+};
+
+/* Trace context struct */
+typedef struct
+{
+ u32 next_index;
+ u32 sw_if_index;
+ u8 pkt_type;
+} hicn_face_ip4_input_trace_t;
+
+typedef enum
+{
+ HICN_FACE_IP4_INPUT_NEXT_DATA,
+ HICN_FACE_IP4_INPUT_NEXT_MAPME,
+ HICN_FACE_IP4_INPUT_NEXT_ERROR_DROP,
+ HICN_FACE_IP4_INPUT_N_NEXT,
+} hicn_face_ip4_input_next_t;
+
+/* Trace context struct */
+typedef struct
+{
+ u32 next_index;
+ u32 sw_if_index;
+ u8 pkt_type;
+} hicn_face_ip6_input_trace_t;
+
+typedef enum
+{
+ HICN_FACE_IP6_INPUT_NEXT_DATA,
+ HICN_FACE_IP6_INPUT_NEXT_MAPME,
+ HICN_FACE_IP6_INPUT_NEXT_ERROR_DROP,
+ HICN_FACE_IP6_INPUT_N_NEXT,
+} hicn_face_ip6_input_next_t;
+
+#define NEXT_MAPME_IP4 HICN_FACE_IP4_INPUT_NEXT_MAPME
+#define NEXT_MAPME_IP6 HICN_FACE_IP6_INPUT_NEXT_MAPME
+#define NEXT_DATA_IP4 HICN_FACE_IP4_INPUT_NEXT_DATA
+#define NEXT_DATA_IP6 HICN_FACE_IP6_INPUT_NEXT_DATA
+
+#define NEXT_ERROR_DROP_IP4 HICN_FACE_IP4_INPUT_NEXT_ERROR_DROP
+#define NEXT_ERROR_DROP_IP6 HICN_FACE_IP6_INPUT_NEXT_ERROR_DROP
+
+#define IP_HEADER_4 ip4_header_t
+#define IP_HEADER_6 ip6_header_t
+
+#define LOCK_FROM_LOCAL_IP4 hicn_dpo_ip4_lock_from_local
+#define LOCK_FROM_LOCAL_IP6 hicn_dpo_ip6_lock_from_local
+
+#define TRACE_INPUT_PKT_IP4 hicn_face_ip4_input_trace_t
+#define TRACE_INPUT_PKT_IP6 hicn_face_ip6_input_trace_t
+
+/*
+ * NOTE: Both hicn_face_ip4_input_node_fn and hicn_face_ip6_input_node_fn
+ * share a similar codebase. Macros are hard to debug, although the
+ * following code is pretty straightforward and most of the complexity is in
+ * functions that can be easily debugged.
+ */
+/*
+ * Process a single buffer: lock the receiving local face, then steer the
+ * packet to the data PCS lookup node (or to mapme-ack for ICMP). Expands
+ * inside the node loop and captures vm, node, from, to_next, n_left_from,
+ * n_left_to_next, next_index and stats from the enclosing scope.
+ */
+#define face_input_x1(ipv)                                              \
+  do{                                                                   \
+    vlib_buffer_t *b0;                                                  \
+    u32 bi0;                                                            \
+    u32 next0 = NEXT_ERROR_DROP_IP##ipv;                                \
+    IP_HEADER_##ipv * ip_hdr = NULL;                                    \
+    hicn_buffer_t * hicnb0;                                             \
+    int ret;                                                            \
+    /* Prefetch for next iteration. */                                  \
+    if (n_left_from > 1)                                                \
+      {                                                                 \
+        vlib_buffer_t *b1;                                              \
+        b1 = vlib_get_buffer (vm, from[1]);                             \
+        CLIB_PREFETCH (b1, 2*CLIB_CACHE_LINE_BYTES, STORE);             \
+        CLIB_PREFETCH (b1->data, CLIB_CACHE_LINE_BYTES , LOAD);         \
+      }                                                                 \
+    /* Dequeue a packet buffer */                                       \
+    bi0 = from[0];                                                      \
+    from += 1;                                                          \
+    n_left_from -= 1;                                                   \
+    to_next[0] = bi0;                                                   \
+    to_next += 1;                                                       \
+    n_left_to_next -= 1;                                                \
+                                                                        \
+    b0 = vlib_get_buffer (vm, bi0);                                     \
+    hicnb0 = hicn_get_buffer(b0);                                       \
+    ip_hdr = (IP_HEADER_##ipv *) vlib_buffer_get_current(b0);           \
+                                                                        \
+    u8 is_icmp = ip_hdr->protocol == IPPROTO_ICMPV##ipv;                \
+                                                                        \
+    /* Branchless select: ICMP goes to mapme-ack, the rest to data. */  \
+    next0 = is_icmp*NEXT_MAPME_IP##ipv +                                \
+      (1-is_icmp)*NEXT_DATA_IP##ipv;                                    \
+                                                                        \
+    ret = LOCK_FROM_LOCAL_IP##ipv                                       \
+      (&(hicnb0->face_dpo_id),                                          \
+       &hicnb0->is_appface,                                             \
+       &(ip_hdr->dst_address),                                          \
+       vnet_buffer (b0)->sw_if_index[VLIB_RX]);                         \
+                                                                        \
+    if ( PREDICT_FALSE(ret != HICN_ERROR_NONE) )                        \
+      next0 = NEXT_ERROR_DROP_IP##ipv;                                  \
+    else                                                                \
+      stats.pkts_data_count += 1;                                       \
+                                                                        \
+    if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) &&          \
+                       (b0->flags & VLIB_BUFFER_IS_TRACED)))            \
+      {                                                                 \
+        TRACE_INPUT_PKT_IP##ipv *t =                                    \
+          vlib_add_trace (vm, node, b0, sizeof (*t));                   \
+        t->pkt_type = HICN_PKT_TYPE_INTEREST;                           \
+        t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];        \
+        t->next_index = next0;                                          \
+      }                                                                 \
+                                                                        \
+    /* Verify speculative enqueue, maybe switch current next frame */   \
+    vlib_validate_buffer_enqueue_x1 (vm, node, next_index,              \
+                                     to_next, n_left_to_next,           \
+                                     bi0, next0);                       \
+  }while(0)
+
+
+/*
+ * Two-buffer variant of face_input_x1. Requires n_left_from >= 4 (it
+ * prefetches from[2] and from[3] unconditionally). Same captured
+ * variables as face_input_x1.
+ */
+#define face_input_x2(ipv)                                              \
+  do{                                                                   \
+    vlib_buffer_t *b0, *b1;                                             \
+    u32 bi0, bi1;                                                       \
+    u32 next0 = NEXT_ERROR_DROP_IP##ipv;                                \
+    u32 next1 = NEXT_ERROR_DROP_IP##ipv;                                \
+    IP_HEADER_##ipv * ip_hdr0 = NULL;                                   \
+    IP_HEADER_##ipv * ip_hdr1 = NULL;                                   \
+    hicn_buffer_t * hicnb0;                                             \
+    hicn_buffer_t * hicnb1;                                             \
+    int ret0, ret1;                                                     \
+    /* Prefetch for next iteration. */                                  \
+    {                                                                   \
+      vlib_buffer_t *b2, *b3;                                           \
+      b2 = vlib_get_buffer (vm, from[2]);                               \
+      b3 = vlib_get_buffer (vm, from[3]);                               \
+      CLIB_PREFETCH (b2, 2*CLIB_CACHE_LINE_BYTES, STORE);               \
+      CLIB_PREFETCH (b3, 2*CLIB_CACHE_LINE_BYTES, STORE);               \
+      CLIB_PREFETCH (b2->data, CLIB_CACHE_LINE_BYTES , LOAD);           \
+      CLIB_PREFETCH (b3->data, CLIB_CACHE_LINE_BYTES , LOAD);           \
+    }                                                                   \
+    /* Dequeue a packet buffer */                                       \
+    bi0 = from[0];                                                      \
+    bi1 = from[1];                                                      \
+    from += 2;                                                          \
+    n_left_from -= 2;                                                   \
+    to_next[0] = bi0;                                                   \
+    to_next[1] = bi1;                                                   \
+    to_next += 2;                                                       \
+    n_left_to_next -= 2;                                                \
+                                                                        \
+    b0 = vlib_get_buffer (vm, bi0);                                     \
+    b1 = vlib_get_buffer (vm, bi1);                                     \
+    hicnb0 = hicn_get_buffer(b0);                                       \
+    hicnb1 = hicn_get_buffer(b1);                                       \
+    ip_hdr0 = (IP_HEADER_##ipv *) vlib_buffer_get_current(b0);          \
+    ip_hdr1 = (IP_HEADER_##ipv *) vlib_buffer_get_current(b1);          \
+                                                                        \
+    u8 is_icmp0 = ip_hdr0->protocol == IPPROTO_ICMPV##ipv;              \
+    u8 is_icmp1 = ip_hdr1->protocol == IPPROTO_ICMPV##ipv;              \
+                                                                        \
+    /* Branchless select: ICMP goes to mapme-ack, the rest to data. */  \
+    next0 = is_icmp0*NEXT_MAPME_IP##ipv +                               \
+      (1-is_icmp0)*NEXT_DATA_IP##ipv;                                   \
+                                                                        \
+    next1 = is_icmp1*NEXT_MAPME_IP##ipv +                               \
+      (1-is_icmp1)*NEXT_DATA_IP##ipv;                                   \
+                                                                        \
+    ret0 = LOCK_FROM_LOCAL_IP##ipv                                      \
+      (&(hicnb0->face_dpo_id),                                          \
+       &hicnb0->is_appface,                                             \
+       &(ip_hdr0->dst_address),                                         \
+       vnet_buffer (b0)->sw_if_index[VLIB_RX]);                         \
+                                                                        \
+    ret1 = LOCK_FROM_LOCAL_IP##ipv                                      \
+      (&(hicnb1->face_dpo_id),                                          \
+       &hicnb1->is_appface,                                             \
+       &(ip_hdr1->dst_address),                                         \
+       vnet_buffer (b1)->sw_if_index[VLIB_RX]);                         \
+                                                                        \
+    if ( PREDICT_FALSE(ret0 != HICN_ERROR_NONE) )                       \
+      next0 = NEXT_ERROR_DROP_IP##ipv;                                  \
+    else                                                                \
+      stats.pkts_data_count += 1;                                       \
+                                                                        \
+    if ( PREDICT_FALSE(ret1 != HICN_ERROR_NONE) )                       \
+      next1 = NEXT_ERROR_DROP_IP##ipv;                                  \
+    else                                                                \
+      stats.pkts_data_count += 1;                                       \
+                                                                        \
+    if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) &&          \
+                       (b0->flags & VLIB_BUFFER_IS_TRACED)))            \
+      {                                                                 \
+        TRACE_INPUT_PKT_IP##ipv *t =                                    \
+          vlib_add_trace (vm, node, b0, sizeof (*t));                   \
+        t->pkt_type = HICN_PKT_TYPE_INTEREST;                           \
+        t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];        \
+        t->next_index = next0;                                          \
+      }                                                                 \
+                                                                        \
+    if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) &&          \
+                       (b1->flags & VLIB_BUFFER_IS_TRACED)))            \
+      {                                                                 \
+        TRACE_INPUT_PKT_IP##ipv *t =                                    \
+          vlib_add_trace (vm, node, b1, sizeof (*t));                   \
+        t->pkt_type = HICN_PKT_TYPE_INTEREST;                           \
+        t->sw_if_index = vnet_buffer (b1)->sw_if_index[VLIB_RX];        \
+        t->next_index = next1;                                          \
+      }                                                                 \
+                                                                        \
+    /* Verify speculative enqueue, maybe switch current next frame */   \
+    vlib_validate_buffer_enqueue_x2 (vm, node, next_index,              \
+                                     to_next, n_left_to_next,           \
+                                     bi0, bi1, next0, next1);           \
+  }while(0)
+
+
+/**
+ * @brief Node function for data packets arriving on an IPv4 hICN face.
+ *
+ * Drains the frame in pairs then singly via the face_input_x{2,1}
+ * macros, which lock the face and dispatch each buffer, and finally
+ * bumps the data-packet counter.
+ */
+static uword
+hicn_face_ip4_input_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
+			     vlib_frame_t * frame)
+{
+  u32 n_left_from, *from, *to_next, next_index;
+
+  from = vlib_frame_vector_args (frame);
+  n_left_from = frame->n_vectors;
+  next_index = node->cached_next_index;
+  vl_api_hicn_api_node_stats_get_reply_t stats = { 0 };
+
+  while (n_left_from > 0)
+    {
+      u32 n_left_to_next;
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+      /* Dual loop, X2 */
+      while (n_left_from >= 4 && n_left_to_next >= 2)
+	{
+	  face_input_x2 (4);
+	}
+
+      /* Single loop, X1 */
+      while (n_left_from > 0 && n_left_to_next > 0)
+	{
+	  face_input_x1 (4);
+	}
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+    }
+
+  vlib_node_increment_counter (vm, node->node_index,
+			       HICNFWD_ERROR_DATAS, stats.pkts_data_count);
+
+  return (frame->n_vectors);
+}
+
+/* Render a hicn-face-ip4-input trace record as a human-readable line. */
+static u8 *
+hicn_face_ip4_input_format_trace (u8 * s, va_list * args)
+{
+  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+  hicn_face_ip4_input_trace_t *trace =
+    va_arg (*args, hicn_face_ip4_input_trace_t *);
+
+  return format (s, "FACE_IP4_INPUT: pkt: %d, sw_if_index %d, next index %d",
+		 (int) trace->pkt_type, trace->sw_if_index,
+		 trace->next_index);
+}
+
+
+/*
+ * Node registration for hicn-face-ip4-input (data side of an IPv4 face).
+ */
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE(hicn_face_ip4_input_node) =
+{
+  .function = hicn_face_ip4_input_node_fn,
+  .name = "hicn-face-ip4-input",
+  .vector_size = sizeof(u32),
+  .format_trace = hicn_face_ip4_input_format_trace,
+  .type = VLIB_NODE_TYPE_INTERNAL,
+  .n_errors = ARRAY_LEN(hicn_face_ip4_input_error_strings),
+  .error_strings = hicn_face_ip4_input_error_strings,
+  .n_next_nodes = HICN_FACE_IP4_INPUT_N_NEXT,
+  /* edit / add dispositions here */
+  .next_nodes =
+  {
+    [HICN_FACE_IP4_INPUT_NEXT_DATA] = "hicn-data-pcslookup",
+    [HICN_FACE_IP4_INPUT_NEXT_MAPME] = "hicn-mapme-ack",
+    [HICN_FACE_IP4_INPUT_NEXT_ERROR_DROP] = "error-drop",
+  },
+};
+/* *INDENT-ON* */
+
+/**
+ * @brief Node function for data packets arriving on an IPv6 hICN face.
+ * Same structure as the IPv4 variant, instantiated with the IP6 macros.
+ * @see hicn_face_ip4_input_node_fn
+ */
+static uword
+hicn_face_ip6_input_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
+			     vlib_frame_t * frame)
+{
+  u32 n_left_from, *from, *to_next, next_index;
+
+  from = vlib_frame_vector_args (frame);
+  n_left_from = frame->n_vectors;
+  next_index = node->cached_next_index;
+  vl_api_hicn_api_node_stats_get_reply_t stats = { 0 };
+
+  while (n_left_from > 0)
+    {
+      u32 n_left_to_next;
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+      /* Dual loop, X2 */
+      while (n_left_from >= 4 && n_left_to_next >= 2)
+	{
+	  face_input_x2 (6);
+	}
+
+      /* Single loop, X1 */
+      while (n_left_from > 0 && n_left_to_next > 0)
+	{
+	  face_input_x1 (6);
+	}
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+    }
+
+  vlib_node_increment_counter (vm, node->node_index,
+			       HICNFWD_ERROR_DATAS, stats.pkts_data_count);
+
+  return (frame->n_vectors);
+}
+
+/* Render a hicn-face-ip6-input trace record as a human-readable line. */
+static u8 *
+hicn_face_ip6_input_format_trace (u8 * s, va_list * args)
+{
+  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+  hicn_face_ip6_input_trace_t *trace =
+    va_arg (*args, hicn_face_ip6_input_trace_t *);
+
+  return format (s, "FACE_IP6_INPUT: pkt: %d, sw_if_index %d, next index %d",
+		 (int) trace->pkt_type, trace->sw_if_index,
+		 trace->next_index);
+}
+
+/*
+ * Node registration for hicn-face-ip6-input (data side of an IPv6 face).
+ */
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE(hicn_face_ip6_input_node) =
+{
+  .function = hicn_face_ip6_input_node_fn,
+  .name = "hicn-face-ip6-input",
+  .vector_size = sizeof(u32),
+  .format_trace = hicn_face_ip6_input_format_trace,
+  .type = VLIB_NODE_TYPE_INTERNAL,
+  .n_errors = ARRAY_LEN(hicn_face_ip6_input_error_strings),
+  .error_strings = hicn_face_ip6_input_error_strings,
+  .n_next_nodes = HICN_FACE_IP6_INPUT_N_NEXT,
+  /* edit / add dispositions here */
+  .next_nodes =
+  {
+    [HICN_FACE_IP6_INPUT_NEXT_DATA] = "hicn-data-pcslookup",
+    [HICN_FACE_IP6_INPUT_NEXT_MAPME] = "hicn-mapme-ack",
+    [HICN_FACE_IP6_INPUT_NEXT_ERROR_DROP] = "error-drop",
+  },
+};
+/* *INDENT-ON* */
+
+/**** FACE OUTPUT *****/
+
+/**
+ * @brief Rewrite an outgoing interest for the given face and choose the
+ * next node from the face adjacency.
+ *
+ * Stores the face adjacency in the buffer TX metadata, rewrites the
+ * interest with the face local address through the hicn ops vft, and
+ * returns the adjacency lookup_next_index via *next.
+ */
+static inline void
+hicn_face_rewrite_interest (vlib_main_t * vm, vlib_buffer_t * b0,
+			    const hicn_face_t * face, u32 * next)
+{
+  ip_adjacency_t *adj = adj_get (face->shared.adj);
+
+  /* We assume the ip adjacency has already the MAC/link layer address */
+  vnet_buffer (b0)->ip.adj_index[VLIB_TX] = face->shared.adj;
+  hicn_header_t *hicn = vlib_buffer_get_current (b0);
+
+  hicn_face_ip_t *ip_face = (hicn_face_ip_t *) face->data;
+
+  ip46_address_t temp_addr;
+  ip46_address_reset (&temp_addr);
+  hicn_type_t type = hicn_get_buffer (b0)->type;
+  hicn_ops_vft[type.l1]->rewrite_interest (type, &hicn->protocol,
+					   &ip_face->local_addr, &temp_addr);
+
+  /* If the adjacency is still incomplete (ARP/GLEAN), rewrite the dst
+   * address too so an arp/neighbour discovery request is sent. */
+  if (PREDICT_FALSE
+      (adj->lookup_next_index == IP_LOOKUP_NEXT_ARP
+       || adj->lookup_next_index == IP_LOOKUP_NEXT_GLEAN))
+    hicn_ops_vft[type.l1]->rewrite_data (type, &hicn->protocol,
+					 &ip_face->remote_addr, &temp_addr,
+					 0);
+
+  *next = adj->lookup_next_index;
+}
+
+/* Per-node error/counter strings, expanded from foreach_hicnfwd_error. */
+static char *hicn_face_ip4_output_error_strings[] = {
+#define _(sym, string) string,
+  foreach_hicnfwd_error
+#undef _
+};
+
+static char *hicn_face_ip6_output_error_strings[] = {
+#define _(sym, string) string,
+  foreach_hicnfwd_error
+#undef _
+};
+
+
+/* Trace record for packets through hicn-face-ip4-output. */
+typedef struct
+{
+  u32 next_index;
+  u32 sw_if_index;
+  u8 pkt_type;
+} hicn_face_ip4_output_trace_t;
+
+/* Trace record for packets through hicn-face-ip6-output. */
+typedef struct
+{
+  u32 next_index;
+  u32 sw_if_index;
+  u8 pkt_type;
+} hicn_face_ip6_output_trace_t;
+
+/* Aliases consumed by the face_output_x1/x2 macros via token pasting. */
+#define TRACE_OUTPUT_PKT_IP4 hicn_face_ip4_output_trace_t
+#define TRACE_OUTPUT_PKT_IP6 hicn_face_ip6_output_trace_t
+
+/*
+ * Process one outgoing interest: look up the face from the buffer TX
+ * adjacency index, rewrite the packet for that face, and enqueue to the
+ * next node chosen from the adjacency. Captures vm, node, from, to_next,
+ * n_left_from, n_left_to_next, next_index and stats from the caller.
+ */
+#define face_output_x1(ipv)                                             \
+  do {                                                                  \
+    vlib_buffer_t *b0;                                                  \
+    u32 bi0;                                                            \
+    u32 next0 = IP_LOOKUP_NEXT_DROP;                                    \
+    hicn_face_t * face;                                                 \
+                                                                        \
+    /* Prefetch for next iteration. */                                  \
+    if (n_left_from > 1)                                                \
+      {                                                                 \
+        vlib_buffer_t *b1;                                              \
+        b1 = vlib_get_buffer (vm, from[1]);                             \
+        CLIB_PREFETCH (b1, CLIB_CACHE_LINE_BYTES, STORE);               \
+        CLIB_PREFETCH (b1->data, CLIB_CACHE_LINE_BYTES , STORE);        \
+      }                                                                 \
+    /* Dequeue a packet buffer */                                       \
+    bi0 = from[0];                                                      \
+    from += 1;                                                          \
+    n_left_from -= 1;                                                   \
+    to_next[0] = bi0;                                                   \
+    to_next += 1;                                                       \
+    n_left_to_next -= 1;                                                \
+                                                                        \
+    b0 = vlib_get_buffer (vm, bi0);                                     \
+                                                                        \
+    face =                                                              \
+      hicn_dpoi_get_from_idx (vnet_buffer (b0)->ip.adj_index[VLIB_TX]); \
+                                                                        \
+    if (PREDICT_TRUE(face != NULL))                                     \
+      {                                                                 \
+        hicn_face_rewrite_interest                                      \
+          (vm, b0, face, &next0);                                       \
+        stats.pkts_interest_count += 1;                                 \
+      }                                                                 \
+                                                                        \
+    if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) &&          \
+                       (b0->flags & VLIB_BUFFER_IS_TRACED)))            \
+      {                                                                 \
+        TRACE_OUTPUT_PKT_IP##ipv *t =                                   \
+          vlib_add_trace (vm, node, b0, sizeof (*t));                   \
+        t->pkt_type = HICN_PKT_TYPE_INTEREST;                           \
+        t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];        \
+        t->next_index = next0;                                          \
+      }                                                                 \
+                                                                        \
+    /* Verify speculative enqueue, maybe switch current next frame */   \
+    vlib_validate_buffer_enqueue_x1 (vm, node, next_index,              \
+                                     to_next, n_left_to_next,           \
+                                     bi0, next0);                       \
+  }while(0)
+
+/*
+ * Two-buffer variant of face_output_x1. Requires n_left_from >= 4 (it
+ * prefetches from[2] and from[3] unconditionally).
+ */
+#define face_output_x2(ipv)                                             \
+  do {                                                                  \
+    vlib_buffer_t *b0, *b1;                                             \
+    u32 bi0, bi1;                                                       \
+    u32 next0 = IP_LOOKUP_NEXT_DROP;                                    \
+    u32 next1 = IP_LOOKUP_NEXT_DROP;                                    \
+    hicn_face_t *face0, *face1;                                         \
+                                                                        \
+    /* Prefetch for next iteration. */                                  \
+    {                                                                   \
+      vlib_buffer_t *b2, *b3;                                           \
+      b2 = vlib_get_buffer (vm, from[2]);                               \
+      b3 = vlib_get_buffer (vm, from[3]);                               \
+      CLIB_PREFETCH (b2, CLIB_CACHE_LINE_BYTES, STORE);                 \
+      CLIB_PREFETCH (b3, CLIB_CACHE_LINE_BYTES, STORE);                 \
+      CLIB_PREFETCH (b2->data, CLIB_CACHE_LINE_BYTES , STORE);          \
+      CLIB_PREFETCH (b3->data, CLIB_CACHE_LINE_BYTES , STORE);          \
+    }                                                                   \
+    /* Dequeue a packet buffer */                                       \
+    bi0 = from[0];                                                      \
+    bi1 = from[1];                                                      \
+    from += 2;                                                          \
+    n_left_from -= 2;                                                   \
+    to_next[0] = bi0;                                                   \
+    to_next[1] = bi1;                                                   \
+    to_next += 2;                                                       \
+    n_left_to_next -= 2;                                                \
+                                                                        \
+    b0 = vlib_get_buffer (vm, bi0);                                     \
+    b1 = vlib_get_buffer (vm, bi1);                                     \
+                                                                        \
+    face0 =                                                             \
+      hicn_dpoi_get_from_idx (vnet_buffer (b0)->ip.adj_index[VLIB_TX]); \
+    face1 =                                                             \
+      hicn_dpoi_get_from_idx (vnet_buffer (b1)->ip.adj_index[VLIB_TX]); \
+                                                                        \
+    if (PREDICT_TRUE(face0 != NULL))                                    \
+      {                                                                 \
+        hicn_face_rewrite_interest                                      \
+          (vm, b0, face0, &next0);                                      \
+        stats.pkts_interest_count += 1;                                 \
+      }                                                                 \
+                                                                        \
+    if (PREDICT_TRUE(face1 != NULL))                                    \
+      {                                                                 \
+        hicn_face_rewrite_interest                                      \
+          (vm, b1, face1, &next1);                                      \
+        stats.pkts_interest_count += 1;                                 \
+      }                                                                 \
+                                                                        \
+    if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) &&          \
+                       (b0->flags & VLIB_BUFFER_IS_TRACED)))            \
+      {                                                                 \
+        TRACE_OUTPUT_PKT_IP##ipv *t =                                   \
+          vlib_add_trace (vm, node, b0, sizeof (*t));                   \
+        t->pkt_type = HICN_PKT_TYPE_INTEREST;                           \
+        t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];        \
+        t->next_index = next0;                                          \
+      }                                                                 \
+                                                                        \
+    if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) &&          \
+                       (b1->flags & VLIB_BUFFER_IS_TRACED)))            \
+      {                                                                 \
+        TRACE_OUTPUT_PKT_IP##ipv *t =                                   \
+          vlib_add_trace (vm, node, b1, sizeof (*t));                   \
+        t->pkt_type = HICN_PKT_TYPE_INTEREST;                           \
+        t->sw_if_index = vnet_buffer (b1)->sw_if_index[VLIB_RX];        \
+        t->next_index = next1;                                          \
+      }                                                                 \
+                                                                        \
+    /* Verify speculative enqueue, maybe switch current next frame */   \
+    vlib_validate_buffer_enqueue_x2 (vm, node, next_index,              \
+                                     to_next, n_left_to_next,           \
+                                     bi0, bi1, next0, next1);           \
+  }while(0)
+
+
+/**
+ * @brief Node function for interests leaving through an IPv4 hICN face.
+ *
+ * Rewrites each interest for its face via the face_output_x{2,1} macros
+ * and bumps the interest counter.
+ */
+static uword
+hicn_face_ip4_output_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
+			      vlib_frame_t * frame)
+{
+  u32 n_left_from, *from, *to_next, next_index;
+  vl_api_hicn_api_node_stats_get_reply_t stats = { 0 };
+
+  from = vlib_frame_vector_args (frame);
+  n_left_from = frame->n_vectors;
+  next_index = node->cached_next_index;
+
+  while (n_left_from > 0)
+    {
+      u32 n_left_to_next;
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+      /* Dual loop, X2 */
+      while (n_left_from >= 4 && n_left_to_next >= 2)
+	{
+	  face_output_x2 (4);
+	}
+
+      /* Single loop, X1 */
+      while (n_left_from > 0 && n_left_to_next > 0)
+	{
+	  face_output_x1 (4);
+	}
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+    }
+
+  vlib_node_increment_counter (vm, node->node_index,
+			       HICNFWD_ERROR_INTERESTS,
+			       stats.pkts_interest_count);
+
+  return (frame->n_vectors);
+}
+
+/* Render a hicn-face-ip4-output trace record as a human-readable line. */
+static u8 *
+hicn_face_ip4_output_format_trace (u8 * s, va_list * args)
+{
+  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+  hicn_face_ip4_output_trace_t *trace =
+    va_arg (*args, hicn_face_ip4_output_trace_t *);
+
+  return format (s, "FACE_IP4_OUTPUT: pkt: %d, sw_if_index %d, next index %d",
+		 (int) trace->pkt_type, trace->sw_if_index,
+		 trace->next_index);
+}
+
+/*
+ * Node registration for hicn-face-ip4-output (interest side of a face).
+ */
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE(hicn_face_ip4_output_node) =
+{
+  .function = hicn_face_ip4_output_node_fn,
+  .name = "hicn-face-ip4-output",
+  .vector_size = sizeof(u32),
+  .format_trace = hicn_face_ip4_output_format_trace,
+  .type = VLIB_NODE_TYPE_INTERNAL,
+  .n_errors = ARRAY_LEN(hicn_face_ip4_output_error_strings),
+  .error_strings = hicn_face_ip4_output_error_strings,
+  .n_next_nodes = IP4_LOOKUP_N_NEXT,
+  /* Reusing the list of nodes from lookup to be compatible with arp */
+  .next_nodes = IP4_LOOKUP_NEXT_NODES,
+};
+/* *INDENT-ON* */
+
+
+/**
+ * @brief Node function for interests leaving through an IPv6 hICN face.
+ * @see hicn_face_ip4_output_node_fn
+ */
+static uword
+hicn_face_ip6_output_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
+			      vlib_frame_t * frame)
+{
+  u32 n_left_from, *from, *to_next, next_index;
+  vl_api_hicn_api_node_stats_get_reply_t stats = { 0 };
+
+  from = vlib_frame_vector_args (frame);
+  n_left_from = frame->n_vectors;
+  next_index = node->cached_next_index;
+
+  while (n_left_from > 0)
+    {
+      u32 n_left_to_next;
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+      /* Dual loop, X2 */
+      while (n_left_from >= 4 && n_left_to_next >= 2)
+	{
+	  face_output_x2 (6);
+	}
+
+      /* Single loop, X1 */
+      while (n_left_from > 0 && n_left_to_next > 0)
+	{
+	  face_output_x1 (6);
+	}
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+    }
+
+  vlib_node_increment_counter (vm, node->node_index,
+			       HICNFWD_ERROR_INTERESTS,
+			       stats.pkts_interest_count);
+
+  return (frame->n_vectors);
+}
+
+/* Render a hicn-face-ip6-output trace record as a human-readable line. */
+static u8 *
+hicn_face_ip6_output_format_trace (u8 * s, va_list * args)
+{
+  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+  hicn_face_ip6_output_trace_t *trace =
+    va_arg (*args, hicn_face_ip6_output_trace_t *);
+
+  return format (s, "FACE_IP6_OUTPUT: pkt: %d, sw_if_index %d, next index %d",
+		 (int) trace->pkt_type, trace->sw_if_index,
+		 trace->next_index);
+}
+
+/*
+ * Node registration for hicn-face-ip6-output (interest side of a face).
+ */
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE(hicn_face_ip6_output_node) =
+{
+  .function = hicn_face_ip6_output_node_fn,
+  .name = "hicn-face-ip6-output",
+  .vector_size = sizeof(u32),
+  .format_trace = hicn_face_ip6_output_format_trace,
+  .type = VLIB_NODE_TYPE_INTERNAL,
+  .n_errors = ARRAY_LEN(hicn_face_ip6_output_error_strings),
+  .error_strings = hicn_face_ip6_output_error_strings,
+  .n_next_nodes = IP6_LOOKUP_N_NEXT,
+  /* Reusing the list of nodes from lookup to be compatible with neighbour discovery */
+  .next_nodes = IP6_LOOKUP_NEXT_NODES,
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/hicn-plugin/src/faces/ip/face_ip_node.h b/hicn-plugin/src/faces/ip/face_ip_node.h
new file mode 100755
index 000000000..000395a04
--- /dev/null
+++ b/hicn-plugin/src/faces/ip/face_ip_node.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HICN_FACE_IP_NODE_H__
+#define __HICN_FACE_IP_NODE_H__
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+
+/* Graph nodes implementing input/output for IPv4/IPv6 hICN faces. */
+extern vlib_node_registration_t hicn_face_ip4_input_node;
+extern vlib_node_registration_t hicn_face_ip4_output_node;
+extern vlib_node_registration_t hicn_face_ip6_input_node;
+extern vlib_node_registration_t hicn_face_ip6_output_node;
+
+/**
+ * @brief Initialize the ip face module
+ */
+void hicn_face_ip_init (vlib_main_t * vm);
+
+#endif // __HICN_FACE_IP_NODE_H__
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/hicn-plugin/src/faces/ip/iface_ip_node.c b/hicn-plugin/src/faces/ip/iface_ip_node.c
new file mode 100755
index 000000000..8df0467f0
--- /dev/null
+++ b/hicn-plugin/src/faces/ip/iface_ip_node.c
@@ -0,0 +1,845 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <hicn/hicn.h>
+#include "face_ip.h"
+#include "dpo_ip.h"
+#include "../../strategy_dpo_manager.h"
+#include "../face.h"
+#include "../../infra.h"
+#include "../../cache_policies/cs_lru.h"
+
+/**
+ * @File
+ *
+ * Definition of the nodes for ip incomplete faces.
+ */
+
+/* Forward declarations of the iface (incomplete face) graph nodes. */
+vlib_node_registration_t hicn_iface_ip4_input_node;
+vlib_node_registration_t hicn_iface_ip4_output_node;
+vlib_node_registration_t hicn_iface_ip6_input_node;
+vlib_node_registration_t hicn_iface_ip6_output_node;
+
+/* vlib edges toward the iface output nodes, cached by hicn_iface_ip_init. */
+u32 data_fwd_iface_ip4_vlib_edge;
+u32 data_fwd_iface_ip6_vlib_edge;
+
+/**
+ * @brief Hook the iface-ip output nodes into the hICN forwarding graph.
+ *
+ * Registers hicn-iface-ip{4,6}-output as next nodes of both
+ * hicn-interest-hitcs and hicn-data-fwd and caches the resulting vlib
+ * edges. The two registrations for the same output node must yield the
+ * same edge index, since the cached edge is used from both graph nodes.
+ */
+void
+hicn_iface_ip_init (vlib_main_t * vm)
+{
+  u32 temp_index4 = vlib_node_add_next (vm,
+					hicn_interest_hitcs_node.index,
+					hicn_iface_ip4_output_node.index);
+  u32 temp_index6 = vlib_node_add_next (vm,
+					hicn_interest_hitcs_node.index,
+					hicn_iface_ip6_output_node.index);
+
+  data_fwd_iface_ip4_vlib_edge = vlib_node_add_next (vm,
+						     hicn_data_fwd_node.index,
+						     hicn_iface_ip4_output_node.
+						     index);
+
+  data_fwd_iface_ip6_vlib_edge = vlib_node_add_next (vm,
+						     hicn_data_fwd_node.index,
+						     hicn_iface_ip6_output_node.
+						     index);
+
+  ASSERT (temp_index4 == data_fwd_iface_ip4_vlib_edge);
+  ASSERT (temp_index6 == data_fwd_iface_ip6_vlib_edge);
+
+  /* ASSERT compiles away in release builds; keep the temporaries
+   * referenced so they do not trigger set-but-unused warnings. */
+  (void) temp_index4;
+  (void) temp_index6;
+}
+
+/* Per-node error/counter strings, expanded from foreach_hicnfwd_error. */
+static char *hicn_iface_ip4_input_error_strings[] = {
+#define _(sym, string) string,
+  foreach_hicnfwd_error
+#undef _
+};
+
+static char *hicn_iface_ip6_input_error_strings[] = {
+#define _(sym, string) string,
+  foreach_hicnfwd_error
+#undef _
+};
+
+/* Trace record for packets through hicn-iface-ip4-input. */
+typedef struct
+{
+  u32 next_index;
+  u32 sw_if_index;
+  u8 pkt_type;
+} hicn_iface_ip4_input_trace_t;
+
+/* Next-node dispositions for hicn-iface-ip4-input. */
+typedef enum
+{
+  HICN_IFACE_IP4_INPUT_NEXT_INTEREST,
+  HICN_IFACE_IP4_INPUT_NEXT_MAPME,
+  HICN_IFACE_IP4_INPUT_NEXT_ERROR_DROP,
+  HICN_IFACE_IP4_INPUT_N_NEXT,
+} hicn_iface_ip4_input_next_t;
+
+/* Trace record for packets through hicn-iface-ip6-input. */
+typedef struct
+{
+  u32 next_index;
+  u32 sw_if_index;
+  u8 pkt_type;
+} hicn_iface_ip6_input_trace_t;
+
+/* Next-node dispositions for hicn-iface-ip6-input. */
+typedef enum
+{
+  HICN_IFACE_IP6_INPUT_NEXT_INTEREST,
+  HICN_IFACE_IP6_INPUT_NEXT_MAPME,
+  HICN_IFACE_IP6_INPUT_NEXT_ERROR_DROP,
+  HICN_IFACE_IP6_INPUT_N_NEXT,
+} hicn_iface_ip6_input_next_t;
+
+/*
+ * Per-IP-version aliases consumed by iface_input_x1/x2 via token pasting.
+ */
+#define NEXT_MAPME_IP4 HICN_IFACE_IP4_INPUT_NEXT_MAPME
+#define NEXT_MAPME_IP6 HICN_IFACE_IP6_INPUT_NEXT_MAPME
+
+/* BUGFIX: NEXT_INTEREST_IP4 previously expanded to the IP6 enumerator
+ * (HICN_IFACE_IP6_INPUT_NEXT_INTEREST). Both enumerators happen to be 0,
+ * so behavior was unaffected, but the reference was wrong and would
+ * silently break if the two next-node enums ever diverged. */
+#define NEXT_INTEREST_IP4 HICN_IFACE_IP4_INPUT_NEXT_INTEREST
+#define NEXT_INTEREST_IP6 HICN_IFACE_IP6_INPUT_NEXT_INTEREST
+
+/* Fetch the first v4/v6 address configured on the receiving interface
+ * (swif / swif0 / swif1 are expected in the expanding scope). */
+#define ADDRESS_IP4 ip_interface_address_t *ia = 0;ip4_address_t *local_address = ip4_interface_first_address(&ip4_main, swif, &ia)
+#define ADDRESS_IP6 ip6_address_t *local_address = ip6_interface_first_address(&ip6_main, swif)
+
+#define ADDRESSX2_IP4 ip_interface_address_t *ia0, *ia1; ia0 = ia1 = 0; \
+  ip4_address_t *local_address0 = ip4_interface_first_address(&ip4_main, swif0, &ia0); \
+  ip4_address_t *local_address1 = ip4_interface_first_address(&ip4_main, swif1, &ia1);
+
+#define ADDRESSX2_IP6 ip6_address_t *local_address0 = ip6_interface_first_address(&ip6_main, swif0); \
+  ip6_address_t *local_address1 = ip6_interface_first_address(&ip6_main, swif1);
+
+#define DPO_ADD_LOCK_IP4 hicn_dpo_ip4_add_and_lock_from_remote
+#define DPO_ADD_LOCK_IP6 hicn_dpo_ip6_add_and_lock_from_remote
+
+#define VLIB_EDGE_IP4 data_fwd_iface_ip4_vlib_edge
+#define VLIB_EDGE_IP6 data_fwd_iface_ip6_vlib_edge
+
+#define IP_HEADER_4 ip4_header_t
+#define IP_HEADER_6 ip6_header_t
+
+#define TRACE_INPUT_PKT_IP4 hicn_iface_ip4_input_trace_t
+#define TRACE_INPUT_PKT_IP6 hicn_iface_ip6_input_trace_t
+
+/*
+ * Process one incoming interest on an incomplete face: create/lock the
+ * face from the remote address, then steer the packet to interest PCS
+ * lookup (or to mapme-ctrl for ICMP). Captures vm, node, from, to_next,
+ * n_left_from, n_left_to_next, next_index and stats from the caller.
+ */
+#define iface_input_x1(ipv)                                             \
+  do {                                                                  \
+    vlib_buffer_t *b0;                                                  \
+    u32 bi0, next0;                                                     \
+    IP_HEADER_##ipv * ip_hdr = NULL;                                    \
+    hicn_buffer_t * hicnb0;                                             \
+    u32 swif;                                                           \
+    /* Prefetch for next iteration. */                                  \
+    if (n_left_from > 1)                                                \
+      {                                                                 \
+        vlib_buffer_t *b1;                                              \
+        b1 = vlib_get_buffer (vm, from[1]);                             \
+        CLIB_PREFETCH (b1, 2*CLIB_CACHE_LINE_BYTES, STORE);             \
+        CLIB_PREFETCH (b1->data, CLIB_CACHE_LINE_BYTES , LOAD);         \
+      }                                                                 \
+    /* Dequeue a packet buffer */                                       \
+    bi0 = from[0];                                                      \
+    from += 1;                                                          \
+    n_left_from -= 1;                                                   \
+    to_next[0] = bi0;                                                   \
+    to_next += 1;                                                       \
+    n_left_to_next -= 1;                                                \
+                                                                        \
+    b0 = vlib_get_buffer (vm, bi0);                                     \
+    hicnb0 = hicn_get_buffer(b0);                                       \
+    ip_hdr = (IP_HEADER_##ipv *) vlib_buffer_get_current(b0);           \
+                                                                        \
+    stats.pkts_interest_count += 1;                                     \
+                                                                        \
+    u8 is_icmp = ip_hdr->protocol == IPPROTO_ICMPV##ipv;                \
+                                                                        \
+    /* Branchless select: ICMP to mapme-ctrl, the rest to interest. */  \
+    next0 = is_icmp*NEXT_MAPME_IP##ipv +                                \
+      (1-is_icmp)*NEXT_INTEREST_IP##ipv;                                \
+                                                                        \
+    swif = vnet_buffer (b0)->sw_if_index[VLIB_RX];                      \
+                                                                        \
+    ADDRESS_IP##ipv;                                                    \
+                                                                        \
+    DPO_ADD_LOCK_IP##ipv                                                \
+      (&(hicnb0->face_dpo_id),                                          \
+       &hicnb0->is_appface,                                             \
+       local_address,                                                   \
+       &(ip_hdr->src_address),                                          \
+       vnet_buffer(b0)->sw_if_index[VLIB_RX],                           \
+       VLIB_EDGE_IP##ipv);                                              \
+                                                                        \
+    if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) &&          \
+                       (b0->flags & VLIB_BUFFER_IS_TRACED)))            \
+      {                                                                 \
+        TRACE_INPUT_PKT_IP##ipv *t =                                    \
+          vlib_add_trace (vm, node, b0, sizeof (*t));                   \
+        t->pkt_type = HICN_PKT_TYPE_INTEREST;                           \
+        t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];        \
+        t->next_index = next0;                                          \
+      }                                                                 \
+                                                                        \
+    /* Verify speculative enqueue, maybe switch current next frame */   \
+    vlib_validate_buffer_enqueue_x1 (vm, node, next_index,              \
+                                     to_next, n_left_to_next,           \
+                                     bi0, next0);                       \
+  }while(0)
+
+
+/*
+ * Two-buffer variant of iface_input_x1. Requires n_left_from >= 4 (it
+ * prefetches from[2] and from[3] unconditionally).
+ */
+#define iface_input_x2(ipv)                                             \
+  do {                                                                  \
+    vlib_buffer_t *b0, *b1;                                             \
+    u32 bi0, bi1, next0, next1;                                         \
+    IP_HEADER_##ipv * ip_hdr0 = NULL;                                   \
+    IP_HEADER_##ipv * ip_hdr1 = NULL;                                   \
+    hicn_buffer_t *hicnb0, *hicnb1;                                     \
+    u32 swif0, swif1;                                                   \
+                                                                        \
+    /* Prefetch for next iteration. */                                  \
+    vlib_buffer_t *b2, *b3;                                             \
+    b2 = vlib_get_buffer (vm, from[2]);                                 \
+    b3 = vlib_get_buffer (vm, from[3]);                                 \
+    CLIB_PREFETCH (b2, 2*CLIB_CACHE_LINE_BYTES, STORE);                 \
+    CLIB_PREFETCH (b3, 2*CLIB_CACHE_LINE_BYTES, STORE);                 \
+    CLIB_PREFETCH (b2->data, CLIB_CACHE_LINE_BYTES , LOAD);             \
+    CLIB_PREFETCH (b3->data, CLIB_CACHE_LINE_BYTES , LOAD);             \
+                                                                        \
+    /* Dequeue a packet buffer */                                       \
+    bi0 = from[0];                                                      \
+    bi1 = from[1];                                                      \
+    from += 2;                                                          \
+    n_left_from -= 2;                                                   \
+    to_next[0] = bi0;                                                   \
+    to_next[1] = bi1;                                                   \
+    to_next += 2;                                                       \
+    n_left_to_next -= 2;                                                \
+                                                                        \
+    b0 = vlib_get_buffer (vm, bi0);                                     \
+    b1 = vlib_get_buffer (vm, bi1);                                     \
+    hicnb0 = hicn_get_buffer(b0);                                       \
+    hicnb1 = hicn_get_buffer(b1);                                       \
+    ip_hdr0 = (IP_HEADER_##ipv *) vlib_buffer_get_current(b0);          \
+    ip_hdr1 = (IP_HEADER_##ipv *) vlib_buffer_get_current(b1);          \
+                                                                        \
+    stats.pkts_interest_count += 2;                                     \
+                                                                        \
+    u8 is_icmp0 = ip_hdr0->protocol == IPPROTO_ICMPV##ipv;              \
+    u8 is_icmp1 = ip_hdr1->protocol == IPPROTO_ICMPV##ipv;              \
+                                                                        \
+    /* Branchless select: ICMP to mapme-ctrl, the rest to interest. */  \
+    next0 = is_icmp0*NEXT_MAPME_IP##ipv +                               \
+      (1-is_icmp0)*NEXT_INTEREST_IP##ipv;                               \
+                                                                        \
+    next1 = is_icmp1*NEXT_MAPME_IP##ipv +                               \
+      (1-is_icmp1)*NEXT_INTEREST_IP##ipv;                               \
+                                                                        \
+    swif0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];                     \
+    swif1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];                     \
+                                                                        \
+    ADDRESSX2_IP##ipv;                                                  \
+                                                                        \
+    DPO_ADD_LOCK_IP##ipv                                                \
+      (&(hicnb0->face_dpo_id),                                          \
+       &hicnb0->is_appface,                                             \
+       local_address0,                                                  \
+       &(ip_hdr0->src_address),                                         \
+       vnet_buffer(b0)->sw_if_index[VLIB_RX],                           \
+       VLIB_EDGE_IP##ipv);                                              \
+                                                                        \
+    DPO_ADD_LOCK_IP##ipv                                                \
+      (&(hicnb1->face_dpo_id),                                          \
+       &hicnb1->is_appface,                                             \
+       local_address1,                                                  \
+       &(ip_hdr1->src_address),                                         \
+       vnet_buffer(b1)->sw_if_index[VLIB_RX],                           \
+       VLIB_EDGE_IP##ipv);                                              \
+                                                                        \
+    if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) &&          \
+                       (b0->flags & VLIB_BUFFER_IS_TRACED)))            \
+      {                                                                 \
+        TRACE_INPUT_PKT_IP##ipv *t =                                    \
+          vlib_add_trace (vm, node, b0, sizeof (*t));                   \
+        t->pkt_type = HICN_PKT_TYPE_INTEREST;                           \
+        t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];        \
+        t->next_index = next0;                                          \
+      }                                                                 \
+                                                                        \
+    if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) &&          \
+                       (b1->flags & VLIB_BUFFER_IS_TRACED)))            \
+      {                                                                 \
+        TRACE_INPUT_PKT_IP##ipv *t =                                    \
+          vlib_add_trace (vm, node, b1, sizeof (*t));                   \
+        t->pkt_type = HICN_PKT_TYPE_INTEREST;                           \
+        t->sw_if_index = vnet_buffer (b1)->sw_if_index[VLIB_RX];        \
+        t->next_index = next1;                                          \
+      }                                                                 \
+                                                                        \
+    /* Verify speculative enqueue, maybe switch current next frame */   \
+    vlib_validate_buffer_enqueue_x2 (vm, node, next_index,              \
+                                     to_next, n_left_to_next,           \
+                                     bi0, bi1, next0, next1);           \
+  }while(0)
+
+/**
+ * @brief Node function for interests arriving on an IPv4 incomplete face.
+ *
+ * Creates/locks the face from the packet remote address via the
+ * iface_input_x{2,1} macros and bumps the interest counter.
+ */
+static uword
+hicn_iface_ip4_input_node_fn (vlib_main_t * vm,
+			      vlib_node_runtime_t * node,
+			      vlib_frame_t * frame)
+{
+  u32 n_left_from, *from, *to_next, next_index;
+
+  from = vlib_frame_vector_args (frame);
+  n_left_from = frame->n_vectors;
+  next_index = node->cached_next_index;
+  vl_api_hicn_api_node_stats_get_reply_t stats = { 0 };
+
+  while (n_left_from > 0)
+    {
+      u32 n_left_to_next;
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+      /* Dual loop, X2 */
+      while (n_left_from >= 4 && n_left_to_next >= 2)
+	{
+	  iface_input_x2 (4);
+	}
+
+      /* Single loop, X1 */
+      while (n_left_from > 0 && n_left_to_next > 0)
+	{
+	  iface_input_x1 (4);
+	}
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+    }
+
+  vlib_node_increment_counter (vm, node->node_index,
+			       HICNFWD_ERROR_INTERESTS,
+			       stats.pkts_interest_count);
+
+  return (frame->n_vectors);
+}
+
+/* Render one trace record captured by the IPv4 iface input node. */
+static u8 *
+hicn_iface_ip4_input_format_trace (u8 * s, va_list * args)
+{
+  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+  hicn_iface_ip4_input_trace_t *trace =
+    va_arg (*args, hicn_iface_ip4_input_trace_t *);
+
+  s = format (s, "IFACE_IP4_INPUT: pkt: %d, sw_if_index %d, next index %d",
+	      (int) trace->pkt_type, trace->sw_if_index, trace->next_index);
+  return s;
+}
+
+/*
+ * Node registration for the IPv4 iface input node (interest reception)
+ */
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (hicn_iface_ip4_input_node) =
+{
+  .function = hicn_iface_ip4_input_node_fn,
+  .name = "hicn-iface-ip4-input",
+  .vector_size = sizeof (u32),
+  .format_trace = hicn_iface_ip4_input_format_trace,
+  .type = VLIB_NODE_TYPE_INTERNAL,
+  .n_errors = ARRAY_LEN (hicn_iface_ip4_input_error_strings),
+  .error_strings = hicn_iface_ip4_input_error_strings,
+  .n_next_nodes = HICN_IFACE_IP4_INPUT_N_NEXT,
+  /* edit / add dispositions*/
+  .next_nodes =
+  {
+    [HICN_IFACE_IP4_INPUT_NEXT_INTEREST] = "hicn-interest-pcslookup",
+    [HICN_IFACE_IP4_INPUT_NEXT_MAPME] = "hicn-mapme-ctrl",
+    [HICN_IFACE_IP4_INPUT_NEXT_ERROR_DROP] = "error-drop",
+  },
+};
+/* *INDENT-ON* */
+
+/**
+ * @brief Main function of the IPv6 iface input node.
+ *
+ * Same processing as hicn_iface_ip4_input_node_fn, instantiated for IPv6
+ * through the iface_input_x1/x2 macros (argument 6).
+ */
+static uword
+hicn_iface_ip6_input_node_fn (vlib_main_t * vm,
+			      vlib_node_runtime_t * node,
+			      vlib_frame_t * frame)
+{
+  u32 n_left_from, *from, *to_next, next_index;
+
+  from = vlib_frame_vector_args (frame);
+  n_left_from = frame->n_vectors;
+  next_index = node->cached_next_index;
+  /* Per-call interest counter, flushed into the node counters below. */
+  vl_api_hicn_api_node_stats_get_reply_t stats = { 0 };
+
+  while (n_left_from > 0)
+    {
+      u32 n_left_to_next;
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+      /* Dual loop, X2 */
+      while (n_left_from >= 4 && n_left_to_next >= 2)
+	{
+	  iface_input_x2 (6);
+	}
+
+      /* Dual loop, X1 */
+      while (n_left_from > 0 && n_left_to_next > 0)
+	{
+	  iface_input_x1 (6);
+	}
+
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+    }
+
+  vlib_node_increment_counter (vm, node->node_index,
+			       HICNFWD_ERROR_INTERESTS,
+			       stats.pkts_interest_count);
+
+  return (frame->n_vectors);
+}
+
+/* Render one trace record captured by the IPv6 iface input node. */
+static u8 *
+hicn_iface_ip6_input_format_trace (u8 * s, va_list * args)
+{
+  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+  hicn_iface_ip6_input_trace_t *trace =
+    va_arg (*args, hicn_iface_ip6_input_trace_t *);
+
+  s = format (s, "IFACE_IP6_INPUT: pkt: %d, sw_if_index %d, next index %d",
+	      (int) trace->pkt_type, trace->sw_if_index, trace->next_index);
+  return s;
+}
+
+/*
+ * Node registration for the IPv6 iface input node (interest reception)
+ */
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (hicn_iface_ip6_input_node) =
+{
+  .function = hicn_iface_ip6_input_node_fn,
+  .name = "hicn-iface-ip6-input",
+  .vector_size = sizeof (u32),
+  .format_trace = hicn_iface_ip6_input_format_trace,
+  .type = VLIB_NODE_TYPE_INTERNAL,
+  .n_errors = ARRAY_LEN (hicn_iface_ip6_input_error_strings),
+  .error_strings = hicn_iface_ip6_input_error_strings,
+  .n_next_nodes = HICN_IFACE_IP6_INPUT_N_NEXT,
+  /* edit / add dispositions*/
+  .next_nodes =
+  {
+    [HICN_IFACE_IP6_INPUT_NEXT_INTEREST] = "hicn-interest-pcslookup",
+    [HICN_IFACE_IP6_INPUT_NEXT_MAPME] = "hicn-mapme-ctrl",
+    [HICN_IFACE_IP6_INPUT_NEXT_ERROR_DROP] = "error-drop",
+  },
+};
+/* *INDENT-ON* */
+
+
+/**** IFACE OUTPUT *****/
+
+/**
+ * @brief Rewrite the IPv4 header of a data packet sent through an iface.
+ *
+ * Fixes up total length and TTL, invalidates the cached TX adjacency and
+ * delegates the hICN-level rewrite (addresses, path label) to the protocol
+ * vft for the buffer's hicn type.
+ */
+static inline void
+hicn_rewrite_iface_data4 (vlib_main_t * vm, vlib_buffer_t * b0,
+			  const hicn_face_t * iface)
+{
+  ip4_header_t *ip0;
+
+  /* Get the pointer to the old ip and tcp header */
+  ip0 = vlib_buffer_get_current (b0);
+
+  /* Set up the ip4 header */
+  /* IP4 length contains the size of the ip4 header too */
+  u16 sval = (vlib_buffer_length_in_chain (vm, b0));
+  ip0->length = clib_host_to_net_u16 (sval);
+  ip0->ttl = 254;		// FIXME TTL
+
+  /* Invalidate the adjacency so the next lookup starts fresh. */
+  vnet_buffer (b0)->ip.adj_index[VLIB_TX] = ~0;
+  hicn_header_t *hicn = vlib_buffer_get_current (b0);
+
+  ip46_address_t temp_addr;
+  ip46_address_reset (&temp_addr);
+  hicn_face_ip_t *iface_ip = (hicn_face_ip_t *) iface->data;
+  hicn_type_t type = hicn_get_buffer (b0)->type;
+  hicn_ops_vft[type.l1]->rewrite_data (type, &hicn->protocol,
+				       &(iface_ip->remote_addr), &(temp_addr),
+				       iface->shared.pl_id);
+}
+
+/**
+ * @brief Rewrite the IPv6 header of a data packet sent through an iface.
+ *
+ * IPv6 counterpart of hicn_rewrite_iface_data4: fixes payload length and
+ * hop limit, invalidates the cached TX adjacency, then delegates the
+ * hICN-level rewrite to the protocol vft.
+ */
+static inline void
+hicn_rewrite_iface_data6 (vlib_main_t * vm, vlib_buffer_t * b0,
+			  const hicn_face_t * iface)
+{
+  ip6_header_t *ip0;
+
+  /* Get the pointer to the old ip and tcp header */
+  /* Copy the previous ip and tcp header to the new portion of memory */
+  ip0 = vlib_buffer_get_current (b0);
+
+  /* Set up the ip6 header */
+  /* IP6 length does not include the size of the ip6 header */
+  u16 sval = (vlib_buffer_length_in_chain (vm, b0) - (sizeof (ip6_header_t)));
+  ip0->payload_length = clib_host_to_net_u16 (sval);
+  ip0->hop_limit = HICN_IP6_HOP_LIMIT;
+
+  /* Invalidate the adjacency so the next lookup starts fresh. */
+  vnet_buffer (b0)->ip.adj_index[VLIB_TX] = ~0;
+  hicn_header_t *hicn = vlib_buffer_get_current (b0);
+
+  ip46_address_t temp_addr;
+  ip46_address_reset (&temp_addr);
+  hicn_face_ip_t *iface_ip = (hicn_face_ip_t *) iface->data;
+  hicn_type_t type = hicn_get_buffer (b0)->type;
+  hicn_ops_vft[type.l1]->rewrite_data (type, &hicn->protocol,
+				       &(iface_ip->remote_addr), &(temp_addr),
+				       iface->shared.pl_id);
+}
+
+/* Error strings for the iface output nodes, generated from the shared
+ * forwarder error list. */
+static char *hicn_iface_ip4_output_error_strings[] = {
+#define _(sym, string) string,
+  foreach_hicnfwd_error
+#undef _
+};
+
+static char *hicn_iface_ip6_output_error_strings[] = {
+#define _(sym, string) string,
+  foreach_hicnfwd_error
+#undef _
+};
+
+
+/* Trace context struct */
+typedef struct
+{
+  u32 next_index;
+  u32 sw_if_index;
+  u8 pkt_type;
+} hicn_iface_ip4_output_trace_t;
+
+/* Next-node dispositions for the IPv4 iface output node. */
+typedef enum
+{
+  HICN_IFACE_IP4_OUTPUT_NEXT_LOOKUP,
+  HICN_IFACE_IP4_OUTPUT_NEXT_ERROR_DROP,
+  HICN_IFACE_IP4_OUTPUT_N_NEXT,
+} hicn_iface_ip4_output_next_t;
+
+/* Trace context struct */
+typedef struct
+{
+  u32 next_index;
+  u32 sw_if_index;
+  u8 pkt_type;
+} hicn_iface_ip6_output_trace_t;
+
+/* Next-node dispositions for the IPv6 iface output node. */
+typedef enum
+{
+  HICN_IFACE_IP6_OUTPUT_NEXT_LOOKUP,
+  HICN_IFACE_IP6_OUTPUT_NEXT_ERROR_DROP,
+  HICN_IFACE_IP6_OUTPUT_N_NEXT,
+} hicn_iface_ip6_output_next_t;
+
+/* Per-ip-version aliases consumed by the iface_output_x1/x2 macros below,
+ * selected through token pasting on the ipv argument. */
+#define ERROR_OUTPUT_IP4 HICN_IFACE_IP4_OUTPUT_NEXT_ERROR_DROP
+#define ERROR_OUTPUT_IP6 HICN_IFACE_IP6_OUTPUT_NEXT_ERROR_DROP
+
+#define NEXT_DATA_LOOKUP_IP4 HICN_IFACE_IP4_OUTPUT_NEXT_LOOKUP
+#define NEXT_DATA_LOOKUP_IP6 HICN_IFACE_IP6_OUTPUT_NEXT_LOOKUP
+
+#define HICN_REWRITE_DATA_IP4 hicn_rewrite_iface_data4
+#define HICN_REWRITE_DATA_IP6 hicn_rewrite_iface_data6
+
+#define TRACE_OUTPUT_PKT_IP4 hicn_iface_ip4_output_trace_t
+#define TRACE_OUTPUT_PKT_IP6 hicn_iface_ip6_output_trace_t
+
+/* Single-packet iteration of the iface output loop: resolve the face from
+ * the TX adjacency, rewrite the packet for that face (or drop if the face
+ * is gone), trace, and enqueue. ipv selects the IPv4/IPv6 variants of the
+ * helper aliases. Relies on vm/node/from/to_next/next_index/n_left_*
+ * and stats being in scope at the expansion site. */
+#define iface_output_x1(ipv)                         \
+  do {                                               \
+  vlib_buffer_t *b0;                                 \
+  u32 bi0;                                           \
+  u32 next0 = ERROR_OUTPUT_IP##ipv;                  \
+  hicn_face_t * face;                                \
+                                                     \
+  /* Prefetch for next iteration. */                 \
+  if (n_left_from > 1)                               \
+    {                                                \
+      vlib_buffer_t *b1;                             \
+      b1 = vlib_get_buffer (vm, from[1]);            \
+      CLIB_PREFETCH (b1, CLIB_CACHE_LINE_BYTES, STORE);        \
+      CLIB_PREFETCH (b1->data, CLIB_CACHE_LINE_BYTES , STORE); \
+    }                                                \
+  /* Dequeue a packet buffer */                      \
+  bi0 = from[0];                                     \
+  from += 1;                                         \
+  n_left_from -= 1;                                  \
+  to_next[0] = bi0;                                  \
+  to_next += 1;                                      \
+  n_left_to_next -= 1;                               \
+                                                     \
+  b0 = vlib_get_buffer (vm, bi0);                    \
+                                                     \
+  face =                                             \
+    hicn_dpoi_get_from_idx (vnet_buffer (b0)->ip.adj_index[VLIB_TX]); \
+                                                     \
+  if (PREDICT_TRUE(face != NULL))                    \
+    {                                                \
+      HICN_REWRITE_DATA_IP##ipv                      \
+	(vm, b0, face);                              \
+      next0 = NEXT_DATA_LOOKUP_IP##ipv;              \
+      stats.pkts_data_count += 1;                    \
+    }                                                \
+                                                     \
+  if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) &&   \
+		     (b0->flags & VLIB_BUFFER_IS_TRACED)))     \
+    {                                                \
+      TRACE_OUTPUT_PKT_IP##ipv *t =                  \
+	vlib_add_trace (vm, node, b0, sizeof (*t));  \
+      t->pkt_type = HICN_PKT_TYPE_INTEREST;          \
+      t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX]; \
+      t->next_index = next0;                         \
+    }                                                \
+                                                     \
+                                                     \
+  /* Verify speculative enqueue, maybe switch current next frame */ \
+  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,       \
+				   to_next, n_left_to_next,    \
+				   bi0, next0);                \
+  }while(0);                                         \
+
+
+/* Two-packet unrolled iteration of the iface output loop; same logic as
+ * iface_output_x1 applied to a pair of buffers, with prefetch of the
+ * following pair. Callers guarantee n_left_from >= 4 so from[2]/from[3]
+ * are valid for the prefetch block. */
+#define iface_output_x2(ipv)                         \
+  do {                                               \
+    vlib_buffer_t *b0, *b1;                          \
+    u32 bi0, bi1;                                    \
+    u32 next0 = ERROR_OUTPUT_IP##ipv;                \
+    u32 next1 = ERROR_OUTPUT_IP##ipv;                \
+    hicn_face_t *face0, *face1;                      \
+                                                     \
+    /* Prefetch for next iteration. */               \
+    {                                                \
+      vlib_buffer_t *b2, *b3;                        \
+      b2 = vlib_get_buffer (vm, from[2]);            \
+      b3 = vlib_get_buffer (vm, from[3]);            \
+      CLIB_PREFETCH (b2, CLIB_CACHE_LINE_BYTES, STORE);        \
+      CLIB_PREFETCH (b3, CLIB_CACHE_LINE_BYTES, STORE);        \
+      CLIB_PREFETCH (b2->data, CLIB_CACHE_LINE_BYTES , STORE); \
+      CLIB_PREFETCH (b3->data, CLIB_CACHE_LINE_BYTES , STORE); \
+    }                                                \
+                                                     \
+    /* Dequeue a packet buffer */                    \
+    bi0 = from[0];                                   \
+    bi1 = from[1];                                   \
+    from += 2;                                       \
+    n_left_from -= 2;                                \
+    to_next[0] = bi0;                                \
+    to_next[1] = bi1;                                \
+    to_next += 2;                                    \
+    n_left_to_next -= 2;                             \
+                                                     \
+    b0 = vlib_get_buffer (vm, bi0);                  \
+    b1 = vlib_get_buffer (vm, bi1);                  \
+                                                     \
+    face0 =                                          \
+      hicn_dpoi_get_from_idx (vnet_buffer (b0)->ip.adj_index[VLIB_TX]); \
+    face1 =                                          \
+      hicn_dpoi_get_from_idx (vnet_buffer (b1)->ip.adj_index[VLIB_TX]); \
+                                                     \
+    if (PREDICT_TRUE(face0 != NULL))                 \
+      {                                              \
+	HICN_REWRITE_DATA_IP##ipv                    \
+	  (vm, b0, face0);                           \
+	next0 = NEXT_DATA_LOOKUP_IP##ipv;            \
+	stats.pkts_data_count += 1;                  \
+      }                                              \
+                                                     \
+    if (PREDICT_TRUE(face1 != NULL))                 \
+      {                                              \
+	HICN_REWRITE_DATA_IP##ipv                    \
+	  (vm, b1, face1);                           \
+	next1 = NEXT_DATA_LOOKUP_IP##ipv;            \
+	stats.pkts_data_count += 1;                  \
+      }                                              \
+                                                     \
+    if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) &&   \
+		       (b0->flags & VLIB_BUFFER_IS_TRACED)))     \
+      {                                              \
+	TRACE_OUTPUT_PKT_IP##ipv *t =                \
+	  vlib_add_trace (vm, node, b0, sizeof (*t));           \
+	t->pkt_type = HICN_PKT_TYPE_INTEREST;        \
+	t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX]; \
+	t->next_index = next0;                       \
+      }                                              \
+                                                     \
+    if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) &&   \
+		       (b1->flags & VLIB_BUFFER_IS_TRACED)))     \
+      {                                              \
+	TRACE_OUTPUT_PKT_IP##ipv *t =                \
+	  vlib_add_trace (vm, node, b1, sizeof (*t));           \
+	t->pkt_type = HICN_PKT_TYPE_INTEREST;        \
+	t->sw_if_index = vnet_buffer (b1)->sw_if_index[VLIB_RX]; \
+	t->next_index = next1;                       \
+      }                                              \
+                                                     \
+                                                     \
+    /* Verify speculative enqueue, maybe switch current next frame */ \
+    vlib_validate_buffer_enqueue_x2 (vm, node, next_index,     \
+				     to_next, n_left_to_next,  \
+				     bi0, bi1, next0, next1);  \
+  }while(0);                                         \
+
+
+
+/**
+ * @brief Main function of the IPv4 iface output node.
+ *
+ * Rewrites outgoing data packets for their destination iface (see the
+ * iface_output_x1/x2 macros) and hands them to ip4-lookup, dropping
+ * packets whose face can no longer be resolved.
+ */
+static uword
+hicn_iface_ip4_output_node_fn (vlib_main_t * vm,
+			       vlib_node_runtime_t * node,
+			       vlib_frame_t * frame)
+{
+  u32 n_left_from, *from, *to_next, next_index;
+  /* Per-call data counter, flushed into the node counters below. */
+  vl_api_hicn_api_node_stats_get_reply_t stats = { 0 };
+
+  from = vlib_frame_vector_args (frame);
+  n_left_from = frame->n_vectors;
+  next_index = node->cached_next_index;
+
+  while (n_left_from > 0)
+    {
+      u32 n_left_to_next;
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+      while (n_left_from >= 4 && n_left_to_next >= 2)
+	{
+	  iface_output_x2 (4);
+	}
+
+      while (n_left_from > 0 && n_left_to_next > 0)
+	{
+	  iface_output_x1 (4);
+	}
+
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+    }
+
+  vlib_node_increment_counter (vm, node->node_index,
+			       HICNFWD_ERROR_DATAS, stats.pkts_data_count);
+
+  return (frame->n_vectors);
+}
+
+/* Render one trace record captured by the IPv4 iface output node. */
+static u8 *
+hicn_iface_ip4_output_format_trace (u8 * s, va_list * args)
+{
+  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+  hicn_iface_ip4_output_trace_t *trace =
+    va_arg (*args, hicn_iface_ip4_output_trace_t *);
+
+  s = format (s, "IFACE_IP4_OUTPUT: pkt: %d, sw_if_index %d, next index %d",
+	      (int) trace->pkt_type, trace->sw_if_index, trace->next_index);
+  return s;
+}
+
+/*
+ * Node registration for the IPv4 iface output node (data transmission)
+ */
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (hicn_iface_ip4_output_node) =
+{
+  .function = hicn_iface_ip4_output_node_fn,
+  .name = "hicn-iface-ip4-output",
+  .vector_size = sizeof (u32),
+  .format_trace = hicn_iface_ip4_output_format_trace,
+  .type = VLIB_NODE_TYPE_INTERNAL,
+  .n_errors = ARRAY_LEN (hicn_iface_ip4_output_error_strings),
+  .error_strings = hicn_iface_ip4_output_error_strings,
+  .n_next_nodes = HICN_IFACE_IP4_OUTPUT_N_NEXT,
+  /* edit / add dispositions here */
+  .next_nodes =
+  {
+    [HICN_IFACE_IP4_OUTPUT_NEXT_LOOKUP] = "ip4-lookup",
+    [HICN_IFACE_IP4_OUTPUT_NEXT_ERROR_DROP] = "error-drop",
+  },
+};
+/* *INDENT-ON* */
+
+
+/**
+ * @brief Main function of the IPv6 iface output node.
+ *
+ * IPv6 counterpart of hicn_iface_ip4_output_node_fn; forwards rewritten
+ * data packets to ip6-lookup.
+ */
+static uword
+hicn_iface_ip6_output_node_fn (vlib_main_t * vm,
+			       vlib_node_runtime_t * node,
+			       vlib_frame_t * frame)
+{
+  u32 n_left_from, *from, *to_next, next_index;
+  /* Per-call data counter, flushed into the node counters below. */
+  vl_api_hicn_api_node_stats_get_reply_t stats = { 0 };
+
+  from = vlib_frame_vector_args (frame);
+  n_left_from = frame->n_vectors;
+  next_index = node->cached_next_index;
+
+  while (n_left_from > 0)
+    {
+      u32 n_left_to_next;
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+      while (n_left_from >= 4 && n_left_to_next >= 2)
+	{
+	  iface_output_x2 (6);
+	}
+
+      while (n_left_from > 0 && n_left_to_next > 0)
+	{
+	  iface_output_x1 (6);
+	}
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+    }
+
+  vlib_node_increment_counter (vm, node->node_index,
+			       HICNFWD_ERROR_DATAS, stats.pkts_data_count);
+
+  return (frame->n_vectors);
+}
+
+/* Render one trace record captured by the IPv6 iface output node. */
+static u8 *
+hicn_iface_ip6_output_format_trace (u8 * s, va_list * args)
+{
+  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+  hicn_iface_ip6_output_trace_t *trace =
+    va_arg (*args, hicn_iface_ip6_output_trace_t *);
+
+  s = format (s, "IFACE_IP6_OUTPUT: pkt: %d, sw_if_index %d, next index %d",
+	      (int) trace->pkt_type, trace->sw_if_index, trace->next_index);
+  return s;
+}
+
+/*
+ * Node registration for the IPv6 iface output node (data transmission)
+ */
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (hicn_iface_ip6_output_node) =
+{
+  .function = hicn_iface_ip6_output_node_fn,
+  .name = "hicn-iface-ip6-output",
+  .vector_size = sizeof (u32),
+  .format_trace = hicn_iface_ip6_output_format_trace,
+  .type = VLIB_NODE_TYPE_INTERNAL,
+  .n_errors = ARRAY_LEN (hicn_iface_ip6_output_error_strings),
+  .error_strings = hicn_iface_ip6_output_error_strings,
+  .n_next_nodes = HICN_IFACE_IP6_OUTPUT_N_NEXT,
+  /* edit / add dispositions here */
+  .next_nodes =
+  {
+    [HICN_IFACE_IP6_OUTPUT_NEXT_LOOKUP] = "ip6-lookup",
+    [HICN_IFACE_IP6_OUTPUT_NEXT_ERROR_DROP] = "error-drop",
+  },
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/hicn-plugin/src/faces/ip/iface_ip_node.h b/hicn-plugin/src/faces/ip/iface_ip_node.h
new file mode 100755
index 000000000..36923f069
--- /dev/null
+++ b/hicn-plugin/src/faces/ip/iface_ip_node.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HICN_IFACE_IP_NODE_H__
+#define __HICN_IFACE_IP_NODE_H__
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+
+/**
+ * @brief Initialize the ip iface module
+ */
+void hicn_iface_ip_init (vlib_main_t * vm);
+
+#endif // __HICN_IFACE_IP_NODE_H__
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/hicn-plugin/src/faces/udp/dpo_udp.c b/hicn-plugin/src/faces/udp/dpo_udp.c
new file mode 100755
index 000000000..e58fc9788
--- /dev/null
+++ b/hicn-plugin/src/faces/udp/dpo_udp.c
@@ -0,0 +1,158 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "dpo_udp.h"
+
+#include <vnet/ip/format.h>
+#include <vnet/adj/adj.h>
+#include <vnet/vnet.h>
+#include <vlib/vlib.h>
+
+/* NULL-terminated lists of graph-node names attached to the udp face dpo
+ * type at registration time, per dpo protocol. */
+const static char *const hicn_face_ip4udp_nodes[] = {
+  "hicn-face-encap-udp4",
+  "hicn-face-decap-udp4",
+  "hicn-iface-decap-udp4",
+  "hicn-iface-encap-udp4",
+  NULL,
+};
+
+const static char *const hicn_face_ip6udp_nodes[] = {
+  "hicn-face-encap-udp6",
+  "hicn-face-decap-udp6",
+  "hicn-iface-decap-udp6",
+  "hicn-iface-encap-udp6",
+  NULL,
+};
+
+const static char *const *const hicn_ipudp_nodes[DPO_PROTO_NUM] = {
+  [DPO_PROTO_IP4] = hicn_face_ip4udp_nodes,
+  [DPO_PROTO_IP6] = hicn_face_ip6udp_nodes
+};
+
+
+/* Virtual function table for the udp face dpo type: lock/unlock map to the
+ * generic face reference counting, format to the udp face formatter. */
+const static dpo_vft_t hicn_dpoi_udp_vft = {
+  .dv_lock = hicn_face_lock,
+  .dv_unlock = hicn_face_unlock,
+  .dv_format = format_hicn_face_udp,
+};
+
+/* Must be executed after all the strategy nodes are created */
+void
+hicn_dpo_udp_module_init (void)
+{
+  /* Hash table mapping (local/remote address, local/remote port) keys to
+   * face indices; ports are stored in network byte order. */
+  mhash_init (&hicn_face_udp_hashtb, sizeof (hicn_face_id_t) /* value */ ,
+	      sizeof (hicn_face_udp_key_t) /* key */ );
+
+  /*
+   * Register the udp face dpo type; currently the registration is mainly
+   * needed to obtain the dpo_type value used throughout this module.
+   */
+  hicn_face_udp_type =
+    dpo_register_new_type (&hicn_dpoi_udp_vft, hicn_ipudp_nodes);
+}
+
+
+/* Here udp ports are in host order, move them to network order to do the lookup */
+int
+hicn_dpo_udp4_create (dpo_id_t * dpo,
+ const ip4_address_t * src_ip,
+ const ip4_address_t * dst_ip,
+ u16 src_port, u16 dst_port,
+ u32 sw_if,
+ adj_index_t ip_adj,
+ u32 node_index,
+ hicn_face_flags_t flags, hicn_face_id_t * face_id)
+{
+ u16 net_src_port = clib_host_to_net_u16 (src_port);
+ u16 net_dst_port = clib_host_to_net_u16 (dst_port);
+ hicn_face_t *face = hicn_face_udp4_get (src_ip, dst_ip, src_port, dst_port);
+
+ u8 is_appface;
+ /* ip_csum_t sum0; */
+
+ if (face != NULL)
+ return HICN_ERROR_FACE_ALREADY_CREATED;
+
+ hicn_dpo_udp4_add_and_lock (dpo, src_ip, dst_ip, net_src_port, net_dst_port,
+ node_index, &is_appface);
+
+ face = hicn_dpoi_get_from_idx (dpo->dpoi_index);
+
+ hicn_face_udp_t *udp_face = (hicn_face_udp_t *) face->data;
+
+ udp_face->hdrs.ip4.ip.checksum =
+ ip4_header_checksum (&(udp_face->hdrs.ip4.ip));
+
+ face->shared.flags = flags;
+ face->shared.adj = ip_adj;
+ face->shared.sw_if = sw_if;
+ *face_id = hicn_dpoi_get_index (face);
+
+ return HICN_ERROR_NONE;
+}
+
+
+/* Control-plane creation of a udp6 face; ports arrive in host order and are
+ * converted to network order before the lookup, matching the hash keys.
+ * NOTE(review): no checksum precompute here, unlike the udp4 variant —
+ * ip6 headers carry no checksum field. */
+int
+hicn_dpo_udp6_create (dpo_id_t * dpo,
+		      const ip6_address_t * src_ip,
+		      const ip6_address_t * dst_ip,
+		      u16 src_port, u16 dst_port,
+		      u32 sw_if,
+		      adj_index_t ip_adj,
+		      u32 node_index,
+		      hicn_face_flags_t flags, hicn_face_id_t * face_id)
+{
+  u16 net_src_port = clib_host_to_net_u16 (src_port);
+  u16 net_dst_port = clib_host_to_net_u16 (dst_port);
+  hicn_face_t *face =
+    hicn_face_udp6_get (src_ip, dst_ip, net_src_port, net_dst_port);
+  u8 is_appface;
+
+  if (face != NULL)
+    return HICN_ERROR_FACE_ALREADY_CREATED;
+
+  hicn_dpo_udp6_add_and_lock (dpo, src_ip, dst_ip, net_src_port, net_dst_port,
+			      node_index, &is_appface);
+
+  face = hicn_dpoi_get_from_idx (dpo->dpoi_index);
+
+  face->shared.flags = flags;
+  face->shared.adj = ip_adj;
+  face->shared.sw_if = sw_if;
+  *face_id = hicn_dpoi_get_index (face);
+
+  return HICN_ERROR_NONE;
+}
+
+/* Build a dpo pointing at an existing udp face; the dpo protocol is chosen
+ * from the ip version nibble of the face's stored ip4 header. */
+void
+hicn_dpo_udp_create_from_face (hicn_face_t * face, dpo_id_t * dpo,
+			       u16 dpoi_next_node)
+{
+  hicn_face_udp_t *face_udp = (hicn_face_udp_t *) face->data;
+  u8 ip_version =
+    (face_udp->hdrs.ip4.ip.ip_version_and_header_length & 0xf0) >> 4;
+  dpo_proto_t proto = (ip_version == 4) ? DPO_PROTO_IP4 : DPO_PROTO_IP6;
+
+  dpo_set (dpo, face->shared.face_type, proto, hicn_dpoi_get_index (face));
+  dpo->dpoi_next_node = dpoi_next_node;
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/hicn-plugin/src/faces/udp/dpo_udp.h b/hicn-plugin/src/faces/udp/dpo_udp.h
new file mode 100755
index 000000000..fdde4192b
--- /dev/null
+++ b/hicn-plugin/src/faces/udp/dpo_udp.h
@@ -0,0 +1,312 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HICN_DPO_UDP_H__
+#define __HICN_DPO_UDP_H__
+
+#include <vnet/adj/adj_types.h>
+#include <vnet/ip/ip4_packet.h>
+#include <vnet/ip/ip6_packet.h>
+
+#include "face_udp.h"
+#include "../face.h"
+#include "../../error.h"
+
+
+/**
+ * @brief Initialize the internal structures of the dpo udp face module.
+ */
+void hicn_dpo_udp_module_init (void);
+
+/**
+ * @brief Create a udp face and its corresponding dpo. Meant to be used for the
+ * control plane.
+ *
+ * @param dpo: Data plane object that point to the face created.
+ * @param local_addr: Local address of the UDP tunnel
+ * @param remote_addr: Remote address of the UDP tunnel
+ * @param local_port: Local port of the UDP tunnel
+ * @param remote_port: Remote port of the UDP tunnel
+ * @param adj: Ip adjacency corresponding to the remote address in the face
+ * @param node_index: vlib edge index to use in the packet processing
+ * @param flags: Flags of the face
+ * @param face_id: Identifier for the face (dpoi_index)
+ * @return HICN_ERROR_FACE_ALREADY_CREATED if the face exists, otherwise HICN_ERROR_NONE
+ */
+int
+hicn_dpo_udp4_create (dpo_id_t * dpo,
+ const ip4_address_t * local_addr,
+ const ip4_address_t * remote_addr,
+ u16 local_port, u16 remote_port,
+ u32 sw_if,
+ adj_index_t adj,
+ u32 node_index,
+ hicn_face_flags_t flags, hicn_face_id_t * face_id);
+
+/**
+ * @brief Retrieve a face using the face identifier, i.e., the quadruplet (local_addr, remote_addr,
+ * local_port, remote_port). This method adds a lock on the face state.
+ *
+ * @param dpo: Result of the lookup. If the face doesn't exist dpo = NULL
+ * @param local_addr: Local address of the UDP tunnel
+ * @param remote_addr: Remote address of the UDP tunnel
+ * @param local_port: Local port of the UDP tunnel
+ * @param remote_port: Remote port of the UDP tunnel
+ * @param is_appface: Boolean that indicates whether the face is an application
+ * face or not. (Currently only IP faces can be appface)
+ *
+ * @result HICN_ERROR_FACE_NOT_FOUND if the face does not exist, otherwise HICN_ERROR_NONE.
+ */
+always_inline int
+hicn_dpo_udp4_lock (dpo_id_t * dpo,
+		    const ip4_address_t * local_addr,
+		    const ip4_address_t * remote_addr,
+		    u16 local_port, u16 remote_port, u8 * is_appface)
+{
+  /* NOTE(review): ports are passed straight to the hash lookup, so callers
+   * presumably supply them in network byte order — confirm against callers. */
+  hicn_face_t *face =
+    hicn_face_udp4_get (local_addr, remote_addr, local_port, remote_port);
+
+  if (PREDICT_FALSE (face == NULL))
+    return HICN_ERROR_FACE_NOT_FOUND;
+
+  index_t dpoi_index = hicn_dpoi_get_index (face);
+  dpo_set (dpo, hicn_face_udp_type, DPO_PROTO_IP4, dpoi_index);
+  dpo->dpoi_next_node = ~0;
+  dpo_lock (dpo);
+
+  /* udp faces are never application faces. */
+  *is_appface = 0;
+
+  return HICN_ERROR_NONE;
+}
+
+/**
+ * @brief Retrieve, or create if it doesn't exist, a face from the face
+ * identifier (local_addr, remote_addr, local_port, remote_port) and returns its
+ * dpo. This method adds a lock on the face state.
+ *
+ * @param dpo: Result of the lookup
+ * @param local_addr: Local address of the UDP tunnel
+ * @param remote_addr: Remote address of the UDP tunnel
+ * @param local_port: Local port of the UDP tunnel
+ * @param remote_port: Remote port of the UDP tunnel
+ * @param is_appface: Boolean that indicates whether the face is an application
+ * face or not. (Currently only IP faces can be appface)
+ * @param node_index: vlib edge index to use in the packet processing
+ */
+/* Look up the udp4 face for the given tunnel endpoints, creating it as an
+ * iface if missing, then fill dpo with a locked reference to it. */
+always_inline void
+hicn_dpo_udp4_add_and_lock (dpo_id_t * dpo,
+			    const ip4_address_t * local_addr,
+			    const ip4_address_t * remote_addr,
+			    u16 local_port, u16 remote_port,
+			    u32 node_index, u8 * is_appface)
+{
+  hicn_face_t *face =
+    hicn_face_udp4_get (local_addr, remote_addr, local_port, remote_port);
+
+  if (face == NULL)
+    {
+      pool_get (hicn_dpoi_face_pool, face);
+
+      hicn_face_udp_t *udp_face = (hicn_face_udp_t *) face->data;
+
+      /* Start from the skeleton header, then fill per-face fields. */
+      clib_memcpy (&(udp_face->hdrs.ip4.ip), &ip4_header_skl,
+		   sizeof (ip4_header_t));
+      clib_memcpy (&(udp_face->hdrs.ip4.ip.src_address), local_addr,
+		   sizeof (ip4_address_t));
+      clib_memcpy (&(udp_face->hdrs.ip4.ip.dst_address), remote_addr,
+		   sizeof (ip4_address_t));
+
+      udp_face->hdrs.ip4.udp.src_port = local_port;
+      udp_face->hdrs.ip4.udp.dst_port = remote_port;
+
+      face->shared.adj = ADJ_INDEX_INVALID;
+      face->shared.pl_id = (u16) 0;
+      face->shared.face_type = hicn_face_udp_type;
+      face->shared.flags = HICN_FACE_FLAGS_IFACE;
+      face->shared.locks = 0;
+
+      hicn_face_udp_key_t key;
+      hicn_face_udp4_get_key (local_addr, remote_addr, local_port,
+			      remote_port, &key);
+      hicn_face_id_t dpoi_index = hicn_dpoi_get_index (face);
+
+      mhash_set_mem (&hicn_face_udp_hashtb, &key, (uword *) & dpoi_index, 0);
+      /* removed dead self-assignment `face = face;` */
+
+      *is_appface = 0;
+      dpo_set (dpo, hicn_face_udp_type, DPO_PROTO_IP4, dpoi_index);
+      dpo->dpoi_next_node = node_index;
+      dpo_lock (dpo);
+
+      return;
+    }
+
+  /* udp faces are never application faces. */
+  *is_appface = 0;
+
+  hicn_face_id_t dpoi_index = hicn_dpoi_get_index (face);
+  dpo_set (dpo, hicn_face_udp_type, DPO_PROTO_IP4, dpoi_index);
+  dpo->dpoi_next_node = node_index;
+  dpo_lock (dpo);
+}
+
+/**
+ * @brief Create a udp face and its corresponding dpo. Meant to be used for the
+ * control plane.
+ *
+ * @param dpo: Data plane object that point to the face created.
+ * @param local_addr: Local address of the UDP tunnel
+ * @param remote_addr: Remote address of the UDP tunnel
+ * @param local_port: Local port of the UDP tunnel
+ * @param remote_port: Remote port of the UDP tunnel
+ * @param adj: Ip adjacency corresponding to the remote address in the face
+ * @param node_index: vlib edge index to use in the packet processing
+ * @param flags: Flags of the face
+ * @param face_id: Identifier for the face (dpoi_index)
+ * @return HICN_ERROR_FACE_ALREADY_CREATED if the face exists, otherwise HICN_ERROR_NONE
+ */
+int
+hicn_dpo_udp6_create (dpo_id_t * dpo,
+ const ip6_address_t * local_addr,
+ const ip6_address_t * remote_addr,
+ u16 local_port, u16 remote_port,
+ u32 sw_if,
+ adj_index_t adj,
+ u32 node_index,
+ hicn_face_flags_t flags, hicn_face_id_t * face_id);
+
+
+/**
+ * @brief Retrieve a face using the face identifier, i.e., the quadruplet (local_addr, remote_addr,
+ * local_port, remote_port). This method adds a lock on the face state.
+ *
+ * @param dpo: Result of the lookup. If the face doesn't exist dpo = NULL
+ * @param local_addr: Local address of the UDP tunnel
+ * @param remote_addr: Remote address of the UDP tunnel
+ * @param local_port: Local port of the UDP tunnel
+ * @param remote_port: Remote port of the UDP tunnel
+ * @param is_appface: Boolean that indicates whether the face is an application
+ * face or not. (Currently only IP faces can be appface)
+ *
+ * @result HICN_ERROR_FACE_NOT_FOUND if the face does not exist, otherwise HICN_ERROR_NONE.
+ */
+always_inline int
+hicn_dpo_udp6_lock (dpo_id_t * dpo,
+		    const ip6_address_t * local_addr,
+		    const ip6_address_t * remote_addr,
+		    u16 local_port, u16 remote_port, u8 * is_appface)
+{
+  hicn_face_t *face =
+    hicn_face_udp6_get (local_addr, remote_addr, local_port, remote_port);
+
+
+  if (PREDICT_FALSE (face == NULL))
+    return HICN_ERROR_FACE_NOT_FOUND;
+
+  hicn_face_id_t dpoi_index = hicn_dpoi_get_index (face);
+  /* BUGFIX: this is an IPv6 face, so the dpo must carry DPO_PROTO_IP6
+   * (the original used DPO_PROTO_IP4; cf. hicn_dpo_udp6_add_and_lock). */
+  dpo_set (dpo, hicn_face_udp_type, DPO_PROTO_IP6, dpoi_index);
+  dpo->dpoi_next_node = ~0;
+  dpo_lock (dpo);
+  /* udp faces are never application faces. */
+  *is_appface = 0;
+
+  return HICN_ERROR_NONE;
+}
+
+/**
+ * @brief Retrieve, or create if it doesn't exist, a face from the face
+ * identifier (local_addr, remote_addr, local_port, remote_port) and returns its
+ * dpo. This method adds a lock on the face state.
+ *
+ * @param dpo: Result of the lookup
+ * @param local_addr: Local address of the UDP tunnel
+ * @param remote_addr: Remote address of the UDP tunnel
+ * @param local_port: Local port of the UDP tunnel
+ * @param remote_port: Remote port of the UDP tunnel
+ * @param is_appface: Boolean that indicates whether the face is an application
+ * face or not. (Currently only IP faces can be appface)
+ * @param node_index: vlib edge index to use in the packet processing
+ */
+always_inline void
+hicn_dpo_udp6_add_and_lock (dpo_id_t * dpo,
+			    const ip6_address_t * local_addr,
+			    const ip6_address_t * remote_addr,
+			    u16 local_port, u16 remote_port,
+			    u32 node_index, u8 * is_appface)
+{
+  hicn_face_t *face =
+    hicn_face_udp6_get (local_addr, remote_addr, local_port, remote_port);
+
+  if (face == NULL)
+    {
+      pool_get (hicn_dpoi_face_pool, face);
+
+      hicn_face_udp_t *udp_face = (hicn_face_udp_t *) face->data;
+
+      /* Start from the skeleton header, then fill per-face fields. */
+      clib_memcpy (&(udp_face->hdrs.ip6.ip), &ip6_header_skl,
+		   sizeof (ip6_header_t));
+      clib_memcpy (&(udp_face->hdrs.ip6.ip.src_address), local_addr,
+		   sizeof (ip6_address_t));
+      clib_memcpy (&(udp_face->hdrs.ip6.ip.dst_address), remote_addr,
+		   sizeof (ip6_address_t));
+
+      udp_face->hdrs.ip6.udp.src_port = local_port;
+      udp_face->hdrs.ip6.udp.dst_port = remote_port;
+
+      face->shared.adj = ADJ_INDEX_INVALID;
+      face->shared.pl_id = (u16) 0;
+      face->shared.face_type = hicn_face_udp_type;
+      face->shared.flags = HICN_FACE_FLAGS_IFACE;
+      face->shared.locks = 0;
+
+      hicn_face_udp_key_t key;
+      hicn_face_udp6_get_key (local_addr, remote_addr, local_port,
+			      remote_port, &key);
+      hicn_face_id_t dpoi_index = hicn_dpoi_get_index (face);
+
+      mhash_set_mem (&hicn_face_udp_hashtb, &key, (uword *) & dpoi_index, 0);
+
+      /* udp faces are never application faces. */
+      *is_appface = 0;
+      dpo_set (dpo, hicn_face_udp_type, DPO_PROTO_IP6, dpoi_index);
+      dpo->dpoi_next_node = node_index;
+      dpo_lock (dpo);
+
+      return;
+    }
+
+  *is_appface = 0;
+
+  hicn_face_id_t dpoi_index = hicn_dpoi_get_index (face);
+  dpo_set (dpo, hicn_face_udp_type, DPO_PROTO_IP6, dpoi_index);
+  dpo->dpoi_next_node = node_index;
+  dpo_lock (dpo);
+}
+
+/**
+ * @brief Create a dpo from a udp face
+ *
+ * @param face Face from which to create the dpo
+ * @return the dpo
+ */
+void hicn_dpo_udp_create_from_face (hicn_face_t * face, dpo_id_t * dpo,
+ u16 dpoi_next_node);
+
+#endif // __HICN_DPO_UDP_H__
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/hicn-plugin/src/faces/udp/face_udp.c b/hicn-plugin/src/faces/udp/face_udp.c
new file mode 100755
index 000000000..92335273a
--- /dev/null
+++ b/hicn-plugin/src/faces/udp/face_udp.c
@@ -0,0 +1,371 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+
+#include "face_udp.h"
+#include "face_udp_node.h"
+#include "dpo_udp.h"
+#include "../face.h"
+#include "../../strategy.h"
+#include "../../strategy_dpo_manager.h"
+#include "../../hicn.h"
+
+#include "../../mapme.h" // HICN_MAPME_EVENT_*
+#include "../../mapme_eventmgr.h" // hicn_mapme_eventmgr_process_node
+extern vlib_node_registration_t hicn_mapme_eventmgr_process_node;
+
+mhash_t hicn_face_udp_hashtb;
+
+dpo_type_t hicn_face_udp_type;
+
+/* Skeleton IPv4 header copied onto every newly created udp4 face; the
+ * addresses are zero here and painted in per face (see hicn_face_udp_add),
+ * the checksum is pre-computed once in hicn_face_udp_init_internal. */
+ip4_header_t ip4_header_skl = {
+  .ip_version_and_header_length = 0x45,	/* IPv4, 5-word header */
+  .tos = 0x00,
+  .length = (u16) 0,
+  .fragment_id = (u16) 0,
+  .flags_and_fragment_offset = (u16) 0,
+  .ttl = 254,
+  .protocol = IP_PROTOCOL_UDP,
+  .checksum = 0,
+  .src_address = {{0}},
+  .dst_address = {{0}},
+};
+
+/* Skeleton IPv6 header for udp6 faces; same role as ip4_header_skl. */
+ip6_header_t ip6_header_skl = {
+#if CLIB_ARCH_IS_BIG_ENDIAN
+  .ip_version_traffic_class_and_flow_label = 0x60000000,
+#else
+  .ip_version_traffic_class_and_flow_label = 0x00000060,
+#endif
+  .payload_length = (u16) 0,
+  .protocol = IP_PROTOCOL_UDP,
+  .hop_limit = 254,
+  .src_address = {{0}},
+  .dst_address = {{0}}
+};
+
+u32 strategy_face_udp4_vlib_edge;
+u32 strategy_face_udp6_vlib_edge;
+
+/* Separated from hicn_face_udp_init because it cannot be called by the
+   init macro due to dependencies on other modules not yet initialized */
+void
+hicn_face_udp_init_internal ()
+{
+  /* Pre-compute the checksum of the zero-address skeleton IPv4 header;
+   * hicn_face_udp_add () later only adjusts it incrementally for the
+   * real src/dst addresses instead of recomputing it per face. */
+  ip4_header_t *ip4_hdr = &ip4_header_skl;
+  ip4_header_skl.checksum = ip4_header_checksum (ip4_hdr);
+}
+
+/**
+ * @brief Initialize the udp face module: wire every strategy node to the
+ * udp4/udp6 output nodes and register the udp face type.
+ *
+ * @param vm vlib main
+ */
+void
+hicn_face_udp_init (vlib_main_t * vm)
+{
+  int strategy_nodes_n = hicn_strategy_get_all_available ();
+
+  /* Default Strategy has index 0 and it always exists */
+  strategy_face_udp4_vlib_edge =
+    vlib_node_add_next (vm,
+                        hicn_dpo_get_strategy_vft (default_dpo.
+                                                   hicn_dpo_get_type ())->
+                        get_strategy_node_index (),
+                        hicn_face_udp4_output_node.index);
+  strategy_face_udp6_vlib_edge =
+    vlib_node_add_next (vm,
+                        hicn_dpo_get_strategy_vft (default_dpo.
+                                                   hicn_dpo_get_type ())->
+                        get_strategy_node_index (),
+                        hicn_face_udp6_output_node.index);
+
+  /*
+   * Create an edge between all the other strategy nodes and the
+   * udp_output nodes.  Every strategy must get the same edge index for a
+   * given output node, which the asserts below verify.
+   */
+  for (int i = 1; i < strategy_nodes_n; i++)
+    {
+      u32 temp_index4 = vlib_node_add_next (vm,
+                                            hicn_dpo_get_strategy_vft_from_id
+                                            (i)->get_strategy_node_index (),
+                                            hicn_face_udp4_output_node.index);
+      u32 temp_index6 = vlib_node_add_next (vm,
+                                            hicn_dpo_get_strategy_vft_from_id
+                                            (i)->get_strategy_node_index (),
+                                            hicn_face_udp6_output_node.index);
+      ASSERT (temp_index4 == strategy_face_udp4_vlib_edge);
+      ASSERT (temp_index6 == strategy_face_udp6_vlib_edge);
+    }
+
+  hicn_dpo_udp_module_init ();
+
+  /* Register the udp face type with its virtual function table.
+   * (stray double semicolon removed) */
+  register_face_type (hicn_face_udp_type, &udp_vft, "udp");
+}
+
+/**
+ * @brief Create a new udp face keyed by the tuple (local addr, remote addr,
+ * local port, remote port) and signal the mapme event manager.
+ *
+ * Addresses and ports are expected in network byte order (see face_udp.h).
+ *
+ * @param local_addr Local address of the UDP tunnel
+ * @param remote_addr Remote address of the UDP tunnel
+ * @param local_port Local port of the UDP tunnel
+ * @param remote_port Remote port of the UDP tunnel
+ * @param swif Software interface bound to the face
+ * @param pfaceid Out: index of the new face in the face pool
+ * @return HICN_ERROR_FACE_ALREADY_CREATED if the tuple is already known,
+ * HICN_ERROR_IPS_ADDR_TYPE_NONUNIFORM if local/remote address families
+ * differ, HICN_ERROR_NONE otherwise
+ */
+int
+hicn_face_udp_add (const ip46_address_t * local_addr,
+		   const ip46_address_t * remote_addr, u16 local_port,
+		   u16 remote_port, u32 swif, hicn_face_id_t * pfaceid)
+{
+  fib_protocol_t fib_type;
+  vnet_link_t link_type;
+  adj_index_t ip_adj;
+  int ret = HICN_ERROR_NONE;
+  dpo_proto_t dpo_proto;
+
+  hicn_face_flags_t flags = (hicn_face_flags_t) 0;
+  flags |= HICN_FACE_FLAGS_FACE;
+
+
+  /* Both endpoints must belong to the same address family. */
+  if (ip46_address_is_ip4 (local_addr) && ip46_address_is_ip4 (remote_addr))
+    {
+      link_type = VNET_LINK_IP4;
+      fib_type = FIB_PROTOCOL_IP4;
+      /* Resolve (and lock) the adjacency towards the remote endpoint. */
+      ip_adj = adj_nbr_add_or_lock (fib_type, link_type, remote_addr, swif);
+
+      hicn_face_t *face =
+	hicn_face_udp4_get (&local_addr->ip4, &remote_addr->ip4, local_port,
+			    remote_port);
+
+      if (face != NULL)
+	return HICN_ERROR_FACE_ALREADY_CREATED;
+
+      pool_get (hicn_dpoi_face_pool, face);
+
+      hicn_face_udp_t *udp_face = (hicn_face_udp_t *) face->data;
+
+      /* Paint the encap headers from the pre-built skeleton, then fill in
+         the tunnel endpoints. */
+      clib_memcpy (&(udp_face->hdrs.ip4.ip), &ip4_header_skl,
+		   sizeof (ip4_header_t));
+      clib_memcpy (&(udp_face->hdrs.ip4.ip.src_address), &(local_addr->ip4),
+		   sizeof (ip4_address_t));
+      clib_memcpy (&(udp_face->hdrs.ip4.ip.dst_address), &(remote_addr->ip4),
+		   sizeof (ip4_address_t));
+
+      udp_face->hdrs.ip4.udp.src_port = local_port;
+      udp_face->hdrs.ip4.udp.dst_port = remote_port;
+
+      /* Incrementally update the skeleton's pre-computed IPv4 checksum:
+         subtract the skeleton (zero) addresses, add the real ones. */
+      ip_csum_t csum = udp_face->hdrs.ip4.ip.checksum;
+      csum = ip_csum_sub_even (csum, ip4_header_skl.src_address.as_u32);
+      csum = ip_csum_sub_even (csum, ip4_header_skl.dst_address.as_u32);
+      csum =
+	ip_csum_add_even (csum, udp_face->hdrs.ip4.ip.src_address.as_u32);
+      csum =
+	ip_csum_add_even (csum, udp_face->hdrs.ip4.ip.dst_address.as_u32);
+      udp_face->hdrs.ip4.ip.checksum = ip_csum_fold (csum);
+
+      face->shared.adj = ip_adj;
+      face->shared.sw_if = swif;
+      face->shared.pl_id = (u16) 0;
+      face->shared.face_type = hicn_face_udp_type;
+      face->shared.flags = flags;
+      face->shared.locks = 0;
+
+      /* Index the new face in the udp hash table by its 4-tuple key. */
+      hicn_face_udp_key_t key;
+      hicn_face_udp4_get_key (&local_addr->ip4, &remote_addr->ip4, local_port,
+			      remote_port, &key);
+      hicn_face_id_t dpoi_index = hicn_dpoi_get_index (face);
+
+      mhash_set_mem (&hicn_face_udp_hashtb, &key, (uword *) & dpoi_index, 0);
+
+      *pfaceid = hicn_dpoi_get_index (face);
+      dpo_proto = DPO_PROTO_IP4;
+    }
+  else if (!ip46_address_is_ip4 (local_addr)
+	   && !ip46_address_is_ip4 (remote_addr))
+    {
+      link_type = VNET_LINK_IP6;
+      fib_type = FIB_PROTOCOL_IP6;
+      ip_adj = adj_nbr_add_or_lock (fib_type, link_type, remote_addr, swif);
+
+      hicn_face_t *face =
+	hicn_face_udp6_get (&local_addr->ip6, &remote_addr->ip6, local_port,
+			    remote_port);
+
+      if (face != NULL)
+	return HICN_ERROR_FACE_ALREADY_CREATED;
+
+      pool_get (hicn_dpoi_face_pool, face);
+
+      hicn_face_udp_t *udp_face = (hicn_face_udp_t *) face->data;
+
+      /* Same as the v4 branch, without a checksum (none in the v6 header). */
+      clib_memcpy (&(udp_face->hdrs.ip6.ip), &ip6_header_skl,
+		   sizeof (ip6_header_t));
+      clib_memcpy (&(udp_face->hdrs.ip6.ip.src_address), local_addr,
+		   sizeof (ip6_address_t));
+      clib_memcpy (&(udp_face->hdrs.ip6.ip.dst_address), remote_addr,
+		   sizeof (ip6_address_t));
+
+      udp_face->hdrs.ip6.udp.src_port = local_port;
+      udp_face->hdrs.ip6.udp.dst_port = remote_port;
+
+      face->shared.adj = ip_adj;
+      face->shared.sw_if = swif;
+      face->shared.pl_id = (u16) 0;
+      face->shared.face_type = hicn_face_udp_type;
+      face->shared.flags = flags;
+      face->shared.locks = 0;
+
+      hicn_face_udp_key_t key;
+      hicn_face_udp6_get_key (&local_addr->ip6, &remote_addr->ip6, local_port,
+			      remote_port, &key);
+      hicn_face_id_t dpoi_index = hicn_dpoi_get_index (face);
+
+      mhash_set_mem (&hicn_face_udp_hashtb, &key, (uword *) & dpoi_index, 0);
+
+      *pfaceid = hicn_dpoi_get_index (face);
+      dpo_proto = DPO_PROTO_IP6;
+    }
+  else
+    {
+      return HICN_ERROR_IPS_ADDR_TYPE_NONUNIFORM;
+    }
+
+  /* Post a FACE_ADD event to the mapme event manager; the embedded dpo
+     points at the new face.  NOTE(review): .prefix = 0 here — presumably
+     filled in later by the mapme process; confirm in mapme_eventmgr. */
+  retx_t *retx = vlib_process_signal_event_data (vlib_get_main (),
+						 hicn_mapme_eventmgr_process_node.
+						 index,
+						 HICN_MAPME_EVENT_FACE_ADD, 1,
+						 sizeof (retx_t));
+  *retx = (retx_t)
+  {
+    .prefix = 0,.dpo = (dpo_id_t)
+    {
+      .dpoi_type = hicn_face_udp_type,.dpoi_proto =
+	dpo_proto,.dpoi_next_node = 0,.dpoi_index = *pfaceid,}
+  };
+
+  return ret;
+}
+
+/* Delete a udp face: thin wrapper over the generic face deletion, which
+ * also removes it from hicn_face_udp_hashtb via the face vft. */
+int
+hicn_face_udp_del (u32 faceid)
+{
+  return hicn_face_del (faceid);
+}
+
+/**
+ * @brief Format a UDP face.
+ *
+ * @param s String to append to (created if NULL)
+ * @param args Expects an index_t face_id followed by a u32 indent
+ * @return the formatted string
+ */
+u8 *
+format_hicn_face_udp (u8 * s, va_list * args)
+{
+  hicn_face_id_t face_id = va_arg (*args, index_t);
+  CLIB_UNUSED (u32 indent) = va_arg (*args, u32);
+  hicn_face_t *face;
+  hicn_face_udp_t *udp_face;
+  ip_adjacency_t *adj;
+  u8 ipv = 0x40;
+  vnet_main_t *vnm = vnet_get_main ();
+
+
+  face = hicn_dpoi_get_from_idx (face_id);
+  udp_face = (hicn_face_udp_t *) (face->data);
+
+  if (face->shared.flags & HICN_FACE_FLAGS_FACE)
+    {
+      ASSERT (face->shared.adj != (adj_index_t) ~ 0);
+      adj = adj_get (face->shared.adj);
+
+      s = format (s, "%U Face %d: ", format_white_space, indent, face_id);
+      /* BUGFIX: mask out the IHL nibble before comparing the version;
+         the unmasked byte is 0x45 for IPv4 and never equalled 0x40,
+         so IPv4 faces were always printed through the IPv6 branch. */
+      if ((udp_face->hdrs.ip4.ip.ip_version_and_header_length & 0xf0) == ipv)
+	{
+	  s = format (s, "type UDP local %U|%u ",
+		      format_ip4_address, &udp_face->hdrs.ip4.ip.src_address,
+		      clib_net_to_host_u16 (udp_face->hdrs.ip4.udp.src_port));
+	  s =
+	    format (s, "remote %U|%u ", format_ip4_address,
+		    &udp_face->hdrs.ip4.ip.dst_address,
+		    clib_net_to_host_u16 (udp_face->hdrs.ip4.udp.dst_port));
+	  s = format (s, "%U", format_vnet_link, adj->ia_link);
+	  s = format (s, " dev %U", format_vnet_sw_interface_name, vnm,
+		      vnet_get_sw_interface (vnm, face->shared.sw_if));
+	  if ((face->shared.flags & HICN_FACE_FLAGS_DELETED))
+	    s = format (s, " (deleted)");
+	}
+      else
+	{
+	  s = format (s, "type UDP local %U|%u ",
+		      format_ip6_address, &udp_face->hdrs.ip6.ip.src_address,
+		      clib_net_to_host_u16 (udp_face->hdrs.ip6.udp.src_port));
+	  s =
+	    format (s, "remote %U|%u", format_ip6_address,
+		    &udp_face->hdrs.ip6.ip.dst_address,
+		    clib_net_to_host_u16 (udp_face->hdrs.ip6.udp.dst_port));
+	  s = format (s, "%U", format_vnet_link, adj->ia_link);
+	  s = format (s, " dev %U", format_vnet_sw_interface_name, vnm,
+		      vnet_get_sw_interface (vnm, face->shared.sw_if));
+	  if ((face->shared.flags & HICN_FACE_FLAGS_DELETED))
+	    s = format (s, " (deleted)");
+	}
+    }
+  else
+    {
+      /* BUGFIX: the format string was "IFace %d: " but the argument list
+         started with format_white_space/indent; without a leading %U the
+         %d consumed the function pointer.  Now matches the FACE branch. */
+      s = format (s, "%U IFace %d: ", format_white_space, indent, face_id);
+      if ((udp_face->hdrs.ip4.ip.ip_version_and_header_length & 0xf0) == ipv)
+	{
+	  s = format (s, "type UDP local %U|%u",
+		      format_ip4_address, &udp_face->hdrs.ip4.ip.src_address,
+		      clib_net_to_host_u16 (udp_face->hdrs.ip4.udp.src_port));
+	  /* BUGFIX: the destination endpoint was labelled " local"
+	     (copy-paste); the IPv6 branch below correctly says remote. */
+	  s =
+	    format (s, " remote %U|%u", format_ip4_address,
+		    &udp_face->hdrs.ip4.ip.dst_address,
+		    clib_net_to_host_u16 (udp_face->hdrs.ip4.udp.dst_port));
+	  s =
+	    format (s, " dev %U", format_vnet_sw_interface_name, vnm,
+		    vnet_get_sw_interface (vnm, face->shared.sw_if));
+	  if ((face->shared.flags & HICN_FACE_FLAGS_DELETED))
+	    s = format (s, " (deleted)");
+	}
+      else
+	{
+	  s = format (s, "type UDP local %U|%u",
+		      format_ip6_address, &udp_face->hdrs.ip6.ip.src_address,
+		      clib_net_to_host_u16 (udp_face->hdrs.ip6.udp.src_port));
+	  s =
+	    format (s, " remote %U|%u", format_ip6_address,
+		    &udp_face->hdrs.ip6.ip.dst_address,
+		    clib_net_to_host_u16 (udp_face->hdrs.ip6.udp.dst_port));
+	  s =
+	    format (s, " dev %U", format_vnet_sw_interface_name, vnm,
+		    vnet_get_sw_interface (vnm, face->shared.sw_if));
+	  if ((face->shared.flags & HICN_FACE_FLAGS_DELETED))
+	    s = format (s, " (deleted)");
+	}
+    }
+
+  return s;
+}
+
+/**
+ * @brief Create (and lock) a dpo for this udp face, picking the vlib edge
+ * (udp4 vs udp6 output) from the version nibble of the encap IP header.
+ */
+void
+hicn_face_udp_get_dpo (hicn_face_t * face, dpo_id_t * dpo)
+{
+  hicn_face_udp_t *face_udp = (hicn_face_udp_t *) face->data;
+  u8 version =
+    (face_udp->hdrs.ip4.ip.ip_version_and_header_length & 0xf0) >> 4;
+  /* FIX: 'return <void call>;' is a constraint violation in ISO C
+     (GNU extension only); call and return separately. */
+  hicn_dpo_udp_create_from_face (face, dpo,
+				 version == (u8) 4 ?
+				 strategy_face_udp4_vlib_edge :
+				 strategy_face_udp6_vlib_edge);
+}
+
+/* Virtual function table registered for the udp face type in
+ * hicn_face_udp_init (via register_face_type). */
+hicn_face_vft_t udp_vft = {
+  .format_face = format_hicn_face_udp,	/* pretty-printer */
+  .hicn_face_del = hicn_face_udp_del,	/* deletion hook */
+  .hicn_face_get_dpo = hicn_face_udp_get_dpo,	/* dpo factory */
+};
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/hicn-plugin/src/faces/udp/face_udp.h b/hicn-plugin/src/faces/udp/face_udp.h
new file mode 100755
index 000000000..8694bad5c
--- /dev/null
+++ b/hicn-plugin/src/faces/udp/face_udp.h
@@ -0,0 +1,248 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HICN_FACE_UDP_H__
+#define __HICN_FACE_UDP_H__
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vnet/ip/ip6_packet.h>
+#include <vnet/udp/udp_packet.h>
+
+#include "../face.h"
+
+/**
+ * @file
+ * @brief UDP face
+ *
+ * This file contains the definition of UDP faces.
+ * UDP faces encap and decap an hicn packet into a UDP tunnel.
+ * Src and dst address in interest and data packets are not considered and
+ * should be set to 0 (not checked in the forwarder).
+ */
+
+/* Pre-instantiated ip header to fast-fill a newly encapsulated packet */
+extern ip4_header_t ip4_header_skl;
+extern ip6_header_t ip6_header_skl;
+
+#define INVALID_UDP_DPO_INDEX ~0
+
+/**
+ * @brief UDP face representation. The following is stored in the data field of
+ * an hicn_face_t object (see face.h). A UDP face is identified by the
+ * quadruplet (src addr, dst addr, src port, dst port).
+ */
+typedef struct hicn_face_udp_t_
+{
+  /**
+   * The headers to paint, in packet painting order.
+   * The structs are packed so that ip + udp are contiguous with no
+   * padding; only one union arm is meaningful per face (v4 or v6),
+   * discriminated by the IP version nibble of hdrs.ip4.ip.
+   */
+  union
+  {
+    struct
+    {
+      ip4_header_t ip;
+      udp_header_t udp;
+    } __attribute__ ((packed)) ip4;
+    struct
+    {
+      ip6_header_t ip;
+      udp_header_t udp;
+    } __attribute__ ((packed)) ip6;
+  } __attribute__ ((packed)) hdrs;
+} hicn_face_udp_t;
+
+/* Hash table mapping the udp key to the face id (dpoi_index pointing to an
+   element in the face pool defined in face.h) */
+extern mhash_t hicn_face_udp_hashtb;
+
+/**
+ * @brief Hash table key.
+ */
+typedef struct hicn_face_udp_key_s
+{
+  ip46_address_t local_addr;	/* network byte order */
+  ip46_address_t remote_addr;	/* network byte order */
+  u16 local_port;		/* network byte order */
+  u16 remote_port;		/* network byte order */
+  /* NOTE(review): the struct is not packed, so there is trailing padding
+   * after remote_port.  Any code building a key on the stack must zero
+   * the whole struct before filling it, or mhash comparisons over
+   * sizeof(hicn_face_udp_key_t) may see nondeterministic bytes. */
+} hicn_face_udp_key_t;
+
+/* DPO type for the udp face */
+extern dpo_type_t hicn_face_udp_type;
+
+/* VFT table for the udp face. Mainly used to format the face in the right way */
+extern hicn_face_vft_t udp_vft;
+
+/**
+ * @brief Create the key object for the mhash. Fill in the key object with the
+ * expected values.
+ *
+ * @param local_addr Local address of the UDP tunnel
+ * @param remote_addr Remote address of the UDP tunnel
+ * @param local_port Local port of the UDP tunnel
+ * @param remote_port Remote port of the UDP tunnel
+ * @param key Pointer to an allocated hicn_face_udp_key_t object
+ */
+/* Fill in the mhash key for a udp4 face.  The key is zeroed first so that
+ * struct padding (and the unused ip46 bytes) hash deterministically. */
+always_inline void
+hicn_face_udp4_get_key (const ip4_address_t * local_addr,
+			const ip4_address_t * remote_addr,
+			u16 local_port, u16 remote_port,
+			hicn_face_udp_key_t * key)
+{
+  /* FIX: clear trailing struct padding so mhash lookups over the full
+     sizeof(*key) cannot be defeated by stack garbage. */
+  clib_memset (key, 0, sizeof (*key));
+  ip46_address_set_ip4 (&(key->local_addr), local_addr);
+  ip46_address_set_ip4 (&(key->remote_addr), remote_addr);
+  key->local_port = local_port;
+  key->remote_port = remote_port;
+}
+
+/**
+ * @brief Create the key object for the mhash. Fill in the key object with the
+ * expected values.
+ *
+ * @param local_addr Local address of the UDP tunnel
+ * @param remote_addr Remote address of the UDP tunnel
+ * @param local_port Local port of the UDP tunnel
+ * @param remote_port Remote port of the UDP tunnel
+ * @param key Pointer to an allocated hicn_face_udp_key_t object
+ */
+/* Fill in the mhash key for a udp6 face.  The key is zeroed first so that
+ * struct padding hashes deterministically. */
+always_inline void
+hicn_face_udp6_get_key (const ip6_address_t * local_addr,
+			const ip6_address_t * remote_addr,
+			u16 local_port, u16 remote_port,
+			hicn_face_udp_key_t * key)
+{
+  /* FIX: clear trailing struct padding so mhash lookups over the full
+     sizeof(*key) cannot be defeated by stack garbage. */
+  clib_memset (key, 0, sizeof (*key));
+  key->local_addr.ip6 = *local_addr;
+  key->remote_addr.ip6 = *remote_addr;
+  key->local_port = local_port;
+  key->remote_port = remote_port;
+}
+
+/**
+ * @brief Get the dpoi from the quadruplet that identifies the face. Does not add any lock.
+ *
+ * @param local_addr Local address of the UDP tunnel
+ * @param remote_addr Remote address of the UDP tunnel
+ * @param local_port Local port of the UDP tunnel
+ * @param remote_port Remote port of the UDP tunnel
+ *
+ * @result Pointer to the face.
+ */
+always_inline hicn_face_t *
+hicn_face_udp4_get (const ip4_address_t * local_addr,
+		    const ip4_address_t * remote_addr,
+		    u16 local_port, u16 remote_port)
+{
+  hicn_face_udp_key_t key;
+
+  /* Build the 4-tuple key and look it up in the udp face hash table. */
+  hicn_face_udp4_get_key (local_addr, remote_addr, local_port, remote_port,
+			  &key);
+
+  hicn_face_id_t *dpoi_index =
+    (hicn_face_id_t *) mhash_get (&hicn_face_udp_hashtb,
+				  &key);
+
+  /* NULL when the tuple is unknown; otherwise resolve the pool index to
+     the face.  No lock is taken (see function doc above). */
+  return dpoi_index == NULL ? NULL : hicn_dpoi_get_from_idx (*dpoi_index);
+}
+
+/**
+ * @brief Get the dpoi from the quadruplet that identifies the face. Does not add any lock.
+ *
+ * @param local_addr Local address of the UDP tunnel (network order)
+ * @param remote_addr Remote address of the UDP tunnel (network order)
+ * @param local_port Local port of the UDP tunnel (network order)
+ * @param remote_port Remote port of the UDP tunnel (network order)
+ *
+ * @result Pointer to the face.
+ */
+always_inline hicn_face_t *
+hicn_face_udp6_get (const ip6_address_t * local_addr,
+		    const ip6_address_t * remote_addr,
+		    u16 local_port, u16 remote_port)
+{
+  hicn_face_udp_key_t key;
+
+  /* Build the 4-tuple key and look it up in the udp face hash table. */
+  hicn_face_udp6_get_key (local_addr, remote_addr, local_port, remote_port,
+			  &key);
+
+  hicn_face_id_t *dpoi_index =
+    (hicn_face_id_t *) mhash_get (&hicn_face_udp_hashtb,
+				  &key);
+
+  /* NULL when the tuple is unknown; otherwise resolve the pool index to
+     the face.  No lock is taken (see function doc above). */
+  return dpoi_index == NULL ? NULL : hicn_dpoi_get_from_idx (*dpoi_index);
+}
+
+
+/**
+ * @brief Initialize the udp face module
+ */
+void hicn_face_udp_init (vlib_main_t * vm);
+
+/**
+ * @brief Create a new udp face. API for other modules (e.g., routing)
+ *
+ * @param local_addr Local ip v4 or v6 address of the face (network order)
+ * @param remote_addr Remote ip v4 or v6 address of the face (network order)
+ * @param local_port Local udp port of the face (network order)
+ * @param remote_port Remote udp port of the face (network order)
+ * @param sw_if interface associated to the face
+ * @param pfaceid Pointer to return the face id
+ * @return HICN_ERROR_FACE_ALREADY_CREATED if the tuple already exists,
+ * HICN_ERROR_IPS_ADDR_TYPE_NONUNIFORM if the address families differ,
+ * otherwise HICN_ERROR_NONE
+ */
+int hicn_face_udp_add (const ip46_address_t * local_addr,
+ const ip46_address_t * remote_addr, u16 local_port,
+ u16 remote_port, u32 swif, hicn_face_id_t * pfaceid);
+
+/**
+ * @brief Delete a udp face
+ *
+ * @param face_id Id of the face to delete
+ * @return HICN_ERROR_FACE_NOT_FOUND if the face does not exist, otherwise
+ * HICN_ERROR_NONE
+ */
+int hicn_face_udp_del (hicn_face_id_t faceid);
+
+/**
+ * @brief Format a UDP face
+ *
+ * @param s Pointer to a previous string. If null it will be initialize
+ * @param args Array storing input values. Expected u32 indent and u32 face_id
+ * @return String with the formatted face
+ */
+u8 *format_hicn_face_udp (u8 * s, va_list * args);
+
+/**
+ * @brief Create a dpo from a udp face
+ *
+ * @param face Face from which to create the dpo
+ * @return the dpo
+ */
+void hicn_face_udp_get_dpo (hicn_face_t * face, dpo_id_t * dpo);
+
+/**
+ * @brief Init some internal structures
+ */
+void hicn_face_udp_init_internal (void);
+
+#endif // __HICN_FACE_UDP_H__
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/hicn-plugin/src/faces/udp/face_udp_cli.c b/hicn-plugin/src/faces/udp/face_udp_cli.c
new file mode 100755
index 000000000..7bb172ce8
--- /dev/null
+++ b/hicn-plugin/src/faces/udp/face_udp_cli.c
@@ -0,0 +1,164 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "face_udp.h"
+#include "dpo_udp.h"
+
+#include <vnet/vnet.h>
+#include <vnet/dpo/dpo.h>
+#include <vlib/vlib.h>
+#include <vnet/ip/format.h>
+
+#define HICN_FACE_NONE 0
+#define HICN_FACE_DELETE 1
+#define HICN_FACE_ADD 2
+
+
+/**
+ * @brief CLI handler for "hicn face udp {add ...} | {del id <face_id>}".
+ *
+ * Parses either an add (src/dst address+port and interface, host order on
+ * the wire line, converted to network order before hicn_face_udp_add) or
+ * a delete by face id.
+ */
+static clib_error_t *
+hicn_face_udp_cli_set_command_fn (vlib_main_t * vm,
+				  unformat_input_t * main_input,
+				  vlib_cli_command_t * cmd)
+{
+  vnet_main_t *vnm = vnet_get_main ();
+  ip46_address_t src_addr;
+  u32 src_port = 0;
+  ip46_address_t dst_addr;
+  u32 dst_port = 0;
+  hicn_face_id_t face_id = HICN_FACE_NULL;
+  int ret = HICN_ERROR_NONE;
+  int sw_if = 0;		/* FIX: was uninitialized */
+  int face_op = HICN_FACE_NONE;
+
+  ip46_address_reset (&src_addr);
+  ip46_address_reset (&dst_addr);
+  /* Get a line of input. */
+  unformat_input_t _line_input, *line_input = &_line_input;
+  if (!unformat_user (main_input, unformat_line_input, line_input))
+    {
+      return (0);
+    }
+
+  /* TODO(review): line_input is never unformat_free'd on the early-return
+     error paths below; consider a single cleanup exit. */
+  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+    {
+      if (unformat (line_input, "del"))
+	{
+	  if (unformat (line_input, "id %d", &face_id))
+	    face_op = HICN_FACE_DELETE;
+	  else
+	    {
+	      return clib_error_return (0, "missing face id");
+	    }
+	}
+      else if (unformat (line_input, "add"))
+	{
+	  face_op = HICN_FACE_ADD;
+	  /* FIX: the original used "if (unformat (...));" with a stray
+	     semicolon as the then-branch; make the error path explicit. */
+	  if (!unformat
+	      (line_input, "src_addr %U port %u dst_addr %U port %u intfc %U",
+	       unformat_ip46_address, &src_addr, IP46_TYPE_ANY, &src_port,
+	       unformat_ip46_address, &dst_addr, IP46_TYPE_ANY, &dst_port,
+	       unformat_vnet_sw_interface, vnm, &sw_if))
+	    {
+	      return clib_error_return (0, "%s '%U'",
+					get_error_string
+					(HICN_ERROR_CLI_INVAL),
+					format_unformat_error, line_input);
+	    }
+	}
+      else
+	{
+	  return clib_error_return (0, "%s '%U'",
+				    get_error_string (HICN_ERROR_CLI_INVAL),
+				    format_unformat_error, line_input);
+	}
+    }
+
+  if (face_id != HICN_FACE_NULL)
+    {
+      if (!hicn_dpoi_idx_is_valid (face_id))
+	{
+	  return clib_error_return (0, "%s, face_id %d not valid",
+				    get_error_string (ret), face_id);
+	}
+    }
+
+  int rv;
+  switch (face_op)
+    {
+    case HICN_FACE_ADD:
+
+      /* Check for presence of next hop address */
+      if (((dst_addr.as_u64[0] == (u64) 0) && (dst_addr.as_u64[1] == (u64) 0))
+	  || dst_port == 0)
+	{
+	  return clib_error_return (0, "dst address and port not specified");
+	}
+
+      if (((src_addr.as_u64[0] == (u64) 0) && (src_addr.as_u64[1] == (u64) 0))
+	  || src_port == 0)
+	{
+	  /* FIX: message now matches the condition (address OR port). */
+	  return clib_error_return (0, "src address and port not specified");
+	}
+
+      rv =
+	hicn_face_udp_add (&src_addr, &dst_addr,
+			   clib_host_to_net_u16 (src_port),
+			   clib_host_to_net_u16 (dst_port), sw_if, &face_id);
+      if (rv == HICN_ERROR_NONE)
+	{
+	  vlib_cli_output (vm, "Face id: %d", face_id);
+	}
+      else
+	{
+	  return clib_error_return (0, get_error_string (rv));
+	}
+      break;
+    case HICN_FACE_DELETE:
+      rv = hicn_face_udp_del (face_id);
+      if (rv == HICN_ERROR_NONE)
+	{
+	  vlib_cli_output (vm, "Face %d deleted", face_id);
+	}
+      else
+	{
+	  return clib_error_return (0, get_error_string (rv));
+	}
+      break;
+    default:
+      return clib_error_return (0, "Operation (%d) not implemented", face_op);
+      break;
+    }
+  return (rv == HICN_ERROR_NONE) ? 0 : clib_error_return (0, "%s\n",
+							  get_error_string
+							  (rv));
+}
+
+/* cli declaration for 'cfg face' */
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (hicn_face_udp_cli_set_command, static) =
+{
+ .path = "hicn face udp",
+ .short_help = "hicn face udp {add src_addr <src_address> port <src_port > dst_addr <dst_address> port <dst_port>} intfc <interface> | {del id <face_id>}",
+ .function = hicn_face_udp_cli_set_command_fn,
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/hicn-plugin/src/faces/udp/face_udp_node.c b/hicn-plugin/src/faces/udp/face_udp_node.c
new file mode 100755
index 000000000..74d0b1864
--- /dev/null
+++ b/hicn-plugin/src/faces/udp/face_udp_node.c
@@ -0,0 +1,864 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vnet/ip/ip_packet.h>
+
+#include "face_udp.h"
+#include "face_udp_node.h"
+#include "dpo_udp.h"
+#include "../face.h"
+#include "../../strategy.h"
+#include "../../strategy_dpo_manager.h"
+#include "../../hicn.h"
+
+/**
+ * @File
+ *
+ * Definition of the nodes for udp faces.
+ */
+
+vlib_node_registration_t hicn_face_udp4_input_node;
+vlib_node_registration_t hicn_face_udp6_input_node;
+vlib_node_registration_t hicn_face_udp4_output_node;
+vlib_node_registration_t hicn_face_udp6_output_node;
+
+/* Per-node error counter strings, expanded from foreach_hicnfwd_error. */
+static char *hicn_face_udp4_input_error_strings[] = {
+#define _(sym, string) string,
+  foreach_hicnfwd_error
+#undef _
+};
+
+/* Same strings for the udp6 flavour of the input node. */
+static char *hicn_face_udp6_input_error_strings[] = {
+#define _(sym, string) string,
+  foreach_hicnfwd_error
+#undef _
+};
+
+/* Trace context struct */
+typedef struct
+{
+  u32 next_index;
+  u32 sw_if_index;
+  u8 pkt_type;
+} hicn_face_udp4_input_trace_t;
+
+/* Dispositions of the udp4 input node: data path, mapme (ICMP), or drop. */
+typedef enum
+{
+  HICN_FACE_UDP4_INPUT_NEXT_DATA,
+  HICN_FACE_UDP4_INPUT_NEXT_MAPME,
+  HICN_FACE_UDP4_INPUT_NEXT_ERROR_DROP,
+  HICN_FACE_UDP4_INPUT_N_NEXT,
+} hicn_face_udp4_input_next_t;
+
+/* Trace context struct */
+typedef struct
+{
+  u32 next_index;
+  u32 sw_if_index;
+  u8 pkt_type;
+} hicn_face_udp6_input_trace_t;
+
+/* Dispositions of the udp6 input node; mirrors the udp4 enum above. */
+typedef enum
+{
+  HICN_FACE_UDP6_INPUT_NEXT_DATA,
+  HICN_FACE_UDP6_INPUT_NEXT_MAPME,
+  HICN_FACE_UDP6_INPUT_NEXT_ERROR_DROP,
+  HICN_FACE_UDP6_INPUT_N_NEXT,
+} hicn_face_udp6_input_next_t;
+
+/* Aliases consumed by the token-pasting face_input_x1/x2 macros below,
+   so one macro body expands for both the udp4 and udp6 nodes. */
+#define ERROR_INPUT_UDP4 HICN_FACE_UDP4_INPUT_NEXT_ERROR_DROP
+#define ERROR_INPUT_UDP6 HICN_FACE_UDP6_INPUT_NEXT_ERROR_DROP
+
+#define NEXT_MAPME_UDP4 HICN_FACE_UDP4_INPUT_NEXT_MAPME
+#define NEXT_MAPME_UDP6 HICN_FACE_UDP6_INPUT_NEXT_MAPME
+#define NEXT_DATA_UDP4 HICN_FACE_UDP4_INPUT_NEXT_DATA
+#define NEXT_DATA_UDP6 HICN_FACE_UDP6_INPUT_NEXT_DATA
+
+#define IP_HEADER_4 ip4_header_t
+#define IP_HEADER_6 ip6_header_t
+
+#define HICN_DPO_UDP_LOCK_IP4 hicn_dpo_udp4_lock
+#define HICN_DPO_UDP_LOCK_IP6 hicn_dpo_udp6_lock
+
+#define TRACE_INPUT_PKT_UDP4 hicn_face_udp4_input_trace_t
+#define TRACE_INPUT_PKT_UDP6 hicn_face_udp6_input_trace_t
+
+/*
+ * Single-packet iteration of the udp face input node; 'ipv' (4 or 6)
+ * selects the OUTER header type.  Locks (or creates) the iface matching
+ * the outer (dst, src, dst_port, src_port) tuple via hicn_dpo_udpX_lock,
+ * strips the outer IP+UDP encap, then dispatches to the data-pcslookup
+ * or mapme next node.
+ *
+ * NOTE(review): is_v6/is_icmp peek at the INNER header with
+ * (inner_ip_hdr[0] & 2) >> 1 and bytes 7 / 10.  For a standard IPv6
+ * header the first byte is 0x60 (0x60 & 2 == 0) and the protocol/next-
+ * header fields sit at offsets 6 (v6) and 9 (v4), so these tests look
+ * off unless the inner hicn format differs -- confirm against the hicn
+ * packet layout before relying on the mapme branch.
+ */
+#define face_input_x1(ipv) \
+  do { \
+    int ret; \
+    vlib_buffer_t *b0; \
+    u32 bi0; \
+    u32 next0 = ERROR_INPUT_UDP##ipv; \
+    IP_HEADER_##ipv * ip_hdr = NULL; \
+    u8 * inner_ip_hdr = NULL; \
+    udp_header_t * udp_hdr = NULL; \
+    hicn_buffer_t * hicnb0; \
+    /* Prefetch for next iteration. */ \
+    if (n_left_from > 1) \
+      { \
+	vlib_buffer_t *b1; \
+	b1 = vlib_get_buffer (vm, from[1]); \
+	CLIB_PREFETCH (b1, CLIB_CACHE_LINE_BYTES, STORE); \
+	CLIB_PREFETCH (b1->data, CLIB_CACHE_LINE_BYTES , LOAD); \
+      } \
+    /* Dequeue a packet buffer */ \
+    bi0 = from[0]; \
+    from += 1; \
+    n_left_from -= 1; \
+    to_next[0] = bi0; \
+    to_next += 1; \
+    n_left_to_next -= 1; \
+    \
+    b0 = vlib_get_buffer (vm, bi0); \
+    ip_hdr = (IP_HEADER_##ipv *) vlib_buffer_get_current(b0); \
+    udp_hdr = (udp_header_t *) (ip_hdr + 1); \
+    hicnb0 = hicn_get_buffer(b0); \
+    \
+    inner_ip_hdr = (u8 *)(udp_hdr + 1); \
+    u8 is_v6 = ((inner_ip_hdr[0] & 2) >> 1); \
+    u8 is_icmp = is_v6*(inner_ip_hdr[7] == IPPROTO_ICMPV6) + \
+      (1 - is_v6)*(inner_ip_hdr[10] == IPPROTO_ICMPV4); \
+    \
+    ret = HICN_DPO_UDP_LOCK_IP##ipv \
+      (&(hicnb0->face_dpo_id), \
+       &(ip_hdr->dst_address), \
+       &(ip_hdr->src_address), \
+       (udp_hdr->dst_port), \
+       (udp_hdr->src_port), \
+       &hicnb0->is_appface); \
+    \
+    if ( PREDICT_FALSE(ret != HICN_ERROR_NONE) ) \
+      { \
+	next0 = ERROR_INPUT_UDP##ipv; \
+      } \
+    else \
+      { \
+	next0 = is_icmp*NEXT_MAPME_UDP##ipv + \
+	  (1-is_icmp)*NEXT_DATA_UDP##ipv; \
+	stats.pkts_data_count += 1; \
+	\
+	vlib_buffer_advance(b0, sizeof(IP_HEADER_##ipv) + \
+			    sizeof(udp_header_t)); \
+      } \
+    \
+    if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) && \
+		       (b0->flags & VLIB_BUFFER_IS_TRACED))) \
+      { \
+	TRACE_INPUT_PKT_UDP##ipv *t = \
+	  vlib_add_trace (vm, node, b0, sizeof (*t)); \
+	t->pkt_type = HICN_PKT_TYPE_CONTENT; \
+	t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX]; \
+	t->next_index = next0; \
+      } \
+    \
+    \
+    /* Verify speculative enqueue, maybe switch current next frame */ \
+    vlib_validate_buffer_enqueue_x1 (vm, node, next_index, \
+				     to_next, n_left_to_next, \
+				     bi0, next0); \
+  }while(0) \
+
+
+/*
+ * Two-packet unrolled variant of the udp face input iteration ('ipv'
+ * selects the outer header type).  Same per-packet logic as the x1 macro.
+ * NOTE(review): the inner-header version test (inner_ip_hdr[0] & 2) >> 1
+ * and the protocol offsets 7 / 10 look inconsistent with standard IPv6 /
+ * IPv4 header layouts (0x60 first byte; protocol at offsets 6 / 9) --
+ * confirm against the hicn packet format.
+ */
+#define face_input_x2(ipv) \
+  do { \
+    int ret0, ret1; \
+    vlib_buffer_t *b0, *b1; \
+    u32 bi0, bi1; \
+    u32 next0 = ERROR_INPUT_UDP##ipv; \
+    u32 next1 = ERROR_INPUT_UDP##ipv; \
+    IP_HEADER_##ipv * ip_hdr0 = NULL; \
+    IP_HEADER_##ipv * ip_hdr1 = NULL; \
+    u8 * inner_ip_hdr0 = NULL; \
+    u8 * inner_ip_hdr1 = NULL; \
+    udp_header_t * udp_hdr0 = NULL; \
+    udp_header_t * udp_hdr1 = NULL; \
+    hicn_buffer_t *hicnb0, *hicnb1; \
+    \
+    /* Prefetch for next iteration. */ \
+    { \
+      vlib_buffer_t *b2, *b3; \
+      b2 = vlib_get_buffer (vm, from[2]); \
+      b3 = vlib_get_buffer (vm, from[3]); \
+      CLIB_PREFETCH (b2, CLIB_CACHE_LINE_BYTES, STORE); \
+      CLIB_PREFETCH (b3, CLIB_CACHE_LINE_BYTES, STORE); \
+      CLIB_PREFETCH (b2->data, CLIB_CACHE_LINE_BYTES , LOAD); \
+      CLIB_PREFETCH (b3->data, CLIB_CACHE_LINE_BYTES , LOAD); \
+    } \
+    \
+    /* Dequeue a packet buffer */ \
+    bi0 = from[0]; \
+    bi1 = from[1]; \
+    from += 2; \
+    n_left_from -= 2; \
+    to_next[0] = bi0; \
+    to_next[1] = bi1; \
+    to_next += 2; \
+    n_left_to_next -= 2; \
+    \
+    b0 = vlib_get_buffer (vm, bi0); \
+    b1 = vlib_get_buffer (vm, bi1); \
+    ip_hdr0 = (IP_HEADER_##ipv *) vlib_buffer_get_current(b0); \
+    ip_hdr1 = (IP_HEADER_##ipv *) vlib_buffer_get_current(b1); \
+    udp_hdr0 = (udp_header_t *) (ip_hdr0 + 1); \
+    udp_hdr1 = (udp_header_t *) (ip_hdr1 + 1); \
+    hicnb0 = hicn_get_buffer(b0); \
+    hicnb1 = hicn_get_buffer(b1); \
+    \
+    inner_ip_hdr0 = (u8 *)(udp_hdr0 + 1); \
+    u8 is_v6_0 = ((inner_ip_hdr0[0] & 2) >> 1); \
+    u8 is_icmp0 = is_v6_0*(inner_ip_hdr0[7] == IPPROTO_ICMPV6) + \
+      (1 - is_v6_0)*(inner_ip_hdr0[10] == IPPROTO_ICMPV4); \
+    \
+    inner_ip_hdr1 = (u8 *)(udp_hdr1 + 1); \
+    u8 is_v6_1 = ((inner_ip_hdr1[0] & 2) >> 1); \
+    u8 is_icmp1 = is_v6_1*(inner_ip_hdr1[7] == IPPROTO_ICMPV6) + \
+      (1 - is_v6_1)*(inner_ip_hdr1[10] == IPPROTO_ICMPV4); \
+    \
+    ret0 = HICN_DPO_UDP_LOCK_IP##ipv \
+      (&(hicnb0->face_dpo_id), \
+       &(ip_hdr0->dst_address), \
+       &(ip_hdr0->src_address), \
+       (udp_hdr0->dst_port), \
+       (udp_hdr0->src_port), \
+       &hicnb0->is_appface); \
+    \
+    ret1 = HICN_DPO_UDP_LOCK_IP##ipv \
+      (&(hicnb1->face_dpo_id), \
+       &(ip_hdr1->dst_address), \
+       &(ip_hdr1->src_address), \
+       (udp_hdr1->dst_port), \
+       (udp_hdr1->src_port), \
+       &hicnb1->is_appface); \
+    \
+    if ( PREDICT_FALSE(ret0 != HICN_ERROR_NONE) ) \
+      { \
+	next0 = ERROR_INPUT_UDP##ipv; \
+      } \
+    else \
+      { \
+	stats.pkts_data_count += 1; \
+	next0 = is_icmp0*NEXT_MAPME_UDP##ipv + \
+	  (1-is_icmp0)*NEXT_DATA_UDP##ipv; \
+	\
+	vlib_buffer_advance(b0, sizeof(IP_HEADER_##ipv) + \
+			    sizeof(udp_header_t)); \
+      } \
+    \
+    if ( PREDICT_FALSE(ret1 != HICN_ERROR_NONE) ) \
+      { \
+	next1 = ERROR_INPUT_UDP##ipv; \
+      } \
+    else \
+      { \
+	stats.pkts_data_count += 1; \
+	next1 = is_icmp1*NEXT_MAPME_UDP##ipv + \
+	  (1-is_icmp1)*NEXT_DATA_UDP##ipv; \
+	\
+	vlib_buffer_advance(b1, sizeof(IP_HEADER_##ipv) + \
+			    sizeof(udp_header_t)); \
+      } \
+    \
+    if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) && \
+		       (b0->flags & VLIB_BUFFER_IS_TRACED))) \
+      { \
+	TRACE_INPUT_PKT_UDP##ipv *t = \
+	  vlib_add_trace (vm, node, b0, sizeof (*t)); \
+	t->pkt_type = HICN_PKT_TYPE_CONTENT; \
+	t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX]; \
+	t->next_index = next0; \
+      } \
+    \
+    if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) && \
+		       (b1->flags & VLIB_BUFFER_IS_TRACED))) \
+      { \
+	TRACE_INPUT_PKT_UDP##ipv *t = \
+	  vlib_add_trace (vm, node, b1, sizeof (*t)); \
+	t->pkt_type = HICN_PKT_TYPE_CONTENT; \
+	t->sw_if_index = vnet_buffer (b1)->sw_if_index[VLIB_RX]; \
+	t->next_index = next1; \
+      } \
+    \
+    \
+    /* Verify speculative enqueue, maybe switch current next frame */ \
+    vlib_validate_buffer_enqueue_x2 (vm, node, next_index, \
+				     to_next, n_left_to_next, \
+				     bi0, bi1, next0, next1); \
+  }while(0) \
+
+
+/*
+ * hICN UDP4 face input node function.  Standard VPP dispatch: drain the
+ * frame two packets at a time (x2), then one at a time (x1); the
+ * per-packet work (face lock, mapme/data classification, outer header
+ * strip) lives in the face_input_x1/x2 macros above.
+ */
+static uword
+hicn_face_udp4_input_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
+			      vlib_frame_t * frame)
+{
+  u32 n_left_from, *from, *to_next, next_index;
+
+  from = vlib_frame_vector_args (frame);
+  n_left_from = frame->n_vectors;
+  next_index = node->cached_next_index;
+  /* Per-call counters, folded into the node counters after the loop. */
+  vl_api_hicn_api_node_stats_get_reply_t stats = { 0 };
+
+  while (n_left_from > 0)
+    {
+      u32 n_left_to_next;
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+      /* Dual loop, X2 */
+      while (n_left_from >= 4 && n_left_to_next >= 2)
+	{
+	  face_input_x2 (4);
+	}
+
+      /* Dual loop, X1 */
+      while (n_left_from > 0 && n_left_to_next > 0)
+	{
+	  face_input_x1 (4);
+	}
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+    }
+
+  vlib_node_increment_counter (vm, node->node_index,
+			       HICNFWD_ERROR_DATAS, stats.pkts_data_count);
+
+  return (frame->n_vectors);
+}
+
+/* Format a hicn_face_udp4_input_trace_t record for "show trace". */
+static u8 *
+hicn_face_udp4_input_format_trace (u8 * s, va_list * args)
+{
+  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+  hicn_face_udp4_input_trace_t *trace =
+    va_arg (*args, hicn_face_udp4_input_trace_t *);
+
+  return format (s, "FACE_UDP4_INPUT: pkt: %d, sw_if_index %d, next index %d",
+		 (int) trace->pkt_type, trace->sw_if_index,
+		 trace->next_index);
+}
+
+/*
+ * Node registration for the hICN UDP4 face input node
+ */
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (hicn_face_udp4_input_node) =
+{
+  .function = hicn_face_udp4_input_node_fn,
+  .name = "hicn-face-udp4-input",
+  .vector_size = sizeof (u32),
+  .format_trace = hicn_face_udp4_input_format_trace,
+  .type = VLIB_NODE_TYPE_INTERNAL,
+  .n_errors = ARRAY_LEN (hicn_face_udp4_input_error_strings),
+  .error_strings = hicn_face_udp4_input_error_strings,
+  .n_next_nodes = HICN_FACE_UDP4_INPUT_N_NEXT,
+  /* Dispositions: data to the PCS lookup, inner ICMP (mapme) to the
+   * mapme-ack node, failed face lookups to drop. */
+  .next_nodes =
+  {
+    [HICN_FACE_UDP4_INPUT_NEXT_DATA] = "hicn-data-pcslookup",
+    [HICN_FACE_UDP4_INPUT_NEXT_MAPME] = "hicn-mapme-ack",
+    [HICN_FACE_UDP4_INPUT_NEXT_ERROR_DROP] = "error-drop",
+  },
+};
+/* *INDENT-ON* */
+
+
+/*
+ * hICN UDP6 face input node function.  Same structure as the UDP4
+ * variant: x2 loop then x1 drain, per-packet work in face_input_x1/x2.
+ */
+static uword
+hicn_face_udp6_input_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
+			      vlib_frame_t * frame)
+{
+  u32 n_left_from, *from, *to_next, next_index;
+
+  from = vlib_frame_vector_args (frame);
+  n_left_from = frame->n_vectors;
+  next_index = node->cached_next_index;
+  /* Per-call counters, folded into the node counters after the loop. */
+  vl_api_hicn_api_node_stats_get_reply_t stats = { 0 };
+
+  while (n_left_from > 0)
+    {
+      u32 n_left_to_next;
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+      /* Dual loop, X2 */
+      while (n_left_from >= 4 && n_left_to_next >= 2)
+	{
+	  face_input_x2 (6);
+	}
+
+      /* Dual loop, X1 */
+      while (n_left_from > 0 && n_left_to_next > 0)
+	{
+	  face_input_x1 (6);
+	}
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+    }
+
+  /* NOTE(review): the face_input_x* macros visibly bump only
+   * pkts_data_count, so this PROCESSED increment presumably always adds
+   * 0, and the udp4 twin of this node omits it entirely -- confirm and
+   * harmonize the two node functions. */
+  vlib_node_increment_counter (vm, node->node_index,
+			       HICNFWD_ERROR_PROCESSED, stats.pkts_processed);
+
+  vlib_node_increment_counter (vm, node->node_index,
+			       HICNFWD_ERROR_DATAS, stats.pkts_data_count);
+
+  return (frame->n_vectors);
+}
+
+/* Format a hicn_face_udp6_input_trace_t record for "show trace". */
+static u8 *
+hicn_face_udp6_input_format_trace (u8 * s, va_list * args)
+{
+  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+  hicn_face_udp6_input_trace_t *trace =
+    va_arg (*args, hicn_face_udp6_input_trace_t *);
+
+  return format (s, "FACE_UDP6_INPUT: pkt: %d, sw_if_index %d, next index %d",
+		 (int) trace->pkt_type, trace->sw_if_index,
+		 trace->next_index);
+}
+
+/*
+ * Node registration for the hICN UDP6 face input node
+ */
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (hicn_face_udp6_input_node) =
+{
+  .function = hicn_face_udp6_input_node_fn,
+  .name = "hicn-face-udp6-input",
+  .vector_size = sizeof (u32),
+  .format_trace = hicn_face_udp6_input_format_trace,
+  .type = VLIB_NODE_TYPE_INTERNAL,
+  .n_errors = ARRAY_LEN (hicn_face_udp6_input_error_strings),
+  .error_strings = hicn_face_udp6_input_error_strings,
+  .n_next_nodes = HICN_FACE_UDP6_INPUT_N_NEXT,
+  /* Dispositions: data to the PCS lookup, inner ICMP (mapme) to the
+   * mapme-ack node, failed face lookups to drop. */
+  .next_nodes =
+  {
+    [HICN_FACE_UDP6_INPUT_NEXT_DATA] = "hicn-data-pcslookup",
+    [HICN_FACE_UDP6_INPUT_NEXT_MAPME] = "hicn-mapme-ack",
+    [HICN_FACE_UDP6_INPUT_NEXT_ERROR_DROP] = "error-drop",
+  },
+};
+/* *INDENT-ON* */
+
+/******* Face Output *******/
+
+/*
+ * Prepend the face's precomputed outer IPv4+UDP rewrite to outer_b0
+ * (the caller has already opened the headroom via vlib_buffer_advance),
+ * patch the length fields, incrementally update the v4 checksum and
+ * pick the next node from the face adjacency.
+ */
+always_inline void
+hicn_face_udp4_encap (vlib_main_t * vm,
+		      vlib_buffer_t * outer_b0,
+		      hicn_face_t * face, u32 * next)
+{
+  u16 old_l0 = 0, new_l0;
+  ip_csum_t sum0;
+  ip4_header_t *ip0;
+  udp_header_t *udp0;
+  hicn_face_udp_t *face_udp = (hicn_face_udp_t *) face->data;
+  ip_adjacency_t *adj = adj_get (face->shared.adj);
+
+  /* ip: copy the cached ip4 header + udp header rewrite in one shot. */
+  ip0 = vlib_buffer_get_current (outer_b0);
+  clib_memcpy (ip0, &(face_udp->hdrs.ip4.ip), sizeof (ip4_header_t) +
+	       sizeof (udp_header_t));
+
+  /* Fix UDP length */
+  udp0 = (udp_header_t *) (ip0 + 1);
+
+  new_l0 =
+    clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, outer_b0) -
+			  sizeof (*ip0));
+  udp0->length = new_l0;
+
+  /* Save the template's length, then write the real total length. */
+  old_l0 = ip0->length;
+  ip0->length =
+    clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, outer_b0));
+
+  sum0 = ip0->checksum;
+
+  //old_l0 always 0, see the rewrite setup
+  new_l0 = ip0->length;
+
+  /* Incremental checksum fix-up: old_l0 and new_l0 are both in network
+   * byte order, as ip_csum_update expects. */
+  sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t,
+			 length /* changed member */ );
+  ip0->checksum = sum0;
+
+  /* Forward through the face adjacency; rewrite/arp happen downstream. */
+  vnet_buffer (outer_b0)->ip.adj_index[VLIB_TX] = face->shared.adj;
+
+  *next = adj->lookup_next_index;
+}
+
+/*
+ * Prepend the face's precomputed outer IPv6+UDP rewrite to outer_b0,
+ * patch the length fields, compute the mandatory v6 UDP checksum and
+ * pick the next node from the face adjacency.
+ */
+always_inline void
+hicn_face_udp6_encap (vlib_main_t * vm,
+		      vlib_buffer_t * outer_b0,
+		      hicn_face_t * face, u32 * next)
+{
+  int bogus0;
+  u16 new_l0;
+  ip6_header_t *ip0;
+  udp_header_t *udp0;
+  hicn_face_udp_t *face_udp = (hicn_face_udp_t *) face->data;
+  ip_adjacency_t *adj = adj_get (face->shared.adj);
+
+  /* ip: copy the cached ip6 header + udp header rewrite in one shot. */
+  ip0 = vlib_buffer_get_current (outer_b0);
+  clib_memcpy (ip0, &(face_udp->hdrs.ip6.ip), sizeof (ip6_header_t) +
+	       sizeof (udp_header_t));
+  /* The v6 payload length equals the UDP length because the UDP header
+   * immediately follows the fixed v6 header (no extension headers). */
+  new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, outer_b0)
+				 - sizeof (*ip0));
+  ip0->payload_length = new_l0;
+
+  /* Fix UDP length */
+  udp0 = (udp_header_t *) (ip0 + 1);
+  udp0->length = new_l0;
+
+  udp0->checksum =
+    ip6_tcp_udp_icmp_compute_checksum (vm, outer_b0, ip0, &bogus0);
+
+  ASSERT (bogus0 == 0);
+
+  /* A computed 0 is transmitted as 0xffff: 0 on the wire means "no
+   * checksum", which is not allowed for UDP over IPv6 (RFC 768/8200). */
+  if (udp0->checksum == 0)
+    udp0->checksum = 0xffff;
+
+  vnet_buffer (outer_b0)->ip.adj_index[VLIB_TX] = face->shared.adj;
+
+  *next = adj->lookup_next_index;
+}
+
+/* Error string tables for the UDP4/UDP6 face output nodes (both expand
+ * the shared hicnfwd error list). */
+static char *hicn_face_udp4_output_error_strings[] = {
+#define _(sym, string) string,
+  foreach_hicnfwd_error
+#undef _
+};
+
+static char *hicn_face_udp6_output_error_strings[] = {
+#define _(sym, string) string,
+  foreach_hicnfwd_error
+#undef _
+};
+
+
+/* Trace context struct */
+typedef struct
+{
+  u32 next_index;
+  u32 sw_if_index;
+  u8 pkt_type;
+} hicn_face_udp4_output_trace_t;
+
+/* Trace context struct */
+typedef struct
+{
+  u32 next_index;
+  u32 sw_if_index;
+  u8 pkt_type;
+} hicn_face_udp6_output_trace_t;
+
+/* Per-address-family aliases consumed by the face_output_x* macros via
+ * token pasting on their ipv argument (4 or 6). */
+#define HICN_FACE_UDP_ENCAP_IP4 hicn_face_udp4_encap
+#define HICN_FACE_UDP_ENCAP_IP6 hicn_face_udp6_encap
+
+#define TRACE_OUTPUT_PKT_UDP4 hicn_face_udp4_output_trace_t
+#define TRACE_OUTPUT_PKT_UDP6 hicn_face_udp6_output_trace_t
+
+#define IP_HEADER_4 ip4_header_t
+#define IP_HEADER_6 ip6_header_t
+
+/*
+ * face_output_x1: single-packet body of the UDP face output nodes.
+ * Relies on the caller's locals vm, node, from, n_left_from, to_next,
+ * n_left_to_next, next_index and stats being in scope.  Looks up the
+ * face from the TX adjacency index stashed in the buffer, prepends the
+ * UDP tunnel headers and forwards through the face adjacency; buffers
+ * with no face keep the DROP disposition.
+ */
+#define face_output_x1(ipv)                                             \
+  do {                                                                  \
+    vlib_buffer_t *b0;                                                  \
+    u32 bi0;                                                            \
+    u32 next0 = IP_LOOKUP_NEXT_DROP;                                    \
+    hicn_face_t * face;                                                 \
+                                                                        \
+    /* Prefetch for next iteration. */                                  \
+    if (n_left_from > 1)                                                \
+      {                                                                 \
+        vlib_buffer_t *b1;                                              \
+        b1 = vlib_get_buffer (vm, from[1]);                             \
+        CLIB_PREFETCH (b1, CLIB_CACHE_LINE_BYTES, STORE);               \
+        CLIB_PREFETCH (b1->data, CLIB_CACHE_LINE_BYTES , LOAD);         \
+      }                                                                 \
+    /* Dequeue a packet buffer */                                       \
+    bi0 = from[0];                                                      \
+    from += 1;                                                          \
+    n_left_from -= 1;                                                   \
+                                                                        \
+    b0 = vlib_get_buffer (vm, bi0);                                     \
+    face =                                                              \
+      hicn_dpoi_get_from_idx(vnet_buffer (b0)->ip.adj_index[VLIB_TX]);  \
+                                                                        \
+    if (PREDICT_TRUE(face != NULL))                                     \
+      {                                                                 \
+        /* Adjust vlib buffer. Create space for the udp tunnel. */      \
+        vlib_buffer_advance(b0, -(sizeof (IP_HEADER_##ipv) +            \
+                                  sizeof (udp_header_t)));              \
+                                                                        \
+                                                                        \
+        HICN_FACE_UDP_ENCAP_IP##ipv                                     \
+          (vm, b0, face, &next0);                                       \
+        stats.pkts_interest_count += 1;                                 \
+      }                                                                 \
+                                                                        \
+                                                                        \
+    if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) &&          \
+                       (b0->flags & VLIB_BUFFER_IS_TRACED)))            \
+      {                                                                 \
+        TRACE_OUTPUT_PKT_UDP##ipv *t =                                  \
+          vlib_add_trace (vm, node, b0, sizeof (*t));                   \
+        t->pkt_type = HICN_PKT_TYPE_INTEREST;                           \
+        t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];        \
+        t->next_index = next0;                                          \
+      }                                                                 \
+                                                                        \
+    to_next[0] = bi0;                                                   \
+    to_next += 1;                                                       \
+    n_left_to_next -= 1;                                                \
+                                                                        \
+                                                                        \
+    /* Verify speculative enqueue, maybe switch current next frame */   \
+    vlib_validate_buffer_enqueue_x1 (vm, node, next_index,              \
+                                     to_next, n_left_to_next,           \
+                                     bi0, next0);                       \
+  } while(0)                                                            \
+
+
+
+/*
+ * face_output_x2: dual-packet variant of face_output_x1; same contract
+ * (uses the caller's locals vm, node, from, n_left_from, to_next,
+ * n_left_to_next, next_index, stats).  Fix vs the original: BOTH
+ * buffers are traced -- the original only traced b0, so the second
+ * packet of each pair never appeared in "show trace".
+ */
+#define face_output_x2(ipv)                                             \
+  do {                                                                  \
+    vlib_buffer_t *b0, *b1;                                             \
+    u32 bi0, bi1;                                                       \
+    u32 next0 = IP_LOOKUP_NEXT_DROP;                                    \
+    u32 next1 = IP_LOOKUP_NEXT_DROP;                                    \
+    hicn_face_t *face0, *face1;                                         \
+                                                                        \
+    /* Prefetch for next iteration. */                                  \
+    {                                                                   \
+      vlib_buffer_t *b2, *b3;                                           \
+      b2 = vlib_get_buffer (vm, from[2]);                               \
+      b3 = vlib_get_buffer (vm, from[3]);                               \
+      CLIB_PREFETCH (b2, CLIB_CACHE_LINE_BYTES, STORE);                 \
+      CLIB_PREFETCH (b3, CLIB_CACHE_LINE_BYTES, STORE);                 \
+      CLIB_PREFETCH (b2->data, CLIB_CACHE_LINE_BYTES , LOAD);           \
+      CLIB_PREFETCH (b3->data, CLIB_CACHE_LINE_BYTES , LOAD);           \
+    }                                                                   \
+                                                                        \
+    /* Dequeue two packet buffers */                                    \
+    bi0 = from[0];                                                      \
+    bi1 = from[1];                                                      \
+    from += 2;                                                          \
+    n_left_from -= 2;                                                   \
+    to_next[0] = bi0;                                                   \
+    to_next[1] = bi1;                                                   \
+    to_next += 2;                                                       \
+    n_left_to_next -= 2;                                                \
+                                                                        \
+    b0 = vlib_get_buffer (vm, bi0);                                     \
+    b1 = vlib_get_buffer (vm, bi1);                                     \
+                                                                        \
+    face0 =                                                             \
+      hicn_dpoi_get_from_idx(vnet_buffer (b0)->ip.adj_index[VLIB_TX]);  \
+    face1 =                                                             \
+      hicn_dpoi_get_from_idx(vnet_buffer (b1)->ip.adj_index[VLIB_TX]);  \
+                                                                        \
+    if (PREDICT_TRUE(face0 != NULL))                                    \
+      {                                                                 \
+        /* Adjust vlib buffer. Create space for the udp tunnel. */      \
+        vlib_buffer_advance(b0, -(sizeof (IP_HEADER_##ipv) +            \
+                                  sizeof (udp_header_t)));              \
+                                                                        \
+        HICN_FACE_UDP_ENCAP_IP##ipv                                     \
+          (vm, b0, face0, &next0);                                      \
+        stats.pkts_interest_count += 1;                                 \
+      }                                                                 \
+                                                                        \
+    if (PREDICT_TRUE(face1 != NULL))                                    \
+      {                                                                 \
+        /* Adjust vlib buffer. Create space for the udp tunnel. */      \
+        vlib_buffer_advance(b1, -(sizeof (IP_HEADER_##ipv) +            \
+                                  sizeof (udp_header_t)));              \
+                                                                        \
+        HICN_FACE_UDP_ENCAP_IP##ipv                                     \
+          (vm, b1, face1, &next1);                                      \
+        stats.pkts_interest_count += 1;                                 \
+      }                                                                 \
+                                                                        \
+    if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) &&          \
+                       (b0->flags & VLIB_BUFFER_IS_TRACED)))            \
+      {                                                                 \
+        TRACE_OUTPUT_PKT_UDP##ipv *t =                                  \
+          vlib_add_trace (vm, node, b0, sizeof (*t));                   \
+        t->pkt_type = HICN_PKT_TYPE_INTEREST;                           \
+        t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];        \
+        t->next_index = next0;                                          \
+      }                                                                 \
+                                                                        \
+    /* Fix: also trace the second packet of the pair. */                \
+    if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) &&          \
+                       (b1->flags & VLIB_BUFFER_IS_TRACED)))            \
+      {                                                                 \
+        TRACE_OUTPUT_PKT_UDP##ipv *t =                                  \
+          vlib_add_trace (vm, node, b1, sizeof (*t));                   \
+        t->pkt_type = HICN_PKT_TYPE_INTEREST;                           \
+        t->sw_if_index = vnet_buffer (b1)->sw_if_index[VLIB_RX];        \
+        t->next_index = next1;                                          \
+      }                                                                 \
+                                                                        \
+    /* Verify speculative enqueue, maybe switch current next frame */   \
+    vlib_validate_buffer_enqueue_x2 (vm, node, next_index,              \
+                                     to_next, n_left_to_next,           \
+                                     bi0, bi1, next0, next1);           \
+  } while(0)                                                            \
+
+
+
+/*
+ * hICN UDP4 face output node function: encapsulates outgoing interests
+ * into the face's UDP4 tunnel.  Per-packet work is in the
+ * face_output_x1/x2 macros above.
+ */
+static uword
+hicn_face_udp4_output_node_fn (vlib_main_t * vm,
+			       vlib_node_runtime_t * node,
+			       vlib_frame_t * frame)
+{
+  u32 n_left_from, *from, *to_next, next_index;
+  /* Per-call counters, folded into the node counters after the loop. */
+  vl_api_hicn_api_node_stats_get_reply_t stats = { 0 };
+
+  from = vlib_frame_vector_args (frame);
+  n_left_from = frame->n_vectors;
+  next_index = node->cached_next_index;
+
+  while (n_left_from > 0)
+    {
+      u32 n_left_to_next;
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+      /* Dual loop, X2 */
+      while (n_left_from >= 4 && n_left_to_next >= 2)
+	{
+	  face_output_x2 (4);
+	}
+
+      /* Dual loop, X1 */
+      while (n_left_from > 0 && n_left_to_next > 0)
+	{
+	  face_output_x1 (4);
+	}
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+    }
+
+  vlib_node_increment_counter (vm, node->node_index,
+			       HICNFWD_ERROR_INTERESTS,
+			       stats.pkts_interest_count);
+
+  return (frame->n_vectors);
+}
+
+/* Format a hicn_face_udp4_output_trace_t record for "show trace". */
+static u8 *
+hicn_face_udp4_output_format_trace (u8 * s, va_list * args)
+{
+  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+  hicn_face_udp4_output_trace_t *trace =
+    va_arg (*args, hicn_face_udp4_output_trace_t *);
+
+  return format (s, "FACE_UDP4_OUTPUT: pkt: %d, sw_if_index %d, next index %d",
+		 (int) trace->pkt_type, trace->sw_if_index,
+		 trace->next_index);
+}
+
+/*
+ * Node registration for the hICN UDP4 face output node
+ */
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (hicn_face_udp4_output_node) =
+{
+  /* Encapsulates interests into the UDP4 tunnel and hands them to the
+   * ip4 rewrite path selected by the face adjacency. */
+  .function = hicn_face_udp4_output_node_fn,
+  .name = "hicn-face-udp4-output",
+  .vector_size = sizeof (u32),
+  .format_trace = hicn_face_udp4_output_format_trace,
+  .type = VLIB_NODE_TYPE_INTERNAL,
+  .n_errors = ARRAY_LEN (hicn_face_udp4_output_error_strings),
+  .error_strings = hicn_face_udp4_output_error_strings,
+  .n_next_nodes = IP4_LOOKUP_N_NEXT,
+  /* Reusing the list of nodes from lookup to be compatible with arp */
+  .next_nodes = IP4_LOOKUP_NEXT_NODES,
+};
+/* *INDENT-ON* */
+
+
+/*
+ * hICN UDP6 face output node function: encapsulates outgoing interests
+ * into the face's UDP6 tunnel.  Per-packet work is in the
+ * face_output_x1/x2 macros above.
+ */
+static uword
+hicn_face_udp6_output_node_fn (vlib_main_t * vm,
+			       vlib_node_runtime_t * node,
+			       vlib_frame_t * frame)
+{
+  u32 n_left_from, *from, *to_next, next_index;
+  /* Per-call counters, folded into the node counters after the loop. */
+  vl_api_hicn_api_node_stats_get_reply_t stats = { 0 };
+
+  from = vlib_frame_vector_args (frame);
+  n_left_from = frame->n_vectors;
+  next_index = node->cached_next_index;
+
+  while (n_left_from > 0)
+    {
+      u32 n_left_to_next;
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+      /* Dual loop, X2 */
+      while (n_left_from >= 4 && n_left_to_next >= 2)
+	{
+	  face_output_x2 (6);
+	}
+
+      /* Single loop, X1 */
+      while (n_left_from > 0 && n_left_to_next > 0)
+	{
+	  face_output_x1 (6);
+	}
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+    }
+
+  vlib_node_increment_counter (vm, node->node_index,
+			       HICNFWD_ERROR_INTERESTS,
+			       stats.pkts_interest_count);
+
+  return (frame->n_vectors);
+}
+
+/* Format a hicn_face_udp6_output_trace_t record for "show trace". */
+static u8 *
+hicn_face_udp6_output_format_trace (u8 * s, va_list * args)
+{
+  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+  hicn_face_udp6_output_trace_t *trace =
+    va_arg (*args, hicn_face_udp6_output_trace_t *);
+
+  return format (s, "FACE_UDP6_OUTPUT: pkt: %d, sw_if_index %d, next index %d",
+		 (int) trace->pkt_type, trace->sw_if_index,
+		 trace->next_index);
+}
+
+/*
+ * Node registration for the hICN UDP6 face output node
+ */
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (hicn_face_udp6_output_node) =
+{
+  /* Encapsulates interests into the UDP6 tunnel and hands them to the
+   * ip6 rewrite path selected by the face adjacency. */
+  .function = hicn_face_udp6_output_node_fn,
+  .name = "hicn-face-udp6-output",
+  .vector_size = sizeof (u32),
+  .format_trace = hicn_face_udp6_output_format_trace,
+  .type = VLIB_NODE_TYPE_INTERNAL,
+  .n_errors = ARRAY_LEN (hicn_face_udp6_output_error_strings),
+  .error_strings = hicn_face_udp6_output_error_strings,
+  .n_next_nodes = IP6_LOOKUP_N_NEXT,
+  /* Reusing the list of nodes from lookup to be compatible with neighbour discovery */
+  .next_nodes = IP6_LOOKUP_NEXT_NODES,
+};
+/* *INDENT-ON* */
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/hicn-plugin/src/faces/udp/face_udp_node.h b/hicn-plugin/src/faces/udp/face_udp_node.h
new file mode 100755
index 000000000..c759312c8
--- /dev/null
+++ b/hicn-plugin/src/faces/udp/face_udp_node.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HICN_FACE_UDP_NODE_H__
+#define __HICN_FACE_UDP_NODE_H__
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+
+/* Graph nodes implementing complete UDP tunnel faces, defined in
+ * face_udp_node.c: the input nodes decapsulate incoming packets, the
+ * output nodes encapsulate outgoing interests.
+ * NOTE(review): identifiers starting with a double underscore are
+ * reserved for the implementation (C11 7.1.3); consider renaming the
+ * guard to HICN_FACE_UDP_NODE_H. */
+extern vlib_node_registration_t hicn_face_udp4_input_node;
+extern vlib_node_registration_t hicn_face_udp6_input_node;
+extern vlib_node_registration_t hicn_face_udp4_output_node;
+extern vlib_node_registration_t hicn_face_udp6_output_node;
+
+#endif // __HICN_FACE_UDP_NODE_H__
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/hicn-plugin/src/faces/udp/iface_udp_node.c b/hicn-plugin/src/faces/udp/iface_udp_node.c
new file mode 100755
index 000000000..ddea31b4c
--- /dev/null
+++ b/hicn-plugin/src/faces/udp/iface_udp_node.c
@@ -0,0 +1,894 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "iface_udp_node.h"
+#include "dpo_udp.h"
+#include "../face.h"
+
+#include "../../infra.h"
+#include "../../hicn.h"
+
+/**
+ * @File
+ *
+ * Definition of the nodes for udp incomplete faces.
+ */
+
+/* Node registrations; definitions appear further down in this file. */
+vlib_node_registration_t hicn_iface_udp4_input_node;
+vlib_node_registration_t hicn_iface_udp6_input_node;
+vlib_node_registration_t hicn_iface_udp4_output_node;
+vlib_node_registration_t hicn_iface_udp6_output_node;
+
+/* Cached vlib edge (next-node index) from the forwarding nodes to each
+ * iface output node; set once in hicn_iface_udp_init(). */
+u32 data_fwd_face_udp4_vlib_edge;
+u32 data_fwd_face_udp6_vlib_edge;
+
+/*
+ * Wire the iface UDP output nodes as next nodes of both hicn-data-fwd
+ * and hicn-interest-hitcs, caching the resulting edge index so input
+ * processing can reuse it.
+ */
+void
+hicn_iface_udp_init (vlib_main_t * vm)
+{
+  data_fwd_face_udp4_vlib_edge = vlib_node_add_next (vm,
+						     hicn_data_fwd_node.index,
+						     hicn_iface_udp4_output_node.
+						     index);
+
+  data_fwd_face_udp6_vlib_edge = vlib_node_add_next (vm,
+						     hicn_data_fwd_node.index,
+						     hicn_iface_udp6_output_node.
+						     index);
+
+  u32 temp_index4 = vlib_node_add_next (vm,
+					hicn_interest_hitcs_node.index,
+					hicn_iface_udp4_output_node.index);
+  u32 temp_index6 = vlib_node_add_next (vm,
+					hicn_interest_hitcs_node.index,
+					hicn_iface_udp6_output_node.index);
+
+  /* Both parent nodes must hand out the same edge index, since only one
+   * value per address family is cached above.  NOTE(review):
+   * temp_index4/6 are unused when ASSERT compiles out -- consider
+   * marking them CLIB_UNUSED. */
+  ASSERT (temp_index4 == data_fwd_face_udp4_vlib_edge);
+  ASSERT (temp_index6 == data_fwd_face_udp6_vlib_edge);
+}
+
+/* Error string tables for the UDP4/UDP6 iface input nodes. */
+static char *hicn_iface_udp4_input_error_strings[] = {
+#define _(sym, string) string,
+  foreach_hicnfwd_error
+#undef _
+};
+
+static char *hicn_iface_udp6_input_error_strings[] = {
+#define _(sym, string) string,
+  foreach_hicnfwd_error
+#undef _
+};
+
+/* Return the cached edge index towards the UDP4 iface output node. */
+u32
+get_face_udp4_output_node (void)
+{
+  return data_fwd_face_udp4_vlib_edge;
+}
+
+/* Return the cached edge index towards the UDP6 iface output node. */
+u32
+get_face_udp6_output_node (void)
+{
+  return data_fwd_face_udp6_vlib_edge;
+}
+
+/* Trace context struct */
+typedef struct
+{
+  u32 next_index;
+  u32 sw_if_index;
+  u8 pkt_type;
+} hicn_iface_udp4_input_trace_t;
+
+/* Dispositions of the UDP4 iface input node. */
+typedef enum
+{
+  HICN_IFACE_UDP4_INPUT_NEXT_INTEREST,
+  HICN_IFACE_UDP4_INPUT_NEXT_MAPME,
+  HICN_IFACE_UDP4_INPUT_NEXT_ERROR_DROP,
+  HICN_IFACE_UDP4_INPUT_N_NEXT,
+} hicn_iface_udp4_input_next_t;
+
+/* Trace context struct */
+typedef struct
+{
+  u32 next_index;
+  u32 sw_if_index;
+  u8 pkt_type;
+} hicn_iface_udp6_input_trace_t;
+
+/* Dispositions of the UDP6 iface input node. */
+typedef enum
+{
+  HICN_IFACE_UDP6_INPUT_NEXT_INTEREST,
+  HICN_IFACE_UDP6_INPUT_NEXT_MAPME,
+  HICN_IFACE_UDP6_INPUT_NEXT_ERROR_DROP,
+  HICN_IFACE_UDP6_INPUT_N_NEXT,
+} hicn_iface_udp6_input_next_t;
+
+/* Per-address-family aliases consumed by the iface_input_x* macros via
+ * token pasting on their ipv argument (4 or 6). */
+#define ERROR_INPUT_UDP4 HICN_IFACE_UDP4_INPUT_NEXT_ERROR_DROP
+#define ERROR_INPUT_UDP6 HICN_IFACE_UDP6_INPUT_NEXT_ERROR_DROP
+
+#define IP_HEADER_4 ip4_header_t
+#define IP_HEADER_6 ip6_header_t
+
+#define NEXT_MAPME_UDP4 HICN_IFACE_UDP4_INPUT_NEXT_MAPME
+#define NEXT_MAPME_UDP6 HICN_IFACE_UDP6_INPUT_NEXT_MAPME
+
+#define NEXT_INTEREST_UDP4 HICN_IFACE_UDP4_INPUT_NEXT_INTEREST
+#define NEXT_INTEREST_UDP6 HICN_IFACE_UDP6_INPUT_NEXT_INTEREST
+
+#define HICN_IFACE_UDP_ADD_LOCK_IP4 hicn_dpo_udp4_add_and_lock
+#define HICN_IFACE_UDP_ADD_LOCK_IP6 hicn_dpo_udp6_add_and_lock
+
+#define GET_FACE_UDP4 get_face_udp4_output_node
+#define GET_FACE_UDP6 get_face_udp6_output_node
+
+#define TRACE_INPUT_PKT_UDP4 hicn_iface_udp4_input_trace_t
+#define TRACE_INPUT_PKT_UDP6 hicn_iface_udp6_input_trace_t
+
+/*
+ * iface_input_x1: single-packet body of the UDP iface input nodes.
+ * Uses the caller's locals vm, node, from, n_left_from, to_next,
+ * n_left_to_next, next_index and stats.  Classifies the inner packet
+ * (MAPME control vs interest), creates/locks the incomplete face for
+ * the (dst, src, ports) tuple and strips the outer IP+UDP headers.
+ */
+#define iface_input_x1(ipv)                                             \
+  do {                                                                  \
+    vlib_buffer_t *b0;                                                  \
+    u32 bi0;                                                            \
+    u32 next0 = ERROR_INPUT_UDP##ipv;                                   \
+    IP_HEADER_##ipv * ip_hdr = NULL;                                    \
+    u8 * inner_ip_hdr = NULL;                                           \
+    udp_header_t * udp_hdr = NULL;                                      \
+    hicn_buffer_t * hicnb0;                                             \
+    /* Prefetch for next iteration. */                                  \
+    if (n_left_from > 1)                                                \
+      {                                                                 \
+        vlib_buffer_t *b1;                                              \
+        b1 = vlib_get_buffer (vm, from[1]);                             \
+        CLIB_PREFETCH (b1, CLIB_CACHE_LINE_BYTES, STORE);               \
+        CLIB_PREFETCH (b1->data, CLIB_CACHE_LINE_BYTES , LOAD);         \
+      }                                                                 \
+    /* Dequeue a packet buffer */                                       \
+    bi0 = from[0];                                                      \
+    from += 1;                                                          \
+    n_left_from -= 1;                                                   \
+    to_next[0] = bi0;                                                   \
+    to_next += 1;                                                       \
+    n_left_to_next -= 1;                                                \
+                                                                        \
+    b0 = vlib_get_buffer (vm, bi0);                                     \
+    ip_hdr = (IP_HEADER_##ipv *) vlib_buffer_get_current(b0);           \
+    udp_hdr = (udp_header_t *) (ip_hdr + 1);                            \
+    hicnb0 = hicn_get_buffer(b0);                                       \
+                                                                        \
+    stats.pkts_interest_count += 1;                                     \
+                                                                        \
+    inner_ip_hdr = (u8 *)(udp_hdr + 1);                                 \
+    /* The inner IP version is the HIGH nibble of byte 0 (0x4X for v4,  \
+     * 0x6X for v6).  The original test ((inner_ip_hdr[0] & 2) >> 1)    \
+     * read bit 1 of the low nibble (IHL / traffic class bits) and so   \
+     * never detected v6. */                                            \
+    u8 is_v6 = ((inner_ip_hdr[0] & 0xf0) == 0x60);                      \
+    u8 is_icmp = is_v6*(inner_ip_hdr[7] == IPPROTO_ICMPV6) +            \
+      (1 - is_v6)*(inner_ip_hdr[10] == IPPROTO_ICMPV4);                 \
+                                                                        \
+    next0 = is_icmp*NEXT_MAPME_UDP##ipv +                               \
+      (1-is_icmp)*NEXT_INTEREST_UDP##ipv;                               \
+                                                                        \
+    HICN_IFACE_UDP_ADD_LOCK_IP##ipv                                     \
+      (&(hicnb0->face_dpo_id),                                          \
+       &(ip_hdr->dst_address),                                          \
+       &(ip_hdr->src_address),                                          \
+       udp_hdr->dst_port,                                               \
+       udp_hdr->src_port,                                               \
+       GET_FACE_UDP##ipv                                                \
+       (),                                                              \
+       &hicnb0->is_appface);                                            \
+                                                                        \
+    /* Strip the outer IP + UDP headers. */                             \
+    vlib_buffer_advance(b0, sizeof(IP_HEADER_##ipv) +                   \
+                        sizeof(udp_header_t));                          \
+                                                                        \
+    if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) &&          \
+                       (b0->flags & VLIB_BUFFER_IS_TRACED)))            \
+      {                                                                 \
+        TRACE_INPUT_PKT_UDP##ipv *t =                                   \
+          vlib_add_trace (vm, node, b0, sizeof (*t));                   \
+        t->pkt_type = HICN_PKT_TYPE_INTEREST;                           \
+        t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];        \
+        t->next_index = next0;                                          \
+      }                                                                 \
+                                                                        \
+                                                                        \
+    /* Verify speculative enqueue, maybe switch current next frame */   \
+    vlib_validate_buffer_enqueue_x1 (vm, node, next_index,              \
+                                     to_next, n_left_to_next,           \
+                                     bi0, next0);                       \
+  }while(0)
+
+
+/*
+ * iface_input_x2: dual-packet variant of iface_input_x1, with prefetch
+ * of the following pair.  Fixes vs the original: (1) next0 is
+ * initialized in its declaration (it was left indeterminate until the
+ * classification below); (2) the inner IP version is read from the
+ * high nibble of byte 0 instead of the bogus ((byte & 2) >> 1) test.
+ */
+#define iface_input_x2(ipv)                                             \
+  do {                                                                  \
+    vlib_buffer_t *b0, *b1;                                             \
+    u32 bi0, bi1;                                                       \
+    u32 next0 = ERROR_INPUT_UDP##ipv, next1 = ERROR_INPUT_UDP##ipv;     \
+    IP_HEADER_##ipv * ip_hdr0 = NULL, *ip_hdr1 = NULL;                  \
+    u8 * inner_ip_hdr0 = NULL, *inner_ip_hdr1 = NULL;                   \
+    udp_header_t * udp_hdr0 = NULL, *udp_hdr1 = NULL;                   \
+    hicn_buffer_t * hicnb0, *hicnb1;                                    \
+                                                                        \
+    /* Prefetch for next iteration. */                                  \
+    {                                                                   \
+      vlib_buffer_t *b2, *b3;                                           \
+      b2 = vlib_get_buffer (vm, from[2]);                               \
+      b3 = vlib_get_buffer (vm, from[3]);                               \
+      CLIB_PREFETCH (b2, CLIB_CACHE_LINE_BYTES, STORE);                 \
+      CLIB_PREFETCH (b3, CLIB_CACHE_LINE_BYTES, STORE);                 \
+      CLIB_PREFETCH (b2->data, CLIB_CACHE_LINE_BYTES , LOAD);           \
+      CLIB_PREFETCH (b3->data, CLIB_CACHE_LINE_BYTES , LOAD);           \
+    }                                                                   \
+                                                                        \
+    /* Dequeue two packet buffers */                                    \
+    bi0 = from[0];                                                      \
+    bi1 = from[1];                                                      \
+    from += 2;                                                          \
+    n_left_from -= 2;                                                   \
+    to_next[0] = bi0;                                                   \
+    to_next[1] = bi1;                                                   \
+    to_next += 2;                                                       \
+    n_left_to_next -= 2;                                                \
+                                                                        \
+    b0 = vlib_get_buffer (vm, bi0);                                     \
+    b1 = vlib_get_buffer (vm, bi1);                                     \
+    ip_hdr0 = (IP_HEADER_##ipv *) vlib_buffer_get_current(b0);          \
+    ip_hdr1 = (IP_HEADER_##ipv *) vlib_buffer_get_current(b1);          \
+    udp_hdr0 = (udp_header_t *) (ip_hdr0 + 1);                          \
+    udp_hdr1 = (udp_header_t *) (ip_hdr1 + 1);                          \
+    hicnb0 = hicn_get_buffer(b0);                                       \
+    hicnb1 = hicn_get_buffer(b1);                                       \
+                                                                        \
+    stats.pkts_interest_count += 2;                                     \
+                                                                        \
+    inner_ip_hdr0 = (u8 *)(udp_hdr0 + 1);                               \
+    inner_ip_hdr1 = (u8 *)(udp_hdr1 + 1);                               \
+    /* Inner IP version is the high nibble of byte 0 (0x4X v4, 0x6X     \
+     * v6); see the note on iface_input_x1. */                          \
+    u8 is_v6_0 = ((inner_ip_hdr0[0] & 0xf0) == 0x60);                   \
+    u8 is_v6_1 = ((inner_ip_hdr1[0] & 0xf0) == 0x60);                   \
+    u8 is_icmp0 = is_v6_0*(inner_ip_hdr0[7] == IPPROTO_ICMPV6) +        \
+      (1 - is_v6_0)*(inner_ip_hdr0[10] == IPPROTO_ICMPV4);              \
+    u8 is_icmp1 = is_v6_1*(inner_ip_hdr1[7] == IPPROTO_ICMPV6) +        \
+      (1 - is_v6_1)*(inner_ip_hdr1[10] == IPPROTO_ICMPV4);              \
+                                                                        \
+    next0 = is_icmp0*NEXT_MAPME_UDP##ipv +                              \
+      (1-is_icmp0)*NEXT_INTEREST_UDP##ipv;                              \
+    next1 = is_icmp1*NEXT_MAPME_UDP##ipv +                              \
+      (1-is_icmp1)*NEXT_INTEREST_UDP##ipv;                              \
+                                                                        \
+    HICN_IFACE_UDP_ADD_LOCK_IP##ipv                                     \
+      (&(hicnb0->face_dpo_id),                                          \
+       &(ip_hdr0->dst_address),                                         \
+       &(ip_hdr0->src_address),                                         \
+       udp_hdr0->dst_port,                                              \
+       udp_hdr0->src_port,                                              \
+       GET_FACE_UDP##ipv                                                \
+       (),                                                              \
+       &hicnb0->is_appface);                                            \
+                                                                        \
+    HICN_IFACE_UDP_ADD_LOCK_IP##ipv                                     \
+      (&(hicnb1->face_dpo_id),                                          \
+       &(ip_hdr1->dst_address),                                         \
+       &(ip_hdr1->src_address),                                         \
+       udp_hdr1->dst_port,                                              \
+       udp_hdr1->src_port,                                              \
+       GET_FACE_UDP##ipv                                                \
+       (),                                                              \
+       &hicnb1->is_appface);                                            \
+                                                                        \
+    /* Strip the outer IP + UDP headers. */                             \
+    vlib_buffer_advance(b0, sizeof(IP_HEADER_##ipv) +                   \
+                        sizeof(udp_header_t));                          \
+                                                                        \
+    vlib_buffer_advance(b1, sizeof(IP_HEADER_##ipv) +                   \
+                        sizeof(udp_header_t));                          \
+                                                                        \
+    if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) &&          \
+                       (b0->flags & VLIB_BUFFER_IS_TRACED)))            \
+      {                                                                 \
+        TRACE_INPUT_PKT_UDP##ipv *t =                                   \
+          vlib_add_trace (vm, node, b0, sizeof (*t));                   \
+        t->pkt_type = HICN_PKT_TYPE_INTEREST;                           \
+        t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];        \
+        t->next_index = next0;                                          \
+      }                                                                 \
+                                                                        \
+    if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) &&          \
+                       (b1->flags & VLIB_BUFFER_IS_TRACED)))            \
+      {                                                                 \
+        TRACE_INPUT_PKT_UDP##ipv *t =                                   \
+          vlib_add_trace (vm, node, b1, sizeof (*t));                   \
+        t->pkt_type = HICN_PKT_TYPE_INTEREST;                           \
+        t->sw_if_index = vnet_buffer (b1)->sw_if_index[VLIB_RX];        \
+        t->next_index = next1;                                          \
+      }                                                                 \
+                                                                        \
+    /* Verify speculative enqueue, maybe switch current next frame */   \
+    vlib_validate_buffer_enqueue_x2 (vm, node, next_index,              \
+                                     to_next, n_left_to_next,           \
+                                     bi0, bi1, next0, next1);           \
+  }while(0)
+
+
+/*
+ * hICN UDP4 iface input node function: processes interests arriving on
+ * incomplete UDP4 faces.  Per-packet work is in the iface_input_x1/x2
+ * macros above.
+ */
+static uword
+hicn_iface_udp4_input_node_fn (vlib_main_t * vm,
+			       vlib_node_runtime_t * node,
+			       vlib_frame_t * frame)
+{
+  u32 n_left_from, *from, *to_next, next_index;
+  /* Per-call counters, folded into the node counters after the loop. */
+  vl_api_hicn_api_node_stats_get_reply_t stats = { 0 };
+
+  from = vlib_frame_vector_args (frame);
+  n_left_from = frame->n_vectors;
+  next_index = node->cached_next_index;
+
+  while (n_left_from > 0)
+    {
+      u32 n_left_to_next;
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+      /* Dual loop, X2 */
+      while (n_left_from >= 4 && n_left_to_next >= 2)
+	{
+	  iface_input_x2 (4);
+	}
+
+      /* Dual loop, X1 */
+      while (n_left_from > 0 && n_left_to_next > 0)
+	{
+	  iface_input_x1 (4);
+	}
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+    }
+
+  vlib_node_increment_counter (vm, node->node_index,
+			       HICNFWD_ERROR_INTERESTS,
+			       stats.pkts_interest_count);
+
+  return (frame->n_vectors);
+}
+
+/* Format a hicn_iface_udp4_input_trace_t record for "show trace". */
+static u8 *
+hicn_iface_udp4_input_format_trace (u8 * s, va_list * args)
+{
+  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+  hicn_iface_udp4_input_trace_t *trace =
+    va_arg (*args, hicn_iface_udp4_input_trace_t *);
+
+  return format (s, "IFACE_UDP4_INPUT: pkt: %d, sw_if_index %d, next index %d",
+		 (int) trace->pkt_type, trace->sw_if_index,
+		 trace->next_index);
+}
+
+/*
+ * Node registration for the interest forwarder node
+ */
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (hicn_iface_udp4_input_node) =
+
+{
+  .function = hicn_iface_udp4_input_node_fn,
+  .name = "hicn-iface-udp4-input",
+  .vector_size = sizeof (u32),
+  .format_trace = hicn_iface_udp4_input_format_trace,
+  .type = VLIB_NODE_TYPE_INTERNAL,
+  .n_errors = ARRAY_LEN (hicn_iface_udp4_input_error_strings),
+  .error_strings = hicn_iface_udp4_input_error_strings,
+  .n_next_nodes = HICN_IFACE_UDP4_INPUT_N_NEXT,
+  /* Dispositions: interests to the PCS lookup, inner ICMP (mapme) to
+   * the mapme-ctrl node, failures to drop. */
+  .next_nodes =
+  {
+    [HICN_IFACE_UDP4_INPUT_NEXT_INTEREST] = "hicn-interest-pcslookup",
+    [HICN_IFACE_UDP4_INPUT_NEXT_MAPME] = "hicn-mapme-ctrl",
+    [HICN_IFACE_UDP4_INPUT_NEXT_ERROR_DROP] = "error-drop",
+  },
+};
+/* *INDENT-ON* */
+
+
+/*
+ * hICN UDP6 iface input node function: processes interests arriving on
+ * incomplete UDP6 faces.  Per-packet work is in the iface_input_x1/x2
+ * macros above.
+ */
+static uword
+hicn_iface_udp6_input_node_fn (vlib_main_t * vm,
+			       vlib_node_runtime_t * node,
+			       vlib_frame_t * frame)
+{
+  u32 n_left_from, *from, *to_next, next_index;
+  /* Per-call counters, folded into the node counters after the loop. */
+  vl_api_hicn_api_node_stats_get_reply_t stats = { 0 };
+
+
+  from = vlib_frame_vector_args (frame);
+  n_left_from = frame->n_vectors;
+  next_index = node->cached_next_index;
+
+  while (n_left_from > 0)
+    {
+      u32 n_left_to_next;
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+      /* Dual loop, X2 */
+      while (n_left_from >= 4 && n_left_to_next >= 2)
+	{
+	  iface_input_x2 (6);
+	}
+
+      /* Dual loop, X1 */
+      while (n_left_from > 0 && n_left_to_next > 0)
+	{
+	  iface_input_x1 (6);
+	}
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+    }
+
+  vlib_node_increment_counter (vm, node->node_index,
+			       HICNFWD_ERROR_INTERESTS,
+			       stats.pkts_interest_count);
+
+  return (frame->n_vectors);
+}
+
+/* Format a hicn_iface_udp6_input_trace_t record for "show trace". */
+static u8 *
+hicn_iface_udp6_input_format_trace (u8 * s, va_list * args)
+{
+  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+  hicn_iface_udp6_input_trace_t *trace =
+    va_arg (*args, hicn_iface_udp6_input_trace_t *);
+
+  return format (s, "IFACE_UDP6_INPUT: pkt: %d, sw_if_index %d, next index %d",
+		 (int) trace->pkt_type, trace->sw_if_index,
+		 trace->next_index);
+}
+
+/*
+ * Node registration for the interest forwarder node
+ */
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (hicn_iface_udp6_input_node) =
+{
+  .function = hicn_iface_udp6_input_node_fn,
+  .name = "hicn-iface-udp6-input",
+  .vector_size = sizeof (u32),
+  .format_trace = hicn_iface_udp6_input_format_trace,
+  .type = VLIB_NODE_TYPE_INTERNAL,
+  .n_errors = ARRAY_LEN (hicn_iface_udp6_input_error_strings),
+  .error_strings = hicn_iface_udp6_input_error_strings,
+  .n_next_nodes = HICN_IFACE_UDP6_INPUT_N_NEXT,
+  /* Dispositions: interests to the PCS lookup, inner ICMP (mapme) to
+   * the mapme-ctrl node, failures to drop. */
+  .next_nodes =
+  {
+    [HICN_IFACE_UDP6_INPUT_NEXT_INTEREST] = "hicn-interest-pcslookup",
+    [HICN_IFACE_UDP6_INPUT_NEXT_MAPME] = "hicn-mapme-ctrl",
+    [HICN_IFACE_UDP6_INPUT_NEXT_ERROR_DROP] = "error-drop",
+  },
+};
+/* *INDENT-ON* */
+
+/******* Iface Output *******/
+
+/*
+ * Prepend the face's precomputed outer IPv4+UDP rewrite to b0 and fix
+ * the length and checksum fields.  Uses vlib_buffer_advance (the VPP
+ * idiom, with bounds ASSERTs, matching the face_output_x* macros)
+ * instead of poking current_data/current_length by hand.
+ */
+always_inline void
+hicn_iface_udp4_encap (vlib_main_t * vm,
+		       vlib_buffer_t * b0, hicn_face_t * face)
+{
+  u16 new_l0 = 0;
+  ip4_header_t *ip0;
+  udp_header_t *udp0;
+  hicn_face_udp_t *face_udp = (hicn_face_udp_t *) face->data;
+
+  /* Expose headroom for the outer ip4 + udp headers; the inner hicn
+   * (ip+tcp) header starts right after them. */
+  vlib_buffer_advance (b0, -(word) (sizeof (ip4_header_t) +
+				    sizeof (udp_header_t)));
+
+  /* ip: copy the cached ip4 header + udp header rewrite in one shot. */
+  ip0 = vlib_buffer_get_current (b0);
+  clib_memcpy (ip0, &(face_udp->hdrs.ip4.ip), sizeof (ip4_header_t) +
+	       sizeof (udp_header_t));
+
+  /* Fix UDP length */
+  udp0 = (udp_header_t *) (ip0 + 1);
+
+  new_l0 =
+    clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0) -
+			  sizeof (*ip0));
+  udp0->length = new_l0;
+
+  /* Recompute the outer ip4 total length and header checksum. */
+  ip0->length = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0));
+  ip0->checksum = ip4_header_checksum (ip0);
+}
+
+/*
+ * Prepend the face's precomputed outer IPv6+UDP rewrite to b0, fix the
+ * length fields and compute the mandatory v6 UDP checksum.  Uses
+ * vlib_buffer_advance (bounds-asserted VPP idiom) instead of poking
+ * current_data/current_length by hand.
+ */
+always_inline void
+hicn_iface_udp6_encap (vlib_main_t * vm,
+		       vlib_buffer_t * b0, hicn_face_t * face)
+{
+  int bogus0;
+  u16 new_l0;
+  ip6_header_t *ip0;
+  udp_header_t *udp0;
+  hicn_face_udp_t *face_udp = (hicn_face_udp_t *) face->data;
+
+  /* Expose headroom for the outer ip6 + udp headers. */
+  vlib_buffer_advance (b0, -(word) (sizeof (ip6_header_t) +
+				    sizeof (udp_header_t)));
+
+  /* ip: copy the cached ip6 header + udp header rewrite in one shot. */
+  ip0 = vlib_buffer_get_current (b0);
+  clib_memcpy (ip0, &(face_udp->hdrs.ip6.ip), sizeof (ip6_header_t) +
+	       sizeof (udp_header_t));
+
+  /* The v6 payload length equals the UDP length because the UDP header
+   * immediately follows the fixed v6 header. */
+  new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)
+				 - sizeof (*ip0));
+
+  ip0->payload_length = new_l0;
+
+  /* Fix UDP length */
+  udp0 = (udp_header_t *) (ip0 + 1);
+  udp0->length = new_l0;
+
+  udp0->checksum = ip6_tcp_udp_icmp_compute_checksum (vm, b0, ip0, &bogus0);
+
+  ASSERT (bogus0 == 0);
+
+  /* 0 on the wire means "no checksum", forbidden for UDP over IPv6:
+   * transmit a computed 0 as 0xffff (RFC 768/8200). */
+  if (udp0->checksum == 0)
+    udp0->checksum = 0xffff;
+}
+
+/* Per-node error strings, expanded from the shared foreach_hicnfwd_error
+ * list; indices must stay in sync with the HICNFWD_ERROR_* enum. */
+static char *hicn_iface_udp4_output_error_strings[] = {
+#define _(sym, string) string,
+  foreach_hicnfwd_error
+#undef _
+};
+
+static char *hicn_iface_udp6_output_error_strings[] = {
+#define _(sym, string) string,
+  foreach_hicnfwd_error
+#undef _
+};
+
+/* Trace context struct */
+typedef struct
+{
+  u32 next_index;
+  u32 sw_if_index;
+  u8 pkt_type;
+} hicn_iface_udp4_output_trace_t;
+
+/* Next-node dispositions for the udp4 output node */
+typedef enum
+{
+  HICN_IFACE_UDP4_OUTPUT_NEXT_LOOKUP,
+  HICN_IFACE_UDP4_OUTPUT_NEXT_ERROR_DROP,
+  HICN_IFACE_UDP4_OUTPUT_N_NEXT,
+} hicn_iface_udp4_output_next_t;
+
+/* Trace context struct */
+typedef struct
+{
+  u32 next_index;
+  u32 sw_if_index;
+  u8 pkt_type;
+} hicn_iface_udp6_output_trace_t;
+
+/* Next-node dispositions for the udp6 output node */
+typedef enum
+{
+  HICN_IFACE_UDP6_OUTPUT_NEXT_LOOKUP,
+  HICN_IFACE_UDP6_OUTPUT_NEXT_ERROR_DROP,
+  HICN_IFACE_UDP6_OUTPUT_N_NEXT,
+} hicn_iface_udp6_output_next_t;
+
+/* Per-IP-version aliases consumed by the iface_output_x1/x2 macros below
+ * via token pasting (##ipv), so one macro body serves both families. */
+#define ERROR_OUTPUT_UDP4 HICN_IFACE_UDP4_OUTPUT_NEXT_ERROR_DROP
+#define ERROR_OUTPUT_UDP6 HICN_IFACE_UDP6_OUTPUT_NEXT_ERROR_DROP
+
+#define IP_HEADER_4 ip4_header_t
+#define IP_HEADER_6 ip6_header_t
+
+#define NEXT_LOOKUP_UDP4 HICN_IFACE_UDP4_OUTPUT_NEXT_LOOKUP
+#define NEXT_LOOKUP_UDP6 HICN_IFACE_UDP6_OUTPUT_NEXT_LOOKUP
+
+#define HICN_IFACE_UDP_ADD_LOCK_IP4 hicn_dpo_udp4_add_and_lock
+#define HICN_IFACE_UDP_ADD_LOCK_IP6 hicn_dpo_udp6_add_and_lock
+
+#define HICN_FACE_UDP_ENCAP_IP4 hicn_iface_udp4_encap
+#define HICN_FACE_UDP_ENCAP_IP6 hicn_iface_udp6_encap
+
+#define TRACE_OUTPUT_PKT_UDP4 hicn_iface_udp4_output_trace_t
+#define TRACE_OUTPUT_PKT_UDP6 hicn_iface_udp6_output_trace_t
+
+/*
+ * Single-packet loop body for the udp output nodes: resolve the face from
+ * the DPO index stashed in adj_index[VLIB_TX], encapsulate with the face's
+ * ip+udp header, and enqueue to ipN-lookup (or error-drop if no face).
+ * NOTE(review): successful packets are tallied in stats.pkts_data_count
+ * even though the trace marks them HICN_PKT_TYPE_INTEREST -- confirm.
+ */
+#define iface_output_x1(ipv)                                          \
+  do {                                                                \
+  vlib_buffer_t *b0;                                                  \
+  u32 bi0;                                                            \
+  u32 next0 = ERROR_OUTPUT_UDP##ipv;                                  \
+  hicn_face_t * face;                                                 \
+                                                                      \
+  /* Prefetch for next iteration. */                                  \
+  if (n_left_from > 1)                                                \
+    {                                                                 \
+      vlib_buffer_t *b1;                                              \
+      b1 = vlib_get_buffer (vm, from[1]);                             \
+      CLIB_PREFETCH (b1, CLIB_CACHE_LINE_BYTES, STORE);               \
+      CLIB_PREFETCH (b1->data, CLIB_CACHE_LINE_BYTES , LOAD);         \
+    }                                                                 \
+  /* Dequeue a packet buffer */                                       \
+  bi0 = from[0];                                                      \
+  from += 1;                                                          \
+  n_left_from -= 1;                                                   \
+  to_next[0] = bi0;                                                   \
+  to_next += 1;                                                       \
+  n_left_to_next -= 1;                                                \
+                                                                      \
+  b0 = vlib_get_buffer (vm, bi0);                                     \
+                                                                      \
+  face =                                                              \
+    hicn_dpoi_get_from_idx(vnet_buffer (b0)->ip.adj_index[VLIB_TX]);  \
+                                                                      \
+  if (PREDICT_TRUE(face != NULL))                                     \
+    {                                                                 \
+      HICN_FACE_UDP_ENCAP_IP##ipv                                     \
+	(vm, b0, face);                                               \
+      next0 = NEXT_LOOKUP_UDP##ipv;                                   \
+      stats.pkts_data_count += 1;                                     \
+    }                                                                 \
+                                                                      \
+  if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) &&          \
+		     (b0->flags & VLIB_BUFFER_IS_TRACED)))            \
+    {                                                                 \
+      TRACE_OUTPUT_PKT_UDP##ipv *t =                                  \
+	vlib_add_trace (vm, node, b0, sizeof (*t));                   \
+      t->pkt_type = HICN_PKT_TYPE_INTEREST;                           \
+      t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];        \
+      t->next_index = next0;                                          \
+    }                                                                 \
+                                                                      \
+                                                                      \
+  /* Verify speculative enqueue, maybe switch current next frame */   \
+  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,              \
+				   to_next, n_left_to_next,           \
+				   bi0, next0);                       \
+  } while(0)
+
+/*
+ * Dual-packet variant of iface_output_x1 (same contract, two buffers per
+ * iteration, prefetching the following pair).
+ *
+ * BUGFIX: face1 was looked up from b0's adj_index (copy/paste), so the
+ * second packet always resolved the first packet's face; and a hit on
+ * face1 assigned next0 instead of next1, leaving next1 at error-drop so
+ * every second packet was dropped even after successful encapsulation.
+ */
+#define iface_output_x2(ipv)                                          \
+  do {                                                                \
+    vlib_buffer_t *b0, *b1;                                           \
+    u32 bi0, bi1;                                                     \
+    u32 next0 = ERROR_OUTPUT_UDP##ipv, next1 = ERROR_OUTPUT_UDP##ipv; \
+    hicn_face_t *face0, *face1;                                       \
+                                                                      \
+    /* Prefetch for next iteration. */                                \
+    {                                                                 \
+      vlib_buffer_t *b2, *b3;                                         \
+      b2 = vlib_get_buffer (vm, from[2]);                             \
+      b3 = vlib_get_buffer (vm, from[3]);                             \
+      CLIB_PREFETCH (b2, CLIB_CACHE_LINE_BYTES, STORE);               \
+      CLIB_PREFETCH (b3, CLIB_CACHE_LINE_BYTES, STORE);               \
+      CLIB_PREFETCH (b2->data, CLIB_CACHE_LINE_BYTES , LOAD);         \
+      CLIB_PREFETCH (b3->data, CLIB_CACHE_LINE_BYTES , LOAD);         \
+    }                                                                 \
+                                                                      \
+    /* Dequeue packets buffers */                                     \
+    bi0 = from[0];                                                    \
+    bi1 = from[1];                                                    \
+    from += 2;                                                        \
+    n_left_from -= 2;                                                 \
+    to_next[0] = bi0;                                                 \
+    to_next[1] = bi1;                                                 \
+    to_next += 2;                                                     \
+    n_left_to_next -= 2;                                              \
+                                                                      \
+    b0 = vlib_get_buffer (vm, bi0);                                   \
+    b1 = vlib_get_buffer (vm, bi1);                                   \
+                                                                      \
+    face0 =                                                           \
+      hicn_dpoi_get_from_idx(vnet_buffer (b0)->ip.adj_index[VLIB_TX]); \
+    face1 =                                                           \
+      hicn_dpoi_get_from_idx(vnet_buffer (b1)->ip.adj_index[VLIB_TX]); \
+                                                                      \
+    if (PREDICT_TRUE(face0 != NULL))                                  \
+      {                                                               \
+	HICN_FACE_UDP_ENCAP_IP##ipv                                   \
+	  (vm, b0, face0);                                            \
+	next0 = NEXT_LOOKUP_UDP##ipv;                                 \
+	stats.pkts_data_count += 1;                                   \
+      }                                                               \
+                                                                      \
+    if (PREDICT_TRUE(face1 != NULL))                                  \
+      {                                                               \
+	HICN_FACE_UDP_ENCAP_IP##ipv                                   \
+	  (vm, b1, face1);                                            \
+	next1 = NEXT_LOOKUP_UDP##ipv;                                 \
+	stats.pkts_data_count += 1;                                   \
+      }                                                               \
+                                                                      \
+    if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) &&        \
+		       (b0->flags & VLIB_BUFFER_IS_TRACED)))          \
+      {                                                               \
+	TRACE_OUTPUT_PKT_UDP##ipv *t =                                \
+	  vlib_add_trace (vm, node, b0, sizeof (*t));                 \
+	t->pkt_type = HICN_PKT_TYPE_INTEREST;                         \
+	t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];      \
+	t->next_index = next0;                                        \
+      }                                                               \
+                                                                      \
+    if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) &&        \
+		       (b1->flags & VLIB_BUFFER_IS_TRACED)))          \
+      {                                                               \
+	TRACE_OUTPUT_PKT_UDP##ipv *t =                                \
+	  vlib_add_trace (vm, node, b1, sizeof (*t));                 \
+	t->pkt_type = HICN_PKT_TYPE_INTEREST;                         \
+	t->sw_if_index = vnet_buffer (b1)->sw_if_index[VLIB_RX];      \
+	t->next_index = next1;                                        \
+      }                                                               \
+                                                                      \
+                                                                      \
+    /* Verify speculative enqueue, maybe switch current next frame */ \
+    vlib_validate_buffer_enqueue_x2 (vm, node, next_index,            \
+				     to_next, n_left_to_next,         \
+				     bi0, bi1, next0, next1);         \
+  } while(0)
+
+
+/*
+ * Node function for hicn-iface-udp4-output: drains the frame in pairs
+ * then singles, encapsulating each packet via iface_output_x1/x2(4).
+ */
+static uword
+hicn_iface_udp4_output_node_fn (vlib_main_t * vm,
+				vlib_node_runtime_t * node,
+				vlib_frame_t * frame)
+{
+  u32 n_left_from, *from, *to_next, next_index;
+  vl_api_hicn_api_node_stats_get_reply_t stats = { 0 };
+
+  from = vlib_frame_vector_args (frame);
+  n_left_from = frame->n_vectors;
+  next_index = node->cached_next_index;
+
+  while (n_left_from > 0)
+    {
+      u32 n_left_to_next;
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+      /* >= 4 so the x2 macro can safely prefetch from[2] and from[3] */
+      while (n_left_from >= 4 && n_left_to_next >= 2)
+	{
+	  iface_output_x2 (4);
+	}
+
+      while (n_left_from > 0 && n_left_to_next > 0)
+	{
+	  iface_output_x1 (4);
+	}
+
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+    }
+
+  /* NOTE(review): successfully encapsulated packets are counted under
+   * HICNFWD_ERROR_DATAS although this is the interest output path --
+   * confirm intended. */
+  vlib_node_increment_counter (vm, node->node_index,
+			       HICNFWD_ERROR_DATAS, stats.pkts_data_count);
+
+  return (frame->n_vectors);
+}
+
+/* packet trace format function */
+static u8 *
+hicn_iface_udp4_output_format_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ hicn_iface_udp4_output_trace_t *t =
+ va_arg (*args, hicn_iface_udp4_output_trace_t *);
+
+ s = format (s, "IFACE_UDP4_OUTPUT: pkt: %d, sw_if_index %d, next index %d",
+ (int) t->pkt_type, t->sw_if_index, t->next_index);
+ return (s);
+}
+
+
+/*
+ * Node registration for the hicn udp4 iface output node
+ */
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (hicn_iface_udp4_output_node) =
+{
+ .function = hicn_iface_udp4_output_node_fn,
+ .name = "hicn-iface-udp4-output",
+ .vector_size = sizeof (u32),
+ .format_trace = hicn_iface_udp4_output_format_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+ .n_errors = ARRAY_LEN (hicn_iface_udp4_output_error_strings),
+ .error_strings = hicn_iface_udp4_output_error_strings,
+ .n_next_nodes = HICN_IFACE_UDP4_OUTPUT_N_NEXT,
+ /* edit / add dispositions here */
+ .next_nodes =
+ {
+ [HICN_IFACE_UDP4_OUTPUT_NEXT_LOOKUP] = "ip4-lookup",
+ [HICN_IFACE_UDP4_OUTPUT_NEXT_ERROR_DROP] = "error-drop",
+ },
+};
+/* *INDENT-ON* */
+
+
+/*
+ * Node function for hicn-iface-udp6-output: drains the frame in pairs
+ * then singles, encapsulating each packet via iface_output_x1/x2(6).
+ */
+static uword
+hicn_iface_udp6_output_node_fn (vlib_main_t * vm,
+				vlib_node_runtime_t * node,
+				vlib_frame_t * frame)
+{
+  u32 n_left_from, *from, *to_next, next_index;
+  vl_api_hicn_api_node_stats_get_reply_t stats = { 0 };
+
+  from = vlib_frame_vector_args (frame);
+  n_left_from = frame->n_vectors;
+  next_index = node->cached_next_index;
+
+  while (n_left_from > 0)
+    {
+      u32 n_left_to_next;
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+      /* >= 4 so the x2 macro can safely prefetch from[2] and from[3] */
+      while (n_left_from >= 4 && n_left_to_next >= 2)
+	{
+	  iface_output_x2 (6);
+	}
+
+      while (n_left_from > 0 && n_left_to_next > 0)
+	{
+	  iface_output_x1 (6);
+	}
+
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+    }
+
+  /* NOTE(review): same DATAS-vs-interest counter question as the udp4
+   * output node -- confirm intended. */
+  vlib_node_increment_counter (vm, node->node_index,
+			       HICNFWD_ERROR_DATAS, stats.pkts_data_count);
+
+  return (frame->n_vectors);
+
+}
+
+/* packet trace format function */
+static u8 *
+hicn_iface_udp6_output_format_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ hicn_iface_udp6_output_trace_t *t =
+ va_arg (*args, hicn_iface_udp6_output_trace_t *);
+
+ s = format (s, "IFACE_UDP6_OUTPUT: pkt: %d, sw_if_index %d, next index %d",
+ (int) t->pkt_type, t->sw_if_index, t->next_index);
+ return (s);
+}
+
+/*
+ * Node registration for the hicn udp6 iface output node
+ */
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (hicn_iface_udp6_output_node) =
+{
+ .function = hicn_iface_udp6_output_node_fn,
+ .name = "hicn-iface-udp6-output",
+ .vector_size = sizeof (u32),
+ .format_trace = hicn_iface_udp6_output_format_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+ .n_errors = ARRAY_LEN (hicn_iface_udp6_output_error_strings),
+ .error_strings = hicn_iface_udp6_output_error_strings,
+ .n_next_nodes = HICN_IFACE_UDP6_OUTPUT_N_NEXT,
+ /* edit / add dispositions here */
+ .next_nodes =
+ {
+ [HICN_IFACE_UDP6_OUTPUT_NEXT_LOOKUP] = "ip6-lookup",
+ [HICN_IFACE_UDP6_OUTPUT_NEXT_ERROR_DROP] = "error-drop",
+ },
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/hicn-plugin/src/faces/udp/iface_udp_node.h b/hicn-plugin/src/faces/udp/iface_udp_node.h
new file mode 100755
index 000000000..957d19217
--- /dev/null
+++ b/hicn-plugin/src/faces/udp/iface_udp_node.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HICN_IFACE_UDP_H__
+#define __HICN_IFACE_UDP_H__
+
+#include <vlib/vlib.h>
+
+extern vlib_node_registration_t hicn_iface_udp4_input_node;
+extern vlib_node_registration_t hicn_iface_udp6_input_node;
+extern vlib_node_registration_t hicn_iface_udp4_output_node;
+extern vlib_node_registration_t hicn_iface_udp6_output_node;
+
+void hicn_iface_udp_init (vlib_main_t * vm);
+
+#endif // __HICN_IFACE_UDP_H__
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/hicn-plugin/src/hashtb.c b/hicn-plugin/src/hashtb.c
new file mode 100755
index 000000000..332da350d
--- /dev/null
+++ b/hicn-plugin/src/hashtb.c
@@ -0,0 +1,1008 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdlib.h>
+#include <errno.h>
+#include <assert.h>
+#include <inttypes.h>
+
+#include <vlib/vlib.h>
+#include <vppinfra/pool.h>
+
+#include "pcs.h"
+#include "hashtb.h"
+#include "parser.h"
+#include "error.h"
+
+/* return dvd/dvr, rounded up (intended for integer values) */
+/* Uses a GNU statement expression to evaluate each operand once. */
+#define CEIL(dvd, dvr) \
+ ({ \
+ __typeof__ (dvd) _dvd = (dvd); \
+ __typeof__ (dvr) _dvr = (dvr); \
+ (_dvd + _dvr - 1)/_dvr; \
+ })
+
+/* Round an integer up to the next multiple of 8 */
+#ifndef ALIGN8
+#define ALIGN8(p) (((p) + 0x7) & ~(0x7))
+#endif
+
+/* Round a pointer up to 8-byte alignment.
+ * NOTE(review): applies '&' directly to a pointer value, which relies on
+ * a compiler extension; standard C requires a round-trip through
+ * uintptr_t -- confirm this macro is actually used/needed. */
+#ifndef ALIGNPTR8
+#define ALIGNPTR8(p) ((void *)(((u8 * )(p) + 0x7) & ~(0x7)))
+#endif
+
+/* Round an integer up to the next multiple of 64 (cache-line size) */
+#ifndef ALIGN64
+#define ALIGN64(p) (((p) + 0x3f) & ~(0x3f))
+#endif
+
+#ifndef TRUE
+#define TRUE 1
+#endif
+
+#ifndef FALSE
+#define FALSE 0
+#endif
+
+
+/*
+ * Offset to aligned start of additional data (PIT/CS, FIB) embedded in each
+ * node.
+ */
+/* Computed once in hicn_hashtb_alloc(); shared by all tables. */
+u32 ht_node_data_offset_aligned;
+
+/* Some support for posix vs vpp mem management */
+#define MEM_ALLOC(x) clib_mem_alloc_aligned((x), 8)
+#define MEM_FREE(p) clib_mem_free((p))
+
+/*
+ * Internal utilities
+ */
+
+/* Allocate an overflow bucket */
+/* Allocate an overflow bucket from the table's preallocated pool.
+ * Returns NULL when the configured overflow-bucket budget is exhausted. */
+static hicn_hash_bucket_t *
+alloc_overflow_bucket (hicn_hashtb_h h)
+{
+  hicn_hash_bucket_t *newbkt = NULL;
+
+  if (h->ht_overflow_buckets_used < h->ht_overflow_bucket_count)
+    {
+      pool_get_aligned (h->ht_overflow_buckets, newbkt, 8);
+
+      if (newbkt)
+	{
+	  h->ht_overflow_buckets_used++;
+	}
+    }
+  return (newbkt);
+}
+
+/* Free an overflow bucket; clear caller's pointer */
+/* Free an overflow bucket; clear caller's pointer */
+static void
+free_overflow_bucket (hicn_hashtb_h h, hicn_hash_bucket_t ** pb)
+{
+  hicn_hash_bucket_t *bkt = *pb;
+
+  /* Slot zero is permanently reserved, so the count never drops below 1 */
+  ASSERT (h->ht_overflow_buckets_used > 0);
+
+  pool_put (h->ht_overflow_buckets, bkt);
+  h->ht_overflow_buckets_used--;
+  *pb = NULL;
+}
+
+/*
+ * Init, allocate a new hashtable
+ */
+/*
+ * Init, allocate a new hashtable.
+ *
+ * ph:            mailbox for the new table handle (required).
+ * max_elems:     node capacity, must lie in
+ *                [HICN_HASHTB_MIN_ENTRIES, HICN_HASHTB_MAX_ENTRIES].
+ * app_data_size: bytes of per-node application data; must fit in the
+ *                space reserved inside hicn_hash_node_t.
+ *
+ * Returns HICN_ERROR_NONE and mails back the handle on success; on any
+ * failure everything allocated so far is released and *ph is untouched.
+ */
+int
+hicn_hashtb_alloc (hicn_hashtb_h * ph, u32 max_elems, size_t app_data_size)
+{
+  int ret = HICN_ERROR_NONE;
+  hicn_hashtb_h h = NULL;
+  u32 count;
+  u32 total_buckets;
+  size_t sz;
+  hicn_hash_node_t *nodep;
+  hicn_hash_bucket_t *bucket;
+
+  if (ph == NULL)
+    {
+      ret = HICN_ERROR_HASHTB_INVAL;
+      goto done;
+    }
+  if (max_elems < HICN_HASHTB_MIN_ENTRIES ||
+      max_elems > HICN_HASHTB_MAX_ENTRIES)
+    {
+      /* BUGFIX: previously fell through with ret == HICN_ERROR_NONE, so
+       * an out-of-range max_elems reported success while *ph was never
+       * set, leaving the caller with an uninitialized handle. */
+      ret = HICN_ERROR_HASHTB_INVAL;
+      goto done;
+    }
+  /* Allocate and init main hashtable struct */
+  h = MEM_ALLOC (sizeof (hicn_hashtb_t));
+  if (h == NULL)
+    {
+      ret = HICN_ERROR_HASHTB_NOMEM;
+      goto done;
+    }
+  memset (h, 0, sizeof (hicn_hashtb_t));
+
+  /* Compute main table bucket (row) count and size, and allocate */
+
+  /* Consider the last entry as used for containing the overflow bucket */
+  total_buckets = CEIL (max_elems, HICN_HASHTB_BUCKET_ENTRIES - 1);
+  count = ALIGN8 (CEIL (total_buckets, HICN_HASHTB_FILL_FACTOR));
+
+  h->ht_bucket_count = count;
+
+  /* We _really_ expect to have buckets aligned on cache lines ... */
+  sz = sizeof (hicn_hash_bucket_t);
+  assert (sz == ALIGN64 (sz));
+
+  h->ht_buckets = MEM_ALLOC (count * sz);
+  if (h->ht_buckets == NULL)
+    {
+      ret = HICN_ERROR_HASHTB_NOMEM;
+      goto done;
+    }
+  memset (h->ht_buckets, 0, count * sz);
+
+  /*
+   * First time through, compute offset to aligned extra data start in
+   * each node struct it's crucial that both the node struct (that the
+   * base hashtable uses) and the extra data area (that's also probably
+   * a struct) are aligned.
+   */
+  if (ht_node_data_offset_aligned == 0)
+    {
+      count = STRUCT_OFFSET_OF (hicn_hash_node_t, hn_data);
+      ht_node_data_offset_aligned = ALIGN8 (count);
+    }
+  //check app struct fits into space provided(HICN_HASH_NODE_APP_DATA_SIZE)
+  u32 ht_node_data_size;
+  ht_node_data_size = sizeof (hicn_hash_node_t) - ht_node_data_offset_aligned;
+  if (app_data_size > ht_node_data_size)
+    {
+      clib_error
+	("hicn hashtable: fatal error: requested app data size(%u) > hashtb node's configured bytes available(%u), sizeof(hicn_shared_t)=%u, sizeof(hicn_pit_entry_t)=%u, sizeof(hicn_cs_entry_t)=%u",
+	 app_data_size, ht_node_data_size, sizeof (hicn_pcs_shared_t),
+	 sizeof (hicn_pit_entry_t), sizeof (hicn_cs_entry_t));
+    }
+  /*
+   * Compute entry node count and size, allocate Allocate/'Hide' the
+   * zero-th node so can use zero as an 'empty' value
+   */
+  pool_alloc_aligned (h->ht_nodes, max_elems, 8);
+  if (h->ht_nodes == NULL)
+    {
+      ret = HICN_ERROR_HASHTB_NOMEM;
+      goto done;
+    }
+  pool_get_aligned (h->ht_nodes, nodep, 8);
+  //alloc node 0
+  nodep = nodep;		/* Silence 'not used' warning */
+
+  h->ht_node_count = max_elems;
+  h->ht_nodes_used = 1;
+
+  /*
+   * Compute overflow bucket count and size, allocate
+   */
+  count = ALIGN8 (total_buckets - h->ht_bucket_count);
+
+  pool_alloc_aligned (h->ht_overflow_buckets, count, 8);
+  if (h->ht_overflow_buckets == NULL)
+    {
+      ret = HICN_ERROR_HASHTB_NOMEM;
+      goto done;
+    }
+  /* 'Hide' the zero-th bucket so we can use zero as an 'empty' value */
+  pool_get_aligned (h->ht_overflow_buckets, bucket, 8);
+  bucket = bucket;		/* Silence 'not used' warning */
+
+  h->ht_overflow_bucket_count = count;
+  h->ht_overflow_buckets_used = 1;
+
+done:
+
+  /* On failure, tear down whatever was partially allocated */
+  if (h)
+    {
+      if ((ret == HICN_ERROR_NONE) && ph)
+	{
+	  *ph = h;
+	}
+      else
+	{
+	  hicn_hashtb_free (&h);
+	}
+    }
+  return (ret);
+}
+
+/*
+ * Free, de-allocate a hashtable
+ */
+/*
+ * Free, de-allocate a hashtable. Safe to call on a partially-constructed
+ * table (each sub-allocation is checked). Clears the caller's handle.
+ */
+int
+hicn_hashtb_free (hicn_hashtb_h * ph)
+{
+  int ret = 0;
+
+  if (ph)
+    {
+      if ((*ph)->ht_nodes)
+	{
+	  pool_free ((*ph)->ht_nodes);
+	  (*ph)->ht_nodes = 0;
+	}
+      if ((*ph)->ht_overflow_buckets)
+	{
+	  pool_free ((*ph)->ht_overflow_buckets);
+	  (*ph)->ht_overflow_buckets = 0;
+	}
+      if ((*ph)->ht_buckets)
+	{
+	  MEM_FREE ((*ph)->ht_buckets);
+	  (*ph)->ht_buckets = 0;
+	}
+      MEM_FREE (*ph);
+
+      *ph = NULL;
+    }
+  return (ret);
+}
+
+
+
+/*
+ * Basic api to lookup a specific hash+key tuple. This does the entire lookup
+ * operation, retrieving node structs and comparing keys, so it's not
+ * optimized for prefetching or high performance.
+ *
+ * Returns zero and mails back a node on success, errno otherwise.
+ */
+/*
+ * Basic lookup: thin wrapper over hicn_hashtb_lookup_node_ex() with
+ * deleted-node visibility disabled. See that function for the full
+ * contract (including the lock taken on a hit).
+ */
+int
+hicn_hashtb_lookup_node (hicn_hashtb_h h, const u8 * key,
+			 u32 keylen, u64 hashval, u8 is_data,
+			 u32 * node_id, u8 * dpo_ctx_id, u8 * vft_id,
+			 u8 * is_cs, u8 * hash_entry_id, u32 * bucket_id,
+			 u8 * bucket_is_overflow)
+{
+  return (hicn_hashtb_lookup_node_ex
+	  (h, key, keylen, hashval, is_data, FALSE /* deleted nodes */ ,
+	   node_id,
+	   dpo_ctx_id, vft_id, is_cs, hash_entry_id, bucket_id,
+	   bucket_is_overflow));
+}
+
+/*
+ * Extended api to lookup a specific hash+key tuple. The implementation
+ * allows the caller to locate nodes that are marked for deletion, which is
+ * part of some hashtable applications, such as the FIB.
+ *
+ * This does the entire lookup operation, retrieving node structs and comparing
+ * keys, so it's not optimized for prefetching or high performance.
+ *
+ * Returns zero and mails back a node on success, errno otherwise.
+ */
+/*
+ * Extended lookup: walks the bucket row selected by the low hash bits,
+ * then any chained overflow buckets, matching on the full 64-bit hash.
+ * On a hit, mails back the node/dpo/entry coordinates and -- unless the
+ * hit is a data packet matching a CS entry -- increments the entry's
+ * lock count (caller must eventually release it).
+ *
+ * NOTE(review): only the hash is compared here, not the key bytes; the
+ * caller is expected to confirm with hicn_node_compare() -- verify.
+ */
+int
+hicn_hashtb_lookup_node_ex (hicn_hashtb_h h, const u8 * key,
+			    u32 keylen, u64 hashval, u8 is_data,
+			    int include_deleted_p, u32 * node_id,
+			    u8 * dpo_ctx_id, u8 * vft_id, u8 * is_cs,
+			    u8 * hash_entry_id, u32 * bucket_id,
+			    u8 * bucket_is_overflow)
+{
+  int i, ret = HICN_ERROR_HASHTB_HASH_NOT_FOUND;
+  int found_p = FALSE;
+  u32 bidx;
+  hicn_hash_bucket_t *bucket;
+  u32 current_bucket_id = ~0;
+
+  /*
+   * Use some bits of the low half of the hash to locate a row/bucket
+   * in the table
+   */
+  current_bucket_id = bidx = (hashval & (h->ht_bucket_count - 1));
+
+  bucket = h->ht_buckets + bidx;
+
+  *bucket_is_overflow = 0;
+  /* Check the entries in the bucket for matching hash value */
+
+loop_buckets:
+
+  for (i = 0; i < HICN_HASHTB_BUCKET_ENTRIES && !found_p; i++)
+    {
+      /*
+       * If an entry is marked for deletion, ignore it unless the
+       * caller explicitly wants these nodes.
+       */
+      if (bucket->hb_entries[i].he_flags & HICN_HASH_ENTRY_FLAG_DELETED)
+	{
+	  if (!include_deleted_p)
+	    {
+	      continue;
+	    }
+	}
+      if (bucket->hb_entries[i].he_msb64 == hashval)
+	{
+	  /*
+	   * Found a candidate - must retrieve the actual node
+	   * and check the key.
+	   */
+	  *node_id = bucket->hb_entries[i].he_node;
+	  *dpo_ctx_id = bucket->hb_entries[i].dpo_ctx_id;
+	  *vft_id = bucket->hb_entries[i].vft_id;
+	  *is_cs =
+	    bucket->hb_entries[i].he_flags & HICN_HASH_ENTRY_FLAG_CS_ENTRY;
+	  *hash_entry_id = i;
+	  *bucket_id = current_bucket_id;
+	  /*
+	   * If we are doing lookup for a data, do not take a
+	   * lock in case of a hit with a CS entry
+	   */
+	  if (!(is_data && *is_cs))
+	    {
+	      bucket->hb_entries[i].locks++;
+	    }
+	  found_p = TRUE;
+	  ret = HICN_ERROR_NONE;
+	  goto done;
+	}
+    }
+
+  /*
+   * Be prepared to continue to an overflow bucket if necessary. We
+   * only expect the last entry in a bucket to refer to an overflow
+   * bucket...
+   */
+  i = HICN_HASHTB_BUCKET_ENTRIES - 1;
+  if (bucket->hb_entries[i].he_flags & HICN_HASH_ENTRY_FLAG_OVERFLOW)
+    {
+      current_bucket_id = bucket->hb_entries[i].he_node;
+      bucket = pool_elt_at_index (h->ht_overflow_buckets,
+				  bucket->hb_entries[i].he_node);
+      *bucket_is_overflow = 1;
+      goto loop_buckets;
+    }
+done:
+
+  return (ret);
+}
+
+/**
+ * This function allows to split the hash verification from the comparison of
+ * the entire key. Useful to exploit prefertching.
+ * return 1 if equals, 0 otherwise
+ */
+/**
+ * Compare a lookup key against the key stored in a hash node.
+ * Splitting this full-key comparison out of the hash match lets callers
+ * exploit prefetching.
+ * Returns 1 when the keys are byte-identical, 0 otherwise.
+ */
+int
+hicn_node_compare (const u8 * key, u32 keylen, hicn_hash_node_t * node)
+{
+  /* A null key or a length mismatch can never match */
+  if (key == NULL || keylen != node->hn_keysize)
+    return 0;
+
+  return (memcmp (key, node->hn_key.ks.key, keylen) == 0);
+}
+
+/*
+ * Utility to init a new entry in a hashtable bucket/row. We use this to add
+ * new a node+hash, and to clear out an entry during removal.
+ */
+/*
+ * Utility to init a new entry in a hashtable bucket/row. We use this to add
+ * new a node+hash, and to clear out an entry during removal (with
+ * nodeidx == 0, hashval == 0).
+ */
+void
+hicn_hashtb_init_entry (hicn_hash_entry_t * entry, u32 nodeidx,
+			u64 hashval, u32 locks)
+{
+  entry->he_msb64 = hashval;
+  entry->he_node = nodeidx;
+
+  /* Clear out some other fields in the entry */
+  entry->he_flags = 0;
+  entry->locks = locks;
+}
+
+/*
+ * Insert a node into the hashtable. We expect the caller has a) computed the
+ * hash value to use, b) initialized the node with the hash and key info, and
+ * c) filled in its app-specific data portion of the node.
+ */
+
+/*
+ * Insert 'node' into the table under 'hash'. If an entry with the same
+ * 64-bit hash already exists, its lock count is bumped, its coordinates
+ * are mailed back, and HICN_ERROR_HASHTB_EXIST is returned. Otherwise a
+ * free slot is taken (chaining/allocating overflow buckets as needed)
+ * and HICN_ERROR_NONE is returned with *hash_entry set.
+ */
+int
+hicn_hashtb_insert (hicn_hashtb_h h, hicn_hash_node_t * node,
+		    hicn_hash_entry_t ** hash_entry, u64 hash,
+		    u32 * node_id,
+		    u8 * dpo_ctx_id, u8 * vft_id, u8 * is_cs,
+		    u8 * hash_entry_id, u32 * bucket_id,
+		    u8 * bucket_is_overflow)
+{
+  int i, ret = HICN_ERROR_HASHTB_INVAL;
+  u32 bidx;
+  hicn_hash_bucket_t *bucket, *newbkt;
+  int use_seven;
+  u32 current_bucket_id = ~0;
+  int is_overflow = 0;
+
+  *hash_entry = NULL;
+
+  if (h == NULL)
+    {
+      goto done;
+    }
+  /*
+   * Use some bits of the low half of the hash to locate a row/bucket
+   * in the table
+   */
+  current_bucket_id = bidx = (hash & (h->ht_bucket_count - 1));
+
+  bucket = h->ht_buckets + bidx;
+
+  /* use_seven: last slot of each bucket is reserved for overflow links */
+  use_seven = (h->ht_flags & HICN_HASHTB_FLAG_USE_SEVEN);
+
+  /* Locate a free entry slot in the bucket */
+
+loop_buckets:
+
+  for (i = 0; i < HICN_HASHTB_BUCKET_ENTRIES; i++)
+    {
+
+      /*
+       * If an entry is marked for deletion, ignore it
+       */
+      if (bucket->hb_entries[i].he_flags & HICN_HASH_ENTRY_FLAG_DELETED)
+	{
+	  continue;
+	}
+      /*
+       * Be sure that we are not inserting the same entry twice
+       */
+      if (bucket->hb_entries[i].he_msb64 == hash)
+	{
+	  /*
+	   * We hit an existing pit entry. increase lock.
+	   */
+
+	  *node_id = bucket->hb_entries[i].he_node;
+	  *dpo_ctx_id = bucket->hb_entries[i].dpo_ctx_id;
+	  *vft_id = bucket->hb_entries[i].vft_id;
+	  *is_cs =
+	    bucket->hb_entries[i].he_flags & HICN_HASH_ENTRY_FLAG_CS_ENTRY;
+	  *hash_entry_id = i;
+	  *bucket_id = current_bucket_id;
+	  *hash_entry = &(bucket->hb_entries[i]);
+	  /*
+	   * If we are doing lookup for a data, do not take a
+	   * lock in case of a hit with a CS entry
+	   */
+	  bucket->hb_entries[i].locks++;
+	  *bucket_is_overflow = is_overflow;
+	  ret = HICN_ERROR_HASHTB_EXIST;
+	  goto done;
+	}
+      if ((bucket->hb_entries[i].he_msb64 == 0LL) &&
+	  (bucket->hb_entries[i].he_node == 0))
+	{
+	  /* Found a candidate -- fill it in */
+
+	  /*
+	   * Special case if the application asked not to use
+	   * the last entry in each bucket.
+	   */
+	  if ((i != (HICN_HASHTB_BUCKET_ENTRIES - 1)) || use_seven)
+	    {
+	      hicn_hashtb_init_entry (&(bucket->hb_entries[i]),
+				      NODE_IDX_FROM_NODE (node, h), hash, 0);
+
+	      *hash_entry = &(bucket->hb_entries[i]);
+
+	      node->bucket_id = current_bucket_id;
+	      node->entry_idx = i;
+	      if (is_overflow)
+		node->hn_flags |= HICN_HASH_NODE_OVERFLOW_BUCKET;
+
+	      ret = HICN_ERROR_NONE;
+	      goto done;
+	    }
+	}
+    }
+  /*
+   * Be prepared to continue to an overflow bucket if necessary, or to
+   * add a new overflow bucket. We only expect the last entry in a
+   * bucket to refer to an overflow bucket...
+   */
+  i = HICN_HASHTB_BUCKET_ENTRIES - 1;
+  if (bucket->hb_entries[i].he_flags & HICN_HASH_ENTRY_FLAG_OVERFLOW)
+    {
+      /* Existing overflow bucket - re-start the search loop */
+      current_bucket_id = bucket->hb_entries[i].he_node;
+      bucket = pool_elt_at_index (h->ht_overflow_buckets, current_bucket_id);
+      is_overflow = 1;
+      goto loop_buckets;
+
+    }
+  else
+    {
+      /*
+       * Overflow - reached the end of a bucket without finding a
+       * free entry slot. Need to allocate an overflow bucket, and
+       * connect it to this bucket.
+       */
+      newbkt = alloc_overflow_bucket (h);
+      if (newbkt == NULL)
+	{
+	  ret = HICN_ERROR_HASHTB_NOMEM;
+	  goto done;
+	}
+      /*
+       * We're touching some more bytes than we absolutely have to
+       * here, but ... that seems ok.
+       */
+      memset (newbkt, 0, sizeof (hicn_hash_bucket_t));
+
+      if (use_seven)
+	{
+	  /*
+	   * Copy existing entry into new bucket - we really
+	   * expect these to be properly aligned so they can be
+	   * treated as int.
+	   */
+	  memcpy (&(newbkt->hb_entries[0]),
+		  &(bucket->hb_entries[i]), sizeof (hicn_hash_entry_t));
+
+	  /* Update bucket id and entry_idx on the hash node */
+	  /* NOTE(review): this local 'node' shadows the parameter until
+	   * the closing brace below -- intentional but fragile. */
+	  hicn_hash_node_t *node =
+	    pool_elt_at_index (h->ht_nodes, newbkt->hb_entries[0].he_node);
+	  node->bucket_id = (newbkt - h->ht_overflow_buckets);
+	  node->entry_idx = 0;
+	  node->hn_flags |= HICN_HASH_NODE_OVERFLOW_BUCKET;
+
+	}
+      /*
+       * Connect original bucket to the index of the new overflow
+       * bucket
+       */
+      bucket->hb_entries[i].he_flags |= HICN_HASH_ENTRY_FLAG_OVERFLOW;
+      bucket->hb_entries[i].he_node = (newbkt - h->ht_overflow_buckets);
+
+      /* Add new entry to new overflow bucket */
+      bucket = newbkt;
+
+      /*
+       * Use entry [1] in the new bucket _if_ we just copied into
+       * entry [zero] above.
+       */
+      if (use_seven)
+	{
+
+	  hicn_hashtb_init_entry (&(bucket->hb_entries[1]),
+				  NODE_IDX_FROM_NODE (node, h), hash, 0);
+	  *hash_entry = &(bucket->hb_entries[1]);
+
+	  node->bucket_id = (newbkt - h->ht_overflow_buckets);
+	  node->entry_idx = 1;
+	  node->hn_flags |= HICN_HASH_NODE_OVERFLOW_BUCKET;
+	}
+      else
+	{
+
+	  hicn_hashtb_init_entry (&(bucket->hb_entries[0]),
+				  NODE_IDX_FROM_NODE (node, h), hash, 0);
+	  *hash_entry = &(bucket->hb_entries[0]);
+	  node->bucket_id = (newbkt - h->ht_overflow_buckets);
+	  node->entry_idx = 0;
+	  node->hn_flags |= HICN_HASH_NODE_OVERFLOW_BUCKET;
+	}
+    }
+
+  /* And we're done with the overflow bucket */
+  ret = HICN_ERROR_NONE;
+
+done:
+
+  return (ret);
+}
+
+/*
+ * Delete a node from a hashtable using the node itself, and delete/free the
+ * node. Caller's pointer is cleared on success.
+ */
+/*
+ * Delete a node from a hashtable using the node itself, and delete/free the
+ * node. Caller's pointer is cleared on success.
+ */
+void
+hicn_hashtb_delete (hicn_hashtb_h h, hicn_hash_node_t ** pnode, u64 hashval)
+{
+  /* Unlink from the bucket entry first, then return the node to the pool */
+  hicn_hashtb_remove_node (h, *pnode, hashval);
+  hicn_hashtb_free_node (h, *pnode);
+  *pnode = NULL;
+
+}
+
+/*
+ * Delete an entry from a hashtable using the node itself. If the node was
+ * stored in an overflow bucket, and the bucket is empty after freeing the
+ * node, the bucket is freed as well.
+ */
+/*
+ * Unlink 'node' from its bucket entry. If the node lived in an overflow
+ * bucket that becomes empty, the overflow bucket is also detached from its
+ * chain and returned to the free pool. The node itself is NOT freed here
+ * (see hicn_hashtb_delete / hicn_hashtb_free_node).
+ */
+void
+hicn_hashtb_remove_node (hicn_hashtb_h h, hicn_hash_node_t * node,
+			 u64 hashval)
+{
+  int i, count;
+  u32 bidx, overflow_p;
+  hicn_hash_bucket_t *bucket, *parent;
+
+  if ((h == NULL) || (node == NULL))
+    {
+      goto done;
+    }
+  if (node->hn_flags & HICN_HASH_NODE_OVERFLOW_BUCKET)
+    bucket = pool_elt_at_index (h->ht_overflow_buckets, node->bucket_id);
+  else
+    {
+      /*
+       * Use some bits of the low half of the hash to locate a
+       * row/bucket in the table
+       */
+      bidx = (hashval & (h->ht_bucket_count - 1));
+      ASSERT (bidx == node->bucket_id);
+      bucket = h->ht_buckets + node->bucket_id;
+    }
+
+  overflow_p = node->hn_flags & HICN_HASH_NODE_OVERFLOW_BUCKET;
+
+  /* Clear out the entry. */
+  hicn_hashtb_init_entry (&(bucket->hb_entries[node->entry_idx]), 0, 0LL, 0);
+
+  if (!overflow_p)
+    {
+      /*
+       * And we're done, in the easy case where we didn't change an
+       * overflow bucket
+       */
+      goto done;
+    }
+  /*
+   * The special case: if this is the last remaining entry in an
+   * overflow bucket, liberate the bucket. That in turn has a special
+   * case if this bucket is in the middle of a chain of overflow
+   * buckets.
+   *
+   * Note that we're not trying aggressively (yet) to condense buckets at
+   * every possible opportunity.
+   */
+
+  /*
+   * Reset this flag; we'll set it again if this bucket links to
+   * another
+   */
+  overflow_p = FALSE;
+
+  /* Count live entries left in this overflow bucket */
+  for (i = 0, count = 0; i < HICN_HASHTB_BUCKET_ENTRIES; i++)
+    {
+      if (bucket->hb_entries[i].he_node != 0)
+	{
+	  count++;
+	}
+      if (i == (HICN_HASHTB_BUCKET_ENTRIES - 1) &&
+	  (bucket->hb_entries[i].he_flags & HICN_HASH_ENTRY_FLAG_OVERFLOW))
+	{
+	  count--;		/* Doesn't count as a 'real' entry */
+	  overflow_p = TRUE;
+	}
+    }
+
+  if (count > 0)
+    {
+      /* Still a (real) entry in the row */
+      goto done;
+    }
+  /*
+   * Need to locate the predecessor of 'bucket': start at the beginning
+   * of the chain of buckets and move forward
+   */
+  bidx = (hashval & (h->ht_bucket_count - 1));
+
+  for (parent = h->ht_buckets + bidx; parent != NULL;)
+    {
+
+      if ((parent->hb_entries[(HICN_HASHTB_BUCKET_ENTRIES - 1)].he_flags &
+	   HICN_HASH_ENTRY_FLAG_OVERFLOW) == 0)
+	{
+	  parent = NULL;
+	  break;
+	}
+      bidx = parent->hb_entries[(HICN_HASHTB_BUCKET_ENTRIES - 1)].he_node;
+
+      if (pool_elt_at_index (h->ht_overflow_buckets, bidx) == bucket)
+	{
+	  /*
+	   * Found the predecessor of 'bucket'. If 'bucket' has
+	   * a successor, connect 'parent' to it, and take
+	   * 'bucket out of the middle.
+	   */
+	  if (overflow_p)
+	    {
+	      parent->hb_entries[(HICN_HASHTB_BUCKET_ENTRIES - 1)].he_node =
+		bucket->hb_entries[(HICN_HASHTB_BUCKET_ENTRIES - 1)].he_node;
+	    }
+	  else
+	    {
+	      /*
+	       * Just clear the predecessor entry pointing
+	       * at 'bucket'
+	       */
+	      hicn_hashtb_init_entry (&parent->hb_entries
+				      [(HICN_HASHTB_BUCKET_ENTRIES - 1)], 0,
+				      0LL, 0);
+	    }
+
+	  break;
+	}
+      /*
+       * After the first iteration, 'parent' will be an overflow
+       * bucket too
+       */
+      parent = pool_elt_at_index (h->ht_overflow_buckets, bidx);
+    }
+
+  /* We really expect to have found the predecessor */
+  ASSERT (parent != NULL);
+
+  /* And now, finally, we can put 'bucket' back on the free list */
+  free_overflow_bucket (h, &bucket);
+
+done:
+  return;
+}
+
+/*
+ * Prepare a hashtable node, supplying the key, and computed hash info.
+ */
+/*
+ * Prepare a hashtable node for insertion: set default flags, copy the
+ * key (keylen <= HICN_PARAM_HICN_NAME_LEN_MAX bytes) into the node, and
+ * reset the bucket/entry back-pointers to "unset" (~0) until
+ * hicn_hashtb_insert() fills them in.
+ */
+void
+hicn_hashtb_init_node (hicn_hashtb_h h, hicn_hash_node_t * node,
+		       const u8 * key, u32 keylen)
+{
+  assert (h != NULL);
+  assert (node != NULL);
+  assert (keylen <= HICN_PARAM_HICN_NAME_LEN_MAX);
+
+  /* Init the node struct (dead store of hn_keysize = 0 removed) */
+  node->hn_flags = HICN_HASH_NODE_FLAGS_DEFAULT;
+  node->hn_keysize = keylen;
+  memcpy (node->hn_key.ks.key, key, keylen);
+  node->bucket_id = ~0;
+  node->entry_idx = ~0;
+}
+
+/*
+ * Release a hashtable node back to the free list when an entry is cleared
+ */
+/*
+ * Release a hashtable node back to the free list when an entry is cleared
+ */
+void
+hicn_hashtb_free_node (hicn_hashtb_h h, hicn_hash_node_t * node)
+{
+  /* Node zero is permanently reserved, so the count never drops below 1 */
+  ASSERT (h->ht_nodes_used > 0);
+
+  /* Return 'node' to the free list */
+  pool_put (h->ht_nodes, node);
+  h->ht_nodes_used--;
+
+}
+
+/*
+ * Walk a hashtable, iterating through the nodes, keeping context in 'ctx'.
+ */
+/*
+ * Walk a hashtable, iterating through the nodes, keeping context in 'ctx'.
+ * Cookie encoding: low 32 bits = fixed-bucket index (bidx), high 32 bits =
+ * running entry index within that row (fixed bucket plus overflow chain).
+ * Start with *ctx == HICN_HASH_WALK_CTX_INITIAL; returns
+ * HICN_ERROR_HASHTB_HASH_NOT_FOUND when exhausted. Concurrent add/delete
+ * may cause skipped or duplicated entries (see comment below).
+ */
+int
+hicn_hashtb_next_node (hicn_hashtb_h h, hicn_hash_node_t ** pnode, u64 * ctx)
+{
+  int i, j, ret = HICN_ERROR_HASHTB_INVAL;
+  u32 bidx, entry;
+  hicn_hash_bucket_t *bucket;
+
+  if ((h == NULL) || (pnode == NULL) || (ctx == NULL))
+    {
+      goto done;
+    }
+  /* Special-case for new iteration */
+  if (*ctx == HICN_HASH_WALK_CTX_INITIAL)
+    {
+      bidx = 0;
+      bucket = &h->ht_buckets[0];
+      entry = 0;
+      j = 0;
+      i = 0;
+      goto search_table;
+    }
+  /* Convert context to bucket and entry indices */
+  bidx = *ctx & 0xffffffffLL;
+  entry = *ctx >> 32;
+
+  if (bidx >= h->ht_bucket_count)
+    {
+      ret = HICN_ERROR_HASHTB_HASH_NOT_FOUND;
+      goto done;
+    }
+  bucket = h->ht_buckets + bidx;
+
+  /* Init total index into entries (includes fixed bucket and overflow) */
+  j = 0;
+
+skip_processed_bucket_chunks:
+  /*
+   * Figure out where to resume the search for the next entry in the
+   * table, by trying to find the last entry returned, from the cookie.
+   * Loop walks one (regular or overflow) bucket chunk, label is used
+   * for walking chain of chunks. Note that if there was a deletion or
+   * an addition that created an overflow, iterator can skip entries or
+   * return duplicate entries, for entries that are present from before
+   * the walk starts until after it ends.
+   */
+
+  for (i = 0; i < HICN_HASHTB_BUCKET_ENTRIES; i++, j++)
+    {
+      if (j > entry)
+	{
+	  /*
+	   * Start search for next here, use existing 'bucket'
+	   * and 'i'
+	   */
+	  break;
+	}
+      /*
+       * If an entry is marked for deletion, ignore it
+       */
+      if (bucket->hb_entries[i].he_flags & HICN_HASH_ENTRY_FLAG_DELETED)
+	{
+	  continue;
+	}
+      /*
+       * Be prepared to continue to an overflow bucket if
+       * necessary. (We only expect the last entry in a bucket to
+       * refer to an overflow bucket...)
+       */
+      if (i == (HICN_HASHTB_BUCKET_ENTRIES - 1))
+	{
+	  if (bucket->hb_entries[i].he_flags & HICN_HASH_ENTRY_FLAG_OVERFLOW)
+	    {
+	      bucket = pool_elt_at_index (h->ht_overflow_buckets,
+					  bucket->hb_entries[i].he_node);
+
+	      /* Increment overall entry counter 'j' */
+	      j++;
+
+	      goto skip_processed_bucket_chunks;
+	    }
+	  /*
+	   * end of row (end of fixed bucket plus any
+	   * overflows)
+	   */
+	  i = 0;
+	  j = 0;
+
+	  bidx++;
+
+	  /* Special case - we're at the end */
+	  if (bidx >= h->ht_bucket_count)
+	    {
+	      ret = HICN_ERROR_HASHTB_HASH_NOT_FOUND;
+	      goto done;
+	    }
+	  bucket = h->ht_buckets + bidx;
+	  break;
+	}
+    }
+
+search_table:
+
+  /*
+   * Now we're searching through the table for the next entry that's
+   * set
+   */
+
+  for (; i < HICN_HASHTB_BUCKET_ENTRIES; i++, j++)
+    {
+      /*
+       * If an entry is marked for deletion, ignore it
+       */
+      if (bucket->hb_entries[i].he_flags & HICN_HASH_ENTRY_FLAG_DELETED)
+	{
+	  continue;
+	}
+      /* Is this entry set? */
+      if (bucket->hb_entries[i].he_node != 0)
+	{
+
+	  /* Retrieve the node struct */
+	  *pnode = pool_elt_at_index (h->ht_nodes,
+				      bucket->hb_entries[i].he_node);
+
+	  /*
+	   * Set 'entry' as we exit, so we can update the
+	   * cookie
+	   */
+	  entry = j;
+	  ret = HICN_ERROR_NONE;
+	  break;
+	}
+      /*
+       * Be prepared to continue to an overflow bucket if
+       * necessary. (We only expect the last entry in a bucket to
+       * refer to an overflow bucket...)
+       */
+      if (i == (HICN_HASHTB_BUCKET_ENTRIES - 1))
+	{
+	  if (bucket->hb_entries[i].he_flags & HICN_HASH_ENTRY_FLAG_OVERFLOW)
+	    {
+	      bucket = pool_elt_at_index (h->ht_overflow_buckets,
+					  bucket->hb_entries[i].he_node);
+	      /*
+	       * Reset per-bucket index 'i', here (not done
+	       * in iterator)
+	       */
+	      i = 0;
+	      /* Increment overall entry counter 'j' */
+	      j++;
+
+	      goto search_table;
+	    }
+	  else
+	    {
+	      /*
+	       * Move to next bucket, resetting per-bucket
+	       * and overall entry indexes
+	       */
+	      i = 0;
+	      j = 0;
+
+	      bidx++;
+
+	      /* Special case - we're at the end */
+	      if (bidx >= h->ht_bucket_count)
+		{
+		  ret = HICN_ERROR_HASHTB_HASH_NOT_FOUND;
+		  goto done;
+		}
+	      bucket = h->ht_buckets + bidx;
+	      goto search_table;
+	    }
+	}
+    }
+
+done:
+
+  if (ret == HICN_ERROR_NONE)
+    {
+      /* Update context */
+      *ctx = bidx;
+      *ctx |= ((u64) entry << 32);
+    }
+  return (ret);
+}
+
+/*
+ * Append the node's key bytes to the caller's vppinfra vector (*vec_res
+ * may be reallocated; the updated pointer is mailed back). Keys longer
+ * than HICN_HASH_KEY_BYTES are silently skipped.
+ */
+int
+hicn_hashtb_key_to_buf (u8 ** vec_res, hicn_hashtb_h h,
+			const hicn_hash_node_t * node)
+{
+  int ret = HICN_ERROR_NONE;
+  u8 *vec = *vec_res;
+
+  if (node->hn_keysize <= HICN_HASH_KEY_BYTES)
+    {
+      vec_add (vec, node->hn_key.ks.key, node->hn_keysize);
+    }
+  *vec_res = vec;
+  return (ret);
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/hashtb.h b/hicn-plugin/src/hashtb.h
new file mode 100755
index 000000000..1690419a1
--- /dev/null
+++ b/hicn-plugin/src/hashtb.h
@@ -0,0 +1,550 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HICN_HASHTB_H__
+#define __HICN_HASHTB_H__
+
+#include <stdint.h>
+#include <vppinfra/bihash_8_8.h>
+#include <vppinfra/bihash_24_8.h>
+
+#include "params.h"
+#include "parser.h"
+#include "error.h"
+
+/* Handy abbreviations for success status, and for boolean values */
+#ifndef TRUE
+#define TRUE 1
+#endif
+
+#ifndef FALSE
+#define FALSE 0
+#endif
+
+/*
+ * Lookup is finding a hashtable record whose name matches the name being
+ * looked up. Most of the lookup work is based on the hash value of the two
+ * names. Note that the intel cache line size is 64 bytes, and some platforms
+ * load in 2 cache lines together. - first step is to match a record at the
+ * bucket/slot level (htab has an array of htbucket_t/htbc_elmt, where each
+ * bucket has 7 slots to hold indices for entries.) Matching at this level
+ * implies - the hashes of the lookup name and the record map to the same
+ * bucket - the high 32 bits of the hashes (slot bce_hash_msb32s) match. Read
+ * cost (on the hash table size, i.e. ignoring reading the name being looked
+ * up): - First step normally requires 1 cache line load to pull in the
+ * 64-byte htbucket_t with the 7 element slot table holding the hash_msb32s.
+ * - In the event (hopefully rare for a hash table with appropriate number of
+ * buckets) that more than 7 elements hash to the same bucket, lookup may
+ * well need to look not only at the static htbc_elmt_t but at the chain of
+ * dynamically allocated htbc_elmt_t's linked to the static htbc_elmt_t,
+ * where each of these holds slot entries for additional elements. - Before
+ * reaching that point, it is initially required to read in the hash table
+ * record fields (ht_bucket_buf, htnode buf, etc) holding pointers to the
+ * arrays, but these cache lines are common to all lookups so will likely
+ * already be in the cache. - second step is to match at the record level
+ * (htnode/htkb level) once a slot-level match happens. Matching at this
+ * level implies the following match - the hash values (the full 64 bits vs.
+ * bucket+32 msb, above) With siphash, two names hashing to the same 64-bit
+ * value is quite rare. - the name which, on the hash table side, is stored
+ * as a list of htkb_t (key buffers). [In some cases, the full name is not
+ * compared, and a match is assumed based on hash value match. Read cost: -
+ * htnode_t, in one cache line, holds hash value and index for the htkb at
+ * the head of the key buffer list - each key buffer (htkb_t) is cache line
+ * aligned/sized, and holds 60 bytes of the name and requires a cache line
+ * read. Simplification is that a fib lookup requires 3 cache lines: - bucket
+ * - htnode - single key buffer (for cases where a name comparison is done)
+ *
+ * Some hashtables (for which rare false positives are tolerable) store hash
+ * values but no keys. (In ISM NDN forwarder, this was used for dcm_dpf: data
+ * cache manager's dataplane filter, where speed was critical and very rare
+ * false positives would be detected in the full dcm check.) - No key buffers
+ * are used (or even allocated at hash table creation).
+ */
+
+#define HICN_HASH_INVALID_IDX ~0
+/*
+ * for hicn_hashtb_next_node() iterator, this otherwise illegal context value
+ * indicates first call of iteration. Note: must not be 0, which is a legal
+ * context value.
+ */
+#define HICN_HASH_WALK_CTX_INITIAL (~((u64)0))
+
+/*
+ * Key memory allocation scheme.
+ *
+ * The key is the bytestring that a hashtable entry is storing, e.g. a fib
+ * prefix or packet name. The hash of the name is used not just to pick the
+ * bucket, but also as a surrogate for the actual key value.
+ *
+ * Client calls pass key/name as contiguous memory for lookup/add/delete but
+ * hashtable stores its copy of the key/name as a list of one or more hash_key
+ * structs. - key memory is managed as a list of keys (cache line
+ * sized/aligned buffers). - If (keysize < 128) then use key struct's full
+ * 128 bytes - If not, first key struct is head of a linked list of elements
+ * where the first bytes are used for the key and the last 4 bytes are the
+ * index of the next entry (or an end marker). - key memory is generally the
+ * single largest use of memory in the hash table, especially for PIT, as
+ * names are bigger than node structs (which is also per name/entry).
+ *
+ */
+
+/* Compute hash node index from node pointer */
+#define NODE_IDX_FROM_NODE(p, h) \
+ (u32)((p) - ((h)->ht_nodes))
+
+#define HICN_HASH_KEY_BYTES 20
+
+typedef struct
+{
+ struct
+ {
+ u8 key[HICN_HASH_KEY_BYTES];
+ } ks; /* Entire key in one block */
+} hicn_hash_key_t;
+
+/*
+ * Ratio of extra key blocks to allocate, in case the embedded ones aren't
+ * sufficient. This is the fraction of the number of entries allocated.
+ */
+#define HICN_HASHTB_KEY_RATIO 8
+
+/*
+ * hash node, used to store a hash table entry; indexed by an entry in a
+ * bucket. the node contains an embedded key; long keys are stored as chains
+ * of keys.
+ *
+ * The memory block for a node includes space for client data, additional memory
+ * located off the end of the htnode data structure. Size of client-supplied
+ * data is fixed, so we can use vpp pools. The PIT and FIB need to ensure
+ * that they fit within the available data area, or change the size to
+ * accommodate their needs.
+ *
+ * NOTE: app_data_size currently applies to all apps, i.e. bigger FIB nodes
+ * means (leads to, requires) bigger PCS nodes
+ */
+
+/* Size this so that we can offer 64B aligned on 64-bits to the applications */
+/* New PIT entry size 62B */
+#define HICN_HASH_NODE_APP_DATA_SIZE 4184 //to support 512 entry //96 //190 to support 50 faces
+
+/* How to align in the right way */
+typedef struct __attribute__ ((packed)) hicn_hash_node_s
+{
+ /* Bucket id containing the corresponding hash entry. */
+ u32 bucket_id;
+
+ /* Hash entry index in the bucket */
+ u32 entry_idx;
+
+ /* Total size of the key */
+ u16 hn_keysize;
+
+ /* 1 byte of flags for application use */
+ u8 hn_flags;
+
+ u8 _hn_reserved1; /* TBD, to align what follows back to
+ * 32 */
+
+ hicn_hash_key_t hn_key; /* Key value embedded in the node, may chain
+ * to more key buffers if necessary */
+
+ /* 32B + HICN_HASH_NODE_APP_DATA_SIZE */
+ /* Followed by app-specific data (fib or pit or cs entry, e.g.) */
+ u8 hn_data[HICN_HASH_NODE_APP_DATA_SIZE];
+
+} hicn_hash_node_t;
+
+#define HICN_HASH_NODE_FLAGS_DEFAULT 0x00
+#define HICN_HASH_NODE_CS_FLAGS 0x01
+#define HICN_HASH_NODE_OVERFLOW_BUCKET 0x02
+
+/*
+ * hicn_hash_entry_t Structure holding all or part of a hash value, a node
+ * index, and other key pieces of info.
+ *
+ * - 128 bytes/bucket with 19 bytes/entry gives 6 entries, or 5 entries plus
+ * next bucket ptr if overflow Changes in this structure will affect
+ * hicn_hash_bucket_t
+ */
+typedef struct __attribute__ ((packed)) hicn_hash_entry_s
+{
+
+ /* MSB of the hash value */
+ u64 he_msb64;
+
+ /* Index of node block */
+ u32 he_node;
+
+ /*
+ * Lock to prevent hash_node deletion while there are still interest
+ * or data referring to it
+ */
+ u32 locks;
+
+ /* A few flags, including 'this points to a chain of buckets' */
+ u8 he_flags;
+
+ /*
+ * Index of the virtual function table corresponding to the dpo_ctx
+ * strategy
+ */
+ u8 vft_id;
+
+ /* Index of dpo */
+ u8 dpo_ctx_id;
+
+} hicn_hash_entry_t;
+
+#define HICN_HASH_ENTRY_FLAGS_DEFAULT 0x00
+
+/* If entry is PIT this flag is 0 */
+#define HICN_HASH_ENTRY_FLAG_CS_ENTRY 0x01
+
+/*
+ * This entry heads a chain of overflow buckets (we expect to see this only
+ * in the last entry in a bucket.) In this case, the index is to an overflow
+ * bucket rather than to a single node block.
+ */
+#define HICN_HASH_ENTRY_FLAG_OVERFLOW 0x04
+
+/* This entry has been marked for deletion */
+#define HICN_HASH_ENTRY_FLAG_DELETED 0x08
+
+/* Use fast he_timeout units for expiration, slow if not */
+#define HICN_HASH_ENTRY_FLAG_FAST_TIMEOUT 0x10
+
+/*
+ * hash bucket: Contains an array of entries. Cache line sized/aligned, so no
+ * room for extra fields unless bucket size is increased to 2 cache lines or
+ * the entry struct shrinks.
+ */
+
+/*
+ * Number of entries in each hash bucket (cache-line sized/aligned; the
+ * last entry may instead refer to a chained overflow bucket).
+ */
+#define HICN_HASHTB_BUCKET_ENTRIES 6
+
+typedef struct __attribute__ ((packed))
+{
+ hicn_hash_entry_t hb_entries[HICN_HASHTB_BUCKET_ENTRIES];
+ u64 align1;
+ u32 align2;
+ u16 align3;
+} hicn_hash_bucket_t;
+
+/* Overall target fill-factor for the hashtable */
+#define HICN_HASHTB_FILL_FACTOR 4
+
+#define HICN_HASHTB_MIN_ENTRIES (1 << 4) // includes dummy node 0 entry
+#define HICN_HASHTB_MAX_ENTRIES (1 << 24)
+
+#define HICN_HASHTB_MIN_BUCKETS (1 << 10)
+
+/*
+ * htab_t
+ *
+ * Hash table main structure.
+ *
+ * Contains - pointers to dynamically allocated arrays of cache-line
+ * sized/aligned structures (buckets, nodes, keys). Put frequently accessed
+ * fields in the first cache line.
+ */
+typedef struct hicn_hashtb_s
+{
+
+ /* 8B - main array of hash buckets */
+ hicn_hash_bucket_t *ht_buckets;
+
+ /* 8B - just-in-case block of overflow buckets */
+ hicn_hash_bucket_t *ht_overflow_buckets;
+
+ /* 8B - block of nodes associated with entries in buckets */
+ hicn_hash_node_t *ht_nodes;
+
+ /* Flags */
+ u32 ht_flags;
+
+ /* Count of buckets allocated in the main array */
+ u32 ht_bucket_count;
+
+ /* Count of overflow buckets allocated */
+ u32 ht_overflow_bucket_count;
+ u32 ht_overflow_buckets_used;
+
+ /* Count of nodes allocated */
+ u32 ht_node_count;
+ u32 ht_nodes_used;
+
+ /* Count of overflow key structs allocated */
+ u32 ht_key_count;
+ u32 ht_keys_used;
+
+} hicn_hashtb_t, *hicn_hashtb_h;
+
+/*
+ * Offset to aligned start of additional data (PIT/CS, FIB) embedded in each
+ * node.
+ */
+extern u32 ht_node_data_offset_aligned;
+
+/* Flags for hashtable */
+
+#define HICN_HASHTB_FLAGS_DEFAULT 0x00
+
+/*
+ * Don't use the last entry in each bucket - only use it for overflow. We use
+ * this for the FIB, currently, so that we can support in-place FIB changes
+ * that would be difficult if there were hash entry copies as part of
+ * overflow handling.
+ */
+#define HICN_HASHTB_FLAG_USE_SEVEN 0x04
+#define HICN_HASHTB_FLAG_KEY_FMT_PFX 0x08
+#define HICN_HASHTB_FLAG_KEY_FMT_NAME 0x10
+
+/*
+ * Max prefix name components we'll support in our incremental hashing;
+ * currently used only for LPM in the FIB.
+ */
+#define HICN_HASHTB_MAX_NAME_COMPS HICN_PARAM_FIB_ENTRY_PFX_COMPS_MAX
+
+/*
+ * APIs and inlines
+ */
+
+/* Compute hash node index from node pointer */
+static inline u32
+hicn_hashtb_node_idx_from_node (hicn_hashtb_h h, hicn_hash_node_t * p)
+{
+ return (p - h->ht_nodes);
+}
+
+/* Retrieve a hashtable node by node index */
+static inline hicn_hash_node_t *
+hicn_hashtb_node_from_idx (hicn_hashtb_h h, u32 idx)
+{
+ return (pool_elt_at_index (h->ht_nodes, idx));
+}
+
+/* Allocate a brand-new hashtable */
+int
+hicn_hashtb_alloc (hicn_hashtb_h * ph, u32 max_elems, size_t app_data_size);
+
+/* Free a hashtable, including its embedded arrays */
+int hicn_hashtb_free (hicn_hashtb_h * ph);
+
+/* Hash a bytestring, currently using bihash */
+u64 hicn_hashtb_hash_bytestring (const u8 * key, u32 keylen);
+
+always_inline hicn_hash_entry_t *
+hicn_hashtb_get_entry (hicn_hashtb_h h, u32 entry_idx, u32 bucket_id,
+ u8 bucket_overflow)
+{
+ hicn_hash_bucket_t *bucket;
+ if (bucket_overflow)
+ bucket = pool_elt_at_index (h->ht_overflow_buckets, bucket_id);
+ else
+ bucket = (hicn_hash_bucket_t *) (h->ht_buckets + bucket_id);
+
+ return &(bucket->hb_entries[entry_idx]);
+}
+
+/* Hash a name, currently using bihash */
+always_inline u64
+hicn_hashtb_hash_name (const u8 * key, u16 keylen)
+{
+ if (key != NULL && keylen == HICN_V4_NAME_LEN)
+ {
+ clib_bihash_kv_8_8_t kv;
+ kv.key = ((u64 *) key)[0];
+ return clib_bihash_hash_8_8 (&kv);
+ }
+ else if (key != NULL && keylen == HICN_V6_NAME_LEN)
+ {
+ clib_bihash_kv_24_8_t kv;
+ kv.key[0] = ((u64 *) key)[0];
+ kv.key[1] = ((u64 *) key)[1];
+ kv.key[2] = ((u32 *) key)[4];
+ return clib_bihash_hash_24_8 (&kv);
+ }
+ else
+ {
+ return (-1LL);
+ }
+}
+
+
+/*
+ * Prepare a hashtable node for insertion, supplying the key and computed
+ * hash info. This sets up the node->key relationship, possibly allocating
+ * overflow key buffers.
+ */
+void
+hicn_hashtb_init_node (hicn_hashtb_h h, hicn_hash_node_t * node,
+ const u8 * key, u32 keylen);
+
+/*
+ * Insert a node into the hashtable. We expect the caller has used the init
+ * api to set the node key and hash info, and populated the extra data area
+ * (if any) - or done the equivalent work itself.
+ */
+int
+hicn_hashtb_insert (hicn_hashtb_h h, hicn_hash_node_t * node,
+ hicn_hash_entry_t ** hash_entry, u64 hash,
+ u32 * node_id,
+ u8 * dpo_ctx_id, u8 * vft_id, u8 * is_cs,
+ u8 * hash_entry_id, u32 * bucket_id,
+ u8 * bucket_is_overflow);
+
+/*
+ * Basic api to lookup a specific hash+key tuple. This does the entire lookup
+ * operation, retrieving node structs and comparing keys, so it's not
+ * optimized for prefetching or high performance.
+ *
+ * Returns zero and mails back a node on success, errno otherwise.
+ */
+int
+hicn_hashtb_lookup_node (hicn_hashtb_h h, const u8 * key,
+ u32 keylen, u64 hashval, u8 is_data,
+ u32 * node_id, u8 * dpo_ctx_id, u8 * vft_id,
+ u8 * is_cs, u8 * hash_entry_id, u32 * bucket_id,
+ u8 * bucket_is_overflow);
+
+/*
+ * Extended api to lookup a specific hash+key tuple. The implementation
+ * allows the caller to locate nodes that are marked for deletion; this is
+ * part of some hashtable applications, such as the FIB.
+ *
+ * This does the entire lookup operation, retrieving node structs and comparing
+ * keys, so it's not optimized for prefetching or high performance.
+ *
+ * Returns zero and mails back a node on success, errno otherwise.
+ */
+int
+hicn_hashtb_lookup_node_ex (hicn_hashtb_h h, const u8 * key,
+ u32 keylen, u64 hashval, u8 is_data,
+ int include_deleted_p, u32 * node_id,
+ u8 * dpo_ctx_id, u8 * vft_id, u8 * is_cs,
+ u8 * hash_entry_id, u32 * bucket_id,
+ u8 * bucket_is_overflow);
+
+/**
+ * @brief Compares the key in the node with the given key
+ *
+ * This function allows to split the hash verification from the comparison of
+ * the entire key. Useful to exploit prefetching.
+ * @return 1 if equal, 0 otherwise
+ */
+int hicn_node_compare (const u8 * key, u32 keylen, hicn_hash_node_t * node);
+
+/*
+ * Remove a node from a hashtable using the node itself. The internal data
+ * structs are cleaned up, but the node struct itself is not: the caller must
+ * free the node itself.
+ */
+void hicn_hashtb_remove_node (hicn_hashtb_h h, hicn_hash_node_t * node,
+ u64 hashval);
+
+/*
+ * Delete a node from a hashtable using the node itself, and delete/free the
+ * node. Caller's pointer is cleared on success.
+ */
+void hicn_hashtb_delete (hicn_hashtb_h h, hicn_hash_node_t ** pnode,
+ u64 hashval);
+
+/*
+ * Utility to init a new entry in a hashtable bucket/row. We use this to add
+ * new a node+hash, and to clear out an entry during removal.
+ */
+void
+hicn_hashtb_init_entry (hicn_hash_entry_t * entry,
+ u32 nodeidx, u64 hashval, u32 locks);
+
+
+/*
+ * Return data area embedded in a hash node struct. We maintain an 'offset'
+ * value in case the common node body struct doesn't leave the data area
+ * aligned properly.
+ */
+static inline void *
+hicn_hashtb_node_data (hicn_hash_node_t * node)
+{
+ return ((u8 *) (node) + ht_node_data_offset_aligned);
+}
+
+/*
+ * Use some bits of the low half of the hash to locate a row/bucket in the
+ * table
+ */
+static inline u32
+hicn_hashtb_bucket_idx (hicn_hashtb_h h, u64 hashval)
+{
+ return ((u32) (hashval & (h->ht_bucket_count - 1)));
+}
+
+/*
+ * Return a hash node struct from the free list, or NULL. Note that the
+ * returned struct is _not_ cleared/zeroed - init is up to the caller.
+ */
+static inline hicn_hash_node_t *
+hicn_hashtb_alloc_node (hicn_hashtb_h h)
+{
+ hicn_hash_node_t *p = NULL;
+
+ if (h->ht_nodes_used < h->ht_node_count)
+ {
+ pool_get_aligned (h->ht_nodes, p, 8);
+ h->ht_nodes_used++;
+ }
+ return (p);
+}
+
+/*
+ * Release a hashtable node back to the free list when an entry is cleared
+ */
+void hicn_hashtb_free_node (hicn_hashtb_h h, hicn_hash_node_t * node);
+
+/*
+ * Walk a hashtable, iterating through the nodes, keeping context in 'ctx'
+ * between calls.
+ *
+ * Set the context value to HICN_HASH_WALK_CTX_INITIAL to start an iteration.
+ */
+int
+hicn_hashtb_next_node (hicn_hashtb_h h, hicn_hash_node_t ** pnode, u64 * ctx);
+
+
+int
+hicn_hashtb_key_to_str (hicn_hashtb_h h, const hicn_hash_node_t * node,
+ char *buf, int bufsize, int must_fit);
+
+/*
+ * single hash full name can pass offset for two hashes calculation in case
+ * we use CS and PIT in a two steps hashes (prefix + seqno)
+ */
+always_inline int
+hicn_hashtb_fullhash (const u8 * name, u16 namelen, u64 * name_hash)
+{
+ *name_hash = hicn_hashtb_hash_name (name, namelen);
+ return (*name_hash != (-1LL) ? HICN_ERROR_NONE : HICN_ERROR_HASHTB_INVAL);
+}
+
+#endif /* // __HICN_HASHTB_H__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/hicn.api b/hicn-plugin/src/hicn.api
new file mode 100755
index 000000000..e7d7d33c4
--- /dev/null
+++ b/hicn-plugin/src/hicn.api
@@ -0,0 +1,538 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+define hicn_api_node_params_set
+{
+ /* Client identifier, set from api_main.my_client_index */
+ u32 client_index;
+
+ /* Arbitrary context, so client can match reply to request */
+ u32 context;
+
+ /* Enable / disable ICN forwarder in VPP */
+ u8 enable_disable;
+
+ /* PIT maximum size, otherwise -1 to assign default value */
+ i32 pit_max_size;
+
+ /* CS maximum size, otherwise -1 to assign default value */
+ i32 cs_max_size;
+
+ /* Portion of CS reserved to application, otherwise -1 to assign default value */
+ i32 cs_reserved_app;
+
+ /* Default PIT entry lifetime */
+ f64 pit_dflt_lifetime_sec;
+
+ /* Lower bound on PIT entry lifetime */
+ f64 pit_min_lifetime_sec;
+
+ /* Upper bound on PIT entry lifetime */
+ f64 pit_max_lifetime_sec;
+};
+
+define hicn_api_node_params_set_reply
+{
+ /* From the request */
+ u32 context;
+
+ /* Return value, zero means all OK */
+ i32 retval;
+};
+
+define hicn_api_node_params_get
+{
+ /* Client identifier, set from api_main.my_client_index */
+ u32 client_index;
+
+ /* Arbitrary context, so client can match reply to request */
+ u32 context;
+};
+
+define hicn_api_node_params_get_reply
+{
+ /* From the request */
+ u32 context;
+
+ /* Return value, zero means all OK */
+ i32 retval;
+
+ /* Enabled / disabled flag */
+ u8 is_enabled;
+
+ /* compile-time plugin features */
+ u8 feature_cs;
+
+ /* Number of VPP workers */
+ u32 worker_count;
+
+ /* PIT maximum size, otherwise -1 to assign default value */
+ u32 pit_max_size;
+
+ /* CS maximum size, otherwise -1 to assign default value */
+ u32 cs_max_size;
+
+ /* Default PIT entry lifetime */
+ f64 pit_dflt_lifetime_sec;
+
+ /* Lower bound on PIT entry lifetime */
+ f64 pit_min_lifetime_sec;
+
+ /* Upper bound on PIT entry lifetime */
+ f64 pit_max_lifetime_sec;
+};
+
+define hicn_api_node_stats_get
+{
+ /* Client identifier, set from api_main.my_client_index */
+ u32 client_index;
+
+ /* Arbitrary context, so client can match reply to request */
+ u32 context;
+};
+
+define hicn_api_node_stats_get_reply
+{
+ /* Client identifier, set from api_main.my_client_index */
+ u32 client_index;
+
+ /* Arbitrary context, so client can match reply to request */
+ u32 context;
+
+ /* Return value, zero means all OK */
+ i32 retval;
+
+ /* ICN packets processed */
+ u64 pkts_processed;
+
+ /* ICN interests forwarded */
+ u64 pkts_interest_count;
+
+ /* ICN data msgs forwarded */
+ u64 pkts_data_count;
+
+ /* ICN cached data msg replies */
+ u64 pkts_from_cache_count;
+
+ /* ICN no PIT entry drops */
+ u64 pkts_no_pit_count;
+
+ /* ICN expired PIT entries */
+ u64 pit_expired_count;
+
+ /* ICN expired CS entries */
+ u64 cs_expired_count;
+
+ /* ICN LRU CS entries freed */
+ u64 cs_lru_count;
+
+ /* ICN msgs dropped due to no packet buffers */
+ u64 pkts_drop_no_buf;
+
+ /* ICN Interest messages aggregated in PIT */
+ u64 interests_aggregated;
+
+ /* ICN Interest messages retransmitted */
+ u64 interests_retx;
+
+ /* ICN Interest messages colliding in hashtb */
+ u64 interests_hash_collision;
+
+ /* Number of entries in PIT at the present moment */
+ u64 pit_entries_count;
+
+ /* Number of entries in CS at the present moment */
+ u64 cs_entries_count;
+
+ /* Number of entries in CS (network side) at the present moment */
+ u64 cs_entries_ntw_count;
+};
+
+define hicn_api_face_ip_add
+{
+ /* Client identifier, set from api_main.my_client_index */
+ u32 client_index;
+
+ /* Arbitrary context, so client can match reply to request */
+ u32 context;
+
+ /* IP local address */
+ u64 nh_addr[2];
+
+ /* VPP interface (index) associated with the face */
+ u32 swif;
+};
+
+define hicn_api_face_ip_add_reply
+{
+ /* From the request */
+ u32 context;
+
+ /* Return value: new Face ID, ~0 means no Face was created */
+ u32 faceid;
+
+ /* Return value, zero means all OK */
+ i32 retval;
+};
+
+define hicn_api_face_ip_del
+{
+ /* Client identifier, set from api_main.my_client_index */
+ u32 client_index;
+
+ /* Arbitrary context, so client can match reply to request */
+ u32 context;
+
+ /* A Face ID to be deleted */
+ u16 faceid;
+};
+
+define hicn_api_face_ip_del_reply
+{
+ /* From the request */
+ u32 context;
+
+ /* Return value, zero means all OK */
+ i32 retval;
+};
+
+define hicn_api_face_ip_params_get
+{
+ /* Client identifier, set from api_main.my_client_index */
+ u32 client_index;
+
+ /* Arbitrary context, so client can match reply to request */
+ u32 context;
+
+ /* A Face to be retrieved */
+ u16 faceid;
+};
+
+define hicn_api_face_ip_params_get_reply
+{
+ /* From the request */
+ u32 context;
+
+ /* Return value, zero means all OK */
+ i32 retval;
+
+ /* IP local address */
+ u64 nh_addr[2];
+
+ /* VPP interface (index) associated with the face */
+ u32 swif;
+
+ /* Face flags */
+ u32 flags;
+};
+
+define hicn_api_route_nhops_add
+{
+ /* Client identifier, set from api_main.my_client_index */
+ u32 client_index;
+
+ /* Arbitrary context, so client can match reply to request */
+ u32 context;
+
+ /* Prefix to be added to the FIB */
+ u64 prefix[2];
+
+ /* Length of the prefix */
+ u8 len;
+
+ /* A Face ID to the next hop forwarder for the specified prefix */
+ u32 face_ids[7];
+
+ /* Number of face to add */
+ u8 n_faces;
+};
+
+define hicn_api_route_nhops_add_reply
+{
+ /* From the request */
+ u32 context;
+
+ /* Return value, zero means all OK */
+ i32 retval;
+};
+
+define hicn_api_route_del
+{
+ /* Client identifier, set from api_main.my_client_index */
+ u32 client_index;
+
+ /* Arbitrary context, so client can match reply to request */
+ u32 context;
+
+ /* Prefix to be added to the FIB */
+ u64 prefix[2];
+
+ /* Length of the prefix */
+ u8 len;
+};
+
+define hicn_api_route_del_reply
+{
+ /* From the request */
+ u32 context;
+
+ /* Return value, zero means all OK */
+ i32 retval;
+};
+
+define hicn_api_route_nhop_del
+{
+ /* Client identifier, set from api_main.my_client_index */
+ u32 client_index;
+
+ /* Arbitrary context, so client can match reply to request */
+ u32 context;
+
+ /* Prefix to be added to the FIB */
+ u64 prefix[2];
+
+ /* Length of the prefix */
+ u8 len;
+
+ /* Specific next-hop to be removed */
+ u16 faceid;
+};
+
+define hicn_api_route_nhop_del_reply
+{
+ /* From the request */
+ u32 context;
+
+ /* Return value, zero means all OK */
+ i32 retval;
+};
+
+define hicn_api_route_get
+{
+ /* Client identifier, set from api_main.my_client_index */
+ u32 client_index;
+
+ /* Arbitrary context, so client can match reply to request */
+ u32 context;
+
+ /* Route prefix */
+ u64 prefix[2];
+
+ /* Prefix len */
+ u8 len;
+};
+
+define hicn_api_route_get_reply
+{
+ /* Client identifier, set from api_main.my_client_index */
+ u32 client_index;
+
+ /* Arbitrary context, so client can match reply to request */
+ u32 context;
+
+ /* List of faces pointing to the next hops */
+ u16 faceids[1000];
+
+ /* Strategy */
+ u32 strategy_id;
+
+ /* Return value, zero means all OK */
+ i32 retval;
+};
+
+define hicn_api_strategies_get
+{
+ /* Client identifier, set from api_main.my_client_index */
+ u32 client_index;
+
+ /* Arbitrary context, so client can match reply to request */
+ u32 context;
+};
+
+define hicn_api_strategies_get_reply
+{
+ /* Client identifier, set from api_main.my_client_index */
+ u32 client_index;
+
+ /* Arbitrary context, so client can match reply to request */
+ u32 context;
+
+ /* Number of available strategies */
+ u8 n_strategies;
+
+ /* Strategies */
+ u32 strategy_id[256];
+
+ /* Return value, zero means all OK */
+ i32 retval;
+};
+
+define hicn_api_strategy_get
+{
+ /* Client identifier, set from api_main.my_client_index */
+ u32 client_index;
+
+ /* Arbitrary context, so client can match reply to request */
+ u32 context;
+
+ /* ID of the strategy to retrieve */
+ u32 strategy_id;
+};
+
+define hicn_api_strategy_get_reply
+{
+ /* Client identifier, set from api_main.my_client_index */
+ u32 client_index;
+
+ /* Arbitrary context, so client can match reply to request */
+ u32 context;
+
+ /* Strategy description */
+ u8 description[200];
+
+ /* Return value, zero means all OK */
+ i32 retval;
+};
+
+define hicn_api_punting_add
+{
+ /* Client identifier, set from api_main.my_client_index */
+ u32 client_index;
+
+ /* Arbitrary context, so client can match reply to request */
+ u32 context;
+
+ /* Prefix to match */
+ u64 prefix[2];
+
+ /* Subnet */
+ u8 len;
+
+ /* Interface id */
+ u32 swif;
+};
+
+define hicn_api_punting_add_reply
+{
+ /* From the request */
+ u32 context;
+
+ /* Return value, zero means all OK */
+ i32 retval;
+};
+
+define hicn_api_punting_del
+{
+ /* Client identifier, set from api_main.my_client_index */
+ u32 client_index;
+
+ /* Arbitrary context, so client can match reply to request */
+ u32 context;
+
+ /* Prefix to match */
+ u64 prefix[2];
+
+ /* Subnet */
+ u8 len;
+
+ /* Interface id */
+ u32 swif;
+};
+
+define hicn_api_punting_del_reply
+{
+ /* From the request */
+ u32 context;
+
+ /* Return value, zero means all OK */
+ i32 retval;
+};
+
+define hicn_api_register_prod_app
+{
+ /* Client identifier, set from api_main.my_client_index */
+ u32 client_index;
+
+ /* Arbitrary context, so client can match reply to request */
+ u64 context;
+
+ /* Prefix to match */
+ u64 prefix[2];
+
+ /* Subnet */
+ u8 len;
+
+ /* sw_if id */
+ u32 swif;
+
+ /* CS memory reserved -- in number of packets */
+ u32 cs_reserved;
+};
+
+define hicn_api_register_prod_app_reply
+{
+ /* From the request */
+ u64 context;
+
+ /* Return value, zero means all OK */
+ i32 retval;
+
+ /* Actual CS memory reserved -- in number of packets */
+ u32 cs_reserved;
+
+ /* Prod address (ipv4 or ipv6) */
+ u64 prod_addr[2];
+
+ /* Return value: new Face ID, ~0 means no Face was created */
+ u32 faceid;
+};
+
+define hicn_api_register_cons_app
+{
+ /* Client identifier, set from api_main.my_client_index */
+ u32 client_index;
+
+ /* Arbitrary context, so client can match reply to request */
+ u64 context;
+
+ /* swif */
+ u32 swif;
+};
+
+define hicn_api_register_cons_app_reply
+{
+ /* From the request */
+ u64 context;
+
+ /* Return value, zero means all OK */
+ i32 retval;
+
+ /* Ip4 address */
+ u32 src_addr4;
+
+ /* Ip6 address */
+ u64 src_addr6[2];
+
+ /* Return value: new Face ID, ~0 means no Face was created */
+ u32 faceid;
+};
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/hicn-plugin/src/hicn.c b/hicn-plugin/src/hicn.c
new file mode 100755
index 000000000..a7b04de74
--- /dev/null
+++ b/hicn-plugin/src/hicn.c
@@ -0,0 +1,253 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/vnet.h>
+#include <vnet/plugin/plugin.h>
+
+#include "hicn.h"
+#include "params.h"
+#include "infra.h"
+#include "strategy_dpo_manager.h"
+#include "mgmt.h"
+#include "punt.h"
+#include "error.h"
+#include "faces/app/address_mgr.h"
+#include "face_db.h"
+#include "faces/udp/face_udp.h"
+
+/* Global plugin state (PIT/CS, lifetimes, enable flag). */
+hicn_main_t hicn_main;
+/* Module vars */
+/* Set once hicn_infra_fwdr_init has completed successfully. */
+int hicn_infra_fwdr_initialized = 0;
+
+/*
+ * Global time counters we're trying out for opportunistic hashtable
+ * expiration.
+ */
+uint16_t hicn_infra_fast_timer;	/* Counts at 1 second intervals */
+uint16_t hicn_infra_slow_timer;	/* Counts at 1 minute intervals */
+
+/* Pool of face buckets — presumably managed by face_db (face_db.h);
+ * TODO(review): confirm the allocating module. */
+hicn_face_bucket_t *hicn_face_bucket_pool;
+
+/*
+ * Init hicn forwarder with configurable PIT, CS sizes.
+ *
+ * @param shard_pit_size  number of PIT entries per worker
+ * @param shard_cs_size   number of CS entries per worker
+ * @param cs_reserved     percentage [0,100) of the CS reserved for app faces
+ * @return HICN_ERROR_NONE on success, HICN_ERROR_FWD_ALREADY_ENABLED if
+ *         already initialized, or the error from hicn_pit_create.
+ */
+static int
+hicn_infra_fwdr_init (uint32_t shard_pit_size, uint32_t shard_cs_size,
+                      uint32_t cs_reserved)
+{
+  int ret = 0;
+
+  if (hicn_infra_fwdr_initialized)
+    {
+      ret = HICN_ERROR_FWD_ALREADY_ENABLED;
+      goto done;
+    }
+  /* Init per worker limits */
+  hicn_infra_pit_size = shard_pit_size;
+  hicn_infra_cs_size = shard_cs_size;
+
+  /* Init the global time-compression counters */
+  hicn_infra_fast_timer = 1;
+  hicn_infra_slow_timer = 1;
+
+  ret = hicn_pit_create (&hicn_main.pitcs, hicn_infra_pit_size);
+  /* BUG FIX: the PIT creation result was previously ignored and the LRU
+   * limits were configured on a possibly uninitialized PIT/CS. */
+  if (ret != HICN_ERROR_NONE)
+    goto done;
+
+  /* Split the CS between the regular LRU and the app-reserved LRU. */
+  hicn_pit_set_lru_max (&hicn_main.pitcs,
+                        hicn_infra_cs_size -
+                        (hicn_infra_cs_size * cs_reserved / 100));
+  hicn_pit_set_lru_app_max (&hicn_main.pitcs,
+                            hicn_infra_cs_size * cs_reserved / 100);
+
+done:
+  if ((ret == HICN_ERROR_NONE) && !hicn_infra_fwdr_initialized)
+    {
+      hicn_infra_fwdr_initialized = 1;
+    }
+  return (ret);
+}
+
+/*
+ * Action function shared between message handler and debug CLI NOTICE: we're
+ * only 'enabling' now.
+ *
+ * Negative *_req arguments select the compile-time defaults; non-negative
+ * values are range-checked before being committed to hicn_main.
+ *
+ * @return HICN_ERROR_NONE or a HICN_ERROR_* / config-out-of-bounds code.
+ */
+int
+hicn_infra_plugin_enable_disable (int enable_disable,
+                                  int pit_size_req,
+                                  f64 pit_dflt_lifetime_sec_req,
+                                  f64 pit_min_lifetime_sec_req,
+                                  f64 pit_max_lifetime_sec_req,
+                                  int cs_size_req, int cs_reserved_app)
+{
+  int ret = 0;
+
+  hicn_main_t *sm = &hicn_main;
+  uint32_t pit_size, cs_size, cs_reserved;
+
+  /* Notice if we're already enabled... */
+  if (sm->is_enabled)
+    {
+      ret = HICN_ERROR_FWD_ALREADY_ENABLED;
+      goto done;
+    }
+  /* Set up params and call fwdr_init set up PIT/CS, forwarder nodes */
+
+  /* Check the range and assign some globals */
+  if (pit_min_lifetime_sec_req < 0)
+    {
+      sm->pit_lifetime_min_ms = HICN_PARAM_PIT_LIFETIME_DFLT_MIN_MS;
+    }
+  else
+    {
+      if (pit_min_lifetime_sec_req < HICN_PARAM_PIT_LIFETIME_BOUND_MIN_SEC ||
+          pit_min_lifetime_sec_req > HICN_PARAM_PIT_LIFETIME_BOUND_MAX_SEC)
+        {
+          ret = HICN_ERROR_PIT_CONFIG_MINLT_OOB;
+          goto done;
+        }
+      sm->pit_lifetime_min_ms = pit_min_lifetime_sec_req * SEC_MS;
+    }
+
+  if (pit_max_lifetime_sec_req < 0)
+    {
+      sm->pit_lifetime_max_ms = HICN_PARAM_PIT_LIFETIME_DFLT_MAX_MS;
+    }
+  else
+    {
+      if (pit_max_lifetime_sec_req < HICN_PARAM_PIT_LIFETIME_BOUND_MIN_SEC ||
+          pit_max_lifetime_sec_req > HICN_PARAM_PIT_LIFETIME_BOUND_MAX_SEC)
+        {
+          ret = HICN_ERROR_PIT_CONFIG_MAXLT_OOB;
+          goto done;
+        }
+      sm->pit_lifetime_max_ms = pit_max_lifetime_sec_req * SEC_MS;
+    }
+  if (sm->pit_lifetime_min_ms > sm->pit_lifetime_max_ms)
+    {
+      ret = HICN_ERROR_PIT_CONFIG_MINMAXLT;
+      goto done;
+    }
+  if (pit_dflt_lifetime_sec_req < 0)
+    {
+      sm->pit_lifetime_dflt_ms = HICN_PARAM_PIT_LIFETIME_DFLT_DFLT_MS;
+    }
+  else
+    {
+      sm->pit_lifetime_dflt_ms = pit_dflt_lifetime_sec_req * SEC_MS;
+    }
+  /* The default lifetime must fall inside the [min, max] window. */
+  if (sm->pit_lifetime_dflt_ms < sm->pit_lifetime_min_ms ||
+      sm->pit_lifetime_dflt_ms > sm->pit_lifetime_max_ms)
+    {
+      ret = HICN_ERROR_PIT_CONFIG_DFTLT_OOB;
+      goto done;
+    }
+  if (pit_size_req < 0)
+    {
+      pit_size = HICN_PARAM_PIT_ENTRIES_DFLT;
+    }
+  else
+    {
+      if (pit_size_req < HICN_PARAM_PIT_ENTRIES_MIN ||
+          pit_size_req > HICN_PARAM_PIT_ENTRIES_MAX)
+        {
+          ret = HICN_ERROR_PIT_CONFIG_SIZE_OOB;
+          goto done;
+        }
+      pit_size = (uint32_t) pit_size_req;
+    }
+
+  if (cs_size_req < 0)
+    {
+      cs_size = HICN_PARAM_CS_ENTRIES_DFLT;
+    }
+  else
+    {
+      if (cs_size_req > HICN_PARAM_CS_ENTRIES_MAX)
+        {
+          ret = HICN_ERROR_CS_CONFIG_SIZE_OOB;
+          goto done;
+        }
+      cs_size = (uint32_t) cs_size_req;
+    }
+
+  if (cs_reserved_app < 0)
+    {
+      cs_reserved = HICN_PARAM_CS_RESERVED_APP;
+    }
+  else
+    {
+      /* BUG FIX: the out-of-bounds error was recorded but not acted upon,
+       * so a reservation >= 100% silently fell through and the forwarder
+       * was enabled anyway (ret was later overwritten). Bail out. */
+      if (cs_reserved_app >= 100)
+        {
+          ret = HICN_ERROR_CS_CONFIG_RESERVED_OOB;
+          goto done;
+        }
+      cs_reserved = cs_reserved_app;
+    }
+
+  ret = hicn_infra_fwdr_init (pit_size, cs_size, cs_reserved);
+  if (ret != HICN_ERROR_NONE)
+    {
+      /* Do not touch the face db or mark the plugin enabled on failure. */
+      goto done;
+    }
+
+  hicn_face_db_init (pit_size);
+
+  sm->is_enabled = 1;
+
+  hicn_face_udp_init_internal ();
+
+done:
+
+  return (ret);
+}
+
+/*
+ * Init entry-point for the hicn plugin.
+ *
+ * Registers API handlers, then initializes the punting tables, the
+ * strategy dpo module, the application address manager and the face
+ * module. The forwarder itself stays disabled until explicitly enabled.
+ */
+static clib_error_t *
+hicn_init (vlib_main_t * vm)
+{
+  clib_error_t *error = 0;
+
+  hicn_main_t *sm = &hicn_main;
+
+  /* Init other elements in the 'main' struct */
+  sm->is_enabled = 0;
+
+  error = hicn_api_plugin_hookup (vm);
+  /* BUG FIX: previously the remaining sub-modules were initialized even
+   * when API hookup failed; abort early instead of returning the error
+   * only after a half-initialized start-up. */
+  if (error)
+    return error;
+
+  /* Init the hash table */
+  hicn_punt_init (vm);
+
+  /* Init the dpo module */
+  hicn_dpos_init ();
+
+  /* Init the app manager */
+  address_mgr_init ();
+
+  hicn_face_module_init (vm);
+
+  return error;
+}
+
+/* Run hicn_init during VPP start-up. */
+VLIB_INIT_FUNCTION (hicn_init);
+
+/* *INDENT-OFF* */
+/* Register this shared object with VPP's plugin loader. */
+VLIB_PLUGIN_REGISTER() =
+{
+  .description = "hICN forwarder"
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/hicn.h b/hicn-plugin/src/hicn.h
new file mode 100755
index 000000000..02a3dfa52
--- /dev/null
+++ b/hicn-plugin/src/hicn.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HICN_H__
+#define __HICN_H__
+
+#include <hicn/hicn.h>
+
+#include <netinet/in.h>
+#include <vnet/ip/ip.h>
+#include <vnet/tcp/tcp_packet.h>
+#include <vnet/ip/ip6_packet.h>
+#include <vnet/ip/ip4_packet.h>
+#include <vnet/buffer.h>
+
+/* Helper for avoiding warnings about type-punning: reinterpret x's bits
+ * as destType through a union instead of a pointer cast. */
+#define UNION_CAST(x, destType) \
+  (((union {__typeof__(x) a; destType b;})x).b)
+
+/*
+ * Update CMakeLists.txt as we have to manually replace the type for
+ * vppapigen
+ */
+typedef u8 weight_t;
+
+/* Select the v6 or the v4 expression depending on isv6. */
+#define ISV6(isv6, dov6, dov4) isv6 ? dov6 : dov4
+/* Non-zero when a name hash was stored in the buffer's opaque2 area.
+ * NOTE(review): reads the first two opaque2 words with per-element u64
+ * casts — confirm this matches the hicn_buffer_t layout below. */
+#define HICN_IS_NAMEHASH_CACHED(b) (((u64)(b->opaque2)[0] != 0) || ((u64)(b->opaque2)[1] != 0))
+
+/* Fallback definition for the minimum chained-buffer segment size. */
+#ifndef VLIB_BUFFER_MIN_CHAIN_SEG_SIZE
+#define VLIB_BUFFER_MIN_CHAIN_SEG_SIZE (128)
+#endif
+
+/* The following is stored in the opaque2 field in the vlib_buffer_t;
+ * the STATIC_ASSERT below guarantees it fits. */
+typedef struct
+{
+  /* hash of the name */
+  u64 name_hash;
+
+  /* ids to prefetch a PIT/CS entry */
+  u32 node_id;
+  u32 bucket_id;
+  u8 hash_entry_id;
+  u8 hash_bucket_flags;
+
+  u8 is_appface;		/* 1 the incoming face is an
+				 * application face, 0 otherwise */
+  u8 dpo_ctx_id;		/* used for data path */
+  u8 vft_id;			/* " */
+
+  dpo_id_t face_dpo_id;		/* ingress face ,sizeof(iface_dpo_id)
+				 * <= sizeof(u64) */
+
+  /* hICN packet format descriptor */
+  hicn_type_t type;
+} hicn_buffer_t;
+
+STATIC_ASSERT (sizeof (hicn_buffer_t) <=
+	       STRUCT_SIZE_OF (vlib_buffer_t, opaque2),
+	       "hICN buffer opaque2 meta-data too large for vlib_buffer");
+
+
+/* Return the hICN metadata stored in the buffer's opaque2 area. */
+always_inline hicn_buffer_t *
+hicn_get_buffer (vlib_buffer_t * b0)
+{
+  /* opaque2 decays to a pointer to its first element. */
+  return ((hicn_buffer_t *) b0->opaque2);
+}
+
+#endif /* __HICN_H__ */
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/hicn_all_api_h.h b/hicn-plugin/src/hicn_all_api_h.h
new file mode 100755
index 000000000..1263ea4a2
--- /dev/null
+++ b/hicn-plugin/src/hicn_all_api_h.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <hicn/hicn.api.h>
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/hicn_api.c b/hicn-plugin/src/hicn_api.c
new file mode 100755
index 000000000..8becde12c
--- /dev/null
+++ b/hicn-plugin/src/hicn_api.c
@@ -0,0 +1,570 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vppinfra/error.h>
+#include <vnet/ip/format.h>
+#include <vnet/ip/ip4.h>
+#include <vnet/ip/ip6.h>
+#include <vlibapi/api.h>
+#include <vlibmemory/api.h>
+
+#include "hicn.h"
+#include "faces/ip/face_ip.h"
+#include "infra.h"
+#include "parser.h"
+#include "mgmt.h"
+#include "strategy_dpo_manager.h"
+#include "strategy_dpo_ctx.h"
+#include "strategy.h"
+#include "pg.h"
+#include "error.h"
+#include "punt.h"
+#include "faces/app/face_prod.h"
+#include "faces/app/face_cons.h"
+#include "route.h"
+
+/* define message IDs */
+#include <hicn/hicn_msg_enum.h>
+
+/* define generated endian-swappers */
+#define vl_endianfun
+#include <hicn/hicn_all_api_h.h>
+#undef vl_endianfun
+
+/* instantiate all the print functions we know about */
+#define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__)
+#define vl_printfun
+#include <hicn/hicn_all_api_h.h>
+#undef vl_printfun
+
+/* Get the API version number */
+#define vl_api_version(n, v) static u32 api_version=(v);
+#include <hicn/hicn_all_api_h.h>
+#undef vl_api_version
+
+#define REPLY_MSG_ID_BASE sm->msg_id_base
+#include <vlibapi/api_helper_macros.h>
+
+/****** List of message types that this plugin understands ******/
+
+#define foreach_hicn_plugin_api_msg \
+ _(HICN_API_NODE_PARAMS_SET, hicn_api_node_params_set) \
+ _(HICN_API_NODE_PARAMS_GET, hicn_api_node_params_get) \
+ _(HICN_API_NODE_STATS_GET, hicn_api_node_stats_get) \
+ _(HICN_API_FACE_IP_ADD, hicn_api_face_ip_add) \
+ _(HICN_API_FACE_IP_DEL, hicn_api_face_ip_del) \
+ _(HICN_API_FACE_IP_PARAMS_GET, hicn_api_face_ip_params_get) \
+ _(HICN_API_ROUTE_GET, hicn_api_route_get) \
+ _(HICN_API_ROUTE_NHOPS_ADD, hicn_api_route_nhops_add) \
+ _(HICN_API_ROUTE_DEL, hicn_api_route_del) \
+ _(HICN_API_ROUTE_NHOP_DEL, hicn_api_route_nhop_del) \
+ _(HICN_API_STRATEGIES_GET, hicn_api_strategies_get) \
+ _(HICN_API_STRATEGY_GET, hicn_api_strategy_get) \
+ _(HICN_API_PUNTING_ADD, hicn_api_punting_add) \
+ _(HICN_API_PUNTING_DEL, hicn_api_punting_del) \
+ _(HICN_API_REGISTER_PROD_APP, hicn_api_register_prod_app) \
+ _(HICN_API_REGISTER_CONS_APP, hicn_api_register_cons_app)
+
+
+/****** SUPPORTING FUNCTION DECLARATIONS ******/
+
+/*
+ * Forward declaration of the helper that serializes the parameters of an
+ * IP face into a face_ip_params_get reply (defined at the bottom of this
+ * file). NOTE(review): declared always_inline but defined non-inline
+ * below — confirm the linkage mismatch is intentional.
+ */
+always_inline vnet_api_error_t
+hicn_face_api_entry_params_serialize (hicn_face_id_t faceid,
+ vl_api_hicn_api_face_ip_params_get_reply_t
+ * reply);
+
+
+/****************** API MESSAGE HANDLERS ******************/
+
+/****** NODE ******/
+
+/* Enable/disable the forwarder and set PIT/CS sizing parameters. */
+static void
+vl_api_hicn_api_node_params_set_t_handler (vl_api_hicn_api_node_params_set_t *
+					   mp)
+{
+  vl_api_hicn_api_node_params_set_reply_t *rmp;
+  int rv;
+
+  hicn_main_t *sm = &hicn_main;
+
+  int pit_max_size = clib_net_to_host_i32 (mp->pit_max_size);
+  /* NOTE(review): the f64 lifetime fields are copied without byte-order
+   * conversion — confirm clients send them in host order. */
+  f64 pit_dflt_lifetime_sec = mp->pit_dflt_lifetime_sec;
+  f64 pit_min_lifetime_sec = mp->pit_min_lifetime_sec;
+  f64 pit_max_lifetime_sec = mp->pit_max_lifetime_sec;
+  int cs_max_size = clib_net_to_host_i32 (mp->cs_max_size);
+  int cs_reserved_app = clib_net_to_host_i32 (mp->cs_reserved_app);
+
+  /* Out-of-range app reservations are clamped to the default rather
+   * than rejected. */
+  cs_reserved_app = cs_reserved_app >= 0
+    && cs_reserved_app < 100 ? cs_reserved_app : HICN_PARAM_CS_RESERVED_APP;
+
+  rv = hicn_infra_plugin_enable_disable ((int) (mp->enable_disable),
+					 pit_max_size,
+					 pit_dflt_lifetime_sec,
+					 pit_min_lifetime_sec,
+					 pit_max_lifetime_sec,
+					 cs_max_size, cs_reserved_app);
+
+  REPLY_MACRO (VL_API_HICN_API_NODE_PARAMS_SET_REPLY /* , rmp, mp, rv */ );
+}
+
+/* Report the current forwarder configuration (sizes and lifetimes). */
+static void
+vl_api_hicn_api_node_params_get_t_handler (vl_api_hicn_api_node_params_get_t *
+					   mp)
+{
+  vl_api_hicn_api_node_params_get_reply_t *rmp;
+  int rv = HICN_ERROR_NONE;
+
+  hicn_main_t *sm = &hicn_main;
+
+  /* Lifetimes are stored internally in ms and reported in seconds. */
+  /* *INDENT-OFF* */
+  REPLY_MACRO2 (VL_API_HICN_API_NODE_PARAMS_GET_REPLY, (
+  {
+    rmp->is_enabled = sm->is_enabled;
+    rmp->feature_cs = HICN_FEATURE_CS;
+    rmp->pit_max_size = clib_host_to_net_u32 (hicn_infra_pit_size);
+    rmp->pit_dflt_lifetime_sec = ((f64) sm->pit_lifetime_dflt_ms) / SEC_MS;
+    rmp->pit_min_lifetime_sec = ((f64) sm->pit_lifetime_min_ms) / SEC_MS;
+    rmp->pit_max_lifetime_sec = ((f64) sm->pit_lifetime_max_ms) / SEC_MS;
+    rmp->cs_max_size = clib_host_to_net_u32 (hicn_infra_cs_size);
+    rmp->retval = clib_host_to_net_i32 (rv);
+  }));
+  /* *INDENT-ON* */
+}
+
+/* Collect the forwarder's node statistics into the reply message. */
+static void
+vl_api_hicn_api_node_stats_get_t_handler (vl_api_hicn_api_node_stats_get_t *
+					  mp)
+{
+  vl_api_hicn_api_node_stats_get_reply_t *rmp;
+  int rv = HICN_ERROR_NONE;
+
+  hicn_main_t *sm = &hicn_main;
+
+  /* hicn_mgmt_node_stats_get fills rmp directly and returns the status. */
+  /* *INDENT-OFF* */
+  REPLY_MACRO2 (VL_API_HICN_API_NODE_STATS_GET_REPLY, (
+  {
+    rv = hicn_mgmt_node_stats_get (rmp);
+    rmp->retval =clib_host_to_net_i32 (rv);
+  }));
+  /* *INDENT-ON* */
+}
+
+
+/****** FACE *******/
+
+/* Create an IP face towards nh_addr on interface swif. */
+static void
+vl_api_hicn_api_face_ip_add_t_handler (vl_api_hicn_api_face_ip_add_t * mp)
+{
+  vl_api_hicn_api_face_ip_add_reply_t *rmp;
+  int rv;
+
+  hicn_main_t *sm = &hicn_main;
+
+  hicn_face_id_t faceid = HICN_FACE_NULL;
+  ip46_address_t nh_addr;
+  /* The next-hop address is carried as two big-endian u64 words. */
+  nh_addr.as_u64[0] = clib_net_to_host_u64 (((u64 *) (&mp->nh_addr))[0]);
+  nh_addr.as_u64[1] = clib_net_to_host_u64 (((u64 *) (&mp->nh_addr))[1]);
+
+  u32 swif = clib_net_to_host_u32 (mp->swif);
+  rv = hicn_face_ip_add (&nh_addr, NULL, swif, &faceid);
+
+  /* *INDENT-OFF* */
+  REPLY_MACRO2 (VL_API_HICN_API_FACE_IP_ADD_REPLY /* , rmp, mp, rv */ ,(
+  {
+    /* NOTE(review): faceid is truncated to u16 here while the route
+     * messages carry face ids as u32 — confirm ids always fit 16 bits. */
+    rmp->faceid = clib_host_to_net_u16 ((u16) faceid);
+  }));
+  /* *INDENT-ON* */
+}
+
+/* Delete the IP face identified by mp->faceid. */
+static void
+vl_api_hicn_api_face_ip_del_t_handler (vl_api_hicn_api_face_ip_del_t * mp)
+{
+  vl_api_hicn_api_face_ip_del_reply_t *rmp;
+  hicn_main_t *sm = &hicn_main;
+  int rv;
+
+  /* The face id travels on the wire as a big-endian u16 in this message. */
+  rv = hicn_face_del ((hicn_face_id_t) clib_net_to_host_u16 (mp->faceid));
+
+  REPLY_MACRO (VL_API_HICN_API_FACE_IP_DEL_REPLY /* , rmp, mp, rv */ );
+
+}
+
+/* Return the parameters (next hop, sw_if, flags) of one IP face. */
+static void
+  vl_api_hicn_api_face_ip_params_get_t_handler
+  (vl_api_hicn_api_face_ip_params_get_t * mp)
+{
+  vl_api_hicn_api_face_ip_params_get_reply_t *rmp;
+  int rv = 0;
+
+  hicn_main_t *sm = &hicn_main;
+
+  hicn_face_id_t faceid = clib_net_to_host_u16 (mp->faceid);
+
+  /* NOTE(review): retval is an i32 but is byte-swapped with the u32
+   * helper — behaviorally identical on two's-complement, just odd. */
+  /* *INDENT-OFF* */
+  REPLY_MACRO2 (VL_API_HICN_API_FACE_IP_PARAMS_GET_REPLY, (
+  {
+    rv = hicn_face_api_entry_params_serialize(faceid, rmp);
+    rmp->retval = clib_host_to_net_u32(rv);
+  }));
+  /* *INDENT-ON* */
+}
+
+/****** ROUTE *******/
+
+/* Add a route for prefix/len with up to HICN_PARAM_FIB_ENTRY_NHOPS_MAX
+ * next-hop faces; if the route already exists, append the next-hops. */
+static void
+vl_api_hicn_api_route_nhops_add_t_handler (vl_api_hicn_api_route_nhops_add_t
+					   * mp)
+{
+  vl_api_hicn_api_route_nhops_add_reply_t *rmp;
+  int rv = HICN_ERROR_NONE;
+  hicn_face_id_t face_ids[HICN_PARAM_FIB_ENTRY_NHOPS_MAX];
+
+  hicn_main_t *sm = &hicn_main;
+
+  ip46_address_t prefix;
+  /* Prefix is carried as two big-endian u64 words. */
+  prefix.as_u64[0] = clib_net_to_host_u64 (((u64 *) (&mp->prefix))[0]);
+  prefix.as_u64[1] = clib_net_to_host_u64 (((u64 *) (&mp->prefix))[1]);
+
+  u8 len = mp->len;
+  u8 n_faces = mp->n_faces;
+
+  /* BUG FIX: the original also tested `face_ids == NULL`, but face_ids
+   * is a local array and can never be NULL (dead, always-false check);
+   * the only input to validate is the next-hop count. */
+  if (n_faces > HICN_PARAM_FIB_ENTRY_NHOPS_MAX)
+    {
+      rv = VNET_API_ERROR_INVALID_ARGUMENT;
+    }
+  else
+    {
+      for (int i = 0; i < HICN_PARAM_FIB_ENTRY_NHOPS_MAX; i++)
+	{
+	  face_ids[i] = clib_net_to_host_u16 (mp->face_ids[i]);
+	}
+
+      rv = hicn_route_add (face_ids, n_faces, &prefix, len);
+
+      if (rv == HICN_ERROR_ROUTE_ALREADY_EXISTS)
+	{
+	  rv = hicn_route_add_nhops (face_ids, n_faces, &prefix, len);
+	}
+    }
+  REPLY_MACRO (VL_API_HICN_API_ROUTE_NHOPS_ADD_REPLY /* , rmp, mp, rv */ );
+}
+
+
+/* Delete the hICN route matching prefix/len. */
+static void vl_api_hicn_api_route_del_t_handler
+  (vl_api_hicn_api_route_del_t * mp)
+{
+  vl_api_hicn_api_route_del_reply_t *rmp;
+  hicn_main_t *sm = &hicn_main;
+  int rv;
+  ip46_address_t prefix;
+  const u64 *wire = (const u64 *) (&mp->prefix);
+
+  /* The prefix travels as two big-endian u64 words. */
+  prefix.as_u64[0] = clib_net_to_host_u64 (wire[0]);
+  prefix.as_u64[1] = clib_net_to_host_u64 (wire[1]);
+
+  rv = hicn_route_del (&prefix, mp->len);
+
+  REPLY_MACRO (VL_API_HICN_API_ROUTE_DEL_REPLY /* , rmp, mp, rv */ );
+}
+
+/* Remove a single next-hop face from the route for prefix/len. */
+static void vl_api_hicn_api_route_nhop_del_t_handler
+  (vl_api_hicn_api_route_nhop_del_t * mp)
+{
+  vl_api_hicn_api_route_nhop_del_reply_t *rmp;
+  hicn_main_t *sm = &hicn_main;
+  int rv;
+  ip46_address_t prefix;
+  const u64 *wire = (const u64 *) (&mp->prefix);
+
+  /* The prefix travels as two big-endian u64 words. */
+  prefix.as_u64[0] = clib_net_to_host_u64 (wire[0]);
+  prefix.as_u64[1] = clib_net_to_host_u64 (wire[1]);
+
+  rv = hicn_route_del_nhop (&prefix, mp->len,
+			    (hicn_face_id_t)
+			    clib_net_to_host_u32 (mp->faceid));
+
+  REPLY_MACRO (VL_API_HICN_API_ROUTE_NHOP_DEL_REPLY /* , rmp, mp, rv */ );
+}
+
+/* Look up the route for prefix/len and report its next-hop face ids and
+ * forwarding strategy. */
+static void vl_api_hicn_api_route_get_t_handler
+  (vl_api_hicn_api_route_get_t * mp)
+{
+  vl_api_hicn_api_route_get_reply_t *rmp;
+  int rv = HICN_ERROR_NONE;
+
+  hicn_main_t *sm = &hicn_main;
+
+  ip46_address_t prefix;
+  /* Prefix is carried as two big-endian u64 words. */
+  prefix.as_u64[0] = clib_net_to_host_u64 (((u64 *) (&mp->prefix))[0]);
+  prefix.as_u64[1] = clib_net_to_host_u64 (((u64 *) (&mp->prefix))[1]);
+  u8 len = mp->len;
+  const dpo_id_t *hicn_dpo_id;
+  const hicn_dpo_vft_t *hicn_dpo_vft;
+  hicn_dpo_ctx_t *hicn_dpo_ctx;
+  u32 fib_index;
+
+  rv = hicn_route_get_dpo (&prefix, len, &hicn_dpo_id, &fib_index);
+
+  /* NOTE(review): faceids are copied without byte-order conversion while
+   * strategy_id is swapped — confirm clients expect this asymmetry. */
+  /* *INDENT-OFF* */
+  REPLY_MACRO2 (VL_API_HICN_API_ROUTE_GET_REPLY, (
+  {
+    if (rv == HICN_ERROR_NONE)
+      {
+	hicn_dpo_vft = hicn_dpo_get_vft(hicn_dpo_id->dpoi_index);
+	hicn_dpo_ctx = hicn_dpo_vft->hicn_dpo_get_ctx(hicn_dpo_id->dpoi_index);
+	for (int i = 0; i < hicn_dpo_ctx->entry_count; i++)
+	  {
+	    if (dpo_id_is_valid(&hicn_dpo_ctx->next_hops[i]))
+	      {
+		rmp->faceids[i] =((dpo_id_t *) &hicn_dpo_ctx->next_hops[i])->dpoi_index;}
+	  }
+	rmp->strategy_id = clib_host_to_net_u32(hicn_dpo_get_vft_id(hicn_dpo_id));}
+  }));
+  /* *INDENT-ON* */
+}
+
+/* List the ids of all registered (valid) forwarding strategies. */
+static void vl_api_hicn_api_strategies_get_t_handler
+  (vl_api_hicn_api_strategies_get_t * mp)
+{
+  vl_api_hicn_api_strategies_get_reply_t *rmp;
+  int rv = HICN_ERROR_NONE;
+
+  hicn_main_t *sm = &hicn_main;
+
+  int n_strategies = hicn_strategy_get_all_available ();
+
+  /* Only valid strategy ids are copied out; j counts how many. */
+  /* *INDENT-OFF* */
+  REPLY_MACRO2 (VL_API_HICN_API_STRATEGIES_GET_REPLY/* , rmp, mp, rv */ ,(
+  {
+    int j = 0;
+    for (u32 i = 0; i < (u32) n_strategies; i++)
+      {
+	if (hicn_dpo_strategy_id_is_valid (i) == HICN_ERROR_NONE)
+	  {
+	    rmp->strategy_id[j] = clib_host_to_net_u32 (i); j++;}
+      }
+    /* NOTE(review): n_strategies is reported without byte swapping and
+     * counts all strategies, not only the valid ones copied above. */
+    rmp->n_strategies = n_strategies;
+  }));
+  /* *INDENT-ON* */
+}
+
+/* Return the human-readable description of one forwarding strategy. */
+static void vl_api_hicn_api_strategy_get_t_handler
+  (vl_api_hicn_api_strategy_get_t * mp)
+{
+  vl_api_hicn_api_strategy_get_reply_t *rmp;
+  int rv = HICN_ERROR_NONE;
+
+  hicn_main_t *sm = &hicn_main;
+
+  u32 strategy_id = clib_net_to_host_u32 (mp->strategy_id);
+  rv = hicn_dpo_strategy_id_is_valid (strategy_id);
+
+  /* The strategy's format function writes its description into the reply. */
+  /* *INDENT-OFF* */
+  REPLY_MACRO2 (VL_API_HICN_API_STRATEGY_GET_REPLY /* , rmp, mp, rv */ ,(
+  {
+    if (rv == HICN_ERROR_NONE)
+      {
+	const hicn_dpo_vft_t * hicn_dpo_vft =
+	  hicn_dpo_get_vft (strategy_id);
+	hicn_dpo_vft->format_hicn_dpo (rmp->description, 0);}
+  }));
+  /* *INDENT-ON* */
+}
+
+/****** PUNTING *******/
+
+/* Install a punting rule for prefix/len on interface swif so matching
+ * interest/data packets are handed to the hICN forwarder. */
+static void vl_api_hicn_api_punting_add_t_handler
+  (vl_api_hicn_api_punting_add_t * mp)
+{
+  vl_api_hicn_api_punting_add_reply_t *rmp;
+  int rv = HICN_ERROR_NONE;
+  vlib_main_t *vm = vlib_get_main ();
+
+  hicn_main_t *sm = &hicn_main;
+
+  ip46_address_t prefix;
+  /* Prefix is carried as two big-endian u64 words. */
+  prefix.as_u64[0] = clib_net_to_host_u64 (((u64 *) (&mp->prefix))[0]);
+  prefix.as_u64[1] = clib_net_to_host_u64 (((u64 *) (&mp->prefix))[1]);
+  u8 subnet_mask = mp->len;
+  u32 swif = clib_net_to_host_u32 (mp->swif);
+
+  rv =
+    hicn_punt_interest_data_for_ethernet (vm, &prefix, subnet_mask, swif, 0);
+
+  REPLY_MACRO (VL_API_HICN_API_PUNTING_ADD_REPLY /* , rmp, mp, rv */ );
+}
+
+/* Remove a punting rule.
+ * TODO: removal is not implemented yet; this handler only acknowledges
+ * the request with HICN_ERROR_NONE. */
+static void vl_api_hicn_api_punting_del_t_handler
+  (vl_api_hicn_api_punting_del_t * mp)
+{
+  vl_api_hicn_api_punting_del_reply_t *rmp;
+  int rv = HICN_ERROR_NONE;
+
+  hicn_main_t *sm = &hicn_main;
+
+  rv = HICN_ERROR_NONE;
+
+  /* BUG FIX: the reply was sent with VL_API_HICN_API_ROUTE_DEL_REPLY,
+   * i.e. the wrong message id — the client waiting for a
+   * punting_del_reply could never match it to its request. */
+  REPLY_MACRO (VL_API_HICN_API_PUNTING_DEL_REPLY /* , rmp, mp, rv */ );
+}
+
+/************* APP FACE ****************/
+
+/* Register a producer application on interface swif for the given prefix,
+ * creating an application face and reserving CS space for it. */
+static void vl_api_hicn_api_register_prod_app_t_handler
+  (vl_api_hicn_api_register_prod_app_t * mp)
+{
+  vl_api_hicn_api_register_prod_app_reply_t *rmp;
+  int rv = HICN_ERROR_NONE;
+
+  hicn_main_t *sm = &hicn_main;
+
+  hicn_prefix_t prefix;
+  /* Prefix is carried as two big-endian u64 words. */
+  prefix.name.as_u64[0] = clib_net_to_host_u64 (((u64 *) (&mp->prefix))[0]);
+  prefix.name.as_u64[1] = clib_net_to_host_u64 (((u64 *) (&mp->prefix))[1]);
+  prefix.len = mp->len;
+  u32 swif = clib_net_to_host_u32 (mp->swif);
+  /* cs_reserved is in/out: updated to the amount actually reserved. */
+  u32 cs_reserved = clib_net_to_host_u32 (mp->cs_reserved);
+  u32 faceid;
+
+  ip46_address_t prod_addr;
+  ip46_address_reset (&prod_addr);
+  rv = hicn_face_prod_add (&prefix, swif, &cs_reserved, &prod_addr, &faceid);
+
+  /* NOTE(review): prod_addr is copied without byte-order conversion, and
+   * the reply fields use clib_net_to_host_u32 where host_to_net was
+   * presumably meant (the swap is symmetric, so behavior is the same). */
+  /* *INDENT-OFF* */
+  REPLY_MACRO2 (VL_API_HICN_API_REGISTER_PROD_APP_REPLY, (
+  {
+    rmp->prod_addr[0] = prod_addr.as_u64[0];
+    rmp->prod_addr[1] = prod_addr.as_u64[1];
+    rmp->cs_reserved = clib_net_to_host_u32(cs_reserved);
+    rmp->faceid = clib_net_to_host_u32(faceid);
+  }));
+  /* *INDENT-ON* */
+}
+
+/* Register a consumer application on interface swif, creating a consumer
+ * face and returning the source addresses assigned to it. */
+static void vl_api_hicn_api_register_cons_app_t_handler
+  (vl_api_hicn_api_register_cons_app_t * mp)
+{
+  vl_api_hicn_api_register_cons_app_reply_t *rmp;
+  int rv = HICN_ERROR_NONE;
+
+  hicn_main_t *sm = &hicn_main;
+  ip4_address_t src_addr4;
+  ip6_address_t src_addr6;
+  /* Placeholder values; presumably overwritten by hicn_face_cons_add —
+   * TODO(review): confirm, the {0,1} v6 seed looks arbitrary. */
+  src_addr4.as_u32 = (u32) 0;
+  src_addr6.as_u64[0] = (u64) 0;
+  src_addr6.as_u64[1] = (u64) 1;
+
+  u32 swif = clib_net_to_host_u32 (mp->swif);
+  u32 faceid;
+
+  rv = hicn_face_cons_add (&src_addr4, &src_addr6, swif, &faceid);
+
+  /* NOTE(review): reply fields use clib_net_to_host_* where host_to_net
+   * was presumably meant (swap is symmetric, behavior unchanged). */
+  /* *INDENT-OFF* */
+  REPLY_MACRO2 (VL_API_HICN_API_REGISTER_CONS_APP_REPLY, (
+  {
+    rmp->src_addr4 = clib_net_to_host_u32(src_addr4.as_u32);
+    rmp->src_addr6[0] = clib_net_to_host_u64(src_addr6.as_u64[0]);
+    rmp->src_addr6[1] = clib_net_to_host_u64(src_addr6.as_u64[1]);
+    rmp->faceid = clib_net_to_host_u32(faceid);
+  }));
+  /* *INDENT-ON* */
+}
+
+/************************************************************************************/
+
+/* Set up the API message handling tables: allocate a message-id block for
+ * this plugin and register handler/endian/print functions for every
+ * message listed in foreach_hicn_plugin_api_msg. Returns 0 on success. */
+clib_error_t *
+hicn_api_plugin_hookup (vlib_main_t * vm)
+{
+  hicn_main_t *sm = &hicn_main;
+
+  /* Get a correctly-sized block of API message decode slots */
+  u8 *name = format (0, "hicn_%08x%c", api_version, 0);
+  sm->msg_id_base = vl_msg_api_get_msg_ids ((char *) name,
+					    VL_MSG_FIRST_AVAILABLE);
+  vec_free (name);
+
+#define _(N, n)                                                  \
+    vl_msg_api_set_handlers(sm->msg_id_base + VL_API_##N,        \
+                           #n,                                   \
+                           vl_api_##n##_t_handler,               \
+                           vl_noop_handler,                      \
+                           vl_api_##n##_t_endian,                \
+                           vl_api_##n##_t_print,                 \
+                           sizeof(vl_api_##n##_t), 1);
+    foreach_hicn_plugin_api_msg;
+#undef _
+
+  return 0;
+}
+
+
+
+
+
+
+
+/******************* SUPPORTING FUNCTIONS *******************/
+
+/*
+ * Binary serialization for get face configuration API. for the moment
+ * assuming only ip faces here. To be completed with othet types of faces.
+ *
+ * Fills next-hop address, sw_if and flags of the face into `reply`.
+ * NOTE(review): faceid is not validated before hicn_dpoi_get_from_idx and
+ * the returned face pointer is not NULL-checked — confirm callers only
+ * pass ids of existing faces.
+ */
+vnet_api_error_t
+hicn_face_api_entry_params_serialize (hicn_face_id_t faceid,
+				      vl_api_hicn_api_face_ip_params_get_reply_t
+				      * reply)
+{
+  int rv = HICN_ERROR_NONE;
+
+  if (!reply)
+    {
+      rv = VNET_API_ERROR_INVALID_ARGUMENT;
+      goto done;
+    }
+  hicn_face_t *face = hicn_dpoi_get_from_idx (faceid);
+
+  ip_adjacency_t *ip_adj = adj_get (face->shared.adj);
+
+  if (ip_adj != NULL)
+    {
+      /* Next hop is reported as two big-endian u64 words. */
+      reply->nh_addr[0] =
+	clib_host_to_net_u64 (ip_adj->sub_type.nbr.next_hop.as_u64[0]);
+      reply->nh_addr[1] =
+	clib_host_to_net_u64 (ip_adj->sub_type.nbr.next_hop.as_u64[1]);
+      reply->swif = clib_host_to_net_u32 (face->shared.sw_if);
+      reply->flags = clib_host_to_net_u32 (face->shared.flags);
+    }
+  else
+    rv = HICN_ERROR_FACE_IP_ADJ_NOT_FOUND;
+
+done:
+  return (rv);
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/hicn_api.h b/hicn-plugin/src/hicn_api.h
new file mode 100755
index 000000000..79b561be4
--- /dev/null
+++ b/hicn-plugin/src/hicn_api.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HICN_API_H__
+#define __HICN_API_H__
+
+#define HICN_STRATEGY_NULL ~0
+
+/* define message structures */
+#define vl_typedefs
+#include <hicn/hicn_all_api_h.h>
+#undef vl_typedefs
+
+#endif /* // __HICN_API_H___ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/hicn_api_test.c b/hicn-plugin/src/hicn_api_test.c
new file mode 100755
index 000000000..9d4519bf4
--- /dev/null
+++ b/hicn-plugin/src/hicn_api_test.c
@@ -0,0 +1,1046 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <inttypes.h>
+
+#include <vat/vat.h>
+#include <vlibapi/api.h>
+#include <vlibmemory/api.h>
+#include <vppinfra/error.h>
+
+#include <vnet/ip/ip4_packet.h>
+#include <vnet/ip/ip6_packet.h>
+#include <vnet/ip/format.h>
+
+#define __plugin_msg_base hicn_test_main.msg_id_base
+#include <vlibapi/vat_helper_macros.h>
+
+
+#include <hicn/hicn_api.h>
+#include "error.h"
+
+// uword unformat_sw_if_index(unformat_input_t * input, va_list * args);
+
+/* Declare message IDs */
+#include "hicn_msg_enum.h"
+
+/* declare message handlers for each api */
+
+#define vl_endianfun /* define message structures */
+#include "hicn_all_api_h.h"
+#undef vl_endianfun
+
+/* instantiate all the print functions we know about */
+#define vl_print(handle, ...)
+#define vl_printfun
+#include "hicn_all_api_h.h"
+#undef vl_printfun
+
+/* Get the API version number. */
+#define vl_api_version(n, v) static u32 api_version=(v);
+#include "hicn_all_api_h.h"
+#undef vl_api_version
+
+/* SUPPORTING FUNCTIONS NOT LOADED BY VPP_API_TEST */
+/**
+ * @brief unformat callback parsing an IPv4 or IPv6 address from CLI input.
+ *
+ * Local copy because this helper is not loaded by vpp_api_test itself.
+ *
+ * @param input unformat stream positioned at the address text
+ * @param args  (ip46_address_t *) destination, then (ip46_type_t) constraint:
+ *              IP46_TYPE_IP4 / IP46_TYPE_IP6 restrict the accepted family
+ * @return 1 on successful parse, 0 otherwise
+ */
+uword
+unformat_ip46_address (unformat_input_t * input, va_list * args)
+{
+  ip46_address_t *ip46 = va_arg (*args, ip46_address_t *);
+  ip46_type_t type = va_arg (*args, ip46_type_t);
+  if ((type != IP46_TYPE_IP6) &&
+      unformat (input, "%U", unformat_ip4_address, &ip46->ip4))
+    {
+      /* Zero the upper bytes so the ip4-in-ip46 representation is canonical */
+      ip46_address_mask_ip4 (ip46);
+      return 1;
+    }
+  else if ((type != IP46_TYPE_IP4) &&
+	   unformat (input, "%U", unformat_ip6_address, &ip46->ip6))
+    {
+      return 1;
+    }
+  return 0;
+}
+
+/////////////////////////////////////////////////////
+
+#define HICN_FACE_NULL ~0
+
+/* Per-plugin state of the API test client. */
+typedef struct
+{
+  /* API message ID base */
+  u16 msg_id_base;
+  /* Back-pointer to the vpp_api_test main structure */
+  vat_main_t *vat_main;
+} hicn_test_main_t;
+
+/* Single global instance; set up in vat_plugin_register() */
+hicn_test_main_t hicn_test_main;
+
+/*
+ * Replies that carry nothing but a retval: a boilerplate handler is
+ * generated for each message listed here.
+ */
+#define foreach_standard_reply_retval_handler                   \
+_(hicn_api_node_params_set_reply)                               \
+_(hicn_api_face_ip_del_reply)                                   \
+_(hicn_api_route_nhops_add_reply)                               \
+_(hicn_api_route_del_reply)                                     \
+_(hicn_api_route_nhop_del_reply)
+
+/*
+ * Generated handler: record retval (async) or print the error string and
+ * release the waiting CLI thread (sync).
+ */
+#define _(n)                                                    \
+    static void vl_api_##n##_t_handler                          \
+    (vl_api_##n##_t * mp)                                       \
+    {                                                           \
+        vat_main_t * vam = hicn_test_main.vat_main;             \
+        i32 retval = ntohl(mp->retval);                         \
+        if (vam->async_mode) {                                  \
+            vam->async_errors += (retval < 0);                  \
+        } else {                                                \
+            fformat (vam->ofp,"%s\n", get_error_string(retval));\
+            vam->retval = retval;                               \
+            vam->result_ready = 1;                              \
+        }                                                       \
+    }
+foreach_standard_reply_retval_handler;
+#undef _
+
+/*
+ * Table of message reply handlers, must include boilerplate handlers we just
+ * generated.  Each entry is (MESSAGE_ID_SUFFIX, handler_name_suffix) and is
+ * expanded in hicn_vat_api_hookup() to register the handler with the API
+ * message dispatcher.
+ */
+#define foreach_vpe_api_reply_msg                                       \
+_(HICN_API_NODE_PARAMS_SET_REPLY, hicn_api_node_params_set_reply)       \
+_(HICN_API_NODE_PARAMS_GET_REPLY, hicn_api_node_params_get_reply)       \
+_(HICN_API_NODE_STATS_GET_REPLY, hicn_api_node_stats_get_reply)         \
+_(HICN_API_FACE_IP_DEL_REPLY, hicn_api_face_ip_del_reply)               \
+_(HICN_API_FACE_IP_ADD_REPLY, hicn_api_face_ip_add_reply)               \
+_(HICN_API_ROUTE_NHOPS_ADD_REPLY, hicn_api_route_nhops_add_reply)       \
+_(HICN_API_FACE_IP_PARAMS_GET_REPLY, hicn_api_face_ip_params_get_reply) \
+_(HICN_API_ROUTE_GET_REPLY, hicn_api_route_get_reply)                   \
+_(HICN_API_ROUTE_DEL_REPLY, hicn_api_route_del_reply)                   \
+_(HICN_API_ROUTE_NHOP_DEL_REPLY, hicn_api_route_nhop_del_reply)         \
+_(HICN_API_STRATEGIES_GET_REPLY, hicn_api_strategies_get_reply)         \
+_(HICN_API_STRATEGY_GET_REPLY, hicn_api_strategy_get_reply)             \
+_(HICN_API_REGISTER_PROD_APP_REPLY, hicn_api_register_prod_app_reply)   \
+_(HICN_API_REGISTER_CONS_APP_REPLY, hicn_api_register_cons_app_reply)
+
+
+/**
+ * @brief CLI command: set forwarder parameters (PIT/CS sizes and lifetimes).
+ *
+ * Unparsed fields keep their -1 sentinel, which presumably means "leave
+ * unchanged" on the plugin side -- TODO confirm against the API handler.
+ *
+ * @return 0 on success, negative API error otherwise
+ */
+static int
+api_hicn_api_node_params_set (vat_main_t * vam)
+{
+  unformat_input_t *input = vam->input;
+  int enable_disable = 1;
+  int pit_size = -1, cs_size = -1;
+  f64 pit_dflt_lifetime_sec = -1.0f;
+  f64 pit_min_lifetime_sec = -1.0f, pit_max_lifetime_sec = -1.0f;
+  int ret;
+
+  vl_api_hicn_api_node_params_set_t *mp;
+
+  /* Parse args required to build the message */
+  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+    {
+      if (unformat (input, "disable"))
+	{
+	  enable_disable = 0;
+	}
+      else if (unformat (input, "PIT size %d", &pit_size))
+	{;
+	}
+      else if (unformat (input, "CS size %d", &cs_size))
+	{;
+	}
+      else if (unformat (input, "PIT dfltlife %f", &pit_dflt_lifetime_sec))
+	{;
+	}
+      else if (unformat (input, "PIT minlife %f", &pit_min_lifetime_sec))
+	{;
+	}
+      else if (unformat (input, "PIT maxlife %f", &pit_max_lifetime_sec))
+	{;
+	}
+      else
+	{
+	  break;
+	}
+    }
+
+  /* Construct the API message */
+  M (HICN_API_NODE_PARAMS_SET, mp);
+  mp->enable_disable = enable_disable;
+  mp->pit_max_size = clib_host_to_net_i32 (pit_size);
+  mp->cs_max_size = clib_host_to_net_i32 (cs_size);
+  /* Lifetimes are sent as native f64; no endian conversion is applied */
+  mp->pit_dflt_lifetime_sec = pit_dflt_lifetime_sec;
+  mp->pit_min_lifetime_sec = pit_min_lifetime_sec;
+  mp->pit_max_lifetime_sec = pit_max_lifetime_sec;
+
+  /* send it... */
+  S (mp);
+
+  /* Wait for a reply... */
+  W (ret);
+
+  return ret;
+}
+
+/**
+ * @brief CLI command: query the current hICN node parameters.
+ *
+ * Takes no arguments; the reply is printed by the matching handler.
+ */
+static int
+api_hicn_api_node_params_get (vat_main_t * vam)
+{
+  int ret;
+  vl_api_hicn_api_node_params_get_t *mp;
+
+  /* Allocate the request, send it, then block until the reply arrives */
+  M (HICN_API_NODE_PARAMS_GET, mp);
+  S (mp);
+  W (ret);
+
+  return ret;
+}
+
+/**
+ * @brief Reply handler: print the node parameters returned by the plugin.
+ */
+static void
+  vl_api_hicn_api_node_params_get_reply_t_handler
+  (vl_api_hicn_api_node_params_get_reply_t * mp)
+{
+  vat_main_t *vam = hicn_test_main.vat_main;
+  i32 retval = ntohl (mp->retval);
+
+  if (vam->async_mode)
+    {
+      /* Async callers only track the error count */
+      vam->async_errors += (retval < 0);
+      return;
+    }
+  vam->retval = retval;
+  vam->result_ready = 1;
+
+  if (vam->retval < 0)
+    {
+      //vpp_api_test infra will also print out string form of error
+      fformat (vam->ofp, " (API call error: %d)\n", vam->retval);
+      return;
+    }
+  fformat (vam->ofp,
+	   "Enabled %d\n"
+	   "  Features: cs:%d\n"
+	   "  PIT size %d\n"
+	   "  PIT lifetime dflt %.3f, min %.3f, max %.3f\n"
+	   "  CS size %d\n",
+	   mp->is_enabled,
+	   mp->feature_cs,
+	   clib_net_to_host_u32 (mp->pit_max_size),
+	   mp->pit_dflt_lifetime_sec,
+	   mp->pit_min_lifetime_sec,
+	   mp->pit_max_lifetime_sec, clib_net_to_host_u32 (mp->cs_max_size));
+}
+
+/**
+ * @brief CLI command: query the forwarder statistics.
+ *
+ * No arguments; the reply is rendered by the matching handler.
+ */
+static int
+api_hicn_api_node_stats_get (vat_main_t * vam)
+{
+  int ret;
+  vl_api_hicn_api_node_stats_get_t *mp;
+
+  /* Build, send, and wait for the reply */
+  M (HICN_API_NODE_STATS_GET, mp);
+  S (mp);
+  W (ret);
+
+  return ret;
+}
+
+/**
+ * @brief Reply handler: print the forwarder statistics.
+ *
+ * Fix: the original format string contained four nak-related lines
+ * (pkts_nak_count, pkts_nacked_interests_count, pkts_nak_hoplimit_count,
+ * pkts_nak_no_route_count) with no matching arguments, so fformat consumed
+ * arguments out of step (undefined behavior, garbage output).  The format
+ * now supplies exactly one conversion per argument.
+ */
+static void
+  vl_api_hicn_api_node_stats_get_reply_t_handler
+  (vl_api_hicn_api_node_stats_get_reply_t * rmp)
+{
+  vat_main_t *vam = hicn_test_main.vat_main;
+  i32 retval = ntohl (rmp->retval);
+
+  if (vam->async_mode)
+    {
+      vam->async_errors += (retval < 0);
+      return;
+    }
+  vam->retval = retval;
+  vam->result_ready = 1;
+
+  if (vam->retval < 0)
+    {
+      //vpp_api_test infra will also print out string form of error
+      fformat (vam->ofp, "  (API call error: %d)\n", vam->retval);
+      return;
+    }
+  else
+    {
+      fformat (vam->ofp,	//compare hicn_cli_show_command_fn block:should match
+	       "  PIT entries (now): %d\n"
+	       "  CS entries (now): %d\n"
+	       "  Forwarding statistics:"
+	       "    pkts_processed: %d\n"
+	       "    pkts_interest_count: %d\n"
+	       "    pkts_data_count: %d\n"
+	       "    pkts_from_cache_count: %d\n"
+	       "    pkts_no_pit_count: %d\n"
+	       "    pit_expired_count: %d\n"
+	       "    cs_expired_count: %d\n"
+	       "    cs_lru_count: %d\n"
+	       "    pkts_drop_no_buf: %d\n"
+	       "    interests_aggregated: %d\n"
+	       "    interests_retransmitted: %d\n",
+	       clib_net_to_host_u64 (rmp->pit_entries_count),
+	       clib_net_to_host_u64 (rmp->cs_entries_count),
+	       clib_net_to_host_u64 (rmp->pkts_processed),
+	       clib_net_to_host_u64 (rmp->pkts_interest_count),
+	       clib_net_to_host_u64 (rmp->pkts_data_count),
+	       clib_net_to_host_u64 (rmp->pkts_from_cache_count),
+	       clib_net_to_host_u64 (rmp->pkts_no_pit_count),
+	       clib_net_to_host_u64 (rmp->pit_expired_count),
+	       clib_net_to_host_u64 (rmp->cs_expired_count),
+	       clib_net_to_host_u64 (rmp->cs_lru_count),
+	       clib_net_to_host_u64 (rmp->pkts_drop_no_buf),
+	       clib_net_to_host_u64 (rmp->interests_aggregated),
+	       clib_net_to_host_u64 (rmp->interests_retx));
+    }
+}
+
+/**
+ * @brief CLI command: create an IP face toward <nh_addr> on interface <swif>.
+ *
+ * Fixes:
+ * - nh_addr[1] was populated from as_u64[0] (copy-paste), so the lower half
+ *   of an IPv6 next hop was silently duplicated from the upper half.
+ * - nh_addr and swif were read uninitialized when parsing failed (UB);
+ *   both are now initialized before the parse loop.
+ */
+static int
+api_hicn_api_face_ip_add (vat_main_t * vam)
+{
+  unformat_input_t *input = vam->input;
+  ip46_address_t nh_addr = { 0 };
+  vl_api_hicn_api_face_ip_add_t *mp;
+  int swif = ~0, ret;
+
+  /* Parse args required to build the message */
+  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+    {
+      if (unformat (input, "add %d %U",
+		    &swif, unformat_ip46_address, &nh_addr))
+	{;
+	}
+      else
+	{
+	  break;
+	}
+    }
+
+  /* Check for presence of both addresses */
+  if ((nh_addr.as_u64[0] == (u64) 0) && (nh_addr.as_u64[1] == (u64) 0))
+    {
+      clib_warning ("Next hop address not specified");
+      return (1);
+    }
+  /* Construct the API message */
+  M (HICN_API_FACE_IP_ADD, mp);
+  mp->nh_addr[0] = clib_host_to_net_u64 (nh_addr.as_u64[0]);
+  mp->nh_addr[1] = clib_host_to_net_u64 (nh_addr.as_u64[1]);
+  mp->swif = clib_host_to_net_u32 (swif);
+
+  /* send it... */
+  S (mp);
+
+  /* Wait for a reply... */
+  W (ret);
+
+  return ret;
+}
+
+/**
+ * @brief Reply handler: print the face ID assigned by the plugin.
+ */
+static void
+  vl_api_hicn_api_face_ip_add_reply_t_handler
+  (vl_api_hicn_api_face_ip_add_reply_t * rmp)
+{
+  vat_main_t *vam = hicn_test_main.vat_main;
+  i32 retval = ntohl (rmp->retval);
+
+  if (vam->async_mode)
+    {
+      /* Async callers only track the error count */
+      vam->async_errors += (retval < 0);
+      return;
+    }
+  vam->retval = retval;
+  vam->result_ready = 1;
+
+  if (vam->retval < 0)
+    {
+      //vpp_api_test infra will also print out string form of error
+      fformat (vam->ofp, " (API call error: %d)\n", vam->retval);
+      return;
+    }
+  fformat (vam->ofp, "New Face ID: %d\n", ntohl (rmp->faceid));
+}
+
+/**
+ * @brief CLI command: delete the IP face identified by "face <faceID>".
+ *
+ * @return 0 on success, 1 if no face ID was supplied, API error otherwise
+ */
+static int
+api_hicn_api_face_ip_del (vat_main_t * vam)
+{
+  unformat_input_t *input = vam->input;
+  vl_api_hicn_api_face_ip_del_t *mp;
+  int faceid = 0, ret;
+
+  /* Consume recognized tokens; stop on the first unknown one */
+  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+    {
+      if (!unformat (input, "face %d", &faceid))
+	break;
+    }
+
+  /* A face ID of zero means the user never supplied one */
+  if (faceid == 0)
+    {
+      clib_warning ("Please specify face ID");
+      return 1;
+    }
+
+  /* Build the request, send it, and wait for the reply */
+  M (HICN_API_FACE_IP_DEL, mp);
+  mp->faceid = clib_host_to_net_i32 (faceid);
+
+  S (mp);
+  W (ret);
+
+  return ret;
+}
+
+/**
+ * @brief CLI command: fetch the parameters of the face "face <faceID>".
+ *
+ * @return 0 on success, 1 if no face ID was supplied, API error otherwise
+ */
+static int
+api_hicn_api_face_ip_params_get (vat_main_t * vam)
+{
+  unformat_input_t *input = vam->input;
+  vl_api_hicn_api_face_ip_params_get_t *mp;
+  int faceid = 0, ret;
+
+  /* Consume recognized tokens; stop on the first unknown one */
+  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+    {
+      if (!unformat (input, "face %d", &faceid))
+	break;
+    }
+
+  /* A face ID of zero means the user never supplied one */
+  if (faceid == 0)
+    {
+      clib_warning ("Please specify face ID");
+      return 1;
+    }
+
+  /* Build the request, send it, and wait for the reply */
+  M (HICN_API_FACE_IP_PARAMS_GET, mp);
+  mp->faceid = clib_host_to_net_i32 (faceid);
+
+  S (mp);
+  W (ret);
+
+  return ret;
+}
+
+/**
+ * @brief Reply handler: print next hop, sw_if index and flags of a face.
+ *
+ * Fixes:
+ * - swif was decoded with clib_net_to_host_u16 while the plugin encodes it
+ *   with clib_host_to_net_u32 (see hicn_api.c) -- decode as u32.
+ * - sbuf is a u8 vector without NUL terminator; print it with %v instead
+ *   of %s, and free it to avoid a per-call leak.
+ */
+static void
+  vl_api_hicn_api_face_ip_params_get_reply_t_handler
+  (vl_api_hicn_api_face_ip_params_get_reply_t * rmp)
+{
+  vat_main_t *vam = hicn_test_main.vat_main;
+  i32 retval = ntohl (rmp->retval);
+  u8 *sbuf = 0;
+  u64 nh_addr[2];
+
+  if (vam->async_mode)
+    {
+      vam->async_errors += (retval < 0);
+      return;
+    }
+  vam->retval = retval;
+  vam->result_ready = 1;
+
+  if (vam->retval < 0)
+    {
+      //vpp_api_test infra will also print out string form of error
+      fformat (vam->ofp, " (API call error: %d)\n", vam->retval);
+      return;
+    }
+  vec_reset_length (sbuf);
+  nh_addr[0] = clib_net_to_host_u64 (rmp->nh_addr[0]);
+  nh_addr[1] = clib_net_to_host_u64 (rmp->nh_addr[1]);
+  sbuf =
+    format (sbuf, "%U", format_ip46_address, &nh_addr,
+	    0 /* IP46_ANY_TYPE */ );
+
+  fformat (vam->ofp, "nh_addr %v swif %d flags %d\n",
+	   sbuf,
+	   clib_net_to_host_u32 (rmp->swif),
+	   clib_net_to_host_i32 (rmp->flags));
+  vec_free (sbuf);
+}
+
+/**
+ * @brief CLI command: fetch the hICN route for "prefix <addr>/<len>".
+ *
+ * Fix: prefix and plen were read uninitialized when parsing failed
+ * (undefined behavior in the validity check); both are now zero-initialized
+ * so an absent/garbled prefix is reliably rejected.
+ */
+static int
+api_hicn_api_route_get (vat_main_t * vam)
+{
+  unformat_input_t *input = vam->input;
+
+  vl_api_hicn_api_route_get_t *mp;
+  ip46_address_t prefix = { 0 };
+  u8 plen = 0;
+  int ret;
+
+  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+    {
+      if (unformat (input, "prefix %U/%d", unformat_ip46_address,
+		    &prefix, IP46_TYPE_ANY, &plen))
+	{;
+	}
+      else
+	{
+	  break;
+	}
+    }
+
+  /* Check parse */
+  if (((prefix.as_u64[0] == 0) && (prefix.as_u64[1] == 0)) || (plen == 0))
+    {
+      clib_warning ("Please specify a valid prefix...");
+      return 1;
+    }
+  //Construct the API message
+  M (HICN_API_ROUTE_GET, mp);
+  mp->prefix[0] = clib_host_to_net_u64 (prefix.as_u64[0]);
+  mp->prefix[1] = clib_host_to_net_u64 (prefix.as_u64[1]);
+  mp->len = plen;
+
+  //send it...
+  S (mp);
+
+  //Wait for a reply...
+  W (ret);
+
+  return ret;
+}
+
+/**
+ * @brief Reply handler: print the faces and strategy of a route.
+ *
+ * The faceids array is scanned until HICN_FACE_NULL or 1000 entries,
+ * whichever comes first.  NOTE(review): sbuf is a u8 vector printed with
+ * %s (no NUL terminator guaranteed) and is never vec_free'd -- verify
+ * against vppinfra format conventions.
+ */
+static void
+vl_api_hicn_api_route_get_reply_t_handler (vl_api_hicn_api_route_get_reply_t *
+					   rmp)
+{
+  vat_main_t *vam = hicn_test_main.vat_main;
+  i32 retval = ntohl (rmp->retval);
+  u8 *sbuf = 0;
+
+  if (vam->async_mode)
+    {
+      /* Async callers only track the error count */
+      vam->async_errors += (retval < 0);
+      return;
+    }
+  vam->retval = retval;
+  vam->result_ready = 1;
+
+  if (vam->retval < 0)
+    {
+      //vpp_api_test infra will also print out string form of error
+      fformat (vam->ofp, " (API call error: %d)\n", vam->retval);
+      return;
+    }
+  int i = 0;
+  u8 null_face = 0;
+  u32 faceid;
+
+  vec_reset_length (sbuf);
+  sbuf = format (sbuf, "Faces: \n");
+  /* Collect face IDs until the HICN_FACE_NULL sentinel (or a hard cap) */
+  while (i < 1000 && !null_face)
+    {
+      faceid = clib_net_to_host_u32 (rmp->faceids[i]);
+      if (faceid != HICN_FACE_NULL)
+	{
+	  sbuf =
+	    format (sbuf, "faceid %d",
+		    clib_net_to_host_u32 (rmp->faceids[i]));
+	  i++;
+	}
+      else
+	{
+	  null_face = 1;
+	}
+    }
+
+  fformat (vam->ofp, "%s\n Strategy: %d",
+	   sbuf, clib_net_to_host_u32 (rmp->strategy_id));
+}
+
+/**
+ * @brief CLI command: add a next hop face to a route, creating it if needed.
+ *
+ * Fix: prefix and plen were read uninitialized when parsing failed
+ * (undefined behavior in the validity check); both are now zero-initialized.
+ */
+static int
+api_hicn_api_route_nhops_add (vat_main_t * vam)
+{
+  unformat_input_t *input = vam->input;
+  vl_api_hicn_api_route_nhops_add_t *mp;
+
+  ip46_address_t prefix = { 0 };
+  u8 plen = 0;
+  u32 faceid = 0;
+  int ret;
+
+
+  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+    {
+      if (unformat (input, "add prefix %U/%d", unformat_ip46_address,
+		    &prefix, IP46_TYPE_ANY, &plen))
+	{;
+	}
+      else if (unformat (input, "face %d", &faceid))
+	{;
+	}
+      else
+	{
+	  break;
+	}
+    }
+
+  /* Check parse */
+  if (((prefix.as_u64[0] == 0) && (prefix.as_u64[1] == 0)) || (plen == 0)
+      || (faceid == 0))
+    {
+      clib_warning ("Please specify prefix and faceid...");
+      return 1;
+    }
+  /* Construct the API message */
+  M (HICN_API_ROUTE_NHOPS_ADD, mp);
+  mp->prefix[0] = clib_host_to_net_u64 (prefix.as_u64[0]);
+  mp->prefix[1] = clib_host_to_net_u64 (prefix.as_u64[1]);
+  mp->len = plen;
+
+  /* CLI accepts a single face per invocation */
+  mp->face_ids[0] = clib_host_to_net_u32 (faceid);
+  mp->n_faces = 1;
+
+  /* send it... */
+  S (mp);
+
+  /* Wait for a reply... */
+  W (ret);
+
+  return ret;
+}
+
+/**
+ * @brief CLI command: delete the route for "prefix <addr>/<len>".
+ *
+ * Fix: prefix and plen were read uninitialized when parsing failed
+ * (undefined behavior in the validity check); both are now zero-initialized.
+ */
+static int
+api_hicn_api_route_del (vat_main_t * vam)
+{
+  unformat_input_t *input = vam->input;
+  vl_api_hicn_api_route_del_t *mp;
+
+  ip46_address_t prefix = { 0 };
+  u8 plen = 0;
+  int ret;
+
+  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+    {
+      if (unformat (input, "prefix %U/%d", unformat_ip46_address,
+		    &prefix, IP46_TYPE_ANY, &plen))
+	{;
+	}
+      else
+	{
+	  break;
+	}
+    }
+
+  /* Check parse */
+  if (((prefix.as_u64[0] == 0) && (prefix.as_u64[1] == 0)) || (plen == 0))
+    {
+      clib_warning ("Please specify prefix...");
+      return 1;
+    }
+  /* Construct the API message */
+  M (HICN_API_ROUTE_DEL, mp);
+  mp->prefix[0] = clib_host_to_net_u64 (prefix.as_u64[0]);
+  mp->prefix[1] = clib_host_to_net_u64 (prefix.as_u64[1]);
+  mp->len = plen;
+
+  /* send it... */
+  S (mp);
+
+  /* Wait for a reply... */
+  W (ret);
+
+  return ret;
+
+}
+
+/**
+ * @brief CLI command: remove one next hop face from a route.
+ *
+ * Fixes:
+ * - prefix and plen were read uninitialized when parsing failed (UB).
+ * - faceid was initialized to 0 but validated against HICN_FACE_NULL (~0),
+ *   so an omitted "face" argument slipped through; it now starts at the
+ *   sentinel the check actually tests for.
+ */
+static int
+api_hicn_api_route_nhop_del (vat_main_t * vam)
+{
+  unformat_input_t *input = vam->input;
+  vl_api_hicn_api_route_nhop_del_t *mp;
+
+  ip46_address_t prefix = { 0 };
+  u8 plen = 0;
+  int faceid = HICN_FACE_NULL, ret;
+
+  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+    {
+      if (unformat (input, "del prefix %U/%d", unformat_ip46_address,
+		    &prefix, IP46_TYPE_ANY, &plen))
+	{;
+	}
+      else if (unformat (input, "face %d", &faceid))
+	{;
+	}
+      else
+	{
+	  break;
+	}
+    }
+
+  /* Check parse */
+  if (((prefix.as_u64[0] == 0) && (prefix.as_u64[1] == 0)) || (plen == 0)
+      || (faceid == HICN_FACE_NULL))
+    {
+      clib_warning ("Please specify prefix and faceid...");
+      return 1;
+    }
+  /* Construct the API message */
+  M (HICN_API_ROUTE_NHOP_DEL, mp);
+  mp->prefix[0] = clib_host_to_net_u64 (prefix.as_u64[0]);
+  mp->prefix[1] = clib_host_to_net_u64 (prefix.as_u64[1]);
+  mp->len = plen;
+
+  mp->faceid = clib_host_to_net_u32 (faceid);
+
+  /* send it... */
+  S (mp);
+
+  /* Wait for a reply... */
+  W (ret);
+
+  return ret;
+}
+
+/**
+ * @brief CLI command: list the forwarding strategies the plugin supports.
+ *
+ * Takes no arguments; the reply is printed by the matching handler.
+ */
+static int
+api_hicn_api_strategies_get (vat_main_t * vam)
+{
+  int ret;
+  vl_api_hicn_api_strategies_get_t *mp;
+
+  //TODO
+  /* Build the request, send it, and wait for the reply */
+  M (HICN_API_STRATEGIES_GET, mp);
+  S (mp);
+  W (ret);
+
+  return ret;
+}
+
+/**
+ * @brief Reply handler: print the list of available strategy IDs.
+ *
+ * NOTE(review): sbuf is printed with %s without guaranteed NUL terminator
+ * and is never vec_free'd -- verify against vppinfra format conventions.
+ */
+static void
+  vl_api_hicn_api_strategies_get_reply_t_handler
+  (vl_api_hicn_api_strategies_get_reply_t * mp)
+{
+  vat_main_t *vam = hicn_test_main.vat_main;
+  i32 retval = ntohl (mp->retval);
+  u8 *sbuf = 0;
+
+  if (vam->async_mode)
+    {
+      /* Async callers only track the error count */
+      vam->async_errors += (retval < 0);
+      return;
+    }
+  vam->retval = retval;
+  vam->result_ready = 1;
+
+  if (vam->retval < 0)
+    {
+      //vpp_api_test infra will also print out string form of error
+      fformat (vam->ofp, " (API call error: %d)\n", vam->retval);
+      return;
+    }
+  int n_strategies = clib_net_to_host_i32 (mp->n_strategies);
+
+  vec_reset_length (sbuf);
+  sbuf = format (sbuf, "Available strategies:\n");
+
+  int i;
+  for (i = 0; i < n_strategies; i++)
+    {
+      u32 strategy_id = clib_net_to_host_u32 (mp->strategy_id[i]);
+      sbuf = format (sbuf, "%d ", strategy_id);
+    }
+  fformat (vam->ofp, "%s", sbuf);
+}
+
+/**
+ * @brief CLI command: fetch the description of "strategy <id>".
+ *
+ * Fix: unformat was passed strategy_id by value instead of by address
+ * ("%d" requires a pointer), so the argument was never parsed and unformat
+ * scribbled through a bogus pointer (undefined behavior).
+ */
+static int
+api_hicn_api_strategy_get (vat_main_t * vam)
+{
+  unformat_input_t *input = vam->input;
+  vl_api_hicn_api_strategy_get_t *mp;
+  int ret;
+
+  u32 strategy_id = HICN_STRATEGY_NULL;
+
+  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+    {
+      if (unformat (input, "strategy %d", &strategy_id))
+	{;
+	}
+      else
+	{
+	  break;
+	}
+    }
+
+  if (strategy_id == HICN_STRATEGY_NULL)
+    {
+      clib_warning ("Please specify strategy id...");
+      return 1;
+    }
+
+  /* Construct the API message */
+  M (HICN_API_STRATEGY_GET, mp);
+  mp->strategy_id = clib_host_to_net_u32 (strategy_id);
+
+  /* send it... */
+  S (mp);
+
+  /* Wait for a reply... */
+  W (ret);
+
+  return ret;
+}
+
+/**
+ * @brief Reply handler: print the textual description of a strategy.
+ */
+static void
+  vl_api_hicn_api_strategy_get_reply_t_handler
+  (vl_api_hicn_api_strategy_get_reply_t * mp)
+{
+  vat_main_t *vam = hicn_test_main.vat_main;
+  i32 retval = ntohl (mp->retval);
+
+  if (vam->async_mode)
+    {
+      /* Async callers only track the error count */
+      vam->async_errors += (retval < 0);
+      return;
+    }
+  vam->retval = retval;
+  vam->result_ready = 1;
+
+  if (vam->retval < 0)
+    {
+      //vpp_api_test infra will also print out string form of error
+      fformat (vam->ofp, " (API call error: %d)\n", vam->retval);
+      return;
+    }
+  /* description is assumed NUL-terminated by the plugin -- TODO confirm */
+  fformat (vam->ofp, "%s", mp->description);
+}
+
+/**
+ * @brief CLI command: register a producer application for a prefix.
+ *
+ * Fix: prefix and plen were read uninitialized when parsing failed
+ * (undefined behavior in the validity check); both are now zero-initialized.
+ */
+static int
+api_hicn_api_register_prod_app (vat_main_t * vam)
+{
+  unformat_input_t *input = vam->input;
+  vl_api_hicn_api_register_prod_app_t *mp;
+  ip46_address_t prefix = { 0 };
+  int plen = 0;
+  u32 swif = ~0;
+  int ret;
+
+  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+    {
+      if (unformat (input, "prefix %U/%d", unformat_ip46_address,
+		    &prefix, IP46_TYPE_ANY, &plen))
+	{;
+	}
+      else if (unformat (input, "id %d", &swif))
+	{;
+	}
+      else
+	{
+	  break;
+	}
+    }
+
+  /* Check parse */
+  if (((prefix.as_u64[0] == 0) && (prefix.as_u64[1] == 0)) || (plen == 0))
+    {
+      clib_warning ("Please specify prefix...");
+      return 1;
+    }
+  /* Construct the API message */
+  M (HICN_API_REGISTER_PROD_APP, mp);
+  mp->prefix[0] = clib_host_to_net_u64 (prefix.as_u64[0]);
+  mp->prefix[1] = clib_host_to_net_u64 (prefix.as_u64[1]);
+  mp->len = (u8) plen;
+
+  mp->swif = clib_host_to_net_u32 (swif);
+
+  /* send it... */
+  S (mp);
+
+  /* Wait for a reply... */
+  W (ret);
+
+  return ret;
+}
+
+/**
+ * @brief Reply handler for producer registration.
+ *
+ * On success nothing is printed; only the retval is recorded.
+ */
+static void
+  vl_api_hicn_api_register_prod_app_reply_t_handler
+  (vl_api_hicn_api_register_prod_app_reply_t * mp)
+{
+  vat_main_t *vam = hicn_test_main.vat_main;
+  i32 retval = ntohl (mp->retval);
+
+  if (vam->async_mode)
+    {
+      /* Async callers only track the error count */
+      vam->async_errors += (retval < 0);
+      return;
+    }
+  vam->retval = retval;
+  vam->result_ready = 1;
+
+  if (vam->retval < 0)
+    {
+      //vpp_api_test infra will also print out string form of error
+      fformat (vam->ofp, " (API call error: %d)\n", vam->retval);
+      return;
+    }
+}
+
+/**
+ * @brief CLI command: register a consumer application.
+ *
+ * Takes no arguments; the assigned addresses are printed by the handler.
+ */
+static int
+api_hicn_api_register_cons_app (vat_main_t * vam)
+{
+  int ret;
+  vl_api_hicn_api_register_cons_app_t *mp;
+
+  /* Build the request, send it, and wait for the reply */
+  M (HICN_API_REGISTER_CONS_APP, mp);
+  S (mp);
+  W (ret);
+
+  return ret;
+}
+
+/**
+ * @brief Reply handler: print the source addresses assigned to a consumer.
+ *
+ * Fix: the original format string ended with "appif id :%d\n" but supplied
+ * no matching argument, which is undefined behavior in fformat.  The line
+ * is dropped until the message field carrying the id is confirmed --
+ * NOTE(review): re-add it with the proper argument once identified.
+ */
+static void
+  vl_api_hicn_api_register_cons_app_reply_t_handler
+  (vl_api_hicn_api_register_cons_app_reply_t * mp)
+{
+  vat_main_t *vam = hicn_test_main.vat_main;
+  i32 retval = ntohl (mp->retval);
+
+  if (vam->async_mode)
+    {
+      vam->async_errors += (retval < 0);
+      return;
+    }
+  vam->retval = retval;
+  vam->result_ready = 1;
+
+  if (vam->retval < 0)
+    {
+      //vpp_api_test infra will also print out string form of error
+      fformat (vam->ofp, " (API call error: %d)\n", vam->retval);
+      return;
+    }
+  ip4_address_t src_addr4;
+  src_addr4.as_u32 = clib_net_to_host_u32 (mp->src_addr4);
+  ip6_address_t src_addr6;
+  src_addr6.as_u64[0] = clib_net_to_host_u64 (mp->src_addr6[0]);
+  src_addr6.as_u64[1] = clib_net_to_host_u64 (mp->src_addr6[1]);
+
+  fformat (vam->ofp,
+	   "ip4 address %U\n"
+	   "ip6 address :%U\n",
+	   format_ip4_address, &src_addr4, format_ip6_address, &src_addr6);
+}
+
+/*
+ * List of messages that the api test plugin sends, and that the data plane
+ * plugin processes.  Each entry is (command_name, help_string) and is
+ * expanded in hicn_vat_api_hookup() to populate the CLI function and help
+ * tables.
+ */
+#define foreach_vpe_api_msg                                             \
+_(hicn_api_node_params_set, "PIT size <sz> CS size <sz>"                \
+  "PIT minlimit <f> PIT maxlimit <f> [disable] ")                       \
+_(hicn_api_node_params_get, "")                                         \
+_(hicn_api_node_stats_get, "")                                          \
+_(hicn_api_face_ip_del, "face <faceID>")                                \
+_(hicn_api_face_ip_add, "add <swif> <address>")                         \
+_(hicn_api_route_nhops_add, "add prefix <IP4/IP6>/<subnet> face <faceID> weight <weight>") \
+_(hicn_api_face_ip_params_get, "face <faceID>")                         \
+_(hicn_api_route_get, "prefix <IP4/IP6>/<subnet>")                      \
+_(hicn_api_route_del, "prefix <IP4/IP6>/<subnet>")                      \
+_(hicn_api_route_nhop_del, "del prefix <IP4/IP6>/<subnet> face <faceID>") \
+_(hicn_api_strategies_get, "")                                          \
+_(hicn_api_strategy_get, "strategy <id>")                               \
+_(hicn_api_register_prod_app, "prefix <IP4/IP6>/<subnet> id <appif_id>") \
+_(hicn_api_register_cons_app, "")
+
+/**
+ * @brief Wire the plugin into vpp_api_test.
+ *
+ * Registers reply handlers (offset by the negotiated message-id base),
+ * then fills the CLI command and help-string hash tables.
+ */
+void
+hicn_vat_api_hookup (vat_main_t * vam)
+{
+  hicn_test_main_t *sm = &hicn_test_main;
+  /* Hook up handlers for replies from the data plane plug-in */
+#define _(N, n)                                                  \
+    vl_msg_api_set_handlers((VL_API_##N + sm->msg_id_base),      \
+                            #n,                                  \
+                            vl_api_##n##_t_handler,              \
+                            vl_noop_handler,                     \
+                            vl_api_##n##_t_endian,               \
+                            vl_api_##n##_t_print,                \
+                            sizeof(vl_api_##n##_t), 1);
+  foreach_vpe_api_reply_msg;
+#undef _
+
+  /* API messages we can send */
+#define _(n, h) hash_set_mem (vam->function_by_name, #n, api_##n);
+  foreach_vpe_api_msg;
+#undef _
+
+  /* Help strings */
+#define _(n, h) hash_set_mem (vam->help_by_name, #n, h);
+  foreach_vpe_api_msg;
+#undef _
+}
+
+/**
+ * @brief vpp_api_test entry point for this plugin.
+ *
+ * Negotiates the plugin's message-id base with the vpp engine and, if the
+ * plugin is loaded there, hooks up the CLI commands and reply handlers.
+ *
+ * @return NULL; registration is silently skipped if the engine does not
+ *         know the hicn plugin (msg_id_base == ~0)
+ */
+clib_error_t *
+vat_plugin_register (vat_main_t * vam)
+{
+  hicn_test_main_t *sm = &hicn_test_main;
+  u8 *name;
+
+  sm->vat_main = vam;
+
+  /* Ask the vpp engine for the first assigned message-id */
+  name = format (0, "hicn_%08x%c", api_version, 0);
+  sm->msg_id_base = vl_client_get_first_plugin_msg_id ((char *) name);
+
+  if (sm->msg_id_base != (u16) ~ 0)
+    hicn_vat_api_hookup (vam);
+
+  vec_free (name);
+
+  return 0;
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/hicn_msg_enum.h b/hicn-plugin/src/hicn_msg_enum.h
new file mode 100755
index 000000000..291e6226c
--- /dev/null
+++ b/hicn-plugin/src/hicn_msg_enum.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HICN_MSG_ENUM_H__
+#define __HICN_MSG_ENUM_H__
+
+#include <vppinfra/byte_order.h>
+
+#define vl_msg_id(n, h) n,
+typedef enum
+{
+#include <hicn/hicn_all_api_h.h>
+ /* We'll want to know how many messages IDs we need... */
+ VL_MSG_FIRST_AVAILABLE,
+} vl_msg_id_t;
+#undef vl_msg_id
+
+#endif /* __HICN_MSG_ENUM_H__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/infra.h b/hicn-plugin/src/infra.h
new file mode 100755
index 000000000..a9744fe97
--- /dev/null
+++ b/hicn-plugin/src/infra.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HICN_INFRA_H__
+#define __HICN_INFRA_H__
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+
+#include "pcs.h"
+
+/**
+ * hICN plugin global state: see also
+ * - fib and pits
+ */
+typedef struct hicn_main_s
+{
+ /* Binary API message ID base */
+ u16 msg_id_base;
+
+ /* Have we been enabled */
+ u16 is_enabled;
+
+ /* Forwarder PIT/CS */
+ hicn_pit_cs_t pitcs;
+
+ /* Global PIT lifetime info */
+ /*
+ * Default PIT entry timeout to use in case an interest does not
+ * contain a valid interest lifetime
+ */
+ u64 pit_lifetime_dflt_ms;
+ /*
+ * Boundarier for the interest lifetime. If outside,
+ * pit_lifetime_dflt_ms is used in the PIT
+ */
+ u64 pit_lifetime_min_ms;
+ u64 pit_lifetime_max_ms;
+
+} hicn_main_t;
+
+extern hicn_main_t hicn_main;
+
+extern int hicn_infra_fwdr_initialized;
+
+/* PIT and CS size */
+/* NOTE(review): these are tentative definitions placed in a header, so
+ * every translation unit that includes this file emits them.  Consider
+ * declaring them 'extern' here with a single definition in a .c file --
+ * verify no reliance on common-symbol merging before changing. */
+u32 hicn_infra_pit_size;
+u32 hicn_infra_cs_size;
+
+/**
+ * @brief Enable and disable the hicn plugin
+ *
+ * Enable the time the hICN plugin and set the forwarder parameters.
+ * @param enable_disable 1 if to enable, 0 otherwisw (currently only enable is supported)
+ * @param pit_max_size Max size of the PIT
+ * @param pit_dflt_lifetime_sec_req Default PIT entry timeout to use in case an interest does not contain a valid interest lifetime
+ * @param pit_min_lifetime_sec_req Minimum timeout allowed for a PIT entry lifetime
+ * @param pit_max_lifetime_sec_req Maximum timeout allowed for a PIT entry lifetime
+ * @param cs_max_size CS size. Must be <= than pit_max_size
+ * @param cs_reserved_app Amount of CS reserved for application faces
+ */
+int
+hicn_infra_plugin_enable_disable (int enable_disable,
+ int pit_max_size,
+ f64 pit_dflt_lifetime_sec_req,
+ f64 pit_min_lifetime_sec_req,
+ f64 pit_max_lifetime_sec_req,
+ int cs_max_size, int cs_reserved_app);
+
+
+/* vlib nodes that compose the hICN forwarder */
+extern vlib_node_registration_t hicn_interest_pcslookup_node;
+extern vlib_node_registration_t hicn_data_pcslookup_node;
+extern vlib_node_registration_t hicn_data_fwd_node;
+extern vlib_node_registration_t hicn_data_store_node;
+extern vlib_node_registration_t hicn_interest_hitpit_node;
+extern vlib_node_registration_t hicn_interest_hitcs_node;
+extern vlib_node_registration_t hicn_pg_interest_node;
+extern vlib_node_registration_t hicn_pg_data_node;
+extern vlib_node_registration_t hicn_pg_server_node;
+
+
+#endif /* // __HICN_INFRA_H__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/interest_hitcs.h b/hicn-plugin/src/interest_hitcs.h
new file mode 100755
index 000000000..82b0ace54
--- /dev/null
+++ b/hicn-plugin/src/interest_hitcs.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HICN_INTEREST_HITCS_H__
+#define __HICN_INTEREST_HITCS_H__
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+
+#include "pcs.h"
+
+/*
+ * Node context data; we think this is per-thread/instance
+ */
+typedef struct hicn_interest_hitcs_runtime_s
+{
+  /* Node instance id */
+  int id;
+  /* Shared PIT/CS this node instance operates on */
+  hicn_pit_cs_t *pitcs;
+} hicn_interest_hitcs_runtime_t;
+
+/* Trace context struct */
+typedef struct
+{
+  u32 next_index;
+  u32 sw_if_index;
+  u8 pkt_type;
+} hicn_interest_hitcs_trace_t;
+
+/* Graph-node next indices for packets leaving the interest-hitcs node */
+typedef enum
+{
+  HICN_INTEREST_HITCS_NEXT_V4_LOOKUP,
+  HICN_INTEREST_HITCS_NEXT_V6_LOOKUP,
+  HICN_INTEREST_HITCS_NEXT_ERROR_DROP,
+  HICN_INTEREST_HITCS_N_NEXT,
+} hicn_interest_hitcs_next_t;
+
+#endif /* // __HICN_INTEREST_HITCS_H__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/interest_hitcs_node.c b/hicn-plugin/src/interest_hitcs_node.c
new file mode 100755
index 000000000..f9c8c4898
--- /dev/null
+++ b/hicn-plugin/src/interest_hitcs_node.c
@@ -0,0 +1,300 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/ip/ip6_packet.h>
+#include <vppinfra/string.h>
+
+#include "interest_hitcs.h"
+#include "mgmt.h"
+#include "parser.h"
+#include "data_fwd.h"
+#include "infra.h"
+#include "state.h"
+#include "error.h"
+
+/* packet trace format function */
+static u8 *hicn_interest_hitcs_format_trace (u8 * s, va_list * args);
+
+
+/* Stats string values */
+static char *hicn_interest_hitcs_error_strings[] = {
+#define _(sym, string) string,
+ foreach_hicnfwd_error
+#undef _
+};
+
+vlib_node_registration_t hicn_interest_hitcs_node;
+
+always_inline void drop_packet (u32 * next0);
+
+/*
+ * Copy/clone the Data packet cached in the CS (buffer index *bi0_cs)
+ * into 'dest', the buffer that carried the Interest being satisfied.
+ *
+ * Two paths:
+ *  - if the cached buffer has at least VLIB_BUFFER_MIN_CHAIN_SEG_SIZE
+ *    of headroom AND its current_length reads as negative when seen as
+ *    i16 (NOTE(review): this looks like a sentinel left by a previous
+ *    clone/attach pass — confirm against the CS insertion code), the
+ *    whole content is memcpy'd into 'dest';
+ *  - otherwise only the first VLIB_BUFFER_MIN_CHAIN_SEG_SIZE bytes are
+ *    copied and the remainder of the cached buffer is attached to
+ *    'dest' as a reference-counted clone.
+ */
+always_inline void
+clone_from_cs (vlib_main_t * vm, u32 * bi0_cs, vlib_buffer_t * dest)
+{
+  /* Retrieve the buffer to clone */
+  vlib_buffer_t *cs_buf = vlib_get_buffer (vm, *bi0_cs);
+
+  if (cs_buf->current_data >= VLIB_BUFFER_MIN_CHAIN_SEG_SIZE
+      && ((i16) cs_buf->current_length) < (i16) 0)
+    {
+      /* Rewind to the start of the cached packet before copying */
+      vlib_buffer_advance (cs_buf,
+			   -(((i16) cs_buf->current_length) +
+			     VLIB_BUFFER_MIN_CHAIN_SEG_SIZE));
+
+      clib_memcpy (vlib_buffer_get_current (dest),
+		   vlib_buffer_get_current (cs_buf), cs_buf->current_length);
+      /* opaque2 carries per-buffer hicn metadata; clone it as well */
+      clib_memcpy (dest->opaque2, cs_buf->opaque2, sizeof (cs_buf->opaque2));
+      dest->current_data = cs_buf->current_data;
+      dest->current_length = cs_buf->current_length;
+      dest->total_length_not_including_first_buffer = 0;
+      cs_buf->current_data += VLIB_BUFFER_MIN_CHAIN_SEG_SIZE;
+      cs_buf->current_length -= VLIB_BUFFER_MIN_CHAIN_SEG_SIZE;
+    }
+  else
+    {
+      vlib_buffer_advance (cs_buf, -VLIB_BUFFER_MIN_CHAIN_SEG_SIZE);
+
+      /* n_add_refs is a u8: take a full copy when the clone refcount
+       * would overflow 255, and clone from the copy instead */
+      if (PREDICT_FALSE (cs_buf->n_add_refs == 255))
+	{
+	  vlib_buffer_t *cs_buf2 = vlib_buffer_copy (vm, cs_buf);
+	  vlib_buffer_advance (cs_buf, VLIB_BUFFER_MIN_CHAIN_SEG_SIZE);
+	  cs_buf = cs_buf2;
+	}
+
+      /* Head segment is copied, the tail is attached as a clone */
+      clib_memcpy (vlib_buffer_get_current (dest),
+		   vlib_buffer_get_current (cs_buf),
+		   VLIB_BUFFER_MIN_CHAIN_SEG_SIZE);
+      dest->current_length = VLIB_BUFFER_MIN_CHAIN_SEG_SIZE;
+      vlib_buffer_advance (cs_buf, VLIB_BUFFER_MIN_CHAIN_SEG_SIZE);
+      vlib_buffer_attach_clone (vm, dest, cs_buf);
+    }
+}
+
+/*
+ * hICN forwarder node for Interests whose PCS lookup hit a content
+ * store entry: the cached Data packet is cloned into the Interest
+ * buffer and sent back through the face the Interest arrived on.
+ * One packet is processed at a time. Expired CS entries are deleted
+ * and the Interest is re-injected into ip4/ip6 lookup instead.
+ */
+static uword
+hicn_interest_hitcs_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
+			     vlib_frame_t * frame)
+{
+  u32 n_left_from, *from, *to_next;
+  hicn_interest_hitcs_next_t next_index;
+  hicn_interest_hitcs_runtime_t *rt;
+  vl_api_hicn_api_node_stats_get_reply_t stats = { 0 };
+  f64 tnow;
+  int ret;
+
+  rt = vlib_node_get_runtime_data (vm, hicn_interest_hitcs_node.index);
+
+  /* Lazy init: point this node's runtime at the global PIT/CS */
+  if (PREDICT_FALSE (rt->pitcs == NULL))
+    {
+      rt->pitcs = &hicn_main.pitcs;
+    }
+  from = vlib_frame_vector_args (frame);
+  n_left_from = frame->n_vectors;
+  next_index = node->cached_next_index;
+
+  /* Capture time in vpp terms */
+  tnow = vlib_time_now (vm);
+  while (n_left_from > 0)
+    {
+      u32 n_left_to_next;
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+      while (n_left_from > 0 && n_left_to_next > 0)
+	{
+	  vlib_buffer_t *b0;
+	  u8 isv6;
+	  u8 *nameptr;
+	  u16 namelen;
+	  u32 bi0;
+	  u32 next0 = HICN_INTEREST_HITCS_NEXT_ERROR_DROP;
+	  hicn_name_t name;
+	  hicn_header_t *hicn0;
+	  hicn_buffer_t *hicnb0;
+	  hicn_hash_node_t *node0;
+	  hicn_pcs_entry_t *pitp;
+	  hicn_hash_entry_t *hash_entry0;
+	  const hicn_strategy_vft_t *strategy_vft0;
+	  const hicn_dpo_vft_t *dpo_vft0;
+	  u8 dpo_ctx_id0;
+
+	  /* Prefetch for next iteration. */
+	  if (n_left_from > 1)
+	    {
+	      vlib_buffer_t *b1;
+	      b1 = vlib_get_buffer (vm, from[1]);
+	      CLIB_PREFETCH (b1, 2 * CLIB_CACHE_LINE_BYTES, STORE);
+	      CLIB_PREFETCH (b1->data, CLIB_CACHE_LINE_BYTES, STORE);
+	    }
+
+	  /* Dequeue a packet buffer */
+	  bi0 = from[0];
+	  from += 1;
+	  n_left_from -= 1;
+	  to_next[0] = bi0;
+	  to_next += 1;
+	  n_left_to_next -= 1;
+
+	  b0 = vlib_get_buffer (vm, bi0);
+
+	  /* Get hicn buffer and state stashed by the pcslookup node */
+	  hicnb0 = hicn_get_buffer (b0);
+	  hicn_get_internal_state (hicnb0, rt->pitcs, &node0, &strategy_vft0,
+				   &dpo_vft0, &dpo_ctx_id0, &hash_entry0);
+
+	  ret = hicn_interest_parse_pkt (b0, &name, &namelen, &hicn0, &isv6);
+	  nameptr = (u8 *) (&name);
+	  pitp = hicn_pit_get_data (node0);
+
+	  dpo_id_t hicn_dpo_id0 =
+	    { dpo_vft0->hicn_dpo_get_type (), 0, 0, dpo_ctx_id0 };
+
+	  /* Parse failure or hash collision: unlock the entry and drop */
+	  if (PREDICT_FALSE
+	      (ret != HICN_ERROR_NONE ||
+	       !hicn_node_compare (nameptr, namelen, node0)))
+	    {
+	      /* Remove lock from the entry */
+	      hicn_pcs_remove_lock (rt->pitcs, &pitp, &node0, vm, hash_entry0,
+				    dpo_vft0, &hicn_dpo_id0);
+	      drop_packet (&next0);
+	      goto end_processing;
+	    }
+	  if ((tnow > pitp->shared.expire_time))
+	    {
+	      /* Delete and clean up expired CS entry */
+	      hicn_pcs_delete (rt->pitcs, &pitp, &node0, vm, hash_entry0,
+			       dpo_vft0, &hicn_dpo_id0);
+	      stats.cs_expired_count++;
+	      /* Forward interest to the strategy node */
+	      next0 =
+		isv6 ? HICN_INTEREST_HITCS_NEXT_V6_LOOKUP :
+		HICN_INTEREST_HITCS_NEXT_V4_LOOKUP;
+	    }
+	  else
+	    {
+	      /* Refresh the CS entry (e.g. LRU) unless flagged deleted */
+	      if (PREDICT_TRUE
+		  (!(hash_entry0->he_flags & HICN_HASH_ENTRY_FLAG_DELETED)))
+		hicn_pcs_cs_update (vm, rt->pitcs, pitp, node0);
+
+	      /*
+	       * Retrieve the incoming iface and forward
+	       * the data through it
+	       */
+	      ASSERT (hicnb0->face_dpo_id.dpoi_index <
+		      HICN_PARAM_PIT_ENTRY_PHOPS_MAX);
+	      next0 = hicnb0->face_dpo_id.dpoi_next_node;
+	      vnet_buffer (b0)->ip.adj_index[VLIB_TX] =
+		hicnb0->face_dpo_id.dpoi_index;
+
+	      /* b0 now carries a copy/clone of the cached Data packet */
+	      clone_from_cs (vm, &pitp->u.cs.cs_pkt_buf, b0);
+
+	      stats.pkts_from_cache_count++;
+	      stats.pkts_data_count++;
+	      /* Remove lock from the entry */
+	      hicn_pcs_remove_lock (rt->pitcs, &pitp, &node0, vm, hash_entry0,
+				    dpo_vft0, &hicn_dpo_id0);
+	    }
+
+	end_processing:
+
+	  /* Maybe trace */
+	  if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) &&
+			     (b0->flags & VLIB_BUFFER_IS_TRACED)))
+	    {
+	      hicn_interest_hitcs_trace_t *t =
+		vlib_add_trace (vm, node, b0, sizeof (*t));
+	      t->pkt_type = HICN_PKT_TYPE_INTEREST;
+	      t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+	      t->next_index = next0;
+	    }
+	  /* Incr packet counter */
+	  stats.pkts_processed += 1;
+
+	  /*
+	   * Verify speculative enqueue, maybe switch current
+	   * next frame
+	   */
+	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+					   to_next, n_left_to_next,
+					   bi0, next0);
+	}
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+    }
+
+  u32 pit_int_count = hicn_pit_get_int_count (rt->pitcs);
+
+  vlib_node_increment_counter (vm, hicn_interest_hitcs_node.index,
+			       HICNFWD_ERROR_CACHED,
+			       stats.pkts_from_cache_count);
+
+  vlib_node_increment_counter (vm, hicn_interest_hitcs_node.index,
+			       HICNFWD_ERROR_DATAS, stats.pkts_data_count);
+
+  update_node_counter (vm, hicn_interest_hitcs_node.index,
+		       HICNFWD_ERROR_INT_COUNT, pit_int_count);
+
+  return (frame->n_vectors);
+}
+
+always_inline void
+drop_packet (u32 * next0)
+{
+ *next0 = HICN_INTEREST_HITCS_NEXT_ERROR_DROP;
+}
+
+/* Trace formatter: renders one hicn_interest_hitcs_trace_t record. */
+static u8 *
+hicn_interest_hitcs_format_trace (u8 * s, va_list * args)
+{
+  /* The first two va_args are mandated by the vlib trace-format
+   * calling convention; they are consumed but not used. */
+  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+  hicn_interest_hitcs_trace_t *tr =
+    va_arg (*args, hicn_interest_hitcs_trace_t *);
+
+  return format (s, "INTEREST-HITCS: pkt: %d, sw_if_index %d, next index %d",
+		 (int) tr->pkt_type, tr->sw_if_index, tr->next_index);
+}
+
+/*
+ * Node registration for the Interest hit-CS forwarder node.
+ * runtime_data_bytes reserves a per-node hicn_interest_hitcs_runtime_t
+ * (pitcs is lazily initialized inside the node function).
+ */
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE(hicn_interest_hitcs_node) =
+{
+  .function = hicn_interest_hitcs_node_fn,
+  .name = "hicn-interest-hitcs",
+  .vector_size = sizeof(u32),
+  .runtime_data_bytes = sizeof(hicn_interest_hitcs_runtime_t),
+  .format_trace = hicn_interest_hitcs_format_trace,
+  .type = VLIB_NODE_TYPE_INTERNAL,
+  .n_errors = ARRAY_LEN(hicn_interest_hitcs_error_strings),
+  .error_strings = hicn_interest_hitcs_error_strings,
+  .n_next_nodes = HICN_INTEREST_HITCS_N_NEXT,
+  /* edit / add dispositions here */
+  .next_nodes =
+  {
+    [HICN_INTEREST_HITCS_NEXT_V4_LOOKUP] = "ip4-lookup",
+    [HICN_INTEREST_HITCS_NEXT_V6_LOOKUP] = "ip6-lookup",
+    [HICN_INTEREST_HITCS_NEXT_ERROR_DROP] = "error-drop",
+  },
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/interest_hitpit.h b/hicn-plugin/src/interest_hitpit.h
new file mode 100755
index 000000000..28427d342
--- /dev/null
+++ b/hicn-plugin/src/interest_hitpit.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HICN_INTEREST_HITPIT_H__
+#define __HICN_INTEREST_HITPIT_H__
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+
+#include "pcs.h"
+
+/*
+ * Node context data; we think this is per-thread/instance.
+ */
+typedef struct hicn_interest_hitpit_runtime_s
+{
+  int id;			/* Node instance id (not read in this node) */
+  hicn_pit_cs_t *pitcs;		/* Shared PIT/CS; lazily pointed at
+				 * hicn_main.pitcs by the node function */
+} hicn_interest_hitpit_runtime_t;
+
+/* Trace context struct: one record per traced packet */
+typedef struct
+{
+  u32 next_index;		/* Dispatch (next-node) index chosen */
+  u32 sw_if_index;		/* RX software interface of the packet */
+  u8 pkt_type;			/* HICN_PKT_TYPE_INTEREST in this node */
+} hicn_interest_hitpit_trace_t;
+
+/* Dispatch targets of the hicn-interest-hitpit node */
+typedef enum
+{
+  HICN_INTEREST_HITPIT_NEXT_INTEREST_HITCS,	/* entry turned out to be CS */
+  HICN_INTEREST_HITPIT_NEXT_IP4_LOOKUP,		/* expired entry, re-forward */
+  HICN_INTEREST_HITPIT_NEXT_IP6_LOOKUP,
+  HICN_INTEREST_HITPIT_NEXT_ERROR_DROP,
+  HICN_INTEREST_HITPIT_N_NEXT,
+} hicn_interest_hitpit_next_t;
+
+#endif /* // __HICN_INTEREST_HITPIT_H__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/interest_hitpit_node.c b/hicn-plugin/src/interest_hitpit_node.c
new file mode 100755
index 000000000..21ba97db3
--- /dev/null
+++ b/hicn-plugin/src/interest_hitpit_node.c
@@ -0,0 +1,313 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/ip/ip6_packet.h>
+
+#include "interest_hitpit.h"
+#include "mgmt.h"
+#include "parser.h"
+#include "data_fwd.h"
+#include "infra.h"
+#include "strategy.h"
+#include "strategy_dpo_ctx.h"
+#include "strategy_dpo_manager.h"
+#include "state.h"
+#include "error.h"
+#include "face_db.h"
+
+/* packet trace format function */
+static u8 *hicn_interest_hitpit_format_trace (u8 * s, va_list * args);
+
+/* Stats string values */
+static char *hicn_interest_hitpit_error_strings[] = {
+#define _(sym, string) string,
+ foreach_hicnfwd_error
+#undef _
+};
+
+vlib_node_registration_t hicn_interest_hitpit_node;
+
+always_inline void drop_packet (u32 * next0);
+
+/*
+ * hICN forwarder node for Interests that hit an existing PIT entry.
+ * Depending on the entry state the Interest is:
+ *  - handed to hicn-interest-hitcs when the entry is actually a CS entry;
+ *  - retransmitted through a strategy-selected face when the incoming
+ *    face is already recorded in the PIT entry (retransmission);
+ *  - aggregated (incoming face recorded, packet dropped) otherwise;
+ *  - re-injected into ip4/ip6 lookup when the entry has expired.
+ */
+static uword
+hicn_interest_hitpit_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
+			      vlib_frame_t * frame)
+{
+  u32 n_left_from, *from, *to_next;
+  hicn_interest_hitpit_next_t next_index;
+  hicn_interest_hitpit_runtime_t *rt;
+  vl_api_hicn_api_node_stats_get_reply_t stats = { 0 };
+  f64 tnow;
+
+  rt = vlib_node_get_runtime_data (vm, hicn_interest_hitpit_node.index);
+
+  /* Lazy init: point this node's runtime at the global PIT/CS */
+  if (PREDICT_FALSE (rt->pitcs == NULL))
+    {
+      rt->pitcs = &hicn_main.pitcs;
+    }
+  from = vlib_frame_vector_args (frame);
+  n_left_from = frame->n_vectors;
+  next_index = node->cached_next_index;
+
+  /* Capture time in vpp terms */
+  tnow = vlib_time_now (vm);
+
+  while (n_left_from > 0)
+    {
+      u32 n_left_to_next;
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+      while (n_left_from > 0 && n_left_to_next > 0)
+	{
+	  vlib_buffer_t *b0;
+	  u8 isv6;
+	  u8 *nameptr;
+	  u16 namelen;
+	  u32 bi0;
+	  u32 next0 = HICN_INTEREST_HITPIT_NEXT_ERROR_DROP;
+	  hicn_name_t name;
+	  hicn_header_t *hicn0;
+	  hicn_hash_node_t *node0;
+	  const hicn_strategy_vft_t *strategy_vft0;
+	  const hicn_dpo_vft_t *dpo_vft0;
+	  hicn_pcs_entry_t *pitp;
+	  u8 dpo_ctx_id0;
+	  u8 found = 0;
+	  int nh_idx;
+	  dpo_id_t *outface;
+	  hicn_hash_entry_t *hash_entry0;
+	  hicn_buffer_t *hicnb0;
+	  int ret;
+
+	  /* Prefetch for next iteration. */
+	  if (n_left_from > 1)
+	    {
+	      vlib_buffer_t *b1;
+	      b1 = vlib_get_buffer (vm, from[1]);
+	      CLIB_PREFETCH (b1, CLIB_CACHE_LINE_BYTES, LOAD);
+	      CLIB_PREFETCH (b1->data, CLIB_CACHE_LINE_BYTES, STORE);
+	    }
+
+	  /* Dequeue a packet buffer */
+	  bi0 = from[0];
+	  from += 1;
+	  n_left_from -= 1;
+	  to_next[0] = bi0;
+	  to_next += 1;
+	  n_left_to_next -= 1;
+
+	  b0 = vlib_get_buffer (vm, bi0);
+
+	  /* Get hicn buffer and state stashed by the pcslookup node */
+	  hicnb0 = hicn_get_buffer (b0);
+	  hicn_get_internal_state (hicnb0, rt->pitcs, &node0, &strategy_vft0,
+				   &dpo_vft0, &dpo_ctx_id0, &hash_entry0);
+
+
+	  ret = hicn_interest_parse_pkt (b0, &name, &namelen, &hicn0, &isv6);
+	  nameptr = (u8 *) (&name);
+	  pitp = hicn_pit_get_data (node0);
+	  dpo_id_t hicn_dpo_id0 =
+	    { dpo_vft0->hicn_dpo_get_type (), 0, 0, dpo_ctx_id0 };
+
+	  /*
+	   * Check if the hit is instead a collision in the
+	   * hash table. Unlikely to happen.
+	   */
+	  if (PREDICT_FALSE
+	      (ret != HICN_ERROR_NONE
+	       || !hicn_node_compare (nameptr, namelen, node0)))
+	    {
+	      stats.interests_hash_collision++;
+	      /* Remove lock from the entry */
+	      hicn_pcs_remove_lock (rt->pitcs, &pitp, &node0, vm, hash_entry0,
+				    dpo_vft0, &hicn_dpo_id0);
+	      drop_packet (&next0);
+
+	      goto end_processing;
+	    }
+	  /*
+	   * If the entry is expired, remove it no matter of
+	   * the possible cases.
+	   */
+	  if (tnow > pitp->shared.expire_time)
+	    {
+	      /* Give the strategy a chance to react to the timeout,
+	       * then delete the entry and forward the Interest again */
+	      strategy_vft0->hicn_on_interest_timeout (dpo_ctx_id0);
+	      hicn_pcs_delete (rt->pitcs, &pitp, &node0, vm, hash_entry0,
+			       dpo_vft0, &hicn_dpo_id0);
+	      stats.pit_expired_count++;
+	      next0 =
+		isv6 ? HICN_INTEREST_HITPIT_NEXT_IP6_LOOKUP :
+		HICN_INTEREST_HITPIT_NEXT_IP4_LOOKUP;
+	    }
+	  else
+	    {
+	      /* A CS-flagged entry is served by the hit-CS node */
+	      if ((hash_entry0->he_flags & HICN_HASH_ENTRY_FLAG_CS_ENTRY))
+		{
+		  next0 = HICN_INTEREST_HITPIT_NEXT_INTEREST_HITCS;
+		}
+	      else
+		{
+		  /*
+		   * Distinguish between aggregation or
+		   * retransmission
+		   */
+
+		  found =
+		    hicn_face_search (&(hicnb0->face_dpo_id),
+				      &(pitp->u.pit.faces));
+
+		  if (found)
+		    {
+		      /*
+		       * Remove lock on the dpo
+		       * stored in the vlib_buffer
+		       */
+		      dpo_unlock (&hicnb0->face_dpo_id);
+		      strategy_vft0->hicn_select_next_hop (dpo_ctx_id0,
+							   &nh_idx, &outface);
+		      /* Retransmission */
+		      /*
+		       * Prepare the packet for the
+		       * forwarding
+		       */
+		      next0 = outface->dpoi_next_node;
+		      vnet_buffer (b0)->ip.adj_index[VLIB_TX] =
+			outface->dpoi_index;
+
+		      /*
+		       * Update the egress face in
+		       * the PIT
+		       */
+		      pitp->u.pit.pe_txnh = nh_idx;
+		      stats.interests_retx++;
+		    }
+		  else
+		    {
+		      /* New incoming face: record it for the Data path */
+		      hicn_face_db_add_face_dpo (&hicnb0->face_dpo_id,
+						 &pitp->u.pit.faces);
+
+		      /* Aggregation */
+		      drop_packet (&next0);
+		      stats.interests_aggregated++;
+		    }
+		  /* Remove lock from the entry */
+		  hicn_pcs_remove_lock (rt->pitcs, &pitp, &node0, vm,
+					hash_entry0, dpo_vft0, &hicn_dpo_id0);
+
+		}
+	    }
+	end_processing:
+
+	  /* Maybe trace */
+	  if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) &&
+			     (b0->flags & VLIB_BUFFER_IS_TRACED)))
+	    {
+	      hicn_interest_hitpit_trace_t *t =
+		vlib_add_trace (vm, node, b0, sizeof (*t));
+	      t->pkt_type = HICN_PKT_TYPE_INTEREST;
+	      t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+	      t->next_index = next0;
+	    }
+	  /* Incr packet counter */
+	  stats.pkts_processed += 1;
+
+	  /*
+	   * Verify speculative enqueue, maybe switch current
+	   * next frame
+	   */
+	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+					   to_next, n_left_to_next,
+					   bi0, next0);
+	}
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+    }
+  u32 pit_int_count = hicn_pit_get_int_count (rt->pitcs);
+
+
+  vlib_node_increment_counter (vm, hicn_interest_hitpit_node.index,
+			       HICNFWD_ERROR_PROCESSED, stats.pkts_processed);
+  vlib_node_increment_counter (vm, hicn_interest_hitpit_node.index,
+			       HICNFWD_ERROR_INTEREST_AGG,
+			       stats.interests_aggregated);
+  vlib_node_increment_counter (vm, hicn_interest_hitpit_node.index,
+			       HICNFWD_ERROR_INT_RETRANS,
+			       stats.interests_retx);
+  vlib_node_increment_counter (vm, hicn_interest_hitpit_node.index,
+			       HICNFWD_ERROR_PIT_EXPIRED,
+			       stats.pit_expired_count);
+  vlib_node_increment_counter (vm, hicn_interest_hitpit_node.index,
+			       HICNFWD_ERROR_HASH_COLL_HASHTB_COUNT,
+			       stats.interests_hash_collision);
+
+  update_node_counter (vm, hicn_interest_hitpit_node.index,
+		       HICNFWD_ERROR_INT_COUNT, pit_int_count);
+
+  return (frame->n_vectors);
+}
+
+/* Trace formatter: renders one hicn_interest_hitpit_trace_t record. */
+static u8 *
+hicn_interest_hitpit_format_trace (u8 * s, va_list * args)
+{
+  /* The first two va_args are mandated by the vlib trace-format
+   * calling convention; they are consumed but not used. */
+  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+  hicn_interest_hitpit_trace_t *tr =
+    va_arg (*args, hicn_interest_hitpit_trace_t *);
+
+  return format (s, "INTEREST-HITPIT: pkt: %d, sw_if_index %d, next index %d",
+		 (int) tr->pkt_type, tr->sw_if_index, tr->next_index);
+}
+
+/*
+ * Route the current packet to the error-drop node.
+ *
+ * Marked always_inline (static inline) to match the forward
+ * declaration at the top of this file and the sibling definition in
+ * interest_hitcs_node.c: the previous plain `void` definition gave
+ * the function external linkage, conflicting with the internal
+ * linkage implied by the declaration and leaking a generically-named
+ * `drop_packet` symbol out of this translation unit.
+ */
+always_inline void
+drop_packet (u32 * next0)
+{
+  *next0 = HICN_INTEREST_HITPIT_NEXT_ERROR_DROP;
+}
+
+/*
+ * Node registration for the Interest hit-PIT forwarder node.
+ * runtime_data_bytes reserves a per-node hicn_interest_hitpit_runtime_t
+ * (pitcs is lazily initialized inside the node function).
+ */
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE(hicn_interest_hitpit_node) =
+{
+  .function = hicn_interest_hitpit_node_fn,
+  .name = "hicn-interest-hitpit",
+  .vector_size = sizeof(u32),
+  .runtime_data_bytes = sizeof(hicn_interest_hitpit_runtime_t),
+  .format_trace = hicn_interest_hitpit_format_trace,
+  .type = VLIB_NODE_TYPE_INTERNAL,
+  .n_errors = ARRAY_LEN(hicn_interest_hitpit_error_strings),
+  .error_strings = hicn_interest_hitpit_error_strings,
+  .n_next_nodes = HICN_INTEREST_HITPIT_N_NEXT,
+  /* edit / add dispositions here */
+  .next_nodes =
+  {
+    [HICN_INTEREST_HITPIT_NEXT_INTEREST_HITCS] = "hicn-interest-hitcs",
+    [HICN_INTEREST_HITPIT_NEXT_IP4_LOOKUP] = "ip4-lookup",
+    [HICN_INTEREST_HITPIT_NEXT_IP6_LOOKUP] = "ip6-lookup",
+    [HICN_INTEREST_HITPIT_NEXT_ERROR_DROP] = "error-drop",
+  },
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/interest_pcslookup.h b/hicn-plugin/src/interest_pcslookup.h
new file mode 100755
index 000000000..e27673a9e
--- /dev/null
+++ b/hicn-plugin/src/interest_pcslookup.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HICN_INTEREST_PCSLOOKUP_H__
+#define __HICN_INTEREST_PCSLOOKUP_H__
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+
+#include "pcs.h"
+
+/*
+ * Node context data; we think this is per-thread/instance.
+ */
+typedef struct hicn_interest_pcslookup_runtime_s
+{
+  int id;			/* Node instance id (not read in this node) */
+  hicn_pit_cs_t *pitcs;		/* Shared PIT/CS; lazily pointed at
+				 * hicn_main.pitcs by the node function */
+} hicn_interest_pcslookup_runtime_t;
+
+/* Trace context struct: one record per traced packet */
+typedef struct
+{
+  u32 next_index;		/* Dispatch (next-node) index chosen */
+  u32 sw_if_index;		/* RX software interface of the packet */
+  u8 pkt_type;			/* HICN_PKT_TYPE_INTEREST in this node */
+} hicn_interest_pcslookup_trace_t;
+
+/* Dispatch targets of the hicn-interest-pcslookup node */
+typedef enum
+{
+  HICN_INTEREST_PCSLOOKUP_NEXT_V4_LOOKUP,
+  HICN_INTEREST_PCSLOOKUP_NEXT_V6_LOOKUP,
+  HICN_INTEREST_PCSLOOKUP_NEXT_INTEREST_HITPIT,
+  /* HITCS must stay immediately after HITPIT: the lookup node
+   * dispatches with (NEXT_INTEREST_HITPIT + is_cs0) */
+  HICN_INTEREST_PCSLOOKUP_NEXT_INTEREST_HITCS,
+  HICN_INTEREST_PCSLOOKUP_NEXT_ERROR_DROP,
+  HICN_INTEREST_PCSLOOKUP_N_NEXT,
+} hicn_interest_pcslookup_next_t;
+
+#endif /* // __HICN_INTEREST_PCSLOOKUP_H__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/interest_pcslookup_node.c b/hicn-plugin/src/interest_pcslookup_node.c
new file mode 100755
index 000000000..40d62510b
--- /dev/null
+++ b/hicn-plugin/src/interest_pcslookup_node.c
@@ -0,0 +1,240 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/ip/ip6_packet.h>
+
+#include "interest_pcslookup.h"
+#include "mgmt.h"
+#include "parser.h"
+#include "infra.h"
+#include "strategy_dpo_manager.h"
+#include "error.h"
+#include "state.h"
+
+/**
+ * @file
+ * This node performs a lookup in the PIT and CS for a received interest
+ * packet.
+ *
+ * The packet is passed to the interest-hitpit or interest-hitcs node
+ * when there is a hit in the PIT or in the content store, respectively.
+ */
+
+/* Functions declarations */
+
+/* packet trace format function */
+static u8 *hicn_interest_pcslookup_format_trace (u8 * s, va_list * args);
+
+
+/* Stats string values */
+static char *hicn_interest_pcslookup_error_strings[] = {
+#define _(sym, string) string,
+ foreach_hicnfwd_error
+#undef _
+};
+
+vlib_node_registration_t hicn_interest_pcslookup_node;
+
+/*
+ * hICN PIT/CS lookup node for Interests: parses the packet name,
+ * hashes it and looks it up in the PCS hash table. On a hit the packet
+ * is dispatched to hicn-interest-hitpit or hicn-interest-hitcs
+ * (selected via NEXT_INTEREST_HITPIT + is_cs0); on a miss it continues
+ * to ip4/ip6 lookup. One packet is processed at a time.
+ */
+static uword
+hicn_interest_pcslookup_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
+				 vlib_frame_t * frame)
+{
+  u32 n_left_from, *from, *to_next;
+  hicn_interest_pcslookup_next_t next_index;
+  hicn_interest_pcslookup_runtime_t *rt;
+  vl_api_hicn_api_node_stats_get_reply_t stats = { 0 };
+  int ret;
+
+  rt = vlib_node_get_runtime_data (vm, hicn_interest_pcslookup_node.index);
+
+  /* Lazy init: point this node's runtime at the global PIT/CS */
+  if (PREDICT_FALSE (rt->pitcs == NULL))
+    {
+      rt->pitcs = &hicn_main.pitcs;
+    }
+  from = vlib_frame_vector_args (frame);
+  n_left_from = frame->n_vectors;
+  next_index = node->cached_next_index;
+
+  while (n_left_from > 0)
+    {
+      u32 n_left_to_next;
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+      while (n_left_from > 0 && n_left_to_next > 0)
+	{
+	  vlib_buffer_t *b0;
+	  u8 isv6;
+	  u8 *nameptr;
+	  u16 namelen;
+	  u32 bi0;
+	  u32 next0 = HICN_INTEREST_PCSLOOKUP_NEXT_ERROR_DROP;
+	  u64 name_hash = 0;
+	  hicn_name_t name;
+	  hicn_header_t *hicn0;
+	  u32 node_id0 = 0;
+	  u8 dpo_ctx_id0 = 0;
+	  u8 vft_id0 = 0;
+	  u8 is_cs0 = 0;
+	  u8 hash_entry_id = 0;
+	  u8 bucket_is_overflown = 0;
+	  u32 bucket_id = ~0;
+
+	  /* Prefetch for next iteration. */
+	  if (n_left_from > 1)
+	    {
+	      vlib_buffer_t *b1;
+	      b1 = vlib_get_buffer (vm, from[1]);
+	      CLIB_PREFETCH (b1, CLIB_CACHE_LINE_BYTES, STORE);
+	      CLIB_PREFETCH (b1->data, CLIB_CACHE_LINE_BYTES, LOAD);
+	    }
+	  /* Dequeue a packet buffer */
+	  bi0 = from[0];
+	  from += 1;
+	  n_left_from -= 1;
+	  to_next[0] = bi0;
+	  to_next += 1;
+	  n_left_to_next -= 1;
+
+	  b0 = vlib_get_buffer (vm, bi0);
+	  ret = hicn_interest_parse_pkt (b0, &name, &namelen, &hicn0, &isv6);
+
+	  /* Default on parse success: continue to ip lookup (PCS miss) */
+	  if (PREDICT_TRUE (ret == HICN_ERROR_NONE))
+	    {
+	      next0 =
+		isv6 ? HICN_INTEREST_PCSLOOKUP_NEXT_V6_LOOKUP :
+		HICN_INTEREST_PCSLOOKUP_NEXT_V4_LOOKUP;
+	    }
+	  nameptr = (u8 *) (&name);
+	  stats.pkts_processed++;
+
+	  if (PREDICT_FALSE (ret != HICN_ERROR_NONE ||
+			     hicn_hashtb_fullhash (nameptr, namelen,
+						   &name_hash) !=
+			     HICN_ERROR_NONE))
+	    {
+	      next0 = HICN_INTEREST_PCSLOOKUP_NEXT_ERROR_DROP;
+	    }
+	  else
+	    {
+	      /* PCS hit: is_cs0 selects HITPIT (0) or HITCS (1) —
+	       * relies on the enum ordering in interest_pcslookup.h */
+	      if (hicn_hashtb_lookup_node (rt->pitcs->pcs_table, nameptr,
+					   namelen, name_hash,
+					   0 /* is_data */ , &node_id0,
+					   &dpo_ctx_id0, &vft_id0, &is_cs0,
+					   &hash_entry_id, &bucket_id,
+					   &bucket_is_overflown) ==
+		  HICN_ERROR_NONE)
+		{
+		  next0 =
+		    HICN_INTEREST_PCSLOOKUP_NEXT_INTEREST_HITPIT + is_cs0;
+		}
+	      stats.pkts_interest_count++;
+	    }
+
+	  /* Stash lookup results in the buffer metadata for the
+	   * downstream hitpit/hitcs nodes */
+	  hicn_store_internal_state (b0, name_hash, node_id0, dpo_ctx_id0,
+				     vft_id0, hash_entry_id, bucket_id,
+				     bucket_is_overflown);
+
+	  /* Maybe trace */
+	  if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) &&
+			     (b0->flags & VLIB_BUFFER_IS_TRACED)))
+	    {
+	      hicn_interest_pcslookup_trace_t *t =
+		vlib_add_trace (vm, node, b0, sizeof (*t));
+	      t->pkt_type = HICN_PKT_TYPE_INTEREST;
+	      t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+	      t->next_index = next0;
+	    }
+	  /*
+	   * Verify speculative enqueue, maybe switch current
+	   * next frame
+	   */
+	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+					   to_next, n_left_to_next,
+					   bi0, next0);
+
+	}
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+    }
+  u32 pit_int_count = hicn_pit_get_int_count (rt->pitcs);
+  u32 pit_cs_count = hicn_pit_get_cs_count (rt->pitcs);
+  u32 pcs_ntw_count = hicn_pcs_get_ntw_count (rt->pitcs);
+
+
+  vlib_node_increment_counter (vm, hicn_interest_pcslookup_node.index,
+			       HICNFWD_ERROR_PROCESSED, stats.pkts_processed);
+
+  vlib_node_increment_counter (vm, hicn_interest_pcslookup_node.index,
+			       HICNFWD_ERROR_INTERESTS,
+			       stats.pkts_interest_count);
+
+  update_node_counter (vm, hicn_interest_pcslookup_node.index,
+		       HICNFWD_ERROR_INT_COUNT, pit_int_count);
+
+  update_node_counter (vm, hicn_interest_pcslookup_node.index,
+		       HICNFWD_ERROR_CS_COUNT, pit_cs_count);
+
+  update_node_counter (vm, hicn_interest_pcslookup_node.index,
+		       HICNFWD_ERROR_CS_NTW_COUNT, pcs_ntw_count);
+
+  return (frame->n_vectors);
+}
+
+/* Trace formatter: renders one hicn_interest_pcslookup_trace_t record. */
+static u8 *
+hicn_interest_pcslookup_format_trace (u8 * s, va_list * args)
+{
+  /* The first two va_args are mandated by the vlib trace-format
+   * calling convention; they are consumed but not used. */
+  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+  hicn_interest_pcslookup_trace_t *tr =
+    va_arg (*args, hicn_interest_pcslookup_trace_t *);
+
+  return format (s,
+		 "INTEREST_PCSLOOKUP: pkt: %d, sw_if_index %d, next index %d",
+		 (int) tr->pkt_type, tr->sw_if_index, tr->next_index);
+}
+
+
+/*
+ * Node registration for the Interest PIT/CS lookup node.
+ * runtime_data_bytes reserves a per-node
+ * hicn_interest_pcslookup_runtime_t (pitcs is lazily initialized
+ * inside the node function).
+ */
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE(hicn_interest_pcslookup_node) =
+{
+  .function = hicn_interest_pcslookup_node_fn,
+  .name = "hicn-interest-pcslookup",
+  .vector_size = sizeof(u32),
+  .runtime_data_bytes = sizeof(hicn_interest_pcslookup_runtime_t),
+  .format_trace = hicn_interest_pcslookup_format_trace,
+  .type = VLIB_NODE_TYPE_INTERNAL,
+  .n_errors = ARRAY_LEN(hicn_interest_pcslookup_error_strings),
+  .error_strings = hicn_interest_pcslookup_error_strings,
+  .n_next_nodes = HICN_INTEREST_PCSLOOKUP_N_NEXT,
+  /* HITCS must follow HITPIT: the node function indexes this table
+   * with (NEXT_INTEREST_HITPIT + is_cs0) */
+  .next_nodes =
+  {
+    [HICN_INTEREST_PCSLOOKUP_NEXT_V4_LOOKUP] = "ip4-lookup",
+    [HICN_INTEREST_PCSLOOKUP_NEXT_V6_LOOKUP] = "ip6-lookup",
+    [HICN_INTEREST_PCSLOOKUP_NEXT_INTEREST_HITPIT] = "hicn-interest-hitpit",
+    [HICN_INTEREST_PCSLOOKUP_NEXT_INTEREST_HITCS] = "hicn-interest-hitcs",
+    [HICN_INTEREST_PCSLOOKUP_NEXT_ERROR_DROP] = "error-drop",
+  },
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/mapme.h b/hicn-plugin/src/mapme.h
new file mode 100755
index 000000000..e0786eff8
--- /dev/null
+++ b/hicn-plugin/src/mapme.h
@@ -0,0 +1,307 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HICN_MAPME__
+#define __HICN_MAPME__
+
+#include <vnet/dpo/load_balance.h>
+#include <vnet/buffer.h>
+#include <hicn/hicn.h>
+#include <hicn/mapme.h>
+
+#include "hicn.h"
+#include "strategy_dpo_ctx.h"
+#include "strategy_dpo_manager.h" // dpo_is_hicn
+
+#define HICN_MAPME_ALLOW_LOCATORS 1
+
+//#define HICN_MAPME_NOTIFICATIONS 1
+
+#define NOT_A_NOTIFICATION false
+#define TIMER_NO_REPEAT false
+
+#define INVALID_SEQ 0
+#define INIT_SEQ 1
+
/*
 * Global MAP-Me plugin state (singleton, see mapme_main in mapme_eventmgr.c).
 * NOTE(review): the struct tag is hicn_mapme_conf_s but the typedef is
 * hicn_mapme_main_t — confirm the tag name is intentional.
 */
typedef struct hicn_mapme_conf_s
{
  hicn_mapme_conf_t conf;	/* user-visible MAP-Me configuration (libhicn) */
  bool remove_dpo;		// FIXME used ?

  vlib_main_t *vm;		/* main vlib handle, set by hicn_mapme_init */
  vlib_log_class_t log_class;	/* log class used by the DEBUG/WARN/ERROR macros */
} hicn_mapme_main_t;
+
/*
 * Events signalled to the MAP-Me event-manager process node
 * (hicn_mapme_eventmgr_process_node). The X-macro expands each entry into a
 * HICN_MAPME_EVENT_* enumerator.
 */
#define foreach_hicn_mapme_event  \
  _(FACE_ADD)                     \
  _(FACE_DEL)                     \
  _(FACE_APP_ADD)                 \
  _(FACE_APP_DEL)                 \
  _(FACE_NH_SET)                  \
  _(FACE_NH_ADD)                  \
  _(FACE_PH_ADD)                  \
  _(FACE_PH_DEL)

typedef enum
{
#define _(a) HICN_MAPME_EVENT_##a,
  foreach_hicn_mapme_event
#undef _
} hicn_mapme_event_t;
+
/*
 * The TFIB (Temporary FIB) view of a strategy DPO context: previous hops are
 * stored at the TAIL of the next_hops array, growing downwards, while regular
 * next hops occupy the head (see hicn_mapme_tfib_add/del).
 */
typedef hicn_dpo_ctx_t hicn_mapme_tfib_t;

/*
 * Ideally we might need to care about alignment, but this struct is only
 * used for casting hicn_dpo_ctx_t.
 *
 * See otherwise vnet/dpo/dpo.h
 */

/* NOTE(review): since hicn_mapme_tfib_t is a typedef of hicn_dpo_ctx_t this
 * assertion is trivially true; it only guards against a future divergence. */
STATIC_ASSERT (sizeof (hicn_mapme_tfib_t) <= sizeof (hicn_dpo_ctx_t),
	       "hicn_mapme_tfib_t is greater than hicn_dpo_ctx_t");

/* Cast a strategy DPO context pointer to its TFIB view */
#define TFIB(dpo) ((hicn_mapme_tfib_t*)(dpo))
+
+static_always_inline int
+hicn_mapme_nh_set (hicn_mapme_tfib_t * tfib, dpo_id_t * face_id)
+{
+ tfib->next_hops[0] = *face_id;
+ tfib->entry_count = 1;
+ return 0;
+}
+
+/**
+ * @brief Add a next hop iif it is not already a next hops
+ */
+static_always_inline int
+hicn_mapme_nh_add (hicn_mapme_tfib_t * tfib, dpo_id_t * face_id)
+{
+ for (u8 pos = 0; pos < tfib->entry_count; pos++)
+ if (dpo_cmp (&tfib->next_hops[pos], face_id) == 0)
+ return 0;
+ tfib->next_hops[tfib->entry_count++] = *face_id;
+ return 0;
+}
+
/**
 * Add a 'previous' hop to the TFIB.
 *
 * TFIB entries are stored at the tail of the next_hops array, growing
 * downwards towards the regular next hops stored at the head.
 *
 * @return 0 on success (including duplicates, which are ignored), -1 if the
 *         array is full.
 *
 * XXX we should have the for loop in the reverse order for simpler code.
 */
static_always_inline int
hicn_mapme_tfib_add (hicn_mapme_tfib_t * tfib, dpo_id_t * face_id)
{
  /* First (lowest) index currently used by the TFIB region */
  u8 pos = HICN_PARAM_FIB_ENTRY_NHOPS_MAX - tfib->tfib_entry_count;

  /* Don't add if it already exists,
   * e.g. an old IU received on a face on which we are retransmitting */
  for (u8 pos2 = pos; pos2 < HICN_PARAM_FIB_ENTRY_NHOPS_MAX; pos2++)
    if (dpo_cmp (&tfib->next_hops[pos2], face_id) == 0)
      return 0;

  /* Make sure we have enough room: slot pos-1 must not collide with the
   * next-hop region [0, entry_count) at the head of the array */
  if (pos <= tfib->entry_count)
    return -1;

  tfib->next_hops[pos - 1] = *face_id;
  tfib->tfib_entry_count++;

  return 0;
}
+
+static_always_inline int
+hicn_mapme_tfib_del (hicn_mapme_tfib_t * tfib, dpo_id_t * face_id)
+{
+ /*
+ * We need to do a linear scan of TFIB entries to find the one to
+ * remove
+ */
+ u8 start_pos = HICN_PARAM_FIB_ENTRY_NHOPS_MAX - tfib->tfib_entry_count;
+ u8 pos = ~0;
+ for (pos = start_pos; pos < HICN_PARAM_FIB_ENTRY_NHOPS_MAX; pos++)
+ if (dpo_cmp (&tfib->next_hops[pos], face_id) == 0)
+ break;
+ if (pos == HICN_PARAM_FIB_ENTRY_NHOPS_MAX)
+ /* Not found */
+ return -1;
+
+ tfib->tfib_entry_count--;
+
+ /* Likely we won't receive a new IU twice from the same face */
+ if (PREDICT_TRUE (pos > start_pos))
+ memmove (tfib->next_hops + start_pos, tfib->next_hops + start_pos + 1,
+ (pos - start_pos) * sizeof (dpo_id_t));
+
+ return 0;
+}
+
/**
 * @brief Performs an Exact Prefix Match lookup on the FIB
 * @returns the corresponding DPO (hICN or IP LB), or NULL
 *
 * The returned pointer is either an hICN strategy DPO found inside the load
 * balance, or the load-balance DPO itself for a plain IP route; NULL when no
 * exact-match entry exists or the entry does not contribute a load balance.
 */
static_always_inline
  dpo_id_t * fib_epm_lookup (ip46_address_t * addr, u8 plen)
{
  fib_prefix_t fib_pfx;
  fib_node_index_t fib_entry_index;
  u32 fib_index;
  dpo_id_t *dpo_id;
  load_balance_t *lb;

  const dpo_id_t *load_balance_dpo_id;

  /* At this point the face exists in the face table */
  fib_prefix_from_ip46_addr (addr, &fib_pfx);
  /* fib_prefix_from_ip46_addr sets a host-length prefix; narrow it to plen */
  fib_pfx.fp_len = plen;

  /* Check if the route already exist in the fib : EPM */
  fib_index = fib_table_find (fib_pfx.fp_proto, HICN_FIB_TABLE);

  fib_entry_index = fib_table_lookup_exact_match (fib_index, &fib_pfx);
  if (fib_entry_index == FIB_NODE_INDEX_INVALID)
    return NULL;

  load_balance_dpo_id = fib_entry_contribute_ip_forwarding (fib_entry_index);

  /* The dpo is not a load balance dpo as expected */
  if (load_balance_dpo_id->dpoi_type != DPO_LOAD_BALANCE)
    return NULL;

  /* former_dpo_id is a load_balance dpo */
  lb = load_balance_get (load_balance_dpo_id->dpoi_index);

  /*
   * We now distinguish the case where we have an hICN route (the
   * regular case), and the case where we have an IP route, to be able
   * to apply MAP-Me mechanisms even to a locator IP address.
   */

  /* Scan all buckets for an hICN strategy DPO */
  for (int i = 0; i < lb->lb_n_buckets; i++)
    {
      /* un-const */
      dpo_id = (dpo_id_t *) load_balance_get_bucket_i (lb, i);

      if (dpo_is_hicn (dpo_id))
	return dpo_id;
    }

  /* No hICN bucket: plain IP route, return the load balance itself */
  /* un-const */
  return (dpo_id_t *) load_balance_dpo_id;
}
+
+/* DPO types */
+
+extern dpo_type_t hicn_face_udp_type;
+extern dpo_type_t hicn_face_ip_type;
+
+/* VLIB EDGE IDs */
+
+/* in faces/ip/face_ip.c */
+extern u32 strategy_face_ip4_vlib_edge;
+extern u32 strategy_face_ip6_vlib_edge;
+/* in faces/udp/face_udp.c */
+extern u32 strategy_face_udp6_vlib_edge;
+extern u32 strategy_face_udp6_vlib_edge;
+
+
+/**
+ * @brief Returns the next hop vlib edge on which we can send an Interest packet.
+ *
+ * This is both used to preprocess a dpo that will be stored as a next hop in the FIB, and to determine on which node to send an Interest Update.
+ */
+always_inline u32
+hicn_mapme_get_dpo_vlib_edge (dpo_id_t * dpo)
+{
+ if (dpo->dpoi_type == hicn_face_ip_type)
+ {
+ switch (dpo->dpoi_proto)
+ {
+ case DPO_PROTO_IP4:
+ return strategy_face_ip4_vlib_edge;
+ case DPO_PROTO_IP6:
+ return strategy_face_ip6_vlib_edge;
+ default:
+ return ~0;
+ }
+ }
+ else if (dpo->dpoi_type == hicn_face_udp_type)
+ {
+ switch (dpo->dpoi_proto)
+ {
+ case DPO_PROTO_IP4:
+ return strategy_face_udp6_vlib_edge;
+ case DPO_PROTO_IP6:
+ return strategy_face_udp6_vlib_edge;
+ default:
+ return ~0;
+ }
+ }
+ else
+ {
+ return ~0;
+ }
+}
+
+/**
+ * @brief Returns the next hop node on which we can send an Update packet
+ */
+always_inline char *
+hicn_mapme_get_dpo_face_node (dpo_id_t * dpo)
+{
+ if (dpo->dpoi_type == hicn_face_ip_type)
+ {
+ switch (dpo->dpoi_proto)
+ {
+ case DPO_PROTO_IP4:
+ return "hicn-face-ip4-output";
+ case DPO_PROTO_IP6:
+ return "hicn-face-ip6-output";
+ default:
+ return NULL;
+ }
+ }
+ else if (dpo->dpoi_type == hicn_face_udp_type)
+ {
+ switch (dpo->dpoi_proto)
+ {
+ case DPO_PROTO_IP4:
+ return "hicn-face-udp4-output";
+ case DPO_PROTO_IP6:
+ return "hicn-face-udp6-output";
+ default:
+ return NULL;
+ }
+ }
+ else
+ {
+ return NULL;
+ }
+}
+
+
/* Logging shortcuts bound to the MAP-Me log class registered in
 * hicn_mapme_init; usable from any TU that sees the mapme_main extern. */
#define DEBUG(...) vlib_log_debug(mapme_main.log_class, __VA_ARGS__)
#define WARN(...) vlib_log_warn(mapme_main.log_class, __VA_ARGS__)
#define ERROR(...) vlib_log_err(mapme_main.log_class, __VA_ARGS__)
+
+#endif /* __HICN_MAPME__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/mapme_ack.h b/hicn-plugin/src/mapme_ack.h
new file mode 100755
index 000000000..98a219982
--- /dev/null
+++ b/hicn-plugin/src/mapme_ack.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Copyright (c) 2017-2019 by Cisco Systems Inc. All Rights Reserved.
+ *
+ */
+
+#ifndef HICN_MAPME_ACK_H
+#define HICN_MAPME_ACK_H
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+
/* Node context data */
typedef struct hicn_mapme_ack_runtime_s
{
  int id;			/* per-node runtime id (currently unused by the node fn) */
} hicn_mapme_ack_runtime_t;

/* Trace context struct */
typedef struct
{
  u32 next_index;		/* next node chosen for the packet */
  u32 sw_if_index;		/* RX software interface */
  u8 pkt_type;			/* hICN packet type */
} hicn_mapme_ack_trace_t;

/* Dispositions for the ack node: acks are consumed, so the only arc is drop */
typedef enum
{
  HICN_MAPME_ACK_NEXT_ERROR_DROP,
  HICN_MAPME_ACK_N_NEXT,
} hicn_mapme_ack_next_t;
+
+#endif /* HICN_MAPME_ACK_H */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/mapme_ack_node.c b/hicn-plugin/src/mapme_ack_node.c
new file mode 100755
index 000000000..21e177bb6
--- /dev/null
+++ b/hicn-plugin/src/mapme_ack_node.c
@@ -0,0 +1,224 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/ip/ip6_packet.h>
+#include <hicn/hicn.h>
+
+#include "mapme.h"
+#include "mapme_ack.h"
+#include "mapme_eventmgr.h"
+#include "mgmt.h"
+#include "parser.h"
+#include "data_fwd.h"
+#include "infra.h"
+#include "strategy_dpo_manager.h"
+#include "error.h"
+#include "state.h"
+
/* Global MAP-Me state, defined in mapme_eventmgr.c */
extern hicn_mapme_main_t mapme_main;

/* packet trace format function */
static u8 *hicn_mapme_ack_format_trace (u8 * s, va_list * args);


/* Stats string values — reuses the shared forwarder error string table */
static char *hicn_mapme_ack_error_strings[] = {
#define _(sym, string) string,
  foreach_hicnfwd_error
#undef _
};
+
/*
 * @brief Process an incoming ack message (Interest Update Ack)
 * @param vm vlib main data structure
 * @param b Ack packet buffer (IU Ack)
 * @param in_face Ingress face DPO
 * @return false on parse error, true otherwise (including ignored acks)
 */
bool
hicn_mapme_process_ack (vlib_main_t * vm, vlib_buffer_t * b,
			dpo_id_t * in_face)
{
  seq_t fib_seq;
  const dpo_id_t *dpo;
  hicn_prefix_t prefix;
  mapme_params_t params;
  int rc;

  /* Parse incoming message */
  rc =
    hicn_mapme_parse_packet (vlib_buffer_get_current (b), &prefix, &params);
  if (rc < 0)
    goto ERR_PARSE;

  if (params.seq == INVALID_SEQ)
    {
      DEBUG ("Invalid sequence number found in IU");
      return true;
    }

  /* Exact-match lookup of the acked prefix in the FIB */
  dpo = fib_epm_lookup (&(prefix.name), prefix.len);
  if (!dpo)
    {
      DEBUG ("Ignored ACK for non-existing FIB entry. Ignored.");
      return true;

    }

  /* We are only expecting ACKs for hICN DPOs */
  ASSERT (dpo_is_hicn (dpo));

  const hicn_dpo_vft_t *dpo_vft = hicn_dpo_get_vft (dpo->dpoi_type);
  hicn_mapme_tfib_t *tfib =
    TFIB (dpo_vft->hicn_dpo_get_ctx (dpo->dpoi_index));
  fib_seq = tfib->seq;

  /*
   * As we always retransmit IU with the latest seq, we are not interested in
   * ACKs with inferior seq
   */
  if (params.seq < fib_seq)
    {
      DEBUG ("Ignored ACK for low seq");
      return true;
    }

  /* Remove the acking ingress face from the TFIB (no-op if absent, e.g. a
   * duplicate ack) */
  hicn_mapme_tfib_del (tfib, in_face);

  /*
   * Notify the event manager so it can cancel pending retransmissions for
   * this (prefix, dpo); the event payload is a retx_t filled below.
   */
  retx_t *retx =
    vlib_process_signal_event_data (vm,
				    hicn_mapme_eventmgr_process_node.index,
				    HICN_MAPME_EVENT_FACE_PH_DEL, 1,
				    sizeof (retx_t));
  *retx = (retx_t)
  {
  .prefix = prefix,.dpo = *dpo};
  return true;

ERR_PARSE:
  return false;
}
+
+vlib_node_registration_t hicn_mapme_ack_node;
+
+static uword
+hicn_mapme_ack_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ hicn_buffer_t *hb;
+ hicn_mapme_ack_next_t next_index;
+ u32 n_left_from, *from, *to_next;
+ n_left_from = frame->n_vectors;
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0) // buffers in the current frame
+ {
+ u32 n_left_to_next;
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t *b0;
+ u32 next0 = HICN_MAPME_ACK_NEXT_ERROR_DROP;
+ u32 sw_if_index0;
+ /* speculatively enqueue b0 to the current next frame */
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+ b0 = vlib_get_buffer (vm, bi0);
+
+ vlib_cli_output (vm, "Received IUAck");
+ hb = hicn_get_buffer (b0);
+ hicn_mapme_process_ack (vm, b0, &hb->face_dpo_id);
+
+ /* Single loop: process 1 packet here */
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
+ && (b0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ hicn_mapme_ack_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->sw_if_index = sw_if_index0;
+ t->next_index = next0;
+ }
+ /* $$$$$ Done processing 1 packet here $$$$$ */
+
+ /* verify speculative enqueue, maybe switch current next frame */
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+// vlib_node_increment_counter (vm, hicn_mapme_ack_node.index,
+// HICN_MAPME_ACK_ERROR_SWAPPED, pkts_swapped);
+ return (frame->n_vectors);
+}
+
/* packet trace format function */
/*
 * Render one hicn_mapme_ack_trace_t record; va_arg order (vm, node, record)
 * is fixed by the vlib trace framework.
 */
static u8 *
hicn_mapme_ack_format_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  hicn_mapme_ack_trace_t *t = va_arg (*args, hicn_mapme_ack_trace_t *);

  s = format (s, "MAPME_ACK: pkt: %d, sw_if_index %d, next index %d",
	      (int) t->pkt_type, t->sw_if_index, t->next_index);
  return (s);
}
+

/*
 * Node registration for the MAP-Me node processing ack packets (IU Ack).
 * Acks are consumed here, so the only next arc is error-drop.
 */
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (hicn_mapme_ack_node) =
{
  .function = hicn_mapme_ack_node_fn,
  .name = "hicn-mapme-ack",
  .vector_size = sizeof (u32),
  .runtime_data_bytes = sizeof (hicn_mapme_ack_runtime_t),
  .format_trace = hicn_mapme_ack_format_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN (hicn_mapme_ack_error_strings),
  .error_strings = hicn_mapme_ack_error_strings,
  .n_next_nodes = HICN_MAPME_ACK_N_NEXT,
  .next_nodes =
  {
    [HICN_MAPME_ACK_NEXT_ERROR_DROP] = "error-drop",
  },
};
/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/hicn-plugin/src/mapme_ctrl.h b/hicn-plugin/src/mapme_ctrl.h
new file mode 100755
index 000000000..e7c1cdf64
--- /dev/null
+++ b/hicn-plugin/src/mapme_ctrl.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Copyright (c) 2017-2019 by Cisco Systems Inc. All Rights Reserved.
+ *
+ */
+
+#ifndef HICN_MAPME_CTRL_H
+#define HICN_MAPME_CTRL_H
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+
/* Node context data */
typedef struct hicn_mapme_ctrl_runtime_s
{
  int id;			/* per-node runtime id (currently unused by the node fn) */
} hicn_mapme_ctrl_runtime_t;

/* Trace context struct */
typedef struct
{
  u32 next_index;		/* next node chosen for the packet */
  u32 sw_if_index;		/* RX software interface */
  u8 pkt_type;			/* hICN packet type */
} hicn_mapme_ctrl_trace_t;

/*
 * Dispositions for the ctrl node: the forged ack is sent back out on the
 * iface output node matching the ingress face type/protocol.
 */
typedef enum
{
  HICN_MAPME_CTRL_NEXT_IP4_OUTPUT,
  HICN_MAPME_CTRL_NEXT_IP6_OUTPUT,
  HICN_MAPME_CTRL_NEXT_UDP46_OUTPUT,
  HICN_MAPME_CTRL_NEXT_UDP66_OUTPUT,
  HICN_MAPME_CTRL_NEXT_ERROR_DROP,
  HICN_MAPME_CTRL_N_NEXT,
} hicn_mapme_ctrl_next_t;
+/**
+ * @brief Returns the next hop node on which we can send an ACK packet
+ */
+always_inline hicn_mapme_ctrl_next_t
+hicn_mapme_get_dpo_iface_node (dpo_id_t * dpo)
+{
+ if (dpo->dpoi_type == hicn_face_ip_type)
+ {
+ switch (dpo->dpoi_proto)
+ {
+ case DPO_PROTO_IP4:
+ return HICN_MAPME_CTRL_NEXT_IP4_OUTPUT;
+ case DPO_PROTO_IP6:
+ return HICN_MAPME_CTRL_NEXT_IP6_OUTPUT;
+ default:
+ return HICN_MAPME_CTRL_NEXT_ERROR_DROP;
+ }
+ }
+ else if (dpo->dpoi_type == hicn_face_udp_type)
+ {
+ switch (dpo->dpoi_proto)
+ {
+ case DPO_PROTO_IP4:
+ return HICN_MAPME_CTRL_NEXT_UDP46_OUTPUT;
+ case DPO_PROTO_IP6:
+ return HICN_MAPME_CTRL_NEXT_UDP66_OUTPUT;
+ default:
+ return HICN_MAPME_CTRL_NEXT_ERROR_DROP;
+ }
+ }
+ else
+ {
+ return HICN_MAPME_CTRL_NEXT_ERROR_DROP;
+ }
+}
+
+#endif /* HICN_MAPME_CTRL_H */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/mapme_ctrl_node.c b/hicn-plugin/src/mapme_ctrl_node.c
new file mode 100755
index 000000000..9fc0c9055
--- /dev/null
+++ b/hicn-plugin/src/mapme_ctrl_node.c
@@ -0,0 +1,333 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * This node processses MAP-Me control messages.
+ */
+#include <vnet/ip/ip6_packet.h>
+#include <vnet/dpo/load_balance.h>
+#include <hicn/hicn.h>
+
+#include "mapme.h"
+#include "mapme_ctrl.h"
+#include "mapme_eventmgr.h"
+#include "mgmt.h"
+#include "parser.h"
+#include "infra.h"
+#include "strategy_dpo_manager.h"
+#include "error.h"
+#include "state.h"
+
+extern hicn_mapme_main_t mapme_main;
+
/* Milliseconds to nanoseconds. Fully parenthesized so expression arguments
 * (e.g. MS2NS (a + b)) and surrounding operators expand correctly. */
#define MS2NS(x) ((x) * 1000000)
+
+/* Functions declarations */
+
+/* packet trace format function */
+static u8 *hicn_mapme_ctrl_format_trace (u8 * s, va_list * args);
+
+
+/* Stats string values */
+static char *hicn_mapme_ctrl_error_strings[] = {
+#define _(sym, string) string,
+ foreach_hicnfwd_error
+#undef _
+};
+
/**
 * Preprocess the ingress face so as to make it a candidate next hop, which is
 * what MAP-Me will handle: copy the face DPO and point its next node at the
 * vlib edge regular Interests will follow on that face.
 *
 * NOTE(review): the `type` parameter is currently unused — confirm whether it
 * can be dropped or is reserved for future per-packet-type handling.
 */
static_always_inline void
preprocess_in_face (hicn_type_t type, dpo_id_t * in, dpo_id_t * out)
{
  u32 vlib_edge = hicn_mapme_get_dpo_vlib_edge (in);
  *out = *in;
  out->dpoi_next_node = vlib_edge;
}
+
/*
 * @brief Process incoming control messages (Interest Update)
 * @param vm vlib main data structure
 * @param b Control packet (IU)
 * @param in_face Ingress face DPO (already preprocessed, see
 *        preprocess_in_face)
 * @return false on parse error or missing/unsupported FIB entry, true
 *         otherwise
 *
 * NOTE:
 * - this function answers locally to the IU interest by replying with a Ack
 * (Data) packet, unless in case of outdated information, in which we can
 * consider the interest is dropped, and another IU (aka ICMP error) is sent so
 * that retransmissions stop.
 */
static_always_inline bool
hicn_mapme_process_ctrl (vlib_main_t * vm, vlib_buffer_t * b,
			 dpo_id_t * in_face)
{
  seq_t fib_seq;
  const dpo_id_t *dpo;
  hicn_prefix_t prefix;
  mapme_params_t params;
  int rc;

  /* Parse incoming message */
  rc =
    hicn_mapme_parse_packet (vlib_buffer_get_current (b), &prefix, &params);
  if (rc < 0)
    goto ERR_PARSE;

  /* NOTE(review): CLI output on the packet path; consider DEBUG() */
  vlib_cli_output (vm, "IU - type:%d seq:%d len:%d", params.type, params.seq,
		   prefix.len);

  if (params.seq == INVALID_SEQ)
    {
      vlib_log_warn (mapme_main.log_class,
		     "Invalid sequence number found in IU");

      return true;
    }

  /* We forge the ACK which we be the packet forwarded by the node */
  hicn_mapme_create_ack (vlib_buffer_get_current (b), &params);

  /* Exact-match lookup of the announced prefix */
  dpo = fib_epm_lookup (&prefix.name, prefix.len);
  if (!dpo)
    {
#ifdef HICN_MAPME_ALLOW_NONEXISTING_FIB_ENTRY
      /*
       * This might happen for a node hosting a producer which has moved.
       * Destroying the face has led to removing all corresponding FIB
       * entries. In that case, we need to correctly restore the FIB entries.
       */
      DEBUG ("Re-creating FIB entry with next hop on connection")
#error "not implemented"
#else
      //ERROR("Received IU for non-existing FIB entry");
      return false;
#endif /* HICN_MAPME_ALLOW_NONEXISTING_FIB_ENTRY */

    }

#ifdef HICN_MAPME_ALLOW_LOCATORS
  if (!dpo_is_hicn ((dpo)))
    {
      /* We have an IP DPO */
      WARN ("Not implemented yet.");
      return false;
    }
#endif

  /* Process the hICN DPO */
  const hicn_dpo_vft_t *dpo_vft = hicn_dpo_get_vft (dpo->dpoi_type);
  hicn_mapme_tfib_t *tfib =
    TFIB (dpo_vft->hicn_dpo_get_ctx (dpo->dpoi_index));
  fib_seq = tfib->seq;

  /* Three cases on the IU sequence number vs the FIB's current one */
  if (params.seq > fib_seq)
    {
      /* Newer info: producer has moved; ingress face becomes the next hop */
      DEBUG
	("Higher sequence number than FIB %d > %d, updating seq and next hops",
	 params.seq, fib_seq);

      /* This has to be done first to allow processing ack */
      tfib->seq = params.seq;

      // in_face and next_hops are face_id_t

      /* Remove ingress face from TFIB in case it was present */
      hicn_mapme_tfib_del (tfib, in_face);

      /* Move next hops to TFIB... but in_face... */
      for (u8 pos = 0; pos < tfib->entry_count; pos++)
	{
	  if (dpo_cmp (&tfib->next_hops[pos], in_face) == 0)
	    continue;
	  hicn_mapme_tfib_add (tfib, &tfib->next_hops[pos]);
	}

      /* ... and set ingress face as next_hop */
      hicn_mapme_nh_set (tfib, in_face);

      /* We transmit both the prefix and the full dpo (type will be needed to pick the right transmit node */
      retx_t *retx =
	vlib_process_signal_event_data (vm,
					hicn_mapme_eventmgr_process_node.
					index,
					HICN_MAPME_EVENT_FACE_NH_SET, 1,
					sizeof (retx_t));
      *retx = (retx_t)
      {
      .prefix = prefix,.dpo = *dpo};

    }
  else if (params.seq == fib_seq)
    {
      /* Same info: add ingress face as an additional next hop */
      DEBUG ("Same sequence number than FIB %d > %d, adding next hop",
	     params.seq, fib_seq);

      /* Remove ingress face from TFIB in case it was present */
      hicn_mapme_tfib_del (tfib, in_face);

      /* Add ingress face to next hops */
      hicn_mapme_nh_add (tfib, in_face);

      /* Multipath, multihoming, multiple producers or duplicate interest */
      retx_t *retx =
	vlib_process_signal_event_data (vm,
					hicn_mapme_eventmgr_process_node.
					index,
					HICN_MAPME_EVENT_FACE_NH_ADD, 1,
					sizeof (retx_t));
      *retx = (retx_t)
      {
      .prefix = prefix,.dpo = *dpo};
    }
  else				// params.seq < fib_seq
    {
      /*
       * face is propagating outdated information, we can just consider it as a
       * prevHops
       */
      hicn_mapme_tfib_add (tfib, in_face);

      retx_t *retx =
	vlib_process_signal_event_data (vm,
					hicn_mapme_eventmgr_process_node.
					index,
					HICN_MAPME_EVENT_FACE_PH_ADD, 1,
					sizeof (retx_t));
      *retx = (retx_t)
      {
      .prefix = prefix,.dpo = *dpo};
    }

  /* We just raise events, the event_mgr is in charge of forging packet. */

  return true;

//ERR_ACK_CREATE:
ERR_PARSE:
  return false;
}
+
+vlib_node_registration_t hicn_mapme_ctrl_node;
+
+static uword
+hicn_mapme_ctrl_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ hicn_buffer_t *hb;
+ hicn_mapme_ctrl_next_t next_index;
+ u32 n_left_from, *from, *to_next;
+ n_left_from = frame->n_vectors;
+ dpo_id_t in_face;
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0) // buffers in the current frame
+ {
+ u32 n_left_to_next;
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t *b0;
+
+ /* speculatively enqueue b0 to the current next frame */
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+ b0 = vlib_get_buffer (vm, bi0);
+ hb = hicn_get_buffer (b0);
+
+ /* This determines the next node on which the ack will be sent back */
+ u32 next0 = hicn_mapme_get_dpo_iface_node (&hb->face_dpo_id);
+
+ /* Preprocessing is needed to precompute in the dpo the next node
+ * that will have to be followed by regular interests when being
+ * forwarder on a given next hop
+ */
+ preprocess_in_face (hb->type, &hb->face_dpo_id, &in_face);
+ hicn_mapme_process_ctrl (vm, b0, &in_face);
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+ n_left_to_next, bi0, next0);
+
+ }
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+ // vlib_node_increment_counter (vm, hicn_mapme_ctrl_node.index,
+ // HICN_MAPME_CTRL_ERROR_SWAPPED, pkts_swapped);
+ return frame->n_vectors;
+}
+
/* packet trace format function */
/*
 * Render one hicn_mapme_ctrl_trace_t record; va_arg order (vm, node, record)
 * is fixed by the vlib trace framework.
 */
static u8 *
hicn_mapme_ctrl_format_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  hicn_mapme_ctrl_trace_t *t = va_arg (*args, hicn_mapme_ctrl_trace_t *);

  s = format (s, "MAPME_CTRL: pkt: %d, sw_if_index %d, next index %d",
	      (int) t->pkt_type, t->sw_if_index, t->next_index);
  return (s);
}
+
+
/*
 * Node registration for the MAP-Me node processing special interests (IU)
 */
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (hicn_mapme_ctrl_node) =
{
  .function = hicn_mapme_ctrl_node_fn,
  .name = "hicn-mapme-ctrl",
  .vector_size = sizeof (u32),
  .runtime_data_bytes = sizeof (hicn_mapme_ctrl_runtime_t),
  .format_trace = hicn_mapme_ctrl_format_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN (hicn_mapme_ctrl_error_strings),
  .error_strings = hicn_mapme_ctrl_error_strings,
  .n_next_nodes = HICN_MAPME_CTRL_N_NEXT,
  .next_nodes =
  {
    /*
     * Control packets are not forwarded by this node, but sent by the Event
     * Manager. This node is only responsible for sending ACK back,
     * Acks are like data packets are output on iface's
     */
    [HICN_MAPME_CTRL_NEXT_IP4_OUTPUT] = "hicn-iface-ip4-output",
    [HICN_MAPME_CTRL_NEXT_IP6_OUTPUT] = "hicn-iface-ip6-output",
    [HICN_MAPME_CTRL_NEXT_UDP46_OUTPUT] = "hicn-iface-udp4-output",
    [HICN_MAPME_CTRL_NEXT_UDP66_OUTPUT] = "hicn-iface-udp6-output",
    [HICN_MAPME_CTRL_NEXT_ERROR_DROP] = "error-drop",
  },
};
/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/hicn-plugin/src/mapme_eventmgr.c b/hicn-plugin/src/mapme_eventmgr.c
new file mode 100755
index 000000000..5d5916403
--- /dev/null
+++ b/hicn-plugin/src/mapme_eventmgr.c
@@ -0,0 +1,559 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hicn.h"
+#include "strategy_dpo_ctx.h"
+#include "mapme.h"
+#include "mapme_eventmgr.h"
+#include "strategies/dpo_mw.h"
+
+#include <vnet/fib/ip4_fib.h>
+#include <vnet/fib/ip6_fib.h>
+
+/* Period (seconds) of the retransmission timer when armed */
+#define DEFAULT_TIMEOUT 1.0 /* s */
+
+/* Global MAP-Me state (vlib main pointer + log class) */
+hicn_mapme_main_t mapme_main;
+
+/* NOTE(review): retx_pool / retx_hash are defined but never referenced in
+ * this file — possibly reserved for future use; confirm before removing. */
+hicn_prefix_t *retx_pool;
+uword *retx_hash;
+
+/* One-time MAP-Me initialization: register a dedicated log class and
+ * remember the vlib main for later use. */
+void
+hicn_mapme_init (vlib_main_t * vm)
+{
+  mapme_main.log_class = vlib_log_register_class ("hicn_mapme", 0);
+  mapme_main.vm = vm;
+}
+
+/* borrowed from vnet/fib/ip4_fib.c */
+
+typedef struct ip4_fib_show_walk_ctx_t_
+{
+  fib_node_index_t *ifsw_indicies;
+} ip4_fib_show_walk_ctx_t;
+
+/* FIB walk callback: accumulate every visited entry index into the
+ * context vector; the walk always runs to completion. */
+static fib_table_walk_rc_t
+ip4_fib_show_walk_cb (fib_node_index_t fib_entry_index, void *arg)
+{
+  ip4_fib_show_walk_ctx_t *walk_ctx = arg;
+
+  vec_add1 (walk_ctx->ifsw_indicies, fib_entry_index);
+  return FIB_TABLE_WALK_CONTINUE;
+}
+
+/* borrowed from vnet/fib/ip6_fib.c */
+
+typedef struct ip6_fib_show_ctx_t_
+{
+  fib_node_index_t *entries;
+} ip6_fib_show_ctx_t;
+
+/* FIB walk callback: collect each visited entry index into the context
+ * vector; never stops the walk early. */
+static fib_table_walk_rc_t
+ip6_fib_table_show_walk (fib_node_index_t fib_entry_index, void *arg)
+{
+  ip6_fib_show_ctx_t *walk_ctx = arg;
+
+  vec_add1 (walk_ctx->entries, fib_entry_index);
+  return FIB_TABLE_WALK_CONTINUE;
+}
+
+/*
+ * Inspect one FIB entry: if any bucket of its load-balance dpo holds an
+ * hICN dpo, print the entry's prefix on the CLI.  Entries whose
+ * forwarding contribution is not a load-balance dpo are ignored.
+ */
+void
+hicn_mapme_process_fib_entry (vlib_main_t * vm, dpo_id_t face,
+			      const fib_node_index_t * fib_entry_index)
+{
+  const dpo_id_t *lb_dpo_id;
+  load_balance_t *lb;
+  dpo_id_t *bucket_dpo;
+  fib_entry_t *fib_entry;
+
+  lb_dpo_id = fib_entry_contribute_ip_forwarding (*fib_entry_index);
+
+  /* Only load-balance dpos are expected here */
+  if (lb_dpo_id->dpoi_type != DPO_LOAD_BALANCE)
+    return;
+
+  lb = load_balance_get (lb_dpo_id->dpoi_index);
+
+  for (int bucket = 0; bucket < lb->lb_n_buckets; bucket++)
+    {
+      /* load_balance_get_bucket_i returns a const pointer; un-const it */
+      bucket_dpo = (dpo_id_t *) load_balance_get_bucket_i (lb, bucket);
+
+      if (dpo_is_hicn (bucket_dpo))
+	{
+	  fib_entry = fib_entry_get (*fib_entry_index);
+	  vlib_cli_output (vm, "set face pending %U", format_fib_prefix,
+			   &fib_entry->fe_prefix);
+	}
+    }
+}
+
+/*
+ * Walk all IPv4 FIB tables and pass every entry to
+ * hicn_mapme_process_fib_entry () for the given face.
+ */
+void
+hicn_mapme_process_ip4_fib (vlib_main_t * vm, dpo_id_t face)
+{
+  ip4_main_t *im4 = &ip4_main;
+  fib_table_t *fib_table;
+  /* Filters inherited from the borrowed show-fib code; with these
+   * constant values neither "continue" guard below can fire. */
+  int table_id = -1, fib_index = ~0;
+
+  /* *INDENT-OFF* */
+  pool_foreach (fib_table, im4->fibs,
+		({
+		  ip4_fib_t *fib = pool_elt_at_index(im4->v4_fibs, fib_table->ft_index);
+
+		  if (table_id >= 0 && table_id != (int)fib->table_id)
+		    continue;
+		  if (fib_index != ~0 && fib_index != (int)fib->index)
+		    continue;
+
+		  /* Entry indices are first collected into a vector, then
+		   * processed once the walk over this table is done. */
+		  fib_node_index_t *fib_entry_index;
+		  ip4_fib_show_walk_ctx_t ctx = {
+		    .ifsw_indicies = NULL,
+		  };
+
+		  ip4_fib_table_walk(fib, ip4_fib_show_walk_cb, &ctx);
+		  //vec_sort_with_function(ctx.ifsw_indicies, fib_entry_cmp_for_sort);
+
+		  vec_foreach(fib_entry_index, ctx.ifsw_indicies)
+		  {
+		    hicn_mapme_process_fib_entry(vm, face, fib_entry_index);
+		  }
+
+		  vec_free(ctx.ifsw_indicies);
+		}));
+  /* *INDENT-ON* */
+}
+
+/*
+ * Walk all IPv6 FIB tables (skipping link-local tables) and pass every
+ * entry to hicn_mapme_process_fib_entry () for the given face.
+ */
+void
+hicn_mapme_process_ip6_fib (vlib_main_t * vm, dpo_id_t face)
+{
+  /* Walk IPv6 FIB */
+  ip6_main_t *im6 = &ip6_main;
+  fib_table_t *fib_table;
+  ip6_fib_t *fib;
+  /* Filters inherited from the borrowed show-fib code; with these
+   * constant values neither of the first two "continue" guards can fire. */
+  int table_id = -1, fib_index = ~0;
+
+  /* *INDENT-OFF* */
+  pool_foreach (fib_table, im6->fibs,
+		({
+		  fib = pool_elt_at_index(im6->v6_fibs, fib_table->ft_index);
+
+		  if (table_id >= 0 && table_id != (int)fib->table_id)
+		    continue;
+		  if (fib_index != ~0 && fib_index != (int)fib->index)
+		    continue;
+		  /* Link-local tables are not of interest here */
+		  if (fib_table->ft_flags & FIB_TABLE_FLAG_IP6_LL)
+		    continue;
+
+		  /* Collect entry indices first, then process them */
+		  fib_node_index_t *fib_entry_index;
+		  ip6_fib_show_ctx_t ctx = {
+		    .entries = NULL,
+		  };
+
+		  ip6_fib_table_walk(fib->index, ip6_fib_table_show_walk, &ctx);
+		  //vec_sort_with_function(ctx.entries, fib_entry_cmp_for_sort);
+
+		  vec_foreach(fib_entry_index, ctx.entries)
+		  {
+		    hicn_mapme_process_fib_entry(vm, face, fib_entry_index);
+		  }
+
+		  vec_free(ctx.entries);
+
+		}));
+  /* *INDENT-ON* */
+}
+
+
+/**
+ * Callback called every time a new face is created (not including app
+ * faces).  Scans both the IPv4 and IPv6 FIBs for entries whose forwarding
+ * involves hICN dpos.
+ */
+void
+hicn_mapme_on_face_added (vlib_main_t * vm, dpo_id_t face)
+{
+  hicn_mapme_process_ip4_fib (vm, face);
+  hicn_mapme_process_ip6_fib (vm, face);
+}
+
+/*
+ * We need a retransmission pool holding all necessary information for crafting
+ * special interests, thus including both the DPO and the prefix associated to
+ * it.
+ */
+#define NUM_RETX_ENTRIES 100
+#define NUM_RETX_SLOT 2
+/* Argument parenthesized so that NEXT_SLOT(a + b) expands correctly */
+#define NEXT_SLOT(cur) (1 - (cur))
+/* Shorthands over locals of the process function below; they assume
+ * retx_array, retx_len and cur are in scope at the expansion site. */
+#define CUR retx_array[cur]
+#define NXT retx_array[NEXT_SLOT(cur)]
+#define CURLEN retx_len[cur]
+#define NXTLEN retx_len[NEXT_SLOT(cur)]
+
+/*
+ * Allocate one vlib buffer, stamp it with face/type information, enqueue
+ * it on @node_index, and return a pointer to its payload for the caller
+ * to fill in.
+ *
+ * NOTE(review): the buffer is handed to the target node *before* the
+ * caller writes the MAP-Me header into it; this relies on the frame only
+ * being dispatched after the current process yields — confirm.
+ * NOTE(review): the addr parameter is currently unused here.
+ *
+ * @return pointer to the packet payload, or NULL on allocation failure.
+ */
+static_always_inline void *
+get_packet_buffer (vlib_main_t * vm, u32 node_index, u32 dpoi_index,
+		   ip46_address_t * addr, hicn_type_t type)
+{
+  vlib_frame_t *f;
+  vlib_buffer_t *b;		// for newly created packet
+  u32 *to_next;
+  u32 bi;
+  u8 *buffer;
+
+  if (vlib_buffer_alloc (vm, &bi, 1) != 1)
+    {
+      clib_warning ("buffer allocation failure");
+      return NULL;
+    }
+
+  /* Create a new packet from scratch */
+  b = vlib_get_buffer (vm, bi);
+  ASSERT (b->current_data == 0);
+
+  /* Face information for next hop node index */
+  vnet_buffer (b)->ip.adj_index[VLIB_TX] = dpoi_index;
+  hicn_get_buffer (b)->type = type;
+
+  /* Enqueue the packet right now */
+  f = vlib_get_frame_to_node (vm, node_index);
+  to_next = vlib_frame_vector_args (f);
+  to_next[0] = bi;
+  f->n_vectors = 1;
+  vlib_put_frame_to_node (vm, node_index, f);
+
+  // pointer to IP layer ? do we need to prepare for ethernet ???
+  buffer = vlib_buffer_get_current (b);
+  /* Fixed MAP-Me header length, chosen by address family */
+  b->current_length =
+    (type.l1 == IPPROTO_IPV6) ? HICN_MAPME_V6_HDRLEN : HICN_MAPME_V4_HDRLEN;
+
+  return buffer;
+}
+
+/*
+ * Build and send one MAP-Me special interest for @prefix through @face.
+ *
+ * @return true on success; false if the output node could not be
+ *         determined, the buffer could not be allocated, or packet
+ *         creation failed.
+ */
+static_always_inline bool
+hicn_mapme_send_message (vlib_main_t * vm, const hicn_prefix_t * prefix,
+			 mapme_params_t * params, dpo_id_t * face)
+{
+  /* Signed on purpose: with the original size_t, the "n <= 0" error
+   * check below could never catch a negative return value. */
+  ssize_t n;
+
+  /* This should be retrieved from face information */
+  DEBUG ("Retransmission for prefix %U seq=%d", format_ip46_address,
+	 &prefix->name, IP46_TYPE_ANY, params->seq);
+
+  char *node_name = hicn_mapme_get_dpo_face_node (face);
+  if (!node_name)
+    {
+      clib_warning
+	("Could not determine next node for sending MAP-Me packet");
+      return false;
+    }
+
+  vlib_node_t *node = vlib_get_node_by_name (vm, (u8 *) node_name);
+  u32 node_index = node->index;
+
+  u8 *buffer =
+    get_packet_buffer (vm, node_index, face->dpoi_index,
+		       (ip46_address_t *) prefix,
+		       (params->protocol ==
+			IPPROTO_IPV6) ? HICN_TYPE_IPV6_ICMP :
+		       HICN_TYPE_IPV4_ICMP);
+  /* get_packet_buffer () returns NULL on buffer allocation failure */
+  if (!buffer)
+    return false;
+
+  n = hicn_mapme_create_packet (buffer, prefix, params);
+  if (n <= 0)
+    {
+      clib_warning ("Could not create MAP-Me packet");
+      return false;
+    }
+
+  return true;
+}
+
+/*
+ * Send an update for @prefix either to all TFIB previous hops (send_all)
+ * or only to the most recently added one.
+ */
+static_always_inline void
+hicn_mapme_send_updates (vlib_main_t * vm, hicn_prefix_t * prefix,
+			 dpo_id_t dpo, bool send_all)
+{
+  const hicn_dpo_vft_t *dpo_vft = hicn_dpo_get_vft (dpo.dpoi_type);
+  hicn_mapme_tfib_t *tfib = TFIB (dpo_vft->hicn_dpo_get_ctx (dpo.dpoi_index));
+
+  /* Check before any dereference: the original computed tfib_last_idx
+   * from tfib->tfib_entry_count *before* this NULL check. */
+  if (!tfib)
+    {
+      DEBUG ("NULL TFIB entry id=%d", dpo.dpoi_index);
+      return;
+    }
+
+  /* TFIB entries are stacked at the tail of the next_hops array */
+  u8 tfib_last_idx = HICN_PARAM_FIB_ENTRY_NHOPS_MAX - tfib->tfib_entry_count;
+
+  mapme_params_t params = {
+    .protocol = ip46_address_is_ip4 (&prefix->name)
+      ? IPPROTO_IP : IPPROTO_IPV6,
+    .type = UPDATE,
+    .seq = tfib->seq,
+  };
+
+  if (send_all)
+    {
+      for (u8 pos = tfib_last_idx; pos < HICN_PARAM_FIB_ENTRY_NHOPS_MAX;
+	   pos++)
+	{
+	  hicn_mapme_send_message (vm, prefix, &params,
+				   &tfib->next_hops[pos]);
+	}
+    }
+  else
+    {
+      hicn_mapme_send_message (vm, prefix, &params,
+			       &tfib->next_hops[tfib_last_idx]);
+    }
+}
+
+/*
+ * MAP-Me event manager process node.
+ *
+ * Receives face/FIB events posted by the forwarding nodes, sends the
+ * corresponding MAP-Me updates, and retransmits them on a two-slot timer
+ * wheel until acknowledged: entries pending in the current slot (CUR) are
+ * resent on timeout and rescheduled into the next slot (NXT).
+ */
+static uword
+hicn_mapme_eventmgr_process (vlib_main_t * vm,
+			     vlib_node_runtime_t * rt, vlib_frame_t * f)
+{
+  f64 timeout = 0;		/* By default, no timer is run */
+  /* due_time must be initialized: it is read the first time the timer is
+   * armed, before any assignment; 0 makes that first read fall back to
+   * DEFAULT_TIMEOUT below (the original read it uninitialized). */
+  f64 current_time, due_time = 0;
+  u8 idle = 0;
+
+  retx_t retx_array[NUM_RETX_SLOT][NUM_RETX_ENTRIES];
+  u8 retx_len[NUM_RETX_SLOT] = { 0 };
+  u8 cur = 0;			/* current slot */
+
+  hicn_mapme_init (vm);
+
+  for (;;)
+    {
+      /* NOTE: returned timeout seems to always be 0 with get_event_data
+       * instead of get_event, and we thus need to reimplement timeout
+       * management on top, as done elsewhere in VPP code.
+       *
+       * The most probable event. For simplicity, for new faces, we pass
+       * the same retx_t with no prefix
+       */
+      if (timeout != 0)
+	{
+	  /* timeout = */ vlib_process_wait_for_event_or_clock (vm, timeout);
+	  current_time = vlib_time_now (vm);
+
+	  /*
+	   * As we don't accumulate errors, we allow for simple timer
+	   * management with no error correction accounting for elapsed
+	   * time.  Also, we only run a timer when there are pending
+	   * retransmissions.
+	   */
+	  timeout =
+	    (due_time >
+	     current_time) ? due_time - current_time : DEFAULT_TIMEOUT;
+	  due_time = current_time + timeout;
+	}
+      else
+	{
+	  vlib_process_wait_for_event (vm);
+	}
+
+      uword event_type = ~0;
+      void *event_data = vlib_process_get_event_data (vm, &event_type);
+
+      switch (event_type)
+	{
+	case HICN_MAPME_EVENT_FACE_ADD:
+	  {
+	    /*
+	     * A face has been added:
+	     * - In case of a local app face, we need to advertise a new
+	     *   prefix
+	     * - For another local face type, we need to advertise local
+	     *   prefixes and schedule retransmissions
+	     */
+	    retx_t *retx_events = event_data;
+	    for (u8 i = 0; i < vec_len (retx_events); i++)
+	      {
+		hicn_mapme_on_face_added (vm, retx_events[i].dpo);
+	      }
+	    idle = 0;
+	  }
+	  break;
+
+	case HICN_MAPME_EVENT_FACE_DEL:
+	  idle = 0;
+	  break;
+
+	case HICN_MAPME_EVENT_FACE_NH_SET:
+	  {
+	    /*
+	     * An hICN FIB entry has been modified. All operations so far
+	     * have been processed in the nodes. Here we need to track
+	     * retransmissions upon timeout: we mark the FIB entry as
+	     * pending in the second-to-next slot
+	     */
+
+	    /* Mark FIB entry as pending for second-to-next slot */
+	    retx_t *retx_events = event_data;
+	    for (u8 i = 0; i < vec_len (retx_events); i++)
+	      {
+		/*
+		 * retx_events[i] corresponds to the dpoi_index of the
+		 * (T)FIB structure that has been modified. Multiple
+		 * successive events might correspond to the same entry.
+		 *
+		 * The FIB entry has a new next hop, and its TFIB section
+		 * has:
+		 * - eventually previous prev hops for which a IU with a
+		 *   lower seqno has been sent
+		 * - the prev hops that have just been added.
+		 *
+		 * We don't distinguish any and just send an updated IU to
+		 * all of them. The retransmission of the latest IU to all
+		 * facilitates the matching of ACKs to a single seqno which
+		 * is the one stored in the FIB.
+		 *
+		 * Since we retransmit to all prev hops, we can remove this
+		 * (T)FIB entry for the check at the end of the current
+		 * slot.
+		 */
+		retx_t *retx = (retx_t *) & retx_events[i];
+
+		/*
+		 * Transmit IU for all TFIB entries with latest seqno (we
+		 * have at least one for sure!)
+		 */
+		hicn_mapme_send_updates (vm, &retx->prefix, retx->dpo, true);
+
+		/* Delete entry_id from retransmissions in the current slot
+		 * (if present).  dpo_cmp () is memcmp-like: 0 means equal.
+		 * The original treated any non-zero result as a match,
+		 * which invalidated every entry *except* the updated one. */
+		for (u8 j = 0; j < CURLEN; j++)
+		  if (dpo_cmp (&(CUR[j].dpo), &retx->dpo) == 0)
+		    {
+		      CUR[j].dpo.dpoi_index = ~0;	/* sufficient */
+		    }
+
+		/* ... and schedule it for next slot (if not already) */
+		u8 j;
+		for (j = 0; j < NXTLEN; j++)
+		  if (dpo_cmp (&NXT[j].dpo, &retx->dpo) == 0)
+		    break;
+		if (j == NXTLEN)	/* not found */
+		  {
+		    /* Guard the fixed-size slot against overflow */
+		    if (NXTLEN < NUM_RETX_ENTRIES)
+		      NXT[NXTLEN++] = *retx;
+		    else
+		      clib_warning ("hicn mapme: retx slot full, dropping");
+		  }
+	      }
+	    idle = 0;
+	  }
+	  break;
+
+	case HICN_MAPME_EVENT_FACE_NH_ADD:
+	  /*
+	   * As per the description of states, this event should add the
+	   * face to the list of next hops, and eventually remove it from
+	   * TFIB.  This corresponds to the multipath case.
+	   *
+	   * In all cases, we assume the propagation was already done when
+	   * the first interest with the same sequence number was received,
+	   * so we stop here.  No change in TFIB = no IU to send.
+	   *
+	   * No change in timers.
+	   */
+	  vlib_cli_output (vm, "[hicn_event_mgr] ADD NEXT HOP IN FIB");
+
+	  /* Add ingress face as next hop */
+	  idle = 0;
+
+	  break;
+
+	case HICN_MAPME_EVENT_FACE_PH_ADD:
+	  /* Back-propagation, interesting even for IN (desync) */
+	  {
+	    retx_t *retx_events = event_data;
+	    for (u8 i = 0; i < vec_len (retx_events); i++)
+	      {
+		hicn_mapme_send_updates (vm, &retx_events[i].prefix,
+					 retx_events[i].dpo, false);
+	      }
+	    idle = 0;
+	  }
+	  break;
+
+	case HICN_MAPME_EVENT_FACE_PH_DEL:
+	  /* Ack : remove an element from TFIB */
+	  break;
+
+	case ~0:
+	  /* Timeout occurred, we have to retransmit IUs for all pending
+	   * prefixes having entries in TFIB
+	   *
+	   * timeouts are slotted
+	   * |     |     |     |
+	   *
+	   *      ^
+	   *      +- event occurred
+	   *      new face, wait for the second next
+	   *      (having two arrays and swapping cur and next)
+	   *      retx : put in next
+	   */
+	  idle += 1;
+	  for (u8 pos = 0; pos < CURLEN; pos++)
+	    {
+	      retx_t *retx = &CUR[pos];
+
+	      if (retx->dpo.dpoi_index == ~0)	/* deleted entry */
+		continue;
+
+	      const hicn_dpo_vft_t *dpo_vft =
+		hicn_dpo_get_vft (retx->dpo.dpoi_type);
+	      hicn_mapme_tfib_t *tfib =
+		TFIB (dpo_vft->hicn_dpo_get_ctx (retx->dpo.dpoi_index));
+	      if (!tfib)
+		{
+		  DEBUG ("NULL TFIB entry for dpoi_index=%d",
+			 retx->dpo.dpoi_index);
+		  continue;
+		}
+
+	      hicn_mapme_send_updates (vm, &retx->prefix, retx->dpo, true);
+
+	      /*
+	       * We did some retransmissions, so let's reschedule a check
+	       * in the next slot
+	       */
+	      if (NXTLEN < NUM_RETX_ENTRIES)
+		NXT[NXTLEN++] = CUR[pos];
+	      else
+		clib_warning ("hicn mapme: retx slot full, dropping");
+	      idle = 0;
+	    }
+
+	  /* Reset events in this slot and prepare for next one */
+	  CURLEN = 0;
+	  cur = NEXT_SLOT (cur);
+
+	  /* After two empty slots, we disable the timer */
+
+	  break;
+	}
+
+      if (event_data)
+	vlib_process_put_event_data (vm, event_data);
+
+      timeout = (idle > 1) ? 0 : DEFAULT_TIMEOUT;
+
+      // if (vlib_process_suspend_time_is_zero (timeout)) { ... }
+
+    }
+
+  /* NOTREACHED */
+  return 0;
+}
+
+/* Not static as we need to access it from hicn_face */
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (hicn_mapme_eventmgr_process_node) = {
+  .function = hicn_mapme_eventmgr_process,
+  .type = VLIB_NODE_TYPE_PROCESS,
+  .name = "mapme-eventmgr-process",
+  .process_log2_n_stack_bytes = 16,
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/hicn-plugin/src/mapme_eventmgr.h b/hicn-plugin/src/mapme_eventmgr.h
new file mode 100755
index 000000000..2f8106d6c
--- /dev/null
+++ b/hicn-plugin/src/mapme_eventmgr.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HICN_MAPME_EVENTMGR_H__
+#define __HICN_MAPME_EVENTMGR_H__
+
+#include <vlib/vlib.h>		// vlib_node_registration_t (vlib/node.h)
+
+/*
+ * Structure carrying all necessary information for managing Special Interest
+ * (re)transmissions.
+ *
+ * NOTE(review): hicn_prefix_t and dpo_id_t are not declared by the header
+ * included above; includers must bring them in first (e.g. via hicn.h).
+ */
+typedef struct
+{
+  hicn_prefix_t prefix;
+  dpo_id_t dpo;
+} retx_t;
+
+/* Fold a 32-bit value down to 16 significant low bits XORed with the value
+ * shifted left; arguments parenthesized so compound expressions work. */
+#define HASH32(x) ((u16) (x) ^ ((x) << 16))
+
+/**
+ * @brief This is a process node reacting to face events.
+ */
+/* extern (not static): defined in mapme_eventmgr.c and referenced from
+ * hicn_face; without extern every includer got a tentative definition. */
+extern vlib_node_registration_t hicn_mapme_eventmgr_process_node;
+
+/**
+ * @brief Initialize MAP-Me on forwarder
+ * @params vm - vlib_main_t pointer
+ */
+void hicn_mapme_init (vlib_main_t * vm);
+
+#endif /* __HICN_MAPME_EVENTMGR_H__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/hicn-plugin/src/mgmt.c b/hicn-plugin/src/mgmt.c
new file mode 100755
index 000000000..b992ba15c
--- /dev/null
+++ b/hicn-plugin/src/mgmt.c
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vlib/vlib.h>
+#include <vppinfra/error.h>
+
+#include "hicn.h"
+#include "infra.h"
+#include "mgmt.h"
+
+/* define message IDs */
+#include "hicn_msg_enum.h"
+
+/* Shared routine between API and CLI, leveraging the API message
+ * structure.
+ *
+ * Counters are accumulated across all vlib mains in HOST byte order and
+ * converted to network order once at the end: the previous code summed
+ * values already passed through clib_host_to_net_u64, which produces
+ * wrong totals on little-endian hosts.
+ *
+ * @return HICN_ERROR_NONE (always succeeds)
+ */
+int
+hicn_mgmt_node_stats_get (vl_api_hicn_api_node_stats_get_reply_t * rmp)
+{
+  u64 pkts_processed = 0;
+  u64 pkts_interest_count = 0;
+  u64 pkts_data_count = 0;
+  u64 pkts_from_cache_count = 0;
+  u64 interests_aggregated = 0;
+  u64 interests_retx = 0;
+
+  /* Counters not collected below are reported as zero */
+  rmp->pkts_no_pit_count = 0;
+  rmp->pit_expired_count = 0;
+  rmp->cs_expired_count = 0;
+  rmp->cs_lru_count = 0;
+  rmp->pkts_drop_no_buf = 0;
+  rmp->pit_entries_count =
+    clib_host_to_net_u64 (hicn_main.pitcs.pcs_pit_count);
+  rmp->cs_entries_count = clib_host_to_net_u64 (hicn_main.pitcs.pcs_cs_count);
+  rmp->cs_entries_ntw_count =
+    clib_host_to_net_u64 (hicn_main.pitcs.policy_state.count);
+
+  vlib_error_main_t *em;
+  vlib_node_t *n;
+  /* *INDENT-OFF* */
+  foreach_vlib_main ((
+    {
+      em = &this_vlib_main->error_main;
+
+      /* interest pcslookup: processed + interests */
+      n = vlib_get_node (this_vlib_main, hicn_interest_pcslookup_node.index);
+      u32 node_cntr_base_idx = n->error_heap_index;
+      pkts_processed += em->counters[node_cntr_base_idx +
+				     HICNFWD_ERROR_PROCESSED];
+      pkts_interest_count += em->counters[node_cntr_base_idx +
+					  HICNFWD_ERROR_INTERESTS];
+
+      /* data pcslookup: processed + datas (looked up once, not twice) */
+      n = vlib_get_node (this_vlib_main, hicn_data_pcslookup_node.index);
+      node_cntr_base_idx = n->error_heap_index;
+      pkts_processed += em->counters[node_cntr_base_idx +
+				     HICNFWD_ERROR_PROCESSED];
+      pkts_data_count += em->counters[node_cntr_base_idx +
+				      HICNFWD_ERROR_DATAS];
+
+      /* interest hit-CS: cache hits */
+      n = vlib_get_node (this_vlib_main, hicn_interest_hitcs_node.index);
+      node_cntr_base_idx = n->error_heap_index;
+      pkts_from_cache_count += em->counters[node_cntr_base_idx +
+					    HICNFWD_ERROR_CACHED];
+
+      /* interest hit-PIT: aggregation + retransmissions */
+      n = vlib_get_node (this_vlib_main, hicn_interest_hitpit_node.index);
+      node_cntr_base_idx = n->error_heap_index;
+      interests_aggregated += em->counters[node_cntr_base_idx +
+					   HICNFWD_ERROR_INTEREST_AGG];
+      interests_retx += em->counters[node_cntr_base_idx +
+				     HICNFWD_ERROR_INT_RETRANS];
+    }));
+  /* *INDENT-ON* */
+
+  rmp->pkts_processed = clib_host_to_net_u64 (pkts_processed);
+  rmp->pkts_interest_count = clib_host_to_net_u64 (pkts_interest_count);
+  rmp->pkts_data_count = clib_host_to_net_u64 (pkts_data_count);
+  rmp->pkts_from_cache_count = clib_host_to_net_u64 (pkts_from_cache_count);
+  rmp->interests_aggregated = clib_host_to_net_u64 (interests_aggregated);
+  rmp->interests_retx = clib_host_to_net_u64 (interests_retx);
+
+  return (HICN_ERROR_NONE);
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/mgmt.h b/hicn-plugin/src/mgmt.h
new file mode 100755
index 000000000..08b1de089
--- /dev/null
+++ b/hicn-plugin/src/mgmt.h
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HICN_MGMT_H__
+#define __HICN_MGMT_H__
+
+#include <vppinfra/error.h>
+#include "faces/face.h"
+#include "hicn_api.h"
+
+/* Aggregated forwarder statistics, host byte order (see the
+ * foreach_hicnfwd_error table below for the counter meanings) */
+typedef struct icn_stats_s
+{
+  u32 pkts_processed;		/* hICN packets processed */
+  u32 pkts_interest_count;	/* interests forwarded */
+  u32 pkts_data_count;		/* data msgs forwarded */
+  u32 pkts_from_cache_count;	/* cached data hits */
+  u32 pkts_no_pit_count;	/* no-PIT-entry drops */
+  u32 pit_expired_count;	/* expired PIT entries */
+  u32 cs_expired_count;		/* expired CS entries */
+  u32 no_bufs_count;		/* packet buffer allocation failures */
+  u32 pkts_interest_agg;	/* interests aggregated */
+  u32 pkts_int_retrans;		/* interest retransmissions */
+  u32 pit_int_count;		/* interests currently in the PIT */
+  u32 pit_cs_count;		/* entries currently in the CS */
+} icn_stats_t;
+
+/* Face management operations (API/CLI) */
+typedef enum
+{
+  HICN_MGMT_FACE_OP_NONE = 0,
+  HICN_MGMT_FACE_OP_CREATE,
+  HICN_MGMT_FACE_OP_DELETE,
+  HICN_MGMT_FACE_OP_ADMIN,
+  HICN_MGMT_FACE_OP_HELLO,
+} hicn_mgmt_face_op_e;
+
+
+/* Punting rule management operations */
+typedef enum
+{
+  HICN_MGMT_PUNTING_OP_NONE = 0,
+  HICN_MGMT_PUNTING_OP_CREATE,
+  HICN_MGMT_PUNTING_OP_DELETE,
+  HICN_MGMT_PUNTING_OP_ENABLE,
+  HICN_MGMT_PUNTING_OP_DISABLE
+} hicn_mgmt_punting_op_e;
+
+/* MAP-Me management operations */
+typedef enum
+{
+  HICN_MGMT_MAPME_OP_NONE = 0,
+  HICN_MGMT_MAPME_OP_CREATE,
+  HICN_MGMT_MAPME_OP_DELETE,
+  HICN_MGMT_MAPME_OP_ENABLE,
+  HICN_MGMT_MAPME_OP_DISABLE
+} hicn_mgmt_mapme_op_e;
+
+/* Address family selector used by the management API */
+typedef enum
+{
+  HICN_ADDRESS_TYPE_NONE,
+  HICN_ADDRESS_TYPE_V4,
+  HICN_ADDRESS_TYPE_V6
+} hicn_address_type_e;
+
+/*
+ * Utility to overwrite one error counter of an hICN node with an
+ * absolute value.
+ */
+always_inline void
+update_node_counter (vlib_main_t * vm, u32 node_idx, u32 counter_idx, u64 val)
+{
+  vlib_node_t *node = vlib_get_node (vm, node_idx);
+  vlib_error_main_t *em = &vm->error_main;
+
+  em->counters[node->error_heap_index + counter_idx] = val;
+}
+
+
+/*
+ * Stats for the forwarding node, which end up called "error" even though
+ * they aren't... (VPP exposes per-node counters through the error
+ * mechanism, hence the naming.)
+ */
+#define foreach_hicnfwd_error \
+ _(PROCESSED, "hICN packets processed") \
+ _(INTERESTS, "hICN interests forwarded") \
+ _(DATAS, "hICN data msgs forwarded") \
+ _(CACHED, "Cached data ") \
+ _(NO_PIT, "hICN no PIT entry drops") \
+ _(PIT_EXPIRED, "hICN expired PIT entries") \
+ _(CS_EXPIRED, "hICN expired CS entries") \
+ _(CS_LRU, "hICN LRU CS entries freed") \
+ _(NO_BUFS, "No packet buffers") \
+ _(INTEREST_AGG, "Interests aggregated") \
+ _(INTEREST_AGG_ENTRY, "Interest aggregated per entry") \
+ _(INT_RETRANS, "Interest retransmissions") \
+ _(INT_COUNT, "Interests in PIT") \
+ _(CS_COUNT, "CS total entries") \
+ _(CS_NTW_COUNT, "CS ntw entries") \
+ _(CS_APP_COUNT, "CS app entries") \
+ _(HASH_COLL_HASHTB_COUNT, "Collisions in Hash table")
+
+/* One HICNFWD_ERROR_* symbol per counter above, plus the total count */
+typedef enum
+{
+#define _(sym, str) HICNFWD_ERROR_##sym,
+  foreach_hicnfwd_error
+#undef _
+    HICNFWD_N_ERROR,
+} hicnfwd_error_t;
+
+/*
+ * Declarations
+ */
+clib_error_t *hicn_api_plugin_hookup (vlib_main_t * vm);
+
+int hicn_mgmt_node_stats_get (vl_api_hicn_api_node_stats_get_reply_t * rmp);
+
+#endif /* // __HICN_MGMT_H__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/params.h b/hicn-plugin/src/params.h
new file mode 100755
index 000000000..fc890f602
--- /dev/null
+++ b/hicn-plugin/src/params.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HICN_PARAM_H__
+#define __HICN_PARAM_H__
+
+/*
+ * Features
+ */
+#define HICN_FEATURE_CS 1	/* 1 enable, 0 disable */
+
+/*
+ * Face compile-time parameters
+ */
+#define HICN_PARAM_FACES_MAX 64
+
+/*
+ * Max length for hICN names, in bytes
+ */
+#define HICN_PARAM_HICN_NAME_LEN_MAX 20
+
+/* Max next-hops supported in a FIB entry */
+#define HICN_PARAM_FIB_ENTRY_NHOPS_MAX 5
+
+/* Default and limit on weight, whatever weight means */
+#define HICN_PARAM_FIB_ENTRY_NHOP_WGHT_DFLT 0x10
+#define HICN_PARAM_FIB_ENTRY_NHOP_WGHT_MAX 0xff
+
+/*
+ * PIT compile-time parameters.  Arithmetic expansions are parenthesized
+ * so the macros compose safely inside larger expressions.
+ */
+#define HICN_PARAM_PIT_ENTRIES_MIN 1024
+#define HICN_PARAM_PIT_ENTRIES_DFLT (1024 * 128)
+#define HICN_PARAM_PIT_ENTRIES_MAX (2 * 1024 * 1024)
+
+/* Aggregation limit (interest previous hops) */
+#define HICN_PARAM_PIT_ENTRY_PHOPS_MAX 516
+
+/* PIT lifetime limits; the API overrides these (seconds, long-float) */
+#define HICN_PARAM_PIT_LIFETIME_BOUND_MIN_SEC 0.100L
+#define HICN_PARAM_PIT_LIFETIME_BOUND_MAX_SEC 20.000L
+
+/* PIT lifetime params if not set at API (in mseconds, integer type) */
+#define HICN_PARAM_PIT_LIFETIME_DFLT_MIN_MS 200
+#define HICN_PARAM_PIT_LIFETIME_DFLT_DFLT_MS 20000
+#define HICN_PARAM_PIT_LIFETIME_DFLT_MAX_MS 20000
+
+/* Face CS reservation params, in packets */
+#define HICN_PARAM_FACE_MAX_CS_RESERVED 10000
+#define HICN_PARAM_FACE_MIN_CS_RESERVED 0
+#define HICN_PARAM_FACE_DFT_CS_RESERVED 1000
+
+/*
+ * CS compile-time parameters
+ */
+#define HICN_PARAM_CS_ENTRIES_MIN 0	/* can disable CS */
+#define HICN_PARAM_CS_ENTRIES_DFLT (4 * 1024)
+#define HICN_PARAM_CS_ENTRIES_MAX (1024 * 1024)
+
+#define HICN_PARAM_CS_LRU_DEFAULT (16 * 1024)
+
+/* CS lifetime defines, in mseconds, integer type */
+#define HICN_PARAM_CS_LIFETIME_MIN 100
+#define HICN_PARAM_CS_LIFETIME_DFLT (5 * 60 * 1000)	/* 300 seconds */
+#define HICN_PARAM_CS_LIFETIME_MAX (24 * 3600 * 1000)	/* 24 hours... */
+
+/* CS reserved portion for applications, in percent */
+#define HICN_PARAM_CS_RESERVED_APP 30
+
+/* Cloning parameters */
+/* ip4 */
+#define HICN_IP4_VERSION_HEADER_LENGTH 0x45
+#define HICN_IP4_PROTOCOL IP_PROTOCOL_TCP
+#define HICN_IP4_TTL_DEFAULT 128
+
+/* ip6 */
+#define IPV6_DEFAULT_VERSION 6
+#define IPV6_DEFAULT_TRAFFIC_CLASS 0
+#define IPV6_DEFAULT_FLOW_LABEL 0
+/* NOTE(review): the "HCIN" typo is kept for source compatibility with
+ * existing users.  Now fully parenthesized so it composes safely. */
+#define HCIN_IP6_VERSION_TRAFFIC_FLOW ((IPV6_DEFAULT_VERSION << 28) |	\
+				       (IPV6_DEFAULT_TRAFFIC_CLASS << 20) | \
+				       (IPV6_DEFAULT_FLOW_LABEL & 0xfffff))
+#define HICN_IP6_PROTOCOL IP_PROTOCOL_TCP
+#define HICN_IP6_HOP_LIMIT 0x40
+
+#endif /* // __HICN_PARAM_H__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/parser.h b/hicn-plugin/src/parser.h
new file mode 100755
index 000000000..cbc5696ba
--- /dev/null
+++ b/hicn-plugin/src/parser.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HICN_PARSER_H__
+#define __HICN_PARSER_H__
+
+#include <vlib/vlib.h>
+
+#include "hicn.h"
+#include "error.h"
+
+
+/*
+ * Key type codes for header, header tlvs, body tlvs, and child tlvs
+ */
+
+// FIXME(reuse lib struct, no more control ?)
+/* hICN packet kind, from the forwarder's viewpoint */
+enum hicn_pkt_type_e
+{
+  HICN_PKT_TYPE_INTEREST = 0,
+  HICN_PKT_TYPE_CONTENT = 1,
+};
+
+/*
+ * Parse an hICN interest packet in place: extract its name and detect the
+ * address family, and record the packet type in the buffer metadata.
+ *
+ * @param pkt      buffer holding the (IPv4 or IPv6) interest
+ * @param name     output: hICN name carried by the packet
+ * @param namelen  output: name length in bytes (family dependent)
+ * @param pkt_hdrp output: pointer to the packet header inside the buffer
+ * @param isv6     output: 1 for IPv6, 0 for IPv4
+ * @return HICN_ERROR_NONE, or HICN_ERROR_PARSER_PKT_INVAL if pkt is NULL
+ */
+always_inline int
+hicn_interest_parse_pkt (vlib_buffer_t * pkt, hicn_name_t * name,
+			 u16 * namelen, hicn_header_t ** pkt_hdrp, u8 * isv6)
+{
+  if (pkt == NULL)
+    return HICN_ERROR_PARSER_PKT_INVAL;
+  hicn_header_t *pkt_hdr = vlib_buffer_get_current (pkt);
+  *pkt_hdrp = pkt_hdr;
+  u8 *ip_pkt = vlib_buffer_get_current (pkt);
+  /* IP version nibble is 4 (0b0100) or 6 (0b0110); bit 1 discriminates */
+  u8 version = (pkt_hdr->v4.ip.version_ihl & 0xf0) >> 4;
+  *isv6 = ((version & 2) >> 1);
+  u8 ip_proto = (*isv6) * IPPROTO_IPV6;
+  u8 next_proto_offset = 6 + (1 - *isv6) * 3;
+  //in the ipv6 header the next header field is at byte 6
+  // in the ipv4 header the protocol field is at byte 9
+  hicn_type_t type = (hicn_type_t) { {
+				     .l4 = IPPROTO_NONE,.l3 =
+				     IPPROTO_NONE,.l2 =
+				     ip_pkt[next_proto_offset],.l1 =
+				     ip_proto}
+  };
+  hicn_get_buffer (pkt)->type = type;
+
+  /* Dispatch name extraction through the per-protocol vtable */
+  hicn_ops_vft[type.l1]->get_interest_name (type, &pkt_hdr->protocol, name);
+  *namelen = (1 - (*isv6)) * HICN_V4_NAME_LEN + (*isv6) * HICN_V6_NAME_LEN;
+
+  return HICN_ERROR_NONE;
+}
+
+/*
+ * Parse an hICN data packet in place: extract its name and detect the
+ * address family, and record the packet type in the buffer metadata.
+ *
+ * @param pkt      buffer holding the (IPv4 or IPv6) data packet
+ * @param name     output: hICN name carried by the packet
+ * @param namelen  output: name length in bytes (family dependent)
+ * @param pkt_hdrp output: pointer to the packet header inside the buffer
+ * @param isv6     output: 1 for IPv6, 0 for IPv4
+ * @return HICN_ERROR_NONE, or HICN_ERROR_PARSER_PKT_INVAL if pkt is NULL
+ */
+always_inline int
+hicn_data_parse_pkt (vlib_buffer_t * pkt, hicn_name_t * name,
+		     u16 * namelen, hicn_header_t ** pkt_hdrp, u8 * isv6)
+{
+  if (pkt == NULL)
+    return HICN_ERROR_PARSER_PKT_INVAL;
+  hicn_header_t *pkt_hdr = vlib_buffer_get_current (pkt);
+  /* single assignment (the original assigned pkt_hdrp twice) */
+  *pkt_hdrp = pkt_hdr;
+  u8 *ip_pkt = vlib_buffer_get_current (pkt);
+  /* IP version nibble is 4 (0b0100) or 6 (0b0110); bit 1 discriminates */
+  u8 version = (pkt_hdr->v4.ip.version_ihl & 0xf0) >> 4;
+  *isv6 = ((version & 2) >> 1);
+  u8 ip_proto = (*isv6) * IPPROTO_IPV6;
+  /*
+   * in the ipv6 header the next header field is at byte 6, in the ipv4
+   * header the protocol field is at byte 9
+   */
+  u8 next_proto_offset = 6 + (1 - *isv6) * 3;
+  hicn_type_t type = (hicn_type_t) { {.l4 = IPPROTO_NONE,.l3 =
+				      IPPROTO_NONE,.l2 =
+				      ip_pkt[next_proto_offset],.l1 =
+				      ip_proto}
+  };
+  hicn_get_buffer (pkt)->type = type;
+  /* Dispatch name extraction through the per-protocol vtable */
+  hicn_ops_vft[type.l1]->get_data_name (type, &pkt_hdr->protocol, name);
+  *namelen = (1 - (*isv6)) * HICN_V4_NAME_LEN + (*isv6) * HICN_V6_NAME_LEN;
+
+  return HICN_ERROR_NONE;
+}
+
+
+#endif /* // __HICN_PARSER_H__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/pcs.c b/hicn-plugin/src/pcs.c
new file mode 100755
index 000000000..4226291a1
--- /dev/null
+++ b/hicn-plugin/src/pcs.c
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdlib.h>
+#include <vlib/vlib.h>
+
+#include "hashtb.h"
+#include "pcs.h"
+#include "cache_policies/cs_lru.h"
+
+/*
+ * Allocate and initialize the combined PIT/CS structure with the LRU
+ * cache policy.
+ *
+ * @param p         PIT/CS structure to initialize
+ * @param num_elems number of hash table elements
+ * @return HICN_ERROR_NONE on success, the hicn_hashtb_alloc () error code
+ *         otherwise (in which case p is left untouched)
+ */
+int
+hicn_pit_create (hicn_pit_cs_t * p, u32 num_elems)
+{
+  int ret =
+    hicn_hashtb_alloc (&p->pcs_table, num_elems, sizeof (hicn_pcs_entry_t));
+  /* Do not touch the table on allocation failure: the original code
+   * dereferenced pcs_table unconditionally. */
+  if (ret != HICN_ERROR_NONE)
+    return (ret);
+
+  p->pcs_table->ht_flags |= HICN_HASHTB_FLAG_KEY_FMT_NAME;
+
+  p->pcs_pit_count = p->pcs_cs_count = 0;
+
+  /* Reserve HICN_PARAM_CS_RESERVED_APP percent of the CS for app faces */
+  p->policy_state.max =
+    HICN_PARAM_CS_LRU_DEFAULT -
+    (HICN_PARAM_CS_LRU_DEFAULT * HICN_PARAM_CS_RESERVED_APP / 100);
+  p->policy_state.count = 0;
+  p->policy_state.head = p->policy_state.tail = 0;
+  p->pcs_app_max = HICN_PARAM_CS_LRU_DEFAULT - p->policy_state.max;
+
+  /* Plug in the LRU eviction policy */
+  p->policy_vft.hicn_cs_insert = hicn_cs_lru.hicn_cs_insert;
+  p->policy_vft.hicn_cs_update = hicn_cs_lru.hicn_cs_update;
+  p->policy_vft.hicn_cs_dequeue = hicn_cs_lru.hicn_cs_dequeue;
+  p->policy_vft.hicn_cs_delete_get = hicn_cs_lru.hicn_cs_delete_get;
+  p->policy_vft.hicn_cs_trim = hicn_cs_lru.hicn_cs_trim;
+
+  return (ret);
+}
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/pcs.h b/hicn-plugin/src/pcs.h
new file mode 100755
index 000000000..375a7d537
--- /dev/null
+++ b/hicn-plugin/src/pcs.h
@@ -0,0 +1,836 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HICN_PCS_H__
+#define __HICN_PCS_H__
+
+#include "hashtb.h"
+#include "face_db.h"
+#include "strategy_dpo_manager.h"
+#include "error.h"
+#include "cache_policies/cs_policy.h"
+#include "faces/face.h"
+#include "faces/ip/dpo_ip.h"
+#include "faces/app/face_prod.h"
+
+/* The PIT and CS are stored as a union */
+#define HICN_PIT_NULL_TYPE 0
+#define HICN_PIT_TYPE 1
+#define HICN_CS_TYPE 2
+
+/*
+ * Definitions and Forward refs for the time counters we're trying out.
+ * Counters are maintained by the background process.
+ */
+#define SEC_MS 1000
+#define HICN_INFRA_FAST_TIMER_SECS 1
+#define HICN_INFRA_FAST_TIMER_MSECS (HICN_INFRA_FAST_TIMER_SECS * SEC_MS)
+#define HICN_INFRA_SLOW_TIMER_SECS 60
+#define HICN_INFRA_SLOW_TIMER_MSECS (HICN_INFRA_SLOW_TIMER_SECS * SEC_MS)
+
+/*
+ * Max number of incoming (interest) faces supported, for now. Note that
+ * changing this may change alignment within the PIT struct, so be careful.
+ */
+typedef struct __attribute__ ((packed)) hicn_pcs_shared_s
+{
+
+  /* Installation/creation time (vpp float units, for now) */
+  f64 create_time;
+
+  /* Expiration time (vpp float units, for now) */
+  f64 expire_time;
+
+  /* Shared 'flags' octet (HICN_PCS_ENTRY_CS_FLAG marks a CS entry) */
+  u8 entry_flags;
+
+  /* Needed to align for the pit or cs portion */
+  u8 padding;
+} hicn_pcs_shared_t;
+
+#define HICN_PCS_ENTRY_CS_FLAG 0x01
+
+/*
+ * PIT entry, unioned with a CS entry below
+ */
+typedef struct __attribute__ ((packed)) hicn_pit_entry_s
+{
+
+  /* Shared size 8 + 8 + 2 = 18B */
+
+  /*
+   * Egress next hop (contains the egress face). This id refers to the
+   * nh chosen in the next_hops array of the dpo
+   */
+  /* 18B + 1B = 19B */
+  u8 pe_txnh;
+
+  /* Array of faces */
+  /* 24B + 32B (8B*4) =56B */
+  hicn_face_db_t faces;
+
+} hicn_pit_entry_t;
+
+#define HICN_CS_ENTRY_OPAQUE_SIZE HICN_HASH_NODE_APP_DATA_SIZE - 40
+
+/*
+ * CS entry, unioned with a PIT entry below
+ */
+typedef struct __attribute__ ((packed)) hicn_cs_entry_s
+{
+  /* Alignment padding. 22B + 2B = 24B */
+  u16 align;
+
+  /* Packet buffer, if held */
+  /* 18B + 4B = 22B */
+  u32 cs_pkt_buf;
+
+  /* Ingress face */
+  /* 24B + 8B = 32B */
+  /* union with the u64 member works around alignment issues */
+  union
+  {
+    dpo_id_t cs_rxface;
+    u64 cs_rxface_u64;
+  };
+
+  /* Linkage for LRU, in the form of hashtable node indexes */
+  /* 32B + 8B = 40B */
+  u32 cs_lru_prev;
+  u32 cs_lru_next;
+
+  /* Reserved for implementing cache policy different than LRU */
+  /* 40B + 56B = 96B */
+  u8 opaque[HICN_CS_ENTRY_OPAQUE_SIZE];
+
+
+} __attribute__ ((packed)) hicn_cs_entry_t;
+
+/*
+ * Combined PIT/CS entry data structure, embedded in a hashtable entry after
+ * the common hashtable preamble struct. This MUST fit in the available
+ * (fixed) space in a hashtable node.
+ */
+typedef struct hicn_pcs_entry_s
+{
+
+  /* Fields common to PIT and CS entries (timestamps + flags) */
+  hicn_pcs_shared_t shared;
+
+  /* PIT or CS payload; shared.entry_flags says which member is live */
+  union
+  {
+    hicn_pit_entry_t pit;
+    hicn_cs_entry_t cs;
+  } u;
+} hicn_pcs_entry_t;
+
+
+/*
+ * Overall PIT/CS table, based on the common hashtable
+ */
+typedef struct hicn_pit_cs_s
+{
+
+  /* Underlying hashtable shared by PIT and CS entries */
+  hicn_hashtb_t *pcs_table;
+
+  /* Counters for PIT/CS entries */
+  u32 pcs_pit_count;
+  u32 pcs_cs_count;
+  u32 pcs_cs_dealloc;
+  u32 pcs_pit_dealloc;
+
+  /* Total size of PCS */
+  u32 pcs_size;
+
+  /* Memory reserved for appfaces */
+  u32 pcs_app_max;
+  u32 pcs_app_count;
+
+  /* Cache policy state and its virtual function table (LRU by default) */
+  hicn_cs_policy_t policy_state;
+  hicn_cs_policy_vft_t policy_vft;
+
+} hicn_pit_cs_t;
+
+/* Functions declarations */
+int hicn_pit_create (hicn_pit_cs_t * p, u32 num_elems);
+
+always_inline void
+hicn_pit_to_cs (vlib_main_t * vm, hicn_pit_cs_t * pitcs,
+ hicn_pcs_entry_t * pcs_entry, hicn_hash_entry_t * hash_entry,
+ hicn_hash_node_t * node, const hicn_dpo_vft_t * dpo_vft,
+ dpo_id_t * hicn_dpo_id, dpo_id_t * inface_id, u8 is_appface);
+
+always_inline void
+hicn_pcs_cs_update (vlib_main_t * vm, hicn_pit_cs_t * pitcs,
+ hicn_pcs_entry_t * entry, hicn_hash_node_t * node);
+
+always_inline void
+hicn_pcs_cs_delete (vlib_main_t * vm, hicn_pit_cs_t * pitcs,
+ hicn_pcs_entry_t ** pcs_entry, hicn_hash_node_t ** node,
+ hicn_hash_entry_t * hash_entry,
+ const hicn_dpo_vft_t * dpo_vft, dpo_id_t * hicn_dpo_id);
+
+always_inline int
+hicn_pcs_cs_insert (vlib_main_t * vm, hicn_pit_cs_t * pitcs,
+ hicn_pcs_entry_t * entry, hicn_hash_node_t * node,
+ hicn_hash_entry_t ** hash_entry, u64 hashval,
+ u32 * node_id, u8 * dpo_ctx_id, u8 * vft_id, u8 * is_cs,
+ u8 * hash_entry_id, u32 * bucket_id,
+ u8 * bucket_is_overflow);
+
+always_inline int
+hicn_pcs_cs_insert_update (vlib_main_t * vm, hicn_pit_cs_t * pitcs,
+ hicn_pcs_entry_t * entry, hicn_hash_node_t * node,
+ hicn_hash_entry_t ** hash_entry, u64 hashval,
+ u32 * node_id, u8 * dpo_ctx_id, u8 * vft_id,
+ u8 * is_cs, u8 * hash_entry_id, u32 * bucket_id,
+ u8 * bucket_is_overflow);
+
+always_inline int
+hicn_pcs_pit_insert (hicn_pit_cs_t * pitcs, hicn_pcs_entry_t * entry,
+ hicn_hash_node_t * node, hicn_hash_entry_t ** hash_entry,
+ u64 hashval, u32 * node_id, u8 * dpo_ctx_id, u8 * vft_id,
+ u8 * is_cs, u8 * hash_entry_id, u32 * bucket_id,
+ u8 * bucket_is_overflow);
+
+always_inline void
+hicn_pcs_pit_delete (hicn_pit_cs_t * pitcs, hicn_pcs_entry_t ** pcs_entryp,
+ hicn_hash_node_t ** node, vlib_main_t * vm,
+ hicn_hash_entry_t * hash_entry,
+ const hicn_dpo_vft_t * dpo_vft, dpo_id_t * hicn_dpo_id);
+
+always_inline int
+hicn_pcs_insert (vlib_main_t * vm, hicn_pit_cs_t * pitcs,
+ hicn_pcs_entry_t * entry, hicn_hash_node_t * node,
+ hicn_hash_entry_t ** hash_entry, u64 hashval, u32 * node_id,
+ u8 * dpo_ctx_id, u8 * vft_id, u8 * is_cs, u8 * hash_entry_id,
+ u32 * bucket_id, u8 * bucket_is_overflow);
+
+always_inline void
+hicn_pcs_delete (hicn_pit_cs_t * pitcs, hicn_pcs_entry_t ** pcs_entryp,
+ hicn_hash_node_t ** node, vlib_main_t * vm,
+ hicn_hash_entry_t * hash_entry,
+ const hicn_dpo_vft_t * dpo_vft, dpo_id_t * hicn_dpo_id);
+
+always_inline void
+hicn_pcs_remove_lock (hicn_pit_cs_t * pitcs, hicn_pcs_entry_t ** pcs_entryp,
+ hicn_hash_node_t ** node, vlib_main_t * vm,
+ hicn_hash_entry_t * hash_entry,
+ const hicn_dpo_vft_t * dpo_vft, dpo_id_t * hicn_dpo_id);
+
+always_inline void
+hicn_cs_delete_trimmed (hicn_pit_cs_t * pitcs, hicn_pcs_entry_t ** pcs_entryp,
+ hicn_hash_entry_t * hash_entry,
+ hicn_hash_node_t ** node, vlib_main_t * vm);
+
+/* Function implementation */
+/* Accessor for pit/cs data inside hash table node */
+static inline hicn_pcs_entry_t *
+hicn_pit_get_data (hicn_hash_node_t * node)
+{
+  /* The PIT/CS entry lives in the hashtable node's app-data area. */
+  void *data = hicn_hashtb_node_data (node);
+  return ((hicn_pcs_entry_t *) data);
+}
+
+/* Init pit/cs data block (usually inside hash table node) */
+static inline void
+hicn_pit_init_data (hicn_pcs_entry_t * p)
+{
+  /* Zero the entry, then attach a fresh face bucket to the PIT side. */
+  memset (p, 0, sizeof (hicn_pcs_entry_t));
+  hicn_face_bucket_t *face_bkt;
+  pool_get (hicn_face_bucket_pool, face_bkt);
+
+  /* Store the pool index (not the pointer) of the allocated bucket. */
+  p->u.pit.faces.next_bucket = face_bkt - hicn_face_bucket_pool;
+}
+
+
+
+/* Absolute expiry time: now plus the lifetime converted from msec to sec. */
+static inline f64
+hicn_pcs_get_exp_time (f64 cur_time_sec, u64 lifetime_msec)
+{
+  f64 lifetime_sec = ((f64) lifetime_msec) / SEC_MS;
+  return (cur_time_sec + lifetime_sec);
+}
+
+/*
+ * Configure CS LRU limit. Zero is accepted, means 'no limit', probably not a
+ * good choice.
+ */
+static inline void
+hicn_pit_set_lru_max (hicn_pit_cs_t * p, u32 limit)
+{
+  /* Upper bound on the number of entries the CS LRU may hold. */
+  p->policy_state.max = limit;
+}
+
+/*
+ * Configure the CS limit reserved to application (producer) faces. Zero is
+ * accepted, means 'no limit', probably not a good choice.
+ */
+static inline void
+hicn_pit_set_lru_app_max (hicn_pit_cs_t * p, u32 limit)
+{
+  /* Upper bound on CS entries reserved for application faces. */
+  p->pcs_app_max = limit;
+}
+
+/*
+ * Accessor for PIT interest counter.
+ */
+static inline u32
+hicn_pit_get_int_count (const hicn_pit_cs_t * pitcs)
+{
+  /* Number of pending interest (PIT) entries. */
+  return pitcs->pcs_pit_count;
+}
+
+/*
+ * Accessor for PIT cs entries counter.
+ */
+static inline u32
+hicn_pit_get_cs_count (const hicn_pit_cs_t * pitcs)
+{
+  /* Number of content-store entries. */
+  return pitcs->pcs_cs_count;
+}
+
+static inline u32
+hicn_pcs_get_ntw_count (const hicn_pit_cs_t * pitcs)
+{
+  /* Entries currently tracked by the (network) cache policy. */
+  return pitcs->policy_state.count;
+}
+
+static inline u32
+hicn_pit_get_htb_bucket_count (const hicn_pit_cs_t * pitcs)
+{
+  /* Overflow buckets in use in the underlying hashtable. */
+  return pitcs->pcs_table->ht_overflow_buckets_used;
+}
+
+static inline int
+hicn_cs_enabled (hicn_pit_cs_t * pit)
+{
+  /* CS is usable only when compiled in AND a non-zero LRU limit is set. */
+  if (HICN_FEATURE_CS == 1)
+    return (pit->policy_state.max > 0);
+
+  return (0);
+}
+
+/*
+ * Delete a PIT/CS entry from the hashtable, freeing the hash node struct.
+ * The caller's pointers are zeroed! If cs_trim is true, entry has already
+ * been removed from lru list The main purpose of this wrapper is helping
+ * maintain the per-PIT stats.
+ */
+always_inline void
+hicn_pcs_delete_internal (hicn_pit_cs_t * pitcs,
+			  hicn_pcs_entry_t ** pcs_entryp,
+			  hicn_hash_entry_t * hash_entry,
+			  hicn_hash_node_t ** node, vlib_main_t * vm,
+			  const hicn_dpo_vft_t * dpo_vft,
+			  dpo_id_t * hicn_dpo_id)
+{
+  hicn_pcs_entry_t *pcs = *pcs_entryp;
+
+  ASSERT (pcs == hicn_hashtb_node_data (*node));
+
+  if (hash_entry->he_flags & HICN_HASH_ENTRY_FLAG_CS_ENTRY)
+    {
+      pitcs->pcs_cs_dealloc++;
+
+      /* Free any associated packet buffer */
+      vlib_buffer_free_one (vm, pcs->u.cs.cs_pkt_buf);
+      pcs->u.cs.cs_pkt_buf = ~0;
+      /* Caller must have already unlinked the entry from the LRU list. */
+      ASSERT ((pcs->u.cs.cs_lru_prev == 0)
+	      && (pcs->u.cs.cs_lru_prev == pcs->u.cs.cs_lru_next));
+    }
+  else
+    {
+      pitcs->pcs_pit_dealloc++;
+      /* Release the reference this PIT entry held on the strategy dpo ctx. */
+      dpo_vft->hicn_dpo_unlock_dpo_ctx (hicn_dpo_id);
+
+      /* Flush faces */
+      hicn_faces_flush (&(pcs->u.pit.faces));
+    }
+
+  /* Remove the node from the hashtable, then zero the caller's pointer. */
+  hicn_hashtb_delete (pitcs->pcs_table, node, hash_entry->he_msb64);
+  memset (*pcs_entryp, 0, sizeof (hicn_pcs_entry_t));
+  *pcs_entryp = NULL;
+}
+
+/*
+ * Convert a PIT entry into a CS entry (assumes that the entry is already in
+ * the hashtable.) This is primarily here to maintain the internal counters.
+ */
+always_inline void
+hicn_pit_to_cs (vlib_main_t * vm, hicn_pit_cs_t * pitcs,
+		hicn_pcs_entry_t * pcs_entry, hicn_hash_entry_t * hash_entry,
+		hicn_hash_node_t * node, const hicn_dpo_vft_t * dpo_vft,
+		dpo_id_t * hicn_dpo_id, dpo_id_t * inface_id, u8 is_appface)
+{
+
+  /*
+   * Different from the insert node. In here we don't need to add a new
+   * hash entry.
+   */
+  pitcs->pcs_pit_count--;
+  dpo_vft->hicn_dpo_unlock_dpo_ctx (hicn_dpo_id);
+  /* Flush faces */
+  hicn_faces_flush (&(pcs_entry->u.pit.faces));
+  /* Fill the CS portion with 0xff bytes (note: ~0, not 0). */
+  memset (&(pcs_entry->u.cs), ~0, sizeof (hicn_cs_entry_t));
+
+  /* Flip the entry over to the CS side at all three layers. */
+  hash_entry->he_flags |= HICN_HASH_ENTRY_FLAG_CS_ENTRY;
+  node->hn_flags |= HICN_HASH_NODE_CS_FLAGS;
+  pcs_entry->shared.entry_flags |= HICN_PCS_ENTRY_CS_FLAG;
+
+  pcs_entry->u.cs.cs_rxface = *inface_id;
+
+  /* Update the CS according to the policy */
+  hicn_cs_policy_t *policy_state;
+  hicn_cs_policy_vft_t *policy_vft;
+
+  if (is_appface)
+    {
+      /* Producer application faces carry their own policy/vft. */
+      dpo_id_t *face_dpo = (dpo_id_t *) & (pcs_entry->u.cs.cs_rxface);
+      hicn_face_t *face = hicn_dpoi_get_from_idx (face_dpo->dpoi_index);
+      hicn_face_prod_t *prod_face = (hicn_face_prod_t *) face->data;
+      policy_state = &prod_face->policy;
+      policy_vft = &prod_face->policy_vft;
+    }
+  else
+    {
+      policy_state = &pitcs->policy_state;
+      policy_vft = &pitcs->policy_vft;
+    }
+
+  policy_vft->hicn_cs_insert (pitcs, node, pcs_entry, policy_state);
+  pitcs->pcs_cs_count++;
+
+  if (policy_state->count > policy_state->max)
+    {
+      /* Evict one entry; these locals intentionally shadow the params. */
+      hicn_hash_node_t *node;
+      hicn_pcs_entry_t *pcs_entry;
+      hicn_hash_entry_t *hash_entry;
+      policy_vft->hicn_cs_delete_get (pitcs, policy_state,
+				      &node, &pcs_entry, &hash_entry);
+
+
+      /*
+       * We don't have to decrease the lock (therefore we cannot
+       * use hicn_pcs_cs_delete function)
+       */
+      policy_vft->hicn_cs_dequeue (pitcs, node, pcs_entry, policy_state);
+
+      hicn_cs_delete_trimmed (pitcs, &pcs_entry, hash_entry, &node, vm);
+
+      /* Update the global CS counter */
+      pitcs->pcs_cs_count--;
+    }
+}
+
+/* Functions specific for PIT or CS */
+
+always_inline void
+hicn_pcs_cs_update (vlib_main_t * vm, hicn_pit_cs_t * pitcs,
+		    hicn_pcs_entry_t * entry, hicn_hash_node_t * node)
+{
+  hicn_cs_policy_t *policy_state;
+  hicn_cs_policy_vft_t *policy_vft;
+
+  /* Default to the global policy; switch to the producer face's policy
+   * when the entry was received on a producer app face. */
+  dpo_id_t *face_dpo = (dpo_id_t *) & (entry->u.cs.cs_rxface);
+  policy_state = &pitcs->policy_state;
+  policy_vft = &pitcs->policy_vft;
+
+  if (face_dpo->dpoi_type == hicn_face_ip_type)
+    {
+      hicn_face_t *face = hicn_dpoi_get_from_idx (face_dpo->dpoi_index);
+      if (face->shared.flags & HICN_FACE_FLAGS_APPFACE_PROD)
+	{
+	  hicn_face_prod_t *prod_face = (hicn_face_prod_t *) face->data;
+	  policy_state = &prod_face->policy;
+	  policy_vft = &prod_face->policy_vft;
+	}
+    }
+  /* Update the CS LRU, moving this item to the head */
+  policy_vft->hicn_cs_update (pitcs, node, entry, policy_state);
+}
+
+always_inline void
+hicn_pcs_cs_delete (vlib_main_t * vm, hicn_pit_cs_t * pitcs,
+		    hicn_pcs_entry_t ** pcs_entryp, hicn_hash_node_t ** nodep,
+		    hicn_hash_entry_t * hash_entry,
+		    const hicn_dpo_vft_t * dpo_vft, dpo_id_t * hicn_dpo_id)
+{
+  /* Dequeue from the policy only once: skip if already marked deleted. */
+  if (!(hash_entry->he_flags & HICN_HASH_ENTRY_FLAG_DELETED))
+    {
+      hicn_cs_policy_t *policy_state;
+      hicn_cs_policy_vft_t *policy_vft;
+
+      /* Entries from a producer app face use that face's policy/vft. */
+      dpo_id_t *face_dpo = (dpo_id_t *) & ((*pcs_entryp)->u.cs.cs_rxface);
+      policy_state = &pitcs->policy_state;
+      policy_vft = &pitcs->policy_vft;
+
+      if (face_dpo->dpoi_type == hicn_face_ip_type)
+	{
+	  hicn_face_t *face = hicn_dpoi_get_from_idx (face_dpo->dpoi_index);
+	  if (face->shared.flags & HICN_FACE_FLAGS_APPFACE_PROD)
+	    {
+	      hicn_face_prod_t *prod_face = (hicn_face_prod_t *) face->data;
+	      policy_state = &prod_face->policy;
+	      policy_vft = &prod_face->policy_vft;
+	    }
+	}
+      policy_vft->hicn_cs_dequeue (pitcs, (*nodep), (*pcs_entryp),
+				   policy_state);
+
+      /* Update the global CS counter */
+      pitcs->pcs_cs_count--;
+    }
+  /* Free immediately when unlocked; otherwise defer via the DELETED flag. */
+  hash_entry->locks--;
+  if (hash_entry->locks == 0)
+    {
+      hicn_pcs_delete_internal
+	(pitcs, pcs_entryp, hash_entry, nodep, vm, dpo_vft, hicn_dpo_id);
+    }
+  else
+    {
+      hash_entry->he_flags |= HICN_HASH_ENTRY_FLAG_DELETED;
+    }
+}
+
+always_inline int
+hicn_pcs_cs_insert (vlib_main_t * vm, hicn_pit_cs_t * pitcs,
+		    hicn_pcs_entry_t * entry, hicn_hash_node_t * node,
+		    hicn_hash_entry_t ** hash_entry, u64 hashval,
+		    u32 * node_id, u8 * dpo_ctx_id, u8 * vft_id, u8 * is_cs,
+		    u8 * hash_entry_id, u32 * bucket_id,
+		    u8 * bucket_is_overflow)
+{
+  ASSERT (entry == hicn_hashtb_node_data (node));
+
+  int ret =
+    hicn_hashtb_insert (pitcs->pcs_table, node, hash_entry, hashval, node_id,
+			dpo_ctx_id, vft_id, is_cs, hash_entry_id, bucket_id,
+			bucket_is_overflow);
+
+  if (PREDICT_TRUE (ret == HICN_ERROR_NONE))
+    {
+      hicn_cs_policy_t *policy_state;
+      hicn_cs_policy_vft_t *policy_vft;
+
+      /* Entries from a producer app face use that face's policy/vft. */
+      dpo_id_t *face_dpo = (dpo_id_t *) & (entry->u.cs.cs_rxface);
+      policy_state = &pitcs->policy_state;
+      policy_vft = &pitcs->policy_vft;
+
+      if (face_dpo->dpoi_type == hicn_face_ip_type)
+	{
+	  hicn_face_t *face = hicn_dpoi_get_from_idx (face_dpo->dpoi_index);
+	  if (face->shared.flags & HICN_FACE_FLAGS_APPFACE_PROD)
+	    {
+	      hicn_face_prod_t *prod_face = (hicn_face_prod_t *) face->data;
+	      policy_state = &prod_face->policy;
+	      policy_vft = &prod_face->policy_vft;
+	    }
+	}
+      policy_vft->hicn_cs_insert (pitcs, node, entry, policy_state);
+      pitcs->pcs_cs_count++;
+
+      /* Over the policy limit: evict one entry. */
+      if (policy_state->count > policy_state->max)
+	{
+	  hicn_hash_node_t *node;
+	  hicn_pcs_entry_t *pcs_entry;
+	  hicn_hash_entry_t *hash_entry;
+	  policy_vft->hicn_cs_delete_get (pitcs, policy_state,
+					  &node, &pcs_entry, &hash_entry);
+
+	  /* NULL dpo_vft/dpo_id are safe: the evicted entry is a CS entry,
+	   * so the PIT-only dpo unlock path in delete is not taken. */
+	  hicn_pcs_cs_delete (vm, pitcs, &pcs_entry, &node, hash_entry, NULL,
+			      NULL);
+	}
+    }
+  return ret;
+}
+
+/*
+ * Insert CS entry into the hashtable The main purpose of this wrapper is
+ * helping maintain the per-PIT stats.
+ */
+always_inline int
+hicn_pcs_cs_insert_update (vlib_main_t * vm, hicn_pit_cs_t * pitcs,
+			   hicn_pcs_entry_t * entry, hicn_hash_node_t * node,
+			   hicn_hash_entry_t ** hash_entry, u64 hashval,
+			   u32 * node_id, u8 * dpo_ctx_id, u8 * vft_id,
+			   u8 * is_cs, u8 * hash_entry_id, u32 * bucket_id,
+			   u8 * bucket_is_overflow)
+{
+  int ret;
+
+  ASSERT (entry == hicn_hashtb_node_data (node));
+
+  ret =
+    hicn_pcs_cs_insert (vm, pitcs, entry, node, hash_entry, hashval, node_id,
+			dpo_ctx_id, vft_id, is_cs, hash_entry_id, bucket_id,
+			bucket_is_overflow);
+
+  /* A content already exists in CS with the same name */
+  if (ret == HICN_ERROR_HASHTB_EXIST)
+    {
+      /* Update the entry in place instead of inserting a duplicate. */
+      hicn_hash_node_t *existing_node =
+	hicn_hashtb_node_from_idx (pitcs->pcs_table, *node_id);
+      hicn_pcs_entry_t *pitp = hicn_pit_get_data (existing_node);
+
+      /* Free associated packet buffer and update counter */
+      pitcs->pcs_cs_dealloc++;
+      vlib_buffer_free_one (vm, pitp->u.cs.cs_pkt_buf);
+
+      /* Take timestamps and buffer from the new entry; refresh the LRU. */
+      pitp->shared.create_time = entry->shared.create_time;
+      pitp->shared.expire_time = entry->shared.expire_time;
+      pitp->u.cs.cs_pkt_buf = entry->u.cs.cs_pkt_buf;
+      hicn_pcs_cs_update (vm, pitcs, pitp, existing_node);
+    }
+  return (ret);
+}
+
+/*
+ * Insert PIT entry into the hashtable The main purpose of this wrapper is
+ * helping maintain the per-PIT stats.
+ */
+always_inline int
+hicn_pcs_pit_insert (hicn_pit_cs_t * pitcs, hicn_pcs_entry_t * entry,
+		     hicn_hash_node_t * node, hicn_hash_entry_t ** hash_entry,
+		     u64 hashval, u32 * node_id, u8 * dpo_ctx_id, u8 * vft_id,
+		     u8 * is_cs, u8 * hash_entry_id, u32 * bucket_id,
+		     u8 * bucket_is_overflow)
+{
+  ASSERT (entry == hicn_hashtb_node_data (node));
+
+  int ret =
+    hicn_hashtb_insert (pitcs->pcs_table, node, hash_entry, hashval, node_id,
+			dpo_ctx_id, vft_id, is_cs, hash_entry_id, bucket_id,
+			bucket_is_overflow);
+
+  /* Bump the PIT counter only on successful insertion. */
+  if (PREDICT_TRUE (ret == HICN_ERROR_NONE))
+    pitcs->pcs_pit_count++;
+
+  return ret;
+}
+
+always_inline void
+hicn_pcs_pit_delete (hicn_pit_cs_t * pitcs, hicn_pcs_entry_t ** pcs_entryp,
+		     hicn_hash_node_t ** node, vlib_main_t * vm,
+		     hicn_hash_entry_t * hash_entry,
+		     const hicn_dpo_vft_t * dpo_vft, dpo_id_t * hicn_dpo_id)
+{
+  /* Counter is decremented immediately; actual free may be deferred
+   * until the last lock holder releases the entry. */
+  hash_entry->locks--;
+  pitcs->pcs_pit_count--;
+  if (hash_entry->locks == 0)
+    {
+      hicn_pcs_delete_internal
+	(pitcs, pcs_entryp, hash_entry, node, vm, dpo_vft, hicn_dpo_id);
+    }
+  else
+    {
+      hash_entry->he_flags |= HICN_HASH_ENTRY_FLAG_DELETED;
+    }
+}
+
+
+/* Generic functions for PIT/CS */
+
+/*
+ * Insert PIT/CS entry into the hashtable The main purpose of this wrapper is
+ * helping maintain the per-PIT stats.
+ */
+/*
+ * Route the insertion to the CS or PIT path depending on the entry kind
+ * recorded in the hash entry flags.
+ */
+always_inline int
+hicn_pcs_insert (vlib_main_t * vm, hicn_pit_cs_t * pitcs,
+		 hicn_pcs_entry_t * entry, hicn_hash_node_t * node,
+		 hicn_hash_entry_t ** hash_entry, u64 hashval, u32 * node_id,
+		 u8 * dpo_ctx_id, u8 * vft_id, u8 * is_cs, u8 * hash_entry_id,
+		 u32 * bucket_id, u8 * bucket_is_overflow)
+{
+  if ((*hash_entry)->he_flags & HICN_HASH_ENTRY_FLAG_CS_ENTRY)
+    return (hicn_pcs_cs_insert (vm, pitcs, entry, node, hash_entry, hashval,
+				node_id, dpo_ctx_id, vft_id, is_cs,
+				hash_entry_id, bucket_id,
+				bucket_is_overflow));
+
+  return (hicn_pcs_pit_insert (pitcs, entry, node, hash_entry, hashval,
+			       node_id, dpo_ctx_id, vft_id, is_cs,
+			       hash_entry_id, bucket_id,
+			       bucket_is_overflow));
+}
+
+
+/*
+ * Delete entry if there are no pending lock on the entry, otherwise mark it
+ * as to delete.
+ */
+/*
+ * Route the deletion to the CS or PIT teardown path; an entry already
+ * marked deleted has already been dequeued from the policy.
+ */
+always_inline void
+hicn_pcs_delete (hicn_pit_cs_t * pitcs, hicn_pcs_entry_t ** pcs_entryp,
+		 hicn_hash_node_t ** nodep, vlib_main_t * vm,
+		 hicn_hash_entry_t * hash_entry,
+		 const hicn_dpo_vft_t * dpo_vft, dpo_id_t * hicn_dpo_id)
+{
+  int entry_is_cs = hash_entry->he_flags & HICN_HASH_ENTRY_FLAG_CS_ENTRY;
+
+  if (entry_is_cs)
+    hicn_pcs_cs_delete (vm, pitcs, pcs_entryp, nodep, hash_entry,
+			dpo_vft, hicn_dpo_id);
+  else
+    hicn_pcs_pit_delete (pitcs, pcs_entryp, nodep, vm,
+			 hash_entry, dpo_vft, hicn_dpo_id);
+}
+
+/*
+ * Remove a lock in the entry and delete it if there are no pending lock and
+ * the entry is marked as to be deleted
+ */
+always_inline void
+hicn_pcs_remove_lock (hicn_pit_cs_t * pitcs, hicn_pcs_entry_t ** pcs_entryp,
+		      hicn_hash_node_t ** node, vlib_main_t * vm,
+		      hicn_hash_entry_t * hash_entry,
+		      const hicn_dpo_vft_t * dpo_vft, dpo_id_t * hicn_dpo_id)
+{
+  /* Free the entry only when this was the last lock AND a deferred
+   * delete was requested (DELETED flag set by an earlier delete call). */
+  hash_entry->locks--;
+  if (hash_entry->locks == 0
+      && (hash_entry->he_flags & HICN_HASH_ENTRY_FLAG_DELETED))
+    {
+      hicn_pcs_delete_internal
+	(pitcs, pcs_entryp, hash_entry, node, vm, dpo_vft, hicn_dpo_id);
+    }
+}
+
+/*
+ * Delete entry which has already been bulk-removed from lru list
+ */
+always_inline void
+hicn_cs_delete_trimmed (hicn_pit_cs_t * pitcs, hicn_pcs_entry_t ** pcs_entryp,
+			hicn_hash_entry_t * hash_entry,
+			hicn_hash_node_t ** node, vlib_main_t * vm)
+{
+
+
+  if (hash_entry->locks == 0)
+    {
+      /* Rebuild the dpo id from the ids stored in the hash entry. */
+      const hicn_dpo_vft_t *dpo_vft = hicn_dpo_get_vft (hash_entry->vft_id);
+      dpo_id_t hicn_dpo_id =
+	{ dpo_vft->hicn_dpo_get_type (), 0, 0, hash_entry->dpo_ctx_id };
+
+      hicn_pcs_delete_internal
+	(pitcs, pcs_entryp, hash_entry, node, vm, dpo_vft, &hicn_dpo_id);
+    }
+  else
+    {
+      /* Still locked: defer the free to the last lock release. */
+      hash_entry->he_flags |= HICN_HASH_ENTRY_FLAG_DELETED;
+    }
+}
+
+/*
+ * wrappable counter math (assumed uint16_t): return sum of addends
+ */
+always_inline u16
+hicn_infra_seq16_sum (u16 addend1, u16 addend2)
+{
+  /* Wrapping 16-bit addition. */
+  return ((u16) (addend1 + addend2));
+}
+
+/*
+ * for comparing wrapping numbers, return lt,eq,gt 0 for a lt,eq,gt b
+ */
+always_inline int
+hicn_infra_seq16_cmp (u16 a, u16 b)
+{
+  /* Signed wrap-around distance: <0, 0, >0 for a lt, eq, gt b. */
+  int16_t delta = (int16_t) (a - b);
+  return (delta);
+}
+
+/*
+ * below are wrappers for lt, le, gt, ge seq16 comparators
+ */
+always_inline int
+hicn_infra_seq16_lt (u16 a, u16 b)
+{
+  return hicn_infra_seq16_cmp (a, b) < 0;
+}
+
+always_inline int
+hicn_infra_seq16_le (u16 a, u16 b)
+{
+  return hicn_infra_seq16_cmp (a, b) <= 0;
+}
+
+always_inline int
+hicn_infra_seq16_gt (u16 a, u16 b)
+{
+  return hicn_infra_seq16_cmp (a, b) > 0;
+}
+
+always_inline int
+hicn_infra_seq16_ge (u16 a, u16 b)
+{
+  return hicn_infra_seq16_cmp (a, b) >= 0;
+}
+
+
+extern u16 hicn_infra_fast_timer; /* Counts at 1 second intervals */
+extern u16 hicn_infra_slow_timer; /* Counts at 1 minute intervals */
+
+/*
+ * Utilities to convert lifetime into expiry time based on compressed clock,
+ * suitable for the opportunistic hashtable entry timeout processing.
+ */
+
+//convert time in msec to time in clicks
+always_inline u16
+hicn_infra_ms2clicks (u64 time_ms, u64 ms_per_click)
+{
+  /* Ceiling division, performed in floating point as before. */
+  f64 clicks = ((f64) (time_ms + ms_per_click - 1)) / ((f64) ms_per_click);
+  return ((u16) clicks);
+}
+
+always_inline u16
+hicn_infra_get_fast_exp_time (u64 lifetime_ms)
+{
+  /* Expiry on the fast (1 s) compressed clock. */
+  u16 clicks = hicn_infra_ms2clicks (lifetime_ms,
+				     HICN_INFRA_FAST_TIMER_MSECS);
+  return (hicn_infra_seq16_sum (hicn_infra_fast_timer, clicks));
+}
+
+always_inline u16
+hicn_infra_get_slow_exp_time (u64 lifetime_ms)
+{
+  /* Expiry on the slow (1 min) compressed clock. */
+  u16 clicks = hicn_infra_ms2clicks (lifetime_ms,
+				     HICN_INFRA_SLOW_TIMER_MSECS);
+  return (hicn_infra_seq16_sum (hicn_infra_slow_timer, clicks));
+}
+
+#endif /* __HICN_PCS_H__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/pg.c b/hicn-plugin/src/pg.c
new file mode 100755
index 000000000..643aff2be
--- /dev/null
+++ b/hicn-plugin/src/pg.c
@@ -0,0 +1,1147 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vnet/pg/pg.h>
+#include <vnet/ip/ip.h>
+#include <vnet/ethernet/ethernet.h>
+
+#include "hicn.h"
+#include "pg.h"
+#include "parser.h"
+#include "infra.h"
+
+/* Registration struct for a graph node */
+vlib_node_registration_t hicn_pg_interest_node;
+vlib_node_registration_t hicn_pg_data_node;
+
+/* Stats, which end up called "error" even though they aren't... */
+#define foreach_hicnpg_error                                    \
+  _(PROCESSED, "hICN PG packets processed")                     \
+  _(DROPPED, "hICN PG packets dropped")                         \
+  _(INTEREST_MSGS_GENERATED, "hICN PG Interests generated")     \
+  _(CONTENT_MSGS_RECEIVED, "hICN PG Content msgs received")
+
+/* Counter enum generated from the foreach list above. */
+typedef enum
+{
+#define _(sym,str) HICNPG_ERROR_##sym,
+  foreach_hicnpg_error
+#undef _
+  HICNPG_N_ERROR,
+} hicnpg_error_t;
+
+/* Human-readable strings, index-aligned with hicnpg_error_t. */
+static char *hicnpg_error_strings[] = {
+#define _(sym,string) string,
+  foreach_hicnpg_error
+#undef _
+};
+
+/*
+ * Next graph nodes, which reference the list in the actual registration
+ * block below
+ */
+typedef enum
+{
+  HICNPG_INTEREST_NEXT_V4_LOOKUP,
+  HICNPG_INTEREST_NEXT_V6_LOOKUP,
+  /* iface-input nexts sit 2 slots after the lookup nexts: the client node
+   * selects them via 'next += 2 * hicn_underneath'. */
+  HICNPG_INTEREST_NEXT_IFACE_IP4_INPUT,
+  HICNPG_INTEREST_NEXT_IFACE_IP6_INPUT,
+  HICNPG_INTEREST_NEXT_DROP,
+  HICNPG_N_NEXT,
+} hicnpg_interest_next_t;
+
+/* Trace context struct */
+typedef struct
+{
+  u32 next_index;		/* chosen next node index */
+  u32 sw_if_index;		/* RX interface of the traced packet */
+  u8 pkt_type;			/* recorded as 0 by the client node */
+  u16 msg_type;			/* recorded as 0 by the client node */
+} hicnpg_trace_t;
+
+/* Global state of the interest generator client; 'index' drives the
+ * sequence numbers and 'index_ifaces' round-robins the ifaces. */
+hicnpg_main_t hicnpg_main = {
+  .index = (u32) 0,
+  .index_ifaces = (u32) 1,
+  .max_seq_number = (u32) ~ 0,
+  .interest_lifetime = 4,
+  .n_flows = (u32) 0,
+  .n_ifaces = (u32) 1,
+  .hicn_underneath = 0
+};
+
+/* Global state of the data responder (server) side. */
+hicnpg_server_main_t hicnpg_server_main = {
+  .node_index = 0,
+  .hicn_underneath = 0
+};
+
+/* packet trace format function */
+static u8 *
+format_hicnpg_trace (u8 * s, va_list * args)
+{
+  /* vm and node must be consumed from the va_list even though unused. */
+  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+  hicnpg_trace_t *t = va_arg (*args, hicnpg_trace_t *);
+
+  s = format (s, "HICNPG: pkt: %d, msg %d, sw_if_index %d, next index %d",
+	      (int) t->pkt_type, (int) t->msg_type,
+	      t->sw_if_index, t->next_index);
+  return (s);
+}
+
+always_inline void
+hicn_rewrite_interestv4 (vlib_main_t * vm, vlib_buffer_t * b0, u32 seq_number,
+ u16 lifetime, u32 next_flow, u32 iface);
+
+always_inline void
+hicn_rewrite_interestv6 (vlib_main_t * vm, vlib_buffer_t * b0, u32 seq_number,
+ u16 lifetime, u32 next_flow, u32 iface);
+
+always_inline void
+convert_interest_to_data_v4 (vlib_main_t * vm, vlib_buffer_t * b0,
+ vlib_buffer_t * rb, u32 bi0);
+
+always_inline void
+convert_interest_to_data_v6 (vlib_main_t * vm, vlib_buffer_t * b0,
+ vlib_buffer_t * rb, u32 bi0);
+
+always_inline void
+calculate_tcp_checksum_v4 (vlib_main_t * vm, vlib_buffer_t * b0);
+
+always_inline void
+calculate_tcp_checksum_v6 (vlib_main_t * vm, vlib_buffer_t * b0);
+/*
+ * Node function for the icn packet-generator client. The goal here is to
+ * manipulate/tweak a stream of packets that have been injected by the vpp
+ * packet generator to generate icn request traffic.
+ */
+static uword
+hicnpg_client_interest_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
+				vlib_frame_t * frame)
+{
+  u32 n_left_from, *from, *to_next;
+  hicnpg_interest_next_t next_index;
+  u32 pkts_processed = 0, pkts_dropped = 0;
+  u32 interest_msgs_generated = 0;
+  u32 bi0, bi1;
+  vlib_buffer_t *b0, *b1;
+  /* pkt_type/msg_type are only recorded in traces; never changed below. */
+  u8 pkt_type0 = 0, pkt_type1 = 0;
+  u16 msg_type0 = 0, msg_type1 = 0;
+  hicn_header_t *hicn0 = NULL, *hicn1 = NULL;
+  hicn_name_t name0, name1;
+  u16 namelen0, namelen1;
+  hicnpg_main_t *hpgm = &hicnpg_main;
+  int iface = 0;
+
+  from = vlib_frame_vector_args (frame);
+  n_left_from = frame->n_vectors;
+  next_index = node->cached_next_index;
+
+  while (n_left_from > 0)
+    {
+      u32 n_left_to_next;
+
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+      /* Dual-buffer loop: two packets per iteration. */
+      while (n_left_from >= 4 && n_left_to_next >= 2)
+	{
+	  u32 next0 = HICNPG_INTEREST_NEXT_DROP;
+	  u32 next1 = HICNPG_INTEREST_NEXT_DROP;
+	  u32 sw_if_index0 = ~0, sw_if_index1 = ~0;
+	  u8 isv6_0;
+	  u8 isv6_1;
+
+	  /* Prefetch next iteration. */
+	  {
+	    vlib_buffer_t *p2, *p3;
+
+	    p2 = vlib_get_buffer (vm, from[2]);
+	    p3 = vlib_get_buffer (vm, from[3]);
+
+	    vlib_prefetch_buffer_header (p2, LOAD);
+	    vlib_prefetch_buffer_header (p3, LOAD);
+
+	    CLIB_PREFETCH (p2->data, (2 * CLIB_CACHE_LINE_BYTES), STORE);
+	    CLIB_PREFETCH (p3->data, (2 * CLIB_CACHE_LINE_BYTES), STORE);
+	  }
+
+	  /*
+	   * speculatively enqueue b0 and b1 to the current
+	   * next frame
+	   */
+	  to_next[0] = bi0 = from[0];
+	  to_next[1] = bi1 = from[1];
+	  from += 2;
+	  to_next += 2;
+	  n_left_from -= 2;
+	  n_left_to_next -= 2;
+
+	  b0 = vlib_get_buffer (vm, bi0);
+	  b1 = vlib_get_buffer (vm, bi1);
+
+	  sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+	  sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
+
+	  /* Check icn packets, locate names */
+	  if (hicn_interest_parse_pkt (b0, &name0, &namelen0, &hicn0, &isv6_0)
+	      == HICN_ERROR_NONE)
+	    {
+	      /* this node grabs only interests */
+
+	      /* Increment the appropriate message counter */
+	      interest_msgs_generated++;
+
+	      /* Round-robin over the emulated ifaces. */
+	      iface = (hpgm->index_ifaces % hpgm->n_ifaces);
+	      /* Rewrite and send */
+	      isv6_0 ? hicn_rewrite_interestv6 (vm, b0,
+						(hpgm->index /
+						 hpgm->n_flows) %
+						hpgm->max_seq_number,
+						hpgm->interest_lifetime,
+						hpgm->index % hpgm->n_flows,
+						iface) :
+		hicn_rewrite_interestv4 (vm, b0,
+					 (hpgm->index / hpgm->n_flows) %
+					 hpgm->max_seq_number,
+					 hpgm->interest_lifetime,
+					 hpgm->index % hpgm->n_flows, iface);
+
+	      /* Advance the global index only after the last iface. */
+	      hpgm->index_ifaces++;
+	      if (iface == (hpgm->n_ifaces - 1))
+		hpgm->index++;
+
+	      /* '+ 2 * hicn_underneath' shifts from ipX-lookup to the
+	       * iface-input next nodes (see hicnpg_interest_next_t). */
+	      next0 =
+		isv6_0 ? HICNPG_INTEREST_NEXT_V6_LOOKUP :
+		HICNPG_INTEREST_NEXT_V4_LOOKUP;
+	      next0 += 2 * hpgm->hicn_underneath;
+	    }
+	  if (hicn_interest_parse_pkt (b1, &name1, &namelen1, &hicn1, &isv6_1)
+	      == HICN_ERROR_NONE)
+	    {
+	      /* this node grabs only interests */
+
+	      /* Increment the appropriate message counter */
+	      interest_msgs_generated++;
+
+	      iface = (hpgm->index_ifaces % hpgm->n_ifaces);
+	      /* Rewrite and send */
+	      isv6_1 ? hicn_rewrite_interestv6 (vm, b1,
+						(hpgm->index /
+						 hpgm->n_flows) %
+						hpgm->max_seq_number,
+						hpgm->interest_lifetime,
+						hpgm->index % hpgm->n_flows,
+						iface) :
+		hicn_rewrite_interestv4 (vm, b1,
+					 (hpgm->index / hpgm->n_flows) %
+					 hpgm->max_seq_number,
+					 hpgm->interest_lifetime,
+					 hpgm->index % hpgm->n_flows, iface);
+
+	      hpgm->index_ifaces++;
+	      if (iface == (hpgm->n_ifaces - 1))
+		hpgm->index++;
+
+	      next1 =
+		isv6_1 ? HICNPG_INTEREST_NEXT_V6_LOOKUP :
+		HICNPG_INTEREST_NEXT_V4_LOOKUP;
+	      next1 += 2 * hpgm->hicn_underneath;
+	    }
+	  /* Send pkt to next node */
+	  vnet_buffer (b0)->sw_if_index[VLIB_TX] = ~0;
+	  vnet_buffer (b1)->sw_if_index[VLIB_TX] = ~0;
+
+	  pkts_processed += 2;
+
+	  if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
+	    {
+	      if (b0->flags & VLIB_BUFFER_IS_TRACED)
+		{
+		  hicnpg_trace_t *t =
+		    vlib_add_trace (vm, node, b0, sizeof (*t));
+		  t->pkt_type = pkt_type0;
+		  t->msg_type = msg_type0;
+		  t->sw_if_index = sw_if_index0;
+		  t->next_index = next0;
+		}
+	      if (b1->flags & VLIB_BUFFER_IS_TRACED)
+		{
+		  hicnpg_trace_t *t =
+		    vlib_add_trace (vm, node, b1, sizeof (*t));
+		  t->pkt_type = pkt_type1;
+		  t->msg_type = msg_type1;
+		  t->sw_if_index = sw_if_index1;
+		  t->next_index = next1;
+		}
+	    }
+	  /* Packets that failed to parse keep the DROP next. */
+	  if (next0 == HICNPG_INTEREST_NEXT_DROP)
+	    {
+	      pkts_dropped++;
+	    }
+	  if (next1 == HICNPG_INTEREST_NEXT_DROP)
+	    {
+	      pkts_dropped++;
+	    }
+	  /*
+	   * verify speculative enqueues, maybe switch current
+	   * next frame
+	   */
+	  vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
+					   to_next, n_left_to_next,
+					   bi0, bi1, next0, next1);
+	}
+
+      /* Single-buffer tail loop. */
+      while (n_left_from > 0 && n_left_to_next > 0)
+	{
+	  u32 next0 = HICNPG_INTEREST_NEXT_DROP;
+	  u32 sw_if_index0;
+	  u8 isv6_0;
+
+	  /* speculatively enqueue b0 to the current next frame */
+	  bi0 = from[0];
+	  to_next[0] = bi0;
+	  from += 1;
+	  to_next += 1;
+	  n_left_from -= 1;
+	  n_left_to_next -= 1;
+
+	  b0 = vlib_get_buffer (vm, bi0);
+
+	  sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+
+	  /* Check icn packets, locate names */
+	  if (hicn_interest_parse_pkt (b0, &name0, &namelen0, &hicn0, &isv6_0)
+	      == HICN_ERROR_NONE)
+	    {
+	      /* this node grabs only interests */
+
+	      /* Increment the appropriate message counter */
+	      interest_msgs_generated++;
+
+	      iface = (hpgm->index_ifaces % hpgm->n_ifaces);
+
+	      /* Rewrite and send */
+	      isv6_0 ? hicn_rewrite_interestv6 (vm, b0,
+						(hpgm->index /
+						 hpgm->n_flows) %
+						hpgm->max_seq_number,
+						hpgm->interest_lifetime,
+						hpgm->index % hpgm->n_flows,
+						iface) :
+		hicn_rewrite_interestv4 (vm, b0,
+					 (hpgm->index / hpgm->n_flows) %
+					 hpgm->max_seq_number,
+					 hpgm->interest_lifetime,
+					 hpgm->index % hpgm->n_flows, iface);
+
+	      hpgm->index_ifaces++;
+	      if (iface == (hpgm->n_ifaces - 1))
+		hpgm->index++;
+
+	      next0 =
+		isv6_0 ? HICNPG_INTEREST_NEXT_V6_LOOKUP :
+		HICNPG_INTEREST_NEXT_V4_LOOKUP;
+	      next0 += 2 * hpgm->hicn_underneath;
+	    }
+	  /* Send pkt to ip lookup */
+	  vnet_buffer (b0)->sw_if_index[VLIB_TX] = ~0;
+
+	  if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
+			     && (b0->flags & VLIB_BUFFER_IS_TRACED)))
+	    {
+	      hicnpg_trace_t *t = vlib_add_trace (vm, node, b0, sizeof (*t));
+	      t->pkt_type = pkt_type0;
+	      t->msg_type = msg_type0;
+	      t->sw_if_index = sw_if_index0;
+	      t->next_index = next0;
+	    }
+	  pkts_processed += 1;
+
+	  if (next0 == HICNPG_INTEREST_NEXT_DROP)
+	    {
+	      pkts_dropped++;
+	    }
+	  /*
+	   * verify speculative enqueue, maybe switch current
+	   * next frame
+	   */
+	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+					   to_next, n_left_to_next,
+					   bi0, next0);
+	}
+
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+    }
+
+  /* Export the per-run counters as node "errors". */
+  vlib_node_increment_counter (vm, hicn_pg_interest_node.index,
+			       HICNPG_ERROR_PROCESSED, pkts_processed);
+  vlib_node_increment_counter (vm, hicn_pg_interest_node.index,
+			       HICNPG_ERROR_DROPPED, pkts_dropped);
+  vlib_node_increment_counter (vm, hicn_pg_interest_node.index,
+			       HICNPG_ERROR_INTEREST_MSGS_GENERATED,
+			       interest_msgs_generated);
+
+  return (frame->n_vectors);
+}
+
+void
+hicn_rewrite_interestv4 (vlib_main_t * vm, vlib_buffer_t * b0, u32 seq_number,
+			 u16 interest_lifetime, u32 next_flow, u32 iface)
+{
+  hicn_header_t *hicn = vlib_buffer_get_current (b0);
+  hicn_type_t type = hicn_get_buffer (b0)->type;
+
+  /*
+   * Per-consumer locator: each emulated consumer (iface) gets its own
+   * source address derived from the configured one.
+   * NOTE(review): iface is added in network byte order while next_flow
+   * below is added in host byte order -- confirm this asymmetry is wanted.
+   */
+  ip46_address_t locator = {
+    .ip4 = hicnpg_main.pgen_clt_src_addr.ip4,
+  };
+  locator.ip4.as_u32 += clib_host_to_net_u32 (iface);
+
+  /*
+   * Per-flow name: the flow index moves the configured prefix, the
+   * sequence number fills the suffix.
+   */
+  hicn_name_t name = {
+    .ip4.prefix_as_ip4 = hicnpg_main.pgen_clt_hicn_name.ip4,
+    .ip4.suffix = seq_number,
+  };
+  name.ip4.prefix_as_ip4.as_u32 += clib_net_to_host_u32 (next_flow);
+
+  /* Stamp locator, name and lifetime into the packet */
+  HICN_OPS4->set_interest_locator (type, &hicn->protocol, &locator);
+  HICN_OPS4->set_interest_name (type, &hicn->protocol, &name);
+
+  /* Update lifetime (currently L4 checksum is not updated here) */
+  HICN_OPS4->set_lifetime (type, &hicn->protocol, interest_lifetime);
+
+  /* Recompute the checksums over the rewritten packet */
+  HICN_OPS4->update_checksums (type, &hicn->protocol, 0, 0);
+}
+
+/**
+ * @brief Rewrite the IPv6 header as the next generated packet
+ *
+ * Set up a name prefix
+ * - either generate interests in which the name varies only after the prefix
+ * (inc : seq_number), then the flow acts on the prefix (CHECK)
+ * seq_number => TCP, FLOW =>
+ *
+ * SRC : pgen_clt_src_addr.ip6 DST = generate name (pgen_clt_hicn_name.ip6)
+ * ffff:ffff:ffff:ffff ffff:ffff:ffff:ffff
+ * \__/ \__/
+ * +iface + flow
+ * Source is used to emulate different consumers.
+ * FIXME iface is ill-named, better name it consumer id
+ * Destination is used to iterate on the content.
+ */
+void
+hicn_rewrite_interestv6 (vlib_main_t * vm, vlib_buffer_t * b0, u32 seq_number,
+			 u16 interest_lifetime, u32 next_flow, u32 iface)
+{
+  hicn_header_t *hicn = vlib_buffer_get_current (b0);
+  hicn_type_t type = hicn_get_buffer (b0)->type;
+
+  /*
+   * Per-consumer locator: iface is folded into the low 32 bits of the
+   * configured source address (one source per emulated consumer).
+   */
+  ip46_address_t locator = {
+    .ip6 = hicnpg_main.pgen_clt_src_addr.ip6,
+  };
+  locator.ip6.as_u32[3] += clib_host_to_net_u32 (iface);
+
+  /*
+   * Per-flow name: the flow index moves the low 32 bits of the prefix,
+   * the sequence number fills the suffix.
+   */
+  hicn_name_t name = {
+    .ip6.prefix_as_ip6 = hicnpg_main.pgen_clt_hicn_name.ip6,
+    .ip6.suffix = seq_number,
+  };
+  name.ip6.prefix_as_ip6.as_u32[3] += clib_net_to_host_u32 (next_flow);
+
+  /* Stamp locator, name and lifetime into the packet */
+  HICN_OPS6->set_interest_locator (type, &hicn->protocol, &locator);
+  HICN_OPS6->set_interest_name (type, &hicn->protocol, &name);
+  HICN_OPS6->set_lifetime (type, &hicn->protocol, interest_lifetime);
+
+  /* Recompute the TCP checksum over the rewritten packet */
+  calculate_tcp_checksum_v6 (vm, b0);
+}
+
+
+
+/*
+ * Recompute the TCP checksum of an IPv4 packet in place.
+ * NOTE(review): assumes the IPv4 header carries no options (L4 starts at
+ * a fixed sizeof(ip4_header_t) offset) -- confirm for generated packets.
+ */
+void
+calculate_tcp_checksum_v4 (vlib_main_t * vm, vlib_buffer_t * b0)
+{
+  ip4_header_t *ip = (ip4_header_t *) vlib_buffer_get_current (b0);
+  tcp_header_t *tcp = (tcp_header_t *) ((u8 *) ip + sizeof (ip4_header_t));
+  u32 l4_len = clib_net_to_host_u16 (ip->length) - sizeof (ip4_header_t);
+  ip_csum_t csum;
+
+  /* Pseudo-header: source and destination address ... */
+  if (BITS (csum) == 32)
+    {
+      csum = clib_mem_unaligned (&ip->src_address, u32);
+      csum =
+	ip_csum_with_carry (csum, clib_mem_unaligned (&ip->dst_address, u32));
+    }
+  else
+    csum = clib_mem_unaligned (&ip->src_address, u64);
+
+  /* ... plus protocol and TCP segment length */
+  csum = ip_csum_with_carry
+    (csum, clib_host_to_net_u32 (l4_len + (ip->protocol << 16)));
+
+  /* The stale checksum must not contribute to the new one */
+  tcp->checksum = 0;
+
+  /* Fold in the TCP header and payload, possibly chained buffers */
+  csum = ip_incremental_checksum_buffer (vm, b0, sizeof (ip4_header_t),
+					 l4_len, csum);
+
+  tcp->checksum = ~ip_csum_fold (csum);
+}
+
+/*
+ * Recompute the TCP checksum of an IPv6 packet in place.
+ *
+ * NOTE(review): uses ip0->protocol as the pseudo-header next-header value,
+ * which is correct only when no IPv6 extension headers are present --
+ * confirm this holds for generated packets.
+ */
+void
+calculate_tcp_checksum_v6 (vlib_main_t * vm, vlib_buffer_t * b0)
+{
+  ip6_header_t *ip0;
+  tcp_header_t *tcp0;
+  ip_csum_t sum0;
+  u32 tcp_len0;
+  int i;
+
+  ip0 = (ip6_header_t *) (vlib_buffer_get_current (b0));
+  tcp0 =
+    (tcp_header_t *) (vlib_buffer_get_current (b0) + sizeof (ip6_header_t));
+  tcp_len0 = clib_net_to_host_u16 (ip0->payload_length);
+
+  /* Pseudo-header: TCP length and next-header protocol ... */
+  sum0 = clib_host_to_net_u32 (tcp_len0 + (ip0->protocol << 16));
+
+  /*
+   * ... plus the FULL 128-bit source and destination addresses. The
+   * previous code folded in only the first machine word of the source
+   * address (and skipped the destination entirely on 64-bit builds),
+   * yielding a wrong IPv6 checksum. Mirror VPP's
+   * ip6_tcp_udp_icmp_compute_checksum and walk every address word.
+   */
+  for (i = 0; i < ARRAY_LEN (ip0->src_address.as_uword); i++)
+    {
+      sum0 = ip_csum_with_carry
+	(sum0, clib_mem_unaligned (&ip0->src_address.as_uword[i], uword));
+      sum0 = ip_csum_with_carry
+	(sum0, clib_mem_unaligned (&ip0->dst_address.as_uword[i], uword));
+    }
+
+  /* Invalidate possibly old checksum. */
+  tcp0->checksum = 0;
+
+  u32 tcp_offset = sizeof (ip6_header_t);
+  sum0 = ip_incremental_checksum_buffer (vm, b0, tcp_offset, tcp_len0, sum0);
+
+  tcp0->checksum = ~ip_csum_fold (sum0);
+}
+
+/* *INDENT-OFF* */
+/*
+ * Registration for the interest-generator client node. Parsed interests
+ * are rewritten and sent to ip4/ip6 lookup or, when hicn_underneath is
+ * set (next += 2), directly to the hicn-iface input nodes.
+ */
+VLIB_REGISTER_NODE(hicn_pg_interest_node) ={
+  .function = hicnpg_client_interest_node_fn,
+  .name = "hicnpg-interest",
+  .vector_size = sizeof(u32),
+  .format_trace = format_hicnpg_trace,
+  .type = VLIB_NODE_TYPE_INTERNAL,
+  .n_errors = ARRAY_LEN(hicnpg_error_strings),
+  .error_strings = hicnpg_error_strings,
+  .n_next_nodes = HICNPG_N_NEXT,
+  .next_nodes =
+  {
+    [HICNPG_INTEREST_NEXT_V4_LOOKUP] = "ip4-lookup",
+    [HICNPG_INTEREST_NEXT_V6_LOOKUP] = "ip6-lookup",
+    [HICNPG_INTEREST_NEXT_IFACE_IP4_INPUT] = "hicn-iface-ip4-input",
+    [HICNPG_INTEREST_NEXT_IFACE_IP6_INPUT] = "hicn-iface-ip6-input",
+    [HICNPG_INTEREST_NEXT_DROP] = "error-drop"
+  },
+};
+/* *INDENT-ON* */
+
+/*
+ * Next graph nodes, which reference the list in the actual registration
+ * block below. The data node only counts packets, so drop is the sole
+ * disposition.
+ */
+typedef enum
+{
+  HICNPG_DATA_NEXT_DROP,
+  HICNPG_DATA_N_NEXT,
+} hicnpg_data_next_t;
+
+/* Trace context struct for the data node */
+typedef struct
+{
+  u32 next_index;		/* next node chosen for the packet */
+  u32 sw_if_index;		/* RX interface of the packet */
+  u8 pkt_type;			/* always 0 in this node */
+  u16 msg_type;			/* always 1 in this node */
+} icnpg_data_trace_t;
+
+/*
+ * Packet trace format function for the data node.
+ *
+ * The trace records are added by hicnpg_client_data_node_fn as
+ * icnpg_data_trace_t, so decode them with the same type: the previous
+ * hicnpg_trace_t cast only worked because the two structs happen to
+ * share a layout.
+ */
+static u8 *
+format_hicnpg_data_trace (u8 * s, va_list * args)
+{
+  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+  icnpg_data_trace_t *t = va_arg (*args, icnpg_data_trace_t *);
+
+  s = format (s, "HICNPG: pkt: %d, msg %d, sw_if_index %d, next index %d",
+	      (int) t->pkt_type, (int) t->msg_type,
+	      t->sw_if_index, t->next_index);
+  return (s);
+}
+
+
+/*
+ * Node function for the hicn packet-generator client data node. It counts
+ * the content packets flowing back to the generator; every packet is then
+ * dropped (HICNPG_DATA_NEXT_DROP is the only registered next node).
+ */
+static uword
+hicnpg_client_data_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
+			    vlib_frame_t * frame)
+{
+  u32 n_left_from, *from, *to_next;
+  hicnpg_data_next_t next_index;
+  u32 pkts_processed = 0;
+  u32 content_msgs_received = 0;
+  u32 bi0, bi1;
+  vlib_buffer_t *b0, *b1;
+  /* Constants recorded in the trace records below */
+  u8 pkt_type0 = 0, pkt_type1 = 0;
+  u16 msg_type0 = 1, msg_type1 = 1;
+
+  from = vlib_frame_vector_args (frame);
+  n_left_from = frame->n_vectors;
+  next_index = node->cached_next_index;
+
+  while (n_left_from > 0)
+    {
+      u32 n_left_to_next;
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+      /* Dual loop: count (and trace) packets two at a time */
+      while (n_left_from >= 4 && n_left_to_next >= 2)
+	{
+	  u32 next0 = HICNPG_DATA_NEXT_DROP;
+	  u32 next1 = HICNPG_DATA_NEXT_DROP;
+	  u32 sw_if_index0, sw_if_index1;
+
+	  /* Prefetch next iteration. */
+	  {
+	    vlib_buffer_t *p2, *p3;
+
+	    p2 = vlib_get_buffer (vm, from[2]);
+	    p3 = vlib_get_buffer (vm, from[3]);
+
+	    vlib_prefetch_buffer_header (p2, LOAD);
+	    vlib_prefetch_buffer_header (p3, LOAD);
+
+	    CLIB_PREFETCH (p2->data, (2 * CLIB_CACHE_LINE_BYTES), STORE);
+	    CLIB_PREFETCH (p3->data, (2 * CLIB_CACHE_LINE_BYTES), STORE);
+	  }
+
+	  /*
+	   * speculatively enqueue b0 and b1 to the current
+	   * next frame
+	   */
+	  to_next[0] = bi0 = from[0];
+	  to_next[1] = bi1 = from[1];
+	  from += 2;
+	  to_next += 2;
+	  n_left_from -= 2;
+	  n_left_to_next -= 2;
+
+	  b0 = vlib_get_buffer (vm, bi0);
+	  b1 = vlib_get_buffer (vm, bi1);
+
+	  sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+	  sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
+
+	  /* Increment a counter */
+	  content_msgs_received += 2;
+
+	  if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
+	    {
+	      if (b0->flags & VLIB_BUFFER_IS_TRACED)
+		{
+		  icnpg_data_trace_t *t =
+		    vlib_add_trace (vm, node, b0, sizeof (*t));
+		  t->pkt_type = pkt_type0;
+		  t->msg_type = msg_type0;
+		  t->sw_if_index = sw_if_index0;
+		  t->next_index = next0;
+		}
+	      if (b1->flags & VLIB_BUFFER_IS_TRACED)
+		{
+		  icnpg_data_trace_t *t =
+		    vlib_add_trace (vm, node, b1, sizeof (*t));
+		  t->pkt_type = pkt_type1;
+		  t->msg_type = msg_type1;
+		  t->sw_if_index = sw_if_index1;
+		  t->next_index = next1;
+		}
+	    }
+	  pkts_processed += 2;
+	}
+
+      /* Single loop: remaining packets one at a time */
+      while (n_left_from > 0 && n_left_to_next > 0)
+	{
+	  u32 next0 = HICNPG_DATA_NEXT_DROP;
+	  u32 sw_if_index0;
+
+	  /* speculatively enqueue b0 to the current next frame */
+	  bi0 = from[0];
+	  to_next[0] = bi0;
+	  from += 1;
+	  to_next += 1;
+	  n_left_from -= 1;
+	  n_left_to_next -= 1;
+
+	  b0 = vlib_get_buffer (vm, bi0);
+
+	  sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+
+	  /* Increment a counter */
+	  content_msgs_received++;
+
+	  if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
+			     && (b0->flags & VLIB_BUFFER_IS_TRACED)))
+	    {
+	      icnpg_data_trace_t *t =
+		vlib_add_trace (vm, node, b0, sizeof (*t));
+	      t->pkt_type = pkt_type0;
+	      t->msg_type = msg_type0;
+	      t->sw_if_index = sw_if_index0;
+	      t->next_index = next0;
+	    }
+	  pkts_processed++;
+	}
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+    }
+
+  vlib_node_increment_counter (vm, hicn_pg_data_node.index,
+			       HICNPG_ERROR_PROCESSED, pkts_processed);
+  vlib_node_increment_counter (vm, hicn_pg_data_node.index,
+			       HICNPG_ERROR_CONTENT_MSGS_RECEIVED,
+			       content_msgs_received);
+  return (frame->n_vectors);
+}
+
+/* *INDENT-OFF* */
+/*
+ * Registration for the data-counting client node: every packet is
+ * accounted for and then dropped.
+ */
+VLIB_REGISTER_NODE(hicn_pg_data_node) =
+{
+  .function = hicnpg_client_data_node_fn,
+  .name = "hicnpg-data",
+  .vector_size = sizeof(u32),
+  .format_trace = format_hicnpg_data_trace,
+  .type = VLIB_NODE_TYPE_INTERNAL,
+  .n_errors = ARRAY_LEN(hicnpg_error_strings),
+  .error_strings = hicnpg_error_strings,
+  .n_next_nodes = HICNPG_DATA_N_NEXT,
+  .next_nodes =
+  {
+    [HICNPG_DATA_NEXT_DROP] = "error-drop"
+  },
+};
+/* *INDENT-ON* */
+
+/*
+ * End of packet-generator client node
+ */
+
+/*
+ * Beginning of packet-generation server node
+ */
+
+/* Registration struct for a graph node */
+vlib_node_registration_t hicn_pg_server_node;
+
+/* Stats, which end up called "error" even though they aren't... */
+#define foreach_icnpg_server_error \
+_(PROCESSED, "hICN PG Server packets processed") \
+_(DROPPED, "hICN PG Server packets dropped")
+
+/* Counter indices generated from the error list above */
+typedef enum
+{
+#define _(sym,str) HICNPG_SERVER_ERROR_##sym,
+  foreach_icnpg_server_error
+#undef _
+  HICNPG_SERVER_N_ERROR,
+} icnpg_server_error_t;
+
+/* Human-readable counter names, same order as the enum above */
+static char *icnpg_server_error_strings[] = {
+#define _(sym,string) string,
+  foreach_icnpg_server_error
+#undef _
+};
+
+/*
+ * Next graph nodes, which reference the list in the actual registration
+ * block below
+ */
+typedef enum
+{
+  HICNPG_SERVER_NEXT_V4_LOOKUP,
+  HICNPG_SERVER_NEXT_V6_LOOKUP,
+  HICNPG_SERVER_NEXT_FACE_IP4_INPUT,
+  HICNPG_SERVER_NEXT_FACE_IP6_INPUT,
+  HICNPG_SERVER_NEXT_DROP,
+  HICNPG_SERVER_N_NEXT,
+} icnpg_server_next_t;
+
+/* Trace context struct for the server node */
+typedef struct
+{
+  u32 next_index;		/* next node chosen for the packet */
+  u32 sw_if_index;		/* RX interface of the packet */
+  u8 pkt_type;			/* always 0 in this node */
+  u16 msg_type;			/* always 0 in this node */
+} hicnpg_server_trace_t;
+
+/* Format a hicnpg_server_trace_t record for "show trace" output */
+static u8 *
+format_icnpg_server_trace (u8 * s, va_list * args)
+{
+  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+  hicnpg_server_trace_t *trace = va_arg (*args, hicnpg_server_trace_t *);
+
+  return format (s,
+		 "HICNPG SERVER: pkt: %d, msg %d, sw_if_index %d, next index %d",
+		 (int) trace->pkt_type, (int) trace->msg_type,
+		 trace->sw_if_index, trace->next_index);
+}
+
+/*
+ * Node function for the icn packet-generator server: each parsed interest
+ * is converted in place into a data packet (payload taken from the
+ * pre-built buffer at pgen_svr_buffer_idx) and sent back out.
+ */
+static uword
+hicnpg_node_server_fn (vlib_main_t * vm,
+		       vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+  u32 n_left_from, *from, *to_next;
+  icnpg_server_next_t next_index;
+  u32 pkts_processed = 0, pkts_dropped = 0;
+  u32 bi0, bi1;
+  vlib_buffer_t *b0, *b1;
+  /* Constants recorded in the trace records below */
+  u8 pkt_type0 = 0, pkt_type1 = 0;
+  u16 msg_type0 = 0, msg_type1 = 0;
+  hicn_header_t *hicn0 = NULL, *hicn1 = NULL;
+  hicn_name_t name0, name1;
+  u16 namelen0, namelen1;
+
+  hicnpg_server_main_t *hpgsm = &hicnpg_server_main;
+
+  from = vlib_frame_vector_args (frame);
+
+  n_left_from = frame->n_vectors;
+  next_index = node->cached_next_index;
+
+  while (n_left_from > 0)
+    {
+      u32 n_left_to_next;
+
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+
+      /* Dual loop: convert packets two at a time */
+      while (n_left_from >= 4 && n_left_to_next >= 2)
+	{
+	  u32 next0 = HICNPG_SERVER_NEXT_DROP;
+	  u32 next1 = HICNPG_SERVER_NEXT_DROP;
+	  u8 isv6_0 = 0;
+	  u8 isv6_1 = 0;
+	  u32 sw_if_index0, sw_if_index1;
+
+	  /* Prefetch next iteration. */
+	  {
+	    vlib_buffer_t *p2, *p3;
+
+	    p2 = vlib_get_buffer (vm, from[2]);
+	    p3 = vlib_get_buffer (vm, from[3]);
+
+	    vlib_prefetch_buffer_header (p2, LOAD);
+	    vlib_prefetch_buffer_header (p3, LOAD);
+
+	    CLIB_PREFETCH (p2->data, (2 * CLIB_CACHE_LINE_BYTES), STORE);
+	    CLIB_PREFETCH (p3->data, (2 * CLIB_CACHE_LINE_BYTES), STORE);
+	  }
+
+	  /*
+	   * speculatively enqueue b0 and b1 to the current
+	   * next frame
+	   */
+	  to_next[0] = bi0 = from[0];
+	  to_next[1] = bi1 = from[1];
+	  from += 2;
+	  to_next += 2;
+	  n_left_from -= 2;
+	  n_left_to_next -= 2;
+
+	  b0 = vlib_get_buffer (vm, bi0);
+	  b1 = vlib_get_buffer (vm, bi1);
+
+	  sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+	  sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
+
+	  if (hicn_interest_parse_pkt (b0, &name0, &namelen0, &hicn0, &isv6_0)
+	      == HICN_ERROR_NONE)
+	    {
+	      /* this node grabs only interests */
+	      /* rb holds the pre-built arbitrary content payload */
+	      vlib_buffer_t *rb = NULL;
+	      rb = vlib_get_buffer (vm, hpgsm->pgen_svr_buffer_idx);
+
+	      isv6_0 ? convert_interest_to_data_v6 (vm, b0, rb,
+						    bi0) :
+		convert_interest_to_data_v4 (vm, b0, rb, bi0);
+
+	      next0 =
+		isv6_0 ? HICNPG_SERVER_NEXT_V6_LOOKUP :
+		HICNPG_SERVER_NEXT_V4_LOOKUP;
+	      /* if hicn_underneath is set, this selects HICNPG_SERVER_NEXT_FACE_IP4/6_INPUT instead of the ip lookup */
+	      next0 += 2 * hpgsm->hicn_underneath;
+	    }
+	  if (hicn_interest_parse_pkt (b1, &name1, &namelen1, &hicn1, &isv6_1)
+	      == HICN_ERROR_NONE)
+	    {
+	      /* this node grabs only interests */
+	      vlib_buffer_t *rb = NULL;
+	      rb = vlib_get_buffer (vm, hpgsm->pgen_svr_buffer_idx);
+
+	      isv6_1 ? convert_interest_to_data_v6 (vm, b1, rb,
+						    bi1) :
+		convert_interest_to_data_v4 (vm, b1, rb, bi1);
+
+	      next1 =
+		isv6_1 ? HICNPG_SERVER_NEXT_V6_LOOKUP :
+		HICNPG_SERVER_NEXT_V4_LOOKUP;
+	      /* if hicn_underneath is set, this selects HICNPG_SERVER_NEXT_FACE_IP4/6_INPUT instead of the ip lookup */
+	      next1 += 2 * hpgsm->hicn_underneath;
+	    }
+	  pkts_processed += 2;
+
+	  if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
+	    {
+	      if (b0->flags & VLIB_BUFFER_IS_TRACED)
+		{
+		  hicnpg_server_trace_t *t =
+		    vlib_add_trace (vm, node, b0, sizeof (*t));
+		  t->pkt_type = pkt_type0;
+		  t->msg_type = msg_type0;
+		  t->sw_if_index = sw_if_index0;
+		  t->next_index = next0;
+		}
+	      if (b1->flags & VLIB_BUFFER_IS_TRACED)
+		{
+		  hicnpg_server_trace_t *t =
+		    vlib_add_trace (vm, node, b1, sizeof (*t));
+		  t->pkt_type = pkt_type1;
+		  t->msg_type = msg_type1;
+		  t->sw_if_index = sw_if_index1;
+		  t->next_index = next1;
+		}
+	    }
+	  if (next0 == HICNPG_SERVER_NEXT_DROP)
+	    {
+	      pkts_dropped++;
+	    }
+	  if (next1 == HICNPG_SERVER_NEXT_DROP)
+	    {
+	      pkts_dropped++;
+	    }
+	  /*
+	   * verify speculative enqueues, maybe switch current
+	   * next frame
+	   */
+	  vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
+					   to_next, n_left_to_next,
+					   bi0, bi1, next0, next1);
+	}
+
+      /* Single loop: remaining packets one at a time */
+      while (n_left_from > 0 && n_left_to_next > 0)
+	{
+	  u32 next0 = HICNPG_SERVER_NEXT_DROP;
+	  u32 sw_if_index0 = ~0;
+	  u8 isv6_0 = 0;
+
+	  /* speculatively enqueue b0 to the current next frame */
+	  bi0 = from[0];
+	  to_next[0] = bi0;
+	  from += 1;
+	  to_next += 1;
+	  n_left_from -= 1;
+	  n_left_to_next -= 1;
+
+	  b0 = vlib_get_buffer (vm, bi0);
+
+	  sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+
+
+	  if (hicn_interest_parse_pkt (b0, &name0, &namelen0, &hicn0, &isv6_0)
+	      == HICN_ERROR_NONE)
+	    {
+	      /* this node grabs only interests */
+	      vlib_buffer_t *rb = NULL;
+	      rb = vlib_get_buffer (vm, hpgsm->pgen_svr_buffer_idx);
+
+	      isv6_0 ? convert_interest_to_data_v6 (vm, b0, rb,
+						    bi0) :
+		convert_interest_to_data_v4 (vm, b0, rb, bi0);
+
+	      next0 =
+		isv6_0 ? HICNPG_SERVER_NEXT_V6_LOOKUP :
+		HICNPG_SERVER_NEXT_V4_LOOKUP;
+	      /* if hicn_underneath is set, this selects HICNPG_SERVER_NEXT_FACE_IP4/6_INPUT instead of the ip lookup */
+	      next0 += 2 * hpgsm->hicn_underneath;
+	    }
+	  if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
+			     && (b0->flags & VLIB_BUFFER_IS_TRACED)))
+	    {
+	      hicnpg_server_trace_t *t =
+		vlib_add_trace (vm, node, b0, sizeof (*t));
+	      t->pkt_type = pkt_type0;
+	      t->msg_type = msg_type0;
+	      t->sw_if_index = sw_if_index0;
+	      t->next_index = next0;
+	    }
+	  pkts_processed += 1;
+
+	  if (next0 == HICNPG_SERVER_NEXT_DROP)
+	    {
+	      pkts_dropped++;
+	    }
+	  /*
+	   * verify speculative enqueue, maybe switch current
+	   * next frame
+	   */
+	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+					   to_next, n_left_to_next,
+					   bi0, next0);
+	}
+
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+    }
+
+  vlib_node_increment_counter (vm, hicn_pg_server_node.index,
+			       HICNPG_SERVER_ERROR_PROCESSED, pkts_processed);
+  vlib_node_increment_counter (vm, hicn_pg_server_node.index,
+			       HICNPG_SERVER_ERROR_DROPPED, pkts_dropped);
+
+  return (frame->n_vectors);
+}
+
+/**
+ * @brief Convert a received IPv4 interest into the matching data packet.
+ *
+ * Appends payload copied from the pre-built content buffer rb, swaps the
+ * IP source and destination, then fixes length and checksums in place.
+ */
+void
+convert_interest_to_data_v4 (vlib_main_t * vm, vlib_buffer_t * b0,
+			     vlib_buffer_t * rb, u32 bi0)
+{
+  hicn_header_t *h0 = vlib_buffer_get_current (b0);
+
+  /* Get the packet length */
+  u16 pkt_len = clib_net_to_host_u16 (h0->v4.ip.len);
+
+  /*
+   * Rule of thumb: We want the size of the IP packet to be <= 1500 bytes.
+   * Clamp to 0 when the interest alone already exceeds the limit: the
+   * previous "1500 - pkt_len" underflowed the u16 in that case and
+   * requested a huge copy.
+   */
+  u16 bytes_to_copy = rb->current_length;
+  if ((bytes_to_copy + pkt_len) > 1500)
+    {
+      bytes_to_copy = pkt_len < 1500 ? 1500 - pkt_len : 0;
+    }
+  /* Add content to the data packet */
+  vlib_buffer_add_data (vm,
+			VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX, &bi0,
+			rb->data, bytes_to_copy);
+
+  /* The buffer chain may have grown: refresh the pointers */
+  b0 = vlib_get_buffer (vm, bi0);
+
+  h0 = vlib_buffer_get_current (b0);
+
+  /* Swap src/dst so the data flows back towards the consumer */
+  ip4_address_t src_addr = h0->v4.ip.saddr;
+  h0->v4.ip.saddr = h0->v4.ip.daddr;
+  h0->v4.ip.daddr = src_addr;
+
+  /* Fix the total length, then the IP and TCP checksums */
+  h0->v4.ip.len = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0));
+  h0->v4.ip.csum = ip4_header_checksum ((ip4_header_t *) & (h0->v4.ip));
+  calculate_tcp_checksum_v4 (vm, b0);
+}
+
+/**
+ * @brief Convert a received IPv6 interest into the matching data packet.
+ *
+ * Appends payload copied from the pre-built content buffer rb, swaps the
+ * IP source and destination, then fixes length and checksums in place.
+ */
+void
+convert_interest_to_data_v6 (vlib_main_t * vm, vlib_buffer_t * b0,
+			     vlib_buffer_t * rb, u32 bi0)
+{
+  hicn_header_t *h0 = vlib_buffer_get_current (b0);
+
+  /* Get the total packet length (v6.ip.len is the payload length) */
+  uint16_t pkt_len =
+    clib_net_to_host_u16 (h0->v6.ip.len) + sizeof (ip6_header_t);
+
+  /*
+   * Figure out how many bytes we can add to the content.
+   *
+   * Rule of thumb: We want the size of the IP packet to be <= 1500 bytes
+   * (the old comment said 1400, but the code enforces 1500). Clamp to 0
+   * when the interest alone already exceeds the limit: the previous
+   * "1500 - pkt_len" underflowed the u16 in that case.
+   */
+  u16 bytes_to_copy = rb->current_length;
+  if ((bytes_to_copy + pkt_len) > 1500)
+    {
+      bytes_to_copy = pkt_len < 1500 ? 1500 - pkt_len : 0;
+    }
+  /* Add content to the data packet */
+  vlib_buffer_add_data (vm,
+			VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX, &bi0,
+			rb->data, bytes_to_copy);
+
+  /* The buffer chain may have grown: refresh the pointers */
+  b0 = vlib_get_buffer (vm, bi0);
+
+  h0 = vlib_buffer_get_current (b0);
+  /* Swap src/dst so the data flows back towards the consumer */
+  ip6_address_t src_addr = h0->v6.ip.saddr;
+  h0->v6.ip.saddr = h0->v6.ip.daddr;
+  h0->v6.ip.daddr = src_addr;
+
+  h0->v6.ip.len = clib_host_to_net_u16 (vlib_buffer_length_in_chain
+					(vm, b0) - sizeof (ip6_header_t));
+  /*
+   * NOTE(review): data_offset reserved bits and urg_ptr appear to be used
+   * as hICN signaling here -- confirm against the hICN wire format spec.
+   */
+  h0->v6.tcp.data_offset_and_reserved |= 0x0f;
+  h0->v6.tcp.urg_ptr = htons (0xffff);
+
+  calculate_tcp_checksum_v6 (vm, b0);
+}
+
+/* *INDENT-OFF* */
+/*
+ * Registration for the server node. Converted data packets go to ip4/ip6
+ * lookup or, when hicn_underneath is set (next += 2), directly to the
+ * hicn-face input nodes.
+ */
+VLIB_REGISTER_NODE(hicn_pg_server_node) =
+{
+  .function = hicnpg_node_server_fn,
+  .name = "hicnpg-server",
+  .vector_size = sizeof(u32),
+  .format_trace = format_icnpg_server_trace,
+  .type = VLIB_NODE_TYPE_INTERNAL,
+  .n_errors = ARRAY_LEN(icnpg_server_error_strings),
+  .error_strings = icnpg_server_error_strings,
+  .n_next_nodes = HICNPG_SERVER_N_NEXT,
+  /* edit / add dispositions here */
+  .next_nodes =
+  {
+    [HICNPG_SERVER_NEXT_V4_LOOKUP] = "ip4-lookup",
+    [HICNPG_SERVER_NEXT_V6_LOOKUP] = "ip6-lookup",
+    [HICNPG_SERVER_NEXT_FACE_IP4_INPUT] = "hicn-face-ip4-input",
+    [HICNPG_SERVER_NEXT_FACE_IP6_INPUT] = "hicn-face-ip6-input",
+    [HICNPG_SERVER_NEXT_DROP] = "error-drop",
+  },
+};
+/* *INDENT-ON* */
+
+/*
+ * End of packet-generator server node
+ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/pg.h b/hicn-plugin/src/pg.h
new file mode 100755
index 000000000..083afb6b3
--- /dev/null
+++ b/hicn-plugin/src/pg.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HICN_PG_H__
+#define __HICN_PG_H__
+
+/* Subnet-mask for punting data in the client node */
+#define SUBNET_MASK4 32
+#define SUBNET_MASK6 128
+
+/* State shared by the packet-generator client nodes */
+typedef struct hicnpg_main_s
+{
+  u32 index;			/* running counter: index % n_flows selects the
+				 * flow, (index / n_flows) % max_seq_number the
+				 * sequence number */
+  u32 index_ifaces;		/* running counter cycling over the emulated
+				 * consumers (index_ifaces % n_ifaces) */
+  u32 max_seq_number;		/* wrap point for generated sequence numbers */
+  u32 n_flows;			/* number of parallel flows to generate */
+  u32 n_ifaces;			/* number of emulated consumers */
+  u32 hicn_underneath;		/* when set, send to hicn-iface nodes instead
+				 * of ip lookup */
+  ip46_address_t pgen_clt_src_addr;	/* base source (locator) address */
+  ip46_address_t pgen_clt_hicn_name;	/* base hICN name prefix */
+  u16 interest_lifetime;	/* lifetime stamped into each interest */
+} hicnpg_main_t;
+
+extern hicnpg_main_t hicnpg_main;
+
+/* State shared by the packet-generator server node */
+typedef struct hicnpg_server_main_s
+{
+  u32 node_index;		/* graph node index of the server node */
+  u32 hicn_underneath;		/* when set, send to hicn-face nodes instead
+				 * of ip lookup */
+  /* Arbitrary content used as payload for generated data packets */
+  u32 pgen_svr_buffer_idx;
+} hicnpg_server_main_t;
+
+extern hicnpg_server_main_t hicnpg_server_main;
+
+#endif // __HICN_PG_H__
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/hicn-plugin/src/punt.c b/hicn-plugin/src/punt.c
new file mode 100755
index 000000000..ea553bf76
--- /dev/null
+++ b/hicn-plugin/src/punt.c
@@ -0,0 +1,1005 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdarg.h>
+#include <stddef.h> // offsetof()
+#include <inttypes.h>
+#include <vlib/vlib.h>
+#include <vppinfra/error.h>
+#include <vnet/ip/format.h>
+#include <vnet/classify/in_out_acl.h>
+#include <vlibapi/api.h>
+#include <vlibmemory/api.h>
+#include <vnet/ip/ip4_packet.h>
+#include <vnet/ip/ip6_packet.h>
+#include <vnet/ethernet/packet.h>
+#include <vlib/global_funcs.h>
+#include <hicn/hicn.h>
+
+#include "hicn.h"
+#include "infra.h"
+#include "parser.h"
+#include "mgmt.h"
+#include "punt.h"
+#include "error.h"
+#include "route.h"
+
+/* Those are not static as they are used for pgen in hicn_cli.c */
+/* Plain IPv4 hICN: classify on protocol/version fields */
+ip_version_t ipv4 = {
+  .tbl = (u32 *) hicn_punt_glb.ip4_vnet_tbl_idx,
+  .addr_len_bits = IPV4_ADDR_LEN_BITS,
+  .protocol_field = &ipv4_protocol,
+  .version_field = &ipv4_version,
+  .ip_version = 0x40,
+};
+
+/* Plain IPv6 hICN: classify on protocol/version fields */
+ip_version_t ipv6 = {
+  .tbl = (u32 *) hicn_punt_glb.ip6_vnet_tbl_idx,
+  .addr_len_bits = IPV6_ADDR_LEN_BITS,
+  .protocol_field = &ipv6_protocol,
+  .version_field = &ipv6_version,
+  .ip_version = 0x60,
+};
+
+/* UDP tunnel variants: classify on UDP ports instead of version.
+ * Naming presumably follows "udpXY" = outer IPvX carrying IPvY match
+ * (addr_len_bits gives the matched address length) -- TODO confirm. */
+ip_version_t ipv44 = {
+  .tbl = (u32 *) hicn_punt_glb.udp44_vnet_tbl_idx,
+  .addr_len_bits = IPV4_ADDR_LEN_BITS,
+  .protocol_field = &udp4_protocol,
+  .udp_sport = &udp4_sport,
+  .udp_dport = &udp4_dport,
+  .ip_version = 0x40,
+};
+
+ip_version_t ipv64 = {
+  .tbl = (u32 *) hicn_punt_glb.udp64_vnet_tbl_idx,
+  .addr_len_bits = IPV4_ADDR_LEN_BITS,
+  .protocol_field = &udp6_protocol,
+  .udp_sport = &udp6_sport,
+  .udp_dport = &udp6_dport,
+  .ip_version = 0x60,
+};
+
+ip_version_t ipv46 = {
+  .tbl = (u32 *) hicn_punt_glb.udp46_vnet_tbl_idx,
+  .addr_len_bits = IPV6_ADDR_LEN_BITS,
+  .protocol_field = &udp4_protocol,
+  .udp_sport = &udp4_sport,
+  .udp_dport = &udp4_dport,
+  .ip_version = 0x40,
+};
+
+ip_version_t ipv66 = {
+  .tbl = (u32 *) hicn_punt_glb.udp66_vnet_tbl_idx,
+  .addr_len_bits = IPV6_ADDR_LEN_BITS,
+  .protocol_field = &udp6_protocol,
+  .udp_sport = &udp6_sport,
+  .udp_dport = &udp6_dport,
+  .ip_version = 0x60,
+};
+
+/*
+ * Instantiate one field_t per entry of foreach_field: offset and length
+ * are derived from the corresponding header struct member.
+ */
+#define _(NAME, BASE, LAYER, FIELD, PUNT_ID) \
+  field_t NAME = { \
+    .offset = BASE + offsetof(LAYER, FIELD), \
+    .len = STRUCT_SIZE_OF(LAYER, FIELD), \
+    .punt_id = PUNT_ID, \
+  };
+foreach_field
+#undef _
+/*
+ * In the latest version, we let faces direct the traffic towards Interest
+ * processing, or MAP-Me nodes. Punting should only make sure that the ICMP
+ * packets are also sent to the face node. We added the following defines to
+ * determine the next node to send punted packets. Ideally we might remove
+ * protocol number check from punting rule.
+ */
+#define NEXT_MAPME_CTRL4 hicn_punt_glb.next_hit_interest_ipv4
+#define NEXT_MAPME_ACK4 hicn_punt_glb.next_hit_data_ipv4
+#define NEXT_MAPME_CTRL6 hicn_punt_glb.next_hit_interest_ipv6
+#define NEXT_MAPME_ACK6 hicn_punt_glb.next_hit_data_ipv6
+
+/* Maximum number of vector allowed in match. Value hardcoded in vnet_classify_hash_packet_inline in vnet_classify.h */
+#define MAX_MATCH_SIZE 5
+/**
+ * HICN global Punt Info
+ *
+ *
+ *
+ */
+hicn_punt_glb_t hicn_punt_glb;
+
+/**
+ * We use the function build_bit_array to populate an initially empty buffer
+ * with masks/values for the parts of the packet to match. The function also
+ * returns the correct skip and match values to pass to vnet_classify_*, which
+ * are the number of vectors to skip/match during classification (they should be
+ * multiples of vector size = CLASSIFIER_VECTOR_SIZE).
+ *
+ * offsets:
+ * 0 14 offsetof(IP_HDR, SRC)
+ * | | /
+ * +----------+----+-------+-------+----+-...
+ * | ETH | IP . src . dst . |
+ * +----------+----+-------+-------+----+-...
+ * | | |
+ * |<- skip=1 ->|<--- match=2/3 --->|
+ *
+ *
+ */
+
+/**
+ * The following section defines a couple of protocol fields that we will use
+ * for creating the buffer. We retrieve the offset and length on those fields
+ * based on the (portable) header struct aliases defined in libhicn.
+ *
+ * In the foreach_field macro, the punt_id field is used as convenience as we
+ * will have to create different classifier tables based on whether we punt
+ * interests (on dst) or data (on src). It is undefined (NA) otherwise.
+ */
+
+#define NA 0
+
+
+/**
+ * @brief Expand a prefix length into a byte-wise bitmask.
+ * @param mask [in] prefix length in bits
+ * @param buffer [out] output buffer receiving the mask
+ * @param len [in] output buffer length in bytes (zero-filled beyond mask)
+ */
+static void
+build_ip_address_mask (u8 mask, u8 * buffer, u32 len)
+{
+  u32 full_bytes = mask / 8;
+  u32 rest_bits = mask % 8;
+
+  /*
+   * Start from an all-zero mask (for IPv6 the whole 16-byte match area
+   * must be initialized), then set the fully-covered bytes to 0xff.
+   */
+  memset (buffer, 0, len);
+  memset (buffer, 0xff, full_bytes);
+
+  /* Partial byte: keep only the rest_bits most significant bits set */
+  if (rest_bits != 0)
+    buffer[full_bytes] = (u8) (0xff << (8 - rest_bits));
+}
+
+#define CEIL_DIV(x, y) (1 + ((x - 1) / y))
+
+/**
+ * @brief Create a bit array from field/value list
+ * @param buffer [out] output buffer
+ * @param len [in] output buffer length
+ * @param base_offset [in] offset added to every field position
+ * @param skip [out] number of CLASSIFIER_VECTOR to skip
+ * @param match [out] number of CLASSIFIER_VECTOR to match
+ * @param vl [in] NULL-terminated list of [field_t *, value] pairs used to
+ * populate buffer
+ */
+static int
+build_bit_array (u8 * buffer, u32 len, u32 base_offset, u32 * skip,
+		 u32 * match, va_list vl)
+{
+  /* u32 instead of u8: positions can exceed 255 and len may too */
+  u32 min = len, max = 0;
+  field_t *field;
+  u8 *value;
+  u32 pos;
+
+  /* Clear buffer */
+  memset (buffer, 0, len);
+
+  for (;;)
+    {
+      field = va_arg (vl, field_t *);
+      if (!field)
+	break;
+
+      /*
+       * Check that the field, shifted by base_offset, still fits in the
+       * reserved buffer. The previous check ignored base_offset, so the
+       * memcpy below could write past the end of buffer.
+       */
+      pos = base_offset + field->offset;
+      if (pos + field->len > len)
+	goto ERR_PUNT;
+
+      /*
+       * Copy the value of the field inside the buffer at the
+       * correct offset
+       */
+      value = va_arg (vl, u8 *);
+      memcpy (buffer + pos, value, field->len);
+      if (min > pos)
+	min = pos;
+      if (max < pos + field->len)
+	max = pos + field->len;
+    }
+
+  /* We can skip multiples of the vector match */
+  *skip = min / CLASSIFIER_VECTOR_SIZE;
+  *match = CEIL_DIV (max, CLASSIFIER_VECTOR_SIZE) - *skip;
+
+  if (*match > MAX_MATCH_SIZE)
+    *match = MAX_MATCH_SIZE;
+
+  return HICN_ERROR_NONE;
+
+ERR_PUNT:
+  *skip = 0;
+  *match = 0;
+  return HICN_ERROR_PUNT_INVAL;
+}
+
+/* Append a new classifier table to the per-interface IPv4 punt chain */
+void
+update_table4_index (u32 intfc, u32 table_index)
+{
+  vnet_classify_main_t *cm = &vnet_classify_main;
+
+  /* First table for this interface: it becomes the head of the chain */
+  if (hicn_punt_glb.head_ip4[intfc] == ~0)
+    hicn_punt_glb.head_ip4[intfc] = table_index;
+
+  /* Otherwise link the current tail table to point to the new one */
+  if (hicn_punt_glb.tail_ip4[intfc] != ~0)
+    {
+      vnet_classify_table_t *tail_tbl =
+	pool_elt_at_index (cm->tables, hicn_punt_glb.tail_ip4[intfc]);
+      tail_tbl->next_table_index = table_index;
+    }
+
+  /* The newly added table is always the new tail */
+  hicn_punt_glb.tail_ip4[intfc] = table_index;
+}
+
+/* Append a new classifier table to the per-interface IPv6 punt chain */
+void
+update_table6_index (u32 intfc, u32 table_index)
+{
+  vnet_classify_main_t *cm = &vnet_classify_main;
+
+  /* First table for this interface: it becomes the head of the chain */
+  if (hicn_punt_glb.head_ip6[intfc] == ~0)
+    hicn_punt_glb.head_ip6[intfc] = table_index;
+
+  /* Otherwise link the current tail table to point to the new one */
+  if (hicn_punt_glb.tail_ip6[intfc] != ~0)
+    {
+      vnet_classify_table_t *tail_tbl =
+	pool_elt_at_index (cm->tables, hicn_punt_glb.tail_ip6[intfc]);
+      tail_tbl->next_table_index = table_index;
+    }
+
+  /* The newly added table is always the new tail */
+  hicn_punt_glb.tail_ip6[intfc] = table_index;
+}
+
+/**
+ * @brief Add or remove a vnet classify table matching the list of
+ * (field, value) pairs passed as variadic parameters.
+ *
+ * @param ip ip version descriptor (holds table storage and ip_version tag)
+ * @param punt_id Storage identifier (HICN_PUNT_SRC | HICN_PUNT_DST)
+ * @param mask Subnet mask to match in the table
+ * @param next_tbl_index next table to match in case of miss
+ *        (NOTE(review): currently unused -- HICN_CLASSIFY_NO_NEXT_TABLE is
+ *        always passed to vpp and chaining is done via
+ *        update_table{4,6}_index instead; confirm this is intended)
+ * @param intfc Interface identifier
+ * @param base_offset Offset of the first header in the buffer (e.g. 0 or
+ *        sizeof(ethernet_header_t))
+ * @param is_add 1 if the table must be created, 0 if removed
+ * @param use_current_data classifier current-data flag forwarded to vpp
+ * @param ... NULL-terminated list of (field_t *, value) pairs to be matched
+ *
+ * @result Returns:
+ *      HICN_ERROR_PUNT_TBL_EXIST if is_add == 1 and a table for the same
+ *      mask already exists,
+ *      HICN_ERROR_PUNT_TBL_NOT_FOUND if is_add == 0 and there is no table
+ *      for the given mask,
+ *      HICN_ERROR_PUNT_INVAL if the underlying classifier call fails,
+ *      HICN_ERROR_NONE if no error occurred.
+ */
+int
+_hicn_punt_add_del_vnettbl (ip_version_t * ip, u8 punt_id, u8 mask,
+			    u32 next_tbl_index, u32 intfc, int base_offset,
+			    int is_add, u8 use_current_data, ...)
+{
+  u8 buffer[PUNT_BUFFER_SIZE];	/* must be dimensioned
+				 * large enough */
+  int rt;
+  va_list vl;
+  u32 *table_index;
+  u32 new_table_index;
+  u32 skip, match;
+
+
+  /* Build the buffer right from the start to determine the skip size */
+  va_start (vl, use_current_data);
+  /* NOTE(review): the return value of build_bit_array is ignored; on
+   * HICN_ERROR_PUNT_INVAL it leaves skip/match at 0 -- confirm this
+   * fallback is intended. */
+  build_bit_array (buffer, sizeof (buffer), base_offset, &skip, &match, vl);
+  va_end (vl);
+
+  ASSERT (skip < 4);
+  //Hardcoded limit in following array
+
+  table_index = TABLE_ELT_P (ip, intfc, skip, punt_id, mask);
+
+  if (is_add && *table_index != HICNP_PUNY_INVALID_TBL)
+    return HICN_ERROR_PUNT_TBL_EXIST;
+  if (!is_add && *table_index == HICNP_PUNY_INVALID_TBL)
+    return HICN_ERROR_PUNT_TBL_NOT_FOUND;
+
+  new_table_index = ~0;
+  /* The mask vectors handed to vpp start after the skipped vectors */
+  rt = vnet_classify_add_del_table (&vnet_classify_main,
+				    buffer + skip * CLASSIFIER_VECTOR_SIZE,
+				    HICN_CLASSIFY_NBUCKETS,
+				    HICN_CLASSIFY_TABLE_MEMORY_SIZE, skip,
+				    match, HICN_CLASSIFY_NO_NEXT_TABLE,
+				    HICN_CLASSIFY_MISS_NEXT_INDEX,
+				    &new_table_index,
+				    use_current_data,
+				    HICN_CLASSIFY_CURRENT_DATA_OFFSET, is_add,
+				    HICN_CLASSIFY_DON_T_DEL_CHAIN);
+
+  if (rt != 0)
+    return HICN_ERROR_PUNT_INVAL;
+
+  /* Record the (new or now-invalid) index and keep the per-interface
+   * table chain up to date */
+  *table_index = new_table_index;
+  if (ip->ip_version == 0x40)
+    update_table4_index (intfc, new_table_index);
+  else
+    update_table6_index (intfc, new_table_index);
+  return HICN_ERROR_NONE;
+}
+
+/**
+ * @brief Add or remove a vnet table matching the ip_version and field
+ * (src/dst).
+ *
+ * Builds an all-ones address mask of the requested prefix length and
+ * delegates to _hicn_punt_add_del_vnettbl with a single (field, mask) pair.
+ */
+int
+hicn_punt_add_del_vnettbl (ip_version_t * ip, field_t * field, u8 mask,
+			   u32 next_tbl_index, u32 intfc, u8 base_offset,
+			   u8 use_current_data, int is_add)
+{
+  /* IPV6_ADDR_LEN bytes is large enough for both v4 and v6 masks */
+  u8 ip_mask[IPV6_ADDR_LEN];
+  build_ip_address_mask (mask, ip_mask, sizeof (ip_mask));
+
+  return _hicn_punt_add_del_vnettbl (ip, field->punt_id, mask, next_tbl_index,
+				     intfc, base_offset, is_add,
+				     use_current_data, field, ip_mask, NULL);
+}
+
+
+/**
+ * @brief Add or remove a vnet table for udp tunnels matching the ip_version
+ * and field (src/dst).
+ *
+ * In addition to the inner address mask, the table matches the outer
+ * protocol field and both udp ports with all-ones masks.
+ */
+int
+hicn_punt_add_del_vnettbl_udp (ip_version_t * outer, ip_version_t * inner,
+			       field_t * field, u8 mask, u32 next_tbl_index,
+			       u32 intfc, u8 base_offset, int is_add)
+{
+  /* NOTE(review): addr_len_bits is a length in bits, so this VLA is 8x
+   * larger than the address itself -- harmless (only field->len bytes of
+   * it are consumed) but inconsistent with hicn_punt_add_del_vnettbl,
+   * which sizes its mask buffer in bytes. */
+  u8 udp_mask[inner->addr_len_bits];
+  build_ip_address_mask (mask, udp_mask, sizeof (udp_mask));
+  /* all-ones: the full port and protocol fields must match */
+  u16 port_value = 0xffff;
+  u8 protocol_value = 0xff;
+
+  return _hicn_punt_add_del_vnettbl (outer, field->punt_id, mask,
+				     next_tbl_index, intfc, base_offset,
+				     is_add,
+				     HICN_CLASSIFY_NO_CURRENT_DATA_FLAG,
+				     outer->protocol_field, &protocol_value,
+				     outer->udp_sport, &port_value,
+				     outer->udp_dport, &port_value, field,
+				     udp_mask, NULL);
+}
+
+/* Convenience wrappers fixing the add/del flag */
+#define hicn_punt_add_vnettbl_udp(outer, inner, field, mask, next_tbl_index, intfc, base_offset) \
+  (hicn_punt_add_del_vnettbl_udp(outer, inner, field, mask, next_tbl_index, intfc, base_offset, OP_ADD))
+
+#define hicn_punt_del_vnettbl_udp(outer, inner, field, mask, next_tbl_index, intfc, base_offset) \
+  (hicn_punt_add_del_vnettbl_udp(outer, inner, field, mask, next_tbl_index, intfc, base_offset, OP_DEL))
+
+/**
+ * @brief Add or remove a vnet session matching the list of fields/values
+ * passed as parameters.
+ *
+ * @param ip ip version descriptor of the outer header
+ * @param punt_id Storage identifier (HICN_PUNT_SRC | HICN_PUNT_DST)
+ * @param mask Subnet mask to match in the session
+ * @param next_hit_index vlib arch id pointing to the next node
+ * @param intfc Interface identifier
+ * @param base_offset Offset of the first header in the buffer
+ * @param is_add 1 if the session must be created, 0 if removed
+ * @param ... NULL-terminated list of (field_t *, value) pairs to be matched
+ *
+ * @result Returns:
+ *      HICN_ERROR_PUNT_TBL_NOT_FOUND if there is no table for the given
+ *      mask,
+ *      HICN_ERROR_PUNT_SSN_NOT_FOUND if is_add == 0 and there is no session
+ *      for the given address,
+ *      HICN_ERROR_NONE if no error occurred.
+ */
+int
+_hicn_punt_add_del_vnetssn (ip_version_t * ip, u8 punt_id, u8 mask,
+			    u32 next_hit_index, u32 intfc, int base_offset,
+			    int is_add, ...)
+{
+  u8 buffer[PUNT_BUFFER_SIZE];	/* must be dimensioned
+				 * large enough */
+  int rt;
+  va_list vl;
+  u32 table_index;
+  u32 skip, match;
+
+  /* Build the buffer right from the start to determine the skip size */
+  va_start (vl, is_add);
+  build_bit_array (buffer, sizeof (buffer), base_offset, &skip, &match, vl);
+  va_end (vl);
+
+  ASSERT (skip < 4);
+  //Hardcoded limit in following array
+
+  table_index = TABLE_ELT (ip, intfc, skip, punt_id, mask);
+
+  if (table_index == HICNP_PUNY_INVALID_TBL)
+    return HICN_ERROR_PUNT_TBL_NOT_FOUND;
+
+  /* The session key is the whole buffer; the table's own skip field
+   * accounts for the leading vectors */
+  rt = vnet_classify_add_del_session (&vnet_classify_main, table_index, buffer,	//+skip * CLASSIFIER_VECTOR_SIZE,
+				      next_hit_index,
+				      HICN_CLASSIFY_OPAQUE_INDEX,
+				      HICN_CLASSIFY_ADVANCE,
+				      HICN_CLASSIFY_ACTION,
+				      HICN_CLASSIFY_METADATA, is_add);
+
+  /* NOTE(review): any vnet error other than NO_SUCH_ENTRY is returned
+   * as-is, mixing the vnet and hicn error spaces -- confirm callers
+   * expect this. */
+  if (rt == VNET_API_ERROR_NO_SUCH_ENTRY)
+    rt = HICN_ERROR_PUNT_SSN_NOT_FOUND;
+
+  return rt;
+}
+
+/**
+ * @brief Add or remove a vnet session matching a src or dst address
+ * (v4 or v6, selected by the address family of v46_address).
+ *
+ * See _hicn_punt_add_del_vnetssn for details about parameters.
+ */
+int
+hicn_punt_add_del_vnetssn (ip_version_t * ip, field_t * field,
+			   ip46_address_t * v46_address, u8 mask,
+			   u32 next_hit_index, u32 intfc, u8 base_offset,
+			   int is_add)
+{
+  /* Pass the raw address bytes of the right family; field->len bounds
+   * how many of them end up in the match buffer */
+  return _hicn_punt_add_del_vnetssn (ip, field->punt_id, mask, next_hit_index,
+				     intfc, base_offset, is_add, field,
+				     ip46_address_is_ip4 (v46_address) ?
+				     v46_address->ip4.as_u8 : v46_address->
+				     ip6.as_u8, NULL);
+}
+
+
+
+/**
+ * @brief Add or remove a vnet session for udp tunnels matching the inner
+ * src or dst address together with the outer protocol and udp ports.
+ *
+ * See _hicn_punt_add_del_vnetssn for details about parameters.
+ */
+int
+hicn_punt_add_del_vnetssn_udp (ip_version_t * outer, ip_version_t * inner,
+			       field_t * field, ip46_address_t * v46_address,
+			       u8 mask, u32 next_hit_index, u32 intfc,
+			       u8 base_offset, u8 protocol, u16 sport,
+			       u16 dport, int is_add)
+{
+  /* NOTE(review): the inner parameter is not used in this function.
+   * sport/dport bytes are matched against raw packet bytes (network
+   * order) -- confirm callers pass them already byte-swapped. */
+  return _hicn_punt_add_del_vnetssn (outer, field->punt_id, mask,
+				     next_hit_index, intfc, base_offset,
+				     is_add, outer->protocol_field, &protocol,
+				     outer->udp_sport, &sport,
+				     outer->udp_dport, &dport, field,
+				     v46_address->as_u8, NULL);
+}
+
+/* Convenience wrappers fixing the add/del flag */
+#define hicn_punt_add_vnetssn_udp(outer, inner, field, addr, mask, index, intfc, offset, protocol, sport, dport) \
+  (hicn_punt_add_del_vnetssn_udp(outer, inner, field, addr, mask, index, intfc, offset, protocol, sport, dport, OP_ADD))
+
+#define hicn_punt_del_vnetssn_udp(outer, inner, field, addr, mask, index, intfc, offset, protocol, sport, dport) \
+  (hicn_punt_add_del_vnetssn_udp(outer, inner, field, addr, mask, index, intfc, offset, protocol, sport, dport, OP_DEL))
+
+/*
+ * Enable or disable the ip4 punting table chain on a given interface.
+ *
+ * Installs the head of the interface's ip4 classify chain as an input ACL.
+ * The 0xFFFFFFFF arguments presumably leave the other (ip6/l2) table
+ * slots unchanged -- verify against vnet_set_input_acl_intfc.
+ */
+void
+hicn_punt_enable_disable_vnet_ip4_table_on_intf (vlib_main_t * vm,
+						 u32 sw_if_index,
+						 int is_enable)
+{
+  if (hicn_punt_glb.head_ip4[sw_if_index] != HICNP_PUNY_INVALID_TBL)
+    (void) vnet_set_input_acl_intfc (vm, sw_if_index,
+				     hicn_punt_glb.head_ip4[sw_if_index],
+				     0xFFFFFFFF, 0xFFFFFFFF, is_enable);
+  return;
+}
+
+/*
+ * Remove the punting sessions for an ip4 address on a given interface,
+ * then tear down the per-mask classify tables once they no longer hold
+ * any session (dst table first, then src).
+ *
+ * XXX replace skip by base_offset XXX are we sure we always have ETH_L2, and
+ * not base_offset ???
+ */
+int
+hicn_punt_remove_ip4_address (vlib_main_t * vm, ip4_address_t * addr,
+			      u8 mask, int skip, u32 sw_if_index,
+			      int is_enable)
+{
+  /* NOTE(review): vm and is_enable are unused here */
+  vnet_classify_main_t *cm = &vnet_classify_main;
+  vnet_classify_table_t *vnet_table = NULL;
+
+  u32 table_index = ~0;
+
+  u32 base_offset = (skip ? ETH_L2 : NO_L2);
+  ip46_address_t addr46;
+  ip46_address_set_ip4 (&addr46, addr);
+
+  /* Delete both the data (src) and interest (dst) sessions */
+  hicn_punt_del_vnetssn (&ipv4, &ipv4_src, &addr46, mask,
+			 hicn_punt_glb.next_hit_data_ipv4, sw_if_index,
+			 ETH_L2);
+  hicn_punt_del_vnetssn (&ipv4, &ipv4_dst, &addr46, mask,
+			 hicn_punt_glb.next_hit_interest_ipv4, sw_if_index,
+			 ETH_L2);
+
+  table_index =
+    hicn_punt_glb.ip4_vnet_tbl_idx[sw_if_index][skip][HICN_PUNT_DST][mask];
+  vnet_table = pool_elt_at_index (cm->tables, table_index);
+  if (vnet_table->active_elements == 0)
+    {
+      hicn_punt_del_vnettbl (&ipv4, &ipv4_dst, mask,
+			     hicn_punt_glb.ip4_vnet_tbl_idx[sw_if_index][skip]
+			     [HICN_PUNT_SRC][mask], sw_if_index, base_offset);
+    }
+  table_index =
+    hicn_punt_glb.ip4_vnet_tbl_idx[sw_if_index][skip][HICN_PUNT_SRC][mask];
+  vnet_table = pool_elt_at_index (cm->tables, table_index);
+  if (vnet_table->active_elements == 0)
+    {
+      hicn_punt_del_vnettbl (&ipv4, &ipv4_src, mask, ~0, sw_if_index,
+			     base_offset);
+    }
+  return HICN_ERROR_NONE;
+}
+
+/*
+ * Remove the punting sessions for an ip6 address on a given interface,
+ * then tear down the per-mask classify tables once empty.
+ * Mirror of hicn_punt_remove_ip4_address.
+ */
+int
+hicn_punt_remove_ip6_address (vlib_main_t * vm, ip6_address_t * addr,
+			      u8 mask, int skip, u32 sw_if_index,
+			      int is_enable)
+{
+  /* NOTE(review): vm and is_enable are unused here. The ip6_address_t*
+   * is cast to ip46_address_t*, relying on the ip6 member overlaying the
+   * start of the union. */
+  vnet_classify_main_t *cm = &vnet_classify_main;
+  vnet_classify_table_t *vnet_table = NULL;
+
+  u32 table_index = ~0;
+
+  u32 base_offset = (skip ? ETH_L2 : NO_L2);
+
+  /* Delete both the data (src) and interest (dst) sessions */
+  hicn_punt_del_vnetssn (&ipv6, &ipv6_src, (ip46_address_t *) addr, mask,
+			 hicn_punt_glb.next_hit_data_ipv6, sw_if_index,
+			 ETH_L2);
+  hicn_punt_del_vnetssn (&ipv6, &ipv6_dst, (ip46_address_t *) addr, mask,
+			 hicn_punt_glb.next_hit_interest_ipv6, sw_if_index,
+			 ETH_L2);
+
+  table_index =
+    hicn_punt_glb.ip6_vnet_tbl_idx[sw_if_index][skip][HICN_PUNT_DST][mask];
+  vnet_table = pool_elt_at_index (cm->tables, table_index);
+  if (vnet_table->active_elements == 0)
+    {
+      hicn_punt_del_vnettbl (&ipv6, &ipv6_dst, mask,
+			     hicn_punt_glb.ip6_vnet_tbl_idx[sw_if_index][skip]
+			     [HICN_PUNT_SRC][mask], sw_if_index, base_offset);
+    }
+  table_index =
+    hicn_punt_glb.ip6_vnet_tbl_idx[sw_if_index][skip][HICN_PUNT_SRC][mask];
+  vnet_table = pool_elt_at_index (cm->tables, table_index);
+  if (vnet_table->active_elements == 0)
+    {
+      hicn_punt_del_vnettbl (&ipv6, &ipv6_src, mask, ~0, sw_if_index,
+			     base_offset);
+    }
+  return HICN_ERROR_NONE;
+}
+
+/*
+ * Enable or disable the ip6 punting table chain on a given interface.
+ *
+ * Installs the head of the interface's ip6 classify chain as an input ACL;
+ * the 0xFFFFFFFF arguments presumably leave the ip4/l2 table slots
+ * unchanged -- verify against vnet_set_input_acl_intfc.
+ */
+void
+hicn_punt_enable_disable_vnet_ip6_table_on_intf (vlib_main_t * vm,
+						 u32 sw_if_index,
+						 int is_enable)
+{
+  if (hicn_punt_glb.head_ip6[sw_if_index] != HICNP_PUNY_INVALID_TBL)
+    (void) vnet_set_input_acl_intfc (vm, sw_if_index,
+				     0xFFFFFFFF,
+				     hicn_punt_glb.head_ip6[sw_if_index],
+				     0xFFFFFFFF, is_enable);
+  return;
+}
+
+/*
+ * HICN PUNT vlib node addition
+ *
+ * Resolve (by name) and cache the vlib node indexes of all hicn
+ * face/iface input/output nodes and of the ip4/ip6 inacl and lookup
+ * nodes, then register the inacl -> hicn-input next-node arcs that the
+ * classifier sessions use as hit targets.
+ *
+ * Fixes: the "hicn-iface-ip6-output" index was stored into
+ * hicn_iface_ip4_output_index (copy-paste), clobbering the ip4 entry and
+ * leaving hicn_iface_ip6_output_index uninitialized. The dead
+ * hit_next_index local has also been removed.
+ */
+void
+hicn_punt_vlib_node_add (vlib_main_t * vm)
+{
+  vlib_node_t *node;
+
+  //Acquire the node indexes
+
+  /* ip face */
+  node = vlib_get_node_by_name (vm, (u8 *) "hicn-face-ip4-input");
+  hicn_punt_glb.hicn_node_info.hicn_face_ip4_input_index = node->index;
+  node = vlib_get_node_by_name (vm, (u8 *) "hicn-face-ip6-input");
+  hicn_punt_glb.hicn_node_info.hicn_face_ip6_input_index = node->index;
+  node = vlib_get_node_by_name (vm, (u8 *) "hicn-face-ip4-output");
+  hicn_punt_glb.hicn_node_info.hicn_face_ip4_output_index = node->index;
+  node = vlib_get_node_by_name (vm, (u8 *) "hicn-face-ip6-output");
+  hicn_punt_glb.hicn_node_info.hicn_face_ip6_output_index = node->index;
+
+  /* ip iface */
+  node = vlib_get_node_by_name (vm, (u8 *) "hicn-iface-ip4-input");
+  hicn_punt_glb.hicn_node_info.hicn_iface_ip4_input_index = node->index;
+  node = vlib_get_node_by_name (vm, (u8 *) "hicn-iface-ip6-input");
+  hicn_punt_glb.hicn_node_info.hicn_iface_ip6_input_index = node->index;
+  node = vlib_get_node_by_name (vm, (u8 *) "hicn-iface-ip4-output");
+  hicn_punt_glb.hicn_node_info.hicn_iface_ip4_output_index = node->index;
+  node = vlib_get_node_by_name (vm, (u8 *) "hicn-iface-ip6-output");
+  hicn_punt_glb.hicn_node_info.hicn_iface_ip6_output_index = node->index;
+
+  /* udp face */
+  node = vlib_get_node_by_name (vm, (u8 *) "hicn-face-udp4-input");
+  hicn_punt_glb.hicn_node_info.hicn_face_udp4_input_index = node->index;
+  node = vlib_get_node_by_name (vm, (u8 *) "hicn-face-udp6-input");
+  hicn_punt_glb.hicn_node_info.hicn_face_udp6_input_index = node->index;
+  node = vlib_get_node_by_name (vm, (u8 *) "hicn-face-udp4-output");
+  hicn_punt_glb.hicn_node_info.hicn_face_udp4_output_index = node->index;
+  node = vlib_get_node_by_name (vm, (u8 *) "hicn-face-udp6-output");
+  hicn_punt_glb.hicn_node_info.hicn_face_udp6_output_index = node->index;
+
+  /* udp iface */
+  node = vlib_get_node_by_name (vm, (u8 *) "hicn-iface-udp4-input");
+  hicn_punt_glb.hicn_node_info.hicn_iface_udp4_input_index = node->index;
+  node = vlib_get_node_by_name (vm, (u8 *) "hicn-iface-udp6-input");
+  hicn_punt_glb.hicn_node_info.hicn_iface_udp6_input_index = node->index;
+  node = vlib_get_node_by_name (vm, (u8 *) "hicn-iface-udp4-output");
+  hicn_punt_glb.hicn_node_info.hicn_iface_udp4_output_index = node->index;
+  node = vlib_get_node_by_name (vm, (u8 *) "hicn-iface-udp6-output");
+  hicn_punt_glb.hicn_node_info.hicn_iface_udp6_output_index = node->index;
+
+  /* inacl and lookup nodes the punt tables hang off */
+  node = vlib_get_node_by_name (vm, (u8 *) "ip4-inacl");
+  hicn_punt_glb.hicn_node_info.ip4_inacl_node_index = node->index;
+  node = vlib_get_node_by_name (vm, (u8 *) "ip6-inacl");
+  hicn_punt_glb.hicn_node_info.ip6_inacl_node_index = node->index;
+  node = vlib_get_node_by_name (vm, (u8 *) "ip4-lookup");
+  hicn_punt_glb.hicn_node_info.ip4_lookup_node_index = node->index;
+  node = vlib_get_node_by_name (vm, (u8 *) "ip6-lookup");
+  hicn_punt_glb.hicn_node_info.ip6_lookup_node_index = node->index;
+
+  /* Register the inacl -> hicn next-node arcs used by classifier hits */
+  hicn_punt_glb.next_hit_data_ipv4 = vlib_node_add_next (vm,
+							 hicn_punt_glb.hicn_node_info.
+							 ip4_inacl_node_index,
+							 hicn_punt_glb.hicn_node_info.
+							 hicn_face_ip4_input_index);
+
+  hicn_punt_glb.next_hit_interest_ipv4 = vlib_node_add_next (vm,
+							     hicn_punt_glb.hicn_node_info.
+							     ip4_inacl_node_index,
+							     hicn_punt_glb.hicn_node_info.
+							     hicn_iface_ip4_input_index);
+
+  hicn_punt_glb.next_hit_data_ipv6 = vlib_node_add_next (vm,
+							 hicn_punt_glb.hicn_node_info.
+							 ip6_inacl_node_index,
+							 hicn_punt_glb.hicn_node_info.
+							 hicn_face_ip6_input_index);
+
+  hicn_punt_glb.next_hit_interest_ipv6 = vlib_node_add_next (vm,
+							     hicn_punt_glb.hicn_node_info.
+							     ip6_inacl_node_index,
+							     hicn_punt_glb.hicn_node_info.
+							     hicn_iface_ip6_input_index);
+
+  hicn_punt_glb.next_hit_data_udp4 = vlib_node_add_next (vm,
+							 hicn_punt_glb.hicn_node_info.
+							 ip4_inacl_node_index,
+							 hicn_punt_glb.hicn_node_info.
+							 hicn_face_udp4_input_index);
+
+  hicn_punt_glb.next_hit_interest_udp4 = vlib_node_add_next (vm,
+							     hicn_punt_glb.hicn_node_info.
+							     ip4_inacl_node_index,
+							     hicn_punt_glb.hicn_node_info.
+							     hicn_iface_udp4_input_index);
+
+  hicn_punt_glb.next_hit_data_udp6 = vlib_node_add_next (vm,
+							 hicn_punt_glb.hicn_node_info.
+							 ip6_inacl_node_index,
+							 hicn_punt_glb.hicn_node_info.
+							 hicn_face_udp6_input_index);
+
+  hicn_punt_glb.next_hit_interest_udp6 = vlib_node_add_next (vm,
+							     hicn_punt_glb.hicn_node_info.
+							     ip6_inacl_node_index,
+							     hicn_punt_glb.hicn_node_info.
+							     hicn_iface_udp6_input_index);
+
+  return;
+}
+
+/*
+ * HICN PUNT INIT
+ *
+ * Invalidate every cached classify table index and the per-interface
+ * head/tail chain pointers, then resolve the hicn node graph.
+ */
+void
+hicn_punt_init (vlib_main_t * vm)
+{
+  u32 table_index = ~0;
+  /*
+   * Create vnet classify tables and store the table indexes.
+   * memset fills byte-wise: passing ~0 works only because every byte of
+   * the invalid index (0xFFFFFFFF) is 0xFF.
+   */
+  memset (hicn_punt_glb.ip4_vnet_tbl_idx, table_index,
+	  sizeof (u32) * 4 * 2 * HICN_PUNT_IP4_MASK * HICN_MAX_INTFC);
+  memset (hicn_punt_glb.ip6_vnet_tbl_idx, table_index,
+	  sizeof (u32) * 4 * 2 * HICN_PUNT_IP6_MASK * HICN_MAX_INTFC);
+
+  memset (hicn_punt_glb.udp44_vnet_tbl_idx, table_index,
+	  sizeof (u32) * 4 * 2 * HICN_PUNT_IP4_MASK * HICN_MAX_INTFC);
+  memset (hicn_punt_glb.udp46_vnet_tbl_idx, table_index,
+	  sizeof (u32) * 4 * 2 * HICN_PUNT_IP6_MASK * HICN_MAX_INTFC);
+  memset (hicn_punt_glb.udp64_vnet_tbl_idx, table_index,
+	  sizeof (u32) * 4 * 2 * HICN_PUNT_IP4_MASK * HICN_MAX_INTFC);
+  memset (hicn_punt_glb.udp66_vnet_tbl_idx, table_index,
+	  sizeof (u32) * 4 * 2 * HICN_PUNT_IP6_MASK * HICN_MAX_INTFC);
+  //Register hicn nodes after vnet table creation
+  hicn_punt_vlib_node_add (vm);
+  /* No table chain exists yet on any interface */
+  memset (hicn_punt_glb.head_ip4, ~0, sizeof (u32) * HICN_MAX_INTFC);
+  memset (hicn_punt_glb.tail_ip4, ~0, sizeof (u32) * HICN_MAX_INTFC);
+  memset (hicn_punt_glb.head_ip6, ~0, sizeof (u32) * HICN_MAX_INTFC);
+  memset (hicn_punt_glb.tail_ip6, ~0, sizeof (u32) * HICN_MAX_INTFC);
+  return;
+}
+
+/**
+ * @brief Punt interest/data packets encapsulated in a udp tunnel for the
+ * given prefix on the given interface.
+ *
+ * Creates (if needed) the src/dst classify tables for the mask, adds one
+ * session per direction (src -> data path, dst -> interest path) and
+ * enables the input ACL on the interface.
+ *
+ * @param vm vlib main
+ * @param prefix inner prefix (v4 or v6) to punt
+ * @param mask prefix length
+ * @param swif interface identifier
+ * @param punt_type HICN_PUNT_UDP4_TYPE or HICN_PUNT_UDP6_TYPE (outer header)
+ * @param sport udp tunnel source port to match
+ * @param dport udp tunnel destination port to match
+ *
+ * @result HICN_ERROR_NONE on success, HICN_ERROR_PUNT_INVAL on an
+ * unsupported punt_type or an out-of-range mask.
+ */
+u32
+hicn_punt_interest_data_for_udp (vlib_main_t * vm,
+				 ip46_address_t * prefix, u8 mask,
+				 u32 swif, u8 punt_type, u16 sport, u16 dport)
+{
+  int skip = 1;
+  u32 table_index;
+
+  /*
+   * Only udp tunnel punting is handled here; plain ip punting goes
+   * through hicn_punt_interest_data_for_ethernet. The previous guard
+   * also accepted HICN_PUNT_IP_TYPE, which then fell into the udp6
+   * branches below and built the wrong tables.
+   */
+  if (punt_type != HICN_PUNT_UDP4_TYPE && punt_type != HICN_PUNT_UDP6_TYPE)
+    return HICN_ERROR_PUNT_INVAL;
+
+  if (ip46_address_is_ip4 (prefix))
+    {
+      if (mask > IPV4_ADDR_LEN_BITS)
+	return HICN_ERROR_PUNT_INVAL;
+
+      if (punt_type == HICN_PUNT_UDP4_TYPE)
+	{
+	  skip = 2;
+	  /* Create Vnet table for a given mask */
+	  hicn_punt_add_vnettbl_udp (&ipv44, &ipv4, &udp44_src, mask, ~0,
+				     swif, ETH_L2);
+
+	  table_index =
+	    hicn_punt_glb.udp44_vnet_tbl_idx[swif][skip][HICN_PUNT_SRC][mask];
+
+	  hicn_punt_add_vnettbl_udp (&ipv44, &ipv4, &udp44_dst, mask,
+				     table_index, swif, ETH_L2);
+	  /*
+	   * Add a session for the specified ip address and
+	   * subnet mask
+	   */
+	  hicn_punt_add_vnetssn_udp (&ipv44, &ipv4, &udp44_src,
+				     prefix, mask,
+				     hicn_punt_glb.next_hit_data_udp4,
+				     swif, ETH_L2, IPPROTO_UDP, sport, dport);
+
+	  hicn_punt_add_vnetssn_udp (&ipv44, &ipv4, &udp44_dst,
+				     prefix, mask,
+				     hicn_punt_glb.next_hit_interest_udp4,
+				     swif, ETH_L2, IPPROTO_UDP, sport, dport);
+
+	  hicn_punt_enable_disable_vnet_ip4_table_on_intf (vm, swif,
+							   OP_ENABLE);
+	}
+      else			//PUNTING is UDP6
+	{
+	  skip = 3;
+	  /* Create Vnet table for a given mask */
+	  hicn_punt_add_vnettbl_udp (&ipv64, &ipv6, &udp64_src, mask, ~0,
+				     swif, ETH_L2);
+
+	  table_index =
+	    hicn_punt_glb.udp64_vnet_tbl_idx[swif][skip][HICN_PUNT_SRC][mask];
+
+	  hicn_punt_add_vnettbl_udp (&ipv64, &ipv6, &udp64_dst, mask,
+				     table_index, swif, ETH_L2);
+
+	  /*
+	   * Add a session for the specified ip address and subnet mask.
+	   * NOTE(review): the sessions pass &ipv4 as inner while the
+	   * tables above were built with &ipv6 -- harmless today because
+	   * hicn_punt_add_del_vnetssn_udp does not use its inner
+	   * argument, but inconsistent.
+	   */
+	  hicn_punt_add_vnetssn_udp (&ipv64, &ipv4, &udp64_src,
+				     prefix, mask,
+				     hicn_punt_glb.next_hit_data_udp6,
+				     swif, ETH_L2, IPPROTO_UDP, sport, dport);
+
+	  hicn_punt_add_vnetssn_udp (&ipv64, &ipv4, &udp64_dst,
+				     prefix, mask,
+				     hicn_punt_glb.next_hit_interest_udp6,
+				     swif, ETH_L2, IPPROTO_UDP, sport, dport);
+
+	  hicn_punt_enable_disable_vnet_ip6_table_on_intf (vm, swif,
+							   OP_ENABLE);
+	}
+    }
+  else
+    {
+      if (punt_type == HICN_PUNT_UDP4_TYPE)
+	{
+	  skip = 2;
+	  /* Create Vnet table for a given mask */
+	  if (mask > 96)
+	    return HICN_ERROR_PUNT_INVAL;
+
+	  hicn_punt_add_vnettbl_udp (&ipv46, &ipv4, &udp46_src, mask, ~0,
+				     swif, ETH_L2);
+
+	  table_index =
+	    hicn_punt_glb.udp46_vnet_tbl_idx[swif][skip][HICN_PUNT_SRC][mask];
+	  hicn_punt_add_vnettbl_udp (&ipv46, &ipv4, &udp46_dst, mask,
+				     table_index, swif, ETH_L2);
+
+	  /*
+	   * Add a session for the specified ip address and
+	   * subnet mask
+	   */
+	  hicn_punt_add_vnetssn_udp (&ipv46, &ipv4, &udp46_src,
+				     prefix, mask,
+				     hicn_punt_glb.next_hit_data_udp4,
+				     swif, ETH_L2, IPPROTO_UDP, sport, dport);
+	  hicn_punt_add_vnetssn_udp (&ipv46, &ipv4, &udp46_dst,
+				     prefix, mask,
+				     hicn_punt_glb.next_hit_interest_udp4,
+				     swif, ETH_L2, IPPROTO_UDP, sport, dport);
+
+	  hicn_punt_enable_disable_vnet_ip4_table_on_intf (vm, swif,
+							   OP_ENABLE);
+	}
+      else
+	{
+	  if (mask > 122)
+	    return HICN_ERROR_PUNT_INVAL;
+
+	  skip = 3;
+	  hicn_punt_add_vnettbl_udp (&ipv66, &ipv6, &udp66_src, mask, ~0,
+				     swif, ETH_L2);
+
+	  table_index =
+	    hicn_punt_glb.udp66_vnet_tbl_idx[swif][skip][HICN_PUNT_SRC][mask];
+	  hicn_punt_add_vnettbl_udp (&ipv66, &ipv6, &udp66_dst, mask,
+				     table_index, swif, ETH_L2);
+
+	  /*
+	   * Add a session for the specified ip address and
+	   * subnet mask
+	   */
+	  hicn_punt_add_vnetssn_udp (&ipv66, &ipv6, &udp66_src,
+				     prefix, mask,
+				     hicn_punt_glb.next_hit_data_udp6,
+				     swif, ETH_L2, IPPROTO_UDP, sport, dport);
+	  hicn_punt_add_vnetssn_udp (&ipv66, &ipv6, &udp66_dst,
+				     prefix, mask,
+				     hicn_punt_glb.next_hit_interest_udp6,
+				     swif, ETH_L2, IPPROTO_UDP, sport, dport);
+
+	  hicn_punt_enable_disable_vnet_ip6_table_on_intf (vm, swif,
+							   OP_ENABLE);
+	}
+
+    }
+  return HICN_ERROR_NONE;
+}
+
+
+
+/**
+ * @brief Punt plain (non-tunneled) hicn interest/data packets for the
+ * given prefix on the given interface.
+ *
+ * Creates (if needed) the src/dst classify tables for the mask, adds one
+ * session per direction (src -> data path, dst -> interest path) and
+ * enables the input ACL on the interface. Only HICN_PUNT_IP_TYPE is
+ * actually supported; the udp types fall through to the INVAL branches.
+ *
+ * @result HICN_ERROR_NONE on success, HICN_ERROR_PUNT_INVAL on an
+ * unsupported punt_type or an out-of-range mask.
+ */
+u32
+hicn_punt_interest_data_for_ethernet (vlib_main_t * vm,
+				      ip46_address_t * prefix, u8 mask,
+				      u32 swif, u8 punt_type)
+{
+  /* skip = 1 indexes the per-interface table arrays; assumes ETH_L2 --
+   * see the XXX note on hicn_punt_remove_ip4_address */
+  int skip = 1;
+  u32 table_index;
+  u8 use_current_data = HICN_CLASSIFY_NO_CURRENT_DATA_FLAG;
+
+  if (punt_type != HICN_PUNT_IP_TYPE && punt_type != HICN_PUNT_UDP4_TYPE
+      && punt_type != HICN_PUNT_UDP6_TYPE)
+    return HICN_ERROR_PUNT_INVAL;
+
+  if (ip46_address_is_ip4 (prefix))
+    {
+      if (mask > IPV4_ADDR_LEN_BITS)
+	return HICN_ERROR_PUNT_INVAL;
+
+      if (punt_type == HICN_PUNT_IP_TYPE)
+	{
+	  /* Create Vnet table for a given mask */
+	  hicn_punt_add_vnettbl (&ipv4, &ipv4_src, mask, ~0, swif, ETH_L2,
+				 use_current_data);
+
+	  table_index =
+	    hicn_punt_glb.ip4_vnet_tbl_idx[swif][skip][HICN_PUNT_SRC][mask];
+
+	  hicn_punt_add_vnettbl (&ipv4, &ipv4_dst, mask, table_index, swif,
+				 ETH_L2, use_current_data);
+
+	  /*
+	   * Add a session for the specified ip address and
+	   * subnet mask
+	   */
+	  hicn_punt_add_vnetssn (&ipv4, &ipv4_src,
+				 prefix, mask,
+				 hicn_punt_glb.next_hit_data_ipv4, swif,
+				 ETH_L2);
+	  hicn_punt_add_vnetssn (&ipv4, &ipv4_dst,
+				 prefix, mask,
+				 hicn_punt_glb.next_hit_interest_ipv4, swif,
+				 ETH_L2);
+
+	  hicn_punt_enable_disable_vnet_ip4_table_on_intf (vm, swif,
+							   OP_ENABLE);
+	}
+      else
+	{
+	  return HICN_ERROR_PUNT_INVAL;
+	}
+    }
+  else
+    {
+      if (punt_type == HICN_PUNT_IP_TYPE)
+	{
+	  if (mask > IPV6_ADDR_LEN_BITS)
+	    return HICN_ERROR_PUNT_INVAL;
+
+	  /* Create Vnet table for a given mask */
+	  hicn_punt_add_vnettbl (&ipv6, &ipv6_src, mask, ~0, swif, ETH_L2,
+				 use_current_data);
+
+	  table_index =
+	    hicn_punt_glb.ip6_vnet_tbl_idx[swif][skip][HICN_PUNT_SRC][mask];
+
+	  hicn_punt_add_vnettbl (&ipv6, &ipv6_dst, mask, table_index, swif,
+				 ETH_L2, use_current_data);
+
+	  /*
+	   * Add a session for the specified ip address and
+	   * subnet mask
+	   */
+	  hicn_punt_add_vnetssn (&ipv6, &ipv6_src, prefix,
+				 mask, hicn_punt_glb.next_hit_data_ipv6, swif,
+				 ETH_L2);
+	  hicn_punt_add_vnetssn (&ipv6, &ipv6_dst, prefix,
+				 mask, hicn_punt_glb.next_hit_interest_ipv6,
+				 swif, ETH_L2);
+
+	  hicn_punt_enable_disable_vnet_ip6_table_on_intf (vm, swif,
+							   OP_ENABLE);
+	}
+      else
+	{
+	  return HICN_ERROR_PUNT_INVAL;
+	}
+
+    }
+  return HICN_ERROR_NONE;
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/punt.h b/hicn-plugin/src/punt.h
new file mode 100755
index 000000000..ebc27e9d4
--- /dev/null
+++ b/hicn-plugin/src/punt.h
@@ -0,0 +1,338 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HICN_PUNT_H__
+#define __HICN_PUNT_H__
+
+#include <vppinfra/error.h>
+#include <hicn/hicn.h>
+
+#define HICN_CLASSIFY_TABLE_MEMORY_SIZE (2*1024*1024) // 2MB allocated for the classification table
+#define HICN_PUNTING_BUFFER_SIZE_32 (32)
+#define HICN_PUNTING_BUFFER_SIZE_48 (48)
+#define HICN_PUNTING_BUFFER_SIZE_64 (64)
+#define HICN_PUNTING_BUFFER_SIZE_80 (80)
+#define HICN_PUNTING_BUFFER_SIZE_128 (128)
+
+/* Limits */
+
+#define HICN_PUNT_IP4 0
+#define HICN_PUNT_IP6 1
+
+#define HICN_MAX_INTFC 256
+
+/* We also consider mask = 0 to match everything */
+#define HICN_PUNT_IP4_MASK 33
+#define HICN_PUNT_IP6_MASK 129
+
+#define HICN_PUNT_IP_TYPE 0
+#define HICN_PUNT_UDP4_TYPE 1
+#define HICN_PUNT_UDP6_TYPE 2
+/*
+ * u32 ip4_vnet_tbl_idx[HICN_MAX_INTFC][2][3][HICN_PUNT_IP4_MASK];
+ * //[skip][src][mask],[skip][dst][mask] u32
+ * ip6_vnet_tbl_idx[HICN_MAX_INTFC][2][3][HICN_PUNT_IP6_MASK];
+ * //[skip][src][mask],[skip][dst][mask]
+ */
+#define PUNT_MASK(ip) (ip->addr_len_bits + 1)
+#define TABLE_ELT_P(ip, i, j, k, l) (ip->tbl + (4 * 2 * PUNT_MASK(ip)) * i + (2 * PUNT_MASK(ip)) * j + k * PUNT_MASK(ip) + l)
+#define TABLE_ELT(ip, i, j, k, l) (*(TABLE_ELT_P(ip, i, j, k, l)))
+
+#define NO_L2 0
+#define ETH_L2 sizeof(ethernet_header_t)
+
+#define IPPROTO_MASK 0xFF
+
+/* Index to access vnet table index */
+#define HICN_PUNT_SRC 0
+#define HICN_PUNT_DST 1
+
+#define HICN_PUNT_OK 0
+#define HICN_PUNT_ERR 1
+
+#define HICNP_PUNY_INVALID_TBL ~0
+
+/* Number of bytes before the next header/protocol field in ip6/4 */
+#define BYTES_TO_PROTOCOL_IP4 9
+#define BYTES_TO_NEXT_HEADER_IP6 6
+
+#define PUNT_BUFFER_SIZE 100 /* B */
+#define CLASSIFIER_VECTOR_SIZE 16 /* B */
+
+#define OP_DEL 0
+#define OP_ADD 1
+#define OP_DISABLE 0
+#define OP_ENABLE 1
+
+/* vnet_classify_add_del_table */
+#define HICN_CLASSIFY_NO_NEXT_TABLE 0xFFFFFFFF
+#define HICN_CLASSIFY_MISS_NEXT_INDEX 16
+#define HICN_CLASSIFY_CURRENT_DATA_FLAG CLASSIFY_FLAG_USE_CURR_DATA
+#define HICN_CLASSIFY_NO_CURRENT_DATA_FLAG 0
+#define HICN_CLASSIFY_CURRENT_DATA_OFFSET 0
+#define HICN_CLASSIFY_DON_T_DEL_CHAIN 0
+
+/* vnet_classify_add_del_session */
+#define HICN_CLASSIFY_OPAQUE_INDEX 0xFFFFFFFF
+#define HICN_CLASSIFY_ADVANCE 0
+#define HICN_CLASSIFY_ACTION 0
+#define HICN_CLASSIFY_METADATA 0
+
+/* This should be equal to the number of rules we expect in each table */
+#define HICN_CLASSIFY_NBUCKETS 3
+
+
+/* HICN punt node index
+ *
+ * Vlib node indexes resolved once by hicn_punt_vlib_node_add and used to
+ * wire classifier hits to the hicn input nodes. */
+typedef struct _hicn_node_info_s
+{
+  /* ip face / iface nodes */
+  u32 hicn_face_ip4_input_index;
+  u32 hicn_face_ip6_input_index;
+  u32 hicn_iface_ip4_input_index;
+  u32 hicn_iface_ip6_input_index;
+  u32 hicn_face_ip4_output_index;
+  u32 hicn_face_ip6_output_index;
+  u32 hicn_iface_ip4_output_index;
+  u32 hicn_iface_ip6_output_index;
+  /* udp tunnel face / iface nodes */
+  u32 hicn_face_udp4_input_index;
+  u32 hicn_face_udp6_input_index;
+  u32 hicn_iface_udp4_input_index;
+  u32 hicn_iface_udp6_input_index;
+  u32 hicn_face_udp4_output_index;
+  u32 hicn_face_udp6_output_index;
+  u32 hicn_iface_udp4_output_index;
+  u32 hicn_iface_udp6_output_index;
+  /* vpp nodes the punt tables hang off */
+  u32 ip4_inacl_node_index;
+  u32 ip6_inacl_node_index;
+  u32 ip4_lookup_node_index;
+  u32 ip6_lookup_node_index;
+} hicn_node_info_t;
+
+/*
+ * HICN global PUNT info
+ */
+typedef struct _hicn_punt_glb_s
+{
+  /* Cached vlib node indexes (see hicn_punt_vlib_node_add) */
+  hicn_node_info_t hicn_node_info;
+
+  /*
+   * The following next-node indexes are used to create the vlib node
+   * graph, and point classified packets to the right node.
+   */
+  u32 next_hit_interest_ipv4;
+  //node - graph index to forward packets to our hicn nodes
+  u32 next_hit_data_ipv4;
+  u32 next_hit_interest_ipv6;
+  //node - graph index to forward packets to our hicn nodes
+  u32 next_hit_data_ipv6;
+  u32 next_hit_interest_udp4;
+  //node - graph index to forward packets to our hicn nodes
+  u32 next_hit_data_udp4;
+  u32 next_hit_interest_udp6;
+  //node - graph index to forward packets to our hicn nodes
+  u32 next_hit_data_udp6;
+
+  /*
+   * One table is created : - per interface : so that we can have
+   * different punted prefixes per interface, and thus decrease the
+   * amount of matched rules per packet. An interface will be
+   * consistently receiving packets with or without the ethernet
+   * header, and thus the offsets should always be correct. - per skip
+   * (assuming it is for the base offset (ethernet or not), in which
+   * case the interface should be sufficient. - per prefix length to
+   * allow for sorting later. - per src / dst (?)
+   *
+   * Note that there is no test on the packet type (v4 or v6), as they
+   * follow distinct paths in the vpp graph and will thus be dispatched
+   * to distinct classifiers. This is also why we duplicate the state
+   * for both IPv4 and IPv6 in this implementation.
+   *
+   * Tables are chained per interface in the order they are added. Each
+   * table consists of a set of rules (named sessions).
+   *
+   * / interface --> table i [.next_table_index=j] --> table j [.nti=~0]
+   * -- drop \ | | +-- on match,
+   * send to node m +-- [...] to node n
+   *
+   * For debugging purposes, you can use the following commands:
+   *
+   * vppctl show inacl type ip4 vppctl show inacl type ip6
+   *
+   * vppctl show classify tables [verbose]
+   *
+   * TODO: - allow tables to be removed - sort tables with decreasing
+   * prefix length to allow for LPM. - directly access the linked list
+   * through vpp APIs and remove global variables. They are not
+   * sufficient anyway for removal.
+   */
+
+  /**
+   * Given the current implementation, the following multidimensional arrays
+   * store the table indexes uniquely identified by the 4-tuple (interface,
+   * skip, src/dst, mask).
+   *
+   * For flexibility, some macros and functions will be defined in the .c to
+   * manipulate these arrays.
+   */
+  u32 ip4_vnet_tbl_idx[HICN_MAX_INTFC][4][2][HICN_PUNT_IP4_MASK];
+  //[skip][src][mask],[skip][dst][mask]
+  u32 ip6_vnet_tbl_idx[HICN_MAX_INTFC][4][2][HICN_PUNT_IP6_MASK];
+  //[skip][src][mask],[skip][dst][mask]
+  u32 udp44_vnet_tbl_idx[HICN_MAX_INTFC][4][2][HICN_PUNT_IP4_MASK];
+  //[skip][src][mask],[skip][dst][mask]
+  u32 udp46_vnet_tbl_idx[HICN_MAX_INTFC][4][2][HICN_PUNT_IP6_MASK];
+  //[skip][src][mask],[skip][dst][mask]
+  u32 udp64_vnet_tbl_idx[HICN_MAX_INTFC][4][2][HICN_PUNT_IP4_MASK];
+  //[skip][src][mask],[skip][dst][mask]
+  u32 udp66_vnet_tbl_idx[HICN_MAX_INTFC][4][2][HICN_PUNT_IP6_MASK];
+  //[skip][src][mask],[skip][dst][mask]
+
+  /*
+   * The first and last tables associated to each interface (both for
+   * v4 and v6) are stored. They are respectively used to : - start
+   * classification on the correct table depending on the input
+   * interface: the assumption is that different interfaces will punt
+   * different prefixes, which should decrease the number of potential
+   * rules to match for each incoming packet. see.
+   * vnet_set_input_acl_intfc() - maintain the chaining between tables
+   * so that upon addition, the newly created table can be chained to
+   * the previous last one.
+   */
+  u32 head_ip4[HICN_MAX_INTFC];
+  u32 tail_ip4[HICN_MAX_INTFC];
+  u32 head_ip6[HICN_MAX_INTFC];
+  u32 tail_ip6[HICN_MAX_INTFC];
+
+} hicn_punt_glb_t;
+
+extern hicn_punt_glb_t hicn_punt_glb;
+
+
+
+/* XXX The two following structs might be opaque */
+
+/* Used as punt_id for fields that do not select a src/dst punt column */
+#define NA 0
+
+/* Descriptor of a packet field to match: byte offset from the current
+ * header base, length in bytes, and which punt table column it selects
+ * (HICN_PUNT_SRC / HICN_PUNT_DST / NA). */
+typedef struct
+{
+  u32 offset;
+  u32 len;			/* bytes */
+  u32 punt_id;			/* see explanation in hicn_punt.c */
+} field_t;
+
+/* X-macro table of matchable fields.
+ * Format: _(name, base, layer, field, punt_id)
+ *   base    : byte offset of the layer containing the field
+ *   layer   : libhicn header struct the field belongs to
+ *   punt_id : punt table column (src/dst) the field selects, or NA */
+#define foreach_field \
+  _(ipv6_src, 0, _ipv6_header_t, saddr, HICN_PUNT_SRC) \
+  _(ipv6_dst, 0, _ipv6_header_t, daddr, HICN_PUNT_DST) \
+  _(ipv6_protocol, 0, _ipv6_header_t, nxt, NA) \
+  _(ipv4_src, 0, _ipv4_header_t, saddr, HICN_PUNT_SRC) \
+  _(ipv4_dst, 0, _ipv4_header_t, daddr, HICN_PUNT_DST) \
+  _(ipv4_protocol, 0, _ipv4_header_t, protocol, NA) \
+  \
+  _(ipv4_version, 0, _ipv4_header_t, version_ihl, NA) \
+  _(ipv6_version, 0, _ipv6_header_t, vfc, NA) \
+  _(udp4_sport, IPV4_HDRLEN, _udp_header_t, src_port, NA) \
+  _(udp4_dport, IPV4_HDRLEN, _udp_header_t, dst_port, NA) \
+  _(udp6_sport, IPV6_HDRLEN, _udp_header_t, src_port, NA) \
+  _(udp6_dport, IPV6_HDRLEN, _udp_header_t, dst_port, NA) \
+  _(udp6_protocol, 0, _ipv6_header_t, nxt, NA) \
+  _(udp4_protocol, 0, _ipv4_header_t, protocol, NA) \
+  _(udp46_src, IPV4_HDRLEN + UDP_HDRLEN, _ipv6_header_t, saddr, HICN_PUNT_SRC) \
+  _(udp46_dst, IPV4_HDRLEN + UDP_HDRLEN, _ipv6_header_t, daddr, HICN_PUNT_DST) \
+  _(udp44_src, IPV4_HDRLEN + UDP_HDRLEN, _ipv4_header_t, saddr, HICN_PUNT_SRC) \
+  _(udp44_dst, IPV4_HDRLEN + UDP_HDRLEN, _ipv4_header_t, daddr, HICN_PUNT_DST) \
+  _(udp66_src, IPV6_HDRLEN + UDP_HDRLEN, _ipv6_header_t, saddr, HICN_PUNT_SRC) \
+  _(udp66_dst, IPV6_HDRLEN + UDP_HDRLEN, _ipv6_header_t, daddr, HICN_PUNT_DST) \
+  _(udp64_src, IPV6_HDRLEN + UDP_HDRLEN, _ipv6_header_t, saddr, HICN_PUNT_SRC) \
+  _(udp64_dst, IPV6_HDRLEN + UDP_HDRLEN, _ipv6_header_t, daddr, HICN_PUNT_DST) \
+
+
+/* Declare one global field_t descriptor per entry (defined in punt.c) */
+#define _(NAME, BASE, LAYER, FIELD, PUNT_ID) \
+  extern field_t NAME;
+foreach_field
+#undef _
+/* Per-ip-version dispatch data: pointer to the
+ * [interface][skip][src/dst][mask] table-index storage, address length in
+ * bits, and the field descriptors used when building udp-tunnel match
+ * buffers. ip_version is 0x40 for v4 (see _hicn_punt_add_del_vnettbl). */
+typedef struct
+{
+  u32 *tbl;
+  u8 addr_len_bits;
+  field_t *protocol_field;
+  field_t *version_field;
+  field_t *udp_sport;
+  field_t *udp_dport;
+  u8 ip_version;
+} ip_version_t;
+
+extern ip_version_t ipv4;
+extern ip_version_t ipv6;
+
+
+/* ------------------------- */
+
+/**
+ * @brief Punt table APIs
+ *
+ * Those APIs are called when the first punting table is created for a given
+ * interface, so as to point to the start of the chain.
+ */
+void
+hicn_punt_enable_disable_vnet_ip4_table_on_intf (vlib_main_t * vm,
+ u32 sw_if_index,
+ int is_enable);
+void
+hicn_punt_enable_disable_vnet_ip6_table_on_intf (vlib_main_t * vm,
+ u32 sw_if_index,
+ int is_enable);
+u32 hicn_punt_interest_data_for_udp (vlib_main_t * vm,
+ ip46_address_t * prefix, u8 mask,
+ u32 swif, u8 punt_type, u16 sport,
+ u16 dport);
+u32 hicn_punt_interest_data_for_ethernet (vlib_main_t * vm,
+ ip46_address_t * prefix, u8 mask,
+ u32 swif, u8 type);
+int hicn_punt_remove_ip6_address (vlib_main_t * vm, ip6_address_t * addr,
+ u8 mask, int skip, u32 swif, int is_enable);
+int hicn_punt_remove_ip4_address (vlib_main_t * vm, ip4_address_t * addr,
+ u8 mask, int skip, u32 swif, int is_enable);
+void hicn_punt_init (vlib_main_t * vm);
+
+int
+hicn_punt_add_del_vnettbl (ip_version_t * ip, field_t * field, u8 mask, u32
+ next_tbl_index, u32 intfc, u8 base_offset,
+ u8 use_current_data, int is_add);
+
+#define hicn_punt_add_vnettbl(ip, field, mask, next_tbl_index, intfc, base_offset, use_current_data) \
+ (hicn_punt_add_del_vnettbl(ip, field, mask, next_tbl_index, intfc, base_offset, use_current_data, OP_ADD))
+
+#define hicn_punt_del_vnettbl(ip, field, mask, next_tbl_index, intfc, base_offset) \
+ (hicn_punt_add_del_vnettbl(ip, field, mask, next_tbl_index, intfc, base_offset, HICN_CLASSIFY_NO_CURRENT_DATA_FLAG, OP_DEL))
+
+int
+hicn_punt_add_del_vnetssn (ip_version_t * ip, field_t * field,
+ ip46_address_t * v46_address, u8 mask,
+ u32 next_hit_index, u32 intfc, u8 base_offset,
+ int is_add);
+
+#define hicn_punt_add_vnetssn(ip, field, addr, mask, index, intfc, offset) \
+ (hicn_punt_add_del_vnetssn(ip, field, addr, mask, index, intfc, offset, OP_ADD))
+
+#define hicn_punt_del_vnetssn(ip, field, addr, mask, index, intfc, offset) \
+ (hicn_punt_add_del_vnetssn(ip, field, addr, mask, index, intfc, offset, OP_DEL))
+
+#endif /* // __HICN_PUNT_H__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/route.c b/hicn-plugin/src/route.c
new file mode 100755
index 000000000..9202efbd4
--- /dev/null
+++ b/hicn-plugin/src/route.c
@@ -0,0 +1,392 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/fib/fib_entry.h>
+#include <vnet/fib/fib_table.h>
+#include <vnet/ip/ip6_packet.h>
+#include <vnet/dpo/dpo.h>
+#include <vnet/dpo/load_balance.h>
+#include <vlib/global_funcs.h>
+
+#include "strategy_dpo_ctx.h"
+#include "strategy_dpo_manager.h"
+#include "strategy.h"
+#include "faces/face.h"
+#include "error.h"
+#include "strategies/dpo_mw.h"
+
+/**
+ * @brief Retrieve the hicn dpo installed in the FIB for prefix/plen.
+ *
+ * Performs an exact-match lookup in the hICN FIB table; if the entry's
+ * contributed dpo is a load-balance containing an hicn-typed bucket, that
+ * bucket is returned through hicn_dpo.
+ *
+ * @param prefix Prefix to look up
+ * @param plen Prefix length
+ * @param hicn_dpo Output: hicn dpo found among the load-balance buckets
+ * @param fib_index Output: FIB table index used for the lookup
+ * @return HICN_ERROR_NONE if found; HICN_ERROR_ROUTE_NOT_FOUND if no FIB
+ *         entry; HICN_ERROR_ROUTE_NO_LD if the entry's dpo is not a
+ *         load-balance; HICN_ERROR_ROUTE_DPO_NO_HICN if no hicn bucket.
+ */
+int
+hicn_route_get_dpo (const ip46_address_t * prefix, u8 plen,
+ const dpo_id_t ** hicn_dpo, u32 * fib_index)
+{
+ fib_prefix_t fib_pfx;
+ const dpo_id_t *load_balance_dpo_id;
+ const dpo_id_t *former_dpo_id;
+ int found = 0, ret = HICN_ERROR_ROUTE_NOT_FOUND;
+ fib_node_index_t fib_entry_index;
+
+ /* At this point the face exists in the face table */
+ fib_prefix_from_ip46_addr (prefix, &fib_pfx);
+ fib_pfx.fp_len = plen;
+
+
+ /* Check if the route already exist in the fib */
+ /*
+ * ASSUMPTION: we use table 0 which is the default table and it is
+ * already existing and locked
+ */
+ *fib_index = fib_table_find_or_create_and_lock (fib_pfx.fp_proto,
+ HICN_FIB_TABLE,
+ FIB_SOURCE_PLUGIN_HI);
+ fib_entry_index = fib_table_lookup_exact_match (*fib_index, &fib_pfx);
+
+ if (fib_entry_index != FIB_NODE_INDEX_INVALID)
+ {
+ /* Route already existing. We need to update the dpo. */
+ load_balance_dpo_id =
+ fib_entry_contribute_ip_forwarding (fib_entry_index);
+
+ /* The dpo is not a load balance dpo as expected */
+ if (load_balance_dpo_id->dpoi_type != DPO_LOAD_BALANCE)
+ ret = HICN_ERROR_ROUTE_NO_LD;
+ else
+ {
+ /* former_dpo_id is a load_balance dpo */
+ load_balance_t *lb =
+ load_balance_get (load_balance_dpo_id->dpoi_index);
+
+ /* FIB entry exists but there is no hicn dpo. */
+ ret = HICN_ERROR_ROUTE_DPO_NO_HICN;
+ for (int i = 0; i < lb->lb_n_buckets && !found; i++)
+ {
+ former_dpo_id = load_balance_get_bucket_i (lb, i);
+
+ if (dpo_is_hicn (former_dpo_id))
+ {
+ *hicn_dpo = former_dpo_id;
+ ret = HICN_ERROR_NONE;
+ found = 1;
+ }
+ }
+ }
+ }
+ /*
+ * Remove the lock from the table. We keep one lock per route, not
+ * per dpo
+ */
+ fib_table_unlock (*fib_index, fib_pfx.fp_proto, FIB_SOURCE_PLUGIN_HI);
+
+ return ret;
+}
+
+/* Add a new route for a name prefix */
+/**
+ * @brief Install a new hICN route for a name prefix.
+ *
+ * Collects the dpo of every face in face_id, and if no route exists yet
+ * for prefix/plen, creates a default-strategy hicn dpo with those next
+ * hops and installs it in the FIB as an exclusive entry.
+ *
+ * @param face_id Array of face identifiers used as next hops
+ * @param len Number of entries in face_id
+ * @param prefix Name prefix of the route
+ * @param plen Prefix length
+ * @return HICN_ERROR_NONE on success; HICN_ERROR_ROUTE_INVAL if face_id
+ *         is NULL; HICN_ERROR_ROUTE_ALREADY_EXISTS if an hicn route for
+ *         the prefix exists; other HICN_ERROR_* on failure.
+ */
+int
+hicn_route_add (hicn_face_id_t * face_id, u32 len,
+ const ip46_address_t * prefix, u8 plen)
+{
+
+ fib_prefix_t fib_pfx;
+ dpo_id_t dpo = DPO_INVALID;
+ const dpo_id_t *hicn_dpo_id;
+ int ret = HICN_ERROR_NONE;
+ dpo_id_t face_dpo_tmp[HICN_PARAM_FIB_ENTRY_NHOPS_MAX];
+ int n_face_dpo = 0;
+ index_t dpo_idx;
+ u32 fib_index;
+ vlib_main_t *vm = vlib_get_main ();
+ hicn_face_vft_t *face_vft = NULL;
+
+ if (face_id == NULL)
+ {
+ return HICN_ERROR_ROUTE_INVAL;
+ }
+ /*
+ * Check is the faces are available, otherwise skip the face
+ * id_adjacency existance is not checked. It should be checked before
+ * sending a packet out
+ */
+ for (int i = 0; i < clib_min (HICN_PARAM_FIB_ENTRY_NHOPS_MAX, len); i++)
+ {
+ hicn_face_t *face = hicn_dpoi_get_from_idx (face_id[i]);
+ face_vft = hicn_face_get_vft (face->shared.face_type);
+ dpo_id_t face_dpo = DPO_INVALID;
+ face_vft->hicn_face_get_dpo (face, &face_dpo);
+
+ if (!dpo_id_is_valid (&face_dpo))
+ {
+ /* NOTE(review): despite the "skip the face" comment above, an
+ * invalid face aborts the whole add and returns
+ * HICN_ERROR_NONE (ret is still NONE here) — confirm intended. */
+ vlib_cli_output (vm, "Face %d not found, skip...\n", face_id[i]);
+ return ret;
+ }
+ else
+ {
+ face_dpo_tmp[n_face_dpo++] = face_dpo;
+ }
+ }
+
+ ret = hicn_route_get_dpo (prefix, plen, &hicn_dpo_id, &fib_index);
+
+ if (ret == HICN_ERROR_ROUTE_NOT_FOUND)
+ {
+ /* The Fib entry does not exist */
+ /* At this point the face exists in the face table */
+ fib_prefix_from_ip46_addr (prefix, &fib_pfx);
+ fib_pfx.fp_len = plen;
+
+ dpo_id_t nhops[HICN_PARAM_FIB_ENTRY_NHOPS_MAX];
+ for (int i = 0; i < n_face_dpo; i++)
+ {
+ clib_memcpy (&nhops[i], &face_dpo_tmp[i], sizeof (dpo_id_t));
+ }
+
+ ret =
+ default_dpo.hicn_dpo_create (fib_pfx.fp_proto, nhops, n_face_dpo,
+ &dpo_idx);
+
+ if (ret)
+ {
+ return ret;
+ }
+ /* the value we got when we registered */
+ /*
+ * This should be taken from the name?!? the index of the
+ * object
+ */
+ dpo_set (&dpo,
+ default_dpo.hicn_dpo_get_type (),
+ (ip46_address_is_ip4 (prefix) ? DPO_PROTO_IP4 : DPO_PROTO_IP6),
+ dpo_idx);
+
+ /* Here is where we create the "via" like route */
+ /*
+ * For the moment we use the global one the prefix you want
+ * to match Neale suggested -- FIB_SOURCE_HICN the client
+ * that is adding them -- no easy explanation at this time…
+ */
+ fib_node_index_t new_fib_node_index =
+ fib_table_entry_special_dpo_add (fib_index,
+ &fib_pfx,
+ FIB_SOURCE_PLUGIN_HI,
+ FIB_ENTRY_FLAG_EXCLUSIVE,
+ &dpo);
+
+ /* We added a route, therefore add one lock to the table */
+ fib_table_lock (fib_index, fib_pfx.fp_proto, FIB_SOURCE_PLUGIN_HI);
+
+ /* The FIB entry now holds its own reference on the dpo */
+ dpo_unlock (&dpo);
+ ret =
+ (new_fib_node_index !=
+ FIB_NODE_INDEX_INVALID) ? HICN_ERROR_NONE :
+ HICN_ERROR_ROUTE_NO_INSERT;
+
+ /*
+ * TODO: we might want to store the fib index in the face.
+ * This will help to update the fib entries when a face is
+ * deleted. Fib_index_t is returned from
+ * fib_table_entry_special_dpo_add.
+ */
+ }
+ else if (ret == HICN_ERROR_NONE)
+ {
+ ret = HICN_ERROR_ROUTE_ALREADY_EXISTS;
+ }
+ return ret;
+}
+
+/**
+ * @brief Add next hops to an existing hICN route.
+ *
+ * Resolves each face to its dpo and, if an hicn route exists for
+ * prefix/plen, adds/updates every next hop through the route's dpo vft.
+ *
+ * @param face_id Array of face identifiers to add as next hops
+ * @param len Number of entries in face_id
+ * @param prefix Name prefix of the route
+ * @param plen Prefix length
+ * @return HICN_ERROR_NONE on success; HICN_ERROR_ROUTE_INVAL if face_id
+ *         is NULL; otherwise the error from hicn_route_get_dpo or from
+ *         the strategy's hicn_dpo_add_update_nh.
+ */
+int
+hicn_route_add_nhops (hicn_face_id_t * face_id, u32 len,
+ const ip46_address_t * prefix, u8 plen)
+{
+ const dpo_id_t *hicn_dpo_id;
+ int ret = HICN_ERROR_NONE;
+ dpo_id_t faces_dpo_tmp[HICN_PARAM_FIB_ENTRY_NHOPS_MAX];
+ int n_face_dpo = 0;
+ const hicn_dpo_vft_t *dpo_vft;
+ u32 fib_index;
+ vlib_main_t *vm = vlib_get_main ();
+ hicn_face_vft_t *face_vft = NULL;
+
+ if (face_id == NULL)
+ {
+ return HICN_ERROR_ROUTE_INVAL;
+ }
+ /*
+ * Check is the faces are available, otherwise skip the face
+ * id_adjacency existance is not checked. It should be checked before
+ * sending a packet out
+ */
+ for (int i = 0; i < clib_min (HICN_PARAM_FIB_ENTRY_NHOPS_MAX, len); i++)
+ {
+ hicn_face_t *face = hicn_dpoi_get_from_idx (face_id[i]);
+ face_vft = hicn_face_get_vft (face->shared.face_type);
+ dpo_id_t face_dpo = DPO_INVALID;
+ face_vft->hicn_face_get_dpo (face, &face_dpo);
+
+ if (!dpo_id_is_valid (&face_dpo))
+ {
+ /* NOTE(review): as in hicn_route_add, an invalid face aborts the
+ * whole operation and returns HICN_ERROR_NONE — confirm. */
+ vlib_cli_output (vm, "Face %d not found, skip...\n", face_id[i]);
+ return ret;
+ }
+ else
+ {
+ faces_dpo_tmp[n_face_dpo++] = face_dpo;
+ }
+ }
+
+ ret = hicn_route_get_dpo (prefix, plen, &hicn_dpo_id, &fib_index);
+
+ if (ret == HICN_ERROR_NONE)
+ {
+ for (int i = 0; i < n_face_dpo && (ret == HICN_ERROR_NONE); i++)
+ {
+ u32 vft_id = hicn_dpo_get_vft_id (hicn_dpo_id);
+ dpo_vft = hicn_dpo_get_vft (vft_id);
+ ret = dpo_vft->hicn_dpo_add_update_nh (&faces_dpo_tmp[i],
+ hicn_dpo_id->dpoi_index);
+ }
+ }
+ return ret;
+}
+
+/**
+ * @brief Remove the hICN route for a name prefix.
+ *
+ * The FIB entry is removed only if the installed dpo is of hicn type
+ * (checked through hicn_route_get_dpo).
+ *
+ * @param prefix Name prefix of the route
+ * @param plen Prefix length
+ * @return HICN_ERROR_NONE on success, otherwise the error returned by
+ *         hicn_route_get_dpo.
+ */
+int
+hicn_route_del (ip46_address_t * prefix, u8 plen)
+{
+ fib_prefix_t fib_pfx;
+ const dpo_id_t *hicn_dpo_id;
+ int ret = HICN_ERROR_NONE;
+ u32 fib_index;
+
+ /* At this point the face exists in the face table */
+ fib_prefix_from_ip46_addr (prefix, &fib_pfx);
+ fib_pfx.fp_len = plen;
+
+ /* Remove the fib entry only if the dpo is of type hicn */
+ ret = hicn_route_get_dpo (prefix, plen, &hicn_dpo_id, &fib_index);
+
+ if (ret == HICN_ERROR_NONE)
+ {
+ /* NOTE(review): HICN_FIB_TABLE is passed here while the lookup above
+ * resolved fib_index — confirm the two always denote the same table. */
+ fib_table_entry_special_remove (HICN_FIB_TABLE, &fib_pfx,
+ FIB_SOURCE_PLUGIN_HI);
+
+ /*
+ * Remove the lock from the table. We keep one lock per route
+ */
+ fib_table_unlock (fib_index, fib_pfx.fp_proto, FIB_SOURCE_PLUGIN_HI);
+ }
+ //Remember to remove the lock from the table when removing the entry
+ return ret;
+}
+
+/**
+ * @brief Remove a single next hop (face) from the hICN route of a prefix.
+ *
+ * @param prefix Name prefix of the route
+ * @param plen Prefix length
+ * @param face_id Face identifier of the next hop to remove
+ * @return HICN_ERROR_NONE on success; otherwise the error returned by
+ *         hicn_route_get_dpo or by the strategy's hicn_dpo_del_nh.
+ */
+int
+hicn_route_del_nhop (ip46_address_t * prefix, u8 plen, hicn_face_id_t face_id)
+{
+
+ fib_prefix_t fib_pfx;
+ const dpo_id_t *hicn_dpo_id;
+ int ret;
+ u32 vft_id;
+ const hicn_dpo_vft_t *dpo_vft;
+ u32 fib_index;
+
+ /* At this point the face exists in the face table */
+ fib_prefix_from_ip46_addr (prefix, &fib_pfx);
+ fib_pfx.fp_len = plen;
+
+ ret = hicn_route_get_dpo (prefix, plen, &hicn_dpo_id, &fib_index);
+
+ /* Check if the dpo is an hicn_dpo_t */
+ if (ret == HICN_ERROR_NONE)
+ {
+ vft_id = hicn_dpo_get_vft_id (hicn_dpo_id);
+ dpo_vft = hicn_dpo_get_vft (vft_id);
+ /* Delegate the removal to the strategy owning the dpo; it also
+ * removes the FIB entry if the last next hop disappears. */
+ return dpo_vft->hicn_dpo_del_nh (face_id, hicn_dpo_id->dpoi_index,
+ &fib_pfx);
+ }
+ //Remember to remove the lock from the table when removing the entry
+ return ret;
+}
+
+/**
+ * @brief Replace the forwarding strategy of an existing hICN route.
+ *
+ * Creates a new dpo ctx of the requested strategy, copying the next hops
+ * of the current ctx, and swaps it into the FIB entry.
+ *
+ * @param prefix Name prefix of the route
+ * @param plen Prefix length
+ * @param strategy_id Identifier of the new strategy
+ * @return HICN_ERROR_NONE on success; HICN_ERROR_STRATEGY_NOT_FOUND if
+ *         strategy_id is unknown; HICN_ERROR_ROUTE_NOT_UPDATED if the FIB
+ *         update failed; otherwise the error from hicn_route_get_dpo.
+ */
+int
+hicn_route_set_strategy (ip46_address_t * prefix, u8 plen, u8 strategy_id)
+{
+ fib_prefix_t fib_pfx;
+ const dpo_id_t *hicn_dpo_id;
+ dpo_id_t new_dpo_id = DPO_INVALID;
+ int ret;
+ hicn_dpo_ctx_t *old_hicn_dpo_ctx;
+ const hicn_dpo_vft_t *old_dpo_vft;
+ const hicn_dpo_vft_t *new_dpo_vft;
+ index_t new_hicn_dpo_idx;
+ u32 fib_index;
+ u32 old_vft_id;
+
+ /* At this point the face exists in the face table */
+ fib_prefix_from_ip46_addr (prefix, &fib_pfx);
+ fib_pfx.fp_len = plen;
+
+ ret = hicn_route_get_dpo (prefix, plen, &hicn_dpo_id, &fib_index);
+
+ if (ret == HICN_ERROR_NONE)
+ {
+ old_vft_id = hicn_dpo_get_vft_id (hicn_dpo_id);
+ old_dpo_vft = hicn_dpo_get_vft (old_vft_id);
+ old_hicn_dpo_ctx =
+ old_dpo_vft->hicn_dpo_get_ctx (hicn_dpo_id->dpoi_index);
+
+ new_dpo_vft = hicn_dpo_get_vft_from_id (strategy_id);
+
+ if (new_dpo_vft == NULL)
+ return HICN_ERROR_STRATEGY_NOT_FOUND;
+
+ /* Create a new dpo for the new strategy */
+ /* NOTE(review): the return value of hicn_dpo_create is not checked
+ * here (it is in hicn_route_add) — confirm failure is impossible. */
+ new_dpo_vft->hicn_dpo_create (hicn_dpo_id->dpoi_proto,
+ old_hicn_dpo_ctx->next_hops,
+ old_hicn_dpo_ctx->entry_count,
+ &new_hicn_dpo_idx);
+
+ /* the value we got when we registered */
+ dpo_set (&new_dpo_id,
+ new_dpo_vft->hicn_dpo_get_type (),
+ (ip46_address_is_ip4 (prefix) ? DPO_PROTO_IP4 :
+ DPO_PROTO_IP6), new_hicn_dpo_idx);
+
+ /* Here is where we create the "via" like route */
+ /*
+ * For the moment we use the global one the prefix you want
+ * to match Neale suggested -- FIB_SOURCE_HICN the client
+ * that is adding them -- no easy explanation at this time…
+ */
+ fib_node_index_t new_fib_node_index =
+ fib_table_entry_special_dpo_update (fib_index,
+ &fib_pfx,
+ FIB_SOURCE_PLUGIN_HI,
+ FIB_ENTRY_FLAG_EXCLUSIVE,
+ &new_dpo_id);
+
+ dpo_unlock (&new_dpo_id);
+ ret =
+ (new_fib_node_index !=
+ FIB_NODE_INDEX_INVALID) ? HICN_ERROR_NONE :
+ HICN_ERROR_ROUTE_NOT_UPDATED;
+ }
+ //Remember to remove the lock from the table when removing the entry
+ return ret;
+
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/route.h b/hicn-plugin/src/route.h
new file mode 100755
index 000000000..be15b9906
--- /dev/null
+++ b/hicn-plugin/src/route.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HICN_ROUTE__
+#define __HICN_ROUTE__
+
+#include <vlib/vlib.h>
+#include <vppinfra/error.h>
+#include "hicn.h"
+#include "faces/face.h"
+
+/*
+ * Retrieve the hicn dpo corresponding to a hicn prefix
+ */
+int
+hicn_route_get_dpo (const ip46_address_t * prefix, u8 plen,
+ const dpo_id_t ** hicn_dpo, u32 * fib_index);
+
+/*
+ * Add a new route for a name prefix
+ */
+int
+hicn_route_add (hicn_face_id_t * face_id, u32 len,
+ const ip46_address_t * prefix, u8 plen);
+
+/*
+ * Add new next hops for a prefix route
+ */
+int
+hicn_route_add_nhops (hicn_face_id_t * face_id, u32 len,
+ const ip46_address_t * prefix, u8 plen);
+
+/* Remove a route for a name prefix */
+int hicn_route_del (ip46_address_t * prefix, u8 plen);
+
+/* Remove a next hop route for a name prefix.
+ * Parameter type fixed to match the definition in route.c
+ * (was u32, definition uses hicn_face_id_t). */
+int hicn_route_del_nhop (ip46_address_t * prefix, u8 plen,
+ hicn_face_id_t face_id);
+
+/* Set the forwarding strategy for a name prefix.
+ * Parameter type fixed to match the definition in route.c
+ * (was u32, definition uses u8). */
+int
+hicn_route_set_strategy (ip46_address_t * prefix, u8 plen, u8 strategy_id);
+
+#endif /* //__HICN_ROUTE__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/state.h b/hicn-plugin/src/state.h
new file mode 100755
index 000000000..7e984e6c3
--- /dev/null
+++ b/hicn-plugin/src/state.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HICN_STATE__
+#define __HICN_STATE__
+
+#include <netinet/in.h>
+#include <vnet/buffer.h>
+
+#include "hicn.h"
+#include "pcs.h"
+#include "hashtb.h"
+#include "strategy.h"
+#include "strategy_dpo_ctx.h"
+#include "strategy_dpo_manager.h"
+
+/**
+ * @brief Prefetch the PCS hash node and hash bucket referenced by the
+ * metadata carried in the hicn buffer.
+ *
+ * The node/bucket ids must have been stored beforehand (see
+ * hicn_store_internal_state). Overflow buckets live in a separate pool,
+ * selected through the HICN_HASH_NODE_OVERFLOW_BUCKET flag.
+ */
+always_inline void
+hicn_prefetch_pcs_entry (hicn_buffer_t * hicnb, hicn_pit_cs_t * pitcs)
+{
+ hicn_hash_node_t *node = pool_elt_at_index (pitcs->pcs_table->ht_nodes,
+ hicnb->node_id);
+
+ hicn_hash_bucket_t *bucket;
+ if (hicnb->hash_bucket_flags & HICN_HASH_NODE_OVERFLOW_BUCKET)
+ bucket =
+ pool_elt_at_index (pitcs->pcs_table->ht_overflow_buckets,
+ hicnb->bucket_id);
+ else
+ bucket =
+ (hicn_hash_bucket_t *) (pitcs->pcs_table->ht_buckets +
+ hicnb->bucket_id);
+
+ CLIB_PREFETCH (node, CLIB_CACHE_LINE_BYTES, STORE);
+ CLIB_PREFETCH (bucket, CLIB_CACHE_LINE_BYTES, STORE);
+}
+
+/**
+ * @brief Resolve the PCS node, strategy vft, dpo vft, dpo ctx id and hash
+ * entry from the ids stored in the hicn buffer metadata.
+ *
+ * Counterpart of hicn_store_internal_state: all outputs are derived from
+ * hicnb fields set by a previous node in the graph.
+ */
+always_inline void
+hicn_get_internal_state (hicn_buffer_t * hicnb, hicn_pit_cs_t * pitcs,
+ hicn_hash_node_t ** node,
+ const hicn_strategy_vft_t ** strategy_vft,
+ const hicn_dpo_vft_t ** dpo_vft, u8 * dpo_ctx_id,
+ hicn_hash_entry_t ** hash_entry)
+{
+ *node = pool_elt_at_index (pitcs->pcs_table->ht_nodes, hicnb->node_id);
+ *strategy_vft = hicn_dpo_get_strategy_vft (hicnb->vft_id);
+ *dpo_vft = hicn_dpo_get_vft (hicnb->vft_id);
+ *dpo_ctx_id = hicnb->dpo_ctx_id;
+
+ /* Overflow buckets are pool elements; regular buckets are indexed off
+ * the base bucket array. */
+ hicn_hash_bucket_t *bucket;
+ if (hicnb->hash_bucket_flags & HICN_HASH_NODE_OVERFLOW_BUCKET)
+ bucket =
+ pool_elt_at_index (pitcs->pcs_table->ht_overflow_buckets,
+ hicnb->bucket_id);
+ else
+ bucket =
+ (hicn_hash_bucket_t *) (pitcs->pcs_table->ht_buckets +
+ hicnb->bucket_id);
+
+ *hash_entry = &(bucket->hb_entries[hicnb->hash_entry_id]);
+}
+
+/*
+ * This function sets the PCS entry index, the dpo index and the vft index
+ * in the opaque2 buffer. In this way, the interest-hitpit and
+ * interest-hitcs nodes can prefetch the corresponding state (PIT entry,
+ * dpo_ctx and the strategy vft).
+ */
+always_inline void
+hicn_store_internal_state (vlib_buffer_t * b, u64 name_hash, u32 node_id,
+ u8 dpo_ctx_id, u8 vft_id, u8 hash_entry_id,
+ u32 bucket_id, u8 bucket_is_overflow)
+{
+ hicn_buffer_t *hicnb = hicn_get_buffer (b);
+ hicnb->name_hash = name_hash;
+ hicnb->node_id = node_id;
+ hicnb->dpo_ctx_id = dpo_ctx_id;
+ hicnb->vft_id = vft_id;
+ hicnb->hash_entry_id = hash_entry_id;
+ hicnb->bucket_id = bucket_id;
+ /* Branch-free: flag is set iff bucket_is_overflow is 1, cleared if 0 */
+ hicnb->hash_bucket_flags =
+ HICN_HASH_NODE_OVERFLOW_BUCKET * bucket_is_overflow;
+}
+
+#endif /* // __HICN_STATE__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/strategies/dpo_mw.c b/hicn-plugin/src/strategies/dpo_mw.c
new file mode 100755
index 000000000..882368e6e
--- /dev/null
+++ b/hicn-plugin/src/strategies/dpo_mw.c
@@ -0,0 +1,305 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "../strategy_dpo_ctx.h"
+#include "dpo_mw.h"
+#include "strategy_mw.h"
+#include "../strategy_dpo_manager.h"
+
+/* Pool of dpo ctx for the maximum-weight strategy */
+hicn_strategy_mw_ctx_t *hicn_strategy_mw_ctx_pool;
+
+const static char *const hicn_ip6_nodes[] = {
+ "hicn-mw-strategy", // this is the name you give your node in VLIB_REGISTER_NODE
+ NULL,
+};
+
+const static char *const hicn_ip4_nodes[] = {
+ "hicn-mw-strategy", // this is the name you give your node in VLIB_REGISTER_NODE
+ NULL,
+};
+
+/* Per-protocol graph node lists handed to the dpo framework */
+const static char *const *const hicn_nodes_mw[DPO_PROTO_NUM] = {
+ [DPO_PROTO_IP6] = hicn_ip6_nodes,
+ [DPO_PROTO_IP4] = hicn_ip4_nodes,
+};
+
+/**
+ * @brief DPO type value for the mw_strategy
+ */
+static dpo_type_t hicn_dpo_type_mw;
+
+/* vft exposing the mw strategy dpo operations to the hicn dpo manager */
+static const hicn_dpo_vft_t hicn_dpo_mw_vft = {
+ .hicn_dpo_get_ctx = &hicn_strategy_mw_ctx_get,
+ .hicn_dpo_is_type = &hicn_dpo_is_type_strategy_mw,
+ .hicn_dpo_get_type = &hicn_dpo_strategy_mw_get_type,
+ .hicn_dpo_module_init = &hicn_dpo_strategy_mw_module_init,
+ .hicn_dpo_create = &hicn_strategy_mw_ctx_create,
+ .hicn_dpo_add_update_nh = &hicn_strategy_mw_ctx_add_nh,
+ .hicn_dpo_del_nh = &hicn_strategy_mw_ctx_del_nh,
+ .hicn_dpo_lock_dpo_ctx = &hicn_strategy_mw_ctx_lock,
+ .hicn_dpo_unlock_dpo_ctx = hicn_strategy_mw_ctx_unlock,
+ .format_hicn_dpo = &format_hicn_dpo_strategy_mw
+};
+
+/* Tell whether @p dpo carries the dpo type registered for the mw strategy */
+int
+hicn_dpo_is_type_strategy_mw (const dpo_id_t * dpo)
+{
+ return (hicn_dpo_type_mw == dpo->dpoi_type);
+}
+
+/* Module init: validate the ctx pool and register the mw dpo type with
+ * its graph nodes, dpo vft and strategy vft. */
+void
+hicn_dpo_strategy_mw_module_init (void)
+{
+ pool_validate_index (hicn_strategy_mw_ctx_pool, 0);
+ /*
+ * Register our type of dpo
+ */
+ hicn_dpo_type_mw =
+ hicn_dpo_register_new_type (hicn_nodes_mw, &hicn_dpo_mw_vft,
+ hicn_mw_strategy_get_vft (),
+ &dpo_strategy_mw_ctx_vft);
+}
+
+/**
+ * @brief Format a one-line description of the mw strategy dpo.
+ *
+ * The indent argument must still be consumed from the va_list, but the
+ * original passed it to format() with no matching conversion directive;
+ * drop the stray argument.
+ */
+u8 *
+format_hicn_dpo_strategy_mw (u8 * s, va_list * ap)
+{
+
+ u32 indent = va_arg (*ap, u32);
+ (void) indent;
+ s =
+ format (s,
+ "Static Weights: weights are updated by the control plane, next hop is the one with the maximum weight.\n");
+ return (s);
+}
+
+/* Return the dpo type value registered for the mw strategy */
+dpo_type_t
+hicn_dpo_strategy_mw_get_type (void)
+{
+ return hicn_dpo_type_mw;
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+/* Take a reference on the mw dpo ctx identified by @p dpo. The base
+ * hicn_dpo_ctx_t is the first member of the mw ctx, so the lock counter
+ * can be reached through the base pointer directly. */
+void
+hicn_strategy_mw_ctx_lock (dpo_id_t * dpo)
+{
+ hicn_dpo_ctx_t *ctx = hicn_strategy_mw_ctx_get (dpo->dpoi_index);
+ ctx->locks++;
+}
+
+/* Release a reference on the mw dpo ctx; the ctx is returned to the pool
+ * when the last reference goes away. The cast back to
+ * hicn_strategy_mw_ctx_t is valid because default_ctx is the first
+ * member (see hicn_strategy_mw_ctx_s). */
+void
+hicn_strategy_mw_ctx_unlock (dpo_id_t * dpo)
+{
+ hicn_strategy_mw_ctx_t *hicn_strategy_mw_ctx =
+ (hicn_strategy_mw_ctx_t *) hicn_strategy_mw_ctx_get (dpo->dpoi_index);
+ hicn_strategy_mw_ctx->default_ctx.locks--;
+
+ if (0 == hicn_strategy_mw_ctx->default_ctx.locks)
+ {
+ pool_put (hicn_strategy_mw_ctx_pool, hicn_strategy_mw_ctx);
+ }
+}
+
+/**
+ * @brief Format the mw strategy dpo ctx: one line per valid next hop with
+ * its weight.
+ *
+ * Expected va_list: index_t of the ctx, u32 indent.
+ * (Fixes a stray double semicolon after the indent declaration.)
+ */
+u8 *
+format_hicn_strategy_mw_ctx (u8 * s, va_list * ap)
+{
+ int i = 0;
+ index_t index = va_arg (*ap, index_t);
+ hicn_strategy_mw_ctx_t *dpo = NULL;
+ dpo_id_t *next_hop = NULL;
+ hicn_face_vft_t *face_vft = NULL;
+ u32 indent = va_arg (*ap, u32);
+
+ dpo = (hicn_strategy_mw_ctx_t *) hicn_strategy_mw_ctx_get (index);
+
+ s = format (s, "hicn-mw");
+ for (i = 0; i < HICN_PARAM_FIB_ENTRY_NHOPS_MAX; i++)
+ {
+ next_hop = &dpo->default_ctx.next_hops[i];
+ /* NOTE(review): the face vft is looked up from the dpo type of the
+ * next hop; confirm face dpo types and face types always coincide. */
+ face_vft = hicn_face_get_vft (next_hop->dpoi_type);
+ if (face_vft != NULL)
+ {
+ s = format (s, "\n");
+ s =
+ format (s, "%U ", face_vft->format_face, next_hop->dpoi_index,
+ indent);
+ s = format (s, "weight %u", dpo->weight[i]);
+ }
+ }
+
+ return (s);
+}
+
+/* Pool index of the mw ctx @p cd (pointer arithmetic on the pool base) */
+static index_t
+hicn_strategy_mw_ctx_get_index (hicn_strategy_mw_ctx_t * cd)
+{
+ return (index_t) (cd - hicn_strategy_mw_ctx_pool);
+}
+
+/**
+ * @brief Create a new mw strategy dpo ctx.
+ *
+ * Allocates a ctx from the pool, marks every next hop slot invalid,
+ * copies up to HICN_PARAM_FIB_ENTRY_NHOPS_MAX entries from @p next_hop
+ * and zeroes all the weights.
+ *
+ * @param proto Protocol the dpo is meant for (unused, kept for the
+ *              hicn_dpo_create vft signature)
+ * @param next_hop List of next hops to install
+ * @param nh_len Number of entries in @p next_hop
+ * @param dpo_idx Output: pool index of the new ctx
+ * @return HICN_ERROR_NONE
+ */
+int
+hicn_strategy_mw_ctx_create (dpo_proto_t proto, const dpo_id_t * next_hop,
+ int nh_len, index_t * dpo_idx)
+{
+ hicn_strategy_mw_ctx_t *hicn_strategy_mw_ctx;
+ int ret = HICN_ERROR_NONE, i;
+ dpo_id_t invalid = NEXT_HOP_INVALID;
+
+ /* Allocate a hicn_dpo_ctx on the vpp pool and initialize it */
+ pool_get (hicn_strategy_mw_ctx_pool, hicn_strategy_mw_ctx);
+
+ *dpo_idx = hicn_strategy_mw_ctx_get_index (hicn_strategy_mw_ctx);
+ /* Use the function-scope index; the original shadowed it with a second
+ * declaration in the loop header. */
+ for (i = 0; i < HICN_PARAM_FIB_ENTRY_NHOPS_MAX; i++)
+ {
+ hicn_strategy_mw_ctx->default_ctx.next_hops[i] = invalid;
+ }
+
+ hicn_strategy_mw_ctx->default_ctx.entry_count = 0;
+ hicn_strategy_mw_ctx->default_ctx.locks = 0;
+
+ for (i = 0; i < HICN_PARAM_FIB_ENTRY_NHOPS_MAX && i < nh_len; i++)
+ {
+ clib_memcpy (&hicn_strategy_mw_ctx->default_ctx.next_hops[i],
+ &next_hop[i], sizeof (dpo_id_t));
+ hicn_strategy_mw_ctx->default_ctx.entry_count++;
+ }
+
+ /* Zero the whole weight array; sizeof instead of a hard-coded count
+ * keeps this correct if the element type ever changes. */
+ memset (hicn_strategy_mw_ctx->weight, 0,
+ sizeof (hicn_strategy_mw_ctx->weight));
+
+ return ret;
+}
+
+/**
+ * @brief Retrieve the mw ctx at @p index, or NULL if the pool slot is free.
+ *
+ * Returns a pointer to the embedded hicn_dpo_ctx_t (the first member of
+ * hicn_strategy_mw_ctx_t). The original evaluated &NULL->default_ctx when
+ * the index was free — undefined behavior; return NULL explicitly instead
+ * (callers already test the result against NULL).
+ */
+hicn_dpo_ctx_t *
+hicn_strategy_mw_ctx_get (index_t index)
+{
+ hicn_strategy_mw_ctx_t *hicn_strategy_mw_ctx = NULL;
+ if (pool_is_free_index (hicn_strategy_mw_ctx_pool, index))
+ return NULL;
+ hicn_strategy_mw_ctx = pool_elt_at_index (hicn_strategy_mw_ctx_pool, index);
+ return &hicn_strategy_mw_ctx->default_ctx;
+}
+
+/**
+ * @brief Add a next hop to the mw dpo ctx.
+ *
+ * Rejects duplicates (unless the matching face is marked deleted) and
+ * appends @p nh in the first free slot.
+ *
+ * @return HICN_ERROR_NONE on success;
+ *         HICN_ERROR_DPO_CTX_NHOPS_EXISTS if the next hop is present;
+ *         HICN_ERROR_DPO_CTX_NHOPS_NS if the ctx is full;
+ *         HICN_ERROR_DPO_CTX_NOT_FOUND if @p dpo_idx is invalid.
+ */
+int
+hicn_strategy_mw_ctx_add_nh (const dpo_id_t * nh, index_t dpo_idx)
+{
+ hicn_strategy_mw_ctx_t *hicn_strategy_mw_ctx =
+ (hicn_strategy_mw_ctx_t *) hicn_strategy_mw_ctx_get (dpo_idx);
+
+ if (hicn_strategy_mw_ctx != NULL)
+ {
+
+ int empty = hicn_strategy_mw_ctx->default_ctx.entry_count;
+
+ /* Iterate through the list of faces to add new faces */
+ for (int i = 0; i < hicn_strategy_mw_ctx->default_ctx.entry_count; i++)
+ {
+ if (!memcmp
+ (nh, &hicn_strategy_mw_ctx->default_ctx.next_hops[i],
+ sizeof (dpo_id_t)))
+ {
+ /* If face is marked as deleted, ignore it */
+ hicn_face_t *face =
+ hicn_dpoi_get_from_idx (hicn_strategy_mw_ctx->
+ default_ctx.next_hops[i].dpoi_index);
+ if (face->shared.flags & HICN_FACE_FLAGS_DELETED)
+ {
+ continue;
+ }
+ return HICN_ERROR_DPO_CTX_NHOPS_EXISTS;
+ }
+ }
+
+ /* Reject when full: writing next_hops[empty] with
+ * empty == HICN_PARAM_FIB_ENTRY_NHOPS_MAX would be out of bounds.
+ * The original tested '>' (and duplicated the check), which let the
+ * write through when entry_count == MAX. */
+ if (PREDICT_FALSE (empty >= HICN_PARAM_FIB_ENTRY_NHOPS_MAX))
+ {
+ return HICN_ERROR_DPO_CTX_NHOPS_NS;
+ }
+ clib_memcpy (&hicn_strategy_mw_ctx->default_ctx.next_hops[empty], nh,
+ sizeof (dpo_id_t));
+ hicn_strategy_mw_ctx->default_ctx.entry_count++;
+
+ return HICN_ERROR_NONE;
+ }
+ return HICN_ERROR_DPO_CTX_NOT_FOUND;
+}
+
+/**
+ * @brief Remove the next hops matching @p face_id from the mw dpo ctx and
+ * compact the next hop array; if the ctx becomes empty the FIB entry for
+ * @p fib_pfx is removed.
+ *
+ * @return HICN_ERROR_NONE on success,
+ *         HICN_ERROR_DPO_CTX_NOT_FOUND if @p dpo_idx is invalid.
+ */
+int
+hicn_strategy_mw_ctx_del_nh (hicn_face_id_t face_id, index_t dpo_idx,
+ fib_prefix_t * fib_pfx)
+{
+ hicn_strategy_mw_ctx_t *hicn_strategy_mw_ctx =
+ (hicn_strategy_mw_ctx_t *) hicn_strategy_mw_ctx_get (dpo_idx);
+ int nh_id = ~0;
+ dpo_id_t invalid = NEXT_HOP_INVALID;
+
+ /* The original fell through to the hole-compaction code below even when
+ * the ctx lookup failed, dereferencing a NULL pointer. Bail out early. */
+ if (hicn_strategy_mw_ctx == NULL)
+ return HICN_ERROR_DPO_CTX_NOT_FOUND;
+
+ for (int i = 0; i < hicn_strategy_mw_ctx->default_ctx.entry_count; i++)
+ {
+ if (hicn_strategy_mw_ctx->default_ctx.next_hops[i].dpoi_index ==
+ face_id)
+ {
+ nh_id = i;
+ hicn_face_unlock (&hicn_strategy_mw_ctx->default_ctx.
+ next_hops[i]);
+ hicn_strategy_mw_ctx->default_ctx.next_hops[i] = invalid;
+ hicn_strategy_mw_ctx->default_ctx.entry_count--;
+ }
+ }
+
+ if (0 == hicn_strategy_mw_ctx->default_ctx.entry_count)
+ {
+ fib_table_entry_special_remove (HICN_FIB_TABLE, fib_pfx,
+ FIB_SOURCE_PLUGIN_HI);
+ }
+
+ /*
+ * Remove any possible hole in the arrays of dpos.
+ * NOTE(review): the weight array is not compacted alongside the next
+ * hops, so remaining weights may shift out of sync — confirm intended.
+ */
+ if (hicn_strategy_mw_ctx->default_ctx.entry_count > 0 && nh_id != ~0
+ && nh_id < hicn_strategy_mw_ctx->default_ctx.entry_count - 1)
+ {
+ int i;
+ for (i = nh_id; i < hicn_strategy_mw_ctx->default_ctx.entry_count; i++)
+ {
+ clib_memcpy (&hicn_strategy_mw_ctx->default_ctx.next_hops[i],
+ &hicn_strategy_mw_ctx->default_ctx.next_hops[i + 1],
+ sizeof (dpo_id_t));
+ }
+ /* Set as invalid the last dpo */
+ hicn_strategy_mw_ctx->default_ctx.next_hops[i] = invalid;
+ }
+ return HICN_ERROR_NONE;
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/hicn-plugin/src/strategies/dpo_mw.h b/hicn-plugin/src/strategies/dpo_mw.h
new file mode 100755
index 000000000..a8c0a3b43
--- /dev/null
+++ b/hicn-plugin/src/strategies/dpo_mw.h
@@ -0,0 +1,131 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HICN_DPO_MW_H__
+#define __HICN_DPO_MW_H__
+
+#include <vnet/dpo/dpo.h>
+#include "../strategy_dpo_ctx.h"
+
+/*
+ * Context for the maximum-weight strategy dpo. default_ctx must remain
+ * the first member: dpo_mw.c casts between hicn_dpo_ctx_t * and
+ * hicn_strategy_mw_ctx_t * (see hicn_strategy_mw_ctx_lock/unlock).
+ */
+typedef struct hicn_strategy_mw_ctx_s
+{
+ hicn_dpo_ctx_t default_ctx;
+
+ /* One weight per next hop slot, updated by the control plane */
+ u8 weight[HICN_PARAM_FIB_ENTRY_NHOPS_MAX];
+} hicn_strategy_mw_ctx_t;
+
+/**
+ * @brief Lock the mw ctx
+ *
+ * @param dpo Identifier of the dpo of the mw ctx
+ */
+void hicn_strategy_mw_ctx_lock (dpo_id_t * dpo);
+
+/**
+ * @brief Unlock the mw ctx
+ *
+ * @param dpo Identifier of the dpo of the mw ctx
+ */
+void hicn_strategy_mw_ctx_unlock (dpo_id_t * dpo);
+
+/**
+ * @brief Format the dpo ctx for a human-readable string
+ *
+ * @param s String to which to append the formatted dpo ctx
+ * @param ap List of parameters for the formatting
+ *
+ * @result The string with the formatted dpo ctx
+ */
+u8 *format_hicn_strategy_mw_ctx (u8 * s, va_list * ap);
+
+const static dpo_vft_t dpo_strategy_mw_ctx_vft = {
+ .dv_lock = hicn_strategy_mw_ctx_lock,
+ .dv_unlock = hicn_strategy_mw_ctx_unlock,
+ .dv_format = format_hicn_strategy_mw_ctx,
+};
+
+/**
+ * @brief Retrieve an hicn_strategy_mw_ctx object
+ *
+ * @param indext Index of the hicn_dpo_ctx to retrieve
+ * @return The hicn_dpo_ctx object or NULL
+ */
+hicn_dpo_ctx_t *hicn_strategy_mw_ctx_get (index_t index);
+
+/**
+ * @brief Create a new mw ctx
+ *
+ * @param proto The protocol to which the dpo is meant for (see vpp docs)
+ * @param next_hop A list of next hops to be inserted in the dpo ctx
+ * @param nh_len Size of the list
+ * @param dpo_idx index_t that will hold the index of the created dpo ctx
+ * @return HICN_ERROR_NONE if the creation was fine, otherwise EINVAL
+ */
+int
+hicn_strategy_mw_ctx_create (dpo_proto_t proto, const dpo_id_t * next_hop,
+ int nh_len, index_t * dpo_idx);
+
+/**
+ * @brief Add or update a next hop in the dpo ctx.
+ *
+ * This function is meant to be used in the control plane and not in the data plane,
+ * as it is not optimized for the latter.
+ *
+ * @param nh Next hop to insert in the dpo ctx
+ * @param dpo_idx Index of the dpo ctx to update with the new or updated next
+ * hop
+ * @return HICN_ERROR_NONE if the update or insert was fine,
+ * otherwise HICN_ERROR_DPO_CTX_NOT_FOUND
+ */
+int hicn_strategy_mw_ctx_add_nh (const dpo_id_t * nh, index_t dpo_idx);
+
+/**
+ * @brief Delete a next hop in the dpo ctx.
+ *
+ * @param face_id Face identifier of the next hop
+ * @param dpo_idx Index of the dpo ctx to update with the new or updated next
+ * hop
+ * @return HICN_ERROR_NONE if the update or insert was fine,
+ * otherwise HICN_ERROR_DPO_CTS_NOT_FOUND
+ */
+int
+hicn_strategy_mw_ctx_del_nh (hicn_face_id_t face_id, index_t dpo_idx,
+ fib_prefix_t * fib_pfx);
+
+/**
+ * @brief Prefetch a dpo
+ *
+ * @param dpo_idx Index of the dpo ctx to prefetch
+ */
+void hicn_strategy_mw_ctx_prefetch (index_t dpo_idx);
+
+int hicn_dpo_is_type_strategy_mw (const dpo_id_t * dpo);
+
+void hicn_dpo_strategy_mw_module_init (void);
+
+dpo_type_t hicn_dpo_strategy_mw_get_type (void);
+
+u8 *format_hicn_dpo_strategy_mw (u8 * s, va_list * ap);
+
+
+#endif // __HICN_DPO_MW_H__
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/hicn-plugin/src/strategies/strategy_mw.c b/hicn-plugin/src/strategies/strategy_mw.c
new file mode 100755
index 000000000..144dd145e
--- /dev/null
+++ b/hicn-plugin/src/strategies/strategy_mw.c
@@ -0,0 +1,171 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+
+#include "../strategy.h"
+#include "../strategy_dpo_ctx.h"
+#include "dpo_mw.h"
+#include "../faces/face.h"
+#include "../route.h"
+#include "../pcs.h"
+#include "../strategy_dpo_manager.h"
+
+/* Simple strategy that chooses the next hop with the maximum weight */
+/* It does not require to extend the hicn_dpo */
+void hicn_receive_data_mw (index_t dpo_idx, int nh_idx);
+void hicn_add_interest_mw (index_t dpo_idx, hicn_hash_entry_t * pit_entry);
+void hicn_on_interest_timeout_mw (index_t dpo_idx);
+u32 hicn_select_next_hop_mw (index_t dpo_idx, int *nh_idx,
+ dpo_id_t ** outface);
+u32 get_strategy_node_index_mw (void);
+
+/* Strategy vft exported through hicn_mw_strategy_get_vft */
+static hicn_strategy_vft_t hicn_strategy_mw_vft = {
+ .hicn_receive_data = &hicn_receive_data_mw,
+ .hicn_add_interest = &hicn_add_interest_mw,
+ .hicn_on_interest_timeout = &hicn_on_interest_timeout_mw,
+ .hicn_select_next_hop = &hicn_select_next_hop_mw,
+ .get_strategy_node_index = get_strategy_node_index_mw
+};
+
+/* Stats string values */
+static char *hicn_strategy_error_strings[] = {
+#define _(sym, string) string,
+ foreach_hicnfwd_error
+#undef _
+};
+
+/*
+ * Return the vft of the strategy.
+ */
+hicn_strategy_vft_t *
+hicn_mw_strategy_get_vft (void)
+{
+ return &hicn_strategy_mw_vft;
+}
+
+/* Registration struct for a graph node */
+vlib_node_registration_t hicn_mw_strategy_node;
+
+/* Return the graph node index of the mw strategy node */
+u32
+get_strategy_node_index_mw (void)
+{
+ return hicn_mw_strategy_node.index;
+}
+
+/* The DPO is given in input as it contains all the information needed to
+ * compute the next hop: pick the valid next hop with the maximum weight. */
+u32
+hicn_select_next_hop_mw (index_t dpo_idx, int *nh_idx, dpo_id_t ** outface)
+{
+ hicn_strategy_mw_ctx_t *hicn_strategy_mw_ctx =
+ (hicn_strategy_mw_ctx_t *) hicn_strategy_mw_ctx_get (dpo_idx);
+
+ /* NOTE(review): the comparison starts from slot 0 regardless of its
+ * validity, so a stale weight in an invalid slot 0 can shadow a valid
+ * next hop with a lower weight — confirm weights are cleared on delete. */
+ u8 next_hop_index = 0;
+ for (int i = 0; i < HICN_PARAM_FIB_ENTRY_NHOPS_MAX; i++)
+ {
+ if (dpo_id_is_valid (&hicn_strategy_mw_ctx->default_ctx.next_hops[i]))
+ {
+ if (hicn_strategy_mw_ctx->weight[next_hop_index] <
+ hicn_strategy_mw_ctx->weight[i])
+ {
+ next_hop_index = i;
+ }
+ }
+ }
+
+ if (!dpo_id_is_valid
+ (&hicn_strategy_mw_ctx->default_ctx.next_hops[next_hop_index]))
+ return HICN_ERROR_MW_STRATEGY_NH_NOT_FOUND;
+
+ *outface =
+ (dpo_id_t *) & hicn_strategy_mw_ctx->default_ctx.
+ next_hops[next_hop_index];
+
+ return HICN_ERROR_NONE;
+}
+
+/* Graph node function: delegate interest forwarding to the generic
+ * forwarder, parameterized with the mw strategy vft. */
+uword
+hicn_mw_strategy_node_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ return hicn_forward_interest_fn (vm, node, frame, &hicn_strategy_mw_vft,
+ &hicn_mw_strategy_node);
+}
+
+/* Record the dpo ctx and vft ids in the PIT hash entry when an interest
+ * is added, and take a reference on the ctx. */
+void
+hicn_add_interest_mw (index_t dpo_ctx_idx, hicn_hash_entry_t * hash_entry)
+{
+ hash_entry->dpo_ctx_id = dpo_ctx_idx;
+ /* Positional dpo_id_t initializer: {type, proto, locks, index} */
+ dpo_id_t hicn_dpo_id =
+ { hicn_dpo_strategy_mw_get_type (), 0, 0, dpo_ctx_idx };
+ hicn_strategy_mw_ctx_lock (&hicn_dpo_id);
+ hash_entry->vft_id = hicn_dpo_get_vft_id (&hicn_dpo_id);
+}
+
+void
+hicn_on_interest_timeout_mw (index_t dpo_idx)
+{
+ /* Nothing to do in the mw strategy when an interest times out */
+}
+
+/* Nothing to do in the mw strategy when data is received */
+void
+hicn_receive_data_mw (index_t dpo_idx, int nh_idx)
+{
+}
+
+
+/* packet trace format function for the mw strategy node */
+static u8 *
+hicn_strategy_format_trace_mw (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ hicn_strategy_trace_t *t = va_arg (*args, hicn_strategy_trace_t *);
+
+ s = format (s, "Strategy_mw: pkt: %d, sw_if_index %d, next index %d",
+ (int) t->pkt_type, t->sw_if_index, t->next_index);
+ return (s);
+}
+
+/*
+ * Node registration for the forwarder node
+ */
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (hicn_mw_strategy_node) =
+{
+  .name = "hicn-mw-strategy",
+  .function = hicn_mw_strategy_node_fn,
+  .vector_size = sizeof (u32),
+  .runtime_data_bytes = sizeof (int) + sizeof(hicn_pit_cs_t *),
+  .format_trace = hicn_strategy_format_trace_mw,
+  .type = VLIB_NODE_TYPE_INTERNAL,
+  .n_errors = ARRAY_LEN (hicn_strategy_error_strings),
+  .error_strings = hicn_strategy_error_strings,
+  .n_next_nodes = HICN_STRATEGY_N_NEXT,
+  /* Next nodes: interests matching the PIT, and the drop path */
+  .next_nodes = {
+    [HICN_STRATEGY_NEXT_INTEREST_HITPIT] = "hicn-interest-hitpit",
+    [HICN_STRATEGY_NEXT_ERROR_DROP] = "error-drop",
+  },
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/hicn-plugin/src/strategies/strategy_mw.h b/hicn-plugin/src/strategies/strategy_mw.h
new file mode 100755
index 000000000..10b08c05f
--- /dev/null
+++ b/hicn-plugin/src/strategies/strategy_mw.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HICN_STRATEGY_MW_H__
+#define __HICN_STRATEGY_MW_H__
+
+#include "../strategy.h"
+
+hicn_strategy_vft_t *hicn_mw_strategy_get_vft (void);
+
+#endif // __HICN_STRATEGY_MW_H__
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/hicn-plugin/src/strategies/strategy_mw_cli.c b/hicn-plugin/src/strategies/strategy_mw_cli.c
new file mode 100755
index 000000000..ff4125258
--- /dev/null
+++ b/hicn-plugin/src/strategies/strategy_mw_cli.c
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/vnet.h>
+#include <vnet/dpo/dpo.h>
+#include <vlib/vlib.h>
+#include <vnet/fib/fib_entry.h>
+#include <vnet/fib/fib_table.h>
+
+#include "../strategy_dpo_manager.h"
+#include "../faces/face.h"
+#include "../error.h"
+#include "../route.h"
+#include "dpo_mw.h"
+
+/*
+ * CLI handler for 'hicn strategy mw set': update the weight assigned to
+ * a (prefix, face) next hop in the mw strategy dpo context.
+ */
+static clib_error_t *
+hicn_mw_strategy_cli_set_weight_command_fn (vlib_main_t * vm,
+                                            unformat_input_t * main_input,
+                                            vlib_cli_command_t * cmd)
+{
+  clib_error_t *cl_err = 0;
+  int ret = HICN_ERROR_NONE;
+  /* Zero-init: the prefix check below reads it even when no input is given */
+  ip46_address_t prefix = { 0 };
+  hicn_face_id_t faceid = HICN_FACE_NULL;
+  u32 fib_index;
+  u32 weight = HICN_PARAM_FIB_ENTRY_NHOP_WGHT_DFLT;
+  u32 plen = 0;
+  hicn_dpo_ctx_t *hicn_dpo_ctx;
+  const dpo_id_t *hicn_dpo_id;
+  u32 vft_id;
+  const hicn_dpo_vft_t *dpo_vft;
+
+  /* Get a line of input. */
+  unformat_input_t _line_input, *line_input = &_line_input;
+  if (unformat_user (main_input, unformat_line_input, line_input))
+    {
+      while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+        {
+          if (unformat (line_input, "prefix %U/%u", unformat_ip46_address,
+                        &prefix, IP46_TYPE_ANY, &plen))
+            ;
+          else if (unformat (line_input, "face %u", &faceid))
+            ;
+          else if (unformat (line_input, "weight %u", &weight))
+            ;
+          else
+            {
+              /* Release the line input before bailing out */
+              unformat_free (line_input);
+              return clib_error_return (0, "%s",
+                                        get_error_string
+                                        (HICN_ERROR_CLI_INVAL));
+            }
+        }
+      unformat_free (line_input);
+    }
+
+  /* weight is unsigned, so only the upper bound can be violated */
+  if (weight > HICN_PARAM_FIB_ENTRY_NHOP_WGHT_MAX)
+    {
+      cl_err = clib_error_return (0,
+                                  "Next-hop weight must be between 0 and %d",
+                                  (int) HICN_PARAM_FIB_ENTRY_NHOP_WGHT_MAX);
+      goto done;
+    }
+
+  if (ip46_address_is_zero (&prefix) || faceid == HICN_FACE_NULL)
+    {
+      cl_err =
+        clib_error_return (0, "Please specify prefix and a valid faceid...");
+      goto done;
+    }
+
+  fib_prefix_t fib_pfx;
+  fib_prefix_from_ip46_addr (&prefix, &fib_pfx);
+  fib_pfx.fp_len = plen;
+
+  /* Retrieve the hICN dpo attached to the FIB entry for this prefix */
+  ret = hicn_route_get_dpo (&prefix, plen, &hicn_dpo_id, &fib_index);
+
+  if (ret == HICN_ERROR_NONE)
+    {
+      vft_id = hicn_dpo_get_vft_id (hicn_dpo_id);
+      dpo_vft = hicn_dpo_get_vft (vft_id);
+      hicn_dpo_ctx = dpo_vft->hicn_dpo_get_ctx (hicn_dpo_id->dpoi_index);
+
+      /* This command only applies to the mw strategy dpo */
+      if (hicn_dpo_ctx == NULL
+          || hicn_dpo_id->dpoi_type != hicn_dpo_strategy_mw_get_type ())
+        {
+          cl_err = clib_error_return (0, get_error_string (ret));
+          goto done;
+        }
+
+      hicn_strategy_mw_ctx_t *mw_dpo =
+        (hicn_strategy_mw_ctx_t *) hicn_dpo_ctx;
+      /* Linear scan of the next hops for the requested face */
+      int idx = ~0;
+      for (int i = 0; i < hicn_dpo_ctx->entry_count; i++)
+        if (hicn_dpo_ctx->next_hops[i].dpoi_index == (index_t) faceid)
+          idx = i;
+
+      if (idx == ~0)
+        {
+          cl_err =
+            clib_error_return (0,
+                               get_error_string
+                               (HICN_ERROR_MW_STRATEGY_NH_NOT_FOUND));
+          goto done;
+        }
+
+      mw_dpo->weight[idx] = weight;
+    }
+  else
+    {
+      cl_err = clib_error_return (0, get_error_string (ret));
+    }
+
+done:
+
+  return (cl_err);
+}
+
+/* cli declaration for 'strategy mw' */
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND(hicn_mw_strategy_cli_set_weight_command, static)=
+{
+ .path = "hicn strategy mw set",
+ .short_help = "hicn strategy mw set prefix <prefix> face <face_id> weight <weight>",
+ .function = hicn_mw_strategy_cli_set_weight_command_fn,
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/hicn-plugin/src/strategy.c b/hicn-plugin/src/strategy.c
new file mode 100755
index 000000000..56de34e6b
--- /dev/null
+++ b/hicn-plugin/src/strategy.c
@@ -0,0 +1,265 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+
+#include "hicn.h"
+#include "parser.h"
+#include "strategy.h"
+#include "strategy_dpo_ctx.h"
+#include "face_db.h"
+#include "infra.h"
+#include "mgmt.h"
+#include "pcs.h"
+#include "state.h"
+
+/*
+ * Node context data (to be used in all the strategy nodes); we think this is
+ * per-thread/instance
+ */
+typedef struct hicn_strategy_runtime_s
+{
+ int id; /* node instance id */
+ hicn_pit_cs_t *pitcs; /* pointer to the shared PIT/CS tables */
+} hicn_strategy_runtime_t;
+
+/*
+ * Create and insert a new PIT entry for interest b0 and forward it on
+ * outface. On success the buffer next index is set to the face's next
+ * node; on HICN_ERROR_HASHTB_EXIST (interest aggregation) the buffer is
+ * sent to the interest-hitpit node; on any other error it is dropped.
+ * Returns the hicn error code from the PIT insertion.
+ */
+always_inline int
+hicn_new_interest (hicn_strategy_runtime_t * rt, vlib_buffer_t * b0,
+ u32 * next, f64 tnow, u8 * nameptr,
+ u16 namelen, dpo_id_t * outface, int nh_idx,
+ index_t hicn_dpo_idx, hicn_strategy_vft_t * strategy,
+ u8 isv6, vl_api_hicn_api_node_stats_get_reply_t * stats)
+{
+ int ret;
+ hicn_hash_node_t *nodep;
+ hicn_pcs_entry_t *pitp;
+ hicn_header_t *hicn0;
+ hicn_main_t *sm = &hicn_main;
+ hicn_buffer_t *hicnb0 = hicn_get_buffer (b0);
+ u32 node_id0 = 0;
+ u8 dpo_ctx_id0 = 0;
+ u8 vft_id0 = 0;
+ u8 is_cs0 = 0;
+ u8 hash_entry_id = 0;
+ u8 bucket_is_overflow = 0;
+ u32 bucket_id = ~0;
+
+
+ /* Create PIT node and init PIT entry */
+ nodep = hicn_hashtb_alloc_node (rt->pitcs->pcs_table);
+ if (PREDICT_FALSE (nodep == NULL))
+ {
+ /* Nothing we can do - no mem */
+ *next = HICN_STRATEGY_NEXT_ERROR_DROP;
+ return HICN_ERROR_HASHTB_NOMEM;
+ }
+ pitp = hicn_pit_get_data (nodep);
+ hicn_pit_init_data (pitp);
+ pitp->shared.create_time = tnow;
+
+ /* Clamp the lifetime carried by the interest to the configured bounds */
+ hicn0 = vlib_buffer_get_current (b0);
+ hicn_lifetime_t imsg_lifetime;
+ hicn_type_t type = hicnb0->type;
+ hicn_ops_vft[type.l1]->get_lifetime (type, &hicn0->protocol,
+ &imsg_lifetime);
+
+ if (imsg_lifetime < sm->pit_lifetime_min_ms
+ || imsg_lifetime > sm->pit_lifetime_max_ms)
+ {
+ imsg_lifetime = sm->pit_lifetime_dflt_ms;
+ }
+ pitp->shared.expire_time = hicn_pcs_get_exp_time (tnow, imsg_lifetime);
+
+ /* Set up the hash node and insert it */
+ hicn_hash_entry_t *hash_entry;
+ hicn_hashtb_init_node (rt->pitcs->pcs_table, nodep, nameptr, namelen);
+
+ ret =
+ hicn_pcs_pit_insert (rt->pitcs, pitp, nodep, &hash_entry,
+ hicnb0->name_hash, &node_id0, &dpo_ctx_id0, &vft_id0,
+ &is_cs0, &hash_entry_id, &bucket_id,
+ &bucket_is_overflow);
+ if (ret == HICN_ERROR_NONE)
+ {
+ /* Bind the hash entry to the strategy handling this interest */
+ strategy->hicn_add_interest (vnet_buffer (b0)->ip.adj_index[VLIB_TX],
+ hash_entry);
+
+ /* Add face */
+ hicn_face_db_add_face_dpo (&hicnb0->face_dpo_id, &(pitp->u.pit.faces));
+
+ /* Remove lock on the dpo stored in the vlib_buffer */
+ dpo_unlock (&hicnb0->face_dpo_id);
+
+ *next = outface->dpoi_next_node;
+
+ vnet_buffer (b0)->ip.adj_index[VLIB_TX] = outface->dpoi_index;
+ stats->pkts_interest_count++;
+ }
+ else
+ {
+ /* Interest aggregate in PIT */
+ if (ret == HICN_ERROR_HASHTB_EXIST)
+ {
+ hicn_store_internal_state (b0, hicnb0->name_hash, node_id0,
+ dpo_ctx_id0, vft_id0, hash_entry_id,
+ bucket_id, bucket_is_overflow);
+ *next = HICN_STRATEGY_NEXT_INTEREST_HITPIT;
+ }
+ else
+ {
+ /* Insertion failed for another reason: drop the packet */
+ *next = HICN_STRATEGY_NEXT_ERROR_DROP;
+ }
+ /* Release the entry allocated above; an existing entry is reused */
+ hicn_faces_flush (&(pitp->u.pit.faces));
+ hicn_hashtb_free_node (rt->pitcs->pcs_table, nodep);
+ }
+
+ return (ret);
+
+}
+
+/*
+ * hICN strategy node for interests: processes 1 packet at a time,
+ * ipv4/tcp and ipv6/tcp encapsulations.
+ */
+uword
+hicn_forward_interest_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame,
+ hicn_strategy_vft_t * strategy,
+ vlib_node_registration_t * hicn_strategy_node)
+{
+
+ u32 n_left_from, *from, *to_next, n_left_to_next;
+ hicn_strategy_next_t next_index;
+ hicn_strategy_runtime_t *rt;
+ vl_api_hicn_api_node_stats_get_reply_t stats = { 0 };
+ f64 tnow;
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = (hicn_strategy_next_t) node->cached_next_index;
+ rt = vlib_node_get_runtime_data (vm, hicn_strategy_node->index);
+ rt->pitcs = &hicn_main.pitcs;
+ /* Capture time in vpp terms */
+ tnow = vlib_time_now (vm);
+
+ while (n_left_from > 0)
+ {
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u8 isv6;
+ u8 *nameptr;
+ u16 namelen;
+ hicn_name_t name;
+ hicn_header_t *hicn0;
+ vlib_buffer_t *b0;
+ u32 bi0;
+ dpo_id_t *outface = NULL;
+ int nh_idx;
+ u32 next0 = next_index;
+ int ret;
+
+ /* Prefetch for next iteration. */
+ if (n_left_from > 1)
+ {
+ vlib_buffer_t *b1;
+ b1 = vlib_get_buffer (vm, from[1]);
+ CLIB_PREFETCH (b1, CLIB_CACHE_LINE_BYTES, LOAD);
+ CLIB_PREFETCH (&b1->trace_index, 2 * CLIB_CACHE_LINE_BYTES,
+ STORE);
+ }
+ /* Dequeue a packet buffer */
+ bi0 = from[0];
+ from += 1;
+ n_left_from -= 1;
+ to_next[0] = bi0;
+ to_next += 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ /* Default to drop; overwritten on successful forwarding below */
+ next0 = HICN_STRATEGY_NEXT_ERROR_DROP;
+
+ ret = hicn_interest_parse_pkt (b0, &name, &namelen, &hicn0, &isv6);
+
+ stats.pkts_processed++;
+ /* Select next hop */
+ /*
+ * Double check that the interest has been through
+ * the interest-pcslookup node due to misconfiguration in
+ * the punting rules.
+ */
+ if (PREDICT_TRUE
+ (ret == HICN_ERROR_NONE && HICN_IS_NAMEHASH_CACHED (b0)
+ && strategy->hicn_select_next_hop (vnet_buffer (b0)->
+ ip.adj_index[VLIB_TX],
+ &nh_idx,
+ &outface) ==
+ HICN_ERROR_NONE))
+ {
+ /*
+ * No need to check if parsing was successful
+ * here. Already checked in the interest_pcslookup
+ * node
+ */
+ nameptr = (u8 *) (&name);
+ hicn_new_interest (rt, b0, &next0, tnow, nameptr, namelen,
+ outface, nh_idx,
+ vnet_buffer (b0)->ip.adj_index[VLIB_TX],
+ strategy, isv6, &stats);
+ }
+ /* Maybe trace */
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) &&
+ (b0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ hicn_strategy_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->pkt_type = HICN_PKT_TYPE_CONTENT;
+ t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+ t->next_index = next0;
+ }
+ /*
+ * Verify speculative enqueue, maybe switch current
+ * next frame
+ */
+ /*
+ * Fix in case of a wrong speculation. Needed for
+ * cloning the data in the right frame
+ */
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ /* Export per-node counters for processed and forwarded interests */
+ vlib_node_increment_counter (vm, hicn_strategy_node->index,
+ HICNFWD_ERROR_PROCESSED, stats.pkts_processed);
+ vlib_node_increment_counter (vm, hicn_strategy_node->index,
+ HICNFWD_ERROR_INTERESTS,
+ stats.pkts_interest_count);
+
+ return (frame->n_vectors);
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/strategy.h b/hicn-plugin/src/strategy.h
new file mode 100755
index 000000000..6b06a6ce9
--- /dev/null
+++ b/hicn-plugin/src/strategy.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HICN_STRATEGY__
+#define __HICN_STRATEGY__
+
+#include "hicn.h"
+#include "hashtb.h"
+#include "mgmt.h"
+#include "faces/face.h"
+
+/**
+ * @File
+ *
+ * A strategy is defined as a vpp node and a set of function that will be called
+ * during the packet processing. Having one vpp node per strategy allows to
+ * easily process multiple interests in the same node (x2 or x4) and call the
+ * same function for choosing the next hop.
+ * Here we provide:
+ * - a template for the callbacks to implement in order to create a new strategy
+ * (hicn_strategy_vft_t)
+ * - the base structure for a strategy node
+ * (list of next vpp nodes, errors, tracing and the main function processing an
+ * interest and calling hicn_select_next_hop)
+ */
+
+/*
+ * Virtual function table implemented by each hICN forwarding strategy;
+ * the callbacks are invoked from the strategy vpp nodes.
+ */
+typedef struct hicn_strategy_vft_s
+{
+ void (*hicn_receive_data) (index_t dpo_idx, int nh_idx); /**< Called when a data packet is received */
+ void (*hicn_on_interest_timeout) (index_t dpo_idx); /**< Called when a PIT entry expires */
+ void (*hicn_add_interest) (index_t dpo_idx, hicn_hash_entry_t * pit_entry); /**< Called when an interest is inserted in the PIT */
+ u32 (*hicn_select_next_hop) (index_t dpo_idx, int *nh_idx,
+ dpo_id_t ** outface); /**< Choose the outgoing face for an interest */
+ u32 (*get_strategy_node_index) (void);
+ /**< Return the vlib node index implementing the strategy */
+} hicn_strategy_vft_t;
+
+hicn_face_vft_t *hicn_strategy_get_face_vft (u16 index);
+
+/* Strategy node API */
+/* Basic interest processing function. To be called in all the strategy nodes */
+uword
+hicn_forward_interest_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame,
+ hicn_strategy_vft_t * strategy,
+ vlib_node_registration_t * hicn_strategy_node);
+
+/* Trace context struct */
+typedef struct
+{
+ u32 next_index; /**< Next node chosen for the traced packet */
+ u32 sw_if_index; /**< RX software interface */
+ u8 pkt_type; /**< hICN packet type */
+} hicn_strategy_trace_t;
+
+/* Next nodes reachable from every strategy node */
+typedef enum
+{
+ HICN_STRATEGY_NEXT_INTEREST_HITPIT,
+ HICN_STRATEGY_NEXT_ERROR_DROP,
+ HICN_STRATEGY_N_NEXT,
+} hicn_strategy_next_t;
+
+#endif /* //__HICN_STRATEGY__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/strategy_dpo_ctx.h b/hicn-plugin/src/strategy_dpo_ctx.h
new file mode 100755
index 000000000..5d2dbc47c
--- /dev/null
+++ b/hicn-plugin/src/strategy_dpo_ctx.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HICN_STRATEGY_DPO_CTX_H__
+#define __HICN_STRATEGY_DPO_CTX_H__
+
+#include <vnet/dpo/dpo.h>
+#include <vnet/fib/fib_table.h>
+
+#include "hicn.h"
+#include "params.h"
+#include "faces/face.h"
+
+#define HICN_FIB_TABLE 0
+
+#define DATA_LEN 8
+
+#define NEXT_HOP_INVALID DPO_INVALID
+
+/*
+ * An hicn dpo is a list of next hops (face + weight).
+ * The whole context must fit in a single cache line (see STATIC_ASSERT below).
+ */
+typedef struct __attribute__ ((packed)) hicn_dpo_ctx_s
+{
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+ /* 8B*5 = 40B */
+ dpo_id_t next_hops[HICN_PARAM_FIB_ENTRY_NHOPS_MAX];
+ /* 40B + 4B = 44B */
+ u32 locks;
+ /* 44B + 1B = 45B */
+ u8 entry_count;
+ /* 45B + 1B = 46B */
+ /* Number of TFIB entries (stored at the end of the next_hops array) */
+ u8 tfib_entry_count;
+
+ /* 46B + 2B = 48B */
+ u16 padding; /* To align to 8B */
+
+#ifdef HICN_MAPME_NOTIFICATIONS
+ /* (8B) last acked update for IU/IN heuristic on producer */
+ f64 last_iu_ack;
+#endif
+ /* (4B) last sequence number */
+ seq_t seq;
+
+} hicn_dpo_ctx_t;
+
+STATIC_ASSERT (sizeof (hicn_dpo_ctx_t) <= CLIB_CACHE_LINE_BYTES,
+ "sizeof hicn_dpo_ctx_t is greater than 64B");
+
+#endif /* // __HICN_STRATEGY_DPO_CTX_H__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/strategy_dpo_manager.c b/hicn-plugin/src/strategy_dpo_manager.c
new file mode 100755
index 000000000..c1723eccc
--- /dev/null
+++ b/hicn-plugin/src/strategy_dpo_manager.c
@@ -0,0 +1,159 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/dpo/dpo.h>
+
+#include "strategy_dpo_manager.h"
+#include "strategies/dpo_mw.h"
+#include "strategy.h"
+#include "faces/face.h"
+
+static dpo_type_t *strategies_id;
+static const hicn_dpo_vft_t **hicn_dpo_vfts;
+
+static const hicn_strategy_vft_t **hicn_strategy_vfts;
+
+int hicn_strategies = 0;
+
+hicn_dpo_vft_t default_dpo;
+
+/*
+ * Register a new hICN dpo type with VPP and record its dpo and strategy
+ * vfts, indexed by the dpo type returned by dpo_register_new_type().
+ * Returns the newly allocated dpo type.
+ */
+dpo_type_t
+hicn_dpo_register_new_type (const char *const *const *hicn_nodes,
+ const hicn_dpo_vft_t * hicn_dpo_vft,
+ const hicn_strategy_vft_t * hicn_strategy_vft,
+ const dpo_vft_t * dpo_ctx_vft)
+{
+ dpo_type_t dpo_type = dpo_register_new_type (dpo_ctx_vft, hicn_nodes);
+ vec_validate (hicn_dpo_vfts, dpo_type);
+ hicn_dpo_vfts[dpo_type] = hicn_dpo_vft;
+
+ vec_validate (hicn_strategy_vfts, dpo_type);
+ hicn_strategy_vfts[dpo_type] = hicn_strategy_vft;
+
+ /* Keep the strategy-id -> dpo-type mapping in registration order */
+ vec_validate (strategies_id, hicn_strategies);
+ strategies_id[hicn_strategies] = dpo_type;
+ hicn_strategies++;
+
+ return dpo_type;
+}
+
+/*
+ * Return 1 when the dpo belongs to one of the registered hICN strategy
+ * dpo types, 0 otherwise.
+ */
+u32
+dpo_is_hicn (const dpo_id_t * dpo)
+{
+  int sid;
+
+  for (sid = 0; sid < hicn_strategies; sid++)
+    {
+      const hicn_dpo_vft_t *vft = hicn_dpo_vfts[strategies_id[sid]];
+      if (vft->hicn_dpo_is_type (dpo))
+        return 1;
+    }
+
+  return 0;
+}
+
+/* The dpo/strategy vft id coincides with the dpo type of the dpo. */
+dpo_type_t
+hicn_dpo_get_vft_id (const dpo_id_t * dpo)
+{
+  dpo_type_t vft_id = dpo->dpoi_type;
+
+  return vft_id;
+}
+
+/* Look up the dpo vft for a given vft (dpo type) id. */
+const hicn_dpo_vft_t *
+hicn_dpo_get_vft (dpo_type_t vfts_id)
+{
+ return hicn_dpo_vfts[vfts_id];
+}
+
+/* Look up the dpo vft from a strategy id (registration order index). */
+const hicn_dpo_vft_t *
+hicn_dpo_get_vft_from_id (u8 strategy_id)
+{
+ return hicn_dpo_vfts[strategies_id[strategy_id]];
+}
+
+/* Look up the strategy vft for a given vft (dpo type) id. */
+const hicn_strategy_vft_t *
+hicn_dpo_get_strategy_vft (dpo_type_t vfts_id)
+{
+ return hicn_strategy_vfts[vfts_id];
+}
+
+/* Look up the strategy vft from a strategy id (registration order index). */
+const hicn_strategy_vft_t *
+hicn_dpo_get_strategy_vft_from_id (u8 vfts_id)
+{
+ return hicn_strategy_vfts[strategies_id[vfts_id]];
+}
+
+/*
+ * Initialize the registered hICN dpos and make the mw strategy the
+ * default dpo used when a FIB entry is created without an explicit
+ * strategy.
+ */
+void
+hicn_dpos_init (void)
+{
+  hicn_dpo_vft_t *dflt = &default_dpo;
+
+  hicn_dpo_strategy_mw_module_init ();
+
+  dflt->hicn_dpo_get_ctx = &hicn_strategy_mw_ctx_get;
+  dflt->hicn_dpo_is_type = &hicn_dpo_is_type_strategy_mw;
+  dflt->hicn_dpo_get_type = &hicn_dpo_strategy_mw_get_type;
+  dflt->hicn_dpo_module_init = &hicn_dpo_strategy_mw_module_init;
+  dflt->hicn_dpo_create = &hicn_strategy_mw_ctx_create;
+  dflt->hicn_dpo_add_update_nh = &hicn_strategy_mw_ctx_add_nh;
+  dflt->hicn_dpo_del_nh = &hicn_strategy_mw_ctx_del_nh;
+  dflt->hicn_dpo_lock_dpo_ctx = &hicn_strategy_mw_ctx_lock;
+  dflt->hicn_dpo_unlock_dpo_ctx = &hicn_strategy_mw_ctx_unlock;
+  dflt->format_hicn_dpo = &format_hicn_strategy_mw_ctx;
+}
+
+/*
+ * Append the list of registered strategies to s. The only variadic
+ * argument consumed directly is the indentation level (u32); the va_list
+ * is then handed to each strategy's format callback.
+ */
+u8 *
+format_hicn_strategy_list (u8 * s, int n, ...)
+{
+  va_list ap;
+  va_start (ap, n);
+  u32 indent = va_arg (ap, u32);
+
+  s = format (s, "%UStrategies:\n", format_white_space, indent);
+  indent += 4;
+  int i;
+  vec_foreach_index (i, strategies_id)
+  {
+    s = format (s, "%U(%d) ", format_white_space, indent, i);
+    s = hicn_dpo_vfts[strategies_id[i]]->format_hicn_dpo (s, &ap);
+  }
+  /* ap must stay live until the last format_hicn_dpo call above:
+   * calling va_end before using ap is undefined behavior (C11 7.16.1). */
+  va_end (ap);
+
+  return (s);
+}
+
+/*
+ * Check that strategy_id indexes a registered strategy. Returns
+ * HICN_ERROR_NONE when valid, HICN_ERROR_DPO_MGR_ID_NOT_VALID otherwise.
+ */
+u8
+hicn_dpo_strategy_id_is_valid (int strategy_id)
+{
+  if (strategy_id < vec_len (strategies_id))
+    return HICN_ERROR_NONE;
+
+  return HICN_ERROR_DPO_MGR_ID_NOT_VALID;
+}
+
+/* Number of strategies currently registered with the dpo manager. */
+int
+hicn_strategy_get_all_available (void)
+{
+ return hicn_strategies;
+}
+
+/**
+ * @brief Registers a dpo by calling its module init function.
+ *
+ * This is typically called from the ctor for dpo's registered at compilation
+ * time.
+ */
+void
+hicn_dpo_register (const hicn_dpo_vft_t * hicn_dpo)
+{
+ /* Delegate initialization to the dpo's own module init callback */
+ hicn_dpo->hicn_dpo_module_init ();
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/strategy_dpo_manager.h b/hicn-plugin/src/strategy_dpo_manager.h
new file mode 100755
index 000000000..686c2f8c8
--- /dev/null
+++ b/hicn-plugin/src/strategy_dpo_manager.h
@@ -0,0 +1,186 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HICN_STRATEGY_DPO_MANAGER_H__
+#define __HICN_STRATEGY_DPO_MANAGER_H__
+
+#include "strategy_dpo_ctx.h"
+#include "strategy.h"
+
+/**
+ * @brief Definition of the virtual function table for a hICN DPO.
+ *
+ * An hICN dpo is a combination of a dpo context (hicn_dpo_ctx or struct that
+ * extends a hicn_dpo_ctx) and a strategy node. The following virtual function
+ * table is a template that glues together the functions to interact with the
+ * context and to create the dpo
+ */
+typedef struct hicn_dpo_vft_s
+{
+ hicn_dpo_ctx_t *(*hicn_dpo_get_ctx) (index_t dpo_idx); /**< Retrieve the dpo ctx*/
+ int (*hicn_dpo_is_type) (const dpo_id_t * dpo);
+ /**< Check if the type of the
+ hICN DPO is the expected */
+ dpo_type_t (*hicn_dpo_get_type) (void);
+ /**< Return the type of the hICN dpo */
+ void (*hicn_dpo_module_init) (void); /**< Initialize the hICN dpo */
+ int (*hicn_dpo_create) (dpo_proto_t proto, const dpo_id_t * nh, int nh_len, index_t * dpo_idx); /**< Create the context of the hICN dpo */
+ int (*hicn_dpo_add_update_nh) (const dpo_id_t * nh, index_t dpo_idx); /**< Add a next hop to the hICN dpo context */
+ int (*hicn_dpo_del_nh) (hicn_face_id_t face_id, index_t dpo_idx,
+ fib_prefix_t * fib_pfx);
+ /**< Add a next hop to the hICN dpo context */
+ void (*hicn_dpo_lock_dpo_ctx) (dpo_id_t * dpo);
+ void (*hicn_dpo_unlock_dpo_ctx) (dpo_id_t * dpo);
+ u8 *(*format_hicn_dpo) (u8 * s, va_list * ap);
+ /**< Format an hICN dpo*/
+} hicn_dpo_vft_t;
+
+/*
+ * Default dpo to be used to create fib entry when a strategy is not
+ * specified
+ */
+extern hicn_dpo_vft_t default_dpo;
+
+/**
+ * @brief Register a new hICN dpo to the manager.
+ *
+ * An hICN DPO is a combination of:
+ * - a hICN DPO ctx (context) that holds the structure containing the
+ * information to choose the next hop,
+ * - a strategy containing: (i) the vpp node that processes Interest packets
+ * subjected to such strategy, (ii) the definition of the vft that defines
+ * the hICN strategy functions
+ * Registering a hICN DPO allows the plugin to be aware of the new dpo and to
+ * be able to apply it to the FIB entries.
+ *
+ * @param hicn_nodes A list of vpp to which pass an interest that matches with
+ * the FIB entry to which the hICN DPO is applied. This list must contain the
+ * name of the strategy node (or nodes in case of differentiation between IPv4
+ * and IPv6).
+ * @param hicn_dpo_vft The structure holding the virtual function table to
+ * interact with the hICN dpo and its context.
+ * @param hicn_strategy_vft The structure holding the virtual function table
+ * containing the hICN strategy functions.
+ * @return the dpo type registered in the VPP Data plane graph.
+ */
+dpo_type_t
+hicn_dpo_register_new_type (const char *const *const *hicn_nodes,
+ const hicn_dpo_vft_t * hicn_dpo_vft,
+ const hicn_strategy_vft_t *
+ hicn_strategy_vft, const dpo_vft_t * dpo_ctx_vft);
+
+/**
+ * @brief Check if the type of the dpo is among the list of hicn dpo types
+ *
+ * Iterate through the list of dpo types registered in the hicn dpo manager.
+ *
+ * @param dpo The id of the dpo to which check the type
+ * @return 1 if there is a match, 0 otherwise.
+ */
+u32 dpo_is_hicn (const dpo_id_t * dpo);
+
+/**
+ * @brief Return the dpo_vtf and strategy_vtf identifier
+ *
+ * Iterate through the list of dpo types registered in the hicn dpo manager and
+ * retrieve the corresponding dpo_vtf/strategy_vtf identifier.
+ *
+ * @param dpo The id of the dpo to which check the type
+ * @return the dpo_vft/strategy_vft id or HICN_ERROR_DPO_NOT_FOUND in case the dpo is not an hICN dpo.
+ */
+/* Return type must match the definition in strategy_dpo_manager.c */
+dpo_type_t hicn_dpo_get_vft_id (const dpo_id_t * dpo);
+
+/**
+ * @brief Get the vft to manage the dpo context.
+ *
+ * @param The id of the hicn_dpo_vft to retrieve.
+ * @return The vft struct that contains the list of callbacks that allows to
+ * manage the dpo context.
+ */
+const hicn_dpo_vft_t *hicn_dpo_get_vft (dpo_type_t vfts_id);
+
+/**
+ * @brief Get the vft to manage the dpo context from the strategy id.
+ *
+ * @param The strategy id of the hicn_dpo_vft to retrieve.
+ * @return The vft struct that contains the list of callbacks that allows to
+ * manage the dpo context.
+ */
+const hicn_dpo_vft_t *hicn_dpo_get_vft_from_id (u8 strategy_id);
+
+/**
+ * @brief Get the vft with the hICN strategy functions.
+ *
+ * @param The id of the hicn_strategy_vft to retrieve.
+ * @return The vft struct that contains the list hICN strategy functions.
+ */
+const hicn_strategy_vft_t *hicn_dpo_get_strategy_vft (dpo_type_t vfts_id);
+
+/**
+ * @brief Get the vft with the hICN strategy functions from the strategy id.
+ *
+ * @param The id of the hicn_strategy_vft to retrieve.
+ * @return The vft struct that contains the list hICN strategy functions.
+ */
+const hicn_strategy_vft_t *hicn_dpo_get_strategy_vft_from_id (u8 vfts_id);
+
+/**
+ * @brief Initialize all the types hicn dpo registered
+ *
+ * Call the init functions of all the hicn dpo implemented.
+ * This init is called when the plugin bootstrap.
+ */
+void hicn_dpos_init (void);
+
+/**
+ * @brief Print the list of the registered hICN DPO
+ *
+ * @param s String to which to append the list of hICN DPO (strategies)
+ * @param n number of parameters to pass
+ *
+ * @result The string with the list of hICN DPO (strategies)
+ */
+u8 *format_hicn_strategy_list (u8 * s, int n, ...);
+
+/**
+ * @brief Check if a given id points to a strategy and the corresponding dpo ctx
+ *
+ * @param The id of the strategy to check.
+ *
+ * @result HICN_ERROR_NONE is the id is valid, otherwise EINVAL
+ */
+u8 hicn_dpo_strategy_id_is_valid (int strategy_id);
+
+/**
+ * @brief Return the number of available strategies. This number can be used
+ * as an upper bound for valid vfts_id.
+ *
+ * @result Return the number of available strategies.
+ */
+int hicn_strategy_get_all_available (void);
+
+/**
+ * @brief Registers a module at compilation time to be initialized as part of
+ * the ctor.
+ */
+void hicn_dpo_register (const hicn_dpo_vft_t * hicn_dpo);
+
+#endif /* // __HICN_STRATEGY_DPO_MANAGER_H__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/utils.h b/hicn-plugin/src/utils.h
new file mode 100755
index 000000000..ecad47e9b
--- /dev/null
+++ b/hicn-plugin/src/utils.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HICN_UTILS_H__
+#define __HICN_UTILS_H__
+
+#include "hicn.h"
+
+/*
+ * Debug helper: print an hICN v6 name (prefix + suffix/seq number).
+ * NOTE: a format() vector is not NUL-terminated, so the print must be
+ * length-bounded and the vector freed to avoid a leak.
+ */
+always_inline void
+hicn_print_name6 (hicn_name_t * name)
+{
+  u8 *s0;
+  s0 = format (0, "Source addr %U, seq_number %u", format_ip6_address,
+               (ip6_address_t *) name->ip6.prefix,
+               clib_net_to_host_u32 (name->ip6.suffix));
+
+  printf ("%.*s\n", (int) vec_len (s0), s0);
+  vec_free (s0);
+}
+
+/*
+ * Debug helper: print source/destination address and seq of a v6 hICN
+ * header on the CLI. Uses %v, which prints a (non NUL-terminated)
+ * vector, and frees the vector to avoid a leak.
+ */
+always_inline void
+hicn_print6 (hicn_header_t * hicn0)
+{
+  vlib_main_t *vm = vlib_get_main ();
+  u8 *s0;
+  s0 = format (0, "Source addr %U:%u, dest addr %U:%u", format_ip6_address,
+               &(hicn0->v6.ip.saddr),
+               clib_net_to_host_u32 (hicn0->v6.tcp.seq), format_ip6_address,
+               &(hicn0->v6.ip.daddr),
+               clib_net_to_host_u32 (hicn0->v6.tcp.seq));
+
+  vlib_cli_output (vm, "%v\n", s0);
+  vec_free (s0);
+}
+
+/*
+ * Debug helper: print source/destination address and seq of a v4 hICN
+ * header. NOTE: a format() vector is not NUL-terminated, so the print
+ * must be length-bounded and the vector freed to avoid a leak.
+ */
+always_inline void
+hicn_print4 (hicn_header_t * hicn0)
+{
+  u8 *s0;
+  s0 = format (0, "Source addr %U:%u, dest addr %U:%u", format_ip4_address,
+               &(hicn0->v4.ip.saddr),
+               clib_net_to_host_u32 (hicn0->v4.tcp.seq), format_ip4_address,
+               &(hicn0->v4.ip.daddr),
+               clib_net_to_host_u32 (hicn0->v4.tcp.seq));
+
+  printf ("%.*s\n", (int) vec_len (s0), s0);
+  vec_free (s0);
+}
+
+#endif /* // __HICN_UTILS_H__ */
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/vface_db.h b/hicn-plugin/src/vface_db.h
new file mode 100755
index 000000000..b98a2f46d
--- /dev/null
+++ b/hicn-plugin/src/vface_db.h
@@ -0,0 +1,155 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HICN_FACE_DB_H__
+#define __HICN_FACE_DB_H__
+
+#include <vnet/dpo/dpo.h>
+#include "faces/face.h"
+
+/* Must be a power of two */
+#define HICN_FACE_DB_INLINE_FACES 4
+
+#define HICN_PIT_N_HOP_BITMAP_SIZE HICN_PARAM_PIT_ENTRY_PHOPS_MAX
+
+#define HICN_PIT_N_HOP_BUCKET (HICN_PARAM_PIT_ENTRY_PHOPS_MAX - HICN_FACE_DB_INLINE_FACES)
+
+STATIC_ASSERT ((HICN_PIT_N_HOP_BUCKET & (HICN_PIT_N_HOP_BUCKET - 1)) == 0,
+ "HICN_PARAM_PIT_ENTRY_PHOP_MAX must be a power of 2 + 4");
+
+/* Takes 2 cache lines */
+typedef struct __attribute__ ((packed)) hicn_face_bucket_s
+{
+ /* Array of indexes of virtual faces */
+ dpo_id_t faces[HICN_PIT_N_HOP_BUCKET];
+
+ CLIB_CACHE_LINE_ALIGN_MARK (cache_line1);
+
+ /* Used to check if interests are retransmission */
+ /* How much are we gaining (performance)/wasting (memory) wrt the linear */
+ /* search on the array of faces? */
+ u8 bitmap[HICN_PIT_N_HOP_BITMAP_SIZE];
+
+} hicn_face_bucket_t;
+
+extern hicn_face_bucket_t *hicn_face_bucket_pool;
+
+/*
+ * Virtual faces are stored in a pool; when a virtual face is created, its
+ * index is saved in the PIT entry.  In case of interest aggregation we
+ * have to look at all the virtual faces to understand whether an interest
+ * is a duplicate.
+ */
+/*
+ * Per-PIT-entry face database: a small inline array of faces plus the
+ * index of one overflow bucket in hicn_face_bucket_pool.  The byte
+ * arithmetic in the comments hand-packs the struct into 64 bytes.
+ */
+typedef struct __attribute__ ((packed)) hicn_face_db_s
+{
+  /* 19B + 1B = 20B */
+  /* Equal to one or zero */
+  u8 is_overflow;
+
+  /* Number of faces in the last bucket */
+  /* Or next availabe entry for storing a dpo_id_t */
+  /* 20B + 4B = 24B */
+  u32 n_faces;
+
+  /* 24B + 32B (8*4) = 56B */
+  /* Array of indexes of virtual faces */
+  dpo_id_t inline_faces[HICN_FACE_DB_INLINE_FACES];
+
+  /* 56B + 4B = 60B */
+  /* Pool index of the overflow hicn_face_bucket_t. */
+  u32 next_bucket;
+
+  /* 60B + 4B = 64B */
+  u32 align;		//align back to 64
+
+} hicn_face_db_t;
+
+//STATIC_ASSERT(HICN_PIT_N_HOP_BITMAP_SIZE <= (HICN_PARAM_PIT_ENTRY_PHOPS_MAX/8));
+
+/*
+ * Return a pointer to the index-th face stored in face_db.  The first
+ * HICN_FACE_DB_INLINE_FACES entries live inline in the db itself; later
+ * entries live in the overflow bucket referenced by next_bucket.  The
+ * mask is valid because HICN_PIT_N_HOP_BUCKET is a power of two (see the
+ * STATIC_ASSERT above); an index beyond the bucket capacity would wrap.
+ */
+always_inline dpo_id_t *
+hicn_face_db_get_dpo_face (u32 index, hicn_face_db_t * face_db)
+{
+  ASSERT (index < face_db->n_faces);
+
+  return index < HICN_FACE_DB_INLINE_FACES ? &(face_db->inline_faces[index]) :
+    &(pool_elt_at_index (hicn_face_bucket_pool, face_db->next_bucket)->faces
+      [(index - HICN_FACE_DB_INLINE_FACES) & (HICN_PIT_N_HOP_BUCKET - 1)]);
+}
+
+/*
+ * One-time initialization: pre-allocate the fixed-size pool that backs
+ * every overflow face bucket (max_element buckets in total).
+ */
+always_inline void
+hicn_face_db_init (int max_element)
+{
+  pool_init_fixed (hicn_face_bucket_pool, max_element);
+}
+
+/*
+ * Translate a bucket pool index into a pointer to the bucket itself.
+ */
+always_inline hicn_face_bucket_t *
+hicn_face_db_get_bucket (u32 bucket_index)
+{
+  hicn_face_bucket_t *bucket;
+
+  bucket = pool_elt_at_index (hicn_face_bucket_pool, bucket_index);
+  return bucket;
+}
+
+/*
+ * Append a face (by dpo) to face_db.  The face is copied into the inline
+ * array while it has room, otherwise into the overflow bucket referenced
+ * by next_bucket (masked, so the bucket index wraps at its power-of-two
+ * capacity).  The bucket's presence bitmap is marked at
+ * dpoi_index % HICN_PIT_N_HOP_BITMAP_SIZE so hicn_face_search can later
+ * test membership; modulo collisions can produce false positives there.
+ */
+always_inline void
+hicn_face_db_add_face_dpo (dpo_id_t * dpo, hicn_face_db_t * face_db)
+{
+  ASSERT (dpo->dpoi_index != ~0);
+
+  hicn_face_bucket_t *faces_bkt =
+    pool_elt_at_index (hicn_face_bucket_pool, face_db->next_bucket);
+  /* Pick the destination slot: inline array first, then overflow bucket. */
+  dpo_id_t *face =
+    face_db->n_faces <
+    HICN_FACE_DB_INLINE_FACES ? &(face_db->inline_faces[face_db->n_faces]) :
+    &(faces_bkt->faces
+      [(face_db->n_faces -
+	HICN_FACE_DB_INLINE_FACES) & (HICN_PIT_N_HOP_BUCKET - 1)]);
+
+  clib_memcpy (face, dpo, sizeof (dpo_id_t));
+
+  /* This access the dpoi to increase the lock */
+  dpo_lock (dpo);
+
+  u32 bitmap_index = dpo->dpoi_index % HICN_PIT_N_HOP_BITMAP_SIZE;
+  faces_bkt->bitmap[bitmap_index] |= 0x01;
+  face_db->n_faces++;
+}
+
+/*
+ * Membership probe: return non-zero when the presence bitmap of the db's
+ * overflow bucket has the bit set for this dpo.  Because the bitmap is
+ * indexed by dpoi_index modulo HICN_PIT_N_HOP_BITMAP_SIZE, a non-zero
+ * result may be a false positive on index collisions.
+ */
+always_inline u8
+hicn_face_search (dpo_id_t * dpo, hicn_face_db_t * face_db)
+{
+  u32 slot = dpo->dpoi_index % HICN_PIT_N_HOP_BITMAP_SIZE;
+  hicn_face_bucket_t *bucket =
+    pool_elt_at_index (hicn_face_bucket_pool, face_db->next_bucket);
+
+  return bucket->bitmap[slot] & 0x01;
+}
+
+/*
+ * Reset face_db: clear the overflow bucket's presence bitmap, zero the
+ * face count, and return the bucket to the pool.  clib_memset_u64 takes a
+ * count of u64 words, hence BITMAP_SIZE / 8 for the byte-sized bitmap.
+ * NOTE(review): next_bucket still holds the index of the bucket just
+ * released to the pool — presumably the caller re-allocates a bucket
+ * before reusing this db; verify against callers.
+ */
+always_inline void
+hicn_faces_flush (hicn_face_db_t * face_db)
+{
+  hicn_face_bucket_t *faces_bkt =
+    pool_elt_at_index (hicn_face_bucket_pool, face_db->next_bucket);
+  clib_memset_u64 (&(faces_bkt->bitmap), 0, HICN_PIT_N_HOP_BITMAP_SIZE / 8);
+  face_db->n_faces = 0;
+  pool_put_index (hicn_face_bucket_pool, face_db->next_bucket);
+}
+
+
+#endif // __HICN_FACE_DB_H__
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */