Diffstat (limited to 'hicn-plugin/src/faces')
-rw-r--r--  hicn-plugin/src/faces/app/address_mgr.c    | 241
-rw-r--r--  hicn-plugin/src/faces/app/address_mgr.h    |  76
-rw-r--r--  hicn-plugin/src/faces/app/face_app_cli.c   | 205
-rw-r--r--  hicn-plugin/src/faces/app/face_cons.c      | 116
-rw-r--r--  hicn-plugin/src/faces/app/face_cons.h      |  76
-rw-r--r--  hicn-plugin/src/faces/app/face_prod.c      | 369
-rw-r--r--  hicn-plugin/src/faces/app/face_prod.h      | 106
-rw-r--r--  hicn-plugin/src/faces/app/face_prod_node.c | 314
-rw-r--r--  hicn-plugin/src/faces/face.c               | 475
-rw-r--r--  hicn-plugin/src/faces/face.h               | 798
-rw-r--r--  hicn-plugin/src/faces/face_cli.c           | 194
-rw-r--r--  hicn-plugin/src/faces/face_node.c          | 940
-rw-r--r--  hicn-plugin/src/faces/face_node.h          |  52
-rw-r--r--  hicn-plugin/src/faces/iface_node.c         | 915
-rw-r--r--  hicn-plugin/src/faces/iface_node.h         |  54
15 files changed, 4931 insertions, 0 deletions
diff --git a/hicn-plugin/src/faces/app/address_mgr.c b/hicn-plugin/src/faces/app/address_mgr.c
new file mode 100644
index 000000000..2d5894ab8
--- /dev/null
+++ b/hicn-plugin/src/faces/app/address_mgr.c
@@ -0,0 +1,241 @@
+/*
+ * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Copyright (c) 2017-2019 by cisco systems inc. All rights reserved.
+ *
+ */
+
+#include <dlfcn.h>
+
+#include <vlib/vlib.h>
+#include <vnet/ip/ip6_packet.h>
+#include <vnet/ip/ip4.h> //ip4_add_del_ip_address
+#include <vnet/ip/ip6.h> //ip6_add_del_ip_address
+#include <vnet/fib/fib_types.h> //FIB_PROTOCOL_IP4/6, FIB_NODE_INDEX_INVALID
+#include <vnet/fib/fib_entry.h> //FIB_SOURCE_PRIORITY_HI
+#include <vnet/fib/fib_table.h>
+#include <vppinfra/format.h>
+#include <vnet/interface.h> //appif_flags
+#include <vnet/interface_funcs.h> //vnet_sw_interface_set_flags
+
+#include "address_mgr.h"
+#include "../../hicn.h"
+#include "../../infra.h"
+#include "../../error.h"
+#include "../face.h"
+#include "../../strategy_dpo_ctx.h"
+#include "../../route.h"
+
+typedef struct address_mgr_main_s
+{
+ ip4_address_t next_ip4_local_addr;
+ ip6_address_t next_ip6_local_addr;
+} address_mgr_main_t;
+
+address_mgr_main_t address_mgr_main;
+
+static void
+increment_v4_address (ip4_address_t * a, u32 val)
+{
+ u32 v;
+
+ v = clib_net_to_host_u32 (a->as_u32) + val;
+ a->as_u32 = clib_host_to_net_u32 (v);
+}
+
+static void
+increment_v6_address (ip6_address_t * a, u64 val)
+{
+ u64 v;
+
+ v = clib_net_to_host_u64 (a->as_u64[1]) + val;
+ a->as_u64[1] = clib_host_to_net_u64 (v);
+}
+
+void
+get_two_ip4_addresses (ip4_address_t * appif_addr, ip4_address_t * nh_addr)
+{
+  /* We want two consecutive addresses that fall into a /31 mask */
+ if (address_mgr_main.next_ip4_local_addr.as_u8[3] & 0x01)
+ increment_v4_address (&(address_mgr_main.next_ip4_local_addr), 1);
+
+ *appif_addr = address_mgr_main.next_ip4_local_addr;
+ increment_v4_address (&(address_mgr_main.next_ip4_local_addr), 1);
+ *nh_addr = address_mgr_main.next_ip4_local_addr;
+ fib_prefix_t fib_pfx;
+ fib_node_index_t fib_entry_index = FIB_NODE_INDEX_INVALID;
+ u32 fib_index;
+
+ fib_pfx.fp_proto = FIB_PROTOCOL_IP4;
+ fib_pfx.fp_len = ADDR_MGR_IP4_LEN;
+ /* At this point the face exists in the face table */
+ do
+ {
+      /* Check if the route already exists in the fib */
+ fib_pfx.fp_addr = to_ip46 ( /* is_v6 */ 0, appif_addr->as_u8);
+ fib_index = fib_table_find_or_create_and_lock (fib_pfx.fp_proto,
+ HICN_FIB_TABLE,
+ FIB_SOURCE_PRIORITY_HI);
+ fib_entry_index = fib_table_lookup_exact_match (fib_index, &fib_pfx);
+ fib_table_unlock (fib_index, fib_pfx.fp_proto, FIB_SOURCE_PRIORITY_HI);
+ if (fib_entry_index != FIB_NODE_INDEX_INVALID)
+ {
+ fib_pfx.fp_addr = to_ip46 ( /* is_v6 */ 0, nh_addr->as_u8);
+ fib_index = fib_table_find_or_create_and_lock (fib_pfx.fp_proto,
+ HICN_FIB_TABLE,
+ FIB_SOURCE_PRIORITY_HI);
+ fib_entry_index =
+ fib_table_lookup_exact_match (fib_index, &fib_pfx);
+ fib_table_unlock (fib_index, fib_pfx.fp_proto,
+ FIB_SOURCE_PRIORITY_HI);
+ }
+ if (fib_entry_index != FIB_NODE_INDEX_INVALID)
+ {
+ increment_v4_address (appif_addr, 2);
+ increment_v4_address (nh_addr, 2);
+ }
+ }
+ while (fib_entry_index != FIB_NODE_INDEX_INVALID);
+
+ address_mgr_main.next_ip4_local_addr = *nh_addr;
+ increment_v4_address (&(address_mgr_main.next_ip4_local_addr), 1);
+}
+
+void
+get_two_ip6_addresses (ip6_address_t * appif_addr, ip6_address_t * nh_addr)
+{
+
+  /* We want two consecutive addresses that fall into a /127 mask */
+ if (address_mgr_main.next_ip6_local_addr.as_u8[15] & 0x01)
+ increment_v6_address (&(address_mgr_main.next_ip6_local_addr), 1);
+
+ *appif_addr = address_mgr_main.next_ip6_local_addr;
+ increment_v6_address (&(address_mgr_main.next_ip6_local_addr), 1);
+ *nh_addr = address_mgr_main.next_ip6_local_addr;
+
+
+ fib_prefix_t fib_pfx;
+ fib_node_index_t fib_entry_index = FIB_NODE_INDEX_INVALID;
+ u32 fib_index;
+
+ fib_pfx.fp_proto = FIB_PROTOCOL_IP6;
+ fib_pfx.fp_len = ADDR_MGR_IP6_LEN;
+
+ fib_index = fib_table_find (fib_pfx.fp_proto, 0);
+
+ /* At this point the face exists in the face table */
+ do
+ {
+      /* Check if the route already exists in the fib */
+ fib_pfx.fp_addr = to_ip46 ( /* is_v6 */ 1, appif_addr->as_u8);
+
+ fib_entry_index = fib_table_lookup_exact_match (fib_index, &fib_pfx);
+ //fib_table_unlock (fib_index, fib_pfx.fp_proto, FIB_SOURCE_PRIORITY_HI);
+ if (fib_entry_index != FIB_NODE_INDEX_INVALID)
+ {
+	  fib_pfx.fp_addr = to_ip46 ( /* is_v6 */ 1, nh_addr->as_u8);
+
+ fib_entry_index =
+ fib_table_lookup_exact_match (fib_index, &fib_pfx);
+ // fib_table_unlock (fib_index, fib_pfx.fp_proto,
+ // FIB_SOURCE_PRIORITY_HI);
+ }
+ if (fib_entry_index != FIB_NODE_INDEX_INVALID)
+ {
+ increment_v6_address (appif_addr, 2);
+ increment_v6_address (nh_addr, 2);
+ }
+ }
+ while (fib_entry_index != FIB_NODE_INDEX_INVALID);
+
+ address_mgr_main.next_ip6_local_addr = *nh_addr;
+ increment_v6_address (&(address_mgr_main.next_ip6_local_addr), 1);
+}
+
+ip4_address_t
+get_ip4_address ()
+{
+ ip4_address_t *prefix = &address_mgr_main.next_ip4_local_addr;
+ fib_prefix_t fib_pfx;
+ fib_node_index_t fib_entry_index = FIB_NODE_INDEX_INVALID;
+ u32 fib_index;
+
+ fib_pfx.fp_proto = FIB_PROTOCOL_IP4;
+ fib_pfx.fp_len = ADDR_MGR_IP4_LEN;
+ /* At this point the face exists in the face table */
+ do
+ {
+      /* Check if the route already exists in the fib */
+ fib_pfx.fp_addr = to_ip46 ( /* is_v6 */ 0, prefix->as_u8);
+ fib_index = fib_table_find_or_create_and_lock (fib_pfx.fp_proto,
+ HICN_FIB_TABLE,
+ FIB_SOURCE_PRIORITY_HI);
+ fib_entry_index = fib_table_lookup_exact_match (fib_index, &fib_pfx);
+ fib_table_unlock (fib_index, fib_pfx.fp_proto, FIB_SOURCE_PRIORITY_HI);
+ increment_v4_address (prefix, 1);
+ }
+ while (fib_entry_index != FIB_NODE_INDEX_INVALID);
+
+ return fib_pfx.fp_addr.ip4;
+}
+
+ip6_address_t
+get_ip6_address ()
+{
+ ip6_address_t *prefix = &address_mgr_main.next_ip6_local_addr;
+ fib_prefix_t fib_pfx;
+ fib_node_index_t fib_entry_index = FIB_NODE_INDEX_INVALID;
+ u32 fib_index;
+
+ fib_pfx.fp_proto = FIB_PROTOCOL_IP6;
+ fib_pfx.fp_len = ADDR_MGR_IP6_LEN;
+ /* At this point the face exists in the face table */
+ do
+ {
+      /* Check if the route already exists in the fib */
+ fib_pfx.fp_addr = to_ip46 ( /* is_v6 */ 1, prefix->as_u8);
+ fib_index = fib_table_find_or_create_and_lock (fib_pfx.fp_proto,
+ HICN_FIB_TABLE,
+ FIB_SOURCE_PRIORITY_HI);
+ fib_entry_index = fib_table_lookup_exact_match (fib_index, &fib_pfx);
+ fib_table_unlock (fib_index, fib_pfx.fp_proto, FIB_SOURCE_PRIORITY_HI);
+ increment_v6_address (prefix, 1);
+ }
+ while (fib_entry_index != FIB_NODE_INDEX_INVALID);
+
+ return fib_pfx.fp_addr.ip6;
+}
+
+void
+address_mgr_init ()
+{
+
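+  /* IPv4 addresses are drawn starting from the link-local range 169.254.0.0/16 */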
+ address_mgr_main.next_ip4_local_addr.as_u8[0] = 169;
+ address_mgr_main.next_ip4_local_addr.as_u8[1] = 254;
+ address_mgr_main.next_ip4_local_addr.as_u8[2] = 1;
+ address_mgr_main.next_ip4_local_addr.as_u8[3] = 1;
+
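+  /* IPv6 addresses are drawn starting from the unique-local range fc00::/7 */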
+ ip6_address_set_zero (&address_mgr_main.next_ip6_local_addr);
+ address_mgr_main.next_ip6_local_addr.as_u16[0] =
+ clib_host_to_net_u16 (0xfc00);
+ address_mgr_main.next_ip6_local_addr.as_u8[15] = 1;
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/faces/app/address_mgr.h b/hicn-plugin/src/faces/app/address_mgr.h
new file mode 100644
index 000000000..99450dcdd
--- /dev/null
+++ b/hicn-plugin/src/faces/app/address_mgr.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _ADDRESS_MGR_H_
+#define _ADDRESS_MGR_H_
+
+/**
+ * @file
+ *
+ * @brief Address manager.
+ *
+ * Address manager that maintains a pool of ip4 and ip6 addresses to assign to
+ * an interface.
+ */
+
+#define ADDR_MGR_IP4_LEN 32
+#define ADDR_MGR_IP4_CONS_LEN 31
+#define ADDR_MGR_IP6_LEN 128
+#define ADDR_MGR_IP6_CONS_LEN 127
+
+/**
+ * @brief Get two consecutive IP v4 addresses from the same /31 subnet
+ *
+ * @param addr1 first ip address with the least significant bit set to 0
+ * @param addr2 second ip address with the least significant bit set to 1
+ */
+void get_two_ip4_addresses (ip4_address_t * addr1, ip4_address_t * addr2);
+
+/**
+ * @brief Get two consecutive IPv6 addresses from the same /127 subnet
+ *
+ * @param addr1 first ip address with the least significant bit set to 0
+ * @param addr2 second ip address with the least significant bit set to 1
+ */
+void get_two_ip6_addresses (ip6_address_t * addr1, ip6_address_t * addr2);
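+
+/**
+ * @par Example
+ * A minimal usage sketch (illustrative only; see the consumer face code for
+ * actual usage, the variable names below are placeholders):
+ * @code{.c}
+ * ip4_address_t if_addr4, nh_addr4;
+ *
+ * get_two_ip4_addresses (&if_addr4, &nh_addr4);
+ * // if_addr4 (the even address of the /31) is assigned to the application
+ * // interface, nh_addr4 is handed to the application as its next hop.
+ * @endcode
+ */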
+
+/**
+ * @brief Get one IP v4 address
+ *
+ * @return ip address
+ */
+ip4_address_t get_ip4_address (void);
+
+/**
+ * @brief Get one IP v6 address
+ *
+ * @return ip address
+ */
+ip6_address_t get_ip6_address (void);
+
+/**
+ * @brief Init the address manager
+ */
+void address_mgr_init (void);
+
+#endif /* _ADDRESS_MGR_H_ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/hicn-plugin/src/faces/app/face_app_cli.c b/hicn-plugin/src/faces/app/face_app_cli.c
new file mode 100644
index 000000000..1aa27adc7
--- /dev/null
+++ b/hicn-plugin/src/faces/app/face_app_cli.c
@@ -0,0 +1,205 @@
+/*
+ * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/vnet.h>
+#include <vnet/dpo/dpo.h>
+#include <vlib/vlib.h>
+#include <vnet/ip/ip6_packet.h>
+
+//#include "../face_dpo.h"
+#include "../face.h"
+#include "face_prod.h"
+#include "face_cons.h"
+
+#define HICN_FACE_NONE 0
+#define HICN_FACE_DELETE 1
+#define HICN_FACE_ADD 2
+
+static clib_error_t *
+hicn_face_app_cli_set_command_fn (vlib_main_t * vm,
+ unformat_input_t * main_input,
+ vlib_cli_command_t * cmd)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ fib_prefix_t prefix;
+ hicn_face_id_t face_id1 = HICN_FACE_NULL;
+ hicn_face_id_t face_id2 = HICN_FACE_NULL;
+ u32 cs_reserved = HICN_PARAM_FACE_DFT_CS_RESERVED;
+ int ret = HICN_ERROR_NONE;
+ int sw_if;
+ int face_op = HICN_FACE_NONE;
+ int prod = 0;
+
+
+ /* Get a line of input. */
+ unformat_input_t _line_input, *line_input = &_line_input;
+ if (!unformat_user (main_input, unformat_line_input, line_input))
+ {
+ return (0);
+ }
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "del"))
+ {
+ face_op = HICN_FACE_DELETE;
+ }
+ else if (face_op == HICN_FACE_DELETE
+ && unformat (line_input, "id %d", &face_id1))
+ ;
+ else if (unformat (line_input, "add"))
+ {
+ face_op = HICN_FACE_ADD;
+ }
+ else if (face_op == HICN_FACE_ADD)
+ {
+ if (unformat (line_input, "intfc %U",
+ unformat_vnet_sw_interface, vnm, &sw_if))
+ ;
+ else
+ if (unformat
+ (line_input, "prod prefix %U/%d", unformat_ip46_address,
+ &prefix.fp_addr, IP46_TYPE_ANY, &prefix.fp_len))
+ {
+ prod = 1;
+ }
+ else if (prod && unformat (line_input, "cs_size %d", &cs_reserved))
+ ;
+ else if (unformat (line_input, "cons"))
+ ;
+ else
+ {
+ return clib_error_return (0, "%s '%U'",
+ get_error_string
+ (HICN_ERROR_CLI_INVAL),
+ format_unformat_error, line_input);
+ }
+ }
+ else
+ {
+ return clib_error_return (0, "%s '%U'",
+ get_error_string (HICN_ERROR_CLI_INVAL),
+ format_unformat_error, line_input);
+ }
+ }
+
+ if (face_id1 != HICN_FACE_NULL)
+ {
+
+ if (!hicn_dpoi_idx_is_valid (face_id1))
+ {
+ return clib_error_return (0, "%s, face_id1 %d not valid",
+ get_error_string (ret), face_id1);
+ }
+ }
+
+ int rv;
+ switch (face_op)
+ {
+ case HICN_FACE_ADD:
+ {
+ ip46_address_t prod_addr;
+ ip4_address_t cons_addr4;
+ ip6_address_t cons_addr6;
+
+ if (prod)
+ {
+ prefix.fp_proto =
+ ip46_address_is_ip4 (&prefix.
+ fp_addr) ? FIB_PROTOCOL_IP4 :
+ FIB_PROTOCOL_IP6;
+ rv =
+ hicn_face_prod_add (&prefix, sw_if, &cs_reserved, &prod_addr,
+ &face_id1);
+ if (rv == HICN_ERROR_NONE)
+ {
+ u8 *sbuf = NULL;
+ sbuf =
+ format (sbuf, "Face id: %d, producer address %U", face_id1,
+ format_ip46_address, &prod_addr,
+ 0 /*IP46_ANY_TYPE */ );
+ vlib_cli_output (vm, "%s", sbuf);
+ }
+ else
+ {
+ return clib_error_return (0, get_error_string (rv));
+ }
+ }
+ else
+ {
+ rv =
+ hicn_face_cons_add (&cons_addr4, &cons_addr6, sw_if, &face_id1,
+ &face_id2);
+ if (rv == HICN_ERROR_NONE)
+ {
+ u8 *sbuf = NULL;
+ sbuf =
+ format (sbuf,
+ "Face id: %d, address v4 %U, face id: %d address v6 %U",
+ face_id1, format_ip4_address, &cons_addr4, face_id2,
+ format_ip6_address, &cons_addr6);
+ vlib_cli_output (vm, "%s", sbuf);
+ }
+ else
+ {
+ return clib_error_return (0, get_error_string (rv));
+ }
+ }
+ break;
+ }
+ case HICN_FACE_DELETE:
+ {
+ hicn_face_t *face = hicn_dpoi_get_from_idx (face_id1);
+
+ if (face->flags & HICN_FACE_FLAGS_APPFACE_CONS)
+ rv = hicn_face_cons_del (face_id1);
+ else
+ rv = hicn_face_prod_del (face_id1);
+ if (rv == HICN_ERROR_NONE)
+ {
+ vlib_cli_output (vm, "Face %d deleted", face_id1);
+ }
+ else
+ {
+ return clib_error_return (0, get_error_string (rv));
+ }
+ break;
+ }
+ default:
+ return clib_error_return (0, "Operation (%d) not implemented", face_op);
+ break;
+ }
+ return (rv == HICN_ERROR_NONE) ? 0 : clib_error_return (0, "%s\n",
+ get_error_string
+ (rv));
+}
+
+/* cli declaration for 'cfg face' */
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (hicn_face_app_cli_set_command, static) =
+{
+ .path = "hicn face app",
+  .short_help = "hicn face app {add intfc <sw_if> {prod prefix <hicn_prefix> cs_size <size_in_packets> | cons}} | {del id <face_id>}",
+ .function = hicn_face_app_cli_set_command_fn,
+};
+/* *INDENT-ON* */
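+
+/*
+ * Example invocations (illustrative only; interface names, prefixes and ids
+ * are placeholders):
+ *
+ *   hicn face app add intfc memif0/0 prod prefix b001::/64 cs_size 4096
+ *   hicn face app add intfc memif1/0 cons
+ *   hicn face app del id 2
+ */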
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/hicn-plugin/src/faces/app/face_cons.c b/hicn-plugin/src/faces/app/face_cons.c
new file mode 100644
index 000000000..d44ba1a2b
--- /dev/null
+++ b/hicn-plugin/src/faces/app/face_cons.c
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/ip/ip6_packet.h>
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+
+#include "face_cons.h"
+#include "address_mgr.h"
+#include "../../infra.h"
+
+int
+hicn_face_cons_add (ip4_address_t * nh_addr4, ip6_address_t * nh_addr6,
+ u32 swif, hicn_face_id_t * faceid1,
+ hicn_face_id_t * faceid2)
+{
+  /* Create the corresponding appif */
+ /* Retrieve a valid local ip address to assign to the appif */
+ /* Set the ip address and create the face in the face db */
+
+ vlib_main_t *vm = vlib_get_main ();
+ vnet_main_t *vnm = vnet_get_main ();
+
+ hicn_main_t *hm = &hicn_main;
+
+ ip46_address_t if_ip;
+ ip46_address_reset (&if_ip);
+ nh_addr4->as_u32 = 0;
+ nh_addr6->as_u64[0] = 0;
+ nh_addr6->as_u64[1] = 0;
+ u32 if_flags = 0;
+
+ if (!hm->is_enabled)
+ {
+ return HICN_ERROR_FWD_NOT_ENABLED;
+ }
+ if_flags |= VNET_SW_INTERFACE_FLAG_ADMIN_UP;
+ vnet_sw_interface_set_flags (vnm, swif, if_flags);
+
+ get_two_ip4_addresses (&(if_ip.ip4), nh_addr4);
+ ip4_add_del_interface_address (vm,
+ swif,
+ &(if_ip.ip4),
+ ADDR_MGR_IP4_CONS_LEN, 0 /* is_del */ );
+
+ ip46_address_t nh_addr = to_ip46 (0, (u8 *) nh_addr4);
+
+ index_t adj_index = adj_nbr_find(FIB_PROTOCOL_IP4, VNET_LINK_IP4, &nh_addr, swif);
+
+ hicn_iface_add (&nh_addr, swif, faceid1, DPO_PROTO_IP4, adj_index);
+
+ hicn_face_t *face = hicn_dpoi_get_from_idx (*faceid1);
+ face->flags |= HICN_FACE_FLAGS_APPFACE_CONS;
+
+ get_two_ip6_addresses (&(if_ip.ip6), nh_addr6);
+ ip6_add_del_interface_address (vm,
+ swif,
+ &(if_ip.ip6),
+ ADDR_MGR_IP6_CONS_LEN, 0 /* is_del */ );
+
+ adj_index = adj_nbr_find(FIB_PROTOCOL_IP6, VNET_LINK_IP6, &nh_addr, swif);
+
+ hicn_iface_add ((ip46_address_t *) nh_addr6, swif, faceid2, DPO_PROTO_IP6, adj_index);
+
+ face = hicn_dpoi_get_from_idx (*faceid2);
+ face->flags |= HICN_FACE_FLAGS_APPFACE_CONS;
+
+ return HICN_ERROR_NONE;
+}
+
+int
+hicn_face_cons_del (hicn_face_id_t face_id)
+{
+ if (!hicn_dpoi_idx_is_valid (face_id))
+ return HICN_ERROR_APPFACE_NOT_FOUND;
+
+ hicn_face_t *face = hicn_dpoi_get_from_idx (face_id);
+
+ if (face->flags & HICN_FACE_FLAGS_APPFACE_CONS)
+ {
+ return hicn_face_del (face_id);
+ }
+ else
+ {
+ return HICN_ERROR_APPFACE_NOT_FOUND;
+ }
+}
+
+u8 *
+format_hicn_face_cons (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (index_t index) = va_arg (*args, index_t);
+ CLIB_UNUSED (u32 indent) = va_arg (*args, u32);
+
+ s = format (s, " (consumer face)");
+
+ return s;
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/faces/app/face_cons.h b/hicn-plugin/src/faces/app/face_cons.h
new file mode 100644
index 000000000..5f8f5dde8
--- /dev/null
+++ b/hicn-plugin/src/faces/app/face_cons.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _FACE_CONSUMER_H_
+#define _FACE_CONSUMER_H_
+
+#include <vnet/vnet.h>
+#include "../face.h"
+
+/**
+ * @file
+ *
+ * @brief Consumer application face.
+ *
+ * A consumer application face is built upon an ip face and identifies a local
+ * consumer application (co-located with the forwarder) that acts as a
+ * consumer. The interface used by the consumer application face is
+ * assumed to be reserved only for hICN traffic (e.g., a dedicated memif that
+ * connects the application to the forwarder). Only one application face can be
+ * assigned to an interface.
+ *
+ * In the vlib graph a consumer application face directly connects the
+ * device-input node to the hicn-vface-ip node.
+ */
+
+/**
+ * @brief Add a new consumer application face
+ *
+ * The method creates the internal ip face and sets the ip address on the interface.
+ * @param nh_addr4 ipv4 address to assign to the interface used by the application to
+ * send interests to the consumer face
+ * @param nh_addr6 ipv6 address to assign to the interface used by the application to
+ * send interests to the consumer face
+ * @param swif interface associated to the face
+ * @param faceid1 return value with the face id of the ipv4 consumer face
+ * @param faceid2 return value with the face id of the ipv6 consumer face
+ */
+int
+hicn_face_cons_add (ip4_address_t * nh_addr4, ip6_address_t * nh_addr6,
+ u32 swif, hicn_face_id_t * faceid1,
+ hicn_face_id_t * faceid2);
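+
+/**
+ * @par Example
+ * A minimal sketch of how the API is expected to be used (illustrative only;
+ * swif and the variable names are placeholders, error handling is omitted):
+ * @code{.c}
+ * ip4_address_t cons_addr4;
+ * ip6_address_t cons_addr6;
+ * hicn_face_id_t faceid_v4, faceid_v6;
+ *
+ * int rv = hicn_face_cons_add (&cons_addr4, &cons_addr6, swif,
+ *                              &faceid_v4, &faceid_v6);
+ * // On HICN_ERROR_NONE one ipv4 and one ipv6 consumer face exist on swif;
+ * // cons_addr4 / cons_addr6 are the addresses the application uses as
+ * // next hops to reach the forwarder.
+ * @endcode
+ */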
+
+/**
+ * @brief Delete an existing consumer application face
+ *
+ * @param face_id Id of the consumer application face
+ */
+int hicn_face_cons_del (hicn_face_id_t face_id);
+
+/**
+ * @brief Format an application consumer face
+ *
+ * @param s Pointer to a previous string. If null it will be initialized
+ * @param args Array storing input values. Expected u32 face_id and u32 indent
+ * @return String with the formatted face
+ */
+u8 *format_hicn_face_cons (u8 * s, va_list * args);
+
+
+#endif /* _FACE_CONSUMER_H_ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/faces/app/face_prod.c b/hicn-plugin/src/faces/app/face_prod.c
new file mode 100644
index 000000000..645154325
--- /dev/null
+++ b/hicn-plugin/src/faces/app/face_prod.c
@@ -0,0 +1,369 @@
+/*
+ * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/ip/ip6_packet.h>
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vnet/interface_funcs.h>
+
+#include "face_prod.h"
+#include "address_mgr.h"
+#include "../../infra.h"
+#include "../../route.h"
+#include "../../cache_policies/cs_lru.h"
+
+hicn_face_prod_state_t *face_state_vec;
+
+/* used to check if an interface is already in the vector */
+u32 *face_state_pool;
+
+static int
+hicn_app_state_create (u32 swif, fib_prefix_t * prefix)
+{
+ /* Make sure that the pool is not empty */
+ pool_validate_index (face_state_pool, 0);
+
+ u32 *swif_app;
+ u8 found = 0;
+ /* *INDENT-OFF* */
+ pool_foreach (swif_app, face_state_pool,{
+ if (*swif_app == swif)
+ {
+ found = 1;
+ }
+ }
+ );
+ /* *INDENT-ON* */
+
+
+ if (found)
+ return HICN_ERROR_APPFACE_ALREADY_ENABLED;
+
+
+ /* Create the appif and store in the vector */
+ vec_validate (face_state_vec, swif);
+ clib_memcpy (&(face_state_vec[swif].prefix), prefix, sizeof (fib_prefix_t));
+
+ /* Set as busy the element in the vector */
+ pool_get (face_state_pool, swif_app);
+ *swif_app = swif;
+
+ int ret = HICN_ERROR_NONE;
+ if (ip46_address_is_ip4 (&(prefix->fp_addr)))
+ {
+ ret =
+ vnet_feature_enable_disable ("ip4-unicast", "hicn-face-prod-input",
+ swif, 1, 0, 0);
+ }
+ else
+ {
+ ret =
+ vnet_feature_enable_disable ("ip6-unicast", "hicn-face-prod-input",
+ swif, 1, 0, 0);
+ }
+
+ return ret == 0 ? HICN_ERROR_NONE : HICN_ERROR_APPFACE_FEATURE;
+}
+
+static int
+hicn_app_state_del (u32 swif)
+{
+ /* Make sure that the pool is not empty */
+ pool_validate_index (face_state_pool, 0);
+
+ u32 *temp;
+ u32 *swif_app = NULL;
+ u8 found = 0;
+ fib_prefix_t *prefix;
+ /* *INDENT-OFF* */
+ pool_foreach (temp, face_state_pool,{
+ if (*temp == swif)
+ {
+ found = 1;
+ swif_app = temp;
+ }
+ }
+ );
+ /* *INDENT-ON* */
+
+ if (!found)
+ return HICN_ERROR_APPFACE_NOT_FOUND;
+
+ prefix = &(face_state_vec[swif].prefix);
+
+ int ret = HICN_ERROR_NONE;
+ if (ip46_address_is_ip4 (&prefix->fp_addr))
+ {
+ ret =
+ vnet_feature_enable_disable ("ip4-unicast", "hicn-face-prod-input",
+ swif, 0, 0, 0);
+ }
+ else
+ {
+ ret =
+ vnet_feature_enable_disable ("ip6-unicast", "hicn-face-prod-input",
+ swif, 0, 0, 0);
+ }
+
+ pool_put (face_state_pool, swif_app);
+ memset (&face_state_vec[swif], 0, sizeof (hicn_face_prod_state_t));
+
+ return ret == 0 ? HICN_ERROR_NONE : HICN_ERROR_APPFACE_FEATURE;
+}
+
+int
+hicn_face_prod_add (fib_prefix_t * prefix, u32 sw_if, u32 * cs_reserved,
+ ip46_address_t * prod_addr, hicn_face_id_t * faceid)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ vnet_main_t *vnm = vnet_get_main ();
+
+ hicn_main_t *hm = &hicn_main;
+
+ ip46_address_t local_app_ip;
+ CLIB_UNUSED(ip46_address_t remote_app_ip);
+ u32 if_flags = 0;
+
+ if (!hm->is_enabled)
+ {
+ return HICN_ERROR_FWD_NOT_ENABLED;
+ }
+
+ if (vnet_get_sw_interface_or_null (vnm, sw_if) == NULL)
+ {
+ return HICN_ERROR_FACE_HW_INT_NOT_FOUND;
+ }
+
+ int ret = HICN_ERROR_NONE;
+ hicn_face_t *face = NULL;
+
+ if_flags |= VNET_SW_INTERFACE_FLAG_ADMIN_UP;
+ vnet_sw_interface_set_flags (vnm, sw_if, if_flags);
+
+ u8 *s0;
+ s0 = format (0, "Prefix %U", format_fib_prefix, prefix);
+
+ vlib_cli_output (vm, "Received request for %s, swif %d\n", s0, sw_if);
+
+ if (ip46_address_is_zero (&prefix->fp_addr))
+ {
+ return HICN_ERROR_APPFACE_PROD_PREFIX_NULL;
+ }
+
+ u8 isv6 = ip46_address_is_ip4(prod_addr);
+ index_t adj_index = adj_nbr_find(isv6 ? FIB_PROTOCOL_IP6 : FIB_PROTOCOL_IP4, isv6 ? VNET_LINK_IP6 : VNET_LINK_IP4, prod_addr, sw_if);
+
+ /*
+ * Check if a producer face is already existing for the same prefix
+ * and sw_if
+ */
+ face = hicn_face_get (&(prefix->fp_addr), sw_if,
+ &hicn_face_hashtb, adj_index);
+
+ if (face != NULL)
+ {
+ if (!(face->flags & HICN_FACE_FLAGS_DELETED))
+ return HICN_ERROR_FACE_ALREADY_CREATED;
+
+ /*
+       * Something went wrong: a consumer face exists for the
+       * producer's prefix.
+       */
+      /* It should never happen; this is a safety check. */
+ if (face->flags & HICN_FACE_FLAGS_APPFACE_CONS)
+ return HICN_ERROR_FACE_ALREADY_CREATED;
+
+ /* If the face exists but is marked as deleted, undelete it */
+ if (face->flags & HICN_FACE_FLAGS_DELETED)
+ {
+ /*
+ * remove the deleted flag and retrieve the face
+ * local addr
+ */
+	  face->flags &= ~HICN_FACE_FLAGS_DELETED;
+ }
+ }
+ else
+ {
+ /* Otherwise create the face */
+ if (ip46_address_is_ip4 (&prefix->fp_addr))
+ {
+ /*
+ * Otherwise retrieve an ip address to assign as a
+ * local ip addr.
+ */
+ ip4_address_t local_app_ip4;
+ ip4_address_t remote_app_ip4;
+ get_two_ip4_addresses (&local_app_ip4, &remote_app_ip4);
+ ip4_add_del_interface_address (vm,
+ sw_if,
+ &local_app_ip4, 31, 0 /* is_del */ );
+ local_app_ip = to_ip46 ( /* isv6 */ 0, local_app_ip4.as_u8);
+ remote_app_ip = to_ip46 ( /* isv6 */ 0, remote_app_ip4.as_u8);
+
+ vnet_build_rewrite_for_sw_interface(vnm, sw_if, VNET_LINK_IP4, &remote_app_ip4);
+ }
+ else
+ {
+ ip6_address_t local_app_ip6;
+ ip6_address_t remote_app_ip6;
+ get_two_ip6_addresses (&local_app_ip6, &remote_app_ip6);
+ u8 *s0;
+ s0 = format (0, "%U", format_ip6_address, &local_app_ip6);
+
+ vlib_cli_output (vm, "Setting ip address %s\n", s0);
+
+ ip6_add_del_interface_address (vm,
+ sw_if,
+ &local_app_ip6, 127,
+ 0 /* is_del */ );
+ local_app_ip = to_ip46 ( /* isv6 */ 1, local_app_ip6.as_u8);
+ remote_app_ip = to_ip46 ( /* isv6 */ 1, remote_app_ip6.as_u8);
+ }
+ }
+
+ if (ret == HICN_ERROR_NONE)
+ // && hicn_face_prod_set_lru_max (*faceid, cs_reserved) == HICN_ERROR_NONE)
+ {
+ fib_route_path_t rpath = {0};
+ fib_route_path_t * rpaths = NULL;
+
+ if (ip46_address_is_ip4(&(prefix->fp_addr)))
+ {
+ ip4_address_t mask;
+ ip4_preflen_to_mask (prefix->fp_len, &mask);
+ prefix->fp_addr.ip4.as_u32 = prefix->fp_addr.ip4.as_u32 & mask.as_u32;
+ prefix->fp_proto = FIB_PROTOCOL_IP4;
+
+ rpath.frp_weight = 1;
+ rpath.frp_sw_if_index = ~0;
+ rpath.frp_addr.ip4.as_u32 = remote_app_ip.ip4.as_u32;
+ rpath.frp_sw_if_index = sw_if;
+ rpath.frp_proto = DPO_PROTO_IP4;
+
+ vec_add1 (rpaths, rpath);
+ }
+ else
+ {
+ ip6_address_t mask;
+ ip6_preflen_to_mask (prefix->fp_len, &mask);
+ prefix->fp_addr.ip6.as_u64[0] =
+ prefix->fp_addr.ip6.as_u64[0] & mask.as_u64[0];
+ prefix->fp_addr.ip6.as_u64[1] =
+ prefix->fp_addr.ip6.as_u64[1] & mask.as_u64[1];
+ prefix->fp_proto = FIB_PROTOCOL_IP6;
+
+ rpath.frp_weight = 1;
+ rpath.frp_sw_if_index = ~0;
+ rpath.frp_addr.ip6.as_u64[0] = remote_app_ip.ip6.as_u64[0];
+ rpath.frp_addr.ip6.as_u64[1] = remote_app_ip.ip6.as_u64[1];
+ rpath.frp_sw_if_index = sw_if;
+ rpath.frp_proto = DPO_PROTO_IP6;
+
+ vec_add1 (rpaths, rpath);
+ }
+
+ u32 fib_index = fib_table_find (prefix->fp_proto, 0);
+ fib_table_entry_path_add2 (fib_index,
+ prefix,
+ FIB_SOURCE_CLI,
+ FIB_ENTRY_FLAG_NONE, rpaths);
+
+ hicn_route_enable(prefix);
+ hicn_app_state_create (sw_if, prefix);
+ }
+
+ adj_index = adj_nbr_find(isv6 ? FIB_PROTOCOL_IP6 : FIB_PROTOCOL_IP4, isv6 ? VNET_LINK_IP6 : VNET_LINK_IP4, prod_addr, sw_if);
+ face = hicn_face_get(&local_app_ip, sw_if, &hicn_face_hashtb, adj_index);//HICN_FACE_FLAGS_APPFACE_PROD);
+
+ *faceid = hicn_dpoi_get_index (face);
+
+ face->flags |= HICN_FACE_FLAGS_APPFACE_PROD;
+
+ hicn_face_unlock_with_id(*faceid);
+
+ *prod_addr = local_app_ip;
+
+  /* Clean up in case something went wrong. */
+ if (ret)
+ {
+ hicn_app_state_del (sw_if);
+ }
+ return ret;
+}
+
+int
+hicn_face_prod_del (hicn_face_id_t face_id)
+{
+ if (!hicn_dpoi_idx_is_valid (face_id))
+ return HICN_ERROR_APPFACE_NOT_FOUND;
+
+ hicn_face_t *face = hicn_dpoi_get_from_idx (face_id);
+
+ if (face->flags & HICN_FACE_FLAGS_APPFACE_PROD)
+ {
+ /* Remove the face from the fib */
+ hicn_route_disable(&(face_state_vec[face->sw_if].prefix));
+ //hicn_route_del_nhop (&(face_state_vec[face->sw_if].prefix),
+ // face_id);
+
+ //int ret = hicn_face_del (face_id);
+ return hicn_app_state_del (face->sw_if);
+ //ret == HICN_ERROR_NONE ? hicn_app_state_del (face->sw_if) : ret;
+ }
+ else
+ {
+ return HICN_ERROR_APPFACE_NOT_FOUND;
+ }
+
+ return HICN_ERROR_NONE;
+}
+
+u8 *
+format_hicn_face_prod (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (index_t index) = va_arg (*args, index_t);
+ CLIB_UNUSED (u32 indent) = va_arg (*args, u32);
+
+ s =
+ format (s, " (producer)");
+
+ return s;
+}
+
+/* *INDENT-OFF* */
+VNET_FEATURE_INIT(hicn_prod_app_input_ip6, static)=
+{
+ .arc_name = "ip6-unicast",
+ .node_name = "hicn-face-prod-input",
+ .runs_before = VNET_FEATURES("ip6-inacl"),
+};
+/* *INDENT-ON* */
+
+/* *INDENT-OFF* */
+VNET_FEATURE_INIT(hicn_prod_app_input_ip4, static)=
+{
+ .arc_name = "ip4-unicast",
+ .node_name = "hicn-face-prod-input",
+ .runs_before = VNET_FEATURES("ip4-inacl"),
+};
+/* *INDENT-ON* */
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/faces/app/face_prod.h b/hicn-plugin/src/faces/app/face_prod.h
new file mode 100644
index 000000000..4cb2e3fbf
--- /dev/null
+++ b/hicn-plugin/src/faces/app/face_prod.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _FACE_PRODUCER_H_
+#define _FACE_PRODUCER_H_
+
+#include "../../cache_policies/cs_policy.h"
+#include "../face.h"
+
+/**
+ * @file
+ *
+ * @brief Producer application face.
+ *
+ * A producer application face is built upon an ip face and identifies a local
+ * producer application (co-located with the forwarder) that acts as a producer. In the
+ * current design an application face is either a face towards a consumer
+ * or a face towards a producer. The interface used by the producer application face is
+ * assumed to be reserved only for hICN traffic (e.g., a dedicated memif that
+ * connects the application to the forwarder). Only one application face can be
+ * assigned to an interface.
+ *
+ * Each producer application face is assigned a portion of the CS. Every
+ * data packet arriving at a producer application face is stored in the portion of the
+ * CS assigned to the face. The eviction policy is defined in the
+ * face. The available eviction policies are listed in the cache_policies folder.
+ *
+ * In the vlib graph a producer application face is directly connected to the
+ * device-input node (with the node hicn-face-prod-input) and passes every packet to
+ * the hicn-face-ip node.
+ */
+
+/**
+ * @brief Producer application face state that refers to the hICN producer socket
+ * created by the application.
+ *
+ */
+typedef struct
+{
+ fib_prefix_t prefix;
+} hicn_face_prod_state_t;
+
+extern hicn_face_prod_state_t *face_state_vec;
+
+#define DEFAULT_PROBING_PORT 3784
+
+/**
+ * @brief Add a new producer application face
+ *
+ * The method creates the internal ip face and the state specific to the
+ * producer application face. This method sets up a route in the FIB for the
+ * producer's prefix.
+ * @param prefix hicn prefix name assigned to the producer face (the prefix
+ * length is carried in the fib_prefix_t)
+ * @param swif interface associated to the face
+ * @param cs_reserved return the amount of cs assigned to the face
+ * @param prod_addr address to assign to the interface, used by the application to
+ * send data to the producer face
+ * @param faceid return value with the id of the producer face
+ */
+int
+hicn_face_prod_add (fib_prefix_t * prefix, u32 swif, u32 * cs_reserved,
+ ip46_address_t * prod_addr, hicn_face_id_t * faceid);
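+
+/**
+ * @par Example
+ * A minimal sketch of how the API is expected to be used (illustrative only;
+ * prefix, swif and the variable names are placeholders, error handling is
+ * omitted):
+ * @code{.c}
+ * fib_prefix_t prefix;        // producer prefix, e.g. b001::/64
+ * u32 cs_reserved = HICN_PARAM_FACE_DFT_CS_RESERVED;
+ * ip46_address_t prod_addr;
+ * hicn_face_id_t faceid;
+ *
+ * int rv = hicn_face_prod_add (&prefix, swif, &cs_reserved,
+ *                              &prod_addr, &faceid);
+ * // On HICN_ERROR_NONE a route for prefix is installed and prod_addr
+ * // holds the address the application uses to send data to the face.
+ * @endcode
+ */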
+
+/**
+ * @brief Delete an existing application face
+ *
+ * @param faceid id of the face to remove
+ */
+int hicn_face_prod_del (hicn_face_id_t faceid);
+
+/**
+ * @brief Set lru queue size for an app face
+ *
+ * @param face_id Id of the producer application face
+ */
+int hicn_face_prod_set_lru_max (hicn_face_id_t face_id, u32 * requested_size);
+
+/**
+ * @brief Format an application producer face
+ *
+ * @param s Pointer to a previous string. If null it will be initialized
+ * @param args Array storing input values. Expected u32 face_id and u32 indent
+ * @return String with the formatted face
+ */
+u8 *format_hicn_face_prod (u8 * s, va_list * args);
+
+
+#endif /* _FACE_PRODUCER_H_ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/faces/app/face_prod_node.c b/hicn-plugin/src/faces/app/face_prod_node.c
new file mode 100644
index 000000000..80c3e124c
--- /dev/null
+++ b/hicn-plugin/src/faces/app/face_prod_node.c
@@ -0,0 +1,314 @@
+/*
+ * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file
+ *
+ * @brief Application interface node
+ *
+ * This node runs after the device-input node and performs some safety checks in
+ * order to avoid unexpected interests and data (i.e., hICN packets whose name does
+ * not match the prefix associated with the application face)
+ */
+
+#include "face_prod.h"
+#include "../../hicn_api.h"
+#include "../../mgmt.h"
+
+static __clib_unused char *face_prod_input_error_strings[] = {
+#define _(sym, string) string,
+ foreach_hicnfwd_error
+#undef _
+};
+
+/* Node context data */
+typedef struct hicn_face_prod_runtime_s
+{
+ int id;
+} hicn_face_prod_runtime_t;
+
+typedef struct
+{
+ u32 next_index;
+ u32 sw_if_index;
+} hicn_face_prod_input_trace_t;
+
+typedef enum
+{
+ HICN_FACE_PROD_NEXT_DATA_IP4,
+ HICN_FACE_PROD_NEXT_DATA_IP6,
+ HICN_FACE_PROD_NEXT_ERROR_DROP,
+ HICN_FACE_PROD_N_NEXT,
+} hicn_face_prod_next_t;
+
+vlib_node_registration_t hicn_face_prod_input_node;
+
+static __clib_unused u8 *
+format_face_prod_input_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ hicn_face_prod_input_trace_t *t =
+ va_arg (*args, hicn_face_prod_input_trace_t *);
+ CLIB_UNUSED (u32 indent) = format_get_indent (s);
+
+ s = format (s, "prod-face: sw_if_index %d next-index %d",
+ t->sw_if_index, t->next_index);
+ return s;
+}
+
+static_always_inline int
+match_ip4_name (u32 * name, fib_prefix_t * prefix)
+{
+ u32 xor = 0;
+
+ xor = *name & prefix->fp_addr.ip4.data_u32;
+
+ return xor == prefix->fp_addr.ip4.data_u32;
+}
+
+static_always_inline int
+match_ip6_name (u8 * name, fib_prefix_t * prefix)
+{
+ union
+ {
+ u32x4 as_u32x4;
+ u64 as_u64[2];
+ u32 as_u32[4];
+ } xor_sum __attribute__ ((aligned (sizeof (u32x4))));
+
+ xor_sum.as_u64[0] = ((u64 *) name)[0] & prefix->fp_addr.ip6.as_u64[0];
+ xor_sum.as_u64[1] = ((u64 *) name)[1] & prefix->fp_addr.ip6.as_u64[1];
+
+ return (xor_sum.as_u64[0] == prefix->fp_addr.ip6.as_u64[0]) &&
+ (xor_sum.as_u64[1] == prefix->fp_addr.ip6.as_u64[1]);
+}
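+
+/*
+ * Illustrative example of the checks above (values are placeholders):
+ * with a producer prefix whose masked address is b001::, a data name
+ * b001::1234 satisfies (name & prefix) == prefix, while c001::1234 does
+ * not. The check verifies that every bit set in the prefix address is
+ * also set in the packet name.
+ */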
+
+static_always_inline u32
+hicn_face_prod_next_from_data_hdr (vlib_node_runtime_t * node,
+ vlib_buffer_t * b, fib_prefix_t * prefix)
+{
+ u8 *ptr = vlib_buffer_get_current (b);
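+  /* The hICN name of a data packet is carried in the IP source address
+   * (offset 12 in the IPv4 header, offset 8 in the IPv6 header); the first
+   * nibble of the packet encodes the IP version (0x40 / 0x60). */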
+ u8 v = *ptr & 0xf0;
+ int match_res = 1;
+
+ if (PREDICT_TRUE (v == 0x40 && ip46_address_is_ip4 (&prefix->fp_addr)))
+ {
+ match_res = match_ip4_name ((u32 *) & (ptr[12]), prefix);
+ }
+ else if (PREDICT_TRUE (v == 0x60 && !ip46_address_is_ip4 (&prefix->fp_addr)))
+ {
+ match_res = match_ip6_name (& (ptr[8]), prefix);
+ }
+
+ return match_res ? HICN_FACE_PROD_NEXT_DATA_IP4 + (v ==
+ 0x60) :
+ HICN_FACE_PROD_NEXT_ERROR_DROP;
+}
+
+static_always_inline void
+hicn_face_prod_trace_buffer (vlib_main_t * vm, vlib_node_runtime_t * node,
+ u32 swif, vlib_buffer_t * b, u32 next)
+{
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) &&
+ (b->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ hicn_face_prod_input_trace_t *t =
+ vlib_add_trace (vm, node, b, sizeof (*t));
+ t->next_index = next;
+ t->sw_if_index = swif;
+ }
+}
+
+static uword
+hicn_face_prod_input_node_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ u32 n_left_from, *from, *to_next;
+ hicn_face_prod_next_t next_index;
+ vl_api_hicn_api_node_stats_get_reply_t stats = { 0 };
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from >= 8 && n_left_to_next >= 4)
+ {
+ vlib_buffer_t *b0, *b1, *b2, *b3;
+ u32 bi0, bi1, bi2, bi3;
+ hicn_face_prod_state_t *prod_face0 = NULL;
+ hicn_face_prod_state_t *prod_face1 = NULL;
+ hicn_face_prod_state_t *prod_face2 = NULL;
+ hicn_face_prod_state_t *prod_face3 = NULL;
+ u32 next0, next1, next2, next3;
+
+ {
+ vlib_buffer_t *b4, *b5, *b6, *b7;
+ b4 = vlib_get_buffer (vm, from[4]);
+ b5 = vlib_get_buffer (vm, from[5]);
+ b6 = vlib_get_buffer (vm, from[6]);
+ b7 = vlib_get_buffer (vm, from[7]);
+ CLIB_PREFETCH (b4, CLIB_CACHE_LINE_BYTES, STORE);
+ CLIB_PREFETCH (b5, CLIB_CACHE_LINE_BYTES, STORE);
+ CLIB_PREFETCH (b6, CLIB_CACHE_LINE_BYTES, STORE);
+ CLIB_PREFETCH (b7, CLIB_CACHE_LINE_BYTES, STORE);
+ }
+
+ bi0 = from[0];
+ bi1 = from[1];
+ bi2 = from[2];
+ bi3 = from[3];
+
+ from += 4;
+ n_left_from -= 4;
+ to_next[0] = bi0;
+ to_next[1] = bi1;
+ to_next[2] = bi2;
+ to_next[3] = bi3;
+
+ to_next += 4;
+ n_left_to_next -= 4;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+ b2 = vlib_get_buffer (vm, bi2);
+ b3 = vlib_get_buffer (vm, bi3);
+
+ prod_face0 =
+ &face_state_vec[vnet_buffer (b0)->sw_if_index[VLIB_RX]];
+ prod_face1 =
+ &face_state_vec[vnet_buffer (b1)->sw_if_index[VLIB_RX]];
+ prod_face2 =
+ &face_state_vec[vnet_buffer (b2)->sw_if_index[VLIB_RX]];
+ prod_face3 =
+ &face_state_vec[vnet_buffer (b3)->sw_if_index[VLIB_RX]];
+
+ next0 =
+ hicn_face_prod_next_from_data_hdr (node, b0, &prod_face0->prefix);
+ next1 =
+ hicn_face_prod_next_from_data_hdr (node, b1, &prod_face1->prefix);
+ next2 =
+ hicn_face_prod_next_from_data_hdr (node, b2, &prod_face2->prefix);
+ next3 =
+ hicn_face_prod_next_from_data_hdr (node, b3, &prod_face3->prefix);
+ stats.pkts_data_count += 4;
+
+ /* trace */
+ hicn_face_prod_trace_buffer (vm, node,
+ vnet_buffer (b0)->sw_if_index[VLIB_RX],
+ b0, next0);
+ hicn_face_prod_trace_buffer (vm, node,
+ vnet_buffer (b1)->sw_if_index[VLIB_RX],
+ b1, next1);
+ hicn_face_prod_trace_buffer (vm, node,
+ vnet_buffer (b2)->sw_if_index[VLIB_RX],
+ b2, next2);
+ hicn_face_prod_trace_buffer (vm, node,
+ vnet_buffer (b3)->sw_if_index[VLIB_RX],
+ b3, next3);
+
+ /* enqueue */
+ vlib_validate_buffer_enqueue_x4 (vm, node, next_index, to_next,
+ n_left_to_next, bi0, bi1, bi2, bi3,
+ next0, next1, next2, next3);
+
+ stats.pkts_processed += 4;
+
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ vlib_buffer_t *b0;
+ u32 bi0, swif;
+ hicn_face_prod_state_t *prod_face = NULL;
+ u32 next0;
+
+ if (n_left_from > 1)
+ {
+ vlib_buffer_t *b1;
+ b1 = vlib_get_buffer (vm, from[1]);
+ CLIB_PREFETCH (b1, CLIB_CACHE_LINE_BYTES, STORE);
+ }
+
+ bi0 = from[0];
+ from += 1;
+ n_left_from -= 1;
+ to_next[0] = bi0;
+ to_next += 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ swif = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+ prod_face = &face_state_vec[swif];
+
+ next0 =
+ hicn_face_prod_next_from_data_hdr (node, b0, &prod_face->prefix);
+ stats.pkts_data_count++;
+
+ /* trace */
+ hicn_face_prod_trace_buffer (vm, node,
+ vnet_buffer (b0)->sw_if_index[VLIB_RX],
+ b0, next0);
+
+ /* enqueue */
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+ n_left_to_next, bi0, next0);
+
+ stats.pkts_processed += 1;
+
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ vlib_node_increment_counter (vm, node->node_index,
+ HICNFWD_ERROR_PROCESSED, stats.pkts_processed);
+ vlib_node_increment_counter (vm, node->node_index, HICNFWD_ERROR_DATAS,
+ stats.pkts_data_count);
+
+ return (frame->n_vectors);
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE(hicn_face_prod_input_node) =
+{
+ .function = hicn_face_prod_input_node_fn,
+ .name = "hicn-face-prod-input",
+ .vector_size = sizeof(u32),
+ .format_trace = format_face_prod_input_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+ .n_errors = ARRAY_LEN(face_prod_input_error_strings),
+ .error_strings = face_prod_input_error_strings,
+ .n_next_nodes = HICN_FACE_PROD_N_NEXT,
+ .next_nodes =
+ {
+ [HICN_FACE_PROD_NEXT_DATA_IP4] = "hicn4-face-input",
+ [HICN_FACE_PROD_NEXT_DATA_IP6] = "hicn6-face-input",
+ [HICN_FACE_PROD_NEXT_ERROR_DROP] = "error-drop",
+ },
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/faces/face.c b/hicn-plugin/src/faces/face.c
new file mode 100644
index 000000000..b495d18b0
--- /dev/null
+++ b/hicn-plugin/src/faces/face.c
@@ -0,0 +1,475 @@
+/*
+ * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vnet/fib/fib_entry_track.h>
+
+#include "face.h"
+#include "../hicn.h"
+#include "../params.h"
+#include "../error.h"
+#include "../mapme.h"
+#include "../mapme_eventmgr.h"
+
+dpo_id_t *face_dpo_vec;
+hicn_face_vft_t *face_vft_vec;
+char **face_type_names_vec;
+
+hicn_face_t *hicn_dpoi_face_pool;
+
+dpo_type_t first_type = DPO_FIRST;
+
+vlib_combined_counter_main_t *counters;
+
+dpo_type_t hicn_face_type;
+
+fib_node_type_t hicn_face_fib_node_type;
+
+const char *HICN_FACE_CTRX_STRING[] = {
+#define _(a,b,c) c,
+ foreach_hicn_face_counter
+#undef _
+};
+
+u8 *
+face_show (u8 * s, int face_id, u32 indent)
+{
+ s = format (s, "%U Faces:\n", format_white_space, indent);
+ indent += 4;
+ int i;
+ vec_foreach_index (i, face_dpo_vec)
+ {
+ s =
+ format (s, "%U", face_vft_vec[i].format_face,
+ face_dpo_vec[face_id].dpoi_index, indent);
+ }
+
+ return (s);
+
+}
+
+mhash_t hicn_face_vec_hashtb;
+mhash_t hicn_face_hashtb;
+
+hicn_face_vec_t *hicn_vec_pool;
+
+const static char *const hicn_face6_nodes[] = {
+ "hicn6-face-output", // this is the name you give your node in VLIB_REGISTER_NODE
+ "hicn6-iface-output", // this is the name you give your node in VLIB_REGISTER_NODE
+ NULL,
+};
+
+const static char *const hicn_face4_nodes[] = {
+ "hicn4-face-output", // this is the name you give your node in VLIB_REGISTER_NODE
+ "hicn4-iface-output", // this is the name you give your node in VLIB_REGISTER_NODE
+ NULL,
+};
+
+
+const static char *const *const hicn_face_nodes[DPO_PROTO_NUM] = {
+ [DPO_PROTO_IP4] = hicn_face4_nodes,
+ [DPO_PROTO_IP6] = hicn_face6_nodes
+};
+
+const static dpo_vft_t hicn_face_dpo_vft = {
+ .dv_lock = hicn_face_lock,
+ .dv_unlock = hicn_face_unlock,
+ .dv_format = format_hicn_face,
+};
+
+static fib_node_t *
+hicn_face_node_get (fib_node_index_t index)
+{
+ hicn_face_t *face;
+
+ face = hicn_dpoi_get_from_idx (index);
+
+ return (&face->fib_node);
+}
+
+static void
+hicn_face_last_lock_gone (fib_node_t * node)
+{
+}
+
+static hicn_face_t *
+hicn_face_from_fib_node (fib_node_t * node)
+{
+ return ((hicn_face_t *) (((char *) node) -
+ STRUCT_OFFSET_OF (hicn_face_t, fib_node)));
+}
+
+static fib_node_back_walk_rc_t
+hicn_face_back_walk_notify (fib_node_t * node, fib_node_back_walk_ctx_t * ctx)
+{
+
+ hicn_face_t *face = hicn_face_from_fib_node (node);
+
+ const dpo_id_t *dpo_loadbalance =
+ fib_entry_contribute_ip_forwarding (face->fib_entry_index);
+ const load_balance_t *lb0 = load_balance_get (dpo_loadbalance->dpoi_index);
+
+ const dpo_id_t *dpo = load_balance_get_bucket_i (lb0, 0);
+
+ dpo_stack (hicn_face_type, face->dpo.dpoi_proto, &face->dpo, dpo);
+ /* if (dpo_is_adj(dpo)) */
+ /* { */
+ /* ip_adjacency_t * adj = adj_get (dpo->dpoi_index); */
+
+ /* if (dpo->dpoi_type == DPO_ADJACENCY_MIDCHAIN || */
+ /* dpo->dpoi_type == DPO_ADJACENCY_MCAST_MIDCHAIN) */
+ /* { */
+ /* adj_nbr_midchain_stack(dpo->dpoi_index, &face->dpo); */
+ /* } */
+ /* else */
+ /* { */
+ /* dpo_stack(hicn_face_type, face->dpo.dpoi_proto, &face->dpo, dpo); */
+ /* } */
+ /* } */
+
+ return (FIB_NODE_BACK_WALK_CONTINUE);
+}
+
+static void
+hicn_face_show_memory (void)
+{
+}
+
+
+static const fib_node_vft_t hicn_face_fib_node_vft = {
+ .fnv_get = hicn_face_node_get,
+ .fnv_last_lock = hicn_face_last_lock_gone,
+ .fnv_back_walk = hicn_face_back_walk_notify,
+ .fnv_mem_show = hicn_face_show_memory,
+};
+
+// Make this more flexible for future face types
+void
+hicn_face_module_init (vlib_main_t * vm)
+{
+ pool_validate (hicn_dpoi_face_pool);
+ pool_alloc (hicn_dpoi_face_pool, 1024);
+ counters =
+ vec_new (vlib_combined_counter_main_t,
+ HICN_PARAM_FACES_MAX * HICN_N_COUNTER);
+
+ mhash_init (&hicn_face_vec_hashtb,
+ sizeof (hicn_face_input_faces_t) /* value */ ,
+ sizeof (hicn_face_key_t) /* key */ );
+ mhash_init (&hicn_face_hashtb, sizeof (hicn_face_id_t) /* value */ ,
+ sizeof (hicn_face_key_t) /* key */ );
+
+ pool_alloc (hicn_vec_pool, 100);
+
+ /*
+   * How useful is the following registration?
+ * So far it seems that we need it only for setting the dpo_type.
+ */
+ hicn_face_type =
+ dpo_register_new_type (&hicn_face_dpo_vft, hicn_face_nodes);
+
+ /*
+ * We register a new node type to get informed when the adjacency corresponding
+ * to a face is updated
+ */
+ hicn_face_fib_node_type =
+ fib_node_register_new_type (&hicn_face_fib_node_vft);
+}
+
+u8 *
+format_hicn_face (u8 * s, va_list * args)
+{
+ index_t index = va_arg (*args, index_t);
+ u32 indent = va_arg (*args, u32);
+ hicn_face_t *face;
+
+ face = hicn_dpoi_get_from_idx (index);
+
+ if (face->flags & HICN_FACE_FLAGS_FACE)
+ {
+ hicn_face_id_t face_id = hicn_dpoi_get_index (face);
+ s = format (s, "%U Face %d: ", format_white_space, indent, face_id);
+ s = format (s, "nat address %U locks %u, path_label %u",
+ format_ip46_address, &face->nat_addr, IP46_TYPE_ANY,
+ face->locks, face->pl_id);
+
+ if ((face->flags & HICN_FACE_FLAGS_APPFACE_PROD))
+ s = format (s, " (producer)");
+ else if ((face->flags & HICN_FACE_FLAGS_APPFACE_CONS))
+ s = format (s, " (consumer)");
+
+ if ((face->flags & HICN_FACE_FLAGS_DELETED))
+ s = format (s, " (deleted)");
+
+ s = format (s, "\n%U%U",
+ format_white_space, indent + 2,
+ format_dpo_id, &face->dpo, indent + 3);
+ }
+ else
+ {
+ hicn_face_id_t face_id = hicn_dpoi_get_index (face);
+ s = format (s, "%U iFace %d: ", format_white_space, indent, face_id);
+ s = format (s, "nat address %U locks %u, path_label %u",
+ format_ip46_address, &face->nat_addr, IP46_TYPE_ANY,
+ face->locks, face->pl_id);
+
+ if ((face->flags & HICN_FACE_FLAGS_APPFACE_PROD))
+ s = format (s, " (producer)");
+ else if ((face->flags & HICN_FACE_FLAGS_APPFACE_CONS))
+ s = format (s, " (consumer)");
+
+ if ((face->flags & HICN_FACE_FLAGS_DELETED))
+ s = format (s, " (deleted)");
+ }
+
+ return s;
+}
+
+
+u8 *
+format_hicn_face_all (u8 * s, int n, ...)
+{
+ va_list ap;
+ va_start (ap, n);
+ u32 indent = va_arg (ap, u32);
+
+ s = format (s, "%U Faces:\n", format_white_space, indent);
+
+ hicn_face_t *face;
+
+ /* *INDENT-OFF* */
+ pool_foreach ( face, hicn_dpoi_face_pool,
+ {
+ s = format(s, "%U\n", format_hicn_face, hicn_dpoi_get_index(face), indent);
+ });
+ /* *INDENT-ON* */
+
+ return s;
+}
+
+int
+hicn_face_del (hicn_face_id_t face_id)
+{
+ hicn_face_t *face = hicn_dpoi_get_from_idx (face_id);
+ hicn_face_key_t key;
+ hicn_face_key_t old_key;
+ hicn_face_key_t old_key2;
+
+ hicn_face_get_key (&(face->nat_addr), face->sw_if, &(face->dpo), &key);
+ hicn_face_input_faces_t *in_faces_vec =
+ hicn_face_get_vec (&(face->nat_addr),
+ &hicn_face_vec_hashtb);
+ if (in_faces_vec != NULL)
+ {
+ hicn_face_vec_t *vec =
+ pool_elt_at_index (hicn_vec_pool, in_faces_vec->vec_id);
+ u32 index_face = vec_search (*vec, face_id);
+ vec_del1 (*vec, index_face);
+
+ if (vec_len (*vec) == 0)
+ {
+ pool_put_index (hicn_vec_pool, in_faces_vec->vec_id);
+ mhash_unset (&hicn_face_vec_hashtb, &key, (uword *) & old_key);
+ vec_free (*vec);
+ }
+ else
+ {
+ /* Check if the face we are deleting is the preferred one. */
+	  /* If so, replace it with another. */
+ if (in_faces_vec->face_id == face_id)
+ {
+ in_faces_vec->face_id = (*vec)[0];
+ }
+ }
+
+ mhash_unset (&hicn_face_hashtb, &key, (uword *) & old_key2);
+ }
+
+ int ret = HICN_ERROR_NONE;
+
+ if (hicn_dpoi_idx_is_valid (face_id))
+ {
+ hicn_face_t *face = hicn_dpoi_get_from_idx (face_id);
+ face->locks--;
+ if (face->locks == 0)
+ pool_put_index (hicn_dpoi_face_pool, face_id);
+ else
+ face->flags |= HICN_FACE_FLAGS_DELETED;
+ }
+ else
+ ret = HICN_ERROR_FACE_NOT_FOUND;
+
+
+ return ret;
+}
+
+static void
+hicn_iface_to_face (hicn_face_t * face, const dpo_id_t * dpo)
+{
+ dpo_stack (hicn_face_type, dpo->dpoi_proto, &face->dpo, dpo);
+
+ face->flags &= ~HICN_FACE_FLAGS_IFACE;
+ face->flags |= HICN_FACE_FLAGS_FACE;
+
+ if (dpo_is_adj (dpo))
+ {
+ fib_node_init (&face->fib_node, hicn_face_fib_node_type);
+ fib_node_lock (&face->fib_node);
+
+      if (dpo->dpoi_type != DPO_ADJACENCY_MIDCHAIN &&
+	  dpo->dpoi_type != DPO_ADJACENCY_MCAST_MIDCHAIN)
+ {
+ ip_adjacency_t *adj = adj_get (dpo->dpoi_index);
+ ip46_address_t *nh = &(adj->sub_type.nbr.next_hop);
+ fib_prefix_t prefix;
+
+ if (!ip46_address_is_zero (nh))
+ {
+ fib_prefix_from_ip46_addr (nh, &prefix);
+
+ u32 fib_index =
+ fib_table_find (prefix.fp_proto, HICN_FIB_TABLE);
+
+ face->fib_entry_index = fib_entry_track (fib_index,
+ &prefix,
+ hicn_face_fib_node_type,
+ hicn_dpoi_get_index
+ (face),
+ &face->fib_sibling);
+ }
+ }
+ }
+}
+
+/*
+ * Utility that adds a new face cache entry. For the moment we assume that
+ * the ip_adjacency has already been set up.
+ */
+int
+hicn_face_add (const dpo_id_t * dpo_nh, ip46_address_t * nat_address,
+ int sw_if, hicn_face_id_t * pfaceid, u8 is_app_prod)
+{
+
+ hicn_face_flags_t flags = (hicn_face_flags_t) 0;
+ flags |= HICN_FACE_FLAGS_FACE;
+
+ hicn_face_t *face;
+
+ face =
+ hicn_face_get_with_dpo (nat_address, sw_if, dpo_nh, &hicn_face_hashtb);
+
+ if (face != NULL)
+ {
+ *pfaceid = hicn_dpoi_get_index (face);
+ return HICN_ERROR_FACE_ALREADY_CREATED;
+ }
+
+ face =
+ hicn_face_get (nat_address, sw_if, &hicn_face_hashtb, dpo_nh->dpoi_index);
+
+ dpo_id_t temp_dpo = DPO_INVALID;
+ temp_dpo.dpoi_index = dpo_nh->dpoi_index;
+ hicn_face_key_t key;
+ hicn_face_get_key (nat_address, sw_if, dpo_nh, &key);
+
+ if (face == NULL)
+ {
+
+ hicn_iface_add (nat_address, sw_if, pfaceid, dpo_nh->dpoi_proto,
+ dpo_nh->dpoi_index);
+ face = hicn_dpoi_get_from_idx (*pfaceid);
+
+ mhash_set_mem (&hicn_face_hashtb, &key, (uword *) pfaceid, 0);
+
+ hicn_face_get_key (nat_address, sw_if, &temp_dpo, &key);
+ mhash_set_mem (&hicn_face_hashtb, &key, (uword *) pfaceid, 0);
+ }
+ else
+ {
+ /* We found an iface and we convert it to a face */
+ *pfaceid = hicn_dpoi_get_index (face);
+ mhash_set_mem (&hicn_face_hashtb, &key, (uword *) pfaceid, 0);
+ }
+
+ hicn_iface_to_face (face, dpo_nh);
+
+ temp_dpo.dpoi_index = ~0;
+
+ hicn_face_input_faces_t *in_faces =
+ hicn_face_get_vec (nat_address, &hicn_face_vec_hashtb);
+
+ if (in_faces == NULL)
+ {
+ hicn_face_input_faces_t in_faces_temp;
+ hicn_face_vec_t *vec;
+ pool_get (hicn_vec_pool, vec);
+ *vec = vec_new (hicn_face_id_t, 0);
+ u32 index = vec - hicn_vec_pool;
+ in_faces_temp.vec_id = index;
+ vec_add1 (*vec, *pfaceid);
+
+ in_faces_temp.face_id = *pfaceid;
+
+ hicn_face_get_key (nat_address, 0, &temp_dpo, &key);
+
+ mhash_set_mem (&hicn_face_vec_hashtb, &key,
+ (uword *) & in_faces_temp, 0);
+ }
+ else
+ {
+ hicn_face_vec_t *vec =
+ pool_elt_at_index (hicn_vec_pool, in_faces->vec_id);
+
+ /* */
+ if (vec_search (*vec, *pfaceid) != ~0)
+ return HICN_ERROR_FACE_ALREADY_CREATED;
+
+ vec_add1 (*vec, *pfaceid);
+
+ hicn_iface_to_face (face, dpo_nh);
+
+ hicn_face_get_key (nat_address, 0, &temp_dpo, &key);
+
+ mhash_set_mem (&hicn_face_vec_hashtb, &key, (uword *) in_faces, 0);
+
+ /* If the face is an application producer face, we set it as the preferred incoming face. */
+      /* This is required to handle the CS separation and the push api in a lightweight way. */
+ if (is_app_prod)
+ {
+ in_faces->face_id = *pfaceid;
+ }
+ }
+
+ retx_t *retx = vlib_process_signal_event_data (vlib_get_main (),
+ hicn_mapme_eventmgr_process_node.
+ index,
+ HICN_MAPME_EVENT_FACE_ADD, 1,
+ sizeof (retx_t));
+
+ /* *INDENT-OFF* */
+ *retx = (retx_t) {
+ .face_id = *pfaceid,
+ };
+ /* *INDENT-ON* */
+
+ return HICN_ERROR_NONE;
+}
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/hicn-plugin/src/faces/face.h b/hicn-plugin/src/faces/face.h
new file mode 100644
index 000000000..234c3fcc2
--- /dev/null
+++ b/hicn-plugin/src/faces/face.h
@@ -0,0 +1,798 @@
+/*
+ * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HICN_FACE_H__
+#define __HICN_FACE_H__
+
+#include <vnet/fib/fib_node.h>
+#include <vnet/vnet.h>
+#include <vlib/vlib.h>
+#include <vnet/ip/ip46_address.h>
+#include <vnet/dpo/dpo.h>
+#include <vnet/adj/adj_types.h>
+#include <vppinfra/bihash_8_8.h>
+#include <vnet/adj/adj_midchain.h>
+
+
+#include "../error.h"
+typedef u8 hicn_face_flags_t;
+typedef index_t hicn_face_id_t;
+
+/**
+ * @file face.h
+ *
+ * This file implements a general face type. The purpose of a face is to
+ * carry the needed information to forward interest and data packets to the
+ * next node in the network. There are two types of faces: complete faces (in short
+ * faces), and incomplete faces (in short ifaces).
+ *
+ * A face that does not contain the indication of the adjacency is an
+ * incomplete face (iface), otherwise it is considered to be complete. Ifaces are
+ * used to forward data back to the previous hICN hop from which we received an
+ * interest, while faces are used to forward interest packets to the next hicn node.
+ * Faces and ifaces are created at two different points in time. Faces are created
+ * when a route is added, while ifaces are created when an interest is received.
+ * In detail, faces and ifaces carry the following information:
+ * - nat_addr: the ip address to perform src nat or dst nat on interest and data packets, respectively;
+ * - pl_id: the path label
+ * - locks: the number of entities using this face. When 0 the face can be deallocated
+ * - dpo: the dpo that identifies the next node in the vlib graph for processing the vlib
+ * buffer. The dpo contains the dpo.dpoi_next field that points to the next node
+ * in the vlib graph and the dpo.dpoi_index which is an index to adj used by the next node
+ * to perform the l2 rewrite. In the case of ifaces, it is likely we don't know the
+ * adjacency when creating the face. In this case, the next node in the vlib graph
+ * will be the node that performs a lookup in the fib. Only in the case of udp tunnels,
+ * which are bidirectional tunnels, do we know that the incoming tunnel is also the
+ * outgoing one; in this case we store the tunnel in the dpo.dpoi_index field. For
+ * all the other tunnels (which are most likely unidirectional), the source address of
+ * the interest will be used to retrieve the outgoing tunnel when sending the corresponding
+ * data back.
+ * - sw_if: the incoming interface of the interest
+ * - fib_node, fib_entry_index and fib_sibling are information used to be notified of
+ * changes in the adjacency pointed by the dpo.
+ *
+ * We maintain two hash tables to retrieve faces and ifaces. The first hash table
+ * indexes faces and ifaces by nat_address, sw_if and dpo. It is used to retrieve existing
+ * faces or ifaces when an interest is received and when a new face is created. The second
+ * hash table indexes vectors of faces by nat_address and sw_if. It is used to retrieve the
+ * list of possible incoming faces when a data packet is received.
+ */
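+
+/*
+ * Illustrative sketch (not part of the API) of the two lookups described
+ * above, assuming a nat address and an incoming sw_if already extracted
+ * from the packet:
+ *
+ *   // Interest path: face/iface indexed by (nat_addr, sw_if, dpo/adjacency)
+ *   hicn_face_t *face =
+ *     hicn_face_get (&nat_addr, sw_if, &hicn_face_hashtb, adj_index);
+ *
+ *   // Data path: vector of candidate incoming faces indexed by nat_addr
+ *   hicn_face_input_faces_t *in_faces =
+ *     hicn_face_get_vec (&nat_addr, &hicn_face_vec_hashtb);
+ */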
+
+/**
+ * @brief Structure representing a face. It contains the fields shared among
+ * all the types of faces and leaves some space for storing additional
+ * information specific to each type.
+ */
+typedef struct __attribute__ ((packed)) hicn_face_s
+{
+  /* Flags to identify if the face is incomplete (iface) or complete (face) */
+ /* And a network or application face (1B) */
+ hicn_face_flags_t flags;
+
+ /* Align the upcoming fields */
+ u8 align;
+
+ /* Path label (2B) */
+ u16 pl_id;
+
+  /* Number of entities holding a reference to this face (4B) */
+ u32 locks;
+
+ /* Dpo for the adjacency (8B) */
+ union {
+ dpo_id_t dpo;
+ u64 align_dpo;
+ };
+
+  /* Nat address used on this face (src/dst nat on interest/data packets) */
+ ip46_address_t nat_addr;
+
+ /* local interface for the local ip address */
+ u32 sw_if;
+
+ fib_node_t fib_node;
+
+ fib_node_index_t fib_entry_index;
+
+ u32 fib_sibling;
+} hicn_face_t;
+
+/* Pool of faces */
+extern hicn_face_t *hicn_dpoi_face_pool;
+
+/* Flags */
+/* A face is complete and stores all the information. An iface lacks the
+   adj index, therefore sending a packet through an iface requires a lookup in
+   the FIB. */
+#define HICN_FACE_FLAGS_DEFAULT 0x00
+#define HICN_FACE_FLAGS_FACE 0x01
+#define HICN_FACE_FLAGS_IFACE 0x02
+#define HICN_FACE_FLAGS_APPFACE_PROD 0x04 /* Currently only IP face can be appface */
+#define HICN_FACE_FLAGS_APPFACE_CONS 0x08 /* Currently only IP face can be appface */
+#define HICN_FACE_FLAGS_DELETED 0x10
+
+#define HICN_FACE_NULL (hicn_face_id_t) ~0
+
+#define HICN_FACE_FLAGS_APPFACE_PROD_BIT 2
+#define HICN_FACE_FLAGS_APPFACE_CONS_BIT 3
+
+
+#define HICN_BUFFER_FLAGS_DEFAULT 0x00
+#define HICN_BUFFER_FLAGS_FACE_IS_APP 0x01
+
+STATIC_ASSERT ((1 << HICN_FACE_FLAGS_APPFACE_PROD_BIT) ==
+ HICN_FACE_FLAGS_APPFACE_PROD,
+ "HICN_FACE_FLAGS_APPFACE_PROD_BIT and HICN_FACE_FLAGS_APPFACE_PROD must correspond");
+
+STATIC_ASSERT ((1 << HICN_FACE_FLAGS_APPFACE_CONS_BIT) ==
+ HICN_FACE_FLAGS_APPFACE_CONS,
+ "HICN_FACE_FLAGS_APPFACE_CONS_BIT and HICN_FACE_FLAGS_APPFACE_CONS must correspond");
+
+STATIC_ASSERT ((HICN_FACE_FLAGS_APPFACE_PROD >>
+ HICN_FACE_FLAGS_APPFACE_PROD_BIT) ==
+ HICN_BUFFER_FLAGS_FACE_IS_APP,
+ "hicn buffer app flag does not correspond to HICN_FACE_FLAGS_APPFACE_PROD");
+
+STATIC_ASSERT ((HICN_FACE_FLAGS_APPFACE_CONS >>
+ HICN_FACE_FLAGS_APPFACE_CONS_BIT) ==
+ HICN_BUFFER_FLAGS_FACE_IS_APP,
+ "hicn buffer app flag does not correspond to HICN_FACE_FLAGS_APPFACE_PROD");
+
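+/*
+ * Example (sketch): thanks to the asserts above, the hicn buffer "app"
+ * flag can be derived from the face flags with a single shift, as done
+ * in the face lookup helpers below:
+ *
+ *   u8 hicnb_flags = HICN_BUFFER_FLAGS_DEFAULT;
+ *   hicnb_flags |= (face->flags & HICN_FACE_FLAGS_APPFACE_PROD) >>
+ *     HICN_FACE_FLAGS_APPFACE_PROD_BIT;
+ */
+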
+/**
+ * @brief Definition of the virtual function table for an hICN FACE DPO.
+ */
+typedef struct hicn_face_vft_s
+{
+ u8 *(*format_face) (u8 * s, va_list * args);
+ /**< Format an hICN face dpo*/
+ int (*hicn_face_del) (hicn_face_id_t face_id);
+ void (*hicn_face_get_dpo) (hicn_face_t * face, dpo_id_t * dpo);
+} hicn_face_vft_t;
+
+#define foreach_hicn_face_counter \
+ _(INTEREST_RX, 0, "Interest rx") \
+ _(INTEREST_TX, 1, "Interest tx") \
+ _(DATA_RX, 2, "Data rx") \
+ _(DATA_TX, 3, "Data tx") \
+
+typedef enum
+{
+#define _(a,b,c) HICN_FACE_COUNTERS_##a = (b),
+ foreach_hicn_face_counter
+#undef _
+ HICN_N_COUNTER
+} hicn_face_counters_t;
+
+extern mhash_t hicn_face_hashtb;
+
+extern const char *HICN_FACE_CTRX_STRING[];
+
+#define get_face_counter_string(ctrxno) (char *)(HICN_FACE_CTRX_STRING[ctrxno])
+
+
+/* Vector maintaining a dpo per face */
+extern dpo_id_t *face_dpo_vec;
+extern hicn_face_vft_t *face_vft_vec;
+
+/* Vector holding the set of face names */
+extern char **face_type_names_vec;
+
+/* First face type registered in the system. */
+extern dpo_type_t first_type;
+
+/* Per-face counters */
+extern vlib_combined_counter_main_t *counters;
+
+/**
+ * @brief Return the face id from the face object
+ *
+ * @param face_dpoi Pointer to the face state
+ * @return face id
+ */
+always_inline hicn_face_id_t
+hicn_dpoi_get_index (hicn_face_t * face_dpoi)
+{
+ return face_dpoi - hicn_dpoi_face_pool;
+}
+
+/**
+ * @brief Return the face object from the face id.
+ * This method is robust to invalid face id.
+ *
+ * @param dpoi_index Face identifier
+ * @return Pointer to the face or NULL
+ */
+always_inline hicn_face_t *
+hicn_dpoi_get_from_idx_safe (hicn_face_id_t dpoi_index)
+{
+ if (!pool_is_free_index(hicn_dpoi_face_pool, dpoi_index))
+ return (hicn_face_t *) pool_elt_at_index (hicn_dpoi_face_pool, dpoi_index);
+ else
+ return NULL;
+}
+
+/**
+ * @brief Return the face from the face id. Face id must be valid.
+ *
+ * @param dpoi_index Face identifier
+ * @return Pointer to the face
+ */
+always_inline hicn_face_t *
+hicn_dpoi_get_from_idx (hicn_face_id_t dpoi_index)
+{
+ return (hicn_face_t *) pool_elt_at_index (hicn_dpoi_face_pool, dpoi_index);
+}
+
+/**
+ * @brief Return true if the face id belongs to an existing face
+ */
+always_inline int
+hicn_dpoi_idx_is_valid (hicn_face_id_t face_id)
+{
+ return pool_len (hicn_dpoi_face_pool) > face_id
+ && !pool_is_free_index (hicn_dpoi_face_pool, face_id);
+}
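+
+/*
+ * Usage sketch: retrieving a face from an index that is not guaranteed to
+ * be valid. Either call hicn_dpoi_get_from_idx_safe() and check for NULL,
+ * or validate the index explicitly:
+ *
+ *   if (hicn_dpoi_idx_is_valid (face_id))
+ *     {
+ *       hicn_face_t *face = hicn_dpoi_get_from_idx (face_id);
+ *       // ... use the face ...
+ *     }
+ */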
+
+
+/**
+ * @brief Add a lock to the face
+ *
+ * @param face_id Index of the face to lock
+ */
+always_inline void
+hicn_face_lock_with_id (hicn_face_id_t face_id)
+{
+ hicn_face_t *face;
+ face = hicn_dpoi_get_from_idx (face_id);
+ face->locks++;
+}
+
+/**
+ * @brief Remove a lock from the face. The face can be deallocated when locks == 0
+ *
+ * @param face_id Index of the face to unlock
+ */
+always_inline void
+hicn_face_unlock_with_id (hicn_face_id_t face_id)
+{
+ hicn_face_t *face;
+ face = hicn_dpoi_get_from_idx (face_id);
+ face->locks--;
+}
+
+/**
+ * @brief Add a lock to the face through its dpo
+ *
+ * @param dpo Pointer to the face dpo
+ */
+always_inline void
+hicn_face_lock (dpo_id_t * dpo)
+{
+ hicn_face_lock_with_id(dpo->dpoi_index);
+}
+
+/**
+ * @brief Remove a lock from the face through its dpo. The face can be deallocated when locks == 0
+ *
+ * @param dpo Pointer to the face dpo
+ */
+always_inline void
+hicn_face_unlock (dpo_id_t * dpo)
+{
+ hicn_face_unlock_with_id (dpo->dpoi_index);
+}
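+
+/*
+ * Sketch of the expected lock discipline: an entity that stores a face id
+ * takes a lock and releases it when done, so that the face can be
+ * deallocated once locks drops back to 0:
+ *
+ *   hicn_face_lock_with_id (face_id);
+ *   // ... the face id is stored and used ...
+ *   hicn_face_unlock_with_id (face_id);
+ */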
+
+
+/**
+ * @brief Init the internal structures of the face module
+ *
+ * Must be called before processing any packet
+ */
+void hicn_face_module_init (vlib_main_t * vm);
+
+u8 * format_hicn_face (u8 * s, va_list * args);
+
+
+/**
+ * @brief Format all the existing faces
+ *
+ * @param s Pointer to a previous string. If null it will be initialized
+ * @param n Number of input parameters
+ * @return String with the faces formatted
+ */
+u8 *format_hicn_face_all (u8 * s, int n, ...);
+
+/**
+ * @brief Delete a face
+ *
+ * @param face_id Id of the face to delete
+ * @return HICN_ERROR_FACE_NOT_FOUND if the face does not exist, otherwise
+ * HICN_ERROR_NONE
+ */
+int hicn_face_del (hicn_face_id_t face_id);
+
+/**
+ * @brief Vector of faces used to collect faces having the same local address
+ *
+ */
+typedef hicn_face_id_t *hicn_face_vec_t;
+
+typedef struct hicn_input_faces_s_
+{
+ /* Vector of all possible input faces */
+ u32 vec_id;
+
+  /* Preferred face. If a prod_app face is in the vector it will be the preferred one. */
+  /* It's not possible to have multiple prod_app faces in the same vector, as they would have */
+  /* the same local address. Every prod_app face is a point-to-point face between the forwarder */
+ /* and the application. */
+ hicn_face_id_t face_id;
+
+} hicn_face_input_faces_t;
+
+/**
+ * Pool containing the vector of possible incoming faces.
+ */
+extern hicn_face_vec_t *hicn_vec_pool;
+
+/**
+ * Hash table that indexes the vectors of possible incoming faces by nat
+ * address, for fast lookup when a data packet arrives.
+ */
+extern mhash_t hicn_face_vec_hashtb;
+
+
+/**
+ * Key definition for the mhash table. A face is uniquely identified by ip
+ * address, the interface id and a dpo pointing to the next node in the vlib graph.
+ * The ip address can correspond to the remote ip address of the next hicn hop,
+ * or to the local address of the receiving interface. The former is used to
+ * retrieve the incoming face when an interest is received, the latter when
+ * the arriving packet is a data packet.
+ * In the case of an iface, the following structure can be filled in different ways:
+ * - dpo equal to DPO_INVALID when the iface is a regular hICN iface
+ * - in case of udp_tunnel dpo =
+ * {
+ * .dpoi_index = tunnel_id,
+ * .dpoi_type = DPO_FIRST, //We don't need the type, we leave it invalid
+ * .dpoi_proto = DPO_PROTO_IP4 or DPO_PROTO_IP6,
+ * .dpoi_next_node = HICN6_IFACE_OUTPUT_NEXT_UDP4_ENCAP or
+ * HICN6_IFACE_OUTPUT_NEXT_UDP6_ENCAP or
+ * HICN4_IFACE_OUTPUT_NEXT_UDP4_ENCAP or
+ * HICN4_IFACE_OUTPUT_NEXT_UDP6_ENCAP
+ * }
+ */
+typedef struct __attribute__ ((packed)) hicn_face_key_s
+{
+ ip46_address_t addr;
+ union {
+ dpo_id_t dpo;
+ u64 align_dpo;
+ };
+ u32 sw_if;
+} hicn_face_key_t;
+
+/**
+ * @brief Create the key object for the mhash. Fill in the key object with the
+ * expected values.
+ *
+ * @param addr nat address of the face
+ * @param sw_if interface associated to the face
+ * @param dpo dpo to store in the key (for ifaces, an invalid dpo carrying only the adjacency index)
+ * @param key Pointer to an allocated hicn_face_key_t object
+ */
+always_inline void
+hicn_face_get_key (const ip46_address_t * addr,
+ u32 sw_if, const dpo_id_t * dpo, hicn_face_key_t * key)
+{
+ key->dpo = *dpo;
+ key->addr = *addr;
+ key->sw_if = sw_if;
+}
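+
+/*
+ * Example (sketch): building the key of a plain hICN iface, for which only
+ * the adjacency index of the dpo is meaningful (see the comment above
+ * hicn_face_key_t):
+ *
+ *   hicn_face_key_t key;
+ *   dpo_id_t dpo = DPO_INVALID;
+ *   dpo.dpoi_index = adj_index;
+ *   hicn_face_get_key (&nat_addr, sw_if, &dpo, &key);
+ */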
+
+/**
+ * @brief Get the face obj from the nat address. Adds a lock on the face if found.
+ *
+ * @param addr Nat address used to create the key for the hash table.
+ * @param sw_if Software interface id used to create the key for the hash table.
+ * @param hashtb Hash table (remote or local) where to perform the lookup.
+ * @param adj_index Adjacency index used to create the key for the hash table.
+ *
+ * @result Pointer to the face, or NULL if not found.
+ */
+always_inline hicn_face_t *
+hicn_face_get (const ip46_address_t * addr, u32 sw_if, mhash_t * hashtb, index_t adj_index)
+{
+ hicn_face_key_t key;
+
+ dpo_id_t dpo = DPO_INVALID;
+
+ dpo.dpoi_index = adj_index;
+
+ hicn_face_get_key (addr, sw_if, &dpo, &key);
+
+ hicn_face_id_t *dpoi_index = (hicn_face_id_t *) mhash_get (hashtb,
+ &key);
+
+ if ( dpoi_index != NULL)
+ {
+ hicn_face_lock_with_id(*dpoi_index);
+ return hicn_dpoi_get_from_idx (*dpoi_index);
+ }
+
+ return NULL;
+}
+
+/**
+ * @brief Get the face obj from the nat address and the dpo. Adds a lock on the face if found.
+ *
+ * @param addr Nat address used to create the key for the hash table.
+ * @param sw_if Software interface id used to create the key for the hash table.
+ * @param dpo Dpo used to create the key for the hash table.
+ * @param hashtb Hash table (remote or local) where to perform the lookup.
+ *
+ * @result Pointer to the face, or NULL if not found.
+ */
+always_inline hicn_face_t *
+hicn_face_get_with_dpo (const ip46_address_t * addr, u32 sw_if, const dpo_id_t * dpo, mhash_t * hashtb)
+{
+ hicn_face_key_t key;
+
+ hicn_face_get_key (addr, sw_if, dpo, &key);
+
+ hicn_face_id_t *dpoi_index = (hicn_face_id_t *) mhash_get (hashtb,
+ &key);
+
+ if ( dpoi_index != NULL)
+ {
+ hicn_face_lock_with_id(*dpoi_index);
+ return hicn_dpoi_get_from_idx (*dpoi_index);
+ }
+
+ return NULL;
+}
+
+/**
+ * @brief Get the vector of possible incoming faces from the nat address. Does not add any lock.
+ *
+ * @param addr Nat address used to create the key for the hash table.
+ * @param hashtb Hash table where to perform the lookup.
+ *
+ * @result Pointer to the vector of possible incoming faces, or NULL if not found.
+ */
+always_inline hicn_face_input_faces_t *
+hicn_face_get_vec (const ip46_address_t * addr,
+ mhash_t * hashtb)
+{
+ hicn_face_key_t key;
+
+ dpo_id_t dpo = DPO_INVALID;
+
+ hicn_face_get_key (addr, 0, &dpo, &key);
+ return (hicn_face_input_faces_t *) mhash_get (hashtb, &key);
+}
+
+/**
+ * @brief Create a new face. API for other modules (e.g., routing)
+ *
+ * @param dpo_nh dpo contained in the face that points to the next node in
+ * the vlib graph
+ * @param nat_addr nat ip v4 or v6 address of the face
+ * @param sw_if interface associated to the face
+ * @param pfaceid Pointer to return the face id
+ * @param is_app_prod if non-zero the face is a local application producer face
+ * and becomes the preferred incoming face for its address
+ * @return HICN_ERROR_FACE_ALREADY_CREATED if the face already exists, otherwise
+ * HICN_ERROR_NONE
+ */
+int hicn_face_add (const dpo_id_t * dpo_nh,
+ ip46_address_t * nat_address,
+ int sw_if,
+ hicn_face_id_t * pfaceid,
+ u8 is_app_prod);
+
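+/*
+ * Usage sketch (e.g. from the route module), assuming dpo, nat_addr and
+ * sw_if_index are already available:
+ *
+ *   hicn_face_id_t face_id;
+ *   int rv = hicn_face_add (&dpo, &nat_addr, sw_if_index, &face_id, 0);
+ *   if (rv != HICN_ERROR_NONE && rv != HICN_ERROR_FACE_ALREADY_CREATED)
+ *     return rv;
+ */
+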
+/**
+ * @brief Create a new incomplete face (iface). (Meant to be used by the data plane)
+ *
+ * @param nat_address nat ip v4 or v6 address of the face
+ * @param sw_if interface associated to the face
+ * @param pfaceid Pointer to return the face id
+ * @param proto dpo protocol of the face
+ * @param adj_index adjacency index to store in the face dpo
+ */
+always_inline void
+hicn_iface_add (ip46_address_t * nat_address, int sw_if,
+ hicn_face_id_t * pfaceid, dpo_proto_t proto,
+ u32 adj_index)
+{
+ hicn_face_t *face;
+ pool_get (hicn_dpoi_face_pool, face);
+
+ clib_memcpy (&(face->nat_addr), nat_address,
+ sizeof (ip46_address_t));
+ face->sw_if = sw_if;
+
+ face->dpo.dpoi_type = DPO_FIRST;
+ face->dpo.dpoi_proto = DPO_PROTO_NONE;
+ face->dpo.dpoi_index = adj_index;
+ face->dpo.dpoi_next_node = 0;
+ face->pl_id = (u16) 0;
+ face->flags = HICN_FACE_FLAGS_IFACE;
+ face->locks = 1;
+
+ hicn_face_key_t key;
+ hicn_face_get_key (nat_address, sw_if, &face->dpo, &key);
+ *pfaceid = hicn_dpoi_get_index (face);
+
+ mhash_set_mem (&hicn_face_hashtb, &key, (uword *) pfaceid, 0);
+
+ for (int i = 0; i < HICN_N_COUNTER; i++)
+ {
+ vlib_validate_combined_counter (&counters[(*pfaceid) * HICN_N_COUNTER],
+ i);
+ vlib_zero_combined_counter (&counters[(*pfaceid) * HICN_N_COUNTER], i);
+ }
+}
+
+/**** Helpers to manipulate faces and ifaces from the face/iface input nodes ****/
+
+/**
+ * @brief Retrieve the vector of possible incoming faces from the ip4 nat
+ * address and return its index together with the preferred face id.
+ *
+ * @param face_id: Preferred incoming face for the nat address
+ * @param in_faces_vec_id: Index of the vector of possible incoming faces
+ * @param hicnb_flags: Flags that indicate whether the face is an application
+ * face or not
+ * @param nat_addr: Ip v4 nat address of the face
+ *
+ * @result HICN_ERROR_FACE_NOT_FOUND if the face does not exist, otherwise HICN_ERROR_NONE.
+ */
+always_inline int
+hicn_face_ip4_lock (hicn_face_id_t * face_id,
+ u32 * in_faces_vec_id,
+ u8 * hicnb_flags,
+ const ip4_address_t * nat_addr)
+{
+ ip46_address_t ip_address = {0};
+ ip46_address_set_ip4(&ip_address, nat_addr);
+ hicn_face_input_faces_t *in_faces_vec =
+ hicn_face_get_vec (&ip_address, &hicn_face_vec_hashtb);
+
+ if (PREDICT_FALSE (in_faces_vec == NULL))
+ return HICN_ERROR_FACE_NOT_FOUND;
+
+ *in_faces_vec_id = in_faces_vec->vec_id;
+ hicn_face_t *face = hicn_dpoi_get_from_idx (in_faces_vec->face_id);
+
+ *hicnb_flags = HICN_BUFFER_FLAGS_DEFAULT;
+ *hicnb_flags |=
+ (face->flags & HICN_FACE_FLAGS_APPFACE_PROD) >>
+ HICN_FACE_FLAGS_APPFACE_PROD_BIT;
+
+ *face_id = in_faces_vec->face_id;
+
+ return HICN_ERROR_NONE;
+}
+
+/**
+ * @brief Retrieve the vector of possible incoming faces from the ip6 nat
+ * address and return its index together with the preferred face id.
+ *
+ * @param face_id: Preferred incoming face for the nat address
+ * @param in_faces_vec_id: Index of the vector of possible incoming faces
+ * @param hicnb_flags: Flags that indicate whether the face is an application
+ * face or not
+ * @param nat_addr: Ip v6 nat address of the face
+ *
+ * @result HICN_ERROR_FACE_NOT_FOUND if the face does not exist, otherwise HICN_ERROR_NONE.
+ */
+always_inline int
+hicn_face_ip6_lock (hicn_face_id_t * face_id,
+ u32 * in_faces_vec_id,
+ u8 * hicnb_flags,
+ const ip6_address_t * nat_addr)
+{
+ hicn_face_input_faces_t *in_faces_vec =
+ hicn_face_get_vec ((ip46_address_t *)nat_addr, &hicn_face_vec_hashtb);
+
+ if (PREDICT_FALSE (in_faces_vec == NULL))
+ return HICN_ERROR_FACE_NOT_FOUND;
+
+ *in_faces_vec_id = in_faces_vec->vec_id;
+ hicn_face_t *face = hicn_dpoi_get_from_idx (in_faces_vec->face_id);
+
+ *hicnb_flags = HICN_BUFFER_FLAGS_DEFAULT;
+ *hicnb_flags |=
+ (face->flags & HICN_FACE_FLAGS_APPFACE_PROD) >>
+ HICN_FACE_FLAGS_APPFACE_PROD_BIT;
+
+ *face_id = in_faces_vec->face_id;
+
+ return HICN_ERROR_NONE;
+}
+
+/**
+ * @brief Callback that stores the tunnel adjacency in the face dpo and stacks it
+ */
+static adj_walk_rc_t
+hicn4_iface_adj_walk_cb (adj_index_t ai,
+ void *ctx)
+{
+
+ hicn_face_t *face = (hicn_face_t *)ctx;
+
+ dpo_set(&face->dpo, DPO_ADJACENCY_MIDCHAIN, DPO_PROTO_IP4, ai);
+ adj_nbr_midchain_stack(ai, &face->dpo);
+
+ return (ADJ_WALK_RC_CONTINUE);
+}
+
+/**
+ * @brief Retrieve, or create if it doesn't exist, an iface from the ip4 nat
+ * address and return its index. This method adds a lock on the face state.
+ *
+ * @param index: Index of the retrieved or created face
+ * @param hicnb_flags: Flags that indicate whether the face is an application
+ * face or not
+ * @param nat_addr: Ip v4 remote address of the face
+ * @param sw_if: software interface id of the face
+ * @param adj_index: adjacency index to store in the face dpo
+ * @param node_index: vlib edge index to use in the packet processing
+ */
+always_inline void
+hicn_iface_ip4_add_and_lock (hicn_face_id_t * index,
+ u8 * hicnb_flags,
+ const ip4_address_t * nat_addr,
+ u32 sw_if, u32 adj_index, u32 node_index)
+{
+  /* All (complete) faces are indexed by remote address as well */
+
+ ip46_address_t ip_address = {0};
+ ip46_address_set_ip4(&ip_address, nat_addr);
+
+ /* if the face exists, it adds a lock */
+ hicn_face_t *face =
+ hicn_face_get (&ip_address, sw_if, &hicn_face_hashtb, adj_index);
+
+ if (face == NULL)
+ {
+ hicn_face_id_t idx;
+ hicn_iface_add (&ip_address, sw_if, &idx, DPO_PROTO_IP4, adj_index);
+
+ face = hicn_dpoi_get_from_idx(idx);
+
+ face->dpo.dpoi_type = DPO_FIRST;
+ face->dpo.dpoi_proto = DPO_PROTO_IP4;
+ face->dpo.dpoi_index = adj_index;
+ face->dpo.dpoi_next_node = node_index;
+
+ /* if (nat_addr->as_u32 == 0) */
+ /* { */
+ adj_nbr_walk(face->sw_if,
+ FIB_PROTOCOL_IP4,
+ hicn4_iface_adj_walk_cb,
+ face);
+ /* } */
+
+ *hicnb_flags = HICN_BUFFER_FLAGS_DEFAULT;
+
+ *index = idx;
+ return;
+ }
+ else
+ {
+ /* unlock the face. We don't take a lock on each interest we receive */
+ hicn_face_id_t face_id = hicn_dpoi_get_index(face);
+ hicn_face_unlock_with_id(face_id);
+ }
+
+ /* Code replicated on purpose */
+ *hicnb_flags = HICN_BUFFER_FLAGS_DEFAULT;
+ *hicnb_flags |=
+ (face->flags & HICN_FACE_FLAGS_APPFACE_PROD) >>
+ HICN_FACE_FLAGS_APPFACE_PROD_BIT;
+
+ *index = hicn_dpoi_get_index (face);
+}
+
+/**
+ * @brief Callback that stores the tunnel (midchain) adjacency in the face dpo and stacks it
+ */
+static adj_walk_rc_t
+hicn6_iface_adj_walk_cb (adj_index_t ai,
+ void *ctx)
+{
+
+ hicn_face_t *face = (hicn_face_t *)ctx;
+
+ ip_adjacency_t *adj = adj_get(ai);
+ if ((adj->lookup_next_index == IP_LOOKUP_NEXT_MIDCHAIN) ||
+ (adj->lookup_next_index == IP_LOOKUP_NEXT_MCAST_MIDCHAIN))
+ {
+ dpo_set(&face->dpo, DPO_ADJACENCY_MIDCHAIN, adj->ia_nh_proto, ai);
+ adj_nbr_midchain_stack(ai, &face->dpo);
+ }
+
+ return (ADJ_WALK_RC_CONTINUE);
+}
+
+
+/**
+ * @brief Retrieve, or create if it doesn't exist, an iface from the ip6 nat
+ * address and return its index. This method adds a lock on the face state.
+ *
+ * @param index: Index of the retrieved or created face
+ * @param hicnb_flags: Flags that indicate whether the face is an application
+ * face or not
+ * @param nat_addr: Ip v6 remote address of the face
+ * @param sw_if: software interface id of the face
+ * @param adj_index: adjacency index to store in the face dpo
+ * @param node_index: vlib edge index to use in the packet processing
+ */
+always_inline void
+hicn_iface_ip6_add_and_lock (hicn_face_id_t * index,
+ u8 * hicnb_flags,
+ const ip6_address_t * nat_addr,
+ u32 sw_if, u32 adj_index, u32 node_index)
+{
+  /* All (complete) faces are indexed by remote address as well */
+ /* if the face exists, it adds a lock */
+ hicn_face_t *face =
+ hicn_face_get ((ip46_address_t *)nat_addr, sw_if, &hicn_face_hashtb, adj_index);
+
+ if (face == NULL)
+ {
+ hicn_face_id_t idx;
+ hicn_iface_add ((ip46_address_t *) nat_addr, sw_if, &idx, DPO_PROTO_IP6, adj_index);
+
+ face = hicn_dpoi_get_from_idx(idx);
+
+ face->dpo.dpoi_type = DPO_FIRST;
+ face->dpo.dpoi_proto = DPO_PROTO_IP6;
+ face->dpo.dpoi_index = adj_index;
+ face->dpo.dpoi_next_node = node_index;
+
+ adj_nbr_walk(face->sw_if,
+ FIB_PROTOCOL_IP6,
+ hicn6_iface_adj_walk_cb,
+ face);
+
+ *hicnb_flags = HICN_BUFFER_FLAGS_DEFAULT;
+
+ *index = idx;
+
+ return;
+ }
+ else
+ {
+ /* unlock the face. We don't take a lock on each interest we receive */
+ hicn_face_id_t face_id = hicn_dpoi_get_index(face);
+ hicn_face_unlock_with_id(face_id);
+ }
+
+ /* Code replicated on purpose */
+ *hicnb_flags = HICN_BUFFER_FLAGS_DEFAULT;
+ *hicnb_flags |=
+ (face->flags & HICN_FACE_FLAGS_APPFACE_PROD) >>
+ HICN_FACE_FLAGS_APPFACE_PROD_BIT;
+
+ *index = hicn_dpoi_get_index (face);
+}
+
+#endif // __HICN_FACE_H__
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/hicn-plugin/src/faces/face_cli.c b/hicn-plugin/src/faces/face_cli.c
new file mode 100644
index 000000000..e9e516cc6
--- /dev/null
+++ b/hicn-plugin/src/faces/face_cli.c
@@ -0,0 +1,194 @@
+/*
+ * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vlib/vlib.h>
+#include <vppinfra/error.h>
+#include "face.h"
+#include "../error.h"
+
+static clib_error_t *
+hicn_face_cli_show_command_fn (vlib_main_t * vm,
+ unformat_input_t * main_input,
+ vlib_cli_command_t * cmd)
+{
+
+ hicn_face_id_t face_id = HICN_FACE_NULL;
+ char *face_type_name = NULL;
+ int found = ~0;
+ int deleted = 0;
+ u8 *n = 0;
+ u8 *s = 0;
+ vlib_counter_t v;
+
+ /* Get a line of input. */
+ unformat_input_t _line_input, *line_input = &_line_input;
+ if (unformat_user (main_input, unformat_line_input, line_input))
+ {
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "%u", &face_id))
+ ;
+ else if (unformat (line_input, "type %s", &face_type_name))
+ ;
+ else if (unformat (line_input, "deleted"))
+ deleted = 1;
+ else
+ {
+ return clib_error_return (0, "%s",
+ get_error_string
+ (HICN_ERROR_CLI_INVAL));
+ }
+ }
+
+ if (face_type_name != NULL)
+ {
+ int idx = 0;
+ vec_foreach_index (idx, face_type_names_vec)
+ {
+ if (!strcmp (face_type_names_vec[idx], face_type_name))
+ found = idx;
+ }
+ if (found == ~0)
+ return (clib_error_return (0, "Face type unknown"));
+ }
+
+ }
+
+ if (face_id != HICN_FACE_NULL)
+ {
+ if (!hicn_dpoi_idx_is_valid (face_id))
+ return clib_error_return (0, "%s",
+ get_error_string
+ (HICN_ERROR_FACE_NOT_FOUND));
+
+ hicn_face_t *face = hicn_dpoi_get_from_idx (face_id);
+ vlib_cli_output (vm, "%U\n", format_hicn_face, face_id, 0 /*indent */ );
+
+ u32 indent = 3;
+
+ for (int i = 0; i < HICN_N_COUNTER; i++)
+ {
+ vlib_get_combined_counter (&counters
+ [hicn_dpoi_get_index (face) *
+ HICN_N_COUNTER], i, &v);
+ s =
+ format (s, "%U%s", format_white_space, indent,
+ HICN_FACE_CTRX_STRING[i]);
+
+ if (n)
+ _vec_len (n) = 0;
+ n = format (n, "packets");
+ s =
+ format (s, "%U%-16v%16Ld", format_white_space,
+ 30 - strlen (HICN_FACE_CTRX_STRING[i]), n, v.packets);
+
+ _vec_len (n) = 0;
+ n = format (n, "bytes");
+ s = format (s, "\n%U%-16v%16Ld\n",
+ format_white_space, indent + 30, n, v.bytes);
+ }
+ vlib_cli_output (vm, "%s\n", s);
+ }
+ else
+ {
+ if (found != ~0)
+ {
+ hicn_face_t *face;
+ /* *INDENT-OFF* */
+ pool_foreach(face, hicn_dpoi_face_pool,
+ {
+ if (!((face->flags & HICN_FACE_FLAGS_DELETED) && !deleted))
+ {
+ if (face->flags)
+ {
+ vlib_cli_output(vm, "%U\n", format_hicn_face, hicn_dpoi_get_index(face), 0);
+ u8 * s = 0;
+ u32 indent = 3;
+
+ for (int i = 0; i < HICN_N_COUNTER; i++)
+ {
+ vlib_get_combined_counter (&counters[hicn_dpoi_get_index(face) * HICN_N_COUNTER], i, &v);
+ s = format (s, "%U%s",format_white_space, indent, HICN_FACE_CTRX_STRING[i]);
+
+ if (n)
+ _vec_len (n) = 0;
+ n = format (n, "packets");
+ s = format (s, "%U%-16v%16Ld", format_white_space, 30-strlen(HICN_FACE_CTRX_STRING[i]), n, v.packets);
+
+ _vec_len (n) = 0;
+ n = format (n, "bytes");
+ s = format (s, "\n%U%-16v%16Ld\n",
+ format_white_space, indent+30, n, v.bytes);
+ }
+ vlib_cli_output (vm, "%s\n", s);
+ }
+ }
+ });
+ /* *INDENT-ON* */
+ }
+ else
+ {
+ hicn_face_t *face;
+ /* *INDENT-OFF* */
+ pool_foreach(face, hicn_dpoi_face_pool,
+ {
+ if (!((face->flags & HICN_FACE_FLAGS_DELETED) && !deleted))
+ {
+ vlib_cli_output(vm, "%U\n", format_hicn_face, hicn_dpoi_get_index(face), 0);
+ u32 indent = 3;
+ u8 * s = 0;
+
+ for (int i = 0; i < HICN_N_COUNTER; i++)
+ {
+ vlib_get_combined_counter (&counters[hicn_dpoi_get_index(face) * HICN_N_COUNTER], i, &v);
+ s = format (s, "%U%s",format_white_space, indent, HICN_FACE_CTRX_STRING[i]);
+
+ if (n)
+ _vec_len (n) = 0;
+ n = format (n, "packets");
+ s = format (s, "%U%-16v%16Ld", format_white_space, 30-strlen(HICN_FACE_CTRX_STRING[i]), n, v.packets);
+
+ _vec_len (n) = 0;
+ n = format (n, "bytes");
+ s = format (s, "\n%U%-16v%16Ld\n",
+ format_white_space, indent+30, n, v.bytes);
+ }
+ vlib_cli_output (vm, "%s\n", s);
+ }
+ });
+ /* *INDENT-ON* */
+ }
+ }
+
+ return 0;
+}
+
+/* cli declaration for 'show faces' */
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (hicn_face_cli_show_command, static) =
+{
+ .path = "hicn face show",
+ .short_help = "hicn face show [<face_id>]",
+ .function = hicn_face_cli_show_command_fn,
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/hicn-plugin/src/faces/face_node.c b/hicn-plugin/src/faces/face_node.c
new file mode 100644
index 000000000..e1fd81ca0
--- /dev/null
+++ b/hicn-plugin/src/faces/face_node.c
@@ -0,0 +1,940 @@
+/*
+ * Copyright (c) 2020 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/adj/adj.h>
+
+#include "face.h"
+#include "face_node.h"
+#include "../strategy_dpo_manager.h"
+#include "face.h"
+#include "../cache_policies/cs_lru.h"
+#include "../infra.h"
+#include "../hicn.h"
+
+/**
+ * @file
+ *
+ * Definition of the input and output nodes for ip (complete) faces.
+ */
+
+vlib_node_registration_t hicn4_face_input_node;
+vlib_node_registration_t hicn4_face_output_node;
+vlib_node_registration_t hicn6_face_input_node;
+vlib_node_registration_t hicn6_face_output_node;
+
+#define ip_v4 4
+#define ip_v6 6
+
+static char *hicn4_face_input_error_strings[] = {
+#define _(sym, string) string,
+ foreach_hicnfwd_error
+#undef _
+};
+
+static char *hicn6_face_input_error_strings[] = {
+#define _(sym, string) string,
+ foreach_hicnfwd_error
+#undef _
+};
+
+/* Trace context struct */
+typedef struct
+{
+ u32 next_index;
+ u32 sw_if_index;
+ u8 pkt_type;
+ u8 packet_data[60];
+}
+hicn4_face_input_trace_t;
+
+typedef enum
+{
+ HICN4_FACE_INPUT_NEXT_DATA,
+ HICN4_FACE_INPUT_NEXT_MAPME,
+ HICN4_FACE_INPUT_NEXT_ERROR_DROP,
+ HICN4_FACE_INPUT_N_NEXT,
+} hicn4_face_input_next_t;
+
+/* Trace context struct */
+typedef struct
+{
+ u32 next_index;
+ u32 sw_if_index;
+ u8 pkt_type;
+ u8 packet_data[60];
+}
+hicn6_face_input_trace_t;
+
+typedef enum
+{
+ HICN6_FACE_INPUT_NEXT_DATA,
+ HICN6_FACE_INPUT_NEXT_MAPME,
+ HICN6_FACE_INPUT_NEXT_ERROR_DROP,
+ HICN6_FACE_INPUT_N_NEXT,
+} hicn6_face_input_next_t;
+
+#define NEXT_MAPME_IP4 HICN4_FACE_INPUT_NEXT_MAPME
+#define NEXT_MAPME_IP6 HICN6_FACE_INPUT_NEXT_MAPME
+#define NEXT_DATA_IP4 HICN4_FACE_INPUT_NEXT_DATA
+#define NEXT_DATA_IP6 HICN6_FACE_INPUT_NEXT_DATA
+
+#define NEXT_ERROR_DROP_IP4 HICN4_FACE_INPUT_NEXT_ERROR_DROP
+#define NEXT_ERROR_DROP_IP6 HICN6_FACE_INPUT_NEXT_ERROR_DROP
+
+#define IP_HEADER_4 ip4_header_t
+#define IP_HEADER_6 ip6_header_t
+
+#define LOCK_DPO_FACE_IP4 hicn_face_ip4_lock
+#define LOCK_DPO_FACE_IP6 hicn_face_ip6_lock
+
+#define TRACE_INPUT_PKT_IP4 hicn4_face_input_trace_t
+#define TRACE_INPUT_PKT_IP6 hicn6_face_input_trace_t
+
+/*
+ * NOTE: Both hicn4_face_input_node_fn and hicn6_face_input_node_fn
+ * share a similar codebase. Macros are hard to debug, although the
+ * following code is pretty straightforward and most of the complexity is in
+ * functions that can be easily debugged.
+ */
+#define face_input_x1(ipv) \
+ do{ \
+ vlib_buffer_t *b0; \
+ u32 bi0; \
+ u32 next0 = NEXT_ERROR_DROP_IP##ipv; \
+ IP_HEADER_##ipv * ip_hdr = NULL; \
+ hicn_buffer_t * hicnb0; \
+ int ret; \
+ /* Prefetch for next iteration. */ \
+ if (n_left_from > 1) \
+ { \
+ vlib_buffer_t *b1; \
+ b1 = vlib_get_buffer (vm, from[1]); \
+ CLIB_PREFETCH (b1, 2*CLIB_CACHE_LINE_BYTES, STORE); \
+ CLIB_PREFETCH (b1->data, CLIB_CACHE_LINE_BYTES , LOAD); \
+ } \
+ /* Dequeue a packet buffer */ \
+ bi0 = from[0]; \
+ from += 1; \
+ n_left_from -= 1; \
+ to_next[0] = bi0; \
+ to_next += 1; \
+ n_left_to_next -= 1; \
+ \
+ b0 = vlib_get_buffer (vm, bi0); \
+ hicnb0 = hicn_get_buffer(b0); \
+ ip_hdr = (IP_HEADER_##ipv *) vlib_buffer_get_current(b0); \
+ \
+ u8 is_icmp = ip_hdr->protocol == IPPROTO_ICMPV##ipv; \
+ \
+ next0 = is_icmp*NEXT_MAPME_IP##ipv + \
+ (1-is_icmp)*NEXT_DATA_IP##ipv; \
+ \
+ ret = LOCK_DPO_FACE_IP##ipv \
+ (&(hicnb0->face_id), \
+ &(hicnb0->in_faces_vec_id), \
+ &hicnb0->flags, \
+ &(ip_hdr->dst_address)); \
+ \
+ if ( PREDICT_FALSE(ret != HICN_ERROR_NONE) ) \
+ next0 = NEXT_ERROR_DROP_IP##ipv; \
+ else \
+ { \
+ vlib_increment_combined_counter ( \
+ &counters[hicnb0->face_id \
+ * HICN_N_COUNTER], thread_index, \
+ HICN_FACE_COUNTERS_DATA_RX, \
+ 1, \
+ vlib_buffer_length_in_chain(vm, b0)); \
+ stats.pkts_data_count += 1; \
+ } \
+ \
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) && \
+ (b0->flags & VLIB_BUFFER_IS_TRACED))) \
+ { \
+ TRACE_INPUT_PKT_IP##ipv *t = \
+ vlib_add_trace (vm, node, b0, sizeof (*t)); \
+ t->pkt_type = HICN_PKT_TYPE_INTEREST; \
+ t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX]; \
+ t->next_index = next0; \
+ clib_memcpy_fast (t->packet_data, \
+ vlib_buffer_get_current (b0), \
+ sizeof (t->packet_data)); \
+ } \
+ \
+ \
+ /* Verify speculative enqueue, maybe switch current next frame */ \
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index, \
+ to_next, n_left_to_next, \
+ bi0, next0); \
+ }while(0)
+
+
+#define face_input_x2(ipv) \
+ do{ \
+ vlib_buffer_t *b0, *b1; \
+ u32 bi0, bi1; \
+ u32 next0 = NEXT_ERROR_DROP_IP##ipv; \
+ u32 next1 = NEXT_ERROR_DROP_IP##ipv; \
+ IP_HEADER_##ipv * ip_hdr0 = NULL; \
+ IP_HEADER_##ipv * ip_hdr1 = NULL; \
+ hicn_buffer_t * hicnb0; \
+ hicn_buffer_t * hicnb1; \
+ int ret0, ret1; \
+ /* Prefetch for next iteration. */ \
+ { \
+ vlib_buffer_t *b2, *b3; \
+ b2 = vlib_get_buffer (vm, from[2]); \
+ b3 = vlib_get_buffer (vm, from[3]); \
+ CLIB_PREFETCH (b2, 2*CLIB_CACHE_LINE_BYTES, STORE); \
+ CLIB_PREFETCH (b3, 2*CLIB_CACHE_LINE_BYTES, STORE); \
+ CLIB_PREFETCH (b2->data, CLIB_CACHE_LINE_BYTES , LOAD); \
+ CLIB_PREFETCH (b3->data, CLIB_CACHE_LINE_BYTES , LOAD); \
+ } \
+ /* Dequeue a packet buffer */ \
+ bi0 = from[0]; \
+ bi1 = from[1]; \
+ from += 2; \
+ n_left_from -= 2; \
+ to_next[0] = bi0; \
+ to_next[1] = bi1; \
+ to_next += 2; \
+ n_left_to_next -= 2; \
+ \
+ b0 = vlib_get_buffer (vm, bi0); \
+ b1 = vlib_get_buffer (vm, bi1); \
+ hicnb0 = hicn_get_buffer(b0); \
+ hicnb1 = hicn_get_buffer(b1); \
+ ip_hdr0 = (IP_HEADER_##ipv *) vlib_buffer_get_current(b0); \
+ ip_hdr1 = (IP_HEADER_##ipv *) vlib_buffer_get_current(b1); \
+ \
+ u8 is_icmp0 = ip_hdr0->protocol == IPPROTO_ICMPV##ipv; \
+ u8 is_icmp1 = ip_hdr1->protocol == IPPROTO_ICMPV##ipv; \
+ \
+ next0 = is_icmp0*NEXT_MAPME_IP##ipv + \
+ (1-is_icmp0)*NEXT_DATA_IP##ipv; \
+ \
+ next1 = is_icmp1*NEXT_MAPME_IP##ipv + \
+ (1-is_icmp1)*NEXT_DATA_IP##ipv; \
+ \
+ \
+ ret0 = LOCK_DPO_FACE_IP##ipv \
+ (&(hicnb0->face_id), \
+ &(hicnb0->in_faces_vec_id), \
+ &hicnb0->flags, \
+ &(ip_hdr0->dst_address)); \
+ \
+ ret1 = LOCK_DPO_FACE_IP##ipv \
+ (&(hicnb1->face_id), \
+ &(hicnb1->in_faces_vec_id), \
+ &hicnb1->flags, \
+ &(ip_hdr1->dst_address)); \
+ \
+ if ( PREDICT_FALSE(ret0 != HICN_ERROR_NONE) ) \
+ next0 = NEXT_ERROR_DROP_IP##ipv; \
+ else \
+ { \
+ vlib_increment_combined_counter ( \
+ &counters[hicnb0->face_id \
+ * HICN_N_COUNTER], thread_index, \
+ HICN_FACE_COUNTERS_DATA_RX, \
+ 1, \
+ vlib_buffer_length_in_chain(vm, b0)); \
+ stats.pkts_data_count += 1; \
+ } \
+ \
+ if ( PREDICT_FALSE(ret1 != HICN_ERROR_NONE) ) \
+ next1 = NEXT_ERROR_DROP_IP##ipv; \
+ else \
+ { \
+ vlib_increment_combined_counter ( \
+ &counters[hicnb1->face_id \
+ * HICN_N_COUNTER], thread_index,\
+ HICN_FACE_COUNTERS_DATA_RX, \
+ 1, \
+ vlib_buffer_length_in_chain(vm, b1)); \
+ stats.pkts_data_count += 1; \
+ } \
+ \
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) && \
+ (b0->flags & VLIB_BUFFER_IS_TRACED))) \
+ { \
+ TRACE_INPUT_PKT_IP##ipv *t = \
+ vlib_add_trace (vm, node, b0, sizeof (*t)); \
+ t->pkt_type = HICN_PKT_TYPE_INTEREST; \
+ t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX]; \
+ t->next_index = next0; \
+ clib_memcpy_fast (t->packet_data, \
+ vlib_buffer_get_current (b0), \
+ sizeof (t->packet_data)); \
+ } \
+ \
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) && \
+ (b1->flags & VLIB_BUFFER_IS_TRACED))) \
+ { \
+ TRACE_INPUT_PKT_IP##ipv *t = \
+ vlib_add_trace (vm, node, b1, sizeof (*t)); \
+ t->pkt_type = HICN_PKT_TYPE_INTEREST; \
+ t->sw_if_index = vnet_buffer (b1)->sw_if_index[VLIB_RX]; \
+ t->next_index = next1; \
+ clib_memcpy_fast (t->packet_data, \
+ vlib_buffer_get_current (b1), \
+ sizeof (t->packet_data)); \
+ } \
+ \
+ \
+ /* Verify speculative enqueue, maybe switch current next frame */ \
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index, \
+ to_next, n_left_to_next, \
+ bi0, bi1, next0, next1); \
+ }while(0)
+
+
+static uword
+hicn4_face_input_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ u32 n_left_from, *from, *to_next, next_index;
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+ vl_api_hicn_api_node_stats_get_reply_t stats = { 0 };
+ u32 thread_index = vm->thread_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ /* Dual loop, X2 */
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ face_input_x2 (4);
+ }
+
+ /* Dual loop, X1 */
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ face_input_x1 (4);
+ }
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ vlib_node_increment_counter (vm, node->node_index,
+ HICNFWD_ERROR_DATAS, stats.pkts_data_count);
+
+ return (frame->n_vectors);
+}
+
+/* packet trace format function */
+static u8 *
+hicn4_face_input_format_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ hicn4_face_input_trace_t *t =
+ va_arg (*args, hicn4_face_input_trace_t *);
+
+ s = format (s, "FACE_IP4_INPUT: pkt: %d, sw_if_index %d, next index %d\n%U",
+ (int) t->pkt_type, t->sw_if_index, t->next_index,
+ format_ip4_header, t->packet_data, sizeof (t->packet_data));
+ return (s);
+}
+
+
+/*
+ * Node registration for the interest forwarder node
+ */
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE(hicn4_face_input_node) =
+{
+ .function = hicn4_face_input_node_fn,
+ .name = "hicn4-face-input",
+ .vector_size = sizeof(u32),
+ .format_trace = hicn4_face_input_format_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+ .n_errors = ARRAY_LEN(hicn4_face_input_error_strings),
+ .error_strings = hicn4_face_input_error_strings,
+ .n_next_nodes = HICN4_FACE_INPUT_N_NEXT,
+ /* edit / add dispositions here */
+ .next_nodes =
+ {
+ [HICN4_FACE_INPUT_NEXT_DATA] = "hicn-data-pcslookup",
+ [HICN4_FACE_INPUT_NEXT_MAPME] = "hicn-mapme-ack",
+ [HICN4_FACE_INPUT_NEXT_ERROR_DROP] = "error-drop",
+ },
+};
+/* *INDENT-ON* */
+
+/**
+ * @brief IPv6 face input node function
+ * @see hicn4_face_input_node_fn
+ */
+static uword
+hicn6_face_input_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ u32 n_left_from, *from, *to_next, next_index;
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+ vl_api_hicn_api_node_stats_get_reply_t stats = { 0 };
+ u32 thread_index = vm->thread_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ /* Dual loop, X2 */
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ face_input_x2 (6);
+ }
+
+ /* Dual loop, X1 */
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ face_input_x1 (6);
+ }
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ vlib_node_increment_counter (vm, node->node_index,
+ HICNFWD_ERROR_DATAS, stats.pkts_data_count);
+
+ return (frame->n_vectors);
+}
+
+/* packet trace format function */
+static u8 *
+hicn6_face_input_format_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ hicn6_face_input_trace_t *t =
+ va_arg (*args, hicn6_face_input_trace_t *);
+
+ s = format (s, "FACE_IP6_INPUT: pkt: %d, sw_if_index %d, next index %d\n%U",
+ (int) t->pkt_type, t->sw_if_index, t->next_index,
+ format_ip6_header, t->packet_data, sizeof (t->packet_data));
+ return (s);
+}
+
+/*
+ * Node registration for the interest forwarder node
+ */
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE(hicn6_face_input_node) =
+{
+ .function = hicn6_face_input_node_fn,
+ .name = "hicn6-face-input",
+ .vector_size = sizeof(u32),
+ .format_trace = hicn6_face_input_format_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+ .n_errors = ARRAY_LEN(hicn6_face_input_error_strings),
+ .error_strings = hicn6_face_input_error_strings,
+ .n_next_nodes = HICN6_FACE_INPUT_N_NEXT,
+ /* edit / add dispositions here */
+ .next_nodes =
+ {
+ [HICN6_FACE_INPUT_NEXT_DATA] = "hicn-data-pcslookup",
+ [HICN6_FACE_INPUT_NEXT_MAPME] = "hicn-mapme-ack",
+ [HICN6_FACE_INPUT_NEXT_ERROR_DROP] = "error-drop",
+ },
+};
+/* *INDENT-ON* */
+
+/**** FACE OUTPUT *****/
+
+typedef enum
+{
+ HICN4_FACE_OUTPUT_NEXT_ECHO_REPLY,
+ HICN4_FACE_OUTPUT_NEXT_UDP4_ENCAP,
+ HICN4_FACE_OUTPUT_NEXT_UDP6_ENCAP,
+ HICN4_FACE_OUTPUT_N_NEXT,
+} hicn4_face_output_next_t;
+
+typedef enum
+{
+ HICN6_FACE_OUTPUT_NEXT_ECHO_REPLY,
+ HICN6_FACE_OUTPUT_NEXT_UDP4_ENCAP,
+ HICN6_FACE_OUTPUT_NEXT_UDP6_ENCAP,
+ HICN6_FACE_OUTPUT_N_NEXT,
+} hicn6_face_output_next_t;
+
+/* static_always_inline void */
+/* hicn_reply_probe_v4 (vlib_buffer_t * b, hicn_face_t * face) */
+/* { */
+/* hicn_header_t *h0 = vlib_buffer_get_current (b); */
+/* hicn_face_ip_t * face_ip = (hicn_face_ip_t *)(&face->data); */
+/* h0->v4.ip.saddr = h0->v4.ip.daddr; */
+/* h0->v4.ip.daddr = face_ip->local_addr.ip4; */
+/* vnet_buffer (b)->sw_if_index[VLIB_RX] = face->shared.sw_if; */
+
+/* u16 * dst_port_ptr = (u16 *)(((u8*)h0) + sizeof(ip4_header_t) + sizeof(u16)); */
+/* u16 dst_port = *dst_port_ptr; */
+/* u16 * src_port_ptr = (u16 *)(((u8*)h0) + sizeof(ip4_header_t)); */
+
+/* *dst_port_ptr = *src_port_ptr; */
+/* *src_port_ptr = dst_port; */
+
+/* hicn_type_t type = hicn_get_buffer (b)->type; */
+/* hicn_ops_vft[type.l1]->set_lifetime (type, &h0->protocol, 0); */
+/* } */
+
+/* static_always_inline void */
+/* hicn_reply_probe_v6 (vlib_buffer_t * b, hicn_face_t * face) */
+/* { */
+/* hicn_header_t *h0 = vlib_buffer_get_current (b); */
+/* hicn_face_ip_t * face_ip = (hicn_face_ip_t *)(&face->data); */
+/* h0->v6.ip.saddr = h0->v6.ip.daddr; */
+/* h0->v6.ip.daddr = face_ip->local_addr.ip6; */
+/* vnet_buffer (b)->sw_if_index[VLIB_RX] = face->shared.sw_if; */
+
+/* u16 * dst_port_ptr = (u16 *)(((u8*)h0) + sizeof(ip6_header_t) + sizeof(u16)); */
+/* u16 dst_port = *dst_port_ptr; */
+/* u16 * src_port_ptr = (u16 *)(((u8*)h0) + sizeof(ip6_header_t)); */
+
+/* *dst_port_ptr = *src_port_ptr; */
+/* *src_port_ptr = dst_port; */
+
+/* hicn_type_t type = hicn_get_buffer (b)->type; */
+/* hicn_ops_vft[type.l1]->set_lifetime (type, &h0->protocol, 0); */
+
+/* } */
+
+/* static_always_inline u32 */
+/* hicn_face_match_probe (vlib_buffer_t * b, hicn_face_t * face, u32 * next) */
+/* { */
+
+/* u8 *ptr = vlib_buffer_get_current (b); */
+/* u8 v = *ptr & 0xf0; */
+/* u8 res = 0; */
+
+/* if ( v == 0x40 ) */
+/* { */
+/* u16 * dst_port = (u16 *)(ptr + sizeof(ip4_header_t) + sizeof(u16)); */
+/* if (*dst_port == clib_net_to_host_u16(DEFAULT_PROBING_PORT)) */
+/* { */
+/* hicn_reply_probe_v6(b, face); */
+/* *next = HICN4_FACE_NEXT_ECHO_REPLY; */
+/* res = 1; */
+/* } */
+/* } */
+/* else if ( v == 0x60 ) */
+/* { */
+/* u16 * dst_port = (u16 *)(ptr + sizeof(ip6_header_t) + sizeof(u16)); */
+/* if (*dst_port == clib_net_to_host_u16(DEFAULT_PROBING_PORT)) */
+/* { */
+/* hicn_reply_probe_v6(b, face); */
+/* *next = HICN6_FACE_NEXT_ECHO_REPLY; */
+/* res = 1; */
+/* } */
+/* } */
+/* return res; */
+/* } */
+
+
+static inline void
+hicn_face_rewrite_interest (vlib_main_t * vm, vlib_buffer_t * b0,
+ hicn_face_t * face, u32 * next)
+{
+
+ /* if ((face->flags & HICN_FACE_FLAGS_APPFACE_PROD) && hicn_face_match_probe(b0, face, next)) */
+ /* return; */
+
+ hicn_header_t *hicn = vlib_buffer_get_current (b0);
+
+ //hicn_face_ip_t *ip_face = (hicn_face_ip_t *) face->data;
+
+ ip46_address_t temp_addr;
+ ip46_address_reset (&temp_addr);
+ hicn_type_t type = hicn_get_buffer (b0)->type;
+ hicn_ops_vft[type.l1]->rewrite_interest (type, &hicn->protocol,
+ &face->nat_addr, &temp_addr);
+
+ if (ip46_address_is_ip4(&face->nat_addr))
+ b0->flags |= VNET_BUFFER_F_OFFLOAD_IP_CKSUM;
+
+ b0->flags |= VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;
+
+ ASSERT(face->flags & HICN_FACE_FLAGS_FACE);
+
+ vnet_buffer (b0)->ip.adj_index[VLIB_TX] = face->dpo.dpoi_index;
+ *next = face->dpo.dpoi_next_node;
+}
+
+static char *hicn4_face_output_error_strings[] = {
+#define _(sym, string) string,
+ foreach_hicnfwd_error
+#undef _
+};
+
+static char *hicn6_face_output_error_strings[] = {
+#define _(sym, string) string,
+ foreach_hicnfwd_error
+#undef _
+};
+
+
+/* Trace context struct */
+typedef struct
+{
+ u32 next_index;
+ u32 sw_if_index;
+ u8 pkt_type;
+ u8 packet_data[60];
+}
+hicn4_face_output_trace_t;
+
+/* Trace context struct */
+typedef struct
+{
+ u32 next_index;
+ u32 sw_if_index;
+ u8 pkt_type;
+ u8 packet_data[60];
+}
+hicn6_face_output_trace_t;
+
+#define TRACE_OUTPUT_PKT_IP4 hicn4_face_output_trace_t
+#define TRACE_OUTPUT_PKT_IP6 hicn6_face_output_trace_t
+
+#define face_output_x1(ipv) \
+ do { \
+ vlib_buffer_t *b0; \
+ u32 bi0; \
+ u32 next0 = ~0; \
+ hicn_face_t * face; \
+ \
+ /* Prefetch for next iteration. */ \
+ if (n_left_from > 1) \
+ { \
+ vlib_buffer_t *b1; \
+ b1 = vlib_get_buffer (vm, from[1]); \
+ CLIB_PREFETCH (b1, CLIB_CACHE_LINE_BYTES, STORE); \
+ CLIB_PREFETCH (b1->data, CLIB_CACHE_LINE_BYTES , STORE); \
+ } \
+ /* Dequeue a packet buffer */ \
+ bi0 = from[0]; \
+ from += 1; \
+ n_left_from -= 1; \
+ to_next[0] = bi0; \
+ to_next += 1; \
+ n_left_to_next -= 1; \
+ \
+ b0 = vlib_get_buffer (vm, bi0); \
+ \
+ hicn_face_id_t face_id = vnet_buffer (b0)->ip.adj_index[VLIB_TX]; \
+ face = \
+ hicn_dpoi_get_from_idx (face_id); \
+ \
+ if (PREDICT_TRUE(face != NULL)) \
+ { \
+ hicn_face_rewrite_interest \
+ (vm, b0, face, &next0); \
+ stats.pkts_interest_count += 1; \
+ vlib_increment_combined_counter ( \
+ &counters[face_id * HICN_N_COUNTER], \
+ thread_index, \
+ HICN_FACE_COUNTERS_INTEREST_TX, \
+ 1, \
+ vlib_buffer_length_in_chain(vm, b0)); \
+ } \
+ \
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) && \
+ (b0->flags & VLIB_BUFFER_IS_TRACED))) \
+ { \
+ TRACE_OUTPUT_PKT_IP##ipv *t = \
+ vlib_add_trace (vm, node, b0, sizeof (*t)); \
+ t->pkt_type = HICN_PKT_TYPE_INTEREST; \
+ t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX]; \
+ t->next_index = next0; \
+ clib_memcpy_fast (t->packet_data, \
+ vlib_buffer_get_current (b0), \
+ sizeof (t->packet_data)); \
+ } \
+ \
+ \
+ /* Verify speculative enqueue, maybe switch current next frame */ \
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index, \
+ to_next, n_left_to_next, \
+ bi0, next0); \
+ }while(0)
+
+#define face_output_x2(ipv) \
+ do { \
+ vlib_buffer_t *b0, *b1; \
+ u32 bi0, bi1; \
+ u32 next0 = ~0; \
+ u32 next1 = ~0; \
+ hicn_face_t *face0, *face1; \
+ \
+ /* Prefetch for next iteration. */ \
+ { \
+ vlib_buffer_t *b2, *b3; \
+ b2 = vlib_get_buffer (vm, from[2]); \
+ b3 = vlib_get_buffer (vm, from[3]); \
+ CLIB_PREFETCH (b2, CLIB_CACHE_LINE_BYTES, STORE); \
+ CLIB_PREFETCH (b3, CLIB_CACHE_LINE_BYTES, STORE); \
+ CLIB_PREFETCH (b2->data, CLIB_CACHE_LINE_BYTES , STORE); \
+ CLIB_PREFETCH (b3->data, CLIB_CACHE_LINE_BYTES , STORE); \
+ } \
+ /* Dequeue a packet buffer */ \
+ bi0 = from[0]; \
+ bi1 = from[1]; \
+ from += 2; \
+ n_left_from -= 2; \
+ to_next[0] = bi0; \
+ to_next[1] = bi1; \
+ to_next += 2; \
+ n_left_to_next -= 2; \
+ \
+ b0 = vlib_get_buffer (vm, bi0); \
+ b1 = vlib_get_buffer (vm, bi1); \
+ \
+ hicn_face_id_t face_id0 = vnet_buffer (b0)->ip.adj_index[VLIB_TX]; \
+ hicn_face_id_t face_id1 = vnet_buffer (b1)->ip.adj_index[VLIB_TX]; \
+ face0 = \
+ hicn_dpoi_get_from_idx (face_id0); \
+ face1 = \
+ hicn_dpoi_get_from_idx (face_id1); \
+ \
+ if (PREDICT_TRUE(face0 != NULL)) \
+ { \
+ hicn_face_rewrite_interest \
+ (vm, b0, face0, &next0); \
+ stats.pkts_interest_count += 1; \
+ vlib_increment_combined_counter ( \
+ &counters[face_id0 * HICN_N_COUNTER], \
+ thread_index, \
+ HICN_FACE_COUNTERS_INTEREST_TX, \
+ 1, \
+ vlib_buffer_length_in_chain(vm, b0)); \
+ } \
+ \
+ if (PREDICT_TRUE(face1 != NULL)) \
+ { \
+ hicn_face_rewrite_interest \
+ (vm, b1, face1, &next1); \
+ stats.pkts_interest_count += 1; \
+ vlib_increment_combined_counter ( \
+ &counters[face_id1 * HICN_N_COUNTER], \
+ thread_index, \
+ HICN_FACE_COUNTERS_INTEREST_TX, \
+ 1, \
+ vlib_buffer_length_in_chain(vm, b1)); \
+ } \
+ \
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) && \
+ (b0->flags & VLIB_BUFFER_IS_TRACED))) \
+ { \
+ TRACE_OUTPUT_PKT_IP##ipv *t = \
+ vlib_add_trace (vm, node, b0, sizeof (*t)); \
+ t->pkt_type = HICN_PKT_TYPE_INTEREST; \
+ t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX]; \
+ t->next_index = next0; \
+ clib_memcpy_fast (t->packet_data, \
+ vlib_buffer_get_current (b0), \
+ sizeof (t->packet_data)); \
+ } \
+ \
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) && \
+ (b1->flags & VLIB_BUFFER_IS_TRACED))) \
+ { \
+ TRACE_OUTPUT_PKT_IP##ipv *t = \
+ vlib_add_trace (vm, node, b1, sizeof (*t)); \
+ t->pkt_type = HICN_PKT_TYPE_INTEREST; \
+ t->sw_if_index = vnet_buffer (b1)->sw_if_index[VLIB_RX]; \
+ t->next_index = next1; \
+ clib_memcpy_fast (t->packet_data, \
+ vlib_buffer_get_current (b1), \
+ sizeof (t->packet_data)); \
+ } \
+ \
+ \
+ /* Verify speculative enqueue, maybe switch current next frame */ \
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index, \
+ to_next, n_left_to_next, \
+ bi0, bi1, next0, next1); \
+ }while(0)
+
+
+static uword
+hicn4_face_output_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ u32 n_left_from, *from, *to_next, next_index;
+ vl_api_hicn_api_node_stats_get_reply_t stats = { 0 };
+ u32 thread_index = vm->thread_index;
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ /* Dual loop, X2 */
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ face_output_x2 (4);
+ }
+
+ /* Dual loop, X1 */
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ face_output_x1 (4);
+ }
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ vlib_node_increment_counter (vm, node->node_index,
+ HICNFWD_ERROR_INTERESTS,
+ stats.pkts_interest_count);
+
+ return (frame->n_vectors);
+}
+
+/* packet trace format function */
+static u8 *
+hicn4_face_output_format_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ hicn4_face_output_trace_t *t =
+ va_arg (*args, hicn4_face_output_trace_t *);
+
+ s =
+ format (s, "FACE_IP4_OUTPUT: pkt: %d, sw_if_index %d, next index %d\n%U",
+ (int) t->pkt_type, t->sw_if_index, t->next_index,
+ format_ip4_header, t->packet_data, sizeof (t->packet_data));
+ return (s);
+}
+
+/*
+ * Node registration for the interest forwarder node
+ */
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE(hicn4_face_output_node) =
+{
+ .function = hicn4_face_output_node_fn,
+ .name = "hicn4-face-output",
+ .vector_size = sizeof(u32),
+ .format_trace = hicn4_face_output_format_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+ .n_errors = ARRAY_LEN(hicn4_face_output_error_strings),
+ .error_strings = hicn4_face_output_error_strings,
+ .n_next_nodes = HICN4_FACE_OUTPUT_N_NEXT,
+ /* Reusing the list of nodes from lookup to be compatible with arp */
+ .next_nodes =
+ {
+ [HICN4_FACE_OUTPUT_NEXT_ECHO_REPLY] = "hicn4-face-input",
+ [HICN4_FACE_OUTPUT_NEXT_UDP4_ENCAP] = "udp4-encap",
+ [HICN4_FACE_OUTPUT_NEXT_UDP6_ENCAP] = "udp6-encap"
+ }
+};
+/* *INDENT-ON* */
+
+
+static uword
+hicn6_face_output_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ u32 n_left_from, *from, *to_next, next_index;
+ vl_api_hicn_api_node_stats_get_reply_t stats = { 0 };
+ u32 thread_index = vm->thread_index;
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ /* Dual loop, X2 */
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ face_output_x2 (6);
+ }
+
+ /* Dual loop, X1 */
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ face_output_x1 (6);
+ }
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ vlib_node_increment_counter (vm, node->node_index,
+ HICNFWD_ERROR_INTERESTS,
+ stats.pkts_interest_count);
+
+ return (frame->n_vectors);
+}
+
+/* packet trace format function */
+static u8 *
+hicn6_face_output_format_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ hicn6_face_output_trace_t *t =
+ va_arg (*args, hicn6_face_output_trace_t *);
+
+ s =
+ format (s, "FACE_IP6_OUTPUT: pkt: %d, sw_if_index %d, next index %d\n%U",
+ (int) t->pkt_type, t->sw_if_index, t->next_index,
+ format_ip6_header, t->packet_data, sizeof (t->packet_data));
+ return (s);
+}
+
+/*
+ * Node registration for the interest forwarder node
+ */
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE(hicn6_face_output_node) =
+{
+ .function = hicn6_face_output_node_fn,
+ .name = "hicn6-face-output",
+ .vector_size = sizeof(u32),
+ .format_trace = hicn6_face_output_format_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+ .n_errors = ARRAY_LEN(hicn6_face_output_error_strings),
+ .error_strings = hicn6_face_output_error_strings,
+ .n_next_nodes = HICN6_FACE_OUTPUT_N_NEXT,
+ /* Reusing the list of nodes from lookup to be compatible with neighbour discovery */
+ .next_nodes =
+ {
+ [HICN6_FACE_OUTPUT_NEXT_ECHO_REPLY] = "hicn6-face-input",
+ [HICN6_FACE_OUTPUT_NEXT_UDP4_ENCAP] = "udp4-encap",
+ [HICN6_FACE_OUTPUT_NEXT_UDP6_ENCAP] = "udp6-encap"
+ }
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/hicn-plugin/src/faces/face_node.h b/hicn-plugin/src/faces/face_node.h
new file mode 100644
index 000000000..f5a8bf5ae
--- /dev/null
+++ b/hicn-plugin/src/faces/face_node.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2020 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HICN_FACE_NODE_H__
+#define __HICN_FACE_NODE_H__
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+
+/**
+ * @file face_node.h
+ *
+ * Implements the input and output face nodes. Input face nodes
+ * process incoming data packets while output face nodes process outgoing
+ * interest packets.
+ *
+ * Input face nodes follow hicn-face-input nodes and their purpose
+ * is to retrieve the list of possible incoming faces for each data packet.
+ * The node following the input face nodes is hicn-data-pcslookup.
+ * Output face nodes follow the strategy and the hicn-interest-hitpit nodes and
+ * they perform the src nat on each interest packet. The node following the
+ * output face nodes depends on the adjacency type. In the case of ip, the next
+ * node is ip-rewrite; in the case of tunnels, the next node is the one implementing
+ * the tunnel encapsulation (udp-encap, mpls, etc).
+ */
+
+extern vlib_node_registration_t hicn4_face_input_node;
+extern vlib_node_registration_t hicn4_face_output_node;
+extern vlib_node_registration_t hicn6_face_input_node;
+extern vlib_node_registration_t hicn6_face_output_node;
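+
+/*
+ * Illustrative sketch, not part of the original source: another graph node
+ * can resolve these registrations by name and wire one of them as a next
+ * node. The function name below is hypothetical and used only for
+ * illustration; hicn6-face-output is the name registered in face_node.c.
+ *
+ *   static u32
+ *   example_wire_face_output (vlib_main_t * vm, u32 my_node_index)
+ *   {
+ *     vlib_node_t *n = vlib_get_node_by_name (vm, (u8 *) "hicn6-face-output");
+ *     ASSERT (n != NULL);
+ *     // Returns the next-node slot to use when enqueueing buffers to it.
+ *     return vlib_node_add_next (vm, my_node_index, n->index);
+ *   }
+ */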
+
+#endif // __HICN_FACE_NODE_H__
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/hicn-plugin/src/faces/iface_node.c b/hicn-plugin/src/faces/iface_node.c
new file mode 100644
index 000000000..433cf0b02
--- /dev/null
+++ b/hicn-plugin/src/faces/iface_node.c
@@ -0,0 +1,915 @@
+/*
+ * Copyright (c) 2020 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "face.h"
+#include "../strategy_dpo_manager.h"
+#include "../hicn.h"
+#include "../infra.h"
+#include "../cache_policies/cs_lru.h"
+
+/**
+ * @file iface_node.c
+ *
+ * Definition of the nodes for IP incomplete faces (ifaces).
+ */
+
+vlib_node_registration_t hicn4_iface_input_node;
+vlib_node_registration_t hicn4_iface_output_node;
+vlib_node_registration_t hicn6_iface_input_node;
+vlib_node_registration_t hicn6_iface_output_node;
+
+u32 data_fwd_iface_ip4_vlib_edge;
+u32 data_fwd_iface_ip6_vlib_edge;
+
+static char *hicn4_iface_input_error_strings[] = {
+#define _(sym, string) string,
+ foreach_hicnfwd_error
+#undef _
+};
+
+static char *hicn6_iface_input_error_strings[] = {
+#define _(sym, string) string,
+ foreach_hicnfwd_error
+#undef _
+};
+
+/* Trace context struct */
+typedef struct
+{
+ u32 next_index;
+ u32 sw_if_index;
+ u8 pkt_type;
+ u8 packet_data[60];
+} hicn4_iface_input_trace_t;
+
+typedef enum
+{
+ HICN4_IFACE_INPUT_NEXT_INTEREST,
+ HICN4_IFACE_INPUT_NEXT_MAPME,
+ HICN4_IFACE_INPUT_NEXT_ERROR_DROP,
+ HICN4_IFACE_INPUT_N_NEXT,
+} hicn4_iface_input_next_t;
+
+/* Trace context struct */
+typedef struct
+{
+ u32 next_index;
+ u32 sw_if_index;
+ u8 pkt_type;
+ u8 packet_data[60];
+} hicn6_iface_input_trace_t;
+
+typedef enum
+{
+ HICN6_IFACE_INPUT_NEXT_INTEREST,
+ HICN6_IFACE_INPUT_NEXT_MAPME,
+ HICN6_IFACE_INPUT_NEXT_ERROR_DROP,
+ HICN6_IFACE_INPUT_N_NEXT,
+} hicn6_iface_input_next_t;
+
+#define NEXT_MAPME_IP4 HICN4_IFACE_INPUT_NEXT_MAPME
+#define NEXT_MAPME_IP6 HICN6_IFACE_INPUT_NEXT_MAPME
+
+#define NEXT_INTEREST_IP4 HICN4_IFACE_INPUT_NEXT_INTEREST
+#define NEXT_INTEREST_IP6 HICN6_IFACE_INPUT_NEXT_INTEREST
+
+#define ADDRESS_IP4 ip_interface_address_t *ia = 0;ip4_address_t *local_address = ip4_interface_first_address(&ip4_main, swif, &ia)
+#define ADDRESS_IP6 ip6_address_t *local_address = ip6_interface_first_address(&ip6_main, swif)
+
+#define ADDRESSX2_IP4 ip_interface_address_t *ia0, *ia1; ia0 = ia1 = 0; \
+ ip4_address_t *local_address0 = ip4_interface_first_address(&ip4_main, swif0, &ia0); \
+ ip4_address_t *local_address1 = ip4_interface_first_address(&ip4_main, swif1, &ia1);
+
+#define ADDRESSX2_IP6 ip6_address_t *local_address0 = ip6_interface_first_address(&ip6_main, swif0); \
+ ip6_address_t *local_address1 = ip6_interface_first_address(&ip6_main, swif1);
+
+#define DPO_ADD_LOCK_IFACE_IP4 hicn_iface_ip4_add_and_lock
+#define DPO_ADD_LOCK_IFACE_IP6 hicn_iface_ip6_add_and_lock
+
+//#define VLIB_EDGE_IP4 data_fwd_iface_ip4_vlib_edge
+//#define VLIB_EDGE_IP6 data_fwd_iface_ip6_vlib_edge
+
+#define IP_HEADER_4 ip4_header_t
+#define IP_HEADER_6 ip6_header_t
+
+#define TRACE_INPUT_PKT_IP4 hicn4_iface_input_trace_t
+#define TRACE_INPUT_PKT_IP6 hicn6_iface_input_trace_t
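+
+/*
+ * Illustrative note, not part of the original source: the defines above (and
+ * the NEXT_* / DPO_ADD_LOCK_* ones) specialize a single macro body per IP
+ * version through token pasting. Inside iface_input_x1(4), for instance,
+ *
+ *   IP_HEADER_##ipv * ip_hdr = (IP_HEADER_##ipv *) vlib_buffer_get_current (b0);
+ *
+ * expands to
+ *
+ *   ip4_header_t * ip_hdr = (ip4_header_t *) vlib_buffer_get_current (b0);
+ *
+ * while NEXT_MAPME_IP##ipv resolves to HICN4_IFACE_INPUT_NEXT_MAPME.
+ */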
+
+// NODE OUTPUT
+
+static char *hicn4_iface_output_error_strings[] = {
+#define _(sym, string) string,
+ foreach_hicnfwd_error
+#undef _
+};
+
+static char *hicn6_iface_output_error_strings[] = {
+#define _(sym, string) string,
+ foreach_hicnfwd_error
+#undef _
+};
+
+
+/* Trace context struct */
+typedef struct
+{
+ u32 next_index;
+ u32 sw_if_index;
+ u8 pkt_type;
+ u8 packet_data[60];
+} hicn4_iface_output_trace_t;
+
+typedef enum
+{
+ HICN4_IFACE_OUTPUT_NEXT_LOOKUP,
+ HICN4_IFACE_OUTPUT_NEXT_UDP4_ENCAP,
+ HICN4_IFACE_OUTPUT_NEXT_UDP6_ENCAP,
+ HICN4_IFACE_OUTPUT_N_NEXT,
+} hicn4_iface_output_next_t;
+
+/* Trace context struct */
+typedef struct
+{
+ u32 next_index;
+ u32 sw_if_index;
+ u8 pkt_type;
+ u8 packet_data[60];
+} hicn6_iface_output_trace_t;
+
+typedef enum
+{
+ HICN6_IFACE_OUTPUT_NEXT_LOOKUP,
+ HICN6_IFACE_OUTPUT_NEXT_UDP4_ENCAP,
+ HICN6_IFACE_OUTPUT_NEXT_UDP6_ENCAP,
+ HICN6_IFACE_OUTPUT_N_NEXT,
+} hicn6_iface_output_next_t;
+
+//#define ERROR_OUTPUT_IP4 HICN4_IFACE_OUTPUT_NEXT_ERROR_DROP
+//#define ERROR_OUTPUT_IP6 HICN6_IFACE_OUTPUT_NEXT_ERROR_DROP
+
+#define NEXT_DATA_LOOKUP_IP4 HICN4_IFACE_OUTPUT_NEXT_LOOKUP
+#define NEXT_DATA_LOOKUP_IP6 HICN6_IFACE_OUTPUT_NEXT_LOOKUP
+
+#define NEXT_UDP_ENCAP_IP4 HICN4_IFACE_OUTPUT_NEXT_UDP4_ENCAP
+#define NEXT_UDP_ENCAP_IP6 HICN6_IFACE_OUTPUT_NEXT_UDP6_ENCAP
+
+#define HICN_REWRITE_DATA_IP4 hicn_rewrite_iface_data4
+#define HICN_REWRITE_DATA_IP6 hicn_rewrite_iface_data6
+
+#define TRACE_OUTPUT_PKT_IP4 hicn4_iface_output_trace_t
+#define TRACE_OUTPUT_PKT_IP6 hicn6_iface_output_trace_t
+
+// NODES IMPLEMENTATIONS
+
+#define iface_input_x1(ipv) \
+ do { \
+ vlib_buffer_t *b0; \
+ u32 bi0, next0, next_iface0; \
+ IP_HEADER_##ipv * ip_hdr = NULL; \
+ hicn_buffer_t * hicnb0; \
+ /* Prefetch for next iteration. */ \
+ if (n_left_from > 1) \
+ { \
+ vlib_buffer_t *b1; \
+ b1 = vlib_get_buffer (vm, from[1]); \
+ CLIB_PREFETCH (b1, 2*CLIB_CACHE_LINE_BYTES, STORE); \
+ CLIB_PREFETCH (b1->data, CLIB_CACHE_LINE_BYTES , LOAD); \
+ } \
+ /* Dequeue a packet buffer */ \
+ bi0 = from[0]; \
+ from += 1; \
+ n_left_from -= 1; \
+ to_next[0] = bi0; \
+ to_next += 1; \
+ n_left_to_next -= 1; \
+ \
+ b0 = vlib_get_buffer (vm, bi0); \
+ hicnb0 = hicn_get_buffer(b0); \
+ ip_hdr = (IP_HEADER_##ipv *) vlib_buffer_get_current(b0); \
+ \
+ stats.pkts_interest_count += 1; \
+ \
+ u8 is_icmp = ip_hdr->protocol == IPPROTO_ICMPV##ipv; \
+ \
+ next0 = is_icmp*NEXT_MAPME_IP##ipv + \
+ (1-is_icmp)*NEXT_INTEREST_IP##ipv; \
+ \
+ next_iface0 = NEXT_DATA_LOOKUP_IP##ipv; \
+ \
+ if (hicnb0->flags & HICN_BUFFER_FLAGS_FROM_UDP4_TUNNEL) \
+ next_iface0 = NEXT_UDP_ENCAP_IP4; \
+ else if(hicnb0->flags & HICN_BUFFER_FLAGS_FROM_UDP6_TUNNEL) \
+ next_iface0 = NEXT_UDP_ENCAP_IP6; \
+ \
+ DPO_ADD_LOCK_IFACE_IP##ipv \
+ (&(hicnb0->face_id), \
+ &hicnb0->flags, \
+ &(ip_hdr->src_address), \
+ vnet_buffer(b0)->sw_if_index[VLIB_RX], \
+ vnet_buffer(b0)->ip.adj_index[VLIB_RX], \
+ next_iface0); \
+ \
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) && \
+ (b0->flags & VLIB_BUFFER_IS_TRACED))) \
+ { \
+ TRACE_INPUT_PKT_IP##ipv *t = \
+ vlib_add_trace (vm, node, b0, sizeof (*t)); \
+ t->pkt_type = HICN_PKT_TYPE_INTEREST; \
+ t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX]; \
+ t->next_index = next0; \
+ clib_memcpy_fast (t->packet_data, \
+ vlib_buffer_get_current (b0), \
+ sizeof (t->packet_data)); \
+ \
+ } \
+ \
+ vlib_increment_combined_counter ( \
+ &counters[hicnb0->face_id \
+ * HICN_N_COUNTER], thread_index, \
+ HICN_FACE_COUNTERS_INTEREST_RX, \
+ 1, \
+ vlib_buffer_length_in_chain(vm, b0)); \
+ \
+ /* Verify speculative enqueue, maybe switch current next frame */ \
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index, \
+ to_next, n_left_to_next, \
+ bi0, next0); \
+ }while(0)
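+
+/*
+ * Illustrative note, not part of the original source: the next-node choice in
+ * the macro above is computed branch-free. For the IPv4 instantiation it is
+ * equivalent to
+ *
+ *   if (ip_hdr->protocol == IPPROTO_ICMPV4)
+ *     next0 = HICN4_IFACE_INPUT_NEXT_MAPME;      // MAP-Me control traffic
+ *   else
+ *     next0 = HICN4_IFACE_INPUT_NEXT_INTEREST;   // regular interest path
+ */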
+
+
+#define iface_input_x2(ipv) \
+ do { \
+ vlib_buffer_t *b0, *b1; \
+ u32 bi0, bi1, next0, next1, next_iface0, next_iface1; \
+ IP_HEADER_##ipv * ip_hdr0 = NULL; \
+ IP_HEADER_##ipv * ip_hdr1 = NULL; \
+ hicn_buffer_t *hicnb0, *hicnb1; \
+ \
+ /* Prefetch for next iteration. */ \
+ vlib_buffer_t *b2, *b3; \
+ b2 = vlib_get_buffer (vm, from[2]); \
+ b3 = vlib_get_buffer (vm, from[3]); \
+ CLIB_PREFETCH (b2, 2*CLIB_CACHE_LINE_BYTES, STORE); \
+ CLIB_PREFETCH (b3, 2*CLIB_CACHE_LINE_BYTES, STORE); \
+ CLIB_PREFETCH (b2->data, CLIB_CACHE_LINE_BYTES , LOAD); \
+ CLIB_PREFETCH (b3->data, CLIB_CACHE_LINE_BYTES , LOAD); \
+ \
+ /* Dequeue a packet buffer */ \
+ bi0 = from[0]; \
+ bi1 = from[1]; \
+ from += 2; \
+ n_left_from -= 2; \
+ to_next[0] = bi0; \
+ to_next[1] = bi1; \
+ to_next += 2; \
+ n_left_to_next -= 2; \
+ \
+ b0 = vlib_get_buffer (vm, bi0); \
+ b1 = vlib_get_buffer (vm, bi1); \
+ hicnb0 = hicn_get_buffer(b0); \
+ hicnb1 = hicn_get_buffer(b1); \
+ ip_hdr0 = (IP_HEADER_##ipv *) vlib_buffer_get_current(b0); \
+ ip_hdr1 = (IP_HEADER_##ipv *) vlib_buffer_get_current(b1); \
+ \
+ stats.pkts_interest_count += 2; \
+ \
+ u8 is_icmp0 = ip_hdr0->protocol == IPPROTO_ICMPV##ipv; \
+ u8 is_icmp1 = ip_hdr1->protocol == IPPROTO_ICMPV##ipv; \
+ \
+ next0 = is_icmp0*NEXT_MAPME_IP##ipv + \
+ (1-is_icmp0)*NEXT_INTEREST_IP##ipv; \
+ \
+ next1 = is_icmp1*NEXT_MAPME_IP##ipv + \
+ (1-is_icmp1)*NEXT_INTEREST_IP##ipv; \
+ \
+ next_iface0 = NEXT_DATA_LOOKUP_IP##ipv; \
+ \
+ if (hicnb0->flags & HICN_BUFFER_FLAGS_FROM_UDP4_TUNNEL) \
+ next_iface0 = NEXT_UDP_ENCAP_IP4; \
+ else if(hicnb0->flags & HICN_BUFFER_FLAGS_FROM_UDP6_TUNNEL) \
+ next_iface0 = NEXT_UDP_ENCAP_IP6; \
+ \
+ next_iface1 = NEXT_DATA_LOOKUP_IP##ipv; \
+ \
+ if (hicnb1->flags & HICN_BUFFER_FLAGS_FROM_UDP4_TUNNEL) \
+ next_iface1 = NEXT_UDP_ENCAP_IP4; \
+ else if(hicnb1->flags & HICN_BUFFER_FLAGS_FROM_UDP6_TUNNEL) \
+ next_iface1 = NEXT_UDP_ENCAP_IP6; \
+ \
+ DPO_ADD_LOCK_IFACE_IP##ipv \
+ (&(hicnb0->face_id), \
+ &hicnb0->flags, \
+ &(ip_hdr0->src_address), \
+ vnet_buffer(b0)->sw_if_index[VLIB_RX], \
+ vnet_buffer(b0)->ip.adj_index[VLIB_RX], \
+ next_iface0); \
+ \
+ DPO_ADD_LOCK_IFACE_IP##ipv \
+ (&(hicnb1->face_id), \
+ &hicnb1->flags, \
+ &(ip_hdr1->src_address), \
+ vnet_buffer(b1)->sw_if_index[VLIB_RX], \
+ vnet_buffer(b1)->ip.adj_index[VLIB_RX], \
+ next_iface1); \
+ \
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) && \
+ (b0->flags & VLIB_BUFFER_IS_TRACED))) \
+ { \
+ TRACE_INPUT_PKT_IP##ipv *t = \
+ vlib_add_trace (vm, node, b0, sizeof (*t)); \
+ t->pkt_type = HICN_PKT_TYPE_INTEREST; \
+ t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX]; \
+ t->next_index = next0; \
+ clib_memcpy_fast (t->packet_data, \
+ vlib_buffer_get_current (b0), \
+ sizeof (t->packet_data)); \
+ } \
+ \
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) && \
+ (b1->flags & VLIB_BUFFER_IS_TRACED))) \
+ { \
+ TRACE_INPUT_PKT_IP##ipv *t = \
+ vlib_add_trace (vm, node, b1, sizeof (*t)); \
+ t->pkt_type = HICN_PKT_TYPE_INTEREST; \
+ t->sw_if_index = vnet_buffer (b1)->sw_if_index[VLIB_RX]; \
+ t->next_index = next1; \
+ clib_memcpy_fast (t->packet_data, \
+ vlib_buffer_get_current (b1), \
+ sizeof (t->packet_data)); \
+ } \
+ \
+ vlib_increment_combined_counter ( \
+ &counters[hicnb0->face_id \
+ * HICN_N_COUNTER], thread_index, \
+ HICN_FACE_COUNTERS_INTEREST_RX, \
+ 1, \
+ vlib_buffer_length_in_chain(vm, b0)); \
+ \
+ vlib_increment_combined_counter ( \
+ &counters[hicnb1->face_id \
+ * HICN_N_COUNTER], thread_index, \
+ HICN_FACE_COUNTERS_INTEREST_RX, \
+ 1, \
+ vlib_buffer_length_in_chain(vm, b1)); \
+ \
+ /* Verify speculative enqueue, maybe switch current next frame */ \
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index, \
+ to_next, n_left_to_next, \
+ bi0, bi1, next0, next1); \
+ }while(0)
+
+static uword
+hicn4_iface_input_node_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ u32 n_left_from, *from, *to_next, next_index;
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+ vl_api_hicn_api_node_stats_get_reply_t stats = { 0 };
+ u32 thread_index = vm->thread_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ /* Dual loop, X2 */
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ iface_input_x2 (4);
+ }
+
+ /* Dual loop, X1 */
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ iface_input_x1 (4);
+ }
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ vlib_node_increment_counter (vm, node->node_index,
+ HICNFWD_ERROR_INTERESTS,
+ stats.pkts_interest_count);
+
+ return (frame->n_vectors);
+}
+
+/* packet trace format function */
+static u8 *
+hicn4_iface_input_format_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ hicn4_iface_input_trace_t *t =
+ va_arg (*args, hicn4_iface_input_trace_t *);
+
+ s =
+ format (s, "IFACE_IP4_INPUT: pkt: %d, sw_if_index %d, next index %d\n%U",
+ (int) t->pkt_type, t->sw_if_index, t->next_index,
+ format_ip4_header, t->packet_data, sizeof (t->packet_data));
+ return (s);
+}
+
+/*
+ * Node registration for the interest forwarder node
+ */
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (hicn4_iface_input_node) =
+{
+ .function = hicn4_iface_input_node_fn,
+ .name = "hicn4-iface-input",
+ .vector_size = sizeof (u32),
+ .format_trace = hicn4_iface_input_format_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+ .n_errors = ARRAY_LEN (hicn4_iface_input_error_strings),
+ .error_strings = hicn4_iface_input_error_strings,
+ .n_next_nodes = HICN4_IFACE_INPUT_N_NEXT,
+ /* edit / add dispositions*/
+ .next_nodes =
+ {
+ [HICN4_IFACE_INPUT_NEXT_INTEREST] = "hicn-interest-pcslookup",
+ [HICN4_IFACE_INPUT_NEXT_MAPME] = "hicn-mapme-ctrl",
+ [HICN4_IFACE_INPUT_NEXT_ERROR_DROP] = "error-drop",
+ },
+};
+/* *INDENT-ON* */
+
+static uword
+hicn6_iface_input_node_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ u32 n_left_from, *from, *to_next, next_index;
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+ vl_api_hicn_api_node_stats_get_reply_t stats = { 0 };
+ u32 thread_index = vm->thread_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ /* Dual loop, X2 */
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ iface_input_x2 (6);
+ }
+
+ /* Dual loop, X1 */
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ iface_input_x1 (6);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ vlib_node_increment_counter (vm, node->node_index,
+ HICNFWD_ERROR_INTERESTS,
+ stats.pkts_interest_count);
+
+ return (frame->n_vectors);
+}
+
+/* packet trace format function */
+static u8 *
+hicn6_iface_input_format_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ hicn6_iface_input_trace_t *t =
+ va_arg (*args, hicn6_iface_input_trace_t *);
+
+ s =
+ format (s, "IFACE_IP6_INPUT: pkt: %d, sw_if_index %d, next index %d\n%U",
+ (int) t->pkt_type, t->sw_if_index, t->next_index,
+ format_ip6_header, t->packet_data, sizeof (t->packet_data));
+ return (s);
+}
+
+/*
+ * Node registration for the interest forwarder node
+ */
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (hicn6_iface_input_node) =
+{
+ .function = hicn6_iface_input_node_fn,
+ .name = "hicn6-iface-input",
+ .vector_size = sizeof (u32),
+ .format_trace = hicn6_iface_input_format_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+ .n_errors = ARRAY_LEN (hicn6_iface_input_error_strings),
+ .error_strings = hicn6_iface_input_error_strings,
+ .n_next_nodes = HICN6_IFACE_INPUT_N_NEXT,
+ /* edit / add dispositions*/
+ .next_nodes =
+ {
+ [HICN6_IFACE_INPUT_NEXT_INTEREST] = "hicn-interest-pcslookup",
+ [HICN6_IFACE_INPUT_NEXT_MAPME] = "hicn-mapme-ctrl",
+ [HICN6_IFACE_INPUT_NEXT_ERROR_DROP] = "error-drop",
+ },
+};
+/* *INDENT-ON* */
+
+
+/**** IFACE OUTPUT *****/
+
+static inline void
+hicn_rewrite_iface_data4 (vlib_main_t * vm, vlib_buffer_t * b0,
+ const hicn_face_t * iface, u32 * next)
+{
+ ip4_header_t *ip0;
+
+ /* Get the pointer to the old ip and tcp header */
+ ip0 = vlib_buffer_get_current (b0);
+
+  /* Set up the ip4 header */
+  /* The IP4 total length field includes the size of the ip4 header itself */
+ u16 sval = (vlib_buffer_length_in_chain (vm, b0));
+ ip0->length = clib_host_to_net_u16 (sval);
+ ip0->ttl = 254; // FIXME TTL
+
+ vnet_buffer (b0)->ip.adj_index[VLIB_TX] = iface->dpo.dpoi_index;
+ *next = iface->dpo.dpoi_next_node;
+ hicn_header_t *hicn = vlib_buffer_get_current (b0);
+
+ ip46_address_t temp_addr;
+ ip46_address_reset (&temp_addr);
+ hicn_type_t type = hicn_get_buffer (b0)->type;
+ hicn_ops_vft[type.l1]->rewrite_data (type, &hicn->protocol,
+ &(iface->nat_addr), &(temp_addr),
+ iface->pl_id);
+}
+
+static inline void
+hicn_rewrite_iface_data6 (vlib_main_t * vm, vlib_buffer_t * b0,
+ const hicn_face_t * iface, u32 * next)
+{
+ ip6_header_t *ip0;
+
+ /* Get the pointer to the old ip and tcp header */
+ ip0 = vlib_buffer_get_current (b0);
+
+ /* Set up the ip6 header */
+  /* The IP6 payload length does not include the size of the ip6 header */
+ u16 sval = (vlib_buffer_length_in_chain (vm, b0) - (sizeof (ip6_header_t)));
+ ip0->payload_length = clib_host_to_net_u16 (sval);
+ ip0->hop_limit = HICN_IP6_HOP_LIMIT;
+
+ vnet_buffer (b0)->ip.adj_index[VLIB_TX] = iface->dpo.dpoi_index;
+ *next = iface->dpo.dpoi_next_node;
+
+ hicn_header_t *hicn = vlib_buffer_get_current (b0);
+
+ ip46_address_t temp_addr;
+ ip46_address_reset (&temp_addr);
+ hicn_type_t type = hicn_get_buffer (b0)->type;
+ hicn_ops_vft[type.l1]->rewrite_data (type, &hicn->protocol,
+ &(iface->nat_addr), &(temp_addr),
+ iface->pl_id);
+}
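+
+/*
+ * Illustrative note, not part of the original source: the two rewrites above
+ * fill the length field differently because the IPv4 Total Length field
+ * covers the IP header itself while the IPv6 Payload Length field excludes
+ * it. For a 1200-byte buffer chain this amounts to
+ *
+ *   ip4->length         = clib_host_to_net_u16 (1200);
+ *   ip6->payload_length = clib_host_to_net_u16 (1200 - sizeof (ip6_header_t)); // 1160
+ */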
+
+#define iface_output_x1(ipv) \
+ do { \
+ vlib_buffer_t *b0; \
+ u32 bi0; \
+ u32 next0 = next_index; \
+ hicn_face_t * face; \
+ \
+ /* Prefetch for next iteration. */ \
+ if (n_left_from > 1) \
+ { \
+ vlib_buffer_t *b1; \
+ b1 = vlib_get_buffer (vm, from[1]); \
+ CLIB_PREFETCH (b1, CLIB_CACHE_LINE_BYTES, STORE); \
+ CLIB_PREFETCH (b1->data, CLIB_CACHE_LINE_BYTES , STORE); \
+ } \
+ /* Dequeue a packet buffer */ \
+ bi0 = from[0]; \
+ from += 1; \
+ n_left_from -= 1; \
+ to_next[0] = bi0; \
+ to_next += 1; \
+ n_left_to_next -= 1; \
+ \
+ b0 = vlib_get_buffer (vm, bi0); \
+ \
+ hicn_face_id_t face_id = vnet_buffer (b0)->ip.adj_index[VLIB_TX]; \
+ face = \
+ hicn_dpoi_get_from_idx (face_id); \
+ \
+ if (PREDICT_TRUE(face != NULL)) \
+ { \
+ HICN_REWRITE_DATA_IP##ipv \
+ (vm, b0, face, &next0); \
+ stats.pkts_data_count += 1; \
+ vlib_increment_combined_counter ( \
+ &counters[face_id * HICN_N_COUNTER], \
+ thread_index, \
+ HICN_FACE_COUNTERS_DATA_TX, \
+ 1, \
+ vlib_buffer_length_in_chain(vm, b0));\
+ } \
+ \
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) && \
+ (b0->flags & VLIB_BUFFER_IS_TRACED))) \
+ { \
+ TRACE_OUTPUT_PKT_IP##ipv *t = \
+ vlib_add_trace (vm, node, b0, sizeof (*t)); \
+ t->pkt_type = HICN_PKT_TYPE_INTEREST; \
+ t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX]; \
+ t->next_index = next0; \
+ clib_memcpy_fast (t->packet_data, \
+ vlib_buffer_get_current (b0), \
+ sizeof (t->packet_data)); \
+ } \
+ \
+ \
+ /* Verify speculative enqueue, maybe switch current next frame */ \
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index, \
+ to_next, n_left_to_next, \
+ bi0, next0); \
+ }while(0); \
+
+
+#define iface_output_x2(ipv) \
+ do { \
+ vlib_buffer_t *b0, *b1; \
+ u32 bi0, bi1; \
+ u32 next0 = next_index; \
+ u32 next1 = next_index; \
+ hicn_face_t *face0, *face1; \
+ \
+ /* Prefetch for next iteration. */ \
+ { \
+ vlib_buffer_t *b2, *b3; \
+ b2 = vlib_get_buffer (vm, from[2]); \
+ b3 = vlib_get_buffer (vm, from[3]); \
+ CLIB_PREFETCH (b2, CLIB_CACHE_LINE_BYTES, STORE); \
+ CLIB_PREFETCH (b3, CLIB_CACHE_LINE_BYTES, STORE); \
+ CLIB_PREFETCH (b2->data, CLIB_CACHE_LINE_BYTES , STORE); \
+ CLIB_PREFETCH (b3->data, CLIB_CACHE_LINE_BYTES , STORE); \
+ } \
+ \
+ /* Dequeue a packet buffer */ \
+ bi0 = from[0]; \
+ bi1 = from[1]; \
+ from += 2; \
+ n_left_from -= 2; \
+ to_next[0] = bi0; \
+ to_next[1] = bi1; \
+ to_next += 2; \
+ n_left_to_next -= 2; \
+ \
+ b0 = vlib_get_buffer (vm, bi0); \
+ b1 = vlib_get_buffer (vm, bi1); \
+ \
+ hicn_face_id_t face_id0 = vnet_buffer (b0)->ip.adj_index[VLIB_TX]; \
+ hicn_face_id_t face_id1 = vnet_buffer (b1)->ip.adj_index[VLIB_TX]; \
+ face0 = \
+ hicn_dpoi_get_from_idx (face_id0); \
+ face1 = \
+ hicn_dpoi_get_from_idx (face_id1); \
+ \
+ if (PREDICT_TRUE(face0 != NULL)) \
+ { \
+ HICN_REWRITE_DATA_IP##ipv \
+ (vm, b0, face0, &next0); \
+ stats.pkts_data_count += 1; \
+ vlib_increment_combined_counter ( \
+ &counters[face_id0 * HICN_N_COUNTER], \
+ thread_index, \
+ HICN_FACE_COUNTERS_DATA_TX, \
+ 1, \
+ vlib_buffer_length_in_chain(vm, b0));\
+ } \
+ \
+ if (PREDICT_TRUE(face1 != NULL)) \
+ { \
+ HICN_REWRITE_DATA_IP##ipv \
+ (vm, b1, face1, &next1); \
+ stats.pkts_data_count += 1; \
+ vlib_increment_combined_counter ( \
+ &counters[face_id1 * HICN_N_COUNTER], \
+ thread_index, \
+ HICN_FACE_COUNTERS_DATA_TX, \
+ 1, \
+ vlib_buffer_length_in_chain(vm, b1)); \
+ } \
+ \
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) && \
+ (b0->flags & VLIB_BUFFER_IS_TRACED))) \
+ { \
+ TRACE_OUTPUT_PKT_IP##ipv *t = \
+ vlib_add_trace (vm, node, b0, sizeof (*t)); \
+ t->pkt_type = HICN_PKT_TYPE_INTEREST; \
+ t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX]; \
+ t->next_index = next0; \
+ clib_memcpy_fast (t->packet_data, \
+ vlib_buffer_get_current (b0), \
+ sizeof (t->packet_data)); \
+ } \
+ \
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) && \
+ (b1->flags & VLIB_BUFFER_IS_TRACED))) \
+ { \
+ TRACE_OUTPUT_PKT_IP##ipv *t = \
+ vlib_add_trace (vm, node, b1, sizeof (*t)); \
+ t->pkt_type = HICN_PKT_TYPE_INTEREST; \
+ t->sw_if_index = vnet_buffer (b1)->sw_if_index[VLIB_RX]; \
+ t->next_index = next1; \
+ clib_memcpy_fast (t->packet_data, \
+ vlib_buffer_get_current (b1), \
+ sizeof (t->packet_data)); \
+ } \
+ \
+ \
+ /* Verify speculative enqueue, maybe switch current next frame */ \
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index, \
+ to_next, n_left_to_next, \
+ bi0, bi1, next0, next1); \
+ }while(0); \
+
+
+
+static uword
+hicn4_iface_output_node_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ u32 n_left_from, *from, *to_next, next_index;
+ vl_api_hicn_api_node_stats_get_reply_t stats = { 0 };
+ u32 thread_index = vm->thread_index;
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ iface_output_x2 (4);
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ iface_output_x1 (4);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ vlib_node_increment_counter (vm, node->node_index,
+ HICNFWD_ERROR_DATAS, stats.pkts_data_count);
+
+ return (frame->n_vectors);
+}
+
+/* packet trace format function */
+static u8 *
+hicn4_iface_output_format_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ hicn4_iface_output_trace_t *t =
+ va_arg (*args, hicn4_iface_output_trace_t *);
+
+ s =
+ format (s, "IFACE_IP4_OUTPUT: pkt: %d, sw_if_index %d, next index %d\n%U",
+ (int) t->pkt_type, t->sw_if_index, t->next_index,
+ format_ip4_header, t->packet_data, sizeof (t->packet_data));
+ return (s);
+}
+
+/*
+ * Node registration for the interest forwarder node
+ */
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (hicn4_iface_output_node) =
+{
+ .function = hicn4_iface_output_node_fn,
+ .name = "hicn4-iface-output",
+ .vector_size = sizeof (u32),
+ .format_trace = hicn4_iface_output_format_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+ .n_errors = ARRAY_LEN (hicn4_iface_output_error_strings),
+ .error_strings = hicn4_iface_output_error_strings,
+ .n_next_nodes = HICN4_IFACE_OUTPUT_N_NEXT,
+ /* edit / add dispositions here */
+ .next_nodes =
+ {
+ [HICN4_IFACE_OUTPUT_NEXT_LOOKUP] = "ip4-lookup",
+ [HICN4_IFACE_OUTPUT_NEXT_UDP4_ENCAP] = "udp4-encap",
+ [HICN4_IFACE_OUTPUT_NEXT_UDP6_ENCAP] = "udp6-encap"
+ },
+};
+/* *INDENT-ON* */
+
+
+static uword
+hicn6_iface_output_node_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ u32 n_left_from, *from, *to_next, next_index;
+ vl_api_hicn_api_node_stats_get_reply_t stats = { 0 };
+ u32 thread_index = vm->thread_index;
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ iface_output_x2 (6);
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ iface_output_x1 (6);
+ }
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ vlib_node_increment_counter (vm, node->node_index,
+ HICNFWD_ERROR_DATAS, stats.pkts_data_count);
+
+ return (frame->n_vectors);
+}
+
+/* packet trace format function */
+static u8 *
+hicn6_iface_output_format_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ hicn6_iface_output_trace_t *t =
+ va_arg (*args, hicn6_iface_output_trace_t *);
+
+ s =
+ format (s, "IFACE_IP6_OUTPUT: pkt: %d, sw_if_index %d, next index %d\n%U",
+ (int) t->pkt_type, t->sw_if_index, t->next_index,
+ format_ip6_header, t->packet_data, sizeof (t->packet_data));
+ return (s);
+}
+
+/*
+ * Node registration for the interest forwarder node
+ */
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (hicn6_iface_output_node) =
+{
+ .function = hicn6_iface_output_node_fn,
+ .name = "hicn6-iface-output",
+ .vector_size = sizeof (u32),
+ .format_trace = hicn6_iface_output_format_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+ .n_errors = ARRAY_LEN (hicn6_iface_output_error_strings),
+ .error_strings = hicn6_iface_output_error_strings,
+ .n_next_nodes = HICN6_IFACE_OUTPUT_N_NEXT,
+ /* edit / add dispositions here */
+ .next_nodes =
+ {
+ [HICN6_IFACE_OUTPUT_NEXT_LOOKUP] = "ip6-lookup",
+ [HICN6_IFACE_OUTPUT_NEXT_UDP4_ENCAP] = "udp4-encap",
+ [HICN6_IFACE_OUTPUT_NEXT_UDP6_ENCAP] = "udp6-encap"
+
+ },
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/hicn-plugin/src/faces/iface_node.h b/hicn-plugin/src/faces/iface_node.h
new file mode 100644
index 000000000..1a7c4291b
--- /dev/null
+++ b/hicn-plugin/src/faces/iface_node.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2020 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HICN_IFACE_NODE_H__
+#define __HICN_IFACE_NODE_H__
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+
+/**
+ * @file iface_node.h
+ *
+ * Implements the input and output iface nodes. Input iface nodes
+ * process incoming interests while output iface nodes process outgoing
+ * data packets.
+ *
+ * Input iface nodes follow the ip-lookup nodes and their purpose
+ * is to create (or retrieve, if it already exists) the incoming iface
+ * for each interest packet.
+ * The node following the input iface nodes is hicn-interest-pcslookup.
+ * Output iface nodes follow the hicn-data-fwd and hicn-interest-hitcs nodes
+ * and perform the destination NAT on each data packet. The node following the
+ * output iface nodes depends on the adjacency type: for plain ip adjacencies
+ * it is ip4/ip6-lookup, for tunnels it is the node implementing the tunnel
+ * encapsulation (udp-encap, mpls, etc.).
+ */
+
+
+/**
+ * @brief Initialize the ip iface module
+ */
+void hicn_iface_init (vlib_main_t * vm);
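+
+/*
+ * Illustrative sketch, not part of the original source: how a VLIB init hook
+ * could invoke hicn_iface_init(). The function name below is hypothetical;
+ * the plugin performs its actual initialization elsewhere.
+ *
+ *   static clib_error_t *
+ *   example_hicn_iface_module_init (vlib_main_t * vm)
+ *   {
+ *     hicn_iface_init (vm);
+ *     return NULL;
+ *   }
+ *
+ *   VLIB_INIT_FUNCTION (example_hicn_iface_module_init);
+ */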
+
+#endif // __HICN_IFACE_NODE_H__
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */