author     Jim Gibson <gibson@cisco.com>    2017-02-20 11:53:54 -0500
committer  Jim Gibson <gibson@cisco.com>    2017-02-20 12:21:12 -0500
commit     dfd7ce27fea04c1a76844e21286c2b1d6653e153 (patch)
tree       0025f965ddb68599ea824b9d9edf61b7647dd4ec
parent     9b30fc10fb1cbebe651e5a107e8ca5b24de54675 (diff)
Initial Commit: CICN VPP plugin

Change-Id: If1b965f0a4b7cfacda8f6caf6925072a9007ffb4
Signed-off-by: Jim Gibson <gibson@cisco.com>
-rw-r--r--  AUTHORS  6
-rw-r--r--  README.md  221
-rwxr-xr-x  cicn-plugin/CMakeLists.txt  113
-rw-r--r--  cicn-plugin/Makefile.am  87
-rw-r--r--  cicn-plugin/cicn/cicn.api  502
-rw-r--r--  cicn-plugin/cicn/cicn.c  481
-rw-r--r--  cicn-plugin/cicn/cicn.h  42
-rw-r--r--  cicn-plugin/cicn/cicn_all_api_h.h  21
-rw-r--r--  cicn-plugin/cicn/cicn_api.h  96
-rw-r--r--  cicn-plugin/cicn/cicn_api_handler.h  73
-rw-r--r--  cicn-plugin/cicn/cicn_api_test.c  1094
-rw-r--r--  cicn-plugin/cicn/cicn_face.c  646
-rw-r--r--  cicn-plugin/cicn/cicn_face.h  122
-rw-r--r--  cicn-plugin/cicn/cicn_fib.c  798
-rw-r--r--  cicn-plugin/cicn/cicn_fib.h  176
-rw-r--r--  cicn-plugin/cicn/cicn_hashtb.c  1567
-rw-r--r--  cicn-plugin/cicn/cicn_hashtb.h  526
-rw-r--r--  cicn-plugin/cicn/cicn_hello.c  439
-rw-r--r--  cicn-plugin/cicn/cicn_hello.h  95
-rw-r--r--  cicn-plugin/cicn/cicn_hello_inlines.h  91
-rw-r--r--  cicn-plugin/cicn/cicn_infra.h  312
-rw-r--r--  cicn-plugin/cicn/cicn_mgmt.c  2277
-rw-r--r--  cicn-plugin/cicn/cicn_mgmt.h  99
-rw-r--r--  cicn-plugin/cicn/cicn_msg_enum.h  33
-rw-r--r--  cicn-plugin/cicn/cicn_params.h  117
-rw-r--r--  cicn-plugin/cicn/cicn_parser.c  112
-rw-r--r--  cicn-plugin/cicn/cicn_parser.h  743
-rw-r--r--  cicn-plugin/cicn/cicn_pcs.c  384
-rw-r--r--  cicn-plugin/cicn/cicn_pcs.h  512
-rw-r--r--  cicn-plugin/cicn/cicn_pg.c  945
-rw-r--r--  cicn-plugin/cicn/cicn_rte_mbuf.h  69
-rw-r--r--  cicn-plugin/cicn/cicn_rte_mbuf_inlines.h  451
-rw-r--r--  cicn-plugin/cicn/cicn_siphash.h  458
-rw-r--r--  cicn-plugin/cicn/cicn_std.h  31
-rw-r--r--  cicn-plugin/cicn/cicn_types.h  30
-rw-r--r--  cicn-plugin/cicn/node.c  2031
-rw-r--r--  cicn-plugin/cicn/test/test_cicn.c  139
-rw-r--r--  cicn-plugin/cicn/test/test_cicn.h  44
-rw-r--r--  cicn-plugin/cicn/test/test_cicn_hash.c  415
-rw-r--r--  cicn-plugin/cicn/test/test_cicn_hash.h  42
-rw-r--r--  cicn-plugin/cicn/test/test_cicn_hash_cdata.c  161
-rw-r--r--  cicn-plugin/configure.ac  33
42 files changed, 16634 insertions, 0 deletions
diff --git a/AUTHORS b/AUTHORS
new file mode 100644
index 00000000..6fb88a3a
--- /dev/null
+++ b/AUTHORS
@@ -0,0 +1,6 @@
+cicn-plugin authors are listed below:
+
+ Jim Gibson <gibson@cisco.com>
+ Ilya Moiseenko <ilmoisee@cisco.com>
+ Mark Stapp
+ Spyros Mastorakis <mastorakis@cs.ucla.edu>
diff --git a/README.md b/README.md
new file mode 100644
index 00000000..2e155c18
--- /dev/null
+++ b/README.md
@@ -0,0 +1,221 @@
+FD.IO CICN project: VPP plugin
+==============================
+
+The CICN forwarder
+
+## Quick Start ##
+```
+# From the code tree root
+
+# Using automake
+$ cd cicn-plugin
+$ autoreconf -i -f
+$ mkdir -p build
+$ cd build
+$ ../configure --with-plugin-toolkit
+# OR, to omit UT code
+$ ../configure --with-plugin-toolkit --without-cicn-test
+$ make
+$ sudo make install
+
+# Using cmake
+$ cd cicn-plugin
+$ mkdir -p build
+$ cd build
+$ cmake ..
+# Or, to omit UT code
+$ cmake .. -DNO_UNIT_TEST=TRUE
+$ make
+$ sudo make install
+```
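+
+After installation, a quick sanity check is to confirm that the plugin shared objects were copied into the VPP plugin directories used by the install rules in `Makefile.am`/`CMakeLists.txt`:
+
+```
+$ ls /usr/lib/vpp_plugins/cicn_plugin.so
+$ ls /usr/lib/vpp_api_test_plugins/cicn_api_test_plugin.so
+```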
+
+## Introduction ##
+
+The CICN plugin is a high-performance CCNx ICN forwarder implemented as a plugin to VPP.
+
+The plugin provides the following functionality:
+
+ - Fast packet processing
+ - Interest aggregation
+ - Content caching
+
+## Using CICN plugin ##
+
+### Platforms ###
+
+CICN has been tested on:
+
+- Ubuntu 16.04 LTS (x86_64)
+- Ubuntu 14.04 LTS (x86_64)
+- Debian Stable/Testing (2017-03-01)
+- Red Hat Enterprise Linux 7
+- CentOS 7
+
+
+### Dependencies ###
+
+Build dependencies:
+
+- VPP 17.01
+
+Hardware dependencies:
+
+- A [DPDK](http://dpdk.org/)-compatible NIC
+
+### Getting started ###
+In order to start, the CICN plugin requires a running instance of VPP and at least one DPDK-compatible NIC. The steps required to successfully start CICN are:
+
+- Setup the host to run VPP
+- Configure VPP to use DPDK-compatible NICs
+- Start VPP
+- Configure VPP interfaces
+- Configure and start CICN
+
+Detailed information for configuring VPP can be found at [https://wiki.fd.io/view/VPP](https://wiki.fd.io/view/VPP).
+
+##### Setup the host for VPP #####
+
+VPP requires the `uio` and `igb_uio` modules to be loaded in the kernel:
+
+```
+$ sudo modprobe uio
+$ sudo modprobe igb_uio
+```
+Hugepages must be enabled in the system:
+
+```
+$ sudo sysctl -w vm.nr_hugepages=1024
+```
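+
+To make this setting persistent across reboots, the same value can also be placed in `/etc/sysctl.conf` (or a file under `/etc/sysctl.d/`), for example:
+
+```
+vm.nr_hugepages=1024
+```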
+
+If the DPDK interface we want to assign to VPP is up, we must bring it down:
+
+```
+$ sudo ifconfig <interface_name> down
+```
+
+##### Configure VPP #####
+The file `/etc/vpp/startup.conf` contains a set of parameters used to set up VPP at startup. The following example configures VPP to use one DPDK interface:
+
+``` shell
+unix {
+ nodaemon
+ log /tmp/vpp.log
+ full-coredump
+}
+
+api-trace {
+ on
+}
+
+api-segment {
+ gid vpp
+}
+
+dpdk {
+ socket-mem 1024
+ dev 0000:08:00.0
+}
+```
+`0000:08:00.0` must be replaced with the actual PCI address of the DPDK interface.
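+
+The PCI addresses of the network devices present on the host can be listed, for example, with:
+
+``` shell
+$ lspci | grep -i ethernet
+```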
+
+##### Start VPP #####
+
+VPP can be started as a process or a service:
+
+``` shell
+# Start VPP as a service in Ubuntu 16.04
+$ sudo systemctl start vpp
+
+# Start VPP as a service in Ubuntu 14.04
+$ sudo service vpp start
+
+# Start VPP as a process in both 16.04 and 14.04
+$ sudo vpp -c /etc/vpp/startup.conf
+
+```
+
+##### Configure VPP interfaces #####
+
+ICN communications run on top of IP and TCP; therefore, we need to assign an IP address to the DPDK interface that enables IP connectivity through that interface:
+
+``` shell
+# Set an IP address on the DPDK interface
+$ sudo vppctl set int ip address GigabitEthernet0/8/0 10.0.0.1/24
+
+# Bring the interface up
+$ sudo vppctl set int state GigabitEthernet0/8/0 up
+```
+`GigabitEthernet0/8/0` is the name that VPP assigned to the DPDK interface. It must be replaced with the actual name of the DPDK interface ([`sudo vppctl show interfaces`](https://docs.fd.io/vpp/17.01/clicmd_vnet_vnet.html#clicmd_show_interfaces) shows the interfaces available in VPP).
+
+
+##### Configure and start CICN #####
+
+The following three commands enable CICN, create a face pointing to a remote host, and add an entry to the FIB:
+
+``` shell
+# Start CICN plugin
+$ sudo vppctl cicn enable
+
+# Create a face for the DPDK interface
+$ sudo vppctl cicn cfg face add local 10.0.0.1:33302 remote 10.0.0.2:33302
+Face ID: 1
+
+# Add a FIB entry
+$ sudo vppctl cicn cfg fib add prefix /cicn face 1
+```
+
+- `10.0.0.1:33302` must be replaced with the IP address assigned to the DPDK interface in the previous step. The TCP port can be chosen as desired.
+- `10.0.0.2:33302` must be replaced with the IP address of the remote host we want to connect to (a mirror-image configuration for that host is sketched below).
+- `/cicn` must be replaced with the ICN prefix to forward through face 1.
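+
+For illustration, assuming the remote host also runs VPP with the CICN plugin and uses `10.0.0.2` on its DPDK interface, the mirror-image configuration on that host might look like:
+
+``` shell
+$ sudo vppctl cicn enable
+$ sudo vppctl cicn cfg face add local 10.0.0.2:33302 remote 10.0.0.1:33302
+$ sudo vppctl cicn cfg fib add prefix /cicn face 1
+```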
+
+### CICN commands ###
+A full list of the available CICN commands can be obtained through:
+
+- `sudo vppctl cicn help`
+
+If `help` is passed as a parameter to a CICN command, the output will display information about the syntax of that command.
+
+##### CICN statistics #####
+
+A fine-grained set of statistics for the ongoing communication is available through:
+
+- `sudo vppctl cicn show`
+
+##### Setting PIT, FIB and CS parameters #####
+
+If needed, it is possible to change the default values for the three CICN internal structures. The customizable parameters and the corresponding commands are:
+
+- PIT size
+ - `sudo vppctl cicn control param pit size <# of entries>`
+- PIT entry lifetime (default, min and max)
+ - `sudo vppctl cicn control param pit {dfltlife | minlife | maxlife} <seconds>`
+- FIB size
+ - `sudo vppctl cicn control param fib size <# of entries>`
+- CS size
+ - `sudo vppctl cicn control param cs size <# of entries>`
+
+Changes to the PIT, FIB and CS parameters can be made only before starting CICN.
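+
+For example, with arbitrary sizes chosen purely for illustration, the parameters are set first and CICN is enabled afterwards:
+
+``` shell
+$ sudo vppctl cicn control param fib size 2048
+$ sudo vppctl cicn control param pit size 100000
+$ sudo vppctl cicn control param cs size 4096
+$ sudo vppctl cicn enable
+```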
+
+
+### License ###
+
+This software is distributed under the following license:
+
+```
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+```
+
+
diff --git a/cicn-plugin/CMakeLists.txt b/cicn-plugin/CMakeLists.txt
new file mode 100755
index 00000000..c68a8021
--- /dev/null
+++ b/cicn-plugin/CMakeLists.txt
@@ -0,0 +1,113 @@
+# Copyright (c) 2017 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+cmake_minimum_required(VERSION 3.2)
+project(cicn_plugin)
+
+set(CICN_PLUGIN_SOURCE_FILES
+ cicn/cicn.c
+ cicn/cicn_face.c
+ cicn/cicn_fib.c
+ cicn/cicn_hashtb.c
+ cicn/cicn_hello.c
+ cicn/cicn_mgmt.c
+ cicn/cicn_parser.c
+ cicn/cicn_pcs.c
+ cicn/cicn_pg.c
+ cicn/node.c)
+
+set(CICN_PLUGIN_HEADER_FILES
+ cicn/cicn_all_api_h.h
+ cicn/cicn_api_handler.h
+ cicn/cicn_fib.h
+ cicn/cicn_hashtb.h
+ cicn/cicn_hello_inlines.h
+ cicn/cicn_mgmt.h
+ cicn/cicn_params.h
+ cicn/cicn_pcs.h
+ cicn/cicn_rte_mbuf_inlines.h
+ cicn/cicn_std.h
+ cicn/cicn_api.h
+ cicn/cicn_face.h
+ cicn/cicn.h
+ cicn/cicn_hello.h
+ cicn/cicn_infra.h
+ cicn/cicn_msg_enum.h
+ cicn/cicn_parser.h
+ cicn/cicn_rte_mbuf.h
+ cicn/cicn_siphash.h
+ cicn/cicn_types.h)
+
+if(NOT NO_UNIT_TEST)
+ list(APPEND CICN_PLUGIN_SOURCE_FILES
+ cicn/test/test_cicn.c
+ cicn/test/test_cicn_hash.c
+ cicn/test/test_cicn_hash_cdata.c)
+ list(APPEND CICN_PLUGIN_HEADER_FILES
+ cicn/test/test_cicn.h
+ cicn/test/test_cicn_hash.h)
+endif(NOT NO_UNIT_TEST)
+
+set(CICN_API_TEST_SOURCE_FILES
+ cicn/cicn_api_test.c)
+
+set(CICN_API_TEST_HEADER_FILES
+ cicn/cicn_api.h)
+
+execute_process(COMMAND mkdir -p ${CMAKE_CURRENT_BINARY_DIR}/cicn)
+add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/cicn/cicn.api.h
+ COMMAND
+ $ENV{CC} $ENV{CPPFLAGS} -E -P -x c-header ${CMAKE_CURRENT_SOURCE_DIR}/cicn/cicn.api |
+ vppapigen --input - --output ${CMAKE_CURRENT_BINARY_DIR}/cicn/cicn.api.h
+ --show-name ${CMAKE_CURRENT_BINARY_DIR}/cicn/cicn.api.h
+ DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/cicn/cicn.api)
+
+include_directories(${CMAKE_CURRENT_BINARY_DIR})
+include_directories(./)
+include_directories(cicn/test)
+include_directories(/usr/include/vpp-dpdk)
+
+set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wall -march=native -O3")
+add_library(cicn_api_test_plugin SHARED
+ ${CICN_API_TEST_SOURCE_FILES}
+ ${CICN_API_TEST_HEADER_FILES}
+ ${CMAKE_CURRENT_BINARY_DIR}/cicn/cicn.api.h)
+
+set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DDPDK=1 -DCICN_VPP_PLUGIN=1")
+add_library(cicn_plugin SHARED
+ ${CICN_PLUGIN_SOURCE_FILES}
+ ${CICN_PLUGIN_HEADER_FILES}
+ ${CMAKE_CURRENT_BINARY_DIR}/cicn/cicn.api.h)
+
+set_target_properties(cicn_plugin
+ PROPERTIES
+ LINKER_LANGUAGE C)
+set_target_properties(cicn_api_test_plugin
+ PROPERTIES
+ LINKER_LANGUAGE C)
+
+target_link_libraries(cicn_plugin)
+target_link_libraries(cicn_api_test_plugin)
+
+set(VPP_INSTALL_PLUGIN /usr/lib/vpp_plugins)
+set(VPP_INSTALL_API_TEST_PLUGIN /usr/lib/vpp_api_test_plugins)
+
+install(DIRECTORY DESTINATION ${VPP_INSTALL_PLUGIN})
+install(TARGETS cicn_plugin
+ DESTINATION
+ ${VPP_INSTALL_PLUGIN})
+
+install(DIRECTORY DESTINATION ${VPP_INSTALL_API_TEST_PLUGIN})
+install(TARGETS cicn_api_test_plugin
+ DESTINATION
+ ${VPP_INSTALL_API_TEST_PLUGIN})
diff --git a/cicn-plugin/Makefile.am b/cicn-plugin/Makefile.am
new file mode 100644
index 00000000..b6e659a3
--- /dev/null
+++ b/cicn-plugin/Makefile.am
@@ -0,0 +1,87 @@
+# Copyright (c) 2017 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+AUTOMAKE_OPTIONS = foreign subdir-objects
+
+AM_CFLAGS = -Wall -Werror -march=native -I@TOOLKIT_INCLUDE@ \
+ -DDPDK=1 \
+ -I/usr/include/vpp-dpdk
+
+lib_LTLIBRARIES = cicn_plugin.la cicn_api_test_plugin.la
+cicn_plugin_oper_sources = \
+ cicn/cicn.c \
+ cicn/cicn_face.c \
+ cicn/cicn_fib.c \
+ cicn/cicn_hashtb.c \
+ cicn/cicn_hello.c \
+ cicn/cicn_mgmt.c \
+ cicn/cicn_parser.c \
+ cicn/cicn_pg.c \
+ cicn/cicn_pcs.c \
+ cicn/node.c
+
+cicn_plugin_la_SOURCES = $(cicn_plugin_oper_sources)
+
+# UT sources, optionally included in plugin for execution under "cicn test"
+cicn_plugin_test_sources = \
+ cicn/test/test_cicn.c \
+ cicn/test/test_cicn_hash.c cicn/test/test_cicn_hash_cdata.c
+
+# include/exclude cicn ut modules in plugin, compile modules regardless
+if WITH_CICN_TEST
+cicn_plugin_la_SOURCES += $(cicn_plugin_test_sources)
+else
+# libtest_cicn.la is never used, only built if tests omitted from cicn_plugin_la
+# (goal is to force compilation in all cases)
+noinst_LTLIBRARIES = libtest_cicn.la
+libtest_cicn_la_SOURCES = $(cicn_plugin_test_sources)
+libtest_cicn_la_CPPFLAGS = $(cicn_plugin_la_CPPFLAGS)
+endif
+
+# CICN_VPP_PLUGIN is only set for vpp plugin, not api client library plugin
+cicn_plugin_la_CPPFLAGS = \
+ -DCICN_VPP_PLUGIN=1
+cicn_plugin_la_LDFLAGS = -module
+
+BUILT_SOURCES = cicn/cicn.api.h
+
+SUFFIXES = .api.h .api
+
+%.api.h: %.api
+ mkdir -p `dirname $@` ; \
+ $(CC) $(CPPFLAGS) -E -P -C -x c $^ \
+ | vppapigen --input - --output $@ --show-name $@
+
+nobase_include_HEADERS = \
+ cicn/cicn_all_api_h.h \
+ cicn/cicn_msg_enum.h \
+ cicn/cicn_api.h \
+ cicn/cicn.api.h
+
+cicn_api_test_plugin_la_SOURCES = \
+ cicn/cicn_api_test.c cicn/cicn_plugin.api.h cicn/cicn_api.h
+cicn_api_test_plugin_la_CPPFLAGS = \
+ -DCICN_VPP_CLIENTLIB=1
+cicn_api_test_plugin_la_LDFLAGS = -module
+
+if WITH_PLUGIN_TOOLKIT
+install-data-hook:
+ mkdir -p /usr/lib/vpp_plugins
+ mkdir -p /usr/lib/vpp_api_test_plugins
+ cp -p $(prefix)/lib/cicn_plugin.so.*.*.* \
+ /usr/lib/vpp_plugins/cicn_plugin.so
+ cp -p $(prefix)/lib/cicn_api_test_plugin.so.*.*.* \
+ /usr/lib/vpp_api_test_plugins/cicn_api_test_plugin.so
+ rm -f $(prefix)/lib/cicn_plugin.*
+ rm -f $(prefix)/lib/cicn_api_test_plugin.*
+endif
diff --git a/cicn-plugin/cicn/cicn.api b/cicn-plugin/cicn/cicn.api
new file mode 100644
index 00000000..ac76f388
--- /dev/null
+++ b/cicn-plugin/cicn/cicn.api
@@ -0,0 +1,502 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * cicn plug-in api description/structures, shared between plugin
+ * and management client(s)
+ *
+ * - Include the generated file, see BUILT_SOURCES in Makefile.am
+ */
+define cicn_api_node_params_set {
+ /* Client identifier, set from api_main.my_client_index */
+ u32 client_index;
+
+ /* Arbitrary context, so client can match reply to request */
+ u32 context;
+
+ /* Enable / disable ICN forwarder in VPP */
+ u8 enable_disable;
+
+ /* PIT maximum size, otherwise -1 to assign default value */
+ i32 pit_max_size;
+
+ /* FIB maximum size, otherwise -1 to assign default value */
+ i32 fib_max_size;
+
+ /* CS maximum size, otherwise -1 to assign default value */
+ i32 cs_max_size;
+
+ /* Default PIT entry lifetime */
+ f64 pit_dflt_lifetime_sec;
+
+ /* Lower bound on PIT entry lifetime */
+ f64 pit_min_lifetime_sec;
+
+ /* Upper bound on PIT entry lifetime */
+ f64 pit_max_lifetime_sec;
+};
+
+define cicn_api_node_params_set_reply {
+ /* From the request */
+ u32 context;
+
+ /* Return value, zero means all OK */
+ i32 retval;
+};
+
+define cicn_api_node_params_get {
+ /* Client identifier, set from api_main.my_client_index */
+ u32 client_index;
+
+ /* Arbitrary context, so client can match reply to request */
+ u32 context;
+};
+
+define cicn_api_node_params_get_reply {
+ /* From the request */
+ u32 context;
+
+ /* Return value, zero means all OK */
+ i32 retval;
+
+ /* Enabled / disabled flag */
+ u8 is_enabled;
+
+ /* compile-time plugin features */
+ u8 feature_multithread;
+ u8 feature_cs;
+ u8 feature_dpdk_rtembuf_cloning;
+ u8 feature_vpp_vlib_cloning;
+
+ /* Number of VPP workers */
+ u32 worker_count;
+
+ /* PIT maximum size, otherwise -1 to assign default value */
+ u32 pit_max_size;
+
+ /* FIB maximum size, otherwise -1 to assign default value */
+ u32 fib_max_size;
+
+ /* CS maximum size, otherwise -1 to assign default value */
+ u32 cs_max_size;
+
+ /* Default PIT entry lifetime */
+ f64 pit_dflt_lifetime_sec;
+
+ /* Lower bound on PIT entry lifetime */
+ f64 pit_min_lifetime_sec;
+
+ /* Upper bound on PIT entry lifetime */
+ f64 pit_max_lifetime_sec;
+};
+
+define cicn_api_face_add {
+ /* Client identifier, set from api_main.my_client_index */
+ u32 client_index;
+
+ /* Arbitrary context, so client can match reply to request */
+ u32 context;
+
+ /* IPv4 local address */
+ u32 local_addr;
+
+ /* IPv4 local port number */
+ u16 local_port;
+
+ /* IPv4 destination (next-hop) address*/
+ u32 remote_addr;
+
+ /* IPv4 destination (next-hop) port number */
+ u16 remote_port;
+};
+
+define cicn_api_face_add_reply {
+ /* From the request */
+ u32 context;
+
+ /* Return value: new Face ID, -1 means no Face was created */
+ i32 faceid;
+
+ /* Return value, zero means all OK */
+ i32 retval;
+};
+
+define cicn_api_face_delete {
+ /* Client identifier, set from api_main.my_client_index */
+ u32 client_index;
+
+ /* Arbitrary context, so client can match reply to request */
+ u32 context;
+
+ /* A Face ID to be deleted */
+ i32 faceid;
+};
+
+define cicn_api_face_delete_reply {
+ /* From the request */
+ u32 context;
+
+ /* Return value, zero means all OK */
+ i32 retval;
+};
+
+define cicn_api_face_params_get {
+ /* Client identifier, set from api_main.my_client_index */
+ u32 client_index;
+
+ /* Arbitrary context, so client can match reply to request */
+ u32 context;
+
+ /* A Face to be retrieved */
+ i32 faceid;
+};
+
+define cicn_api_face_params_get_reply {
+ /* From the request */
+ u32 context;
+
+ /* Return value, zero means all OK */
+ i32 retval;
+
+ /* Local IP address */
+ u32 local_addr;
+
+ /* Local port */
+ u16 local_port;
+
+ /* Remote IP address */
+ u32 remote_addr;
+
+ /* Remote port */
+ u16 remote_port;
+
+ /* Face flags */
+ i32 flags;
+
+ /* VPP interface (index) associated with the face */
+ i32 sw_interface_id;
+};
+
+define cicn_api_face_props_get {
+ /* Client identifier, set from api_main.my_client_index */
+ u32 client_index;
+
+ /* Arbitrary context, so client can match reply to request */
+ u32 context;
+};
+
+define cicn_api_face_props_get_reply {
+ /* Client identifier, set from api_main.my_client_index */
+ u32 client_index;
+
+ /* Arbitrary context, so client can match reply to request */
+ u32 context;
+
+ /* Return value, zero means all OK */
+ i32 retval;
+
+ /* Number of valid entries in the response */
+ i32 nentries;
+
+ /* Array of 1000 cicn_api_face_entry_t */
+ u8 face[24000];
+};
+
+define cicn_api_face_stats_get {
+ /* Client identifier, set from api_main.my_client_index */
+ u32 client_index;
+
+ /* Arbitrary context, so client can match reply to request */
+ u32 context;
+
+ /* Face ID to retrieve stats from */
+ i32 faceid;
+};
+
+define cicn_api_face_stats_get_reply {
+ /* Client identifier, set from api_main.my_client_index */
+ u32 client_index;
+
+ /* Arbitrary context, so client can match reply to request */
+ u32 context;
+
+ /* Return value, zero means all OK */
+ i32 retval;
+
+ /* Face ID */
+ i32 faceid;
+
+ /* Interest msgs originated */
+ u64 orig_interests;
+
+ /* Data msgs originated */
+ u64 orig_datas;
+
+ /* InterestReturn (i.e. network level NACK) msgs originated */
+ u64 orig_naks;
+
+ /* Interest msgs terminated */
+ u64 term_interests;
+
+ /* Data msgs terminated */
+ u64 term_datas;
+
+ /* InterestReturn (i.e. network level NACK) msgs terminated */
+ u64 term_naks;
+
+ /* Interest msgs received */
+ u64 in_interests;
+
+ /* Data msgs received */
+ u64 in_datas;
+
+ /* InterestReturn (i.e. network level NACK) msgs received */
+ u64 in_naks;
+
+ /* Interest msgs transmitted */
+ u64 out_interests;
+
+ /* Data msgs transmitted */
+ u64 out_datas;
+
+ /* InterestReturn (i.e. network level NACK) msgs transmitted */
+ u64 out_naks;
+};
+
+define cicn_api_fib_entry_nh_add {
+ /* Client identifier, set from api_main.my_client_index */
+ u32 client_index;
+
+ /* Arbitrary context, so client can match reply to request */
+ u32 context;
+
+ /* Prefix to be added to the FIB */
+ u8 prefix[500];
+
+ /* A Face ID to the next hop forwarder for the specified prefix */
+ i32 faceid;
+
+ /* A weight (priority) of the specified Face ID */
+ i32 weight;
+};
+
+define cicn_api_fib_entry_nh_add_reply {
+ /* From the request */
+ u32 context;
+
+ /* Return value, zero means all OK */
+ i32 retval;
+};
+
+define cicn_api_fib_entry_nh_delete {
+ /* Client identifier, set from api_main.my_client_index */
+ u32 client_index;
+
+ /* Arbitrary context, so client can match reply to request */
+ u32 context;
+
+ /* Prefix to be removed from the FIB */
+ u8 prefix[500];
+
+ /* Specific next-hop to be removed, or 0 to remove all next-hops */
+ i32 faceid;
+};
+
+define cicn_api_fib_entry_nh_delete_reply {
+ /* From the request */
+ u32 context;
+
+ /* Return value, zero means all OK */
+ i32 retval;
+};
+
+define cicn_api_fib_entry_props_get {
+ /* Client identifier, set from api_main.my_client_index */
+ u32 client_index;
+
+ /* Arbitrary context, so client can match reply to request */
+ u32 context;
+
+ /* Requested page number */
+ i32 pagenum;
+};
+
+define cicn_api_fib_entry_props_get_reply {
+ /* Client identifier, set from api_main.my_client_index */
+ u32 client_index;
+
+ /* Arbitrary context, so client can match reply to request */
+ u32 context;
+
+ /* Array of 512 cicn_api_fib_entry_t */
+ u8 fibentry[324608];
+
+ /* Number of valid FIB entries in this page */
+ i32 nentries;
+
+ /* Requested page number, or -1 if page number is out of bound */
+ i32 pagenum;
+
+ /* Return value, zero means all OK */
+ i32 retval;
+};
+
+define cicn_api_fib_xprops_get {
+ /* Client identifier, set from api_main.my_client_index */
+ u32 client_index;
+
+ /* Arbitrary context, so client can match reply to request */
+ u32 context;
+};
+
+define cicn_api_fib_details {
+ /* Client identifier, set from api_main.my_client_index */
+ u32 client_index;
+
+ /* Arbitrary context, so client can match reply to request */
+ u32 context;
+
+ /* Name prefix */
+ u8 prefix[500];
+
+ /* Number of valid next-hops (faces) */
+ i32 nfaces;
+
+ /* Next-hop Face IDs */
+ i32 faceid[16];
+
+ /* Face weights */
+ i32 faceweight[16];
+ };
+
+define cicn_api_node_stats_get {
+ /* Client identifier, set from api_main.my_client_index */
+ u32 client_index;
+
+ /* Arbitrary context, so client can match reply to request */
+ u32 context;
+};
+
+define cicn_api_node_stats_get_reply {
+ /* Client identifier, set from api_main.my_client_index */
+ u32 client_index;
+
+ /* Arbitrary context, so client can match reply to request */
+ u32 context;
+
+ /* Return value, zero means all OK */
+ i32 retval;
+
+ /* ICN packets processed */
+ u64 pkts_processed;
+
+ /* ICN interests forwarded */
+ u64 pkts_interest_count;
+
+ /* ICN data msgs forwarded */
+ u64 pkts_data_count;
+
+ /* ICN Nak msgs forwarded */
+ u64 pkts_nak_count;
+
+ /* ICN cached data msg replies */
+ u64 pkts_from_cache_count;
+
+ /* ICN Nak msgs originated */
+ u64 pkts_nacked_interests_count;
+
+ /* ICN hoplimit exceeded errors */
+ u64 pkts_nak_hoplimit_count;
+
+ /* ICN no-route errors */
+ u64 pkts_nak_no_route_count;
+
+ /* ICN no PIT entry drops */
+ u64 pkts_no_pit_count;
+
+ /* ICN expired PIT entries */
+ u64 pit_expired_count;
+
+ /* ICN expired CS entries */
+ u64 cs_expired_count;
+
+ /* ICN LRU CS entries freed */
+ u64 cs_lru_count;
+
+ /* ICN msgs dropped due to no packet buffers */
+ u64 pkts_drop_no_buf;
+
+ /* ICN Interest messages aggregated in PIT */
+ u64 interests_aggregated;
+
+ /* ICN Interest messages retransmitted */
+ u64 interests_retx;
+
+ /* Number of entries in PIT at the present moment */
+ u64 pit_entries_count;
+
+ /* Number of entries in CS at the present moment */
+ u64 cs_entries_count;
+};
+
+define cicn_api_test_run_get {
+ u32 client_index; // Client identifier, set from api_main.my_client_index
+ u32 context; // Arbitrary context, so client can match reply to req
+ i32 testid; // test to run or -1 for all
+};
+
+define cicn_api_test_run_get_reply {
+ u32 client_index; // Client identifier, set from api_main.my_client_index
+ u32 context; // Arbitrary context, so client can match reply to req
+ i32 retval; // Return value, zero means all OK
+ i32 nentries; // Number of valid entries (suites) in the response
+
+  u8 suites[10000]; // Array of 10000/176=56 cicn_api_test_suite_results_t
+};
+
+define cicn_api_face_events_subscribe {
+ /* Client identifier, set from api_main.my_client_index */
+ u32 client_index;
+
+ /* Arbitrary context, so client can match reply to request */
+ u32 context;
+
+ /* 1 => register for events, 0 => cancel registration */
+ u16 enable_disable;
+};
+
+define cicn_api_face_events_subscribe_reply {
+ /* Client identifier, set from api_main.my_client_index */
+ u32 client_index;
+
+ /* Arbitrary context, so client can match reply to request */
+ u32 context;
+
+ /* Return value, zero means all OK */
+ i32 retval;
+};
+
+define cicn_api_face_event {
+ /* Client identifier, set from api_main.my_client_index */
+ u32 client_index;
+
+ /* Arbitrary context, so client can match reply to request */
+ u32 context;
+
+ /* Face ID */
+ i32 faceid;
+
+ /* Face flags */
+ i32 flags;
+};
diff --git a/cicn-plugin/cicn/cicn.c b/cicn-plugin/cicn/cicn.c
new file mode 100644
index 00000000..d44eeb6f
--- /dev/null
+++ b/cicn-plugin/cicn/cicn.c
@@ -0,0 +1,481 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * cicn.c - skeleton vpp engine plug-in
+ */
+
+#include <vnet/vnet.h>
+#include <vnet/plugin/plugin.h>
+
+#include <cicn/cicn.h>
+#include <cicn/cicn_api_handler.h>
+
+static vlib_node_registration_t icn_process_node;
+
+cicn_main_t cicn_main;
+/* Module vars */
+int cicn_infra_fwdr_initialized = 0;
+
+cicn_face_db_t cicn_face_db;
+
+/* Global forwarder name info */
+cicn_infra_fwdr_name_t cicn_infra_fwdr_name;
+
+/* Global generation value, updated for (some? all?) config changes */
+cicn_infra_shard_t cicn_infra_gshard;
+
+/* Fixed array for worker threads, to be indexed by worker index */
+cicn_infra_shard_t cicn_infra_shards[CICN_INFRA_WORKERS_MAX];
+
+/* Global time counters we're trying out for opportunistic hashtable
+ * expiration.
+ */
+uint16_t cicn_infra_fast_timer; /* Counts at 1 second intervals */
+uint16_t cicn_infra_slow_timer; /* Counts at 1 minute intervals */
+
+/*
+ * Initialize support for cicn_rc_e codes
+ * - build hash table mapping codes to printable strings
+ */
+static void
+cicn_rc_strings_init (void)
+{
+ cicn_main.cicn_rc_strings = hash_create (0, sizeof (uword));
+
+#define _(n,v,s) hash_set (cicn_main.cicn_rc_strings, v, s);
+ foreach_cicn_rc;
+#undef _
+}
+
+/*
+ * modify/return supplied vector with printable representation of crc,
+ * which is string name if available, otherwise numeric value.
+ */
+const u8 *
+cicn_rc_c_string (u8 * s, cicn_rc_e crc)
+{
+ uword *p;
+
+ p = hash_get (cicn_main.cicn_rc_strings, crc);
+
+ if (p)
+ {
+ s = format (s, "%s", p[0]);
+ }
+ else
+ {
+ s = format (s, "%d", crc);
+ }
+ return (s);
+}
+
+/*
+ * Return printable representation of crc.
+ */
+const char *
+cicn_rc_str (cicn_rc_e crc)
+{
+ char *crc_str;
+
+ uword *p;
+
+ p = hash_get (cicn_main.cicn_rc_strings, crc);
+
+ if (p)
+ {
+ crc_str = (char *) p[0];
+ }
+ else
+ {
+ crc_str = "unknown";
+ }
+ return (crc_str);
+}
+
+/*
+ * Return printable representation of cicn_rd.
+ * - if cicn_rc is set to an error, use that code for string
+ * - otherwise use ux_rc
+ */
+const char *
+cicn_rd_str (cicn_rd_t * cicn_rd)
+{
+ const char *str;
+ if (cicn_rd->rd_cicn_rc != CICN_RC_OK)
+ {
+ str = cicn_rc_str (cicn_rd->rd_cicn_rc);
+ }
+ else
+ {
+ str = strerror (cicn_rd->rd_ux_rc);
+ }
+ return (str);
+}
+
+/*
+ * Init CICN forwarder with configurable FIB, PIT, CS sizes
+ */
+static int
+cicn_infra_fwdr_init (uint32_t fib_size, uint32_t shard_pit_size,
+ uint32_t shard_cs_size)
+{
+ int ret = 0;
+
+ if (cicn_infra_fwdr_initialized)
+ {
+ cicn_cli_output ("cicn: already enabled");
+ goto done;
+ }
+
+ cicn_rc_strings_init ();
+
+ /* Initialize the forwarder's name structure */
+ cicn_sstrncpy (cicn_infra_fwdr_name.fn_str, "no-name",
+ sizeof (cicn_infra_fwdr_name.fn_str));
+ cicn_infra_fwdr_name.fn_reply_payload_flen = 0;
+
+ /* Init per worker limits */
+ cicn_infra_shard_pit_size = shard_pit_size;
+ cicn_infra_shard_cs_size = shard_cs_size;
+
+ /* Init face cache */
+ cicn_face_db.entry_count = 0;
+
+ /* Init event subscribers' info */
+ cicn_main.n_face_event_subscribers = 0;
+
+ /* Init the config generation number values */
+ cicn_infra_gshard.cfg_generation = 1LL;
+ memset (cicn_infra_shards, 0, sizeof (cicn_infra_shards));
+
+ /* Init the global time-compression counters */
+ cicn_infra_fast_timer = 1;
+ cicn_infra_slow_timer = 1;
+
+ /* Init global FIB */
+ ret = cicn_fib_create (&(cicn_main.fib), fib_size);
+
+done:
+
+ cicn_cli_output ("cicn: fwdr initialize => %d", ret);
+
+ if ((ret == AOK) && !cicn_infra_fwdr_initialized)
+ {
+ cicn_infra_fwdr_initialized = 1;
+ }
+
+ return (ret);
+}
+
+/*
+ * Action function shared between message handler and debug CLI
+ * NOTICE: we're only 'enabling' now
+ */
+int
+cicn_infra_plugin_enable_disable (int enable_disable,
+ int fib_size_req,
+ int pit_size_req,
+ f64 pit_dflt_lifetime_sec_req,
+ f64 pit_min_lifetime_sec_req,
+ f64 pit_max_lifetime_sec_req,
+ int cs_size_req)
+{
+ int ret = 0;
+
+ cicn_main_t *sm = &cicn_main;
+ vlib_thread_main_t *tm = vlib_get_thread_main ();
+ vlib_thread_registration_t *tr;
+ uword *p;
+ uint32_t fib_size, pit_size, cs_size;
+
+ /* Notice if we're already enabled... */
+ if (sm->is_enabled)
+ {
+ vlib_cli_output (sm->vlib_main, "cicn: already enabled");
+ ret = 0;
+ goto done;
+ }
+
+ /* Figure out how many workers will be running */
+ p = hash_get_mem (tm->thread_registrations_by_name, "workers");
+ tr = (vlib_thread_registration_t *) p[0];
+ if (tr)
+ {
+ sm->worker_count = tr->count;
+ sm->worker_first_index = tr->first_index;
+ vlib_cli_output (sm->vlib_main,
+ "cicn: worker count %u, first idx %u",
+ sm->worker_count, sm->worker_first_index);
+ }
+ else
+ {
+ sm->worker_count = 0;
+ sm->worker_first_index = 0;
+
+ vlib_cli_output (sm->vlib_main, "cicn: no worker threads");
+ }
+ sm->shard_count = (sm->worker_count == 0) ? 1 : sm->worker_count;
+
+ /* Set up params and call fwdr_init set up FIB, PIT/CS, forwarder nodes */
+
+ /* Check the range and assign some globals */
+ if (pit_min_lifetime_sec_req < 0)
+ {
+ sm->pit_lifetime_min_ms = CICN_PARAM_PIT_LIFETIME_DFLT_MIN_MS;
+ }
+ else
+ {
+ if (pit_min_lifetime_sec_req < CICN_PARAM_PIT_LIFETIME_BOUND_MIN_SEC ||
+ pit_min_lifetime_sec_req > CICN_PARAM_PIT_LIFETIME_BOUND_MAX_SEC)
+ {
+ ret = EINVAL;
+ goto done;
+ }
+ sm->pit_lifetime_min_ms = pit_min_lifetime_sec_req * SEC_MS;
+ }
+
+ if (pit_max_lifetime_sec_req < 0)
+ {
+ sm->pit_lifetime_max_ms = CICN_PARAM_PIT_LIFETIME_DFLT_MAX_MS;
+ }
+ else
+ {
+ if (pit_max_lifetime_sec_req < CICN_PARAM_PIT_LIFETIME_BOUND_MIN_SEC ||
+ pit_max_lifetime_sec_req > CICN_PARAM_PIT_LIFETIME_BOUND_MAX_SEC)
+ {
+ ret = EINVAL;
+ goto done;
+ }
+ sm->pit_lifetime_max_ms = pit_max_lifetime_sec_req * SEC_MS;
+ }
+ if (sm->pit_lifetime_min_ms > sm->pit_lifetime_max_ms)
+ {
+ ret = EINVAL;
+ goto done;
+ }
+
+ if (pit_dflt_lifetime_sec_req < 0)
+ {
+ sm->pit_lifetime_dflt_ms = CICN_PARAM_PIT_LIFETIME_DFLT_DFLT_MS;
+ }
+ else
+ {
+ sm->pit_lifetime_dflt_ms = pit_dflt_lifetime_sec_req * SEC_MS;
+ }
+ if (sm->pit_lifetime_dflt_ms < sm->pit_lifetime_min_ms ||
+ sm->pit_lifetime_dflt_ms > sm->pit_lifetime_max_ms)
+ {
+      ret = EINVAL;
+      goto done;
+ }
+
+ if (fib_size_req < 0)
+ {
+ fib_size = CICN_PARAM_FIB_ENTRIES_DFLT;
+ }
+ else
+ {
+ if (fib_size_req < CICN_PARAM_FIB_ENTRIES_MIN ||
+ fib_size_req > CICN_PARAM_FIB_ENTRIES_MAX)
+ {
+ ret = EINVAL;
+ goto done;
+ }
+ fib_size = (uint32_t) fib_size_req;
+ }
+
+ if (pit_size_req < 0)
+ {
+ pit_size = CICN_PARAM_PIT_ENTRIES_DFLT;
+ }
+ else
+ {
+ if (pit_size_req < CICN_PARAM_PIT_ENTRIES_MIN ||
+ pit_size_req > CICN_PARAM_PIT_ENTRIES_MAX)
+ {
+ ret = EINVAL;
+ goto done;
+ }
+ pit_size = (uint32_t) pit_size_req;
+ }
+
+ if (cs_size_req < 0)
+ {
+ cs_size = CICN_PARAM_CS_ENTRIES_DFLT;
+ }
+ else
+ {
+ if (cs_size_req > CICN_PARAM_CS_ENTRIES_MAX)
+ {
+ ret = EINVAL;
+ goto done;
+ }
+ cs_size = (uint32_t) cs_size_req;
+ }
+
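+  /*
+   * The requested totals are split evenly across forwarding shards (one
+   * per worker thread, or a single shard when running main-thread only),
+   * e.g. a 100000-entry PIT with 4 workers yields 25000 entries per shard.
+   */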
+ pit_size = pit_size / sm->shard_count;
+ cs_size = cs_size / sm->shard_count;
+
+ ret = cicn_infra_fwdr_init (fib_size, pit_size, cs_size);
+ if (ret != 0)
+ {
+ vlib_cli_output (sm->vlib_main,
+ "cicn: enable_disable failed => %d", ret);
+ goto done;
+ }
+
+#if CICN_FEATURE_MULTITHREAD
+ /* If we're not running main-thread only, set up a relationship between
+ * the dpdk worker handoff node and our forwarding node.
+ */
+ if (sm->worker_count > 1)
+ {
+ /* Engage with the worker thread handoff node so that we can dispatch
+ * through it from our dist node directly to our forwarder node
+ */
+ sm->fwd_next_node = vlib_node_add_next (sm->vlib_main,
+ handoff_dispatch_node.index,
+ icnfwd_node.index);
+ vlib_cli_output (sm->vlib_main,
+ "cicn: handoff node %u, fwd node next idx %u",
+ handoff_dispatch_node.index, sm->fwd_next_node);
+ }
+#endif //CICN_FEATURE_MULTITHREAD
+
+ ret = cicn_hello_plugin_activation_init (sm->vlib_main); //start adj protocol
+ if (ret != AOK)
+ {
+ goto done;
+ }
+
+ sm->is_enabled = 1;
+
+done:
+
+ return (ret);
+}
+
+/*
+ * The entry-point for the ICN background process, which does...
+ * background things, like garbage collection, for us.
+ */
+static uword
+icn_process_fn (vlib_main_t * vm, vlib_node_runtime_t * rt, vlib_frame_t * f)
+{
+#define CICN_PROCESS_WAIT_TIME 1.0 /* 1 second */
+
+ f64 timeout = CICN_PROCESS_WAIT_TIME;
+ f64 tnow, tnext = 0.0;
+ uword event_type;
+ uword *event_data = 0;
+ int timer_counter = 0;
+
+ while (1)
+ {
+ vlib_process_wait_for_event_or_clock (vm, timeout);
+
+ event_type = vlib_process_get_events (vm, &event_data);
+
+ tnow = vlib_time_now (cicn_main.vlib_main);
+ if (tnext == 0.0)
+ {
+ tnext = tnow + CICN_INFRA_FAST_TIMER_SECS;
+ }
+
+ /* Update the timeout compression counters we're trying for
+ * opportunistic timeouts in the hashtables.
+ */
+ if (tnow >= tnext)
+ {
+ cicn_infra_fast_timer =
+ cicn_infra_seq16_sum (cicn_infra_fast_timer, 1);
+
+ if ((++timer_counter % CICN_INFRA_SLOW_TIMER_SECS) == 0)
+ {
+ cicn_infra_slow_timer =
+ cicn_infra_seq16_sum (cicn_infra_slow_timer, 1);
+ timer_counter = 0;
+ }
+
+ tnext = tnow + CICN_INFRA_FAST_TIMER_SECS;
+ }
+
+ switch (event_type)
+ {
+ case ~0:
+ default:
+ /* Reset timeout */
+ timeout = CICN_PROCESS_WAIT_TIME;
+ break;
+
+ } /* End switch() */
+
+ vec_reset_length (event_data);
+ }
+
+ /* NOTREACHED */
+ return 0;
+}
+
+VLIB_REGISTER_NODE (icn_process_node, static) =
+{
+.function = icn_process_fn,.type = VLIB_NODE_TYPE_PROCESS,.name =
+ "icn-process",.process_log2_n_stack_bytes = 16};
+
+/*
+ * Init entry-point for the icn plugin
+ */
+static clib_error_t *
+cicn_init (vlib_main_t * vm)
+{
+ clib_error_t *error = 0;
+
+ error = cicn_api_plugin_hookup (vm);
+
+ return error;
+}
+
+VLIB_INIT_FUNCTION (cicn_init);
+
+/*
+ * This routine exists to convince the vlib plugin framework that
+ * we haven't accidentally copied a random .dll into the plugin directory.
+ *
+ * Also collects global variable pointers passed from the vpp engine
+ */
+
+clib_error_t *
+vlib_plugin_register (vlib_main_t * vm, vnet_plugin_handoff_t * h,
+ int from_early_init)
+{
+ cicn_main_t *sm = &cicn_main;
+ clib_error_t *error = 0;
+
+ sm->vlib_main = vm;
+ sm->vnet_main = h->vnet_main;
+ sm->ethernet_main = h->ethernet_main;
+
+ /* Init other elements in the 'main' struct */
+ sm->is_enabled = 0;
+ sm->fwd_next_node = ~0;
+
+ sm->pgen_enabled = 0;
+ sm->pgen_clt_src_addr = sm->pgen_clt_dest_addr = 0;
+ sm->pgen_clt_src_port = sm->pgen_clt_dest_port = 0;
+
+ sm->pgen_svr_enabled = 0;
+
+ return error;
+}
+
diff --git a/cicn-plugin/cicn/cicn.h b/cicn-plugin/cicn/cicn.h
new file mode 100644
index 00000000..81028d81
--- /dev/null
+++ b/cicn-plugin/cicn/cicn.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * cicn.h - master include file
+ */
+
+#ifndef __included_cicn_h__
+#define __included_cicn_h__
+
+#if !CICN_VPP_PLUGIN
+#error "cicn-internal file included externally"
+#endif
+
+#include <netinet/in.h>
+
+#include "cicn_types.h"
+#include "cicn_std.h"
+#include "cicn_api_handler.h" // cicn_infra.h includes vl_api subscriber
+
+#include "cicn_params.h"
+
+#include "cicn_parser.h"
+#include "cicn_fib.h"
+#include "cicn_hello.h"
+#include "cicn_face.h"
+#include "cicn_mgmt.h"
+
+#include "cicn_infra.h"
+
+#endif /* __included_cicn_h__ */
diff --git a/cicn-plugin/cicn/cicn_all_api_h.h b/cicn-plugin/cicn/cicn_all_api_h.h
new file mode 100644
index 00000000..5136a0cc
--- /dev/null
+++ b/cicn-plugin/cicn/cicn_all_api_h.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * cicn plug-in API header file, wrapper for generated file
+ * - Shared between the plugin and its management client(s)
+ * - Include the generated file, see BUILT_SOURCES in Makefile.am
+ */
+
+#include <cicn/cicn.api.h>
diff --git a/cicn-plugin/cicn/cicn_api.h b/cicn-plugin/cicn/cicn_api.h
new file mode 100644
index 00000000..9ce78970
--- /dev/null
+++ b/cicn-plugin/cicn/cicn_api.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * cicn_api.h: definitions shared between plugin and its api clients
+ */
+
+#ifndef _cicn_api_h_
+#define _cicn_api_h_
+
+/* CICN API constants */
+// for search (vpp currently uses 0 directly rather than VNET_API_ERROR_NONE)
+#define CICN_VNET_API_ERROR_NONE ((vnet_api_error_t)0)
+
+#define CICN_API_FIB_ENTRY_NHOP_WGHT_UNSET (-1)
+
+/* define message structures */
+#define vl_typedefs
+#include <cicn/cicn_all_api_h.h>
+#undef vl_typedefs
+
+/* Face entry:
+ * Total size: 24 bytes
+ */
+typedef struct cicn_api_face_entry_s
+{
+ /* Face ID */
+ i32 faceid;
+
+ /* Local IP address */
+ u32 local_addr;
+
+ /* Local port */
+ u16 local_port;
+
+ /* Remote IP address */
+ u32 remote_addr;
+
+ /* Remote port */
+ u16 remote_port;
+
+ /* Face flags */
+ i32 flags;
+
+ /* VPP interface (index) associated with the face */
+ i32 sw_interface_id;
+
+ u32 fib_nhs; /* fib nhs using this face */
+} cicn_api_face_entry_t;
+
+/* FIB entry: 500-byte long name prefixes and up to 16 next-hops
+ * Total size: 634 bytes
+ */
+typedef struct cicn_api_fib_entry_s
+{
+ /* Name prefix */
+ u8 prefix[500];
+
+ /* Number of valid next-hops (face) */
+ i32 nfaces;
+
+ /* Next-hop Face IDs */
+ i32 faceid[16];
+
+  /* Face weights */
+ i32 faceweight[16];
+} cicn_api_fib_entry_t;
+
+/* test suite results entry: suite name and results
+ * Total size: 176 bytes
+ */
+typedef struct cicn_api_test_suite_results_s
+{
+ u8 suitename[128];
+
+ i32 ntests; // Number of tests requested
+ i32 nsuccesses;
+ i32 nfailures;
+ i32 nskipped;
+
+ u8 failures_mask[16];
+ u8 skips_mask[16];
+} cicn_api_test_suite_results_t;
+
+#endif // _cicn_api_h_
diff --git a/cicn-plugin/cicn/cicn_api_handler.h b/cicn-plugin/cicn/cicn_api_handler.h
new file mode 100644
index 00000000..d1e949a0
--- /dev/null
+++ b/cicn-plugin/cicn/cicn_api_handler.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * cicn_api_handler.h - Binary API support definitions
+ *
+ * - This header file consolidates imports/uses of vl_api definitions,
+ * to avoid polluting other plugin-internal header files.
+ */
+
+#ifndef __CICN_API_HANDLER_H__
+#define __CICN_API_HANDLER_H__
+
+#if !CICN_VPP_PLUGIN
+#error "cicn-internal file included externally"
+#endif
+
+#include <vnet/vnet.h>
+
+#include <cicn/cicn_api.h>
+
+/*
+ * Binary serialization for get face configuration API.
+ */
+vnet_api_error_t
+cicn_face_api_entry_params_serialize (int faceid,
+ vl_api_cicn_api_face_params_get_reply_t
+ * reply);
+
+/*
+ * Binary serialization for show faces API.
+ */
+vnet_api_error_t
+cicn_face_api_entry_props_serialize (vl_api_cicn_api_face_props_get_reply_t *
+ reply);
+
+/*
+ * Binary serialization for face statistics API.
+ */
+vnet_api_error_t
+cicn_face_api_entry_stats_serialize (int faceid,
+ vl_api_cicn_api_face_stats_get_reply_t *
+ reply);
+
+/*
+ * Binary serialization for show FIB API.
+ */
+vnet_api_error_t
+ cicn_fib_api_entry_props_serialize
+ (vl_api_cicn_api_fib_entry_props_get_reply_t * reply, int page);
+
+/*
+ * Binary serialization for UT API.
+ */
+struct test_cicn_api_op_s
+{
+ vl_api_cicn_api_test_run_get_reply_t *reply;
+};
+
+int test_cicn_api_results_serialize (test_cicn_api_op_t * test_cicn_api_op);
+
+#endif // __CICN_API_HANDLER_H__
diff --git a/cicn-plugin/cicn/cicn_api_test.c b/cicn-plugin/cicn/cicn_api_test.c
new file mode 100644
index 00000000..40b6fdec
--- /dev/null
+++ b/cicn-plugin/cicn/cicn_api_test.c
@@ -0,0 +1,1094 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * cicn_api_test.c - skeleton vpp-api-test plug-in
+ */
+
+#include <vat/vat.h>
+#include <vlibapi/api.h>
+#include <vlibmemory/api.h>
+#include <vlibsocket/api.h>
+#include <vppinfra/error.h>
+#include <vnet/ip/udp.h>
+#include <cicn/cicn_api.h>
+
+uword unformat_sw_if_index (unformat_input_t * input, va_list * args);
+
+/* Declare message IDs */
+#include <cicn/cicn_msg_enum.h>
+
+/* declare message handlers for each api */
+
+#define vl_endianfun /* define message structures */
+#include <cicn/cicn_all_api_h.h>
+#undef vl_endianfun
+
+/* instantiate all the print functions we know about */
+#define vl_print(handle, ...)
+#define vl_printfun
+#include <cicn/cicn_all_api_h.h>
+#undef vl_printfun
+
+/* Get the API version number. */
+#define vl_api_version(n,v) static u32 api_version=(v);
+#include <cicn/cicn_all_api_h.h>
+#undef vl_api_version
+
+/* copied from vpe.api.h */
+#define VL_API_PACKED(x) x __attribute__ ((packed))
+
+/* copied from vpe.api.h */
+typedef VL_API_PACKED (struct _vl_api_control_ping
+ {
+ u16 _vl_msg_id; u32 client_index;
+ u32 context;
+ }) vl_api_control_ping_t;
+
+typedef struct
+{
+ /* API message ID base */
+ u16 msg_id_base;
+ vat_main_t *vat_main;
+} cicn_test_main_t;
+
+cicn_test_main_t cicn_test_main;
+
+#define foreach_standard_reply_retval_handler \
+_(cicn_api_node_params_set_reply) \
+_(cicn_api_fib_entry_nh_add_reply) \
+_(cicn_api_fib_entry_nh_delete_reply) \
+_(cicn_api_face_delete_reply) \
+_(cicn_api_face_events_subscribe_reply)
+
+#define _(n) \
+ static void vl_api_##n##_t_handler \
+ (vl_api_##n##_t * mp) \
+ { \
+ vat_main_t * vam = cicn_test_main.vat_main; \
+ i32 retval = ntohl(mp->retval); \
+ if (vam->async_mode) { \
+ vam->async_errors += (retval < 0); \
+ } else { \
+ vam->retval = retval; \
+ vam->result_ready = 1; \
+ } \
+ }
+foreach_standard_reply_retval_handler;
+#undef _
+
+/*
+ * Table of message reply handlers, must include boilerplate handlers
+ * we just generated
+ */
+#define foreach_vpe_api_reply_msg \
+_(CICN_API_NODE_PARAMS_SET_REPLY, cicn_api_node_params_set_reply) \
+_(CICN_API_NODE_PARAMS_GET_REPLY, cicn_api_node_params_get_reply) \
+_(CICN_API_NODE_STATS_GET_REPLY, cicn_api_node_stats_get_reply) \
+_(CICN_API_FACE_ADD_REPLY, cicn_api_face_add_reply) \
+_(CICN_API_FACE_DELETE_REPLY, cicn_api_face_delete_reply) \
+_(CICN_API_FACE_PARAMS_GET_REPLY, cicn_api_face_params_get_reply) \
+_(CICN_API_FIB_ENTRY_NH_ADD_REPLY, cicn_api_fib_entry_nh_add_reply) \
+_(CICN_API_FIB_ENTRY_NH_DELETE_REPLY, cicn_api_fib_entry_nh_delete_reply) \
+_(CICN_API_FACE_PROPS_GET_REPLY, cicn_api_face_props_get_reply) \
+_(CICN_API_FACE_STATS_GET_REPLY, cicn_api_face_stats_get_reply) \
+_(CICN_API_FIB_ENTRY_PROPS_GET_REPLY, cicn_api_fib_entry_props_get_reply) \
+_(CICN_API_FIB_DETAILS, cicn_api_fib_details) \
+_(CICN_API_TEST_RUN_GET_REPLY, cicn_api_test_run_get_reply) \
+_(CICN_API_FACE_EVENTS_SUBSCRIBE_REPLY, cicn_api_face_events_subscribe_reply) \
+_(CICN_API_FACE_EVENT, cicn_api_face_event)
+
+/* M: construct, but don't yet send a message */
+
+#define M(T,t) \
+do { \
+ vam->result_ready = 0; \
+ mp = vl_msg_api_alloc(sizeof(*mp)); \
+ memset (mp, 0, sizeof (*mp)); \
+ mp->_vl_msg_id = ntohs (VL_API_##T + sm->msg_id_base); \
+ mp->client_index = vam->my_client_index; \
+} while(0);
+
+#define M2(T,t,n) \
+do { \
+ vam->result_ready = 0; \
+ mp = vl_msg_api_alloc(sizeof(*mp)+(n)); \
+ memset (mp, 0, sizeof (*mp)); \
+ mp->_vl_msg_id = ntohs (VL_API_##T + sm->msg_id_base); \
+ mp->client_index = vam->my_client_index; \
+} while(0);
+
+/* S: send a message */
+#define S (vl_msg_api_send_shmem (vam->vl_input_queue, (u8 *)&mp))
+
+/* W: wait for results, with timeout */
+#define W \
+do { \
+ timeout = vat_time_now (vam) + 1.0; \
+ \
+ while (vat_time_now (vam) < timeout) { \
+ if (vam->result_ready == 1) { \
+ return (vam->retval); \
+ } \
+ } \
+ return -99; \
+} while(0);
+
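+/*
+ * Each api_* routine below follows the same pattern: parse any arguments
+ * with unformat(), build the request with M/M2, send it with S, and wait
+ * for the reply with W (which returns the reply's retval, or -99 on
+ * timeout). Reply contents are printed by the vl_api_*_reply_t_handler
+ * functions listed in foreach_vpe_api_reply_msg.
+ */
+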
+static int
+api_cicn_api_node_params_set (vat_main_t * vam)
+{
+ cicn_test_main_t *sm = &cicn_test_main;
+ unformat_input_t *input = vam->input;
+ f64 timeout;
+ int enable_disable = 1;
+ int pit_size = -1, fib_size = -1, cs_size = -1;
+ f64 pit_dflt_lifetime_sec = -1.0f;
+ f64 pit_min_lifetime_sec = -1.0f, pit_max_lifetime_sec = -1.0f;
+
+ vl_api_cicn_api_node_params_set_t *mp;
+
+ /* Parse args required to build the message */
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "disable"))
+ {
+ enable_disable = 0;
+ }
+ else if (unformat (input, "PIT size %d", &pit_size))
+ {
+ ;
+ }
+ else if (unformat (input, "FIB size %d", &fib_size))
+ {
+ ;
+ }
+ else if (unformat (input, "CS size %d", &cs_size))
+ {
+ ;
+ }
+ else if (unformat (input, "PIT dfltlife %f", &pit_dflt_lifetime_sec))
+ {
+ ;
+ }
+ else if (unformat (input, "PIT minlife %f", &pit_min_lifetime_sec))
+ {
+ ;
+ }
+ else if (unformat (input, "PIT maxlife %f", &pit_max_lifetime_sec))
+ {
+ ;
+ }
+ else
+ {
+ break;
+ }
+ }
+
+ /* Construct the API message */
+ M (CICN_API_NODE_PARAMS_SET, cicn_api_node_params_set);
+ mp->enable_disable = enable_disable;
+ mp->pit_max_size = clib_host_to_net_i32 (pit_size);
+ mp->fib_max_size = clib_host_to_net_i32 (fib_size);
+ mp->cs_max_size = clib_host_to_net_i32 (cs_size);
+ //TODO: implement clib_host_to_net_f64 in VPP ?
+ mp->pit_dflt_lifetime_sec = pit_dflt_lifetime_sec;
+ mp->pit_min_lifetime_sec = pit_min_lifetime_sec;
+ mp->pit_max_lifetime_sec = pit_max_lifetime_sec;
+
+ /* send it... */
+ S;
+
+ /* Wait for a reply... */
+ W;
+}
+
+static int
+api_cicn_api_node_params_get (vat_main_t * vam)
+{
+ cicn_test_main_t *sm = &cicn_test_main;
+ f64 timeout;
+ vl_api_cicn_api_node_params_get_t *mp;
+
+ // Construct the API message
+ M (CICN_API_NODE_PARAMS_GET, cicn_api_node_params_get);
+
+ S;
+ W;
+}
+
+static void
+ vl_api_cicn_api_node_params_get_reply_t_handler
+ (vl_api_cicn_api_node_params_get_reply_t * mp)
+{
+ vat_main_t *vam = cicn_test_main.vat_main;
+ i32 retval = ntohl (mp->retval);
+
+ if (vam->async_mode)
+ {
+ vam->async_errors += (retval < 0);
+ return;
+ }
+
+ vam->retval = retval;
+ vam->result_ready = 1;
+
+ if (vam->retval < 0)
+ {
+ // vpp_api_test infra will also print out string form of error
+ fformat (vam->ofp, " (API call error: %d)\n", vam->retval);
+ return;
+ }
+
+ fformat (vam->ofp,
+ "Enabled %d\n"
+ " Features: multithreading:%d, cs:%d, dpdk-cloning:%d, "
+           "vlib-cloning:%d\n"
+ " Workers %d, FIB size %d PIT size %d\n"
+ " PIT lifetime dflt %.3f, min %.3f, max %.3f\n"
+ " CS size %d\n",
+ mp->is_enabled,
+ mp->feature_multithread,
+ mp->feature_cs,
+ mp->feature_dpdk_rtembuf_cloning,
+ mp->feature_vpp_vlib_cloning,
+ clib_net_to_host_u32 (mp->worker_count),
+ clib_net_to_host_u32 (mp->fib_max_size),
+ clib_net_to_host_u32 (mp->pit_max_size),
+ //TODO: implement clib_net_to_host_f64 in VPP ?
+ mp->pit_dflt_lifetime_sec,
+ mp->pit_min_lifetime_sec,
+ mp->pit_max_lifetime_sec, clib_net_to_host_u32 (mp->cs_max_size));
+}
+
+
+static int
+api_cicn_api_face_add (vat_main_t * vam)
+{
+ cicn_test_main_t *sm = &cicn_test_main;
+ unformat_input_t *input = vam->input;
+ f64 timeout;
+ ip4_address_t local_addr4, remote_addr4;
+ int local_port = 0, remote_port = 0;
+ vl_api_cicn_api_face_add_t *mp;
+
+ local_addr4.as_u32 = 0;
+ remote_addr4.as_u32 = 0;
+
+ /* Parse args required to build the message */
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "local %U:%d",
+ unformat_ip4_address, &local_addr4, &local_port))
+ {
+ ;
+ }
+ else if (unformat (input, "remote %U:%d",
+ unformat_ip4_address, &remote_addr4, &remote_port))
+ {
+ ;
+ }
+ else
+ {
+ break;
+ }
+ }
+
+ /* Check for presence of both addresses */
+ if ((local_addr4.as_u32 == 0) || (remote_addr4.as_u32 == 0))
+ {
+ clib_warning ("Please specify both local and remote addresses...");
+ return (1);
+ }
+
+ /* Check for presence of both addresses */
+ if ((local_port == 0) || (remote_port == 0))
+ {
+ clib_warning ("Please specify both local and remote ports...");
+ return (1);
+ }
+
+ /* Construct the API message */
+ M (CICN_API_FACE_ADD, cicn_api_face_add);
+ mp->local_addr = clib_host_to_net_u32 (local_addr4.as_u32);
+ mp->local_port = clib_host_to_net_u16 ((u16) local_port);
+ mp->remote_addr = clib_host_to_net_u32 (remote_addr4.as_u32);
+ mp->remote_port = clib_host_to_net_u16 ((u16) remote_port);
+
+ /* send it... */
+ S;
+
+ /* Wait for a reply... */
+ W;
+}
+
+static void
+vl_api_cicn_api_face_add_reply_t_handler (vl_api_cicn_api_face_add_reply_t *
+ mp)
+{
+ vat_main_t *vam = cicn_test_main.vat_main;
+ i32 retval = ntohl (mp->retval);
+
+ if (vam->async_mode)
+ {
+ vam->async_errors += (retval < 0);
+ return;
+ }
+
+ vam->retval = retval;
+ vam->result_ready = 1;
+
+ if (vam->retval < 0)
+ {
+ // vpp_api_test infra will also print out string form of error
+ fformat (vam->ofp, " (API call error: %d)\n", vam->retval);
+ return;
+ }
+
+ fformat (vam->ofp, "New Face ID: %d\n", ntohl (mp->faceid));
+}
+
+static int
+api_cicn_api_face_delete (vat_main_t * vam)
+{
+ cicn_test_main_t *sm = &cicn_test_main;
+ unformat_input_t *input = vam->input;
+ f64 timeout;
+ int faceid = 0;
+ vl_api_cicn_api_face_delete_t *mp;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "face %d", &faceid))
+ {
+ ;
+ }
+ else
+ {
+ break;
+ }
+ }
+
+ // Check for presence of face ID
+ if (faceid == 0)
+ {
+ clib_warning ("Please specify face ID");
+ return 1;
+ }
+
+ // Construct the API message
+ M (CICN_API_FACE_DELETE, cicn_api_face_delete);
+ mp->faceid = clib_host_to_net_i32 (faceid);
+
+ // send it...
+ S;
+
+ // Wait for a reply...
+ W;
+}
+
+static int
+api_cicn_api_face_params_get (vat_main_t * vam)
+{
+ cicn_test_main_t *sm = &cicn_test_main;
+ unformat_input_t *input = vam->input;
+ f64 timeout;
+ int faceid = 0;
+ vl_api_cicn_api_face_params_get_t *mp;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "face %d", &faceid))
+ {
+ ;
+ }
+ else
+ {
+ break;
+ }
+ }
+
+ // Check for presence of face ID
+ if (faceid == 0)
+ {
+ clib_warning ("Please specify face ID");
+ return 1;
+ }
+
+ // Construct the API message
+ M (CICN_API_FACE_PARAMS_GET, cicn_api_face_params_get);
+ mp->faceid = clib_host_to_net_i32 (faceid);
+
+ // send it...
+ S;
+
+ // Wait for a reply...
+ W;
+}
+
+static void
+ vl_api_cicn_api_face_params_get_reply_t_handler
+ (vl_api_cicn_api_face_params_get_reply_t * mp)
+{
+ vat_main_t *vam = cicn_test_main.vat_main;
+ i32 retval = ntohl (mp->retval);
+ u8 *sbuf = 0, *dbuf = 0;
+
+ if (vam->async_mode)
+ {
+ vam->async_errors += (retval < 0);
+ return;
+ }
+
+ vam->retval = retval;
+ vam->result_ready = 1;
+
+ if (vam->retval < 0)
+ {
+ // vpp_api_test infra will also print out string form of error
+ fformat (vam->ofp, " (API call error: %d)\n", vam->retval);
+ return;
+ }
+
+ u32 local_addr = clib_net_to_host_u32 (mp->local_addr);
+ vec_reset_length (sbuf);
+ sbuf = format (sbuf, "%U", format_ip4_address, &local_addr);
+
+ u32 remote_addr = clib_net_to_host_u32 (mp->remote_addr);
+ vec_reset_length (dbuf);
+ dbuf = format (dbuf, "%U", format_ip4_address, &remote_addr);
+
+ fformat (vam->ofp, "%s:%d <-> %s:%d swif %d flags %d\n",
+ sbuf,
+ clib_net_to_host_u16 (mp->local_port),
+ dbuf,
+ clib_net_to_host_u16 (mp->remote_port),
+ clib_net_to_host_i32 (mp->sw_interface_id),
+ clib_net_to_host_i32 (mp->flags));
+}
+
+static int
+api_cicn_api_fib_entry_nh_add (vat_main_t * vam)
+{
+ cicn_test_main_t *sm = &cicn_test_main;
+ unformat_input_t *input = vam->input;
+ f64 timeout;
+ vl_api_cicn_api_fib_entry_nh_add_t *mp;
+
+ const char *prefix = NULL;
+ int faceid = 0;
+ int weight = CICN_API_FIB_ENTRY_NHOP_WGHT_UNSET;
+
+ /* TODO -- support next-hop weights */
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "prefix %s", &prefix))
+ {
+ ;
+ }
+ else if (unformat (input, "face %d", &faceid))
+ {
+ ;
+ }
+ else if (unformat (input, "weight %d", &weight))
+ {
+ ;
+ }
+ else
+ {
+ break;
+ }
+ }
+
+ /* Check parse */
+ if ((prefix == NULL) || (strlen (prefix) == 0) || (faceid == 0))
+ {
+ clib_warning ("Please specify prefix and faceid...");
+ return 1;
+ }
+
+ /* Construct the API message */
+ M (CICN_API_FIB_ENTRY_NH_ADD, cicn_api_fib_entry_nh_add);
+ memcpy (mp->prefix, prefix, strlen (prefix));
+ mp->faceid = clib_host_to_net_i32 (faceid);
+ mp->weight = clib_host_to_net_i32 (weight);
+
+ /* send it... */
+ S;
+
+ /* Wait for a reply... */
+ W;
+}
+
+static int
+api_cicn_api_fib_entry_nh_delete (vat_main_t * vam)
+{
+ cicn_test_main_t *sm = &cicn_test_main;
+ unformat_input_t *input = vam->input;
+ f64 timeout;
+ vl_api_cicn_api_fib_entry_nh_delete_t *mp;
+
+ const char *prefix = NULL;
+ int faceid = 0;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "prefix %s", &prefix))
+ {
+ ;
+ }
+ else if (unformat (input, "face %d", &faceid))
+ {
+ ;
+ }
+ else
+ {
+ break;
+ }
+ }
+
+ /* Check parse */
+ if ((prefix == NULL) || (strlen (prefix) == 0))
+ {
+ clib_warning ("Please specify prefix");
+ return 1;
+ }
+
+ /* Construct the API message */
+ M (CICN_API_FIB_ENTRY_NH_DELETE, cicn_api_fib_entry_nh_delete);
+ memcpy (mp->prefix, prefix, strlen (prefix));
+ mp->faceid = clib_host_to_net_i32 (faceid);
+
+ /* send it... */
+ S;
+
+ /* Wait for a reply... */
+ W;
+}
+
+static int
+api_cicn_api_face_props_get (vat_main_t * vam)
+{
+ cicn_test_main_t *sm = &cicn_test_main;
+ f64 timeout;
+ vl_api_cicn_api_face_props_get_t *mp;
+
+ /* Construct the API message */
+ M (CICN_API_FACE_PROPS_GET, cicn_api_face_props_get);
+
+ /* send it... */
+ S;
+
+ /* Wait for a reply... */
+ W;
+}
+
+static void
+ vl_api_cicn_api_face_props_get_reply_t_handler
+ (vl_api_cicn_api_face_props_get_reply_t * mp)
+{
+ vat_main_t *vam = cicn_test_main.vat_main;
+ i32 retval = ntohl (mp->retval);
+ u8 *sbuf = 0, *dbuf = 0;
+
+ if (vam->async_mode)
+ {
+ vam->async_errors += (retval < 0);
+ return;
+ }
+
+ vam->retval = retval;
+ vam->result_ready = 1;
+
+ if (vam->retval < 0)
+ {
+ // vpp_api_test infra will also print out string form of error
+ fformat (vam->ofp, " (API call error: %d)\n", vam->retval);
+ return;
+ }
+
+ i32 nentries = clib_net_to_host_i32 (mp->nentries);
+
+ cicn_api_face_entry_t *faces = (cicn_api_face_entry_t *) & mp->face[0];
+ int i;
+ for (i = 0; i < nentries; i++)
+ {
+ cicn_api_face_entry_t *face = &faces[i];
+
+ u32 local_addr = clib_net_to_host_u32 (face->local_addr);
+ vec_reset_length (sbuf);
+ sbuf = format (sbuf, "%U", format_ip4_address, &local_addr);
+
+ u32 remote_addr = clib_net_to_host_u32 (face->remote_addr);
+ vec_reset_length (dbuf);
+ dbuf = format (dbuf, "%U", format_ip4_address, &remote_addr);
+
+ fformat (vam->ofp,
+ "Face %d: %s:%d <-> %s:%d swif %d flags %d, fib_nhs:%d\n",
+ clib_net_to_host_i32 (face->faceid),
+ sbuf,
+ clib_net_to_host_u16 (face->local_port),
+ dbuf,
+ clib_net_to_host_u16 (face->remote_port),
+ clib_net_to_host_i32 (face->sw_interface_id),
+ clib_net_to_host_i32 (face->flags),
+ clib_net_to_host_u32 (face->fib_nhs));
+ }
+}
+
+static int
+api_cicn_api_face_stats_get (vat_main_t * vam)
+{
+ cicn_test_main_t *sm = &cicn_test_main;
+ unformat_input_t *input = vam->input;
+ f64 timeout;
+ vl_api_cicn_api_face_stats_get_t *mp;
+ int faceid = 0;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "face %d", &faceid))
+ {
+ ;
+ }
+ else
+ {
+ break;
+ }
+ }
+
+ // Check for presence of face ID
+ if (faceid == 0)
+ {
+ clib_warning ("Please specify face ID");
+ return 1;
+ }
+
+ /* Construct the API message */
+ M (CICN_API_FACE_STATS_GET, cicn_api_face_stats_get);
+ mp->faceid = clib_host_to_net_i32 (faceid);
+
+ /* send it... */
+ S;
+
+ /* Wait for a reply... */
+ W;
+}
+
+static void
+ vl_api_cicn_api_face_stats_get_reply_t_handler
+ (vl_api_cicn_api_face_stats_get_reply_t * mp)
+{
+ vat_main_t *vam = cicn_test_main.vat_main;
+ i32 retval = ntohl (mp->retval);
+
+ if (vam->async_mode)
+ {
+ vam->async_errors += (retval < 0);
+ return;
+ }
+
+ vam->retval = retval;
+ vam->result_ready = 1;
+
+ if (vam->retval < 0)
+ {
+ // vpp_api_test infra will also print out string form of error
+ fformat (vam->ofp, " (API call error: %d)\n", vam->retval);
+ return;
+ }
+
+ fformat (vam->ofp,
+ "Face %d "
+ "orig_interests %d orig_datas %d orig_naks %d "
+ "term_interests %d term_datas %d term_naks %d "
+ "in_interests %d in_datas %d in_naks %d "
+ "out_interests %d out_datas %d out_naks %d\n",
+ clib_net_to_host_i32 (mp->faceid),
+ clib_net_to_host_u64 (mp->orig_interests),
+ clib_net_to_host_u64 (mp->orig_datas),
+ clib_net_to_host_u64 (mp->orig_naks),
+ clib_net_to_host_u64 (mp->term_interests),
+ clib_net_to_host_u64 (mp->term_datas),
+ clib_net_to_host_u64 (mp->term_naks),
+ clib_net_to_host_u64 (mp->in_interests),
+ clib_net_to_host_u64 (mp->in_datas),
+ clib_net_to_host_u64 (mp->in_naks),
+ clib_net_to_host_u64 (mp->out_interests),
+ clib_net_to_host_u64 (mp->out_datas),
+ clib_net_to_host_u64 (mp->out_naks));
+}
+
+static int
+api_cicn_api_node_stats_get (vat_main_t * vam)
+{
+ cicn_test_main_t *sm = &cicn_test_main;
+ f64 timeout;
+ vl_api_cicn_api_node_stats_get_t *mp;
+
+ /* Construct the API message */
+ M (CICN_API_NODE_STATS_GET, cicn_api_node_stats_get);
+
+ /* send it... */
+ S;
+
+ /* Wait for a reply... */
+ W;
+}
+
+static void
+ vl_api_cicn_api_node_stats_get_reply_t_handler
+ (vl_api_cicn_api_node_stats_get_reply_t * rmp)
+{
+ vat_main_t *vam = cicn_test_main.vat_main;
+ i32 retval = ntohl (rmp->retval);
+
+ if (vam->async_mode)
+ {
+ vam->async_errors += (retval < 0);
+ return;
+ }
+
+ vam->retval = retval;
+ vam->result_ready = 1;
+
+ if (vam->retval < 0)
+ {
+ // vpp_api_test infra will also print out string form of error
+ fformat (vam->ofp, " (API call error: %d)\n", vam->retval);
+ return;
+ }
+ else
+ {
+ fformat (vam->ofp, // compare cicn_cli_show_command_fn block: should match
+ " PIT entries (now): %d\n"
+ " CS entries (now): %d\n"
+ " Forwarding statistics:"
+ " pkts_processed: %d\n"
+ " pkts_interest_count: %d\n"
+ " pkts_data_count: %d\n"
+ " pkts_nak_count: %d\n"
+ " pkts_from_cache_count: %d\n"
+ " pkts_nacked_interests_count: %d\n"
+ " pkts_nak_hoplimit_count: %d\n"
+ " pkts_nak_no_route_count: %d\n"
+ " pkts_no_pit_count: %d\n"
+ " pit_expired_count: %d\n"
+ " cs_expired_count: %d\n"
+ " cs_lru_count: %d\n"
+ " pkts_drop_no_buf: %d\n"
+ " interests_aggregated: %d\n"
+ " interests_retransmitted: %d\n",
+ clib_net_to_host_u64 (rmp->pit_entries_count),
+ clib_net_to_host_u64 (rmp->cs_entries_count),
+ clib_net_to_host_u64 (rmp->pkts_processed),
+ clib_net_to_host_u64 (rmp->pkts_interest_count),
+ clib_net_to_host_u64 (rmp->pkts_data_count),
+ clib_net_to_host_u64 (rmp->pkts_nak_count),
+ clib_net_to_host_u64 (rmp->pkts_from_cache_count),
+ clib_net_to_host_u64 (rmp->pkts_nacked_interests_count),
+ clib_net_to_host_u64 (rmp->pkts_nak_hoplimit_count),
+ clib_net_to_host_u64 (rmp->pkts_nak_no_route_count),
+ clib_net_to_host_u64 (rmp->pkts_no_pit_count),
+ clib_net_to_host_u64 (rmp->pit_expired_count),
+ clib_net_to_host_u64 (rmp->cs_expired_count),
+ clib_net_to_host_u64 (rmp->cs_lru_count),
+ clib_net_to_host_u64 (rmp->pkts_drop_no_buf),
+ clib_net_to_host_u64 (rmp->interests_aggregated),
+ clib_net_to_host_u64 (rmp->interests_retx));
+ }
+}
+
+static int
+api_cicn_api_fib_entry_props_get (vat_main_t * vam)
+{
+ cicn_test_main_t *sm = &cicn_test_main;
+ f64 timeout;
+ vl_api_cicn_api_fib_entry_props_get_t *mp;
+
+ /* Construct the API message */
+ M (CICN_API_FIB_ENTRY_PROPS_GET, cicn_api_fib_entry_props_get);
+ mp->pagenum = 0;
+
+ /* send it... */
+ S;
+
+ /* Wait for a reply... */
+ W;
+}
+
+static void
+ vl_api_cicn_api_fib_entry_props_get_reply_t_handler
+ (vl_api_cicn_api_fib_entry_props_get_reply_t * mp)
+{
+ vat_main_t *vam = cicn_test_main.vat_main;
+ i32 retval = ntohl (mp->retval);
+
+ if (vam->async_mode)
+ {
+ vam->async_errors += (retval < 0);
+ return;
+ }
+
+ vam->retval = retval;
+ vam->result_ready = 1;
+
+ if (vam->retval < 0)
+ {
+ // vpp_api_test infra will also print out string form of error
+ fformat (vam->ofp, " (API call error: %d)\n", vam->retval);
+ return;
+ }
+
+ i32 nentries = clib_net_to_host_i32 (mp->nentries);
+
+ fformat (vam->ofp, "Entries %d\n", nentries);
+
+ cicn_api_fib_entry_t *entries = (cicn_api_fib_entry_t *) & mp->fibentry[0];
+
+ int i;
+ for (i = 0; i < nentries; i++)
+ {
+ cicn_api_fib_entry_t *entry = &entries[i];
+
+ fformat (vam->ofp, "%s:", entry->prefix);
+
+ int j;
+ for (j = 0; j < clib_net_to_host_i32 (entry->nfaces); j++)
+ {
+ fformat (vam->ofp, " (face: %d, wght %d)",
+ clib_net_to_host_i32 (entry->faceid[j]),
+ clib_net_to_host_i32 (entry->faceweight[j]));
+ }
+
+ fformat (vam->ofp, "\n");
+ }
+}
+
+static void
+vl_api_cicn_api_fib_details_t_handler (vl_api_cicn_api_fib_details_t * mp)
+{
+ vat_main_t *vam = cicn_test_main.vat_main;
+
+ fformat (vam->ofp, "%s:", mp->prefix);
+
+ int j;
+ for (j = 0; j < clib_net_to_host_i32 (mp->nfaces); j++)
+ {
+ fformat (vam->ofp, " (face: %d, wght %d)",
+ clib_net_to_host_i32 (mp->faceid[j]),
+ clib_net_to_host_i32 (mp->faceweight[j]));
+ }
+ fformat (vam->ofp, "\n");
+}
+
+static int
+api_cicn_api_test_run_get (vat_main_t * vam)
+{
+ cicn_test_main_t *sm = &cicn_test_main;
+ f64 timeout;
+ vl_api_cicn_api_test_run_get_t *mp;
+
+ /* Construct the API message */
+ M (CICN_API_TEST_RUN_GET, cicn_api_test_run_get);
+
+ /* send it... */
+ S;
+
+ /* Wait for a reply... */
+ W;
+}
+
+static void
+ vl_api_cicn_api_test_run_get_reply_t_handler
+ (vl_api_cicn_api_test_run_get_reply_t * mp)
+{
+ vat_main_t *vam = cicn_test_main.vat_main;
+ i32 retval = clib_net_to_host_i32 (mp->retval);
+
+ if (vam->async_mode)
+ {
+ vam->async_errors += (retval < 0);
+ goto done;
+ }
+
+ vam->retval = retval;
+ vam->result_ready = 1;
+ if (vam->retval < 0)
+ {
+ goto done;
+ }
+
+ i32 nentries = clib_net_to_host_i32 (mp->nentries);
+ cicn_api_test_suite_results_t *suites =
+ (cicn_api_test_suite_results_t *) & mp->suites[0];
+
+ int i;
+ for (i = 0; i < nentries; i++)
+ {
+ cicn_api_test_suite_results_t *suite = &suites[i];
+ int ntests = clib_net_to_host_i32 (suite->ntests);
+ int nsuccesses = clib_net_to_host_i32 (suite->nsuccesses);
+ int nfailures = clib_net_to_host_i32 (suite->nfailures);
+ int nskipped = clib_net_to_host_i32 (suite->nskipped);
+ int j, cnt;
+
+ fformat (vam->ofp,
+ "Suite %s: %d tests: %d successes, %d failures, %d skipped\n",
+ suite->suitename, ntests, nsuccesses, nfailures, nskipped);
+
+ if (nfailures != 0)
+ {
+ fformat (vam->ofp, " Failed Test(s):");
+ for (j = 0, cnt = 0; j < 8 * sizeof (suite->failures_mask); j++)
+ {
+ if ((suite->failures_mask[j / 8] & (1 << (j % 8))) == 0)
+ {
+ continue;
+ }
+ cnt++;
+ fformat (vam->ofp, " %d%s", j + 1,
+ (cnt < nfailures) ? ", " : " ");
+ }
+ fformat (vam->ofp, "\n");
+ }
+ if (nskipped != 0)
+ {
+ fformat (vam->ofp, " Skipped Test(s):");
+ for (j = 0, cnt = 0; j < 8 * sizeof (suite->skips_mask); j++)
+ {
+ if ((suite->skips_mask[j / 8] & (1 << (j % 8))) == 0)
+ {
+ continue;
+ }
+ cnt++;
+ fformat (vam->ofp, " %d%s", j + 1,
+ (cnt < nskipped) ? ", " : " ");
+ }
+ fformat (vam->ofp, "\n");
+ }
+ }
+
+done:; // ";" meets requirement for statement after label
+}
+
+static int
+api_cicn_api_face_events_subscribe (vat_main_t * vam)
+{
+ cicn_test_main_t *sm = &cicn_test_main;
+ unformat_input_t *i = vam->input;
+ vl_api_cicn_api_face_events_subscribe_t *mp;
+ f64 timeout;
+ int enable = -1;
+
+ while (unformat_check_input (i) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (i, "enable"))
+ enable = 1;
+ else if (unformat (i, "disable"))
+ enable = 0;
+ else
+ break;
+ }
+
+ if (enable == -1)
+ {
+ errmsg ("missing enable|disable\n");
+ return -99;
+ }
+
+ M (CICN_API_FACE_EVENTS_SUBSCRIBE, cicn_api_face_events_subscribe);
+ mp->enable_disable = clib_host_to_net_u16 (enable);
+ mp->context = clib_host_to_net_u32 (10101 /*random number */ );
+
+ S;
+ W;
+}
+
+static void
+vl_api_cicn_api_face_event_t_handler (vl_api_cicn_api_face_event_t * mp)
+{
+ vat_main_t *vam = cicn_test_main.vat_main;
+
+ fformat (vam->ofp, "Event Face %d Flags %d\n",
+ clib_net_to_host_i32 (mp->faceid),
+ clib_net_to_host_i32 (mp->flags));
+}
+
+/*
+ * List of messages that the api test plugin sends,
+ * and that the data plane plugin processes
+ */
+#define foreach_vpe_api_msg \
+_(cicn_api_node_params_set, "FIB size <sz> PIT size <sz> CS size <sz>" \
+"PIT minlimit <f> PIT maxlimit <f> [disable] ") \
+_(cicn_api_node_params_get, "") \
+_(cicn_api_node_stats_get, "") \
+_(cicn_api_face_add, "local <IPv4-addr:port> remote <IPv4-addr:port>") \
+_(cicn_api_face_delete, "face <faceID>") \
+_(cicn_api_face_stats_get, "face <faceID>") \
+_(cicn_api_face_params_get, "face <faceID>") \
+_(cicn_api_face_props_get, "") \
+_(cicn_api_fib_entry_nh_add, "prefix </prefix> face <faceID> weight <weight>")\
+_(cicn_api_fib_entry_nh_delete, "prefix </prefix>") \
+_(cicn_api_fib_entry_props_get, "") \
+_(cicn_api_face_events_subscribe, "enable|disable") \
+_(cicn_api_test_run_get, "testsuite <ID>")
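+
+/*
+ * Illustrative vpp_api_test invocations built from the help strings
+ * above (addresses, ports and IDs are hypothetical placeholders):
+ *   cicn_api_face_add local 10.0.0.1:6363 remote 10.0.0.2:6363
+ *   cicn_api_fib_entry_nh_add prefix /example face 1 weight 1
+ */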
+
+void
+vat_api_hookup (vat_main_t * vam)
+{
+ cicn_test_main_t *sm = &cicn_test_main;
+ /* Hook up handlers for replies from the data plane plug-in */
+#define _(N,n) \
+ vl_msg_api_set_handlers((VL_API_##N + sm->msg_id_base), \
+ #n, \
+ vl_api_##n##_t_handler, \
+ vl_noop_handler, \
+ vl_api_##n##_t_endian, \
+ vl_api_##n##_t_print, \
+ sizeof(vl_api_##n##_t), 1);
+ foreach_vpe_api_reply_msg;
+#undef _
+
+ /* API messages we can send */
+#define _(n,h) hash_set_mem (vam->function_by_name, #n, api_##n);
+ foreach_vpe_api_msg;
+#undef _
+
+ /* Help strings */
+#define _(n,h) hash_set_mem (vam->help_by_name, #n, h);
+ foreach_vpe_api_msg;
+#undef _
+}
+
+clib_error_t *
+vat_plugin_register (vat_main_t * vam)
+{
+ cicn_test_main_t *sm = &cicn_test_main;
+ u8 *name;
+
+ sm->vat_main = vam;
+
+ /* Ask the vpp engine for the first assigned message-id */
+ name = format (0, "cicn_%08x%c", api_version, 0);
+ sm->msg_id_base = vl_client_get_first_plugin_msg_id ((char *) name);
+
+ if (sm->msg_id_base != (u16) ~ 0)
+ vat_api_hookup (vam);
+
+ vec_free (name);
+
+ return 0;
+}
diff --git a/cicn-plugin/cicn/cicn_face.c b/cicn-plugin/cicn/cicn_face.c
new file mode 100644
index 00000000..2bb90253
--- /dev/null
+++ b/cicn-plugin/cicn/cicn_face.c
@@ -0,0 +1,646 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Implementation of the ICN face table
+ */
+#include <vlib/vlib.h>
+#include <vppinfra/error.h>
+#include <vnet/ip/format.h>
+
+#include <cicn/cicn.h>
+
+/*
+ * An ICN face's underlying swif has a "device class" (e.g. dpdk, af-packet).
+ * Currently, this determines whether the device supports dpdk cloning.
+ * Retrieve the class index for storage in a newly created face_db entry.
+ */
+static int
+cicn_face_swif_dev_class_index (int swif)
+{
+ int dev_class_index = -1;
+ int unix_rc = AOK;
+
+ vnet_main_t *vnm;
+ vnet_hw_interface_t *hw;
+
+ vnm = vnet_get_main ();
+ hw = vnet_get_sup_hw_interface (vnm, swif);
+ if (hw == NULL)
+ {
+ unix_rc = ENODEV;
+ goto done;
+ }
+ dev_class_index = hw->dev_class_index;
+
+done:
+ return ((dev_class_index >= 0) ? dev_class_index : -unix_rc);
+}
+
+/*
+ * Return the face's swif's device class, for CLI show
+ */
+static const char *
+cicn_face_dev_class_name (const cicn_face_db_entry_t * face)
+{
+
+ vnet_main_t *vnm;
+ vnet_device_class_t *dev_class;
+
+ vnm = vnet_get_main ();
+ dev_class = vnet_get_device_class (vnm, face->swif_dev_class_index);
+ return (dev_class ? dev_class->name : "???");
+}
+
+/*
+ * Utility that adds a new face cache entry
+ */
+static int
+cicn_face_db_add (uint32_t src_addr, uint16_t src_port,
+ uint32_t dest_addr, uint16_t dest_port,
+ int app_face, int swif, int *pfaceid)
+{
+ int ret = 0;
+
+ cicn_face_db_entry_t *ptr;
+ int faceid = 0;
+ int dev_class_index;
+ int is_dpdk_driver;
+ int cloning_supported;
+
+ vnet_device_class_t *dev_class;
+
+ dev_class_index = cicn_face_swif_dev_class_index (swif);
+ if (dev_class_index < 0)
+ {
+ ret = -dev_class_index;
+ goto done;
+ }
+ dev_class = vnet_get_device_class (vnet_get_main (), dev_class_index);
+ if (dev_class == NULL)
+ {
+ ret = ENOENT;
+ goto done;
+ }
+ is_dpdk_driver = !strcmp (dev_class->name, "dpdk");
+ cloning_supported =
+ CICN_FEATURE_VPP_VLIB_CLONING ||
+ (CICN_FEATURE_DPDK_RTEMBUF_CLONING && is_dpdk_driver);
+
+ if (cicn_face_db.entry_count >= CICN_PARAM_FACES_MAX)
+ {
+ ret = ENOMEM;
+ goto done;
+ }
+ ptr = &cicn_face_db.entries[cicn_face_db.entry_count];
+ faceid = cicn_face_db.entry_count + 1; /* Start at one, not zero */
+ cicn_face_db.entry_count++;
+
+ ptr->src_addr.sin_addr.s_addr = src_addr;
+ ptr->src_addr.sin_port = htons (src_port);
+ ptr->dest_addr.sin_addr.s_addr = dest_addr;
+ ptr->dest_addr.sin_port = htons (dest_port);
+ ptr->app_face = app_face;
+ ptr->faceid = faceid;
+ ptr->swif = swif;
+ ptr->swif_dev_class_index = dev_class_index;
+ ptr->swif_is_dpdk_driver = is_dpdk_driver;
+ ptr->swif_cloning_supported = cloning_supported;
+ ptr->flags = CICN_FACE_FLAGS_DEFAULT;
+ ptr->fe_fib_nh_cnt = 0;
+
+
+done:
+ if (pfaceid)
+ {
+ *pfaceid = faceid;
+ }
+
+ return (ret);
+}
+
+/* TODO -- delete face, deactivate/down face apis? */
+
+/*
+ * Locate a face cache entry by face id
+ * TODO: Replace linear scan with faster lookup
+ */
+int
+cicn_face_entry_find_by_id (int id, cicn_face_db_entry_t ** pface)
+{
+ int i, ret = ENOENT;
+ cicn_face_db_entry_t *ptr = cicn_face_db.entries;
+
+ for (i = 0; i < cicn_face_db.entry_count; i++)
+ {
+ if (ptr->faceid == id)
+ {
+
+ /* Don't return/access deleted entries */
+ if (ptr->flags & CICN_FACE_FLAG_DELETED)
+ {
+ goto done;
+ }
+
+ if (pface)
+ {
+ *pface = ptr;
+ }
+
+ ret = AOK;
+ break;
+ }
+
+ ptr++;
+ }
+
+done:
+
+ return (ret);
+}
+
+/*
+ * Find a face cache entry by address (e.g. from a packet)
+ */
+int
+cicn_face_entry_find_by_addr (const struct sockaddr_in *src,
+ const struct sockaddr_in *dest,
+ cicn_face_db_entry_t ** pface)
+{
+ int i, ret = ENOENT;
+ cicn_face_db_entry_t *ptr = cicn_face_db.entries;
+
+ for (i = 0; i < cicn_face_db.entry_count; ptr++, i++)
+ {
+ if ((ptr->src_addr.sin_addr.s_addr == src->sin_addr.s_addr) &&
+ (ptr->src_addr.sin_port == src->sin_port) &&
+ (ptr->dest_addr.sin_addr.s_addr == dest->sin_addr.s_addr) &&
+ (ptr->dest_addr.sin_port == dest->sin_port))
+ {
+
+ /* Don't return/access deleted entries */
+ if (ptr->flags & CICN_FACE_FLAG_DELETED)
+ {
+ goto done;
+ }
+
+ if (pface)
+ {
+ *pface = ptr;
+ }
+ ret = AOK;
+ break;
+ }
+ }
+
+done:
+
+ return (ret);
+}
+
+int
+cicn_face_db_index (const cicn_face_db_entry_t * face)
+{
+ return (face - cicn_face_db.entries);
+}
+
+int
+cicn_face_stats_aggregate (const cicn_face_db_entry_t * face,
+ cicn_face_stats_t * face_stats)
+{
+ int fcidx;
+ u32 index = 0;
+ const cicn_face_stats_t *cpu_face_stats;
+
+ memset (face_stats, 0, sizeof (*face_stats));
+
+ fcidx = cicn_face_db_index (face);
+
+ foreach_vlib_main (
+ {
+ cpu_face_stats =
+ &cicn_infra_shards[index].face_stats[fcidx];
+ face_stats->orig_interests +=
+ cpu_face_stats->orig_interests;
+ face_stats->orig_datas += cpu_face_stats->orig_datas;
+ face_stats->orig_naks += cpu_face_stats->orig_naks;
+ face_stats->term_interests +=
+ cpu_face_stats->term_interests;
+ face_stats->term_datas += cpu_face_stats->term_datas;
+ face_stats->term_naks += cpu_face_stats->term_naks;
+ face_stats->in_interests +=
+ cpu_face_stats->in_interests;
+ face_stats->in_datas += cpu_face_stats->in_datas;
+ face_stats->in_naks += cpu_face_stats->in_naks;
+ face_stats->out_interests +=
+ cpu_face_stats->out_interests;
+ face_stats->out_datas += cpu_face_stats->out_datas;
+ face_stats->out_naks += cpu_face_stats->out_naks;
+ index++;
+ }
+ );
+ return (AOK);
+}
+
+/*
+ * Create face, typically while handling cli input. Returns zero
+ * and mails back the faceid on success.
+ */
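+/*
+ * Hypothetical usage sketch (addresses, ports and swif are placeholders):
+ *   int faceid;
+ *   cicn_face_add (src_ip4, 6363, dst_ip4, 6363, 0, swif, &faceid, NULL);
+ */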
+int
+cicn_face_add (uint32_t src_addr, uint16_t src_port,
+ uint32_t dest_addr, uint16_t dest_port,
+ int app_face, int swif, int *faceid_p, cicn_rd_t * cicn_rd)
+{
+ int ret = AOK;
+ cicn_rc_e crc = CICN_RC_OK;
+
+ int faceid;
+
+ if (!cicn_infra_fwdr_initialized)
+ {
+ cicn_cli_output ("cicn: fwdr disabled");
+ ret = EINVAL;
+ goto done;
+ }
+ /* check for face already existing */
+ struct sockaddr_in src, dst;
+ src.sin_addr.s_addr = src_addr;
+ src.sin_port = htons (src_port);
+
+ dst.sin_addr.s_addr = dest_addr;
+ dst.sin_port = htons (dest_port);
+ ret = cicn_face_entry_find_by_addr (&src, &dst, NULL /*!pface */ );
+ if (ret == AOK)
+ {
+ ret = EEXIST;
+ goto done;
+ }
+
+ /* TODO -- support delete also? */
+
+ /* TODO -- check face cache for dup before trying to add? */
+
+ /* Add to face cache */
+ ret =
+ cicn_face_db_add (src_addr, src_port, dest_addr, dest_port, app_face,
+ swif, &faceid);
+
+done:
+
+ if ((ret == AOK) && faceid_p)
+ {
+ *faceid_p = faceid;
+ }
+ if (cicn_rd)
+ {
+ cicn_rd_set (cicn_rd, crc, ret);
+ }
+
+ return (ret);
+}
+
+void
+cicn_face_flags_update (cicn_face_db_entry_t * face, int set, u32 uflags)
+{
+ u32 oflags, nflags;
+
+ oflags = nflags = face->flags;
+
+ if (set)
+ {
+ nflags |= uflags;
+ }
+ else
+ {
+ nflags &= ~uflags;
+ }
+
+ if (oflags == nflags)
+ {
+ return;
+ }
+ face->flags = nflags;
+
+ if (oflags & CICN_FACE_FLAGS_DOWN)
+ {
+ if (!(nflags & CICN_FACE_FLAGS_DOWN))
+ {
+ // face => up
+ }
+ }
+ else
+ {
+ if (nflags & CICN_FACE_FLAGS_DOWN)
+ {
+ // face => down
+ }
+ }
+}
+
+/*
+ * based on add being true/false, increment/decrement count of
+ * fib nexthops using face
+ *
+ * return success/error for supplied faceid being valid/invalid
+ */
+int
+cicn_face_fib_nh_cnt_update (int faceid, int add)
+{
+ int ret;
+ cicn_face_db_entry_t *face;
+
+ ret = cicn_face_entry_find_by_id (faceid, &face);
+ if (ret != 0)
+ {
+ goto done;
+ }
+ face->fe_fib_nh_cnt += add ? 1 : -1;
+
+done:
+ return (ret);
+}
+
+/***************************************************************************
+ * CICN_FACE management plane (binary API, debug cli) helper routines
+ ***************************************************************************/
+
+/*
+ * Binary serialization for get face configuration API.
+ */
+vnet_api_error_t
+cicn_face_api_entry_params_serialize (int faceid,
+ vl_api_cicn_api_face_params_get_reply_t
+ * reply)
+{
+ vnet_api_error_t rv = VNET_API_ERROR_NO_SUCH_ENTRY;
+
+ if (!reply)
+ {
+ rv = VNET_API_ERROR_INVALID_ARGUMENT;
+ goto done;
+ }
+
+ int i;
+ for (i = 0; i < cicn_face_db.entry_count; i++)
+ {
+ if (i >= CICN_PARAM_FACES_MAX)
+ { // should never happen
+ break;
+ }
+ if (cicn_face_db.entries[i].faceid != faceid)
+ {
+ continue;
+ }
+
+ reply->local_addr =
+ clib_host_to_net_u32 (cicn_face_db.entries[i].src_addr.
+ sin_addr.s_addr);
+ reply->local_port = cicn_face_db.entries[i].src_addr.sin_port;
+
+ reply->remote_addr =
+ clib_host_to_net_u32 (cicn_face_db.entries[i].dest_addr.
+ sin_addr.s_addr);
+ reply->remote_port = cicn_face_db.entries[i].dest_addr.sin_port;
+
+ reply->flags = clib_host_to_net_i32 (cicn_face_db.entries[i].flags);
+
+ reply->sw_interface_id =
+ clib_host_to_net_i32 (cicn_face_db.entries[i].swif);
+
+ rv = CICN_VNET_API_ERROR_NONE;
+ break;
+ }
+
+done:
+ return (rv);
+}
+
+/*
+ * Binary serialization for show faces API.
+ */
+int
+cicn_face_api_entry_props_serialize (vl_api_cicn_api_face_props_get_reply_t *
+ reply)
+{
+ int rv = CICN_VNET_API_ERROR_NONE;
+ int i;
+
+ if (!reply)
+ {
+ rv = VNET_API_ERROR_INVALID_ARGUMENT;
+ goto done;
+ }
+
+ for (i = 0; i < cicn_face_db.entry_count; i++)
+ {
+ if (i >= CICN_PARAM_FACES_MAX)
+ { // should never happen
+ break;
+ }
+
+ cicn_face_db_entry_t *face = &cicn_face_db.entries[i];
+ cicn_api_face_entry_t *api_face = (cicn_api_face_entry_t *)
+ (&reply->face[i * sizeof (cicn_api_face_entry_t)]);
+
+ api_face->faceid = clib_host_to_net_i32 (face->faceid);
+
+ api_face->local_addr =
+ clib_host_to_net_u32 (face->src_addr.sin_addr.s_addr);
+ api_face->local_port = face->src_addr.sin_port;
+
+ api_face->remote_addr =
+ clib_host_to_net_u32 (face->dest_addr.sin_addr.s_addr);
+ api_face->remote_port = face->dest_addr.sin_port;
+
+ api_face->flags = clib_host_to_net_i32 (face->flags);
+
+ api_face->sw_interface_id = clib_host_to_net_i32 (face->swif);
+
+ api_face->fib_nhs = clib_host_to_net_u32 (face->fe_fib_nh_cnt);
+ }
+ reply->nentries = clib_host_to_net_i32 (i);
+
+done:
+ return (rv);
+}
+
+
+/*
+ * Binary serialization for face statistics API.
+ */
+int
+cicn_face_api_entry_stats_serialize (int faceid,
+ vl_api_cicn_api_face_stats_get_reply_t *
+ reply)
+{
+ vnet_api_error_t rv = VNET_API_ERROR_NO_SUCH_ENTRY;
+
+ if (!reply)
+ {
+ rv = VNET_API_ERROR_INVALID_ARGUMENT;
+ goto done;
+ }
+
+ int i;
+ for (i = 0; i < cicn_face_db.entry_count; i++)
+ {
+ if (i >= CICN_PARAM_FACES_MAX)
+ { // should never happen
+ break;
+ }
+
+ cicn_face_db_entry_t *face = &cicn_face_db.entries[i];
+
+ if (face->faceid != faceid)
+ {
+ continue;
+ }
+
+ cicn_face_stats_t f_stats;
+
+ reply->faceid = clib_host_to_net_i32 (face->faceid);
+
+ cicn_face_stats_aggregate (&cicn_face_db.entries[i], &f_stats);
+
+ reply->orig_interests = clib_host_to_net_u64 (f_stats.orig_interests);
+ reply->orig_datas = clib_host_to_net_u64 (f_stats.orig_datas);
+ reply->orig_naks = clib_host_to_net_u64 (f_stats.orig_naks);
+ reply->term_interests = clib_host_to_net_u64 (f_stats.term_interests);
+ reply->term_datas = clib_host_to_net_u64 (f_stats.term_datas);
+ reply->term_naks = clib_host_to_net_u64 (f_stats.term_naks);
+ reply->in_interests = clib_host_to_net_u64 (f_stats.in_interests);
+ reply->in_datas = clib_host_to_net_u64 (f_stats.in_datas);
+ reply->in_naks = clib_host_to_net_u64 (f_stats.in_naks);
+ reply->out_interests = clib_host_to_net_u64 (f_stats.out_interests);
+ reply->out_datas = clib_host_to_net_u64 (f_stats.out_datas);
+ reply->out_naks = clib_host_to_net_u64 (f_stats.out_naks);
+
+ rv = CICN_VNET_API_ERROR_NONE;
+ break;
+ }
+
+done:
+ return (rv);
+}
+
+/*
+ * CLI show output for faces. If 'faceid' >= 0, show just that single face
+ */
+int
+cicn_face_show (int faceid_arg, int detail_p, int internal_p)
+{
+ int ret = 0;
+ int i;
+ u8 *sbuf = 0, *dbuf = 0;
+
+ cicn_cli_output ("Faces:");
+
+ for (i = 0; i < cicn_face_db.entry_count; i++)
+ {
+ cicn_face_db_entry_t *face = &cicn_face_db.entries[i];
+ int efaceid = face->faceid;
+ if ((faceid_arg >= 0) && (faceid_arg != efaceid))
+ {
+ continue;
+ }
+
+ vec_reset_length (sbuf);
+ sbuf = format (sbuf, "%U", format_ip4_address,
+ &face->src_addr.sin_addr);
+ vec_terminate_c_string (sbuf);
+
+ vec_reset_length (dbuf);
+ dbuf = format (dbuf, "%U", format_ip4_address,
+ &face->dest_addr.sin_addr);
+ vec_terminate_c_string (dbuf);
+
+ char *if_status = "unknown";
+ if (face->flags & CICN_FACE_FLAG_DELETED)
+ {
+ if_status = "DELETED";
+ }
+ else if (face->flags & CICN_FACE_FLAG_ADMIN_DOWN)
+ {
+ if_status = "admin-down";
+ }
+ else if (face->flags & CICN_FACE_FLAGS_DOWN)
+ {
+ if_status = "oper-down";
+ }
+ else
+ {
+ if_status = "up";
+ }
+ cicn_cli_output (" Face %d: %s:%d <-> %s:%d (swif %d)",
+ face->faceid, sbuf, ntohs (face->src_addr.sin_port),
+ dbuf, ntohs (face->dest_addr.sin_port), face->swif);
+
+ cicn_cli_output ("\tFace Type:%s, State:%s, FIB_NHs:%d, Class:%s(%s)",
+ face->app_face ? "app" : "peer", if_status,
+ face->fe_fib_nh_cnt, cicn_face_dev_class_name (face),
+ face->swif_cloning_supported ? "clone" : "copy");
+
+ // TODO: More output.
+ cicn_main_t *sm = &cicn_main;
+ if (sm->cicn_hello_adjs[efaceid].active)
+ {
+ cicn_cli_output
+ ("\t%-14.14s State:enabled,%s [last_sent:%lu, last_rcvd:%lu]",
+ "Hello Proto:",
+ (face->flags & CICN_FACE_FLAG_HELLO_DOWN) ? "down" : "up",
+ sm->cicn_hello_adjs[efaceid].last_sent_seq_num,
+ sm->cicn_hello_adjs[efaceid].last_received_seq_num);
+ }
+ else
+ {
+ cicn_cli_output ("\tHello Protocol: State:disabled");
+ }
+
+ cicn_face_stats_t face_stats;
+ cicn_face_stats_aggregate (face, &face_stats);
+
+#define CICN_SHOW_MSGS_FMT "\t%-14.14s Interests:%lu, Data:%lu, Naks:%lu"
+ cicn_cli_output (CICN_SHOW_MSGS_FMT, "Originated:",
+ face_stats.orig_interests, face_stats.orig_datas,
+ face_stats.orig_naks);
+
+ cicn_cli_output (CICN_SHOW_MSGS_FMT, "Terminated:",
+ face_stats.term_interests, face_stats.term_datas,
+ face_stats.term_naks);
+
+ cicn_cli_output (CICN_SHOW_MSGS_FMT, "Received:",
+ face_stats.in_interests, face_stats.in_datas,
+ face_stats.in_naks);
+
+ cicn_cli_output (CICN_SHOW_MSGS_FMT, "Sent:",
+ face_stats.out_interests, face_stats.out_datas,
+ face_stats.out_naks);
+
+ if (faceid_arg >= 0)
+ { // found it
+ break;
+ }
+ }
+
+ if (sbuf)
+ {
+ vec_free (sbuf);
+ }
+ if (dbuf)
+ {
+ vec_free (dbuf);
+ }
+
+ return (ret);
+}
diff --git a/cicn-plugin/cicn/cicn_face.h b/cicn-plugin/cicn/cicn_face.h
new file mode 100644
index 00000000..b8687951
--- /dev/null
+++ b/cicn-plugin/cicn/cicn_face.h
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Internal API to ICN face table
+ */
+#ifndef _CICN_FACE_H_
+#define _CICN_FACE_H_
+
+#if !CICN_VPP_PLUGIN
+#error "cicn-internal file included externally"
+#endif
+
+#include <vlibsocket/api.h>
+
+#include "cicn_hello.h"
+
+typedef struct cicn_face_stats_s
+{
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+ uint64_t orig_interests;
+ uint64_t orig_datas;
+ uint64_t orig_naks;
+ uint64_t term_interests;
+ uint64_t term_datas;
+ uint64_t term_naks;
+ uint64_t in_interests;
+ uint64_t in_datas;
+ uint64_t in_naks;
+ uint64_t out_interests;
+ uint64_t out_datas;
+ uint64_t out_naks;
+} cicn_face_stats_t;
+
+/*
+ * Cache info about 'faces' so we can glue together the cicn
+ * and vpp views of the world
+ */
+struct cicn_face_db_entry_s
+{
+ u32 flags;
+ int faceid; /* Our internal id */
+ int swif; /* VPP sw if index */
+ int swif_dev_class_index;
+ int swif_is_dpdk_driver;
+ int swif_cloning_supported;
+ int app_face;
+ struct sockaddr_in src_addr;
+ struct sockaddr_in dest_addr;
+ u8 fe_ha_name_cmn[CICN_HELLO_NAME_CMN_FLEN];
+ uint32_t fe_fib_nh_cnt; /* Refcount of dependent FIB entries */
+ cicn_hello_fcd_t fe_ha_fcd_loc;
+ cicn_hello_fcd_t fe_ha_fcd_nbr;
+};
+
+/* Face cache flags */
+#define CICN_FACE_FLAG_ADMIN_DOWN 0x02
+#define CICN_FACE_FLAG_HELLO_DOWN 0x04
+#define CICN_FACE_FLAG_DELETED 0x08
+
+#define CICN_FACE_FLAGS_DEFAULT 0x00
+#define CICN_FACE_FLAGS_DOWN_HARD \
+ (CICN_FACE_FLAG_ADMIN_DOWN | CICN_FACE_FLAG_DELETED)
+#define CICN_FACE_FLAGS_DOWN \
+ (CICN_FACE_FLAGS_DOWN_HARD | CICN_FACE_FLAG_HELLO_DOWN)
+
+/*
+ * Face database: table of all configured faces
+ */
+
+/* TODO -- use vpp pool instead? */
+typedef struct cicn_face_db_s
+{
+ int entry_count;
+ cicn_face_db_entry_t entries[CICN_PARAM_FACES_MAX];
+} cicn_face_db_t;
+
+extern cicn_face_db_t cicn_face_db;
+
+/* Create face, typically while handling cli input. Returns zero
+ * and mails back the faceid on success.
+ */
+int cicn_face_add (uint32_t src_addr, uint16_t src_port,
+ uint32_t dest_addr, uint16_t dest_port,
+ int app_face, int swif, int *faceid_p,
+ cicn_rd_t * cicn_rd);
+
+/* update (set or clear) supplied flags in face table entry */
+void
+cicn_face_flags_update (cicn_face_db_entry_t * face, int set, u32 uflags);
+/* update refcount for add/delete of fib entry nh pointing to face */
+int cicn_face_fib_nh_cnt_update (int faceid, int add);
+
+/* Find a face entry by face id. Return AOK and mail back face on success */
+int cicn_face_entry_find_by_id (int id, cicn_face_db_entry_t ** pface);
+
+/* Find a face cache entry by address (e.g. from a packet) */
+int cicn_face_entry_find_by_addr (const struct sockaddr_in *src,
+ const struct sockaddr_in *dest,
+ cicn_face_db_entry_t ** pface);
+/* Find face cache index (e.g. for distributed face statistics) */
+int cicn_face_db_index (const cicn_face_db_entry_t * face);
+/* Aggregate stats for one face across all cpus */
+int cicn_face_stats_aggregate (const cicn_face_db_entry_t * face,
+ cicn_face_stats_t * face_stats);
+
+/* CLI show output for faces. If 'faceid' >= 0, show just that single face */
+int cicn_face_show (int faceid, int detail_p, int internal_p);
+
+#endif // _CICN_FACE_H_
diff --git a/cicn-plugin/cicn/cicn_fib.c b/cicn-plugin/cicn/cicn_fib.c
new file mode 100644
index 00000000..a8e1e642
--- /dev/null
+++ b/cicn-plugin/cicn/cicn_fib.c
@@ -0,0 +1,798 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * cicn_fib.c: Fast-path, vpp-aware FIB, used in the cicn forwarder.
+ */
+
+#include <vlib/vlib.h>
+
+#include <cicn/cicn.h>
+#include <cicn/cicn_fib.h>
+
+/*
+ * Init/alloc a new FIB
+ */
+int
+cicn_fib_create (cicn_fib_t * p, uint32_t num_elems)
+{
+ int ret;
+
+ ret =
+ cicn_hashtb_alloc (&p->fib_table, num_elems, sizeof (cicn_fib_entry_t));
+ if (ret == AOK)
+ {
+ /* Reserve last entry in a row/bucket for overflow, to make
+ * unsynchronized FIB modifications easier.
+ */
+ p->fib_table->ht_flags |= CICN_HASHTB_FLAG_USE_SEVEN;
+ p->fib_table->ht_flags |= CICN_HASHTB_FLAG_KEY_FMT_PFX;
+
+ p->fib_flags = CICN_FIB_FLAGS_NONE;
+
+ p->fib_capacity = num_elems;
+
+ p->fib_max_comps = 0;
+ memset (&(p->fib_default_entry), 0, sizeof (cicn_fib_entry_t));
+ }
+
+ return (ret);
+}
+
+/*
+ * FIB lookup using 'pfxhash' struct containing prefix-hash results. This returns
+ * the longest matching entry (not a virtual entry). If there is no valid FIB
+ * match and a default FIB entry exists, the default is returned.
+ */
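+/*
+ * For example (assuming these prefixes are installed): with FIB entries
+ * for /a and /a/b/c, a lookup on /a/b/c/d matches /a/b/c, a lookup on
+ * /a/b matches /a, and a lookup on /x returns the default entry if one
+ * has been configured.
+ */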
+int
+cicn_fib_lookup (cicn_fib_t * fib, const cicn_prefix_hashinf_t * pfxhash,
+ cicn_fib_entry_t ** pentry)
+{
+ int found_p, i, ret = EINVAL;
+ cicn_hash_node_t *node = NULL;
+ cicn_fib_entry_t *fe;
+
+ if ((fib == NULL) || (pfxhash == NULL) || (pfxhash->pfx_ptr == NULL) ||
+ (pfxhash->pfx_len == 0) ||
+ (pfxhash->pfx_count > CICN_HASHTB_MAX_NAME_COMPS) || (pentry == NULL))
+ {
+
+ goto done;
+ }
+
+ found_p = FALSE;
+
+ /* If we have a default entry, start with that */
+ if (fib->fib_flags & CICN_FIB_FLAG_DEFAULT_SET)
+ {
+ *pentry = &fib->fib_default_entry;
+
+ /* And we have some form of success... */
+ found_p = TRUE;
+ }
+
+ /*
+ * TODO -- souped-up, thoughtful walk through the prefixes, optimized
+ * to start 'in the middle' and then walk 'up' or 'down' depending on
+ * what we find. For now, just simple iteration...
+ */
+
+ /*
+ * Iterate through the prefix hashes, looking for the longest match
+ */
+ for (i = 0; i < pfxhash->pfx_count; i++)
+ {
+ ret = cicn_hashtb_lookup_node (fib->fib_table,
+ pfxhash->pfx_ptr,
+ pfxhash->pfx_lens[i],
+ pfxhash->pfx_hashes[i], &node);
+ if (ret == AOK)
+ {
+ fe = cicn_fib_get_data (node);
+
+ /* Don't use a 'virtual' entry */
+ if (fe->fe_flags & CICN_FIB_ENTRY_FLAG_VIRTUAL)
+ {
+ continue;
+ }
+
+ /* Best match so far */
+ *pentry = fe;
+ found_p = TRUE;
+
+ }
+ else
+ {
+ /* No more possible longer prefixes */
+ break;
+ }
+ }
+
+ if (found_p)
+ {
+ ret = AOK;
+ }
+
+done:
+
+ return (ret);
+}
+
+/*
+ * Insert a new prefix into the FIB (or add an additional next-hop,
+ * if the prefix already exists, or mod an existing next-hop, if the
+ * next-hop already exists.) We expect that 'pfx' is the start of the
+ * name-components only, not the start of a complete 'name' TLV. We expect
+ * that the prefix-hashing has already been done, into 'pfxhash'.
+ */
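+/*
+ * For example, inserting /a/b/c into an empty FIB also creates entries
+ * for the intermediate prefixes /a and /a/b; those are marked
+ * CICN_FIB_ENTRY_FLAG_VIRTUAL and are not returned by lookups.
+ */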
+int
+cicn_fib_entry_insert (cicn_fib_t * fib,
+ const cicn_prefix_hashinf_t * pfxhash,
+ uint16_t faceid, uint8_t weight, cicn_rd_t * cicn_rd)
+{
+ int ret = EINVAL;
+ cicn_rc_e crc = CICN_RC_OK;
+ int i, j;
+ cicn_hash_node_t *node_array[CICN_HASHTB_MAX_NAME_COMPS];
+ int node_count = 0;
+ cicn_hash_node_t *pnode;
+ cicn_fib_entry_t *fe;
+ int add_ref_val = 0;
+
+ if ((fib == NULL) || (pfxhash == NULL) || (pfxhash->pfx_ptr == NULL) ||
+ (pfxhash->pfx_len == 0) || (pfxhash->pfx_count == 0))
+ {
+ goto done;
+ }
+ if (pfxhash->pfx_count > CICN_HASHTB_MAX_NAME_COMPS ||
+ pfxhash->pfx_overflow)
+ {
+ crc = CICN_RC_FIB_PFX_COMP_LIMIT;
+ goto done;
+ }
+
+ /* Start walking down the series of intermediate prefixes,
+ * capturing the hash node at each level that already exists. We need
+ * this in order to manage internal state, like refcounts
+ * and virtual FIB nodes.
+ */
+ for (i = 0; i < pfxhash->pfx_count; i++)
+ {
+ ret = cicn_hashtb_lookup_node (fib->fib_table,
+ pfxhash->pfx_ptr,
+ pfxhash->pfx_lens[i],
+ pfxhash->pfx_hashes[i], &pnode);
+ if (ret != AOK)
+ { // this component and beneath not present
+ break;
+ }
+ node_array[node_count] = pnode;
+ node_count++;
+ }
+
+ /* Now we've reached either a) the point where parents of the offered
+ * prefix end, or b) the entry for the offered prefix.
+ */
+ while (i < pfxhash->pfx_count)
+ {
+ /*
+ * There are more components in 'pfx' than are in the existing
+ * fib. We need to add one or more entries, probably virtual.
+ */
+
+ /* Allocate a new node */
+ pnode = cicn_hashtb_alloc_node (fib->fib_table);
+ if (pnode == NULL)
+ {
+ /* TODO -- clean up any new virtual nodes we've added! */
+ ret = ENOMEM;
+ goto done;
+ }
+
+ /* Set up the embedded virtual fib entry */
+ fe = cicn_fib_get_data (pnode);
+
+ /* Clear out the new entry */
+ memset (fe, 0, sizeof (cicn_fib_entry_t));
+
+ fe->fe_flags = CICN_FIB_ENTRY_FLAG_VIRTUAL;
+
+ /* Set up the hash node */
+ cicn_hashtb_init_node (fib->fib_table, pnode,
+ pfxhash->pfx_hashes[i],
+ pfxhash->pfx_ptr, pfxhash->pfx_lens[i]);
+
+ /* Insert into the hashtable */
+ ret = cicn_hashtb_insert (fib->fib_table, pnode);
+ if (ret != AOK)
+ {
+ /* Whoa - we didn't expect that */
+ goto done;
+ /* TODO -- cleanup on error */
+ }
+
+ /*
+ * Save new nodes in the array too.
+ */
+ ASSERT (node_count < (CICN_HASHTB_MAX_NAME_COMPS - 1));
+ node_array[node_count] = pnode;
+
+ node_count++;
+ i++;
+
+ /* Count each added 'level' of prefixes */
+ add_ref_val++;
+ }
+
+ /*
+ * At this point, we've either found or added a new entry node,
+ * it's the last one in the array of nodes, and we're ready
+ * to set it up. If it's valid, we'll walk back
+ * through the parents and update them (refcount, max-comps, etc.)
+ */
+ ASSERT (node_count > 0);
+ pnode = node_array[node_count - 1];
+
+ /* Set up (or update) the embedded actual fib entry */
+ fe = cicn_fib_get_data (pnode);
+
+ /* If this was an existing _virtual_ entry, convert it to a real one */
+ fe->fe_flags &= ~(CICN_FIB_ENTRY_FLAG_VIRTUAL |
+ CICN_FIB_ENTRY_FLAG_DELETED);
+
+ /* Next-hop face and weight. We'll _update_ a next-hop that matches
+ * the current face, or else we'll add a new next-hop.
+ */
+ for (i = 0; i < ARRAY_LEN (fe->fe_next_hops); i++)
+ {
+ if (fe->fe_next_hops[i].nh_faceid == faceid)
+ {
+ if (fe->fe_next_hops[i].nh_weight == weight)
+ {
+ ret = EEXIST;
+ goto done;
+ }
+
+ /* Found a matching entry */
+ fe->fe_next_hops[i].nh_weight = weight;
+ break;
+ }
+ }
+
+ if (i == ARRAY_LEN (fe->fe_next_hops))
+ {
+ /* Didn't find a match, try to find a free nh slot */
+ for (i = 0; i < ARRAY_LEN (fe->fe_next_hops); i++)
+ {
+ cicn_fib_entry_nh_t *fe_nh = &fe->fe_next_hops[i];
+ if (fe->fe_next_hops[i].nh_faceid != 0)
+ {
+ continue;
+ }
+ /* Found a free entry */
+ ret = cicn_face_fib_nh_cnt_update (faceid, 1 /*add */ );
+ if (ret != AOK)
+ { // should not happen
+ break;
+ }
+ fe_nh->nh_faceid = faceid;
+ fe_nh->nh_weight = weight;
+ break;
+ }
+ if (i == ARRAY_LEN (fe->fe_next_hops))
+ {
+ /* Whoops - can't add any more next-hops */
+ /* TODO -- clean up on error at this point, since we may have added
+ * virtual nodes.
+ */
+ ret = ENOSPC;
+ crc = CICN_RC_FIB_NHOP_LIMIT;
+ goto done;
+ }
+ }
+
+
+ /* Max comps */
+ if (pfxhash->pfx_count > fe->fe_max_comps)
+ {
+ fe->fe_max_comps = pfxhash->pfx_count;
+ }
+
+ /*
+ * Loop back through the nodes, updating the refcounts, max-comps, etc.
+ */
+ for (i = node_count - 1, j = 1; i >= 0; i--, j++)
+ {
+ pnode = node_array[i];
+
+ fe = cicn_fib_get_data (pnode);
+
+ /*
+ * Update refcounts if we added any new prefixes.
+ */
+ if (add_ref_val > 0)
+ {
+
+ /* Add refs to all the new nodes */
+ if (j < add_ref_val)
+ {
+ fe->fe_refcount += j;
+ }
+ else
+ {
+ /* Add refs for all the new nodes to existing parents */
+ fe->fe_refcount += add_ref_val;
+ }
+ }
+
+ if (pfxhash->pfx_count > fe->fe_max_comps)
+ {
+ fe->fe_max_comps = pfxhash->pfx_count;
+ }
+ }
+
+ ret = AOK;
+
+done:
+ if (cicn_rd)
+ {
+ cicn_rd_set (cicn_rd, crc, ret);
+ }
+
+ return (ret);
+}
+
+/*
+ * Delete a FIB prefix, or just delete a next-hop, if 'faceid' != 0.
+ * If the prefix has children, this may just result in the conversion of
+ * the entry into a virtual entry.
+ * We expect that the prefix-hashing has already been done, into 'pfxhash'.
+ */
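+/*
+ * For example, deleting the last next-hop of /a/b/c removes that entry;
+ * parent entries (/a, /a/b) that exist only as virtual placeholders and
+ * are no longer referenced are removed as well.
+ */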
+int
+cicn_fib_entry_delete (cicn_fib_t * fib,
+ const cicn_prefix_hashinf_t * pfxhash, uint16_t faceid)
+{
+ int i, counter, ret = EINVAL;
+ cicn_hash_node_t *node_array[CICN_HASHTB_MAX_NAME_COMPS];
+ int node_count = 0;
+ cicn_hash_node_t *pnode;
+ cicn_fib_entry_t *fe;
+
+ if ((fib == NULL) || (pfxhash == NULL) ||
+ (pfxhash->pfx_ptr == NULL) || (pfxhash->pfx_len == 0) ||
+ (pfxhash->pfx_count == 0) ||
+ (pfxhash->pfx_count > CICN_HASHTB_MAX_NAME_COMPS))
+ {
+ goto done;
+ }
+
+ /* Start walking down the series of intermediate prefixes,
+ * capturing the hash node at each level that already exists. We need
+ * this in order to manage internal state, like refcounts
+ * and virtual FIB nodes. We use the extended 'lookup' api so that we
+ * will see hashtable nodes that were marked for deletion.
+ */
+ for (i = 0; i < pfxhash->pfx_count; i++)
+ {
+ ret = cicn_hashtb_lookup_node_ex (fib->fib_table,
+ pfxhash->pfx_ptr,
+ pfxhash->pfx_lens[i],
+ pfxhash->pfx_hashes[i], TRUE, &pnode);
+ if (ret != AOK)
+ { // should not happen
+ break;
+ }
+ node_array[i] = pnode;
+ }
+
+ /*
+ * Now we've reached either a) the entry for the offered prefix, or b) the
+ * end of the bread-crumb trail...
+ */
+ if (i < pfxhash->pfx_count)
+ {
+ /* If we can't get to the prefix specified,
+ * then we can't really proceed?
+ */
+ ret = ENOENT;
+ goto done;
+ }
+
+ node_count = i;
+ pnode = node_array[node_count - 1]; // first update actual node, then parents
+ fe = cicn_fib_get_data (pnode);
+
+ /*
+ * If we're clearing a single next-hop, see whether we should remove the
+ * whole entry. 'pnode' is the last node in the array, which should be
+ * the target entry.
+ */
+ if (faceid != 0)
+ {
+ counter = 0;
+ ret = ENOENT;
+
+ for (i = 0; i < ARRAY_LEN (fe->fe_next_hops); i++)
+ {
+ cicn_fib_entry_nh_t *fe_nh = &fe->fe_next_hops[i];
+
+ /* Remove the specific next-hop */
+ if (fe_nh->nh_faceid == faceid)
+ {
+ cicn_face_fib_nh_cnt_update (faceid, 0 /*!add */ );
+ fe_nh->nh_faceid = 0;
+
+ /* Return success if we find the specified next-hop */
+ ret = AOK;
+ }
+
+ /* Count all existing next-hops */
+ if (fe_nh->nh_faceid != 0)
+ {
+ counter++;
+ }
+ }
+
+ if (counter != 0)
+ {
+ /* Other next-hops remain, so keep the entry; remove it only when none remain */
+ goto done;
+ }
+ }
+
+ /*
+ * Remove entry if it's a leaf, or convert it to 'virtual' if not.
+ */
+
+ /* First clear out next-hop(s) */
+ for (i = 0; i < ARRAY_LEN (fe->fe_next_hops); i++)
+ {
+ cicn_fib_entry_nh_t *fe_nh = &fe->fe_next_hops[i];
+
+ if (fe_nh->nh_faceid == 0)
+ {
+ continue;
+ }
+ cicn_face_fib_nh_cnt_update (fe_nh->nh_faceid, 0 /*!add */ );
+ fe_nh->nh_faceid = 0;
+ }
+
+ if (fe->fe_refcount > 1)
+ {
+ /* Convert to virtual entry in-place */
+
+ /* Set the 'virtual' flag */
+ fe->fe_flags |= CICN_FIB_ENTRY_FLAG_VIRTUAL;
+
+ /* No changes to parents, since we aren't changing the internal
+ * prefix structure, so we're done.
+ */
+ ret = AOK;
+ goto done;
+
+ }
+ else
+ {
+ /* Remove entry entirely */
+ ret = cicn_hashtb_delete (fib->fib_table, &pnode);
+ pnode = NULL;
+ }
+
+
+ /*
+ * We've removed a node: loop back through the parents,
+ * updating the refcounts, max-comps, etc. We may decide to remove
+ * parent nodes too, if their only descendent has been deleted.
+ */
+ counter = 1;
+ for (i = node_count - 2; i >= 0; i--)
+ {
+ pnode = node_array[i];
+
+ fe = cicn_fib_get_data (pnode);
+
+ fe->fe_refcount -= counter;
+
+ /* TODO -- figure out whether we can do anything about the max-comps
+ * at the parents' levels
+ */
+
+ if (fe->fe_refcount > 1)
+ {
+ continue;
+ }
+
+ /* This entry is no longer ref'd; if it's 'virtual', we can
+ * delete it too.
+ */
+ if (fe->fe_flags & CICN_FIB_ENTRY_FLAG_VIRTUAL)
+ {
+ /* Remove entry entirely */
+ ret = cicn_hashtb_delete (fib->fib_table, &pnode);
+
+ /* Now more nodes have been removed, so more refs need to be
+ * removed at the parents...
+ */
+ counter++;
+ }
+ }
+
+ ret = AOK;
+
+done:
+
+ return (ret);
+}
+
+/*
+ * Mark a FIB prefix for delete, before actually deleting through a later
+ * api call.
+ * We expect that the prefix-hashing has already been done, into 'pfxhash'.
+ * This will check virtual parents' refcounts, and will mark them for
+ * delete also if necessary.
+ */
+int
+cicn_fib_entry_mark_for_delete (cicn_fib_t * fib,
+ const cicn_prefix_hashinf_t * pfxhash)
+{
+ int ret = EINVAL;
+
+ return (ret);
+}
+
+/*
+ * Add, delete, or change weight of fib entry next hop (which may
+ * lead to add/delete of fib entry)
+ */
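+/*
+ * Hypothetical usage sketch (prefix, faceid and weight are placeholders;
+ * the fourth argument selects add (1) vs. delete (0)):
+ *   cicn_rd_t rd;
+ *   int rc = cicn_fib_entry_nh_update ("/example", 1, 1, 1, &rd);
+ */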
+int
+cicn_fib_entry_nh_update (const char *prefix, int faceid, int weight,
+ int add_p, cicn_rd_t * cicn_rd_res)
+{
+ cicn_rd_t cicn_rd;
+
+ int len;
+ uint8_t buf[CICN_PARAM_FIB_ENTRY_PFX_WF_BYTES_MAX];
+ cicn_prefix_hashinf_t pfxhash;
+
+ cicn_rd_set (&cicn_rd, CICN_RC_OK, AOK);
+
+ /* Check that we're init-ed */
+ if (!cicn_infra_fwdr_initialized)
+ {
+ cicn_cli_output ("cicn: disabled");
+
+ cicn_rd.rd_ux_rc = EINVAL;
+ goto done;
+ }
+
+ /* Quick check for valid face for adds */
+ if (add_p)
+ {
+ if (cicn_face_entry_find_by_id (faceid, NULL) != AOK)
+ {
+ cicn_rd.rd_cicn_rc = CICN_RC_FACE_UNKNOWN;
+ cicn_rd.rd_ux_rc = EINVAL;
+ goto done;
+ }
+ }
+
+ /* Convert prefix to wire-format */
+ len = cicn_parse_name_comps_from_str (buf, sizeof (buf), prefix, &cicn_rd);
+ if (len < 0)
+ {
+ goto done;
+ }
+
+ /* Hash the prefix */
+ cicn_rd.rd_ux_rc =
+ cicn_hashtb_hash_prefixes (buf, len, 0 /*full_name */ , &pfxhash,
+ 0 /*limit */ );
+ if (cicn_rd.rd_ux_rc != AOK)
+ {
+ goto done;
+ }
+
+ /* Call to the fib apis */
+ if (add_p)
+ {
+ /* TODO -- support next-hop weight */
+ cicn_rd.rd_ux_rc =
+ cicn_fib_entry_insert (&cicn_main.fib, &pfxhash, faceid, weight,
+ &cicn_rd);
+ }
+ else
+ {
+ cicn_rd.rd_ux_rc =
+ cicn_fib_entry_delete (&cicn_main.fib, &pfxhash, faceid);
+ }
+
+done:
+
+ if (cicn_rd_res)
+ {
+ *cicn_rd_res = cicn_rd;
+ }
+ return (cicn_rd.rd_ux_rc);
+}
+
+/***************************************************************************
+ * CICN_FIB management plane (debug cli, binary API) helper routines
+ ***************************************************************************/
+
+/*
+ * CLI show output for fib. If 'prefix' is given, show just that entry
+ */
+int
+cicn_fib_show (const char *prefix, int detail_p, int internal_p)
+{
+ int i, ret = 0;
+ uint64_t cookie;
+ cicn_hash_node_t *node;
+ cicn_fib_entry_t *fe;
+ char cbuf[CICN_PARAM_HASHTB_KEY_BYTES_MAX];
+ u8 *strbuf = NULL;
+
+ /* TODO -- use the supplied 'prefix' */
+
+ /* Check that we're init-ed */
+ if (!cicn_infra_fwdr_initialized)
+ {
+ cicn_cli_output ("cicn: disabled");
+
+ ret = EINVAL;
+ goto done;
+ }
+
+ cicn_cli_output ("cicn FIB:");
+
+ /* Walk the FIB hashtable, */
+ cookie = CICN_HASH_WALK_CTX_INITIAL;
+
+ while (cicn_hashtb_next_node (cicn_main.fib.fib_table,
+ &node, &cookie) == AOK)
+ {
+
+ fe = cicn_fib_get_data (node);
+
+ /* Skip virtual entries unless 'internal_p' */
+ if (!internal_p && (fe->fe_flags & CICN_FIB_ENTRY_FLAG_VIRTUAL))
+ {
+ continue;
+ }
+
+ vec_reset_length (strbuf);
+
+ ret =
+ cicn_hashtb_key_to_str (cicn_main.fib.fib_table, node, cbuf,
+ sizeof (cbuf), 0 /*!must_fit */ );
+
+ strbuf = format (strbuf, " %s/...", cbuf);
+ int pad = 16 - vec_bytes (strbuf); // even out to column 16
+ if (pad > 0)
+ {
+ strbuf = format (strbuf, "%*s", pad, "");
+ }
+
+
+ if (fe->fe_flags & CICN_FIB_ENTRY_FLAG_VIRTUAL)
+ {
+ strbuf = format (strbuf, " (virtual)");
+ }
+
+ if (internal_p)
+ {
+ strbuf = format (strbuf, " (ref:%d)", fe->fe_refcount);
+ }
+
+ for (i = 0; i < ARRAY_LEN (fe->fe_next_hops); i++)
+ {
+ if (fe->fe_next_hops[i].nh_faceid != 0)
+ {
+ strbuf = format (strbuf, " (face:%d, weight:%d)",
+ (int) (fe->fe_next_hops[i].nh_faceid),
+ (int) (fe->fe_next_hops[i].nh_weight));
+ }
+ }
+
+ /* Oy - vecs are neat, but ... */
+ vec_terminate_c_string (strbuf);
+ vlib_cli_output (cicn_main.vlib_main, "%s", strbuf);
+ }
+
+done:
+
+ if (strbuf)
+ {
+ vec_free (strbuf);
+ }
+
+ return (ret);
+}
+
+/*
+ * Binary serialization for show FIB API.
+ */
+int
+ cicn_fib_api_entry_props_serialize
+ (vl_api_cicn_api_fib_entry_props_get_reply_t * reply, int page)
+{
+ int rv = CICN_VNET_API_ERROR_NONE;
+
+ int i, nentries = 0;
+
+ uint64_t cookie;
+ cicn_hash_node_t *node;
+ cicn_fib_entry_t *fe;
+
+ /* Check that we're init-ed */
+ if (!cicn_infra_fwdr_initialized)
+ {
+ rv = VNET_API_ERROR_FEATURE_DISABLED;
+ goto done;
+ }
+
+ /* Walk the FIB hashtable, */
+ cookie = CICN_HASH_WALK_CTX_INITIAL;
+
+ while (cicn_hashtb_next_node (cicn_main.fib.fib_table,
+ &node, &cookie) == AOK)
+ {
+
+ fe = cicn_fib_get_data (node);
+
+ /* Skip virtual entries unless 'internal_p' */
+ if ((fe->fe_flags & CICN_FIB_ENTRY_FLAG_VIRTUAL))
+ {
+ continue;
+ }
+
+ /* TODO -- deal with overflow keys */
+ i = node->hn_keysize;
+ if (i > CICN_HASH_KEY_BYTES)
+ {
+ i = CICN_HASH_KEY_LIST_BYTES;
+ }
+
+ cicn_api_fib_entry_t *entry = (cicn_api_fib_entry_t *)
+ (&reply->fibentry[nentries * sizeof (cicn_api_fib_entry_t)]);
+
+ cicn_hashtb_key_to_str (cicn_main.fib.fib_table, node,
+ (char *) entry->prefix, sizeof (entry->prefix),
+ 0 /*!must_fit */ );
+
+ for (i = 0; i < ARRAY_LEN (fe->fe_next_hops); i++)
+ {
+ if (fe->fe_next_hops[i].nh_faceid == 0)
+ {
+ continue;
+ }
+ entry->faceid[i] =
+ clib_host_to_net_i32 (fe->fe_next_hops[i].nh_faceid);
+
+ entry->faceweight[i] =
+ clib_host_to_net_i32 (fe->fe_next_hops[i].nh_weight);
+
+ entry->nfaces = clib_host_to_net_i32 (i + 1);
+ }
+
+ nentries++;
+ reply->nentries = clib_host_to_net_i32 (nentries);
+ }
+
+done:
+ return (rv);
+}
+
+
+
diff --git a/cicn-plugin/cicn/cicn_fib.h b/cicn-plugin/cicn/cicn_fib.h
new file mode 100644
index 00000000..0a90ef86
--- /dev/null
+++ b/cicn-plugin/cicn/cicn_fib.h
@@ -0,0 +1,176 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * cicn_fib.h: Fast-path, vpp-aware FIB, used in the cicn forwarder.
+ */
+
+#ifndef _CICN_FIB_H_
+#define _CICN_FIB_H_ 1
+
+#if !CICN_VPP_PLUGIN
+#error "cicn-internal file included externally"
+#endif
+
+#include "cicn_types.h"
+#include "cicn_std.h"
+#include "cicn_params.h"
+#include "cicn_hashtb.h"
+
+/* FIB entry next-hop, info about a single face */
+typedef struct cicn_fib_entry_nh_s
+{
+ /* Flags for the entry */
+ uint8_t nh_flags;
+
+ uint8_t nh_weight;
+
+ /* Next-hop face. Invalid face value (of zero, for now)
+ * means 'skip this one'.
+ */
+ uint16_t nh_faceid;
+
+} cicn_fib_entry_nh_t;
+
+/* Flags for a FIB next-hop */
+#define CICN_FIB_NH_FLAGS_DEFAULT 0x00
+
+/* Next-hop is down, via admin or via some TBD BFD-ish protocol */
+#define CICN_FIB_NH_FLAG_DOWN 0x01
+
+/*
+ * FIB entry, info about a single prefix, and possibly
+ * containing multiple next-hops. This is embedded in a hashtable node,
+ * so its size (and alignment) have to be managed very carefully.
+ */
+typedef struct cicn_fib_entry_s
+{
+
+ /* Refcount for children. This helps us identify 'leaf' nodes,
+ * and helps us clean up virtual nodes that aren't needed any longer.
+ */
+ int32_t fe_refcount;
+
+ /* Next-hops. No 'count', because we don't assume these are
+ * contiguous. TODO -- need scalable next-hops
+ * (vector, pool, etc.) eventually.
+ */
+ cicn_fib_entry_nh_t fe_next_hops[CICN_PARAM_FIB_ENTRY_NHOPS_MAX];
+
+ /* Flags */
+ uint8_t fe_flags;
+
+ /* Max name components in this prefix */
+ uint8_t fe_max_comps;
+
+ /* Size is 4 + 2 + (4 * 4) => 22B */
+
+} cicn_fib_entry_t;
+
+/* Flags values for a FIB entry */
+#define CICN_FIB_ENTRY_FLAGS_DEFAULT 0x0
+
+#define CICN_FIB_ENTRY_FLAG_DELETED 0x1
+#define CICN_FIB_ENTRY_FLAG_VIRTUAL 0x2
+
+/*
+ * Overall fib table, containing an instance of the generic hashtable
+ */
+typedef struct cicn_fib_s
+{
+
+ /* Flags */
+ int fib_flags;
+
+ /* Default route entry */
+ cicn_fib_entry_t fib_default_entry;
+
+ uint16_t fib_max_comps; /* Max comps overall */
+
+ /* Internal generic hashtable */
+ cicn_hashtb_t *fib_table;
+
+ /* Maximum capacity (in entries) */
+ uint32_t fib_capacity;
+
+} cicn_fib_t;
+
+/* Flags for FIB */
+#define CICN_FIB_FLAGS_NONE 0x0
+#define CICN_FIB_FLAG_DEFAULT_SET 0x1
+
+
+/* Accessor for fib data inside hash table node */
+static inline cicn_fib_entry_t *
+cicn_fib_get_data (cicn_hash_node_t * node)
+{
+ return (cicn_fib_entry_t *) (cicn_hashtb_node_data (node));
+}
+
+/* Init/alloc a new FIB */
+int cicn_fib_create (cicn_fib_t * p, uint32_t num_elems);
+
+/*
+ * FIB lookup using 'pfxhash' struct containing prefix-hash results. This returns
+ * the longest matching entry (not a virtual entry). If there is no valid FIB
+ * match and a default FIB entry exists, the default is returned.
+ */
+int cicn_fib_lookup (cicn_fib_t * fib, const cicn_prefix_hashinf_t * pfxhash,
+ cicn_fib_entry_t ** pentry);
+
+/*
+ * Insert a new prefix into the FIB (or add an additional next-hop,
+ * if the prefix already exists, or mod an existing next-hop, if the
+ * next-hop already exists.) We expect that 'pfx' is the start of the
+ * name-components only, not the start of a complete 'name' TLV. We expect
+ * that the prefix-hashing has already been done, into 'pfxhash'.
+ */
+int cicn_fib_entry_insert (cicn_fib_t * fib,
+ const cicn_prefix_hashinf_t * pfxhash,
+ uint16_t faceid, uint8_t weight,
+ cicn_rd_t * cicn_rd_res);
+
+/*
+ * Mark a FIB prefix for delete, before actually deleting through a later
+ * api call.
+ * We expect that the prefix-hashing has already been done, into 'pfxhash'.
+ * This will check virtual parents' refcounts, and will mark them for
+ * delete also if necessary.
+ *
+ * TODO -- NYI...
+ */
+int cicn_fib_entry_mark_for_delete (cicn_fib_t * fib,
+ const cicn_prefix_hashinf_t * pfxhash);
+
+/*
+ * Delete a FIB prefix, or just delete a next-hop, if 'faceid' != 0.
+ * If the prefix has children, this may just result in the conversion of
+ * the entry into a virtual entry.
+ * We expect that the prefix-hashing has already been done, into 'pfxhash'.
+ */
+int cicn_fib_entry_delete (cicn_fib_t * fib,
+ const cicn_prefix_hashinf_t * pfxhash,
+ uint16_t faceid);
+
+/* CLI show output for fib. if 'prefix', just show a single entry */
+int cicn_fib_show (const char *prefix, int detail_p, int internal_p);
+
+/*
+ * Add, delete, or change weight of fib entry next hop (which may
+ * lead to add/delete of fib entry)
+ */
+int cicn_fib_entry_nh_update (const char *prefix, int faceid, int weight,
+ int add_p, cicn_rd_t * cicn_rd_res);
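+
+/*
+ * Example (sketch only; the prefix-string format is assumed to be the
+ * CLI-style form also accepted by cicn_fib_show, e.g. "/a/b"): add
+ * (add_p == 1) a next-hop on face 1 with weight 10, with result details
+ * returned in 'rd':
+ *
+ *   cicn_rd_t rd;
+ *   int ret = cicn_fib_entry_nh_update ("/a/b", 1, 10, 1, &rd);
+ */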
+
+#endif /* _CICN_FIB_H_ */
diff --git a/cicn-plugin/cicn/cicn_hashtb.c b/cicn-plugin/cicn/cicn_hashtb.c
new file mode 100644
index 00000000..e8b344fe
--- /dev/null
+++ b/cicn-plugin/cicn/cicn_hashtb.c
@@ -0,0 +1,1567 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * cicn_hashtb.c: Fast-path, vpp-aware hashtable, the base for the PIT/CS and FIB
+ * used in the cicn forwarder.
+ *
+ * - As is the case in other areas, we can't share headers between the vpp code
+ * and the cicn/cndn code: there are conflicting definitions.
+ */
+
+#include <stdlib.h>
+#include <errno.h>
+#include <assert.h>
+#include <inttypes.h>
+
+#include <vlib/vlib.h>
+#include <vppinfra/pool.h>
+
+#include "cicn_hashtb.h"
+#include "cicn_siphash.h" /* Inline implementation of siphash-2-4 */
+#include "cicn_parser.h"
+
+/* return dvd/dvr, rounded up (intended for integer values) */
+#define CEIL(dvd, dvr) \
+({ \
+ __typeof__ (dvd) _dvd = (dvd); \
+ __typeof__ (dvr) _dvr = (dvr); \
+ (_dvd + _dvr - 1)/_dvr; \
+})
+
+#ifndef ALIGN8
+#define ALIGN8(p) (((p) + 0x7) & ~(0x7))
+#endif
+
+#ifndef ALIGNPTR8
+#define ALIGNPTR8(p) ((void *)(((uintptr_t)(p) + 0x7) & ~((uintptr_t)0x7)))
+#endif
+
+#ifndef ALIGN64
+#define ALIGN64(p) (((p) + 0x3f) & ~(0x3f))
+#endif
+
+#ifndef TRUE
+#define TRUE 1
+#endif
+
+#ifndef FALSE
+#define FALSE 0
+#endif
+
+/* Default hash seed for now; TODO: needs to be random/per-box eventually */
+unsigned char cicn_default_sip_seed[16] = {
+ 0x12, 0x34, 0x56, 0x78, 0x98, 0x76, 0x54, 0x32,
+ 0x12, 0x34, 0x56, 0x78, 0x98, 0x76, 0x54, 0x32,
+};
+
+/* Offset to aligned start of additional data (PIT/CS, FIB)
+ * embedded in each node.
+ */
+uint32_t ht_node_data_offset_aligned;
+
+/* Some support for posix vs vpp mem management */
+#define MEM_ALLOC(x) clib_mem_alloc_aligned((x), 8)
+#define MEM_FREE(p) clib_mem_free((p))
+
+/*
+ * Internal utilities
+ */
+
+/* Allocate an overflow bucket */
+static cicn_hash_bucket_t *
+alloc_overflow_bucket (cicn_hashtb_h h)
+{
+ cicn_hash_bucket_t *newbkt = NULL;
+
+ if (h->ht_overflow_buckets_used < h->ht_overflow_bucket_count)
+ {
+ pool_get_aligned (h->ht_overflow_buckets, newbkt, 8);
+
+ if (newbkt)
+ {
+ h->ht_overflow_buckets_used++;
+ }
+ }
+
+ return (newbkt);
+}
+
+/* Free an overflow bucket; clear caller's pointer */
+static void
+free_overflow_bucket (cicn_hashtb_h h, cicn_hash_bucket_t ** pb)
+{
+ cicn_hash_bucket_t *bkt = *pb;
+
+ ASSERT (h->ht_overflow_buckets_used > 0);
+
+ pool_put (h->ht_overflow_buckets, bkt);
+ h->ht_overflow_buckets_used--;
+ *pb = NULL;
+}
+
+/* Allocate an overflow key buffer */
+static cicn_hash_key_t *
+alloc_key_buf (cicn_hashtb_h h)
+{
+ cicn_hash_key_t *hk = NULL;
+
+ if (h->ht_keys_used < h->ht_key_count)
+ {
+ pool_get_aligned (h->ht_extra_keys, hk, 8);
+ if (hk)
+ {
+ h->ht_keys_used++;
+ }
+ }
+
+ return (hk);
+}
+
+/* for iterating over key chunks, get next chunk given current chunk */
+static inline cicn_hash_key_t *
+next_key_buf (cicn_hashtb_h h, const cicn_hash_key_t * hk_cur)
+{
+ cicn_hash_key_t *hk_next;
+
+ if (hk_cur == NULL)
+ {
+ return (NULL);
+ }
+ hk_next = (hk_cur->kl.idx_next == CICN_HASH_INVALID_IDX) ? NULL :
+ pool_elt_at_index (h->ht_extra_keys, hk_cur->kl.idx_next);
+
+ return (hk_next);
+}
+
+/* Free an overflow key buffer; clear caller's pointer. */
+static void
+free_key_buf (cicn_hashtb_h h, cicn_hash_key_t ** pkey)
+{
+ cicn_hash_key_t *k = *pkey;
+
+ ASSERT (h->ht_keys_used > 0);
+
+ pool_put (h->ht_extra_keys, k);
+ h->ht_keys_used--;
+
+ *pkey = NULL;
+}
+
+/*
+ * Init, allocate a new hashtable
+ */
+int
+cicn_hashtb_alloc (cicn_hashtb_h * ph, uint32_t max_elems,
+ size_t app_data_size)
+{
+ int ret = EINVAL;
+ cicn_hashtb_h h = NULL;
+ uint32_t count;
+ size_t sz;
+ cicn_hash_node_t *nodep;
+ cicn_hash_bucket_t *bucket;
+ cicn_hash_key_t *hkey;
+
+ if (ph == NULL)
+ {
+ goto done;
+ }
+
+ if (max_elems < CICN_HASHTB_MIN_ENTRIES ||
+ max_elems > CICN_HASHTB_MAX_ENTRIES)
+ {
+ goto done;
+ }
+
+ /* Allocate and init main hashtable struct */
+ h = MEM_ALLOC (sizeof (cicn_hashtb_t));
+ if (h == NULL)
+ {
+ ret = ENOMEM;
+ goto done;
+ }
+
+ memset (h, 0, sizeof (cicn_hashtb_t));
+
+ /* Compute main table bucket (row) count and size, and allocate */
+ count = ALIGN8 (CEIL (max_elems, CICN_HASHTB_FILL_FACTOR));
+
+ h->ht_bucket_count = count;
+
+ /* We _really_ expect to have buckets aligned on cache lines ... */
+ sz = sizeof (cicn_hash_bucket_t);
+ assert (sz == ALIGN64 (sz));
+
+ h->ht_buckets = MEM_ALLOC (count * sz);
+ if (h->ht_buckets == NULL)
+ {
+ ret = ENOMEM;
+ goto done;
+ }
+
+ memset (h->ht_buckets, 0, count * sz);
+
+ /* First time through, compute offset to aligned extra data start in
+   * each node struct.
+   * It's crucial that both the node struct (that the base hashtable uses)
+ * and the extra data area (that's also probably a struct) are aligned.
+ */
+ if (ht_node_data_offset_aligned == 0)
+ {
+ count = STRUCT_OFFSET_OF (cicn_hash_node_t, hn_data);
+ ht_node_data_offset_aligned = ALIGN8 (count);
+ }
+
+ // check app struct fits into space provided (CICN_HASH_NODE_APP_DATA_SIZE)
+ uint32_t ht_node_data_size;
+ ht_node_data_size = sizeof (cicn_hash_node_t) - ht_node_data_offset_aligned;
+ if (app_data_size > ht_node_data_size)
+ {
+ clib_error
+ ("cicn hashtable: fatal error: requested app data size(%u) > hashtb node's configured bytes available(%u)",
+ app_data_size, ht_node_data_size);
+ }
+
+ /*
+ * Compute entry node count and size, allocate
+ * Allocate/'Hide' the zero-th node so we can use zero as an 'empty' value
+ */
+ pool_alloc_aligned (h->ht_nodes, max_elems, 8);
+ if (h->ht_nodes == NULL)
+ {
+ ret = ENOMEM;
+ goto done;
+ }
+
+ pool_get_aligned (h->ht_nodes, nodep, 8); // alloc node 0
+ nodep = nodep; /* Silence 'not used' warning */
+
+ h->ht_node_count = max_elems;
+ h->ht_nodes_used = 1;
+
+ /*
+ * Compute overflow bucket count and size, allocate
+ */
+ count = ALIGN8 (CEIL (max_elems, CICN_HASHTB_OVERFLOW_FRACTION));
+
+ pool_alloc_aligned (h->ht_overflow_buckets, count, 8);
+ if (h->ht_overflow_buckets == NULL)
+ {
+ ret = ENOMEM;
+ goto done;
+ }
+
+ /* 'Hide' the zero-th node so we can use zero as an 'empty' value */
+ pool_get_aligned (h->ht_overflow_buckets, bucket, 8);
+ bucket = bucket; /* Silence 'not used' warning */
+
+ h->ht_overflow_bucket_count = count;
+ h->ht_overflow_buckets_used = 1;
+
+ /*
+ * Compute overflow key buffer count and size, allocate
+ */
+ count = ALIGN8 (CEIL (max_elems, CICN_HASHTB_KEY_RATIO));
+
+ pool_alloc_aligned (h->ht_extra_keys, count, 8);
+ if (h->ht_extra_keys == NULL)
+ {
+ ret = ENOMEM;
+ goto done;
+ }
+
+ /* 'Hide' the zero-th node so we can use zero as an 'empty' value */
+ pool_get_aligned (h->ht_extra_keys, hkey, 8);
+ hkey = hkey; /* Silence 'not used' warning */
+
+ h->ht_key_count = count;
+ h->ht_keys_used = 1;
+
+ /* Success */
+ ret = AOK;
+
+done:
+
+ if (h)
+ {
+ if ((ret == AOK) && ph)
+ {
+ *ph = h;
+ }
+ else
+ {
+ cicn_hashtb_free (&h);
+ }
+ }
+
+ return (ret);
+}
+
+/*
+ * Free, de-allocate a hashtable
+ */
+int
+cicn_hashtb_free (cicn_hashtb_h * ph)
+{
+ int ret = 0;
+
+  if (ph && *ph)
+ {
+ if ((*ph)->ht_extra_keys)
+ {
+ pool_free ((*ph)->ht_extra_keys);
+ (*ph)->ht_extra_keys = 0;
+ }
+ if ((*ph)->ht_nodes)
+ {
+ pool_free ((*ph)->ht_nodes);
+ (*ph)->ht_nodes = 0;
+ }
+ if ((*ph)->ht_overflow_buckets)
+ {
+ pool_free ((*ph)->ht_overflow_buckets);
+ (*ph)->ht_overflow_buckets = 0;
+ }
+ if ((*ph)->ht_buckets)
+ {
+ MEM_FREE ((*ph)->ht_buckets);
+ (*ph)->ht_buckets = 0;
+ }
+ MEM_FREE (*ph);
+
+ *ph = NULL;
+ }
+
+ return (ret);
+}
+
+/*
+ * Hash a bytestring, using siphash-2-4.
+ */
+uint64_t
+cicn_hashtb_hash_bytestring (const uint8_t * in, uint32_t inlen)
+{
+ return (cicn_siphash (in, inlen, cicn_default_sip_seed));
+}
+
+/*
+ * Hash a name, using siphash-2-4. TODO -- want a table handle here?
+ */
+uint64_t
+cicn_hashtb_hash_name (const uint8_t * key, uint32_t keylen)
+{
+ if (key == NULL || keylen < CICN_TLV_HDR_LEN)
+ {
+ return (-1LL);
+ }
+ return (cicn_siphash (&key[CICN_TLV_HDR_LEN], keylen - CICN_TLV_HDR_LEN,
+ cicn_default_sip_seed));
+}
+
+/*
+ * Hash a name, returning hash values of prefixes (for LPM, e.g.) in
+ * addition to (or instead of) hash of full name
+ * - Hash of prefixes (by necessity) and of full name (for consistency)
+ * skips the name header tlv and starts at the first name component tlv.
+ * - version using incremental hashing, i.e. a single pass over string
+ * reusing the results for hashing each prefix in calculating the
+ * hash of the following prefix (rather than re-hashing from the
+ * beginning of the bytestring for each successive prefix as in the
+ * nonincr version).
+ * Args:
+ * - is_full_name:
+ * - if true
+ * - 'name' points to the beginning of the entire name TLV;
+ * - calculate hash of entire name, as well as prefixes
+ * - if false
+ *    - 'name' points to the first name-comp sub-tlv
+ *    - not required to compute the full-name hash, though currently
+ *      this version does compute the full-name hash.
+ * TODO: is avoiding that full hash a worthwhile savings?
+ * - limit: if 'limit' > 0, limit prefixes to less than array size (8)
+ */
+static inline int
+cicn_hashtb_hash_prefixes_incr (const uint8_t * name, uint16_t namelen,
+ int is_full_name, cicn_prefix_hashinf_t * pfx,
+ int limit)
+{
+ int ret = AOK;
+
+ cicn_siphash_hi_t hi_state;
+ uint64_t cur_hash = 0;
+
+ int comp_offset; // offset (from name_val) of comp
+
+ /* Must point to something, and it must be at least as long
+ * as an empty name or name-comp
+ */
+ if ((name == NULL) || (namelen < CICN_TLV_HDR_LEN) || (pfx == NULL))
+ {
+ ret = EINVAL;
+ goto done;
+ }
+
+ /* Establish sane limit on number of comps */
+ if (limit == 0 || limit > CICN_HASHTB_MAX_NAME_COMPS)
+ {
+ limit = CICN_HASHTB_MAX_NAME_COMPS;
+ }
+ pfx->pfx_overflow = 0;
+
+ // Capture tlv pointer and len in the context struct
+ // If leading name tlv (packet vs. fib prefix), skip it
+ if (is_full_name)
+ {
+ pfx->pfx_ptr = name + CICN_TLV_HDR_LEN;
+ pfx->pfx_len = namelen - CICN_TLV_HDR_LEN;
+ }
+ else
+ {
+ /* Capture tlv pointer and len in the context struct */
+ pfx->pfx_ptr = name;
+ pfx->pfx_len = namelen;
+ }
+
+ cicn_siphash_hi_initialize (&hi_state, cicn_default_sip_seed);
+
+ int comp_flen; // len of comp incl. hdr
+ int pfx_idx; // index into returned prefix arrays
+ for (comp_offset = 0, pfx_idx = 0;
+ comp_offset < pfx->pfx_len; comp_offset += comp_flen, pfx_idx++)
+ {
+
+ const unsigned char *comp; // pointer to component record (hdr)
+ int comp_type; // type of component record (from component (sub)-TLV)
+ uint16_t comp_vlen; // len of comp excl. hdr (component name/value len)
+ int pfx_len; // len of pfx (equal to offset of following comp)
+
+ comp = &pfx->pfx_ptr[comp_offset];
+ C_GETINT16 (comp_type, &comp[0]);
+ C_GETINT16 (comp_vlen, &comp[CICN_TLV_TYPE_LEN]);
+ comp_flen = CICN_TLV_HDR_LEN + comp_vlen;
+
+ pfx_len = comp_offset + comp_flen;
+ if (pfx_len > pfx->pfx_len)
+ {
+ ret = EINVAL;
+ goto done;
+ }
+
+ if (comp_type == CICN_NAME_COMP_CHUNK)
+ {
+	  /* assume FIB entries don't include chunk#: terminate partial hashes
+ * and proceed to full name hash
+ *
+ * for now, only chunk# (above) ends partial hashing, i.e. do not
+ * rule out creating and matching on FIB entries that include
+ * non-NameComponent components (that precede chunk#).
+ */
+ comp_flen = -1; /* end indicator */
+ pfx_len = pfx->pfx_len;
+ }
+ else if (pfx_idx >= limit)
+ { /* Are we out of partial hash slots? */
+ /*
+ * - no room in arrays to save remaining hashes or, in
+ * fib lookup case, no reason to calculate remaining
+ * partial hashes
+ * - do one last hash covering full string, taking advantage
+ * of v_whole rather than starting from scratch, to save in
+	   *     name_hash as always (even though it will not be saved in
+	   *     the array in the overflow case).
+ * - re-check for overflow below, i.e. after efficient
+ * whole-string hash calculation, and break out of loop there
+ */
+ pfx->pfx_overflow = 1;
+ comp_flen = -1; /* end indicator */
+ pfx_len = pfx->pfx_len;
+ }
+
+ cur_hash =
+ cicn_siphash_hi_calculate (&hi_state, pfx->pfx_ptr, pfx_len,
+ comp_offset);
+
+ if (comp_flen < 0)
+ {
+ /*
+ * - No benefit to calculating more partial hashes.
+ * - In overflow case, no room to store results.
+ * - In chunk component case, partial hash not useful
+ * - Due to similar check above, full string's hash has been
+ * calculated, break out of loop with cur_hash and
+ * pfx_idx having right values for return arguments.
+ * Return appropriate rc:
+ * - if actually out of room, return overflow (callers may not
+ * need to know this)
+ * - otherwise, max requested depth (less than array size)
+ * reached, but that's okay (not currently used).
+ */
+ if (pfx_idx >= ARRAY_LEN (pfx->pfx_hashes))
+ {
+ ret = ENOSPC;
+ }
+ break;
+ }
+
+ pfx->pfx_lens[pfx_idx] = pfx_len;
+ pfx->pfx_hashes[pfx_idx] = cur_hash;
+ } /* for */
+
+  // pfx_idx is now the count (idx+1): normal loop exit happens after the
+  // for-loop increment, and loop breaks happen before the next array entry
+  // is filled in.
+ pfx->pfx_count = pfx_idx;
+
+ if (pfx_idx == 0)
+ { // case of empty name still has hash
+ cur_hash =
+ cicn_siphash_hi_calculate (&hi_state, pfx->pfx_ptr, pfx->pfx_len,
+ comp_offset);
+ }
+
+ pfx->pfx_full_hash = cur_hash;
+
+done:
+ return (ret);
+}
+
+int
+cicn_hashtb_hash_prefixes (const uint8_t * name, uint16_t namelen,
+ int is_full_name, cicn_prefix_hashinf_t * pfx,
+ int limit)
+{
+ int ret;
+
+ ret =
+ cicn_hashtb_hash_prefixes_incr (name, namelen, is_full_name, pfx, limit);
+
+ return (ret);
+}
+
+/*
+ * Basic api to lookup a specific hash+key tuple. This does the entire
+ * lookup operation, retrieving node structs and comparing keys,
+ * so it's not optimized for prefetching or high performance.
+ *
+ * Returns zero and mails back a node on success, errno otherwise.
+ */
+int
+cicn_hashtb_lookup_node (cicn_hashtb_h h, const uint8_t * key,
+ uint32_t keylen, uint64_t hashval,
+ cicn_hash_node_t ** nodep)
+{
+ return (cicn_hashtb_lookup_node_ex
+ (h, key, keylen, hashval, FALSE /*deleted nodes */ , nodep));
+}
+
+/*
+ * Extended api to lookup a specific hash+key tuple. The implementation
+ * allows the caller to locate nodes that are marked for deletion, which
+ * is part of some hashtable applications, such as the FIB.
+ *
+ * This does the entire lookup operation, retrieving node structs
+ * and comparing keys, so it's not optimized for prefetching or high performance.
+ *
+ * Returns zero and mails back a node on success, errno otherwise.
+ */
+int
+cicn_hashtb_lookup_node_ex (cicn_hashtb_h h, const uint8_t * key,
+ uint32_t keylen, uint64_t hashval,
+ int include_deleted_p, cicn_hash_node_t ** nodep)
+{
+ int i, ret = EINVAL;
+ int found_p = FALSE;
+ uint32_t bidx;
+ cicn_hash_bucket_t *bucket;
+ cicn_hash_node_t *node;
+
+ /* Check args */
+ if ((h == NULL) || (key == NULL) || (keylen == 0))
+ {
+ goto done;
+ }
+
+ /* Use some bits of the low half of the hash
+ * to locate a row/bucket in the table
+ */
+ bidx = (hashval & (h->ht_bucket_count - 1));
+
+ bucket = h->ht_buckets + bidx;
+
+ /* Check the entries in the bucket for matching hash value */
+
+loop_buckets:
+
+ for (i = 0; i < CICN_HASHTB_BUCKET_ENTRIES; i++)
+ {
+
+ /* If an entry is marked for deletion, ignore it unless the caller
+ * explicitly wants these nodes.
+ */
+ if (bucket->hb_entries[i].he_flags & CICN_HASH_ENTRY_FLAG_DELETED)
+ {
+ if (!include_deleted_p)
+ {
+ continue;
+ }
+ }
+
+ /* Be prepared to continue to an overflow bucket if necessary.
+ * We only expect the last entry in a bucket to refer to an overflow
+ * bucket...
+ */
+ if (i == (CICN_HASHTB_BUCKET_ENTRIES - 1) &&
+ bucket->hb_entries[i].he_flags & CICN_HASH_ENTRY_FLAG_OVERFLOW)
+ {
+ bucket = pool_elt_at_index (h->ht_overflow_buckets,
+ bucket->hb_entries[i].he_node);
+ goto loop_buckets;
+ }
+
+ if (bucket->hb_entries[i].he_msb64 == hashval)
+ {
+ /* Found a candidate - must retrieve the actual node and
+ * check the key.
+ */
+ node = pool_elt_at_index (h->ht_nodes,
+ bucket->hb_entries[i].he_node);
+
+ /* Check the key itself; we've already checked the hash value */
+ ASSERT (node->hn_hash == hashval);
+
+ if (key && (keylen > 0))
+ {
+ if (keylen != node->hn_keysize)
+ {
+ continue;
+ }
+
+ if (keylen <= CICN_HASH_KEY_BYTES)
+ {
+ if (memcmp (key, node->hn_key.ks.key, keylen) != 0)
+ {
+ continue;
+ }
+ }
+ else
+ {
+ int key_bseen; // bytes processed/compared equal
+ int kc_bytes;
+ const cicn_hash_key_t *kc; // key chunks
+ for (kc = &node->hn_key, key_bseen = 0; kc != NULL;
+ kc = next_key_buf (h, kc), key_bseen += kc_bytes)
+ {
+ int key_bleft = keylen - key_bseen;
+ kc_bytes = (key_bleft <= CICN_HASH_KEY_LIST_BYTES) ?
+ key_bleft : CICN_HASH_KEY_LIST_BYTES;
+ if (memcmp (&key[key_bseen], kc->kl.key, kc_bytes))
+ {
+ break; // key chunk didn't match
+ }
+ }
+ if (kc != NULL)
+ { // found a mismatch before end of key
+ continue;
+ }
+ }
+ }
+
+ /* Found a match */
+ if (nodep)
+ {
+ *nodep = node;
+ }
+
+ found_p = TRUE;
+ break;
+ }
+ }
+
+ if (found_p)
+ {
+ ret = AOK;
+ }
+ else
+ {
+ ret = ENOENT;
+ }
+
+done:
+
+ return (ret);
+}
+
+/* Compute hash node index from node pointer */
+#define NODE_IDX_FROM_NODE(p, h) \
+ ((p) - ((h)->ht_nodes))
+
+/*
+ * Utility to init a new entry in a hashtable bucket/row. We use this
+ * to add a new node+hash, and to clear out an entry during removal.
+ */
+void
+cicn_hashtb_init_entry (cicn_hash_entry_t * entry, uint32_t nodeidx,
+ uint64_t hashval)
+{
+ entry->he_msb64 = hashval;
+ entry->he_node = nodeidx;
+
+ /* Clear out some other fields in the entry */
+ entry->he_flags = 0;
+ entry->he_timeout = 0;
+}
+
+/*
+ * Insert a node into the hashtable. We expect the caller has a) computed
+ * the hash value to use, b) initialized the node with the hash and key info,
+ * and c) filled in its app-specific data portion of the node.
+ */
+int
+cicn_hashtb_insert (cicn_hashtb_h h, cicn_hash_node_t * node)
+{
+ int i, ret = EINVAL;
+ uint32_t bidx;
+ cicn_hash_bucket_t *bucket, *newbkt;
+ int use_seven;
+
+ if (h == NULL)
+ {
+ goto done;
+ }
+
+ /* Use some bits of the low half of the hash
+ * to locate a row/bucket in the table
+ */
+ bidx = (node->hn_hash & (h->ht_bucket_count - 1));
+
+ bucket = h->ht_buckets + bidx;
+
+ use_seven = (h->ht_flags & CICN_HASHTB_FLAG_USE_SEVEN);
+
+ /* Locate a free entry slot in the bucket */
+
+loop_buckets:
+
+ for (i = 0; i < CICN_HASHTB_BUCKET_ENTRIES; i++)
+ {
+
+ /*
+ * If an entry is marked for deletion, ignore it
+ */
+ if (bucket->hb_entries[i].he_flags & CICN_HASH_ENTRY_FLAG_DELETED)
+ {
+ continue;
+ }
+
+ if ((bucket->hb_entries[i].he_msb64 == 0LL) &&
+ (bucket->hb_entries[i].he_node == 0))
+ {
+ /* Found a candidate -- fill it in */
+
+ /* Special case if the application asked not to use the last
+ * entry in each bucket.
+ */
+ if ((i != (CICN_HASHTB_BUCKET_ENTRIES - 1)) || !use_seven)
+ {
+
+ cicn_hashtb_init_entry (&(bucket->hb_entries[i]),
+ NODE_IDX_FROM_NODE (node, h),
+ node->hn_hash);
+
+ ret = AOK;
+ break;
+ }
+ }
+
+ /* Be prepared to continue to an overflow bucket if necessary,
+ * or to add a new overflow bucket.
+ * We only expect the last entry in a bucket to refer to an overflow
+ * bucket...
+ */
+ if (i == (CICN_HASHTB_BUCKET_ENTRIES - 1))
+ {
+ if (bucket->hb_entries[i].he_flags & CICN_HASH_ENTRY_FLAG_OVERFLOW)
+ {
+
+ /* Existing overflow bucket - re-start the search loop */
+ bucket = pool_elt_at_index (h->ht_overflow_buckets,
+ bucket->hb_entries[i].he_node);
+ goto loop_buckets;
+
+ }
+ else
+ {
+ /* Overflow - reached the end of a bucket without finding a
+ * free entry slot. Need to allocate an overflow bucket, and
+ * connect it to this bucket.
+ */
+ newbkt = alloc_overflow_bucket (h);
+ if (newbkt == NULL)
+ {
+ ret = ENOMEM;
+ goto done;
+ }
+
+ /* We're touching some more bytes than we absolutely have to
+ * here, but ... that seems ok.
+ */
+ memset (newbkt, 0, sizeof (cicn_hash_bucket_t));
+
+ if (!use_seven)
+ {
+ /* Copy existing entry into new bucket - we really expect
+ * these to be properly aligned so they can be treated as
+ * ints. TODO -- do in 8-byte assignments?
+ */
+ memcpy (&(newbkt->hb_entries[0]),
+ &(bucket->hb_entries[i]),
+ sizeof (cicn_hash_entry_t));
+ }
+
+ /* Connect original bucket to the index of the
+ * new overflow bucket
+ */
+ bucket->hb_entries[i].he_flags |= CICN_HASH_ENTRY_FLAG_OVERFLOW;
+ bucket->hb_entries[i].he_node =
+ (newbkt - h->ht_overflow_buckets);
+
+ /* Add new entry to new overflow bucket */
+ bucket = newbkt;
+
+ /* Use entry [1] in the new bucket _if_ we just copied into
+ * entry [zero] above.
+ */
+ if (!use_seven)
+ {
+
+ cicn_hashtb_init_entry (&(bucket->hb_entries[1]),
+ NODE_IDX_FROM_NODE (node, h),
+ node->hn_hash);
+ }
+ else
+ {
+
+ cicn_hashtb_init_entry (&(bucket->hb_entries[0]),
+ NODE_IDX_FROM_NODE (node, h),
+ node->hn_hash);
+ }
+
+ /* And we're done with the overflow bucket */
+ ret = AOK;
+ break;
+ }
+ }
+ }
+
+done:
+
+ return (ret);
+}
+
+/*
+ * Delete a node from a hashtable using the node itself, and delete/free
+ * the node. Caller's pointer is cleared on success.
+ */
+int
+cicn_hashtb_delete (cicn_hashtb_h h, cicn_hash_node_t ** pnode)
+{
+ int ret = EINVAL;
+
+ if ((h == NULL) || (pnode == NULL) || (*pnode == NULL))
+ {
+ goto done;
+ }
+
+ ret = cicn_hashtb_remove_node (h, *pnode);
+ if (ret == AOK)
+ {
+ cicn_hashtb_free_node (h, *pnode);
+ *pnode = NULL;
+ }
+
+done:
+ return (ret);
+}
+
+/*
+ * Delete an entry from a hashtable using the node itself
+ */
+int
+cicn_hashtb_remove_node (cicn_hashtb_h h, cicn_hash_node_t * node)
+{
+ int i, count, ret = EINVAL;
+ uint32_t bidx, overflow_p, nodeidx;
+ cicn_hash_bucket_t *bucket, *parent;
+
+ if ((h == NULL) || (node == NULL))
+ {
+ goto done;
+ }
+
+ /* Use some bits of the low half of the hash
+ * to locate a row/bucket in the table
+ */
+ bidx = (node->hn_hash & (h->ht_bucket_count - 1));
+
+ nodeidx = NODE_IDX_FROM_NODE (node, h);
+
+ bucket = h->ht_buckets + bidx;
+
+ overflow_p = FALSE;
+
+loop_buckets:
+
+ for (i = 0; i < CICN_HASHTB_BUCKET_ENTRIES; i++)
+ {
+ /* Note that we do consider entries that are marked for
+ * delete here, unlike some other operations.
+ */
+
+ /* Be prepared to continue to an overflow bucket if necessary.
+ * We only expect the last entry in a bucket to refer to an overflow
+ * bucket...
+ */
+ if (i == (CICN_HASHTB_BUCKET_ENTRIES - 1) &&
+ bucket->hb_entries[i].he_flags & CICN_HASH_ENTRY_FLAG_OVERFLOW)
+ {
+
+ bucket = pool_elt_at_index (h->ht_overflow_buckets,
+ bucket->hb_entries[i].he_node);
+
+ overflow_p = TRUE;
+
+ goto loop_buckets;
+ }
+
+ /* Look for an entry carrying this node index */
+ if (nodeidx == bucket->hb_entries[i].he_node)
+ {
+ ret = AOK;
+ break;
+ }
+ }
+
+ /* If we didn't find the matching entry, we're done */
+ if (ret != AOK)
+ {
+ ret = ENOENT;
+ goto done;
+ }
+
+ /* Clear out the entry. */
+ cicn_hashtb_init_entry (&(bucket->hb_entries[i]), 0, 0LL);
+
+ if (!overflow_p)
+ {
+ /* And we're done, in the easy case where we didn't change an
+ * overflow bucket
+ */
+ goto done;
+ }
+
+ /* The special case: if this is the last remaining entry in an
+ * overflow bucket, liberate the bucket. That in turn has a special
+ * case if this bucket is in the middle of a chain of overflow buckets.
+ *
+ * Note that we're not trying aggressively (yet) to condense buckets
+ * at every possible opportunity.
+ */
+
+ /* Reset this flag; we'll set it again if this bucket links to another */
+ overflow_p = FALSE;
+
+ for (i = 0, count = 0; i < CICN_HASHTB_BUCKET_ENTRIES; i++)
+ {
+ if (bucket->hb_entries[i].he_node != 0)
+ {
+ count++;
+ }
+
+ if (i == (CICN_HASHTB_BUCKET_ENTRIES - 1) &&
+ (bucket->hb_entries[i].he_flags & CICN_HASH_ENTRY_FLAG_OVERFLOW))
+ {
+ count--; /* Doesn't count as a 'real' entry */
+ overflow_p = TRUE;
+ }
+ }
+
+ if (count > 0)
+ {
+ /* Still a (real) entry in the row */
+ goto done;
+ }
+
+ /* Need to locate the predecessor of 'bucket': start at
+ * the beginning of the chain of buckets and move forward
+ */
+ bidx = (node->hn_hash & (h->ht_bucket_count - 1));
+
+ for (parent = h->ht_buckets + bidx; parent != NULL;)
+ {
+
+ if ((parent->hb_entries[(CICN_HASHTB_BUCKET_ENTRIES - 1)].he_flags &
+ CICN_HASH_ENTRY_FLAG_OVERFLOW) == 0)
+ {
+ parent = NULL;
+ break;
+ }
+
+ bidx = parent->hb_entries[(CICN_HASHTB_BUCKET_ENTRIES - 1)].he_node;
+
+ if (pool_elt_at_index (h->ht_overflow_buckets, bidx) == bucket)
+ {
+ /* Found the predecessor of 'bucket'. If 'bucket' has a successor,
+	   * connect 'parent' to it, and take 'bucket' out of the middle.
+ */
+ if (overflow_p)
+ {
+ parent->hb_entries[(CICN_HASHTB_BUCKET_ENTRIES - 1)].he_node =
+ bucket->hb_entries[(CICN_HASHTB_BUCKET_ENTRIES - 1)].he_node;
+ }
+ else
+ {
+ /* Just clear the predecessor entry pointing at 'bucket' */
+ cicn_hashtb_init_entry (&parent->hb_entries
+ [(CICN_HASHTB_BUCKET_ENTRIES - 1)], 0,
+ 0LL);
+ }
+
+ break;
+ }
+
+ /* After the first iteration, 'parent' will be an overflow bucket too */
+ parent = pool_elt_at_index (h->ht_overflow_buckets, bidx);
+ }
+
+ /* We really expect to have found the predecessor */
+ ASSERT (parent != NULL);
+
+ /* And now, finally, we can put 'bucket' back on the free list */
+ free_overflow_bucket (h, &bucket);
+
+done:
+
+ return (ret);
+}
+
+/*
+ * Prepare a hashtable node, supplying the key, and computed hash info.
+ */
+int
+cicn_hashtb_init_node (cicn_hashtb_h h, cicn_hash_node_t * node,
+ uint64_t hashval, const uint8_t * key, uint32_t keylen)
+{
+ int ret = EINVAL;
+ uint32_t keyidx;
+ cicn_hash_key_t *hk, *prevhk;
+
+ assert (h != NULL);
+ assert (node != NULL);
+
+ if ((h == NULL) || (node == NULL))
+ {
+ goto done;
+ }
+
+ /* Init the node struct */
+ node->hn_hash = hashval;
+ node->hn_flags = CICN_HASH_NODE_FLAGS_DEFAULT;
+ node->hn_keysize = 0;
+
+ if (key && (keylen > 0))
+ {
+ if (keylen > CICN_PARAM_HASHTB_KEY_BYTES_MAX)
+ {
+ /* Whoops - key is too darn big */
+ ret = EINVAL;
+ goto done;
+ }
+
+ node->hn_keysize = keylen;
+
+ if (keylen <= CICN_HASH_KEY_BYTES)
+ {
+ /* Use the node's embedded key buffer */
+ memcpy (node->hn_key.ks.key, key, keylen);
+ }
+ else
+ {
+ /*
+ * Key is too large for the embedded buffer alone;
+ * must use a chain of key buffers; we capture the chain
+ * by index.
+ */
+ prevhk = NULL;
+ hk = &(node->hn_key);
+
+ do
+ {
+
+ /* Put new key buf index into previous buf */
+ if (prevhk)
+ {
+ /* Compute index of new key buf */
+ keyidx = hk - h->ht_extra_keys;
+ prevhk->kl.idx_next = keyidx;
+ }
+
+ /* Copy as much key material as we can */
+ if (keylen > CICN_HASH_KEY_LIST_BYTES)
+ {
+ memcpy (hk->kl.key, key, CICN_HASH_KEY_LIST_BYTES);
+ key += CICN_HASH_KEY_LIST_BYTES;
+ keylen -= CICN_HASH_KEY_LIST_BYTES;
+ }
+ else
+ {
+ /* Last piece of the key */
+ memcpy (hk->kl.key, key, keylen);
+ keylen = 0;
+
+ /* Terminate the chain of key buffers */
+ hk->kl.idx_next = CICN_HASH_INVALID_IDX;
+ break;
+ }
+
+ prevhk = hk;
+
+ hk = alloc_key_buf (h);
+
+ }
+ while (hk);
+
+ if (keylen > 0)
+ {
+ /* Whoops - failed to get enough key buffers */
+ ret = ENOMEM;
+ goto done;
+ }
+ }
+ }
+
+ ret = AOK;
+
+done:
+
+ return (ret);
+}
+
+/*
+ * Release a hashtable node back to the free list when an entry is cleared
+ */
+void
+cicn_hashtb_free_node (cicn_hashtb_h h, cicn_hash_node_t * node)
+{
+ uint32_t klen, keyidx;
+ cicn_hash_key_t *keyp;
+
+ ASSERT (h->ht_nodes_used > 0);
+
+ /* If there is a chain of key structs, need to free them also */
+ if (node->hn_keysize > CICN_HASH_KEY_BYTES)
+ {
+
+ /* Remaining key bytes (for consistency check) */
+ klen = node->hn_keysize - CICN_HASH_KEY_LIST_BYTES;
+
+ /* Walk along the chain of key buffers until we reach the end */
+ for (keyidx = node->hn_key.kl.idx_next; keyidx != CICN_HASH_INVALID_IDX;
+ /* can't step in iterator: keyp already freed */ )
+ {
+
+ keyp = pool_elt_at_index (h->ht_extra_keys, keyidx);
+ if (!keyp)
+ { // should not happen given valid keyidx
+ break;
+ }
+ keyidx = keyp->kl.idx_next;
+
+ /* Return the key buf to the free pool */
+ free_key_buf (h, &keyp);
+
+ /* Consistency checks on klen */
+ if (klen > CICN_HASH_KEY_LIST_BYTES)
+ {
+ klen -= CICN_HASH_KEY_LIST_BYTES;
+ ASSERT (keyidx != CICN_HASH_INVALID_IDX);
+ }
+ else
+ {
+ klen = 0;
+ ASSERT (keyidx == CICN_HASH_INVALID_IDX);
+ }
+ }
+ }
+
+ /* Return 'node' to the free list */
+ pool_put (h->ht_nodes, node);
+ h->ht_nodes_used--;
+
+}
+
+/*
+ * Walk a hashtable, iterating through the nodes, keeping context in 'ctx'.
+ */
+int
+cicn_hashtb_next_node (cicn_hashtb_h h, cicn_hash_node_t ** pnode,
+ uint64_t * ctx)
+{
+ int i, j, ret = EINVAL;
+ uint32_t bidx, entry;
+ cicn_hash_bucket_t *bucket;
+
+ if ((h == NULL) || (pnode == NULL) || (ctx == NULL))
+ {
+ goto done;
+ }
+
+ /* Special-case for new iteration */
+ if (*ctx == CICN_HASH_WALK_CTX_INITIAL)
+ {
+ bidx = 0;
+ bucket = &h->ht_buckets[0];
+ entry = 0;
+ j = 0;
+ i = 0;
+ goto search_table;
+ }
+
+ /* Convert context to bucket and entry indices */
+ bidx = *ctx & 0xffffffffLL;
+ entry = *ctx >> 32;
+
+ if (bidx >= h->ht_bucket_count)
+ {
+ ret = ENOENT;
+ goto done;
+ }
+
+ bucket = h->ht_buckets + bidx;
+
+ /* Init total index into entries (includes fixed bucket and overflow) */
+ j = 0;
+
+
+skip_processed_bucket_chunks:
+ /* Figure out where to resume the search for the next entry in
+ * the table, by trying to find the last entry returned, from the cookie.
+ * Loop walks one (regular or overflow) bucket chunk, label is used for
+ * walking chain of chunks.
+ * Note that if there was a deletion or an addition that created an
+ * overflow, the iterator can skip entries or return duplicates, even
+ * for entries that are present from before the walk starts until
+ * after it ends.
+ * TODO: Add mechanism to break at bucket row boundaries, to avoid
+ * skip/duplication of entries not changed during walk.
+ */
+
+ for (i = 0; i < CICN_HASHTB_BUCKET_ENTRIES; i++, j++)
+ {
+ if (j > entry)
+ {
+ /*
+ * Start search for next here, use existing 'bucket' and 'i'
+ */
+ break;
+ }
+
+ /*
+ * If an entry is marked for deletion, ignore it
+ */
+ if (bucket->hb_entries[i].he_flags & CICN_HASH_ENTRY_FLAG_DELETED)
+ {
+ continue;
+ }
+
+ /* Be prepared to continue to an overflow bucket if necessary.
+ * (We only expect the last entry in a bucket to refer to an overflow
+ * bucket...)
+ */
+ if (i == (CICN_HASHTB_BUCKET_ENTRIES - 1))
+ {
+ if (bucket->hb_entries[i].he_flags & CICN_HASH_ENTRY_FLAG_OVERFLOW)
+ {
+ bucket = pool_elt_at_index (h->ht_overflow_buckets,
+ bucket->hb_entries[i].he_node);
+
+ /* Increment overall entry counter 'j' */
+ j++;
+
+ goto skip_processed_bucket_chunks;
+ }
+
+ /* end of row (end of fixed bucket plus any overflows) */
+ i = 0;
+ j = 0;
+
+ bidx++;
+
+ /* Special case - we're at the end */
+ if (bidx >= h->ht_bucket_count)
+ {
+ ret = ENOENT;
+ goto done;
+ }
+ bucket = h->ht_buckets + bidx;
+ break;
+ }
+ }
+
+search_table:
+
+ /* Now we're searching through the table for the next entry that's set */
+
+ for (; i < CICN_HASHTB_BUCKET_ENTRIES; i++, j++)
+ {
+ /*
+ * If an entry is marked for deletion, ignore it
+ */
+ if (bucket->hb_entries[i].he_flags & CICN_HASH_ENTRY_FLAG_DELETED)
+ {
+ continue;
+ }
+
+ /* Is this entry set? */
+ if (bucket->hb_entries[i].he_node != 0)
+ {
+
+ /* Retrieve the node struct */
+ *pnode = pool_elt_at_index (h->ht_nodes,
+ bucket->hb_entries[i].he_node);
+
+ /* Set 'entry' as we exit, so we can update the cookie */
+ entry = j;
+ ret = AOK;
+ break;
+ }
+
+ /* Be prepared to continue to an overflow bucket if necessary.
+ * (We only expect the last entry in a bucket to refer to an overflow
+ * bucket...)
+ */
+ if (i == (CICN_HASHTB_BUCKET_ENTRIES - 1))
+ {
+ if (bucket->hb_entries[i].he_flags & CICN_HASH_ENTRY_FLAG_OVERFLOW)
+ {
+ bucket = pool_elt_at_index (h->ht_overflow_buckets,
+ bucket->hb_entries[i].he_node);
+ /* Reset per-bucket index 'i', here (not done in iterator) */
+ i = 0;
+ /* Increment overall entry counter 'j' */
+ j++;
+
+ goto search_table;
+ }
+ else
+ {
+ /* Move to next bucket, resetting per-bucket and overall
+ * entry indexes
+ */
+ i = 0;
+ j = 0;
+
+ bidx++;
+
+ /* Special case - we're at the end */
+ if (bidx >= h->ht_bucket_count)
+ {
+ ret = ENOENT;
+ goto done;
+ }
+
+ bucket = h->ht_buckets + bidx;
+ goto search_table;
+ }
+ }
+ }
+
+done:
+
+ if (ret == AOK)
+ {
+ /* Update context */
+ *ctx = bidx;
+ *ctx |= ((uint64_t) entry << 32);
+ }
+
+ return (ret);
+}
+
+/*
+ * Update the per-entry expiration value for a hashtable node.
+ */
+int
+cicn_hashtb_entry_set_expiration (cicn_hashtb_h h,
+ cicn_hash_node_t * node,
+ uint16_t entry_timeout, uint8_t entry_flags)
+{
+ int i, ret = EINVAL;
+ uint32_t bidx, nodeidx;
+ cicn_hash_bucket_t *bucket;
+
+ if ((h == NULL) || (node == NULL))
+ {
+ goto done;
+ }
+
+ /* Use some bits of the low half of the hash
+ * to locate a row/bucket in the table
+ */
+ bidx = (node->hn_hash & (h->ht_bucket_count - 1));
+
+ nodeidx = NODE_IDX_FROM_NODE (node, h);
+
+ bucket = h->ht_buckets + bidx;
+
+loop_buckets:
+
+ for (i = 0; i < CICN_HASHTB_BUCKET_ENTRIES; i++)
+ {
+ /*
+ * If an entry is marked for deletion, ignore it
+ */
+ if (bucket->hb_entries[i].he_flags & CICN_HASH_ENTRY_FLAG_DELETED)
+ {
+ continue;
+ }
+
+ /* Be prepared to continue to an overflow bucket if necessary.
+ * We only expect the last entry in a bucket to refer to an overflow
+ * bucket...
+ */
+ if (i == (CICN_HASHTB_BUCKET_ENTRIES - 1) &&
+ bucket->hb_entries[i].he_flags & CICN_HASH_ENTRY_FLAG_OVERFLOW)
+ {
+
+ bucket = pool_elt_at_index (h->ht_overflow_buckets,
+ bucket->hb_entries[i].he_node);
+
+ goto loop_buckets;
+ }
+
+ /* Look for an entry carrying this node index */
+ if (nodeidx == bucket->hb_entries[i].he_node)
+ {
+ ret = AOK;
+ break;
+ }
+ }
+
+ /* If we didn't find the matching entry, we're done */
+ if (ret != AOK)
+ {
+ ret = ENOENT;
+ goto done;
+ }
+
+ /* Update the entry. */
+ bucket->hb_entries[i].he_timeout = entry_timeout;
+ if (entry_flags & CICN_HASH_ENTRY_FLAG_FAST_TIMEOUT)
+ {
+ bucket->hb_entries[i].he_flags |= CICN_HASH_ENTRY_FLAG_FAST_TIMEOUT;
+ }
+ else
+ {
+ bucket->hb_entries[i].he_flags &= ~CICN_HASH_ENTRY_FLAG_FAST_TIMEOUT;
+ }
+
+done:
+ return (ret);
+}
+
+int
+cicn_hashtb_key_to_buf (u8 ** vec_res, cicn_hashtb_h h,
+ const cicn_hash_node_t * node)
+{
+ int ret = AOK;
+ u8 *vec = *vec_res;
+
+ if (node->hn_keysize <= CICN_HASH_KEY_BYTES)
+ {
+ vec_add (vec, node->hn_key.ks.key, node->hn_keysize);
+ goto success;
+ }
+
+ const cicn_hash_key_t *kc;
+ for (kc = &node->hn_key; kc != NULL; kc = next_key_buf (h, kc))
+ {
+
+ int key_bleft = node->hn_keysize - vec_len (vec);
+ int kc_bytes = (key_bleft <= CICN_HASH_KEY_LIST_BYTES) ?
+ key_bleft : CICN_HASH_KEY_LIST_BYTES;
+ vec_add (vec, kc->kl.key, kc_bytes);
+ }
+
+success:
+
+ *vec_res = vec;
+ return (ret);
+}
+
+int
+cicn_hashtb_key_to_str (cicn_hashtb_h h, const cicn_hash_node_t * node,
+ char *buf, int bufsize, int must_fit)
+{
+ int ret = EINVAL;
+
+ u8 *kvec = NULL;
+ int bstr_len;
+
+ ret = cicn_hashtb_key_to_buf (&kvec, h, node);
+
+ if (h->ht_flags & CICN_HASHTB_FLAG_KEY_FMT_PFX)
+ {
+ ret =
+ cicn_parse_prefix_to_str (buf, bufsize, kvec, vec_len (kvec),
+ &bstr_len);
+ }
+ else if (h->ht_flags & CICN_HASHTB_FLAG_KEY_FMT_NAME)
+ {
+ ret =
+ cicn_parse_name_to_str (buf, bufsize, kvec, vec_len (kvec),
+ &bstr_len);
+ }
+ else
+ {
+ ret = EINVAL; // should never happen
+ }
+ if (ret != AOK)
+ {
+ goto err;
+ }
+
+ if (bstr_len >= bufsize)
+ {
+ if (must_fit)
+ {
+ ret = ENOSPC;
+ goto err;
+ }
+ if (bufsize < 4)
+ {
+ ret = ENOSPC;
+ goto err;
+ }
+ snprintf (&buf[bufsize - 4], 4, "...");
+ }
+
+ ret = AOK;
+
+err:
+ vec_free (kvec);
+ /* Not totally sure that I've got the to_str perfect - belt-and-susp. */
+ buf[bufsize - 1] = '\000';
+ return (ret);
+}
diff --git a/cicn-plugin/cicn/cicn_hashtb.h b/cicn-plugin/cicn/cicn_hashtb.h
new file mode 100644
index 00000000..c7522dd1
--- /dev/null
+++ b/cicn-plugin/cicn/cicn_hashtb.h
@@ -0,0 +1,526 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * cicn_hashtb.h: Fast-path, vpp-aware hashtable, the base for the PIT/CS and FIB
+ * used in the cicn forwarder.
+ *
+ * - As is the case in other areas, we can't share headers between the vpp code
+ * and the cicn/cndn code: there are conflicting definitions.
+ *
+ */
+
+#ifndef _CICN_HASHTB_H_
+#define _CICN_HASHTB_H_ 1
+
+#if !CICN_VPP_PLUGIN
+#error "cicn-internal file included externally"
+#endif
+
+#include "cicn_std.h"
+#include "cicn_params.h"
+
+/* Handy abbreviations for success status, and for boolean values */
+#ifndef TRUE
+#define TRUE 1
+#endif
+
+#ifndef FALSE
+#define FALSE 0
+#endif
+
+/*
+ * Lookup is finding a hashtable record whose name matches the name
+ * being looked up. Most of the lookup work is based on the hash
+ * value of the two names. Note that the intel cache line size is 64
+ * bytes, and some platforms load in 2 cache lines together.
+ * - first step is to match a record at the bucket/slot level
+ * (htab has an array of htbucket_t/htbc_elmt, where each bucket has
+ * 7 slots to hold indices for entries.) Matching at this level implies
+ * - the hashes of the lookup name and the record map to the
+ * same bucket
+ * - the high 32 bits of the hashes (slot bce_hash_msb32s) match.
+ * Read cost (on the hash table size, i.e. ignoring reading the
+ * name being looked up):
+ * - First step normally requires 1 cache line load to pull in
+ * the 64-byte htbucket_t with the 7 element slot table holding the
+ * hash_msb32s.
+ * - In the event (hopefully rare for a hash table with
+ * appropriate number of buckets) that more than 7 elements
+ * hash to the same bucket, lookup may well need to look not
+ * only at the static htbc_elmt_t but at the chain of dynamically
+ * allocated htbc_elmt_t's linked to the static htbc_elmt_t, where
+ * each of these holds slot entries for additional elements.
+ *     - Before reaching that point, it is initially required to read in
+ * the hash table record fields (ht_bucket_buf, htnode buf, etc)
+ * holding pointers to the arrays, but these cache lines are common
+ * to all lookups so will likely already be in the cache.
+ * - second step is to match at the record level (htnode/htkb level)
+ * once a slot-level match happens. Matching at this level implies
+ * the following match
+ * - the hash values (the full 64 bits vs. bucket+32 msb, above)
+ * With siphash, two names hashing to the same 64-bit value is
+ * quite rare.
+ * - the name which, on the hash table side, is stored as a list
+ * of htkb_t (key buffers). [In some cases, the full name is
+ *     not compared, and a match is assumed based on hash value match.]
+ * Read cost:
+ * - htnode_t, in one cache line, holds hash value and index for the
+ * htkb at the head of the key buffer list
+ * - each key buffer (htkb_t) is cache line aligned/sized, and holds
+ * 60 bytes of the name and requires a cache line read.
+ * Simplification is that a fib lookup requires 3 cache lines:
+ * - bucket
+ * - htnode
+ *   - single key buffer (for cases where a name comparison is done)
+ *
+ * Some hashtables (for which rare false positives are tolerable)
+ * store hash values but no keys. (In ISM NDN forwarder, this was
+ * used for dcm_dpf: data cache manager's dataplane filter, where
+ * speed was critical and very rare false positives would be detected
+ * in the full dcm check.)
+ * - No key buffers are used (or even allocated at hash table creation).
+ */
+
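+/*
+ * Two-step match, in rough pseudocode (a sketch of what
+ * cicn_hashtb_lookup_node_ex() does; deleted-entry handling elided):
+ *
+ *   bidx = hashval & (ht_bucket_count - 1);
+ *   for each entry in ht_buckets[bidx] (and any chained overflow buckets):
+ *     if (entry.he_msb64 == hashval)
+ *       node = ht_nodes[entry.he_node];
+ *       if (keylen == node->hn_keysize and the key bytes match the node's
+ *           key chain)
+ *         return node;
+ */
+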
+#define CICN_HASH_INVALID_IDX ~0
+/* for cicn_hashtb_next_node() iterator, this otherwise illegal context value
+ * indicates first call of iteration.
+ * Note: must not be 0, which is a legal context value.
+ */
+#define CICN_HASH_WALK_CTX_INITIAL (~((uint64_t)0))
+
+/*
+ * Key memory allocation scheme.
+ *
+ * The key is the bytestring that a hashtable entry is
+ * storing, e.g. a fib prefix or packet name. The hash of the
+ * name is used not just to pick the bucket, but also as a surrogate
+ * for the actual key value.
+ *
+ * Client calls pass key/name as contiguous memory for lookup/add/delete
+ * but the hashtable stores its copy of the key/name as a list of one or
+ * more hash_key structs.
+ * - key memory is managed as a list of keys (cache line
+ * sized/aligned buffers).
+ *   - If (keysize <= 128) then use the key struct's full 128 bytes
+ * - If not, first key struct is head of a linked list of elements
+ * where the first bytes are used for the key and the last 4 bytes
+ * are the index of the next entry (or an end marker).
+ * - key memory is generally the single largest use of memory
+ * in the hash table, especially for PIT, as names are bigger
+ * than node structs (which is also per name/entry).
+ *
+ */
+
+#define CICN_HASH_KEY_BYTES 128
+#define CICN_HASH_KEY_LIST_BYTES (CICN_HASH_KEY_BYTES - sizeof(uint32_t))
+typedef struct
+{
+ union
+ {
+ struct
+ {
+ uint8_t key[CICN_HASH_KEY_BYTES];
+ } ks; /* Entire key in one block */
+ struct
+ {
+ uint8_t key[CICN_HASH_KEY_LIST_BYTES];
+ uint32_t idx_next; /* Next keybuf idx */
+ } kl; /* Key in a list of blocks */
+ };
+} cicn_hash_key_t;
+
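+/*
+ * Chaining sketch (illustrative): with CICN_HASH_KEY_BYTES == 128 and
+ * CICN_HASH_KEY_LIST_BYTES == 124, a 300-byte key is stored as:
+ *
+ *   node's embedded key (kl) : bytes   0..123, kl.idx_next -> extra key A
+ *   extra key A         (kl) : bytes 124..247, kl.idx_next -> extra key B
+ *   extra key B         (kl) : bytes 248..299, kl.idx_next == CICN_HASH_INVALID_IDX
+ *
+ * A key of 128 bytes or less lives entirely in the node's embedded ks.key.
+ */
+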
+/* Ratio of extra key blocks to allocate, in case the embedded ones aren't
+ * sufficient. This is the fraction of the number of entries allocated.
+ */
+#define CICN_HASHTB_KEY_RATIO 8
+
+/*
+ * hash node, used to store a hash table entry; indexed by an entry in a bucket.
+ * the node contains an embedded key; long keys are stored as chains of keys.
+ *
+ * The memory block for a node includes space for client data,
+ * additional memory located off the end of the htnode data structure.
+ * Size of client-supplied data is fixed, so we can use vpp pools. The PIT
+ * and FIB need to ensure that they fit within the available data area,
+ * or change the size to accommodate their needs.
+ *
+ * NOTE: app_data_size currently applies to all apps, i.e. bigger FIB
+ * nodes means (leads to, requires) bigger PCS nodes
+ */
+
+/* Size this so that we can offer 64B aligned on 64-bits to the applications */
+#define CICN_HASH_NODE_APP_DATA_SIZE 72 /* TODO -- big enough? */
+
+typedef struct cicn_hash_node_s
+{
+
+ uint64_t hn_hash; /* Complete hash value */
+
+ /* Total size of the key (chained in several key structs if necessary) */
+ uint16_t hn_keysize;
+
+ /* 1 byte of flags for application use */
+ uint8_t hn_flags;
+
+ uint8_t _hn_reserved1; /* TBD, to align what follows back to 32 */
+
+ cicn_hash_key_t hn_key; /* Key value embedded in the node, may
+ * chain to more key buffers if necessary
+ */
+ /* TODO -- keep array of 'next keys' so we can prefetch better? */
+
+ /* Followed by app-specific data (fib or pit or cs entry, e.g.) */
+ uint8_t hn_data[CICN_HASH_NODE_APP_DATA_SIZE];
+
+} cicn_hash_node_t;
+
+#define CICN_HASH_NODE_FLAGS_DEFAULT 0x00
+
+
+/*
+ * cicn_hash_entry_t
+ * Structure holding all or part of a hash value, a node index, and other
+ * key pieces of info.
+ *
+ * - 128 bytes/bucket with 16 bytes/entry gives 8 entries,
+ * or 7 entries plus next bucket ptr if overflow
+ */
+typedef struct
+{
+
+ /* MSB of the hash value */
+ uint64_t he_msb64;
+
+ /* Index of node block */
+ uint32_t he_node;
+
+ /* Timeout value, units and scheme still TBD */
+ uint16_t he_timeout;
+
+ /* A few flags, including 'this points to a chain of buckets' */
+ uint8_t he_flags;
+
+  /* A byte for domain/application data (e.g. 'virtual fib entry') */
+ uint8_t he_appval;
+
+} cicn_hash_entry_t;
+
+#define CICN_HASH_ENTRY_FLAGS_DEFAULT 0x00
+
+/* This entry heads a chain of overflow buckets (we expect to see this
+ * only in the last entry in a bucket.) In this case, the index is
+ * to an overflow bucket rather than to a single node block.
+ */
+#define CICN_HASH_ENTRY_FLAG_OVERFLOW 0x01
+
+/* This entry has been marked for deletion */
+#define CICN_HASH_ENTRY_FLAG_DELETED 0x02
+
+/* Use fast he_timeout units for expiration, slow if not */
+#define CICN_HASH_ENTRY_FLAG_FAST_TIMEOUT 0x04
+
+/*
+ * hash bucket: Contains an array of entries.
+ * Cache line sized/aligned, so no room for extra fields unless
+ * bucket size is increased to 2 cache lines or the entry struct
+ * shrinks.
+ */
+
+/*
+ * Overflow bucket ratio as a fraction of the fixed/configured count;
+ * a pool of hash buckets used if a row in the fixed table overflows.
+ */
+#define CICN_HASHTB_OVERFLOW_FRACTION 8
+
+#define CICN_HASHTB_BUCKET_ENTRIES 8
+
+typedef struct
+{
+ cicn_hash_entry_t hb_entries[CICN_HASHTB_BUCKET_ENTRIES];
+} cicn_hash_bucket_t;
+
+/* Overall target fill-factor for the hashtable */
+#define CICN_HASHTB_FILL_FACTOR 4
+
+#define CICN_HASHTB_MIN_ENTRIES (1 << 4) // includes dummy node 0 entry
+#define CICN_HASHTB_MAX_ENTRIES (1 << 24)
+
+#define CICN_HASHTB_MIN_BUCKETS (1 << 10)
+
+/*
+ * htab_t
+ *
+ * Hash table main structure.
+ *
+ * Contains
+ * - pointers to dynamically allocated arrays of cache-line
+ * sized/aligned structures (buckets, nodes, keys).
+ * Put frequently accessed fields in the first cache line.
+ */
+typedef struct cicn_hashtb_s
+{
+
+ /* 8B - main array of hash buckets */
+ cicn_hash_bucket_t *ht_buckets;
+
+ /* 8B - just-in-case block of overflow buckets */
+ cicn_hash_bucket_t *ht_overflow_buckets;
+
+ /* 8B - block of nodes associated with entries in buckets */
+ cicn_hash_node_t *ht_nodes;
+
+ /*
+ * 8B - just-in-case block of extra keys, used when a key is too
+ * large to fit in a node's embedded key area
+ */
+ cicn_hash_key_t *ht_extra_keys;
+
+ /* Flags */
+ uint32_t ht_flags;
+
+ /* Count of buckets allocated in the main array */
+ uint32_t ht_bucket_count;
+
+ /* Count of overflow buckets allocated */
+ uint32_t ht_overflow_bucket_count;
+ uint32_t ht_overflow_buckets_used;
+
+ /* Count of nodes allocated */
+ uint32_t ht_node_count;
+ uint32_t ht_nodes_used;
+
+ /* Count of overflow key structs allocated */
+ uint32_t ht_key_count;
+ uint32_t ht_keys_used;
+
+ /* TODO -- stats? */
+
+} cicn_hashtb_t, *cicn_hashtb_h;
+
+/* Offset to aligned start of additional data (PIT/CS, FIB)
+ * embedded in each node.
+ */
+extern uint32_t ht_node_data_offset_aligned;
+
+/* Flags for hashtable */
+
+#define CICN_HASHTB_FLAGS_DEFAULT 0x00
+
+/* Don't use the last/eighth entry in each bucket - only use it for overflow.
+ * We use this for the FIB, currently, so that we can support in-place
+ * FIB changes that would be difficult if there were hash entry copies
+ * as part of overflow handling.
+ */
+#define CICN_HASHTB_FLAG_USE_SEVEN 0x01
+#define CICN_HASHTB_FLAG_KEY_FMT_PFX 0x02
+#define CICN_HASHTB_FLAG_KEY_FMT_NAME 0x04
+
+/*
+ * Max prefix name components we'll support in our incremental hashing;
+ * currently used only for LPM in the FIB.
+ */
+#define CICN_HASHTB_MAX_NAME_COMPS CICN_PARAM_FIB_ENTRY_PFX_COMPS_MAX
+
+/*
+ * Info about an LPM hash computation on a prefix or name.
+ */
+typedef struct cicn_prefix_hashinf_s
+{
+
+ const uint8_t *pfx_ptr;
+ uint16_t pfx_len;
+
+ uint16_t pfx_count; /* Number of prefix entries used */
+ uint8_t pfx_overflow; /* true if pfx has extra components (not hashed) */
+
+ uint16_t pfx_lens[CICN_HASHTB_MAX_NAME_COMPS];
+ uint64_t pfx_hashes[CICN_HASHTB_MAX_NAME_COMPS];
+
+ uint64_t pfx_full_hash;
+
+} cicn_prefix_hashinf_t;
+
+/*
+ * APIs and inlines
+ */
+
+/* Compute hash node index from node pointer */
+static inline uint32_t
+cicn_hashtb_node_idx_from_node (cicn_hashtb_h h, cicn_hash_node_t * p)
+{
+ return (p - h->ht_nodes);
+}
+
+/* Retrieve a hashtable node by node index */
+static inline cicn_hash_node_t *
+cicn_hashtb_node_from_idx (cicn_hashtb_h h, uint32_t idx)
+{
+ return (pool_elt_at_index (h->ht_nodes, idx));
+}
+
+/* Allocate a brand-new hashtable */
+int cicn_hashtb_alloc (cicn_hashtb_h * ph, uint32_t max_elems,
+ size_t app_data_size);
+
+/* Free a hashtable, including its embedded arrays */
+int cicn_hashtb_free (cicn_hashtb_h * ph);
+
+/* Hash a bytestring, currently using siphash64 (for UT) */
+uint64_t cicn_hashtb_hash_bytestring (const uint8_t * key, uint32_t keylen);
+
+/* Hash a name, currently using siphash64 */
+uint64_t cicn_hashtb_hash_name (const uint8_t * key, uint32_t keylen);
+
+/*
+ * Hash a name, with incremental prefix hashing (for LPM, e.g.) If
+ * 'limit' > 0, limit computation of incrementals.
+ * if 'is_full_name', we expect that 'name' points to the beginning
+ * of the entire name TLV; otherwise, 'name' just points to the first
+ * name-comp sub-tlv, and the full-name hash is not required (though the
+ * current implementation still computes it).
+ * Note that 'name' and 'namelen' are captured in the prefix-hash
+ * context struct, to make the LPM/FIB apis a little cleaner.
+ */
+int cicn_hashtb_hash_prefixes (const uint8_t * name, uint16_t namelen,
+ int is_full_name, cicn_prefix_hashinf_t * pfx,
+ int limit);
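+
+/*
+ * Result sketch (illustrative): for a name with three components, e.g.
+ * /a/b/c, and limit 0, on success pfx->pfx_count == 3, pfx->pfx_lens[i]
+ * and pfx->pfx_hashes[i] cover components 0..i, and pfx->pfx_full_hash
+ * covers the full component sequence.
+ */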
+
+/*
+ * Prepare a hashtable node for insertion, supplying the key
+ * and computed hash info. This sets up the node->key relationship, possibly
+ * allocating overflow key buffers.
+ */
+int cicn_hashtb_init_node (cicn_hashtb_h h, cicn_hash_node_t * node,
+ uint64_t hashval,
+ const uint8_t * key, uint32_t keylen);
+
+/*
+ * Insert a node into the hashtable. We expect the caller has used the
+ * init api to set the node key and hash info, and populated the extra
+ * data area (if any) - or done the equivalent work itself.
+ */
+int cicn_hashtb_insert (cicn_hashtb_h h, cicn_hash_node_t * node);
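+
+/*
+ * Typical insertion flow (minimal sketch; error handling elided, and the
+ * app-data setup is application-specific):
+ *
+ *   uint64_t hv = cicn_hashtb_hash_name (name, namelen);
+ *   cicn_hash_node_t *node = cicn_hashtb_alloc_node (h);
+ *   cicn_hashtb_init_node (h, node, hv, name, namelen);
+ *   ... fill in app data via cicn_hashtb_node_data (node) ...
+ *   cicn_hashtb_insert (h, node);
+ */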
+
+/*
+ * Basic api to lookup a specific hash+key tuple. This does the entire
+ * lookup operation, retrieving node structs and comparing keys,
+ * so it's not optimized for prefetching or high performance.
+ *
+ * Returns zero and mails back a node on success, errno otherwise.
+ */
+int cicn_hashtb_lookup_node (cicn_hashtb_h h, const uint8_t * key,
+ uint32_t keylen, uint64_t hashval,
+ cicn_hash_node_t ** nodep);
+
+/*
+ * Extended api to lookup a specific hash+key tuple. The implementation
+ * allows the caller to locate nodes that are marked for deletion; this
+ * is part of some hashtable applications, such as the FIB.
+ *
+ * This does the entire lookup operation, retrieving node structs
+ * and comparing keys, so it's not optimized for prefetching or high performance.
+ *
+ * Returns zero and mails back a node on success, errno otherwise.
+ */
+int
+cicn_hashtb_lookup_node_ex (cicn_hashtb_h h, const uint8_t * key,
+ uint32_t keylen, uint64_t hashval,
+ int include_deleted_p, cicn_hash_node_t ** nodep);
+
+/*
+ * Remove a node from a hashtable using the node itself. The internal
+ * data structs are cleaned up, but the node struct itself is not: the caller
+ * must free the node itself.
+ */
+int cicn_hashtb_remove_node (cicn_hashtb_h h, cicn_hash_node_t * node);
+
+/*
+ * Delete a node from a hashtable using the node itself, and delete/free
+ * the node. Caller's pointer is cleared on success.
+ */
+int cicn_hashtb_delete (cicn_hashtb_h h, cicn_hash_node_t ** pnode);
+
+/*
+ * Utility to init a new entry in a hashtable bucket/row. We use this
+ * to add a new node+hash, and to clear out an entry during removal.
+ */
+void cicn_hashtb_init_entry (cicn_hash_entry_t * entry,
+ uint32_t nodeidx, uint64_t hashval);
+
+/*
+ * Return data area embedded in a hash node struct. We maintain an 'offset'
+ * value in case the common node body struct doesn't leave the data area
+ * aligned properly.
+ */
+static inline void *
+cicn_hashtb_node_data (cicn_hash_node_t * node)
+{
+ return ((uint8_t *) (node) + ht_node_data_offset_aligned);
+}
+
+/*
+ * Use some bits of the low half of the hash to locate a row/bucket in the table
+ */
+static inline uint32_t
+cicn_hashtb_bucket_idx (cicn_hashtb_h h, uint64_t hashval)
+{
+ return ((uint32_t) (hashval & (h->ht_bucket_count - 1)));
+}
+
+/*
+ * Return a hash node struct from the free list, or NULL.
+ * Note that the returned struct is _not_ cleared/zeroed - init
+ * is up to the caller.
+ */
+static inline cicn_hash_node_t *
+cicn_hashtb_alloc_node (cicn_hashtb_h h)
+{
+ cicn_hash_node_t *p = NULL;
+
+ if (h->ht_nodes_used < h->ht_node_count)
+ {
+ pool_get_aligned (h->ht_nodes, p, 8);
+ h->ht_nodes_used++;
+ }
+
+ return (p);
+}
+
+/*
+ * Release a hashtable node back to the free list when an entry is cleared
+ */
+void cicn_hashtb_free_node (cicn_hashtb_h h, cicn_hash_node_t * node);
+
+/*
+ * Walk a hashtable, iterating through the nodes, keeping context
+ * in 'ctx' between calls.
+ *
+ * Set the context value to CICN_HASH_WALK_CTX_INITIAL to start an iteration.
+ */
+int cicn_hashtb_next_node (cicn_hashtb_h h, cicn_hash_node_t ** pnode,
+ uint64_t * ctx);
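+
+/*
+ * Example walk (sketch; assumes no concurrent adds/deletes matter to the
+ * caller, per the skip/duplicate caveat noted in cicn_hashtb_next_node()):
+ *
+ *   cicn_hash_node_t *node;
+ *   uint64_t ctx = CICN_HASH_WALK_CTX_INITIAL;
+ *   while (cicn_hashtb_next_node (h, &node, &ctx) == AOK)
+ *     {
+ *       ... process cicn_hashtb_node_data (node) ...
+ *     }
+ */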
+
+/*
+ * Update the per-entry expiration value and type
+ * for a hashtable node.
+ */
+int cicn_hashtb_entry_set_expiration (cicn_hashtb_h h,
+ cicn_hash_node_t * node,
+ uint16_t entry_timeout,
+ uint8_t entry_flags);
+
+int cicn_hashtb_key_to_str (cicn_hashtb_h h, const cicn_hash_node_t * node,
+ char *buf, int bufsize, int must_fit);
+
+#endif /* _CICN_HASHTB_H_ */
diff --git a/cicn-plugin/cicn/cicn_hello.c b/cicn-plugin/cicn/cicn_hello.c
new file mode 100644
index 00000000..0117a413
--- /dev/null
+++ b/cicn-plugin/cicn/cicn_hello.c
@@ -0,0 +1,439 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * cicn_hello.c - ICN hello protocol operation
+ */
+
+#include <vnet/vnet.h>
+#include <vnet/plugin/plugin.h>
+
+#include <vlibapi/api.h>
+#include <vlibmemory/api.h>
+#include <vlibsocket/api.h>
+
+#include <vnet/ip/udp.h>
+
+#include <cicn/cicn.h>
+#include <cicn/cicn_hello_inlines.h>
+
+static vlib_node_registration_t icn_hello_process_node;
+
+/* Stats string values */
+static char *icnhelloprocess_error_strings[] = {
+#define _(sym,string) string,
+ foreach_icnhelloprocess_error
+#undef _
+};
+
+/*
+ * When a face is created or hello is enabled/disabled, fill in (or clear)
+ * its adjacency information
+ */
+clib_error_t *
+cicn_hello_adj_update (i32 faceid, int enable)
+{
+ clib_error_t *rv = 0;
+
+ int ret;
+ cicn_main_t *sm = &cicn_main;
+ cicn_hello_name_t *hello_name = &sm->hello_name;
+ cicn_face_db_entry_t *face;
+ cicn_hello_adj_t *hello_adj;
+ cicn_hello_fcd_t *fcd;
+ struct sockaddr_in *addr;
+
+ ret = cicn_face_entry_find_by_id (faceid, &face);
+ if (ret != AOK)
+ {
+ rv = clib_error_return (0, "face id %d not found", faceid);
+ goto done;
+ }
+ if (face->app_face)
+ {
+ rv =
+ clib_error_return (0,
+ "face id %d is app face, hello protocol disallowed",
+ faceid);
+ goto done;
+ }
+
+ /* Set the cicn_hello_adj struct values */
+
+ hello_adj = &sm->cicn_hello_adjs[faceid];
+
+ if (enable)
+ {
+ if (hello_adj->active)
+ {
+ rv =
+ clib_error_return (0, "face id %d hello protocol already enabled",
+ faceid);
+ goto done;
+ }
+
+ hello_adj->ha_swif = face->swif;
+
+ clib_memcpy (face->fe_ha_name_cmn, &hello_name->hn_wf[0],
+ CICN_HELLO_NAME_CMN_FLEN);
+
+ fcd = &face->fe_ha_fcd_loc;
+ addr = &face->src_addr;
+ memset (&fcd->fcd_v[0], 0, sizeof (fcd->fcd_v));
+ clib_memcpy (&fcd->fcd_v[0], &addr->sin_addr.s_addr, sizeof (u32));
+ clib_memcpy (&fcd->fcd_v[sizeof (u32)], &addr->sin_port, sizeof (u16));
+ fcd->fcd_v_len = CICN_HELLO_NAME_FACEID_V_LEN;
+
+ // for now, assume nbr's faceid rather than receiving it in an iMsg notification
+ fcd = &face->fe_ha_fcd_nbr;
+ addr = &face->dest_addr;
+ memset (&fcd->fcd_v[0], 0, sizeof (fcd->fcd_v));
+ clib_memcpy (&fcd->fcd_v[0], &addr->sin_addr.s_addr, sizeof (u32));
+ clib_memcpy (&fcd->fcd_v[sizeof (u32)], &addr->sin_port, sizeof (u16));
+ fcd->fcd_v_len = CICN_HELLO_NAME_FACEID_V_LEN;
+
+ hello_adj->active = 1;
+
+ /* Increment the number of active adjacencies */
+ sm->n_active_hello_adjs++;
+ }
+ else
+ {
+ if (!hello_adj->active)
+ {
+ rv =
+ clib_error_return (0,
+ "face id %d hello protocol already disabled",
+ faceid);
+ goto done;
+ }
+
+ hello_adj->active = 0;
+ hello_adj->ha_swif = 0;
+
+ fcd = &face->fe_ha_fcd_loc;
+ memset (fcd, 0, sizeof (*fcd));
+
+ fcd = &face->fe_ha_fcd_nbr;
+ memset (fcd, 0, sizeof (*fcd));
+
+ /* Decrement the number of active adjacencies */
+ sm->n_active_hello_adjs--;
+ }
+
+ cicn_face_flags_update (face, enable, CICN_FACE_FLAG_HELLO_DOWN);
+
+done:
+ return (rv);
+}
+
+/*
+ * Every hello period, create a hello packet for a peer, to be sent out,
+ * using buffer for buf_idx
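+ *
+ * The packet is an IPv4/UDP-encapsulated control-request Interest whose
+ * name is the cached common prefix plus this face's faceid component and
+ * the just-incremented last_sent_seq_num.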
+ */
+static void
+cicn_hello_packet_build (u32 bi0, cicn_hello_adj_t * hello_adj,
+ cicn_face_db_entry_t * face)
+{
+ cicn_main_t *sm = &cicn_main;
+ vlib_main_t *vm = sm->vlib_main;
+
+ vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
+ vnet_buffer (b0)->sw_if_index[VLIB_RX] = hello_adj->ha_swif; //TODO: correct?
+ vnet_buffer (b0)->sw_if_index[VLIB_TX] = ~0;
+
+ /* Increment the last sent seq num (i.e. first sent is 1, not 0) */
+ hello_adj->last_sent_seq_num++;
+
+ u32 icn_name_len = CICN_TLV_HDR_LEN + sm->hello_name.hn_wf_v_len;
+
+ u32 icn_len = sizeof (cicn_packet_hdr_t) + CICN_TLV_HDR_LEN + icn_name_len;
+
+ /* Zero all the way through the icn packet header, but not the ICN message body */
+ u8 *ptr0 = vlib_buffer_get_current (b0);
+ memset (ptr0, 0, sizeof (ip4_header_t) + sizeof (udp_header_t) +
+ sizeof (cicn_packet_hdr_t));
+
+ /* Build IP header in place */
+ ip4_header_t *ip0 = (ip4_header_t *) ptr0;
+ b0->current_length = sizeof (ip4_header_t);
+ ASSERT ((((uintptr_t) ip0) & 0x3) == 0); // assert alignment for assigns below
+
+ ip0->ip_version_and_header_length = 0x45;
+ ip0->ttl = 128;
+ ip0->protocol = IP_PROTOCOL_UDP;
+ ip0->src_address.as_u32 = face->src_addr.sin_addr.s_addr;
+ ip0->dst_address.as_u32 = face->dest_addr.sin_addr.s_addr;
+ ip0->length =
+ clib_host_to_net_u16 (sizeof (ip4_header_t) + sizeof (udp_header_t) +
+ icn_len);
+ ip0->checksum = ip4_header_checksum (ip0);
+
+ /* Build UDP header in place */
+ udp_header_t *udp0 = (udp_header_t *) (ip0 + 1);
+ b0->current_length += sizeof (udp_header_t);
+
+ udp0->src_port = face->src_addr.sin_port;
+ udp0->dst_port = face->dest_addr.sin_port;
+ udp0->checksum = 0x0000;
+ udp0->length = clib_host_to_net_u16 (sizeof (udp_header_t) + icn_len);
+
+ /* Build ICN header */
+ cicn_packet_hdr_t *h = (cicn_packet_hdr_t *) (udp0 + 1);
+ b0->current_length += icn_len;
+
+ h->pkt_ver = CICN_PROTO_VERSION_CURRENT;
+ h->pkt_type = CICN_PKT_TYPE_CONTROL_REQUEST;
+ h->pkt_hop_limit = CICN_DEFAULT_HOP_LIMIT;
+ h->pkt_flags = 0;
+ h->pkt_hdr_len = sizeof (cicn_packet_hdr_t);
+ C_PUTINT16 (&h->pkt_len, icn_len);
+
+ /* The message type and length (currently just the name tlv) */
+ uint8_t *msg_tlv_p = (uint8_t *) (h + 1);
+ C_PUTINT16 (&msg_tlv_p[0], CICN_MSG_TYPE_INTEREST);
+ C_PUTINT16 (&msg_tlv_p[CICN_TLV_TYPE_LEN], CICN_HELLO_NAME_TOT_FLEN);
+
+ /* Copy name tlv, updating adjacency and seq_number components */
+ uint8_t *name_tlv_p = &msg_tlv_p[CICN_TLV_HDR_LEN];
+ u8 *fid_tlv_p = &name_tlv_p[CICN_HELLO_NAME_CMN_FLEN];
+ u8 *seq_tlv_p = &fid_tlv_p[CICN_HELLO_NAME_FACEID_FLEN];
+
+ clib_memcpy (name_tlv_p, face->fe_ha_name_cmn, CICN_HELLO_NAME_CMN_FLEN);
+
+ cicn_parse_tlv_build (fid_tlv_p, CICN_NAME_COMP,
+ CICN_HELLO_NAME_FACEID_V_LEN,
+ &face->fe_ha_fcd_loc.fcd_v[0]);
+
+ cicn_parse_tlv_hdr_build (seq_tlv_p, CICN_NAME_COMP,
+ CICN_HELLO_NAME_SEQ_V_LEN);
+ C_PUTINT64 (&seq_tlv_p[CICN_TLV_HDR_LEN], hello_adj->last_sent_seq_num);
+}
+
+/*
+ * At period expiry, walk through all adjacencies, building and sending
+ * hello packets. Return number of hello packets sent.
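+ *
+ * If the gap between last-sent and last-received sequence numbers for an
+ * adjacency reaches CICN_PARAM_HELLO_MISSES_DOWN_DFLT, the face is marked
+ * hello-down before the next hello is sent.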
+ */
+u32
+cicn_hello_periodic (vlib_main_t * vm, vlib_node_runtime_t * node)
+{
+ cicn_main_t *sm = &cicn_main;
+ vlib_frame_t *f;
+ u32 *to_next;
+ u32 bi0;
+ u32 active_adjs_found = 0;
+ int j = 0;
+ u64 seq_num_gap;
+ cicn_face_db_entry_t *face_entry;
+
+ /* If no active adjacencies, don't walk array */
+ if (sm->n_active_hello_adjs == 0)
+ {
+ return (0);
+ }
+
+ /* Get a frame */
+ f = vlib_get_frame_to_node (vm, sm->cicn_hello_next_node_id);
+ ASSERT (f->n_vectors == 0);
+ to_next = vlib_frame_vector_args (f);
+
+ for (j = 0; j < CICN_PARAM_FACES_MAX; j++)
+ {
+ /* If we have found all the adjs, break */
+ if (active_adjs_found >= sm->n_active_hello_adjs)
+ {
+ break;
+ }
+
+ /* If this adj is not active, continue */
+ if (!sm->cicn_hello_adjs[j].active)
+ {
+ continue;
+ }
+ if (cicn_face_entry_find_by_id (j, &face_entry) != AOK)
+ {
+ continue;
+ }
+
+ active_adjs_found++;
+
+ /* Find the gap between the last sent and last acked seq num */
+ seq_num_gap = sm->cicn_hello_adjs[j].last_sent_seq_num -
+ sm->cicn_hello_adjs[j].last_received_seq_num;
+ /* If we go above the threshold, mark the interface as down */
+ if (seq_num_gap >= CICN_PARAM_HELLO_MISSES_DOWN_DFLT)
+ {
+ face_entry->flags |= CICN_FACE_FLAG_HELLO_DOWN;
+ }
+ vlib_buffer_alloc (vm, &bi0, 1);
+
+ /* Create the icn hello packet in bi0 */
+ cicn_hello_packet_build (bi0, &sm->cicn_hello_adjs[j], face_entry);
+
+ cicn_infra_shard_t *wshard = &cicn_infra_shards[vm->cpu_index];
+ cicn_face_stats_t *outface_stats =
+ &wshard->face_stats[cicn_face_db_index (face_entry)];
+ outface_stats->orig_interests++;
+ outface_stats->out_interests++;
+
+ /* Move the buffers to the frame */
+ to_next[0] = bi0;
+ to_next++;
+ f->n_vectors++;
+ }
+
+ /* Dispatch the frame to the node */
+ vlib_put_frame_to_node (vm, sm->cicn_hello_next_node_id, f);
+ return (active_adjs_found);
+}
+
+/*
+ * At cicn enable time, initialize hello's periodic state
+ * - sm->cicn_hello_next_node_id
+ * - sm->hello_name (string, wire-format, and initial (2-component) hash)
+ */
+int
+cicn_hello_plugin_activation_init (vlib_main_t * vm)
+{
+ cicn_main_t *sm = &cicn_main;
+
+ /* Up/Down next node id */
+ vlib_node_t *next_node = vlib_get_node_by_name (vm, (u8 *) "ip4-lookup");
+ sm->cicn_hello_next_node_id = next_node->index;
+
+ /* Set the values of the ICN hello name struct */
+ cicn_hello_name_t *hello_name = &sm->hello_name;
+ cicn_sstrncpy (hello_name->hn_str, CICN_HELLO_NAME_TEMPLATE,
+ sizeof (hello_name->hn_str));
+
+ cicn_rd_t cicn_rd;
+ C_PUTINT16 (&hello_name->hn_wf[0], CICN_TLV_NAME);
+ hello_name->hn_wf_v_len =
+ cicn_parse_name_comps_from_str (&hello_name->hn_wf[CICN_TLV_HDR_LEN],
+ sizeof (hello_name->hn_wf) -
+ CICN_TLV_HDR_LEN, hello_name->hn_str,
+ &cicn_rd);
+ if (hello_name->hn_wf_v_len != CICN_HELLO_NAME_TOT_FLEN - CICN_TLV_HDR_LEN)
+ {
+ vlib_cli_output (sm->vlib_main,
+ "Error parsing hello name template: %s (%d)",
+ cicn_rd_str (&cicn_rd), hello_name->hn_wf_v_len);
+ return EINVAL;
+ }
+ C_PUTINT16 (&hello_name->hn_wf[CICN_TLV_TYPE_LEN], hello_name->hn_wf_v_len);
+
+ return (AOK);
+}
+
+/*
+ * The entry-point for the ICN adjacency process, which periodically
+ * sends adjacency packets.
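+ *
+ * The process wakes on expiry of the hello interval or on a
+ * CICN_HELLO_EVENT_DATA_RCVD event.  Events update the adjacency's
+ * last-received sequence number and clear the hello-down flag if set;
+ * interval expiry triggers cicn_hello_periodic().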
+ */
+static uword
+icn_hello_process_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * rt, vlib_frame_t * f)
+{
+ cicn_main_t *sm = &cicn_main;
+ f64 up_down_time_remaining;
+ uword event_type;
+ cicn_hello_data *d;
+ uword *event_data = 0;
+ int i = 0;
+ cicn_face_db_entry_t *face_entry;
+
+ up_down_time_remaining = sm->cicn_hello_interval;
+
+ /* Loop forever */
+ while (1)
+ {
+ up_down_time_remaining = vlib_process_wait_for_event_or_clock (vm,
+ up_down_time_remaining);
+ /* Get the events (if any) */
+ event_type = vlib_process_get_events (vm, &event_data);
+ if (!sm->is_enabled)
+ {
+ ASSERT (vec_len (event_data) == 0);
+ up_down_time_remaining = sm->cicn_hello_interval;
+ continue;
+ }
+
+ switch (event_type)
+ {
+ case ~0:
+ break;
+ case CICN_HELLO_EVENT_DATA_RCVD:
+ for (i = 0; i < vec_len (event_data); i++)
+ {
+ /* We got a hello Data packet */
+ d = (cicn_hello_data *) event_data[i];
+ if (sm->cicn_hello_adjs[d->faceid].last_received_seq_num <
+ d->seq_num)
+ {
+ sm->cicn_hello_adjs[d->faceid].last_received_seq_num =
+ d->seq_num;
+ /* Find the face and, if down, bring it up */
+ if (cicn_face_entry_find_by_id (d->faceid, &face_entry) !=
+ AOK)
+ {
+ continue;
+ }
+ if (face_entry->flags & CICN_FACE_FLAG_HELLO_DOWN)
+ {
+ cicn_face_flags_update (face_entry, 0 /*!set */ ,
+ CICN_FACE_FLAG_HELLO_DOWN);
+ }
+ }
+ }
+ break;
+ default:
+ ;
+ }
+
+ vec_reset_length (event_data);
+
+ /* peer timeout scan, send up-down Interest */
+ if (vlib_process_suspend_time_is_zero (up_down_time_remaining))
+ {
+ u32 adjs_sent = cicn_hello_periodic (vm, rt);
+ vlib_node_increment_counter (vm, icn_hello_process_node.index,
+ ICNHELLOPROCESS_ERROR_HELLO_INTERESTS_SENT,
+ adjs_sent);
+
+ up_down_time_remaining = sm->cicn_hello_interval;
+ }
+ }
+
+ /* NOTREACHED */
+ return 0;
+}
+
+clib_error_t *
+cicn_hello_boot_init (vlib_main_t * vm)
+{
+ cicn_main_t *sm = &cicn_main;
+
+ sm->n_active_hello_adjs = 0;
+ sm->cicn_hello_interval_cfgd = 0;
+ sm->cicn_hello_interval = CICN_PARAM_HELLO_POLL_INTERVAL_DFLT;
+
+ return (0);
+}
+
+
+VLIB_REGISTER_NODE (icn_hello_process_node, static) =
+{
+  .function = icn_hello_process_fn,
+  .type = VLIB_NODE_TYPE_PROCESS,
+  .name = "icn-hello-process",
+  .process_log2_n_stack_bytes = 16,
+  .n_errors = ARRAY_LEN (icnhelloprocess_error_strings),
+  .error_strings = icnhelloprocess_error_strings,
+};
+
+VLIB_INIT_FUNCTION (cicn_hello_boot_init);
diff --git a/cicn-plugin/cicn/cicn_hello.h b/cicn-plugin/cicn/cicn_hello.h
new file mode 100644
index 00000000..d6a758d6
--- /dev/null
+++ b/cicn-plugin/cicn/cicn_hello.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * cicn_hello.h - ICN hello protocol operation
+ */
+
+#ifndef _CICN_HELLO_H_
+#define _CICN_HELLO_H_
+
+#if !CICN_VPP_PLUGIN
+#error "cicn-internal file included externally"
+#endif
+
+#include <vnet/ip/ip.h>
+
+#define CICN_HELLO_EVENT_DATA_RCVD 1
+
+/* template for the hello protocol name (faceid and seq placeholders) */
+#define CICN_HELLO_NAME_TEMPLATE "/local/adj/1234567890123456/12345678"
+#define CICN_HELLO_NAME_CMN_COMPS_FLEN 16 // "/local/adj"
+
+#define CICN_HELLO_NAME_FACEID_V_LEN 16
+#define CICN_HELLO_NAME_FACEID_FLEN \
+ (CICN_TLV_HDR_LEN + CICN_HELLO_NAME_FACEID_V_LEN)
+
+#define CICN_HELLO_NAME_SEQ_V_LEN 8
+#define CICN_HELLO_NAME_SEQ_FLEN (CICN_TLV_HDR_LEN + CICN_HELLO_NAME_SEQ_V_LEN)
+
+// match pkt name against common-prefix and faceid components
+#define CICN_HELLO_NAME_CMN_FLEN \
+ (CICN_TLV_HDR_LEN + CICN_HELLO_NAME_CMN_COMPS_FLEN)
+#define CICN_HELLO_NAME_TOT_FLEN \
+ (CICN_TLV_HDR_LEN + CICN_HELLO_NAME_CMN_COMPS_FLEN + \
+ CICN_HELLO_NAME_FACEID_FLEN + CICN_HELLO_NAME_SEQ_FLEN)
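+
+/*
+ * Resulting hello name wire layout (per the macros above):
+ *   [name TLV hdr][ "/local/adj" comps, 16B ][faceid comp: hdr + 16B value]
+ *   [seq comp: hdr + 8B value]
+ */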
+
+/* The name struct of the ICN Hello Interests */
+typedef struct
+{
+ char hn_str[CICN_HELLO_NAME_TOT_FLEN];
+ u32 hn_wf_v_len;
+ u8 hn_wf[CICN_HELLO_NAME_TOT_FLEN + 10 /* slop */ ];
+} cicn_hello_name_t;
+
+typedef struct cicn_hello_fcd_t_
+{
+ u16 fcd_v_len; // 0 if value not valid
+ u8 fcd_v[CICN_HELLO_NAME_FACEID_V_LEN];
+} cicn_hello_fcd_t;
+
+/* ICN Hello Adjacency struct (piggy-backs on face_cache_entry_t) */
+typedef struct
+{
+ int ha_swif; // vpp swif, use as dummy RX for originated packets
+
+ /* Is this adjacency active? */
+ int active;
+
+ /* Last received seq num */
+ u64 last_received_seq_num;
+
+ /* Last sent seq num */
+ u64 last_sent_seq_num;
+} cicn_hello_adj_t;
+
+/*
+ * The data structure to pass to the background process through
+ * signaled event
+ */
+typedef struct
+{
+ u64 seq_num;
+ u32 faceid;
+} cicn_hello_data;
+
+clib_error_t *cicn_hello_adj_update (i32 faceid, int enable);
+
+u32 cicn_hello_periodic (vlib_main_t * vm, vlib_node_runtime_t * node);
+
+int cicn_hello_plugin_activation_init (vlib_main_t * vm);
+
+clib_error_t *cicn_hello_boot_init (vlib_main_t * vm);
+
+#endif // _CICN_HELLO_H_
diff --git a/cicn-plugin/cicn/cicn_hello_inlines.h b/cicn-plugin/cicn/cicn_hello_inlines.h
new file mode 100644
index 00000000..c51bb7b0
--- /dev/null
+++ b/cicn-plugin/cicn/cicn_hello_inlines.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * cicn_hello_inlines.h - ICN hello protocol packet forwarding inlines
+ */
+
+#ifndef _CICN_HELLO_INLINES_H_
+#define _CICN_HELLO_INLINES_H_ 1
+
+#if !CICN_VPP_PLUGIN
+#error "cicn-internal file included externally"
+#endif
+
+#include "cicn.h"
+
+/*
+ * Is the supplied pkt_type/name a hello?
+ * Called from forwarding path, so performance sensitive
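+ *
+ * pkt_type selects which stored faceid component to match: the face's own
+ * (local) component for control requests, the neighbor's for control
+ * replies.  The name must also match the face's cached common prefix
+ * exactly; on success the trailing sequence number is returned through
+ * seq_num_res (if non-NULL).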
+ */
+static inline int
+cicn_hello_match (const cicn_face_db_entry_t * inface,
+ u8 pkt_type, const u8 * nameptr, u32 namelen,
+ const cicn_hello_name_t * hello_template, u64 * seq_num_res)
+{
+ const cicn_hello_fcd_t *fcd;
+ const u8 *in_fid_tlv;
+ u16 in_tlv_len;
+
+ switch (pkt_type)
+ {
+ case CICN_PKT_TYPE_CONTROL_REQUEST:
+ fcd = &inface->fe_ha_fcd_loc; // request for our name
+ break;
+ case CICN_PKT_TYPE_CONTROL_REPLY:
+ fcd = &inface->fe_ha_fcd_nbr; // reply to our request for nbr name
+ break;
+ default:
+ return (0); // not a hello message
+ }
+
+ if (fcd->fcd_v_len == 0)
+ { // name not currently initialized
+ return (0);
+ }
+
+ if (namelen != CICN_HELLO_NAME_TOT_FLEN)
+ {
+ return (0);
+ }
+ if (memcmp (nameptr, &inface->fe_ha_name_cmn[0],
+ sizeof (inface->fe_ha_name_cmn)))
+ {
+ return (0);
+ }
+
+ in_fid_tlv = &nameptr[CICN_HELLO_NAME_CMN_FLEN];
+ C_GETINT16 (in_tlv_len, &in_fid_tlv[CICN_TLV_TYPE_LEN]);
+ if (in_tlv_len != fcd->fcd_v_len)
+ {
+ return (0);
+ }
+
+ if (memcmp (&in_fid_tlv[CICN_TLV_HDR_LEN], &fcd->fcd_v[0],
+ fcd->fcd_v_len) != 0)
+ {
+ return (0);
+ }
+
+ if (seq_num_res)
+ { /* Extract the seq num from the response */
+ u64 seq_num;
+ C_GETINT64 (seq_num, &nameptr[namelen - CICN_HELLO_NAME_SEQ_V_LEN]);
+ *seq_num_res = seq_num;
+ }
+
+ return (1); // valid hello imsg/dmsg name for this face
+}
+
+#endif // _CICN_HELLO_INLINES_H_
diff --git a/cicn-plugin/cicn/cicn_infra.h b/cicn-plugin/cicn/cicn_infra.h
new file mode 100644
index 00000000..3013e1eb
--- /dev/null
+++ b/cicn-plugin/cicn/cicn_infra.h
@@ -0,0 +1,312 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * plugin infrastructure: global data structure, common definitions,
+ * statistics, etc
+ */
+#ifndef _CICN_INFRA_H_
+#define _CICN_INFRA_H_ 1
+
+#if !CICN_VPP_PLUGIN
+#error "cicn-internal file included externally"
+#endif
+
+typedef int (*test_cicn_api_handler_fn) (test_cicn_api_op_t *
+ test_cicn_api_op);
+
+// cicn plugin global state: see also
+// - icnfwd_runtime_s for per-worker state
+// - fib and pits
+typedef struct cicn_main_s
+{
+ /* Binary API message ID base */
+ u16 msg_id_base;
+
+ /* Have we been enabled */
+ u16 is_enabled;
+
+ /* Convenience */
+ vlib_main_t *vlib_main;
+ vnet_main_t *vnet_main;
+ ethernet_main_t *ethernet_main;
+
+ /* Global FIB instance */
+ cicn_fib_t fib;
+
+ /* Info about vpp worker threads, used in our packet distribution node */
+ u32 shard_count; // 1 in single-threaded or 1 worker mode: see worker_count
+ u32 worker_count; // 0 in single-threaded case: see shard_count
+ u32 worker_first_index;
+
+ /* Node index for forwarder node in dpdk worker handoff context */
+ u32 fwd_next_node;
+
+ /* Global PIT lifetime info */
+ uint64_t pit_lifetime_dflt_ms;
+ uint64_t pit_lifetime_min_ms;
+ uint64_t pit_lifetime_max_ms;
+
+ /* Global ICN Hello Protocol Polling Interval */
+ f64 cicn_hello_interval;
+
+ /* The name of the ICN Hello Protocol Interests */
+ cicn_hello_name_t hello_name;
+
+ /* Is Hello Protocol polling interval set from cli/api? */
+ u32 cicn_hello_interval_cfgd;
+
+ /* Next node id for Hello Interests */
+ u32 cicn_hello_next_node_id;
+
+ /* Array of ICN Adjacencies indexed by faceid */
+ cicn_hello_adj_t cicn_hello_adjs[CICN_PARAM_FACES_MAX];
+
+ /* Array of ICN Hello data by faceid */
+ cicn_hello_data cicn_hello_data_array[CICN_PARAM_FACES_MAX];
+
+ /* Number of active adjacencies */
+ u32 n_active_hello_adjs;
+
+ uword *cicn_rc_strings; // to print string forms of return codes
+
+ /* Event subscribers' info */
+ i32 n_face_event_subscribers;
+ vl_api_cicn_api_face_events_subscribe_t
+ face_event_subscribers[CICN_PARAM_API_EVENT_SUBSCRIBERS_MAX];
+
+ /* Have we been enabled for packet-generation? */
+ u32 pgen_enabled;
+
+ /* pgen client side */
+ /* Source and destination info */
+ u32 pgen_clt_src_addr;
+ int pgen_clt_src_port;
+ u32 pgen_clt_dest_addr;
+ int pgen_clt_dest_port;
+
+ /* pgen server side */
+ /* Have we enabled the packet generator server? */
+ u32 pgen_svr_enabled;
+
+ /* Arbitrary content */
+ u32 pgen_svr_buffer_idx;
+
+ test_cicn_api_handler_fn test_cicn_api_handler;
+} cicn_main_t;
+
+extern cicn_main_t cicn_main;
+
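+/* strncpy wrapper that guarantees NUL-termination of dst (may truncate) */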
+static inline char *
+cicn_sstrncpy (char *dst, const char *src, size_t n)
+{
+ char *res = strncpy (dst, src, n);
+ dst[n - 1] = '\000';
+ return (res);
+}
+
+/* Forwarder's name data structure */
+typedef struct cicn_infra_fwdr_name_s
+{
+ uint64_t fn_match_pfx_hash; // hash of fname's relevant pfx for match
+ int fn_reply_payload_flen; // total bytes in reply payload
+#define CICN_FWDR_NAME_BUFSIZE 200
+ uint8_t fn_reply_payload[CICN_FWDR_NAME_BUFSIZE]; // wire-fmt reply payload
+ cicn_prefix_hashinf_t fn_hashinf; // hash of fname components
+ char fn_str[CICN_FWDR_NAME_BUFSIZE]; // fname ascii version for output
+} cicn_infra_fwdr_name_t;
+
+/* Global name of the forwarder */
+extern cicn_infra_fwdr_name_t cicn_infra_fwdr_name;
+
+extern int cicn_infra_fwdr_initialized;
+
+/*
+ * Generation numbers for coordination between config changes and running
+ * worker threads. Certain configuration changes (deletes, especially)
+ * cause the master config generation to increment. Each worker checks the
+ * master value and updates its own dedicated value as it begins each
+ * frame of work. We hope this allows us to safely integrate configuration
+ * changes without more costly synchronization.
+ */
+
+/* Each value is actually a stand-alone cache line in size, so that
+ * the worker threads don't have to be entangled trying to make high-rate
+ * updates to shared cache lines.
+ */
+typedef struct cicn_infra_shard_s
+{
+ volatile uint64_t cfg_generation;
+ uint64_t _extra[7];
+ cicn_face_stats_t face_stats[CICN_PARAM_FACES_MAX];
+} cicn_infra_shard_t;
+
+/* Global generation value, updated for (some? all?) config changes */
+extern cicn_infra_shard_t cicn_infra_gshard;
+
+#define CICN_INFRA_CFG_GEN_INCR() (cicn_infra_gshard.cfg_generation++)
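+
+/*
+ * Intended use (per the description above): config-change paths invoke
+ * CICN_INFRA_CFG_GEN_INCR(); each worker copies the global value into its
+ * own shard's cfg_generation as it begins a frame, refreshing any cached
+ * state when the value has changed.
+ */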
+
+/* Fixed array for worker threads, to be indexed by worker index */
+#define CICN_INFRA_WORKERS_MAX 24
+extern cicn_infra_shard_t cicn_infra_shards[CICN_INFRA_WORKERS_MAX];
+
+/* Per shard limits */
+uint32_t cicn_infra_shard_pit_size;
+uint32_t cicn_infra_shard_cs_size;
+
+/* cicn-owned return codes for cases where unix return codes are insufficient */
+#define foreach_cicn_rc \
+_(OK, 0, "ok") \
+_(FACE_UNKNOWN, 1, "face unknown") \
+_(FIB_PFX_COMP_LIMIT, 2, "fib prefix too many components") \
+_(FIB_PFX_SIZE_LIMIT, 3, "fib prefix too long") \
+_(FIB_NHOP_LIMIT, 4, "fib next hop limit exceeded")
+
+typedef enum
+{
+#define _(a,b,c) CICN_RC_##a = b,
+ foreach_cicn_rc
+#undef _
+} cicn_rc_e;
+
+struct cicn_rd_s
+{
+ cicn_rc_e rd_cicn_rc;
+ int rd_ux_rc;
+};
+
+static inline void
+cicn_rd_set (cicn_rd_t * cicn_rd, cicn_rc_e cicn_rc, int ux_rc)
+{
+ cicn_rd->rd_cicn_rc = cicn_rc;
+ cicn_rd->rd_ux_rc = ux_rc;
+}
+
+/*
+ * Printable error representation
+ */
+const char *cicn_rc_str (cicn_rc_e crc);
+
+const char *cicn_rd_str (cicn_rd_t * cicn_rd);
+
+/*
+ * wrapped timer sequence package (increment, comparison)
+ */
+
+/*
+ * wrappable counter math (assumed uint16_t): return sum of addends
+ */
+static inline uint16_t
+cicn_infra_seq16_sum (uint16_t addend1, uint16_t addend2)
+{
+ return (addend1 + addend2);
+}
+
+/*
+ * for comparing wrapping numbers, return lt,eq,gt 0 for a lt,eq,gt b
+ */
+static inline int
+cicn_infra_seq16_cmp (uint16_t a, uint16_t b)
+{
+ return ((int16_t) (a - b));
+}
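+
+/*
+ * Example: cicn_infra_seq16_cmp (1, 65535) == 2 (> 0), i.e. a counter that
+ * has just wrapped compares as newer than one about to wrap.
+ */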
+
+/*
+ * below are wrappers for lt, le, gt, ge seq16 comparators
+ */
+static inline int
+cicn_infra_seq16_lt (uint16_t a, uint16_t b)
+{
+ return (cicn_infra_seq16_cmp (a, b) < 0);
+}
+
+static inline int
+cicn_infra_seq16_le (uint16_t a, uint16_t b)
+{
+ return (cicn_infra_seq16_cmp (a, b) <= 0);
+}
+
+static inline int
+cicn_infra_seq16_gt (uint16_t a, uint16_t b)
+{
+ return (cicn_infra_seq16_cmp (a, b) > 0);
+}
+
+static inline int
+cicn_infra_seq16_ge (uint16_t a, uint16_t b)
+{
+ return (cicn_infra_seq16_cmp (a, b) >= 0);
+}
+
+/* Definitions and Forward refs for the time counters we're trying out.
+ * Counters are maintained by the background process.
+ */
+#define SEC_MS 1000
+#define CICN_INFRA_FAST_TIMER_SECS 1
+#define CICN_INFRA_FAST_TIMER_MSECS (CICN_INFRA_FAST_TIMER_SECS * SEC_MS)
+#define CICN_INFRA_SLOW_TIMER_SECS 60
+#define CICN_INFRA_SLOW_TIMER_MSECS (CICN_INFRA_SLOW_TIMER_SECS * SEC_MS)
+
+extern uint16_t cicn_infra_fast_timer; /* Counts at 1 second intervals */
+extern uint16_t cicn_infra_slow_timer; /* Counts at 1 minute intervals */
+
+/*
+ * Utilities to convert lifetime into expiry time based on
+ * compressed clock, suitable for the opportunistic hashtable
+ * entry timeout processing.
+ */
+
+// convert time in msec to time in clicks
+static inline uint16_t
+cicn_infra_ms2clicks (uint64_t time_ms, uint64_t ms_per_click)
+{
+ f64 time_clicks =
+ ((f64) (time_ms + ms_per_click - 1)) / ((f64) ms_per_click);
+ return ((uint16_t) time_clicks);
+}
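+
+/*
+ * Example: cicn_infra_ms2clicks (1500, CICN_INFRA_FAST_TIMER_MSECS) yields
+ * 2, i.e. lifetimes are rounded up to a whole number of clicks.
+ */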
+
+static inline uint16_t
+cicn_infra_get_fast_exp_time (uint64_t lifetime_ms)
+{
+ uint16_t lifetime_clicks =
+ cicn_infra_ms2clicks (lifetime_ms, CICN_INFRA_FAST_TIMER_MSECS);
+ return (cicn_infra_seq16_sum (cicn_infra_fast_timer, lifetime_clicks));
+}
+
+static inline uint16_t
+cicn_infra_get_slow_exp_time (uint64_t lifetime_ms)
+{
+ uint16_t lifetime_clicks =
+ cicn_infra_ms2clicks (lifetime_ms, CICN_INFRA_SLOW_TIMER_MSECS);
+ return (cicn_infra_seq16_sum (cicn_infra_slow_timer, lifetime_clicks));
+}
+
+int
+cicn_infra_plugin_enable_disable (int enable_disable,
+ int fib_max_size,
+ int pit_max_size,
+ f64 pit_dflt_lifetime_sec_req,
+ f64 pit_min_lifetime_sec_req,
+ f64 pit_max_lifetime_sec_req,
+ int cs_max_size);
+
+/* First versions of the ICN nodes: the forwarder node, the work-distributor
+ * node and the packet-generator client and server nodes
+ */
+extern vlib_node_registration_t icnfwd_node;
+extern vlib_node_registration_t icndist_node;
+extern vlib_node_registration_t icn_pg_node;
+extern vlib_node_registration_t icn_pg_server_node;
+
+#endif // _CICN_INFRA_H_
diff --git a/cicn-plugin/cicn/cicn_mgmt.c b/cicn-plugin/cicn/cicn_mgmt.c
new file mode 100644
index 00000000..18f03530
--- /dev/null
+++ b/cicn-plugin/cicn/cicn_mgmt.c
@@ -0,0 +1,2277 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Management plane:
+ * - Handlers for CICN binary API operations (vpp_api_test)
+ * - Declarations of and handlers for DBG-cli commands
+ * - Internal management operation handlers called by both of the
+ * above, to minimize copied code.
+ */
+#include <inttypes.h>
+
+#include <vlib/vlib.h>
+#include <vppinfra/error.h>
+#include <vnet/ip/format.h>
+#include <vlibapi/api.h>
+#include <vlibmemory/api.h>
+#include <vlibsocket/api.h>
+
+#include <vnet/ip/udp.h> // port registration
+
+#include <cicn/cicn.h>
+
+/* define message IDs */
+#include <cicn/cicn_msg_enum.h>
+
+/* define generated endian-swappers */
+#define vl_endianfun
+#include <cicn/cicn_all_api_h.h>
+#undef vl_endianfun
+
+/* instantiate all the print functions we know about */
+#define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__)
+#define vl_printfun
+#include <cicn/cicn_all_api_h.h>
+#undef vl_printfun
+
+/* Get the API version number */
+#define vl_api_version(n,v) static u32 api_version=(v);
+#include <cicn/cicn_all_api_h.h>
+#undef vl_api_version
+
+/*
+ * Handy macros to initialize/send a message reply.
+ * Assumes that certain variables, shown in comments, are available
+ * as local variables in the calling routine.
+ */
+
+// allocate rmp buffer, verify queue is valid
+#define REPLY_SETUP(t, rmp, q, mp) \
+do { \
+ q = vl_api_client_index_to_input_queue(mp->client_index); \
+ if (!q) \
+ return; \
+ \
+ rmp = vl_msg_api_alloc(sizeof(*rmp)); \
+ rmp->_vl_msg_id = ntohs(sm->msg_id_base + (t)); \
+ rmp->context = mp->context; \
+} while(0);
+
+// set return value and send response
+#define REPLY_FINISH(rmp, q, rv) \
+do { \
+ rmp->retval = ntohl(rv); \
+ vl_msg_api_send_shmem (q, (u8 *)&rmp); \
+} while(0);
+
+// combined single macro to allocate rmp buffer and send rv response
+// can only be used for API calls (e.g. "set" calls) that only return rv
+#define REPLY_MACRO(t/*, rmp, mp, rv*/) \
+do { \
+ unix_shared_memory_queue_t * q; \
+ REPLY_SETUP(t, rmp, q, mp); \
+ REPLY_FINISH(rmp, q, rv); \
+} while(0);
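+
+/*
+ * Typical handler usage (see the handlers below):
+ *   REPLY_SETUP (VL_API_..._REPLY, rmp, q, mp);
+ *   ... fill reply-specific fields of rmp ...
+ *   REPLY_FINISH (rmp, q, rv);
+ * or, for replies carrying only a return value,
+ *   REPLY_MACRO (VL_API_..._REPLY);
+ */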
+
+
+/*
+ * Convert a unix return code to a vnet_api return code.
+ * Currently stubby: should have more cases.
+ */
+static inline vnet_api_error_t
+cicn_api_rv_from_unix_rc (int ux_rc)
+{
+ vnet_api_error_t vae;
+
+ switch (ux_rc)
+ {
+ case AOK:
+ vae = CICN_VNET_API_ERROR_NONE;
+ break;
+ default:
+ vae = VNET_API_ERROR_SYSCALL_ERROR_9; // should not happen, add cases
+ break;
+ }
+ return (vae);
+}
+
+/*
+ * Convert a unix return code to a vnet_api return code.
+ * Currently stubby: should use cl_error unix_rc if available
+ */
+static inline vnet_api_error_t
+cicn_api_rv_from_clib_error (clib_error_t * cl_err)
+{
+ vnet_api_error_t vae;
+
+ if (cl_err == NULL)
+ {
+ vae = CICN_VNET_API_ERROR_NONE;
+ }
+ else
+ {
+ vae = VNET_API_ERROR_SYSCALL_ERROR_9; // should not happen, add cases
+ }
+ return (vae);
+}
+
+
+/*
+ * Hide the details of cli output from the cicn-aware modules
+ */
+int
+cicn_cli_output (const char *fmt, ...)
+{
+ cicn_main_t *sm = &cicn_main;
+ va_list args;
+ char buf[200];
+
+ va_start (args, fmt);
+ vsnprintf (buf, sizeof (buf), fmt, args);
+ va_end (args);
+
+ /* Belt and suspenders */
+ buf[sizeof (buf) - 1] = 0;
+
+ vlib_cli_output (sm->vlib_main, buf);
+
+ return (0);
+}
+
+/* API message handler */
+static void
+vl_api_cicn_api_node_params_set_t_handler (vl_api_cicn_api_node_params_set_t *
+ mp)
+{
+ vl_api_cicn_api_node_params_set_reply_t *rmp;
+ vnet_api_error_t rv;
+
+ cicn_main_t *sm = &cicn_main;
+ int ux_rc;
+
+ //TODO: implement clib_net_to_host_f64 in VPP ?
+ int fib_max_size = clib_net_to_host_i32 (mp->fib_max_size);
+ int pit_max_size = clib_net_to_host_i32 (mp->pit_max_size);
+ f64 pit_dflt_lifetime_sec = mp->pit_dflt_lifetime_sec;
+ f64 pit_min_lifetime_sec = mp->pit_min_lifetime_sec;
+ f64 pit_max_lifetime_sec = mp->pit_max_lifetime_sec;
+ int cs_max_size = clib_net_to_host_i32 (mp->cs_max_size);
+
+ ux_rc = cicn_infra_plugin_enable_disable ((int) (mp->enable_disable),
+ fib_max_size, pit_max_size,
+ pit_dflt_lifetime_sec,
+ pit_min_lifetime_sec,
+ pit_max_lifetime_sec,
+ cs_max_size);
+
+ rv = cicn_api_rv_from_unix_rc (ux_rc);
+ REPLY_MACRO (VL_API_CICN_API_NODE_PARAMS_SET_REPLY /*, rmp, mp, rv */ );
+}
+
+/* API message handler */
+static void
+vl_api_cicn_api_node_params_get_t_handler (vl_api_cicn_api_node_params_get_t *
+ mp)
+{
+ vl_api_cicn_api_node_params_get_reply_t *rmp;
+ vnet_api_error_t rv;
+
+ cicn_main_t *sm = &cicn_main;
+ int ux_rc = AOK;
+
+ unix_shared_memory_queue_t *q =
+ vl_api_client_index_to_input_queue (mp->client_index);
+ if (!q)
+ {
+ return;
+ }
+
+ rmp = vl_msg_api_alloc (sizeof (*rmp));
+ rmp->_vl_msg_id =
+ ntohs (sm->msg_id_base + VL_API_CICN_API_NODE_PARAMS_GET_REPLY);
+ rmp->context = mp->context;
+ rmp->is_enabled = sm->is_enabled;
+
+ rmp->feature_multithread = CICN_FEATURE_MULTITHREAD;
+ rmp->feature_cs = CICN_FEATURE_CS;
+ rmp->feature_dpdk_rtembuf_cloning = CICN_FEATURE_DPDK_RTEMBUF_CLONING;
+ rmp->feature_vpp_vlib_cloning = CICN_FEATURE_VPP_VLIB_CLONING;
+
+ rmp->worker_count = clib_host_to_net_u32 (sm->worker_count);
+
+ rmp->fib_max_size = clib_host_to_net_u32 (sm->fib.fib_capacity);
+
+ rmp->pit_max_size =
+ clib_host_to_net_u32 (cicn_infra_shard_pit_size * sm->shard_count);
+ //TODO: add clib_host_to_net_f64 to VPP ?
+ rmp->pit_dflt_lifetime_sec = ((f64) sm->pit_lifetime_dflt_ms) / SEC_MS;
+ rmp->pit_min_lifetime_sec = ((f64) sm->pit_lifetime_min_ms) / SEC_MS;
+ rmp->pit_max_lifetime_sec = ((f64) sm->pit_lifetime_max_ms) / SEC_MS;
+ rmp->cs_max_size =
+ clib_host_to_net_u32 (cicn_infra_shard_cs_size * sm->shard_count);
+
+ rv = cicn_api_rv_from_unix_rc (ux_rc);
+ rmp->retval = clib_host_to_net_i32 (rv);
+ vl_msg_api_send_shmem (q, (u8 *) & rmp);
+}
+
+static vl_api_cicn_api_node_params_set_t node_ctl_params = {
+ .fib_max_size = -1,
+ .pit_max_size = -1,
+ .pit_dflt_lifetime_sec = -1.0f,
+ .pit_min_lifetime_sec = -1.0f,
+ .pit_max_lifetime_sec = -1.0f,
+ .cs_max_size = -1,
+};
+
+/*
+ * cli handler for 'control start'
+ */
+static clib_error_t *
+cicn_cli_node_ctl_start_set_command_fn (vlib_main_t * vm,
+ unformat_input_t * main_input,
+ vlib_cli_command_t * cmd)
+{
+ int ux_rc;
+
+ /* Catch unexpected extra arguments on this line.
+ * Get a line of input, but only in the unexpected case that the line
+ * was not already consumed by matching VLIB_CLI_COMMAND.path
+ * [i.e. on "cicn control start\n", don't consume the following line (cmd)
+ * while catching unexpected extra arguments on "cicn control start XXX"]
+ */
+ if (main_input->index > 0 &&
+ main_input->buffer[main_input->index - 1] != '\n')
+ {
+ unformat_input_t _line_input, *line_input = &_line_input;
+ if (!unformat_user (main_input, unformat_line_input, line_input))
+ {
+ return (0);
+ }
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ return clib_error_return (0, "Unknown argument '%U'",
+ format_unformat_error, line_input);
+ }
+ }
+
+ ux_rc = cicn_infra_plugin_enable_disable (1 /*enable */ ,
+ node_ctl_params.fib_max_size,
+ node_ctl_params.pit_max_size,
+ node_ctl_params.pit_dflt_lifetime_sec,
+ node_ctl_params.pit_min_lifetime_sec,
+ node_ctl_params.pit_max_lifetime_sec,
+ node_ctl_params.cs_max_size);
+
+ switch (ux_rc)
+ {
+ case AOK:
+ break;
+ default:
+ return clib_error_return (0, "cmd returned %d", ux_rc);
+ }
+
+ return (0);
+}
+
+/*
+ * cli handler for 'control stop'
+ */
+static clib_error_t *
+cicn_cli_node_ctl_stop_set_command_fn (vlib_main_t * vm,
+ unformat_input_t * main_input,
+ vlib_cli_command_t * cmd)
+{
+ int ux_rc;
+
+ /* Catch unexpected extra arguments on this line.
+ * See comment on cicn_cli_node_ctl_start_set_command_fn
+ */
+ if (main_input->index > 0 &&
+ main_input->buffer[main_input->index - 1] != '\n')
+ {
+ unformat_input_t _line_input, *line_input = &_line_input;
+ if (!unformat_user (main_input, unformat_line_input, line_input))
+ {
+ return (0);
+ }
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ return clib_error_return (0, "Unknown argument '%U'",
+ format_unformat_error, line_input);
+ }
+ }
+
+ ux_rc = cicn_infra_plugin_enable_disable (0 /*!enable */ ,
+ node_ctl_params.fib_max_size,
+ node_ctl_params.pit_max_size,
+ node_ctl_params.pit_dflt_lifetime_sec,
+ node_ctl_params.pit_min_lifetime_sec,
+ node_ctl_params.pit_max_lifetime_sec,
+ node_ctl_params.cs_max_size);
+
+ switch (ux_rc)
+ {
+ case AOK:
+ break;
+ default:
+ return clib_error_return (0, "cmd returned %d", ux_rc);
+ }
+
+ return (0);
+}
+
+#define DFLTD_RANGE_OK(val, min, max) \
+({ \
+ __typeof__ (val) _val = (val); \
+ __typeof__ (min) _min = (min); \
+ __typeof__ (max) _max = (max); \
+ (_val == -1) || \
+ (_val >= _min && _val <= _max); \
+})
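+
+/*
+ * Examples: DFLTD_RANGE_OK (-1, 10, 20) is true (value left at its
+ * default), DFLTD_RANGE_OK (5, 10, 20) is false (out of range).
+ */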
+
+/*
+ * cli handler for 'control param'
+ */
+static clib_error_t *
+cicn_cli_node_ctl_param_set_command_fn (vlib_main_t * vm,
+ unformat_input_t * main_input,
+ vlib_cli_command_t * cmd)
+{
+ int rv = 0;
+
+ int table_size;
+ f64 lifetime;
+
+ if (cicn_main.is_enabled)
+ {
+ return (clib_error_return
+ (0, "params cannot be altered once cicn started"));
+ }
+
+ /* Get a line of input. */
+ unformat_input_t _line_input, *line_input = &_line_input;
+ if (!unformat_user (main_input, unformat_line_input, line_input))
+ {
+ return (0);
+ }
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "fib"))
+ {
+ if (unformat (line_input, "size %d", &table_size))
+ {
+ if (!DFLTD_RANGE_OK (table_size, CICN_PARAM_FIB_ENTRIES_MIN,
+ CICN_PARAM_FIB_ENTRIES_MAX))
+ {
+ rv = VNET_API_ERROR_INVALID_VALUE;
+ break;
+ }
+ node_ctl_params.fib_max_size = table_size;
+ }
+ else
+ {
+ rv = VNET_API_ERROR_UNIMPLEMENTED;
+ break;
+ }
+ }
+ else if (unformat (line_input, "pit"))
+ {
+ if (unformat (line_input, "size %d", &table_size))
+ {
+ if (!DFLTD_RANGE_OK (table_size, CICN_PARAM_PIT_ENTRIES_MIN,
+ CICN_PARAM_PIT_ENTRIES_MAX))
+ {
+ rv = VNET_API_ERROR_INVALID_VALUE;
+ break;
+ }
+ node_ctl_params.pit_max_size = table_size;
+ }
+ else if (unformat (line_input, "dfltlife %f", &lifetime))
+ {
+ if (!DFLTD_RANGE_OK
+ (lifetime, CICN_PARAM_PIT_LIFETIME_BOUND_MIN_SEC,
+ CICN_PARAM_PIT_LIFETIME_BOUND_MAX_SEC))
+ {
+ rv = VNET_API_ERROR_INVALID_VALUE;
+ break;
+ }
+ node_ctl_params.pit_dflt_lifetime_sec = lifetime;
+ }
+ else if (unformat (line_input, "minlife %f", &lifetime))
+ {
+ if (!DFLTD_RANGE_OK
+ (lifetime, CICN_PARAM_PIT_LIFETIME_BOUND_MIN_SEC,
+ CICN_PARAM_PIT_LIFETIME_BOUND_MAX_SEC))
+ {
+ rv = VNET_API_ERROR_INVALID_VALUE;
+ break;
+ }
+ node_ctl_params.pit_min_lifetime_sec = lifetime;
+ }
+ else if (unformat (line_input, "maxlife %f", &lifetime))
+ {
+ if (!DFLTD_RANGE_OK
+ (lifetime, CICN_PARAM_PIT_LIFETIME_BOUND_MIN_SEC,
+ CICN_PARAM_PIT_LIFETIME_BOUND_MAX_SEC))
+ {
+ rv = VNET_API_ERROR_INVALID_VALUE;
+ break;
+ }
+ node_ctl_params.pit_max_lifetime_sec = lifetime;
+ }
+ else
+ {
+ rv = VNET_API_ERROR_UNIMPLEMENTED;
+ break;
+ }
+ }
+ else if (unformat (line_input, "cs"))
+ {
+ if (unformat (line_input, "size %d", &table_size))
+ {
+ if (!DFLTD_RANGE_OK (table_size, CICN_PARAM_CS_ENTRIES_MIN,
+ CICN_PARAM_CS_ENTRIES_MAX))
+ {
+ rv = VNET_API_ERROR_INVALID_VALUE;
+ break;
+ }
+ node_ctl_params.cs_max_size = table_size;
+ }
+ else
+ {
+ rv = VNET_API_ERROR_UNIMPLEMENTED;
+ break;
+ }
+ }
+ else
+ {
+ rv = VNET_API_ERROR_UNIMPLEMENTED;
+ break;
+ }
+ }
+
+ switch (rv)
+ {
+ case 0:
+ break;
+ case VNET_API_ERROR_UNIMPLEMENTED:
+ return clib_error_return (0, "Unknown argument '%U'",
+ format_unformat_error, line_input);
+ default:
+ return clib_error_return (0, "cmd returned %d", rv);
+ }
+
+ return (0);
+}
+
+/*
+ * cli handler for 'enable'
+ */
+static clib_error_t *
+cicn_cli_node_enable_disable_set_command_fn (vlib_main_t * vm,
+ unformat_input_t * main_input,
+ vlib_cli_command_t * cmd)
+{
+ int enable_disable = 1;
+ int ux_rc;
+
+ /* Get a line of input. */
+ unformat_input_t _line_input, *line_input = &_line_input;
+ if (!unformat_user (main_input, unformat_line_input, line_input))
+ {
+ return (0);
+ }
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "disable"))
+ {
+ enable_disable = 0;
+ }
+ else
+ {
+ return clib_error_return (0, "Unknown argument '%U'",
+ format_unformat_error, line_input);
+ }
+ }
+
+ ux_rc = cicn_infra_plugin_enable_disable (enable_disable,
+ node_ctl_params.fib_max_size,
+ node_ctl_params.pit_max_size,
+ node_ctl_params.pit_dflt_lifetime_sec,
+ node_ctl_params.pit_min_lifetime_sec,
+ node_ctl_params.pit_max_lifetime_sec,
+ node_ctl_params.cs_max_size);
+
+ switch (ux_rc)
+ {
+ case AOK:
+ break;
+
+ default:
+ return clib_error_return (0, "cicn enable_disable returned %d", ux_rc);
+ }
+ return 0;
+}
+
+/*
+ * cli handler for 'cfg name': router's own ICN name
+ */
+static clib_error_t *
+cicn_cli_node_name_set_command_fn (vlib_main_t * vm,
+ unformat_input_t * main_input,
+ vlib_cli_command_t * cmd)
+{
+ cicn_infra_fwdr_name_t *gfname = &cicn_infra_fwdr_name;
+ int delete = 0;
+ uint8_t buf[200];
+ int len, ret;
+ const char *fwdr_name = NULL;
+ uint8_t *ptr;
+
+ /* Get a line of input. */
+ unformat_input_t _line_input, *line_input = &_line_input;
+ if (!unformat_user (main_input, unformat_line_input, line_input))
+ {
+ return (0);
+ }
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "delete"))
+ {
+ delete = 1;
+ }
+ else if (unformat (line_input, "%s", &fwdr_name))
+ {
+ ;
+ }
+ else
+ {
+ return clib_error_return (0, "Unknown argument '%U'",
+ format_unformat_error, line_input);
+ }
+ }
+
+ /* Verify that the given name is not empty */
+ if (fwdr_name == NULL)
+ {
+ return clib_error_return (0, "Please specify an non-empty name...");
+ }
+
+ /* Handle delete case */
+ if (delete)
+ {
+ if (gfname->fn_reply_payload_flen == 0)
+ {
+ return clib_error_return (0,
+ "Forwarder does not have a name yet...");
+ }
+ else if (strcmp (gfname->fn_str, fwdr_name) == 0)
+ {
+ cicn_sstrncpy (gfname->fn_str, "no-name", sizeof (gfname->fn_str));
+ gfname->fn_reply_payload_flen = 0;
+ vlib_cli_output (vm, "name:%s: deleted successfully", fwdr_name);
+ }
+ else
+ {
+ return clib_error_return (0, "Name for deletion not found...");
+ }
+ }
+ else
+ {
+ /* TODO: Potentially do more validation for the parsed name */
+ if (strlen (fwdr_name) > sizeof (buf))
+ {
+ return clib_error_return (0, "The given name is too long...");
+ }
+ /* Convert prefix to wire-format */
+ cicn_rd_t cicn_rd;
+ len =
+ cicn_parse_name_comps_from_str (buf, sizeof (buf), fwdr_name,
+ &cicn_rd);
+ if (len < 0)
+ {
+ return clib_error_return (0,
+ "Could not parse name comps from the name: %s...",
+ cicn_rd_str (&cicn_rd));
+ }
+ /* Hash the prefix */
+ ret = cicn_hashtb_hash_prefixes (buf, len, 0 /*full_name */ ,
+ &gfname->fn_hashinf, 0 /*limit */ );
+ if (ret != AOK)
+ {
+ return clib_error_return (0, "Could not hash the given name...");
+ }
+ gfname->fn_match_pfx_hash =
+ gfname->fn_hashinf.pfx_hashes[gfname->fn_hashinf.pfx_count - 1];
+ cicn_sstrncpy (gfname->fn_str, fwdr_name, sizeof (gfname->fn_str));
+
+ gfname->fn_reply_payload_flen = CICN_TLV_HDR_LEN + len;
+ /* Check for overflow */
+ if (gfname->fn_reply_payload_flen > CICN_FWDR_NAME_BUFSIZE)
+ {
+ vlib_cli_output (vm, "traceroute payload TLV: overflow");
+ }
+
+ /* Create the traceroute payload (name TLV) */
+ memset (gfname->fn_reply_payload, 0, sizeof (gfname->fn_reply_payload));
+ ptr = gfname->fn_reply_payload;
+ C_PUTINT16 (&ptr[0], CICN_TLV_PAYLOAD);
+ C_PUTINT16 (&ptr[CICN_TLV_TYPE_LEN], len);
+
+ memcpy (&ptr[CICN_TLV_HDR_LEN], buf, len);
+
+ vlib_cli_output (vm, "name %s: added successfully", gfname->fn_str);
+ }
+ return (0);
+}
+
+/* shared routine between API and CLI, leveraging API message structure */
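+/*
+ * Counters are summed across all vlib mains (threads) from the icnfwd
+ * node's error counters; each reply field is stored in network byte order.
+ */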
+static int
+cicn_mgmt_node_stats_get (vl_api_cicn_api_node_stats_get_reply_t * rmp)
+{
+ rmp->pkts_processed = 0;
+ rmp->pkts_interest_count = 0;
+ rmp->pkts_data_count = 0;
+ rmp->pkts_nak_count = 0;
+ rmp->pkts_from_cache_count = 0;
+ rmp->pkts_nacked_interests_count = 0;
+ rmp->pkts_nak_hoplimit_count = 0;
+ rmp->pkts_nak_no_route_count = 0;
+ rmp->pkts_no_pit_count = 0;
+ rmp->pit_expired_count = 0;
+ rmp->cs_expired_count = 0;
+ rmp->cs_lru_count = 0;
+ rmp->pkts_drop_no_buf = 0;
+ rmp->interests_aggregated = 0;
+ rmp->interests_retx = 0;
+ rmp->pit_entries_count = 0;
+ rmp->cs_entries_count = 0;
+
+ vlib_error_main_t *em;
+ vlib_node_t *n;
+ foreach_vlib_main ((
+ {
+ em = &this_vlib_main->error_main;
+ n = vlib_get_node (this_vlib_main, icnfwd_node.index);
+ u32 node_cntr_base_idx = n->error_heap_index;
+ rmp->pkts_processed +=
+ clib_host_to_net_u64 (em->counters[node_cntr_base_idx +
+ ICNFWD_ERROR_PROCESSED]);
+ rmp->pkts_interest_count +=
+ clib_host_to_net_u64 (em->counters[node_cntr_base_idx +
+ ICNFWD_ERROR_INTERESTS]);
+ rmp->pkts_data_count +=
+ clib_host_to_net_u64 (em->counters[node_cntr_base_idx +
+ ICNFWD_ERROR_DATAS]);
+ rmp->pkts_nak_count +=
+ clib_host_to_net_u64 (em->counters[node_cntr_base_idx +
+ ICNFWD_ERROR_NAKS]);
+ rmp->pkts_from_cache_count +=
+ clib_host_to_net_u64 (em->counters[node_cntr_base_idx +
+ ICNFWD_ERROR_CACHED]);
+ rmp->pkts_nacked_interests_count +=
+ clib_host_to_net_u64 (em->counters[node_cntr_base_idx +
+ ICNFWD_ERROR_NACKED_INTERESTS]);
+ rmp->pkts_nak_hoplimit_count +=
+ clib_host_to_net_u64 (em->counters[node_cntr_base_idx +
+ ICNFWD_ERROR_HOPLIMIT_EXCEEDED]);
+ rmp->pkts_nak_no_route_count +=
+ clib_host_to_net_u64 (em->counters[node_cntr_base_idx +
+ ICNFWD_ERROR_NO_ROUTE]);
+ rmp->pkts_no_pit_count +=
+ clib_host_to_net_u64 (em->counters[node_cntr_base_idx +
+ ICNFWD_ERROR_NO_PIT]);
+ rmp->pit_expired_count +=
+ clib_host_to_net_u64 (em->counters[node_cntr_base_idx +
+ ICNFWD_ERROR_PIT_EXPIRED]);
+ rmp->cs_expired_count +=
+ clib_host_to_net_u64 (em->counters[node_cntr_base_idx +
+ ICNFWD_ERROR_CS_EXPIRED]);
+ rmp->cs_lru_count +=
+ clib_host_to_net_u64 (em->counters[node_cntr_base_idx +
+ ICNFWD_ERROR_CS_LRU]);
+ rmp->pkts_drop_no_buf +=
+ clib_host_to_net_u64 (em->counters[node_cntr_base_idx +
+ ICNFWD_ERROR_NO_BUFS]);
+ rmp->interests_aggregated +=
+ clib_host_to_net_u64 (em->counters[node_cntr_base_idx +
+ ICNFWD_ERROR_INTEREST_AGG]);
+ rmp->interests_retx +=
+ clib_host_to_net_u64 (em->counters[node_cntr_base_idx +
+ ICNFWD_ERROR_INT_RETRANS]);
+ rmp->pit_entries_count +=
+ clib_host_to_net_u64 (em->counters[node_cntr_base_idx +
+ ICNFWD_ERROR_INT_COUNT]);
+ rmp->cs_entries_count +=
+ clib_host_to_net_u64 (em->counters[node_cntr_base_idx +
+ ICNFWD_ERROR_CS_COUNT]);
+ }));
+ return (AOK);
+}
+
+/* API message handler */
+static void
+vl_api_cicn_api_node_stats_get_t_handler (vl_api_cicn_api_node_stats_get_t *
+ mp)
+{
+ vl_api_cicn_api_node_stats_get_reply_t *rmp;
+ cicn_main_t *sm = &cicn_main;
+ vnet_api_error_t vaec = CICN_VNET_API_ERROR_NONE;
+
+ unix_shared_memory_queue_t *q =
+ vl_api_client_index_to_input_queue (mp->client_index);
+ if (!q)
+ return;
+
+ rmp = vl_msg_api_alloc (sizeof (*rmp));
+ rmp->_vl_msg_id =
+ ntohs (sm->msg_id_base + VL_API_CICN_API_NODE_STATS_GET_REPLY);
+ rmp->context = mp->context;
+
+ int ux_rc = cicn_mgmt_node_stats_get (rmp);
+ if (ux_rc != AOK)
+ {
+ vaec = cicn_api_rv_from_unix_rc (ux_rc);
+ }
+
+ rmp->retval = clib_host_to_net_i32 (vaec);
+
+ vl_msg_api_send_shmem (q, (u8 *) & rmp);
+}
+
+/*
+ * cli handler for 'cfg salt': per-router hash salt/nonce
+ */
+static clib_error_t *
+cicn_cli_salt_set_command_fn (vlib_main_t * vm, unformat_input_t * main_input,
+ vlib_cli_command_t * cmd)
+{
+ return (clib_error_return (0, "Not yet implemented..."));
+}
+
+typedef enum
+{
+ CICN_MGMT_FACE_OP_NONE = 0,
+ CICN_MGMT_FACE_OP_CREATE,
+ CICN_MGMT_FACE_OP_DELETE,
+ CICN_MGMT_FACE_OP_ADMIN,
+ CICN_MGMT_FACE_OP_HELLO,
+} cicn_mgmt_face_op_e;
+
+/*
+ * Push Face notifications to all subscribers
+ */
+static void
+cicn_api_face_event_send (int faceid, int faceflags)
+{
+ cicn_main_t *sm = &cicn_main;
+ vl_api_cicn_api_face_event_t *event = vl_msg_api_alloc (sizeof (*event));
+
+ int i;
+ for (i = 0; i < sm->n_face_event_subscribers; i++)
+ {
+ unix_shared_memory_queue_t *mq =
+ vl_api_client_index_to_input_queue (sm->
+ face_event_subscribers
+ [i].client_index);
+ if (!mq)
+ continue;
+
+ memset (event, 0, sizeof (*event));
+ event->_vl_msg_id =
+ ntohs (sm->msg_id_base + VL_API_CICN_API_FACE_EVENT);
+ event->context = sm->face_event_subscribers[i].context;
+ event->client_index = sm->face_event_subscribers[i].client_index;
+ event->faceid = clib_host_to_net_i32 (faceid);
+ event->flags = clib_host_to_net_i32 (faceflags);
+
+ vl_msg_api_send_shmem (mq, (u8 *) & event);
+ }
+}
+
+/*
+ * Face add routine common to binary api and cli.
+ *
+ * Adds UDPv4 face and returns new Face ID if successful, -1 otherwise
+ *
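+ * Steps: find the sw interface that owns local_addr4, create the face
+ * entry, bump the config generation, and register the local UDP port so
+ * received packets reach the distribution (multi-worker) or forwarding
+ * (single-worker) node.
+ *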
+ * TODO -- how to un-register? doesn't seem to be an api for that.
+ */
+static vnet_api_error_t
+cicn_mgmt_face_add (ip4_address_t local_addr4, int local_port,
+ ip4_address_t remote_addr4, int remote_port,
+ int app_face, int *faceid)
+{
+ vnet_api_error_t rv;
+
+ cicn_main_t *sm = &cicn_main;
+ vnet_main_t *vnm = vnet_get_main ();
+ vnet_interface_main_t *im = &vnm->interface_main;
+ vnet_sw_interface_t *swif_list = 0, *si;
+ ip4_main_t *im4 = &ip4_main;
+ ip_lookup_main_t *lm4 = &im4->lookup_main;
+ ip4_address_t *addr4;
+ ip_interface_address_t *ia = 0;
+ int found_p;
+
+ /* Look for a matching swif for the local address */
+ found_p = 0;
+ swif_list = vec_new (vnet_sw_interface_t, pool_elts (im->sw_interfaces));
+ _vec_len (swif_list) = 0;
+
+  pool_foreach (si, im->sw_interfaces, ({
+    vec_add1 (swif_list, si[0]);
+  }));
+
+ vec_foreach (si, swif_list)
+ {
+    foreach_ip_interface_address (lm4, ia, si->sw_if_index, 1, ({
+      addr4 = ip_interface_address_get_address (lm4, ia);
+      if (addr4->as_u32 == local_addr4.as_u32)
+        {
+          found_p = 1;
+          break;
+        }
+    }));
+
+ if (found_p)
+ {
+ break;
+ }
+ };
+
+ if (!found_p)
+ {
+ rv = VNET_API_ERROR_NO_SUCH_ENTRY;
+ goto done;
+ }
+
+ // vlib_cli_output(sm->vlib_main, "cicn: face swif %d", si->sw_if_index);
+
+ /* TODO -- Check that the swif is 'up'? */
+ // if ((si->flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) == 0) {}
+
+ /* Create a cicn 'face', and capture needed info in the face cache */
+ int ux_rc;
+ cicn_rd_t cicn_rd;
+ *faceid = -1;
+ ux_rc = cicn_face_add (local_addr4.as_u32, local_port, remote_addr4.as_u32,
+ remote_port, app_face, si->sw_if_index, faceid,
+ &cicn_rd);
+ if (ux_rc != AOK)
+ { // TODO: look at cicn_rc.rd_cicn_rc first
+ rv = cicn_api_rv_from_unix_rc (ux_rc);
+ goto done;
+ }
+ cicn_face_db_entry_t *face;
+ ux_rc = cicn_face_entry_find_by_id (*faceid, &face);
+ if (ux_rc != AOK)
+ {
+ rv = cicn_api_rv_from_unix_rc (ux_rc);
+ goto done;
+ }
+
+ /* Update config generation number */
+ CICN_INFRA_CFG_GEN_INCR ();
+
+ /* TODO -- output new face id on success? */
+
+ /* On success, start taking packets on the local port. Packets are
+ * delivered to our work-distribution nodes, which then pass them to
+ * forwarding nodes.
+ */
+
+ /* TODO -- only register the port if it's unique? */
+
+ /* If there are worker threads, register our distribution node,
+ * which will decide how packets go to forwarding threads.
+ */
+ if (sm->worker_count > 1)
+ {
+#if CICN_FEATURE_MULTITHREAD
+ udp_register_dst_port (sm->vlib_main, local_port,
+ icndist_node.index, 1);
+#else
+ ASSERT (sm->worker_count <= 1);
+#endif
+ }
+ else
+ {
+ /* Register the forwarding node directly otherwise (in
+ * single-threaded mode, e.g.)
+ */
+ udp_register_dst_port (sm->vlib_main, local_port, icnfwd_node.index, 1);
+ }
+
+ rv = CICN_VNET_API_ERROR_NONE;
+
+done:
+
+ vec_free (swif_list);
+
+ return (rv);
+}
+
+/*
+ * Face delete routine common to binary api and cli.
+ *
+ * Removes specified face
+ */
+static clib_error_t *
+cicn_mgmt_face_remove (int faceid)
+{
+ return clib_error_return (0, "face deletion not implemented");
+}
+
+/* API message handler */
+static void
+vl_api_cicn_api_face_add_t_handler (vl_api_cicn_api_face_add_t * mp)
+{
+ vl_api_cicn_api_face_add_reply_t *rmp;
+ vnet_api_error_t rv;
+ unix_shared_memory_queue_t *q;
+
+ cicn_main_t *sm = &cicn_main;
+ int faceid = -1;
+
+ ip4_address_t local_addr =
+ (ip4_address_t) (clib_net_to_host_u32 (mp->local_addr));
+
+ uint16_t local_port = clib_net_to_host_u16 (mp->local_port);
+
+ ip4_address_t remote_addr =
+ (ip4_address_t) (clib_net_to_host_u32 (mp->remote_addr));
+
+ uint16_t remote_port = clib_net_to_host_u16 (mp->remote_port);
+
+ REPLY_SETUP (VL_API_CICN_API_FACE_ADD_REPLY, rmp, q, mp);
+
+ rv =
+ cicn_mgmt_face_add (local_addr, (int) local_port, remote_addr,
+ (int) remote_port, 0 /*is_app */ , &faceid);
+
+ if (rv >= 0)
+ {
+ rmp->faceid = clib_host_to_net_i32 (faceid);
+ }
+
+ REPLY_FINISH (rmp, q, rv);
+
+ if (rv >= 0)
+ {
+ // send event: for api, defer until after api response
+ cicn_api_face_event_send (faceid, CICN_FACE_FLAGS_DEFAULT);
+ }
+}
+
+/* API message handler */
+static void
+vl_api_cicn_api_face_delete_t_handler (vl_api_cicn_api_face_delete_t * mp)
+{
+ vl_api_cicn_api_face_delete_reply_t *rmp;
+ vnet_api_error_t rv;
+
+ cicn_main_t *sm = &cicn_main;
+ clib_error_t *cl_err = 0;
+
+ int faceid = clib_net_to_host_i32 (mp->faceid);
+ cl_err = cicn_mgmt_face_remove (faceid);
+
+ rv = cicn_api_rv_from_clib_error (cl_err);
+ REPLY_MACRO (VL_API_CICN_API_FACE_DELETE_REPLY /*, rmp, mp, rv */ );
+
+ // TODO: check error value or rv value
+ cicn_api_face_event_send (faceid, CICN_FACE_FLAG_DELETED);
+}
+
+/* API message handler */
+static void
+vl_api_cicn_api_face_params_get_t_handler (vl_api_cicn_api_face_params_get_t *
+ mp)
+{
+ vl_api_cicn_api_face_params_get_reply_t *rmp;
+ vnet_api_error_t rv;
+ unix_shared_memory_queue_t *q;
+
+ cicn_main_t *sm = &cicn_main;
+
+ int faceid = clib_net_to_host_i32 (mp->faceid);
+
+ REPLY_SETUP (VL_API_CICN_API_FACE_PARAMS_GET_REPLY, rmp, q, mp);
+
+ rv = cicn_face_api_entry_params_serialize (faceid, rmp);
+
+ REPLY_FINISH (rmp, q, rv);
+}
+
+/* API message handler */
+static void
+vl_api_cicn_api_face_props_get_t_handler (vl_api_cicn_api_face_props_get_t *
+ mp)
+{
+ vl_api_cicn_api_face_props_get_reply_t *rmp;
+ vnet_api_error_t rv = 0;
+ unix_shared_memory_queue_t *q;
+
+ cicn_main_t *sm = &cicn_main;
+
+ REPLY_SETUP (VL_API_CICN_API_FACE_PROPS_GET_REPLY, rmp, q, mp);
+
+ rv = cicn_face_api_entry_props_serialize (rmp);
+
+ REPLY_FINISH (rmp, q, rv);
+}
+
+/* API message handler */
+static void
+vl_api_cicn_api_face_stats_get_t_handler (vl_api_cicn_api_face_stats_get_t *
+ mp)
+{
+ vl_api_cicn_api_face_stats_get_reply_t *rmp;
+ vnet_api_error_t rv;
+ unix_shared_memory_queue_t *q;
+
+ cicn_main_t *sm = &cicn_main;
+
+ int faceid = clib_net_to_host_i32 (mp->faceid);
+
+ REPLY_SETUP (VL_API_CICN_API_FACE_STATS_GET_REPLY, rmp, q, mp);
+
+ rv = cicn_face_api_entry_stats_serialize (faceid, rmp);
+
+ REPLY_FINISH (rmp, q, rv);
+}
+
+/* API message handler */
+static void
+ vl_api_cicn_api_face_events_subscribe_t_handler
+ (vl_api_cicn_api_face_events_subscribe_t * mp)
+{
+ cicn_main_t *sm = &cicn_main;
+ vl_api_cicn_api_face_events_subscribe_reply_t *rmp;
+
+ int rv = VNET_API_ERROR_INVALID_ARGUMENT;
+
+ u16 enable = clib_net_to_host_u16 (mp->enable_disable);
+
+ if (enable == 1)
+ {
+ // if the maximum number of event subscribers is not exceeded yet
+ if (sm->n_face_event_subscribers <
+ CICN_PARAM_API_EVENT_SUBSCRIBERS_MAX - 1)
+ {
+ // save the info about the event subscriber
+ memcpy (&(sm->face_event_subscribers[sm->n_face_event_subscribers]),
+ mp, sizeof (*mp));
+
+ sm->n_face_event_subscribers++;
+
+ rv = CICN_VNET_API_ERROR_NONE;
+ }
+ }
+ else if (enable == 0)
+ {
+ rv = VNET_API_ERROR_UNSPECIFIED;
+
+ // find the event subscriber with matching client_index
+ int i;
+ for (i = 0; i < sm->n_face_event_subscribers; i++)
+ {
+ if (mp->client_index == sm->face_event_subscribers[i].client_index)
+ {
+ // shift left the remaining items
+ int j;
+ for (j = i; j < sm->n_face_event_subscribers; j++)
+ {
+ memcpy (&(sm->face_event_subscribers[j]),
+ &(sm->face_event_subscribers[j + 1]), sizeof (*mp));
+ rv = CICN_VNET_API_ERROR_NONE;
+ }
+ sm->n_face_event_subscribers--;
+ break;
+ }
+ }
+ }
+
+ REPLY_MACRO (VL_API_CICN_API_FACE_EVENTS_SUBSCRIBE_REPLY /*, rmp, mp, rv */
+ );
+}
+
+static clib_error_t *
+cicn_mgmt_face_add_cli (ip4_address_t local_addr4, int local_port,
+ ip4_address_t remote_addr4, int remote_port,
+ int app_face, int *faceid)
+{
+ int rv;
+
+ rv =
+ cicn_mgmt_face_add (local_addr4, local_port, remote_addr4, remote_port,
+ app_face, faceid);
+
+ switch (rv)
+ {
+ case 0:
+ break;
+ case VNET_API_ERROR_NO_SUCH_ENTRY:
+ return (clib_error_return (0, "No matching interface"));
+ break;
+
+ case VNET_API_ERROR_INVALID_SW_IF_INDEX:
+ return
+ clib_error_return (0,
+ "Invalid interface, only works on physical ports");
+ break;
+
+ case VNET_API_ERROR_UNIMPLEMENTED:
+ return clib_error_return (0,
+ "Device driver doesn't support redirection");
+ break;
+
+ default:
+ return clib_error_return (0, "cicn_cfg_face returned %d", rv);
+ }
+
+ // send event in different places for cli, api: see api case
+ cicn_api_face_event_send (*faceid, CICN_FACE_FLAGS_DEFAULT);
+ return 0;
+}
+
+/*
+ * cli handler for 'cfg face local <addr:port> remote <addr:port>'
+ */
+static clib_error_t *
+cicn_cli_face_set_command_fn (vlib_main_t * vm, unformat_input_t * main_input,
+ vlib_cli_command_t * cmd)
+{
+ clib_error_t *cl_err = 0;
+ cicn_main_t *sm = &cicn_main;
+ cicn_face_db_entry_t *face_entry = NULL;
+ ip4_address_t local_addr4, remote_addr4;
+ int local_port = 0, remote_port = 0;
+ int faceid = -1;
+ cicn_mgmt_face_op_e face_op = CICN_MGMT_FACE_OP_NONE;
+ const char *cfg_admin_str = NULL;
+ int cfg_admin_up = 0;
+ const char *cfg_hello_str = NULL;
+ int cfg_hello_enable = 0;
+ int app_face = 0;
+ int ret;
+
+ local_addr4.as_u32 = 0;
+ remote_addr4.as_u32 = 0;
+
+ /* Get a line of input. */
+ unformat_input_t _line_input, *line_input = &_line_input;
+ if (!unformat_user (main_input, unformat_line_input, line_input))
+ {
+ return (0);
+ }
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "id %d", &faceid))
+ {
+ if (unformat (line_input, "delete"))
+ {
+ /* TODO -- handle delete case... */
+ face_op = CICN_MGMT_FACE_OP_DELETE;
+ }
+ else if (unformat (line_input, "admin %s", &cfg_admin_str))
+ {
+ face_op = CICN_MGMT_FACE_OP_ADMIN;
+ if (strcmp (cfg_admin_str, "up") == 0)
+ {
+ cfg_admin_up = 1;
+ }
+ else if (strcmp (cfg_admin_str, "down") == 0)
+ {
+ cfg_admin_up = 0;
+ }
+ else
+ {
+ return (clib_error_return
+ (0, "Unknown face state %s", cfg_admin_str));
+ }
+ }
+ else if (unformat (line_input, "hello %s", &cfg_hello_str))
+ {
+ face_op = CICN_MGMT_FACE_OP_HELLO;
+ if (strcmp (cfg_hello_str, "enable") == 0)
+ {
+ cfg_hello_enable = 1;
+ }
+ else if (strcmp (cfg_hello_str, "disable") == 0)
+ {
+ cfg_hello_enable = 0;
+ }
+ else
+ {
+ return (clib_error_return
+ (0, "Unknown hello option (%s)", cfg_hello_str));
+ }
+ }
+ else
+ {
+ return clib_error_return (0, "Please specify face operation");
+ }
+ }
+ else if (unformat (line_input, "add"))
+ {
+ face_op = CICN_MGMT_FACE_OP_CREATE;
+ if (unformat (line_input, "local %U:%d",
+ unformat_ip4_address, &local_addr4, &local_port))
+ {
+ if (unformat (line_input, "remote %U:%d",
+ unformat_ip4_address, &remote_addr4,
+ &remote_port))
+ {
+ if (unformat (line_input, "app_face"))
+ {
+ app_face = 1;
+ }
+ }
+ }
+ }
+ else
+ {
+ return clib_error_return (0, "Unknown input '%U'",
+ format_unformat_error, line_input);
+ break;
+ }
+ }
+
+ if (faceid != -1)
+ {
+ ret = cicn_face_entry_find_by_id (faceid, &face_entry);
+ if (ret != AOK)
+ {
+ return clib_error_return (0, "faceid %d not valid", faceid);
+ }
+ }
+
+ switch (face_op)
+ {
+ case CICN_MGMT_FACE_OP_CREATE:
+ /* Check for presence of local address/port */
+ if ((local_addr4.as_u32 == 0) || (local_port == 0))
+ {
+ return clib_error_return (0, "local address/port not specified");
+ }
+
+ /* Check for presence of remote address/port */
+ if ((remote_addr4.as_u32 == 0) || (remote_port == 0))
+ {
+ return clib_error_return (0, "remote address/port not specified");
+ }
+ cl_err =
+ cicn_mgmt_face_add_cli (local_addr4, local_port, remote_addr4,
+ remote_port, app_face, &faceid);
+ if (cl_err == 0)
+ {
+ vlib_cli_output (sm->vlib_main, "Face ID: %d", faceid);
+ }
+ else
+ {
+ vlib_cli_output (sm->vlib_main, "Face add failed");
+ }
+ break;
+ case CICN_MGMT_FACE_OP_DELETE:
+ cl_err = cicn_mgmt_face_remove (faceid);
+ break;
+ case CICN_MGMT_FACE_OP_ADMIN:
+ cicn_face_flags_update (face_entry, !cfg_admin_up,
+ CICN_FACE_FLAG_ADMIN_DOWN);
+ break;
+ case CICN_MGMT_FACE_OP_HELLO:
+ cl_err = cicn_hello_adj_update (faceid, cfg_hello_enable);
+ break;
+ default:
+ return clib_error_return (0, "Operation (%d) not implemented", face_op);
+ break;
+ }
+ return cl_err;
+}
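+
+/*
+ * Illustrative debug-cli usage for the handler above (addresses, ports and
+ * face id are example values only):
+ *
+ *   cicn cfg face add local 10.0.0.1:9695 remote 10.0.0.2:9695
+ *   cicn cfg face id 1 admin down
+ *   cicn cfg face id 1 hello enable
+ *   cicn cfg face id 1 delete
+ */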
+
+
+/* API message handler */
+static void
+vl_api_cicn_api_fib_entry_nh_add_t_handler (vl_api_cicn_api_fib_entry_nh_add_t
+ * mp)
+{
+ vl_api_cicn_api_fib_entry_nh_add_reply_t *rmp;
+ vnet_api_error_t rv = CICN_VNET_API_ERROR_NONE;
+
+ cicn_main_t *sm = &cicn_main;
+
+ const char *prefix = (const char *) (&mp->prefix);
+ int faceid = clib_net_to_host_i32 (mp->faceid);
+ int weight = clib_net_to_host_i32 (mp->weight);
+
+ if ((prefix == NULL) || (strlen (prefix) <= 0)
+ || (strlen (prefix) > CICN_PARAM_FIB_ENTRY_PFX_WF_BYTES_MAX) ||
+ (faceid <= 0))
+ {
+ rv = VNET_API_ERROR_INVALID_ARGUMENT;
+ }
+
+ if (weight == CICN_API_FIB_ENTRY_NHOP_WGHT_UNSET)
+ {
+ weight = CICN_PARAM_FIB_ENTRY_NHOP_WGHT_DFLT;
+ }
+ if ((weight < 0) || (weight > CICN_PARAM_FIB_ENTRY_NHOP_WGHT_MAX))
+ {
+ rv = VNET_API_ERROR_INVALID_ARGUMENT;
+ }
+
+ if (rv == CICN_VNET_API_ERROR_NONE)
+ {
+ int ux_rc;
+ cicn_rd_t cicn_rd;
+ ux_rc = cicn_fib_entry_nh_update (prefix, faceid, weight, 1 /*add */ ,
+ &cicn_rd);
+ if (ux_rc == AOK)
+ {
+ CICN_INFRA_CFG_GEN_INCR ();
+ }
+ switch (cicn_rd.rd_cicn_rc)
+ {
+ case CICN_RC_OK:
+ rv = cicn_api_rv_from_unix_rc (ux_rc);
+ break;
+ case CICN_RC_FIB_PFX_COMP_LIMIT:
+ rv = VNET_API_ERROR_SYSCALL_ERROR_1;
+ break;
+ case CICN_RC_FIB_PFX_SIZE_LIMIT:
+ rv = VNET_API_ERROR_SYSCALL_ERROR_2;
+ break;
+ case CICN_RC_FIB_NHOP_LIMIT:
+ rv = VNET_API_ERROR_SYSCALL_ERROR_3;
+ break;
+ case CICN_RC_FACE_UNKNOWN:
+ rv = VNET_API_ERROR_SYSCALL_ERROR_4;
+ break;
+ default:
+ rv = VNET_API_ERROR_SYSCALL_ERROR_10; // should not happen
+ break;
+ }
+ }
+
+ REPLY_MACRO (VL_API_CICN_API_FIB_ENTRY_NH_ADD_REPLY /*, rmp, mp, rv */ );
+}
+
+/* API message handler */
+static void
+ vl_api_cicn_api_fib_entry_nh_delete_t_handler
+ (vl_api_cicn_api_fib_entry_nh_delete_t * mp)
+{
+ vl_api_cicn_api_fib_entry_nh_delete_reply_t *rmp;
+ vnet_api_error_t rv = CICN_VNET_API_ERROR_NONE;
+
+ cicn_main_t *sm = &cicn_main;
+
+ const char *prefix = (const char *) (&mp->prefix);
+ int faceid = clib_net_to_host_i32 (mp->faceid);
+
+ if ((prefix == NULL) || (strlen (prefix) <= 0) ||
+ (strlen (prefix) > CICN_PARAM_FIB_ENTRY_PFX_WF_BYTES_MAX))
+ {
+ rv = VNET_API_ERROR_INVALID_ARGUMENT;
+ }
+
+ if (rv == CICN_VNET_API_ERROR_NONE)
+ {
+ int ux_rc;
+ cicn_rd_t cicn_rd;
+ ux_rc =
+ cicn_fib_entry_nh_update (prefix, faceid, 0 /*dummy */ , 0 /*!add */ ,
+ &cicn_rd);
+ if (ux_rc == AOK)
+ {
+ CICN_INFRA_CFG_GEN_INCR ();
+ }
+ switch (cicn_rd.rd_cicn_rc)
+ {
+ case CICN_RC_OK:
+ rv = cicn_api_rv_from_unix_rc (ux_rc);
+ break;
+ case CICN_RC_FIB_PFX_COMP_LIMIT:
+ rv = VNET_API_ERROR_SYSCALL_ERROR_1;
+ break;
+ case CICN_RC_FIB_PFX_SIZE_LIMIT:
+ rv = VNET_API_ERROR_SYSCALL_ERROR_2;
+ break;
+ case CICN_RC_FIB_NHOP_LIMIT:
+ rv = VNET_API_ERROR_SYSCALL_ERROR_3;
+ break;
+ case CICN_RC_FACE_UNKNOWN:
+ rv = VNET_API_ERROR_SYSCALL_ERROR_4;
+ break;
+ default:
+ rv = VNET_API_ERROR_SYSCALL_ERROR_10; // should not happen
+ break;
+ }
+ }
+
+ REPLY_MACRO (VL_API_CICN_API_FIB_ENTRY_NH_DELETE_REPLY /*, rmp, mp, rv */ );
+}
+
+/* API message handler */
+static void
+ vl_api_cicn_api_fib_entry_props_get_t_handler
+ (vl_api_cicn_api_fib_entry_props_get_t * mp)
+{
+ vl_api_cicn_api_fib_entry_props_get_reply_t *rmp;
+ vnet_api_error_t rv;
+ unix_shared_memory_queue_t *q;
+
+ cicn_main_t *sm = &cicn_main;
+
+ REPLY_SETUP (VL_API_CICN_API_FIB_ENTRY_PROPS_GET_REPLY, rmp, q, mp);
+
+ rv =
+ cicn_fib_api_entry_props_serialize (rmp,
+ clib_net_to_host_i32 (mp->pagenum));
+
+ REPLY_FINISH (rmp, q, rv);
+}
+
+/*
+ * cli handler for 'cfg fib'
+ */
+static clib_error_t *
+cicn_cli_fib_set_command_fn (vlib_main_t * vm, unformat_input_t * main_input,
+ vlib_cli_command_t * cmd)
+{
+ clib_error_t *cl_err = 0;
+
+ int ux_rc;
+ cicn_rd_t cicn_rd;
+
+ int addpfx = -1;
+ const char *prefix = NULL;
+ int faceid = 0;
+ int weight = CICN_PARAM_FIB_ENTRY_NHOP_WGHT_DFLT;
+
+ /* TODO -- support next-hop weights */
+
+ /* Get a line of input. */
+ unformat_input_t _line_input, *line_input = &_line_input;
+ if (!unformat_user (main_input, unformat_line_input, line_input))
+ {
+ return (0);
+ }
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (addpfx == -1 && unformat (line_input, "add"))
+ {
+ addpfx = 1;
+ }
+ else if (addpfx == -1 && unformat (line_input, "delete"))
+ {
+ addpfx = 0;
+ }
+ else if (addpfx != -1 && unformat (line_input, "prefix %s", &prefix))
+ {
+ ;
+ }
+ else if (addpfx != -1 && unformat (line_input, "face %d", &faceid))
+ {
+ ;
+ }
+ else if (addpfx == 1 && unformat (line_input, "weight %d", &weight))
+ {
+ ;
+ }
+ else
+ {
+ return clib_error_return (0, "Unknown input '%U'",
+ format_unformat_error, line_input);
+ }
+ }
+
+ /* Check parse */
+ if ((prefix == NULL) || (addpfx > 0 && faceid == 0))
+ {
+ return clib_error_return (0, "Please specify prefix and faceid...");
+ }
+
+ if (addpfx &&
+ ((weight < 0) || (weight > CICN_PARAM_FIB_ENTRY_NHOP_WGHT_MAX)))
+ {
+ return clib_error_return (0,
+ "Next-hop weight must be between 0 and %d",
+ (int) CICN_PARAM_FIB_ENTRY_NHOP_WGHT_MAX);
+ }
+
+ ux_rc = cicn_fib_entry_nh_update (prefix, faceid, weight, addpfx, &cicn_rd);
+ if (ux_rc == AOK)
+ {
+ CICN_INFRA_CFG_GEN_INCR ();
+ }
+ else
+ {
+ const char *subcode_str = cicn_rd_str (&cicn_rd);
+ cl_err =
+ clib_error_return (0, "Unable to modify fib: %s (%d)", subcode_str,
+ ux_rc);
+ }
+
+ return (cl_err);
+}
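+
+/*
+ * Illustrative debug-cli usage for the handler above (prefix and face id
+ * are example values only):
+ *
+ *   cicn cfg fib add prefix /foo/bar face 1 weight 16
+ *   cicn cfg fib delete prefix /foo/bar face 1
+ */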
+
+/*
+ * cli handler for 'cicn hello'
+ */
+static clib_error_t *
+cicn_cli_hello_protocol_set_command_fn (vlib_main_t * vm,
+ unformat_input_t * main_input,
+ vlib_cli_command_t * cmd)
+{
+ clib_error_t *cl_err = 0;
+
+ cicn_main_t *sm = &cicn_main;
+ int interval = -1;
+
+ /* Get a line of input. */
+ unformat_input_t _line_input, *line_input = &_line_input;
+ if (!unformat_user (main_input, unformat_line_input, line_input))
+ {
+ return (0);
+ }
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "interval %d", &interval))
+ {
+ ;
+ }
+ else
+ {
+ return (clib_error_return
+ (0, "Unknown input '%U'", format_unformat_error,
+ line_input));
+ break;
+ }
+ }
+
+ /* Check that hello protocol interval > 0 */
+ if (interval > 0)
+ {
+ sm->cicn_hello_interval = interval / 1000.0;
+ sm->cicn_hello_interval_cfgd = 1;
+ vlib_cli_output (vm, "Hello protocol interval was set successfully",
+ sm->cicn_hello_interval);
+ }
+ else
+ {
+ cl_err =
+ clib_error_return (0,
+ "cicn: the hello protocol time interval must be positive");
+ }
+
+ return (cl_err);
+}
+
+/*
+ * cli handler for 'cicn show'
+ */
+static clib_error_t *
+cicn_cli_show_command_fn (vlib_main_t * vm, unformat_input_t * main_input,
+ vlib_cli_command_t * cmd)
+{
+ int i, face_p = 0, fib_p = 0, all_p = 0, detail_p = 0, internal_p = 0;
+
+ /* Get a line of input. */
+ unformat_input_t _line_input, *line_input = &_line_input;
+ if (!unformat_user (main_input, unformat_line_input, line_input))
+ {
+ return (0);
+ }
+
+ /* TODO -- support specific args */
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "face all"))
+ {
+ face_p = 1;
+ }
+ else if (unformat (line_input, "fib all"))
+ {
+ fib_p = 1;
+ }
+ else if (unformat (line_input, "detail"))
+ {
+ detail_p = 1;
+ }
+ else if (unformat (line_input, "internal"))
+ {
+ /* We consider 'internal' a superset, so include 'detail' too */
+ internal_p = 1;
+ detail_p = 1;
+ }
+ else
+ {
+ return (clib_error_return
+ (0, "Unknown input '%U'", format_unformat_error,
+ line_input));
+ break;
+ }
+ }
+
+ /* If nothing specified, show everything */
+ if ((face_p == 0) && (fib_p == 0))
+ {
+ all_p = 1;
+ }
+
+ if (!cicn_main.is_enabled)
+ {
+ if (node_ctl_params.fib_max_size == -1 &&
+ node_ctl_params.pit_max_size == -1 &&
+ node_ctl_params.pit_dflt_lifetime_sec == -1 &&
+ node_ctl_params.pit_min_lifetime_sec == -1 &&
+ node_ctl_params.pit_max_lifetime_sec == -1 &&
+ node_ctl_params.cs_max_size == -1)
+ {
+ cicn_cli_output ("cicn: not enabled");
+ goto done;
+ }
+ vlib_cli_output (vm, "Forwarder %s: %sabled\nPreconfiguration:\n",
+ cicn_infra_fwdr_name.fn_str,
+ cicn_main.is_enabled ? "en" : "dis");
+
+ if (node_ctl_params.fib_max_size != -1)
+ {
+ vlib_cli_output (vm, " FIB:: max entries:%d\n,",
+ node_ctl_params.fib_max_size);
+ }
+ if (node_ctl_params.pit_max_size != -1)
+ {
+ vlib_cli_output (vm, " PIT:: max entries:%d\n",
+ node_ctl_params.pit_max_size);
+ }
+ if (node_ctl_params.pit_dflt_lifetime_sec != -1)
+ {
+ vlib_cli_output (vm, " PIT:: dflt lifetime: %05.3f seconds\n",
+ node_ctl_params.pit_dflt_lifetime_sec);
+ }
+ if (node_ctl_params.pit_min_lifetime_sec != -1)
+ {
+ vlib_cli_output (vm, " PIT:: min lifetime: %05.3f seconds\n",
+ node_ctl_params.pit_min_lifetime_sec);
+ }
+ if (node_ctl_params.pit_max_lifetime_sec != -1)
+ {
+ vlib_cli_output (vm, " PIT:: max lifetime: %05.3f seconds\n",
+ node_ctl_params.pit_max_lifetime_sec);
+ }
+ if (node_ctl_params.cs_max_size != -1)
+ {
+ vlib_cli_output (vm, " CS:: max entries:%d\n",
+ node_ctl_params.cs_max_size);
+ }
+
+ goto done;
+ }
+
+ /* Globals */
+ vlib_cli_output (vm,
+ "Forwarder %s: %sabled\n"
+ " FIB:: max entries:%d\n"
+ " PIT:: max entries:%d,"
+ " lifetime default: %05.3f sec (min:%05.3f, max:%05.3f)\n"
+ " CS:: max entries:%d\n",
+ cicn_infra_fwdr_name.fn_str,
+ cicn_main.is_enabled ? "en" : "dis",
+ cicn_main.fib.fib_capacity,
+ cicn_infra_shard_pit_size * cicn_main.shard_count,
+ ((f64) cicn_main.pit_lifetime_dflt_ms) / SEC_MS,
+ ((f64) cicn_main.pit_lifetime_min_ms) / SEC_MS,
+ ((f64) cicn_main.pit_lifetime_max_ms) / SEC_MS,
+ cicn_infra_shard_cs_size * cicn_main.shard_count);
+
+ vl_api_cicn_api_node_stats_get_reply_t rm = { 0, }, *rmp = &rm;
+ if (cicn_mgmt_node_stats_get (&rm) == AOK)
+ {
+ vlib_cli_output (vm, //compare vl_api_cicn_api_node_stats_get_reply_t_handler block
+ " PIT entries (now): %d\n"
+ " CS entries (now): %d\n"
+ " Forwarding statistics:"
+ " pkts_processed: %d\n"
+ " pkts_interest_count: %d\n"
+ " pkts_data_count: %d\n"
+ " pkts_nak_count: %d\n"
+ " pkts_from_cache_count: %d\n"
+ " pkts_nacked_interests_count: %d\n"
+ " pkts_nak_hoplimit_count: %d\n"
+ " pkts_nak_no_route_count: %d\n"
+ " pkts_no_pit_count: %d\n"
+ " pit_expired_count: %d\n"
+ " cs_expired_count: %d\n"
+ " cs_lru_count: %d\n"
+ " pkts_drop_no_buf: %d\n"
+ " interests_aggregated: %d\n"
+ " interests_retransmitted: %d\n",
+ clib_net_to_host_u64 (rmp->pit_entries_count),
+ clib_net_to_host_u64 (rmp->cs_entries_count),
+ clib_net_to_host_u64 (rmp->pkts_processed),
+ clib_net_to_host_u64 (rmp->pkts_interest_count),
+ clib_net_to_host_u64 (rmp->pkts_data_count),
+ clib_net_to_host_u64 (rmp->pkts_nak_count),
+ clib_net_to_host_u64 (rmp->pkts_from_cache_count),
+ clib_net_to_host_u64
+ (rmp->pkts_nacked_interests_count),
+ clib_net_to_host_u64 (rmp->pkts_nak_hoplimit_count),
+ clib_net_to_host_u64 (rmp->pkts_nak_no_route_count),
+ clib_net_to_host_u64 (rmp->pkts_no_pit_count),
+ clib_net_to_host_u64 (rmp->pit_expired_count),
+ clib_net_to_host_u64 (rmp->cs_expired_count),
+ clib_net_to_host_u64 (rmp->cs_lru_count),
+ clib_net_to_host_u64 (rmp->pkts_drop_no_buf),
+ clib_net_to_host_u64 (rmp->interests_aggregated),
+ clib_net_to_host_u64 (rmp->interests_retx));
+ }
+
+ if (internal_p)
+ {
+ vlib_cli_output (vm, "cicn: config gen %" PRIu64 "",
+ cicn_infra_gshard.cfg_generation);
+
+ for (i = 0; i <= cicn_main.worker_count; i++)
+ {
+ vlib_cli_output (vm, "cicn: worker [%d] gen %" PRIu64 "",
+ i, cicn_infra_shards[i].cfg_generation);
+ }
+ }
+
+ /* TODO -- Just show all faces */
+ if (face_p || all_p)
+ {
+ cicn_face_show (-1, detail_p, internal_p);
+ }
+
+ /* TODO -- just show fib */
+ if (fib_p || all_p)
+ {
+ cicn_fib_show (NULL, detail_p, internal_p);
+ }
+
+done:
+ if (all_p && internal_p)
+ {
+ vlib_cli_output (vm,
+ " Features: multithreading:%d, cs:%d, dpdk-cloning:%d, "
+ "vlib-cloning:%d\n",
+ CICN_FEATURE_MULTITHREAD,
+ CICN_FEATURE_CS,
+ CICN_FEATURE_DPDK_RTEMBUF_CLONING,
+ CICN_FEATURE_VPP_VLIB_CLONING);
+ }
+ return (0);
+}
+
+/*
+ * cli handler for 'pgen'
+ */
+static clib_error_t *
+cicn_cli_pgen_client_set_command_fn (vlib_main_t * vm,
+ unformat_input_t * main_input,
+ vlib_cli_command_t * cmd)
+{
+ cicn_main_t *sm = &cicn_main;
+ ip4_address_t src_addr, dest_addr;
+ int local_port = 0, src_port = 0, dest_port = 0;
+ int rv = VNET_API_ERROR_UNIMPLEMENTED;
+
+ if (sm->is_enabled)
+ {
+ /* That's no good - you only get one or the other */
+ return (clib_error_return (0, "Already enabled for forwarding"));
+ }
+
+ /* Get a line of input. */
+ unformat_input_t _line_input, *line_input = &_line_input;
+ if (!unformat_user (main_input, unformat_line_input, line_input))
+ {
+ return (0);
+ }
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "port %d", &local_port))
+ {
+ ;
+ }
+ else if (unformat (line_input, "dest %U:%d",
+ unformat_ip4_address, &dest_addr, &dest_port))
+ {
+ ;
+ }
+ else if (unformat (line_input, "src %U:%d",
+ unformat_ip4_address, &src_addr, &src_port))
+ {
+ ;
+ }
+ else
+ {
+ return (clib_error_return
+ (0, "Unknown input '%U'", format_unformat_error,
+ line_input));
+ break;
+ }
+ }
+
+ /* Attach our packet-gen node for ip4 udp local traffic */
+ if ((local_port == 0) || (dest_port == 0) || (src_port == 0))
+ {
+ return clib_error_return (0,
+ "Error: must supply local port and rewrite address info");
+ }
+
+ udp_register_dst_port (sm->vlib_main, local_port, icn_pg_node.index, 1);
+
+ sm->pgen_clt_src_addr = src_addr.as_u32;
+ sm->pgen_clt_src_port = htons ((uint16_t) src_port);
+ sm->pgen_clt_dest_addr = dest_addr.as_u32;
+ sm->pgen_clt_dest_port = htons ((uint16_t) dest_port);
+
+ sm->pgen_enabled = 1;
+ rv = 0;
+
+ switch (rv)
+ {
+ case 0:
+ break;
+
+ case VNET_API_ERROR_UNIMPLEMENTED:
+ return clib_error_return (0, "Unimplemented, NYI");
+ break;
+
+ default:
+ return clib_error_return (0, "cicn enable_disable returned %d", rv);
+ }
+
+ return 0;
+}
+
+/*
+ * cli handler for 'pgen'
+ */
+static clib_error_t *
+cicn_cli_pgen_server_set_command_fn (vlib_main_t * vm,
+ unformat_input_t * main_input,
+ vlib_cli_command_t * cmd)
+{
+ clib_error_t *cl_err;
+ int rv = CICN_VNET_API_ERROR_NONE;
+
+ cicn_main_t *sm = &cicn_main;
+ int local_port = 0;
+ int payload_size = 0;
+
+ if (sm->is_enabled)
+ {
+ /* That's no good - you only get one or the other */
+ return (clib_error_return (0, "Already enabled for forwarding"));
+ }
+
+ /* Get a line of input. */
+ unformat_input_t _line_input, *line_input = &_line_input;
+ if (!unformat_user (main_input, unformat_line_input, line_input))
+ {
+ return (0);
+ }
+
+ /* Parse the arguments */
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "port %d", &local_port))
+ {
+ ;
+ }
+ else if (unformat (line_input, "size %d", &payload_size))
+ {
+ if (payload_size > 1200)
+ {
+ return (clib_error_return (0,
+ "Payload size must be <= 1200 bytes..."));
+ }
+ }
+ else
+ {
+ return (clib_error_return
+ (0, "Unknown input '%U'", format_unformat_error,
+ line_input));
+ break;
+ }
+ }
+
+ /* Attach our packet-gen node for ip4 udp local traffic */
+ if (local_port == 0 || payload_size == 0)
+ {
+ return clib_error_return (0,
+ "Error: must supply local port and payload size");
+ }
+
+ /* Allocate the buffer with the actual content payload TLV */
+ vlib_buffer_alloc (vm, &sm->pgen_svr_buffer_idx, 1);
+ vlib_buffer_t *rb = NULL;
+ rb = vlib_get_buffer (vm, sm->pgen_svr_buffer_idx);
+
+ /* Initialize the buffer data with zeros */
+ memset (rb->data, 0, payload_size);
+ C_PUTINT16 (rb->data, CICN_TLV_PAYLOAD);
+ C_PUTINT16 (rb->data + 2, payload_size - 4);
+ rb->current_length = payload_size;
+
+ /* Register the UDP port of the server */
+ udp_register_dst_port (sm->vlib_main, local_port,
+ icn_pg_server_node.index, 1);
+
+ sm->pgen_svr_enabled = 1;
+
+ switch (rv)
+ {
+ case 0:
+ cl_err = 0;
+ break;
+
+ case VNET_API_ERROR_UNIMPLEMENTED:
+ cl_err = clib_error_return (0, "Unimplemented, NYI");
+ break;
+
+ default:
+ cl_err = clib_error_return (0, "cicn pgen server returned %d", rv);
+ }
+
+ return cl_err;
+}
+
+/* API message handler */
+static void
+vl_api_cicn_api_test_run_get_t_handler (vl_api_cicn_api_test_run_get_t * mp)
+{
+ vl_api_cicn_api_test_run_get_reply_t *rmp;
+ cicn_main_t *sm = &cicn_main;
+ vnet_api_error_t vaec = CICN_VNET_API_ERROR_NONE;
+
+ unix_shared_memory_queue_t *q =
+ vl_api_client_index_to_input_queue (mp->client_index);
+ if (!q)
+ {
+ return;
+ }
+
+ rmp = vl_msg_api_alloc (sizeof (*rmp));
+ rmp->_vl_msg_id =
+ ntohs (sm->msg_id_base + VL_API_CICN_API_TEST_RUN_GET_REPLY);
+ rmp->context = mp->context;
+ if (sm->test_cicn_api_handler == NULL)
+ {
+ vaec = VNET_API_ERROR_UNIMPLEMENTED;
+ }
+ else
+ {
+ test_cicn_api_op_t test_cicn_api_op = {.reply = rmp };
+ vaec = (sm->test_cicn_api_handler) (&test_cicn_api_op);
+ }
+
+ rmp->retval = clib_host_to_net_i32 (vaec);
+
+ vl_msg_api_send_shmem (q, (u8 *) & rmp);
+}
+
+static void
+cicn_cli_test_results_output (vl_api_cicn_api_test_run_get_reply_t * rmp)
+{
+ u8 *strbuf = NULL;
+ i32 nentries = clib_net_to_host_i32 (rmp->nentries);
+ cicn_api_test_suite_results_t *suites =
+ (cicn_api_test_suite_results_t *) & rmp->suites[0];
+
+ int i;
+ for (i = 0; i < nentries; i++)
+ {
+ cicn_api_test_suite_results_t *suite = &suites[i];
+ int ntests = clib_net_to_host_i32 (suite->ntests);
+ int nsuccesses = clib_net_to_host_i32 (suite->nsuccesses);
+ int nfailures = clib_net_to_host_i32 (suite->nfailures);
+ int nskipped = clib_net_to_host_i32 (suite->nskipped);
+ int j, cnt;
+
+ strbuf = format (strbuf,
+ "Suite %s: %d tests: %d successes, %d failures, %d skipped\n",
+ suite->suitename, ntests, nsuccesses, nfailures,
+ nskipped);
+
+ if (nfailures != 0)
+ {
+ strbuf = format (strbuf, " Failed Test(s):");
+ for (j = 0, cnt = 0; j < 8 * sizeof (suite->failures_mask); j++)
+ {
+ if ((suite->failures_mask[j / 8] & (1 << (j % 8))) == 0)
+ {
+ continue;
+ }
+ cnt++;
+ strbuf =
+ format (strbuf, " %d%s", j + 1,
+ (cnt < nfailures) ? ", " : " ");
+ }
+ strbuf = format (strbuf, "\n");
+ }
+ if (nskipped != 0)
+ {
+ strbuf = format (strbuf, " Skipped Test(s):");
+ for (j = 0, cnt = 0; j < 8 * sizeof (suite->skips_mask); j++)
+ {
+ if ((suite->skips_mask[j / 8] & (1 << (j % 8))) == 0)
+ {
+ continue;
+ }
+ cnt++;
+ strbuf =
+ format (strbuf, " %d%s", j + 1,
+ (cnt < nskipped) ? ", " : " ");
+ }
+ strbuf = format (strbuf, "\n");
+ }
+ }
+
+ vec_terminate_c_string (strbuf);
+ vlib_cli_output (cicn_main.vlib_main, "%s", strbuf);
+ if (strbuf)
+ {
+ vec_free (strbuf);
+ }
+}
+
+static clib_error_t *
+cicn_cli_test_fn (vlib_main_t * vm, unformat_input_t * main_input,
+ vlib_cli_command_t * cmd)
+{
+ clib_error_t *cl_err;
+ int rv;
+
+ cicn_main_t *sm = &cicn_main;
+
+ if (sm->test_cicn_api_handler == NULL)
+ {
+ rv = VNET_API_ERROR_UNIMPLEMENTED;
+ }
+ else
+ {
+ // leverage API message for CLI
+ vl_api_cicn_api_test_run_get_reply_t rmp = { 0, };
+ test_cicn_api_op_t test_cicn_api_op = {.reply = &rmp };
+ rv = (sm->test_cicn_api_handler) (&test_cicn_api_op);
+ cicn_cli_test_results_output (test_cicn_api_op.reply);
+ }
+
+ switch (rv)
+ {
+ case 0:
+ cl_err = 0;
+ break;
+
+ case VNET_API_ERROR_UNIMPLEMENTED:
+ cl_err =
+ clib_error_return (0, "Unimplemented, test modules not linked");
+ break;
+
+ default:
+ cl_err = clib_error_return (0, "cicn pgen server returned %d", rv);
+ }
+
+ return cl_err;
+}
+
+
+
+/* List of message types that this plugin understands */
+
+#define foreach_cicn_plugin_api_msg \
+_(CICN_API_NODE_PARAMS_SET, cicn_api_node_params_set) \
+_(CICN_API_NODE_PARAMS_GET, cicn_api_node_params_get) \
+_(CICN_API_NODE_STATS_GET, cicn_api_node_stats_get) \
+_(CICN_API_FACE_ADD, cicn_api_face_add) \
+_(CICN_API_FACE_DELETE, cicn_api_face_delete) \
+_(CICN_API_FACE_PARAMS_GET, cicn_api_face_params_get) \
+_(CICN_API_FACE_PROPS_GET, cicn_api_face_props_get) \
+_(CICN_API_FACE_STATS_GET, cicn_api_face_stats_get) \
+_(CICN_API_FACE_EVENTS_SUBSCRIBE, cicn_api_face_events_subscribe) \
+_(CICN_API_FIB_ENTRY_NH_ADD, cicn_api_fib_entry_nh_add) \
+_(CICN_API_FIB_ENTRY_NH_DELETE, cicn_api_fib_entry_nh_delete) \
+_(CICN_API_FIB_ENTRY_PROPS_GET, cicn_api_fib_entry_props_get) \
+_(CICN_API_TEST_RUN_GET, cicn_api_test_run_get)
+
+/* Set up the API message handling tables */
+clib_error_t *
+cicn_api_plugin_hookup (vlib_main_t * vm)
+{
+ cicn_main_t *sm = &cicn_main;
+
+ /* Get a correctly-sized block of API message decode slots */
+ u8 *name = format (0, "cicn_%08x%c", api_version, 0);
+ sm->msg_id_base = vl_msg_api_get_msg_ids ((char *) name,
+ VL_MSG_FIRST_AVAILABLE);
+ vec_free (name);
+
+#define _(N,n) \
+ vl_msg_api_set_handlers(sm->msg_id_base + VL_API_##N, \
+ #n, \
+ vl_api_##n##_t_handler, \
+ vl_noop_handler, \
+ vl_api_##n##_t_endian, \
+ vl_api_##n##_t_print, \
+ sizeof(vl_api_##n##_t), 1);
+ foreach_cicn_plugin_api_msg;
+#undef _
+
+ int smart_fib_update = 0;
+#if CICN_FEATURE_MULTITHREAD // smart fib update believed working, not tested
+ smart_fib_update = 1;
+#endif // CICN_FEATURE_MULTITHREAD
+ /*
+ * Thread-safe API messages
+ * i.e. disable thread synchronization
+ */
+ api_main_t *am = &api_main;
+ if (smart_fib_update)
+ {
+ am->is_mp_safe[sm->msg_id_base + VL_API_CICN_API_FIB_ENTRY_NH_ADD] = 1;
+ am->is_mp_safe[sm->msg_id_base + VL_API_CICN_API_FIB_ENTRY_NH_DELETE] =
+ 1;
+ }
+
+ return 0;
+}
+
+
+/* cli declaration for 'control' (root path of multiple commands, for help) */
+VLIB_CLI_COMMAND (cicn_cli_node_ctl_command, static) =
+{
+.path = "cicn control",.short_help = "cicn control"};
+
+/* cli declaration for 'control start' */
+VLIB_CLI_COMMAND (cicn_cli_node_ctl_start_set_command, static) =
+{
+.path = "cicn control start",.short_help = "cicn control start",.function =
+ cicn_cli_node_ctl_start_set_command_fn,};
+
+/* cli declaration for 'control stop' */
+VLIB_CLI_COMMAND (cicn_cli_node_ctl_stop_set_command, static) =
+{
+.path = "cicn control stop",.short_help = "cicn control stop",.function =
+ cicn_cli_node_ctl_stop_set_command_fn,};
+
+/* cli declaration for 'control param' */
+VLIB_CLI_COMMAND (cicn_cli_node_ctl_param_set_command, static) =
+{
+.path = "cicn control param",.short_help =
+ "cicn control param { pit { size <entries> | { dfltlife | minlife | maxlife } <seconds> } | fib size <entries> | cs size <entries> }\n",.function
+ = cicn_cli_node_ctl_param_set_command_fn,};
+
+/* cli declaration for 'enable-disable'*/
+VLIB_CLI_COMMAND (cicn_cli_node_enable_disable_set_command, static) =
+{
+.path = "cicn enable-disable",.short_help =
+ "cicn enable-disable [disable]",.function =
+ cicn_cli_node_enable_disable_set_command_fn,};
+
+/* cli declaration for 'cfg' */
+VLIB_CLI_COMMAND (cicn_cli_set_command, static) =
+{
+.path = "cicn cfg",.short_help = "cicn cfg",};
+
+/* cli declaration for 'cfg name' */
+VLIB_CLI_COMMAND (cicn_cli_node_name_set_command, static) =
+{
+.path = "cicn cfg name",.short_help =
+ "cicn cfg name <name> [delete]",.function =
+ cicn_cli_node_name_set_command_fn,.long_help =
+ "Add (or remove) an administrative name for this router\n" "\n"
+ "Multiple names are allowed. (NYI...)\n",};
+
+/* cli declaration for 'cfg salt' */
+VLIB_CLI_COMMAND (cicn_cli_salt_set_command, static) =
+{
+.path = "cicn cfg salt",.short_help = "cicn cfg salt <number>",.function =
+ cicn_cli_salt_set_command_fn,};
+
+/* cli declaration for 'cfg face' */
+VLIB_CLI_COMMAND (cicn_cli_face_set_command, static) =
+{
+.path = "cicn cfg face",.short_help =
+ "cicn cfg face { add local <addr:port> remote <addr:port> | "
+ "id <id> { delete | admin { down | up } | hello { enable | disable } }",.function
+ = cicn_cli_face_set_command_fn,};
+
+/* cli declaration for 'cfg fib' */
+VLIB_CLI_COMMAND (cicn_cli_fib_set_command, static) =
+{
+.path = "cicn cfg fib",.short_help =
+ "cicn cfg fib {add | delete } prefix <prefix> face <faceid> "
+ "[weight <weight>]",.function = cicn_cli_fib_set_command_fn,};
+
+/* cli declaration for 'cfg hello-protocol' */
+VLIB_CLI_COMMAND (cicn_cli_hello_protocol_set_command, static) =
+{
+.path = "cicn cfg hello-protocol",.short_help =
+ "cicn cfg hello-protocol interval <num_of_mseconds>",.function =
+ cicn_cli_hello_protocol_set_command_fn,};
+
+/* cli declaration for 'show' */
+VLIB_CLI_COMMAND (cicn_cli_show_command, static) =
+{
+.path = "cicn show",.short_help =
+ "cicn show [face ['all' | faceid]] "
+ "[fib ['all' | prefix]] "
+ "[detail] [internal]",.function = cicn_cli_show_command_fn,};
+
+/* cli declaration for 'cicn pgen client' */
+VLIB_CLI_COMMAND (cicn_cli_pgen_client_set_command, static) =
+{
+.path = "cicn pgen client",.short_help =
+ "cicn pgen client port <port> src <addr:port> dest <addr:port>",.long_help
+ = "Run icn in packet-gen client mode\n",.function =
+ cicn_cli_pgen_client_set_command_fn,};
+
+/* cli declaration for 'cicn pgen server' */
+VLIB_CLI_COMMAND (cicn_cli_pgen_server_set_command, static) =
+{
+.path = "cicn pgen server",.short_help =
+ "cicn pgen server port <port> size <content_payload_size>",.long_help =
+ "Run icn in packet-gen server mode\n",.function =
+ cicn_cli_pgen_server_set_command_fn,};
+
+/* cli declaration for 'test' */
+VLIB_CLI_COMMAND (cicn_cli_test_command, static) =
+{
+.path = "cicn test",.short_help = "cicn test",.function = cicn_cli_test_fn,};
diff --git a/cicn-plugin/cicn/cicn_mgmt.h b/cicn-plugin/cicn/cicn_mgmt.h
new file mode 100644
index 00000000..0283a96c
--- /dev/null
+++ b/cicn-plugin/cicn/cicn_mgmt.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * cicn plugin management plane (binary api, dbg-cli) definitions
+ */
+#ifndef _CICN_MGMT_H_
+#define _CICN_MGMT_H_ 1
+
+#if !CICN_VPP_PLUGIN
+#error "cicn-internal file included externally"
+#endif
+
+/* Stats for the forwarding node,
+ * which end up called "error" even though they aren't...
+ */
+#define foreach_icnfwd_error \
+_(PROCESSED, "ICN packets processed") \
+_(INTERESTS, "ICN interests processed") \
+_(DATAS, "ICN data msgs forwarded") \
+_(NAKS, "ICN Nak msgs forwarded") \
+_(CACHED, "ICN cached data replies") \
+_(NACKED_INTERESTS, "ICN Nak msgs originated") \
+_(NO_ROUTE, "ICN no-route errors") \
+_(HOPLIMIT_EXCEEDED, "ICN hoplimit exceeded errors") \
+_(NO_PIT, "ICN no PIT entry drops") \
+_(PIT_EXPIRED, "ICN expired PIT entries") \
+_(CS_EXPIRED, "ICN expired CS entries") \
+_(CS_LRU, "ICN LRU CS entries freed") \
+_(NO_BUFS, "No packet buffers") \
+_(INTEREST_AGG, "Interests aggregated") \
+_(INT_RETRANS, "Interest retransmissions") \
+_(INT_COUNT, "Interests in PIT") \
+_(CS_COUNT, "CS entries") \
+_(CONTROL_REQUESTS, "ICN control request entries") \
+_(CONTROL_REPLIES, "ICN control reply entries") \
+_(HELLO_INTERESTS_RCVD, "ICN hello protocol interests received") \
+_(HELLO_DMSGS_SENT, "ICN hello protocol data msgs sent") \
+_(HELLO_DMSGS_RCVD, "ICN hello protocol data msgs received")
+
+typedef enum
+{
+#define _(sym,str) ICNFWD_ERROR_##sym,
+ foreach_icnfwd_error
+#undef _
+ ICNFWD_N_ERROR,
+} icnfwd_error_t;
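+
+/*
+ * Sketch of how the X-macro above expands (illustrative, not compiled
+ * here): each counter becomes an enum symbol, and the same list can be
+ * reused to build the matching string table for vlib error counters:
+ *
+ *   enum: ICNFWD_ERROR_PROCESSED, ICNFWD_ERROR_INTERESTS, ...,
+ *         ICNFWD_N_ERROR
+ *
+ *   static char *icnfwd_error_strings[] = {
+ *   #define _(sym,str) str,
+ *     foreach_icnfwd_error
+ *   #undef _
+ *   };
+ */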
+
+/*
+ * Stats for the packet-distribution node
+ */
+#define foreach_icndist_error \
+_(PROCESSED, "ICN packets dist") \
+_(INTERESTS, "ICN interests dist") \
+_(DATAS, "ICN data msgs dist") \
+_(DROPS, "ICN msgs dropped")
+
+typedef enum
+{
+#define _(sym,str) ICNDIST_ERROR_##sym,
+ foreach_icndist_error
+#undef _
+ ICNDIST_N_ERROR,
+} icndist_error_t;
+
+/*
+ * Stats for the background hello process node
+ */
+#define foreach_icnhelloprocess_error \
+_(HELLO_INTERESTS_SENT, "ICN hello protocol interests sent")
+
+typedef enum
+{
+#define _(sym,str) ICNHELLOPROCESS_ERROR_##sym,
+ foreach_icnhelloprocess_error
+#undef _
+ ICNHELLOPROCESS_N_ERROR,
+} icnhelloprocess_error_t;
+
+/*
+ * Hide the details of cli output from the cicn-aware modules
+ */
+int cicn_cli_output (const char *fmt, ...)
+ __attribute__ ((format (printf, 1, 2)));
+
+clib_error_t *cicn_api_plugin_hookup (vlib_main_t * vm);
+
+#endif // _CICN_MGMT_H_
diff --git a/cicn-plugin/cicn/cicn_msg_enum.h b/cicn-plugin/cicn/cicn_msg_enum.h
new file mode 100644
index 00000000..334ed851
--- /dev/null
+++ b/cicn-plugin/cicn/cicn_msg_enum.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * cicn_msg_enum.h - skeleton vpp engine plug-in message enumeration
+ */
+
+#ifndef included_cicn_msg_enum_h
+#define included_cicn_msg_enum_h
+
+#include <vppinfra/byte_order.h>
+
+#define vl_msg_id(n,h) n,
+typedef enum
+{
+#include <cicn/cicn_all_api_h.h>
+ /* We'll want to know how many messages IDs we need... */
+ VL_MSG_FIRST_AVAILABLE,
+} vl_msg_id_t;
+#undef vl_msg_id
+
+#endif /* included_cicn_msg_enum_h */
diff --git a/cicn-plugin/cicn/cicn_params.h b/cicn-plugin/cicn/cicn_params.h
new file mode 100644
index 00000000..4110351e
--- /dev/null
+++ b/cicn-plugin/cicn/cicn_params.h
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * #defines intended to be settable by users of the plugin, for build-time
+ * changes to cicn plugin behavior. (For example, increase or decrease
+ * PIT/CS size.)
+ */
+#ifndef _CICN_PARAM_H_
+#define _CICN_PARAM_H_ 1
+
+#if !CICN_VPP_PLUGIN
+#error "cicn-internal file included externally"
+#endif
+
+/*
+ * Features
+ */
+#define CICN_FEATURE_MULTITHREAD 0 // multiple worker support enabled?
+#define CICN_FEATURE_CS 1 // tri-valued: see cicn_cs_enabled()
+#define CICN_FEATURE_DPDK_RTEMBUF_CLONING 1 // dpdk rtembuf cloning enabled?
+#define CICN_FEATURE_VPP_VLIB_CLONING 0 // vpp vlib cloning enabled?
+
+/*
+ * API compile-time parameters
+ */
+#define CICN_PARAM_API_EVENT_SUBSCRIBERS_MAX 32
+
+/*
+ * Face compile-time parameters
+ */
+#define CICN_PARAM_FACES_MAX 64
+
+/*
+ * Hash table compile-time parameters
+ * Overall max key size we're willing to deal with -- TODO --
+ */
+#define CICN_PARAM_HASHTB_KEY_BYTES_MAX 1024
+
+/*
+ * FIB compile-time parameters
+ */
+#define CICN_PARAM_FIB_ENTRIES_MIN 32
+#define CICN_PARAM_FIB_ENTRIES_DFLT 512
+#define CICN_PARAM_FIB_ENTRIES_MAX (2 * 1024 * 1024)
+
+// wirefmt bytes (no lead name tlv)
+#define CICN_PARAM_FIB_ENTRY_PFX_WF_BYTES_MAX 200
+#define CICN_PARAM_FIB_ENTRY_PFX_COMPS_MAX 8
+
+// Max next-hops supported in a FIB entry
+#define CICN_PARAM_FIB_ENTRY_NHOPS_MAX 4
+
+// Default and limit on weight, whatever weight means
+#define CICN_PARAM_FIB_ENTRY_NHOP_WGHT_DFLT 0x10
+#define CICN_PARAM_FIB_ENTRY_NHOP_WGHT_MAX 0xff
+
+/*
+ * PIT compile-time parameters
+ */
+#define CICN_PARAM_PIT_ENTRIES_MIN 1024
+#define CICN_PARAM_PIT_ENTRIES_DFLT (1024 * 128)
+#define CICN_PARAM_PIT_ENTRIES_MAX (2 * 1024 * 1024)
+
+// aggregation limit (interest previous hops)
+#define CICN_PARAM_PIT_ENTRY_PHOPS_MAX 7
+
+// PIT lifetime limits on API overrides (in seconds, long-float type)
+#define CICN_PARAM_PIT_LIFETIME_BOUND_MIN_SEC 0.100L
+#define CICN_PARAM_PIT_LIFETIME_BOUND_MAX_SEC 20.000L
+
+// PIT lifetime params if not set at API (in mseconds, integer type)
+#define CICN_PARAM_PIT_LIFETIME_DFLT_MIN_MS 200
+#define CICN_PARAM_PIT_LIFETIME_DFLT_DFLT_MS 2000
+#define CICN_PARAM_PIT_LIFETIME_DFLT_MAX_MS 2000
+
+// TODO -- whether to restrict retransmissions. (ccnx does not, ndn does [we think])
+#define CICN_PARAM_PIT_RETRANS_TIME_DFLT 0.3
+
+/*
+ * CS compile-time parameters
+ */
+#define CICN_PARAM_CS_ENTRIES_MIN 0 // can disable CS
+#define CICN_PARAM_CS_ENTRIES_DFLT (4 * 1024)
+#define CICN_PARAM_CS_ENTRIES_MAX (1024 * 1024)
+
+#define CICN_PARAM_CS_LRU_DEFAULT (16 * 1024)
+
+/* CS lifetime defines, in mseconds, integer type */
+#define CICN_PARAM_CS_LIFETIME_MIN 1000
+#define CICN_PARAM_CS_LIFETIME_DFLT (5 * 60 * 1000) // 300 seconds
+#define CICN_PARAM_CS_LIFETIME_MAX (24 * 3600 * 1000) //TODO: 24 hours...
+
+/*
+ * Hello compile-time parameters
+ */
+#define CICN_PARAM_HELLO_MISSES_DOWN_DFLT 10
+// default frequency of sending hello packets
+#define CICN_PARAM_HELLO_POLL_INTERVAL_DFLT 1.0 /* seconds */
+
+extern int cicn_buftrc;
+#define BUFTRC(str, bi0) if (cicn_buftrc) printf("-%8s: %8.8d\n", str, bi0);
+#define GBI(vm,b0) (b0 ? vlib_get_buffer_index(vm, b0) : 0)
+
+#endif // _CICN_PARAM_H_
+
diff --git a/cicn-plugin/cicn/cicn_parser.c b/cicn-plugin/cicn/cicn_parser.c
new file mode 100644
index 00000000..f9889af4
--- /dev/null
+++ b/cicn-plugin/cicn/cicn_parser.c
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * cicn_parser.c: Fast-path, vpp-aware ICN packet parser, used in cicn forwarder.
+ */
+
+#include <stdlib.h>
+#include <errno.h>
+#include <assert.h>
+#include <inttypes.h>
+
+#include <vlib/vlib.h>
+
+#include <cicn/cicn.h>
+#include <cicn/cicn_parser.h>
+
+/*
+ * Given name string in ascii /a/b/... format, convert to wire-format
+ * list of components (without wrapper tlv), in obuf.
+ * If is_chunk_name, make last component chunk_type instead of generic type.
+ */
+int
+cicn_parse_name_comps_from_str_generic (uint8_t * obuf, int obuflen,
+ const char *str, int chunk_name,
+ cicn_rd_t * cicn_rd)
+{
+
+ int ux_rc;
+ cicn_rc_e crc = CICN_RC_OK;
+
+ int ret;
+
+ ret =
+ cicn_parse_name_comps_from_str_inline (obuf, obuflen, str, chunk_name);
+
+ if (ret >= 0)
+ {
+ ux_rc = AOK;
+ }
+ else
+ {
+ ux_rc = -ret;
+ switch (ux_rc)
+ {
+ case ENOSPC:
+ crc = CICN_RC_FIB_PFX_SIZE_LIMIT;
+ break;
+ default:
+ break;
+ }
+ }
+
+ cicn_rd_set (cicn_rd, crc, ux_rc);
+ return (ret);
+}
+
+/*
+ * Given name string in ascii /a/b/... format, convert to wire-format
+ * list of components (without wrapper tlv), in obuf.
+ */
+int
+cicn_parse_name_comps_from_str (uint8_t * obuf, int obuflen, const char *str,
+ cicn_rd_t * cicn_rd)
+{
+ int ret;
+
+ ret =
+ cicn_parse_name_comps_from_str_generic (obuf, obuflen, str,
+ 0 /*!chunk_name */ , cicn_rd);
+ return (ret);
+}
+
+/*
+ * Given name string in ascii /a/b/... format, convert to full
+ * wire-format (including wrapper tlv), in obuf.
+ * If is_chunk_name, make last component chunk_type instead of generic type.
+ */
+int
+cicn_parse_name_from_str (uint8_t * obuf, int obuflen, const char *str,
+ int is_chunk_name, cicn_rd_t * cicn_rd)
+{
+ int ret;
+ if (obuflen < CICN_TLV_HDR_LEN)
+ {
+ cicn_rd_set (cicn_rd, CICN_RC_OK, EINVAL);
+ return (-cicn_rd->rd_ux_rc);
+ }
+ C_PUTINT16 (obuf, CICN_TLV_NAME);
+ ret =
+ cicn_parse_name_comps_from_str_generic (obuf + CICN_TLV_HDR_LEN,
+ obuflen - CICN_TLV_HDR_LEN, str,
+ is_chunk_name, cicn_rd);
+ if (ret >= 0)
+ { // length
+ C_PUTINT16 (&obuf[CICN_TLV_TYPE_LEN], ret);
+ ret += CICN_TLV_HDR_LEN;
+ }
+ return (ret);
+}
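+
+/*
+ * Usage sketch for the helpers above (illustrative; buffer size and name
+ * are example values):
+ *
+ *   uint8_t wf_name[CICN_PARAM_FIB_ENTRY_PFX_WF_BYTES_MAX + CICN_TLV_HDR_LEN];
+ *   cicn_rd_t rd;
+ *   int len = cicn_parse_name_from_str (wf_name, sizeof (wf_name),
+ *                                       "/foo/bar", 0, &rd);
+ *   if (len < 0)
+ *     clib_warning ("name parse failed: %s (%d)",
+ *                   cicn_rd_str (&rd), rd.rd_ux_rc);
+ */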
+
diff --git a/cicn-plugin/cicn/cicn_parser.h b/cicn-plugin/cicn/cicn_parser.h
new file mode 100644
index 00000000..58f1a11e
--- /dev/null
+++ b/cicn-plugin/cicn/cicn_parser.h
@@ -0,0 +1,743 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * cicn_parser.h: Fast-path, vpp-aware ICN packet parser, used in cicn forwarder.
+ */
+
+#ifndef _CICN_PARSER_H_
+#define _CICN_PARSER_H_ 1
+
+#if !CICN_VPP_PLUGIN
+#error "cicn-internal file included externally"
+#endif
+
+#include <ctype.h>
+
+#include <vlib/vlib.h>
+
+#include <cicn/cicn_types.h>
+#include <cicn/cicn_std.h>
+
+#ifndef AOK
+# define AOK 0
+#endif
+
+/*
+ * Useful macros for working with integers in possibly-unaligned
+ * buffers and possibly-byteswapped architectures
+ */
+#define C_GETINT16(n, p) do { \
+ register uint8_t *t_p = (uint8_t *)(p); \
+ (n) = ((uint16_t)t_p[0] << 8) \
+ | ((uint16_t)t_p[1]); \
+ } while (0)
+
+#define C_GETINT32(n, p) do { \
+ register uint8_t *t_p = (uint8_t *)(p); \
+ (n) = ((uint32_t)t_p[0] << 24) \
+ | ((uint32_t)t_p[1] << 16) \
+ | ((uint32_t)t_p[2] << 8) \
+ | ((uint32_t)t_p[3]); \
+ } while (0)
+
+#define C_GETINT64(n, p) do { \
+ register uint8_t *t_p = (uint8_t *)(p); \
+ (n) = ((uint64_t)t_p[0] << 56) \
+ | ((uint64_t)t_p[1] << 48) \
+ | ((uint64_t)t_p[2] << 40) \
+ | ((uint64_t)t_p[3] << 32) \
+ | ((uint64_t)t_p[4] << 24) \
+ | ((uint64_t)t_p[5] << 16) \
+ | ((uint64_t)t_p[6] << 8) \
+ | ((uint64_t)t_p[7]); \
+ } while (0)
+
+#define C_PUTINT16(p, n) do { \
+ register uint16_t t_n = (uint16_t)(n); \
+ register uint8_t *t_p = (uint8_t *)(p); \
+ t_p[0] = (uint8_t)(t_n >> 8); \
+ t_p[1] = (uint8_t)(t_n); \
+ } while (0)
+
+#define C_PUTINT32(p, n) do { \
+ register uint32_t t_n = (uint32_t)(n); \
+ register uint8_t *t_p = (uint8_t *)(p); \
+ t_p[0] = (uint8_t)(t_n >> 24); \
+ t_p[1] = (uint8_t)(t_n >> 16); \
+ t_p[2] = (uint8_t)(t_n >> 8); \
+ t_p[3] = (uint8_t)t_n; \
+ } while (0)
+
+#define C_PUTINT64(p, n) do { \
+ register uint64_t t_n = (uint64_t)(n); \
+ register uint8_t *t_p = (uint8_t *)(p); \
+ t_p[0] = (uint8_t)(t_n >> 56); \
+ t_p[1] = (uint8_t)(t_n >> 48); \
+ t_p[2] = (uint8_t)(t_n >> 40); \
+ t_p[3] = (uint8_t)(t_n >> 32); \
+ t_p[4] = (uint8_t)(t_n >> 24); \
+ t_p[5] = (uint8_t)(t_n >> 16); \
+ t_p[6] = (uint8_t)(t_n >> 8); \
+ t_p[7] = (uint8_t)t_n; \
+ } while (0)
+
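+/*
+ * Usage sketch for the byte-order macros above (illustrative): writing and
+ * reading a big-endian 16-bit TLV header at an arbitrary buffer offset.
+ *
+ *   uint8_t buf[CICN_TLV_HDR_LEN];
+ *   uint16_t t, l;
+ *   C_PUTINT16 (buf, CICN_TLV_NAME);
+ *   C_PUTINT16 (buf + CICN_TLV_TYPE_LEN, 0);
+ *   C_GETINT16 (t, buf);
+ *   C_GETINT16 (l, buf + CICN_TLV_TYPE_LEN);
+ */
+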
+/*
+ * Key type codes for header, header tlvs, body tlvs, and child tlvs
+ */
+
+enum cicn_pkt_type_e
+{
+ CICN_PKT_TYPE_INTEREST = 0,
+ CICN_PKT_TYPE_CONTENT = 1,
+ CICN_PKT_TYPE_NAK = 2,
+ CICN_PKT_TYPE_CONTROL = 0xA4,
+ CICN_PKT_TYPE_CONTROL_REQUEST = CICN_PKT_TYPE_CONTROL + 1,
+ CICN_PKT_TYPE_CONTROL_REPLY
+};
+
+enum cicn_msg_type_e
+{
+ CICN_MSG_TYPE_INTEREST = 1,
+ CICN_MSG_TYPE_CONTENT = 2,
+ CICN_MSG_TYPE_CONTROL = 0xBEEF,
+ CICN_MSG_TYPE_ECHO_REQUEST = CICN_MSG_TYPE_CONTROL + 1,
+ CICN_MSG_TYPE_ECHO_REPLY,
+ CICN_MSG_TYPE_TRACEROUTE_REQUEST,
+ CICN_MSG_TYPE_TRACEROUTE_REPLY,
+};
+
+enum cicn_hdr_tlv_e
+{
+ CICN_HDR_TLV_INT_LIFETIME = 1,
+ CICN_HDR_TLV_CACHE_TIME = 2,
+};
+
+enum cicn_tlv_e
+{
+ CICN_TLV_NAME = 0,
+ CICN_TLV_PAYLOAD = 1,
+ CICN_TLV_PAYLOAD_TYPE = 5,
+ CICN_TLV_MSG_EXPIRY = 6,
+};
+
+enum cicn_name_comp_e
+{
+ CICN_NAME_COMP = 1,
+ CICN_NAME_COMP_CHUNK = 16
+};
+
+enum cicn_msg_err_e
+{
+ CICN_MSG_ERR_NOROUTE = 1,
+ CICN_MSG_ERR_HOPLIM = 2,
+ CICN_MSG_ERR_RESOURCES = 3,
+ CICN_MSG_ERR_CONGESTION = 6,
+ CICN_MSG_ERR_MTU = 7
+};
+
+/*
+ * Fixed packet header
+ */
+typedef struct __attribute__ ((__packed__)) cicn_packet_hdr_s
+{
+ uint8_t pkt_ver;
+ uint8_t pkt_type;
+ uint16_t pkt_len;
+ uint8_t pkt_hop_limit;
+ uint8_t pkt_reserved;
+#define pkt_nack_code pkt_reserved
+ uint8_t pkt_flags;
+ uint8_t pkt_hdr_len;
+} cicn_packet_hdr_t;
+
+typedef struct cicn_pkt_hdr_desc_s
+{
+ int16_t ph_lifetime_idx;
+} cicn_pkt_hdr_desc_t;
+
+/* Simple min packet len */
+#define CICN_PACKET_MIN_LEN \
+ (sizeof(cicn_packet_hdr_t) + /*msg tlv*/ 4 + \
+ /*name tlv*/ 4 + /*name comp*/2 + 1)
+
+/* Protocol versions */
+#define CICN_PROTO_VERSION_1 0x01
+#define CICN_PROTO_VERSION_CURRENT CICN_PROTO_VERSION_1
+
+/* The versions we can deal with */
+#define CICN_PROTO_VERSION_MIN CICN_PROTO_VERSION_CURRENT
+#define CICN_PROTO_VERSION_MAX CICN_PROTO_VERSION_CURRENT
+
+/* Default initial hop limit */
+#define CICN_DEFAULT_HOP_LIMIT 128
+
+/* Current encoding restricts TLV 'T' and 'L' to two bytes */
+#define CICN_TLV_TYPE_LEN 2
+#define CICN_TLV_LEN_LEN 2
+#define CICN_TLV_HDR_LEN (CICN_TLV_TYPE_LEN + CICN_TLV_LEN_LEN)
+#define CICN_TLV_MAX_LEN 0xffff
+
+#define cicn_parse_tlvtype(p) ((((uint8_t*)(p))[0]<<8) | ((uint8_t*)(p))[1])
+#define cicn_parse_tlvlength(p) ((((uint8_t*)(p))[2]<<8) | ((uint8_t*)(p))[3])
+
+static inline uint64_t
+cicn_parse_getvlv (uint8_t * p, uint8_t * e)
+{
+ u64 v;
+
+ /*
+ * Should check
+ * if(e <= p || e - p > 8)
+ * - it's an error.
+ */
+ v = *p++;
+ while (p < e)
+ {
+ v = (v << 8) | *p++;
+ }
+
+ return v;
+}
+
+/*
+ * Wrapper to fill in tlv hdr (type and len) for tlv under constructions.
+ * Byte-swapps if needed for this CPU.
+ */
+static inline void
+cicn_parse_tlv_hdr_build (uint8_t * tlv, uint16_t type, uint16_t len)
+{
+ C_PUTINT16 (&tlv[0], type);
+ C_PUTINT16 (&tlv[CICN_TLV_TYPE_LEN], len);
+}
+
+/*
+ * Wrapper to build tlv given the type, the length, and the pre-constructed
+ * value
+ */
+static inline void
+cicn_parse_tlv_build (uint8_t * tlv, uint16_t type, uint16_t len,
+ const uint8_t * v)
+{
+ cicn_parse_tlv_hdr_build (tlv, type, len);
+ clib_memcpy (&tlv[CICN_TLV_HDR_LEN], v, len);
+}
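+
+/*
+ * Sketch (illustrative): building a Payload TLV with the helper above,
+ * roughly what the pgen server setup otherwise does by hand with C_PUTINT16.
+ *
+ *   uint8_t tlv[CICN_TLV_HDR_LEN + 8];
+ *   uint8_t payload[8] = { 0 };
+ *   cicn_parse_tlv_build (tlv, CICN_TLV_PAYLOAD, sizeof (payload), payload);
+ */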
+
+/*
+ * Quickie packet sanity check: check lengths, locate name
+ */
+static inline int
+cicn_parse_pkt (uint8_t * pkt, uint32_t pktlen, uint8_t * type_p,
+ uint16_t * msg_type_p, uint8_t ** name_p,
+ uint32_t * namelen_p, cicn_pkt_hdr_desc_t * pkt_hdr_desc)
+{
+ int ret = EINVAL;
+ uint8_t *startptr;
+ const uint8_t *endptr;
+ uint8_t type;
+ uint16_t sval;
+ uint8_t hdr_len;
+
+ if (pkt == NULL || pktlen < CICN_PACKET_MIN_LEN)
+ {
+ goto error;
+ }
+
+ startptr = pkt;
+ endptr = pkt + pktlen;
+
+ if ((*pkt < CICN_PROTO_VERSION_MIN) || (*pkt > CICN_PROTO_VERSION_MAX))
+ {
+ goto error;
+ }
+
+ pkt++;
+
+ /* TODO -- validate packet type or make the caller do it? */
+ type = *pkt;
+ if (type_p)
+ {
+ *type_p = type;
+ }
+
+ /* Advance to and check header's packet len */
+ pkt++;
+
+ C_GETINT16 (sval, pkt);
+ if (startptr + sval > endptr)
+ {
+ goto error;
+ }
+
+ /* TODO -- check hop limit here, or let caller do it? */
+
+ /* Advance past hop limit and reserved bits */
+ pkt += 4;
+
+ /* TODO -- any 'flags' to check? */
+
+ /* Advance to header-len field */
+ pkt++;
+ hdr_len = *pkt;
+
+ /* Check header-len; must be enough room for at least a message tlv and
+ * a name tlv.
+ */
+ if ((startptr + hdr_len) > (endptr - 4 /*msg */ - 4 /*name */ ))
+ {
+ goto error;
+ }
+
+ /* header options we care about */
+ pkt_hdr_desc->ph_lifetime_idx = -1;
+ uint8_t *hdr_tlv = pkt + 1;
+ pkt = startptr + hdr_len;
+ while (hdr_tlv < pkt)
+ {
+ uint16_t hdr_tlv_type = cicn_parse_tlvtype (hdr_tlv);
+ uint16_t hdr_tlv_len =
+ CICN_TLV_HDR_LEN + cicn_parse_tlvlength (hdr_tlv);
+ if (hdr_tlv + hdr_tlv_len > pkt)
+ {
+ goto error;
+ }
+
+ switch (hdr_tlv_type)
+ {
+ case CICN_HDR_TLV_INT_LIFETIME:
+ if (type == CICN_PKT_TYPE_INTEREST)
+ {
+ pkt_hdr_desc->ph_lifetime_idx = hdr_tlv - startptr;
+ }
+ break;
+ case CICN_HDR_TLV_CACHE_TIME:
+ if (type == CICN_PKT_TYPE_CONTENT)
+ {
+ pkt_hdr_desc->ph_lifetime_idx = hdr_tlv - startptr;
+ }
+ break;
+ default:
+ break;
+ }
+ hdr_tlv += hdr_tlv_len;
+ }
+
+ /* Capture message type. TODO -- validate/enforce msg types. */
+ C_GETINT16 (sval, pkt);
+ if (msg_type_p)
+ {
+ *msg_type_p = sval;
+ }
+
+ pkt += 2;
+
+ /* Check len of message tlv. TODO -- not checking for other per-msg tlvs */
+ C_GETINT16 (sval, pkt);
+ if (((pkt + sval + CICN_TLV_LEN_LEN) > endptr) || (sval < 4 /*name */ ))
+ {
+ goto error;
+ }
+
+ pkt += 2;
+
+ /* Must find name first in the 'message' */
+ C_GETINT16 (sval, pkt);
+ if (sval != CICN_TLV_NAME)
+ {
+ goto error;
+ }
+
+ /* Capture start of name */
+ if (name_p)
+ {
+ *name_p = pkt;
+ }
+
+ pkt += 2;
+
+ /* Validate len of name tlv
+ * - zero _is_ a valid name len
+ * - TODO should compare embedded name len with containing message tlv len
+ */
+ C_GETINT16 (sval, pkt);
+ if ((pkt + sval + CICN_TLV_LEN_LEN) > endptr)
+ {
+ goto error;
+ }
+
+ if (namelen_p)
+ {
+ /* Return the whole length from the start of the Name tlv,
+ * including the T and L octets.
+ */
+ *namelen_p = sval + CICN_TLV_TYPE_LEN + CICN_TLV_LEN_LEN;
+ }
+
+ /* Looks ok so far... */
+ ret = AOK;
+
+ return (ret);
+
+error:
+ if (type_p)
+ {
+ *type_p = 0; // compiler warning
+ }
+ if (msg_type_p)
+ {
+ *msg_type_p = 0; // compiler warning
+ }
+ return (ret);
+}
+
+/*
+ * Process optional time-based Hop-by-hop headers.
+ * Packet already verified for sanity by cicn_parse_pkt().
+ * An Interest Lifetime TLV will affect the PIT timeout
+ * value, or whether the interest should be put in the PIT
+ * at all (if the value is 0 then no content is expected).
+ * Caching will use the Recommended Cache Time TLV.
+ */
+static inline int
+cicn_parse_hdr_time_ms (uint8_t * pkt, cicn_pkt_hdr_desc_t * pkt_hdr_desc,
+ uint16_t type, uint64_t * time_res)
+{
+ uint8_t *p;
+ uint16_t len;
+ uint64_t v;
+
+ if (pkt_hdr_desc->ph_lifetime_idx == -1)
+ {
+ return (ENOENT);
+ }
+
+ p = pkt + pkt_hdr_desc->ph_lifetime_idx;
+ len = cicn_parse_tlvlength (p);
+
+ switch (type)
+ {
+ case CICN_HDR_TLV_INT_LIFETIME:
+ if (len > 8)
+ {
+ return (ENOENT);
+ }
+ v =
+ cicn_parse_getvlv (p + CICN_TLV_HDR_LEN, p + CICN_TLV_HDR_LEN + len);
+ break;
+ case CICN_HDR_TLV_CACHE_TIME:
+ if (len != 8)
+ {
+ return (ENOENT);
+ }
+ C_GETINT64 (v, p + CICN_TLV_HDR_LEN);
+ break;
+ default:
+ return (ENOENT);
+ break;
+ }
+
+ *time_res = v;
+ return AOK;
+}
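+
+/*
+ * Caller sketch (illustrative): after cicn_parse_pkt() has filled in
+ * pkt_hdr_desc, fetch the Interest Lifetime in milliseconds, if present.
+ *
+ *   uint64_t lifetime_ms;
+ *   if (cicn_parse_hdr_time_ms (pkt, &pkt_hdr_desc,
+ *                               CICN_HDR_TLV_INT_LIFETIME,
+ *                               &lifetime_ms) == AOK)
+ *     {
+ *       ... clamp lifetime_ms to the configured PIT bounds before use ...
+ *     }
+ */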
+
+/*
+ * skip over pkt_hdr to msg.
+ * pkt_hdr must have already been verified by cicn_parse_pkt()
+ */
+static inline uint8_t *
+cicn_parse_pkt2msg (cicn_packet_hdr_t * pkt_hdr)
+{
+ uint8_t *pkt_hdr_ptr = (uint8_t *) pkt_hdr;
+ return (&pkt_hdr_ptr[pkt_hdr->pkt_hdr_len]);
+}
+
+
+/*
+ * Utility to convert a string into a series of name-components. We use this
+ * in cli handling, for example. We write into 'obuf', and we return the
+ * number of octets used, or an error < 0. This only creates name-comps: it's
+ * up to the caller to create a complete name tlv if that's needed.
+ * - obuf holds result
+ * - obuflen is size of result buffer
+ * - str is name in "/"-separated ascii
+ * - chunk_name specifies whether the name's last component should be
+ * chunk name component rather than generic name component.
+ *
+ * This is pretty basic right now:
+ * - the '/' character is the separator
+ * - binary octets (via escapes) not supported
+ * - in-string component type specification not supported
+ * - not much validation of the input string.
+ */
+static inline int
+cicn_parse_name_comps_from_str_inline (uint8_t * obuf, int obuflen,
+ const char *str, int chunk_name)
+{
+ int ret = -EINVAL;
+
+ int i, used, start;
+ uint8_t *obufp;
+ uint8_t *last_comp = NULL; // chunk component, if chunk_name != 0
+
+ if (obuf == NULL || str == NULL)
+ {
+ goto done;
+ }
+
+ /* Special case empty string, which means a valid name with no components.
+ */
+ if (str[0] == '\000')
+ {
+ ret = 0;
+ goto done;
+ }
+
+ /* Let's see how many slashes there are, so we can pre-check the
+ * buffer space we'll need. There is the special-case of a single
+ * '/', which means a single empty name-component.
+ */
+ used = (str[0] == '/' || str[0] == '\000') ? 0 : 1;
+
+ for (i = 0; str[i] != 0; i++)
+ {
+ if (str[i] == '/')
+ {
+ used++;
+ }
+ }
+
+ /* Estimate safe buf len required */
+ if (obuflen < (i + (4 * used)))
+ {
+ ret = -ENOSPC;
+ goto done;
+ }
+
+ /* Convert to name-comp encoding */
+ start = (str[0] == '/') ? 1 : 0;
+ for (i = start, obufp = obuf;; i++)
+ {
+
+ if ((str[i] == '/') || ((str[i] == '\000') && (i > start)))
+ {
+
+ last_comp = obufp;
+ C_PUTINT16 (obufp, CICN_NAME_COMP);
+ obufp += CICN_TLV_TYPE_LEN;
+ C_PUTINT16 (obufp, (i - start));
+ obufp += CICN_TLV_LEN_LEN;
+
+ memcpy (obufp, str + start, i - start);
+ obufp += (i - start);
+
+ start = i + 1;
+ }
+
+ if (str[i] == 0)
+ {
+ ret = obufp - obuf;
+ break;
+ }
+ }
+ if (chunk_name && (last_comp != NULL))
+ {
+ C_PUTINT16 (last_comp, CICN_NAME_COMP_CHUNK);
+ }
+
+done:
+
+ return (ret);
+}
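+
+/*
+ * Encoding sketch (illustrative): "/a/b" yields two name components, each
+ * a 2-byte type, 2-byte length and the component bytes:
+ *
+ *   00 01 00 01 'a'  00 01 00 01 'b'   (10 octets, return value 10)
+ *
+ * With chunk_name set, the last component's type is CICN_NAME_COMP_CHUNK
+ * (16) instead of CICN_NAME_COMP (1).
+ */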
+
+/*
+ * Utility to convert from tlv-encoded prefix to string (no leading name tlv),
+ * for cli output e.g. See also cicn_parse_name_to_str()
+ *
+ * For resultant buf, return strlen(buf) in *str_len_res
+ */
+static inline int
+cicn_parse_prefix_to_str (char *buf, int bufsize, const uint8_t * prefix,
+ int pfxlen, int *str_len_res)
+{
+ int i, str_len, ret = EINVAL;
+ uint16_t sval;
+
+ str_len = 0;
+
+ if ((buf == NULL) || (bufsize < 1) || (prefix == NULL) || (pfxlen < 0))
+ {
+ goto done;
+ }
+
+ /* Special-case empty prefix */
+ if (pfxlen == 0)
+ {
+ *buf = '\0';
+ ret = AOK;
+ goto done;
+ }
+
+ for (i = 0;; i++)
+ {
+ if (i >= pfxlen)
+ {
+ break;
+ }
+
+ /* Must have at least T + L */
+ if ((pfxlen - i) < 4)
+ {
+ break;
+ }
+
+ /* Skip 'T' */
+ i += 2;
+
+ C_GETINT16 (sval, (prefix + i));
+
+ /* Must not overrun 'prefix': error */
+ if ((i + 2 + sval) > pfxlen)
+ {
+ goto done;
+ }
+
+ /* Advance past 'L' */
+ i += 2;
+
+ if (str_len >= bufsize)
+ {
+ ret = ENOSPC;
+ goto done;
+ }
+ else
+ {
+ buf[str_len++] = '/';
+ }
+
+ while (sval > 0)
+ {
+ if (prefix[i] == '\\' || !isprint (prefix[i]))
+ {
+ int len;
+ if (prefix[i] == '\\')
+ {
+ len =
+ snprintf (&buf[str_len], bufsize - str_len, "%s", "\\\\");
+ }
+ else
+ {
+ len =
+ snprintf (&buf[str_len], bufsize - str_len, "\\%03o",
+ prefix[i]);
+ }
+ if (len < 0)
+ {
+ goto done;
+ }
+ str_len += len;
+ if (str_len >= bufsize)
+ {
+ ret = ENOSPC;
+ goto done;
+ }
+ }
+ else
+ {
+ if (str_len >= bufsize)
+ {
+ ret = ENOSPC;
+ goto done;
+ }
+ else
+ {
+ buf[str_len++] = prefix[i];
+ }
+ }
+ sval--;
+
+ if (sval > 0)
+ {
+ i++;
+ }
+ }
+
+ } /* End for... */
+
+ if (str_len >= bufsize)
+ {
+ ret = ENOSPC; // no space for terminal \0, added below
+ goto done;
+ }
+
+ ret = AOK; // success
+
+done:
+
+ if (bufsize <= 0)
+ {
+ str_len = 0;
+ }
+ else
+ {
+ if (str_len >= bufsize)
+ {
+ str_len = bufsize - 1;
+ }
+ buf[str_len] = '\000';
+ }
+ if (str_len_res)
+ {
+ *str_len_res = str_len;
+ }
+ return (ret);
+}
+
+/*
+ * Convert name (including name tlv header) to printable buffer
+ * For resultant buf, return strlen(buf) in *str_len_res
+ */
+static inline int
+cicn_parse_name_to_str (char *buf, int bufsize, const uint8_t * name,
+ int namelen, int *str_len_res)
+{
+ int ret;
+ uint16_t sval;
+
+ if (namelen < CICN_TLV_HDR_LEN)
+ {
+ return (EINVAL);
+ }
+ C_GETINT16 (sval, &name[0]);
+ if (sval != CICN_TLV_NAME)
+ {
+ return (EINVAL);
+ }
+ C_GETINT16 (sval, &name[CICN_TLV_TYPE_LEN]);
+ if (sval != namelen - CICN_TLV_HDR_LEN)
+ { /* Must not overrun name */
+ return (EINVAL);
+ }
+ ret =
+ cicn_parse_prefix_to_str (buf, bufsize, name + CICN_TLV_HDR_LEN,
+ namelen - CICN_TLV_HDR_LEN, str_len_res);
+
+ return (ret);
+}
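+
+/*
+ * Round-trip sketch (illustrative): printing a wire-format name located by
+ * cicn_parse_pkt() (name/namelen below) back to ascii for cli/debug output.
+ *
+ *   char nbuf[256];
+ *   int nlen;
+ *   if (cicn_parse_name_to_str (nbuf, sizeof (nbuf), name, namelen,
+ *                               &nlen) == AOK)
+ *     vlib_cli_output (vm, "name: %s (%d chars)", nbuf, nlen);
+ */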
+
+int
+cicn_parse_name_comps_from_str (uint8_t * obuf, int obuflen, const char *str,
+ cicn_rd_t * cicn_rd);
+int cicn_parse_name_from_str (uint8_t * obuf, int obuflen, const char *str,
+ int is_chunk_name, cicn_rd_t * cicn_rd);
+
+#endif /* _CICN_PARSER_H_ */
diff --git a/cicn-plugin/cicn/cicn_pcs.c b/cicn-plugin/cicn/cicn_pcs.c
new file mode 100644
index 00000000..e3427a46
--- /dev/null
+++ b/cicn-plugin/cicn/cicn_pcs.c
@@ -0,0 +1,384 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * cicn_pcs.c: Opportunistic timeout code for the PIT/CS used in the cicn forwarder.
+ */
+
+#include <stdlib.h>
+#include <errno.h>
+#include <assert.h>
+#include <inttypes.h>
+
+#include <vlib/vlib.h>
+#include <vppinfra/pool.h>
+
+#include <cicn/cicn.h>
+#include <cicn/cicn_hashtb.h>
+#include <cicn/cicn_pcs.h>
+
+/*
+ * Calling worker thread context, passed in and bundled up
+ * to be passed to the bucket scanning code to enable updating
+ * data-structures in the event of deletions.
+ */
+typedef struct cicn_pcs_worker_ctx_s
+{ /* worker thread context */
+ vlib_main_t *vm; /* vpp */
+ cicn_pit_cs_t *pitcs; /* PIT/CS table */
+ uint64_t h; /* hash */
+ u32 *pec; /* PIT Expired Count */
+ u32 *cec; /* CS Expired Count */
+
+ cicn_hashtb_h ht; /* derived from .pitcs */
+} cicn_pcs_worker_ctx_t;
+
+/*
+ * Overflow bucket context: as a bucket is scanned, maintain
+ * the location and count of occupied and empty (free) entries
+ * to enable bucket compaction.
+ */
+typedef struct cicn_hashtb_bucket_ctx_s
+{ /* bucket context */
+ cicn_hash_bucket_t *bucket;
+ int occupied[CICN_HASHTB_BUCKET_ENTRIES]; /* occupied */
+ int noccupied; /* occupied */
+ int empty[CICN_HASHTB_BUCKET_ENTRIES]; /* free */
+ int nempty; /* free */
+} cicn_hashtb_bucket_ctx_t;
+
+/*
+ * Free an overflow bucket from a hashtable (derived
+ * from the static function in cicn_hashtb.c).
+ */
+static void
+cicn_free_overflow_bucket (cicn_hashtb_h ht, cicn_hash_bucket_t * bucket)
+{
+ ASSERT (ht->ht_overflow_buckets_used > 0);
+
+ pool_put (ht->ht_overflow_buckets, bucket);
+ ht->ht_overflow_buckets_used--;
+}
+
+/*
+ * Scan a single bucket (8 entries) for timed-out entries.
+ *
+ * Recursive function, for scanning chain of buckets.
+ * - Bucket chains should be short, so recursion should not be deep.
+ * (If bucket chains are long, either hash table is dimensioned too small or
+ * hash function is not distributing names effectively.)
+ * - Find and clear out timed out entries on the way down the recursion.
+ * - Compact entries and free unused overflow buckets (if possible) on the
+ * way back up the recursion.
+ *
+ *
+ * Recursion in detail:
+ * - pre-recursion processing
+ * - scan of the supplied bucket and cleanup of expired entries
+ * - recursion processing:
+ * - if a bucket follows supplied bucket, recurse
+ * - post-recursion processing:
+ * - if supplied bucket is head of chain (pbctx == 0), done
+ * - if supplied bucket is non-head element of chain, try to compact
+ * entries into supplied parent of supplied bucket and free supplied
+ * bucket if it ends up empty.
+ * - buckets are freed from the tail backwards
+ * - recursive call can have caused supplied bucket to pick up new
+ * entries from its child, so need to rescan supplied bucket after
+ * the recursive call.
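+ *
+ * Illustrative walk-through (hypothetical chain, for exposition only):
+ *   head -> ovf1 -> ovf2, where two live entries remain in ovf2 after its
+ *   scan.
+ *   - the scan recurses head -> ovf1 -> ovf2
+ *   - unwinding at ovf2: if ovf1 has two or more free slots, ovf2's entries
+ *     are copied up into ovf1, ovf1's overflow entry is cleared and ovf2
+ *     is freed
+ *   - unwinding at ovf1: ovf1 is rescanned (it may have just absorbed
+ *     ovf2's entries) before the same compaction is attempted into head
+ *   - unwinding at head: pbctx == 0, so no compaction is attempted there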
+ *
+ * Arguments are:
+ *   wctx: worker context for updating data structures
+ * at the vpp node level;
+ * pbctx: bucket context of the calling (parent) instance
+ * of cicn_pcs_timeout_opportunity();
+ * bucket: the bucket to scan.
+ */
+static int
+cicn_pcs_timeout_opportunity (cicn_pcs_worker_ctx_t * wctx,
+ cicn_hashtb_bucket_ctx_t * pbctx,
+ cicn_hash_bucket_t * bucket)
+{
+ int i, r;
+ uint16_t timeout;
+ cicn_hashtb_h ht;
+ cicn_hash_bucket_t *b;
+ cicn_hash_node_t *node;
+ cicn_hash_entry_t *entry;
+ cicn_pcs_entry_t *pcs;
+ cicn_hashtb_bucket_ctx_t bctx;
+
+ /*
+ * Initialise the bucket context for this scan;
+ * if this bucket has an overflow entry, the context
+ * will be passed to it (and seen as pbctx).
+ */
+ memset (&bctx, 0, sizeof (bctx));
+ bctx.bucket = bucket;
+
+ /*
+ * Scan the bucket for expired entries and release them,
+ * updating bctx with the location and count of occupied
+ * and empty entries.
+ */
+ ht = wctx->ht;
+ for (i = 0; i < CICN_HASHTB_BUCKET_ENTRIES; i++)
+ {
+ entry = &bucket->hb_entries[i];
+ if (entry->he_node == 0)
+ {
+ bctx.empty[bctx.nempty++] = i;
+ continue;
+ }
+ if (entry->he_flags & CICN_HASH_ENTRY_FLAG_OVERFLOW)
+ {
+ if (i != CICN_HASHTB_BUCKET_ENTRIES - 1)
+ assert (!(entry->he_flags & CICN_HASH_ENTRY_FLAG_OVERFLOW));
+ bctx.occupied[bctx.noccupied++] = i;
+ break;
+ }
+
+ if (entry->he_flags & CICN_HASH_ENTRY_FLAG_DELETED)
+ {
+ bctx.occupied[bctx.noccupied++] = i;
+ continue;
+ }
+
+ if (entry->he_flags & CICN_HASH_ENTRY_FLAG_FAST_TIMEOUT)
+ {
+ timeout = cicn_infra_fast_timer;
+ }
+ else
+ {
+ timeout = cicn_infra_slow_timer;
+ }
+ if (cicn_infra_seq16_gt (entry->he_timeout, timeout))
+ {
+ bctx.occupied[bctx.noccupied++] = i;
+ continue;
+ }
+
+ /*
+ * Entry has timed out, update the relevant statistics
+ * at the vpp node level and release the resources; the entry
+ * is now counted as empty.
+ * Parallel to cicn_pcs_delete(): cannot call cicn_pcs_delete()
+ * since that can call cicn_hashtb_delete() and cause supplied
+ * bucket to get freed in middle of chain scan.
+ */
+ node = pool_elt_at_index (ht->ht_nodes, entry->he_node);
+ pcs = cicn_pit_get_data (node);
+ switch (pcs->shared.entry_type)
+ {
+ default:
+ /*
+ * Should not happen, not sure how to signal this?
+	   * Count the entry as occupied and continue? or...
+ * assert(entry->he_flags & (CICN_CS_TYPE|CICN_PIT_TYPE));
+ */
+ bctx.occupied[bctx.noccupied++] = i;
+ continue;
+
+ case CICN_PIT_TYPE:
+ wctx->pitcs->pcs_pit_count--;
+ (*wctx->pec)++;
+ break;
+
+ case CICN_CS_TYPE:
+ wctx->pitcs->pcs_cs_count--;
+ /* Clean up CS LRU */
+ cicn_cs_lru_dequeue (wctx->pitcs, node, pcs);
+ if (pcs->u.cs.cs_pkt_buf != 0)
+ {
+ BUFTRC (" CS-TO", pcs->u.cs.cs_pkt_buf);
+ vlib_buffer_free_one (wctx->vm, pcs->u.cs.cs_pkt_buf);
+ pcs->u.cs.cs_pkt_buf = 0;
+ }
+ (*wctx->cec)++;
+ break;
+ }
+ cicn_hashtb_init_entry (entry, 0, 0ll);
+ cicn_hashtb_free_node (ht, node);
+
+ bctx.empty[bctx.nempty++] = i;
+ }
+
+ /* recursion phase: recursively process child of this bucket, if any
+ * - entry conveniently points to the supplied bucket's last entry,
+ * which indicates if another bucket is present in the bucket chain
+ */
+ r = AOK;
+ if (entry->he_flags & CICN_HASH_ENTRY_FLAG_OVERFLOW)
+ {
+ b = pool_elt_at_index (ht->ht_overflow_buckets, entry->he_node);
+ r = cicn_pcs_timeout_opportunity (wctx, &bctx, b);
+ if (r != AOK)
+ {
+ goto done;
+ }
+ }
+
+ /*
+ * post-recursion phase, case 1:
+ * - supplied bucket is head bucket, no further compaction required
+ */
+ if (pbctx == 0)
+ {
+ r = AOK;
+ goto done;
+ }
+
+ /*
+ * post-recursion phase, case 2:
+ * - supplied bucket is non-head (aka overflow) bucket, try to compact
+ * supplied bucket's entries into supplied parent of supplied bucket
+ * - pbctx is parent, try to compact entries into it from this bucket
+ * if room exists
+ * - pbctx is guaranteed still valid since not considered for
+ * freeing until this routine returns.
+   *   - because the child of this bucket may have compacted its entries
+ * into supplied bucket, rescan supplied bucket to reinitialise
+ * the context.
+ * - if supplied bucket ends up empty, free it
+   *     - supplied bucket is empty through a combination of its entries
+   *       timing out, no child entries getting compacted into it,
+   *       and/or the supplied bucket's entries getting compacted into the parent
+ */
+
+ /* rescan */
+ memset (&bctx, 0, sizeof (bctx));
+ bctx.bucket = bucket;
+ for (i = 0; i < CICN_HASHTB_BUCKET_ENTRIES; i++)
+ {
+ entry = &bucket->hb_entries[i];
+ if (entry->he_node == 0)
+ {
+ bctx.empty[bctx.nempty++] = i;
+ continue;
+ }
+ bctx.occupied[bctx.noccupied++] = i;
+ }
+
+ /*
+ * Try to move any entries up to the parent bucket.
+ * Always set entry at the top of the loop before checking there is
+ * room in the parent so it will point to the first valid entry not
+ * moved up to the parent if the loop is exited before either all
+ * are copied or only an overflow bucket entry is left.
+ */
+ for (entry = 0, i = 0; i < bctx.noccupied; i++)
+ {
+ entry = &bucket->hb_entries[bctx.occupied[i]];
+ if (pbctx->nempty == 0)
+ {
+ break;
+ }
+ if (entry->he_flags & CICN_HASH_ENTRY_FLAG_OVERFLOW)
+ {
+ assert (i == bctx.noccupied - 1);
+ break;
+ }
+
+ pbctx->bucket->hb_entries[pbctx->empty[--pbctx->nempty]] = *entry;
+ cicn_hashtb_init_entry (entry, 0, 0ll);
+ }
+
+ /*
+ * How many are left in this bucket?
+ */
+ switch (bctx.noccupied - i)
+ {
+ default:
+ /*
+ * Couldn't empty all the entries in this overflow bucket,
+ * maybe next time...
+ */
+ break;
+
+ case 0:
+ /*
+ * This overflow bucket is empty, clear the parent's overflow entry
+ * and release this bucket.
+ */
+      cicn_hashtb_init_entry
+	(&pbctx->bucket->hb_entries[CICN_HASHTB_BUCKET_ENTRIES - 1],
+	 0, 0ll);
+ cicn_free_overflow_bucket (ht, bucket);
+ break;
+
+ case 1:
+ /*
+ * If it's an overflow bucket entry, can move it to the parent's
+ * overflow bucket entry (which points here) and free this bucket;
+ * similarly for a non-overflow bucket entry, unless the hashtable has
+ * CICN_HASHTB_FLAG_USE_SEVEN set, in which case there's nothing to be
+ * done - already checked the parent has no free space elsewhere.
+ */
+ if ((entry->he_flags & CICN_HASH_ENTRY_FLAG_OVERFLOW) ||
+ !(ht->ht_flags & CICN_HASHTB_FLAG_USE_SEVEN))
+ {
+ pbctx->bucket->hb_entries[CICN_HASHTB_BUCKET_ENTRIES - 1] = *entry;
+ cicn_free_overflow_bucket (ht, bucket);
+ }
+ break;
+ }
+
+ r = AOK;
+
+done:
+ return (r);
+}
+
+/*
+ * Opportunistic timeout:
+ * given a hash value and some context, scan all the entries in the
+ * relevant hashtable bucket (and any overflow buckets it may have)
+ * for entries that have timed out and free them;
+ * as a side effect, try to compact and free any overflow buckets.
+ *
+ * Could perhaps be generalised to other functions requiring a scan
+ * of a hashtable bucket, or easily adapted to using a timer-wheel if
+ * opportunistic scanning was found to be inadequate.
+ */
+int
+cicn_pcs_timeout (vlib_main_t * vm,
+ cicn_pit_cs_t * pitcs, uint64_t h, u32 * pec, u32 * cec)
+{
+ uint32_t bidx;
+ cicn_hashtb_h ht;
+ cicn_hash_bucket_t *bucket;
+ cicn_pcs_worker_ctx_t wctx;
+
+ /*
+ * Construct the worker thread context passed to the actual scan
+   * routine - it needs to be able to update data structures.
+ */
+ memset (&wctx, 0, sizeof (wctx));
+ wctx.vm = vm;
+ wctx.pitcs = pitcs;
+ ht = pitcs->pcs_table;
+ wctx.ht = ht;
+ wctx.h = h;
+ wctx.pec = pec;
+ wctx.cec = cec;
+
+ /*
+ * Locate the bucket in the table using some
+ * bits of the low half of the hash.
+ */
+ bidx = (h & (ht->ht_bucket_count - 1));
+ bucket = ht->ht_buckets + bidx;
+
+ return (cicn_pcs_timeout_opportunity (&wctx, 0, bucket));
+}
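+
+/*
+ * Usage sketch (illustrative only; names other than cicn_pcs_timeout() are
+ * assumptions): a forwarding node that has already computed the 64-bit name
+ * hash h0 for the packet it is processing can opportunistically sweep the
+ * corresponding bucket, e.g.
+ *
+ *   u32 pit_expired = 0, cs_expired = 0;
+ *   (void) cicn_pcs_timeout (vm, &rt->pitcs, h0, &pit_expired, &cs_expired);
+ *
+ * and then fold pit_expired/cs_expired into that node's counters.
+ */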
diff --git a/cicn-plugin/cicn/cicn_pcs.h b/cicn-plugin/cicn/cicn_pcs.h
new file mode 100644
index 00000000..0c6f9237
--- /dev/null
+++ b/cicn-plugin/cicn/cicn_pcs.h
@@ -0,0 +1,512 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * cicn_pcs.h: Opportunistic timeout code for the PIT/CS used in the cicn forwarder.
+ */
+
+#ifndef _CICN_PCS_H_
+#define _CICN_PCS_H_ 1
+
+#if !CICN_VPP_PLUGIN
+#error "cicn-internal file included externally"
+#endif
+
+#include "cicn_hashtb.h"
+
+/* The PIT and CS are stored as a union */
+#define CICN_PIT_NULL_TYPE 0
+#define CICN_PIT_TYPE 1
+#define CICN_CS_TYPE 2
+
+/* Max number of incoming (interest) faces supported, for now. Note that changing
+ * this may change alignment within the PIT struct, so be careful.
+ */
+typedef struct cicn_pcs_shared_s
+{
+
+ /* Installation/creation time (vpp float units, for now) */
+ f64 create_time;
+
+ /* Expiration time (vpp float units, for now) */
+ f64 expire_time;
+
+ /* Shared 'type' octet */
+ uint8_t entry_type;
+
+ /* Shared 'flags' octet */
+ uint8_t entry_flags;
+
+ /* Shared size 8 + 8 + 2 = 18B */
+
+} cicn_pcs_shared_t;
+
+/*
+ * PIT entry, unioned with a CS entry below
+ */
+typedef struct cicn_pit_entry_s
+{
+
+ /* Shared size 8 + 8 + 2 = 18B */
+
+ /* Egress face and array of ingress faces */
+ /* 18B + 2B*8 = 34B */
+ uint16_t pe_txface;
+ uint16_t pe_rxfaces[CICN_PARAM_PIT_ENTRY_PHOPS_MAX];
+
+ /* Bitmap of FIB faces tried (TODO -- needed in first go?) */
+ /* 34 + 2B = 36B */
+ uint16_t pe_tx_face_map;
+
+ /* FIB entry id (TODO -- why - related to 'faces tried'?) */
+ /* 36 + 4B = 40B */
+ uint32_t pe_fib_idx;
+
+ /* Packet buffer idx, if held */
+ /* 40 + 4B = 44B */
+ uint32_t pe_pkt_buf;
+
+} cicn_pit_entry_t;
+
+/*
+ * CS entry, unioned with a PIT entry below
+ */
+typedef struct cicn_cs_entry_s
+{
+
+ /* Shared size 8 + 8 + 2 = 18B */
+
+ /* Ingress face */
+ /* 2B = 20B */
+ uint16_t cs_rxface;
+
+ /* Packet buffer, if held */
+ /* 4B = 24B */
+ uint32_t cs_pkt_buf;
+
+ /* Linkage for LRU, in the form of hashtable node indexes */
+ /* 8B = 32B */
+ uint32_t cs_lru_prev;
+ uint32_t cs_lru_next;
+
+} cicn_cs_entry_t;
+
+/*
+ * Combined PIT/CS entry data structure, embedded in a hashtable entry
+ * after the common hashtable preamble struct. This MUST fit in the available
+ * (fixed) space in a hashtable node.
+ */
+typedef struct cicn_pcs_entry_s
+{
+
+ cicn_pcs_shared_t shared;
+
+ union
+ {
+ cicn_pit_entry_t pit;
+ cicn_cs_entry_t cs;
+ } u;
+} cicn_pcs_entry_t;
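+
+/*
+ * Sketch of a compile-time fit check (illustrative; the constant name is an
+ * assumption, standing in for whatever cicn_hashtb.h exposes as the
+ * per-node application-data size):
+ *
+ *   _Static_assert (sizeof (cicn_pcs_entry_t) <= CICN_HASH_NODE_APP_DATA_SIZE,
+ *                   "cicn_pcs_entry_t must fit in a hashtable node");
+ */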
+
+/*
+ * Overall PIT/CS table, based on the common hashtable
+ */
+typedef struct cicn_pit_cs_s
+{
+
+ cicn_hashtb_t *pcs_table;
+
+ /* Counters for PIT/CS entries */
+ uint32_t pcs_pit_count;
+ uint32_t pcs_cs_count;
+
+ /* TODO -- CS LRU, represented as ... */
+ uint32_t pcs_lru_max;
+ uint32_t pcs_lru_count;
+
+ /* Indexes to hashtable nodes forming CS LRU */
+ uint32_t pcs_lru_head;
+ uint32_t pcs_lru_tail;
+
+} cicn_pit_cs_t;
+
+/* Accessor for pit/cs data inside hash table node */
+static inline cicn_pcs_entry_t *
+cicn_pit_get_data (cicn_hash_node_t * node)
+{
+ return (cicn_pcs_entry_t *) (cicn_hashtb_node_data (node));
+}
+
+/* Init pit/cs data block (usually inside hash table node) */
+static inline void
+cicn_pit_init_data (cicn_pcs_entry_t * p)
+{
+ memset (p, 0, sizeof (cicn_pcs_entry_t));
+}
+
+/* Wrapper for init/alloc of a new pit/cs */
+static inline int
+cicn_pit_create (cicn_pit_cs_t * p, uint32_t num_elems)
+{
+ int ret =
+ cicn_hashtb_alloc (&p->pcs_table, num_elems, sizeof (cicn_pcs_entry_t));
+ p->pcs_table->ht_flags |= CICN_HASHTB_FLAG_KEY_FMT_NAME;
+
+ p->pcs_pit_count = p->pcs_cs_count = 0;
+
+ p->pcs_lru_max = CICN_PARAM_CS_LRU_DEFAULT;
+ p->pcs_lru_count = 0;
+ p->pcs_lru_head = p->pcs_lru_tail = 0;
+
+ return (ret);
+}
+
+static inline f64
+cicn_pcs_get_exp_time (f64 cur_time_sec, uint64_t lifetime_msec)
+{
+ return (cur_time_sec + ((f64) lifetime_msec) / SEC_MS);
+}
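+
+/*
+ * Worked example: with cur_time_sec == 100.0 and a 4000 msec lifetime,
+ * cicn_pcs_get_exp_time() returns 100.0 + 4000.0 / 1000 = 104.0, assuming
+ * SEC_MS is 1000.
+ */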
+
+/*
+ * Configure CS LRU limit. Zero is accepted, means 'no limit', probably not
+ * a good choice.
+ */
+static inline void
+cicn_pit_set_lru_max (cicn_pit_cs_t * p, uint32_t limit)
+{
+ p->pcs_lru_max = limit;
+}
+
+/*
+ * Accessor for PIT interest counter.
+ */
+static inline uint32_t
+cicn_pit_get_int_count (const cicn_pit_cs_t * pitcs)
+{
+ return (pitcs->pcs_pit_count);
+}
+
+/*
+ * Accessor for PIT cs entries counter.
+ */
+static inline uint32_t
+cicn_pit_get_cs_count (const cicn_pit_cs_t * pitcs)
+{
+ return (pitcs->pcs_cs_count);
+}
+
+/*
+ * Convert a PIT entry into a CS entry (assumes that the entry is already
+ * in the hashtable.)
+ * This is primarily here to maintain the internal counters.
+ */
+static inline int
+cicn_pit_to_cs (cicn_pit_cs_t * p, cicn_pcs_entry_t * pcs)
+{
+  ASSERT (pcs->shared.entry_type == CICN_PIT_TYPE);
+
+ pcs->shared.entry_type = CICN_CS_TYPE;
+
+ p->pcs_pit_count--;
+ p->pcs_cs_count++;
+
+ return (AOK);
+}
+
+/*
+ * Is CS enabled?
+ * CICN_FEATURE_CS is tri-valued: value of 2 means do a run-time check
+ */
+static inline int
+cicn_cs_enabled (cicn_pit_cs_t * pit)
+{
+ switch (CICN_FEATURE_CS)
+ {
+ case 0:
+ default:
+ return (0);
+ case 1:
+ return (1);
+ case 2:
+ return (pit->pcs_lru_max > 0);
+ }
+}
+
+/*
+ * Insert a new CS element at the head of the CS LRU
+ */
+static inline void
+cicn_cs_lru_insert (cicn_pit_cs_t * p, cicn_hash_node_t * pnode,
+ cicn_pcs_entry_t * pcs)
+{
+ cicn_hash_node_t *lrunode;
+ cicn_pcs_entry_t *lrupcs;
+ uint32_t idx;
+
+ idx = cicn_hashtb_node_idx_from_node (p->pcs_table, pnode);
+
+ if (p->pcs_lru_head != 0)
+ {
+ lrunode = cicn_hashtb_node_from_idx (p->pcs_table, p->pcs_lru_head);
+ lrupcs = cicn_pit_get_data (lrunode);
+
+ ASSERT (lrupcs->u.cs.cs_lru_prev == 0);
+ lrupcs->u.cs.cs_lru_prev = idx;
+
+ pcs->u.cs.cs_lru_prev = 0;
+ pcs->u.cs.cs_lru_next = p->pcs_lru_head;
+
+ p->pcs_lru_head = idx;
+
+ }
+ else
+ {
+ ASSERT (p->pcs_lru_tail == 0); /* We think the list is empty */
+
+ p->pcs_lru_head = p->pcs_lru_tail = idx;
+
+ pcs->u.cs.cs_lru_next = pcs->u.cs.cs_lru_prev = 0;
+ }
+
+ p->pcs_lru_count++;
+}
+
+/*
+ * Dequeue an LRU element, for example when it has expired.
+ */
+static inline void
+cicn_cs_lru_dequeue (cicn_pit_cs_t * pit, cicn_hash_node_t * pnode,
+ cicn_pcs_entry_t * pcs)
+{
+ cicn_hash_node_t *lrunode;
+ cicn_pcs_entry_t *lrupcs;
+
+ if (pcs->u.cs.cs_lru_prev != 0)
+ {
+ /* Not already on the head of the LRU */
+ lrunode = cicn_hashtb_node_from_idx (pit->pcs_table,
+ pcs->u.cs.cs_lru_prev);
+ lrupcs = cicn_pit_get_data (lrunode);
+
+ lrupcs->u.cs.cs_lru_next = pcs->u.cs.cs_lru_next;
+ }
+ else
+ {
+ ASSERT (pit->pcs_lru_head ==
+ cicn_hashtb_node_idx_from_node (pit->pcs_table, pnode));
+ pit->pcs_lru_head = pcs->u.cs.cs_lru_next;
+ }
+
+ if (pcs->u.cs.cs_lru_next != 0)
+ {
+ /* Not already the end of the LRU */
+ lrunode = cicn_hashtb_node_from_idx (pit->pcs_table,
+ pcs->u.cs.cs_lru_next);
+ lrupcs = cicn_pit_get_data (lrunode);
+
+ lrupcs->u.cs.cs_lru_prev = pcs->u.cs.cs_lru_prev;
+ }
+ else
+ {
+ /* This was the last LRU element */
+ ASSERT (pit->pcs_lru_tail ==
+ cicn_hashtb_node_idx_from_node (pit->pcs_table, pnode));
+ pit->pcs_lru_tail = pcs->u.cs.cs_lru_prev;
+ }
+
+ pit->pcs_lru_count -= 1;
+}
+
+/*
+ * Move a CS LRU element to the head, probably after it's been used.
+ */
+static inline void
+cicn_cs_lru_update_head (cicn_pit_cs_t * pit, cicn_hash_node_t * pnode,
+ cicn_pcs_entry_t * pcs)
+{
+
+ if (pcs->u.cs.cs_lru_prev != 0)
+ {
+ /* Not already on the head of the LRU, detach it from its current
+ * position
+ */
+ cicn_cs_lru_dequeue (pit, pnode, pcs);
+
+ /* Now detached from the list; attach at head */
+ cicn_cs_lru_insert (pit, pnode, pcs);
+
+ }
+ else
+ {
+ ASSERT (pit->pcs_lru_head ==
+ cicn_hashtb_node_idx_from_node (pit->pcs_table, pnode));
+ }
+}
+
+/*
+ * Remove a batch of nodes from the CS LRU, copying their node indexes into
+ * the caller's array. We expect this is done when the LRU size exceeds
+ * the CS's limit.
+ */
+static inline int
+cicn_cs_lru_trim (cicn_pit_cs_t * pit, uint32_t * node_list, int sz)
+{
+ cicn_hash_node_t *lrunode;
+ cicn_pcs_entry_t *lrupcs;
+ uint32_t idx;
+ int i;
+
+ idx = pit->pcs_lru_tail;
+
+ for (i = 0; i < sz; i++)
+ {
+
+ if (idx == 0)
+ {
+ break;
+ }
+
+ lrunode = cicn_hashtb_node_from_idx (pit->pcs_table, idx);
+ lrupcs = cicn_pit_get_data (lrunode);
+
+ node_list[i] = idx;
+
+ idx = lrupcs->u.cs.cs_lru_prev;
+ }
+
+ pit->pcs_lru_count -= i;
+
+ pit->pcs_lru_tail = idx;
+ if (idx != 0)
+ {
+ lrunode = cicn_hashtb_node_from_idx (pit->pcs_table, idx);
+ lrupcs = cicn_pit_get_data (lrunode);
+
+ lrupcs->u.cs.cs_lru_next = 0;
+ }
+ else
+ {
+ /* If the tail is empty, the whole lru is empty */
+ pit->pcs_lru_head = 0;
+ }
+
+ return (i);
+}
+
+/*
+ * Insert PIT/CS entry into the hashtable
+ * The main purpose of this wrapper is helping maintain the per-PIT stats.
+ */
+static inline int
+cicn_pit_insert (cicn_pit_cs_t * pitcs, cicn_pcs_entry_t * entry,
+ cicn_hash_node_t * node)
+{
+ int ret;
+
+ ASSERT (entry == cicn_hashtb_node_data (node));
+
+ ret = cicn_hashtb_insert (pitcs->pcs_table, node);
+ if (ret == AOK)
+ {
+ if (entry->shared.entry_type == CICN_PIT_TYPE)
+ {
+ pitcs->pcs_pit_count++;
+ }
+ else
+ {
+ pitcs->pcs_cs_count++;
+ }
+ }
+
+ return (ret);
+}
+
+/*
+ * Delete a PIT/CS entry from the hashtable, freeing the hash node struct.
+ * The caller's pointers are zeroed!
+ * If cs_trim is true, entry has already been removed from lru list
+ * The main purpose of this wrapper is helping maintain the per-PIT stats.
+ */
+static inline int
+cicn_pcs_delete_internal (cicn_pit_cs_t * pitcs,
+ cicn_pcs_entry_t ** pcs_entryp,
+ cicn_hash_node_t ** nodep, vlib_main_t * vm,
+ int cs_trim)
+{
+ int ret;
+ cicn_pcs_entry_t *pcs = *pcs_entryp;
+
+ ASSERT (pcs == cicn_hashtb_node_data (*nodep));
+
+ if (pcs->shared.entry_type == CICN_PIT_TYPE)
+ {
+ pitcs->pcs_pit_count--;
+ }
+ else
+ {
+ pitcs->pcs_cs_count--;
+ // Clean up LRU queue unless entry already removed by bulk CS LRU trim
+ if (!cs_trim)
+ {
+ cicn_cs_lru_dequeue (pitcs, *nodep, pcs);
+ }
+ /* Free any associated packet buffer */
+ if (pcs->u.cs.cs_pkt_buf != 0)
+ {
+ BUFTRC ("PCS-DEL", pcs->u.cs.cs_pkt_buf);
+ vlib_buffer_free_one (vm, pcs->u.cs.cs_pkt_buf);
+ pcs->u.cs.cs_pkt_buf = 0;
+ }
+ }
+
+ ret = cicn_hashtb_delete (pitcs->pcs_table, nodep);
+ *pcs_entryp = NULL;
+
+ return (ret);
+}
+
+/*
+ * Delete entry normally
+ */
+static inline int
+cicn_pcs_delete (cicn_pit_cs_t * pitcs, cicn_pcs_entry_t ** pcs_entryp,
+ cicn_hash_node_t ** nodep, vlib_main_t * vm)
+{
+ return (cicn_pcs_delete_internal
+ (pitcs, pcs_entryp, nodep, vm, 0 /*!cs_trim */ ));
+
+}
+
+/*
+ * Delete entry which has already been bulk-removed from lru list
+ */
+static inline int
+cicn_cs_delete_trimmed (cicn_pit_cs_t * pitcs, cicn_pcs_entry_t ** pcs_entryp,
+ cicn_hash_node_t ** nodep, vlib_main_t * vm)
+{
+ return (cicn_pcs_delete_internal
+ (pitcs, pcs_entryp, nodep, vm, 1 /*cs_trim */ ));
+
+}
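+
+/*
+ * Eviction sketch (illustrative only): when pcs_lru_count exceeds
+ * pcs_lru_max, the expected pattern is to trim a batch off the LRU tail and
+ * then delete the trimmed nodes; the batch-size constant and the loop below
+ * are assumptions for exposition:
+ *
+ *   u32 trim_buf[CICN_PARAM_CS_LRU_TRIM_MAX];
+ *   int i, n = cicn_cs_lru_trim (pitcs, trim_buf, ARRAY_LEN (trim_buf));
+ *   for (i = 0; i < n; i++)
+ *     {
+ *       cicn_hash_node_t *node =
+ *         cicn_hashtb_node_from_idx (pitcs->pcs_table, trim_buf[i]);
+ *       cicn_pcs_entry_t *pcs = cicn_pit_get_data (node);
+ *       cicn_cs_delete_trimmed (pitcs, &pcs, &node, vm);
+ *     }
+ */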
+
+/*
+ * Other than the hash value, the arguments are all context needed
+ * to update the worker context.
+ */
+int
+cicn_pcs_timeout (vlib_main_t * vm,
+ cicn_pit_cs_t * pitcs, uint64_t h, u32 * pec, u32 * cec);
+
+
+#endif /* _CICN_PCS_H_ */
diff --git a/cicn-plugin/cicn/cicn_pg.c b/cicn-plugin/cicn/cicn_pg.c
new file mode 100644
index 00000000..91bf1759
--- /dev/null
+++ b/cicn-plugin/cicn/cicn_pg.c
@@ -0,0 +1,945 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * cicn_pg.c: VPP packet-generator ('pg') graph nodes and assorted utilities.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vnet/pg/pg.h>
+#include <vnet/ip/ip.h>
+#include <vnet/ethernet/ethernet.h>
+
+#include <cicn/cicn.h>
+
+/* Registration struct for a graph node */
+vlib_node_registration_t icn_pg_node;
+
+/* Stats, which end up called "error" even though they aren't... */
+#define foreach_icnpg_error \
+ _(PROCESSED, "ICN PG packets processed") \
+ _(DROPPED, "ICN PG packets dropped") \
+ _(INTEREST_MSGS_GENERATED, "ICN PG Interests generated") \
+ _(CONTENT_MSGS_RECEIVED, "ICN PG Content msgs received") \
+ _(NACKS_RECEIVED, "ICN PG NACKs received")
+
+typedef enum
+{
+#define _(sym,str) ICNPG_ERROR_##sym,
+ foreach_icnpg_error
+#undef _
+ ICNPG_N_ERROR,
+} icnpg_error_t;
+
+static char *icnpg_error_strings[] = {
+#define _(sym,string) string,
+ foreach_icnpg_error
+#undef _
+};
+
+/* Next graph nodes, which reference the list in the actual registration
+ * block below
+ */
+typedef enum
+{
+ ICNPG_NEXT_LOOKUP,
+ ICNPG_NEXT_DROP,
+ ICNPG_N_NEXT,
+} icnpg_next_t;
+
+/* Trace context struct */
+typedef struct
+{
+ u32 next_index;
+ u32 sw_if_index;
+ u8 pkt_type;
+ u16 msg_type;
+} icnpg_trace_t;
+
+/* packet trace format function */
+static u8 *
+format_icnpg_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ icnpg_trace_t *t = va_arg (*args, icnpg_trace_t *);
+
+ s = format (s, "ICNPG: pkt: %d, msg %d, sw_if_index %d, next index %d",
+ (int) t->pkt_type, (int) t->msg_type,
+ t->sw_if_index, t->next_index);
+ return (s);
+}
+
+/*
+ * Node function for the icn packet-generator client. The goal here is to
+ * manipulate/tweak a stream of packets that have been injected by the
+ * vpp packet generator to generate icn request traffic.
+ */
+static uword
+icnpg_client_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ u32 n_left_from, *from, *to_next;
+ icnpg_next_t next_index;
+ u32 pkts_processed = 0, pkts_dropped = 0;
+ u32 interest_msgs_generated = 0, content_msgs_received = 0;
+ u32 nacks_received = 0;
+ uint64_t namecounter = 1LL;
+ u32 bi0, bi1;
+ vlib_buffer_t *b0, *b1;
+ u8 pkt_type0 = 0, pkt_type1 = 0;
+ u16 msg_type0 = 0, msg_type1 = 0;
+ cicn_pkt_hdr_desc_t pkt_hdr_desc0, pkt_hdr_desc1;
+ u8 *body0, *body1;
+ u32 len0, len1;
+ udp_header_t *udp0, *udp1;
+ ip4_header_t *ip0, *ip1;
+ uint8_t *name0, *name1;
+ uint32_t namelen0, namelen1;
+ cicn_main_t *sm = &cicn_main;
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ u32 next0 = ICNPG_NEXT_DROP;
+ u32 next1 = ICNPG_NEXT_DROP;
+ u32 sw_if_index0, sw_if_index1;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t *p2, *p3;
+
+ p2 = vlib_get_buffer (vm, from[2]);
+ p3 = vlib_get_buffer (vm, from[3]);
+
+ vlib_prefetch_buffer_header (p2, LOAD);
+ vlib_prefetch_buffer_header (p3, LOAD);
+
+ CLIB_PREFETCH (p2->data, (2 * CLIB_CACHE_LINE_BYTES), STORE);
+ CLIB_PREFETCH (p3->data, (2 * CLIB_CACHE_LINE_BYTES), STORE);
+ }
+
+ /* speculatively enqueue b0 and b1 to the current next frame */
+ to_next[0] = bi0 = from[0];
+ to_next[1] = bi1 = from[1];
+ from += 2;
+ to_next += 2;
+ n_left_from -= 2;
+ n_left_to_next -= 2;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+ sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
+
+ /* We think that the udp code has handed us the payloads,
+ * so we need to walk back to the IP headers
+ */
+ ASSERT (b0->current_data >=
+ (sizeof (ip4_header_t) + sizeof (udp_header_t)));
+
+ ASSERT (b1->current_data >=
+ (sizeof (ip4_header_t) + sizeof (udp_header_t)));
+
+ body0 = vlib_buffer_get_current (b0);
+ len0 = b0->current_length;
+
+ body1 = vlib_buffer_get_current (b1);
+ len1 = b1->current_length;
+
+ vlib_buffer_advance (b0, -(sizeof (udp_header_t)));
+ udp0 = vlib_buffer_get_current (b0);
+ vlib_buffer_advance (b0, -(sizeof (ip4_header_t)));
+ ip0 = vlib_buffer_get_current (b0);
+
+ vlib_buffer_advance (b1, -(sizeof (udp_header_t)));
+ udp1 = vlib_buffer_get_current (b1);
+ vlib_buffer_advance (b1, -(sizeof (ip4_header_t)));
+ ip1 = vlib_buffer_get_current (b1);
+
+ /* Check icn packets, locate names */
+ if (cicn_parse_pkt (body0, len0, &pkt_type0, &msg_type0,
+ &name0, &namelen0, &pkt_hdr_desc0) == AOK)
+ {
+
+ if (PREDICT_TRUE ((pkt_type0 == CICN_PKT_TYPE_INTEREST) &&
+ (msg_type0 == CICN_MSG_TYPE_INTEREST)))
+ {
+ /* Increment the appropriate message counter */
+ interest_msgs_generated++;
+
+ /* Stuff the counter value into the last name-comp */
+ C_PUTINT64 ((name0 + namelen0 - 8), namecounter);
+ namecounter += 1LL;
+
+		  /* Rewrite and send */
+ ip0->src_address.as_u32 = sm->pgen_clt_src_addr;
+ ip0->dst_address.as_u32 = sm->pgen_clt_dest_addr;
+
+ ip0->checksum = ip4_header_checksum (ip0);
+
+ udp0->src_port = sm->pgen_clt_src_port;
+ udp0->dst_port = sm->pgen_clt_dest_port;
+ udp0->checksum = 0;
+
+ next0 = ICNPG_NEXT_LOOKUP;
+ }
+ else if (PREDICT_TRUE ((pkt_type0 == CICN_PKT_TYPE_CONTENT) &&
+ (msg_type0 == CICN_MSG_TYPE_CONTENT)))
+ {
+ /* If we receive a content message, increment a counter */
+ content_msgs_received++;
+ }
+ else if (PREDICT_TRUE ((pkt_type0 == CICN_PKT_TYPE_NAK)))
+ {
+ /* If we receive a NACK, just increment a counter */
+ nacks_received++;
+ }
+ }
+
+ if (cicn_parse_pkt (body1, len1, &pkt_type1, &msg_type1,
+ &name1, &namelen1, &pkt_hdr_desc1) == AOK)
+ {
+ if (PREDICT_TRUE ((pkt_type1 == CICN_PKT_TYPE_INTEREST) &&
+ (msg_type1 == CICN_MSG_TYPE_INTEREST)))
+ {
+ /* Increment the appropriate message counter */
+ interest_msgs_generated++;
+
+ /* Stuff the counter value into the last name-comp */
+ C_PUTINT64 ((name1 + namelen1 - 8), namecounter);
+ namecounter += 1LL;
+
+		  /* Rewrite and send */
+ ip1->src_address.as_u32 = sm->pgen_clt_src_addr;
+ ip1->dst_address.as_u32 = sm->pgen_clt_dest_addr;
+
+		  ip1->checksum = ip4_header_checksum (ip1);
+
+ udp1->src_port = sm->pgen_clt_src_port;
+ udp1->dst_port = sm->pgen_clt_dest_port;
+ udp1->checksum = 0;
+
+ next1 = ICNPG_NEXT_LOOKUP;
+ }
+ else if (PREDICT_TRUE ((pkt_type1 == CICN_PKT_TYPE_CONTENT) &&
+ (msg_type1 == CICN_MSG_TYPE_CONTENT)))
+ {
+ /* If we receive a content message, increment a counter */
+ content_msgs_received++;
+ }
+ else if (PREDICT_TRUE ((pkt_type1 == CICN_PKT_TYPE_NAK)))
+ {
+ /* If we receive a NACK, just increment a counter */
+ nacks_received++;
+ }
+ }
+
+ /* Send pkt to next node */
+ vnet_buffer (b0)->sw_if_index[VLIB_TX] = ~0;
+ vnet_buffer (b1)->sw_if_index[VLIB_TX] = ~0;
+
+ pkts_processed += 2;
+
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
+ {
+ if (b0->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ icnpg_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->pkt_type = pkt_type0;
+ t->msg_type = msg_type0;
+ t->sw_if_index = sw_if_index0;
+ t->next_index = next0;
+ }
+ if (b1->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ icnpg_trace_t *t =
+ vlib_add_trace (vm, node, b1, sizeof (*t));
+ t->pkt_type = pkt_type1;
+ t->msg_type = msg_type1;
+ t->sw_if_index = sw_if_index1;
+ t->next_index = next1;
+ }
+ }
+
+ if (next0 == ICNPG_NEXT_DROP)
+ {
+ pkts_dropped++;
+ }
+ if (next1 == ICNPG_NEXT_DROP)
+ {
+ pkts_dropped++;
+ }
+
+ /* verify speculative enqueues, maybe switch current next frame */
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, bi1, next0, next1);
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 next0 = ICNPG_NEXT_DROP;
+ u32 sw_if_index0;
+
+ /* speculatively enqueue b0 to the current next frame */
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+
+ /* We think that the udp code has handed us the payloads,
+ * so we need to walk back to the IP headers
+ */
+ ASSERT (b0->current_data >=
+ (sizeof (ip4_header_t) + sizeof (udp_header_t)));
+
+ body0 = vlib_buffer_get_current (b0);
+ len0 = b0->current_length;
+
+ vlib_buffer_advance (b0, -(sizeof (udp_header_t)));
+ udp0 = vlib_buffer_get_current (b0);
+ vlib_buffer_advance (b0, -(sizeof (ip4_header_t)));
+ ip0 = vlib_buffer_get_current (b0);
+
+ /* Check icn packets, locate names */
+ if (cicn_parse_pkt (body0, len0, &pkt_type0, &msg_type0,
+ &name0, &namelen0, &pkt_hdr_desc0) == AOK)
+ {
+
+ if (PREDICT_TRUE ((pkt_type0 == CICN_PKT_TYPE_INTEREST) &&
+ (msg_type0 == CICN_MSG_TYPE_INTEREST)))
+ {
+ /* Increment the appropriate message counter */
+ interest_msgs_generated++;
+
+ /* Stuff the counter value into the last name-comp */
+ C_PUTINT64 ((name0 + namelen0 - 8), namecounter);
+ namecounter += 1LL;
+
+ /* Rewrite and send */
+ ip0->src_address.as_u32 = sm->pgen_clt_src_addr;
+ ip0->dst_address.as_u32 = sm->pgen_clt_dest_addr;
+
+ ip0->checksum = ip4_header_checksum (ip0);
+
+ udp0->src_port = sm->pgen_clt_src_port;
+ udp0->dst_port = sm->pgen_clt_dest_port;
+ udp0->checksum = 0;
+
+ next0 = ICNPG_NEXT_LOOKUP;
+ }
+ else if (PREDICT_TRUE ((pkt_type0 == CICN_PKT_TYPE_CONTENT) &&
+ (msg_type0 == CICN_MSG_TYPE_CONTENT)))
+ {
+ /* If we receive a content message, increment a counter */
+ content_msgs_received++;
+ }
+ else if (PREDICT_TRUE ((pkt_type0 == CICN_PKT_TYPE_NAK)))
+ {
+ /* If we receive a NACK, just increment a counter */
+ nacks_received++;
+ }
+ }
+
+ /* Send pkt to ip lookup */
+ vnet_buffer (b0)->sw_if_index[VLIB_TX] = ~0;
+
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
+ && (b0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ icnpg_trace_t *t = vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->pkt_type = pkt_type0;
+ t->msg_type = msg_type0;
+ t->sw_if_index = sw_if_index0;
+ t->next_index = next0;
+ }
+
+ pkts_processed += 1;
+
+ if (next0 == ICNPG_NEXT_DROP)
+ {
+ pkts_dropped++;
+ }
+
+ /* verify speculative enqueue, maybe switch current next frame */
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ vlib_node_increment_counter (vm, icn_pg_node.index,
+ ICNPG_ERROR_PROCESSED, pkts_processed);
+ vlib_node_increment_counter (vm, icn_pg_node.index,
+ ICNPG_ERROR_DROPPED, pkts_dropped);
+ vlib_node_increment_counter (vm, icn_pg_node.index,
+ ICNPG_ERROR_INTEREST_MSGS_GENERATED,
+ interest_msgs_generated);
+ vlib_node_increment_counter (vm, icn_pg_node.index,
+ ICNPG_ERROR_CONTENT_MSGS_RECEIVED,
+ content_msgs_received);
+ vlib_node_increment_counter (vm, icn_pg_node.index,
+ ICNPG_ERROR_NACKS_RECEIVED, nacks_received);
+
+ return (frame->n_vectors);
+}
+
+
+VLIB_REGISTER_NODE (icn_pg_node) =
+{
+  .function = icnpg_client_node_fn,
+  .name = "icnpg",
+  .vector_size = sizeof (u32),
+  .format_trace = format_icnpg_trace,
+  .type = VLIB_NODE_TYPE_INTERNAL,
+  .n_errors = ARRAY_LEN (icnpg_error_strings),
+  .error_strings = icnpg_error_strings,
+  .n_next_nodes = ICNPG_N_NEXT,
+  /* edit / add dispositions here */
+  .next_nodes =
+  {
+    [ICNPG_NEXT_LOOKUP] = "ip4-lookup",
+    [ICNPG_NEXT_DROP] = "ip4-drop",
+  },
+};
+
+/*
+ * End of packet-generator client node
+ */
+
+/*
+ * Beginning of packet-generation server node
+ */
+
+/* Registration struct for a graph node */
+vlib_node_registration_t icn_pg_server_node;
+
+/* Stats, which end up called "error" even though they aren't... */
+#define foreach_icnpg_server_error \
+_(PROCESSED, "ICN PG Server packets processed") \
+_(DROPPED, "ICN PG Server packets dropped")
+
+typedef enum
+{
+#define _(sym,str) ICNPG_SERVER_ERROR_##sym,
+ foreach_icnpg_server_error
+#undef _
+ ICNPG_SERVER_N_ERROR,
+} icnpg_server_error_t;
+
+static char *icnpg_server_error_strings[] = {
+#define _(sym,string) string,
+ foreach_icnpg_server_error
+#undef _
+};
+
+/* Next graph nodes, which reference the list in the actual registration
+ * block below
+ */
+typedef enum
+{
+ ICNPG_SERVER_NEXT_LOOKUP,
+ ICNPG_SERVER_NEXT_DROP,
+ ICNPG_SERVER_N_NEXT,
+} icnpg_server_next_t;
+
+/* Trace context struct */
+typedef struct
+{
+ u32 next_index;
+ u32 sw_if_index;
+ u8 pkt_type;
+ u16 msg_type;
+} icnpg_server_trace_t;
+
+/* packet trace format function */
+static u8 *
+format_icnpg_server_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ icnpg_server_trace_t *t = va_arg (*args, icnpg_server_trace_t *);
+
+ s =
+ format (s, "ICNPG SERVER: pkt: %d, msg %d, sw_if_index %d, next index %d",
+ (int) t->pkt_type, (int) t->msg_type, t->sw_if_index,
+ t->next_index);
+ return (s);
+}
+
+/*
+ * Node function for the icn packet-generator server.
+ */
+static uword
+icnpg_node_server_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ u32 n_left_from, *from, *to_next;
+ icnpg_server_next_t next_index;
+ u32 pkts_processed = 0, pkts_dropped = 0;
+ u32 bi0, bi1;
+ vlib_buffer_t *b0, *b1;
+ u8 pkt_type0 = 0, pkt_type1 = 0;
+ cicn_pkt_hdr_desc_t pkt_hdr_desc0, pkt_hdr_desc1;
+ u16 msg_type0 = 0, msg_type1 = 0;
+ u8 *body0, *body1;
+ u32 len0, len1;
+ udp_header_t *udp0, *udp1;
+ ip4_header_t *ip0, *ip1;
+ uint8_t *name0, *name1;
+ uint32_t namelen0, namelen1;
+ cicn_main_t *sm = &cicn_main;
+
+ from = vlib_frame_vector_args (frame);
+
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ u32 next0 = ICNPG_SERVER_NEXT_DROP;
+ u32 next1 = ICNPG_SERVER_NEXT_DROP;
+ u32 sw_if_index0, sw_if_index1;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t *p2, *p3;
+
+ p2 = vlib_get_buffer (vm, from[2]);
+ p3 = vlib_get_buffer (vm, from[3]);
+
+ vlib_prefetch_buffer_header (p2, LOAD);
+ vlib_prefetch_buffer_header (p3, LOAD);
+
+ CLIB_PREFETCH (p2->data, (2 * CLIB_CACHE_LINE_BYTES), STORE);
+ CLIB_PREFETCH (p3->data, (2 * CLIB_CACHE_LINE_BYTES), STORE);
+ }
+
+ /* speculatively enqueue b0 and b1 to the current next frame */
+ to_next[0] = bi0 = from[0];
+ to_next[1] = bi1 = from[1];
+ from += 2;
+ to_next += 2;
+ n_left_from -= 2;
+ n_left_to_next -= 2;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+ sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
+
+ /* We think that the udp code has handed us the payloads,
+ * so we need to walk back to the IP headers
+ */
+ ASSERT (b0->current_data >=
+ (sizeof (ip4_header_t) + sizeof (udp_header_t)));
+
+ ASSERT (b1->current_data >=
+ (sizeof (ip4_header_t) + sizeof (udp_header_t)));
+
+ body0 = vlib_buffer_get_current (b0);
+ len0 = b0->current_length;
+
+ body1 = vlib_buffer_get_current (b1);
+ len1 = b1->current_length;
+
+ /* Check icn packets, locate names */
+ if (cicn_parse_pkt (body0, len0, &pkt_type0, &msg_type0,
+ &name0, &namelen0, &pkt_hdr_desc0) == AOK)
+ {
+
+ if (PREDICT_TRUE ((pkt_type0 == CICN_PKT_TYPE_INTEREST) &&
+ (msg_type0 == CICN_MSG_TYPE_INTEREST)))
+ {
+
+ /* Change message and packet from Interest to Content */
+ *(body0 + 1) = CICN_PKT_TYPE_CONTENT;
+ C_PUTINT16 (body0 + 8, CICN_MSG_TYPE_CONTENT);
+
+ vlib_buffer_t *rb = NULL;
+ rb = vlib_get_buffer (vm, sm->pgen_svr_buffer_idx);
+
+ /* Get the packet length */
+ uint16_t pkt_len;
+ C_GETINT16 (pkt_len, body0 + 2);
+
+ /* Figure out how many bytes we can add to the content
+ *
+ * Rule of thumb: We want the size of the IP packet
+ * to be <= 1400 bytes
+ */
+ u16 bytes_to_copy = rb->current_length;
+ if (bytes_to_copy + pkt_len +
+ sizeof (udp_header_t) + sizeof (ip4_header_t) > 1400)
+ {
+		      bytes_to_copy = 1400 - pkt_len - sizeof (ip4_header_t) -
+			sizeof (udp_header_t);
+ }
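+		  /*
+		   * Worked example (illustrative): for a 100-byte Interest
+		   * and a large content buffer, bytes_to_copy is clamped to
+		   * 1400 - 100 - 20 (IPv4) - 8 (UDP) = 1272, keeping the
+		   * resulting IP packet at or below 1400 bytes.
+		   */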
+
+ /* Add content to the data packet */
+ u32 index = vlib_buffer_add_data (sm->vlib_main,
+ b0->free_list_index, bi0,
+ rb->data,
+ bytes_to_copy);
+
+ b0 = vlib_get_buffer (vm, index);
+ body0 = vlib_buffer_get_current (b0);
+
+ // Update the length of the message
+ uint16_t msg_len;
+ C_GETINT16 (msg_len, body0 + 10);
+ C_PUTINT16 (body0 + 10, msg_len + bytes_to_copy);
+
+ // Update the length of the packet
+ C_PUTINT16 (body0 + 2, pkt_len + bytes_to_copy);
+
+ vlib_buffer_advance (b0, -(sizeof (udp_header_t)));
+ udp0 = vlib_buffer_get_current (b0);
+ vlib_buffer_advance (b0, -(sizeof (ip4_header_t)));
+ ip0 = vlib_buffer_get_current (b0);
+
+ /* Rewrite and send */
+ u32 src_addr = ip0->src_address.as_u32;
+ ip0->src_address.as_u32 = ip0->dst_address.as_u32;
+ ip0->dst_address.as_u32 = src_addr;
+
+ udp0->length =
+ clib_host_to_net_u16 (vlib_buffer_length_in_chain
+ (sm->vlib_main,
+ b0) - sizeof (ip4_header_t));
+
+ ip0->length =
+ clib_host_to_net_u16 (vlib_buffer_length_in_chain
+ (sm->vlib_main, b0));
+
+ ip0->checksum = ip4_header_checksum (ip0);
+
+ u16 src_port = udp0->src_port;
+ udp0->src_port = udp0->dst_port;
+ udp0->dst_port = src_port;
+ udp0->checksum = 0;
+
+ next0 = ICNPG_SERVER_NEXT_LOOKUP;
+ }
+ }
+
+ if (cicn_parse_pkt (body1, len1, &pkt_type1, &msg_type1,
+ &name1, &namelen1, &pkt_hdr_desc1) == AOK)
+ {
+ if (PREDICT_TRUE ((pkt_type1 == CICN_PKT_TYPE_INTEREST) &&
+ (msg_type1 == CICN_MSG_TYPE_INTEREST)))
+ {
+
+ /* Change message and packet types from Interest to Content */
+ *(body1 + 1) = CICN_PKT_TYPE_CONTENT;
+ C_PUTINT16 (body1 + 8, CICN_MSG_TYPE_CONTENT);
+
+ vlib_buffer_t *rb = NULL;
+ rb = vlib_get_buffer (vm, sm->pgen_svr_buffer_idx);
+
+ /* Get the packet length */
+ uint16_t pkt_len;
+ C_GETINT16 (pkt_len, body1 + 2);
+
+ /* Figure out how many bytes we can add to the content
+ *
+ * Rule of thumb: We want the size of the IP packet
+ * to be <= 1400 bytes
+ */
+ u16 bytes_to_copy = rb->current_length;
+ if (bytes_to_copy + pkt_len +
+ sizeof (udp_header_t) + sizeof (ip4_header_t) > 1400)
+ {
+		      bytes_to_copy = 1400 - pkt_len - sizeof (ip4_header_t) -
+			sizeof (udp_header_t);
+ }
+
+ /* Add content to the data packet */
+ u32 index = vlib_buffer_add_data (sm->vlib_main,
+ b1->free_list_index, bi1,
+ rb->data,
+ bytes_to_copy);
+
+ b1 = vlib_get_buffer (vm, index);
+ body1 = vlib_buffer_get_current (b1);
+
+ // Update the length of the message
+ uint16_t msg_len;
+ C_GETINT16 (msg_len, body1 + 10);
+ C_PUTINT16 (body1 + 10, msg_len + bytes_to_copy);
+
+ // Update the length of the packet
+ C_PUTINT16 (body1 + 2, pkt_len + bytes_to_copy);
+
+ vlib_buffer_advance (b1, -(sizeof (udp_header_t)));
+ udp1 = vlib_buffer_get_current (b1);
+ vlib_buffer_advance (b1, -(sizeof (ip4_header_t)));
+ ip1 = vlib_buffer_get_current (b1);
+
+ /* Rewrite and send */
+ u32 src_addr = ip1->src_address.as_u32;
+ ip1->src_address.as_u32 = ip1->dst_address.as_u32;
+ ip1->dst_address.as_u32 = src_addr;
+
+ udp1->length =
+ clib_host_to_net_u16 (vlib_buffer_length_in_chain
+ (sm->vlib_main,
+ b1) - sizeof (ip4_header_t));
+
+ ip1->length =
+ clib_host_to_net_u16 (vlib_buffer_length_in_chain
+ (sm->vlib_main, b1));
+
+ ip1->checksum = ip4_header_checksum (ip1);
+
+ u16 src_port = udp1->src_port;
+ udp1->src_port = udp1->dst_port;
+ udp1->dst_port = src_port;
+ udp1->checksum = 0;
+
+ next1 = ICNPG_SERVER_NEXT_LOOKUP;
+ }
+ }
+
+ /* Send pkt to next node */
+ vnet_buffer (b0)->sw_if_index[VLIB_TX] = ~0;
+ vnet_buffer (b1)->sw_if_index[VLIB_TX] = ~0;
+
+ pkts_processed += 2;
+
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
+ {
+ if (b0->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ icnpg_server_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->pkt_type = pkt_type0;
+ t->msg_type = msg_type0;
+ t->sw_if_index = sw_if_index0;
+ t->next_index = next0;
+ }
+ if (b1->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ icnpg_server_trace_t *t =
+ vlib_add_trace (vm, node, b1, sizeof (*t));
+ t->pkt_type = pkt_type1;
+ t->msg_type = msg_type1;
+ t->sw_if_index = sw_if_index1;
+ t->next_index = next1;
+ }
+ }
+
+ if (next0 == ICNPG_SERVER_NEXT_DROP)
+ {
+ pkts_dropped++;
+ }
+ if (next1 == ICNPG_SERVER_NEXT_DROP)
+ {
+ pkts_dropped++;
+ }
+
+ /* verify speculative enqueues, maybe switch current next frame */
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, bi1, next0, next1);
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 next0 = ICNPG_SERVER_NEXT_DROP;
+ u32 sw_if_index0;
+
+ /* speculatively enqueue b0 to the current next frame */
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+
+ /* We think that the udp code has handed us the payloads,
+ * so we need to walk back to the IP headers
+ */
+ ASSERT (b0->current_data >=
+ (sizeof (ip4_header_t) + sizeof (udp_header_t)));
+
+ body0 = vlib_buffer_get_current (b0);
+ len0 = b0->current_length;
+
+ /* Check icn packets, locate names */
+ if (cicn_parse_pkt (body0, len0, &pkt_type0, &msg_type0,
+ &name0, &namelen0, &pkt_hdr_desc0) == AOK)
+ {
+
+ if (PREDICT_TRUE ((pkt_type0 == CICN_PKT_TYPE_INTEREST) &&
+ (msg_type0 == CICN_MSG_TYPE_INTEREST)))
+ {
+
+ /* Change message and packet types from Interest to Content */
+ *(body0 + 1) = CICN_PKT_TYPE_CONTENT;
+ C_PUTINT16 (body0 + 8, CICN_MSG_TYPE_CONTENT);
+
+ vlib_buffer_t *rb = NULL;
+ rb = vlib_get_buffer (vm, sm->pgen_svr_buffer_idx);
+
+ /* Get the packet length */
+ uint16_t pkt_len;
+ C_GETINT16 (pkt_len, body0 + 2);
+
+ /* Figure out how many bytes we can add to the content
+ *
+ * Rule of thumb: We want the size of the IP packet
+ * to be <= 1400 bytes
+ */
+ u16 bytes_to_copy = rb->current_length;
+ if (bytes_to_copy + pkt_len +
+ sizeof (udp_header_t) + sizeof (ip4_header_t) > 1400)
+ {
+		      bytes_to_copy = 1400 - pkt_len - sizeof (ip4_header_t) -
+			sizeof (udp_header_t);
+ }
+
+ /* Add content to the data packet */
+ u32 index = vlib_buffer_add_data (sm->vlib_main,
+ b0->free_list_index, bi0,
+ rb->data,
+ bytes_to_copy);
+
+ b0 = vlib_get_buffer (vm, index);
+ body0 = vlib_buffer_get_current (b0);
+
+ // Update the length of the message
+ uint16_t msg_len;
+ C_GETINT16 (msg_len, body0 + 10);
+ C_PUTINT16 (body0 + 10, msg_len + bytes_to_copy);
+
+ // Update the length of the packet
+ C_PUTINT16 (body0 + 2, pkt_len + bytes_to_copy);
+
+ vlib_buffer_advance (b0, -(sizeof (udp_header_t)));
+ udp0 = vlib_buffer_get_current (b0);
+ vlib_buffer_advance (b0, -(sizeof (ip4_header_t)));
+ ip0 = vlib_buffer_get_current (b0);
+
+ /* Rewrite and send */
+ u32 src_addr = ip0->src_address.as_u32;
+ ip0->src_address.as_u32 = ip0->dst_address.as_u32;
+ ip0->dst_address.as_u32 = src_addr;
+ udp0->length =
+ clib_host_to_net_u16 (vlib_buffer_length_in_chain
+ (sm->vlib_main,
+ b0) - sizeof (ip4_header_t));
+
+ ip0->length =
+ clib_host_to_net_u16 (vlib_buffer_length_in_chain
+ (sm->vlib_main, b0));
+
+ ip0->checksum = ip4_header_checksum (ip0);
+
+ u16 src_port = udp0->src_port;
+ udp0->src_port = udp0->dst_port;
+ udp0->dst_port = src_port;
+ udp0->checksum = 0;
+
+ next0 = ICNPG_SERVER_NEXT_LOOKUP;
+ }
+ }
+
+ /* Send pkt to ip lookup */
+ vnet_buffer (b0)->sw_if_index[VLIB_TX] = ~0;
+
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
+ && (b0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ icnpg_server_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->pkt_type = pkt_type0;
+ t->msg_type = msg_type0;
+ t->sw_if_index = sw_if_index0;
+ t->next_index = next0;
+ }
+
+ pkts_processed += 1;
+
+ if (next0 == ICNPG_SERVER_NEXT_DROP)
+ {
+ pkts_dropped++;
+ }
+
+ /* verify speculative enqueue, maybe switch current next frame */
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ vlib_node_increment_counter (vm, icn_pg_server_node.index,
+ ICNPG_SERVER_ERROR_PROCESSED, pkts_processed);
+ vlib_node_increment_counter (vm, icn_pg_server_node.index,
+ ICNPG_SERVER_ERROR_DROPPED, pkts_dropped);
+
+ return (frame->n_vectors);
+}
+
+VLIB_REGISTER_NODE (icn_pg_server_node) =
+{
+  .function = icnpg_node_server_fn,
+  .name = "icnpg-server",
+  .vector_size = sizeof (u32),
+  .format_trace = format_icnpg_server_trace,
+  .type = VLIB_NODE_TYPE_INTERNAL,
+  .n_errors = ARRAY_LEN (icnpg_server_error_strings),
+  .error_strings = icnpg_server_error_strings,
+  .n_next_nodes = ICNPG_SERVER_N_NEXT,
+  /* edit / add dispositions here */
+  .next_nodes =
+  {
+    [ICNPG_SERVER_NEXT_LOOKUP] = "ip4-lookup",
+    [ICNPG_SERVER_NEXT_DROP] = "ip4-drop",
+  },
+};
+
+/*
+ * End of packet-generator server node
+ */
diff --git a/cicn-plugin/cicn/cicn_rte_mbuf.h b/cicn-plugin/cicn/cicn_rte_mbuf.h
new file mode 100644
index 00000000..43b14506
--- /dev/null
+++ b/cicn-plugin/cicn/cicn_rte_mbuf.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Part of cicn plugin's dpdk/rte shim layer for using dpdk/rte mechanisms
+ * directly while hiding that fact from the bulk of the cicn plugin code.
+ * - cicn plugin should not be looking at dpdk headers and should not need
+ *   to. As of v17.01, however, buffer cloning to support 0-copy on
+ *   - content message replication
+ *   - content message transmission based on CS hits
+ *   is only available with dpdk, hence those mechanisms are used
+ *   by the cicn plugin.
+ * - when vlib_buffer cloning support is provided, this shim layer
+ * can be deprecated/deleted, and cicn plugin will be simpler and will
+ * be able to run with a vpp that does not include dpdk.
+ * This is a shim file for including dpdk (rte) headers.
+ * - The reason for this header shim (in addition to
+ *   cicn_rte_mbuf_inlines.h, which has the relevant code) is that,
+ *   as of v17.01, directly including dpdk rte headers works poorly due to
+ *   conflicts between dpdk headers and vpp headers.
+ */
+#ifndef _CICN_RTE_MBUF_H_
+#define _CICN_RTE_MBUF_H_ 1
+
+#include "cicn_params.h"
+
+#if !CICN_VPP_PLUGIN
+#error "cicn-internal file included externally"
+#endif
+
+#if !CICN_FEATURE_VPP_VLIB_CLONING // waiting for this API to cut over
+#include <rte_config.h>
+
+#include <rte_common.h>
+#include <rte_log.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_launch.h>
+#include <rte_atomic.h>
+#include <rte_cycles.h>
+#include <rte_prefetch.h>
+#include <rte_lcore.h>
+#include <rte_per_lcore.h>
+#include <rte_branch_prediction.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_random.h>
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#endif // CICN_FEATURE_VPP_VLIB_CLONING
+#endif // _CICN_RTE_MBUF_H_
diff --git a/cicn-plugin/cicn/cicn_rte_mbuf_inlines.h b/cicn-plugin/cicn/cicn_rte_mbuf_inlines.h
new file mode 100644
index 00000000..caf70043
--- /dev/null
+++ b/cicn-plugin/cicn/cicn_rte_mbuf_inlines.h
@@ -0,0 +1,451 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Part of cicn plugin's dpdk/rte shim layer for using dpdk/rte mechanisms
+ * directly while hiding that fact from the bulk of the cicn plugin code.
+ * - cicn plugin should not be looking at dpdk headers and should not need
+ *   to. As of v17.01, however, buffer cloning to support 0-copy on
+ *   - content message replication
+ *   - content message transmission based on CS hits
+ *   is only available with dpdk, hence those mechanisms are used
+ *   by the cicn plugin.
+ * - when vlib_buffer cloning support is provided, this shim layer
+ * can be deprecated/deleted, and cicn plugin will be simpler and will
+ * be able to run with a vpp that does not include dpdk.
+ * This file contains the code to use dpdk "struct rte_mbuf *" buffer
+ * headers for 0-copy cloning of content messages that are in CS, while
+ * hiding these references from the cicn plugin main code.
+ */
+#ifndef _CICN_RTE_MBUF_INLINES_H_
+#define _CICN_RTE_MBUF_INLINES_H_ 1
+
+#if !CICN_VPP_PLUGIN
+#error "cicn-internal file included externally"
+#endif
+
+#include <cicn/cicn_rte_mbuf.h>
+#include <vlib/vlib.h>
+
+/*
+ * Wrapper for buffer allocation that returns pointer rather than index
+ */
+static inline vlib_buffer_t *
+cicn_infra_vlib_buffer_alloc (vlib_main_t * vm, vlib_buffer_free_list_t * fl,
+ unsigned socket_id,
+ cicn_face_db_entry_t * outface)
+{
+ vlib_buffer_t *b0;
+ u32 bi0;
+ if (vlib_buffer_alloc (vm, &bi0, 1) != 1)
+ {
+ b0 = 0;
+ goto done;
+ }
+ b0 = vlib_get_buffer (vm, bi0);
+
+done:
+ return (b0);
+}
+
+/*
+ * Wrapper for buffer free that uses pointer rather than index
+ */
+static inline void
+cicn_infra_vlib_buffer_free (vlib_buffer_t * b0, vlib_main_t * vm,
+ cicn_face_db_entry_t * outface)
+{
+ u32 bi0 = vlib_get_buffer_index (vm, b0);
+ vlib_buffer_free_one (vm, bi0);
+}
+
+#if CICN_FEATURE_VPP_VLIB_CLONING // to cut over, need API from vpp gerrit 4872
+/*
+ * Long-term, vlib_buffer_clone() API will be supported and
+ * the cicn_rte_mbuf*.h files and all references to rte_mbuf can be removed from
+ * cicn plugin, which will then perform better and be linkable with vpp-lite.
+ *
+ * For a brief interim, can leave this file but
+ * with #define CICN_FEATURE_VPP_VLIB_CLONING 1
+ * Some code below (e.g. cicn_infra_vlib_buffer_clone_attach_finalize()
+ * contents) must be moved to node.c.
+ *
+ * See comments on alternate definition under !CICN_FEATURE_VPP_VLIB_CLONING
+ */
+
+/*
+ * not used if rte not used.
+ */
+static inline unsigned
+cicn_infra_rte_socket_id (void)
+{
+ return (0);
+}
+
+static inline void
+cicn_infra_vlib_buffer_cs_prep_finalize (vlib_main_t * vm,
+ vlib_buffer_t * cs_b0)
+{
+ // No action
+}
+
+static inline vlib_buffer_t *
+cicn_infra_vlib_buffer_clone (vlib_buffer_t * src_b0, vlib_main_t * vm,
+ vlib_buffer_free_list_t * fl,
+ unsigned socket_id,
+ cicn_face_db_entry_t * outface)
+{
+ return (vlib_buffer_clone (src_b0));
+}
+
+/*
+ * Force dpdk drivers to rewalk chain that has been changed
+ */
+static inline void
+cicn_infra_vlib_buffer_clone_attach_finalize (vlib_buffer_t * hdr_b0,
+ vlib_buffer_t * clone_b0)
+{
+ // no action
+}
+#else // !CICN_FEATURE_VPP_VLIB_CLONING
+
+/*
+ * Replacement for rte_mempool_get_bulk():
+ * - rte_mempool_get_bulk() does not coexist with vlib_buffer_free(): vpp
+ * runs out of buffers (even when only 1 buffer is being allocated per call).
+ * - this replacement instead calls vlib_buffer_alloc(), which does coexist
+ * with vlib_buffer_free().
+ */
+static inline int
+cicn_infra_pvt_rte_mempool_get_bulk (vlib_main_t * vm,
+ struct rte_mempool *rmp,
+ void **rte_mbufs, u32 new_bufs)
+{
+ u32 bi_bufs[5];
+
+ int i;
+ ASSERT (new_bufs <= ARRAY_LEN (bi_bufs));
+
+ if (vlib_buffer_alloc (vm, bi_bufs, new_bufs) != new_bufs)
+ {
+ return -ENOENT;
+ }
+ for (i = 0; i < new_bufs; i++)
+ {
+ vlib_buffer_t *b0 = vlib_get_buffer (vm, bi_bufs[i]);
+ rte_mbufs[i] = rte_mbuf_from_vlib_buffer (b0);
+ }
+ return (0);
+}
+
+// #include <vnet/dpdk_replication.h> // copied/modified below
+
+/*
+ * Modified copy of .../vpp/vnet/vnet/dpdk_replication.h:
+ * - maintain foreign indentation for easier comparison
+ * - call cicn_infra_pvt_rte_mempool_get_bulk() in place of calling
+ * rte_mempool_get_bulk(), avoiding the issue described at
+ * cicn_infra_pvt_rte_mempool_get_bulk(), above.
+ */
+static inline vlib_buffer_t *
+cicn_infra_pvt_vlib_dpdk_copy_buffer (vlib_main_t * vm, vlib_buffer_t * b)
+{
+ u32 new_buffers_needed = 1;
+ unsigned socket_id = rte_socket_id ();
+ struct rte_mempool *rmp = vm->buffer_main->pktmbuf_pools[socket_id];
+ struct rte_mbuf *rte_mbufs[5];
+ vlib_buffer_free_list_t *fl;
+ vlib_buffer_t *rv;
+ u8 *copy_src, *copy_dst;
+ vlib_buffer_t *src_buf, *dst_buf;
+
+ fl = vlib_buffer_get_free_list (vm, VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
+
+ if (PREDICT_FALSE (b->flags & VLIB_BUFFER_NEXT_PRESENT))
+ {
+ vlib_buffer_t *tmp = b;
+ int i;
+
+ while (tmp->flags & VLIB_BUFFER_NEXT_PRESENT)
+ {
+ new_buffers_needed++;
+ tmp = vlib_get_buffer (vm, tmp->next_buffer);
+ }
+
+ /* Should never happen... */
+ if (PREDICT_FALSE (new_buffers_needed > ARRAY_LEN (rte_mbufs)))
+ {
+ clib_warning ("need %d buffers", new_buffers_needed);
+ return 0;
+ }
+
+#if 0 // bug workaround: vlib_buffer_free() of these does not work right
+ if (rte_mempool_get_bulk (rmp, (void **) rte_mbufs,
+ new_buffers_needed) < 0)
+ return 0;
+#else
+ if (cicn_infra_pvt_rte_mempool_get_bulk (vm, rmp, (void **) rte_mbufs,
+ new_buffers_needed) < 0)
+ return 0;
+#endif
+
+ src_buf = b;
+ rv = dst_buf = vlib_buffer_from_rte_mbuf (rte_mbufs[0]);
+ vlib_buffer_init_for_free_list (dst_buf, fl);
+ copy_src = b->data + src_buf->current_data;
+ copy_dst = dst_buf->data + src_buf->current_data;
+
+ for (i = 0; i < new_buffers_needed; i++)
+ {
+	  clib_memcpy (copy_dst, copy_src, src_buf->current_length);
+ dst_buf->current_data = src_buf->current_data;
+ dst_buf->current_length = src_buf->current_length;
+ dst_buf->flags = src_buf->flags;
+
+ if (i == 0)
+ {
+ dst_buf->total_length_not_including_first_buffer =
+ src_buf->total_length_not_including_first_buffer;
+ vnet_buffer (dst_buf)->sw_if_index[VLIB_RX] =
+ vnet_buffer (src_buf)->sw_if_index[VLIB_RX];
+ vnet_buffer (dst_buf)->sw_if_index[VLIB_TX] =
+ vnet_buffer (src_buf)->sw_if_index[VLIB_TX];
+ vnet_buffer (dst_buf)->l2 = vnet_buffer (b)->l2;
+ }
+
+ if (i < new_buffers_needed - 1)
+ {
+ src_buf = vlib_get_buffer (vm, src_buf->next_buffer);
+ dst_buf = vlib_buffer_from_rte_mbuf (rte_mbufs[i + 1]);
+ vlib_buffer_init_for_free_list (dst_buf, fl);
+ copy_src = src_buf->data;
+ copy_dst = dst_buf->data;
+ }
+ }
+ return rv;
+ }
+
+#if 0 // bug workaround: vlib_buffer_free() of these does not work right
+ if (rte_mempool_get_bulk (rmp, (void **) rte_mbufs, 1) < 0)
+ return 0;
+#else
+ if (cicn_infra_pvt_rte_mempool_get_bulk (vm, rmp, (void **) rte_mbufs, 1) <
+ 0)
+ return 0;
+#endif
+
+ rv = vlib_buffer_from_rte_mbuf (rte_mbufs[0]);
+ vlib_buffer_init_for_free_list (rv, fl);
+
+ clib_memcpy (rv->data + b->current_data, b->data + b->current_data,
+ b->current_length);
+ rv->current_data = b->current_data;
+ rv->current_length = b->current_length;
+ vnet_buffer (rv)->sw_if_index[VLIB_RX] =
+ vnet_buffer (b)->sw_if_index[VLIB_RX];
+ vnet_buffer (rv)->sw_if_index[VLIB_TX] =
+ vnet_buffer (b)->sw_if_index[VLIB_TX];
+ vnet_buffer (rv)->l2 = vnet_buffer (b)->l2;
+
+ return (rv);
+}
+
+/*
+ * Callers could call rte_socket_id() wherever needed, but it is not clear
+ * how expensive that is. For now, export this wrapper so callers can fetch
+ * the value once and cache it.
+ */
+static inline unsigned
+cicn_infra_rte_socket_id (void)
+{
+ return (rte_socket_id ());
+}
+
+/*
+ * For CS prep, update rte_mbuf fields to correspond to vlib_buffer fields.
+ * (Probably could be skipped for non-dpdk drivers that must use copying.)
+ */
+static inline void
+cicn_infra_vlib_buffer_cs_prep_finalize (vlib_main_t * vm,
+ vlib_buffer_t * cs_b0)
+{
+ /* Adjust the dpdk buffer header, so we can use this copy for
+ * future cache hits.
+   * - if the dpdk buffer header is invalid (e.g. content msg arrived on a
+   *   veth intfc), initialize it.
+   * - effectively, advance the mbuf past the incoming IP and UDP headers,
+ * so that the buffer points to the start of the ICN payload that is
+ * to be replicated.
+ */
+ struct rte_mbuf *cs_mb0;
+ i16 delta;
+
+ cs_mb0 = rte_mbuf_from_vlib_buffer (cs_b0);
+ if ((cs_b0->flags & VNET_BUFFER_RTE_MBUF_VALID) == 0)
+ {
+ rte_pktmbuf_reset (cs_mb0);
+ }
+
+ delta = vlib_buffer_length_in_chain (vm, cs_b0) - (i16) (cs_mb0->pkt_len);
+
+ cs_mb0->data_len += delta;
+ cs_mb0->pkt_len += delta;
+ cs_mb0->data_off = (RTE_PKTMBUF_HEADROOM + cs_b0->current_data);
+}
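+
+/*
+ * Worked example (for illustration only): if the vlib chain currently holds
+ * 1200 bytes of ICN payload and the rte_mbuf header was just reset
+ * (pkt_len 0), delta above is 1200, so data_len and pkt_len both become
+ * 1200 and data_off is set to RTE_PKTMBUF_HEADROOM + current_data, i.e. the
+ * mbuf is advanced to the start of the ICN payload to be replicated.
+ */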
+
+/*
+ * Wrapper for buffer "cloning" that uses
+ * - rte_mbuf buffer cloning for dpdk drivers that support cloning
+ * - vlib buffer copying for non-dpdk drivers that must use copying.
+ *
+ * CICN multicast support from vpp is currently problematic.
+ * Three mechanisms are on offer; CICN currently uses [1] for physical
+ * output faces and [3] for virtual output faces:
+ * 1. rte_mbuf's rte_pktmbuf_clone()
+ * - advantages
+ * - PIT deaggregation (multicast) case
+ * - high-performance creation of clone chains (relying on
+ * reference-counting mechanism)
+ * - avoids copying
+ * - allows parallel transmission
+ * - CS hit case
+ * - allows modular handling of sending content and deleting CS entries
+ * (relying on reference counting mechanism)
+ * - disadvantages
+ * - requires allocating indirect buffers, which has a cost even
+ * without copying (but Content messages are generally large)
+ * - rte_pktmbufs are a DPDK mechanism
+ * - not supported by non-DPDK (i.e. virtual) drivers
+ * - not supported by vpp-lite, which is used for unit test
+ * 2. recycling-based replication (recirculation)
+ * - advantages
+ * - avoids copying
+ * - currently approved by vpp team
+ * - disadvantages
+ * - increased latency since need to transmit copies serially since
+ * only one buffer
+ *       - mechanism not yet fully supported: the notification that
+ *         transmission <n> has completed (and that recycling for
+ *         transmission <n+1> may start) is not delivered on transmission
+ *         completion, but only on the next transmission on that interface
+ * 3. cicn_infra_pvt_vlib_dpdk_copy_buffer (was vlib_dpdk_clone_buffer())
+ * - advantages
+ * - works in both cases, for all drivers
+ * - disadvantages
+ * - slow, due to copying
+ */
+static inline vlib_buffer_t *
+cicn_infra_vlib_buffer_clone (vlib_buffer_t * src_b0, vlib_main_t * vm,
+ vlib_buffer_free_list_t * fl,
+ unsigned socket_id,
+ cicn_face_db_entry_t * outface)
+{
+ vlib_buffer_t *dst_b0;
+
+ if (outface->swif_cloning_supported)
+ {
+ vlib_buffer_main_t *bm = vm->buffer_main;
+ struct rte_mbuf *src_mb0 = rte_mbuf_from_vlib_buffer (src_b0);
+ struct rte_mbuf *dst_mb0;
+ dst_mb0 = rte_pktmbuf_clone (src_mb0, bm->pktmbuf_pools[socket_id]);
+ if (dst_mb0 == 0)
+ {
+ dst_b0 = 0;
+ goto done;
+ }
+
+ // rte_mbuf_clone uses rte_mbuf (dpdk) buffer header:
+ // copy relevant value to vlib_buffer_t header
+ dst_b0 = vlib_buffer_from_rte_mbuf (dst_mb0);
+ vlib_buffer_init_for_free_list (dst_b0, fl);
+ ASSERT (dst_b0->current_data == 0);
+ dst_b0->current_data = src_b0->current_data;
+ dst_b0->current_length = dst_mb0->data_len;
+ }
+ else
+ {
+ dst_b0 = cicn_infra_pvt_vlib_dpdk_copy_buffer (vm, src_b0);
+ if (dst_b0 == 0)
+ {
+ goto done;
+ }
+ }
+
+ //TODO: af_packet device.c chain walker ignores VLIB_BUFFER_NEXT_PRESENT
+ // clear next_buffer to maintain buffer sanity
+ ASSERT ((dst_b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0);
+ if (!(dst_b0->flags & VLIB_BUFFER_NEXT_PRESENT))
+ {
+ dst_b0->next_buffer = 0;
+ }
+ ASSERT ((dst_b0->flags & VNET_BUFFER_RTE_MBUF_VALID) == 0);
+
+done:
+ return (dst_b0);
+}
+
+/*
+ * For clone attach, vlib_buffer chain is being changed, invalidating
+ * rte_mbuf chain (if present). Update the rte_mbuf chain information to
+ * be valid.
+ */
+static inline void
+cicn_infra_vlib_buffer_clone_attach_finalize (vlib_buffer_t * hdr_b0,
+ vlib_buffer_t * clone_b0,
+ cicn_face_db_entry_t * outface)
+{
+ struct rte_mbuf *hdr_mb0;
+ struct rte_mbuf *clone_mb0;
+ int hdr_rte_mbuf_valid;
+
+ hdr_mb0 = rte_mbuf_from_vlib_buffer (hdr_b0);
+ clone_mb0 = rte_mbuf_from_vlib_buffer (clone_b0);
+
+ hdr_rte_mbuf_valid = ((hdr_b0->flags & VNET_BUFFER_RTE_MBUF_VALID) != 0);
+ ASSERT ((clone_b0->flags & VNET_BUFFER_RTE_MBUF_VALID) == 0);
+
+  /* Update main rte_mbuf fields, even for non-dpdk output interfaces */
+ if (!hdr_rte_mbuf_valid)
+ {
+ rte_pktmbuf_reset (hdr_mb0);
+ }
+ hdr_mb0->data_len = hdr_b0->current_length;
+ hdr_mb0->pkt_len = hdr_b0->current_length +
+ hdr_b0->total_length_not_including_first_buffer;
+ hdr_mb0->next = clone_mb0;
+ hdr_mb0->nb_segs = clone_mb0->nb_segs + 1;
+
+ if (!outface->swif_is_dpdk_driver)
+ {
+ goto done;
+ }
+
+ hdr_b0->flags |= VNET_BUFFER_RTE_MBUF_VALID;
+ clone_b0->flags |= VNET_BUFFER_RTE_MBUF_VALID;
+
+ /* copy metadata from source packet (see sr_replicate.c) */
+ hdr_mb0->port = clone_mb0->port;
+ hdr_mb0->vlan_tci = clone_mb0->vlan_tci;
+ hdr_mb0->vlan_tci_outer = clone_mb0->vlan_tci_outer;
+ hdr_mb0->tx_offload = clone_mb0->tx_offload;
+ hdr_mb0->hash = clone_mb0->hash;
+
+ hdr_mb0->ol_flags = clone_mb0->ol_flags & ~(IND_ATTACHED_MBUF);
+
+ __rte_mbuf_sanity_check (hdr_mb0, 1);
+
+done:;
+}
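+
+/*
+ * Condensed usage sketch (illustrative only; this function and its hdr_len
+ * parameter are hypothetical and not part of the plugin API). It shows how
+ * the helpers above are expected to compose: clone (or copy) the cached
+ * payload buffer, chain it behind a separate header buffer that carries the
+ * per-face IP/UDP rewrite, then make the rte_mbuf chain consistent. For the
+ * real caller, see cicn_clone_cs_buffer() in node.c.
+ */
+static inline vlib_buffer_t *
+cicn_infra_clone_attach_sketch (vlib_main_t * vm, vlib_buffer_t * hdr_b0,
+				vlib_buffer_t * cs_b0, u32 hdr_len,
+				vlib_buffer_free_list_t * fl,
+				unsigned socket_id,
+				cicn_face_db_entry_t * outface)
+{
+  vlib_buffer_t *clone_b0;
+
+  /* rte_mbuf clone for dpdk faces, vlib copy for virtual faces */
+  clone_b0 = cicn_infra_vlib_buffer_clone (cs_b0, vm, fl, socket_id, outface);
+  if (clone_b0 == 0)
+    {
+      return (0);		/* caller drops on allocation failure */
+    }
+
+  /* Header particle holds only the rewrite bytes; the body is the clone */
+  hdr_b0->current_data = 0;
+  hdr_b0->current_length = hdr_len;
+  hdr_b0->flags |= VLIB_BUFFER_NEXT_PRESENT;
+  hdr_b0->total_length_not_including_first_buffer =
+    vlib_buffer_length_in_chain (vm, cs_b0);
+  hdr_b0->next_buffer = vlib_get_buffer_index (vm, clone_b0);
+
+  /* Rebuild the rte_mbuf chain to match the new vlib_buffer chain */
+  cicn_infra_vlib_buffer_clone_attach_finalize (hdr_b0, clone_b0, outface);
+
+  return (hdr_b0);
+}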
+#endif // !CICN_FEATURE_VPP_VLIB_CLONING
+
+#endif // CICN_RTE_MBUF_INLINES_H_
diff --git a/cicn-plugin/cicn/cicn_siphash.h b/cicn-plugin/cicn/cicn_siphash.h
new file mode 100644
index 00000000..9b537828
--- /dev/null
+++ b/cicn-plugin/cicn/cicn_siphash.h
@@ -0,0 +1,458 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ SipHash reference C implementation
+
+ Copyright (c) 2012-2014 Jean-Philippe Aumasson
+ <jeanphilippe.aumasson@gmail.com>
+ Copyright (c) 2012-2014 Daniel J. Bernstein <djb@cr.yp.to>
+
+ To the extent possible under law, the author(s) have dedicated all copyright
+ and related and neighboring rights to this software to the public domain
+ worldwide. This software is distributed without any warranty.
+
+ You should have received a copy of the CC0 Public Domain Dedication along
+ with this software. If not, see
+ <http://creativecommons.org/publicdomain/zero/1.0/>.
+ */
+#ifndef _CICN_SIPHASH_H_
+#define _CICN_SIPHASH_H_ 1
+
+#if !CICN_VPP_PLUGIN
+#error "cicn-internal file included externally"
+#endif
+
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+
+/* default: SipHash-2-4 */
+#define cROUNDS 2
+#define dROUNDS 4
+
+#define ROTL(x, b) (uint64_t)(((x) << (b)) | ((x) >> (64 - (b))))
+
+#define U32TO8_LE(p, v) \
+ (p)[0] = (uint8_t)((v)); \
+ (p)[1] = (uint8_t)((v) >> 8); \
+ (p)[2] = (uint8_t)((v) >> 16); \
+ (p)[3] = (uint8_t)((v) >> 24);
+
+#define U64TO8_LE(p, v) \
+ U32TO8_LE((p), (uint32_t)((v))); \
+ U32TO8_LE((p) + 4, (uint32_t)((v) >> 32));
+
+#define U8TO64_LE(p) \
+ (((uint64_t)((p)[0])) | ((uint64_t)((p)[1]) << 8) | \
+ ((uint64_t)((p)[2]) << 16) | ((uint64_t)((p)[3]) << 24) | \
+ ((uint64_t)((p)[4]) << 32) | ((uint64_t)((p)[5]) << 40) | \
+ ((uint64_t)((p)[6]) << 48) | ((uint64_t)((p)[7]) << 56))
+
+#define SIPROUND \
+ do { \
+ v0 += v1; \
+ v1 = ROTL(v1, 13); \
+ v1 ^= v0; \
+ v0 = ROTL(v0, 32); \
+ v2 += v3; \
+ v3 = ROTL(v3, 16); \
+ v3 ^= v2; \
+ v0 += v3; \
+ v3 = ROTL(v3, 21); \
+ v3 ^= v0; \
+ v2 += v1; \
+ v1 = ROTL(v1, 17); \
+ v1 ^= v2; \
+ v2 = ROTL(v2, 32); \
+ } while (0)
+
+#ifdef CICN_SIPHASH_DEBUG
+#define SIPTRACE \
+ do { \
+ printf("(%3d) v0 %08x %08x\n", (int)inlen, (uint32_t)(v0 >> 32), \
+ (uint32_t)v0); \
+ printf("(%3d) v1 %08x %08x\n", (int)inlen, (uint32_t)(v1 >> 32), \
+ (uint32_t)v1); \
+ printf("(%3d) v2 %08x %08x\n", (int)inlen, (uint32_t)(v2 >> 32), \
+ (uint32_t)v2); \
+ printf("(%3d) v3 %08x %08x\n", (int)inlen, (uint32_t)(v3 >> 32), \
+ (uint32_t)v3); \
+ } while (0)
+#else
+#define SIPTRACE
+#endif
+
+#ifdef CICN_SIPHASH_128 // exp. 128-bit code below, ifdef'd out, for reference
+#error "cicn_siphash doesn't support 128-bit yet!"
+#endif
+
+/* Need an extern declaration in order to keep llvm happy with the C99 inline
+ * definition below.
+ */
+extern inline uint64_t
+cicn_siphash (const uint8_t * in, uint64_t inlen, const uint8_t * k);
+
+/*
+ * Compute the 64-bit SipHash-2-4 of the 'inlen' bytes at 'in', using the
+ * 16-byte key 'k'.
+ */
+inline uint64_t
+cicn_siphash (const uint8_t * in, uint64_t inlen, const uint8_t * k)
+{
+ /* "somepseudorandomlygeneratedbytes" */
+ uint64_t v0 = 0x736f6d6570736575ULL;
+ uint64_t v1 = 0x646f72616e646f6dULL;
+ uint64_t v2 = 0x6c7967656e657261ULL;
+ uint64_t v3 = 0x7465646279746573ULL;
+ uint64_t b;
+ uint64_t k0 = U8TO64_LE (k);
+ uint64_t k1 = U8TO64_LE (k + 8);
+ uint64_t m;
+ int i;
+ const uint8_t *end = in + inlen - (inlen % sizeof (uint64_t));
+ const int left = inlen & 7;
+ b = ((uint64_t) inlen) << 56;
+ v3 ^= k1;
+ v2 ^= k0;
+ v1 ^= k1;
+ v0 ^= k0;
+
+#ifdef CICN_SIPHASH_128
+ v1 ^= 0xee;
+#endif /* CICN_SIPHASH_128 */
+
+ for (; in != end; in += 8)
+ {
+ m = U8TO64_LE (in);
+ v3 ^= m;
+
+ SIPTRACE;
+ for (i = 0; i < cROUNDS; ++i)
+ SIPROUND;
+
+ v0 ^= m;
+ }
+
+ switch (left)
+ {
+ case 7:
+ b |= ((uint64_t) in[6]) << 48;
+ case 6:
+ b |= ((uint64_t) in[5]) << 40;
+ case 5:
+ b |= ((uint64_t) in[4]) << 32;
+ case 4:
+ b |= ((uint64_t) in[3]) << 24;
+ case 3:
+ b |= ((uint64_t) in[2]) << 16;
+ case 2:
+ b |= ((uint64_t) in[1]) << 8;
+ case 1:
+ b |= ((uint64_t) in[0]);
+ break;
+ case 0:
+ break;
+ }
+
+ v3 ^= b;
+
+ SIPTRACE;
+ for (i = 0; i < cROUNDS; ++i)
+ SIPROUND;
+
+ v0 ^= b;
+
+#ifndef CICN_SIPHASH_128
+ v2 ^= 0xff;
+#else
+ v2 ^= 0xee;
+#endif /* CICN_SIPHASH_128 */
+
+ SIPTRACE;
+ for (i = 0; i < dROUNDS; ++i)
+ SIPROUND;
+
+ return (v0 ^ v1 ^ v2 ^ v3);
+
+/* U64TO8_LE(out, b); TODO -- ref version writes result to 'out' param and returns zero */
+
+#ifdef CICN_SIPHASH_128
+ v1 ^= 0xdd;
+
+ SIPTRACE;
+ for (i = 0; i < dROUNDS; ++i)
+ SIPROUND;
+
+ b = v0 ^ v1 ^ v2 ^ v3;
+ U64TO8_LE (out + 8, b);
+#endif /* CICN_SIPHASH_128 */
+
+/* return 0; TODO -- ref version writes result to 'out' param and returns zero... */
+}
+
+/*
+ * Running state of hash, for taking advantage of incremental hashing
+ */
+typedef struct cicn_siphash_hi_s
+{
+ uint64_t sip_v_whole[4];
+} cicn_siphash_hi_t;
+
+/*
+ * cicn_siphash DOCUMENTATION (algorithm details)
+ *
+ * Sources:
+ * - Analysis: http://eprint.iacr.org/2012/351.pdf
+ * - Code: https://github.com/floodyberry/siphash
+ *
+ * siphash has an initialization phase, a compression phase, and a
+ * finalization phase.
+ * - The running state of siphash is stored in a "vector": 32 bytes,
+ * managed as a 4 element array of uint64_t.
+ * - The initialization phase initializes the vector ("V") for the
+ * hash calculation, based on the key and some constants
+ * - The compression phase processes the string to be hashed,
+ * processing an 8 byte (64 bit) block per iteration. Each
+ * iteration includes
+ * - Convert the 8 bytes into a 64-bit number (using a little-endian
+ * conversion)
+ * - XOR the new 8 bytes into V[3]
+ * - Perform multiple (2) "rounds" of compression on V, using the logic
+ * in SipRound
+ * - XOR the new 8 bytes into V[0]
+ * - The last block is special. It is created as if extra bytes were
+ * available off the end of the string. The last block includes
+ * - leftover bytes at the tail of the string (e.g. 3 leftover bytes if
+ * the string were 11 bytes long)
+ * - nulls to fill out the tail of the string to 7 bytes (e.g. 4 nulls
+ * if the string were 11 bytes long)
+ * - The number of actual leftover bytes in the 8th byte (e.g. 3,
+ * if the string were 11 bytes long).
+ * - For another example, if the string were 8 bytes long, the last
+ * (2nd) block would be all null.
+ * - The finalization phase:
+ * - XOR 0xff info V[2]
+ * - Perform multiple (4) rounds of compression on V, using the
+ * logic in SipRound (i.e. compression and finalization use the same
+ * core compression logic)
+ * - XOR the 4 elements of V together to produce the 8 byte (64 bit)
+ * hash result.
+ */
+
+const unsigned char cicn_siphash_seed[16] = {
+ 0x12, 0x34, 0x56, 0x78, 0x98, 0x76, 0x54, 0x32,
+ 0x12, 0x34, 0x56, 0x78, 0x98, 0x76, 0x54, 0x32,
+};
+
+/*
+ * Copy one siphash vector to another, e.g. as part of saving a
+ * hash's intermediate result for later re-use.
+ * When processing a CICN name, calculating the siphashes of
+ * each component prefix plus the siphash of the whole name, this
+ * is used to keep track of partial results rather than doing
+ * each siphash from scratch (restarting at the beginning of the whole name).
+ * (See summary at "cicn_siphash DOCUMENTATION".)
+ * Returns:
+ * No return value
+ * Vout:
+ * Output vector, target of copy
+ * Vin:
+ * Input vector, source of copy
+ */
+#define cicn_siphash_vec_copy(Vout, Vin) do { \
+ Vout[0] = Vin[0]; Vout[1] = Vin[1]; Vout[2] = Vin[2]; Vout[3] = Vin[3];\
+ } while(0);
+
+static inline void
+cicn_siphash_hi_initialize (cicn_siphash_hi_t * arg,
+ const unsigned char *seed)
+{
+ const unsigned char *key = seed;
+ uint64_t *V = arg->sip_v_whole;
+ uint64_t K[2];
+
+ K[0] = U8TO64_LE (&key[0]);
+ K[1] = U8TO64_LE (&key[8]);
+
+ /* "somepseu""dorandom""lygenera""tedbytes" */
+ V[0] = K[0] ^ 0x736f6d6570736575ull;
+ V[1] = K[1] ^ 0x646f72616e646f6dull;
+#ifdef CICN_SIPHASH_128
+ V[1] ^= 0xee;
+#endif /* CICN_SIPHASH_128 */
+ V[2] = K[0] ^ 0x6c7967656e657261ull;
+ V[3] = K[1] ^ 0x7465646279746573ull;
+}
+
+/*
+ * The core logic of one round of siphash compression/finalization
+ * (See summary at "cicn_siphash DOCUMENTATION".)
+ * V:
+ * Vector holding the current state of the hash, to be put through
+ * (the core logic of) one round of compression/finalization.
+ */
+#define ROTL64(x,b) ROTL(x,b)
+#define cicn_siphash_Round(V) { \
+ V[0] += V[1]; V[2] += V[3]; \
+ V[1] = ROTL64(V[1],13); V[3] = ROTL64(V[3],16); \
+ V[1] ^= V[0]; V[3] ^= V[2]; \
+ V[0] = ROTL64(V[0],32); \
+ V[2] += V[1]; V[0] += V[3]; \
+ V[1] = ROTL64(V[1],17); V[3] = ROTL64(V[3],21); \
+ V[1] ^= V[2]; V[3] ^= V[0]; \
+ V[2] = ROTL64(V[2],32); }
+
+/*
+ * The full logic of one round of siphash compression (not finalization)
+ * (See summary at "cicn_siphash DOCUMENTATION".)
+ */
+static inline void
+cicn_siphash_compress (uint64_t V[4], uint64_t block_le_val)
+{
+ V[3] ^= block_le_val;
+ cicn_siphash_Round (V);
+ cicn_siphash_Round (V);
+ V[0] ^= block_le_val;
+}
+
+/*
+ * At the end of a prefix/name/bytestring to be siphashed, 0-7 bytes will
+ * be left that do not make up a full 8-byte block. This routine
+ * convolves those 0-7 bytes with 1 byte derived from prefix overall length
+ * (not the count of trailing bytes) to get a last 64-bit quantity to be
+ * used in siphash finalization.
+ *
+ * @param[in] base is the base of the entire bytestring
+ * @param[in] len is the length of the entire bytestring
+ * @param[in] pblk_offset is the byte offset of the partial block (last
+ * 0-7 bytes)
+ *
+ * This routine, similar to the original code downloaded to siphash.c,
+ * is careful to not read any bytes past the end of the block
+ * (at the cost of doing multiple 1-byte reads rather than a single
+ * 8-byte read and mask).
+ * (See summary at "cicn_siphash DOCUMENTATION".)
+ */
+static inline uint64_t
+cicn_siphash_partial_blk_val (const unsigned char *base, int len,
+ int pblk_offset)
+{
+ uint64_t pblk_val_64_LE;
+ int partial_bytes = (len & 0x7);
+
+ pblk_val_64_LE = (uint64_t) (len & 0xff) << 56;
+ switch (partial_bytes)
+ {
+ case 7:
+ pblk_val_64_LE |= (uint64_t) base[pblk_offset + 6] << 48;
+ case 6:
+ pblk_val_64_LE |= (uint64_t) base[pblk_offset + 5] << 40;
+ case 5:
+ pblk_val_64_LE |= (uint64_t) base[pblk_offset + 4] << 32;
+ case 4:
+ pblk_val_64_LE |= (uint64_t) base[pblk_offset + 3] << 24;
+ case 3:
+ pblk_val_64_LE |= (uint64_t) base[pblk_offset + 2] << 16;
+ case 2:
+ pblk_val_64_LE |= (uint64_t) base[pblk_offset + 1] << 8;
+ case 1:
+ pblk_val_64_LE |= (uint64_t) base[pblk_offset + 0];
+ case 0:
+ default:;
+ }
+ return (pblk_val_64_LE);
+}
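+
+/*
+ * Worked example (for illustration): for a prefix of overall length 11 and
+ * pblk_offset 8, partial_bytes is 3, so the value computed above is
+ *   (11 << 56) | (base[10] << 16) | (base[9] << 8) | base[8]
+ * i.e. the 3 trailing bytes in little-endian byte positions 0..2, implicit
+ * zeros in positions 3..6, and the overall length (mod 256) in the top byte.
+ */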
+
+/*
+ * The logic for convolving the final/partial 8-byte/64-bit block into
+ * the running 32-byte vector, which is then xor'd into the 64-bit hash value.
+ * (See summary at "cicn_siphash DOCUMENTATION".)
+ */
+static inline uint64_t
+cicn_siphash_finalize (uint64_t V[4])
+{
+ uint64_t hash;
+
+#ifndef CICN_SIPHASH_128
+ V[2] ^= 0xff;
+#else
+ V[2] ^= 0xee;
+#endif /* CICN_SIPHASH_128 */
+
+ cicn_siphash_Round (V);
+ cicn_siphash_Round (V);
+ cicn_siphash_Round (V);
+ cicn_siphash_Round (V);
+ hash = V[0] ^ V[1] ^ V[2] ^ V[3];
+ return (hash);
+
+#ifdef CICN_SIPHASH_128
+ V[1] ^= 0xdd;
+
+ cicn_hfn_sip_Round (V);
+ cicn_hfn_sip_Round (V);
+ cicn_hfn_sip_Round (V);
+ cicn_hfn_sip_Round (V);
+
+ hash = V[0] ^ V[1] ^ V[2] ^ V[3];
+ U64TO8_LE (out + 8, hash);
+#endif /* CICN_SIPHASH_128 */
+}
+
+/*
+ * Calculate/return 64-bit siphash of bytestring (name prefix) beginning at
+ * nrec_val with length pfx_len, for which intermediate siphash
+ * information through crec_offset is already stored in V_running.
+ * (In other words, this optimized siphash calculation need only
+ * convolve the last pfx_len-crec_offset bytes of
+ * prefix into the calculation.)
+ *
+ * As an important side effect, V_running is updated with siphash
+ * information through the final full 8-byte block in the prefix, for
+ * use in calculating the siphash of the following prefix.
+ *
+ * (See summary at "cicn_siphash DOCUMENTATION".)
+ */
+static inline uint64_t
+cicn_siphash_hi_calculate (cicn_siphash_hi_t * arg,
+ const unsigned char *nrec_val, int pfx_len,
+ int crec_offset)
+{
+ uint64_t *V_running = arg->sip_v_whole;
+
+ uint64_t hash;
+ size_t cur_crec_base_blk, next_crec_base_blk, blk;
+ uint64_t V_finalize[4];
+
+ //printf("cur_crec_base_blk: %d ", cur_crec_base_blk);
+
+ /* blks (8 bytes) are byte offsets: they count 0,8,16... not 0,1,2... */
+ cur_crec_base_blk = (crec_offset & ~7);
+ next_crec_base_blk = (pfx_len & ~7);
+ for (blk = cur_crec_base_blk; blk < next_crec_base_blk; blk += 8)
+ {
+ cicn_siphash_compress (V_running, U8TO64_LE (&nrec_val[blk]));
+ }
+
+ /* copy V to v to finalize hash calculation for this prefix */
+ cicn_siphash_vec_copy (V_finalize, V_running);
+
+ cicn_siphash_compress (V_finalize,
+ cicn_siphash_partial_blk_val (nrec_val, pfx_len,
+ blk));
+ hash = cicn_siphash_finalize (V_finalize);
+
+ return (hash);
+}
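+
+/*
+ * Usage sketch (illustrative only; this function and its parameters are
+ * hypothetical, not part of the plugin API). It shows the intended calling
+ * pattern for the incremental interface: initialize the running state once
+ * per name, then hash each successive prefix (lengths in increasing order),
+ * passing the previous prefix length as crec_offset so that only the new
+ * bytes are convolved into the running state.
+ */
+static inline void
+cicn_siphash_prefixes_sketch (const unsigned char *name,
+			      const int *pfx_lens, int pfx_count,
+			      uint64_t * pfx_hashes /* out */ )
+{
+  cicn_siphash_hi_t hi;
+  int i, prev_len = 0;
+
+  cicn_siphash_hi_initialize (&hi, cicn_siphash_seed);
+  for (i = 0; i < pfx_count; i++)
+    {
+      /* only the bytes beyond the previous prefix are newly processed */
+      pfx_hashes[i] =
+	cicn_siphash_hi_calculate (&hi, name, pfx_lens[i], prev_len);
+      prev_len = pfx_lens[i];
+    }
+}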
+
+#endif /* _CICN_SIPHASH_H_ */
diff --git a/cicn-plugin/cicn/cicn_std.h b/cicn-plugin/cicn/cicn_std.h
new file mode 100644
index 00000000..28f179d7
--- /dev/null
+++ b/cicn-plugin/cicn/cicn_std.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * cicn_std.h - core definitions
+ */
+
+#ifndef __CICN_STD_H__
+#define __CICN_STD_H__
+
+#if !CICN_VPP_PLUGIN
+#error "cicn-internal file included externally"
+#endif
+
+/* unix return code success status */
+#ifndef AOK
+#define AOK 0
+#endif
+
+#endif // __CICN_STD_H__
diff --git a/cicn-plugin/cicn/cicn_types.h b/cicn-plugin/cicn/cicn_types.h
new file mode 100644
index 00000000..1a85e655
--- /dev/null
+++ b/cicn-plugin/cicn/cicn_types.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * cicn_types.h - struct typedefs to allow exposing opaque pointers
+ */
+
+#ifndef __CICN_TYPES_H__
+#define __CICN_TYPES_H__
+
+#if !CICN_VPP_PLUGIN
+#error "cicn-internal file included externally"
+#endif
+
+typedef struct cicn_rd_s cicn_rd_t;
+typedef struct cicn_face_db_entry_s cicn_face_db_entry_t;
+typedef struct test_cicn_api_op_s test_cicn_api_op_t;
+
+#endif // __CICN_TYPES_H__
diff --git a/cicn-plugin/cicn/node.c b/cicn-plugin/cicn/node.c
new file mode 100644
index 00000000..922ebf4b
--- /dev/null
+++ b/cicn-plugin/cicn/node.c
@@ -0,0 +1,2031 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * node.c - icn plug-in nodes for vpp
+ */
+
+#include "cicn_rte_mbuf.h" // needed first because vlib.h defs conflict
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+
+#include <cicn/cicn.h>
+#include <cicn/cicn_hashtb.h>
+#include <cicn/cicn_pcs.h>
+#include <cicn/cicn_rte_mbuf_inlines.h>
+#include <cicn/cicn_hello_inlines.h>
+
+int cicn_buftrc = 0; // make permanent or delete? Set to 1 to enable trace
+
+#define CICN_IP_TTL_DEFAULT 128
+
+/*
+ * First forwarder worker node starts here
+ */
+
+/* Trace context struct */
+typedef struct
+{
+ u32 next_index;
+ u32 sw_if_index;
+ u8 pkt_type;
+ u16 msg_type;
+} icnfwd_trace_t;
+
+/* packet trace format function */
+static u8 *
+icnfwd_format_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ icnfwd_trace_t *t = va_arg (*args, icnfwd_trace_t *);
+
+ s = format (s, "ICNFWD: pkt: %d, msg %d, sw_if_index %d, next index %d",
+ (int) t->pkt_type, (int) t->msg_type,
+ t->sw_if_index, t->next_index);
+ return (s);
+}
+
+/*
+ * Node context data; we think this is per-thread/instance
+ */
+typedef struct icnfwd_runtime_s
+{
+ /* TODO -- use this more when we add shards */
+ int id;
+ cicn_pit_cs_t pitcs;
+} icnfwd_runtime_t;
+
+/* Registration struct for a graph node */
+vlib_node_registration_t icnfwd_node;
+
+/* Next graph nodes, which reference the list in the actual registration
+ * block below
+ */
+typedef enum
+{
+ ICNFWD_NEXT_LOOKUP,
+ ICNFWD_NEXT_ERROR_DROP,
+ ICNFWD_N_NEXT,
+} icnfwd_next_t;
+
+/* Stats string values */
+static char *icnfwd_error_strings[] = {
+#define _(sym,string) string,
+ foreach_icnfwd_error
+#undef _
+};
+
+/* Prototypes */
+static int cicn_trim_cs_lru (vlib_main_t * vm, vlib_node_runtime_t * node,
+ cicn_pit_cs_t * pit);
+
+/*
+ *
+ */
+static void
+update_node_counter (vlib_main_t * vm, u32 node_idx, u32 counter_idx, u64 val)
+{
+ vlib_node_t *node = vlib_get_node (vm, node_idx);
+ vlib_error_main_t *em = &(vm->error_main);
+ u32 base_idx = node->error_heap_index;
+
+ em->counters[base_idx + counter_idx] = val;
+}
+
+/*
+ * Prepare a packet buffer for the CS. We'll clone this mbuf and use a
+ * newly-allocated mbuf to hold the header/rewrite info needed to send
+ * each packet out.
+ */
+static int
+prep_buffer_for_cs (vlib_main_t * vm, vlib_buffer_t * b0)
+{
+ int ret = EINVAL;
+
+ /* Update the CS mbuf (and vlib buffer) so that it includes only the
+ * ICN payload
+ */
+
+ /* Advance the vlib buffer to the beginning of the ICN payload */
+ vlib_buffer_advance (b0, sizeof (ip4_header_t) + sizeof (udp_header_t));
+
+ cicn_infra_vlib_buffer_cs_prep_finalize (vm, b0);
+ ret = AOK;
+
+ return (ret);
+}
+
+/*
+ * Clone a packet being referenced in a CS entry, using another packet
+ * (received interest packet) as a header to hold content response
+ * rewrite info and pointer to cloned cs entry buffer.
+ */
+static int
+cicn_clone_cs_buffer (vlib_buffer_t * hdr_b0, const cicn_pcs_entry_t * pcs,
+ vlib_main_t * vm, vlib_buffer_free_list_t * fl,
+ unsigned socket_id, cicn_face_db_entry_t * outface)
+{
+ int ret = EINVAL;
+ vlib_buffer_t *cs_b0, *clone_b0;
+
+ BUFTRC ("CS-H-SW", GBI (vm, hdr_b0));
+ if (PREDICT_FALSE (pcs->u.cs.cs_pkt_buf == 0))
+ {
+ goto done;
+ }
+ BUFTRC ("CS-H-CS", pcs->u.cs.cs_pkt_buf);
+
+ cs_b0 = vlib_get_buffer (vm, pcs->u.cs.cs_pkt_buf);
+
+ /* Clone the buf held in the CS */
+ clone_b0 = cicn_infra_vlib_buffer_clone (cs_b0, vm, fl, socket_id, outface);
+ BUFTRC ("CS-H-CL", GBI (vm, clone_b0));
+ if (PREDICT_FALSE (clone_b0 == 0))
+ {
+ /* If we can't get a buf, we can't continue */
+ goto done;
+ }
+
+ /* At this point, the base CS buffer is pointing at the ICN payload
+ * part of the packet, and we'll be using the other buffer
+ * to hold the egress/tx rewrite info.
+ */
+ hdr_b0->current_data = 0;
+ hdr_b0->current_length = sizeof (ip4_header_t) + sizeof (udp_header_t);
+ hdr_b0->flags |= VLIB_BUFFER_NEXT_PRESENT;
+ if (outface->swif_is_dpdk_driver)
+ {
+ ASSERT ((hdr_b0->flags & VNET_BUFFER_RTE_MBUF_VALID) != 0);
+ }
+ hdr_b0->total_length_not_including_first_buffer =
+ vlib_buffer_length_in_chain (vm, cs_b0);
+
+ /* Connect the header particle to the body */
+ hdr_b0->next_buffer = vlib_get_buffer_index (vm, clone_b0);
+
+ cicn_infra_vlib_buffer_clone_attach_finalize (hdr_b0, clone_b0, outface);
+
+ /* Looks like success */
+ ret = AOK;
+
+done:
+
+ return (ret);
+}
+
+/*
+ * ICN forwarder node: handling of Interests and Content Msgs delivered
+ * based on udp_register_dst_port().
+ * - 1 packet at a time
+ * - ipv4 udp only
+ */
+static uword
+icnfwd_node_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ u32 n_left_from, *from, *to_next;
+ icnfwd_next_t next_index;
+ u32 pkts_processed = 0;
+ u32 pkts_interest_count = 0, pkts_data_count = 0, pkts_nak_count = 0;
+ u32 pkts_control_request_count = 0, pkts_control_reply_count = 0;
+ u32 pkts_from_cache_count = 0;
+ u32 pkts_nacked_interests_count = 0;
+ u32 pkts_nak_hoplimit_count = 0, pkts_nak_no_route_count = 0;
+ u32 pkts_no_pit_count = 0, pit_expired_count = 0, cs_expired_count = 0;
+ u32 no_bufs_count = 0, pkts_interest_agg = 0, pkts_int_retrans = 0;
+ u32 pit_int_count, pit_cs_count;
+ u32 pkts_hello_int_rec = 0, pkts_hello_data_sent = 0;
+ u32 pkts_hello_data_rec = 0;
+ int i, ret;
+ icnfwd_runtime_t *rt;
+ cicn_prefix_hashinf_t pfxhash;
+ f64 tnow;
+ vlib_buffer_free_list_t *fl;
+ unsigned socket_id = cicn_infra_rte_socket_id ();
+ cicn_main_t *sm = &cicn_main;
+
+ fl = vlib_buffer_get_free_list (vm, VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
+
+ rt = vlib_node_get_runtime_data (vm, icnfwd_node.index);
+
+ /* Alloc the pit/cs for each shard when the icn feature
+ * is enabled, access by thread in the node context.
+ */
+ if (rt->pitcs.pcs_table == NULL)
+ {
+ cicn_pit_create (&rt->pitcs, cicn_infra_shard_pit_size);
+ cicn_pit_set_lru_max (&rt->pitcs, cicn_infra_shard_cs_size);
+ }
+
+ /* Maybe update our thread's config generation number, if the global
+ * number has changed
+ */
+ if (cicn_infra_gshard.cfg_generation !=
+ cicn_infra_shards[vm->cpu_index].cfg_generation)
+ {
+ cicn_infra_shards[vm->cpu_index].cfg_generation =
+ cicn_infra_gshard.cfg_generation;
+ }
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+
+ /* Capture time in vpp terms */
+ tnow = vlib_time_now (vm);
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ /* TODO -- just doing 1-at-a-time for now, to simplify things a bit. */
+
+ /* TODO -- more interesting stats and trace */
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t *b0;
+ u32 next0 = ICNFWD_NEXT_LOOKUP;
+ u32 sw_if_index0;
+ udp_header_t *udp0;
+ ip4_header_t *ip0;
+ u8 *body0;
+ u32 len0;
+ u8 pkt_type;
+ u16 msg_type, bkt_ent_exp_time, sval;
+ cicn_pkt_hdr_desc_t pkt_hdr_desc0;
+ u8 *nameptr;
+ u32 namelen;
+ u64 hashval;
+ struct sockaddr_in srcaddr, destaddr;
+ cicn_face_db_entry_t *inface, *outface;
+ cicn_face_stats_t *inface_stats, *outface_stats;
+ cicn_hash_node_t *nodep;
+ cicn_pcs_entry_t *pitp;
+ cicn_fib_entry_t *pentry;
+ uint16_t faceid;
+ int clone_count;
+ vlib_buffer_t *hdr_vec[CICN_PARAM_PIT_ENTRY_PHOPS_MAX];
+ vlib_buffer_t *clone_vec[CICN_PARAM_PIT_ENTRY_PHOPS_MAX];
+ cicn_face_db_entry_t *face_vec[CICN_PARAM_PIT_ENTRY_PHOPS_MAX];
+ u64 seq_num;
+ int trace_p = 0;
+
+ /* Prefetch for next iteration. */
+ if (n_left_from > 1)
+ {
+ vlib_buffer_t *p2;
+
+ p2 = vlib_get_buffer (vm, from[1]);
+
+ vlib_prefetch_buffer_header (p2, LOAD);
+
+ CLIB_PREFETCH (p2->data, (CLIB_CACHE_LINE_BYTES * 2), STORE);
+ }
+
+ /* TODO -- we're not dealing with 'chained' buffers yet */
+
+ /* Dequeue a packet buffer */
+ bi0 = from[0];
+ BUFTRC ("CICN-SW", bi0);
+ from += 1;
+ n_left_from -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) &&
+ (b0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ trace_p = 1;
+ }
+
+ /*
+ * From the udp code, we think we're handed the payload part
+ * of the packet
+ */
+ ASSERT (b0->current_data >=
+ (sizeof (ip4_header_t) + sizeof (udp_header_t)));
+
+ /* Capture pointer to the payload */
+ body0 = vlib_buffer_get_current (b0);
+ len0 = b0->current_length;
+
+ /* Walk 'back' to the ip header */
+ vlib_buffer_advance (b0, -(sizeof (udp_header_t)));
+ udp0 = vlib_buffer_get_current (b0);
+ vlib_buffer_advance (b0, -(sizeof (ip4_header_t)));
+ ip0 = vlib_buffer_get_current (b0);
+
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+
+ /*
+ * Do a quick, in-place parse/validate pass, locating
+ * a couple of key pieces of info about the packet
+ * TODO -- could we pass this info from the dist node?
+ */
+ ret = cicn_parse_pkt (body0, len0, &pkt_type, &msg_type,
+ &nameptr, &namelen, &pkt_hdr_desc0);
+
+ if (ret != AOK)
+ {
+ /* Just drop on error? */
+ pkt_type = 0;
+ msg_type = 0;
+ next0 = ICNFWD_NEXT_ERROR_DROP;
+ goto trace_single;
+ }
+
+ /* Use result to determine next steps: forward, reply from CS,
+ * drop, nack
+ */
+
+ if (pkt_type == CICN_PKT_TYPE_INTEREST)
+ {
+ pkts_interest_count++;
+ }
+ else if (pkt_type == CICN_PKT_TYPE_CONTENT)
+ {
+ pkts_data_count++;
+ }
+ else if (pkt_type == CICN_PKT_TYPE_NAK)
+ {
+ pkts_nak_count++;
+ }
+ else if (pkt_type == CICN_PKT_TYPE_CONTROL_REQUEST)
+ {
+ pkts_control_request_count++;
+ }
+ else if (pkt_type == CICN_PKT_TYPE_CONTROL_REPLY)
+ {
+ pkts_control_reply_count++;
+ }
+
+ /* Locate ingress face */
+ srcaddr.sin_addr.s_addr = ip0->src_address.as_u32;
+ srcaddr.sin_port = udp0->src_port;
+
+ destaddr.sin_addr.s_addr = ip0->dst_address.as_u32;
+ destaddr.sin_port = udp0->dst_port;
+
+ /* Search for a match where the _local_ and _remote_ addresses
+ * correspond to the _dest_ and _src_ addresses from the packet.
+ */
+ ret = cicn_face_entry_find_by_addr (&destaddr /*local */ ,
+ &srcaddr /*remote */ , &inface);
+
+ /* If no matching face, don't do any more */
+ if (PREDICT_FALSE (ret != AOK || inface == NULL ||
+ (inface->flags & CICN_FACE_FLAGS_DOWN_HARD)))
+ {
+ /* Just drop on error? */
+ next0 = ICNFWD_NEXT_ERROR_DROP;
+ goto trace_single;
+ }
+
+ cicn_infra_shard_t *wshard = &cicn_infra_shards[vm->cpu_index];
+ inface_stats = &wshard->face_stats[cicn_face_db_index (inface)];
+
+ /* If content, use PIT info to determine egress face */
+
+ if (pkt_type == CICN_PKT_TYPE_CONTENT ||
+ pkt_type == CICN_PKT_TYPE_CONTROL_REPLY)
+ {
+
+ inface_stats->in_datas++;
+
+ if (PREDICT_FALSE (inface->flags & CICN_FACE_FLAG_HELLO_DOWN))
+ {
+ // hello down, only hello messages should be processed
+ goto hello_reply_rcvd_check;
+ }
+
+ /* Compute the full name hash for content lookup */
+ hashval = cicn_hashtb_hash_name (nameptr, namelen);
+
+ /*
+ * Search PIT/CS by name hash
+ */
+ /*
+ * Opportunistic scan of hash row/bucket for expirations.
+ * Some old code below could be removed with this addition,
+ * it won't be executed anyway.
+ *
+ * The timeout scan and the node lookup could be
+ * easily integrated.
+ */
+ cicn_pcs_timeout (vm, &rt->pitcs, hashval,
+ &pit_expired_count, &cs_expired_count);
+
+ ret =
+ cicn_hashtb_lookup_node (rt->pitcs.pcs_table, nameptr,
+ namelen, hashval, &nodep);
+
+ if (PREDICT_FALSE (ret != AOK))
+ {
+ goto hello_reply_rcvd_check; //no PIT entry: maybe a hello?
+ }
+
+ pitp = cicn_pit_get_data (nodep);
+
+ if (PREDICT_FALSE (pitp->shared.entry_type != CICN_PIT_TYPE))
+ {
+ /* Whatever this is, it's not a PIT */
+ next0 = ICNFWD_NEXT_ERROR_DROP;
+ goto trace_single;
+ }
+
+ /* Is the PIT entry expired? */
+ if (PREDICT_FALSE (tnow > pitp->shared.expire_time))
+ {
+ /*
+ * Remove existing entry; treat this as if no PIT entry
+ */
+ cicn_pcs_delete (&rt->pitcs, &pitp, &nodep, vm);
+ pit_expired_count++;
+
+ next0 = ICNFWD_NEXT_ERROR_DROP;
+ goto trace_single;
+ }
+
+ /* Content should arrive on face where interest tx happened */
+ if (PREDICT_FALSE (pitp->u.pit.pe_txface != inface->faceid))
+ {
+ next0 = ICNFWD_NEXT_ERROR_DROP;
+ goto trace_single;
+ }
+
+ /*
+ * Hold the packet buffer in the CS, and
+ * then use it to satisfy the PIT entry. We allocate unique
+ * packet mbufs to hold the rewrite info for each reply we'll
+ * send; the rewrite mbufs all share clones of the reply
+ * payload.
+ */
+
+ /* Prepare the packet for cs and for cloning. */
+ BUFTRC ("CS--ADD", bi0);
+ ret = prep_buffer_for_cs (vm, b0);
+ if (PREDICT_FALSE (ret != AOK))
+ {
+
+ cicn_pcs_delete (&rt->pitcs, &pitp, &nodep, vm);
+ no_bufs_count++;
+
+ next0 = ICNFWD_NEXT_ERROR_DROP;
+ goto trace_single;
+ }
+
+ /* For each packet we will send, allocate a new packet
+ * buffer to hold the rewrite/header info and a clone of
+ * the ICN payload packet buf. We also capture the tx faceid.
+ */
+ ret = AOK;
+ for (clone_count = 0, i = 0; i < CICN_PARAM_PIT_ENTRY_PHOPS_MAX;
+ i++)
+ {
+
+ if (pitp->u.pit.pe_rxfaces[i] != 0)
+ {
+
+ ret =
+ cicn_face_entry_find_by_id (pitp->u.pit.pe_rxfaces[i],
+ &outface);
+ if (PREDICT_FALSE
+ ((ret != AOK)
+ || (outface->flags & CICN_FACE_FLAGS_DOWN)))
+ {
+ /* Can't use this face, skip the entry */
+ ret = AOK;
+ continue;
+ }
+
+ face_vec[clone_count] = outface;
+ hdr_vec[clone_count] =
+ cicn_infra_vlib_buffer_alloc (vm, fl, socket_id,
+ outface);
+ if (!cicn_cs_enabled (&rt->pitcs) && clone_count == 0)
+ {
+ clone_vec[clone_count] = b0;
+ }
+ else
+ {
+ clone_vec[clone_count] =
+ cicn_infra_vlib_buffer_clone (b0, vm, fl,
+ socket_id, outface);
+ }
+ BUFTRC ("CLN-HDR", GBI (vm, hdr_vec[clone_count]));
+ BUFTRC ("CLN-CLN", GBI (vm, clone_vec[clone_count]));
+
+ if (PREDICT_FALSE ((hdr_vec[clone_count] == NULL) ||
+ (clone_vec[clone_count] == NULL)))
+ {
+
+ /* Need to check this index in the arrays in
+ * the error-handling code below.
+ */
+ clone_count++;
+
+ ret = ENOMEM;
+ break;
+ }
+
+ clone_count++;
+ }
+ }
+
+ /* If error, clean up any buffers we allocated */
+ if (PREDICT_FALSE (ret != AOK))
+ {
+ for (i = 0; i < clone_count; i++)
+ {
+ BUFTRC ("ERR-FRE",
+ vlib_get_buffer_index (vm,
+						     clone_vec[i]));
+ if (hdr_vec[i])
+ {
+ cicn_infra_vlib_buffer_free (hdr_vec[i], vm,
+ face_vec[i]);
+ }
+		      /* b0 itself may be stored in clone_vec[] when the CS
+		       * is disabled; it is handled by the drop path below.
+		       */
+		      if (clone_vec[i] && clone_vec[i] != b0)
+			{
+			  cicn_infra_vlib_buffer_free (clone_vec[i], vm,
+						       face_vec[i]);
+ }
+ }
+
+ /* Drop */
+ cicn_pcs_delete (&rt->pitcs, &pitp, &nodep, vm);
+ no_bufs_count++;
+
+ next0 = ICNFWD_NEXT_ERROR_DROP;
+ goto trace_single;
+ }
+
+ /* No valid PIT faces found? Not much we can do.
+ * TODO -- for now, leaving the PIT entry; should we delete it?
+ */
+ if (PREDICT_FALSE (clone_count == 0))
+ {
+ next0 = ICNFWD_NEXT_ERROR_DROP;
+ goto trace_single;
+ }
+
+ // no cs entry for ctrl responses
+ if (pkt_type == CICN_PKT_TYPE_CONTENT)
+ {
+
+ if (cicn_cs_enabled (&rt->pitcs))
+ {
+ /* At this point we think we're safe to proceed.
+ * Store the CS buf in the PIT/CS hashtable entry
+ */
+
+ /* Start turning the PIT into a CS. Note that we may be
+ * stepping on the PIT part of the union as we
+ * update the CS part, so don't expect the PIT part
+ * to be valid after this point.
+ */
+ cicn_pit_to_cs (&rt->pitcs, pitp);
+
+ pitp->u.cs.cs_rxface = inface->faceid;
+ pitp->shared.create_time = tnow;
+
+ uint64_t dmsg_lifetime;
+ ret =
+ cicn_parse_hdr_time_ms (body0, &pkt_hdr_desc0,
+ CICN_HDR_TLV_CACHE_TIME,
+ &dmsg_lifetime);
+ if (ret != AOK)
+ { // no header timeout, use default
+ dmsg_lifetime = CICN_PARAM_CS_LIFETIME_DFLT;
+ }
+ else if (dmsg_lifetime != 0)
+ {
+ if (dmsg_lifetime < CICN_PARAM_CS_LIFETIME_MIN)
+ {
+ dmsg_lifetime = CICN_PARAM_CS_LIFETIME_MIN;
+ }
+ else if (dmsg_lifetime > CICN_PARAM_CS_LIFETIME_MAX)
+ {
+ dmsg_lifetime = CICN_PARAM_CS_LIFETIME_MAX;
+ }
+ }
+ pitp->shared.expire_time =
+ cicn_pcs_get_exp_time (tnow, dmsg_lifetime);
+
+ /* Update hashtable-level expiration value too */
+ bkt_ent_exp_time =
+ cicn_infra_get_slow_exp_time (dmsg_lifetime);
+
+ cicn_hashtb_entry_set_expiration (rt->pitcs.pcs_table,
+ nodep,
+ bkt_ent_exp_time, 0);
+
+ /* Store the original packet buffer in the CS node */
+ pitp->u.cs.cs_pkt_buf = vlib_get_buffer_index (vm, b0);
+
+ /* Add to CS LRU */
+ cicn_cs_lru_insert (&rt->pitcs, nodep, pitp);
+ }
+ else
+ {
+ cicn_pcs_delete (&rt->pitcs, &pitp, &nodep, vm);
+ }
+
+ /* Set up to enqueue frames to the transmit next-node */
+ if (next_index != ICNFWD_NEXT_LOOKUP)
+ {
+ vlib_put_next_frame (vm, node, next_index,
+ n_left_to_next);
+ next_index = next0 = ICNFWD_NEXT_LOOKUP;
+ vlib_get_next_frame (vm, node, next_index, to_next,
+ n_left_to_next);
+
+ /* Ensure that we have space for at least one packet */
+ if (PREDICT_FALSE (n_left_to_next <= 0))
+ {
+ vlib_put_next_frame (vm, node, next_index,
+ n_left_to_next);
+
+ vlib_get_next_frame (vm, node, next_index, to_next,
+ n_left_to_next);
+ }
+ }
+
+ ASSERT (n_left_to_next > 0);
+
+ /* Connect each header buffer to a clone
+ * of the payload buffer. The last packet will go through
+ * to the normal end of the node loop.
+ */
+ for (i = 0; i < clone_count; i++)
+ {
+ vlib_buffer_t *cs_b0;
+
+ b0 = hdr_vec[i];
+ cs_b0 = clone_vec[i];
+ outface = face_vec[i];
+
+ if (PREDICT_FALSE (trace_p != 0))
+ {
+ b0->flags |= VLIB_BUFFER_IS_TRACED;
+ }
+
+ bi0 = vlib_get_buffer_index (vm, b0);
+
+ b0->current_data = 0;
+ b0->current_length = (sizeof (ip4_header_t) +
+ sizeof (udp_header_t));
+ b0->flags |= VLIB_BUFFER_NEXT_PRESENT;
+
+ b0->total_length_not_including_first_buffer =
+ vlib_buffer_length_in_chain (vm, cs_b0);
+
+ /* Connect the header particle to the body */
+ b0->next_buffer = vlib_get_buffer_index (vm, cs_b0);
+
+ cicn_infra_vlib_buffer_clone_attach_finalize (b0, cs_b0,
+ outface);
+
+ /* Refresh the ip and udp headers
+ * before the final part of the rewrite
+ */
+ ip0 = vlib_buffer_get_current (b0);
+ udp0 = (udp_header_t *) ((uint8_t *) ip0 +
+ sizeof (ip4_header_t));
+
+ memset (ip0, 0,
+ sizeof (ip4_header_t) + sizeof (udp_header_t));
+
+ ip0->ip_version_and_header_length = 0x45;
+ ip0->protocol = IP_PROTOCOL_UDP;
+
+ sval = vlib_buffer_length_in_chain (vm, b0);
+ ip0->length = clib_host_to_net_u16 (sval);
+
+ sval -= sizeof (ip4_header_t);
+ udp0->length = clib_host_to_net_u16 (sval);
+
+ vnet_buffer (b0)->sw_if_index[VLIB_TX] = ~0;
+
+ if (i == (clone_count - 1))
+ {
+ /* Last packet - drop out of the loop, let the
+ * transit path finish with 'b0' now
+ */
+ break;
+ }
+
+ /* Rewrite ip and udp headers */
+
+ ip0->src_address.as_u32 =
+ outface->src_addr.sin_addr.s_addr;
+ ip0->dst_address.as_u32 =
+ outface->dest_addr.sin_addr.s_addr;
+
+ /* TODO -- Refresh IP ttl - is that ok? */
+ ip0->ttl = CICN_IP_TTL_DEFAULT;
+
+ /* TODO -- Not optimizing the IP checksum currently */
+ ip0->checksum = ip4_header_checksum (ip0);
+
+ udp0->src_port = outface->src_addr.sin_port;
+ udp0->dst_port = outface->dest_addr.sin_port;
+
+ /* TODO -- clear UDP checksum; is this ok? */
+ udp0->checksum = 0;
+
+ pkts_from_cache_count++;
+
+ /* Update face-level stats */
+ outface_stats =
+ &wshard->face_stats[cicn_face_db_index (outface)];
+ outface_stats->out_datas++;
+
+ /* Enqueue packet to next graph node */
+ to_next[0] = bi0;
+ to_next += 1;
+ n_left_to_next -= 1;
+
+ BUFTRC ("ICN-TX2", bi0);
+ if (n_left_to_next == 0)
+ {
+ vlib_put_next_frame (vm, node, next_index,
+ n_left_to_next);
+
+ vlib_get_next_frame (vm, node, next_index, to_next,
+ n_left_to_next);
+ }
+ }
+ }
+
+ /* We're now processing the last (or only) PIT entry; 'b0',
+ * 'bi0', 'ip0', 'udp0', and 'outface' should be set
+ * properly. We'll just drop through to the normal
+ * 'send one packet code below.
+	       * 'send one packet' code below.
+
+ /* Update face-level stats */
+ outface_stats =
+ &wshard->face_stats[cicn_face_db_index (outface)];
+ outface_stats->out_datas++;
+
+ next0 = ICNFWD_NEXT_LOOKUP;
+
+ goto ready_to_send;
+
+ hello_reply_rcvd_check:
+ // not a normal content msg, maybe it's hello reply
+ if (cicn_hello_match (inface, pkt_type, nameptr, namelen,
+ &sm->hello_name, &seq_num))
+ {
+	      // it's a hello response
+ inface_stats->term_datas++;
+ pkts_hello_data_rec++;
+ /* Copy seq_num to global array of Up/Down data */
+ sm->cicn_hello_data_array[inface->faceid].seq_num = seq_num;
+ sm->cicn_hello_data_array[inface->faceid].faceid =
+ inface->faceid;
+
+ // Signal an event to the background process
+ vlib_process_signal_event_pointer (vm,
+ vlib_get_node_by_name
+ (vm,
+ (u8 *)
+ "icn-hello-process")->index,
+ CICN_HELLO_EVENT_DATA_RCVD,
+ &sm->cicn_hello_data_array
+ [inface->faceid]);
+ next0 = ICNFWD_NEXT_ERROR_DROP;
+ goto trace_single;
+ }
+
+ /* No PIT entry, not a hello, drop */
+ pkts_no_pit_count++;
+ next0 = ICNFWD_NEXT_ERROR_DROP;
+ goto trace_single;
+
+ /* END: Content/Control Response */
+
+ }
+ else if (pkt_type == CICN_PKT_TYPE_INTEREST ||
+ pkt_type == CICN_PKT_TYPE_CONTROL_REQUEST)
+ {
+ cicn_packet_hdr_t *pkt_hdr0 = (cicn_packet_hdr_t *) body0;
+ uint8_t *msg_tlv = (uint8_t *) (pkt_hdr0 + 1);
+
+ inface_stats->in_interests++;
+
+ if (PREDICT_FALSE (pkt_hdr0->pkt_hop_limit == 0))
+ {
+ next0 = ICNFWD_NEXT_ERROR_DROP;
+ goto trace_single;
+ }
+
+ pkt_hdr0->pkt_hop_limit--;
+
+ /* Check whether this is an ICN Hello Interest */
+ if (PREDICT_FALSE
+ (cicn_hello_match
+ (inface, pkt_type, nameptr, namelen, &sm->hello_name,
+ NULL /*seq_num */ )))
+ {
+ goto hello_request_forus;
+ }
+
+ if (PREDICT_FALSE (inface->flags & CICN_FACE_FLAG_HELLO_DOWN))
+ {
+ next0 = ICNFWD_NEXT_ERROR_DROP;
+ goto trace_single;
+ }
+
+ if (PREDICT_FALSE (pkt_hdr0->pkt_hop_limit == 0))
+ {
+ /*
+ * If traceroute request, return our information.
+ * Otherwise NAK the interest in all cases.
+ * (draft-irtf-icnrg-ccnxsemantics-03 says
+ * "If the HopLimit equals 0, ... it MAY be sent to a
+ * publisher application or serviced from a local
+	       * Content Store.". The current implementation does not.)
+ */
+ if (msg_type == CICN_MSG_TYPE_TRACEROUTE_REQUEST)
+ {
+ goto traceroute_request_forus;
+ }
+
+ pkt_hdr0->pkt_type = CICN_PKT_TYPE_NAK;
+ pkt_hdr0->pkt_nack_code = CICN_MSG_ERR_HOPLIM;
+
+ outface = inface;
+ outface_stats = inface_stats;
+
+ pkts_nacked_interests_count++;
+ pkts_nak_hoplimit_count++;
+ outface_stats->orig_naks++;
+ outface_stats->out_naks++;
+
+ next0 = ICNFWD_NEXT_LOOKUP;
+ goto ready_to_send;
+ }
+
+ /* Compute the name and prefix hashes */
+
+ /* TODO -- could we carry hash value in from the dist node? */
+
+ /* TODO -- use FIB max-comps hint? */
+
+ /*
+ * Full and LPM prefix hashing for PIT and FIB lookups
+ */
+ ret =
+ cicn_hashtb_hash_prefixes (nameptr, namelen,
+ TRUE /*fullname */ , &pfxhash,
+ 0 /*!limit */ );
+ if (ret != AOK)
+ {
+ next0 = ICNFWD_NEXT_ERROR_DROP;
+ goto trace_single;
+ }
+
+ /* If this is a ping request, parse the target name and compare
+ * it to the name of the forwarder
+ */
+ if (pkt_type == CICN_PKT_TYPE_CONTROL_REQUEST &&
+ (msg_type == CICN_MSG_TYPE_ECHO_REQUEST ||
+ msg_type == CICN_MSG_TYPE_TRACEROUTE_REQUEST))
+ {
+
+ /* Check whether the hash of the ping request target
+ * prefix matches the hash of the forwarder's name.
+	       * If so, respond.
+	       * If a name has not been given to the forwarder,
+	       * or if the hashes do not match, forward the control
+	       * packet as a regular interest.
+ */
+
+ /* We received an echo request with an invalid name */
+ if (pfxhash.pfx_count < 3)
+ {
+ next0 = ICNFWD_NEXT_ERROR_DROP;
+ goto trace_single;
+ }
+
+ if (cicn_infra_fwdr_name.fn_reply_payload_flen != 0 &&
+ cicn_infra_fwdr_name.fn_match_pfx_hash ==
+ pfxhash.pfx_hashes[pfxhash.pfx_count - 3])
+ {
+ if (msg_type == CICN_MSG_TYPE_ECHO_REQUEST)
+ {
+ goto echo_request_forus;
+ }
+ else
+ {
+ goto traceroute_request_forus;
+ }
+ }
+ }
+
+ /*
+ * Opportunistic scan of hash row/bucket for expirations.
+ * Some old code below could be removed with this addition,
+ * it won't be executed anyway.
+ *
+ * The timeout scan and the node lookup could be
+ * easily integrated.
+ */
+ cicn_pcs_timeout (vm, &rt->pitcs, pfxhash.pfx_full_hash,
+ &pit_expired_count, &cs_expired_count);
+
+ /*
+ * Search PIT/CS by full-name hash
+ */
+ ret =
+ cicn_hashtb_lookup_node (rt->pitcs.pcs_table, nameptr,
+ namelen, pfxhash.pfx_full_hash,
+ &nodep);
+ if (ret != AOK)
+ {
+ goto interest_is_new;
+ }
+
+ pitp = cicn_pit_get_data (nodep);
+
+ if (pitp->shared.entry_type == CICN_CS_TYPE)
+ {
+ /* Case: Existing CS entry */
+
+ /* Check for expired CS entry (if not done during the
+ * scan)
+ */
+ if ((tnow > pitp->shared.expire_time) ||
+ (pitp->u.cs.cs_pkt_buf == 0))
+ {
+
+ /* Delete and clean up expired CS entry */
+ cicn_pcs_delete (&rt->pitcs, &pitp, &nodep, vm);
+ cs_expired_count++;
+
+ goto interest_is_new;
+ }
+
+ /* Update the CS LRU, moving this item to the head */
+ cicn_cs_lru_update_head (&rt->pitcs, nodep, pitp);
+
+ /*
+ * Clone the CS packet, and prepare the incoming request
+ * packet to hold the rewrite info as a particle.
+ */
+ if (cicn_clone_cs_buffer (b0, pitp, vm, fl, socket_id,
+ inface /*outface */ ) != AOK)
+ {
+ no_bufs_count++;
+ next0 = ICNFWD_NEXT_ERROR_DROP;
+ goto trace_single;
+ }
+
+ /* Refresh the ip and udp headers before the final part of
+ * the rewrite down below
+ */
+ ip0 = vlib_buffer_get_current (b0);
+ udp0 = (udp_header_t *) ((uint8_t *) ip0 +
+ sizeof (ip4_header_t));
+
+ memset (ip0, 0,
+ sizeof (ip4_header_t) + sizeof (udp_header_t));
+
+ ip0->ip_version_and_header_length = 0x45;
+ ip0->protocol = IP_PROTOCOL_UDP;
+
+ sval = vlib_buffer_length_in_chain (vm, b0);
+ ip0->length = clib_host_to_net_u16 (sval);
+
+ sval -= sizeof (ip4_header_t);
+ udp0->length = clib_host_to_net_u16 (sval);
+
+ pkts_from_cache_count++;
+
+ /* Reply to sender */
+ outface = inface;
+ inface_stats->out_datas++;
+
+ next0 = ICNFWD_NEXT_LOOKUP;
+ goto ready_to_send;
+
+ }
+
+ /*
+ * Case: Existing PIT entry
+ */
+
+ /* Is the PIT entry expired? */
+ if (tnow > pitp->shared.expire_time)
+ {
+
+ /* Remove existing entry, and treat this as new Interest */
+ cicn_pcs_delete (&rt->pitcs, &pitp, &nodep, vm);
+ pit_expired_count++;
+ goto interest_is_new;
+ }
+
+ /*
+ * PIT aggregation: simple form for now, no change in PIT
+ * expiration.
+ *
+ * TODO -- many open questions: retransmissions,
+ * PIT entry expiration time handling.
+ */
+ for (i = 0; i < CICN_PARAM_PIT_ENTRY_PHOPS_MAX; i++)
+ {
+
+ /* Note that this loop is vulnerable if we remove
+ * rx faces from the middle of the PIT array. We don't
+ * do that right now, so I think this is ok.
+ */
+ if (pitp->u.pit.pe_rxfaces[i] == inface->faceid)
+ {
+ /*
+ * Already in the PIT - a retransmission? We allow
+ * retransmits, by capturing the egress face
+ * and jumping to the 'send interest' code.
+ */
+ ret =
+ cicn_face_entry_find_by_id (pitp->u.pit.pe_txface,
+ &outface);
+ if (ret == AOK)
+ {
+ pkts_int_retrans++;
+ next0 = ICNFWD_NEXT_LOOKUP;
+ goto ready_to_send;
+ }
+
+ break;
+
+ }
+ else if (pitp->u.pit.pe_rxfaces[i] == 0)
+ {
+ /* Found an available slot in the PIT */
+ pitp->u.pit.pe_rxfaces[i] = inface->faceid;
+ break;
+ }
+ }
+
+ /* TODO -- stat for 'full PIT'? */
+
+ /*
+ * At this point, we've dealt with the PIT aggregation,
+ * and we can drop the current packet.
+ */
+ pkts_interest_agg++;
+ next0 = ICNFWD_NEXT_ERROR_DROP;
+ goto trace_single;
+
+ interest_is_new:
+
+ /*
+ * Need PIT entry:
+ * - find outface from FIB lookup
+ * - init new PIT entry.
+ */
+ outface = NULL;
+
+ ret = cicn_fib_lookup (&cicn_main.fib, &pfxhash, &pentry);
+ if (PREDICT_FALSE (ret != AOK))
+ {
+ goto interest_noroute_check;
+ }
+
+ /* Look for the right next-hop - for now, use max weight */
+ u8 weight = 0;
+ for (i = 0; i < CICN_PARAM_FIB_ENTRY_NHOPS_MAX; i++)
+ {
+ cicn_face_db_entry_t *face;
+ if ((pentry->fe_next_hops[i].nh_faceid == 0))
+ {
+ continue;
+ }
+ if (pentry->fe_next_hops[i].nh_weight <= weight)
+ {
+ continue;
+ }
+ faceid = pentry->fe_next_hops[i].nh_faceid;
+
+ /* Find tx face by face id */
+ ret = cicn_face_entry_find_by_id (faceid, &face);
+ if (PREDICT_FALSE (ret != AOK))
+ {
+ continue;
+ }
+ if (PREDICT_FALSE ((face->flags & CICN_FACE_FLAGS_DOWN)))
+ {
+ continue;
+ }
+ outface = face;
+ weight = pentry->fe_next_hops[i].nh_weight;
+ }
+
+ interest_noroute_check:
+ if (outface == NULL)
+ {
+ pkt_hdr0->pkt_type = CICN_PKT_TYPE_NAK;
+ pkt_hdr0->pkt_nack_code = CICN_MSG_ERR_NOROUTE;
+
+ outface = inface;
+ outface_stats = inface_stats;
+
+ pkts_nacked_interests_count++;
+ pkts_nak_no_route_count++;
+ outface_stats->orig_naks++;
+ outface_stats->out_naks++;
+
+ next0 = ICNFWD_NEXT_LOOKUP;
+ goto ready_to_send;
+ }
+
+ /* Create PIT node and init PIT entry */
+ nodep = cicn_hashtb_alloc_node (rt->pitcs.pcs_table);
+ if (PREDICT_FALSE (nodep == NULL))
+ {
+ /* Nothing we can do - no mem */
+
+ no_bufs_count++;
+
+ next0 = ICNFWD_NEXT_ERROR_DROP;
+ goto trace_single;
+ }
+
+ pitp = cicn_pit_get_data (nodep);
+
+ cicn_pit_init_data (pitp);
+
+ pitp->shared.entry_type = CICN_PIT_TYPE;
+ pitp->shared.create_time = tnow;
+ pitp->u.pit.pe_txface = outface->faceid;
+ pitp->u.pit.pe_rxfaces[0] = inface->faceid;
+
+ /*
+ * Interest lifetime based on optional hdr_tlv, ranges, default
+ * - special case is lifetime of 0
+	       * - this is the "forward but do not expect content return" case
+ * - code sequence here (and above for content)
+ * always checks if an existing PIT entry has
+ * expired. If so, it is deleted before continuing
+ * to process current message. Thus, should be
+ * benign to enter an interest with a 0 lifetime
+	       *   into the PIT: it will always be found to be
+ * expired at the earliest opportunity, the only
+ * cost being the held hash resources.
+ * - corresponding expiry time appears in pit entry and
+ * (compressed) in bucket entry
+ */
+ uint64_t imsg_lifetime;
+ ret =
+ cicn_parse_hdr_time_ms (body0, &pkt_hdr_desc0,
+ CICN_HDR_TLV_INT_LIFETIME,
+ &imsg_lifetime);
+ if (ret != AOK)
+ { // no header timeout, use default
+ imsg_lifetime = sm->pit_lifetime_dflt_ms;
+ }
+ else if (imsg_lifetime != 0)
+ {
+ if (imsg_lifetime < sm->pit_lifetime_min_ms)
+ {
+ imsg_lifetime = sm->pit_lifetime_min_ms;
+ }
+ else if (imsg_lifetime > sm->pit_lifetime_max_ms)
+ {
+ imsg_lifetime = sm->pit_lifetime_max_ms;
+ }
+ }
+ pitp->shared.expire_time =
+ cicn_pcs_get_exp_time (tnow, imsg_lifetime);
+ bkt_ent_exp_time = cicn_infra_get_fast_exp_time (imsg_lifetime);
+
+ /* TODO -- decide whether to hold/clone interest packet mbuf */
+
+ /* Set up the hash node and insert it */
+ ret =
+ cicn_hashtb_init_node (rt->pitcs.pcs_table, nodep,
+ pfxhash.pfx_full_hash, nameptr,
+ namelen);
+ if (ret == AOK)
+ {
+ ret = cicn_pit_insert (&rt->pitcs, pitp, nodep);
+ }
+ if (ret != AOK)
+ {
+ /* Just dropping on error for now */
+
+ /* Return hashtable node */
+ cicn_hashtb_free_node (rt->pitcs.pcs_table, nodep);
+
+ next0 = ICNFWD_NEXT_ERROR_DROP;
+ goto trace_single;
+ }
+
+ // Set the hashtable-level expiration value in bucket entry
+ cicn_hashtb_entry_set_expiration (rt->pitcs.pcs_table, nodep,
+ bkt_ent_exp_time,
+ CICN_HASH_ENTRY_FLAG_FAST_TIMEOUT);
+
+ /* Looks like we're ok to forward */
+ outface_stats =
+ &wshard->face_stats[cicn_face_db_index (outface)];
+ outface_stats->out_interests++;
+
+ next0 = ICNFWD_NEXT_LOOKUP;
+ goto ready_to_send;
+
+
+ /*
+ * Code routes control requests for us to these labels:
+ * respond to with control reply
+ */
+
+ hello_request_forus:
+ /* Hello Request: For now, just change the packet and msg type
+ * (do not attach any extra payload) and reflect back
+ */
+ pkt_hdr0->pkt_type = CICN_PKT_TYPE_CONTROL_REPLY;
+ C_PUTINT16 (&msg_tlv[0], CICN_MSG_TYPE_CONTENT);
+
+ outface = inface;
+ outface_stats = inface_stats;
+
+ pkts_hello_int_rec++;
+ pkts_hello_data_sent++;
+ inface_stats->term_interests++;
+ outface_stats->orig_datas++;
+ outface_stats->out_datas++;
+
+ /* Send it out directly without doing anything further */
+ next0 = ICNFWD_NEXT_LOOKUP;
+ goto ready_to_send;
+
+ echo_request_forus:
+ /* Tweak packet and message types and send back
+ * as a ping request reply
+ */
+ pkt_hdr0->pkt_type = CICN_PKT_TYPE_CONTROL_REPLY;
+ C_PUTINT16 (msg_tlv, CICN_MSG_TYPE_ECHO_REPLY);
+
+ outface = inface;
+ outface_stats = inface_stats;
+
+ pkts_control_reply_count++;
+ inface_stats->term_interests++;
+ outface_stats->out_datas++;
+
+ next0 = ICNFWD_NEXT_LOOKUP;
+ goto ready_to_send;
+
+ traceroute_request_forus:
+ /* Update msg type and hop limit value */
+ pkt_hdr0->pkt_type = CICN_PKT_TYPE_CONTROL_REPLY;
+ C_PUTINT16 (msg_tlv, CICN_MSG_TYPE_TRACEROUTE_REPLY);
+ pkt_hdr0->pkt_hop_limit = CICN_DEFAULT_HOP_LIMIT;
+ if (cicn_infra_fwdr_name.fn_reply_payload_flen > 0)
+ {
+ int payload_size =
+ cicn_infra_fwdr_name.fn_reply_payload_flen;
+ vlib_buffer_add_data (vm, b0->free_list_index,
+ bi0,
+ cicn_infra_fwdr_name.fn_reply_payload,
+ payload_size);
+
+ uint16_t imsg_size;
+ C_GETINT16 (imsg_size, &msg_tlv[CICN_TLV_TYPE_LEN]);
+ C_PUTINT16 (&msg_tlv[CICN_TLV_TYPE_LEN],
+ imsg_size + payload_size);
+ pkt_hdr0->pkt_len =
+ clib_host_to_net_u16 (clib_net_to_host_u16
+ (pkt_hdr0->pkt_len) + payload_size);
+ udp0->length =
+ clib_host_to_net_u16 (clib_net_to_host_u16 (udp0->length)
+ + payload_size);
+ ip0->length =
+ clib_host_to_net_u16 (clib_net_to_host_u16 (ip0->length) +
+ payload_size);
+ }
+
+ outface = inface;
+ outface_stats = inface_stats;
+
+ pkts_control_reply_count++;
+ inface_stats->term_interests++;
+ outface_stats->out_datas++;
+
+ next0 = ICNFWD_NEXT_LOOKUP;
+ goto ready_to_send;
+
+ }
+ else if (pkt_type == CICN_PKT_TYPE_NAK)
+ {
+
+ inface_stats->in_naks++;
+
+ }
+ else
+ {
+ /* Don't expect any other packets: just drop? */
+
+ next0 = ICNFWD_NEXT_ERROR_DROP;
+ goto trace_single;
+ }
+
+ ready_to_send:
+
+ /* Use info to prep and enqueue: we expect that
+ * the egress face and the next-node have been set.
+ */
+
+ /* TODO -- worth optimizing/remembering egress interface? */
+
+ /* TODO -- assuming ipv4 udp egress for now */
+
+ vnet_buffer (b0)->sw_if_index[VLIB_TX] = ~0;
+
+ /* Rewrite ip and udp headers */
+ ip0->src_address.as_u32 = outface->src_addr.sin_addr.s_addr;
+ ip0->dst_address.as_u32 = outface->dest_addr.sin_addr.s_addr;
+
+ /* TODO -- Refresh IP ttl - is that ok? */
+ ip0->ttl = CICN_IP_TTL_DEFAULT;
+
+ /* TODO -- Not optimizing the IP checksum currently */
+ ip0->checksum = ip4_header_checksum (ip0);
+
+ udp0->src_port = outface->src_addr.sin_port;
+ udp0->dst_port = outface->dest_addr.sin_port;
+
+ /* TODO -- clear UDP checksum; is this ok? */
+ udp0->checksum = 0;
+
+ /* Next-node should already be ok at this point */
+
+ trace_single:
+
+ /* Maybe trace */
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) &&
+ (b0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+
+ icnfwd_trace_t *t = vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->pkt_type = pkt_type;
+ t->msg_type = msg_type;
+ t->sw_if_index = sw_if_index0;
+ t->next_index = next0;
+ }
+
+ /* Speculatively enqueue packet b0 (index in bi0)
+ * to the current next frame
+ */
+ to_next[0] = bi0;
+ to_next += 1;
+ n_left_to_next -= 1;
+
+ /* Incr packet counter */
+ pkts_processed += 1;
+
+ BUFTRC ((next0 == ICNFWD_NEXT_ERROR_DROP) ? "DROPTX1" : "ICN-TX1",
+ bi0);
+ /* Verify speculative enqueue, maybe switch current next frame */
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ /*
+ * End of 1-at-a-time loop; finish 'next' processing
+ */
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ /* Check the CS LRU, and trim if necessary. */
+ cicn_trim_cs_lru (vm, node, &(rt->pitcs));
+
+ pit_int_count = cicn_pit_get_int_count (&(rt->pitcs));
+ pit_cs_count = cicn_pit_get_cs_count (&(rt->pitcs));
+
+ vlib_node_increment_counter (vm, icnfwd_node.index,
+ ICNFWD_ERROR_PROCESSED, pkts_processed);
+ vlib_node_increment_counter (vm, icnfwd_node.index,
+ ICNFWD_ERROR_INTERESTS, pkts_interest_count);
+ vlib_node_increment_counter (vm, icnfwd_node.index,
+ ICNFWD_ERROR_DATAS, pkts_data_count);
+ vlib_node_increment_counter (vm, icnfwd_node.index,
+ ICNFWD_ERROR_NAKS, pkts_nak_count);
+ vlib_node_increment_counter (vm, icnfwd_node.index,
+ ICNFWD_ERROR_CACHED, pkts_from_cache_count);
+ vlib_node_increment_counter (vm, icnfwd_node.index,
+ ICNFWD_ERROR_NACKED_INTERESTS,
+ pkts_nacked_interests_count);
+ vlib_node_increment_counter (vm, icnfwd_node.index,
+ ICNFWD_ERROR_HOPLIMIT_EXCEEDED,
+ pkts_nak_hoplimit_count);
+ vlib_node_increment_counter (vm, icnfwd_node.index,
+ ICNFWD_ERROR_NO_ROUTE,
+ pkts_nak_no_route_count);
+ vlib_node_increment_counter (vm, icnfwd_node.index,
+ ICNFWD_ERROR_NO_PIT, pkts_no_pit_count);
+ vlib_node_increment_counter (vm, icnfwd_node.index,
+ ICNFWD_ERROR_PIT_EXPIRED, pit_expired_count);
+ vlib_node_increment_counter (vm, icnfwd_node.index,
+ ICNFWD_ERROR_CS_EXPIRED, cs_expired_count);
+ vlib_node_increment_counter (vm, icnfwd_node.index,
+ ICNFWD_ERROR_NO_BUFS, no_bufs_count);
+ vlib_node_increment_counter (vm, icnfwd_node.index,
+ ICNFWD_ERROR_INTEREST_AGG, pkts_interest_agg);
+ vlib_node_increment_counter (vm, icnfwd_node.index,
+ ICNFWD_ERROR_INT_RETRANS, pkts_int_retrans);
+ vlib_node_increment_counter (vm, icnfwd_node.index,
+ ICNFWD_ERROR_CONTROL_REQUESTS,
+ pkts_control_request_count);
+ vlib_node_increment_counter (vm, icnfwd_node.index,
+ ICNFWD_ERROR_CONTROL_REPLIES,
+ pkts_control_reply_count);
+ vlib_node_increment_counter (vm, icnfwd_node.index,
+ ICNFWD_ERROR_HELLO_INTERESTS_RCVD,
+ pkts_hello_int_rec);
+ vlib_node_increment_counter (vm, icnfwd_node.index,
+ ICNFWD_ERROR_HELLO_DMSGS_SENT,
+ pkts_hello_data_sent);
+ vlib_node_increment_counter (vm, icnfwd_node.index,
+ ICNFWD_ERROR_HELLO_DMSGS_RCVD,
+ pkts_hello_data_rec);
+
+
+ update_node_counter (vm, icnfwd_node.index,
+ ICNFWD_ERROR_INT_COUNT, pit_int_count);
+ update_node_counter (vm, icnfwd_node.index,
+ ICNFWD_ERROR_CS_COUNT, pit_cs_count);
+ ASSERT (rt->pitcs.pcs_lru_count == pit_cs_count);
+
+ return (frame->n_vectors);
+}
+
+/*
+ * Check the CS LRU, trim if necessary
+ */
+static int
+cicn_trim_cs_lru (vlib_main_t * vm, vlib_node_runtime_t * node,
+ cicn_pit_cs_t * pit)
+{
+#define LRU_TRIM_COUNT 512
+
+ int i, count = 0, bufcount;
+ u32 node_list[LRU_TRIM_COUNT], buf_list[LRU_TRIM_COUNT];
+ cicn_hash_node_t *np;
+ cicn_pcs_entry_t *pcs;
+
+ if (pit->pcs_lru_count > pit->pcs_lru_max)
+ {
+
+ /* Collect an armful of entries from the back of the LRU */
+ count = cicn_cs_lru_trim (pit, node_list, LRU_TRIM_COUNT);
+
+ bufcount = 0;
+
+ for (i = 0; i < count; i++)
+ {
+ /* Retrieve the CS data */
+ np = cicn_hashtb_node_from_idx (pit->pcs_table, node_list[i]);
+
+ pcs = cicn_pit_get_data (np);
+
+ /* Extract the packet buffer id and save it */
+ if (pcs->u.cs.cs_pkt_buf != 0)
+ {
+ BUFTRC (" CS-TRIM", pcs->u.cs.cs_pkt_buf);
+ buf_list[bufcount++] = pcs->u.cs.cs_pkt_buf;
+ pcs->u.cs.cs_pkt_buf = 0;
+ }
+
+ /* Remove the hash node from the hashtable and free it */
+ cicn_cs_delete_trimmed (pit, &pcs, &np, vm);
+
+ }
+
+ /* Free packet buffers */
+ BUFTRC ("CS-TRIM-ALL", bufcount);
+ if (bufcount > 0)
+ {
+#if 1 /* TODO: freeing directly seems to work better than the drop-node approach below */
+ vlib_buffer_free (vm, buf_list, bufcount);
+#else
+ /* This ought to work, not limited to a single frame size. It has
+ * the nice property that we get to set a stat/error code for
+ * the bufs we're freeing. Note that we specify the 'next index'
+ * in terms of our own node's array of 'nexts'.
+ *
+ * Seems to work but can replace with
+ * vlib_buffer_free (vm, buf_list, bufcount);
+ * if willing to give up the counter.
+ */
+ vlib_error_drop_buffers (vm, node, buf_list, 1 /*stride */ ,
+ bufcount,
+ ICNFWD_NEXT_ERROR_DROP /*next index */ ,
+ icnfwd_node.index, ICNFWD_ERROR_CS_LRU);
+#endif
+ }
+ }
+
+ return (count);
+}
+
+/*
+ * Node registration for the forwarder node
+ */
+VLIB_REGISTER_NODE (icnfwd_node) =
+{
+  .function = icnfwd_node_fn,
+  .name = "icnfwd",
+  .vector_size = sizeof (u32),
+  .runtime_data_bytes = sizeof (icnfwd_runtime_t),
+  .format_trace = icnfwd_format_trace,
+  .type = VLIB_NODE_TYPE_INTERNAL,
+  .n_errors = ARRAY_LEN (icnfwd_error_strings),
+  .error_strings = icnfwd_error_strings,
+  .n_next_nodes = ICNFWD_N_NEXT,
+  /* edit / add dispositions here */
+  .next_nodes =
+  {
+    [ICNFWD_NEXT_LOOKUP] = "ip4-lookup",
+    [ICNFWD_NEXT_ERROR_DROP] = "error-drop",
+  },
+};
+
+/*
+ * TODO -- new worker node ends here
+ */
+
+#if CICN_FEATURE_MULTITHREAD
+/*
+ * Work-distribution node (first pass, anyway). We use the full-name hash
+ * to direct packets at forwarding worker threads. We've informed the
+ * handoff node running at the edge of each graph instance of the existence
+ * of our forwarding node, as part of setup/enable. As a result, the
+ * target thread's handoff node will be able to hand our packets
+ * directly to our forwarding node.
+ */
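+
+/*
+ * Minimal sketch of the hash-to-worker mapping described above: the
+ * full-name hash selects a forwarding worker in the range
+ * [worker_first_index, worker_first_index + worker_count). The helper name
+ * below is illustrative only; icndist_node_fn() further down inlines this
+ * logic directly.
+ */
+static inline u32
+icndist_worker_from_hash (u64 name_hash, u32 first_index, u32 count)
+{
+  /* A power-of-two worker count can use a mask instead of a modulo */
+  if (PREDICT_TRUE (is_pow2 (count)))
+    {
+      return (first_index + (u32) (name_hash & (count - 1)));
+    }
+  return (first_index + (u32) (name_hash % count));
+}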
+
+/*
+ * Node context data; we think this is per-thread/instance/graph
+ */
+typedef struct icndist_runtime_s
+{
+
+ /* Vector of queues directed at each forwarding worker thread */
+ vlib_frame_queue_elt_t **handoff_q_elt_by_worker;
+
+} icndist_runtime_t;
+
+/* Registration struct for a graph node */
+vlib_node_registration_t icndist_node;
+
+/*
+ * Next graph nodes, which reference the list in the actual registration
+ * block below
+ */
+typedef enum
+{
+ ICNDIST_NEXT_FWD,
+ ICNDIST_NEXT_ERROR_DROP,
+ ICNDIST_N_NEXT,
+} icndist_next_t;
+
+/* Stats string values */
+static char *icndist_error_strings[] = {
+#define _(sym,string) string,
+ foreach_icndist_error
+#undef _
+};
+
+/* Trace context struct */
+typedef struct
+{
+ u32 next_worker;
+ u32 sw_if_index;
+ u8 pkt_type;
+ u16 msg_type;
+} icndist_trace_t;
+
+/* Distribution node packet trace format function */
+static u8 *
+icndist_format_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ icndist_trace_t *t = va_arg (*args, icndist_trace_t *);
+
+ s = format (s, "ICN-DIST: pkt: %d, msg %d, sw_if_index %d, next worker %d",
+ (int) t->pkt_type, (int) t->msg_type,
+ t->sw_if_index, t->next_worker);
+ return (s);
+}
+
+/*
+ * The IP worker allocates a free packet frame to fill in and hand off
+ * to the ICN worker.
+ */
+static inline vlib_frame_queue_elt_t *
+get_new_handoff_queue_elt (u32 vlib_worker_index)
+{
+ vlib_frame_queue_t *fq;
+ vlib_frame_queue_elt_t *elt;
+ u64 new_tail;
+
+ fq = vlib_frame_queues[vlib_worker_index];
+ ASSERT (fq);
+
+ new_tail = __sync_add_and_fetch (&fq->tail, 1);
+
+ /* Wait until a ring slot is available */
+ while (new_tail >= fq->head_hint + fq->nelts)
+ {
+ vlib_worker_thread_barrier_check ();
+ }
+
+ elt = fq->elts + (new_tail & (fq->nelts - 1));
+
+  /* The freshly claimed slot should never still be marked valid; spin defensively until it clears */
+ while (elt->valid)
+ ;
+
+ elt->msg_type = VLIB_FRAME_QUEUE_ELT_DISPATCH_FRAME;
+ elt->last_n_vectors = elt->n_vectors = 0;
+
+ return (elt);
+}
+
+/*
+ * IP-worker gets frame for ICN-worker, allocating new frame if needed.
+ */
+static inline vlib_frame_queue_elt_t *
+icn_get_handoff_queue_elt (u32 vlib_worker_index,
+ vlib_frame_queue_elt_t ** handoff_queue_elt)
+{
+ vlib_frame_queue_elt_t *elt;
+
+ if (handoff_queue_elt[vlib_worker_index])
+ {
+ return (handoff_queue_elt[vlib_worker_index]);
+ }
+
+ elt = get_new_handoff_queue_elt (vlib_worker_index);
+
+ handoff_queue_elt[vlib_worker_index] = elt;
+
+ return (elt);
+}
+
+/*
+ * Publishes the frame (marks it valid) once the IP worker is done filling it.
+ */
+static inline void
+icn_put_handoff_queue_elt (vlib_frame_queue_elt_t * hf)
+{
+ CLIB_MEMORY_BARRIER ();
+ hf->valid = 1;
+}
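+
+/*
+ * Usage sketch for the helpers above, mirroring the per-packet pattern in
+ * icndist_node_fn() below: fetch (or allocate) the element for the target
+ * worker, append the buffer index, and publish the frame once it fills.
+ * The helper name is illustrative only and is not called by the node.
+ */
+static inline void
+icn_handoff_one_buffer (u32 bi, u32 worker_index,
+			vlib_frame_queue_elt_t ** elt_by_worker)
+{
+  vlib_frame_queue_elt_t *elt;
+
+  elt = icn_get_handoff_queue_elt (worker_index, elt_by_worker);
+
+  /* Append this buffer to the frame being built for the worker */
+  elt->buffer_index[elt->n_vectors++] = bi;
+
+  if (elt->n_vectors == VLIB_FRAME_SIZE)
+    {
+      /* Frame is full: mark it valid for the consumer and forget it so
+       * a fresh element is allocated for the next packet to this worker.
+       */
+      icn_put_handoff_queue_elt (elt);
+      elt_by_worker[worker_index] = 0;
+    }
+}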
+
+/*
+ * Second-level work-distribution node: the IP worker received packets based
+ * on the IP 5-tuple hash and redistributes them to the (final) ICN worker
+ * based on the ICN name hash.
+ */
+static uword
+icndist_node_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ u32 n_left_from, *from;
+ u32 *to_next;
+ icndist_next_t next_index;
+ u32 pkts_processed = 0, pkts_interest_count = 0, pkts_data_count = 0;
+ u32 pkts_dropped = 0;
+ int i, ret;
+ icndist_runtime_t *rt;
+ u32 current_worker_index = ~0;
+ u32 next_worker_index = 0;
+ vlib_frame_queue_elt_t *hf = 0;
+ u32 n_left_to_next_worker = 0, *to_next_worker = 0;
+ cicn_main_t *icnmain = &cicn_main;
+ u32 n_left_to_next;
+ u32 drop_count = 0, drop_list[VLIB_FRAME_SIZE];
+
+ /* Retrieve the per-thread context struct */
+ rt = vlib_node_get_runtime_data (vm, icndist_node.index);
+
+ /*
+ * If necessary, do one-time init
+ */
+ if (rt->handoff_q_elt_by_worker == NULL)
+ {
+
+ /* Init/allocate a vector we'll use to store queues directed at
+ * each worker thread we're using for forwarding.
+ */
+ vec_validate (rt->handoff_q_elt_by_worker,
+ icnmain->worker_first_index + icnmain->worker_count - 1);
+
+ /* TODO -- dpdk io does a 'congested_handoff' queue also? Do we have
+ * to do that too, or is there other infra that will do something
+ * sane if we're overrunning the forwarder threads?
+ */
+ }
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+
+ next_index = node->cached_next_index;
+ next_index = ICNDIST_NEXT_FWD;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ /* TODO -- just doing 1-at-a-time for now, to simplify things a bit. */
+
+ /* TODO -- v6 support too? */
+ /* TODO -- more interesting stats and trace */
+
+ /* TODO -- simpler loop since we don't use the vlib api? */
+// while (n_left_from > 0 && n_left_to_next > 0) {
+
+ while (n_left_from > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t *b0;
+ u32 sw_if_index0;
+ u8 *body0, *ptr0;
+ u32 len0;
+ u8 pkt_type;
+ u16 msg_type;
+ cicn_pkt_hdr_desc_t pkt_hdr_desc0;
+ u8 *nameptr;
+ u32 namelen;
+ u64 hashval;
+
+ /* Prefetch for next iteration. */
+ if (n_left_from > 1)
+ {
+ vlib_buffer_t *p2;
+
+ p2 = vlib_get_buffer (vm, from[1]);
+
+ vlib_prefetch_buffer_header (p2, LOAD);
+
+ CLIB_PREFETCH (p2->data, (2 * CLIB_CACHE_LINE_BYTES), LOAD);
+ }
+
+ /* Set up to access the packet */
+
+ /* We don't use the normal 'to next node' variables, because we're
+ * mostly moving packets to other threads. We only use the direct
+ * path for packets destined for the current thread's forwarder
+ * node; even error/drop packets are dealt with all at once, at
+ * the end of the loop.
+ */
+ bi0 = from[0];
+ from += 1;
+ n_left_from -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ /*
+ * From the IPv4 udp code, we think we're handed the payload part
+ * of the packet
+ */
+ ASSERT (b0->current_data >=
+ (sizeof (ip4_header_t) + sizeof (udp_header_t)));
+
+ /* Capture pointer to the payload */
+ ptr0 = body0 = vlib_buffer_get_current (b0);
+ len0 = b0->current_length;
+
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+
+ /* Reset destination worker thread idx */
+ next_worker_index = icnmain->worker_first_index;
+
+ /*
+ * Do a quick, in-place parse/validate pass, locating
+ * a couple of key pieces of info about the packet
+ */
+ ret = cicn_parse_pkt (ptr0, len0, &pkt_type, &msg_type,
+ &nameptr, &namelen, &pkt_hdr_desc0);
+
+ /* If we can't even get at the name, we just drop */
+ if (ret != AOK)
+ {
+ /* Just drop on error? */
+ drop_list[drop_count] = bi0;
+ drop_count++;
+
+ pkts_dropped++;
+ goto trace_single;
+ }
+
+ if (pkt_type == CICN_PKT_TYPE_INTEREST)
+ {
+ pkts_interest_count++;
+ }
+ else if (pkt_type == CICN_PKT_TYPE_CONTENT)
+ {
+ pkts_data_count++;
+ }
+
+ /* Compute the full name hash, for distribution
+ * (so only doing the full-name hash here, no LPM prefix hashing).
+ * TODO - could we capture the hash and pass it along?
+ */
+ hashval = cicn_hashtb_hash_name (nameptr, namelen);
+
+ /* Use the hash to identify the correct worker thread */
+
+ if (PREDICT_TRUE (is_pow2 (icnmain->worker_count)))
+ {
+ next_worker_index += hashval & (icnmain->worker_count - 1);
+ }
+ else
+ {
+ next_worker_index += hashval % icnmain->worker_count;
+ }
+
+ /* Use normal next-node path if we're
+ * using the forwarding node on the current thread; that'd
+ * save some work.
+ */
+ if (next_worker_index == vm->cpu_index)
+ {
+ if (n_left_to_next == 0)
+ {
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+
+ vlib_get_next_frame (vm, node, next_index,
+ to_next, n_left_to_next);
+ }
+
+ ASSERT (n_left_to_next > 0);
+
+ to_next[0] = bi0;
+ to_next++;
+ n_left_to_next--;
+
+ /* Skip to end of the loop */
+ goto trace_single;
+ }
+
+ /* On the target worker thread, the buffers will arrive at the
+	   * dpdk handoff node. Convince the target's dpdk handoff node to
+	   * move the buffers to the icn forwarding node.
+ */
+ vnet_buffer (b0)->handoff.next_index = cicn_main.fwd_next_node;
+
+ /* Locate or allocate a queue for the thread; update current
+ * queue if we're changing destination threads.
+ */
+ if (next_worker_index != current_worker_index)
+ {
+ if (hf)
+ {
+ hf->n_vectors = VLIB_FRAME_SIZE - n_left_to_next_worker;
+ }
+
+ hf = icn_get_handoff_queue_elt (next_worker_index,
+ rt->handoff_q_elt_by_worker);
+
+ n_left_to_next_worker = VLIB_FRAME_SIZE - hf->n_vectors;
+ to_next_worker = &hf->buffer_index[hf->n_vectors];
+ current_worker_index = next_worker_index;
+ }
+
+ /* Enqueue to correct worker thread */
+ to_next_worker[0] = bi0;
+ to_next_worker++;
+ n_left_to_next_worker--;
+
+ /*
+ * If we've filled a frame, pass it on
+ */
+ if (n_left_to_next_worker == 0)
+ {
+ hf->n_vectors = VLIB_FRAME_SIZE;
+ icn_put_handoff_queue_elt (hf);
+ current_worker_index = ~0;
+ rt->handoff_q_elt_by_worker[next_worker_index] = 0;
+ hf = 0;
+ }
+
+ trace_single:
+
+ /* Maybe trace */
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
+ && (b0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+
+ icndist_trace_t *t = vlib_add_trace (vm, node, b0, sizeof (*t));
+
+ t->sw_if_index = sw_if_index0;
+ t->pkt_type = pkt_type;
+ t->msg_type = msg_type;
+ t->next_worker = next_worker_index;
+ }
+
+ /* Incr packet counter */
+ pkts_processed += 1;
+ }
+
+ /*
+ * End of 1-at-a-time loop through the incoming frame
+ */
+
+ /* Finish handing frames to threads, and reset */
+ if (hf)
+ {
+ hf->n_vectors = VLIB_FRAME_SIZE - n_left_to_next_worker;
+ }
+
+ /* Ship remaining frames to the worker nodes */
+ for (i = 0; i < vec_len (rt->handoff_q_elt_by_worker); i++)
+ {
+ if (rt->handoff_q_elt_by_worker[i])
+ {
+ hf = rt->handoff_q_elt_by_worker[i];
+ /*
+ * It works better to let the handoff node
+ * rate-adapt, always ship the handoff queue element.
+ */
+ if (1 || hf->n_vectors == hf->last_n_vectors)
+ {
+ icn_put_handoff_queue_elt (hf);
+ rt->handoff_q_elt_by_worker[i] = 0;
+ }
+ else
+ {
+ hf->last_n_vectors = hf->n_vectors;
+ }
+ }
+
+#if 0 /* TODO -- no congested queues for now? */
+ congested_handoff_queue_by_worker_index[i] =
+ (vlib_frame_queue_t *) (~0);
+#endif /* TODO */
+ }
+
+ hf = 0;
+ current_worker_index = ~0;
+
+ /* Dispose of any pending 'normal' frame within this thread */
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+
+ /* Deal with any error/drop packets */
+ if (drop_count > 0)
+ {
+ vlib_error_drop_buffers (vm, node, drop_list, 1, drop_count,
+ ICNDIST_NEXT_ERROR_DROP /* next index */ ,
+ icndist_node.index, ICNDIST_ERROR_DROPS);
+ }
+
+ /* Update counters */
+ vlib_node_increment_counter (vm, icndist_node.index,
+ ICNDIST_ERROR_PROCESSED, pkts_processed);
+ vlib_node_increment_counter (vm, icndist_node.index,
+ ICNDIST_ERROR_INTERESTS, pkts_interest_count);
+ vlib_node_increment_counter (vm, icndist_node.index,
+ ICNDIST_ERROR_DATAS, pkts_data_count);
+ vlib_node_increment_counter (vm, icndist_node.index,
+ ICNDIST_ERROR_DROPS, pkts_dropped);
+
+ return (frame->n_vectors);
+}
+
+/* End of the packet-distributing node function */
+
+/*
+ * Node registration block for the work-distributing node.
+ * TODO -- the trace func is essentially a copy of the icnfwd node's for now
+ */
+VLIB_REGISTER_NODE (icndist_node) =
+{
+  .function = icndist_node_fn,
+  .name = "icndist",
+  .vector_size = sizeof (u32),
+  .runtime_data_bytes = sizeof (icndist_runtime_t),
+  .format_trace = icndist_format_trace,
+  .type = VLIB_NODE_TYPE_INTERNAL,
+  .n_errors = ARRAY_LEN (icndist_error_strings),
+  .error_strings = icndist_error_strings,
+  .n_next_nodes = ICNDIST_N_NEXT,
+  /* edit / add dispositions here */
+  .next_nodes =
+  {
+    [ICNDIST_NEXT_FWD] = "icnfwd",
+    [ICNDIST_NEXT_ERROR_DROP] = "error-drop",
+  },
+};
+#endif // CICN_FEATURE_MULTITHREAD
diff --git a/cicn-plugin/cicn/test/test_cicn.c b/cicn-plugin/cicn/test/test_cicn.c
new file mode 100644
index 00000000..77eff120
--- /dev/null
+++ b/cicn-plugin/cicn/test/test_cicn.c
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * framework for dynamically linked cicn plugin unit tests
+ */
+
+#include <vlib/vlib.h>
+#include <vppinfra/error.h>
+#include <vnet/ip/format.h>
+#include <vlibapi/api.h>
+
+#include "test_cicn.h"
+
+#include <cicn/cicn.h>
+
+/*
+ * Per-suite function, called to execute all that suite's tests
+ */
+typedef int (*test_cicn_suite_fn) (cicn_api_test_suite_results_t * tr,
+ test_cicn_running_t * running);
+
+/*
+ * descriptor for each suite, called by engine
+ */
+typedef struct test_cicn_suite_s
+{
+ char *cs_name;
+ test_cicn_suite_fn cs_func;
+} test_cicn_suite_t;
+
+test_cicn_suite_t test_cicn_suites[] = {
+ {.cs_name = "cicn_hash",.cs_func = test_cicn_hash_suite,},
+};
+
+/*
+ * Helper function called by suites on each test, to record
+ * success/failure of that test.
+ */
+void
+test_cicn_result_record (cicn_api_test_suite_results_t * tr, int rc,
+ test_cicn_running_t * running)
+{
+ running->ntests++;
+ if (rc == AOK)
+ {
+ running->nsuccesses++;
+ }
+ else
+ {
+ running->nfailures++;
+ int test_idx = running->ntests - 1;
+      /* Record this test's failure bit without clobbering earlier ones */
+      tr->failures_mask[test_idx / 8] |= (1 << (test_idx % 8));
+ }
+}
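+
+/*
+ * The failures_mask convention above packs one bit per test: bit
+ * (test_idx % 8) of byte (test_idx / 8) is set when that test fails.
+ * A minimal lookup helper, shown for illustration only (the name is
+ * hypothetical and the framework does not call it):
+ */
+static inline int
+test_cicn_result_failed (const cicn_api_test_suite_results_t * tr,
+			 int test_idx)
+{
+  return ((tr->failures_mask[test_idx / 8] >> (test_idx % 8)) & 0x1);
+}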
+
+/*
+ * Execution and serialization for UT test API
+ */
+vnet_api_error_t
+test_cicn_api_results_serialize (test_cicn_api_op_t * test_cicn_api_op)
+{
+ vnet_api_error_t vaec = VNET_API_ERROR_UNSPECIFIED;
+
+ vl_api_cicn_api_test_run_get_reply_t *reply;
+ int nentries;
+
+ reply = test_cicn_api_op->reply;
+
+ nentries = ARRAY_LEN (test_cicn_suites);
+ if (nentries * sizeof (cicn_api_test_suite_results_t) >
+ sizeof (reply->suites))
+ { // should never happen
+ vaec = VNET_API_ERROR_INVALID_MEMORY_SIZE; // best available choice(?)
+ goto done;
+ }
+
+ cicn_api_test_suite_results_t *suites_results =
+ (cicn_api_test_suite_results_t *) reply->suites;
+
+ int i;
+ for (i = 0; i < ARRAY_LEN (test_cicn_suites); i++)
+ {
+ test_cicn_suite_t *suite = &test_cicn_suites[i];
+ cicn_api_test_suite_results_t *suite_results = &suites_results[i];
+
+ memset (suite_results, 0, sizeof (*suite_results));
+ snprintf ((char *) suite_results->suitename,
+ sizeof (suite_results->suitename), "%s", suite->cs_name);
+
+ test_cicn_running_t running;
+ memset (&running, 0, sizeof (running));
+
+ (suite->cs_func) (suite_results, &running);
+
+ suite_results->ntests = clib_host_to_net_i32 (running.ntests);
+ suite_results->nsuccesses = clib_host_to_net_i32 (running.nsuccesses);
+ suite_results->nfailures = clib_host_to_net_i32 (running.nfailures);
+ suite_results->nskipped = clib_host_to_net_i32 (running.nskipped);
+ }
+
+ reply->nentries = clib_host_to_net_i32 (nentries);
+
+ vaec = CICN_VNET_API_ERROR_NONE;
+
+done:
+ return (vaec);
+}
+
+
+/*
+ * VLIB_INIT_FUNCTION() that registers the test modules with cicn_mgmt.c
+ */
+static clib_error_t *
+test_cicn_init (vlib_main_t * vm)
+{
+ clib_error_t *error = 0;
+
+ cicn_main_t *sm = &cicn_main;
+
+ /* support for UT sub-plugin */
+ sm->test_cicn_api_handler = test_cicn_api_results_serialize;
+
+ return (error);
+}
+
+VLIB_INIT_FUNCTION (test_cicn_init);
+
diff --git a/cicn-plugin/cicn/test/test_cicn.h b/cicn-plugin/cicn/test/test_cicn.h
new file mode 100644
index 00000000..ddc14b08
--- /dev/null
+++ b/cicn-plugin/cicn/test/test_cicn.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * framework for dynamically linked cicn plugin unit tests
+ */
+
+#include <stdlib.h>
+#include <errno.h>
+#include <assert.h>
+#include <inttypes.h>
+
+#include <vlib/vlib.h>
+#include <vppinfra/pool.h>
+
+#include <cicn/cicn.h>
+
+typedef struct test_cicn_running_s
+{
+ i32 ntests;
+ i32 nsuccesses;
+ i32 nfailures;
+ i32 nskipped;
+} test_cicn_running_t;
+
+void
+test_cicn_result_record (cicn_api_test_suite_results_t * tr, int rc,
+ test_cicn_running_t * running);
+
+int
+test_cicn_hash_suite (cicn_api_test_suite_results_t * tr,
+ test_cicn_running_t * running);
+
diff --git a/cicn-plugin/cicn/test/test_cicn_hash.c b/cicn-plugin/cicn/test/test_cicn_hash.c
new file mode 100644
index 00000000..a8ffac21
--- /dev/null
+++ b/cicn-plugin/cicn/test/test_cicn_hash.c
@@ -0,0 +1,415 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * UT for cicn plugin hash function, included in UT framework.
+ */
+
+#include "test_cicn_hash.h"
+
+static void test_hash_cdata_dump_all (void); // forward decl
+
+/*
+ * test cases
+ */
+test_cicn_hash_namedata_t thash_data[] = {
+ TEST_CICN_HASH_NAMEDATA ("/"),
+ TEST_CICN_HASH_NAMEDATA ("/1"),
+ TEST_CICN_HASH_NAMEDATA ("/1/2"),
+ TEST_CICN_HASH_NAMEDATA ("/1/2/3"),
+ TEST_CICN_HASH_NAMEDATA ("/1/2/3/4/5/6/7"),
+ TEST_CICN_HASH_NAMEDATA ("/1/2/3/4/5/6/7.1"),
+ TEST_CICN_HASH_NAMEDATA ("/1/2/3/4/5/6/7/8"),
+ TEST_CICN_HASH_NAMEDATA ("/1/2/3/4/5/6/7/8/9"),
+ TEST_CICN_HASH_NAMEDATA
+ ("/1/2/3/4/5/6/7/8/9/10/11/12/13/14/15/16/17/18/19/20"),
+ TEST_CICN_HASH_NAMEDATA_FULL ("/ccnx/list/\001", 1 /*is_chunk_name */ ),
+};
+
+#define TEST_CICN_VERIFY(expr, assert_on_mismatch) \
+ do { \
+ if (assert_on_mismatch) { assert(expr); } \
+ else if (!(expr)) { goto done; } \
+ } while (0);
+
+
+static int
+test_cicn_hash_pfx_inf_compare (const uint8_t * name, uint16_t namelen,
+ const cicn_prefix_hashinf_t * pfx_info1,
+ const cicn_prefix_hashinf_t * pfx_info2,
+ int is_full_name, int assert_on_mismatch)
+{
+ int i;
+
+ int ret = EINVAL;
+ TEST_CICN_VERIFY (pfx_info1->pfx_count == pfx_info2->pfx_count,
+ assert_on_mismatch);
+ TEST_CICN_VERIFY (pfx_info1->pfx_overflow == pfx_info2->pfx_overflow,
+ assert_on_mismatch);
+
+ for (i = 0; i < pfx_info1->pfx_count; i++)
+ {
+ TEST_CICN_VERIFY (pfx_info1->pfx_lens[i] == pfx_info2->pfx_lens[i],
+ assert_on_mismatch);
+ TEST_CICN_VERIFY (pfx_info1->pfx_hashes[i] == pfx_info2->pfx_hashes[i],
+ assert_on_mismatch);
+ if (i == pfx_info1->pfx_count - 1)
+ { // verify chunk comp handling
+ if (pfx_info1->pfx_lens[i] == pfx_info1->pfx_len)
+ {
+ break; // parsed whole name
+ }
+ if (pfx_info1->pfx_overflow)
+ {
+ break; // quit early on overflow
+ }
+ /* quit early on (hashed component before) chunk component */
+ int chunk_comp_idx =
+ pfx_info1->pfx_lens[i] + (is_full_name ? CICN_TLV_HDR_LEN : 0);
+ uint16_t type;
+ C_GETINT16 (type, &name[chunk_comp_idx]);
+ TEST_CICN_VERIFY (type == CICN_NAME_COMP_CHUNK, assert_on_mismatch);
+ }
+ }
+
+ if (is_full_name)
+ {
+ TEST_CICN_VERIFY (pfx_info1->pfx_full_hash == pfx_info2->pfx_full_hash,
+ assert_on_mismatch);
+ }
+
+ ret = AOK;
+
+done:
+ return (ret);
+}
+
+/*
+ * Version of cicn_hashtb_hash_prefixes() that calculates hash of
+ * each prefix by doing an independent hash from the beginning of the
+ * bytestring.
+ */
+static int
+test_cicn_hashtb_hash_prefixes_nonincr (const uint8_t * name,
+ uint16_t namelen, int is_full_name,
+ cicn_prefix_hashinf_t * pfx,
+ int limit)
+{
+ int i, ret = EINVAL;
+ const uint8_t *name_end, *pfx_start, *pfx_end;
+ uint16_t type = CICN_NAME_COMP, tlen;
+
+ /* Must point to something, and it must be at least as long
+ * as an empty name or name-comp
+ */
+ if ((name == NULL) || (namelen < 4) || (pfx == NULL))
+ {
+ goto done;
+ }
+
+ /* Establish sane limit on number of comps */
+ if (limit == 0 || limit > CICN_HASHTB_MAX_NAME_COMPS)
+ {
+ limit = CICN_HASHTB_MAX_NAME_COMPS;
+ }
+
+ name_end = name + namelen;
+
+ /* Hash the name-comp prefixes first */
+ if (is_full_name)
+ {
+ pfx_start = name + 4;
+
+ /* Capture tlv pointer and len in the context struct */
+ pfx->pfx_ptr = name + 4;
+ pfx->pfx_len = namelen - 4;
+ }
+ else
+ {
+ pfx_start = name;
+
+ /* Capture tlv pointer and len in the context struct */
+ pfx->pfx_ptr = name;
+ pfx->pfx_len = namelen;
+ }
+
+ pfx_end = pfx_start;
+
+ /* We hash each sub-set of the input name, all the way to the end. This
+ * means that in the case of a fib prefix, for example, the _last_
+ * sub-prefix hash is the hash of the full prefix.
+ */
+ for (i = 0; (i < limit) && (pfx_end < name_end); i++)
+ {
+ /* Double-check: in order to advance 'end' to another component tlv,
+ * there must be a min number of bytes left
+ */
+ if ((name_end - pfx_end) < 4)
+ {
+ /* Whoops - that's not valid */
+ goto done;
+ }
+
+ /* Advance 'end' to the next name-comp */
+ C_GETINT16 (type, pfx_end);
+ if (type == CICN_NAME_COMP_CHUNK)
+ {
+ /* Special-case, chunk/sequence not part of routeable prefix */
+ break;
+ }
+
+ pfx_end += 2;
+
+ C_GETINT16 (tlen, pfx_end);
+
+ pfx_end += (2 + tlen);
+ if (pfx_end > name_end)
+ {
+ /* Whoops - that's bad, sub-tlv shouldn't have overrun */
+ break;
+ }
+
+ /* And compute prefix's hash */
+ pfx->pfx_lens[i] = pfx_end - pfx_start;
+ pfx->pfx_hashes[i] =
+ cicn_hashtb_hash_bytestring (pfx_start, pfx_end - pfx_start);
+ }
+
+ if (pfx_end > name_end)
+ {
+ /* Whoops - that's bad, sub-tlv shouldn't have overrun */
+ goto done;
+ }
+
+ pfx->pfx_count = i;
+ pfx->pfx_overflow =
+ (pfx_end < name_end && type != CICN_NAME_COMP_CHUNK) ? 1 : 0;
+
+ /* If needed, compute the full-name hash */
+ if (is_full_name)
+ {
+ pfx->pfx_full_hash = cicn_hashtb_hash_name (name, namelen);
+ }
+
+ if (pfx->pfx_overflow && limit == CICN_HASHTB_MAX_NAME_COMPS)
+ {
+ ret = ENOSPC;
+ goto done;
+ }
+
+ ret = AOK;
+
+done:
+ return (ret);
+}
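+
+/*
+ * A stripped-down illustration of the component-TLV walk used above: two
+ * bytes of type, two bytes of length, then the value, stopping at the
+ * chunk/sequence component. The helper only counts components; its name is
+ * illustrative and it is not called by the test suite.
+ */
+static inline int
+test_cicn_hash_count_comps (const uint8_t * comps, uint16_t complen)
+{
+  const uint8_t *cur = comps, *end = comps + complen;
+  uint16_t type, tlen;
+  int count = 0;
+
+  while ((end - cur) >= 4)
+    {
+      C_GETINT16 (type, cur);
+      if (type == CICN_NAME_COMP_CHUNK)
+	{
+	  break;		/* chunk/sequence is not a routeable component */
+	}
+      cur += 2;
+      C_GETINT16 (tlen, cur);
+      cur += (2 + tlen);
+      if (cur > end)
+	{
+	  break;		/* malformed: component overruns the buffer */
+	}
+      count++;
+    }
+  return (count);
+}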
+
+/*
+ * Run test on a single case
+ */
+int
+test_cicn_hash_hd (test_cicn_hash_namedata_t * hnd,
+ const test_cicn_hash_namedata_t * hn_cdata)
+{
+ int ret = EINVAL;
+ int len;
+ cicn_rd_t cicn_rd;
+
+ uint8_t buf[1024];
+ cicn_prefix_hashinf_t *pfx_hi1 = &hnd->th_pfx_hi;
+ cicn_prefix_hashinf_t pfx_hi2;
+
+ len =
+ cicn_parse_name_from_str (buf, sizeof (buf), hnd->th_name,
+ hnd->th_is_chunk_name, &cicn_rd);
+ if (len <= 0)
+ {
+ goto done;
+ }
+
+ int ret1 =
+ cicn_hashtb_hash_prefixes (buf, len, TRUE /*fullname */ , pfx_hi1,
+ 0 /*!limit */ );
+ switch (ret1)
+ {
+ case AOK:
+ break;
+ case ENOSPC:
+ if (pfx_hi1->pfx_count != ARRAY_LEN (pfx_hi1->pfx_hashes))
+ {
+ goto done;
+ }
+ break;
+ default:
+ goto done;
+ }
+
+ int ret2 =
+ test_cicn_hashtb_hash_prefixes_nonincr (buf, len, TRUE /*fullname */ ,
+ &pfx_hi2, 0 /*!limit */ );
+ switch (ret2)
+ {
+ case AOK:
+ break;
+ case ENOSPC:
+ if (pfx_hi2.pfx_count != ARRAY_LEN (pfx_hi2.pfx_hashes))
+ {
+ goto done;
+ }
+ break;
+ default:
+ goto done;
+ }
+
+ if (ret1 != ret2)
+ {
+ goto done;
+ }
+ ret = test_cicn_hash_pfx_inf_compare (buf, len,
+ pfx_hi1, &hn_cdata->th_pfx_hi,
+ 1 /*is_full_name */ ,
+ 0 /*!assert_on_mismatch */ );
+
+ if (ret != AOK)
+ {
+ goto done;
+ }
+
+ ret = test_cicn_hash_pfx_inf_compare (buf, len,
+ &pfx_hi2, &hn_cdata->th_pfx_hi,
+ 1 /*is_full_name */ ,
+ 0 /*!assert_on_mismatch */ );
+
+ if (ret != AOK)
+ {
+ goto done;
+ }
+
+done:
+ return (ret);
+}
+
+/*
+ * Run all test cases
+ */
+int
+test_cicn_hash_suite (cicn_api_test_suite_results_t * tr,
+ test_cicn_running_t * running)
+{
+ int i;
+ if (0)
+ { // temporarily enable for adding new test cases
+ test_hash_cdata_dump_all ();
+ return (AOK);
+ }
+ for (i = 0; i < ARRAY_LEN (thash_data); i++)
+ {
+ int ret = test_cicn_hash_hd (&thash_data[i], &hash_namedata_cdata[i]);
+ test_cicn_result_record (tr, ret, running);
+ }
+ return (AOK);
+}
+
+/*
+ * Part of routines to generate "known good" output in test_cicn_hash_cdata.c.
+ */
+static void
+test_hash_cdata_dump_hnd (test_cicn_hash_namedata_t * hdn, int indent)
+{
+ cicn_prefix_hashinf_t *pfx_hi = &hdn->th_pfx_hi;
+ int i;
+
+ printf ("%*s{ .th_name = \"", indent, "");
+ for (i = 0; i < hdn->th_namebytes; i++)
+ {
+ uint8_t c = hdn->th_name[i];
+ if (isprint (c))
+ {
+ printf ("%c", c);
+ }
+ else
+ {
+ printf ("\\%3.3o", c);
+ }
+ }
+ printf ("\",\n");
+ printf ("%*s .th_namebytes = %u,\n", indent, "", hdn->th_namebytes);
+ if (hdn->th_is_chunk_name)
+ {
+ printf ("%*s .th_is_chunk_name = %d,\n",
+ indent, "", hdn->th_is_chunk_name);
+ }
+ printf ("%*s .th_pfx_hi = {\n", indent, "");
+ printf ("%*s "
+ ".pfx_len = %" PRIu16 ", "
+ ".pfx_count = %" PRIu16 ", "
+ ".pfx_overflow = %" PRIu16 ", "
+ ".pfx_full_hash = %#" PRIx64 ",\n",
+ indent, "",
+ pfx_hi->pfx_len, pfx_hi->pfx_count, pfx_hi->pfx_overflow,
+ pfx_hi->pfx_full_hash);
+
+ printf ("%*s .pfx_lens = { ", indent, "");
+ for (i = 0; i < hdn->th_pfx_hi.pfx_count; i++)
+ {
+ printf ("%" PRIu16 ", ", pfx_hi->pfx_lens[i]);
+ }
+ printf ("},\n");
+
+ printf ("%*s .pfx_hashes = { ", indent, "");
+ for (i = 0; i < hdn->th_pfx_hi.pfx_count; i++)
+ {
+ printf ("%#" PRIx64 ", ", pfx_hi->pfx_hashes[i]);
+ }
+ printf ("}, },\n"); // terminate pfx_hashes, th_pfx_hi
+
+ printf ("%*s},\n", indent, ""); // terminate hdn
+}
+
+/*
+ * Part of routines to generate "known good" compare data in
+ * test_cicn_hash_cdata.c.
+ * Not called during normal UT execution, only when adding/changing
+ * test cases.
+ */
+static void
+test_hash_cdata_dump_all (void)
+{
+ int i;
+
+ printf ("\n"); // skip debug cli prompt, for easier cut-and-paste
+ printf ("test_cicn_hash_namedata_t hash_namedata_cdata[] = {\n");
+
+ for (i = 0; i < ARRAY_LEN (thash_data); i++)
+ {
+ test_cicn_hash_namedata_t *hnd = &thash_data[i];
+ uint8_t buf[1024];
+ int len;
+ int ret;
+ cicn_rd_t cicn_rd;
+
+ len =
+ cicn_parse_name_from_str (buf, sizeof (buf), hnd->th_name,
+ hnd->th_is_chunk_name, &cicn_rd);
+ ASSERT (len > 0);
+
+ ret =
+ test_cicn_hashtb_hash_prefixes_nonincr (buf, len, TRUE /*fullname */ ,
+ &hnd->th_pfx_hi,
+ 0 /*!limit */ );
+ ASSERT (ret == AOK || ret == ENOSPC);
+ test_hash_cdata_dump_hnd (hnd, 4 /*indent */ );
+ }
+
+ printf ("};\n");
+}
diff --git a/cicn-plugin/cicn/test/test_cicn_hash.h b/cicn-plugin/cicn/test/test_cicn_hash.h
new file mode 100644
index 00000000..b173233a
--- /dev/null
+++ b/cicn-plugin/cicn/test/test_cicn_hash.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * UT for hash function.
+ */
+#include "test_cicn.h"
+
+#include <cicn/cicn_hashtb.h>
+#include <cicn/cicn_parser.h>
+
+/*
+ * Store known-good comparison data.
+ *
+ * The name length is stored separately to allow a chunk number of 0, though
+ * the parser cannot handle that yet (it terminates on a null byte).
+ */
+typedef struct test_cicn_hash_namedata_s
+{
+ char *th_name;
+ int th_namebytes;
+ int th_is_chunk_name;
+ cicn_prefix_hashinf_t th_pfx_hi;
+} test_cicn_hash_namedata_t;
+
+#define TEST_CICN_HASH_NAMEDATA_FULL(str, is_chunk_name) \
+ {.th_name = (str), .th_namebytes = sizeof(str)-1, .th_is_chunk_name = is_chunk_name, }
+#define TEST_CICN_HASH_NAMEDATA(str) \
+ TEST_CICN_HASH_NAMEDATA_FULL(str, 0/*is_chunk_name*/)
+
+extern test_cicn_hash_namedata_t hash_namedata_cdata[];
diff --git a/cicn-plugin/cicn/test/test_cicn_hash_cdata.c b/cicn-plugin/cicn/test/test_cicn_hash_cdata.c
new file mode 100644
index 00000000..ab5e7993
--- /dev/null
+++ b/cicn-plugin/cicn/test/test_cicn_hash_cdata.c
@@ -0,0 +1,161 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * test_cicn_hash_cdata.c: UT comparison data for the hash function, treated
+ * as known-good and checked against the calculated results.
+ *
+ * - NOTE: Entries in this array are generated by test_hash_cdata_dump_all().
+ */
+
+#include "test_cicn_hash.h"
+
+test_cicn_hash_namedata_t hash_namedata_cdata[] = {
+ {.th_name = "/",
+ .th_namebytes = 1,
+ .th_pfx_hi = {
+ .pfx_len = 0,.pfx_count = 0,.pfx_overflow =
+ 0,.pfx_full_hash = 0xf1a89ef060ec5ab2,
+ .pfx_lens = {}
+ ,
+ .pfx_hashes = {}
+ ,}
+ ,
+ }
+ ,
+ {.th_name = "/1",
+ .th_namebytes = 2,
+ .th_pfx_hi = {
+ .pfx_len = 5,.pfx_count = 1,.pfx_overflow =
+ 0,.pfx_full_hash = 0xa21d733c21450fb0,
+ .pfx_lens = {5,}
+ ,
+ .pfx_hashes = {0xa21d733c21450fb0,}
+ ,}
+ ,
+ }
+ ,
+ {.th_name = "/1/2",
+ .th_namebytes = 4,
+ .th_pfx_hi = {
+ .pfx_len = 10,.pfx_count = 2,.pfx_overflow =
+ 0,.pfx_full_hash = 0x2483307ff018e378,
+ .pfx_lens = {5, 10,}
+ ,
+ .pfx_hashes = {0xa21d733c21450fb0, 0x2483307ff018e378,}
+ ,}
+ ,
+ }
+ ,
+ {.th_name = "/1/2/3",
+ .th_namebytes = 6,
+ .th_pfx_hi = {
+ .pfx_len = 15,.pfx_count = 3,.pfx_overflow =
+ 0,.pfx_full_hash = 0x359fc658c64a6901,
+ .pfx_lens = {5, 10, 15,}
+ ,
+ .pfx_hashes =
+ {0xa21d733c21450fb0, 0x2483307ff018e378, 0x359fc658c64a6901,}
+ ,}
+ ,
+ }
+ ,
+ {.th_name = "/1/2/3/4/5/6/7",
+ .th_namebytes = 14,
+ .th_pfx_hi = {
+ .pfx_len = 35,.pfx_count = 7,.pfx_overflow =
+ 0,.pfx_full_hash = 0xb62b3a4a289f1f16,
+ .pfx_lens = {5, 10, 15, 20, 25, 30, 35,}
+ ,
+ .pfx_hashes =
+ {0xa21d733c21450fb0, 0x2483307ff018e378, 0x359fc658c64a6901,
+ 0xe1f52cd7e83b0c01, 0xe883c57a77ccbb45, 0xd3b98a367b166454,
+ 0xb62b3a4a289f1f16,}
+ ,}
+ ,
+ }
+ ,
+ {.th_name = "/1/2/3/4/5/6/7.1",
+ .th_namebytes = 16,
+ .th_pfx_hi = {
+ .pfx_len = 37,.pfx_count = 7,.pfx_overflow =
+ 0,.pfx_full_hash = 0xff30f5f715e86753,
+ .pfx_lens = {5, 10, 15, 20, 25, 30, 37,}
+ ,
+ .pfx_hashes =
+ {0xa21d733c21450fb0, 0x2483307ff018e378, 0x359fc658c64a6901,
+ 0xe1f52cd7e83b0c01, 0xe883c57a77ccbb45, 0xd3b98a367b166454,
+ 0xff30f5f715e86753,}
+ ,}
+ ,
+ }
+ ,
+ {.th_name = "/1/2/3/4/5/6/7/8",
+ .th_namebytes = 16,
+ .th_pfx_hi = {
+ .pfx_len = 40,.pfx_count = 8,.pfx_overflow =
+ 0,.pfx_full_hash = 0xbf61949d6499859a,
+ .pfx_lens = {5, 10, 15, 20, 25, 30, 35, 40,}
+ ,
+ .pfx_hashes =
+ {0xa21d733c21450fb0, 0x2483307ff018e378, 0x359fc658c64a6901,
+ 0xe1f52cd7e83b0c01, 0xe883c57a77ccbb45, 0xd3b98a367b166454,
+ 0xb62b3a4a289f1f16, 0xbf61949d6499859a,}
+ ,}
+ ,
+ }
+ ,
+ {.th_name = "/1/2/3/4/5/6/7/8/9",
+ .th_namebytes = 18,
+ .th_pfx_hi = {
+ .pfx_len = 45,.pfx_count = 8,.pfx_overflow =
+ 1,.pfx_full_hash = 0x57b69dba5ce74acf,
+ .pfx_lens = {5, 10, 15, 20, 25, 30, 35, 40,}
+ ,
+ .pfx_hashes =
+ {0xa21d733c21450fb0, 0x2483307ff018e378, 0x359fc658c64a6901,
+ 0xe1f52cd7e83b0c01, 0xe883c57a77ccbb45, 0xd3b98a367b166454,
+ 0xb62b3a4a289f1f16, 0xbf61949d6499859a,}
+ ,}
+ ,
+ }
+ ,
+ {.th_name = "/1/2/3/4/5/6/7/8/9/10/11/12/13/14/15/16/17/18/19/20",
+ .th_namebytes = 51,
+ .th_pfx_hi = {
+ .pfx_len = 111,.pfx_count = 8,.pfx_overflow =
+ 1,.pfx_full_hash = 0x9c3c7384d4566a4a,
+ .pfx_lens = {5, 10, 15, 20, 25, 30, 35, 40,}
+ ,
+ .pfx_hashes =
+ {0xa21d733c21450fb0, 0x2483307ff018e378, 0x359fc658c64a6901,
+ 0xe1f52cd7e83b0c01, 0xe883c57a77ccbb45, 0xd3b98a367b166454,
+ 0xb62b3a4a289f1f16, 0xbf61949d6499859a,}
+ ,}
+ ,
+ }
+ ,
+ {.th_name = "/ccnx/list/\001",
+ .th_namebytes = 12,
+ .th_is_chunk_name = 1,
+ .th_pfx_hi = {
+ .pfx_len = 21,.pfx_count = 2,.pfx_overflow =
+ 0,.pfx_full_hash = 0x3016dc26837a1cd,
+ .pfx_lens = {8, 16,}
+ ,
+ .pfx_hashes = {0x4a235626854c2554, 0x6519af585bec2ef4,}
+ ,}
+ ,
+ }
+ ,
+};
diff --git a/cicn-plugin/configure.ac b/cicn-plugin/configure.ac
new file mode 100644
index 00000000..a1477f8a
--- /dev/null
+++ b/cicn-plugin/configure.ac
@@ -0,0 +1,33 @@
+
+AC_INIT(cicn_plugin, 1.0)
+AM_INIT_AUTOMAKE
+
+AC_PROG_LIBTOOL
+AM_PROG_AS
+AC_PROG_CC
+AM_PROG_CC_C_O
+
+# always give this switch, should be hard-wired(?)
+# ../configure ... --with-plugin-toolkit
+AC_ARG_WITH(plugin-toolkit,
+ AC_HELP_STRING([--with-plugin-toolkit],
+ [build using the vpp toolkit]),
+ [with_plugin_toolkit=${prefix}/include],
+ [with_plugin_toolkit=.])
+
+AC_SUBST(TOOLKIT_INCLUDE,[${with_plugin_toolkit}])
+AM_CONDITIONAL(WITH_PLUGIN_TOOLKIT, test "$with_plugin_toolkit" != ".")
+
+# control build inclusion of cicn UT modules
+# include: <default>
+# omit: ../configure ... --without-cicn-test
+AC_ARG_WITH(cicn-test,
+ AC_HELP_STRING([--without-cicn-test],
+ [disable support for cicn-test]),
+ [],
+ [with_cicn_test=yes])
+
+# substitute under its own name so TOOLKIT_INCLUDE is not clobbered
+AC_SUBST(CICN_TEST,[${with_cicn_test}])
+AM_CONDITIONAL(WITH_CICN_TEST, test "x$with_cicn_test" != x"no")
+
+AC_OUTPUT([Makefile])