summaryrefslogtreecommitdiffstats
path: root/hicn-plugin/src/network
diff options
context:
space:
mode:
authorMauro Sardara <msardara@cisco.com>2020-03-24 17:34:14 +0000
committerMauro Sardara <msardara@cisco.com>2020-09-14 17:31:15 +0000
commit88509fe353767cbde707c3e3b1f29392957819f3 (patch)
treecccd51bac7966cd3138c525e8075d90341184a66 /hicn-plugin/src/network
parentd875ae92a7fa1eaab3bc2616aeeedfc64a81fea4 (diff)
[HICN-574] Host stack plugin for VPP.
Signed-off-by: Mauro Sardara <msardara@cisco.com> Change-Id: I8d8fdffef31a7013265d6529c5f52f3d5ec70d18
Diffstat (limited to 'hicn-plugin/src/network')
-rw-r--r--hicn-plugin/src/network/asd304
-rw-r--r--hicn-plugin/src/network/cache_policies/cs_lru.c268
-rw-r--r--hicn-plugin/src/network/cache_policies/cs_lru.h76
-rw-r--r--hicn-plugin/src/network/cache_policies/cs_policy.h96
-rw-r--r--hicn-plugin/src/network/cli.c897
-rw-r--r--hicn-plugin/src/network/data_fwd.h214
-rw-r--r--hicn-plugin/src/network/data_fwd_node.c620
-rw-r--r--hicn-plugin/src/network/data_input_node.c697
-rw-r--r--hicn-plugin/src/network/data_pcslookup.h61
-rw-r--r--hicn-plugin/src/network/data_pcslookup_node.c223
-rw-r--r--hicn-plugin/src/network/error.c28
-rw-r--r--hicn-plugin/src/network/face_db.h151
-rw-r--r--hicn-plugin/src/network/faces/app/address_mgr.c242
-rw-r--r--hicn-plugin/src/network/faces/app/address_mgr.h76
-rw-r--r--hicn-plugin/src/network/faces/app/face_app_cli.c205
-rw-r--r--hicn-plugin/src/network/faces/app/face_cons.c116
-rw-r--r--hicn-plugin/src/network/faces/app/face_cons.h76
-rw-r--r--hicn-plugin/src/network/faces/app/face_prod.c369
-rw-r--r--hicn-plugin/src/network/faces/app/face_prod.h106
-rw-r--r--hicn-plugin/src/network/faces/app/face_prod_node.c318
-rw-r--r--hicn-plugin/src/network/faces/face.c475
-rw-r--r--hicn-plugin/src/network/faces/face.h796
-rw-r--r--hicn-plugin/src/network/faces/face_cli.c194
-rw-r--r--hicn-plugin/src/network/faces/face_node.c940
-rw-r--r--hicn-plugin/src/network/faces/face_node.h52
-rw-r--r--hicn-plugin/src/network/faces/iface_node.c915
-rw-r--r--hicn-plugin/src/network/faces/iface_node.h54
-rw-r--r--hicn-plugin/src/network/hashtb.c1017
-rw-r--r--hicn-plugin/src/network/hashtb.h546
-rw-r--r--hicn-plugin/src/network/hicn.api579
-rw-r--r--hicn-plugin/src/network/hicn.h42
-rw-r--r--hicn-plugin/src/network/hicn_api.c727
-rw-r--r--hicn-plugin/src/network/hicn_api_test.c1319
-rw-r--r--hicn-plugin/src/network/hicn_config.c224
-rw-r--r--hicn-plugin/src/network/infra.h102
-rw-r--r--hicn-plugin/src/network/interest_hitcs.h66
-rw-r--r--hicn-plugin/src/network/interest_hitcs_node.c291
-rw-r--r--hicn-plugin/src/network/interest_hitpit.h68
-rw-r--r--hicn-plugin/src/network/interest_hitpit_node.c308
-rw-r--r--hicn-plugin/src/network/interest_pcslookup.h67
-rw-r--r--hicn-plugin/src/network/interest_pcslookup_node.c237
-rw-r--r--hicn-plugin/src/network/mapme.h365
-rw-r--r--hicn-plugin/src/network/mapme_ack.h58
-rw-r--r--hicn-plugin/src/network/mapme_ack_node.c234
-rw-r--r--hicn-plugin/src/network/mapme_ctrl.h78
-rw-r--r--hicn-plugin/src/network/mapme_ctrl_node.c337
-rw-r--r--hicn-plugin/src/network/mapme_eventmgr.c567
-rw-r--r--hicn-plugin/src/network/mapme_eventmgr.h57
-rw-r--r--hicn-plugin/src/network/mgmt.c96
-rw-r--r--hicn-plugin/src/network/mgmt.h129
-rw-r--r--hicn-plugin/src/network/params.h126
-rw-r--r--hicn-plugin/src/network/parser.h121
-rw-r--r--hicn-plugin/src/network/pcs.c53
-rw-r--r--hicn-plugin/src/network/pcs.h839
-rw-r--r--hicn-plugin/src/network/pg.c1326
-rw-r--r--hicn-plugin/src/network/pg.h101
-rw-r--r--hicn-plugin/src/network/route.c766
-rw-r--r--hicn-plugin/src/network/route.h130
-rw-r--r--hicn-plugin/src/network/state.h115
-rw-r--r--hicn-plugin/src/network/strategies/dpo_mw.c161
-rw-r--r--hicn-plugin/src/network/strategies/dpo_mw.h149
-rw-r--r--hicn-plugin/src/network/strategies/dpo_rr.c160
-rw-r--r--hicn-plugin/src/network/strategies/dpo_rr.h152
-rw-r--r--hicn-plugin/src/network/strategies/strategy_mw.c128
-rw-r--r--hicn-plugin/src/network/strategies/strategy_mw.h41
-rw-r--r--hicn-plugin/src/network/strategies/strategy_mw_cli.c142
-rw-r--r--hicn-plugin/src/network/strategies/strategy_rr.c122
-rw-r--r--hicn-plugin/src/network/strategies/strategy_rr.h41
-rw-r--r--hicn-plugin/src/network/strategy.h99
-rw-r--r--hicn-plugin/src/network/strategy_dpo_ctx.c164
-rw-r--r--hicn-plugin/src/network/strategy_dpo_ctx.h197
-rw-r--r--hicn-plugin/src/network/strategy_dpo_manager.c160
-rw-r--r--hicn-plugin/src/network/strategy_dpo_manager.h193
-rw-r--r--hicn-plugin/src/network/strategy_node.c323
-rw-r--r--hicn-plugin/src/network/udp_tunnels/udp_decap.h32
-rw-r--r--hicn-plugin/src/network/udp_tunnels/udp_decap_node.c623
-rw-r--r--hicn-plugin/src/network/udp_tunnels/udp_tunnel.c281
-rw-r--r--hicn-plugin/src/network/udp_tunnels/udp_tunnel.h114
-rw-r--r--hicn-plugin/src/network/utils.h87
79 files changed, 23029 insertions, 0 deletions
diff --git a/hicn-plugin/src/network/asd b/hicn-plugin/src/network/asd
new file mode 100644
index 000000000..13d3b5c3c
--- /dev/null
+++ b/hicn-plugin/src/network/asd
@@ -0,0 +1,304 @@
+# Copyright (c) 2017-2020 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+cmake_minimum_required(VERSION 3.5 FATAL_ERROR)
+
+# Dependencies
+
+find_package(Vpp REQUIRED)
+
+include_directories(${VPP_INCLUDE_DIR})
+
+set(LIBHICN_FILES
+ ${CMAKE_CURRENT_SOURCE_DIR}/../../lib/src/mapme.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/../../lib/src/name.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/../../lib/src/ops.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/../../lib/src/protocol/ah.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/../../lib/src/protocol/icmp.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/../../lib/src/protocol/ipv4.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/../../lib/src/protocol/ipv6.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/../../lib/src/protocol/tcp.c
+)
+
+set(LIBHICN_HEADER_FILES_SRC
+ ${CMAKE_CURRENT_SOURCE_DIR}/../../lib/includes/hicn/hicn.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/../../lib/includes/hicn/base.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/../../lib/includes/hicn/common.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/../../lib/includes/hicn/error.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/../../lib/includes/hicn/header.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/../../lib/includes/hicn/name.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/../../lib/includes/hicn/protocol.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/../../lib/includes/hicn/ops.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/../../lib/includes/hicn/mapme.h
+)
+
+set(LIBHICN_HEADER_FILES_PROTOCOL
+ ${CMAKE_CURRENT_SOURCE_DIR}/../../lib/includes/hicn/protocol/ah.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/../../lib/includes/hicn/protocol/icmp.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/../../lib/includes/hicn/protocol/icmprd.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/../../lib/includes/hicn/protocol/ipv4.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/../../lib/includes/hicn/protocol/ipv6.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/../../lib/includes/hicn/protocol/tcp.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/../../lib/includes/hicn/protocol/udp.h
+)
+
+set(LIBHICN_HEADER_FILES_UTIL
+ ${CMAKE_CURRENT_SOURCE_DIR}/../../lib/includes/hicn/util/ip_address.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/../../lib/includes/hicn/util/token.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/../../lib/includes/hicn/util/types.h
+)
+
+set(HICN_PLUGIN_SOURCE_FILES
+ ${CMAKE_CURRENT_SOURCE_DIR}/hicn.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/hicn_api.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/cli.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/hashtb.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/mgmt.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/pcs.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/route.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/strategy_dpo_ctx.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/strategy_dpo_manager.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/strategy_node.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/interest_pcslookup_node.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/interest_hitpit_node.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/interest_hitcs_node.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/data_input_node.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/data_pcslookup_node.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/data_fwd_node.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/data_push_node.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/error.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/faces/face_cli.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/faces/face.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/faces/face_node.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/faces/iface_node.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/faces/dpo_face.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/faces/app/address_mgr.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/faces/app/face_cons.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/faces/app/face_prod.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/faces/app/face_prod_node.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/faces/app/face_app_cli.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/pg.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/strategies/dpo_mw.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/strategies/strategy_mw.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/strategies/strategy_mw_cli.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/strategies/dpo_rr.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/strategies/strategy_rr.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/cache_policies/cs_lru.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/mapme_ack_node.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/mapme_ctrl_node.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/mapme_eventmgr.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/udp_tunnels/udp_decap_node.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/udp_tunnels/udp_tunnel.c
+)
+
+set(HICN_PLUGIN_HEADER_FILES
+ ${CMAKE_CURRENT_SOURCE_DIR}/hicn_all_api_h.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/hashtb.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/mgmt.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/params.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/pcs.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/hicn_api.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/hicn.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/state.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/infra.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/hicn_msg_enum.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/parser.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/route.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/strategy_dpo_ctx.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/strategy_dpo_manager.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/strategy.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/interest_pcslookup.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/interest_hitpit.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/interest_hitcs.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/data_pcslookup.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/data_fwd.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/error.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/face_db.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/faces/face.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/faces/face_node.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/faces/iface_node.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/faces/face_dpo.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/faces/app/address_mgr.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/faces/app/face_cons.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/faces/app/face_prod.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/pg.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/strategies/dpo_mw.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/strategies/strategy_mw.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/strategies/dpo_rr.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/strategies/strategy_rr.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/cache_policies/cs_policy.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/cache_policies/cs_lru.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/mapme.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/mapme_ack.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/mapme_ctrl.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/mapme_eventmgr.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/udp_tunnels/udp_tunnel.h
+)
+
+set(HICN_API_TEST_SOURCE_FILES
+ ${CMAKE_CURRENT_SOURCE_DIR}/hicn_api_test.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/error.c)
+
+set(HICN_API_HEADER_FILES
+ ${CMAKE_CURRENT_SOURCE_DIR}/hicn_msg_enum.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/hicn_all_api_h.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/hicn_api.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/error.h)
+
+set(HICN_API_GENERATED_FILES
+ ${CMAKE_CURRENT_BINARY_DIR}/vpp_plugins/hicn/hicn.api.h
+ ${CMAKE_CURRENT_BINARY_DIR}/vpp_plugins/hicn/hicn.api_types.h
+ ${CMAKE_CURRENT_BINARY_DIR}/vpp_plugins/hicn/hicn.api_enum.h
+)
+
+set(HICN_VAPI_GENERATED_FILES
+ ${CMAKE_CURRENT_BINARY_DIR}/vapi/hicn.api.vapi.h
+ ${CMAKE_CURRENT_BINARY_DIR}/vapi/hicn.api.vapi.hpp)
+
+set(HICN_VPP_STARTUP_CONF_FILE
+ ${CMAKE_BINARY_DIR}/startup.conf)
+
+if (NOT VPP_HOME)
+ set(VPP_HOME /usr)
+endif()
+
+if (NOT CMAKE_BUILD_TYPE)
+ set (CMAKE_BUILD_TYPE "Release")
+endif (NOT CMAKE_BUILD_TYPE)
+
+SET(HICN_INSTALL_PREFIX ${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR} CACHE STRING "hicn_install_prefix")
+
+if (CMAKE_BUILD_TYPE STREQUAL "Release")
+ set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} -Wall -march=native -O3 -g")
+elseif (CMAKE_BUILD_TYPE STREQUAL "Debug")
+ set(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -Wall -march=native -O0 -g")
+ add_definitions(-DCLIB_DEBUG -fPIC -fstack-protector-all)
+endif()
+
+file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/hicn)
+file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/vapi)
+file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/vnet/ip)
+file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/vnet/fib)
+file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/vnet/udp)
+
+# These files are missing from vpp binary distribution
+execute_process(
+ COMMAND
+ bash -c
+ "if [ ! -e ${CMAKE_CURRENT_BINARY_DIR}/vapi_json_parser.py ]; then
+ curl https://raw.githubusercontent.com/FDio/vpp/stable/2001/src/vpp-api/vapi/vapi_json_parser.py -o ${CMAKE_CURRENT_BINARY_DIR}/vapi_json_parser.py;
+ fi;
+ if [ ! -e ${CMAKE_CURRENT_BINARY_DIR}/vapi_c_gen.py ]; then
+ curl https://raw.githubusercontent.com/FDio/vpp/stable/2001/src/vpp-api/vapi/vapi_c_gen.py -o ${CMAKE_CURRENT_BINARY_DIR}/vapi_c_gen.py;
+ fi;
+ if [ ! -e ${CMAKE_CURRENT_BINARY_DIR}/vapi_cpp_gen.py ]; then
+ curl https://raw.githubusercontent.com/FDio/vpp/stable/2001/src/vpp-api/vapi/vapi_cpp_gen.py -o ${CMAKE_CURRENT_BINARY_DIR}/vapi_cpp_gen.py;
+ fi;
+ if [ ! -e ${CMAKE_CURRENT_BINARY_DIR}/vnet/ip/ip_types.api ]; then
+ curl https://raw.githubusercontent.com/FDio/vpp/stable/2001/src/vnet/ip/ip_types.api -o ${CMAKE_CURRENT_BINARY_DIR}/vnet/ip/ip_types.api;
+ fi;
+ if [ ! -e ${CMAKE_CURRENT_BINARY_DIR}/vnet/ip/ip_format_fns.h ]; then
+ curl https://raw.githubusercontent.com/FDio/vpp/master/src/vnet/ip/ip_format_fns.h -o ${CMAKE_CURRENT_BINARY_DIR}/vnet/ip/ip_format_fns.h;
+ fi;
+ if [ ! -e ${CMAKE_CURRENT_BINARY_DIR}/vnet/fib/fib_entry_track.h ]; then
+ curl https://raw.githubusercontent.com/FDio/vpp/master/src/vnet/fib/fib_entry_track.h -o ${CMAKE_CURRENT_BINARY_DIR}/vnet/fib/fib_entry_track.h;
+ fi;
+ if [ ! -e ${CMAKE_CURRENT_BINARY_DIR}/vnet/udp/udp_encap.h ]; then
+ curl https://raw.githubusercontent.com/FDio/vpp/master/src/vnet/udp/udp_encap.h -o ${CMAKE_CURRENT_BINARY_DIR}/vnet/udp/udp_encap.h;
+ fi;
+
+ chmod +x ${CMAKE_CURRENT_BINARY_DIR}/vapi_json_parser.py ${CMAKE_CURRENT_BINARY_DIR}/vapi_c_gen.py ${CMAKE_CURRENT_BINARY_DIR}/vapi_cpp_gen.py"
+)
+
+add_custom_command(
+ OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/vpp_plugins/hicn/hicn.api.h ${CMAKE_CURRENT_BINARY_DIR}/vpp_plugins/hicn/hicn.api_types.h ${CMAKE_CURRENT_BINARY_DIR}/vpp_plugins/hicn/hicn.api_enum.h ${CMAKE_CURRENT_BINARY_DIR}/vapi/hicn.api.json
+ COMMAND ${VPP_HOME}/bin/vppapigen ARGS --includedir ${CMAKE_CURRENT_BINARY_DIR} --input ${CMAKE_CURRENT_SOURCE_DIR}/hicn.api --output ${CMAKE_CURRENT_BINARY_DIR}/vpp_plugins/hicn/hicn.api.h --outputdir ${CMAKE_CURRENT_BINARY_DIR}/vpp_plugins/hicn/
+ COMMAND ${VPP_HOME}/bin/vppapigen ARGS JSON --includedir ${CMAKE_CURRENT_BINARY_DIR} --input ${CMAKE_CURRENT_SOURCE_DIR}/hicn.api --output ${CMAKE_CURRENT_BINARY_DIR}/vapi/hicn.api.json --outputdir ${CMAKE_CURRENT_BINARY_DIR}/vapi/
+)
+add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/vapi/hicn.api.vapi.h ${CMAKE_CURRENT_BINARY_DIR}/vapi/hicn.api.vapi.hpp
+ COMMAND ${CMAKE_CURRENT_BINARY_DIR}/vapi_c_gen.py ARGS ${CMAKE_CURRENT_BINARY_DIR}/vapi/hicn.api.json
+ COMMAND ${CMAKE_CURRENT_BINARY_DIR}/vapi_cpp_gen.py ARGS ${CMAKE_CURRENT_BINARY_DIR}/vapi/hicn.api.json
+ DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/vapi/hicn.api.json
+)
+
+include_directories(SYSTEM)
+include_directories(${CMAKE_CURRENT_BINARY_DIR})
+
+set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DHICN_VPP_PLUGIN=1")
+add_library(hicn_plugin SHARED
+ ${LIBHICN_FILES}
+ ${HICN_PLUGIN_SOURCE_FILES}
+ ${HICN_API_GENERATED_FILES}
+ ${HICN_VAPI_GENERATED_FILES})
+
+file(COPY ${HICN_API_HEADER_FILES} DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/vpp_plugins/hicn)
+include_directories(${CMAKE_CURRENT_BINARY_DIR}/vpp_plugins)
+
+file(COPY ${LIBHICN_HEADER_FILES_SRC} DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/hicn)
+file(COPY ${LIBHICN_HEADER_FILES_PROTOCOL} DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/hicn/protocol)
+file(COPY ${LIBHICN_HEADER_FILES_UTIL} DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/hicn/util)
+
+add_library(hicn_api_test_plugin SHARED
+ ${HICN_API_TEST_SOURCE_FILES}
+ ${HICN_API_GENERATED_FILES})
+
+set(VPP_INSTALL_PLUGIN ${HICN_INSTALL_PREFIX}/vpp_plugins)
+set(VPP_INSTALL_API_TEST_PLUGIN ${HICN_INSTALL_PREFIX}/vpp_api_test_plugins CACHE STRING "vpp_install_api_test_plugin")
+set(VPP_STARTUP_CONF /etc/vpp/)
+
+set_target_properties(hicn_plugin
+ PROPERTIES
+ LINKER_LANGUAGE C
+ INSTALL_RPATH ${VPP_INSTALL_PLUGIN}
+ PREFIX "")
+set_target_properties(hicn_api_test_plugin
+ PROPERTIES
+ LINKER_LANGUAGE C
+ PREFIX "")
+
+message (STATUS "hicn-plugin variable ${HICN_PLUGIN}")
+
+install(DIRECTORY
+ DESTINATION ${VPP_INSTALL_PLUGIN}
+ COMPONENT ${HICN_PLUGIN})
+install(TARGETS hicn_plugin
+ DESTINATION
+ ${VPP_INSTALL_PLUGIN}
+ COMPONENT ${HICN_PLUGIN})
+
+install(DIRECTORY
+ DESTINATION ${VPP_INSTALL_API_TEST_PLUGIN}
+ COMPONENT ${HICN_PLUGIN})
+install(TARGETS hicn_api_test_plugin
+ DESTINATION ${VPP_INSTALL_API_TEST_PLUGIN}
+ COMPONENT ${HICN_PLUGIN})
+
+install(FILES ${HICN_API_HEADER_FILES} ${HICN_API_GENERATED_FILES}
+ DESTINATION ${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_INCLUDEDIR}/vpp_plugins/hicn
+ COMPONENT ${HICN_PLUGIN}-dev)
+
+install(FILES ${HICN_API_GENERATED_FILES}
+ DESTINATION ${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_INCLUDEDIR}/vpp_plugins/hicn
+ COMPONENT ${HICN_PLUGIN}-dev)
+
+install(FILES ${HICN_VAPI_GENERATED_FILES}
+ DESTINATION ${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_INCLUDEDIR}/vapi
+ COMPONENT ${HICN_PLUGIN}-dev)
+
+#Set variables for other project depending on hicn-plugin
+set(HICNPLUGIN_INCLUDE_DIRS
+ ${CMAKE_CURRENT_BINARY_DIR}
+ ${CMAKE_CURRENT_BINARY_DIR}/vpp_plugins
+ ${VPP_INCLUDE_DIRS}
+ CACHE INTERNAL "" FORCE)
+set(HICNPLUGIN_LIBRARIES ${VPP_LIBRARIES} CACHE INTERNAL "" FORCE)
diff --git a/hicn-plugin/src/network/cache_policies/cs_lru.c b/hicn-plugin/src/network/cache_policies/cs_lru.c
new file mode 100644
index 000000000..079af58ab
--- /dev/null
+++ b/hicn-plugin/src/network/cache_policies/cs_lru.c
@@ -0,0 +1,268 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "../hashtb.h"
+#include "../strategy_dpo_manager.h"
+#include "../error.h"
+#include "cs_lru.h"
+#include "cs_policy.h"
+
+/*
+ * Virtual function table binding the generic CS policy interface
+ * (hicn_cs_policy_vft_t, see cs_policy.h) to the LRU implementation
+ * defined in this file.
+ */
+hicn_cs_policy_vft_t hicn_cs_lru = {
+  .hicn_cs_insert = &hicn_cs_lru_insert,
+  .hicn_cs_update = &hicn_cs_lru_update_head,
+  .hicn_cs_dequeue = &hicn_cs_lru_dequeue,
+  .hicn_cs_delete_get = &hicn_cs_lru_delete_get,
+  .hicn_cs_trim = &hicn_cs_lru_trim,
+  .hicn_cs_flush = &hicn_cs_lru_flush,
+};
+
+/*
+ * Insert a new CS element at the head of the CS LRU.
+ *
+ * The LRU is a doubly-linked list threaded through the PCS entries via
+ * hashtable node indexes (cs_lru_prev points toward the head, cs_lru_next
+ * toward the tail). Index 0 acts as the null/sentinel value, so
+ * head == 0 / tail == 0 means the list is empty (node index 0 is
+ * presumably never handed out for a real entry -- TODO confirm).
+ */
+void
+hicn_cs_lru_insert (hicn_pit_cs_t * p, hicn_hash_node_t * node,
+		    hicn_pcs_entry_t * pcs, hicn_cs_policy_t * policy_state)
+{
+  hicn_hash_node_t *lrunode;
+  hicn_pcs_entry_t *lrupcs;
+  u32 idx;
+
+  /* Resolve the hashtable index of the node being inserted */
+  idx = hicn_hashtb_node_idx_from_node (p->pcs_table, node);
+
+  if (policy_state->head != 0)
+    {
+      /* Non-empty list: link the new entry in front of the current head */
+      lrunode = hicn_hashtb_node_from_idx (p->pcs_table, policy_state->head);
+      lrupcs = hicn_pit_get_data (lrunode);
+
+      ASSERT (lrupcs->u.cs.cs_lru_prev == 0);
+      lrupcs->u.cs.cs_lru_prev = idx;
+
+      pcs->u.cs.cs_lru_prev = 0;
+      pcs->u.cs.cs_lru_next = policy_state->head;
+
+      policy_state->head = idx;
+    }
+  else
+    {
+      ASSERT (policy_state->tail == 0);	/* We think the list is
+					 * empty */
+
+      /* Empty list: the new entry becomes both head and tail */
+      policy_state->head = policy_state->tail = idx;
+
+      pcs->u.cs.cs_lru_next = pcs->u.cs.cs_lru_prev = 0;
+    }
+
+  policy_state->count++;
+}
+
+/*
+ * Return (without unlinking) the LRU victim, i.e. the entry at the tail
+ * of the list, together with its hashtable node and hash entry, so the
+ * caller can evict it.
+ */
+void
+hicn_cs_lru_delete_get (hicn_pit_cs_t * p, hicn_cs_policy_t * policy_state,
+			hicn_hash_node_t ** nodep,
+			hicn_pcs_entry_t ** pcs_entry,
+			hicn_hash_entry_t ** hash_entry)
+{
+  *nodep = hicn_hashtb_node_from_idx (p->pcs_table, policy_state->tail);
+  *pcs_entry = hicn_pit_get_data (*nodep);
+
+  /* The hash entry may live in a regular or an overflow bucket; the
+   * node's flag selects which one hicn_hashtb_get_entry consults. */
+  *hash_entry = hicn_hashtb_get_entry (p->pcs_table, (*nodep)->entry_idx,
+				       (*nodep)->bucket_id,
+				       (*nodep)->hn_flags &
+				       HICN_HASH_NODE_OVERFLOW_BUCKET);
+}
+
+/*
+ * Dequeue an LRU element, for example when it has expired.
+ *
+ * Unlinks pnode/pcs from the doubly-linked LRU list: fixes up the
+ * neighbours' prev/next indexes (or head/tail when the element sits at
+ * an end of the list) and decrements the element count. The element's
+ * own links are reset to the 0 sentinel.
+ */
+void
+hicn_cs_lru_dequeue (hicn_pit_cs_t * pit, hicn_hash_node_t * pnode,
+		     hicn_pcs_entry_t * pcs, hicn_cs_policy_t * lru)
+{
+  hicn_hash_node_t *lrunode;
+  hicn_pcs_entry_t *lrupcs;
+
+  if (pcs->u.cs.cs_lru_prev != 0)
+    {
+      /* Not at the head of the LRU: fix the predecessor's next link */
+      lrunode = hicn_hashtb_node_from_idx (pit->pcs_table,
+					   pcs->u.cs.cs_lru_prev);
+      lrupcs = hicn_pit_get_data (lrunode);
+
+      lrupcs->u.cs.cs_lru_next = pcs->u.cs.cs_lru_next;
+    }
+  else
+    {
+      /* Element is the head: advance the head pointer */
+      ASSERT (lru->head ==
+	      hicn_hashtb_node_idx_from_node (pit->pcs_table, pnode));
+      lru->head = pcs->u.cs.cs_lru_next;
+    }
+
+  if (pcs->u.cs.cs_lru_next != 0)
+    {
+      /* Not at the tail of the LRU: fix the successor's prev link */
+      lrunode = hicn_hashtb_node_from_idx (pit->pcs_table,
+					   pcs->u.cs.cs_lru_next);
+      lrupcs = hicn_pit_get_data (lrunode);
+
+      lrupcs->u.cs.cs_lru_prev = pcs->u.cs.cs_lru_prev;
+    }
+  else
+    {
+      /* Element is the tail: retreat the tail pointer */
+      ASSERT (lru->tail ==
+	      hicn_hashtb_node_idx_from_node (pit->pcs_table, pnode));
+      lru->tail = pcs->u.cs.cs_lru_prev;
+    }
+
+  pcs->u.cs.cs_lru_next = pcs->u.cs.cs_lru_prev = 0;
+  lru->count--;
+}
+
+/*
+ * Move a CS LRU element to the head, probably after it's been used.
+ *
+ * Implemented as dequeue + insert when the element is not already at
+ * the head of the list.
+ */
+void
+hicn_cs_lru_update_head (hicn_pit_cs_t * pit, hicn_hash_node_t * pnode,
+			 hicn_pcs_entry_t * pcs, hicn_cs_policy_t * lru)
+{
+  if (pcs->u.cs.cs_lru_prev != 0)
+    {
+      /*
+       * Not already on the head of the LRU, detach it from its
+       * current position
+       */
+      hicn_cs_lru_dequeue (pit, pnode, pcs, lru);
+
+      /* Now detached from the list; attach at head */
+      hicn_cs_lru_insert (pit, pnode, pcs, lru);
+
+    }
+  else
+    {
+      /* prev == 0: the element is either already the head, or detached */
+      if (pcs->u.cs.cs_lru_next == 0)
+	{
+	  /*
+	   * Detached (prev == next == 0); attach at head.
+	   * NOTE(review): a single-element list also has
+	   * prev == next == 0 while still linked as head/tail; this
+	   * path would then re-insert it, double-counting and creating
+	   * a self-link. Confirm callers never reach this case with a
+	   * still-linked element.
+	   */
+	  hicn_cs_lru_insert (pit, pnode, pcs, lru);
+	}
+      ASSERT (lru->head ==
+	      hicn_hashtb_node_idx_from_node (pit->pcs_table, pnode));
+    }
+}
+
<doc_update>
+/*
+ * Remove a batch of nodes from the CS LRU, copying their node indexes into
+ * the caller's array. We expect this is done when the LRU size exceeds the
+ * CS's limit. Return the number of removed nodes.
+ *
+ * Walks from the tail toward the head (cs_lru_prev direction), removing
+ * at most sz entries or until the list is exhausted (sentinel index 0).
+ */
+int
+hicn_cs_lru_trim (hicn_pit_cs_t * pit, u32 * node_list, int sz,
+		  hicn_cs_policy_t * lru)
+{
+  hicn_hash_node_t *lrunode;
+  hicn_pcs_entry_t *lrupcs;
+  u32 idx;
+  int i;
+
+  idx = lru->tail;
+
+  for (i = 0; i < sz; i++)
+    {
+
+      if (idx == 0)
+	{
+	  break;
+	}
+      lrunode = hicn_hashtb_node_from_idx (pit->pcs_table, idx);
+      lrupcs = hicn_pit_get_data (lrunode);
+
+      /* Record the victim and step toward the head before clearing links */
+      node_list[i] = idx;
+
+      idx = lrupcs->u.cs.cs_lru_prev;
+      lrupcs->u.cs.cs_lru_prev = 0;
+      lrupcs->u.cs.cs_lru_next = 0;
+    }
+
+  lru->count -= i;
+
+  lru->tail = idx;
+  if (idx != 0)
+    {
+      /* The surviving element becomes the new tail: clear its next link */
+      lrunode = hicn_hashtb_node_from_idx (pit->pcs_table, idx);
+      lrupcs = hicn_pit_get_data (lrunode);
+
+      lrupcs->u.cs.cs_lru_next = 0;
+    }
+  else
+    {
+      /* If the tail is empty, the whole lru is empty */
+      lru->head = 0;
+    }
+
+  return (i);
+}
+
+/*
+ * Empty the CS entirely: repeatedly delete the element at the tail of
+ * the LRU until the list is empty. Returns the number of deleted
+ * entries (0 if the list was already empty).
+ */
+int
+hicn_cs_lru_flush (vlib_main_t * vm, struct hicn_pit_cs_s *pitcs,
+		   hicn_cs_policy_t * state)
+{
+  if (state->head == 0 && state->tail == 0)
+    return 0;
+
+  hicn_hash_node_t *lrunode;
+  hicn_pcs_entry_t *lrupcs;
+  u32 idx;
+  int i = 0;
+
+  idx = state->tail;
+
+  while (idx != 0)
+    {
+      lrunode = hicn_hashtb_node_from_idx (pitcs->pcs_table, idx);
+      lrupcs = hicn_pit_get_data (lrunode);
+
+      /* Recompute the hash to decide which bucket holds the entry */
+      u64 hashval = 0;
+      hicn_hashtb_fullhash ((u8 *) & (lrunode->hn_key.ks.key),
+			    lrunode->hn_keysize, &hashval);
+      hicn_hash_bucket_t *bucket = NULL;
+      if ((hashval & (pitcs->pcs_table->ht_bucket_count - 1)) ==
+	  lrunode->bucket_id)
+	{
+	  /* Regular (non-overflow) bucket */
+	  bucket = pitcs->pcs_table->ht_buckets + lrunode->bucket_id;
+	}
+      else
+	{
+	  /* Otherwise the entry lives in an overflow bucket */
+	  bucket =
+	    pool_elt_at_index (pitcs->pcs_table->ht_overflow_buckets,
+			       lrunode->bucket_id);
+	}
+      hicn_hash_entry_t *hash_entry =
+	&(bucket->hb_entries[lrunode->entry_idx]);
+      /* Extra lock presumably keeps the entry alive across the delete
+       * call -- TODO confirm lock semantics of hicn_pcs_cs_delete */
+      hash_entry->locks++;
+      hicn_pcs_cs_delete (vm, pitcs, &lrupcs, &lrunode, hash_entry, NULL,
+			  NULL);
+      /* Re-read the tail: the delete is expected to dequeue the element
+       * and advance state->tail -- TODO confirm, otherwise this loops */
+      idx = state->tail;
+      i++;
+    }
+
+  return (i);
+
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/network/cache_policies/cs_lru.h b/hicn-plugin/src/network/cache_policies/cs_lru.h
new file mode 100644
index 000000000..3bd18060d
--- /dev/null
+++ b/hicn-plugin/src/network/cache_policies/cs_lru.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __LRU_H__
+#define __LRU_H__
+
+#include "../pcs.h"
+#include "../hashtb.h"
+#include "cs_policy.h"
+
+/**
+ * @file cs_lru.h
+ *
+ * This file implements the LRU policy for the CS
+ */
+
+
+/* Policy vft instance registered with the CS (defined in cs_lru.c) */
+extern hicn_cs_policy_vft_t hicn_cs_lru;
+
+/*
+ * Insert a new CS element at the head of the CS LRU
+ */
+void
+hicn_cs_lru_insert (hicn_pit_cs_t * pcs, hicn_hash_node_t * pnode,
+		    hicn_pcs_entry_t * entry, hicn_cs_policy_t * lru);
+
+
+/*
+ * Dequeue an LRU element, for example when it has expired.
+ */
+void
+hicn_cs_lru_dequeue (hicn_pit_cs_t * pcs, hicn_hash_node_t * pnode,
+		     hicn_pcs_entry_t * entry, hicn_cs_policy_t * lru);
+
+/*
+ * Move a CS LRU element to the head, probably after it's been used.
+ */
+void
+hicn_cs_lru_update_head (hicn_pit_cs_t * pcs, hicn_hash_node_t * pnode,
+			 hicn_pcs_entry_t * entry, hicn_cs_policy_t * lru);
+
+/*
+ * Return (without removing) the tail element -- the next eviction
+ * candidate -- along with its node and hash entry.
+ */
+void
+hicn_cs_lru_delete_get (hicn_pit_cs_t * p, hicn_cs_policy_t * policy,
+			hicn_hash_node_t ** node, hicn_pcs_entry_t ** pcs,
+			hicn_hash_entry_t ** hash_entry);
+
+/*
+ * Remove a batch of nodes from the CS LRU, copying their node indexes into
+ * the caller's array. We expect this is done when the LRU size exceeds the
+ * CS's limit. Return the number of removed nodes.
+ */
+int hicn_cs_lru_trim (hicn_pit_cs_t * pcs, u32 * node_list, int sz,
+		      hicn_cs_policy_t * lru);
+
+
+/*
+ * Delete every element in the CS; returns the number of deleted entries.
+ */
+int hicn_cs_lru_flush (vlib_main_t * vm, struct hicn_pit_cs_s *pitcs,
+		       hicn_cs_policy_t * state);
+#endif /* __LRU_H__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/network/cache_policies/cs_policy.h b/hicn-plugin/src/network/cache_policies/cs_policy.h
new file mode 100644
index 000000000..0bf745915
--- /dev/null
+++ b/hicn-plugin/src/network/cache_policies/cs_policy.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HICN_CS_POLICY_H__
+#define __HICN_CS_POLICY_H__
+
+#include "../hashtb.h"
+
+/**
+ * @file cs_policy.h
+ *
+ * This file provides the needed structures to implement a CS policy
+ */
+
+
+/*
+ * Per-policy bookkeeping state: capacity, current size, and the two
+ * hashtable-node indexes that anchor the policy's linked list
+ * (index 0 is the empty/sentinel value).
+ */
+typedef struct hicn_cs_policy_s
+{
+  u32 max;
+  u32 count;
+
+  /* Indexes to hashtable nodes forming CS LRU */
+  u32 head;
+  u32 tail;
+
+} hicn_cs_policy_t;
+
+/* Forward declaration */
+struct hicn_pit_cs_s;
+struct hicn_hash_node_s;
+struct hicn_pcs_entry_s;
+struct hicn_cs_policy_s;
+
+/**
+ * @brief Definition of the virtual function table for a cache policy.
+ *
+ * A cache policy must implement all the following functions:
+ * - insert: add a new element
+ * - update: update the position of an existing element
+ * - dequeue: remove an element from the list
+ * - delete_get: return the next element that should be removed
+ * - trim: remove a batch of elements
+ * - flush: clean the cs
+ */
+typedef struct hicn_cs_policy_vft_s
+{
+  void (*hicn_cs_insert) (struct hicn_pit_cs_s * p,
+			  struct hicn_hash_node_s * node,
+			  struct hicn_pcs_entry_s * pcs,
+			  hicn_cs_policy_t * policy);
+
+  void (*hicn_cs_update) (struct hicn_pit_cs_s * p,
+			  struct hicn_hash_node_s * node,
+			  struct hicn_pcs_entry_s * pcs,
+			  hicn_cs_policy_t * policy);
+
+  void (*hicn_cs_dequeue) (struct hicn_pit_cs_s * p,
+			   struct hicn_hash_node_s * node,
+			   struct hicn_pcs_entry_s * pcs,
+			   hicn_cs_policy_t * policy);
+
+  void (*hicn_cs_delete_get) (struct hicn_pit_cs_s * p,
+			      hicn_cs_policy_t * policy,
+			      struct hicn_hash_node_s ** node,
+			      struct hicn_pcs_entry_s ** pcs,
+			      struct hicn_hash_entry_s ** hash_entry);
+
+  int (*hicn_cs_trim) (struct hicn_pit_cs_s * p, u32 * node_list, int sz,
+		       hicn_cs_policy_t * policy);
+
+  int (*hicn_cs_flush) (vlib_main_t * vm, struct hicn_pit_cs_s * p,
+			hicn_cs_policy_t * policy_state);
+} hicn_cs_policy_vft_t;
+
+
+
+#endif /* __HICN_CS_POLICY_H__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/network/cli.c b/hicn-plugin/src/network/cli.c
new file mode 100644
index 000000000..22522b28b
--- /dev/null
+++ b/hicn-plugin/src/network/cli.c
@@ -0,0 +1,897 @@
+/*
+ * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vlib/vlib.h>
+#include <vppinfra/error.h>
+#include <vlibapi/api.h>
+#include <vlibmemory/api.h>
+
+#include <vnet/udp/udp.h> // port registration
+#include <vnet/ip/ip6_packet.h> // ip46_address_t
+#include <vnet/ip/format.h>
+#include <vnet/fib/fib_types.h>
+
+#include <vpp_plugins/hicn/hicn_api.h>
+
+#include "hicn.h"
+#include "infra.h"
+#include "parser.h"
+#include "mgmt.h"
+#include "strategy_dpo_manager.h"
+#include "strategy.h"
+#include "pg.h"
+#include "error.h"
+#include "faces/face.h"
+#include "route.h"
+
/* Forwarder parameters staged by 'hicn control param' and applied when the
 * forwarder is started. -1 / -1.0f act as "use compiled-in default". */
static vl_api_hicn_api_node_params_set_t node_ctl_params = {
  .pit_max_size = -1,
  .pit_max_lifetime_sec = -1.0f,
  .cs_max_size = -1,
};
+
/* Kind of underlying interface; not referenced in the visible handlers —
 * presumably used by other CLI code in this file (confirm). */
typedef enum
{
  IP,
  ETHERNET,
} interface_type_t;
+
+/*
+ * cli handler for 'control start'
+ */
+static clib_error_t *
+hicn_cli_node_ctl_start_set_command_fn (vlib_main_t * vm,
+ unformat_input_t * main_input,
+ vlib_cli_command_t * cmd)
+{
+ int ret;
+
+ ret = hicn_infra_plugin_enable_disable (1 /* enable */ ,
+ node_ctl_params.pit_max_size,
+ node_ctl_params.pit_max_lifetime_sec,
+ node_ctl_params.cs_max_size, ~0);
+
+ vlib_cli_output (vm, "hicn: fwdr initialize => %s\n",
+ get_error_string (ret));
+
+ return (ret == HICN_ERROR_NONE) ? 0 : clib_error_return (0,
+ get_error_string
+ (ret));
+}
+
+/*
+ * cli handler for 'control stop'
+ */
+static clib_error_t *
+hicn_cli_node_ctl_stop_set_command_fn (vlib_main_t * vm,
+ unformat_input_t * main_input,
+ vlib_cli_command_t * cmd)
+{
+ int ret;
+
+ /*
+ * Catch unexpected extra arguments on this line. See comment on
+ * hicn_cli_node_ctrl_start_set_command_fn
+ */
+ if (main_input->index > 0 &&
+ main_input->buffer[main_input->index - 1] != '\n')
+ {
+ unformat_input_t _line_input, *line_input = &_line_input;
+ if (!unformat_user (main_input, unformat_line_input, line_input))
+ {
+ return (0);
+ }
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ return clib_error_return (0, "%s '%U'",
+ get_error_string (HICN_ERROR_CLI_INVAL),
+ format_unformat_error, line_input);
+ }
+ }
+ ret = hicn_infra_plugin_enable_disable (0 /* !enable */ ,
+ node_ctl_params.pit_max_size,
+ node_ctl_params.pit_max_lifetime_sec,
+ node_ctl_params.cs_max_size, ~0);
+
+ return (ret == HICN_ERROR_NONE) ? 0 : clib_error_return (0,
+ get_error_string
+ (ret));
+}
+
/*
 * True when val is the "defaulted" sentinel (-1) or lies within
 * [min, max]. Implemented as a statement expression so every argument
 * is evaluated exactly once.
 */
#define DFLTD_RANGE_OK(val, min, max)                   \
({                                                      \
  __typeof__ (val) range_val_ = (val);                  \
  __typeof__ (min) range_min_ = (min);                  \
  __typeof__ (max) range_max_ = (max);                  \
  (range_val_ == -1) ||                                 \
    (range_val_ >= range_min_ &&                        \
     range_val_ <= range_max_);                         \
})
+
/*
 * cli handler for 'control param'
 *
 * Stages PIT/CS sizing parameters into node_ctl_params; they take effect
 * when the forwarder is started with 'hicn control start'. Parameters
 * cannot be changed while the forwarder is running.
 */
static clib_error_t *
hicn_cli_node_ctl_param_set_command_fn (vlib_main_t * vm,
					unformat_input_t * main_input,
					vlib_cli_command_t * cmd)
{
  int rv = 0;

  int table_size;
  f64 lifetime;

  /* Refuse changes once the forwarder has allocated its tables */
  if (hicn_main.is_enabled)
    {
      return (clib_error_return
	      (0, "params cannot be altered once hicn started"));
    }
  /* Get a line of input. */
  unformat_input_t _line_input, *line_input = &_line_input;
  if (!unformat_user (main_input, unformat_line_input, line_input))
    {
      /* NOTE(review): a missing argument line is reported with
       * HICN_ERROR_FWD_ALREADY_ENABLED, which looks like the wrong
       * error code for this path — confirm intent. */
      return clib_error_return (0,
				get_error_string
				(HICN_ERROR_FWD_ALREADY_ENABLED));
    }
  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
    {
      /* 'pit size <n>' or 'pit maxlife <seconds>' */
      if (unformat (line_input, "pit"))
	{
	  if (unformat (line_input, "size %d", &table_size))
	    {
	      /* -1 keeps the compiled-in default; otherwise bound-check */
	      if (!DFLTD_RANGE_OK (table_size, HICN_PARAM_PIT_ENTRIES_MIN,
				   HICN_PARAM_PIT_ENTRIES_MAX))
		{
		  rv = HICN_ERROR_PIT_CONFIG_SIZE_OOB;
		  break;
		}
	      node_ctl_params.pit_max_size = table_size;
	    }
	  else if (unformat (line_input, "maxlife %f", &lifetime))
	    {
	      if (!DFLTD_RANGE_OK
		  (lifetime, HICN_PARAM_PIT_LIFETIME_BOUND_MIN_SEC,
		   HICN_PARAM_PIT_LIFETIME_BOUND_MAX_SEC))
		{
		  rv = HICN_ERROR_PIT_CONFIG_MAXLT_OOB;
		  break;
		}
	      node_ctl_params.pit_max_lifetime_sec = lifetime;
	    }
	  else
	    {
	      rv = HICN_ERROR_CLI_INVAL;
	      break;
	    }
	}
      /* 'cs size <n>' */
      else if (unformat (line_input, "cs"))
	{
	  if (unformat (line_input, "size %d", &table_size))
	    {
	      if (!DFLTD_RANGE_OK (table_size, HICN_PARAM_CS_ENTRIES_MIN,
				   HICN_PARAM_CS_ENTRIES_MAX))
		{
		  rv = HICN_ERROR_CS_CONFIG_SIZE_OOB;
		  break;
		}
	      node_ctl_params.cs_max_size = table_size;
	    }
	  else
	    {
	      rv = HICN_ERROR_CLI_INVAL;
	      break;
	    }
	}
      else
	{
	  rv = HICN_ERROR_CLI_INVAL;
	  break;
	}
    }

  if (node_ctl_params.cs_max_size == 0)
    vlib_cli_output (vm,
		     "CS size set to 0. Consider disable CS at compilation time for better performances\n");

  return (rv == HICN_ERROR_NONE) ? 0 : clib_error_return (0, "%s '%U'",
							  get_error_string
							  (rv),
							  format_unformat_error,
							  line_input);
}
+
+/*
+ * cli handler for 'hicn show'
+ */
+static clib_error_t *
+hicn_cli_show_command_fn (vlib_main_t * vm, unformat_input_t * main_input,
+ vlib_cli_command_t * cmd)
+{
+ int face_p = 0, fib_p = 0, all_p, internal_p = 0, strategies_p = 0, ret =
+ HICN_ERROR_NONE;
+
+ /* Get a line of input. */
+ unformat_input_t _line_input, *line_input = &_line_input;
+ if (unformat_user (main_input, unformat_line_input, line_input))
+ {
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "face all"))
+ {
+ face_p = 1;
+ }
+ else if (unformat (line_input, "internal"))
+ {
+ /*
+ * We consider 'internal' a superset, so
+ * include 'detail' too
+ */
+ internal_p = 1;
+ }
+ else if (unformat (line_input, "strategies"))
+ {
+ /*
+ * We consider 'internal' a superset, so
+ * include 'detail' too
+ */
+ strategies_p = 1;
+ }
+ else
+ {
+ ret = HICN_ERROR_CLI_INVAL;
+ goto done;
+ }
+ }
+ }
+ /* If nothing specified, show everything */
+ if ((face_p == 0) && (fib_p == 0) && (strategies_p == 0))
+ {
+ all_p = 1;
+ }
+ if (!hicn_main.is_enabled)
+ {
+ if (node_ctl_params.pit_max_size == -1 &&
+ node_ctl_params.pit_max_lifetime_sec == -1 &&
+ node_ctl_params.cs_max_size == -1)
+ {
+ ret = HICN_ERROR_FWD_NOT_ENABLED;
+ goto done;
+ }
+ vlib_cli_output (vm, "Forwarder: %sabled\nPreconfiguration:\n",
+ hicn_main.is_enabled ? "en" : "dis");
+
+ if (node_ctl_params.pit_max_size != -1)
+ {
+ vlib_cli_output (vm, " PIT:: max entries:%d\n",
+ node_ctl_params.pit_max_size);
+ }
+ if (node_ctl_params.pit_max_lifetime_sec != -1)
+ {
+ vlib_cli_output (vm, " PIT:: max lifetime: %05.3f seconds\n",
+ node_ctl_params.pit_max_lifetime_sec);
+ }
+ if (node_ctl_params.cs_max_size != -1)
+ {
+ vlib_cli_output (vm, " CS:: max entries:%d\n",
+ node_ctl_params.cs_max_size);
+ }
+ goto done;
+ }
+ /* Globals */
+ vlib_cli_output (vm,
+ "Forwarder: %sabled\n"
+ " PIT:: max entries:%d,"
+ " lifetime default: max:%05.3f\n"
+ " CS:: max entries:%d\n",
+ hicn_main.is_enabled ? "en" : "dis",
+ hicn_infra_pit_size,
+ ((f64) hicn_main.pit_lifetime_max_ms) / SEC_MS,
+ hicn_infra_cs_size);
+
+ vl_api_hicn_api_node_stats_get_reply_t rm = { 0, }
+ , *rmp = &rm;
+ if (hicn_mgmt_node_stats_get (&rm) == HICN_ERROR_NONE)
+ {
+ vlib_cli_output (vm, //compare vl_api_hicn_api_node_stats_get_reply_t_handler block
+ " PIT entries (now): %d\n"
+ " CS total entries (now): %d, network entries (now): %d\n"
+ " Forwarding statistics:\n"
+ " pkts_processed: %d\n"
+ " pkts_interest_count: %d\n"
+ " pkts_data_count: %d\n"
+ " pkts_from_cache_count: %d\n"
+ " interests_aggregated: %d\n"
+ " interests_retransmitted: %d\n",
+ clib_net_to_host_u64 (rmp->pit_entries_count),
+ clib_net_to_host_u64 (rmp->cs_entries_count),
+ clib_net_to_host_u64 (rmp->cs_entries_ntw_count),
+ clib_net_to_host_u64 (rmp->pkts_processed),
+ clib_net_to_host_u64 (rmp->pkts_interest_count),
+ clib_net_to_host_u64 (rmp->pkts_data_count),
+ clib_net_to_host_u64 (rmp->pkts_from_cache_count),
+ clib_net_to_host_u64 (rmp->interests_aggregated),
+ clib_net_to_host_u64 (rmp->interests_retx));
+ }
+ if (face_p || all_p)
+ {
+ u8 *strbuf = NULL;
+
+ strbuf = format_hicn_face_all (strbuf, 1, 0);
+ vlib_cli_output (vm, "%s", strbuf);
+
+ }
+ if (strategies_p || all_p)
+ {
+ u8 *strbuf = NULL;
+
+ strbuf = format_hicn_strategy_list (strbuf, 1, 0);
+ vlib_cli_output (vm, (char *) strbuf);
+ }
+done:
+ if (all_p && internal_p && ret == HICN_ERROR_NONE)
+ {
+ vlib_cli_output (vm, "Plugin features: cs:%d\n", HICN_FEATURE_CS);
+ vlib_cli_output (vm,
+ "Removed CS entries (and freed vlib buffers) %d, Removed PIT entries %d\n",
+ hicn_main.pitcs.pcs_cs_dealloc,
+ hicn_main.pitcs.pcs_pit_dealloc);
+ vlib_cli_output (vm,
+ "Bucke count %d, Overflow buckets count %d, used %d\n",
+ hicn_main.pitcs.pcs_table->ht_bucket_count,
+ hicn_main.pitcs.pcs_table->ht_overflow_bucket_count,
+ hicn_main.pitcs.pcs_table->ht_overflow_buckets_used);
+
+ }
+ return (ret == HICN_ERROR_NONE) ? 0 : clib_error_return (0, "%s\n",
+ get_error_string
+ (ret));
+}
+
+/*
+ * cli handler for 'fib'
+ */
+static clib_error_t *
+hicn_cli_strategy_set_command_fn (vlib_main_t * vm,
+ unformat_input_t * main_input,
+ vlib_cli_command_t * cmd)
+{
+ clib_error_t *cl_err = 0;
+
+ int rv = HICN_ERROR_NONE;
+ int addpfx = -1;
+ ip46_address_t address;
+ u32 strategy_id;
+ u32 plen = 0;
+ fib_prefix_t prefix;
+
+ /* Get a line of input. */
+ unformat_input_t _line_input, *line_input = &_line_input;
+ if (!unformat_user (main_input, unformat_line_input, line_input))
+ {
+ return (0);
+ }
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "set %d", &strategy_id))
+ {
+ addpfx = 2;
+ }
+ else if (addpfx != -1
+ && unformat (line_input, "prefix %U/%d", unformat_ip46_address,
+ &address, IP46_TYPE_ANY, &plen))
+ {;
+ }
+ else
+ {
+ cl_err = clib_error_return (0, "%s '%U'",
+ get_error_string (HICN_ERROR_CLI_INVAL),
+ format_unformat_error, line_input);
+ goto done;
+ }
+ }
+
+ fib_prefix_from_ip46_addr (&address, &prefix);
+ prefix.fp_len = plen;
+ /* Check parse */
+ if (hicn_dpo_strategy_id_is_valid (strategy_id) ==
+ HICN_ERROR_DPO_MGR_ID_NOT_VALID)
+ {
+ cl_err = clib_error_return (0, "Please specify a valid strategy...");
+ goto done;
+ }
+
+ rv = hicn_route_set_strategy (&prefix, strategy_id);
+ cl_err =
+ (rv == HICN_ERROR_NONE) ? NULL : clib_error_return (0,
+ get_error_string
+ (rv));
+done:
+
+ return (cl_err);
+}
+
+/*
+ * cli handler for 'pgen'
+ */
+static clib_error_t *
+hicn_cli_pgen_client_set_command_fn (vlib_main_t * vm,
+ unformat_input_t * main_input,
+ vlib_cli_command_t * cmd)
+{
+ hicnpg_main_t *hpgm = &hicnpg_main;
+ ip46_address_t src_addr;
+ fib_prefix_t *prefix = malloc (sizeof (fib_prefix_t));
+ vnet_main_t *vnm = vnet_get_main ();
+ u32 sw_if_index = ~0;
+ u16 lifetime = 4000;
+ int rv = VNET_API_ERROR_UNIMPLEMENTED;
+ u32 max_seq = ~0;
+ u32 n_flows = ~0;
+ u32 n_ifaces = 1;
+
+ /* Get a line of input. */
+ unformat_input_t _line_input, *line_input = &_line_input;
+ if (unformat_user (main_input, unformat_line_input, line_input))
+ {
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat
+ (line_input, "intfc %U", unformat_vnet_sw_interface, vnm,
+ &sw_if_index))
+ {
+ ;
+ }
+ else if (unformat (line_input, "src %U",
+ unformat_ip46_address, &src_addr))
+ {
+ ;
+ }
+ else if (unformat (line_input, "n_ifaces %d", &n_ifaces))
+ {
+ ;
+ }
+ else if (unformat (line_input, "name %U/%d",
+ unformat_ip46_address, &prefix->fp_addr,
+ IP46_TYPE_ANY, &prefix->fp_len))
+ {
+ ;
+ }
+ else if (unformat (line_input, "lifetime %d", &lifetime))
+ {
+ ;
+ }
+ else if (unformat (line_input, "max_seq %d", &max_seq))
+ {
+ ;
+ }
+ else if (unformat (line_input, "n_flows %d", &n_flows))
+ {
+ ;
+ }
+ else
+ {
+ return (clib_error_return
+ (0, "Unknown input '%U'", format_unformat_error,
+ line_input));
+ break;
+ }
+ }
+ }
+ hpgm->interest_lifetime = lifetime;
+
+ if (sw_if_index == ~0)
+ {
+ return (clib_error_return (0, "Packet generator interface missing"));
+ }
+
+ //Remove bits that are out of the subnet
+ if (ip46_address_is_ip4 (&prefix->fp_addr))
+ {
+ ip4_address_t mask;
+ ip4_preflen_to_mask (prefix->fp_len, &mask);
+ prefix->fp_addr.ip4.as_u32 = prefix->fp_addr.ip4.as_u32 & mask.as_u32;
+ prefix->fp_proto = FIB_PROTOCOL_IP4;
+ }
+ else
+ {
+ ip6_address_t mask;
+ ip6_preflen_to_mask (prefix->fp_len, &mask);
+ prefix->fp_addr.ip6.as_u64[0] =
+ prefix->fp_addr.ip6.as_u64[0] & mask.as_u64[0];
+ prefix->fp_addr.ip6.as_u64[1] =
+ prefix->fp_addr.ip6.as_u64[1] & mask.as_u64[1];
+ prefix->fp_proto = FIB_PROTOCOL_IP6;
+ }
+
+ /*
+ * Enable the feature to divert data packet to the hicnpg-data node to count
+ * how many data packets have been received.
+ * Diver all the packets from the packet-generator to the hicn-pg-interest node
+ * to generate valid interests.
+ */
+ if (ip46_address_is_ip4 (&src_addr)
+ && ip46_address_is_ip4 (&prefix->fp_addr))
+ {
+ prefix->fp_proto = FIB_PROTOCOL_IP4;
+
+ vnet_feature_enable_disable ("ip4-unicast", "hicnpg-data",
+ sw_if_index, 1, 0, 0);
+
+ /* Add pgen_client node to the vpp graph */
+ vlib_node_add_next (vm,
+ pg_input_node.index, hicn_pg_interest_node.index);
+
+
+ pg_node_t *pn;
+ pn = pg_get_node (hicn_pg_interest_node.index);
+ pn->unformat_edit = unformat_pg_ip4_header;
+
+ }
+ else if (!ip46_address_is_ip4 (&src_addr)
+ && !ip46_address_is_ip4 (&prefix->fp_addr))
+ {
+ prefix->fp_proto = FIB_PROTOCOL_IP6;
+
+ vnet_feature_enable_disable ("ip6-unicast", "hicnpg-data",
+ sw_if_index, 1, 0, 0);
+
+ /* Add pgen_client node to the vpp graph */
+ vlib_node_add_next (vm, pg_input_node.index,
+ hicn_pg_interest_node.index);
+
+ pg_node_t *pn;
+ pn = pg_get_node (hicn_pg_interest_node.index);
+ pn->unformat_edit = unformat_pg_ip6_header;
+ }
+ else
+ {
+ return (clib_error_return
+ (0,
+ "pg interface source address, source address and hicn name must be of the same type IPv4 or IPv6"));
+ }
+
+
+ hpgm->pgen_clt_src_addr = src_addr;
+ hpgm->pgen_clt_hicn_name = prefix;
+ hpgm->max_seq_number = max_seq;
+ hpgm->n_flows = n_flows;
+ hpgm->n_ifaces = n_ifaces;
+ hpgm->sw_if = sw_if_index;
+ vlib_cli_output (vm, "ifaces %d", hpgm->n_ifaces);
+ rv = 0;
+
+ switch (rv)
+ {
+ case 0:
+ break;
+
+ case VNET_API_ERROR_UNIMPLEMENTED:
+ return clib_error_return (0, "Unimplemented, NYI");
+ break;
+
+ default:
+ return clib_error_return (0, "hicn enable_disable returned %d", rv);
+ }
+
+ return 0;
+}
+
+/*
+ * cli handler for 'pgen'
+ */
+static clib_error_t *
+hicn_cli_pgen_server_set_command_fn (vlib_main_t * vm,
+ unformat_input_t * main_input,
+ vlib_cli_command_t * cmd)
+{
+ clib_error_t *cl_err;
+ int rv = HICN_ERROR_NONE;
+ hicnpg_server_main_t *pg_main = &hicnpg_server_main;
+ int payload_size = 1440;
+ u32 sw_if_index = ~0;
+ vnet_main_t *vnm = vnet_get_main ();
+ fib_prefix_t *prefix = calloc (1, sizeof (fib_prefix_t));
+
+ /* Get a line of input. */
+ unformat_input_t _line_input, *line_input = &_line_input;
+ if (unformat_user (main_input, unformat_line_input, line_input))
+ {
+ /* Parse the arguments */
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "name %U/%d",
+ unformat_ip46_address, &prefix->fp_addr,
+ IP46_TYPE_ANY, &prefix->fp_len))
+ {;
+ }
+ else if (unformat (line_input, "size %d", &payload_size))
+ {
+ if (payload_size > 1440)
+ {
+ return (clib_error_return (0,
+ "Payload size must be <= 1440 bytes..."));
+ }
+ }
+ else
+ if (unformat
+ (line_input, "intfc %U", unformat_vnet_sw_interface, vnm,
+ &sw_if_index))
+ {
+ ;
+ }
+ else
+ {
+ return (clib_error_return
+ (0, "Unknown input '%U'", format_unformat_error,
+ line_input));
+ break;
+ }
+ }
+ }
+
+ /* Attach our packet-gen node for ip4 udp local traffic */
+ if ((prefix->fp_addr.ip6.as_u64[0] == (u64) 0
+ && prefix->fp_addr.ip6.as_u64[1] == 0) || payload_size == 0
+ || sw_if_index == ~0)
+ {
+ return clib_error_return (0,
+ "Error: must supply local port, payload size and incoming hICN prefix");
+ }
+
+ //Remove bits that are out of the subnet
+ if (ip46_address_is_ip4 (&prefix->fp_addr))
+ {
+ ip4_address_t mask;
+ ip4_preflen_to_mask (prefix->fp_len, &mask);
+ prefix->fp_addr.ip4.as_u32 = prefix->fp_addr.ip4.as_u32 & mask.as_u32;
+ prefix->fp_proto = FIB_PROTOCOL_IP4;
+ }
+ else
+ {
+ ip6_address_t mask;
+ ip6_preflen_to_mask (prefix->fp_len, &mask);
+ prefix->fp_addr.ip6.as_u64[0] =
+ prefix->fp_addr.ip6.as_u64[0] & mask.as_u64[0];
+ prefix->fp_addr.ip6.as_u64[1] =
+ prefix->fp_addr.ip6.as_u64[1] & mask.as_u64[1];
+ prefix->fp_proto = FIB_PROTOCOL_IP6;
+ }
+
+ /* Allocate the buffer with the actual content payload TLV */
+ int n_buf = vlib_buffer_alloc (vm, &pg_main->pgen_svr_buffer_idx, 1);
+
+ if (n_buf == 0)
+ {
+ return (clib_error_return (0, "Impossible to allocate paylod buffer."));
+ }
+
+ vlib_buffer_t *rb = NULL;
+ rb = vlib_get_buffer (vm, pg_main->pgen_svr_buffer_idx);
+
+ pg_main->pgen_srv_hicn_name = prefix;
+
+ /* Initialize the buffer data with zeros */
+ memset (rb->data, 0, payload_size);
+ rb->current_length = payload_size;
+
+ vnet_feature_enable_disable ("ip4-unicast", "hicnpg-server",
+ sw_if_index, 1, 0, 0);
+ vnet_feature_enable_disable ("ip6-unicast", "hicnpg-server",
+ sw_if_index, 1, 0, 0);
+
+ switch (rv)
+ {
+ case 0:
+ cl_err = 0;
+ break;
+
+ case VNET_API_ERROR_UNIMPLEMENTED:
+ cl_err = clib_error_return (0, "Unimplemented, NYI");
+ break;
+
+ default:
+ cl_err = clib_error_return (0, "hicn pgen server returned %d", rv);
+ }
+
+ return cl_err;
+}
+
+static clib_error_t *
+hicn_enable_command_fn (vlib_main_t * vm, unformat_input_t * main_input,
+ vlib_cli_command_t * cmd)
+{
+ clib_error_t *cl_err = 0;
+
+ int rv = HICN_ERROR_NONE;
+ fib_prefix_t pfx;
+
+ /* Get a line of input. */
+ unformat_input_t _line_input, *line_input = &_line_input;
+ if (!unformat_user (main_input, unformat_line_input, line_input))
+ {
+ return (0);
+ }
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "%U/%d",
+ unformat_ip4_address, &pfx.fp_addr.ip4, &pfx.fp_len))
+ {
+ pfx.fp_proto = FIB_PROTOCOL_IP4;
+ }
+ else if (unformat (line_input, "%U/%d",
+ unformat_ip6_address, &pfx.fp_addr.ip6, &pfx.fp_len))
+ {
+ pfx.fp_proto = FIB_PROTOCOL_IP6;
+ }
+ else
+ {
+ cl_err = clib_error_return (0, "%s '%U'",
+ get_error_string (HICN_ERROR_CLI_INVAL),
+ format_unformat_error, line_input);
+ goto done;
+ }
+ }
+ rv = hicn_route_enable (&pfx);
+done:
+
+ cl_err =
+ (rv == HICN_ERROR_NONE) ? NULL : clib_error_return (0,
+ get_error_string
+ (rv));
+ return cl_err;
+}
+
+static clib_error_t *
+hicn_disable_command_fn (vlib_main_t * vm, unformat_input_t * main_input,
+ vlib_cli_command_t * cmd)
+{
+ clib_error_t *cl_err = 0;
+
+ int rv = HICN_ERROR_NONE;
+ fib_prefix_t pfx;
+
+ /* Get a line of input. */
+ unformat_input_t _line_input, *line_input = &_line_input;
+ if (!unformat_user (main_input, unformat_line_input, line_input))
+ {
+ return (0);
+ }
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "%U/%d",
+ unformat_ip4_address, &pfx.fp_addr.ip4, &pfx.fp_len))
+ {
+ pfx.fp_proto = FIB_PROTOCOL_IP4;
+ }
+ else if (unformat (line_input, "%U/%d",
+ unformat_ip6_address, &pfx.fp_addr.ip6, &pfx.fp_len))
+ {
+ pfx.fp_proto = FIB_PROTOCOL_IP6;
+ }
+ else
+ {
+ cl_err = clib_error_return (0, "%s '%U'",
+ get_error_string (HICN_ERROR_CLI_INVAL),
+ format_unformat_error, line_input);
+ goto done;
+ }
+ }
+
+ rv = hicn_route_disable (&pfx);
+
+done:
+ cl_err =
+ (rv == HICN_ERROR_NONE) ? NULL : clib_error_return (0,
+ get_error_string
+ (rv));
+ return cl_err;
+}
+
+
/* cli declaration for 'hicn control start' */
/* *INDENT-OFF* */
VLIB_CLI_COMMAND(hicn_cli_node_ctl_start_set_command, static)=
{
  .path = "hicn control start",
  .short_help = "hicn control start",
  .function = hicn_cli_node_ctl_start_set_command_fn,
};


/* cli declaration for 'hicn control stop' */
VLIB_CLI_COMMAND(hicn_cli_node_ctl_stop_set_command, static)=
{
  .path = "hicn control stop",
  .short_help = "hicn control stop",
  .function = hicn_cli_node_ctl_stop_set_command_fn,
};


/* cli declaration for 'hicn control param' */
VLIB_CLI_COMMAND(hicn_cli_node_ctl_param_set_command, static)=
{
  .path = "hicn control param",
  .short_help = "hicn control param { pit { size <entries> | { dfltlife | minlife | maxlife } <seconds> } | fib size <entries> | cs {size <entries> | app <portion to reserved to app>} }\n",
  .function = hicn_cli_node_ctl_param_set_command_fn,
};

/* cli declaration for 'hicn control' (root path of multiple commands, for help) */
VLIB_CLI_COMMAND(hicn_cli_node_ctl_command, static)=
{
  .path = "hicn control",
  .short_help = "hicn control"
};

/* cli declaration for 'hicn strategy' (comment previously said 'fib') */
VLIB_CLI_COMMAND(hicn_cli_strategy_set_command, static)=
  {
    .path = "hicn strategy",
    .short_help = "hicn strategy set <strategy_id> prefix <prefix>",
    .function = hicn_cli_strategy_set_command_fn,
  };

/* cli declaration for 'hicn show' */
VLIB_CLI_COMMAND(hicn_cli_show_command, static)=
{
  .path = "hicn show",
  .short_help = "hicn show "
  "[internal]"
  "[strategies]",
  .function = hicn_cli_show_command_fn,
};

/* cli declaration for 'hicn pgen client' */
VLIB_CLI_COMMAND(hicn_cli_pgen_client_set_command, static)=
{
  .path = "hicn pgen client",
  .short_help = "hicn pgen client src <src_addr> name <prefix> { n_ifaces <n_ifaces> lifetime <interest-lifetime> intfc <data in-interface> max_seq <max sequence number> n_flows <number of flows>}",
  .long_help = "Run hicn in packet-gen client mode\n",
  .function = hicn_cli_pgen_client_set_command_fn,
};

/* cli declaration for 'hicn pgen server' (comment previously said 'client') */
VLIB_CLI_COMMAND(hicn_cli_pgen_server_set_command, static)=
{
  .path = "hicn pgen server",
  .short_help = "hicn pgen server name <prefix> intfc <interest in-interface> size <payload_size>",
  .long_help = "Run hicn in packet-gen server mode\n",
  .function = hicn_cli_pgen_server_set_command_fn,
};

/* cli declaration for 'hicn enable' (comment previously said 'pgen client') */
VLIB_CLI_COMMAND(hicn_enable_command, static)=
  {
    .path = "hicn enable",
    .short_help = "hicn enable <prefix>",
    .long_help = "Enable hicn for the give prefix\n",
    .function = hicn_enable_command_fn,
  };

/* cli declaration for 'hicn disable' (comment previously said 'pgen client') */
VLIB_CLI_COMMAND(hicn_disable_command, static)=
  {
    .path = "hicn disable",
    .short_help = "hicn disable <prefix>",
    .long_help = "Disable hicn for the give prefix\n",
    .function = hicn_disable_command_fn,
  };
+
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/network/data_fwd.h b/hicn-plugin/src/network/data_fwd.h
new file mode 100644
index 000000000..d95f564c3
--- /dev/null
+++ b/hicn-plugin/src/network/data_fwd.h
@@ -0,0 +1,214 @@
+/*
+ * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HICN_DATA_FWD_H__
+#define __HICN_DATA_FWD_H__
+
+#include <vlib/buffer.h>
+
+#include "pcs.h"
+
/**
 * @file data_fwd.h
 *
 * This is the node encountered by data packets after hicn-data-pcslookup.
 * This node has two goals: 1) clone/copy the vlib buffer as many times as the
 * number of faces stored in the PIT entry, 2) store a clone/copy of the vlib
 * buffer in the CS. Unless there are memory issues (no more vlib buffers
 * available to perform cloning/copying), a single received vlib buffer may
 * result in several vlib buffers sent to the next vlib node
 * (hicn4-iface-output or hicn6-iface-output).
 *
 * It must be noted that cloning is possible only if the length of the data
 * pointed to by the vlib buffer is at least 256 bytes. This is a restriction
 * imposed by the vpp source code. In all other cases the vlib buffer is
 * copied. Cloning is performed by advancing the received vlib buffer by 256
 * bytes; a new vlib buffer is created and chained in front of the received
 * buffer, and the 256 bytes removed (advanced) from the received vlib buffer
 * are copied into that head vlib buffer. When the same vlib buffer is cloned
 * multiple times, this mechanism allows us to have a different hICN header for
 * each clone (plus the same additional bytes required by the vpp restriction
 * on cloning).
 */
+
+
/* Trace context struct: per-packet data captured when tracing is enabled */
typedef struct
{
  u32 next_index;		/* next node index chosen for the packet */
  u32 sw_if_index;		/* buffer's sw_if_index — presumably RX interface; confirm against trace handler */
  u8 pkt_type;			/* hicn packet type */
  u8 packet_data[64];		/* first bytes of the packet, for display */
} hicn_data_fwd_trace_t;

/* Indexes of the next nodes reachable from the data forwarding node */
typedef enum
{
  HICN_DATA_FWD_NEXT_V4_LOOKUP,
  HICN_DATA_FWD_NEXT_V6_LOOKUP,
  HICN_DATA_FWD_NEXT_IFACE4_OUT,
  HICN_DATA_FWD_NEXT_IFACE6_OUT,
  HICN_DATA_FWD_NEXT_ERROR_DROP,
  HICN_DATA_FWD_N_NEXT,
} hicn_data_fwd_next_t;
+
+/**
+ * @brief Create a maximum of 256 clones of buffer and store them
+ * in the supplied array. Unlike the original function in the vlib
+ * library, we don't prevent cloning if n_buffer==1 and if
+ * s->current_length <= head_end_offset + CLIB_CACHE_LINE_BYTES * 2.
+ *
+ * @param vm - (vlib_main_t *) vlib main data structure pointer
+ * @param src_buffer - (u32) source buffer index
+ * @param buffers - (u32 * ) buffer index array
+ * @param n_buffers - (u16) number of buffer clones requested (<=256)
+ * @param head_end_offset - (u16) offset relative to current position
+ * where packet head ends
+ * @return - (u16) number of buffers actually cloned, may be
+ * less than the number requested or zero
+ */
+always_inline u16
+vlib_buffer_clone_256_2 (vlib_main_t * vm, u32 src_buffer, u32 * buffers,
+ u16 n_buffers, u16 head_end_offset)
+{
+ u16 i;
+ vlib_buffer_t *s = vlib_get_buffer (vm, src_buffer);
+
+ ASSERT (n_buffers);
+ ASSERT (n_buffers <= 256);
+
+ if (s->current_length <= head_end_offset + CLIB_CACHE_LINE_BYTES * 2)
+ {
+ for (i = 0; i < n_buffers; i++)
+ {
+ vlib_buffer_t *d;
+ d = vlib_buffer_copy (vm, s);
+ if (d == 0)
+ return i;
+ buffers[i] = vlib_get_buffer_index (vm, d);
+ }
+ return n_buffers;
+ }
+ n_buffers = vlib_buffer_alloc_from_pool (vm, buffers, n_buffers,
+ s->buffer_pool_index);
+
+ for (i = 0; i < n_buffers; i++)
+ {
+ vlib_buffer_t *d = vlib_get_buffer (vm, buffers[i]);
+ d->current_data = s->current_data;
+ d->current_length = head_end_offset;
+ d->trace_handle = s->trace_handle;
+
+ d->total_length_not_including_first_buffer = s->current_length -
+ head_end_offset;
+ if (PREDICT_FALSE (s->flags & VLIB_BUFFER_NEXT_PRESENT))
+ {
+ d->total_length_not_including_first_buffer +=
+ s->total_length_not_including_first_buffer;
+ }
+ d->flags = s->flags | VLIB_BUFFER_NEXT_PRESENT;
+ d->flags &= ~VLIB_BUFFER_EXT_HDR_VALID;
+ d->trace_handle = s->trace_handle;
+ clib_memcpy_fast (d->opaque, s->opaque, sizeof (s->opaque));
+ clib_memcpy_fast (d->opaque2, s->opaque2, sizeof (s->opaque2));
+ clib_memcpy_fast (vlib_buffer_get_current (d),
+ vlib_buffer_get_current (s), head_end_offset);
+ d->next_buffer = src_buffer;
+ }
+ vlib_buffer_advance (s, head_end_offset);
+ s->ref_count = n_buffers;
+ while (s->flags & VLIB_BUFFER_NEXT_PRESENT)
+ {
+ s = vlib_get_buffer (vm, s->next_buffer);
+ s->ref_count = n_buffers;
+ }
+
+ return n_buffers;
+}
+
/**
 * @brief Create multiple clones of buffer and store them
 * in the supplied array. Unlike the function in the vlib library,
 * we allow src_buffer to have ref_count != 0.
 *
 * @param vm - (vlib_main_t *) vlib main data structure pointer
 * @param src_buffer - (u32) source buffer index
 * @param buffers - (u32 * ) buffer index array
 * @param n_buffers - (u16) number of buffer clones requested (<=256)
 * @param head_end_offset - (u16) offset relative to current position
 * where packet head ends
 * @return - (u16) number of buffers actually cloned, may be
 * less than the number requested or zero
 */
always_inline u16
vlib_buffer_clone2 (vlib_main_t * vm, u32 src_buffer, u32 * buffers,
		    u16 n_buffers, u16 head_end_offset)
{
  vlib_buffer_t *s = vlib_get_buffer (vm, src_buffer);

  /*
   * total_length_not_including_first_buffer is not initialized to 0
   * when a buffer is used.
   * NOTE(review): this tests next_buffer rather than the
   * VLIB_BUFFER_NEXT_PRESENT flag; next_buffer may be stale when the
   * flag is cleared — confirm this is intended.
   */
  if (PREDICT_TRUE (s->next_buffer == 0))
    s->total_length_not_including_first_buffer = 0;

  u16 n_cloned = 0;
  /* Clones that can still reference src_buffer before its 8-bit
   * ref_count saturates (max 256 references per buffer). */
  u8 n_clone_src = 255 - s->ref_count;

  /*
   * We need to copy src for all the clones that cannot be chained in
   * the src_buffer
   */
  /* MAX(ref_count) = 256 */
  if (n_buffers > n_clone_src)
    {
      vlib_buffer_t *copy;
      /* Ok to call the original vlib_buffer_copy. */
      copy = vlib_buffer_copy (vm, s);
      n_cloned += vlib_buffer_clone (vm,
				     vlib_get_buffer_index (vm, copy),
				     buffers,
				     n_buffers - n_clone_src,
				     head_end_offset);
      n_buffers -= n_cloned;
    }
  /*
   * vlib_buffer_clone_256 checks if ref_count is 0. We force it to be
   * 0 before calling the function and we restore it to the right value
   * after the function has been called
   */
  u8 tmp_ref_count = s->ref_count;

  s->ref_count = 1;
  /*
   * The regular vlib_buffer_clone_256 does a copy if we need to clone
   * only one packet. While this is not a problem per se, it adds
   * complexity to the code, especially because we need to add 1 to
   * ref_count when the packet is cloned.
   */
  n_cloned += vlib_buffer_clone_256_2 (vm,
				       src_buffer,
				       (buffers + n_cloned),
				       n_buffers, head_end_offset);

  /* Fold the original reference count back in (the clone call above
   * set ref_count to the number of new clones). */
  s->ref_count += (tmp_ref_count - 1);

  return n_cloned;
}
+
+#endif /* //__HICN_DATA_FWD_H__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/network/data_fwd_node.c b/hicn-plugin/src/network/data_fwd_node.c
new file mode 100644
index 000000000..c65b62454
--- /dev/null
+++ b/hicn-plugin/src/network/data_fwd_node.c
@@ -0,0 +1,620 @@
+/*
+ * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/dpo/dpo.h>
+
+#include "data_fwd.h"
+#include "mgmt.h"
+#include "parser.h"
+#include "infra.h"
+#include "strategy.h"
+#include "strategy_dpo_manager.h"
+#include "state.h"
+#include "error.h"
+
+/* Stats string values: one human-readable message per hicnfwd error code. */
+static char *hicn_data_fwd_error_strings[] = {
+#define _(sym, string) string,
+ foreach_hicnfwd_error
+#undef _
+};
+
+/* Declarations */
+/* Enqueue bi0 on the error-drop disposition of the current frame. */
+always_inline void
+drop_packet (vlib_main_t * vm, u32 bi0,
+ u32 * n_left_to_next, u32 * next0, u32 ** to_next,
+ u32 * next_index, vlib_node_runtime_t * node);
+
+/* Clone/copy the Data in b0 towards every face recorded in pitp. */
+always_inline int
+hicn_satisfy_faces (vlib_main_t * vm, u32 b0,
+ hicn_pcs_entry_t * pitp, u32 * n_left_to_next,
+ u32 ** to_next, u32 * next_index,
+ vlib_node_runtime_t * node, u8 isv6,
+ vl_api_hicn_api_node_stats_get_reply_t * stats);
+
+/* Convert the PIT entry into a CS entry holding the Data buffer b0. */
+always_inline void
+clone_data_to_cs (vlib_main_t * vm, hicn_pit_cs_t * pitcs,
+ hicn_pcs_entry_t * pitp, hicn_header_t * hicn0, f64 tnow,
+ hicn_hash_node_t * nodep, vlib_buffer_t * b0,
+ hicn_hash_entry_t * hash_entry, u64 name_hash,
+ hicn_buffer_t * hicnb, const hicn_dpo_vft_t * dpo_vft,
+ dpo_id_t * hicn_dpo_id, hicn_lifetime_t dmsg_lifetime);
+
+
+/* packet trace format function */
+always_inline u8 *hicn_data_fwd_format_trace (u8 * s, va_list * args);
+
+vlib_node_registration_t hicn_data_fwd_node;
+
+/*
+ * ICN forwarder node for Data packets: matches each Data against its
+ * PIT entry, forwards it to the faces recorded there and, when the CS
+ * is enabled, stores a copy in the content store.
+ * - 1 packet at a time - ipv4/tcp ipv6/tcp
+ */
+static uword
+hicn_data_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+
+ u32 n_left_from, *from, *to_next;
+ hicn_data_fwd_next_t next_index;
+ hicn_pit_cs_t *pitcs = &hicn_main.pitcs;
+ vl_api_hicn_api_node_stats_get_reply_t stats = { 0 };
+ f64 tnow;
+ /* Starts at 1 so the aggregation-ratio division at the end of the
+ * function can never divide by zero. */
+ u32 data_received = 1;
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+
+ /* Capture time in vpp terms */
+ tnow = vlib_time_now (vm);
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ vlib_buffer_t *b0;
+ u8 isv6;
+ u8 *nameptr;
+ u16 namelen;
+ u32 bi0;
+ u32 next0 = HICN_DATA_FWD_NEXT_ERROR_DROP;
+ hicn_name_t name;
+ hicn_header_t *hicn0;
+ hicn_buffer_t *hicnb0;
+ hicn_hash_node_t *node0;
+ const hicn_strategy_vft_t *strategy_vft0;
+ const hicn_dpo_vft_t *dpo_vft0;
+ u8 dpo_ctx_id0;
+ hicn_pcs_entry_t *pitp;
+ hicn_hash_entry_t *hash_entry0;
+ int ret = HICN_ERROR_NONE;
+
+ /* Prefetch for next iteration. */
+ if (n_left_from > 1)
+ {
+ vlib_buffer_t *b1;
+ b1 = vlib_get_buffer (vm, from[1]);
+ CLIB_PREFETCH (b1, 2 * CLIB_CACHE_LINE_BYTES, STORE);
+ CLIB_PREFETCH (b1->data, CLIB_CACHE_LINE_BYTES, STORE);
+ }
+ /* Dequeue a packet buffer */
+ /*
+ * Do not copy the index in the next buffer, we'll do
+ * it later. The packet might be cloned, so the buffer to move
+ * to next must be the cloned one
+ */
+ bi0 = from[0];
+ from += 1;
+ n_left_from -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ /* Get hicn buffer and state */
+ hicnb0 = hicn_get_buffer (b0);
+ hicn_get_internal_state (hicnb0, pitcs, &node0, &strategy_vft0,
+ &dpo_vft0, &dpo_ctx_id0, &hash_entry0);
+
+ ret = hicn_data_parse_pkt (b0, &name, &namelen, &hicn0, &isv6);
+ pitp = hicn_pit_get_data (node0);
+ nameptr = (u8 *) (&name);
+
+ /* Drop on parse failure, name mismatch (hash collision) or when
+ * the entry already turned into a CS entry. */
+ if (PREDICT_FALSE
+ (ret != HICN_ERROR_NONE
+ || !hicn_node_compare (nameptr, namelen, node0)
+ || (hash_entry0->he_flags & HICN_HASH_ENTRY_FLAG_CS_ENTRY)))
+ {
+ /*
+ * Remove the lock acquired from
+ * data_pcslookup node
+ */
+ dpo_id_t hicn_dpo_id0 = { dpo_vft0->hicn_dpo_get_type (), 0, 0,
+ dpo_ctx_id0
+ };
+ hicn_pcs_remove_lock (pitcs, &pitp, &node0, vm,
+ hash_entry0, dpo_vft0, &hicn_dpo_id0);
+
+ drop_packet (vm, bi0, &n_left_to_next, &next0, &to_next,
+ &next_index, node);
+
+ goto end_processing;
+ }
+ /*
+ * Check if the hit is instead a collision in the
+ * hash table. Unlikely to happen.
+ */
+ /*
+ * there is no guarantee that the type of entry has
+ * not changed from the lookup.
+ */
+
+ if (tnow > pitp->shared.expire_time
+ || (hash_entry0->he_flags & HICN_HASH_ENTRY_FLAG_DELETED))
+ {
+ /* The PIT entry expired before the Data arrived: delete it
+ * and drop the packet. */
+ dpo_id_t hicn_dpo_id0 =
+ { dpo_vft0->hicn_dpo_get_type (), 0, 0, dpo_ctx_id0 };
+ hicn_pcs_delete (pitcs, &pitp, &node0, vm, hash_entry0,
+ dpo_vft0, &hicn_dpo_id0);
+
+ drop_packet (vm, bi0, &n_left_to_next, &next0, &to_next,
+ &next_index, node);
+ stats.pit_expired_count++;
+
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) &&
+ (b0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ hicn_data_fwd_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->pkt_type = HICN_PKT_TYPE_CONTENT;
+ t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+ t->next_index = next0;
+ clib_memcpy (t->packet_data,
+ vlib_buffer_get_current (b0),
+ sizeof (t->packet_data));
+ }
+ }
+ else
+ {
+ ASSERT ((hash_entry0->he_flags & HICN_HASH_ENTRY_FLAG_DELETED)
+ == 0);
+
+ data_received++;
+ /*
+ * We do not check if the data is coming from
+ * the outgoing interest face.
+ */
+
+ /* Prepare the buffer for the cloning */
+ ret = hicn_satisfy_faces (vm, bi0, pitp, &n_left_to_next,
+ &to_next, &next_index, node,
+ isv6, &stats);
+
+ dpo_id_t hicn_dpo_id0 = { dpo_vft0->hicn_dpo_get_type (), 0, 0,
+ dpo_ctx_id0
+ };
+
+ if (PREDICT_FALSE (ret != HICN_ERROR_NONE))
+ {
+ hicn_pcs_pit_delete (pitcs, &pitp, &node0, vm,
+ hash_entry0, dpo_vft0, &hicn_dpo_id0);
+ continue;
+ }
+ /*
+ * Call the strategy callback since the
+ * interest has been satisfied
+ */
+ strategy_vft0->hicn_receive_data (dpo_ctx_id0,
+ pitp->u.pit.pe_txnh);
+
+#if HICN_FEATURE_CS
+ hicn_lifetime_t dmsg_lifetime;
+
+ hicn_type_t type = hicnb0->type;
+ hicn_ops_vft[type.l1]->get_lifetime (type, &hicn0->protocol,
+ &dmsg_lifetime);
+
+ if (dmsg_lifetime)
+ {
+ /*
+ * Clone data packet in the content store and
+ * convert the PIT entry into a CS entry
+ */
+ clone_data_to_cs (vm, pitcs, pitp, hicn0, tnow, node0,
+ b0, hash_entry0, hicnb0->name_hash,
+ hicnb0, dpo_vft0, &hicn_dpo_id0,
+ dmsg_lifetime);
+
+ hicn_pcs_remove_lock (pitcs, &pitp, &node0, vm,
+ hash_entry0, NULL, NULL);
+ }
+ else
+ {
+ /*
+ * If the packet is copied and not cloned, we need to free the vlib_buffer
+ */
+ if (hicnb0->flags & HICN_BUFFER_FLAGS_PKT_LESS_TWO_CL)
+ {
+ vlib_buffer_free_one (vm, bi0);
+ }
+ else
+ {
+ /*
+ * Remove one reference as the buffer is no
+ * longer in any frame. The vlib_buffer will be freed when
+ * all its cloned vlib_buffer will be freed.
+ */
+ b0->ref_count--;
+ }
+
+ /* Delete the PIT entry */
+ hicn_pcs_pit_delete (pitcs, &pitp, &node0, vm,
+ hash_entry0, dpo_vft0, &hicn_dpo_id0);
+ }
+#else
+ ASSERT (pitp == hicn_pit_get_data (node0));
+ /*
+ * If the packet is copied and not cloned, we need to free the vlib_buffer
+ */
+ if (hicnb0->flags & HICN_BUFFER_FLAGS_PKT_LESS_TWO_CL)
+ {
+ vlib_buffer_free_one (vm, bi0);
+ }
+ else
+ {
+ /*
+ * Remove one reference as the buffer is no
+ * longer in any frame. The vlib_buffer will be freed when
+ * all its cloned vlib_buffer will be freed.
+ */
+ b0->ref_count--;
+ }
+
+ /* Delete the PIT entry */
+ hicn_pcs_pit_delete (pitcs, &pitp, &node0, vm,
+ hash_entry0, dpo_vft0, &hicn_dpo_id0);
+#endif
+ }
+ end_processing:
+
+ /* Incr packet counter */
+ stats.pkts_processed += 1;
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+ u32 pit_int_count = hicn_pit_get_int_count (pitcs);
+ u32 pit_cs_count = hicn_pit_get_cs_count (pitcs);
+
+ vlib_node_increment_counter (vm, hicn_data_fwd_node.index,
+ HICNFWD_ERROR_DATAS, stats.pkts_data_count);
+
+
+ update_node_counter (vm, hicn_data_fwd_node.index,
+ HICNFWD_ERROR_INT_COUNT, pit_int_count);
+ update_node_counter (vm, hicn_data_fwd_node.index,
+ HICNFWD_ERROR_CS_COUNT, pit_cs_count);
+ update_node_counter (vm, hicn_data_fwd_node.index,
+ HICNFWD_ERROR_INTEREST_AGG_ENTRY,
+ stats.pkts_data_count / data_received);
+
+ return (frame->n_vectors);
+}
+
+/*
+ * Route buffer bi0 to the error-drop disposition: enqueue it
+ * speculatively on the current frame, then let vlib fix up the frame
+ * if the speculation on next_index was wrong.
+ */
+always_inline void
+drop_packet (vlib_main_t * vm, u32 bi0,
+ u32 * n_left_to_next, u32 * next0, u32 ** to_next,
+ u32 * next_index, vlib_node_runtime_t * node)
+{
+ *next0 = HICN_DATA_FWD_NEXT_ERROR_DROP;
+
+ /* Speculative enqueue on the frame currently open. */
+ *(*to_next)++ = bi0;
+ --(*n_left_to_next);
+
+ /* Validate the speculation; moves bi0 if next0 differs. */
+ vlib_validate_buffer_enqueue_x1 (vm, node, *next_index,
+ *to_next, *n_left_to_next, bi0, *next0);
+}
+
+/*
+ * Clone (or copy, for packets smaller than two cache lines) the Data
+ * packet bi0 once per face recorded in the PIT entry and enqueue every
+ * clone towards the iface output nodes. Returns HICN_ERROR_NONE on
+ * success, HICN_ERROR_FACE_NOT_FOUND if no clone could be produced.
+ */
+always_inline int
+hicn_satisfy_faces (vlib_main_t * vm, u32 bi0,
+ hicn_pcs_entry_t * pitp, u32 * n_left_to_next,
+ u32 ** to_next, u32 * next_index,
+ vlib_node_runtime_t * node, u8 isv6,
+ vl_api_hicn_api_node_stats_get_reply_t * stats)
+{
+ int found = 0;
+ int ret = HICN_ERROR_NONE;
+ u32 *clones = NULL, *header = NULL;
+ u32 n_left_from = 0;
+ u32 next0 = HICN_DATA_FWD_NEXT_ERROR_DROP, next1 =
+ HICN_DATA_FWD_NEXT_ERROR_DROP;
+ word buffer_advance = CLIB_CACHE_LINE_BYTES * 2;
+
+ /*
+ * We have a hard limit on the number of vlib_buffer that we can
+ * chain (no more than 256)
+ */
+ /*
+ * The first group of vlib_buffer can be directly cloned from b0. We
+ * need to be careful to clone it only 254 times as the buffer
+ * already carries one reference.
+ */
+ vec_alloc (clones, pitp->u.pit.faces.n_faces);
+ header = clones;
+
+ /* Clone bi0 */
+ vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
+
+ hicn_buffer_t *hicnb = hicn_get_buffer (b0);
+
+ /*
+ * Mark the buffer as smaller than TWO_CL. It will be stored as is in the CS, without excluding
+ * the hicn_header. Cloning is not possible, it will be copied.
+ */
+ if (b0->current_length <= (buffer_advance + (CLIB_CACHE_LINE_BYTES * 2)))
+ {
+ /* In this case the packet is copied. We don't need to add a reference as no buffer are
+ * chained to it.
+ */
+ hicnb->flags |= HICN_BUFFER_FLAGS_PKT_LESS_TWO_CL;
+ }
+ else
+ {
+ /* Add one reference to maintain the buffer in the CS.
+ * b0->ref_count == 0 has two meaning: it has 1 buffer or no buffer chained to it.
+ * vlib_buffer_clone2 add a number of reference equal to pitp->u.pit.faces.n_faces - 1
+ * as vlib_buffer_clone does. So after all the packet are forwarded the buffer stored in
+ * the CS will have ref_count == 0;
+ */
+ b0->ref_count++;
+ }
+
+ found = n_left_from =
+ vlib_buffer_clone2 (vm, bi0, clones, pitp->u.pit.faces.n_faces,
+ buffer_advance);
+
+ ASSERT (n_left_from == pitp->u.pit.faces.n_faces);
+
+ /* Index to iterate over the faces */
+ int i = 0;
+
+ while (n_left_from > 0)
+ {
+
+ //Dual loop, X2
+ while (n_left_from >= 4 && *n_left_to_next >= 2)
+ {
+ vlib_buffer_t *h0, *h1;
+ u32 hi0, hi1;
+ hicn_face_id_t face0, face1;
+
+ /* Prefetch for next iteration. */
+ {
+ vlib_buffer_t *h2, *h3;
+ h2 = vlib_get_buffer (vm, clones[2]);
+ h3 = vlib_get_buffer (vm, clones[3]);
+ CLIB_PREFETCH (h2, 2 * CLIB_CACHE_LINE_BYTES, STORE);
+ CLIB_PREFETCH (h3, 2 * CLIB_CACHE_LINE_BYTES, STORE);
+ }
+
+ face0 = hicn_face_db_get_dpo_face (i++, &pitp->u.pit.faces);
+ face1 = hicn_face_db_get_dpo_face (i++, &pitp->u.pit.faces);
+
+ h0 = vlib_get_buffer (vm, clones[0]);
+ h1 = vlib_get_buffer (vm, clones[1]);
+
+ (*to_next)[0] = hi0 = clones[0];
+ (*to_next)[1] = hi1 = clones[1];
+ *to_next += 2;
+ *n_left_to_next -= 2;
+ n_left_from -= 2;
+ clones += 2;
+
+ next0 = isv6 ? HICN_DATA_FWD_NEXT_IFACE6_OUT :
+ HICN_DATA_FWD_NEXT_IFACE4_OUT;
+ next1 = isv6 ? HICN_DATA_FWD_NEXT_IFACE6_OUT :
+ HICN_DATA_FWD_NEXT_IFACE4_OUT;
+
+ vnet_buffer (h0)->ip.adj_index[VLIB_TX] = face0;
+ vnet_buffer (h1)->ip.adj_index[VLIB_TX] = face1;
+
+ stats->pkts_data_count += 2;
+
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) &&
+ (h0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ hicn_data_fwd_trace_t *t =
+ vlib_add_trace (vm, node, h0, sizeof (*t));
+ t->pkt_type = HICN_PKT_TYPE_CONTENT;
+ t->sw_if_index = vnet_buffer (h0)->sw_if_index[VLIB_RX];
+ t->next_index = next0;
+ clib_memcpy (t->packet_data,
+ vlib_buffer_get_current (h0),
+ sizeof (t->packet_data));
+ }
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) &&
+ (h1->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ hicn_data_fwd_trace_t *t =
+ vlib_add_trace (vm, node, h1, sizeof (*t));
+ t->pkt_type = HICN_PKT_TYPE_CONTENT;
+ t->sw_if_index = vnet_buffer (h1)->sw_if_index[VLIB_RX];
+ t->next_index = next1;
+ clib_memcpy (t->packet_data,
+ vlib_buffer_get_current (h1),
+ sizeof (t->packet_data));
+ }
+ vlib_validate_buffer_enqueue_x2 (vm, node, *next_index,
+ (*to_next), *n_left_to_next,
+ hi0, hi1, next0, next1);
+ }
+
+
+ while (n_left_from > 0 && *n_left_to_next > 0)
+ {
+ vlib_buffer_t *h0;
+ u32 hi0;
+ hicn_face_id_t face0;
+
+ face0 = hicn_face_db_get_dpo_face (i++, &pitp->u.pit.faces);
+
+ h0 = vlib_get_buffer (vm, clones[0]);
+
+ (*to_next)[0] = hi0 = clones[0];
+ *to_next += 1;
+ *n_left_to_next -= 1;
+ n_left_from -= 1;
+ clones += 1;
+
+ next0 = isv6 ? HICN_DATA_FWD_NEXT_IFACE6_OUT :
+ HICN_DATA_FWD_NEXT_IFACE4_OUT;
+ vnet_buffer (h0)->ip.adj_index[VLIB_TX] = face0;
+
+ stats->pkts_data_count++;
+
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) &&
+ (h0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ hicn_data_fwd_trace_t *t =
+ vlib_add_trace (vm, node, h0, sizeof (*t));
+ t->pkt_type = HICN_PKT_TYPE_CONTENT;
+ t->sw_if_index = vnet_buffer (h0)->sw_if_index[VLIB_RX];
+ t->next_index = next0;
+ clib_memcpy (t->packet_data,
+ vlib_buffer_get_current (h0),
+ sizeof (t->packet_data));
+ }
+ /*
+ * Verify speculative enqueue, maybe switch current
+ * next frame
+ */
+ /*
+ * Fix in case of a wrong speculation. Needed to
+ * clone the data in the right frame
+ */
+ vlib_validate_buffer_enqueue_x1 (vm, node, *next_index,
+ *to_next, *n_left_to_next,
+ hi0, next0);
+
+ }
+
+ /* Ensure that there is space for the next clone (if any) */
+ if (PREDICT_FALSE (*n_left_to_next == 0))
+ {
+ vlib_put_next_frame (vm, node, *next_index, *n_left_to_next);
+
+ vlib_get_next_frame (vm, node, *next_index, *to_next,
+ *n_left_to_next);
+ }
+ }
+
+ vec_free (header);
+
+ if (PREDICT_FALSE (!found))
+ {
+ ASSERT (0);
+ drop_packet (vm, bi0, n_left_to_next, &next0, to_next, next_index,
+ node);
+ ret = HICN_ERROR_FACE_NOT_FOUND;
+ }
+ return ret;
+}
+
+/*
+ * Store the received Data in the CS: convert the PIT hashtable entry
+ * into a CS entry and record creation/expiration times. The PIT and
+ * CS parts of the entry share a union, so the PIT view of pitp must
+ * not be trusted once the conversion has started.
+ */
+always_inline void
+clone_data_to_cs (vlib_main_t * vm, hicn_pit_cs_t * pitcs,
+ hicn_pcs_entry_t * pitp, hicn_header_t * hicn0, f64 tnow,
+ hicn_hash_node_t * nodep, vlib_buffer_t * b0,
+ hicn_hash_entry_t * hash_entry, u64 name_hash,
+ hicn_buffer_t * hicnb, const hicn_dpo_vft_t * dpo_vft,
+ dpo_id_t * hicn_dpo_id, hicn_lifetime_t dmsg_lifetime)
+{
+ hicn_buffer_t *hicnb0 = hicn_get_buffer (b0);
+
+ /* PIT -> CS conversion; pitp is a CS entry from here on. */
+ hicn_pit_to_cs (vm, pitcs, pitp, hash_entry, nodep, dpo_vft, hicn_dpo_id,
+ hicnb->face_id,
+ hicnb0->flags & HICN_BUFFER_FLAGS_FACE_IS_APP);
+
+ /* Clamp the advertised lifetime into the configured CS bounds. */
+ if (dmsg_lifetime < HICN_PARAM_CS_LIFETIME_MIN
+ || dmsg_lifetime > HICN_PARAM_CS_LIFETIME_MAX)
+ {
+ dmsg_lifetime = HICN_PARAM_CS_LIFETIME_DFLT;
+ }
+
+ pitp->shared.create_time = tnow;
+ pitp->shared.expire_time = hicn_pcs_get_exp_time (tnow, dmsg_lifetime);
+
+ /* The CS entry keeps the index of the original packet buffer. */
+ pitp->u.cs.cs_pkt_buf = vlib_get_buffer_index (vm, b0);
+}
+
+/* packet trace format function */
+always_inline u8 *
+hicn_data_fwd_format_trace (u8 * s, va_list * args)
+{
+ /* The first two varargs (vm, node) are mandated by the API but unused. */
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ hicn_data_fwd_trace_t *tr = va_arg (*args, hicn_data_fwd_trace_t *);
+
+ /* Capture the indentation before appending anything to s. */
+ u32 indent = format_get_indent (s);
+
+ s = format (s, "DATAFWD: pkt: %d, sw_if_index %d, next index %d\n",
+ (int) tr->pkt_type, tr->sw_if_index, tr->next_index);
+ s = format (s, "%U%U", format_white_space, indent,
+ format_ip6_header, tr->packet_data, sizeof (tr->packet_data));
+
+ return (s);
+}
+
+/*
+ * Node registration for the data forwarder node
+ */
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE(hicn_data_fwd_node) =
+{
+  .function = hicn_data_node_fn,
+  .name = "hicn-data-fwd",
+  .vector_size = sizeof(u32),
+  .format_trace = hicn_data_fwd_format_trace,
+  .type = VLIB_NODE_TYPE_INTERNAL,
+  .n_errors = ARRAY_LEN(hicn_data_fwd_error_strings),
+  .error_strings = hicn_data_fwd_error_strings,
+  .n_next_nodes = HICN_DATA_FWD_N_NEXT,
+  /* edit / add dispositions here */
+  .next_nodes = {
+    [HICN_DATA_FWD_NEXT_V4_LOOKUP] = "ip4-lookup",
+    [HICN_DATA_FWD_NEXT_V6_LOOKUP] = "ip6-lookup",
+    [HICN_DATA_FWD_NEXT_IFACE4_OUT] = "hicn4-iface-output",
+    [HICN_DATA_FWD_NEXT_IFACE6_OUT] = "hicn6-iface-output",
+    [HICN_DATA_FWD_NEXT_ERROR_DROP] = "error-drop",
+  },
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/network/data_input_node.c b/hicn-plugin/src/network/data_input_node.c
new file mode 100644
index 000000000..8d20f54a6
--- /dev/null
+++ b/hicn-plugin/src/network/data_input_node.c
@@ -0,0 +1,697 @@
+/*
+ * Copyright (c) 2020 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/vnet.h>
+#include <vnet/ip/ip.h>
+#include <vnet/fib/ip6_fib.h>
+#include <vnet/fib/fib_table.h> /* for FIB table and entry creation */
+#include <vnet/fib/fib_entry.h> /* for FIB table and entry creation */
+#include <vnet/fib/ip4_fib.h>
+#include <vnet/dpo/load_balance.h>
+
+#include "mgmt.h"
+#include "strategy_dpo_manager.h"
+
+/* One human-readable message per hicnfwd error counter. */
+static __clib_unused char *hicn_data_input_error_strings[] = {
+#define _(sym, string) string,
+ foreach_hicnfwd_error
+#undef _
+};
+
+/* Per-packet bookkeeping (appears unused in this file — TODO confirm). */
+typedef struct
+{
+ u32 next_index;
+ u32 sw_if_index;
+} hicn_data_input_t;
+
+/* Dispositions of the ip6 data-input node. */
+typedef enum
+{
+ HICN_DATA_INPUT_IP6_NEXT_FACE,
+ HICN_DATA_INPUT_IP6_NEXT_IP6_LOCAL,
+ HICN_DATA_INPUT_IP6_N_NEXT,
+} hicn_data_input_ip6_next_t;
+
+/* Dispositions of the ip4 data-input node. */
+typedef enum
+{
+ HICN_DATA_INPUT_IP4_NEXT_FACE,
+ HICN_DATA_INPUT_IP4_NEXT_IP4_LOCAL,
+ HICN_DATA_INPUT_IP4_N_NEXT,
+} hicn_data_input_ip4_next_t;
+
+/* Trace record; isv6 distinguishes the ip4/ip6 variant of the node. */
+typedef struct
+{
+ u32 next_index;
+ u32 sw_if_index;
+ u8 isv6;
+} hicn_data_input_trace_t;
+
+vlib_node_registration_t hicn_data_input_ip6_node;
+vlib_node_registration_t hicn_data_input_ip4_node;
+
+/*
+ * Packet trace format function. VPP invokes trace formatters with
+ * exactly three varargs: (vm, node, trace-record pointer). The
+ * address family is therefore read from the recorded trace entry
+ * (t->isv6, filled in by the node functions); the previous code
+ * pulled it from a fourth vararg that callers never supply, which
+ * read indeterminate data.
+ */
+static __clib_unused u8 *
+format_hicn_data_input_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ hicn_data_input_trace_t *t = va_arg (*args, hicn_data_input_trace_t *);
+ u32 indent = format_get_indent (s);
+
+ s =
+ format (s, "%U hicn_data_input%s: sw_if_index %d next-index %d",
+ format_white_space, indent, t->isv6 ? "_ip6" : "_ip4",
+ t->sw_if_index, t->next_index);
+ return s;
+}
+
+/*
+ * Looks up the *source* address of each incoming ip6 packet in the
+ * FIB: if the matching DPO is an hICN DPO, the packet is handed to
+ * the hICN face input node, otherwise it continues on the regular
+ * ip6-local arc.
+ */
+static uword
+hicn_data_input_ip6_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ ip6_main_t *im = &ip6_main;
+ vlib_combined_counter_main_t *cm = &load_balance_main.lbm_to_counters;
+ u32 n_left_from, n_left_to_next, *from, *to_next;
+ ip_lookup_next_t next;
+ u32 thread_index = vm->thread_index;
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ vlib_get_next_frame (vm, node, next, to_next, n_left_to_next);
+
+ /* Dual loop: two packets per iteration, prefetch two ahead. */
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ vlib_buffer_t *p0, *p1;
+ u32 pi0, pi1, lbi0, lbi1, wrong_next;
+ ip_lookup_next_t next0, next1;
+ ip6_header_t *ip0, *ip1;
+ ip6_address_t *src_addr0, *src_addr1;
+ const dpo_id_t *dpo0, *dpo1;
+ const load_balance_t *lb0, *lb1;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t *p2, *p3;
+
+ p2 = vlib_get_buffer (vm, from[2]);
+ p3 = vlib_get_buffer (vm, from[3]);
+
+ vlib_prefetch_buffer_header (p2, LOAD);
+ vlib_prefetch_buffer_header (p3, LOAD);
+ CLIB_PREFETCH (p2->data, sizeof (ip0[0]), LOAD);
+ CLIB_PREFETCH (p3->data, sizeof (ip0[0]), LOAD);
+ }
+
+ pi0 = to_next[0] = from[0];
+ pi1 = to_next[1] = from[1];
+
+ p0 = vlib_get_buffer (vm, pi0);
+ p1 = vlib_get_buffer (vm, pi1);
+
+ ip0 = vlib_buffer_get_current (p0);
+ ip1 = vlib_buffer_get_current (p1);
+
+ src_addr0 = &ip0->src_address;
+ src_addr1 = &ip1->src_address;
+
+ ip_lookup_set_buffer_fib_index (im->fib_index_by_sw_if_index, p0);
+ ip_lookup_set_buffer_fib_index (im->fib_index_by_sw_if_index, p1);
+
+ /* FIB lookup on the source address: a Data's source is the
+ * hICN name prefix. */
+ lbi0 = ip6_fib_table_fwding_lookup (vnet_buffer (p0)->ip.fib_index,
+ src_addr0);
+ lbi1 = ip6_fib_table_fwding_lookup (vnet_buffer (p1)->ip.fib_index,
+ src_addr1);
+
+ lb0 = load_balance_get (lbi0);
+ lb1 = load_balance_get (lbi1);
+ ASSERT (lb0->lb_n_buckets > 0);
+ ASSERT (lb1->lb_n_buckets > 0);
+ ASSERT (is_pow2 (lb0->lb_n_buckets));
+ ASSERT (is_pow2 (lb1->lb_n_buckets));
+
+ vnet_buffer (p0)->ip.flow_hash = vnet_buffer (p1)->ip.flow_hash = 0;
+
+ //No vpp loadbalancing. Missing header file to exploit it
+ dpo0 = load_balance_get_bucket_i (lb0, 0);
+ dpo1 = load_balance_get_bucket_i (lb1, 0);
+
+ if (dpo_is_hicn (dpo0))
+ next0 = HICN_DATA_INPUT_IP6_NEXT_FACE;
+ else
+ next0 = HICN_DATA_INPUT_IP6_NEXT_IP6_LOCAL;
+
+ if (dpo_is_hicn (dpo1))
+ next1 = HICN_DATA_INPUT_IP6_NEXT_FACE;
+ else
+ next1 = HICN_DATA_INPUT_IP6_NEXT_IP6_LOCAL;
+
+ if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE) &&
+ (p0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ hicn_data_input_trace_t *t =
+ vlib_add_trace (vm, node, p0, sizeof (*t));
+ t->sw_if_index = vnet_buffer (p0)->sw_if_index[VLIB_RX];
+ t->next_index = next0;
+ t->isv6 = 1;
+ }
+
+ if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE) &&
+ (p1->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ hicn_data_input_trace_t *t =
+ vlib_add_trace (vm, node, p1, sizeof (*t));
+ t->sw_if_index = vnet_buffer (p1)->sw_if_index[VLIB_RX];
+ t->next_index = next1;
+ t->isv6 = 1;
+ }
+
+
+ vlib_increment_combined_counter
+ (cm, thread_index, lbi0, 1, vlib_buffer_length_in_chain (vm, p0));
+ vlib_increment_combined_counter
+ (cm, thread_index, lbi1, 1, vlib_buffer_length_in_chain (vm, p1));
+
+ from += 2;
+ to_next += 2;
+ n_left_to_next -= 2;
+ n_left_from -= 2;
+
+ /* Repair the speculative double enqueue when one or both
+ * packets go to a different next node. */
+ wrong_next = (next0 != next) + 2 * (next1 != next);
+ if (PREDICT_FALSE (wrong_next != 0))
+ {
+ switch (wrong_next)
+ {
+ case 1:
+ /* A B A */
+ to_next[-2] = pi1;
+ to_next -= 1;
+ n_left_to_next += 1;
+ vlib_set_next_frame_buffer (vm, node, next0, pi0);
+ break;
+
+ case 2:
+ /* A A B */
+ to_next -= 1;
+ n_left_to_next += 1;
+ vlib_set_next_frame_buffer (vm, node, next1, pi1);
+ break;
+
+ case 3:
+ /* A B C */
+ to_next -= 2;
+ n_left_to_next += 2;
+ vlib_set_next_frame_buffer (vm, node, next0, pi0);
+ vlib_set_next_frame_buffer (vm, node, next1, pi1);
+ if (next0 == next1)
+ {
+ /* A B B */
+ vlib_put_next_frame (vm, node, next, n_left_to_next);
+ next = next1;
+ vlib_get_next_frame (vm, node, next, to_next,
+ n_left_to_next);
+ }
+ }
+ }
+ }
+
+ /* Single loop: remaining packets, one at a time. */
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ vlib_buffer_t *p0;
+ ip6_header_t *ip0;
+ u32 pi0, lbi0;
+ ip_lookup_next_t next0;
+ load_balance_t *lb0;
+ ip6_address_t *src_addr0;
+ const dpo_id_t *dpo0;
+
+ pi0 = from[0];
+ to_next[0] = pi0;
+
+ p0 = vlib_get_buffer (vm, pi0);
+ ip0 = vlib_buffer_get_current (p0);
+ src_addr0 = &ip0->src_address;
+ ip_lookup_set_buffer_fib_index (im->fib_index_by_sw_if_index, p0);
+ lbi0 = ip6_fib_table_fwding_lookup (vnet_buffer (p0)->ip.fib_index,
+ src_addr0);
+
+ lb0 = load_balance_get (lbi0);
+ ASSERT (lb0->lb_n_buckets > 0);
+ ASSERT (is_pow2 (lb0->lb_n_buckets));
+
+ //No vpp loadbalancing. Missing header file to exploit it
+ dpo0 = load_balance_get_bucket_i (lb0, 0);
+
+ if (dpo_is_hicn (dpo0))
+ next0 = HICN_DATA_INPUT_IP6_NEXT_FACE;
+ else
+ next0 = HICN_DATA_INPUT_IP6_NEXT_IP6_LOCAL;
+
+ if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE) &&
+ (p0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ hicn_data_input_trace_t *t =
+ vlib_add_trace (vm, node, p0, sizeof (*t));
+ t->sw_if_index = vnet_buffer (p0)->sw_if_index[VLIB_RX];
+ t->next_index = next0;
+ t->isv6 = 1;
+ }
+
+ vlib_increment_combined_counter
+ (cm, thread_index, lbi0, 1, vlib_buffer_length_in_chain (vm, p0));
+
+ from += 1;
+ to_next += 1;
+ n_left_to_next -= 1;
+ n_left_from -= 1;
+
+ if (PREDICT_FALSE (next0 != next))
+ {
+ n_left_to_next += 1;
+ vlib_put_next_frame (vm, node, next, n_left_to_next);
+ next = next0;
+ vlib_get_next_frame (vm, node, next, to_next, n_left_to_next);
+ to_next[0] = pi0;
+ to_next += 1;
+ n_left_to_next -= 1;
+ }
+ }
+
+ vlib_put_next_frame (vm, node, next, n_left_to_next);
+ }
+
+ return frame->n_vectors;
+}
+
+/* *INDENT-OFF* */
+/*
+ * NOTE(review): this registers the symbol `hicn_data_input_ip6`, while
+ * the forward declaration above is `hicn_data_input_ip6_node` — confirm
+ * the intended registration symbol name; the declared `_node` variable
+ * is otherwise left undefined.
+ */
+VLIB_REGISTER_NODE(hicn_data_input_ip6) =
+  {
+    .function = hicn_data_input_ip6_fn,
+    .name = "hicn-data-input-ip6",
+    .vector_size = sizeof(u32),
+    .format_trace = format_hicn_data_input_trace,
+    .type = VLIB_NODE_TYPE_INTERNAL,
+    .n_errors = ARRAY_LEN(hicn_data_input_error_strings),
+    .error_strings = hicn_data_input_error_strings,
+    .n_next_nodes = HICN_DATA_INPUT_IP6_N_NEXT,
+    .next_nodes =
+    {
+      [HICN_DATA_INPUT_IP6_NEXT_FACE] = "hicn6-face-input",
+      [HICN_DATA_INPUT_IP6_NEXT_IP6_LOCAL] = "ip6-local-end-of-arc"
+    },
+  };
+/* *INDENT-ON* */
+
+/* *INDENT-OFF* */
+/* Insert hicn-data-input-ip6 on the ip6-local feature arc, before its end. */
+VNET_FEATURE_INIT(hicn_data_input_ip6_arc, static)=
+  {
+    .arc_name = "ip6-local",
+    .node_name = "hicn-data-input-ip6",
+    .runs_before = VNET_FEATURES("ip6-local-end-of-arc"),
+  };
+/* *INDENT-ON* */
+
+
+
+always_inline uword
+hicn_data_input_ip4_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ ip4_main_t *im = &ip4_main;
+ vlib_combined_counter_main_t *cm = &load_balance_main.lbm_to_counters;
+ u32 n_left, *from;
+ u32 thread_index = vm->thread_index;
+ vlib_buffer_t *bufs[VLIB_FRAME_SIZE];
+ vlib_buffer_t **b = bufs;
+ u16 nexts[VLIB_FRAME_SIZE], *next;
+
+ from = vlib_frame_vector_args (frame);
+ n_left = frame->n_vectors;
+ next = nexts;
+ vlib_get_buffers (vm, from, bufs, n_left);
+
+#if (CLIB_N_PREFETCHES >= 8)
+ while (n_left >= 4)
+ {
+ ip4_header_t *ip0, *ip1, *ip2, *ip3;
+ const load_balance_t *lb0, *lb1, *lb2, *lb3;
+ ip4_fib_mtrie_t *mtrie0, *mtrie1, *mtrie2, *mtrie3;
+ ip4_fib_mtrie_leaf_t leaf0, leaf1, leaf2, leaf3;
+ ip4_address_t *src_addr0, *src_addr1, *src_addr2, *src_addr3;
+ u32 lb_index0, lb_index1, lb_index2, lb_index3;
+ const dpo_id_t *dpo0, *dpo1, *dpo2, *dpo3;
+
+ /* Prefetch next iteration. */
+ if (n_left >= 8)
+ {
+ vlib_prefetch_buffer_header (b[4], LOAD);
+ vlib_prefetch_buffer_header (b[5], LOAD);
+ vlib_prefetch_buffer_header (b[6], LOAD);
+ vlib_prefetch_buffer_header (b[7], LOAD);
+
+ CLIB_PREFETCH (b[4]->data, sizeof (ip0[0]), LOAD);
+ CLIB_PREFETCH (b[5]->data, sizeof (ip0[0]), LOAD);
+ CLIB_PREFETCH (b[6]->data, sizeof (ip0[0]), LOAD);
+ CLIB_PREFETCH (b[7]->data, sizeof (ip0[0]), LOAD);
+ }
+
+ ip0 = vlib_buffer_get_current (b[0]);
+ ip1 = vlib_buffer_get_current (b[1]);
+ ip2 = vlib_buffer_get_current (b[2]);
+ ip3 = vlib_buffer_get_current (b[3]);
+
+ src_addr0 = &ip0->src_address;
+ src_addr1 = &ip1->src_address;
+ src_addr2 = &ip2->src_address;
+ src_addr3 = &ip3->src_address;
+
+ ip_lookup_set_buffer_fib_index (im->fib_index_by_sw_if_index, b[0]);
+ ip_lookup_set_buffer_fib_index (im->fib_index_by_sw_if_index, b[1]);
+ ip_lookup_set_buffer_fib_index (im->fib_index_by_sw_if_index, b[2]);
+ ip_lookup_set_buffer_fib_index (im->fib_index_by_sw_if_index, b[3]);
+
+ mtrie0 = &ip4_fib_get (vnet_buffer (b[0])->ip.fib_index)->mtrie;
+ mtrie1 = &ip4_fib_get (vnet_buffer (b[1])->ip.fib_index)->mtrie;
+ mtrie2 = &ip4_fib_get (vnet_buffer (b[2])->ip.fib_index)->mtrie;
+ mtrie3 = &ip4_fib_get (vnet_buffer (b[3])->ip.fib_index)->mtrie;
+
+ leaf0 = ip4_fib_mtrie_lookup_step_one (mtrie0, src_addr0);
+ leaf1 = ip4_fib_mtrie_lookup_step_one (mtrie1, src_addr1);
+ leaf2 = ip4_fib_mtrie_lookup_step_one (mtrie2, src_addr2);
+ leaf3 = ip4_fib_mtrie_lookup_step_one (mtrie3, src_addr3);
+
+ leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, src_addr0, 2);
+ leaf1 = ip4_fib_mtrie_lookup_step (mtrie1, leaf1, src_addr1, 2);
+ leaf2 = ip4_fib_mtrie_lookup_step (mtrie2, leaf2, src_addr2, 2);
+ leaf3 = ip4_fib_mtrie_lookup_step (mtrie3, leaf3, src_addr3, 2);
+
+ leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, src_addr0, 3);
+ leaf1 = ip4_fib_mtrie_lookup_step (mtrie1, leaf1, src_addr1, 3);
+ leaf2 = ip4_fib_mtrie_lookup_step (mtrie2, leaf2, src_addr2, 3);
+ leaf3 = ip4_fib_mtrie_lookup_step (mtrie3, leaf3, src_addr3, 3);
+
+ lb_index0 = ip4_fib_mtrie_leaf_get_adj_index (leaf0);
+ lb_index1 = ip4_fib_mtrie_leaf_get_adj_index (leaf1);
+ lb_index2 = ip4_fib_mtrie_leaf_get_adj_index (leaf2);
+ lb_index3 = ip4_fib_mtrie_leaf_get_adj_index (leaf3);
+
+ ASSERT (lb_index0 && lb_index1 && lb_index2 && lb_index3);
+ lb0 = load_balance_get (lb_index0);
+ lb1 = load_balance_get (lb_index1);
+ lb2 = load_balance_get (lb_index2);
+ lb3 = load_balance_get (lb_index3);
+
+ ASSERT (lb0->lb_n_buckets > 0);
+ ASSERT (is_pow2 (lb0->lb_n_buckets));
+ ASSERT (lb1->lb_n_buckets > 0);
+ ASSERT (is_pow2 (lb1->lb_n_buckets));
+ ASSERT (lb2->lb_n_buckets > 0);
+ ASSERT (is_pow2 (lb2->lb_n_buckets));
+ ASSERT (lb3->lb_n_buckets > 0);
+ ASSERT (is_pow2 (lb3->lb_n_buckets));
+
+ dpo0 = load_balance_get_bucket_i (lb0, 0);
+ dpo1 = load_balance_get_bucket_i (lb1, 0);
+ dpo2 = load_balance_get_bucket_i (lb2, 0);
+ dpo3 = load_balance_get_bucket_i (lb3, 0);
+
+ if (dpo_is_hicn (dpo0))
+ next[0] = HICN_DATA_INPUT_IP4_NEXT_FACE;
+ else
+ next[0] = HICN_DATA_INPUT_IP4_NEXT_IP4_LOCAL;
+
+ if (dpo_is_hicn (dpo1))
+ next[1] = HICN_DATA_INPUT_IP4_NEXT_FACE;
+ else
+ next[1] = HICN_DATA_INPUT_IP4_NEXT_IP4_LOCAL;
+
+ if (dpo_is_hicn (dpo2))
+ next[2] = HICN_DATA_INPUT_IP4_NEXT_FACE;
+ else
+ next[2] = HICN_DATA_INPUT_IP4_NEXT_IP4_LOCAL;
+
+ if (dpo_is_hicn (dpo3))
+ next[3] = HICN_DATA_INPUT_IP4_NEXT_FACE;
+ else
+ next[3] = HICN_DATA_INPUT_IP4_NEXT_IP4_LOCAL;
+
+
+ if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE) &&
+ (b[0]->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ hicn_data_input_trace_t *t =
+ vlib_add_trace (vm, node, b[0], sizeof (*t));
+ t->sw_if_index = vnet_buffer (b[0])->sw_if_index[VLIB_RX];
+ t->next_index = next[0];
+ t->isv6 = 0;
+ }
+
+ if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE) &&
+ (b[1]->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ hicn_data_input_trace_t *t =
+ vlib_add_trace (vm, node, b[1], sizeof (*t));
+ t->sw_if_index = vnet_buffer (b[1])->sw_if_index[VLIB_RX];
+ t->next_index = next[1];
+ t->isv6 = 0;
+ }
+
+ if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE) &&
+ (b[2]->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ hicn_data_input_trace_t *t =
+ vlib_add_trace (vm, node, b[2], sizeof (*t));
+ t->sw_if_index = vnet_buffer (b[2])->sw_if_index[VLIB_RX];
+ t->next_index = next[0];
+ t->isv6 = 0;
+ }
+
+ if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE) &&
+ (b[3]->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ hicn_data_input_trace_t *t =
+ vlib_add_trace (vm, node, b[3], sizeof (*t));
+ t->sw_if_index = vnet_buffer (b[3])->sw_if_index[VLIB_RX];
+ t->next_index = next[3];
+ t->isv6 = 0;
+ }
+
+ vlib_increment_combined_counter
+ (cm, thread_index, lb_index0, 1,
+ vlib_buffer_length_in_chain (vm, b[0]));
+ vlib_increment_combined_counter
+ (cm, thread_index, lb_index1, 1,
+ vlib_buffer_length_in_chain (vm, b[1]));
+ vlib_increment_combined_counter
+ (cm, thread_index, lb_index2, 1,
+ vlib_buffer_length_in_chain (vm, b[2]));
+ vlib_increment_combined_counter
+ (cm, thread_index, lb_index3, 1,
+ vlib_buffer_length_in_chain (vm, b[3]));
+
+ b += 4;
+ next += 4;
+ n_left -= 4;
+ }
+#elif (CLIB_N_PREFETCHES >= 4)
+ while (n_left >= 4)
+ {
+ ip4_header_t *ip0, *ip1;
+ const load_balance_t *lb0, *lb1;
+ ip4_fib_mtrie_t *mtrie0, *mtrie1;
+ ip4_fib_mtrie_leaf_t leaf0, leaf1;
+ ip4_address_t *src_addr0, *src_addr1;
+ u32 lb_index0, lb_index1;
+ flow_hash_config_t flow_hash_config0, flow_hash_config1;
+ u32 hash_c0, hash_c1;
+ const dpo_id_t *dpo0, *dpo1;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_prefetch_buffer_header (b[2], LOAD);
+ vlib_prefetch_buffer_header (b[3], LOAD);
+
+ CLIB_PREFETCH (b[2]->data, sizeof (ip0[0]), LOAD);
+ CLIB_PREFETCH (b[3]->data, sizeof (ip0[0]), LOAD);
+ }
+
+ ip0 = vlib_buffer_get_current (b[0]);
+ ip1 = vlib_buffer_get_current (b[1]);
+
+ src_addr0 = &ip0->src_address;
+ src_addr1 = &ip1->src_address;
+
+ ip_lookup_set_buffer_fib_index (im->fib_index_by_sw_if_index, b[0]);
+ ip_lookup_set_buffer_fib_index (im->fib_index_by_sw_if_index, b[1]);
+
+ mtrie0 = &ip4_fib_get (vnet_buffer (b[0])->ip.fib_index)->mtrie;
+ mtrie1 = &ip4_fib_get (vnet_buffer (b[1])->ip.fib_index)->mtrie;
+
+ leaf0 = ip4_fib_mtrie_lookup_step_one (mtrie0, src_addr0);
+ leaf1 = ip4_fib_mtrie_lookup_step_one (mtrie1, src_addr1);
+
+ leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, src_addr0, 2);
+ leaf1 = ip4_fib_mtrie_lookup_step (mtrie1, leaf1, src_addr1, 2);
+
+ leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, src_addr0, 3);
+ leaf1 = ip4_fib_mtrie_lookup_step (mtrie1, leaf1, src_addr1, 3);
+
+ lb_index0 = ip4_fib_mtrie_leaf_get_adj_index (leaf0);
+ lb_index1 = ip4_fib_mtrie_leaf_get_adj_index (leaf1);
+
+ ASSERT (lb_index0 && lb_index1);
+ lb0 = load_balance_get (lb_index0);
+ lb1 = load_balance_get (lb_index1);
+
+ ASSERT (lb0->lb_n_buckets > 0);
+ ASSERT (is_pow2 (lb0->lb_n_buckets));
+ ASSERT (lb1->lb_n_buckets > 0);
+ ASSERT (is_pow2 (lb1->lb_n_buckets));
+
+ dpo0 = load_balance_get_bucket_i (lb0, 0);
+ dpo1 = load_balance_get_bucket_i (lb1, 0);
+
+ if (dpo_is_hicn (dpo0))
+ next[0] = HICN_DATA_INPUT_IP4_NEXT_FACE;
+ else
+ next[0] = HICN_DATA_INPUT_IP4_NEXT_IP4_LOCAL;
+
+ if (dpo_is_hicn (dpo1))
+ next[1] = HICN_DATA_INPUT_IP4_NEXT_FACE;
+ else
+ next[1] = HICN_DATA_INPUT_IP4_NEXT_IP4_LOCAL;
+
+	  /* BUGFIX: this CLIB_N_PREFETCHES >= 4 path referenced undeclared
+	   * b0/b1; the buffers in scope are b[0]/b[1]. */
+	  if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE) &&
+	      (b[0]->flags & VLIB_BUFFER_IS_TRACED))
+	    {
+	      hicn_data_input_trace_t *t =
+		vlib_add_trace (vm, node, b[0], sizeof (*t));
+	      t->sw_if_index = vnet_buffer (b[0])->sw_if_index[VLIB_RX];
+	      t->next_index = next[0];
+	      t->isv6 = 0;
+	    }
+
+	  if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE) &&
+	      (b[1]->flags & VLIB_BUFFER_IS_TRACED))
+	    {
+	      hicn_data_input_trace_t *t =
+		vlib_add_trace (vm, node, b[1], sizeof (*t));
+	      t->sw_if_index = vnet_buffer (b[1])->sw_if_index[VLIB_RX];
+	      t->next_index = next[1];
+	      t->isv6 = 0;
+	    }
+
+
+ vlib_increment_combined_counter
+ (cm, thread_index, lb_index0, 1,
+ vlib_buffer_length_in_chain (vm, b[0]));
+ vlib_increment_combined_counter
+ (cm, thread_index, lb_index1, 1,
+ vlib_buffer_length_in_chain (vm, b[1]));
+
+ b += 2;
+ next += 2;
+ n_left -= 2;
+ }
+#endif
+ while (n_left > 0)
+ {
+ ip4_header_t *ip0;
+ const load_balance_t *lb0;
+ ip4_fib_mtrie_t *mtrie0;
+ ip4_fib_mtrie_leaf_t leaf0;
+ ip4_address_t *src_addr0;
+ u32 lbi0;
+ const dpo_id_t *dpo0;
+
+ ip0 = vlib_buffer_get_current (b[0]);
+ src_addr0 = &ip0->src_address;
+ ip_lookup_set_buffer_fib_index (im->fib_index_by_sw_if_index, b[0]);
+
+ mtrie0 = &ip4_fib_get (vnet_buffer (b[0])->ip.fib_index)->mtrie;
+ leaf0 = ip4_fib_mtrie_lookup_step_one (mtrie0, src_addr0);
+ leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, src_addr0, 2);
+ leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, src_addr0, 3);
+ lbi0 = ip4_fib_mtrie_leaf_get_adj_index (leaf0);
+
+ ASSERT (lbi0);
+ lb0 = load_balance_get (lbi0);
+
+ ASSERT (lb0->lb_n_buckets > 0);
+ ASSERT (is_pow2 (lb0->lb_n_buckets));
+
+ dpo0 = load_balance_get_bucket_i (lb0, 0);
+
+ if (dpo_is_hicn (dpo0))
+ next[0] = HICN_DATA_INPUT_IP4_NEXT_FACE;
+ else
+ next[0] = HICN_DATA_INPUT_IP4_NEXT_IP4_LOCAL;
+
+ if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE) &&
+ (b[0]->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ hicn_data_input_trace_t *t =
+ vlib_add_trace (vm, node, b[0], sizeof (*t));
+ t->sw_if_index = vnet_buffer (b[0])->sw_if_index[VLIB_RX];
+ t->next_index = next[0];
+ t->isv6 = 0;
+ }
+
+ vlib_increment_combined_counter (cm, thread_index, lbi0, 1,
+ vlib_buffer_length_in_chain (vm,
+ b[0]));
+
+ b += 1;
+ next += 1;
+ n_left -= 1;
+ }
+
+ vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
+
+ if (node->flags & VLIB_NODE_FLAG_TRACE)
+ ip4_forward_next_trace (vm, node, frame, VLIB_TX);
+
+ return frame->n_vectors;
+}
+
+/* *INDENT-OFF* */
+/*
+ * Node registration: hICN data packets are handed to hicn4-face-input;
+ * all other local ip4 traffic continues to ip4-local-end-of-arc.
+ */
+VLIB_REGISTER_NODE(hicn_data_input_ip4) =
+  {
+    .function = hicn_data_input_ip4_fn,
+    .name = "hicn-data-input-ip4",
+    .vector_size = sizeof(u32),
+    .format_trace = format_hicn_data_input_trace,
+    .type = VLIB_NODE_TYPE_INTERNAL,
+    .n_errors = ARRAY_LEN(hicn_data_input_error_strings),
+    .error_strings = hicn_data_input_error_strings,
+    .n_next_nodes = HICN_DATA_INPUT_IP4_N_NEXT,
+    .next_nodes =
+    {
+      [HICN_DATA_INPUT_IP4_NEXT_FACE] = "hicn4-face-input",
+      [HICN_DATA_INPUT_IP4_NEXT_IP4_LOCAL] = "ip4-local-end-of-arc"
+    },
+  };
+/* *INDENT-ON* */
+
+/* *INDENT-OFF* */
+/*
+ * Insert hicn-data-input-ip4 in the ip4-local feature arc, ahead of
+ * ip4-local-end-of-arc.
+ */
+VNET_FEATURE_INIT(hicn_data_input_ip4_arc, static)=
+  {
+    .arc_name = "ip4-local",
+    .node_name = "hicn-data-input-ip4",
+    .runs_before = VNET_FEATURES("ip4-local-end-of-arc"),
+  };
+/* *INDENT-ON* */
diff --git a/hicn-plugin/src/network/data_pcslookup.h b/hicn-plugin/src/network/data_pcslookup.h
new file mode 100644
index 000000000..e3050c31c
--- /dev/null
+++ b/hicn-plugin/src/network/data_pcslookup.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HICN_DATA_PCSLOOKUP_H__
+#define __HICN_DATA_PCSLOOKUP_H__
+
+#include "pcs.h"
+
+/**
+ * @file data_pcslookup.h
+ *
+ * This is the node encountered by data packets after hicn6-face-input or
+ * hicn4-face-input. It performs a lookup in the PIT and content store: on a
+ * PIT hit the vlib buffer is passed on to hicn-data-fwd, while on a CS hit
+ * or on a lookup miss the packet is dropped.
+ */
+
+/*
+ * Node context data; we think this is per-thread/instance
+ * (VLIB runtime_data, see runtime_data_bytes in the node registration)
+ */
+typedef struct hicn_data_pcslookup_runtime_s
+{
+  int id;			/* node instance id */
+  hicn_pit_cs_t *pitcs;		/* PIT/CS table; bound lazily to hicn_main.pitcs */
+} hicn_data_pcslookup_runtime_t;
+
+/* Trace context struct */
+typedef struct
+{
+  u32 next_index;		/* next node chosen for the traced packet */
+  u32 sw_if_index;		/* RX interface of the traced packet */
+  u8 pkt_type;			/* HICN_PKT_TYPE_* */
+} hicn_data_pcslookup_trace_t;
+
+typedef enum
+{
+  HICN_DATA_PCSLOOKUP_NEXT_DATA_FWD,	/* This must be one position
+					 * before the error drop!! The
+					 * node computes next as
+					 * DATA_FWD + is_cs. */
+  HICN_DATA_PCSLOOKUP_NEXT_ERROR_DROP,
+  HICN_DATA_PCSLOOKUP_N_NEXT,
+} hicn_data_pcslookup_next_t;
+
+#endif /* //__HICN_DATA_PCSLOOKUP_H__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/network/data_pcslookup_node.c b/hicn-plugin/src/network/data_pcslookup_node.c
new file mode 100644
index 000000000..99af350b0
--- /dev/null
+++ b/hicn-plugin/src/network/data_pcslookup_node.c
@@ -0,0 +1,223 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "data_pcslookup.h"
+#include "infra.h"
+#include "mgmt.h"
+#include "parser.h"
+#include "state.h"
+#include "strategy.h"
+#include "strategy_dpo_manager.h"
+
+/* Stats string values, expanded from the foreach_hicnfwd_error table */
+static char *hicn_data_pcslookup_error_strings[] = {
+#define _(sym, string) string,
+  foreach_hicnfwd_error
+#undef _
+};
+
+/* packet trace format function (forward declaration; defined below) */
+always_inline u8 *hicn_data_pcslookup_format_trace (u8 * s, va_list * args);
+
+/* Registration object, initialized by VLIB_REGISTER_NODE below */
+vlib_node_registration_t hicn_data_pcslookup_node;
+
+/*
+ * hICN node for handling data. It performs a lookup in the PIT.
+ */
+static uword
+hicn_data_pcslookup_node_fn (vlib_main_t * vm,
+			     vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+  u32 n_left_from, *from, *to_next;
+  hicn_data_pcslookup_next_t next_index;
+  hicn_data_pcslookup_runtime_t *rt;
+  vl_api_hicn_api_node_stats_get_reply_t stats = { 0 };
+
+  rt = vlib_node_get_runtime_data (vm, node->node_index);
+
+  /* Lazily bind this node's runtime to the global PIT/CS table. */
+  if (PREDICT_FALSE (rt->pitcs == NULL))
+    {
+      rt->pitcs = &hicn_main.pitcs;
+    }
+  from = vlib_frame_vector_args (frame);
+  n_left_from = frame->n_vectors;
+  next_index = node->cached_next_index;
+
+  while (n_left_from > 0)
+    {
+      u32 n_left_to_next;
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+      while (n_left_from > 0 && n_left_to_next > 0)
+	{
+	  vlib_buffer_t *b0;
+	  u8 isv6;
+	  u8 *nameptr;
+	  u16 namelen;
+	  u32 bi0;
+	  u32 next0 = HICN_DATA_PCSLOOKUP_NEXT_ERROR_DROP;
+	  u64 name_hash = 0;
+	  hicn_name_t name;
+	  hicn_header_t *hicn0 = NULL;
+	  u32 node_id0 = 0;
+	  index_t dpo_ctx_id0 = 0;
+	  int ret0;
+	  u8 vft_id0;
+	  u8 is_cs0;
+	  u8 hash_entry_id = 0;
+	  u8 bucket_is_overflown = 0;
+	  u32 bucket_id = ~0;
+
+	  /* Prefetch for next iteration. */
+	  if (n_left_from > 1)
+	    {
+	      vlib_buffer_t *b1;
+	      b1 = vlib_get_buffer (vm, from[1]);
+	      // Prefetch two cache lines-- 128 byte-- so that we load the
+	      // hicn_buffer_t as well
+	      CLIB_PREFETCH (b1, 2 * CLIB_CACHE_LINE_BYTES, STORE);
+	      CLIB_PREFETCH (b1->data, CLIB_CACHE_LINE_BYTES, LOAD);
+	    }
+	  /* Dequeue a packet buffer */
+	  bi0 = from[0];
+	  from += 1;
+	  n_left_from -= 1;
+	  to_next[0] = bi0;
+	  to_next += 1;
+	  n_left_to_next -= 1;
+
+	  b0 = vlib_get_buffer (vm, bi0);
+	  next0 = HICN_DATA_PCSLOOKUP_NEXT_ERROR_DROP;
+
+	  /* Incr packet counter */
+	  stats.pkts_processed += 1;
+
+	  /* Parse the data packet and extract its name. */
+	  ret0 = hicn_data_parse_pkt (b0, &name, &namelen, &hicn0, &isv6);
+	  nameptr = (u8 *) (&name);
+
+	  if (PREDICT_TRUE (ret0 == HICN_ERROR_NONE &&
+			    hicn_hashtb_fullhash (nameptr, namelen,
+						  &name_hash) ==
+			    HICN_ERROR_NONE))
+	    {
+	      int res =
+		hicn_hashtb_lookup_node (rt->pitcs->pcs_table, nameptr,
+					 namelen, name_hash,
+					 1
+					 /*is_data. Do not take lock if hit CS */
+					 ,
+					 &node_id0, &dpo_ctx_id0, &vft_id0,
+					 &is_cs0, &hash_entry_id, &bucket_id,
+					 &bucket_is_overflown);
+
+	      stats.pkts_data_count += 1;
+
+	      if (res == HICN_ERROR_NONE)
+		{
+		  /*
+		   * In case the result of the lookup
+		   * is a CS entry, the packet is
+		   * dropped: is_cs0 == 1 moves next0
+		   * from DATA_FWD to ERROR_DROP.
+		   */
+		  next0 = HICN_DATA_PCSLOOKUP_NEXT_DATA_FWD + is_cs0;
+		}
+	    }
+
+	  /* Stash the lookup result in the buffer for downstream nodes. */
+	  hicn_store_internal_state (b0, name_hash, node_id0, dpo_ctx_id0,
+				     vft_id0, hash_entry_id, bucket_id,
+				     bucket_is_overflown);
+
+	  /*
+	   * Verify speculative enqueue, maybe switch current
+	   * next frame
+	   */
+	  /*
+	   * Fix in case of a wrong speculation. Needed to
+	   * clone the data in the right frame
+	   */
+	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+					   n_left_to_next, bi0, next0);
+
+	  /* Maybe trace */
+	  if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) &&
+			     (b0->flags & VLIB_BUFFER_IS_TRACED)))
+	    {
+	      hicn_data_pcslookup_trace_t *t =
+		vlib_add_trace (vm, node, b0, sizeof (*t));
+	      t->pkt_type = HICN_PKT_TYPE_CONTENT;
+	      t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+	      t->next_index = next0;
+	    }
+	}
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+    }
+  /* Snapshot PIT/CS occupancy for the node gauges (no trimming here). */
+  u32 pit_int_count = hicn_pit_get_int_count (rt->pitcs);
+  u32 pit_cs_count = hicn_pit_get_cs_count (rt->pitcs);
+
+  vlib_node_increment_counter (vm, hicn_data_pcslookup_node.index,
+			       HICNFWD_ERROR_PROCESSED, stats.pkts_processed);
+
+  vlib_node_increment_counter (vm, hicn_data_pcslookup_node.index,
+			       HICNFWD_ERROR_DATAS, stats.pkts_data_count);
+
+  update_node_counter (vm, hicn_data_pcslookup_node.index,
+		       HICNFWD_ERROR_INT_COUNT, pit_int_count);
+  update_node_counter (vm, hicn_data_pcslookup_node.index,
+		       HICNFWD_ERROR_CS_COUNT, pit_cs_count);
+  return (frame->n_vectors);
+}
+
+/* packet trace format function */
+static u8 *
+hicn_data_pcslookup_format_trace (u8 * s, va_list * args)
+{
+  /* First two va_args (vm, node) are required by the trace API but unused. */
+  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+  hicn_data_pcslookup_trace_t *t =
+    va_arg (*args, hicn_data_pcslookup_trace_t *);
+
+  s = format (s, "DATA-PCSLOOKUP: pkt: %d, sw_if_index %d, next index %d",
+	      (int) t->pkt_type, t->sw_if_index, t->next_index);
+  return (s);
+}
+
+/*
+ * Node registration for the data forwarder node
+ */
+/* *INDENT-OFF* */
+/* PIT/CS lookup node: PIT hit -> hicn-data-fwd, otherwise error-drop. */
+VLIB_REGISTER_NODE(hicn_data_pcslookup_node) =
+{
+  .function = hicn_data_pcslookup_node_fn,
+  .name = "hicn-data-pcslookup",
+  .vector_size = sizeof(u32),
+  .runtime_data_bytes = sizeof(hicn_data_pcslookup_runtime_t),
+  .format_trace = hicn_data_pcslookup_format_trace,
+  .type = VLIB_NODE_TYPE_INTERNAL,
+  .n_errors = ARRAY_LEN(hicn_data_pcslookup_error_strings),
+  .error_strings = hicn_data_pcslookup_error_strings,
+  .n_next_nodes = HICN_DATA_PCSLOOKUP_N_NEXT,
+  .next_nodes = {
+    [HICN_DATA_PCSLOOKUP_NEXT_DATA_FWD] = "hicn-data-fwd",
+    [HICN_DATA_PCSLOOKUP_NEXT_ERROR_DROP] = "error-drop",
+  },
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/network/error.c b/hicn-plugin/src/network/error.c
new file mode 100644
index 000000000..edd0dd77b
--- /dev/null
+++ b/hicn-plugin/src/network/error.c
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vpp_plugins/hicn/error.h>
+
+/* Printable error strings, expanded from the foreach_hicn_error table. */
+const char *HICN_ERROR_STRING[] = {
+#define _(a,b,c) c,
+  foreach_hicn_error
+#undef _
+};
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/network/face_db.h b/hicn-plugin/src/network/face_db.h
new file mode 100644
index 000000000..4dd8b2f32
--- /dev/null
+++ b/hicn-plugin/src/network/face_db.h
@@ -0,0 +1,151 @@
+/*
+ * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HICN_FACE_DB_H__
+#define __HICN_FACE_DB_H__
+
+#include <vnet/dpo/dpo.h>
+#include "faces/face.h"
+
+/**
+ * @file face_db.h
+ *
+ * Defines a face db that is stored in every PIT entry. A face db contains a
+ * list of the incoming faces of interest packets; it is used to forward data
+ * packets along the interests' reverse path.
+ */
+
+/* Must be power of two */
+#define HICN_FACE_DB_INLINE_FACES 8
+
+#define HICN_PIT_BITMAP_SIZE_BYTE HICN_PARAM_FACES_MAX/8
+#define HICN_PIT_N_HOP_BITMAP_SIZE HICN_PARAM_FACES_MAX
+
+#define HICN_PIT_N_HOP_BUCKET (HICN_PARAM_PIT_ENTRY_PHOPS_MAX - HICN_FACE_DB_INLINE_FACES)
+
+/* Overflow storage for faces beyond the inline capacity of a PIT entry. */
+typedef struct hicn_face_bucket_s
+{
+  /* Array of indexes of virtual faces */
+  hicn_face_id_t faces[HICN_PIT_N_HOP_BUCKET];
+
+  /* Used to check if interests are retransmission */
+  u8 bitmap[HICN_PIT_BITMAP_SIZE_BYTE];
+
+} hicn_face_bucket_t;
+
+extern hicn_face_bucket_t *hicn_face_bucket_pool;
+
+typedef struct __attribute__ ((packed)) hicn_face_db_s
+{
+  /* NOTE(review): the running byte-offset comments below look stale (the
+   * first field is at offset 0 of this struct) -- verify before relying
+   * on them. */
+  /* 19B + 1B = 20B */
+  /* Equal to one or zero */
+  u8 is_overflow;
+
+  /* Number of faces in the last bucket */
+  /* Or next availabe entry for storing a dpo_id_t */
+  /* 20B + 4B = 24B */
+  u32 n_faces;
+
+  /* 24B + 32B (8*4) = 56B */
+  /* Array of indexes of virtual faces */
+  hicn_face_id_t inline_faces[HICN_FACE_DB_INLINE_FACES];
+
+  /* 56B + 4B = 60B */
+  u32 next_bucket;
+
+  /* 60B + 4B = 64B */
+  u32 align;
+  //align back to 64
+
+} hicn_face_db_t;
+
+/**
+ * @brief Return the face stored at position @p index of the face db.
+ * The first HICN_FACE_DB_INLINE_FACES faces live inline in the PIT entry;
+ * later indexes are read from the overflow bucket, wrapped on the bucket
+ * size (mirrors the insertion logic in hicn_face_db_add_face).
+ */
+always_inline hicn_face_id_t
+hicn_face_db_get_dpo_face (u32 index, hicn_face_db_t * face_db)
+{
+  ASSERT (index < face_db->n_faces);
+
+  return index < HICN_FACE_DB_INLINE_FACES ? (face_db->inline_faces[index]) :
+    (pool_elt_at_index (hicn_face_bucket_pool, face_db->next_bucket)->faces
+     [(index - HICN_FACE_DB_INLINE_FACES) & (HICN_PIT_N_HOP_BUCKET - 1)]);
+}
+
+/* Pre-allocate the fixed-size pool of overflow face buckets. */
+always_inline void
+hicn_face_db_init (int max_element)
+{
+  pool_init_fixed (hicn_face_bucket_pool, max_element);
+}
+
+/* Return a pointer to the overflow bucket at @p bucket_index. */
+always_inline hicn_face_bucket_t *
+hicn_face_db_get_bucket (u32 bucket_index)
+{
+  return pool_elt_at_index (hicn_face_bucket_pool, bucket_index);
+}
+
+/**
+ * @brief Append @p face_id to the face db of a PIT entry and mark it in
+ * the retransmission bitmap.
+ *
+ * The first HICN_FACE_DB_INLINE_FACES faces are stored inline in the PIT
+ * entry; subsequent ones go into the overflow bucket at next_bucket.
+ * NOTE(review): n_faces is not bounded here -- once the bucket is full the
+ * index wraps (mask with HICN_PIT_N_HOP_BUCKET - 1) and older faces are
+ * overwritten; confirm callers enforce HICN_PARAM_PIT_ENTRY_PHOPS_MAX.
+ */
+always_inline void
+hicn_face_db_add_face (hicn_face_id_t face_id, hicn_face_db_t * face_db)
+{
+  //ASSERT (dpo->dpoi_index != ~0);
+
+  hicn_face_bucket_t *faces_bkt =
+    pool_elt_at_index (hicn_face_bucket_pool, face_db->next_bucket);
+
+  /* Inline slot while capacity remains, otherwise overflow bucket slot. */
+  hicn_face_id_t *element =
+    face_db->n_faces <
+    HICN_FACE_DB_INLINE_FACES ? &(face_db->inline_faces[face_db->n_faces]) :
+    &(faces_bkt->faces
+      [(face_db->n_faces -
+	HICN_FACE_DB_INLINE_FACES) & (HICN_PIT_N_HOP_BUCKET - 1)]);
+
+  *element = face_id;
+
+  /* Set bit (face_id mod bitmap size) in the retransmission bitmap. */
+  u32 bitmap_index = face_id % HICN_PIT_N_HOP_BITMAP_SIZE;
+  u32 position_array = bitmap_index / 8;
+  u8 bit_index = (u8) (bitmap_index - position_array * 8);
+
+  faces_bkt->bitmap[position_array] |= (0x01 << bit_index);
+  face_db->n_faces++;
+}
+
+/**
+ * @brief Return 1 if face @p index is marked in the db's bitmap (i.e. an
+ * interest from this face was already recorded), 0 otherwise.
+ * NOTE(review): the bitmap is indexed by face id modulo
+ * HICN_PIT_N_HOP_BITMAP_SIZE, so distinct ids could alias if face ids can
+ * exceed HICN_PARAM_FACES_MAX -- confirm the id range at call sites.
+ */
+always_inline u8
+hicn_face_search (hicn_face_id_t index, hicn_face_db_t * face_db)
+{
+  hicn_face_bucket_t *faces_bkt =
+    pool_elt_at_index (hicn_face_bucket_pool, face_db->next_bucket);
+  u32 bitmap_index = index % HICN_PIT_N_HOP_BITMAP_SIZE;
+
+  u32 position_array = bitmap_index / 8;
+  u8 bit_index = bitmap_index - position_array * 8;
+
+  return (faces_bkt->bitmap[position_array] >> bit_index) & 0x01;
+}
+
+/**
+ * @brief Reset the face db: clear the retransmission bitmap, zero the face
+ * count and return the overflow bucket to the pool.
+ * NOTE(review): next_bucket is left pointing at a freed pool element after
+ * pool_put_index -- the db must not be used again until a fresh bucket is
+ * assigned.
+ */
+always_inline void
+hicn_faces_flush (hicn_face_db_t * face_db)
+{
+  hicn_face_bucket_t *faces_bkt =
+    pool_elt_at_index (hicn_face_bucket_pool, face_db->next_bucket);
+  clib_memset_u8 (&(faces_bkt->bitmap), 0, HICN_PIT_BITMAP_SIZE_BYTE);
+  face_db->n_faces = 0;
+  pool_put_index (hicn_face_bucket_pool, face_db->next_bucket);
+}
+
+
+#endif /* // __HICN_FACE_DB_H__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/network/faces/app/address_mgr.c b/hicn-plugin/src/network/faces/app/address_mgr.c
new file mode 100644
index 000000000..ecc99a200
--- /dev/null
+++ b/hicn-plugin/src/network/faces/app/address_mgr.c
@@ -0,0 +1,242 @@
+/*
+ * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Copyright (c) 2017-2019 by cisco systems inc. All rights reserved.
+ *
+ */
+
+#include <dlfcn.h>
+
+#include <vlib/vlib.h>
+#include <vnet/ip/ip6_packet.h>
+#include <vnet/ip/ip4.h> //ip4_add_del_ip_address
+#include <vnet/ip/ip6.h> //ip6_add_del_ip_address
+#include <vnet/fib/fib_types.h> //FIB_PROTOCOL_IP4/6, FIB_NODE_INDEX_INVALID
+#include <vnet/fib/fib_entry.h> //FIB_SOURCE_PRIORITY_HI
+#include <vnet/fib/fib_table.h>
+#include <vppinfra/format.h>
+#include <vnet/interface.h> //appif_flags
+#include <vnet/interface_funcs.h> //vnet_sw_interface_set_flags
+
+#include <vpp_plugins/hicn/error.h>
+
+#include "address_mgr.h"
+#include "../../hicn.h"
+#include "../../infra.h"
+#include "../face.h"
+#include "../../strategy_dpo_ctx.h"
+#include "../../route.h"
+
+/* Cursors holding the next candidate ip4/ip6 local addresses to hand out. */
+typedef struct address_mgr_main_s
+{
+  ip4_address_t next_ip4_local_addr;
+  ip6_address_t next_ip6_local_addr;
+} address_mgr_main_t;
+
+address_mgr_main_t address_mgr_main;
+
+/* Add val to an ip4 address, doing the arithmetic in host byte order. */
+static void
+increment_v4_address (ip4_address_t * a, u32 val)
+{
+  u32 v;
+
+  v = clib_net_to_host_u32 (a->as_u32) + val;
+  a->as_u32 = clib_host_to_net_u32 (v);
+}
+
+/*
+ * Add val to the low 64 bits of an ip6 address (host byte order).
+ * NOTE(review): a carry does not propagate into the upper 64 bits;
+ * adequate for the small increments used in this file, but not a
+ * general 128-bit add.
+ */
+static void
+increment_v6_address (ip6_address_t * a, u64 val)
+{
+  u64 v;
+
+  v = clib_net_to_host_u64 (a->as_u64[1]) + val;
+  a->as_u64[1] = clib_host_to_net_u64 (v);
+}
+
+/**
+ * @brief Hand out two consecutive ip4 addresses belonging to the same /31,
+ * retrying (stepping by 2) until the candidates have no exact-match route
+ * in the hICN fib table.
+ * NOTE(review): if the first lookup hits but the second misses, the loop
+ * still exits, so appif_addr may collide with an existing route -- confirm
+ * this is intended.
+ */
+void
+get_two_ip4_addresses (ip4_address_t * appif_addr, ip4_address_t * nh_addr)
+{
+  /* We want two consecutives address that fall into a /31 mask */
+  if (address_mgr_main.next_ip4_local_addr.as_u8[3] & 0x01)
+    increment_v4_address (&(address_mgr_main.next_ip4_local_addr), 1);
+
+  *appif_addr = address_mgr_main.next_ip4_local_addr;
+  increment_v4_address (&(address_mgr_main.next_ip4_local_addr), 1);
+  *nh_addr = address_mgr_main.next_ip4_local_addr;
+  fib_prefix_t fib_pfx;
+  fib_node_index_t fib_entry_index = FIB_NODE_INDEX_INVALID;
+  u32 fib_index;
+
+  fib_pfx.fp_proto = FIB_PROTOCOL_IP4;
+  fib_pfx.fp_len = ADDR_MGR_IP4_LEN;
+  /* At this point the face exists in the face table */
+  do
+    {
+      /* Check if the route already exist in the fib */
+      fib_pfx.fp_addr = to_ip46 ( /* is_v6 */ 0, appif_addr->as_u8);
+      fib_index = fib_table_find_or_create_and_lock (fib_pfx.fp_proto,
+						     HICN_FIB_TABLE,
+						     FIB_SOURCE_PRIORITY_HI);
+      fib_entry_index = fib_table_lookup_exact_match (fib_index, &fib_pfx);
+      fib_table_unlock (fib_index, fib_pfx.fp_proto, FIB_SOURCE_PRIORITY_HI);
+      if (fib_entry_index != FIB_NODE_INDEX_INVALID)
+	{
+	  fib_pfx.fp_addr = to_ip46 ( /* is_v6 */ 0, nh_addr->as_u8);
+	  fib_index = fib_table_find_or_create_and_lock (fib_pfx.fp_proto,
+							 HICN_FIB_TABLE,
+							 FIB_SOURCE_PRIORITY_HI);
+	  fib_entry_index =
+	    fib_table_lookup_exact_match (fib_index, &fib_pfx);
+	  fib_table_unlock (fib_index, fib_pfx.fp_proto,
+			    FIB_SOURCE_PRIORITY_HI);
+	}
+      if (fib_entry_index != FIB_NODE_INDEX_INVALID)
+	{
+	  increment_v4_address (appif_addr, 2);
+	  increment_v4_address (nh_addr, 2);
+	}
+    }
+  while (fib_entry_index != FIB_NODE_INDEX_INVALID);
+
+  /* Advance the cursor past the pair just handed out. */
+  address_mgr_main.next_ip4_local_addr = *nh_addr;
+  increment_v4_address (&(address_mgr_main.next_ip4_local_addr), 1);
+}
+
+/**
+ * @brief Hand out two consecutive ip6 addresses belonging to the same /127,
+ * retrying (stepping by 2) until the candidates have no exact-match route
+ * in fib table 0.
+ */
+void
+get_two_ip6_addresses (ip6_address_t * appif_addr, ip6_address_t * nh_addr)
+{
+
+  /* We want two consecutives address that fall into a /127 mask */
+  if (address_mgr_main.next_ip6_local_addr.as_u8[15] & 0x01)
+    increment_v6_address (&(address_mgr_main.next_ip6_local_addr), 1);
+
+  *appif_addr = address_mgr_main.next_ip6_local_addr;
+  increment_v6_address (&(address_mgr_main.next_ip6_local_addr), 1);
+  *nh_addr = address_mgr_main.next_ip6_local_addr;
+
+
+  fib_prefix_t fib_pfx;
+  fib_node_index_t fib_entry_index = FIB_NODE_INDEX_INVALID;
+  u32 fib_index;
+
+  fib_pfx.fp_proto = FIB_PROTOCOL_IP6;
+  fib_pfx.fp_len = ADDR_MGR_IP6_LEN;
+
+  fib_index = fib_table_find (fib_pfx.fp_proto, 0);
+
+  /* At this point the face exists in the face table */
+  do
+    {
+      /* Check if the route already exist in the fib */
+      fib_pfx.fp_addr = to_ip46 ( /* is_v6 */ 1, appif_addr->as_u8);
+
+      fib_entry_index = fib_table_lookup_exact_match (fib_index, &fib_pfx);
+      //fib_table_unlock (fib_index, fib_pfx.fp_proto, FIB_SOURCE_PRIORITY_HI);
+      if (fib_entry_index != FIB_NODE_INDEX_INVALID)
+	{
+	  /* BUGFIX: the next-hop address is v6 as well -- this used
+	   * to_ip46 (0, ...), which read the ip6 bytes as an ip4 address
+	   * and looked up the wrong prefix. */
+	  fib_pfx.fp_addr = to_ip46 ( /* is_v6 */ 1, nh_addr->as_u8);
+
+	  fib_entry_index =
+	    fib_table_lookup_exact_match (fib_index, &fib_pfx);
+	  // fib_table_unlock (fib_index, fib_pfx.fp_proto,
+	  // FIB_SOURCE_PRIORITY_HI);
+	}
+      if (fib_entry_index != FIB_NODE_INDEX_INVALID)
+	{
+	  increment_v6_address (appif_addr, 2);
+	  increment_v6_address (nh_addr, 2);
+	}
+    }
+  while (fib_entry_index != FIB_NODE_INDEX_INVALID);
+
+  /* Advance the cursor past the pair just handed out. */
+  address_mgr_main.next_ip6_local_addr = *nh_addr;
+  increment_v6_address (&(address_mgr_main.next_ip6_local_addr), 1);
+}
+
+/**
+ * @brief Return the next ip4 address that has no exact-match /32 route in
+ * the hICN fib table; the manager's ip4 cursor is advanced past it.
+ */
+ip4_address_t
+get_ip4_address ()
+{
+  ip4_address_t *prefix = &address_mgr_main.next_ip4_local_addr;
+  fib_prefix_t fib_pfx;
+  fib_node_index_t fib_entry_index = FIB_NODE_INDEX_INVALID;
+  u32 fib_index;
+
+  fib_pfx.fp_proto = FIB_PROTOCOL_IP4;
+  fib_pfx.fp_len = ADDR_MGR_IP4_LEN;
+  /* At this point the face exists in the face table */
+  do
+    {
+      /* Check if the route already exist in the fib */
+      fib_pfx.fp_addr = to_ip46 ( /* is_v6 */ 0, prefix->as_u8);
+      fib_index = fib_table_find_or_create_and_lock (fib_pfx.fp_proto,
+						     HICN_FIB_TABLE,
+						     FIB_SOURCE_PRIORITY_HI);
+      fib_entry_index = fib_table_lookup_exact_match (fib_index, &fib_pfx);
+      fib_table_unlock (fib_index, fib_pfx.fp_proto, FIB_SOURCE_PRIORITY_HI);
+      increment_v4_address (prefix, 1);
+    }
+  while (fib_entry_index != FIB_NODE_INDEX_INVALID);
+
+  /* fib_pfx still holds the last (free) candidate that was looked up. */
+  return fib_pfx.fp_addr.ip4;
+}
+
+/**
+ * @brief Return the next ip6 address that has no exact-match /128 route in
+ * the hICN fib table; the manager's ip6 cursor is advanced past it.
+ */
+ip6_address_t
+get_ip6_address ()
+{
+  ip6_address_t *prefix = &address_mgr_main.next_ip6_local_addr;
+  fib_prefix_t fib_pfx;
+  fib_node_index_t fib_entry_index = FIB_NODE_INDEX_INVALID;
+  u32 fib_index;
+
+  fib_pfx.fp_proto = FIB_PROTOCOL_IP6;
+  fib_pfx.fp_len = ADDR_MGR_IP6_LEN;
+  /* At this point the face exists in the face table */
+  do
+    {
+      /* Check if the route already exist in the fib */
+      fib_pfx.fp_addr = to_ip46 ( /* is_v6 */ 1, prefix->as_u8);
+      fib_index = fib_table_find_or_create_and_lock (fib_pfx.fp_proto,
+						     HICN_FIB_TABLE,
+						     FIB_SOURCE_PRIORITY_HI);
+      fib_entry_index = fib_table_lookup_exact_match (fib_index, &fib_pfx);
+      fib_table_unlock (fib_index, fib_pfx.fp_proto, FIB_SOURCE_PRIORITY_HI);
+      increment_v6_address (prefix, 1);
+    }
+  while (fib_entry_index != FIB_NODE_INDEX_INVALID);
+
+  /* fib_pfx still holds the last (free) candidate that was looked up. */
+  return fib_pfx.fp_addr.ip6;
+}
+
+/*
+ * Seed the address cursors: ip4 starts in the link-local range at
+ * 169.254.1.1, ip6 starts at fc00::1.
+ */
+void
+address_mgr_init ()
+{
+
+  address_mgr_main.next_ip4_local_addr.as_u8[0] = 169;
+  address_mgr_main.next_ip4_local_addr.as_u8[1] = 254;
+  address_mgr_main.next_ip4_local_addr.as_u8[2] = 1;
+  address_mgr_main.next_ip4_local_addr.as_u8[3] = 1;
+
+  ip6_address_set_zero (&address_mgr_main.next_ip6_local_addr);
+  address_mgr_main.next_ip6_local_addr.as_u16[0] =
+    clib_host_to_net_u16 (0xfc00);
+  address_mgr_main.next_ip6_local_addr.as_u8[15] = 1;
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/network/faces/app/address_mgr.h b/hicn-plugin/src/network/faces/app/address_mgr.h
new file mode 100644
index 000000000..99450dcdd
--- /dev/null
+++ b/hicn-plugin/src/network/faces/app/address_mgr.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _ADDRESS_MGR_H_
+#define _ADDRESS_MGR_H_
+
+/**
+ * @file
+ *
+ * @brief Address manager.
+ *
+ * Address manager that maintains a pool of ip4 and ip6 addresses to assign to
+ * an interface.
+ */
+
+#define ADDR_MGR_IP4_LEN 32
+#define ADDR_MGR_IP4_CONS_LEN 31
+#define ADDR_MGR_IP6_LEN 128
+#define ADDR_MGR_IP6_CONS_LEN 127
+
+/**
+ * @brief Get two consecutive IP v4 addresses from the same /31 subnet
+ *
+ * @param addr1 first ip address with the least significant bit set to 0
+ * @param addr2 second ip address with the least significant bit set to 1
+ */
+void get_two_ip4_addresses (ip4_address_t * addr1, ip4_address_t * addr2);
+
+/**
+ * @brief Get two consecutive IP v6 addresses from the same /127 subnet
+ *
+ * @param addr1 first ip address with the least significant bit set to 0
+ * @param addr2 second ip address with the least significant bit set to 1
+ */
+void get_two_ip6_addresses (ip6_address_t * addr1, ip6_address_t * addr2);
+
+/**
+ * @brief Get one IP v4 address
+ *
+ * @return ip address
+ */
+ip4_address_t get_ip4_address (void);
+
+/**
+ * @brief Get one IP v6 address
+ *
+ * @return ip address
+ */
+ip6_address_t get_ip6_address (void);
+
+/**
+ * @brief Init the address manager
+ */
+void address_mgr_init (void);
+
+#endif /* _ADDRESS_MGR_ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/hicn-plugin/src/network/faces/app/face_app_cli.c b/hicn-plugin/src/network/faces/app/face_app_cli.c
new file mode 100644
index 000000000..1aa27adc7
--- /dev/null
+++ b/hicn-plugin/src/network/faces/app/face_app_cli.c
@@ -0,0 +1,205 @@
+/*
+ * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/vnet.h>
+#include <vnet/dpo/dpo.h>
+#include <vlib/vlib.h>
+#include <vnet/ip/ip6_packet.h>
+
+//#include "../face_dpo.h"
+#include "../face.h"
+#include "face_prod.h"
+#include "face_cons.h"
+
+#define HICN_FACE_NONE 0
+#define HICN_FACE_DELETE 1
+#define HICN_FACE_ADD 2
+
+/*
+ * CLI handler for "hicn face app".
+ *
+ * Syntax:
+ *   hicn face app add intfc <sw_if> {prod prefix <prefix> [cs_size <n>] | cons}
+ *   hicn face app del id <face_id>
+ *
+ * NOTE(review): none of the return paths call unformat_free (line_input)
+ * after the successful unformat_user above — looks like a leak; confirm
+ * against other CLI handlers in this plugin.
+ */
+static clib_error_t *
+hicn_face_app_cli_set_command_fn (vlib_main_t * vm,
+                                  unformat_input_t * main_input,
+                                  vlib_cli_command_t * cmd)
+{
+  vnet_main_t *vnm = vnet_get_main ();
+  fib_prefix_t prefix;
+  hicn_face_id_t face_id1 = HICN_FACE_NULL;
+  hicn_face_id_t face_id2 = HICN_FACE_NULL;
+  u32 cs_reserved = HICN_PARAM_FACE_DFT_CS_RESERVED;
+  int ret = HICN_ERROR_NONE;
+  int sw_if;
+  int face_op = HICN_FACE_NONE;
+  int prod = 0;
+
+
+  /* Get a line of input. */
+  unformat_input_t _line_input, *line_input = &_line_input;
+  if (!unformat_user (main_input, unformat_line_input, line_input))
+    {
+      return (0);
+    }
+
+  /* Parse the line: the verb (add/del) must come first; the remaining
+   * tokens are only accepted in the context of that verb. */
+  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+    {
+      if (unformat (line_input, "del"))
+        {
+          face_op = HICN_FACE_DELETE;
+        }
+      else if (face_op == HICN_FACE_DELETE
+               && unformat (line_input, "id %d", &face_id1))
+        ;
+      else if (unformat (line_input, "add"))
+        {
+          face_op = HICN_FACE_ADD;
+        }
+      else if (face_op == HICN_FACE_ADD)
+        {
+          if (unformat (line_input, "intfc %U",
+                        unformat_vnet_sw_interface, vnm, &sw_if))
+            ;
+          else
+            if (unformat
+                (line_input, "prod prefix %U/%d", unformat_ip46_address,
+                 &prefix.fp_addr, IP46_TYPE_ANY, &prefix.fp_len))
+            {
+              prod = 1;
+            }
+          /* cs_size is only meaningful after "prod prefix ..." was seen. */
+          else if (prod && unformat (line_input, "cs_size %d", &cs_reserved))
+            ;
+          else if (unformat (line_input, "cons"))
+            ;
+          else
+            {
+              return clib_error_return (0, "%s '%U'",
+                                        get_error_string
+                                        (HICN_ERROR_CLI_INVAL),
+                                        format_unformat_error, line_input);
+            }
+        }
+      else
+        {
+          return clib_error_return (0, "%s '%U'",
+                                    get_error_string (HICN_ERROR_CLI_INVAL),
+                                    format_unformat_error, line_input);
+        }
+    }
+
+  /* Validate the face id when one was supplied ("del id <n>"). */
+  if (face_id1 != HICN_FACE_NULL)
+    {
+
+      if (!hicn_dpoi_idx_is_valid (face_id1))
+        {
+          return clib_error_return (0, "%s, face_id1 %d not valid",
+                                    get_error_string (ret), face_id1);
+        }
+    }
+
+  int rv;
+  switch (face_op)
+    {
+    case HICN_FACE_ADD:
+      {
+        ip46_address_t prod_addr;
+        ip4_address_t cons_addr4;
+        ip6_address_t cons_addr6;
+
+        if (prod)
+          {
+            prefix.fp_proto =
+              ip46_address_is_ip4 (&prefix.
+                                   fp_addr) ? FIB_PROTOCOL_IP4 :
+              FIB_PROTOCOL_IP6;
+            rv =
+              hicn_face_prod_add (&prefix, sw_if, &cs_reserved, &prod_addr,
+                                  &face_id1);
+            if (rv == HICN_ERROR_NONE)
+              {
+                u8 *sbuf = NULL;
+                sbuf =
+                  format (sbuf, "Face id: %d, producer address %U", face_id1,
+                          format_ip46_address, &prod_addr,
+                          0 /*IP46_ANY_TYPE */ );
+                vlib_cli_output (vm, "%s", sbuf);
+              }
+            else
+              {
+                return clib_error_return (0, get_error_string (rv));
+              }
+          }
+        else
+          {
+            /* Consumer face: creates one v4 and one v6 face. */
+            rv =
+              hicn_face_cons_add (&cons_addr4, &cons_addr6, sw_if, &face_id1,
+                                  &face_id2);
+            if (rv == HICN_ERROR_NONE)
+              {
+                u8 *sbuf = NULL;
+                sbuf =
+                  format (sbuf,
+                          "Face id: %d, address v4 %U, face id: %d address v6 %U",
+                          face_id1, format_ip4_address, &cons_addr4, face_id2,
+                          format_ip6_address, &cons_addr6);
+                vlib_cli_output (vm, "%s", sbuf);
+              }
+            else
+              {
+                return clib_error_return (0, get_error_string (rv));
+              }
+          }
+        break;
+      }
+    case HICN_FACE_DELETE:
+      {
+        /* NOTE(review): "del" without "id" leaves face_id1 ==
+         * HICN_FACE_NULL and it is passed straight to
+         * hicn_dpoi_get_from_idx below — confirm that is rejected. */
+        hicn_face_t *face = hicn_dpoi_get_from_idx (face_id1);
+
+        if (face->flags & HICN_FACE_FLAGS_APPFACE_CONS)
+          rv = hicn_face_cons_del (face_id1);
+        else
+          rv = hicn_face_prod_del (face_id1);
+        if (rv == HICN_ERROR_NONE)
+          {
+            vlib_cli_output (vm, "Face %d deleted", face_id1);
+          }
+        else
+          {
+            return clib_error_return (0, get_error_string (rv));
+          }
+        break;
+      }
+    default:
+      return clib_error_return (0, "Operation (%d) not implemented", face_op);
+      break;
+    }
+  return (rv == HICN_ERROR_NONE) ? 0 : clib_error_return (0, "%s\n",
+                                                          get_error_string
+                                                          (rv));
+}
+
+/* cli declaration for 'cfg face' */
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (hicn_face_app_cli_set_command, static) =
+{
+ .path = "hicn face app",
+ .short_help = "hicn face app {add intfc <sw_if> { prod prefix <hicn_prefix> cs_size <size_in_packets>} {cons} | {del <face_id>}",
+ .function = hicn_face_app_cli_set_command_fn,
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/hicn-plugin/src/network/faces/app/face_cons.c b/hicn-plugin/src/network/faces/app/face_cons.c
new file mode 100644
index 000000000..d44ba1a2b
--- /dev/null
+++ b/hicn-plugin/src/network/faces/app/face_cons.c
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/ip/ip6_packet.h>
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+
+#include "face_cons.h"
+#include "address_mgr.h"
+#include "../../infra.h"
+
+/**
+ * @brief Create a consumer application face (one v4 and one v6 iface).
+ *
+ * Brings the interface up, assigns one address of a consecutive pair
+ * (from the address manager) to the interface for each family, returns
+ * the other address of each pair as the consumer next hop, and creates
+ * the two ifaces in the face db flagged as consumer app faces.
+ *
+ * @param nh_addr4 out: v4 next-hop address for the application
+ * @param nh_addr6 out: v6 next-hop address for the application
+ * @param swif interface associated to the face
+ * @param faceid1 out: id of the v4 face
+ * @param faceid2 out: id of the v6 face
+ */
+int
+hicn_face_cons_add (ip4_address_t * nh_addr4, ip6_address_t * nh_addr6,
+                    u32 swif, hicn_face_id_t * faceid1,
+                    hicn_face_id_t * faceid2)
+{
+  vlib_main_t *vm = vlib_get_main ();
+  vnet_main_t *vnm = vnet_get_main ();
+
+  hicn_main_t *hm = &hicn_main;
+
+  ip46_address_t if_ip;
+  ip46_address_reset (&if_ip);
+  /* Zero the output addresses so callers never read stale bytes. */
+  nh_addr4->as_u32 = 0;
+  nh_addr6->as_u64[0] = 0;
+  nh_addr6->as_u64[1] = 0;
+  u32 if_flags = 0;
+
+  if (!hm->is_enabled)
+    {
+      return HICN_ERROR_FWD_NOT_ENABLED;
+    }
+  if_flags |= VNET_SW_INTERFACE_FLAG_ADMIN_UP;
+  vnet_sw_interface_set_flags (vnm, swif, if_flags);
+
+  /* v4: two consecutive addresses from the same /31. */
+  get_two_ip4_addresses (&(if_ip.ip4), nh_addr4);
+  ip4_add_del_interface_address (vm,
+                                 swif,
+                                 &(if_ip.ip4),
+                                 ADDR_MGR_IP4_CONS_LEN, 0 /* is_del */ );
+
+  ip46_address_t nh_addr = to_ip46 (0, (u8 *) nh_addr4);
+
+  index_t adj_index =
+    adj_nbr_find (FIB_PROTOCOL_IP4, VNET_LINK_IP4, &nh_addr, swif);
+
+  hicn_iface_add (&nh_addr, swif, faceid1, DPO_PROTO_IP4, adj_index);
+
+  hicn_face_t *face = hicn_dpoi_get_from_idx (*faceid1);
+  face->flags |= HICN_FACE_FLAGS_APPFACE_CONS;
+
+  /* v6: two consecutive addresses from the same /127. */
+  get_two_ip6_addresses (&(if_ip.ip6), nh_addr6);
+  ip6_add_del_interface_address (vm,
+                                 swif,
+                                 &(if_ip.ip6),
+                                 ADDR_MGR_IP6_CONS_LEN, 0 /* is_del */ );
+
+  /* BUGFIX: the v6 adjacency lookup used &nh_addr, which still holds the
+   * v4-mapped next hop, and hicn_iface_add cast the raw ip6_address_t to
+   * ip46_address_t. Build a proper ip46 from the v6 next hop and use it
+   * for both. */
+  ip46_address_t nh_addr6_46 = to_ip46 (1 /* isv6 */ , (u8 *) nh_addr6);
+
+  adj_index =
+    adj_nbr_find (FIB_PROTOCOL_IP6, VNET_LINK_IP6, &nh_addr6_46, swif);
+
+  hicn_iface_add (&nh_addr6_46, swif, faceid2, DPO_PROTO_IP6, adj_index);
+
+  face = hicn_dpoi_get_from_idx (*faceid2);
+  face->flags |= HICN_FACE_FLAGS_APPFACE_CONS;
+
+  return HICN_ERROR_NONE;
+}
+
+/*
+ * Delete a consumer application face. Only faces flagged as consumer
+ * app faces are accepted; anything else reports APPFACE_NOT_FOUND.
+ */
+int
+hicn_face_cons_del (hicn_face_id_t face_id)
+{
+  /* Reject ids that do not reference a live face. */
+  if (!hicn_dpoi_idx_is_valid (face_id))
+    return HICN_ERROR_APPFACE_NOT_FOUND;
+
+  hicn_face_t *face = hicn_dpoi_get_from_idx (face_id);
+
+  /* Only faces created through hicn_face_cons_add may be deleted here. */
+  if (!(face->flags & HICN_FACE_FLAGS_APPFACE_CONS))
+    return HICN_ERROR_APPFACE_NOT_FOUND;
+
+  return hicn_face_del (face_id);
+}
+
+/*
+ * Format callback for consumer app faces: appends a fixed tag.
+ * Both va_args are consumed but do not affect the output.
+ */
+u8 *
+format_hicn_face_cons (u8 * s, va_list * args)
+{
+  CLIB_UNUSED (index_t index) = va_arg (*args, index_t);
+  CLIB_UNUSED (u32 indent) = va_arg (*args, u32);
+
+  return format (s, " (consumer face)");
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/network/faces/app/face_cons.h b/hicn-plugin/src/network/faces/app/face_cons.h
new file mode 100644
index 000000000..5f8f5dde8
--- /dev/null
+++ b/hicn-plugin/src/network/faces/app/face_cons.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _FACE_CONSUMER_H_
+#define _FACE_CONSUMER_H_
+
+#include <vnet/vnet.h>
+#include "../face.h"
+
+/**
+ * @file
+ *
+ * @brief Consumer application face.
+ *
+ * A consumer application face is built upon an ip face and identify a local
+ * consumer application (co-located with the forwarder) that acts as a
+ * consumer. The interface used by the consumer application face is
+ * assumed to be reserved only for hICN traffic (e.g., dedicated memif that
+ * connects the application to the forwarder). Only one application face can be
+ * assigned to an interface.
+ *
+ * In the vlib graph a consumer application face directly connect the
+ * device-input node to the hicn-vface-ip node.
+ */
+
+/**
+ * @brief Add a new consumer application face
+ *
+ * The method creates the internal ip face and set the ip address to the interface.
+ * @param nh_addr4 ipv4 address to assign to interface used by the application to
+ * send interest to the consumer face
+ * @param nh_addr6 ipv6 address to assign to interface used by the application to
+ * send interest to the consumer face
+ * @param swif interface associated to the face
+ */
+int
+hicn_face_cons_add (ip4_address_t * nh_addr4, ip6_address_t * nh_addr6,
+ u32 swif, hicn_face_id_t * faceid1,
+ hicn_face_id_t * faceid2);
+
+/**
+ * @brief Delete an existing consumer application face
+ *
+ * @param face_id Id of the consumer application face
+ */
+int hicn_face_cons_del (hicn_face_id_t face_id);
+
+/**
+ * @brief Format an application consumer face
+ *
+ * @param s Pointer to a previous string. If null it will be initialize
+ * @param args Array storing input values. Expected u32 face_id and u32 indent
+ * @return String with the formatted face
+ */
+u8 *format_hicn_face_cons (u8 * s, va_list * args);
+
+
+#endif /* _FACE_CONSUMER_H_ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/network/faces/app/face_prod.c b/hicn-plugin/src/network/faces/app/face_prod.c
new file mode 100644
index 000000000..645154325
--- /dev/null
+++ b/hicn-plugin/src/network/faces/app/face_prod.c
@@ -0,0 +1,369 @@
+/*
+ * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/ip/ip6_packet.h>
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vnet/interface_funcs.h>
+
+#include "face_prod.h"
+#include "address_mgr.h"
+#include "../../infra.h"
+#include "../../route.h"
+#include "../../cache_policies/cs_lru.h"
+
+hicn_face_prod_state_t *face_state_vec;
+
+/* used to check if an interface is already in the vector */
+u32 *face_state_pool;
+
+/*
+ * Register producer app state for interface swif: remember the producer
+ * prefix and enable the hicn-face-prod-input feature on the matching
+ * unicast arc. Fails if the interface already has an app face.
+ */
+static int
+hicn_app_state_create (u32 swif, fib_prefix_t * prefix)
+{
+  /* Make sure that the pool is not empty */
+  pool_validate_index (face_state_pool, 0);
+
+  u32 *swif_app;
+  u8 found = 0;
+  /* Scan the pool of busy interfaces for a duplicate registration. */
+  /* *INDENT-OFF* */
+  pool_foreach (swif_app, face_state_pool,{
+    if (*swif_app == swif)
+      {
+        found = 1;
+      }
+  }
+  );
+  /* *INDENT-ON* */
+
+
+  if (found)
+    return HICN_ERROR_APPFACE_ALREADY_ENABLED;
+
+
+  /* Create the appif and store in the vector */
+  vec_validate (face_state_vec, swif);
+  clib_memcpy (&(face_state_vec[swif].prefix), prefix, sizeof (fib_prefix_t));
+
+  /* Set as busy the element in the vector */
+  pool_get (face_state_pool, swif_app);
+  *swif_app = swif;
+
+  /* The prefix family selects which unicast feature arc is enabled. */
+  int ret = HICN_ERROR_NONE;
+  if (ip46_address_is_ip4 (&(prefix->fp_addr)))
+    {
+      ret =
+        vnet_feature_enable_disable ("ip4-unicast", "hicn-face-prod-input",
+                                     swif, 1, 0, 0);
+    }
+  else
+    {
+      ret =
+        vnet_feature_enable_disable ("ip6-unicast", "hicn-face-prod-input",
+                                     swif, 1, 0, 0);
+    }
+
+  return ret == 0 ? HICN_ERROR_NONE : HICN_ERROR_APPFACE_FEATURE;
+}
+
+/*
+ * Tear down the producer app state for interface swif: disable the
+ * hicn-face-prod-input feature on the matching unicast arc, free the
+ * pool slot marking the interface as busy and clear the stored prefix.
+ */
+static int
+hicn_app_state_del (u32 swif)
+{
+  /* Make sure that the pool is not empty */
+  pool_validate_index (face_state_pool, 0);
+
+  u32 *temp;
+  u32 *swif_app = NULL;
+  u8 found = 0;
+  fib_prefix_t *prefix;
+  /* Locate the pool element registered for this interface. */
+  /* *INDENT-OFF* */
+  pool_foreach (temp, face_state_pool,{
+    if (*temp == swif)
+      {
+        found = 1;
+        swif_app = temp;
+      }
+  }
+  );
+  /* *INDENT-ON* */
+
+  if (!found)
+    return HICN_ERROR_APPFACE_NOT_FOUND;
+
+  /* The prefix stored at creation time selects the feature arc. */
+  prefix = &(face_state_vec[swif].prefix);
+
+  int ret = HICN_ERROR_NONE;
+  if (ip46_address_is_ip4 (&prefix->fp_addr))
+    {
+      ret =
+        vnet_feature_enable_disable ("ip4-unicast", "hicn-face-prod-input",
+                                     swif, 0, 0, 0);
+    }
+  else
+    {
+      ret =
+        vnet_feature_enable_disable ("ip6-unicast", "hicn-face-prod-input",
+                                     swif, 0, 0, 0);
+    }
+
+  /* Release the pool slot and clear the per-interface state. */
+  pool_put (face_state_pool, swif_app);
+  memset (&face_state_vec[swif], 0, sizeof (hicn_face_prod_state_t));
+
+  return ret == 0 ? HICN_ERROR_NONE : HICN_ERROR_APPFACE_FEATURE;
+}
+
+/**
+ * @brief Create a producer application face for @p prefix on @p sw_if.
+ *
+ * Brings the interface up, assigns a local/remote address pair to it,
+ * installs a route towards the application for the (masked) producer
+ * prefix, enables the hicn-face-prod-input feature on the interface and
+ * flags the resulting face as a producer app face.
+ *
+ * @param prefix producer prefix; masked to fp_len in place
+ * @param sw_if interface associated to the face
+ * @param cs_reserved requested CS quota (currently not applied — see the
+ *        commented-out hicn_face_prod_set_lru_max call below)
+ * @param prod_addr out: local address assigned to the interface
+ * @param faceid out: id of the created face
+ */
+int
+hicn_face_prod_add (fib_prefix_t * prefix, u32 sw_if, u32 * cs_reserved,
+                    ip46_address_t * prod_addr, hicn_face_id_t * faceid)
+{
+  vlib_main_t *vm = vlib_get_main ();
+  vnet_main_t *vnm = vnet_get_main ();
+
+  hicn_main_t *hm = &hicn_main;
+
+  /* Zero-init both: remote_app_ip is only written on the face-creation
+   * path below but is read afterwards when building the route. */
+  ip46_address_t local_app_ip;
+  ip46_address_t remote_app_ip;
+  ip46_address_reset (&local_app_ip);
+  ip46_address_reset (&remote_app_ip);
+  u32 if_flags = 0;
+
+  if (!hm->is_enabled)
+    {
+      return HICN_ERROR_FWD_NOT_ENABLED;
+    }
+
+  if (vnet_get_sw_interface_or_null (vnm, sw_if) == NULL)
+    {
+      return HICN_ERROR_FACE_HW_INT_NOT_FOUND;
+    }
+
+  int ret = HICN_ERROR_NONE;
+  hicn_face_t *face = NULL;
+
+  if_flags |= VNET_SW_INTERFACE_FLAG_ADMIN_UP;
+  vnet_sw_interface_set_flags (vnm, sw_if, if_flags);
+
+  u8 *s0;
+  s0 = format (0, "Prefix %U", format_fib_prefix, prefix);
+
+  vlib_cli_output (vm, "Received request for %s, swif %d\n", s0, sw_if);
+
+  if (ip46_address_is_zero (&prefix->fp_addr))
+    {
+      return HICN_ERROR_APPFACE_PROD_PREFIX_NULL;
+    }
+
+  /* BUGFIX: isv6 was computed as ip46_address_is_ip4 (prod_addr), i.e.
+   * inverted, and it read *prod_addr before that output parameter is
+   * ever assigned. Derive the family from the producer prefix instead. */
+  u8 isv6 = !ip46_address_is_ip4 (&prefix->fp_addr);
+  /* NOTE(review): prod_addr has not been set at this point, so this
+   * lookup keys on whatever the caller passed in — confirm whether the
+   * local/remote app address was intended here. */
+  index_t adj_index =
+    adj_nbr_find (isv6 ? FIB_PROTOCOL_IP6 : FIB_PROTOCOL_IP4,
+                  isv6 ? VNET_LINK_IP6 : VNET_LINK_IP4, prod_addr, sw_if);
+
+  /*
+   * Check if a producer face is already existing for the same prefix
+   * and sw_if
+   */
+  face = hicn_face_get (&(prefix->fp_addr), sw_if,
+                        &hicn_face_hashtb, adj_index);
+
+  if (face != NULL)
+    {
+      if (!(face->flags & HICN_FACE_FLAGS_DELETED))
+        return HICN_ERROR_FACE_ALREADY_CREATED;
+
+      /*
+       * Something went wrong: a consumer face exists for the
+       * producer's prefix. It should never happen; this is a safety
+       * check.
+       */
+      if (face->flags & HICN_FACE_FLAGS_APPFACE_CONS)
+        return HICN_ERROR_FACE_ALREADY_CREATED;
+
+      /* If the face exists but is marked as deleted, undelete it */
+      if (face->flags & HICN_FACE_FLAGS_DELETED)
+        {
+          /* BUGFIX: clearing a flag needs AND with its complement;
+           * "face->flags &= HICN_FACE_FLAGS_DELETED" kept ONLY the
+           * deleted bit instead of removing it. */
+          face->flags &= ~HICN_FACE_FLAGS_DELETED;
+        }
+    }
+  else
+    {
+      /* Otherwise create the face */
+      if (ip46_address_is_ip4 (&prefix->fp_addr))
+        {
+          /*
+           * Retrieve a /31 pair: the local side goes on the
+           * interface, the remote side is the application's address.
+           */
+          ip4_address_t local_app_ip4;
+          ip4_address_t remote_app_ip4;
+          get_two_ip4_addresses (&local_app_ip4, &remote_app_ip4);
+          ip4_add_del_interface_address (vm,
+                                         sw_if,
+                                         &local_app_ip4, 31, 0 /* is_del */ );
+          local_app_ip = to_ip46 ( /* isv6 */ 0, local_app_ip4.as_u8);
+          remote_app_ip = to_ip46 ( /* isv6 */ 0, remote_app_ip4.as_u8);
+
+          vnet_build_rewrite_for_sw_interface (vnm, sw_if, VNET_LINK_IP4,
+                                               &remote_app_ip4);
+        }
+      else
+        {
+          /* Same scheme for v6, with a /127 pair. */
+          ip6_address_t local_app_ip6;
+          ip6_address_t remote_app_ip6;
+          get_two_ip6_addresses (&local_app_ip6, &remote_app_ip6);
+          u8 *s0;
+          s0 = format (0, "%U", format_ip6_address, &local_app_ip6);
+
+          vlib_cli_output (vm, "Setting ip address %s\n", s0);
+
+          ip6_add_del_interface_address (vm,
+                                         sw_if,
+                                         &local_app_ip6, 127,
+                                         0 /* is_del */ );
+          local_app_ip = to_ip46 ( /* isv6 */ 1, local_app_ip6.as_u8);
+          remote_app_ip = to_ip46 ( /* isv6 */ 1, remote_app_ip6.as_u8);
+        }
+    }
+
+  if (ret == HICN_ERROR_NONE)
+    // && hicn_face_prod_set_lru_max (*faceid, cs_reserved) == HICN_ERROR_NONE)
+    {
+      fib_route_path_t rpath = { 0 };
+      fib_route_path_t *rpaths = NULL;
+
+      /* Mask the prefix in place and build the path towards the
+       * application (next hop = remote app address on sw_if). */
+      if (ip46_address_is_ip4 (&(prefix->fp_addr)))
+        {
+          ip4_address_t mask;
+          ip4_preflen_to_mask (prefix->fp_len, &mask);
+          prefix->fp_addr.ip4.as_u32 = prefix->fp_addr.ip4.as_u32 & mask.as_u32;
+          prefix->fp_proto = FIB_PROTOCOL_IP4;
+
+          rpath.frp_weight = 1;
+          rpath.frp_sw_if_index = ~0;
+          rpath.frp_addr.ip4.as_u32 = remote_app_ip.ip4.as_u32;
+          rpath.frp_sw_if_index = sw_if;
+          rpath.frp_proto = DPO_PROTO_IP4;
+
+          vec_add1 (rpaths, rpath);
+        }
+      else
+        {
+          ip6_address_t mask;
+          ip6_preflen_to_mask (prefix->fp_len, &mask);
+          prefix->fp_addr.ip6.as_u64[0] =
+            prefix->fp_addr.ip6.as_u64[0] & mask.as_u64[0];
+          prefix->fp_addr.ip6.as_u64[1] =
+            prefix->fp_addr.ip6.as_u64[1] & mask.as_u64[1];
+          prefix->fp_proto = FIB_PROTOCOL_IP6;
+
+          rpath.frp_weight = 1;
+          rpath.frp_sw_if_index = ~0;
+          rpath.frp_addr.ip6.as_u64[0] = remote_app_ip.ip6.as_u64[0];
+          rpath.frp_addr.ip6.as_u64[1] = remote_app_ip.ip6.as_u64[1];
+          rpath.frp_sw_if_index = sw_if;
+          rpath.frp_proto = DPO_PROTO_IP6;
+
+          vec_add1 (rpaths, rpath);
+        }
+
+      u32 fib_index = fib_table_find (prefix->fp_proto, 0);
+      fib_table_entry_path_add2 (fib_index,
+                                 prefix,
+                                 FIB_SOURCE_CLI,
+                                 FIB_ENTRY_FLAG_NONE, rpaths);
+
+      hicn_route_enable (prefix);
+      hicn_app_state_create (sw_if, prefix);
+    }
+
+  adj_index =
+    adj_nbr_find (isv6 ? FIB_PROTOCOL_IP6 : FIB_PROTOCOL_IP4,
+                  isv6 ? VNET_LINK_IP6 : VNET_LINK_IP4, prod_addr, sw_if);
+  /* NOTE(review): hicn_face_get may return NULL here (freshly assigned
+   * addresses with no face yet); the dereferences below would crash —
+   * confirm face creation is guaranteed at this point. */
+  face = hicn_face_get (&local_app_ip, sw_if, &hicn_face_hashtb, adj_index);
+
+  *faceid = hicn_dpoi_get_index (face);
+
+  face->flags |= HICN_FACE_FLAGS_APPFACE_PROD;
+
+  hicn_face_unlock_with_id (*faceid);
+
+  *prod_addr = local_app_ip;
+
+  /* Cleanup in case something went wrong. */
+  if (ret)
+    {
+      hicn_app_state_del (sw_if);
+    }
+  return ret;
+}
+
+/**
+ * @brief Delete a producer application face.
+ *
+ * Withdraws the producer prefix from the hICN fib and releases the
+ * per-interface producer state (disabling the input feature).
+ *
+ * Cleanup: removed the unreachable trailing "return HICN_ERROR_NONE;"
+ * (both branches of the if/else already return) and the commented-out
+ * dead code.
+ *
+ * @param face_id id of the face to remove
+ */
+int
+hicn_face_prod_del (hicn_face_id_t face_id)
+{
+  if (!hicn_dpoi_idx_is_valid (face_id))
+    return HICN_ERROR_APPFACE_NOT_FOUND;
+
+  hicn_face_t *face = hicn_dpoi_get_from_idx (face_id);
+
+  /* Only faces created through hicn_face_prod_add may be deleted here. */
+  if (!(face->flags & HICN_FACE_FLAGS_APPFACE_PROD))
+    return HICN_ERROR_APPFACE_NOT_FOUND;
+
+  /* Remove the face's prefix from the fib */
+  hicn_route_disable (&(face_state_vec[face->sw_if].prefix));
+
+  return hicn_app_state_del (face->sw_if);
+}
+
+/*
+ * Format callback for producer app faces: appends a fixed tag.
+ * Both va_args are consumed but do not affect the output.
+ */
+u8 *
+format_hicn_face_prod (u8 * s, va_list * args)
+{
+  CLIB_UNUSED (index_t index) = va_arg (*args, index_t);
+  CLIB_UNUSED (u32 indent) = va_arg (*args, u32);
+
+  return format (s, " (producer)");
+}
+
+/* *INDENT-OFF* */
+VNET_FEATURE_INIT(hicn_prod_app_input_ip6, static)=
+{
+ .arc_name = "ip6-unicast",
+ .node_name = "hicn-face-prod-input",
+ .runs_before = VNET_FEATURES("ip6-inacl"),
+};
+/* *INDENT-ON* */
+
+/* *INDENT-OFF* */
+VNET_FEATURE_INIT(hicn_prod_app_input_ip4, static)=
+{
+ .arc_name = "ip4-unicast",
+ .node_name = "hicn-face-prod-input",
+ .runs_before = VNET_FEATURES("ip4-inacl"),
+};
+/* *INDENT-ON* */
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/network/faces/app/face_prod.h b/hicn-plugin/src/network/faces/app/face_prod.h
new file mode 100644
index 000000000..4cb2e3fbf
--- /dev/null
+++ b/hicn-plugin/src/network/faces/app/face_prod.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _FACE_PRODUCER_H_
+#define _FACE_PRODUCER_H_
+
+#include "../../cache_policies/cs_policy.h"
+#include "../face.h"
+
+/**
+ * @file
+ *
+ * @brief Producer application face.
+ *
+ * A producer application face is built upon an ip face and identify a local
+ * producer application (co-located with the forwarder) that acts as a producer. In the
+ * current design an application face is either a face towards a consumer face
+ * or towards a producer. The interface used by the producer application face is
+ * assumed to be reserved only for hICN traffic (e.g., dedicated memif that
+ * connects the application to the forwarder). Only one application face can be
+ * assigned to an interface.
+ *
+ * To each producer application face it is assigned a portion of the CS. Every
+ * data arriving to a producer application will be stored in the portion of the
+ * CS assigned to the face. The eviction policy is defined in the
+ * face. Available eviction policies are listed in the /cache_policies folder.
+ *
+ * In the vlib graph a producer application face is directly connected to the
+ * device-input node (with the node hicn-face-prod-input) and passes every packet to
+ * the hicn-face-ip node.
+ */
+
+/**
+ * @brief Producer application face state that refer to the hICN producer socket
+ * created by the application.
+ *
+ */
+typedef struct
+{
+ fib_prefix_t prefix;
+} hicn_face_prod_state_t;
+
+extern hicn_face_prod_state_t *face_state_vec;
+
+#define DEFAULT_PROBING_PORT 3784
+
+/**
+ * @brief Add a new producer application face
+ *
+ * The method creates the internal ip face and the state specific to the
+ * producer application face. This method setups a route in the FIB for the
+ * producer's prefix.
+ * @param prefix hicn prefix name assigned to the producer face
+ * @param len length of the prefix
+ * @param swif interface associated to the face
+ * @param cs_reserved return the amount of cs assigned to the face
+ * @param prod_addr address to assign to interface used by the appliction to
+ * send data to the producer face
+ */
+int
+hicn_face_prod_add (fib_prefix_t * prefix, u32 swif, u32 * cs_reserved,
+ ip46_address_t * prod_addr, hicn_face_id_t * faceid);
+
+/**
+ * @brief Delete an existing application face
+ *
+ * @param faceid id of the face to remove
+ */
+int hicn_face_prod_del (hicn_face_id_t faceid);
+
+/**
+ * @brief Set lru queue size for an app face
+ *
+ * @param face_id Id of the producer application face
+ */
+int hicn_face_prod_set_lru_max (hicn_face_id_t face_id, u32 * requested_size);
+
+/**
+ * @brief Format an application producer face
+ *
+ * @param s Pointer to a previous string. If null it will be initialize
+ * @param args Array storing input values. Expected u32 face_id and u32 indent
+ * @return String with the formatted face
+ */
+u8 *format_hicn_face_prod (u8 * s, va_list * args);
+
+
+#endif /* _FACE_PRODUCER_H_ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/network/faces/app/face_prod_node.c b/hicn-plugin/src/network/faces/app/face_prod_node.c
new file mode 100644
index 000000000..e9b687a06
--- /dev/null
+++ b/hicn-plugin/src/network/faces/app/face_prod_node.c
@@ -0,0 +1,318 @@
+/*
+ * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file
+ *
+ * @brief Application interface node
+ *
+ * This node runs after the device-input node and performs some safety checks
+ * in order to avoid unexpected interests and data (i.e., hICN packets whose
+ * names do not contain the prefix associated to the application face)
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+
+#include <vpp_plugins/hicn/hicn_api.h>
+
+#include "face_prod.h"
+#include "../../mgmt.h"
+
+static __clib_unused char *face_prod_input_error_strings[] = {
+#define _(sym, string) string,
+ foreach_hicnfwd_error
+#undef _
+};
+
+/* Node context data */
+typedef struct hicn_face_prod_runtime_s
+{
+ int id;
+} hicn_face_prod_runtime_t;
+
+typedef struct
+{
+ u32 next_index;
+ u32 sw_if_index;
+} hicn_face_prod_input_trace_t;
+
+typedef enum
+{
+ HICN_FACE_PROD_NEXT_DATA_IP4,
+ HICN_FACE_PROD_NEXT_DATA_IP6,
+ HICN_FACE_PROD_NEXT_ERROR_DROP,
+ HICN_FACE_PROD_N_NEXT,
+} hicn_face_prod_next_t;
+
+vlib_node_registration_t hicn_face_prod_input_node;
+
+/*
+ * Trace formatter for hicn-face-prod-input: prints the RX interface and
+ * the next node chosen for the traced buffer.
+ */
+static __clib_unused u8 *
+format_face_prod_input_trace (u8 * s, va_list * args)
+{
+  /* The leading vlib main/node arguments are consumed and ignored. */
+  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+  hicn_face_prod_input_trace_t *trace =
+    va_arg (*args, hicn_face_prod_input_trace_t *);
+  CLIB_UNUSED (u32 indent) = format_get_indent (s);
+
+  return format (s, "prod-face: sw_if_index %d next-index %d",
+                 trace->sw_if_index, trace->next_index);
+}
+
+/*
+ * Check that a v4 name in the packet falls under the face prefix.
+ *
+ * NOTE(review): despite its name, "xor" holds a bitwise AND and the test
+ * is (name & prefix) == prefix; it does not mask with the /fp_len
+ * netmask, so names sharing all of the prefix's set bits (e.g. 11.0.0.0
+ * against a 10.0.0.0/8 prefix) also pass — confirm this approximation is
+ * intended.
+ */
+static_always_inline int
+match_ip4_name (u32 * name, fib_prefix_t * prefix)
+{
+  u32 xor = 0;
+
+  xor = *name & prefix->fp_addr.ip4.data_u32;
+
+  return xor == prefix->fp_addr.ip4.data_u32;
+}
+
+/*
+ * Check that a v6 name in the packet falls under the face prefix.
+ *
+ * NOTE(review): as in match_ip4_name, "xor_sum" holds a bitwise AND and
+ * the test is (name & prefix) == prefix with no /fp_len netmask, so
+ * names that merely contain all of the prefix's set bits also pass —
+ * confirm this approximation is intended.
+ */
+static_always_inline int
+match_ip6_name (u8 * name, fib_prefix_t * prefix)
+{
+  union
+  {
+    u32x4 as_u32x4;
+    u64 as_u64[2];
+    u32 as_u32[4];
+  } xor_sum __attribute__ ((aligned (sizeof (u32x4))));
+
+  xor_sum.as_u64[0] = ((u64 *) name)[0] & prefix->fp_addr.ip6.as_u64[0];
+  xor_sum.as_u64[1] = ((u64 *) name)[1] & prefix->fp_addr.ip6.as_u64[1];
+
+  return (xor_sum.as_u64[0] == prefix->fp_addr.ip6.as_u64[0]) &&
+    (xor_sum.as_u64[1] == prefix->fp_addr.ip6.as_u64[1]);
+}
+
+/*
+ * Pick the next node from the IP version nibble of the buffer: names
+ * matching the face prefix go to hicn4-face-input (v4) or
+ * hicn6-face-input (v6); non-matching names go to error-drop.
+ *
+ * NOTE(review): when the packet's IP version does not agree with the
+ * prefix family, neither branch runs and match_res stays 1, so the
+ * packet is forwarded based on the version nibble alone — confirm this
+ * is intended.
+ */
+static_always_inline u32
+hicn_face_prod_next_from_data_hdr (vlib_node_runtime_t * node,
+                                   vlib_buffer_t * b, fib_prefix_t * prefix)
+{
+  u8 *ptr = vlib_buffer_get_current (b);
+  /* High nibble of the first byte: 0x40 = IPv4, 0x60 = IPv6. */
+  u8 v = *ptr & 0xf0;
+  int match_res = 1;
+
+  if (PREDICT_TRUE (v == 0x40 && ip46_address_is_ip4 (&prefix->fp_addr)))
+    {
+      /* v4 destination address starts at byte 12 of the IPv4 header. */
+      match_res = match_ip4_name ((u32 *) & (ptr[12]), prefix);
+    }
+  else if (PREDICT_TRUE (v == 0x60 && !ip46_address_is_ip4 (&prefix->fp_addr)))
+    {
+      /* v6 source address starts at byte 8 of the IPv6 header. */
+      match_res = match_ip6_name (& (ptr[8]), prefix);
+    }
+
+  /* (v == 0x60) adds 1 to select the IP6 next index. */
+  return match_res ? HICN_FACE_PROD_NEXT_DATA_IP4 + (v ==
+                                                     0x60) :
+    HICN_FACE_PROD_NEXT_ERROR_DROP;
+}
+
+/*
+ * Add a trace record for buffer b when tracing is enabled on both the
+ * node and the buffer; otherwise do nothing.
+ */
+static_always_inline void
+hicn_face_prod_trace_buffer (vlib_main_t * vm, vlib_node_runtime_t * node,
+                             u32 swif, vlib_buffer_t * b, u32 next)
+{
+  int node_traced = (node->flags & VLIB_NODE_FLAG_TRACE) != 0;
+  int buffer_traced = (b->flags & VLIB_BUFFER_IS_TRACED) != 0;
+
+  if (PREDICT_FALSE (node_traced && buffer_traced))
+    {
+      hicn_face_prod_input_trace_t *t =
+        vlib_add_trace (vm, node, b, sizeof (*t));
+      t->next_index = next;
+      t->sw_if_index = swif;
+    }
+}
+
+/*
+ * Node function: for every buffer received on a producer app interface,
+ * verify that the data name matches the producer prefix registered for
+ * that interface, then dispatch to the v4/v6 face input node (or drop).
+ * Buffers are handled four at a time with a single-buffer tail loop.
+ *
+ * NOTE(review): face_state_vec is indexed with the RX sw_if_index with no
+ * bounds check; presumably the feature arc being enabled per interface
+ * guarantees a valid entry — confirm.
+ */
+static uword
+hicn_face_prod_input_node_fn (vlib_main_t * vm,
+                              vlib_node_runtime_t * node,
+                              vlib_frame_t * frame)
+{
+  u32 n_left_from, *from, *to_next;
+  hicn_face_prod_next_t next_index;
+  vl_api_hicn_api_node_stats_get_reply_t stats = { 0 };
+
+  from = vlib_frame_vector_args (frame);
+  n_left_from = frame->n_vectors;
+  next_index = node->cached_next_index;
+
+  while (n_left_from > 0)
+    {
+      u32 n_left_to_next;
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+      /* Quad loop: process buffers 0..3 while prefetching 4..7. */
+      while (n_left_from >= 8 && n_left_to_next >= 4)
+        {
+          vlib_buffer_t *b0, *b1, *b2, *b3;
+          u32 bi0, bi1, bi2, bi3;
+          hicn_face_prod_state_t *prod_face0 = NULL;
+          hicn_face_prod_state_t *prod_face1 = NULL;
+          hicn_face_prod_state_t *prod_face2 = NULL;
+          hicn_face_prod_state_t *prod_face3 = NULL;
+          u32 next0, next1, next2, next3;
+
+          {
+            /* Prefetch the next four buffers of this iteration. */
+            vlib_buffer_t *b4, *b5, *b6, *b7;
+            b4 = vlib_get_buffer (vm, from[4]);
+            b5 = vlib_get_buffer (vm, from[5]);
+            b6 = vlib_get_buffer (vm, from[6]);
+            b7 = vlib_get_buffer (vm, from[7]);
+            CLIB_PREFETCH (b4, CLIB_CACHE_LINE_BYTES, STORE);
+            CLIB_PREFETCH (b5, CLIB_CACHE_LINE_BYTES, STORE);
+            CLIB_PREFETCH (b6, CLIB_CACHE_LINE_BYTES, STORE);
+            CLIB_PREFETCH (b7, CLIB_CACHE_LINE_BYTES, STORE);
+          }
+
+          bi0 = from[0];
+          bi1 = from[1];
+          bi2 = from[2];
+          bi3 = from[3];
+
+          from += 4;
+          n_left_from -= 4;
+          to_next[0] = bi0;
+          to_next[1] = bi1;
+          to_next[2] = bi2;
+          to_next[3] = bi3;
+
+          to_next += 4;
+          n_left_to_next -= 4;
+
+          b0 = vlib_get_buffer (vm, bi0);
+          b1 = vlib_get_buffer (vm, bi1);
+          b2 = vlib_get_buffer (vm, bi2);
+          b3 = vlib_get_buffer (vm, bi3);
+
+          /* Look up the producer state of each RX interface. */
+          prod_face0 =
+            &face_state_vec[vnet_buffer (b0)->sw_if_index[VLIB_RX]];
+          prod_face1 =
+            &face_state_vec[vnet_buffer (b1)->sw_if_index[VLIB_RX]];
+          prod_face2 =
+            &face_state_vec[vnet_buffer (b2)->sw_if_index[VLIB_RX]];
+          prod_face3 =
+            &face_state_vec[vnet_buffer (b3)->sw_if_index[VLIB_RX]];
+
+          next0 =
+            hicn_face_prod_next_from_data_hdr (node, b0, &prod_face0->prefix);
+          next1 =
+            hicn_face_prod_next_from_data_hdr (node, b1, &prod_face1->prefix);
+          next2 =
+            hicn_face_prod_next_from_data_hdr (node, b2, &prod_face2->prefix);
+          next3 =
+            hicn_face_prod_next_from_data_hdr (node, b3, &prod_face3->prefix);
+          stats.pkts_data_count += 4;
+
+          /* trace */
+          hicn_face_prod_trace_buffer (vm, node,
+                                       vnet_buffer (b0)->sw_if_index[VLIB_RX],
+                                       b0, next0);
+          hicn_face_prod_trace_buffer (vm, node,
+                                       vnet_buffer (b1)->sw_if_index[VLIB_RX],
+                                       b1, next1);
+          hicn_face_prod_trace_buffer (vm, node,
+                                       vnet_buffer (b2)->sw_if_index[VLIB_RX],
+                                       b2, next2);
+          hicn_face_prod_trace_buffer (vm, node,
+                                       vnet_buffer (b3)->sw_if_index[VLIB_RX],
+                                       b3, next3);
+
+          /* enqueue */
+          vlib_validate_buffer_enqueue_x4 (vm, node, next_index, to_next,
+                                           n_left_to_next, bi0, bi1, bi2, bi3,
+                                           next0, next1, next2, next3);
+
+          stats.pkts_processed += 4;
+
+        }
+
+      /* Single-buffer tail loop. */
+      while (n_left_from > 0 && n_left_to_next > 0)
+        {
+          vlib_buffer_t *b0;
+          u32 bi0, swif;
+          hicn_face_prod_state_t *prod_face = NULL;
+          u32 next0;
+
+          if (n_left_from > 1)
+            {
+              vlib_buffer_t *b1;
+              b1 = vlib_get_buffer (vm, from[1]);
+              CLIB_PREFETCH (b1, CLIB_CACHE_LINE_BYTES, STORE);
+            }
+
+          bi0 = from[0];
+          from += 1;
+          n_left_from -= 1;
+          to_next[0] = bi0;
+          to_next += 1;
+          n_left_to_next -= 1;
+
+          b0 = vlib_get_buffer (vm, bi0);
+          swif = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+          prod_face = &face_state_vec[swif];
+
+          next0 =
+            hicn_face_prod_next_from_data_hdr (node, b0, &prod_face->prefix);
+          stats.pkts_data_count++;
+
+          /* trace */
+          hicn_face_prod_trace_buffer (vm, node,
+                                       vnet_buffer (b0)->sw_if_index[VLIB_RX],
+                                       b0, next0);
+
+          /* enqueue */
+          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+                                           n_left_to_next, bi0, next0);
+
+          stats.pkts_processed += 1;
+
+        }
+
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+    }
+
+  /* Export per-run counters to the node counters. */
+  vlib_node_increment_counter (vm, node->node_index,
+                               HICNFWD_ERROR_PROCESSED, stats.pkts_processed);
+  vlib_node_increment_counter (vm, node->node_index, HICNFWD_ERROR_DATAS,
+                               stats.pkts_data_count);
+
+  return (frame->n_vectors);
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE(hicn_face_prod_input_node) =
+{
+ .function = hicn_face_prod_input_node_fn,
+ .name = "hicn-face-prod-input",
+ .vector_size = sizeof(u32),
+ .format_trace = format_face_prod_input_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+ .n_errors = ARRAY_LEN(face_prod_input_error_strings),
+ .error_strings = face_prod_input_error_strings,
+ .n_next_nodes = HICN_FACE_PROD_N_NEXT,
+ .next_nodes =
+ {
+ [HICN_FACE_PROD_NEXT_DATA_IP4] = "hicn4-face-input",
+ [HICN_FACE_PROD_NEXT_DATA_IP6] = "hicn6-face-input",
+ [HICN_FACE_PROD_NEXT_ERROR_DROP] = "error-drop",
+ },
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/network/faces/face.c b/hicn-plugin/src/network/faces/face.c
new file mode 100644
index 000000000..b495d18b0
--- /dev/null
+++ b/hicn-plugin/src/network/faces/face.c
@@ -0,0 +1,475 @@
+/*
+ * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vnet/fib/fib_entry_track.h>
+
+#include "face.h"
+#include "../hicn.h"
+#include "../params.h"
+#include "../error.h"
+#include "../mapme.h"
+#include "../mapme_eventmgr.h"
+
+/* Vector maintaining one dpo per registered face type */
+dpo_id_t *face_dpo_vec;
+/* Vector of per-face-type virtual function tables */
+hicn_face_vft_t *face_vft_vec;
+/* Vector of face type names, parallel to face_vft_vec */
+char **face_type_names_vec;
+
+/* Pool holding the state of every face/iface in the system */
+hicn_face_t *hicn_dpoi_face_pool;
+
+/* First face dpo type registered in the system */
+dpo_type_t first_type = DPO_FIRST;
+
+/* Per-face combined counters (HICN_N_COUNTER entries per face) */
+vlib_combined_counter_main_t *counters;
+
+/* Dpo type assigned to hICN faces by dpo_register_new_type() */
+dpo_type_t hicn_face_type;
+
+/* FIB node type used to receive back-walk notifications for faces */
+fib_node_type_t hicn_face_fib_node_type;
+
+/* Human-readable names for the per-face counters (see face.h) */
+const char *HICN_FACE_CTRX_STRING[] = {
+#define _(a,b,c) c,
+  foreach_hicn_face_counter
+#undef _
+};
+
+/**
+ * @brief Format the registered face types using their vfts.
+ *
+ * @param s String to append to
+ * @param face_id Index into face_dpo_vec of the dpo to format
+ * @param indent Indentation level
+ * @return The formatted string
+ */
+u8 *
+face_show (u8 * s, int face_id, u32 indent)
+{
+  s = format (s, "%U Faces:\n", format_white_space, indent);
+  indent += 4;
+  int i;
+  vec_foreach_index (i, face_dpo_vec)
+  {
+    /* NOTE(review): the loop index 'i' selects the vft, but the dpo index
+     * is always taken from face_dpo_vec[face_id]; if the intent is to list
+     * every face this should likely be face_dpo_vec[i] — confirm. */
+    s =
+      format (s, "%U", face_vft_vec[i].format_face,
+	      face_dpo_vec[face_id].dpoi_index, indent);
+  }
+
+  return (s);
+
+}
+
+/* Hash table: (nat_addr, sw_if=0, invalid dpo) -> hicn_face_input_faces_t */
+mhash_t hicn_face_vec_hashtb;
+/* Hash table: (nat_addr, sw_if, dpo) -> hicn_face_id_t */
+mhash_t hicn_face_hashtb;
+
+/* Pool of vectors of candidate input faces, indexed by vec_id */
+hicn_face_vec_t *hicn_vec_pool;
+
+/* Output node names for IPv6 faces, as given in VLIB_REGISTER_NODE */
+const static char *const hicn_face6_nodes[] = {
+  "hicn6-face-output", // this is the name you give your node in VLIB_REGISTER_NODE
+  "hicn6-iface-output", // this is the name you give your node in VLIB_REGISTER_NODE
+  NULL,
+};
+
+/* Output node names for IPv4 faces, as given in VLIB_REGISTER_NODE */
+const static char *const hicn_face4_nodes[] = {
+  "hicn4-face-output", // this is the name you give your node in VLIB_REGISTER_NODE
+  "hicn4-iface-output", // this is the name you give your node in VLIB_REGISTER_NODE
+  NULL,
+};
+
+
+/* Per-dpo-proto node lists handed to dpo_register_new_type() */
+const static char *const *const hicn_face_nodes[DPO_PROTO_NUM] = {
+  [DPO_PROTO_IP4] = hicn_face4_nodes,
+  [DPO_PROTO_IP6] = hicn_face6_nodes
+};
+
+/* Virtual function table for the hICN face dpo type */
+const static dpo_vft_t hicn_face_dpo_vft = {
+  .dv_lock = hicn_face_lock,
+  .dv_unlock = hicn_face_unlock,
+  .dv_format = format_hicn_face,
+};
+
+/* fib_node vft: map a face pool index back to its embedded fib_node. */
+static fib_node_t *
+hicn_face_node_get (fib_node_index_t index)
+{
+  return (&hicn_dpoi_get_from_idx (index)->fib_node);
+}
+
+/*
+ * fib_node vft: called when the last FIB lock on the node goes away.
+ * Face lifetime is managed by the face's own lock counter (see
+ * hicn_face_del), so there is nothing to do here.
+ */
+static void
+hicn_face_last_lock_gone (fib_node_t * node)
+{
+}
+
+/* Recover the enclosing face from its embedded fib_node member. */
+static hicn_face_t *
+hicn_face_from_fib_node (fib_node_t * node)
+{
+  char *base = ((char *) node) - STRUCT_OFFSET_OF (hicn_face_t, fib_node);
+  return ((hicn_face_t *) base);
+}
+
+/*
+ * fib_node vft back-walk callback: invoked when the tracked FIB entry for
+ * this face's next hop changes.  Restacks the face's dpo on the current
+ * forwarding contribution of that entry.
+ */
+static fib_node_back_walk_rc_t
+hicn_face_back_walk_notify (fib_node_t * node, fib_node_back_walk_ctx_t * ctx)
+{
+
+  hicn_face_t *face = hicn_face_from_fib_node (node);
+
+  const dpo_id_t *dpo_loadbalance =
+    fib_entry_contribute_ip_forwarding (face->fib_entry_index);
+  const load_balance_t *lb0 = load_balance_get (dpo_loadbalance->dpoi_index);
+
+  /* Only bucket 0 is used — assumes a single next hop per tracked entry;
+   * TODO(review): confirm behavior under ECMP. */
+  const dpo_id_t *dpo = load_balance_get_bucket_i (lb0, 0);
+
+  dpo_stack (hicn_face_type, face->dpo.dpoi_proto, &face->dpo, dpo);
+  /* if (dpo_is_adj(dpo)) */
+  /*   { */
+  /*     ip_adjacency_t * adj = adj_get (dpo->dpoi_index); */
+
+  /*     if (dpo->dpoi_type == DPO_ADJACENCY_MIDCHAIN || */
+  /*        dpo->dpoi_type == DPO_ADJACENCY_MCAST_MIDCHAIN) */
+  /*       { */
+  /*        adj_nbr_midchain_stack(dpo->dpoi_index, &face->dpo); */
+  /*       } */
+  /*     else */
+  /*       { */
+  /*        dpo_stack(hicn_face_type, face->dpo.dpoi_proto, &face->dpo, dpo); */
+  /*       } */
+  /*   } */
+
+  return (FIB_NODE_BACK_WALK_CONTINUE);
+}
+
+/* fib_node vft: memory-usage report hook; intentionally a no-op. */
+static void
+hicn_face_show_memory (void)
+{
+}
+
+
+/*
+ * FIB graph wiring for faces: back-walks triggered by adjacency/route
+ * updates reach hicn_face_back_walk_notify.
+ */
+static const fib_node_vft_t hicn_face_fib_node_vft = {
+  .fnv_get = hicn_face_node_get,
+  .fnv_last_lock = hicn_face_last_lock_gone,
+  .fnv_back_walk = hicn_face_back_walk_notify,
+  .fnv_mem_show = hicn_face_show_memory,
+};
+
+// Make this more flexible for future types face
+/*
+ * Initialize the face module: preallocate the face pool and vec pool,
+ * allocate per-face counters, create the two face hash tables, and
+ * register the face dpo type and FIB node type.  Must run before any
+ * packet is processed.
+ */
+void
+hicn_face_module_init (vlib_main_t * vm)
+{
+  pool_validate (hicn_dpoi_face_pool);
+  /* Preallocate 1024 faces to avoid early pool growth */
+  pool_alloc (hicn_dpoi_face_pool, 1024);
+  counters =
+    vec_new (vlib_combined_counter_main_t,
+	     HICN_PARAM_FACES_MAX * HICN_N_COUNTER);
+
+  mhash_init (&hicn_face_vec_hashtb,
+	      sizeof (hicn_face_input_faces_t) /* value */ ,
+	      sizeof (hicn_face_key_t) /* key */ );
+  mhash_init (&hicn_face_hashtb, sizeof (hicn_face_id_t) /* value */ ,
+	      sizeof (hicn_face_key_t) /* key */ );
+
+  pool_alloc (hicn_vec_pool, 100);
+
+  /*
+   * How much useful is the following registration?
+   * So far it seems that we need it only for setting the dpo_type.
+   */
+  hicn_face_type =
+    dpo_register_new_type (&hicn_face_dpo_vft, hicn_face_nodes);
+
+  /*
+   * We register a new node type to get informed when the adjacency corresponding
+   * to a face is updated
+   */
+  hicn_face_fib_node_type =
+    fib_node_register_new_type (&hicn_face_fib_node_vft);
+}
+
+/**
+ * @brief Format a single face (or iface) for display.
+ *
+ * The original implementation duplicated the whole body for the face and
+ * iface cases; the two branches differed only in the label and in the
+ * trailing dpo line, so they are merged here.  Output is unchanged.
+ *
+ * @param s String to append to
+ * @param args index_t of the face, then u32 indentation level
+ * @return The formatted string
+ */
+u8 *
+format_hicn_face (u8 * s, va_list * args)
+{
+  index_t index = va_arg (*args, index_t);
+  u32 indent = va_arg (*args, u32);
+  hicn_face_t *face;
+
+  face = hicn_dpoi_get_from_idx (index);
+
+  int is_face = (face->flags & HICN_FACE_FLAGS_FACE) != 0;
+  hicn_face_id_t face_id = hicn_dpoi_get_index (face);
+
+  s = format (s, "%U %s %d: ", format_white_space, indent,
+	      is_face ? "Face" : "iFace", face_id);
+  s = format (s, "nat address %U locks %u, path_label %u",
+	      format_ip46_address, &face->nat_addr, IP46_TYPE_ANY,
+	      face->locks, face->pl_id);
+
+  if ((face->flags & HICN_FACE_FLAGS_APPFACE_PROD))
+    s = format (s, " (producer)");
+  else if ((face->flags & HICN_FACE_FLAGS_APPFACE_CONS))
+    s = format (s, " (consumer)");
+
+  if ((face->flags & HICN_FACE_FLAGS_DELETED))
+    s = format (s, " (deleted)");
+
+  /* Only complete faces have a valid dpo to print */
+  if (is_face)
+    s = format (s, "\n%U%U",
+		format_white_space, indent + 2,
+		format_dpo_id, &face->dpo, indent + 3);
+
+  return s;
+}
+
+
+/**
+ * @brief Format every face in the face pool.
+ *
+ * @param s String to append to
+ * @param n Number of variadic arguments (only the first, the indentation
+ *          level, is read)
+ * @return The formatted string
+ */
+u8 *
+format_hicn_face_all (u8 * s, int n, ...)
+{
+  va_list ap;
+  va_start (ap, n);
+  u32 indent = va_arg (ap, u32);
+  /* BUG FIX: va_start must be paired with va_end (C standard, stdarg). */
+  va_end (ap);
+
+  s = format (s, "%U Faces:\n", format_white_space, indent);
+
+  hicn_face_t *face;
+
+  /* *INDENT-OFF* */
+  pool_foreach ( face, hicn_dpoi_face_pool,
+  {
+    s = format(s, "%U\n", format_hicn_face, hicn_dpoi_get_index(face), indent);
+  });
+  /* *INDENT-ON* */
+
+  return s;
+}
+
+/*
+ * Delete a face: remove it from the input-faces vector for its nat
+ * address, drop its hash table entries, and release one lock.  The pool
+ * entry is freed only when the lock count reaches zero; otherwise the
+ * face is just flagged as deleted.
+ */
+int
+hicn_face_del (hicn_face_id_t face_id)
+{
+  hicn_face_t *face = hicn_dpoi_get_from_idx (face_id);
+  hicn_face_key_t key;
+  hicn_face_key_t old_key;
+  hicn_face_key_t old_key2;
+
+  hicn_face_get_key (&(face->nat_addr), face->sw_if, &(face->dpo), &key);
+  hicn_face_input_faces_t *in_faces_vec =
+    hicn_face_get_vec (&(face->nat_addr),
+		       &hicn_face_vec_hashtb);
+  if (in_faces_vec != NULL)
+    {
+      hicn_face_vec_t *vec =
+	pool_elt_at_index (hicn_vec_pool, in_faces_vec->vec_id);
+      u32 index_face = vec_search (*vec, face_id);
+      vec_del1 (*vec, index_face);
+
+      if (vec_len (*vec) == 0)
+	{
+	  pool_put_index (hicn_vec_pool, in_faces_vec->vec_id);
+	  /* NOTE(review): 'key' was built from (nat_addr, sw_if, dpo),
+	   * but hicn_face_vec_hashtb entries are inserted under
+	   * (nat_addr, 0, DPO_INVALID) (see hicn_face_add /
+	   * hicn_face_get_vec) — this unset may never match; verify. */
+	  mhash_unset (&hicn_face_vec_hashtb, &key, (uword *) & old_key);
+	  vec_free (*vec);
+	}
+      else
+	{
+	  /* Check if the face we are deleting is the preferred one. */
+	  /* If so, replace with another. */
+	  if (in_faces_vec->face_id == face_id)
+	    {
+	      in_faces_vec->face_id = (*vec)[0];
+	    }
+	}
+
+      /* NOTE(review): hicn_face_add inserts two keys into
+       * hicn_face_hashtb (with the real dpo and with a bare-index dpo);
+       * only one is removed here — confirm the second is cleaned up. */
+      mhash_unset (&hicn_face_hashtb, &key, (uword *) & old_key2);
+    }
+
+  int ret = HICN_ERROR_NONE;
+
+  if (hicn_dpoi_idx_is_valid (face_id))
+    {
+      hicn_face_t *face = hicn_dpoi_get_from_idx (face_id);
+      face->locks--;
+      if (face->locks == 0)
+	pool_put_index (hicn_dpoi_face_pool, face_id);
+      else
+	face->flags |= HICN_FACE_FLAGS_DELETED;
+    }
+  else
+    ret = HICN_ERROR_FACE_NOT_FOUND;
+
+
+  return ret;
+}
+
+/*
+ * Promote an incomplete face (iface) to a complete face: stack its dpo on
+ * the given one and, for non-midchain adjacencies with a known next hop,
+ * start tracking the next hop's FIB entry so adjacency updates restack
+ * the face (see hicn_face_back_walk_notify).
+ */
+static void
+hicn_iface_to_face (hicn_face_t * face, const dpo_id_t * dpo)
+{
+  dpo_stack (hicn_face_type, dpo->dpoi_proto, &face->dpo, dpo);
+
+  face->flags &= ~HICN_FACE_FLAGS_IFACE;
+  face->flags |= HICN_FACE_FLAGS_FACE;
+
+  if (dpo_is_adj (dpo))
+    {
+      fib_node_init (&face->fib_node, hicn_face_fib_node_type);
+      fib_node_lock (&face->fib_node);
+
+      /*
+       * BUG FIX: the original condition used '||' between two
+       * inequalities against distinct constants, which is always true,
+       * so midchain adjacencies were tracked as well.  The intent
+       * (mirrored in the commented-out code in
+       * hicn_face_back_walk_notify) is to track the FIB entry only for
+       * NON-midchain adjacencies.
+       */
+      if (dpo->dpoi_type != DPO_ADJACENCY_MIDCHAIN &&
+	  dpo->dpoi_type != DPO_ADJACENCY_MCAST_MIDCHAIN)
+	{
+	  ip_adjacency_t *adj = adj_get (dpo->dpoi_index);
+	  ip46_address_t *nh = &(adj->sub_type.nbr.next_hop);
+	  fib_prefix_t prefix;
+
+	  if (!ip46_address_is_zero (nh))
+	    {
+	      fib_prefix_from_ip46_addr (nh, &prefix);
+
+	      u32 fib_index =
+		fib_table_find (prefix.fp_proto, HICN_FIB_TABLE);
+
+	      face->fib_entry_index = fib_entry_track (fib_index,
+						       &prefix,
+						       hicn_face_fib_node_type,
+						       hicn_dpoi_get_index
+						       (face),
+						       &face->fib_sibling);
+	    }
+	}
+    }
+}
+
+/*
+ * Utility that adds a new face cache entry. For the moment we assume that
+ * the ip_adjacency has already been set up.
+ *
+ * Looks up an existing face for (nat_address, sw_if, dpo_nh); if one
+ * exists, returns HICN_ERROR_FACE_ALREADY_CREATED.  Otherwise reuses a
+ * matching iface (promoting it) or creates a fresh iface, converts it to
+ * a face, registers it in the input-faces vector for nat_address, and
+ * signals the mapme event manager.
+ */
+int
+hicn_face_add (const dpo_id_t * dpo_nh, ip46_address_t * nat_address,
+	       int sw_if, hicn_face_id_t * pfaceid, u8 is_app_prod)
+{
+
+  /* NOTE(review): 'flags' is computed but never used below — confirm. */
+  hicn_face_flags_t flags = (hicn_face_flags_t) 0;
+  flags |= HICN_FACE_FLAGS_FACE;
+
+  hicn_face_t *face;
+
+  /* Full-key lookup: an identical face already exists */
+  face =
+    hicn_face_get_with_dpo (nat_address, sw_if, dpo_nh, &hicn_face_hashtb);
+
+  if (face != NULL)
+    {
+      *pfaceid = hicn_dpoi_get_index (face);
+      return HICN_ERROR_FACE_ALREADY_CREATED;
+    }
+
+  /* Bare-index lookup: an iface for the same adjacency may exist */
+  face =
+    hicn_face_get (nat_address, sw_if, &hicn_face_hashtb, dpo_nh->dpoi_index);
+
+  dpo_id_t temp_dpo = DPO_INVALID;
+  temp_dpo.dpoi_index = dpo_nh->dpoi_index;
+  hicn_face_key_t key;
+  hicn_face_get_key (nat_address, sw_if, dpo_nh, &key);
+
+  if (face == NULL)
+    {
+
+      /* No iface either: create one, then index it under both the full
+       * dpo key and the bare-index key */
+      hicn_iface_add (nat_address, sw_if, pfaceid, dpo_nh->dpoi_proto,
+		      dpo_nh->dpoi_index);
+      face = hicn_dpoi_get_from_idx (*pfaceid);
+
+      mhash_set_mem (&hicn_face_hashtb, &key, (uword *) pfaceid, 0);
+
+      hicn_face_get_key (nat_address, sw_if, &temp_dpo, &key);
+      mhash_set_mem (&hicn_face_hashtb, &key, (uword *) pfaceid, 0);
+    }
+  else
+    {
+      /* We found an iface and we convert it to a face */
+      *pfaceid = hicn_dpoi_get_index (face);
+      mhash_set_mem (&hicn_face_hashtb, &key, (uword *) pfaceid, 0);
+    }
+
+  hicn_iface_to_face (face, dpo_nh);
+
+  /* Invalid index: the vec hash table keys use (addr, 0, invalid dpo) */
+  temp_dpo.dpoi_index = ~0;
+
+  hicn_face_input_faces_t *in_faces =
+    hicn_face_get_vec (nat_address, &hicn_face_vec_hashtb);
+
+  if (in_faces == NULL)
+    {
+      /* First face for this nat address: allocate the candidate vector */
+      hicn_face_input_faces_t in_faces_temp;
+      hicn_face_vec_t *vec;
+      pool_get (hicn_vec_pool, vec);
+      *vec = vec_new (hicn_face_id_t, 0);
+      u32 index = vec - hicn_vec_pool;
+      in_faces_temp.vec_id = index;
+      vec_add1 (*vec, *pfaceid);
+
+      in_faces_temp.face_id = *pfaceid;
+
+      hicn_face_get_key (nat_address, 0, &temp_dpo, &key);
+
+      mhash_set_mem (&hicn_face_vec_hashtb, &key,
+		     (uword *) & in_faces_temp, 0);
+    }
+  else
+    {
+      hicn_face_vec_t *vec =
+	pool_elt_at_index (hicn_vec_pool, in_faces->vec_id);
+
+      /* */
+      if (vec_search (*vec, *pfaceid) != ~0)
+	return HICN_ERROR_FACE_ALREADY_CREATED;
+
+      vec_add1 (*vec, *pfaceid);
+
+      /* NOTE(review): hicn_iface_to_face was already called above for
+       * both branches; this second call appears redundant — confirm. */
+      hicn_iface_to_face (face, dpo_nh);
+
+      hicn_face_get_key (nat_address, 0, &temp_dpo, &key);
+
+      mhash_set_mem (&hicn_face_vec_hashtb, &key, (uword *) in_faces, 0);
+
+      /* If the face is an application producer face, we set it as the preferred incoming face. */
+      /* This is required to handle the CS separation, and the push api in a lightweight way */
+      if (is_app_prod)
+	{
+	  in_faces->face_id = *pfaceid;
+	}
+    }
+
+  /* Notify the mapme event manager that a face was added */
+  retx_t *retx = vlib_process_signal_event_data (vlib_get_main (),
+						 hicn_mapme_eventmgr_process_node.
+						 index,
+						 HICN_MAPME_EVENT_FACE_ADD, 1,
+						 sizeof (retx_t));
+
+  /* *INDENT-OFF* */
+  *retx = (retx_t) {
+    .face_id = *pfaceid,
+  };
+  /* *INDENT-ON* */
+
+  return HICN_ERROR_NONE;
+}
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/hicn-plugin/src/network/faces/face.h b/hicn-plugin/src/network/faces/face.h
new file mode 100644
index 000000000..84a36d239
--- /dev/null
+++ b/hicn-plugin/src/network/faces/face.h
@@ -0,0 +1,796 @@
+/*
+ * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HICN_FACE_H__
+#define __HICN_FACE_H__
+
+#include "hicn_buffer.h"
+
+#include <vnet/fib/fib_node.h>
+#include <vnet/vnet.h>
+#include <vlib/vlib.h>
+#include <vnet/ip/ip46_address.h>
+#include <vnet/dpo/dpo.h>
+#include <vnet/adj/adj_types.h>
+#include <vppinfra/bihash_8_8.h>
+#include <vnet/adj/adj_midchain.h>
+
+
+#include <vpp_plugins/hicn/error.h>
+typedef u8 hicn_face_flags_t;
+typedef index_t hicn_face_id_t;
+
+/**
+ * @file face.h
+ *
+ * This file implements a general face type. The purpose of a face is to
+ * carry the needed information to forward interest and data packets to the
+ * next node in the network. There are two type of faces: complete faces (in short
+ * faces), and incomplete faces (in short ifaces).
+ *
+ * A face that does not contain the indication of the adjacency is an
+ * incomplete face (iface), otherwise it is considered to be complete. Ifaces are
+ * used to forward data back to the previous hICN hop from which we received an
+ * interest, while faces are used to forward interest packets to the next hicn node.
+ * Faces and ifaces are created at two different points in time. Faces are created
+ * when a route is added, while ifaces are created when an interest is received.
+ * In details, faces and ifaces carry the following information:
+ * - nat_addr: the ip address to perform src nat or dst nat on interest and data packets, respectively;
+ * - pl_id: the path label
+ * - locks: the number of entities using this face. When 0 the face can be deallocated
+ * - dpo: the dpo that identifies the next node in the vlib graph for processing the vlib
+ * buffer. The dpo contains the dpo.dpoi_next field that points to the next node
+ * in the vlib graph and the dpo.dpoi_index which is an index to adj used by the next node
+ * to perform the l2 rewrite. In case of ifaces, it is likely we don't know the
+ * adjacency when creating the face. In this case, the next node in the vlib graph
+ * will be the node that performs a lookup in the fib. Only in case of udp tunnels,
+ * which are bidirectional tunnel we know that the incoming tunnel is also the outgoing
+ * one, therefore in this case we store the tunnel in the dpo.dpoi_index fields. For
+ * all the other tunnels (which are most likely unidirectional), the source address of
+ * the interest will be used to retrieve the outgoing tunnel when sending the corresponding
+ * data back.
+ * - sw_if: the incoming interface of the interest
+ * - fib_node, fib_entry_index and fib_sibling are information used to be notified of
+ * changes in the adjacency pointed by the dpo.
+ *
+ * We maintain two hash tables to retrieve faces and ifaces. In particular one hash table which
+ * index faces and ifaces for nat_address, sw_if and dpo. This is used to retrieve existing faces
+ * or ifaces when an interest is received and when an new face is created. A second hash table that
+ * indexes vectors of faces for nat_address and sw_if. This is used to retrieve a list of possible
+ * incoming faces when a data is received.
+ */
+
+/**
+ * @brief Structure representing a face. It contains the fields shared among
+ * all the types of faces as well it leaves some space for storing additional
+ * information specific to each type.
+ */
+typedef struct __attribute__ ((packed)) hicn_face_s
+{
+  /* Flags to identify if the face is incomplete (iface), complete (face) */
+  /* And a network or application face (1B) */
+  hicn_face_flags_t flags;
+
+  /* Align the upcoming fields */
+  u8 align;
+
+  /* Path label (2B) */
+  u16 pl_id;
+
+  /* Number of dpo holding a reference to the dpoi (4B) */
+  u32 locks;
+
+  /* Dpo for the adjacency (8B); the union forces 8-byte sizing */
+  union {
+    dpo_id_t dpo;
+    u64 align_dpo;
+  };
+
+  /* Local address of the interface sw_if */
+  ip46_address_t nat_addr;
+
+  /* local interface for the local ip address */
+  u32 sw_if;
+
+  /* FIB graph linkage used to track the next hop's FIB entry */
+  fib_node_t fib_node;
+
+  /* Tracked FIB entry for the face's next hop */
+  fib_node_index_t fib_entry_index;
+
+  /* Sibling index returned by fib_entry_track() */
+  u32 fib_sibling;
+} hicn_face_t;
+
+/* Pool of faces */
+extern hicn_face_t *hicn_dpoi_face_pool;
+
+/* Flags */
+/* A face is complete and it stores all the information. A iface lacks of the
+ adj index, therefore sending a packet through a iface require a lookup in
+ the FIB. */
+#define HICN_FACE_FLAGS_DEFAULT 0x00
+#define HICN_FACE_FLAGS_FACE 0x01
+#define HICN_FACE_FLAGS_IFACE 0x02
+#define HICN_FACE_FLAGS_APPFACE_PROD 0x04 /* Currently only IP face can be appface */
+#define HICN_FACE_FLAGS_APPFACE_CONS 0x08 /* Currently only IP face can be appface */
+#define HICN_FACE_FLAGS_DELETED 0x10
+
+#define HICN_FACE_NULL (hicn_face_id_t) ~0
+
+#define HICN_FACE_FLAGS_APPFACE_PROD_BIT 2
+#define HICN_FACE_FLAGS_APPFACE_CONS_BIT 3
+
+STATIC_ASSERT ((1 << HICN_FACE_FLAGS_APPFACE_PROD_BIT) ==
+ HICN_FACE_FLAGS_APPFACE_PROD,
+ "HICN_FACE_FLAGS_APPFACE_PROD_BIT and HICN_FACE_FLAGS_APPFACE_PROD must correspond");
+
+STATIC_ASSERT ((1 << HICN_FACE_FLAGS_APPFACE_CONS_BIT) ==
+ HICN_FACE_FLAGS_APPFACE_CONS,
+ "HICN_FACE_FLAGS_APPFACE_CONS_BIT and HICN_FACE_FLAGS_APPFACE_CONS must correspond");
+
+STATIC_ASSERT ((HICN_FACE_FLAGS_APPFACE_PROD >>
+ HICN_FACE_FLAGS_APPFACE_PROD_BIT) ==
+ HICN_BUFFER_FLAGS_FACE_IS_APP,
+ "hicn buffer app flag does not correspond to HICN_FACE_FLAGS_APPFACE_PROD");
+
+STATIC_ASSERT ((HICN_FACE_FLAGS_APPFACE_CONS >>
+ HICN_FACE_FLAGS_APPFACE_CONS_BIT) ==
+ HICN_BUFFER_FLAGS_FACE_IS_APP,
+ "hicn buffer app flag does not correspond to HICN_FACE_FLAGS_APPFACE_PROD");
+
+/**
+ * @brief Definition of the virtual function table for an hICN FACE DPO.
+ */
+typedef struct hicn_face_vft_s
+{
+ u8 *(*format_face) (u8 * s, va_list * args);
+ /**< Format an hICN face dpo*/
+ int (*hicn_face_del) (hicn_face_id_t face_id);
+ void (*hicn_face_get_dpo) (hicn_face_t * face, dpo_id_t * dpo);
+} hicn_face_vft_t;
+
+#define foreach_hicn_face_counter \
+ _(INTEREST_RX, 0, "Interest rx") \
+ _(INTEREST_TX, 1, "Interest tx") \
+ _(DATA_RX, 2, "Data rx") \
+ _(DATA_TX, 3, "Data tx") \
+
+typedef enum
+{
+#define _(a,b,c) HICN_FACE_COUNTERS_##a = (b),
+ foreach_hicn_face_counter
+#undef _
+ HICN_N_COUNTER
+} hicn_face_counters_t;
+
+extern mhash_t hicn_face_hashtb;
+
+extern const char *HICN_FACE_CTRX_STRING[];
+
+#define get_face_counter_string(ctrxno) (char *)(HICN_FACE_CTRX_STRING[ctrxno])
+
+
+/* Vector maintaining a dpo per face */
+extern dpo_id_t *face_dpo_vec;
+extern hicn_face_vft_t *face_vft_vec;
+
+/* Vector holding the set of face names */
+extern char **face_type_names_vec;
+
+/* First face type registered in the system. */
+extern dpo_type_t first_type;
+
+/* Per-face counters */
+extern vlib_combined_counter_main_t *counters;
+
+/**
+ * @brief Compute the face id of a face object.
+ *
+ * @param face_dpoi Pointer to the face state
+ * @return Index of the face within the face pool
+ */
+always_inline hicn_face_id_t
+hicn_dpoi_get_index (hicn_face_t * face_dpoi)
+{
+  /* A pool index is the element's offset from the pool base. */
+  return (hicn_face_id_t) (face_dpoi - hicn_dpoi_face_pool);
+}
+
+/**
+ * @brief Look up a face by id, tolerating invalid ids.
+ *
+ * @param dpoi_index Face identifier
+ * @return Pointer to the face, or NULL if the id is not in use
+ */
+always_inline hicn_face_t *
+hicn_dpoi_get_from_idx_safe (hicn_face_id_t dpoi_index)
+{
+  /* Guard against stale/freed indices before touching the pool. */
+  if (pool_is_free_index (hicn_dpoi_face_pool, dpoi_index))
+    return NULL;
+
+  return (hicn_face_t *) pool_elt_at_index (hicn_dpoi_face_pool, dpoi_index);
+}
+
+/**
+ * @brief Look up a face by id. The id MUST be valid (see
+ * hicn_dpoi_get_from_idx_safe for the checked variant).
+ *
+ * @param dpoi_index Face identifier
+ * @return Pointer to the face
+ */
+always_inline hicn_face_t *
+hicn_dpoi_get_from_idx (hicn_face_id_t dpoi_index)
+{
+  hicn_face_t *face =
+    (hicn_face_t *) pool_elt_at_index (hicn_dpoi_face_pool, dpoi_index);
+  return face;
+}
+
+/**
+ * @brief Return non-zero if the face id refers to an existing face.
+ */
+always_inline int
+hicn_dpoi_idx_is_valid (hicn_face_id_t face_id)
+{
+  /* The id must be within the pool and its slot must be in use. */
+  if (face_id >= pool_len (hicn_dpoi_face_pool))
+    return 0;
+
+  return !pool_is_free_index (hicn_dpoi_face_pool, face_id);
+}
+
+
+/**
+ * @brief Take one reference (lock) on a face.
+ *
+ * @param face_id Identifier of the face to lock (must be valid)
+ */
+always_inline void
+hicn_face_lock_with_id (hicn_face_id_t face_id)
+{
+  hicn_dpoi_get_from_idx (face_id)->locks++;
+}
+
+/**
+ * @brief Drop one reference (lock) from a face.  Deallocation when the
+ * count reaches zero is handled by hicn_face_del.
+ *
+ * @param face_id Identifier of the face to unlock (must be valid)
+ */
+always_inline void
+hicn_face_unlock_with_id (hicn_face_id_t face_id)
+{
+  hicn_dpoi_get_from_idx (face_id)->locks--;
+}
+
+/**
+ * @brief dpo vft hook: take one reference on the face behind a dpo.
+ *
+ * @param dpo Pointer to the face dpo
+ */
+always_inline void
+hicn_face_lock (dpo_id_t * dpo)
+{
+  /* The dpo index is the face id; delegate to the id-based helper. */
+  hicn_face_lock_with_id (dpo->dpoi_index);
+}
+
+/**
+ * @brief dpo vft hook: drop one reference from the face behind a dpo.
+ *
+ * @param dpo Pointer to the face dpo
+ */
+always_inline void
+hicn_face_unlock (dpo_id_t * dpo)
+{
+  /* The dpo index is the face id; delegate to the id-based helper. */
+  hicn_face_unlock_with_id (dpo->dpoi_index);
+}
+
+
+/**
+ * @brief Init the internal structures of the face module
+ *
+ * Must be called before processing any packet
+ */
+void hicn_face_module_init (vlib_main_t * vm);
+
+u8 * format_hicn_face (u8 * s, va_list * args);
+
+
+/**
+ * @brief Format all the existing faces
+ *
+ * @param s Pointer to a previous string. If null it will be initialize
+ * @param n Number of input parameters
+ * @return String with the faces formatted
+ */
+u8 *format_hicn_face_all (u8 * s, int n, ...);
+
+/**
+ * @brief Delete a face
+ *
+ * @param face_id Id of the face to delete
+ * @return HICN_ERROR_FACE_NOT_FOUND if the face does not exist, otherwise
+ * HICN_ERROR_NONE
+ */
+int hicn_face_del (hicn_face_id_t face_id);
+
+/**
+ * @brief vector of faces used to collect faces having the same local address
+ *
+ */
+typedef hicn_face_id_t *hicn_face_vec_t;
+
+typedef struct hicn_input_faces_s_
+{
+ /* Vector of all possible input faces */
+ u32 vec_id;
+
+ /* Preferred face. If an prod_app face is in the vector it will be the preferred one. */
+ /* It's not possible to have multiple prod_app face in the same vector, they would have */
+ /* the same local address. Every prod_app face is a point-to-point face between the forwarder */
+ /* and the application. */
+ hicn_face_id_t face_id;
+
+} hicn_face_input_faces_t;
+
+/**
+ * Pool containing the vector of possible incoming faces.
+ */
+extern hicn_face_vec_t *hicn_vec_pool;
+
+/**
+ * Hash tables that indexes a face by remote address. For fast lookup when an
+ * interest arrives.
+ */
+extern mhash_t hicn_face_vec_hashtb;
+
+
+/**
+ * Key definition for the mhash table. An face is uniquely identified by ip
+ * address, the interface id and a dpo pointing to the next node in the vlib graph.
+ * The ip address can correspond to the remote ip address of the next hicn hop,
+ * or to the local address of the receiving interface. The former is used to
+ * retrieve the incoming face when an interest is received, the latter when
+ * the arriving packet is a data. If the face is a regular face
+ * In case of iface, the following structure can be filled in different ways:
+ * - dpo equal to DPO_INVALID when the iface is a regular hICN iface
+ * - in case of udp_tunnel dpo =
+ * {
+ * .dpoi_index = tunnel_id,
+ * .dpoi_type = DPO_FIRST, //We don't need the type, we leave it invalid
+ * .dpoi_proto = DPO_PROTO_IP4 or DPO_PROTO_IP6,
+ * .dpoi_next_node = HICN6_IFACE_OUTPUT_NEXT_UDP4_ENCAP or
+ * HICN6_IFACE_OUTPUT_NEXT_UDP6_ENCAP or
+ * HICN4_IFACE_OUTPUT_NEXT_UDP4_ENCAP or
+ * HICN4_IFACE_OUTPUT_NEXT_UDP6_ENCAP
+ * }
+ */
+typedef struct __attribute__ ((packed)) hicn_face_key_s
+{
+ ip46_address_t addr;
+ union {
+ dpo_id_t dpo;
+ u64 align_dpo;
+ };
+ u32 sw_if;
+} hicn_face_key_t;
+
+/**
+ * @brief Fill in a hash key for the face tables.  A face is uniquely
+ * identified by the triple (nat address, software interface, dpo).
+ *
+ * @param addr nat address of the face
+ * @param sw_if interface associated to the face
+ * @param dpo dpo identifying the next vlib node
+ * @param key Pointer to an allocated hicn_face_key_t to fill in
+ */
+always_inline void
+hicn_face_get_key (const ip46_address_t * addr,
+		   u32 sw_if, const dpo_id_t * dpo, hicn_face_key_t * key)
+{
+  key->addr = *addr;
+  key->dpo = *dpo;
+  key->sw_if = sw_if;
+}
+
+/**
+ * @brief Get the face obj from the nat address and a bare adjacency index.
+ * Takes a lock on the face if found (the original comment claimed no lock
+ * was taken, contradicting the call to hicn_face_lock_with_id below).
+ *
+ * @param addr Ip v4/v6 address used to create the key for the hash table.
+ * @param sw_if Software interface id used to create the key for the hash table.
+ * @param hashtb Hash table (remote or local) where to perform the lookup.
+ * @param adj_index Adjacency index stored in the key's dpo (type/proto left invalid).
+ *
+ * @result Pointer to the face, or NULL if not found.
+ */
+always_inline hicn_face_t *
+hicn_face_get (const ip46_address_t * addr, u32 sw_if, mhash_t * hashtb, index_t adj_index)
+{
+  hicn_face_key_t key;
+
+  dpo_id_t dpo = DPO_INVALID;
+
+  dpo.dpoi_index = adj_index;
+
+  hicn_face_get_key (addr, sw_if, &dpo, &key);
+
+  hicn_face_id_t *dpoi_index = (hicn_face_id_t *) mhash_get (hashtb,
+							     &key);
+
+  if ( dpoi_index != NULL)
+    {
+      hicn_face_lock_with_id(*dpoi_index);
+      return hicn_dpoi_get_from_idx (*dpoi_index);
+    }
+
+  return NULL;
+}
+
+/**
+ * @brief Get the face obj from the nat address and the full dpo.
+ * Takes a lock on the face if found (the original comment claimed no lock
+ * was taken, contradicting the call to hicn_face_lock_with_id below).
+ *
+ * @param addr Ip v4/v6 address used to create the key for the hash table.
+ * @param sw_if Software interface id used to create the key for the hash table.
+ * @param dpo Dpo used to create the key for the hash table.
+ * @param hashtb Hash table (remote or local) where to perform the lookup.
+ *
+ * @result Pointer to the face, or NULL if not found.
+ */
+always_inline hicn_face_t *
+hicn_face_get_with_dpo (const ip46_address_t * addr, u32 sw_if, const dpo_id_t * dpo, mhash_t * hashtb)
+{
+  hicn_face_key_t key;
+
+  hicn_face_get_key (addr, sw_if, dpo, &key);
+
+  hicn_face_id_t *dpoi_index = (hicn_face_id_t *) mhash_get (hashtb,
+							     &key);
+
+  if ( dpoi_index != NULL)
+    {
+      hicn_face_lock_with_id(*dpoi_index);
+      return hicn_dpoi_get_from_idx (*dpoi_index);
+    }
+
+  return NULL;
+}
+
+/**
+ * @brief Get the vector of input faces for a nat address. Does not add any
+ * lock.  The key is built with sw_if 0 and an invalid dpo, matching how
+ * entries are inserted in hicn_face_add.
+ *
+ * @param addr Ip v4/v6 address used to create the key for the hash table.
+ * @param hashtb Hash table where to perform the lookup.
+ *
+ * @result Pointer to the input-faces descriptor, or NULL if none exists.
+ */
+always_inline hicn_face_input_faces_t *
+hicn_face_get_vec (const ip46_address_t * addr,
+		   mhash_t * hashtb)
+{
+  hicn_face_key_t key;
+
+  dpo_id_t dpo = DPO_INVALID;
+
+  hicn_face_get_key (addr, 0, &dpo, &key);
+  return (hicn_face_input_faces_t *) mhash_get (hashtb, &key);
+}
+
+/**
+ * @brief Create a new face ip. API for other modules (e.g., routing)
+ *
+ * @param dpo_nh dpo contained in the face that points to the next node in
+ * the vlib graph
+ * @param nat_addr nat ip v4 or v6 address of the face
+ * @param sw_if interface associated to the face
+ * @param pfaceid Pointer to return the face id
+ * @param is_app_prod if HICN_FACE_FLAGS_APPFACE_PROD the face is a local application face, all other values are ignored
+ * @return HICN_ERROR_FACE_NO_GLOBAL_IP if the face does not have a globally
+ * reachable ip address, otherwise HICN_ERROR_NONE
+ */
+int hicn_face_add (const dpo_id_t * dpo_nh,
+ ip46_address_t * nat_address,
+ int sw_if,
+ hicn_face_id_t * pfaceid,
+ u8 is_app_prod);
+
+/**
+ * @brief Create a new incomplete face (iface). Meant to be used by the
+ * data plane.  The new iface starts with one lock, is registered in
+ * hicn_face_hashtb, and has its counters validated and zeroed.
+ *
+ * @param nat_address nat ip v4 or v6 address of the face
+ * @param sw_if interface associated to the face
+ * @param pfaceid Pointer to return the face id
+ * @param proto dpo protocol of the adjacency
+ *        (NOTE(review): currently unused — dpoi_proto is forced to
+ *        DPO_PROTO_NONE below; confirm whether it should be 'proto')
+ * @param adj_index adjacency index stored in the iface's dpo
+ */
+always_inline void
+hicn_iface_add (ip46_address_t * nat_address, int sw_if,
+		hicn_face_id_t * pfaceid, dpo_proto_t proto,
+		u32 adj_index)
+{
+  hicn_face_t *face;
+  pool_get (hicn_dpoi_face_pool, face);
+
+  clib_memcpy (&(face->nat_addr), nat_address,
+	       sizeof (ip46_address_t));
+  face->sw_if = sw_if;
+
+  face->dpo.dpoi_type = DPO_FIRST;
+  face->dpo.dpoi_proto = DPO_PROTO_NONE;
+  face->dpo.dpoi_index = adj_index;
+  face->dpo.dpoi_next_node = 0;
+  face->pl_id = (u16) 0;
+  face->flags = HICN_FACE_FLAGS_IFACE;
+  face->locks = 1;
+
+  hicn_face_key_t key;
+  hicn_face_get_key (nat_address, sw_if, &face->dpo, &key);
+  *pfaceid = hicn_dpoi_get_index (face);
+
+  mhash_set_mem (&hicn_face_hashtb, &key, (uword *) pfaceid, 0);
+
+  /* Prepare this face's slice of the per-face counters */
+  for (int i = 0; i < HICN_N_COUNTER; i++)
+    {
+      vlib_validate_combined_counter (&counters[(*pfaceid) * HICN_N_COUNTER],
+				      i);
+      vlib_zero_combined_counter (&counters[(*pfaceid) * HICN_N_COUNTER], i);
+    }
+}
+
+/**** Helpers to manipulate faces and ifaces from the face/iface input nodes ****/
+
+/**
+ * @brief Retrieve the preferred input face for an ip4 nat address.
+ * Despite the name, no lock is taken here.
+ *
+ * @param face_id: Out — id of the preferred face for the address
+ * @param in_faces_vec_id: Out — id of the vector of candidate input faces
+ * @param hicnb_flags: Out — hicn buffer flags; the producer-app bit is set
+ *        when the preferred face is an application producer face
+ * @param nat_addr: Ip v4 nat address of the face
+ *
+ * @result HICN_ERROR_FACE_NOT_FOUND if no face exists for the address,
+ *         otherwise HICN_ERROR_NONE.
+ */
+always_inline int
+hicn_face_ip4_lock (hicn_face_id_t * face_id,
+		    u32 * in_faces_vec_id,
+		    u8 * hicnb_flags,
+		    const ip4_address_t * nat_addr)
+{
+  ip46_address_t ip_address = {0};
+  ip46_address_set_ip4(&ip_address, nat_addr);
+  hicn_face_input_faces_t *in_faces_vec =
+    hicn_face_get_vec (&ip_address, &hicn_face_vec_hashtb);
+
+  if (PREDICT_FALSE (in_faces_vec == NULL))
+    return HICN_ERROR_FACE_NOT_FOUND;
+
+  *in_faces_vec_id = in_faces_vec->vec_id;
+  hicn_face_t *face = hicn_dpoi_get_from_idx (in_faces_vec->face_id);
+
+  /* Propagate the producer-app flag into the buffer flags */
+  *hicnb_flags = HICN_BUFFER_FLAGS_DEFAULT;
+  *hicnb_flags |=
+    (face->flags & HICN_FACE_FLAGS_APPFACE_PROD) >>
+    HICN_FACE_FLAGS_APPFACE_PROD_BIT;
+
+  *face_id = in_faces_vec->face_id;
+
+  return HICN_ERROR_NONE;
+}
+
+/**
+ * @brief Retrieve the preferred input face for an ip6 nat address.
+ * Despite the name, no lock is taken here (ip4 variant behaves the same).
+ *
+ * @param face_id: Out — id of the preferred face for the address
+ * @param in_faces_vec_id: Out — id of the vector of candidate input faces
+ * @param hicnb_flags: Out — hicn buffer flags; the producer-app bit is set
+ *        when the preferred face is an application producer face
+ * @param nat_addr: Ip v6 nat address of the face
+ *
+ * @result HICN_ERROR_FACE_NOT_FOUND if no face exists for the address,
+ *         otherwise HICN_ERROR_NONE.
+ */
+always_inline int
+hicn_face_ip6_lock (hicn_face_id_t * face_id,
+		    u32 * in_faces_vec_id,
+		    u8 * hicnb_flags,
+		    const ip6_address_t * nat_addr)
+{
+  /* NOTE(review): the cast assumes the ip6 address occupies the whole
+   * ip46_address_t union — confirm layout. */
+  hicn_face_input_faces_t *in_faces_vec =
+    hicn_face_get_vec ((ip46_address_t *)nat_addr, &hicn_face_vec_hashtb);
+
+  if (PREDICT_FALSE (in_faces_vec == NULL))
+    return HICN_ERROR_FACE_NOT_FOUND;
+
+  *in_faces_vec_id = in_faces_vec->vec_id;
+  hicn_face_t *face = hicn_dpoi_get_from_idx (in_faces_vec->face_id);
+
+  /* Propagate the producer-app flag into the buffer flags */
+  *hicnb_flags = HICN_BUFFER_FLAGS_DEFAULT;
+  *hicnb_flags |=
+    (face->flags & HICN_FACE_FLAGS_APPFACE_PROD) >>
+    HICN_FACE_FLAGS_APPFACE_PROD_BIT;
+
+  *face_id = in_faces_vec->face_id;
+
+  return HICN_ERROR_NONE;
+}
+
+/**
+ * @brief Adjacency walk callback: point the face's dpo at the midchain
+ * adjacency being visited and stack the adjacency on that dpo.
+ *
+ * @param ai adjacency index being visited
+ * @param ctx the hicn_face_t being configured (cast from void*)
+ */
+static adj_walk_rc_t
+hicn4_iface_adj_walk_cb (adj_index_t ai,
+			 void *ctx)
+{
+
+  hicn_face_t *face = (hicn_face_t *)ctx;
+
+  dpo_set(&face->dpo, DPO_ADJACENCY_MIDCHAIN, DPO_PROTO_IP4, ai);
+  adj_nbr_midchain_stack(ai, &face->dpo);
+
+  return (ADJ_WALK_RC_CONTINUE);
+}
+
+/**
+ * @brief Retrieve, or create if it doesn't exist, a face from the ip4 nat
+ * address. On creation the face keeps its initial lock; on a hit the extra
+ * lock taken by the lookup is released.
+ *
+ * @param index: Output. Id of the retrieved or newly created face.
+ * @param hicnb_flags: Output. Buffer flags; APPFACE_PROD bit set for
+ * producer application faces (only on the existing-face path).
+ * @param nat_addr: Ip v4 remote address of the face
+ * @param sw_if: software interface id of the face
+ * @param adj_index: adjacency index used to seed the face dpo
+ * @param node_index: vlib edge index to use in the packet processing
+ */
+always_inline void
+hicn_iface_ip4_add_and_lock (hicn_face_id_t * index,
+ u8 * hicnb_flags,
+ const ip4_address_t * nat_addr,
+ u32 sw_if, u32 adj_index, u32 node_index)
+{
+ /*All (complete) faces are indexed by remote address as well */
+
+ ip46_address_t ip_address = {0};
+ ip46_address_set_ip4(&ip_address, nat_addr);
+
+ /* if the face exists, it adds a lock */
+ hicn_face_t *face =
+ hicn_face_get (&ip_address, sw_if, &hicn_face_hashtb, adj_index);
+
+ if (face == NULL)
+    {
+ hicn_face_id_t idx;
+ hicn_iface_add (&ip_address, sw_if, &idx, DPO_PROTO_IP4, adj_index);
+
+ face = hicn_dpoi_get_from_idx(idx);
+
+ /* Seed the dpo; the adj walk below may restack it on a midchain. */
+ face->dpo.dpoi_type = DPO_FIRST;
+ face->dpo.dpoi_proto = DPO_PROTO_IP4;
+ face->dpo.dpoi_index = adj_index;
+ face->dpo.dpoi_next_node = node_index;
+
+ /* if (nat_addr->as_u32 == 0) */
+ /* { */
+ adj_nbr_walk(face->sw_if,
+ FIB_PROTOCOL_IP4,
+ hicn4_iface_adj_walk_cb,
+ face);
+ /* } */
+
+ *hicnb_flags = HICN_BUFFER_FLAGS_DEFAULT;
+
+ *index = idx;
+ return;
+    }
+  else
+    {
+ /* unlock the face. We don't take a lock on each interest we receive */
+ hicn_face_id_t face_id = hicn_dpoi_get_index(face);
+ hicn_face_unlock_with_id(face_id);
+    }
+
+ /* Code replicated on purpose */
+ *hicnb_flags = HICN_BUFFER_FLAGS_DEFAULT;
+ *hicnb_flags |=
+ (face->flags & HICN_FACE_FLAGS_APPFACE_PROD) >>
+ HICN_FACE_FLAGS_APPFACE_PROD_BIT;
+
+ *index = hicn_dpoi_get_index (face);
+}
+
+/**
+ * @brief Adjacency walk callback for ip6 ifaces: stack the face dpo on the
+ * tunnel adjacency, but only when that adjacency is a midchain.
+ *
+ * @param ai: Index of the adjacency currently visited by the walk.
+ * @param ctx: Opaque walk context; here, the hicn_face_t being wired up.
+ */
+static adj_walk_rc_t
+hicn6_iface_adj_walk_cb (adj_index_t ai,
+ void *ctx)
+{
+
+ hicn_face_t *face = (hicn_face_t *)ctx;
+
+ ip_adjacency_t *adj = adj_get(ai);
+ /* Unlike the ip4 variant, restack only on (mcast-)midchain adjacencies. */
+ if ((adj->lookup_next_index == IP_LOOKUP_NEXT_MIDCHAIN) ||
+     (adj->lookup_next_index == IP_LOOKUP_NEXT_MCAST_MIDCHAIN))
+    {
+ dpo_set(&face->dpo, DPO_ADJACENCY_MIDCHAIN, adj->ia_nh_proto, ai);
+ adj_nbr_midchain_stack(ai, &face->dpo);
+    }
+
+ return (ADJ_WALK_RC_CONTINUE);
+}
+
+
+/**
+ * @brief Retrieve, or create if it doesn't exist, a face from the ip6 nat
+ * address. On creation the face keeps its initial lock; on a hit the extra
+ * lock taken by the lookup is released.
+ *
+ * @param index: Output. Id of the retrieved or newly created face.
+ * @param hicnb_flags: Output. Buffer flags; APPFACE_PROD bit set for
+ * producer application faces (only on the existing-face path).
+ * @param nat_addr: Ip v6 remote address of the face
+ * @param sw_if: software interface id of the face
+ * @param adj_index: adjacency index used to seed the face dpo
+ * @param node_index: vlib edge index to use in the packet processing
+ */
+always_inline void
+hicn_iface_ip6_add_and_lock (hicn_face_id_t * index,
+ u8 * hicnb_flags,
+ const ip6_address_t * nat_addr,
+ u32 sw_if, u32 adj_index, u32 node_index)
+{
+ /*All (complete) faces are indexed by remote address as well */
+ /* if the face exists, it adds a lock */
+ hicn_face_t *face =
+ hicn_face_get ((ip46_address_t *)nat_addr, sw_if, &hicn_face_hashtb, adj_index);
+
+ if (face == NULL)
+    {
+ hicn_face_id_t idx;
+ hicn_iface_add ((ip46_address_t *) nat_addr, sw_if, &idx, DPO_PROTO_IP6, adj_index);
+
+ face = hicn_dpoi_get_from_idx(idx);
+
+ /* Seed the dpo; the adj walk below may restack it on a midchain. */
+ face->dpo.dpoi_type = DPO_FIRST;
+ face->dpo.dpoi_proto = DPO_PROTO_IP6;
+ face->dpo.dpoi_index = adj_index;
+ face->dpo.dpoi_next_node = node_index;
+
+ adj_nbr_walk(face->sw_if,
+ FIB_PROTOCOL_IP6,
+ hicn6_iface_adj_walk_cb,
+ face);
+
+ *hicnb_flags = HICN_BUFFER_FLAGS_DEFAULT;
+
+ *index = idx;
+
+ return;
+    }
+  else
+    {
+ /* unlock the face. We don't take a lock on each interest we receive */
+ hicn_face_id_t face_id = hicn_dpoi_get_index(face);
+ hicn_face_unlock_with_id(face_id);
+    }
+
+ /* Code replicated on purpose */
+ *hicnb_flags = HICN_BUFFER_FLAGS_DEFAULT;
+ *hicnb_flags |=
+ (face->flags & HICN_FACE_FLAGS_APPFACE_PROD) >>
+ HICN_FACE_FLAGS_APPFACE_PROD_BIT;
+
+ *index = hicn_dpoi_get_index (face);
+}
+
+#endif // __HICN_FACE_H__
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/hicn-plugin/src/network/faces/face_cli.c b/hicn-plugin/src/network/faces/face_cli.c
new file mode 100644
index 000000000..e9e516cc6
--- /dev/null
+++ b/hicn-plugin/src/network/faces/face_cli.c
@@ -0,0 +1,194 @@
+/*
+ * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vlib/vlib.h>
+#include <vppinfra/error.h>
+#include "face.h"
+#include "../error.h"
+
+/**
+ * @brief CLI handler for "hicn face show": prints either a single face (by
+ * id) or every face in the pool, optionally filtered by face type and
+ * including deleted faces, together with per-face RX/TX combined counters.
+ *
+ * NOTE(review): line_input is never unformat_free'd and the face_type_name,
+ * s and n vectors are never vec_free'd — potential leaks; confirm against
+ * VPP CLI conventions.
+ */
+static clib_error_t *
+hicn_face_cli_show_command_fn (vlib_main_t * vm,
+ unformat_input_t * main_input,
+ vlib_cli_command_t * cmd)
+{
+
+ hicn_face_id_t face_id = HICN_FACE_NULL;
+ char *face_type_name = NULL;
+ int found = ~0;
+ int deleted = 0;
+ u8 *n = 0;
+ u8 *s = 0;
+ vlib_counter_t v;
+
+ /* Get a line of input. */
+ unformat_input_t _line_input, *line_input = &_line_input;
+ if (unformat_user (main_input, unformat_line_input, line_input))
+    {
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+	{
+ if (unformat (line_input, "%u", &face_id))
+	    ;
+ else if (unformat (line_input, "type %s", &face_type_name))
+	    ;
+ else if (unformat (line_input, "deleted"))
+ deleted = 1;
+	  else
+	    {
+ return clib_error_return (0, "%s",
+ get_error_string
+ (HICN_ERROR_CLI_INVAL));
+	    }
+	}
+
+ /* Resolve the type name to an index in face_type_names_vec. */
+ if (face_type_name != NULL)
+	{
+ int idx = 0;
+ vec_foreach_index (idx, face_type_names_vec)
+	  {
+ if (!strcmp (face_type_names_vec[idx], face_type_name))
+ found = idx;
+	  }
+ if (found == ~0)
+ return (clib_error_return (0, "Face type unknown"));
+	}
+
+    }
+
+ if (face_id != HICN_FACE_NULL)
+    {
+ /* Single-face display: validate the id, then dump face + counters. */
+ if (!hicn_dpoi_idx_is_valid (face_id))
+ return clib_error_return (0, "%s",
+ get_error_string
+ (HICN_ERROR_FACE_NOT_FOUND));
+
+ hicn_face_t *face = hicn_dpoi_get_from_idx (face_id);
+ vlib_cli_output (vm, "%U\n", format_hicn_face, face_id, 0 /*indent */ );
+
+ u32 indent = 3;
+
+ for (int i = 0; i < HICN_N_COUNTER; i++)
+	{
+ /* Counters are laid out as HICN_N_COUNTER entries per face. */
+ vlib_get_combined_counter (&counters
+ [hicn_dpoi_get_index (face) *
+ HICN_N_COUNTER], i, &v);
+	  s =
+ format (s, "%U%s", format_white_space, indent,
+ HICN_FACE_CTRX_STRING[i]);
+
+	  if (n)
+ _vec_len (n) = 0;
+ n = format (n, "packets");
+	  s =
+ format (s, "%U%-16v%16Ld", format_white_space,
+ 30 - strlen (HICN_FACE_CTRX_STRING[i]), n, v.packets);
+
+ _vec_len (n) = 0;
+ n = format (n, "bytes");
+ s = format (s, "\n%U%-16v%16Ld\n",
+ format_white_space, indent + 30, n, v.bytes);
+	}
+ vlib_cli_output (vm, "%s\n", s);
+    }
+  else
+    {
+ if (found != ~0)
+	{
+ /* Dump all faces matching the requested type. */
+ hicn_face_t *face;
+ /* *INDENT-OFF* */
+ pool_foreach(face, hicn_dpoi_face_pool,
+                       {
+ if (!((face->flags & HICN_FACE_FLAGS_DELETED) && !deleted))
+                           {
+ if (face->flags)
+                               {
+ vlib_cli_output(vm, "%U\n", format_hicn_face, hicn_dpoi_get_index(face), 0);
+ u8 * s = 0;
+ u32 indent = 3;
+
+ for (int i = 0; i < HICN_N_COUNTER; i++)
+                                   {
+ vlib_get_combined_counter (&counters[hicn_dpoi_get_index(face) * HICN_N_COUNTER], i, &v);
+ s = format (s, "%U%s",format_white_space, indent, HICN_FACE_CTRX_STRING[i]);
+
+ if (n)
+ _vec_len (n) = 0;
+ n = format (n, "packets");
+ s = format (s, "%U%-16v%16Ld", format_white_space, 30-strlen(HICN_FACE_CTRX_STRING[i]), n, v.packets);
+
+ _vec_len (n) = 0;
+ n = format (n, "bytes");
+ s = format (s, "\n%U%-16v%16Ld\n",
+ format_white_space, indent+30, n, v.bytes);
+                                   }
+ vlib_cli_output (vm, "%s\n", s);
+                               }
+                           }
+                       });
+ /* *INDENT-ON* */
+	}
+      else
+	{
+ /* No type filter: dump every (non-deleted unless requested) face. */
+ hicn_face_t *face;
+ /* *INDENT-OFF* */
+ pool_foreach(face, hicn_dpoi_face_pool,
+                       {
+ if (!((face->flags & HICN_FACE_FLAGS_DELETED) && !deleted))
+                           {
+ vlib_cli_output(vm, "%U\n", format_hicn_face, hicn_dpoi_get_index(face), 0);
+ u32 indent = 3;
+ u8 * s = 0;
+
+ for (int i = 0; i < HICN_N_COUNTER; i++)
+                               {
+ vlib_get_combined_counter (&counters[hicn_dpoi_get_index(face) * HICN_N_COUNTER], i, &v);
+ s = format (s, "%U%s",format_white_space, indent, HICN_FACE_CTRX_STRING[i]);
+
+ if (n)
+ _vec_len (n) = 0;
+ n = format (n, "packets");
+ s = format (s, "%U%-16v%16Ld", format_white_space, 30-strlen(HICN_FACE_CTRX_STRING[i]), n, v.packets);
+
+ _vec_len (n) = 0;
+ n = format (n, "bytes");
+ s = format (s, "\n%U%-16v%16Ld\n",
+ format_white_space, indent+30, n, v.bytes);
+                               }
+ vlib_cli_output (vm, "%s\n", s);
+                           }
+                       });
+ /* *INDENT-ON* */
+	}
+    }
+
+ return 0;
+}
+
+/* CLI registration for 'hicn face show [<face_id>] [type <t>] [deleted]' */
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (hicn_face_cli_show_command, static) =
+{
+ .path = "hicn face show",
+ .short_help = "hicn face show [<face_id>]",
+ .function = hicn_face_cli_show_command_fn,
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/hicn-plugin/src/network/faces/face_node.c b/hicn-plugin/src/network/faces/face_node.c
new file mode 100644
index 000000000..e1fd81ca0
--- /dev/null
+++ b/hicn-plugin/src/network/faces/face_node.c
@@ -0,0 +1,940 @@
+/*
+ * Copyright (c) 2020 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/adj/adj.h>
+
+#include "face.h"
+#include "face_node.h"
+#include "../strategy_dpo_manager.h"
+#include "face.h"
+#include "../cache_policies/cs_lru.h"
+#include "../infra.h"
+#include "../hicn.h"
+
+/**
+ * @File
+ *
+ * Definition of the nodes for ip incomplete faces.
+ */
+
+/* Forward declarations of the four face graph nodes defined in this file. */
+vlib_node_registration_t hicn4_face_input_node;
+vlib_node_registration_t hicn4_face_output_node;
+vlib_node_registration_t hicn6_face_input_node;
+vlib_node_registration_t hicn6_face_output_node;
+
+#define ip_v4 4
+#define ip_v6 6
+
+/* Error strings generated from the shared foreach_hicnfwd_error list. */
+static char *hicn4_face_input_error_strings[] = {
+#define _(sym, string) string,
+ foreach_hicnfwd_error
+#undef _
+};
+
+static char *hicn6_face_input_error_strings[] = {
+#define _(sym, string) string,
+ foreach_hicnfwd_error
+#undef _
+};
+
+/* Trace context struct */
+typedef struct
+{
+ u32 next_index;
+ u32 sw_if_index;
+ u8 pkt_type;
+ u8 packet_data[60];
+}
+hicn4_face_input_trace_t;
+
+/* Dispositions reachable from the ip4 face input node. */
+typedef enum
+{
+ HICN4_FACE_INPUT_NEXT_DATA,
+ HICN4_FACE_INPUT_NEXT_MAPME,
+ HICN4_FACE_INPUT_NEXT_ERROR_DROP,
+ HICN4_FACE_INPUT_N_NEXT,
+} hicn4_face_input_next_t;
+
+/* Trace context struct */
+typedef struct
+{
+ u32 next_index;
+ u32 sw_if_index;
+ u8 pkt_type;
+ u8 packet_data[60];
+}
+hicn6_face_input_trace_t;
+
+/* Dispositions reachable from the ip6 face input node. */
+typedef enum
+{
+ HICN6_FACE_INPUT_NEXT_DATA,
+ HICN6_FACE_INPUT_NEXT_MAPME,
+ HICN6_FACE_INPUT_NEXT_ERROR_DROP,
+ HICN6_FACE_INPUT_N_NEXT,
+} hicn6_face_input_next_t;
+
+/* Per-IP-version aliases consumed by the shared face_input_x1/x2 macros
+ * below via token pasting on the ipv argument. */
+#define NEXT_MAPME_IP4 HICN4_FACE_INPUT_NEXT_MAPME
+#define NEXT_MAPME_IP6 HICN6_FACE_INPUT_NEXT_MAPME
+#define NEXT_DATA_IP4 HICN4_FACE_INPUT_NEXT_DATA
+#define NEXT_DATA_IP6 HICN6_FACE_INPUT_NEXT_DATA
+
+#define NEXT_ERROR_DROP_IP4 HICN4_FACE_INPUT_NEXT_ERROR_DROP
+#define NEXT_ERROR_DROP_IP6 HICN6_FACE_INPUT_NEXT_ERROR_DROP
+
+#define IP_HEADER_4 ip4_header_t
+#define IP_HEADER_6 ip6_header_t
+
+#define LOCK_DPO_FACE_IP4 hicn_face_ip4_lock
+#define LOCK_DPO_FACE_IP6 hicn_face_ip6_lock
+
+#define TRACE_INPUT_PKT_IP4 hicn4_face_input_trace_t
+#define TRACE_INPUT_PKT_IP6 hicn6_face_input_trace_t
+
+/*
+ * NOTE: Both hicn4_face_input_node_fn and hicn6_face_input_node_fn
+ * share a similar codebase. Macros are hard to debug, although the
+ * following code is pretty straightforward and most of the complexity is in
+ * functions that can be easily debugged.
+ *
+ * face_input_x1: single-buffer iteration — classify ICMP vs data, look up
+ * the face from the destination address, bump RX counters, trace, enqueue.
+ */
+#define face_input_x1(ipv) \
+ do{ \
+ vlib_buffer_t *b0; \
+ u32 bi0; \
+ u32 next0 = NEXT_ERROR_DROP_IP##ipv; \
+ IP_HEADER_##ipv * ip_hdr = NULL; \
+ hicn_buffer_t * hicnb0; \
+ int ret; \
+ /* Prefetch for next iteration. */ \
+ if (n_left_from > 1) \
+ { \
+ vlib_buffer_t *b1; \
+ b1 = vlib_get_buffer (vm, from[1]); \
+ CLIB_PREFETCH (b1, 2*CLIB_CACHE_LINE_BYTES, STORE); \
+ CLIB_PREFETCH (b1->data, CLIB_CACHE_LINE_BYTES , LOAD); \
+ } \
+ /* Dequeue a packet buffer */ \
+ bi0 = from[0]; \
+ from += 1; \
+ n_left_from -= 1; \
+ to_next[0] = bi0; \
+ to_next += 1; \
+ n_left_to_next -= 1; \
+ \
+ b0 = vlib_get_buffer (vm, bi0); \
+ hicnb0 = hicn_get_buffer(b0); \
+ ip_hdr = (IP_HEADER_##ipv *) vlib_buffer_get_current(b0); \
+ \
+ u8 is_icmp = ip_hdr->protocol == IPPROTO_ICMPV##ipv; \
+ \
+ next0 = is_icmp*NEXT_MAPME_IP##ipv + \
+ (1-is_icmp)*NEXT_DATA_IP##ipv; \
+ \
+ ret = LOCK_DPO_FACE_IP##ipv \
+ (&(hicnb0->face_id), \
+ &(hicnb0->in_faces_vec_id), \
+ &hicnb0->flags, \
+ &(ip_hdr->dst_address)); \
+ \
+ if ( PREDICT_FALSE(ret != HICN_ERROR_NONE) ) \
+ next0 = NEXT_ERROR_DROP_IP##ipv; \
+ else \
+ { \
+ vlib_increment_combined_counter ( \
+ &counters[hicnb0->face_id \
+ * HICN_N_COUNTER], thread_index, \
+ HICN_FACE_COUNTERS_DATA_RX, \
+ 1, \
+ vlib_buffer_length_in_chain(vm, b0)); \
+ stats.pkts_data_count += 1; \
+ } \
+ \
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) && \
+ (b0->flags & VLIB_BUFFER_IS_TRACED))) \
+ { \
+ TRACE_INPUT_PKT_IP##ipv *t = \
+ vlib_add_trace (vm, node, b0, sizeof (*t)); \
+ t->pkt_type = HICN_PKT_TYPE_INTEREST; \
+ t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX]; \
+ t->next_index = next0; \
+ clib_memcpy_fast (t->packet_data, \
+ vlib_buffer_get_current (b0), \
+ sizeof (t->packet_data)); \
+ } \
+ \
+ \
+ /* Verify speculative enqueue, maybe switch current next frame */ \
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index, \
+ to_next, n_left_to_next, \
+ bi0, next0); \
+ }while(0)
+
+
+/*
+ * face_input_x2: two-buffer unrolled variant of face_input_x1 — identical
+ * per-packet processing, with prefetch of the following pair of buffers.
+ */
+#define face_input_x2(ipv) \
+ do{ \
+ vlib_buffer_t *b0, *b1; \
+ u32 bi0, bi1; \
+ u32 next0 = NEXT_ERROR_DROP_IP##ipv; \
+ u32 next1 = NEXT_ERROR_DROP_IP##ipv; \
+ IP_HEADER_##ipv * ip_hdr0 = NULL; \
+ IP_HEADER_##ipv * ip_hdr1 = NULL; \
+ hicn_buffer_t * hicnb0; \
+ hicn_buffer_t * hicnb1; \
+ int ret0, ret1; \
+ /* Prefetch for next iteration. */ \
+ { \
+ vlib_buffer_t *b2, *b3; \
+ b2 = vlib_get_buffer (vm, from[2]); \
+ b3 = vlib_get_buffer (vm, from[3]); \
+ CLIB_PREFETCH (b2, 2*CLIB_CACHE_LINE_BYTES, STORE); \
+ CLIB_PREFETCH (b3, 2*CLIB_CACHE_LINE_BYTES, STORE); \
+ CLIB_PREFETCH (b2->data, CLIB_CACHE_LINE_BYTES , LOAD); \
+ CLIB_PREFETCH (b3->data, CLIB_CACHE_LINE_BYTES , LOAD); \
+ } \
+ /* Dequeue a packet buffer */ \
+ bi0 = from[0]; \
+ bi1 = from[1]; \
+ from += 2; \
+ n_left_from -= 2; \
+ to_next[0] = bi0; \
+ to_next[1] = bi1; \
+ to_next += 2; \
+ n_left_to_next -= 2; \
+ \
+ b0 = vlib_get_buffer (vm, bi0); \
+ b1 = vlib_get_buffer (vm, bi1); \
+ hicnb0 = hicn_get_buffer(b0); \
+ hicnb1 = hicn_get_buffer(b1); \
+ ip_hdr0 = (IP_HEADER_##ipv *) vlib_buffer_get_current(b0); \
+ ip_hdr1 = (IP_HEADER_##ipv *) vlib_buffer_get_current(b1); \
+ \
+ u8 is_icmp0 = ip_hdr0->protocol == IPPROTO_ICMPV##ipv; \
+ u8 is_icmp1 = ip_hdr1->protocol == IPPROTO_ICMPV##ipv; \
+ \
+ next0 = is_icmp0*NEXT_MAPME_IP##ipv + \
+ (1-is_icmp0)*NEXT_DATA_IP##ipv; \
+ \
+ next1 = is_icmp1*NEXT_MAPME_IP##ipv + \
+ (1-is_icmp1)*NEXT_DATA_IP##ipv; \
+ \
+ \
+ ret0 = LOCK_DPO_FACE_IP##ipv \
+ (&(hicnb0->face_id), \
+ &(hicnb0->in_faces_vec_id), \
+ &hicnb0->flags, \
+ &(ip_hdr0->dst_address)); \
+ \
+ ret1 = LOCK_DPO_FACE_IP##ipv \
+ (&(hicnb1->face_id), \
+ &(hicnb1->in_faces_vec_id), \
+ &hicnb1->flags, \
+ &(ip_hdr1->dst_address)); \
+ \
+ if ( PREDICT_FALSE(ret0 != HICN_ERROR_NONE) ) \
+ next0 = NEXT_ERROR_DROP_IP##ipv; \
+ else \
+ { \
+ vlib_increment_combined_counter ( \
+ &counters[hicnb0->face_id \
+ * HICN_N_COUNTER], thread_index, \
+ HICN_FACE_COUNTERS_DATA_RX, \
+ 1, \
+ vlib_buffer_length_in_chain(vm, b0)); \
+ stats.pkts_data_count += 1; \
+ } \
+ \
+ if ( PREDICT_FALSE(ret1 != HICN_ERROR_NONE) ) \
+ next1 = NEXT_ERROR_DROP_IP##ipv; \
+ else \
+ { \
+ vlib_increment_combined_counter ( \
+ &counters[hicnb1->face_id \
+ * HICN_N_COUNTER], thread_index,\
+ HICN_FACE_COUNTERS_DATA_RX, \
+ 1, \
+ vlib_buffer_length_in_chain(vm, b1)); \
+ stats.pkts_data_count += 1; \
+ } \
+ \
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) && \
+ (b0->flags & VLIB_BUFFER_IS_TRACED))) \
+ { \
+ TRACE_INPUT_PKT_IP##ipv *t = \
+ vlib_add_trace (vm, node, b0, sizeof (*t)); \
+ t->pkt_type = HICN_PKT_TYPE_INTEREST; \
+ t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX]; \
+ t->next_index = next0; \
+ clib_memcpy_fast (t->packet_data, \
+ vlib_buffer_get_current (b0), \
+ sizeof (t->packet_data)); \
+ } \
+ \
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) && \
+ (b1->flags & VLIB_BUFFER_IS_TRACED))) \
+ { \
+ TRACE_INPUT_PKT_IP##ipv *t = \
+ vlib_add_trace (vm, node, b1, sizeof (*t)); \
+ t->pkt_type = HICN_PKT_TYPE_INTEREST; \
+ t->sw_if_index = vnet_buffer (b1)->sw_if_index[VLIB_RX]; \
+ t->next_index = next1; \
+ clib_memcpy_fast (t->packet_data, \
+ vlib_buffer_get_current (b1), \
+ sizeof (t->packet_data)); \
+ } \
+ \
+ \
+ /* Verify speculative enqueue, maybe switch current next frame */ \
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index, \
+ to_next, n_left_to_next, \
+ bi0, bi1, next0, next1); \
+ }while(0)
+
+
+/**
+ * @brief IPv4 face input node function: dispatches buffers through
+ * face_input_x2/x1 and accumulates the data-packet counter.
+ */
+static uword
+hicn4_face_input_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ u32 n_left_from, *from, *to_next, next_index;
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+ vl_api_hicn_api_node_stats_get_reply_t stats = { 0 };
+ u32 thread_index = vm->thread_index;
+
+ while (n_left_from > 0)
+    {
+ u32 n_left_to_next;
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ /* Dual loop, X2 */
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+	{
+ face_input_x2 (4);
+	}
+
+ /* Single loop, X1 */
+ while (n_left_from > 0 && n_left_to_next > 0)
+	{
+ face_input_x1 (4);
+	}
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+    }
+
+ vlib_node_increment_counter (vm, node->node_index,
+ HICNFWD_ERROR_DATAS, stats.pkts_data_count);
+
+ return (frame->n_vectors);
+}
+
+/* Packet trace format function for hicn4-face-input. */
+static u8 *
+hicn4_face_input_format_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ hicn4_face_input_trace_t *t =
+ va_arg (*args, hicn4_face_input_trace_t *);
+
+ s = format (s, "FACE_IP4_INPUT: pkt: %d, sw_if_index %d, next index %d\n%U",
+ (int) t->pkt_type, t->sw_if_index, t->next_index,
+ format_ip4_header, t->packet_data, sizeof (t->packet_data));
+ return (s);
+}
+
+
+/*
+ * Node registration for the hicn4 face input (data) node
+ */
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE(hicn4_face_input_node) =
+{
+ .function = hicn4_face_input_node_fn,
+ .name = "hicn4-face-input",
+ .vector_size = sizeof(u32),
+ .format_trace = hicn4_face_input_format_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+ .n_errors = ARRAY_LEN(hicn4_face_input_error_strings),
+ .error_strings = hicn4_face_input_error_strings,
+ .n_next_nodes = HICN4_FACE_INPUT_N_NEXT,
+ /* edit / add dispositions here */
+ .next_nodes =
+  {
+ [HICN4_FACE_INPUT_NEXT_DATA] = "hicn-data-pcslookup",
+ [HICN4_FACE_INPUT_NEXT_MAPME] = "hicn-mapme-ack",
+ [HICN4_FACE_INPUT_NEXT_ERROR_DROP] = "error-drop",
+  },
+};
+/* *INDENT-ON* */
+
+/**
+ * @brief IPv6 face input node function
+ * @see hicn4_face_input_node_fn
+ */
+static uword
+hicn6_face_input_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ u32 n_left_from, *from, *to_next, next_index;
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+ vl_api_hicn_api_node_stats_get_reply_t stats = { 0 };
+ u32 thread_index = vm->thread_index;
+
+ while (n_left_from > 0)
+    {
+ u32 n_left_to_next;
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ /* Dual loop, X2 */
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+	{
+ face_input_x2 (6);
+	}
+
+ /* Single loop, X1 */
+ while (n_left_from > 0 && n_left_to_next > 0)
+	{
+ face_input_x1 (6);
+	}
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+    }
+
+ vlib_node_increment_counter (vm, node->node_index,
+ HICNFWD_ERROR_DATAS, stats.pkts_data_count);
+
+ return (frame->n_vectors);
+}
+
+/* Packet trace format function for hicn6-face-input. */
+static u8 *
+hicn6_face_input_format_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ hicn6_face_input_trace_t *t =
+ va_arg (*args, hicn6_face_input_trace_t *);
+
+ s = format (s, "FACE_IP6_INPUT: pkt: %d, sw_if_index %d, next index %d\n%U",
+ (int) t->pkt_type, t->sw_if_index, t->next_index,
+ format_ip6_header, t->packet_data, sizeof (t->packet_data));
+ return (s);
+}
+
+/*
+ * Node registration for the hicn6 face input (data) node
+ */
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE(hicn6_face_input_node) =
+{
+ .function = hicn6_face_input_node_fn,
+ .name = "hicn6-face-input",
+ .vector_size = sizeof(u32),
+ .format_trace = hicn6_face_input_format_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+ .n_errors = ARRAY_LEN(hicn6_face_input_error_strings),
+ .error_strings = hicn6_face_input_error_strings,
+ .n_next_nodes = HICN6_FACE_INPUT_N_NEXT,
+ /* edit / add dispositions here */
+ .next_nodes =
+  {
+ [HICN6_FACE_INPUT_NEXT_DATA] = "hicn-data-pcslookup",
+ [HICN6_FACE_INPUT_NEXT_MAPME] = "hicn-mapme-ack",
+ [HICN6_FACE_INPUT_NEXT_ERROR_DROP] = "error-drop",
+  },
+};
+/* *INDENT-ON* */
+
+/**** FACE OUTPUT *****/
+
+/* Dispositions reachable from the ip4 face output node. */
+typedef enum
+{
+ HICN4_FACE_OUTPUT_NEXT_ECHO_REPLY,
+ HICN4_FACE_OUTPUT_NEXT_UDP4_ENCAP,
+ HICN4_FACE_OUTPUT_NEXT_UDP6_ENCAP,
+ HICN4_FACE_OUTPUT_N_NEXT,
+} hicn4_face_output_next_t;
+
+/* Dispositions reachable from the ip6 face output node. */
+typedef enum
+{
+ HICN6_FACE_OUTPUT_NEXT_ECHO_REPLY,
+ HICN6_FACE_OUTPUT_NEXT_UDP4_ENCAP,
+ HICN6_FACE_OUTPUT_NEXT_UDP6_ENCAP,
+ HICN6_FACE_OUTPUT_N_NEXT,
+} hicn6_face_output_next_t;
+
+/* static_always_inline void */
+/* hicn_reply_probe_v4 (vlib_buffer_t * b, hicn_face_t * face) */
+/* { */
+/* hicn_header_t *h0 = vlib_buffer_get_current (b); */
+/* hicn_face_ip_t * face_ip = (hicn_face_ip_t *)(&face->data); */
+/* h0->v4.ip.saddr = h0->v4.ip.daddr; */
+/* h0->v4.ip.daddr = face_ip->local_addr.ip4; */
+/* vnet_buffer (b)->sw_if_index[VLIB_RX] = face->shared.sw_if; */
+
+/* u16 * dst_port_ptr = (u16 *)(((u8*)h0) + sizeof(ip4_header_t) + sizeof(u16)); */
+/* u16 dst_port = *dst_port_ptr; */
+/* u16 * src_port_ptr = (u16 *)(((u8*)h0) + sizeof(ip4_header_t)); */
+
+/* *dst_port_ptr = *src_port_ptr; */
+/* *src_port_ptr = dst_port; */
+
+/* hicn_type_t type = hicn_get_buffer (b)->type; */
+/* hicn_ops_vft[type.l1]->set_lifetime (type, &h0->protocol, 0); */
+/* } */
+
+/* static_always_inline void */
+/* hicn_reply_probe_v6 (vlib_buffer_t * b, hicn_face_t * face) */
+/* { */
+/* hicn_header_t *h0 = vlib_buffer_get_current (b); */
+/* hicn_face_ip_t * face_ip = (hicn_face_ip_t *)(&face->data); */
+/* h0->v6.ip.saddr = h0->v6.ip.daddr; */
+/* h0->v6.ip.daddr = face_ip->local_addr.ip6; */
+/* vnet_buffer (b)->sw_if_index[VLIB_RX] = face->shared.sw_if; */
+
+/* u16 * dst_port_ptr = (u16 *)(((u8*)h0) + sizeof(ip6_header_t) + sizeof(u16)); */
+/* u16 dst_port = *dst_port_ptr; */
+/* u16 * src_port_ptr = (u16 *)(((u8*)h0) + sizeof(ip6_header_t)); */
+
+/* *dst_port_ptr = *src_port_ptr; */
+/* *src_port_ptr = dst_port; */
+
+/* hicn_type_t type = hicn_get_buffer (b)->type; */
+/* hicn_ops_vft[type.l1]->set_lifetime (type, &h0->protocol, 0); */
+
+/* } */
+
+/* static_always_inline u32 */
+/* hicn_face_match_probe (vlib_buffer_t * b, hicn_face_t * face, u32 * next) */
+/* { */
+
+/* u8 *ptr = vlib_buffer_get_current (b); */
+/* u8 v = *ptr & 0xf0; */
+/* u8 res = 0; */
+
+/* if ( v == 0x40 ) */
+/* { */
+/* u16 * dst_port = (u16 *)(ptr + sizeof(ip4_header_t) + sizeof(u16)); */
+/* if (*dst_port == clib_net_to_host_u16(DEFAULT_PROBING_PORT)) */
+/* { */
+/* hicn_reply_probe_v6(b, face); */
+/* *next = HICN4_FACE_NEXT_ECHO_REPLY; */
+/* res = 1; */
+/* } */
+/* } */
+/* else if ( v == 0x60 ) */
+/* { */
+/* u16 * dst_port = (u16 *)(ptr + sizeof(ip6_header_t) + sizeof(u16)); */
+/* if (*dst_port == clib_net_to_host_u16(DEFAULT_PROBING_PORT)) */
+/* { */
+/* hicn_reply_probe_v6(b, face); */
+/* *next = HICN6_FACE_NEXT_ECHO_REPLY; */
+/* res = 1; */
+/* } */
+/* } */
+/* return res; */
+/* } */
+
+
+/**
+ * @brief Rewrite an outgoing interest with the face nat address, enable
+ * checksum offload, and steer the buffer to the face dpo's next node.
+ *
+ * @param vm: vlib main
+ * @param b0: buffer holding the interest to rewrite
+ * @param face: face the interest leaves through (must have FLAGS_FACE set)
+ * @param next: Output. Next-node index taken from the face dpo.
+ */
+static inline void
+hicn_face_rewrite_interest (vlib_main_t * vm, vlib_buffer_t * b0,
+ hicn_face_t * face, u32 * next)
+{
+
+ /* if ((face->flags & HICN_FACE_FLAGS_APPFACE_PROD) && hicn_face_match_probe(b0, face, next)) */
+ /* return; */
+
+ hicn_header_t *hicn = vlib_buffer_get_current (b0);
+
+ //hicn_face_ip_t *ip_face = (hicn_face_ip_t *) face->data;
+
+ /* The source address is rewritten to the face nat address; the temp
+ * address passed as locator is left unset. */
+ ip46_address_t temp_addr;
+ ip46_address_reset (&temp_addr);
+ hicn_type_t type = hicn_get_buffer (b0)->type;
+ hicn_ops_vft[type.l1]->rewrite_interest (type, &hicn->protocol,
+ &face->nat_addr, &temp_addr);
+
+ /* IP checksum offload only applies to the v4 header. */
+ if (ip46_address_is_ip4(&face->nat_addr))
+ b0->flags |= VNET_BUFFER_F_OFFLOAD_IP_CKSUM;
+
+ b0->flags |= VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;
+
+ ASSERT(face->flags & HICN_FACE_FLAGS_FACE);
+
+ vnet_buffer (b0)->ip.adj_index[VLIB_TX] = face->dpo.dpoi_index;
+ *next = face->dpo.dpoi_next_node;
+}
+
+/* Error strings generated from the shared foreach_hicnfwd_error list. */
+static char *hicn4_face_output_error_strings[] = {
+#define _(sym, string) string,
+ foreach_hicnfwd_error
+#undef _
+};
+
+static char *hicn6_face_output_error_strings[] = {
+#define _(sym, string) string,
+ foreach_hicnfwd_error
+#undef _
+};
+
+
+/* Trace context struct */
+typedef struct
+{
+ u32 next_index;
+ u32 sw_if_index;
+ u8 pkt_type;
+ u8 packet_data[60];
+}
+hicn4_face_output_trace_t;
+
+/* Trace context struct */
+typedef struct
+{
+ u32 next_index;
+ u32 sw_if_index;
+ u8 pkt_type;
+ u8 packet_data[60];
+}
+hicn6_face_output_trace_t;
+
+/* Per-IP-version trace-type aliases for the face_output_x1/x2 macros. */
+#define TRACE_OUTPUT_PKT_IP4 hicn4_face_output_trace_t
+#define TRACE_OUTPUT_PKT_IP6 hicn6_face_output_trace_t
+
+/*
+ * face_output_x1: single-buffer output iteration — resolve the face from
+ * the TX adj index, rewrite the interest, bump TX counters, trace, enqueue.
+ */
+#define face_output_x1(ipv) \
+ do { \
+ vlib_buffer_t *b0; \
+ u32 bi0; \
+ u32 next0 = ~0; \
+ hicn_face_t * face; \
+ \
+ /* Prefetch for next iteration. */ \
+ if (n_left_from > 1) \
+ { \
+ vlib_buffer_t *b1; \
+ b1 = vlib_get_buffer (vm, from[1]); \
+ CLIB_PREFETCH (b1, CLIB_CACHE_LINE_BYTES, STORE); \
+ CLIB_PREFETCH (b1->data, CLIB_CACHE_LINE_BYTES , STORE); \
+ } \
+ /* Dequeue a packet buffer */ \
+ bi0 = from[0]; \
+ from += 1; \
+ n_left_from -= 1; \
+ to_next[0] = bi0; \
+ to_next += 1; \
+ n_left_to_next -= 1; \
+ \
+ b0 = vlib_get_buffer (vm, bi0); \
+ \
+ hicn_face_id_t face_id = vnet_buffer (b0)->ip.adj_index[VLIB_TX]; \
+ face = \
+ hicn_dpoi_get_from_idx (face_id); \
+ \
+ if (PREDICT_TRUE(face != NULL)) \
+ { \
+ hicn_face_rewrite_interest \
+ (vm, b0, face, &next0); \
+ stats.pkts_interest_count += 1; \
+ vlib_increment_combined_counter ( \
+ &counters[face_id * HICN_N_COUNTER], \
+ thread_index, \
+ HICN_FACE_COUNTERS_INTEREST_TX, \
+ 1, \
+ vlib_buffer_length_in_chain(vm, b0)); \
+ } \
+ \
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) && \
+ (b0->flags & VLIB_BUFFER_IS_TRACED))) \
+ { \
+ TRACE_OUTPUT_PKT_IP##ipv *t = \
+ vlib_add_trace (vm, node, b0, sizeof (*t)); \
+ t->pkt_type = HICN_PKT_TYPE_INTEREST; \
+ t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX]; \
+ t->next_index = next0; \
+ clib_memcpy_fast (t->packet_data, \
+ vlib_buffer_get_current (b0), \
+ sizeof (t->packet_data)); \
+ } \
+ \
+ \
+ /* Verify speculative enqueue, maybe switch current next frame */ \
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index, \
+ to_next, n_left_to_next, \
+ bi0, next0); \
+ }while(0)
+
+/*
+ * face_output_x2: two-buffer unrolled variant of face_output_x1 — identical
+ * per-packet processing, with prefetch of the following pair of buffers.
+ */
+#define face_output_x2(ipv) \
+ do { \
+ vlib_buffer_t *b0, *b1; \
+ u32 bi0, bi1; \
+ u32 next0 = ~0; \
+ u32 next1 = ~0; \
+ hicn_face_t *face0, *face1; \
+ \
+ /* Prefetch for next iteration. */ \
+ { \
+ vlib_buffer_t *b2, *b3; \
+ b2 = vlib_get_buffer (vm, from[2]); \
+ b3 = vlib_get_buffer (vm, from[3]); \
+ CLIB_PREFETCH (b2, CLIB_CACHE_LINE_BYTES, STORE); \
+ CLIB_PREFETCH (b3, CLIB_CACHE_LINE_BYTES, STORE); \
+ CLIB_PREFETCH (b2->data, CLIB_CACHE_LINE_BYTES , STORE); \
+ CLIB_PREFETCH (b3->data, CLIB_CACHE_LINE_BYTES , STORE); \
+ } \
+ /* Dequeue a packet buffer */ \
+ bi0 = from[0]; \
+ bi1 = from[1]; \
+ from += 2; \
+ n_left_from -= 2; \
+ to_next[0] = bi0; \
+ to_next[1] = bi1; \
+ to_next += 2; \
+ n_left_to_next -= 2; \
+ \
+ b0 = vlib_get_buffer (vm, bi0); \
+ b1 = vlib_get_buffer (vm, bi1); \
+ \
+ hicn_face_id_t face_id0 = vnet_buffer (b0)->ip.adj_index[VLIB_TX]; \
+ hicn_face_id_t face_id1 = vnet_buffer (b1)->ip.adj_index[VLIB_TX]; \
+ face0 = \
+ hicn_dpoi_get_from_idx (face_id0); \
+ face1 = \
+ hicn_dpoi_get_from_idx (face_id1); \
+ \
+ if (PREDICT_TRUE(face0 != NULL)) \
+ { \
+ hicn_face_rewrite_interest \
+ (vm, b0, face0, &next0); \
+ stats.pkts_interest_count += 1; \
+ vlib_increment_combined_counter ( \
+ &counters[face_id0 * HICN_N_COUNTER], \
+ thread_index, \
+ HICN_FACE_COUNTERS_INTEREST_TX, \
+ 1, \
+ vlib_buffer_length_in_chain(vm, b0)); \
+ } \
+ \
+ if (PREDICT_TRUE(face1 != NULL)) \
+ { \
+ hicn_face_rewrite_interest \
+ (vm, b1, face1, &next1); \
+ stats.pkts_interest_count += 1; \
+ vlib_increment_combined_counter ( \
+ &counters[face_id1 * HICN_N_COUNTER], \
+ thread_index, \
+ HICN_FACE_COUNTERS_INTEREST_TX, \
+ 1, \
+ vlib_buffer_length_in_chain(vm, b1)); \
+ } \
+ \
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) && \
+ (b0->flags & VLIB_BUFFER_IS_TRACED))) \
+ { \
+ TRACE_OUTPUT_PKT_IP##ipv *t = \
+ vlib_add_trace (vm, node, b0, sizeof (*t)); \
+ t->pkt_type = HICN_PKT_TYPE_INTEREST; \
+ t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX]; \
+ t->next_index = next0; \
+ clib_memcpy_fast (t->packet_data, \
+ vlib_buffer_get_current (b0), \
+ sizeof (t->packet_data)); \
+ } \
+ \
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) && \
+ (b1->flags & VLIB_BUFFER_IS_TRACED))) \
+ { \
+ TRACE_OUTPUT_PKT_IP##ipv *t = \
+ vlib_add_trace (vm, node, b1, sizeof (*t)); \
+ t->pkt_type = HICN_PKT_TYPE_INTEREST; \
+ t->sw_if_index = vnet_buffer (b1)->sw_if_index[VLIB_RX]; \
+ t->next_index = next1; \
+ clib_memcpy_fast (t->packet_data, \
+ vlib_buffer_get_current (b1), \
+ sizeof (t->packet_data)); \
+ } \
+ \
+ \
+ /* Verify speculative enqueue, maybe switch current next frame */ \
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index, \
+ to_next, n_left_to_next, \
+ bi0, bi1, next0, next1); \
+ }while(0)
+
+
+/**
+ * @brief IPv4 face output node function: dispatches buffers through
+ * face_output_x2/x1 and accumulates the interest counter.
+ */
+static uword
+hicn4_face_output_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ u32 n_left_from, *from, *to_next, next_index;
+ vl_api_hicn_api_node_stats_get_reply_t stats = { 0 };
+ u32 thread_index = vm->thread_index;
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+    {
+ u32 n_left_to_next;
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ /* Dual loop, X2 */
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+	{
+ face_output_x2 (4);
+	}
+
+ /* Single loop, X1 */
+ while (n_left_from > 0 && n_left_to_next > 0)
+	{
+ face_output_x1 (4);
+	}
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+    }
+
+ vlib_node_increment_counter (vm, node->node_index,
+ HICNFWD_ERROR_INTERESTS,
+ stats.pkts_interest_count);
+
+ return (frame->n_vectors);
+}
+
+/* packet trace format function */
+static u8 *
+hicn4_face_output_format_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ hicn4_face_output_trace_t *t =
+ va_arg (*args, hicn4_face_output_trace_t *);
+
+ s =
+ format (s, "FACE_IP4_OUTPUT: pkt: %d, sw_if_index %d, next index %d\n%U",
+ (int) t->pkt_type, t->sw_if_index, t->next_index,
+ format_ip4_header, t->packet_data, sizeof (t->packet_data));
+ return (s);
+}
+
+/*
+ * Node registration for the interest forwarder node
+ */
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE(hicn4_face_output_node) =
+{
+ .function = hicn4_face_output_node_fn,
+ .name = "hicn4-face-output",
+ .vector_size = sizeof(u32),
+ .format_trace = hicn4_face_output_format_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+ .n_errors = ARRAY_LEN(hicn4_face_output_error_strings),
+ .error_strings = hicn4_face_output_error_strings,
+ .n_next_nodes = HICN4_FACE_OUTPUT_N_NEXT,
+ /* Reusing the list of nodes from lookup to be compatible with arp */
+ .next_nodes =
+ {
+ [HICN4_FACE_OUTPUT_NEXT_ECHO_REPLY] = "hicn4-face-input",
+ [HICN4_FACE_OUTPUT_NEXT_UDP4_ENCAP] = "udp4-encap",
+ [HICN4_FACE_OUTPUT_NEXT_UDP6_ENCAP] = "udp6-encap"
+ }
+};
+/* *INDENT-ON* */
+
+
+/*
+ * hicn6-face-output node function.
+ *
+ * IPv6 twin of hicn4_face_output_node_fn: same dual/tail dispatch loops,
+ * instantiating the face_output_x1/x2 macros with ipv == 6.
+ */
+static uword
+hicn6_face_output_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
+			   vlib_frame_t * frame)
+{
+  u32 n_left_from, *from, *to_next, next_index;
+  /* Per-call stats accumulated by the face_output_x1/x2 macros. */
+  vl_api_hicn_api_node_stats_get_reply_t stats = { 0 };
+  u32 thread_index = vm->thread_index;
+
+  from = vlib_frame_vector_args (frame);
+  n_left_from = frame->n_vectors;
+  next_index = node->cached_next_index;
+
+  while (n_left_from > 0)
+    {
+      u32 n_left_to_next;
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+      /* Dual loop, X2 */
+      while (n_left_from >= 4 && n_left_to_next >= 2)
+	{
+	  face_output_x2 (6);
+	}
+
+      /* Single loop, X1 */
+      while (n_left_from > 0 && n_left_to_next > 0)
+	{
+	  face_output_x1 (6);
+	}
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+    }
+
+  vlib_node_increment_counter (vm, node->node_index,
+			       HICNFWD_ERROR_INTERESTS,
+			       stats.pkts_interest_count);
+
+  return (frame->n_vectors);
+}
+
+/* packet trace format function */
+static u8 *
+hicn6_face_output_format_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ hicn6_face_output_trace_t *t =
+ va_arg (*args, hicn6_face_output_trace_t *);
+
+ s =
+ format (s, "FACE_IP6_OUTPUT: pkt: %d, sw_if_index %d, next index %d\n%U",
+ (int) t->pkt_type, t->sw_if_index, t->next_index,
+ format_ip6_header, t->packet_data, sizeof (t->packet_data));
+ return (s);
+}
+
+/*
+ * Node registration for the interest forwarder node
+ */
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE(hicn6_face_output_node) =
+{
+ .function = hicn6_face_output_node_fn,
+ .name = "hicn6-face-output",
+ .vector_size = sizeof(u32),
+ .format_trace = hicn6_face_output_format_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+ .n_errors = ARRAY_LEN(hicn6_face_output_error_strings),
+ .error_strings = hicn6_face_output_error_strings,
+ .n_next_nodes = HICN6_FACE_OUTPUT_N_NEXT,
+ /* Reusing the list of nodes from lookup to be compatible with neighbour discovery */
+ .next_nodes =
+ {
+ [HICN6_FACE_OUTPUT_NEXT_ECHO_REPLY] = "hicn6-face-input",
+ [HICN6_FACE_OUTPUT_NEXT_UDP4_ENCAP] = "udp4-encap",
+ [HICN6_FACE_OUTPUT_NEXT_UDP6_ENCAP] = "udp6-encap"
+ }
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/hicn-plugin/src/network/faces/face_node.h b/hicn-plugin/src/network/faces/face_node.h
new file mode 100644
index 000000000..f5a8bf5ae
--- /dev/null
+++ b/hicn-plugin/src/network/faces/face_node.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2020 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HICN_FACE_NODE_H__
+#define __HICN_FACE_NODE_H__
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+
+/**
+ * @file face_node.h
+ *
+ * Implements the input and output face nodes. Input face nodes
+ * process incoming data while output face nodes process outgoing
+ * interest packets.
+ *
+ * Input face nodes follow hicn-face-input nodes and their purpose
+ * is to retrieve the list of possible incoming faces for each data packet.
+ * The node following the input face nodes is the hicn-data-pcslookup.
+ * Output face nodes follow the strategy and the hicn-interest-hitpit nodes and
+ * they perform the src nat on each interest packet. The node following the
+ * output face nodes depends on the adjacency type. In case of ip, the following
+ * node is the ip-rewrite, in case of tunnels the next node is the one implementing
+ * the tunnel encapsulation (udp-encap, mpls, etc).
+ */
+
+extern vlib_node_registration_t hicn4_face_input_node;
+extern vlib_node_registration_t hicn4_face_output_node;
+extern vlib_node_registration_t hicn6_face_input_node;
+extern vlib_node_registration_t hicn6_face_output_node;
+
+#endif // __HICN_FACE_NODE_H__
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/hicn-plugin/src/network/faces/iface_node.c b/hicn-plugin/src/network/faces/iface_node.c
new file mode 100644
index 000000000..433cf0b02
--- /dev/null
+++ b/hicn-plugin/src/network/faces/iface_node.c
@@ -0,0 +1,915 @@
+/*
+ * Copyright (c) 2020 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "face.h"
+#include "../strategy_dpo_manager.h"
+#include "../hicn.h"
+#include "../infra.h"
+#include "../cache_policies/cs_lru.h"
+
+/**
+ * @file iface_node.c
+ *
+ * Definition of the input/output nodes for incomplete hICN faces (ifaces).
+ */
+
+vlib_node_registration_t hicn4_iface_input_node;
+vlib_node_registration_t hicn4_iface_output_node;
+vlib_node_registration_t hicn6_iface_input_node;
+vlib_node_registration_t hicn6_iface_output_node;
+
+u32 data_fwd_iface_ip4_vlib_edge;
+u32 data_fwd_iface_ip6_vlib_edge;
+
+static char *hicn4_iface_input_error_strings[] = {
+#define _(sym, string) string,
+ foreach_hicnfwd_error
+#undef _
+};
+
+static char *hicn6_iface_input_error_strings[] = {
+#define _(sym, string) string,
+ foreach_hicnfwd_error
+#undef _
+};
+
+/* Trace context struct */
+typedef struct
+{
+ u32 next_index;
+ u32 sw_if_index;
+ u8 pkt_type;
+ u8 packet_data[60];
+} hicn4_iface_input_trace_t;
+
+typedef enum
+{
+ HICN4_IFACE_INPUT_NEXT_INTEREST,
+ HICN4_IFACE_INPUT_NEXT_MAPME,
+ HICN4_IFACE_INPUT_NEXT_ERROR_DROP,
+ HICN4_IFACE_INPUT_N_NEXT,
+} hicn4_iface_input_next_t;
+
+/* Trace context struct */
+typedef struct
+{
+ u32 next_index;
+ u32 sw_if_index;
+ u8 pkt_type;
+ u8 packet_data[60];
+} hicn6_iface_input_trace_t;
+
+typedef enum
+{
+ HICN6_IFACE_INPUT_NEXT_INTEREST,
+ HICN6_IFACE_INPUT_NEXT_MAPME,
+ HICN6_IFACE_INPUT_NEXT_ERROR_DROP,
+ HICN6_IFACE_INPUT_N_NEXT,
+} hicn6_iface_input_next_t;
+
+#define NEXT_MAPME_IP4 HICN4_IFACE_INPUT_NEXT_MAPME
+#define NEXT_MAPME_IP6 HICN6_IFACE_INPUT_NEXT_MAPME
+
+#define NEXT_INTEREST_IP4 HICN4_IFACE_INPUT_NEXT_INTEREST
+#define NEXT_INTEREST_IP6 HICN6_IFACE_INPUT_NEXT_INTEREST
+
+#define ADDRESS_IP4 ip_interface_address_t *ia = 0;ip4_address_t *local_address = ip4_interface_first_address(&ip4_main, swif, &ia)
+#define ADDRESS_IP6 ip6_address_t *local_address = ip6_interface_first_address(&ip6_main, swif)
+
+#define ADDRESSX2_IP4 ip_interface_address_t *ia0, *ia1; ia0 = ia1 = 0; \
+ ip4_address_t *local_address0 = ip4_interface_first_address(&ip4_main, swif0, &ia0); \
+ ip4_address_t *local_address1 = ip4_interface_first_address(&ip4_main, swif1, &ia1);
+
+#define ADDRESSX2_IP6 ip6_address_t *local_address0 = ip6_interface_first_address(&ip6_main, swif0); \
+ ip6_address_t *local_address1 = ip6_interface_first_address(&ip6_main, swif1);
+
+#define DPO_ADD_LOCK_IFACE_IP4 hicn_iface_ip4_add_and_lock
+#define DPO_ADD_LOCK_IFACE_IP6 hicn_iface_ip6_add_and_lock
+
+//#define VLIB_EDGE_IP4 data_fwd_iface_ip4_vlib_edge
+//#define VLIB_EDGE_IP6 data_fwd_iface_ip6_vlib_edge
+
+#define IP_HEADER_4 ip4_header_t
+#define IP_HEADER_6 ip6_header_t
+
+#define TRACE_INPUT_PKT_IP4 hicn4_iface_input_trace_t
+#define TRACE_INPUT_PKT_IP6 hicn6_iface_input_trace_t
+
+// NODE OUTPUT
+
+static char *hicn4_iface_output_error_strings[] = {
+#define _(sym, string) string,
+ foreach_hicnfwd_error
+#undef _
+};
+
+static char *hicn6_iface_output_error_strings[] = {
+#define _(sym, string) string,
+ foreach_hicnfwd_error
+#undef _
+};
+
+
+/* Trace context struct */
+typedef struct
+{
+ u32 next_index;
+ u32 sw_if_index;
+ u8 pkt_type;
+ u8 packet_data[60];
+} hicn4_iface_output_trace_t;
+
+typedef enum
+{
+ HICN4_IFACE_OUTPUT_NEXT_LOOKUP,
+ HICN4_IFACE_OUTPUT_NEXT_UDP4_ENCAP,
+ HICN4_IFACE_OUTPUT_NEXT_UDP6_ENCAP,
+ HICN4_IFACE_OUTPUT_N_NEXT,
+} hicn4_iface_output_next_t;
+
+/* Trace context struct */
+typedef struct
+{
+ u32 next_index;
+ u32 sw_if_index;
+ u8 pkt_type;
+ u8 packet_data[60];
+} hicn6_iface_output_trace_t;
+
+typedef enum
+{
+ HICN6_IFACE_OUTPUT_NEXT_LOOKUP,
+ HICN6_IFACE_OUTPUT_NEXT_UDP4_ENCAP,
+ HICN6_IFACE_OUTPUT_NEXT_UDP6_ENCAP,
+ HICN6_IFACE_OUTPUT_N_NEXT,
+} hicn6_iface_output_next_t;
+
+//#define ERROR_OUTPUT_IP4 HICN4_IFACE_OUTPUT_NEXT_ERROR_DROP
+//#define ERROR_OUTPUT_IP6 HICN6_IFACE_OUTPUT_NEXT_ERROR_DROP
+
+#define NEXT_DATA_LOOKUP_IP4 HICN4_IFACE_OUTPUT_NEXT_LOOKUP
+#define NEXT_DATA_LOOKUP_IP6 HICN6_IFACE_OUTPUT_NEXT_LOOKUP
+
+#define NEXT_UDP_ENCAP_IP4 HICN4_IFACE_OUTPUT_NEXT_UDP4_ENCAP
+#define NEXT_UDP_ENCAP_IP6 HICN6_IFACE_OUTPUT_NEXT_UDP6_ENCAP
+
+#define HICN_REWRITE_DATA_IP4 hicn_rewrite_iface_data4
+#define HICN_REWRITE_DATA_IP6 hicn_rewrite_iface_data6
+
+#define TRACE_OUTPUT_PKT_IP4 hicn4_iface_output_trace_t
+#define TRACE_OUTPUT_PKT_IP6 hicn6_iface_output_trace_t
+
+// NODES IMPLEMENTATIONS
+
+/*
+ * iface_input_x1(ipv): process one interest buffer inside the node
+ * dispatch loop.  Expands against locals declared by the caller (vm,
+ * node, from, to_next, n_left_from, n_left_to_next, next_index, stats,
+ * thread_index).  Prefetches the next buffer, routes ICMP to the mapme
+ * node and everything else to interest-pcslookup, creates/locks the
+ * incoming iface DPO and bumps the per-face interest RX counter.
+ * No comments inside the body: it is a backslash-continued macro.
+ */
+#define iface_input_x1(ipv)                                             \
+  do {                                                                  \
+  vlib_buffer_t *b0;                                                    \
+  u32 bi0, next0, next_iface0;                                          \
+  IP_HEADER_##ipv * ip_hdr = NULL;                                      \
+  hicn_buffer_t * hicnb0;                                               \
+  /* Prefetch for next iteration. */                                    \
+  if (n_left_from > 1)                                                  \
+    {                                                                   \
+      vlib_buffer_t *b1;                                                \
+      b1 = vlib_get_buffer (vm, from[1]);                               \
+      CLIB_PREFETCH (b1, 2*CLIB_CACHE_LINE_BYTES, STORE);               \
+      CLIB_PREFETCH (b1->data, CLIB_CACHE_LINE_BYTES , LOAD);           \
+    }                                                                   \
+  /* Dequeue a packet buffer */                                         \
+  bi0 = from[0];                                                        \
+  from += 1;                                                            \
+  n_left_from -= 1;                                                     \
+  to_next[0] = bi0;                                                     \
+  to_next += 1;                                                         \
+  n_left_to_next -= 1;                                                  \
+                                                                        \
+  b0 = vlib_get_buffer (vm, bi0);                                       \
+  hicnb0 = hicn_get_buffer(b0);                                         \
+  ip_hdr = (IP_HEADER_##ipv *) vlib_buffer_get_current(b0);             \
+                                                                        \
+  stats.pkts_interest_count += 1;                                       \
+                                                                        \
+  u8 is_icmp = ip_hdr->protocol == IPPROTO_ICMPV##ipv;                  \
+                                                                        \
+  next0 = is_icmp*NEXT_MAPME_IP##ipv +                                  \
+          (1-is_icmp)*NEXT_INTEREST_IP##ipv;                            \
+                                                                        \
+  next_iface0 = NEXT_DATA_LOOKUP_IP##ipv;                               \
+                                                                        \
+  if (hicnb0->flags & HICN_BUFFER_FLAGS_FROM_UDP4_TUNNEL)               \
+    next_iface0 = NEXT_UDP_ENCAP_IP4;                                   \
+  else if(hicnb0->flags & HICN_BUFFER_FLAGS_FROM_UDP6_TUNNEL)           \
+    next_iface0 = NEXT_UDP_ENCAP_IP6;                                   \
+                                                                        \
+  DPO_ADD_LOCK_IFACE_IP##ipv                                            \
+  (&(hicnb0->face_id),                                                  \
+   &hicnb0->flags,                                                      \
+   &(ip_hdr->src_address),                                              \
+   vnet_buffer(b0)->sw_if_index[VLIB_RX],                               \
+   vnet_buffer(b0)->ip.adj_index[VLIB_RX],                              \
+   next_iface0);                                                        \
+                                                                        \
+  if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) &&            \
+                     (b0->flags & VLIB_BUFFER_IS_TRACED)))              \
+    {                                                                   \
+      TRACE_INPUT_PKT_IP##ipv *t =                                      \
+        vlib_add_trace (vm, node, b0, sizeof (*t));                     \
+      t->pkt_type = HICN_PKT_TYPE_INTEREST;                             \
+      t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];          \
+      t->next_index = next0;                                            \
+      clib_memcpy_fast (t->packet_data,                                 \
+                        vlib_buffer_get_current (b0),                   \
+                        sizeof (t->packet_data));                       \
+                                                                        \
+    }                                                                   \
+                                                                        \
+  vlib_increment_combined_counter (                                     \
+                                   &counters[hicnb0->face_id            \
+                                             * HICN_N_COUNTER], thread_index, \
+                                   HICN_FACE_COUNTERS_INTEREST_RX,      \
+                                   1,                                   \
+                                   vlib_buffer_length_in_chain(vm, b0)); \
+                                                                        \
+  /* Verify speculative enqueue, maybe switch current next frame */     \
+  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,                \
+                                   to_next, n_left_to_next,             \
+                                   bi0, next0);                         \
+  }while(0)
+
+
+#define iface_input_x2(ipv) \
+ do { \
+ vlib_buffer_t *b0, *b1; \
+ u32 bi0, bi1, next0, next1, next_iface0, next_iface1; \
+ IP_HEADER_##ipv * ip_hdr0 = NULL; \
+ IP_HEADER_##ipv * ip_hdr1 = NULL; \
+ hicn_buffer_t *hicnb0, *hicnb1; \
+ \
+ /* Prefetch for next iteration. */ \
+ vlib_buffer_t *b2, *b3; \
+ b2 = vlib_get_buffer (vm, from[2]); \
+ b3 = vlib_get_buffer (vm, from[3]); \
+ CLIB_PREFETCH (b2, 2*CLIB_CACHE_LINE_BYTES, STORE); \
+ CLIB_PREFETCH (b3, 2*CLIB_CACHE_LINE_BYTES, STORE); \
+ CLIB_PREFETCH (b2->data, CLIB_CACHE_LINE_BYTES , LOAD); \
+ CLIB_PREFETCH (b3->data, CLIB_CACHE_LINE_BYTES , LOAD); \
+ \
+ /* Dequeue a packet buffer */ \
+ bi0 = from[0]; \
+ bi1 = from[1]; \
+ from += 2; \
+ n_left_from -= 2; \
+ to_next[0] = bi0; \
+ to_next[1] = bi1; \
+ to_next += 2; \
+ n_left_to_next -= 2; \
+ \
+ b0 = vlib_get_buffer (vm, bi0); \
+ b1 = vlib_get_buffer (vm, bi1); \
+ hicnb0 = hicn_get_buffer(b0); \
+ hicnb1 = hicn_get_buffer(b1); \
+ ip_hdr0 = (IP_HEADER_##ipv *) vlib_buffer_get_current(b0); \
+ ip_hdr1 = (IP_HEADER_##ipv *) vlib_buffer_get_current(b1); \
+ \
+ stats.pkts_interest_count += 2; \
+ \
+ u8 is_icmp0 = ip_hdr0->protocol == IPPROTO_ICMPV##ipv; \
+ u8 is_icmp1 = ip_hdr1->protocol == IPPROTO_ICMPV##ipv; \
+ \
+ next0 = is_icmp0*NEXT_MAPME_IP##ipv + \
+ (1-is_icmp0)*NEXT_INTEREST_IP##ipv; \
+ \
+ next1 = is_icmp1*NEXT_MAPME_IP##ipv + \
+ (1-is_icmp1)*NEXT_INTEREST_IP##ipv; \
+ \
+ next_iface0 = NEXT_DATA_LOOKUP_IP##ipv; \
+ \
+ if (hicnb0->flags & HICN_BUFFER_FLAGS_FROM_UDP4_TUNNEL) \
+ next_iface0 = NEXT_UDP_ENCAP_IP4; \
+ else if(hicnb0->flags & HICN_BUFFER_FLAGS_FROM_UDP6_TUNNEL) \
+ next_iface0 = NEXT_UDP_ENCAP_IP6; \
+ \
+ next_iface1 = NEXT_DATA_LOOKUP_IP##ipv; \
+ \
+ if (hicnb1->flags & HICN_BUFFER_FLAGS_FROM_UDP4_TUNNEL) \
+ next_iface1 = NEXT_UDP_ENCAP_IP4; \
+ else if(hicnb1->flags & HICN_BUFFER_FLAGS_FROM_UDP6_TUNNEL) \
+ next_iface1 = NEXT_UDP_ENCAP_IP6; \
+ \
+ DPO_ADD_LOCK_IFACE_IP##ipv \
+ (&(hicnb0->face_id), \
+ &hicnb0->flags, \
+ &(ip_hdr0->src_address), \
+ vnet_buffer(b0)->sw_if_index[VLIB_RX], \
+ vnet_buffer(b0)->ip.adj_index[VLIB_RX], \
+ next_iface0); \
+ \
+ DPO_ADD_LOCK_IFACE_IP##ipv \
+ (&(hicnb1->face_id), \
+ &hicnb1->flags, \
+ &(ip_hdr1->src_address), \
+ vnet_buffer(b1)->sw_if_index[VLIB_RX], \
+ vnet_buffer(b1)->ip.adj_index[VLIB_RX], \
+ next_iface1); \
+ \
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) && \
+ (b0->flags & VLIB_BUFFER_IS_TRACED))) \
+ { \
+ TRACE_INPUT_PKT_IP##ipv *t = \
+ vlib_add_trace (vm, node, b0, sizeof (*t)); \
+ t->pkt_type = HICN_PKT_TYPE_INTEREST; \
+ t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX]; \
+ t->next_index = next0; \
+ clib_memcpy_fast (t->packet_data, \
+ vlib_buffer_get_current (b0), \
+ sizeof (t->packet_data)); \
+ } \
+ \
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) && \
+ (b1->flags & VLIB_BUFFER_IS_TRACED))) \
+ { \
+ TRACE_INPUT_PKT_IP##ipv *t = \
+ vlib_add_trace (vm, node, b1, sizeof (*t)); \
+ t->pkt_type = HICN_PKT_TYPE_INTEREST; \
+ t->sw_if_index = vnet_buffer (b1)->sw_if_index[VLIB_RX]; \
+ t->next_index = next1; \
+ clib_memcpy_fast (t->packet_data, \
+ vlib_buffer_get_current (b1), \
+ sizeof (t->packet_data)); \
+ } \
+ \
+ vlib_increment_combined_counter ( \
+ &counters[hicnb0->face_id \
+ * HICN_N_COUNTER], thread_index, \
+ HICN_FACE_COUNTERS_INTEREST_RX, \
+ 1, \
+ vlib_buffer_length_in_chain(vm, b0)); \
+ \
+ vlib_increment_combined_counter ( \
+ &counters[hicnb1->face_id \
+ * HICN_N_COUNTER], thread_index, \
+ HICN_FACE_COUNTERS_INTEREST_RX, \
+ 1, \
+ vlib_buffer_length_in_chain(vm, b1)); \
+ \
+ /* Verify speculative enqueue, maybe switch current next frame */ \
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index, \
+ to_next, n_left_to_next, \
+ bi0, bi1, next0, next1); \
+ }while(0)
+
+/*
+ * hicn4-iface-input node function.
+ *
+ * Consumes incoming IPv4 interest buffers two at a time (iface_input_x2)
+ * with a single-buffer tail loop (iface_input_x1), then credits
+ * HICNFWD_ERROR_INTERESTS with the number of interests seen.
+ */
+static uword
+hicn4_iface_input_node_fn (vlib_main_t * vm,
+			   vlib_node_runtime_t * node,
+			   vlib_frame_t * frame)
+{
+  u32 n_left_from, *from, *to_next, next_index;
+
+  from = vlib_frame_vector_args (frame);
+  n_left_from = frame->n_vectors;
+  next_index = node->cached_next_index;
+  /* Per-call stats accumulated by the iface_input_x1/x2 macros. */
+  vl_api_hicn_api_node_stats_get_reply_t stats = { 0 };
+  u32 thread_index = vm->thread_index;
+
+  while (n_left_from > 0)
+    {
+      u32 n_left_to_next;
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+      /* Dual loop, X2 */
+      while (n_left_from >= 4 && n_left_to_next >= 2)
+	{
+	  iface_input_x2 (4);
+	}
+
+      /* Single loop, X1 */
+      while (n_left_from > 0 && n_left_to_next > 0)
+	{
+	  iface_input_x1 (4);
+	}
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+    }
+
+  vlib_node_increment_counter (vm, node->node_index,
+			       HICNFWD_ERROR_INTERESTS,
+			       stats.pkts_interest_count);
+
+  return (frame->n_vectors);
+}
+
+/* packet trace format function */
+static u8 *
+hicn4_iface_input_format_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ hicn4_iface_input_trace_t *t =
+ va_arg (*args, hicn4_iface_input_trace_t *);
+
+ s =
+ format (s, "IFACE_IP4_INPUT: pkt: %d, sw_if_index %d, next index %d\n%U",
+ (int) t->pkt_type, t->sw_if_index, t->next_index,
+ format_ip4_header, t->packet_data, sizeof (t->packet_data));
+ return (s);
+}
+
+/*
+ * Node registration for the interest forwarder node
+ */
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (hicn4_iface_input_node) =
+{
+ .function = hicn4_iface_input_node_fn,
+ .name = "hicn4-iface-input",
+ .vector_size = sizeof (u32),
+ .format_trace = hicn4_iface_input_format_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+ .n_errors = ARRAY_LEN (hicn4_iface_input_error_strings),
+ .error_strings = hicn4_iface_input_error_strings,
+ .n_next_nodes = HICN4_IFACE_INPUT_N_NEXT,
+ /* edit / add dispositions*/
+ .next_nodes =
+ {
+ [HICN4_IFACE_INPUT_NEXT_INTEREST] = "hicn-interest-pcslookup",
+ [HICN4_IFACE_INPUT_NEXT_MAPME] = "hicn-mapme-ctrl",
+ [HICN4_IFACE_INPUT_NEXT_ERROR_DROP] = "error-drop",
+ },
+};
+/* *INDENT-ON* */
+
+/*
+ * hicn6-iface-input node function.
+ *
+ * IPv6 twin of hicn4_iface_input_node_fn: same dual/tail dispatch loops,
+ * instantiating the iface_input_x1/x2 macros with ipv == 6.
+ */
+static uword
+hicn6_iface_input_node_fn (vlib_main_t * vm,
+			   vlib_node_runtime_t * node,
+			   vlib_frame_t * frame)
+{
+  u32 n_left_from, *from, *to_next, next_index;
+
+  from = vlib_frame_vector_args (frame);
+  n_left_from = frame->n_vectors;
+  next_index = node->cached_next_index;
+  /* Per-call stats accumulated by the iface_input_x1/x2 macros. */
+  vl_api_hicn_api_node_stats_get_reply_t stats = { 0 };
+  u32 thread_index = vm->thread_index;
+
+  while (n_left_from > 0)
+    {
+      u32 n_left_to_next;
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+      /* Dual loop, X2 */
+      while (n_left_from >= 4 && n_left_to_next >= 2)
+	{
+	  iface_input_x2 (6);
+	}
+
+      /* Single loop, X1 */
+      while (n_left_from > 0 && n_left_to_next > 0)
+	{
+	  iface_input_x1 (6);
+	}
+
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+    }
+
+  vlib_node_increment_counter (vm, node->node_index,
+			       HICNFWD_ERROR_INTERESTS,
+			       stats.pkts_interest_count);
+
+  return (frame->n_vectors);
+}
+
+/* packet trace format function */
+static u8 *
+hicn6_iface_input_format_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ hicn6_iface_input_trace_t *t =
+ va_arg (*args, hicn6_iface_input_trace_t *);
+
+ s =
+ format (s, "IFACE_IP6_INPUT: pkt: %d, sw_if_index %d, next index %d\n%U",
+ (int) t->pkt_type, t->sw_if_index, t->next_index,
+ format_ip6_header, t->packet_data, sizeof (t->packet_data));
+ return (s);
+}
+
+/*
+ * Node registration for the interest forwarder node
+ */
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (hicn6_iface_input_node) =
+{
+ .function = hicn6_iface_input_node_fn,
+ .name = "hicn6-iface-input",
+ .vector_size = sizeof (u32),
+ .format_trace = hicn6_iface_input_format_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+ .n_errors = ARRAY_LEN (hicn6_iface_input_error_strings),
+ .error_strings = hicn6_iface_input_error_strings,
+ .n_next_nodes = HICN6_IFACE_INPUT_N_NEXT,
+ /* edit / add dispositions*/
+ .next_nodes =
+ {
+ [HICN6_IFACE_INPUT_NEXT_INTEREST] = "hicn-interest-pcslookup",
+ [HICN6_IFACE_INPUT_NEXT_MAPME] = "hicn-mapme-ctrl",
+ [HICN6_IFACE_INPUT_NEXT_ERROR_DROP] = "error-drop",
+ },
+};
+/* *INDENT-ON* */
+
+
+/**** IFACE OUTPUT *****/
+
+/*
+ * Rewrite an outgoing IPv4 data packet for an iface: refresh the total
+ * length and TTL, steer the buffer to the iface's DPO, and apply the
+ * hICN dst-nat rewrite using the iface's nat address and path-label id.
+ */
+static inline void
+hicn_rewrite_iface_data4 (vlib_main_t * vm, vlib_buffer_t * b0,
+			  const hicn_face_t * iface, u32 * next)
+{
+  ip4_header_t *ip0;
+
+  /* Get the pointer to the old ip and tcp header */
+  ip0 = vlib_buffer_get_current (b0);
+
+  /* Set up the ip4 header */
+  /* IP4 length contains the size of the ip4 header too */
+  u16 sval = (vlib_buffer_length_in_chain (vm, b0));
+  ip0->length = clib_host_to_net_u16 (sval);
+  ip0->ttl = 254; // FIXME TTL
+
+  /* Hand the buffer to the iface's adjacency / next node. */
+  vnet_buffer (b0)->ip.adj_index[VLIB_TX] = iface->dpo.dpoi_index;
+  *next = iface->dpo.dpoi_next_node;
+  hicn_header_t *hicn = vlib_buffer_get_current (b0);
+
+  ip46_address_t temp_addr;
+  ip46_address_reset (&temp_addr);
+  hicn_type_t type = hicn_get_buffer (b0)->type;
+  hicn_ops_vft[type.l1]->rewrite_data (type, &hicn->protocol,
+				       &(iface->nat_addr), &(temp_addr),
+				       iface->pl_id);
+}
+
+/*
+ * Rewrite an outgoing IPv6 data packet for an iface: refresh the payload
+ * length and hop limit, steer the buffer to the iface's DPO, and apply
+ * the hICN dst-nat rewrite using the iface's nat address and path-label id.
+ */
+static inline void
+hicn_rewrite_iface_data6 (vlib_main_t * vm, vlib_buffer_t * b0,
+			  const hicn_face_t * iface, u32 * next)
+{
+  ip6_header_t *ip0;
+
+  /* Get the pointer to the old ip and tcp header */
+  /* Copy the previous ip and tcp header to the new portion of memory */
+  ip0 = vlib_buffer_get_current (b0);
+
+  /* Set up the ip6 header */
+  /* IP6 length does not include the size of the ip6 header */
+  u16 sval = (vlib_buffer_length_in_chain (vm, b0) - (sizeof (ip6_header_t)));
+  ip0->payload_length = clib_host_to_net_u16 (sval);
+  ip0->hop_limit = HICN_IP6_HOP_LIMIT;
+
+  /* Hand the buffer to the iface's adjacency / next node. */
+  vnet_buffer (b0)->ip.adj_index[VLIB_TX] = iface->dpo.dpoi_index;
+  *next = iface->dpo.dpoi_next_node;
+
+  hicn_header_t *hicn = vlib_buffer_get_current (b0);
+
+  ip46_address_t temp_addr;
+  ip46_address_reset (&temp_addr);
+  hicn_type_t type = hicn_get_buffer (b0)->type;
+  hicn_ops_vft[type.l1]->rewrite_data (type, &hicn->protocol,
+				       &(iface->nat_addr), &(temp_addr),
+				       iface->pl_id);
+}
+
+/*
+ * iface_output_x1(ipv): process one outgoing data buffer inside the node
+ * dispatch loop.  Expands against locals declared by the caller (vm,
+ * node, from, to_next, n_left_from, n_left_to_next, next_index, stats,
+ * thread_index).  Looks the face up from the TX adjacency index stashed
+ * in the buffer, rewrites the packet for that face and bumps the
+ * per-face data TX counter.
+ * No comments inside the body: it is a backslash-continued macro.
+ */
+#define iface_output_x1(ipv)                                            \
+  do {                                                                  \
+  vlib_buffer_t *b0;                                                    \
+  u32 bi0;                                                              \
+  u32 next0 = next_index;                                               \
+  hicn_face_t * face;                                                   \
+                                                                        \
+  /* Prefetch for next iteration. */                                    \
+  if (n_left_from > 1)                                                  \
+    {                                                                   \
+      vlib_buffer_t *b1;                                                \
+      b1 = vlib_get_buffer (vm, from[1]);                               \
+      CLIB_PREFETCH (b1, CLIB_CACHE_LINE_BYTES, STORE);                 \
+      CLIB_PREFETCH (b1->data, CLIB_CACHE_LINE_BYTES , STORE);          \
+    }                                                                   \
+  /* Dequeue a packet buffer */                                         \
+  bi0 = from[0];                                                        \
+  from += 1;                                                            \
+  n_left_from -= 1;                                                     \
+  to_next[0] = bi0;                                                     \
+  to_next += 1;                                                         \
+  n_left_to_next -= 1;                                                  \
+                                                                        \
+  b0 = vlib_get_buffer (vm, bi0);                                       \
+                                                                        \
+  hicn_face_id_t face_id = vnet_buffer (b0)->ip.adj_index[VLIB_TX];     \
+  face =                                                                \
+    hicn_dpoi_get_from_idx (face_id);                                   \
+                                                                        \
+  if (PREDICT_TRUE(face != NULL))                                       \
+    {                                                                   \
+      HICN_REWRITE_DATA_IP##ipv                                         \
+        (vm, b0, face, &next0);                                         \
+      stats.pkts_data_count += 1;                                       \
+      vlib_increment_combined_counter (                                 \
+                                       &counters[face_id * HICN_N_COUNTER], \
+                                       thread_index,                    \
+                                       HICN_FACE_COUNTERS_DATA_TX,      \
+                                       1,                               \
+                                       vlib_buffer_length_in_chain(vm, b0));\
+    }                                                                   \
+                                                                        \
+  if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) &&            \
+                     (b0->flags & VLIB_BUFFER_IS_TRACED)))              \
+    {                                                                   \
+      TRACE_OUTPUT_PKT_IP##ipv *t =                                     \
+        vlib_add_trace (vm, node, b0, sizeof (*t));                     \
+      t->pkt_type = HICN_PKT_TYPE_INTEREST;                             \
+      t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];          \
+      t->next_index = next0;                                            \
+      clib_memcpy_fast (t->packet_data,                                 \
+                        vlib_buffer_get_current (b0),                   \
+                        sizeof (t->packet_data));                       \
+    }                                                                   \
+                                                                        \
+                                                                        \
+  /* Verify speculative enqueue, maybe switch current next frame */     \
+  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,                \
+                                   to_next, n_left_to_next,             \
+                                   bi0, next0);                         \
+  }while(0);                                                            \
+
+
+#define iface_output_x2(ipv) \
+ do { \
+ vlib_buffer_t *b0, *b1; \
+ u32 bi0, bi1; \
+ u32 next0 = next_index; \
+ u32 next1 = next_index; \
+ hicn_face_t *face0, *face1; \
+ \
+ /* Prefetch for next iteration. */ \
+ { \
+ vlib_buffer_t *b2, *b3; \
+ b2 = vlib_get_buffer (vm, from[2]); \
+ b3 = vlib_get_buffer (vm, from[3]); \
+ CLIB_PREFETCH (b2, CLIB_CACHE_LINE_BYTES, STORE); \
+ CLIB_PREFETCH (b3, CLIB_CACHE_LINE_BYTES, STORE); \
+ CLIB_PREFETCH (b2->data, CLIB_CACHE_LINE_BYTES , STORE); \
+ CLIB_PREFETCH (b3->data, CLIB_CACHE_LINE_BYTES , STORE); \
+ } \
+ \
+ /* Dequeue a packet buffer */ \
+ bi0 = from[0]; \
+ bi1 = from[1]; \
+ from += 2; \
+ n_left_from -= 2; \
+ to_next[0] = bi0; \
+ to_next[1] = bi1; \
+ to_next += 2; \
+ n_left_to_next -= 2; \
+ \
+ b0 = vlib_get_buffer (vm, bi0); \
+ b1 = vlib_get_buffer (vm, bi1); \
+ \
+ hicn_face_id_t face_id0 = vnet_buffer (b0)->ip.adj_index[VLIB_TX]; \
+ hicn_face_id_t face_id1 = vnet_buffer (b1)->ip.adj_index[VLIB_TX]; \
+ face0 = \
+ hicn_dpoi_get_from_idx (face_id0); \
+ face1 = \
+ hicn_dpoi_get_from_idx (face_id1); \
+ \
+ if (PREDICT_TRUE(face0 != NULL)) \
+ { \
+ HICN_REWRITE_DATA_IP##ipv \
+ (vm, b0, face0, &next0); \
+ stats.pkts_data_count += 1; \
+ vlib_increment_combined_counter ( \
+ &counters[face_id0 * HICN_N_COUNTER], \
+ thread_index, \
+ HICN_FACE_COUNTERS_DATA_TX, \
+ 1, \
+ vlib_buffer_length_in_chain(vm, b0));\
+ } \
+ \
+ if (PREDICT_TRUE(face1 != NULL)) \
+ { \
+ HICN_REWRITE_DATA_IP##ipv \
+ (vm, b1, face1, &next1); \
+ stats.pkts_data_count += 1; \
+ vlib_increment_combined_counter ( \
+ &counters[face_id1 * HICN_N_COUNTER], \
+ thread_index, \
+ HICN_FACE_COUNTERS_DATA_TX, \
+ 1, \
+ vlib_buffer_length_in_chain(vm, b1)); \
+ } \
+ \
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) && \
+ (b0->flags & VLIB_BUFFER_IS_TRACED))) \
+ { \
+ TRACE_OUTPUT_PKT_IP##ipv *t = \
+ vlib_add_trace (vm, node, b0, sizeof (*t)); \
+ t->pkt_type = HICN_PKT_TYPE_INTEREST; \
+ t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX]; \
+ t->next_index = next0; \
+ clib_memcpy_fast (t->packet_data, \
+ vlib_buffer_get_current (b0), \
+ sizeof (t->packet_data)); \
+ } \
+ \
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) && \
+ (b1->flags & VLIB_BUFFER_IS_TRACED))) \
+ { \
+ TRACE_OUTPUT_PKT_IP##ipv *t = \
+ vlib_add_trace (vm, node, b1, sizeof (*t)); \
+ t->pkt_type = HICN_PKT_TYPE_INTEREST; \
+ t->sw_if_index = vnet_buffer (b1)->sw_if_index[VLIB_RX]; \
+ t->next_index = next1; \
+ clib_memcpy_fast (t->packet_data, \
+ vlib_buffer_get_current (b1), \
+ sizeof (t->packet_data)); \
+ } \
+ \
+ \
+ /* Verify speculative enqueue, maybe switch current next frame */ \
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index, \
+ to_next, n_left_to_next, \
+ bi0, bi1, next0, next1); \
+ }while(0); \
+
+
+
+/*
+ * hicn4-iface-output node function.
+ *
+ * Drains the frame of outgoing IPv4 data buffers, two at a time via
+ * iface_output_x2 with a single-buffer tail loop (iface_output_x1),
+ * then credits HICNFWD_ERROR_DATAS with the number of data packets seen.
+ */
+static uword
+hicn4_iface_output_node_fn (vlib_main_t * vm,
+			    vlib_node_runtime_t * node,
+			    vlib_frame_t * frame)
+{
+  u32 n_left_from, *from, *to_next, next_index;
+  /* Per-call stats accumulated by the iface_output_x1/x2 macros. */
+  vl_api_hicn_api_node_stats_get_reply_t stats = { 0 };
+  u32 thread_index = vm->thread_index;
+
+  from = vlib_frame_vector_args (frame);
+  n_left_from = frame->n_vectors;
+  next_index = node->cached_next_index;
+
+  while (n_left_from > 0)
+    {
+      u32 n_left_to_next;
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+      /* Dual loop, X2 */
+      while (n_left_from >= 4 && n_left_to_next >= 2)
+	{
+	  iface_output_x2 (4);
+	}
+
+      /* Single loop, X1 */
+      while (n_left_from > 0 && n_left_to_next > 0)
+	{
+	  iface_output_x1 (4);
+	}
+
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+    }
+
+  vlib_node_increment_counter (vm, node->node_index,
+			       HICNFWD_ERROR_DATAS, stats.pkts_data_count);
+
+  return (frame->n_vectors);
+}
+
+/* packet trace format function */
+static u8 *
+hicn4_iface_output_format_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ hicn4_iface_output_trace_t *t =
+ va_arg (*args, hicn4_iface_output_trace_t *);
+
+ s =
+ format (s, "IFACE_IP4_OUTPUT: pkt: %d, sw_if_index %d, next index %d\n%U",
+ (int) t->pkt_type, t->sw_if_index, t->next_index,
+ format_ip4_header, t->packet_data, sizeof (t->packet_data));
+ return (s);
+}
+
+/*
+ * Node registration for the interest forwarder node
+ */
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (hicn4_iface_output_node) =
+{
+ .function = hicn4_iface_output_node_fn,
+ .name = "hicn4-iface-output",
+ .vector_size = sizeof (u32),
+ .format_trace = hicn4_iface_output_format_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+ .n_errors = ARRAY_LEN (hicn4_iface_output_error_strings),
+ .error_strings = hicn4_iface_output_error_strings,
+ .n_next_nodes = HICN4_IFACE_OUTPUT_N_NEXT,
+ /* edit / add dispositions here */
+ .next_nodes =
+ {
+ [HICN4_IFACE_OUTPUT_NEXT_LOOKUP] = "ip4-lookup",
+ [HICN4_IFACE_OUTPUT_NEXT_UDP4_ENCAP] = "udp4-encap",
+ [HICN4_IFACE_OUTPUT_NEXT_UDP6_ENCAP] = "udp6-encap"
+ },
+};
+/* *INDENT-ON* */
+
+
+/*
+ * hicn6-iface-output node function.
+ *
+ * IPv6 twin of hicn4_iface_output_node_fn: same dual/tail dispatch loops,
+ * instantiating the iface_output_x1/x2 macros with ipv == 6.
+ */
+static uword
+hicn6_iface_output_node_fn (vlib_main_t * vm,
+			    vlib_node_runtime_t * node,
+			    vlib_frame_t * frame)
+{
+  u32 n_left_from, *from, *to_next, next_index;
+  /* Per-call stats accumulated by the iface_output_x1/x2 macros. */
+  vl_api_hicn_api_node_stats_get_reply_t stats = { 0 };
+  u32 thread_index = vm->thread_index;
+
+  from = vlib_frame_vector_args (frame);
+  n_left_from = frame->n_vectors;
+  next_index = node->cached_next_index;
+
+  while (n_left_from > 0)
+    {
+      u32 n_left_to_next;
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+      /* Dual loop, X2 */
+      while (n_left_from >= 4 && n_left_to_next >= 2)
+	{
+	  iface_output_x2 (6);
+	}
+
+      /* Single loop, X1 */
+      while (n_left_from > 0 && n_left_to_next > 0)
+	{
+	  iface_output_x1 (6);
+	}
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+    }
+
+  vlib_node_increment_counter (vm, node->node_index,
+			       HICNFWD_ERROR_DATAS, stats.pkts_data_count);
+
+  return (frame->n_vectors);
+}
+
+/* packet trace format function */
+static u8 *
+hicn6_iface_output_format_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ hicn6_iface_output_trace_t *t =
+ va_arg (*args, hicn6_iface_output_trace_t *);
+
+ s =
+ format (s, "IFACE_IP6_OUTPUT: pkt: %d, sw_if_index %d, next index %d\n%U",
+ (int) t->pkt_type, t->sw_if_index, t->next_index,
+ format_ip6_header, t->packet_data, sizeof (t->packet_data));
+ return (s);
+}
+
+/*
+ * Node registration for the interest forwarder node
+ */
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (hicn6_iface_output_node) =
+{
+ .function = hicn6_iface_output_node_fn,
+ .name = "hicn6-iface-output",
+ .vector_size = sizeof (u32),
+ .format_trace = hicn6_iface_output_format_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+ .n_errors = ARRAY_LEN (hicn6_iface_output_error_strings),
+ .error_strings = hicn6_iface_output_error_strings,
+ .n_next_nodes = HICN6_IFACE_OUTPUT_N_NEXT,
+ /* edit / add dispositions here */
+ .next_nodes =
+ {
+ [HICN6_IFACE_OUTPUT_NEXT_LOOKUP] = "ip6-lookup",
+ [HICN6_IFACE_OUTPUT_NEXT_UDP4_ENCAP] = "udp4-encap",
+ [HICN6_IFACE_OUTPUT_NEXT_UDP6_ENCAP] = "udp6-encap"
+
+ },
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/hicn-plugin/src/network/faces/iface_node.h b/hicn-plugin/src/network/faces/iface_node.h
new file mode 100644
index 000000000..1a7c4291b
--- /dev/null
+++ b/hicn-plugin/src/network/faces/iface_node.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2020 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HICN_IFACE_NODE_H__
+#define __HICN_IFACE_NODE_H__
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+
+/**
+ * @file iface_node.h
+ *
+ * Implements the input and output iface nodes. Input iface nodes
+ * process incoming interests while output face nodes process outgoing
+ * data packets.
+ *
+ * Input iface nodes follow the ip-lookup nodes and their purpose
+ * is to create (or retrieve, if it already exists) the incoming face
+ * for each interest packet.
+ * The node following the input iface nodes is hicn-interest-pcslookup.
+ * Output iface nodes follow the hicn-data-fwd and hicn-interest-hitcs nodes
+ * and perform the destination NAT on each data packet. The node following
+ * the output iface nodes depends on the adjacency type: for plain ip it is
+ * ip4/6-lookup, while for tunnels it is the node implementing the tunnel
+ * encapsulation (udp-encap, mpls, etc.).
+ */
+
+
+/**
+ * @brief Initialize the ip iface module
+ */
+void hicn_iface_init (vlib_main_t * vm);
+
+#endif // __HICN_IFACE_NODE_H__
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/hicn-plugin/src/network/hashtb.c b/hicn-plugin/src/network/hashtb.c
new file mode 100644
index 000000000..6deddbd84
--- /dev/null
+++ b/hicn-plugin/src/network/hashtb.c
@@ -0,0 +1,1017 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdlib.h>
+#include <errno.h>
+#include <assert.h>
+#include <inttypes.h>
+
+#include <vlib/vlib.h>
+#include <vppinfra/pool.h>
+
+#include "pcs.h"
+#include "hashtb.h"
+#include "parser.h"
+#include "error.h"
+
+/* return dvd/dvr, rounded up (intended for integer values) */
+/* Statement expression so each argument is evaluated exactly once. */
+#define CEIL(dvd, dvr)                          \
+  ({                                            \
+    __typeof__ (dvd) _dvd = (dvd);              \
+    __typeof__ (dvr) _dvr = (dvr);              \
+    (_dvd + _dvr - 1)/_dvr;                     \
+  })
+
+/* Round an integer up to the next multiple of 8 */
+#ifndef ALIGN8
+#define ALIGN8(p) (((p) + 0x7) & ~(0x7))
+#endif
+
+/*
+ * Round a pointer up to the next 8-byte boundary.
+ * BUGFIX: bitwise '&' is not defined on pointer operands, so the pointer
+ * must round-trip through uintptr_t (C11 7.20.1.4) before masking.
+ */
+#ifndef ALIGNPTR8
+#define ALIGNPTR8(p) ((void *)(((uintptr_t)(p) + 0x7) & ~((uintptr_t)0x7)))
+#endif
+
+/* Round an integer up to the next multiple of 64 (cache-line size) */
+#ifndef ALIGN64
+#define ALIGN64(p) (((p) + 0x3f) & ~(0x3f))
+#endif
+
+#ifndef TRUE
+#define TRUE 1
+#endif
+
+#ifndef FALSE
+#define FALSE 0
+#endif
+
+
+/*
+ * Offset to aligned start of additional data (PIT/CS, FIB) embedded in each
+ * node.  Computed lazily, once, in hicn_hashtb_alloc().
+ */
+u32 ht_node_data_offset_aligned;
+
+/* Some support for posix vs vpp mem management */
+#define MEM_ALLOC(x) clib_mem_alloc_aligned((x), 8)
+#define MEM_FREE(p) clib_mem_free((p))
+/*
+ * Internal utilities
+ */
+
+/* Allocate an overflow bucket */
+/*
+ * Returns a zeroed-capacity slot from the pre-sized overflow-bucket pool,
+ * or NULL when the pool budget (ht_overflow_bucket_count) is exhausted.
+ * Caller is expected to memset the bucket before use.
+ */
+static hicn_hash_bucket_t *
+alloc_overflow_bucket (hicn_hashtb_h h)
+{
+  hicn_hash_bucket_t *newbkt = NULL;
+
+  if (h->ht_overflow_buckets_used < h->ht_overflow_bucket_count)
+    {
+      pool_get_aligned (h->ht_overflow_buckets, newbkt, 8);
+
+      if (newbkt)
+        {
+          h->ht_overflow_buckets_used++;
+        }
+    }
+  return (newbkt);
+}
+
+/* Free an overflow bucket; clear caller's pointer */
+/*
+ * Returns *pb to the overflow pool and NULLs the caller's pointer so a
+ * stale reference cannot be reused after the bucket is recycled.
+ */
+static void
+free_overflow_bucket (hicn_hashtb_h h, hicn_hash_bucket_t ** pb)
+{
+  hicn_hash_bucket_t *bkt = *pb;
+
+  /* Slot 0 is permanently reserved, so the counter never drops below 1. */
+  ASSERT (h->ht_overflow_buckets_used > 0);
+
+  pool_put (h->ht_overflow_buckets, bkt);
+  h->ht_overflow_buckets_used--;
+  *pb = NULL;
+}
+
+/*
+ * Init, allocate a new hashtable.
+ *
+ * ph:            out-param, receives the new table handle on success
+ * max_elems:     max number of entries (nodes) the table must hold
+ * app_data_size: bytes of app data (PIT/CS/FIB) embedded in each node
+ *
+ * Returns HICN_ERROR_NONE on success, HICN_ERROR_HASHTB_INVAL on bad
+ * arguments, HICN_ERROR_HASHTB_NOMEM on allocation failure.  On failure
+ * every partially-allocated resource is released and *ph is untouched.
+ */
+int
+hicn_hashtb_alloc (hicn_hashtb_h * ph, u32 max_elems, size_t app_data_size)
+{
+  int ret = HICN_ERROR_NONE;
+  hicn_hashtb_h h = NULL;
+  u32 count;
+  u32 total_buckets;
+  size_t sz;
+  hicn_hash_node_t *nodep;
+  hicn_hash_bucket_t *bucket;
+
+  if (ph == NULL)
+    {
+      ret = HICN_ERROR_HASHTB_INVAL;
+      goto done;
+    }
+  if (max_elems < HICN_HASHTB_MIN_ENTRIES ||
+      max_elems > HICN_HASHTB_MAX_ENTRIES)
+    {
+      /*
+       * BUGFIX: previously fell through to 'done' with ret still
+       * HICN_ERROR_NONE, so the caller saw "success" with *ph unset.
+       */
+      ret = HICN_ERROR_HASHTB_INVAL;
+      goto done;
+    }
+  /* Allocate and init main hashtable struct */
+  h = MEM_ALLOC (sizeof (hicn_hashtb_t));
+  if (h == NULL)
+    {
+      ret = HICN_ERROR_HASHTB_NOMEM;
+      goto done;
+    }
+  memset (h, 0, sizeof (hicn_hashtb_t));
+
+  /* Compute main table bucket (row) count and size, and allocate */
+
+  /* Consider the last entry as used for containing the overflow bucket */
+  total_buckets = CEIL (max_elems, HICN_HASHTB_BUCKET_ENTRIES - 1);
+  count = ALIGN8 (CEIL (total_buckets, HICN_HASHTB_FILL_FACTOR));
+
+  h->ht_bucket_count = count;
+
+  /* We _really_ expect to have buckets aligned on cache lines ... */
+  sz = sizeof (hicn_hash_bucket_t);
+  assert (sz == ALIGN64 (sz));
+
+  h->ht_buckets = MEM_ALLOC (count * sz);
+  if (h->ht_buckets == NULL)
+    {
+      ret = HICN_ERROR_HASHTB_NOMEM;
+      goto done;
+    }
+  memset (h->ht_buckets, 0, count * sz);
+
+  /*
+   * First time through, compute offset to aligned extra data start in
+   * each node struct; it's crucial that both the node struct (that the
+   * base hashtable uses) and the extra data area (that's also probably
+   * a struct) are aligned.
+   */
+  if (ht_node_data_offset_aligned == 0)
+    {
+      count = STRUCT_OFFSET_OF (hicn_hash_node_t, hn_data);
+      ht_node_data_offset_aligned = ALIGN8 (count);
+    }
+  //check app struct fits into space provided(HICN_HASH_NODE_APP_DATA_SIZE)
+  u32 ht_node_data_size;
+  ht_node_data_size = sizeof (hicn_hash_node_t) - ht_node_data_offset_aligned;
+  if (app_data_size > ht_node_data_size)
+    {
+      /*
+       * BUGFIX: size_t / sizeof arguments must be cast to u32 to match
+       * the %u conversion specifiers (size_t is 64-bit on LP64 targets;
+       * mismatched varargs format types are undefined behavior).
+       */
+      clib_error
+	("hicn hashtable: fatal error: requested app data size(%u) > hashtb node's configured bytes available(%u), sizeof(hicn_shared_t)=%u, sizeof(hicn_pit_entry_t)=%u, sizeof(hicn_cs_entry_t)=%u",
+	 (u32) app_data_size, ht_node_data_size,
+	 (u32) sizeof (hicn_pcs_shared_t),
+	 (u32) sizeof (hicn_pit_entry_t), (u32) sizeof (hicn_cs_entry_t));
+    }
+  /*
+   * Compute entry node count and size, allocate Allocate/'Hide' the
+   * zero-th node so can use zero as an 'empty' value
+   */
+  pool_alloc_aligned (h->ht_nodes, max_elems, 8);
+  if (h->ht_nodes == NULL)
+    {
+      ret = HICN_ERROR_HASHTB_NOMEM;
+      goto done;
+    }
+  pool_get_aligned (h->ht_nodes, nodep, 8);
+  //alloc node 0
+  nodep = nodep;		/* Silence 'not used' warning */
+
+  h->ht_node_count = max_elems;
+  h->ht_nodes_used = 1;
+
+  /*
+   * Compute overflow bucket count and size, allocate
+   */
+  //count = ALIGN8(CEIL(max_elems, HICN_HASHTB_OVERFLOW_FRACTION));
+  count = ALIGN8 (total_buckets - h->ht_bucket_count);
+
+  pool_alloc_aligned (h->ht_overflow_buckets, count, 8);
+  if (h->ht_overflow_buckets == NULL)
+    {
+      ret = HICN_ERROR_HASHTB_NOMEM;
+      goto done;
+    }
+  /* 'Hide' the zero-th node so we can use zero as an 'empty' value */
+  pool_get_aligned (h->ht_overflow_buckets, bucket, 8);
+  bucket = bucket;		/* Silence 'not used' warning */
+
+  h->ht_overflow_bucket_count = count;
+  h->ht_overflow_buckets_used = 1;
+
+done:
+
+  if (h)
+    {
+      if ((ret == HICN_ERROR_NONE) && ph)
+	{
+	  *ph = h;
+	}
+      else
+	{
+	  /* Roll back partial allocations on any error path. */
+	  hicn_hashtb_free (&h);
+	}
+    }
+  return (ret);
+}
+
+/*
+ * Free, de-allocate a hashtable and all of its pool/heap storage.
+ *
+ * Safe to call with a NULL ph or a NULL *ph (no-op), so a handle may be
+ * freed twice or freed after a failed alloc without crashing.  *ph is
+ * NULLed on return.  Always returns 0.
+ */
+int
+hicn_hashtb_free (hicn_hashtb_h * ph)
+{
+  int ret = 0;
+
+  /* BUGFIX: also guard against *ph == NULL before dereferencing it. */
+  if (ph && *ph)
+    {
+      if ((*ph)->ht_nodes)
+	{
+	  pool_free ((*ph)->ht_nodes);
+	  (*ph)->ht_nodes = 0;
+	}
+      if ((*ph)->ht_overflow_buckets)
+	{
+	  pool_free ((*ph)->ht_overflow_buckets);
+	  (*ph)->ht_overflow_buckets = 0;
+	}
+      if ((*ph)->ht_buckets)
+	{
+	  MEM_FREE ((*ph)->ht_buckets);
+	  (*ph)->ht_buckets = 0;
+	}
+      MEM_FREE (*ph);
+
+      *ph = NULL;
+    }
+  return (ret);
+}
+
+
+
+/*
+ * Basic api to lookup a specific hash+key tuple. This does the entire lookup
+ * operation, retrieving node structs and comparing keys, so it's not
+ * optimized for prefetching or high performance.
+ *
+ * Returns zero and mails back a node on success, errno otherwise.
+ *
+ * Thin convenience wrapper over hicn_hashtb_lookup_node_ex() that always
+ * skips entries marked for deletion.
+ */
+int
+hicn_hashtb_lookup_node (hicn_hashtb_h h, const u8 * key,
+			 u32 keylen, u64 hashval, u8 is_data,
+			 u32 * node_id, index_t * dpo_ctx_id, u8 * vft_id,
+			 u8 * is_cs, u8 * hash_entry_id, u32 * bucket_id,
+			 u8 * bucket_is_overflow)
+{
+  return (hicn_hashtb_lookup_node_ex
+	  (h, key, keylen, hashval, is_data, FALSE /* deleted nodes */ ,
+	   node_id,
+	   dpo_ctx_id, vft_id, is_cs, hash_entry_id, bucket_id,
+	   bucket_is_overflow));
+}
+
+/*
+ * Extended api to lookup a specific hash+key tuple. The implementation
+ * allows the caller to locate nodes that are marked for deletion, which is
+ * part of some hashtable applications, such as the FIB.
+ *
+ * This does the entire lookup operation, retrieving node structs and comparing
+ * keys, so it's not optimized for prefetching or high performance.
+ *
+ * Returns zero and mails back a node on success, errno otherwise.
+ *
+ * NOTE(review): despite the name/comment, matching here is done purely on
+ * the 64-bit hash value (he_msb64); the full key bytes are not compared in
+ * this function — callers appear to use hicn_node_compare() for that.
+ * On a hit, the entry's lock count is incremented unless this is a data
+ * lookup that hit a CS entry.
+ */
+int
+hicn_hashtb_lookup_node_ex (hicn_hashtb_h h, const u8 * key,
+			    u32 keylen, u64 hashval, u8 is_data,
+			    int include_deleted_p, u32 * node_id,
+			    index_t * dpo_ctx_id, u8 * vft_id, u8 * is_cs,
+			    u8 * hash_entry_id, u32 * bucket_id,
+			    u8 * bucket_is_overflow)
+{
+  int i, ret = HICN_ERROR_HASHTB_HASH_NOT_FOUND;
+  int found_p = FALSE;
+  u32 bidx;
+  hicn_hash_bucket_t *bucket;
+  u32 current_bucket_id = ~0;
+
+  /*
+   * Use some bits of the low half of the hash to locate a row/bucket
+   * in the table (ht_bucket_count is a power-of-2-friendly mask base)
+   */
+  current_bucket_id = bidx = (hashval & (h->ht_bucket_count - 1));
+
+  bucket = h->ht_buckets + bidx;
+
+  *bucket_is_overflow = 0;
+  /* Check the entries in the bucket for matching hash value */
+
+loop_buckets:
+
+  for (i = 0; i < HICN_HASHTB_BUCKET_ENTRIES && !found_p; i++)
+    {
+      /*
+       * If an entry is marked for deletion, ignore it unless the
+       * caller explicitly wants these nodes.
+       */
+      if (bucket->hb_entries[i].he_flags & HICN_HASH_ENTRY_FLAG_DELETED)
+	{
+	  if (!include_deleted_p)
+	    {
+	      continue;
+	    }
+	}
+      if (bucket->hb_entries[i].he_msb64 == hashval)
+	{
+	  /*
+	   * Found a candidate - must retrieve the actual node
+	   * and check the key.
+	   */
+	  *node_id = bucket->hb_entries[i].he_node;
+	  *dpo_ctx_id = bucket->hb_entries[i].dpo_ctx_id;
+	  *vft_id = bucket->hb_entries[i].vft_id;
+	  *is_cs =
+	    bucket->hb_entries[i].he_flags & HICN_HASH_ENTRY_FLAG_CS_ENTRY;
+	  *hash_entry_id = i;
+	  *bucket_id = current_bucket_id;
+	  /*
+	   * If we are doing lookup for a data, do not take a
+	   * lock in case of a hit with a CS entry
+	   */
+	  if (!(is_data && *is_cs))
+	    {
+	      bucket->hb_entries[i].locks++;
+	    }
+	  found_p = TRUE;
+	  ret = HICN_ERROR_NONE;
+	  goto done;
+	}
+    }
+
+  /*
+   * Be prepared to continue to an overflow bucket if necessary. We
+   * only expect the last entry in a bucket to refer to an overflow
+   * bucket... (he_node of that entry stores the overflow-bucket index)
+   */
+  i = HICN_HASHTB_BUCKET_ENTRIES - 1;
+  if (bucket->hb_entries[i].he_flags & HICN_HASH_ENTRY_FLAG_OVERFLOW)
+    {
+      current_bucket_id = bucket->hb_entries[i].he_node;
+      bucket = pool_elt_at_index (h->ht_overflow_buckets,
+				  bucket->hb_entries[i].he_node);
+      *bucket_is_overflow = 1;
+      goto loop_buckets;
+    }
+done:
+
+  return (ret);
+}
+
+/**
+ * This function allows to split the hash verification from the comparison of
+ * the entire key. Useful to exploit prefetching.
+ * return 1 if equals, 0 otherwise
+ *
+ * A NULL key or a length mismatch short-circuits to 0 without touching
+ * the node's key bytes.
+ */
+int
+hicn_node_compare (const u8 * key, u32 keylen, hicn_hash_node_t * node)
+{
+
+  int ret = 0;
+
+  if (key && keylen == node->hn_keysize)
+    {
+      ret = (memcmp (key, node->hn_key.ks.key, keylen) == 0);
+    }
+  return ret;
+}
+
+/*
+ * Utility to init a new entry in a hashtable bucket/row. We use this to add
+ * new a node+hash, and to clear out an entry during removal.
+ *
+ * Passing (entry, 0, 0LL, 0) resets the slot to the 'empty' sentinel
+ * (node index 0 is reserved precisely so 0 can mean empty).
+ */
+void
+hicn_hashtb_init_entry (hicn_hash_entry_t * entry, u32 nodeidx,
+			u64 hashval, u32 locks)
+{
+  entry->he_msb64 = hashval;
+  entry->he_node = nodeidx;
+
+  /* Clear out some other fields in the entry */
+  entry->he_flags = 0;
+  entry->locks = locks;
+  entry->vft_id = 0;
+  entry->dpo_ctx_id = 0;
+}
+
+/*
+ * Insert a node into the hashtable. We expect the caller has a) computed the
+ * hash value to use, b) initialized the node with the hash and key info, and
+ * c) filled in its app-specific data portion of the node.
+ *
+ * On a duplicate hash hit, returns HICN_ERROR_HASHTB_EXIST and mails back
+ * the existing entry's identifiers (taking a lock unless it is a CS entry).
+ * Otherwise fills a free slot, chaining to / allocating an overflow bucket
+ * when the fixed bucket is full.  *hash_entry always points at the slot
+ * used (or NULL on hard failure).
+ */
+
+int
+hicn_hashtb_insert (hicn_hashtb_h h, hicn_hash_node_t * node,
+		    hicn_hash_entry_t ** hash_entry, u64 hash,
+		    u32 * node_id,
+		    index_t * dpo_ctx_id, u8 * vft_id, u8 * is_cs,
+		    u8 * hash_entry_id, u32 * bucket_id,
+		    u8 * bucket_is_overflow)
+{
+  int i, ret = HICN_ERROR_HASHTB_INVAL;
+  u32 bidx;
+  hicn_hash_bucket_t *bucket, *newbkt;
+  int use_seven;
+  u32 current_bucket_id = ~0;
+  int is_overflow = 0;
+
+  *hash_entry = NULL;
+
+  if (h == NULL)
+    {
+      goto done;
+    }
+  /*
+   * Use some bits of the low half of the hash to locate a row/bucket
+   * in the table
+   */
+  current_bucket_id = bidx = (hash & (h->ht_bucket_count - 1));
+
+  bucket = h->ht_buckets + bidx;
+
+  /* 'use seven': app asked to keep the 8th slot for overflow linkage only */
+  use_seven = (h->ht_flags & HICN_HASHTB_FLAG_USE_SEVEN);
+
+  /* Locate a free entry slot in the bucket */
+
+loop_buckets:
+
+  for (i = 0; i < HICN_HASHTB_BUCKET_ENTRIES; i++)
+    {
+
+      /*
+       * If an entry is marked for deletion, ignore it
+       */
+      if (bucket->hb_entries[i].he_flags & HICN_HASH_ENTRY_FLAG_DELETED)
+	{
+	  continue;
+	}
+      /*
+       * Be sure that we are not inserting the same entry twice
+       */
+      if (bucket->hb_entries[i].he_msb64 == hash)
+	{
+	  /*
+	   * We hit an existing pit entry. increase lock.
+	   */
+
+	  *node_id = bucket->hb_entries[i].he_node;
+	  *dpo_ctx_id = bucket->hb_entries[i].dpo_ctx_id;
+	  *vft_id = bucket->hb_entries[i].vft_id;
+	  *is_cs =
+	    bucket->hb_entries[i].he_flags & HICN_HASH_ENTRY_FLAG_CS_ENTRY;
+	  *hash_entry_id = i;
+	  *bucket_id = current_bucket_id;
+	  *hash_entry = &(bucket->hb_entries[i]);
+	  /*
+	   * If we are doing lookup for a data, do not take a
+	   * lock in case of a hit with a CS entry
+	   */
+	  if (!(*is_cs))
+	    bucket->hb_entries[i].locks++;
+	  *bucket_is_overflow = is_overflow;
+	  ret = HICN_ERROR_HASHTB_EXIST;
+	  goto done;
+	}
+      /* An all-zero slot (hash 0, node 0) is the 'empty' sentinel. */
+      if ((bucket->hb_entries[i].he_msb64 == 0LL) &&
+	  (bucket->hb_entries[i].he_node == 0))
+	{
+	  /* Found a candidate -- fill it in */
+
+	  /*
+	   * Special case if the application asked not to use
+	   * the last entry in each bucket.
+	   */
+	  if ((i != (HICN_HASHTB_BUCKET_ENTRIES - 1)) || use_seven)
+	    {
+	      hicn_hashtb_init_entry (&(bucket->hb_entries[i]),
+				      NODE_IDX_FROM_NODE (node, h), hash, 0);
+
+	      *hash_entry = &(bucket->hb_entries[i]);
+
+	      /* Record back-pointers so the node can find its slot later */
+	      node->bucket_id = current_bucket_id;
+	      node->entry_idx = i;
+	      (*hash_entry)->vft_id = *vft_id;
+	      (*hash_entry)->dpo_ctx_id = *dpo_ctx_id;
+	      if (is_overflow)
+		node->hn_flags |= HICN_HASH_NODE_OVERFLOW_BUCKET;
+
+	      ret = HICN_ERROR_NONE;
+	      goto done;
+	    }
+	}
+    }
+  /*
+   * Be prepared to continue to an overflow bucket if necessary, or to
+   * add a new overflow bucket. We only expect the last entry in a
+   * bucket to refer to an overflow bucket...
+   */
+  i = HICN_HASHTB_BUCKET_ENTRIES - 1;
+  if (bucket->hb_entries[i].he_flags & HICN_HASH_ENTRY_FLAG_OVERFLOW)
+    {
+      /* Existing overflow bucket - re-start the search loop */
+      current_bucket_id = bucket->hb_entries[i].he_node;
+      bucket = pool_elt_at_index (h->ht_overflow_buckets, current_bucket_id);
+      is_overflow = 1;
+      goto loop_buckets;
+
+    }
+  else
+    {
+      /*
+       * Overflow - reached the end of a bucket without finding a
+       * free entry slot. Need to allocate an overflow bucket, and
+       * connect it to this bucket.
+       */
+      newbkt = alloc_overflow_bucket (h);
+      if (newbkt == NULL)
+	{
+	  ret = HICN_ERROR_HASHTB_NOMEM;
+	  goto done;
+	}
+      /*
+       * We're touching some more bytes than we absolutely have to
+       * here, but ... that seems ok.
+       */
+      memset (newbkt, 0, sizeof (hicn_hash_bucket_t));
+
+      if (use_seven)
+	{
+	  /*
+	   * Copy existing entry into new bucket - we really
+	   * expect these to be properly aligned so they can be
+	   * treated as int.  (In 'use seven' mode the last slot held a
+	   * real entry that must be evicted to make room for the link.)
+	   */
+	  memcpy (&(newbkt->hb_entries[0]),
+		  &(bucket->hb_entries[i]), sizeof (hicn_hash_entry_t));
+
+	  /* Update bucket id and entry_idx on the hash node */
+	  hicn_hash_node_t *node =
+	    pool_elt_at_index (h->ht_nodes, newbkt->hb_entries[0].he_node);
+	  node->bucket_id = (newbkt - h->ht_overflow_buckets);
+	  node->entry_idx = 0;
+	  node->hn_flags |= HICN_HASH_NODE_OVERFLOW_BUCKET;
+
+	}
+      /*
+       * Connect original bucket to the index of the new overflow
+       * bucket
+       */
+      bucket->hb_entries[i].he_flags |= HICN_HASH_ENTRY_FLAG_OVERFLOW;
+      bucket->hb_entries[i].he_node = (newbkt - h->ht_overflow_buckets);
+
+      /* Add new entry to new overflow bucket */
+      bucket = newbkt;
+
+      /*
+       * Use entry [1] in the new bucket _if_ we just copied into
+       * entry [zero] above.
+       */
+      if (use_seven)
+	{
+
+	  hicn_hashtb_init_entry (&(bucket->hb_entries[1]),
+				  NODE_IDX_FROM_NODE (node, h), hash, 0);
+	  *hash_entry = &(bucket->hb_entries[1]);
+
+	  node->bucket_id = (newbkt - h->ht_overflow_buckets);
+	  node->entry_idx = 1;
+	  node->hn_flags |= HICN_HASH_NODE_OVERFLOW_BUCKET;
+	  (*hash_entry)->vft_id = *vft_id;
+	  (*hash_entry)->dpo_ctx_id = *dpo_ctx_id;
+	}
+      else
+	{
+
+	  hicn_hashtb_init_entry (&(bucket->hb_entries[0]),
+				  NODE_IDX_FROM_NODE (node, h), hash, 0);
+	  *hash_entry = &(bucket->hb_entries[0]);
+	  node->bucket_id = (newbkt - h->ht_overflow_buckets);
+	  node->entry_idx = 0;
+	  node->hn_flags |= HICN_HASH_NODE_OVERFLOW_BUCKET;
+	  (*hash_entry)->vft_id = *vft_id;
+	  (*hash_entry)->dpo_ctx_id = *dpo_ctx_id;
+	}
+    }
+
+  /* And we're done with the overflow bucket */
+  ret = HICN_ERROR_NONE;
+
+done:
+
+  return (ret);
+}
+
+/*
+ * Delete a node from a hashtable using the node itself, and delete/free the
+ * node. Caller's pointer is cleared on success.
+ *
+ * Convenience wrapper: unlink the bucket entry, then return the node to
+ * the free pool.  NOTE(review): *pnode is dereferenced unconditionally;
+ * callers must not pass a NULL node.
+ */
+void
+hicn_hashtb_delete (hicn_hashtb_h h, hicn_hash_node_t ** pnode, u64 hashval)
+{
+
+  hicn_hashtb_remove_node (h, *pnode, hashval);
+  hicn_hashtb_free_node (h, *pnode);
+  *pnode = NULL;
+
+}
+
+/*
+ * Delete an entry from a hashtable using the node itself. If the node was
+ * stored in an overflow bucket, and the bucket is empty after freeing the
+ * node, the bucket is freed as well.
+ *
+ * The node is NOT returned to the node pool here; callers pair this with
+ * hicn_hashtb_free_node() (see hicn_hashtb_delete()).
+ */
+void
+hicn_hashtb_remove_node (hicn_hashtb_h h, hicn_hash_node_t * node,
+			 u64 hashval)
+{
+  int i, count;
+  u32 bidx, overflow_p;
+  hicn_hash_bucket_t *bucket, *parent;
+
+  if ((h == NULL) || (node == NULL))
+    {
+      goto done;
+    }
+  /* The node remembers its bucket: pool index for overflow, hash-derived
+   * row index for fixed buckets. */
+  if (node->hn_flags & HICN_HASH_NODE_OVERFLOW_BUCKET)
+    bucket = pool_elt_at_index (h->ht_overflow_buckets, node->bucket_id);
+  else
+    {
+      /*
+       * Use some bits of the low half of the hash to locate a
+       * row/bucket in the table
+       */
+      bidx = (hashval & (h->ht_bucket_count - 1));
+      ASSERT (bidx == node->bucket_id);
+      bucket = h->ht_buckets + node->bucket_id;
+    }
+
+  overflow_p = node->hn_flags & HICN_HASH_NODE_OVERFLOW_BUCKET;
+
+  /* Clear out the entry. */
+  hicn_hashtb_init_entry (&(bucket->hb_entries[node->entry_idx]), 0, 0LL, 0);
+
+  if (!overflow_p)
+    {
+      /*
+       * And we're done, in the easy case where we didn't change an
+       * overflow bucket
+       */
+      goto done;
+    }
+  /*
+   * The special case: if this is the last remaining entry in an
+   * overflow bucket, liberate the bucket. That in turn has a special
+   * case if this bucket is in the middle of a chain of overflow
+   * buckets.
+   *
+   * Note that we're not trying aggressively (yet) to condense buckets at
+   * every possible opportunity.
+   */
+
+  /*
+   * Reset this flag; we'll set it again if this bucket links to
+   * another
+   */
+  overflow_p = FALSE;
+
+  /* Count the live entries left in this overflow bucket. */
+  for (i = 0, count = 0; i < HICN_HASHTB_BUCKET_ENTRIES; i++)
+    {
+      if (bucket->hb_entries[i].he_node != 0)
+	{
+	  count++;
+	}
+      if (i == (HICN_HASHTB_BUCKET_ENTRIES - 1) &&
+	  (bucket->hb_entries[i].he_flags & HICN_HASH_ENTRY_FLAG_OVERFLOW))
+	{
+	  count--;		/* Doesn't count as a 'real' entry */
+	  overflow_p = TRUE;
+	}
+    }
+
+  if (count > 0)
+    {
+      /* Still a (real) entry in the row */
+      goto done;
+    }
+  /*
+   * Need to locate the predecessor of 'bucket': start at the beginning
+   * of the chain of buckets and move forward
+   */
+  bidx = (hashval & (h->ht_bucket_count - 1));
+
+  for (parent = h->ht_buckets + bidx; parent != NULL;)
+    {
+
+      if ((parent->hb_entries[(HICN_HASHTB_BUCKET_ENTRIES - 1)].he_flags &
+	   HICN_HASH_ENTRY_FLAG_OVERFLOW) == 0)
+	{
+	  parent = NULL;
+	  break;
+	}
+      bidx = parent->hb_entries[(HICN_HASHTB_BUCKET_ENTRIES - 1)].he_node;
+
+      if (pool_elt_at_index (h->ht_overflow_buckets, bidx) == bucket)
+	{
+	  /*
+	   * Found the predecessor of 'bucket'. If 'bucket' has
+	   * a successor, connect 'parent' to it, and take
+	   * 'bucket out of the middle.
+	   */
+	  if (overflow_p)
+	    {
+	      parent->hb_entries[(HICN_HASHTB_BUCKET_ENTRIES - 1)].he_node =
+		bucket->hb_entries[(HICN_HASHTB_BUCKET_ENTRIES - 1)].he_node;
+	    }
+	  else
+	    {
+	      /*
+	       * Just clear the predecessor entry pointing
+	       * at 'bucket'
+	       */
+	      hicn_hashtb_init_entry (&parent->hb_entries
+				      [(HICN_HASHTB_BUCKET_ENTRIES - 1)], 0,
+				      0LL, 0);
+	    }
+
+	  break;
+	}
+      /*
+       * After the first iteration, 'parent' will be an overflow
+       * bucket too
+       */
+      parent = pool_elt_at_index (h->ht_overflow_buckets, bidx);
+    }
+
+  /* We really expect to have found the predecessor */
+  ASSERT (parent != NULL);
+
+  /* And now, finally, we can put 'bucket' back on the free list */
+  free_overflow_bucket (h, &bucket);
+
+done:
+  return;
+}
+
+/*
+ * Prepare a hashtable node, supplying the key, and computed hash info.
+ *
+ * Copies 'keylen' bytes of 'key' into the node's embedded key storage and
+ * resets the bucket back-pointers to the 'unset' sentinel (~0).  The key
+ * length is bounded by HICN_PARAM_HICN_NAME_LEN_MAX (asserted).
+ */
+void
+hicn_hashtb_init_node (hicn_hashtb_h h, hicn_hash_node_t * node,
+		       const u8 * key, u32 keylen)
+{
+  assert (h != NULL);
+  assert (node != NULL);
+  assert (keylen <= HICN_PARAM_HICN_NAME_LEN_MAX);
+
+  /* Init the node struct */
+  node->hn_flags = HICN_HASH_NODE_FLAGS_DEFAULT;
+  /* (dead store 'hn_keysize = 0' removed: it was overwritten immediately) */
+  node->hn_keysize = keylen;
+  memcpy (node->hn_key.ks.key, key, keylen);
+  node->bucket_id = ~0;
+  node->entry_idx = ~0;
+}
+
+/*
+ * Release a hashtable node back to the free list when an entry is cleared
+ *
+ * Node 0 is permanently reserved as the 'empty' sentinel, so the used
+ * count can never legitimately reach zero (asserted).
+ */
+void
+hicn_hashtb_free_node (hicn_hashtb_h h, hicn_hash_node_t * node)
+{
+  ASSERT (h->ht_nodes_used > 0);
+
+  /* Return 'node' to the free list */
+  pool_put (h->ht_nodes, node);
+  h->ht_nodes_used--;
+
+}
+
+/*
+ * Walk a hashtable, iterating through the nodes, keeping context in 'ctx'.
+ *
+ * Usage: initialize *ctx to HICN_HASH_WALK_CTX_INITIAL, then call
+ * repeatedly; each call mails back the next live node in *pnode and
+ * updates *ctx (low 32 bits: bucket index, high 32 bits: entry index).
+ * Returns HICN_ERROR_HASHTB_HASH_NOT_FOUND when the walk is exhausted.
+ * Concurrent insertions/deletions may cause skipped or duplicated
+ * entries (see comment below) — the walk is best-effort, not a snapshot.
+ */
+int
+hicn_hashtb_next_node (hicn_hashtb_h h, hicn_hash_node_t ** pnode, u64 * ctx)
+{
+  int i, j, ret = HICN_ERROR_HASHTB_INVAL;
+  u32 bidx, entry;
+  hicn_hash_bucket_t *bucket;
+
+  if ((h == NULL) || (pnode == NULL) || (ctx == NULL))
+    {
+      goto done;
+    }
+  /* Special-case for new iteration */
+  if (*ctx == HICN_HASH_WALK_CTX_INITIAL)
+    {
+      bidx = 0;
+      bucket = &h->ht_buckets[0];
+      entry = 0;
+      j = 0;
+      i = 0;
+      goto search_table;
+    }
+  /* Convert context to bucket and entry indices */
+  bidx = *ctx & 0xffffffffLL;
+  entry = *ctx >> 32;
+
+  if (bidx >= h->ht_bucket_count)
+    {
+      ret = HICN_ERROR_HASHTB_HASH_NOT_FOUND;
+      goto done;
+    }
+  bucket = h->ht_buckets + bidx;
+
+  /* Init total index into entries (includes fixed bucket and overflow) */
+  j = 0;
+
+skip_processed_bucket_chunks:
+  /*
+   * Figure out where to resume the search for the next entry in the
+   * table, by trying to find the last entry returned, from the cookie.
+   * Loop walks one (regular or overflow) bucket chunk, label is used
+   * for walking chain of chunks. Note that if there was a deletion or
+   * an addition that created an overflow, iterator can skip entries or
+   * return duplicate entries, for entries that are present from before
+   * the walk starts until after it ends.
+   */
+
+  for (i = 0; i < HICN_HASHTB_BUCKET_ENTRIES; i++, j++)
+    {
+      if (j > entry)
+	{
+	  /*
+	   * Start search for next here, use existing 'bucket'
+	   * and 'i'
+	   */
+	  break;
+	}
+      /*
+       * If an entry is marked for deletion, ignore it
+       */
+      if (bucket->hb_entries[i].he_flags & HICN_HASH_ENTRY_FLAG_DELETED)
+	{
+	  continue;
+	}
+      /*
+       * Be prepared to continue to an overflow bucket if
+       * necessary. (We only expect the last entry in a bucket to
+       * refer to an overflow bucket...)
+       */
+      if (i == (HICN_HASHTB_BUCKET_ENTRIES - 1))
+	{
+	  if (bucket->hb_entries[i].he_flags & HICN_HASH_ENTRY_FLAG_OVERFLOW)
+	    {
+	      bucket = pool_elt_at_index (h->ht_overflow_buckets,
+					  bucket->hb_entries[i].he_node);
+
+	      /* Increment overall entry counter 'j' */
+	      j++;
+
+	      goto skip_processed_bucket_chunks;
+	    }
+	  /*
+	   * end of row (end of fixed bucket plus any
+	   * overflows)
+	   */
+	  i = 0;
+	  j = 0;
+
+	  bidx++;
+
+	  /* Special case - we're at the end */
+	  if (bidx >= h->ht_bucket_count)
+	    {
+	      ret = HICN_ERROR_HASHTB_HASH_NOT_FOUND;
+	      goto done;
+	    }
+	  bucket = h->ht_buckets + bidx;
+	  break;
+	}
+    }
+
+search_table:
+
+  /*
+   * Now we're searching through the table for the next entry that's
+   * set
+   */
+
+  for (; i < HICN_HASHTB_BUCKET_ENTRIES; i++, j++)
+    {
+      /*
+       * If an entry is marked for deletion, ignore it
+       */
+      if (bucket->hb_entries[i].he_flags & HICN_HASH_ENTRY_FLAG_DELETED)
+	{
+	  continue;
+	}
+      /* Is this entry set? (node index 0 is the 'empty' sentinel) */
+      if (bucket->hb_entries[i].he_node != 0)
+	{
+
+	  /* Retrieve the node struct */
+	  *pnode = pool_elt_at_index (h->ht_nodes,
+				      bucket->hb_entries[i].he_node);
+
+	  /*
+	   * Set 'entry' as we exit, so we can update the
+	   * cookie
+	   */
+	  entry = j;
+	  ret = HICN_ERROR_NONE;
+	  break;
+	}
+      /*
+       * Be prepared to continue to an overflow bucket if
+       * necessary. (We only expect the last entry in a bucket to
+       * refer to an overflow bucket...)
+       */
+      if (i == (HICN_HASHTB_BUCKET_ENTRIES - 1))
+	{
+	  if (bucket->hb_entries[i].he_flags & HICN_HASH_ENTRY_FLAG_OVERFLOW)
+	    {
+	      bucket = pool_elt_at_index (h->ht_overflow_buckets,
+					  bucket->hb_entries[i].he_node);
+	      /*
+	       * Reset per-bucket index 'i', here (not done
+	       * in iterator)
+	       */
+	      i = 0;
+	      /* Increment overall entry counter 'j' */
+	      j++;
+
+	      goto search_table;
+	    }
+	  else
+	    {
+	      /*
+	       * Move to next bucket, resetting per-bucket
+	       * and overall entry indexes
+	       */
+	      i = 0;
+	      j = 0;
+
+	      bidx++;
+
+	      /* Special case - we're at the end */
+	      if (bidx >= h->ht_bucket_count)
+		{
+		  ret = HICN_ERROR_HASHTB_HASH_NOT_FOUND;
+		  goto done;
+		}
+	      bucket = h->ht_buckets + bidx;
+	      goto search_table;
+	    }
+	}
+    }
+
+done:
+
+  if (ret == HICN_ERROR_NONE)
+    {
+      /* Update context (cookie) only when a node was actually found */
+      *ctx = bidx;
+      *ctx |= ((u64) entry << 32);
+    }
+  return (ret);
+}
+
+/*
+ * Append the node's key bytes to the caller's vppinfra vector *vec_res
+ * (vec_add may reallocate, hence the pointer-to-vector in/out param).
+ * Keys longer than HICN_HASH_KEY_BYTES are silently skipped; always
+ * returns HICN_ERROR_NONE.
+ */
+int
+hicn_hashtb_key_to_buf (u8 ** vec_res, hicn_hashtb_h h,
+			const hicn_hash_node_t * node)
+{
+  int ret = HICN_ERROR_NONE;
+  u8 *vec = *vec_res;
+
+  if (node->hn_keysize <= HICN_HASH_KEY_BYTES)
+    {
+      vec_add (vec, node->hn_key.ks.key, node->hn_keysize);
+    }
+  *vec_res = vec;
+  return (ret);
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/network/hashtb.h b/hicn-plugin/src/network/hashtb.h
new file mode 100644
index 000000000..3c72fda65
--- /dev/null
+++ b/hicn-plugin/src/network/hashtb.h
@@ -0,0 +1,546 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HICN_HASHTB_H__
+#define __HICN_HASHTB_H__
+
+#include <stdint.h>
+#include <string.h>
+
+#include <vppinfra/bihash_8_8.h>
+#include <vppinfra/bihash_24_8.h>
+
+#include "params.h"
+#include "parser.h"
+#include "error.h"
+
+/**
+ * @file hashtb.h
+ * Lookup is finding a hashtable record whose name matches the name being
+ * looked up. Most of the lookup work is based on the hash value of the two
+ * names. Note that the intel cache line size is 64 bytes, and some platforms
+ * load in 2 cache lines together.
+ * - first step is to match a record at the bucket/slot level (htab has an
+ * array of htbucket_t/htbc_elmt, where each bucket has 7 slots to hold indices
+ * for entries.) Matching at this level implies
+ * - the hashes of the lookup name and the record map to the same bucket
+ * - the high 32 bits of the hashes (slot bce_hash_msb32s) match. Read
+ * cost (on the hash table size, i.e. ignoring reading the name being
+ * looked up):
+ * - First step normally requires 1 cache line load to pull in the
+ * 64-byte htbucket_t with the 7 element slot table holding the
+ * hash_msb32s.
+ * - In the event (hopefully rare for a hash table with appropriate
+ * number of buckets) that more than 7 elements hash to the same bucket,
+ * lookup may well need to look not only at the static htbc_elmt_t but at
+ * the chain of dynamically allocated htbc_elmt_t's linked to the static
+ * htbc_elmt_t, where each of these holds slot entries for additional elements.
+ * - Before reaching that point, it is initially required to read in the
+ * hash table record fields (ht_bucket_buf, htnode buf, etc) holding
+ * pointers to the arrays, but these cache lines are common to all lookups
+ * so will likely already be in the cache.
+ * - second step is to match at the record level (htnode/htkb level) once a
+ * slot-level match happens. Matching at this level implies the following match
+ * - the hash values (the full 64 bits vs. bucket+32 msb, above).
+ * - the name which, on the hash table side, is stored as a list of htkb_t (key buffers).
+ *
+ * Some hashtables (for which rare false positives are tolerable) store hash
+ * values but no keys. (In ISM NDN forwarder, this was used for dcm_dpf: data
+ * cache manager's dataplane filter, where speed was critical and very rare
+ * false positives would be detected in the full dcm check.) - No key buffers
+ * are used (or even allocated at hash table creation).
+ */
+
+/* Handy abbreviations for success status, and for boolean values */
+#ifndef TRUE
+#define TRUE 1
+#endif
+
+#ifndef FALSE
+#define FALSE 0
+#endif
+
+#define HICN_HASH_INVALID_IDX ~0
+/*
+ * for hicn_hashtb_next_node() iterator, this otherwise illegal context value
+ * indicates first call of iteration. Note: must not be 0, which is a legal
+ * context value.
+ */
+#define HICN_HASH_WALK_CTX_INITIAL (~((u64)0))
+
+/*
+ * Key memory allocation scheme.
+ *
+ * The key is the bytestring that a hashtable entry is storing, e.g. a fib
+ * prefix or packet name. The hash of the name is used not just to pick the
+ * bucket, but also as a surrogate for the actual key value.
+ *
+ * Client calls pass key/name as contiguous memory for lookup/add/delete but
+ * hashable stores its copy of the key/name as a list of one or more hash_key
+ * structs. - key memory is managed as a list of keys (cache line
+ * sized/aligned buffers). - If (keysize < 128) then use key struct's full
+ * 128 bytes - If not, first key struct is head of a linked list of elements
+ * where the first bytes are used for the key and the last 4 bytes are the
+ * index of the next entry (or an end marker). - key memory is generally the
+ * single largest use of memory in the hash table, especially for PIT, as
+ * names are bigger than node structs (which is also per name/entry).
+ *
+ */
+
+/* Compute hash node index from node pointer */
+#define NODE_IDX_FROM_NODE(p, h) \
+ (u32)((p) - ((h)->ht_nodes))
+
+#define HICN_HASH_KEY_BYTES 20
+
+typedef struct
+{
+ struct
+ {
+ u8 key[HICN_HASH_KEY_BYTES];
+ } ks; /* Entire key in one block */
+} hicn_hash_key_t;
+
+/*
+ * Ratio of extra key blocks to allocate, in case the embedded ones aren't
+ * sufficient. This is the fraction of the number of entries allocated.
+ */
+#define HICN_HASHTB_KEY_RATIO 8
+
+/*
+ * hash node, used to store a hash table entry; indexed by an entry in a
+ * bucket. the node contains an embedded key; long keys are stored as chains
+ * of keys.
+ *
+ * The memory block for a node includes space for storing outgoing faces for
+ * interests, additional memory located off the end of the htnode data structure.
+ *
+ */
+
+/* Size this so that we can offer 64B aligned on 64-bits for storing outgoing
+ * faces information
+ */
+#define HICN_HASH_NODE_APP_DATA_SIZE 64
+
+/* How to align in the right way */
+typedef struct __attribute__ ((packed)) hicn_hash_node_s
+{
+ /* Bucket id containing the corresponding hash entry. */
+ u32 bucket_id;
+
+ /* Hash entry index in the bucket */
+ u32 entry_idx;
+
+ /* Total size of the key */
+ u16 hn_keysize;
+
+ /* 1 byte of flags for application use */
+ u8 hn_flags;
+
+ u8 _hn_reserved1; /* TBD, to align what follows back to
+ * 32 */
+
+ hicn_hash_key_t hn_key; /* Key value embedded in the node, may chain
+ * to more key buffers if necessary */
+
+ /* 32B + HICN_HASH_NODE_APP_DATA_SIZE */
+ /* Followed by app-specific data (fib or pit or cs entry, e.g.) */
+ u8 hn_data[HICN_HASH_NODE_APP_DATA_SIZE];
+
+} hicn_hash_node_t;
+
+#define HICN_HASH_NODE_FLAGS_DEFAULT 0x00
+#define HICN_HASH_NODE_CS_FLAGS 0x01
+#define HICN_HASH_NODE_OVERFLOW_BUCKET 0x02
+
+/*
+ * hicn_hash_entry_t: structure holding all or part of a hash value, a node
+ * index, and other key pieces of info.
+ *
+ * - 128 bytes/bucket with 22 bytes/entry gives 5 entries per bucket
+ * (HICN_HASHTB_BUCKET_ENTRIES); the last entry may refer to an overflow
+ * bucket instead of a node. Changes in this structure will affect
+ * hicn_hash_bucket_t.
+ */
+typedef struct __attribute__ ((packed)) hicn_hash_entry_s
+{
+
+ /* MSB of the hash value */
+ u64 he_msb64;
+
+ /* Index of node block */
+ u32 he_node;
+
+ /*
+ * Lock to prevent hash_node deletion while there are still interest
+ * or data referring to it
+ */
+ u32 locks;
+
+ /* Index of dpo (4B) */
+ index_t dpo_ctx_id;
+
+ /* A few flags, including 'this points to a chain of buckets' */
+ u8 he_flags;
+
+ /*
+ * Index of the virtual function table corresponding to the dpo_ctx
+ * strategy
+ */
+ u8 vft_id;
+
+} hicn_hash_entry_t; //size 22B
+
+STATIC_ASSERT (sizeof (index_t) <= 4, "sizeof index_t is greater than 4B");
+
+
+#define HICN_HASH_ENTRY_FLAGS_DEFAULT 0x00
+
+/* If entry is PIT this flag is 0 */
+#define HICN_HASH_ENTRY_FLAG_CS_ENTRY 0x01
+
+/*
+ * This entry heads a chain of overflow buckets (we expect to see this only
+ * in the last entry in a bucket.) In this case, the index is to an overflow
+ * bucket rather than to a single node block.
+ */
+#define HICN_HASH_ENTRY_FLAG_OVERFLOW 0x04
+
+/* This entry has been marked for deletion */
+#define HICN_HASH_ENTRY_FLAG_DELETED 0x08
+
+/* Use fast he_timeout units for expiration, slow if not */
+#define HICN_HASH_ENTRY_FLAG_FAST_TIMEOUT 0x10
+
+/*
+ * hash bucket: Contains an array of entries. Cache line sized/aligned, so no
+ * room for extra fields unless bucket size is increased to 2 cache lines or
+ * the entry struct shrinks.
+ */
+
+/*
+ * Number of entries held in each hash bucket (sized so that a bucket,
+ * entries plus padding, occupies exactly 128 bytes).
+ */
+#define HICN_HASHTB_BUCKET_ENTRIES 5
+
+typedef struct __attribute__ ((packed))
+{
+  /* 5 entries x 22 bytes = 110 bytes of payload */
+  hicn_hash_entry_t hb_entries[HICN_HASHTB_BUCKET_ENTRIES];
+  /* Explicit padding: 110 + 8 + 8 + 2 = 128 bytes per bucket */
+  u64 align1;
+  u64 align2;
+  u16 align3;
+} hicn_hash_bucket_t;
+
+/* Overall target fill-factor for the hashtable */
+#define HICN_HASHTB_FILL_FACTOR 4
+
+#define HICN_HASHTB_MIN_ENTRIES (1 << 4) // includes dummy node 0 entry
+#define HICN_HASHTB_MAX_ENTRIES (1 << 24)
+
+#define HICN_HASHTB_MIN_BUCKETS (1 << 10)
+
+/*
+ * htab_t
+ *
+ * Hash table main structure.
+ *
+ * Contains - pointers to dynamically allocated arrays of cache-line
+ * sized/aligned structures (buckets, nodes, keys). Put frequently accessed
+ * fields in the first cache line.
+ */
+typedef struct hicn_hashtb_s
+{
+
+ /* 8B - main array of hash buckets */
+ hicn_hash_bucket_t *ht_buckets;
+
+ /* 8B - just-in-case block of overflow buckets */
+ hicn_hash_bucket_t *ht_overflow_buckets;
+
+ /* 8B - block of nodes associated with entries in buckets */
+ hicn_hash_node_t *ht_nodes;
+
+ /* Flags */
+ u32 ht_flags;
+
+ /* Count of buckets allocated in the main array */
+ u32 ht_bucket_count;
+
+ /* Count of overflow buckets allocated */
+ u32 ht_overflow_bucket_count;
+ u32 ht_overflow_buckets_used;
+
+ /* Count of nodes allocated */
+ u32 ht_node_count;
+ u32 ht_nodes_used;
+
+ /* Count of overflow key structs allocated */
+ u32 ht_key_count;
+ u32 ht_keys_used;
+
+} hicn_hashtb_t, *hicn_hashtb_h;
+
+/*
+ * Offset to aligned start of additional data (PIT/CS, FIB) embedded in each
+ * node.
+ */
+extern u32 ht_node_data_offset_aligned;
+
+/* Flags for hashtable */
+
+#define HICN_HASHTB_FLAGS_DEFAULT 0x00
+
+/*
+ * Don't use the last entry in each bucket - only use it for overflow. We use
+ * this for the FIB, currently, so that we can support in-place FIB changes
+ * that would be difficult if there were hash entry copies as part of
+ * overflow handling.
+ */
+#define HICN_HASHTB_FLAG_USE_SEVEN 0x04
+#define HICN_HASHTB_FLAG_KEY_FMT_PFX 0x08
+#define HICN_HASHTB_FLAG_KEY_FMT_NAME 0x10
+
+/*
+ * Max prefix name components we'll support in our incremental hashing;
+ * currently used only for LPM in the FIB.
+ */
+#define HICN_HASHTB_MAX_NAME_COMPS HICN_PARAM_FIB_ENTRY_PFX_COMPS_MAX
+
+/*
+ * APIs and inlines
+ */
+
+/* Compute hash node index from node pointer */
+/* Compute the pool index of 'p' within the table's node array. */
+static inline u32
+hicn_hashtb_node_idx_from_node (hicn_hashtb_h h, hicn_hash_node_t * p)
+{
+  u32 node_idx = NODE_IDX_FROM_NODE (p, h);
+  return (node_idx);
+}
+
+/* Retrieve a hashtable node by node index */
+/* Map a node pool index back to the node struct it names. */
+static inline hicn_hash_node_t *
+hicn_hashtb_node_from_idx (hicn_hashtb_h h, u32 idx)
+{
+  hicn_hash_node_t *node = pool_elt_at_index (h->ht_nodes, idx);
+  return node;
+}
+
+/* Allocate a brand-new hashtable */
+int
+hicn_hashtb_alloc (hicn_hashtb_h * ph, u32 max_elems, size_t app_data_size);
+
+/* Free a hashtable, including its embedded arrays */
+int hicn_hashtb_free (hicn_hashtb_h * ph);
+
+/* Hash a bytestring, currently using bihash */
+u64 hicn_hashtb_hash_bytestring (const u8 * key, u32 keylen);
+
+/*
+ * Return a pointer to entry 'entry_idx' of the bucket named by
+ * 'bucket_id'.  The bucket lives in the overflow pool when
+ * 'bucket_overflow' is non-zero, in the main bucket array otherwise.
+ */
+always_inline hicn_hash_entry_t *
+hicn_hashtb_get_entry (hicn_hashtb_h h, u32 entry_idx, u32 bucket_id,
+		       u8 bucket_overflow)
+{
+  hicn_hash_bucket_t *bucket = bucket_overflow ?
+    pool_elt_at_index (h->ht_overflow_buckets, bucket_id) :
+    (h->ht_buckets + bucket_id);
+
+  return &bucket->hb_entries[entry_idx];
+}
+
+/* Hash a name, currently using bihash */
+/*
+ * Hash a name of HICN_V4_NAME_LEN or HICN_V6_NAME_LEN bytes with the
+ * bihash hash functions.  Returns (u64)-1 for a NULL key or an
+ * unsupported length.
+ *
+ * The key bytes are loaded with memcpy instead of being dereferenced
+ * through (u64 *)/(u32 *) casts: 'key' points into packet data with no
+ * alignment guarantee, and the casts also violate strict aliasing
+ * (undefined behavior).  The resulting hash value is byte-for-byte
+ * identical to the previous direct loads.
+ */
+always_inline u64
+hicn_hashtb_hash_name (const u8 * key, u16 keylen)
+{
+  if (key != NULL && keylen == HICN_V4_NAME_LEN)
+    {
+      clib_bihash_kv_8_8_t kv;
+      memcpy (&kv.key, key, sizeof (u64));
+      return clib_bihash_hash_8_8 (&kv);
+    }
+  else if (key != NULL && keylen == HICN_V6_NAME_LEN)
+    {
+      clib_bihash_kv_24_8_t kv;
+      u32 tail;
+      memcpy (&kv.key[0], key, 2 * sizeof (u64));
+      memcpy (&tail, key + 2 * sizeof (u64), sizeof (u32));
+      /* Zero-extend the last 4 name bytes into key[2], as before. */
+      kv.key[2] = tail;
+      return clib_bihash_hash_24_8 (&kv);
+    }
+  else
+    {
+      return (-1LL);
+    }
+}
+
+
+/*
+ * Prepare a hashtable node for insertion, supplying the key and computed
+ * hash info. This sets up the node->key relationship, possibly allocating
+ * overflow key buffers.
+ */
+void
+hicn_hashtb_init_node (hicn_hashtb_h h, hicn_hash_node_t * node,
+ const u8 * key, u32 keylen);
+
+/*
+ * Insert a node into the hashtable. We expect the caller has used the init
+ * api to set the node key and hash info, and populated the extra data area
+ * (if any) - or done the equivalent work itself.
+ */
+int
+hicn_hashtb_insert (hicn_hashtb_h h, hicn_hash_node_t * node,
+ hicn_hash_entry_t ** hash_entry, u64 hash,
+ u32 * node_id,
+ index_t * dpo_ctx_id, u8 * vft_id, u8 * is_cs,
+ u8 * hash_entry_id, u32 * bucket_id,
+ u8 * bucket_is_overflow);
+
+/*
+ * Basic api to lookup a specific hash+key tuple. This does the entire lookup
+ * operation, retrieving node structs and comparing keys, so it's not
+ * optimized for prefetching or high performance.
+ *
+ * Returns zero and mails back a node on success, errno otherwise.
+ */
+int
+hicn_hashtb_lookup_node (hicn_hashtb_h h, const u8 * key,
+ u32 keylen, u64 hashval, u8 is_data,
+ u32 * node_id, index_t * dpo_ctx_id, u8 * vft_id,
+ u8 * is_cs, u8 * hash_entry_id, u32 * bucket_id,
+ u8 * bucket_is_overflow);
+
+/*
+ * Extended api to lookup a specific hash+key tuple. The implementation
+ * allows the caller to locate nodes that are marked for deletion; this is
+ * part of some hashtable applications, such as the FIB.
+ *
+ * This does the entire lookup operation, retrieving node structs and comparing
+ * keys, so it's not optimized for prefetching or high performance.
+ *
+ * Returns zero and mails back a node on success, errno otherwise.
+ */
+int
+hicn_hashtb_lookup_node_ex (hicn_hashtb_h h, const u8 * key,
+ u32 keylen, u64 hashval, u8 is_data,
+ int include_deleted_p, u32 * node_id,
+ index_t * dpo_ctx_id, u8 * vft_id, u8 * is_cs,
+ u8 * hash_entry_id, u32 * bucket_id,
+ u8 * bucket_is_overflow);
+
+/**
+ * @brief Compares the key in the node with the given key
+ *
+ * This function allows to split the hash verification from the comparison of
+ * the entire key. Useful to exploit prefertching.
+ * @result 1 if equals, 0 otherwise
+ */
+int hicn_node_compare (const u8 * key, u32 keylen, hicn_hash_node_t * node);
+
+/*
+ * Remove a node from a hashtable using the node itself. The internal data
+ * structs are cleaned up, but the node struct itself is not: the caller must
+ * free the node itself.
+ */
+void hicn_hashtb_remove_node (hicn_hashtb_h h, hicn_hash_node_t * node,
+ u64 hashval);
+
+/*
+ * Delete a node from a hashtable using the node itself, and delete/free the
+ * node. Caller's pointer is cleared on success.
+ */
+void hicn_hashtb_delete (hicn_hashtb_h h, hicn_hash_node_t ** pnode,
+ u64 hashval);
+
+/*
+ * Utility to init a new entry in a hashtable bucket/row. We use this to add
+ * new a node+hash, and to clear out an entry during removal.
+ */
+void
+hicn_hashtb_init_entry (hicn_hash_entry_t * entry,
+ u32 nodeidx, u64 hashval, u32 locks);
+
+
+/*
+ * Return data area embedded in a hash node struct. We maintain an 'offset'
+ * value in case the common node body struct doesn't leave the data area
+ * aligned properly.
+ */
+/*
+ * Return the app data area embedded in a hash node.  The offset is kept
+ * in ht_node_data_offset_aligned so the area stays aligned even if the
+ * common node header does not end on an aligned boundary.
+ */
+static inline void *
+hicn_hashtb_node_data (hicn_hash_node_t * node)
+{
+  u8 *base = (u8 *) node;
+  return (void *) (base + ht_node_data_offset_aligned);
+}
+
+/*
+ * Use some bits of the low half of the hash to locate a row/bucket in the
+ * table
+ */
+/*
+ * Pick a row/bucket from the low bits of the hash.  Assumes
+ * ht_bucket_count is a power of two — TODO(review): confirm this is
+ * enforced at allocation time.
+ */
+static inline u32
+hicn_hashtb_bucket_idx (hicn_hashtb_h h, u64 hashval)
+{
+  u64 mask = (u64) (h->ht_bucket_count - 1);
+  return ((u32) (hashval & mask));
+}
+
+/*
+ * Return a hash node struct from the free list, or NULL. Note that the
+ * returned struct is _not_ cleared/zeroed - init is up to the caller.
+ */
+static inline hicn_hash_node_t *
+hicn_hashtb_alloc_node (hicn_hashtb_h h)
+{
+  hicn_hash_node_t *p = NULL;
+
+  /* Respect the configured capacity: pool_get_aligned would otherwise
+   * grow the pool beyond ht_node_count. */
+  if (h->ht_nodes_used < h->ht_node_count)
+    {
+      /* Take an 8-byte-aligned element from the node pool. */
+      pool_get_aligned (h->ht_nodes, p, 8);
+      h->ht_nodes_used++;
+    }
+  return (p);
+}
+
+/*
+ * Release a hashtable node back to the free list when an entry is cleared
+ */
+void hicn_hashtb_free_node (hicn_hashtb_h h, hicn_hash_node_t * node);
+
+/*
+ * Walk a hashtable, iterating through the nodes, keeping context in 'ctx'
+ * between calls.
+ *
+ * Set the context value to HICN_HASH_WALK_CTX_INITIAL to start an iteration.
+ */
+int
+hicn_hashtb_next_node (hicn_hashtb_h h, hicn_hash_node_t ** pnode, u64 * ctx);
+
+
+int
+hicn_hashtb_key_to_str (hicn_hashtb_h h, const hicn_hash_node_t * node,
+ char *buf, int bufsize, int must_fit);
+
+/*
+ * single hash full name can pass offset for two hashes calculation in case
+ * we use CS and PIT in a two steps hashes (prefix + seqno)
+ */
+/*
+ * Compute the full-name hash for (name, namelen) and store it in
+ * *name_hash.  Returns HICN_ERROR_NONE on success, or
+ * HICN_ERROR_HASHTB_INVAL when the name cannot be hashed.
+ */
+always_inline int
+hicn_hashtb_fullhash (const u8 * name, u16 namelen, u64 * name_hash)
+{
+  u64 h = hicn_hashtb_hash_name (name, namelen);
+
+  *name_hash = h;
+  if (h == (u64) (-1LL))
+    return (HICN_ERROR_HASHTB_INVAL);
+  return (HICN_ERROR_NONE);
+}
+
+#endif /* // __HICN_HASHTB_H__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/network/hicn.api b/hicn-plugin/src/network/hicn.api
new file mode 100644
index 000000000..9643f2098
--- /dev/null
+++ b/hicn-plugin/src/network/hicn.api
@@ -0,0 +1,579 @@
+/*
+ * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+option version = "5.1.0";
+import "vnet/ip/ip_types.api";
+
+enum hicn_action_type
+{
+ HICN_DISABLE = 0,
+ HICN_ENABLE,
+};
+
+typedef hicn_face
+{
+  /* Nat address (ipv4 or ipv6) of the face */
+  vl_api_address_t nat_addr;
+
+  /* VPP software interface (index) associated with the face */
+  u32 swif;
+
+  /* Face flags */
+  u32 flags;
+
+  /* Name of the interface */
+  u8 if_name[30];
+};
+
+define hicn_api_node_params_set
+{
+ /* Client identifier, set from api_main.my_client_index */
+ u32 client_index;
+
+ /* Arbitrary context, so client can match reply to request */
+ u32 context;
+
+ /* Enable / disable ICN forwarder in VPP */
+ u8 enable_disable;
+
+ /* PIT maximum size, otherwise -1 to assign default value */
+ i32 pit_max_size;
+
+ /* CS maximum size, otherwise -1 to assign default value */
+ i32 cs_max_size;
+
+ /* Upper bound on PIT entry lifetime, otherwise -1 to assign default value */
+ f64 pit_max_lifetime_sec;
+};
+
+define hicn_api_node_params_set_reply
+{
+ /* From the request */
+ u32 context;
+
+ /* Return value, zero means all OK */
+ i32 retval;
+};
+
+define hicn_api_node_params_get
+{
+ /* Client identifier, set from api_main.my_client_index */
+ u32 client_index;
+
+ /* Arbitrary context, so client can match reply to request */
+ u32 context;
+};
+
+define hicn_api_node_params_get_reply
+{
+ /* From the request */
+ u32 context;
+
+ /* Return value, zero means all OK */
+ i32 retval;
+
+ /* Enabled / disabled flag */
+ u8 is_enabled;
+
+ /* compile-time plugin features */
+ u8 feature_cs;
+
+ /* Number of VPP workers */
+ u32 worker_count;
+
+ /* PIT maximum size, otherwise -1 to assign default value */
+ u32 pit_max_size;
+
+ /* CS maximum size, otherwise -1 to assign default value */
+ u32 cs_max_size;
+
+ /* Upper bound on PIT entry lifetime */
+ f64 pit_max_lifetime_sec;
+};
+
+define hicn_api_node_stats_get
+{
+ /* Client identifier, set from api_main.my_client_index */
+ u32 client_index;
+
+ /* Arbitrary context, so client can match reply to request */
+ u32 context;
+};
+
+define hicn_api_node_stats_get_reply
+{
+ /* Client identifier, set from api_main.my_client_index */
+ u32 client_index;
+
+ /* Arbitrary context, so client can match reply to request */
+ u32 context;
+
+ /* Return value, zero means all OK */
+ i32 retval;
+
+ /* ICN packets processed */
+ u64 pkts_processed;
+
+ /* ICN interests forwarded */
+ u64 pkts_interest_count;
+
+ /* ICN data msgs forwarded */
+ u64 pkts_data_count;
+
+ /* ICN cached data msg replies */
+ u64 pkts_from_cache_count;
+
+ /* ICN no PIT entry drops */
+ u64 pkts_no_pit_count;
+
+ /* ICN expired PIT entries */
+ u64 pit_expired_count;
+
+ /* ICN expired CS entries */
+ u64 cs_expired_count;
+
+ /* ICN LRU CS entries freed */
+ u64 cs_lru_count;
+
+ /* ICN msgs dropped due to no packet buffers */
+ u64 pkts_drop_no_buf;
+
+ /* ICN Interest messages aggregated in PIT */
+ u64 interests_aggregated;
+
+ /* ICN Interest messages retransmitted */
+ u64 interests_retx;
+
+ /* ICN Interest messages colliding in hashtb */
+ u64 interests_hash_collision;
+
+ /* Number of entries in PIT at the present moment */
+ u64 pit_entries_count;
+
+ /* Number of entries in CS at the present moment */
+ u64 cs_entries_count;
+
+ /* Number of entries in CS at the present moment */
+ u64 cs_entries_ntw_count;
+};
+
+define hicn_api_face_stats_details
+{
+/* From the request */
+ u32 context;
+
+ /* Return value, zero means all OK */
+ i32 retval;
+
+ /* Id of the face */
+ u32 faceid;
+
+ /* Interest rx */
+ u64 irx_packets;
+
+ u64 irx_bytes;
+
+ /* Interest tx */
+ u64 itx_packets;
+
+ u64 itx_bytes;
+
+ /* data rx */
+ u64 drx_packets;
+
+ u64 drx_bytes;
+
+ /* data tx */
+ u64 dtx_packets;
+
+ u64 dtx_bytes;
+};
+
+define hicn_api_face_stats_dump
+{
+/* Client identifier, set from api_main.my_client_index */
+ u32 client_index;
+
+ /* Arbitrary context, so client can match reply to request */
+ u32 context;
+};
+
+define hicn_api_face_params_get
+{
+ /* Client identifier, set from api_main.my_client_index */
+ u32 client_index;
+
+ /* Arbitrary context, so client can match reply to request */
+ u32 context;
+
+ /* A Face to be retrieved */
+ u32 faceid;
+};
+
+define hicn_api_face_params_get_reply
+{
+ /* From the request */
+ u32 context;
+
+ /* Return value, zero means all OK */
+ i32 retval;
+
+ /* The face required */
+ u32 faceid;
+
+ /* IP local address */
+ vl_api_address_t nat_addr;
+
+ /* VPP interface (index) associated with the face */
+ u32 swif;
+
+ /* Face flags */
+ u32 flags;
+};
+
+define hicn_api_faces_details
+{
+/* From the request */
+ u32 context;
+
+ /* Return value, zero means all OK */
+ i32 retval;
+
+ /* Id of the face */
+ u32 faceid;
+
+ /* Face to add */
+ vl_api_hicn_face_t face;
+};
+
+define hicn_api_faces_dump
+{
+/* Client identifier, set from api_main.my_client_index */
+ u32 client_index;
+
+ /* Arbitrary context, so client can match reply to request */
+ u32 context;
+};
+
+define hicn_api_face_get
+{
+ /* Client identifier, set from api_main.my_client_index */
+ u32 client_index;
+
+ /* Arbitrary context, so client can match reply to request */
+ u32 context;
+
+ /* A Face to be retrieved */
+ u32 faceid;
+};
+
+define hicn_api_face_get_reply
+{
+ /* From the request */
+ u32 context;
+
+ /* Return value, zero means all OK */
+ i32 retval;
+
+ /* Id of the face */
+ u32 faceid;
+
+ /* Face to add */
+ vl_api_hicn_face_t face;
+};
+
+define hicn_api_route_get
+{
+ /* Client identifier, set from api_main.my_client_index */
+ u32 client_index;
+
+ /* Arbitrary context, so client can match reply to request */
+ u32 context;
+
+ /* Route prefix */
+ vl_api_prefix_t prefix;
+};
+
+define hicn_api_route_get_reply
+{
+ /* Client identifier, set from api_main.my_client_index */
+ u32 client_index;
+
+ /* Arbitrary context, so client can match reply to request */
+ u32 context;
+
+ /* List of faces pointing to the next hops */
+ u32 faceids[5];
+
+ /* Number of valid faceids */
+ u8 nfaces;
+
+ /* Strategy */
+ u32 strategy_id;
+
+ /* Return value, zero means all OK */
+ i32 retval;
+};
+
+define hicn_api_routes_details
+{
+ /* Client identifier, set from api_main.my_client_index */
+ u32 client_index;
+
+ /* Arbitrary context, so client can match reply to request */
+ u32 context;
+
+ /* Route prefix */
+ vl_api_prefix_t prefix;
+
+ /* List of faces pointing to the next hops */
+ u32 faceids[5];
+
+ /* Number of valid faceids */
+ u8 nfaces;
+
+ /* Strategy */
+ u32 strategy_id;
+
+ /* Return value, zero means all OK */
+ i32 retval;
+};
+
+define hicn_api_routes_dump
+{
+/* Client identifier, set from api_main.my_client_index */
+ u32 client_index;
+
+ /* Arbitrary context, so client can match reply to request */
+ u32 context;
+};
+
+define hicn_api_strategies_get
+{
+ /* Client identifier, set from api_main.my_client_index */
+ u32 client_index;
+
+ /* Arbitrary context, so client can match reply to request */
+ u32 context;
+};
+
+define hicn_api_strategies_get_reply
+{
+ /* Client identifier, set from api_main.my_client_index */
+ u32 client_index;
+
+ /* Arbitrary context, so client can match reply to request */
+ u32 context;
+
+ /* Number of available strategies */
+ u8 n_strategies;
+
+ /* Strategies */
+ u32 strategy_id[256];
+
+ /* Return value, zero means all OK */
+ i32 retval;
+};
+
+define hicn_api_strategy_get
+{
+  /* Client identifier, set from api_main.my_client_index */
+  u32 client_index;
+
+  /* Arbitrary context, so client can match reply to request */
+  u32 context;
+
+  /* Identifier of the strategy to retrieve */
+  u32 strategy_id;
+};
+
+define hicn_api_strategy_get_reply
+{
+ /* Client identifier, set from api_main.my_client_index */
+ u32 client_index;
+
+ /* Arbitrary context, so client can match reply to request */
+ u32 context;
+
+ /* Strategy description */
+ u8 description[200];
+
+ /* Return value, zero means all OK */
+ i32 retval;
+};
+
+define hicn_api_enable_disable
+{
+ /* Client identifier, set from api_main.my_client_index */
+ u32 client_index;
+
+ /* Arbitrary context, so client can match reply to request */
+ u32 context;
+
+ /* Enable or disable enable/disable hICN*/
+ vl_api_hicn_action_type_t enable_disable;
+
+ /* Prefix on which we enable/disable hICN*/
+ vl_api_prefix_t prefix;
+};
+
+define hicn_api_enable_disable_reply
+{
+ /* Client identifier, set from api_main.my_client_index */
+ u32 client_index;
+
+ /* Arbitrary context, so client can match reply to request */
+ u32 context;
+
+/* Return value, zero means all OK */
+ i32 retval;
+};
+
+define hicn_api_register_prod_app
+{
+ /* Client identifier, set from api_main.my_client_index */
+ u32 client_index;
+
+ /* Arbitrary context, so client can match reply to request */
+ u32 context;
+
+ /* Prefix to match */
+ vl_api_prefix_t prefix;
+
+ /* sw_if id */
+ u32 swif;
+
+ /* CS memory reserved -- in number of packets */
+ u32 cs_reserved;
+};
+
+define hicn_api_register_prod_app_reply
+{
+ /* From the request */
+ u32 context;
+
+ /* Return value, zero means all OK */
+ i32 retval;
+
+ /* Actual CS memory reserved -- in number of packets */
+ u32 cs_reserved;
+
+ /* Prod address (ipv4 or ipv6) */
+ vl_api_address_t prod_addr;
+
+ /* Return value: new Face ID, ~0 means no Face was created */
+ u32 faceid;
+};
+
+autoreply define hicn_api_face_prod_del
+{
+ /* Client identifier, set from api_main.my_client_index */
+ u32 client_index;
+
+ /* Arbitrary context, so client can match reply to request */
+ u32 context;
+
+ /* A Face ID to be deleted */
+ u32 faceid;
+};
+
+define hicn_api_register_cons_app
+{
+ /* Client identifier, set from api_main.my_client_index */
+ u32 client_index;
+
+ /* Arbitrary context, so client can match reply to request */
+ u32 context;
+
+ /* swif */
+ u32 swif;
+};
+
+define hicn_api_register_cons_app_reply
+{
+ /* From the request */
+ u32 context;
+
+ /* Return value, zero means all OK */
+ i32 retval;
+
+ /* Ip4 address */
+ vl_api_address_t src_addr4;
+
+ /* Ip6 address */
+ vl_api_address_t src_addr6;
+
+ /* Return value: new Face ID, ~0 means no Face was created */
+ u32 faceid1;
+
+ /* Return value: new Face ID, ~0 means no Face was created */
+ u32 faceid2;
+};
+
+autoreply define hicn_api_face_cons_del
+{
+ /* Client identifier, set from api_main.my_client_index */
+ u32 client_index;
+
+ /* Arbitrary context, so client can match reply to request */
+ u32 context;
+
+ /* A Face ID to be deleted */
+ u32 faceid;
+};
+
+define hicn_api_udp_tunnel_add_del
+{
+ /* Client identifier, set from api_main.my_client_index */
+ u32 client_index;
+
+ /* Arbitrary context, so client can match reply to request */
+ u32 context;
+
+ /* Source address */
+ vl_api_address_t src_addr;
+
+ /* Destination address */
+ vl_api_address_t dst_addr;
+
+ /* Source port */
+ u16 src_port;
+
+ /* Destination port */
+ u16 dst_port;
+
+ /* Add or remove the tunnel*/
+ u8 is_add;
+};
+
+define hicn_api_udp_tunnel_add_del_reply
+{
+ /* From the request */
+ u32 context;
+
+ /* Return value, zero means all OK */
+ i32 retval;
+
+ /* Udp encap index */
+ u32 uei;
+};
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/hicn-plugin/src/network/hicn.h b/hicn-plugin/src/network/hicn.h
new file mode 100644
index 000000000..81246c7e9
--- /dev/null
+++ b/hicn-plugin/src/network/hicn.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HICN_H__
+#define __HICN_H__
+
+/*
+ * <hicn/hicn.h> defines ip_address_t / ip_prefix_t (and related) names
+ * that collide with VPP's own types.  Temporarily rename them to their
+ * hicn_-prefixed equivalents around the include, then undo the renames
+ * so VPP's definitions remain usable in the rest of the plugin.
+ */
+#define ip_address_t hicn_ip_address_t
+#define ip_address_cmp hicn_ip_address_cmp
+#define ip_prefix_t hicn_ip_prefix_t
+#define ip_prefix_cmp hicn_ip_prefix_cmp
+#undef ip_prefix_len
+#define ip_prefix_len hicn_ip_prefix_len
+#include <hicn/hicn.h>
+#undef ip_address_t
+#undef ip_address_cmp
+#undef ip_prefix_t
+#undef ip_prefix_cmp
+#undef ip_prefix_len
+/* Plugin-local accessor for a prefix length field. */
+#define ip_prefix_len(_a) (_a)->len
+
+#include "hicn_buffer.h"
+
+#endif /* __HICN_H__ */
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/network/hicn_api.c b/hicn-plugin/src/network/hicn_api.c
new file mode 100644
index 000000000..e8e24b639
--- /dev/null
+++ b/hicn-plugin/src/network/hicn_api.c
@@ -0,0 +1,727 @@
+/*
+ * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vppinfra/error.h>
+#include <vnet/ip/format.h>
+#include <vnet/ip/ip4.h>
+#include <vnet/ip/ip6.h>
+#include <vlibapi/api.h>
+#include <vlibmemory/api.h>
+#include <vnet/dpo/load_balance.h>
+#include <vnet/ip/ip_types_api.h>
+#include <vnet/ip/ip_format_fns.h>
+
+#include "faces/face.h"
+#include "udp_tunnels/udp_tunnel.h"
+#include "infra.h"
+#include "parser.h"
+#include "mgmt.h"
+#include "strategy_dpo_manager.h"
+#include "strategy_dpo_ctx.h"
+#include "strategy.h"
+#include "pg.h"
+#include "error.h"
+#include "faces/app/face_prod.h"
+#include "faces/app/face_cons.h"
+#include "route.h"
+
+/* define message IDs */
+#include <vpp_plugins/hicn/hicn.api_enum.h>
+#include <vpp_plugins/hicn/hicn.api_types.h>
+
+/* define generated endian-swappers */
+#define vl_endianfun
+#include <vpp_plugins/hicn/hicn_all_api_h.h>
+#undef vl_endianfun
+
+#define REPLY_MSG_ID_BASE sm->msg_id_base
+#include <vlibapi/api_helper_macros.h>
+
+/****** SUPPORTING FUNCTION DECLARATIONS ******/
+
+/*
+ * Convert a unix return code to a vnet_api return code. Currently stubby:
+ * should have more cases.
+ */
+always_inline vnet_api_error_t
+hicn_face_api_entry_params_serialize (hicn_face_id_t faceid,
+ vl_api_hicn_api_face_params_get_reply_t
+ * reply);
+
+
+/****************** API MESSAGE HANDLERS ******************/
+
+/****** NODE ******/
+
+/*
+ * Handler for hicn_api_node_params_set: enable/disable the plugin and
+ * size the PIT/CS.  A value of -1 in any sizing field keeps the
+ * compile-time default.
+ */
+static void
+vl_api_hicn_api_node_params_set_t_handler (vl_api_hicn_api_node_params_set_t *
+ mp)
+{
+ vl_api_hicn_api_node_params_set_reply_t *rmp; /* referenced by REPLY_MACRO */
+ int rv;
+
+ hicn_main_t *sm = &hicn_main; /* referenced by REPLY_MACRO */
+
+ int pit_max_size = clib_net_to_host_i32 (mp->pit_max_size);
+ pit_max_size =
+ pit_max_size == -1 ? HICN_PARAM_PIT_ENTRIES_DFLT : pit_max_size;
+
+ /* lifetime travels as a host-order f64 (no byte swap on either side) */
+ f64 pit_max_lifetime_sec = mp->pit_max_lifetime_sec;
+ pit_max_lifetime_sec =
+ pit_max_lifetime_sec ==
+ -1 ? HICN_PARAM_PIT_LIFETIME_DFLT_MAX_MS / SEC_MS : pit_max_lifetime_sec;
+
+ int cs_max_size = clib_net_to_host_i32 (mp->cs_max_size);
+ cs_max_size = cs_max_size == -1 ? HICN_PARAM_CS_ENTRIES_DFLT : cs_max_size;
+
+ /* NOTE(review): enable_disable is used unswapped as a boolean; any
+ * non-zero wire value enables -- confirm that is intended */
+ rv = hicn_infra_plugin_enable_disable ((int) (mp->enable_disable),
+ pit_max_size,
+ pit_max_lifetime_sec,
+ cs_max_size,
+ ~0);
+
+ REPLY_MACRO (VL_API_HICN_API_NODE_PARAMS_SET_REPLY /* , rmp, mp, rv */ );
+}
+
+/*
+ * Handler for hicn_api_node_params_get: report the current plugin
+ * configuration (enabled flag, PIT/CS sizes, max PIT lifetime).
+ */
+static void
+vl_api_hicn_api_node_params_get_t_handler (vl_api_hicn_api_node_params_get_t *
+ mp)
+{
+ vl_api_hicn_api_node_params_get_reply_t *rmp;
+ int rv = HICN_ERROR_NONE;
+
+ hicn_main_t *sm = &hicn_main; /* referenced by REPLY_MACRO2 */
+
+ /* *INDENT-OFF* */
+ REPLY_MACRO2 (VL_API_HICN_API_NODE_PARAMS_GET_REPLY, (
+ {
+ rmp->is_enabled = sm->is_enabled;
+ rmp->feature_cs = HICN_FEATURE_CS;
+ rmp->pit_max_size = clib_host_to_net_u32 (hicn_infra_pit_size);
+ rmp->pit_max_lifetime_sec = ((f64) sm->pit_lifetime_max_ms) / SEC_MS;
+ rmp->cs_max_size = clib_host_to_net_u32 (hicn_infra_cs_size);
+ rmp->retval = clib_host_to_net_i32 (rv);
+ }));
+ /* *INDENT-ON* */
+}
+
+/*
+ * Handler for hicn_api_node_stats_get: have the mgmt layer fill the
+ * reply with the forwarder statistics.
+ */
+static void
+vl_api_hicn_api_node_stats_get_t_handler (vl_api_hicn_api_node_stats_get_t *
+ mp)
+{
+ vl_api_hicn_api_node_stats_get_reply_t *rmp;
+ int rv = HICN_ERROR_NONE;
+
+ hicn_main_t *sm = &hicn_main; /* referenced by REPLY_MACRO2 */
+
+ /* *INDENT-OFF* */
+ REPLY_MACRO2 (VL_API_HICN_API_NODE_STATS_GET_REPLY, (
+ {
+ /* rv is computed after the reply is allocated, inside the macro body */
+ rv = hicn_mgmt_node_stats_get (rmp);
+ rmp->retval =clib_host_to_net_i32 (rv);
+ }));
+ /* *INDENT-ON* */
+}
+
+
+/*
+ * Handler for hicn_api_face_params_get: serialize the parameters of a
+ * single face into the reply.
+ */
+static void
+ vl_api_hicn_api_face_params_get_t_handler
+ (vl_api_hicn_api_face_params_get_t * mp)
+{
+ vl_api_hicn_api_face_params_get_reply_t *rmp;
+ int rv = 0;
+
+ hicn_main_t *sm = &hicn_main; /* referenced by REPLY_MACRO2 */
+
+ hicn_face_id_t faceid = clib_net_to_host_u32 (mp->faceid);
+
+ /* *INDENT-OFF* */
+ REPLY_MACRO2 (VL_API_HICN_API_FACE_PARAMS_GET_REPLY, (
+ {
+ rv = hicn_face_api_entry_params_serialize(faceid, rmp);
+ /* retval is i32; the u32 byte swap is byte-equivalent */
+ rmp->retval = clib_host_to_net_u32(rv);
+ }));
+ /* *INDENT-ON* */
+}
+
+/*
+ * Fill the wire representation of a face (vl_api_hicn_face_t) from the
+ * in-memory face.  Assumes @mp was zeroed by the caller, so if_name is
+ * NUL-terminated even when no interface name is copied.
+ */
+static void
+send_face_details (hicn_face_t * face, vl_api_hicn_face_t * mp)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+
+ ip_address_encode (&face->nat_addr, IP46_TYPE_ANY, &mp->nat_addr);
+ mp->flags = clib_host_to_net_u32 (face->flags);
+ mp->swif = clib_net_to_host_u32 (face->sw_if);
+ vnet_sw_interface_t *sw_interface =
+ vnet_get_sw_interface_or_null (vnm, face->sw_if);
+ u8 *sbuf = 0;
+ if (sw_interface != NULL)
+ {
+ sbuf =
+ format (0, "%U", format_vnet_sw_interface_name, vnm, sw_interface);
+ /* format() vectors are NOT NUL-terminated: the previous strcpy read
+ * past the vector and had no destination bound.  Copy at most the
+ * vector length, bounded by the reply field. */
+ snprintf ((char *) (mp->if_name), sizeof (mp->if_name), "%.*s",
+ (int) vec_len (sbuf), (char *) sbuf);
+ }
+ vec_free (sbuf); /* was leaked before */
+}
+
+/*
+ * Allocate, fill and send one FACES_DETAILS message for @face on the
+ * client registration @reg, echoing back the request @context.
+ */
+static void
+send_faces_details (vl_api_registration_t * reg,
+ hicn_face_t * face, u32 context)
+{
+ vl_api_hicn_api_faces_details_t *msg = vl_msg_api_alloc (sizeof (*msg));
+
+ memset (msg, 0, sizeof (*msg));
+ msg->_vl_msg_id =
+ htons (VL_API_HICN_API_FACES_DETAILS + hicn_main.msg_id_base);
+ msg->context = context;
+ msg->faceid = clib_host_to_net_u32 (hicn_dpoi_get_index (face));
+
+ send_face_details (face, &msg->face);
+
+ vl_api_send_msg (reg, (u8 *) msg);
+}
+
+/*
+ * Handler for hicn_api_faces_dump: stream one FACES_DETAILS message per
+ * face in the global face pool back to the requesting client.
+ */
+static void
+vl_api_hicn_api_faces_dump_t_handler (vl_api_hicn_api_faces_dump_t * mp)
+{
+ hicn_face_t *face;
+ vl_api_registration_t *reg;
+
+ reg = vl_api_client_index_to_registration (mp->client_index);
+ if (!reg)
+ return; /* client disconnected: nothing to send */
+
+ /* *INDENT-OFF* */
+ pool_foreach (face, hicn_dpoi_face_pool,
+ ({
+ send_faces_details (reg, face, mp->context);
+ }));
+ /* *INDENT-ON* */
+}
+
+/*
+ * Handler for hicn_api_face_get: return the details of one face, or
+ * HICN_ERROR_FACE_NOT_FOUND for an invalid face index.
+ */
+static void
+vl_api_hicn_api_face_get_t_handler (vl_api_hicn_api_face_get_t * mp)
+{
+ vl_api_hicn_api_face_get_reply_t *rmp;
+ int rv = 0;
+
+ hicn_main_t *sm = &hicn_main; /* referenced by REPLY_MACRO2 */
+
+ hicn_face_id_t faceid = clib_net_to_host_u32 (mp->faceid);
+
+ /* *INDENT-OFF* */
+ REPLY_MACRO2 (VL_API_HICN_API_FACE_GET_REPLY, (
+ {
+ /* idx_is_valid is used as a boolean here, then rv is re-purposed
+ * as the hICN error code for the reply */
+ rv = hicn_dpoi_idx_is_valid(faceid);
+ if (rv)
+ {
+ hicn_face_t * face = hicn_dpoi_get_from_idx(faceid);
+ send_face_details(face, &(rmp->face));
+ rv = HICN_ERROR_NONE;
+ }
+ else
+ {
+ rv = HICN_ERROR_FACE_NOT_FOUND;
+ }
+ rmp->retval = clib_host_to_net_u32(rv);
+ }));
+ /* *INDENT-ON* */
+}
+
+/*
+ * Allocate, fill and send one FACE_STATS_DETAILS message for @face,
+ * reading the four combined interest/data RX/TX counters.
+ */
+static void
+send_face_stats_details (vl_api_registration_t * reg,
+ hicn_face_t * face, u32 context)
+{
+ vl_api_hicn_api_face_stats_details_t *mp;
+ hicn_main_t *hm = &hicn_main;
+ /* hoist the per-face counter base index instead of recomputing
+ * hicn_dpoi_get_index() * HICN_N_COUNTER for every counter read */
+ u32 faceid = hicn_dpoi_get_index (face);
+ u32 cidx = faceid * HICN_N_COUNTER;
+ vlib_counter_t v;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset (mp, 0, sizeof (*mp));
+
+ mp->_vl_msg_id =
+ htons (VL_API_HICN_API_FACE_STATS_DETAILS + hm->msg_id_base);
+ mp->context = context;
+ mp->faceid = htonl (faceid);
+
+ vlib_get_combined_counter (&counters[cidx],
+ HICN_FACE_COUNTERS_INTEREST_RX, &v);
+ mp->irx_packets = clib_net_to_host_u64 (v.packets);
+ mp->irx_bytes = clib_net_to_host_u64 (v.bytes);
+
+ vlib_get_combined_counter (&counters[cidx],
+ HICN_FACE_COUNTERS_INTEREST_TX, &v);
+ mp->itx_packets = clib_net_to_host_u64 (v.packets);
+ mp->itx_bytes = clib_net_to_host_u64 (v.bytes);
+
+ vlib_get_combined_counter (&counters[cidx],
+ HICN_FACE_COUNTERS_DATA_RX, &v);
+ mp->drx_packets = clib_net_to_host_u64 (v.packets);
+ mp->drx_bytes = clib_net_to_host_u64 (v.bytes);
+
+ vlib_get_combined_counter (&counters[cidx],
+ HICN_FACE_COUNTERS_DATA_TX, &v);
+ mp->dtx_packets = clib_net_to_host_u64 (v.packets);
+ mp->dtx_bytes = clib_net_to_host_u64 (v.bytes);
+
+ vl_api_send_msg (reg, (u8 *) mp);
+}
+
+/*
+ * Handler for hicn_api_face_stats_dump: stream one FACE_STATS_DETAILS
+ * message per face in the global face pool.
+ */
+static void
+ vl_api_hicn_api_face_stats_dump_t_handler
+ (vl_api_hicn_api_face_stats_dump_t * mp)
+{
+ hicn_face_t *face;
+ vl_api_registration_t *reg;
+
+ reg = vl_api_client_index_to_registration (mp->client_index);
+ if (!reg)
+ return; /* client disconnected: nothing to send */
+
+ /* *INDENT-OFF* */
+ pool_foreach (face, hicn_dpoi_face_pool,
+ ({
+ send_face_stats_details (reg, face, mp->context);
+ }));
+ /* *INDENT-ON* */
+}
+
+
+/****** ROUTE *******/
+
+/*
+ * Handler for hicn_api_route_get: look up the hICN dpo attached to the
+ * requested prefix and return its next-hop face ids and strategy id.
+ */
+static void vl_api_hicn_api_route_get_t_handler
+ (vl_api_hicn_api_route_get_t * mp)
+{
+ vl_api_hicn_api_route_get_reply_t *rmp;
+ int rv = HICN_ERROR_NONE;
+
+ hicn_main_t *sm = &hicn_main; /* referenced by REPLY_MACRO2 */
+
+ fib_prefix_t prefix;
+ ip_prefix_decode (&mp->prefix, &prefix);
+ const dpo_id_t *hicn_dpo_id;
+ hicn_dpo_ctx_t *hicn_dpo_ctx;
+ u32 fib_index;
+
+ rv = hicn_route_get_dpo (&prefix, &hicn_dpo_id, &fib_index);
+
+ /* *INDENT-OFF* */
+ REPLY_MACRO2 (VL_API_HICN_API_ROUTE_GET_REPLY, (
+ {
+ if (rv == HICN_ERROR_NONE)
+ {
+ hicn_dpo_ctx = hicn_strategy_dpo_ctx_get(hicn_dpo_id->dpoi_index);
+ for (int i = 0; hicn_dpo_ctx != NULL && i < hicn_dpo_ctx->entry_count; i++)
+ {
+ /* NOTE(review): faceids are copied in host order here, while
+ * send_route_details() byte-swaps them -- confirm which is intended */
+ rmp->faceids[i] = hicn_dpo_ctx->next_hops[i];
+ }
+ rmp->strategy_id = clib_host_to_net_u32(hicn_dpo_get_vft_id(hicn_dpo_id));}
+ }));
+ /* *INDENT-ON* */
+}
+
+/*
+ * Allocate, fill and send one ROUTES_DETAILS message describing the
+ * hICN state (face ids + strategy) attached to prefix @pfx, if any.
+ */
+static void
+send_route_details (vl_api_registration_t * reg,
+ const fib_prefix_t * pfx, u32 context)
+{
+ vl_api_hicn_api_routes_details_t *msg;
+ const dpo_id_t *hicn_dpo_id;
+ u32 fib_index;
+
+ msg = vl_msg_api_alloc (sizeof (*msg));
+ memset (msg, 0, sizeof (*msg));
+
+ msg->_vl_msg_id =
+ htons (VL_API_HICN_API_ROUTES_DETAILS + hicn_main.msg_id_base);
+ msg->context = context;
+ ip_prefix_encode (pfx, &msg->prefix);
+ msg->nfaces = 0;
+
+ if (hicn_route_get_dpo (pfx, &hicn_dpo_id, &fib_index) == HICN_ERROR_NONE)
+ {
+ hicn_dpo_ctx_t *ctx =
+ hicn_strategy_dpo_ctx_get (hicn_dpo_id->dpoi_index);
+
+ if (ctx != NULL)
+ {
+ for (int i = 0; i < ctx->entry_count; i++)
+ {
+ msg->faceids[i] = clib_host_to_net_u32 (ctx->next_hops[i]);
+ msg->nfaces++;
+ }
+ }
+ msg->strategy_id =
+ clib_host_to_net_u32 (hicn_dpo_get_vft_id (hicn_dpo_id));
+ }
+
+ vl_api_send_msg (reg, (u8 *) msg);
+}
+
+/* Context for the FIB walk below: accumulates the indices of FIB
+ * entries that carry an hICN dpo (clib vec; the caller frees it). */
+typedef struct vl_api_hicn_api_route_dump_walk_ctx_t_
+{
+ fib_node_index_t *feis;
+} vl_api_hicn_api_route_dump_walk_ctx_t;
+
+/*
+ * fib_table_walk callback: record in ctx->feis every FIB entry whose
+ * load-balance contains at least one hICN dpo bucket.
+ */
+static fib_table_walk_rc_t
+vl_api_hicn_api_route_dump_walk (fib_node_index_t fei, void *arg)
+{
+ vl_api_hicn_api_route_dump_walk_ctx_t *ctx = arg;
+ int found = 0;
+ const dpo_id_t *former_dpo_id;
+
+ const dpo_id_t *load_balance_dpo_id =
+ fib_entry_contribute_ip_forwarding (fei);
+
+ /* Only load-balance dpos can hold hICN buckets */
+ if (load_balance_dpo_id->dpoi_type == DPO_LOAD_BALANCE)
+ {
+ load_balance_t *lb = load_balance_get (load_balance_dpo_id->dpoi_index);
+
+ /* Scan the buckets for an hICN dpo, stopping at the first match */
+ for (int i = 0; i < lb->lb_n_buckets && !found; i++)
+ {
+ former_dpo_id = load_balance_get_bucket_i (lb, i);
+
+ if (dpo_is_hicn (former_dpo_id))
+ {
+ vec_add1 (ctx->feis, fei);
+ /* 'found' was never set before, so an entry with several
+ * hICN buckets was added (and later dumped) multiple times */
+ found = 1;
+ }
+ }
+ }
+
+ return (FIB_TABLE_WALK_CONTINUE);
+}
+
+/*
+ * Handler for hicn_api_routes_dump: walk every IPv4 and IPv6 FIB
+ * table, collect the entries carrying an hICN dpo, and stream one
+ * ROUTES_DETAILS message per entry.
+ */
+static void
+vl_api_hicn_api_routes_dump_t_handler (vl_api_hicn_api_routes_dump_t * mp)
+{
+ vl_api_registration_t *reg;
+ fib_table_t *fib_table;
+ ip4_main_t *im = &ip4_main;
+ ip6_main_t *im6 = &ip6_main;
+ fib_node_index_t *lfeip;
+ const fib_prefix_t *pfx;
+ vl_api_hicn_api_route_dump_walk_ctx_t ctx = {
+ .feis = NULL,
+ };
+
+ reg = vl_api_client_index_to_registration (mp->client_index);
+ if (!reg)
+ return; /* client disconnected: nothing to send */
+
+ pool_foreach (fib_table, im->fibs, (
+ {
+ fib_table_walk (fib_table->ft_index,
+ FIB_PROTOCOL_IP4,
+ vl_api_hicn_api_route_dump_walk,
+ &ctx);}
+ ));
+
+ pool_foreach (fib_table, im6->fibs, (
+ {
+ fib_table_walk (fib_table->ft_index,
+ FIB_PROTOCOL_IP6,
+ vl_api_hicn_api_route_dump_walk,
+ &ctx);}
+ ));
+
+ /* second pass: send one details message per collected entry */
+ vec_foreach (lfeip, ctx.feis)
+ {
+ pfx = fib_entry_get_prefix (*lfeip);
+ send_route_details (reg, pfx, mp->context);
+ }
+
+ vec_free (ctx.feis);
+
+}
+
+/*
+ * Handler for hicn_api_strategies_get: list the ids of all registered,
+ * valid forwarding strategies.
+ */
+static void vl_api_hicn_api_strategies_get_t_handler
+ (vl_api_hicn_api_strategies_get_t * mp)
+{
+ vl_api_hicn_api_strategies_get_reply_t *rmp;
+ int rv = HICN_ERROR_NONE;
+
+ hicn_main_t *sm = &hicn_main; /* referenced by REPLY_MACRO2 */
+
+ int n_strategies = hicn_strategy_get_all_available ();
+
+ /* *INDENT-OFF* */
+ REPLY_MACRO2 (VL_API_HICN_API_STRATEGIES_GET_REPLY/* , rmp, mp, rv */ ,(
+ {
+ int j = 0;
+ for (u32 i = 0; i < (u32) n_strategies; i++)
+ {
+ /* only valid ids are reported; j compacts the output array */
+ if (hicn_dpo_strategy_id_is_valid (i) == HICN_ERROR_NONE)
+ {
+ rmp->strategy_id[j] = clib_host_to_net_u32 (i); j++;}
+ }
+ rmp->n_strategies = n_strategies;
+ }));
+ /* *INDENT-ON* */
+}
+
+/*
+ * Handler for hicn_api_strategy_get: return the textual description of
+ * one strategy, or an error if the id is invalid.
+ */
+static void vl_api_hicn_api_strategy_get_t_handler
+ (vl_api_hicn_api_strategy_get_t * mp)
+{
+ vl_api_hicn_api_strategy_get_reply_t *rmp;
+ int rv = HICN_ERROR_NONE;
+
+ hicn_main_t *sm = &hicn_main; /* referenced by REPLY_MACRO2 */
+
+ u32 strategy_id = clib_net_to_host_u32 (mp->strategy_id);
+ rv = hicn_dpo_strategy_id_is_valid (strategy_id);
+
+ /* *INDENT-OFF* */
+ REPLY_MACRO2 (VL_API_HICN_API_STRATEGY_GET_REPLY /* , rmp, mp, rv */ ,(
+ {
+ if (rv == HICN_ERROR_NONE)
+ {
+ const hicn_strategy_vft_t * hicn_strategy_vft =
+ hicn_dpo_get_strategy_vft (strategy_id);
+ hicn_strategy_vft->hicn_format_strategy (rmp->description, 0);}
+ }));
+ /* *INDENT-ON* */
+}
+
+/************* APP FACE ****************/
+
+/*
+ * Handler for hicn_api_register_prod_app: create a producer application
+ * face on the given interface for the given prefix, returning the
+ * producer address, the (possibly adjusted) CS reservation and the
+ * new face id.
+ */
+static void vl_api_hicn_api_register_prod_app_t_handler
+ (vl_api_hicn_api_register_prod_app_t * mp)
+{
+ vl_api_hicn_api_register_prod_app_reply_t *rmp;
+ int rv = HICN_ERROR_NONE;
+
+ hicn_main_t *sm = &hicn_main; /* referenced by REPLY_MACRO2 */
+
+ fib_prefix_t prefix;
+ ip_prefix_decode (&mp->prefix, &prefix);
+ u32 swif = clib_net_to_host_u32 (mp->swif);
+ u32 cs_reserved = clib_net_to_host_u32 (mp->cs_reserved);
+ u32 faceid;
+
+ ip46_address_t prod_addr;
+ ip46_address_reset (&prod_addr);
+ /* cs_reserved is in/out: the face code may clamp the reservation */
+ rv = hicn_face_prod_add (&prefix, swif, &cs_reserved, &prod_addr, &faceid);
+
+ /* *INDENT-OFF* */
+ REPLY_MACRO2 (VL_API_HICN_API_REGISTER_PROD_APP_REPLY, (
+ {
+ ip_address_encode(&prod_addr, IP46_TYPE_ANY, &rmp->prod_addr);
+ /* net_to_host on a host value is byte-equivalent to host_to_net */
+ rmp->cs_reserved = clib_net_to_host_u32(cs_reserved);
+ rmp->faceid = clib_net_to_host_u32(faceid);
+ }));
+ /* *INDENT-ON* */
+}
+
+/*
+ * Handler for hicn_api_face_prod_del: remove a producer application
+ * face.  The result comes solely from hicn_face_prod_del() (the old
+ * pre-initialization of rv was a dead store).
+ */
+static void
+vl_api_hicn_api_face_prod_del_t_handler (vl_api_hicn_api_face_prod_del_t * mp)
+{
+ vl_api_hicn_api_face_prod_del_reply_t *rmp; /* referenced by REPLY_MACRO */
+
+ hicn_main_t *sm = &hicn_main; /* referenced by REPLY_MACRO */
+
+ hicn_face_id_t faceid = clib_net_to_host_u32 (mp->faceid);
+ int rv = hicn_face_prod_del (faceid);
+
+ REPLY_MACRO (VL_API_HICN_API_FACE_PROD_DEL_REPLY /* , rmp, mp, rv */ );
+}
+
+/*
+ * Handler for hicn_api_register_cons_app: create the pair of consumer
+ * application faces (IPv4 + IPv6) on the given interface and return the
+ * allocated source addresses and face ids.
+ */
+static void vl_api_hicn_api_register_cons_app_t_handler
+ (vl_api_hicn_api_register_cons_app_t * mp)
+{
+ vl_api_hicn_api_register_cons_app_reply_t *rmp;
+ int rv = HICN_ERROR_NONE;
+
+ hicn_main_t *sm = &hicn_main; /* referenced by REPLY_MACRO2 */
+ ip46_address_t src_addr4 = ip46_address_initializer;
+ ip46_address_t src_addr6 = ip46_address_initializer;
+
+ u32 swif = clib_net_to_host_u32 (mp->swif);
+ u32 faceid1;
+ u32 faceid2;
+
+ rv =
+ hicn_face_cons_add (&src_addr4.ip4, &src_addr6.ip6, swif, &faceid1,
+ &faceid2);
+
+ /* *INDENT-OFF* */
+ REPLY_MACRO2 (VL_API_HICN_API_REGISTER_CONS_APP_REPLY, (
+ {
+ ip_address_encode(&src_addr4, IP46_TYPE_ANY, &rmp->src_addr4);
+ ip_address_encode(&src_addr6, IP46_TYPE_ANY, &rmp->src_addr6);
+ /* net_to_host on a host value is byte-equivalent to host_to_net */
+ rmp->faceid1 = clib_net_to_host_u32(faceid1);
+ rmp->faceid2 = clib_net_to_host_u32(faceid2);
+ }));
+ /* *INDENT-ON* */
+}
+
+/*
+ * Handler for hicn_api_face_cons_del: remove a consumer application
+ * face.  The result comes solely from hicn_face_cons_del() (the old
+ * pre-initialization of rv was a dead store).
+ */
+static void
+vl_api_hicn_api_face_cons_del_t_handler (vl_api_hicn_api_face_cons_del_t * mp)
+{
+ vl_api_hicn_api_face_cons_del_reply_t *rmp; /* referenced by REPLY_MACRO */
+
+ hicn_main_t *sm = &hicn_main; /* referenced by REPLY_MACRO */
+
+ hicn_face_id_t faceid = clib_net_to_host_u32 (mp->faceid);
+ int rv = hicn_face_cons_del (faceid);
+
+ REPLY_MACRO (VL_API_HICN_API_FACE_CONS_DEL_REPLY /* , rmp, mp, rv */ );
+}
+
+/*
+ * Handler for hicn_api_enable_disable: enable or disable hICN handling
+ * on the routes covered by the given prefix.
+ */
+static void vl_api_hicn_api_enable_disable_t_handler
+(vl_api_hicn_api_enable_disable_t * mp)
+{
+ vl_api_hicn_api_enable_disable_reply_t *rmp;
+ int rv = HICN_ERROR_NONE;
+
+ hicn_main_t *sm = &hicn_main; /* referenced by REPLY_MACRO */
+
+ fib_prefix_t prefix;
+ ip_prefix_decode (&mp->prefix, &prefix);
+
+ /* NOTE(review): no default case -- an unknown enable_disable value
+ * silently replies HICN_ERROR_NONE */
+ switch (clib_net_to_host_u32(mp->enable_disable))
+ {
+ case HICN_ENABLE:
+ rv = hicn_route_enable(&prefix);
+ break;
+ case HICN_DISABLE:
+ rv = hicn_route_disable(&prefix);
+ break;
+ }
+
+ REPLY_MACRO (VL_API_HICN_API_ENABLE_DISABLE_REPLY/* , rmp, mp, rv */ );
+}
+
+/*********************************** UDP TUNNELS ************************************/
+
+/*
+ * Handler for hicn_api_udp_tunnel_add_del: create or delete a UDP
+ * tunnel between src and dst.  Both endpoints must be of the same
+ * address family.  On add, the reply carries the udp-encap index; on
+ * delete (or failure) uei stays ~0.
+ */
+static void vl_api_hicn_api_udp_tunnel_add_del_t_handler
+(vl_api_hicn_api_udp_tunnel_add_del_t * mp)
+{
+ vl_api_hicn_api_udp_tunnel_add_del_reply_t *rmp;
+ int rv = HICN_ERROR_NONE;
+
+ hicn_main_t *sm = &hicn_main; /* referenced by REPLY_MACRO2 */
+
+ ip46_address_t src_addr;
+ ip46_address_t dst_addr;
+ u16 src_port;
+ u16 dst_port;
+ index_t uei = ~0;
+
+ ip46_type_t type = ip_address_decode (&mp->src_addr, &src_addr);
+ if (type != ip_address_decode (&mp->dst_addr, &dst_addr))
+ {
+ /* mixed v4/v6 endpoints are rejected */
+ rv = HICN_ERROR_UDP_TUNNEL_SRC_DST_TYPE;
+ goto done;
+ }
+
+ src_port = clib_net_to_host_u16(mp->src_port);
+ dst_port = clib_net_to_host_u16(mp->dst_port);
+
+ fib_protocol_t proto = ip46_address_is_ip4(&src_addr) ? FIB_PROTOCOL_IP4 : FIB_PROTOCOL_IP6;
+
+ index_t fib_index = fib_table_find (proto, HICN_FIB_TABLE);
+
+ if (mp->is_add)
+ {
+ uei = udp_tunnel_add(proto,
+ fib_index, &src_addr, &dst_addr, src_port, dst_port,
+ UDP_ENCAP_FIXUP_NONE);
+ }
+ else
+ {
+ /* NOTE(review): the delete result is discarded; rv stays NONE
+ * even if no such tunnel existed -- confirm intended */
+ udp_tunnel_del(proto,
+ fib_index, &src_addr, &dst_addr, src_port, dst_port,
+ UDP_ENCAP_FIXUP_NONE);
+ }
+
+
+ done:
+
+ /* *INDENT-OFF* */
+ REPLY_MACRO2 (VL_API_HICN_API_UDP_TUNNEL_ADD_DEL_REPLY, (
+ {
+ rmp->uei = clib_host_to_net_u32(uei);
+ }));
+ /* *INDENT-ON* */
+}
+
+/************************************************************************************/
+
+#include <vpp_plugins/hicn/hicn.api.c>
+
+/* Set up the API message handling tables */
+/*
+ * Register the plugin's API message table with VPP and remember the
+ * allocated message-id base.  Never fails.
+ */
+clib_error_t *
+hicn_api_plugin_hookup (vlib_main_t * vm)
+{
+ hicn_main.msg_id_base = setup_message_id_table ();
+ return NULL;
+}
+
+
+
+/******************* SUPPORTING FUNCTIONS *******************/
+
+/*
+ * Binary serialization for get face configuration API. for the moment
+ * assuming only ip faces here. To be completed with othet types of faces
+ */
+/*
+ * Fill a face_params_get reply from the face identified by @faceid
+ * (only IP faces are handled for now).  Returns HICN_ERROR_NONE on
+ * success, HICN_ERROR_FACE_IP_ADJ_NOT_FOUND for an unknown face, or
+ * VNET_API_ERROR_INVALID_ARGUMENT when @reply is NULL.
+ */
+vnet_api_error_t
+hicn_face_api_entry_params_serialize (hicn_face_id_t faceid,
+ vl_api_hicn_api_face_params_get_reply_t
+ * reply)
+{
+ if (!reply)
+ return (VNET_API_ERROR_INVALID_ARGUMENT);
+
+ hicn_face_t *face = hicn_dpoi_get_from_idx (faceid);
+ if (face == NULL)
+ return (HICN_ERROR_FACE_IP_ADJ_NOT_FOUND);
+
+ ip_address_encode (&face->nat_addr, IP46_TYPE_ANY, &reply->nat_addr);
+ reply->swif = clib_host_to_net_u32 (face->sw_if);
+ reply->flags = clib_host_to_net_u32 (face->flags);
+ reply->faceid = clib_host_to_net_u32 (faceid);
+
+ return (HICN_ERROR_NONE);
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/network/hicn_api_test.c b/hicn-plugin/src/network/hicn_api_test.c
new file mode 100644
index 000000000..033d0a1b3
--- /dev/null
+++ b/hicn-plugin/src/network/hicn_api_test.c
@@ -0,0 +1,1319 @@
+/*
+ * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <inttypes.h>
+
+#include <vat/vat.h>
+#include <vlibapi/api.h>
+#include <vlibmemory/api.h>
+#include <vppinfra/error.h>
+
+#include <vnet/ip/ip4_packet.h>
+#include <vnet/ip/ip6_packet.h>
+#include <vnet/ip/format.h>
+#include <vnet/ip/ip_types_api.h>
+#include <vnet/ip/ip_format_fns.h>
+
+#define __plugin_msg_base hicn_test_main.msg_id_base
+#include <vlibapi/vat_helper_macros.h>
+
+#include <vpp/api/vpe.api_types.h>
+
+#include <vpp_plugins/hicn/hicn_api.h>
+#include <vpp_plugins/hicn/error.h>
+
+
+/* Declare message IDs */
+#include <vpp_plugins/hicn/hicn_msg_enum.h>
+
+/* SUPPORTING FUNCTIONS NOT LOADED BY VPP_API_TEST */
+/*
+ * unformat callback parsing an IPv4 or IPv6 address into an
+ * ip46_address_t.  The second va_arg (@type) restricts which families
+ * are accepted.  Returns 1 on a successful parse, 0 otherwise.
+ */
+uword
+unformat_ip46_address (unformat_input_t * input, va_list * args)
+{
+ ip46_address_t *ip46 = va_arg (*args, ip46_address_t *);
+ ip46_type_t type = va_arg (*args, ip46_type_t);
+
+ if (type != IP46_TYPE_IP6
+ && unformat (input, "%U", unformat_ip4_address, &ip46->ip4))
+ {
+ /* zero the upper words: canonical v4 form of an ip46 address */
+ ip46_address_mask_ip4 (ip46);
+ return 1;
+ }
+
+ if (type != IP46_TYPE_IP4
+ && unformat (input, "%U", unformat_ip6_address, &ip46->ip6))
+ return 1;
+
+ return 0;
+}
+
+/*
+ * Decode a vl_api address union into an ip46_address_t and return the
+ * detected ip46 type.  The IP4 arm zeroes the whole output before the
+ * 4-byte copy; the IP6 arm overwrites all 16 bytes directly.
+ */
+static ip46_type_t
+ip_address_union_decode (const vl_api_address_union_t * in,
+ vl_api_address_family_t af, ip46_address_t * out)
+{
+ ip46_type_t type;
+
+ /* af arrives in network byte order */
+ switch (clib_net_to_host_u32 (af))
+ {
+ case ADDRESS_IP4:
+ clib_memset (out, 0, sizeof (*out));
+ clib_memcpy (&out->ip4, &in->ip4, sizeof (out->ip4));
+ type = IP46_TYPE_IP4;
+ break;
+ case ADDRESS_IP6:
+ clib_memcpy (&out->ip6, &in->ip6, sizeof (out->ip6));
+ type = IP46_TYPE_IP6;
+ break;
+ default:
+ ASSERT (!"Unkown address family in API address type");
+ type = IP46_TYPE_ANY;
+ break;
+ }
+
+ return type;
+}
+
+/* Copy an ip6 address into its API wire form (byte-for-byte). */
+void
+ip6_address_encode (const ip6_address_t * in, vl_api_ip6_address_t out)
+{
+ clib_memcpy (out, in, sizeof (*in));
+}
+
+/* Copy an API ip6 address into the internal representation. */
+void
+ip6_address_decode (const vl_api_ip6_address_t in, ip6_address_t * out)
+{
+ clib_memcpy (out, in, sizeof (*out));
+}
+
+/* Copy an ip4 address into its API wire form (byte-for-byte). */
+void
+ip4_address_encode (const ip4_address_t * in, vl_api_ip4_address_t out)
+{
+ clib_memcpy (out, in, sizeof (*in));
+}
+
+/* Copy an API ip4 address into the internal representation. */
+void
+ip4_address_decode (const vl_api_ip4_address_t in, ip4_address_t * out)
+{
+ clib_memcpy (out, in, sizeof (*out));
+}
+
+/* Encode the appropriate member of an ip46 address into the API union,
+ * selecting by the (network-order) address family. */
+static void
+ip_address_union_encode (const ip46_address_t * in,
+ vl_api_address_family_t af,
+ vl_api_address_union_t * out)
+{
+ if (ADDRESS_IP6 == clib_net_to_host_u32 (af))
+ ip6_address_encode (&in->ip6, out->ip6);
+ else
+ ip4_address_encode (&in->ip4, out->ip4);
+}
+
+/* Decode an API address (family tag + union) into an ip46_address_t;
+ * returns the detected ip46 type. */
+ip46_type_t
+ip_address_decode (const vl_api_address_t * in, ip46_address_t * out)
+{
+ return (ip_address_union_decode (&in->un, in->af, out));
+}
+
+/*
+ * Encode an ip46 address into an API address.  For IP46_TYPE_ANY the
+ * family is inferred from the address content.  out->af is stored in
+ * network byte order.
+ */
+void
+ip_address_encode (const ip46_address_t * in, ip46_type_t type,
+ vl_api_address_t * out)
+{
+ switch (type)
+ {
+ case IP46_TYPE_IP4:
+ out->af = clib_net_to_host_u32 (ADDRESS_IP4);
+ break;
+ case IP46_TYPE_IP6:
+ out->af = clib_net_to_host_u32 (ADDRESS_IP6);
+ break;
+ case IP46_TYPE_ANY:
+ if (ip46_address_is_ip4 (in))
+ out->af = clib_net_to_host_u32 (ADDRESS_IP4);
+ else
+ out->af = clib_net_to_host_u32 (ADDRESS_IP6);
+ break;
+ }
+ ip_address_union_encode (in, out->af, &out->un);
+}
+
+/*
+ * Map an ip46 type to the matching FIB protocol.  IP46_TYPE_ANY is not
+ * a valid input: it asserts in debug builds and falls back to IP4 in
+ * release builds.
+ */
+fib_protocol_t
+fib_proto_from_ip46 (ip46_type_t iproto)
+{
+ if (iproto == IP46_TYPE_IP4)
+ return FIB_PROTOCOL_IP4;
+ if (iproto == IP46_TYPE_IP6)
+ return FIB_PROTOCOL_IP6;
+
+ /* IP46_TYPE_ANY (or out-of-range value) */
+ ASSERT (0);
+ return FIB_PROTOCOL_IP4;
+}
+
+/*
+ * Map a FIB protocol to its ip46 type.  MPLS has no IP type and maps
+ * to IP46_TYPE_ANY; any other value asserts in debug builds.
+ */
+ip46_type_t
+fib_proto_to_ip46 (fib_protocol_t fproto)
+{
+ if (fproto == FIB_PROTOCOL_IP4)
+ return (IP46_TYPE_IP4);
+ if (fproto == FIB_PROTOCOL_IP6)
+ return (IP46_TYPE_IP6);
+ if (fproto == FIB_PROTOCOL_MPLS)
+ return (IP46_TYPE_ANY);
+
+ ASSERT (0);
+ return (IP46_TYPE_ANY);
+}
+
+/*
+ * Decode an API prefix into a fib_prefix_t.
+ * NOTE(review): an unrecognized address family leaves fp_proto unset --
+ * confirm callers only ever pass IP4/IP6.
+ */
+void
+ip_prefix_decode (const vl_api_prefix_t * in, fib_prefix_t * out)
+{
+ switch (clib_net_to_host_u32 (in->address.af))
+ {
+ case ADDRESS_IP4:
+ out->fp_proto = FIB_PROTOCOL_IP4;
+ break;
+ case ADDRESS_IP6:
+ out->fp_proto = FIB_PROTOCOL_IP6;
+ break;
+ }
+ out->fp_len = in->len;
+ out->___fp___pad = 0;
+ ip_address_decode (&in->address, &out->fp_addr);
+}
+
+/* Encode a fib_prefix_t into its API wire form. */
+void
+ip_prefix_encode (const fib_prefix_t * in, vl_api_prefix_t * out)
+{
+ out->len = in->fp_len;
+ ip_address_encode (&in->fp_addr,
+ fib_proto_to_ip46 (in->fp_proto), &out->address);
+}
+
+/////////////////////////////////////////////////////
+
+/* Sentinel face id used by the CLI parsers below ("no face given"). */
+#define HICN_FACE_NULL ~0
+
+/* Per-process state of the hicn vpp_api_test client. */
+typedef struct
+{
+ /* API message ID base */
+ u16 msg_id_base;
+ vat_main_t *vat_main;
+ /* msg index of control_ping; resolved lazily by the dump commands */
+ u32 ping_id;
+} hicn_test_main_t;
+
+hicn_test_main_t hicn_test_main;
+
+/* Replies that carry only a retval: one boilerplate handler is
+ * generated per entry, printing the error string and completing the
+ * pending call (or counting the error in async mode). */
+#define foreach_standard_reply_retval_handler \
+_(hicn_api_node_params_set_reply) \
+_(hicn_api_enable_disable_reply)
+
+/* Template for the boilerplate retval-only reply handlers. */
+#define _(n) \
+ static void vl_api_##n##_t_handler \
+ (vl_api_##n##_t * mp) \
+ { \
+ vat_main_t * vam = hicn_test_main.vat_main; \
+ i32 retval = ntohl(mp->retval); \
+ if (vam->async_mode) { \
+ vam->async_errors += (retval < 0); \
+ } else { \
+ fformat (vam->ofp,"%s\n", get_error_string(retval));\
+ vam->retval = retval; \
+ vam->result_ready = 1; \
+ } \
+ }
+foreach_standard_reply_retval_handler;
+#undef _
+
+/*
+ * Table of message reply handlers, must include boilerplate handlers we just
+ * generated
+ */
+#define foreach_vpe_api_reply_msg \
+_(HICN_API_NODE_PARAMS_SET_REPLY, hicn_api_node_params_set_reply) \
+_(HICN_API_NODE_PARAMS_GET_REPLY, hicn_api_node_params_get_reply) \
+_(HICN_API_NODE_STATS_GET_REPLY, hicn_api_node_stats_get_reply) \
+_(HICN_API_FACE_GET_REPLY, hicn_api_face_get_reply) \
+_(HICN_API_FACES_DETAILS, hicn_api_faces_details) \
+_(HICN_API_FACE_STATS_DETAILS, hicn_api_face_stats_details) \
+_(HICN_API_FACE_PARAMS_GET_REPLY, hicn_api_face_params_get_reply) \
+_(HICN_API_ROUTE_GET_REPLY, hicn_api_route_get_reply) \
+_(HICN_API_ROUTES_DETAILS, hicn_api_routes_details) \
+_(HICN_API_STRATEGIES_GET_REPLY, hicn_api_strategies_get_reply) \
+_(HICN_API_STRATEGY_GET_REPLY, hicn_api_strategy_get_reply) \
+_(HICN_API_ENABLE_DISABLE_REPLY, hicn_api_enable_disable_reply) \
+_(HICN_API_UDP_TUNNEL_ADD_DEL_REPLY, hicn_api_udp_tunnel_add_del_reply)
+
+/*
+ * CLI: hicn_api_node_params_set [disable] [PIT size N] [CS size N]
+ * [PIT maxlife F].  Values left unset are sent as -1 so the forwarder
+ * keeps its compiled-in defaults.
+ */
+static int
+api_hicn_api_node_params_set (vat_main_t * vam)
+{
+ unformat_input_t *input = vam->input;
+ int enable_disable = 1;
+ int pit_size = -1, cs_size = -1;
+ f64 pit_max_lifetime_sec = -1.0f;
+ int ret;
+
+ vl_api_hicn_api_node_params_set_t *mp;
+
+ /* Parse args required to build the message */
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "disable"))
+ {
+ enable_disable = 0;
+ }
+ else if (unformat (input, "PIT size %d", &pit_size))
+ {;
+ }
+ else if (unformat (input, "CS size %d", &cs_size))
+ {;
+ }
+ else if (unformat (input, "PIT maxlife %f", &pit_max_lifetime_sec))
+ {;
+ }
+ else
+ {
+ break;
+ }
+ }
+
+ /* Construct the API message */
+ M (HICN_API_NODE_PARAMS_SET, mp);
+ mp->enable_disable = clib_host_to_net_u32(enable_disable);
+ mp->pit_max_size = clib_host_to_net_i32 (pit_size);
+ mp->cs_max_size = clib_host_to_net_i32 (cs_size);
+ /* f64 travels in host order; the handler reads it unswapped */
+ mp->pit_max_lifetime_sec = pit_max_lifetime_sec;
+
+ /* send it... */
+ S (mp);
+
+ /* Wait for a reply... */
+ W (ret);
+
+ return ret;
+}
+
+/*
+ * CLI: hicn_api_node_params_get -- request the current forwarder
+ * configuration; output is printed by the reply handler below.
+ */
+static int
+api_hicn_api_node_params_get (vat_main_t * vam)
+{
+ vl_api_hicn_api_node_params_get_t *mp;
+ int ret;
+
+ //Construct the API message
+ M (HICN_API_NODE_PARAMS_GET, mp);
+
+ /* send it... */
+ S (mp);
+
+ /* Wait for a reply... */
+ W (ret);
+
+ return ret;
+}
+
+/*
+ * Print the node_params_get reply.  The previous format string had
+ * seven conversion specifiers ("dflt/min/max" PIT lifetimes) but only
+ * five arguments were supplied, so fformat consumed values that were
+ * never passed.  The reply only carries the max lifetime; print
+ * exactly the five available fields.
+ */
+static void
+ vl_api_hicn_api_node_params_get_reply_t_handler
+ (vl_api_hicn_api_node_params_get_reply_t * mp)
+{
+ vat_main_t *vam = hicn_test_main.vat_main;
+ i32 retval = ntohl (mp->retval);
+
+ if (vam->async_mode)
+ {
+ vam->async_errors += (retval < 0);
+ return;
+ }
+ vam->retval = retval;
+ vam->result_ready = 1;
+
+ if (vam->retval < 0)
+ {
+ //vpp_api_test infra will also print out string form of error
+ fformat (vam->ofp, " (API call error: %d)\n", vam->retval);
+ return;
+ }
+ fformat (vam->ofp,
+ "Enabled %d\n"
+ " Features: cs:%d\n"
+ " PIT size %d\n"
+ " PIT lifetime max %.3f\n"
+ " CS size %d\n",
+ mp->is_enabled,
+ mp->feature_cs,
+ clib_net_to_host_u32 (mp->pit_max_size),
+ mp->pit_max_lifetime_sec, clib_net_to_host_u32 (mp->cs_max_size));
+}
+
+/*
+ * CLI: hicn_api_node_stats_get -- request the forwarder statistics;
+ * output is printed by the reply handler below.
+ */
+static int
+api_hicn_api_node_stats_get (vat_main_t * vam)
+{
+ vl_api_hicn_api_node_stats_get_t *mp;
+ int ret;
+
+ /* Construct the API message */
+ M (HICN_API_NODE_STATS_GET, mp);
+
+ /* send it... */
+ S (mp);
+
+ /* Wait for a reply... */
+ W (ret);
+
+ return ret;
+}
+
+/*
+ * Print the node_stats_get reply.  The previous format string listed
+ * four pkts_nak_* fields for which no arguments were passed (17
+ * specifiers vs 13 arguments), shifting every later printed value;
+ * the unmatched lines are dropped so specifiers and arguments pair up.
+ * NOTE(review): %d is kept for the u64 arguments as in the original --
+ * verify against clib format semantics (upstream later used %lu).
+ */
+static void
+ vl_api_hicn_api_node_stats_get_reply_t_handler
+ (vl_api_hicn_api_node_stats_get_reply_t * rmp)
+{
+ vat_main_t *vam = hicn_test_main.vat_main;
+ i32 retval = ntohl (rmp->retval);
+
+ if (vam->async_mode)
+ {
+ vam->async_errors += (retval < 0);
+ return;
+ }
+ vam->retval = retval;
+ vam->result_ready = 1;
+
+ if (vam->retval < 0)
+ {
+ //vpp_api_test infra will also print out string form of error
+ fformat (vam->ofp, " (API call error: %d)\n", vam->retval);
+ return;
+ }
+ else
+ {
+ fformat (vam->ofp, //compare hicn_cli_show_command_fn block:should match
+ " PIT entries (now): %d\n"
+ " CS entries (now): %d\n"
+ " Forwarding statistics:"
+ " pkts_processed: %d\n"
+ " pkts_interest_count: %d\n"
+ " pkts_data_count: %d\n"
+ " pkts_from_cache_count: %d\n"
+ " pkts_no_pit_count: %d\n"
+ " pit_expired_count: %d\n"
+ " cs_expired_count: %d\n"
+ " cs_lru_count: %d\n"
+ " pkts_drop_no_buf: %d\n"
+ " interests_aggregated: %d\n"
+ " interests_retransmitted: %d\n",
+ clib_net_to_host_u64 (rmp->pit_entries_count),
+ clib_net_to_host_u64 (rmp->cs_entries_count),
+ clib_net_to_host_u64 (rmp->pkts_processed),
+ clib_net_to_host_u64 (rmp->pkts_interest_count),
+ clib_net_to_host_u64 (rmp->pkts_data_count),
+ clib_net_to_host_u64 (rmp->pkts_from_cache_count),
+ clib_net_to_host_u64 (rmp->pkts_no_pit_count),
+ clib_net_to_host_u64 (rmp->pit_expired_count),
+ clib_net_to_host_u64 (rmp->cs_expired_count),
+ clib_net_to_host_u64 (rmp->cs_lru_count),
+ clib_net_to_host_u64 (rmp->pkts_drop_no_buf),
+ clib_net_to_host_u64 (rmp->interests_aggregated),
+ clib_net_to_host_u64 (rmp->interests_retx));
+ }
+}
+
+/*
+ * CLI: hicn_api_face_params_get face <id> -- request the parameters of
+ * one face; output is printed by the reply handler below.
+ */
+static int
+api_hicn_api_face_params_get (vat_main_t * vam)
+{
+ unformat_input_t *input = vam->input;
+ vl_api_hicn_api_face_params_get_t *mp;
+ u32 faceid = HICN_FACE_NULL, ret;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "face %d", &faceid))
+ {;
+ }
+ else
+ {
+ break;
+ }
+ }
+
+ //Check for presence of face ID
+ if (faceid == HICN_FACE_NULL)
+ {
+ clib_warning ("Please specify face ID");
+ return 1;
+ }
+ //Construct the API message
+ M (HICN_API_FACE_PARAMS_GET, mp);
+ mp->faceid = clib_host_to_net_u32 (faceid);
+
+ //send it...
+ S (mp);
+
+ //Wait for a reply...
+ W (ret);
+
+ return ret;
+}
+
+/*
+ * Print the face_params_get reply.  format() vectors are NOT
+ * NUL-terminated, so the buffer is terminated explicitly before being
+ * printed with %s, and freed afterwards (it previously leaked; the
+ * no-op vec_reset_length on a still-NULL vector is also dropped).
+ */
+static void
+ vl_api_hicn_api_face_params_get_reply_t_handler
+ (vl_api_hicn_api_face_params_get_reply_t * rmp)
+{
+ vat_main_t *vam = hicn_test_main.vat_main;
+ i32 retval = ntohl (rmp->retval);
+ u8 *sbuf = 0;
+ ip46_address_t nat_addr;
+
+ if (vam->async_mode)
+ {
+ vam->async_errors += (retval < 0);
+ return;
+ }
+ vam->retval = retval;
+ vam->result_ready = 1;
+
+ if (vam->retval < 0)
+ {
+ //vpp_api_test infra will also print out string form of error
+ fformat (vam->ofp, " (API call error: %d)\n", vam->retval);
+ return;
+ }
+ ip_address_decode (&rmp->nat_addr, &nat_addr);
+ sbuf =
+ format (0, "nat_addr %U", format_ip46_address,
+ &nat_addr, 0 /*IP46_ANY_TYPE */);
+ vec_add1 (sbuf, 0); /* NUL-terminate before printing with %s */
+
+ fformat (vam->ofp, "%s swif %d flags %d\n",
+ sbuf,
+ clib_net_to_host_u32 (rmp->swif),
+ clib_net_to_host_i32 (rmp->flags));
+ vec_free (sbuf);
+}
+
+/*
+ * Print one face from a dump reply.  Fixes the format call, which
+ * printed the uninitialized local `local_addr` instead of the decoded
+ * nat_addr; also NUL-terminates the format() vector before %s and
+ * frees it (it previously leaked).
+ */
+static void
+format_face (vl_api_hicn_face_t * rmp)
+{
+ vat_main_t *vam = hicn_test_main.vat_main;
+ u8 *sbuf = 0;
+ ip46_address_t nat_addr;
+
+ ip_address_decode (&rmp->nat_addr, &nat_addr);
+
+ sbuf =
+ format (0, "nat_addr %U", format_ip46_address,
+ &nat_addr, 0 /*IP46_ANY_TYPE */);
+ vec_add1 (sbuf, 0); /* NUL-terminate before printing with %s */
+
+ fformat (vam->ofp, "%s swif %d flags %d name %s\n",
+ sbuf,
+ clib_net_to_host_u32 (rmp->swif),
+ clib_net_to_host_i32 (rmp->flags), rmp->if_name);
+ vec_free (sbuf);
+}
+
+static int
+api_hicn_api_faces_dump (vat_main_t * vam)
+{
+  /* VAT command: dump all hICN faces; end-of-dump is signalled by a ping. */
+  hicn_test_main_t *hm = &hicn_test_main;
+  vl_api_hicn_api_faces_dump_t *mp;
+  vl_api_control_ping_t *ping;
+  int ret;
+
+  if (vam->json_output)
+    {
+      clib_warning ("JSON output not supported for faces_dump");
+      return -99;
+    }
+
+  M (HICN_API_FACES_DUMP, mp);
+  S (mp);
+
+  /* Resolve the control-ping message id once and cache it. */
+  if (!hm->ping_id)
+    hm->ping_id = vl_msg_api_get_msg_index ((u8 *) (VL_API_CONTROL_PING_CRC));
+
+  /* Use a control ping for synchronization */
+  ping = vl_msg_api_alloc_as_if_client (sizeof (*ping));
+  ping->_vl_msg_id = htons (hm->ping_id);
+  ping->client_index = vam->my_client_index;
+
+  fformat (vam->ofp, "Sending ping id=%d\n", hm->ping_id);
+
+  vam->result_ready = 0;
+  S (ping);
+
+  W (ret);
+  return ret;
+}
+
+/* Per-face dump detail handler: delegate printing to format_face. */
+static void
+vl_api_hicn_api_faces_details_t_handler (vl_api_hicn_api_faces_details_t * mp)
+{
+  format_face (&mp->face);
+}
+
+static int
+api_hicn_api_face_get (vat_main_t * vam)
+{
+  /* VAT command: fetch one hICN face record by id. */
+  unformat_input_t *input = vam->input;
+  vl_api_hicn_api_face_get_t *mp;
+  u32 faceid = HICN_FACE_NULL, ret;
+
+  /* Scan the CLI input for a face id; stop at the first unknown token. */
+  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+    {
+      if (!unformat (input, "face %d", &faceid))
+	break;
+    }
+
+  /* A face id is mandatory. */
+  if (faceid == HICN_FACE_NULL)
+    {
+      clib_warning ("Please specify face ID");
+      return 1;
+    }
+
+  /* Build the API message, send it, and wait for the reply. */
+  M (HICN_API_FACE_GET, mp);
+  mp->faceid = clib_host_to_net_u32 (faceid);
+
+  S (mp);
+  W (ret);
+
+  return ret;
+}
+
+
+/*
+ * Reply handler for hicn_api_face_get: prints the returned face record
+ * via format_face, or the API error code on failure.
+ */
+static void
+ vl_api_hicn_api_face_get_reply_t_handler
+ (vl_api_hicn_api_face_get_reply_t * rmp)
+{
+
+ vat_main_t *vam = hicn_test_main.vat_main;
+ i32 retval = ntohl (rmp->retval);
+
+ /* In async mode just count errors; no synchronous output is expected. */
+ if (vam->async_mode)
+ {
+ vam->async_errors += (retval < 0);
+ return;
+ }
+ vam->retval = retval;
+ vam->result_ready = 1;
+
+ if (vam->retval < 0)
+ {
+ //vpp_api_test infra will also print out string form of error
+ fformat (vam->ofp, " (API call error: %d)\n", vam->retval);
+ return;
+ }
+ format_face (&(rmp->face));
+}
+
+
+
+/* VAT command: dump per-face counters; a control ping marks end-of-dump. */
+static int
+api_hicn_api_face_stats_dump (vat_main_t * vam)
+{
+  hicn_test_main_t *hm = &hicn_test_main;
+  vl_api_hicn_api_face_stats_dump_t *mp;
+  vl_api_control_ping_t *mp_ping;
+  int ret;
+
+  if (vam->json_output)
+    {
+      /* BUG FIX: message referred to "memif_dump" (copy/paste leftover). */
+      clib_warning ("JSON output not supported for face_stats_dump");
+      return -99;
+    }
+
+  M (HICN_API_FACE_STATS_DUMP, mp);
+  S (mp);
+
+  /* Resolve the control-ping message id once and cache it. */
+  if (!hm->ping_id)
+    hm->ping_id = vl_msg_api_get_msg_index ((u8 *) (VL_API_CONTROL_PING_CRC));
+
+  /* Use a control ping for synchronization */
+  mp_ping = vl_msg_api_alloc_as_if_client (sizeof (*mp_ping));
+  mp_ping->_vl_msg_id = htons (hm->ping_id);
+  mp_ping->client_index = vam->my_client_index;
+
+  fformat (vam->ofp, "Sending ping id=%d\n", hm->ping_id);
+
+  vam->result_ready = 0;
+  S (mp_ping);
+
+  W (ret);
+  return ret;
+}
+
+/* face_stats-details message handler: print one face's packet/byte counters. */
+static void
+vl_api_hicn_api_face_stats_details_t_handler
+  (vl_api_hicn_api_face_stats_details_t * mp)
+{
+  vat_main_t *vam = hicn_test_main.vat_main;
+
+  /*
+   * CONSISTENCY FIX: the counters arrive in network byte order, so use
+   * clib_net_to_host_* as the rest of the file does. The original called
+   * clib_host_to_net_* -- the byte swap is identical, but the name
+   * misstated the conversion direction.
+   */
+  fformat (vam->ofp, "face id %d\n"
+	   " interest rx packets %16Ld\n"
+	   " bytes %16Ld\n"
+	   " interest tx packets %16Ld\n"
+	   " bytes %16Ld\n"
+	   " data rx packets %16Ld\n"
+	   " bytes %16Ld\n"
+	   " data tx packets %16Ld\n"
+	   " bytes %16Ld\n",
+	   clib_net_to_host_u32 (mp->faceid),
+	   clib_net_to_host_u64 (mp->irx_packets),
+	   clib_net_to_host_u64 (mp->irx_bytes),
+	   clib_net_to_host_u64 (mp->itx_packets),
+	   clib_net_to_host_u64 (mp->itx_bytes),
+	   clib_net_to_host_u64 (mp->drx_packets),
+	   clib_net_to_host_u64 (mp->drx_bytes),
+	   clib_net_to_host_u64 (mp->dtx_packets),
+	   clib_net_to_host_u64 (mp->dtx_bytes));
+}
+
+/* VAT command: look up the route (faces + strategy) for a given prefix. */
+static int
+api_hicn_api_route_get (vat_main_t * vam)
+{
+  unformat_input_t *input = vam->input;
+
+  vl_api_hicn_api_route_get_t *mp;
+  /*
+   * BUG FIX: zero-initialize; the "valid prefix" check below read
+   * indeterminate memory (undefined behavior) when no prefix argument
+   * was supplied.
+   */
+  fib_prefix_t prefix = { 0 };
+  int ret;
+
+  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+    {
+      if (unformat (input, "prefix %U/%d", unformat_ip46_address,
+		    &prefix.fp_addr, IP46_TYPE_ANY, &prefix.fp_len))
+	;
+      else
+	break;
+    }
+
+  /* Check parse */
+  if (((prefix.fp_addr.as_u64[0] == 0) && (prefix.fp_addr.as_u64[1] == 0))
+      || (prefix.fp_len == 0))
+    {
+      clib_warning ("Please specify a valid prefix...");
+      return 1;
+    }
+
+  //Construct the API message
+  M (HICN_API_ROUTE_GET, mp);
+  /*
+   * BUG FIX: set fp_proto for both families; the original only assigned
+   * it for IPv6, leaving it unset for IPv4 prefixes (matches the logic
+   * used by api_hicn_api_enable_disable).
+   */
+  prefix.fp_proto = ip46_address_is_ip4 (&(prefix.fp_addr)) ?
+    FIB_PROTOCOL_IP4 : FIB_PROTOCOL_IP6;
+  ip_prefix_encode (&prefix, &mp->prefix);
+
+  //send it...
+  S (mp);
+
+  //Wait for a reply...
+  W (ret);
+
+  return ret;
+}
+
+/* VAT command: dump all hICN routes; a control ping marks end-of-dump. */
+static int
+api_hicn_api_routes_dump (vat_main_t * vam)
+{
+  hicn_test_main_t *hm = &hicn_test_main;
+  /*
+   * BUG FIX: use the routes_dump message type. The original declared a
+   * vl_api_hicn_api_route_get_t pointer while allocating and sending
+   * HICN_API_ROUTES_DUMP, a type/message mismatch.
+   */
+  vl_api_hicn_api_routes_dump_t *mp;
+  vl_api_control_ping_t *mp_ping;
+  int ret;
+
+  if (vam->json_output)
+    {
+      clib_warning ("JSON output not supported for routes_dump");
+      return -99;
+    }
+
+  M (HICN_API_ROUTES_DUMP, mp);
+  S (mp);
+
+  /* Resolve the control-ping message id once and cache it. */
+  if (!hm->ping_id)
+    hm->ping_id = vl_msg_api_get_msg_index ((u8 *) (VL_API_CONTROL_PING_CRC));
+
+  /* Use a control ping for synchronization */
+  mp_ping = vl_msg_api_alloc_as_if_client (sizeof (*mp_ping));
+  mp_ping->_vl_msg_id = htons (hm->ping_id);
+  mp_ping->client_index = vam->my_client_index;
+
+  fformat (vam->ofp, "Sending ping id=%d\n", hm->ping_id);
+
+  vam->result_ready = 0;
+  S (mp_ping);
+
+  W (ret);
+  return ret;
+}
+
+/*
+ * Reply handler for hicn_api_route_get: prints the faces serving the
+ * route (array terminated by HICN_FACE_NULL, capped at 1000 entries)
+ * and the strategy id.
+ */
+static void
+vl_api_hicn_api_route_get_reply_t_handler (vl_api_hicn_api_route_get_reply_t *
+					   rmp)
+{
+  vat_main_t *vam = hicn_test_main.vat_main;
+  i32 retval = ntohl (rmp->retval);
+  u8 *out = 0;
+
+  if (vam->async_mode)
+    {
+      vam->async_errors += (retval < 0);
+      return;
+    }
+  vam->retval = retval;
+  vam->result_ready = 1;
+
+  if (vam->retval < 0)
+    {
+      /* vpp_api_test infra will also print out string form of error */
+      fformat (vam->ofp, " (API call error: %d)\n", vam->retval);
+      return;
+    }
+
+  vec_reset_length (out);
+  out = format (out, "Faces: \n");
+
+  /* Walk the face-id array until a null face or the 1000-entry cap. */
+  for (int idx = 0; idx < 1000; idx++)
+    {
+      u32 face = clib_net_to_host_u32 (rmp->faceids[idx]);
+      if (face == HICN_FACE_NULL)
+	break;
+      out = format (out, "faceid %d", face);
+    }
+
+  fformat (vam->ofp, "%s\n Strategy: %d\n",
+	   out, clib_net_to_host_u32 (rmp->strategy_id));
+}
+
+/* routes-details message handler: print prefix, serving faces, strategy. */
+static void
+vl_api_hicn_api_routes_details_t_handler (vl_api_hicn_api_routes_details_t * mp)
+{
+  vat_main_t *vam = hicn_test_main.vat_main;
+  fib_prefix_t prefix;
+  u8 *out = 0;
+
+  vec_reset_length (out);
+  ip_prefix_decode (&mp->prefix, &prefix);
+  out = format (out, "Prefix: %U/%u\n", format_ip46_address, &prefix.fp_addr,
+		0, prefix.fp_len);
+
+  out = format (out, "Faces: \n");
+  for (int idx = 0; idx < mp->nfaces; idx++)
+    {
+      u32 face = clib_net_to_host_u32 (mp->faceids[idx]);
+      out = format (out, " faceid %d\n", face);
+    }
+
+  fformat (vam->ofp, "%sStrategy: %d\n",
+	   out, clib_net_to_host_u32 (mp->strategy_id));
+}
+
+static int
+api_hicn_api_strategies_get (vat_main_t * vam)
+{
+  /* VAT command: request the list of available forwarding strategies. */
+  vl_api_hicn_api_strategies_get_t *mp;
+  int ret;
+
+  /* TODO: no arguments parsed yet. Build, send, and wait for the reply. */
+  M (HICN_API_STRATEGIES_GET, mp);
+
+  S (mp);
+  W (ret);
+
+  return ret;
+}
+
+/*
+ * Reply handler for hicn_api_strategies_get: prints the ids of all
+ * available forwarding strategies.
+ */
+static void
+ vl_api_hicn_api_strategies_get_reply_t_handler
+ (vl_api_hicn_api_strategies_get_reply_t * mp)
+{
+ vat_main_t *vam = hicn_test_main.vat_main;
+ i32 retval = ntohl (mp->retval);
+ u8 *sbuf = 0;
+
+ /* In async mode just count errors; no synchronous output is expected. */
+ if (vam->async_mode)
+ {
+ vam->async_errors += (retval < 0);
+ return;
+ }
+ vam->retval = retval;
+ vam->result_ready = 1;
+
+ if (vam->retval < 0)
+ {
+ //vpp_api_test infra will also print out string form of error
+ fformat (vam->ofp, " (API call error: %d)\n", vam->retval);
+ return;
+ }
+ int n_strategies = clib_net_to_host_i32 (mp->n_strategies);
+
+ vec_reset_length (sbuf);
+ sbuf = format (sbuf, "Available strategies:\n");
+
+ /* Print each strategy id on one space-separated line. */
+ int i;
+ for (i = 0; i < n_strategies; i++)
+ {
+ u32 strategy_id = clib_net_to_host_u32 (mp->strategy_id[i]);
+ sbuf = format (sbuf, "%d ", strategy_id);
+ }
+ fformat (vam->ofp, "%s", sbuf);
+}
+
+/* VAT command: retrieve the description of one strategy by id. */
+static int
+api_hicn_api_strategy_get (vat_main_t * vam)
+{
+  unformat_input_t *input = vam->input;
+  vl_api_hicn_api_strategy_get_t *mp;
+  int ret;
+
+  u32 strategy_id = HICN_STRATEGY_NULL;
+
+  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+    {
+      /*
+       * BUG FIX: pass the ADDRESS of strategy_id. The original passed its
+       * value, so unformat wrote through a bogus pointer and strategy_id
+       * was never updated.
+       */
+      if (unformat (input, "strategy %d", &strategy_id))
+	;
+      else
+	break;
+    }
+
+  /* A strategy id is mandatory. */
+  if (strategy_id == HICN_STRATEGY_NULL)
+    {
+      clib_warning ("Please specify strategy id...");
+      return 1;
+    }
+
+  /* Construct the API message */
+  M (HICN_API_STRATEGY_GET, mp);
+  mp->strategy_id = clib_host_to_net_u32 (strategy_id);
+
+  /* send it... */
+  S (mp);
+
+  /* Wait for a reply... */
+  W (ret);
+
+  return ret;
+}
+
+/*
+ * Reply handler for hicn_api_strategy_get: prints the strategy's
+ * textual description.
+ */
+static void
+ vl_api_hicn_api_strategy_get_reply_t_handler
+ (vl_api_hicn_api_strategy_get_reply_t * mp)
+{
+ vat_main_t *vam = hicn_test_main.vat_main;
+ i32 retval = ntohl (mp->retval);
+
+ /* In async mode just count errors; no synchronous output is expected. */
+ if (vam->async_mode)
+ {
+ vam->async_errors += (retval < 0);
+ return;
+ }
+ vam->retval = retval;
+ vam->result_ready = 1;
+
+ if (vam->retval < 0)
+ {
+ //vpp_api_test infra will also print out string form of error
+ fformat (vam->ofp, " (API call error: %d)\n", vam->retval);
+ return;
+ }
+ fformat (vam->ofp, "%s", mp->description);
+}
+
+/* VAT command: enable or disable hICN processing for a prefix. */
+static int
+api_hicn_api_enable_disable (vat_main_t * vam)
+{
+  unformat_input_t *input = vam->input;
+  vl_api_hicn_api_enable_disable_t *mp;
+  int ret;
+
+  /*
+   * BUG FIX: zero-initialize; the "valid prefix" check below read
+   * indeterminate memory (undefined behavior) when no prefix argument
+   * was supplied.
+   */
+  fib_prefix_t prefix = { 0 };
+  vl_api_hicn_action_type_t en_dis = HICN_ENABLE;
+
+  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+    {
+      if (unformat (input, "prefix %U/%d", unformat_ip46_address,
+		    &prefix.fp_addr, IP46_TYPE_ANY, &prefix.fp_len))
+	;
+      else if (unformat (input, "disable"))
+	{
+	  en_dis = HICN_DISABLE;
+	}
+      else
+	{
+	  break;
+	}
+    }
+
+  /* Check parse */
+  if (((prefix.fp_addr.as_u64[0] == 0) && (prefix.fp_addr.as_u64[1] == 0))
+      || (prefix.fp_len == 0))
+    {
+      clib_warning ("Please specify a valid prefix...");
+      return 1;
+    }
+
+  prefix.fp_proto = ip46_address_is_ip4 (&(prefix.fp_addr)) ? FIB_PROTOCOL_IP4 :
+    FIB_PROTOCOL_IP6;
+
+  //Construct the API message
+  M (HICN_API_ENABLE_DISABLE, mp);
+
+  ip_prefix_encode (&prefix, &mp->prefix);
+  mp->enable_disable = en_dis;
+
+  //send it...
+  S (mp);
+
+  //Wait for a reply...
+  W (ret);
+
+  return ret;
+}
+
+/* VAT command: register a producer application for a prefix on an interface. */
+static int
+api_hicn_api_register_prod_app (vat_main_t * vam)
+{
+  unformat_input_t *input = vam->input;
+  vl_api_hicn_api_register_prod_app_t *mp;
+  /*
+   * BUG FIX: zero-initialize; the "prefix specified" check below read
+   * indeterminate memory (undefined behavior) when no prefix argument
+   * was supplied.
+   */
+  fib_prefix_t prefix = { 0 };
+  u32 swif = ~0;
+  int ret;
+
+  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+    {
+      if (unformat (input, "prefix %U/%d", unformat_ip46_address,
+		    &prefix.fp_addr, IP46_TYPE_ANY, &prefix.fp_len))
+	;
+      else if (unformat (input, "id %d", &swif))
+	;
+      else
+	{
+	  break;
+	}
+    }
+
+  /* Check parse */
+  if (((prefix.fp_addr.as_u64[0] == 0) && (prefix.fp_addr.as_u64[1] == 0))
+      || (prefix.fp_len == 0))
+    {
+      clib_warning ("Please specify prefix...");
+      return 1;
+    }
+
+  prefix.fp_proto =
+    ip46_address_is_ip4 (&(prefix.fp_addr)) ? FIB_PROTOCOL_IP4 :
+    FIB_PROTOCOL_IP6;
+
+  /* Construct the API message */
+  M (HICN_API_REGISTER_PROD_APP, mp);
+  ip_prefix_encode (&prefix, &mp->prefix);
+
+  mp->swif = clib_host_to_net_u32 (swif);
+
+  /* send it... */
+  S (mp);
+
+  /* Wait for a reply... */
+  W (ret);
+
+  return ret;
+}
+
+/*
+ * Reply handler for hicn_api_register_prod_app: records the return code;
+ * on error prints the API error code (no output on success).
+ */
+static void
+ vl_api_hicn_api_register_prod_app_reply_t_handler
+ (vl_api_hicn_api_register_prod_app_reply_t * mp)
+{
+ vat_main_t *vam = hicn_test_main.vat_main;
+ i32 retval = ntohl (mp->retval);
+
+ /* In async mode just count errors; no synchronous output is expected. */
+ if (vam->async_mode)
+ {
+ vam->async_errors += (retval < 0);
+ return;
+ }
+ vam->retval = retval;
+ vam->result_ready = 1;
+
+ if (vam->retval < 0)
+ {
+ //vpp_api_test infra will also print out string form of error
+ fformat (vam->ofp, " (API call error: %d)\n", vam->retval);
+ return;
+ }
+}
+
+/* VAT command: delete a producer application face by id. */
+static int
+api_hicn_api_face_prod_del (vat_main_t * vam)
+{
+  unformat_input_t *input = vam->input;
+  vl_api_hicn_api_face_prod_del_t *mp;
+  /*
+   * BUG FIX: initialize to ~0 so the "face ID missing" check below can
+   * fire. The original initialized faceid to 0 but compared it against
+   * ~0, so the check could never trigger.
+   */
+  u32 faceid = ~0, ret;
+
+  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+    {
+      if (unformat (input, "face %d", &faceid))
+	;
+      else
+	{
+	  break;
+	}
+    }
+
+  //Check for presence of face ID
+  if (faceid == ~0)
+    {
+      clib_warning ("Please specify face ID");
+      return 1;
+    }
+
+  //Construct the API message
+  M (HICN_API_FACE_PROD_DEL, mp);
+  mp->faceid = clib_host_to_net_u32 (faceid);
+
+  //send it...
+  S (mp);
+
+  //Wait for a reply...
+  W (ret);
+
+  return ret;
+}
+
+static int
+api_hicn_api_register_cons_app (vat_main_t * vam)
+{
+  /* VAT command: register the calling application as an hICN consumer. */
+  vl_api_hicn_api_register_cons_app_t *mp;
+  int ret;
+
+  /* No arguments: build, send, and wait for the reply. */
+  M (HICN_API_REGISTER_CONS_APP, mp);
+
+  S (mp);
+  W (ret);
+
+  return ret;
+}
+
+/* VAT command: delete a consumer application face by id. */
+static int
+api_hicn_api_face_cons_del (vat_main_t * vam)
+{
+  unformat_input_t *input = vam->input;
+  vl_api_hicn_api_face_cons_del_t *mp;
+  /*
+   * BUG FIX: initialize to ~0 so the "face ID missing" check below can
+   * fire. The original initialized faceid to 0 but compared it against
+   * ~0, so the check could never trigger.
+   */
+  u32 faceid = ~0, ret;
+
+  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+    {
+      if (unformat (input, "face %d", &faceid))
+	;
+      else
+	{
+	  break;
+	}
+    }
+
+  //Check for presence of face ID
+  if (faceid == ~0)
+    {
+      clib_warning ("Please specify face ID");
+      return 1;
+    }
+
+  //Construct the API message
+  M (HICN_API_FACE_CONS_DEL, mp);
+  mp->faceid = clib_host_to_net_u32 (faceid);
+
+  //send it...
+  S (mp);
+
+  //Wait for a reply...
+  W (ret);
+
+  return ret;
+}
+
+/*
+ * Reply handler for hicn_api_register_cons_app: prints the IPv4 and IPv6
+ * source addresses assigned to the consumer face.
+ */
+static void
+vl_api_hicn_api_register_cons_app_reply_t_handler
+  (vl_api_hicn_api_register_cons_app_reply_t * mp)
+{
+  vat_main_t *vam = hicn_test_main.vat_main;
+  i32 retval = ntohl (mp->retval);
+
+  if (vam->async_mode)
+    {
+      vam->async_errors += (retval < 0);
+      return;
+    }
+  vam->retval = retval;
+  vam->result_ready = 1;
+
+  if (vam->retval < 0)
+    {
+      //vpp_api_test infra will also print out string form of error
+      fformat (vam->ofp, " (API call error: %d)\n", vam->retval);
+      return;
+    }
+  ip46_address_t src_addr4 = ip46_address_initializer;
+  ip46_address_t src_addr6 = ip46_address_initializer;
+  ip_address_decode (&mp->src_addr4, &src_addr4);
+  ip_address_decode (&mp->src_addr6, &src_addr6);
+
+  /*
+   * BUG FIX: format_ip46_address consumes (address pointer, type) in that
+   * order -- see every other call site in this file. The original passed
+   * IP46_TYPE_ANY first and the address second, so the formatter would
+   * dereference the type constant as a pointer.
+   */
+  fformat (vam->ofp,
+	   "ip4 address %U\n"
+	   "ip6 address :%U\n",
+	   format_ip46_address, &src_addr4, IP46_TYPE_ANY,
+	   format_ip46_address, &src_addr6, IP46_TYPE_ANY);
+}
+
+/*
+ * VAT command: add or delete a UDP tunnel.
+ * Syntax: [add|del] <src-addr> <dst-addr> <src-port> <dst-port>
+ */
+static int
+api_hicn_api_udp_tunnel_add_del (vat_main_t * vam)
+{
+  unformat_input_t *input = vam->input;
+  vl_api_hicn_api_udp_tunnel_add_del_t *mp;
+
+  /*
+   * BUG FIX: zero-initialize addresses and ports; the original sent
+   * indeterminate values when the corresponding input tokens were
+   * missing (ports have no mandatory-argument check).
+   */
+  ip46_address_t src_ip = ip46_address_initializer;
+  ip46_address_t dst_ip = ip46_address_initializer;
+  u32 src_port = 0, dst_port = 0;
+  fib_protocol_t fproto;
+  u8 is_del;
+  int ret;
+
+  is_del = 0;
+  fproto = FIB_PROTOCOL_MAX;
+
+  /* Get a line of input. */
+  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+    {
+      if (unformat (input, "add"))
+	is_del = 0;
+      else if (unformat (input, "del"))
+	is_del = 1;
+      else if (unformat (input, "%U %U",
+			 unformat_ip4_address,
+			 &src_ip.ip4, unformat_ip4_address, &dst_ip.ip4))
+	fproto = FIB_PROTOCOL_IP4;
+      else if (unformat (input, "%U %U",
+			 unformat_ip6_address,
+			 &src_ip.ip6, unformat_ip6_address, &dst_ip.ip6))
+	fproto = FIB_PROTOCOL_IP6;
+      else if (unformat (input, "%d %d", &src_port, &dst_port))
+	;
+      else
+	{
+	  break;
+	}
+    }
+
+  if (fproto == FIB_PROTOCOL_MAX)
+    {
+      /* BUG FIX: accurate message (was "Please specify face ID"). */
+      clib_warning ("Please specify src and dst addresses");
+      return 1;
+    }
+
+  /* Construct the API message */
+  M (HICN_API_UDP_TUNNEL_ADD_DEL, mp);
+  ip_address_encode (&src_ip, fproto == FIB_PROTOCOL_IP4 ?
+		     IP46_TYPE_IP4 : IP46_TYPE_IP6, &mp->src_addr);
+  ip_address_encode (&dst_ip, fproto == FIB_PROTOCOL_IP4 ?
+		     IP46_TYPE_IP4 : IP46_TYPE_IP6, &mp->dst_addr);
+  mp->src_port = clib_host_to_net_u16 (src_port);
+  mp->dst_port = clib_host_to_net_u16 (dst_port);
+  mp->is_add = !is_del;
+
+  /* send it... */
+  S (mp);
+
+  /* Wait for a reply... */
+  W (ret);
+
+  return ret;
+}
+
+/*
+ * Reply handler for hicn_api_udp_tunnel_add_del: prints the udp-encap
+ * index returned for the new tunnel.
+ */
+static void
+vl_api_hicn_api_udp_tunnel_add_del_reply_t_handler
+(vl_api_hicn_api_udp_tunnel_add_del_reply_t * mp)
+{
+ vat_main_t *vam = hicn_test_main.vat_main;
+ i32 retval = ntohl (mp->retval);
+
+ /* In async mode just count errors; no synchronous output is expected. */
+ if (vam->async_mode)
+ {
+ vam->async_errors += (retval < 0);
+ return;
+ }
+ vam->retval = retval;
+ vam->result_ready = 1;
+
+ if (vam->retval < 0)
+ {
+ //vpp_api_test infra will also print out string form of error
+ fformat (vam->ofp, " (API call error: %d)\n", vam->retval);
+ return;
+ }
+
+ index_t uei = clib_net_to_host_u32(mp->uei);
+
+ fformat (vam->ofp,
+ "udp-encap %d\n",
+ uei);
+}
+
+
+
+#include <vpp_plugins/hicn/hicn.api_test.c>
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/network/hicn_config.c b/hicn-plugin/src/network/hicn_config.c
new file mode 100644
index 000000000..441bd03cf
--- /dev/null
+++ b/hicn-plugin/src/network/hicn_config.c
@@ -0,0 +1,224 @@
+/*
+ * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/vnet.h>
+#include <vnet/plugin/plugin.h>
+#include <vlib/vlib.h>
+#include <vnet/interface.h>
+
+#include "hicn.h"
+#include "params.h"
+#include "infra.h"
+#include "strategy_dpo_manager.h"
+#include "mgmt.h"
+#include "error.h"
+#include "faces/app/address_mgr.h"
+#include "face_db.h"
+#include "udp_tunnels/udp_tunnel.h"
+#include "route.h"
+
+/* Global hICN plugin state (PIT/CS, lifetimes, API ids). */
+hicn_main_t hicn_main;
+/* Module vars */
+/* Set once hicn_infra_fwdr_init has completed successfully. */
+int hicn_infra_fwdr_initialized = 0;
+
+/*
+ * Global time counters we're trying out for opportunistic hashtable
+ * expiration.
+ */
+uint16_t hicn_infra_fast_timer; /* Counts at 1 second intervals */
+uint16_t hicn_infra_slow_timer; /* Counts at 1 minute intervals */
+
+/* Pool of face buckets backing the face database. */
+hicn_face_bucket_t *hicn_face_bucket_pool;
+
+/*
+ * Init hicn forwarder with configurable PIT, CS sizes
+ */
+/*
+ * One-shot forwarder initialization: records the per-worker PIT/CS
+ * limits, resets the global timers and creates the PIT/CS.
+ *
+ * @param shard_pit_size number of PIT entries per worker
+ * @param shard_cs_size  number of CS entries per worker (LRU cap)
+ * @return HICN_ERROR_NONE on success; HICN_ERROR_FWD_ALREADY_ENABLED if
+ *         already initialized; otherwise the hicn_pit_create error code.
+ */
+static int
+hicn_infra_fwdr_init (uint32_t shard_pit_size, uint32_t shard_cs_size)
+{
+ int ret = 0;
+
+ if (hicn_infra_fwdr_initialized)
+ {
+ ret = HICN_ERROR_FWD_ALREADY_ENABLED;
+ goto done;
+ }
+ /* Init per worker limits */
+ hicn_infra_pit_size = shard_pit_size;
+ hicn_infra_cs_size = shard_cs_size;
+
+ /* Init the global time-compression counters */
+ hicn_infra_fast_timer = 1;
+ hicn_infra_slow_timer = 1;
+
+ /* NOTE(review): the hicn_pit_set_lru_max result is not checked -- the
+ * return value of hicn_pit_create alone decides success. */
+ ret = hicn_pit_create (&hicn_main.pitcs, hicn_infra_pit_size);
+ hicn_pit_set_lru_max (&hicn_main.pitcs, hicn_infra_cs_size);
+done:
+ /* Mark initialized only when PIT/CS creation succeeded. */
+ if ((ret == HICN_ERROR_NONE) && !hicn_infra_fwdr_initialized)
+ {
+ hicn_infra_fwdr_initialized = 1;
+ }
+ return (ret);
+}
+
+/*
+ * Action function shared between message handler and debug CLI NOTICE: we're
+ * only 'enabling' now
+ *
+ * Validates the requested PIT size, PIT max lifetime and CS size against
+ * the configured bounds (clamping the CS to half the PIT and to the
+ * available vlib buffers), then initializes the forwarder.
+ */
+int
+hicn_infra_plugin_enable_disable (int enable_disable,
+ int pit_size_req,
+ f64 pit_max_lifetime_sec_req,
+ int cs_size_req,
+ vnet_link_t link)
+{
+ int ret = 0;
+
+ hicn_main_t *sm = &hicn_main;
+ uint32_t pit_size, cs_size;
+
+ /* Notice if we're already enabled... */
+ if (sm->is_enabled)
+ {
+ ret = HICN_ERROR_FWD_ALREADY_ENABLED;
+ goto done;
+ }
+ /* Set up params and call fwdr_init set up PIT/CS, forwarder nodes */
+
+ /* Check the range and assign some globals */
+ /* Negative lifetime request means "use the default". */
+ if (pit_max_lifetime_sec_req < 0)
+ {
+ sm->pit_lifetime_max_ms = HICN_PARAM_PIT_LIFETIME_DFLT_MAX_MS;
+ }
+ else
+ {
+ if (pit_max_lifetime_sec_req < HICN_PARAM_PIT_LIFETIME_BOUND_MIN_SEC ||
+ pit_max_lifetime_sec_req > HICN_PARAM_PIT_LIFETIME_BOUND_MAX_SEC)
+ {
+ ret = HICN_ERROR_PIT_CONFIG_MAXLT_OOB;
+ goto done;
+ }
+ sm->pit_lifetime_max_ms = pit_max_lifetime_sec_req * SEC_MS;
+ }
+
+ /* Negative PIT size request means "use the default". */
+ if (pit_size_req < 0)
+ {
+ pit_size = HICN_PARAM_PIT_ENTRIES_DFLT;
+ }
+ else
+ {
+ if (pit_size_req < HICN_PARAM_PIT_ENTRIES_MIN ||
+ pit_size_req > HICN_PARAM_PIT_ENTRIES_MAX)
+ {
+ ret = HICN_ERROR_PIT_CONFIG_SIZE_OOB;
+ goto done;
+ }
+ pit_size = (uint32_t) pit_size_req;
+ }
+
+ /* Negative CS size request means "use the default". */
+ if (cs_size_req < 0)
+ {
+ cs_size = HICN_PARAM_CS_ENTRIES_DFLT;
+ }
+ else
+ {
+ /*
+ * This should be relatively safe
+ * At this point vlib buffers should have been already allocated
+ */
+
+ vlib_buffer_main_t *bm;
+ vlib_buffer_pool_t *bp;
+ vlib_main_t *vm = vlib_get_main ();
+ bm = vm->buffer_main;
+
+ /* Use the largest buffer pool as the buffer budget. */
+ u32 n_buffers = 0;
+ vec_foreach (bp, bm->buffer_pools)
+ n_buffers = n_buffers < bp->n_buffers ? bp->n_buffers : n_buffers;
+
+ // check if CS is bigger than PIT or bigger than the available vlib_buffers
+ uword cs_buffers =
+ (n_buffers >
+ HICN_PARAM_CS_MIN_MBUF) ? n_buffers - HICN_PARAM_CS_MIN_MBUF : 0;
+
+ /* Clamp the CS to half the PIT and to the buffer budget. */
+ if (cs_size_req > (pit_size_req / 2) || cs_size_req > cs_buffers)
+ {
+ cs_size_req =
+ ((pit_size_req / 2) > cs_buffers) ? cs_buffers : pit_size_req / 2;
+ vlib_cli_output (vm,
+ "WARNING!! CS too large. Please check size of PIT or the number of buffers available in VPP\n");
+
+ }
+ cs_size = (uint32_t) cs_size_req;
+ }
+
+ ret = hicn_infra_fwdr_init (pit_size, cs_size);
+
+ hicn_face_db_init (pit_size);
+
+ if (ret != HICN_ERROR_NONE)
+ {
+ goto done;
+ }
+ sm->is_enabled = 1;
+ sm->link = link;
+ //hicn_face_udp_init_internal ();
+
+done:
+
+ return (ret);
+}
+
+/* Parse the "hicn" startup-config stanza and enable the plugin. */
+static clib_error_t *
+hicn_configure (vlib_main_t * vm, unformat_input_t * input)
+{
+  u32 pit_size = HICN_PARAM_PIT_ENTRIES_DFLT;
+  u32 cs_size = HICN_PARAM_CS_ENTRIES_DFLT;
+  /*
+   * BUG FIX: parse into a u32. The original declared this u64 but filled
+   * it through the "%u" unformat specifier, which writes only 32 bits,
+   * leaving the upper half of the u64 untouched.
+   */
+  u32 pit_lifetime_max_sec = HICN_PARAM_PIT_LIFETIME_DFLT_MAX_MS / SEC_MS;
+
+  /*
+   * BUG FIX: link was passed to hicn_infra_plugin_enable_disable
+   * uninitialized (undefined behavior) unless "grab mpls-tunnels" was
+   * present. Default to ~0 ("no link override") -- TODO confirm the
+   * intended default against the consumers of hicn_main.link.
+   */
+  vnet_link_t link = ~0;
+
+  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+    {
+      if (unformat (input, "pit-size %u", &pit_size))
+	;
+      else if (unformat (input, "cs-size %u", &cs_size))
+	;
+      else if (unformat (input, "pit-lifetime-max %u", &pit_lifetime_max_sec))
+	;
+      else if (unformat (input, "grab mpls-tunnels"))
+	link = VNET_LINK_MPLS;
+      else
+	break;
+    }
+
+  unformat_free (input);
+
+  hicn_infra_plugin_enable_disable (1, pit_size,
+				    pit_lifetime_max_sec,
+				    cs_size, link);
+
+  return 0;
+}
+
+VLIB_CONFIG_FUNCTION (hicn_configure, "hicn");
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/network/infra.h b/hicn-plugin/src/network/infra.h
new file mode 100644
index 000000000..ff76de4e4
--- /dev/null
+++ b/hicn-plugin/src/network/infra.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HICN_INFRA_H__
+#define __HICN_INFRA_H__
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vnet/interface.h>
+
+#include "pcs.h"
+
+/**
+ * @file infra.h
+ *
+ */
+
+/**
+ * @brief hICN plugin global state.
+ */
+typedef struct hicn_main_s
+{
+ /* Binary API message ID base */
+ u16 msg_id_base;
+
+ /* Have we been enabled */
+ u16 is_enabled;
+
+ /* Forwarder PIT/CS */
+ hicn_pit_cs_t pitcs;
+
+ /* Global PIT lifetime info */
+ /*
+ * Boundaries for the interest lifetime. If greater than
+ * pit_lifetime_max_ms, pit_lifetime_max_ms is used in the PIT
+ */
+ u64 pit_lifetime_max_ms;
+
+ /* Link type recorded when the plugin is enabled
+ * (see hicn_infra_plugin_enable_disable). */
+ vnet_link_t link;
+
+} hicn_main_t;
+
+extern hicn_main_t hicn_main;
+
+extern int hicn_infra_fwdr_initialized;
+
+/* PIT and CS size */
+/*
+ * NOTE(review): these are tentative definitions in a header, so every .c
+ * that includes infra.h emits its own definition. Consider declaring them
+ * 'extern' here and defining them once in a .c file -- verify no other
+ * translation unit relies on the current layout before changing.
+ */
+u32 hicn_infra_pit_size;
+u32 hicn_infra_cs_size;
+
+/**
+ * @brief Enable and disable the hicn plugin
+ *
+ * Enable the time the hICN plugin and set the forwarder parameters.
+ * @param enable_disable 1 if to enable, 0 otherwisw (currently only enable is supported)
+ * @param pit_max_size Max size of the PIT
+ * @param pit_max_lifetime_sec_req Maximum timeout allowed for a PIT entry lifetime
+ * @param cs_max_size CS size. Must be <= than pit_max_size
+ * @param cs_reserved_app Amount of CS reserved for application faces
+ */
+int
+hicn_infra_plugin_enable_disable (int enable_disable,
+ int pit_max_size,
+ f64 pit_max_lifetime_sec_req,
+ int cs_max_size,
+ vnet_link_t link);
+
+
+/* vlib nodes that compose the hICN forwarder */
+extern vlib_node_registration_t hicn_interest_pcslookup_node;
+extern vlib_node_registration_t hicn_data_pcslookup_node;
+extern vlib_node_registration_t hicn_data_fwd_node;
+extern vlib_node_registration_t hicn_data_store_node;
+extern vlib_node_registration_t hicn_interest_hitpit_node;
+extern vlib_node_registration_t hicn_interest_hitcs_node;
+extern vlib_node_registration_t hicn_pg_interest_node;
+extern vlib_node_registration_t hicn_pg_data_node;
+extern vlib_node_registration_t hicn_pg_server_node;
+extern vlib_node_registration_t hicn_data_input_ip6_node;
+extern vlib_node_registration_t hicn_data_input_ip4_node;
+
+
+
+#endif /* // __HICN_INFRA_H__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/network/interest_hitcs.h b/hicn-plugin/src/network/interest_hitcs.h
new file mode 100644
index 000000000..94fa3e6f5
--- /dev/null
+++ b/hicn-plugin/src/network/interest_hitcs.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HICN_INTEREST_HITCS_H__
+#define __HICN_INTEREST_HITCS_H__
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+
+#include "pcs.h"
+
+/**
+ * @file interest_hitcs.h
+ *
+ * This is the node encoutered by interest packets after the hicn-interest-pcslookup.
+ * This node satisfies an interest with a data stored in the CS and send the data back
+ * from the incoming iface of the interest (i.e., the vlib buffer is sent to the
+ * hicn6-iface-output or hicn4-iface-output node). In case the data is expired, the
+ * vlib buffer is sent to the hicn-strategy node.
+ */
+
+/*
+ * Node context data; we think this is per-thread/instance
+ */
+typedef struct hicn_interest_hitcs_runtime_s
+{
+ /* Node instance id. */
+ int id;
+ /* Shared PIT/CS this node reads from. */
+ hicn_pit_cs_t *pitcs;
+} hicn_interest_hitcs_runtime_t;
+
+/* Trace context struct */
+typedef struct
+{
+ u32 next_index; /* next node chosen for the traced packet */
+ u32 sw_if_index; /* receiving software interface */
+ u8 pkt_type; /* hICN packet type */
+} hicn_interest_hitcs_trace_t;
+
+/* Next-node indices for the hicn-interest-hitcs node (see file header). */
+typedef enum
+{
+ HICN_INTEREST_HITCS_NEXT_STRATEGY,
+ HICN_INTEREST_HITCS_NEXT_IFACE4_OUT,
+ HICN_INTEREST_HITCS_NEXT_IFACE6_OUT,
+ HICN_INTEREST_HITCS_NEXT_ERROR_DROP,
+ HICN_INTEREST_HITCS_N_NEXT,
+} hicn_interest_hitcs_next_t;
+
+#endif /* // __HICN_INTEREST_HITCS_H__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/network/interest_hitcs_node.c b/hicn-plugin/src/network/interest_hitcs_node.c
new file mode 100644
index 000000000..f569fa897
--- /dev/null
+++ b/hicn-plugin/src/network/interest_hitcs_node.c
@@ -0,0 +1,291 @@
+/*
+ * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/ip/ip6_packet.h>
+#include <vppinfra/string.h>
+
+#include "interest_hitcs.h"
+#include "mgmt.h"
+#include "parser.h"
+#include "data_fwd.h"
+#include "infra.h"
+#include "state.h"
+#include "error.h"
+
+/* packet trace format function */
+static u8 *hicn_interest_hitcs_format_trace (u8 * s, va_list * args);
+
+
+/* Stats string values */
+static char *hicn_interest_hitcs_error_strings[] = {
+#define _(sym, string) string,
+ foreach_hicnfwd_error
+#undef _
+};
+
+vlib_node_registration_t hicn_interest_hitcs_node;
+
+always_inline void drop_packet (u32 * next0);
+
+/*
+ * Copy/clone the data packet cached in the CS (buffer index *bi0_cs) into
+ * the interest buffer 'dest' so it can be sent back to the requester.
+ * Packets flagged as smaller than two cache lines are copied outright;
+ * larger ones have only the first two cache lines copied and the rest of
+ * the CS buffer attached as a chained clone.
+ * NOTE(review): the 'isv6' parameter is currently unused here.
+ */
+always_inline void
+clone_from_cs (vlib_main_t * vm, u32 * bi0_cs, vlib_buffer_t * dest, u8 isv6)
+{
+  /* Retrieve the buffer to clone */
+  vlib_buffer_t *cs_buf = vlib_get_buffer (vm, *bi0_cs);
+  hicn_buffer_t *hicnb = hicn_get_buffer (cs_buf);
+  word buffer_advance = CLIB_CACHE_LINE_BYTES * 2;
+  if (hicnb->flags & HICN_BUFFER_FLAGS_PKT_LESS_TWO_CL)
+    {
+      /* Small packet: plain copy of payload and opaque metadata */
+      clib_memcpy_fast (vlib_buffer_get_current (dest),
+			vlib_buffer_get_current (cs_buf),
+			cs_buf->current_length);
+      clib_memcpy_fast (dest->opaque2, cs_buf->opaque2,
+			sizeof (cs_buf->opaque2));
+
+      dest->current_length = cs_buf->current_length;
+      dest->total_length_not_including_first_buffer = 0;
+    }
+  else
+    {
+      /* Expose the headers region before the current data pointer */
+      vlib_buffer_advance (cs_buf, -buffer_advance);
+      /* Refcount saturated: duplicate the CS buffer and clone from the copy */
+      if (PREDICT_FALSE (cs_buf->ref_count == 255))
+	{
+	  vlib_buffer_t *cs_buf2 = vlib_buffer_copy (vm, cs_buf);
+	  vlib_buffer_advance (cs_buf, buffer_advance);
+	  *bi0_cs = vlib_get_buffer_index (vm, cs_buf2);
+	  cs_buf->ref_count--;
+	  cs_buf = cs_buf2;
+	}
+
+      /* Copy the first two cache lines into dest, then attach the remainder
+       * of the CS buffer as a clone chained after it */
+      clib_memcpy_fast (vlib_buffer_get_current (dest),
+			vlib_buffer_get_current (cs_buf), buffer_advance);
+      clib_memcpy_fast (dest->opaque2, cs_buf->opaque2,
+			sizeof (cs_buf->opaque2));
+      dest->current_length = buffer_advance;
+      vlib_buffer_advance (cs_buf, buffer_advance);
+      vlib_buffer_attach_clone (vm, dest, cs_buf);
+    }
+}
+
+/*
+ * Per-frame node function: for each interest that hit the Content Store,
+ * either serve the cached data back through the incoming iface, or, if the
+ * CS entry has expired, delete it and hand the interest to hicn-strategy.
+ */
+static uword
+hicn_interest_hitcs_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
+			     vlib_frame_t * frame)
+{
+  u32 n_left_from, *from, *to_next;
+  hicn_interest_hitcs_next_t next_index;
+  hicn_interest_hitcs_runtime_t *rt;
+  vl_api_hicn_api_node_stats_get_reply_t stats = { 0 };
+  f64 tnow;
+  int ret;
+
+  rt = vlib_node_get_runtime_data (vm, hicn_interest_hitcs_node.index);
+
+  /* Lazily bind the runtime to the global PIT/CS on the first frame */
+  if (PREDICT_FALSE (rt->pitcs == NULL))
+    {
+      rt->pitcs = &hicn_main.pitcs;
+    }
+  from = vlib_frame_vector_args (frame);
+  n_left_from = frame->n_vectors;
+  next_index = node->cached_next_index;
+
+  /* Capture time in vpp terms */
+  tnow = vlib_time_now (vm);
+  while (n_left_from > 0)
+    {
+      u32 n_left_to_next;
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+      while (n_left_from > 0 && n_left_to_next > 0)
+	{
+	  vlib_buffer_t *b0;
+	  u8 isv6;
+	  u8 *nameptr;
+	  u16 namelen;
+	  u32 bi0;
+	  u32 next0 = HICN_INTEREST_HITCS_NEXT_ERROR_DROP;
+	  hicn_name_t name;
+	  hicn_header_t *hicn0;
+	  hicn_buffer_t *hicnb0;
+	  hicn_hash_node_t *node0;
+	  hicn_pcs_entry_t *pitp;
+	  hicn_hash_entry_t *hash_entry0;
+	  const hicn_strategy_vft_t *strategy_vft0;
+	  const hicn_dpo_vft_t *dpo_vft0;
+	  u8 dpo_ctx_id0;
+
+	  /* Prefetch for next iteration. */
+	  if (n_left_from > 1)
+	    {
+	      vlib_buffer_t *b1;
+	      b1 = vlib_get_buffer (vm, from[1]);
+	      CLIB_PREFETCH (b1, 2 * CLIB_CACHE_LINE_BYTES, STORE);
+	      CLIB_PREFETCH (b1->data, CLIB_CACHE_LINE_BYTES, STORE);
+	    }
+
+	  /* Dequeue a packet buffer */
+	  bi0 = from[0];
+	  from += 1;
+	  n_left_from -= 1;
+	  to_next[0] = bi0;
+	  to_next += 1;
+	  n_left_to_next -= 1;
+
+	  b0 = vlib_get_buffer (vm, bi0);
+
+	  /* Get hicn buffer and state stored by the pcslookup node */
+	  hicnb0 = hicn_get_buffer (b0);
+	  hicn_get_internal_state (hicnb0, rt->pitcs, &node0, &strategy_vft0,
+				   &dpo_vft0, &dpo_ctx_id0, &hash_entry0);
+
+	  ret = hicn_interest_parse_pkt (b0, &name, &namelen, &hicn0, &isv6);
+	  nameptr = (u8 *) (&name);
+	  pitp = hicn_pit_get_data (node0);
+
+	  dpo_id_t hicn_dpo_id0 =
+	    { dpo_vft0->hicn_dpo_get_type (), 0, 0, dpo_ctx_id0 };
+
+	  /* Parse failure, or hash collision (name mismatch): drop */
+	  if (PREDICT_FALSE
+	      (ret != HICN_ERROR_NONE ||
+	       !hicn_node_compare (nameptr, namelen, node0)))
+	    {
+	      /* Remove lock from the entry */
+	      hicn_pcs_remove_lock (rt->pitcs, &pitp, &node0, vm, hash_entry0,
+				    dpo_vft0, &hicn_dpo_id0);
+	      drop_packet (&next0);
+	      goto end_processing;
+	    }
+	  if ((tnow > pitp->shared.expire_time))
+	    {
+	      /* Delete and clean up expired CS entry */
+	      hicn_pcs_delete (rt->pitcs, &pitp, &node0, vm, hash_entry0,
+			       dpo_vft0, &hicn_dpo_id0);
+	      stats.cs_expired_count++;
+	      /* Forward interest to the strategy node */
+	      next0 = HICN_INTEREST_HITCS_NEXT_STRATEGY;
+	    }
+	  else
+	    {
+	      /* Touch the CS entry on hit (presumably a cache-policy/LRU
+	       * refresh — see cache_policies/) unless it is flagged deleted */
+	      if (PREDICT_TRUE
+		  (!(hash_entry0->he_flags & HICN_HASH_ENTRY_FLAG_DELETED)))
+		hicn_pcs_cs_update (vm, rt->pitcs, pitp, pitp, node0);
+
+	      /*
+	       * Retrieve the incoming iface and forward
+	       * the data through it
+	       */
+	      next0 = isv6 ? HICN_INTEREST_HITCS_NEXT_IFACE6_OUT :
+		HICN_INTEREST_HITCS_NEXT_IFACE4_OUT;
+	      vnet_buffer (b0)->ip.adj_index[VLIB_TX] = hicnb0->face_id;
+
+	      /* Replace the interest payload with the cached data packet */
+	      clone_from_cs (vm, &pitp->u.cs.cs_pkt_buf, b0, isv6);
+
+	      stats.pkts_from_cache_count++;
+	      stats.pkts_data_count++;
+	      /* Remove lock from the entry */
+	      hicn_pcs_remove_lock (rt->pitcs, &pitp, &node0, vm, hash_entry0,
+				    dpo_vft0, &hicn_dpo_id0);
+	    }
+
+	end_processing:
+
+	  /* Maybe trace */
+	  if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) &&
+			     (b0->flags & VLIB_BUFFER_IS_TRACED)))
+	    {
+	      hicn_interest_hitcs_trace_t *t =
+		vlib_add_trace (vm, node, b0, sizeof (*t));
+	      t->pkt_type = HICN_PKT_TYPE_INTEREST;
+	      t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+	      t->next_index = next0;
+	    }
+	  /* Incr packet counter */
+	  stats.pkts_processed += 1;
+
+	  /*
+	   * Verify speculative enqueue, maybe switch current
+	   * next frame
+	   */
+	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+					   to_next, n_left_to_next,
+					   bi0, next0);
+	}
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+    }
+
+  u32 pit_int_count = hicn_pit_get_int_count (rt->pitcs);
+
+  /* Export the per-frame stats into the node counters */
+  vlib_node_increment_counter (vm, hicn_interest_hitcs_node.index,
+			       HICNFWD_ERROR_CACHED,
+			       stats.pkts_from_cache_count);
+
+  vlib_node_increment_counter (vm, hicn_interest_hitcs_node.index,
+			       HICNFWD_ERROR_DATAS, stats.pkts_data_count);
+
+  update_node_counter (vm, hicn_interest_hitcs_node.index,
+		       HICNFWD_ERROR_INT_COUNT, pit_int_count);
+
+  return (frame->n_vectors);
+}
+
+always_inline void
+drop_packet (u32 * next0)
+{
+ *next0 = HICN_INTEREST_HITCS_NEXT_ERROR_DROP;
+}
+
+/* Render a trace record of this node as a human-readable string. */
+static u8 *
+hicn_interest_hitcs_format_trace (u8 * s, va_list * args)
+{
+  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+  hicn_interest_hitcs_trace_t *tr =
+    va_arg (*args, hicn_interest_hitcs_trace_t *);
+
+  return format (s, "INTEREST-HITCS: pkt: %d, sw_if_index %d, next index %d",
+		 (int) tr->pkt_type, tr->sw_if_index, tr->next_index);
+}
+
+/*
+ * Node registration for the interest forwarder node
+ */
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE(hicn_interest_hitcs_node) =
+{
+  .function = hicn_interest_hitcs_node_fn,
+  .name = "hicn-interest-hitcs",
+  .vector_size = sizeof(u32),
+  /* Per-instance runtime state: hicn_interest_hitcs_runtime_t */
+  .runtime_data_bytes = sizeof(hicn_interest_hitcs_runtime_t),
+  .format_trace = hicn_interest_hitcs_format_trace,
+  .type = VLIB_NODE_TYPE_INTERNAL,
+  .n_errors = ARRAY_LEN(hicn_interest_hitcs_error_strings),
+  .error_strings = hicn_interest_hitcs_error_strings,
+  .n_next_nodes = HICN_INTEREST_HITCS_N_NEXT,
+  /* edit / add dispositions here */
+  .next_nodes =
+  {
+    [HICN_INTEREST_HITCS_NEXT_STRATEGY] = "hicn-strategy",
+    [HICN_INTEREST_HITCS_NEXT_IFACE4_OUT] = "hicn4-iface-output",
+    [HICN_INTEREST_HITCS_NEXT_IFACE6_OUT] = "hicn6-iface-output",
+    [HICN_INTEREST_HITCS_NEXT_ERROR_DROP] = "error-drop"
+  },
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/network/interest_hitpit.h b/hicn-plugin/src/network/interest_hitpit.h
new file mode 100644
index 000000000..ffdc61c8f
--- /dev/null
+++ b/hicn-plugin/src/network/interest_hitpit.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HICN_INTEREST_HITPIT_H__
+#define __HICN_INTEREST_HITPIT_H__
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+
+#include "pcs.h"
+
+/**
+ * @file interest_hitpit.h
+ *
+ * This is the node encountered by interest packets after the hicn-interest-pcslookup.
+ * This node aggregates an interest in the PIT or forwards it in case of a retransmission.
+ * If the interest must be retransmitted the next vlib node will be one of the
+ * hicn6-face-output or hicn4-face-output nodes. If the PIT entry is expired the next vlib node
+ * will be the hicn-strategy node, otherwise the vlib buffer is dropped.
+ */
+
+
+/*
+ * Node context data; we think this is per-thread/instance
+ */
+typedef struct hicn_interest_hitpit_runtime_s
+{
+  int id;
+  /* Pointer to the global PIT/CS; lazily bound to hicn_main.pitcs by the
+   * node function on the first processed frame. */
+  hicn_pit_cs_t *pitcs;
+} hicn_interest_hitpit_runtime_t;
+
+/* Trace context struct */
+typedef struct
+{
+  u32 next_index;		/* chosen next-node disposition */
+  u32 sw_if_index;		/* RX software interface index */
+  u8 pkt_type;			/* HICN_PKT_TYPE_* of the traced packet */
+} hicn_interest_hitpit_trace_t;
+
+/* Next-node dispositions for the hicn-interest-hitpit node */
+typedef enum
+{
+  HICN_INTEREST_HITPIT_NEXT_INTEREST_HITCS,
+  HICN_INTEREST_HITPIT_NEXT_STRATEGY,
+  HICN_INTEREST_HITPIT_NEXT_FACE4_OUTPUT,
+  HICN_INTEREST_HITPIT_NEXT_FACE6_OUTPUT,
+  HICN_INTEREST_HITPIT_NEXT_ERROR_DROP,
+  HICN_INTEREST_HITPIT_N_NEXT,
+} hicn_interest_hitpit_next_t;
+
+#endif /* // __HICN_INTEREST_HITPIT_H__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/network/interest_hitpit_node.c b/hicn-plugin/src/network/interest_hitpit_node.c
new file mode 100644
index 000000000..9ebf183c5
--- /dev/null
+++ b/hicn-plugin/src/network/interest_hitpit_node.c
@@ -0,0 +1,308 @@
+/*
+ * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/ip/ip6_packet.h>
+
+#include "interest_hitpit.h"
+#include "mgmt.h"
+#include "parser.h"
+#include "data_fwd.h"
+#include "infra.h"
+#include "strategy.h"
+#include "strategy_dpo_ctx.h"
+#include "strategy_dpo_manager.h"
+#include "state.h"
+#include "error.h"
+#include "face_db.h"
+
+/* packet trace format function */
+static u8 *hicn_interest_hitpit_format_trace (u8 * s, va_list * args);
+
+/* Stats string values */
+static char *hicn_interest_hitpit_error_strings[] = {
+#define _(sym, string) string,
+ foreach_hicnfwd_error
+#undef _
+};
+
+vlib_node_registration_t hicn_interest_hitpit_node;
+
+always_inline void drop_packet (u32 * next0);
+
+/*
+ * hICN forwarder node for interests hitting the PIT.
+ *
+ * For each interest: drop on parse failure or hash collision; on an expired
+ * PIT entry, delete it and defer to hicn-strategy; if the entry is really a
+ * CS entry, defer to hicn-interest-hitcs; otherwise either retransmit (the
+ * incoming face is already in the entry) or aggregate (add the face and
+ * drop the buffer).
+ */
+static uword
+hicn_interest_hitpit_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
+			      vlib_frame_t * frame)
+{
+  u32 n_left_from, *from, *to_next;
+  hicn_interest_hitpit_next_t next_index;
+  hicn_interest_hitpit_runtime_t *rt;
+  vl_api_hicn_api_node_stats_get_reply_t stats = { 0 };
+  f64 tnow;
+
+  rt = vlib_node_get_runtime_data (vm, hicn_interest_hitpit_node.index);
+
+  /* Lazily bind the runtime to the global PIT/CS on the first frame */
+  if (PREDICT_FALSE (rt->pitcs == NULL))
+    {
+      rt->pitcs = &hicn_main.pitcs;
+    }
+  from = vlib_frame_vector_args (frame);
+  n_left_from = frame->n_vectors;
+  next_index = node->cached_next_index;
+
+  /* Capture time in vpp terms */
+  tnow = vlib_time_now (vm);
+
+  while (n_left_from > 0)
+    {
+      u32 n_left_to_next;
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+      while (n_left_from > 0 && n_left_to_next > 0)
+	{
+	  vlib_buffer_t *b0;
+	  u8 isv6;
+	  u8 *nameptr;
+	  u16 namelen;
+	  u32 bi0;
+	  u32 next0 = HICN_INTEREST_HITPIT_NEXT_ERROR_DROP;
+	  hicn_name_t name;
+	  hicn_header_t *hicn0;
+	  hicn_hash_node_t *node0;
+	  const hicn_strategy_vft_t *strategy_vft0;
+	  const hicn_dpo_vft_t *dpo_vft0;
+	  hicn_pcs_entry_t *pitp;
+	  u8 dpo_ctx_id0;
+	  u8 found = 0;
+	  int nh_idx;
+	  hicn_face_id_t outface;
+	  hicn_hash_entry_t *hash_entry0;
+	  hicn_buffer_t *hicnb0;
+	  int ret;
+
+	  /* Prefetch for next iteration. */
+	  if (n_left_from > 1)
+	    {
+	      vlib_buffer_t *b1;
+	      b1 = vlib_get_buffer (vm, from[1]);
+	      CLIB_PREFETCH (b1, CLIB_CACHE_LINE_BYTES, LOAD);
+	      CLIB_PREFETCH (b1->data, CLIB_CACHE_LINE_BYTES, STORE);
+	    }
+
+	  /* Dequeue a packet buffer */
+	  bi0 = from[0];
+	  from += 1;
+	  n_left_from -= 1;
+	  to_next[0] = bi0;
+	  to_next += 1;
+	  n_left_to_next -= 1;
+
+	  b0 = vlib_get_buffer (vm, bi0);
+
+	  /* Get hicn buffer and state stored by the pcslookup node */
+	  hicnb0 = hicn_get_buffer (b0);
+	  hicn_get_internal_state (hicnb0, rt->pitcs, &node0, &strategy_vft0,
+				   &dpo_vft0, &dpo_ctx_id0, &hash_entry0);
+
+
+	  ret = hicn_interest_parse_pkt (b0, &name, &namelen, &hicn0, &isv6);
+	  nameptr = (u8 *) (&name);
+	  pitp = hicn_pit_get_data (node0);
+	  dpo_id_t hicn_dpo_id0 =
+	    { dpo_vft0->hicn_dpo_get_type (), 0, 0, dpo_ctx_id0 };
+
+	  /*
+	   * Check if the hit is instead a collision in the
+	   * hash table. Unlikely to happen.
+	   */
+	  if (PREDICT_FALSE
+	      (ret != HICN_ERROR_NONE
+	       || !hicn_node_compare (nameptr, namelen, node0)))
+	    {
+	      stats.interests_hash_collision++;
+	      /* Remove lock from the entry */
+	      hicn_pcs_remove_lock (rt->pitcs, &pitp, &node0, vm, hash_entry0,
+				    dpo_vft0, &hicn_dpo_id0);
+	      drop_packet (&next0);
+
+	      goto end_processing;
+	    }
+	  /*
+	   * If the entry is expired, remove it no matter of
+	   * the possible cases.
+	   */
+	  if (tnow > pitp->shared.expire_time)
+	    {
+	      /* Notify the strategy, then delete the stale PIT entry */
+	      strategy_vft0->hicn_on_interest_timeout (dpo_ctx_id0);
+	      hicn_pcs_delete (rt->pitcs, &pitp, &node0, vm, hash_entry0,
+			       dpo_vft0, &hicn_dpo_id0);
+	      stats.pit_expired_count++;
+	      next0 = HICN_INTEREST_HITPIT_NEXT_STRATEGY;
+	    }
+	  else
+	    {
+	      /* Entry actually belongs to the CS: let hitcs serve the data */
+	      if ((hash_entry0->he_flags & HICN_HASH_ENTRY_FLAG_CS_ENTRY))
+		{
+		  next0 = HICN_INTEREST_HITPIT_NEXT_INTEREST_HITCS;
+		}
+	      else
+		{
+		  /*
+		   * Distinguish between aggregation or
+		   * retransmission
+		   */
+
+		  found =
+		    hicn_face_search (hicnb0->face_id,
+				      &(pitp->u.pit.faces));
+
+		  if (found)
+		    {
+		      strategy_vft0->hicn_select_next_hop (dpo_ctx_id0,
+							   &nh_idx, &outface);
+		      /* Retransmission */
+		      /*
+		       * Prepare the packet for the
+		       * forwarding
+		       */
+		      next0 = isv6 ? HICN_INTEREST_HITPIT_NEXT_FACE6_OUTPUT :
+			HICN_INTEREST_HITPIT_NEXT_FACE4_OUTPUT;
+		      vnet_buffer (b0)->ip.adj_index[VLIB_TX] =
+			outface;
+
+		      /*
+		       * Update the egress face in
+		       * the PIT
+		       */
+		      pitp->u.pit.pe_txnh = nh_idx;
+		      stats.interests_retx++;
+		    }
+		  else
+		    {
+		      /* New face for this name: record it in the PIT */
+		      hicn_face_db_add_face (hicnb0->face_id,
+					     &pitp->u.pit.faces);
+
+		      /* Aggregation */
+		      drop_packet (&next0);
+		      stats.interests_aggregated++;
+		    }
+		  /* Remove lock from the entry */
+		  hicn_pcs_remove_lock (rt->pitcs, &pitp, &node0, vm,
+					hash_entry0, dpo_vft0, &hicn_dpo_id0);
+
+		}
+	    }
+	end_processing:
+
+	  /* Maybe trace */
+	  if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) &&
+			     (b0->flags & VLIB_BUFFER_IS_TRACED)))
+	    {
+	      hicn_interest_hitpit_trace_t *t =
+		vlib_add_trace (vm, node, b0, sizeof (*t));
+	      t->pkt_type = HICN_PKT_TYPE_INTEREST;
+	      t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+	      t->next_index = next0;
+	    }
+	  /* Incr packet counter */
+	  stats.pkts_processed += 1;
+
+	  /*
+	   * Verify speculative enqueue, maybe switch current
+	   * next frame
+	   */
+	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+					   to_next, n_left_to_next,
+					   bi0, next0);
+	}
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+    }
+  u32 pit_int_count = hicn_pit_get_int_count (rt->pitcs);
+
+
+  /* Export the per-frame stats into the node counters */
+  vlib_node_increment_counter (vm, hicn_interest_hitpit_node.index,
+			       HICNFWD_ERROR_PROCESSED, stats.pkts_processed);
+  vlib_node_increment_counter (vm, hicn_interest_hitpit_node.index,
+			       HICNFWD_ERROR_INTEREST_AGG,
+			       stats.interests_aggregated);
+  vlib_node_increment_counter (vm, hicn_interest_hitpit_node.index,
+			       HICNFWD_ERROR_INT_RETRANS,
+			       stats.interests_retx);
+  vlib_node_increment_counter (vm, hicn_interest_hitpit_node.index,
+			       HICNFWD_ERROR_PIT_EXPIRED,
+			       stats.pit_expired_count);
+  vlib_node_increment_counter (vm, hicn_interest_hitpit_node.index,
+			       HICNFWD_ERROR_HASH_COLL_HASHTB_COUNT,
+			       stats.interests_hash_collision);
+
+  update_node_counter (vm, hicn_interest_hitpit_node.index,
+		       HICNFWD_ERROR_INT_COUNT, pit_int_count);
+
+  return (frame->n_vectors);
+}
+
+/* Render a trace record of this node as a human-readable string. */
+static u8 *
+hicn_interest_hitpit_format_trace (u8 * s, va_list * args)
+{
+  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+  hicn_interest_hitpit_trace_t *tr =
+    va_arg (*args, hicn_interest_hitpit_trace_t *);
+
+  return format (s, "INTEREST-HITPIT: pkt: %d, sw_if_index %d, next index %d",
+		 (int) tr->pkt_type, tr->sw_if_index, tr->next_index);
+}
+
+/*
+ * Route the current packet to the error-drop disposition.
+ *
+ * Fix: the definition now carries the same 'always_inline' specifier as its
+ * forward declaration at the top of this file (and as the sibling helper in
+ * interest_hitcs_node.c). Previously the definition was a plain external
+ * 'void drop_packet', mismatching the static-inline prototype.
+ */
+always_inline void
+drop_packet (u32 * next0)
+{
+  *next0 = HICN_INTEREST_HITPIT_NEXT_ERROR_DROP;
+}
+
+/*
+ * Node registration for the interest forwarder node
+ */
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE(hicn_interest_hitpit_node) =
+{
+  .function = hicn_interest_hitpit_node_fn,
+  .name = "hicn-interest-hitpit",
+  .vector_size = sizeof(u32),
+  /* Per-instance runtime state: hicn_interest_hitpit_runtime_t */
+  .runtime_data_bytes = sizeof(hicn_interest_hitpit_runtime_t),
+  .format_trace = hicn_interest_hitpit_format_trace,
+  .type = VLIB_NODE_TYPE_INTERNAL,
+  .n_errors = ARRAY_LEN(hicn_interest_hitpit_error_strings),
+  .error_strings = hicn_interest_hitpit_error_strings,
+  .n_next_nodes = HICN_INTEREST_HITPIT_N_NEXT,
+  /* edit / add dispositions here */
+  .next_nodes =
+  {
+    [HICN_INTEREST_HITPIT_NEXT_INTEREST_HITCS] = "hicn-interest-hitcs",
+    [HICN_INTEREST_HITPIT_NEXT_STRATEGY] = "hicn-strategy",
+    [HICN_INTEREST_HITPIT_NEXT_FACE4_OUTPUT] = "hicn4-face-output",
+    [HICN_INTEREST_HITPIT_NEXT_FACE6_OUTPUT] = "hicn6-face-output",
+    [HICN_INTEREST_HITPIT_NEXT_ERROR_DROP] = "error-drop",
+  },
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/network/interest_pcslookup.h b/hicn-plugin/src/network/interest_pcslookup.h
new file mode 100644
index 000000000..cbc9dde51
--- /dev/null
+++ b/hicn-plugin/src/network/interest_pcslookup.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HICN_INTEREST_PCSLOOKUP_H__
+#define __HICN_INTEREST_PCSLOOKUP_H__
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+
+#include "pcs.h"
+
+/**
+ * @file interest_pcslookup.h
+ *
+ * This is the node encountered by interest packets after the hicn6-iface-input or
+ * hicn4-iface-input. This node performs a lookup in the PIT and content store:
+ * if there is a hit in the PIT, the vlib buffer is passed to the hicn-interest-hitpit
+ * node, while if there is a hit in the CS the vlib buffer is passed to the
+ * hicn-interest-hitcs node. If there isn't any hit, the vlib buffer is passed to the
+ * hicn-strategy node.
+ */
+
+/*
+ * Node context data; we think this is per-thread/instance
+ */
+typedef struct hicn_interest_pcslookup_runtime_s
+{
+  int id;
+  /* Pointer to the global PIT/CS; lazily bound to hicn_main.pitcs by the
+   * node function on the first processed frame. */
+  hicn_pit_cs_t *pitcs;
+} hicn_interest_pcslookup_runtime_t;
+
+/* Trace context struct */
+typedef struct
+{
+  u32 next_index;		/* chosen next-node disposition */
+  u32 sw_if_index;		/* RX software interface index */
+  u8 pkt_type;			/* HICN_PKT_TYPE_* of the traced packet */
+} hicn_interest_pcslookup_trace_t;
+
+/*
+ * Next-node dispositions for the hicn-interest-pcslookup node.
+ * NOTE: the node function computes HITPIT + is_cs0 to dispatch, so
+ * ..._INTEREST_HITCS must stay immediately after ..._INTEREST_HITPIT.
+ */
+typedef enum
+{
+  HICN_INTEREST_PCSLOOKUP_NEXT_STRATEGY,
+  HICN_INTEREST_PCSLOOKUP_NEXT_INTEREST_HITPIT,
+  HICN_INTEREST_PCSLOOKUP_NEXT_INTEREST_HITCS,
+  HICN_INTEREST_PCSLOOKUP_NEXT_ERROR_DROP,
+  HICN_INTEREST_PCSLOOKUP_N_NEXT,
+} hicn_interest_pcslookup_next_t;
+
+#endif /* // __HICN_INTEREST_PCSLOOKUP_H__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/network/interest_pcslookup_node.c b/hicn-plugin/src/network/interest_pcslookup_node.c
new file mode 100644
index 000000000..6ac2aa3a0
--- /dev/null
+++ b/hicn-plugin/src/network/interest_pcslookup_node.c
@@ -0,0 +1,237 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/ip/ip6_packet.h>
+
+#include "interest_pcslookup.h"
+#include "mgmt.h"
+#include "parser.h"
+#include "infra.h"
+#include "strategy_dpo_manager.h"
+#include "error.h"
+#include "state.h"
+
+/**
+ * @file This node performs a lookup in the PIT and CS for a received interest packet.
+ *
+ * This node passes the packet to the interest-hitpit and interest-hitcs nodes
+ * when there is a hit in the PIT or content store, respectively.
+ */
+
+/* Functions declarations */
+
+/* packet trace format function */
+static u8 *hicn_interest_pcslookup_format_trace (u8 * s, va_list * args);
+
+
+/* Stats string values */
+static char *hicn_interest_pcslookup_error_strings[] = {
+#define _(sym, string) string,
+ foreach_hicnfwd_error
+#undef _
+};
+
+vlib_node_registration_t hicn_interest_pcslookup_node;
+
+/*
+ * hICN forwarder node for interests: performs the PIT/CS hash-table lookup
+ * for each interest and dispatches the buffer to hicn-interest-hitpit (PIT
+ * hit), hicn-interest-hitcs (CS hit), hicn-strategy (miss), or error-drop
+ * (unparsable packet). 1 packet at a time - ipv4/tcp ipv6/tcp.
+ */
+static uword
+hicn_interest_pcslookup_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
+				 vlib_frame_t * frame)
+{
+  u32 n_left_from, *from, *to_next;
+  hicn_interest_pcslookup_next_t next_index;
+  hicn_interest_pcslookup_runtime_t *rt;
+  vl_api_hicn_api_node_stats_get_reply_t stats = { 0 };
+  int ret;
+
+  rt = vlib_node_get_runtime_data (vm, hicn_interest_pcslookup_node.index);
+
+  /* Lazily bind the runtime to the global PIT/CS on the first frame */
+  if (PREDICT_FALSE (rt->pitcs == NULL))
+    {
+      rt->pitcs = &hicn_main.pitcs;
+    }
+  from = vlib_frame_vector_args (frame);
+  n_left_from = frame->n_vectors;
+  next_index = node->cached_next_index;
+
+  while (n_left_from > 0)
+    {
+      u32 n_left_to_next;
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+      while (n_left_from > 0 && n_left_to_next > 0)
+	{
+	  vlib_buffer_t *b0;
+	  u8 isv6;
+	  u8 *nameptr;
+	  u16 namelen;
+	  u32 bi0;
+	  u32 next0 = HICN_INTEREST_PCSLOOKUP_NEXT_ERROR_DROP;
+	  u64 name_hash = 0;
+	  hicn_name_t name;
+	  hicn_header_t *hicn0;
+	  u32 node_id0 = 0;
+	  index_t dpo_ctx_id0 = 0;
+	  u8 vft_id0 = 0;
+	  u8 is_cs0 = 0;
+	  u8 hash_entry_id = 0;
+	  u8 bucket_is_overflown = 0;
+	  u32 bucket_id = ~0;
+
+	  /* Prefetch for next iteration. */
+	  if (n_left_from > 1)
+	    {
+	      vlib_buffer_t *b1;
+	      b1 = vlib_get_buffer (vm, from[1]);
+	      CLIB_PREFETCH (b1, CLIB_CACHE_LINE_BYTES, STORE);
+	      CLIB_PREFETCH (b1->data, CLIB_CACHE_LINE_BYTES, LOAD);
+	    }
+	  /* Dequeue a packet buffer */
+	  bi0 = from[0];
+	  from += 1;
+	  n_left_from -= 1;
+	  to_next[0] = bi0;
+	  to_next += 1;
+	  n_left_to_next -= 1;
+
+	  b0 = vlib_get_buffer (vm, bi0);
+	  ret = hicn_interest_parse_pkt (b0, &name, &namelen, &hicn0, &isv6);
+
+	  /* Default for a parsable interest: strategy (PIT/CS miss) */
+	  if (PREDICT_TRUE (ret == HICN_ERROR_NONE))
+	    {
+	      next0 = HICN_INTEREST_PCSLOOKUP_NEXT_STRATEGY;
+	    }
+	  nameptr = (u8 *) (&name);
+	  stats.pkts_processed++;
+
+	  if (PREDICT_FALSE (ret != HICN_ERROR_NONE ||
+			     hicn_hashtb_fullhash (nameptr, namelen,
+						   &name_hash) !=
+			     HICN_ERROR_NONE))
+	    {
+	      next0 = HICN_INTEREST_PCSLOOKUP_NEXT_ERROR_DROP;
+	    }
+	  else
+	    {
+	      if (hicn_hashtb_lookup_node (rt->pitcs->pcs_table, nameptr,
+					   namelen, name_hash,
+					   0 /* is_data */ , &node_id0,
+					   &dpo_ctx_id0, &vft_id0, &is_cs0,
+					   &hash_entry_id, &bucket_id,
+					   &bucket_is_overflown) ==
+		  HICN_ERROR_NONE)
+		{
+		  /* Relies on HITCS == HITPIT + 1 in the next-node enum */
+		  next0 =
+		    HICN_INTEREST_PCSLOOKUP_NEXT_INTEREST_HITPIT + is_cs0;
+		}
+	      stats.pkts_interest_count++;
+	    }
+
+	  /* Stash the lookup results for the hitpit/hitcs nodes */
+	  hicn_store_internal_state (b0, name_hash, node_id0, dpo_ctx_id0,
+				     vft_id0, hash_entry_id, bucket_id,
+				     bucket_is_overflown);
+
+	  /* Maybe trace */
+	  if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) &&
+			     (b0->flags & VLIB_BUFFER_IS_TRACED)))
+	    {
+	      hicn_interest_pcslookup_trace_t *t =
+		vlib_add_trace (vm, node, b0, sizeof (*t));
+	      t->pkt_type = HICN_PKT_TYPE_INTEREST;
+	      t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+	      t->next_index = next0;
+	    }
+	  /*
+	   * Verify speculative enqueue, maybe switch current
+	   * next frame
+	   */
+	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+					   to_next, n_left_to_next,
+					   bi0, next0);
+
+	}
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+    }
+  u32 pit_int_count = hicn_pit_get_int_count (rt->pitcs);
+  u32 pit_cs_count = hicn_pit_get_cs_count (rt->pitcs);
+  u32 pcs_ntw_count = hicn_pcs_get_ntw_count (rt->pitcs);
+
+
+  /* Export the per-frame stats into the node counters */
+  vlib_node_increment_counter (vm, hicn_interest_pcslookup_node.index,
+			       HICNFWD_ERROR_PROCESSED, stats.pkts_processed);
+
+  vlib_node_increment_counter (vm, hicn_interest_pcslookup_node.index,
+			       HICNFWD_ERROR_INTERESTS,
+			       stats.pkts_interest_count);
+
+  update_node_counter (vm, hicn_interest_pcslookup_node.index,
+		       HICNFWD_ERROR_INT_COUNT, pit_int_count);
+
+  update_node_counter (vm, hicn_interest_pcslookup_node.index,
+		       HICNFWD_ERROR_CS_COUNT, pit_cs_count);
+
+  update_node_counter (vm, hicn_interest_pcslookup_node.index,
+		       HICNFWD_ERROR_CS_NTW_COUNT, pcs_ntw_count);
+
+  return (frame->n_vectors);
+}
+
+/* Render a trace record of this node as a human-readable string. */
+static u8 *
+hicn_interest_pcslookup_format_trace (u8 * s, va_list * args)
+{
+  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+  hicn_interest_pcslookup_trace_t *tr =
+    va_arg (*args, hicn_interest_pcslookup_trace_t *);
+
+  return format (s,
+		 "INTEREST_PCSLOOKUP: pkt: %d, sw_if_index %d, next index %d",
+		 (int) tr->pkt_type, tr->sw_if_index, tr->next_index);
+}
+
+
+/*
+ * Node registration for the interest forwarder node
+ */
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE(hicn_interest_pcslookup_node) =
+{
+  .function = hicn_interest_pcslookup_node_fn,
+  .name = "hicn-interest-pcslookup",
+  .vector_size = sizeof(u32),
+  /* Per-instance runtime state: hicn_interest_pcslookup_runtime_t */
+  .runtime_data_bytes = sizeof(hicn_interest_pcslookup_runtime_t),
+  .format_trace = hicn_interest_pcslookup_format_trace,
+  .type = VLIB_NODE_TYPE_INTERNAL,
+  .n_errors = ARRAY_LEN(hicn_interest_pcslookup_error_strings),
+  .error_strings = hicn_interest_pcslookup_error_strings,
+  .n_next_nodes = HICN_INTEREST_PCSLOOKUP_N_NEXT,
+  /* NOTE: hitpit/hitcs must keep adjacent slots (dispatch adds is_cs0) */
+  .next_nodes =
+  {
+    [HICN_INTEREST_PCSLOOKUP_NEXT_STRATEGY] = "hicn-strategy",
+    [HICN_INTEREST_PCSLOOKUP_NEXT_INTEREST_HITPIT] = "hicn-interest-hitpit",
+    [HICN_INTEREST_PCSLOOKUP_NEXT_INTEREST_HITCS] = "hicn-interest-hitcs",
+    [HICN_INTEREST_PCSLOOKUP_NEXT_ERROR_DROP] = "error-drop",
+  },
+};
+/* *INDENT-ON* */
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/network/mapme.h b/hicn-plugin/src/network/mapme.h
new file mode 100644
index 000000000..17bd9a766
--- /dev/null
+++ b/hicn-plugin/src/network/mapme.h
@@ -0,0 +1,365 @@
+/*
+ * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HICN_MAPME__
+#define __HICN_MAPME__
+
+#include <vnet/dpo/load_balance.h>
+#include <vnet/buffer.h>
+#include <hicn/mapme.h>
+
+#include "hicn.h"
+#include "route.h"
+#include "strategy_dpo_ctx.h"
+#include "strategy_dpo_manager.h" // dpo_is_hicn
+
+/**
+ * @file
+ *
+ * @brief Mapme
+ *
+ * Mapme implementation follows the "Anchorless mobility through hICN" document
+ * specification. In particular, the implementation is made of:
+ * - two internal nodes: hicn-mapme-ctrl and hicn-mapme-ack. The former processes
+ * IU and the latter IU acknowledgment.
+ * - a process node, mapme-eventmgr-process, that is signaled every time a face is
+ * added or deleted, as well as when a new next hop is added to a fib entry as a
+ * result of a mobility event.
+ *
+ * TFIB implementation is done as an extension of an hICN fib entry. In particular,
+ * the list of next hops hold the list of next hops in the tfib as well (stored at the
+ * end of the list of regular next hops). Mapme implementation follows the hICN vrf
+ * implementation and consider the vrf 0 (default fib) as the control-plane fib to
+ * update every time a new next hop must be added or removed.
+ */
+
+
+/* When set, MAP-Me may also be applied to plain IP (locator) routes */
+#define HICN_MAPME_ALLOW_LOCATORS 1
+
+//#define HICN_MAPME_NOTIFICATIONS 1
+
+#define NOT_A_NOTIFICATION false
+#define TIMER_NO_REPEAT false
+
+#define INVALID_SEQ 0
+
+/* Sequence numbers travel on the wire as 32-bit values */
+STATIC_ASSERT (sizeof(u32) == sizeof(seq_t),
+	       "seq_t is not 4 bytes");
+
+/* Global MAP-Me state: user configuration plus logging context. */
+typedef struct hicn_mapme_conf_s
+{
+  hicn_mapme_conf_t conf;	/* user-facing configuration */
+  bool remove_dpo;		// FIXME used ?
+
+  vlib_main_t *vm;		/* set in hicn_mapme_init () */
+  vlib_log_class_t log_class;	/* registered in hicn_mapme_init () */
+} hicn_mapme_main_t;
+
+/**
+ * @brief List of events to signal to the processing node (eventmgr)
+ */
+#define foreach_hicn_mapme_event \
+  _(FACE_ADD) \
+  _(FACE_DEL) \
+  _(FACE_APP_ADD) \
+  _(FACE_APP_DEL) \
+  _(FACE_NH_SET) \
+  _(FACE_NH_ADD) \
+  _(FACE_PH_ADD) \
+  _(FACE_PH_DEL)
+
+/* Expands foreach_hicn_mapme_event into HICN_MAPME_EVENT_* enumerators */
+typedef enum
+{
+#define _(a) HICN_MAPME_EVENT_##a,
+  foreach_hicn_mapme_event
+#undef _
+} hicn_mapme_event_t;
+
+/* The TFIB is just a view over the strategy dpo ctx: TFIB entries live at
+ * the tail of its next_hops array (see hicn_mapme_tfib_add). */
+typedef hicn_dpo_ctx_t hicn_mapme_tfib_t;
+
+/*
+ * Ideally we might need to care about alignment, but this struct is only
+ * used for casting hicn_dpo_ctx_t.
+ *
+ * See otherwise vnet/dpo/dpo.h
+ */
+
+STATIC_ASSERT (sizeof (hicn_mapme_tfib_t) <= sizeof (hicn_dpo_ctx_t),
+	       "hicn_mapme_tfib_t is greater than hicn_dpo_ctx_t");
+
+/* Reinterpret a strategy dpo ctx pointer as its TFIB view */
+#define TFIB(dpo_ctx) ((hicn_mapme_tfib_t*)(dpo_ctx))
+
+/**
+ * @brief Replace all current next hops of the entry with the single face
+ * face_id, operating through the vrf-0 (control-plane) fib helpers.
+ * @return 0 (always; helpers report no error)
+ */
+static_always_inline int
+hicn_mapme_nh_set (hicn_mapme_tfib_t * tfib, hicn_face_id_t face_id)
+{
+  hicn_dpo_ctx_t * strategy_ctx = (hicn_dpo_ctx_t *)tfib;
+  const fib_prefix_t * prefix = fib_entry_get_prefix(strategy_ctx->fib_entry_index);
+
+  u32 n_entries = tfib->entry_count;
+  /* Remove all the existing next hops and set the new one */
+  for (int i = 0; i < n_entries; i++)
+    {
+      /* BUGFIX: iterate over every next hop; the original always read
+       * next_hops[0], deleting only the first hop n_entries times */
+      hicn_face_t * face = hicn_dpoi_get_from_idx(strategy_ctx->next_hops[i]);
+      ip_adjacency_t * adj = adj_get (face->dpo.dpoi_index);
+      ip_nh_del_helper(face->dpo.dpoi_proto, prefix, &adj->sub_type.nbr.next_hop, face->sw_if);
+    }
+  hicn_face_t * face = hicn_dpoi_get_from_idx(face_id);
+  ip_nh_add_helper(face->dpo.dpoi_proto, prefix, &face->nat_addr, face->sw_if);
+  return 0;
+}
+
+/**
+ * @brief Add a next hop only if it is not already among the next hops
+ */
+static_always_inline int
+hicn_mapme_nh_add (hicn_mapme_tfib_t * tfib, hicn_face_id_t face_id)
+{
+  u8 idx;
+
+  /* Nothing to do when the face is already a next hop */
+  for (idx = 0; idx < tfib->entry_count; idx++)
+    {
+      if (tfib->next_hops[idx] == face_id)
+	return 0;
+    }
+
+  /* Add the next hop in the vrf 0 which will add it to the entry in the hICN vrf */
+  hicn_dpo_ctx_t *ctx = (hicn_dpo_ctx_t *) tfib;
+  const fib_prefix_t *pfx = fib_entry_get_prefix (ctx->fib_entry_index);
+  hicn_face_t *nh_face = hicn_dpoi_get_from_idx (face_id);
+  ip_nh_add_helper (nh_face->dpo.dpoi_proto, pfx, &nh_face->nat_addr,
+		    nh_face->sw_if);
+
+  return 0;
+}
+
+/**
+ * Add a 'previous' hop to the TFIB.
+ *
+ * TFIB entries occupy the tail of the next_hops array, growing downward from
+ * HICN_PARAM_FIB_ENTRY_NHOPS_MAX, while regular next hops grow upward from 0.
+ *
+ * XXX we should have the for loop in the reverse order for simpler code.
+ */
+static_always_inline int
+hicn_mapme_tfib_add (hicn_mapme_tfib_t * tfib, hicn_face_id_t face_id)
+{
+  /* Index of the current head of the TFIB region; new entry goes at pos-1 */
+  u8 pos = HICN_PARAM_FIB_ENTRY_NHOPS_MAX - tfib->tfib_entry_count;
+
+  /* Don't add if it already exists,
+   * e.g. an old IU received on a face on which we are retransmitting */
+  for (u8 pos2 = pos; pos2 < HICN_PARAM_FIB_ENTRY_NHOPS_MAX; pos2++)
+    if (tfib->next_hops[pos2] == face_id)
+      return 0;
+
+  /* Make sure we have enough room: the TFIB tail must not collide with the
+   * regular next hops stored at the front of the array */
+  if (pos <= tfib->entry_count)
+    return -1;
+
+  tfib->next_hops[pos - 1] = face_id;
+  tfib->tfib_entry_count++;
+
+  /*
+   * Take a lock on the face as if it will be removed from the next_hops a
+   * lock will be removed.
+   */
+  hicn_face_lock_with_id(face_id);
+
+  return 0;
+}
+
+/**
+ * @brief Remove every TFIB entry, releasing the face lock taken by
+ * hicn_mapme_tfib_add () for each of them.
+ * @return 0 (always)
+ */
+static_always_inline int
+hicn_mapme_tfib_clear (hicn_mapme_tfib_t * tfib)
+{
+  hicn_face_id_t invalid = NEXT_HOP_INVALID;
+  /*
+   * TFIB entries are stored contiguously at the tail of the next hop
+   * array; walk all of them.
+   * BUGFIX: the original loop ended with an unconditional `break`, so only
+   * the first entry was unlocked/invalidated while tfib_entry_count was
+   * still reset to 0, leaking face locks and leaving stale next hops.
+   */
+  u8 start_pos = HICN_PARAM_FIB_ENTRY_NHOPS_MAX - tfib->tfib_entry_count;
+  for (u8 pos = start_pos; pos < HICN_PARAM_FIB_ENTRY_NHOPS_MAX; pos++)
+    {
+      hicn_face_unlock_with_id (tfib->next_hops[pos]);
+      tfib->next_hops[pos] = invalid;
+    }
+
+  tfib->tfib_entry_count = 0;
+
+  return 0;
+}
+
+/* Remove face_id from the TFIB region; returns -1 when not present. */
+static_always_inline int
+hicn_mapme_tfib_del (hicn_mapme_tfib_t * tfib, hicn_face_id_t face_id)
+{
+  hicn_face_id_t invalid = NEXT_HOP_INVALID;
+  /*
+   * We need to do a linear scan of TFIB entries to find the one to
+   * remove
+   */
+  u8 start_pos = HICN_PARAM_FIB_ENTRY_NHOPS_MAX - tfib->tfib_entry_count;
+  u8 pos = ~0;
+  for (pos = start_pos; pos < HICN_PARAM_FIB_ENTRY_NHOPS_MAX; pos++)
+    if (tfib->next_hops[pos] == face_id)
+      {
+	/* Drop the lock taken by hicn_mapme_tfib_add () */
+	hicn_face_unlock_with_id (tfib->next_hops[pos]);
+	tfib->next_hops[pos] = invalid;
+	break;
+      }
+  if (pos == HICN_PARAM_FIB_ENTRY_NHOPS_MAX)
+    /* Not found */
+    return -1;
+
+  tfib->tfib_entry_count--;
+
+  /* Close the hole: shift entries [start_pos, pos) one slot toward the tail
+   * so that the TFIB region stays contiguous.
+   * Likely we won't receive a new IU twice from the same face */
+  if (PREDICT_TRUE (pos > start_pos))
+    memmove (tfib->next_hops + start_pos +1 , tfib->next_hops + start_pos,
+	     (pos - start_pos) * sizeof (hicn_face_id_t));
+
+  return 0;
+}
+
+/**
+ * @brief Performs an Exact Prefix Match lookup on the FIB
+ * @param addr prefix address to look up
+ * @param plen prefix length
+ * @returns the corresponding DPO (hICN bucket if one exists, otherwise the
+ *          IP load-balance DPO itself), or NULL when no exact-match entry
+ *          exists or the entry does not contribute a load-balance DPO
+ */
+static_always_inline
+  dpo_id_t * fib_epm_lookup (ip46_address_t * addr, u8 plen)
+{
+  fib_prefix_t fib_pfx;
+  fib_node_index_t fib_entry_index;
+  u32 fib_index;
+  dpo_id_t *dpo_id;
+  load_balance_t *lb;
+
+  const dpo_id_t *load_balance_dpo_id;
+
+  /* At this point the face exists in the face table */
+  fib_prefix_from_ip46_addr (addr, &fib_pfx);
+  fib_pfx.fp_len = plen;
+
+  /* Check if the route already exist in the fib : EPM */
+  fib_index = fib_table_find (fib_pfx.fp_proto, HICN_FIB_TABLE);
+
+  fib_entry_index = fib_table_lookup_exact_match (fib_index, &fib_pfx);
+  if (fib_entry_index == FIB_NODE_INDEX_INVALID)
+    return NULL;
+
+  load_balance_dpo_id = fib_entry_contribute_ip_forwarding (fib_entry_index);
+
+  /* The dpo is not a load balance dpo as expected */
+  if (load_balance_dpo_id->dpoi_type != DPO_LOAD_BALANCE)
+    return NULL;
+
+  /* former_dpo_id is a load_balance dpo */
+  lb = load_balance_get (load_balance_dpo_id->dpoi_index);
+
+  /*
+   * We now distinguish the case where we have an hICN route (the
+   * regular case), and the case where we have an IP route, to be able
+   * to apply MAP-Me mechanisms even to a locator IP address.
+   */
+
+  /* Scan the load-balance buckets; first hICN bucket wins */
+  for (int i = 0; i < lb->lb_n_buckets; i++)
+    {
+      /* un-const */
+      dpo_id = (dpo_id_t *) load_balance_get_bucket_i (lb, i);
+
+      if (dpo_is_hicn (dpo_id))
+	return dpo_id;
+    }
+
+  /* No hICN bucket: fall back to the IP load-balance DPO (locator case) */
+  /* un-const */
+  return (dpo_id_t *) load_balance_dpo_id;
+}
+
+/* DPO types */
+
+extern dpo_type_t hicn_face_udp_type;
+extern dpo_type_t hicn_face_ip_type;
+
+/* VLIB EDGE IDs */
+
+/* in faces/ip/face_ip.c */
+extern u32 strategy_face_ip4_vlib_edge;
+extern u32 strategy_face_ip6_vlib_edge;
+/* in faces/udp/face_udp.c */
+extern u32 strategy_face_udp4_vlib_edge;
+extern u32 strategy_face_udp6_vlib_edge;
+
+
+/**
+ * @brief Returns the next hop vlib edge on which we can send an Interest
+ * packet, given the dpo's face type (IP or UDP) and protocol (v4/v6).
+ *
+ * This is both used to preprocess a dpo that will be stored as a next hop
+ * in the FIB, and to determine on which node to send an Interest Update.
+ * Returns ~0 for unknown face types or protocols.
+ */
+always_inline u32
+hicn_mapme_get_dpo_vlib_edge (dpo_id_t * dpo)
+{
+  int is_ip_face = (dpo->dpoi_type == hicn_face_ip_type);
+  int is_udp_face = (dpo->dpoi_type == hicn_face_udp_type);
+
+  if (!is_ip_face && !is_udp_face)
+    return ~0;
+
+  switch (dpo->dpoi_proto)
+    {
+    case DPO_PROTO_IP4:
+      return is_ip_face ? strategy_face_ip4_vlib_edge
+		        : strategy_face_udp4_vlib_edge;
+    case DPO_PROTO_IP6:
+      return is_ip_face ? strategy_face_ip6_vlib_edge
+		        : strategy_face_udp6_vlib_edge;
+    default:
+      return ~0;
+    }
+}
+
+/**
+ * @brief Returns the name of the face output node on which an Update
+ * packet can be sent, based on the face's dpo protocol; NULL when the
+ * protocol is neither IPv4 nor IPv6.
+ */
+always_inline char *
+hicn_mapme_get_dpo_face_node (hicn_face_id_t face_id)
+{
+  hicn_face_t *face = hicn_dpoi_get_from_idx (face_id);
+  dpo_proto_t proto = face->dpo.dpoi_proto;
+
+  if (proto == DPO_PROTO_IP4)
+    return "hicn4-face-output";
+  if (proto == DPO_PROTO_IP6)
+    return "hicn6-face-output";
+  return NULL;
+}
+
+/* Logging shims, currently compiled out (expand to nothing); restore the
+ * vlib_log_* calls to enable MAP-Me logging. */
+#define DEBUG(...) //vlib_log_debug(mapme_main.log_class, __VA_ARGS__)
+#define WARN(...) //vlib_log_warn(mapme_main.log_class, __VA_ARGS__)
+#define ERROR(...) //vlib_log_err(mapme_main.log_class, __VA_ARGS__)
+
+#endif /* __HICN_MAPME__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/network/mapme_ack.h b/hicn-plugin/src/network/mapme_ack.h
new file mode 100644
index 000000000..821baf203
--- /dev/null
+++ b/hicn-plugin/src/network/mapme_ack.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Copyright (c) 2017-2019 by Cisco Systems Inc. All Rights Reserved.
+ *
+ */
+
+#ifndef HICN_MAPME_ACK_H
+#define HICN_MAPME_ACK_H
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+
+/**
+ * @file
+ *
+ */
+
+/* Node context data */
+typedef struct hicn_mapme_ack_runtime_s
+{
+  int id;			/* node instance id — presumably unused; confirm */
+} hicn_mapme_ack_runtime_t;
+
+/* Trace context struct */
+typedef struct
+{
+  u32 next_index;		/* next node chosen for the packet */
+  u32 sw_if_index;		/* RX software interface */
+  u8 pkt_type;			/* hICN packet type */
+} hicn_mapme_ack_trace_t;
+
+/* Next nodes: ack packets are consumed here, so drop is the only edge */
+typedef enum
+{
+  HICN_MAPME_ACK_NEXT_ERROR_DROP,
+  HICN_MAPME_ACK_N_NEXT,
+} hicn_mapme_ack_next_t;
+#endif /* HICN_MAPME_ACK_H */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/network/mapme_ack_node.c b/hicn-plugin/src/network/mapme_ack_node.c
new file mode 100644
index 000000000..f26895d20
--- /dev/null
+++ b/hicn-plugin/src/network/mapme_ack_node.c
@@ -0,0 +1,234 @@
+/*
+ * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/ip/ip6_packet.h>
+
+#include "hicn.h"
+#include "mapme.h"
+#include "mapme_ack.h"
+#include "mapme_eventmgr.h"
+#include "mgmt.h"
+#include "parser.h"
+#include "data_fwd.h"
+#include "infra.h"
+#include "strategy_dpo_manager.h"
+#include "error.h"
+#include "state.h"
+
+extern hicn_mapme_main_t mapme_main;
+
+/* packet trace format function */
+static u8 *hicn_mapme_ack_format_trace (u8 * s, va_list * args);
+
+
+/* Stats string values: reuse the forwarder-wide error string table */
+static char *hicn_mapme_ack_error_strings[] = {
+#define _(sym, string) string,
+  foreach_hicnfwd_error
+#undef _
+};
+
+/*
+ * @brief Process incoming ack messages (Interest Update Ack)
+ * @param vm vlib main data structure
+ * @param b Control packet (IU ack)
+ * @param in_face Ingress face id
+ * @return true when the packet was handled (including benign ignores),
+ *         false on parse error or missing strategy ctx
+ */
+bool
+hicn_mapme_process_ack (vlib_main_t * vm, vlib_buffer_t * b,
+			hicn_face_id_t in_face)
+{
+  seq_t fib_seq;
+  const dpo_id_t *dpo;
+  hicn_prefix_t prefix;
+  mapme_params_t params;
+  int rc;
+
+  /* Parse incoming message */
+  rc =
+    hicn_mapme_parse_packet (vlib_buffer_get_current (b), &prefix, &params);
+  if (rc < 0)
+    goto ERR_PARSE;
+
+  /* if (params.seq == INVALID_SEQ) */
+  /* { */
+  /* DEBUG ("Invalid sequence number found in IU"); */
+  /* return true; */
+  /* } */
+
+  dpo = fib_epm_lookup (&(prefix.name), prefix.len);
+  if (!dpo)
+    {
+      DEBUG ("Ignored ACK for non-existing FIB entry. Ignored.");
+      return true;
+
+    }
+
+  /* We are only expecting ACKs for hICN DPOs */
+  ASSERT (dpo_is_hicn (dpo));
+
+  hicn_mapme_tfib_t *tfib =
+    TFIB (hicn_strategy_dpo_ctx_get (dpo->dpoi_index));
+
+  if (tfib == NULL)
+    {
+      WARN ("Unable to get strategy ctx.");
+      return false;
+    }
+
+  fib_seq = tfib->seq;
+
+  /*
+   * As we always retransmit IU with the latest seq, we are not interested in
+   * ACKs with inferior seq
+   */
+  if (params.seq < fib_seq)
+    {
+      DEBUG ("Ignored ACK for low seq");
+      return true;
+    }
+
+  /*
+   * Is the ingress face in TFIB ? if so, remove it, otherwise it might be a
+   * duplicate
+   */
+  hicn_mapme_tfib_del (tfib, in_face);
+
+  /* Signal the event manager so that pending IU retransmissions on this
+   * face stop */
+  retx_t *retx = vlib_process_signal_event_data (vm,
+						 hicn_mapme_eventmgr_process_node.
+						 index,
+						 HICN_MAPME_EVENT_FACE_PH_DEL,
+						 1,
+						 sizeof (retx_t));
+  *retx = (retx_t)
+  {
+    .prefix = prefix,
+    .dpo = *dpo
+  };
+
+  return true;
+
+ERR_PARSE:
+  return false;
+}
+
+vlib_node_registration_t hicn_mapme_ack_node;
+
+/* hicn-mapme-ack node: consumes IU ack packets one at a time, updates the
+ * TFIB via hicn_mapme_process_ack () and drops the buffer (the only next
+ * node is error-drop). */
+static uword
+hicn_mapme_ack_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
+			vlib_frame_t * frame)
+{
+  hicn_buffer_t *hb;
+  hicn_mapme_ack_next_t next_index;
+  u32 n_left_from, *from, *to_next;
+  n_left_from = frame->n_vectors;
+
+  from = vlib_frame_vector_args (frame);
+  n_left_from = frame->n_vectors;
+  next_index = node->cached_next_index;
+
+  while (n_left_from > 0) // buffers in the current frame
+    {
+      u32 n_left_to_next;
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+
+      while (n_left_from > 0 && n_left_to_next > 0)
+	{
+	  u32 bi0;
+	  vlib_buffer_t *b0;
+	  u32 next0 = HICN_MAPME_ACK_NEXT_ERROR_DROP;
+	  u32 sw_if_index0;
+	  /* speculatively enqueue b0 to the current next frame */
+	  bi0 = from[0];
+	  to_next[0] = bi0;
+	  from += 1;
+	  to_next += 1;
+	  n_left_from -= 1;
+	  n_left_to_next -= 1;
+	  b0 = vlib_get_buffer (vm, bi0);
+
+	  /* NOTE(review): per-packet CLI output in a forwarding node —
+	   * looks like debug leftover; consider removing or using DEBUG */
+	  vlib_cli_output (vm, "Received IUAck");
+	  hb = hicn_get_buffer (b0);
+	  hicn_mapme_process_ack (vm, b0, hb->face_id);
+
+	  /* Single loop: process 1 packet here */
+	  sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+
+	  if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
+			     && (b0->flags & VLIB_BUFFER_IS_TRACED)))
+	    {
+	      hicn_mapme_ack_trace_t *t =
+		vlib_add_trace (vm, node, b0, sizeof (*t));
+	      t->sw_if_index = sw_if_index0;
+	      t->next_index = next0;
+	    }
+	  /* $$$$$ Done processing 1 packet here $$$$$ */
+
+	  /* verify speculative enqueue, maybe switch current next frame */
+	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+					   to_next, n_left_to_next,
+					   bi0, next0);
+	}
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+    }
+// vlib_node_increment_counter (vm, hicn_mapme_ack_node.index,
+// HICN_MAPME_ACK_ERROR_SWAPPED, pkts_swapped);
+  return (frame->n_vectors);
+}
+
+/* packet trace format function */
+/* Format one hicn-mapme-ack trace record for `show trace`. */
+static u8 *
+hicn_mapme_ack_format_trace (u8 * s, va_list * args)
+{
+  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+  hicn_mapme_ack_trace_t *trace = va_arg (*args, hicn_mapme_ack_trace_t *);
+
+  s = format (s, "MAPME_ACK: pkt: %d, sw_if_index %d, next index %d",
+	      (int) trace->pkt_type, trace->sw_if_index, trace->next_index);
+  return s;
+}
+
+
+/*
+ * Node registration for the MAP-Me node processing special interests
+ */
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (hicn_mapme_ack_node) =
+{
+  .function = hicn_mapme_ack_node_fn,
+  .name = "hicn-mapme-ack",
+  /* one u32 buffer index per vector element */
+  .vector_size = sizeof (u32),
+  .runtime_data_bytes = sizeof (hicn_mapme_ack_runtime_t),
+  .format_trace = hicn_mapme_ack_format_trace,
+  .type = VLIB_NODE_TYPE_INTERNAL,
+  .n_errors = ARRAY_LEN (hicn_mapme_ack_error_strings),
+  .error_strings = hicn_mapme_ack_error_strings,
+  .n_next_nodes = HICN_MAPME_ACK_N_NEXT,
+  /* acks are consumed by this node; buffers always go to error-drop */
+  .next_nodes =
+  {
+    [HICN_MAPME_ACK_NEXT_ERROR_DROP] = "error-drop",
+  },
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/hicn-plugin/src/network/mapme_ctrl.h b/hicn-plugin/src/network/mapme_ctrl.h
new file mode 100644
index 000000000..9af4beccc
--- /dev/null
+++ b/hicn-plugin/src/network/mapme_ctrl.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Copyright (c) 2017-2019 by Cisco Systems Inc. All Rights Reserved.
+ *
+ */
+
+#ifndef HICN_MAPME_CTRL_H
+#define HICN_MAPME_CTRL_H
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+
+/**
+ * @file mapme_ctrl.h
+ *
+ */
+
+/* Node context data */
+typedef struct hicn_mapme_ctrl_runtime_s
+{
+  int id;			/* node instance id — presumably unused; confirm */
+} hicn_mapme_ctrl_runtime_t;
+
+/* Trace context struct */
+typedef struct
+{
+  u32 next_index;		/* next node chosen for the packet */
+  u32 sw_if_index;		/* RX software interface */
+  u8 pkt_type;			/* hICN packet type */
+} hicn_mapme_ctrl_trace_t;
+
+/* Next nodes: the forged ACK leaves through one of the iface output nodes */
+typedef enum
+{
+  HICN_MAPME_CTRL_NEXT_IP4_OUTPUT,
+  HICN_MAPME_CTRL_NEXT_IP6_OUTPUT,
+  HICN_MAPME_CTRL_NEXT_ERROR_DROP,
+  HICN_MAPME_CTRL_N_NEXT,
+} hicn_mapme_ctrl_next_t;
+/**
+ * @brief Returns the next-node index on which the ACK for this face must
+ * be sent back, selected by the face's dpo protocol; unknown protocols go
+ * to error-drop.
+ */
+always_inline hicn_mapme_ctrl_next_t
+hicn_mapme_ctrl_get_iface_node (hicn_face_id_t face_id)
+{
+  hicn_face_t *face = hicn_dpoi_get_from_idx (face_id);
+  dpo_proto_t proto = face->dpo.dpoi_proto;
+
+  if (proto == DPO_PROTO_IP4)
+    return HICN_MAPME_CTRL_NEXT_IP4_OUTPUT;
+  if (proto == DPO_PROTO_IP6)
+    return HICN_MAPME_CTRL_NEXT_IP6_OUTPUT;
+  return HICN_MAPME_CTRL_NEXT_ERROR_DROP;
+}
+
+#endif /* HICN_MAPME_CTRL_H */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/network/mapme_ctrl_node.c b/hicn-plugin/src/network/mapme_ctrl_node.c
new file mode 100644
index 000000000..a0be2be1d
--- /dev/null
+++ b/hicn-plugin/src/network/mapme_ctrl_node.c
@@ -0,0 +1,337 @@
+/*
+ * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * This node processes MAP-Me control messages.
+ */
+#include <vnet/ip/ip6_packet.h>
+#include <vnet/dpo/load_balance.h>
+
+#include "hicn.h"
+#include "mapme.h"
+#include "mapme_ctrl.h"
+#include "mapme_eventmgr.h"
+#include "mgmt.h"
+#include "parser.h"
+#include "infra.h"
+#include "strategy_dpo_manager.h"
+#include "strategy_dpo_ctx.h"
+#include "error.h"
+#include "state.h"
+
+extern hicn_mapme_main_t mapme_main;
+
+#define MS2NS(x) x * 1000000
+
+/* Functions declarations */
+
+/* packet trace format function */
+static u8 *hicn_mapme_ctrl_format_trace (u8 * s, va_list * args);
+
+
+/* Stats string values: reuse the forwarder-wide error string table */
+static char *hicn_mapme_ctrl_error_strings[] = {
+#define _(sym, string) string,
+  foreach_hicnfwd_error
+#undef _
+};
+
+/*
+ * @brief Process incoming control messages (Interest Update)
+ * @param vm vlib main data structure
+ * @param b Control packet (IU); on success the buffer is rewritten in place
+ *          into the ACK that the caller node forwards back
+ * @param in_face_id Ingress face id
+ * @return true when the IU was processed and an event raised, false on
+ *         parse error, missing FIB entry, IP (locator) route, or missing
+ *         strategy ctx
+ *
+ * NOTE:
+ * - this function answers locally to the IU interest by replying with an Ack
+ * (Data) packet, unless in case of outdated information, in which case we can
+ * consider the interest is dropped, and another IU (aka ICMP error) is sent so
+ * that retransmissions stop.
+ */
+static_always_inline bool
+hicn_mapme_process_ctrl (vlib_main_t * vm, vlib_buffer_t * b,
+			 hicn_face_id_t in_face_id)
+{
+  seq_t fib_seq;
+  const dpo_id_t *dpo;
+  hicn_prefix_t prefix;
+  mapme_params_t params;
+  int rc;
+
+  /* Parse incoming message */
+  rc =
+    hicn_mapme_parse_packet (vlib_buffer_get_current (b), &prefix, &params);
+  if (rc < 0)
+    goto ERR_PARSE;
+
+  /* NOTE(review): per-packet CLI output — presumably debug leftover */
+  vlib_cli_output (vm, "IU - type:%d seq:%d len:%d", params.type, params.seq,
+		   prefix.len);
+
+  /* if (params.seq == INVALID_SEQ) */
+  /* { */
+  /* vlib_log_warn (mapme_main.log_class, */
+  /* "Invalid sequence number found in IU"); */
+
+  /* return true; */
+  /* } */
+
+  /* We forge the ACK which will be the packet forwarded by the node */
+  hicn_mapme_create_ack (vlib_buffer_get_current (b), &params);
+
+  dpo = fib_epm_lookup (&prefix.name, prefix.len);
+  if (!dpo)
+    {
+#ifdef HICN_MAPME_ALLOW_NONEXISTING_FIB_ENTRY
+      /*
+       * This might happen for a node hosting a producer which has moved.
+       * Destroying the face has led to removing all corresponding FIB
+       * entries. In that case, we need to correctly restore the FIB entries.
+       */
+      DEBUG ("Re-creating FIB entry with next hop on connection")
+#error "not implemented"
+#else
+      //ERROR("Received IU for non-existing FIB entry");
+      return false;
+#endif /* HICN_MAPME_ALLOW_NONEXISTING_FIB_ENTRY */
+
+    }
+
+#ifdef HICN_MAPME_ALLOW_LOCATORS
+  if (!dpo_is_hicn ((dpo)))
+    {
+      /* We have an IP DPO */
+      WARN ("Not implemented yet.");
+      return false;
+    }
+#endif
+
+  /* Process the hICN DPO */
+  hicn_mapme_tfib_t *tfib =
+    TFIB (hicn_strategy_dpo_ctx_get (dpo->dpoi_index));
+
+  if (tfib == NULL)
+    {
+      WARN ("Unable to get strategy ctx.");
+      return false;
+    }
+
+  fib_seq = tfib->seq;
+
+  if (params.seq > fib_seq)
+    {
+      DEBUG
+	("Higher sequence number than FIB %d > %d, updating seq and next hops",
+	 params.seq, fib_seq);
+
+      /* This has to be done first to allow processing ack */
+      tfib->seq = params.seq;
+
+      // in_face and next_hops are face_id_t
+
+      /* Remove ingress face from TFIB in case it was present */
+      hicn_mapme_tfib_del (tfib, in_face_id);
+
+      /* Move next hops to TFIB... but in_face... */
+      for (u8 pos = 0; pos < tfib->entry_count; pos++)
+	{
+	  hicn_face_t * face = hicn_dpoi_get_from_idx(tfib->next_hops[pos]);
+	  hicn_face_t * in_face = hicn_dpoi_get_from_idx(in_face_id);
+	  if (dpo_is_adj(&face->dpo))
+	    {
+	      /* NOTE(review): 'dpo' here is the FIB entry's hICN dpo looked
+	       * up above, not this next hop's adjacency — this looks like
+	       * it should be face->dpo.dpoi_index; confirm. Note also that
+	       * 'break' stops moving ALL remaining next hops, not just the
+	       * matching one — confirm that is intended. */
+	      ip_adjacency_t * adj = adj_get (dpo->dpoi_index);
+	      if (ip46_address_cmp(&(adj->sub_type.nbr.next_hop), &(in_face->nat_addr))== 0)
+		break;
+	    }
+	  DEBUG
+	    ("Adding nexthop to the tfib, dpo index in_face %d, dpo index tfib %d",
+	     in_face_id, tfib->next_hops[pos]);
+	  hicn_mapme_tfib_add (tfib, tfib->next_hops[pos]);
+	}
+
+      hicn_mapme_nh_set (tfib, in_face_id);
+
+      /* We transmit both the prefix and the full dpo (type will be needed to pick the right transmit node) */
+      retx_t *retx = vlib_process_signal_event_data (vm,
+						     hicn_mapme_eventmgr_process_node.
+						     index,
+						     HICN_MAPME_EVENT_FACE_NH_SET,
+						     1,
+						     sizeof (retx_t));
+      *retx = (retx_t)
+      {
+	.prefix = prefix,
+	.dpo = *dpo
+      };
+
+    }
+  else if (params.seq == fib_seq)
+    {
+      DEBUG ("Same sequence number than FIB %d > %d, adding next hop",
+	     params.seq, fib_seq);
+
+      /* Remove ingress face from TFIB in case it was present */
+      hicn_mapme_tfib_del (tfib, in_face_id);
+
+      /* Add ingress face to next hops */
+      hicn_mapme_nh_add (tfib, in_face_id);
+
+      /* Multipath, multihoming, multiple producers or duplicate interest */
+      retx_t *retx = vlib_process_signal_event_data (vm,
+						     hicn_mapme_eventmgr_process_node.
+						     index,
+						     HICN_MAPME_EVENT_FACE_NH_ADD,
+						     1,
+						     sizeof (retx_t));
+      *retx = (retx_t)
+      {
+	.prefix = prefix,
+	.dpo = *dpo
+      };
+    }
+  else				// params.seq < fib_seq
+    {
+      /*
+       * face is propagating outdated information, we can just consider it as a
+       * prevHops
+       */
+      hicn_mapme_tfib_add (tfib, in_face_id);
+
+      retx_t *retx = vlib_process_signal_event_data (vm,
+						     hicn_mapme_eventmgr_process_node.
+						     index,
+						     HICN_MAPME_EVENT_FACE_PH_ADD,
+						     1,
+						     sizeof (retx_t));
+      *retx = (retx_t)
+      {
+	.prefix = prefix,
+	.dpo = *dpo
+      };
+    }
+
+  /* We just raise events, the event_mgr is in charge of forging packet. */
+
+  return true;
+
+//ERR_ACK_CREATE:
+ERR_PARSE:
+  return false;
+}
+
+vlib_node_registration_t hicn_mapme_ctrl_node;
+
+/* hicn-mapme-ctrl node: processes each IU in place (the buffer is rewritten
+ * into the ACK by hicn_mapme_process_ctrl) and enqueues the ACK to the
+ * iface output node matching the ingress face's protocol. */
+static uword
+hicn_mapme_ctrl_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
+			 vlib_frame_t * frame)
+{
+  hicn_buffer_t *hb;
+  hicn_mapme_ctrl_next_t next_index;
+  u32 n_left_from, *from, *to_next;
+  n_left_from = frame->n_vectors;
+  //hicn_face_id_t in_face;
+
+  from = vlib_frame_vector_args (frame);
+  n_left_from = frame->n_vectors;
+  next_index = node->cached_next_index;
+
+  while (n_left_from > 0) // buffers in the current frame
+    {
+      u32 n_left_to_next;
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+
+      while (n_left_from > 0 && n_left_to_next > 0)
+	{
+	  u32 bi0;
+	  vlib_buffer_t *b0;
+
+	  /* speculatively enqueue b0 to the current next frame */
+	  bi0 = from[0];
+	  to_next[0] = bi0;
+	  from += 1;
+	  to_next += 1;
+	  n_left_from -= 1;
+	  n_left_to_next -= 1;
+	  b0 = vlib_get_buffer (vm, bi0);
+	  hb = hicn_get_buffer (b0);
+
+	  /* This determines the next node on which the ack will be sent back */
+	  u32 next0 = hicn_mapme_ctrl_get_iface_node (hb->face_id);
+
+	  hicn_mapme_process_ctrl (vm, b0, hb->face_id);
+
+	  /* Presumably the iface output node reads the face id back from
+	   * adj_index[VLIB_TX] — TODO confirm against those nodes */
+	  vnet_buffer (b0)->ip.adj_index[VLIB_TX] = hb->face_id;
+
+	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+					   n_left_to_next, bi0, next0);
+
+	}
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+    }
+  // vlib_node_increment_counter (vm, hicn_mapme_ctrl_node.index,
+  // HICN_MAPME_CTRL_ERROR_SWAPPED, pkts_swapped);
+  return frame->n_vectors;
+}
+
+/* packet trace format function */
+/* Format one hicn-mapme-ctrl trace record for `show trace`. */
+static u8 *
+hicn_mapme_ctrl_format_trace (u8 * s, va_list * args)
+{
+  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+  hicn_mapme_ctrl_trace_t *trace = va_arg (*args, hicn_mapme_ctrl_trace_t *);
+
+  s = format (s, "MAPME_CTRL: pkt: %d, sw_if_index %d, next index %d",
+	      (int) trace->pkt_type, trace->sw_if_index, trace->next_index);
+  return s;
+}
+
+
+/*
+ * Node registration for the MAP-Me node processing special interests
+ */
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (hicn_mapme_ctrl_node) =
+{
+  .function = hicn_mapme_ctrl_node_fn,
+  .name = "hicn-mapme-ctrl",
+  /* one u32 buffer index per vector element */
+  .vector_size = sizeof (u32),
+  .runtime_data_bytes = sizeof (hicn_mapme_ctrl_runtime_t),
+  .format_trace = hicn_mapme_ctrl_format_trace,
+  .type = VLIB_NODE_TYPE_INTERNAL,
+  .n_errors = ARRAY_LEN (hicn_mapme_ctrl_error_strings),
+  .error_strings = hicn_mapme_ctrl_error_strings,
+  .n_next_nodes = HICN_MAPME_CTRL_N_NEXT,
+  .next_nodes =
+  {
+    /*
+     * Control packets are not forwarded by this node, but sent by the Event
+     * Manager. This node is only responsible for sending ACKs back;
+     * ACKs, like data packets, are output on iface's
+     */
+    [HICN_MAPME_CTRL_NEXT_IP4_OUTPUT] = "hicn4-iface-output",
+    [HICN_MAPME_CTRL_NEXT_IP6_OUTPUT] = "hicn6-iface-output",
+    [HICN_MAPME_CTRL_NEXT_ERROR_DROP] = "error-drop",
+  },
+};
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/hicn-plugin/src/network/mapme_eventmgr.c b/hicn-plugin/src/network/mapme_eventmgr.c
new file mode 100644
index 000000000..d8b7562f8
--- /dev/null
+++ b/hicn-plugin/src/network/mapme_eventmgr.c
@@ -0,0 +1,567 @@
+/*
+ * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hicn.h"
+#include "strategy_dpo_ctx.h"
+#include "mapme.h"
+#include "mapme_eventmgr.h"
+#include "strategies/dpo_mw.h"
+
+#include <vnet/fib/ip4_fib.h>
+#include <vnet/fib/ip6_fib.h>
+
+#define DEFAULT_TIMEOUT 1.0 /* s */
+
+hicn_mapme_main_t mapme_main;
+
+hicn_prefix_t *retx_pool;
+uword *retx_hash;
+
+/* One-time MAP-Me initialization: record the vlib main and register a
+ * dedicated log class. */
+void
+hicn_mapme_init (vlib_main_t * vm)
+{
+  hicn_mapme_main_t *mm = &mapme_main;
+
+  mm->vm = vm;
+  mm->log_class = vlib_log_register_class ("hicn_mapme", 0);
+}
+
+/* borrowed from vnet/fib/ip4_fib.c */
+
+typedef struct ip4_fib_show_walk_ctx_t_
+{
+  fib_node_index_t *ifsw_indicies;	/* vec of FIB entry indices collected by the walk */
+} ip4_fib_show_walk_ctx_t;
+
+/* Walk callback: accumulate every visited IPv4 FIB entry index into the
+ * walk context; never stops the walk. */
+static fib_table_walk_rc_t
+ip4_fib_show_walk_cb (fib_node_index_t fib_entry_index, void *arg)
+{
+  ip4_fib_show_walk_ctx_t *walk_ctx = arg;
+
+  vec_add1 (walk_ctx->ifsw_indicies, fib_entry_index);
+  return FIB_TABLE_WALK_CONTINUE;
+}
+
+/* borrowed from vnet/fib/ip6_fib.c */
+
+typedef struct ip6_fib_show_ctx_t_
+{
+  fib_node_index_t *entries;	/* vec of FIB entry indices collected by the walk */
+} ip6_fib_show_ctx_t;
+
+/* Walk callback: accumulate every visited IPv6 FIB entry index into the
+ * walk context; never stops the walk. */
+static fib_table_walk_rc_t
+ip6_fib_table_show_walk (fib_node_index_t fib_entry_index, void *arg)
+{
+  ip6_fib_show_ctx_t *walk_ctx = arg;
+
+  vec_add1 (walk_ctx->entries, fib_entry_index);
+  return FIB_TABLE_WALK_CONTINUE;
+}
+
+/*
+ * Inspect one FIB entry: if any bucket of its load-balance DPO is an hICN
+ * DPO, report the entry's prefix on the CLI as pending.
+ * NOTE(review): the 'face' parameter is not used in this body — presumably
+ * reserved for per-face processing; confirm against callers.
+ */
+void
+hicn_mapme_process_fib_entry (vlib_main_t * vm, hicn_face_id_t face,
+			      const fib_node_index_t * fib_entry_index)
+{
+  const dpo_id_t *load_balance_dpo_id;
+  load_balance_t *lb;
+  dpo_id_t *dpo_id;
+  fib_entry_t *fib_entry;
+
+  load_balance_dpo_id = fib_entry_contribute_ip_forwarding (*fib_entry_index);
+
+  /* The dpo is not a load balance dpo as expected */
+  if (load_balance_dpo_id->dpoi_type != DPO_LOAD_BALANCE)
+    return;
+
+  /* former_dpo_id is a load_balance dpo */
+  lb = load_balance_get (load_balance_dpo_id->dpoi_index);
+
+  /* Scan every bucket of the load balancer for an hICN DPO. */
+  for (int i = 0; i < lb->lb_n_buckets; i++)
+    {
+      /* un-const */
+      dpo_id = (dpo_id_t *) load_balance_get_bucket_i (lb, i);
+
+      if (dpo_is_hicn (dpo_id))
+	{
+	  fib_entry = fib_entry_get (*fib_entry_index);
+	  vlib_cli_output (vm, "set face pending %U", format_fib_prefix,
+			   &fib_entry->fe_prefix);
+	}
+    }
+}
+
+/*
+ * Walk every IPv4 FIB table and hand each entry to
+ * hicn_mapme_process_fib_entry().
+ * NOTE(review): table_id and fib_index are fixed to -1/~0, so both filter
+ * conditions inside the loop can never trigger — dead code kept from the
+ * vnet/fib show code this was borrowed from; confirm before removing.
+ */
+void
+hicn_mapme_process_ip4_fib (vlib_main_t * vm, hicn_face_id_t face)
+{
+  ip4_main_t *im4 = &ip4_main;
+  fib_table_t *fib_table;
+  int table_id = -1, fib_index = ~0;
+
+  /* *INDENT-OFF* */
+  pool_foreach (fib_table, im4->fibs,
+  ({
+    ip4_fib_t *fib = pool_elt_at_index(im4->v4_fibs, fib_table->ft_index);
+
+    if (table_id >= 0 && table_id != (int)fib->table_id)
+      continue;
+    if (fib_index != ~0 && fib_index != (int)fib->index)
+      continue;
+
+    fib_node_index_t *fib_entry_index;
+    ip4_fib_show_walk_ctx_t ctx = {
+      .ifsw_indicies = NULL,
+    };
+
+    /* Collect the entry indices first, then process them. */
+    ip4_fib_table_walk(fib, ip4_fib_show_walk_cb, &ctx);
+    //vec_sort_with_function(ctx.ifsw_indicies, fib_entry_cmp_for_sort);
+
+    vec_foreach(fib_entry_index, ctx.ifsw_indicies)
+    {
+      hicn_mapme_process_fib_entry(vm, face, fib_entry_index);
+    }
+
+    vec_free(ctx.ifsw_indicies);
+  }));
+  /* *INDENT-ON* */
+}
+
+/*
+ * Walk every IPv6 FIB table (skipping link-local tables) and hand each
+ * entry to hicn_mapme_process_fib_entry().
+ * NOTE(review): as in the v4 variant, table_id/fib_index are constants so
+ * their filters are dead code; confirm before removing.
+ */
+void
+hicn_mapme_process_ip6_fib (vlib_main_t * vm, hicn_face_id_t face)
+{
+  /* Walk IPv6 FIB */
+  ip6_main_t *im6 = &ip6_main;
+  fib_table_t *fib_table;
+  ip6_fib_t *fib;
+  int table_id = -1, fib_index = ~0;
+
+  /* *INDENT-OFF* */
+  pool_foreach (fib_table, im6->fibs,
+  ({
+    fib = pool_elt_at_index(im6->v6_fibs, fib_table->ft_index);
+
+    if (table_id >= 0 && table_id != (int)fib->table_id)
+      continue;
+    if (fib_index != ~0 && fib_index != (int)fib->index)
+      continue;
+    /* Link-local tables are implementation detail, skip them. */
+    if (fib_table->ft_flags & FIB_TABLE_FLAG_IP6_LL)
+      continue;
+
+    fib_node_index_t *fib_entry_index;
+    ip6_fib_show_ctx_t ctx = {
+      .entries = NULL,
+    };
+
+    ip6_fib_table_walk(fib->index, ip6_fib_table_show_walk, &ctx);
+    //vec_sort_with_function(ctx.entries, fib_entry_cmp_for_sort);
+
+    vec_foreach(fib_entry_index, ctx.entries)
+    {
+      hicn_mapme_process_fib_entry(vm, face, fib_entry_index);
+    }
+
+    vec_free(ctx.entries);
+
+  }));
+  /* *INDENT-ON* */
+}
+
+
+/**
+ * Callback called every time a new face is created (not including app faces).
+ * Scans both the IPv4 and IPv6 FIBs for hICN entries affected by the new
+ * face; order (v4 then v6) only affects CLI output ordering.
+ */
+void
+hicn_mapme_on_face_added (vlib_main_t * vm, hicn_face_id_t face)
+{
+  hicn_mapme_process_ip4_fib (vm, face);
+  hicn_mapme_process_ip6_fib (vm, face);
+}
+
+/*
+ * We need a retransmission pool holding all necessary information for crafting
+ * special interests, thus including both the DPO and the prefix associated to
+ * it.
+ */
+#define NUM_RETX_ENTRIES 100
+#define NUM_RETX_SLOT 2
+#define NEXT_SLOT(cur) (1-cur)
+#define CUR retx_array[cur]
+#define NXT retx_array[NEXT_SLOT(cur)]
+#define CURLEN retx_len[cur]
+#define NXTLEN retx_len[NEXT_SLOT(cur)]
+
+/*
+ * Allocate one vlib buffer, enqueue it to 'node_index', and return a
+ * pointer to its payload so the caller can fill in a MAP-Me header.
+ * Returns NULL on buffer allocation failure — callers must check.
+ *
+ * NOTE(review): the frame is handed to the target node *before* the
+ * payload is written and current_length is set; this relies on the frame
+ * not being dispatched until this code path returns to the scheduler —
+ * confirm this assumption holds for all callers.
+ */
+static_always_inline void *
+get_packet_buffer (vlib_main_t * vm, u32 node_index, u32 dpoi_index,
+		   ip46_address_t * addr, hicn_type_t type)
+{
+  vlib_frame_t *f;
+  vlib_buffer_t *b;		// for newly created packet
+  u32 *to_next;
+  u32 bi;
+  u8 *buffer;
+
+  if (vlib_buffer_alloc (vm, &bi, 1) != 1)
+    {
+      clib_warning ("buffer allocation failure");
+      return NULL;
+    }
+
+  /* Create a new packet from scratch */
+  b = vlib_get_buffer (vm, bi);
+  ASSERT (b->current_data == 0);
+
+  /* Face information for next hop node index */
+  vnet_buffer (b)->ip.adj_index[VLIB_TX] = dpoi_index;
+  hicn_get_buffer (b)->type = type;
+
+  /* Enqueue the packet right now */
+  f = vlib_get_frame_to_node (vm, node_index);
+  to_next = vlib_frame_vector_args (f);
+  to_next[0] = bi;
+  f->n_vectors = 1;
+  vlib_put_frame_to_node (vm, node_index, f);
+
+  // pointer to IP layer ? do we need to prepare for ethernet ???
+  buffer = vlib_buffer_get_current (b);
+  /* Header length depends on the address family carried in 'type'. */
+  b->current_length =
+    (type.l1 == IPPROTO_IPV6) ? HICN_MAPME_V6_HDRLEN : HICN_MAPME_V4_HDRLEN;
+
+  return buffer;
+}
+
+/**
+ * @brief Build and send one MAP-Me special interest for 'prefix' out of
+ *        face 'face'.
+ * @return true on success, false if the output node could not be resolved,
+ *         the buffer could not be allocated, or packet creation failed.
+ */
+static_always_inline bool
+hicn_mapme_send_message (vlib_main_t * vm, const hicn_prefix_t * prefix,
+			 mapme_params_t * params, hicn_face_id_t face)
+{
+  size_t n;
+
+  /* This should be retrieved from face information */
+  DEBUG ("Retransmission for prefix %U seq=%d", format_ip46_address,
+	 &prefix->name, IP46_TYPE_ANY, params->seq);
+
+  char *node_name = hicn_mapme_get_dpo_face_node (face);
+  if (!node_name)
+    {
+      clib_warning
+	("Could not determine next node for sending MAP-Me packet");
+      return false;
+    }
+
+  vlib_node_t *node = vlib_get_node_by_name (vm, (u8 *) node_name);
+  u32 node_index = node->index;
+
+  u8 *buffer = get_packet_buffer (vm, node_index, face,
+				  (ip46_address_t *) prefix,
+				  (params->protocol ==
+				   IPPROTO_IPV6) ? HICN_TYPE_IPV6_ICMP :
+				  HICN_TYPE_IPV4_ICMP);
+  /* get_packet_buffer returns NULL on buffer allocation failure; the
+   * original code dereferenced it unconditionally. */
+  if (!buffer)
+    return false;
+
+  n = hicn_mapme_create_packet (buffer, prefix, params);
+  /* n is size_t (unsigned): the former 'n <= 0' test was a tautological
+   * comparison — failure can only be signalled as 0 here. */
+  if (n == 0)
+    {
+      clib_warning ("Could not create MAP-Me packet");
+      return false;
+    }
+
+  return true;
+}
+
+/*
+ * Send an IU (update) for 'prefix' either to every previous hop recorded
+ * in the TFIB section of the entry (send_all) or only to the first one.
+ * TFIB previous hops occupy the tail of next_hops, starting at
+ * HICN_PARAM_FIB_ENTRY_NHOPS_MAX - tfib_entry_count.
+ */
+static_always_inline void
+hicn_mapme_send_updates (vlib_main_t * vm, hicn_prefix_t * prefix,
+			 dpo_id_t dpo, bool send_all)
+{
+  hicn_mapme_tfib_t *tfib = TFIB (hicn_strategy_dpo_ctx_get (dpo.dpoi_index));
+  if (!tfib)
+    {
+      DEBUG ("NULL TFIB entry id=%d", dpo.dpoi_index);
+      return;
+    }
+
+  /* Index of the first TFIB (previous hop) slot. */
+  u8 tfib_last_idx = HICN_PARAM_FIB_ENTRY_NHOPS_MAX - tfib->tfib_entry_count;
+
+  mapme_params_t params = {
+    .protocol = ip46_address_is_ip4 (&prefix->name)
+      ? IPPROTO_IP : IPPROTO_IPV6,
+    .type = UPDATE,
+    .seq = tfib->seq,
+  };
+
+  if (send_all)
+    {
+      for (u8 pos = tfib_last_idx; pos < HICN_PARAM_FIB_ENTRY_NHOPS_MAX;
+	   pos++)
+	{
+	  hicn_mapme_send_message (vm, prefix, &params,
+				   tfib->next_hops[pos]);
+	}
+    }
+  else
+    {
+      hicn_mapme_send_message (vm, prefix, &params,
+			       tfib->next_hops[tfib_last_idx]);
+    }
+}
+
+/*
+ * MAP-Me event manager process node. Reacts to face/FIB events posted by
+ * the forwarding nodes and drives slotted retransmission of special
+ * interests (IUs): entries pending retransmission live in two fixed-size
+ * slots that are swapped at each timeout.
+ */
+static uword
+hicn_mapme_eventmgr_process (vlib_main_t * vm,
+			     vlib_node_runtime_t * rt, vlib_frame_t * f)
+{
+  f64 timeout = 0;		/* By default, no timer is run */
+  /* due_time must be initialized: it is read on the first iteration that
+   * enters the timed branch below (was uninitialized — UB). */
+  f64 current_time, due_time = 0;
+  u8 idle = 0;
+
+  retx_t retx_array[NUM_RETX_SLOT][NUM_RETX_ENTRIES];
+  /* Zero the whole array: the former byte count
+   * (NUM_RETX_SLOT * NUM_RETX_ENTRIES) omitted sizeof (retx_t) and left
+   * most of the array holding stack garbage. */
+  memset (retx_array, 0, sizeof (retx_array));
+  u8 retx_len[NUM_RETX_SLOT] = { 0 };
+  u8 cur = 0;			/* current slot */
+
+  hicn_mapme_init (vm);
+
+  for (;;)
+    {
+      /* NOTE: returned timeout seems to always be 0 with get_event_data
+       * instead of get_event, and we thus need to reimplement timeout
+       * management on top, as done elsewhere in VPP code.
+       *
+       * The most probable event. For simplicity, for new faces, we pass the same retx_t with no
+       * prefix
+       */
+      if (timeout != 0)
+	{
+	  /* timeout = */ vlib_process_wait_for_event_or_clock (vm, timeout);
+	  current_time = vlib_time_now (vm);
+
+	  /*
+	   * As we don't accummulate errors, we allow for simple timer
+	   * management with no error correction accounting for elapsed time.
+	   * Also, we only run a timer when there are pending retransmissions.
+	   */
+	  timeout =
+	    (due_time >
+	     current_time) ? due_time - current_time : DEFAULT_TIMEOUT;
+	  due_time = current_time + timeout;
+	}
+      else
+	{
+	  vlib_process_wait_for_event (vm);
+	}
+
+      uword event_type = ~0;
+      void *event_data = vlib_process_get_event_data (vm, &event_type);
+
+      switch (event_type)
+	{
+	case HICN_MAPME_EVENT_FACE_ADD:
+	  {
+	    /*
+	     * A face has been added:
+	     * - In case of a local app face, we need to advertise a new prefix
+	     * - For another local face type, we need to advertise local
+	     * prefixes and schedule retransmissions
+	     */
+	    retx_t *retx_events = event_data;
+	    for (u8 i = 0; i < vec_len (retx_events); i++)
+	      {
+		hicn_mapme_on_face_added (vm, retx_events[i].face_id);
+	      }
+	    idle = 0;
+	  }
+	  break;
+
+	case HICN_MAPME_EVENT_FACE_DEL:
+	  idle = 0;
+	  break;
+
+	case HICN_MAPME_EVENT_FACE_NH_SET:
+	  {
+	    /*
+	     * An hICN FIB entry has been modified. All operations so far
+	     * have been procedded in the nodes. Here we need to track
+	     * retransmissions upon timeout: we mark the FIB entry as pending in
+	     * the second-to-next slot
+	     */
+
+	    /* Mark FIB entry as pending for second-to-next slot */
+	    retx_t *retx_events = event_data;
+	    for (u8 i = 0; i < vec_len (retx_events); i++)
+	      {
+		/*
+		 * retx_events[i] corresponds to the dpoi_index of the (T)FIB
+		 * structure that has been modified. Multiple successive
+		 * events might correspond to the same entry.
+		 *
+		 * The FIB entry has a new next hop, and its TFIB section has:
+		 * - eventually previous prev hops for which a IU with a
+		 * lower seqno has been sent
+		 * - the prev hops that have just been added.
+		 *
+		 * We don't distinguish any and just send an updated IU to all
+		 * of them. The retransmission of the latest IU to all
+		 * facilitates the matching of ACKs to a single seqno which is
+		 * the one stored in the FIB.
+		 *
+		 * Since we retransmit to all prev hops, we can remove this
+		 * (T)FIB entry for the check at the end of the current slot.
+		 */
+		retx_t *retx = (retx_t *) & retx_events[i];
+
+		retx->rtx_count = 0;
+		/*
+		 * Transmit IU for all TFIB entries with latest seqno (we have
+		 * at least one for sure!)
+		 */
+		hicn_mapme_send_updates (vm, &retx->prefix, retx->dpo, true);
+
+		/* Delete entry_id from retransmissions in the current slot (if present) ... */
+		for (u8 j = 0; j < CURLEN; j++)
+		  if (!dpo_cmp (&(CUR[j].dpo), &retx->dpo))
+		    {
+		      CUR[j].dpo.dpoi_index = ~0;	/* sufficient */
+		    }
+
+		/* ... and schedule it for next slot (if not already) */
+		u8 j;
+		for (j = 0; j < NXTLEN; j++)
+		  if (!dpo_cmp (&NXT[j].dpo, &retx->dpo))
+		    break;
+		if (j == NXTLEN)	/* not found */
+		  NXT[NXTLEN++] = *retx;
+	      }
+	    idle = 0;
+	  }
+	  break;
+
+	case HICN_MAPME_EVENT_FACE_NH_ADD:
+	  /*
+	   * As per the description of states, this event should add the face
+	   * to the list of next hops, and eventually remove it from TFIB.
+	   * This corresponds to the multipath case.
+	   *
+	   * In all cases, we assume the propagation was already done when the first
+	   * interest with the same sequence number was received, so we stop here
+	   * No change in TFIB = no IU to send
+	   *
+	   * No change in timers.
+	   */
+	  vlib_cli_output (vm, "[hicn_event_mgr] ADD NEXT HOP IN FIB");
+
+	  /* Add ingress face as next hop */
+	  idle = 0;
+
+	  break;
+
+	case HICN_MAPME_EVENT_FACE_PH_ADD:
+	  /* Back-propagation, interesting even for IN (desync) */
+	  {
+	    retx_t *retx_events = event_data;
+	    for (u8 i = 0; i < vec_len (retx_events); i++)
+	      {
+		hicn_mapme_send_updates (vm, &retx_events[i].prefix,
+					 retx_events[i].dpo, false);
+	      }
+	    idle = 0;
+	  }
+	  break;
+
+	case HICN_MAPME_EVENT_FACE_PH_DEL:
+	  /* Ack : remove an element from TFIB */
+	  break;
+
+	case ~0:
+	  /* Timeout occurred, we have to retransmit IUs for all pending
+	   * prefixes having entries in TFIB
+	   *
+	   * timeouts are slotted
+	   *    |     |     |     |
+	   *
+	   *      ^
+	   *      +- event occurred
+	   *            new face, wait for the second next
+	   *            (having two arrays and swapping cur and next)
+	   *         retx : put in next
+	   */
+	  idle += 1;
+	  for (u8 pos = 0; pos < CURLEN; pos++)
+	    {
+	      retx_t *retx = &CUR[pos];
+
+	      if (retx->dpo.dpoi_index == ~0)	/* deleted entry */
+		continue;
+
+	      hicn_mapme_tfib_t *tfib =
+		TFIB (hicn_strategy_dpo_ctx_get (retx->dpo.dpoi_index));
+	      if (!tfib)
+		{
+		  DEBUG ("NULL TFIB entry for dpoi_index=%d",
+			 retx->dpo.dpoi_index);
+		  continue;
+		}
+
+	      hicn_mapme_send_updates (vm, &retx->prefix, retx->dpo, true);
+
+	      retx->rtx_count++;
+	      /* If we exceed the number of retransmissions it means that all
+	       * tfib entries have seen at least HICN_PARAM_RTX_MAX
+	       * retransmissions */
+	      if (retx->rtx_count < HICN_PARAM_RTX_MAX)
+		{
+		  /*
+		   * We did some retransmissions, so let's reschedule a check in the
+		   * next slot
+		   */
+		  NXT[NXTLEN++] = CUR[pos];
+		  idle = 0;
+		}
+	      else
+		{
+		  hicn_mapme_tfib_clear (tfib);
+		}
+	    }
+
+	  /* Reset events in this slot and prepare for next one */
+	  CURLEN = 0;
+	  cur = NEXT_SLOT (cur);
+
+	  /* After two empty slots, we disable the timer */
+
+	  break;
+	}
+
+      if (event_data)
+	vlib_process_put_event_data (vm, event_data);
+
+      /* Arm the timer only while recent activity suggests pending work. */
+      timeout = (idle > 1) ? 0 : DEFAULT_TIMEOUT;
+
+      // if (vlib_process_suspend_time_is_zero (timeout)) { ... }
+
+    }
+
+  /* NOTREACHED */
+  return 0;
+}
+
+/* Not static as we need to access it from hicn_face */
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (hicn_mapme_eventmgr_process_node) = {
+  .function = hicn_mapme_eventmgr_process,
+  .type = VLIB_NODE_TYPE_PROCESS,
+  .name = "mapme-eventmgr-process",
+  .process_log2_n_stack_bytes = 16,	/* 64 KiB process stack */
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/hicn-plugin/src/network/mapme_eventmgr.h b/hicn-plugin/src/network/mapme_eventmgr.h
new file mode 100644
index 000000000..b63d16805
--- /dev/null
+++ b/hicn-plugin/src/network/mapme_eventmgr.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vlib/vlib.h> // vlib_node_registration_t (vlib/node.h)
+
+#include <hicn/name.h>
+
+/**
+ * @file mapme_eventmgr.h
+ *
+ */
+
+/*
+ * Structure carrying all necessary information for managing Special Interest
+ * (re)transmissions.
+ */
+typedef struct
+{
+  hicn_prefix_t prefix;		/* prefix the IU refers to */
+  dpo_id_t dpo;			/* strategy DPO of the (T)FIB entry */
+  hicn_face_id_t face_id;	/* face the event originated from */
+  u8 rtx_count;			// Number of retransmissions since last tfib addition
+} retx_t;
+
+/* Argument fully parenthesized: the former expansion broke for
+ * expression arguments (e.g. HASH32(a + b)). */
+#define HASH32(x) ((u16) (x) ^ ((x) << 16))
+
+/**
+ * @brief This is a process node reacting to face events.
+ */
+/* 'extern' added: the definition is emitted by VLIB_REGISTER_NODE in
+ * mapme_eventmgr.c; a bare object declaration here creates a tentative
+ * definition in every translation unit including this header. */
+extern vlib_node_registration_t hicn_mapme_eventmgr_process_node;
+
+/**
+ * @brief Initialize MAP-Me on forwarder
+ * @params vm - vlib_main_t pointer
+ */
+void hicn_mapme_init (vlib_main_t * vm);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/hicn-plugin/src/network/mgmt.c b/hicn-plugin/src/network/mgmt.c
new file mode 100644
index 000000000..7b20fe911
--- /dev/null
+++ b/hicn-plugin/src/network/mgmt.c
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vlib/vlib.h>
+#include <vppinfra/error.h>
+
+#include "hicn.h"
+#include "infra.h"
+#include "mgmt.h"
+
+/* define message IDs */
+#include <vpp_plugins/hicn/hicn_msg_enum.h>
+
+/* shared routine between API and CLI, leveraging API message structure */
+int
+hicn_mgmt_node_stats_get (vl_api_hicn_api_node_stats_get_reply_t * rmp)
+{
+  /* Reset all accumulated counters; they are summed below from the error
+   * counters of each hICN graph node, across all vlib mains (workers). */
+  rmp->pkts_processed = 0;
+  rmp->pkts_interest_count = 0;
+  rmp->pkts_data_count = 0;
+  rmp->pkts_from_cache_count = 0;
+  rmp->pkts_no_pit_count = 0;
+  rmp->pit_expired_count = 0;
+  rmp->cs_expired_count = 0;
+  rmp->cs_lru_count = 0;
+  rmp->pkts_drop_no_buf = 0;
+  rmp->interests_aggregated = 0;
+  rmp->interests_retx = 0;
+  /* PIT/CS occupancy comes straight from the shared PIT/CS state;
+   * all reply fields are in network byte order. */
+  rmp->pit_entries_count =
+    clib_host_to_net_u64 (hicn_main.pitcs.pcs_pit_count);
+  rmp->cs_entries_count = clib_host_to_net_u64 (hicn_main.pitcs.pcs_cs_count);
+  rmp->cs_entries_ntw_count =
+    clib_host_to_net_u64 (hicn_main.pitcs.policy_state.count);
+
+  vlib_error_main_t *em;
+  vlib_node_t *n;
+  /* Sum per-node "error" counters (used as stats) over every vlib main. */
+  foreach_vlib_main ((
+		       {
+		       em = &this_vlib_main->error_main;
+		       n =
+		       vlib_get_node (this_vlib_main,
+				      hicn_interest_pcslookup_node.index);
+		       u32 node_cntr_base_idx = n->error_heap_index;
+		       rmp->pkts_processed +=
+		       clib_host_to_net_u64 (em->counters[node_cntr_base_idx +
+							  HICNFWD_ERROR_PROCESSED]);
+		       rmp->pkts_interest_count +=
+		       clib_host_to_net_u64 (em->counters[node_cntr_base_idx +
+							  HICNFWD_ERROR_INTERESTS]);
+		       n =
+		       vlib_get_node (this_vlib_main,
+				      hicn_data_pcslookup_node.index);
+		       node_cntr_base_idx = n->error_heap_index;
+		       rmp->pkts_processed +=
+		       clib_host_to_net_u64 (em->counters[node_cntr_base_idx +
+							  HICNFWD_ERROR_PROCESSED]);
+		       rmp->pkts_data_count +=
+		       clib_host_to_net_u64 (em->counters[node_cntr_base_idx +
+							  HICNFWD_ERROR_DATAS]);
+		       n =
+		       vlib_get_node (this_vlib_main,
+				      hicn_interest_hitcs_node.index);
+		       node_cntr_base_idx = n->error_heap_index;
+		       rmp->pkts_from_cache_count +=
+		       clib_host_to_net_u64 (em->counters[node_cntr_base_idx +
+							  HICNFWD_ERROR_CACHED]);
+		       n =
+		       vlib_get_node (this_vlib_main,
+				      hicn_interest_hitpit_node.index);
+		       node_cntr_base_idx = n->error_heap_index;
+		       rmp->interests_aggregated +=
+		       clib_host_to_net_u64 (em->counters[node_cntr_base_idx +
+							  HICNFWD_ERROR_INTEREST_AGG]);
+		       rmp->interests_retx +=
+		       clib_host_to_net_u64 (em->counters[node_cntr_base_idx +
+							  HICNFWD_ERROR_INT_RETRANS]);}));
+  return (HICN_ERROR_NONE);
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/network/mgmt.h b/hicn-plugin/src/network/mgmt.h
new file mode 100644
index 000000000..bafb0194d
--- /dev/null
+++ b/hicn-plugin/src/network/mgmt.h
@@ -0,0 +1,129 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HICN_MGMT_H__
+#define __HICN_MGMT_H__
+
+#include <vppinfra/error.h>
+#include <vpp_plugins/hicn/hicn_api.h>
+
+#include "faces/face.h"
+
+
+/**
+ * @file mgmt.h
+ *
+ */
+
+/* Aggregate forwarding statistics snapshot. */
+typedef struct icn_stats_s
+{
+  u32 pkts_processed;		/* total hICN packets seen */
+  u32 pkts_interest_count;	/* interests forwarded */
+  u32 pkts_data_count;		/* data packets forwarded */
+  u32 pkts_from_cache_count;	/* data served from CS */
+  u32 pkts_no_pit_count;	/* drops for missing PIT entry */
+  u32 pit_expired_count;	/* expired PIT entries */
+  u32 cs_expired_count;		/* expired CS entries */
+  u32 no_bufs_count;		/* buffer allocation failures */
+  u32 pkts_interest_agg;	/* interests aggregated in PIT */
+  u32 pkts_int_retrans;		/* interest retransmissions */
+  u32 pit_int_count;		/* current PIT entries */
+  u32 pit_cs_count;		/* current CS entries */
+} icn_stats_t;
+
+/* Management operations on faces. */
+typedef enum
+{
+  HICN_MGMT_FACE_OP_NONE = 0,
+  HICN_MGMT_FACE_OP_CREATE,
+  HICN_MGMT_FACE_OP_DELETE,
+  HICN_MGMT_FACE_OP_ADMIN,
+  HICN_MGMT_FACE_OP_HELLO,
+} hicn_mgmt_face_op_e;
+
+/* Management operations on MAP-Me. */
+typedef enum
+{
+  HICN_MGMT_MAPME_OP_NONE = 0,
+  HICN_MGMT_MAPME_OP_CREATE,
+  HICN_MGMT_MAPME_OP_DELETE,
+  HICN_MGMT_MAPME_OP_ENABLE,
+  HICN_MGMT_MAPME_OP_DISABLE
+} hicn_mgmt_mapme_op_e;
+
+/* Address family selector used by management calls. */
+typedef enum
+{
+  HICN_ADDRESS_TYPE_NONE,
+  HICN_ADDRESS_TYPE_V4,
+  HICN_ADDRESS_TYPE_V6
+} hicn_address_type_e;
+
+/*
+ * Utility to overwrite (not increment) one error counter of an hICN node.
+ */
+always_inline void
+update_node_counter (vlib_main_t * vm, u32 node_idx, u32 counter_idx, u64 val)
+{
+  vlib_error_main_t *err_main = &vm->error_main;
+  vlib_node_t *n = vlib_get_node (vm, node_idx);
+
+  err_main->counters[n->error_heap_index + counter_idx] = val;
+}
+
+
+/*
+ * Stats for the forwarding node, which end up called "error" even though
+ * they aren't... (VPP exposes per-node counters via the error mechanism).
+ */
+#define foreach_hicnfwd_error \
+ _(PROCESSED, "hICN packets processed") \
+ _(INTERESTS, "hICN interests forwarded") \
+ _(DATAS, "hICN data msgs forwarded") \
+ _(CACHED, "Cached data ") \
+ _(NO_PIT, "hICN no PIT entry drops") \
+ _(PIT_EXPIRED, "hICN expired PIT entries") \
+ _(CS_EXPIRED, "hICN expired CS entries") \
+ _(CS_LRU, "hICN LRU CS entries freed") \
+ _(NO_BUFS, "No packet buffers") \
+ _(INTEREST_AGG, "Interests aggregated") \
+ _(INTEREST_AGG_ENTRY, "Interest aggregated per entry") \
+ _(INT_RETRANS, "Interest retransmissions") \
+ _(INT_COUNT, "Interests in PIT") \
+ _(CS_COUNT, "CS total entries") \
+ _(CS_NTW_COUNT, "CS ntw entries") \
+ _(CS_APP_COUNT, "CS app entries") \
+ _(HASH_COLL_HASHTB_COUNT, "Collisions in Hash table")
+
+/* Enum generated from the list above: HICNFWD_ERROR_<SYM> per entry. */
+typedef enum
+{
+#define _(sym, str) HICNFWD_ERROR_##sym,
+  foreach_hicnfwd_error
+#undef _
+    HICNFWD_N_ERROR,
+} hicnfwd_error_t;
+
+/*
+ * Declarations
+ */
+clib_error_t *hicn_api_plugin_hookup (vlib_main_t * vm);
+
+int hicn_mgmt_node_stats_get (vl_api_hicn_api_node_stats_get_reply_t * rmp);
+
+#endif /* // __HICN_MGMT_H__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/network/params.h b/hicn-plugin/src/network/params.h
new file mode 100644
index 000000000..606d50771
--- /dev/null
+++ b/hicn-plugin/src/network/params.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HICN_PARAM_H__
+#define __HICN_PARAM_H__
+
+#include <math.h>
+
+/**
+ * @file params.h
+ *
+ */
+
+
+/*
+ * Features
+ */
+#define HICN_FEATURE_CS 1 //1 enable 0 disable
+
+/*
+ * Face compile-time parameters
+ */
+#define HICN_PARAM_FACES_MAX 512
+
+STATIC_ASSERT ((HICN_PARAM_FACES_MAX & (HICN_PARAM_FACES_MAX - 1)) == 0,
+ "HICN_PARAM_FACES_MAX must be a power of 2");
+
+/*
+ * Max length for hICN names
+ */
+#define HICN_PARAM_HICN_NAME_LEN_MAX 20 //bytes
+
+// Max next - hops supported in a FIB entry
+#define HICN_PARAM_FIB_ENTRY_NHOPS_MAX 10
+
+// Default and limit on weight, whatever weight means
+#define HICN_PARAM_FIB_ENTRY_NHOP_WGHT_DFLT 0x10
+#define HICN_PARAM_FIB_ENTRY_NHOP_WGHT_MAX 0xff
+
+/*
+ * PIT compile-time parameters
+ */
+#define HICN_PARAM_PIT_ENTRIES_MIN 1024
+/* Expression macros parenthesized to avoid precedence surprises when the
+ * macro is used inside a larger expression. */
+#define HICN_PARAM_PIT_ENTRIES_DFLT (1024 * 128)
+#define HICN_PARAM_PIT_ENTRIES_MAX (2 * 1024 * 1024)
+
+// aggregation limit(interest previous hops)
+// Supported up to 516. For more than 4 faces this param must
+// HICN_PARAM_PIT_ENTRY_PHOPS_MAX - 4 must be a power of two
+#define HICN_PARAM_PIT_ENTRY_PHOPS_MAX 20
+
+STATIC_ASSERT ((ceil (log2 ((HICN_PARAM_PIT_ENTRY_PHOPS_MAX - 4)))) ==
+ (floor (log2 ((HICN_PARAM_PIT_ENTRY_PHOPS_MAX - 4)))),
+ "HICN_PARAM_PIT_ENTRY_PHOPS_MAX - 4 must be a power of two");
+
+STATIC_ASSERT ((HICN_PARAM_PIT_ENTRY_PHOPS_MAX <= HICN_PARAM_FACES_MAX),
+ "HICN_PARAM_PIT_ENTRY_PHOP_MAX must be <= than HICN_PARAM_FACES_MAX");
+
+//tFIB parameters
+#define HICN_PARAM_RTX_MAX 10
+
+// PIT lifetime limits on API override this(in seconds, integer type)
+#define HICN_PARAM_PIT_LIFETIME_BOUND_MIN_SEC 0
+#define HICN_PARAM_PIT_LIFETIME_BOUND_MAX_SEC 200
+
+//PIT lifetime params if not set at API(in mseconds, integer type)
+#define HICN_PARAM_PIT_LIFETIME_DFLT_MAX_MS 20000
+
+// Face CS reservation params
+#define HICN_PARAM_FACE_MAX_CS_RESERVED 20000 //packets
+#define HICN_PARAM_FACE_MIN_CS_RESERVED 0 //packets
+#define HICN_PARAM_FACE_DFT_CS_RESERVED 20000 //packets
+
+/*
+ * CS compile-time parameters
+ */
+#define HICN_PARAM_CS_ENTRIES_MIN 0	// can disable CS
+/* Expression macros parenthesized to avoid precedence surprises. */
+#define HICN_PARAM_CS_ENTRIES_DFLT (4 * 1024)
+#define HICN_PARAM_CS_ENTRIES_MAX (1024 * 1024)
+
+#define HICN_PARAM_CS_LRU_DEFAULT (16 * 1024)
+
+/* CS lifetime defines, in mseconds, integer type */
+#define HICN_PARAM_CS_LIFETIME_MIN 0
+#define HICN_PARAM_CS_LIFETIME_DFLT (5 * 60 * 1000)	// 300 seconds
+#define HICN_PARAM_CS_LIFETIME_MAX (24 * 3600 * 1000)	//24 hours...
+
+/* CS reserved portion for applications */
+#define HICN_PARAM_CS_RESERVED_APP 50 //%
+#define HICN_PARAM_CS_MIN_MBUF 4096 //this seems to be the minumim default number of mbuf we can have in vpp
+
+/* Cloning parameters */
+/* ip4 */
+#define HICN_IP4_VERSION_HEADER_LENGTH 0x45
+#define HICN_IP4_PROTOCOL IP_PROTOCOL_TCP
+#define HICN_IP4_TTL_DEFAULT 128
+
+/* ip6 */
+#define IPV6_DEFAULT_VERSION 6
+#define IPV6_DEFAULT_TRAFFIC_CLASS 0
+#define IPV6_DEFAULT_FLOW_LABEL 0
+/* Whole expansion parenthesized: the former unparenthesized '|' chain
+ * mis-associated when the macro was combined with other operators.
+ * NOTE(review): macro name keeps the historical "HCIN" typo because
+ * external users may reference it. */
+#define HCIN_IP6_VERSION_TRAFFIC_FLOW ((IPV6_DEFAULT_VERSION << 28) | \
+                                       (IPV6_DEFAULT_TRAFFIC_CLASS << 20) | \
+                                       (IPV6_DEFAULT_FLOW_LABEL & 0xfffff))
+#define HICN_IP6_PROTOCOL IP_PROTOCOL_TCP
+#define HICN_IP6_HOP_LIMIT 0x40
+
+#endif /* // __HICN_PARAM_H__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/network/parser.h b/hicn-plugin/src/network/parser.h
new file mode 100644
index 000000000..102f63107
--- /dev/null
+++ b/hicn-plugin/src/network/parser.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HICN_PARSER_H__
+#define __HICN_PARSER_H__
+
+#include <vlib/vlib.h>
+
+#include "hicn.h"
+#include <vpp_plugins/hicn/error.h>
+
+/**
+ * @file parser.h
+ */
+
+/*
+ * Key type codes for header, header tlvs, body tlvs, and child tlvs
+ */
+
+// FIXME(reuse lib struct, no more control ?)
+/* Packet direction discriminator: interest (request) vs content (data). */
+enum hicn_pkt_type_e
+{
+  HICN_PKT_TYPE_INTEREST = 0,
+  HICN_PKT_TYPE_CONTENT = 1,
+};
+
+/**
+ * @brief Parse an interest packet
+ *
+ * @param pkt vlib buffer holding the interest
+ * @param name return variable that will point to the hicn name
+ * @param namelen return variable that will hold the length of the name
+ * @param pkt_hdrp return variable that will point to the packet header
+ * @param isv6 return variable that will be equal to 1 if the header is ipv6
+ */
+always_inline int
+hicn_interest_parse_pkt (vlib_buffer_t * pkt, hicn_name_t * name,
+			 u16 * namelen, hicn_header_t ** pkt_hdrp, u8 * isv6)
+{
+  if (pkt == NULL)
+    return HICN_ERROR_PARSER_PKT_INVAL;
+  hicn_header_t *pkt_hdr = vlib_buffer_get_current (pkt);
+  *pkt_hdrp = pkt_hdr;
+  u8 *ip_pkt = vlib_buffer_get_current (pkt);
+  *isv6 = hicn_is_v6 (pkt_hdr);
+  /* branchless select: IPPROTO_IPV6 when v6, 0 otherwise */
+  u8 ip_proto = (*isv6) * IPPROTO_IPV6;
+  u8 next_proto_offset = 6 + (1 - *isv6) * 3;
+  //in the ipv6 header the next header field is at byte 6
+  // in the ipv4 header the protocol field is at byte 9
+  hicn_type_t type = (hicn_type_t) { {
+				     .l4 = IPPROTO_NONE,.l3 =
+				     IPPROTO_NONE,.l2 =
+				     ip_pkt[next_proto_offset],.l1 =
+				     ip_proto}
+  };
+  hicn_get_buffer (pkt)->type = type;
+
+  /* Delegate name extraction to the protocol-specific ops vtable. */
+  hicn_ops_vft[type.l1]->get_interest_name (type, &pkt_hdr->protocol, name);
+  *namelen = (1 - (*isv6)) * HICN_V4_NAME_LEN + (*isv6) * HICN_V6_NAME_LEN;
+
+  return HICN_ERROR_NONE;
+}
+
+/**
+ * @brief Parse a data packet
+ *
+ * @param pkt vlib buffer holding the data packet
+ * @param name return variable that will point to the hicn name
+ * @param namelen return variable that will hold the length of the name
+ * @param pkt_hdrp return variable that will point to the packet header
+ * @param isv6 return variable that will be equal to 1 if the header is ipv6
+ */
+always_inline int
+hicn_data_parse_pkt (vlib_buffer_t * pkt, hicn_name_t * name,
+		     u16 * namelen, hicn_header_t ** pkt_hdrp, u8 * isv6)
+{
+  if (pkt == NULL)
+    return HICN_ERROR_PARSER_PKT_INVAL;
+  hicn_header_t *pkt_hdr = vlib_buffer_get_current (pkt);
+  /* single assignment: the original duplicated this statement */
+  *pkt_hdrp = pkt_hdr;
+  u8 *ip_pkt = vlib_buffer_get_current (pkt);
+  *isv6 = hicn_is_v6 (pkt_hdr);
+  /* branchless select: IPPROTO_IPV6 when v6, 0 otherwise */
+  u8 ip_proto = (*isv6) * IPPROTO_IPV6;
+  /*
+   * in the ipv6 header the next header field is at byte 6 in the ipv4
+   * header the protocol field is at byte 9
+   */
+  u8 next_proto_offset = 6 + (1 - *isv6) * 3;
+  hicn_type_t type = (hicn_type_t) { {.l4 = IPPROTO_NONE,.l3 =
+				      IPPROTO_NONE,.l2 =
+				      ip_pkt[next_proto_offset],.l1 =
+				      ip_proto}
+  };
+  hicn_get_buffer (pkt)->type = type;
+  /* Delegate name extraction to the protocol-specific ops vtable. */
+  hicn_ops_vft[type.l1]->get_data_name (type, &pkt_hdr->protocol, name);
+  *namelen = (1 - (*isv6)) * HICN_V4_NAME_LEN + (*isv6) * HICN_V6_NAME_LEN;
+
+  return HICN_ERROR_NONE;
+}
+
+
+#endif /* // __HICN_PARSER_H__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/network/pcs.c b/hicn-plugin/src/network/pcs.c
new file mode 100644
index 000000000..6c44b9d83
--- /dev/null
+++ b/hicn-plugin/src/network/pcs.c
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdlib.h>
+#include <vlib/vlib.h>
+
+#include "hashtb.h"
+#include "pcs.h"
+#include "cache_policies/cs_lru.h"
+
+/**
+ * @brief Allocate and initialize the combined PIT/CS table.
+ *
+ * @param p PIT/CS structure to initialize
+ * @param num_elems number of elements in the hash table
+ * @return return code of hicn_hashtb_alloc
+ */
+int
+hicn_pit_create (hicn_pit_cs_t * p, u32 num_elems)
+{
+  int ret =
+    hicn_hashtb_alloc (&p->pcs_table, num_elems, sizeof (hicn_pcs_entry_t));
+
+  /* Do not touch the table if the allocation failed */
+  if (ret != HICN_ERROR_NONE)
+    return (ret);
+
+  p->pcs_table->ht_flags |= HICN_HASHTB_FLAG_KEY_FMT_NAME;
+
+  p->pcs_pit_count = p->pcs_cs_count = 0;
+
+  /* Reserve a share of the CS budget for application faces */
+  p->policy_state.max =
+    HICN_PARAM_CS_LRU_DEFAULT -
+    (HICN_PARAM_CS_LRU_DEFAULT * HICN_PARAM_CS_RESERVED_APP / 100);
+  p->policy_state.count = 0;
+  p->policy_state.head = p->policy_state.tail = 0;
+
+  /* Wire in the LRU eviction policy */
+  p->policy_vft.hicn_cs_insert = hicn_cs_lru.hicn_cs_insert;
+  p->policy_vft.hicn_cs_update = hicn_cs_lru.hicn_cs_update;
+  p->policy_vft.hicn_cs_dequeue = hicn_cs_lru.hicn_cs_dequeue;
+  p->policy_vft.hicn_cs_delete_get = hicn_cs_lru.hicn_cs_delete_get;
+  p->policy_vft.hicn_cs_trim = hicn_cs_lru.hicn_cs_trim;
+  p->policy_vft.hicn_cs_flush = hicn_cs_lru.hicn_cs_flush;
+
+  return (ret);
+}
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/network/pcs.h b/hicn-plugin/src/network/pcs.h
new file mode 100644
index 000000000..a9e1ae5a0
--- /dev/null
+++ b/hicn-plugin/src/network/pcs.h
@@ -0,0 +1,839 @@
+/*
+ * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HICN_PCS_H__
+#define __HICN_PCS_H__
+
+#include "hashtb.h"
+#include "face_db.h"
+#include "strategy_dpo_manager.h"
+#include "error.h"
+#include "cache_policies/cs_policy.h"
+#include "faces/face.h"
+
+/**
+ * @file pcs.h
+ *
+ * This file implements the PIT and CS, which are collapsed into the same
+ * structure; therefore an entry is either a PIT entry or a CS entry.
+ * The implementation consists of a hash table where each entry of the
+ * hash table contains a PIT or CS entry, some counters to maintain the
+ * status of the PIT/CS, and the reference to the eviction policy for
+ * the CS. The default eviction policy is LRU.
+ */
+
+/* The PIT and CS are stored as a union */
+#define HICN_PIT_NULL_TYPE 0
+#define HICN_PIT_TYPE 1
+#define HICN_CS_TYPE 2
+
+/*
+ * Definitions and Forward refs for the time counters we're trying out.
+ * Counters are maintained by the background process. TODO.
+ */
+#define SEC_MS 1000
+#define HICN_INFRA_FAST_TIMER_SECS 1
+#define HICN_INFRA_FAST_TIMER_MSECS (HICN_INFRA_FAST_TIMER_SECS * SEC_MS)
+#define HICN_INFRA_SLOW_TIMER_SECS 60
+#define HICN_INFRA_SLOW_TIMER_MSECS (HICN_INFRA_SLOW_TIMER_SECS * SEC_MS)
+
+/*
+ * Fields shared by PIT and CS entries; placed at the head of each PCS entry.
+ * Note that changing this may change alignment within the PIT struct, so be careful.
+ */
+typedef struct __attribute__ ((packed)) hicn_pcs_shared_s
+{
+
+  /* Installation/creation time (vpp float units, for now) */
+  f64 create_time;
+
+  /* Expiration time (vpp float units, for now) */
+  f64 expire_time;
+
+  /* Shared 'flags' octet (e.g. HICN_PCS_ENTRY_CS_FLAG) */
+  u8 entry_flags;
+
+  /* Needed to align for the pit or cs portion */
+  u8 padding;
+} hicn_pcs_shared_t;
+
+#define HICN_PCS_ENTRY_CS_FLAG 0x01
+
+/*
+ * PIT entry, unioned with a CS entry below
+ */
+typedef struct __attribute__ ((packed)) hicn_pit_entry_s
+{
+
+  /* Shared size 8 + 8 + 2 = 18B */
+
+  /*
+   * Egress next hop (contains the egress face) This id refers to the
+   * position of the chosen face in the next_hops array of the dpo */
+  /* 18B + 1B = 19B */
+  u8 pe_txnh;
+
+  /* Array of incoming ifaces */
+  /* 24B + 32B (8B*4) = 56B */
+  hicn_face_db_t faces;
+
+} hicn_pit_entry_t;
+
+#define HICN_CS_ENTRY_OPAQUE_SIZE HICN_HASH_NODE_APP_DATA_SIZE - 36
+
+/*
+ * CS entry, unioned with a PIT entry below
+ */
+typedef struct __attribute__ ((packed)) hicn_cs_entry_s
+{
+  /* 18B + 2B = 20B */
+  u16 align;
+
+  /* Packet buffer, if held */
+  /* 20B + 4B = 24B */
+  u32 cs_pkt_buf;
+
+  /* Ingress face */
+  /* 24B + 4B = 28B */
+  hicn_face_id_t cs_rxface;
+
+  /* Linkage for LRU, in the form of hashtable node indexes */
+  /* 28B + 8B = 36B */
+  u32 cs_lru_prev;
+  u32 cs_lru_next;
+
+  /* Reserved for implementing cache policies different from LRU */
+  /* 36B + (64 - 36)B = 64B */
+  u8 opaque[HICN_CS_ENTRY_OPAQUE_SIZE];
+
+
+} __attribute__ ((packed)) hicn_cs_entry_t;
+
+/*
+ * Combined PIT/CS entry data structure, embedded in a hashtable entry after
+ * the common hashtable preamble struct. This MUST fit in the available
+ * (fixed) space in a hashtable node.
+ */
+typedef struct hicn_pcs_entry_s
+{
+
+  /* Common PIT/CS fields (timestamps, flags) */
+  hicn_pcs_shared_t shared;
+
+  /* PIT and CS views alias the same storage; shared.entry_flags
+   * (HICN_PCS_ENTRY_CS_FLAG) tells which view is active */
+  union
+  {
+    hicn_pit_entry_t pit;
+    hicn_cs_entry_t cs;
+  } u;
+} hicn_pcs_entry_t;
+
+
+/*
+ * Overall PIT/CS table, based on the common hashtable
+ */
+typedef struct hicn_pit_cs_s
+{
+
+  hicn_hashtb_t *pcs_table;
+
+  /* Counters for PIT/CS entries */
+  u32 pcs_pit_count;
+  u32 pcs_cs_count;
+  u32 pcs_cs_dealloc;
+  u32 pcs_pit_dealloc;
+
+  /* Total size of PCS */
+  u32 pcs_size;
+
+  /* Eviction policy state and its virtual function table (LRU by default) */
+  hicn_cs_policy_t policy_state;
+  hicn_cs_policy_vft_t policy_vft;
+
+} hicn_pit_cs_t;
+
+/* Functions declarations */
+int hicn_pit_create (hicn_pit_cs_t * p, u32 num_elems);
+
+always_inline void
+hicn_pit_to_cs (vlib_main_t * vm, hicn_pit_cs_t * pitcs,
+ hicn_pcs_entry_t * pcs_entry, hicn_hash_entry_t * hash_entry,
+ hicn_hash_node_t * node, const hicn_dpo_vft_t * dpo_vft,
+ dpo_id_t * hicn_dpo_id, hicn_face_id_t inface_id, u8 is_appface);
+
+always_inline void
+hicn_pcs_cs_update (vlib_main_t * vm, hicn_pit_cs_t * pitcs,
+ hicn_pcs_entry_t * old_entry, hicn_pcs_entry_t * entry,
+ hicn_hash_node_t * node);
+
+always_inline void
+hicn_pcs_cs_delete (vlib_main_t * vm, hicn_pit_cs_t * pitcs,
+ hicn_pcs_entry_t ** pcs_entry, hicn_hash_node_t ** node,
+ hicn_hash_entry_t * hash_entry,
+ const hicn_dpo_vft_t * dpo_vft, dpo_id_t * hicn_dpo_id);
+
+always_inline int
+hicn_pcs_cs_insert (vlib_main_t * vm, hicn_pit_cs_t * pitcs,
+ hicn_pcs_entry_t * entry, hicn_hash_node_t * node,
+ hicn_hash_entry_t ** hash_entry, u64 hashval,
+ u32 * node_id, index_t * dpo_ctx_id, u8 * vft_id,
+ u8 * is_cs, u8 * hash_entry_id, u32 * bucket_id,
+ u8 * bucket_is_overflow);
+
+always_inline int
+hicn_pcs_cs_insert_update (vlib_main_t * vm, hicn_pit_cs_t * pitcs,
+ hicn_pcs_entry_t * entry, hicn_hash_node_t * node,
+ hicn_hash_entry_t ** hash_entry, u64 hashval,
+ u32 * node_id, index_t * dpo_ctx_id, u8 * vft_id,
+ u8 * is_cs, u8 * hash_entry_id, u32 * bucket_id,
+ u8 * bucket_is_overflow, hicn_face_id_t inface);
+
+always_inline int
+hicn_pcs_pit_insert (hicn_pit_cs_t * pitcs, hicn_pcs_entry_t * entry,
+ hicn_hash_node_t * node, hicn_hash_entry_t ** hash_entry,
+ u64 hashval, u32 * node_id, index_t * dpo_ctx_id,
+ u8 * vft_id, u8 * is_cs, u8 * hash_entry_id,
+ u32 * bucket_id, u8 * bucket_is_overflow);
+
+always_inline void
+hicn_pcs_pit_delete (hicn_pit_cs_t * pitcs, hicn_pcs_entry_t ** pcs_entryp,
+ hicn_hash_node_t ** node, vlib_main_t * vm,
+ hicn_hash_entry_t * hash_entry,
+ const hicn_dpo_vft_t * dpo_vft, dpo_id_t * hicn_dpo_id);
+
+always_inline int
+hicn_pcs_insert (vlib_main_t * vm, hicn_pit_cs_t * pitcs,
+ hicn_pcs_entry_t * entry, hicn_hash_node_t * node,
+ hicn_hash_entry_t ** hash_entry, u64 hashval, u32 * node_id,
+ index_t * dpo_ctx_id, u8 * vft_id, u8 * is_cs,
+ u8 * hash_entry_id, u32 * bucket_id,
+ u8 * bucket_is_overflow);
+
+always_inline void
+hicn_pcs_delete (hicn_pit_cs_t * pitcs, hicn_pcs_entry_t ** pcs_entryp,
+ hicn_hash_node_t ** node, vlib_main_t * vm,
+ hicn_hash_entry_t * hash_entry,
+ const hicn_dpo_vft_t * dpo_vft, dpo_id_t * hicn_dpo_id);
+
+always_inline void
+hicn_pcs_remove_lock (hicn_pit_cs_t * pitcs, hicn_pcs_entry_t ** pcs_entryp,
+ hicn_hash_node_t ** node, vlib_main_t * vm,
+ hicn_hash_entry_t * hash_entry,
+ const hicn_dpo_vft_t * dpo_vft, dpo_id_t * hicn_dpo_id);
+
+always_inline void
+hicn_cs_delete_trimmed (hicn_pit_cs_t * pitcs, hicn_pcs_entry_t ** pcs_entryp,
+ hicn_hash_entry_t * hash_entry,
+ hicn_hash_node_t ** node, vlib_main_t * vm);
+
+/* Function implementation */
+/* Accessor for pit/cs data stored inside a hash table node */
+static inline hicn_pcs_entry_t *
+hicn_pit_get_data (hicn_hash_node_t * node)
+{
+  void *data = hicn_hashtb_node_data (node);
+  return ((hicn_pcs_entry_t *) data);
+}
+
+/* Init a PIT data block (usually inside a hash table node) */
+static inline void
+hicn_pit_init_data (hicn_pcs_entry_t * p)
+{
+  hicn_face_bucket_t *bucket;
+
+  p->shared.entry_flags = 0;
+  p->u.pit.faces.n_faces = 0;
+  p->u.pit.faces.is_overflow = 0;
+
+  /* Attach a face bucket from the pool, stored as a pool index */
+  pool_get (hicn_face_bucket_pool, bucket);
+  p->u.pit.faces.next_bucket = bucket - hicn_face_bucket_pool;
+}
+
+/* Init a CS data block (usually inside a hash table node).
+ * NOTE(review): fields are cleared through the pit member of the union,
+ * which aliases the same storage as the cs view — presumably intentional
+ * since no CS-specific field is set here; confirm. */
+static inline void
+hicn_cs_init_data (hicn_pcs_entry_t * p)
+{
+  p->shared.entry_flags = 0;
+  p->u.pit.faces.n_faces = 0;
+  p->u.pit.faces.is_overflow = 0;
+}
+
+
+/* Convert a lifetime in msec into an absolute expiry time in seconds */
+static inline f64
+hicn_pcs_get_exp_time (f64 cur_time_sec, u64 lifetime_msec)
+{
+  f64 lifetime_sec = ((f64) lifetime_msec) / SEC_MS;
+  return (cur_time_sec + lifetime_sec);
+}
+
+/*
+ * Configure CS LRU limit. Zero is accepted, means 'no limit', probably not a
+ * good choice. Takes effect on subsequent insertions; no trimming is
+ * triggered here.
+ */
+static inline void
+hicn_pit_set_lru_max (hicn_pit_cs_t * p, u32 limit)
+{
+  p->policy_state.max = limit;
+}
+
+/*
+ * Accessor for PIT interest counter.
+ */
+static inline u32
+hicn_pit_get_int_count (const hicn_pit_cs_t * pitcs)
+{
+  u32 count = pitcs->pcs_pit_count;
+  return (count);
+}
+
+/*
+ * Accessor for the number of CS entries in the PCS.
+ */
+static inline u32
+hicn_pit_get_cs_count (const hicn_pit_cs_t * pitcs)
+{
+  u32 count = pitcs->pcs_cs_count;
+  return (count);
+}
+
+/* Accessor for the number of entries tracked by the CS policy */
+static inline u32
+hicn_pcs_get_ntw_count (const hicn_pit_cs_t * pitcs)
+{
+  u32 count = pitcs->policy_state.count;
+  return (count);
+}
+
+/* Accessor for the number of overflow buckets used by the hash table */
+static inline u32
+hicn_pit_get_htb_bucket_count (const hicn_pit_cs_t * pitcs)
+{
+  u32 used = pitcs->pcs_table->ht_overflow_buckets_used;
+  return (used);
+}
+
+/* Return non-zero when the CS feature is compiled in and has a budget */
+static inline int
+hicn_cs_enabled (hicn_pit_cs_t * pit)
+{
+  if (HICN_FEATURE_CS == 1)
+    return (pit->policy_state.max > 0);
+
+  return (0);
+}
+
+/*
+ * Delete a PIT/CS entry from the hashtable, freeing the hash node struct.
+ * The caller's pointers are zeroed! If cs_trim is true, entry has already
+ * been removed from the lru list. The main purpose of this wrapper is
+ * helping maintain the per-PIT stats. Assumes the entry holds no more
+ * locks (callers check hash_entry->locks first).
+ */
+always_inline void
+hicn_pcs_delete_internal (hicn_pit_cs_t * pitcs,
+			  hicn_pcs_entry_t ** pcs_entryp,
+			  hicn_hash_entry_t * hash_entry,
+			  hicn_hash_node_t ** node, vlib_main_t * vm,
+			  const hicn_dpo_vft_t * dpo_vft,
+			  dpo_id_t * hicn_dpo_id)
+{
+  hicn_pcs_entry_t *pcs = *pcs_entryp;
+
+  ASSERT (pcs == hicn_hashtb_node_data (*node));
+
+  if (hash_entry->he_flags & HICN_HASH_ENTRY_FLAG_CS_ENTRY)
+    {
+      /* CS entry: release the cached packet buffer */
+      pitcs->pcs_cs_dealloc++;
+      /* Free any associated packet buffer */
+      vlib_buffer_free_one (vm, pcs->u.cs.cs_pkt_buf);
+      pcs->u.cs.cs_pkt_buf = ~0;
+      /* Entry must already be detached from the LRU list */
+      ASSERT ((pcs->u.cs.cs_lru_prev == 0)
+	      && (pcs->u.cs.cs_lru_prev == pcs->u.cs.cs_lru_next));
+    }
+  else
+    {
+      /* PIT entry: drop the strategy ctx reference and the faces */
+      pitcs->pcs_pit_dealloc++;
+      hicn_strategy_dpo_ctx_unlock (hicn_dpo_id);
+
+      /* Flush faces */
+      hicn_faces_flush (&(pcs->u.pit.faces));
+    }
+
+  /* Remove the node from the hash table and zero the caller's pointer */
+  hicn_hashtb_delete (pitcs->pcs_table, node, hash_entry->he_msb64);
+  *pcs_entryp = NULL;
+}
+
+/*
+ * Convert a PIT entry into a CS entry (assumes that the entry is already in
+ * the hashtable.) This is primarily here to maintain the internal counters.
+ * May trigger an eviction if the CS exceeds its configured maximum.
+ */
+always_inline void
+hicn_pit_to_cs (vlib_main_t * vm, hicn_pit_cs_t * pitcs,
+		hicn_pcs_entry_t * pcs_entry, hicn_hash_entry_t * hash_entry,
+		hicn_hash_node_t * node, const hicn_dpo_vft_t * dpo_vft,
+		dpo_id_t * hicn_dpo_id, hicn_face_id_t inface_id, u8 is_appface)
+{
+
+  /*
+   * Different from the insert node. In here we don't need to add a new
+   * hash entry.
+   */
+  pitcs->pcs_pit_count--;
+  /* Release PIT-side state: strategy ctx reference and pending faces */
+  hicn_strategy_dpo_ctx_unlock (hicn_dpo_id);
+  /* Flush faces */
+  hicn_faces_flush (&(pcs_entry->u.pit.faces));
+
+  /* Re-tag the entry as CS at every level (hash entry, node, pcs entry) */
+  hash_entry->he_flags |= HICN_HASH_ENTRY_FLAG_CS_ENTRY;
+  node->hn_flags |= HICN_HASH_NODE_CS_FLAGS;
+  pcs_entry->shared.entry_flags |= HICN_PCS_ENTRY_CS_FLAG;
+
+  pcs_entry->u.cs.cs_rxface = inface_id;
+
+  /* Update the CS according to the policy */
+  hicn_cs_policy_t *policy_state;
+  hicn_cs_policy_vft_t *policy_vft;
+
+  policy_state = &pitcs->policy_state;
+  policy_vft = &pitcs->policy_vft;
+
+  policy_vft->hicn_cs_insert (pitcs, node, pcs_entry, policy_state);
+  pitcs->pcs_cs_count++;
+
+  if (policy_state->count > policy_state->max)
+    {
+      /* CS over budget: evict the victim chosen by the policy.
+       * The locals below intentionally shadow the parameters. */
+      hicn_hash_node_t *node;
+      hicn_pcs_entry_t *pcs_entry;
+      hicn_hash_entry_t *hash_entry;
+      policy_vft->hicn_cs_delete_get (pitcs, policy_state,
+				      &node, &pcs_entry, &hash_entry);
+
+
+      /*
+       * We don't have to decrease the lock (therefore we cannot
+       * use hicn_pcs_cs_delete function)
+       */
+      policy_vft->hicn_cs_dequeue (pitcs, node, pcs_entry, policy_state);
+
+      hicn_cs_delete_trimmed (pitcs, &pcs_entry, hash_entry, &node, vm);
+
+      /* Update the global CS counter */
+      pitcs->pcs_cs_count--;
+    }
+}
+
+/* Functions specific for PIT or CS */
+
+/**
+ * @brief Refresh an existing CS entry on arrival of new data.
+ *
+ * If the ingress face changed, the entry is moved between policy queues
+ * (dequeue + insert), possibly triggering an eviction; otherwise the
+ * policy update hook (e.g. LRU promotion) is invoked.
+ */
+always_inline void
+hicn_pcs_cs_update (vlib_main_t * vm, hicn_pit_cs_t * pitcs,
+		    hicn_pcs_entry_t * old_entry, hicn_pcs_entry_t * entry,
+		    hicn_hash_node_t * node)
+{
+  hicn_cs_policy_t *policy_state;
+  hicn_cs_policy_vft_t *policy_vft;
+
+  policy_state = &pitcs->policy_state;
+  policy_vft = &pitcs->policy_vft;
+
+  if (entry->u.cs.cs_rxface != old_entry->u.cs.cs_rxface)
+    {
+      /* Dequeue content from the old queue */
+      policy_vft->hicn_cs_dequeue (pitcs, node, old_entry, policy_state);
+
+      /* Requeue with the new ingress face */
+      old_entry->u.cs.cs_rxface = entry->u.cs.cs_rxface;
+      policy_vft->hicn_cs_insert (pitcs, node, old_entry, policy_state);
+
+      if (policy_state->count > policy_state->max)
+	{
+	  /* CS over budget: evict the victim chosen by the policy */
+	  hicn_hash_node_t *node;
+	  hicn_pcs_entry_t *pcs_entry;
+	  hicn_hash_entry_t *hash_entry;
+	  policy_vft->hicn_cs_delete_get (pitcs, policy_state,
+					  &node, &pcs_entry, &hash_entry);
+
+	  /*
+	   * We don't have to decrease the lock (therefore we cannot
+	   * use hicn_pcs_cs_delete function)
+	   */
+	  policy_vft->hicn_cs_dequeue (pitcs, node, pcs_entry, policy_state);
+
+	  hicn_cs_delete_trimmed (pitcs, &pcs_entry, hash_entry, &node, vm);
+
+	  /* Update the global CS counter */
+	  pitcs->pcs_cs_count--;
+	}
+    }
+  else
+    /* Update the CS LRU, moving this item to the head */
+    policy_vft->hicn_cs_update (pitcs, node, old_entry, policy_state);
+}
+
+/*
+ * Delete a CS entry: dequeue it from the policy (unless already marked
+ * deleted), then free it when the last lock is released or defer the
+ * removal by flagging it HICN_HASH_ENTRY_FLAG_DELETED.
+ */
+always_inline void
+hicn_pcs_cs_delete (vlib_main_t * vm, hicn_pit_cs_t * pitcs,
+		    hicn_pcs_entry_t ** pcs_entryp, hicn_hash_node_t ** nodep,
+		    hicn_hash_entry_t * hash_entry,
+		    const hicn_dpo_vft_t * dpo_vft, dpo_id_t * hicn_dpo_id)
+{
+  if (!(hash_entry->he_flags & HICN_HASH_ENTRY_FLAG_DELETED))
+    {
+      /* First deletion attempt: remove from the policy queue */
+      hicn_cs_policy_t *policy_state;
+      hicn_cs_policy_vft_t *policy_vft;
+
+      policy_state = &pitcs->policy_state;
+      policy_vft = &pitcs->policy_vft;
+
+      policy_vft->hicn_cs_dequeue (pitcs, (*nodep), (*pcs_entryp),
+				   policy_state);
+
+      /* Update the global CS counter */
+      pitcs->pcs_cs_count--;
+    }
+
+  /* A data could have been inserted in the CS through a push. In this case locks == 0 */
+  hash_entry->locks--;
+  if (hash_entry->locks == 0)
+    {
+      hicn_pcs_delete_internal
+	(pitcs, pcs_entryp, hash_entry, nodep, vm, dpo_vft, hicn_dpo_id);
+    }
+  else
+    {
+      /* Still referenced: defer the actual removal */
+      hash_entry->he_flags |= HICN_HASH_ENTRY_FLAG_DELETED;
+    }
+}
+
+/*
+ * Insert a CS entry into the hashtable, tag it as CS at every level, and
+ * enqueue it in the eviction policy; evicts one entry if the CS exceeds
+ * its maximum. Returns the hicn_hashtb_insert return code.
+ */
+always_inline int
+hicn_pcs_cs_insert (vlib_main_t * vm, hicn_pit_cs_t * pitcs,
+		    hicn_pcs_entry_t * entry, hicn_hash_node_t * node,
+		    hicn_hash_entry_t ** hash_entry, u64 hashval,
+		    u32 * node_id, index_t * dpo_ctx_id, u8 * vft_id,
+		    u8 * is_cs, u8 * hash_entry_id, u32 * bucket_id,
+		    u8 * bucket_is_overflow)
+{
+  ASSERT (entry == hicn_hashtb_node_data (node));
+
+  int ret =
+    hicn_hashtb_insert (pitcs->pcs_table, node, hash_entry, hashval, node_id,
+			dpo_ctx_id, vft_id, is_cs, hash_entry_id, bucket_id,
+			bucket_is_overflow);
+
+  if (PREDICT_TRUE (ret == HICN_ERROR_NONE))
+    {
+      /* Mark the entry as a CS entry */
+      node->hn_flags |= HICN_HASH_NODE_CS_FLAGS;
+      entry->shared.entry_flags |= HICN_PCS_ENTRY_CS_FLAG;
+      (*hash_entry)->he_flags |= HICN_HASH_ENTRY_FLAG_CS_ENTRY;
+
+      hicn_cs_policy_t *policy_state;
+      hicn_cs_policy_vft_t *policy_vft;
+
+      policy_state = &pitcs->policy_state;
+      policy_vft = &pitcs->policy_vft;
+
+      policy_vft->hicn_cs_insert (pitcs, node, entry, policy_state);
+      pitcs->pcs_cs_count++;
+
+      if (policy_state->count > policy_state->max)
+	{
+	  /* CS over budget: evict the victim chosen by the policy */
+	  hicn_hash_node_t *node;
+	  hicn_pcs_entry_t *pcs_entry;
+	  hicn_hash_entry_t *hash_entry;
+	  policy_vft->hicn_cs_delete_get (pitcs, policy_state,
+					  &node, &pcs_entry, &hash_entry);
+
+	  /*
+	   * We don't have to decrease the lock (therefore we cannot
+	   * use hicn_pcs_cs_delete function)
+	   */
+	  policy_vft->hicn_cs_dequeue (pitcs, node, pcs_entry, policy_state);
+
+	  hicn_cs_delete_trimmed (pitcs, &pcs_entry, hash_entry, &node, vm);
+
+	  /* Update the global CS counter */
+	  pitcs->pcs_cs_count--;
+	}
+    }
+  return ret;
+}
+
+/*
+ * Insert CS entry into the hashtable. The main purpose of this wrapper is
+ * helping maintain the per-PIT stats. If an entry with the same name is
+ * already cached, its buffer and timestamps are replaced in place instead.
+ */
+always_inline int
+hicn_pcs_cs_insert_update (vlib_main_t * vm, hicn_pit_cs_t * pitcs,
+			   hicn_pcs_entry_t * entry, hicn_hash_node_t * node,
+			   hicn_hash_entry_t ** hash_entry, u64 hashval,
+			   u32 * node_id, index_t * dpo_ctx_id, u8 * vft_id,
+			   u8 * is_cs, u8 * hash_entry_id, u32 * bucket_id,
+			   u8 * bucket_is_overflow, hicn_face_id_t inface)
+{
+  int ret;
+
+  ASSERT (entry == hicn_hashtb_node_data (node));
+
+  entry->u.cs.cs_rxface = inface;
+  ret =
+    hicn_pcs_cs_insert (vm, pitcs, entry, node, hash_entry, hashval, node_id,
+			dpo_ctx_id, vft_id, is_cs, hash_entry_id, bucket_id,
+			bucket_is_overflow);
+
+  /* A content already exists in CS with the same name */
+  if (ret == HICN_ERROR_HASHTB_EXIST && *is_cs)
+    {
+      /* Update the entry */
+      hicn_hash_node_t *existing_node =
+	hicn_hashtb_node_from_idx (pitcs->pcs_table, *node_id);
+      hicn_pcs_entry_t *pitp = hicn_pit_get_data (existing_node);
+
+      /* Free associated packet buffer and update counter */
+      pitcs->pcs_cs_dealloc++;
+      vlib_buffer_free_one (vm, pitp->u.cs.cs_pkt_buf);
+
+      /* Take over the new entry's timestamps and packet buffer */
+      pitp->shared.create_time = entry->shared.create_time;
+      pitp->shared.expire_time = entry->shared.expire_time;
+      pitp->u.cs.cs_pkt_buf = entry->u.cs.cs_pkt_buf;
+
+      hicn_pcs_cs_update (vm, pitcs, pitp, entry, existing_node);
+    }
+
+  return (ret);
+}
+
+/*
+ * Insert PIT entry into the hashtable. The main purpose of this wrapper is
+ * helping maintain the per-PIT stats.
+ */
+always_inline int
+hicn_pcs_pit_insert (hicn_pit_cs_t * pitcs, hicn_pcs_entry_t * entry,
+		     hicn_hash_node_t * node, hicn_hash_entry_t ** hash_entry,
+		     u64 hashval, u32 * node_id, index_t * dpo_ctx_id,
+		     u8 * vft_id, u8 * is_cs, u8 * hash_entry_id,
+		     u32 * bucket_id, u8 * bucket_is_overflow)
+{
+  int rv;
+
+  ASSERT (entry == hicn_hashtb_node_data (node));
+
+  rv =
+    hicn_hashtb_insert (pitcs->pcs_table, node, hash_entry, hashval, node_id,
+			dpo_ctx_id, vft_id, is_cs, hash_entry_id, bucket_id,
+			bucket_is_overflow);
+
+  /* Count the new PIT entry only on successful insertion */
+  if (PREDICT_TRUE (rv == HICN_ERROR_NONE))
+    pitcs->pcs_pit_count++;
+
+  return rv;
+}
+
+/*
+ * Delete a PIT entry: free it when the last lock is released, otherwise
+ * mark it for deferred deletion.
+ */
+always_inline void
+hicn_pcs_pit_delete (hicn_pit_cs_t * pitcs, hicn_pcs_entry_t ** pcs_entryp,
+		     hicn_hash_node_t ** node, vlib_main_t * vm,
+		     hicn_hash_entry_t * hash_entry,
+		     const hicn_dpo_vft_t * dpo_vft, dpo_id_t * hicn_dpo_id)
+{
+  /* Drop one reference; the last holder performs the actual removal */
+  hash_entry->locks--;
+  if (hash_entry->locks != 0)
+    {
+      /* Still referenced: only mark the entry for deferred deletion */
+      hash_entry->he_flags |= HICN_HASH_ENTRY_FLAG_DELETED;
+      return;
+    }
+
+  pitcs->pcs_pit_count--;
+  hicn_pcs_delete_internal
+    (pitcs, pcs_entryp, hash_entry, node, vm, dpo_vft, hicn_dpo_id);
+}
+
+
+/* Generic functions for PIT/CS */
+
+/*
+ * Insert PIT/CS entry into the hashtable. The main purpose of this wrapper
+ * is helping maintain the per-PIT stats; it dispatches on the entry kind
+ * recorded in the hash entry flags.
+ */
+always_inline int
+hicn_pcs_insert (vlib_main_t * vm, hicn_pit_cs_t * pitcs,
+		 hicn_pcs_entry_t * entry, hicn_hash_node_t * node,
+		 hicn_hash_entry_t ** hash_entry, u64 hashval, u32 * node_id,
+		 index_t * dpo_ctx_id, u8 * vft_id, u8 * is_cs,
+		 u8 * hash_entry_id, u32 * bucket_id, u8 * bucket_is_overflow)
+{
+  if ((*hash_entry)->he_flags & HICN_HASH_ENTRY_FLAG_CS_ENTRY)
+    return (hicn_pcs_cs_insert (vm, pitcs, entry, node, hash_entry, hashval,
+				node_id, dpo_ctx_id, vft_id, is_cs,
+				hash_entry_id, bucket_id,
+				bucket_is_overflow));
+
+  return (hicn_pcs_pit_insert (pitcs, entry, node, hash_entry, hashval,
+			       node_id, dpo_ctx_id, vft_id, is_cs,
+			       hash_entry_id, bucket_id,
+			       bucket_is_overflow));
+}
+
+
+/*
+ * Delete entry if there are no pending locks on the entry, otherwise mark
+ * it as to delete. Dispatches to the CS or PIT deletion path depending on
+ * the entry kind.
+ */
+always_inline void
+hicn_pcs_delete (hicn_pit_cs_t * pitcs, hicn_pcs_entry_t ** pcs_entryp,
+		 hicn_hash_node_t ** nodep, vlib_main_t * vm,
+		 hicn_hash_entry_t * hash_entry,
+		 const hicn_dpo_vft_t * dpo_vft, dpo_id_t * hicn_dpo_id)
+{
+  /*
+   * If the entry has already been marked as deleted, it has already
+   * been dequeued
+   */
+  if (hash_entry->he_flags & HICN_HASH_ENTRY_FLAG_CS_ENTRY)
+    {
+      hicn_pcs_cs_delete (vm, pitcs, pcs_entryp, nodep, hash_entry,
+			  dpo_vft, hicn_dpo_id);
+      return;
+    }
+
+  hicn_pcs_pit_delete (pitcs, pcs_entryp, nodep, vm,
+		       hash_entry, dpo_vft, hicn_dpo_id);
+}
+
+/*
+ * Remove a lock on the entry and delete it if there are no pending locks
+ * and the entry is marked as to be deleted.
+ */
+always_inline void
+hicn_pcs_remove_lock (hicn_pit_cs_t * pitcs, hicn_pcs_entry_t ** pcs_entryp,
+		      hicn_hash_node_t ** node, vlib_main_t * vm,
+		      hicn_hash_entry_t * hash_entry,
+		      const hicn_dpo_vft_t * dpo_vft, dpo_id_t * hicn_dpo_id)
+{
+  hash_entry->locks--;
+
+  /* Nothing to do unless this was the last lock on a doomed entry */
+  if (hash_entry->locks != 0)
+    return;
+  if (!(hash_entry->he_flags & HICN_HASH_ENTRY_FLAG_DELETED))
+    return;
+
+  hicn_pcs_delete_internal
+    (pitcs, pcs_entryp, hash_entry, node, vm, dpo_vft, hicn_dpo_id);
+}
+
+/*
+ * Delete entry which has already been bulk-removed from the lru list.
+ * Rebuilds the strategy dpo id from the hash entry's stored vft/ctx ids
+ * since the caller (a trim path) does not carry one.
+ */
+always_inline void
+hicn_cs_delete_trimmed (hicn_pit_cs_t * pitcs, hicn_pcs_entry_t ** pcs_entryp,
+			hicn_hash_entry_t * hash_entry,
+			hicn_hash_node_t ** node, vlib_main_t * vm)
+{
+
+
+  if (hash_entry->locks == 0)
+    {
+      /* Reconstruct the dpo id needed by the internal delete */
+      const hicn_dpo_vft_t *dpo_vft = hicn_dpo_get_vft (hash_entry->vft_id);
+      dpo_id_t hicn_dpo_id =
+	{ dpo_vft->hicn_dpo_get_type (), 0, 0, hash_entry->dpo_ctx_id };
+
+      hicn_pcs_delete_internal
+	(pitcs, pcs_entryp, hash_entry, node, vm, dpo_vft, &hicn_dpo_id);
+    }
+  else
+    {
+      /* Still referenced: defer the actual removal */
+      hash_entry->he_flags |= HICN_HASH_ENTRY_FLAG_DELETED;
+    }
+}
+
+/*
+ * wrappable counter math (assumed uint16_t): return sum of addends
+ */
+always_inline u16
+hicn_infra_seq16_sum (u16 addend1, u16 addend2)
+{
+  u16 sum = (u16) (addend1 + addend2);
+  return (sum);
+}
+
+/*
+ * for comparing wrapping numbers, return lt,eq,gt 0 for a lt,eq,gt b
+ */
+always_inline int
+hicn_infra_seq16_cmp (u16 a, u16 b)
+{
+  int16_t delta = (int16_t) (a - b);
+  return ((int) delta);
+}
+
+/*
+ * below are wrappers for lt, le, gt, ge seq16 comparators
+ */
+always_inline int
+hicn_infra_seq16_lt (u16 a, u16 b)
+{
+  int cmp = hicn_infra_seq16_cmp (a, b);
+  return (cmp < 0);
+}
+
+always_inline int
+hicn_infra_seq16_le (u16 a, u16 b)
+{
+  int cmp = hicn_infra_seq16_cmp (a, b);
+  return (cmp <= 0);
+}
+
+always_inline int
+hicn_infra_seq16_gt (u16 a, u16 b)
+{
+  int cmp = hicn_infra_seq16_cmp (a, b);
+  return (cmp > 0);
+}
+
+always_inline int
+hicn_infra_seq16_ge (u16 a, u16 b)
+{
+  int cmp = hicn_infra_seq16_cmp (a, b);
+  return (cmp >= 0);
+}
+
+
+extern u16 hicn_infra_fast_timer; /* Counts at 1 second intervals */
+extern u16 hicn_infra_slow_timer; /* Counts at 1 minute intervals */
+
+/*
+ * Utilities to convert lifetime into expiry time based on compressed clock,
+ * suitable for the opportunistic hashtable entry timeout processing.
+ */
+
+/* Convert time in msec to time in clicks, rounding up to a whole click */
+always_inline u16
+hicn_infra_ms2clicks (u64 time_ms, u64 ms_per_click)
+{
+  f64 clicks_rounded_up =
+    ((f64) (time_ms + ms_per_click - 1)) / ((f64) ms_per_click);
+  return ((u16) clicks_rounded_up);
+}
+
+/* Expiry click on the fast (1s) timer wheel for a lifetime in msec */
+always_inline u16
+hicn_infra_get_fast_exp_time (u64 lifetime_ms)
+{
+  u16 clicks =
+    hicn_infra_ms2clicks (lifetime_ms, HICN_INFRA_FAST_TIMER_MSECS);
+  return (hicn_infra_seq16_sum (hicn_infra_fast_timer, clicks));
+}
+
+/* Expiry click on the slow (1min) timer wheel for a lifetime in msec */
+always_inline u16
+hicn_infra_get_slow_exp_time (u64 lifetime_ms)
+{
+  u16 clicks =
+    hicn_infra_ms2clicks (lifetime_ms, HICN_INFRA_SLOW_TIMER_MSECS);
+  return (hicn_infra_seq16_sum (hicn_infra_slow_timer, clicks));
+}
+
+#endif /* // __HICN_PCS_H__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/network/pg.c b/hicn-plugin/src/network/pg.c
new file mode 100644
index 000000000..eb833030d
--- /dev/null
+++ b/hicn-plugin/src/network/pg.c
@@ -0,0 +1,1326 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vnet/pg/pg.h>
+#include <vnet/ip/ip.h>
+#include <vnet/ethernet/ethernet.h>
+
+#include "hicn.h"
+#include "pg.h"
+#include "parser.h"
+#include "infra.h"
+
+/* Registration struct for a graph node */
+vlib_node_registration_t hicn_pg_interest_node;
+vlib_node_registration_t hicn_pg_data_node;
+
+/* Stats, which end up called "error" even though they aren't... */
+#define foreach_hicnpg_error                                    \
+ _(PROCESSED, "hICN PG packets processed")                      \
+ _(DROPPED, "hICN PG packets dropped")                          \
+ _(INTEREST_MSGS_GENERATED, "hICN PG Interests generated")      \
+ _(CONTENT_MSGS_RECEIVED, "hICN PG Content msgs received")
+
+/* Error/counter indices generated from the list above; shared by the
+ * hicnpg-interest and hicnpg-data nodes. */
+typedef enum
+{
+#define _(sym,str) HICNPG_ERROR_##sym,
+  foreach_hicnpg_error
+#undef _
+  HICNPG_N_ERROR,
+} hicnpg_error_t;
+
+/* Human-readable strings for the counters, in the same order. */
+static char *hicnpg_error_strings[] = {
+#define _(sym,string) string,
+  foreach_hicnpg_error
+#undef _
+};
+
+/*
+ * Next graph nodes, which reference the list in the actual registration
+ * block below
+ */
+typedef enum
+{
+  HICNPG_INTEREST_NEXT_V4_LOOKUP,
+  HICNPG_INTEREST_NEXT_V6_LOOKUP,
+  HICNPG_INTEREST_NEXT_DROP,
+  HICNPG_N_NEXT,
+} hicnpg_interest_next_t;
+
+/* Trace context struct for the hicnpg-interest node. pkt_type and
+ * msg_type are currently always recorded as 0 (placeholders). */
+typedef struct
+{
+  u32 next_index;
+  u32 sw_if_index;
+  u8 pkt_type;
+  u16 msg_type;
+} hicnpg_trace_t;
+
+/* Client-side generator state. index counts generated interests and
+ * drives both the flow selection (index % n_flows) and the sequence
+ * number (index / n_flows, modulo max_seq_number); index_ifaces
+ * round-robins over the emulated consumer interfaces. */
+hicnpg_main_t hicnpg_main = {
+  .index = (u32) 0,
+  .index_ifaces = (u32) 1,
+  .max_seq_number = (u32) ~ 0,
+  .interest_lifetime = 4,
+  .n_flows = (u32) 0,
+  .n_ifaces = (u32) 1,
+  .sw_if = (u32) 0
+};
+
+/* Server-side generator state. */
+hicnpg_server_main_t hicnpg_server_main = {
+  .node_index = 0,
+};
+
+/* Format one hicnpg_trace_t record for "show trace" output. */
+static u8 *
+format_hicnpg_trace (u8 * s, va_list * args)
+{
+  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+  hicnpg_trace_t *trace = va_arg (*args, hicnpg_trace_t *);
+
+  return format (s, "HICNPG: pkt: %d, msg %d, sw_if_index %d, next index %d",
+		 (int) trace->pkt_type, (int) trace->msg_type,
+		 trace->sw_if_index, trace->next_index);
+}
+
+/* Forward declarations for the rewrite / data-generation / checksum
+ * helpers defined after the node functions below. */
+always_inline void
+hicn_rewrite_interestv4 (vlib_main_t * vm, vlib_buffer_t * b0, u32 seq_number,
+			 u16 lifetime, u32 next_flow, u32 iface);
+
+always_inline void
+hicn_rewrite_interestv6 (vlib_main_t * vm, vlib_buffer_t * b0, u32 seq_number,
+			 u16 lifetime, u32 next_flow, u32 iface);
+
+always_inline void
+convert_interest_to_data_v4 (vlib_main_t * vm, vlib_buffer_t * b0,
+			     vlib_buffer_t * rb, u32 bi0);
+
+always_inline void
+convert_interest_to_data_v6 (vlib_main_t * vm, vlib_buffer_t * b0,
+			     vlib_buffer_t * rb, u32 bi0);
+
+always_inline void
+calculate_tcp_checksum_v4 (vlib_main_t * vm, vlib_buffer_t * b0);
+
+always_inline void
+calculate_tcp_checksum_v6 (vlib_main_t * vm, vlib_buffer_t * b0);
+/*
+ * Node function for the icn packet-generator client. The goal here is to
+ * manipulate/tweak a stream of packets that have been injected by the vpp
+ * packet generator to generate icn request traffic.
+ */
+static uword
+hicnpg_client_interest_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
+				vlib_frame_t * frame)
+{
+  u32 n_left_from, *from, *to_next;
+  hicnpg_interest_next_t next_index;
+  u32 pkts_processed = 0, pkts_dropped = 0;
+  u32 interest_msgs_generated = 0;
+  u32 bi0, bi1;
+  vlib_buffer_t *b0, *b1;
+  /* pkt_type/msg_type are never assigned in this node; traces always
+   * record them as 0. */
+  u8 pkt_type0 = 0, pkt_type1 = 0;
+  u16 msg_type0 = 0, msg_type1 = 0;
+  hicn_header_t *hicn0 = NULL, *hicn1 = NULL;
+  hicn_name_t name0, name1;
+  u16 namelen0, namelen1;
+  hicnpg_main_t *hpgm = &hicnpg_main;
+  int iface = 0;
+
+  from = vlib_frame_vector_args (frame);
+  n_left_from = frame->n_vectors;
+  next_index = node->cached_next_index;
+
+  /* Standard VPP dispatch: dual-loop (two buffers per iteration, with
+   * prefetch of the next pair) followed by a single-buffer cleanup loop. */
+  while (n_left_from > 0)
+    {
+      u32 n_left_to_next;
+
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+      while (n_left_from >= 4 && n_left_to_next >= 2)
+	{
+	  u32 next0 = HICNPG_INTEREST_NEXT_DROP;
+	  u32 next1 = HICNPG_INTEREST_NEXT_DROP;
+	  u32 sw_if_index0 = ~0, sw_if_index1 = ~0;
+	  u8 isv6_0;
+	  u8 isv6_1;
+
+	  /* Prefetch next iteration. */
+	  {
+	    vlib_buffer_t *p2, *p3;
+
+	    p2 = vlib_get_buffer (vm, from[2]);
+	    p3 = vlib_get_buffer (vm, from[3]);
+
+	    vlib_prefetch_buffer_header (p2, LOAD);
+	    vlib_prefetch_buffer_header (p3, LOAD);
+
+	    CLIB_PREFETCH (p2->data, (2 * CLIB_CACHE_LINE_BYTES), STORE);
+	    CLIB_PREFETCH (p3->data, (2 * CLIB_CACHE_LINE_BYTES), STORE);
+	  }
+
+	  /*
+	   * speculatively enqueue b0 and b1 to the current
+	   * next frame
+	   */
+	  to_next[0] = bi0 = from[0];
+	  to_next[1] = bi1 = from[1];
+	  from += 2;
+	  to_next += 2;
+	  n_left_from -= 2;
+	  n_left_to_next -= 2;
+
+	  b0 = vlib_get_buffer (vm, bi0);
+	  b1 = vlib_get_buffer (vm, bi1);
+
+	  /* Remember the pg input interface for tracing, then re-attribute
+	   * the packet to the configured hICN interface. */
+	  sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+	  sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
+	  vnet_buffer (b0)->sw_if_index[VLIB_RX] = hpgm->sw_if;
+	  vnet_buffer (b1)->sw_if_index[VLIB_RX] = hpgm->sw_if;
+
+	  /* Check icn packets, locate names */
+	  if (hicn_interest_parse_pkt (b0, &name0, &namelen0, &hicn0, &isv6_0)
+	      == HICN_ERROR_NONE)
+	    {
+	      /* this node grabs only interests */
+
+	      /* Increment the appropriate message counter */
+	      interest_msgs_generated++;
+
+	      /* Round-robin over the emulated consumer interfaces; the
+	       * flow/sequence counter advances once per full round (see
+	       * hpgm->index++ below). */
+	      iface = (hpgm->index_ifaces % hpgm->n_ifaces);
+	      /* Rewrite and send */
+	      isv6_0 ? hicn_rewrite_interestv6 (vm, b0,
+						(hpgm->index /
+						 hpgm->n_flows) %
+						hpgm->max_seq_number,
+						hpgm->interest_lifetime,
+						hpgm->index % hpgm->n_flows,
+						iface) :
+		hicn_rewrite_interestv4 (vm, b0,
+					 (hpgm->index / hpgm->n_flows) %
+					 hpgm->max_seq_number,
+					 hpgm->interest_lifetime,
+					 hpgm->index % hpgm->n_flows, iface);
+
+	      hpgm->index_ifaces++;
+	      if (iface == (hpgm->n_ifaces - 1))
+		hpgm->index++;
+
+	      next0 =
+		isv6_0 ? HICNPG_INTEREST_NEXT_V6_LOOKUP :
+		HICNPG_INTEREST_NEXT_V4_LOOKUP;
+	    }
+	  if (hicn_interest_parse_pkt (b1, &name1, &namelen1, &hicn1, &isv6_1)
+	      == HICN_ERROR_NONE)
+	    {
+	      /* this node grabs only interests */
+
+	      /* Increment the appropriate message counter */
+	      interest_msgs_generated++;
+
+	      iface = (hpgm->index_ifaces % hpgm->n_ifaces);
+	      /* Rewrite and send */
+	      isv6_1 ? hicn_rewrite_interestv6 (vm, b1,
+						(hpgm->index /
+						 hpgm->n_flows) %
+						hpgm->max_seq_number,
+						hpgm->interest_lifetime,
+						hpgm->index % hpgm->n_flows,
+						iface) :
+		hicn_rewrite_interestv4 (vm, b1,
+					 (hpgm->index / hpgm->n_flows) %
+					 hpgm->max_seq_number,
+					 hpgm->interest_lifetime,
+					 hpgm->index % hpgm->n_flows, iface);
+
+	      hpgm->index_ifaces++;
+	      if (iface == (hpgm->n_ifaces - 1))
+		hpgm->index++;
+
+	      next1 =
+		isv6_1 ? HICNPG_INTEREST_NEXT_V6_LOOKUP :
+		HICNPG_INTEREST_NEXT_V4_LOOKUP;
+	    }
+	  /* Send pkt to next node */
+	  vnet_buffer (b0)->sw_if_index[VLIB_TX] = ~0;
+	  vnet_buffer (b1)->sw_if_index[VLIB_TX] = ~0;
+
+	  pkts_processed += 2;
+
+	  if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
+	    {
+	      if (b0->flags & VLIB_BUFFER_IS_TRACED)
+		{
+		  hicnpg_trace_t *t =
+		    vlib_add_trace (vm, node, b0, sizeof (*t));
+		  t->pkt_type = pkt_type0;
+		  t->msg_type = msg_type0;
+		  t->sw_if_index = sw_if_index0;
+		  t->next_index = next0;
+		}
+	      if (b1->flags & VLIB_BUFFER_IS_TRACED)
+		{
+		  hicnpg_trace_t *t =
+		    vlib_add_trace (vm, node, b1, sizeof (*t));
+		  t->pkt_type = pkt_type1;
+		  t->msg_type = msg_type1;
+		  t->sw_if_index = sw_if_index1;
+		  t->next_index = next1;
+		}
+	    }
+	  if (next0 == HICNPG_INTEREST_NEXT_DROP)
+	    {
+	      pkts_dropped++;
+	    }
+	  if (next1 == HICNPG_INTEREST_NEXT_DROP)
+	    {
+	      pkts_dropped++;
+	    }
+	  /*
+	   * verify speculative enqueues, maybe switch current
+	   * next frame
+	   */
+	  vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
+					   to_next, n_left_to_next,
+					   bi0, bi1, next0, next1);
+	}
+
+      while (n_left_from > 0 && n_left_to_next > 0)
+	{
+	  u32 next0 = HICNPG_INTEREST_NEXT_DROP;
+	  u32 sw_if_index0;
+	  u8 isv6_0;
+
+	  /* speculatively enqueue b0 to the current next frame */
+	  bi0 = from[0];
+	  to_next[0] = bi0;
+	  from += 1;
+	  to_next += 1;
+	  n_left_from -= 1;
+	  n_left_to_next -= 1;
+
+	  b0 = vlib_get_buffer (vm, bi0);
+
+	  sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+	  vnet_buffer (b0)->sw_if_index[VLIB_RX] = hpgm->sw_if;
+
+	  /* Check icn packets, locate names */
+	  if (hicn_interest_parse_pkt (b0, &name0, &namelen0, &hicn0, &isv6_0)
+	      == HICN_ERROR_NONE)
+	    {
+	      /* this node grabs only interests */
+
+	      /* Increment the appropriate message counter */
+	      interest_msgs_generated++;
+
+	      iface = (hpgm->index_ifaces % hpgm->n_ifaces);
+
+	      /* Rewrite and send */
+	      isv6_0 ? hicn_rewrite_interestv6 (vm, b0,
+						(hpgm->index /
+						 hpgm->n_flows) %
+						hpgm->max_seq_number,
+						hpgm->interest_lifetime,
+						hpgm->index % hpgm->n_flows,
+						iface) :
+		hicn_rewrite_interestv4 (vm, b0,
+					 (hpgm->index / hpgm->n_flows) %
+					 hpgm->max_seq_number,
+					 hpgm->interest_lifetime,
+					 hpgm->index % hpgm->n_flows, iface);
+
+	      hpgm->index_ifaces++;
+	      if (iface == (hpgm->n_ifaces - 1))
+		hpgm->index++;
+
+	      next0 =
+		isv6_0 ? HICNPG_INTEREST_NEXT_V6_LOOKUP :
+		HICNPG_INTEREST_NEXT_V4_LOOKUP;
+	    }
+	  /* Send pkt to ip lookup */
+	  vnet_buffer (b0)->sw_if_index[VLIB_TX] = ~0;
+
+	  if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
+			     && (b0->flags & VLIB_BUFFER_IS_TRACED)))
+	    {
+	      hicnpg_trace_t *t = vlib_add_trace (vm, node, b0, sizeof (*t));
+	      t->pkt_type = pkt_type0;
+	      t->msg_type = msg_type0;
+	      t->sw_if_index = sw_if_index0;
+	      t->next_index = next0;
+	    }
+	  pkts_processed += 1;
+
+	  if (next0 == HICNPG_INTEREST_NEXT_DROP)
+	    {
+	      pkts_dropped++;
+	    }
+	  /*
+	   * verify speculative enqueue, maybe switch current
+	   * next frame
+	   */
+	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+					   to_next, n_left_to_next,
+					   bi0, next0);
+	}
+
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+    }
+
+  vlib_node_increment_counter (vm, hicn_pg_interest_node.index,
+			       HICNPG_ERROR_PROCESSED, pkts_processed);
+  vlib_node_increment_counter (vm, hicn_pg_interest_node.index,
+			       HICNPG_ERROR_DROPPED, pkts_dropped);
+  vlib_node_increment_counter (vm, hicn_pg_interest_node.index,
+			       HICNPG_ERROR_INTEREST_MSGS_GENERATED,
+			       interest_msgs_generated);
+
+  return (frame->n_vectors);
+}
+
+/**
+ * @brief Rewrite an IPv4 interest for the next generated packet.
+ *
+ * Source address encodes the emulated consumer (base + iface);
+ * destination name encodes the flow (base prefix + next_flow) and the
+ * sequence number (name suffix).
+ */
+void
+hicn_rewrite_interestv4 (vlib_main_t * vm, vlib_buffer_t * b0, u32 seq_number,
+			 u16 interest_lifetime, u32 next_flow, u32 iface)
+{
+  hicn_header_t *h0 = vlib_buffer_get_current (b0);
+
+  /* Generate the right src and dst corresponding to flow and iface */
+  ip46_address_t src_addr = {
+    .ip4 = hicnpg_main.pgen_clt_src_addr.ip4,
+  };
+  hicn_name_t dst_name = {
+    .prefix.ip4 = hicnpg_main.pgen_clt_hicn_name->fp_addr.ip4,
+    .suffix = seq_number,
+  };
+
+  src_addr.ip4.as_u32 += clib_host_to_net_u32 (iface);
+  dst_name.prefix.ip4.as_u32 += clib_net_to_host_u32 (next_flow);
+
+  /* Update locator and name */
+  hicn_type_t type = hicn_get_buffer (b0)->type;
+  HICN_OPS4->set_interest_locator (type, &h0->protocol, &src_addr);
+  HICN_OPS4->set_interest_name (type, &h0->protocol, &dst_name);
+
+  /* Update lifetime (checksums are recomputed below) */
+  HICN_OPS4->set_lifetime (type, &h0->protocol, interest_lifetime);
+
+  /* Update checksums */
+  HICN_OPS4->update_checksums (type, &h0->protocol, 0, 0);
+}
+
+/**
+ * @brief Rewrite the IPv6 header as the next generated packet
+ *
+ * Set up a name prefix
+ * - either generate interest in which the name varies only after the prefix
+ * (inc : seq_number), then the flow acts on the prefix (CHECK)
+ * seq_number => TCP, FLOW =>
+ *
+ * SRC : pgen_clt_src_addr.ip6 DST = generate name (pgen_clt_hicn_name.ip6)
+ * ffff:ffff:ffff:ffff ffff:ffff:ffff:ffff
+ * \__/ \__/
+ * +iface + flow
+ * Source is used to emulate different consumers.
+ * FIXME iface is ill-named, better name it consumer id
+ * Destination is used to iterate on the content.
+ */
+void
+hicn_rewrite_interestv6 (vlib_main_t * vm, vlib_buffer_t * b0, u32 seq_number,
+			 u16 interest_lifetime, u32 next_flow, u32 iface)
+{
+  hicn_header_t *h0 = vlib_buffer_get_current (b0);
+
+  /* Generate the right src and dst corresponding to flow and iface */
+  ip46_address_t src_addr = {
+    .ip6 = hicnpg_main.pgen_clt_src_addr.ip6,
+  };
+  hicn_name_t dst_name = {
+    .prefix.ip6 = hicnpg_main.pgen_clt_hicn_name->fp_addr.ip6,
+    .suffix = seq_number,
+  };
+  /* Only the low 64-bit word is perturbed: iface into the source,
+   * flow into the destination (see the diagram above). */
+  src_addr.ip6.as_u32[3] += clib_host_to_net_u32 (iface);
+  dst_name.prefix.ip6.as_u32[3] += clib_net_to_host_u32 (next_flow);
+
+  /* Update locator and name */
+  hicn_type_t type = hicn_get_buffer (b0)->type;
+  HICN_OPS6->set_interest_locator (type, &h0->protocol, &src_addr);
+  HICN_OPS6->set_interest_name (type, &h0->protocol, &dst_name);
+
+  /* Update lifetime */
+  HICN_OPS6->set_lifetime (type, &h0->protocol, interest_lifetime);
+
+  /* Update checksums. NOTE(review): unlike the v4 variant, which goes
+   * through HICN_OPS4->update_checksums, this recomputes the TCP
+   * checksum directly -- confirm the two paths stay equivalent. */
+  calculate_tcp_checksum_v6 (vm, b0);
+}
+
+
+
+/* Recompute the TCP checksum of an IPv4 packet whose L3 header starts at
+ * the buffer's current data pointer. Follows the standard pseudo-header
+ * scheme: src/dst addresses plus protocol and TCP length, then the TCP
+ * segment itself (possibly spread over a buffer chain). */
+void
+calculate_tcp_checksum_v4 (vlib_main_t * vm, vlib_buffer_t * b0)
+{
+  ip4_header_t *ip0;
+  tcp_header_t *tcp0;
+  ip_csum_t sum0;
+  u32 tcp_len0;
+
+  ip0 = (ip4_header_t *) (vlib_buffer_get_current (b0));
+  tcp0 =
+    (tcp_header_t *) (vlib_buffer_get_current (b0) + sizeof (ip4_header_t));
+  tcp_len0 = clib_net_to_host_u16 (ip0->length) - sizeof (ip4_header_t);
+
+  /* Initialize checksum with header. */
+  if (BITS (sum0) == 32)
+    {
+      sum0 = clib_mem_unaligned (&ip0->src_address, u32);
+      sum0 =
+	ip_csum_with_carry (sum0,
+			    clib_mem_unaligned (&ip0->dst_address, u32));
+    }
+  else
+    /* 64-bit accumulator: src and dst are adjacent in the header, so one
+     * u64 load covers both 32-bit addresses. */
+    sum0 = clib_mem_unaligned (&ip0->src_address, u64);
+
+  sum0 = ip_csum_with_carry
+    (sum0, clib_host_to_net_u32 (tcp_len0 + (ip0->protocol << 16)));
+
+  /* Invalidate possibly old checksum. */
+  tcp0->checksum = 0;
+
+  u32 tcp_offset = sizeof (ip4_header_t);
+  sum0 = ip_incremental_checksum_buffer (vm, b0, tcp_offset, tcp_len0, sum0);
+
+  tcp0->checksum = ~ip_csum_fold (sum0);
+}
+
+/* Recompute the TCP checksum of an IPv6 packet whose L3 header starts at
+ * the buffer's current data pointer. */
+void
+calculate_tcp_checksum_v6 (vlib_main_t * vm, vlib_buffer_t * b0)
+{
+  ip6_header_t *ip0;
+  tcp_header_t *tcp0;
+  ip_csum_t sum0;
+  u32 tcp_len0;
+  int i;
+
+  ip0 = (ip6_header_t *) (vlib_buffer_get_current (b0));
+  tcp0 =
+    (tcp_header_t *) (vlib_buffer_get_current (b0) + sizeof (ip6_header_t));
+  tcp_len0 = clib_net_to_host_u16 (ip0->payload_length);
+
+  /*
+   * Initialize the checksum with the IPv6 pseudo-header. The full
+   * 128-bit source AND destination addresses must be folded in: the
+   * previous code was copy-pasted from the v4 variant and only summed
+   * the first words of the source address, yielding wrong checksums.
+   * This mirrors VPP's ip6_tcp_udp_icmp_compute_checksum.
+   */
+  sum0 = 0;
+  for (i = 0; i < ARRAY_LEN (ip0->src_address.as_uword); i++)
+    {
+      sum0 = ip_csum_with_carry (sum0, ip0->src_address.as_uword[i]);
+      sum0 = ip_csum_with_carry (sum0, ip0->dst_address.as_uword[i]);
+    }
+
+  /* Pseudo-header upper-layer length and next-header value. */
+  sum0 = ip_csum_with_carry
+    (sum0, clib_host_to_net_u32 (tcp_len0 + (ip0->protocol << 16)));
+
+  /* Invalidate possibly old checksum. */
+  tcp0->checksum = 0;
+
+  u32 tcp_offset = sizeof (ip6_header_t);
+  sum0 = ip_incremental_checksum_buffer (vm, b0, tcp_offset, tcp_len0, sum0);
+
+  tcp0->checksum = ~ip_csum_fold (sum0);
+}
+
+/* *INDENT-OFF* */
+/* Graph-node registration for the client-side interest generator. */
+VLIB_REGISTER_NODE(hicn_pg_interest_node) ={
+  .function = hicnpg_client_interest_node_fn,
+  .name = "hicnpg-interest",
+  .vector_size = sizeof(u32),
+  .format_trace = format_hicnpg_trace,
+  .type = VLIB_NODE_TYPE_INTERNAL,
+  .n_errors = ARRAY_LEN(hicnpg_error_strings),
+  .error_strings = hicnpg_error_strings,
+  .n_next_nodes = HICNPG_N_NEXT,
+  .next_nodes =
+  {
+    [HICNPG_INTEREST_NEXT_V4_LOOKUP] = "ip4-lookup",
+    [HICNPG_INTEREST_NEXT_V6_LOOKUP] = "ip6-lookup",
+    [HICNPG_INTEREST_NEXT_DROP] = "error-drop"
+  },
+};
+/* *INDENT-ON* */
+
+/*
+ * Next graph nodes, which reference the list in the actual registration
+ * block below.
+ * NOTE: DROP must stay first -- the data node computes its next index as
+ * HICNPG_DATA_NEXT_DROP + match_data() (0 = matched/consumed, 1 = pass
+ * through ip4-lookup, 2 = pass through ip6-lookup).
+ */
+typedef enum
+{
+  HICNPG_DATA_NEXT_DROP,
+  HICNPG_DATA_NEXT_LOOKUP4,
+  HICNPG_DATA_NEXT_LOOKUP6,
+  HICNPG_DATA_N_NEXT,
+} hicnpg_data_next_t;
+
+/* Trace context struct (layout-identical to hicnpg_trace_t) */
+typedef struct
+{
+  u32 next_index;
+  u32 sw_if_index;
+  u8 pkt_type;
+  u16 msg_type;
+} icnpg_data_trace_t;
+
+/* packet trace format function for the hicnpg-data node. */
+static u8 *
+format_hicnpg_data_trace (u8 * s, va_list * args)
+{
+  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+  /* This node stores icnpg_data_trace_t records; the previous cast to
+   * hicnpg_trace_t only worked because the two structs share a layout. */
+  icnpg_data_trace_t *t = va_arg (*args, icnpg_data_trace_t *);
+
+  s = format (s, "HICNPG: pkt: %d, msg %d, sw_if_index %d, next index %d",
+	      (int) t->pkt_type, (int) t->msg_type,
+	      t->sw_if_index, t->next_index);
+  return (s);
+}
+
+
+/* Return non-zero when every bit set in the configured IPv4 prefix
+ * address is also set in *name. */
+static_always_inline int
+match_ip4_name (u32 * name, fib_prefix_t * prefix)
+{
+  u32 masked = *name & prefix->fp_addr.ip4.data_u32;
+
+  return masked == prefix->fp_addr.ip4.data_u32;
+}
+
+/* Same masking test as match_ip4_name, applied to both 64-bit halves of
+ * the IPv6 address pointed to by name. */
+static_always_inline int
+match_ip6_name (u8 * name, fib_prefix_t * prefix)
+{
+  u64 masked_lo = ((u64 *) name)[0] & prefix->fp_addr.ip6.as_u64[0];
+  u64 masked_hi = ((u64 *) name)[1] & prefix->fp_addr.ip6.as_u64[1];
+
+  return (masked_lo == prefix->fp_addr.ip6.as_u64[0]) &&
+    (masked_hi == prefix->fp_addr.ip6.as_u64[1]);
+}
+
+
+/*
+ * Return 0,1,2.
+ * 0 matches
+ * 1 does not match and the prefix is ip4
+ * 2 does not match and the prefix is ip6
+ */
+static_always_inline u32
+match_data (vlib_buffer_t * b, fib_prefix_t * prefix)
+{
+  u8 *ptr = vlib_buffer_get_current (b);
+  /* High nibble of the first byte is the IP version (0x4 / 0x6). */
+  u8 v = *ptr & 0xf0;
+  u32 next = 0;
+
+  if (PREDICT_TRUE (v == 0x40 && ip46_address_is_ip4 (&prefix->fp_addr)))
+    {
+      /* Offset 12: IPv4 source address -- data carries the name in its
+       * source field (it flows back toward the consumer). */
+      if (!match_ip4_name ((u32 *) & (ptr[12]), prefix))
+	next = 1;
+    }
+  else
+    if (PREDICT_TRUE (v == 0x60 && !ip46_address_is_ip4 (&prefix->fp_addr)))
+    {
+      /* Offset 8: IPv6 source address. */
+      if (!match_ip6_name (&(ptr[8]), prefix))
+	next = 2;
+    }
+
+  /* NOTE(review): a version/prefix-family mismatch falls through with
+   * next == 0, i.e. is treated as a match -- confirm this is intended. */
+  return next;
+}
+
+/*
+ * Return 0,1,2.
+ * 0 matches
+ * 1 does not match and the prefix is ip4
+ * 2 does not match and the prefix is ip6
+ */
+static_always_inline u32
+match_interest (vlib_buffer_t * b, fib_prefix_t * prefix)
+{
+  u8 *ptr = vlib_buffer_get_current (b);
+  /* High nibble of the first byte is the IP version (0x4 / 0x6). */
+  u8 v = *ptr & 0xf0;
+  u32 next = 0;
+
+  if (PREDICT_TRUE (v == 0x40 && ip46_address_is_ip4 (&prefix->fp_addr)))
+    {
+      /* Offset 16: IPv4 destination address -- interests carry the name
+       * in their destination field. */
+      if (!match_ip4_name ((u32 *) & (ptr[16]), prefix))
+	next = 1;
+    }
+  else
+    if (PREDICT_TRUE (v == 0x60 && !ip46_address_is_ip4 (&prefix->fp_addr)))
+    {
+      /* Offset 24: IPv6 destination address. */
+      if (!match_ip6_name (&(ptr[24]), prefix))
+	next = 2;
+    }
+
+  return next;
+}
+
+
+
+
+/*
+ * Node function for the client-side data counter. Data packets whose
+ * source matches the configured name prefix are counted (and dropped);
+ * everything else is handed back to the regular ip4/ip6 lookup path.
+ */
+static uword
+hicnpg_client_data_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
+			    vlib_frame_t * frame)
+{
+  u32 n_left_from, *from, *to_next;
+  hicnpg_data_next_t next_index;
+  u32 pkts_processed = 0;
+  u32 content_msgs_received = 0;
+  u32 bi0, bi1;
+  vlib_buffer_t *b0, *b1;
+  /* Trace placeholders: pkt_type stays 0, msg_type stays 1 (data). */
+  u8 pkt_type0 = 0, pkt_type1 = 0;
+  u16 msg_type0 = 1, msg_type1 = 1;
+  hicnpg_main_t *hpgm = &hicnpg_main;
+
+  from = vlib_frame_vector_args (frame);
+  n_left_from = frame->n_vectors;
+  next_index = node->cached_next_index;
+
+  while (n_left_from > 0)
+    {
+      u32 n_left_to_next;
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+      while (n_left_from >= 4 && n_left_to_next >= 2)
+	{
+	  u32 next0 = HICNPG_DATA_NEXT_DROP;
+	  u32 next1 = HICNPG_DATA_NEXT_DROP;
+	  u32 sw_if_index0, sw_if_index1;
+
+	  /* Prefetch next iteration. */
+	  {
+	    vlib_buffer_t *p2, *p3;
+
+	    p2 = vlib_get_buffer (vm, from[2]);
+	    p3 = vlib_get_buffer (vm, from[3]);
+
+	    vlib_prefetch_buffer_header (p2, LOAD);
+	    vlib_prefetch_buffer_header (p3, LOAD);
+
+	    CLIB_PREFETCH (p2->data, (2 * CLIB_CACHE_LINE_BYTES), STORE);
+	    CLIB_PREFETCH (p3->data, (2 * CLIB_CACHE_LINE_BYTES), STORE);
+	  }
+
+	  /*
+	   * speculatively enqueue b0 and b1 to the current
+	   * next frame
+	   */
+	  to_next[0] = bi0 = from[0];
+	  to_next[1] = bi1 = from[1];
+	  from += 2;
+	  to_next += 2;
+	  n_left_from -= 2;
+	  n_left_to_next -= 2;
+
+	  b0 = vlib_get_buffer (vm, bi0);
+	  b1 = vlib_get_buffer (vm, bi1);
+
+	  sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+	  sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
+
+	  /* match_data yields 0 (matched -> DROP/count), 1 (ip4-lookup)
+	   * or 2 (ip6-lookup); see hicnpg_data_next_t ordering. */
+	  next0 =
+	    HICNPG_DATA_NEXT_DROP + match_data (b0, hpgm->pgen_clt_hicn_name);
+	  next1 =
+	    HICNPG_DATA_NEXT_DROP + match_data (b1, hpgm->pgen_clt_hicn_name);
+
+	  /* If other features are chained on this arc, defer to them
+	   * instead of the computed disposition. */
+	  if (PREDICT_FALSE (vnet_get_feature_count
+			     (vnet_buffer (b0)->feature_arc_index,
+			      vnet_buffer (b0)->sw_if_index[VLIB_RX]) > 1))
+	    vnet_feature_next (&next0, b0);
+
+	  if (PREDICT_FALSE (vnet_get_feature_count
+			     (vnet_buffer (b1)->feature_arc_index,
+			      vnet_buffer (b1)->sw_if_index[VLIB_RX]) > 1))
+	    vnet_feature_next (&next1, b1);
+
+
+	  if (next0 == HICNPG_DATA_NEXT_DROP)
+	    {
+	      /* Increment a counter */
+	      content_msgs_received++;
+	    }
+
+	  if (next1 == HICNPG_DATA_NEXT_DROP)
+	    {
+	      /* Increment a counter */
+	      content_msgs_received++;
+	    }
+
+	  if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
+	    {
+	      if (b0->flags & VLIB_BUFFER_IS_TRACED)
+		{
+		  icnpg_data_trace_t *t =
+		    vlib_add_trace (vm, node, b0, sizeof (*t));
+		  t->pkt_type = pkt_type0;
+		  t->msg_type = msg_type0;
+		  t->sw_if_index = sw_if_index0;
+		  t->next_index = next0;
+		}
+	      if (b1->flags & VLIB_BUFFER_IS_TRACED)
+		{
+		  icnpg_data_trace_t *t =
+		    vlib_add_trace (vm, node, b1, sizeof (*t));
+		  t->pkt_type = pkt_type1;
+		  t->msg_type = msg_type1;
+		  t->sw_if_index = sw_if_index1;
+		  t->next_index = next1;
+		}
+	    }
+
+	  vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
+					   to_next, n_left_to_next,
+					   bi0, bi1, next0, next1);
+	  pkts_processed += 2;
+	}
+
+      while (n_left_from > 0 && n_left_to_next > 0)
+	{
+	  u32 next0 = HICNPG_DATA_NEXT_DROP;
+	  u32 sw_if_index0;
+
+	  /* speculatively enqueue b0 to the current next frame */
+	  bi0 = from[0];
+	  to_next[0] = bi0;
+	  from += 1;
+	  to_next += 1;
+	  n_left_from -= 1;
+	  n_left_to_next -= 1;
+
+	  b0 = vlib_get_buffer (vm, bi0);
+
+	  sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+
+	  next0 =
+	    HICNPG_DATA_NEXT_DROP + match_data (b0, hpgm->pgen_clt_hicn_name);
+
+	  if (PREDICT_FALSE (vnet_get_feature_count
+			     (vnet_buffer (b0)->feature_arc_index,
+			      vnet_buffer (b0)->sw_if_index[VLIB_RX]) > 1))
+	    vnet_feature_next (&next0, b0);
+
+	  if (next0 == HICNPG_DATA_NEXT_DROP)
+	    {
+	      /* Increment a counter */
+	      content_msgs_received++;
+	    }
+
+	  if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
+			     && (b0->flags & VLIB_BUFFER_IS_TRACED)))
+	    {
+	      icnpg_data_trace_t *t =
+		vlib_add_trace (vm, node, b0, sizeof (*t));
+	      t->pkt_type = pkt_type0;
+	      t->msg_type = msg_type0;
+	      t->sw_if_index = sw_if_index0;
+	      t->next_index = next0;
+	    }
+
+	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+					   n_left_to_next, bi0, next0);
+
+	  pkts_processed++;
+	}
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+    }
+
+  vlib_node_increment_counter (vm, hicn_pg_data_node.index,
+			       HICNPG_ERROR_PROCESSED, pkts_processed);
+  vlib_node_increment_counter (vm, hicn_pg_data_node.index,
+			       HICNPG_ERROR_CONTENT_MSGS_RECEIVED,
+			       content_msgs_received);
+  return (frame->n_vectors);
+}
+
+/* *INDENT-OFF* */
+/* Graph-node registration for the client-side data counter. */
+VLIB_REGISTER_NODE(hicn_pg_data_node) =
+{
+  .function = hicnpg_client_data_node_fn,
+  .name = "hicnpg-data",
+  .vector_size = sizeof(u32),
+  .format_trace = format_hicnpg_data_trace,
+  .type = VLIB_NODE_TYPE_INTERNAL,
+  .n_errors = ARRAY_LEN(hicnpg_error_strings),
+  .error_strings = hicnpg_error_strings,
+  .n_next_nodes = HICNPG_DATA_N_NEXT,
+  .next_nodes =
+  {
+    [HICNPG_DATA_NEXT_DROP] = "error-drop",
+    [HICNPG_DATA_NEXT_LOOKUP4] = "ip4-lookup",
+    [HICNPG_DATA_NEXT_LOOKUP6] = "ip6-lookup",
+  },
+};
+/* *INDENT-ON* */
+
+/* Insert hicnpg-data on the ip4/ip6 unicast feature arcs, before the
+ * inacl nodes, so it sees every incoming packet at the client side. */
+/* *INDENT-OFF* */
+VNET_FEATURE_INIT(hicn_data_input_ip4_arc, static)=
+  {
+    .arc_name = "ip4-unicast",
+    .node_name = "hicnpg-data",
+    .runs_before = VNET_FEATURES("ip4-inacl"),
+  };
+/* *INDENT-ON* */
+
+
+/* *INDENT-OFF* */
+VNET_FEATURE_INIT(hicn_data_input_ip6_arc, static)=
+  {
+    .arc_name = "ip6-unicast",
+    .node_name = "hicnpg-data",
+    .runs_before = VNET_FEATURES("ip6-inacl"),
+  };
+/* *INDENT-ON* */
+
+
+/*
+ * End of packet-generator client node
+ */
+
+/*
+ * Beginning of packet-generation server node
+ */
+
+/* Registration struct for a graph node */
+vlib_node_registration_t hicn_pg_server_node;
+
+/* Stats, which end up called "error" even though they aren't... */
+#define foreach_icnpg_server_error \
+_(PROCESSED, "hICN PG Server packets processed") \
+_(DROPPED, "hICN PG Server packets dropped")
+
+/* Error/counter indices generated from the list above. */
+typedef enum
+{
+#define _(sym,str) HICNPG_SERVER_ERROR_##sym,
+  foreach_icnpg_server_error
+#undef _
+  HICNPG_SERVER_N_ERROR,
+} icnpg_server_error_t;
+
+/* Human-readable strings for the counters, in the same order. */
+static char *icnpg_server_error_strings[] = {
+#define _(sym,string) string,
+  foreach_icnpg_server_error
+#undef _
+};
+
+/*
+ * Next graph nodes, which reference the list in the actual registration
+ * block below.
+ * NOTE: the server computes a pass-through disposition as match0 - 1
+ * (match_interest returns 1 for ip4 / 2 for ip6), so V4_LOOKUP must be
+ * 0 and V6_LOOKUP must be 1.
+ */
+typedef enum
+{
+  HICNPG_SERVER_NEXT_V4_LOOKUP,
+  HICNPG_SERVER_NEXT_V6_LOOKUP,
+  HICNPG_SERVER_NEXT_DROP,
+  HICNPG_SERVER_N_NEXT,
+} icnpg_server_next_t;
+
+/* Trace context struct */
+typedef struct
+{
+  u32 next_index;
+  u32 sw_if_index;
+  u8 pkt_type;
+  u16 msg_type;
+} hicnpg_server_trace_t;
+
+/* Format one hicnpg_server_trace_t record for "show trace" output. */
+static u8 *
+format_icnpg_server_trace (u8 * s, va_list * args)
+{
+  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+  hicnpg_server_trace_t *trace = va_arg (*args, hicnpg_server_trace_t *);
+
+  return format (s,
+		 "HICNPG SERVER: pkt: %d, msg %d, sw_if_index %d, next index %d",
+		 (int) trace->pkt_type, (int) trace->msg_type,
+		 trace->sw_if_index, trace->next_index);
+}
+
+/*
+ * Node function for the icn packet-generator server. Interests that fall
+ * outside the served prefix are passed back to ip4/ip6 lookup; interests
+ * inside the prefix are converted in place into data packets.
+ */
+static uword
+hicnpg_node_server_fn (vlib_main_t * vm,
+		       vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+  u32 n_left_from, *from, *to_next;
+  icnpg_server_next_t next_index;
+  u32 pkts_processed = 0, pkts_dropped = 0;
+  u32 bi0, bi1;
+  vlib_buffer_t *b0, *b1;
+  /* Trace placeholders: pkt_type/msg_type are never assigned here. */
+  u8 pkt_type0 = 0, pkt_type1 = 0;
+  u16 msg_type0 = 0, msg_type1 = 0;
+  hicn_header_t *hicn0 = NULL, *hicn1 = NULL;
+  hicn_name_t name0, name1;
+  u16 namelen0, namelen1;
+
+  hicnpg_server_main_t *hpgsm = &hicnpg_server_main;
+
+  from = vlib_frame_vector_args (frame);
+
+  n_left_from = frame->n_vectors;
+  next_index = node->cached_next_index;
+
+  while (n_left_from > 0)
+    {
+      u32 n_left_to_next;
+
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+
+      while (n_left_from >= 4 && n_left_to_next >= 2)
+	{
+	  u32 next0 = HICNPG_SERVER_NEXT_DROP;
+	  u32 next1 = HICNPG_SERVER_NEXT_DROP;
+	  u8 isv6_0 = 0;
+	  u8 isv6_1 = 0;
+	  u32 sw_if_index0, sw_if_index1;
+
+	  /* Prefetch next iteration. */
+	  {
+	    vlib_buffer_t *p2, *p3;
+
+	    p2 = vlib_get_buffer (vm, from[2]);
+	    p3 = vlib_get_buffer (vm, from[3]);
+
+	    vlib_prefetch_buffer_header (p2, LOAD);
+	    vlib_prefetch_buffer_header (p3, LOAD);
+
+	    CLIB_PREFETCH (p2->data, (2 * CLIB_CACHE_LINE_BYTES), STORE);
+	    CLIB_PREFETCH (p3->data, (2 * CLIB_CACHE_LINE_BYTES), STORE);
+	  }
+
+	  /*
+	   * speculatively enqueue b0 and b1 to the current
+	   * next frame
+	   */
+	  to_next[0] = bi0 = from[0];
+	  to_next[1] = bi1 = from[1];
+	  from += 2;
+	  to_next += 2;
+	  n_left_from -= 2;
+	  n_left_to_next -= 2;
+
+	  b0 = vlib_get_buffer (vm, bi0);
+	  b1 = vlib_get_buffer (vm, bi1);
+
+	  sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+	  sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
+
+	  vnet_buffer (b0)->sw_if_index[VLIB_TX] = ~0;
+	  vnet_buffer (b1)->sw_if_index[VLIB_TX] = ~0;
+
+	  /* 0 = inside served prefix, 1/2 = outside (ip4/ip6). */
+	  u32 match0 = match_interest (b0, hpgsm->pgen_srv_hicn_name);
+	  u32 match1 = match_interest (b1, hpgsm->pgen_srv_hicn_name);
+
+	  if (match0)
+	    {
+	      /* Not ours: forward to the matching ip lookup node. */
+	      next0 = match0 - 1;
+	    }
+	  else
+	    if (hicn_interest_parse_pkt
+		(b0, &name0, &namelen0, &hicn0, &isv6_0) == HICN_ERROR_NONE)
+	    {
+	      /* this node grabs only interests */
+	      vlib_buffer_t *rb = NULL;
+	      rb = vlib_get_buffer (vm, hpgsm->pgen_svr_buffer_idx);
+
+	      isv6_0 ? convert_interest_to_data_v6 (vm, b0, rb,
+						    bi0) :
+		convert_interest_to_data_v4 (vm, b0, rb, bi0);
+
+	      next0 =
+		isv6_0 ? HICNPG_SERVER_NEXT_V6_LOOKUP :
+		HICNPG_SERVER_NEXT_V4_LOOKUP;
+	    }
+
+	  if (match1)
+	    {
+	      next1 = match1 - 1;
+	    }
+	  else
+	    if (hicn_interest_parse_pkt
+		(b1, &name1, &namelen1, &hicn1, &isv6_1) == HICN_ERROR_NONE)
+	    {
+	      /* this node grabs only interests */
+	      vlib_buffer_t *rb = NULL;
+	      rb = vlib_get_buffer (vm, hpgsm->pgen_svr_buffer_idx);
+
+	      isv6_1 ? convert_interest_to_data_v6 (vm, b1, rb,
+						    bi1) :
+		convert_interest_to_data_v4 (vm, b1, rb, bi1);
+
+	      next1 =
+		isv6_1 ? HICNPG_SERVER_NEXT_V6_LOOKUP :
+		HICNPG_SERVER_NEXT_V4_LOOKUP;
+	    }
+	  pkts_processed += 2;
+
+	  if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
+	    {
+	      if (b0->flags & VLIB_BUFFER_IS_TRACED)
+		{
+		  hicnpg_server_trace_t *t =
+		    vlib_add_trace (vm, node, b0, sizeof (*t));
+		  t->pkt_type = pkt_type0;
+		  t->msg_type = msg_type0;
+		  t->sw_if_index = sw_if_index0;
+		  t->next_index = next0;
+		}
+	      if (b1->flags & VLIB_BUFFER_IS_TRACED)
+		{
+		  hicnpg_server_trace_t *t =
+		    vlib_add_trace (vm, node, b1, sizeof (*t));
+		  t->pkt_type = pkt_type1;
+		  t->msg_type = msg_type1;
+		  t->sw_if_index = sw_if_index1;
+		  t->next_index = next1;
+		}
+	    }
+	  if (next0 == HICNPG_SERVER_NEXT_DROP)
+	    {
+	      pkts_dropped++;
+	    }
+	  if (next1 == HICNPG_SERVER_NEXT_DROP)
+	    {
+	      pkts_dropped++;
+	    }
+	  /*
+	   * verify speculative enqueues, maybe switch current
+	   * next frame
+	   */
+	  vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
+					   to_next, n_left_to_next,
+					   bi0, bi1, next0, next1);
+	}
+
+      while (n_left_from > 0 && n_left_to_next > 0)
+	{
+	  u32 next0 = HICNPG_SERVER_NEXT_DROP;
+	  u32 sw_if_index0 = ~0;
+	  u8 isv6_0 = 0;
+
+	  /* speculatively enqueue b0 to the current next frame */
+	  bi0 = from[0];
+	  to_next[0] = bi0;
+	  from += 1;
+	  to_next += 1;
+	  n_left_from -= 1;
+	  n_left_to_next -= 1;
+
+	  b0 = vlib_get_buffer (vm, bi0);
+
+	  sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+	  vnet_buffer (b0)->sw_if_index[VLIB_TX] = ~0;
+
+	  u32 match0 = match_interest (b0, hpgsm->pgen_srv_hicn_name);
+
+	  if (match0)
+	    {
+	      next0 = match0 - 1;
+	    }
+	  else
+	    if (hicn_interest_parse_pkt
+		(b0, &name0, &namelen0, &hicn0, &isv6_0) == HICN_ERROR_NONE)
+	    {
+	      /* this node grabs only interests */
+	      vlib_buffer_t *rb = NULL;
+	      rb = vlib_get_buffer (vm, hpgsm->pgen_svr_buffer_idx);
+
+	      isv6_0 ? convert_interest_to_data_v6 (vm, b0, rb,
+						    bi0) :
+		convert_interest_to_data_v4 (vm, b0, rb, bi0);
+
+	      next0 =
+		isv6_0 ? HICNPG_SERVER_NEXT_V6_LOOKUP :
+		HICNPG_SERVER_NEXT_V4_LOOKUP;
+	    }
+	  if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
+			     && (b0->flags & VLIB_BUFFER_IS_TRACED)))
+	    {
+	      hicnpg_server_trace_t *t =
+		vlib_add_trace (vm, node, b0, sizeof (*t));
+	      t->pkt_type = pkt_type0;
+	      t->msg_type = msg_type0;
+	      t->sw_if_index = sw_if_index0;
+	      t->next_index = next0;
+	    }
+	  pkts_processed += 1;
+
+	  if (next0 == HICNPG_SERVER_NEXT_DROP)
+	    {
+	      pkts_dropped++;
+	    }
+	  /*
+	   * verify speculative enqueue, maybe switch current
+	   * next frame
+	   */
+	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+					   to_next, n_left_to_next,
+					   bi0, next0);
+	}
+
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+    }
+
+  vlib_node_increment_counter (vm, hicn_pg_server_node.index,
+			       HICNPG_SERVER_ERROR_PROCESSED, pkts_processed);
+  vlib_node_increment_counter (vm, hicn_pg_server_node.index,
+			       HICNPG_SERVER_ERROR_DROPPED, pkts_dropped);
+
+  return (frame->n_vectors);
+}
+
+/* Turn an IPv4 interest into the corresponding data packet in place:
+ * append payload from the pre-built server buffer rb, swap src/dst
+ * addresses, and fix length and checksums. */
+void
+convert_interest_to_data_v4 (vlib_main_t * vm, vlib_buffer_t * b0,
+			     vlib_buffer_t * rb, u32 bi0)
+{
+  hicn_header_t *h0 = vlib_buffer_get_current (b0);
+
+  /* Get the packet length */
+  u16 pkt_len = clib_net_to_host_u16 (h0->v4.ip.len);
+
+  /*
+   * Rule of thumb: We want the size of the IP packet to be <= 1500 bytes
+   * NOTE(review): if pkt_len > 1500 the u16 subtraction below wraps
+   * around -- confirm interests can never exceed 1500 bytes here.
+   */
+  u16 bytes_to_copy = rb->current_length;
+  if ((bytes_to_copy + pkt_len) > 1500)
+    {
+      bytes_to_copy = 1500 - pkt_len;
+    }
+  /* Add content to the data packet */
+  vlib_buffer_add_data (vm, &bi0, rb->data, bytes_to_copy);
+
+  /* Re-fetch: vlib_buffer_add_data may have grown the chain. The head
+   * index is assumed to stay stable for the caller's enqueue. */
+  b0 = vlib_get_buffer (vm, bi0);
+
+  h0 = vlib_buffer_get_current (b0);
+
+  /* Swap source and destination so the packet flows back to the consumer */
+  ip4_address_t src_addr = h0->v4.ip.saddr;
+  h0->v4.ip.saddr = h0->v4.ip.daddr;
+  h0->v4.ip.daddr = src_addr;
+
+  h0->v4.ip.len = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0));
+  h0->v4.ip.csum = ip4_header_checksum ((ip4_header_t *) & (h0->v4.ip));
+  calculate_tcp_checksum_v4 (vm, b0);
+}
+
+/* Turn an IPv6 interest into the corresponding data packet in place
+ * (see convert_interest_to_data_v4). */
+void
+convert_interest_to_data_v6 (vlib_main_t * vm, vlib_buffer_t * b0,
+			     vlib_buffer_t * rb, u32 bi0)
+{
+  hicn_header_t *h0 = vlib_buffer_get_current (b0);
+
+  /* Get the packet length (v6 length field is the payload only) */
+  uint16_t pkt_len =
+    clib_net_to_host_u16 (h0->v6.ip.len) + sizeof (ip6_header_t);
+
+  /*
+   * Figure out how many bytes we can add to the content
+   *
+   * Rule of thumb: We want the size of the IP packet to be <= 1500 bytes
+   */
+  u16 bytes_to_copy = rb->current_length;
+  if ((bytes_to_copy + pkt_len) > 1500)
+    {
+      bytes_to_copy = 1500 - pkt_len;
+    }
+  /* Add content to the data packet */
+  vlib_buffer_add_data (vm, &bi0, rb->data, bytes_to_copy);
+
+  /* Re-fetch: vlib_buffer_add_data may have grown the chain. */
+  b0 = vlib_get_buffer (vm, bi0);
+
+  h0 = vlib_buffer_get_current (b0);
+  /* Swap source and destination so the packet flows back to the consumer */
+  ip6_address_t src_addr = h0->v6.ip.saddr;
+  h0->v6.ip.saddr = h0->v6.ip.daddr;
+  h0->v6.ip.daddr = src_addr;
+
+  h0->v6.ip.len = clib_host_to_net_u16 (vlib_buffer_length_in_chain
+					(vm, b0) - sizeof (ip6_header_t));
+  /* NOTE(review): forcing data_offset to max and urg_ptr to 0xffff looks
+   * like an hICN data-marking convention -- confirm against the hICN
+   * packet format spec. */
+  h0->v6.tcp.data_offset_and_reserved |= 0x0f;
+  h0->v6.tcp.urg_ptr = htons (0xffff);
+
+  calculate_tcp_checksum_v6 (vm, b0);
+}
+
+/* *INDENT-OFF* */
+/* Graph-node registration for the server-side responder. */
+VLIB_REGISTER_NODE(hicn_pg_server_node) =
+{
+  .function = hicnpg_node_server_fn,
+  .name = "hicnpg-server",
+  .vector_size = sizeof(u32),
+  .format_trace = format_icnpg_server_trace,
+  .type = VLIB_NODE_TYPE_INTERNAL,
+  .n_errors = ARRAY_LEN(icnpg_server_error_strings),
+  .error_strings = icnpg_server_error_strings,
+  .n_next_nodes = HICNPG_SERVER_N_NEXT,
+  /* edit / add dispositions here */
+  .next_nodes =
+  {
+    [HICNPG_SERVER_NEXT_V4_LOOKUP] = "ip4-lookup",
+    [HICNPG_SERVER_NEXT_V6_LOOKUP] = "ip6-lookup",
+    [HICNPG_SERVER_NEXT_DROP] = "error-drop",
+  },
+};
+/* *INDENT-ON* */
+
+/* Insert hicnpg-server on the ip4/ip6 unicast feature arcs, before the
+ * inacl nodes, so it intercepts incoming interests at the server side. */
+/* *INDENT-OFF* */
+VNET_FEATURE_INIT(hicn_pg_server_ip6, static)=
+  {
+    .arc_name = "ip6-unicast",
+    .node_name = "hicnpg-server",
+    .runs_before = VNET_FEATURES("ip6-inacl"),
+  };
+/* *INDENT-ON* */
+
+/* *INDENT-OFF* */
+VNET_FEATURE_INIT(hicn_pg_server_ip4, static)=
+  {
+    .arc_name = "ip4-unicast",
+    .node_name = "hicnpg-server",
+    .runs_before = VNET_FEATURES("ip4-inacl"),
+  };
+/* *INDENT-ON* */
+
+/*
+ * End of packet-generator server node
+ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/network/pg.h b/hicn-plugin/src/network/pg.h
new file mode 100644
index 000000000..84a391d43
--- /dev/null
+++ b/hicn-plugin/src/network/pg.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HICN_PG_H__
+#define __HICN_PG_H__
+
+
+/**
+ * @file pg.h
+ *
+ * The packet generator is made of two entities, a client and a server.
+ * The client issues interests at high speed and the server satisfy each
+ * interest it receives with the corresponding data.
+ * The packet generator is made of three nodes:
+ * - hicnpg-interest that receives packets from a packet generator interface
+ * and manipulate them to generate interests based on the given configuration.
+ * This node runs at the client side.
+ * - hicnpg-data that receives data packets at the client side and counts them.
+ * This is useful for statistics. The "show err" command will give the number
+ * of interest issued and data received at the client side
+ * - hicnpg-server that receives an interest and replies with the corresponding
+ *   data. The data is generated from the interest by switching the source and
+ *   destination addresses in the packet and appending a payload to the packet.
+ *
+ *
+ * These three nodes are inserted in the vlib graph in the following manner:
+ * - hicnpg-interest is added as a possible next node of the pg-input node. The packet
+ * generator stream then specifies it as next node.
+ * - hicnpg-data is added as next hop of the ip4/6-unicast node exploiting the corresponding
+ * feature and it runs before the ip4/6-inacl node. In this way, every packet that is
+ * received through an interface on which this feature is enabled is sent to this node.
+ * - hicnpg-server is added as next hop of the ip4/6-unicast using the corresponding
+ * feature and it runs before the ip4/6-inacl node. In this way, every packet that is
+ * received through an interface on which this feature is enabled is sent to this node.
+ *
+ * An example of how to use the pg for hicn is available in the documentation.
+ */
+
+/**
+ * @brief hICN packet generator main for the pg client nodes
+ *
+ * It stores the configuration and makes it available to the pg client nodes.
+ */
+typedef struct hicnpg_main_s
+{
+  u32 index;			// used to compute the sequence number
+  fib_prefix_t *pgen_clt_hicn_name;	// hICN name to put in the destination address of an interest
+  u32 index_ifaces;		/* used to mimic interests coming from different consumer */
+  u32 n_ifaces;			/* The source address will change from interest to interest */
+  /* index_ifaces is used to keep a global reference to the iface used */
+  /* and it is incremented when we want to change "consumer" */
+  /* n_ifaces identifies how many consumers to simulate */
+  u32 max_seq_number;		// Used to limit the max sequence number
+  u32 n_flows;			// Used to simulate multiple flows (a flow always has the same hICN name)
+  ip46_address_t pgen_clt_src_addr;	// Source address base to use in the interest
+
+  u16 interest_lifetime;	// Interest lifetime
+  u32 sw_if;			// Interface where to send interests and receive data
+} hicnpg_main_t;
+
+extern hicnpg_main_t hicnpg_main;
+
+/**
+ * @brief hICN packet generator main for the pg server node
+ *
+ * It stores the configuration and makes it available to the pg server node.
+ */
+typedef struct hicnpg_server_main_s
+{
+  u32 node_index;		/* presumably the pg server vlib node index — confirm at init site */
+  /* Arbitrary content */
+  u32 pgen_svr_buffer_idx;	/* buffer holding the payload appended to data packets */
+  fib_prefix_t *pgen_srv_hicn_name;	/* hICN prefix served by the pg server */
+} hicnpg_server_main_t;
+
+extern hicnpg_server_main_t hicnpg_server_main;
+
+extern vlib_node_registration_t hicn_pg_interest_node;
+extern vlib_node_registration_t hicn_pg_data_node;
+
+#endif // __HICN_PG_H__
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/hicn-plugin/src/network/route.c b/hicn-plugin/src/network/route.c
new file mode 100644
index 000000000..3b774cd82
--- /dev/null
+++ b/hicn-plugin/src/network/route.c
@@ -0,0 +1,766 @@
+/*
+ * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/fib/fib_entry.h>
+#include <vnet/fib/fib_table.h>
+#include <vnet/fib/fib_entry_track.h>
+#include <vnet/ip/ip6_packet.h>
+#include <vnet/ip/ip.h>
+#include <vnet/dpo/dpo.h>
+#include <vnet/dpo/drop_dpo.h>
+#include <vnet/dpo/load_balance.h>
+#include <vnet/udp/udp.h>
+#include <vnet/udp/udp_encap.h>
+#include <vlib/global_funcs.h>
+
+#include "strategy_dpo_ctx.h"
+#include "strategy_dpo_manager.h"
+#include "strategy.h"
+#include "faces/face.h"
+#include "error.h"
+#include "strategies/dpo_mw.h"
+#include "infra.h"
+#include "udp_tunnels/udp_tunnel.h"
+
+#define FIB_SOURCE_HICN 0x04 //Right after the FIB_SOURCE_INTERFACE priority
+
+fib_source_t hicn_fib_src;
+
+fib_node_type_t hicn_fib_node_type;
+
+ip4_address_t localhost4 = {0};
+ip6_address_t localhost6 = {0};
+
+/*
+ * Retrieve the hICN strategy dpo installed for prefix in the HICN vrf, if
+ * any. On success sets *hicn_dpo and *fib_index and returns HICN_ERROR_NONE;
+ * otherwise returns ROUTE_NOT_FOUND / ROUTE_NO_LD / ROUTE_DPO_NO_HICN.
+ */
+int
+hicn_route_get_dpo (const fib_prefix_t * prefix,
+		    const dpo_id_t ** hicn_dpo, u32 * fib_index)
+{
+  //fib_prefix_t fib_pfx;
+  const dpo_id_t *load_balance_dpo_id;
+  const dpo_id_t *former_dpo_id;
+  int found = 0, ret = HICN_ERROR_ROUTE_NOT_FOUND;
+  fib_node_index_t fib_entry_index;
+
+  /* Check if the route already exist in the fib */
+  /*
+   * ASSUMPTION: we use table 0 which is the default table and it is
+   * already existing and locked
+   */
+  *fib_index = fib_table_find_or_create_and_lock (prefix->fp_proto,
+						  HICN_FIB_TABLE,
+						  hicn_fib_src);
+  fib_entry_index = fib_table_lookup_exact_match (*fib_index, prefix);
+
+  if (fib_entry_index != FIB_NODE_INDEX_INVALID)
+    {
+      /* Route already existing. We need to update the dpo. */
+      load_balance_dpo_id =
+	fib_entry_contribute_ip_forwarding (fib_entry_index);
+
+      /* The dpo is not a load balance dpo as expected */
+      if (load_balance_dpo_id->dpoi_type != DPO_LOAD_BALANCE)
+	ret = HICN_ERROR_ROUTE_NO_LD;
+      else
+	{
+	  /* former_dpo_id is a load_balance dpo */
+	  load_balance_t *lb =
+	    load_balance_get (load_balance_dpo_id->dpoi_index);
+
+	  /* FIB entry exists but there is no hicn dpo. */
+	  ret = HICN_ERROR_ROUTE_DPO_NO_HICN;
+	  /* Scan the lb buckets and stop at the first hicn dpo found. */
+	  for (int i = 0; i < lb->lb_n_buckets && !found; i++)
+	    {
+	      former_dpo_id = load_balance_get_bucket_i (lb, i);
+
+	      if (dpo_is_hicn (former_dpo_id))
+		{
+		  *hicn_dpo = former_dpo_id;
+		  ret = HICN_ERROR_NONE;
+		  found = 1;
+		}
+	    }
+	}
+    }
+  /*
+   * Remove the lock from the table. We keep one lock per route, not
+   * per dpo
+   */
+  fib_table_unlock (*fib_index, prefix->fp_proto, hicn_fib_src);
+
+  return ret;
+}
+
+/*
+ * Replace the strategy of an hICN-enabled prefix: create a new strategy dpo
+ * inheriting the next hops of the current context and swap it into the fib
+ * entry in the HICN vrf.
+ */
+int
+hicn_route_set_strategy (fib_prefix_t * prefix, u8 strategy_id)
+{
+  const dpo_id_t *hicn_dpo_id;
+  dpo_id_t new_dpo_id = DPO_INVALID;
+  int ret;
+  hicn_dpo_ctx_t *old_hicn_dpo_ctx;
+  const hicn_dpo_vft_t *new_dpo_vft;
+  index_t new_hicn_dpo_idx;
+  u32 fib_index;
+
+  ret = hicn_route_get_dpo (prefix, &hicn_dpo_id, &fib_index);
+
+  if (ret == HICN_ERROR_NONE)
+    {
+      old_hicn_dpo_ctx = hicn_strategy_dpo_ctx_get (hicn_dpo_id->dpoi_index);
+
+      new_dpo_vft = hicn_dpo_get_vft_from_id (strategy_id);
+
+      if (new_dpo_vft == NULL || old_hicn_dpo_ctx == NULL)
+	return HICN_ERROR_STRATEGY_NOT_FOUND;
+
+      /* Create a new dpo for the new strategy */
+      new_dpo_vft->hicn_dpo_create (hicn_dpo_id->dpoi_proto,
+				    old_hicn_dpo_ctx->next_hops,
+				    old_hicn_dpo_ctx->entry_count,
+				    &new_hicn_dpo_idx);
+
+      /* the value we got when we registered */
+      dpo_set (&new_dpo_id,
+	       new_dpo_vft->hicn_dpo_get_type (),
+	       (ip46_address_is_ip4 (&prefix->fp_addr) ? DPO_PROTO_IP4 :
+		DPO_PROTO_IP6), new_hicn_dpo_idx);
+
+      /* Here is where we create the "via" like route */
+      /*
+       * For the moment we use the global one the prefix you want
+       * to match Neale suggested -- FIB_SOURCE_HICN the client
+       * that is adding them -- no easy explanation at this time…
+       */
+      fib_node_index_t new_fib_node_index =
+	fib_table_entry_special_dpo_update (fib_index,
+					    prefix,
+					    hicn_fib_src,
+					    FIB_ENTRY_FLAG_EXCLUSIVE,
+					    &new_dpo_id);
+
+      dpo_unlock (&new_dpo_id);
+      ret =
+	(new_fib_node_index !=
+	 FIB_NODE_INDEX_INVALID) ? HICN_ERROR_NONE :
+	HICN_ERROR_ROUTE_NOT_UPDATED;
+    }
+  //Remember to remove the lock from the table when removing the entry
+  return ret;
+
+}
+
+/**
+ * @brief Add a next hop path for prefix rpfx in the default vrf (table 0).
+ *
+ * @param fib_proto FIB_PROTOCOL_IP4 or FIB_PROTOCOL_IP6
+ * @param rpfx prefix for which to add the path
+ * @param nh next hop address of the path
+ * @param sw_if software interface index of the path
+ * @return 0 (always)
+ */
+int
+ip_nh_add_helper (fib_protocol_t fib_proto, const fib_prefix_t * rpfx, ip46_address_t * nh, u32 sw_if)
+{
+  fib_route_path_t *rpaths = NULL, rpath;
+
+  u32 fib_index = fib_table_find(fib_proto, 0);
+
+  clib_memset(&rpath, 0, sizeof(rpath));
+  rpath.frp_weight = 1;
+  rpath.frp_sw_if_index = sw_if;
+  rpath.frp_addr = *nh;
+  rpath.frp_proto = ip46_address_is_ip4(nh) ? DPO_PROTO_IP4 : DPO_PROTO_IP6;
+
+  vec_add1(rpaths, rpath);
+
+  fib_table_entry_path_add2 (fib_index,
+			     rpfx,
+			     FIB_SOURCE_CLI,
+			     FIB_ENTRY_FLAG_NONE, rpaths);
+
+  /* The fib copies the paths; free our vector to avoid leaking it on
+   * every call (the original code never released it). */
+  vec_free (rpaths);
+  return 0;
+}
+
+/**
+ * @brief Remove a next hop path for prefix rpfx from the default vrf
+ * (table 0). If no matching entry exists nothing happens.
+ *
+ * @param fib_proto FIB_PROTOCOL_IP4 or FIB_PROTOCOL_IP6
+ * @param rpfx prefix for which to remove the path
+ * @param nh next hop address of the path to remove
+ * @param sw_if software interface index of the path
+ * @return 0 (always)
+ */
+int
+ip_nh_del_helper (fib_protocol_t fib_proto, const fib_prefix_t * rpfx, ip46_address_t * nh, u32 sw_if)
+{
+  fib_route_path_t *rpaths = NULL, rpath;
+
+  u32 fib_index = fib_table_find(fib_proto, 0);
+
+  clib_memset(&rpath, 0, sizeof(rpath));
+  rpath.frp_weight = 1;
+  rpath.frp_sw_if_index = sw_if;
+  rpath.frp_addr = *nh;
+  rpath.frp_proto = ip46_address_is_ip4(nh) ? DPO_PROTO_IP4 : DPO_PROTO_IP6;
+
+  vec_add1(rpaths, rpath);
+
+  fib_table_entry_path_remove2 (fib_index,
+				rpfx,
+				FIB_SOURCE_CLI,
+				rpaths);
+
+  /* The fib copies the paths; free our vector to avoid leaking it on
+   * every call (the original code never released it). */
+  vec_free (rpaths);
+  return 0;
+}
+
+
+/*
+ * Return a heap-allocated local address on sw_if suitable to reach nh:
+ * the interface address matching nh's subnet if one exists, otherwise the
+ * interface's first address, otherwise a zeroed address.
+ * The caller owns the returned memory (allocated with calloc).
+ * NOTE(review): the calloc result is not checked for NULL — on allocation
+ * failure the callers would dereference NULL; verify intended policy.
+ */
+static ip46_address_t * get_address(ip46_address_t * nh, u32 sw_if, fib_protocol_t proto)
+{
+  ip46_address_t * local_address = calloc(1, sizeof(ip46_address_t));
+
+  if (proto == FIB_PROTOCOL_IP4)
+    {
+      ip_interface_address_t *interface_address;
+      /* Prefer the ip4 address on the same subnet as the next hop. */
+      ip4_address_t *addr =
+	ip4_interface_address_matching_destination (&ip4_main,
+						    &nh->ip4,
+						    sw_if,
+						    &interface_address);
+
+      if (addr == NULL)
+	addr = ip4_interface_first_address (&ip4_main,
+					    sw_if,
+					    &interface_address);
+      if (addr != NULL)
+	ip46_address_set_ip4 (local_address, addr);
+    }
+  else if (proto == FIB_PROTOCOL_IP6)
+    {
+      ip_interface_address_t *interface_address;
+      /* Same logic for ip6: matching address first, then first address. */
+      ip6_interface_address_matching_destination (&ip6_main,
+						  &nh->ip6,
+						  sw_if,
+						  &interface_address);
+
+      ip6_address_t *addr = NULL;
+      if (interface_address != NULL)
+	addr =
+	  (ip6_address_t *)
+	  ip_interface_address_get_address (&ip6_main.lookup_main,
+					    interface_address);
+
+      if (addr == NULL)
+	addr = ip6_interface_first_address (&ip6_main, sw_if);
+
+      if (addr != NULL)
+	ip46_address_set_ip6 (local_address, addr);
+    }
+
+  return local_address;
+}
+
+/*
+ * Align the hICN strategy dpo next hops (faces) with the buckets of the ip
+ * load balance contributed by the tracked fib entry: create/lock a face for
+ * every distinct dpo, remove next hops that disappeared from the lb, and add
+ * the new ones to the strategy context.
+ */
+static void
+sync_hicn_fib_entry(hicn_dpo_ctx_t *fib_entry)
+{
+  const dpo_id_t * dpo_loadbalance = fib_entry_contribute_ip_forwarding (fib_entry->fib_entry_index);
+  const load_balance_t *lb0 = load_balance_get(dpo_loadbalance->dpoi_index);
+  index_t hicn_fib_entry_index = hicn_strategy_dpo_ctx_get_index(fib_entry);
+  hicn_face_id_t * vec_faces = 0;
+
+  dpo_id_t temp = DPO_INVALID;
+  const dpo_id_t *former_dpo = &temp;
+  int index = 0;
+  for (int j = 0; j < lb0->lb_n_buckets; j++) {
+    const dpo_id_t * dpo = load_balance_get_bucket_i(lb0,j);
+
+    int dpo_comparison = dpo_cmp(former_dpo, dpo);
+    former_dpo = dpo;
+    /*
+     * Loadbalancing in ip replicate the dpo in multiple buckets
+     * in order to honor the assigned weights.
+     */
+    if (dpo_comparison == 0)
+      continue;
+
+    u32 sw_if = ~0;
+    ip46_address_t * nh = NULL;
+    hicn_face_id_t face_id = HICN_FACE_NULL;
+
+    if (dpo_is_adj(dpo))
+      {
+	ip_adjacency_t * adj = adj_get (dpo->dpoi_index);
+	sw_if = adj->rewrite_header.sw_if_index;
+	nh = get_address (&(adj->sub_type.nbr.next_hop), sw_if, fib_entry->proto);
+      }
+    else if (dpo->dpoi_type == dpo_type_udp_ip4 || dpo->dpoi_type == dpo_type_udp_ip6)
+      {
+	u8 proto = dpo->dpoi_type == dpo_type_udp_ip4 ? FIB_PROTOCOL_IP4 : FIB_PROTOCOL_IP6;
+	/*
+	 * Allocate exactly once. The original code allocated here AND again
+	 * inside every switch branch, leaking the first allocation on every
+	 * udp-tunnel bucket.
+	 */
+	nh = calloc (1, sizeof(ip46_address_t));
+	switch (dpo->dpoi_proto)
+	  {
+	  case FIB_PROTOCOL_IP6:
+	    ip46_address_set_ip6(nh, &localhost6);
+	    break;
+	  case FIB_PROTOCOL_IP4:
+	    ip46_address_set_ip4(nh, &localhost4);
+	    break;
+	  default:
+	    /* Leave the next hop zeroed for any other protocol. */
+	    break;
+	  }
+	udp_tunnel_add_existing (dpo->dpoi_index, proto);
+      }
+    else //if (dpo_is_drop(dpo))
+      {
+	sw_if = dpo_get_urpf(dpo);
+	nh = calloc (1, sizeof(ip46_address_t));
+      }
+
+    /* Careful, this adds a lock on the face if it exists */
+    hicn_face_add(dpo, nh, sw_if, &face_id, 0);
+
+    /* NOTE(review): nh is heap-allocated and not freed here — verify whether
+     * hicn_face_add takes ownership; otherwise this still leaks. */
+
+    vec_validate(vec_faces, index);
+    vec_faces[index] = face_id;
+    index++;
+
+    /* Face creation can realloc load_balance_t? Seem the fib_tracking does so. */
+    dpo_loadbalance = fib_entry_contribute_ip_forwarding (fib_entry->fib_entry_index);
+    lb0 = load_balance_get(dpo_loadbalance->dpoi_index);
+  }
+
+  const hicn_dpo_vft_t * strategy_vft = hicn_dpo_get_vft(fib_entry->dpo_type);
+  int i = 0;
+  /* Drop next hops that no longer appear in the load balance. */
+  while (i < fib_entry->entry_count)
+    {
+      u32 idx_nh = vec_search(vec_faces, fib_entry->next_hops[i]);
+      if (idx_nh == ~0)
+	{
+	  strategy_vft->hicn_dpo_del_nh(fib_entry->next_hops[i], hicn_fib_entry_index);
+	}
+      else
+	{
+	  vec_del1(vec_faces, idx_nh);
+
+	  /* Remove the lock added by hicn_face_add */
+	  hicn_face_unlock_with_id (fib_entry->next_hops[i]);
+	  i++;
+	}
+    }
+
+  /* Whatever remains in vec_faces is a brand new next hop for this entry. */
+  hicn_face_id_t *face_id;
+  vec_foreach(face_id, vec_faces)
+    {
+      strategy_vft->hicn_dpo_add_update_nh(*face_id, hicn_fib_entry_index);
+
+      /* Remove the lock added by hicn_face_add */
+      hicn_face_unlock_with_id (*face_id);
+
+    }
+  vec_free(vec_faces);
+}
+
+/*
+ * Toggle the hicn data punting feature on the ip4-local / ip6-local arc of
+ * the given interface. Invalid interfaces (~0) and other protocols are
+ * silently ignored.
+ */
+static void
+enable_disable_data_receiving (fib_protocol_t proto, u32 sw_if, u8 is_enable)
+{
+  if (sw_if == ~0)
+    return;
+
+  if (proto == FIB_PROTOCOL_IP4)
+    vnet_feature_enable_disable ("ip4-local", "hicn-data-input-ip4",
+				 sw_if, is_enable, 0, 0);
+  else if (proto == FIB_PROTOCOL_IP6)
+    vnet_feature_enable_disable ("ip6-local", "hicn-data-input-ip6",
+				 sw_if, is_enable, 0, 0);
+}
+
+/* Interface-walk callback: enable data punting on the walked interface. */
+walk_rc_t enable_data_receiving_new_fib_entry (vnet_main_t * vnm,
+					       vnet_sw_interface_t * si,
+					       void *ctx)
+{
+  fib_protocol_t proto = *(fib_protocol_t *) ctx;
+
+  enable_disable_data_receiving (proto, si->sw_if_index, 1);
+  return WALK_CONTINUE;
+}
+
+/* Interface-walk callback: disable data punting on the walked interface. */
+walk_rc_t disable_data_receiving_rm_fib_entry (vnet_main_t * vnm,
+					       vnet_sw_interface_t * si,
+					       void *ctx)
+{
+  fib_protocol_t proto = *(fib_protocol_t *) ctx;
+
+  enable_disable_data_receiving (proto, si->sw_if_index, 0);
+  return WALK_CONTINUE;
+}
+
+/*
+ * Enable hICN processing for an ip prefix: mirror the covering route of
+ * vrf 0 into the HICN vrf behind an exclusive hicn strategy dpo, track the
+ * vrf-0 entry so changes resync the faces, and enable data punting on all
+ * interfaces. Idempotent: an already-enabled prefix is only resynced.
+ */
+int
+hicn_route_enable (fib_prefix_t *prefix) {
+
+  int ret = HICN_ERROR_NONE;
+  fib_node_index_t fib_entry_index;
+
+  /* Check if the route already exist in the fib */
+  /*
+   * ASSUMPTION: we use table 0 which is the default table and it is
+   * already existing and locked
+   */
+  u32 fib_index = fib_table_find(prefix->fp_proto, 0);
+
+  fib_entry_index = fib_table_lookup_exact_match (fib_index, prefix);
+
+  if (fib_entry_index == FIB_NODE_INDEX_INVALID)
+    {
+      /*
+       * No exact-match entry in vrf 0: clone the paths of the covering
+       * (longest-prefix-match) entry into a new exact entry so it can be
+       * tracked below.
+       */
+      fib_entry_index = fib_table_lookup (fib_index, prefix);
+
+      fib_route_path_t * paths = fib_entry_encode(fib_entry_index);
+
+      /* NOTE(review): the paths vector returned by fib_entry_encode does not
+       * appear to be freed — verify ownership (possible leak). */
+      fib_table_entry_path_add2(fib_index, prefix, FIB_SOURCE_CLI, FIB_ENTRY_FLAG_NONE, paths);
+    }
+
+  /* Check if the prefix is already enabled */
+  u32 fib_hicn_index = fib_table_find(prefix->fp_proto, HICN_FIB_TABLE);
+
+  fib_node_index_t fib_hicn_entry_index = fib_table_lookup_exact_match (fib_hicn_index, prefix);
+
+  if (fib_hicn_entry_index == FIB_NODE_INDEX_INVALID)
+    {
+      dpo_id_t dpo = DPO_INVALID;
+      index_t dpo_idx;
+      /* New strategy context with the default strategy and no next hops yet. */
+      default_dpo.hicn_dpo_create (prefix->fp_proto, 0, NEXT_HOP_INVALID,
+				   &dpo_idx);
+
+      /* the value we got when we registered */
+      /*
+       * This should be taken from the name?!? the index of the
+       * object
+       */
+      dpo_set (&dpo,
+	       default_dpo.hicn_dpo_get_type (),
+	       (ip46_address_is_ip4 (&prefix->fp_addr) ? DPO_PROTO_IP4 :
+		DPO_PROTO_IP6), dpo_idx);
+
+      hicn_dpo_ctx_t * fib_entry = hicn_strategy_dpo_ctx_get(dpo_idx);
+
+      /* Track the vrf-0 entry so fib back walks resync the faces. */
+      fib_node_init (&fib_entry->fib_node, hicn_fib_node_type);
+      fib_node_lock (&fib_entry->fib_node);
+
+      fib_entry->fib_entry_index = fib_entry_track (fib_index,
+						    prefix,
+						    hicn_fib_node_type,
+						    dpo_idx, &fib_entry->fib_sibling);
+
+
+      /* Here is where we create the "via" like route */
+      /*
+       * For the moment we use the global one the prefix you want
+       * to match Neale suggested -- FIB_SOURCE_HICN the client
+       * that is adding them -- no easy explanation at this time…
+       */
+      CLIB_UNUSED (fib_node_index_t new_fib_node_index) =
+	fib_table_entry_special_dpo_add (fib_hicn_index,
+					 prefix,
+					 hicn_fib_src,
+					 (FIB_ENTRY_FLAG_EXCLUSIVE |
+					  FIB_ENTRY_FLAG_LOOSE_URPF_EXEMPT),
+					 &dpo);
+
+      sync_hicn_fib_entry(fib_entry);
+
+      /* We added a route, therefore add one lock to the table */
+      fib_table_lock (fib_index, prefix->fp_proto, hicn_fib_src);
+
+      /* Enable the feature to punt data packet every time we enable a new hicn route
+       * For each enable there must be a disable to definitely disable the feature
+       *
+       * We cannot enable only the interfaces on which we send out interest because
+       * Data packet might be coming on in different interfaces, as in the case of mpls
+       * tunnels (packets are received from the physical nic, not the mpls tunnel interface).
+       */
+      vnet_main_t * vnm = vnet_get_main ();
+      vnet_sw_interface_walk(vnm, enable_data_receiving_new_fib_entry, &(prefix->fp_proto));
+
+      dpo_unlock (&dpo);
+    }
+  else
+    {
+      const dpo_id_t *load_balance_dpo_id;
+      const dpo_id_t *strategy_dpo_id;
+
+      /* Route already existing. We need to update the dpo. */
+      load_balance_dpo_id =
+	fib_entry_contribute_ip_forwarding (fib_hicn_entry_index);
+
+      /* The dpo is not a load balance dpo as expected */
+      if (load_balance_dpo_id->dpoi_type != DPO_LOAD_BALANCE)
+	{
+	  ret = HICN_ERROR_ROUTE_NO_LD;
+	  goto done;
+	}
+      else
+	{
+	  load_balance_t *lb =
+	    load_balance_get (load_balance_dpo_id->dpoi_index);
+
+	  strategy_dpo_id = load_balance_get_bucket_i (lb, 0);
+
+	  if (!dpo_is_hicn (strategy_dpo_id))
+	    {
+	      ret = HICN_ERROR_ROUTE_DPO_NO_HICN;
+	      goto done;
+	    }
+
+	  if (lb->lb_n_buckets > 1)
+	    {
+	      ret = HICN_ERROR_ROUTE_MLT_LD;
+	      goto done;
+	    }
+
+	  hicn_dpo_ctx_t * hicn_fib_entry = hicn_strategy_dpo_ctx_get(strategy_dpo_id->dpoi_index);
+
+	  /* Already enabled: just resync the faces. */
+	  sync_hicn_fib_entry(hicn_fib_entry);
+	}
+    }
+
+ done:
+  return ret;
+}
+
+/*
+ * Disable hICN processing for a prefix: remove all next hops from the
+ * strategy context, stop tracking the vrf-0 entry, remove the special entry
+ * from the HICN vrf and disable data punting on all interfaces.
+ * Returns HICN_ERROR_ROUTE_NOT_FOUND if the prefix was not enabled.
+ */
+int
+hicn_route_disable (fib_prefix_t *prefix) {
+
+  int ret = HICN_ERROR_NONE;
+
+  /* Check if the prefix is already enabled */
+  u32 fib_hicn_index = fib_table_find(prefix->fp_proto, HICN_FIB_TABLE);
+
+  fib_node_index_t fib_hicn_entry_index = fib_table_lookup_exact_match (fib_hicn_index, prefix);
+
+  if (fib_hicn_entry_index == FIB_NODE_INDEX_INVALID)
+    {
+      return HICN_ERROR_ROUTE_NOT_FOUND;
+    }
+  else
+    {
+      const dpo_id_t *load_balance_dpo_id;
+      const dpo_id_t *strategy_dpo_id;
+      hicn_dpo_ctx_t * hicn_fib_entry;
+
+      /* Route already existing. We need to update the dpo. */
+      load_balance_dpo_id =
+	fib_entry_contribute_ip_forwarding (fib_hicn_entry_index);
+
+      /* The dpo is not a load balance dpo as expected */
+      if (load_balance_dpo_id->dpoi_type != DPO_LOAD_BALANCE)
+	{
+	  ret = HICN_ERROR_ROUTE_NO_LD;
+	  goto done;
+	}
+      else
+	{
+	  load_balance_t *lb =
+	    load_balance_get (load_balance_dpo_id->dpoi_index);
+
+	  strategy_dpo_id = load_balance_get_bucket_i (lb, 0);
+
+	  if (!dpo_is_hicn (strategy_dpo_id))
+	    {
+	      ret = HICN_ERROR_ROUTE_DPO_NO_HICN;
+	      goto done;
+	    }
+
+	  if (lb->lb_n_buckets > 1)
+	    {
+	      ret = HICN_ERROR_ROUTE_MLT_LD;
+	      goto done;
+	    }
+
+	  hicn_fib_entry = hicn_strategy_dpo_ctx_get(strategy_dpo_id->dpoi_index);
+
+	  /* Remove every face (next hop) from the strategy context.
+	   * NOTE(review): if del_nh compacts next_hops while i advances,
+	   * entries may be skipped — verify its semantics. */
+	  for (int i = 0; i < hicn_fib_entry->entry_count; i++)
+	    {
+	      hicn_strategy_dpo_ctx_del_nh(hicn_fib_entry->next_hops[i], hicn_fib_entry);
+	    }
+	}
+
+      fib_entry_untrack(hicn_fib_entry->fib_entry_index, hicn_fib_entry->fib_sibling);
+
+      fib_table_entry_special_remove (fib_hicn_index, prefix, hicn_fib_src);
+
+      /* Disable the feature to punt data packet every time we enable a new hicn route */
+      vnet_main_t * vnm = vnet_get_main ();
+      vnet_sw_interface_walk(vnm, disable_data_receiving_rm_fib_entry, &(prefix->fp_proto));
+    }
+
+ done:
+  return ret;
+}
+
+
+/* fib_node vft callback: map a dpo ctx pool index to its embedded fib node. */
+static fib_node_t *
+hicn_ctx_node_get (fib_node_index_t index)
+{
+  return &(hicn_strategy_dpo_ctx_get (index)->fib_node);
+}
+
+/* fib_node vft callback: intentionally empty — the ctx lifetime is managed
+ * elsewhere (presumably by the strategy dpo machinery; not visible here). */
+static void
+hicn_fib_last_lock_gone (fib_node_t *node)
+{
+}
+
+/* Recover the enclosing hicn_dpo_ctx_t from its embedded fib_node member
+ * (container-of pattern). */
+static hicn_dpo_ctx_t *
+hicn_ctx_from_fib_node (fib_node_t * node)
+{
+  return ((hicn_dpo_ctx_t *) (((char *) node) -
+			      STRUCT_OFFSET_OF (hicn_dpo_ctx_t, fib_node)));
+}
+
+/*
+ * fib back-walk callback: the tracked vrf-0 entry changed, resync the hICN
+ * faces with the new forwarding contribution.
+ */
+static fib_node_back_walk_rc_t
+hicn_fib_back_walk_notify (fib_node_t *node,
+			   fib_node_back_walk_ctx_t *ctx)
+{
+
+  hicn_dpo_ctx_t *fib_entry = hicn_ctx_from_fib_node (node);
+
+  sync_hicn_fib_entry(fib_entry);
+
+  return (FIB_NODE_BACK_WALK_CONTINUE);
+}
+
+/* fib_node vft callback: no memory statistics exposed for hicn fib nodes. */
+static void
+hicn_fib_show_memory (void)
+{
+}
+
+
+/* fib_node vft glueing hicn strategy dpo contexts into the fib graph. */
+static const fib_node_vft_t hicn_fib_vft =
+{
+  .fnv_get = hicn_ctx_node_get,
+  .fnv_last_lock = hicn_fib_last_lock_gone,
+  .fnv_back_walk = hicn_fib_back_walk_notify,
+  .fnv_mem_show = hicn_fib_show_memory,
+};
+
+/*
+ * fib table walk callback: if the walked entry is an hICN one, enable data
+ * punting on the interface whose index is passed in ctx.
+ */
+fib_table_walk_rc_t enable_data_on_existing_hicn(fib_node_index_t fei,
+						 void *ctx)
+{
+  u32 sw_if = *(u32 *)ctx;
+  const dpo_id_t *load_balance_dpo_id;
+  const dpo_id_t *strategy_dpo_id;
+
+  /* Route already existing. We need to update the dpo. */
+  load_balance_dpo_id =
+    fib_entry_contribute_ip_forwarding (fei);
+
+  /* The dpo is not a load balance dpo as expected */
+  if (load_balance_dpo_id->dpoi_type != DPO_LOAD_BALANCE)
+    {
+      goto done;
+    }
+  else
+    {
+      load_balance_t *lb =
+	load_balance_get (load_balance_dpo_id->dpoi_index);
+
+      strategy_dpo_id = load_balance_get_bucket_i (lb, 0);
+
+      /* Non-hicn entries are skipped. */
+      if (!dpo_is_hicn (strategy_dpo_id))
+	{
+	  goto done;
+	}
+
+      enable_disable_data_receiving (strategy_dpo_id->dpoi_proto, sw_if, 1);
+    }
+
+ done:
+  return (FIB_TABLE_WALK_CONTINUE);
+}
+
+/**
+ * @brief Interface add/del callback: bind new mpls interfaces to the hICN
+ * vrf (ip4 and ip6) and enable data punting on them for every hICN prefix
+ * already present in the hICN fib tables.
+ */
+static clib_error_t *
+set_table_interface_add_del (vnet_main_t * vnm, u32 sw_if_index, u32 is_add)
+{
+  /* Nothing to do on interface deletion.
+   * (The original returned HICN_ERROR_NONE — an hICN int error code — from a
+   * clib_error_t * function; return the NULL clib error instead.) */
+  if (!is_add)
+    return 0;
+
+  vnet_sw_interface_t * sw_int = vnet_get_sw_interface(vnm, sw_if_index);
+  vnet_hw_interface_t * hw_int = vnet_get_hw_interface(vnm, sw_int->hw_if_index);
+
+  /* Only interfaces whose hw name contains "mpls" are moved to the hICN vrf. */
+  char * mpls = "mpls";
+  if (strstr((char *)hw_int->name, mpls) == NULL)
+    return 0;
+
+  int rv = ip_table_bind (FIB_PROTOCOL_IP4, sw_if_index, HICN_FIB_TABLE, 1);
+
+  if (!rv)
+    {
+      rv = ip_table_bind (FIB_PROTOCOL_IP6, sw_if_index, HICN_FIB_TABLE, 1);
+
+      if (rv)
+	{
+	  /* An error occurred. Bind the interface back to the default fib */
+	  ip_table_bind (FIB_PROTOCOL_IP4, sw_if_index, 0, 1);
+	}
+    }
+
+  u32 fib_index = fib_table_find(FIB_PROTOCOL_IP4,
+				 HICN_FIB_TABLE);
+  if (fib_index != ~0)
+    {
+      /*
+       * Walk the ip4 and ip6 fib tables to discover existing hicn fib entries.
+       * For each of them we need to enable the feature to punt data packets.
+       */
+      fib_table_walk(fib_index,
+		     FIB_PROTOCOL_IP4,
+		     enable_data_on_existing_hicn,
+		     &sw_if_index);
+    }
+
+  fib_index = fib_table_find(FIB_PROTOCOL_IP6,
+			     HICN_FIB_TABLE);
+  if (fib_index != ~0)
+    {
+      fib_table_walk(fib_index,
+		     FIB_PROTOCOL_IP6,
+		     enable_data_on_existing_hicn,
+		     &sw_if_index);
+    }
+
+  return rv ? clib_error_return (0, "unable to add hicn table to interface") : 0;
+}
+
+VNET_SW_INTERFACE_ADD_DEL_FUNCTION (set_table_interface_add_del);
+
+/*
+ * Initialize hICN routing: allocate the hicn fib source and fib node type,
+ * create the hICN vrfs (ip4/ip6), and bring up a loopback interface carrying
+ * 127.0.0.1/32 and ::1/128 (the localhost addresses used as udp-tunnel face
+ * next hops in sync_hicn_fib_entry).
+ */
+void
+hicn_route_init ()
+{
+  vnet_main_t * vnm = vnet_get_main ();
+  vlib_main_t * vm = vlib_get_main ();
+  hicn_fib_src = fib_source_allocate ("hicn",
+				      FIB_SOURCE_HICN, FIB_SOURCE_BH_API);
+
+  hicn_fib_node_type = fib_node_register_new_type(&hicn_fib_vft);
+
+  ip_table_create(FIB_PROTOCOL_IP4, HICN_FIB_TABLE, 1, (const u8 *)"hicn4");
+  ip_table_create(FIB_PROTOCOL_IP6, HICN_FIB_TABLE, 1, (const u8 *)"hicn6");
+
+  u32 sw_if_index;
+  u8 mac_address[6];	/* NOTE(review): left uninitialized; presumably unused
+			 * because is_specified == 0 — confirm. */
+  u8 is_specified = 0;
+  u32 user_instance = 0;
+
+  vnet_create_loopback_interface (&sw_if_index, mac_address,
+				  is_specified, user_instance);
+
+  localhost4.as_u8[0] = 127;	/* 127.0.0.1 */
+  localhost4.as_u8[3] = 1;
+  u32 length4 = 32, length6 = 128, is_del = 0, flags = 0;
+
+  localhost6.as_u8[15] = 1;	/* ::1 */
+
+  ip4_add_del_interface_address (vm, sw_if_index, &localhost4, length4, is_del);
+  ip6_add_del_interface_address (vm, sw_if_index, &localhost6, length6, is_del);
+
+  flags |= VNET_SW_INTERFACE_FLAG_ADMIN_UP;
+  vnet_sw_interface_set_flags (vnm, sw_if_index, flags);
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/network/route.h b/hicn-plugin/src/network/route.h
new file mode 100644
index 000000000..a1ba86b3d
--- /dev/null
+++ b/hicn-plugin/src/network/route.h
@@ -0,0 +1,130 @@
+/*
+ * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HICN_ROUTE__
+#define __HICN_ROUTE__
+
+#include <vlib/vlib.h>
+#include <vppinfra/error.h>
+#include "hicn.h"
+#include "faces/face.h"
+
+/**
+ * @file route.h
+ *
+ * hICN uses a specific vrf to install the routes for a prefix that has been
+ * enabled to be hicn. It considers the vrf 0 (the default vrf) as the dominating
+ * vrf on which every route is stored. Enabling a prefix to be hICN will copy all
+ * the routes in the vrf 0 for the given prefix into the vrf HICN. Every
+ * modification made on the vrf 0 on an hICN enabled prefix is reflected in the
+ * vrf hICN (through the use of the fib entry tracking functionality). Moreover,
+ * we use the lookup in the vrf hICN as a way of punting packets that must be
+ * processed as hICN. The implementation will install a special dpo as a single
+ * next hop for the vpp load balancer for each entry in the vrf hICN that we
+ * enabled. Such dpo will have two purposes: 1) to punt packets to the hICN
+ * forwarding pipeline, 2) to point to the right strategy (the dpoi_index will
+ * be an index to the strategy context while the dpoi_type will be an index to
+ * the strategy vft).
+ *
+ * Additionally, hICN assign each interface to the vrf hICN; this is required for
+ * the interest lookup. Vpp performs a lookup in the vrf assigned to the interface,
+ * therefore if an interface is not assigned to the hICN vrf, the lookup will be done
+ * on the vrf 0 and the packet won't be processed through the hicn forwarding pipeline.
+ */
+
+/*
+ * Adding each interface to the vrf hICN has the side effect that to ping you need to
+ * specify the vrf hICN in the command.
+ */
+
+extern fib_source_t hicn_fib_src;
+
+extern dpo_type_t udp_encap_dpo_types[FIB_PROTOCOL_MAX];
+
+/**
+ * @Brief Return the hicn_dpo corresponding to the prefix in teh vrf HICN
+ *
+ * @param prefix Prefix for which we want to retrieve the hICN dpo
+ * @param hicn_dpo return value with the hicn_dpo
+ * @param fib_index return value with the fib index corresponding to the prefix
+ */
+int
+hicn_route_get_dpo (const fib_prefix_t * prefix,
+ const dpo_id_t ** hicn_dpo, u32 * fib_index);
+
+
+/**
+ * @Brief Set the strategy for a given prefix
+ *
+ * @param prefix Prefix for which we set the strategy
+ * @param strategy_id Index of the strategy to set
+ */
+int
+hicn_route_set_strategy (fib_prefix_t * prefix, u8 strategy_id);
+
+/**
+ * @Brief Helper to add a next hop in the vrf 0. If there are no entries in
+ * the vrf 0 that match the prefix (epm), a new one is created.
+ *
+ * @param fib_proto FIB_PROTOCOL_IP6 or FIB_PROTOCOL_IP4 (mpls not supported)
+ * @param pfx Prefix for which to add a next hop
+ * @param nh Next hop to add
+ * @param sw_if Software interface index to add in the next hop
+ */
+int
+ip_nh_add_helper (fib_protocol_t fib_proto, const fib_prefix_t * pfx, ip46_address_t * nh, u32 sw_if);
+
+/**
+ * @Brief Helper to remove a next hop in the vrf 0. If there are no entries
+ * in the vrf 0 nothing happens.
+ *
+ * @param fib_proto FIB_PROTOCOL_IP6 or FIB_PROTOCOL_IP4 (mpls not supported)
+ * @param pfx Prefix for which to remove a next hop
+ * @param nh Next hop to remove
+ * @param sw_if Software interface index in the next hop definition
+ */
+int
+ip_nh_del_helper (fib_protocol_t fib_proto, const fib_prefix_t * rpfx, ip46_address_t * nh, u32 sw_if);
+
+/**
+ * @Brief Enable an hICN for an ip prefix
+ *
+ * @param prefix Prefix for which we enable hICN
+ * @return HICN_ERROR_NONE if hICN was enabled on the prefix
+ * HICN_ERROR_ROUTE_NO_LD if the first dpo for the fib entry corresponding to the prefix is not a load_balancer
+ * HICN_ERROR_ROUTE_DPO_NO_HICN if the loadbalancer in the vrf HICN already contains a dpo which is not an hICN one
+ * HICN_ERROR_ROUTE_MLT_LD if there are more than a dpo in the vpp loadbalancer
+ */
+int
+hicn_route_enable (fib_prefix_t *prefix);
+
+/**
+ * @Brief Disable an hICN for an ip prefix. If hICN wasn't enable on the prefix
+ * nothing happens and it returns HICN_ERROR_ROUTE_NOT_FOUND
+ *
+ * @param prefix Prefix for which we disable hICN
+ */
+int
+hicn_route_disable (fib_prefix_t *prefix);
+
+
+/* Init route internal structures */
+void
+hicn_route_init();
+#endif /* //__HICN_ROUTE__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/network/state.h b/hicn-plugin/src/network/state.h
new file mode 100644
index 000000000..37003d0ae
--- /dev/null
+++ b/hicn-plugin/src/network/state.h
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HICN_STATE__
+#define __HICN_STATE__
+
+#include <netinet/in.h>
+#include <vnet/buffer.h>
+
+#include "hicn.h"
+#include "pcs.h"
+#include "hashtb.h"
+#include "strategy.h"
+#include "strategy_dpo_ctx.h"
+#include "strategy_dpo_manager.h"
+
+/**
+ * @file plugin_state
+ *
+ * Helper functions for the hicn state (hash node, hash entry, strategy vft, dpo vft and dpo context id)
+ *
+ */
+
+//TODO exploit this state to prefetch hash nodes and entries.
+
+/**
+ * @brief Retrieve the hicn state
+ *
+ * @param hicnb hicn buffer used to retrieve the hicn state
+ * @param pitcs pointer to PIT/CS
+ * @param node node in the hash table referring to the buffer
+ * @param strategy_vft return value pointing to the strategy vft corresponding to the buffer
+ * @param dpo_vft return value pointing to the dpo vft corresponding to the buffer
+ * @param dpo_ctx_id return value pointing to the dpo context id corresponding to the buffer
+ * @param hash_entry entry in the hash table referring to the buffer
+ */
+always_inline void
+hicn_get_internal_state (hicn_buffer_t * hicnb, hicn_pit_cs_t * pitcs,
+			 hicn_hash_node_t ** node,
+			 const hicn_strategy_vft_t ** strategy_vft,
+			 const hicn_dpo_vft_t ** dpo_vft, u8 * dpo_ctx_id,
+			 hicn_hash_entry_t ** hash_entry)
+{
+  /* Resolve the indices cached in the hicn buffer into actual objects */
+  *node = pool_elt_at_index (pitcs->pcs_table->ht_nodes, hicnb->node_id);
+  *strategy_vft = hicn_dpo_get_strategy_vft (hicnb->vft_id);
+  *dpo_vft = hicn_dpo_get_vft (hicnb->vft_id);
+  *dpo_ctx_id = hicnb->dpo_ctx_id;
+
+  /* The bucket is looked up in a different pool depending on whether it
+   * was flagged as an overflow bucket when the state was stored */
+  hicn_hash_bucket_t *bucket;
+  if (hicnb->hash_bucket_flags & HICN_HASH_NODE_OVERFLOW_BUCKET)
+    bucket =
+      pool_elt_at_index (pitcs->pcs_table->ht_overflow_buckets,
+			 hicnb->bucket_id);
+  else
+    bucket =
+      (hicn_hash_bucket_t *) (pitcs->pcs_table->ht_buckets +
+			      hicnb->bucket_id);
+
+  *hash_entry = &(bucket->hb_entries[hicnb->hash_entry_id]);
+}
+
+/*
+ * This function set the PCS entry index, the dpo index and the vft index in
+ * the opaque2 buffer. In this way, the interest-hitpit and interest-hitcs
+ * nodes can prefetch the corresponding state (PIT entry, dpo_ctx and the
+ * strategy vft
+ */
+/**
+ * @brief Store the hicn state in the hicn buffer
+ *
+ * @param b vlib buffer holding the hICN packet
+ * @param name_hash hash of the hICN name
+ * @param node_id id of the node in the hash table referring to the buffer
+ * @param dpo_ctx_id id of the dpo context id corresponding to the buffer
+ * @param vft_id id of the strategy vft corresponding to the buffer
+ * @param hash_entry_id id of the entry in the hash table referring to the buffer
+ * @param bucket_id id of the hash table bucket that holds the hash entry
+ * @param bucket_is_overflow 1 if the bucket is from the ht_overflow_buckets pool
+ * 0 if the bucket is from the ht_buckets pool
+ */
+always_inline void
+hicn_store_internal_state (vlib_buffer_t * b, u64 name_hash, u32 node_id,
+			   u8 dpo_ctx_id, u8 vft_id, u8 hash_entry_id,
+			   u32 bucket_id, u8 bucket_is_overflow)
+{
+  hicn_buffer_t *hicnb = hicn_get_buffer (b);
+  hicnb->name_hash = name_hash;
+  hicnb->node_id = node_id;
+  hicnb->dpo_ctx_id = dpo_ctx_id;
+  hicnb->vft_id = vft_id;
+  hicnb->hash_entry_id = hash_entry_id;
+  hicnb->bucket_id = bucket_id;
+  /* bucket_is_overflow is expected to be 0 or 1, so this either sets the
+   * overflow flag or clears the flags field */
+  hicnb->hash_bucket_flags =
+    HICN_HASH_NODE_OVERFLOW_BUCKET * bucket_is_overflow;
+}
+
+#endif /* // __HICN_STATE__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/network/strategies/dpo_mw.c b/hicn-plugin/src/network/strategies/dpo_mw.c
new file mode 100644
index 000000000..12c77bce8
--- /dev/null
+++ b/hicn-plugin/src/network/strategies/dpo_mw.c
@@ -0,0 +1,161 @@
+/*
+ * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "dpo_mw.h"
+#include "strategy_mw.h"
+#include "../strategy_dpo_manager.h"
+#include "../strategy_dpo_ctx.h"
+
+/**
+ * @brief DPO type value for the mw_strategy
+ */
+static dpo_type_t hicn_dpo_type_mw;
+
+static const hicn_dpo_vft_t hicn_dpo_mw_vft = {
+ .hicn_dpo_is_type = &hicn_dpo_is_type_strategy_mw,
+ .hicn_dpo_get_type = &hicn_dpo_strategy_mw_get_type,
+ .hicn_dpo_module_init = &hicn_dpo_strategy_mw_module_init,
+ .hicn_dpo_create = &hicn_strategy_mw_ctx_create,
+ .hicn_dpo_add_update_nh = &hicn_strategy_mw_ctx_add_nh,
+ .hicn_dpo_del_nh = &hicn_strategy_mw_ctx_del_nh,
+ .hicn_dpo_format = &hicn_strategy_mw_format_ctx
+};
+
+/* Return 1 if @p dpo belongs to the maximum weight strategy, 0 otherwise. */
+int
+hicn_dpo_is_type_strategy_mw (const dpo_id_t * dpo)
+{
+  return (hicn_dpo_type_mw == dpo->dpoi_type);
+}
+
+/**
+ * @brief Register the maximum weight strategy dpo type with the dpo manager.
+ *
+ * The returned type value is cached in hicn_dpo_type_mw for later checks.
+ */
+void
+hicn_dpo_strategy_mw_module_init (void)
+{
+  /*
+   * Register our type of dpo
+   */
+  hicn_dpo_type_mw =
+    hicn_dpo_register_new_type (hicn_nodes_strategy, &hicn_dpo_mw_vft,
+				hicn_mw_strategy_get_vft (),
+				&dpo_strategy_mw_ctx_vft);
+}
+
+/* Return the dpo type registered for the maximum weight strategy. */
+dpo_type_t
+hicn_dpo_strategy_mw_get_type (void)
+{
+  return hicn_dpo_type_mw;
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+
+/**
+ * @brief Variadic wrapper around format_hicn_strategy_mw_ctx.
+ *
+ * Fix: the va_list was started but never released; the C standard requires
+ * a matching va_end before the function returns.
+ */
+u8 *
+hicn_strategy_mw_format_ctx (u8 * s, int n, ...)
+{
+  va_list args;
+  va_start (args, n);
+  s = format_hicn_strategy_mw_ctx (s, &args);
+  va_end (args);
+  return s;
+}
+
+/**
+ * @brief Format the mw strategy dpo ctx (va_arg: index, indent).
+ *
+ * Fixes: stray double semicolon after the indent va_arg; the temporary
+ * vec returned by format (NULL, ...) was leaked on every iteration.
+ */
+u8 *
+format_hicn_strategy_mw_ctx (u8 * s, va_list * ap)
+{
+  int i = 0;
+  index_t index = va_arg (*ap, index_t);
+  hicn_dpo_ctx_t *dpo_ctx = NULL;
+  hicn_strategy_mw_ctx_t *mw_dpo_ctx = NULL;
+  u32 indent = va_arg (*ap, u32);
+
+  dpo_ctx = hicn_strategy_dpo_ctx_get (index);
+  if (dpo_ctx == NULL)
+    return s;
+
+  mw_dpo_ctx = (hicn_strategy_mw_ctx_t *) dpo_ctx->data;
+
+  s = format (s, "hicn-mw");
+  for (i = 0; i < HICN_PARAM_FIB_ENTRY_NHOPS_MAX; i++)
+    {
+      u8 *buf = NULL;
+      if (i < dpo_ctx->entry_count)
+	buf = format (NULL, "FIB");
+      else if (i >=
+	       HICN_PARAM_FIB_ENTRY_NHOPS_MAX - dpo_ctx->tfib_entry_count)
+	buf = format (NULL, "TFIB");
+      else
+	continue;
+
+      s = format (s, "\n");
+      s =
+	format (s, "%U ", format_hicn_face, dpo_ctx->next_hops[i],
+		indent);
+      s = format (s, "weight %u", mw_dpo_ctx->weight[i]);
+      s = format (s, " %s", buf);
+      vec_free (buf);
+    }
+
+  return (s);
+}
+
+/**
+ * @brief Allocate and initialize a new mw strategy dpo ctx.
+ *
+ * @param proto Protocol the dpo is meant for
+ * @param next_hop List of next hops to store in the ctx
+ * @param nh_len Number of next hops in the list
+ * @param dpo_idx Output: index of the newly created dpo ctx
+ */
+void
+hicn_strategy_mw_ctx_create (fib_protocol_t proto, const hicn_face_id_t * next_hop,
+			     int nh_len, index_t * dpo_idx)
+{
+  hicn_strategy_mw_ctx_t *hicn_strategy_mw_ctx;
+  hicn_dpo_ctx_t *hicn_strategy_ctx;
+
+  /* Allocate a hicn_dpo_ctx on the vpp pool and initialize it */
+  hicn_strategy_ctx = hicn_strategy_dpo_ctx_alloc ();
+  hicn_strategy_mw_ctx = (hicn_strategy_mw_ctx_t *) hicn_strategy_ctx->data;
+
+  *dpo_idx = hicn_strategy_dpo_ctx_get_index (hicn_strategy_ctx);
+
+  init_dpo_ctx (hicn_strategy_ctx, next_hop, nh_len, hicn_dpo_type_mw, proto);
+
+  /* weight[] is a u8 array of HICN_PARAM_FIB_ENTRY_NHOPS_MAX entries, so
+   * the byte count equals the element count here */
+  memset (hicn_strategy_mw_ctx->weight, 0, HICN_PARAM_FIB_ENTRY_NHOPS_MAX);
+}
+
+/**
+ * @brief Add or update a next hop in the mw dpo ctx, assigning the
+ * default weight to the new entry.
+ *
+ * Fix: cast the data area (hicn_strategy_dpo_ctx->data), not the address
+ * of the member, for consistency with hicn_strategy_mw_ctx_create and
+ * format_hicn_strategy_mw_ctx.
+ *
+ * @param nh Next hop to insert in the dpo ctx
+ * @param dpo_idx Index of the dpo ctx to update
+ * @return HICN_ERROR_NONE on success, HICN_ERROR_STRATEGY_NOT_FOUND if
+ * the dpo ctx does not exist
+ */
+int
+hicn_strategy_mw_ctx_add_nh (hicn_face_id_t nh, index_t dpo_idx)
+{
+  hicn_dpo_ctx_t *hicn_strategy_dpo_ctx = hicn_strategy_dpo_ctx_get (dpo_idx);
+  u8 pos = 0;
+
+  if (hicn_strategy_dpo_ctx == NULL)
+    {
+      return HICN_ERROR_STRATEGY_NOT_FOUND;
+    }
+
+  hicn_strategy_dpo_ctx_add_nh (nh, hicn_strategy_dpo_ctx, &pos);
+  hicn_strategy_mw_ctx_t *hicn_strategy_mw_ctx =
+    (hicn_strategy_mw_ctx_t *) hicn_strategy_dpo_ctx->data;
+
+  hicn_strategy_mw_ctx->weight[pos] = DEFAULT_WEIGHT;
+  return HICN_ERROR_NONE;
+}
+
+/**
+ * @brief Delete a next hop from the mw dpo ctx.
+ *
+ * Fix: guard against a missing dpo ctx (as hicn_strategy_mw_ctx_add_nh
+ * already does) instead of passing NULL to the generic del_nh helper.
+ *
+ * @param face_id Face identifier of the next hop to remove
+ * @param dpo_idx Index of the dpo ctx to update
+ * @return HICN_ERROR_NONE on success, HICN_ERROR_STRATEGY_NOT_FOUND if
+ * the dpo ctx does not exist
+ */
+int
+hicn_strategy_mw_ctx_del_nh (hicn_face_id_t face_id, index_t dpo_idx)
+{
+  hicn_dpo_ctx_t *hicn_strategy_dpo_ctx = hicn_strategy_dpo_ctx_get (dpo_idx);
+
+  if (hicn_strategy_dpo_ctx == NULL)
+    return HICN_ERROR_STRATEGY_NOT_FOUND;
+
+  //No need to flush the weights, they are initialized when a dpo_ctx is created;
+  return hicn_strategy_dpo_ctx_del_nh (face_id, hicn_strategy_dpo_ctx);
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/hicn-plugin/src/network/strategies/dpo_mw.h b/hicn-plugin/src/network/strategies/dpo_mw.h
new file mode 100644
index 000000000..433c415fb
--- /dev/null
+++ b/hicn-plugin/src/network/strategies/dpo_mw.h
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HICN_DPO_MW_H__
+#define __HICN_DPO_MW_H__
+
+#include <vnet/dpo/dpo.h>
+#include "../strategy_dpo_ctx.h"
+
+/**
+ * @file dpo_mw.h
+ *
+ * This file implements the strategy vtf (see strategy.h) and
+ * the dpo vft (see strategy_dpo_manager.h) for the strategy
+ * maximum weight
+ */
+
+#define DEFAULT_WEIGHT 0
+
+typedef struct hicn_strategy_mw_ctx_s
+{
+ u8 weight[HICN_PARAM_FIB_ENTRY_NHOPS_MAX];
+} hicn_strategy_mw_ctx_t;
+
+/**
+ * @brief Format the dpo ctx for a human-readable string
+ *
+ * @param s String to which to append the formatted dpo ctx
+ * @param ap List of parameters for the formatting
+ *
+ * @result The string with the formatted dpo ctx
+ */
+u8 *format_hicn_strategy_mw_ctx (u8 * s, va_list * ap);
+
+const static dpo_vft_t dpo_strategy_mw_ctx_vft = {
+ .dv_lock = hicn_strategy_dpo_ctx_lock,
+ .dv_unlock = hicn_strategy_dpo_ctx_unlock,
+ .dv_format = format_hicn_strategy_mw_ctx,
+};
+
+/**
+ * @brief Retrieve an hicn_strategy_mw_ctx object
+ *
+ * @param index Index of the hicn_dpo_ctx to retrieve
+ * @return The hicn_dpo_ctx object or NULL
+ */
+hicn_dpo_ctx_t *hicn_strategy_mw_ctx_get (index_t index);
+
+/**
+ * @brief Create a new mw ctx
+ *
+ * @param proto The protocol to which the dpo is meant for (see vpp docs)
+ * @param next_hop A list of next hops to be inserted in the dpo ctx
+ * @param nh_len Size of the list
+ * @param dpo_idx index_t that will hold the index of the created dpo ctx
+ * @return HICN_ERROR_NONE if the creation was fine, otherwise EINVAL
+ */
+void
+hicn_strategy_mw_ctx_create (fib_protocol_t proto, const hicn_face_id_t * next_hop,
+ int nh_len, index_t * dpo_idx);
+
+/**
+ * @brief Add or update a next hop in the dpo ctx.
+ *
+ * This function is meant to be used in the control plane and not in the data plane,
+ * as it is not optimized for the latter.
+ *
+ * @param nh Next hop to insert in the dpo ctx
+ * @param dpo_idx Index of the dpo ctx to update with the new or updated next
+ * hop
+ * @return HICN_ERROR_NONE if the update or insert was fine,
+ * otherwise HICN_ERROR_DPO_CTX_NOT_FOUND
+ */
+int hicn_strategy_mw_ctx_add_nh (hicn_face_id_t nh, index_t dpo_idx);
+
+/**
+ * @brief Delete a next hop in the dpo ctx.
+ *
+ * @param face_id Face identifier of the next hop
+ * @param dpo_idx Index of the dpo ctx to update with the new or updated next
+ * hop
+ * @return HICN_ERROR_NONE if the update or insert was fine,
+ * otherwise HICN_ERROR_DPO_CTS_NOT_FOUND
+ */
+int hicn_strategy_mw_ctx_del_nh (hicn_face_id_t face_id, index_t dpo_idx);
+
+/**
+ * @brief Prefetch a dpo
+ *
+ * @param dpo_idx Index of the dpo ctx to prefetch
+ */
+void hicn_strategy_mw_ctx_prefetch (index_t dpo_idx);
+
+/**
+ * @brief Return true if the dpo is of type strategy mw
+ *
+ * @param dpo Dpo to check the type
+ */
+int hicn_dpo_is_type_strategy_mw (const dpo_id_t * dpo);
+
+/**
+ * @brief Initialize the Maximum Weight strategy
+ */
+void hicn_dpo_strategy_mw_module_init (void);
+
+/**
+ * @brief Return the dpo type for the Maximum Weight strategy
+ */
+dpo_type_t hicn_dpo_strategy_mw_get_type (void);
+
+/**
+ * @brief Format the dpo ctx for the strategy Maximum Weight
+ *
+ * @param s String to append the formatted dpo ctx
+ * @param ap List of arguments to format
+ */
+u8 *format_hicn_dpo_strategy_mw (u8 * s, va_list * ap);
+
+/**
+ * @brief Format the dpo ctx for the strategy Maximum Weight. To
+ * call from other functions
+ *
+ * @param s String to append the formatted dpo ctx
+ * @param ... List of arguments to format
+ */
+u8 *hicn_strategy_mw_format_ctx (u8 * s, int n, ...);
+
+
+#endif // __HICN_DPO_MW_H__
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/hicn-plugin/src/network/strategies/dpo_rr.c b/hicn-plugin/src/network/strategies/dpo_rr.c
new file mode 100644
index 000000000..adb7e1025
--- /dev/null
+++ b/hicn-plugin/src/network/strategies/dpo_rr.c
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "dpo_rr.h"
+#include "strategy_rr.h"
+#include "../strategy_dpo_manager.h"
+#include "../strategy_dpo_ctx.h"
+
+/**
+ * @brief DPO type value for the rr_strategy
+ */
+static dpo_type_t hicn_dpo_type_rr;
+
+static const hicn_dpo_vft_t hicn_dpo_rr_vft = {
+ .hicn_dpo_is_type = &hicn_dpo_is_type_strategy_rr,
+ .hicn_dpo_get_type = &hicn_dpo_strategy_rr_get_type,
+ .hicn_dpo_module_init = &hicn_dpo_strategy_rr_module_init,
+ .hicn_dpo_create = &hicn_strategy_rr_ctx_create,
+ .hicn_dpo_add_update_nh = &hicn_strategy_rr_ctx_add_nh,
+ .hicn_dpo_del_nh = &hicn_strategy_rr_ctx_del_nh,
+ .hicn_dpo_format = &hicn_strategy_rr_format_ctx
+};
+
+/* Return 1 if @p dpo belongs to the round robin strategy, 0 otherwise. */
+int
+hicn_dpo_is_type_strategy_rr (const dpo_id_t * dpo)
+{
+  return (hicn_dpo_type_rr == dpo->dpoi_type);
+}
+
+/**
+ * @brief Register the round robin strategy dpo type with the dpo manager.
+ *
+ * The returned type value is cached in hicn_dpo_type_rr for later checks.
+ */
+void
+hicn_dpo_strategy_rr_module_init (void)
+{
+  /*
+   * Register our type of dpo
+   */
+  hicn_dpo_type_rr =
+    hicn_dpo_register_new_type (hicn_nodes_strategy, &hicn_dpo_rr_vft,
+				hicn_rr_strategy_get_vft (),
+				&dpo_strategy_rr_ctx_vft);
+}
+
+/* Return the dpo type registered for the round robin strategy. */
+dpo_type_t
+hicn_dpo_strategy_rr_get_type (void)
+{
+  return hicn_dpo_type_rr;
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+
+/**
+ * @brief Variadic wrapper around format_hicn_strategy_rr_ctx.
+ *
+ * Fix: the va_list was started but never released; the C standard requires
+ * a matching va_end before the function returns.
+ */
+u8 *
+hicn_strategy_rr_format_ctx (u8 * s, int n, ...)
+{
+  va_list args;
+  va_start (args, n);
+  s = format_hicn_strategy_rr_ctx (s, &args);
+  va_end (args);
+  return s;
+}
+
+/**
+ * @brief Format the rr strategy dpo ctx (va_arg: index, indent).
+ *
+ * Fix: the temporary vec returned by format (NULL, ...) was leaked on
+ * every iteration of the next-hop loop.
+ */
+u8 *
+format_hicn_strategy_rr_ctx (u8 * s, va_list * ap)
+{
+  int i = 0;
+  index_t index = va_arg (*ap, index_t);
+  hicn_dpo_ctx_t *dpo_ctx = NULL;
+  hicn_strategy_rr_ctx_t *rr_dpo_ctx = NULL;
+  u32 indent = va_arg (*ap, u32);
+
+  dpo_ctx = hicn_strategy_dpo_ctx_get (index);
+  if (dpo_ctx == NULL)
+    return s;
+
+  rr_dpo_ctx = (hicn_strategy_rr_ctx_t *) dpo_ctx->data;
+
+  s =
+    format (s, "hicn-rr, next hop Face %d",
+	    dpo_ctx->next_hops[rr_dpo_ctx->current_nhop]);
+
+  for (i = 0; i < HICN_PARAM_FIB_ENTRY_NHOPS_MAX; i++)
+    {
+      u8 *buf = NULL;
+      if (i < dpo_ctx->entry_count)
+	buf = format (NULL, "FIB");
+      else if (i >=
+	       HICN_PARAM_FIB_ENTRY_NHOPS_MAX - dpo_ctx->tfib_entry_count)
+	buf = format (NULL, "TFIB");
+      else
+	continue;
+
+      s = format (s, "\n");
+      s =
+	format (s, "%U ", format_hicn_face, dpo_ctx->next_hops[i],
+		indent);
+      s = format (s, " %s", buf);
+      vec_free (buf);
+    }
+
+  return (s);
+}
+
+/**
+ * @brief Allocate and initialize a new round robin strategy dpo ctx.
+ *
+ * @param proto Protocol the dpo is meant for
+ * @param next_hop List of next hops to store in the ctx
+ * @param nh_len Number of next hops in the list
+ * @param dpo_idx Output: index of the newly created dpo ctx
+ */
+void
+hicn_strategy_rr_ctx_create (fib_protocol_t proto, const hicn_face_id_t * next_hop,
+			     int nh_len, index_t * dpo_idx)
+{
+  hicn_strategy_rr_ctx_t *hicn_strategy_rr_ctx;
+  hicn_dpo_ctx_t *hicn_strategy_ctx;
+
+  /* Allocate a hicn_dpo_ctx on the vpp pool and initialize it */
+  hicn_strategy_ctx = hicn_strategy_dpo_ctx_alloc ();
+  hicn_strategy_rr_ctx = (hicn_strategy_rr_ctx_t *) hicn_strategy_ctx->data;
+
+  *dpo_idx = hicn_strategy_dpo_ctx_get_index (hicn_strategy_ctx);
+
+  init_dpo_ctx (hicn_strategy_ctx, next_hop, nh_len, hicn_dpo_type_rr, proto);
+
+  /* Start the round robin from the first next hop */
+  hicn_strategy_rr_ctx->current_nhop = 0;
+}
+
+/**
+ * @brief Add or update a next hop in the rr dpo ctx.
+ *
+ * @param nh Next hop to insert in the dpo ctx
+ * @param dpo_idx Index of the dpo ctx to update
+ * @return HICN_ERROR_NONE on success, HICN_ERROR_STRATEGY_NOT_FOUND if
+ * the dpo ctx does not exist
+ */
+int
+hicn_strategy_rr_ctx_add_nh (hicn_face_id_t nh, index_t dpo_idx)
+{
+  hicn_dpo_ctx_t *hicn_strategy_dpo_ctx = hicn_strategy_dpo_ctx_get (dpo_idx);
+  u8 pos = 0;
+
+  if (hicn_strategy_dpo_ctx == NULL)
+    {
+      return HICN_ERROR_STRATEGY_NOT_FOUND;
+    }
+
+  hicn_strategy_dpo_ctx_add_nh (nh, hicn_strategy_dpo_ctx, &pos);
+  //nothing else to initialize in this strategy
+  return HICN_ERROR_NONE;
+}
+
+/**
+ * @brief Delete a next hop from the rr dpo ctx.
+ *
+ * Fix: guard against a missing dpo ctx (as hicn_strategy_rr_ctx_add_nh
+ * already does) instead of passing NULL to the generic del_nh helper.
+ *
+ * @param face_id Face identifier of the next hop to remove
+ * @param dpo_idx Index of the dpo ctx to update
+ * @return HICN_ERROR_NONE on success, HICN_ERROR_STRATEGY_NOT_FOUND if
+ * the dpo ctx does not exist
+ */
+int
+hicn_strategy_rr_ctx_del_nh (hicn_face_id_t face_id, index_t dpo_idx)
+{
+  hicn_dpo_ctx_t *hicn_strategy_dpo_ctx = hicn_strategy_dpo_ctx_get (dpo_idx);
+
+  if (hicn_strategy_dpo_ctx == NULL)
+    return HICN_ERROR_STRATEGY_NOT_FOUND;
+
+  //No need to change the current_nhop. It will be updated at the next selection.
+  return hicn_strategy_dpo_ctx_del_nh (face_id, hicn_strategy_dpo_ctx);
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/hicn-plugin/src/network/strategies/dpo_rr.h b/hicn-plugin/src/network/strategies/dpo_rr.h
new file mode 100644
index 000000000..e4e5b5372
--- /dev/null
+++ b/hicn-plugin/src/network/strategies/dpo_rr.h
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HICN_DPO_RR_H__
+#define __HICN_DPO_RR_H__
+
+#include <vnet/dpo/dpo.h>
+#include "../strategy_dpo_ctx.h"
+
+/**
+ * @file dpo_rr.h
+ *
+ * This file implements the strategy vtf (see strategy.h) and
+ * the dpo vft (see strategy_dpo_manager.h) for the strategy
+ * round robin.
+ */
+
+
+/**
+ * Context for the Round Robin strategy
+ */
+
+typedef struct hicn_strategy_rr_ctx_s
+{
+ u8 current_nhop;
+} hicn_strategy_rr_ctx_t;
+
+/**
+ * @brief Format the dpo ctx for a human-readable string
+ *
+ * @param s String to which to append the formatted dpo ctx
+ * @param ap List of parameters for the formatting
+ *
+ * @result The string with the formatted dpo ctx
+ */
+u8 *format_hicn_strategy_rr_ctx (u8 * s, va_list * ap);
+
+const static dpo_vft_t dpo_strategy_rr_ctx_vft = {
+ .dv_lock = hicn_strategy_dpo_ctx_lock,
+ .dv_unlock = hicn_strategy_dpo_ctx_unlock,
+ .dv_format = format_hicn_strategy_rr_ctx,
+};
+
+/**
+ * @brief Retrieve an hicn_strategy_rr_ctx object
+ *
+ * @param index Index of the hicn_dpo_ctx to retrieve
+ * @return The hicn_dpo_ctx object or NULL
+ */
+hicn_dpo_ctx_t *hicn_strategy_rr_ctx_get (index_t index);
+
+/**
+ * @brief Create a new round robin ctx
+ *
+ * @param proto The protocol to which the dpo is meant for (see vpp docs)
+ * @param next_hop A list of next hops to be inserted in the dpo ctx
+ * @param nh_len Size of the list
+ * @param dpo_idx index_t that will hold the index of the created dpo ctx
+ * @return HICN_ERROR_NONE if the creation was fine, otherwise EINVAL
+ */
+void
+hicn_strategy_rr_ctx_create (fib_protocol_t proto, const hicn_face_id_t * next_hop,
+ int nh_len, index_t * dpo_idx);
+
+/**
+ * @brief Add or update a next hop in the dpo ctx.
+ *
+ * This function is meant to be used in the control plane and not in the data plane,
+ * as it is not optimized for the latter.
+ *
+ * @param nh Next hop to insert in the dpo ctx
+ * @param dpo_idx Index of the dpo ctx to update with the new or updated next
+ * hop
+ * @return HICN_ERROR_NONE if the update or insert was fine,
+ * otherwise HICN_ERROR_DPO_CTX_NOT_FOUND
+ */
+int hicn_strategy_rr_ctx_add_nh (hicn_face_id_t nh, index_t dpo_idx);
+
+/**
+ * @brief Delete a next hop in the dpo ctx.
+ *
+ * @param face_id Face identifier of the next hop
+ * @param dpo_idx Index of the dpo ctx to update with the new or updated next
+ * hop
+ * @return HICN_ERROR_NONE if the update or insert was fine,
+ * otherwise HICN_ERROR_DPO_CTS_NOT_FOUND
+ */
+int hicn_strategy_rr_ctx_del_nh (hicn_face_id_t face_id, index_t dpo_idx);
+
+/**
+ * @brief Prefetch a dpo
+ *
+ * @param dpo_idx Index of the dpo ctx to prefetch
+ */
+void hicn_strategy_rr_ctx_prefetch (index_t dpo_idx);
+
+/**
+ * @brief Return true if the dpo is of type strategy rr
+ *
+ * @param dpo Dpo to check the type
+ */
+int hicn_dpo_is_type_strategy_rr (const dpo_id_t * dpo);
+
+/**
+ * @brief Initialize the Round Robin strategy
+ */
+void hicn_dpo_strategy_rr_module_init (void);
+
+/**
+ * @brief Return the dpo type for the Round Robin strategy
+ */
+dpo_type_t hicn_dpo_strategy_rr_get_type (void);
+
+/**
+ * @brief Format the dpo ctx for the strategy Round Robin
+ *
+ * @param s String to append the formatted dpo ctx
+ * @param ap List of arguments to format
+ */
+u8 *format_hicn_dpo_strategy_rr (u8 * s, va_list * ap);
+
+/**
+ * @brief Format the dpo ctx for the strategy Round Robin. To
+ * call from other functions
+ *
+ * @param s String to append the formatted dpo ctx
+ * @param ... List of arguments to format
+ */
+u8 *hicn_strategy_rr_format_ctx (u8 * s, int n, ...);
+
+
+#endif // __HICN_DPO_RR_H__
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/hicn-plugin/src/network/strategies/strategy_mw.c b/hicn-plugin/src/network/strategies/strategy_mw.c
new file mode 100644
index 000000000..fe4d5896a
--- /dev/null
+++ b/hicn-plugin/src/network/strategies/strategy_mw.c
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "dpo_mw.h"
+#include "../strategy.h"
+#include "../strategy_dpo_ctx.h"
+#include "../faces/face.h"
+#include "../hashtb.h"
+#include "../strategy_dpo_manager.h"
+
+/* Simple strategy that chooses the next hop with the maximum weight */
+/* It does not require to extend the hicn_dpo */
+void hicn_receive_data_mw (index_t dpo_idx, int nh_idx);
+void hicn_add_interest_mw (index_t dpo_idx, hicn_hash_entry_t * pit_entry);
+void hicn_on_interest_timeout_mw (index_t dpo_idx);
+u32 hicn_select_next_hop_mw (index_t dpo_idx, int *nh_idx,
+ hicn_face_id_t* outface);
+u32 get_strategy_node_index_mw (void);
+u8 *hicn_strategy_format_trace_mw (u8 * s, hicn_strategy_trace_t * t);
+u8 *hicn_strategy_format_mw (u8 * s, va_list * ap);
+
+
+static hicn_strategy_vft_t hicn_strategy_mw_vft = {
+ .hicn_receive_data = &hicn_receive_data_mw,
+ .hicn_add_interest = &hicn_add_interest_mw,
+ .hicn_on_interest_timeout = &hicn_on_interest_timeout_mw,
+ .hicn_select_next_hop = &hicn_select_next_hop_mw,
+ .hicn_format_strategy_trace = hicn_strategy_format_trace_mw,
+ .hicn_format_strategy = &hicn_strategy_format_mw
+};
+
+/*
+ * Return the vft of the maximum weight strategy.
+ */
+hicn_strategy_vft_t *
+hicn_mw_strategy_get_vft (void)
+{
+  return &hicn_strategy_mw_vft;
+}
+
+/* The DPO should be given in input as it contains all the information to
+ * calculate the next hop.
+ *
+ * Fixes: the nh_idx out parameter was never written; an empty next-hop
+ * set would have copied garbage into *outface.
+ */
+u32
+hicn_select_next_hop_mw (index_t dpo_idx, int *nh_idx, hicn_face_id_t* outface)
+{
+  hicn_dpo_ctx_t *dpo_ctx = hicn_strategy_dpo_ctx_get (dpo_idx);
+
+  if (dpo_ctx == NULL)
+    return HICN_ERROR_STRATEGY_NOT_FOUND;
+
+  /* No next hops available: nothing to select */
+  if (dpo_ctx->entry_count == 0)
+    return HICN_ERROR_STRATEGY_NOT_FOUND;
+
+  hicn_strategy_mw_ctx_t *hicn_strategy_mw_ctx =
+    (hicn_strategy_mw_ctx_t *) dpo_ctx->data;
+
+  u8 next_hop_index = 0;
+  for (int i = 0; i < dpo_ctx->entry_count; i++)
+    {
+      if (hicn_strategy_mw_ctx->weight[next_hop_index] <
+	  hicn_strategy_mw_ctx->weight[i])
+	{
+	  next_hop_index = i;
+	}
+    }
+
+  /* Report the selected index through the out parameter when provided */
+  if (nh_idx)
+    *nh_idx = next_hop_index;
+  *outface = dpo_ctx->next_hops[next_hop_index];
+
+  return HICN_ERROR_NONE;
+}
+
+/**
+ * @brief Store the dpo ctx id and vft id for this strategy in the hash
+ * entry and take a lock on the dpo ctx.
+ */
+void
+hicn_add_interest_mw (index_t dpo_ctx_idx, hicn_hash_entry_t * hash_entry)
+{
+  hash_entry->dpo_ctx_id = dpo_ctx_idx;
+  dpo_id_t hicn_dpo_id =
+    { hicn_dpo_strategy_mw_get_type (), 0, 0, dpo_ctx_idx };
+  hicn_strategy_dpo_ctx_lock (&hicn_dpo_id);
+  hash_entry->vft_id = hicn_dpo_get_vft_id (&hicn_dpo_id);
+}
+
+void
+hicn_on_interest_timeout_mw (index_t dpo_idx)
+{
+  /* Nothing to do in the mw strategy when an interest times out */
+}
+
+void
+hicn_receive_data_mw (index_t dpo_idx, int nh_idx)
+{
+  /* Weights are set by the control plane: nothing to update on data */
+}
+
+
+/* packet trace format function for the mw strategy node */
+u8 *
+hicn_strategy_format_trace_mw (u8 * s, hicn_strategy_trace_t * t)
+{
+  s = format (s, "Strategy_mw: pkt: %d, sw_if_index %d, next index %d",
+	      (int) t->pkt_type, t->sw_if_index, t->next_index);
+  return (s);
+}
+
+/**
+ * @brief Print a human-readable description of the mw strategy.
+ *
+ * Fix: indent was passed to format () with no matching conversion
+ * specifier in the format string; it is now consumed from the va_list
+ * (to keep the caller's argument cursor consistent) but not forwarded.
+ */
+u8 *
+hicn_strategy_format_mw (u8 * s, va_list * ap)
+{
+  u32 indent = va_arg (*ap, u32);
+  (void) indent;
+  s =
+    format (s,
+	    "Static Weights: weights are updated by the control plane, next hop is the one with the maximum weight.\n");
+  return (s);
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/hicn-plugin/src/network/strategies/strategy_mw.h b/hicn-plugin/src/network/strategies/strategy_mw.h
new file mode 100644
index 000000000..9e0078b23
--- /dev/null
+++ b/hicn-plugin/src/network/strategies/strategy_mw.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HICN_STRATEGY_MW_H__
+#define __HICN_STRATEGY_MW_H__
+
+#include "../strategy.h"
+
+/**
+ * @file strategy_mw.h
+ *
+ * This file implements the maximum weight strategy. In this
+ * strategy the chosen next hop is the one with the maximum weight.
+ */
+
+/**
+ * @brief Return the vft for the Maximum Weight strategy
+ */
+hicn_strategy_vft_t *hicn_mw_strategy_get_vft (void);
+
+#endif // __HICN_STRATEGY_MW_H__
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/hicn-plugin/src/network/strategies/strategy_mw_cli.c b/hicn-plugin/src/network/strategies/strategy_mw_cli.c
new file mode 100644
index 000000000..636d7effa
--- /dev/null
+++ b/hicn-plugin/src/network/strategies/strategy_mw_cli.c
@@ -0,0 +1,142 @@
+/*
+ * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/vnet.h>
+#include <vnet/dpo/dpo.h>
+#include <vlib/vlib.h>
+#include <vnet/fib/fib_entry.h>
+#include <vnet/fib/fib_table.h>
+
+#include "../strategy_dpo_manager.h"
+#include "../faces/face.h"
+#include "../error.h"
+#include "../route.h"
+#include "dpo_mw.h"
+
+/**
+ * @brief CLI handler for "hicn strategy mw set": set the weight of a
+ * next hop (face) for a prefix using the maximum weight strategy.
+ *
+ * Fixes:
+ *  - the weight array lives in the dpo ctx data area, so the cast must
+ *    be on hicn_dpo_ctx->data, not on the hicn_dpo_ctx_t itself
+ *    (consistent with format_hicn_strategy_mw_ctx); the old code wrote
+ *    the weight into the ctx header
+ *  - weight is a u32, so the "weight < 0" check was always false
+ *  - line_input was never released with unformat_free
+ */
+static clib_error_t *
+hicn_mw_strategy_cli_set_weight_command_fn (vlib_main_t * vm,
+					    unformat_input_t * main_input,
+					    vlib_cli_command_t * cmd)
+{
+  clib_error_t *cl_err = 0;
+  int ret = HICN_ERROR_NONE;
+  fib_prefix_t prefix;
+  hicn_face_id_t faceid = HICN_FACE_NULL;
+  u32 fib_index;
+  u32 weight = HICN_PARAM_FIB_ENTRY_NHOP_WGHT_DFLT;
+  hicn_dpo_ctx_t *hicn_dpo_ctx;
+  const dpo_id_t *hicn_dpo_id;
+
+  /* Get a line of input. */
+  unformat_input_t _line_input, *line_input = &_line_input;
+  if (unformat_user (main_input, unformat_line_input, line_input))
+    {
+      while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+	{
+	  if (unformat (line_input, "prefix %U/%u", unformat_ip46_address,
+			&prefix.fp_addr, IP46_TYPE_ANY, &prefix.fp_len))
+	    ;
+	  else if (unformat (line_input, "face %u", &faceid))
+	    ;
+	  else if (unformat (line_input, "weight %u", &weight))
+	    ;
+	  else
+	    {
+	      /* Release the line input before bailing out */
+	      unformat_free (line_input);
+	      return clib_error_return (0, "%s",
+					get_error_string
+					(HICN_ERROR_CLI_INVAL));
+	    }
+	}
+      unformat_free (line_input);
+    }
+
+  /* weight is unsigned, only the upper bound needs checking */
+  if (weight > HICN_PARAM_FIB_ENTRY_NHOP_WGHT_MAX)
+    {
+      cl_err = clib_error_return (0,
+				  "Next-hop weight must be between 0 and %d",
+				  (int) HICN_PARAM_FIB_ENTRY_NHOP_WGHT_MAX);
+      goto done;
+    }
+
+  if (((ip46_address_is_zero (&prefix.fp_addr)) || faceid == HICN_FACE_NULL))
+    {
+      cl_err =
+	clib_error_return (0, "Please specify prefix and a valid faceid...");
+      goto done;
+    }
+
+  prefix.fp_proto =
+    ip46_address_is_ip4 (&prefix.
+			 fp_addr) ? FIB_PROTOCOL_IP4 : FIB_PROTOCOL_IP6;
+  ret = hicn_route_get_dpo (&prefix, &hicn_dpo_id, &fib_index);
+
+  if (ret == HICN_ERROR_NONE)
+    {
+      hicn_dpo_ctx = hicn_strategy_dpo_ctx_get (hicn_dpo_id->dpoi_index);
+
+      if (hicn_dpo_ctx == NULL
+	  || hicn_dpo_id->dpoi_type != hicn_dpo_strategy_mw_get_type ())
+	{
+	  cl_err = clib_error_return (0, get_error_string (ret));
+	  goto done;
+	}
+
+      /* The weights live in the ctx data area */
+      hicn_strategy_mw_ctx_t *mw_dpo =
+	(hicn_strategy_mw_ctx_t *) hicn_dpo_ctx->data;
+      int idx = ~0;
+      for (int i = 0; i < hicn_dpo_ctx->entry_count; i++)
+	if (hicn_dpo_ctx->next_hops[i] == faceid)
+	  idx = i;
+
+      if (idx == ~0)
+	{
+	  cl_err =
+	    clib_error_return (0,
+			       get_error_string
+			       (HICN_ERROR_STRATEGY_NH_NOT_FOUND));
+	  goto done;
+	}
+
+      mw_dpo->weight[idx] = weight;
+    }
+  else
+    {
+      cl_err = clib_error_return (0, get_error_string (ret));
+    }
+
+done:
+
+  return (cl_err);
+}
+
+/* cli declaration for 'strategy mw' */
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND(hicn_mw_strategy_cli_set_weight_command, static)=
+{
+ .path = "hicn strategy mw set",
+ .short_help = "hicn strategy mw set prefix <prefix> face <face_id> weight <weight>",
+ .function = hicn_mw_strategy_cli_set_weight_command_fn,
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/hicn-plugin/src/network/strategies/strategy_rr.c b/hicn-plugin/src/network/strategies/strategy_rr.c
new file mode 100644
index 000000000..4c65ce52a
--- /dev/null
+++ b/hicn-plugin/src/network/strategies/strategy_rr.c
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "dpo_rr.h"
+#include "../strategy.h"
+#include "../strategy_dpo_ctx.h"
+#include "../faces/face.h"
+#include "../hashtb.h"
+#include "../strategy_dpo_manager.h"
+
+/* Round robin strategy: the next hops are used in turn, one Interest after
+ * the other. It does not require extending the hicn_dpo ctx. */
+/* Forward declarations of the callbacks registered in the vft below. */
+void hicn_receive_data_rr (index_t dpo_idx, int nh_idx);
+void hicn_add_interest_rr (index_t dpo_idx, hicn_hash_entry_t * pit_entry);
+void hicn_on_interest_timeout_rr (index_t dpo_idx);
+u32 hicn_select_next_hop_rr (index_t dpo_idx, int *nh_idx,
+			     hicn_face_id_t* outface);
+u8 *hicn_strategy_format_trace_rr (u8 * s, hicn_strategy_trace_t * t);
+u8 *hicn_strategy_format_rr (u8 * s, va_list * ap);
+
+
+/* Virtual function table exposing the round robin callbacks to the generic
+ * strategy machinery (see strategy.h). */
+static hicn_strategy_vft_t hicn_strategy_rr_vft = {
+  .hicn_receive_data = &hicn_receive_data_rr,
+  .hicn_add_interest = &hicn_add_interest_rr,
+  .hicn_on_interest_timeout = &hicn_on_interest_timeout_rr,
+  .hicn_select_next_hop = &hicn_select_next_hop_rr,
+  .hicn_format_strategy_trace = &hicn_strategy_format_trace_rr,
+  .hicn_format_strategy = &hicn_strategy_format_rr
+};
+
+/*
+ * Return the vft of the strategy.
+ */
+hicn_strategy_vft_t *
+hicn_rr_strategy_get_vft (void)
+{
+ return &hicn_strategy_rr_vft;
+}
+
+/* The DPO is given as input as it contains all the information needed to
+ * compute the next hop. On success the selected face is written to
+ * *outface and the cursor is advanced for the next Interest. */
+u32
+hicn_select_next_hop_rr (index_t dpo_idx, int *nh_idx, hicn_face_id_t* outface)
+{
+  hicn_dpo_ctx_t *dpo_ctx = hicn_strategy_dpo_ctx_get (dpo_idx);
+
+  if (dpo_ctx == NULL)
+    return HICN_ERROR_STRATEGY_NOT_FOUND;
+
+  hicn_strategy_rr_ctx_t *hicn_strategy_rr_ctx =
+    (hicn_strategy_rr_ctx_t *) dpo_ctx->data;
+
+  /* No next hop available: bail out, otherwise the modulo below would
+   * divide by zero. */
+  if (dpo_ctx->entry_count == 0)
+    return HICN_ERROR_STRATEGY_NH_NOT_FOUND;
+
+  /* A next hop may have been deleted since the last selection: re-wrap the
+   * cursor instead of reading past entry_count. */
+  if (hicn_strategy_rr_ctx->current_nhop >= dpo_ctx->entry_count)
+    hicn_strategy_rr_ctx->current_nhop = 0;
+
+  *outface =
+    dpo_ctx->next_hops[hicn_strategy_rr_ctx->current_nhop];
+
+  /* Advance the round robin cursor. */
+  hicn_strategy_rr_ctx->current_nhop =
+    (hicn_strategy_rr_ctx->current_nhop + 1) % dpo_ctx->entry_count;
+
+  return HICN_ERROR_NONE;
+}
+
+/* Called when a new Interest is inserted in the PIT: record the dpo ctx id
+ * and the strategy vft id in the hash entry, and take a lock on the dpo
+ * ctx so it cannot be freed while the PIT entry is alive. */
+void
+hicn_add_interest_rr (index_t dpo_ctx_idx, hicn_hash_entry_t * hash_entry)
+{
+  hash_entry->dpo_ctx_id = dpo_ctx_idx;
+  /* Stack-built dpo id, only used to reuse the generic lock helper. */
+  dpo_id_t hicn_dpo_id =
+    { hicn_dpo_strategy_rr_get_type (), 0, 0, dpo_ctx_idx };
+  hicn_strategy_dpo_ctx_lock (&hicn_dpo_id);
+  hash_entry->vft_id = hicn_dpo_get_vft_id (&hicn_dpo_id);
+}
+
+/* Interest timeout: round robin keeps no per-Interest state, nothing to do. */
+void
+hicn_on_interest_timeout_rr (index_t dpo_idx)
+{
+  /* Nothing to do in the rr strategy when an interest expires */
+}
+
+/* Data reception: round robin does not adapt to feedback, nothing to do. */
+void
+hicn_receive_data_rr (index_t dpo_idx, int nh_idx)
+{
+}
+
+
+/* Packet trace format function for packets forwarded under the rr
+ * strategy (dispatched from hicn_strategy_format_trace). */
+u8 *
+hicn_strategy_format_trace_rr (u8 * s, hicn_strategy_trace_t * t)
+{
+  s = format (s, "Strategy_rr: pkt: %d, sw_if_index %d, next index %d",
+	      (int) t->pkt_type, t->sw_if_index, t->next_index);
+  return (s);
+}
+
+/* Describe the strategy for the CLI. The caller passes the desired
+ * indentation as the single va_list argument (see format_hicn_strategy_list). */
+u8 *
+hicn_strategy_format_rr (u8 * s, va_list * ap)
+{
+  u32 indent = va_arg (*ap, u32);
+  /* The original format string had no directive consuming `indent': render
+   * it with %U/format_white_space, and fix the 'ciclying' typo. */
+  s =
+    format (s,
+	    "%URound Robin: next hop is chosen cycling through all the available next hops, one after the other.\n",
+	    format_white_space, indent);
+  return (s);
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/hicn-plugin/src/network/strategies/strategy_rr.h b/hicn-plugin/src/network/strategies/strategy_rr.h
new file mode 100644
index 000000000..4dfe76b43
--- /dev/null
+++ b/hicn-plugin/src/network/strategies/strategy_rr.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HICN_STRATEGY_RR_H__
+#define __HICN_STRATEGY_RR_H__
+
+#include "../strategy.h"
+
+/**
+ * @file strategy_rr.h
+ *
+ * This file implements the round robin strategy. In this
+ * strategy the next hop is chosen in a round robin way.
+ */
+
+/**
+ * @brief Return the vft for the Round Robin strategy
+ */
+hicn_strategy_vft_t *hicn_rr_strategy_get_vft (void);
+
+#endif // __HICN_STRATEGY_RR_H__
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/hicn-plugin/src/network/strategy.h b/hicn-plugin/src/network/strategy.h
new file mode 100644
index 000000000..d949f38a4
--- /dev/null
+++ b/hicn-plugin/src/network/strategy.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HICN_STRATEGY__
+#define __HICN_STRATEGY__
+
+#include "hicn.h"
+#include "hashtb.h"
+#include "mgmt.h"
+#include "faces/face.h"
+
+/**
+ * @file strategy.h
+ *
+ * A strategy is defined as a dpo and a set of function (vft) that will be called
+ * during the packet processing. A strategy is associated to an entry in the fib by
+ * assigning the corresponding dpo to the fib entry. The dpo points to a hICN dpo
+ * context (ctx) which contains the information needed by the strategy to compute
+ * the next hop. Each strategy has its own dpo type, which means that the dpo_type
+ * uniquely identifies a strategy and its vft. The strategy node will use the dpo_type
+ * to retrieve the corresponding vft.
+ * Here we provide:
+ * - a template for the callbacks to implement in order to create a new strategy
+ * (hicn_fwd_strategy_t)
+ * - a default implementation for the strategy node which will call the strategy
+ * functions while processing the interest packets
+ */
+
+/* Trace context struct */
+typedef struct
+{
+ u32 next_index;
+ u32 sw_if_index;
+ u8 pkt_type;
+ dpo_type_t dpo_type;
+} hicn_strategy_trace_t;
+
+typedef struct hicn_strategy_vft_s
+{
+ void (*hicn_receive_data) (index_t dpo_idx, int nh_idx);
+ void (*hicn_on_interest_timeout) (index_t dpo_idx);
+ void (*hicn_add_interest) (index_t dpo_idx, hicn_hash_entry_t * pit_entry);
+ u32 (*hicn_select_next_hop) (index_t dpo_idx, int *nh_idx,
+ hicn_face_id_t* outface);
+ u8 *(*hicn_format_strategy_trace) (u8 *, hicn_strategy_trace_t *);
+ u8 *(*hicn_format_strategy) (u8 * s, va_list * ap);
+ /**< Format an hICN dpo*/
+} hicn_strategy_vft_t;
+
+typedef enum
+{
+ HICN_STRATEGY_NEXT_INTEREST_HITPIT,
+ HICN_STRATEGY_NEXT_INTEREST_HITCS,
+ HICN_STRATEGY_NEXT_INTEREST_FACE4,
+ HICN_STRATEGY_NEXT_INTEREST_FACE6,
+ HICN_STRATEGY_NEXT_ERROR_DROP,
+ HICN_STRATEGY_N_NEXT,
+} hicn_strategy_next_t;
+
+/* Names of the vpp graph nodes an Interest is handed to after the strategy
+ * has chosen a next hop (one NULL-terminated list per IP protocol). */
+const static char *const hicn_ip6_nodes[] =
+{
+  "hicn6-iface-input", // this is the name you give your node in VLIB_REGISTER_NODE
+  NULL,
+};
+
+const static char *const hicn_ip4_nodes[] =
+{
+  "hicn4-iface-input", // this is the name you give your node in VLIB_REGISTER_NODE
+  NULL,
+};
+
+/* Node lists indexed by dpo protocol, handed to dpo_register_new_type (). */
+const static char *const *const hicn_nodes_strategy[DPO_PROTO_NUM] =
+{
+  [DPO_PROTO_IP6] = hicn_ip6_nodes,
+  [DPO_PROTO_IP4] = hicn_ip4_nodes,
+};
+
+
+extern vlib_node_registration_t hicn_strategy_node;
+
+#endif /* //__HICN_STRATEGY__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/network/strategy_dpo_ctx.c b/hicn-plugin/src/network/strategy_dpo_ctx.c
new file mode 100644
index 000000000..342c78bb5
--- /dev/null
+++ b/hicn-plugin/src/network/strategy_dpo_ctx.c
@@ -0,0 +1,164 @@
+/*
+ * Copyright (c) 2020 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "strategy_dpo_ctx.h"
+#include "strategy_dpo_manager.h"
+
+hicn_dpo_ctx_t *hicn_strategy_dpo_ctx_pool;
+
+/* Allocate the fixed-size (256 entries) pool of strategy dpo contexts.
+ * Called once at plugin bootstrap from hicn_dpos_init (). */
+void
+hicn_strategy_init_dpo_ctx_pool ()
+{
+  pool_init_fixed (hicn_strategy_dpo_ctx_pool, 256);
+
+}
+
+void
+hicn_strategy_dpo_ctx_lock (dpo_id_t * dpo)
+{
+  /* Take one reference on the ctx addressed by the dpo; a stale or freed
+   * index is silently ignored. */
+  hicn_dpo_ctx_t *ctx = hicn_strategy_dpo_ctx_get (dpo->dpoi_index);
+
+  if (ctx == NULL)
+    return;
+
+  ctx->locks++;
+}
+
+/* Drop one reference on the ctx addressed by the dpo; when the last lock
+ * is released the ctx is returned to the pool. */
+void
+hicn_strategy_dpo_ctx_unlock (dpo_id_t * dpo)
+{
+  hicn_dpo_ctx_t *hicn_strategy_dpo_ctx =
+    (hicn_dpo_ctx_t *) hicn_strategy_dpo_ctx_get (dpo->dpoi_index);
+
+  if (hicn_strategy_dpo_ctx != NULL)
+    {
+      hicn_strategy_dpo_ctx->locks--;
+
+      if (0 == hicn_strategy_dpo_ctx->locks)
+	{
+	  pool_put (hicn_strategy_dpo_ctx_pool, hicn_strategy_dpo_ctx);
+	}
+    }
+}
+
+/* Format callback for hICN dpo ctxs: dispatch to the owning strategy's own
+ * format function. va args: (index_t index, u32 indent). */
+u8 *
+hicn_strategy_dpo_format_ctx (u8 * s, va_list * ap)
+{
+  index_t index = va_arg (*ap, index_t);
+  u32 indent = va_arg (*ap, u32);
+
+  hicn_dpo_ctx_t *dpo = hicn_strategy_dpo_ctx_get (index);
+
+  /* The index may refer to a freed pool slot: do not dereference NULL. */
+  if (dpo == NULL)
+    return format (s, "Invalid hICN dpo ctx (index %u)", index);
+
+  const hicn_dpo_vft_t *dpo_vft = hicn_dpo_get_vft (dpo->dpo_type);
+
+  s = dpo_vft->hicn_dpo_format (s, 2, index, indent);
+
+  return (s);
+}
+
+/* Return the pool index of a ctx, computed by pointer arithmetic against
+ * the pool base (cd must point inside hicn_strategy_dpo_ctx_pool). */
+index_t
+hicn_strategy_dpo_ctx_get_index (hicn_dpo_ctx_t * cd)
+{
+  return (cd - hicn_strategy_dpo_ctx_pool);
+}
+
+hicn_dpo_ctx_t *
+hicn_strategy_dpo_ctx_get (index_t index)
+{
+  /* Guard clause: a freed (or never allocated) slot yields NULL. */
+  if (pool_is_free_index (hicn_strategy_dpo_ctx_pool, index))
+    return NULL;
+
+  return pool_elt_at_index (hicn_strategy_dpo_ctx_pool, index);
+}
+
+/* Take a new ctx from the pool.
+ * NOTE(review): pool_get does not appear to zero the element — callers are
+ * expected to run init_dpo_ctx () before use; confirm against vppinfra. */
+hicn_dpo_ctx_t *
+hicn_strategy_dpo_ctx_alloc ()
+{
+  hicn_dpo_ctx_t *dpo_ctx;
+  pool_get (hicn_strategy_dpo_ctx_pool, dpo_ctx);
+  return dpo_ctx;
+}
+
+/*
+ * Append a next hop to the dpo ctx (control plane only, not optimized).
+ *
+ * @param nh Next hop (face id) to insert
+ * @param dpo_ctx Dpo ctx to update
+ * @param pos Output: position at which nh was stored
+ * @return HICN_ERROR_NONE on success, HICN_ERROR_DPO_CTX_NHOPS_EXISTS if
+ * the face is already a (non-deleted) next hop, HICN_ERROR_DPO_CTX_NHOPS_NS
+ * if the next hop list is full.
+ */
+int
+hicn_strategy_dpo_ctx_add_nh (hicn_face_id_t nh, hicn_dpo_ctx_t * dpo_ctx,
+			      u8 * pos)
+{
+
+  int empty = dpo_ctx->entry_count;
+
+  /* Iterate through the list of faces to find if the face is already a next hop */
+  for (int i = 0; i < dpo_ctx->entry_count; i++)
+    {
+      if (nh == dpo_ctx->next_hops[i])
+	{
+	  /* If face is marked as deleted, ignore it */
+	  hicn_face_t *face =
+	    hicn_dpoi_get_from_idx (dpo_ctx->next_hops[i]);
+	  if (face->flags & HICN_FACE_FLAGS_DELETED)
+	    {
+	      continue;
+	    }
+	  return HICN_ERROR_DPO_CTX_NHOPS_EXISTS;
+	}
+    }
+
+  /* Valid slots are 0 .. HICN_PARAM_FIB_ENTRY_NHOPS_MAX - 1: reject when
+   * the array is already full. (The original `>' comparison allowed a
+   * write one element past the end of next_hops.) */
+  if (empty >= HICN_PARAM_FIB_ENTRY_NHOPS_MAX)
+    {
+      return HICN_ERROR_DPO_CTX_NHOPS_NS;
+    }
+
+  dpo_ctx->next_hops[empty] = nh;
+  hicn_face_lock_with_id (nh);
+  dpo_ctx->entry_count++;
+  *pos = empty;
+
+  return HICN_ERROR_NONE;
+}
+
+/* Remove face_id from the next hop list. The last entry is swapped into
+ * the freed slot so the list stays compact (order is not preserved). */
+int
+hicn_strategy_dpo_ctx_del_nh (hicn_face_id_t face_id,
+			      hicn_dpo_ctx_t * dpo_ctx)
+{
+  int ret = HICN_ERROR_DPO_CTX_NOT_FOUND;
+  hicn_face_id_t invalid = NEXT_HOP_INVALID;
+
+  for (int i = 0; i < dpo_ctx->entry_count; i++)
+    {
+      if (dpo_ctx->next_hops[i] == face_id)
+	{
+	  hicn_face_unlock_with_id (dpo_ctx->next_hops[i]);
+	  dpo_ctx->entry_count--;
+	  /* Compact: move the last valid entry into the freed slot. */
+	  dpo_ctx->next_hops[i] = dpo_ctx->next_hops[dpo_ctx->entry_count];
+	  dpo_ctx->next_hops[dpo_ctx->entry_count] = invalid;
+	  ret = HICN_ERROR_NONE;
+	  break;
+	}
+    }
+
+  return ret;
+
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/network/strategy_dpo_ctx.h b/hicn-plugin/src/network/strategy_dpo_ctx.h
new file mode 100644
index 000000000..214ed88ad
--- /dev/null
+++ b/hicn-plugin/src/network/strategy_dpo_ctx.h
@@ -0,0 +1,197 @@
+/*
+ * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HICN_STRATEGY_DPO_CTX_H__
+#define __HICN_STRATEGY_DPO_CTX_H__
+
+#include <vnet/dpo/dpo.h>
+#include <vnet/fib/fib_table.h>
+
+#include "params.h"
+#include "faces/face.h"
+
+/**
+ * @file strategy_dpo_ctx.h
+ *
+ * This file implements the general hICN DPO ctx (shared among all the strategies).
+ *
+ * An hICN DPO ctx contains the list of next hops, auxiliary fields to maintain the dpo, map-me
+ * specifics (tfib_entry_count and seq), the dpo_type and 64B to let each strategy to store additional
+ * information. Each next hop is an hicn_face_id_t that refers to an index for an hICN face. The
+ * dpo_type is used to identify the strategy and to retrieve the vft corresponding to the strategy
+ * (see strategy.h) and to the dpo ctx (see strategy_dpo_manager.h)
+ */
+
+//FIB table for hicn. 0 is the default one used by ip
+#define HICN_FIB_TABLE 10
+
+#define NEXT_HOP_INVALID ~0
+
+#define INIT_SEQ 0
+
+typedef struct __attribute__ ((packed)) hicn_dpo_ctx_s
+{
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+ /* 4B*10 = 40B */
+ hicn_face_id_t next_hops[HICN_PARAM_FIB_ENTRY_NHOPS_MAX];
+ /* 40B + 4B = 44B */
+ u32 locks;
+ /* 44B + 1B = 45B */
+ u8 entry_count;
+ /* 45B + 1B = 46B */
+ /* Number of TFIB entries (stored at the end of the next_hops array */
+ u8 tfib_entry_count;
+
+ dpo_type_t dpo_type;
+
+ /* 46B + 2B = 48B */
+ u8 padding; /* To align to 8B */
+
+ /* 48 + 4B = 52; last sequence number */
+ u32 seq;
+
+ /* 52 + 12 = 64 */
+ fib_node_t fib_node;
+
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline1);
+
+ fib_node_index_t fib_entry_index;
+
+ u32 fib_sibling;
+
+ union
+ {
+ u32 padding_proto;
+ fib_protocol_t proto;
+ };
+
+ u8 data[CLIB_CACHE_LINE_BYTES - 12];
+
+} hicn_dpo_ctx_t;
+
+extern hicn_dpo_ctx_t *hicn_strategy_dpo_ctx_pool;
+
+/**
+ * @brief Initialize the hICN dpo ctx
+ *
+ * @param dpo_ctx Pointer to the hICN dpo ctx to initialize
+ * @param next_hop List of next hops to store in the dpo ctx
+ * @param nh_len Number of elements in the list of next hops
+ * @param dpo_type Type of dpo. It identifies the strategy.
+ * @param proto Dpo protocol (IPv4/IPv6) of the routed prefix
+ */
+always_inline void
+init_dpo_ctx (hicn_dpo_ctx_t * dpo_ctx, const hicn_face_id_t * next_hop,
+	      int nh_len, dpo_type_t dpo_type, dpo_proto_t proto)
+{
+  hicn_face_id_t invalid = NEXT_HOP_INVALID;
+
+  dpo_ctx->entry_count = 0;
+  dpo_ctx->locks = 0;
+
+  dpo_ctx->tfib_entry_count = 0;
+
+  dpo_ctx->seq = INIT_SEQ;
+  dpo_ctx->dpo_type = dpo_type;
+
+  dpo_ctx->proto = proto;
+
+  /* Copy at most HICN_PARAM_FIB_ENTRY_NHOPS_MAX next hops. */
+  for (int i = 0; i < HICN_PARAM_FIB_ENTRY_NHOPS_MAX && i < nh_len; i++)
+    {
+      dpo_ctx->next_hops[i] = next_hop[i];
+      dpo_ctx->entry_count++;
+    }
+
+
+  /* Mark the remaining slots as invalid. */
+  for (int i = nh_len; i < HICN_PARAM_FIB_ENTRY_NHOPS_MAX; i++)
+    {
+      dpo_ctx->next_hops[i] = invalid;
+    }
+
+}
+
+/**
+ * @brief Initialize the pool containing the hICN dpo ctx
+ *
+ */
+void hicn_strategy_init_dpo_ctx_pool (void);
+
+/**
+ * @brief Allocate a new hICN dpo ctx from the pool
+ */
+hicn_dpo_ctx_t *hicn_strategy_dpo_ctx_alloc ();
+
+/**
+ * @brief Retrieve an existing hICN dpo ctx from the pool
+ */
+hicn_dpo_ctx_t *hicn_strategy_dpo_ctx_get (index_t index);
+
+/**
+ * @brief Retrieve the index of the hICN dpo ctx
+ */
+index_t hicn_strategy_dpo_ctx_get_index (hicn_dpo_ctx_t * cd);
+
+/**
+ * @brief Lock the dpo of a strategy ctx
+ *
+ * @param dpo Identifier of the dpo of the strategy ctx
+ */
+void hicn_strategy_dpo_ctx_lock (dpo_id_t * dpo);
+
+/**
+ * @brief Unlock the dpo of a strategy ctx
+ *
+ * @param dpo Identifier of the dpo of the strategy ctx
+ */
+void hicn_strategy_dpo_ctx_unlock (dpo_id_t * dpo);
+
+/**
+ * @brief Add or update a next hop in the dpo ctx.
+ *
+ * This function is meant to be used in the control plane and not in the data plane,
+ * as it is not optimized for the latter.
+ *
+ * @param nh Next hop to insert in the dpo ctx
+ * @param dpo_ctx Dpo ctx to update with the new or updated next hop
+ * @param pos Return the position of the nh that has been added
+ * @return HICN_ERROR_NONE if the update or insert was fine,
+ * otherwise HICN_ERROR_DPO_CTX_NOT_FOUND
+ */
+int
+hicn_strategy_dpo_ctx_add_nh (hicn_face_id_t nh, hicn_dpo_ctx_t * dpo_ctx,
+ u8 * pos);
+
+/**
+ * @brief Delete a next hop in the dpo ctx.
+ *
+ * @param face_id Face identifier of the next hop
+ * @param dpo_ctx Dpo ctx to update by removing the face
+ * @return HICN_ERROR_NONE if the update or insert was fine,
+ * otherwise HICN_ERROR_DPO_CTS_NOT_FOUND
+ */
+int
+hicn_strategy_dpo_ctx_del_nh (hicn_face_id_t face_id,
+ hicn_dpo_ctx_t * dpo_ctx);
+
+
+STATIC_ASSERT (sizeof (hicn_dpo_ctx_t) <= 2 * CLIB_CACHE_LINE_BYTES,
+ "sizeof hicn_dpo_ctx_t is greater than 128B");
+
+#endif /* // __HICN_STRATEGY_DPO_CTX_H__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/network/strategy_dpo_manager.c b/hicn-plugin/src/network/strategy_dpo_manager.c
new file mode 100644
index 000000000..f8d41a372
--- /dev/null
+++ b/hicn-plugin/src/network/strategy_dpo_manager.c
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/dpo/dpo.h>
+
+#include "strategy_dpo_manager.h"
+#include "strategy_dpo_ctx.h"
+#include "strategies/dpo_mw.h"
+#include "strategies/dpo_rr.h"
+#include "strategy.h"
+#include "faces/face.h"
+
+static dpo_type_t *strategies_id;
+static const hicn_dpo_vft_t **hicn_dpo_vfts;
+
+static const hicn_strategy_vft_t **hicn_strategy_vfts;
+
+int hicn_strategies = 0;
+
+hicn_dpo_vft_t default_dpo;
+
+/* Register a new strategy: allocate a vpp dpo type for it and record its
+ * dpo vft, strategy vft and progressive strategy id in the parallel lookup
+ * tables. Returns the newly allocated dpo type. */
+dpo_type_t
+hicn_dpo_register_new_type (const char *const *const *hicn_nodes,
+			    const hicn_dpo_vft_t * hicn_dpo_vft,
+			    const hicn_strategy_vft_t * hicn_strategy_vft,
+			    const dpo_vft_t * dpo_ctx_vft)
+{
+  dpo_type_t dpo_type = dpo_register_new_type (dpo_ctx_vft, hicn_nodes);
+  /* Both vft tables are indexed directly by dpo type. */
+  vec_validate (hicn_dpo_vfts, dpo_type);
+  hicn_dpo_vfts[dpo_type] = hicn_dpo_vft;
+
+  vec_validate (hicn_strategy_vfts, dpo_type);
+  hicn_strategy_vfts[dpo_type] = hicn_strategy_vft;
+
+  /* Map the progressive strategy id to the dpo type. */
+  vec_validate (strategies_id, hicn_strategies);
+  strategies_id[hicn_strategies] = dpo_type;
+  hicn_strategies++;
+
+  return dpo_type;
+}
+
+u32
+dpo_is_hicn (const dpo_id_t * dpo)
+{
+  /* A dpo is an hICN dpo if any registered strategy recognizes its type. */
+  u32 found = 0;
+  int i = 0;
+
+  while (!found && i < hicn_strategies)
+    {
+      found = hicn_dpo_vfts[strategies_id[i]]->hicn_dpo_is_type (dpo) != 0;
+      i++;
+    }
+
+  return found;
+}
+
+/* Identifier of the vfts associated to the dpo: simply the dpo type, which
+ * uniquely identifies the strategy and indexes both vft tables.
+ * NOTE(review): strategy_dpo_manager.h declares this returning u8 while
+ * the definition returns dpo_type_t — confirm the two agree. */
+dpo_type_t
+hicn_dpo_get_vft_id (const dpo_id_t * dpo)
+{
+  return dpo->dpoi_type;
+}
+
+/* Dpo ctx vft, indexed by dpo type. No bounds check: vfts_id must come
+ * from a registered hICN dpo. */
+const hicn_dpo_vft_t *
+hicn_dpo_get_vft (dpo_type_t vfts_id)
+{
+  return hicn_dpo_vfts[vfts_id];
+}
+
+/* Same lookup, but starting from the progressive strategy id. */
+const hicn_dpo_vft_t *
+hicn_dpo_get_vft_from_id (u8 strategy_id)
+{
+  return hicn_dpo_vfts[strategies_id[strategy_id]];
+}
+
+/* Strategy callback vft, indexed by dpo type. */
+const hicn_strategy_vft_t *
+hicn_dpo_get_strategy_vft (dpo_type_t vfts_id)
+{
+  return hicn_strategy_vfts[vfts_id];
+}
+
+/* Strategy callback vft, starting from the progressive strategy id. */
+const hicn_strategy_vft_t *
+hicn_dpo_get_strategy_vft_from_id (u8 vfts_id)
+{
+  return hicn_strategy_vfts[strategies_id[vfts_id]];
+}
+
+/* Bootstrap the built-in strategies (mw, rr) and set the maximum-weight
+ * strategy as the default dpo used when no strategy is specified for a
+ * fib entry. Called once at plugin init. */
+void
+hicn_dpos_init (void)
+{
+  hicn_strategy_init_dpo_ctx_pool ();
+  hicn_dpo_strategy_mw_module_init ();
+  hicn_dpo_strategy_rr_module_init ();
+
+  default_dpo.hicn_dpo_is_type = &hicn_dpo_is_type_strategy_mw;
+  default_dpo.hicn_dpo_get_type = &hicn_dpo_strategy_mw_get_type;
+  default_dpo.hicn_dpo_module_init = &hicn_dpo_strategy_mw_module_init;
+  default_dpo.hicn_dpo_create = &hicn_strategy_mw_ctx_create;
+  default_dpo.hicn_dpo_add_update_nh = &hicn_strategy_mw_ctx_add_nh;
+  default_dpo.hicn_dpo_del_nh = &hicn_strategy_mw_ctx_del_nh;
+  default_dpo.hicn_dpo_format = &hicn_strategy_mw_format_ctx;
+}
+
+/* Print the list of registered strategies. Variadic args: (u32 indent).
+ * Each strategy vft reads `indent' from the va_list it is handed. */
+u8 *
+format_hicn_strategy_list (u8 * s, int n, ...)
+{
+  va_list ap;
+  va_start (ap, n);
+  u32 indent = va_arg (ap, u32);
+
+  s = format (s, "%U Strategies:\n", format_white_space, indent);
+  indent += 4;
+  int i;
+  vec_foreach_index (i, strategies_id)
+  {
+    /* Each vft consumes `indent' itself: hand it a fresh argument list.
+     * (The original code reused `ap' after va_end — undefined behavior —
+     * and after `indent' had already been consumed.) */
+    va_list vft_args;
+    va_start (vft_args, n);
+    s = format (s, "%U (%d) ", format_white_space, indent, i);
+    s = hicn_strategy_vfts[strategies_id[i]]->hicn_format_strategy (s, &vft_args);
+    va_end (vft_args);
+  }
+
+  va_end (ap);
+
+  return (s);
+}
+
+/* Check a strategy id against the number of registered strategies.
+ * Returns HICN_ERROR_NONE if valid, HICN_ERROR_DPO_MGR_ID_NOT_VALID
+ * otherwise. */
+u8
+hicn_dpo_strategy_id_is_valid (int strategy_id)
+{
+  return vec_len (strategies_id) > strategy_id ?
+    HICN_ERROR_NONE : HICN_ERROR_DPO_MGR_ID_NOT_VALID;
+}
+
+/* Number of registered strategies (upper bound for valid strategy ids). */
+int
+hicn_strategy_get_all_available (void)
+{
+  return hicn_strategies;
+}
+
+/**
+ * @brief Registers a dpo by calling its module init function.
+ *
+ * This is typically called from the ctor for dpo's registered at compilation
+ * time.
+ *
+ * @param hicn_dpo vft of the dpo to register; its hicn_dpo_module_init
+ * callback is invoked once.
+ */
+void
+hicn_dpo_register (const hicn_dpo_vft_t * hicn_dpo)
+{
+  hicn_dpo->hicn_dpo_module_init ();
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/network/strategy_dpo_manager.h b/hicn-plugin/src/network/strategy_dpo_manager.h
new file mode 100644
index 000000000..e96e050d9
--- /dev/null
+++ b/hicn-plugin/src/network/strategy_dpo_manager.h
@@ -0,0 +1,193 @@
+/*
+ * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HICN_STRATEGY_DPO_MANAGER_H__
+#define __HICN_STRATEGY_DPO_MANAGER_H__
+
+#include "strategy_dpo_ctx.h"
+#include "strategy.h"
+
+/**
+ * @file strategy_dpo_manager.h
+ *
+ * This file implements structs and helper functions to manipulate hICN dpo.
+ * An hICN DPO is a combination of:
+ * - a hICN DPO ctx (context) that holds the structure containing the
+ * information to choose the next hop,
+ * - a dpo vft that specify how to update the hICN DPO ctx when a next hop is
+ * added, deleted or updated,
+ * - a strategy containing (see strategy.h): (i) the vpp node that processes Interest packets
+ * subjected to such strategy, (ii) the definition of the vft that defines
+ * the hICN strategy functions
+ * An hICN DPO is placed as the sole next hop in the vpp loadbalancer, and it contains
+ * a list of next hops that will be used by the associated strategy when forwarding
+ * interest packets.
+ */
+
+/**
+ * @brief Definition of the virtual function table for a hICN DPO.
+ *
+ * The following is a virtual function table template that glues together
+ * the functions used to interact with the context and to create the dpo.
+ */
+typedef struct hicn_dpo_vft_s
+{
+ int (*hicn_dpo_is_type) (const dpo_id_t * dpo);
+ /**< Check if the type of the
+ hICN DPO is the expected */
+ dpo_type_t (*hicn_dpo_get_type) (void);
+ /**< Return the type of the hICN dpo */
+ void (*hicn_dpo_module_init) (void); /**< Initialize the hICN dpo */
+ void (*hicn_dpo_create) (fib_protocol_t proto, const hicn_face_id_t * nh, int nh_len, index_t * dpo_idx); /**< Create the context of the hICN dpo */
+ int (*hicn_dpo_add_update_nh) (hicn_face_id_t nh, index_t dpo_idx); /**< Add a next hop to the hICN dpo context */
+ int (*hicn_dpo_del_nh) (hicn_face_id_t face_id, index_t dpo_idx);
+ u8 *(*hicn_dpo_format) (u8 * s, int, ...);
+ /**< Format an hICN dpo*/
+} hicn_dpo_vft_t;
+
+/*
+ * Default dpo to be used to create fib entry when a strategy is not
+ * specified
+ */
+extern hicn_dpo_vft_t default_dpo;
+
+/**
+ * @brief Register a new hICN dpo to the manager.
+ *
+ * Registering a hICN DPO allows the plugin to be aware of the new dpo an be
+ * able to apply it to the FIB entries.
+ *
+ * @param hicn_nodes A list of vpp to which pass an interest that matches with
+ * the FIB entry to which the hICN DPO is applied. This list must contain the
+ * name of the strategy node (or nodes in case of differentiation between IPv4
+ * and IPv6). Unless really needed otherwise (i.e., different implementation of
+ * iface input), the list of node to use should be one provided in the strategy.h
+ * (hicn_nodes_strategy)
+ * @param hicn_dpo_vft The structure holding the virtual function table to
+ * interact with the hICN dpo and its context.
+ * @param hicn_strategy_vft The structure holding the virtual function table
+ * containing the hICN strategy functions.
+ * @return the dpo type registered in the VPP Data plane graph.
+ */
+dpo_type_t
+hicn_dpo_register_new_type (const char *const *const *hicn_nodes,
+ const hicn_dpo_vft_t * hicn_dpo_vft,
+ const hicn_strategy_vft_t *
+ hicn_strategy_vft, const dpo_vft_t * dpo_ctx_vft);
+
+/**
+ * @brief Check if the type of the dpo is among the list of hicn dpo types
+ *
+ * Iterate through the list of dpo types registered in the hicn dpo manager.
+ *
+ * @param dpo The id of the dpo to which check the type
+ * @return 1 if there is a match, 0 otherwise.
+ */
+u32 dpo_is_hicn (const dpo_id_t * dpo);
+
+/**
+ * @brief Return the dpo_vtf and strategy_vtf identifier
+ *
+ * The returned value is the dpo type of the dpo, which uniquely identifies
+ * the strategy and therefore indexes both vft tables.
+ *
+ * @param dpo The id of the dpo whose vft identifier is requested
+ * @return the dpo_vft/strategy_vft id
+ */
+dpo_type_t hicn_dpo_get_vft_id (const dpo_id_t * dpo);
+
+/**
+ * @brief Get the vft to manage the dpo context.
+ *
+ * @param The id of the hicn_dpo_vft to retrieve.
+ * @return The vft struct that contains the list of callbacks that allows to
+ * manage the dpo context.
+ */
+const hicn_dpo_vft_t *hicn_dpo_get_vft (dpo_type_t vfts_id);
+
+/**
+ * @brief Get the vft to manage the dpo context from the strategy id.
+ *
+ * @param The strategy id of the hicn_dpo_vft to retrieve.
+ * @return The vft struct that contains the list of callbacks that allows to
+ * manage the dpo context.
+ */
+const hicn_dpo_vft_t *hicn_dpo_get_vft_from_id (u8 strategy_id);
+
+/**
+ * @brief Get the vft with the hICN strategy functions.
+ *
+ * @param The id of the hicn_strategy_vft to retrieve.
+ * @return The vft struct that contains the list hICN strategy functions.
+ */
+const hicn_strategy_vft_t *hicn_dpo_get_strategy_vft (dpo_type_t vfts_id);
+
+/**
+ * @brief Get the vft with the hICN strategy functions from the strategy id.
+ *
+ * @param The id of the hicn_strategy_vft to retrieve.
+ * @return The vft struct that contains the list hICN strategy functions.
+ */
+const hicn_strategy_vft_t *hicn_dpo_get_strategy_vft_from_id (u8 vfts_id);
+
+/**
+ * @brief Initialize all the types hicn dpo registered
+ *
+ * Call the init functions of all the hicn dpo implemented.
+ * This init is called when the plugin bootstrap.
+ */
+void hicn_dpos_init (void);
+
+/**
+ * @brief Print the list of the registered hICN DPO
+ *
+ * @param s String to which to append the list of hICN DPO (strategies)
+ * @param n number of parameters to pass
+ *
+ * @result The string with the list of hICN DPO (strategies)
+ */
+u8 *format_hicn_strategy_list (u8 * s, int n, ...);
+
+/**
+ * @brief Check if a given id points to a strategy and the corresponding dpo ctx
+ *
+ * @param The id of the strategy to check.
+ *
+ * @result HICN_ERROR_NONE is the id is valid, otherwise EINVAL
+ */
+u8 hicn_dpo_strategy_id_is_valid (int strategy_id);
+
+/**
+ * @brief Return the number of available strategies. This number can be used to
+ * as an upperbond for valid vfts_id.
+ *
+ * @result Return the number of available strategies.
+ */
+int hicn_strategy_get_all_available (void);
+
+/**
+ * @brief Registers a module at compilation time to be initialized as part of
+ * the ctor.
+ */
+void hicn_dpo_register (const hicn_dpo_vft_t * hicn_dpo);
+
+#endif /* // __HICN_STRATEGY_DPO_MANAGER_H__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/network/strategy_node.c b/hicn-plugin/src/network/strategy_node.c
new file mode 100644
index 000000000..0659a871a
--- /dev/null
+++ b/hicn-plugin/src/network/strategy_node.c
@@ -0,0 +1,323 @@
+/*
+ * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+
+#include "hicn.h"
+#include "parser.h"
+#include "strategy.h"
+#include "strategy_dpo_ctx.h"
+#include "face_db.h"
+#include "infra.h"
+#include "mgmt.h"
+#include "pcs.h"
+#include "state.h"
+#include "strategies/strategy_mw.h"
+
+/* Registration struct for a graph node */
+vlib_node_registration_t hicn_strategy_node;
+
+/*
+ * Node context data (to be used in all the strategy nodes); we think this is
+ * per-thread/instance
+ */
+typedef struct hicn_strategy_runtime_s
+{
+  int id;			/* NOTE(review): not referenced in this file — confirm use */
+  hicn_pit_cs_t *pitcs;		/* PIT/CS tables; set to &hicn_main.pitcs at dispatch */
+} hicn_strategy_runtime_t;
+
+/* Stats string values */
+/* One string per counter in foreach_hicnfwd_error, in macro order */
+static char *hicn_strategy_error_strings[] = {
+#define _(sym, string) string,
+  foreach_hicnfwd_error
+#undef _
+};
+
+/* Packet trace formatter: delegate to the strategy that handled the packet */
+u8 *
+hicn_strategy_format_trace (u8 * s, va_list * args)
+{
+  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+  hicn_strategy_trace_t *trace = va_arg (*args, hicn_strategy_trace_t *);
+  const hicn_strategy_vft_t *strategy_vft =
+    hicn_dpo_get_strategy_vft (trace->dpo_type);
+
+  return strategy_vft->hicn_format_strategy_trace (s, trace);
+}
+
+
+/*
+ * Create and insert a PIT entry for a new interest, then forward it.
+ *
+ * On successful insertion the strategy is notified (hicn_add_interest),
+ * the ingress face is recorded in the entry, and *next is set to the
+ * v4/v6 face-output node with adj_index[VLIB_TX] = outface.  On
+ * HICN_ERROR_HASHTB_EXIST the interest is aggregated on the existing
+ * PIT/CS entry and *next is set to the hitpit/hitcs node; any other
+ * error sets *next to drop.  Returns the insertion error code.
+ */
+always_inline int
+hicn_new_interest (hicn_strategy_runtime_t * rt, vlib_buffer_t * b0,
+		   u32 * next, f64 tnow, u8 * nameptr,
+		   u16 namelen, hicn_face_id_t outface, int nh_idx,
+		   index_t dpo_ctx_id0, const hicn_strategy_vft_t * strategy,
+		   dpo_type_t dpo_type, u8 isv6,
+		   vl_api_hicn_api_node_stats_get_reply_t * stats)
+{
+  int ret;
+  hicn_hash_node_t *nodep;
+  hicn_pcs_entry_t *pitp;
+  hicn_header_t *hicn0;
+  hicn_main_t *sm = &hicn_main;
+  hicn_buffer_t *hicnb0 = hicn_get_buffer (b0);
+  u32 node_id0 = 0;
+  u8 vft_id0 = dpo_type;
+  u8 is_cs0 = 0;
+  u8 hash_entry_id = 0;
+  u8 bucket_is_overflow = 0;
+  u32 bucket_id = ~0;
+
+
+  /* Create PIT node and init PIT entry */
+  nodep = hicn_hashtb_alloc_node (rt->pitcs->pcs_table);
+  if (PREDICT_FALSE (nodep == NULL))
+    {
+      /* Nothing we can do - no mem */
+      *next = HICN_STRATEGY_NEXT_ERROR_DROP;
+      return HICN_ERROR_HASHTB_NOMEM;
+    }
+  pitp = hicn_pit_get_data (nodep);
+  hicn_pit_init_data (pitp);
+  pitp->shared.create_time = tnow;
+
+  hicn0 = vlib_buffer_get_current (b0);
+  hicn_lifetime_t imsg_lifetime;
+  hicn_type_t type = hicnb0->type;
+  hicn_ops_vft[type.l1]->get_lifetime (type, &hicn0->protocol,
+				       &imsg_lifetime);
+
+  /* Clamp the requested lifetime to the configured PIT maximum */
+  if (imsg_lifetime > sm->pit_lifetime_max_ms)
+    {
+      imsg_lifetime = sm->pit_lifetime_max_ms;
+    }
+  pitp->shared.expire_time = hicn_pcs_get_exp_time (tnow, imsg_lifetime);
+
+  /* Set up the hash node and insert it */
+  hicn_hash_entry_t *hash_entry;
+  hicn_hashtb_init_node (rt->pitcs->pcs_table, nodep, nameptr, namelen);
+
+  ret =
+    hicn_pcs_pit_insert (rt->pitcs, pitp, nodep, &hash_entry,
+			 hicnb0->name_hash, &node_id0, &dpo_ctx_id0, &vft_id0,
+			 &is_cs0, &hash_entry_id, &bucket_id,
+			 &bucket_is_overflow);
+
+  if (ret == HICN_ERROR_NONE)
+    {
+      /* New PIT entry: let the strategy record per-interest state */
+      strategy->hicn_add_interest (vnet_buffer (b0)->ip.adj_index[VLIB_TX],
+				   hash_entry);
+
+      /* Add face */
+      hicn_face_db_add_face (hicnb0->face_id, &(pitp->u.pit.faces));
+
+      *next = isv6 ? HICN_STRATEGY_NEXT_INTEREST_FACE6 :
+	HICN_STRATEGY_NEXT_INTEREST_FACE4;
+
+      vnet_buffer (b0)->ip.adj_index[VLIB_TX] = outface;
+      stats->pkts_interest_count++;
+      pitp->u.pit.pe_txnh = nh_idx;
+    }
+  else
+    {
+      /* Interest aggregate in PIT */
+      if (ret == HICN_ERROR_HASHTB_EXIST)
+	{
+	  /* Stash lookup results so the hitpit/hitcs node can reuse them */
+	  hicn_store_internal_state (b0, hicnb0->name_hash, node_id0,
+				     dpo_ctx_id0, vft_id0, hash_entry_id,
+				     bucket_id, bucket_is_overflow);
+	  // We need to take a lock as the lock is not taken on the hash
+	  // entry because it is a CS entry (hash_insert function).
+	  // NOTE(review): assumes hicn_pcs_pit_insert sets hash_entry to the
+	  // existing entry on HASHTB_EXIST — confirm in pcs implementation.
+	  hash_entry->locks++;
+	  *next =
+	    is_cs0 ? HICN_STRATEGY_NEXT_INTEREST_HITCS :
+	    HICN_STRATEGY_NEXT_INTEREST_HITPIT;
+	}
+      else
+	{
+	  /* Send the packet to the interest-hitpit node */
+	  *next = HICN_STRATEGY_NEXT_ERROR_DROP;
+	}
+      /* The freshly allocated node was not inserted: release it */
+      hicn_faces_flush (&(pitp->u.pit.faces));
+      hicn_hashtb_free_node (rt->pitcs->pcs_table, nodep);
+    }
+
+  return (ret);
+
+}
+
+/*
+ * hICN strategy node for interests: one packet at a time, ipv4/tcp and
+ * ipv6/tcp.  Retrieves the strategy dpo ctx left in adj_index[VLIB_TX]
+ * by the interest-pcslookup node, asks the strategy for a next hop, and
+ * calls hicn_new_interest() to insert the PIT entry and forward.
+ */
+uword
+hicn_strategy_fn (vlib_main_t * vm,
+		  vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+
+  u32 n_left_from, *from, *to_next, n_left_to_next;
+  hicn_strategy_next_t next_index;
+  hicn_strategy_runtime_t *rt = NULL;
+  vl_api_hicn_api_node_stats_get_reply_t stats = { 0 };
+  f64 tnow;
+
+  from = vlib_frame_vector_args (frame);
+  n_left_from = frame->n_vectors;
+  next_index = (hicn_strategy_next_t) node->cached_next_index;
+  rt = vlib_node_get_runtime_data (vm, hicn_strategy_node.index);
+  rt->pitcs = &hicn_main.pitcs;
+  /* Capture time in vpp terms */
+  tnow = vlib_time_now (vm);
+
+  while (n_left_from > 0)
+    {
+
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+      while (n_left_from > 0 && n_left_to_next > 0)
+	{
+	  u8 isv6;
+	  u8 *nameptr;
+	  u16 namelen;
+	  hicn_name_t name;
+	  hicn_header_t *hicn0;
+	  vlib_buffer_t *b0;
+	  u32 bi0;
+	  hicn_face_id_t outface;
+	  int nh_idx;
+	  u32 next0 = next_index;
+	  int ret;
+
+	  /* Prefetch for next iteration. */
+	  if (n_left_from > 1)
+	    {
+	      vlib_buffer_t *b1;
+	      b1 = vlib_get_buffer (vm, from[1]);
+	      CLIB_PREFETCH (b1, CLIB_CACHE_LINE_BYTES, LOAD);
+	      CLIB_PREFETCH (&b1->trace_handle, 2 * CLIB_CACHE_LINE_BYTES,
+			     STORE);
+	    }
+	  /* Dequeue a packet buffer */
+	  bi0 = from[0];
+	  from += 1;
+	  n_left_from -= 1;
+	  to_next[0] = bi0;
+	  to_next += 1;
+	  n_left_to_next -= 1;
+
+	  b0 = vlib_get_buffer (vm, bi0);
+	  next0 = HICN_STRATEGY_NEXT_ERROR_DROP;
+
+	  /* adj_index[VLIB_TX] holds the strategy dpo ctx index here,
+	   * stored by the interest-pcslookup node */
+	  hicn_dpo_ctx_t *dpo_ctx =
+	    hicn_strategy_dpo_ctx_get (vnet_buffer (b0)->ip.
+				       adj_index[VLIB_TX]);
+	  const hicn_strategy_vft_t *strategy =
+	    hicn_dpo_get_strategy_vft (dpo_ctx->dpo_type);
+
+	  ret = hicn_interest_parse_pkt (b0, &name, &namelen, &hicn0, &isv6);
+	  stats.pkts_processed++;
+	  /* Select next hop */
+	  /*
+	   * Double check that the interest has been through
+	   * the interest-pcslookup node due to misconfiguration in
+	   * the punting rules.
+	   */
+	  if (PREDICT_TRUE
+	      (ret == HICN_ERROR_NONE && HICN_IS_NAMEHASH_CACHED (b0)
+	       && strategy->hicn_select_next_hop (vnet_buffer (b0)->
+						  ip.adj_index[VLIB_TX],
+						  &nh_idx,
+						  &outface) ==
+	       HICN_ERROR_NONE))
+	    {
+	      /*
+	       * No need to check if parsing was successful
+	       * here. Already checked in the interest_pcslookup
+	       * node
+	       */
+	      nameptr = (u8 *) (&name);
+	      hicn_new_interest (rt, b0, &next0, tnow, nameptr, namelen,
+				 outface, nh_idx,
+				 vnet_buffer (b0)->ip.adj_index[VLIB_TX],
+				 strategy, dpo_ctx->dpo_type, isv6, &stats);
+	    }
+	  /* Maybe trace */
+	  if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) &&
+			     (b0->flags & VLIB_BUFFER_IS_TRACED)))
+	    {
+	      hicn_strategy_trace_t *t =
+		vlib_add_trace (vm, node, b0, sizeof (*t));
+	      t->pkt_type = HICN_PKT_TYPE_CONTENT;
+	      t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+	      t->next_index = next0;
+	      t->dpo_type = dpo_ctx->dpo_type;
+	    }
+	  /*
+	   * Verify speculative enqueue, maybe switch current
+	   * next frame
+	   */
+	  /*
+	   * Fix in case of a wrong speculation. Needed for
+	   * cloning the data in the right frame
+	   */
+	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+					   to_next, n_left_to_next,
+					   bi0, next0);
+	}
+
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+    }
+
+  vlib_node_increment_counter (vm, hicn_strategy_node.index,
+			       HICNFWD_ERROR_PROCESSED, stats.pkts_processed);
+  vlib_node_increment_counter (vm, hicn_strategy_node.index,
+			       HICNFWD_ERROR_INTERESTS,
+			       stats.pkts_interest_count);
+
+  return (frame->n_vectors);
+}
+
+/*
+ * Node registration for the forwarder node
+ */
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (hicn_strategy_node) =
+  {
+  .name = "hicn-strategy",
+  .function = hicn_strategy_fn,
+  .vector_size = sizeof (u32),
+  /*
+   * Reserve the real runtime struct size.  The previous
+   * sizeof (int) + sizeof (hicn_pit_cs_t *) undercounts it (12 vs 16
+   * bytes on 64-bit due to struct padding), which could leave
+   * rt->pitcs partially outside the reserved runtime-data area.
+   */
+  .runtime_data_bytes = sizeof (hicn_strategy_runtime_t),
+  .format_trace = hicn_strategy_format_trace,
+  .type = VLIB_NODE_TYPE_INTERNAL,
+  .n_errors = ARRAY_LEN (hicn_strategy_error_strings),
+  .error_strings = hicn_strategy_error_strings,
+  .n_next_nodes = HICN_STRATEGY_N_NEXT,
+  .next_nodes =
+  {
+    [HICN_STRATEGY_NEXT_INTEREST_HITPIT] = "hicn-interest-hitpit",
+    [HICN_STRATEGY_NEXT_INTEREST_HITCS] = "hicn-interest-hitcs",
+    [HICN_STRATEGY_NEXT_INTEREST_FACE4] = "hicn4-face-output",
+    [HICN_STRATEGY_NEXT_INTEREST_FACE6] = "hicn6-face-output",
+    [HICN_STRATEGY_NEXT_ERROR_DROP] = "error-drop",
+  },
+  };
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/network/udp_tunnels/udp_decap.h b/hicn-plugin/src/network/udp_tunnels/udp_decap.h
new file mode 100644
index 000000000..9ddb8a73b
--- /dev/null
+++ b/hicn-plugin/src/network/udp_tunnels/udp_decap.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2020 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __UDP_DECAP_H__
+#define __UDP_DECAP_H__
+
+/**
+ * @file udp_decap.h
+ *
+ * Implements the udp decapsulation for udp tunnels
+ *
+ * Udp decap nodes follow the ip4/6-local nodes and their purpose
+ * is to retrieve the udp tunnel for the incoming packet. If a tunnel does
+ * not exist the packet is dropped.
+ * The following node to the udp decap nodes are the ip4/6-lookup nodes.
+ */
+
+extern vlib_node_registration_t udp_decap_node;
+
+#endif // __UDP_DECAP_H__
diff --git a/hicn-plugin/src/network/udp_tunnels/udp_decap_node.c b/hicn-plugin/src/network/udp_tunnels/udp_decap_node.c
new file mode 100644
index 000000000..5603f20f9
--- /dev/null
+++ b/hicn-plugin/src/network/udp_tunnels/udp_decap_node.c
@@ -0,0 +1,623 @@
+/*
+ * Copyright (c) 2020 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vnet/fib/fib_table.h>
+
+#include "udp_tunnel.h"
+#include "../mgmt.h"
+#include "../hicn.h"
+#include "../strategy_dpo_ctx.h"
+
+/* NOTE(review): registrations below use udp4_decap_node / udp6_decap_node;
+ * confirm this extern declaration is still needed */
+vlib_node_registration_t udp_decap_node;
+
+/* One string per counter in foreach_hicnfwd_error, in macro order */
+static char *udp_decap_error_strings[] = {
+#define _(sym, string) string,
+  foreach_hicnfwd_error
+#undef _
+};
+
+/* Next-node indices for udp4-decap: the inner packet's IP version
+ * selects ip4-lookup or ip6-lookup */
+typedef enum
+{
+  UDP4_DECAP_NEXT_LOOKUP_IP4,
+  UDP4_DECAP_NEXT_LOOKUP_IP6,
+  UDP4_DECAP_N_NEXT,
+} udp4_decap_next_t;
+
+/* Next-node indices for udp6-decap */
+typedef enum
+{
+  UDP6_DECAP_NEXT_LOOKUP_IP4,
+  UDP6_DECAP_NEXT_LOOKUP_IP6,
+  UDP6_DECAP_N_NEXT,
+} udp6_decap_next_t;
+
+/* Trace context structs: copies of the outer IP + UDP headers */
+typedef struct udp4_decap_trace_t_
+{
+  ip4_header_t ip;
+  udp_header_t udp;
+} udp4_decap_trace_t;
+
+typedef struct udp6_decap_trace_t_
+{
+  ip6_header_t ip;
+  udp_header_t udp;
+} udp6_decap_trace_t;
+
+typedef struct udp_decap_trace_t_
+{
+  union
+  {
+    udp4_decap_trace_t udp4;
+    udp6_decap_trace_t udp6;
+  };
+
+  u8 isv6;			/* selects which union member is valid */
+  u8 ishicn;			/* non-zero if the buffer came from a hICN udp tunnel */
+} udp_decap_trace_t;
+
+
+/*
+ * Format a udp decap trace entry: outer IP header, outer UDP header and a
+ * marker when the packet matched a hICN udp tunnel.
+ */
+static u8 *
+format_udp_decap_trace (u8 * s, va_list * args)
+{
+  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+  udp_decap_trace_t *t;
+
+  t = va_arg (*args, udp_decap_trace_t *);
+
+  if (t->isv6)
+    {
+      /* udp6.ip is an ip6_header_t: use the v6 formatter
+       * (was format_ip4_header, which misreads the v6 header) */
+      s = format (s, "%U\n  %U \n %s",
+		  format_ip6_header, &t->udp6.ip, sizeof (t->udp6.ip),
+		  format_udp_header, &t->udp6.udp, sizeof (t->udp6.udp),
+		  t->ishicn ? "hICN udp tunnel" : "");
+    }
+  else
+    {
+      s = format (s, "%U\n  %U \n %s",
+		  format_ip4_header, &t->udp4.ip, sizeof (t->udp4.ip),
+		  format_udp_header, &t->udp4.udp, sizeof (t->udp4.udp),
+		  t->ishicn ? "hICN udp tunnel" : "");
+    }
+  return (s);
+}
+
+/*
+ * Capture the outer IP + UDP headers of b into the packet trace, if tracing
+ * is enabled for this buffer.  The caller must have rewound the buffer so
+ * that vlib_buffer_get_current() points at the outer IP header; isv6 tells
+ * which header layout (and tunnel flag) applies.
+ */
+static_always_inline void
+udp_decap_trace_buffer (vlib_main_t * vm, vlib_node_runtime_t * node,
+			u8 isv6, vlib_buffer_t * b)
+{
+  if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) &&
+		     (b->flags & VLIB_BUFFER_IS_TRACED)))
+    {
+      udp_decap_trace_t *t =
+	vlib_add_trace (vm, node, b, sizeof (*t));
+      t->isv6 = isv6;
+      hicn_buffer_t *hb = hicn_get_buffer(b);
+
+      if (isv6)
+	{
+	  clib_memcpy(&(t->udp6.udp), vlib_buffer_get_current(b) + sizeof(ip6_header_t), sizeof(udp_header_t));
+	  clib_memcpy(&(t->udp6.ip), vlib_buffer_get_current(b), sizeof(ip6_header_t));
+	  /* fixed: mask operand was duplicated (hb->flags & hb->flags & ...) */
+	  t->ishicn = hb->flags & HICN_BUFFER_FLAGS_FROM_UDP6_TUNNEL;
+	}
+      else
+	{
+	  clib_memcpy(&(t->udp4.udp), vlib_buffer_get_current(b) + sizeof(ip4_header_t), sizeof(udp_header_t));
+	  clib_memcpy(&(t->udp4.ip), vlib_buffer_get_current(b), sizeof(ip4_header_t));
+	  t->ishicn = hb->flags & HICN_BUFFER_FLAGS_FROM_UDP4_TUNNEL;
+	}
+    }
+}
+
+/*
+ * udp4-decap node function.
+ *
+ * Buffers arrive with the current data pointer already past the outer
+ * IPv4 + UDP headers.  For each packet: rewind to the outer header, look
+ * up the udp tunnel (addresses and ports swapped, as this is the decap
+ * side), store the tunnel index in ip.adj_index[VLIB_RX], flag the hicn
+ * buffer, trace, re-advance past the outer headers and dispatch the inner
+ * packet to ip4-lookup or ip6-lookup based on its version nibble.
+ */
+static uword
+udp4_decap_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
+		    vlib_frame_t * frame)
+{
+  u32 n_left_from, *from, *to_next, next_index;
+
+  from = vlib_frame_vector_args (frame);
+  n_left_from = frame->n_vectors;
+  next_index = node->cached_next_index;
+
+  while (n_left_from > 0)
+    {
+      u32 n_left_to_next;
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+      /* Quad loop, X4 */
+      while (n_left_from >= 8 && n_left_to_next >= 4)
+	{
+	  vlib_buffer_t *b0, *b1, *b2, *b3;
+	  u32 bi0, bi1, bi2, bi3;
+	  u32 next0, next1, next2, next3;
+
+	  {
+	    vlib_buffer_t *b4, *b5, *b6, *b7;
+	    b4 = vlib_get_buffer (vm, from[4]);
+	    b5 = vlib_get_buffer (vm, from[5]);
+	    b6 = vlib_get_buffer (vm, from[6]);
+	    b7 = vlib_get_buffer (vm, from[7]);
+	    CLIB_PREFETCH (b4, CLIB_CACHE_LINE_BYTES, STORE);
+	    CLIB_PREFETCH (b5, CLIB_CACHE_LINE_BYTES, STORE);
+	    CLIB_PREFETCH (b6, CLIB_CACHE_LINE_BYTES, STORE);
+	    CLIB_PREFETCH (b7, CLIB_CACHE_LINE_BYTES, STORE);
+	  }
+
+	  bi0 = from[0];
+	  bi1 = from[1];
+	  bi2 = from[2];
+	  bi3 = from[3];
+
+	  from += 4;
+	  n_left_from -= 4;
+	  to_next[0] = bi0;
+	  to_next[1] = bi1;
+	  to_next[2] = bi2;
+	  to_next[3] = bi3;
+
+	  to_next += 4;
+	  n_left_to_next -= 4;
+
+	  b0 = vlib_get_buffer (vm, bi0);
+	  b1 = vlib_get_buffer (vm, bi1);
+	  b2 = vlib_get_buffer (vm, bi2);
+	  b3 = vlib_get_buffer (vm, bi3);
+
+	  /* Version nibble of the inner packet selects the next node */
+	  u8 *ptr0 = vlib_buffer_get_current (b0);
+	  u8 *ptr1 = vlib_buffer_get_current (b1);
+	  u8 *ptr2 = vlib_buffer_get_current (b2);
+	  u8 *ptr3 = vlib_buffer_get_current (b3);
+	  u8 v0 = *ptr0 & 0xf0;
+	  u8 v1 = *ptr1 & 0xf0;
+	  u8 v2 = *ptr2 & 0xf0;
+	  u8 v3 = *ptr3 & 0xf0;
+
+	  u8 advance = sizeof(ip4_header_t) + sizeof(udp_header_t);
+
+	  /* Rewind to the outer IPv4 header */
+	  vlib_buffer_advance(b0, -advance);
+	  vlib_buffer_advance(b1, -advance);
+	  vlib_buffer_advance(b2, -advance);
+	  vlib_buffer_advance(b3, -advance);
+
+	  u8 *outer_ptr0 = vlib_buffer_get_current (b0);
+	  u8 *outer_ptr1 = vlib_buffer_get_current (b1);
+	  u8 *outer_ptr2 = vlib_buffer_get_current (b2);
+	  u8 *outer_ptr3 = vlib_buffer_get_current (b3);
+	  u8 outer_v0 = *outer_ptr0 & 0xf0;
+	  u8 outer_v1 = *outer_ptr1 & 0xf0;
+	  u8 outer_v2 = *outer_ptr2 & 0xf0;
+	  u8 outer_v3 = *outer_ptr3 & 0xf0;
+
+	  ip46_address_t src0 = {0};
+	  ip46_address_t src1 = {0};
+	  ip46_address_t src2 = {0};
+	  ip46_address_t src3 = {0};
+
+	  ip46_address_t dst0 = {0};
+	  ip46_address_t dst1 = {0};
+	  ip46_address_t dst2 = {0};
+	  ip46_address_t dst3 = {0};
+
+	  udp_header_t * udp0 = NULL;
+	  udp_header_t * udp1 = NULL;
+	  udp_header_t * udp2 = NULL;
+	  udp_header_t * udp3 = NULL;
+
+	  ip46_address_set_ip4(&src0, &((ip4_header_t *)outer_ptr0)->src_address);
+	  ip46_address_set_ip4(&dst0, &((ip4_header_t *)outer_ptr0)->dst_address);
+	  udp0 = (udp_header_t *)(outer_ptr0 + sizeof(ip4_header_t));
+	  next0 = v0 == 0x40? UDP4_DECAP_NEXT_LOOKUP_IP4 : UDP4_DECAP_NEXT_LOOKUP_IP6;
+
+	  ip46_address_set_ip4(&src1, &((ip4_header_t *)outer_ptr1)->src_address);
+	  ip46_address_set_ip4(&dst1, &((ip4_header_t *)outer_ptr1)->dst_address);
+	  udp1 = (udp_header_t *)(outer_ptr1 + sizeof(ip4_header_t));
+	  next1 = v1 == 0x40? UDP4_DECAP_NEXT_LOOKUP_IP4 : UDP4_DECAP_NEXT_LOOKUP_IP6;
+
+	  ip46_address_set_ip4(&src2, &((ip4_header_t *)outer_ptr2)->src_address);
+	  ip46_address_set_ip4(&dst2, &((ip4_header_t *)outer_ptr2)->dst_address);
+	  udp2 = (udp_header_t *)(outer_ptr2 + sizeof(ip4_header_t));
+	  next2 = v2 == 0x40? UDP4_DECAP_NEXT_LOOKUP_IP4 : UDP4_DECAP_NEXT_LOOKUP_IP6;
+
+	  ip46_address_set_ip4(&src3, &((ip4_header_t *)outer_ptr3)->src_address);
+	  ip46_address_set_ip4(&dst3, &((ip4_header_t *)outer_ptr3)->dst_address);
+	  udp3 = (udp_header_t *)(outer_ptr3 + sizeof(ip4_header_t));
+	  next3 = v3 == 0x40? UDP4_DECAP_NEXT_LOOKUP_IP4 : UDP4_DECAP_NEXT_LOOKUP_IP6;
+
+	  hicn_buffer_t *hicnb0, *hicnb1, *hicnb2, *hicnb3;
+	  hicnb0 = hicn_get_buffer(b0);
+	  hicnb1 = hicn_get_buffer(b1);
+	  hicnb2 = hicn_get_buffer(b2);
+	  hicnb3 = hicn_get_buffer(b3);
+
+
+	  /* Udp encap-decap tunnels have dst and src addresses and port swapped */
+	  vnet_buffer (b0)->ip.adj_index[VLIB_RX] = udp_tunnel_get(&dst0, &src0, udp0->dst_port, udp0->src_port);
+	  vnet_buffer (b1)->ip.adj_index[VLIB_RX] = udp_tunnel_get(&dst1, &src1, udp1->dst_port, udp1->src_port);
+	  vnet_buffer (b2)->ip.adj_index[VLIB_RX] = udp_tunnel_get(&dst2, &src2, udp2->dst_port, udp2->src_port);
+	  vnet_buffer (b3)->ip.adj_index[VLIB_RX] = udp_tunnel_get(&dst3, &src3, udp3->dst_port, udp3->src_port);
+
+	  if (vnet_buffer (b0)->ip.adj_index[VLIB_RX] !=
+	      UDP_TUNNEL_INVALID)
+	    hicnb0->flags |= (outer_v0 == 0x40? HICN_BUFFER_FLAGS_FROM_UDP4_TUNNEL : HICN_BUFFER_FLAGS_FROM_UDP6_TUNNEL);
+
+	  if (vnet_buffer (b1)->ip.adj_index[VLIB_RX] !=
+	      UDP_TUNNEL_INVALID)
+	    hicnb1->flags |= (outer_v1 == 0x40? HICN_BUFFER_FLAGS_FROM_UDP4_TUNNEL : HICN_BUFFER_FLAGS_FROM_UDP6_TUNNEL);
+
+	  if (vnet_buffer (b2)->ip.adj_index[VLIB_RX] !=
+	      UDP_TUNNEL_INVALID)
+	    hicnb2->flags |= (outer_v2 == 0x40? HICN_BUFFER_FLAGS_FROM_UDP4_TUNNEL : HICN_BUFFER_FLAGS_FROM_UDP6_TUNNEL);
+
+	  if (vnet_buffer (b3)->ip.adj_index[VLIB_RX] !=
+	      UDP_TUNNEL_INVALID)
+	    hicnb3->flags |= (outer_v3 == 0x40? HICN_BUFFER_FLAGS_FROM_UDP4_TUNNEL : HICN_BUFFER_FLAGS_FROM_UDP6_TUNNEL);
+
+	  /* Outer header is IPv4: trace with isv6 = 0 (was wrongly 1,
+	   * which made the trace read ip6-sized headers from an ip4 packet) */
+	  udp_decap_trace_buffer (vm, node, 0, b0);
+	  udp_decap_trace_buffer (vm, node, 0, b1);
+	  udp_decap_trace_buffer (vm, node, 0, b2);
+	  udp_decap_trace_buffer (vm, node, 0, b3);
+
+	  /* Restore the current pointer to the inner packet */
+	  vlib_buffer_advance(b0, advance);
+	  vlib_buffer_advance(b1, advance);
+	  vlib_buffer_advance(b2, advance);
+	  vlib_buffer_advance(b3, advance);
+
+	  vlib_validate_buffer_enqueue_x4 (vm, node, next_index, to_next,
+					   n_left_to_next, bi0, bi1, bi2, bi3,
+					   next0, next1, next2, next3);
+	}
+
+      /* Single loop, X1 */
+      while (n_left_from > 0 && n_left_to_next > 0)
+	{
+	  vlib_buffer_t *b0;
+	  u32 bi0;
+	  u32 next0;
+
+	  if (n_left_from > 1)
+	    {
+	      vlib_buffer_t *b1;
+	      b1 = vlib_get_buffer (vm, from[1]);
+	      CLIB_PREFETCH (b1, CLIB_CACHE_LINE_BYTES, STORE);
+	    }
+
+	  bi0 = from[0];
+	  from += 1;
+	  n_left_from -= 1;
+	  to_next[0] = bi0;
+	  to_next += 1;
+	  n_left_to_next -= 1;
+
+	  b0 = vlib_get_buffer (vm, bi0);
+
+	  u8 *ptr0 = vlib_buffer_get_current (b0);
+	  u8 v0 = *ptr0 & 0xf0;
+
+	  u8 advance = sizeof(ip4_header_t) + sizeof(udp_header_t);
+
+	  vlib_buffer_advance(b0, -advance);
+
+	  u8 *outer_ptr0 = vlib_buffer_get_current (b0);
+	  u8 outer_v0 = *outer_ptr0 & 0xf0;
+
+	  ip46_address_t src0 = {0};
+	  ip46_address_t dst0 = {0};
+	  udp_header_t * udp0 = NULL;
+
+	  ip46_address_set_ip4(&src0, &((ip4_header_t *)outer_ptr0)->src_address);
+	  ip46_address_set_ip4(&dst0, &((ip4_header_t *)outer_ptr0)->dst_address);
+	  udp0 = (udp_header_t *)(outer_ptr0 + sizeof(ip4_header_t));
+	  next0 = v0 == 0x40 ? UDP4_DECAP_NEXT_LOOKUP_IP4: UDP4_DECAP_NEXT_LOOKUP_IP6;
+
+	  hicn_buffer_t *hicnb0 = hicn_get_buffer(b0);
+
+	  vnet_buffer (b0)->ip.adj_index[VLIB_RX] = udp_tunnel_get(&dst0, &src0, udp0->dst_port, udp0->src_port);
+
+	  if (vnet_buffer (b0)->ip.adj_index[VLIB_RX] !=
+	      UDP_TUNNEL_INVALID)
+	    hicnb0->flags |= (outer_v0 == 0x40 ? HICN_BUFFER_FLAGS_FROM_UDP4_TUNNEL : HICN_BUFFER_FLAGS_FROM_UDP6_TUNNEL);
+
+	  /* Outer header is IPv4: trace with isv6 = 0 (was wrongly 1) */
+	  udp_decap_trace_buffer (vm, node, 0, b0);
+
+	  vlib_buffer_advance(b0, advance);
+
+	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+					   n_left_to_next, bi0, next0);
+
+	}
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+    }
+
+  return (frame->n_vectors);
+}
+
+
+/*
+ * Node registration for the interest forwarder node
+ */
+/* *INDENT-OFF* */
+/* Dispatched from the udp layer via udp_register_dst_port (udp_tunnel.c) */
+VLIB_REGISTER_NODE(udp4_decap_node) =
+{
+  .function = udp4_decap_node_fn,
+  .name = "udp4-decap",
+  .vector_size = sizeof(u32),
+  .format_trace = format_udp_decap_trace,
+  .type = VLIB_NODE_TYPE_INTERNAL,
+  .n_errors = ARRAY_LEN(udp_decap_error_strings),
+  .error_strings = udp_decap_error_strings,
+  .n_next_nodes = UDP4_DECAP_N_NEXT,
+  /* edit / add dispositions here */
+  .next_nodes =
+  {
+    [UDP4_DECAP_NEXT_LOOKUP_IP4] = "ip4-lookup",
+    [UDP4_DECAP_NEXT_LOOKUP_IP6] = "ip6-lookup"
+  },
+};
+/* *INDENT-ON* */
+
+/*
+ * udp6-decap node function.
+ *
+ * Same logic as udp4-decap, but the outer header is IPv6: rewind to the
+ * outer IPv6 + UDP headers, look up the udp tunnel (addresses/ports
+ * swapped on the decap side), store the tunnel index in
+ * ip.adj_index[VLIB_RX], flag the hicn buffer, trace, re-advance and
+ * dispatch the inner packet to ip4-lookup or ip6-lookup.
+ */
+static uword
+udp6_decap_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
+		    vlib_frame_t * frame)
+{
+  u32 n_left_from, *from, *to_next, next_index;
+
+  from = vlib_frame_vector_args (frame);
+  n_left_from = frame->n_vectors;
+  next_index = node->cached_next_index;
+
+  while (n_left_from > 0)
+    {
+      u32 n_left_to_next;
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+      /* Quad loop, X4 */
+      while (n_left_from >= 8 && n_left_to_next >= 4)
+	{
+	  vlib_buffer_t *b0, *b1, *b2, *b3;
+	  u32 bi0, bi1, bi2, bi3;
+	  u32 next0, next1, next2, next3;
+
+	  {
+	    vlib_buffer_t *b4, *b5, *b6, *b7;
+	    b4 = vlib_get_buffer (vm, from[4]);
+	    b5 = vlib_get_buffer (vm, from[5]);
+	    b6 = vlib_get_buffer (vm, from[6]);
+	    b7 = vlib_get_buffer (vm, from[7]);
+	    CLIB_PREFETCH (b4, CLIB_CACHE_LINE_BYTES, STORE);
+	    CLIB_PREFETCH (b5, CLIB_CACHE_LINE_BYTES, STORE);
+	    CLIB_PREFETCH (b6, CLIB_CACHE_LINE_BYTES, STORE);
+	    CLIB_PREFETCH (b7, CLIB_CACHE_LINE_BYTES, STORE);
+	  }
+
+	  bi0 = from[0];
+	  bi1 = from[1];
+	  bi2 = from[2];
+	  bi3 = from[3];
+
+	  from += 4;
+	  n_left_from -= 4;
+	  to_next[0] = bi0;
+	  to_next[1] = bi1;
+	  to_next[2] = bi2;
+	  to_next[3] = bi3;
+
+	  to_next += 4;
+	  n_left_to_next -= 4;
+
+	  b0 = vlib_get_buffer (vm, bi0);
+	  b1 = vlib_get_buffer (vm, bi1);
+	  b2 = vlib_get_buffer (vm, bi2);
+	  b3 = vlib_get_buffer (vm, bi3);
+
+	  /* Version nibble of the inner packet selects the next node */
+	  u8 *ptr0 = vlib_buffer_get_current (b0);
+	  u8 *ptr1 = vlib_buffer_get_current (b1);
+	  u8 *ptr2 = vlib_buffer_get_current (b2);
+	  u8 *ptr3 = vlib_buffer_get_current (b3);
+	  u8 v0 = *ptr0 & 0xf0;
+	  u8 v1 = *ptr1 & 0xf0;
+	  u8 v2 = *ptr2 & 0xf0;
+	  u8 v3 = *ptr3 & 0xf0;
+
+	  u8 advance = sizeof(ip6_header_t) + sizeof(udp_header_t);
+
+	  /* Rewind to the outer IPv6 header */
+	  vlib_buffer_advance(b0, -advance);
+	  vlib_buffer_advance(b1, -advance);
+	  vlib_buffer_advance(b2, -advance);
+	  vlib_buffer_advance(b3, -advance);
+
+	  u8 *outer_ptr0 = vlib_buffer_get_current (b0);
+	  u8 *outer_ptr1 = vlib_buffer_get_current (b1);
+	  u8 *outer_ptr2 = vlib_buffer_get_current (b2);
+	  u8 *outer_ptr3 = vlib_buffer_get_current (b3);
+	  u8 outer_v0 = *outer_ptr0 & 0xf0;
+	  u8 outer_v1 = *outer_ptr1 & 0xf0;
+	  u8 outer_v2 = *outer_ptr2 & 0xf0;
+	  u8 outer_v3 = *outer_ptr3 & 0xf0;
+
+	  ip46_address_t src0 = {0};
+	  ip46_address_t src1 = {0};
+	  ip46_address_t src2 = {0};
+	  ip46_address_t src3 = {0};
+
+	  ip46_address_t dst0 = {0};
+	  ip46_address_t dst1 = {0};
+	  ip46_address_t dst2 = {0};
+	  ip46_address_t dst3 = {0};
+
+	  udp_header_t * udp0 = NULL;
+	  udp_header_t * udp1 = NULL;
+	  udp_header_t * udp2 = NULL;
+	  udp_header_t * udp3 = NULL;
+
+	  ip46_address_set_ip6(&src0, &((ip6_header_t *)outer_ptr0)->src_address);
+	  ip46_address_set_ip6(&dst0, &((ip6_header_t *)outer_ptr0)->dst_address);
+	  udp0 = (udp_header_t *)(outer_ptr0 + sizeof(ip6_header_t));
+	  next0 = v0 == 0x40 ? UDP6_DECAP_NEXT_LOOKUP_IP4 : UDP6_DECAP_NEXT_LOOKUP_IP6;
+
+	  ip46_address_set_ip6(&src1, &((ip6_header_t *)outer_ptr1)->src_address);
+	  ip46_address_set_ip6(&dst1, &((ip6_header_t *)outer_ptr1)->dst_address);
+	  udp1 = (udp_header_t *)(outer_ptr1 + sizeof(ip6_header_t));
+	  next1 = v1 == 0x40 ? UDP6_DECAP_NEXT_LOOKUP_IP4 : UDP6_DECAP_NEXT_LOOKUP_IP6;
+
+	  ip46_address_set_ip6(&src2, &((ip6_header_t *)outer_ptr2)->src_address);
+	  ip46_address_set_ip6(&dst2, &((ip6_header_t *)outer_ptr2)->dst_address);
+	  udp2 = (udp_header_t *)(outer_ptr2 + sizeof(ip6_header_t));
+	  next2 = v2 == 0x40 ? UDP6_DECAP_NEXT_LOOKUP_IP4 : UDP6_DECAP_NEXT_LOOKUP_IP6;
+
+	  ip46_address_set_ip6(&src3, &((ip6_header_t *)outer_ptr3)->src_address);
+	  ip46_address_set_ip6(&dst3, &((ip6_header_t *)outer_ptr3)->dst_address);
+	  udp3 = (udp_header_t *)(outer_ptr3 + sizeof(ip6_header_t));
+	  next3 = v3 == 0x40 ? UDP6_DECAP_NEXT_LOOKUP_IP4 : UDP6_DECAP_NEXT_LOOKUP_IP6;
+
+	  hicn_buffer_t *hicnb0, *hicnb1, *hicnb2, *hicnb3;
+	  hicnb0 = hicn_get_buffer(b0);
+	  hicnb1 = hicn_get_buffer(b1);
+	  hicnb2 = hicn_get_buffer(b2);
+	  hicnb3 = hicn_get_buffer(b3);
+
+
+	  /* Udp encap-decap tunnels have dst and src addresses and port swapped */
+	  vnet_buffer (b0)->ip.adj_index[VLIB_RX] = udp_tunnel_get(&dst0, &src0, udp0->dst_port, udp0->src_port);
+	  vnet_buffer (b1)->ip.adj_index[VLIB_RX] = udp_tunnel_get(&dst1, &src1, udp1->dst_port, udp1->src_port);
+	  vnet_buffer (b2)->ip.adj_index[VLIB_RX] = udp_tunnel_get(&dst2, &src2, udp2->dst_port, udp2->src_port);
+	  vnet_buffer (b3)->ip.adj_index[VLIB_RX] = udp_tunnel_get(&dst3, &src3, udp3->dst_port, udp3->src_port);
+
+	  if (vnet_buffer (b0)->ip.adj_index[VLIB_RX] !=
+	      UDP_TUNNEL_INVALID)
+	    hicnb0->flags |= (outer_v0 == 0x40? HICN_BUFFER_FLAGS_FROM_UDP4_TUNNEL : HICN_BUFFER_FLAGS_FROM_UDP6_TUNNEL);
+
+	  if (vnet_buffer (b1)->ip.adj_index[VLIB_RX] !=
+	      UDP_TUNNEL_INVALID)
+	    hicnb1->flags |= (outer_v1 == 0x40? HICN_BUFFER_FLAGS_FROM_UDP4_TUNNEL : HICN_BUFFER_FLAGS_FROM_UDP6_TUNNEL);
+
+	  if (vnet_buffer (b2)->ip.adj_index[VLIB_RX] !=
+	      UDP_TUNNEL_INVALID)
+	    hicnb2->flags |= (outer_v2 == 0x40? HICN_BUFFER_FLAGS_FROM_UDP4_TUNNEL : HICN_BUFFER_FLAGS_FROM_UDP6_TUNNEL);
+
+	  if (vnet_buffer (b3)->ip.adj_index[VLIB_RX] !=
+	      UDP_TUNNEL_INVALID)
+	    hicnb3->flags |= (outer_v3 == 0x40? HICN_BUFFER_FLAGS_FROM_UDP4_TUNNEL : HICN_BUFFER_FLAGS_FROM_UDP6_TUNNEL);
+
+	  /* Outer header is IPv6: trace with isv6 = 1 (was wrongly 0,
+	   * which made the trace copy ip4-sized headers from an ip6 packet) */
+	  udp_decap_trace_buffer (vm, node, 1, b0);
+	  udp_decap_trace_buffer (vm, node, 1, b1);
+	  udp_decap_trace_buffer (vm, node, 1, b2);
+	  udp_decap_trace_buffer (vm, node, 1, b3);
+
+	  /* Restore the current pointer to the inner packet */
+	  vlib_buffer_advance(b0, advance);
+	  vlib_buffer_advance(b1, advance);
+	  vlib_buffer_advance(b2, advance);
+	  vlib_buffer_advance(b3, advance);
+
+	  vlib_validate_buffer_enqueue_x4 (vm, node, next_index, to_next,
+					   n_left_to_next, bi0, bi1, bi2, bi3,
+					   next0, next1, next2, next3);
+	}
+
+      /* Single loop, X1 */
+      while (n_left_from > 0 && n_left_to_next > 0)
+	{
+	  vlib_buffer_t *b0;
+	  u32 bi0;
+	  u32 next0;
+
+	  if (n_left_from > 1)
+	    {
+	      vlib_buffer_t *b1;
+	      b1 = vlib_get_buffer (vm, from[1]);
+	      CLIB_PREFETCH (b1, CLIB_CACHE_LINE_BYTES, STORE);
+	    }
+
+	  bi0 = from[0];
+	  from += 1;
+	  n_left_from -= 1;
+	  to_next[0] = bi0;
+	  to_next += 1;
+	  n_left_to_next -= 1;
+
+	  b0 = vlib_get_buffer (vm, bi0);
+
+	  u8 *ptr0 = vlib_buffer_get_current (b0);
+	  u8 v0 = *ptr0 & 0xf0;
+
+	  u8 advance = sizeof(ip6_header_t) + sizeof(udp_header_t);
+
+	  vlib_buffer_advance(b0, -advance);
+
+	  u8 *outer_ptr0 = vlib_buffer_get_current (b0);
+	  u8 outer_v0 = *outer_ptr0 & 0xf0;
+
+	  ip46_address_t src0 = {0};
+	  ip46_address_t dst0 = {0};
+	  udp_header_t * udp0 = NULL;
+
+	  ip46_address_set_ip6(&src0, &((ip6_header_t *)outer_ptr0)->src_address);
+	  ip46_address_set_ip6(&dst0, &((ip6_header_t *)outer_ptr0)->dst_address);
+	  udp0 = (udp_header_t *)(outer_ptr0 + sizeof(ip6_header_t));
+	  next0 = v0 == 0x40? UDP6_DECAP_NEXT_LOOKUP_IP4 : UDP6_DECAP_NEXT_LOOKUP_IP6;
+
+	  hicn_buffer_t *hicnb0 = hicn_get_buffer(b0);
+
+	  vnet_buffer (b0)->ip.adj_index[VLIB_RX] = udp_tunnel_get(&dst0, &src0, udp0->dst_port, udp0->src_port);
+
+	  if (vnet_buffer (b0)->ip.adj_index[VLIB_RX] !=
+	      UDP_TUNNEL_INVALID)
+	    hicnb0->flags |= (outer_v0 == 0x40? HICN_BUFFER_FLAGS_FROM_UDP4_TUNNEL : HICN_BUFFER_FLAGS_FROM_UDP6_TUNNEL);
+
+	  /* Outer header is IPv6: trace with isv6 = 1 (was wrongly 0) */
+	  udp_decap_trace_buffer (vm, node, 1, b0);
+
+	  vlib_buffer_advance(b0, advance);
+
+	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+					   n_left_to_next, bi0, next0);
+
+	}
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+    }
+
+  return (frame->n_vectors);
+}
+
+
+/*
+ * Node registration for the interest forwarder node
+ */
+/* *INDENT-OFF* */
+/* Dispatched from the udp layer via udp_register_dst_port (udp_tunnel.c) */
+VLIB_REGISTER_NODE(udp6_decap_node) =
+{
+  .function = udp6_decap_node_fn,
+  .name = "udp6-decap",
+  .vector_size = sizeof(u32),
+  .format_trace = format_udp_decap_trace,
+  .type = VLIB_NODE_TYPE_INTERNAL,
+  .n_errors = ARRAY_LEN(udp_decap_error_strings),
+  .error_strings = udp_decap_error_strings,
+  .n_next_nodes = UDP6_DECAP_N_NEXT,
+  /* edit / add dispositions here */
+  .next_nodes =
+  {
+    [UDP6_DECAP_NEXT_LOOKUP_IP4] = "ip4-lookup",
+    [UDP6_DECAP_NEXT_LOOKUP_IP6] = "ip6-lookup"
+  },
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/hicn-plugin/src/network/udp_tunnels/udp_tunnel.c b/hicn-plugin/src/network/udp_tunnels/udp_tunnel.c
new file mode 100644
index 000000000..872e4cd82
--- /dev/null
+++ b/hicn-plugin/src/network/udp_tunnels/udp_tunnel.c
@@ -0,0 +1,281 @@
+/*
+ * Copyright (c) 2020 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vppinfra/bihash_40_8.h>
+#include <vnet/fib/fib_table.h>
+
+#include "../error.h"
+#include "../strategy_dpo_ctx.h"
+#include "udp_tunnel.h"
+
+clib_bihash_40_8_t udp_tunnels_hashtb;
+dpo_type_t dpo_type_udp_ip4;
+dpo_type_t dpo_type_udp_ip6;
+
+/*
+ * Create a udp tunnel keyed by (src_ip, dst_ip, src_port, dst_port), or
+ * return the existing one if the key is already present.
+ *
+ * Ports are expected in HOST byte order here: they are converted to
+ * network order for the hash key (the decap data path performs lookups
+ * with the network-order ports it reads from the packet) and passed
+ * unconverted to udp_register_dst_port(), which takes host order.
+ *
+ * Key layout: key[0..1] = src ip46_address_t, key[2..3] = dst
+ * ip46_address_t, key[4] = (net src_port << 16) | net dst_port.
+ */
+u32 udp_tunnel_add (fib_protocol_t proto,
+                    index_t fib_index,
+                    const ip46_address_t * src_ip,
+                    const ip46_address_t * dst_ip,
+                    u16 src_port,
+                    u16 dst_port,
+                    udp_encap_fixup_flags_t flags)
+{
+  vlib_main_t *vm = vlib_get_main();
+  clib_bihash_kv_40_8_t kv;
+  clib_memcpy(&kv.key[0], src_ip, sizeof(ip46_address_t));
+  clib_memcpy(&kv.key[2], dst_ip, sizeof(ip46_address_t));
+  kv.key[4] = (clib_host_to_net_u16(src_port) << 16) + clib_host_to_net_u16(dst_port);
+
+  clib_bihash_kv_40_8_t value;
+  int rv = clib_bihash_search_40_8 (&udp_tunnels_hashtb, &kv, &value);
+
+  if (rv != 0)
+    {
+      /* Tunnel not found: create the encap object, index it, and steer the
+       * local UDP port to the appropriate decap node. */
+      u32 uei = udp_encap_add_and_lock(proto, fib_index, src_ip, dst_ip, src_port, dst_port, flags);
+      kv.value = uei;
+      clib_bihash_add_del_40_8(&udp_tunnels_hashtb, &kv, 1);
+      value.value = kv.value;
+      if (proto == FIB_PROTOCOL_IP4)
+        {
+          udp_register_dst_port(vm, src_port, udp4_decap_node.index, 1);
+        }
+      else
+        {
+          udp_register_dst_port(vm, src_port, udp6_decap_node.index, 0);
+        }
+    }
+
+  return value.value;
+}
+
+/*
+ * Index an already-created udp encap object in the tunnel hash table and
+ * register its source port with the matching decap node.
+ *
+ * Addresses and ports are taken straight from the encap's rewrite headers,
+ * so the ports stored in the key are in network byte order (matching the
+ * lookups done by the decap data path).
+ */
+void udp_tunnel_add_existing (index_t uei, dpo_proto_t proto)
+{
+  vlib_main_t *vm = vlib_get_main();
+  udp_encap_t *encap = udp_encap_get(uei);
+  clib_bihash_kv_40_8_t kv;
+
+  ip46_address_t tun_src = {0};
+  ip46_address_t tun_dst = {0};
+  u16 sport = 0, dport = 0;
+
+  if (proto == DPO_PROTO_IP4)
+    {
+      ip46_address_set_ip4(&tun_src, &(encap->ue_hdrs.ip4.ue_ip4.src_address));
+      ip46_address_set_ip4(&tun_dst, &(encap->ue_hdrs.ip4.ue_ip4.dst_address));
+      sport = encap->ue_hdrs.ip4.ue_udp.src_port;
+      dport = encap->ue_hdrs.ip4.ue_udp.dst_port;
+    }
+  else if (proto == DPO_PROTO_IP6)
+    {
+      ip46_address_set_ip6(&tun_src, &(encap->ue_hdrs.ip6.ue_ip6.src_address));
+      ip46_address_set_ip6(&tun_dst, &(encap->ue_hdrs.ip6.ue_ip6.dst_address));
+      sport = encap->ue_hdrs.ip6.ue_udp.src_port;
+      dport = encap->ue_hdrs.ip6.ue_udp.dst_port;
+    }
+
+  /* Key layout mirrors udp_tunnel_add: src | dst | (sport << 16) + dport. */
+  clib_memcpy(&kv.key[0], &tun_src, sizeof(ip46_address_t));
+  clib_memcpy(&kv.key[2], &tun_dst, sizeof(ip46_address_t));
+  kv.key[4] = (sport << 16) + dport;
+  kv.value = uei;
+
+  clib_bihash_add_del_40_8(&udp_tunnels_hashtb, &kv, 1);
+
+  /* udp_register_dst_port() wants the port in host order. */
+  if (proto == DPO_PROTO_IP4)
+    {
+      udp_register_dst_port(vm, clib_net_to_host_u16(sport), udp4_decap_node.index, 1);
+    }
+  else
+    {
+      udp_register_dst_port(vm, clib_net_to_host_u16(sport), udp6_decap_node.index, 0);
+    }
+}
+
+/*
+ * Delete a udp tunnel identified by (src_ip, dst_ip, src_port, dst_port).
+ * Ports are expected in host byte order (same convention as
+ * udp_tunnel_add). The underlying udp encap is unlocked and the hash
+ * entry removed.
+ *
+ * @return HICN_ERROR_NONE on success,
+ *         HICN_ERROR_UDP_TUNNEL_NOT_FOUND if no such tunnel exists.
+ */
+int udp_tunnel_del (fib_protocol_t proto,
+                    index_t fib_index,
+                    const ip46_address_t * src_ip,
+                    const ip46_address_t * dst_ip,
+                    u16 src_port,
+                    u16 dst_port,
+                    udp_encap_fixup_flags_t flags)
+{
+  clib_bihash_kv_40_8_t kv, result;
+
+  /* Rebuild the same key used at creation time. */
+  clib_memcpy(&kv.key[0], src_ip, sizeof(ip46_address_t));
+  clib_memcpy(&kv.key[2], dst_ip, sizeof(ip46_address_t));
+  kv.key[4] = (clib_host_to_net_u16(src_port) << 16) + clib_host_to_net_u16(dst_port);
+
+  if (clib_bihash_search_40_8 (&udp_tunnels_hashtb, &kv, &result) != 0)
+    return HICN_ERROR_UDP_TUNNEL_NOT_FOUND;
+
+  udp_encap_unlock((u32)result.value);
+  clib_bihash_add_del_40_8(&udp_tunnels_hashtb, &kv, 0 /* is_add */);
+
+  return HICN_ERROR_NONE;
+}
+
+/*
+ * Look up the encap index of a udp tunnel.
+ *
+ * NOTE: unlike udp_tunnel_add/udp_tunnel_del, the ports are used as-is in
+ * the key — callers on the decap data path pass them in network byte
+ * order, straight from the packet.
+ *
+ * @return the encap index, or UDP_TUNNEL_INVALID if not found.
+ */
+u32 udp_tunnel_get(const ip46_address_t * src_ip,
+                   const ip46_address_t * dst_ip,
+                   u16 src_port,
+                   u16 dst_port)
+{
+  clib_bihash_kv_40_8_t kv, result;
+
+  clib_memcpy(&kv.key[0], src_ip, sizeof(ip46_address_t));
+  clib_memcpy(&kv.key[2], dst_ip, sizeof(ip46_address_t));
+  kv.key[4] = (src_port << 16) + dst_port;
+
+  if (clib_bihash_search_40_8 (&udp_tunnels_hashtb, &kv, &result) != 0)
+    return UDP_TUNNEL_INVALID;
+
+  return (u32)result.value;
+}
+
+
+/*
+ * Initialize the udp tunnel module: create the tunnel hash table and
+ * discover the dpo types that udp_encap produces for IPv4 and IPv6.
+ */
+void udp_tunnel_init()
+{
+  clib_bihash_init_40_8(&udp_tunnels_hashtb, "udp encap table",
+                        2048, 256 << 20);
+
+  /*
+   * Udp encap does not expose the dpo type when it registers.
+   * In the following we understand what is the dpo type for a udp_encap dpo:
+   * create a throw-away encap, ask it to contribute forwarding, record the
+   * dpo type it reports, then release the encap.
+   */
+  ip46_address_t src = {0};
+  ip46_address_t dst = {0};
+
+  /* Dummy distinct endpoints; only used to create the probe encaps. */
+  src.ip6.as_u8[15] = 1;
+  dst.ip6.as_u8[15] = 2;
+
+  u32 fib_index = fib_table_find (FIB_PROTOCOL_IP6, HICN_FIB_TABLE);
+  u32 uei = udp_encap_add_and_lock(FIB_PROTOCOL_IP6, fib_index, &src, &dst, 4444, 4444, UDP_ENCAP_FIXUP_NONE);
+
+  dpo_id_t temp = DPO_INVALID;
+  udp_encap_contribute_forwarding(uei, DPO_PROTO_IP6, &temp);
+  dpo_type_udp_ip6 = temp.dpoi_type;
+  udp_encap_unlock(uei);
+
+  /* Same probe for IPv4 (the ip46 addresses are reused; only their v4
+   * part is read by the v4 encap — NOTE(review): confirm this is intended). */
+  dpo_id_t temp2 = DPO_INVALID;
+  fib_index = fib_table_find (FIB_PROTOCOL_IP4, HICN_FIB_TABLE);
+  uei = udp_encap_add_and_lock(FIB_PROTOCOL_IP4, fib_index, &src, &dst, 4444, 4444, UDP_ENCAP_FIXUP_NONE);
+  udp_encap_contribute_forwarding(uei, DPO_PROTO_IP4, &temp2);
+  dpo_type_udp_ip4 = temp2.dpoi_type;
+  udp_encap_unlock(uei);
+}
+
+/*
+ * CLI handler for "udp tunnel [add|del] <src_ip> <dst_ip> <src_port> <dst_port>".
+ *
+ * Fixes over the original:
+ *  - src_port/dst_port are zero-initialized (they were read uninitialized
+ *    when the port pair was omitted from the command line);
+ *  - missing CLI input no longer leads to unformat_free() being called on
+ *    an uninitialized unformat_input_t;
+ *  - fib_table_find() is only called once a valid address family has been
+ *    parsed (it was previously invoked with FIB_PROTOCOL_MAX when no
+ *    addresses were given).
+ */
+static clib_error_t *
+udp_tunnel_command_fn (vlib_main_t * vm,
+                       unformat_input_t * main_input,
+                       vlib_cli_command_t * cmd)
+{
+  unformat_input_t _line_input, *line_input = &_line_input;
+  clib_error_t *error = NULL;
+  ip46_address_t src_ip = {0}, dst_ip = {0};
+  u32 table_id = HICN_FIB_TABLE;
+  u32 src_port = 0, dst_port = 0;
+  fib_protocol_t fproto = FIB_PROTOCOL_MAX;
+  u8 is_del = 0;
+  index_t uei = ~0;
+
+  /* Get a line of input; without one there is nothing to parse. */
+  if (!unformat_user (main_input, unformat_line_input, line_input))
+    return clib_error_return (0, "specify some IP addresses");
+
+  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+    {
+      if (unformat (line_input, "index %d", &uei))
+        ;                       /* parsed but currently unused */
+      else if (unformat (line_input, "add"))
+        is_del = 0;
+      else if (unformat (line_input, "del"))
+        is_del = 1;
+      else if (unformat (line_input, "%U %U",
+                         unformat_ip4_address, &src_ip.ip4,
+                         unformat_ip4_address, &dst_ip.ip4))
+        fproto = FIB_PROTOCOL_IP4;
+      else if (unformat (line_input, "%U %U",
+                         unformat_ip6_address, &src_ip.ip6,
+                         unformat_ip6_address, &dst_ip.ip6))
+        fproto = FIB_PROTOCOL_IP6;
+      else if (unformat (line_input, "%d %d", &src_port, &dst_port))
+        ;
+      else if (unformat (line_input, "table-id %d", &table_id))
+        ;
+      else
+        {
+          error = unformat_parse_error (line_input);
+          goto done;
+        }
+    }
+
+  /* Without a recognized address pair we cannot resolve a fib table. */
+  if (fproto == FIB_PROTOCOL_MAX)
+    {
+      error = clib_error_return (0, "specify some IP addresses");
+      goto done;
+    }
+
+  index_t fib_index = fib_table_find (fproto, table_id);
+  if (~0 == fib_index)
+    {
+      error = clib_error_return (0, "Nonexistent table id %d", table_id);
+      goto done;
+    }
+
+  if (is_del)
+    {
+      int ret = udp_tunnel_del (fproto, fib_index, &src_ip, &dst_ip,
+                                src_port, dst_port, UDP_ENCAP_FIXUP_NONE);
+      error = (ret == HICN_ERROR_NONE)
+        ? 0
+        : clib_error_return (0, "%s\n", get_error_string (ret));
+    }
+  else
+    {
+      uei = udp_tunnel_add (fproto, fib_index, &src_ip, &dst_ip,
+                            src_port, dst_port, UDP_ENCAP_FIXUP_NONE);
+      vlib_cli_output (vm, "udp-encap: %d\n", uei);
+    }
+
+done:
+  unformat_free (line_input);
+  return error;
+}
+
+/* *INDENT-OFF* */
+/* CLI registration: handled by udp_tunnel_command_fn above. */
+VLIB_CLI_COMMAND (udp_tunnel_command, static) =
+  {
+    .path = "udp tunnel",
+    .short_help = "udp tunnel [add/del] src_address dst_address src_port dst_port",
+    .function = udp_tunnel_command_fn,
+  };
+/* *INDENT-ON* */
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */
diff --git a/hicn-plugin/src/network/udp_tunnels/udp_tunnel.h b/hicn-plugin/src/network/udp_tunnels/udp_tunnel.h
new file mode 100644
index 000000000..2ec92056c
--- /dev/null
+++ b/hicn-plugin/src/network/udp_tunnels/udp_tunnel.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2020 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __UDP_TUNNEL__
+#define __UDP_TUNNEL__
+
+#include <vlib/vlib.h>
+#include <vppinfra/error.h>
+#include <vnet/udp/udp_encap.h>
+
+/**
+ * @file udp_tunnel.h
+ *
+ * This file implements bidirectional udp tunnels. Udp tunnels exploit
+ * the udp encap functionality in vpp. In particular, a udp tunnel creates
+ * a udp encap object with the information for encapsulating packets and it
+ * implements the udp decap node. The udp decap node checks if a udp tunnel
+ * exists before performing the decapsulation. If the tunnel does not exist
+ * the packet is dropped.
+ */
+
+#define UDP_TUNNEL_INVALID ~0
+
+extern dpo_type_t dpo_type_udp_ip4;
+extern dpo_type_t dpo_type_udp_ip6;
+
+extern vlib_node_registration_t udp4_decap_node;
+extern vlib_node_registration_t udp6_decap_node;
+
+/**
+ * @brief Create a udp tunnel
+ *
+ * @param proto FIB_PROTOCOL_IP4 or FIB_PROTOCOL_IP6
+ * @param fib_index fib index to add to the udp encap
+ * @param src_ip source address of the tunnel
+ * @param dst_ip destination address of the tunnel
+ * @param src_port source port
+ * @param dst_port destination port
+ * @param flags flags for the udp encap
+ *
+ * @return return the id of the tunnel
+ */
+u32 udp_tunnel_add (fib_protocol_t proto,
+                    index_t fib_index,
+                    const ip46_address_t * src_ip,
+                    const ip46_address_t * dst_ip,
+                    u16 src_port,
+                    u16 dst_port,
+                    udp_encap_fixup_flags_t flags);
+
+/**
+ * @brief Retrieve the index of a udp tunnel (same id of the udp encap)
+ *
+ * @param src_ip source address of the tunnel
+ * @param dst_ip destination address of the tunnel
+ * @param src_port source port
+ * @param dst_port destination port
+ *
+ * @return id of the udp tunnel/encap
+ */
+u32 udp_tunnel_get(const ip46_address_t * src_ip,
+                   const ip46_address_t * dst_ip,
+                   u16 src_port,
+                   u16 dst_port);
+
+/**
+ * @brief Delete a udp tunnel
+ *
+ * @param proto FIB_PROTOCOL_IP4 or FIB_PROTOCOL_IP6
+ * @param fib_index fib index to add to the udp encap
+ * @param src_ip source address of the tunnel
+ * @param dst_ip destination address of the tunnel
+ * @param src_port source port
+ * @param dst_port destination port
+ * @param flags flags for the udp encap
+ *
+ * @return HICN_ERROR_UDP_TUNNEL_NOT_FOUND if the tunnel was not found
+ * or HICN_ERROR_NONE if the tunnel has been deleted
+ */
+int udp_tunnel_del (fib_protocol_t proto,
+                    index_t fib_index,
+                    const ip46_address_t * src_ip,
+                    const ip46_address_t * dst_ip,
+                    u16 src_port,
+                    u16 dst_port,
+                    udp_encap_fixup_flags_t flags);
+
+/**
+ * @brief Add a udp tunnel from an existing udp encap
+ *
+ * @param uei index of the udp encap object
+ * @param proto DPO_PROTO_IP6 or DPO_PROTO_IP4
+ */
+void udp_tunnel_add_existing (index_t uei, dpo_proto_t proto);
+
+/**
+ * @brief Init the udp tunnel module
+ *
+ */
+void udp_tunnel_init();
+
+#endif
diff --git a/hicn-plugin/src/network/utils.h b/hicn-plugin/src/network/utils.h
new file mode 100644
index 000000000..689942ab6
--- /dev/null
+++ b/hicn-plugin/src/network/utils.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HICN_UTILS_H__
+#define __HICN_UTILS_H__
+
+#include "hicn.h"
+
+/**
+ * @file
+ *
+ * Helpers to print hicn headers
+ */
+
+/**
+ * @brief Print the hicn name (IPv6 prefix and sequence number) to stdout.
+ *
+ * @param name hicn name to print
+ */
+always_inline void
+hicn_print_name6 (hicn_name_t * name)
+{
+  u8 *s0;
+  s0 = format (0, "Source addr %U, seq_number %u", format_ip6_address,
+               (ip6_address_t *) name->ip6.prefix,
+               clib_net_to_host_u32 (name->ip6.suffix));
+
+  /* BUGFIX: a format() vector is not NUL-terminated, so a plain "%s"
+   * could read past the end; bound the print by the vector length and
+   * free the vector (it was previously leaked). */
+  printf ("%.*s\n", (int) vec_len (s0), s0);
+  vec_free (s0);
+}
+
+/**
+ * @brief Print the ipv6 hicn header (src and dst address). The value
+ *        after each ':' is the TCP sequence number, not a port.
+ *
+ * @param hicn0 hICN header to print
+ */
+always_inline void
+hicn_print6 (hicn_header_t * hicn0)
+{
+  vlib_main_t *vm = vlib_get_main ();
+  u8 *s0;
+  s0 = format (0, "Source addr %U:%u, dest addr %U:%u", format_ip6_address,
+               &(hicn0->v6.ip.saddr),
+               clib_net_to_host_u32 (hicn0->v6.tcp.seq), format_ip6_address,
+               &(hicn0->v6.ip.daddr),
+               clib_net_to_host_u32 (hicn0->v6.tcp.seq));
+
+  /* BUGFIX: format() vectors are not NUL-terminated — print with the
+   * length-aware "%v" specifier and free the vector (previously leaked). */
+  vlib_cli_output (vm, "%v\n", s0);
+  vec_free (s0);
+}
+
+/**
+ * @brief Print the ipv4 hicn header (src and dst address). The value
+ *        after each ':' is the TCP sequence number, not a port.
+ *
+ * @param hicn0 hICN header to print
+ */
+always_inline void
+hicn_print4 (hicn_header_t * hicn0)
+{
+  u8 *s0;
+  s0 = format (0, "Source addr %U:%u, dest addr %U:%u", format_ip4_address,
+               &(hicn0->v4.ip.saddr),
+               clib_net_to_host_u32 (hicn0->v4.tcp.seq), format_ip4_address,
+               &(hicn0->v4.ip.daddr),
+               clib_net_to_host_u32 (hicn0->v4.tcp.seq));
+
+  /* BUGFIX: a format() vector is not NUL-terminated, so a plain "%s"
+   * could read past the end; bound the print by the vector length and
+   * free the vector (it was previously leaked). */
+  printf ("%.*s\n", (int) vec_len (s0), s0);
+  vec_free (s0);
+}
+
+#endif /* // __HICN_UTILS_H__ */
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables: eval: (c-set-style "gnu") End:
+ */