path: root/hicn-plugin/src
Diffstat (limited to 'hicn-plugin/src')
-rw-r--r--  hicn-plugin/src/CMakeLists.txt | 465
-rw-r--r--  hicn-plugin/src/cache_policies/cs_lru.c | 209
-rw-r--r--  hicn-plugin/src/cache_policies/cs_lru.h | 47
-rw-r--r--  hicn-plugin/src/cache_policies/cs_policy.h | 183
-rw-r--r--  hicn-plugin/src/cli.c | 642
-rw-r--r--  hicn-plugin/src/data_fwd.h | 66
-rw-r--r--  hicn-plugin/src/data_fwd_node.c | 647
-rw-r--r--  hicn-plugin/src/data_input_node.c | 409
-rw-r--r--  hicn-plugin/src/data_pcslookup.h | 13
-rw-r--r--  hicn-plugin/src/data_pcslookup_node.c | 104
-rw-r--r--  hicn-plugin/src/error.c | 6
-rw-r--r--  hicn-plugin/src/error.h | 101
-rw-r--r--  hicn-plugin/src/face_db.h | 151
-rw-r--r--  hicn-plugin/src/faces/app/address_mgr.c | 69
-rw-r--r--  hicn-plugin/src/faces/app/address_mgr.h | 10
-rw-r--r--  hicn-plugin/src/faces/app/face_app_cli.c | 86
-rw-r--r--  hicn-plugin/src/faces/app/face_cons.c | 36
-rw-r--r--  hicn-plugin/src/faces/app/face_cons.h | 27
-rw-r--r--  hicn-plugin/src/faces/app/face_prod.c | 269
-rw-r--r--  hicn-plugin/src/faces/app/face_prod.h | 36
-rw-r--r--  hicn-plugin/src/faces/app/face_prod_node.c | 216
-rw-r--r--  hicn-plugin/src/faces/face.c | 223
-rw-r--r--  hicn-plugin/src/faces/face.h | 592
-rw-r--r--  hicn-plugin/src/faces/face_cli.c | 184
-rw-r--r--  hicn-plugin/src/faces/face_flags.h | 40
-rw-r--r--  hicn-plugin/src/faces/face_node.c | 1017
-rw-r--r--  hicn-plugin/src/faces/face_node.h | 8
-rw-r--r--  hicn-plugin/src/faces/iface_node.c | 1056
-rw-r--r--  hicn-plugin/src/faces/iface_node.h | 33
-rw-r--r--  hicn-plugin/src/faces/inlines.h | 29
-rw-r--r--  hicn-plugin/src/hashtb.c | 1017
-rw-r--r--  hicn-plugin/src/hashtb.h | 546
-rw-r--r--  hicn-plugin/src/hicn.api | 97
-rw-r--r--  hicn-plugin/src/hicn.c | 83
-rw-r--r--  hicn-plugin/src/hicn.h | 132
-rw-r--r--  hicn-plugin/src/hicn_api.c | 498
-rw-r--r--  hicn-plugin/src/hicn_api_test.c | 782
-rw-r--r--  hicn-plugin/src/hicn_buffer_flags.h | 35
-rw-r--r--  hicn-plugin/src/hicn_logging.h | 32
-rw-r--r--  hicn-plugin/src/infra.c (renamed from hicn-plugin/src/hicn_all_api_h.h) | 13
-rw-r--r--  hicn-plugin/src/infra.h | 26
-rw-r--r--  hicn-plugin/src/interest_hitcs.h | 13
-rw-r--r--  hicn-plugin/src/interest_hitcs_node.c | 162
-rw-r--r--  hicn-plugin/src/interest_hitpit.h | 18
-rw-r--r--  hicn-plugin/src/interest_hitpit_node.c | 180
-rw-r--r--  hicn-plugin/src/interest_pcslookup.h | 24
-rw-r--r--  hicn-plugin/src/interest_pcslookup_node.c | 539
-rw-r--r--  hicn-plugin/src/mapme.h | 240
-rw-r--r--  hicn-plugin/src/mapme_ack.h | 6
-rw-r--r--  hicn-plugin/src/mapme_ack_node.c | 100
-rw-r--r--  hicn-plugin/src/mapme_ctrl.h | 11
-rw-r--r--  hicn-plugin/src/mapme_ctrl_node.c | 489
-rw-r--r--  hicn-plugin/src/mapme_eventmgr.c | 294
-rw-r--r--  hicn-plugin/src/mapme_eventmgr.h | 11
-rw-r--r--  hicn-plugin/src/mgmt.c | 72
-rw-r--r--  hicn-plugin/src/mgmt.h | 47
-rw-r--r--  hicn-plugin/src/params.h | 84
-rw-r--r--  hicn-plugin/src/parser.h | 135
-rw-r--r--  hicn-plugin/src/pcs.c | 47
-rw-r--r--  hicn-plugin/src/pcs.h | 1170
-rw-r--r--  hicn-plugin/src/pg.c | 1401
-rw-r--r--  hicn-plugin/src/pg.h | 168
-rw-r--r--  hicn-plugin/src/pg_node.c | 1121
-rw-r--r--  hicn-plugin/src/route.c | 718
-rw-r--r--  hicn-plugin/src/route.h | 92
-rw-r--r--  hicn-plugin/src/state.h | 73
-rw-r--r--  hicn-plugin/src/strategies/dpo_lr.c | 139
-rw-r--r--  hicn-plugin/src/strategies/dpo_lr.h | 122
-rw-r--r--  hicn-plugin/src/strategies/dpo_mw.c | 74
-rw-r--r--  hicn-plugin/src/strategies/dpo_mw.h | 50
-rw-r--r--  hicn-plugin/src/strategies/dpo_rp.c | 145
-rw-r--r--  hicn-plugin/src/strategies/dpo_rp.h | 130
-rw-r--r--  hicn-plugin/src/strategies/dpo_rr.c | 74
-rw-r--r--  hicn-plugin/src/strategies/dpo_rr.h | 51
-rw-r--r--  hicn-plugin/src/strategies/strategy_lr.c | 136
-rw-r--r--  hicn-plugin/src/strategies/strategy_lr.h | 35
-rw-r--r--  hicn-plugin/src/strategies/strategy_mw.c | 64
-rw-r--r--  hicn-plugin/src/strategies/strategy_mw.h | 2
-rw-r--r--  hicn-plugin/src/strategies/strategy_mw_cli.c | 49
-rw-r--r--  hicn-plugin/src/strategies/strategy_rp.c | 124
-rw-r--r--  hicn-plugin/src/strategies/strategy_rp.h (renamed from hicn-plugin/src/hicn_msg_enum.h) | 32
-rw-r--r--  hicn-plugin/src/strategies/strategy_rr.c | 56
-rw-r--r--  hicn-plugin/src/strategies/strategy_rr.h | 2
-rw-r--r--  hicn-plugin/src/strategy.h | 61
-rw-r--r--  hicn-plugin/src/strategy_dpo_ctx.c | 66
-rw-r--r--  hicn-plugin/src/strategy_dpo_ctx.h | 53
-rw-r--r--  hicn-plugin/src/strategy_dpo_manager.c | 56
-rw-r--r--  hicn-plugin/src/strategy_dpo_manager.h | 76
-rw-r--r--  hicn-plugin/src/strategy_node.c | 285
-rw-r--r--  hicn-plugin/src/test/CMakeLists.txt | 67
-rw-r--r--  hicn-plugin/src/test/main.c (renamed from hicn-plugin/src/hicn_api.h) | 37
-rw-r--r--  hicn-plugin/src/test/test_pcs.c | 622
-rw-r--r--  hicn-plugin/src/test/vpp.c | 570
-rw-r--r--  hicn-plugin/src/test/vpp.h | 21
-rw-r--r--  hicn-plugin/src/udp_tunnels/udp_decap.h | 2
-rw-r--r--  hicn-plugin/src/udp_tunnels/udp_decap_node.c | 611
-rw-r--r--  hicn-plugin/src/udp_tunnels/udp_tunnel.c | 251
-rw-r--r--  hicn-plugin/src/udp_tunnels/udp_tunnel.h | 69
-rw-r--r--  hicn-plugin/src/utils.h | 19
99 files changed, 11516 insertions, 10085 deletions
diff --git a/hicn-plugin/src/CMakeLists.txt b/hicn-plugin/src/CMakeLists.txt
index a3480c21d..b7fa828f3 100644
--- a/hicn-plugin/src/CMakeLists.txt
+++ b/hicn-plugin/src/CMakeLists.txt
@@ -1,4 +1,4 @@
-# Copyright (c) 2017-2020 Cisco and/or its affiliates.
+# Copyright (c) 2021-2022 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
@@ -11,293 +11,252 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-cmake_minimum_required(VERSION 3.5 FATAL_ERROR)
-
-# Dependencies
-
-find_package(Vpp REQUIRED)
-
-include_directories(${VPP_INCLUDE_DIR})
-
-set(LIBHICN_FILES
- ${CMAKE_CURRENT_SOURCE_DIR}/../../lib/src/mapme.c
- ${CMAKE_CURRENT_SOURCE_DIR}/../../lib/src/name.c
- ${CMAKE_CURRENT_SOURCE_DIR}/../../lib/src/ops.c
- ${CMAKE_CURRENT_SOURCE_DIR}/../../lib/src/protocol/ah.c
- ${CMAKE_CURRENT_SOURCE_DIR}/../../lib/src/protocol/icmp.c
- ${CMAKE_CURRENT_SOURCE_DIR}/../../lib/src/protocol/ipv4.c
- ${CMAKE_CURRENT_SOURCE_DIR}/../../lib/src/protocol/ipv6.c
- ${CMAKE_CURRENT_SOURCE_DIR}/../../lib/src/protocol/tcp.c
-)
-
-set(LIBHICN_HEADER_FILES_SRC
- ${CMAKE_CURRENT_SOURCE_DIR}/../../lib/includes/hicn/hicn.h
- ${CMAKE_CURRENT_SOURCE_DIR}/../../lib/includes/hicn/base.h
- ${CMAKE_CURRENT_SOURCE_DIR}/../../lib/includes/hicn/common.h
- ${CMAKE_CURRENT_SOURCE_DIR}/../../lib/includes/hicn/error.h
- ${CMAKE_CURRENT_SOURCE_DIR}/../../lib/includes/hicn/header.h
- ${CMAKE_CURRENT_SOURCE_DIR}/../../lib/includes/hicn/name.h
- ${CMAKE_CURRENT_SOURCE_DIR}/../../lib/includes/hicn/protocol.h
- ${CMAKE_CURRENT_SOURCE_DIR}/../../lib/includes/hicn/ops.h
- ${CMAKE_CURRENT_SOURCE_DIR}/../../lib/includes/hicn/mapme.h
-)
-
-set(LIBHICN_HEADER_FILES_PROTOCOL
- ${CMAKE_CURRENT_SOURCE_DIR}/../../lib/includes/hicn/protocol/ah.h
- ${CMAKE_CURRENT_SOURCE_DIR}/../../lib/includes/hicn/protocol/icmp.h
- ${CMAKE_CURRENT_SOURCE_DIR}/../../lib/includes/hicn/protocol/icmprd.h
- ${CMAKE_CURRENT_SOURCE_DIR}/../../lib/includes/hicn/protocol/ipv4.h
- ${CMAKE_CURRENT_SOURCE_DIR}/../../lib/includes/hicn/protocol/ipv6.h
- ${CMAKE_CURRENT_SOURCE_DIR}/../../lib/includes/hicn/protocol/tcp.h
- ${CMAKE_CURRENT_SOURCE_DIR}/../../lib/includes/hicn/protocol/udp.h
-)
+##############################################################
+# Dependencies and third party libs
+##############################################################
+find_package(Vpp ${VPP_DEFAULT_VERSION} EXACT REQUIRED)
+
+if(CMAKE_SOURCE_DIR STREQUAL PROJECT_SOURCE_DIR)
+ find_package(Libhicn ${CURRENT_VERSION} REQUIRED NO_MODULE)
+ list(APPEND HICN_LIBRARIES hicn::hicn.${LIBTYPE})
+else()
+ set(LIBHICN_LIBRARIES ${LIBHICN_SHARED})
+ list(APPEND DEPENDENCIES
+ ${LIBHICN_LIBRARIES}
+ )
+endif()
-set(LIBHICN_HEADER_FILES_UTIL
- ${CMAKE_CURRENT_SOURCE_DIR}/../../lib/includes/hicn/util/ip_address.h
- ${CMAKE_CURRENT_SOURCE_DIR}/../../lib/includes/hicn/util/token.h
- ${CMAKE_CURRENT_SOURCE_DIR}/../../lib/includes/hicn/util/types.h
-)
+##############################################################
+# Source/Header files
+##############################################################
set(HICN_PLUGIN_SOURCE_FILES
- ${CMAKE_CURRENT_SOURCE_DIR}/hicn.c
- ${CMAKE_CURRENT_SOURCE_DIR}/hicn_api.c
- ${CMAKE_CURRENT_SOURCE_DIR}/cli.c
- ${CMAKE_CURRENT_SOURCE_DIR}/hashtb.c
- ${CMAKE_CURRENT_SOURCE_DIR}/mgmt.c
- ${CMAKE_CURRENT_SOURCE_DIR}/pcs.c
- ${CMAKE_CURRENT_SOURCE_DIR}/route.c
- ${CMAKE_CURRENT_SOURCE_DIR}/strategy_dpo_ctx.c
- ${CMAKE_CURRENT_SOURCE_DIR}/strategy_dpo_manager.c
- ${CMAKE_CURRENT_SOURCE_DIR}/strategy_node.c
- ${CMAKE_CURRENT_SOURCE_DIR}/interest_pcslookup_node.c
- ${CMAKE_CURRENT_SOURCE_DIR}/interest_hitpit_node.c
- ${CMAKE_CURRENT_SOURCE_DIR}/interest_hitcs_node.c
- ${CMAKE_CURRENT_SOURCE_DIR}/data_input_node.c
- ${CMAKE_CURRENT_SOURCE_DIR}/data_pcslookup_node.c
- ${CMAKE_CURRENT_SOURCE_DIR}/data_fwd_node.c
- ${CMAKE_CURRENT_SOURCE_DIR}/error.c
- ${CMAKE_CURRENT_SOURCE_DIR}/faces/face_cli.c
- ${CMAKE_CURRENT_SOURCE_DIR}/faces/face.c
- ${CMAKE_CURRENT_SOURCE_DIR}/faces/face_node.c
- ${CMAKE_CURRENT_SOURCE_DIR}/faces/iface_node.c
- ${CMAKE_CURRENT_SOURCE_DIR}/faces/app/address_mgr.c
- ${CMAKE_CURRENT_SOURCE_DIR}/faces/app/face_cons.c
- ${CMAKE_CURRENT_SOURCE_DIR}/faces/app/face_prod.c
- ${CMAKE_CURRENT_SOURCE_DIR}/faces/app/face_prod_node.c
- ${CMAKE_CURRENT_SOURCE_DIR}/faces/app/face_app_cli.c
- ${CMAKE_CURRENT_SOURCE_DIR}/pg.c
- ${CMAKE_CURRENT_SOURCE_DIR}/strategies/dpo_mw.c
- ${CMAKE_CURRENT_SOURCE_DIR}/strategies/strategy_mw.c
- ${CMAKE_CURRENT_SOURCE_DIR}/strategies/strategy_mw_cli.c
- ${CMAKE_CURRENT_SOURCE_DIR}/strategies/dpo_rr.c
- ${CMAKE_CURRENT_SOURCE_DIR}/strategies/strategy_rr.c
- ${CMAKE_CURRENT_SOURCE_DIR}/cache_policies/cs_lru.c
- ${CMAKE_CURRENT_SOURCE_DIR}/mapme_ack_node.c
- ${CMAKE_CURRENT_SOURCE_DIR}/mapme_ctrl_node.c
- ${CMAKE_CURRENT_SOURCE_DIR}/mapme_eventmgr.c
- ${CMAKE_CURRENT_SOURCE_DIR}/udp_tunnels/udp_decap_node.c
- ${CMAKE_CURRENT_SOURCE_DIR}/udp_tunnels/udp_tunnel.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/hicn.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/hicn_api.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/cli.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/infra.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/mgmt.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/pcs.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/route.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/strategy_dpo_ctx.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/strategy_dpo_manager.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/strategy_node.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/interest_pcslookup_node.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/interest_hitpit_node.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/interest_hitcs_node.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/data_input_node.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/data_pcslookup_node.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/data_fwd_node.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/error.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/faces/face_cli.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/faces/face.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/faces/face_node.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/faces/iface_node.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/faces/app/address_mgr.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/faces/app/face_cons.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/faces/app/face_prod.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/faces/app/face_prod_node.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/faces/app/face_app_cli.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/pg.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/pg_node.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/strategies/dpo_mw.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/strategies/strategy_mw.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/strategies/strategy_mw_cli.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/strategies/dpo_rr.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/strategies/strategy_rr.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/strategies/dpo_rp.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/strategies/dpo_lr.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/strategies/strategy_rp.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/strategies/strategy_lr.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/cache_policies/cs_lru.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/mapme_ack_node.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/mapme_ctrl_node.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/mapme_eventmgr.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/udp_tunnels/udp_decap_node.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/udp_tunnels/udp_tunnel.c
)
set(HICN_PLUGIN_HEADER_FILES
- ${CMAKE_CURRENT_SOURCE_DIR}/hicn_all_api_h.h
- ${CMAKE_CURRENT_SOURCE_DIR}/hashtb.h
- ${CMAKE_CURRENT_SOURCE_DIR}/mgmt.h
- ${CMAKE_CURRENT_SOURCE_DIR}/params.h
- ${CMAKE_CURRENT_SOURCE_DIR}/pcs.h
- ${CMAKE_CURRENT_SOURCE_DIR}/hicn_api.h
- ${CMAKE_CURRENT_SOURCE_DIR}/hicn.h
- ${CMAKE_CURRENT_SOURCE_DIR}/state.h
- ${CMAKE_CURRENT_SOURCE_DIR}/infra.h
- ${CMAKE_CURRENT_SOURCE_DIR}/hicn_msg_enum.h
- ${CMAKE_CURRENT_SOURCE_DIR}/parser.h
- ${CMAKE_CURRENT_SOURCE_DIR}/route.h
- ${CMAKE_CURRENT_SOURCE_DIR}/strategy_dpo_ctx.h
- ${CMAKE_CURRENT_SOURCE_DIR}/strategy_dpo_manager.h
- ${CMAKE_CURRENT_SOURCE_DIR}/strategy.h
- ${CMAKE_CURRENT_SOURCE_DIR}/interest_pcslookup.h
- ${CMAKE_CURRENT_SOURCE_DIR}/interest_hitpit.h
- ${CMAKE_CURRENT_SOURCE_DIR}/interest_hitcs.h
- ${CMAKE_CURRENT_SOURCE_DIR}/data_pcslookup.h
- ${CMAKE_CURRENT_SOURCE_DIR}/data_fwd.h
- ${CMAKE_CURRENT_SOURCE_DIR}/error.h
- ${CMAKE_CURRENT_SOURCE_DIR}/face_db.h
- ${CMAKE_CURRENT_SOURCE_DIR}/faces/face.h
- ${CMAKE_CURRENT_SOURCE_DIR}/faces/face_node.h
- ${CMAKE_CURRENT_SOURCE_DIR}/faces/iface_node.h
- ${CMAKE_CURRENT_SOURCE_DIR}/faces/inlines.h
- ${CMAKE_CURRENT_SOURCE_DIR}/faces/face_dpo.h
- ${CMAKE_CURRENT_SOURCE_DIR}/faces/app/address_mgr.h
- ${CMAKE_CURRENT_SOURCE_DIR}/faces/app/face_cons.h
- ${CMAKE_CURRENT_SOURCE_DIR}/faces/app/face_prod.h
- ${CMAKE_CURRENT_SOURCE_DIR}/pg.h
- ${CMAKE_CURRENT_SOURCE_DIR}/strategies/dpo_mw.h
- ${CMAKE_CURRENT_SOURCE_DIR}/strategies/strategy_mw.h
- ${CMAKE_CURRENT_SOURCE_DIR}/strategies/dpo_rr.h
- ${CMAKE_CURRENT_SOURCE_DIR}/strategies/strategy_rr.h
- ${CMAKE_CURRENT_SOURCE_DIR}/cache_policies/cs_policy.h
- ${CMAKE_CURRENT_SOURCE_DIR}/cache_policies/cs_lru.h
- ${CMAKE_CURRENT_SOURCE_DIR}/mapme.h
- ${CMAKE_CURRENT_SOURCE_DIR}/mapme_ack.h
- ${CMAKE_CURRENT_SOURCE_DIR}/mapme_ctrl.h
- ${CMAKE_CURRENT_SOURCE_DIR}/mapme_eventmgr.h
- ${CMAKE_CURRENT_SOURCE_DIR}/udp_tunnels/udp_tunnel.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/hicn_all_api_h.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/hicn_logging.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/mgmt.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/params.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/pcs.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/hicn_api.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/hicn.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/hicn_buffer_flags.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/state.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/infra.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/hicn_msg_enum.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/parser.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/route.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/strategy_dpo_ctx.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/strategy_dpo_manager.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/strategy.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/interest_pcslookup.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/interest_hitpit.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/interest_hitcs.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/data_pcslookup.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/data_fwd.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/error.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/face_db.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/faces/face.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/faces/face_flags.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/faces/face_node.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/faces/iface_node.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/faces/inlines.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/faces/face_dpo.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/faces/app/address_mgr.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/faces/app/face_cons.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/faces/app/face_prod.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/pg.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/strategies/dpo_mw.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/strategies/strategy_mw.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/strategies/dpo_rr.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/strategies/strategy_rr.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/strategies/dpo_rp.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/strategies/dpo_lr.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/strategies/strategy_rp.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/strategies/strategy_lr.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/cache_policies/cs_policy.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/cache_policies/cs_lru.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/mapme.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/mapme_ack.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/mapme_ctrl.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/mapme_eventmgr.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/udp_tunnels/udp_tunnel.h
)
set(HICN_API_TEST_SOURCE_FILES
- ${CMAKE_CURRENT_SOURCE_DIR}/hicn_api_test.c
- ${CMAKE_CURRENT_SOURCE_DIR}/error.c)
+ ${CMAKE_CURRENT_SOURCE_DIR}/hicn_api_test.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/error.c
+)
set(HICN_API_HEADER_FILES
- ${CMAKE_CURRENT_SOURCE_DIR}/hicn_msg_enum.h
- ${CMAKE_CURRENT_SOURCE_DIR}/hicn_all_api_h.h
- ${CMAKE_CURRENT_SOURCE_DIR}/hicn_api.h
- ${CMAKE_CURRENT_SOURCE_DIR}/error.h)
+ ${HICNPLUGIN_TO_INSTALL_HEADER_FILES}
+)
set(HICN_API_GENERATED_FILES
- ${CMAKE_CURRENT_BINARY_DIR}/vpp_plugins/hicn/hicn.api.h
- ${CMAKE_CURRENT_BINARY_DIR}/vpp_plugins/hicn/hicn.api_types.h
- ${CMAKE_CURRENT_BINARY_DIR}/vpp_plugins/hicn/hicn.api_enum.h
+ ${PROJECT_BINARY_DIR}/vpp_plugins/hicn/hicn.api.h
+ ${PROJECT_BINARY_DIR}/vpp_plugins/hicn/hicn.api_types.h
+ ${PROJECT_BINARY_DIR}/vpp_plugins/hicn/hicn.api_enum.h
)
set(HICN_VAPI_GENERATED_FILES
- ${CMAKE_CURRENT_BINARY_DIR}/vapi/hicn.api.vapi.h
- ${CMAKE_CURRENT_BINARY_DIR}/vapi/hicn.api.vapi.hpp)
+ ${PROJECT_BINARY_DIR}/vapi/hicn.api.vapi.h
+ ${PROJECT_BINARY_DIR}/vapi/hicn.api.vapi.hpp
+)
-set(HICN_VPP_STARTUP_CONF_FILE
- ${CMAKE_BINARY_DIR}/startup.conf)
+##############################################################
+# Assume VPP is installed in standard location
+##############################################################
if (NOT VPP_HOME)
- set(VPP_HOME /usr)
+ set(VPP_HOME /usr)
endif()
-if (NOT CMAKE_BUILD_TYPE)
- set (CMAKE_BUILD_TYPE "Release")
-endif (NOT CMAKE_BUILD_TYPE)
-SET(HICN_INSTALL_PREFIX ${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR} CACHE STRING "hicn_install_prefix")
+##############################################################
+# RPath
+##############################################################
+SET(HICN_INSTALL_PREFIX
+ ${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR}
+)
-if (CMAKE_BUILD_TYPE STREQUAL "Release")
- set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} -Wall -march=native -O3 -g")
-elseif (CMAKE_BUILD_TYPE STREQUAL "Debug")
- set(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -Wall -march=native -O0 -g")
- add_definitions(-DCLIB_DEBUG -fPIC -fstack-protector-all)
-endif()
-file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/hicn)
-file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/vapi)
-file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/vnet/ip)
-file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/vnet/fib)
-file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/vnet/udp)
-file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/vpp_plugins)
-file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/vpp_plugins/hicn)
+##############################################################
+# Compiler Options
+##############################################################
+set(COMPILER_OPTIONS
+ ${DEFAULT_COMPILER_OPTIONS}
+ ${MARCH_COMPILER_OPTIONS}
+ PRIVATE "-Wno-address-of-packed-member"
+)
-# These files are missing from vpp binary distribution
-execute_process(
- COMMAND
- bash -c
- "if [ ! -e ${CMAKE_CURRENT_BINARY_DIR}/vapi_json_parser.py ]; then
- curl https://raw.githubusercontent.com/FDio/vpp/stable/2005/src/vpp-api/vapi/vapi_json_parser.py -o ${CMAKE_CURRENT_BINARY_DIR}/vapi_json_parser.py;
- fi;
- if [ ! -e ${CMAKE_CURRENT_BINARY_DIR}/vapi_c_gen.py ]; then
- curl https://raw.githubusercontent.com/FDio/vpp/stable/2005/src/vpp-api/vapi/vapi_c_gen.py -o ${CMAKE_CURRENT_BINARY_DIR}/vapi_c_gen.py;
- fi;
- if [ ! -e ${CMAKE_CURRENT_BINARY_DIR}/vapi_cpp_gen.py ]; then
- curl https://raw.githubusercontent.com/FDio/vpp/stable/2005/src/vpp-api/vapi/vapi_cpp_gen.py -o ${CMAKE_CURRENT_BINARY_DIR}/vapi_cpp_gen.py;
- fi;
- if [ ! -e ${CMAKE_CURRENT_BINARY_DIR}/vnet/ip/ip_types.api ]; then
- curl https://raw.githubusercontent.com/FDio/vpp/stable/2005/src/vnet/ip/ip_types.api -o ${CMAKE_CURRENT_BINARY_DIR}/vnet/ip/ip_types.api;
- fi;
- if [ ! -e ${CMAKE_CURRENT_BINARY_DIR}/vnet/ip/ip_format_fns.h ]; then
- curl https://raw.githubusercontent.com/FDio/vpp/stable/2005/src/vnet/ip/ip_format_fns.h -o ${CMAKE_CURRENT_BINARY_DIR}/vnet/ip/ip_format_fns.h;
- fi;
- if [ ! -e ${CMAKE_CURRENT_BINARY_DIR}/vnet/fib/fib_entry_track.h ]; then
- curl https://raw.githubusercontent.com/FDio/vpp/stable/2005/src/vnet/fib/fib_entry_track.h -o ${CMAKE_CURRENT_BINARY_DIR}/vnet/fib/fib_entry_track.h;
- fi;
- if [ ! -e ${CMAKE_CURRENT_BINARY_DIR}/vnet/udp/udp_encap.h ]; then
- curl https://raw.githubusercontent.com/FDio/vpp/stable/2005/src/vnet/udp/udp_encap.h -o ${CMAKE_CURRENT_BINARY_DIR}/vnet/udp/udp_encap.h;
- fi;
- chmod +x ${CMAKE_CURRENT_BINARY_DIR}/vapi_json_parser.py ${CMAKE_CURRENT_BINARY_DIR}/vapi_c_gen.py ${CMAKE_CURRENT_BINARY_DIR}/vapi_cpp_gen.py"
+##############################################################
+# Compiler Definitions
+##############################################################
+set(COMPILE_DEFINITIONS
+ "-DHICN_VPP_PLUGIN=1"
+ "-DHICN_MAPME_ALLOW_NONEXISTING_FIB_ENTRY"
)
+if (${CMAKE_BUILD_TYPE} MATCHES "Debug")
+ list(APPEND COMPILE_DEFINITIONS
+ "-DHICN_DDEBUG"
+ "-DCLIB_DEBUG"
+ )
+endif()
+
+
+##############################################################
+# VPP API Generation
+##############################################################
+file(MAKE_DIRECTORY ${PROJECT_BINARY_DIR}/vapi)
+file(MAKE_DIRECTORY ${PROJECT_BINARY_DIR}/vpp_plugins/hicn)
+
execute_process(
- COMMAND ${VPP_HOME}/bin/vppapigen --includedir ${CMAKE_CURRENT_BINARY_DIR} --input ${CMAKE_CURRENT_SOURCE_DIR}/hicn.api --output ${CMAKE_CURRENT_BINARY_DIR}/vpp_plugins/hicn/hicn.api.h --outputdir ${CMAKE_CURRENT_BINARY_DIR}/vpp_plugins/hicn/
- COMMAND ${VPP_HOME}/bin/vppapigen JSON --includedir ${CMAKE_CURRENT_BINARY_DIR} --input ${CMAKE_CURRENT_SOURCE_DIR}/hicn.api --output ${CMAKE_CURRENT_BINARY_DIR}/vapi/hicn.api.json --outputdir ${CMAKE_CURRENT_BINARY_DIR}/vapi/
+ COMMAND ${VPP_HOME}/bin/vppapigen --includedir ${VPP_HOME}/include --input ${CMAKE_CURRENT_SOURCE_DIR}/hicn.api --output ${PROJECT_BINARY_DIR}/vpp_plugins/hicn/hicn.api.h --outputdir ${PROJECT_BINARY_DIR}/vpp_plugins/hicn/
+ COMMAND ${VPP_HOME}/bin/vppapigen JSON --includedir ${VPP_HOME}/include --input ${CMAKE_CURRENT_SOURCE_DIR}/hicn.api --output ${PROJECT_BINARY_DIR}/vapi/hicn.api.json --outputdir ${PROJECT_BINARY_DIR}/vapi/
)
execute_process(
- COMMAND ${CMAKE_CURRENT_BINARY_DIR}/vapi_c_gen.py ${CMAKE_CURRENT_BINARY_DIR}/vapi/hicn.api.json
- COMMAND ${CMAKE_CURRENT_BINARY_DIR}/vapi_cpp_gen.py ${CMAKE_CURRENT_BINARY_DIR}/vapi/hicn.api.json
+ COMMAND ${VPP_HOME}/bin/vapi_c_gen.py ${PROJECT_BINARY_DIR}/vapi/hicn.api.json
+ COMMAND ${VPP_HOME}/bin/vapi_cpp_gen.py ${PROJECT_BINARY_DIR}/vapi/hicn.api.json
+)
+install(
+ FILES ${PROJECT_BINARY_DIR}/vapi/hicn.api.json
+ DESTINATION ${CMAKE_INSTALL_DATAROOTDIR}/vpp/api/plugins
+ COMPONENT ${HICN_PLUGIN}
)
-include_directories(SYSTEM)
-include_directories(${CMAKE_CURRENT_BINARY_DIR})
-
-set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DHICN_VPP_PLUGIN=1")
-add_library(hicn_plugin SHARED
- ${LIBHICN_FILES}
- ${HICN_PLUGIN_SOURCE_FILES}
- ${HICN_API_GENERATED_FILES}
- ${HICN_VAPI_GENERATED_FILES})
-
-file(COPY ${HICN_API_HEADER_FILES} DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/vpp_plugins/hicn)
-include_directories(${CMAKE_CURRENT_BINARY_DIR}/vpp_plugins)
-
-file(COPY ${LIBHICN_HEADER_FILES_SRC} DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/hicn)
-file(COPY ${LIBHICN_HEADER_FILES_PROTOCOL} DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/hicn/protocol)
-file(COPY ${LIBHICN_HEADER_FILES_UTIL} DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/hicn/util)
-
-add_library(hicn_api_test_plugin SHARED
- ${HICN_API_TEST_SOURCE_FILES}
- ${HICN_API_GENERATED_FILES})
-
-set(VPP_INSTALL_PLUGIN ${HICN_INSTALL_PREFIX}/vpp_plugins)
-set(VPP_INSTALL_API_TEST_PLUGIN ${HICN_INSTALL_PREFIX}/vpp_api_test_plugins CACHE STRING "vpp_install_api_test_plugin")
-set(VPP_STARTUP_CONF /etc/vpp/)
-
-set_target_properties(hicn_plugin
- PROPERTIES
- LINKER_LANGUAGE C
- INSTALL_RPATH ${VPP_INSTALL_PLUGIN}
- PREFIX "")
-set_target_properties(hicn_api_test_plugin
- PROPERTIES
- LINKER_LANGUAGE C
- PREFIX "")
-
-message (STATUS "hicn-plugin variable ${HICN_PLUGIN}")
-
-install(DIRECTORY
- DESTINATION ${VPP_INSTALL_PLUGIN}
- COMPONENT ${HICN_PLUGIN})
-install(TARGETS hicn_plugin
- DESTINATION
- ${VPP_INSTALL_PLUGIN}
- COMPONENT ${HICN_PLUGIN})
-install(DIRECTORY
- DESTINATION ${VPP_INSTALL_API_TEST_PLUGIN}
- COMPONENT ${HICN_PLUGIN})
-install(TARGETS hicn_api_test_plugin
- DESTINATION ${VPP_INSTALL_API_TEST_PLUGIN}
- COMPONENT ${HICN_PLUGIN})
+##############################################################
+# Include directories
+##############################################################
+set(HICN_PLUGIN_INCLUDE_DIRS_INTERNAL
+ PRIVATE
+ ${CMAKE_CURRENT_SOURCE_DIR}
+ ${VPP_INCLUDE_DIR}
+ PUBLIC
+ $<BUILD_INTERFACE:${PROJECT_BINARY_DIR}>
+ $<BUILD_INTERFACE:${HICNPLUGIN_INCLUDE_DIRS}>
+ $<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>
+)
-install(FILES ${HICN_API_HEADER_FILES} ${HICN_API_GENERATED_FILES}
- DESTINATION ${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_INCLUDEDIR}/vpp_plugins/hicn
- COMPONENT ${HICN_PLUGIN}-dev)
-install(FILES ${HICN_API_GENERATED_FILES}
- DESTINATION ${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_INCLUDEDIR}/vpp_plugins/hicn
- COMPONENT ${HICN_PLUGIN}-dev)
+##############################################################
+# Build hicn plugins for VPP
+##############################################################
+build_module(${HICNPLUGIN}
+ SOURCES ${HICN_PLUGIN_SOURCE_FILES}
+ INSTALL_HEADERS ${HICN_API_HEADER_FILES} ${HICN_API_GENERATED_FILES}
+ LINK_LIBRARIES PRIVATE ${LIBHICN_LIBRARIES}
+ DEPENDS ${DEPENDENCIES}
+ COMPONENT ${HICN_PLUGIN}
+ INCLUDE_DIRS ${HICN_PLUGIN_INCLUDE_DIRS_INTERNAL}
+ HEADER_ROOT_DIR "vpp_plugins"
+ LIBRARY_ROOT_DIR "vpp_plugins"
+ DEFINITIONS PUBLIC ${COMPILE_DEFINITIONS}
+ COMPILE_OPTIONS ${COMPILER_OPTIONS}
+ INSTALL_RPATH "${VPP_INSTALL_PLUGIN}:${HICN_INSTALL_PREFIX}:${VPP_HOME}/lib"
+)
-install(FILES ${HICN_VAPI_GENERATED_FILES}
- DESTINATION ${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_INCLUDEDIR}/vapi
- COMPONENT ${HICN_PLUGIN}-dev)
+build_module(${HICN_API_TEST_PLUGIN}
+ SOURCES ${HICN_API_TEST_SOURCE_FILES}
+ INSTALL_HEADERS ${HICN_VAPI_GENERATED_FILES}
+ COMPONENT ${HICN_PLUGIN}
+ INCLUDE_DIRS ${HICN_PLUGIN_INCLUDE_DIRS_INTERNAL}
+ HEADER_ROOT_DIR "vapi"
+ LIBRARY_ROOT_DIR "vpp_api_test_plugins"
+ DEFINITIONS PUBLIC ${COMPILE_DEFINITIONS}
+ COMPILE_OPTIONS ${COMPILER_OPTIONS}
+ INSTALL_RPATH "${VPP_INSTALL_PLUGIN}:${HICN_INSTALL_PREFIX}:${VPP_HOME}/lib"
+)
-#Set variables for other project depending on hicn-plugin
-set(HICNPLUGIN_INCLUDE_DIRS
- ${CMAKE_CURRENT_BINARY_DIR}
- ${CMAKE_CURRENT_BINARY_DIR}/vpp_plugins
- ${VPP_INCLUDE_DIRS}
- CACHE INTERNAL "" FORCE)
-set(HICNPLUGIN_LIBRARIES ${VPP_LIBRARIES} CACHE INTERNAL "" FORCE)
+##############################################################
+# Unit tests
+##############################################################
+if (${BUILD_TESTS})
+ add_subdirectory(test)
+endif()
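
The reworked CMakeLists.txt above delegates the actual library builds to a build_module() helper defined elsewhere in the project and generates the binary API with the vppapigen and vapi_*_gen.py tools shipped under ${VPP_HOME}/bin, instead of downloading helper scripts at configure time. For orientation only: the shared object produced here is loaded by VPP as a plugin, and every such plugin registers itself with the loader at startup. A minimal registration sketch, assuming the standard VPP development headers (illustrative, not part of this diff; the hicn plugin's own registration lives elsewhere in its sources):

#include <vlib/vlib.h>
#include <vnet/plugin/plugin.h>
#include <vpp/app/version.h>

/* Registration record picked up by the VPP plugin loader when the .so is
 * scanned at startup. */
VLIB_PLUGIN_REGISTER () = {
  .version = VPP_BUILD_VER,
  .description = "example plugin (illustrative only)",
};
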
diff --git a/hicn-plugin/src/cache_policies/cs_lru.c b/hicn-plugin/src/cache_policies/cs_lru.c
index 079af58ab..07c4916fb 100644
--- a/hicn-plugin/src/cache_policies/cs_lru.c
+++ b/hicn-plugin/src/cache_policies/cs_lru.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Copyright (c) 2021 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -13,7 +13,6 @@
* limitations under the License.
*/
-#include "../hashtb.h"
#include "../strategy_dpo_manager.h"
#include "../error.h"
#include "cs_lru.h"
@@ -32,132 +31,112 @@ hicn_cs_policy_vft_t hicn_cs_lru = {
* Insert a new CS element at the head of the CS LRU
*/
void
-hicn_cs_lru_insert (hicn_pit_cs_t * p, hicn_hash_node_t * node,
- hicn_pcs_entry_t * pcs, hicn_cs_policy_t * policy_state)
+hicn_cs_lru_insert (hicn_cs_policy_t *lru_policy, hicn_pit_cs_t *pcs,
+ hicn_pcs_entry_t *pcs_entry)
{
- hicn_hash_node_t *lrunode;
hicn_pcs_entry_t *lrupcs;
u32 idx;
- idx = hicn_hashtb_node_idx_from_node (p->pcs_table, node);
+ idx = hicn_pcs_entry_get_index (pcs, pcs_entry);
- if (policy_state->head != 0)
+ if (lru_policy->head != HICN_CS_POLICY_END_OF_CHAIN)
{
- lrunode = hicn_hashtb_node_from_idx (p->pcs_table, policy_state->head);
- lrupcs = hicn_pit_get_data (lrunode);
+ lrupcs = hicn_pcs_entry_get_entry_from_index (pcs, lru_policy->head);
- ASSERT (lrupcs->u.cs.cs_lru_prev == 0);
+ ASSERT (lrupcs->u.cs.cs_lru_prev == HICN_CS_POLICY_END_OF_CHAIN);
lrupcs->u.cs.cs_lru_prev = idx;
- pcs->u.cs.cs_lru_prev = 0;
- pcs->u.cs.cs_lru_next = policy_state->head;
+ pcs_entry->u.cs.cs_lru_prev = HICN_CS_POLICY_END_OF_CHAIN;
+ pcs_entry->u.cs.cs_lru_next = lru_policy->head;
- policy_state->head = idx;
+ lru_policy->head = idx;
}
else
{
- ASSERT (policy_state->tail == 0); /* We think the list is
- * empty */
+ // The list should be empty
+ ASSERT (lru_policy->tail == HICN_CS_POLICY_END_OF_CHAIN);
- policy_state->head = policy_state->tail = idx;
+ lru_policy->head = lru_policy->tail = idx;
- pcs->u.cs.cs_lru_next = pcs->u.cs.cs_lru_prev = 0;
+ pcs_entry->u.cs.cs_lru_next = pcs_entry->u.cs.cs_lru_prev =
+ HICN_CS_POLICY_END_OF_CHAIN;
}
- policy_state->count++;
+ lru_policy->count++;
}
void
-hicn_cs_lru_delete_get (hicn_pit_cs_t * p, hicn_cs_policy_t * policy_state,
- hicn_hash_node_t ** nodep,
- hicn_pcs_entry_t ** pcs_entry,
- hicn_hash_entry_t ** hash_entry)
+hicn_cs_lru_delete_get (hicn_cs_policy_t *lru_policy, const hicn_pit_cs_t *pcs,
+ hicn_pcs_entry_t **pcs_entry)
{
- *nodep = hicn_hashtb_node_from_idx (p->pcs_table, policy_state->tail);
- *pcs_entry = hicn_pit_get_data (*nodep);
-
- *hash_entry = hicn_hashtb_get_entry (p->pcs_table, (*nodep)->entry_idx,
- (*nodep)->bucket_id,
- (*nodep)->hn_flags &
- HICN_HASH_NODE_OVERFLOW_BUCKET);
+ *pcs_entry = hicn_pcs_entry_get_entry_from_index (pcs, lru_policy->tail);
}
/*
* Dequeue an LRU element, for example when it has expired.
*/
void
-hicn_cs_lru_dequeue (hicn_pit_cs_t * pit, hicn_hash_node_t * pnode,
- hicn_pcs_entry_t * pcs, hicn_cs_policy_t * lru)
+hicn_cs_lru_dequeue (hicn_cs_policy_t *lru_policy, hicn_pit_cs_t *pcs,
+ hicn_pcs_entry_t *pcs_entry)
{
- hicn_hash_node_t *lrunode;
hicn_pcs_entry_t *lrupcs;
- if (pcs->u.cs.cs_lru_prev != 0)
+ if (pcs_entry->u.cs.cs_lru_prev != HICN_CS_POLICY_END_OF_CHAIN)
{
/* Not already on the head of the LRU */
- lrunode = hicn_hashtb_node_from_idx (pit->pcs_table,
- pcs->u.cs.cs_lru_prev);
- lrupcs = hicn_pit_get_data (lrunode);
+ lrupcs =
+ hicn_pcs_entry_get_entry_from_index (pcs, pcs_entry->u.cs.cs_lru_prev);
- lrupcs->u.cs.cs_lru_next = pcs->u.cs.cs_lru_next;
+ lrupcs->u.cs.cs_lru_next = pcs_entry->u.cs.cs_lru_next;
}
else
{
- ASSERT (lru->head ==
- hicn_hashtb_node_idx_from_node (pit->pcs_table, pnode));
- lru->head = pcs->u.cs.cs_lru_next;
+ ASSERT (lru_policy->head == hicn_pcs_entry_get_index (pcs, pcs_entry));
+ lru_policy->head = pcs_entry->u.cs.cs_lru_next;
}
- if (pcs->u.cs.cs_lru_next != 0)
+ if (pcs_entry->u.cs.cs_lru_next != HICN_CS_POLICY_END_OF_CHAIN)
{
/* Not already the end of the LRU */
- lrunode = hicn_hashtb_node_from_idx (pit->pcs_table,
- pcs->u.cs.cs_lru_next);
- lrupcs = hicn_pit_get_data (lrunode);
+ lrupcs =
+ hicn_pcs_entry_get_entry_from_index (pcs, pcs_entry->u.cs.cs_lru_next);
- lrupcs->u.cs.cs_lru_prev = pcs->u.cs.cs_lru_prev;
+ lrupcs->u.cs.cs_lru_prev = pcs_entry->u.cs.cs_lru_prev;
}
else
{
/* This was the last LRU element */
- ASSERT (lru->tail ==
- hicn_hashtb_node_idx_from_node (pit->pcs_table, pnode));
- lru->tail = pcs->u.cs.cs_lru_prev;
+ ASSERT (lru_policy->tail == hicn_pcs_entry_get_index (pcs, pcs_entry));
+ lru_policy->tail = pcs_entry->u.cs.cs_lru_prev;
}
- pcs->u.cs.cs_lru_next = pcs->u.cs.cs_lru_prev = 0;
- lru->count--;
+ pcs_entry->u.cs.cs_lru_next = pcs_entry->u.cs.cs_lru_prev =
+ HICN_CS_POLICY_END_OF_CHAIN;
+ lru_policy->count--;
}
/*
- * Move a CS LRU element to the head, probably after it's been used.
+ * Move a CS LRU element to the head. The element must be part of the LRU list.
*/
void
-hicn_cs_lru_update_head (hicn_pit_cs_t * pit, hicn_hash_node_t * pnode,
- hicn_pcs_entry_t * pcs, hicn_cs_policy_t * lru)
+hicn_cs_lru_update_head (hicn_cs_policy_t *lru_policy, hicn_pit_cs_t *pcs,
+ hicn_pcs_entry_t *pcs_entry)
{
- if (pcs->u.cs.cs_lru_prev != 0)
+ if (pcs_entry->u.cs.cs_lru_prev != HICN_CS_POLICY_END_OF_CHAIN)
{
/*
* Not already on the head of the LRU, detach it from its
* current position
*/
- hicn_cs_lru_dequeue (pit, pnode, pcs, lru);
+ hicn_cs_lru_dequeue (lru_policy, pcs, pcs_entry);
/* Now detached from the list; attach at head */
- hicn_cs_lru_insert (pit, pnode, pcs, lru);
-
+ hicn_cs_lru_insert (lru_policy, pcs, pcs_entry);
}
else
{
- /* The element is already dequeue */
- if (pcs->u.cs.cs_lru_next == 0)
- {
- /* Now detached from the list; attach at head */
- hicn_cs_lru_insert (pit, pnode, pcs, lru);
- }
- ASSERT (lru->head ==
- hicn_hashtb_node_idx_from_node (pit->pcs_table, pnode));
+ // The element must be already at the head of the LRU
+ ASSERT (lru_policy->head == hicn_pcs_entry_get_index (pcs, pcs_entry));
}
}
@@ -167,98 +146,86 @@ hicn_cs_lru_update_head (hicn_pit_cs_t * pit, hicn_hash_node_t * pnode,
* CS's limit. Return the number of removed nodes.
*/
int
-hicn_cs_lru_trim (hicn_pit_cs_t * pit, u32 * node_list, int sz,
- hicn_cs_policy_t * lru)
+hicn_cs_lru_trim (hicn_cs_policy_t *lru_policy, hicn_pit_cs_t *pcs,
+ u32 *node_list, size_t sz)
{
- hicn_hash_node_t *lrunode;
hicn_pcs_entry_t *lrupcs;
u32 idx;
int i;
- idx = lru->tail;
+ idx = lru_policy->tail;
- for (i = 0; i < sz; i++)
+ for (i = 0; i < sz && idx > 0; i++)
{
-
- if (idx == 0)
- {
- break;
- }
- lrunode = hicn_hashtb_node_from_idx (pit->pcs_table, idx);
- lrupcs = hicn_pit_get_data (lrunode);
+ lrupcs = hicn_pcs_entry_get_entry_from_index (pcs, idx);
node_list[i] = idx;
idx = lrupcs->u.cs.cs_lru_prev;
- lrupcs->u.cs.cs_lru_prev = 0;
- lrupcs->u.cs.cs_lru_next = 0;
+ lrupcs->u.cs.cs_lru_prev = HICN_CS_POLICY_END_OF_CHAIN;
+ lrupcs->u.cs.cs_lru_next = HICN_CS_POLICY_END_OF_CHAIN;
}
- lru->count -= i;
+ lru_policy->count -= i;
+ lru_policy->tail = idx;
- lru->tail = idx;
- if (idx != 0)
+ if (idx != HICN_CS_POLICY_END_OF_CHAIN)
{
- lrunode = hicn_hashtb_node_from_idx (pit->pcs_table, idx);
- lrupcs = hicn_pit_get_data (lrunode);
-
- lrupcs->u.cs.cs_lru_next = 0;
+ lrupcs = hicn_pcs_entry_get_entry_from_index (pcs, idx);
+ lrupcs->u.cs.cs_lru_next = HICN_CS_POLICY_END_OF_CHAIN;
}
else
{
/* If the tail is empty, the whole lru is empty */
- lru->head = 0;
+ lru_policy->head = HICN_CS_POLICY_END_OF_CHAIN;
}
- return (i);
+ return i;
}
int
-hicn_cs_lru_flush (vlib_main_t * vm, struct hicn_pit_cs_s *pitcs,
- hicn_cs_policy_t * state)
+hicn_cs_lru_flush (hicn_cs_policy_t *lru_policy, hicn_pit_cs_t *pcs)
{
- if (state->head == 0 && state->tail == 0)
+ if (lru_policy->head == HICN_CS_POLICY_END_OF_CHAIN &&
+ lru_policy->tail == HICN_CS_POLICY_END_OF_CHAIN)
return 0;
- hicn_hash_node_t *lrunode;
- hicn_pcs_entry_t *lrupcs;
+ hicn_pcs_entry_t *pcs_entry;
u32 idx;
int i = 0;
- idx = state->tail;
+ idx = lru_policy->tail;
- while (idx != 0)
+ while (idx != HICN_CS_POLICY_END_OF_CHAIN)
{
- lrunode = hicn_hashtb_node_from_idx (pitcs->pcs_table, idx);
- lrupcs = hicn_pit_get_data (lrunode);
-
- u64 hashval = 0;
- hicn_hashtb_fullhash ((u8 *) & (lrunode->hn_key.ks.key),
- lrunode->hn_keysize, &hashval);
- hicn_hash_bucket_t *bucket = NULL;
- if ((hashval & (pitcs->pcs_table->ht_bucket_count - 1)) ==
- lrunode->bucket_id)
- {
- //The bucket is in the non overflown
- bucket = pitcs->pcs_table->ht_buckets + lrunode->bucket_id;
- }
- else
- {
- bucket =
- pool_elt_at_index (pitcs->pcs_table->ht_overflow_buckets,
- lrunode->bucket_id);
- }
- hicn_hash_entry_t *hash_entry =
- &(bucket->hb_entries[lrunode->entry_idx]);
- hash_entry->locks++;
- hicn_pcs_cs_delete (vm, pitcs, &lrupcs, &lrunode, hash_entry, NULL,
- NULL);
- idx = state->tail;
+ // Get tail entry
+ pcs_entry = hicn_pcs_entry_get_entry_from_index (pcs, idx);
+
+ // Delete entry from the PCS. This will also update the LRU.
+ hicn_pcs_entry_remove_lock (pcs, pcs_entry);
+
+ // Set index to the new tail (updated in the previous call)
+ idx = lru_policy->tail;
+
+ // Advance counter
i++;
}
- return (i);
+ return i;
+}
+hicn_cs_policy_t
+hicn_cs_lru_create (u32 max_elts)
+{
+ hicn_cs_policy_t policy = {
+ .vft = hicn_cs_lru,
+ .head = HICN_CS_POLICY_END_OF_CHAIN,
+ .tail = HICN_CS_POLICY_END_OF_CHAIN,
+ .count = 0,
+ .max = max_elts,
+ };
+
+ return policy;
}
/*
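
The rewritten LRU above no longer goes through the removed hash table: entries are linked directly by their pool index (hicn_pcs_entry_get_index / hicn_pcs_entry_get_entry_from_index) and HICN_CS_POLICY_END_OF_CHAIN (~0) marks the end of the chain instead of the old sentinel value 0. The following self-contained sketch shows the same index-linked LRU technique with simplified stand-in types (a plain static array instead of the plugin's PCS pool); it illustrates the pattern only and is not the plugin's code:

/* Index-linked LRU sketch: insert at head, evict from tail. */
#include <stdio.h>
#include <stdint.h>

#define END_OF_CHAIN ((uint32_t) ~0)
#define POOL_SIZE    8

typedef struct
{
  uint32_t prev, next; /* pool indexes; END_OF_CHAIN terminates the chain */
} entry_t;

typedef struct
{
  uint32_t head, tail, count;
} lru_t;

static entry_t pool[POOL_SIZE];

static void
lru_insert_head (lru_t *lru, uint32_t idx)
{
  pool[idx].prev = END_OF_CHAIN;
  pool[idx].next = lru->head;
  if (lru->head != END_OF_CHAIN)
    pool[lru->head].prev = idx;
  else
    lru->tail = idx; /* list was empty: new entry is also the tail */
  lru->head = idx;
  lru->count++;
}

static uint32_t
lru_evict_tail (lru_t *lru)
{
  uint32_t victim = lru->tail;
  if (victim == END_OF_CHAIN)
    return END_OF_CHAIN; /* nothing to evict */
  lru->tail = pool[victim].prev;
  if (lru->tail != END_OF_CHAIN)
    pool[lru->tail].next = END_OF_CHAIN;
  else
    lru->head = END_OF_CHAIN; /* list is now empty */
  pool[victim].prev = pool[victim].next = END_OF_CHAIN;
  lru->count--;
  return victim;
}

int
main (void)
{
  lru_t lru = { END_OF_CHAIN, END_OF_CHAIN, 0 };
  for (uint32_t i = 0; i < 4; i++)
    lru_insert_head (&lru, i);
  /* 0 was inserted first, so it is the least recently used entry */
  printf ("evicted %u, %u entries left\n", lru_evict_tail (&lru), lru.count);
  return 0;
}
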
diff --git a/hicn-plugin/src/cache_policies/cs_lru.h b/hicn-plugin/src/cache_policies/cs_lru.h
index 3bd18060d..1e67cb547 100644
--- a/hicn-plugin/src/cache_policies/cs_lru.h
+++ b/hicn-plugin/src/cache_policies/cs_lru.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Copyright (c) 2021 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -17,7 +17,6 @@
#define __LRU_H__
#include "../pcs.h"
-#include "../hashtb.h"
#include "cs_policy.h"
/**
@@ -26,48 +25,48 @@
* This file implements the LRU policy for the CS
*/
-
extern hicn_cs_policy_vft_t hicn_cs_lru;
-/*
- * Insert a new CS element at the head of the CS LRU
+/**
+ * @brief Insert a new CS element at the head of the CS LRU
+ *
+ * @param policy the cs insertion/eviction policy - LRU
+ * @param pcs the PCS table
+ * @param pcs_entry the PCS entry to insert
+ * @return 0 on success, -1 on overflow
*/
-void
-hicn_cs_lru_insert (hicn_pit_cs_t * pcs, hicn_hash_node_t * pnode,
- hicn_pcs_entry_t * entry, hicn_cs_policy_t * lru);
-
+void hicn_cs_lru_insert (hicn_cs_policy_t *policy, hicn_pit_cs_t *pcs,
+ hicn_pcs_entry_t *pcs_entry);
/*
* Dequeue an LRU element, for example when it has expired.
*/
-void
-hicn_cs_lru_dequeue (hicn_pit_cs_t * pcs, hicn_hash_node_t * pnode,
- hicn_pcs_entry_t * entry, hicn_cs_policy_t * lru);
+void hicn_cs_lru_dequeue (hicn_cs_policy_t *policy, hicn_pit_cs_t *pcs,
+ hicn_pcs_entry_t *pcs_entry);
/*
* Move a CS LRU element to the head, probably after it's been used.
*/
-void
-hicn_cs_lru_update_head (hicn_pit_cs_t * pcs, hicn_hash_node_t * pnode,
- hicn_pcs_entry_t * entry, hicn_cs_policy_t * lru);
+void hicn_cs_lru_update_head (hicn_cs_policy_t *lru, hicn_pit_cs_t *pcs,
+ hicn_pcs_entry_t *entry);
-void
-hicn_cs_lru_delete_get (hicn_pit_cs_t * p, hicn_cs_policy_t * policy,
- hicn_hash_node_t ** node, hicn_pcs_entry_t ** pcs,
- hicn_hash_entry_t ** hash_entry);
+void hicn_cs_lru_delete_get (hicn_cs_policy_t *policy,
+ const hicn_pit_cs_t *pcs,
+ hicn_pcs_entry_t **pcs_entry);
/*
* Remove a batch of nodes from the CS LRU, copying their node indexes into
* the caller's array. We expect this is done when the LRU size exceeds the
* CS's limit. Return the number of removed nodes.
*/
-int hicn_cs_lru_trim (hicn_pit_cs_t * pcs, u32 * node_list, int sz,
- hicn_cs_policy_t * lru);
+int hicn_cs_lru_trim (hicn_cs_policy_t *policy, hicn_pit_cs_t *pcs,
+ u32 *node_list, size_t sz);
+
+int hicn_cs_lru_flush (hicn_cs_policy_t *policy, hicn_pit_cs_t *pcs);
+hicn_cs_policy_t hicn_cs_lru_create (u32 max_elts);
-int hicn_cs_lru_flush (vlib_main_t * vm, struct hicn_pit_cs_s *pitcs,
- hicn_cs_policy_t * state);
-#endif /* // __LRU_H__ */
+#endif /* __LRU_H__ */
/*
* fd.io coding-style-patch-verification: ON
diff --git a/hicn-plugin/src/cache_policies/cs_policy.h b/hicn-plugin/src/cache_policies/cs_policy.h
index 0bf745915..5280a59c2 100644
--- a/hicn-plugin/src/cache_policies/cs_policy.h
+++ b/hicn-plugin/src/cache_policies/cs_policy.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Copyright (c) 2021 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -16,7 +16,9 @@
#ifndef __HICN_CS_POLICY_H__
#define __HICN_CS_POLICY_H__
-#include "../hashtb.h"
+#include <vppinfra/types.h>
+#include <vppinfra/clib.h>
+#include <stddef.h>
/**
* @file cs_policy.h
@@ -24,70 +26,155 @@
* This file provides the needed structures to implement a CS policy
*/
+/* Forward declaration */
+typedef struct hicn_pit_cs_s hicn_pit_cs_t;
+typedef struct hicn_pcs_entry_s hicn_pcs_entry_t;
+typedef struct hicn_cs_policy_s hicn_cs_policy_t;
+
+/**
+ * @brief Definition of the virtual functin table for a cache policy.
+ *
+ * A cache policy must implement all the following functions:
+ * - insert: add a new element
+ * - update: update the position of an existing element
+ * - dequeue: remove an element from the list
+ * - delete_get: return the next element that should be removed
+ * - trim: trim last sz elements from the list
+ * - flush: clean the cs
+ */
+typedef struct hicn_cs_policy_vft_s
+{
+ void (*hicn_cs_insert) (hicn_cs_policy_t *policy, hicn_pit_cs_t *pcs,
+ hicn_pcs_entry_t *pcs_entry);
+
+ void (*hicn_cs_update) (hicn_cs_policy_t *policy, hicn_pit_cs_t *pcs,
+ hicn_pcs_entry_t *pcs_entry);
+
+ void (*hicn_cs_dequeue) (hicn_cs_policy_t *policy, hicn_pit_cs_t *pcs,
+ hicn_pcs_entry_t *pcs_entry);
+
+ void (*hicn_cs_delete_get) (hicn_cs_policy_t *policy,
+ const hicn_pit_cs_t *pcs,
+ hicn_pcs_entry_t **pcs_entry);
+
+ int (*hicn_cs_trim) (hicn_cs_policy_t *policy, hicn_pit_cs_t *pcs,
+ u32 *node_list, size_t sz);
+
+ int (*hicn_cs_flush) (hicn_cs_policy_t *policy, hicn_pit_cs_t *pcs);
+} hicn_cs_policy_vft_t;
/*
- * Structure
+ * CS policy
*/
typedef struct hicn_cs_policy_s
{
+#define HICN_CS_POLICY_END_OF_CHAIN (u32) (~0)
+
+ /*
+ * VFT implementing the CS eviction/insertion policy. This must be the first
+ * element of the structure.
+ */
+ hicn_cs_policy_vft_t vft;
+
+ /*
+ * Max number of element in CS
+ */
u32 max;
+
+ /*
+ * Number of element in CS
+ */
u32 count;
- /* Indexes to hashtable nodes forming CS LRU */
+ /*
+ * Head element of the CS (i.e. the most recent element used for LRU)
+ */
u32 head;
- u32 tail;
+ /*
+ * Tail element of the LRU (i.e. the next element to evict for LRU)
+ */
+ u32 tail;
} hicn_cs_policy_t;
-/* Forward declaration */
-struct hicn_pit_cs_s;
-struct hicn_hash_node_s;
-struct hicn_pcs_entry_s;
-struct hicn_cs_policy_s;
+/*
+ * Get the max number of element in the CS
+ */
+always_inline u32
+hicn_cs_policy_get_max (const hicn_cs_policy_t *policy)
+{
+ return policy->max;
+}
-/**
- * @brief Definition of the virtual functin table for a cache policy.
- *
- * A cache policy must implement all the following functions:
- * - insert: add a new element
- * - update: update the position of an existing element
- * - dequeue: remove an element from the list
- * - delete_get: return the next element that should be removed trim
- * - flush: clean the cs
+/*
+ * Get the number of element in the CS
*/
-typedef struct hicn_cs_policy_vft_s
+always_inline u32
+hicn_cs_policy_get_count (const hicn_cs_policy_t *policy)
{
- void (*hicn_cs_insert) (struct hicn_pit_cs_s * p,
- struct hicn_hash_node_s * node,
- struct hicn_pcs_entry_s * pcs,
- hicn_cs_policy_t * policy);
-
- void (*hicn_cs_update) (struct hicn_pit_cs_s * p,
- struct hicn_hash_node_s * node,
- struct hicn_pcs_entry_s * pcs,
- hicn_cs_policy_t * policy);
-
- void (*hicn_cs_dequeue) (struct hicn_pit_cs_s * p,
- struct hicn_hash_node_s * node,
- struct hicn_pcs_entry_s * pcs,
- hicn_cs_policy_t * policy);
-
- void (*hicn_cs_delete_get) (struct hicn_pit_cs_s * p,
- hicn_cs_policy_t * policy,
- struct hicn_hash_node_s ** node,
- struct hicn_pcs_entry_s ** pcs,
- struct hicn_hash_entry_s ** hash_entry);
-
- int (*hicn_cs_trim) (struct hicn_pit_cs_s * p, u32 * node_list, int sz,
- hicn_cs_policy_t * policy);
-
- int (*hicn_cs_flush) (vlib_main_t * vm, struct hicn_pit_cs_s * p,
- hicn_cs_policy_t * policy_state);
-} hicn_cs_policy_vft_t;
+ return policy->count;
+}
+
+/*
+ * Get the head element of the CS
+ */
+always_inline u32
+hicn_cs_policy_get_head (const hicn_cs_policy_t *policy)
+{
+ return policy->head;
+}
+/*
+ * Get the tail element of the CS
+ */
+always_inline u32
+hicn_cs_policy_get_tail (const hicn_cs_policy_t *policy)
+{
+ return policy->tail;
+}
+
+always_inline void
+hicn_cs_policy_insert (hicn_cs_policy_t *policy, hicn_pit_cs_t *pcs,
+ hicn_pcs_entry_t *pcs_entry)
+{
+ return policy->vft.hicn_cs_insert (policy, pcs, pcs_entry);
+}
+
+always_inline void
+hicn_cs_policy_update (hicn_cs_policy_t *policy, hicn_pit_cs_t *pcs,
+ hicn_pcs_entry_t *pcs_entry)
+{
+ return policy->vft.hicn_cs_update (policy, pcs, pcs_entry);
+}
+always_inline void
+hicn_cs_policy_dequeue (hicn_cs_policy_t *policy, hicn_pit_cs_t *pcs,
+ hicn_pcs_entry_t *pcs_entry)
+{
+ return policy->vft.hicn_cs_dequeue (policy, pcs, pcs_entry);
+}
+
+always_inline void
+hicn_cs_policy_delete_get (hicn_cs_policy_t *policy, const hicn_pit_cs_t *pcs,
+ hicn_pcs_entry_t **pcs_entry)
+{
+ return policy->vft.hicn_cs_delete_get (policy, pcs, pcs_entry);
+}
+
+always_inline int
+hicn_cs_policy_trim (hicn_cs_policy_t *policy, hicn_pit_cs_t *pcs,
+ u32 *node_list, int sz)
+{
+ return policy->vft.hicn_cs_trim (policy, pcs, node_list, sz);
+}
+
+always_inline int
+hicn_cs_policy_flush (hicn_cs_policy_t *policy, hicn_pit_cs_t *pcs)
+{
+ return policy->vft.hicn_cs_flush (policy, pcs);
+}
-#endif /* // __HICN_POLICY_H__ */
+#endif /* __HICN_POLICY_H__ */
/*
* fd.io coding-style-patch-verification: ON
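
After this change the eviction policy is self-contained in hicn_cs_policy_t: the vft is its first member and the always_inline hicn_cs_policy_* wrappers dispatch through it, so the forwarding nodes never name a concrete policy. The sketch below shows the same function-pointer-table pattern in isolation, with simplified stand-in types and a toy policy (none of these names exist in the plugin); a real policy such as LRU fills the table with its insert/update/dequeue/delete_get/trim/flush handlers, as hicn_cs_lru_create does above:

/* Stand-alone sketch of vft-based policy dispatch (toy types only). */
#include <stdio.h>
#include <stdint.h>

typedef struct policy_s policy_t;

typedef struct
{
  void (*insert) (policy_t *p, uint32_t entry_idx);
  int (*trim) (policy_t *p, uint32_t *victims, int max);
} policy_vft_t;

struct policy_s
{
  policy_vft_t vft; /* first member, as in hicn_cs_policy_t */
  uint32_t count;
  uint32_t max;
};

/* Generic wrappers: callers use these, never a concrete policy. */
static inline void
policy_insert (policy_t *p, uint32_t entry_idx)
{
  p->vft.insert (p, entry_idx);
}

static inline int
policy_trim (policy_t *p, uint32_t *victims, int max)
{
  return p->vft.trim (p, victims, max);
}

/* A toy policy: it only counts entries and "evicts" down to its max. */
static void
toy_insert (policy_t *p, uint32_t entry_idx)
{
  (void) entry_idx;
  p->count++;
}

static int
toy_trim (policy_t *p, uint32_t *victims, int max)
{
  int n = 0;
  while (p->count > p->max && n < max)
    {
      victims[n++] = p->count - 1; /* placeholder victim index */
      p->count--;
    }
  return n;
}

static policy_t
toy_policy_create (uint32_t max_elts)
{
  policy_t p = { .vft = { .insert = toy_insert, .trim = toy_trim },
                 .count = 0,
                 .max = max_elts };
  return p;
}

int
main (void)
{
  policy_t p = toy_policy_create (2);
  uint32_t victims[4];
  for (uint32_t i = 0; i < 4; i++)
    policy_insert (&p, i);
  printf ("evicted %d entries\n", policy_trim (&p, victims, 4));
  return 0;
}
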
diff --git a/hicn-plugin/src/cli.c b/hicn-plugin/src/cli.c
index 1adf3595c..64ebf61a4 100644
--- a/hicn-plugin/src/cli.c
+++ b/hicn-plugin/src/cli.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Copyright (c) 2021 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -19,9 +19,12 @@
#include <vlibmemory/api.h>
#include <vnet/udp/udp.h> // port registration
-#include <vnet/ip/ip6_packet.h> // ip46_address_t
+#include <vnet/ip/ip6_packet.h> // ip46_address_t
#include <vnet/ip/format.h>
#include <vnet/fib/fib_types.h>
+#include <vnet/pg/pg.h>
+
+#include <vpp_plugins/hicn/hicn_api.h>
#include "hicn.h"
#include "infra.h"
@@ -33,7 +36,7 @@
#include "error.h"
#include "faces/face.h"
#include "route.h"
-#include "hicn_api.h"
+#include "mapme.h"
static vl_api_hicn_api_node_params_set_t node_ctl_params = {
.pit_max_size = -1,
@@ -51,32 +54,31 @@ typedef enum
* cli handler for 'control start'
*/
static clib_error_t *
-hicn_cli_node_ctl_start_set_command_fn (vlib_main_t * vm,
- unformat_input_t * main_input,
- vlib_cli_command_t * cmd)
+hicn_cli_node_ctl_start_set_command_fn (vlib_main_t *vm,
+ unformat_input_t *main_input,
+ vlib_cli_command_t *cmd)
{
int ret;
- ret = hicn_infra_plugin_enable_disable (1 /* enable */ ,
- node_ctl_params.pit_max_size,
- node_ctl_params.pit_max_lifetime_sec,
- node_ctl_params.cs_max_size, ~0);
+ ret = hicn_infra_plugin_enable_disable (
+ 1 /* enable */, node_ctl_params.pit_max_size,
+ node_ctl_params.pit_max_lifetime_sec, node_ctl_params.cs_max_size, ~0);
vlib_cli_output (vm, "hicn: fwdr initialize => %s\n",
get_error_string (ret));
- return (ret == HICN_ERROR_NONE) ? 0 : clib_error_return (0,
- get_error_string
- (ret));
+ return (ret == HICN_ERROR_NONE) ?
+ 0 :
+ clib_error_return (0, get_error_string (ret));
}
/*
* cli handler for 'control stop'
*/
static clib_error_t *
-hicn_cli_node_ctl_stop_set_command_fn (vlib_main_t * vm,
- unformat_input_t * main_input,
- vlib_cli_command_t * cmd)
+hicn_cli_node_ctl_stop_set_command_fn (vlib_main_t *vm,
+ unformat_input_t *main_input,
+ vlib_cli_command_t *cmd)
{
int ret;
@@ -92,39 +94,37 @@ hicn_cli_node_ctl_stop_set_command_fn (vlib_main_t * vm,
{
return (0);
}
- while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ if (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
{
return clib_error_return (0, "%s '%U'",
get_error_string (HICN_ERROR_CLI_INVAL),
format_unformat_error, line_input);
}
}
- ret = hicn_infra_plugin_enable_disable (0 /* !enable */ ,
- node_ctl_params.pit_max_size,
- node_ctl_params.pit_max_lifetime_sec,
- node_ctl_params.cs_max_size, ~0);
+ ret = hicn_infra_plugin_enable_disable (
+ 0 /* !enable */, node_ctl_params.pit_max_size,
+ node_ctl_params.pit_max_lifetime_sec, node_ctl_params.cs_max_size, ~0);
- return (ret == HICN_ERROR_NONE) ? 0 : clib_error_return (0,
- get_error_string
- (ret));
+ return (ret == HICN_ERROR_NONE) ?
+ 0 :
+ clib_error_return (0, get_error_string (ret));
}
-#define DFLTD_RANGE_OK(val, min, max) \
-({ \
- __typeof__ (val) _val = (val); \
- __typeof__ (min) _min = (min); \
- __typeof__ (max) _max = (max); \
- (_val == -1) || \
- (_val >= _min && _val <= _max); \
-})
+#define DFLTD_RANGE_OK(val, min, max) \
+ ({ \
+ __typeof__ (val) _val = (val); \
+ __typeof__ (min) _min = (min); \
+ __typeof__ (max) _max = (max); \
+ (_val == -1) || (_val >= _min && _val <= _max); \
+ })
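
DFLTD_RANGE_OK above is a GCC/clang statement expression that evaluates each argument exactly once and accepts either -1 (meaning "keep the current default") or any value inside [min, max]. A tiny standalone illustration with made-up bounds (the macro body is copied from the diff; the rest is hypothetical):

#include <stdio.h>

#define DFLTD_RANGE_OK(val, min, max)                                         \
  ({                                                                          \
    __typeof__ (val) _val = (val);                                            \
    __typeof__ (min) _min = (min);                                            \
    __typeof__ (max) _max = (max);                                            \
    (_val == -1) || (_val >= _min && _val <= _max);                           \
  })

int
main (void)
{
  printf ("%d %d %d\n", DFLTD_RANGE_OK (-1, 1, 10), /* default -> 1 */
          DFLTD_RANGE_OK (5, 1, 10),                /* in range -> 1 */
          DFLTD_RANGE_OK (42, 1, 10));              /* out of range -> 0 */
  return 0;
}
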
/*
* cli handler for 'control param'
*/
static clib_error_t *
-hicn_cli_node_ctl_param_set_command_fn (vlib_main_t * vm,
- unformat_input_t * main_input,
- vlib_cli_command_t * cmd)
+hicn_cli_node_ctl_param_set_command_fn (vlib_main_t *vm,
+ unformat_input_t *main_input,
+ vlib_cli_command_t *cmd)
{
int rv = 0;
@@ -133,16 +133,15 @@ hicn_cli_node_ctl_param_set_command_fn (vlib_main_t * vm,
if (hicn_main.is_enabled)
{
- return (clib_error_return
- (0, "params cannot be altered once hicn started"));
+ return (
+ clib_error_return (0, "params cannot be altered once hicn started"));
}
/* Get a line of input. */
unformat_input_t _line_input, *line_input = &_line_input;
if (!unformat_user (main_input, unformat_line_input, line_input))
{
- return clib_error_return (0,
- get_error_string
- (HICN_ERROR_FWD_ALREADY_ENABLED));
+ return clib_error_return (
+ 0, get_error_string (HICN_ERROR_FWD_ALREADY_ENABLED));
}
while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
{
@@ -160,9 +159,9 @@ hicn_cli_node_ctl_param_set_command_fn (vlib_main_t * vm,
}
else if (unformat (line_input, "maxlife %f", &lifetime))
{
- if (!DFLTD_RANGE_OK
- (lifetime, HICN_PARAM_PIT_LIFETIME_BOUND_MIN_SEC,
- HICN_PARAM_PIT_LIFETIME_BOUND_MAX_SEC))
+ if (!DFLTD_RANGE_OK (lifetime,
+ HICN_PARAM_PIT_LIFETIME_BOUND_MIN_SEC,
+ HICN_PARAM_PIT_LIFETIME_BOUND_MAX_SEC))
{
rv = HICN_ERROR_PIT_CONFIG_MAXLT_OOB;
break;
@@ -201,25 +200,24 @@ hicn_cli_node_ctl_param_set_command_fn (vlib_main_t * vm,
}
if (node_ctl_params.cs_max_size == 0)
- vlib_cli_output (vm,
- "CS size set to 0. Consider disable CS at compilation time for better performances\n");
-
- return (rv == HICN_ERROR_NONE) ? 0 : clib_error_return (0, "%s '%U'",
- get_error_string
- (rv),
- format_unformat_error,
- line_input);
+ vlib_cli_output (vm, "CS size set to 0. Consider disable CS at "
+ "compilation time for better performances\n");
+
+ return (rv == HICN_ERROR_NONE) ?
+ 0 :
+ clib_error_return (0, "%s '%U'", get_error_string (rv),
+ format_unformat_error, line_input);
}
/*
* cli handler for 'hicn show'
*/
static clib_error_t *
-hicn_cli_show_command_fn (vlib_main_t * vm, unformat_input_t * main_input,
- vlib_cli_command_t * cmd)
+hicn_cli_show_command_fn (vlib_main_t *vm, unformat_input_t *main_input,
+ vlib_cli_command_t *cmd)
{
- int face_p = 0, fib_p = 0, all_p, internal_p = 0, strategies_p = 0, ret =
- HICN_ERROR_NONE;
+ int face_p = 0, fib_p = 0, all_p = 0, strategies_p = 0,
+ ret = HICN_ERROR_NONE;
/* Get a line of input. */
unformat_input_t _line_input, *line_input = &_line_input;
@@ -231,14 +229,6 @@ hicn_cli_show_command_fn (vlib_main_t * vm, unformat_input_t * main_input,
{
face_p = 1;
}
- else if (unformat (line_input, "internal"))
- {
- /*
- * We consider 'internal' a superset, so
- * include 'detail' too
- */
- internal_p = 1;
- }
else if (unformat (line_input, "strategies"))
{
/*
@@ -294,8 +284,7 @@ hicn_cli_show_command_fn (vlib_main_t * vm, unformat_input_t * main_input,
" PIT:: max entries:%d,"
" lifetime default: max:%05.3f\n"
" CS:: max entries:%d\n",
- hicn_main.is_enabled ? "en" : "dis",
- hicn_infra_pit_size,
+ hicn_main.is_enabled ? "en" : "dis", hicn_infra_pit_size,
((f64) hicn_main.pit_lifetime_max_ms) / SEC_MS,
hicn_infra_cs_size);
@@ -303,25 +292,26 @@ hicn_cli_show_command_fn (vlib_main_t * vm, unformat_input_t * main_input,
, *rmp = &rm;
if (hicn_mgmt_node_stats_get (&rm) == HICN_ERROR_NONE)
{
- vlib_cli_output (vm, //compare vl_api_hicn_api_node_stats_get_reply_t_handler block
- " PIT entries (now): %d\n"
- " CS total entries (now): %d, network entries (now): %d\n"
- " Forwarding statistics:\n"
- " pkts_processed: %d\n"
- " pkts_interest_count: %d\n"
- " pkts_data_count: %d\n"
- " pkts_from_cache_count: %d\n"
- " interests_aggregated: %d\n"
- " interests_retransmitted: %d\n",
- clib_net_to_host_u64 (rmp->pit_entries_count),
- clib_net_to_host_u64 (rmp->cs_entries_count),
- clib_net_to_host_u64 (rmp->cs_entries_ntw_count),
- clib_net_to_host_u64 (rmp->pkts_processed),
- clib_net_to_host_u64 (rmp->pkts_interest_count),
- clib_net_to_host_u64 (rmp->pkts_data_count),
- clib_net_to_host_u64 (rmp->pkts_from_cache_count),
- clib_net_to_host_u64 (rmp->interests_aggregated),
- clib_net_to_host_u64 (rmp->interests_retx));
+ vlib_cli_output (
+ vm, // compare vl_api_hicn_api_node_stats_get_reply_t_handler block
+ " PIT entries (now): %d\n"
+ " CS total entries (now): %d, network entries (now): %d\n"
+ " Forwarding statistics:\n"
+ " pkts_processed: %d\n"
+ " pkts_interest_count: %d\n"
+ " pkts_data_count: %d\n"
+ " pkts_from_cache_count: %d\n"
+ " interests_aggregated: %d\n"
+ " interests_retransmitted: %d\n",
+ clib_net_to_host_u64 (rmp->pit_entries_count),
+ clib_net_to_host_u64 (rmp->cs_entries_count),
+ clib_net_to_host_u64 (rmp->cs_entries_ntw_count),
+ clib_net_to_host_u64 (rmp->pkts_processed),
+ clib_net_to_host_u64 (rmp->pkts_interest_count),
+ clib_net_to_host_u64 (rmp->pkts_data_count),
+ clib_net_to_host_u64 (rmp->pkts_from_cache_count),
+ clib_net_to_host_u64 (rmp->interests_aggregated),
+ clib_net_to_host_u64 (rmp->interests_retx));
}
if (face_p || all_p)
{
@@ -329,7 +319,6 @@ hicn_cli_show_command_fn (vlib_main_t * vm, unformat_input_t * main_input,
strbuf = format_hicn_face_all (strbuf, 1, 0);
vlib_cli_output (vm, "%s", strbuf);
-
}
if (strategies_p || all_p)
{
@@ -339,32 +328,95 @@ hicn_cli_show_command_fn (vlib_main_t * vm, unformat_input_t * main_input,
vlib_cli_output (vm, (char *) strbuf);
}
done:
- if (all_p && internal_p && ret == HICN_ERROR_NONE)
- {
- vlib_cli_output (vm, "Plugin features: cs:%d\n", HICN_FEATURE_CS);
- vlib_cli_output (vm,
- "Removed CS entries (and freed vlib buffers) %d, Removed PIT entries %d\n",
- hicn_main.pitcs.pcs_cs_dealloc,
- hicn_main.pitcs.pcs_pit_dealloc);
- vlib_cli_output (vm,
- "Bucke count %d, Overflow buckets count %d, used %d\n",
- hicn_main.pitcs.pcs_table->ht_bucket_count,
- hicn_main.pitcs.pcs_table->ht_overflow_bucket_count,
- hicn_main.pitcs.pcs_table->ht_overflow_buckets_used);
-
- }
- return (ret == HICN_ERROR_NONE) ? 0 : clib_error_return (0, "%s\n",
- get_error_string
- (ret));
+ return (ret == HICN_ERROR_NONE) ?
+ 0 :
+ clib_error_return (0, "%s\n", get_error_string (ret));
+}
+
+/*
+ * cli handler for 'mapme'
+ */
+static clib_error_t *
+hicn_cli_mapme_set_command_fn (vlib_main_t *vm, unformat_input_t *main_input,
+ vlib_cli_command_t *cmd)
+{
+ clib_error_t *cl_err = 0;
+ fib_prefix_t prefix;
+
+ /* Get a line of input. */
+ unformat_input_t _line_input, *line_input = &_line_input;
+ if (!unformat_user (main_input, unformat_line_input, line_input))
+ {
+ return (0);
+ }
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "default-route %U/%d", unformat_ip46_address,
+ &prefix.fp_addr, IP46_TYPE_ANY, &prefix.fp_len))
+ {
+ ;
+ }
+ else
+ {
+ return clib_error_return (0, "%s '%U'",
+ get_error_string (HICN_ERROR_CLI_INVAL),
+ format_unformat_error, line_input);
+ }
+ }
+
+ prefix.fp_proto = ip46_address_is_ip4 (&prefix.fp_addr) ? FIB_PROTOCOL_IP4 :
+ FIB_PROTOCOL_IP6;
+
+ hicn_mapme_main_t *mapme_main = hicn_mapme_get_main ();
+ mapme_main->default_route = prefix;
+
+ return (cl_err);
+}
+
+static clib_error_t *
+hicn_cli_mapme_get_command_fn (vlib_main_t *vm, unformat_input_t *main_input,
+ vlib_cli_command_t *cmd)
+{
+ hicn_mapme_main_t *mm = hicn_mapme_get_main ();
+ int default_route = 0;
+ clib_error_t *cl_err = 0;
+
+ /* Get a line of input. */
+ unformat_input_t _line_input, *line_input = &_line_input;
+ if (!unformat_user (main_input, unformat_line_input, line_input))
+ {
+ return (0);
+ }
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "default-route"))
+ {
+ default_route = 1;
+ }
+ else
+ {
+ return clib_error_return (0, "%s '%U'",
+ get_error_string (HICN_ERROR_CLI_INVAL),
+ format_unformat_error, line_input);
+ }
+ }
+
+ if (default_route)
+ {
+ vlib_cli_output (vm, "Mapme default route: '%U'", format_fib_prefix,
+ &mm->default_route);
+ }
+
+ return cl_err;
}
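Both MAP-Me handlers above follow the standard VPP line-input pattern. A minimal sketch of that skeleton, assuming only the unformat/clib calls already used in this file (the function and keyword names are illustrative, not part of the patch):

static clib_error_t *
hicn_cli_parse_sketch (unformat_input_t *main_input)
{
  /* Get a line of input */
  unformat_input_t _line_input, *line_input = &_line_input;
  if (!unformat_user (main_input, unformat_line_input, line_input))
    return 0; /* nothing to parse */

  /* Consume tokens until the line is exhausted */
  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (line_input, "keyword"))
        ; /* handle the keyword */
      else
        return clib_error_return (0, "%s '%U'",
                                  get_error_string (HICN_ERROR_CLI_INVAL),
                                  format_unformat_error, line_input);
    }

  return 0;
}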
/*
* cli handler for 'fib'
*/
static clib_error_t *
-hicn_cli_strategy_set_command_fn (vlib_main_t * vm,
- unformat_input_t * main_input,
- vlib_cli_command_t * cmd)
+hicn_cli_strategy_set_command_fn (vlib_main_t *vm,
+ unformat_input_t *main_input,
+ vlib_cli_command_t *cmd)
{
clib_error_t *cl_err = 0;
@@ -387,10 +439,11 @@ hicn_cli_strategy_set_command_fn (vlib_main_t * vm,
{
addpfx = 2;
}
- else if (addpfx != -1
- && unformat (line_input, "prefix %U/%d", unformat_ip46_address,
- &address, IP46_TYPE_ANY, &plen))
- {;
+ else if (addpfx != -1 &&
+ unformat (line_input, "prefix %U/%d", unformat_ip46_address,
+ &address, IP46_TYPE_ANY, &plen))
+ {
+ ;
}
else
{
@@ -412,10 +465,9 @@ hicn_cli_strategy_set_command_fn (vlib_main_t * vm,
}
rv = hicn_route_set_strategy (&prefix, strategy_id);
- cl_err =
- (rv == HICN_ERROR_NONE) ? NULL : clib_error_return (0,
- get_error_string
- (rv));
+ cl_err = (rv == HICN_ERROR_NONE) ?
+ NULL :
+ clib_error_return (0, get_error_string (rv));
done:
return (cl_err);
@@ -425,9 +477,9 @@ done:
* cli handler for 'pgen'
*/
static clib_error_t *
-hicn_cli_pgen_client_set_command_fn (vlib_main_t * vm,
- unformat_input_t * main_input,
- vlib_cli_command_t * cmd)
+hicn_cli_pgen_client_set_command_fn (vlib_main_t *vm,
+ unformat_input_t *main_input,
+ vlib_cli_command_t *cmd)
{
hicnpg_main_t *hpgm = &hicnpg_main;
ip46_address_t src_addr;
@@ -446,14 +498,13 @@ hicn_cli_pgen_client_set_command_fn (vlib_main_t * vm,
{
while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
{
- if (unformat
- (line_input, "intfc %U", unformat_vnet_sw_interface, vnm,
- &sw_if_index))
+ if (unformat (line_input, "intfc %U", unformat_vnet_sw_interface,
+ vnm, &sw_if_index))
{
;
}
- else if (unformat (line_input, "src %U",
- unformat_ip46_address, &src_addr))
+ else if (unformat (line_input, "src %U", unformat_ip46_address,
+ &src_addr))
{
;
}
@@ -461,9 +512,8 @@ hicn_cli_pgen_client_set_command_fn (vlib_main_t * vm,
{
;
}
- else if (unformat (line_input, "name %U/%d",
- unformat_ip46_address, &prefix->fp_addr,
- IP46_TYPE_ANY, &prefix->fp_len))
+ else if (unformat (line_input, "name %U/%d", unformat_ip46_address,
+ &prefix->fp_addr, IP46_TYPE_ANY, &prefix->fp_len))
{
;
}
@@ -481,9 +531,8 @@ hicn_cli_pgen_client_set_command_fn (vlib_main_t * vm,
}
else
{
- return (clib_error_return
- (0, "Unknown input '%U'", format_unformat_error,
- line_input));
+ return (clib_error_return (0, "Unknown input '%U'",
+ format_unformat_error, line_input));
break;
}
}
@@ -495,7 +544,7 @@ hicn_cli_pgen_client_set_command_fn (vlib_main_t * vm,
return (clib_error_return (0, "Packet generator interface missing"));
}
- //Remove bits that are out of the subnet
+ // Remove bits that are out of the subnet
if (ip46_address_is_ip4 (&prefix->fp_addr))
{
ip4_address_t mask;
@@ -517,34 +566,32 @@ hicn_cli_pgen_client_set_command_fn (vlib_main_t * vm,
/*
* Enable the feature to divert data packet to the hicnpg-data node to count
* how many data packets have been received.
- * Diver all the packets from the packet-generator to the hicn-pg-interest node
- * to generate valid interests.
+ * Divert all the packets from the packet-generator to the hicn-pg-interest
+ * node to generate valid interests.
*/
- if (ip46_address_is_ip4 (&src_addr)
- && ip46_address_is_ip4 (&prefix->fp_addr))
+ if (ip46_address_is_ip4 (&src_addr) &&
+ ip46_address_is_ip4 (&prefix->fp_addr))
{
prefix->fp_proto = FIB_PROTOCOL_IP4;
- vnet_feature_enable_disable ("ip4-unicast", "hicnpg-data",
- sw_if_index, 1, 0, 0);
+ vnet_feature_enable_disable ("ip4-unicast", "hicnpg-data", sw_if_index,
+ 1, 0, 0);
/* Add pgen_client node to the vpp graph */
- vlib_node_add_next (vm,
- pg_input_node.index, hicn_pg_interest_node.index);
-
+ vlib_node_add_next (vm, pg_input_node.index,
+ hicn_pg_interest_node.index);
pg_node_t *pn;
pn = pg_get_node (hicn_pg_interest_node.index);
pn->unformat_edit = unformat_pg_ip4_header;
-
}
- else if (!ip46_address_is_ip4 (&src_addr)
- && !ip46_address_is_ip4 (&prefix->fp_addr))
+ else if (!ip46_address_is_ip4 (&src_addr) &&
+ !ip46_address_is_ip4 (&prefix->fp_addr))
{
prefix->fp_proto = FIB_PROTOCOL_IP6;
- vnet_feature_enable_disable ("ip6-unicast", "hicnpg-data",
- sw_if_index, 1, 0, 0);
+ vnet_feature_enable_disable ("ip6-unicast", "hicnpg-data", sw_if_index,
+ 1, 0, 0);
/* Add pgen_client node to the vpp graph */
vlib_node_add_next (vm, pg_input_node.index,
@@ -556,12 +603,11 @@ hicn_cli_pgen_client_set_command_fn (vlib_main_t * vm,
}
else
{
- return (clib_error_return
- (0,
- "pg interface source address, source address and hicn name must be of the same type IPv4 or IPv6"));
+ return (clib_error_return (
+ 0, "pg interface source address, source address and hicn name must be "
+ "of the same type IPv4 or IPv6"));
}
-
hpgm->pgen_clt_src_addr = src_addr;
hpgm->pgen_clt_hicn_name = prefix;
hpgm->max_seq_number = max_seq;
@@ -591,17 +637,19 @@ hicn_cli_pgen_client_set_command_fn (vlib_main_t * vm,
* cli handler for 'pgen'
*/
static clib_error_t *
-hicn_cli_pgen_server_set_command_fn (vlib_main_t * vm,
- unformat_input_t * main_input,
- vlib_cli_command_t * cmd)
+hicn_cli_pgen_server_set_command_fn (vlib_main_t *vm,
+ unformat_input_t *main_input,
+ vlib_cli_command_t *cmd)
{
- clib_error_t *cl_err;
- int rv = HICN_ERROR_NONE;
- hicnpg_server_main_t *pg_main = &hicnpg_server_main;
int payload_size = 1440;
u32 sw_if_index = ~0;
vnet_main_t *vnm = vnet_get_main ();
- fib_prefix_t *prefix = calloc (1, sizeof (fib_prefix_t));
+ fib_prefix_t prefix = {};
+ u32 hicnpg_server_index;
+ ip46_address_t locator;
+
+ locator.as_u64[0] = 0;
+ locator.as_u64[1] = 0;
/* Get a line of input. */
unformat_input_t _line_input, *line_input = &_line_input;
@@ -610,106 +658,83 @@ hicn_cli_pgen_server_set_command_fn (vlib_main_t * vm,
/* Parse the arguments */
while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
{
- if (unformat (line_input, "name %U/%d",
- unformat_ip46_address, &prefix->fp_addr,
- IP46_TYPE_ANY, &prefix->fp_len))
- {;
+ if (unformat (line_input, "name %U/%d", unformat_ip46_address,
+ &prefix.fp_addr, IP46_TYPE_ANY, &prefix.fp_len))
+ {
+ ;
}
else if (unformat (line_input, "size %d", &payload_size))
{
if (payload_size > 1440)
{
- return (clib_error_return (0,
- "Payload size must be <= 1440 bytes..."));
+ return (clib_error_return (
+ 0, "Payload size must be <= 1440 bytes..."));
}
}
- else
- if (unformat
- (line_input, "intfc %U", unformat_vnet_sw_interface, vnm,
- &sw_if_index))
+ else if (unformat (line_input, "intfc %U",
+ unformat_vnet_sw_interface, vnm, &sw_if_index))
+ {
+ ;
+ }
+ else if (unformat (line_input, "dst %U", unformat_ip46_address,
+ &locator, IP46_TYPE_ANY))
{
;
}
else
{
- return (clib_error_return
- (0, "Unknown input '%U'", format_unformat_error,
- line_input));
+ return (clib_error_return (0, "Unknown input '%U'",
+ format_unformat_error, line_input));
break;
}
}
}
/* Attach our packet-gen node for ip4 udp local traffic */
- if ((prefix->fp_addr.ip6.as_u64[0] == (u64) 0
- && prefix->fp_addr.ip6.as_u64[1] == 0) || payload_size == 0
- || sw_if_index == ~0)
+ if ((prefix.fp_addr.ip6.as_u64[0] == (u64) 0 &&
+ prefix.fp_addr.ip6.as_u64[1] == 0) ||
+ payload_size == 0 || sw_if_index == ~0 ||
+ ip46_address_is_zero (&locator))
{
- return clib_error_return (0,
- "Error: must supply local port, payload size and incoming hICN prefix");
+ return clib_error_return (0, "Error: must supply locator, payload "
+ "size and incoming hICN prefix");
}
- //Remove bits that are out of the subnet
- if (ip46_address_is_ip4 (&prefix->fp_addr))
+ // Remove bits that are out of the subnet
+ if (ip46_address_is_ip4 (&prefix.fp_addr))
{
ip4_address_t mask;
- ip4_preflen_to_mask (prefix->fp_len, &mask);
- prefix->fp_addr.ip4.as_u32 = prefix->fp_addr.ip4.as_u32 & mask.as_u32;
- prefix->fp_proto = FIB_PROTOCOL_IP4;
+ ip4_preflen_to_mask (prefix.fp_len, &mask);
+ prefix.fp_addr.ip4.as_u32 = prefix.fp_addr.ip4.as_u32 & mask.as_u32;
+ prefix.fp_proto = FIB_PROTOCOL_IP4;
}
else
{
ip6_address_t mask;
- ip6_preflen_to_mask (prefix->fp_len, &mask);
- prefix->fp_addr.ip6.as_u64[0] =
- prefix->fp_addr.ip6.as_u64[0] & mask.as_u64[0];
- prefix->fp_addr.ip6.as_u64[1] =
- prefix->fp_addr.ip6.as_u64[1] & mask.as_u64[1];
- prefix->fp_proto = FIB_PROTOCOL_IP6;
+ ip6_preflen_to_mask (prefix.fp_len, &mask);
+ prefix.fp_addr.ip6.as_u64[0] =
+ prefix.fp_addr.ip6.as_u64[0] & mask.as_u64[0];
+ prefix.fp_addr.ip6.as_u64[1] =
+ prefix.fp_addr.ip6.as_u64[1] & mask.as_u64[1];
+ prefix.fp_proto = FIB_PROTOCOL_IP6;
}
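As a worked example of the masking above (illustrative only, not part of the patch): a name entered with host bits set, e.g. 10.1.2.3/16, is normalized to 10.1.0.0/16 before being stored.

static inline void
hicn_prefix_mask_example (void)
{
  ip4_address_t addr, mask;

  addr.as_u32 = clib_host_to_net_u32 (0x0a010203); /* 10.1.2.3 */
  ip4_preflen_to_mask (16, &mask);                 /* 255.255.0.0 */
  addr.as_u32 &= mask.as_u32;                      /* 10.1.0.0 */
}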
- /* Allocate the buffer with the actual content payload TLV */
- int n_buf = vlib_buffer_alloc (vm, &pg_main->pgen_svr_buffer_idx, 1);
-
- if (n_buf == 0)
+ fib_protocol_t dest_proto =
+ ip46_address_is_ip4 (&locator) ? FIB_PROTOCOL_IP4 : FIB_PROTOCOL_IP6;
+ if (prefix.fp_proto != dest_proto)
{
- return (clib_error_return (0, "Impossible to allocate paylod buffer."));
+ return clib_error_return (0, "Error: prefix and locator must be of the "
+ "same protocol");
}
- vlib_buffer_t *rb = NULL;
- rb = vlib_get_buffer (vm, pg_main->pgen_svr_buffer_idx);
-
- pg_main->pgen_srv_hicn_name = prefix;
-
- /* Initialize the buffer data with zeros */
- memset (rb->data, 0, payload_size);
- rb->current_length = payload_size;
-
- vnet_feature_enable_disable ("ip4-unicast", "hicnpg-server",
- sw_if_index, 1, 0, 0);
- vnet_feature_enable_disable ("ip6-unicast", "hicnpg-server",
- sw_if_index, 1, 0, 0);
-
- switch (rv)
- {
- case 0:
- cl_err = 0;
- break;
-
- case VNET_API_ERROR_UNIMPLEMENTED:
- cl_err = clib_error_return (0, "Unimplemented, NYI");
- break;
-
- default:
- cl_err = clib_error_return (0, "hicn pgen server returned %d", rv);
- }
-
- return cl_err;
+ // Create hicnpg_server
+ return hicnpg_server_add_and_lock (&prefix, &hicnpg_server_index, &locator,
+ payload_size);
}
static clib_error_t *
-hicn_enable_command_fn (vlib_main_t * vm, unformat_input_t * main_input,
- vlib_cli_command_t * cmd)
+hicn_enable_command_fn (vlib_main_t *vm, unformat_input_t *main_input,
+ vlib_cli_command_t *cmd)
{
clib_error_t *cl_err = 0;
@@ -724,13 +749,13 @@ hicn_enable_command_fn (vlib_main_t * vm, unformat_input_t * main_input,
}
while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
{
- if (unformat (line_input, "%U/%d",
- unformat_ip4_address, &pfx.fp_addr.ip4, &pfx.fp_len))
+ if (unformat (line_input, "%U/%d", unformat_ip4_address,
+ &pfx.fp_addr.ip4, &pfx.fp_len))
{
pfx.fp_proto = FIB_PROTOCOL_IP4;
}
- else if (unformat (line_input, "%U/%d",
- unformat_ip6_address, &pfx.fp_addr.ip6, &pfx.fp_len))
+ else if (unformat (line_input, "%U/%d", unformat_ip6_address,
+ &pfx.fp_addr.ip6, &pfx.fp_len))
{
pfx.fp_proto = FIB_PROTOCOL_IP6;
}
@@ -742,19 +767,33 @@ hicn_enable_command_fn (vlib_main_t * vm, unformat_input_t * main_input,
goto done;
}
}
- rv = hicn_route_enable (&pfx);
+
+ hicn_face_id_t *vec_faces = NULL;
+ fib_node_index_t hicn_fib_index;
+ rv = hicn_route_enable (&pfx, &hicn_fib_index, &vec_faces);
+
+ if (vec_faces != NULL)
+ {
+ hicn_face_id_t *face_id;
+ u8 *str = 0;
+ vec_foreach (face_id, vec_faces)
+ {
+ str = format (str, " %d", *face_id);
+ }
+ vec_free (vec_faces);
+ vlib_cli_output (vm, "Faces for this prefix: %s", str);
+ }
done:
- cl_err =
- (rv == HICN_ERROR_NONE) ? NULL : clib_error_return (0,
- get_error_string
- (rv));
+ cl_err = (rv == HICN_ERROR_NONE) ?
+ NULL :
+ clib_error_return (0, get_error_string (rv));
return cl_err;
}
static clib_error_t *
-hicn_disable_command_fn (vlib_main_t * vm, unformat_input_t * main_input,
- vlib_cli_command_t * cmd)
+hicn_disable_command_fn (vlib_main_t *vm, unformat_input_t *main_input,
+ vlib_cli_command_t *cmd)
{
clib_error_t *cl_err = 0;
@@ -769,13 +808,13 @@ hicn_disable_command_fn (vlib_main_t * vm, unformat_input_t * main_input,
}
while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
{
- if (unformat (line_input, "%U/%d",
- unformat_ip4_address, &pfx.fp_addr.ip4, &pfx.fp_len))
+ if (unformat (line_input, "%U/%d", unformat_ip4_address,
+ &pfx.fp_addr.ip4, &pfx.fp_len))
{
pfx.fp_proto = FIB_PROTOCOL_IP4;
}
- else if (unformat (line_input, "%U/%d",
- unformat_ip6_address, &pfx.fp_addr.ip6, &pfx.fp_len))
+ else if (unformat (line_input, "%U/%d", unformat_ip6_address,
+ &pfx.fp_addr.ip6, &pfx.fp_len))
{
pfx.fp_proto = FIB_PROTOCOL_IP6;
}
@@ -791,103 +830,104 @@ hicn_disable_command_fn (vlib_main_t * vm, unformat_input_t * main_input,
rv = hicn_route_disable (&pfx);
done:
- cl_err =
- (rv == HICN_ERROR_NONE) ? NULL : clib_error_return (0,
- get_error_string
- (rv));
+ cl_err = (rv == HICN_ERROR_NONE) ?
+ NULL :
+ clib_error_return (0, get_error_string (rv));
return cl_err;
}
+/* cli declaration for 'mapme set default-route' */
+VLIB_CLI_COMMAND (hicn_cli_mapme_set_command, static) = {
+ .path = "hicn mapme set",
+ .short_help = "hicn mapme set default-route <prefix>",
+ .function = hicn_cli_mapme_set_command_fn,
+};
-/* cli declaration for 'control start' */
-/* *INDENT-OFF* */
-VLIB_CLI_COMMAND(hicn_cli_node_ctl_start_set_command, static)=
-{
- .path = "hicn control start",
- .short_help = "hicn control start",
- .function = hicn_cli_node_ctl_start_set_command_fn,
+VLIB_CLI_COMMAND (hicn_cli_mapme_get_command, static) = {
+ .path = "hicn mapme get",
+ .short_help = "hicn mapme get default-route",
+ .function = hicn_cli_mapme_get_command_fn,
};
+/* cli declaration for 'control start' */
+VLIB_CLI_COMMAND (hicn_cli_node_ctl_start_set_command, static) = {
+ .path = "hicn control start",
+ .short_help = "hicn control start",
+ .function = hicn_cli_node_ctl_start_set_command_fn,
+};
/* cli declaration for 'control stop' */
-VLIB_CLI_COMMAND(hicn_cli_node_ctl_stop_set_command, static)=
-{
- .path = "hicn control stop",
- .short_help = "hicn control stop",
- .function = hicn_cli_node_ctl_stop_set_command_fn,
+VLIB_CLI_COMMAND (hicn_cli_node_ctl_stop_set_command, static) = {
+ .path = "hicn control stop",
+ .short_help = "hicn control stop",
+ .function = hicn_cli_node_ctl_stop_set_command_fn,
};
-
/* cli declaration for 'control param' */
-VLIB_CLI_COMMAND(hicn_cli_node_ctl_param_set_command, static)=
-{
- .path = "hicn control param",
- .short_help = "hicn control param { pit { size <entries> | { dfltlife | minlife | maxlife } <seconds> } | fib size <entries> | cs {size <entries> | app <portion to reserved to app>} }\n",
- .function = hicn_cli_node_ctl_param_set_command_fn,
+VLIB_CLI_COMMAND (hicn_cli_node_ctl_param_set_command, static) = {
+ .path = "hicn control param",
+ .short_help = "hicn control param { pit { size <entries> | { dfltlife | "
+ "minlife | maxlife } <seconds> } | fib size <entries> | cs "
+ "{size <entries> | app <portion to reserved to app>} }\n",
+ .function = hicn_cli_node_ctl_param_set_command_fn,
};
/* cli declaration for 'control' (root path of multiple commands, for help) */
-VLIB_CLI_COMMAND(hicn_cli_node_ctl_command, static)=
-{
- .path = "hicn control",
- .short_help = "hicn control"
-};
+VLIB_CLI_COMMAND (hicn_cli_node_ctl_command,
+ static) = { .path = "hicn control",
+ .short_help = "hicn control" };
/* cli declaration for 'fib' */
-VLIB_CLI_COMMAND(hicn_cli_strategy_set_command, static)=
- {
- .path = "hicn strategy",
- .short_help = "hicn strategy set <strategy_id> prefix <prefix>",
- .function = hicn_cli_strategy_set_command_fn,
- };
+VLIB_CLI_COMMAND (hicn_cli_strategy_set_command, static) = {
+ .path = "hicn strategy",
+ .short_help = "hicn strategy set <strategy_id> prefix <prefix>",
+ .function = hicn_cli_strategy_set_command_fn,
+};
/* cli declaration for 'show' */
-VLIB_CLI_COMMAND(hicn_cli_show_command, static)=
-{
- .path = "hicn show",
- .short_help = "hicn show "
- "[internal]"
- "[strategies]",
- .function = hicn_cli_show_command_fn,
+VLIB_CLI_COMMAND (hicn_cli_show_command, static) = {
+ .path = "hicn show",
+ .short_help = "hicn show "
+ "[internal]"
+ "[strategies]",
+ .function = hicn_cli_show_command_fn,
};
/* cli declaration for 'hicn pgen client' */
-VLIB_CLI_COMMAND(hicn_cli_pgen_client_set_command, static)=
-{
- .path = "hicn pgen client",
- .short_help = "hicn pgen client src <src_addr> name <prefix> { n_ifaces <n_ifaces> lifetime <interest-lifetime> intfc <data in-interface> max_seq <max sequence number> n_flows <number of flows>}",
- .long_help = "Run hicn in packet-gen client mode\n",
- .function = hicn_cli_pgen_client_set_command_fn,
+VLIB_CLI_COMMAND (hicn_cli_pgen_client_set_command, static) = {
+ .path = "hicn pgen client",
+ .short_help =
+ "hicn pgen client src <src_addr> name <prefix> { n_ifaces <n_ifaces> "
+ "lifetime <interest-lifetime> intfc <data in-interface> max_seq <max "
+ "sequence number> n_flows <number of flows>}",
+ .long_help = "Run hicn in packet-gen client mode\n",
+ .function = hicn_cli_pgen_client_set_command_fn,
};
/* cli declaration for 'hicn pgen client' */
-VLIB_CLI_COMMAND(hicn_cli_pgen_server_set_command, static)=
-{
- .path = "hicn pgen server",
- .short_help = "hicn pgen server name <prefix> intfc <interest in-interface> size <payload_size>",
- .long_help = "Run hicn in packet-gen server mode\n",
- .function = hicn_cli_pgen_server_set_command_fn,
+VLIB_CLI_COMMAND (hicn_cli_pgen_server_set_command, static) = {
+ .path = "hicn pgen server",
+ .short_help = "hicn pgen server name <prefix> intfc <interest in-interface> "
+ "dst <ip_address> size <payload_size>",
+ .long_help = "Run hicn in packet-gen server mode\n",
+ .function = hicn_cli_pgen_server_set_command_fn,
};
/* cli declaration for 'hicn pgen client' */
-VLIB_CLI_COMMAND(hicn_enable_command, static)=
- {
- .path = "hicn enable",
- .short_help = "hicn enable <prefix>",
- .long_help = "Enable hicn for the give prefix\n",
- .function = hicn_enable_command_fn,
- };
+VLIB_CLI_COMMAND (hicn_enable_command, static) = {
+ .path = "hicn enable",
+ .short_help = "hicn enable <prefix>",
+ .long_help = "Enable hicn for the give prefix\n",
+ .function = hicn_enable_command_fn,
+};
/* cli declaration for 'hicn pgen client' */
-VLIB_CLI_COMMAND(hicn_disable_command, static)=
- {
- .path = "hicn disable",
- .short_help = "hicn disable <prefix>",
- .long_help = "Disable hicn for the give prefix\n",
- .function = hicn_disable_command_fn,
- };
-
-/* *INDENT-ON* */
+VLIB_CLI_COMMAND (hicn_disable_command, static) = {
+ .path = "hicn disable",
+ .short_help = "hicn disable <prefix>",
+ .long_help = "Disable hicn for the give prefix\n",
+ .function = hicn_disable_command_fn,
+};
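For reference, plausible invocations of the commands registered above; the syntax follows the short_help strings, while the interface name and addresses are made-up examples:

/*
 *   vppctl hicn enable b001::/64
 *   vppctl hicn mapme set default-route b001::/64
 *   vppctl hicn pgen server name b002::1/64 intfc TenGigabitEthernet0/0/0 \
 *          dst b001::2 size 1200
 */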
/*
* fd.io coding-style-patch-verification: ON
diff --git a/hicn-plugin/src/data_fwd.h b/hicn-plugin/src/data_fwd.h
index d95f564c3..4acfdea4d 100644
--- a/hicn-plugin/src/data_fwd.h
+++ b/hicn-plugin/src/data_fwd.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Copyright (c) 2021 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -24,23 +24,25 @@
* @file data_fwd.h
*
* This is the node encoutered by data packets after the hicn-data-pcslookup.
- * This node has two goals: 1) clone/copy the vlib buffer as many time as the number
- * of faces stored in the pit entry, 2) store a clone/copy of the vlib buffer in the CS.
- * Unless there are memory issue (no more vlib buffer available to perform cloning/copy),
- * a single vlib buffer received might results in several vlib buffer sent to the next
- * vlib node (hicn4-iface-output or hicn6-iface-output).
+ * This node has two goals: 1) clone/copy the vlib buffer as many times as the
+ * number of faces stored in the PIT entry, 2) store a clone/copy of the vlib
+ * buffer in the CS. Unless there are memory issues (no more vlib buffers
+ * available to perform the cloning/copy), a single received vlib buffer may
+ * result in several vlib buffers sent to the next vlib node
+ * (hicn4-iface-output or hicn6-iface-output).
*
- * It must be noted that cloning is possible only if the lentgh of the data pointed by
- * the vlib buffer is at least 256 bytes. This is due to an imposition in the vpp source
- * code. In all the other cases the vlib buffer is copied. Cloning is performed by advancing
- * the vlib buffer of 256 bytes and a new vlib buffer is created and chained in from of the received
- * buffer. Additionally, the 256 bytes removed (advanced) from the received vlib buffer are
- * copied in the head vlib buffer. In case of multiple cloning for the same vlib buffer, this
- * mechanism allows us to have a different hICN header for each clone (+ the same additional bytes
- * due to the vpp restriction on cloning).
+ * Note that cloning is possible only if the length of the data pointed to by
+ * the vlib buffer is at least 256 bytes. This is a constraint imposed by the
+ * vpp source code; in all other cases the vlib buffer is copied. Cloning is
+ * performed by advancing the vlib buffer by 256 bytes: a new vlib buffer is
+ * created and chained in front of the received buffer, and the 256 bytes
+ * removed (advanced) from the received vlib buffer are copied into the head
+ * vlib buffer. In case of multiple clones of the same vlib buffer, this
+ * mechanism allows us to have a different hICN header for each clone (plus
+ * the same additional bytes due to the vpp restriction on cloning).
*/
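A minimal sketch of the clone-vs-copy decision described above, assuming 64-byte cache lines so that the head advance plus the minimum cloneable remainder add up to the 256 bytes mentioned; it mirrors the check done in the data forwarding node and is not itself part of the patch:

always_inline int
hicn_data_is_cloneable_sketch (const vlib_buffer_t *b)
{
  /* Bytes advanced into (and kept in) the head buffer before cloning */
  word buffer_advance = CLIB_CACHE_LINE_BYTES * 2;

  /* Too short to advance and still leave a cloneable payload: copy instead */
  return b->current_length > (buffer_advance + (CLIB_CACHE_LINE_BYTES * 2));
}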
-
/* Trace context struct */
typedef struct
{
@@ -48,6 +50,7 @@ typedef struct
u32 sw_if_index;
u8 pkt_type;
u8 packet_data[64];
+ u32 face;
} hicn_data_fwd_trace_t;
typedef enum
@@ -60,6 +63,15 @@ typedef enum
HICN_DATA_FWD_N_NEXT,
} hicn_data_fwd_next_t;
+/*
+ * Node context data; we think this is per-thread/instance
+ */
+typedef struct hicn_data_fwd_runtime_s
+{
+ int id;
+ hicn_pit_cs_t *pitcs;
+} hicn_data_fwd_runtime_t;
+
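A minimal sketch of how the node function binds this runtime data, mirroring the code added to hicn_data_node_fn later in this patch (vm and node are the usual dispatcher arguments):

/* Inside the node function */
hicn_data_fwd_runtime_t *rt = vlib_node_get_runtime_data (vm, node->node_index);
if (PREDICT_FALSE (rt->pitcs == NULL))
  rt->pitcs = &hicn_main.pitcs; /* lazily bind to the global PIT/CS */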
/**
* @brief Create a maximum of 256 clones of buffer and store them
* in the supplied array. Unlike the original function in the vlib
@@ -76,7 +88,7 @@ typedef enum
* less than the number requested or zero
*/
always_inline u16
-vlib_buffer_clone_256_2 (vlib_main_t * vm, u32 src_buffer, u32 * buffers,
+vlib_buffer_clone_256_2 (vlib_main_t *vm, u32 src_buffer, u32 *buffers,
u16 n_buffers, u16 head_end_offset)
{
u16 i;
@@ -97,8 +109,8 @@ vlib_buffer_clone_256_2 (vlib_main_t * vm, u32 src_buffer, u32 * buffers,
}
return n_buffers;
}
- n_buffers = vlib_buffer_alloc_from_pool (vm, buffers, n_buffers,
- s->buffer_pool_index);
+ n_buffers =
+ vlib_buffer_alloc_from_pool (vm, buffers, n_buffers, s->buffer_pool_index);
for (i = 0; i < n_buffers; i++)
{
@@ -107,8 +119,8 @@ vlib_buffer_clone_256_2 (vlib_main_t * vm, u32 src_buffer, u32 * buffers,
d->current_length = head_end_offset;
d->trace_handle = s->trace_handle;
- d->total_length_not_including_first_buffer = s->current_length -
- head_end_offset;
+ d->total_length_not_including_first_buffer =
+ s->current_length - head_end_offset;
if (PREDICT_FALSE (s->flags & VLIB_BUFFER_NEXT_PRESENT))
{
d->total_length_not_including_first_buffer +=
@@ -149,7 +161,7 @@ vlib_buffer_clone_256_2 (vlib_main_t * vm, u32 src_buffer, u32 * buffers,
* less than the number requested or zero
*/
always_inline u16
-vlib_buffer_clone2 (vlib_main_t * vm, u32 src_buffer, u32 * buffers,
+vlib_buffer_clone2 (vlib_main_t *vm, u32 src_buffer, u32 *buffers,
u16 n_buffers, u16 head_end_offset)
{
vlib_buffer_t *s = vlib_get_buffer (vm, src_buffer);
@@ -174,11 +186,9 @@ vlib_buffer_clone2 (vlib_main_t * vm, u32 src_buffer, u32 * buffers,
vlib_buffer_t *copy;
/* Ok to call the original vlib_buffer_copy. */
copy = vlib_buffer_copy (vm, s);
- n_cloned += vlib_buffer_clone (vm,
- vlib_get_buffer_index (vm, copy),
- buffers,
- n_buffers - n_clone_src,
- head_end_offset);
+ n_cloned +=
+ vlib_buffer_clone (vm, vlib_get_buffer_index (vm, copy), buffers,
+ n_buffers - n_clone_src, head_end_offset);
n_buffers -= n_cloned;
}
/*
@@ -195,9 +205,7 @@ vlib_buffer_clone2 (vlib_main_t * vm, u32 src_buffer, u32 * buffers,
* complexity to the code, especially because we need to add 1 to
* ref_count when the packet is cloned.
*/
- n_cloned += vlib_buffer_clone_256_2 (vm,
- src_buffer,
- (buffers + n_cloned),
+ n_cloned += vlib_buffer_clone_256_2 (vm, src_buffer, (buffers + n_cloned),
n_buffers, head_end_offset);
s->ref_count += (tmp_ref_count - 1);
diff --git a/hicn-plugin/src/data_fwd_node.c b/hicn-plugin/src/data_fwd_node.c
index c65b62454..e3466c904 100644
--- a/hicn-plugin/src/data_fwd_node.c
+++ b/hicn-plugin/src/data_fwd_node.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Copyright (c) 2021-2022 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -33,292 +33,8 @@ static char *hicn_data_fwd_error_strings[] = {
/* Declarations */
always_inline void
-drop_packet (vlib_main_t * vm, u32 bi0,
- u32 * n_left_to_next, u32 * next0, u32 ** to_next,
- u32 * next_index, vlib_node_runtime_t * node);
-
-always_inline int
-hicn_satisfy_faces (vlib_main_t * vm, u32 b0,
- hicn_pcs_entry_t * pitp, u32 * n_left_to_next,
- u32 ** to_next, u32 * next_index,
- vlib_node_runtime_t * node, u8 isv6,
- vl_api_hicn_api_node_stats_get_reply_t * stats);
-
-always_inline void
-clone_data_to_cs (vlib_main_t * vm, hicn_pit_cs_t * pitcs,
- hicn_pcs_entry_t * pitp, hicn_header_t * hicn0, f64 tnow,
- hicn_hash_node_t * nodep, vlib_buffer_t * b0,
- hicn_hash_entry_t * hash_entry, u64 name_hash,
- hicn_buffer_t * hicnb, const hicn_dpo_vft_t * dpo_vft,
- dpo_id_t * hicn_dpo_id, hicn_lifetime_t dmsg_lifetime);
-
-
-/* packet trace format function */
-always_inline u8 *hicn_data_fwd_format_trace (u8 * s, va_list * args);
-
-vlib_node_registration_t hicn_data_fwd_node;
-
-/*
- * ICN forwarder node for interests: handling of Data delivered based on ACL.
- * - 1 packet at a time - ipv4/tcp ipv6/tcp
- */
-static uword
-hicn_data_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
- vlib_frame_t * frame)
-{
-
- u32 n_left_from, *from, *to_next;
- hicn_data_fwd_next_t next_index;
- hicn_pit_cs_t *pitcs = &hicn_main.pitcs;
- vl_api_hicn_api_node_stats_get_reply_t stats = { 0 };
- f64 tnow;
- u32 data_received = 1;
-
- from = vlib_frame_vector_args (frame);
- n_left_from = frame->n_vectors;
- next_index = node->cached_next_index;
-
- /* Capture time in vpp terms */
- tnow = vlib_time_now (vm);
-
- while (n_left_from > 0)
- {
- u32 n_left_to_next;
- vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
-
- while (n_left_from > 0 && n_left_to_next > 0)
- {
- vlib_buffer_t *b0;
- u8 isv6;
- u8 *nameptr;
- u16 namelen;
- u32 bi0;
- u32 next0 = HICN_DATA_FWD_NEXT_ERROR_DROP;
- hicn_name_t name;
- hicn_header_t *hicn0;
- hicn_buffer_t *hicnb0;
- hicn_hash_node_t *node0;
- const hicn_strategy_vft_t *strategy_vft0;
- const hicn_dpo_vft_t *dpo_vft0;
- u8 dpo_ctx_id0;
- hicn_pcs_entry_t *pitp;
- hicn_hash_entry_t *hash_entry0;
- int ret = HICN_ERROR_NONE;
-
- /* Prefetch for next iteration. */
- if (n_left_from > 1)
- {
- vlib_buffer_t *b1;
- b1 = vlib_get_buffer (vm, from[1]);
- CLIB_PREFETCH (b1, 2 * CLIB_CACHE_LINE_BYTES, STORE);
- CLIB_PREFETCH (b1->data, CLIB_CACHE_LINE_BYTES, STORE);
- }
- /* Dequeue a packet buffer */
- /*
- * Do not copy the index in the next buffer, we'll do
- * it later. The packet might be cloned, so the buffer to move
- * to next must be the cloned one
- */
- bi0 = from[0];
- from += 1;
- n_left_from -= 1;
-
- b0 = vlib_get_buffer (vm, bi0);
-
- /* Get hicn buffer and state */
- hicnb0 = hicn_get_buffer (b0);
- hicn_get_internal_state (hicnb0, pitcs, &node0, &strategy_vft0,
- &dpo_vft0, &dpo_ctx_id0, &hash_entry0);
-
- ret = hicn_data_parse_pkt (b0, &name, &namelen, &hicn0, &isv6);
- pitp = hicn_pit_get_data (node0);
- nameptr = (u8 *) (&name);
-
- if (PREDICT_FALSE
- (ret != HICN_ERROR_NONE
- || !hicn_node_compare (nameptr, namelen, node0)
- || (hash_entry0->he_flags & HICN_HASH_ENTRY_FLAG_CS_ENTRY)))
- {
- /*
- * Remove the lock acquired from
- * data_pcslookup node
- */
- dpo_id_t hicn_dpo_id0 = { dpo_vft0->hicn_dpo_get_type (), 0, 0,
- dpo_ctx_id0
- };
- hicn_pcs_remove_lock (pitcs, &pitp, &node0, vm,
- hash_entry0, dpo_vft0, &hicn_dpo_id0);
-
- drop_packet (vm, bi0, &n_left_to_next, &next0, &to_next,
- &next_index, node);
-
- goto end_processing;
- }
- /*
- * Check if the hit is instead a collision in the
- * hash table. Unlikely to happen.
- */
- /*
- * there is no guarantee that the type of entry has
- * not changed from the lookup.
- */
-
- if (tnow > pitp->shared.expire_time
- || (hash_entry0->he_flags & HICN_HASH_ENTRY_FLAG_DELETED))
- {
- dpo_id_t hicn_dpo_id0 =
- { dpo_vft0->hicn_dpo_get_type (), 0, 0, dpo_ctx_id0 };
- hicn_pcs_delete (pitcs, &pitp, &node0, vm, hash_entry0,
- dpo_vft0, &hicn_dpo_id0);
-
- drop_packet (vm, bi0, &n_left_to_next, &next0, &to_next,
- &next_index, node);
- stats.pit_expired_count++;
-
- if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) &&
- (b0->flags & VLIB_BUFFER_IS_TRACED)))
- {
- hicn_data_fwd_trace_t *t =
- vlib_add_trace (vm, node, b0, sizeof (*t));
- t->pkt_type = HICN_PKT_TYPE_CONTENT;
- t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
- t->next_index = next0;
- clib_memcpy (t->packet_data,
- vlib_buffer_get_current (b0),
- sizeof (t->packet_data));
- }
- }
- else
- {
- ASSERT ((hash_entry0->he_flags & HICN_HASH_ENTRY_FLAG_DELETED)
- == 0);
-
- data_received++;
- /*
- * We do not check if the data is coming from
- * the outgoing interest face.
- */
-
- /* Prepare the buffer for the cloning */
- ret = hicn_satisfy_faces (vm, bi0, pitp, &n_left_to_next,
- &to_next, &next_index, node,
- isv6, &stats);
-
- dpo_id_t hicn_dpo_id0 = { dpo_vft0->hicn_dpo_get_type (), 0, 0,
- dpo_ctx_id0
- };
-
- if (PREDICT_FALSE (ret != HICN_ERROR_NONE))
- {
- hicn_pcs_pit_delete (pitcs, &pitp, &node0, vm,
- hash_entry0, dpo_vft0, &hicn_dpo_id0);
- continue;
- }
- /*
- * Call the strategy callback since the
- * interest has been satisfied
- */
- strategy_vft0->hicn_receive_data (dpo_ctx_id0,
- pitp->u.pit.pe_txnh);
-
-#if HICN_FEATURE_CS
- hicn_lifetime_t dmsg_lifetime;
-
- hicn_type_t type = hicnb0->type;
- hicn_ops_vft[type.l1]->get_lifetime (type, &hicn0->protocol,
- &dmsg_lifetime);
-
- if (dmsg_lifetime)
- {
- /*
- * Clone data packet in the content store and
- * convert the PIT entry into a CS entry
- */
- clone_data_to_cs (vm, pitcs, pitp, hicn0, tnow, node0,
- b0, hash_entry0, hicnb0->name_hash,
- hicnb0, dpo_vft0, &hicn_dpo_id0,
- dmsg_lifetime);
-
- hicn_pcs_remove_lock (pitcs, &pitp, &node0, vm,
- hash_entry0, NULL, NULL);
- }
- else
- {
- /*
- * If the packet is copied and not cloned, we need to free the vlib_buffer
- */
- if (hicnb0->flags & HICN_BUFFER_FLAGS_PKT_LESS_TWO_CL)
- {
- vlib_buffer_free_one (vm, bi0);
- }
- else
- {
- /*
- * Remove one reference as the buffer is no
- * longer in any frame. The vlib_buffer will be freed when
- * all its cloned vlib_buffer will be freed.
- */
- b0->ref_count--;
- }
-
- /* Delete the PIT entry */
- hicn_pcs_pit_delete (pitcs, &pitp, &node0, vm,
- hash_entry0, dpo_vft0, &hicn_dpo_id0);
- }
-#else
- ASSERT (pitp == hicn_pit_get_data (node0));
- /*
- * If the packet is copied and not cloned, we need to free the vlib_buffer
- */
- if (hicnb0->flags & HICN_BUFFER_FLAGS_PKT_LESS_TWO_CL)
- {
- vlib_buffer_free_one (vm, bi0);
- }
- else
- {
- /*
- * Remove one reference as the buffer is no
- * longer in any frame. The vlib_buffer will be freed when
- * all its cloned vlib_buffer will be freed.
- */
- b0->ref_count--;
- }
-
- /* Delete the PIT entry */
- hicn_pcs_pit_delete (pitcs, &pitp, &node0, vm,
- hash_entry0, dpo_vft0, &hicn_dpo_id0);
-#endif
- }
- end_processing:
-
- /* Incr packet counter */
- stats.pkts_processed += 1;
- }
-
- vlib_put_next_frame (vm, node, next_index, n_left_to_next);
- }
- u32 pit_int_count = hicn_pit_get_int_count (pitcs);
- u32 pit_cs_count = hicn_pit_get_cs_count (pitcs);
-
- vlib_node_increment_counter (vm, hicn_data_fwd_node.index,
- HICNFWD_ERROR_DATAS, stats.pkts_data_count);
-
-
- update_node_counter (vm, hicn_data_fwd_node.index,
- HICNFWD_ERROR_INT_COUNT, pit_int_count);
- update_node_counter (vm, hicn_data_fwd_node.index,
- HICNFWD_ERROR_CS_COUNT, pit_cs_count);
- update_node_counter (vm, hicn_data_fwd_node.index,
- HICNFWD_ERROR_INTEREST_AGG_ENTRY,
- stats.pkts_data_count / data_received);
-
- return (frame->n_vectors);
-}
-
-always_inline void
-drop_packet (vlib_main_t * vm, u32 bi0,
- u32 * n_left_to_next, u32 * next0, u32 ** to_next,
- u32 * next_index, vlib_node_runtime_t * node)
+drop_packet (vlib_main_t *vm, u32 bi0, u32 *n_left_to_next, u32 *next0,
+ u32 **to_next, u32 *next_index, vlib_node_runtime_t *node)
{
*next0 = HICN_DATA_FWD_NEXT_ERROR_DROP;
@@ -326,23 +42,23 @@ drop_packet (vlib_main_t * vm, u32 bi0,
*to_next += 1;
*n_left_to_next -= 1;
- vlib_validate_buffer_enqueue_x1 (vm, node, *next_index,
- *to_next, *n_left_to_next, bi0, *next0);
+ vlib_validate_buffer_enqueue_x1 (vm, node, *next_index, *to_next,
+ *n_left_to_next, bi0, *next0);
}
always_inline int
-hicn_satisfy_faces (vlib_main_t * vm, u32 bi0,
- hicn_pcs_entry_t * pitp, u32 * n_left_to_next,
- u32 ** to_next, u32 * next_index,
- vlib_node_runtime_t * node, u8 isv6,
- vl_api_hicn_api_node_stats_get_reply_t * stats)
+hicn_satisfy_faces (vlib_main_t *vm, u32 bi0, hicn_pcs_entry_t *pitp,
+ u32 *n_left_to_next, u32 **to_next, u32 *next_index,
+ vlib_node_runtime_t *node, u8 isv6,
+ vl_api_hicn_api_node_stats_get_reply_t *stats)
{
int found = 0;
int ret = HICN_ERROR_NONE;
- u32 *clones = NULL, *header = NULL;
+ u32 inline_clones[HICN_FACE_DB_INLINE_FACES];
+ u32 *clones = inline_clones, *header = NULL;
u32 n_left_from = 0;
- u32 next0 = HICN_DATA_FWD_NEXT_ERROR_DROP, next1 =
- HICN_DATA_FWD_NEXT_ERROR_DROP;
+ u32 next0 = HICN_DATA_FWD_NEXT_ERROR_DROP,
+ next1 = HICN_DATA_FWD_NEXT_ERROR_DROP;
word buffer_advance = CLIB_CACHE_LINE_BYTES * 2;
/*
@@ -354,8 +70,11 @@ hicn_satisfy_faces (vlib_main_t * vm, u32 bi0,
* need to be careful to clone it only 254 times as the buffer
* already has n_add_reds=1.
*/
- vec_alloc (clones, pitp->u.pit.faces.n_faces);
- header = clones;
+ if (hicn_pcs_entry_pit_get_n_faces (pitp) > HICN_FACE_DB_INLINE_FACES)
+ {
+ vec_alloc (clones, hicn_pcs_entry_pit_get_n_faces (pitp));
+ header = clones;
+ }
/* Clone bi0 */
vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
@@ -363,40 +82,40 @@ hicn_satisfy_faces (vlib_main_t * vm, u32 bi0,
hicn_buffer_t *hicnb = hicn_get_buffer (b0);
/*
- * Mark the buffer as smaller than TWO_CL. It will be stored as is in the CS, without excluding
- * the hicn_header. Cloning is not possible, it will be copied.
+ * Mark the buffer as smaller than TWO_CL. It will be stored as-is in the CS,
+ * without excluding the hicn_header. Cloning is not possible, so it will be
+ * copied.
*/
if (b0->current_length <= (buffer_advance + (CLIB_CACHE_LINE_BYTES * 2)))
{
- /* In this case the packet is copied. We don't need to add a reference as no buffer are
- * chained to it.
+ /* In this case the packet is copied. We don't need to add a reference as
+ * no buffers are chained to it.
*/
hicnb->flags |= HICN_BUFFER_FLAGS_PKT_LESS_TWO_CL;
}
else
{
/* Add one reference to maintain the buffer in the CS.
- * b0->ref_count == 0 has two meaning: it has 1 buffer or no buffer chained to it.
- * vlib_buffer_clone2 add a number of reference equalt to pitp->u.pit.faces.n_faces - 1
- * as vlib_buffer_clone does. So after all the packet are forwarded the buffer stored in
- * the CS will have ref_count == 0;
+ * b0->ref_count == 0 has two meanings: the buffer has either one buffer or
+ * no buffer chained to it. vlib_buffer_clone2 adds a number of references
+ * equal to pitp->u.pit.faces.n_faces - 1, as vlib_buffer_clone does. So after
+ * all the packets are forwarded, the buffer stored in the CS will have
+ * ref_count == 0.
*/
b0->ref_count++;
}
found = n_left_from =
- vlib_buffer_clone2 (vm, bi0, clones, pitp->u.pit.faces.n_faces,
- buffer_advance);
+ vlib_buffer_clone2 (vm, bi0, clones, pitp->u.pit.n_faces, buffer_advance);
- ASSERT (n_left_from == pitp->u.pit.faces.n_faces);
+ ASSERT (n_left_from == hicn_pcs_entry_pit_get_n_faces (pitp));
/* Index to iterate over the faces */
int i = 0;
while (n_left_from > 0)
{
-
- //Dual loop, X2
+ // Dual loop, X2
while (n_left_from >= 4 && *n_left_to_next >= 2)
{
vlib_buffer_t *h0, *h1;
@@ -412,8 +131,11 @@ hicn_satisfy_faces (vlib_main_t * vm, u32 bi0,
CLIB_PREFETCH (h3, 2 * CLIB_CACHE_LINE_BYTES, STORE);
}
- face0 = hicn_face_db_get_dpo_face (i++, &pitp->u.pit.faces);
- face1 = hicn_face_db_get_dpo_face (i++, &pitp->u.pit.faces);
+ face0 = hicn_pcs_entry_pit_get_dpo_face (pitp, i);
+ face1 = hicn_pcs_entry_pit_get_dpo_face (pitp, i + 1);
+
+ // Increment index
+ i += 2;
h0 = vlib_get_buffer (vm, clones[0]);
h1 = vlib_get_buffer (vm, clones[1]);
@@ -426,11 +148,11 @@ hicn_satisfy_faces (vlib_main_t * vm, u32 bi0,
clones += 2;
next0 = isv6 ? HICN_DATA_FWD_NEXT_IFACE6_OUT :
- HICN_DATA_FWD_NEXT_IFACE4_OUT;
+ HICN_DATA_FWD_NEXT_IFACE4_OUT;
next1 = isv6 ? HICN_DATA_FWD_NEXT_IFACE6_OUT :
- HICN_DATA_FWD_NEXT_IFACE4_OUT;
+ HICN_DATA_FWD_NEXT_IFACE4_OUT;
- vnet_buffer (h0)->ip.adj_index[VLIB_TX] = face0;
+ vnet_buffer (h0)->ip.adj_index[VLIB_TX] = face0;
vnet_buffer (h1)->ip.adj_index[VLIB_TX] = face1;
stats->pkts_data_count += 2;
@@ -440,11 +162,11 @@ hicn_satisfy_faces (vlib_main_t * vm, u32 bi0,
{
hicn_data_fwd_trace_t *t =
vlib_add_trace (vm, node, h0, sizeof (*t));
- t->pkt_type = HICN_PKT_TYPE_CONTENT;
+ t->face = face0;
+ t->pkt_type = HICN_PACKET_TYPE_DATA;
t->sw_if_index = vnet_buffer (h0)->sw_if_index[VLIB_RX];
t->next_index = next0;
- clib_memcpy (t->packet_data,
- vlib_buffer_get_current (h0),
+ clib_memcpy (t->packet_data, vlib_buffer_get_current (h0),
sizeof (t->packet_data));
}
if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) &&
@@ -452,26 +174,26 @@ hicn_satisfy_faces (vlib_main_t * vm, u32 bi0,
{
hicn_data_fwd_trace_t *t =
vlib_add_trace (vm, node, h1, sizeof (*t));
- t->pkt_type = HICN_PKT_TYPE_CONTENT;
+ t->face = face1;
+ t->pkt_type = HICN_PACKET_TYPE_DATA;
t->sw_if_index = vnet_buffer (h1)->sw_if_index[VLIB_RX];
t->next_index = next1;
- clib_memcpy (t->packet_data,
- vlib_buffer_get_current (h1),
+ clib_memcpy (t->packet_data, vlib_buffer_get_current (h1),
sizeof (t->packet_data));
}
- vlib_validate_buffer_enqueue_x2 (vm, node, *next_index,
- (*to_next), *n_left_to_next,
- hi0, hi1, next0, next1);
+ vlib_validate_buffer_enqueue_x2 (vm, node, *next_index, (*to_next),
+ *n_left_to_next, hi0, hi1, next0,
+ next1);
}
-
while (n_left_from > 0 && *n_left_to_next > 0)
{
vlib_buffer_t *h0;
u32 hi0;
hicn_face_id_t face0;
- face0 = hicn_face_db_get_dpo_face (i++, &pitp->u.pit.faces);
+ face0 = hicn_pcs_entry_pit_get_dpo_face (pitp, i);
+ i += 1;
h0 = vlib_get_buffer (vm, clones[0]);
@@ -482,7 +204,7 @@ hicn_satisfy_faces (vlib_main_t * vm, u32 bi0,
clones += 1;
next0 = isv6 ? HICN_DATA_FWD_NEXT_IFACE6_OUT :
- HICN_DATA_FWD_NEXT_IFACE4_OUT;
+ HICN_DATA_FWD_NEXT_IFACE4_OUT;
vnet_buffer (h0)->ip.adj_index[VLIB_TX] = face0;
stats->pkts_data_count++;
@@ -492,11 +214,11 @@ hicn_satisfy_faces (vlib_main_t * vm, u32 bi0,
{
hicn_data_fwd_trace_t *t =
vlib_add_trace (vm, node, h0, sizeof (*t));
- t->pkt_type = HICN_PKT_TYPE_CONTENT;
+ t->face = face0;
+ t->pkt_type = HICN_PACKET_TYPE_DATA;
t->sw_if_index = vnet_buffer (h0)->sw_if_index[VLIB_RX];
t->next_index = next0;
- clib_memcpy (t->packet_data,
- vlib_buffer_get_current (h0),
+ clib_memcpy (t->packet_data, vlib_buffer_get_current (h0),
sizeof (t->packet_data));
}
/*
@@ -507,10 +229,8 @@ hicn_satisfy_faces (vlib_main_t * vm, u32 bi0,
* Fix in case of a wrong speculation. Needed to
* clone the data in the right frame
*/
- vlib_validate_buffer_enqueue_x1 (vm, node, *next_index,
- *to_next, *n_left_to_next,
- hi0, next0);
-
+ vlib_validate_buffer_enqueue_x1 (vm, node, *next_index, *to_next,
+ *n_left_to_next, hi0, next0);
}
/* Ensure that there is space for the next clone (if any) */
@@ -528,70 +248,262 @@ hicn_satisfy_faces (vlib_main_t * vm, u32 bi0,
if (PREDICT_FALSE (!found))
{
ASSERT (0);
- drop_packet (vm, bi0, n_left_to_next, &next0, to_next, next_index,
- node);
+ drop_packet (vm, bi0, n_left_to_next, &next0, to_next, next_index, node);
ret = HICN_ERROR_FACE_NOT_FOUND;
}
return ret;
}
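The enqueue logic in hicn_satisfy_faces above follows VPP's usual dual/single loop shape; stripped of the hICN specifics it looks roughly like this (bodies elided, illustrative only):

while (n_left_from > 0)
  {
    /* Two clones per iteration while input and output frame both have room */
    while (n_left_from >= 4 && *n_left_to_next >= 2)
      {
        /* prefetch clones[2]/clones[3], pick the two faces, set next0/next1,
         * then vlib_validate_buffer_enqueue_x2 () */
      }

    /* Drain the remainder one clone at a time */
    while (n_left_from > 0 && *n_left_to_next > 0)
      {
        /* pick the face, set next0, then vlib_validate_buffer_enqueue_x1 () */
      }

    /* If clones remain, request a fresh next frame and continue */
  }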
always_inline void
-clone_data_to_cs (vlib_main_t * vm, hicn_pit_cs_t * pitcs,
- hicn_pcs_entry_t * pitp, hicn_header_t * hicn0, f64 tnow,
- hicn_hash_node_t * nodep, vlib_buffer_t * b0,
- hicn_hash_entry_t * hash_entry, u64 name_hash,
- hicn_buffer_t * hicnb, const hicn_dpo_vft_t * dpo_vft,
- dpo_id_t * hicn_dpo_id, hicn_lifetime_t dmsg_lifetime)
+clone_data_to_cs (hicn_pit_cs_t *pitcs, hicn_pcs_entry_t *pcs_entry,
+ u32 buffer_index, f64 tnow, hicn_lifetime_t dmsg_lifetime)
{
/*
* At this point we think we're safe to proceed. Store the CS buf in
* the PIT/CS hashtable entry
*/
- /*
- * Start turning the PIT into a CS. Note that we may be stepping on
- * the PIT part of the union as we update the CS part, so don't
- * expect the PIT part to be valid after this point.
- */
- hicn_buffer_t *hicnb0 = hicn_get_buffer (b0);
- hicn_pit_to_cs (vm, pitcs, pitp, hash_entry, nodep, dpo_vft, hicn_dpo_id,
- hicnb->face_id,
- hicnb0->flags & HICN_BUFFER_FLAGS_FACE_IS_APP);
-
- pitp->shared.create_time = tnow;
+ // Start turning the PIT into a CS. Note that we may be stepping on the PIT
+ // part of the union as we update the CS part, so don't expect the PIT part
+ // to be valid after this point.
+ hicn_pit_to_cs (pitcs, pcs_entry, buffer_index);
+ hicn_pcs_entry_set_create_time (pcs_entry, tnow);
- if (dmsg_lifetime < HICN_PARAM_CS_LIFETIME_MIN
- || dmsg_lifetime > HICN_PARAM_CS_LIFETIME_MAX)
+ if (dmsg_lifetime < HICN_PARAM_CS_LIFETIME_MIN ||
+ dmsg_lifetime > HICN_PARAM_CS_LIFETIME_MAX)
{
dmsg_lifetime = HICN_PARAM_CS_LIFETIME_DFLT;
}
- pitp->shared.expire_time = hicn_pcs_get_exp_time (tnow, dmsg_lifetime);
-
- /* Store the original packet buffer in the CS node */
- pitp->u.cs.cs_pkt_buf = vlib_get_buffer_index (vm, b0);
+ hicn_pcs_entry_set_expire_time (pcs_entry,
+ hicn_pcs_get_exp_time (tnow, dmsg_lifetime));
}
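The lifetime handling above amounts to a small clamping helper; a sketch under the plugin's HICN_PARAM_CS_LIFETIME_* constants (not an actual function in the plugin):

static inline hicn_lifetime_t
hicn_cs_lifetime_clamp_sketch (hicn_lifetime_t dmsg_lifetime)
{
  if (dmsg_lifetime < HICN_PARAM_CS_LIFETIME_MIN ||
      dmsg_lifetime > HICN_PARAM_CS_LIFETIME_MAX)
    return HICN_PARAM_CS_LIFETIME_DFLT;

  return dmsg_lifetime;
}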
/* packet trace format function */
always_inline u8 *
-hicn_data_fwd_format_trace (u8 * s, va_list * args)
+hicn_data_fwd_format_trace (u8 *s, va_list *args)
{
CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
hicn_data_fwd_trace_t *t = va_arg (*args, hicn_data_fwd_trace_t *);
u32 indent = format_get_indent (s);
- s = format (s, "DATAFWD: pkt: %d, sw_if_index %d, next index %d\n",
- (int) t->pkt_type, t->sw_if_index, t->next_index);
+ s = format (s, "DATAFWD: pkt: %d, sw_if_index %d, next index %d, face %d\n",
+ (int) t->pkt_type, t->sw_if_index, t->next_index, t->face);
- s = format (s, "%U%U", format_white_space, indent,
- format_ip6_header, t->packet_data, sizeof (t->packet_data));
+ s = format (s, "%U%U", format_white_space, indent, format_ip6_header,
+ t->packet_data, sizeof (t->packet_data));
return (s);
}
+vlib_node_registration_t hicn_data_fwd_node;
+
+/*
+ * ICN forwarder node for data packets: handling of Data delivered based on ACL.
+ * - 1 packet at a time - ipv4/tcp ipv6/tcp
+ */
+static uword
+hicn_data_node_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
+ vlib_frame_t *frame)
+{
+
+ u32 n_left_from, *from, *to_next;
+ hicn_data_fwd_next_t next_index;
+ hicn_data_fwd_runtime_t *rt;
+ vl_api_hicn_api_node_stats_get_reply_t stats = { 0 };
+ f64 tnow;
+ u32 data_received = 1;
+ vlib_buffer_t *b0;
+ u8 isv6;
+ u32 bi0;
+ u32 next0 = HICN_DATA_FWD_NEXT_ERROR_DROP;
+ hicn_buffer_t *hicnb0;
+ const hicn_strategy_vft_t *strategy_vft0 = NULL;
+ const hicn_dpo_vft_t *dpo_vft0;
+ u8 dpo_ctx_id0 = ~0;
+ u32 pcs_entry_id;
+ hicn_pcs_entry_t *pcs_entry = NULL;
+ hicn_lifetime_t dmsg_lifetime;
+ int ret = HICN_ERROR_NONE;
+
+ rt = vlib_node_get_runtime_data (vm, node->node_index);
+
+ if (PREDICT_FALSE (rt->pitcs == NULL))
+ {
+ rt->pitcs = &hicn_main.pitcs;
+ }
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+
+ /* Capture time in vpp terms */
+ tnow = vlib_time_now (vm);
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ /* Prefetch for next iteration. */
+ if (n_left_from > 1)
+ {
+ vlib_buffer_t *b1;
+ b1 = vlib_get_buffer (vm, from[1]);
+ CLIB_PREFETCH (b1, 2 * CLIB_CACHE_LINE_BYTES, STORE);
+ }
+
+ // Dequeue a packet buffer. Do not copy the index in the next buffer,
+ // we'll do it later. The packet might be cloned, so the buffer to
+ // move to next must be the cloned one
+ bi0 = from[0];
+ from += 1;
+ n_left_from -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ // Get hicn buffer and state
+ hicnb0 = hicn_get_buffer (b0);
+
+ hicn_get_internal_state (hicnb0, &pcs_entry_id, &strategy_vft0,
+ &dpo_vft0, &dpo_ctx_id0);
+
+ // Get PCS entry
+ pcs_entry =
+ hicn_pcs_entry_get_entry_from_index_safe (rt->pitcs, pcs_entry_id);
+
+ if (PREDICT_FALSE (pcs_entry == NULL))
+ {
+ drop_packet (vm, bi0, &n_left_to_next, &next0, &to_next,
+ &next_index, node);
+
+ goto end_processing;
+ }
+
+ isv6 = hicn_buffer_is_v6 (b0);
+
+ // If PCS entry is CS, drop the packet
+ if (PREDICT_FALSE (hicn_pcs_entry_is_cs (pcs_entry)))
+ {
+ drop_packet (vm, bi0, &n_left_to_next, &next0, &to_next,
+ &next_index, node);
+
+ goto end_processing;
+ }
+
+ // We are sure the entry is a PIT entry. Check whether it is expired.
+ if (tnow > hicn_pcs_entry_get_expire_time (pcs_entry))
+ {
+ // Entry expired. Release lock
+ hicn_pcs_entry_remove_lock (rt->pitcs, pcs_entry);
+
+ // Drop packet
+ drop_packet (vm, bi0, &n_left_to_next, &next0, &to_next,
+ &next_index, node);
+
+ // Update stats
+ stats.pit_expired_count++;
+
+ // Trace
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) &&
+ (b0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ hicn_data_fwd_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->pkt_type = HICN_PACKET_TYPE_DATA;
+ t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+ t->next_index = next0;
+ clib_memcpy (t->packet_data, vlib_buffer_get_current (b0),
+ sizeof (t->packet_data));
+ }
+ }
+ else
+ {
+ // Update stats
+ data_received++;
+
+ /*
+ * We do not check if the data is coming from
+ * the outgoing interest face.
+ */
+
+ // Prepare the buffer for the cloning
+ ret =
+ hicn_satisfy_faces (vm, bi0, pcs_entry, &n_left_to_next,
+ &to_next, &next_index, node, isv6, &stats);
+
+ if (PREDICT_FALSE (ret != HICN_ERROR_NONE))
+ {
+ hicn_pcs_entry_remove_lock (rt->pitcs, pcs_entry);
+ continue;
+ }
+
+ // Call the strategy callback since the interest has been
+ // satisfied
+ strategy_vft0->hicn_receive_data (
+ dpo_ctx_id0, vnet_buffer (b0)->ip.adj_index[VLIB_RX]);
+
+ dmsg_lifetime = hicn_buffer_get_lifetime (b0);
+
+ if (dmsg_lifetime)
+ {
+ // Clone data packet in the content store and convert the PIT
+ // entry into a CS entry
+ clone_data_to_cs (rt->pitcs, pcs_entry, bi0, tnow,
+ dmsg_lifetime);
+ }
+ else
+ {
+ /*
+ * If the packet is copied and not cloned, we need to free
+ * the vlib_buffer
+ */
+ if (hicnb0->flags & HICN_BUFFER_FLAGS_PKT_LESS_TWO_CL)
+ {
+ vlib_buffer_free_one (vm, bi0);
+ }
+ else
+ {
+ /*
+ * Remove one reference as the buffer is no
+ * longer in any frame. The vlib_buffer will be freed
+ * once all its cloned vlib_buffers have been freed.
+ */
+ b0->ref_count--;
+ }
+ // Delete the PIT entry
+ hicn_pcs_entry_remove_lock (rt->pitcs, pcs_entry);
+ }
+ }
+ end_processing:
+
+ /* Incr packet counter */
+ stats.pkts_processed += 1;
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+ u32 pit_int_count = hicn_pcs_get_pit_count (rt->pitcs);
+ u32 pit_cs_count = hicn_pcs_get_cs_count (rt->pitcs);
+
+ vlib_node_increment_counter (vm, hicn_data_fwd_node.index,
+ HICNFWD_ERROR_DATAS, stats.pkts_data_count);
+
+ update_node_counter (vm, hicn_data_fwd_node.index, HICNFWD_ERROR_INT_COUNT,
+ pit_int_count);
+ update_node_counter (vm, hicn_data_fwd_node.index, HICNFWD_ERROR_CS_COUNT,
+ pit_cs_count);
+ update_node_counter (vm, hicn_data_fwd_node.index,
+ HICNFWD_ERROR_INTEREST_AGG_ENTRY,
+ stats.pkts_data_count / data_received);
+
+ return (frame->n_vectors);
+}
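Condensed, the per-packet decision flow of hicn_data_node_fn above is (a summary of the code, no new behavior):

/*
 *   look up the PCS entry referenced by the buffer metadata
 *     - entry missing or already a CS entry ... drop
 *     - PIT entry but expired ................. release the lock, drop,
 *                                               count pit_expired
 *     - PIT entry, valid ...................... hicn_satisfy_faces () to all
 *                                               faces, notify the strategy,
 *                                               then clone_data_to_cs () if
 *                                               the Data carries a lifetime,
 *                                               otherwise free the buffer and
 *                                               remove the PIT entry
 */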
+
/*
* Node registration for the data forwarder node
*/
-/* *INDENT-OFF* */
VLIB_REGISTER_NODE(hicn_data_fwd_node) =
{
.function = hicn_data_node_fn,
@@ -611,7 +523,6 @@ VLIB_REGISTER_NODE(hicn_data_fwd_node) =
[HICN_DATA_FWD_NEXT_ERROR_DROP] = "error-drop",
},
};
-/* *INDENT-ON* */
/*
* fd.io coding-style-patch-verification: ON
diff --git a/hicn-plugin/src/data_input_node.c b/hicn-plugin/src/data_input_node.c
index 8d20f54a6..a91f9156d 100644
--- a/hicn-plugin/src/data_input_node.c
+++ b/hicn-plugin/src/data_input_node.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020 Cisco and/or its affiliates.
+ * Copyright (c) 2021 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -16,8 +16,8 @@
#include <vnet/vnet.h>
#include <vnet/ip/ip.h>
#include <vnet/fib/ip6_fib.h>
-#include <vnet/fib/fib_table.h> /* for FIB table and entry creation */
-#include <vnet/fib/fib_entry.h> /* for FIB table and entry creation */
+#include <vnet/fib/fib_table.h> /* for FIB table and entry creation */
+#include <vnet/fib/fib_entry.h> /* for FIB table and entry creation */
#include <vnet/fib/ip4_fib.h>
#include <vnet/dpo/load_balance.h>
@@ -61,7 +61,7 @@ vlib_node_registration_t hicn_data_input_ip6_node;
vlib_node_registration_t hicn_data_input_ip4_node;
static __clib_unused u8 *
-format_hicn_data_input_trace (u8 * s, va_list * args)
+format_hicn_data_input_trace (u8 *s, va_list *args)
{
CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
@@ -69,22 +69,59 @@ format_hicn_data_input_trace (u8 * s, va_list * args)
u32 indent = format_get_indent (s);
u8 isv6 = (u8) va_arg (*args, int);
- s =
- format (s, "%U hicn_data_input%s: sw_if_index %d next-index %d",
- format_white_space, indent, isv6 ? "_ip6" : "_ip4",
- t->sw_if_index, t->next_index);
+ s = format (s, "%U hicn_data_input%s: sw_if_index %d next-index %d",
+ format_white_space, indent, isv6 ? "_ip6" : "_ip4",
+ t->sw_if_index, t->next_index);
return s;
}
+static void
+hicn_data_input_set_adj_index (vlib_buffer_t *b,
+ const ip46_address_t *dst_addr,
+ const hicn_dpo_ctx_t *dpo_ctx)
+{
+ CLIB_UNUSED (u8 set) = 0;
+ u32 *adj_index = &vnet_buffer (b)->ip.adj_index[VLIB_RX];
+
+ if (*adj_index != ADJ_INDEX_INVALID)
+ {
+ return;
+ }
+
+ for (u8 pos = 0; pos < dpo_ctx->entry_count; pos++)
+ {
+ hicn_face_t *face = hicn_dpoi_get_from_idx (dpo_ctx->next_hops[pos]);
+ assert (face);
+
+ if (ip46_address_cmp (&(face->nat_addr), dst_addr) == 0)
+ {
+ *adj_index = face->dpo.dpoi_index;
+ set = 1;
+ break;
+ }
+ }
+
+ ASSERT (set == 1);
+}
+
static uword
-hicn_data_input_ip6_fn (vlib_main_t * vm,
- vlib_node_runtime_t * node, vlib_frame_t * frame)
+hicn_data_input_ip6_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
+ vlib_frame_t *frame)
{
ip6_main_t *im = &ip6_main;
vlib_combined_counter_main_t *cm = &load_balance_main.lbm_to_counters;
u32 n_left_from, n_left_to_next, *from, *to_next;
ip_lookup_next_t next;
u32 thread_index = vm->thread_index;
+ vlib_buffer_t *p0, *p1;
+ u32 pi0, pi1, lbi0, lbi1, wrong_next;
+ ip_lookup_next_t next0, next1;
+ ip6_header_t *ip0, *ip1;
+ ip6_address_t *src_addr0, *src_addr1;
+ ip46_address_t dst_addr0, dst_addr1;
+ const dpo_id_t *dpo0, *dpo1;
+ const load_balance_t *lb0, *lb1;
+ hicn_buffer_t *hicnb0, *hicnb1;
from = vlib_frame_vector_args (frame);
n_left_from = frame->n_vectors;
@@ -96,14 +133,6 @@ hicn_data_input_ip6_fn (vlib_main_t * vm,
while (n_left_from >= 4 && n_left_to_next >= 2)
{
- vlib_buffer_t *p0, *p1;
- u32 pi0, pi1, lbi0, lbi1, wrong_next;
- ip_lookup_next_t next0, next1;
- ip6_header_t *ip0, *ip1;
- ip6_address_t *src_addr0, *src_addr1;
- const dpo_id_t *dpo0, *dpo1;
- const load_balance_t *lb0, *lb1;
-
/* Prefetch next iteration. */
{
vlib_buffer_t *p2, *p3;
@@ -123,12 +152,18 @@ hicn_data_input_ip6_fn (vlib_main_t * vm,
p0 = vlib_get_buffer (vm, pi0);
p1 = vlib_get_buffer (vm, pi1);
+ hicnb0 = hicn_get_buffer (p0);
+ hicnb1 = hicn_get_buffer (p1);
+
ip0 = vlib_buffer_get_current (p0);
ip1 = vlib_buffer_get_current (p1);
src_addr0 = &ip0->src_address;
src_addr1 = &ip1->src_address;
+ ip46_address_set_ip6 (&dst_addr0, &ip0->dst_address);
+ ip46_address_set_ip6 (&dst_addr1, &ip1->dst_address);
+
ip_lookup_set_buffer_fib_index (im->fib_index_by_sw_if_index, p0);
ip_lookup_set_buffer_fib_index (im->fib_index_by_sw_if_index, p1);
@@ -146,19 +181,29 @@ hicn_data_input_ip6_fn (vlib_main_t * vm,
vnet_buffer (p0)->ip.flow_hash = vnet_buffer (p1)->ip.flow_hash = 0;
- //No vpp loadbalancing. Missing header file to exploit it
+ // No vpp loadbalancing. Missing header file to exploit it
dpo0 = load_balance_get_bucket_i (lb0, 0);
dpo1 = load_balance_get_bucket_i (lb1, 0);
if (dpo_is_hicn (dpo0))
- next0 = HICN_DATA_INPUT_IP6_NEXT_FACE;
+ {
+ next0 = (ip_lookup_next_t) HICN_DATA_INPUT_IP6_NEXT_FACE;
+ hicnb0->dpo_ctx_id = dpo0->dpoi_index;
+ hicn_data_input_set_adj_index (
+ p0, &dst_addr0, hicn_strategy_dpo_ctx_get (dpo0->dpoi_index));
+ }
else
- next0 = HICN_DATA_INPUT_IP6_NEXT_IP6_LOCAL;
+ next0 = (ip_lookup_next_t) HICN_DATA_INPUT_IP6_NEXT_IP6_LOCAL;
if (dpo_is_hicn (dpo1))
- next1 = HICN_DATA_INPUT_IP6_NEXT_FACE;
+ {
+ next1 = (ip_lookup_next_t) HICN_DATA_INPUT_IP6_NEXT_FACE;
+ hicnb1->dpo_ctx_id = dpo1->dpoi_index;
+ hicn_data_input_set_adj_index (
+ p1, &dst_addr1, hicn_strategy_dpo_ctx_get (dpo1->dpoi_index));
+ }
else
- next1 = HICN_DATA_INPUT_IP6_NEXT_IP6_LOCAL;
+ next1 = (ip_lookup_next_t) HICN_DATA_INPUT_IP6_NEXT_IP6_LOCAL;
if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE) &&
(p0->flags & VLIB_BUFFER_IS_TRACED))
@@ -180,11 +225,10 @@ hicn_data_input_ip6_fn (vlib_main_t * vm,
t->isv6 = 1;
}
-
- vlib_increment_combined_counter
- (cm, thread_index, lbi0, 1, vlib_buffer_length_in_chain (vm, p0));
- vlib_increment_combined_counter
- (cm, thread_index, lbi1, 1, vlib_buffer_length_in_chain (vm, p1));
+ vlib_increment_combined_counter (
+ cm, thread_index, lbi0, 1, vlib_buffer_length_in_chain (vm, p0));
+ vlib_increment_combined_counter (
+ cm, thread_index, lbi1, 1, vlib_buffer_length_in_chain (vm, p1));
from += 2;
to_next += 2;
@@ -236,15 +280,19 @@ hicn_data_input_ip6_fn (vlib_main_t * vm,
u32 pi0, lbi0;
ip_lookup_next_t next0;
load_balance_t *lb0;
+ hicn_buffer_t *hicnb0;
ip6_address_t *src_addr0;
+ ip46_address_t dst_addr0;
const dpo_id_t *dpo0;
pi0 = from[0];
to_next[0] = pi0;
p0 = vlib_get_buffer (vm, pi0);
+ hicnb0 = hicn_get_buffer (p0);
ip0 = vlib_buffer_get_current (p0);
src_addr0 = &ip0->src_address;
+ ip46_address_set_ip6 (&dst_addr0, &ip0->dst_address);
ip_lookup_set_buffer_fib_index (im->fib_index_by_sw_if_index, p0);
lbi0 = ip6_fib_table_fwding_lookup (vnet_buffer (p0)->ip.fib_index,
src_addr0);
@@ -253,13 +301,18 @@ hicn_data_input_ip6_fn (vlib_main_t * vm,
ASSERT (lb0->lb_n_buckets > 0);
ASSERT (is_pow2 (lb0->lb_n_buckets));
- //No vpp loadbalancing. Missing header file to exploit it
+ // No vpp loadbalancing. Missing header file to exploit it
dpo0 = load_balance_get_bucket_i (lb0, 0);
if (dpo_is_hicn (dpo0))
- next0 = HICN_DATA_INPUT_IP6_NEXT_FACE;
+ {
+ next0 = (ip_lookup_next_t) HICN_DATA_INPUT_IP6_NEXT_FACE;
+ hicn_data_input_set_adj_index (
+ p0, &dst_addr0, hicn_strategy_dpo_ctx_get (dpo0->dpoi_index));
+ hicnb0->dpo_ctx_id = dpo0->dpoi_index;
+ }
else
- next0 = HICN_DATA_INPUT_IP6_NEXT_IP6_LOCAL;
+ next0 = (ip_lookup_next_t) HICN_DATA_INPUT_IP6_NEXT_IP6_LOCAL;
if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE) &&
(p0->flags & VLIB_BUFFER_IS_TRACED))
@@ -271,8 +324,8 @@ hicn_data_input_ip6_fn (vlib_main_t * vm,
t->isv6 = 1;
}
- vlib_increment_combined_counter
- (cm, thread_index, lbi0, 1, vlib_buffer_length_in_chain (vm, p0));
+ vlib_increment_combined_counter (
+ cm, thread_index, lbi0, 1, vlib_buffer_length_in_chain (vm, p0));
from += 1;
to_next += 1;
@@ -297,39 +350,29 @@ hicn_data_input_ip6_fn (vlib_main_t * vm,
return frame->n_vectors;
}
-/* *INDENT-OFF* */
-VLIB_REGISTER_NODE(hicn_data_input_ip6) =
- {
- .function = hicn_data_input_ip6_fn,
- .name = "hicn-data-input-ip6",
- .vector_size = sizeof(u32),
- .format_trace = format_hicn_data_input_trace,
- .type = VLIB_NODE_TYPE_INTERNAL,
- .n_errors = ARRAY_LEN(hicn_data_input_error_strings),
- .error_strings = hicn_data_input_error_strings,
- .n_next_nodes = HICN_DATA_INPUT_IP6_N_NEXT,
- .next_nodes =
- {
- [HICN_DATA_INPUT_IP6_NEXT_FACE] = "hicn6-face-input",
- [HICN_DATA_INPUT_IP6_NEXT_IP6_LOCAL] = "ip6-local-end-of-arc"
- },
- };
-/* *INDENT-ON* */
-
-/* *INDENT-OFF* */
-VNET_FEATURE_INIT(hicn_data_input_ip6_arc, static)=
- {
- .arc_name = "ip6-local",
- .node_name = "hicn-data-input-ip6",
- .runs_before = VNET_FEATURES("ip6-local-end-of-arc"),
- };
-/* *INDENT-ON* */
-
+VLIB_REGISTER_NODE (hicn_data_input_ip6) = {
+ .function = hicn_data_input_ip6_fn,
+ .name = "hicn-data-input-ip6",
+ .vector_size = sizeof (u32),
+ .format_trace = format_hicn_data_input_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+ .n_errors = ARRAY_LEN (hicn_data_input_error_strings),
+ .error_strings = hicn_data_input_error_strings,
+ .n_next_nodes = HICN_DATA_INPUT_IP6_N_NEXT,
+ .next_nodes = { [HICN_DATA_INPUT_IP6_NEXT_FACE] = "hicn6-face-input",
+ [HICN_DATA_INPUT_IP6_NEXT_IP6_LOCAL] =
+ "ip6-local-end-of-arc" },
+};
+VNET_FEATURE_INIT (hicn_data_input_ip6_arc, static) = {
+ .arc_name = "ip6-local",
+ .node_name = "hicn-data-input-ip6",
+ .runs_before = VNET_FEATURES ("ip6-local-end-of-arc")
+};
always_inline uword
-hicn_data_input_ip4_fn (vlib_main_t * vm,
- vlib_node_runtime_t * node, vlib_frame_t * frame)
+hicn_data_input_ip4_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
+ vlib_frame_t *frame)
{
ip4_main_t *im = &ip4_main;
vlib_combined_counter_main_t *cm = &load_balance_main.lbm_to_counters;
@@ -338,6 +381,13 @@ hicn_data_input_ip4_fn (vlib_main_t * vm,
vlib_buffer_t *bufs[VLIB_FRAME_SIZE];
vlib_buffer_t **b = bufs;
u16 nexts[VLIB_FRAME_SIZE], *next;
+ ip4_header_t *ip0, *ip1, *ip2, *ip3;
+ const load_balance_t *lb0, *lb1, *lb2, *lb3;
+ ip4_address_t *src_addr0, *src_addr1, *src_addr2, *src_addr3;
+ ip46_address_t dst_addr0, dst_addr1, dst_addr2, dst_addr3;
+ u32 lb_index0, lb_index1, lb_index2, lb_index3;
+ const dpo_id_t *dpo0, *dpo1, *dpo2, *dpo3;
+ hicn_buffer_t *hicnb0, *hicnb1, *hicnb2, *hicnb3;
from = vlib_frame_vector_args (frame);
n_left = frame->n_vectors;
@@ -347,14 +397,6 @@ hicn_data_input_ip4_fn (vlib_main_t * vm,
#if (CLIB_N_PREFETCHES >= 8)
while (n_left >= 4)
{
- ip4_header_t *ip0, *ip1, *ip2, *ip3;
- const load_balance_t *lb0, *lb1, *lb2, *lb3;
- ip4_fib_mtrie_t *mtrie0, *mtrie1, *mtrie2, *mtrie3;
- ip4_fib_mtrie_leaf_t leaf0, leaf1, leaf2, leaf3;
- ip4_address_t *src_addr0, *src_addr1, *src_addr2, *src_addr3;
- u32 lb_index0, lb_index1, lb_index2, lb_index3;
- const dpo_id_t *dpo0, *dpo1, *dpo2, *dpo3;
-
/* Prefetch next iteration. */
if (n_left >= 8)
{
@@ -374,40 +416,31 @@ hicn_data_input_ip4_fn (vlib_main_t * vm,
ip2 = vlib_buffer_get_current (b[2]);
ip3 = vlib_buffer_get_current (b[3]);
+ hicnb0 = hicn_get_buffer (b[0]);
+ hicnb1 = hicn_get_buffer (b[1]);
+ hicnb2 = hicn_get_buffer (b[2]);
+ hicnb3 = hicn_get_buffer (b[3]);
+
src_addr0 = &ip0->src_address;
src_addr1 = &ip1->src_address;
src_addr2 = &ip2->src_address;
src_addr3 = &ip3->src_address;
+ ip46_address_set_ip4 (&dst_addr0, &ip0->dst_address);
+ ip46_address_set_ip4 (&dst_addr1, &ip1->dst_address);
+ ip46_address_set_ip4 (&dst_addr2, &ip2->dst_address);
+ ip46_address_set_ip4 (&dst_addr3, &ip3->dst_address);
+
ip_lookup_set_buffer_fib_index (im->fib_index_by_sw_if_index, b[0]);
ip_lookup_set_buffer_fib_index (im->fib_index_by_sw_if_index, b[1]);
ip_lookup_set_buffer_fib_index (im->fib_index_by_sw_if_index, b[2]);
ip_lookup_set_buffer_fib_index (im->fib_index_by_sw_if_index, b[3]);
- mtrie0 = &ip4_fib_get (vnet_buffer (b[0])->ip.fib_index)->mtrie;
- mtrie1 = &ip4_fib_get (vnet_buffer (b[1])->ip.fib_index)->mtrie;
- mtrie2 = &ip4_fib_get (vnet_buffer (b[2])->ip.fib_index)->mtrie;
- mtrie3 = &ip4_fib_get (vnet_buffer (b[3])->ip.fib_index)->mtrie;
-
- leaf0 = ip4_fib_mtrie_lookup_step_one (mtrie0, src_addr0);
- leaf1 = ip4_fib_mtrie_lookup_step_one (mtrie1, src_addr1);
- leaf2 = ip4_fib_mtrie_lookup_step_one (mtrie2, src_addr2);
- leaf3 = ip4_fib_mtrie_lookup_step_one (mtrie3, src_addr3);
-
- leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, src_addr0, 2);
- leaf1 = ip4_fib_mtrie_lookup_step (mtrie1, leaf1, src_addr1, 2);
- leaf2 = ip4_fib_mtrie_lookup_step (mtrie2, leaf2, src_addr2, 2);
- leaf3 = ip4_fib_mtrie_lookup_step (mtrie3, leaf3, src_addr3, 2);
-
- leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, src_addr0, 3);
- leaf1 = ip4_fib_mtrie_lookup_step (mtrie1, leaf1, src_addr1, 3);
- leaf2 = ip4_fib_mtrie_lookup_step (mtrie2, leaf2, src_addr2, 3);
- leaf3 = ip4_fib_mtrie_lookup_step (mtrie3, leaf3, src_addr3, 3);
-
- lb_index0 = ip4_fib_mtrie_leaf_get_adj_index (leaf0);
- lb_index1 = ip4_fib_mtrie_leaf_get_adj_index (leaf1);
- lb_index2 = ip4_fib_mtrie_leaf_get_adj_index (leaf2);
- lb_index3 = ip4_fib_mtrie_leaf_get_adj_index (leaf3);
+ ip4_fib_forwarding_lookup_x4 (
+ vnet_buffer (b[0])->ip.fib_index, vnet_buffer (b[1])->ip.fib_index,
+ vnet_buffer (b[2])->ip.fib_index, vnet_buffer (b[3])->ip.fib_index,
+ src_addr0, src_addr1, src_addr2, src_addr3, &lb_index0, &lb_index1,
+ &lb_index2, &lb_index3);
ASSERT (lb_index0 && lb_index1 && lb_index2 && lb_index3);
lb0 = load_balance_get (lb_index0);
@@ -430,26 +463,45 @@ hicn_data_input_ip4_fn (vlib_main_t * vm,
dpo3 = load_balance_get_bucket_i (lb3, 0);
if (dpo_is_hicn (dpo0))
- next[0] = HICN_DATA_INPUT_IP4_NEXT_FACE;
+ {
+ next[0] = (ip_lookup_next_t) HICN_DATA_INPUT_IP4_NEXT_IP4_LOCAL;
+ hicnb0->dpo_ctx_id = dpo0->dpoi_index;
+ hicn_data_input_set_adj_index (
+ b[0], &dst_addr0, hicn_strategy_dpo_ctx_get (dpo0->dpoi_index));
+ }
else
next[0] = HICN_DATA_INPUT_IP4_NEXT_IP4_LOCAL;
if (dpo_is_hicn (dpo1))
- next[1] = HICN_DATA_INPUT_IP4_NEXT_FACE;
+ {
+ next[1] = (ip_lookup_next_t) HICN_DATA_INPUT_IP4_NEXT_IP4_LOCAL;
+ hicnb1->dpo_ctx_id = dpo1->dpoi_index;
+ hicn_data_input_set_adj_index (
+ b[1], &dst_addr1, hicn_strategy_dpo_ctx_get (dpo1->dpoi_index));
+ }
else
next[1] = HICN_DATA_INPUT_IP4_NEXT_IP4_LOCAL;
if (dpo_is_hicn (dpo2))
- next[2] = HICN_DATA_INPUT_IP4_NEXT_FACE;
+ {
+ next[2] = (ip_lookup_next_t) HICN_DATA_INPUT_IP4_NEXT_IP4_LOCAL;
+ hicnb2->dpo_ctx_id = dpo2->dpoi_index;
+ hicn_data_input_set_adj_index (
+ b[2], &dst_addr2, hicn_strategy_dpo_ctx_get (dpo2->dpoi_index));
+ }
else
next[2] = HICN_DATA_INPUT_IP4_NEXT_IP4_LOCAL;
if (dpo_is_hicn (dpo3))
- next[3] = HICN_DATA_INPUT_IP4_NEXT_FACE;
+ {
+ next[3] = (ip_lookup_next_t) HICN_DATA_INPUT_IP4_NEXT_IP4_LOCAL;
+ hicnb3->dpo_ctx_id = dpo3->dpoi_index;
+ hicn_data_input_set_adj_index (
+ b[3], &dst_addr3, hicn_strategy_dpo_ctx_get (dpo3->dpoi_index));
+ }
else
next[3] = HICN_DATA_INPUT_IP4_NEXT_IP4_LOCAL;
-
if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE) &&
(b[0]->flags & VLIB_BUFFER_IS_TRACED))
{
@@ -490,18 +542,14 @@ hicn_data_input_ip4_fn (vlib_main_t * vm,
t->isv6 = 0;
}
- vlib_increment_combined_counter
- (cm, thread_index, lb_index0, 1,
- vlib_buffer_length_in_chain (vm, b[0]));
- vlib_increment_combined_counter
- (cm, thread_index, lb_index1, 1,
- vlib_buffer_length_in_chain (vm, b[1]));
- vlib_increment_combined_counter
- (cm, thread_index, lb_index2, 1,
- vlib_buffer_length_in_chain (vm, b[2]));
- vlib_increment_combined_counter
- (cm, thread_index, lb_index3, 1,
- vlib_buffer_length_in_chain (vm, b[3]));
+ vlib_increment_combined_counter (cm, thread_index, lb_index0, 1,
+ vlib_buffer_length_in_chain (vm, b[0]));
+ vlib_increment_combined_counter (cm, thread_index, lb_index1, 1,
+ vlib_buffer_length_in_chain (vm, b[1]));
+ vlib_increment_combined_counter (cm, thread_index, lb_index2, 1,
+ vlib_buffer_length_in_chain (vm, b[2]));
+ vlib_increment_combined_counter (cm, thread_index, lb_index3, 1,
+ vlib_buffer_length_in_chain (vm, b[3]));
b += 4;
next += 4;
@@ -510,16 +558,6 @@ hicn_data_input_ip4_fn (vlib_main_t * vm,
#elif (CLIB_N_PREFETCHES >= 4)
while (n_left >= 4)
{
- ip4_header_t *ip0, *ip1;
- const load_balance_t *lb0, *lb1;
- ip4_fib_mtrie_t *mtrie0, *mtrie1;
- ip4_fib_mtrie_leaf_t leaf0, leaf1;
- ip4_address_t *src_addr0, *src_addr1;
- u32 lb_index0, lb_index1;
- flow_hash_config_t flow_hash_config0, flow_hash_config1;
- u32 hash_c0, hash_c1;
- const dpo_id_t *dpo0, *dpo1;
-
/* Prefetch next iteration. */
{
vlib_prefetch_buffer_header (b[2], LOAD);
@@ -532,26 +570,21 @@ hicn_data_input_ip4_fn (vlib_main_t * vm,
ip0 = vlib_buffer_get_current (b[0]);
ip1 = vlib_buffer_get_current (b[1]);
+ hicnb0 = hicn_get_buffer (b[0]);
+ hicnb1 = hicn_get_buffer (b[1]);
+
src_addr0 = &ip0->src_address;
src_addr1 = &ip1->src_address;
+ ip46_address_set_ip4 (&dst_addr0, &ip0->dst_address);
+ ip46_address_set_ip4 (&dst_addr1, &ip1->dst_address);
+
ip_lookup_set_buffer_fib_index (im->fib_index_by_sw_if_index, b[0]);
ip_lookup_set_buffer_fib_index (im->fib_index_by_sw_if_index, b[1]);
- mtrie0 = &ip4_fib_get (vnet_buffer (b[0])->ip.fib_index)->mtrie;
- mtrie1 = &ip4_fib_get (vnet_buffer (b[1])->ip.fib_index)->mtrie;
-
- leaf0 = ip4_fib_mtrie_lookup_step_one (mtrie0, src_addr0);
- leaf1 = ip4_fib_mtrie_lookup_step_one (mtrie1, src_addr1);
-
- leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, src_addr0, 2);
- leaf1 = ip4_fib_mtrie_lookup_step (mtrie1, leaf1, src_addr1, 2);
-
- leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, src_addr0, 3);
- leaf1 = ip4_fib_mtrie_lookup_step (mtrie1, leaf1, src_addr1, 3);
-
- lb_index0 = ip4_fib_mtrie_leaf_get_adj_index (leaf0);
- lb_index1 = ip4_fib_mtrie_leaf_get_adj_index (leaf1);
+ ip4_fib_forwarding_lookup_x2 (
+ vnet_buffer (b[0])->ip.fib_index, vnet_buffer (b[1])->ip.fib_index,
+ src_addr0, src_addr1, &lb_index0, &lb_index1);
ASSERT (lb_index0 && lb_index1);
lb0 = load_balance_get (lb_index0);
@@ -566,12 +599,22 @@ hicn_data_input_ip4_fn (vlib_main_t * vm,
dpo1 = load_balance_get_bucket_i (lb1, 0);
if (dpo_is_hicn (dpo0))
- next[0] = HICN_DATA_INPUT_IP4_NEXT_FACE;
+ {
+ next[0] = (ip_lookup_next_t) HICN_DATA_INPUT_IP4_NEXT_IP4_LOCAL;
+ hicnb0->dpo_ctx_id = dpo0->dpoi_index;
+ hicn_data_input_set_adj_index (
+ b[0], &dst_addr0, hicn_strategy_dpo_ctx_get (dpo0->dpoi_index));
+ }
else
next[0] = HICN_DATA_INPUT_IP4_NEXT_IP4_LOCAL;
if (dpo_is_hicn (dpo1))
- next[1] = HICN_DATA_INPUT_IP4_NEXT_FACE;
+ {
+ next[1] = (ip_lookup_next_t) HICN_DATA_INPUT_IP4_NEXT_IP4_LOCAL;
+ hicnb1->dpo_ctx_id = dpo1->dpoi_index;
+ hicn_data_input_set_adj_index (
+ b[1], &dst_addr1, hicn_strategy_dpo_ctx_get (dpo1->dpoi_index));
+ }
else
next[1] = HICN_DATA_INPUT_IP4_NEXT_IP4_LOCAL;
@@ -595,13 +638,10 @@ hicn_data_input_ip4_fn (vlib_main_t * vm,
t->isv6 = 0;
}
-
- vlib_increment_combined_counter
- (cm, thread_index, lb_index0, 1,
- vlib_buffer_length_in_chain (vm, b[0]));
- vlib_increment_combined_counter
- (cm, thread_index, lb_index1, 1,
- vlib_buffer_length_in_chain (vm, b[1]));
+ vlib_increment_combined_counter (cm, thread_index, lb_index0, 1,
+ vlib_buffer_length_in_chain (vm, b[0]));
+ vlib_increment_combined_counter (cm, thread_index, lb_index1, 1,
+ vlib_buffer_length_in_chain (vm, b[1]));
b += 2;
next += 2;
@@ -610,26 +650,17 @@ hicn_data_input_ip4_fn (vlib_main_t * vm,
#endif
while (n_left > 0)
{
- ip4_header_t *ip0;
- const load_balance_t *lb0;
- ip4_fib_mtrie_t *mtrie0;
- ip4_fib_mtrie_leaf_t leaf0;
- ip4_address_t *src_addr0;
- u32 lbi0;
- const dpo_id_t *dpo0;
-
ip0 = vlib_buffer_get_current (b[0]);
+ hicnb0 = hicn_get_buffer (b[0]);
src_addr0 = &ip0->src_address;
+ ip46_address_set_ip4 (&dst_addr0, &ip0->dst_address);
ip_lookup_set_buffer_fib_index (im->fib_index_by_sw_if_index, b[0]);
- mtrie0 = &ip4_fib_get (vnet_buffer (b[0])->ip.fib_index)->mtrie;
- leaf0 = ip4_fib_mtrie_lookup_step_one (mtrie0, src_addr0);
- leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, src_addr0, 2);
- leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, src_addr0, 3);
- lbi0 = ip4_fib_mtrie_leaf_get_adj_index (leaf0);
+ lb_index0 = ip4_fib_forwarding_lookup (vnet_buffer (b[0])->ip.fib_index,
+ src_addr0);
- ASSERT (lbi0);
- lb0 = load_balance_get (lbi0);
+ ASSERT (lb_index0);
+ lb0 = load_balance_get (lb_index0);
ASSERT (lb0->lb_n_buckets > 0);
ASSERT (is_pow2 (lb0->lb_n_buckets));
@@ -637,7 +668,12 @@ hicn_data_input_ip4_fn (vlib_main_t * vm,
dpo0 = load_balance_get_bucket_i (lb0, 0);
if (dpo_is_hicn (dpo0))
- next[0] = HICN_DATA_INPUT_IP4_NEXT_FACE;
+ {
+ next[0] = (ip_lookup_next_t) HICN_DATA_INPUT_IP4_NEXT_IP4_LOCAL;
+ hicnb0->dpo_ctx_id = dpo0->dpoi_index;
+ hicn_data_input_set_adj_index (
+ b[0], &dst_addr0, hicn_strategy_dpo_ctx_get (dpo0->dpoi_index));
+ }
else
next[0] = HICN_DATA_INPUT_IP4_NEXT_IP4_LOCAL;
@@ -651,9 +687,8 @@ hicn_data_input_ip4_fn (vlib_main_t * vm,
t->isv6 = 0;
}
- vlib_increment_combined_counter (cm, thread_index, lbi0, 1,
- vlib_buffer_length_in_chain (vm,
- b[0]));
+ vlib_increment_combined_counter (cm, thread_index, lb_index0, 1,
+ vlib_buffer_length_in_chain (vm, b[0]));
b += 1;
next += 1;
@@ -668,30 +703,22 @@ hicn_data_input_ip4_fn (vlib_main_t * vm,
return frame->n_vectors;
}
-/* *INDENT-OFF* */
-VLIB_REGISTER_NODE(hicn_data_input_ip4) =
- {
- .function = hicn_data_input_ip4_fn,
- .name = "hicn-data-input-ip4",
- .vector_size = sizeof(u32),
- .format_trace = format_hicn_data_input_trace,
- .type = VLIB_NODE_TYPE_INTERNAL,
- .n_errors = ARRAY_LEN(hicn_data_input_error_strings),
- .error_strings = hicn_data_input_error_strings,
- .n_next_nodes = HICN_DATA_INPUT_IP4_N_NEXT,
- .next_nodes =
- {
- [HICN_DATA_INPUT_IP4_NEXT_FACE] = "hicn4-face-input",
- [HICN_DATA_INPUT_IP4_NEXT_IP4_LOCAL] = "ip4-local-end-of-arc"
- },
- };
-/* *INDENT-ON* */
-
-/* *INDENT-OFF* */
-VNET_FEATURE_INIT(hicn_data_input_ip4_arc, static)=
- {
- .arc_name = "ip4-local",
- .node_name = "hicn-data-input-ip4",
- .runs_before = VNET_FEATURES("ip4-local-end-of-arc"),
- };
-/* *INDENT-ON* */
+VLIB_REGISTER_NODE (hicn_data_input_ip4) = {
+ .function = hicn_data_input_ip4_fn,
+ .name = "hicn-data-input-ip4",
+ .vector_size = sizeof (u32),
+ .format_trace = format_hicn_data_input_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+ .n_errors = ARRAY_LEN (hicn_data_input_error_strings),
+ .error_strings = hicn_data_input_error_strings,
+ .n_next_nodes = HICN_DATA_INPUT_IP4_N_NEXT,
+ .next_nodes = { [HICN_DATA_INPUT_IP4_NEXT_FACE] = "hicn4-face-input",
+ [HICN_DATA_INPUT_IP4_NEXT_IP4_LOCAL] =
+ "ip4-local-end-of-arc" },
+};
+
+VNET_FEATURE_INIT (hicn_data_input_ip4_arc, static) = {
+ .arc_name = "ip4-local",
+ .node_name = "hicn-data-input-ip4",
+ .runs_before = VNET_FEATURES ("ip4-local-end-of-arc"),
+};
\ No newline at end of file
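The node bodies above follow the usual VPP pattern of draining the frame in batches of four buffers and then one at a time, doing a FIB lookup and picking the next node per buffer. A minimal standalone sketch of that batching pattern, in plain C with an integer array standing in for the vlib buffer vector (illustrative types only; the real nodes operate on vlib_buffer_t and FIB lookups):

#include <stdio.h>

/*
 * Simplified sketch of the batched loop used by hicn_data_input_ip6_fn /
 * hicn_data_input_ip4_fn: consume the frame four elements at a time while
 * enough remain, then drain the tail one element at a time. In the real
 * nodes the per-element work is a FIB lookup plus next-node selection.
 */
static void
process_frame (const int *from, int n_left, int *results)
{
  while (n_left >= 4)
    {
      /* This is where the real node prefetches the next iteration. */
      results[0] = from[0] * 2;
      results[1] = from[1] * 2;
      results[2] = from[2] * 2;
      results[3] = from[3] * 2;
      from += 4;
      results += 4;
      n_left -= 4;
    }

  while (n_left > 0)
    {
      results[0] = from[0] * 2;
      from += 1;
      results += 1;
      n_left -= 1;
    }
}

int
main (void)
{
  int in[7] = { 1, 2, 3, 4, 5, 6, 7 };
  int out[7];

  process_frame (in, 7, out);
  for (int i = 0; i < 7; i++)
    printf ("%d ", out[i]);
  printf ("\n");
  return 0;
}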
diff --git a/hicn-plugin/src/data_pcslookup.h b/hicn-plugin/src/data_pcslookup.h
index e3050c31c..32fca952a 100644
--- a/hicn-plugin/src/data_pcslookup.h
+++ b/hicn-plugin/src/data_pcslookup.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Copyright (c) 2021 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -22,9 +22,10 @@
* @file data_pcslookup.h
*
 * This is the node encountered by data packets after the hicn6-face-input or
- * hicn4-face-input. This node performs a lookup in the pit and content store and
- * if there is a hit in the PIT, the vlib buffer is passed to the hicn-data-fwd
- * while if there is a hit in the CS or there isn't any hit, the packet is dropped.
+ * hicn4-face-input. This node performs a lookup in the PIT and content
+ * store. If there is a hit in the PIT, the vlib buffer is passed to
+ * hicn-data-fwd; if there is a hit in the CS, or there is no hit at all,
+ * the packet is dropped.
*/
/*
@@ -46,8 +47,8 @@ typedef struct
typedef enum
{
- HICN_DATA_PCSLOOKUP_NEXT_DATA_FWD, /* This must be one position
- * before the error drop!! */
+ HICN_DATA_PCSLOOKUP_NEXT_DATA_FWD, /* This must be one position
+ * before the error drop!! */
HICN_DATA_PCSLOOKUP_NEXT_ERROR_DROP,
HICN_DATA_PCSLOOKUP_N_NEXT,
} hicn_data_pcslookup_next_t;
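As the comment on HICN_DATA_PCSLOOKUP_NEXT_DATA_FWD notes, the enum ordering is load-bearing: keeping the forward next-index exactly one position before the drop next-index lets the lookup node turn "PIT hit forwards, CS hit drops" into a single addition. A small standalone sketch of that trick, with hypothetical names, assuming plain C:

#include <assert.h>
#include <stdio.h>

/*
 * Hypothetical enum mirroring the ordering constraint above: the forward
 * next-index sits exactly one position before the drop next-index, so the
 * lookup outcome maps to a next node with a single addition.
 */
typedef enum
{
  NEXT_DATA_FWD,   /* must stay one position before the error drop */
  NEXT_ERROR_DROP,
  N_NEXT,
} next_t;

static next_t
pick_next (int pit_or_cs_hit, int is_cs_entry)
{
  if (!pit_or_cs_hit)
    return NEXT_ERROR_DROP;

  /* PIT hit (is_cs_entry == 0) forwards, CS hit (is_cs_entry == 1) drops. */
  return (next_t) (NEXT_DATA_FWD + (is_cs_entry != 0));
}

int
main (void)
{
  assert (pick_next (1, 0) == NEXT_DATA_FWD);
  assert (pick_next (1, 1) == NEXT_ERROR_DROP);
  assert (pick_next (0, 0) == NEXT_ERROR_DROP);
  printf ("next-index arithmetic ok\n");
  return 0;
}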
diff --git a/hicn-plugin/src/data_pcslookup_node.c b/hicn-plugin/src/data_pcslookup_node.c
index 99af350b0..5ae6958f5 100644
--- a/hicn-plugin/src/data_pcslookup_node.c
+++ b/hicn-plugin/src/data_pcslookup_node.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Copyright (c) 2021-2022 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -29,7 +29,7 @@ static char *hicn_data_pcslookup_error_strings[] = {
};
/* packet trace format function */
-always_inline u8 *hicn_data_pcslookup_format_trace (u8 * s, va_list * args);
+always_inline u8 *hicn_data_pcslookup_format_trace (u8 *s, va_list *args);
vlib_node_registration_t hicn_data_pcslookup_node;
@@ -37,13 +37,19 @@ vlib_node_registration_t hicn_data_pcslookup_node;
* hICN node for handling data. It performs a lookup in the PIT.
*/
static uword
-hicn_data_pcslookup_node_fn (vlib_main_t * vm,
- vlib_node_runtime_t * node, vlib_frame_t * frame)
+hicn_data_pcslookup_node_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
+ vlib_frame_t *frame)
{
u32 n_left_from, *from, *to_next;
hicn_data_pcslookup_next_t next_index;
hicn_data_pcslookup_runtime_t *rt;
vl_api_hicn_api_node_stats_get_reply_t stats = { 0 };
+ vlib_buffer_t *b0;
+ u32 bi0;
+ u32 next0 = HICN_DATA_PCSLOOKUP_NEXT_ERROR_DROP;
+ hicn_pcs_entry_t *pcs_entry = NULL;
+ hicn_buffer_t *hicnb0;
+ int ret;
rt = vlib_node_get_runtime_data (vm, node->node_index);
@@ -62,25 +68,7 @@ hicn_data_pcslookup_node_fn (vlib_main_t * vm,
while (n_left_from > 0 && n_left_to_next > 0)
{
- vlib_buffer_t *b0;
- u8 isv6;
- u8 *nameptr;
- u16 namelen;
- u32 bi0;
- u32 next0 = HICN_DATA_PCSLOOKUP_NEXT_ERROR_DROP;
- u64 name_hash = 0;
- hicn_name_t name;
- hicn_header_t *hicn0 = NULL;
- u32 node_id0 = 0;
- index_t dpo_ctx_id0 = 0;
- int ret0;
- u8 vft_id0;
- u8 is_cs0;
- u8 hash_entry_id = 0;
- u8 bucket_is_overflown = 0;
- u32 bucket_id = ~0;
-
- /* Prefetch for next iteration. */
+ // Prefetch for next iteration.
if (n_left_from > 1)
{
vlib_buffer_t *b1;
@@ -88,9 +76,9 @@ hicn_data_pcslookup_node_fn (vlib_main_t * vm,
 // Prefetch two cache lines -- 128 bytes -- so that we load the
// hicn_buffer_t as well
CLIB_PREFETCH (b1, 2 * CLIB_CACHE_LINE_BYTES, STORE);
- CLIB_PREFETCH (b1->data, CLIB_CACHE_LINE_BYTES, LOAD);
}
- /* Dequeue a packet buffer */
+
+ // Dequeue a packet buffer
bi0 = from[0];
from += 1;
n_left_from -= 1;
@@ -99,46 +87,35 @@ hicn_data_pcslookup_node_fn (vlib_main_t * vm,
n_left_to_next -= 1;
b0 = vlib_get_buffer (vm, bi0);
+ hicnb0 = hicn_get_buffer (b0);
+
+ // By default go to drop
next0 = HICN_DATA_PCSLOOKUP_NEXT_ERROR_DROP;
- /* Incr packet counter */
+ // Increase packet counters
stats.pkts_processed += 1;
+ stats.pkts_data_count += 1;
- ret0 = hicn_data_parse_pkt (b0, &name, &namelen, &hicn0, &isv6);
- nameptr = (u8 *) (&name);
+ // Lookup the name in the PIT
+ hicn_name_t name;
+ hicn_packet_get_name (&hicn_get_buffer (b0)->pkbuf, &name);
+ ret = hicn_pcs_lookup_one (rt->pitcs, &name, &pcs_entry);
- if (PREDICT_TRUE (ret0 == HICN_ERROR_NONE &&
- hicn_hashtb_fullhash (nameptr, namelen,
- &name_hash) ==
- HICN_ERROR_NONE))
+ if (ret == HICN_ERROR_NONE)
{
- int res =
- hicn_hashtb_lookup_node (rt->pitcs->pcs_table, nameptr,
- namelen, name_hash,
- 1
- /*is_data. Do not take lock if hit CS */
- ,
- &node_id0, &dpo_ctx_id0, &vft_id0,
- &is_cs0, &hash_entry_id, &bucket_id,
- &bucket_is_overflown);
-
- stats.pkts_data_count += 1;
-
- if (res == HICN_ERROR_NONE)
- {
- /*
- * In case the result of the lookup
- * is a CS entry, the packet is
- * dropped
- */
- next0 = HICN_DATA_PCSLOOKUP_NEXT_DATA_FWD + is_cs0;
- }
+ ret = hicn_store_internal_state (
+ b0, hicn_pcs_entry_get_index (rt->pitcs, pcs_entry),
+ hicnb0->dpo_ctx_id);
+
+ /*
+ * In case the result of the lookup
+ * is a CS entry, the packet is
+ * dropped
+ */
+ next0 = HICN_DATA_PCSLOOKUP_NEXT_DATA_FWD +
+ (hicn_pcs_entry_is_cs (pcs_entry) && !ret);
}
- hicn_store_internal_state (b0, name_hash, node_id0, dpo_ctx_id0,
- vft_id0, hash_entry_id, bucket_id,
- bucket_is_overflown);
-
/*
* Verify speculative enqueue, maybe switch current
* next frame
@@ -156,16 +133,18 @@ hicn_data_pcslookup_node_fn (vlib_main_t * vm,
{
hicn_data_pcslookup_trace_t *t =
vlib_add_trace (vm, node, b0, sizeof (*t));
- t->pkt_type = HICN_PKT_TYPE_CONTENT;
+ t->pkt_type = HICN_PACKET_TYPE_DATA;
t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
t->next_index = next0;
}
}
+
vlib_put_next_frame (vm, node, next_index, n_left_to_next);
}
+
/* Check the CS LRU, and trim if necessary. */
- u32 pit_int_count = hicn_pit_get_int_count (rt->pitcs);
- u32 pit_cs_count = hicn_pit_get_cs_count (rt->pitcs);
+ u32 pit_int_count = hicn_pcs_get_pit_count (rt->pitcs);
+ u32 pit_cs_count = hicn_pcs_get_cs_count (rt->pitcs);
vlib_node_increment_counter (vm, hicn_data_pcslookup_node.index,
HICNFWD_ERROR_PROCESSED, stats.pkts_processed);
@@ -177,12 +156,13 @@ hicn_data_pcslookup_node_fn (vlib_main_t * vm,
HICNFWD_ERROR_INT_COUNT, pit_int_count);
update_node_counter (vm, hicn_data_pcslookup_node.index,
HICNFWD_ERROR_CS_COUNT, pit_cs_count);
+
return (frame->n_vectors);
}
/* packet trace format function */
static u8 *
-hicn_data_pcslookup_format_trace (u8 * s, va_list * args)
+hicn_data_pcslookup_format_trace (u8 *s, va_list *args)
{
CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
@@ -197,7 +177,6 @@ hicn_data_pcslookup_format_trace (u8 * s, va_list * args)
/*
* Node registration for the data forwarder node
*/
-/* *INDENT-OFF* */
VLIB_REGISTER_NODE(hicn_data_pcslookup_node) =
{
.function = hicn_data_pcslookup_node_fn,
@@ -214,7 +193,6 @@ VLIB_REGISTER_NODE(hicn_data_pcslookup_node) =
[HICN_DATA_PCSLOOKUP_NEXT_ERROR_DROP] = "error-drop",
},
};
-/* *INDENT-ON* */
/*
* fd.io coding-style-patch-verification: ON
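The loop above prefetches the next buffer two cache lines deep (128 bytes) so the hicn_buffer_t metadata is already warm when that buffer is dequeued. A standalone sketch of the prefetch-ahead pattern, assuming GCC/Clang __builtin_prefetch and a 64-byte cache line (CLIB_PREFETCH in VPP serves the same purpose):

#include <stdio.h>
#include <string.h>

#define CACHE_LINE_BYTES 64

/*
 * Each element mimics a vlib buffer: one cache line of metadata followed
 * by one cache line of packet data, so two prefetches (128 bytes) bring
 * in both before the element is touched.
 */
typedef struct
{
  unsigned char meta[CACHE_LINE_BYTES];
  unsigned char data[CACHE_LINE_BYTES];
} elt_t;

static unsigned
sum_first_bytes (const elt_t *elts, int n)
{
  unsigned sum = 0;

  for (int i = 0; i < n; i++)
    {
      if (i + 1 < n)
        {
          /* Warm up the next element's two cache lines while working on
           * the current one (read prefetch, high temporal locality). */
          __builtin_prefetch (&elts[i + 1], 0, 3);
          __builtin_prefetch ((const char *) &elts[i + 1] + CACHE_LINE_BYTES,
                              0, 3);
        }
      sum += elts[i].data[0];
    }
  return sum;
}

int
main (void)
{
  elt_t elts[8];

  memset (elts, 0, sizeof (elts));
  for (int i = 0; i < 8; i++)
    elts[i].data[0] = (unsigned char) i;
  printf ("%u\n", sum_first_bytes (elts, 8));
  return 0;
}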
diff --git a/hicn-plugin/src/error.c b/hicn-plugin/src/error.c
index 588ae2398..a64066d67 100644
--- a/hicn-plugin/src/error.c
+++ b/hicn-plugin/src/error.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Copyright (c) 2021 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -13,10 +13,10 @@
* limitations under the License.
*/
-#include "error.h"
+#include <vpp_plugins/hicn/error.h>
const char *HICN_ERROR_STRING[] = {
-#define _(a,b,c) c,
+#define _(a, b, c) c,
foreach_hicn_error
#undef _
};
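The "#define _(a, b, c) c," / "#undef _" pair above is the X-macro idiom: a single list is expanded once into the error enum and once into the matching string table, so codes and messages cannot drift apart. A self-contained sketch of the idiom with hypothetical demo_* names (the plugin's actual list is foreach_hicn_error, now included from vpp_plugins/hicn/error.h):

#include <stdio.h>

/*
 * Hypothetical demo_* names and codes. One list, two expansions: the enum
 * of error codes and the string table stay in sync by construction.
 */
#define foreach_demo_error                                                    \
  _ (NONE, 0, "Ok")                                                           \
  _ (UNSPECIFIED, -128, "Unspecified Error")                                  \
  _ (NOT_FOUND, -129, "Entry not found")

typedef enum
{
#define _(a, b, c) DEMO_ERROR_##a = (b),
  foreach_demo_error
#undef _
} demo_error_t;

static const char *DEMO_ERROR_STRING[] = {
#define _(a, b, c) c,
  foreach_demo_error
#undef _
};

int
main (void)
{
  /* Code 0 maps to index 0; negative codes start at -128 and map to 1, 2, ... */
  printf ("%s\n", DEMO_ERROR_STRING[0]);
  printf ("%s\n", DEMO_ERROR_STRING[(-DEMO_ERROR_NOT_FOUND) - 127]);
  return 0;
}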
diff --git a/hicn-plugin/src/error.h b/hicn-plugin/src/error.h
deleted file mode 100644
index 59ebce61c..000000000
--- a/hicn-plugin/src/error.h
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * Copyright (c) 2017-2020 Cisco and/or its affiliates.
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at:
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __HICN_ERROR_H__
-#define __HICN_ERROR_H__
-
-/**
- * @file error.h
- *
- * Error codes for the hICN plugin.
- */
-
-
-#define foreach_hicn_error \
- _(NONE, 0, "Ok") \
- _(UNSPECIFIED, -128, "Unspecified Error") \
- _(FACE_NOT_FOUND, -129, "Face not found in Face table") \
- _(FACE_NULL, -130, "Face null") \
- _(FACE_IP_ADJ_NOT_FOUND, -131, "Ip adjacecny for face not found") \
- _(FACE_HW_INT_NOT_FOUND, -132, "Hardware interface not found") \
- _(FACE_NOMEM, -133, "Face table is full") \
- _(FACE_NO_GLOBAL_IP, -134, "No global ip address for face") \
- _(FACE_NOT_FOUND_IN_ENTRY, -135, "Face not found in entry") \
- _(FACE_ALREADY_DELETED, -136, "Face alredy deleted") \
- _(FACE_ALREADY_CREATED, -137, "Face alredy created") \
- _(FWD_NOT_ENABLED, -138, "hICN forwarder not enabled") \
- _(FWD_ALREADY_ENABLED, -139, "hICN forwarder alredy enabled") \
- _(PARSER_UNSUPPORTED_PROTO, -140, "Unsupported protocol") \
- _(PARSER_PKT_INVAL, -141, "Packet null") \
- _(PIT_CONFIG_MINLT_OOB, -142, "Min lifetime ouf of bounds") \
- _(PIT_CONFIG_MAXLT_OOB, -143, "Max lifetime ouf of bounds") \
- _(PIT_CONFIG_MINMAXLT, -144, "Min lifetime grater than max lifetime") \
- _(PIT_CONFIG_DFTLT_OOB, -145, "Default lifetime ouf of bounds") \
- _(PIT_CONFIG_SIZE_OOB, -146, "Pit size ouf of bounds") \
- _(CS_CONFIG_SIZE_OOB, -147, "CS size ouf of bounds") \
- _(CS_CONFIG_RESERVED_OOB, -148, "Reseved CS must be between 0 and 100 (excluded)") \
- _(DPO_CTX_NHOPS_NS, -149, "No space for additional next hop") \
- _(DPO_CTX_NHOPS_EXISTS, -150, "Next hop already in the route") \
- _(DPO_CTX_NOT_FOUND, -151, "Dpo context not found") \
- _(DPO_MGR_ID_NOT_VALID, -152, "Dpo id for strategy and context not valid") \
- _(HASHTB_HASH_NOT_FOUND, -153, "Hash not found in hash table") \
- _(HASHTB_HASH_INVAL, -154, "Error while calculating the hash") \
- _(HASHTB_NOMEM, -155, "Unable to allocate new buckets or nodes") \
- _(HASHTB_INVAL, -156, "Invalid argument") \
- _(HASHTB_KEY_INVAL, -157, "Invalid hashtb key") \
- _(HASHTB_EXIST, -158, "Hash already in hashtable") \
- _(ROUTE_INVAL, -159, "Invalid face id and weight") \
- _(ROUTE_NO_LD, -160, "Expected load balance dpo") \
- _(ROUTE_MLT_LD, -161, "Unexpected mulitple buckets in load balance dpo") \
- _(ROUTE_NO_INSERT, -162, "Unable to insert a new FIB entry") \
- _(ROUTE_DPO_NO_HICN, -163, "Dpo is not of type hICN") \
- _(ROUTE_NOT_FOUND, -164, "Route not found in FIB") \
- _(ROUTE_NOT_UPDATED, -165, "Unable to update route") \
- _(ROUTE_ALREADY_EXISTS, -166, "Route already in FIB") \
- _(CLI_INVAL, -167, "Invalid input") \
- _(IPS_ADDR_TYPE_NONUNIFORM, -168, "Src and dst addr have different ip types") \
- _(FACE_TYPE_EXISTS, -169, "Face type already registered") \
- _(NO_BUFFERS, -170, "No vlib_buffer available for packet cloning.") \
- _(NOT_IMPLEMENTED, -171, "Function not yet implemented") \
- _(IFACE_IP_ADJ_NOT_FOUND, -172, "IP adjacency on incomplete face not available") \
- _(APPFACE_ALREADY_ENABLED, -173, "Application face already enabled on interface") \
- _(APPFACE_FEATURE, -174, "Error while enabling app face feature") \
- _(APPFACE_NOT_FOUND, -175, "Application face not found") \
- _(APPFACE_PROD_PREFIX_NULL, -176, "Prefix must not be null for producer face") \
- _(STRATEGY_NH_NOT_FOUND, -177, "Next hop not found") \
- _(MW_STRATEGY_SET, -178, "Error while setting weight for next hop") \
- _(STRATEGY_NOT_FOUND, -179, "Strategy not found") \
- _(UDP_TUNNEL_NOT_FOUND, -180, "Udp tunnel not found") \
- _(UDP_TUNNEL_SRC_DST_TYPE, -181, "Src and dst addresses have different type (ipv4 and ipv6)")
-
-typedef enum
-{
-#define _(a,b,c) HICN_ERROR_##a = (b),
- foreach_hicn_error
-#undef _
- HICN_N_ERROR,
-} hicn_error_t;
-
-extern const char *HICN_ERROR_STRING[];
-
-#define get_error_string(errno) (char *)(errno ? HICN_ERROR_STRING[(-errno) - 127] : HICN_ERROR_STRING[errno])
-
-#endif /* //__HICN_ERROR_H__ */
-
-/*
- * fd.io coding-style-patch-verification: ON
- *
- * Local Variables: eval: (c-set-style "gnu") End:
- */
diff --git a/hicn-plugin/src/face_db.h b/hicn-plugin/src/face_db.h
deleted file mode 100644
index 4dd8b2f32..000000000
--- a/hicn-plugin/src/face_db.h
+++ /dev/null
@@ -1,151 +0,0 @@
-/*
- * Copyright (c) 2017-2020 Cisco and/or its affiliates.
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at:
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __HICN_FACE_DB_H__
-#define __HICN_FACE_DB_H__
-
-#include <vnet/dpo/dpo.h>
-#include "faces/face.h"
-
-/**
- * @file face_db.h
- *
- * Define a face db that is store in every pit entry. A face db containes a list
- * of incoming faces for interest packets that are used to forward data packets
- * on the interests' reverse path
- */
-
-/* Must be power of two */
-#define HICN_FACE_DB_INLINE_FACES 8
-
-#define HICN_PIT_BITMAP_SIZE_BYTE HICN_PARAM_FACES_MAX/8
-#define HICN_PIT_N_HOP_BITMAP_SIZE HICN_PARAM_FACES_MAX
-
-#define HICN_PIT_N_HOP_BUCKET (HICN_PARAM_PIT_ENTRY_PHOPS_MAX - HICN_FACE_DB_INLINE_FACES)
-
-typedef struct hicn_face_bucket_s
-{
- /* Array of indexes of virtual faces */
- hicn_face_id_t faces[HICN_PIT_N_HOP_BUCKET];
-
- /* Used to check if interests are retransmission */
- u8 bitmap[HICN_PIT_BITMAP_SIZE_BYTE];
-
-} hicn_face_bucket_t;
-
-extern hicn_face_bucket_t *hicn_face_bucket_pool;
-
-typedef struct __attribute__ ((packed)) hicn_face_db_s
-{
- /* 19B + 1B = 20B */
- /* Equal to one or zero */
- u8 is_overflow;
-
- /* Number of faces in the last bucket */
- /* Or next availabe entry for storing a dpo_id_t */
- /* 20B + 4B = 24B */
- u32 n_faces;
-
- /* 24B + 32B (8*4) = 56B */
- /* Array of indexes of virtual faces */
- hicn_face_id_t inline_faces[HICN_FACE_DB_INLINE_FACES];
-
- /* 56B + 4B = 60B */
- u32 next_bucket;
-
- /* 60B + 4B = 64B */
- u32 align;
- //align back to 64
-
-} hicn_face_db_t;
-
-always_inline hicn_face_id_t
-hicn_face_db_get_dpo_face (u32 index, hicn_face_db_t * face_db)
-{
- ASSERT (index < face_db->n_faces);
-
- return index < HICN_FACE_DB_INLINE_FACES ? (face_db->inline_faces[index]) :
- (pool_elt_at_index (hicn_face_bucket_pool, face_db->next_bucket)->faces
- [(index - HICN_FACE_DB_INLINE_FACES) & (HICN_PIT_N_HOP_BUCKET - 1)]);
-}
-
-always_inline void
-hicn_face_db_init (int max_element)
-{
- pool_init_fixed (hicn_face_bucket_pool, max_element);
-}
-
-always_inline hicn_face_bucket_t *
-hicn_face_db_get_bucket (u32 bucket_index)
-{
- return pool_elt_at_index (hicn_face_bucket_pool, bucket_index);
-}
-
-always_inline void
-hicn_face_db_add_face (hicn_face_id_t face_id, hicn_face_db_t * face_db)
-{
- //ASSERT (dpo->dpoi_index != ~0);
-
- hicn_face_bucket_t *faces_bkt =
- pool_elt_at_index (hicn_face_bucket_pool, face_db->next_bucket);
-
- hicn_face_id_t *element =
- face_db->n_faces <
- HICN_FACE_DB_INLINE_FACES ? &(face_db->inline_faces[face_db->n_faces]) :
- &(faces_bkt->faces
- [(face_db->n_faces -
- HICN_FACE_DB_INLINE_FACES) & (HICN_PIT_N_HOP_BUCKET - 1)]);
-
- *element = face_id;
-
- u32 bitmap_index = face_id % HICN_PIT_N_HOP_BITMAP_SIZE;
- u32 position_array = bitmap_index / 8;
- u8 bit_index = (u8) (bitmap_index - position_array * 8);
-
- faces_bkt->bitmap[position_array] |= (0x01 << bit_index);
- face_db->n_faces++;
-}
-
-always_inline u8
-hicn_face_search (hicn_face_id_t index, hicn_face_db_t * face_db)
-{
- hicn_face_bucket_t *faces_bkt =
- pool_elt_at_index (hicn_face_bucket_pool, face_db->next_bucket);
- u32 bitmap_index = index % HICN_PIT_N_HOP_BITMAP_SIZE;
-
- u32 position_array = bitmap_index / 8;
- u8 bit_index = bitmap_index - position_array * 8;
-
- return (faces_bkt->bitmap[position_array] >> bit_index) & 0x01;
-}
-
-always_inline void
-hicn_faces_flush (hicn_face_db_t * face_db)
-{
- hicn_face_bucket_t *faces_bkt =
- pool_elt_at_index (hicn_face_bucket_pool, face_db->next_bucket);
- clib_memset_u8 (&(faces_bkt->bitmap), 0, HICN_PIT_BITMAP_SIZE_BYTE);
- face_db->n_faces = 0;
- pool_put_index (hicn_face_bucket_pool, face_db->next_bucket);
-}
-
-
-#endif /* // __HICN_FACE_DB_H__ */
-
-/*
- * fd.io coding-style-patch-verification: ON
- *
- * Local Variables: eval: (c-set-style "gnu") End:
- */
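The removed face_db kept, per PIT entry, a small bitmap indexed by the face id modulo the bitmap size and used it to spot interest retransmissions from a face it had already recorded. A standalone sketch of that byte/bit bookkeeping, with illustrative sizes (the plugin derived them from HICN_PARAM_FACES_MAX):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/*
 * Illustrative sizes. A face id is folded onto the bitmap by modulo, then
 * split into a byte position and a bit position.
 */
#define N_BITS  256
#define N_BYTES (N_BITS / 8)

static void
bitmap_set (uint8_t *bitmap, uint32_t face_id)
{
  uint32_t bit = face_id % N_BITS;

  bitmap[bit / 8] |= (uint8_t) (1u << (bit % 8));
}

static int
bitmap_is_set (const uint8_t *bitmap, uint32_t face_id)
{
  uint32_t bit = face_id % N_BITS;

  return (bitmap[bit / 8] >> (bit % 8)) & 0x01;
}

int
main (void)
{
  uint8_t bitmap[N_BYTES];

  memset (bitmap, 0, sizeof (bitmap));
  bitmap_set (bitmap, 42);
  printf ("face 42 already seen: %d\n", bitmap_is_set (bitmap, 42));
  printf ("face 43 already seen: %d\n", bitmap_is_set (bitmap, 43));
  return 0;
}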
diff --git a/hicn-plugin/src/faces/app/address_mgr.c b/hicn-plugin/src/faces/app/address_mgr.c
index 2d5894ab8..44729fd69 100644
--- a/hicn-plugin/src/faces/app/address_mgr.c
+++ b/hicn-plugin/src/faces/app/address_mgr.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Copyright (c) 2021 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -14,7 +14,7 @@
*/
/*
- * Copyright (c) 2017-2019 by cisco systems inc. All rights reserved.
+ * Copyright (c) 2021 by cisco systems inc. All rights reserved.
*
*/
@@ -24,17 +24,18 @@
#include <vnet/ip/ip6_packet.h>
#include <vnet/ip/ip4.h> //ip4_add_del_ip_address
#include <vnet/ip/ip6.h> //ip6_add_del_ip_address
-#include <vnet/fib/fib_types.h> //FIB_PROTOCOL_IP4/6, FIB_NODE_INDEX_INVALID
-#include <vnet/fib/fib_entry.h> //FIB_SOURCE_PRIORITY_HI
+#include <vnet/fib/fib_types.h> //FIB_PROTOCOL_IP4/6, FIB_NODE_INDEX_INVALID
+#include <vnet/fib/fib_entry.h> //FIB_SOURCE_PRIORITY_HI
#include <vnet/fib/fib_table.h>
#include <vppinfra/format.h>
-#include <vnet/interface.h> //appif_flags
-#include <vnet/interface_funcs.h> //vnet_sw_interface_set_flags
+#include <vnet/interface.h> //appif_flags
+#include <vnet/interface_funcs.h> //vnet_sw_interface_set_flags
+
+#include <vpp_plugins/hicn/error.h>
#include "address_mgr.h"
#include "../../hicn.h"
#include "../../infra.h"
-#include "../../error.h"
#include "../face.h"
#include "../../strategy_dpo_ctx.h"
#include "../../route.h"
@@ -48,7 +49,7 @@ typedef struct address_mgr_main_s
address_mgr_main_t address_mgr_main;
static void
-increment_v4_address (ip4_address_t * a, u32 val)
+increment_v4_address (ip4_address_t *a, u32 val)
{
u32 v;
@@ -57,7 +58,7 @@ increment_v4_address (ip4_address_t * a, u32 val)
}
static void
-increment_v6_address (ip6_address_t * a, u64 val)
+increment_v6_address (ip6_address_t *a, u64 val)
{
u64 v;
@@ -66,7 +67,7 @@ increment_v6_address (ip6_address_t * a, u64 val)
}
void
-get_two_ip4_addresses (ip4_address_t * appif_addr, ip4_address_t * nh_addr)
+get_two_ip4_addresses (ip4_address_t *appif_addr, ip4_address_t *nh_addr)
{
 /* We want two consecutive addresses that fall into a /31 mask */
if (address_mgr_main.next_ip4_local_addr.as_u8[3] & 0x01)
@@ -85,20 +86,17 @@ get_two_ip4_addresses (ip4_address_t * appif_addr, ip4_address_t * nh_addr)
do
{
/* Check if the route already exist in the fib */
- fib_pfx.fp_addr = to_ip46 ( /* is_v6 */ 0, appif_addr->as_u8);
- fib_index = fib_table_find_or_create_and_lock (fib_pfx.fp_proto,
- HICN_FIB_TABLE,
- FIB_SOURCE_PRIORITY_HI);
+ fib_pfx.fp_addr = to_ip46 (/* is_v6 */ 0, appif_addr->as_u8);
+ fib_index = fib_table_find_or_create_and_lock (
+ fib_pfx.fp_proto, HICN_FIB_TABLE, FIB_SOURCE_PRIORITY_HI);
fib_entry_index = fib_table_lookup_exact_match (fib_index, &fib_pfx);
fib_table_unlock (fib_index, fib_pfx.fp_proto, FIB_SOURCE_PRIORITY_HI);
if (fib_entry_index != FIB_NODE_INDEX_INVALID)
{
- fib_pfx.fp_addr = to_ip46 ( /* is_v6 */ 0, nh_addr->as_u8);
- fib_index = fib_table_find_or_create_and_lock (fib_pfx.fp_proto,
- HICN_FIB_TABLE,
- FIB_SOURCE_PRIORITY_HI);
- fib_entry_index =
- fib_table_lookup_exact_match (fib_index, &fib_pfx);
+ fib_pfx.fp_addr = to_ip46 (/* is_v6 */ 0, nh_addr->as_u8);
+ fib_index = fib_table_find_or_create_and_lock (
+ fib_pfx.fp_proto, HICN_FIB_TABLE, FIB_SOURCE_PRIORITY_HI);
+ fib_entry_index = fib_table_lookup_exact_match (fib_index, &fib_pfx);
fib_table_unlock (fib_index, fib_pfx.fp_proto,
FIB_SOURCE_PRIORITY_HI);
}
@@ -115,7 +113,7 @@ get_two_ip4_addresses (ip4_address_t * appif_addr, ip4_address_t * nh_addr)
}
void
-get_two_ip6_addresses (ip6_address_t * appif_addr, ip6_address_t * nh_addr)
+get_two_ip6_addresses (ip6_address_t *appif_addr, ip6_address_t *nh_addr)
{
 /* We want two consecutive addresses that fall into a /127 mask */
@@ -126,7 +124,6 @@ get_two_ip6_addresses (ip6_address_t * appif_addr, ip6_address_t * nh_addr)
increment_v6_address (&(address_mgr_main.next_ip6_local_addr), 1);
*nh_addr = address_mgr_main.next_ip6_local_addr;
-
fib_prefix_t fib_pfx;
fib_node_index_t fib_entry_index = FIB_NODE_INDEX_INVALID;
u32 fib_index;
@@ -140,18 +137,18 @@ get_two_ip6_addresses (ip6_address_t * appif_addr, ip6_address_t * nh_addr)
do
{
/* Check if the route already exist in the fib */
- fib_pfx.fp_addr = to_ip46 ( /* is_v6 */ 1, appif_addr->as_u8);
+ fib_pfx.fp_addr = to_ip46 (/* is_v6 */ 1, appif_addr->as_u8);
fib_entry_index = fib_table_lookup_exact_match (fib_index, &fib_pfx);
- //fib_table_unlock (fib_index, fib_pfx.fp_proto, FIB_SOURCE_PRIORITY_HI);
+ // fib_table_unlock (fib_index, fib_pfx.fp_proto,
+ // FIB_SOURCE_PRIORITY_HI);
if (fib_entry_index != FIB_NODE_INDEX_INVALID)
{
- fib_pfx.fp_addr = to_ip46 ( /* is_v6 */ 0, nh_addr->as_u8);
+ fib_pfx.fp_addr = to_ip46 (/* is_v6 */ 0, nh_addr->as_u8);
- fib_entry_index =
- fib_table_lookup_exact_match (fib_index, &fib_pfx);
- // fib_table_unlock (fib_index, fib_pfx.fp_proto,
- // FIB_SOURCE_PRIORITY_HI);
+ fib_entry_index = fib_table_lookup_exact_match (fib_index, &fib_pfx);
+ // fib_table_unlock (fib_index, fib_pfx.fp_proto,
+ // FIB_SOURCE_PRIORITY_HI);
}
if (fib_entry_index != FIB_NODE_INDEX_INVALID)
{
@@ -179,10 +176,9 @@ get_ip4_address ()
do
{
/* Check if the route already exist in the fib */
- fib_pfx.fp_addr = to_ip46 ( /* is_v6 */ 0, prefix->as_u8);
- fib_index = fib_table_find_or_create_and_lock (fib_pfx.fp_proto,
- HICN_FIB_TABLE,
- FIB_SOURCE_PRIORITY_HI);
+ fib_pfx.fp_addr = to_ip46 (/* is_v6 */ 0, prefix->as_u8);
+ fib_index = fib_table_find_or_create_and_lock (
+ fib_pfx.fp_proto, HICN_FIB_TABLE, FIB_SOURCE_PRIORITY_HI);
fib_entry_index = fib_table_lookup_exact_match (fib_index, &fib_pfx);
fib_table_unlock (fib_index, fib_pfx.fp_proto, FIB_SOURCE_PRIORITY_HI);
increment_v4_address (prefix, 1);
@@ -206,10 +202,9 @@ get_ip6_address ()
do
{
/* Check if the route already exist in the fib */
- fib_pfx.fp_addr = to_ip46 ( /* is_v6 */ 1, prefix->as_u8);
- fib_index = fib_table_find_or_create_and_lock (fib_pfx.fp_proto,
- HICN_FIB_TABLE,
- FIB_SOURCE_PRIORITY_HI);
+ fib_pfx.fp_addr = to_ip46 (/* is_v6 */ 1, prefix->as_u8);
+ fib_index = fib_table_find_or_create_and_lock (
+ fib_pfx.fp_proto, HICN_FIB_TABLE, FIB_SOURCE_PRIORITY_HI);
fib_entry_index = fib_table_lookup_exact_match (fib_index, &fib_pfx);
fib_table_unlock (fib_index, fib_pfx.fp_proto, FIB_SOURCE_PRIORITY_HI);
increment_v6_address (prefix, 1);
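get_two_ip4_addresses and get_two_ip6_addresses hand out two consecutive addresses whose last bit differs, so both ends of an application face share a /31 (or /127) subnet. A standalone sketch of the pairing logic on plain 32-bit integers, with an illustrative starting address (the plugin additionally checks each candidate against the FIB before using it, as the loops above show):

#include <stdint.h>
#include <stdio.h>

/*
 * Addresses are plain host-order integers here and the starting value is
 * illustrative; the plugin works on ip4_address_t / ip6_address_t.
 */
static uint32_t next_local_addr = 0xc0a80001; /* 192.168.0.1 */

static void
get_ip4_pair (uint32_t *appif_addr, uint32_t *nh_addr)
{
  /* If the next candidate is odd, skip ahead so the pair starts on an even
   * address: the two results then differ only in the last bit (same /31). */
  if (next_local_addr & 0x1)
    next_local_addr++;

  *appif_addr = next_local_addr++;
  *nh_addr = next_local_addr++;
}

static void
print_ip4 (uint32_t a)
{
  printf ("%u.%u.%u.%u", (unsigned) (a >> 24) & 0xff,
          (unsigned) (a >> 16) & 0xff, (unsigned) (a >> 8) & 0xff,
          (unsigned) a & 0xff);
}

int
main (void)
{
  uint32_t appif, nh;

  get_ip4_pair (&appif, &nh);
  print_ip4 (appif);
  printf (" and ");
  print_ip4 (nh);
  printf (" share a /31\n");
  return 0;
}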
diff --git a/hicn-plugin/src/faces/app/address_mgr.h b/hicn-plugin/src/faces/app/address_mgr.h
index 99450dcdd..32545f8ca 100644
--- a/hicn-plugin/src/faces/app/address_mgr.h
+++ b/hicn-plugin/src/faces/app/address_mgr.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Copyright (c) 2021 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -25,9 +25,9 @@
* an interface.
*/
-#define ADDR_MGR_IP4_LEN 32
+#define ADDR_MGR_IP4_LEN 32
#define ADDR_MGR_IP4_CONS_LEN 31
-#define ADDR_MGR_IP6_LEN 128
+#define ADDR_MGR_IP6_LEN 128
#define ADDR_MGR_IP6_CONS_LEN 127
/**
@@ -36,7 +36,7 @@
* @param addr1 first ip address with the least significant bit set to 0
* @param addr2 second ip address with the least significant bit set to 1
*/
-void get_two_ip4_addresses (ip4_address_t * addr1, ip4_address_t * addr2);
+void get_two_ip4_addresses (ip4_address_t *addr1, ip4_address_t *addr2);
/**
* @brief Get two consecutive IP v6 addresses from the same /126 subnet
@@ -44,7 +44,7 @@ void get_two_ip4_addresses (ip4_address_t * addr1, ip4_address_t * addr2);
* @param addr1 first ip address with the least significant bit set to 0
* @param addr2 second ip address with the least significant bit set to 1
*/
-void get_two_ip6_addresses (ip6_address_t * addr1, ip6_address_t * addr2);
+void get_two_ip6_addresses (ip6_address_t *addr1, ip6_address_t *addr2);
/**
* @brief Get one IP v4 address
diff --git a/hicn-plugin/src/faces/app/face_app_cli.c b/hicn-plugin/src/faces/app/face_app_cli.c
index 1aa27adc7..50e6ae71b 100644
--- a/hicn-plugin/src/faces/app/face_app_cli.c
+++ b/hicn-plugin/src/faces/app/face_app_cli.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Copyright (c) 2021 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -17,20 +17,21 @@
#include <vnet/dpo/dpo.h>
#include <vlib/vlib.h>
#include <vnet/ip/ip6_packet.h>
+#include <vnet/ip/format.h>
-//#include "../face_dpo.h"
+#include "../../params.h"
#include "../face.h"
#include "face_prod.h"
#include "face_cons.h"
-#define HICN_FACE_NONE 0
+#define HICN_FACE_NONE 0
#define HICN_FACE_DELETE 1
-#define HICN_FACE_ADD 2
+#define HICN_FACE_ADD 2
static clib_error_t *
-hicn_face_app_cli_set_command_fn (vlib_main_t * vm,
- unformat_input_t * main_input,
- vlib_cli_command_t * cmd)
+hicn_face_app_cli_set_command_fn (vlib_main_t *vm,
+ unformat_input_t *main_input,
+ vlib_cli_command_t *cmd)
{
vnet_main_t *vnm = vnet_get_main ();
fib_prefix_t prefix;
@@ -42,7 +43,6 @@ hicn_face_app_cli_set_command_fn (vlib_main_t * vm,
int face_op = HICN_FACE_NONE;
int prod = 0;
-
/* Get a line of input. */
unformat_input_t _line_input, *line_input = &_line_input;
if (!unformat_user (main_input, unformat_line_input, line_input))
@@ -56,8 +56,8 @@ hicn_face_app_cli_set_command_fn (vlib_main_t * vm,
{
face_op = HICN_FACE_DELETE;
}
- else if (face_op == HICN_FACE_DELETE
- && unformat (line_input, "id %d", &face_id1))
+ else if (face_op == HICN_FACE_DELETE &&
+ unformat (line_input, "id %d", &face_id1))
;
else if (unformat (line_input, "add"))
{
@@ -65,13 +65,12 @@ hicn_face_app_cli_set_command_fn (vlib_main_t * vm,
}
else if (face_op == HICN_FACE_ADD)
{
- if (unformat (line_input, "intfc %U",
- unformat_vnet_sw_interface, vnm, &sw_if))
+ if (unformat (line_input, "intfc %U", unformat_vnet_sw_interface,
+ vnm, &sw_if))
;
- else
- if (unformat
- (line_input, "prod prefix %U/%d", unformat_ip46_address,
- &prefix.fp_addr, IP46_TYPE_ANY, &prefix.fp_len))
+ else if (unformat (line_input, "prod prefix %U/%d",
+ unformat_ip46_address, &prefix.fp_addr,
+ IP46_TYPE_ANY, &prefix.fp_len))
{
prod = 1;
}
@@ -81,10 +80,9 @@ hicn_face_app_cli_set_command_fn (vlib_main_t * vm,
;
else
{
- return clib_error_return (0, "%s '%U'",
- get_error_string
- (HICN_ERROR_CLI_INVAL),
- format_unformat_error, line_input);
+ return clib_error_return (
+ 0, "%s '%U'", get_error_string (HICN_ERROR_CLI_INVAL),
+ format_unformat_error, line_input);
}
}
else
@@ -116,20 +114,17 @@ hicn_face_app_cli_set_command_fn (vlib_main_t * vm,
if (prod)
{
- prefix.fp_proto =
- ip46_address_is_ip4 (&prefix.
- fp_addr) ? FIB_PROTOCOL_IP4 :
- FIB_PROTOCOL_IP6;
- rv =
- hicn_face_prod_add (&prefix, sw_if, &cs_reserved, &prod_addr,
- &face_id1);
+ prefix.fp_proto = ip46_address_is_ip4 (&prefix.fp_addr) ?
+ FIB_PROTOCOL_IP4 :
+ FIB_PROTOCOL_IP6;
+ rv = hicn_face_prod_add (&prefix, sw_if, &cs_reserved, &prod_addr,
+ &face_id1);
if (rv == HICN_ERROR_NONE)
{
u8 *sbuf = NULL;
- sbuf =
- format (sbuf, "Face id: %d, producer address %U", face_id1,
- format_ip46_address, &prod_addr,
- 0 /*IP46_ANY_TYPE */ );
+ sbuf = format (sbuf, "Face id: %d, producer address %U",
+ face_id1, format_ip46_address, &prod_addr,
+ 0 /*IP46_ANY_TYPE */);
vlib_cli_output (vm, "%s", sbuf);
}
else
@@ -139,17 +134,16 @@ hicn_face_app_cli_set_command_fn (vlib_main_t * vm,
}
else
{
- rv =
- hicn_face_cons_add (&cons_addr4, &cons_addr6, sw_if, &face_id1,
- &face_id2);
+ rv = hicn_face_cons_add (&cons_addr4, &cons_addr6, sw_if,
+ &face_id1, &face_id2);
if (rv == HICN_ERROR_NONE)
{
u8 *sbuf = NULL;
- sbuf =
- format (sbuf,
- "Face id: %d, address v4 %U, face id: %d address v6 %U",
- face_id1, format_ip4_address, &cons_addr4, face_id2,
- format_ip6_address, &cons_addr6);
+ sbuf = format (
+ sbuf,
+ "Face id: %d, address v4 %U, face id: %d address v6 %U",
+ face_id1, format_ip4_address, &cons_addr4, face_id2,
+ format_ip6_address, &cons_addr6);
vlib_cli_output (vm, "%s", sbuf);
}
else
@@ -181,20 +175,18 @@ hicn_face_app_cli_set_command_fn (vlib_main_t * vm,
return clib_error_return (0, "Operation (%d) not implemented", face_op);
break;
}
- return (rv == HICN_ERROR_NONE) ? 0 : clib_error_return (0, "%s\n",
- get_error_string
- (rv));
+ return (rv == HICN_ERROR_NONE) ?
+ 0 :
+ clib_error_return (0, "%s\n", get_error_string (rv));
}
/* cli declaration for 'cfg face' */
-/* *INDENT-OFF* */
-VLIB_CLI_COMMAND (hicn_face_app_cli_set_command, static) =
-{
+VLIB_CLI_COMMAND (hicn_face_app_cli_set_command, static) = {
.path = "hicn face app",
- .short_help = "hicn face app {add intfc <sw_if> { prod prefix <hicn_prefix> cs_size <size_in_packets>} {cons} | {del <face_id>}",
+ .short_help = "hicn face app {add intfc <sw_if> { prod prefix <hicn_prefix> "
+ "cs_size <size_in_packets>} {cons} | {del <face_id>}",
.function = hicn_face_app_cli_set_command_fn,
};
-/* *INDENT-ON* */
/*
* fd.io coding-style-patch-verification: ON
diff --git a/hicn-plugin/src/faces/app/face_cons.c b/hicn-plugin/src/faces/app/face_cons.c
index d44ba1a2b..edb03387d 100644
--- a/hicn-plugin/src/faces/app/face_cons.c
+++ b/hicn-plugin/src/faces/app/face_cons.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Copyright (c) 2021 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -22,9 +22,8 @@
#include "../../infra.h"
int
-hicn_face_cons_add (ip4_address_t * nh_addr4, ip6_address_t * nh_addr6,
- u32 swif, hicn_face_id_t * faceid1,
- hicn_face_id_t * faceid2)
+hicn_face_cons_add (ip4_address_t *nh_addr4, ip6_address_t *nh_addr6, u32 swif,
+ hicn_face_id_t *faceid1, hicn_face_id_t *faceid2)
{
/* Create the corresponding appif if */
/* Retrieve a valid local ip address to assign to the appif */
@@ -49,32 +48,33 @@ hicn_face_cons_add (ip4_address_t * nh_addr4, ip6_address_t * nh_addr6,
if_flags |= VNET_SW_INTERFACE_FLAG_ADMIN_UP;
vnet_sw_interface_set_flags (vnm, swif, if_flags);
+#if 0
get_two_ip4_addresses (&(if_ip.ip4), nh_addr4);
- ip4_add_del_interface_address (vm,
- swif,
- &(if_ip.ip4),
- ADDR_MGR_IP4_CONS_LEN, 0 /* is_del */ );
+ ip4_add_del_interface_address (vm, swif, &(if_ip.ip4), ADDR_MGR_IP4_CONS_LEN,
+ 0 /* is_del */);
ip46_address_t nh_addr = to_ip46 (0, (u8 *) nh_addr4);
- index_t adj_index = adj_nbr_find(FIB_PROTOCOL_IP4, VNET_LINK_IP4, &nh_addr, swif);
+ index_t adj_index =
+ adj_nbr_find (FIB_PROTOCOL_IP4, VNET_LINK_IP4, &nh_addr, swif);
hicn_iface_add (&nh_addr, swif, faceid1, DPO_PROTO_IP4, adj_index);
hicn_face_t *face = hicn_dpoi_get_from_idx (*faceid1);
face->flags |= HICN_FACE_FLAGS_APPFACE_CONS;
+#endif
get_two_ip6_addresses (&(if_ip.ip6), nh_addr6);
- ip6_add_del_interface_address (vm,
- swif,
- &(if_ip.ip6),
- ADDR_MGR_IP6_CONS_LEN, 0 /* is_del */ );
+ ip6_add_del_interface_address (vm, swif, &(if_ip.ip6), ADDR_MGR_IP6_CONS_LEN,
+ 0 /* is_del */);
- adj_index = adj_nbr_find(FIB_PROTOCOL_IP6, VNET_LINK_IP6, &nh_addr, swif);
+ ip46_address_t nh_addr = to_ip46 (0, (u8 *) nh_addr6);
+ index_t adj_index =
+ adj_nbr_find (FIB_PROTOCOL_IP6, VNET_LINK_IP6, &nh_addr, swif);
- hicn_iface_add ((ip46_address_t *) nh_addr6, swif, faceid2, DPO_PROTO_IP6, adj_index);
+ hicn_iface_add ((ip46_address_t *) nh_addr6, swif, faceid2, adj_index, 0);
- face = hicn_dpoi_get_from_idx (*faceid2);
+ hicn_face_t *face = hicn_dpoi_get_from_idx (*faceid2);
face->flags |= HICN_FACE_FLAGS_APPFACE_CONS;
return HICN_ERROR_NONE;
@@ -90,7 +90,7 @@ hicn_face_cons_del (hicn_face_id_t face_id)
if (face->flags & HICN_FACE_FLAGS_APPFACE_CONS)
{
- return hicn_face_del (face_id);
+ return hicn_face_unlock_with_id (face_id);
}
else
{
@@ -99,7 +99,7 @@ hicn_face_cons_del (hicn_face_id_t face_id)
}
u8 *
-format_hicn_face_cons (u8 * s, va_list * args)
+format_hicn_face_cons (u8 *s, va_list *args)
{
CLIB_UNUSED (index_t index) = va_arg (*args, index_t);
CLIB_UNUSED (u32 indent) = va_arg (*args, u32);
diff --git a/hicn-plugin/src/faces/app/face_cons.h b/hicn-plugin/src/faces/app/face_cons.h
index 5f8f5dde8..9df6f773f 100644
--- a/hicn-plugin/src/faces/app/face_cons.h
+++ b/hicn-plugin/src/faces/app/face_cons.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Copyright (c) 2021 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -28,8 +28,8 @@
* consumer application (co-located with the forwarder) that acts as a
* consumer. The interface used by the consumer application face is
* assumed to be reserved only for hICN traffic (e.g., dedicated memif that
- * connects the applictation to the forwarder). Only one application face can be
- * assigned to an interface.
+ * connects the application to the forwarder). Only one application face can
+ * be assigned to an interface.
*
* In the vlib graph a consumer application face directly connect the
* device-input node to the hicn-vface-ip node.
@@ -38,17 +38,17 @@
/**
* @brief Add a new consumer application face
*
- * The method creates the internal ip face and set the ip address to the interface.
- * @param nh_addr4 ipv4 address to assign to interface used by the application to
- * send interest to the consumer face
- * @param nh_addr6 ipv6 address to assign to interface used by the application to
- * send interest to the consumer face
+ * The method creates the internal ip face and sets the ip address on the
+ * interface.
+ * @param nh_addr4 ipv4 address to assign to interface used by the application
+ * to send interest to the consumer face
+ * @param nh_addr6 ipv6 address to assign to interface used by the application
+ * to send interest to the consumer face
* @param swif interface associated to the face
*/
-int
-hicn_face_cons_add (ip4_address_t * nh_addr4, ip6_address_t * nh_addr6,
- u32 swif, hicn_face_id_t * faceid1,
- hicn_face_id_t * faceid2);
+int hicn_face_cons_add (ip4_address_t *nh_addr4, ip6_address_t *nh_addr6,
+ u32 swif, hicn_face_id_t *faceid1,
+ hicn_face_id_t *faceid2);
/**
* @brief Delete an existing consumer application face
@@ -64,8 +64,7 @@ int hicn_face_cons_del (hicn_face_id_t face_id);
* @param args Array storing input values. Expected u32 face_id and u32 indent
* @return String with the formatted face
*/
-u8 *format_hicn_face_cons (u8 * s, va_list * args);
-
+u8 *format_hicn_face_cons (u8 *s, va_list *args);
#endif /* _FACE_CONSUMER_H_ */
diff --git a/hicn-plugin/src/faces/app/face_prod.c b/hicn-plugin/src/faces/app/face_prod.c
index 645154325..5d0fa727b 100644
--- a/hicn-plugin/src/faces/app/face_prod.c
+++ b/hicn-plugin/src/faces/app/face_prod.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Copyright (c) 2021-2023 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -17,6 +17,7 @@
#include <vlib/vlib.h>
#include <vnet/vnet.h>
#include <vnet/interface_funcs.h>
+#include <vppinfra/pool.h>
#include "face_prod.h"
#include "address_mgr.h"
@@ -24,36 +25,30 @@
#include "../../route.h"
#include "../../cache_policies/cs_lru.h"
+#define INITIAL_POOL_SIZE 16
hicn_face_prod_state_t *face_state_vec;
/* used to check if an interface is already in the vector */
u32 *face_state_pool;
static int
-hicn_app_state_create (u32 swif, fib_prefix_t * prefix)
+hicn_app_state_create (u32 swif, index_t adj_index, fib_prefix_t *prefix)
{
- /* Make sure that the pool is not empty */
- pool_validate_index (face_state_pool, 0);
-
u32 *swif_app;
u8 found = 0;
- /* *INDENT-OFF* */
- pool_foreach (swif_app, face_state_pool,{
- if (*swif_app == swif)
- {
- found = 1;
- }
- }
- );
- /* *INDENT-ON* */
+ pool_foreach (swif_app, face_state_pool)
+ if (*swif_app == swif)
+ {
+ found = 1;
+ }
if (found)
return HICN_ERROR_APPFACE_ALREADY_ENABLED;
-
/* Create the appif and store in the vector */
vec_validate (face_state_vec, swif);
+ face_state_vec[swif].adj_index = adj_index;
clib_memcpy (&(face_state_vec[swif].prefix), prefix, sizeof (fib_prefix_t));
/* Set as busy the element in the vector */
@@ -63,15 +58,13 @@ hicn_app_state_create (u32 swif, fib_prefix_t * prefix)
int ret = HICN_ERROR_NONE;
if (ip46_address_is_ip4 (&(prefix->fp_addr)))
{
- ret =
- vnet_feature_enable_disable ("ip4-unicast", "hicn-face-prod-input",
- swif, 1, 0, 0);
+ ret = vnet_feature_enable_disable ("ip4-unicast", "hicn-face-prod-input",
+ swif, 1, 0, 0);
}
else
{
- ret =
- vnet_feature_enable_disable ("ip6-unicast", "hicn-face-prod-input",
- swif, 1, 0, 0);
+ ret = vnet_feature_enable_disable ("ip6-unicast", "hicn-face-prod-input",
+ swif, 1, 0, 0);
}
return ret == 0 ? HICN_ERROR_NONE : HICN_ERROR_APPFACE_FEATURE;
@@ -80,23 +73,16 @@ hicn_app_state_create (u32 swif, fib_prefix_t * prefix)
static int
hicn_app_state_del (u32 swif)
{
- /* Make sure that the pool is not empty */
- pool_validate_index (face_state_pool, 0);
-
u32 *temp;
u32 *swif_app = NULL;
u8 found = 0;
fib_prefix_t *prefix;
- /* *INDENT-OFF* */
- pool_foreach (temp, face_state_pool,{
- if (*temp == swif)
- {
- found = 1;
- swif_app = temp;
- }
- }
- );
- /* *INDENT-ON* */
+ pool_foreach (temp, face_state_pool)
+ if (*temp == swif)
+ {
+ found = 1;
+ swif_app = temp;
+ }
if (!found)
return HICN_ERROR_APPFACE_NOT_FOUND;
@@ -106,15 +92,13 @@ hicn_app_state_del (u32 swif)
int ret = HICN_ERROR_NONE;
if (ip46_address_is_ip4 (&prefix->fp_addr))
{
- ret =
- vnet_feature_enable_disable ("ip4-unicast", "hicn-face-prod-input",
- swif, 0, 0, 0);
+ ret = vnet_feature_enable_disable ("ip4-unicast", "hicn-face-prod-input",
+ swif, 0, 0, 0);
}
else
{
- ret =
- vnet_feature_enable_disable ("ip6-unicast", "hicn-face-prod-input",
- swif, 0, 0, 0);
+ ret = vnet_feature_enable_disable ("ip6-unicast", "hicn-face-prod-input",
+ swif, 0, 0, 0);
}
pool_put (face_state_pool, swif_app);
@@ -124,16 +108,16 @@ hicn_app_state_del (u32 swif)
}
int
-hicn_face_prod_add (fib_prefix_t * prefix, u32 sw_if, u32 * cs_reserved,
- ip46_address_t * prod_addr, hicn_face_id_t * faceid)
+hicn_face_prod_add (fib_prefix_t *prefix, u32 sw_if, u32 *cs_reserved,
+ ip46_address_t *prod_addr, hicn_face_id_t *faceid)
{
vlib_main_t *vm = vlib_get_main ();
vnet_main_t *vnm = vnet_get_main ();
hicn_main_t *hm = &hicn_main;
- ip46_address_t local_app_ip;
- CLIB_UNUSED(ip46_address_t remote_app_ip);
+ ip46_address_t local_app_ip = { .as_u64 = { 0, 0 } };
+ CLIB_UNUSED (ip46_address_t remote_app_ip);
u32 if_flags = 0;
if (!hm->is_enabled)
@@ -152,25 +136,28 @@ hicn_face_prod_add (fib_prefix_t * prefix, u32 sw_if, u32 * cs_reserved,
if_flags |= VNET_SW_INTERFACE_FLAG_ADMIN_UP;
vnet_sw_interface_set_flags (vnm, sw_if, if_flags);
+#ifdef HICN_DDEBUG
u8 *s0;
s0 = format (0, "Prefix %U", format_fib_prefix, prefix);
-
- vlib_cli_output (vm, "Received request for %s, swif %d\n", s0, sw_if);
+ HICN_DEBUG ("Received request for %s, swif %d\n", s0, sw_if);
+#endif
if (ip46_address_is_zero (&prefix->fp_addr))
{
return HICN_ERROR_APPFACE_PROD_PREFIX_NULL;
}
- u8 isv6 = ip46_address_is_ip4(prod_addr);
- index_t adj_index = adj_nbr_find(isv6 ? FIB_PROTOCOL_IP6 : FIB_PROTOCOL_IP4, isv6 ? VNET_LINK_IP6 : VNET_LINK_IP4, prod_addr, sw_if);
+ u8 isv6 = ip46_address_is_ip4 (prod_addr);
+ index_t adj_index =
+ adj_nbr_find (isv6 ? FIB_PROTOCOL_IP6 : FIB_PROTOCOL_IP4,
+ isv6 ? VNET_LINK_IP6 : VNET_LINK_IP4, prod_addr, sw_if);
/*
* Check if a producer face is already existing for the same prefix
* and sw_if
*/
- face = hicn_face_get (&(prefix->fp_addr), sw_if,
- &hicn_face_hashtb, adj_index);
+ face =
+ hicn_face_get (&(prefix->fp_addr), sw_if, &hicn_face_hashtb, adj_index);
if (face != NULL)
{
@@ -207,13 +194,13 @@ hicn_face_prod_add (fib_prefix_t * prefix, u32 sw_if, u32 * cs_reserved,
ip4_address_t local_app_ip4;
ip4_address_t remote_app_ip4;
get_two_ip4_addresses (&local_app_ip4, &remote_app_ip4);
- ip4_add_del_interface_address (vm,
- sw_if,
- &local_app_ip4, 31, 0 /* is_del */ );
- local_app_ip = to_ip46 ( /* isv6 */ 0, local_app_ip4.as_u8);
- remote_app_ip = to_ip46 ( /* isv6 */ 0, remote_app_ip4.as_u8);
+ ip4_add_del_interface_address (vm, sw_if, &local_app_ip4, 31,
+ 0 /* is_del */);
+ local_app_ip = to_ip46 (/* isv6 */ 0, local_app_ip4.as_u8);
+ remote_app_ip = to_ip46 (/* isv6 */ 0, remote_app_ip4.as_u8);
- vnet_build_rewrite_for_sw_interface(vnm, sw_if, VNET_LINK_IP4, &remote_app_ip4);
+ vnet_build_rewrite_for_sw_interface (vnm, sw_if, VNET_LINK_IP4,
+ &remote_app_ip4);
}
else
{
@@ -225,80 +212,90 @@ hicn_face_prod_add (fib_prefix_t * prefix, u32 sw_if, u32 * cs_reserved,
vlib_cli_output (vm, "Setting ip address %s\n", s0);
- ip6_add_del_interface_address (vm,
- sw_if,
- &local_app_ip6, 127,
- 0 /* is_del */ );
- local_app_ip = to_ip46 ( /* isv6 */ 1, local_app_ip6.as_u8);
- remote_app_ip = to_ip46 ( /* isv6 */ 1, remote_app_ip6.as_u8);
+ ip6_add_del_interface_address (vm, sw_if, &local_app_ip6, 127,
+ 0 /* is_del */);
+ local_app_ip = to_ip46 (/* isv6 */ 1, local_app_ip6.as_u8);
+ remote_app_ip = to_ip46 (/* isv6 */ 1, remote_app_ip6.as_u8);
}
}
if (ret == HICN_ERROR_NONE)
- // && hicn_face_prod_set_lru_max (*faceid, cs_reserved) == HICN_ERROR_NONE)
+ // && hicn_face_prod_set_lru_max (*faceid, cs_reserved) ==
+ // HICN_ERROR_NONE)
{
- fib_route_path_t rpath = {0};
- fib_route_path_t * rpaths = NULL;
-
- if (ip46_address_is_ip4(&(prefix->fp_addr)))
- {
- ip4_address_t mask;
- ip4_preflen_to_mask (prefix->fp_len, &mask);
- prefix->fp_addr.ip4.as_u32 = prefix->fp_addr.ip4.as_u32 & mask.as_u32;
- prefix->fp_proto = FIB_PROTOCOL_IP4;
-
- rpath.frp_weight = 1;
- rpath.frp_sw_if_index = ~0;
- rpath.frp_addr.ip4.as_u32 = remote_app_ip.ip4.as_u32;
- rpath.frp_sw_if_index = sw_if;
- rpath.frp_proto = DPO_PROTO_IP4;
-
- vec_add1 (rpaths, rpath);
- }
+ fib_route_path_t rpath = { 0 };
+ fib_route_path_t *rpaths = NULL;
+
+ if (ip46_address_is_ip4 (&(prefix->fp_addr)))
+ {
+ ip4_address_t mask;
+ ip4_preflen_to_mask (prefix->fp_len, &mask);
+ prefix->fp_addr.ip4.as_u32 =
+ prefix->fp_addr.ip4.as_u32 & mask.as_u32;
+ prefix->fp_proto = FIB_PROTOCOL_IP4;
+
+ rpath.frp_weight = 1;
+ rpath.frp_sw_if_index = ~0;
+ rpath.frp_addr.ip4.as_u32 = remote_app_ip.ip4.as_u32;
+ rpath.frp_sw_if_index = sw_if;
+ rpath.frp_proto = DPO_PROTO_IP4;
+
+ vec_add1 (rpaths, rpath);
+ }
else
- {
- ip6_address_t mask;
- ip6_preflen_to_mask (prefix->fp_len, &mask);
- prefix->fp_addr.ip6.as_u64[0] =
- prefix->fp_addr.ip6.as_u64[0] & mask.as_u64[0];
- prefix->fp_addr.ip6.as_u64[1] =
- prefix->fp_addr.ip6.as_u64[1] & mask.as_u64[1];
- prefix->fp_proto = FIB_PROTOCOL_IP6;
-
- rpath.frp_weight = 1;
- rpath.frp_sw_if_index = ~0;
- rpath.frp_addr.ip6.as_u64[0] = remote_app_ip.ip6.as_u64[0];
- rpath.frp_addr.ip6.as_u64[1] = remote_app_ip.ip6.as_u64[1];
- rpath.frp_sw_if_index = sw_if;
- rpath.frp_proto = DPO_PROTO_IP6;
-
- vec_add1 (rpaths, rpath);
- }
+ {
+ ip6_address_t mask;
+ ip6_preflen_to_mask (prefix->fp_len, &mask);
+ prefix->fp_addr.ip6.as_u64[0] =
+ prefix->fp_addr.ip6.as_u64[0] & mask.as_u64[0];
+ prefix->fp_addr.ip6.as_u64[1] =
+ prefix->fp_addr.ip6.as_u64[1] & mask.as_u64[1];
+ prefix->fp_proto = FIB_PROTOCOL_IP6;
+
+ rpath.frp_weight = 1;
+ rpath.frp_sw_if_index = ~0;
+ rpath.frp_addr.ip6.as_u64[0] = remote_app_ip.ip6.as_u64[0];
+ rpath.frp_addr.ip6.as_u64[1] = remote_app_ip.ip6.as_u64[1];
+ rpath.frp_sw_if_index = sw_if;
+ rpath.frp_proto = DPO_PROTO_IP6;
+
+ vec_add1 (rpaths, rpath);
+ }
u32 fib_index = fib_table_find (prefix->fp_proto, 0);
- fib_table_entry_path_add2 (fib_index,
- prefix,
- FIB_SOURCE_CLI,
- FIB_ENTRY_FLAG_NONE, rpaths);
+ fib_table_entry_path_add2 (fib_index, prefix, FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE, rpaths);
+
+ HICN_DEBUG ("Calling hicn enable for producer face");
+
+ hicn_face_id_t *vec_faces = NULL;
+ fib_node_index_t hicn_fib_node_index;
+ hicn_route_enable (prefix, &hicn_fib_node_index, &vec_faces);
+ if (vec_faces != NULL)
+ vec_free (vec_faces);
- hicn_route_enable(prefix);
- hicn_app_state_create (sw_if, prefix);
+ adj_index =
+ adj_nbr_find (isv6 ? FIB_PROTOCOL_IP6 : FIB_PROTOCOL_IP4,
+ isv6 ? VNET_LINK_IP6 : VNET_LINK_IP4, prod_addr, sw_if);
+
+ hicn_app_state_create (sw_if, adj_index, prefix);
}
- adj_index = adj_nbr_find(isv6 ? FIB_PROTOCOL_IP6 : FIB_PROTOCOL_IP4, isv6 ? VNET_LINK_IP6 : VNET_LINK_IP4, prod_addr, sw_if);
- face = hicn_face_get(&local_app_ip, sw_if, &hicn_face_hashtb, adj_index);//HICN_FACE_FLAGS_APPFACE_PROD);
+ face = hicn_face_get (&local_app_ip, sw_if, &hicn_face_hashtb, adj_index);
+ assert (face);
*faceid = hicn_dpoi_get_index (face);
face->flags |= HICN_FACE_FLAGS_APPFACE_PROD;
- hicn_face_unlock_with_id(*faceid);
+ hicn_face_unlock_with_id (*faceid);
*prod_addr = local_app_ip;
/* Cleanup in case of something went wrong. */
if (ret)
{
+ HICN_ERROR ("Somethig went wrong while adding producer face. Cleanup.");
hicn_app_state_del (sw_if);
}
return ret;
@@ -314,17 +311,32 @@ hicn_face_prod_del (hicn_face_id_t face_id)
if (face->flags & HICN_FACE_FLAGS_APPFACE_PROD)
{
- /* Remove the face from the fib */
- hicn_route_disable(&(face_state_vec[face->sw_if].prefix));
- //hicn_route_del_nhop (&(face_state_vec[face->sw_if].prefix),
- // face_id);
-
- //int ret = hicn_face_del (face_id);
- return hicn_app_state_del (face->sw_if);
- //ret == HICN_ERROR_NONE ? hicn_app_state_del (face->sw_if) : ret;
+ /* Remove the face from the hicn fib */
+ fib_prefix_t *prefix = &(face_state_vec[face->sw_if].prefix);
+ HICN_DEBUG ("Calling hicn_route_disable from hicn_face_prod_del");
+ int ret = hicn_route_disable (prefix);
+ if (ret)
+ {
+ vlib_main_t *vm = vlib_get_main ();
+ vlib_cli_output (vm, "Error disabling route: %s",
+ get_error_string (ret));
+ }
+ /* Also remove it from main fib, as we are the owners of this prefix */
+ u32 fib_index = fib_table_find (prefix->fp_proto, 0);
+ fib_table_entry_special_remove (fib_index, prefix, FIB_SOURCE_API);
+ ret = hicn_app_state_del (face->sw_if);
+ if (ret)
+ {
+ vlib_main_t *vm = vlib_get_main ();
+ vlib_cli_output (vm, "Error deelting app state: %s",
+ get_error_string (ret));
+ }
}
else
{
+ vlib_main_t *vm = vlib_get_main ();
+ vlib_cli_output (vm, "APPFACE not found.",
+ get_error_string (HICN_ERROR_APPFACE_NOT_FOUND));
return HICN_ERROR_APPFACE_NOT_FOUND;
}
@@ -332,35 +344,34 @@ hicn_face_prod_del (hicn_face_id_t face_id)
}
u8 *
-format_hicn_face_prod (u8 * s, va_list * args)
+format_hicn_face_prod (u8 *s, va_list *args)
{
CLIB_UNUSED (index_t index) = va_arg (*args, index_t);
CLIB_UNUSED (u32 indent) = va_arg (*args, u32);
- s =
- format (s, " (producer)");
+ s = format (s, " (producer)");
return s;
}
-/* *INDENT-OFF* */
-VNET_FEATURE_INIT(hicn_prod_app_input_ip6, static)=
+void
+hicn_face_prod_init ()
{
+ /* Make sure that the pool is not empty */
+ pool_alloc (face_state_pool, INITIAL_POOL_SIZE);
+}
+
+VNET_FEATURE_INIT (hicn_prod_app_input_ip6, static) = {
.arc_name = "ip6-unicast",
.node_name = "hicn-face-prod-input",
- .runs_before = VNET_FEATURES("ip6-inacl"),
+ .runs_before = VNET_FEATURES ("ip6-inacl"),
};
-/* *INDENT-ON* */
-/* *INDENT-OFF* */
-VNET_FEATURE_INIT(hicn_prod_app_input_ip4, static)=
-{
+VNET_FEATURE_INIT (hicn_prod_app_input_ip4, static) = {
.arc_name = "ip4-unicast",
.node_name = "hicn-face-prod-input",
- .runs_before = VNET_FEATURES("ip4-inacl"),
+ .runs_before = VNET_FEATURES ("ip4-inacl"),
};
-/* *INDENT-ON* */
-
/*
* fd.io coding-style-patch-verification: ON
diff --git a/hicn-plugin/src/faces/app/face_prod.h b/hicn-plugin/src/faces/app/face_prod.h
index 4cb2e3fbf..46bdf7c24 100644
--- a/hicn-plugin/src/faces/app/face_prod.h
+++ b/hicn-plugin/src/faces/app/face_prod.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Copyright (c) 2021-2023 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -25,12 +25,12 @@
* @brief Producer application face.
*
* A producer application face is built upon an ip face and identifies a local
- * producer application (co-located with the forwarder) that acts as a producer. In the
- * current design an application face is either a face towards a consumer face
- * or towards a producer. The interface used by the producer application face is
- * assumed to be reserved only for hICN traffic (e.g., dedicated memif that
- * connects the applictation to the forwarder). Only one application face can be
- * assigned to an interface.
+ * producer application (co-located with the forwarder) that acts as a
+ * producer. In the current design an application face is either a face
+ * towards a consumer or towards a producer. The interface used by the
+ * producer application face is assumed to be reserved only for hICN traffic
+ * (e.g., a dedicated memif that connects the application to the forwarder). Only one
+ * application face can be assigned to an interface.
*
* To each producer application face it is assigned a portion of the CS. Every
* data arriving to a producer application will be stored in the portion of the
@@ -38,18 +38,19 @@
* face. Available eviction policies are listed in the /cache_policy folder.
*
* In the vlib graph a producer application face is directly connected to the
- * device-input node (with the node hicn-face-prod-input) and passes every packet to
- * the hicn-face-ip node.
+ * device-input node (with the node hicn-face-prod-input) and passes every
+ * packet to the hicn-face-ip node.
*/
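As a usage illustration (not part of this patch), the producer-face API declared in this header could be driven roughly as follows. This is a sketch only: the prefix, interface index and error handling are hypothetical, and the surrounding file's includes are assumed.

static int
example_create_producer_face (u32 swif)
{
  /* Hypothetical prefix b001::/64; values are illustrative only. */
  fib_prefix_t prefix = { 0 };
  prefix.fp_proto = FIB_PROTOCOL_IP6;
  prefix.fp_len = 64;
  prefix.fp_addr.ip6.as_u64[0] = clib_host_to_net_u64 (0xb001000000000000ULL);

  u32 cs_reserved = 0;
  ip46_address_t prod_addr;
  hicn_face_id_t faceid;

  int rv =
    hicn_face_prod_add (&prefix, swif, &cs_reserved, &prod_addr, &faceid);
  if (rv != HICN_ERROR_NONE)
    return rv;

  /* ... the application sends data through this interface ... */

  return hicn_face_prod_del (faceid);
}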
/**
- * @brief Producer application face state that refer to the hICN producer socket
- * created by the application.
+ * @brief Producer application face state that refers to the hICN producer
+ * socket created by the application.
*
*/
typedef struct
{
fib_prefix_t prefix;
+ index_t adj_index;
} hicn_face_prod_state_t;
extern hicn_face_prod_state_t *face_state_vec;
@@ -69,9 +70,8 @@ extern hicn_face_prod_state_t *face_state_vec;
* @param prod_addr address to assign to interface used by the appliction to
* send data to the producer face
*/
-int
-hicn_face_prod_add (fib_prefix_t * prefix, u32 swif, u32 * cs_reserved,
- ip46_address_t * prod_addr, hicn_face_id_t * faceid);
+int hicn_face_prod_add (fib_prefix_t *prefix, u32 swif, u32 *cs_reserved,
+ ip46_address_t *prod_addr, hicn_face_id_t *faceid);
/**
* @brief Delete an existing application face
@@ -85,7 +85,7 @@ int hicn_face_prod_del (hicn_face_id_t faceid);
*
* @param face_id Id of the producer application face
*/
-int hicn_face_prod_set_lru_max (hicn_face_id_t face_id, u32 * requested_size);
+int hicn_face_prod_set_lru_max (hicn_face_id_t face_id, u32 *requested_size);
/**
* @brief Format an application producer face
@@ -94,8 +94,12 @@ int hicn_face_prod_set_lru_max (hicn_face_id_t face_id, u32 * requested_size);
* @param args Array storing input values. Expected u32 face_id and u32 indent
* @return String with the formatted face
*/
-u8 *format_hicn_face_prod (u8 * s, va_list * args);
+u8 *format_hicn_face_prod (u8 *s, va_list *args);
+/**
+ * @brief Init the producer face module
+ */
+void hicn_face_prod_init ();
#endif /* _FACE_PROD_H_ */
diff --git a/hicn-plugin/src/faces/app/face_prod_node.c b/hicn-plugin/src/faces/app/face_prod_node.c
index 80c3e124c..93e80d1ac 100644
--- a/hicn-plugin/src/faces/app/face_prod_node.c
+++ b/hicn-plugin/src/faces/app/face_prod_node.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Copyright (c) 2021-2022 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -19,13 +19,13 @@
* @brief Application interface node
*
* This node runs after the device-input node and performs some safety checks in
- * order to avoid unespected interest and data (i.e., hICN packets whose name do
- * not contain the prefix associated to the application face)
+ * order to avoid unexpected interests and data (i.e., hICN packets whose name
+ * does not contain the prefix associated with the application face)
*/
#include "face_prod.h"
-#include "../../hicn_api.h"
#include "../../mgmt.h"
+#include "../../parser.h"
static __clib_unused char *face_prod_input_error_strings[] = {
#define _(sym, string) string,
@@ -43,12 +43,12 @@ typedef struct
{
u32 next_index;
u32 sw_if_index;
+ hicn_error_t error;
} hicn_face_prod_input_trace_t;
typedef enum
{
- HICN_FACE_PROD_NEXT_DATA_IP4,
- HICN_FACE_PROD_NEXT_DATA_IP6,
+ HICN_FACE_PROD_NEXT_PCS,
HICN_FACE_PROD_NEXT_ERROR_DROP,
HICN_FACE_PROD_N_NEXT,
} hicn_face_prod_next_t;
@@ -56,7 +56,7 @@ typedef enum
vlib_node_registration_t hicn_face_prod_input_node;
static __clib_unused u8 *
-format_face_prod_input_trace (u8 * s, va_list * args)
+format_face_prod_input_trace (u8 *s, va_list *args)
{
CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
@@ -64,13 +64,16 @@ format_face_prod_input_trace (u8 * s, va_list * args)
va_arg (*args, hicn_face_prod_input_trace_t *);
CLIB_UNUSED (u32 indent) = format_get_indent (s);
- s = format (s, "prod-face: sw_if_index %d next-index %d",
- t->sw_if_index, t->next_index);
+ s = format (s, "prod-face: sw_if_index %d next-index %d", t->sw_if_index,
+ t->next_index);
+
+ if (t->error != HICN_ERROR_NONE)
+ s = format (s, " error %s", get_error_string (t->error));
return s;
}
static_always_inline int
-match_ip4_name (u32 * name, fib_prefix_t * prefix)
+match_ip4_name (u32 *name, const fib_prefix_t *prefix)
{
u32 xor = 0;
@@ -80,7 +83,7 @@ match_ip4_name (u32 * name, fib_prefix_t * prefix)
}
static_always_inline int
-match_ip6_name (u8 * name, fib_prefix_t * prefix)
+match_ip6_name (u8 *name, const fib_prefix_t *prefix)
{
union
{
@@ -93,34 +96,51 @@ match_ip6_name (u8 * name, fib_prefix_t * prefix)
xor_sum.as_u64[1] = ((u64 *) name)[1] & prefix->fp_addr.ip6.as_u64[1];
return (xor_sum.as_u64[0] == prefix->fp_addr.ip6.as_u64[0]) &&
- (xor_sum.as_u64[1] == prefix->fp_addr.ip6.as_u64[1]);
+ (xor_sum.as_u64[1] == prefix->fp_addr.ip6.as_u64[1]);
}
static_always_inline u32
-hicn_face_prod_next_from_data_hdr (vlib_node_runtime_t * node,
- vlib_buffer_t * b, fib_prefix_t * prefix)
+hicn_face_prod_next_from_data_hdr (vlib_main_t *vm, vlib_buffer_t *b)
{
- u8 *ptr = vlib_buffer_get_current (b);
- u8 v = *ptr & 0xf0;
+ u8 is_v6;
int match_res = 1;
+ int ret = 0;
+ hicn_name_t name;
+ hicn_face_prod_state_t *prod_face = NULL;
- if (PREDICT_TRUE (v == 0x40 && ip46_address_is_ip4 (&prefix->fp_addr)))
+ // 1 - ensure the packet is hicn and its format is correct
+ ret = hicn_data_parse_pkt (b, vlib_buffer_length_in_chain (vm, b));
+ if (PREDICT_FALSE (ret))
{
- match_res = match_ip4_name ((u32 *) & (ptr[12]), prefix);
+ return HICN_FACE_PROD_NEXT_ERROR_DROP;
}
- else if (PREDICT_TRUE (v == 0x60 && !ip46_address_is_ip4 (&prefix->fp_addr)))
+
+ // 2 - make sure the packet refers to a valid producer app state and
+ // retrieve app state information
+ prod_face = &face_state_vec[vnet_buffer (b)->sw_if_index[VLIB_RX]];
+ vnet_buffer (b)->ip.adj_index[VLIB_RX] = prod_face->adj_index;
+
+ // 3 - make sure the address in the packet belongs to the producer prefix
+ // of this face
+ const fib_prefix_t *prefix = &prod_face->prefix;
+ is_v6 = hicn_buffer_is_v6 (b);
+ hicn_packet_get_name (&hicn_get_buffer (b)->pkbuf, &name);
+ if (PREDICT_TRUE (!is_v6 && ip46_address_is_ip4 (&prefix->fp_addr)))
{
- match_res = match_ip6_name (& (ptr[8]), prefix);
+ match_res = match_ip4_name (&name.prefix.v4.as_u32, prefix);
+ }
+ else if (PREDICT_TRUE (is_v6 && !ip46_address_is_ip4 (&prefix->fp_addr)))
+ {
+ match_res = match_ip6_name (name.prefix.v6.as_u8, prefix);
}
- return match_res ? HICN_FACE_PROD_NEXT_DATA_IP4 + (v ==
- 0x60) :
- HICN_FACE_PROD_NEXT_ERROR_DROP;
+ // 4 - if match found, forward data to next hicn node
+ return match_res ? HICN_FACE_PROD_NEXT_PCS : HICN_FACE_PROD_NEXT_ERROR_DROP;
}
static_always_inline void
-hicn_face_prod_trace_buffer (vlib_main_t * vm, vlib_node_runtime_t * node,
- u32 swif, vlib_buffer_t * b, u32 next)
+hicn_face_prod_trace_buffer (vlib_main_t *vm, vlib_node_runtime_t *node,
+ u32 swif, vlib_buffer_t *b, u32 next)
{
if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) &&
(b->flags & VLIB_BUFFER_IS_TRACED)))
@@ -133,13 +153,13 @@ hicn_face_prod_trace_buffer (vlib_main_t * vm, vlib_node_runtime_t * node,
}
static uword
-hicn_face_prod_input_node_fn (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * frame)
+hicn_face_prod_input_node_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
+ vlib_frame_t *frame)
{
u32 n_left_from, *from, *to_next;
hicn_face_prod_next_t next_index;
vl_api_hicn_api_node_stats_get_reply_t stats = { 0 };
+ u32 thread_index = vm->thread_index;
from = vlib_frame_vector_args (frame);
n_left_from = frame->n_vectors;
@@ -153,23 +173,30 @@ hicn_face_prod_input_node_fn (vlib_main_t * vm,
while (n_left_from >= 8 && n_left_to_next >= 4)
{
vlib_buffer_t *b0, *b1, *b2, *b3;
+ hicn_buffer_t *hicnb0, *hicnb1, *hicnb2, *hicnb3;
u32 bi0, bi1, bi2, bi3;
- hicn_face_prod_state_t *prod_face0 = NULL;
- hicn_face_prod_state_t *prod_face1 = NULL;
- hicn_face_prod_state_t *prod_face2 = NULL;
- hicn_face_prod_state_t *prod_face3 = NULL;
u32 next0, next1, next2, next3;
+ // Prefetch next iteration
{
vlib_buffer_t *b4, *b5, *b6, *b7;
b4 = vlib_get_buffer (vm, from[4]);
b5 = vlib_get_buffer (vm, from[5]);
b6 = vlib_get_buffer (vm, from[6]);
b7 = vlib_get_buffer (vm, from[7]);
- CLIB_PREFETCH (b4, CLIB_CACHE_LINE_BYTES, STORE);
- CLIB_PREFETCH (b5, CLIB_CACHE_LINE_BYTES, STORE);
- CLIB_PREFETCH (b6, CLIB_CACHE_LINE_BYTES, STORE);
- CLIB_PREFETCH (b7, CLIB_CACHE_LINE_BYTES, STORE);
+ CLIB_PREFETCH (b4, 2 * CLIB_CACHE_LINE_BYTES, WRITE);
+ CLIB_PREFETCH (b5, 2 * CLIB_CACHE_LINE_BYTES, WRITE);
+ CLIB_PREFETCH (b6, 2 * CLIB_CACHE_LINE_BYTES, WRITE);
+ CLIB_PREFETCH (b7, 2 * CLIB_CACHE_LINE_BYTES, WRITE);
+
+ CLIB_PREFETCH (vlib_buffer_get_current (b4),
+ 2 * CLIB_CACHE_LINE_BYTES, WRITE);
+ CLIB_PREFETCH (vlib_buffer_get_current (b5),
+ 2 * CLIB_CACHE_LINE_BYTES, WRITE);
+ CLIB_PREFETCH (vlib_buffer_get_current (b6),
+ 2 * CLIB_CACHE_LINE_BYTES, WRITE);
+ CLIB_PREFETCH (vlib_buffer_get_current (b7),
+ 2 * CLIB_CACHE_LINE_BYTES, WRITE);
}
bi0 = from[0];
@@ -192,53 +219,71 @@ hicn_face_prod_input_node_fn (vlib_main_t * vm,
b2 = vlib_get_buffer (vm, bi2);
b3 = vlib_get_buffer (vm, bi3);
- prod_face0 =
- &face_state_vec[vnet_buffer (b0)->sw_if_index[VLIB_RX]];
- prod_face1 =
- &face_state_vec[vnet_buffer (b1)->sw_if_index[VLIB_RX]];
- prod_face2 =
- &face_state_vec[vnet_buffer (b2)->sw_if_index[VLIB_RX]];
- prod_face3 =
- &face_state_vec[vnet_buffer (b3)->sw_if_index[VLIB_RX]];
-
- next0 =
- hicn_face_prod_next_from_data_hdr (node, b0, &prod_face0->prefix);
- next1 =
- hicn_face_prod_next_from_data_hdr (node, b1, &prod_face1->prefix);
- next2 =
- hicn_face_prod_next_from_data_hdr (node, b2, &prod_face2->prefix);
- next3 =
- hicn_face_prod_next_from_data_hdr (node, b3, &prod_face3->prefix);
+ hicnb0 = hicn_get_buffer (b0);
+ hicnb1 = hicn_get_buffer (b1);
+ hicnb2 = hicn_get_buffer (b2);
+ hicnb3 = hicn_get_buffer (b3);
+
+ hicnb0->flags = HICN_FACE_FLAGS_DEFAULT;
+ hicnb1->flags = HICN_FACE_FLAGS_DEFAULT;
+ hicnb2->flags = HICN_FACE_FLAGS_DEFAULT;
+ hicnb3->flags = HICN_FACE_FLAGS_DEFAULT;
+
+ // parse packets and get next node
+ next0 = hicn_face_prod_next_from_data_hdr (vm, b0);
+ next1 = hicn_face_prod_next_from_data_hdr (vm, b1);
+ next2 = hicn_face_prod_next_from_data_hdr (vm, b2);
+ next3 = hicn_face_prod_next_from_data_hdr (vm, b3);
stats.pkts_data_count += 4;
- /* trace */
- hicn_face_prod_trace_buffer (vm, node,
- vnet_buffer (b0)->sw_if_index[VLIB_RX],
- b0, next0);
- hicn_face_prod_trace_buffer (vm, node,
- vnet_buffer (b1)->sw_if_index[VLIB_RX],
- b1, next1);
- hicn_face_prod_trace_buffer (vm, node,
- vnet_buffer (b2)->sw_if_index[VLIB_RX],
- b2, next2);
- hicn_face_prod_trace_buffer (vm, node,
- vnet_buffer (b3)->sw_if_index[VLIB_RX],
- b3, next3);
-
- /* enqueue */
+ // counters
+ vlib_increment_combined_counter (
+ &counters[hicnb0->face_id * HICN_N_COUNTER], thread_index,
+ HICN_FACE_COUNTERS_DATA_RX, 1,
+ vlib_buffer_length_in_chain (vm, b0));
+ stats.pkts_data_count += 1;
+
+ vlib_increment_combined_counter (
+ &counters[hicnb1->face_id * HICN_N_COUNTER], thread_index,
+ HICN_FACE_COUNTERS_DATA_RX, 1,
+ vlib_buffer_length_in_chain (vm, b1));
+ stats.pkts_data_count += 1;
+
+ vlib_increment_combined_counter (
+ &counters[hicnb2->face_id * HICN_N_COUNTER], thread_index,
+ HICN_FACE_COUNTERS_DATA_RX, 1,
+ vlib_buffer_length_in_chain (vm, b2));
+ stats.pkts_data_count += 1;
+
+ vlib_increment_combined_counter (
+ &counters[hicnb3->face_id * HICN_N_COUNTER], thread_index,
+ HICN_FACE_COUNTERS_DATA_RX, 1,
+ vlib_buffer_length_in_chain (vm, b3));
+ stats.pkts_data_count += 1;
+
+ // trace
+ hicn_face_prod_trace_buffer (
+ vm, node, vnet_buffer (b0)->sw_if_index[VLIB_RX], b0, next0);
+ hicn_face_prod_trace_buffer (
+ vm, node, vnet_buffer (b1)->sw_if_index[VLIB_RX], b1, next1);
+ hicn_face_prod_trace_buffer (
+ vm, node, vnet_buffer (b2)->sw_if_index[VLIB_RX], b2, next2);
+ hicn_face_prod_trace_buffer (
+ vm, node, vnet_buffer (b3)->sw_if_index[VLIB_RX], b3, next3);
+
+ // enqueue
vlib_validate_buffer_enqueue_x4 (vm, node, next_index, to_next,
n_left_to_next, bi0, bi1, bi2, bi3,
next0, next1, next2, next3);
stats.pkts_processed += 4;
-
}
while (n_left_from > 0 && n_left_to_next > 0)
{
vlib_buffer_t *b0;
- u32 bi0, swif;
- hicn_face_prod_state_t *prod_face = NULL;
+ hicn_buffer_t *hicnb0;
+ u32 bi0;
u32 next0;
if (n_left_from > 1)
@@ -256,38 +301,41 @@ hicn_face_prod_input_node_fn (vlib_main_t * vm,
n_left_to_next -= 1;
b0 = vlib_get_buffer (vm, bi0);
- swif = vnet_buffer (b0)->sw_if_index[VLIB_RX];
- prod_face = &face_state_vec[swif];
+ hicnb0 = hicn_get_buffer (b0);
+ hicnb0->flags = HICN_FACE_FLAGS_DEFAULT;
- next0 =
- hicn_face_prod_next_from_data_hdr (node, b0, &prod_face->prefix);
+ next0 = hicn_face_prod_next_from_data_hdr (vm, b0);
stats.pkts_data_count++;
+ // counters
+ vlib_increment_combined_counter (
+ &counters[hicnb0->face_id * HICN_N_COUNTER], thread_index,
+ HICN_FACE_COUNTERS_DATA_RX, 1,
+ vlib_buffer_length_in_chain (vm, b0));
+ stats.pkts_data_count += 1;
+
/* trace */
- hicn_face_prod_trace_buffer (vm, node,
- vnet_buffer (b0)->sw_if_index[VLIB_RX],
- b0, next0);
+ hicn_face_prod_trace_buffer (
+ vm, node, vnet_buffer (b0)->sw_if_index[VLIB_RX], b0, next0);
/* enqueue */
vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
n_left_to_next, bi0, next0);
stats.pkts_processed += 1;
-
}
vlib_put_next_frame (vm, node, next_index, n_left_to_next);
}
- vlib_node_increment_counter (vm, node->node_index,
- HICNFWD_ERROR_PROCESSED, stats.pkts_processed);
+ vlib_node_increment_counter (vm, node->node_index, HICNFWD_ERROR_PROCESSED,
+ stats.pkts_processed);
vlib_node_increment_counter (vm, node->node_index, HICNFWD_ERROR_DATAS,
stats.pkts_data_count);
return (frame->n_vectors);
}
-/* *INDENT-OFF* */
VLIB_REGISTER_NODE(hicn_face_prod_input_node) =
{
.function = hicn_face_prod_input_node_fn,
@@ -300,12 +348,10 @@ VLIB_REGISTER_NODE(hicn_face_prod_input_node) =
.n_next_nodes = HICN_FACE_PROD_N_NEXT,
.next_nodes =
{
- [HICN_FACE_PROD_NEXT_DATA_IP4] = "hicn4-face-input",
- [HICN_FACE_PROD_NEXT_DATA_IP6] = "hicn6-face-input",
+ [HICN_FACE_PROD_NEXT_PCS] = "hicn-data-pcslookup",
[HICN_FACE_PROD_NEXT_ERROR_DROP] = "error-drop",
},
};
-/* *INDENT-ON* */
/*
* fd.io coding-style-patch-verification: ON
diff --git a/hicn-plugin/src/faces/face.c b/hicn-plugin/src/faces/face.c
index b495d18b0..ce3001ac7 100644
--- a/hicn-plugin/src/faces/face.c
+++ b/hicn-plugin/src/faces/face.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Copyright (c) 2021-2023 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -15,6 +15,7 @@
#include <vnet/fib/fib_entry_track.h>
#include "face.h"
+#include "app/face_prod.h"
#include "../hicn.h"
#include "../params.h"
#include "../error.h"
@@ -24,6 +25,7 @@
dpo_id_t *face_dpo_vec;
hicn_face_vft_t *face_vft_vec;
char **face_type_names_vec;
+u8 pl_index = 1;
hicn_face_t *hicn_dpoi_face_pool;
@@ -36,49 +38,42 @@ dpo_type_t hicn_face_type;
fib_node_type_t hicn_face_fib_node_type;
const char *HICN_FACE_CTRX_STRING[] = {
-#define _(a,b,c) c,
+#define _(a, b, c) c,
foreach_hicn_face_counter
#undef _
};
u8 *
-face_show (u8 * s, int face_id, u32 indent)
+face_show (u8 *s, int face_id, u32 indent)
{
s = format (s, "%U Faces:\n", format_white_space, indent);
indent += 4;
int i;
vec_foreach_index (i, face_dpo_vec)
- {
- s =
- format (s, "%U", face_vft_vec[i].format_face,
- face_dpo_vec[face_id].dpoi_index, indent);
- }
+ {
+ s = format (s, "%U", face_vft_vec[i].format_face,
+ face_dpo_vec[face_id].dpoi_index, indent);
+ }
return (s);
-
}
-mhash_t hicn_face_vec_hashtb;
mhash_t hicn_face_hashtb;
-hicn_face_vec_t *hicn_vec_pool;
-
const static char *const hicn_face6_nodes[] = {
- "hicn6-face-output", // this is the name you give your node in VLIB_REGISTER_NODE
- "hicn6-iface-output", // this is the name you give your node in VLIB_REGISTER_NODE
+ "hicn6-face-output", // this is the name you give your node in
+ // VLIB_REGISTER_NODE
NULL,
};
const static char *const hicn_face4_nodes[] = {
- "hicn4-face-output", // this is the name you give your node in VLIB_REGISTER_NODE
- "hicn4-iface-output", // this is the name you give your node in VLIB_REGISTER_NODE
+ "hicn4-face-output", // this is the name you give your node in
+ // VLIB_REGISTER_NODE
NULL,
};
-
const static char *const *const hicn_face_nodes[DPO_PROTO_NUM] = {
- [DPO_PROTO_IP4] = hicn_face4_nodes,
- [DPO_PROTO_IP6] = hicn_face6_nodes
+ [DPO_PROTO_IP4] = hicn_face4_nodes, [DPO_PROTO_IP6] = hicn_face6_nodes
};
const static dpo_vft_t hicn_face_dpo_vft = {
@@ -98,19 +93,19 @@ hicn_face_node_get (fib_node_index_t index)
}
static void
-hicn_face_last_lock_gone (fib_node_t * node)
+hicn_face_last_lock_gone (fib_node_t *node)
{
}
static hicn_face_t *
-hicn_face_from_fib_node (fib_node_t * node)
+hicn_face_from_fib_node (fib_node_t *node)
{
return ((hicn_face_t *) (((char *) node) -
STRUCT_OFFSET_OF (hicn_face_t, fib_node)));
}
static fib_node_back_walk_rc_t
-hicn_face_back_walk_notify (fib_node_t * node, fib_node_back_walk_ctx_t * ctx)
+hicn_face_back_walk_notify (fib_node_t *node, fib_node_back_walk_ctx_t *ctx)
{
hicn_face_t *face = hicn_face_from_fib_node (node);
@@ -133,7 +128,8 @@ hicn_face_back_walk_notify (fib_node_t * node, fib_node_back_walk_ctx_t * ctx)
/* } */
/* else */
/* { */
- /* dpo_stack(hicn_face_type, face->dpo.dpoi_proto, &face->dpo, dpo); */
+ /* dpo_stack(hicn_face_type, face->dpo.dpoi_proto, &face->dpo, dpo);
+ */
/* } */
/* } */
@@ -145,7 +141,6 @@ hicn_face_show_memory (void)
{
}
-
static const fib_node_vft_t hicn_face_fib_node_vft = {
.fnv_get = hicn_face_node_get,
.fnv_last_lock = hicn_face_last_lock_gone,
@@ -155,39 +150,36 @@ static const fib_node_vft_t hicn_face_fib_node_vft = {
// Make this more flexible for future types face
void
-hicn_face_module_init (vlib_main_t * vm)
+hicn_face_module_init (vlib_main_t *vm)
{
- pool_validate (hicn_dpoi_face_pool);
pool_alloc (hicn_dpoi_face_pool, 1024);
- counters =
- vec_new (vlib_combined_counter_main_t,
- HICN_PARAM_FACES_MAX * HICN_N_COUNTER);
+ counters = vec_new (vlib_combined_counter_main_t,
+ HICN_PARAM_FACES_MAX * HICN_N_COUNTER);
- mhash_init (&hicn_face_vec_hashtb,
- sizeof (hicn_face_input_faces_t) /* value */ ,
- sizeof (hicn_face_key_t) /* key */ );
- mhash_init (&hicn_face_hashtb, sizeof (hicn_face_id_t) /* value */ ,
- sizeof (hicn_face_key_t) /* key */ );
-
- pool_alloc (hicn_vec_pool, 100);
+ mhash_init (&hicn_face_hashtb, sizeof (hicn_face_id_t) /* value */,
+ sizeof (hicn_face_key_t) /* key */);
/*
* How much useful is the following registration?
* So far it seems that we need it only for setting the dpo_type.
*/
- hicn_face_type =
- dpo_register_new_type (&hicn_face_dpo_vft, hicn_face_nodes);
+ hicn_face_type = dpo_register_new_type (&hicn_face_dpo_vft, hicn_face_nodes);
/*
- * We register a new node type to get informed when the adjacency corresponding
- * to a face is updated
+ * We register a new node type to get informed when the adjacency
+ * corresponding to a face is updated
*/
hicn_face_fib_node_type =
- fib_node_register_new_type (&hicn_face_fib_node_vft);
+ fib_node_register_new_type ("hicn_face_fib_node", &hicn_face_fib_node_vft);
+
+ /*
+ * Init producer face module
+ */
+ hicn_face_prod_init ();
}
u8 *
-format_hicn_face (u8 * s, va_list * args)
+format_hicn_face (u8 *s, va_list *args)
{
index_t index = va_arg (*args, index_t);
u32 indent = va_arg (*args, u32);
@@ -211,9 +203,8 @@ format_hicn_face (u8 * s, va_list * args)
if ((face->flags & HICN_FACE_FLAGS_DELETED))
s = format (s, " (deleted)");
- s = format (s, "\n%U%U",
- format_white_space, indent + 2,
- format_dpo_id, &face->dpo, indent + 3);
+ s = format (s, "\n%U%U", format_white_space, indent + 2, format_dpo_id,
+ &face->dpo, indent + 3);
}
else
{
@@ -235,9 +226,8 @@ format_hicn_face (u8 * s, va_list * args)
return s;
}
-
u8 *
-format_hicn_face_all (u8 * s, int n, ...)
+format_hicn_face_all (u8 *s, int n, ...)
{
va_list ap;
va_start (ap, n);
@@ -247,12 +237,11 @@ format_hicn_face_all (u8 * s, int n, ...)
hicn_face_t *face;
- /* *INDENT-OFF* */
- pool_foreach ( face, hicn_dpoi_face_pool,
- {
- s = format(s, "%U\n", format_hicn_face, hicn_dpoi_get_index(face), indent);
- });
- /* *INDENT-ON* */
+ pool_foreach (face, hicn_dpoi_face_pool)
+ {
+ s = format (s, "%U\n", format_hicn_face, hicn_dpoi_get_index (face),
+ indent);
+ }
return s;
}
@@ -260,47 +249,11 @@ format_hicn_face_all (u8 * s, int n, ...)
int
hicn_face_del (hicn_face_id_t face_id)
{
- hicn_face_t *face = hicn_dpoi_get_from_idx (face_id);
- hicn_face_key_t key;
- hicn_face_key_t old_key;
- hicn_face_key_t old_key2;
-
- hicn_face_get_key (&(face->nat_addr), face->sw_if, &(face->dpo), &key);
- hicn_face_input_faces_t *in_faces_vec =
- hicn_face_get_vec (&(face->nat_addr),
- &hicn_face_vec_hashtb);
- if (in_faces_vec != NULL)
- {
- hicn_face_vec_t *vec =
- pool_elt_at_index (hicn_vec_pool, in_faces_vec->vec_id);
- u32 index_face = vec_search (*vec, face_id);
- vec_del1 (*vec, index_face);
-
- if (vec_len (*vec) == 0)
- {
- pool_put_index (hicn_vec_pool, in_faces_vec->vec_id);
- mhash_unset (&hicn_face_vec_hashtb, &key, (uword *) & old_key);
- vec_free (*vec);
- }
- else
- {
- /* Check if the face we are deleting is the preferred one. */
- /* If so, repleace with another. */
- if (in_faces_vec->face_id == face_id)
- {
- in_faces_vec->face_id = (*vec)[0];
- }
- }
-
- mhash_unset (&hicn_face_hashtb, &key, (uword *) & old_key2);
- }
-
int ret = HICN_ERROR_NONE;
if (hicn_dpoi_idx_is_valid (face_id))
{
hicn_face_t *face = hicn_dpoi_get_from_idx (face_id);
- face->locks--;
if (face->locks == 0)
pool_put_index (hicn_dpoi_face_pool, face_id);
else
@@ -309,15 +262,16 @@ hicn_face_del (hicn_face_id_t face_id)
else
ret = HICN_ERROR_FACE_NOT_FOUND;
-
return ret;
}
static void
-hicn_iface_to_face (hicn_face_t * face, const dpo_id_t * dpo)
+hicn_iface_to_face (hicn_face_t *face, const dpo_id_t *dpo,
+ dpo_proto_t dpo_proto)
{
- dpo_stack (hicn_face_type, dpo->dpoi_proto, &face->dpo, dpo);
+ dpo_stack (hicn_face_type, dpo_proto, &face->dpo, dpo);
+ face->dpo.dpoi_proto = dpo_proto;
face->flags &= ~HICN_FACE_FLAGS_IFACE;
face->flags |= HICN_FACE_FLAGS_FACE;
@@ -326,7 +280,7 @@ hicn_iface_to_face (hicn_face_t * face, const dpo_id_t * dpo)
fib_node_init (&face->fib_node, hicn_face_fib_node_type);
fib_node_lock (&face->fib_node);
- if (dpo->dpoi_type != DPO_ADJACENCY_MIDCHAIN ||
+ if (dpo->dpoi_type != DPO_ADJACENCY_MIDCHAIN &&
dpo->dpoi_type != DPO_ADJACENCY_MCAST_MIDCHAIN)
{
ip_adjacency_t *adj = adj_get (dpo->dpoi_index);
@@ -337,15 +291,11 @@ hicn_iface_to_face (hicn_face_t * face, const dpo_id_t * dpo)
{
fib_prefix_from_ip46_addr (nh, &prefix);
- u32 fib_index =
- fib_table_find (prefix.fp_proto, HICN_FIB_TABLE);
+ u32 fib_index = fib_table_find (prefix.fp_proto, HICN_FIB_TABLE);
- face->fib_entry_index = fib_entry_track (fib_index,
- &prefix,
- hicn_face_fib_node_type,
- hicn_dpoi_get_index
- (face),
- &face->fib_sibling);
+ face->fib_entry_index = fib_entry_track (
+ fib_index, &prefix, hicn_face_fib_node_type,
+ hicn_dpoi_get_index (face), &face->fib_sibling);
}
}
}
@@ -356,13 +306,10 @@ hicn_iface_to_face (hicn_face_t * face, const dpo_id_t * dpo)
* the ip_adjacency has already been set up.
*/
int
-hicn_face_add (const dpo_id_t * dpo_nh, ip46_address_t * nat_address,
- int sw_if, hicn_face_id_t * pfaceid, u8 is_app_prod)
+hicn_face_add (const dpo_id_t *dpo_nh, ip46_address_t *nat_address, int sw_if,
+ hicn_face_id_t *pfaceid, dpo_proto_t dpo_proto)
{
- hicn_face_flags_t flags = (hicn_face_flags_t) 0;
- flags |= HICN_FACE_FLAGS_FACE;
-
hicn_face_t *face;
face =
@@ -384,9 +331,7 @@ hicn_face_add (const dpo_id_t * dpo_nh, ip46_address_t * nat_address,
if (face == NULL)
{
-
- hicn_iface_add (nat_address, sw_if, pfaceid, dpo_nh->dpoi_proto,
- dpo_nh->dpoi_index);
+ hicn_iface_add (nat_address, sw_if, pfaceid, dpo_nh->dpoi_index, 0);
face = hicn_dpoi_get_from_idx (*pfaceid);
mhash_set_mem (&hicn_face_hashtb, &key, (uword *) pfaceid, 0);
@@ -401,71 +346,21 @@ hicn_face_add (const dpo_id_t * dpo_nh, ip46_address_t * nat_address,
mhash_set_mem (&hicn_face_hashtb, &key, (uword *) pfaceid, 0);
}
- hicn_iface_to_face (face, dpo_nh);
+ hicn_iface_to_face (face, dpo_nh, dpo_proto);
temp_dpo.dpoi_index = ~0;
- hicn_face_input_faces_t *in_faces =
- hicn_face_get_vec (nat_address, &hicn_face_vec_hashtb);
-
- if (in_faces == NULL)
- {
- hicn_face_input_faces_t in_faces_temp;
- hicn_face_vec_t *vec;
- pool_get (hicn_vec_pool, vec);
- *vec = vec_new (hicn_face_id_t, 0);
- u32 index = vec - hicn_vec_pool;
- in_faces_temp.vec_id = index;
- vec_add1 (*vec, *pfaceid);
-
- in_faces_temp.face_id = *pfaceid;
-
- hicn_face_get_key (nat_address, 0, &temp_dpo, &key);
-
- mhash_set_mem (&hicn_face_vec_hashtb, &key,
- (uword *) & in_faces_temp, 0);
- }
- else
- {
- hicn_face_vec_t *vec =
- pool_elt_at_index (hicn_vec_pool, in_faces->vec_id);
-
- /* */
- if (vec_search (*vec, *pfaceid) != ~0)
- return HICN_ERROR_FACE_ALREADY_CREATED;
-
- vec_add1 (*vec, *pfaceid);
-
- hicn_iface_to_face (face, dpo_nh);
-
- hicn_face_get_key (nat_address, 0, &temp_dpo, &key);
-
- mhash_set_mem (&hicn_face_vec_hashtb, &key, (uword *) in_faces, 0);
+ retx_t *retx = vlib_process_signal_event_data (
+ vlib_get_main (), hicn_mapme_eventmgr_process_node.index,
+ HICN_MAPME_EVENT_FACE_ADD, 1, sizeof (retx_t));
- /* If the face is an application producer face, we set it as the preferred incoming face. */
- /* This is required to handle the CS separation, and the push api in a lightway */
- if (is_app_prod)
- {
- in_faces->face_id = *pfaceid;
- }
- }
-
- retx_t *retx = vlib_process_signal_event_data (vlib_get_main (),
- hicn_mapme_eventmgr_process_node.
- index,
- HICN_MAPME_EVENT_FACE_ADD, 1,
- sizeof (retx_t));
-
- /* *INDENT-OFF* */
- *retx = (retx_t) {
+ *retx = (retx_t){
.face_id = *pfaceid,
};
- /* *INDENT-ON* */
return HICN_ERROR_NONE;
}
-
/*
* fd.io coding-style-patch-verification: ON
*
diff --git a/hicn-plugin/src/faces/face.h b/hicn-plugin/src/faces/face.h
index 234c3fcc2..43900dd4e 100644
--- a/hicn-plugin/src/faces/face.h
+++ b/hicn-plugin/src/faces/face.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Copyright (c) 2021 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -25,8 +25,12 @@
#include <vppinfra/bihash_8_8.h>
#include <vnet/adj/adj_midchain.h>
+#include <vpp_plugins/hicn/error.h>
+
+#include "face_flags.h"
+#include "../hicn_buffer_flags.h"
+#include "../hicn_logging.h"
-#include "../error.h"
typedef u8 hicn_face_flags_t;
typedef index_t hicn_face_id_t;
@@ -35,39 +39,40 @@ typedef index_t hicn_face_id_t;
*
* This file implements a general face type. The purpose of a face is to
* carry the needed information to forward interest and data packets to the
- * next node in the network. There are two type of faces: complete faces (in short
- * faces), and incomplete faces (in short ifaces).
+ * next node in the network. There are two types of faces: complete faces (in
+ * short faces), and incomplete faces (in short ifaces).
*
* A face that does not contain the indication of the adjacency is an
- * incomplete face (iface), otherwise it is considered to be complete. Ifaces are
- * used to forward data back to the previous hICN hop from which we received an
- * interest, while faces are used to forward interest packets to the next hicn node.
- * Faces and ifaces are created at two different points in time. Faces are created
- * when a route is added, while ifaces are created when an interest is received.
- * In details, faces and ifaces carry the following information:
- * - nat_addr: the ip address to perform src nat or dst nat on interest and data packets, respectively;
+ * incomplete face (iface), otherwise it is considered to be complete. Ifaces
+ * are used to forward data back to the previous hICN hop from which we
+ * received an interest, while faces are used to forward interest packets to
+ * the next hicn node. Faces and ifaces are created at two different points in
+ * time. Faces are created when a route is added, while ifaces are created when
+ * an interest is received. In detail, faces and ifaces carry the following
+ * information:
+ * - nat_addr: the ip address to perform src nat or dst nat on interest and
+ * data packets, respectively;
* - pl_id: the path label
- * - locks: the number of entities using this face. When 0 the face can be deallocated
- * - dpo: the dpo that identifies the next node in the vlib graph for processing the vlib
- * buffer. The dpo contains the dpo.dpoi_next field that points to the next node
- * in the vlib graph and the dpo.dpoi_index which is an index to adj used by the next node
- * to perform the l2 rewrite. In case of ifaces, it is likely we don't know the
- * adjacency when creting the face. In this case, the next node in the vlib graph
- * will be the node that performs a lookup in the fib. Only in case of udp tunnels,
- * which are bidirectional tunnel we know that the incoming tunnel is also the outgoing
- * one, therefore in this case we store the tunnel in the dpo.dpoi_index fields. For
- * all the other tunnels (which are most likely unidirectional), the source address of
- * the interest will be used to retrieve the outgoing tunnel when sending the corresponding
- * data back.
+ * - locks: the number of entities using this face. When 0 the face can be
+ * deallocated
+ * - dpo: the dpo that identifies the next node in the vlib graph for
+ * processing the vlib buffer. The dpo contains the dpo.dpoi_next field that
+ * points to the next node in the vlib graph and the dpo.dpoi_index which is an
+ * index to adj used by the next node to perform the l2 rewrite. In case of
+ * ifaces, it is likely we don't know the adjacency when creating the face. In
+ * this case, the next node in the vlib graph will be the node that performs a
+ * lookup in the fib. Only in the case of udp tunnels, which are bidirectional
+ * tunnels, we know that the incoming tunnel is also the outgoing one; therefore
+ * in this case we store the tunnel in the dpo.dpoi_index fields. For all the
+ * other tunnels (which are most likely unidirectional), the source address of
+ * the interest will be used to retrieve the outgoing tunnel when sending the
+ * corresponding data back.
* - sw_if: the incoming interface of the interest
- * - fib_node, fib_entry_index and fib_sibling are information used to be notified of
- * changes in the adjacency pointed by the dpo.
+ * - fib_node, fib_entry_index and fib_sibling are used to receive
+ * notifications of changes in the adjacency pointed to by the dpo.
*
- * We maintain two hash tables to retrieve faces and ifaces. In particular one hash table which
- * index faces and ifaces for nat_address, sw_if and dpo. This is used to retrieve existing faces
- * or ifaces when an interest is received and when an new face is created. A second hash table that
- * indexes vectors of faces for nat_address and sw_if. This is used to retrieve a list of possible
- * incoming faces when a data is received.
+ * We maintain one hash table to retrieve faces and ifaces, which indexes
+ * faces and ifaces by nat_address, sw_if and dpo.
*/
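To make the lookup scheme described above concrete, here is a sketch (not part of the patch) of retrieving a face from the single hash table using the helpers defined later in this header; the nat address, interface and adjacency index are placeholders supplied by the caller.

static inline void
example_face_lookup (const ip46_address_t *nat_addr, u32 sw_if,
		     index_t adj_index)
{
  hicn_face_t *face =
    hicn_face_get (nat_addr, sw_if, &hicn_face_hashtb, adj_index);
  if (face != NULL)
    {
      /* hicn_face_get took a lock; release it when the face is not needed */
      hicn_face_unlock_with_id (hicn_dpoi_get_index (face));
    }
}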
/**
@@ -82,7 +87,7 @@ typedef struct __attribute__ ((packed)) hicn_face_s
hicn_face_flags_t flags;
/* Align the upcoming fields */
- u8 align;
+ u8 iface_next;
/* Path label (2B) */
u16 pl_id;
@@ -91,7 +96,8 @@ typedef struct __attribute__ ((packed)) hicn_face_s
u32 locks;
/* Dpo for the adjacency (8B) */
- union {
+ union
+ {
dpo_id_t dpo;
u64 align_dpo;
};
@@ -112,75 +118,39 @@ typedef struct __attribute__ ((packed)) hicn_face_s
/* Pool of faces */
extern hicn_face_t *hicn_dpoi_face_pool;
-/* Flags */
-/* A face is complete and it stores all the information. A iface lacks of the
- adj index, therefore sending a packet through a iface require a lookup in
- the FIB. */
-#define HICN_FACE_FLAGS_DEFAULT 0x00
-#define HICN_FACE_FLAGS_FACE 0x01
-#define HICN_FACE_FLAGS_IFACE 0x02
-#define HICN_FACE_FLAGS_APPFACE_PROD 0x04 /* Currently only IP face can be appface */
-#define HICN_FACE_FLAGS_APPFACE_CONS 0x08 /* Currently only IP face can be appface */
-#define HICN_FACE_FLAGS_DELETED 0x10
-
#define HICN_FACE_NULL (hicn_face_id_t) ~0
-#define HICN_FACE_FLAGS_APPFACE_PROD_BIT 2
-#define HICN_FACE_FLAGS_APPFACE_CONS_BIT 3
-
-
-#define HICN_BUFFER_FLAGS_DEFAULT 0x00
-#define HICN_BUFFER_FLAGS_FACE_IS_APP 0x01
-
-STATIC_ASSERT ((1 << HICN_FACE_FLAGS_APPFACE_PROD_BIT) ==
- HICN_FACE_FLAGS_APPFACE_PROD,
- "HICN_FACE_FLAGS_APPFACE_PROD_BIT and HICN_FACE_FLAGS_APPFACE_PROD must correspond");
-
-STATIC_ASSERT ((1 << HICN_FACE_FLAGS_APPFACE_CONS_BIT) ==
- HICN_FACE_FLAGS_APPFACE_CONS,
- "HICN_FACE_FLAGS_APPFACE_CONS_BIT and HICN_FACE_FLAGS_APPFACE_CONS must correspond");
-
-STATIC_ASSERT ((HICN_FACE_FLAGS_APPFACE_PROD >>
- HICN_FACE_FLAGS_APPFACE_PROD_BIT) ==
- HICN_BUFFER_FLAGS_FACE_IS_APP,
- "hicn buffer app flag does not correspond to HICN_FACE_FLAGS_APPFACE_PROD");
-
-STATIC_ASSERT ((HICN_FACE_FLAGS_APPFACE_CONS >>
- HICN_FACE_FLAGS_APPFACE_CONS_BIT) ==
- HICN_BUFFER_FLAGS_FACE_IS_APP,
- "hicn buffer app flag does not correspond to HICN_FACE_FLAGS_APPFACE_PROD");
-
/**
* @brief Definition of the virtual function table for an hICN FACE DPO.
*/
typedef struct hicn_face_vft_s
{
- u8 *(*format_face) (u8 * s, va_list * args);
+ u8 *(*format_face) (u8 *s, va_list *args);
/**< Format an hICN face dpo*/
int (*hicn_face_del) (hicn_face_id_t face_id);
- void (*hicn_face_get_dpo) (hicn_face_t * face, dpo_id_t * dpo);
+ void (*hicn_face_get_dpo) (hicn_face_t *face, dpo_id_t *dpo);
} hicn_face_vft_t;
-#define foreach_hicn_face_counter \
- _(INTEREST_RX, 0, "Interest rx") \
- _(INTEREST_TX, 1, "Interest tx") \
- _(DATA_RX, 2, "Data rx") \
- _(DATA_TX, 3, "Data tx") \
+#define foreach_hicn_face_counter \
+ _ (INTEREST_RX, 0, "Interest rx") \
+ _ (INTEREST_TX, 1, "Interest tx") \
+ _ (DATA_RX, 2, "Data rx") \
+ _ (DATA_TX, 3, "Data tx")
typedef enum
{
-#define _(a,b,c) HICN_FACE_COUNTERS_##a = (b),
+#define _(a, b, c) HICN_FACE_COUNTERS_##a = (b),
foreach_hicn_face_counter
#undef _
- HICN_N_COUNTER
+ HICN_N_COUNTER
} hicn_face_counters_t;
extern mhash_t hicn_face_hashtb;
extern const char *HICN_FACE_CTRX_STRING[];
-#define get_face_counter_string(ctrxno) (char *)(HICN_FACE_CTRX_STRING[ctrxno])
-
+#define get_face_counter_string(ctrxno) \
+ (char *) (HICN_FACE_CTRX_STRING[ctrxno])
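As an illustration of how these per-face counters are consumed (a sketch mirroring the data-path nodes shown earlier in this diff; face_id, thread_index and bytes are placeholders):

static inline void
example_count_data_rx (hicn_face_id_t face_id, u32 thread_index, u32 bytes)
{
  /* one received data packet of 'bytes' bytes on this face */
  vlib_increment_combined_counter (&counters[face_id * HICN_N_COUNTER],
				   thread_index, HICN_FACE_COUNTERS_DATA_RX,
				   1 /* packets */, bytes);
  HICN_DEBUG ("incremented %s for face %d",
	      get_face_counter_string (HICN_FACE_COUNTERS_DATA_RX), face_id);
}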
/* Vector maintaining a dpo per face */
extern dpo_id_t *face_dpo_vec;
@@ -189,6 +159,9 @@ extern hicn_face_vft_t *face_vft_vec;
/* Vector holding the set of face names */
extern char **face_type_names_vec;
+/* Pathlabel counter */
+extern u8 pl_index;
+
/* First face type registered in the system. */
extern dpo_type_t first_type;
@@ -202,7 +175,7 @@ extern vlib_combined_counter_main_t *counters;
* @return face id
*/
always_inline hicn_face_id_t
-hicn_dpoi_get_index (hicn_face_t * face_dpoi)
+hicn_dpoi_get_index (hicn_face_t *face_dpoi)
{
return face_dpoi - hicn_dpoi_face_pool;
}
@@ -217,7 +190,7 @@ hicn_dpoi_get_index (hicn_face_t * face_dpoi)
always_inline hicn_face_t *
hicn_dpoi_get_from_idx_safe (hicn_face_id_t dpoi_index)
{
- if (!pool_is_free_index(hicn_dpoi_face_pool, dpoi_index))
+ if (!pool_is_free_index (hicn_dpoi_face_pool, dpoi_index))
return (hicn_face_t *) pool_elt_at_index (hicn_dpoi_face_pool, dpoi_index);
else
return NULL;
@@ -241,10 +214,18 @@ hicn_dpoi_get_from_idx (hicn_face_id_t dpoi_index)
always_inline int
hicn_dpoi_idx_is_valid (hicn_face_id_t face_id)
{
- return pool_len (hicn_dpoi_face_pool) > face_id
- && !pool_is_free_index (hicn_dpoi_face_pool, face_id);
+ return pool_len (hicn_dpoi_face_pool) > face_id &&
+ !pool_is_free_index (hicn_dpoi_face_pool, face_id);
}
+/**
+ * @brief Delete a face
+ *
+ * @param face_id Id of the face to delete
+ * @return HICN_ERROR_FACE_NOT_FOUND if the face does not exist, otherwise
+ * HICN_ERROR_NONE
+ */
+int hicn_face_del (hicn_face_id_t face_id);
/**
* @brief Add a lock to the face dpo
@@ -264,12 +245,24 @@ hicn_face_lock_with_id (hicn_face_id_t face_id)
*
* @param dpo Pointer to the face dpo
*/
-always_inline void
+always_inline int
hicn_face_unlock_with_id (hicn_face_id_t face_id)
{
hicn_face_t *face;
face = hicn_dpoi_get_from_idx (face_id);
- face->locks--;
+
+ if (face->locks > 0)
+ {
+ face->locks--;
+
+ if (face->locks == 0)
+ {
+ HICN_DEBUG ("Deleting face %d", face_id);
+ return hicn_face_del (face_id);
+ }
+ }
+
+ return HICN_ERROR_NONE;
}
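A short usage sketch of the lock/unlock discipline implied above (face_id is hypothetical); with this change, the last unlock also frees the face from the pool.

static inline void
example_face_lock_cycle (hicn_face_id_t face_id)
{
  hicn_face_lock_with_id (face_id);
  /* ... hand the face to another context, send packets, etc. ... */
  int rv = hicn_face_unlock_with_id (face_id); /* frees face at locks == 0 */
  if (rv != HICN_ERROR_NONE)
    HICN_DEBUG ("face %d already released", face_id);
}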
/**
@@ -278,32 +271,31 @@ hicn_face_unlock_with_id (hicn_face_id_t face_id)
* @param dpo Pointer to the face dpo
*/
always_inline void
-hicn_face_lock (dpo_id_t * dpo)
+hicn_face_lock (dpo_id_t *dpo)
{
- hicn_face_lock_with_id(dpo->dpoi_index);
+ hicn_face_lock_with_id (dpo->dpoi_index);
}
/**
- * @brief Remove a lock to the face through its dpo. Deallocate the face id locks == 0
+ * @brief Remove a lock from the face through its dpo. Deallocate the face if
+ * locks == 0
*
* @param dpo Pointer to the face dpo
*/
always_inline void
-hicn_face_unlock (dpo_id_t * dpo)
+hicn_face_unlock (dpo_id_t *dpo)
{
hicn_face_unlock_with_id (dpo->dpoi_index);
}
-
/**
* @brief Init the internal structures of the face module
*
* Must be called before processing any packet
*/
-void hicn_face_module_init (vlib_main_t * vm);
-
-u8 * format_hicn_face (u8 * s, va_list * args);
+void hicn_face_module_init (vlib_main_t *vm);
+u8 *format_hicn_face (u8 *s, va_list *args);
/**
* @brief Format all the existing faces
@@ -312,16 +304,7 @@ u8 * format_hicn_face (u8 * s, va_list * args);
* @param n Number of input parameters
* @return String with the faces formatted
*/
-u8 *format_hicn_face_all (u8 * s, int n, ...);
-
-/**
- * @brief Delete a face
- *
- * @param face_id Id of the face to delete
- * @return HICN_ERROR_FACE_NOT_FOUND if the face does not exist, otherwise
- * HICN_ERROR_NONE
- */
-int hicn_face_del (hicn_face_id_t face_id);
+u8 *format_hicn_face_all (u8 *s, int n, ...);
/**
* @brief vector of faces used to collect faces having the same local address
@@ -334,34 +317,25 @@ typedef struct hicn_input_faces_s_
/* Vector of all possible input faces */
u32 vec_id;
- /* Preferred face. If an prod_app face is in the vector it will be the preferred one. */
- /* It's not possible to have multiple prod_app face in the same vector, they would have */
- /* the same local address. Every prod_app face is a point-to-point face between the forwarder */
+ /* Preferred face. If a prod_app face is in the vector it will be the
+ * preferred one. */
+ /* It's not possible to have multiple prod_app faces in the same vector, as
+ * they would have */
+ /* the same local address. Every prod_app face is a point-to-point face
+ * between the forwarder */
/* and the application. */
hicn_face_id_t face_id;
} hicn_face_input_faces_t;
/**
- * Pool containing the vector of possible incoming faces.
- */
-extern hicn_face_vec_t *hicn_vec_pool;
-
-/**
- * Hash tables that indexes a face by remote address. For fast lookup when an
- * interest arrives.
- */
-extern mhash_t hicn_face_vec_hashtb;
-
-
-/**
* Key definition for the mhash table. A face is uniquely identified by ip
- * address, the interface id and a dpo pointing to the next node in the vlib graph.
- * The ip address can correspond to the remote ip address of the next hicn hop,
- * or to the local address of the receiving interface. The former is used to
- * retrieve the incoming face when an interest is received, the latter when
- * the arring packet is a data. If the face is a regular face
- * In case of iface, the following structure can be filled in different ways:
+ * address, the interface id and a dpo pointing to the next node in the vlib
+ * graph. The ip address can correspond to the remote ip address of the next
+ * hicn hop, or to the local address of the receiving interface. The former is
+ * used to retrieve the incoming face when an interest is received, the latter
+ * when the arriving packet is a data. In the case of an iface, the following
+ * structure can be filled in different ways:
* - dpo equal to DPO_INVALID when the iface is a regular hICN iface
* - in case of udp_tunnel dpo =
* {
@@ -377,7 +351,8 @@ extern mhash_t hicn_face_vec_hashtb;
typedef struct __attribute__ ((packed)) hicn_face_key_s
{
ip46_address_t addr;
- union {
+ union
+ {
dpo_id_t dpo;
u64 align_dpo;
};
@@ -393,8 +368,8 @@ typedef struct __attribute__ ((packed)) hicn_face_key_s
* @param key Pointer to an allocated hicn_face_ip_key_t object
*/
always_inline void
-hicn_face_get_key (const ip46_address_t * addr,
- u32 sw_if, const dpo_id_t * dpo, hicn_face_key_t * key)
+hicn_face_get_key (const ip46_address_t *addr, u32 sw_if, const dpo_id_t *dpo,
+ hicn_face_key_t *key)
{
key->dpo = *dpo;
key->addr = *addr;
@@ -405,13 +380,15 @@ hicn_face_get_key (const ip46_address_t * addr,
* @brief Get the face obj from the nat address. Does not add any lock.
*
* @param addr Ip v4 address used to create the key for the hash table.
- * @param sw_if Software interface id used to create the key for the hash table.
+ * @param sw_if Software interface id used to create the key for the hash
+ * table.
* @param hashtb Hash table (remote or local) where to perform the lookup.
*
* @result Pointer to the face.
*/
always_inline hicn_face_t *
-hicn_face_get (const ip46_address_t * addr, u32 sw_if, mhash_t * hashtb, index_t adj_index)
+hicn_face_get (const ip46_address_t *addr, u32 sw_if, mhash_t *hashtb,
+ index_t adj_index)
{
hicn_face_key_t key;
@@ -421,12 +398,11 @@ hicn_face_get (const ip46_address_t * addr, u32 sw_if, mhash_t * hashtb, index_t
hicn_face_get_key (addr, sw_if, &dpo, &key);
- hicn_face_id_t *dpoi_index = (hicn_face_id_t *) mhash_get (hashtb,
- &key);
+ hicn_face_id_t *dpoi_index = (hicn_face_id_t *) mhash_get (hashtb, &key);
- if ( dpoi_index != NULL)
+ if (dpoi_index != NULL && hicn_dpoi_idx_is_valid (*dpoi_index))
{
- hicn_face_lock_with_id(*dpoi_index);
+ hicn_face_lock_with_id (*dpoi_index);
return hicn_dpoi_get_from_idx (*dpoi_index);
}
@@ -434,27 +410,29 @@ hicn_face_get (const ip46_address_t * addr, u32 sw_if, mhash_t * hashtb, index_t
}
/**
- * @brief Get the face obj from the nat address and the dpo. Does not add any lock.
+ * @brief Get the face obj from the nat address and the dpo. Does not add any
+ * lock.
*
* @param addr Ip v4 address used to create the key for the hash table.
- * @param sw_if Software interface id used to create the key for the hash table.
+ * @param sw_if Software interface id used to create the key for the hash
+ * table.
* @param hashtb Hash table (remote or local) where to perform the lookup.
*
* @result Pointer to the face.
*/
always_inline hicn_face_t *
-hicn_face_get_with_dpo (const ip46_address_t * addr, u32 sw_if, const dpo_id_t * dpo, mhash_t * hashtb)
+hicn_face_get_with_dpo (const ip46_address_t *addr, u32 sw_if,
+ const dpo_id_t *dpo, mhash_t *hashtb)
{
hicn_face_key_t key;
hicn_face_get_key (addr, sw_if, dpo, &key);
- hicn_face_id_t *dpoi_index = (hicn_face_id_t *) mhash_get (hashtb,
- &key);
+ hicn_face_id_t *dpoi_index = (hicn_face_id_t *) mhash_get (hashtb, &key);
- if ( dpoi_index != NULL)
+ if (dpoi_index != NULL && hicn_dpoi_idx_is_valid (*dpoi_index))
{
- hicn_face_lock_with_id(*dpoi_index);
+ hicn_face_lock_with_id (*dpoi_index);
return hicn_dpoi_get_from_idx (*dpoi_index);
}
@@ -462,27 +440,6 @@ hicn_face_get_with_dpo (const ip46_address_t * addr, u32 sw_if, const dpo_id_t *
}
/**
- * @brief Get the vector of faces from the ip v4 address. Does not add any lock.
- *
- * @param addr Ip v4 address used to create the key for the hash table.
- * @param sw_if Software interface id used to create the key for the hash table.
- * @param hashtb Hash table (remote or local) where to perform the lookup.
- *
- * @result Pointer to the face.
- */
-always_inline hicn_face_input_faces_t *
-hicn_face_get_vec (const ip46_address_t * addr,
- mhash_t * hashtb)
-{
- hicn_face_key_t key;
-
- dpo_id_t dpo = DPO_INVALID;
-
- hicn_face_get_key (addr, 0, &dpo, &key);
- return (hicn_face_input_faces_t *) mhash_get (hashtb, &key);
-}
-
-/**
* @brief Create a new face ip. API for other modules (e.g., routing)
*
* @param dpo_nh dpo contained in the face that points to the next node in
@@ -490,15 +447,13 @@ hicn_face_get_vec (const ip46_address_t * addr,
* @param nat_addr nat ip v4 or v6 address of the face
* @param sw_if interface associated to the face
* @param pfaceid Pointer to return the face id
- * @param is_app_prod if HICN_FACE_FLAGS_APPFACE_PROD the face is a local application face, all other values are ignored
+ * @param dpo_proto dpo protocol (DPO_PROTO_IP4 or DPO_PROTO_IP6) used to
+ * stack the face dpo on the adjacency
* @return HICN_ERROR_FACE_NO_GLOBAL_IP if the face does not have a globally
* reachable ip address, otherwise HICN_ERROR_NONE
*/
-int hicn_face_add (const dpo_id_t * dpo_nh,
- ip46_address_t * nat_address,
- int sw_if,
- hicn_face_id_t * pfaceid,
- u8 is_app_prod);
+int hicn_face_add (const dpo_id_t *dpo_nh, ip46_address_t *nat_address,
+ int sw_if, hicn_face_id_t *pfaceid, dpo_proto_t dpo_proto);
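A hedged call sketch for the updated signature (dpo_nh, nat_addr and sw_if are placeholders supplied by the caller, e.g. the routing module):

static inline int
example_create_complete_face (const dpo_id_t *dpo_nh,
			      ip46_address_t *nat_addr, int sw_if)
{
  hicn_face_id_t faceid = HICN_FACE_NULL;
  int rv = hicn_face_add (dpo_nh, nat_addr, sw_if, &faceid, DPO_PROTO_IP6);
  if (rv == HICN_ERROR_NONE)
    HICN_DEBUG ("created face %d", faceid);
  return rv;
}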
/**
* @brief Create a new incomplete face ip. (Meant to be used by the data plane)
@@ -511,27 +466,28 @@ int hicn_face_add (const dpo_id_t * dpo_nh,
* reachable ip address, otherwise HICN_ERROR_NONE
*/
always_inline void
-hicn_iface_add (ip46_address_t * nat_address, int sw_if,
- hicn_face_id_t * pfaceid, dpo_proto_t proto,
- u32 adj_index)
+hicn_iface_add (const ip46_address_t *nat_address, int sw_if,
+ hicn_face_id_t *pfaceid, u32 adj_index, u8 flags)
{
hicn_face_t *face;
pool_get (hicn_dpoi_face_pool, face);
- clib_memcpy (&(face->nat_addr), nat_address,
- sizeof (ip46_address_t));
+ clib_memcpy (&(face->nat_addr), nat_address, sizeof (ip46_address_t));
face->sw_if = sw_if;
- face->dpo.dpoi_type = DPO_FIRST;
- face->dpo.dpoi_proto = DPO_PROTO_NONE;
+ face->dpo = (dpo_id_t) DPO_INVALID;
face->dpo.dpoi_index = adj_index;
- face->dpo.dpoi_next_node = 0;
- face->pl_id = (u16) 0;
- face->flags = HICN_FACE_FLAGS_IFACE;
- face->locks = 1;
hicn_face_key_t key;
hicn_face_get_key (nat_address, sw_if, &face->dpo, &key);
+
+ face->iface_next = 1;
+
+ face->pl_id = pl_index++;
+ face->flags = HICN_FACE_FLAGS_IFACE;
+ face->flags |= flags;
+ face->locks = 1;
+
*pfaceid = hicn_dpoi_get_index (face);
mhash_set_mem (&hicn_face_hashtb, &key, (uword *) pfaceid, 0);
@@ -544,97 +500,50 @@ hicn_iface_add (ip46_address_t * nat_address, int sw_if,
}
}
-/**** Helpers to manipulate faces and ifaces from the face/iface input nodes ****/
+/**** Helpers to manipulate faces and ifaces from the face/iface input nodes
+ * ****/
/**
- * @brief Retrieve a vector of faces from the ip4 local address and returns its index.
- *
- * @param vec: Result of the lookup. If no face exists for the local address vec = NULL
- * @param hicnb_flags: Flags that indicate whether the face is an application
- * face or not
- * @param local_addr: Ip v4 nat address of the face
- * @param sw_if: software interface id of the face
- *
- * @result HICN_ERROR_FACE_NOT_FOUND if the face does not exist, otherwise HICN_ERROR_NONE.
+ * @brief Call back to get the adj of the tunnel
*/
-always_inline int
-hicn_face_ip4_lock (hicn_face_id_t * face_id,
- u32 * in_faces_vec_id,
- u8 * hicnb_flags,
- const ip4_address_t * nat_addr)
+static adj_walk_rc_t
+hicn4_iface_adj_walk_cb (adj_index_t ai, void *ctx)
{
- ip46_address_t ip_address = {0};
- ip46_address_set_ip4(&ip_address, nat_addr);
- hicn_face_input_faces_t *in_faces_vec =
- hicn_face_get_vec (&ip_address, &hicn_face_vec_hashtb);
-
- if (PREDICT_FALSE (in_faces_vec == NULL))
- return HICN_ERROR_FACE_NOT_FOUND;
- *in_faces_vec_id = in_faces_vec->vec_id;
- hicn_face_t *face = hicn_dpoi_get_from_idx (in_faces_vec->face_id);
+ hicn_face_t *face = (hicn_face_t *) ctx;
- *hicnb_flags = HICN_BUFFER_FLAGS_DEFAULT;
- *hicnb_flags |=
- (face->flags & HICN_FACE_FLAGS_APPFACE_PROD) >>
- HICN_FACE_FLAGS_APPFACE_PROD_BIT;
+ dpo_set (&face->dpo, DPO_ADJACENCY_MIDCHAIN, DPO_PROTO_IP4, ai);
+ adj_nbr_midchain_stack (ai, &face->dpo);
- *face_id = in_faces_vec->face_id;
-
- return HICN_ERROR_NONE;
+ return (ADJ_WALK_RC_CONTINUE);
}
-/**
- * @brief Retrieve a face from the ip6 local address and returns its dpo. This
- * method adds a lock on the face state.
- *
- * @param dpo: Result of the lookup. If the face doesn't exist dpo = NULL
- * @param hicnb_flags: Flags that indicate whether the face is an application
- * face or not
- * @param nat_addr: Ip v6 nat address of the face
- * @param sw_if: software interface id of the face
- *
- * @result HICN_ERROR_FACE_NOT_FOUND if the face does not exist, otherwise HICN_ERROR_NONE.
- */
always_inline int
-hicn_face_ip6_lock (hicn_face_id_t * face_id,
- u32 * in_faces_vec_id,
- u8 * hicnb_flags,
- const ip6_address_t * nat_addr)
+hicn_face_ip4_find (hicn_face_id_t *index, u8 *hicnb_flags,
+ const ip4_address_t *nat_addr, u32 sw_if, u32 adj_index,
+ u32 node_index)
{
- hicn_face_input_faces_t *in_faces_vec =
- hicn_face_get_vec ((ip46_address_t *)nat_addr, &hicn_face_vec_hashtb);
-
- if (PREDICT_FALSE (in_faces_vec == NULL))
- return HICN_ERROR_FACE_NOT_FOUND;
-
- *in_faces_vec_id = in_faces_vec->vec_id;
- hicn_face_t *face = hicn_dpoi_get_from_idx (in_faces_vec->face_id);
-
- *hicnb_flags = HICN_BUFFER_FLAGS_DEFAULT;
- *hicnb_flags |=
- (face->flags & HICN_FACE_FLAGS_APPFACE_PROD) >>
- HICN_FACE_FLAGS_APPFACE_PROD_BIT;
-
- *face_id = in_faces_vec->face_id;
+ int ret = HICN_ERROR_FACE_NOT_FOUND;
+ hicn_face_id_t face_id;
- return HICN_ERROR_NONE;
-}
+ /* All (complete) faces are indexed by remote address as well */
+ /* if the face exists, it adds a lock */
+ hicn_face_t *face = hicn_face_get ((const ip46_address_t *) nat_addr, sw_if,
+ &hicn_face_hashtb, adj_index);
-/**
- * @brief Call back to get the adj of the tunnel
- */
-static adj_walk_rc_t
-hicn4_iface_adj_walk_cb (adj_index_t ai,
- void *ctx)
-{
+ if (face != NULL)
+ {
+ /* unlock the face. We don't take a lock on each interest we receive */
+ face_id = hicn_dpoi_get_index (face);
+ hicn_face_unlock_with_id (face_id);
- hicn_face_t *face = (hicn_face_t *)ctx;
+ *hicnb_flags = HICN_BUFFER_FLAGS_DEFAULT;
+ *index = face_id;
- dpo_set(&face->dpo, DPO_ADJACENCY_MIDCHAIN, DPO_PROTO_IP4, ai);
- adj_nbr_midchain_stack(ai, &face->dpo);
+ ret = HICN_ERROR_FACE_ALREADY_CREATED;
+ }
- return (ADJ_WALK_RC_CONTINUE);
+ return ret;
}
/**
@@ -648,83 +557,95 @@ hicn4_iface_adj_walk_cb (adj_index_t ai,
* @param sw_if: software interface id of the face
* @param node_index: vlib edge index to use in the packet processing
*/
-always_inline void
-hicn_iface_ip4_add_and_lock (hicn_face_id_t * index,
- u8 * hicnb_flags,
- const ip4_address_t * nat_addr,
- u32 sw_if, u32 adj_index, u32 node_index)
+always_inline int
+hicn_face_ip4_add_and_lock (hicn_face_id_t *index, u8 *hicnb_flags,
+ const ip4_address_t *nat_addr, u32 sw_if,
+ u32 adj_index, u32 node_index)
{
- /*All (complete) faces are indexed by remote addess as well */
+ int ret = HICN_ERROR_NONE;
+ hicn_face_t *face = NULL;
- ip46_address_t ip_address = {0};
- ip46_address_set_ip4(&ip_address, nat_addr);
+ ret = hicn_face_ip4_find (index, hicnb_flags, nat_addr, sw_if, adj_index,
+ node_index);
- /* if the face exists, it adds a lock */
- hicn_face_t *face =
- hicn_face_get (&ip_address, sw_if, &hicn_face_hashtb, adj_index);
-
- if (face == NULL)
+ if (ret == HICN_ERROR_FACE_NOT_FOUND)
{
+ ip46_address_t ip_address = { 0 };
+ ip46_address_set_ip4 (&ip_address, nat_addr);
+
hicn_face_id_t idx;
- hicn_iface_add (&ip_address, sw_if, &idx, DPO_PROTO_IP4, adj_index);
+ u8 face_flags = *hicnb_flags & HICN_BUFFER_FLAGS_FROM_UDP4_TUNNEL ?
+ HICN_FACE_FLAGS_UDP4 :
+ *hicnb_flags & HICN_BUFFER_FLAGS_FROM_UDP6_TUNNEL ?
+ HICN_FACE_FLAGS_UDP6 :
+ 0;
- face = hicn_dpoi_get_from_idx(idx);
+ hicn_iface_add (&ip_address, sw_if, &idx, adj_index, face_flags);
- face->dpo.dpoi_type = DPO_FIRST;
- face->dpo.dpoi_proto = DPO_PROTO_IP4;
- face->dpo.dpoi_index = adj_index;
- face->dpo.dpoi_next_node = node_index;
+ face = hicn_dpoi_get_from_idx (idx);
+
+ face->iface_next = node_index;
/* if (nat_addr->as_u32 == 0) */
/* { */
- adj_nbr_walk(face->sw_if,
- FIB_PROTOCOL_IP4,
- hicn4_iface_adj_walk_cb,
- face);
- /* } */
+ adj_nbr_walk (face->sw_if, FIB_PROTOCOL_IP4, hicn4_iface_adj_walk_cb,
+ face);
+ /* } */
*hicnb_flags = HICN_BUFFER_FLAGS_DEFAULT;
+ *hicnb_flags |= HICN_BUFFER_FLAGS_NEW_FACE;
*index = idx;
- return;
- }
- else
- {
- /* unlock the face. We don't take a lock on each interest we receive */
- hicn_face_id_t face_id = hicn_dpoi_get_index(face);
- hicn_face_unlock_with_id(face_id);
}
- /* Code replicated on purpose */
- *hicnb_flags = HICN_BUFFER_FLAGS_DEFAULT;
- *hicnb_flags |=
- (face->flags & HICN_FACE_FLAGS_APPFACE_PROD) >>
- HICN_FACE_FLAGS_APPFACE_PROD_BIT;
-
- *index = hicn_dpoi_get_index (face);
+ return ret;
}
/**
* @brief Call back to get the adj of the tunnel
*/
static adj_walk_rc_t
-hicn6_iface_adj_walk_cb (adj_index_t ai,
- void *ctx)
+hicn6_iface_adj_walk_cb (adj_index_t ai, void *ctx)
{
- hicn_face_t *face = (hicn_face_t *)ctx;
+ hicn_face_t *face = (hicn_face_t *) ctx;
- ip_adjacency_t *adj = adj_get(ai);
+ ip_adjacency_t *adj = adj_get (ai);
if ((adj->lookup_next_index == IP_LOOKUP_NEXT_MIDCHAIN) ||
(adj->lookup_next_index == IP_LOOKUP_NEXT_MCAST_MIDCHAIN))
{
- dpo_set(&face->dpo, DPO_ADJACENCY_MIDCHAIN, adj->ia_nh_proto, ai);
- adj_nbr_midchain_stack(ai, &face->dpo);
+ dpo_set (&face->dpo, DPO_ADJACENCY_MIDCHAIN,
+ (dpo_proto_t) adj->ia_nh_proto, ai);
+ adj_nbr_midchain_stack (ai, &face->dpo);
}
return (ADJ_WALK_RC_CONTINUE);
}
+always_inline int
+hicn_face_ip6_find (hicn_face_id_t *index, u8 *hicnb_flags,
+ const ip6_address_t *nat_addr, u32 sw_if, u32 adj_index,
+ u32 node_index)
+{
+ int ret = HICN_ERROR_FACE_NOT_FOUND;
+ hicn_face_id_t face_id;
+
+ hicn_face_t *face = hicn_face_get ((const ip46_address_t *) nat_addr, sw_if,
+ &hicn_face_hashtb, adj_index);
+
+ if (face != NULL)
+ {
+ /* unlock the face. We don't take a lock on each interest we receive */
+ face_id = hicn_dpoi_get_index (face);
+ hicn_face_unlock_with_id (face_id);
+ *hicnb_flags = HICN_BUFFER_FLAGS_DEFAULT;
+ *index = face_id;
+
+ ret = HICN_ERROR_FACE_ALREADY_CREATED;
+ }
+
+ return ret;
+}
/**
* @brief Retrieve, or create if it doesn't exist, a face from the ip6 local
@@ -737,54 +658,59 @@ hicn6_iface_adj_walk_cb (adj_index_t ai,
* @param sw_if: software interface id of the face
* @param node_index: vlib edge index to use in the packet processing
*/
-always_inline void
-hicn_iface_ip6_add_and_lock (hicn_face_id_t * index,
- u8 * hicnb_flags,
- const ip6_address_t * nat_addr,
- u32 sw_if, u32 adj_index, u32 node_index)
+always_inline int
+hicn_face_ip6_add_and_lock (hicn_face_id_t *index, u8 *hicnb_flags,
+ const ip6_address_t *nat_addr, u32 sw_if,
+ u32 adj_index, u32 node_index)
{
- /*All (complete) faces are indexed by remote addess as well */
- /* if the face exists, it adds a lock */
- hicn_face_t *face =
- hicn_face_get ((ip46_address_t *)nat_addr, sw_if, &hicn_face_hashtb, adj_index);
+ int ret = HICN_ERROR_NONE;
+ hicn_face_t *face = NULL;
+
+ ret = hicn_face_ip6_find (index, hicnb_flags, nat_addr, sw_if, adj_index,
+ node_index);
- if (face == NULL)
+ if (ret == HICN_ERROR_FACE_NOT_FOUND)
{
+ ip46_address_t ip_address = { 0 };
+ ip46_address_set_ip6 (&ip_address, nat_addr);
hicn_face_id_t idx;
- hicn_iface_add ((ip46_address_t *) nat_addr, sw_if, &idx, DPO_PROTO_IP6, adj_index);
+ u8 face_flags = *hicnb_flags & HICN_BUFFER_FLAGS_FROM_UDP4_TUNNEL ?
+ HICN_FACE_FLAGS_UDP4 :
+ *hicnb_flags & HICN_BUFFER_FLAGS_FROM_UDP6_TUNNEL ?
+ HICN_FACE_FLAGS_UDP6 :
+ 0;
- face = hicn_dpoi_get_from_idx(idx);
+ hicn_iface_add ((const ip46_address_t *) nat_addr, sw_if, &idx,
+ adj_index, face_flags);
- face->dpo.dpoi_type = DPO_FIRST;
- face->dpo.dpoi_proto = DPO_PROTO_IP6;
- face->dpo.dpoi_index = adj_index;
- face->dpo.dpoi_next_node = node_index;
+ face = hicn_dpoi_get_from_idx (idx);
+ face->iface_next = node_index;
- adj_nbr_walk(face->sw_if,
- FIB_PROTOCOL_IP6,
- hicn6_iface_adj_walk_cb,
- face);
-
- *hicnb_flags = HICN_BUFFER_FLAGS_DEFAULT;
+ adj_nbr_walk (face->sw_if, FIB_PROTOCOL_IP6, hicn6_iface_adj_walk_cb,
+ face);
+ *hicnb_flags = HICN_BUFFER_FLAGS_NEW_FACE;
*index = idx;
-
- return;
- }
- else
- {
- /* unlock the face. We don't take a lock on each interest we receive */
- hicn_face_id_t face_id = hicn_dpoi_get_index(face);
- hicn_face_unlock_with_id(face_id);
}
- /* Code replicated on purpose */
- *hicnb_flags = HICN_BUFFER_FLAGS_DEFAULT;
- *hicnb_flags |=
- (face->flags & HICN_FACE_FLAGS_APPFACE_PROD) >>
- HICN_FACE_FLAGS_APPFACE_PROD_BIT;
+ return ret;
+}
+
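For clarity, a minimal caller sketch (hypothetical, not part of this patch) of how the new find / add-and-lock contract is meant to be consumed; the buffer and index variables (b0, hicnb0, ip_hdr, sw_if, node_index) are assumed to exist in an enclosing input-node function:

/* Hypothetical caller sketch -- illustrates the return-code contract only. */
hicn_face_id_t face_id;
u8 flags = hicnb0->flags; /* may carry HICN_BUFFER_FLAGS_FROM_UDP{4,6}_TUNNEL */
int rv = hicn_face_ip6_add_and_lock (&face_id, &flags, &ip_hdr->src_address,
                                     sw_if,
                                     vnet_buffer (b0)->ip.adj_index[VLIB_RX],
                                     node_index);
if (rv == HICN_ERROR_NONE)
  {
    /* A new iface was created; flags now carry HICN_BUFFER_FLAGS_NEW_FACE. */
  }
else if (rv == HICN_ERROR_FACE_ALREADY_CREATED)
  {
    /* The face already existed: it was found, immediately unlocked, and
       face_id is valid. */
  }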
+/**
+ * @brief Check if a face is an application face
+ *
+ * @param face_id: The id of the face
+ * @return 1 if application face, 0 otherwise
+ */
+always_inline int
+hicn_face_is_local (hicn_face_id_t face_id)
+{
+ hicn_face_t *face;
+ face = hicn_dpoi_get_from_idx (face_id);
+ ASSERT (face != NULL);
- *index = hicn_dpoi_get_index (face);
+ return face->flags & HICN_FACE_FLAGS_APPFACE_PROD ||
+ face->flags & HICN_FACE_FLAGS_APPFACE_CONS;
}
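As an illustration (hypothetical snippet, not in this patch), a node can use this helper to branch on whether a packet belongs to a local application face:

/* Hypothetical usage of hicn_face_is_local (). */
if (hicn_face_is_local (hicnb0->face_id))
  {
    /* producer or consumer application face: handle locally */
  }
else
  {
    /* regular network face */
  }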
#endif // __HICN_FACE_H__
diff --git a/hicn-plugin/src/faces/face_cli.c b/hicn-plugin/src/faces/face_cli.c
index e9e516cc6..1c1501478 100644
--- a/hicn-plugin/src/faces/face_cli.c
+++ b/hicn-plugin/src/faces/face_cli.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Copyright (c) 2021-2023 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -19,9 +19,8 @@
#include "../error.h"
static clib_error_t *
-hicn_face_cli_show_command_fn (vlib_main_t * vm,
- unformat_input_t * main_input,
- vlib_cli_command_t * cmd)
+hicn_face_cli_show_command_fn (vlib_main_t *vm, unformat_input_t *main_input,
+ vlib_cli_command_t *cmd)
{
hicn_face_id_t face_id = HICN_FACE_NULL;
@@ -46,9 +45,8 @@ hicn_face_cli_show_command_fn (vlib_main_t * vm,
deleted = 1;
else
{
- return clib_error_return (0, "%s",
- get_error_string
- (HICN_ERROR_CLI_INVAL));
+ return clib_error_return (
+ 0, "%s", get_error_string (HICN_ERROR_CLI_INVAL));
}
}
@@ -56,48 +54,43 @@ hicn_face_cli_show_command_fn (vlib_main_t * vm,
{
int idx = 0;
vec_foreach_index (idx, face_type_names_vec)
- {
- if (!strcmp (face_type_names_vec[idx], face_type_name))
- found = idx;
- }
+ {
+ if (!strcmp (face_type_names_vec[idx], face_type_name))
+ found = idx;
+ }
if (found == ~0)
return (clib_error_return (0, "Face type unknown"));
}
-
}
if (face_id != HICN_FACE_NULL)
{
if (!hicn_dpoi_idx_is_valid (face_id))
- return clib_error_return (0, "%s",
- get_error_string
- (HICN_ERROR_FACE_NOT_FOUND));
+ return clib_error_return (
+ 0, "%s", get_error_string (HICN_ERROR_FACE_NOT_FOUND));
hicn_face_t *face = hicn_dpoi_get_from_idx (face_id);
- vlib_cli_output (vm, "%U\n", format_hicn_face, face_id, 0 /*indent */ );
+ vlib_cli_output (vm, "%U\n", format_hicn_face, face_id, 0 /*indent */);
u32 indent = 3;
for (int i = 0; i < HICN_N_COUNTER; i++)
{
- vlib_get_combined_counter (&counters
- [hicn_dpoi_get_index (face) *
- HICN_N_COUNTER], i, &v);
- s =
- format (s, "%U%s", format_white_space, indent,
- HICN_FACE_CTRX_STRING[i]);
+ vlib_get_combined_counter (
+ &counters[hicn_dpoi_get_index (face) * HICN_N_COUNTER], i, &v);
+ s = format (s, "%U%s", format_white_space, indent,
+ HICN_FACE_CTRX_STRING[i]);
if (n)
- _vec_len (n) = 0;
+ vec_set_len (n, 0);
n = format (n, "packets");
- s =
- format (s, "%U%-16v%16Ld", format_white_space,
- 30 - strlen (HICN_FACE_CTRX_STRING[i]), n, v.packets);
+ s = format (s, "%U%-16v%16Ld", format_white_space,
+ 30 - strlen (HICN_FACE_CTRX_STRING[i]), n, v.packets);
- _vec_len (n) = 0;
+ vec_set_len (n, 0);
n = format (n, "bytes");
- s = format (s, "\n%U%-16v%16Ld\n",
- format_white_space, indent + 30, n, v.bytes);
+ s = format (s, "\n%U%-16v%16Ld\n", format_white_space, indent + 30,
+ n, v.bytes);
}
vlib_cli_output (vm, "%s\n", s);
}
@@ -106,69 +99,79 @@ hicn_face_cli_show_command_fn (vlib_main_t * vm,
if (found != ~0)
{
hicn_face_t *face;
- /* *INDENT-OFF* */
- pool_foreach(face, hicn_dpoi_face_pool,
- {
- if (!((face->flags & HICN_FACE_FLAGS_DELETED) && !deleted))
- {
- if (face->flags)
- {
- vlib_cli_output(vm, "%U\n", format_hicn_face, hicn_dpoi_get_index(face), 0);
- u8 * s = 0;
- u32 indent = 3;
-
- for (int i = 0; i < HICN_N_COUNTER; i++)
- {
- vlib_get_combined_counter (&counters[hicn_dpoi_get_index(face) * HICN_N_COUNTER], i, &v);
- s = format (s, "%U%s",format_white_space, indent, HICN_FACE_CTRX_STRING[i]);
-
- if (n)
- _vec_len (n) = 0;
- n = format (n, "packets");
- s = format (s, "%U%-16v%16Ld", format_white_space, 30-strlen(HICN_FACE_CTRX_STRING[i]), n, v.packets);
-
- _vec_len (n) = 0;
- n = format (n, "bytes");
- s = format (s, "\n%U%-16v%16Ld\n",
- format_white_space, indent+30, n, v.bytes);
- }
- vlib_cli_output (vm, "%s\n", s);
- }
- }
- });
- /* *INDENT-ON* */
+ pool_foreach (face, hicn_dpoi_face_pool)
+ {
+ if (!((face->flags & HICN_FACE_FLAGS_DELETED) && !deleted))
+ {
+ if (face->flags)
+ {
+ vlib_cli_output (vm, "%U\n", format_hicn_face,
+ hicn_dpoi_get_index (face), 0);
+ u8 *s = 0;
+ u32 indent = 3;
+
+ for (int i = 0; i < HICN_N_COUNTER; i++)
+ {
+ vlib_get_combined_counter (
+ &counters[hicn_dpoi_get_index (face) *
+ HICN_N_COUNTER],
+ i, &v);
+ s = format (s, "%U%s", format_white_space, indent,
+ HICN_FACE_CTRX_STRING[i]);
+
+ if (n)
+ vec_set_len (n, 0);
+ n = format (n, "packets");
+ s = format (s, "%U%-16v%16Ld", format_white_space,
+ 30 - strlen (HICN_FACE_CTRX_STRING[i]),
+ n, v.packets);
+
+ vec_set_len (n, 0);
+ n = format (n, "bytes");
+ s =
+ format (s, "\n%U%-16v%16Ld\n", format_white_space,
+ indent + 30, n, v.bytes);
+ }
+ vlib_cli_output (vm, "%s\n", s);
+ }
+ }
+ }
}
else
{
hicn_face_t *face;
- /* *INDENT-OFF* */
- pool_foreach(face, hicn_dpoi_face_pool,
- {
- if (!((face->flags & HICN_FACE_FLAGS_DELETED) && !deleted))
- {
- vlib_cli_output(vm, "%U\n", format_hicn_face, hicn_dpoi_get_index(face), 0);
- u32 indent = 3;
- u8 * s = 0;
-
- for (int i = 0; i < HICN_N_COUNTER; i++)
- {
- vlib_get_combined_counter (&counters[hicn_dpoi_get_index(face) * HICN_N_COUNTER], i, &v);
- s = format (s, "%U%s",format_white_space, indent, HICN_FACE_CTRX_STRING[i]);
-
- if (n)
- _vec_len (n) = 0;
- n = format (n, "packets");
- s = format (s, "%U%-16v%16Ld", format_white_space, 30-strlen(HICN_FACE_CTRX_STRING[i]), n, v.packets);
-
- _vec_len (n) = 0;
- n = format (n, "bytes");
- s = format (s, "\n%U%-16v%16Ld\n",
- format_white_space, indent+30, n, v.bytes);
- }
- vlib_cli_output (vm, "%s\n", s);
- }
- });
- /* *INDENT-ON* */
+ pool_foreach (face, hicn_dpoi_face_pool)
+ {
+ if (!((face->flags & HICN_FACE_FLAGS_DELETED) && !deleted))
+ {
+ vlib_cli_output (vm, "%U\n", format_hicn_face,
+ hicn_dpoi_get_index (face), 0);
+ u32 indent = 3;
+ u8 *s = 0;
+
+ for (int i = 0; i < HICN_N_COUNTER; i++)
+ {
+ vlib_get_combined_counter (
+ &counters[hicn_dpoi_get_index (face) * HICN_N_COUNTER],
+ i, &v);
+ s = format (s, "%U%s", format_white_space, indent,
+ HICN_FACE_CTRX_STRING[i]);
+
+ if (n)
+ vec_set_len (n, 0);
+ n = format (n, "packets");
+ s = format (s, "%U%-16v%16Ld", format_white_space,
+ 30 - strlen (HICN_FACE_CTRX_STRING[i]), n,
+ v.packets);
+
+ vec_set_len (n, 0);
+ n = format (n, "bytes");
+ s = format (s, "\n%U%-16v%16Ld\n", format_white_space,
+ indent + 30, n, v.bytes);
+ }
+ vlib_cli_output (vm, "%s\n", s);
+ }
+ }
}
}
@@ -176,14 +179,11 @@ hicn_face_cli_show_command_fn (vlib_main_t * vm,
}
/* cli declaration for 'show faces' */
-/* *INDENT-OFF* */
-VLIB_CLI_COMMAND (hicn_face_cli_show_command, static) =
-{
+VLIB_CLI_COMMAND (hicn_face_cli_show_command, static) = {
.path = "hicn face show",
.short_help = "hicn face show [<face_id>]",
.function = hicn_face_cli_show_command_fn,
};
-/* *INDENT-ON* */
/*
* fd.io coding-style-patch-verification: ON
diff --git a/hicn-plugin/src/faces/face_flags.h b/hicn-plugin/src/faces/face_flags.h
new file mode 100644
index 000000000..880d3b558
--- /dev/null
+++ b/hicn-plugin/src/faces/face_flags.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2021 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HICN_FACE_FLAGS_H__
+#define __HICN_FACE_FLAGS_H__
+
+/* Flags */
+/* A face is complete and stores all the information. An iface lacks the
+   adj index, therefore sending a packet through an iface requires a lookup
+   in the FIB. */
+#define foreach_face_flag \
+ _ (0, FACE, "face") \
+ _ (1, IFACE, "iface") \
+ _ (2, APPFACE_PROD, "face is producer face") \
+ _ (3, APPFACE_CONS, "face is consumer face") \
+ _ (4, DELETED, "face is deleted") \
+ _ (5, UDP4, "face is udp4") \
+ _ (6, UDP6, "face is udp6")
+
+enum
+{
+ HICN_FACE_FLAGS_DEFAULT = 0,
+#define _(a, b, c) HICN_FACE_FLAGS_##b = (1 << a),
+ foreach_face_flag
+#undef _
+};
+
+#endif /* __HICN_FACE_FLAGS_H__ */
\ No newline at end of file
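For readers unfamiliar with the X-macro pattern used in this new header, the enum above expands roughly to the following (illustration only; the values follow directly from the bit positions listed in foreach_face_flag):

/* Approximate expansion of foreach_face_flag -- not part of the patch. */
enum
{
  HICN_FACE_FLAGS_DEFAULT = 0,
  HICN_FACE_FLAGS_FACE = (1 << 0),
  HICN_FACE_FLAGS_IFACE = (1 << 1),
  HICN_FACE_FLAGS_APPFACE_PROD = (1 << 2),
  HICN_FACE_FLAGS_APPFACE_CONS = (1 << 3),
  HICN_FACE_FLAGS_DELETED = (1 << 4),
  HICN_FACE_FLAGS_UDP4 = (1 << 5),
  HICN_FACE_FLAGS_UDP6 = (1 << 6),
};

/* Typical usage (inside a function): flags are OR-ed together and tested
   with a bitwise AND. */
u8 flags = HICN_FACE_FLAGS_IFACE | HICN_FACE_FLAGS_UDP4;
int is_udp4 = (flags & HICN_FACE_FLAGS_UDP4) != 0;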
diff --git a/hicn-plugin/src/faces/face_node.c b/hicn-plugin/src/faces/face_node.c
index 48d97ad51..2cfb9c7dc 100644
--- a/hicn-plugin/src/faces/face_node.c
+++ b/hicn-plugin/src/faces/face_node.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020 Cisco and/or its affiliates.
+ * Copyright (c) 2021-2022 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -23,6 +23,10 @@
#include "../cache_policies/cs_lru.h"
#include "../infra.h"
#include "../hicn.h"
+#include "../parser.h"
+
+#include <hicn/error.h>
+#include <hicn/util/ip_address.h>
/**
* @File
@@ -56,9 +60,9 @@ typedef struct
u32 next_index;
u32 sw_if_index;
u8 pkt_type;
+ hicn_error_t error;
u8 packet_data[60];
-}
-hicn4_face_input_trace_t;
+} hicn4_face_input_trace_t;
typedef enum
{
@@ -74,9 +78,9 @@ typedef struct
u32 next_index;
u32 sw_if_index;
u8 pkt_type;
+ hicn_error_t error;
u8 packet_data[60];
-}
-hicn6_face_input_trace_t;
+} hicn6_face_input_trace_t;
typedef enum
{
@@ -88,6 +92,7 @@ typedef enum
#define NEXT_MAPME_IP4 HICN4_FACE_INPUT_NEXT_MAPME
#define NEXT_MAPME_IP6 HICN6_FACE_INPUT_NEXT_MAPME
+
#define NEXT_DATA_IP4 HICN4_FACE_INPUT_NEXT_DATA
#define NEXT_DATA_IP6 HICN6_FACE_INPUT_NEXT_DATA
@@ -97,9 +102,6 @@ typedef enum
#define IP_HEADER_4 ip4_header_t
#define IP_HEADER_6 ip6_header_t
-#define LOCK_DPO_FACE_IP4 hicn_face_ip4_lock
-#define LOCK_DPO_FACE_IP6 hicn_face_ip6_lock
-
#define TRACE_INPUT_PKT_IP4 hicn4_face_input_trace_t
#define TRACE_INPUT_PKT_IP6 hicn6_face_input_trace_t
@@ -109,202 +111,287 @@ typedef enum
* following code is pretty straightforward and most of the complexity is in
* functions that can be easily debugged.
*/
-#define face_input_x1(ipv) \
- do{ \
- vlib_buffer_t *b0; \
- u32 bi0; \
- u32 next0 = NEXT_ERROR_DROP_IP##ipv; \
- IP_HEADER_##ipv * ip_hdr = NULL; \
- hicn_buffer_t * hicnb0; \
- int ret; \
- /* Prefetch for next iteration. */ \
- if (n_left_from > 1) \
- { \
- vlib_buffer_t *b1; \
- b1 = vlib_get_buffer (vm, from[1]); \
- CLIB_PREFETCH (b1, 2*CLIB_CACHE_LINE_BYTES, STORE); \
- CLIB_PREFETCH (b1->data, CLIB_CACHE_LINE_BYTES , LOAD); \
- } \
- /* Dequeue a packet buffer */ \
- bi0 = from[0]; \
- from += 1; \
- n_left_from -= 1; \
- to_next[0] = bi0; \
- to_next += 1; \
- n_left_to_next -= 1; \
- \
- b0 = vlib_get_buffer (vm, bi0); \
- hicnb0 = hicn_get_buffer(b0); \
- ip_hdr = (IP_HEADER_##ipv *) vlib_buffer_get_current(b0); \
- \
- u8 is_icmp = ip_hdr->protocol == IPPROTO_ICMPV##ipv; \
- \
- next0 = is_icmp*NEXT_MAPME_IP##ipv + \
- (1-is_icmp)*NEXT_DATA_IP##ipv; \
- \
- ret = LOCK_DPO_FACE_IP##ipv \
- (&(hicnb0->face_id), \
- &(hicnb0->in_faces_vec_id), \
- &hicnb0->flags, \
- &(ip_hdr->dst_address)); \
- \
- if ( PREDICT_FALSE(ret != HICN_ERROR_NONE) ) \
- next0 = NEXT_ERROR_DROP_IP##ipv; \
- else \
- { \
- vlib_increment_combined_counter ( \
- &counters[hicnb0->face_id \
- * HICN_N_COUNTER], thread_index, \
- HICN_FACE_COUNTERS_DATA_RX, \
- 1, \
- vlib_buffer_length_in_chain(vm, b0)); \
- stats.pkts_data_count += 1; \
- } \
- \
- if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) && \
- (b0->flags & VLIB_BUFFER_IS_TRACED))) \
- { \
- TRACE_INPUT_PKT_IP##ipv *t = \
- vlib_add_trace (vm, node, b0, sizeof (*t)); \
- t->pkt_type = HICN_PKT_TYPE_INTEREST; \
- t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX]; \
- t->next_index = next0; \
- clib_memcpy_fast (t->packet_data, \
- vlib_buffer_get_current (b0), \
- sizeof (t->packet_data)); \
- } \
- \
- \
- /* Verify speculative enqueue, maybe switch current next frame */ \
- vlib_validate_buffer_enqueue_x1 (vm, node, next_index, \
- to_next, n_left_to_next, \
- bi0, next0); \
- }while(0)
-
-
-#define face_input_x2(ipv) \
- do{ \
- vlib_buffer_t *b0, *b1; \
- u32 bi0, bi1; \
- u32 next0 = NEXT_ERROR_DROP_IP##ipv; \
- u32 next1 = NEXT_ERROR_DROP_IP##ipv; \
- IP_HEADER_##ipv * ip_hdr0 = NULL; \
- IP_HEADER_##ipv * ip_hdr1 = NULL; \
- hicn_buffer_t * hicnb0; \
- hicn_buffer_t * hicnb1; \
- int ret0, ret1; \
- /* Prefetch for next iteration. */ \
- { \
- vlib_buffer_t *b2, *b3; \
- b2 = vlib_get_buffer (vm, from[2]); \
- b3 = vlib_get_buffer (vm, from[3]); \
- CLIB_PREFETCH (b2, 2*CLIB_CACHE_LINE_BYTES, STORE); \
- CLIB_PREFETCH (b3, 2*CLIB_CACHE_LINE_BYTES, STORE); \
- CLIB_PREFETCH (b2->data, CLIB_CACHE_LINE_BYTES , LOAD); \
- CLIB_PREFETCH (b3->data, CLIB_CACHE_LINE_BYTES , LOAD); \
- } \
- /* Dequeue a packet buffer */ \
- bi0 = from[0]; \
- bi1 = from[1]; \
- from += 2; \
- n_left_from -= 2; \
- to_next[0] = bi0; \
- to_next[1] = bi1; \
- to_next += 2; \
- n_left_to_next -= 2; \
- \
- b0 = vlib_get_buffer (vm, bi0); \
- b1 = vlib_get_buffer (vm, bi1); \
- hicnb0 = hicn_get_buffer(b0); \
- hicnb1 = hicn_get_buffer(b1); \
- ip_hdr0 = (IP_HEADER_##ipv *) vlib_buffer_get_current(b0); \
- ip_hdr1 = (IP_HEADER_##ipv *) vlib_buffer_get_current(b1); \
- \
- u8 is_icmp0 = ip_hdr0->protocol == IPPROTO_ICMPV##ipv; \
- u8 is_icmp1 = ip_hdr1->protocol == IPPROTO_ICMPV##ipv; \
- \
- next0 = is_icmp0*NEXT_MAPME_IP##ipv + \
- (1-is_icmp0)*NEXT_DATA_IP##ipv; \
- \
- next1 = is_icmp1*NEXT_MAPME_IP##ipv + \
- (1-is_icmp1)*NEXT_DATA_IP##ipv; \
- \
- \
- ret0 = LOCK_DPO_FACE_IP##ipv \
- (&(hicnb0->face_id), \
- &(hicnb0->in_faces_vec_id), \
- &hicnb0->flags, \
- &(ip_hdr0->dst_address)); \
- \
- ret1 = LOCK_DPO_FACE_IP##ipv \
- (&(hicnb1->face_id), \
- &(hicnb1->in_faces_vec_id), \
- &hicnb1->flags, \
- &(ip_hdr1->dst_address)); \
- \
- if ( PREDICT_FALSE(ret0 != HICN_ERROR_NONE) ) \
- next0 = NEXT_ERROR_DROP_IP##ipv; \
- else \
- { \
- vlib_increment_combined_counter ( \
- &counters[hicnb0->face_id \
- * HICN_N_COUNTER], thread_index, \
- HICN_FACE_COUNTERS_DATA_RX, \
- 1, \
- vlib_buffer_length_in_chain(vm, b0)); \
- stats.pkts_data_count += 1; \
- } \
- \
- if ( PREDICT_FALSE(ret1 != HICN_ERROR_NONE) ) \
- next1 = NEXT_ERROR_DROP_IP##ipv; \
- else \
- { \
- vlib_increment_combined_counter ( \
- &counters[hicnb1->face_id \
- * HICN_N_COUNTER], thread_index,\
- HICN_FACE_COUNTERS_DATA_RX, \
- 1, \
- vlib_buffer_length_in_chain(vm, b1)); \
- stats.pkts_data_count += 1; \
- } \
- \
- if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) && \
- (b0->flags & VLIB_BUFFER_IS_TRACED))) \
- { \
- TRACE_INPUT_PKT_IP##ipv *t = \
- vlib_add_trace (vm, node, b0, sizeof (*t)); \
- t->pkt_type = HICN_PKT_TYPE_INTEREST; \
- t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX]; \
- t->next_index = next0; \
- clib_memcpy_fast (t->packet_data, \
- vlib_buffer_get_current (b0), \
- sizeof (t->packet_data)); \
- } \
- \
- if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) && \
- (b1->flags & VLIB_BUFFER_IS_TRACED))) \
- { \
- TRACE_INPUT_PKT_IP##ipv *t = \
- vlib_add_trace (vm, node, b1, sizeof (*t)); \
- t->pkt_type = HICN_PKT_TYPE_INTEREST; \
- t->sw_if_index = vnet_buffer (b1)->sw_if_index[VLIB_RX]; \
- t->next_index = next1; \
- clib_memcpy_fast (t->packet_data, \
- vlib_buffer_get_current (b1), \
- sizeof (t->packet_data)); \
- } \
- \
- \
- /* Verify speculative enqueue, maybe switch current next frame */ \
- vlib_validate_buffer_enqueue_x2 (vm, node, next_index, \
- to_next, n_left_to_next, \
- bi0, bi1, next0, next1); \
- }while(0)
-
+#define face_input_x1(ipv) \
+ do \
+ { \
+ vlib_buffer_t *b0; \
+ u32 bi0, sw_if0; \
+ u32 next0 = NEXT_ERROR_DROP_IP##ipv; \
+ u8 is_mapme0; \
+ IP_HEADER_##ipv *ip_hdr = NULL; \
+ hicn_buffer_t *hicnb0; \
+ int from_tunnel0; \
+ int ret0 = HICN_ERROR_NONE; \
+ /* Prefetch for next iteration. */ \
+ if (n_left_from > 1) \
+ { \
+ vlib_buffer_t *b1; \
+ b1 = vlib_get_buffer (vm, from[1]); \
+ CLIB_PREFETCH (b1, 2 * CLIB_CACHE_LINE_BYTES, STORE); \
+ CLIB_PREFETCH (b1->data, CLIB_CACHE_LINE_BYTES, LOAD); \
+ } \
+ /* Dequeue a packet buffer */ \
+ bi0 = from[0]; \
+ from += 1; \
+ n_left_from -= 1; \
+ to_next[0] = bi0; \
+ to_next += 1; \
+ n_left_to_next -= 1; \
+ \
+ b0 = vlib_get_buffer (vm, bi0); \
+ hicnb0 = hicn_get_buffer (b0); \
+ ip_hdr = (IP_HEADER_##ipv *) vlib_buffer_get_current (b0); \
+ \
+ /* Parse packet and cache useful info in opaque2 */ \
+ ret0 = hicn_data_parse_pkt (b0, vlib_buffer_length_in_chain (vm, b0)); \
+ is_mapme0 = hicn_packet_get_type (&hicn_get_buffer (b0)->pkbuf) == \
+ HICN_PACKET_TYPE_MAPME; \
+ \
+ ret0 = (ret0 == HICN_ERROR_NONE) || \
+ (ret0 == HICN_ERROR_PARSER_MAPME_PACKET); \
+ \
+ /* If parsing is ok, send packet to next node */ \
+ if (PREDICT_FALSE (!ret0)) \
+ { \
+ next0 = HICN##ipv##_FACE_INPUT_NEXT_ERROR_DROP; \
+ } \
+ else \
+ { \
+ next0 = is_mapme0 * NEXT_MAPME_IP##ipv + \
+ (1 - is_mapme0) * NEXT_DATA_IP##ipv; \
+ from_tunnel0 = \
+ (hicnb0->flags & HICN_BUFFER_FLAGS_FROM_UDP4_TUNNEL || \
+ hicnb0->flags & HICN_BUFFER_FLAGS_FROM_UDP6_TUNNEL) > 0; \
+ sw_if0 = \
+ (from_tunnel0) * ~0 + \
+ (1 - from_tunnel0) * vnet_buffer (b0)->sw_if_index[VLIB_RX]; \
+ ret0 = hicn_face_ip##ipv##_find ( \
+ &hicnb0->face_id, &hicnb0->flags, &ip_hdr->dst_address, sw_if0, \
+ vnet_buffer (b0)->ip.adj_index[VLIB_RX], \
+ /* Should not be used */ ~0); \
+ /* Make sure the face is not created here */ \
+ if (PREDICT_FALSE (ret0 == HICN_ERROR_FACE_NOT_FOUND)) \
+ { \
+ next0 = HICN##ipv##_FACE_INPUT_NEXT_ERROR_DROP; \
+ } \
+ } \
+ \
+ vlib_increment_combined_counter ( \
+ &counters[hicnb0->face_id * HICN_N_COUNTER], thread_index, \
+ HICN_FACE_COUNTERS_DATA_RX, 1, vlib_buffer_length_in_chain (vm, b0)); \
+ stats.pkts_data_count += 1; \
+ \
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) && \
+ (b0->flags & VLIB_BUFFER_IS_TRACED))) \
+ { \
+ TRACE_INPUT_PKT_IP##ipv *t = \
+ vlib_add_trace (vm, node, b0, sizeof (*t)); \
+ t->pkt_type = HICN_PACKET_TYPE_INTEREST; \
+ t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX]; \
+ t->error = ret0; \
+ t->next_index = next0; \
+ clib_memcpy_fast (t->packet_data, vlib_buffer_get_current (b0), \
+ sizeof (t->packet_data)); \
+ } \
+ \
+ /* Verify speculative enqueue, maybe switch current next frame */ \
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next, \
+ n_left_to_next, bi0, next0); \
+ } \
+ while (0)
+
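A brief note on the arithmetic used in these macros: expressions such as is_mapme0 * NEXT_MAPME_IP4 + (1 - is_mapme0) * NEXT_DATA_IP4 are a branchless select on a 0/1 flag, equivalent to the plainer form below (IPv4 case shown; the sw_if0 selection works the same way):

/* Equivalent, branchy form of the branchless selects used above. */
next0 = is_mapme0 ? NEXT_MAPME_IP4 : NEXT_DATA_IP4;
sw_if0 = from_tunnel0 ? (u32) ~0 : vnet_buffer (b0)->sw_if_index[VLIB_RX];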
+#define face_input_x2(ipv) \
+ do \
+ { \
+ vlib_buffer_t *b0, *b1; \
+ u32 bi0, bi1, sw_if0, sw_if1; \
+ u32 next0 = NEXT_ERROR_DROP_IP##ipv; \
+ u32 next1 = NEXT_ERROR_DROP_IP##ipv; \
+ u8 is_mapme0, is_mapme1; \
+ IP_HEADER_##ipv *ip_hdr0 = NULL; \
+ IP_HEADER_##ipv *ip_hdr1 = NULL; \
+ hicn_buffer_t *hicnb0; \
+ hicn_buffer_t *hicnb1; \
+ int from_tunnel0, from_tunnel1; \
+ int ret0, ret1; \
+ /* Prefetch for next iteration. */ \
+ { \
+ vlib_buffer_t *b2, *b3; \
+ b2 = vlib_get_buffer (vm, from[2]); \
+ b3 = vlib_get_buffer (vm, from[3]); \
+ CLIB_PREFETCH (b2, 2 * CLIB_CACHE_LINE_BYTES, STORE); \
+ CLIB_PREFETCH (b3, 2 * CLIB_CACHE_LINE_BYTES, STORE); \
+ CLIB_PREFETCH (b2->data, CLIB_CACHE_LINE_BYTES, LOAD); \
+ CLIB_PREFETCH (b3->data, CLIB_CACHE_LINE_BYTES, LOAD); \
+ } \
+ /* Dequeue a packet buffer */ \
+ bi0 = from[0]; \
+ bi1 = from[1]; \
+ from += 2; \
+ n_left_from -= 2; \
+ to_next[0] = bi0; \
+ to_next[1] = bi1; \
+ to_next += 2; \
+ n_left_to_next -= 2; \
+ \
+ b0 = vlib_get_buffer (vm, bi0); \
+ b1 = vlib_get_buffer (vm, bi1); \
+ hicnb0 = hicn_get_buffer (b0); \
+ hicnb1 = hicn_get_buffer (b1); \
+ ip_hdr0 = (IP_HEADER_##ipv *) vlib_buffer_get_current (b0); \
+ ip_hdr1 = (IP_HEADER_##ipv *) vlib_buffer_get_current (b1); \
+ \
+ /* Parse packet and cache useful info in opaque2 */ \
+ ret0 = hicn_data_parse_pkt (b0, vlib_buffer_length_in_chain (vm, b0)); \
+ ret1 = hicn_data_parse_pkt (b1, vlib_buffer_length_in_chain (vm, b1)); \
+ is_mapme0 = hicn_packet_get_type (&hicn_get_buffer (b0)->pkbuf) == \
+ HICN_PACKET_TYPE_MAPME; \
+ is_mapme1 = hicn_packet_get_type (&hicn_get_buffer (b1)->pkbuf) == \
+ HICN_PACKET_TYPE_MAPME; \
+ ret0 = (ret0 == HICN_ERROR_NONE) || \
+ (ret0 == HICN_ERROR_PARSER_MAPME_PACKET); \
+ ret1 = (ret1 == HICN_ERROR_NONE) || \
+ (ret1 == HICN_ERROR_PARSER_MAPME_PACKET); \
+ if (PREDICT_TRUE (ret0 && ret1)) \
+ { \
+ next0 = is_mapme0 * NEXT_MAPME_IP##ipv + \
+ (1 - is_mapme0) * NEXT_DATA_IP##ipv; \
+ \
+ next1 = is_mapme1 * NEXT_MAPME_IP##ipv + \
+ (1 - is_mapme1) * NEXT_DATA_IP##ipv; \
+ \
+ from_tunnel0 = \
+ (hicnb0->flags & HICN_BUFFER_FLAGS_FROM_UDP4_TUNNEL || \
+ hicnb0->flags & HICN_BUFFER_FLAGS_FROM_UDP6_TUNNEL) > 0; \
+ sw_if0 = \
+ (from_tunnel0) * ~0 + \
+ (1 - from_tunnel0) * vnet_buffer (b0)->sw_if_index[VLIB_RX]; \
+ ret0 = hicn_face_ip##ipv##_find ( \
+ &hicnb0->face_id, &hicnb0->flags, &ip_hdr0->dst_address, sw_if0, \
+ vnet_buffer (b0)->ip.adj_index[VLIB_RX], \
+ /* Should not be used */ ~0); \
+ /* Make sure the face is not created here */ \
+ if (PREDICT_FALSE (ret0 == HICN_ERROR_FACE_NOT_FOUND)) \
+ { \
+ next0 = HICN##ipv##_FACE_INPUT_NEXT_ERROR_DROP; \
+ } \
+ \
+ from_tunnel1 = \
+ (hicnb1->flags & HICN_BUFFER_FLAGS_FROM_UDP4_TUNNEL || \
+ hicnb1->flags & HICN_BUFFER_FLAGS_FROM_UDP6_TUNNEL) > 0; \
+ sw_if1 = \
+ (from_tunnel1) * ~0 + \
+ (1 - from_tunnel1) * vnet_buffer (b1)->sw_if_index[VLIB_RX]; \
+ ret1 = hicn_face_ip##ipv##_find ( \
+ &hicnb1->face_id, &hicnb1->flags, &ip_hdr1->dst_address, sw_if1, \
+ vnet_buffer (b1)->ip.adj_index[VLIB_RX], \
+ /* Should not be used */ ~0); \
+ /* Make sure the face is not created here */ \
+ if (PREDICT_FALSE (ret1 == HICN_ERROR_FACE_NOT_FOUND)) \
+ { \
+ next1 = HICN##ipv##_FACE_INPUT_NEXT_ERROR_DROP; \
+ } \
+ } \
+ else if (ret0 && !ret1) \
+ { \
+ next1 = HICN##ipv##_FACE_INPUT_NEXT_ERROR_DROP; \
+ from_tunnel0 = \
+ (hicnb0->flags & HICN_BUFFER_FLAGS_FROM_UDP4_TUNNEL || \
+ hicnb0->flags & HICN_BUFFER_FLAGS_FROM_UDP6_TUNNEL) > 0; \
+ sw_if0 = \
+ (from_tunnel0) * ~0 + \
+ (1 - from_tunnel0) * vnet_buffer (b0)->sw_if_index[VLIB_RX]; \
+ ret0 = hicn_face_ip##ipv##_find ( \
+ &hicnb0->face_id, &hicnb0->flags, &ip_hdr0->dst_address, sw_if0, \
+ vnet_buffer (b0)->ip.adj_index[VLIB_RX], \
+ /* Should not be used */ ~0); \
+ /* Make sure the face is not created here */ \
+ if (PREDICT_FALSE (ret0 == HICN_ERROR_FACE_NOT_FOUND)) \
+ { \
+ next0 = HICN##ipv##_FACE_INPUT_NEXT_ERROR_DROP; \
+ } \
+ else \
+ { \
+ next0 = is_mapme0 * NEXT_MAPME_IP##ipv + \
+ (1 - is_mapme0) * NEXT_DATA_IP##ipv; \
+ } \
+ } \
+ else if (!ret0 && ret1) \
+ { \
+ next0 = HICN##ipv##_FACE_INPUT_NEXT_ERROR_DROP; \
+ from_tunnel1 = \
+ (hicnb1->flags & HICN_BUFFER_FLAGS_FROM_UDP4_TUNNEL || \
+ hicnb1->flags & HICN_BUFFER_FLAGS_FROM_UDP6_TUNNEL) > 0; \
+ sw_if1 = \
+ (from_tunnel1) * ~0 + \
+ (1 - from_tunnel1) * vnet_buffer (b1)->sw_if_index[VLIB_RX]; \
+ ret1 = hicn_face_ip##ipv##_find ( \
+ &hicnb1->face_id, &hicnb1->flags, &ip_hdr1->dst_address, sw_if1, \
+ vnet_buffer (b1)->ip.adj_index[VLIB_RX], \
+ /* Should not be used */ ~0); \
+ /* Make sure the face is not created here */ \
+ if (PREDICT_FALSE (ret1 == HICN_ERROR_FACE_NOT_FOUND)) \
+ { \
+ next1 = HICN##ipv##_FACE_INPUT_NEXT_ERROR_DROP; \
+ } \
+ else \
+ { \
+ next1 = is_mapme1 * NEXT_MAPME_IP##ipv + \
+ (1 - is_mapme1) * NEXT_DATA_IP##ipv; \
+ } \
+ } \
+ else \
+ { \
+ next0 = HICN##ipv##_FACE_INPUT_NEXT_ERROR_DROP; \
+ next1 = HICN##ipv##_FACE_INPUT_NEXT_ERROR_DROP; \
+ } \
+ \
+ vlib_increment_combined_counter ( \
+ &counters[hicnb0->face_id * HICN_N_COUNTER], thread_index, \
+ HICN_FACE_COUNTERS_DATA_RX, 1, vlib_buffer_length_in_chain (vm, b0)); \
+ stats.pkts_data_count += 1; \
+ \
+ vlib_increment_combined_counter ( \
+ &counters[hicnb1->face_id * HICN_N_COUNTER], thread_index, \
+ HICN_FACE_COUNTERS_DATA_RX, 1, vlib_buffer_length_in_chain (vm, b1)); \
+ stats.pkts_data_count += 1; \
+ \
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) && \
+ (b0->flags & VLIB_BUFFER_IS_TRACED))) \
+ { \
+ TRACE_INPUT_PKT_IP##ipv *t = \
+ vlib_add_trace (vm, node, b0, sizeof (*t)); \
+ t->pkt_type = HICN_PACKET_TYPE_INTEREST; \
+ t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX]; \
+ t->error = ret0; \
+ t->next_index = next0; \
+ clib_memcpy_fast (t->packet_data, vlib_buffer_get_current (b0), \
+ sizeof (t->packet_data)); \
+ } \
+ \
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) && \
+ (b1->flags & VLIB_BUFFER_IS_TRACED))) \
+ { \
+ TRACE_INPUT_PKT_IP##ipv *t = \
+ vlib_add_trace (vm, node, b1, sizeof (*t)); \
+ t->pkt_type = HICN_PACKET_TYPE_INTEREST; \
+ t->sw_if_index = vnet_buffer (b1)->sw_if_index[VLIB_RX]; \
+ t->error = ret1; \
+ t->next_index = next1; \
+ clib_memcpy_fast (t->packet_data, vlib_buffer_get_current (b1), \
+ sizeof (t->packet_data)); \
+ } \
+ \
+ /* Verify speculative enqueue, maybe switch current next frame */ \
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next, \
+ n_left_to_next, bi0, bi1, next0, \
+ next1); \
+ } \
+ while (0)
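The node functions that drive these macros are largely elided from this diff; as a rough sketch (assuming the standard VPP dual/single-loop shape, consistent with the frame handling visible below), hicn4_face_input_node_fn is structured like this:

/* Dispatch sketch only -- not verbatim plugin code. */
while (n_left_from > 0)
  {
    u32 n_left_to_next;
    vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

    /* Two buffers at a time while at least four remain (two processed,
       two prefetched), then one at a time. */
    while (n_left_from >= 4 && n_left_to_next >= 2)
      face_input_x2 (4);

    while (n_left_from > 0 && n_left_to_next > 0)
      face_input_x1 (4);

    vlib_put_next_frame (vm, node, next_index, n_left_to_next);
  }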
static uword
-hicn4_face_input_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
- vlib_frame_t * frame)
+hicn4_face_input_node_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
+ vlib_frame_t *frame)
{
u32 n_left_from, *from, *to_next, next_index;
@@ -333,20 +420,19 @@ hicn4_face_input_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
vlib_put_next_frame (vm, node, next_index, n_left_to_next);
}
- vlib_node_increment_counter (vm, node->node_index,
- HICNFWD_ERROR_DATAS, stats.pkts_data_count);
+ vlib_node_increment_counter (vm, node->node_index, HICNFWD_ERROR_DATAS,
+ stats.pkts_data_count);
return (frame->n_vectors);
}
/* packet trace format function */
static u8 *
-hicn4_face_input_format_trace (u8 * s, va_list * args)
+hicn4_face_input_format_trace (u8 *s, va_list *args)
{
CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
- hicn4_face_input_trace_t *t =
- va_arg (*args, hicn4_face_input_trace_t *);
+ hicn4_face_input_trace_t *t = va_arg (*args, hicn4_face_input_trace_t *);
s = format (s, "FACE_IP4_INPUT: pkt: %d, sw_if_index %d, next index %d\n%U",
(int) t->pkt_type, t->sw_if_index, t->next_index,
@@ -354,11 +440,9 @@ hicn4_face_input_format_trace (u8 * s, va_list * args)
return (s);
}
-
/*
* Node registration for the interest forwarder node
*/
-/* *INDENT-OFF* */
VLIB_REGISTER_NODE(hicn4_face_input_node) =
{
.function = hicn4_face_input_node_fn,
@@ -377,15 +461,14 @@ VLIB_REGISTER_NODE(hicn4_face_input_node) =
[HICN4_FACE_INPUT_NEXT_ERROR_DROP] = "error-drop",
},
};
-/* *INDENT-ON* */
/**
* @brief IPv6 face input node function
* @see hicn6_face_input_node_fn
*/
static uword
-hicn6_face_input_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
- vlib_frame_t * frame)
+hicn6_face_input_node_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
+ vlib_frame_t *frame)
{
u32 n_left_from, *from, *to_next, next_index;
@@ -414,20 +497,19 @@ hicn6_face_input_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
vlib_put_next_frame (vm, node, next_index, n_left_to_next);
}
- vlib_node_increment_counter (vm, node->node_index,
- HICNFWD_ERROR_DATAS, stats.pkts_data_count);
+ vlib_node_increment_counter (vm, node->node_index, HICNFWD_ERROR_DATAS,
+ stats.pkts_data_count);
return (frame->n_vectors);
}
/* packet trace format function */
static u8 *
-hicn6_face_input_format_trace (u8 * s, va_list * args)
+hicn6_face_input_format_trace (u8 *s, va_list *args)
{
CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
- hicn6_face_input_trace_t *t =
- va_arg (*args, hicn6_face_input_trace_t *);
+ hicn6_face_input_trace_t *t = va_arg (*args, hicn6_face_input_trace_t *);
s = format (s, "FACE_IP6_INPUT: pkt: %d, sw_if_index %d, next index %d\n%U",
(int) t->pkt_type, t->sw_if_index, t->next_index,
@@ -438,7 +520,6 @@ hicn6_face_input_format_trace (u8 * s, va_list * args)
/*
* Node registration for the interest forwarder node
*/
-/* *INDENT-OFF* */
VLIB_REGISTER_NODE(hicn6_face_input_node) =
{
.function = hicn6_face_input_node_fn,
@@ -457,12 +538,12 @@ VLIB_REGISTER_NODE(hicn6_face_input_node) =
[HICN6_FACE_INPUT_NEXT_ERROR_DROP] = "error-drop",
},
};
-/* *INDENT-ON* */
/**** FACE OUTPUT *****/
typedef enum
{
+ HICN4_FACE_OUTPUT_NEXT_ERROR_DROP,
HICN4_FACE_OUTPUT_NEXT_ECHO_REPLY,
HICN4_FACE_OUTPUT_NEXT_UDP4_ENCAP,
HICN4_FACE_OUTPUT_NEXT_UDP6_ENCAP,
@@ -471,109 +552,39 @@ typedef enum
typedef enum
{
+ HICN6_FACE_OUTPUT_NEXT_ERROR_DROP,
HICN6_FACE_OUTPUT_NEXT_ECHO_REPLY,
HICN6_FACE_OUTPUT_NEXT_UDP4_ENCAP,
HICN6_FACE_OUTPUT_NEXT_UDP6_ENCAP,
HICN6_FACE_OUTPUT_N_NEXT,
} hicn6_face_output_next_t;
-/* static_always_inline void */
-/* hicn_reply_probe_v4 (vlib_buffer_t * b, hicn_face_t * face) */
-/* { */
-/* hicn_header_t *h0 = vlib_buffer_get_current (b); */
-/* hicn_face_ip_t * face_ip = (hicn_face_ip_t *)(&face->data); */
-/* h0->v4.ip.saddr = h0->v4.ip.daddr; */
-/* h0->v4.ip.daddr = face_ip->local_addr.ip4; */
-/* vnet_buffer (b)->sw_if_index[VLIB_RX] = face->shared.sw_if; */
-
-/* u16 * dst_port_ptr = (u16 *)(((u8*)h0) + sizeof(ip4_header_t) + sizeof(u16)); */
-/* u16 dst_port = *dst_port_ptr; */
-/* u16 * src_port_ptr = (u16 *)(((u8*)h0) + sizeof(ip4_header_t)); */
-
-/* *dst_port_ptr = *src_port_ptr; */
-/* *src_port_ptr = dst_port; */
-
-/* hicn_type_t type = hicn_get_buffer (b)->type; */
-/* hicn_ops_vft[type.l1]->set_lifetime (type, &h0->protocol, 0); */
-/* } */
-
-/* static_always_inline void */
-/* hicn_reply_probe_v6 (vlib_buffer_t * b, hicn_face_t * face) */
-/* { */
-/* hicn_header_t *h0 = vlib_buffer_get_current (b); */
-/* hicn_face_ip_t * face_ip = (hicn_face_ip_t *)(&face->data); */
-/* h0->v6.ip.saddr = h0->v6.ip.daddr; */
-/* h0->v6.ip.daddr = face_ip->local_addr.ip6; */
-/* vnet_buffer (b)->sw_if_index[VLIB_RX] = face->shared.sw_if; */
-
-/* u16 * dst_port_ptr = (u16 *)(((u8*)h0) + sizeof(ip6_header_t) + sizeof(u16)); */
-/* u16 dst_port = *dst_port_ptr; */
-/* u16 * src_port_ptr = (u16 *)(((u8*)h0) + sizeof(ip6_header_t)); */
-
-/* *dst_port_ptr = *src_port_ptr; */
-/* *src_port_ptr = dst_port; */
-
-/* hicn_type_t type = hicn_get_buffer (b)->type; */
-/* hicn_ops_vft[type.l1]->set_lifetime (type, &h0->protocol, 0); */
-
-/* } */
-
-/* static_always_inline u32 */
-/* hicn_face_match_probe (vlib_buffer_t * b, hicn_face_t * face, u32 * next) */
-/* { */
-
-/* u8 *ptr = vlib_buffer_get_current (b); */
-/* u8 v = *ptr & 0xf0; */
-/* u8 res = 0; */
-
-/* if ( v == 0x40 ) */
-/* { */
-/* u16 * dst_port = (u16 *)(ptr + sizeof(ip4_header_t) + sizeof(u16)); */
-/* if (*dst_port == clib_net_to_host_u16(DEFAULT_PROBING_PORT)) */
-/* { */
-/* hicn_reply_probe_v6(b, face); */
-/* *next = HICN4_FACE_NEXT_ECHO_REPLY; */
-/* res = 1; */
-/* } */
-/* } */
-/* else if ( v == 0x60 ) */
-/* { */
-/* u16 * dst_port = (u16 *)(ptr + sizeof(ip6_header_t) + sizeof(u16)); */
-/* if (*dst_port == clib_net_to_host_u16(DEFAULT_PROBING_PORT)) */
-/* { */
-/* hicn_reply_probe_v6(b, face); */
-/* *next = HICN6_FACE_NEXT_ECHO_REPLY; */
-/* res = 1; */
-/* } */
-/* } */
-/* return res; */
-/* } */
-
-
static inline void
-hicn_face_rewrite_interest (vlib_main_t * vm, vlib_buffer_t * b0,
- hicn_face_t * face, u32 * next)
+hicn_face_rewrite_interest (vlib_main_t *vm, vlib_buffer_t *b0,
+ hicn_face_t *face, u32 *next)
{
- /* if ((face->flags & HICN_FACE_FLAGS_APPFACE_PROD) && hicn_face_match_probe(b0, face, next)) */
+ /* if ((face->flags & HICN_FACE_FLAGS_APPFACE_PROD) &&
+ * hicn_face_match_probe(b0, face, next)) */
/* return; */
- hicn_header_t *hicn = vlib_buffer_get_current (b0);
+ hicn_packet_buffer_t *pkbuf = &hicn_get_buffer (b0)->pkbuf;
- //hicn_face_ip_t *ip_face = (hicn_face_ip_t *) face->data;
+ u8 is_v4 = ip46_address_is_ip4 (&face->nat_addr) &&
+ !ip6_address_is_loopback (&face->nat_addr.ip6);
- ip46_address_t temp_addr;
- ip46_address_reset (&temp_addr);
- hicn_type_t type = hicn_get_buffer (b0)->type;
- int ret = hicn_ops_vft[type.l1]->rewrite_interest (type, &hicn->protocol,
- &face->nat_addr, &temp_addr);
+ // hicn_face_ip_t *ip_face = (hicn_face_ip_t *) face->data;
+ hicn_ip_address_t temp_addr;
+ ip46_address_reset (&(temp_addr.as_ip46));
+ hicn_ip_address_t *face_nat_addr = (hicn_ip_address_t *) &face->nat_addr;
+ int ret = hicn_interest_rewrite (pkbuf, face_nat_addr, &temp_addr);
if (ret == HICN_LIB_ERROR_REWRITE_CKSUM_REQUIRED)
{
- ensure_offload_flags(b0, ip46_address_is_ip4(&face->nat_addr));
+ ensure_offload_flags (b0, is_v4);
}
- ASSERT(face->flags & HICN_FACE_FLAGS_FACE);
+ ASSERT (face->flags & HICN_FACE_FLAGS_FACE);
vnet_buffer (b0)->ip.adj_index[VLIB_TX] = face->dpo.dpoi_index;
*next = face->dpo.dpoi_next_node;
@@ -591,193 +602,184 @@ static char *hicn6_face_output_error_strings[] = {
#undef _
};
-
/* Trace context struct */
typedef struct
{
u32 next_index;
+ u32 next_node;
u32 sw_if_index;
u8 pkt_type;
u8 packet_data[60];
-}
-hicn4_face_output_trace_t;
+} hicn4_face_output_trace_t;
/* Trace context struct */
typedef struct
{
u32 next_index;
+ u32 next_node;
u32 sw_if_index;
u8 pkt_type;
u8 packet_data[60];
-}
-hicn6_face_output_trace_t;
+} hicn6_face_output_trace_t;
#define TRACE_OUTPUT_PKT_IP4 hicn4_face_output_trace_t
#define TRACE_OUTPUT_PKT_IP6 hicn6_face_output_trace_t
-#define face_output_x1(ipv) \
- do { \
- vlib_buffer_t *b0; \
- u32 bi0; \
- u32 next0 = ~0; \
- hicn_face_t * face; \
- \
- /* Prefetch for next iteration. */ \
- if (n_left_from > 1) \
- { \
- vlib_buffer_t *b1; \
- b1 = vlib_get_buffer (vm, from[1]); \
- CLIB_PREFETCH (b1, CLIB_CACHE_LINE_BYTES, STORE); \
- CLIB_PREFETCH (b1->data, CLIB_CACHE_LINE_BYTES , STORE); \
- } \
- /* Dequeue a packet buffer */ \
- bi0 = from[0]; \
- from += 1; \
- n_left_from -= 1; \
- to_next[0] = bi0; \
- to_next += 1; \
- n_left_to_next -= 1; \
- \
- b0 = vlib_get_buffer (vm, bi0); \
- \
- hicn_face_id_t face_id = vnet_buffer (b0)->ip.adj_index[VLIB_TX]; \
- face = \
- hicn_dpoi_get_from_idx (face_id); \
- \
- if (PREDICT_TRUE(face != NULL)) \
- { \
- hicn_face_rewrite_interest \
- (vm, b0, face, &next0); \
- stats.pkts_interest_count += 1; \
- vlib_increment_combined_counter ( \
- &counters[face_id * HICN_N_COUNTER], \
- thread_index, \
- HICN_FACE_COUNTERS_INTEREST_TX, \
- 1, \
- vlib_buffer_length_in_chain(vm, b0)); \
- } \
- \
- if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) && \
- (b0->flags & VLIB_BUFFER_IS_TRACED))) \
- { \
- TRACE_OUTPUT_PKT_IP##ipv *t = \
- vlib_add_trace (vm, node, b0, sizeof (*t)); \
- t->pkt_type = HICN_PKT_TYPE_INTEREST; \
- t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX]; \
- t->next_index = next0; \
- clib_memcpy_fast (t->packet_data, \
- vlib_buffer_get_current (b0), \
- sizeof (t->packet_data)); \
- } \
- \
- \
- /* Verify speculative enqueue, maybe switch current next frame */ \
- vlib_validate_buffer_enqueue_x1 (vm, node, next_index, \
- to_next, n_left_to_next, \
- bi0, next0); \
- }while(0)
-
-#define face_output_x2(ipv) \
- do { \
- vlib_buffer_t *b0, *b1; \
- u32 bi0, bi1; \
- u32 next0 = ~0; \
- u32 next1 = ~0; \
- hicn_face_t *face0, *face1; \
- \
- /* Prefetch for next iteration. */ \
- { \
- vlib_buffer_t *b2, *b3; \
- b2 = vlib_get_buffer (vm, from[2]); \
- b3 = vlib_get_buffer (vm, from[3]); \
- CLIB_PREFETCH (b2, CLIB_CACHE_LINE_BYTES, STORE); \
- CLIB_PREFETCH (b3, CLIB_CACHE_LINE_BYTES, STORE); \
- CLIB_PREFETCH (b2->data, CLIB_CACHE_LINE_BYTES , STORE); \
- CLIB_PREFETCH (b3->data, CLIB_CACHE_LINE_BYTES , STORE); \
- } \
- /* Dequeue a packet buffer */ \
- bi0 = from[0]; \
- bi1 = from[1]; \
- from += 2; \
- n_left_from -= 2; \
- to_next[0] = bi0; \
- to_next[1] = bi1; \
- to_next += 2; \
- n_left_to_next -= 2; \
- \
- b0 = vlib_get_buffer (vm, bi0); \
- b1 = vlib_get_buffer (vm, bi1); \
- \
- hicn_face_id_t face_id0 = vnet_buffer (b0)->ip.adj_index[VLIB_TX]; \
- hicn_face_id_t face_id1 = vnet_buffer (b1)->ip.adj_index[VLIB_TX]; \
- face0 = \
- hicn_dpoi_get_from_idx (face_id0); \
- face1 = \
- hicn_dpoi_get_from_idx (face_id1); \
- \
- if (PREDICT_TRUE(face0 != NULL)) \
- { \
- hicn_face_rewrite_interest \
- (vm, b0, face0, &next0); \
- stats.pkts_interest_count += 1; \
- vlib_increment_combined_counter ( \
- &counters[face_id0 * HICN_N_COUNTER], \
- thread_index, \
- HICN_FACE_COUNTERS_INTEREST_TX, \
- 1, \
- vlib_buffer_length_in_chain(vm, b0)); \
- } \
- \
- if (PREDICT_TRUE(face1 != NULL)) \
- { \
- hicn_face_rewrite_interest \
- (vm, b1, face1, &next1); \
- stats.pkts_interest_count += 1; \
- vlib_increment_combined_counter ( \
- &counters[face_id1 * HICN_N_COUNTER], \
- thread_index, \
- HICN_FACE_COUNTERS_INTEREST_TX, \
- 1, \
- vlib_buffer_length_in_chain(vm, b1)); \
- } \
- \
- if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) && \
- (b0->flags & VLIB_BUFFER_IS_TRACED))) \
- { \
- TRACE_OUTPUT_PKT_IP##ipv *t = \
- vlib_add_trace (vm, node, b0, sizeof (*t)); \
- t->pkt_type = HICN_PKT_TYPE_INTEREST; \
- t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX]; \
- t->next_index = next0; \
- clib_memcpy_fast (t->packet_data, \
- vlib_buffer_get_current (b0), \
- sizeof (t->packet_data)); \
- } \
- \
- if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) && \
- (b1->flags & VLIB_BUFFER_IS_TRACED))) \
- { \
- TRACE_OUTPUT_PKT_IP##ipv *t = \
- vlib_add_trace (vm, node, b1, sizeof (*t)); \
- t->pkt_type = HICN_PKT_TYPE_INTEREST; \
- t->sw_if_index = vnet_buffer (b1)->sw_if_index[VLIB_RX]; \
- t->next_index = next1; \
- clib_memcpy_fast (t->packet_data, \
- vlib_buffer_get_current (b1), \
- sizeof (t->packet_data)); \
- } \
- \
- \
- /* Verify speculative enqueue, maybe switch current next frame */ \
- vlib_validate_buffer_enqueue_x2 (vm, node, next_index, \
- to_next, n_left_to_next, \
- bi0, bi1, next0, next1); \
- }while(0)
-
+#define face_output_x1(ipv) \
+ do \
+ { \
+ vlib_buffer_t *b0; \
+ u32 bi0; \
+ u32 next0 = HICN##ipv##_FACE_OUTPUT_NEXT_ERROR_DROP; \
+ hicn_face_t *face = NULL; \
+ \
+ /* Prefetch for next iteration. */ \
+ if (n_left_from > 1) \
+ { \
+ vlib_buffer_t *b1; \
+ b1 = vlib_get_buffer (vm, from[1]); \
+ CLIB_PREFETCH (b1, CLIB_CACHE_LINE_BYTES, STORE); \
+ CLIB_PREFETCH (b1->data, CLIB_CACHE_LINE_BYTES, STORE); \
+ } \
+ /* Dequeue a packet buffer */ \
+ bi0 = from[0]; \
+ from += 1; \
+ n_left_from -= 1; \
+ to_next[0] = bi0; \
+ to_next += 1; \
+ n_left_to_next -= 1; \
+ \
+ b0 = vlib_get_buffer (vm, bi0); \
+ \
+ hicn_face_id_t face_id = vnet_buffer (b0)->ip.adj_index[VLIB_TX]; \
+ if (PREDICT_TRUE (hicn_dpoi_idx_is_valid (face_id))) \
+ face = hicn_dpoi_get_from_idx (face_id); \
+ \
+ if (PREDICT_TRUE (face != NULL) && face->flags & HICN_FACE_FLAGS_FACE) \
+ { \
+ hicn_face_rewrite_interest (vm, b0, face, &next0); \
+ stats.pkts_interest_count += 1; \
+ vlib_increment_combined_counter ( \
+ &counters[face_id * HICN_N_COUNTER], thread_index, \
+ HICN_FACE_COUNTERS_INTEREST_TX, 1, \
+ vlib_buffer_length_in_chain (vm, b0)); \
+ } \
+ \
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) && \
+ (b0->flags & VLIB_BUFFER_IS_TRACED))) \
+ { \
+ TRACE_OUTPUT_PKT_IP##ipv *t = \
+ vlib_add_trace (vm, node, b0, sizeof (*t)); \
+ t->pkt_type = HICN_PACKET_TYPE_INTEREST; \
+ t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX]; \
+ t->next_index = vnet_buffer (b0)->ip.adj_index[VLIB_TX]; \
+ t->next_node = next0; \
+ clib_memcpy_fast (t->packet_data, vlib_buffer_get_current (b0), \
+ sizeof (t->packet_data)); \
+ } \
+ \
+ /* Verify speculative enqueue, maybe switch current next frame */ \
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next, \
+ n_left_to_next, bi0, next0); \
+ } \
+ while (0)
+
+#define face_output_x2(ipv) \
+ do \
+ { \
+ vlib_buffer_t *b0, *b1; \
+ u32 bi0, bi1; \
+ u32 next0 = HICN##ipv##_FACE_OUTPUT_NEXT_ERROR_DROP; \
+ u32 next1 = HICN##ipv##_FACE_OUTPUT_NEXT_ERROR_DROP; \
+ hicn_face_t *face0 = NULL, *face1 = NULL; \
+ \
+ /* Prefetch for next iteration. */ \
+ { \
+ vlib_buffer_t *b2, *b3; \
+ b2 = vlib_get_buffer (vm, from[2]); \
+ b3 = vlib_get_buffer (vm, from[3]); \
+ CLIB_PREFETCH (b2, CLIB_CACHE_LINE_BYTES, STORE); \
+ CLIB_PREFETCH (b3, CLIB_CACHE_LINE_BYTES, STORE); \
+ CLIB_PREFETCH (b2->data, CLIB_CACHE_LINE_BYTES, STORE); \
+ CLIB_PREFETCH (b3->data, CLIB_CACHE_LINE_BYTES, STORE); \
+ } \
+ /* Dequeue a packet buffer */ \
+ bi0 = from[0]; \
+ bi1 = from[1]; \
+ from += 2; \
+ n_left_from -= 2; \
+ to_next[0] = bi0; \
+ to_next[1] = bi1; \
+ to_next += 2; \
+ n_left_to_next -= 2; \
+ \
+ b0 = vlib_get_buffer (vm, bi0); \
+ b1 = vlib_get_buffer (vm, bi1); \
+ \
+ hicn_face_id_t face_id0 = vnet_buffer (b0)->ip.adj_index[VLIB_TX]; \
+ hicn_face_id_t face_id1 = vnet_buffer (b1)->ip.adj_index[VLIB_TX]; \
+ if (PREDICT_TRUE (hicn_dpoi_idx_is_valid (face_id0))) \
+ face0 = hicn_dpoi_get_from_idx (face_id0); \
+ if (PREDICT_TRUE (hicn_dpoi_idx_is_valid (face_id1))) \
+ face1 = hicn_dpoi_get_from_idx (face_id1); \
+ \
+ if (PREDICT_TRUE (face0 != NULL) && \
+ face0->flags & HICN_FACE_FLAGS_FACE) \
+ { \
+ hicn_face_rewrite_interest (vm, b0, face0, &next0); \
+ stats.pkts_interest_count += 1; \
+ vlib_increment_combined_counter ( \
+ &counters[face_id0 * HICN_N_COUNTER], thread_index, \
+ HICN_FACE_COUNTERS_INTEREST_TX, 1, \
+ vlib_buffer_length_in_chain (vm, b0)); \
+ } \
+ \
+ if (PREDICT_TRUE (face1 != NULL) && \
+ face1->flags & HICN_FACE_FLAGS_FACE) \
+ { \
+ hicn_face_rewrite_interest (vm, b1, face1, &next1); \
+ stats.pkts_interest_count += 1; \
+ vlib_increment_combined_counter ( \
+ &counters[face_id1 * HICN_N_COUNTER], thread_index, \
+ HICN_FACE_COUNTERS_INTEREST_TX, 1, \
+ vlib_buffer_length_in_chain (vm, b1)); \
+ } \
+ \
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) && \
+ (b0->flags & VLIB_BUFFER_IS_TRACED))) \
+ { \
+ TRACE_OUTPUT_PKT_IP##ipv *t = \
+ vlib_add_trace (vm, node, b0, sizeof (*t)); \
+ t->pkt_type = HICN_PACKET_TYPE_INTEREST; \
+ t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX]; \
+ t->next_index = vnet_buffer (b0)->ip.adj_index[VLIB_TX]; \
+ t->next_node = next0; \
+ clib_memcpy_fast (t->packet_data, vlib_buffer_get_current (b0), \
+ sizeof (t->packet_data)); \
+ } \
+ \
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) && \
+ (b1->flags & VLIB_BUFFER_IS_TRACED))) \
+ { \
+ TRACE_OUTPUT_PKT_IP##ipv *t = \
+ vlib_add_trace (vm, node, b1, sizeof (*t)); \
+ t->pkt_type = HICN_PACKET_TYPE_INTEREST; \
+ t->sw_if_index = vnet_buffer (b1)->sw_if_index[VLIB_RX]; \
+ t->next_index = vnet_buffer (b1)->ip.adj_index[VLIB_TX]; \
+ t->next_node = next1; \
+ clib_memcpy_fast (t->packet_data, vlib_buffer_get_current (b1), \
+ sizeof (t->packet_data)); \
+ } \
+ /* Verify speculative enqueue, maybe switch current next frame */ \
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next, \
+ n_left_to_next, bi0, bi1, next0, \
+ next1); \
+ } \
+ while (0)
static uword
-hicn4_face_output_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
- vlib_frame_t * frame)
+hicn4_face_output_node_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
+ vlib_frame_t *frame)
{
u32 n_left_from, *from, *to_next, next_index;
vl_api_hicn_api_node_stats_get_reply_t stats = { 0 };
@@ -806,8 +808,7 @@ hicn4_face_output_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
vlib_put_next_frame (vm, node, next_index, n_left_to_next);
}
- vlib_node_increment_counter (vm, node->node_index,
- HICNFWD_ERROR_INTERESTS,
+ vlib_node_increment_counter (vm, node->node_index, HICNFWD_ERROR_INTERESTS,
stats.pkts_interest_count);
return (frame->n_vectors);
@@ -815,48 +816,37 @@ hicn4_face_output_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
/* packet trace format function */
static u8 *
-hicn4_face_output_format_trace (u8 * s, va_list * args)
+hicn4_face_output_format_trace (u8 *s, va_list *args)
{
CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
- hicn4_face_output_trace_t *t =
- va_arg (*args, hicn4_face_output_trace_t *);
+ hicn4_face_output_trace_t *t = va_arg (*args, hicn4_face_output_trace_t *);
- s =
- format (s, "FACE_IP4_OUTPUT: pkt: %d, sw_if_index %d, next index %d\n%U",
- (int) t->pkt_type, t->sw_if_index, t->next_index,
- format_ip4_header, t->packet_data, sizeof (t->packet_data));
+ s = format (s,
+ "FACE_IP4_OUTPUT: pkt: %d, sw_if_index %d, next index %d, next "
+ "node: %d\n%U",
+ (int) t->pkt_type, t->sw_if_index, t->next_index, t->next_node,
+ format_ip4_header, t->packet_data, sizeof (t->packet_data));
return (s);
}
/*
* Node registration for the interest forwarder node
*/
-/* *INDENT-OFF* */
-VLIB_REGISTER_NODE(hicn4_face_output_node) =
-{
+VLIB_REGISTER_NODE (hicn4_face_output_node) = {
.function = hicn4_face_output_node_fn,
.name = "hicn4-face-output",
- .vector_size = sizeof(u32),
+ .vector_size = sizeof (u32),
.format_trace = hicn4_face_output_format_trace,
.type = VLIB_NODE_TYPE_INTERNAL,
- .n_errors = ARRAY_LEN(hicn4_face_output_error_strings),
+ .n_errors = ARRAY_LEN (hicn4_face_output_error_strings),
.error_strings = hicn4_face_output_error_strings,
- .n_next_nodes = HICN4_FACE_OUTPUT_N_NEXT,
- /* Reusing the list of nodes from lookup to be compatible with arp */
- .next_nodes =
- {
- [HICN4_FACE_OUTPUT_NEXT_ECHO_REPLY] = "hicn4-face-input",
- [HICN4_FACE_OUTPUT_NEXT_UDP4_ENCAP] = "udp4-encap",
- [HICN4_FACE_OUTPUT_NEXT_UDP6_ENCAP] = "udp6-encap"
- }
+ .sibling_of = "ip4-lookup",
};
-/* *INDENT-ON* */
-
static uword
-hicn6_face_output_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
- vlib_frame_t * frame)
+hicn6_face_output_node_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
+ vlib_frame_t *frame)
{
u32 n_left_from, *from, *to_next, next_index;
vl_api_hicn_api_node_stats_get_reply_t stats = { 0 };
@@ -885,8 +875,7 @@ hicn6_face_output_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
vlib_put_next_frame (vm, node, next_index, n_left_to_next);
}
- vlib_node_increment_counter (vm, node->node_index,
- HICNFWD_ERROR_INTERESTS,
+ vlib_node_increment_counter (vm, node->node_index, HICNFWD_ERROR_INTERESTS,
stats.pkts_interest_count);
return (frame->n_vectors);
@@ -894,43 +883,33 @@ hicn6_face_output_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
/* packet trace format function */
static u8 *
-hicn6_face_output_format_trace (u8 * s, va_list * args)
+hicn6_face_output_format_trace (u8 *s, va_list *args)
{
CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
- hicn6_face_output_trace_t *t =
- va_arg (*args, hicn6_face_output_trace_t *);
+ hicn6_face_output_trace_t *t = va_arg (*args, hicn6_face_output_trace_t *);
- s =
- format (s, "FACE_IP6_OUTPUT: pkt: %d, sw_if_index %d, next index %d\n%U",
- (int) t->pkt_type, t->sw_if_index, t->next_index,
- format_ip6_header, t->packet_data, sizeof (t->packet_data));
+ s = format (s,
+ "FACE_IP6_OUTPUT: pkt: %d, sw_if_index %d, next index %d, next "
+ "node: %d\n%U",
+ (int) t->pkt_type, t->sw_if_index, t->next_index, t->next_node,
+ format_ip6_header, t->packet_data, sizeof (t->packet_data));
return (s);
}
/*
* Node registration for the interest forwarder node
*/
-/* *INDENT-OFF* */
-VLIB_REGISTER_NODE(hicn6_face_output_node) =
-{
+VLIB_REGISTER_NODE (hicn6_face_output_node) = {
.function = hicn6_face_output_node_fn,
.name = "hicn6-face-output",
- .vector_size = sizeof(u32),
+ .vector_size = sizeof (u32),
.format_trace = hicn6_face_output_format_trace,
.type = VLIB_NODE_TYPE_INTERNAL,
- .n_errors = ARRAY_LEN(hicn6_face_output_error_strings),
+ .n_errors = ARRAY_LEN (hicn6_face_output_error_strings),
.error_strings = hicn6_face_output_error_strings,
- .n_next_nodes = HICN6_FACE_OUTPUT_N_NEXT,
- /* Reusing the list of nodes from lookup to be compatible with neighbour discovery */
- .next_nodes =
- {
- [HICN6_FACE_OUTPUT_NEXT_ECHO_REPLY] = "hicn6-face-input",
- [HICN6_FACE_OUTPUT_NEXT_UDP4_ENCAP] = "udp4-encap",
- [HICN6_FACE_OUTPUT_NEXT_UDP6_ENCAP] = "udp6-encap"
- }
+ .sibling_of = "ip6-lookup",
};
-/* *INDENT-ON* */
/*
* fd.io coding-style-patch-verification: ON
diff --git a/hicn-plugin/src/faces/face_node.h b/hicn-plugin/src/faces/face_node.h
index f5a8bf5ae..70daa1393 100644
--- a/hicn-plugin/src/faces/face_node.h
+++ b/hicn-plugin/src/faces/face_node.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020 Cisco and/or its affiliates.
+ * Copyright (c) 2021 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -31,9 +31,9 @@
 * The node following the input face nodes is the hicn-data-pcslookup.
* Output face nodes follow the strategy and the hicn-interest-hitpit nodes and
* they perform the src nat on each interest packet. The node following the
- * output face nodes depends on the adjacency type. In case of ip, the following
- * node is the ip-rewrite, in case of tunnels the next node is the one implementing
- * the tunnel encapsulation (udp-encap, mpls, etc).
+ * output face nodes depends on the adjacency type. In case of ip, the
+ * following node is the ip-rewrite, in case of tunnels the next node is the
+ * one implementing the tunnel encapsulation (udp-encap, mpls, etc).
*/
extern vlib_node_registration_t hicn4_face_input_node;
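The face-output registrations above drop their explicit next_nodes arrays in favour of .sibling_of = "ip4-lookup" / "ip6-lookup", so each node inherits the lookup node's full next-node table and adjacency-resolved next indices (ip-rewrite, udp4-encap, udp6-encap, ...) stay valid without being listed by hand. The following is a minimal sketch of that pattern; the node name, function name and the use of arc 0 are invented for illustration only and are not part of this patch.

#include <vlib/vlib.h>

static uword
example_face_output_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
			vlib_frame_t *frame)
{
  u32 n_left_from, *from, *to_next, next_index;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
      while (n_left_from > 0 && n_left_to_next > 0)
	{
	  u32 bi0 = from[0];
	  /* Arc 0 of the inherited table is used here purely to keep the
	   * sketch self-contained; a real output node would pick the arc
	   * from its face/adjacency state. */
	  u32 next0 = 0;

	  from += 1;
	  n_left_from -= 1;
	  to_next[0] = bi0;
	  to_next += 1;
	  n_left_to_next -= 1;

	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
					   n_left_to_next, bi0, next0);
	}
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  return frame->n_vectors;
}

VLIB_REGISTER_NODE (example_face_output_node) = {
  .function = example_face_output_fn,
  .name = "example-face-output",
  .vector_size = sizeof (u32),
  .type = VLIB_NODE_TYPE_INTERNAL,
  /* Inherit ip4-lookup's next nodes instead of listing them here. */
  .sibling_of = "ip4-lookup",
};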
diff --git a/hicn-plugin/src/faces/iface_node.c b/hicn-plugin/src/faces/iface_node.c
index a4fd1885c..8b74c6c25 100644
--- a/hicn-plugin/src/faces/iface_node.c
+++ b/hicn-plugin/src/faces/iface_node.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020 Cisco and/or its affiliates.
+ * Copyright (c) 2021-2022 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -19,6 +19,11 @@
#include "../hicn.h"
#include "../infra.h"
#include "../cache_policies/cs_lru.h"
+#include "../parser.h"
+#include "iface_node.h"
+
+#include <hicn/error.h>
+#include <hicn/util/ip_address.h>
/**
* @File
@@ -52,12 +57,14 @@ typedef struct
u32 next_index;
u32 sw_if_index;
u8 pkt_type;
+ hicn_error_t error;
u8 packet_data[60];
} hicn4_iface_input_trace_t;
typedef enum
{
HICN4_IFACE_INPUT_NEXT_INTEREST,
+ HICN4_IFACE_INPUT_NEXT_INTEREST_MANIFEST,
HICN4_IFACE_INPUT_NEXT_MAPME,
HICN4_IFACE_INPUT_NEXT_ERROR_DROP,
HICN4_IFACE_INPUT_N_NEXT,
@@ -69,12 +76,14 @@ typedef struct
u32 next_index;
u32 sw_if_index;
u8 pkt_type;
+ hicn_error_t error;
u8 packet_data[60];
} hicn6_iface_input_trace_t;
typedef enum
{
HICN6_IFACE_INPUT_NEXT_INTEREST,
+ HICN6_IFACE_INPUT_NEXT_INTEREST_MANIFEST,
HICN6_IFACE_INPUT_NEXT_MAPME,
HICN6_IFACE_INPUT_NEXT_ERROR_DROP,
HICN6_IFACE_INPUT_N_NEXT,
@@ -86,21 +95,8 @@ typedef enum
#define NEXT_INTEREST_IP4 HICN4_IFACE_INPUT_NEXT_INTEREST
#define NEXT_INTEREST_IP6 HICN6_IFACE_INPUT_NEXT_INTEREST
-#define ADDRESS_IP4 ip_interface_address_t *ia = 0;ip4_address_t *local_address = ip4_interface_first_address(&ip4_main, swif, &ia)
-#define ADDRESS_IP6 ip6_address_t *local_address = ip6_interface_first_address(&ip6_main, swif)
-
-#define ADDRESSX2_IP4 ip_interface_address_t *ia0, *ia1; ia0 = ia1 = 0; \
- ip4_address_t *local_address0 = ip4_interface_first_address(&ip4_main, swif0, &ia0); \
- ip4_address_t *local_address1 = ip4_interface_first_address(&ip4_main, swif1, &ia1);
-
-#define ADDRESSX2_IP6 ip6_address_t *local_address0 = ip6_interface_first_address(&ip6_main, swif0); \
- ip6_address_t *local_address1 = ip6_interface_first_address(&ip6_main, swif1);
-
-#define DPO_ADD_LOCK_IFACE_IP4 hicn_iface_ip4_add_and_lock
-#define DPO_ADD_LOCK_IFACE_IP6 hicn_iface_ip6_add_and_lock
-
-//#define VLIB_EDGE_IP4 data_fwd_iface_ip4_vlib_edge
-//#define VLIB_EDGE_IP6 data_fwd_iface_ip6_vlib_edge
+#define DPO_ADD_LOCK_FACE_IP4 hicn_face_ip4_add_and_lock
+#define DPO_ADD_LOCK_FACE_IP6 hicn_face_ip6_add_and_lock
#define IP_HEADER_4 ip4_header_t
#define IP_HEADER_6 ip6_header_t
@@ -122,7 +118,6 @@ static char *hicn6_iface_output_error_strings[] = {
#undef _
};
-
/* Trace context struct */
typedef struct
{
@@ -132,14 +127,6 @@ typedef struct
u8 packet_data[60];
} hicn4_iface_output_trace_t;
-typedef enum
-{
- HICN4_IFACE_OUTPUT_NEXT_LOOKUP,
- HICN4_IFACE_OUTPUT_NEXT_UDP4_ENCAP,
- HICN4_IFACE_OUTPUT_NEXT_UDP6_ENCAP,
- HICN4_IFACE_OUTPUT_N_NEXT,
-} hicn4_iface_output_next_t;
-
/* Trace context struct */
typedef struct
{
@@ -149,14 +136,6 @@ typedef struct
u8 packet_data[60];
} hicn6_iface_output_trace_t;
-typedef enum
-{
- HICN6_IFACE_OUTPUT_NEXT_LOOKUP,
- HICN6_IFACE_OUTPUT_NEXT_UDP4_ENCAP,
- HICN6_IFACE_OUTPUT_NEXT_UDP6_ENCAP,
- HICN6_IFACE_OUTPUT_N_NEXT,
-} hicn6_iface_output_next_t;
-
//#define ERROR_OUTPUT_IP4 HICN4_IFACE_OUTPUT_NEXT_ERROR_DROP
//#define ERROR_OUTPUT_IP6 HICN6_IFACE_OUTPUT_NEXT_ERROR_DROP
@@ -166,6 +145,9 @@ typedef enum
#define NEXT_UDP_ENCAP_IP4 HICN4_IFACE_OUTPUT_NEXT_UDP4_ENCAP
#define NEXT_UDP_ENCAP_IP6 HICN6_IFACE_OUTPUT_NEXT_UDP6_ENCAP
+#define NEXT_PG4 HICN4_IFACE_OUTPUT_NEXT_PG
+#define NEXT_PG6 HICN6_IFACE_OUTPUT_NEXT_PG
+
#define HICN_REWRITE_DATA_IP4 hicn_rewrite_iface_data4
#define HICN_REWRITE_DATA_IP6 hicn_rewrite_iface_data6
@@ -174,207 +156,328 @@ typedef enum
// NODES IMPLEMENTATIONS
-#define iface_input_x1(ipv) \
- do { \
- vlib_buffer_t *b0; \
- u32 bi0, next0, next_iface0; \
- IP_HEADER_##ipv * ip_hdr = NULL; \
- hicn_buffer_t * hicnb0; \
- /* Prefetch for next iteration. */ \
- if (n_left_from > 1) \
- { \
- vlib_buffer_t *b1; \
- b1 = vlib_get_buffer (vm, from[1]); \
- CLIB_PREFETCH (b1, 2*CLIB_CACHE_LINE_BYTES, STORE); \
- CLIB_PREFETCH (b1->data, CLIB_CACHE_LINE_BYTES , LOAD); \
- } \
- /* Dequeue a packet buffer */ \
- bi0 = from[0]; \
- from += 1; \
- n_left_from -= 1; \
- to_next[0] = bi0; \
- to_next += 1; \
- n_left_to_next -= 1; \
- \
- b0 = vlib_get_buffer (vm, bi0); \
- hicnb0 = hicn_get_buffer(b0); \
- ip_hdr = (IP_HEADER_##ipv *) vlib_buffer_get_current(b0); \
- \
- stats.pkts_interest_count += 1; \
- \
- u8 is_icmp = ip_hdr->protocol == IPPROTO_ICMPV##ipv; \
- \
- next0 = is_icmp*NEXT_MAPME_IP##ipv + \
- (1-is_icmp)*NEXT_INTEREST_IP##ipv; \
- \
- next_iface0 = NEXT_DATA_LOOKUP_IP##ipv; \
- \
- if (hicnb0->flags & HICN_BUFFER_FLAGS_FROM_UDP4_TUNNEL) \
- next_iface0 = NEXT_UDP_ENCAP_IP4; \
- else if(hicnb0->flags & HICN_BUFFER_FLAGS_FROM_UDP6_TUNNEL) \
- next_iface0 = NEXT_UDP_ENCAP_IP6; \
- \
- DPO_ADD_LOCK_IFACE_IP##ipv \
- (&(hicnb0->face_id), \
- &hicnb0->flags, \
- &(ip_hdr->src_address), \
- vnet_buffer(b0)->sw_if_index[VLIB_RX], \
- vnet_buffer(b0)->ip.adj_index[VLIB_RX], \
- next_iface0); \
- \
- if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) && \
- (b0->flags & VLIB_BUFFER_IS_TRACED))) \
- { \
- TRACE_INPUT_PKT_IP##ipv *t = \
- vlib_add_trace (vm, node, b0, sizeof (*t)); \
- t->pkt_type = HICN_PKT_TYPE_INTEREST; \
- t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX]; \
- t->next_index = next0; \
- clib_memcpy_fast (t->packet_data, \
- vlib_buffer_get_current (b0), \
- sizeof (t->packet_data)); \
- \
- } \
- \
- vlib_increment_combined_counter ( \
- &counters[hicnb0->face_id \
- * HICN_N_COUNTER], thread_index, \
- HICN_FACE_COUNTERS_INTEREST_RX, \
- 1, \
- vlib_buffer_length_in_chain(vm, b0)); \
- \
- /* Verify speculative enqueue, maybe switch current next frame */ \
- vlib_validate_buffer_enqueue_x1 (vm, node, next_index, \
- to_next, n_left_to_next, \
- bi0, next0); \
- }while(0)
-
-
-#define iface_input_x2(ipv) \
- do { \
- vlib_buffer_t *b0, *b1; \
- u32 bi0, bi1, next0, next1, next_iface0, next_iface1; \
- IP_HEADER_##ipv * ip_hdr0 = NULL; \
- IP_HEADER_##ipv * ip_hdr1 = NULL; \
- hicn_buffer_t *hicnb0, *hicnb1; \
- \
- /* Prefetch for next iteration. */ \
- vlib_buffer_t *b2, *b3; \
- b2 = vlib_get_buffer (vm, from[2]); \
- b3 = vlib_get_buffer (vm, from[3]); \
- CLIB_PREFETCH (b2, 2*CLIB_CACHE_LINE_BYTES, STORE); \
- CLIB_PREFETCH (b3, 2*CLIB_CACHE_LINE_BYTES, STORE); \
- CLIB_PREFETCH (b2->data, CLIB_CACHE_LINE_BYTES , LOAD); \
- CLIB_PREFETCH (b3->data, CLIB_CACHE_LINE_BYTES , LOAD); \
- \
- /* Dequeue a packet buffer */ \
- bi0 = from[0]; \
- bi1 = from[1]; \
- from += 2; \
- n_left_from -= 2; \
- to_next[0] = bi0; \
- to_next[1] = bi1; \
- to_next += 2; \
- n_left_to_next -= 2; \
- \
- b0 = vlib_get_buffer (vm, bi0); \
- b1 = vlib_get_buffer (vm, bi1); \
- hicnb0 = hicn_get_buffer(b0); \
- hicnb1 = hicn_get_buffer(b1); \
- ip_hdr0 = (IP_HEADER_##ipv *) vlib_buffer_get_current(b0); \
- ip_hdr1 = (IP_HEADER_##ipv *) vlib_buffer_get_current(b1); \
- \
- stats.pkts_interest_count += 2; \
- \
- u8 is_icmp0 = ip_hdr0->protocol == IPPROTO_ICMPV##ipv; \
- u8 is_icmp1 = ip_hdr1->protocol == IPPROTO_ICMPV##ipv; \
- \
- next0 = is_icmp0*NEXT_MAPME_IP##ipv + \
- (1-is_icmp0)*NEXT_INTEREST_IP##ipv; \
- \
- next1 = is_icmp1*NEXT_MAPME_IP##ipv + \
- (1-is_icmp1)*NEXT_INTEREST_IP##ipv; \
- \
- next_iface0 = NEXT_DATA_LOOKUP_IP##ipv; \
- \
- if (hicnb0->flags & HICN_BUFFER_FLAGS_FROM_UDP4_TUNNEL) \
- next_iface0 = NEXT_UDP_ENCAP_IP4; \
- else if(hicnb0->flags & HICN_BUFFER_FLAGS_FROM_UDP6_TUNNEL) \
- next_iface0 = NEXT_UDP_ENCAP_IP6; \
- \
- next_iface1 = NEXT_DATA_LOOKUP_IP##ipv; \
- \
- if (hicnb1->flags & HICN_BUFFER_FLAGS_FROM_UDP4_TUNNEL) \
- next_iface1 = NEXT_UDP_ENCAP_IP4; \
- else if(hicnb1->flags & HICN_BUFFER_FLAGS_FROM_UDP6_TUNNEL) \
- next_iface1 = NEXT_UDP_ENCAP_IP6; \
- \
- DPO_ADD_LOCK_IFACE_IP##ipv \
- (&(hicnb0->face_id), \
- &hicnb0->flags, \
- &(ip_hdr0->src_address), \
- vnet_buffer(b0)->sw_if_index[VLIB_RX], \
- vnet_buffer(b0)->ip.adj_index[VLIB_RX], \
- next_iface0); \
- \
- DPO_ADD_LOCK_IFACE_IP##ipv \
- (&(hicnb1->face_id), \
- &hicnb1->flags, \
- &(ip_hdr1->src_address), \
- vnet_buffer(b1)->sw_if_index[VLIB_RX], \
- vnet_buffer(b1)->ip.adj_index[VLIB_RX], \
- next_iface1); \
- \
- if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) && \
- (b0->flags & VLIB_BUFFER_IS_TRACED))) \
- { \
- TRACE_INPUT_PKT_IP##ipv *t = \
- vlib_add_trace (vm, node, b0, sizeof (*t)); \
- t->pkt_type = HICN_PKT_TYPE_INTEREST; \
- t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX]; \
- t->next_index = next0; \
- clib_memcpy_fast (t->packet_data, \
- vlib_buffer_get_current (b0), \
- sizeof (t->packet_data)); \
- } \
- \
- if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) && \
- (b1->flags & VLIB_BUFFER_IS_TRACED))) \
- { \
- TRACE_INPUT_PKT_IP##ipv *t = \
- vlib_add_trace (vm, node, b1, sizeof (*t)); \
- t->pkt_type = HICN_PKT_TYPE_INTEREST; \
- t->sw_if_index = vnet_buffer (b1)->sw_if_index[VLIB_RX]; \
- t->next_index = next1; \
- clib_memcpy_fast (t->packet_data, \
- vlib_buffer_get_current (b1), \
- sizeof (t->packet_data)); \
- } \
- \
- vlib_increment_combined_counter ( \
- &counters[hicnb0->face_id \
- * HICN_N_COUNTER], thread_index, \
- HICN_FACE_COUNTERS_INTEREST_RX, \
- 1, \
- vlib_buffer_length_in_chain(vm, b0)); \
- \
- vlib_increment_combined_counter ( \
- &counters[hicnb1->face_id \
- * HICN_N_COUNTER], thread_index, \
- HICN_FACE_COUNTERS_INTEREST_RX, \
- 1, \
- vlib_buffer_length_in_chain(vm, b1)); \
- \
- /* Verify speculative enqueue, maybe switch current next frame */ \
- vlib_validate_buffer_enqueue_x2 (vm, node, next_index, \
- to_next, n_left_to_next, \
- bi0, bi1, next0, next1); \
- }while(0)
+#define iface_input_x1(ipv) \
+ do \
+ { \
+ vlib_buffer_t *b0; \
+ u32 bi0, next0, next_iface0, sw_if0 = ~0; \
+ IP_HEADER_##ipv *ip_hdr = NULL; \
+ hicn_buffer_t *hicnb0; \
+ int ret0 = HICN_ERROR_NONE; \
+ u8 is_mapme0, is_manifest0; \
+ /* Prefetch for next iteration. */ \
+ if (n_left_from > 1) \
+ { \
+ vlib_buffer_t *b1; \
+ b1 = vlib_get_buffer (vm, from[1]); \
+ CLIB_PREFETCH (b1, 2 * CLIB_CACHE_LINE_BYTES, STORE); \
+ CLIB_PREFETCH (b1->data, CLIB_CACHE_LINE_BYTES, LOAD); \
+ } \
+ /* Dequeue a packet buffer */ \
+ bi0 = from[0]; \
+ from += 1; \
+ n_left_from -= 1; \
+ to_next[0] = bi0; \
+ to_next += 1; \
+ n_left_to_next -= 1; \
+ \
+ b0 = vlib_get_buffer (vm, bi0); \
+ hicnb0 = hicn_get_buffer (b0); \
+ ip_hdr = (IP_HEADER_##ipv *) vlib_buffer_get_current (b0); \
+ \
+ /* Parse packet and cache useful info in opaque2 */ \
+ ret0 = \
+ hicn_interest_parse_pkt (b0, vlib_buffer_length_in_chain (vm, b0)); \
+ is_mapme0 = hicn_packet_get_type (&hicn_get_buffer (b0)->pkbuf) == \
+ HICN_PACKET_TYPE_MAPME; \
+ is_manifest0 = hicnb0->payload_type == HPT_MANIFEST; \
+ ret0 = (ret0 == HICN_ERROR_NONE) || \
+ (ret0 == HICN_ERROR_PARSER_MAPME_PACKET); \
+ if (PREDICT_FALSE (!ret0)) \
+ { \
+ next0 = HICN##ipv##_IFACE_INPUT_NEXT_ERROR_DROP; \
+ } \
+ else \
+ { \
+ next0 = is_mapme0 * NEXT_MAPME_IP##ipv + \
+ (1 - is_mapme0) * (NEXT_INTEREST_IP##ipv + is_manifest0); \
+ \
+ next_iface0 = NEXT_DATA_LOOKUP_IP##ipv; \
+ sw_if0 = vnet_buffer (b0)->sw_if_index[VLIB_RX]; \
+ \
+ if (hicnb0->flags & HICN_BUFFER_FLAGS_FROM_UDP4_TUNNEL && \
+ vnet_buffer (b0)->ip.adj_index[VLIB_RX] != ADJ_INDEX_INVALID) \
+ { \
+ next_iface0 = NEXT_UDP_ENCAP_IP4; \
+ sw_if0 = ~0; \
+ } \
+ else if (hicnb0->flags & HICN_BUFFER_FLAGS_FROM_UDP6_TUNNEL && \
+ vnet_buffer (b0)->ip.adj_index[VLIB_RX] != \
+ ADJ_INDEX_INVALID) \
+ { \
+ next_iface0 = NEXT_UDP_ENCAP_IP6; \
+ sw_if0 = ~0; \
+ } \
+ else if (hicnb0->flags & HICN_BUFFER_FLAGS_FROM_PG) \
+ { \
+ next_iface0 = NEXT_PG##ipv; \
+ } \
+ \
+ DPO_ADD_LOCK_FACE_IP##ipv ( \
+ &(hicnb0->face_id), &hicnb0->flags, &(ip_hdr->src_address), \
+ sw_if0, vnet_buffer (b0)->ip.adj_index[VLIB_RX], next_iface0); \
+ } \
+ \
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) && \
+ (b0->flags & VLIB_BUFFER_IS_TRACED))) \
+ { \
+ TRACE_INPUT_PKT_IP##ipv *t = \
+ vlib_add_trace (vm, node, b0, sizeof (*t)); \
+ t->pkt_type = HICN_PACKET_TYPE_INTEREST; \
+ t->sw_if_index = sw_if0; \
+ t->next_index = next0; \
+ t->error = ret0; \
+ clib_memcpy_fast (t->packet_data, vlib_buffer_get_current (b0), \
+ sizeof (t->packet_data)); \
+ } \
+ \
+ vlib_increment_combined_counter ( \
+ &counters[hicnb0->face_id * HICN_N_COUNTER], thread_index, \
+ HICN_FACE_COUNTERS_INTEREST_RX, 1, \
+ vlib_buffer_length_in_chain (vm, b0)); \
+ \
+ /* Verify speculative enqueue, maybe switch current next frame */ \
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next, \
+ n_left_to_next, bi0, next0); \
+ } \
+ while (0)
+
+#define iface_input_x2(ipv) \
+ do \
+ { \
+ vlib_buffer_t *b0, *b1; \
+ u32 bi0, bi1, next0, next1; \
+ u32 next_iface0, next_iface1, sw_if0 = ~0, sw_if1 = ~0; \
+ u8 is_mapme0, is_mapme1, is_manifest0, is_manifest1; \
+ IP_HEADER_##ipv *ip_hdr0 = NULL; \
+ IP_HEADER_##ipv *ip_hdr1 = NULL; \
+ int ret0 = HICN_ERROR_NONE, ret1 = HICN_ERROR_NONE; \
+ hicn_buffer_t *hicnb0, *hicnb1; \
+ \
+ /* Prefetch for next iteration. */ \
+ vlib_buffer_t *b2, *b3; \
+ b2 = vlib_get_buffer (vm, from[2]); \
+ b3 = vlib_get_buffer (vm, from[3]); \
+ CLIB_PREFETCH (b2, 2 * CLIB_CACHE_LINE_BYTES, STORE); \
+ CLIB_PREFETCH (b3, 2 * CLIB_CACHE_LINE_BYTES, STORE); \
+ CLIB_PREFETCH (b2->data, CLIB_CACHE_LINE_BYTES, LOAD); \
+ CLIB_PREFETCH (b3->data, CLIB_CACHE_LINE_BYTES, LOAD); \
+ \
+ /* Dequeue a packet buffer */ \
+ bi0 = from[0]; \
+ bi1 = from[1]; \
+ from += 2; \
+ n_left_from -= 2; \
+ to_next[0] = bi0; \
+ to_next[1] = bi1; \
+ to_next += 2; \
+ n_left_to_next -= 2; \
+ \
+ b0 = vlib_get_buffer (vm, bi0); \
+ b1 = vlib_get_buffer (vm, bi1); \
+ hicnb0 = hicn_get_buffer (b0); \
+ hicnb1 = hicn_get_buffer (b1); \
+ ip_hdr0 = (IP_HEADER_##ipv *) vlib_buffer_get_current (b0); \
+ ip_hdr1 = (IP_HEADER_##ipv *) vlib_buffer_get_current (b1); \
+ \
+ stats.pkts_interest_count += 2; \
+ \
+ /* Parse packet and cache useful info in opaque2 */ \
+ ret0 = \
+ hicn_interest_parse_pkt (b0, vlib_buffer_length_in_chain (vm, b0)); \
+ ret1 = \
+ hicn_interest_parse_pkt (b1, vlib_buffer_length_in_chain (vm, b1)); \
+ is_mapme0 = hicn_packet_get_type (&hicn_get_buffer (b0)->pkbuf) == \
+ HICN_PACKET_TYPE_MAPME; \
+ is_mapme1 = hicn_packet_get_type (&hicn_get_buffer (b1)->pkbuf) == \
+ HICN_PACKET_TYPE_MAPME; \
+ is_manifest0 = hicnb0->payload_type == HPT_MANIFEST; \
+ is_manifest1 = hicnb1->payload_type == HPT_MANIFEST; \
+ ret0 = (ret0 == HICN_ERROR_NONE) || \
+ (ret0 == HICN_ERROR_PARSER_MAPME_PACKET); \
+ ret1 = (ret1 == HICN_ERROR_NONE) || \
+ (ret1 == HICN_ERROR_PARSER_MAPME_PACKET); \
+ \
+ if (PREDICT_TRUE (ret0 && ret1)) \
+ { \
+ next0 = is_mapme0 * NEXT_MAPME_IP##ipv + \
+ (1 - is_mapme0) * (NEXT_INTEREST_IP##ipv + is_manifest0); \
+ \
+ next1 = is_mapme1 * NEXT_MAPME_IP##ipv + \
+ (1 - is_mapme1) * (NEXT_INTEREST_IP##ipv + is_manifest1); \
+ \
+ next_iface0 = NEXT_DATA_LOOKUP_IP##ipv; \
+ sw_if0 = vnet_buffer (b0)->sw_if_index[VLIB_RX]; \
+ \
+ if (hicnb0->flags & HICN_BUFFER_FLAGS_FROM_UDP4_TUNNEL && \
+ vnet_buffer (b0)->ip.adj_index[VLIB_RX] != ADJ_INDEX_INVALID) \
+ { \
+ next_iface0 = NEXT_UDP_ENCAP_IP4; \
+ sw_if0 = ~0; \
+ } \
+ else if (hicnb0->flags & HICN_BUFFER_FLAGS_FROM_UDP6_TUNNEL && \
+ vnet_buffer (b0)->ip.adj_index[VLIB_RX] != \
+ ADJ_INDEX_INVALID) \
+ { \
+ next_iface0 = NEXT_UDP_ENCAP_IP6; \
+ sw_if0 = ~0; \
+ } \
+ else if (hicnb0->flags & HICN_BUFFER_FLAGS_FROM_PG) \
+ { \
+ next_iface0 = NEXT_PG##ipv; \
+ } \
+ \
+ next_iface1 = NEXT_DATA_LOOKUP_IP##ipv; \
+ sw_if1 = vnet_buffer (b1)->sw_if_index[VLIB_RX]; \
+ \
+ if (hicnb1->flags & HICN_BUFFER_FLAGS_FROM_UDP4_TUNNEL && \
+ vnet_buffer (b1)->ip.adj_index[VLIB_RX] != ADJ_INDEX_INVALID) \
+ { \
+ next_iface1 = NEXT_UDP_ENCAP_IP4; \
+ sw_if1 = ~0; \
+ } \
+ else if (hicnb1->flags & HICN_BUFFER_FLAGS_FROM_UDP6_TUNNEL && \
+ vnet_buffer (b1)->ip.adj_index[VLIB_RX] != \
+ ADJ_INDEX_INVALID) \
+ { \
+ next_iface1 = NEXT_UDP_ENCAP_IP6; \
+ sw_if1 = ~0; \
+ } \
+	  else if (hicnb1->flags & HICN_BUFFER_FLAGS_FROM_PG)                 \
+ { \
+ next_iface1 = NEXT_PG##ipv; \
+ } \
+ \
+ DPO_ADD_LOCK_FACE_IP##ipv ( \
+ &(hicnb0->face_id), &hicnb0->flags, &(ip_hdr0->src_address), \
+ sw_if0, vnet_buffer (b0)->ip.adj_index[VLIB_RX], next_iface0); \
+ \
+ DPO_ADD_LOCK_FACE_IP##ipv ( \
+ &(hicnb1->face_id), &hicnb1->flags, &(ip_hdr1->src_address), \
+ sw_if1, vnet_buffer (b1)->ip.adj_index[VLIB_RX], next_iface1); \
+ } \
+ else if (ret0 && !ret1) \
+ { \
+ next1 = HICN##ipv##_IFACE_INPUT_NEXT_ERROR_DROP; \
+ next0 = is_mapme0 * NEXT_MAPME_IP##ipv + \
+ (1 - is_mapme0) * NEXT_INTEREST_IP##ipv; \
+ next_iface0 = NEXT_DATA_LOOKUP_IP##ipv; \
+ sw_if0 = vnet_buffer (b0)->sw_if_index[VLIB_RX]; \
+ \
+ if (hicnb0->flags & HICN_BUFFER_FLAGS_FROM_UDP4_TUNNEL && \
+ vnet_buffer (b0)->ip.adj_index[VLIB_RX] != ADJ_INDEX_INVALID) \
+ { \
+ next_iface0 = NEXT_UDP_ENCAP_IP4; \
+ sw_if0 = ~0; \
+ } \
+ else if (hicnb0->flags & HICN_BUFFER_FLAGS_FROM_UDP6_TUNNEL && \
+ vnet_buffer (b0)->ip.adj_index[VLIB_RX] != \
+ ADJ_INDEX_INVALID) \
+ { \
+ next_iface0 = NEXT_UDP_ENCAP_IP6; \
+ sw_if0 = ~0; \
+ } \
+ else if (hicnb0->flags & HICN_BUFFER_FLAGS_FROM_PG) \
+ { \
+ next_iface0 = NEXT_PG##ipv; \
+ } \
+ \
+ DPO_ADD_LOCK_FACE_IP##ipv ( \
+ &(hicnb0->face_id), &hicnb0->flags, &(ip_hdr0->src_address), \
+ sw_if0, vnet_buffer (b0)->ip.adj_index[VLIB_RX], next_iface0); \
+ } \
+ else if (!ret0 && ret1) \
+ { \
+ next0 = HICN##ipv##_IFACE_INPUT_NEXT_ERROR_DROP; \
+ next_iface1 = NEXT_DATA_LOOKUP_IP##ipv; \
+ sw_if1 = vnet_buffer (b1)->sw_if_index[VLIB_RX]; \
+ next1 = is_mapme1 * NEXT_MAPME_IP##ipv + \
+ (1 - is_mapme1) * NEXT_INTEREST_IP##ipv; \
+ \
+ if (hicnb1->flags & HICN_BUFFER_FLAGS_FROM_UDP4_TUNNEL && \
+ vnet_buffer (b1)->ip.adj_index[VLIB_RX] != ADJ_INDEX_INVALID) \
+ { \
+ next_iface1 = NEXT_UDP_ENCAP_IP4; \
+ sw_if1 = ~0; \
+ } \
+ else if (hicnb1->flags & HICN_BUFFER_FLAGS_FROM_UDP6_TUNNEL && \
+ vnet_buffer (b1)->ip.adj_index[VLIB_RX] != \
+ ADJ_INDEX_INVALID) \
+ { \
+ next_iface1 = NEXT_UDP_ENCAP_IP6; \
+ sw_if1 = ~0; \
+ } \
+	  else if (hicnb1->flags & HICN_BUFFER_FLAGS_FROM_PG)                 \
+ { \
+ next_iface1 = NEXT_PG##ipv; \
+ } \
+ \
+ DPO_ADD_LOCK_FACE_IP##ipv ( \
+ &(hicnb1->face_id), &hicnb1->flags, &(ip_hdr1->src_address), \
+ sw_if1, vnet_buffer (b1)->ip.adj_index[VLIB_RX], next_iface1); \
+ } \
+ else \
+ { \
+ next0 = HICN##ipv##_IFACE_INPUT_NEXT_ERROR_DROP; \
+ next1 = HICN##ipv##_IFACE_INPUT_NEXT_ERROR_DROP; \
+ } \
+ \
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) && \
+ (b0->flags & VLIB_BUFFER_IS_TRACED))) \
+ { \
+ TRACE_INPUT_PKT_IP##ipv *t = \
+ vlib_add_trace (vm, node, b0, sizeof (*t)); \
+ t->pkt_type = HICN_PACKET_TYPE_INTEREST; \
+ t->sw_if_index = sw_if0; \
+ t->next_index = next0; \
+ t->error = ret0; \
+ clib_memcpy_fast (t->packet_data, vlib_buffer_get_current (b0), \
+ sizeof (t->packet_data)); \
+ } \
+ \
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) && \
+ (b1->flags & VLIB_BUFFER_IS_TRACED))) \
+ { \
+ TRACE_INPUT_PKT_IP##ipv *t = \
+ vlib_add_trace (vm, node, b1, sizeof (*t)); \
+ t->pkt_type = HICN_PACKET_TYPE_INTEREST; \
+ t->sw_if_index = sw_if1; \
+ t->next_index = next1; \
+ t->error = ret1; \
+ clib_memcpy_fast (t->packet_data, vlib_buffer_get_current (b1), \
+ sizeof (t->packet_data)); \
+ } \
+ \
+ vlib_increment_combined_counter ( \
+ &counters[hicnb0->face_id * HICN_N_COUNTER], thread_index, \
+ HICN_FACE_COUNTERS_INTEREST_RX, 1, \
+ vlib_buffer_length_in_chain (vm, b0)); \
+ \
+ vlib_increment_combined_counter ( \
+ &counters[hicnb1->face_id * HICN_N_COUNTER], thread_index, \
+ HICN_FACE_COUNTERS_INTEREST_RX, 1, \
+ vlib_buffer_length_in_chain (vm, b1)); \
+ \
+ /* Verify speculative enqueue, maybe switch current next frame */ \
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next, \
+ n_left_to_next, bi0, bi1, next0, \
+ next1); \
+ } \
+ while (0)
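The branchless next-index selection used in both macros above relies on the new enum layout, where the manifest arc immediately follows the interest arc. A small self-contained illustration (the enum values and the helper name are invented for the example, not part of the plugin):

/* Stand-in for HICN{4,6}_IFACE_INPUT_NEXT_*: manifest follows interest. */
enum
{
  EX_NEXT_INTEREST = 0,
  EX_NEXT_INTEREST_MANIFEST,
  EX_NEXT_MAPME,
  EX_NEXT_ERROR_DROP,
};

static inline unsigned
ex_pick_next (unsigned is_mapme, unsigned is_manifest)
{
  /* is_mapme = 1                  -> EX_NEXT_MAPME
   * is_mapme = 0, is_manifest = 0 -> EX_NEXT_INTEREST
   * is_mapme = 0, is_manifest = 1 -> EX_NEXT_INTEREST_MANIFEST */
  return is_mapme * EX_NEXT_MAPME +
	 (1 - is_mapme) * (EX_NEXT_INTEREST + is_manifest);
}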
static uword
-hicn4_iface_input_node_fn (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * frame)
+hicn4_iface_input_node_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
+ vlib_frame_t *frame)
{
u32 n_left_from, *from, *to_next, next_index;
@@ -403,8 +506,7 @@ hicn4_iface_input_node_fn (vlib_main_t * vm,
vlib_put_next_frame (vm, node, next_index, n_left_to_next);
}
- vlib_node_increment_counter (vm, node->node_index,
- HICNFWD_ERROR_INTERESTS,
+ vlib_node_increment_counter (vm, node->node_index, HICNFWD_ERROR_INTERESTS,
stats.pkts_interest_count);
return (frame->n_vectors);
@@ -412,24 +514,29 @@ hicn4_iface_input_node_fn (vlib_main_t * vm,
/* packet trace format function */
static u8 *
-hicn4_iface_input_format_trace (u8 * s, va_list * args)
+hicn4_iface_input_format_trace (u8 *s, va_list *args)
{
CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
- hicn4_iface_input_trace_t *t =
- va_arg (*args, hicn4_iface_input_trace_t *);
+ hicn4_iface_input_trace_t *t = va_arg (*args, hicn4_iface_input_trace_t *);
+
+ s = format (s, "IFACE_IP4_INPUT: pkt: %d, sw_if_index %d, next index %d",
+ (int) t->pkt_type, t->sw_if_index, t->next_index);
+
+ if (t->error)
+ {
+ s = format (s, " drop reason: %s", get_error_string (t->error));
+ }
+
+ s = format (s, "\n%U", format_ip4_header, t->packet_data,
+ sizeof (t->packet_data));
- s =
- format (s, "IFACE_IP4_INPUT: pkt: %d, sw_if_index %d, next index %d\n%U",
- (int) t->pkt_type, t->sw_if_index, t->next_index,
- format_ip4_header, t->packet_data, sizeof (t->packet_data));
return (s);
}
/*
* Node registration for the interest forwarder node
*/
-/* *INDENT-OFF* */
VLIB_REGISTER_NODE (hicn4_iface_input_node) =
{
.function = hicn4_iface_input_node_fn,
@@ -444,16 +551,15 @@ VLIB_REGISTER_NODE (hicn4_iface_input_node) =
.next_nodes =
{
[HICN4_IFACE_INPUT_NEXT_INTEREST] = "hicn-interest-pcslookup",
+ [HICN4_IFACE_INPUT_NEXT_INTEREST_MANIFEST] = "hicn-interest-manifest-pcslookup",
[HICN4_IFACE_INPUT_NEXT_MAPME] = "hicn-mapme-ctrl",
[HICN4_IFACE_INPUT_NEXT_ERROR_DROP] = "error-drop",
},
};
-/* *INDENT-ON* */
static uword
-hicn6_iface_input_node_fn (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * frame)
+hicn6_iface_input_node_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
+ vlib_frame_t *frame)
{
u32 n_left_from, *from, *to_next, next_index;
@@ -483,8 +589,7 @@ hicn6_iface_input_node_fn (vlib_main_t * vm,
vlib_put_next_frame (vm, node, next_index, n_left_to_next);
}
- vlib_node_increment_counter (vm, node->node_index,
- HICNFWD_ERROR_INTERESTS,
+ vlib_node_increment_counter (vm, node->node_index, HICNFWD_ERROR_INTERESTS,
stats.pkts_interest_count);
return (frame->n_vectors);
@@ -492,24 +597,21 @@ hicn6_iface_input_node_fn (vlib_main_t * vm,
/* packet trace format function */
static u8 *
-hicn6_iface_input_format_trace (u8 * s, va_list * args)
+hicn6_iface_input_format_trace (u8 *s, va_list *args)
{
CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
- hicn6_iface_input_trace_t *t =
- va_arg (*args, hicn6_iface_input_trace_t *);
+ hicn6_iface_input_trace_t *t = va_arg (*args, hicn6_iface_input_trace_t *);
- s =
- format (s, "IFACE_IP6_INPUT: pkt: %d, sw_if_index %d, next index %d\n%U",
- (int) t->pkt_type, t->sw_if_index, t->next_index,
- format_ip6_header, t->packet_data, sizeof (t->packet_data));
+ s = format (s, "IFACE_IP6_INPUT: pkt: %d, sw_if_index %d, next index %d\n%U",
+ (int) t->pkt_type, t->sw_if_index, t->next_index,
+ format_ip6_header, t->packet_data, sizeof (t->packet_data));
return (s);
}
/*
* Node registration for the interest forwarder node
*/
-/* *INDENT-OFF* */
VLIB_REGISTER_NODE (hicn6_iface_input_node) =
{
.function = hicn6_iface_input_node_fn,
@@ -524,20 +626,20 @@ VLIB_REGISTER_NODE (hicn6_iface_input_node) =
.next_nodes =
{
[HICN6_IFACE_INPUT_NEXT_INTEREST] = "hicn-interest-pcslookup",
+ [HICN6_IFACE_INPUT_NEXT_INTEREST_MANIFEST] = "hicn-interest-manifest-pcslookup",
[HICN6_IFACE_INPUT_NEXT_MAPME] = "hicn-mapme-ctrl",
[HICN6_IFACE_INPUT_NEXT_ERROR_DROP] = "error-drop",
},
};
-/* *INDENT-ON* */
-
/**** IFACE OUTPUT *****/
static inline void
-hicn_rewrite_iface_data4 (vlib_main_t * vm, vlib_buffer_t * b0,
- const hicn_face_t * iface, u32 * next)
+hicn_rewrite_iface_data4 (vlib_main_t *vm, vlib_buffer_t *b0,
+ const hicn_face_t *iface, u32 *next)
{
ip4_header_t *ip0;
+ int ret = HICN_ERROR_NONE;
/* Get the pointer to the old ip and tcp header */
ip0 = vlib_buffer_get_current (b0);
@@ -546,29 +648,36 @@ hicn_rewrite_iface_data4 (vlib_main_t * vm, vlib_buffer_t * b0,
/* IP4 length includes the size of the ip4 header too */
u16 sval = (vlib_buffer_length_in_chain (vm, b0));
ip0->length = clib_host_to_net_u16 (sval);
- ip0->ttl = 254; // FIXME TTL
+ ip0->ttl = 254; // FIXME TTL
vnet_buffer (b0)->ip.adj_index[VLIB_TX] = iface->dpo.dpoi_index;
- *next = iface->dpo.dpoi_next_node;
- hicn_header_t *hicn = vlib_buffer_get_current (b0);
-
- ip46_address_t temp_addr;
- ip46_address_reset (&temp_addr);
- hicn_type_t type = hicn_get_buffer (b0)->type;
- int ret = hicn_ops_vft[type.l1]->rewrite_data (type, &hicn->protocol,
- &(iface->nat_addr), &(temp_addr),
- iface->pl_id);
+ *next = iface->iface_next;
+
+ hicn_packet_buffer_t *pkbuf = &hicn_get_buffer (b0)->pkbuf;
+
+ hicn_ip_address_t temp_addr;
+ ip46_address_reset (&(temp_addr.as_ip46));
+
+ hicn_ip_address_t *iface_nat_addr = (hicn_ip_address_t *) &(iface->nat_addr);
+
+ u8 flags = hicn_get_buffer (b0)->flags;
+ u8 reset_pl = flags & HICN_BUFFER_FLAGS_FROM_CS;
+
+ ret = hicn_data_rewrite (pkbuf, iface_nat_addr, &(temp_addr), iface->pl_id,
+ reset_pl);
+
if (ret == HICN_LIB_ERROR_REWRITE_CKSUM_REQUIRED)
{
- ensure_offload_flags(b0, 1 /* is_v4 */);
+ ensure_offload_flags (b0, 1 /* is_v4 */);
}
}
static inline void
-hicn_rewrite_iface_data6 (vlib_main_t * vm, vlib_buffer_t * b0,
- const hicn_face_t * iface, u32 * next)
+hicn_rewrite_iface_data6 (vlib_main_t *vm, vlib_buffer_t *b0,
+ const hicn_face_t *iface, u32 *next)
{
ip6_header_t *ip0;
+ int ret = HICN_ERROR_NONE;
/* Get the pointer to the old ip and tcp header */
/* Copy the previous ip and tcp header to the new portion of memory */
@@ -581,190 +690,178 @@ hicn_rewrite_iface_data6 (vlib_main_t * vm, vlib_buffer_t * b0,
ip0->hop_limit = HICN_IP6_HOP_LIMIT;
vnet_buffer (b0)->ip.adj_index[VLIB_TX] = iface->dpo.dpoi_index;
- *next = iface->dpo.dpoi_next_node;
+ *next = iface->iface_next;
- hicn_header_t *hicn = vlib_buffer_get_current (b0);
+ hicn_packet_buffer_t *pkbuf = &hicn_get_buffer (b0)->pkbuf;
- ip46_address_t temp_addr;
- ip46_address_reset (&temp_addr);
- hicn_type_t type = hicn_get_buffer (b0)->type;
- int ret = hicn_ops_vft[type.l1]->rewrite_data (type, &hicn->protocol,
- &(iface->nat_addr), &(temp_addr),
- iface->pl_id);
+ hicn_ip_address_t temp_addr;
+ ip46_address_reset (&(temp_addr.as_ip46));
+
+ hicn_ip_address_t *iface_nat_addr = (hicn_ip_address_t *) &(iface->nat_addr);
+ u8 flags = hicn_get_buffer (b0)->flags;
+ u8 reset_pl = flags & HICN_BUFFER_FLAGS_FROM_CS;
+
+ ret = hicn_data_rewrite (pkbuf, iface_nat_addr, &(temp_addr), iface->pl_id,
+ reset_pl);
if (ret == HICN_LIB_ERROR_REWRITE_CKSUM_REQUIRED)
{
- ensure_offload_flags(b0, 0 /* is_v4 */);
+ ensure_offload_flags (b0, 0 /* is_v4 */);
}
}
-#define iface_output_x1(ipv) \
- do { \
- vlib_buffer_t *b0; \
- u32 bi0; \
- u32 next0 = next_index; \
- hicn_face_t * face; \
- \
- /* Prefetch for next iteration. */ \
- if (n_left_from > 1) \
- { \
- vlib_buffer_t *b1; \
- b1 = vlib_get_buffer (vm, from[1]); \
- CLIB_PREFETCH (b1, CLIB_CACHE_LINE_BYTES, STORE); \
- CLIB_PREFETCH (b1->data, CLIB_CACHE_LINE_BYTES , STORE); \
- } \
- /* Dequeue a packet buffer */ \
- bi0 = from[0]; \
- from += 1; \
- n_left_from -= 1; \
- to_next[0] = bi0; \
- to_next += 1; \
- n_left_to_next -= 1; \
- \
- b0 = vlib_get_buffer (vm, bi0); \
- \
- hicn_face_id_t face_id = vnet_buffer (b0)->ip.adj_index[VLIB_TX]; \
- face = \
- hicn_dpoi_get_from_idx (face_id); \
- \
- if (PREDICT_TRUE(face != NULL)) \
- { \
- HICN_REWRITE_DATA_IP##ipv \
- (vm, b0, face, &next0); \
- stats.pkts_data_count += 1; \
- vlib_increment_combined_counter ( \
- &counters[face_id * HICN_N_COUNTER], \
- thread_index, \
- HICN_FACE_COUNTERS_DATA_TX, \
- 1, \
- vlib_buffer_length_in_chain(vm, b0));\
- } \
- \
- if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) && \
- (b0->flags & VLIB_BUFFER_IS_TRACED))) \
- { \
- TRACE_OUTPUT_PKT_IP##ipv *t = \
- vlib_add_trace (vm, node, b0, sizeof (*t)); \
- t->pkt_type = HICN_PKT_TYPE_INTEREST; \
- t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX]; \
- t->next_index = next0; \
- clib_memcpy_fast (t->packet_data, \
- vlib_buffer_get_current (b0), \
- sizeof (t->packet_data)); \
- } \
- \
- \
- /* Verify speculative enqueue, maybe switch current next frame */ \
- vlib_validate_buffer_enqueue_x1 (vm, node, next_index, \
- to_next, n_left_to_next, \
- bi0, next0); \
- }while(0); \
-
-
-#define iface_output_x2(ipv) \
- do { \
- vlib_buffer_t *b0, *b1; \
- u32 bi0, bi1; \
- u32 next0 = next_index; \
- u32 next1 = next_index; \
- hicn_face_t *face0, *face1; \
- \
- /* Prefetch for next iteration. */ \
- { \
- vlib_buffer_t *b2, *b3; \
- b2 = vlib_get_buffer (vm, from[2]); \
- b3 = vlib_get_buffer (vm, from[3]); \
- CLIB_PREFETCH (b2, CLIB_CACHE_LINE_BYTES, STORE); \
- CLIB_PREFETCH (b3, CLIB_CACHE_LINE_BYTES, STORE); \
- CLIB_PREFETCH (b2->data, CLIB_CACHE_LINE_BYTES , STORE); \
- CLIB_PREFETCH (b3->data, CLIB_CACHE_LINE_BYTES , STORE); \
- } \
- \
- /* Dequeue a packet buffer */ \
- bi0 = from[0]; \
- bi1 = from[1]; \
- from += 2; \
- n_left_from -= 2; \
- to_next[0] = bi0; \
- to_next[1] = bi1; \
- to_next += 2; \
- n_left_to_next -= 2; \
- \
- b0 = vlib_get_buffer (vm, bi0); \
- b1 = vlib_get_buffer (vm, bi1); \
- \
- hicn_face_id_t face_id0 = vnet_buffer (b0)->ip.adj_index[VLIB_TX]; \
- hicn_face_id_t face_id1 = vnet_buffer (b1)->ip.adj_index[VLIB_TX]; \
- face0 = \
- hicn_dpoi_get_from_idx (face_id0); \
- face1 = \
- hicn_dpoi_get_from_idx (face_id1); \
- \
- if (PREDICT_TRUE(face0 != NULL)) \
- { \
- HICN_REWRITE_DATA_IP##ipv \
- (vm, b0, face0, &next0); \
- stats.pkts_data_count += 1; \
- vlib_increment_combined_counter ( \
- &counters[face_id0 * HICN_N_COUNTER], \
- thread_index, \
- HICN_FACE_COUNTERS_DATA_TX, \
- 1, \
- vlib_buffer_length_in_chain(vm, b0));\
- } \
- \
- if (PREDICT_TRUE(face1 != NULL)) \
- { \
- HICN_REWRITE_DATA_IP##ipv \
- (vm, b1, face1, &next1); \
- stats.pkts_data_count += 1; \
- vlib_increment_combined_counter ( \
- &counters[face_id1 * HICN_N_COUNTER], \
- thread_index, \
- HICN_FACE_COUNTERS_DATA_TX, \
- 1, \
- vlib_buffer_length_in_chain(vm, b1)); \
- } \
- \
- if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) && \
- (b0->flags & VLIB_BUFFER_IS_TRACED))) \
- { \
- TRACE_OUTPUT_PKT_IP##ipv *t = \
- vlib_add_trace (vm, node, b0, sizeof (*t)); \
- t->pkt_type = HICN_PKT_TYPE_INTEREST; \
- t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX]; \
- t->next_index = next0; \
- clib_memcpy_fast (t->packet_data, \
- vlib_buffer_get_current (b0), \
- sizeof (t->packet_data)); \
- } \
- \
- if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) && \
- (b1->flags & VLIB_BUFFER_IS_TRACED))) \
- { \
- TRACE_OUTPUT_PKT_IP##ipv *t = \
- vlib_add_trace (vm, node, b1, sizeof (*t)); \
- t->pkt_type = HICN_PKT_TYPE_INTEREST; \
- t->sw_if_index = vnet_buffer (b1)->sw_if_index[VLIB_RX]; \
- t->next_index = next1; \
- clib_memcpy_fast (t->packet_data, \
- vlib_buffer_get_current (b1), \
- sizeof (t->packet_data)); \
- } \
- \
- \
- /* Verify speculative enqueue, maybe switch current next frame */ \
- vlib_validate_buffer_enqueue_x2 (vm, node, next_index, \
- to_next, n_left_to_next, \
- bi0, bi1, next0, next1); \
- }while(0); \
-
-
+#define iface_output_x1(ipv) \
+ do \
+ { \
+ vlib_buffer_t *b0; \
+ u32 bi0; \
+ u32 next0 = next_index; \
+ hicn_face_t *face = NULL; \
+ \
+ /* Prefetch for next iteration. */ \
+ if (n_left_from > 1) \
+ { \
+ vlib_buffer_t *b1; \
+ b1 = vlib_get_buffer (vm, from[1]); \
+ CLIB_PREFETCH (b1, CLIB_CACHE_LINE_BYTES, STORE); \
+ CLIB_PREFETCH (b1->data, CLIB_CACHE_LINE_BYTES, STORE); \
+ } \
+ /* Dequeue a packet buffer */ \
+ bi0 = from[0]; \
+ from += 1; \
+ n_left_from -= 1; \
+ to_next[0] = bi0; \
+ to_next += 1; \
+ n_left_to_next -= 1; \
+ \
+ b0 = vlib_get_buffer (vm, bi0); \
+ \
+ hicn_face_id_t face_id = vnet_buffer (b0)->ip.adj_index[VLIB_TX]; \
+ if (PREDICT_TRUE (hicn_dpoi_idx_is_valid (face_id))) \
+ face = hicn_dpoi_get_from_idx (face_id); \
+ \
+ if (PREDICT_TRUE (face != NULL)) \
+ { \
+ HICN_REWRITE_DATA_IP##ipv (vm, b0, face, &next0); \
+ stats.pkts_data_count += 1; \
+ vlib_increment_combined_counter ( \
+ &counters[face_id * HICN_N_COUNTER], thread_index, \
+ HICN_FACE_COUNTERS_DATA_TX, 1, \
+ vlib_buffer_length_in_chain (vm, b0)); \
+ } \
+ \
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) && \
+ (b0->flags & VLIB_BUFFER_IS_TRACED))) \
+ { \
+ TRACE_OUTPUT_PKT_IP##ipv *t = \
+ vlib_add_trace (vm, node, b0, sizeof (*t)); \
+ t->pkt_type = HICN_PACKET_TYPE_INTEREST; \
+ t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX]; \
+ t->next_index = next0; \
+ clib_memcpy_fast (t->packet_data, vlib_buffer_get_current (b0), \
+ sizeof (t->packet_data)); \
+ } \
+ \
+ /* Verify speculative enqueue, maybe switch current next frame */ \
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next, \
+ n_left_to_next, bi0, next0); \
+ } \
+ while (0);
+
+#define iface_output_x2(ipv) \
+ do \
+ { \
+ vlib_buffer_t *b0, *b1; \
+ u32 bi0, bi1; \
+ u32 next0 = next_index; \
+ u32 next1 = next_index; \
+ hicn_face_t *face0 = NULL, *face1 = NULL; \
+ \
+ /* Prefetch for next iteration. */ \
+ { \
+ vlib_buffer_t *b2, *b3; \
+ b2 = vlib_get_buffer (vm, from[2]); \
+ b3 = vlib_get_buffer (vm, from[3]); \
+ CLIB_PREFETCH (b2, CLIB_CACHE_LINE_BYTES, STORE); \
+ CLIB_PREFETCH (b3, CLIB_CACHE_LINE_BYTES, STORE); \
+ CLIB_PREFETCH (b2->data, CLIB_CACHE_LINE_BYTES, STORE); \
+ CLIB_PREFETCH (b3->data, CLIB_CACHE_LINE_BYTES, STORE); \
+ } \
+ \
+ /* Dequeue a packet buffer */ \
+ bi0 = from[0]; \
+ bi1 = from[1]; \
+ from += 2; \
+ n_left_from -= 2; \
+ to_next[0] = bi0; \
+ to_next[1] = bi1; \
+ to_next += 2; \
+ n_left_to_next -= 2; \
+ \
+ b0 = vlib_get_buffer (vm, bi0); \
+ b1 = vlib_get_buffer (vm, bi1); \
+ \
+ hicn_face_id_t face_id0 = vnet_buffer (b0)->ip.adj_index[VLIB_TX]; \
+ hicn_face_id_t face_id1 = vnet_buffer (b1)->ip.adj_index[VLIB_TX]; \
+ if (PREDICT_TRUE (hicn_dpoi_idx_is_valid (face_id0))) \
+ face0 = hicn_dpoi_get_from_idx (face_id0); \
+ if (PREDICT_TRUE (hicn_dpoi_idx_is_valid (face_id1))) \
+ face1 = hicn_dpoi_get_from_idx (face_id1); \
+ \
+ if (PREDICT_TRUE (face0 != NULL)) \
+ { \
+ HICN_REWRITE_DATA_IP##ipv (vm, b0, face0, &next0); \
+ stats.pkts_data_count += 1; \
+ vlib_increment_combined_counter ( \
+ &counters[face_id0 * HICN_N_COUNTER], thread_index, \
+ HICN_FACE_COUNTERS_DATA_TX, 1, \
+ vlib_buffer_length_in_chain (vm, b0)); \
+ } \
+ \
+ if (PREDICT_TRUE (face1 != NULL)) \
+ { \
+ HICN_REWRITE_DATA_IP##ipv (vm, b1, face1, &next1); \
+ stats.pkts_data_count += 1; \
+ vlib_increment_combined_counter ( \
+ &counters[face_id1 * HICN_N_COUNTER], thread_index, \
+ HICN_FACE_COUNTERS_DATA_TX, 1, \
+ vlib_buffer_length_in_chain (vm, b1)); \
+ } \
+ \
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) && \
+ (b0->flags & VLIB_BUFFER_IS_TRACED))) \
+ { \
+ TRACE_OUTPUT_PKT_IP##ipv *t = \
+ vlib_add_trace (vm, node, b0, sizeof (*t)); \
+ t->pkt_type = HICN_PACKET_TYPE_INTEREST; \
+ t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX]; \
+ t->next_index = next0; \
+ clib_memcpy_fast (t->packet_data, vlib_buffer_get_current (b0), \
+ sizeof (t->packet_data)); \
+ } \
+ \
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) && \
+ (b1->flags & VLIB_BUFFER_IS_TRACED))) \
+ { \
+ TRACE_OUTPUT_PKT_IP##ipv *t = \
+ vlib_add_trace (vm, node, b1, sizeof (*t)); \
+ t->pkt_type = HICN_PACKET_TYPE_INTEREST; \
+ t->sw_if_index = vnet_buffer (b1)->sw_if_index[VLIB_RX]; \
+ t->next_index = next1; \
+ clib_memcpy_fast (t->packet_data, vlib_buffer_get_current (b1), \
+ sizeof (t->packet_data)); \
+ } \
+ \
+ /* Verify speculative enqueue, maybe switch current next frame */ \
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next, \
+ n_left_to_next, bi0, bi1, next0, \
+ next1); \
+ } \
+ while (0);
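Note how the x1/x2 output macros above seed next0/next1 with next_index, and the node functions below now initialize next_index to HICN4_IFACE_OUTPUT_NEXT_DROP / HICN6_IFACE_OUTPUT_NEXT_DROP instead of cached_next_index. Combined with the new hicn_dpoi_idx_is_valid() guard, a buffer carrying an invalid face id never reaches the rewrite step and simply falls through to the "error-drop" arc added to both registrations.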
static uword
-hicn4_iface_output_node_fn (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * frame)
+hicn4_iface_output_node_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
+ vlib_frame_t *frame)
{
u32 n_left_from, *from, *to_next, next_index;
vl_api_hicn_api_node_stats_get_reply_t stats = { 0 };
@@ -772,7 +869,7 @@ hicn4_iface_output_node_fn (vlib_main_t * vm,
from = vlib_frame_vector_args (frame);
n_left_from = frame->n_vectors;
- next_index = node->cached_next_index;
+ next_index = HICN4_IFACE_OUTPUT_NEXT_DROP;
while (n_left_from > 0)
{
@@ -792,20 +889,19 @@ hicn4_iface_output_node_fn (vlib_main_t * vm,
vlib_put_next_frame (vm, node, next_index, n_left_to_next);
}
- vlib_node_increment_counter (vm, node->node_index,
- HICNFWD_ERROR_DATAS, stats.pkts_data_count);
+ vlib_node_increment_counter (vm, node->node_index, HICNFWD_ERROR_DATAS,
+ stats.pkts_data_count);
return (frame->n_vectors);
}
/* packet trace format function */
static u8 *
-hicn4_iface_output_format_trace (u8 * s, va_list * args)
+hicn4_iface_output_format_trace (u8 *s, va_list *args)
{
CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
- hicn4_iface_output_trace_t *t =
- va_arg (*args, hicn4_iface_output_trace_t *);
+ hicn4_iface_output_trace_t *t = va_arg (*args, hicn4_iface_output_trace_t *);
s =
format (s, "IFACE_IP4_OUTPUT: pkt: %d, sw_if_index %d, next index %d\n%U",
@@ -817,32 +913,26 @@ hicn4_iface_output_format_trace (u8 * s, va_list * args)
/*
* Node registration for the interest forwarder node
*/
-/* *INDENT-OFF* */
-VLIB_REGISTER_NODE (hicn4_iface_output_node) =
-{
+VLIB_REGISTER_NODE (hicn4_iface_output_node) = {
.function = hicn4_iface_output_node_fn,
.name = "hicn4-iface-output",
- .vector_size = sizeof (u32),
+ .vector_size = sizeof (u32),
.format_trace = hicn4_iface_output_format_trace,
.type = VLIB_NODE_TYPE_INTERNAL,
.n_errors = ARRAY_LEN (hicn4_iface_output_error_strings),
.error_strings = hicn4_iface_output_error_strings,
.n_next_nodes = HICN4_IFACE_OUTPUT_N_NEXT,
/* edit / add dispositions here */
- .next_nodes =
- {
- [HICN4_IFACE_OUTPUT_NEXT_LOOKUP] = "ip4-lookup",
- [HICN4_IFACE_OUTPUT_NEXT_UDP4_ENCAP] = "udp4-encap",
- [HICN4_IFACE_OUTPUT_NEXT_UDP6_ENCAP] = "udp6-encap"
- },
+ .next_nodes = { [HICN4_IFACE_OUTPUT_NEXT_DROP] = "error-drop",
+ [HICN4_IFACE_OUTPUT_NEXT_LOOKUP] = "ip4-lookup",
+ [HICN4_IFACE_OUTPUT_NEXT_UDP4_ENCAP] = "udp4-encap",
+ [HICN4_IFACE_OUTPUT_NEXT_UDP6_ENCAP] = "udp6-encap",
+ [HICN4_IFACE_OUTPUT_NEXT_PG] = "hicnpg-data" },
};
-/* *INDENT-ON* */
-
static uword
-hicn6_iface_output_node_fn (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * frame)
+hicn6_iface_output_node_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
+ vlib_frame_t *frame)
{
u32 n_left_from, *from, *to_next, next_index;
vl_api_hicn_api_node_stats_get_reply_t stats = { 0 };
@@ -850,7 +940,7 @@ hicn6_iface_output_node_fn (vlib_main_t * vm,
from = vlib_frame_vector_args (frame);
n_left_from = frame->n_vectors;
- next_index = node->cached_next_index;
+ next_index = HICN6_IFACE_OUTPUT_NEXT_DROP;
while (n_left_from > 0)
{
@@ -869,20 +959,19 @@ hicn6_iface_output_node_fn (vlib_main_t * vm,
vlib_put_next_frame (vm, node, next_index, n_left_to_next);
}
- vlib_node_increment_counter (vm, node->node_index,
- HICNFWD_ERROR_DATAS, stats.pkts_data_count);
+ vlib_node_increment_counter (vm, node->node_index, HICNFWD_ERROR_DATAS,
+ stats.pkts_data_count);
return (frame->n_vectors);
}
/* packet trace format function */
static u8 *
-hicn6_iface_output_format_trace (u8 * s, va_list * args)
+hicn6_iface_output_format_trace (u8 *s, va_list *args)
{
CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
- hicn6_iface_output_trace_t *t =
- va_arg (*args, hicn6_iface_output_trace_t *);
+ hicn6_iface_output_trace_t *t = va_arg (*args, hicn6_iface_output_trace_t *);
s =
format (s, "IFACE_IP6_OUTPUT: pkt: %d, sw_if_index %d, next index %d\n%U",
@@ -894,27 +983,22 @@ hicn6_iface_output_format_trace (u8 * s, va_list * args)
/*
* Node registration for the interest forwarder node
*/
-/* *INDENT-OFF* */
-VLIB_REGISTER_NODE (hicn6_iface_output_node) =
-{
+VLIB_REGISTER_NODE (hicn6_iface_output_node) = {
.function = hicn6_iface_output_node_fn,
.name = "hicn6-iface-output",
- .vector_size = sizeof (u32),
+ .vector_size = sizeof (u32),
.format_trace = hicn6_iface_output_format_trace,
.type = VLIB_NODE_TYPE_INTERNAL,
.n_errors = ARRAY_LEN (hicn6_iface_output_error_strings),
.error_strings = hicn6_iface_output_error_strings,
.n_next_nodes = HICN6_IFACE_OUTPUT_N_NEXT,
/* edit / add dispositions here */
- .next_nodes =
- {
- [HICN6_IFACE_OUTPUT_NEXT_LOOKUP] = "ip6-lookup",
- [HICN6_IFACE_OUTPUT_NEXT_UDP4_ENCAP] = "udp4-encap",
- [HICN6_IFACE_OUTPUT_NEXT_UDP6_ENCAP] = "udp6-encap"
-
- },
+ .next_nodes = { [HICN6_IFACE_OUTPUT_NEXT_DROP] = "error-drop",
+ [HICN6_IFACE_OUTPUT_NEXT_LOOKUP] = "ip6-lookup",
+ [HICN6_IFACE_OUTPUT_NEXT_UDP4_ENCAP] = "udp4-encap",
+ [HICN6_IFACE_OUTPUT_NEXT_UDP6_ENCAP] = "udp6-encap",
+ [HICN6_IFACE_OUTPUT_NEXT_PG] = "hicnpg-data" },
};
-/* *INDENT-ON* */
/*
* fd.io coding-style-patch-verification: ON
diff --git a/hicn-plugin/src/faces/iface_node.h b/hicn-plugin/src/faces/iface_node.h
index 1a7c4291b..d580c9e31 100644
--- a/hicn-plugin/src/faces/iface_node.h
+++ b/hicn-plugin/src/faces/iface_node.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020 Cisco and/or its affiliates.
+ * Copyright (c) 2021 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -30,18 +30,37 @@
 * is to create (or retrieve, if already existing) the list of incoming faces
 * for each interest packet.
 * The node following the input iface nodes is the hicn-interest-pcslookup.
- * Output iface nodes follow the hicn-data-fwd and the hicn-interest-hitcs nodes and
- * they perform the dst nat on each data packet. The node following the
- * output face nodes depends on the adjacency type. In case of ip, the following
- * node is the ip4/6-lookup, in case of tunnels the next node is the one implementing
- * the tunnel encapsulation (udp-encap, mpls, etc).
+ * Output iface nodes follow the hicn-data-fwd and the hicn-interest-hitcs
+ * nodes and they perform the dst nat on each data packet. The node following
+ * the output face nodes depends on the adjacency type. In case of ip, the
+ * following node is the ip4/6-lookup, in case of tunnels the next node is the
+ * one implementing the tunnel encapsulation (udp-encap, mpls, etc).
*/
+typedef enum
+{
+ HICN4_IFACE_OUTPUT_NEXT_DROP,
+ HICN4_IFACE_OUTPUT_NEXT_LOOKUP,
+ HICN4_IFACE_OUTPUT_NEXT_UDP4_ENCAP,
+ HICN4_IFACE_OUTPUT_NEXT_UDP6_ENCAP,
+ HICN4_IFACE_OUTPUT_NEXT_PG,
+ HICN4_IFACE_OUTPUT_N_NEXT,
+} hicn4_iface_output_next_t;
+
+typedef enum
+{
+ HICN6_IFACE_OUTPUT_NEXT_DROP,
+ HICN6_IFACE_OUTPUT_NEXT_LOOKUP,
+ HICN6_IFACE_OUTPUT_NEXT_UDP4_ENCAP,
+ HICN6_IFACE_OUTPUT_NEXT_UDP6_ENCAP,
+ HICN6_IFACE_OUTPUT_NEXT_PG,
+ HICN6_IFACE_OUTPUT_N_NEXT,
+} hicn6_iface_output_next_t;
/**
* @brief Initialize the ip iface module
*/
-void hicn_iface_init (vlib_main_t * vm);
+void hicn_iface_init (vlib_main_t *vm);
#endif // __HICN_IFACE_IP_NODE_H__
diff --git a/hicn-plugin/src/faces/inlines.h b/hicn-plugin/src/faces/inlines.h
index bfe56c8e6..ad9e26b62 100644
--- a/hicn-plugin/src/faces/inlines.h
+++ b/hicn-plugin/src/faces/inlines.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020 Cisco and/or its affiliates.
+ * Copyright (c) 2021 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -19,24 +19,21 @@
#include <vlib/buffer.h>
always_inline void
-ensure_offload_flags (vlib_buffer_t * b, int is_v4)
+ensure_offload_flags (vlib_buffer_t *b, int is_v4)
{
- b->flags |= VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;
- b->flags |= is_v4 * VNET_BUFFER_F_OFFLOAD_IP_CKSUM;
- size_t l3_header_size = is_v4 * sizeof(ip4_header_t) + (!is_v4) * sizeof(ip6_header_t);
+ vnet_buffer_offload_flags_set (b, VNET_BUFFER_OFFLOAD_F_TCP_CKSUM);
+ vnet_buffer_offload_flags_set (b, is_v4 * VNET_BUFFER_OFFLOAD_F_IP_CKSUM);
+
+ size_t l3_header_size =
+ is_v4 * sizeof (ip4_header_t) + (!is_v4) * sizeof (ip6_header_t);
/* Make sure l3_hdr_offset and l4_hdr_offset are set */
- if (!(b->flags & VNET_BUFFER_F_L3_HDR_OFFSET_VALID))
- {
- b->flags |= VNET_BUFFER_F_L3_HDR_OFFSET_VALID;
- vnet_buffer(b)->l3_hdr_offset = b->current_data;
- }
- if (!(b->flags & VNET_BUFFER_F_L4_HDR_OFFSET_VALID))
- {
- b->flags |= VNET_BUFFER_F_L4_HDR_OFFSET_VALID;
- vnet_buffer(b)->l4_hdr_offset =
- vnet_buffer(b)->l3_hdr_offset + l3_header_size;
- }
+ b->flags |= VNET_BUFFER_F_L3_HDR_OFFSET_VALID;
+ vnet_buffer (b)->l3_hdr_offset = b->current_data;
+
+ b->flags |= VNET_BUFFER_F_L4_HDR_OFFSET_VALID;
+ vnet_buffer (b)->l4_hdr_offset =
+ vnet_buffer (b)->l3_hdr_offset + l3_header_size;
}
#endif /* __HICN_FACE_INLINES_H__ */ \ No newline at end of file
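For reference, the l3_header_size arithmetic in the reworked ensure_offload_flags() evaluates to sizeof (ip4_header_t), i.e. 20 bytes, when is_v4 is 1, and to sizeof (ip6_header_t), i.e. 40 bytes, otherwise; l4_hdr_offset therefore lands on the first byte of the transport header in both cases (assuming no IPv6 extension headers sit between the IP and transport headers).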
diff --git a/hicn-plugin/src/hashtb.c b/hicn-plugin/src/hashtb.c
deleted file mode 100644
index 6deddbd84..000000000
--- a/hicn-plugin/src/hashtb.c
+++ /dev/null
@@ -1,1017 +0,0 @@
-/*
- * Copyright (c) 2017-2019 Cisco and/or its affiliates.
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at:
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <stdlib.h>
-#include <errno.h>
-#include <assert.h>
-#include <inttypes.h>
-
-#include <vlib/vlib.h>
-#include <vppinfra/pool.h>
-
-#include "pcs.h"
-#include "hashtb.h"
-#include "parser.h"
-#include "error.h"
-
-/* return dvd/dvr, rounded up (intended for integer values) */
-#define CEIL(dvd, dvr) \
- ({ \
- __typeof__ (dvd) _dvd = (dvd); \
- __typeof__ (dvr) _dvr = (dvr); \
- (_dvd + _dvr - 1)/_dvr; \
- })
-
-#ifndef ALIGN8
-#define ALIGN8(p) (((p) + 0x7) & ~(0x7))
-#endif
-
-#ifndef ALIGNPTR8
-#define ALIGNPTR8(p) ((void *)(((u8 * )(p) + 0x7) & ~(0x7)))
-#endif
-
-#ifndef ALIGN64
-#define ALIGN64(p) (((p) + 0x3f) & ~(0x3f))
-#endif
-
-#ifndef TRUE
-#define TRUE 1
-#endif
-
-#ifndef FALSE
-#define FALSE 0
-#endif
-
-
-/*
- * Offset to aligned start of additional data (PIT/CS, FIB) embedded in each
- * node.
- */
-u32 ht_node_data_offset_aligned;
-
-/* Some support for posix vs vpp mem management */
-#define MEM_ALLOC(x) clib_mem_alloc_aligned((x), 8)
-#define MEM_FREE(p) clib_mem_free((p))
-
-/*
- * Internal utilities
- */
-
-/* Allocate an overflow bucket */
-static hicn_hash_bucket_t *
-alloc_overflow_bucket (hicn_hashtb_h h)
-{
- hicn_hash_bucket_t *newbkt = NULL;
-
- if (h->ht_overflow_buckets_used < h->ht_overflow_bucket_count)
- {
- pool_get_aligned (h->ht_overflow_buckets, newbkt, 8);
-
- if (newbkt)
- {
- h->ht_overflow_buckets_used++;
- }
- }
- return (newbkt);
-}
-
-/* Free an overflow bucket; clear caller's pointer */
-static void
-free_overflow_bucket (hicn_hashtb_h h, hicn_hash_bucket_t ** pb)
-{
- hicn_hash_bucket_t *bkt = *pb;
-
- ASSERT (h->ht_overflow_buckets_used > 0);
-
- pool_put (h->ht_overflow_buckets, bkt);
- h->ht_overflow_buckets_used--;
- *pb = NULL;
-}
-
-/*
- * Init, allocate a new hashtable
- */
-int
-hicn_hashtb_alloc (hicn_hashtb_h * ph, u32 max_elems, size_t app_data_size)
-{
- int ret = HICN_ERROR_NONE;
- hicn_hashtb_h h = NULL;
- u32 count;
- u32 total_buckets;
- size_t sz;
- hicn_hash_node_t *nodep;
- hicn_hash_bucket_t *bucket;
-
- if (ph == NULL)
- {
- ret = HICN_ERROR_HASHTB_INVAL;
- goto done;
- }
- if (max_elems < HICN_HASHTB_MIN_ENTRIES ||
- max_elems > HICN_HASHTB_MAX_ENTRIES)
- {
- goto done;
- }
- /* Allocate and init main hashtable struct */
- h = MEM_ALLOC (sizeof (hicn_hashtb_t));
- if (h == NULL)
- {
- ret = HICN_ERROR_HASHTB_NOMEM;
- goto done;
- }
- memset (h, 0, sizeof (hicn_hashtb_t));
-
- /* Compute main table bucket (row) count and size, and allocate */
-
- /* Consider the last entry as used for containing the overflow bucket */
- total_buckets = CEIL (max_elems, HICN_HASHTB_BUCKET_ENTRIES - 1);
- count = ALIGN8 (CEIL (total_buckets, HICN_HASHTB_FILL_FACTOR));
-
- h->ht_bucket_count = count;
-
- /* We _really_ expect to have buckets aligned on cache lines ... */
- sz = sizeof (hicn_hash_bucket_t);
- assert (sz == ALIGN64 (sz));
-
- h->ht_buckets = MEM_ALLOC (count * sz);
- if (h->ht_buckets == NULL)
- {
- ret = HICN_ERROR_HASHTB_NOMEM;
- goto done;
- }
- memset (h->ht_buckets, 0, count * sz);
-
- /*
- * First time through, compute offset to aligned extra data start in
- * each node struct; it's crucial that both the node struct (that the
- * base hashtable uses) and the extra data area (that's also probably
- * a struct) are aligned.
- */
- if (ht_node_data_offset_aligned == 0)
- {
- count = STRUCT_OFFSET_OF (hicn_hash_node_t, hn_data);
- ht_node_data_offset_aligned = ALIGN8 (count);
- }
- // check that the app struct fits into the space provided (HICN_HASH_NODE_APP_DATA_SIZE)
- u32 ht_node_data_size;
- ht_node_data_size = sizeof (hicn_hash_node_t) - ht_node_data_offset_aligned;
- if (app_data_size > ht_node_data_size)
- {
- clib_error
- ("hicn hashtable: fatal error: requested app data size(%u) > hashtb node's configured bytes available(%u), sizeof(hicn_shared_t)=%u, sizeof(hicn_pit_entry_t)=%u, sizeof(hicn_cs_entry_t)=%u",
- app_data_size, ht_node_data_size, sizeof (hicn_pcs_shared_t),
- sizeof (hicn_pit_entry_t), sizeof (hicn_cs_entry_t));
- }
- /*
- * Compute entry node count and size, then allocate. 'Hide' the
- * zero-th node so we can use zero as an 'empty' value.
- */
- pool_alloc_aligned (h->ht_nodes, max_elems, 8);
- if (h->ht_nodes == NULL)
- {
- ret = HICN_ERROR_HASHTB_NOMEM;
- goto done;
- }
- pool_get_aligned (h->ht_nodes, nodep, 8);
- //alloc node 0
- nodep = nodep; /* Silence 'not used' warning */
-
- h->ht_node_count = max_elems;
- h->ht_nodes_used = 1;
-
- /*
- * Compute overflow bucket count and size, allocate
- */
- //count = ALIGN8(CEIL(max_elems, HICN_HASHTB_OVERFLOW_FRACTION));
- count = ALIGN8 (total_buckets - h->ht_bucket_count);
-
- pool_alloc_aligned (h->ht_overflow_buckets, count, 8);
- if (h->ht_overflow_buckets == NULL)
- {
- ret = HICN_ERROR_HASHTB_NOMEM;
- goto done;
- }
- /* 'Hide' the zero-th node so we can use zero as an 'empty' value */
- pool_get_aligned (h->ht_overflow_buckets, bucket, 8);
- bucket = bucket; /* Silence 'not used' warning */
-
- h->ht_overflow_bucket_count = count;
- h->ht_overflow_buckets_used = 1;
-
-done:
-
- if (h)
- {
- if ((ret == HICN_ERROR_NONE) && ph)
- {
- *ph = h;
- }
- else
- {
- hicn_hashtb_free (&h);
- }
- }
- return (ret);
-}
-
-/*
- * Free, de-allocate a hashtable
- */
-int
-hicn_hashtb_free (hicn_hashtb_h * ph)
-{
- int ret = 0;
-
- if (ph)
- {
- if ((*ph)->ht_nodes)
- {
- pool_free ((*ph)->ht_nodes);
- (*ph)->ht_nodes = 0;
- }
- if ((*ph)->ht_overflow_buckets)
- {
- pool_free ((*ph)->ht_overflow_buckets);
- (*ph)->ht_overflow_buckets = 0;
- }
- if ((*ph)->ht_buckets)
- {
- MEM_FREE ((*ph)->ht_buckets);
- (*ph)->ht_buckets = 0;
- }
- MEM_FREE (*ph);
-
- *ph = NULL;
- }
- return (ret);
-}
-
-
-
-/*
- * Basic api to lookup a specific hash+key tuple. This does the entire lookup
- * operation, retrieving node structs and comparing keys, so it's not
- * optimized for prefetching or high performance.
- *
- * Returns zero and mails back a node on success, errno otherwise.
- */
-int
-hicn_hashtb_lookup_node (hicn_hashtb_h h, const u8 * key,
- u32 keylen, u64 hashval, u8 is_data,
- u32 * node_id, index_t * dpo_ctx_id, u8 * vft_id,
- u8 * is_cs, u8 * hash_entry_id, u32 * bucket_id,
- u8 * bucket_is_overflow)
-{
- return (hicn_hashtb_lookup_node_ex
- (h, key, keylen, hashval, is_data, FALSE /* deleted nodes */ ,
- node_id,
- dpo_ctx_id, vft_id, is_cs, hash_entry_id, bucket_id,
- bucket_is_overflow));
-}
-
-/*
- * Extended api to lookup a specific hash+key tuple. The implementation
- * allows the caller to locate nodes that are marked for deletion, which is
- * part of some hashtable applications, such as the FIB.
- *
- * This does the entire lookup operation, retrieving node structs and comparing
- * keys, so it's not optimized for prefetching or high performance.
- *
- * Returns zero and mails back a node on success, errno otherwise.
- */
-int
-hicn_hashtb_lookup_node_ex (hicn_hashtb_h h, const u8 * key,
- u32 keylen, u64 hashval, u8 is_data,
- int include_deleted_p, u32 * node_id,
- index_t * dpo_ctx_id, u8 * vft_id, u8 * is_cs,
- u8 * hash_entry_id, u32 * bucket_id,
- u8 * bucket_is_overflow)
-{
- int i, ret = HICN_ERROR_HASHTB_HASH_NOT_FOUND;
- int found_p = FALSE;
- u32 bidx;
- hicn_hash_bucket_t *bucket;
- u32 current_bucket_id = ~0;
-
- /*
- * Use some bits of the low half of the hash to locate a row/bucket
- * in the table
- */
- current_bucket_id = bidx = (hashval & (h->ht_bucket_count - 1));
-
- bucket = h->ht_buckets + bidx;
-
- *bucket_is_overflow = 0;
- /* Check the entries in the bucket for matching hash value */
-
-loop_buckets:
-
- for (i = 0; i < HICN_HASHTB_BUCKET_ENTRIES && !found_p; i++)
- {
- /*
- * If an entry is marked for deletion, ignore it unless the
- * caller explicitly wants these nodes.
- */
- if (bucket->hb_entries[i].he_flags & HICN_HASH_ENTRY_FLAG_DELETED)
- {
- if (!include_deleted_p)
- {
- continue;
- }
- }
- if (bucket->hb_entries[i].he_msb64 == hashval)
- {
- /*
- * Found a candidate - must retrieve the actual node
- * and check the key.
- */
- *node_id = bucket->hb_entries[i].he_node;
- *dpo_ctx_id = bucket->hb_entries[i].dpo_ctx_id;
- *vft_id = bucket->hb_entries[i].vft_id;
- *is_cs =
- bucket->hb_entries[i].he_flags & HICN_HASH_ENTRY_FLAG_CS_ENTRY;
- *hash_entry_id = i;
- *bucket_id = current_bucket_id;
- /*
- * If we are doing lookup for a data, do not take a
- * lock in case of a hit with a CS entry
- */
- if (!(is_data && *is_cs))
- {
- bucket->hb_entries[i].locks++;
- }
- found_p = TRUE;
- ret = HICN_ERROR_NONE;
- goto done;
- }
- }
-
- /*
- * Be prepared to continue to an overflow bucket if necessary. We
- * only expect the last entry in a bucket to refer to an overflow
- * bucket...
- */
- i = HICN_HASHTB_BUCKET_ENTRIES - 1;
- if (bucket->hb_entries[i].he_flags & HICN_HASH_ENTRY_FLAG_OVERFLOW)
- {
- current_bucket_id = bucket->hb_entries[i].he_node;
- bucket = pool_elt_at_index (h->ht_overflow_buckets,
- bucket->hb_entries[i].he_node);
- *bucket_is_overflow = 1;
- goto loop_buckets;
- }
-done:
-
- return (ret);
-}
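As a worked example of the row selection above: assuming the bucket count is a power of two (which the masking implies), a table with 1024 rows uses the mask 0x3ff, so a hash value whose low bits are 0x345 always selects row 0x345; the full 64-bit hash kept in he_msb64 is then compared to pick a candidate entry within that row, before the key itself is checked with hicn_node_compare().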
-
-/**
- * This function allows splitting the hash verification from the comparison of
- * the entire key. Useful for exploiting prefetching.
- * Returns 1 if equal, 0 otherwise.
- */
-int
-hicn_node_compare (const u8 * key, u32 keylen, hicn_hash_node_t * node)
-{
-
- int ret = 0;
-
- if (key && keylen == node->hn_keysize)
- {
- ret = (memcmp (key, node->hn_key.ks.key, keylen) == 0);
- }
- return ret;
-}
-
-/*
- * Utility to init a new entry in a hashtable bucket/row. We use this to add
- * a new node+hash, and to clear out an entry during removal.
- */
-void
-hicn_hashtb_init_entry (hicn_hash_entry_t * entry, u32 nodeidx,
- u64 hashval, u32 locks)
-{
- entry->he_msb64 = hashval;
- entry->he_node = nodeidx;
-
- /* Clear out some other fields in the entry */
- entry->he_flags = 0;
- entry->locks = locks;
- entry->vft_id = 0;
- entry->dpo_ctx_id = 0;
-}
-
-/*
- * Insert a node into the hashtable. We expect the caller has a) computed the
- * hash value to use, b) initialized the node with the hash and key info, and
- * c) filled in its app-specific data portion of the node.
- */
-
-int
-hicn_hashtb_insert (hicn_hashtb_h h, hicn_hash_node_t * node,
- hicn_hash_entry_t ** hash_entry, u64 hash,
- u32 * node_id,
- index_t * dpo_ctx_id, u8 * vft_id, u8 * is_cs,
- u8 * hash_entry_id, u32 * bucket_id,
- u8 * bucket_is_overflow)
-{
- int i, ret = HICN_ERROR_HASHTB_INVAL;
- u32 bidx;
- hicn_hash_bucket_t *bucket, *newbkt;
- int use_seven;
- u32 current_bucket_id = ~0;
- int is_overflow = 0;
-
- *hash_entry = NULL;
-
- if (h == NULL)
- {
- goto done;
- }
- /*
- * Use some bits of the low half of the hash to locate a row/bucket
- * in the table
- */
- current_bucket_id = bidx = (hash & (h->ht_bucket_count - 1));
-
- bucket = h->ht_buckets + bidx;
-
- use_seven = (h->ht_flags & HICN_HASHTB_FLAG_USE_SEVEN);
-
- /* Locate a free entry slot in the bucket */
-
-loop_buckets:
-
- for (i = 0; i < HICN_HASHTB_BUCKET_ENTRIES; i++)
- {
-
- /*
- * If an entry is marked for deletion, ignore it
- */
- if (bucket->hb_entries[i].he_flags & HICN_HASH_ENTRY_FLAG_DELETED)
- {
- continue;
- }
- /*
- * Be sure that we are not inserting the same entry twice
- */
- if (bucket->hb_entries[i].he_msb64 == hash)
- {
- /*
- * We hit an existing PIT entry. Increase the lock count.
- */
-
- *node_id = bucket->hb_entries[i].he_node;
- *dpo_ctx_id = bucket->hb_entries[i].dpo_ctx_id;
- *vft_id = bucket->hb_entries[i].vft_id;
- *is_cs =
- bucket->hb_entries[i].he_flags & HICN_HASH_ENTRY_FLAG_CS_ENTRY;
- *hash_entry_id = i;
- *bucket_id = current_bucket_id;
- *hash_entry = &(bucket->hb_entries[i]);
- /*
- * If we are doing a lookup for a data packet, do not take a
- * lock in case of a hit with a CS entry
- */
- if (!(*is_cs))
- bucket->hb_entries[i].locks++;
- *bucket_is_overflow = is_overflow;
- ret = HICN_ERROR_HASHTB_EXIST;
- goto done;
- }
- if ((bucket->hb_entries[i].he_msb64 == 0LL) &&
- (bucket->hb_entries[i].he_node == 0))
- {
- /* Found a candidate -- fill it in */
-
- /*
- * Special case if the application asked not to use
- * the last entry in each bucket.
- */
- if ((i != (HICN_HASHTB_BUCKET_ENTRIES - 1)) || use_seven)
- {
- hicn_hashtb_init_entry (&(bucket->hb_entries[i]),
- NODE_IDX_FROM_NODE (node, h), hash, 0);
-
- *hash_entry = &(bucket->hb_entries[i]);
-
- node->bucket_id = current_bucket_id;
- node->entry_idx = i;
- (*hash_entry)->vft_id = *vft_id;
- (*hash_entry)->dpo_ctx_id = *dpo_ctx_id;
- if (is_overflow)
- node->hn_flags |= HICN_HASH_NODE_OVERFLOW_BUCKET;
-
- ret = HICN_ERROR_NONE;
- goto done;
- }
- }
- }
- /*
- * Be prepared to continue to an overflow bucket if necessary, or to
- * add a new overflow bucket. We only expect the last entry in a
- * bucket to refer to an overflow bucket...
- */
- i = HICN_HASHTB_BUCKET_ENTRIES - 1;
- if (bucket->hb_entries[i].he_flags & HICN_HASH_ENTRY_FLAG_OVERFLOW)
- {
- /* Existing overflow bucket - re-start the search loop */
- current_bucket_id = bucket->hb_entries[i].he_node;
- bucket = pool_elt_at_index (h->ht_overflow_buckets, current_bucket_id);
- is_overflow = 1;
- goto loop_buckets;
-
- }
- else
- {
- /*
- * Overflow - reached the end of a bucket without finding a
- * free entry slot. Need to allocate an overflow bucket, and
- * connect it to this bucket.
- */
- newbkt = alloc_overflow_bucket (h);
- if (newbkt == NULL)
- {
- ret = HICN_ERROR_HASHTB_NOMEM;
- goto done;
- }
- /*
- * We're touching some more bytes than we absolutely have to
- * here, but ... that seems ok.
- */
- memset (newbkt, 0, sizeof (hicn_hash_bucket_t));
-
- if (use_seven)
- {
- /*
- * Copy existing entry into new bucket - we really
- * expect these to be properly aligned so they can be
- * treated as int.
- */
- memcpy (&(newbkt->hb_entries[0]),
- &(bucket->hb_entries[i]), sizeof (hicn_hash_entry_t));
-
- /* Update bucket id and entry_idx on the hash node */
- hicn_hash_node_t *node =
- pool_elt_at_index (h->ht_nodes, newbkt->hb_entries[0].he_node);
- node->bucket_id = (newbkt - h->ht_overflow_buckets);
- node->entry_idx = 0;
- node->hn_flags |= HICN_HASH_NODE_OVERFLOW_BUCKET;
-
- }
- /*
- * Connect original bucket to the index of the new overflow
- * bucket
- */
- bucket->hb_entries[i].he_flags |= HICN_HASH_ENTRY_FLAG_OVERFLOW;
- bucket->hb_entries[i].he_node = (newbkt - h->ht_overflow_buckets);
-
- /* Add new entry to new overflow bucket */
- bucket = newbkt;
-
- /*
- * Use entry [1] in the new bucket _if_ we just copied into
- * entry [zero] above.
- */
- if (use_seven)
- {
-
- hicn_hashtb_init_entry (&(bucket->hb_entries[1]),
- NODE_IDX_FROM_NODE (node, h), hash, 0);
- *hash_entry = &(bucket->hb_entries[1]);
-
- node->bucket_id = (newbkt - h->ht_overflow_buckets);
- node->entry_idx = 1;
- node->hn_flags |= HICN_HASH_NODE_OVERFLOW_BUCKET;
- (*hash_entry)->vft_id = *vft_id;
- (*hash_entry)->dpo_ctx_id = *dpo_ctx_id;
- }
- else
- {
-
- hicn_hashtb_init_entry (&(bucket->hb_entries[0]),
- NODE_IDX_FROM_NODE (node, h), hash, 0);
- *hash_entry = &(bucket->hb_entries[0]);
- node->bucket_id = (newbkt - h->ht_overflow_buckets);
- node->entry_idx = 0;
- node->hn_flags |= HICN_HASH_NODE_OVERFLOW_BUCKET;
- (*hash_entry)->vft_id = *vft_id;
- (*hash_entry)->dpo_ctx_id = *dpo_ctx_id;
- }
- }
-
- /* And we're done with the overflow bucket */
- ret = HICN_ERROR_NONE;
-
-done:
-
- return (ret);
-}
-
-/*
- * Delete a node from a hashtable using the node itself, and delete/free the
- * node. Caller's pointer is cleared on success.
- */
-void
-hicn_hashtb_delete (hicn_hashtb_h h, hicn_hash_node_t ** pnode, u64 hashval)
-{
-
- hicn_hashtb_remove_node (h, *pnode, hashval);
- hicn_hashtb_free_node (h, *pnode);
- *pnode = NULL;
-
-}
-
-/*
- * Delete an entry from a hashtable using the node itself. If the node was
- * stored in an overflow bucket, and the bucket is empty after freeing the
- * node, the bucket is freed as well.
- */
-void
-hicn_hashtb_remove_node (hicn_hashtb_h h, hicn_hash_node_t * node,
- u64 hashval)
-{
- int i, count;
- u32 bidx, overflow_p;
- hicn_hash_bucket_t *bucket, *parent;
-
- if ((h == NULL) || (node == NULL))
- {
- goto done;
- }
- if (node->hn_flags & HICN_HASH_NODE_OVERFLOW_BUCKET)
- bucket = pool_elt_at_index (h->ht_overflow_buckets, node->bucket_id);
- else
- {
- /*
- * Use some bits of the low half of the hash to locate a
- * row/bucket in the table
- */
- bidx = (hashval & (h->ht_bucket_count - 1));
- ASSERT (bidx == node->bucket_id);
- bucket = h->ht_buckets + node->bucket_id;
- }
-
- overflow_p = node->hn_flags & HICN_HASH_NODE_OVERFLOW_BUCKET;
-
- /* Clear out the entry. */
- hicn_hashtb_init_entry (&(bucket->hb_entries[node->entry_idx]), 0, 0LL, 0);
-
- if (!overflow_p)
- {
- /*
- * And we're done, in the easy case where we didn't change an
- * overflow bucket
- */
- goto done;
- }
- /*
- * The special case: if this is the last remaining entry in an
- * overflow bucket, liberate the bucket. That in turn has a special
- * case if this bucket is in the middle of a chain of overflow
- * buckets.
- *
- * Note that we're not trying aggressively (yet) to condense buckets at
- * every possible opportunity.
- */
-
- /*
- * Reset this flag; we'll set it again if this bucket links to
- * another
- */
- overflow_p = FALSE;
-
- for (i = 0, count = 0; i < HICN_HASHTB_BUCKET_ENTRIES; i++)
- {
- if (bucket->hb_entries[i].he_node != 0)
- {
- count++;
- }
- if (i == (HICN_HASHTB_BUCKET_ENTRIES - 1) &&
- (bucket->hb_entries[i].he_flags & HICN_HASH_ENTRY_FLAG_OVERFLOW))
- {
- count--; /* Doesn't count as a 'real' entry */
- overflow_p = TRUE;
- }
- }
-
- if (count > 0)
- {
- /* Still a (real) entry in the row */
- goto done;
- }
- /*
- * Need to locate the predecessor of 'bucket': start at the beginning
- * of the chain of buckets and move forward
- */
- bidx = (hashval & (h->ht_bucket_count - 1));
-
- for (parent = h->ht_buckets + bidx; parent != NULL;)
- {
-
- if ((parent->hb_entries[(HICN_HASHTB_BUCKET_ENTRIES - 1)].he_flags &
- HICN_HASH_ENTRY_FLAG_OVERFLOW) == 0)
- {
- parent = NULL;
- break;
- }
- bidx = parent->hb_entries[(HICN_HASHTB_BUCKET_ENTRIES - 1)].he_node;
-
- if (pool_elt_at_index (h->ht_overflow_buckets, bidx) == bucket)
- {
- /*
- * Found the predecessor of 'bucket'. If 'bucket' has
- * a successor, connect 'parent' to it, and take
- * 'bucket' out of the middle.
- */
- if (overflow_p)
- {
- parent->hb_entries[(HICN_HASHTB_BUCKET_ENTRIES - 1)].he_node =
- bucket->hb_entries[(HICN_HASHTB_BUCKET_ENTRIES - 1)].he_node;
- }
- else
- {
- /*
- * Just clear the predecessor entry pointing
- * at 'bucket'
- */
- hicn_hashtb_init_entry (&parent->hb_entries
- [(HICN_HASHTB_BUCKET_ENTRIES - 1)], 0,
- 0LL, 0);
- }
-
- break;
- }
- /*
- * After the first iteration, 'parent' will be an overflow
- * bucket too
- */
- parent = pool_elt_at_index (h->ht_overflow_buckets, bidx);
- }
-
- /* We really expect to have found the predecessor */
- ASSERT (parent != NULL);
-
- /* And now, finally, we can put 'bucket' back on the free list */
- free_overflow_bucket (h, &bucket);
-
-done:
- return;
-}
-
-/*
- * Prepare a hashtable node, supplying the key, and computed hash info.
- */
-void
-hicn_hashtb_init_node (hicn_hashtb_h h, hicn_hash_node_t * node,
- const u8 * key, u32 keylen)
-{
- assert (h != NULL);
- assert (node != NULL);
- assert (keylen <= HICN_PARAM_HICN_NAME_LEN_MAX);
-
- /* Init the node struct */
- node->hn_flags = HICN_HASH_NODE_FLAGS_DEFAULT;
- node->hn_keysize = 0;
- node->hn_keysize = keylen;
- memcpy (node->hn_key.ks.key, key, keylen);
- node->bucket_id = ~0;
- node->entry_idx = ~0;
-}
-
-/*
- * Release a hashtable node back to the free list when an entry is cleared
- */
-void
-hicn_hashtb_free_node (hicn_hashtb_h h, hicn_hash_node_t * node)
-{
- ASSERT (h->ht_nodes_used > 0);
-
- /* Return 'node' to the free list */
- pool_put (h->ht_nodes, node);
- h->ht_nodes_used--;
-
-}
-
-/*
- * Walk a hashtable, iterating through the nodes, keeping context in 'ctx'.
- */
-int
-hicn_hashtb_next_node (hicn_hashtb_h h, hicn_hash_node_t ** pnode, u64 * ctx)
-{
- int i, j, ret = HICN_ERROR_HASHTB_INVAL;
- u32 bidx, entry;
- hicn_hash_bucket_t *bucket;
-
- if ((h == NULL) || (pnode == NULL) || (ctx == NULL))
- {
- goto done;
- }
- /* Special-case for new iteration */
- if (*ctx == HICN_HASH_WALK_CTX_INITIAL)
- {
- bidx = 0;
- bucket = &h->ht_buckets[0];
- entry = 0;
- j = 0;
- i = 0;
- goto search_table;
- }
- /* Convert context to bucket and entry indices */
- bidx = *ctx & 0xffffffffLL;
- entry = *ctx >> 32;
-
- if (bidx >= h->ht_bucket_count)
- {
- ret = HICN_ERROR_HASHTB_HASH_NOT_FOUND;
- goto done;
- }
- bucket = h->ht_buckets + bidx;
-
- /* Init total index into entries (includes fixed bucket and overflow) */
- j = 0;
-
-skip_processed_bucket_chunks:
- /*
- * Figure out where to resume the search for the next entry in the
- * table, by trying to find the last entry returned, from the cookie.
- * Loop walks one (regular or overflow) bucket chunk, label is used
- * for walking the chain of chunks. Note that if a deletion, or an
- * addition that creates an overflow, happens during the walk, the
- * iterator can skip entries or return duplicates, even for entries
- * that are present from before the walk starts until after it ends.
- */
-
- for (i = 0; i < HICN_HASHTB_BUCKET_ENTRIES; i++, j++)
- {
- if (j > entry)
- {
- /*
- * Start search for next here, use existing 'bucket'
- * and 'i'
- */
- break;
- }
- /*
- * If an entry is marked for deletion, ignore it
- */
- if (bucket->hb_entries[i].he_flags & HICN_HASH_ENTRY_FLAG_DELETED)
- {
- continue;
- }
- /*
- * Be prepared to continue to an overflow bucket if
- * necessary. (We only expect the last entry in a bucket to
- * refer to an overflow bucket...)
- */
- if (i == (HICN_HASHTB_BUCKET_ENTRIES - 1))
- {
- if (bucket->hb_entries[i].he_flags & HICN_HASH_ENTRY_FLAG_OVERFLOW)
- {
- bucket = pool_elt_at_index (h->ht_overflow_buckets,
- bucket->hb_entries[i].he_node);
-
- /* Increment overall entry counter 'j' */
- j++;
-
- goto skip_processed_bucket_chunks;
- }
- /*
- * end of row (end of fixed bucket plus any
- * overflows)
- */
- i = 0;
- j = 0;
-
- bidx++;
-
- /* Special case - we're at the end */
- if (bidx >= h->ht_bucket_count)
- {
- ret = HICN_ERROR_HASHTB_HASH_NOT_FOUND;
- goto done;
- }
- bucket = h->ht_buckets + bidx;
- break;
- }
- }
-
-search_table:
-
- /*
- * Now we're searching through the table for the next entry that's
- * set
- */
-
- for (; i < HICN_HASHTB_BUCKET_ENTRIES; i++, j++)
- {
- /*
- * If an entry is marked for deletion, ignore it
- */
- if (bucket->hb_entries[i].he_flags & HICN_HASH_ENTRY_FLAG_DELETED)
- {
- continue;
- }
- /* Is this entry set? */
- if (bucket->hb_entries[i].he_node != 0)
- {
-
- /* Retrieve the node struct */
- *pnode = pool_elt_at_index (h->ht_nodes,
- bucket->hb_entries[i].he_node);
-
- /*
- * Set 'entry' as we exit, so we can update the
- * cookie
- */
- entry = j;
- ret = HICN_ERROR_NONE;
- break;
- }
- /*
- * Be prepared to continue to an overflow bucket if
- * necessary. (We only expect the last entry in a bucket to
- * refer to an overflow bucket...)
- */
- if (i == (HICN_HASHTB_BUCKET_ENTRIES - 1))
- {
- if (bucket->hb_entries[i].he_flags & HICN_HASH_ENTRY_FLAG_OVERFLOW)
- {
- bucket = pool_elt_at_index (h->ht_overflow_buckets,
- bucket->hb_entries[i].he_node);
- /*
- * Reset per-bucket index 'i', here (not done
- * in iterator)
- */
- i = 0;
- /* Increment overall entry counter 'j' */
- j++;
-
- goto search_table;
- }
- else
- {
- /*
- * Move to next bucket, resetting per-bucket
- * and overall entry indexes
- */
- i = 0;
- j = 0;
-
- bidx++;
-
- /* Special case - we're at the end */
- if (bidx >= h->ht_bucket_count)
- {
- ret = HICN_ERROR_HASHTB_HASH_NOT_FOUND;
- goto done;
- }
- bucket = h->ht_buckets + bidx;
- goto search_table;
- }
- }
- }
-
-done:
-
- if (ret == HICN_ERROR_NONE)
- {
- /* Update context */
- *ctx = bidx;
- *ctx |= ((u64) entry << 32);
- }
- return (ret);
-}
-
-int
-hicn_hashtb_key_to_buf (u8 ** vec_res, hicn_hashtb_h h,
- const hicn_hash_node_t * node)
-{
- int ret = HICN_ERROR_NONE;
- u8 *vec = *vec_res;
-
- if (node->hn_keysize <= HICN_HASH_KEY_BYTES)
- {
- vec_add (vec, node->hn_key.ks.key, node->hn_keysize);
- }
- *vec_res = vec;
- return (ret);
-}
-
-/*
- * fd.io coding-style-patch-verification: ON
- *
- * Local Variables: eval: (c-set-style "gnu") End:
- */
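The hashtb.c removed above deliberately splits each lookup into two phases: the bucket walk matches only on the 64-bit hash, and the full key is verified afterwards with hicn_node_compare(). The fragment below is a minimal sketch of how a caller could combine the two phases, using the APIs deleted above together with helpers from the companion hashtb.h removed next; example_lookup_and_verify() is hypothetical and not part of the plugin.

/* Hypothetical caller: match on the hash first, then confirm the full key
 * on the retrieved node before trusting the hit. */
static int
example_lookup_and_verify (hicn_hashtb_h h, const u8 *name, u16 namelen,
                           u8 is_data)
{
  u32 node_id, bucket_id;
  index_t dpo_ctx_id;
  u8 vft_id, is_cs, hash_entry_id, bucket_is_overflow;
  u64 hash = hicn_hashtb_hash_name (name, namelen);

  int ret = hicn_hashtb_lookup_node (h, name, namelen, hash, is_data,
                                     &node_id, &dpo_ctx_id, &vft_id, &is_cs,
                                     &hash_entry_id, &bucket_id,
                                     &bucket_is_overflow);
  if (ret != HICN_ERROR_NONE)
    return ret;

  /* The hash matched: fetch the node and compare the whole key. */
  hicn_hash_node_t *node = hicn_hashtb_node_from_idx (h, node_id);
  if (!hicn_node_compare (name, namelen, node))
    return HICN_ERROR_HASHTB_HASH_NOT_FOUND;

  return HICN_ERROR_NONE;
}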
diff --git a/hicn-plugin/src/hashtb.h b/hicn-plugin/src/hashtb.h
deleted file mode 100644
index 3c72fda65..000000000
--- a/hicn-plugin/src/hashtb.h
+++ /dev/null
@@ -1,546 +0,0 @@
-/*
- * Copyright (c) 2017-2019 Cisco and/or its affiliates.
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at:
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __HICN_HASHTB_H__
-#define __HICN_HASHTB_H__
-
-#include <stdint.h>
-#include <vppinfra/bihash_8_8.h>
-#include <vppinfra/bihash_24_8.h>
-
-#include "params.h"
-#include "parser.h"
-#include "error.h"
-
-/**
- * @file hashtb.h
- * Lookup is finding a hashtable record whose name matches the name being
- * looked up. Most of the lookup work is based on the hash value of the two
- * names. Note that the intel cache line size is 64 bytes, and some platforms
- * load in 2 cache lines together.
- * - first step is to match a record at the bucket/slot level (htab has an
- * array of htbucket_t/htbc_elmt, where each bucket has 7 slots to hold indices
- * for entries.) Matching at this level implies
- * - the hashes of the lookup name and the record map to the same bucket
- * - the high 32 bits of the hashes (slot bce_hash_msb32s) match. Read
- * cost (on the hash table side, i.e. ignoring reading the name being
- * looked up):
- * - First step normally requires 1 cache line load to pull in the
- * 64-byte htbucket_t with the 7 element slot table holding the
- * hash_msb32s.
- * - In the event (hopefully rare for a hash table with appropriate
- * number of buckets) that more than 7 elements hash to the same bucket,
- * lookup may well need to look not only at the static htbc_elmt_t but at
- * the chain of dynamically allocated htbc_elmt_t's linked to the static
- * htbc_elmt_t, where each of these holds slot entries for additional elements.
- * - Before reaching that point, it is initially required to read in the
- * hash table record fields (ht_bucket_buf, htnode buf, etc) holding
- * pointers to the arrays, but these cache lines are common to all lookups
- * so will likely already be in the cache.
- * - second step is to match at the record level (htnode/htkb level) once a
- * slot-level match happens. Matching at this level implies the following match
- * - the hash values (the full 64 bits vs. bucket+32 msb, above).
- * - the name which, on the hash table side, is stored as a list of htkb_t (key buffers).
- *
- * Some hashtables (for which rare false positives are tolerable) store hash
- * values but no keys. (In ISM NDN forwarder, this was used for dcm_dpf: data
- * cache manager's dataplane filter, where speed was critical and very rare
- * false positives would be detected in the full dcm check.) - No key buffers
- * are used (or even allocated at hash table creation).
- */
-
-/* Handy abbreviations for success status, and for boolean values */
-#ifndef TRUE
-#define TRUE 1
-#endif
-
-#ifndef FALSE
-#define FALSE 0
-#endif
-
-#define HICN_HASH_INVALID_IDX ~0
-/*
- * for hicn_hashtb_next_node() iterator, this otherwise illegal context value
- * indicates first call of iteration. Note: must not be 0, which is a legal
- * context value.
- */
-#define HICN_HASH_WALK_CTX_INITIAL (~((u64)0))
-
-/*
- * Key memory allocation scheme.
- *
- * The key is the bytestring that a hashtable entry is storing, e.g. a fib
- * prefix or packet name. The hash of the name is used not just to pick the
- * bucket, but also as a surrogate for the actual key value.
- *
- * Client calls pass key/name as contiguous memory for lookup/add/delete but
- * the hashtable stores its copy of the key/name as a list of one or more hash_key
- * structs.
- * - key memory is managed as a list of keys (cache line sized/aligned
- *   buffers).
- * - If (keysize < 128) then use the key struct's full 128 bytes.
- * - If not, the first key struct is the head of a linked list of elements
- *   where the first bytes are used for the key and the last 4 bytes are the
- *   index of the next entry (or an end marker).
- * - key memory is generally the single largest use of memory in the hash
- *   table, especially for the PIT, as names are bigger than node structs
- *   (which is also per name/entry).
- *
- */
-
-/* Compute hash node index from node pointer */
-#define NODE_IDX_FROM_NODE(p, h) \
- (u32)((p) - ((h)->ht_nodes))
-
-#define HICN_HASH_KEY_BYTES 20
-
-typedef struct
-{
- struct
- {
- u8 key[HICN_HASH_KEY_BYTES];
- } ks; /* Entire key in one block */
-} hicn_hash_key_t;
-
-/*
- * Ratio of extra key blocks to allocate, in case the embedded ones aren't
- * sufficient. This is the fraction of the number of entries allocated.
- */
-#define HICN_HASHTB_KEY_RATIO 8
-
-/*
- * hash node, used to store a hash table entry; indexed by an entry in a
- * bucket. the node contains an embedded key; long keys are stored as chains
- * of keys.
- *
- * The memory block for a node includes space for storing outgoing faces for
- * interests, additional memory located off the end of the htnode data structure.
- *
- */
-
-/* Size this so that we can offer 64B aligned on 64-bits for storing outgoing
- * faces information
- */
-#define HICN_HASH_NODE_APP_DATA_SIZE 64
-
-/* How to align in the right way */
-typedef struct __attribute__ ((packed)) hicn_hash_node_s
-{
- /* Bucket id containing the corresponding hash entry. */
- u32 bucket_id;
-
- /* Hash entry index in the bucket */
- u32 entry_idx;
-
- /* Total size of the key */
- u16 hn_keysize;
-
- /* 1 byte of flags for application use */
- u8 hn_flags;
-
- u8 _hn_reserved1; /* TBD, to align what follows back to
- * 32 */
-
- hicn_hash_key_t hn_key; /* Key value embedded in the node, may chain
- * to more key buffers if necessary */
-
- /* 32B + HICN_HASH_NODE_APP_DATA_SIZE */
- /* Followed by app-specific data (fib or pit or cs entry, e.g.) */
- u8 hn_data[HICN_HASH_NODE_APP_DATA_SIZE];
-
-} hicn_hash_node_t;
-
-#define HICN_HASH_NODE_FLAGS_DEFAULT 0x00
-#define HICN_HASH_NODE_CS_FLAGS 0x01
-#define HICN_HASH_NODE_OVERFLOW_BUCKET 0x02
-
-/*
- * hicn_hash_entry_t Structure holding all or part of a hash value, a node
- * index, and other key pieces of info.
- *
- * - 128 bytes/bucket with 19 bytes/entry gives 6 entries, or 5 entries plus
- * next bucket ptr if overflow. Changes in this structure will affect
- * hicn_hash_bucket_t
- */
-typedef struct __attribute__ ((packed)) hicn_hash_entry_s
-{
-
- /* MSB of the hash value */
- u64 he_msb64;
-
- /* Index of node block */
- u32 he_node;
-
- /*
- * Lock to prevent hash_node deletion while there are still interest
- * or data referring to it
- */
- u32 locks;
-
- /* Index of dpo (4B) */
- index_t dpo_ctx_id;
-
- /* A few flags, including 'this points to a chain of buckets' */
- u8 he_flags;
-
- /*
- * Index of the virtual function table corresponding to the dpo_ctx
- * strategy
- */
- u8 vft_id;
-
-} hicn_hash_entry_t; //size 22B
-
-STATIC_ASSERT (sizeof (index_t) <= 4, "sizeof index_t is greater than 4B");
-
-
-#define HICN_HASH_ENTRY_FLAGS_DEFAULT 0x00
-
-/* If entry is PIT this flag is 0 */
-#define HICN_HASH_ENTRY_FLAG_CS_ENTRY 0x01
-
-/*
- * This entry heads a chain of overflow buckets (we expect to see this only
- * in the last entry in a bucket.) In this case, the index is to an overflow
- * bucket rather than to a single node block.
- */
-#define HICN_HASH_ENTRY_FLAG_OVERFLOW 0x04
-
-/* This entry has been marked for deletion */
-#define HICN_HASH_ENTRY_FLAG_DELETED 0x08
-
-/* Use fast he_timeout units for expiration, slow if not */
-#define HICN_HASH_ENTRY_FLAG_FAST_TIMEOUT 0x10
-
-/*
- * hash bucket: Contains an array of entries. Cache line sized/aligned, so no
- * room for extra fields unless bucket size is increased to 2 cache lines or
- * the entry struct shrinks.
- */
-
-/*
- * Overflow bucket ratio as a fraction of the fixed/configured count; a pool
- * of hash buckets used if a row in the fixed table overflows.
- */
-#define HICN_HASHTB_BUCKET_ENTRIES 5
-
-typedef struct __attribute__ ((packed))
-{
- hicn_hash_entry_t hb_entries[HICN_HASHTB_BUCKET_ENTRIES];
- u64 align1;
- u64 align2;
- u16 align3;
-} hicn_hash_bucket_t;
-
-/* Overall target fill-factor for the hashtable */
-#define HICN_HASHTB_FILL_FACTOR 4
-
-#define HICN_HASHTB_MIN_ENTRIES (1 << 4) // includes dummy node 0 entry
-#define HICN_HASHTB_MAX_ENTRIES (1 << 24)
-
-#define HICN_HASHTB_MIN_BUCKETS (1 << 10)
-
-/*
- * htab_t
- *
- * Hash table main structure.
- *
- * Contains - pointers to dynamically allocated arrays of cache-line
- * sized/aligned structures (buckets, nodes, keys). Put frequently accessed
- * fields in the first cache line.
- */
-typedef struct hicn_hashtb_s
-{
-
- /* 8B - main array of hash buckets */
- hicn_hash_bucket_t *ht_buckets;
-
- /* 8B - just-in-case block of overflow buckets */
- hicn_hash_bucket_t *ht_overflow_buckets;
-
- /* 8B - block of nodes associated with entries in buckets */
- hicn_hash_node_t *ht_nodes;
-
- /* Flags */
- u32 ht_flags;
-
- /* Count of buckets allocated in the main array */
- u32 ht_bucket_count;
-
- /* Count of overflow buckets allocated */
- u32 ht_overflow_bucket_count;
- u32 ht_overflow_buckets_used;
-
- /* Count of nodes allocated */
- u32 ht_node_count;
- u32 ht_nodes_used;
-
- /* Count of overflow key structs allocated */
- u32 ht_key_count;
- u32 ht_keys_used;
-
-} hicn_hashtb_t, *hicn_hashtb_h;
-
-/*
- * Offset to aligned start of additional data (PIT/CS, FIB) embedded in each
- * node.
- */
-extern u32 ht_node_data_offset_aligned;
-
-/* Flags for hashtable */
-
-#define HICN_HASHTB_FLAGS_DEFAULT 0x00
-
-/*
- * Don't use the last entry in each bucket - only use it for overflow. We use
- * this for the FIB, currently, so that we can support in-place FIB changes
- * that would be difficult if there were hash entry copies as part of
- * overflow handling.
- */
-#define HICN_HASHTB_FLAG_USE_SEVEN 0x04
-#define HICN_HASHTB_FLAG_KEY_FMT_PFX 0x08
-#define HICN_HASHTB_FLAG_KEY_FMT_NAME 0x10
-
-/*
- * Max prefix name components we'll support in our incremental hashing;
- * currently used only for LPM in the FIB.
- */
-#define HICN_HASHTB_MAX_NAME_COMPS HICN_PARAM_FIB_ENTRY_PFX_COMPS_MAX
-
-/*
- * APIs and inlines
- */
-
-/* Compute hash node index from node pointer */
-static inline u32
-hicn_hashtb_node_idx_from_node (hicn_hashtb_h h, hicn_hash_node_t * p)
-{
- return (p - h->ht_nodes);
-}
-
-/* Retrieve a hashtable node by node index */
-static inline hicn_hash_node_t *
-hicn_hashtb_node_from_idx (hicn_hashtb_h h, u32 idx)
-{
- return (pool_elt_at_index (h->ht_nodes, idx));
-}
-
-/* Allocate a brand-new hashtable */
-int
-hicn_hashtb_alloc (hicn_hashtb_h * ph, u32 max_elems, size_t app_data_size);
-
-/* Free a hashtable, including its embedded arrays */
-int hicn_hashtb_free (hicn_hashtb_h * ph);
-
-/* Hash a bytestring, currently using bihash */
-u64 hicn_hashtb_hash_bytestring (const u8 * key, u32 keylen);
-
-always_inline hicn_hash_entry_t *
-hicn_hashtb_get_entry (hicn_hashtb_h h, u32 entry_idx, u32 bucket_id,
- u8 bucket_overflow)
-{
- hicn_hash_bucket_t *bucket;
- if (bucket_overflow)
- bucket = pool_elt_at_index (h->ht_overflow_buckets, bucket_id);
- else
- bucket = (hicn_hash_bucket_t *) (h->ht_buckets + bucket_id);
-
- return &(bucket->hb_entries[entry_idx]);
-}
-
-/* Hash a name, currently using bihash */
-always_inline u64
-hicn_hashtb_hash_name (const u8 * key, u16 keylen)
-{
- if (key != NULL && keylen == HICN_V4_NAME_LEN)
- {
- clib_bihash_kv_8_8_t kv;
- kv.key = ((u64 *) key)[0];
- return clib_bihash_hash_8_8 (&kv);
- }
- else if (key != NULL && keylen == HICN_V6_NAME_LEN)
- {
- clib_bihash_kv_24_8_t kv;
- kv.key[0] = ((u64 *) key)[0];
- kv.key[1] = ((u64 *) key)[1];
- kv.key[2] = ((u32 *) key)[4];
- return clib_bihash_hash_24_8 (&kv);
- }
- else
- {
- return (-1LL);
- }
-}
-
-
-/*
- * Prepare a hashtable node for insertion, supplying the key and computed
- * hash info. This sets up the node->key relationship, possibly allocating
- * overflow key buffers.
- */
-void
-hicn_hashtb_init_node (hicn_hashtb_h h, hicn_hash_node_t * node,
- const u8 * key, u32 keylen);
-
-/*
- * Insert a node into the hashtable. We expect the caller has used the init
- * api to set the node key and hash info, and populated the extra data area
- * (if any) - or done the equivalent work itself.
- */
-int
-hicn_hashtb_insert (hicn_hashtb_h h, hicn_hash_node_t * node,
- hicn_hash_entry_t ** hash_entry, u64 hash,
- u32 * node_id,
- index_t * dpo_ctx_id, u8 * vft_id, u8 * is_cs,
- u8 * hash_entry_id, u32 * bucket_id,
- u8 * bucket_is_overflow);
-
-/*
- * Basic api to lookup a specific hash+key tuple. This does the entire lookup
- * operation, retrieving node structs and comparing keys, so it's not
- * optimized for prefetching or high performance.
- *
- * Returns zero and passes back the node info on success, an error code otherwise.
- */
-int
-hicn_hashtb_lookup_node (hicn_hashtb_h h, const u8 * key,
- u32 keylen, u64 hashval, u8 is_data,
- u32 * node_id, index_t * dpo_ctx_id, u8 * vft_id,
- u8 * is_cs, u8 * hash_entry_id, u32 * bucket_id,
- u8 * bucket_is_overflow);
-
-/*
- * Extended api to lookup a specific hash+key tuple. The implementation
- * allows the caller to locate nodes that are marked for deletion; this is
- * part of some hashtable applications, such as the FIB.
- *
- * This does the entire lookup operation, retrieving node structs and comparing
- * keys, so it's not optimized for prefetching or high performance.
- *
- * Returns zero and passes back the node info on success, an error code otherwise.
- */
-int
-hicn_hashtb_lookup_node_ex (hicn_hashtb_h h, const u8 * key,
- u32 keylen, u64 hashval, u8 is_data,
- int include_deleted_p, u32 * node_id,
- index_t * dpo_ctx_id, u8 * vft_id, u8 * is_cs,
- u8 * hash_entry_id, u32 * bucket_id,
- u8 * bucket_is_overflow);
-
-/**
- * @brief Compares the key in the node with the given key
- *
- * This function allows the hash verification to be split from the comparison
- * of the entire key. Useful to exploit prefetching.
- * @result 1 if the keys are equal, 0 otherwise
- */
-int hicn_node_compare (const u8 * key, u32 keylen, hicn_hash_node_t * node);
-
-/*
- * Remove a node from a hashtable using the node itself. The internal data
- * structs are cleaned up, but the node struct itself is not: the caller must
- * free the node itself.
- */
-void hicn_hashtb_remove_node (hicn_hashtb_h h, hicn_hash_node_t * node,
- u64 hashval);
-
-/*
- * Delete a node from a hashtable using the node itself, and delete/free the
- * node. Caller's pointer is cleared on success.
- */
-void hicn_hashtb_delete (hicn_hashtb_h h, hicn_hash_node_t ** pnode,
- u64 hashval);
-
-/*
- * Utility to init a new entry in a hashtable bucket/row. We use this to add
- * a new node+hash, and to clear out an entry during removal.
- */
-void
-hicn_hashtb_init_entry (hicn_hash_entry_t * entry,
- u32 nodeidx, u64 hashval, u32 locks);
-
-
-/*
- * Return data area embedded in a hash node struct. We maintain an 'offset'
- * value in case the common node body struct doesn't leave the data area
- * aligned properly.
- */
-static inline void *
-hicn_hashtb_node_data (hicn_hash_node_t * node)
-{
- return ((u8 *) (node) + ht_node_data_offset_aligned);
-}
-
-/*
- * Use some bits of the low half of the hash to locate a row/bucket in the
- * table
- */
-static inline u32
-hicn_hashtb_bucket_idx (hicn_hashtb_h h, u64 hashval)
-{
- return ((u32) (hashval & (h->ht_bucket_count - 1)));
-}
-
-/*
- * Return a hash node struct from the free list, or NULL. Note that the
- * returned struct is _not_ cleared/zeroed - init is up to the caller.
- */
-static inline hicn_hash_node_t *
-hicn_hashtb_alloc_node (hicn_hashtb_h h)
-{
- hicn_hash_node_t *p = NULL;
-
- if (h->ht_nodes_used < h->ht_node_count)
- {
- pool_get_aligned (h->ht_nodes, p, 8);
- h->ht_nodes_used++;
- }
- return (p);
-}
-
-/*
- * Release a hashtable node back to the free list when an entry is cleared
- */
-void hicn_hashtb_free_node (hicn_hashtb_h h, hicn_hash_node_t * node);
-
-/*
- * Walk a hashtable, iterating through the nodes, keeping context in 'ctx'
- * between calls.
- *
- * Set the context value to HICN_HASH_WALK_CTX_INITIAL to start an iteration.
- */
-int
-hicn_hashtb_next_node (hicn_hashtb_h h, hicn_hash_node_t ** pnode, u64 * ctx);
-
-
-int
-hicn_hashtb_key_to_str (hicn_hashtb_h h, const hicn_hash_node_t * node,
- char *buf, int bufsize, int must_fit);
-
-/*
- * Hash the full name in a single step. An offset could be passed here to
- * compute two separate hashes (prefix + seqno) in case the CS and PIT use a
- * two-step hashing scheme.
- */
-always_inline int
-hicn_hashtb_fullhash (const u8 * name, u16 namelen, u64 * name_hash)
-{
- *name_hash = hicn_hashtb_hash_name (name, namelen);
- return (*name_hash != (-1LL) ? HICN_ERROR_NONE : HICN_ERROR_HASHTB_INVAL);
-}
-
-#endif /* // __HICN_HASHTB_H__ */
-
-/*
- * fd.io coding-style-patch-verification: ON
- *
- * Local Variables: eval: (c-set-style "gnu") End:
- */
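The header removed above exposes a table iterator, hicn_hashtb_next_node(), that keeps its position in a caller-provided 64-bit context. Below is a minimal usage sketch assuming only the declarations above; example_count_nodes() is hypothetical.

/* Walk every live node in the table; ctx must start at
 * HICN_HASH_WALK_CTX_INITIAL and is updated by each call. */
static u32
example_count_nodes (hicn_hashtb_h h)
{
  hicn_hash_node_t *node = NULL;
  u64 ctx = HICN_HASH_WALK_CTX_INITIAL;
  u32 count = 0;

  while (hicn_hashtb_next_node (h, &node, &ctx) == HICN_ERROR_NONE)
    count++;

  return count;
}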
diff --git a/hicn-plugin/src/hicn.api b/hicn-plugin/src/hicn.api
index 9643f2098..d999e0de5 100644
--- a/hicn-plugin/src/hicn.api
+++ b/hicn-plugin/src/hicn.api
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Copyright (c) 2021 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -22,6 +22,14 @@ enum hicn_action_type
HICN_ENABLE,
};
+enum hicn_strategy
+{
+ HICN_STRATEGY_MW = 0,
+ HICN_STRATEGY_RR,
+ HICN_STRATEGY_RP,
+ HICN_STRATEGY_LR,
+};
+
typedef hicn_face
{
/* IP local address */
@@ -360,6 +368,33 @@ define hicn_api_routes_dump
u32 context;
};
+define hicn_api_strategy_set
+{
+ /* Client identifier, set from api_main.my_client_index */
+ u32 client_index;
+
+ /* Arbitrary context, so client can match reply to request */
+ u32 context;
+
+ /* Fib prefix for the strategy */
+ vl_api_prefix_t prefix;
+
+ /* ID of the strategy to set for this prefix */
+ vl_api_hicn_strategy_t strategy_id;
+};
+
+define hicn_api_strategy_set_reply
+{
+ /* Client identifier, set from api_main.my_client_index */
+ u32 client_index;
+
+ /* Arbitrary context, so client can match reply to request */
+ u32 context;
+
+ /* Return value, zero means all OK */
+ i32 retval;
+};
+
define hicn_api_strategies_get
{
/* Client identifier, set from api_main.my_client_index */
@@ -381,7 +416,7 @@ define hicn_api_strategies_get_reply
u8 n_strategies;
/* Strategies */
- u32 strategy_id[256];
+ vl_api_hicn_strategy_t strategy_id[256];
/* Return value, zero means all OK */
i32 retval;
@@ -396,7 +431,7 @@ define hicn_api_strategy_get
u32 context;
/* Route prefix */
- u32 strategy_id;
+ vl_api_hicn_strategy_t strategy_id;
};
define hicn_api_strategy_get_reply
@@ -437,8 +472,14 @@ define hicn_api_enable_disable_reply
/* Arbitrary context, so client can match reply to request */
u32 context;
-/* Return value, zero means all OK */
+ /* Return value, zero means all OK */
i32 retval;
+
+ /* Number of faces created */
+ u8 nfaces;
+
+ /* IDs of new faces */
+ u32 faceids[16];
};
define hicn_api_register_prod_app
@@ -457,6 +498,9 @@ define hicn_api_register_prod_app
/* CS memory reserved -- in number of packets */
u32 cs_reserved;
+
+ /* Port identifying producer application */
+ u16 port;
};
define hicn_api_register_prod_app_reply
@@ -499,6 +543,9 @@ define hicn_api_register_cons_app
/* swif */
u32 swif;
+
+ /* src port of consumer packet from this face */
+ u16 port;
};
define hicn_api_register_cons_app_reply
@@ -570,6 +617,48 @@ define hicn_api_udp_tunnel_add_del_reply
u32 uei;
};
+define hicn_api_mapme_default_route_set
+{
+ /* Client identifier, set from api_main.my_client_index */
+ u32 client_index;
+
+ /* Arbitrary context, so client can match reply to request */
+ u32 context;
+
+ /* Fib prefix to be used as default if mapme EPM fails */
+ vl_api_prefix_t prefix;
+};
+
+define hicn_api_mapme_default_route_set_reply
+{
+ /* From the request */
+ u32 context;
+
+ /* Return value, zero means all OK */
+ i32 retval;
+};
+
+define hicn_api_mapme_default_route_get
+{
+ /* Client identifier, set from api_main.my_client_index */
+ u32 client_index;
+
+ /* Arbitrary context, so client can match reply to request */
+ u32 context;
+};
+
+define hicn_api_mapme_default_route_get_reply
+{
+ /* From the request */
+ u32 context;
+
+ /* Return value, zero means all OK */
+ i32 retval;
+
+ /* Fib prefix to be used as default if mapme EPM fails */
+ vl_api_prefix_t prefix;
+};
+
/*
* fd.io coding-style-patch-verification: ON
*
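The hicn.api hunk above introduces the hicn_strategy enum and a strategy_set request/reply pair. The fragment below is a hypothetical VAT-style sender for the new message, following the M/S/W macro convention of VPP api-test plugins; the function name and the choice of HICN_STRATEGY_RR are illustrative only, and prefix parsing from CLI input is omitted.

static int
api_hicn_api_strategy_set (vat_main_t *vam)
{
  vl_api_hicn_api_strategy_set_t *mp;
  int ret;

  M (HICN_API_STRATEGY_SET, mp);

  /* Prefix the strategy applies to (parsing of user input omitted) */
  clib_memset (&mp->prefix, 0, sizeof (mp->prefix));

  /* Pick one of the strategies defined by the new hicn_strategy enum */
  mp->strategy_id = clib_host_to_net_u32 (HICN_STRATEGY_RR);

  S (mp);
  W (ret);
  return ret;
}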
diff --git a/hicn-plugin/src/hicn.c b/hicn-plugin/src/hicn.c
index 43a717f80..d48ef4023 100644
--- a/hicn-plugin/src/hicn.c
+++ b/hicn-plugin/src/hicn.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Copyright (c) 2021 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -25,22 +25,15 @@
#include "mgmt.h"
#include "error.h"
#include "faces/app/address_mgr.h"
-#include "face_db.h"
#include "udp_tunnels/udp_tunnel.h"
#include "route.h"
+#include "pg.h"
hicn_main_t hicn_main;
/* Module vars */
int hicn_infra_fwdr_initialized = 0;
-/*
- * Global time counters we're trying out for opportunistic hashtable
- * expiration.
- */
-uint16_t hicn_infra_fast_timer; /* Counts at 1 second intervals */
-uint16_t hicn_infra_slow_timer; /* Counts at 1 minute intervals */
-
-hicn_face_bucket_t *hicn_face_bucket_pool;
+// hicn_face_bucket_t *hicn_face_bucket_pool;
/*
* Init hicn forwarder with configurable PIT, CS sizes
@@ -48,29 +41,27 @@ hicn_face_bucket_t *hicn_face_bucket_pool;
static int
hicn_infra_fwdr_init (uint32_t shard_pit_size, uint32_t shard_cs_size)
{
- int ret = 0;
+ int ret = HICN_ERROR_NONE;
if (hicn_infra_fwdr_initialized)
{
ret = HICN_ERROR_FWD_ALREADY_ENABLED;
- goto done;
+ goto DONE;
}
+
/* Init per worker limits */
hicn_infra_pit_size = shard_pit_size;
hicn_infra_cs_size = shard_cs_size;
- /* Init the global time-compression counters */
- hicn_infra_fast_timer = 1;
- hicn_infra_slow_timer = 1;
+ hicn_pit_create (&hicn_main.pitcs, hicn_infra_pit_size, hicn_infra_cs_size);
- ret = hicn_pit_create (&hicn_main.pitcs, hicn_infra_pit_size);
- hicn_pit_set_lru_max (&hicn_main.pitcs, hicn_infra_cs_size);
-done:
+DONE:
if ((ret == HICN_ERROR_NONE) && !hicn_infra_fwdr_initialized)
{
hicn_infra_fwdr_initialized = 1;
}
- return (ret);
+
+ return ret;
}
/*
@@ -78,11 +69,9 @@ done:
* only 'enabling' now
*/
int
-hicn_infra_plugin_enable_disable (int enable_disable,
- int pit_size_req,
+hicn_infra_plugin_enable_disable (int enable_disable, int pit_size_req,
f64 pit_max_lifetime_sec_req,
- int cs_size_req,
- vnet_link_t link)
+ int cs_size_req, vnet_link_t link)
{
int ret = 0;
@@ -148,33 +137,32 @@ hicn_infra_plugin_enable_disable (int enable_disable,
vec_foreach (bp, bm->buffer_pools)
n_buffers = n_buffers < bp->n_buffers ? bp->n_buffers : n_buffers;
- // check if CS is bugger tha PIT or bigger than the available vlib_buffers
- uword cs_buffers =
- (n_buffers >
- HICN_PARAM_CS_MIN_MBUF) ? n_buffers - HICN_PARAM_CS_MIN_MBUF : 0;
+ // check if CS is bigger than PIT or bigger than the available
+ // vlib_buffers
+ uword cs_buffers = (n_buffers > HICN_PARAM_CS_MIN_MBUF) ?
+ n_buffers - HICN_PARAM_CS_MIN_MBUF :
+ 0;
if (cs_size_req > (pit_size_req / 2) || cs_size_req > cs_buffers)
{
cs_size_req =
((pit_size_req / 2) > cs_buffers) ? cs_buffers : pit_size_req / 2;
vlib_cli_output (vm,
- "WARNING!! CS too large. Please check size of PIT or the number of buffers available in VPP\n");
-
+ "WARNING!! CS too large. Please check size of PIT "
+ "or the number of buffers available in VPP\n");
}
cs_size = (uint32_t) cs_size_req;
}
ret = hicn_infra_fwdr_init (pit_size, cs_size);
- hicn_face_db_init (pit_size);
-
if (ret != HICN_ERROR_NONE)
{
goto done;
}
sm->is_enabled = 1;
sm->link = link;
- //hicn_face_udp_init_internal ();
+ // hicn_face_udp_init_internal ();
done:
@@ -182,7 +170,7 @@ done:
}
static clib_error_t *
-hicn_configure (vlib_main_t * vm, unformat_input_t * input)
+hicn_configure (vlib_main_t *vm, unformat_input_t *input)
{
u32 pit_size = HICN_PARAM_PIT_ENTRIES_DFLT;
u32 cs_size = HICN_PARAM_CS_ENTRIES_DFLT;
@@ -190,7 +178,6 @@ hicn_configure (vlib_main_t * vm, unformat_input_t * input)
vnet_link_t link;
-
while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
{
if (unformat (input, "pit-size %u", &pit_size))
@@ -207,10 +194,8 @@ hicn_configure (vlib_main_t * vm, unformat_input_t * input)
unformat_free (input);
- hicn_infra_plugin_enable_disable (1, pit_size,
- pit_lifetime_max_sec,
- cs_size, link);
-
+ hicn_infra_plugin_enable_disable (1, pit_size, pit_lifetime_max_sec, cs_size,
+ link);
return 0;
}
@@ -221,41 +206,41 @@ VLIB_CONFIG_FUNCTION (hicn_configure, "hicn");
* Init entry-point for the icn plugin
*/
static clib_error_t *
-hicn_init (vlib_main_t * vm)
+hicn_init (vlib_main_t *vm)
{
clib_error_t *error = 0;
hicn_main_t *sm = &hicn_main;
- /* Init other elements in the 'main' struct */
+ // Init other elements in the 'main' struct
sm->is_enabled = 0;
error = hicn_api_plugin_hookup (vm);
- /* Init the dpo module */
+ // Init the dpo module
hicn_dpos_init ();
- /* Init the app manager */
+ // Init the app manager
address_mgr_init ();
+ // Init the face module
hicn_face_module_init (vm);
- /* Init the route module */
+ // Init the route module
hicn_route_init ();
+ // Init the UDP tunnels module
udp_tunnel_init ();
+ // Init the packet generator module
+ hicn_pg_init (vm);
+
return error;
}
VLIB_INIT_FUNCTION (hicn_init);
-/* *INDENT-OFF* */
-VLIB_PLUGIN_REGISTER() =
-{
- .description = "hICN forwarder"
-};
-/* *INDENT-ON* */
+VLIB_PLUGIN_REGISTER () = { .description = "hICN forwarder" };
/*
* fd.io coding-style-patch-verification: ON
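The CS sizing check reworked in the hicn.c hunk above caps the content store at half of the PIT and at the vlib buffers left after reserving HICN_PARAM_CS_MIN_MBUF. The helper below merely restates that clamping rule in isolation; example_clamp_cs_size() is illustrative and not part of the plugin.

static uword
example_clamp_cs_size (uword cs_size_req, uword pit_size_req, uword n_buffers)
{
  /* Buffers left once the minimum reserved for forwarding is set aside */
  uword cs_buffers =
    (n_buffers > HICN_PARAM_CS_MIN_MBUF) ? n_buffers - HICN_PARAM_CS_MIN_MBUF : 0;

  /* The CS may not exceed half of the PIT nor the spare buffers */
  if (cs_size_req > (pit_size_req / 2) || cs_size_req > cs_buffers)
    cs_size_req = ((pit_size_req / 2) > cs_buffers) ? cs_buffers : pit_size_req / 2;

  return cs_size_req;
}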
diff --git a/hicn-plugin/src/hicn.h b/hicn-plugin/src/hicn.h
index 3d980bd49..7231773d7 100644
--- a/hicn-plugin/src/hicn.h
+++ b/hicn-plugin/src/hicn.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Copyright (c) 2021-2022 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -16,20 +16,7 @@
#ifndef __HICN_H__
#define __HICN_H__
-#define ip_address_t hicn_ip_address_t
-#define ip_address_cmp hicn_ip_address_cmp
-#define ip_prefix_t hicn_ip_prefix_t
-#define ip_prefix_cmp hicn_ip_prefix_cmp
-#undef ip_prefix_len
-#define ip_prefix_len hicn_ip_prefix_len
#include <hicn/hicn.h>
-#undef ip_address_t
-#undef ip_address_cmp
-#undef ip_prefix_t
-#undef ip_prefix_cmp
-#undef ip_prefix_len
-#define ip_prefix_len(_a) (_a)->len
-
#include "faces/face.h"
#include <netinet/in.h>
@@ -43,10 +30,6 @@
* @file
*/
-/* Helper for avoiding warnings about type-punning */
-#define UNION_CAST(x, destType) \
- (((union {__typeof__(x) a; destType b;})x).b)
-
/*
* Update CMakeLists.txt as we have to manually replace the type for
* vppapigen
@@ -54,59 +37,116 @@
typedef u8 weight_t;
#define ISV6(isv6, dov6, dov4) isv6 ? dov6 : dov4
-#define HICN_IS_NAMEHASH_CACHED(b) (((u64)(b->opaque2)[0] != 0) || ((u64)(b->opaque2)[1] != 0))
+#define HICN_IS_NAMEHASH_CACHED(b) \
+ (((u64) (b->opaque2)[0] != 0) || ((u64) (b->opaque2)[1] != 0))
#ifndef VLIB_BUFFER_MIN_CHAIN_SEG_SIZE
#define VLIB_BUFFER_MIN_CHAIN_SEG_SIZE (128)
#endif
-/* vlib_buffer cloning utilities impose that current_lentgh is more that 2*CLIB_CACHE_LINE_BYTES. */
-/* This flag is used to mark packets whose lenght is less that 2*CLIB_CACHE_LINE_BYTES. */
-#define HICN_BUFFER_FLAGS_PKT_LESS_TWO_CL 0x02
-#define HICN_BUFFER_FLAGS_FROM_UDP4_TUNNEL 0x04
-#define HICN_BUFFER_FLAGS_FROM_UDP6_TUNNEL 0x08
+#define MAX_OUT_FACES 8
/* The following is stored in the opaque2 field in the vlib_buffer_t */
typedef struct
{
- /* hash of the name */
- u64 name_hash;
-
- /* ids to prefetch a PIT/CS entry */
- u32 node_id;
- u32 bucket_id;
- u8 hash_entry_id;
- u8 hash_bucket_flags;
-
+ /**
+ * Cached packet info
+ */
+ hicn_packet_buffer_t pkbuf;
+
+ /**
+ * IDs to prefetch a PIT/CS entry (4)
+ */
+ u32 pcs_entry_id;
+
+ /**
+ * DPO/Strategy VFT ID. This is also the DPO type (4)
+ */
+ dpo_type_t vft_id;
+
+ /**
+ * DPO context ID (4)
+ */
+ u32 dpo_ctx_id;
+
+ /**
+ * Cached packet info
+ */
+ u16 payload_type;
+ hicn_lifetime_t lifetime;
+
+ /**
+ * Ingress face (4)
+ */
+ hicn_face_id_t face_id;
+
+ /**
+ * hICN buffer flags (1)
+ */
u8 flags;
- u8 dpo_ctx_id; /* used for data path */
- u8 vft_id; /* " */
-
- hicn_face_id_t face_id; /* ingress iface, sizeof(u32) */
- u32 in_faces_vec_id; /* vector of possible input face for a data packet */
-
- hicn_type_t type;
} hicn_buffer_t;
+STATIC_ASSERT (offsetof (hicn_buffer_t, pcs_entry_id) == 24, "");
+STATIC_ASSERT (offsetof (hicn_buffer_t, vft_id) == 28, "");
+STATIC_ASSERT (offsetof (hicn_buffer_t, dpo_ctx_id) == 32, "");
+STATIC_ASSERT (offsetof (hicn_buffer_t, payload_type) == 36, "");
+STATIC_ASSERT (offsetof (hicn_buffer_t, lifetime) == 40, "");
+STATIC_ASSERT (offsetof (hicn_buffer_t, face_id) == 44, "");
+STATIC_ASSERT (offsetof (hicn_buffer_t, flags) == 48, "");
+// + name = 16+4 = 20
+// opaque : u32[14] = 56
STATIC_ASSERT (sizeof (hicn_buffer_t) <=
- STRUCT_SIZE_OF (vlib_buffer_t, opaque2),
+ STRUCT_SIZE_OF (vlib_buffer_t, opaque2),
"hICN buffer opaque2 meta-data too large for vlib_buffer");
-
always_inline hicn_buffer_t *
-hicn_get_buffer (vlib_buffer_t * b0)
+hicn_get_buffer (vlib_buffer_t *b0)
{
- return (hicn_buffer_t *) & (b0->opaque2[0]);
+ return (hicn_buffer_t *) &(b0->opaque2[0]);
}
+#if 0
always_inline u8
-hicn_is_v6 (hicn_header_t * pkt_hdr)
+hicn_is_v6 (hicn_header_t *pkt_hdr)
{
return ((pkt_hdr->v4.ip.version_ihl >> 4) != 4);
}
-#endif /* __HICN_H__ */
+always_inline hicn_name_t *
+hicn_buffer_get_name (vlib_buffer_t *b)
+{
+ return hicn_packet_get_name(&hicn_get_buffer (b)->pkbuf);
+}
+#endif
+always_inline u8
+hicn_buffer_is_v6 (vlib_buffer_t *b0)
+{
+ hicn_packet_format_t format =
+ hicn_packet_get_format (&hicn_get_buffer (b0)->pkbuf);
+ return HICN_PACKET_FORMAT_IS_IPV6 (format);
+}
+
+always_inline void
+hicn_buffer_set_flags (vlib_buffer_t *b, u8 flags)
+{
+ hicn_buffer_t *hb = hicn_get_buffer (b);
+ hb->flags |= flags;
+}
+
+always_inline hicn_lifetime_t
+hicn_buffer_get_lifetime (vlib_buffer_t *b)
+{
+ return hicn_get_buffer (b)->lifetime;
+}
+
+always_inline hicn_payload_type_t
+hicn_buffer_get_payload_type (vlib_buffer_t *b)
+{
+ return hicn_get_buffer (b)->payload_type;
+}
+
+#endif /* __HICN_H__ */
/*
* fd.io coding-style-patch-verification: ON
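The reworked hicn_buffer_t above caches parsed packet state in the vlib_buffer opaque2 area and exposes small accessor inlines for it. A hypothetical debug helper that only reads fields defined in the hunk:

always_inline void
example_dump_hicn_buffer (vlib_main_t *vm, vlib_buffer_t *b)
{
  hicn_buffer_t *hb = hicn_get_buffer (b);

  /* Helper name and output format are illustrative only */
  vlib_cli_output (vm, "face %u v6 %u lifetime %u payload-type %u flags 0x%x",
                   hb->face_id, hicn_buffer_is_v6 (b),
                   hicn_buffer_get_lifetime (b),
                   hicn_buffer_get_payload_type (b), hb->flags);
}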
diff --git a/hicn-plugin/src/hicn_api.c b/hicn-plugin/src/hicn_api.c
index e6050f96c..28d993a4a 100644
--- a/hicn-plugin/src/hicn_api.c
+++ b/hicn-plugin/src/hicn_api.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Copyright (c) 2021 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -38,14 +38,15 @@
#include "faces/app/face_prod.h"
#include "faces/app/face_cons.h"
#include "route.h"
+#include "mapme.h"
/* define message IDs */
-#include <hicn/hicn.api_enum.h>
-#include <hicn/hicn.api_types.h>
+#include <vpp_plugins/hicn/hicn.api_enum.h>
+#include <vpp_plugins/hicn/hicn.api_types.h>
/* define generated endian-swappers */
#define vl_endianfun
-#include <hicn/hicn_all_api_h.h>
+#include <vpp_plugins/hicn/hicn_all_api_h.h>
#undef vl_endianfun
#define REPLY_MSG_ID_BASE sm->msg_id_base
@@ -57,19 +58,16 @@
* Convert a unix return code to a vnet_api return code. Currently stubby:
* should have more cases.
*/
-always_inline vnet_api_error_t
-hicn_face_api_entry_params_serialize (hicn_face_id_t faceid,
- vl_api_hicn_api_face_params_get_reply_t
- * reply);
-
+always_inline vnet_api_error_t hicn_face_api_entry_params_serialize (
+ hicn_face_id_t faceid, vl_api_hicn_api_face_params_get_reply_t *reply);
/****************** API MESSAGE HANDLERS ******************/
/****** NODE ******/
static void
-vl_api_hicn_api_node_params_set_t_handler (vl_api_hicn_api_node_params_set_t *
- mp)
+vl_api_hicn_api_node_params_set_t_handler (
+ vl_api_hicn_api_node_params_set_t *mp)
{
vl_api_hicn_api_node_params_set_reply_t *rmp;
int rv;
@@ -81,66 +79,58 @@ vl_api_hicn_api_node_params_set_t_handler (vl_api_hicn_api_node_params_set_t *
pit_max_size == -1 ? HICN_PARAM_PIT_ENTRIES_DFLT : pit_max_size;
f64 pit_max_lifetime_sec = mp->pit_max_lifetime_sec;
- pit_max_lifetime_sec =
- pit_max_lifetime_sec ==
- -1 ? HICN_PARAM_PIT_LIFETIME_DFLT_MAX_MS / SEC_MS : pit_max_lifetime_sec;
+ pit_max_lifetime_sec = pit_max_lifetime_sec == -1 ?
+ HICN_PARAM_PIT_LIFETIME_DFLT_MAX_MS / SEC_MS :
+ pit_max_lifetime_sec;
int cs_max_size = clib_net_to_host_i32 (mp->cs_max_size);
cs_max_size = cs_max_size == -1 ? HICN_PARAM_CS_ENTRIES_DFLT : cs_max_size;
- rv = hicn_infra_plugin_enable_disable ((int) (mp->enable_disable),
- pit_max_size,
- pit_max_lifetime_sec,
- cs_max_size,
- ~0);
+ rv =
+ hicn_infra_plugin_enable_disable ((int) (mp->enable_disable), pit_max_size,
+ pit_max_lifetime_sec, cs_max_size, ~0);
- REPLY_MACRO (VL_API_HICN_API_NODE_PARAMS_SET_REPLY /* , rmp, mp, rv */ );
+ REPLY_MACRO (VL_API_HICN_API_NODE_PARAMS_SET_REPLY /* , rmp, mp, rv */);
}
static void
-vl_api_hicn_api_node_params_get_t_handler (vl_api_hicn_api_node_params_get_t *
- mp)
+vl_api_hicn_api_node_params_get_t_handler (
+ vl_api_hicn_api_node_params_get_t *mp)
{
vl_api_hicn_api_node_params_get_reply_t *rmp;
int rv = HICN_ERROR_NONE;
hicn_main_t *sm = &hicn_main;
- /* *INDENT-OFF* */
- REPLY_MACRO2 (VL_API_HICN_API_NODE_PARAMS_GET_REPLY, (
- {
- rmp->is_enabled = sm->is_enabled;
- rmp->feature_cs = HICN_FEATURE_CS;
- rmp->pit_max_size = clib_host_to_net_u32 (hicn_infra_pit_size);
- rmp->pit_max_lifetime_sec = ((f64) sm->pit_lifetime_max_ms) / SEC_MS;
- rmp->cs_max_size = clib_host_to_net_u32 (hicn_infra_cs_size);
- rmp->retval = clib_host_to_net_i32 (rv);
- }));
- /* *INDENT-ON* */
+ REPLY_MACRO2 (VL_API_HICN_API_NODE_PARAMS_GET_REPLY, ({
+ rmp->is_enabled = sm->is_enabled;
+ rmp->feature_cs = HICN_FEATURE_CS;
+ rmp->pit_max_size =
+ clib_host_to_net_u32 (hicn_infra_pit_size);
+ rmp->pit_max_lifetime_sec =
+ ((f64) sm->pit_lifetime_max_ms) / SEC_MS;
+ rmp->cs_max_size = clib_host_to_net_u32 (hicn_infra_cs_size);
+ rmp->retval = clib_host_to_net_i32 (rv);
+ }));
}
static void
-vl_api_hicn_api_node_stats_get_t_handler (vl_api_hicn_api_node_stats_get_t *
- mp)
+vl_api_hicn_api_node_stats_get_t_handler (vl_api_hicn_api_node_stats_get_t *mp)
{
vl_api_hicn_api_node_stats_get_reply_t *rmp;
int rv = HICN_ERROR_NONE;
hicn_main_t *sm = &hicn_main;
- /* *INDENT-OFF* */
- REPLY_MACRO2 (VL_API_HICN_API_NODE_STATS_GET_REPLY, (
- {
- rv = hicn_mgmt_node_stats_get (rmp);
- rmp->retval =clib_host_to_net_i32 (rv);
- }));
- /* *INDENT-ON* */
+ REPLY_MACRO2 (VL_API_HICN_API_NODE_STATS_GET_REPLY, ({
+ rv = hicn_mgmt_node_stats_get (rmp);
+ rmp->retval = clib_host_to_net_i32 (rv);
+ }));
}
-
static void
- vl_api_hicn_api_face_params_get_t_handler
- (vl_api_hicn_api_face_params_get_t * mp)
+vl_api_hicn_api_face_params_get_t_handler (
+ vl_api_hicn_api_face_params_get_t *mp)
{
vl_api_hicn_api_face_params_get_reply_t *rmp;
int rv = 0;
@@ -149,17 +139,14 @@ static void
hicn_face_id_t faceid = clib_net_to_host_u32 (mp->faceid);
- /* *INDENT-OFF* */
- REPLY_MACRO2 (VL_API_HICN_API_FACE_PARAMS_GET_REPLY, (
- {
- rv = hicn_face_api_entry_params_serialize(faceid, rmp);
- rmp->retval = clib_host_to_net_u32(rv);
- }));
- /* *INDENT-ON* */
+ REPLY_MACRO2 (VL_API_HICN_API_FACE_PARAMS_GET_REPLY, ({
+ rv = hicn_face_api_entry_params_serialize (faceid, rmp);
+ rmp->retval = clib_host_to_net_u32 (rv);
+ }));
}
static void
-send_face_details (hicn_face_t * face, vl_api_hicn_face_t * mp)
+send_face_details (hicn_face_t *face, vl_api_hicn_face_t *mp)
{
vnet_main_t *vnm = vnet_get_main ();
@@ -173,13 +160,12 @@ send_face_details (hicn_face_t * face, vl_api_hicn_face_t * mp)
{
sbuf =
format (0, "%U", format_vnet_sw_interface_name, vnm, sw_interface);
- strcpy ((char *) (mp->if_name), (char *) sbuf);
+ strcpy_s ((char *) (mp->if_name), sizeof (mp->if_name), (char *) sbuf);
}
}
static void
-send_faces_details (vl_api_registration_t * reg,
- hicn_face_t * face, u32 context)
+send_faces_details (vl_api_registration_t *reg, hicn_face_t *face, u32 context)
{
vl_api_hicn_api_faces_details_t *mp;
hicn_main_t *hm = &hicn_main;
@@ -195,7 +181,7 @@ send_faces_details (vl_api_registration_t * reg,
}
static void
-vl_api_hicn_api_faces_dump_t_handler (vl_api_hicn_api_faces_dump_t * mp)
+vl_api_hicn_api_faces_dump_t_handler (vl_api_hicn_api_faces_dump_t *mp)
{
hicn_face_t *face;
vl_api_registration_t *reg;
@@ -204,16 +190,14 @@ vl_api_hicn_api_faces_dump_t_handler (vl_api_hicn_api_faces_dump_t * mp)
if (!reg)
return;
- /* *INDENT-OFF* */
- pool_foreach (face, hicn_dpoi_face_pool,
- ({
- send_faces_details (reg, face, mp->context);
- }));
- /* *INDENT-ON* */
+ pool_foreach (face, hicn_dpoi_face_pool)
+ {
+ send_faces_details (reg, face, mp->context);
+ }
}
static void
-vl_api_hicn_api_face_get_t_handler (vl_api_hicn_api_face_get_t * mp)
+vl_api_hicn_api_face_get_t_handler (vl_api_hicn_api_face_get_t *mp)
{
vl_api_hicn_api_face_get_reply_t *rmp;
int rv = 0;
@@ -222,28 +206,25 @@ vl_api_hicn_api_face_get_t_handler (vl_api_hicn_api_face_get_t * mp)
hicn_face_id_t faceid = clib_net_to_host_u32 (mp->faceid);
- /* *INDENT-OFF* */
- REPLY_MACRO2 (VL_API_HICN_API_FACE_GET_REPLY, (
- {
- rv = hicn_dpoi_idx_is_valid(faceid);
- if (rv)
- {
- hicn_face_t * face = hicn_dpoi_get_from_idx(faceid);
- send_face_details(face, &(rmp->face));
- rv = HICN_ERROR_NONE;
- }
- else
- {
- rv = HICN_ERROR_FACE_NOT_FOUND;
- }
- rmp->retval = clib_host_to_net_u32(rv);
- }));
- /* *INDENT-ON* */
+ REPLY_MACRO2 (VL_API_HICN_API_FACE_GET_REPLY, ({
+ rv = hicn_dpoi_idx_is_valid (faceid);
+ if (rv)
+ {
+ hicn_face_t *face = hicn_dpoi_get_from_idx (faceid);
+ send_face_details (face, &(rmp->face));
+ rv = HICN_ERROR_NONE;
+ }
+ else
+ {
+ rv = HICN_ERROR_FACE_NOT_FOUND;
+ }
+ rmp->retval = clib_host_to_net_u32 (rv);
+ }));
}
static void
-send_face_stats_details (vl_api_registration_t * reg,
- hicn_face_t * face, u32 context)
+send_face_stats_details (vl_api_registration_t *reg, hicn_face_t *face,
+ u32 context)
{
vl_api_hicn_api_face_stats_details_t *mp;
hicn_main_t *hm = &hicn_main;
@@ -256,27 +237,27 @@ send_face_stats_details (vl_api_registration_t * reg,
mp->faceid = htonl (hicn_dpoi_get_index (face));
vlib_counter_t v;
- vlib_get_combined_counter (&counters
- [hicn_dpoi_get_index (face) * HICN_N_COUNTER],
- HICN_FACE_COUNTERS_INTEREST_RX, &v);
+ vlib_get_combined_counter (
+ &counters[hicn_dpoi_get_index (face) * HICN_N_COUNTER],
+ HICN_FACE_COUNTERS_INTEREST_RX, &v);
mp->irx_packets = clib_net_to_host_u64 (v.packets);
mp->irx_bytes = clib_net_to_host_u64 (v.bytes);
- vlib_get_combined_counter (&counters
- [hicn_dpoi_get_index (face) * HICN_N_COUNTER],
- HICN_FACE_COUNTERS_INTEREST_TX, &v);
+ vlib_get_combined_counter (
+ &counters[hicn_dpoi_get_index (face) * HICN_N_COUNTER],
+ HICN_FACE_COUNTERS_INTEREST_TX, &v);
mp->itx_packets = clib_net_to_host_u64 (v.packets);
mp->itx_bytes = clib_net_to_host_u64 (v.bytes);
- vlib_get_combined_counter (&counters
- [hicn_dpoi_get_index (face) * HICN_N_COUNTER],
- HICN_FACE_COUNTERS_DATA_RX, &v);
+ vlib_get_combined_counter (
+ &counters[hicn_dpoi_get_index (face) * HICN_N_COUNTER],
+ HICN_FACE_COUNTERS_DATA_RX, &v);
mp->drx_packets = clib_net_to_host_u64 (v.packets);
mp->drx_bytes = clib_net_to_host_u64 (v.bytes);
- vlib_get_combined_counter (&counters
- [hicn_dpoi_get_index (face) * HICN_N_COUNTER],
- HICN_FACE_COUNTERS_DATA_TX, &v);
+ vlib_get_combined_counter (
+ &counters[hicn_dpoi_get_index (face) * HICN_N_COUNTER],
+ HICN_FACE_COUNTERS_DATA_TX, &v);
mp->dtx_packets = clib_net_to_host_u64 (v.packets);
mp->dtx_bytes = clib_net_to_host_u64 (v.bytes);
@@ -284,8 +265,8 @@ send_face_stats_details (vl_api_registration_t * reg,
}
static void
- vl_api_hicn_api_face_stats_dump_t_handler
- (vl_api_hicn_api_face_stats_dump_t * mp)
+vl_api_hicn_api_face_stats_dump_t_handler (
+ vl_api_hicn_api_face_stats_dump_t *mp)
{
hicn_face_t *face;
vl_api_registration_t *reg;
@@ -294,19 +275,16 @@ static void
if (!reg)
return;
- /* *INDENT-OFF* */
- pool_foreach (face, hicn_dpoi_face_pool,
- ({
- send_face_stats_details (reg, face, mp->context);
- }));
- /* *INDENT-ON* */
+ pool_foreach (face, hicn_dpoi_face_pool)
+ {
+ send_face_stats_details (reg, face, mp->context);
+ }
}
-
/****** ROUTE *******/
-static void vl_api_hicn_api_route_get_t_handler
- (vl_api_hicn_api_route_get_t * mp)
+static void
+vl_api_hicn_api_route_get_t_handler (vl_api_hicn_api_route_get_t *mp)
{
vl_api_hicn_api_route_get_reply_t *rmp;
int rv = HICN_ERROR_NONE;
@@ -321,24 +299,25 @@ static void vl_api_hicn_api_route_get_t_handler
rv = hicn_route_get_dpo (&prefix, &hicn_dpo_id, &fib_index);
- /* *INDENT-OFF* */
- REPLY_MACRO2 (VL_API_HICN_API_ROUTE_GET_REPLY, (
- {
+ REPLY_MACRO2 (
+ VL_API_HICN_API_ROUTE_GET_REPLY, ({
if (rv == HICN_ERROR_NONE)
{
- hicn_dpo_ctx = hicn_strategy_dpo_ctx_get(hicn_dpo_id->dpoi_index);
- for (int i = 0; hicn_dpo_ctx != NULL && i < hicn_dpo_ctx->entry_count; i++)
+ hicn_dpo_ctx = hicn_strategy_dpo_ctx_get (hicn_dpo_id->dpoi_index);
+ for (int i = 0;
+ hicn_dpo_ctx != NULL && i < hicn_dpo_ctx->entry_count; i++)
{
- rmp->faceids[i] = hicn_dpo_ctx->next_hops[i];
+ rmp->faceids[i] = hicn_dpo_ctx->next_hops[i];
}
- rmp->strategy_id = clib_host_to_net_u32(hicn_dpo_get_vft_id(hicn_dpo_id));}
+ rmp->strategy_id =
+ clib_host_to_net_u32 (hicn_dpo_get_vft_id (hicn_dpo_id));
+ }
}));
- /* *INDENT-ON* */
}
static void
-send_route_details (vl_api_registration_t * reg,
- const fib_prefix_t * pfx, u32 context)
+send_route_details (vl_api_registration_t *reg, const fib_prefix_t *pfx,
+ u32 context)
{
vl_api_hicn_api_routes_details_t *mp;
hicn_main_t *hm = &hicn_main;
@@ -363,10 +342,8 @@ send_route_details (vl_api_registration_t * reg,
for (int i = 0; hicn_dpo_ctx != NULL && i < hicn_dpo_ctx->entry_count;
i++)
{
- mp->faceids[i] =
- clib_host_to_net_u32 (hicn_dpo_ctx->
- next_hops[i]);
- mp->nfaces++;
+ mp->faceids[i] = clib_host_to_net_u32 (hicn_dpo_ctx->next_hops[i]);
+ mp->nfaces++;
}
mp->strategy_id =
clib_host_to_net_u32 (hicn_dpo_get_vft_id (hicn_dpo_id));
@@ -413,7 +390,7 @@ vl_api_hicn_api_route_dump_walk (fib_node_index_t fei, void *arg)
}
static void
-vl_api_hicn_api_routes_dump_t_handler (vl_api_hicn_api_routes_dump_t * mp)
+vl_api_hicn_api_routes_dump_t_handler (vl_api_hicn_api_routes_dump_t *mp)
{
vl_api_registration_t *reg;
fib_table_t *fib_table;
@@ -429,34 +406,29 @@ vl_api_hicn_api_routes_dump_t_handler (vl_api_hicn_api_routes_dump_t * mp)
if (!reg)
return;
- pool_foreach (fib_table, im->fibs, (
- {
- fib_table_walk (fib_table->ft_index,
- FIB_PROTOCOL_IP4,
- vl_api_hicn_api_route_dump_walk,
- &ctx);}
- ));
-
- pool_foreach (fib_table, im6->fibs, (
- {
- fib_table_walk (fib_table->ft_index,
- FIB_PROTOCOL_IP6,
- vl_api_hicn_api_route_dump_walk,
- &ctx);}
- ));
+ pool_foreach (fib_table, im->fibs)
+ {
+ fib_table_walk (fib_table->ft_index, FIB_PROTOCOL_IP4,
+ vl_api_hicn_api_route_dump_walk, &ctx);
+ }
+
+ pool_foreach (fib_table, im6->fibs)
+ {
+ fib_table_walk (fib_table->ft_index, FIB_PROTOCOL_IP6,
+ vl_api_hicn_api_route_dump_walk, &ctx);
+ }
vec_foreach (lfeip, ctx.feis)
- {
- pfx = fib_entry_get_prefix (*lfeip);
- send_route_details (reg, pfx, mp->context);
- }
+ {
+ pfx = fib_entry_get_prefix (*lfeip);
+ send_route_details (reg, pfx, mp->context);
+ }
vec_free (ctx.feis);
-
}
-static void vl_api_hicn_api_strategies_get_t_handler
- (vl_api_hicn_api_strategies_get_t * mp)
+static void
+vl_api_hicn_api_strategies_get_t_handler (vl_api_hicn_api_strategies_get_t *mp)
{
vl_api_hicn_api_strategies_get_reply_t *rmp;
int rv = HICN_ERROR_NONE;
@@ -465,48 +437,112 @@ static void vl_api_hicn_api_strategies_get_t_handler
int n_strategies = hicn_strategy_get_all_available ();
- /* *INDENT-OFF* */
- REPLY_MACRO2 (VL_API_HICN_API_STRATEGIES_GET_REPLY/* , rmp, mp, rv */ ,(
- {
- int j = 0;
- for (u32 i = 0; i < (u32) n_strategies; i++)
- {
- if (hicn_dpo_strategy_id_is_valid (i) == HICN_ERROR_NONE)
- {
- rmp->strategy_id[j] = clib_host_to_net_u32 (i); j++;}
- }
- rmp->n_strategies = n_strategies;
- }));
- /* *INDENT-ON* */
+ REPLY_MACRO2 (VL_API_HICN_API_STRATEGIES_GET_REPLY /* , rmp, mp, rv */, ({
+ int j = 0;
+ for (u32 i = 0; i < (u32) n_strategies; i++)
+ {
+ if (hicn_dpo_strategy_id_is_valid (i) == HICN_ERROR_NONE)
+ {
+ rmp->strategy_id[j] = clib_host_to_net_u32 (i);
+ j++;
+ }
+ }
+ rmp->n_strategies = n_strategies;
+ }));
+}
+
+static void
+vl_api_hicn_api_strategy_set_t_handler (vl_api_hicn_api_strategy_set_t *mp)
+{
+ vl_api_hicn_api_strategy_set_reply_t *rmp;
+ int rv = HICN_ERROR_NONE;
+ fib_prefix_t prefix;
+ vl_api_hicn_strategy_t strategy_id;
+
+ hicn_main_t *sm = &hicn_main;
+
+ // Decode prefix
+ ip_prefix_decode (&mp->prefix, &prefix);
+
+ // Decode strategy id
+ strategy_id = clib_net_to_host_u32 (mp->strategy_id);
+
+ // Try to set the strategy
+ rv = hicn_route_set_strategy (&prefix, strategy_id);
+
+ REPLY_MACRO (VL_API_HICN_API_STRATEGY_SET_REPLY);
}
-static void vl_api_hicn_api_strategy_get_t_handler
- (vl_api_hicn_api_strategy_get_t * mp)
+static void
+vl_api_hicn_api_strategy_get_t_handler (vl_api_hicn_api_strategy_get_t *mp)
{
vl_api_hicn_api_strategy_get_reply_t *rmp;
int rv = HICN_ERROR_NONE;
hicn_main_t *sm = &hicn_main;
- u32 strategy_id = clib_net_to_host_u32 (mp->strategy_id);
+ vl_api_hicn_strategy_t strategy_id = clib_net_to_host_u32 (mp->strategy_id);
rv = hicn_dpo_strategy_id_is_valid (strategy_id);
- /* *INDENT-OFF* */
- REPLY_MACRO2 (VL_API_HICN_API_STRATEGY_GET_REPLY /* , rmp, mp, rv */ ,(
- {
+ REPLY_MACRO2 (VL_API_HICN_API_STRATEGY_GET_REPLY /* , rmp, mp, rv */, ({
+ if (rv == HICN_ERROR_NONE)
+ {
+ const hicn_strategy_vft_t *hicn_strategy_vft =
+ hicn_dpo_get_strategy_vft (strategy_id);
+ hicn_strategy_vft->hicn_format_strategy (
+ rmp->description, 0);
+ }
+ }));
+}
+
+/************* MAPME ****************/
+
+static void
+vl_api_hicn_api_mapme_default_route_set_t_handler (
+ vl_api_hicn_api_mapme_default_route_set_t *mp)
+{
+ vl_api_hicn_api_mapme_default_route_set_reply_t *rmp;
+ int rv = HICN_ERROR_NONE;
+ fib_prefix_t prefix;
+
+ hicn_main_t *sm = &hicn_main;
+
+ // Decode prefix
+ ip_prefix_decode (&mp->prefix, &prefix);
+
+ // Set the prefix
+ hicn_mapme_main_t *mm = hicn_mapme_get_main ();
+ mm->default_route = prefix;
+
+ REPLY_MACRO (VL_API_HICN_API_MAPME_DEFAULT_ROUTE_SET_REPLY);
+}
+
+static void
+vl_api_hicn_api_mapme_default_route_get_t_handler (
+ vl_api_hicn_api_mapme_default_route_get_t *mp)
+{
+ vl_api_hicn_api_mapme_default_route_get_reply_t *rmp;
+ int rv = HICN_ERROR_NONE;
+
+ hicn_main_t *sm = &hicn_main;
+
+ // Get the prefix
+ hicn_mapme_main_t *mm = hicn_mapme_get_main ();
+
+ REPLY_MACRO2 (
+ VL_API_HICN_API_MAPME_DEFAULT_ROUTE_GET_REPLY /* , rmp, mp, rv */, ({
if (rv == HICN_ERROR_NONE)
{
- const hicn_strategy_vft_t * hicn_strategy_vft =
- hicn_dpo_get_strategy_vft (strategy_id);
- hicn_strategy_vft->hicn_format_strategy (rmp->description, 0);}
+ ip_prefix_encode (&mm->default_route, &rmp->prefix);
+ }
}));
- /* *INDENT-ON* */
}
/************* APP FACE ****************/
-static void vl_api_hicn_api_register_prod_app_t_handler
- (vl_api_hicn_api_register_prod_app_t * mp)
+static void
+vl_api_hicn_api_register_prod_app_t_handler (
+ vl_api_hicn_api_register_prod_app_t *mp)
{
vl_api_hicn_api_register_prod_app_reply_t *rmp;
int rv = HICN_ERROR_NONE;
@@ -523,18 +559,16 @@ static void vl_api_hicn_api_register_prod_app_t_handler
ip46_address_reset (&prod_addr);
rv = hicn_face_prod_add (&prefix, swif, &cs_reserved, &prod_addr, &faceid);
- /* *INDENT-OFF* */
- REPLY_MACRO2 (VL_API_HICN_API_REGISTER_PROD_APP_REPLY, (
- {
- ip_address_encode(&prod_addr, IP46_TYPE_ANY, &rmp->prod_addr);
- rmp->cs_reserved = clib_net_to_host_u32(cs_reserved);
- rmp->faceid = clib_net_to_host_u32(faceid);
- }));
- /* *INDENT-ON* */
+ REPLY_MACRO2 (VL_API_HICN_API_REGISTER_PROD_APP_REPLY, ({
+ ip_address_encode (&prod_addr, IP46_TYPE_ANY,
+ &rmp->prod_addr);
+ rmp->cs_reserved = clib_net_to_host_u32 (cs_reserved);
+ rmp->faceid = clib_net_to_host_u32 (faceid);
+ }));
}
static void
-vl_api_hicn_api_face_prod_del_t_handler (vl_api_hicn_api_face_prod_del_t * mp)
+vl_api_hicn_api_face_prod_del_t_handler (vl_api_hicn_api_face_prod_del_t *mp)
{
vl_api_hicn_api_face_prod_del_reply_t *rmp;
int rv = HICN_ERROR_FACE_NOT_FOUND;
@@ -544,11 +578,12 @@ vl_api_hicn_api_face_prod_del_t_handler (vl_api_hicn_api_face_prod_del_t * mp)
hicn_face_id_t faceid = clib_net_to_host_u32 (mp->faceid);
rv = hicn_face_prod_del (faceid);
- REPLY_MACRO (VL_API_HICN_API_FACE_PROD_DEL_REPLY /* , rmp, mp, rv */ );
+ REPLY_MACRO (VL_API_HICN_API_FACE_PROD_DEL_REPLY /* , rmp, mp, rv */);
}
-static void vl_api_hicn_api_register_cons_app_t_handler
- (vl_api_hicn_api_register_cons_app_t * mp)
+static void
+vl_api_hicn_api_register_cons_app_t_handler (
+ vl_api_hicn_api_register_cons_app_t *mp)
{
vl_api_hicn_api_register_cons_app_reply_t *rmp;
int rv = HICN_ERROR_NONE;
@@ -561,23 +596,20 @@ static void vl_api_hicn_api_register_cons_app_t_handler
u32 faceid1;
u32 faceid2;
- rv =
- hicn_face_cons_add (&src_addr4.ip4, &src_addr6.ip6, swif, &faceid1,
- &faceid2);
+ rv = hicn_face_cons_add (&src_addr4.ip4, &src_addr6.ip6, swif, &faceid1,
+ &faceid2);
- /* *INDENT-OFF* */
- REPLY_MACRO2 (VL_API_HICN_API_REGISTER_CONS_APP_REPLY, (
- {
- ip_address_encode(&src_addr4, IP46_TYPE_ANY, &rmp->src_addr4);
- ip_address_encode(&src_addr6, IP46_TYPE_ANY, &rmp->src_addr6);
- rmp->faceid1 = clib_net_to_host_u32(faceid1);
- rmp->faceid2 = clib_net_to_host_u32(faceid2);
+ REPLY_MACRO2 (
+ VL_API_HICN_API_REGISTER_CONS_APP_REPLY, ({
+ ip_address_encode (&src_addr4, IP46_TYPE_ANY, &rmp->src_addr4);
+ ip_address_encode (&src_addr6, IP46_TYPE_ANY, &rmp->src_addr6);
+ rmp->faceid1 = clib_net_to_host_u32 (faceid1);
+ rmp->faceid2 = clib_net_to_host_u32 (faceid2);
}));
- /* *INDENT-ON* */
}
static void
-vl_api_hicn_api_face_cons_del_t_handler (vl_api_hicn_api_face_cons_del_t * mp)
+vl_api_hicn_api_face_cons_del_t_handler (vl_api_hicn_api_face_cons_del_t *mp)
{
vl_api_hicn_api_face_cons_del_reply_t *rmp;
int rv = HICN_ERROR_FACE_NOT_FOUND;
@@ -587,11 +619,11 @@ vl_api_hicn_api_face_cons_del_t_handler (vl_api_hicn_api_face_cons_del_t * mp)
hicn_face_id_t faceid = clib_net_to_host_u32 (mp->faceid);
rv = hicn_face_cons_del (faceid);
- REPLY_MACRO (VL_API_HICN_API_FACE_CONS_DEL_REPLY /* , rmp, mp, rv */ );
+ REPLY_MACRO (VL_API_HICN_API_FACE_CONS_DEL_REPLY /* , rmp, mp, rv */);
}
-static void vl_api_hicn_api_enable_disable_t_handler
-(vl_api_hicn_api_enable_disable_t * mp)
+static void
+vl_api_hicn_api_enable_disable_t_handler (vl_api_hicn_api_enable_disable_t *mp)
{
vl_api_hicn_api_enable_disable_reply_t *rmp;
int rv = HICN_ERROR_NONE;
@@ -601,23 +633,43 @@ static void vl_api_hicn_api_enable_disable_t_handler
fib_prefix_t prefix;
ip_prefix_decode (&mp->prefix, &prefix);
- switch (clib_net_to_host_u32(mp->enable_disable))
+ hicn_face_id_t *vec_faces = NULL;
+ fib_node_index_t hicn_fib_index;
+
+ switch (clib_net_to_host_u32 (mp->enable_disable))
{
case HICN_ENABLE:
- rv = hicn_route_enable(&prefix);
+ HICN_DEBUG ("Calling hicn enable from API.");
+ rv = hicn_route_enable (&prefix, &hicn_fib_index, &vec_faces);
break;
case HICN_DISABLE:
- rv = hicn_route_disable(&prefix);
+ HICN_DEBUG ("Calling hicn disable from API.");
+ rv = hicn_route_disable (&prefix);
break;
}
- REPLY_MACRO (VL_API_HICN_API_ENABLE_DISABLE_REPLY/* , rmp, mp, rv */ );
+ REPLY_MACRO2 (VL_API_HICN_API_ENABLE_DISABLE_REPLY, ({
+ rmp->nfaces = 0;
+ if (vec_faces != NULL)
+ {
+ hicn_face_id_t *face;
+ vec_foreach (face, vec_faces)
+ {
+ rmp->faceids[rmp->nfaces++] =
+ clib_host_to_net_u32 (*face);
+ }
+
+ vec_free (vec_faces);
+ }
+ }));
}
-/*********************************** UDP TUNNELS ************************************/
+/*********************************** UDP TUNNELS
+ * ************************************/
-static void vl_api_hicn_api_udp_tunnel_add_del_t_handler
-(vl_api_hicn_api_udp_tunnel_add_del_t * mp)
+static void
+vl_api_hicn_api_udp_tunnel_add_del_t_handler (
+ vl_api_hicn_api_udp_tunnel_add_del_t *mp)
{
vl_api_hicn_api_udp_tunnel_add_del_reply_t *rmp;
int rv = HICN_ERROR_NONE;
@@ -637,44 +689,38 @@ static void vl_api_hicn_api_udp_tunnel_add_del_t_handler
goto done;
}
- src_port = clib_net_to_host_u16(mp->src_port);
- dst_port = clib_net_to_host_u16(mp->dst_port);
+ src_port = clib_net_to_host_u16 (mp->src_port);
+ dst_port = clib_net_to_host_u16 (mp->dst_port);
- fib_protocol_t proto = ip46_address_is_ip4(&src_addr) ? FIB_PROTOCOL_IP4 : FIB_PROTOCOL_IP6;
+ fib_protocol_t proto =
+ ip46_address_is_ip4 (&src_addr) ? FIB_PROTOCOL_IP4 : FIB_PROTOCOL_IP6;
index_t fib_index = fib_table_find (proto, HICN_FIB_TABLE);
if (mp->is_add)
{
- uei = udp_tunnel_add(proto,
- fib_index, &src_addr, &dst_addr, src_port, dst_port,
- UDP_ENCAP_FIXUP_NONE);
+ uei = udp_tunnel_add (proto, fib_index, &src_addr, &dst_addr, src_port,
+ dst_port, UDP_ENCAP_FIXUP_NONE);
}
else
{
- udp_tunnel_del(proto,
- fib_index, &src_addr, &dst_addr, src_port, dst_port,
- UDP_ENCAP_FIXUP_NONE);
+ udp_tunnel_del (proto, fib_index, &src_addr, &dst_addr, src_port,
+ dst_port, UDP_ENCAP_FIXUP_NONE);
}
+done:
- done:
-
- /* *INDENT-OFF* */
- REPLY_MACRO2 (VL_API_HICN_API_UDP_TUNNEL_ADD_DEL_REPLY, (
- {
- rmp->uei = clib_host_to_net_u32(uei);
- }));
- /* *INDENT-ON* */
+ REPLY_MACRO2 (VL_API_HICN_API_UDP_TUNNEL_ADD_DEL_REPLY,
+ ({ rmp->uei = clib_host_to_net_u32 (uei); }));
}
/************************************************************************************/
-#include <hicn/hicn.api.c>
+#include <vpp_plugins/hicn/hicn.api.c>
/* Set up the API message handling tables */
clib_error_t *
-hicn_api_plugin_hookup (vlib_main_t * vm)
+hicn_api_plugin_hookup (vlib_main_t *vm)
{
hicn_main_t *hm = &hicn_main;
@@ -682,8 +728,6 @@ hicn_api_plugin_hookup (vlib_main_t * vm)
return 0;
}
-
-
/******************* SUPPORTING FUNCTIONS *******************/
/*
@@ -691,9 +735,8 @@ hicn_api_plugin_hookup (vlib_main_t * vm)
 * assuming only ip faces here. To be completed with other types of faces
*/
vnet_api_error_t
-hicn_face_api_entry_params_serialize (hicn_face_id_t faceid,
- vl_api_hicn_api_face_params_get_reply_t
- * reply)
+hicn_face_api_entry_params_serialize (
+ hicn_face_id_t faceid, vl_api_hicn_api_face_params_get_reply_t *reply)
{
int rv = HICN_ERROR_NONE;
@@ -706,8 +749,7 @@ hicn_face_api_entry_params_serialize (hicn_face_id_t faceid,
if (face != NULL)
{
- ip_address_encode (&face->nat_addr, IP46_TYPE_ANY,
- &reply->nat_addr);
+ ip_address_encode (&face->nat_addr, IP46_TYPE_ANY, &reply->nat_addr);
reply->swif = clib_host_to_net_u32 (face->sw_if);
reply->flags = clib_host_to_net_u32 (face->flags);
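One pattern worth noting in the hicn_api.c changes above is the updated pool_foreach spelling: the old macro form took the loop body as a parenthesized code-block argument, while the newer VPP form takes only the element and pool and is followed by the body as an ordinary statement. A minimal sketch of the two spellings, with placeholder names (elt, pool, process):

    /* Old form: body passed as a macro argument */
    pool_foreach (elt, pool, ({
      process (elt);
    }));

    /* Newer form used after this change: body follows the macro call */
    pool_foreach (elt, pool)
      {
        process (elt);
      }
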
diff --git a/hicn-plugin/src/hicn_api_test.c b/hicn-plugin/src/hicn_api_test.c
index e4704e8ea..3ca48d98a 100644
--- a/hicn-plugin/src/hicn_api_test.c
+++ b/hicn-plugin/src/hicn_api_test.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Copyright (c) 2021 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -31,182 +31,12 @@
#include <vpp/api/vpe.api_types.h>
-#include <hicn/hicn_api.h>
-#include "error.h"
-
+#include <vpp_plugins/hicn/hicn_api.h>
+#include <vpp_plugins/hicn/error.h>
+#include <vpp_plugins/hicn/hicn_enums.h>
/* Declare message IDs */
-#include "hicn_msg_enum.h"
-
-/* SUPPORTING FUNCTIONS NOT LOADED BY VPP_API_TEST */
-uword
-unformat_ip46_address (unformat_input_t * input, va_list * args)
-{
- ip46_address_t *ip46 = va_arg (*args, ip46_address_t *);
- ip46_type_t type = va_arg (*args, ip46_type_t);
- if ((type != IP46_TYPE_IP6) &&
- unformat (input, "%U", unformat_ip4_address, &ip46->ip4))
- {
- ip46_address_mask_ip4 (ip46);
- return 1;
- }
- else if ((type != IP46_TYPE_IP4) &&
- unformat (input, "%U", unformat_ip6_address, &ip46->ip6))
- {
- return 1;
- }
- return 0;
-}
-
-static ip46_type_t
-ip_address_union_decode (const vl_api_address_union_t * in,
- vl_api_address_family_t af, ip46_address_t * out)
-{
- ip46_type_t type;
-
- switch (clib_net_to_host_u32 (af))
- {
- case ADDRESS_IP4:
- clib_memset (out, 0, sizeof (*out));
- clib_memcpy (&out->ip4, &in->ip4, sizeof (out->ip4));
- type = IP46_TYPE_IP4;
- break;
- case ADDRESS_IP6:
- clib_memcpy (&out->ip6, &in->ip6, sizeof (out->ip6));
- type = IP46_TYPE_IP6;
- break;
- default:
- ASSERT (!"Unkown address family in API address type");
- type = IP46_TYPE_ANY;
- break;
- }
-
- return type;
-}
-
-void
-ip6_address_encode (const ip6_address_t * in, vl_api_ip6_address_t out)
-{
- clib_memcpy (out, in, sizeof (*in));
-}
-
-void
-ip6_address_decode (const vl_api_ip6_address_t in, ip6_address_t * out)
-{
- clib_memcpy (out, in, sizeof (*out));
-}
-
-void
-ip4_address_encode (const ip4_address_t * in, vl_api_ip4_address_t out)
-{
- clib_memcpy (out, in, sizeof (*in));
-}
-
-void
-ip4_address_decode (const vl_api_ip4_address_t in, ip4_address_t * out)
-{
- clib_memcpy (out, in, sizeof (*out));
-}
-
-static void
-ip_address_union_encode (const ip46_address_t * in,
- vl_api_address_family_t af,
- vl_api_address_union_t * out)
-{
- if (ADDRESS_IP6 == clib_net_to_host_u32 (af))
- ip6_address_encode (&in->ip6, out->ip6);
- else
- ip4_address_encode (&in->ip4, out->ip4);
-}
-
-ip46_type_t
-ip_address_decode (const vl_api_address_t * in, ip46_address_t * out)
-{
- return (ip_address_union_decode (&in->un, in->af, out));
-}
-
-void
-ip_address_encode (const ip46_address_t * in, ip46_type_t type,
- vl_api_address_t * out)
-{
- switch (type)
- {
- case IP46_TYPE_IP4:
- out->af = clib_net_to_host_u32 (ADDRESS_IP4);
- break;
- case IP46_TYPE_IP6:
- out->af = clib_net_to_host_u32 (ADDRESS_IP6);
- break;
- case IP46_TYPE_ANY:
- if (ip46_address_is_ip4 (in))
- out->af = clib_net_to_host_u32 (ADDRESS_IP4);
- else
- out->af = clib_net_to_host_u32 (ADDRESS_IP6);
- break;
- }
- ip_address_union_encode (in, out->af, &out->un);
-}
-
-fib_protocol_t
-fib_proto_from_ip46 (ip46_type_t iproto)
-{
- switch (iproto)
- {
- case IP46_TYPE_IP4:
- return FIB_PROTOCOL_IP4;
- case IP46_TYPE_IP6:
- return FIB_PROTOCOL_IP6;
- case IP46_TYPE_ANY:
- ASSERT (0);
- return FIB_PROTOCOL_IP4;
- }
-
- ASSERT (0);
- return FIB_PROTOCOL_IP4;
-}
-
-ip46_type_t
-fib_proto_to_ip46 (fib_protocol_t fproto)
-{
- switch (fproto)
- {
- case FIB_PROTOCOL_IP4:
- return (IP46_TYPE_IP4);
- case FIB_PROTOCOL_IP6:
- return (IP46_TYPE_IP6);
- case FIB_PROTOCOL_MPLS:
- return (IP46_TYPE_ANY);
- }
- ASSERT (0);
- return (IP46_TYPE_ANY);
-}
-
-void
-ip_prefix_decode (const vl_api_prefix_t * in, fib_prefix_t * out)
-{
- switch (clib_net_to_host_u32 (in->address.af))
- {
- case ADDRESS_IP4:
- out->fp_proto = FIB_PROTOCOL_IP4;
- break;
- case ADDRESS_IP6:
- out->fp_proto = FIB_PROTOCOL_IP6;
- break;
- }
- out->fp_len = in->len;
- out->___fp___pad = 0;
- ip_address_decode (&in->address, &out->fp_addr);
-}
-
-void
-ip_prefix_encode (const fib_prefix_t * in, vl_api_prefix_t * out)
-{
- out->len = in->fp_len;
- ip_address_encode (&in->fp_addr,
- fib_proto_to_ip46 (in->fp_proto), &out->address);
-}
-
-/////////////////////////////////////////////////////
+#include <vpp_plugins/hicn/hicn_msg_enum.h>
#define HICN_FACE_NULL ~0
@@ -220,24 +50,26 @@ typedef struct
hicn_test_main_t hicn_test_main;
-#define foreach_standard_reply_retval_handler \
-_(hicn_api_node_params_set_reply) \
-_(hicn_api_enable_disable_reply)
-
-#define _(n) \
- static void vl_api_##n##_t_handler \
- (vl_api_##n##_t * mp) \
- { \
- vat_main_t * vam = hicn_test_main.vat_main; \
- i32 retval = ntohl(mp->retval); \
- if (vam->async_mode) { \
- vam->async_errors += (retval < 0); \
- } else { \
- fformat (vam->ofp,"%s\n", get_error_string(retval));\
- vam->retval = retval; \
- vam->result_ready = 1; \
- } \
- }
+#define foreach_standard_reply_retval_handler \
+ _ (hicn_api_node_params_set_reply) \
+ _ (hicn_api_enable_disable_reply)
+
+#define _(n) \
+ static void vl_api_##n##_t_handler (vl_api_##n##_t *mp) \
+ { \
+ vat_main_t *vam = hicn_test_main.vat_main; \
+ i32 retval = ntohl (mp->retval); \
+ if (vam->async_mode) \
+ { \
+ vam->async_errors += (retval < 0); \
+ } \
+ else \
+ { \
+ fformat (vam->ofp, "%s\n", get_error_string (retval)); \
+ vam->retval = retval; \
+ vam->result_ready = 1; \
+ } \
+ }
foreach_standard_reply_retval_handler;
#undef _
@@ -245,23 +77,28 @@ foreach_standard_reply_retval_handler;
* Table of message reply handlers, must include boilerplate handlers we just
* generated
*/
-#define foreach_vpe_api_reply_msg \
-_(HICN_API_NODE_PARAMS_SET_REPLY, hicn_api_node_params_set_reply) \
-_(HICN_API_NODE_PARAMS_GET_REPLY, hicn_api_node_params_get_reply) \
-_(HICN_API_NODE_STATS_GET_REPLY, hicn_api_node_stats_get_reply) \
-_(HICN_API_FACE_GET_REPLY, hicn_api_face_get_reply) \
-_(HICN_API_FACES_DETAILS, hicn_api_faces_details) \
-_(HICN_API_FACE_STATS_DETAILS, hicn_api_face_stats_details) \
-_(HICN_API_FACE_PARAMS_GET_REPLY, hicn_api_face_params_get_reply) \
-_(HICN_API_ROUTE_GET_REPLY, hicn_api_route_get_reply) \
-_(HICN_API_ROUTES_DETAILS, hicn_api_routes_details) \
-_(HICN_API_STRATEGIES_GET_REPLY, hicn_api_strategies_get_reply) \
-_(HICN_API_STRATEGY_GET_REPLY, hicn_api_strategy_get_reply) \
-_(HICN_API_ENABLE_DISABLE_REPLY, hicn_api_enable_disable_reply) \
-_(HICN_API_UDP_TUNNEL_ADD_DEL_REPLY, hicn_api_udp_tunnel_add_del_reply)
+#define foreach_vpe_api_reply_msg \
+ _ (HICN_API_NODE_PARAMS_SET_REPLY, hicn_api_node_params_set_reply) \
+ _ (HICN_API_NODE_PARAMS_GET_REPLY, hicn_api_node_params_get_reply) \
+ _ (HICN_API_NODE_STATS_GET_REPLY, hicn_api_node_stats_get_reply) \
+ _ (HICN_API_FACE_GET_REPLY, hicn_api_face_get_reply) \
+ _ (HICN_API_FACES_DETAILS, hicn_api_faces_details) \
+ _ (HICN_API_FACE_STATS_DETAILS, hicn_api_face_stats_details) \
+ _ (HICN_API_FACE_PARAMS_GET_REPLY, hicn_api_face_params_get_reply) \
+ _ (HICN_API_ROUTE_GET_REPLY, hicn_api_route_get_reply) \
+ _ (HICN_API_ROUTES_DETAILS, hicn_api_routes_details) \
+ _ (HICN_API_STRATEGIES_GET_REPLY, hicn_api_strategies_get_reply) \
+ _ (HICN_API_STRATEGY_SET_REPLY, hicn_api_strategy_set_reply) \
+ _ (HICN_API_STRATEGY_GET_REPLY, hicn_api_strategy_get_reply) \
+ _ (HICN_API_ENABLE_DISABLE_REPLY, hicn_api_enable_disable_reply) \
+ _ (HICN_API_UDP_TUNNEL_ADD_DEL_REPLY, hicn_api_udp_tunnel_add_del_reply) \
+ _ (HICN_API_MAPME_DEFAULT_ROUTE_SET_REPLY, \
+ hicn_api_mapme_default_route_set_reply) \
+ _ (HICN_API_MAPME_DEFAULT_ROUTE_GET_REPLY, \
+ hicn_api_mapme_default_route_get_reply)
static int
-api_hicn_api_node_params_set (vat_main_t * vam)
+api_hicn_api_node_params_set (vat_main_t *vam)
{
unformat_input_t *input = vam->input;
int enable_disable = 1;
@@ -279,13 +116,16 @@ api_hicn_api_node_params_set (vat_main_t * vam)
enable_disable = 0;
}
else if (unformat (input, "PIT size %d", &pit_size))
- {;
+ {
+ ;
}
else if (unformat (input, "CS size %d", &cs_size))
- {;
+ {
+ ;
}
else if (unformat (input, "PIT maxlife %f", &pit_max_lifetime_sec))
- {;
+ {
+ ;
}
else
{
@@ -295,7 +135,7 @@ api_hicn_api_node_params_set (vat_main_t * vam)
/* Construct the API message */
M (HICN_API_NODE_PARAMS_SET, mp);
- mp->enable_disable = clib_host_to_net_u32(enable_disable);
+ mp->enable_disable = clib_host_to_net_u32 (enable_disable);
mp->pit_max_size = clib_host_to_net_i32 (pit_size);
mp->cs_max_size = clib_host_to_net_i32 (cs_size);
mp->pit_max_lifetime_sec = pit_max_lifetime_sec;
@@ -310,12 +150,12 @@ api_hicn_api_node_params_set (vat_main_t * vam)
}
static int
-api_hicn_api_node_params_get (vat_main_t * vam)
+api_hicn_api_node_params_get (vat_main_t *vam)
{
vl_api_hicn_api_node_params_get_t *mp;
int ret;
- //Construct the API message
+ // Construct the API message
M (HICN_API_NODE_PARAMS_GET, mp);
/* send it... */
@@ -328,8 +168,8 @@ api_hicn_api_node_params_get (vat_main_t * vam)
}
static void
- vl_api_hicn_api_node_params_get_reply_t_handler
- (vl_api_hicn_api_node_params_get_reply_t * mp)
+vl_api_hicn_api_node_params_get_reply_t_handler (
+ vl_api_hicn_api_node_params_get_reply_t *mp)
{
vat_main_t *vam = hicn_test_main.vat_main;
i32 retval = ntohl (mp->retval);
@@ -344,7 +184,7 @@ static void
if (vam->retval < 0)
{
- //vpp_api_test infra will also print out string form of error
+ // vpp_api_test infra will also print out string form of error
fformat (vam->ofp, " (API call error: %d)\n", vam->retval);
return;
}
@@ -354,14 +194,13 @@ static void
" PIT size %d\n"
" PIT lifetime dflt %.3f, min %.3f, max %.3f\n"
" CS size %d\n",
- mp->is_enabled,
- mp->feature_cs,
- clib_net_to_host_u32 (mp->pit_max_size),
- mp->pit_max_lifetime_sec, clib_net_to_host_u32 (mp->cs_max_size));
+ mp->is_enabled, mp->feature_cs,
+ clib_net_to_host_u32 (mp->pit_max_size), mp->pit_max_lifetime_sec,
+ clib_net_to_host_u32 (mp->cs_max_size));
}
static int
-api_hicn_api_node_stats_get (vat_main_t * vam)
+api_hicn_api_node_stats_get (vat_main_t *vam)
{
vl_api_hicn_api_node_stats_get_t *mp;
int ret;
@@ -379,8 +218,8 @@ api_hicn_api_node_stats_get (vat_main_t * vam)
}
static void
- vl_api_hicn_api_node_stats_get_reply_t_handler
- (vl_api_hicn_api_node_stats_get_reply_t * rmp)
+vl_api_hicn_api_node_stats_get_reply_t_handler (
+ vl_api_hicn_api_node_stats_get_reply_t *rmp)
{
vat_main_t *vam = hicn_test_main.vat_main;
i32 retval = ntohl (rmp->retval);
@@ -395,13 +234,13 @@ static void
if (vam->retval < 0)
{
- //vpp_api_test infra will also print out string form of error
+ // vpp_api_test infra will also print out string form of error
fformat (vam->ofp, " (API call error: %d)\n", vam->retval);
return;
}
else
{
- fformat (vam->ofp, //compare hicn_cli_show_command_fn block:should match
+ fformat (vam->ofp, // compare hicn_cli_show_command_fn block:should match
" PIT entries (now): %d\n"
" CS entries (now): %d\n"
" Forwarding statistics:"
@@ -437,7 +276,7 @@ static void
}
static int
-api_hicn_api_face_params_get (vat_main_t * vam)
+api_hicn_api_face_params_get (vat_main_t *vam)
{
unformat_input_t *input = vam->input;
vl_api_hicn_api_face_params_get_t *mp;
@@ -446,7 +285,8 @@ api_hicn_api_face_params_get (vat_main_t * vam)
while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
{
if (unformat (input, "face %d", &faceid))
- {;
+ {
+ ;
}
else
{
@@ -454,28 +294,28 @@ api_hicn_api_face_params_get (vat_main_t * vam)
}
}
- //Check for presence of face ID
+ // Check for presence of face ID
if (faceid == HICN_FACE_NULL)
{
clib_warning ("Please specify face ID");
return 1;
}
- //Construct the API message
+ // Construct the API message
M (HICN_API_FACE_PARAMS_GET, mp);
mp->faceid = clib_host_to_net_u32 (faceid);
- //send it...
+ // send it...
S (mp);
- //Wait for a reply...
+ // Wait for a reply...
W (ret);
return ret;
}
static void
- vl_api_hicn_api_face_params_get_reply_t_handler
- (vl_api_hicn_api_face_params_get_reply_t * rmp)
+vl_api_hicn_api_face_params_get_reply_t_handler (
+ vl_api_hicn_api_face_params_get_reply_t *rmp)
{
vat_main_t *vam = hicn_test_main.vat_main;
i32 retval = ntohl (rmp->retval);
@@ -492,24 +332,22 @@ static void
if (vam->retval < 0)
{
- //vpp_api_test infra will also print out string form of error
+ // vpp_api_test infra will also print out string form of error
fformat (vam->ofp, " (API call error: %d)\n", vam->retval);
return;
}
vec_reset_length (sbuf);
ip_address_decode (&rmp->nat_addr, &nat_addr);
- sbuf =
- format (0, "nat_addr %U", format_ip46_address,
- &nat_addr, 0 /*IP46_ANY_TYPE */);
+ sbuf = format (0, "nat_addr %U", format_ip46_address, &nat_addr,
+ 0 /*IP46_ANY_TYPE */);
- fformat (vam->ofp, "%s swif %d flags %d\n",
- sbuf,
+ fformat (vam->ofp, "%s swif %d flags %d\n", sbuf,
clib_net_to_host_u32 (rmp->swif),
clib_net_to_host_i32 (rmp->flags));
}
static void
-format_face (vl_api_hicn_face_t * rmp)
+format_face (vl_api_hicn_face_t *rmp)
{
vat_main_t *vam = hicn_test_main.vat_main;
u8 *sbuf = 0;
@@ -519,18 +357,16 @@ format_face (vl_api_hicn_face_t * rmp)
vec_reset_length (sbuf);
ip_address_decode (&rmp->nat_addr, &nat_addr);
- sbuf =
- format (0, "nat_addr %U", format_ip46_address,
- &local_addr, 0 /*IP46_ANY_TYPE */);
+ sbuf = format (0, "nat_addr %U", format_ip46_address, &local_addr,
+ 0 /*IP46_ANY_TYPE */);
- fformat (vam->ofp, "%s swif %d flags %d name %s\n",
- sbuf,
- clib_net_to_host_u32 (rmp->swif),
- clib_net_to_host_i32 (rmp->flags), rmp->if_name);
+ fformat (vam->ofp, "%s swif %d flags %d name %s\n", sbuf,
+ clib_net_to_host_u32 (rmp->swif), clib_net_to_host_i32 (rmp->flags),
+ rmp->if_name);
}
static int
-api_hicn_api_faces_dump (vat_main_t * vam)
+api_hicn_api_faces_dump (vat_main_t *vam)
{
hicn_test_main_t *hm = &hicn_test_main;
vl_api_hicn_api_faces_dump_t *mp;
@@ -564,14 +400,13 @@ api_hicn_api_faces_dump (vat_main_t * vam)
}
static void
- vl_api_hicn_api_faces_details_t_handler
- (vl_api_hicn_api_faces_details_t * mp)
+vl_api_hicn_api_faces_details_t_handler (vl_api_hicn_api_faces_details_t *mp)
{
format_face (&(mp->face));
}
static int
-api_hicn_api_face_get (vat_main_t * vam)
+api_hicn_api_face_get (vat_main_t *vam)
{
unformat_input_t *input = vam->input;
vl_api_hicn_api_face_get_t *mp;
@@ -580,7 +415,8 @@ api_hicn_api_face_get (vat_main_t * vam)
while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
{
if (unformat (input, "face %d", &faceid))
- {;
+ {
+ ;
}
else
{
@@ -588,29 +424,28 @@ api_hicn_api_face_get (vat_main_t * vam)
}
}
- //Check for presence of face ID
+ // Check for presence of face ID
if (faceid == HICN_FACE_NULL)
{
clib_warning ("Please specify face ID");
return 1;
}
- //Construct the API message
+ // Construct the API message
M (HICN_API_FACE_GET, mp);
mp->faceid = clib_host_to_net_u32 (faceid);
- //send it...
+ // send it...
S (mp);
- //Wait for a reply...
+ // Wait for a reply...
W (ret);
return ret;
}
-
static void
- vl_api_hicn_api_face_get_reply_t_handler
- (vl_api_hicn_api_face_get_reply_t * rmp)
+vl_api_hicn_api_face_get_reply_t_handler (
+ vl_api_hicn_api_face_get_reply_t *rmp)
{
vat_main_t *vam = hicn_test_main.vat_main;
@@ -626,17 +461,15 @@ static void
if (vam->retval < 0)
{
- //vpp_api_test infra will also print out string form of error
+ // vpp_api_test infra will also print out string form of error
fformat (vam->ofp, " (API call error: %d)\n", vam->retval);
return;
}
format_face (&(rmp->face));
}
-
-
static int
-api_hicn_api_face_stats_dump (vat_main_t * vam)
+api_hicn_api_face_stats_dump (vat_main_t *vam)
{
hicn_test_main_t *hm = &hicn_test_main;
vl_api_hicn_api_face_stats_dump_t *mp;
@@ -671,12 +504,13 @@ api_hicn_api_face_stats_dump (vat_main_t * vam)
/* face_stats-details message handler */
static void
- vl_api_hicn_api_face_stats_details_t_handler
- (vl_api_hicn_api_face_stats_details_t * mp)
+vl_api_hicn_api_face_stats_details_t_handler (
+ vl_api_hicn_api_face_stats_details_t *mp)
{
vat_main_t *vam = hicn_test_main.vat_main;
- fformat (vam->ofp, "face id %d\n"
+ fformat (vam->ofp,
+ "face id %d\n"
" interest rx packets %16Ld\n"
" bytes %16Ld\n"
" interest tx packets %16Ld\n"
@@ -697,7 +531,7 @@ static void
}
static int
-api_hicn_api_route_get (vat_main_t * vam)
+api_hicn_api_route_get (vat_main_t *vam)
{
unformat_input_t *input = vam->input;
@@ -709,7 +543,8 @@ api_hicn_api_route_get (vat_main_t * vam)
{
if (unformat (input, "prefix %U/%d", unformat_ip46_address,
&prefix.fp_addr, IP46_TYPE_ANY, &prefix.fp_len))
- {;
+ {
+ ;
}
else
{
@@ -718,29 +553,29 @@ api_hicn_api_route_get (vat_main_t * vam)
}
/* Check parse */
- if (((prefix.fp_addr.as_u64[0] == 0) && (prefix.fp_addr.as_u64[1] == 0))
- || (prefix.fp_len == 0))
+ if (((prefix.fp_addr.as_u64[0] == 0) && (prefix.fp_addr.as_u64[1] == 0)) ||
+ (prefix.fp_len == 0))
{
clib_warning ("Please specify a valid prefix...");
return 1;
}
- //Construct the API message
+ // Construct the API message
M (HICN_API_ROUTE_GET, mp);
if (!ip46_address_is_ip4 (&(prefix.fp_addr)))
prefix.fp_proto = fib_proto_from_ip46 (IP46_TYPE_IP6);
ip_prefix_encode (&prefix, &mp->prefix);
- //send it...
+ // send it...
S (mp);
- //Wait for a reply...
+ // Wait for a reply...
W (ret);
return ret;
}
static int
-api_hicn_api_routes_dump (vat_main_t * vam)
+api_hicn_api_routes_dump (vat_main_t *vam)
{
hicn_test_main_t *hm = &hicn_test_main;
@@ -775,8 +610,8 @@ api_hicn_api_routes_dump (vat_main_t * vam)
}
static void
-vl_api_hicn_api_route_get_reply_t_handler (vl_api_hicn_api_route_get_reply_t *
- rmp)
+vl_api_hicn_api_route_get_reply_t_handler (
+ vl_api_hicn_api_route_get_reply_t *rmp)
{
vat_main_t *vam = hicn_test_main.vat_main;
i32 retval = ntohl (rmp->retval);
@@ -792,7 +627,7 @@ vl_api_hicn_api_route_get_reply_t_handler (vl_api_hicn_api_route_get_reply_t *
if (vam->retval < 0)
{
- //vpp_api_test infra will also print out string form of error
+ // vpp_api_test infra will also print out string form of error
fformat (vam->ofp, " (API call error: %d)\n", vam->retval);
return;
}
@@ -808,8 +643,7 @@ vl_api_hicn_api_route_get_reply_t_handler (vl_api_hicn_api_route_get_reply_t *
if (faceid != HICN_FACE_NULL)
{
sbuf =
- format (sbuf, "faceid %d",
- clib_net_to_host_u32 (rmp->faceids[i]));
+ format (sbuf, "faceid %d", clib_net_to_host_u32 (rmp->faceids[i]));
i++;
}
else
@@ -818,14 +652,13 @@ vl_api_hicn_api_route_get_reply_t_handler (vl_api_hicn_api_route_get_reply_t *
}
}
- fformat (vam->ofp, "%s\n Strategy: %d\n",
- sbuf, clib_net_to_host_u32 (rmp->strategy_id));
+ fformat (vam->ofp, "%s\n Strategy: %d\n", sbuf,
+ clib_net_to_host_u32 (rmp->strategy_id));
}
/* face_stats-details message handler */
static void
- vl_api_hicn_api_routes_details_t_handler
- (vl_api_hicn_api_routes_details_t * mp)
+vl_api_hicn_api_routes_details_t_handler (vl_api_hicn_api_routes_details_t *mp)
{
vat_main_t *vam = hicn_test_main.vat_main;
fib_prefix_t prefix;
@@ -834,9 +667,8 @@ static void
vec_reset_length (sbuf);
ip_prefix_decode (&mp->prefix, &prefix);
- sbuf =
- format (sbuf, "Prefix: %U/%u\n", format_ip46_address, &prefix.fp_addr, 0,
- prefix.fp_len);
+ sbuf = format (sbuf, "Prefix: %U/%u\n", format_ip46_address, &prefix.fp_addr,
+ 0, prefix.fp_len);
sbuf = format (sbuf, "Faces: \n");
for (int i = 0; i < mp->nfaces; i++)
@@ -845,17 +677,17 @@ static void
sbuf = format (sbuf, " faceid %d\n", faceid);
}
- fformat (vam->ofp, "%sStrategy: %d\n",
- sbuf, clib_net_to_host_u32 (mp->strategy_id));
+ fformat (vam->ofp, "%sStrategy: %d\n", sbuf,
+ clib_net_to_host_u32 (mp->strategy_id));
}
static int
-api_hicn_api_strategies_get (vat_main_t * vam)
+api_hicn_api_strategies_get (vat_main_t *vam)
{
vl_api_hicn_api_strategies_get_t *mp;
int ret;
- //TODO
+ // TODO
/* Construct the API message */
M (HICN_API_STRATEGIES_GET, mp);
@@ -869,8 +701,8 @@ api_hicn_api_strategies_get (vat_main_t * vam)
}
static void
- vl_api_hicn_api_strategies_get_reply_t_handler
- (vl_api_hicn_api_strategies_get_reply_t * mp)
+vl_api_hicn_api_strategies_get_reply_t_handler (
+ vl_api_hicn_api_strategies_get_reply_t *mp)
{
vat_main_t *vam = hicn_test_main.vat_main;
i32 retval = ntohl (mp->retval);
@@ -886,7 +718,7 @@ static void
if (vam->retval < 0)
{
- //vpp_api_test infra will also print out string form of error
+ // vpp_api_test infra will also print out string form of error
fformat (vam->ofp, " (API call error: %d)\n", vam->retval);
return;
}
@@ -905,18 +737,200 @@ static void
}
static int
-api_hicn_api_strategy_get (vat_main_t * vam)
+api_hicn_api_strategy_set (vat_main_t *vam)
+{
+ unformat_input_t *input = vam->input;
+ vl_api_hicn_api_strategy_set_t *mp;
+ int ret;
+ int addpfx = -1;
+ fib_prefix_t fib_prefix;
+ ip46_address_t address;
+ int plen;
+
+ vl_api_hicn_strategy_t strategy_id = HICN_STRATEGY_NULL;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "strategy %d", &strategy_id))
+ {
+ addpfx = 2;
+ }
+ else if (addpfx != -1 &&
+ unformat (input, "prefix %U/%d", unformat_ip46_address,
+ &address, IP46_TYPE_ANY, &plen))
+ {
+ ;
+ }
+ else
+ {
+ clib_warning ("Error parsing input string.");
+ return 1;
+ }
+ }
+
+ if (strategy_id == HICN_STRATEGY_NULL)
+ {
+ clib_warning ("Please specify strategy id...");
+ return 1;
+ }
+
+ // Get fib prefix
+ fib_prefix_from_ip46_addr (&address, &fib_prefix);
+ fib_prefix.fp_len = plen;
+
+ /* Construct the API message */
+ M (HICN_API_STRATEGY_SET, mp);
+ mp->strategy_id = clib_host_to_net_u32 (strategy_id);
+ ip_prefix_encode (&fib_prefix, &mp->prefix);
+
+ /* send it... */
+ S (mp);
+
+ /* Wait for a reply... */
+ W (ret);
+
+ return ret;
+}
+
+static void
+vl_api_hicn_api_mapme_default_route_set_reply_t_handler (
+ vl_api_hicn_api_mapme_default_route_set_reply_t *mp)
+{
+ vat_main_t *vam = hicn_test_main.vat_main;
+ i32 retval = ntohl (mp->retval);
+
+ if (vam->async_mode)
+ {
+ vam->async_errors += (retval < 0);
+ return;
+ }
+
+ vam->retval = retval;
+ vam->result_ready = 1;
+
+ if (vam->retval < 0)
+ {
+ fformat (vam->ofp, " (API call error: %d)\n", vam->retval);
+ }
+}
+
+static void
+vl_api_hicn_api_mapme_default_route_get_reply_t_handler (
+ vl_api_hicn_api_mapme_default_route_get_reply_t *mp)
+{
+ vat_main_t *vam = hicn_test_main.vat_main;
+ i32 retval = ntohl (mp->retval);
+ fib_prefix_t prefix;
+ u8 *sbuf = 0;
+
+ ip_prefix_decode (&mp->prefix, &prefix);
+
+ if (vam->async_mode)
+ {
+ vam->async_errors += (retval < 0);
+ return;
+ }
+
+ vam->retval = retval;
+ vam->result_ready = 1;
+
+ if (vam->retval < 0)
+ {
+ fformat (vam->ofp, " (API call error: %d)\n", vam->retval);
+ return;
+ }
+
+ sbuf = format (sbuf, "Mapme Default Route: %U", format_fib_prefix, &prefix);
+ fformat (vam->ofp, "%s\n", sbuf);
+}
+
+static int
+api_hicn_api_mapme_default_route_set (vat_main_t *vam)
+{
+ unformat_input_t *input = vam->input;
+ vl_api_hicn_api_mapme_default_route_set_t *mp;
+ int ret;
+ fib_prefix_t fib_prefix;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "default_route %U/%d", unformat_ip46_address,
+ &fib_prefix.fp_addr, IP46_TYPE_ANY, &fib_prefix.fp_len))
+ {
+ ;
+ }
+ else
+ {
+ clib_warning ("Please specify valid route.");
+ return 1;
+ }
+ }
+
+ /* Construct the API message */
+ M (HICN_API_MAPME_DEFAULT_ROUTE_SET, mp);
+ ip_prefix_encode (&fib_prefix, &mp->prefix);
+
+ /* send it... */
+ S (mp);
+
+ /* Wait for a reply... */
+ W (ret);
+
+ return ret;
+}
+
+static int
+api_hicn_api_mapme_default_route_get (vat_main_t *vam)
+{
+ unformat_input_t *input = vam->input;
+ vl_api_hicn_api_mapme_default_route_set_t *mp;
+ int ret;
+ int default_route = 0;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "default_route"))
+ {
+ default_route = 1;
+ }
+ else
+ {
+ clib_warning ("Invalid option");
+ return 1;
+ }
+ }
+
+ if (default_route == 0)
+ {
+ return 1;
+ }
+
+ /* Construct the API message */
+ M (HICN_API_MAPME_DEFAULT_ROUTE_GET, mp);
+
+ /* send it... */
+ S (mp);
+
+ /* Wait for a reply... */
+ W (ret);
+
+ return ret;
+}
+
+static int
+api_hicn_api_strategy_get (vat_main_t *vam)
{
unformat_input_t *input = vam->input;
vl_api_hicn_api_strategy_get_t *mp;
int ret;
- u32 strategy_id = HICN_STRATEGY_NULL;
+ vl_api_hicn_strategy_t strategy_id = HICN_STRATEGY_NULL;
while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
{
if (unformat (input, "strategy %d", strategy_id))
- {;
+ {
+ ;
}
else
{
@@ -944,8 +958,8 @@ api_hicn_api_strategy_get (vat_main_t * vam)
}
static void
- vl_api_hicn_api_strategy_get_reply_t_handler
- (vl_api_hicn_api_strategy_get_reply_t * mp)
+vl_api_hicn_api_strategy_get_reply_t_handler (
+ vl_api_hicn_api_strategy_get_reply_t *mp)
{
vat_main_t *vam = hicn_test_main.vat_main;
i32 retval = ntohl (mp->retval);
@@ -960,15 +974,37 @@ static void
if (vam->retval < 0)
{
- //vpp_api_test infra will also print out string form of error
+ // vpp_api_test infra will also print out string form of error
fformat (vam->ofp, " (API call error: %d)\n", vam->retval);
return;
}
fformat (vam->ofp, "%s", mp->description);
}
+static void
+vl_api_hicn_api_strategy_set_reply_t_handler (
+ vl_api_hicn_api_strategy_set_reply_t *mp)
+{
+ vat_main_t *vam = hicn_test_main.vat_main;
+ i32 retval = ntohl (mp->retval);
+
+ if (vam->async_mode)
+ {
+ vam->async_errors += (retval < 0);
+ return;
+ }
+
+ vam->retval = retval;
+ vam->result_ready = 1;
+
+ if (vam->retval < 0)
+ {
+ fformat (vam->ofp, " (API call error: %d)\n", vam->retval);
+ }
+}
+
static int
-api_hicn_api_enable_disable (vat_main_t * vam)
+api_hicn_api_enable_disable (vat_main_t *vam)
{
unformat_input_t *input = vam->input;
vl_api_hicn_api_enable_disable_t *mp;
@@ -981,12 +1017,14 @@ api_hicn_api_enable_disable (vat_main_t * vam)
{
if (unformat (input, "prefix %U/%d", unformat_ip46_address,
&prefix.fp_addr, IP46_TYPE_ANY, &prefix.fp_len))
- {;
+ {
+ ;
}
else if (unformat (input, "disable"))
- {;
- en_dis = HICN_DISABLE;
- }
+ {
+ ;
+ en_dis = HICN_DISABLE;
+ }
else
{
break;
@@ -994,33 +1032,34 @@ api_hicn_api_enable_disable (vat_main_t * vam)
}
/* Check parse */
- if (((prefix.fp_addr.as_u64[0] == 0) && (prefix.fp_addr.as_u64[1] == 0))
- || (prefix.fp_len == 0))
+ if (((prefix.fp_addr.as_u64[0] == 0) && (prefix.fp_addr.as_u64[1] == 0)) ||
+ (prefix.fp_len == 0))
{
clib_warning ("Please specify a valid prefix...");
return 1;
}
- prefix.fp_proto = ip46_address_is_ip4 (&(prefix.fp_addr)) ? FIB_PROTOCOL_IP4 :
- FIB_PROTOCOL_IP6;
+ prefix.fp_proto = ip46_address_is_ip4 (&(prefix.fp_addr)) ?
+ FIB_PROTOCOL_IP4 :
+ FIB_PROTOCOL_IP6;
- //Construct the API message
+ // Construct the API message
M (HICN_API_ENABLE_DISABLE, mp);
ip_prefix_encode (&prefix, &mp->prefix);
mp->enable_disable = en_dis;
- //send it...
+ // send it...
S (mp);
- //Wait for a reply...
+ // Wait for a reply...
W (ret);
return ret;
}
static int
-api_hicn_api_register_prod_app (vat_main_t * vam)
+api_hicn_api_register_prod_app (vat_main_t *vam)
{
unformat_input_t *input = vam->input;
vl_api_hicn_api_register_prod_app_t *mp;
@@ -1032,10 +1071,12 @@ api_hicn_api_register_prod_app (vat_main_t * vam)
{
if (unformat (input, "prefix %U/%d", unformat_ip46_address,
&prefix.fp_addr, IP46_TYPE_ANY, &prefix.fp_len))
- {;
+ {
+ ;
}
else if (unformat (input, "id %d", &swif))
- {;
+ {
+ ;
}
else
{
@@ -1044,16 +1085,16 @@ api_hicn_api_register_prod_app (vat_main_t * vam)
}
/* Check parse */
- if (((prefix.fp_addr.as_u64[0] == 0) && (prefix.fp_addr.as_u64[1] == 0))
- || (prefix.fp_len == 0))
+ if (((prefix.fp_addr.as_u64[0] == 0) && (prefix.fp_addr.as_u64[1] == 0)) ||
+ (prefix.fp_len == 0))
{
clib_warning ("Please specify prefix...");
return 1;
}
- prefix.fp_proto =
- ip46_address_is_ip4 (&(prefix.fp_addr)) ? FIB_PROTOCOL_IP4 :
- FIB_PROTOCOL_IP6;
+ prefix.fp_proto = ip46_address_is_ip4 (&(prefix.fp_addr)) ?
+ FIB_PROTOCOL_IP4 :
+ FIB_PROTOCOL_IP6;
/* Construct the API message */
M (HICN_API_REGISTER_PROD_APP, mp);
ip_prefix_encode (&prefix, &mp->prefix);
@@ -1070,8 +1111,8 @@ api_hicn_api_register_prod_app (vat_main_t * vam)
}
static void
- vl_api_hicn_api_register_prod_app_reply_t_handler
- (vl_api_hicn_api_register_prod_app_reply_t * mp)
+vl_api_hicn_api_register_prod_app_reply_t_handler (
+ vl_api_hicn_api_register_prod_app_reply_t *mp)
{
vat_main_t *vam = hicn_test_main.vat_main;
i32 retval = ntohl (mp->retval);
@@ -1086,14 +1127,14 @@ static void
if (vam->retval < 0)
{
- //vpp_api_test infra will also print out string form of error
+ // vpp_api_test infra will also print out string form of error
fformat (vam->ofp, " (API call error: %d)\n", vam->retval);
return;
}
}
static int
-api_hicn_api_face_prod_del (vat_main_t * vam)
+api_hicn_api_face_prod_del (vat_main_t *vam)
{
unformat_input_t *input = vam->input;
vl_api_hicn_api_face_prod_del_t *mp;
@@ -1102,7 +1143,8 @@ api_hicn_api_face_prod_del (vat_main_t * vam)
while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
{
if (unformat (input, "face %d", &faceid))
- {;
+ {
+ ;
}
else
{
@@ -1110,27 +1152,27 @@ api_hicn_api_face_prod_del (vat_main_t * vam)
}
}
- //Check for presence of face ID
+ // Check for presence of face ID
if (faceid == ~0)
{
clib_warning ("Please specify face ID");
return 1;
}
- //Construct the API message
+ // Construct the API message
M (HICN_API_FACE_PROD_DEL, mp);
mp->faceid = clib_host_to_net_u32 (faceid);
- //send it...
+ // send it...
S (mp);
- //Wait for a reply...
+ // Wait for a reply...
W (ret);
return ret;
}
static int
-api_hicn_api_register_cons_app (vat_main_t * vam)
+api_hicn_api_register_cons_app (vat_main_t *vam)
{
vl_api_hicn_api_register_cons_app_t *mp;
int ret;
@@ -1148,7 +1190,7 @@ api_hicn_api_register_cons_app (vat_main_t * vam)
}
static int
-api_hicn_api_face_cons_del (vat_main_t * vam)
+api_hicn_api_face_cons_del (vat_main_t *vam)
{
unformat_input_t *input = vam->input;
vl_api_hicn_api_face_cons_del_t *mp;
@@ -1157,7 +1199,8 @@ api_hicn_api_face_cons_del (vat_main_t * vam)
while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
{
if (unformat (input, "face %d", &faceid))
- {;
+ {
+ ;
}
else
{
@@ -1165,28 +1208,28 @@ api_hicn_api_face_cons_del (vat_main_t * vam)
}
}
- //Check for presence of face ID
+ // Check for presence of face ID
if (faceid == ~0)
{
clib_warning ("Please specify face ID");
return 1;
}
- //Construct the API message
+ // Construct the API message
M (HICN_API_FACE_CONS_DEL, mp);
mp->faceid = clib_host_to_net_u32 (faceid);
- //send it...
+ // send it...
S (mp);
- //Wait for a reply...
+ // Wait for a reply...
W (ret);
return ret;
}
static void
- vl_api_hicn_api_register_cons_app_reply_t_handler
- (vl_api_hicn_api_register_cons_app_reply_t * mp)
+vl_api_hicn_api_register_cons_app_reply_t_handler (
+ vl_api_hicn_api_register_cons_app_reply_t *mp)
{
vat_main_t *vam = hicn_test_main.vat_main;
i32 retval = ntohl (mp->retval);
@@ -1201,7 +1244,7 @@ static void
if (vam->retval < 0)
{
- //vpp_api_test infra will also print out string form of error
+ // vpp_api_test infra will also print out string form of error
fformat (vam->ofp, " (API call error: %d)\n", vam->retval);
return;
}
@@ -1213,12 +1256,12 @@ static void
fformat (vam->ofp,
"ip4 address %U\n"
"ip6 address :%U\n",
- format_ip46_address, IP46_TYPE_ANY, &src_addr4,
- format_ip46_address, IP46_TYPE_ANY, &src_addr6);
+ format_ip46_address, IP46_TYPE_ANY, &src_addr4, format_ip46_address,
+ IP46_TYPE_ANY, &src_addr6);
}
static int
-api_hicn_api_udp_tunnel_add_del (vat_main_t * vam)
+api_hicn_api_udp_tunnel_add_del (vat_main_t *vam)
{
unformat_input_t *input = vam->input;
vl_api_hicn_api_udp_tunnel_add_del_t *mp;
@@ -1236,26 +1279,23 @@ api_hicn_api_udp_tunnel_add_del (vat_main_t * vam)
while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
{
if (unformat (input, "add"))
- is_del = 0;
+ is_del = 0;
else if (unformat (input, "del"))
- is_del = 1;
- else if (unformat (input, "%U %U",
- unformat_ip4_address,
- &src_ip.ip4, unformat_ip4_address, &dst_ip.ip4))
- fproto = FIB_PROTOCOL_IP4;
- else if (unformat (input, "%U %U",
- unformat_ip6_address,
- &src_ip.ip6, unformat_ip6_address, &dst_ip.ip6))
- fproto = FIB_PROTOCOL_IP6;
+ is_del = 1;
+ else if (unformat (input, "%U %U", unformat_ip4_address, &src_ip.ip4,
+ unformat_ip4_address, &dst_ip.ip4))
+ fproto = FIB_PROTOCOL_IP4;
+ else if (unformat (input, "%U %U", unformat_ip6_address, &src_ip.ip6,
+ unformat_ip6_address, &dst_ip.ip6))
+ fproto = FIB_PROTOCOL_IP6;
else if (unformat (input, "%d %d", &src_port, &dst_port))
- ;
+ ;
else
- {
- break;
- }
+ {
+ break;
+ }
}
-
if (fproto == FIB_PROTOCOL_MAX)
{
clib_warning ("Please specify face ID");
@@ -1264,10 +1304,14 @@ api_hicn_api_udp_tunnel_add_del (vat_main_t * vam)
/* Construct the API message */
M (HICN_API_UDP_TUNNEL_ADD_DEL, mp);
- ip_address_encode (&src_ip, fproto == FIB_PROTOCOL_IP4 ? IP46_TYPE_IP4 : IP46_TYPE_IP6 ,&mp->src_addr);
- ip_address_encode (&dst_ip, fproto == FIB_PROTOCOL_IP4 ? IP46_TYPE_IP4 : IP46_TYPE_IP6 ,&mp->dst_addr);
- mp->src_port = clib_host_to_net_u16(src_port);
- mp->dst_port = clib_host_to_net_u16(dst_port);
+ ip_address_encode (
+ &src_ip, fproto == FIB_PROTOCOL_IP4 ? IP46_TYPE_IP4 : IP46_TYPE_IP6,
+ &mp->src_addr);
+ ip_address_encode (
+ &dst_ip, fproto == FIB_PROTOCOL_IP4 ? IP46_TYPE_IP4 : IP46_TYPE_IP6,
+ &mp->dst_addr);
+ mp->src_port = clib_host_to_net_u16 (src_port);
+ mp->dst_port = clib_host_to_net_u16 (dst_port);
mp->is_add = !is_del;
/* send it... */
@@ -1280,8 +1324,8 @@ api_hicn_api_udp_tunnel_add_del (vat_main_t * vam)
}
static void
-vl_api_hicn_api_udp_tunnel_add_del_reply_t_handler
-(vl_api_hicn_api_udp_tunnel_add_del_reply_t * mp)
+vl_api_hicn_api_udp_tunnel_add_del_reply_t_handler (
+ vl_api_hicn_api_udp_tunnel_add_del_reply_t *mp)
{
vat_main_t *vam = hicn_test_main.vat_main;
i32 retval = ntohl (mp->retval);
@@ -1296,21 +1340,17 @@ vl_api_hicn_api_udp_tunnel_add_del_reply_t_handler
if (vam->retval < 0)
{
- //vpp_api_test infra will also print out string form of error
+ // vpp_api_test infra will also print out string form of error
fformat (vam->ofp, " (API call error: %d)\n", vam->retval);
return;
}
- index_t uei = clib_net_to_host_u32(mp->uei);
+ index_t uei = clib_net_to_host_u32 (mp->uei);
- fformat (vam->ofp,
- "udp-encap %d\n",
- uei);
+ fformat (vam->ofp, "udp-encap %d\n", uei);
}
-
-
-#include <hicn/hicn.api_test.c>
+#include <vpp_plugins/hicn/hicn.api_test.c>
/*
* fd.io coding-style-patch-verification: ON
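For readers unfamiliar with the VAT boilerplate being reformatted above, the _ (n) macro stamps out one reply handler per entry in foreach_standard_reply_retval_handler. Expanded by hand for the hicn_api_enable_disable_reply entry (a sketch of the expansion, not additional code in the commit), it reads:

    static void
    vl_api_hicn_api_enable_disable_reply_t_handler (
      vl_api_hicn_api_enable_disable_reply_t *mp)
    {
      vat_main_t *vam = hicn_test_main.vat_main;
      i32 retval = ntohl (mp->retval);
      if (vam->async_mode)
        {
          vam->async_errors += (retval < 0);
        }
      else
        {
          fformat (vam->ofp, "%s\n", get_error_string (retval));
          vam->retval = retval;
          vam->result_ready = 1;
        }
    }
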
diff --git a/hicn-plugin/src/hicn_buffer_flags.h b/hicn-plugin/src/hicn_buffer_flags.h
new file mode 100644
index 000000000..7d99e6d33
--- /dev/null
+++ b/hicn-plugin/src/hicn_buffer_flags.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2021 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HICN_BUFFER_FLAGS_H__
+#define __HICN_BUFFER_FLAGS_H__
+
+#define foreach_hicn_buffer_flag \
+ _ (0, NEW_FACE, "new face") \
+ _ (1, PKT_LESS_TWO_CL, "packet is less than 2 cache lines long") \
+ _ (2, FROM_UDP4_TUNNEL, "packet is from udp4 tunnel") \
+ _ (3, FROM_UDP6_TUNNEL, "packet is from udp6 tunnel") \
+ _ (4, FROM_CS, "packet is from cs") \
+ _ (5, FROM_PG, "packet is from packet generator")
+
+enum
+{
+ HICN_BUFFER_FLAGS_DEFAULT = 0,
+#define _(a, b, c) HICN_BUFFER_FLAGS_##b = (1 << a),
+ foreach_hicn_buffer_flag
+#undef _
+};
+
+#endif /* __HICN_BUFFER_FLAGS_H__ */
\ No newline at end of file
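The new hicn_buffer_flags.h above uses an X-macro to define both the flag bit positions and their descriptions. As a minimal illustration of how the enum expands and how a flag can be tested (the helper below is hypothetical; hicn_get_buffer() is the accessor used elsewhere in this diff):

    /* The _() expansion yields, e.g.:
     *   HICN_BUFFER_FLAGS_NEW_FACE = (1 << 0),
     *   HICN_BUFFER_FLAGS_FROM_CS  = (1 << 4), ...
     */

    /* Hypothetical helper checking whether a buffer was served from the CS */
    static inline int
    hicn_buffer_is_from_cs (vlib_buffer_t *b)
    {
      return (hicn_get_buffer (b)->flags & HICN_BUFFER_FLAGS_FROM_CS) != 0;
    }
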
diff --git a/hicn-plugin/src/hicn_logging.h b/hicn-plugin/src/hicn_logging.h
new file mode 100644
index 000000000..2e534e546
--- /dev/null
+++ b/hicn-plugin/src/hicn_logging.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2021 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HICN_DEBUG_H__
+#define __HICN_DEBUG_H__
+
+#ifdef HICN_DDEBUG
+#define HICN_DEBUG(...) \
+ do \
+ { \
+ clib_warning (__VA_ARGS__); \
+ } \
+ while (0)
+#else
+#define HICN_DEBUG(...)
+#endif /* HICN_DEBUG */
+
+#define HICN_ERROR(...) clib_warning (__VA_ARGS__)
+
+#endif /* __HICN_DEBUG_H__ */
\ No newline at end of file
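hicn_logging.h compiles HICN_DEBUG down to nothing unless HICN_DDEBUG is defined at build time, while HICN_ERROR always goes through clib_warning. Usage matches the calls added in hicn_api.c above; the second line below is only an illustrative example (rv is a placeholder variable):

    HICN_DEBUG ("Calling hicn enable from API.");   /* no-op unless HICN_DDEBUG is defined */
    HICN_ERROR ("unexpected return value %d", rv);  /* always logged via clib_warning */
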
diff --git a/hicn-plugin/src/hicn_all_api_h.h b/hicn-plugin/src/infra.c
index 1263ea4a2..e0dba5efd 100644
--- a/hicn-plugin/src/hicn_all_api_h.h
+++ b/hicn-plugin/src/infra.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Copyright (c) 2021 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -13,10 +13,13 @@
* limitations under the License.
*/
-#include <hicn/hicn.api.h>
+#include "infra.h"
-/*
- * fd.io coding-style-patch-verification: ON
+/**
+ * @file infra.c
*
- * Local Variables: eval: (c-set-style "gnu") End:
*/
+
+/* PIT and CS size */
+u32 hicn_infra_pit_size;
+u32 hicn_infra_cs_size;
diff --git a/hicn-plugin/src/infra.h b/hicn-plugin/src/infra.h
index ff76de4e4..463617da0 100644
--- a/hicn-plugin/src/infra.h
+++ b/hicn-plugin/src/infra.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Copyright (c) 2021 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -46,7 +46,7 @@ typedef struct hicn_main_s
* Boundaries for the interest lifetime. If greater than
* pit_lifetime_max_ms, pit_lifetime_max_ms is used in the PIT
*/
- u64 pit_lifetime_max_ms;
+ u32 pit_lifetime_max_ms;
vnet_link_t link;
@@ -57,26 +57,24 @@ extern hicn_main_t hicn_main;
extern int hicn_infra_fwdr_initialized;
/* PIT and CS size */
-u32 hicn_infra_pit_size;
-u32 hicn_infra_cs_size;
+extern u32 hicn_infra_pit_size;
+extern u32 hicn_infra_cs_size;
/**
* @brief Enable and disable the hicn plugin
*
* Enable the time the hICN plugin and set the forwarder parameters.
- * @param enable_disable 1 if to enable, 0 otherwisw (currently only enable is supported)
+ * @param enable_disable 1 to enable, 0 otherwise (currently only enable is
+ * supported)
* @param pit_max_size Max size of the PIT
- * @param pit_max_lifetime_sec_req Maximum timeout allowed for a PIT entry lifetime
+ * @param pit_max_lifetime_sec_req Maximum timeout allowed for a PIT entry
+ * lifetime
* @param cs_max_size CS size. Must be <= than pit_max_size
* @param cs_reserved_app Amount of CS reserved for application faces
*/
-int
-hicn_infra_plugin_enable_disable (int enable_disable,
- int pit_max_size,
- f64 pit_max_lifetime_sec_req,
- int cs_max_size,
- vnet_link_t link);
-
+int hicn_infra_plugin_enable_disable (int enable_disable, int pit_max_size,
+ f64 pit_max_lifetime_sec_req,
+ int cs_max_size, vnet_link_t link);
/* vlib nodes that compose the hICN forwarder */
extern vlib_node_registration_t hicn_interest_pcslookup_node;
@@ -91,8 +89,6 @@ extern vlib_node_registration_t hicn_pg_server_node;
extern vlib_node_registration_t hicn_data_input_ip6_node;
extern vlib_node_registration_t hicn_data_input_ip4_node;
-
-
#endif /* // __HICN_INFRA_H__ */
/*
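The infra.h change above turns hicn_infra_pit_size and hicn_infra_cs_size from definitions in the header into extern declarations, with the single definitions now placed in the new infra.c. Condensed, the pattern is:

    /* infra.h: declaration only, visible to every including unit */
    extern u32 hicn_infra_pit_size;
    extern u32 hicn_infra_cs_size;

    /* infra.c: the one and only definition */
    u32 hicn_infra_pit_size;
    u32 hicn_infra_cs_size;

This avoids each translation unit that includes the header emitting its own (tentative) definition of the globals.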
diff --git a/hicn-plugin/src/interest_hitcs.h b/hicn-plugin/src/interest_hitcs.h
index 94fa3e6f5..0df987778 100644
--- a/hicn-plugin/src/interest_hitcs.h
+++ b/hicn-plugin/src/interest_hitcs.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Copyright (c) 2021 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -24,11 +24,12 @@
/**
* @file interest_hitcs.h
*
- * This is the node encoutered by interest packets after the hicn-interest-pcslookup.
- * This node satisfies an interest with a data stored in the CS and send the data back
- * from the incoming iface of the interest (i.e., the vlib buffer is sent to the
- * hicn6-iface-output or hicn4-iface-output node). In case the data is expired, the
- * vlib buffer is sent to the hicn-strategy node.
+ * This is the node encountered by interest packets after the
+ * hicn-interest-pcslookup node. It satisfies an interest with data stored in
+ * the CS and sends the data back from the incoming iface of the interest
+ * (i.e., the vlib buffer is sent to the hicn6-iface-output or
+ * hicn4-iface-output node). In case the data is expired, the vlib buffer is
+ * sent to the hicn-strategy node.
*/
/*
diff --git a/hicn-plugin/src/interest_hitcs_node.c b/hicn-plugin/src/interest_hitcs_node.c
index f569fa897..651b2796a 100644
--- a/hicn-plugin/src/interest_hitcs_node.c
+++ b/hicn-plugin/src/interest_hitcs_node.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Copyright (c) 2021 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -25,8 +25,7 @@
#include "error.h"
/* packet trace format function */
-static u8 *hicn_interest_hitcs_format_trace (u8 * s, va_list * args);
-
+static u8 *hicn_interest_hitcs_format_trace (u8 *s, va_list *args);
/* Stats string values */
static char *hicn_interest_hitcs_error_strings[] = {
@@ -37,13 +36,14 @@ static char *hicn_interest_hitcs_error_strings[] = {
vlib_node_registration_t hicn_interest_hitcs_node;
-always_inline void drop_packet (u32 * next0);
+// always_inline void drop_packet (u32 *next0);
-always_inline void
-clone_from_cs (vlib_main_t * vm, u32 * bi0_cs, vlib_buffer_t * dest, u8 isv6)
+always_inline u32
+clone_from_cs (vlib_main_t *vm, u32 bi0_cs, vlib_buffer_t *dest, u8 isv6)
{
/* Retrieve the buffer to clone */
- vlib_buffer_t *cs_buf = vlib_get_buffer (vm, *bi0_cs);
+ u32 ret = bi0_cs;
+ vlib_buffer_t *cs_buf = vlib_get_buffer (vm, bi0_cs);
hicn_buffer_t *hicnb = hicn_get_buffer (cs_buf);
word buffer_advance = CLIB_CACHE_LINE_BYTES * 2;
if (hicnb->flags & HICN_BUFFER_FLAGS_PKT_LESS_TWO_CL)
@@ -64,7 +64,7 @@ clone_from_cs (vlib_main_t * vm, u32 * bi0_cs, vlib_buffer_t * dest, u8 isv6)
{
vlib_buffer_t *cs_buf2 = vlib_buffer_copy (vm, cs_buf);
vlib_buffer_advance (cs_buf, buffer_advance);
- *bi0_cs = vlib_get_buffer_index (vm, cs_buf2);
+ ret = vlib_get_buffer_index (vm, cs_buf2);
cs_buf->ref_count--;
cs_buf = cs_buf2;
}
@@ -77,18 +77,32 @@ clone_from_cs (vlib_main_t * vm, u32 * bi0_cs, vlib_buffer_t * dest, u8 isv6)
vlib_buffer_advance (cs_buf, buffer_advance);
vlib_buffer_attach_clone (vm, dest, cs_buf);
}
+
+  /* Set flag for packet coming from CS */
+ hicn_get_buffer (dest)->flags |= HICN_BUFFER_FLAGS_FROM_CS;
+
+ return ret;
}
static uword
-hicn_interest_hitcs_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
- vlib_frame_t * frame)
+hicn_interest_hitcs_node_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
+ vlib_frame_t *frame)
{
u32 n_left_from, *from, *to_next;
hicn_interest_hitcs_next_t next_index;
hicn_interest_hitcs_runtime_t *rt;
vl_api_hicn_api_node_stats_get_reply_t stats = { 0 };
+ vlib_buffer_t *b0;
+ u8 isv6;
+ u32 bi0, bi_ret;
+ u32 next0 = HICN_INTEREST_HITCS_NEXT_ERROR_DROP;
+ hicn_buffer_t *hicnb0;
+ u32 pcs_entry_index;
+ hicn_pcs_entry_t *pcs_entry = NULL;
+ const hicn_strategy_vft_t *strategy_vft0;
+ const hicn_dpo_vft_t *dpo_vft0;
+ u8 dpo_ctx_id0;
f64 tnow;
- int ret;
rt = vlib_node_get_runtime_data (vm, hicn_interest_hitcs_node.index);
@@ -108,29 +122,12 @@ hicn_interest_hitcs_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
while (n_left_from > 0 && n_left_to_next > 0)
{
- vlib_buffer_t *b0;
- u8 isv6;
- u8 *nameptr;
- u16 namelen;
- u32 bi0;
- u32 next0 = HICN_INTEREST_HITCS_NEXT_ERROR_DROP;
- hicn_name_t name;
- hicn_header_t *hicn0;
- hicn_buffer_t *hicnb0;
- hicn_hash_node_t *node0;
- hicn_pcs_entry_t *pitp;
- hicn_hash_entry_t *hash_entry0;
- const hicn_strategy_vft_t *strategy_vft0;
- const hicn_dpo_vft_t *dpo_vft0;
- u8 dpo_ctx_id0;
-
/* Prefetch for next iteration. */
if (n_left_from > 1)
{
vlib_buffer_t *b1;
b1 = vlib_get_buffer (vm, from[1]);
CLIB_PREFETCH (b1, 2 * CLIB_CACHE_LINE_BYTES, STORE);
- CLIB_PREFETCH (b1->data, CLIB_CACHE_LINE_BYTES, STORE);
}
/* Dequeue a packet buffer */
@@ -145,85 +142,64 @@ hicn_interest_hitcs_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
/* Get hicn buffer and state */
hicnb0 = hicn_get_buffer (b0);
- hicn_get_internal_state (hicnb0, rt->pitcs, &node0, &strategy_vft0,
- &dpo_vft0, &dpo_ctx_id0, &hash_entry0);
-
- ret = hicn_interest_parse_pkt (b0, &name, &namelen, &hicn0, &isv6);
- nameptr = (u8 *) (&name);
- pitp = hicn_pit_get_data (node0);
+ hicn_get_internal_state (hicnb0, &pcs_entry_index, &strategy_vft0,
+ &dpo_vft0, &dpo_ctx_id0);
+ pcs_entry =
+ hicn_pcs_entry_get_entry_from_index (rt->pitcs, pcs_entry_index);
- dpo_id_t hicn_dpo_id0 =
- { dpo_vft0->hicn_dpo_get_type (), 0, 0, dpo_ctx_id0 };
+ isv6 = hicn_buffer_is_v6 (b0);
- if (PREDICT_FALSE
- (ret != HICN_ERROR_NONE ||
- !hicn_node_compare (nameptr, namelen, node0)))
- {
- /* Remove lock from the entry */
- hicn_pcs_remove_lock (rt->pitcs, &pitp, &node0, vm, hash_entry0,
- dpo_vft0, &hicn_dpo_id0);
- drop_packet (&next0);
- goto end_processing;
- }
- if ((tnow > pitp->shared.expire_time))
+ if (tnow > hicn_pcs_entry_get_expire_time (pcs_entry))
{
- /* Delete and clean up expired CS entry */
- hicn_pcs_delete (rt->pitcs, &pitp, &node0, vm, hash_entry0,
- dpo_vft0, &hicn_dpo_id0);
+ // Delete and clean up expired CS entry
+ hicn_pcs_entry_remove_lock (rt->pitcs, pcs_entry);
+
+ // Update stats
stats.cs_expired_count++;
- /* Forward interest to the strategy node */
+
+ // Forward interest to the strategy node
next0 = HICN_INTEREST_HITCS_NEXT_STRATEGY;
}
else
{
- if (PREDICT_TRUE
- (!(hash_entry0->he_flags & HICN_HASH_ENTRY_FLAG_DELETED)))
- hicn_pcs_cs_update (vm, rt->pitcs, pitp, pitp, node0);
-
- /*
- * Retrieve the incoming iface and forward
- * the data through it
- */
+ // Retrieve the incoming iface and forward
+ // the data through it
next0 = isv6 ? HICN_INTEREST_HITCS_NEXT_IFACE6_OUT :
- HICN_INTEREST_HITCS_NEXT_IFACE4_OUT;
- vnet_buffer (b0)->ip.adj_index[VLIB_TX] = hicnb0->face_id;
+ HICN_INTEREST_HITCS_NEXT_IFACE4_OUT;
+ vnet_buffer (b0)->ip.adj_index[VLIB_TX] = hicnb0->face_id;
- clone_from_cs (vm, &pitp->u.cs.cs_pkt_buf, b0, isv6);
+ bi_ret = clone_from_cs (
+ vm, hicn_pcs_entry_cs_get_buffer (pcs_entry), b0, isv6);
+ hicn_pcs_entry_cs_set_buffer (pcs_entry, bi_ret);
+
+ // Update stats
stats.pkts_from_cache_count++;
stats.pkts_data_count++;
- /* Remove lock from the entry */
- hicn_pcs_remove_lock (rt->pitcs, &pitp, &node0, vm, hash_entry0,
- dpo_vft0, &hicn_dpo_id0);
}
- end_processing:
-
- /* Maybe trace */
+ // Maybe trace
if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) &&
(b0->flags & VLIB_BUFFER_IS_TRACED)))
{
hicn_interest_hitcs_trace_t *t =
vlib_add_trace (vm, node, b0, sizeof (*t));
- t->pkt_type = HICN_PKT_TYPE_INTEREST;
+ t->pkt_type = HICN_PACKET_TYPE_INTEREST;
t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
t->next_index = next0;
}
- /* Incr packet counter */
+
+ // Increment packet counter
stats.pkts_processed += 1;
- /*
- * Verify speculative enqueue, maybe switch current
- * next frame
- */
- vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
- to_next, n_left_to_next,
- bi0, next0);
+ // Verify speculative enqueue, maybe switch current next frame
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+ n_left_to_next, bi0, next0);
}
vlib_put_next_frame (vm, node, next_index, n_left_to_next);
}
- u32 pit_int_count = hicn_pit_get_int_count (rt->pitcs);
+ u32 pit_int_count = hicn_pcs_get_pit_count (rt->pitcs);
vlib_node_increment_counter (vm, hicn_interest_hitcs_node.index,
HICNFWD_ERROR_CACHED,
@@ -238,15 +214,17 @@ hicn_interest_hitcs_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
return (frame->n_vectors);
}
+#if 0
always_inline void
-drop_packet (u32 * next0)
+drop_packet (u32 *next0)
{
*next0 = HICN_INTEREST_HITCS_NEXT_ERROR_DROP;
}
+#endif
/* packet trace format function */
static u8 *
-hicn_interest_hitcs_format_trace (u8 * s, va_list * args)
+hicn_interest_hitcs_format_trace (u8 *s, va_list *args)
{
CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
@@ -261,31 +239,25 @@ hicn_interest_hitcs_format_trace (u8 * s, va_list * args)
/*
* Node registration for the interest forwarder node
*/
-/* *INDENT-OFF* */
-VLIB_REGISTER_NODE(hicn_interest_hitcs_node) =
-{
+VLIB_REGISTER_NODE (hicn_interest_hitcs_node) = {
.function = hicn_interest_hitcs_node_fn,
.name = "hicn-interest-hitcs",
- .vector_size = sizeof(u32),
- .runtime_data_bytes = sizeof(hicn_interest_hitcs_runtime_t),
+ .vector_size = sizeof (u32),
+ .runtime_data_bytes = sizeof (hicn_interest_hitcs_runtime_t),
.format_trace = hicn_interest_hitcs_format_trace,
.type = VLIB_NODE_TYPE_INTERNAL,
- .n_errors = ARRAY_LEN(hicn_interest_hitcs_error_strings),
+ .n_errors = ARRAY_LEN (hicn_interest_hitcs_error_strings),
.error_strings = hicn_interest_hitcs_error_strings,
.n_next_nodes = HICN_INTEREST_HITCS_N_NEXT,
/* edit / add dispositions here */
- .next_nodes =
- {
- [HICN_INTEREST_HITCS_NEXT_STRATEGY] = "hicn-strategy",
- [HICN_INTEREST_HITCS_NEXT_IFACE4_OUT] = "hicn4-iface-output",
- [HICN_INTEREST_HITCS_NEXT_IFACE6_OUT] = "hicn6-iface-output",
- [HICN_INTEREST_HITCS_NEXT_ERROR_DROP] = "error-drop"
- },
+ .next_nodes = { [HICN_INTEREST_HITCS_NEXT_STRATEGY] = "hicn-strategy",
+ [HICN_INTEREST_HITCS_NEXT_IFACE4_OUT] = "hicn4-iface-output",
+ [HICN_INTEREST_HITCS_NEXT_IFACE6_OUT] = "hicn6-iface-output",
+ [HICN_INTEREST_HITCS_NEXT_ERROR_DROP] = "error-drop" },
};
-/* *INDENT-ON* */
/*
* fd.io coding-style-patch-verification: ON
*
* Local Variables: eval: (c-set-style "gnu") End:
- */
+ */
\ No newline at end of file
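Note: the fragment below is an illustrative sketch, not part of the patch. clone_from_cs () now takes the CS buffer index by value and returns the index of the buffer that must stay cached, instead of writing it back through a pointer; the caller stores the returned index back into the PCS entry, as the hunk above does:

  /* CS hit: attach a clone of the cached data to the interest buffer b0 */
  u32 bi_cs = hicn_pcs_entry_cs_get_buffer (pcs_entry);
  u32 bi_keep = clone_from_cs (vm, bi_cs, b0, hicn_buffer_is_v6 (b0));

  /* the copy path may allocate a replacement buffer, so always store the
   * returned index back into the CS entry */
  hicn_pcs_entry_cs_set_buffer (pcs_entry, bi_keep);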
diff --git a/hicn-plugin/src/interest_hitpit.h b/hicn-plugin/src/interest_hitpit.h
index ffdc61c8f..46659e67c 100644
--- a/hicn-plugin/src/interest_hitpit.h
+++ b/hicn-plugin/src/interest_hitpit.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Copyright (c) 2021 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -24,14 +24,14 @@
/**
* @file interest_hitpit.h
*
- * This is the node encoutered by interest packets after the hicn-interest-pcslookup.
- * This node aggregates an interest in the PIT or forward it in case of a retransmission.
- * If the interest must be retransmitted the next vlib node will be on of the
- * hicn6-face-output or hicn4-face-output nodes. If the pit entry is expired the next vlib node
- * will be the hicn-strategy node, otherwise the vlib buffer is dropped.
+ * This is the node encountered by interest packets after the
+ * hicn-interest-pcslookup. This node aggregates an interest in the PIT or
+ * forwards it in case of a retransmission. If the interest must be
+ * retransmitted, the next vlib node will be one of the hicn6-face-output or
+ * hicn4-face-output nodes. If the PIT entry is expired, the next vlib node
+ * will be the hicn-strategy node; otherwise the vlib buffer is dropped.
*/
-
/*
* Node context data; we think this is per-thread/instance
*/
@@ -46,7 +46,7 @@ typedef struct
{
u32 next_index;
u32 sw_if_index;
- u8 pkt_type;
+ u32 pkt_type;
} hicn_interest_hitpit_trace_t;
typedef enum
@@ -65,4 +65,4 @@ typedef enum
* fd.io coding-style-patch-verification: ON
*
* Local Variables: eval: (c-set-style "gnu") End:
- */
+ */
\ No newline at end of file
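Note: the fragment below is an illustrative sketch, not part of the patch. It condenses the decision taken by the rewritten hitpit node that follows, omitting the interest-manifest special case and assuming the PCS entry and strategy VFT were already retrieved as in the node function:

  if (hicn_pcs_entry_is_cs (pcs_entry))
    {
      /* a data packet arrived between the PCS lookup and now */
      next0 = HICN_INTEREST_HITPIT_NEXT_INTEREST_HITCS;
    }
  else if (hicn_pcs_entry_pit_search (pcs_entry, hicnb0->face_id) ||
	   strategy_vft0->hicn_send_after_aggregation (dpo_ctx_id0,
						       hicnb0->face_id))
    {
      /* retransmission, or the strategy forces forwarding */
      next0 = HICN_INTEREST_HITPIT_NEXT_STRATEGY;
    }
  else
    {
      /* aggregation: remember the ingress face and drop the buffer */
      hicn_pcs_entry_pit_add_face (pcs_entry, hicnb0->face_id);
      next0 = HICN_INTEREST_HITPIT_NEXT_ERROR_DROP;
    }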
diff --git a/hicn-plugin/src/interest_hitpit_node.c b/hicn-plugin/src/interest_hitpit_node.c
index 9ebf183c5..7f1b0f449 100644
--- a/hicn-plugin/src/interest_hitpit_node.c
+++ b/hicn-plugin/src/interest_hitpit_node.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Copyright (c) 2021 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -25,10 +25,9 @@
#include "strategy_dpo_manager.h"
#include "state.h"
#include "error.h"
-#include "face_db.h"
/* packet trace format function */
-static u8 *hicn_interest_hitpit_format_trace (u8 * s, va_list * args);
+static u8 *hicn_interest_hitpit_format_trace (u8 *s, va_list *args);
/* Stats string values */
static char *hicn_interest_hitpit_error_strings[] = {
@@ -39,20 +38,28 @@ static char *hicn_interest_hitpit_error_strings[] = {
vlib_node_registration_t hicn_interest_hitpit_node;
-always_inline void drop_packet (u32 * next0);
-
/*
* hICN forwarder node for interests hitting the PIT
*/
static uword
-hicn_interest_hitpit_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
- vlib_frame_t * frame)
+hicn_interest_hitpit_node_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
+ vlib_frame_t *frame)
{
u32 n_left_from, *from, *to_next;
hicn_interest_hitpit_next_t next_index;
hicn_interest_hitpit_runtime_t *rt;
vl_api_hicn_api_node_stats_get_reply_t stats = { 0 };
- f64 tnow;
+ u32 n_left_to_next;
+ vlib_buffer_t *b0;
+ u32 bi0;
+ u32 next0 = HICN_INTEREST_HITPIT_NEXT_ERROR_DROP;
+ const hicn_strategy_vft_t *strategy_vft0;
+ const hicn_dpo_vft_t *dpo_vft0;
+ u8 dpo_ctx_id0;
+ u8 forward = 0;
+ u32 pit_entry_index;
+ hicn_pcs_entry_t *pcs_entry = NULL;
+ hicn_buffer_t *hicnb0;
rt = vlib_node_get_runtime_data (vm, hicn_interest_hitpit_node.index);
@@ -64,42 +71,17 @@ hicn_interest_hitpit_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
n_left_from = frame->n_vectors;
next_index = node->cached_next_index;
- /* Capture time in vpp terms */
- tnow = vlib_time_now (vm);
-
while (n_left_from > 0)
{
- u32 n_left_to_next;
vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
while (n_left_from > 0 && n_left_to_next > 0)
{
- vlib_buffer_t *b0;
- u8 isv6;
- u8 *nameptr;
- u16 namelen;
- u32 bi0;
- u32 next0 = HICN_INTEREST_HITPIT_NEXT_ERROR_DROP;
- hicn_name_t name;
- hicn_header_t *hicn0;
- hicn_hash_node_t *node0;
- const hicn_strategy_vft_t *strategy_vft0;
- const hicn_dpo_vft_t *dpo_vft0;
- hicn_pcs_entry_t *pitp;
- u8 dpo_ctx_id0;
- u8 found = 0;
- int nh_idx;
- hicn_face_id_t outface;
- hicn_hash_entry_t *hash_entry0;
- hicn_buffer_t *hicnb0;
- int ret;
-
/* Prefetch for next iteration. */
if (n_left_from > 1)
{
vlib_buffer_t *b1;
b1 = vlib_get_buffer (vm, from[1]);
CLIB_PREFETCH (b1, CLIB_CACHE_LINE_BYTES, LOAD);
- CLIB_PREFETCH (b1->data, CLIB_CACHE_LINE_BYTES, STORE);
}
/* Dequeue a packet buffer */
@@ -114,98 +96,54 @@ hicn_interest_hitpit_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
/* Get hicn buffer and state */
hicnb0 = hicn_get_buffer (b0);
- hicn_get_internal_state (hicnb0, rt->pitcs, &node0, &strategy_vft0,
- &dpo_vft0, &dpo_ctx_id0, &hash_entry0);
+ hicn_get_internal_state (hicnb0, &pit_entry_index, &strategy_vft0,
+ &dpo_vft0, &dpo_ctx_id0);
+ // Retrieve PIT entry
+ pcs_entry =
+ hicn_pcs_entry_get_entry_from_index (rt->pitcs, pit_entry_index);
- ret = hicn_interest_parse_pkt (b0, &name, &namelen, &hicn0, &isv6);
- nameptr = (u8 *) (&name);
- pitp = hicn_pit_get_data (node0);
- dpo_id_t hicn_dpo_id0 =
- { dpo_vft0->hicn_dpo_get_type (), 0, 0, dpo_ctx_id0 };
-
- /*
- * Check if the hit is instead a collision in the
- * hash table. Unlikely to happen.
- */
- if (PREDICT_FALSE
- (ret != HICN_ERROR_NONE
- || !hicn_node_compare (nameptr, namelen, node0)))
- {
- stats.interests_hash_collision++;
- /* Remove lock from the entry */
- hicn_pcs_remove_lock (rt->pitcs, &pitp, &node0, vm, hash_entry0,
- dpo_vft0, &hicn_dpo_id0);
- drop_packet (&next0);
+ // Increment packet counter
+ stats.pkts_processed += 1;
- goto end_processing;
- }
- /*
- * If the entry is expired, remove it no matter of
- * the possible cases.
- */
- if (tnow > pitp->shared.expire_time)
+ // A data packet may have arrived in the time between the pcs
+ // lookup and now. Check again to make sure the entry is CS or
+ // PIT
+ if (hicn_pcs_entry_is_cs (pcs_entry))
{
- strategy_vft0->hicn_on_interest_timeout (dpo_ctx_id0);
- hicn_pcs_delete (rt->pitcs, &pitp, &node0, vm, hash_entry0,
- dpo_vft0, &hicn_dpo_id0);
- stats.pit_expired_count++;
- next0 = HICN_INTEREST_HITPIT_NEXT_STRATEGY;
+ next0 = HICN_INTEREST_HITPIT_NEXT_INTEREST_HITCS;
}
else
{
- if ((hash_entry0->he_flags & HICN_HASH_ENTRY_FLAG_CS_ENTRY))
+	      // Distinguish between aggregation and retransmission, and
+	      // additionally check whether the strategy mandates to always
+	      // send the interest
+
+ // Retransmission
+ forward = hicn_pcs_entry_pit_search (pcs_entry, hicnb0->face_id);
+
+ // Strategy mandates to force send after aggregation
+ if (!forward && strategy_vft0->hicn_send_after_aggregation (
+ dpo_ctx_id0, hicnb0->face_id))
+ {
+ forward = true;
+ hicn_pcs_entry_pit_add_face (pcs_entry, hicnb0->face_id);
+ }
+
+ if (forward && hicnb0->payload_type != HPT_MANIFEST)
{
- next0 = HICN_INTEREST_HITPIT_NEXT_INTEREST_HITCS;
+ next0 = HICN_INTEREST_HITPIT_NEXT_STRATEGY;
}
else
{
- /*
- * Distinguish between aggregation or
- * retransmission
- */
-
- found =
- hicn_face_search (hicnb0->face_id,
- &(pitp->u.pit.faces));
-
- if (found)
- {
- strategy_vft0->hicn_select_next_hop (dpo_ctx_id0,
- &nh_idx, &outface);
- /* Retransmission */
- /*
- * Prepare the packet for the
- * forwarding
- */
- next0 = isv6 ? HICN_INTEREST_HITPIT_NEXT_FACE6_OUTPUT :
- HICN_INTEREST_HITPIT_NEXT_FACE4_OUTPUT;
- vnet_buffer (b0)->ip.adj_index[VLIB_TX] =
- outface;
+ // Aggregation
+ hicn_pcs_entry_pit_add_face (pcs_entry, hicnb0->face_id);
- /*
- * Update the egress face in
- * the PIT
- */
- pitp->u.pit.pe_txnh = nh_idx;
- stats.interests_retx++;
- }
- else
- {
- hicn_face_db_add_face (hicnb0->face_id,
- &pitp->u.pit.faces);
-
- /* Aggregation */
- drop_packet (&next0);
- stats.interests_aggregated++;
- }
- /* Remove lock from the entry */
- hicn_pcs_remove_lock (rt->pitcs, &pitp, &node0, vm,
- hash_entry0, dpo_vft0, &hicn_dpo_id0);
+ next0 = HICN_INTEREST_HITPIT_NEXT_ERROR_DROP;
+ stats.interests_aggregated++;
}
}
- end_processing:
/* Maybe trace */
if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) &&
@@ -213,25 +151,21 @@ hicn_interest_hitpit_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
{
hicn_interest_hitpit_trace_t *t =
vlib_add_trace (vm, node, b0, sizeof (*t));
- t->pkt_type = HICN_PKT_TYPE_INTEREST;
+ t->pkt_type = HICN_PACKET_TYPE_INTEREST;
t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
t->next_index = next0;
}
- /* Incr packet counter */
- stats.pkts_processed += 1;
/*
* Verify speculative enqueue, maybe switch current
* next frame
*/
- vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
- to_next, n_left_to_next,
- bi0, next0);
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+ n_left_to_next, bi0, next0);
}
vlib_put_next_frame (vm, node, next_index, n_left_to_next);
}
- u32 pit_int_count = hicn_pit_get_int_count (rt->pitcs);
-
+ u32 pit_int_count = hicn_pcs_get_pit_count (rt->pitcs);
vlib_node_increment_counter (vm, hicn_interest_hitpit_node.index,
HICNFWD_ERROR_PROCESSED, stats.pkts_processed);
@@ -256,7 +190,7 @@ hicn_interest_hitpit_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
/* packet trace format function */
static u8 *
-hicn_interest_hitpit_format_trace (u8 * s, va_list * args)
+hicn_interest_hitpit_format_trace (u8 *s, va_list *args)
{
CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
@@ -268,16 +202,9 @@ hicn_interest_hitpit_format_trace (u8 * s, va_list * args)
return (s);
}
-void
-drop_packet (u32 * next0)
-{
- *next0 = HICN_INTEREST_HITPIT_NEXT_ERROR_DROP;
-}
-
/*
* Node registration for the interest forwarder node
*/
-/* *INDENT-OFF* */
VLIB_REGISTER_NODE(hicn_interest_hitpit_node) =
{
.function = hicn_interest_hitpit_node_fn,
@@ -299,7 +226,6 @@ VLIB_REGISTER_NODE(hicn_interest_hitpit_node) =
[HICN_INTEREST_HITPIT_NEXT_ERROR_DROP] = "error-drop",
},
};
-/* *INDENT-ON* */
/*
* fd.io coding-style-patch-verification: ON
diff --git a/hicn-plugin/src/interest_pcslookup.h b/hicn-plugin/src/interest_pcslookup.h
index cbc9dde51..ba0e7651d 100644
--- a/hicn-plugin/src/interest_pcslookup.h
+++ b/hicn-plugin/src/interest_pcslookup.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Copyright (c) 2021 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -24,12 +24,12 @@
/**
* @file interest_pcslookup.h
*
- * This is the node encoutered by interest packets after the hicn6-iface-input or
- * hicn4-iface-input. This node performs a lookup in the pit and content store and
- * if there is a hit in the PIT, the vlib buffer is passed to the hicn-interest-hitcs
- * while if there is a hit in the CS the vlib buffer is passed to the
- * hicn-interest-hitpit. If there isn't any hit, the vlib buffer is passed to the
- * hicn-strategy node.
+ * This is the node encountered by interest packets after the
+ * hicn6-iface-input or hicn4-iface-input. This node performs a lookup in the
+ * PIT and content store: if there is a hit in the PIT, the vlib buffer is
+ * passed to hicn-interest-hitpit, while if there is a hit in the CS the vlib
+ * buffer is passed to hicn-interest-hitcs. If there is no hit, the vlib
+ * buffer is passed to the hicn-strategy node.
*/
/*
@@ -58,6 +58,16 @@ typedef enum
HICN_INTEREST_PCSLOOKUP_N_NEXT,
} hicn_interest_pcslookup_next_t;
+typedef enum
+{
+ HICN_INTEREST_MANIFEST_PCSLOOKUP_NEXT_FACE4,
+ HICN_INTEREST_MANIFEST_PCSLOOKUP_NEXT_FACE6,
+ HICN_INTEREST_MANIFEST_PCSLOOKUP_NEXT_INTEREST_HITPIT,
+ HICN_INTEREST_MANIFEST_PCSLOOKUP_NEXT_INTEREST_HITCS,
+ HICN_INTEREST_MANIFEST_PCSLOOKUP_NEXT_ERROR_DROP,
+ HICN_INTEREST_MANIFEST_PCSLOOKUP_N_NEXT,
+} hicn_interest_manifest_pcslookup_next_t;
+
#endif /* // __HICN_INTEREST_PCSLOOKUP_H__ */
/*
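Note: illustrative, not part of the patch. The lookup node in the next file dispatches a PCS hit with the small arithmetic below; it relies on HICN_INTEREST_PCSLOOKUP_NEXT_INTEREST_HITCS immediately following HICN_INTEREST_PCSLOOKUP_NEXT_INTEREST_HITPIT in hicn_interest_pcslookup_next_t (assumed from the node code, the enum body is outside this hunk):

  /* is_cs == 0 -> hicn-interest-hitpit, is_cs == 1 -> hicn-interest-hitcs */
  next0 = HICN_INTEREST_PCSLOOKUP_NEXT_INTEREST_HITPIT +
	  hicn_pcs_entry_is_cs (pcs_entry);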
diff --git a/hicn-plugin/src/interest_pcslookup_node.c b/hicn-plugin/src/interest_pcslookup_node.c
index 6ac2aa3a0..e8aad4a83 100644
--- a/hicn-plugin/src/interest_pcslookup_node.c
+++ b/hicn-plugin/src/interest_pcslookup_node.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Copyright (c) 2021-2022 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -23,8 +23,11 @@
#include "error.h"
#include "state.h"
+#include <hicn/interest_manifest.h>
+
/**
- * @FILE This node performs a lookup in the PIT and CS for a received interest packet.
+ * @file This node performs a lookup in the PIT and CS for a received
+ * interest packet.
*
* This node passes the packet to the interest-hitpit and interest-hitcs nodes
* when there is a hit in the pit or content store, respectively.
@@ -33,8 +36,7 @@
/* Functions declarations */
/* packet trace format function */
-static u8 *hicn_interest_pcslookup_format_trace (u8 * s, va_list * args);
-
+static u8 *hicn_interest_pcslookup_format_trace (u8 *s, va_list *args);
/* Stats string values */
static char *hicn_interest_pcslookup_error_strings[] = {
@@ -45,19 +47,40 @@ static char *hicn_interest_pcslookup_error_strings[] = {
vlib_node_registration_t hicn_interest_pcslookup_node;
+always_inline void
+drop_packet (vlib_main_t *vm, u32 bi0, u32 *n_left_to_next, u32 *next0,
+ u32 **to_next, u32 *next_index, vlib_node_runtime_t *node)
+{
+ *next0 = HICN_INTEREST_PCSLOOKUP_NEXT_ERROR_DROP;
+
+ (*to_next)[0] = bi0;
+ *to_next += 1;
+ *n_left_to_next -= 1;
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, *next_index, *to_next,
+ *n_left_to_next, bi0, *next0);
+}
+
/*
- * ICN forwarder node for interests: handling of Interests delivered based on
- * ACL. - 1 packet at a time - ipv4/tcp ipv6/tcp
+ * ICN forwarder node for interests.
*/
static uword
-hicn_interest_pcslookup_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
- vlib_frame_t * frame)
+hicn_interest_pcslookup_node_inline (vlib_main_t *vm,
+ vlib_node_runtime_t *node,
+ vlib_frame_t *frame)
{
+ int ret;
u32 n_left_from, *from, *to_next;
hicn_interest_pcslookup_next_t next_index;
hicn_interest_pcslookup_runtime_t *rt;
vl_api_hicn_api_node_stats_get_reply_t stats = { 0 };
- int ret;
+ vlib_buffer_t *b0;
+ u32 bi0;
+ u32 next0 = HICN_INTEREST_PCSLOOKUP_NEXT_ERROR_DROP;
+ hicn_pcs_entry_t *pcs_entry = NULL;
+ f64 tnow;
+ hicn_buffer_t *hicnb0;
+ const hicn_strategy_vft_t *strategy;
rt = vlib_node_get_runtime_data (vm, hicn_interest_pcslookup_node.index);
@@ -69,38 +92,23 @@ hicn_interest_pcslookup_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
n_left_from = frame->n_vectors;
next_index = node->cached_next_index;
+ tnow = vlib_time_now (vm);
+
while (n_left_from > 0)
{
u32 n_left_to_next;
vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
while (n_left_from > 0 && n_left_to_next > 0)
{
- vlib_buffer_t *b0;
- u8 isv6;
- u8 *nameptr;
- u16 namelen;
- u32 bi0;
- u32 next0 = HICN_INTEREST_PCSLOOKUP_NEXT_ERROR_DROP;
- u64 name_hash = 0;
- hicn_name_t name;
- hicn_header_t *hicn0;
- u32 node_id0 = 0;
- index_t dpo_ctx_id0 = 0;
- u8 vft_id0 = 0;
- u8 is_cs0 = 0;
- u8 hash_entry_id = 0;
- u8 bucket_is_overflown = 0;
- u32 bucket_id = ~0;
-
/* Prefetch for next iteration. */
if (n_left_from > 1)
{
vlib_buffer_t *b1;
b1 = vlib_get_buffer (vm, from[1]);
CLIB_PREFETCH (b1, CLIB_CACHE_LINE_BYTES, STORE);
- CLIB_PREFETCH (b1->data, CLIB_CACHE_LINE_BYTES, LOAD);
}
- /* Dequeue a packet buffer */
+
+ // Dequeue a packet buffer
bi0 = from[0];
from += 1;
n_left_from -= 1;
@@ -109,49 +117,104 @@ hicn_interest_pcslookup_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
n_left_to_next -= 1;
b0 = vlib_get_buffer (vm, bi0);
- ret = hicn_interest_parse_pkt (b0, &name, &namelen, &hicn0, &isv6);
- if (PREDICT_TRUE (ret == HICN_ERROR_NONE))
+ // By default we send the interest to strategy node
+ next0 = HICN_INTEREST_PCSLOOKUP_NEXT_STRATEGY;
+
+ // Update stats
+ stats.pkts_processed++;
+
+ hicnb0 = hicn_get_buffer (b0);
+
+ // Check if the interest is in the PCS already
+ hicn_name_t name;
+ hicn_packet_get_name (&hicnb0->pkbuf, &name);
+ ret = hicn_pcs_lookup_one (rt->pitcs, &name, &pcs_entry);
+
+ if (ret == HICN_ERROR_NONE)
{
- next0 = HICN_INTEREST_PCSLOOKUP_NEXT_STRATEGY;
+ // We found an entry in the PCS.
+ ret = hicn_store_internal_state (
+ b0, hicn_pcs_entry_get_index (rt->pitcs, pcs_entry),
+ vnet_buffer (b0)->ip.adj_index[VLIB_TX]);
+
+ // Make sure the entry is not expired first
+ if (tnow > hicn_pcs_entry_get_expire_time (pcs_entry))
+ {
+ // Notify strategy
+ strategy = hicn_dpo_get_strategy_vft (hicnb0->vft_id);
+ strategy->hicn_on_interest_timeout (
+ vnet_buffer (b0)->ip.adj_index[VLIB_TX]);
+
+ // Release lock on entry - this MUST delete the entry
+ hicn_pcs_entry_remove_lock (rt->pitcs, pcs_entry);
+
+ stats.pit_expired_count++;
+
+ // Forward to strategy node
+ // TODO this can be simplified by checking directly in the
+ // pcslookup node!
+ next0 = HICN_INTEREST_PCSLOOKUP_NEXT_STRATEGY;
+
+ goto newentry;
+ }
+ else
+ {
+ // Next stage for this packet is one of hitpit/cs nodes
+ next0 = HICN_INTEREST_PCSLOOKUP_NEXT_INTEREST_HITPIT +
+ hicn_pcs_entry_is_cs (pcs_entry);
+
+ if (PREDICT_FALSE (ret != HICN_ERROR_NONE))
+ next0 = HICN_INTEREST_PCSLOOKUP_NEXT_ERROR_DROP;
+
+ goto end;
+ }
}
- nameptr = (u8 *) (&name);
- stats.pkts_processed++;
+ newentry:
+ // No entry in PCS. Let's create one now
+ pcs_entry = hicn_pcs_entry_pit_get (rt->pitcs, tnow,
+ hicn_buffer_get_lifetime (b0));
- if (PREDICT_FALSE (ret != HICN_ERROR_NONE ||
- hicn_hashtb_fullhash (nameptr, namelen,
- &name_hash) !=
- HICN_ERROR_NONE))
+ ret = hicn_pcs_pit_insert (rt->pitcs, pcs_entry, &name);
+
+ if (PREDICT_FALSE (ret != HICN_ERROR_NONE))
{
next0 = HICN_INTEREST_PCSLOOKUP_NEXT_ERROR_DROP;
+ goto end;
}
- else
+
+ // Store internal state
+ ret = hicn_store_internal_state (
+ b0, hicn_pcs_entry_get_index (rt->pitcs, pcs_entry),
+ vnet_buffer (b0)->ip.adj_index[VLIB_TX]);
+
+ if (PREDICT_FALSE (ret != HICN_ERROR_NONE))
{
- if (hicn_hashtb_lookup_node (rt->pitcs->pcs_table, nameptr,
- namelen, name_hash,
- 0 /* is_data */ , &node_id0,
- &dpo_ctx_id0, &vft_id0, &is_cs0,
- &hash_entry_id, &bucket_id,
- &bucket_is_overflown) ==
- HICN_ERROR_NONE)
- {
- next0 =
- HICN_INTEREST_PCSLOOKUP_NEXT_INTEREST_HITPIT + is_cs0;
- }
- stats.pkts_interest_count++;
+ hicn_pcs_entry_remove_lock (rt->pitcs, pcs_entry);
+ drop_packet (vm, bi0, &n_left_from, &next0, &to_next,
+ &next_index, node);
+ continue;
}
- hicn_store_internal_state (b0, name_hash, node_id0, dpo_ctx_id0,
- vft_id0, hash_entry_id, bucket_id,
- bucket_is_overflown);
+ // Add face
+ hicn_pcs_entry_pit_add_face (pcs_entry, hicnb0->face_id);
+
+ end:
+ stats.pkts_interest_count++;
- /* Maybe trace */
+ // Interest manifest?
+ if (hicn_buffer_get_payload_type (b0) == HPT_MANIFEST)
+ {
+ ;
+ }
+
+ // Maybe trace
if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) &&
(b0->flags & VLIB_BUFFER_IS_TRACED)))
{
hicn_interest_pcslookup_trace_t *t =
vlib_add_trace (vm, node, b0, sizeof (*t));
- t->pkt_type = HICN_PKT_TYPE_INTEREST;
+ t->pkt_type = HICN_PACKET_TYPE_INTEREST;
t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
t->next_index = next0;
}
@@ -159,17 +222,13 @@ hicn_interest_pcslookup_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
* Verify speculative enqueue, maybe switch current
* next frame
*/
- vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
- to_next, n_left_to_next,
- bi0, next0);
-
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+ n_left_to_next, bi0, next0);
}
vlib_put_next_frame (vm, node, next_index, n_left_to_next);
}
- u32 pit_int_count = hicn_pit_get_int_count (rt->pitcs);
- u32 pit_cs_count = hicn_pit_get_cs_count (rt->pitcs);
- u32 pcs_ntw_count = hicn_pcs_get_ntw_count (rt->pitcs);
-
+ u32 pit_int_count = hicn_pcs_get_pit_count (rt->pitcs);
+ u32 pit_cs_count = hicn_pcs_get_cs_count (rt->pitcs);
vlib_node_increment_counter (vm, hicn_interest_pcslookup_node.index,
HICNFWD_ERROR_PROCESSED, stats.pkts_processed);
@@ -184,19 +243,330 @@ hicn_interest_pcslookup_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
update_node_counter (vm, hicn_interest_pcslookup_node.index,
HICNFWD_ERROR_CS_COUNT, pit_cs_count);
+ return (frame->n_vectors);
+}
+
+/*
+ * ICN forwarder node for interests manifest
+ */
+static uword
+hicn_interest_manifest_pcslookup_node_inline (vlib_main_t *vm,
+ vlib_node_runtime_t *node,
+ vlib_frame_t *frame)
+{
+ int ret;
+ u32 n_left_from, *from, *to_next;
+ hicn_interest_pcslookup_next_t next_index;
+ hicn_interest_pcslookup_runtime_t *rt;
+ vl_api_hicn_api_node_stats_get_reply_t stats = { 0 };
+ vlib_buffer_t *b0;
+ hicn_buffer_t *hicnb0;
+ u32 bi0;
+
+ const hicn_dpo_ctx_t *dpo_ctx;
+ const hicn_strategy_vft_t *strategy;
+
+ u16 outfaces_len;
+
+ // For cloning
+ u32 clones[MAX_OUT_FACES];
+ hicn_face_id_t outfaces[MAX_OUT_FACES];
+ u32 next0 = HICN_INTEREST_PCSLOOKUP_NEXT_ERROR_DROP;
+ hicn_pcs_entry_t *pcs_entry = NULL;
+ interest_manifest_header_t *int_manifest_header = NULL;
+ unsigned long pos = 0;
+
+ rt = vlib_node_get_runtime_data (vm, hicn_interest_pcslookup_node.index);
+
+ if (PREDICT_FALSE (rt->pitcs == NULL))
+ {
+ rt->pitcs = &hicn_main.pitcs;
+ }
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+ int forward = 0;
+
+ vlib_buffer_t *cloneb;
+
+ // Register now
+ f64 tnow = vlib_time_now (vm);
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ /* Prefetch for next iteration. */
+ if (n_left_from > 1)
+ {
+ vlib_buffer_t *b1;
+ b1 = vlib_get_buffer (vm, from[1]);
+ CLIB_PREFETCH (b1, CLIB_CACHE_LINE_BYTES, STORE);
+ }
+
+ // Dequeue a packet buffer
+ bi0 = from[0];
+ from += 1;
+ n_left_from -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ hicnb0 = hicn_get_buffer (b0);
+
+ // By default we send the interest to drop node
+ next0 = HICN_INTEREST_MANIFEST_PCSLOOKUP_NEXT_ERROR_DROP;
+
+ // Update stats
+ stats.pkts_processed++;
+
+ // Do not forward by default
+ forward = 0;
+
+ // Check if the interest is in the PCS already
+ hicn_name_t name;
+ hicn_packet_get_name (&hicn_get_buffer (b0)->pkbuf, &name);
+
+ ASSERT (hicn_buffer_get_payload_type (b0) == HPT_MANIFEST);
+
+ // Process interest manifest
+ u8 *payload;
+ size_t payload_size;
+ hicn_interest_get_payload (&hicn_get_buffer (b0)->pkbuf, &payload,
+ &payload_size, 0);
+ int_manifest_header = (interest_manifest_header_t *) (payload);
+
+ // Deserialize interest manifest
+ interest_manifest_deserialize (int_manifest_header);
+
+ hicn_name_suffix_t *suffix;
+ if (interest_manifest_is_valid (int_manifest_header, payload_size))
+ {
+ interest_manifest_foreach_suffix (int_manifest_header, suffix,
+ pos)
+ {
+ name.suffix = *suffix;
+ ret = hicn_pcs_lookup_one (rt->pitcs, &name, &pcs_entry);
+
+ if (ret == HICN_ERROR_NONE)
+ {
+ // Match in PCS. We need to clone a packet for the
+ // interest_hic{pit,cs} nodes.
+
+ next0 = HICN_INTEREST_PCSLOOKUP_NEXT_INTEREST_HITPIT +
+ hicn_pcs_entry_is_cs (pcs_entry);
+
+ vlib_buffer_clone (vm, bi0, clones, 1, 0);
+ cloneb = vlib_get_buffer (vm, clones[0]);
+
+ ret = hicn_store_internal_state (
+ cloneb,
+ hicn_pcs_entry_get_index (rt->pitcs, pcs_entry),
+ vnet_buffer (b0)->ip.adj_index[VLIB_TX]);
+
+ if (PREDICT_FALSE (ret != HICN_ERROR_NONE))
+ next0 = HICN_INTEREST_PCSLOOKUP_NEXT_ERROR_DROP;
+
+ to_next[0] = clones[0];
+ to_next += 1;
+ n_left_to_next -= 1;
+
+ // Distinguish between aggregation or retransmission
+ ret = hicn_pcs_entry_pit_search (
+ pcs_entry, hicn_get_buffer (b0)->face_id);
+ if (!ret)
+ {
+ // Aggregated interest. Unset the corresponding
+ // position in bitmap.
+ bitmap_unset_no_check (
+ int_manifest_header->request_bitmap, pos);
+ }
+ else
+ {
+			  // Interest must be forwarded to the face node as it
+			  // contains retransmissions
+ forward = 1;
+ }
+
+ // Maybe trace
+ if (PREDICT_FALSE (
+ (node->flags & VLIB_NODE_FLAG_TRACE) &&
+ (b0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ hicn_interest_pcslookup_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->pkt_type = HICN_PACKET_TYPE_INTEREST;
+ t->sw_if_index =
+ vnet_buffer (b0)->sw_if_index[VLIB_RX];
+ t->next_index = next0;
+ }
+ /*
+ * Verify speculative enqueue, maybe switch current
+ * next frame
+ */
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ clones[0], next0);
+ }
+ else
+ {
+ // No match. Create new pcs entry and set interest to be
+ // forwarded
+ pcs_entry = hicn_pcs_entry_pit_get (
+ rt->pitcs, tnow, hicn_buffer_get_lifetime (b0));
+
+ // Add entry to PCS table
+ ret = hicn_pcs_pit_insert (rt->pitcs, pcs_entry, &name);
+
+ // This cannot fail as we just checked if PCS contains
+ // this entry
+ assert (ret == HICN_ERROR_NONE);
+
+ // Store internal state
+ ret = hicn_store_internal_state (
+ b0, hicn_pcs_entry_get_index (rt->pitcs, pcs_entry),
+ vnet_buffer (b0)->ip.adj_index[VLIB_TX]);
+
+ if (PREDICT_FALSE (ret != HICN_ERROR_NONE))
+ {
+ // Remove entry
+ hicn_pcs_entry_remove_lock (rt->pitcs, pcs_entry);
+ // We do not drop the packet as it is an interest
+ // manifest.
+ continue;
+ }
+
+ // Add face
+ hicn_pcs_entry_pit_add_face (
+ pcs_entry, hicn_get_buffer (b0)->face_id);
+
+ forward = 1;
+ }
+ }
+ }
+ else
+ {
+ next0 = HICN_INTEREST_MANIFEST_PCSLOOKUP_NEXT_ERROR_DROP;
+ }
+
+ // If interest must be forwarded, let's do it now
+ if (forward)
+ {
+ // Serialize interest manifest again
+ interest_manifest_serialize (int_manifest_header);
+
+ // Get the strategy VFT
+ hicnb0->dpo_ctx_id = vnet_buffer (b0)->ip.adj_index[VLIB_TX];
+ dpo_ctx = hicn_strategy_dpo_ctx_get (hicnb0->dpo_ctx_id);
+ hicnb0->vft_id = dpo_ctx->dpo_type;
+ strategy = hicn_dpo_get_strategy_vft (hicnb0->vft_id);
+ strategy->hicn_add_interest (hicnb0->dpo_ctx_id);
+
+ // Check we have at least one next hop for the packet
+ ret = strategy->hicn_select_next_hop (
+ hicnb0->dpo_ctx_id, hicnb0->face_id, outfaces, &outfaces_len);
+ if (ret == HICN_ERROR_NONE && outfaces_len > 0)
+ {
+ next0 = hicn_buffer_is_v6 (b0) ?
+ HICN_INTEREST_MANIFEST_PCSLOOKUP_NEXT_FACE6 :
+ HICN_INTEREST_MANIFEST_PCSLOOKUP_NEXT_FACE4;
+
+ // Clone interest if needed
+ if (outfaces_len > 1)
+ {
+ ret =
+ vlib_buffer_clone (vm, bi0, clones, (u16) outfaces_len,
+ CLIB_CACHE_LINE_BYTES * 2);
+ ASSERT (ret == outfaces_len);
+ }
+ else
+ {
+ clones[0] = bi0;
+ }
+
+ // Send interest to next hops
+ for (u32 nh = 0; nh < outfaces_len; nh++)
+ {
+ vlib_buffer_t *local_b0 =
+ vlib_get_buffer (vm, clones[nh]);
+
+ to_next[0] = clones[nh];
+ to_next += 1;
+ n_left_to_next -= 1;
+
+ vnet_buffer (local_b0)->ip.adj_index[VLIB_TX] =
+ outfaces[nh];
+ stats.pkts_interest_count++;
+
+ // Maybe trace
+ if (PREDICT_FALSE (
+ (node->flags & VLIB_NODE_FLAG_TRACE) &&
+ (local_b0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ hicn_strategy_trace_t *t =
+ vlib_add_trace (vm, node, local_b0, sizeof (*t));
+ t->pkt_type = HICN_PACKET_TYPE_DATA;
+ t->sw_if_index =
+ vnet_buffer (local_b0)->sw_if_index[VLIB_RX];
+ t->next_index = next0;
+ t->dpo_type = hicnb0->vft_id;
+ }
+
+ /*
+ * Fix in case of a wrong speculation. Needed for
+ * cloning the data in the right frame
+ */
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ clones[nh], next0);
+ }
+ }
+ else
+ {
+ next0 = HICN_INTEREST_MANIFEST_PCSLOOKUP_NEXT_ERROR_DROP;
+
+ to_next[0] = bi0;
+ to_next += 1;
+ n_left_to_next -= 1;
+
+ /*
+ * Fix in case of a wrong speculation. Needed for
+ * cloning the data in the right frame
+ */
+ vlib_validate_buffer_enqueue_x1 (
+ vm, node, next_index, to_next, n_left_to_next, bi0, next0);
+ }
+ }
+ }
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ u32 pit_int_count = hicn_pcs_get_pit_count (rt->pitcs);
+ u32 pit_cs_count = hicn_pcs_get_cs_count (rt->pitcs);
+
+ vlib_node_increment_counter (vm, hicn_interest_pcslookup_node.index,
+ HICNFWD_ERROR_PROCESSED, stats.pkts_processed);
+
+ vlib_node_increment_counter (vm, hicn_interest_pcslookup_node.index,
+ HICNFWD_ERROR_INTERESTS,
+ stats.pkts_interest_count);
+
update_node_counter (vm, hicn_interest_pcslookup_node.index,
- HICNFWD_ERROR_CS_NTW_COUNT, pcs_ntw_count);
+ HICNFWD_ERROR_INT_COUNT, pit_int_count);
+
+ update_node_counter (vm, hicn_interest_pcslookup_node.index,
+ HICNFWD_ERROR_CS_COUNT, pit_cs_count);
return (frame->n_vectors);
}
/* packet trace format function */
static u8 *
-hicn_interest_pcslookup_format_trace (u8 * s, va_list * args)
+hicn_interest_pcslookup_format_trace (u8 *s, va_list *args)
{
CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
- hicn_interest_pcslookup_trace_t *t =
+ const hicn_interest_pcslookup_trace_t *t =
va_arg (*args, hicn_interest_pcslookup_trace_t *);
s = format (s, "INTEREST_PCSLOOKUP: pkt: %d, sw_if_index %d, next index %d",
@@ -204,14 +574,23 @@ hicn_interest_pcslookup_format_trace (u8 * s, va_list * args)
return (s);
}
+VLIB_NODE_FN (hicn_interest_pcslookup_node)
+(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
+{
+ return hicn_interest_pcslookup_node_inline (vm, node, frame);
+}
+
+VLIB_NODE_FN (hicn_interest_manifest_pcslookup_node)
+(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
+{
+ return hicn_interest_manifest_pcslookup_node_inline (vm, node, frame);
+}
/*
* Node registration for the interest forwarder node
*/
-/* *INDENT-OFF* */
VLIB_REGISTER_NODE(hicn_interest_pcslookup_node) =
{
- .function = hicn_interest_pcslookup_node_fn,
.name = "hicn-interest-pcslookup",
.vector_size = sizeof(u32),
.runtime_data_bytes = sizeof(hicn_interest_pcslookup_runtime_t),
@@ -228,7 +607,29 @@ VLIB_REGISTER_NODE(hicn_interest_pcslookup_node) =
[HICN_INTEREST_PCSLOOKUP_NEXT_ERROR_DROP] = "error-drop",
},
};
-/* *INDENT-ON* */
+
+/*
+ * Node registration for the interest manifest forwarder node
+ */
+VLIB_REGISTER_NODE(hicn_interest_manifest_pcslookup_node) =
+{
+ .name = "hicn-interest-manifest-pcslookup",
+ .vector_size = sizeof(u32),
+ .runtime_data_bytes = sizeof(hicn_interest_pcslookup_runtime_t),
+ .format_trace = hicn_interest_pcslookup_format_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+ .n_errors = ARRAY_LEN(hicn_interest_pcslookup_error_strings),
+ .error_strings = hicn_interest_pcslookup_error_strings,
+ .n_next_nodes = HICN_INTEREST_MANIFEST_PCSLOOKUP_N_NEXT,
+ .next_nodes =
+ {
+ [HICN_INTEREST_MANIFEST_PCSLOOKUP_NEXT_FACE4] = "hicn4-face-output",
+ [HICN_INTEREST_MANIFEST_PCSLOOKUP_NEXT_FACE6] = "hicn6-face-output",
+ [HICN_INTEREST_MANIFEST_PCSLOOKUP_NEXT_INTEREST_HITPIT] = "hicn-interest-hitpit",
+ [HICN_INTEREST_MANIFEST_PCSLOOKUP_NEXT_INTEREST_HITCS] = "hicn-interest-hitcs",
+ [HICN_INTEREST_MANIFEST_PCSLOOKUP_NEXT_ERROR_DROP] = "error-drop",
+ },
+};
/*
* fd.io coding-style-patch-verification: ON
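Note: illustrative, not part of the patch. The PIT-miss path of the lookup node above, restated with the cleanup order made explicit; it assumes the same rt, b0, hicnb0, name and tnow variables as the node function:

  /* PCS miss: allocate a PIT entry, index it by name, record per-packet
   * state in the buffer, then remember the ingress face */
  pcs_entry = hicn_pcs_entry_pit_get (rt->pitcs, tnow,
				      hicn_buffer_get_lifetime (b0));

  if (hicn_pcs_pit_insert (rt->pitcs, pcs_entry, &name) != HICN_ERROR_NONE)
    {
      next0 = HICN_INTEREST_PCSLOOKUP_NEXT_ERROR_DROP;
    }
  else if (hicn_store_internal_state (
	     b0, hicn_pcs_entry_get_index (rt->pitcs, pcs_entry),
	     vnet_buffer (b0)->ip.adj_index[VLIB_TX]) != HICN_ERROR_NONE)
    {
      /* undo the insert before dropping */
      hicn_pcs_entry_remove_lock (rt->pitcs, pcs_entry);
      next0 = HICN_INTEREST_PCSLOOKUP_NEXT_ERROR_DROP;
    }
  else
    {
      hicn_pcs_entry_pit_add_face (pcs_entry, hicnb0->face_id);
      next0 = HICN_INTEREST_PCSLOOKUP_NEXT_STRATEGY;
    }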
diff --git a/hicn-plugin/src/mapme.h b/hicn-plugin/src/mapme.h
index 17bd9a766..c5567d1d4 100644
--- a/hicn-plugin/src/mapme.h
+++ b/hicn-plugin/src/mapme.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Copyright (c) 2021 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -21,9 +21,9 @@
#include <hicn/mapme.h>
#include "hicn.h"
-#include "route.h"
#include "strategy_dpo_ctx.h"
-#include "strategy_dpo_manager.h" // dpo_is_hicn
+#include "strategy_dpo_manager.h" // dpo_is_hicn
+#include "udp_tunnels/udp_tunnel.h"
/**
* @file
@@ -32,53 +32,54 @@
*
* Mapme implementation follows the "Anchorless mobility through hICN" document
* specification. In particular, the implementation is made of:
- * - two internal nodes: hicn-mapme-ctrl and hicn-mapme-ack. The former processes
- * IU and the latter IU acknowledgment.
- * - a process node, mapme-eventmgr-process, that is signaled every time a face is
- * added or deleted, as well as when a new next hop is added to a fib entry as a
- * result of a mobility event.
+ * - two internal nodes: hicn-mapme-ctrl and hicn-mapme-ack. The former
+ * processes IUs and the latter IU acknowledgments.
+ * - a process node, mapme-eventmgr-process, that is signaled every time a
+ * face is added or deleted, as well as when a new next hop is added to a fib
+ * entry as a result of a mobility event.
*
- * TFIB implementation is done as an extension of an hICN fib entry. In particular,
- * the list of next hops hold the list of next hops in the tfib as well (stored at the
- * end of the list of regualt next hops). Mapme implementation follows the hICN vrf
- * implementation and consider the vrf 0 (default fib) as the control-plane fib to
- * update every time a new next hop must be added or removed.
+ * The TFIB is implemented as an extension of an hICN FIB entry. In
+ * particular, the next-hop list also holds the TFIB next hops, stored at the
+ * end of the list of regular next hops. The MAP-Me implementation follows
+ * the hICN vrf implementation and considers vrf 0 (the default fib) as the
+ * control-plane fib to update every time a new next hop must be added or
+ * removed.
*/
-
#define HICN_MAPME_ALLOW_LOCATORS 1
//#define HICN_MAPME_NOTIFICATIONS 1
#define NOT_A_NOTIFICATION false
-#define TIMER_NO_REPEAT false
+#define TIMER_NO_REPEAT false
#define INVALID_SEQ 0
-STATIC_ASSERT (sizeof(u32) == sizeof(seq_t),
- "seq_t is not 4 bytes");
+STATIC_ASSERT (sizeof (u32) == sizeof (seq_t), "seq_t is not 4 bytes");
typedef struct hicn_mapme_conf_s
{
hicn_mapme_conf_t conf;
- bool remove_dpo; // FIXME used ?
+ bool remove_dpo; // FIXME used ?
+ fib_prefix_t default_route;
vlib_main_t *vm;
- vlib_log_class_t log_class;
} hicn_mapme_main_t;
+extern hicn_mapme_main_t *hicn_mapme_get_main ();
+
/**
 * @brief List of events to signal to the processing node (eventmgr)
*/
-#define foreach_hicn_mapme_event \
- _(FACE_ADD) \
- _(FACE_DEL) \
- _(FACE_APP_ADD) \
- _(FACE_APP_DEL) \
- _(FACE_NH_SET) \
- _(FACE_NH_ADD) \
- _(FACE_PH_ADD) \
- _(FACE_PH_DEL)
+#define foreach_hicn_mapme_event \
+ _ (FACE_ADD) \
+ _ (FACE_DEL) \
+ _ (FACE_APP_ADD) \
+ _ (FACE_APP_DEL) \
+ _ (FACE_NH_SET) \
+ _ (FACE_NH_ADD) \
+ _ (FACE_PH_ADD) \
+ _ (FACE_PH_DEL)
typedef enum
{
@@ -89,6 +90,21 @@ typedef enum
typedef hicn_dpo_ctx_t hicn_mapme_tfib_t;
+/**
+ * FIB Lookup Type
+ */
+#define foreach_hicn_mapme_fib_lookup_type \
+ _ (EPM) \
+ _ (LPM) \
+ _ (LESSPM)
+
+typedef enum
+{
+#define _(a) HICN_MAPME_FIB_LOOKUP_TYPE_##a,
+ foreach_hicn_mapme_fib_lookup_type
+#undef _
+} hicn_mapme_fib_lookup_type_t;
+
/*
* Ideally we might need to care about alignment, but this struct is only
* used for casting hicn_dpo_ctx_t.
@@ -99,63 +115,47 @@ typedef hicn_dpo_ctx_t hicn_mapme_tfib_t;
STATIC_ASSERT (sizeof (hicn_mapme_tfib_t) <= sizeof (hicn_dpo_ctx_t),
"hicn_mapme_tfib_t is greater than hicn_dpo_ctx_t");
-#define TFIB(dpo_ctx) ((hicn_mapme_tfib_t*)(dpo_ctx))
-
-static_always_inline int
-hicn_mapme_nh_set (hicn_mapme_tfib_t * tfib, hicn_face_id_t face_id)
-{
- hicn_dpo_ctx_t * strategy_ctx = (hicn_dpo_ctx_t *)tfib;
- const fib_prefix_t * prefix = fib_entry_get_prefix(strategy_ctx->fib_entry_index);
-
- u32 n_entries = tfib->entry_count;
- /* Remove all the existing next hops and set the new one */
- for (int i = 0; i < n_entries; i++)
- {
- hicn_face_t * face = hicn_dpoi_get_from_idx(strategy_ctx->next_hops[0]);
- ip_adjacency_t * adj = adj_get (face->dpo.dpoi_index);
- ip_nh_del_helper(face->dpo.dpoi_proto, prefix, &adj->sub_type.nbr.next_hop, face->sw_if);
- }
- hicn_face_t * face = hicn_dpoi_get_from_idx(face_id);
- ip_nh_add_helper(face->dpo.dpoi_proto, prefix, &face->nat_addr, face->sw_if);
- return 0;
-}
+#define TFIB(dpo_ctx) ((hicn_mapme_tfib_t *) (dpo_ctx))
/**
- * @brief Add a next hop iif it is not already a next hops
+ * @brief Check whether a face is already included in the TFIB.
+ *
+ * NOTE: linear scan on a contiguous small array should be the most efficient.
*/
static_always_inline int
-hicn_mapme_nh_add (hicn_mapme_tfib_t * tfib, hicn_face_id_t face_id)
+hicn_mapme_tfib_has (hicn_mapme_tfib_t *tfib, hicn_face_id_t face_id)
{
- for (u8 pos = 0; pos < tfib->entry_count; pos++)
- if (tfib->next_hops[pos] == face_id)
- return 0;
-
- /* Add the next hop in the vrf 0 which will add it to the entry in the hICN vrf */
- hicn_dpo_ctx_t * strategy_ctx = (hicn_dpo_ctx_t *)tfib;
- const fib_prefix_t * prefix = fib_entry_get_prefix(strategy_ctx->fib_entry_index);
- hicn_face_t * face = hicn_dpoi_get_from_idx(face_id);
- ip_nh_add_helper(face->dpo.dpoi_proto, prefix, &face->nat_addr, face->sw_if);
-
+ u8 pos = HICN_PARAM_FIB_ENTRY_NHOPS_MAX - tfib->tfib_entry_count;
+ for (u8 pos2 = pos; pos2 < HICN_PARAM_FIB_ENTRY_NHOPS_MAX; pos2++)
+ if (tfib->next_hops[pos2] == face_id)
+ return 1;
return 0;
}
/**
* Add a 'previous' hop to the TFIB
- *
- * XXX we should have the for look in the reverse order for simpler code.
*/
static_always_inline int
-hicn_mapme_tfib_add (hicn_mapme_tfib_t * tfib, hicn_face_id_t face_id)
+hicn_mapme_tfib_add (hicn_mapme_tfib_t *tfib, hicn_face_id_t face_id)
{
- u8 pos = HICN_PARAM_FIB_ENTRY_NHOPS_MAX - tfib->tfib_entry_count;
+ // Don't add if it already exists
+ // (eg. an old IU received on a face on which we are retransmitting)
+ if (hicn_mapme_tfib_has (tfib, face_id))
+ {
+      HICN_DEBUG ("Found face %d in tfib.", face_id);
+ return 0;
+ }
- //XXX don 't add if it already exist
- // eg.an old IU received on a face on which we are retransmitting
- for (u8 pos2 = pos; pos2 < HICN_PARAM_FIB_ENTRY_NHOPS_MAX; pos2++)
- if (tfib->next_hops[pos2] == face_id)
+  // If local face, do not put it in the TFIB
+ if (hicn_face_is_local (face_id))
+ {
+ HICN_DEBUG ("Do not add local face %d to TFIB.", face_id);
return 0;
+ }
+
+ u8 pos = HICN_PARAM_FIB_ENTRY_NHOPS_MAX - tfib->tfib_entry_count;
- //Make sure we have enough room
+ // Make sure we have enough room
if (pos <= tfib->entry_count)
return -1;
@@ -166,13 +166,13 @@ hicn_mapme_tfib_add (hicn_mapme_tfib_t * tfib, hicn_face_id_t face_id)
* Take a lock on the face as if it will be removed from the next_hops a
* lock will be removed.
*/
- hicn_face_lock_with_id(face_id);
+ hicn_face_lock_with_id (face_id);
return 0;
}
static_always_inline int
-hicn_mapme_tfib_clear (hicn_mapme_tfib_t * tfib)
+hicn_mapme_tfib_clear (hicn_mapme_tfib_t *tfib)
{
hicn_face_id_t invalid = NEXT_HOP_INVALID;
/*
@@ -182,11 +182,10 @@ hicn_mapme_tfib_clear (hicn_mapme_tfib_t * tfib)
u8 start_pos = HICN_PARAM_FIB_ENTRY_NHOPS_MAX - tfib->tfib_entry_count;
u8 pos = ~0;
for (pos = start_pos; pos < HICN_PARAM_FIB_ENTRY_NHOPS_MAX; pos++)
- {
- hicn_face_unlock_with_id (tfib->next_hops[pos]);
- tfib->next_hops[pos] = invalid;
- break;
- }
+ {
+ hicn_face_unlock_with_id (tfib->next_hops[pos]);
+ tfib->next_hops[pos] = invalid;
+ }
tfib->tfib_entry_count = 0;
@@ -194,7 +193,7 @@ hicn_mapme_tfib_clear (hicn_mapme_tfib_t * tfib)
}
static_always_inline int
-hicn_mapme_tfib_del (hicn_mapme_tfib_t * tfib, hicn_face_id_t face_id)
+hicn_mapme_tfib_del (hicn_mapme_tfib_t *tfib, hicn_face_id_t face_id)
{
hicn_face_id_t invalid = NEXT_HOP_INVALID;
/*
@@ -203,9 +202,12 @@ hicn_mapme_tfib_del (hicn_mapme_tfib_t * tfib, hicn_face_id_t face_id)
*/
u8 start_pos = HICN_PARAM_FIB_ENTRY_NHOPS_MAX - tfib->tfib_entry_count;
u8 pos = ~0;
+
for (pos = start_pos; pos < HICN_PARAM_FIB_ENTRY_NHOPS_MAX; pos++)
if (tfib->next_hops[pos] == face_id)
{
+ HICN_DEBUG ("Deleted the face_id=%d from TFIB as we received an ack.",
+ face_id);
hicn_face_unlock_with_id (tfib->next_hops[pos]);
tfib->next_hops[pos] = invalid;
break;
@@ -218,37 +220,22 @@ hicn_mapme_tfib_del (hicn_mapme_tfib_t * tfib, hicn_face_id_t face_id)
/* Likely we won't receive a new IU twice from the same face */
if (PREDICT_TRUE (pos > start_pos))
- memmove (tfib->next_hops + start_pos +1 , tfib->next_hops + start_pos,
+ memmove (tfib->next_hops + start_pos + 1, tfib->next_hops + start_pos,
(pos - start_pos) * sizeof (hicn_face_id_t));
return 0;
}
/**
- * @brief Performs an Exact Prefix Match lookup on the FIB
+ * @brief Retrieve the DPO from a FIB entry
* @returns the corresponding DPO (hICN or IP LB), or NULL
*/
-static_always_inline
- dpo_id_t * fib_epm_lookup (ip46_address_t * addr, u8 plen)
+static_always_inline dpo_id_t *
+dpo_from_fib_node_index (fib_node_index_t fib_entry_index)
{
- fib_prefix_t fib_pfx;
- fib_node_index_t fib_entry_index;
- u32 fib_index;
- dpo_id_t *dpo_id;
- load_balance_t *lb;
-
- const dpo_id_t *load_balance_dpo_id;
-
- /* At this point the face exists in the face table */
- fib_prefix_from_ip46_addr (addr, &fib_pfx);
- fib_pfx.fp_len = plen;
-
- /* Check if the route already exist in the fib : EPM */
- fib_index = fib_table_find (fib_pfx.fp_proto, HICN_FIB_TABLE);
-
- fib_entry_index = fib_table_lookup_exact_match (fib_index, &fib_pfx);
- if (fib_entry_index == FIB_NODE_INDEX_INVALID)
- return NULL;
+ const dpo_id_t *load_balance_dpo_id = NULL;
+ load_balance_t *lb = NULL;
+ dpo_id_t *dpo_id = NULL;
load_balance_dpo_id = fib_entry_contribute_ip_forwarding (fib_entry_index);
@@ -280,6 +267,46 @@ static_always_inline
return (dpo_id_t *) load_balance_dpo_id;
}
+/**
+ * @brief Performs an Exact Prefix Match lookup on the FIB
+ * @returns the corresponding DPO (hICN or IP LB), or NULL
+ */
+static_always_inline dpo_id_t *
+fib_lookup (ip46_address_t *addr, u8 plen,
+ hicn_mapme_fib_lookup_type_t lookup_type)
+{
+ fib_prefix_t fib_pfx;
+ fib_node_index_t fib_entry_index;
+ u32 fib_index;
+
+ /* At this point the face exists in the face table */
+ fib_prefix_from_ip46_addr (addr, &fib_pfx);
+ fib_pfx.fp_len = plen;
+
+  /* Check if the route already exists in the FIB */
+ fib_index = fib_table_find (fib_pfx.fp_proto, HICN_FIB_TABLE);
+
+ switch (lookup_type)
+ {
+ case HICN_MAPME_FIB_LOOKUP_TYPE_EPM:
+ fib_entry_index = fib_table_lookup_exact_match (fib_index, &fib_pfx);
+ break;
+ case HICN_MAPME_FIB_LOOKUP_TYPE_LPM:
+ fib_entry_index = fib_table_lookup (fib_index, &fib_pfx);
+ break;
+ case HICN_MAPME_FIB_LOOKUP_TYPE_LESSPM:
+ fib_entry_index = fib_table_get_less_specific (fib_index, &fib_pfx);
+ break;
+ default:
+ return NULL;
+ }
+
+ if (fib_entry_index == FIB_NODE_INDEX_INVALID)
+ return NULL;
+
+ return dpo_from_fib_node_index (fib_entry_index);
+}
+
/* DPO types */
extern dpo_type_t hicn_face_udp_type;
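Note: illustrative, not part of the patch. A minimal usage sketch of the new fib_lookup () helper, matching the exact-prefix-match call made by the ACK handler later in this patch; LPM and LESSPM select the longest-match and the covering (less specific) entry respectively:

  /* prefix is a hicn_prefix_t *, as in hicn_mapme_process_ack () */
  dpo_id_t *dpo = fib_lookup (&prefix->name.as_ip46, prefix->len,
			      HICN_MAPME_FIB_LOOKUP_TYPE_EPM);
  if (dpo == NULL || !dpo_is_hicn (dpo))
    return; /* no hICN FIB entry for this prefix */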
@@ -294,14 +321,15 @@ extern u32 strategy_face_ip6_vlib_edge;
extern u32 strategy_face_udp4_vlib_edge;
extern u32 strategy_face_udp6_vlib_edge;
-
/**
- * @brief Returns the next hop vlib edge on which we can send an Interest packet.
+ * @brief Returns the next hop vlib edge on which we can send an Interest
+ * packet.
*
- * This is both used to preprocess a dpo that will be stored as a next hop in the FIB, and to determine on which node to send an Interest Update.
+ * This is both used to preprocess a dpo that will be stored as a next hop in
+ * the FIB, and to determine on which node to send an Interest Update.
*/
always_inline u32
-hicn_mapme_get_dpo_vlib_edge (dpo_id_t * dpo)
+hicn_mapme_get_dpo_vlib_edge (dpo_id_t *dpo)
{
if (dpo->dpoi_type == hicn_face_ip_type)
{
@@ -339,7 +367,7 @@ hicn_mapme_get_dpo_vlib_edge (dpo_id_t * dpo)
always_inline char *
hicn_mapme_get_dpo_face_node (hicn_face_id_t face_id)
{
- hicn_face_t * face = hicn_dpoi_get_from_idx(face_id);
+ hicn_face_t *face = hicn_dpoi_get_from_idx (face_id);
switch (face->dpo.dpoi_proto)
{
@@ -352,10 +380,6 @@ hicn_mapme_get_dpo_face_node (hicn_face_id_t face_id)
}
}
-#define DEBUG(...) //vlib_log_debug(mapme_main.log_class, __VA_ARGS__)
-#define WARN(...) //vlib_log_warn(mapme_main.log_class, __VA_ARGS__)
-#define ERROR(...) //vlib_log_err(mapme_main.log_class, __VA_ARGS__)
-
#endif /* __HICN_MAPME__ */
/*
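Note: illustrative, not part of the patch. The TFIB helpers above all rely on the previous hops being stored at the tail of the next_hops[] array shared with the regular FIB next hops; a sketch of the index arithmetic, inferred from hicn_mapme_tfib_has () and hicn_mapme_tfib_add ():

  /*
   * next_hops[0 .. entry_count)                            regular next hops
   * next_hops[NHOPS_MAX - tfib_entry_count .. NHOPS_MAX)   TFIB previous hops
   */
  u8 tail = HICN_PARAM_FIB_ENTRY_NHOPS_MAX - tfib->tfib_entry_count;
  if (tail <= tfib->entry_count)
    return -1; /* the TFIB tail would collide with the regular next hops */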
diff --git a/hicn-plugin/src/mapme_ack.h b/hicn-plugin/src/mapme_ack.h
index 821baf203..9f47e9188 100644
--- a/hicn-plugin/src/mapme_ack.h
+++ b/hicn-plugin/src/mapme_ack.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Copyright (c) 2021 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -14,7 +14,7 @@
*/
/*
- * Copyright (c) 2017-2019 by Cisco Systems Inc. All Rights Reserved.
+ * Copyright (c) 2021 by Cisco Systems Inc. All Rights Reserved.
*
*/
@@ -41,6 +41,8 @@ typedef struct
u32 next_index;
u32 sw_if_index;
u8 pkt_type;
+ u32 seq;
+ hicn_prefix_t prefix;
} hicn_mapme_ack_trace_t;
typedef enum
diff --git a/hicn-plugin/src/mapme_ack_node.c b/hicn-plugin/src/mapme_ack_node.c
index f26895d20..e1f4d3889 100644
--- a/hicn-plugin/src/mapme_ack_node.c
+++ b/hicn-plugin/src/mapme_ack_node.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Copyright (c) 2021 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -14,6 +14,7 @@
*/
#include <vnet/ip/ip6_packet.h>
+#include <vlib/log.h>
#include "hicn.h"
#include "mapme.h"
@@ -30,8 +31,7 @@
extern hicn_mapme_main_t mapme_main;
/* packet trace format function */
-static u8 *hicn_mapme_ack_format_trace (u8 * s, va_list * args);
-
+static u8 *hicn_mapme_ack_format_trace (u8 *s, va_list *args);
/* Stats string values */
static char *hicn_mapme_ack_error_strings[] = {
@@ -47,44 +47,42 @@ static char *hicn_mapme_ack_error_strings[] = {
* @param face_id Ingress face id
*/
bool
-hicn_mapme_process_ack (vlib_main_t * vm, vlib_buffer_t * b,
- hicn_face_id_t in_face)
+hicn_mapme_process_ack (vlib_main_t *vm, vlib_buffer_t *b,
+ hicn_face_id_t in_face, hicn_prefix_t *prefix,
+ u32 *seq)
{
seq_t fib_seq;
const dpo_id_t *dpo;
- hicn_prefix_t prefix;
mapme_params_t params;
int rc;
/* Parse incoming message */
- rc =
- hicn_mapme_parse_packet (vlib_buffer_get_current (b), &prefix, &params);
+ rc = hicn_mapme_parse_packet (vlib_buffer_get_current (b), prefix, &params);
if (rc < 0)
goto ERR_PARSE;
- /* if (params.seq == INVALID_SEQ) */
- /* { */
- /* DEBUG ("Invalid sequence number found in IU"); */
- /* return true; */
- /* } */
+ *seq = params.seq;
+
+ HICN_DEBUG ("ACK - type:%d seq:%d prefix:%U len:%d", params.type, params.seq,
+ format_ip46_address, &prefix->name, IP46_TYPE_ANY, prefix->len);
- dpo = fib_epm_lookup (&(prefix.name), prefix.len);
+ dpo = fib_lookup (&(prefix->name.as_ip46), prefix->len,
+ HICN_MAPME_FIB_LOOKUP_TYPE_EPM);
if (!dpo)
{
- DEBUG ("Ignored ACK for non-existing FIB entry. Ignored.");
+      HICN_ERROR ("Ignored ACK for non-existing FIB entry %U.",
+ format_ip_prefix, prefix);
return true;
-
}
/* We are only expecting ACKs for hICN DPOs */
ASSERT (dpo_is_hicn (dpo));
- hicn_mapme_tfib_t *tfib =
- TFIB (hicn_strategy_dpo_ctx_get (dpo->dpoi_index));
+ hicn_mapme_tfib_t *tfib = TFIB (hicn_strategy_dpo_ctx_get (dpo->dpoi_index));
if (tfib == NULL)
{
- WARN ("Unable to get strategy ctx.");
+ HICN_ERROR ("Unable to get strategy ctx.");
return false;
}
@@ -96,27 +94,27 @@ hicn_mapme_process_ack (vlib_main_t * vm, vlib_buffer_t * b,
*/
if (params.seq < fib_seq)
{
- DEBUG ("Ignored ACK for low seq");
+ HICN_ERROR ("MAPME: Ignored ACK for low seq");
return true;
}
- hicn_mapme_tfib_del (tfib, in_face);
+  rc = hicn_mapme_tfib_del (tfib, in_face);
+ if (rc != HICN_ERROR_NONE)
+ {
+ HICN_ERROR (
+ "MAPME: Error deleting next hop %d from TFIB for prefix %U/%d",
+ in_face, format_ip46_address, &prefix->name, IP46_TYPE_ANY,
+ prefix->len);
+ }
/*
* Is the ingress face in TFIB ? if so, remove it, otherwise it might be a
* duplicate
*/
- retx_t *retx = vlib_process_signal_event_data (vm,
- hicn_mapme_eventmgr_process_node.
- index,
- HICN_MAPME_EVENT_FACE_PH_DEL,
- 1,
- sizeof (retx_t));
- *retx = (retx_t)
- {
- .prefix = prefix,
- .dpo = *dpo
- };
+ retx_t *retx = vlib_process_signal_event_data (
+ vm, hicn_mapme_eventmgr_process_node.index, HICN_MAPME_EVENT_FACE_PH_DEL,
+ 1, sizeof (retx_t));
+ *retx = (retx_t){ .prefix = *prefix, .dpo = *dpo };
return true;
@@ -127,24 +125,25 @@ ERR_PARSE:
vlib_node_registration_t hicn_mapme_ack_node;
static uword
-hicn_mapme_ack_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
- vlib_frame_t * frame)
+hicn_mapme_ack_node_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
+ vlib_frame_t *frame)
{
hicn_buffer_t *hb;
hicn_mapme_ack_next_t next_index;
u32 n_left_from, *from, *to_next;
n_left_from = frame->n_vectors;
+ hicn_prefix_t prefix;
+ u32 seq;
from = vlib_frame_vector_args (frame);
n_left_from = frame->n_vectors;
next_index = node->cached_next_index;
- while (n_left_from > 0) // buffers in the current frame
+ while (n_left_from > 0) // buffers in the current frame
{
u32 n_left_to_next;
vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
-
while (n_left_from > 0 && n_left_to_next > 0)
{
u32 bi0;
@@ -162,51 +161,53 @@ hicn_mapme_ack_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
vlib_cli_output (vm, "Received IUAck");
hb = hicn_get_buffer (b0);
- hicn_mapme_process_ack (vm, b0, hb->face_id);
+ hicn_mapme_process_ack (vm, b0, hb->face_id, &prefix, &seq);
/* Single loop: process 1 packet here */
sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
- if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
- && (b0->flags & VLIB_BUFFER_IS_TRACED)))
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) &&
+ (b0->flags & VLIB_BUFFER_IS_TRACED)))
{
hicn_mapme_ack_trace_t *t =
vlib_add_trace (vm, node, b0, sizeof (*t));
t->sw_if_index = sw_if_index0;
t->next_index = next0;
+ t->prefix = prefix;
+ t->seq = seq;
}
/* $$$$$ Done processing 1 packet here $$$$$ */
/* verify speculative enqueue, maybe switch current next frame */
- vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
- to_next, n_left_to_next,
- bi0, next0);
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+ n_left_to_next, bi0, next0);
}
vlib_put_next_frame (vm, node, next_index, n_left_to_next);
}
-// vlib_node_increment_counter (vm, hicn_mapme_ack_node.index,
-// HICN_MAPME_ACK_ERROR_SWAPPED, pkts_swapped);
+ // vlib_node_increment_counter (vm, hicn_mapme_ack_node.index,
+ // HICN_MAPME_ACK_ERROR_SWAPPED, pkts_swapped);
return (frame->n_vectors);
}
/* packet trace format function */
static u8 *
-hicn_mapme_ack_format_trace (u8 * s, va_list * args)
+hicn_mapme_ack_format_trace (u8 *s, va_list *args)
{
CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
hicn_mapme_ack_trace_t *t = va_arg (*args, hicn_mapme_ack_trace_t *);
- s = format (s, "MAPME_ACK: pkt: %d, sw_if_index %d, next index %d",
- (int) t->pkt_type, t->sw_if_index, t->next_index);
+ s = format (
+ s,
+ "MAPME_ACK: pkt: %d, sw_if_index %d, next index %d, prefix %U/%d, seq %u",
+ (int) t->pkt_type, t->sw_if_index, t->next_index, format_ip46_address,
+ &t->prefix.name, IP46_TYPE_ANY, t->prefix.len, t->seq);
return (s);
}
-
/*
* Node registration for the MAP-Me node processing special interests
*/
-/* *INDENT-OFF* */
VLIB_REGISTER_NODE (hicn_mapme_ack_node) =
{
.function = hicn_mapme_ack_node_fn,
@@ -223,7 +224,6 @@ VLIB_REGISTER_NODE (hicn_mapme_ack_node) =
[HICN_MAPME_ACK_NEXT_ERROR_DROP] = "error-drop",
},
};
-/* *INDENT-ON* */
/*
* fd.io coding-style-patch-verification: ON
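Note on the trace change above: the node function now fills t->prefix and t->seq and the format function prints them, but the updated hicn_mapme_ack_trace_t definition lives outside this diff. An assumed shape, consistent with the accesses shown in the hunk (field order and exact file are assumptions):

typedef struct
{
  u32 next_index;
  u32 sw_if_index;
  u8 pkt_type;
  u32 seq;		/* sequence number carried by the acknowledged IU */
  hicn_prefix_t prefix; /* prefix the ACK refers to */
} hicn_mapme_ack_trace_t;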
diff --git a/hicn-plugin/src/mapme_ctrl.h b/hicn-plugin/src/mapme_ctrl.h
index 9af4beccc..d3dfb7b69 100644
--- a/hicn-plugin/src/mapme_ctrl.h
+++ b/hicn-plugin/src/mapme_ctrl.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Copyright (c) 2021 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -14,7 +14,7 @@
*/
/*
- * Copyright (c) 2017-2019 by Cisco Systems Inc. All Rights Reserved.
+ * Copyright (c) 2021 by Cisco Systems Inc. All Rights Reserved.
*
*/
@@ -39,8 +39,9 @@ typedef struct hicn_mapme_ctrl_runtime_s
typedef struct
{
u32 next_index;
- u32 sw_if_index;
- u8 pkt_type;
+ u32 seq;
+ hicn_prefix_t prefix;
+ hicn_mapme_type_t type;
} hicn_mapme_ctrl_trace_t;
typedef enum
@@ -56,7 +57,7 @@ typedef enum
always_inline hicn_mapme_ctrl_next_t
hicn_mapme_ctrl_get_iface_node (hicn_face_id_t face_id)
{
- hicn_face_t * face = hicn_dpoi_get_from_idx(face_id);
+ hicn_face_t *face = hicn_dpoi_get_from_idx (face_id);
switch (face->dpo.dpoi_proto)
{
diff --git a/hicn-plugin/src/mapme_ctrl_node.c b/hicn-plugin/src/mapme_ctrl_node.c
index a0be2be1d..cf825a957 100644
--- a/hicn-plugin/src/mapme_ctrl_node.c
+++ b/hicn-plugin/src/mapme_ctrl_node.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Copyright (c) 2021 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -18,6 +18,7 @@
*/
#include <vnet/ip/ip6_packet.h>
#include <vnet/dpo/load_balance.h>
+#include <vlib/log.h>
#include "hicn.h"
#include "mapme.h"
@@ -30,6 +31,7 @@
#include "strategy_dpo_ctx.h"
#include "error.h"
#include "state.h"
+#include "route.h"
extern hicn_mapme_main_t mapme_main;
@@ -38,8 +40,7 @@ extern hicn_mapme_main_t mapme_main;
/* Functions declarations */
/* packet trace format function */
-static u8 *hicn_mapme_ctrl_format_trace (u8 * s, va_list * args);
-
+static u8 *hicn_mapme_ctrl_format_trace (u8 *s, va_list *args);
/* Stats string values */
static char *hicn_mapme_ctrl_error_strings[] = {
@@ -48,6 +49,184 @@ static char *hicn_mapme_ctrl_error_strings[] = {
#undef _
};
+static_always_inline int
+hicn_mapme_nh_set (hicn_mapme_tfib_t *tfib, hicn_face_id_t in_face_id)
+{
+ hicn_dpo_ctx_t *strategy_ctx = (hicn_dpo_ctx_t *) tfib;
+ const fib_prefix_t *prefix =
+ fib_entry_get_prefix (strategy_ctx->fib_entry_index);
+
+ int ret = 0;
+
+ if ((tfib->entry_count == 1) && (tfib->next_hops[0] == in_face_id))
+ return ret;
+
+ /*
+ * Remove all the existing next hops and set the new one
+ */
+ u32 n_entries = tfib->entry_count;
+ for (int i = 0; i < n_entries; i++)
+ {
+ hicn_face_t *face = hicn_dpoi_get_from_idx (strategy_ctx->next_hops[0]);
+ if (dpo_is_adj (&face->dpo))
+ {
+ ip_adjacency_t *adj = adj_get (face->dpo.dpoi_index);
+ ip_nh_adj_add_del_helper (prefix->fp_proto, prefix,
+ &adj->sub_type.nbr.next_hop, face->sw_if,
+ 0);
+ }
+ else if (dpo_is_udp_encap (&face->dpo))
+ {
+ ip_nh_udp_tunnel_add_del_helper (prefix->fp_proto, prefix,
+ face->dpo.dpoi_index,
+ face->dpo.dpoi_proto, 0);
+ }
+ else
+ {
+ continue;
+ }
+ }
+
+ ret = HICN_ERROR_MAPME_NEXT_HOP_ADDED;
+ hicn_face_t *face = hicn_dpoi_get_from_idx (in_face_id);
+ if (dpo_is_udp_encap (&face->dpo))
+ {
+ ip_nh_udp_tunnel_add_del_helper (prefix->fp_proto, prefix,
+ face->dpo.dpoi_index,
+ face->dpo.dpoi_proto, 1);
+ }
+ else if (dpo_is_adj (&face->dpo))
+ {
+ ip_nh_adj_add_del_helper (prefix->fp_proto, prefix, &face->nat_addr,
+ face->sw_if, 1);
+ }
+ else
+ {
+ ret = HICN_ERROR_MAPME_NEXT_HOP_NOT_ADDED;
+ }
+
+ return ret;
+}
+
+/**
+ * @brief Check whether a face is already included in the FIB nexthops.
+ *
+ * NOTE: linear scan on a contiguous small array should be the most efficient.
+ */
+static_always_inline int
+hicn_mapme_nh_has (hicn_mapme_tfib_t *tfib, hicn_face_id_t face_id)
+{
+ for (u8 pos = 0; pos < tfib->entry_count; pos++)
+ if (tfib->next_hops[pos] == face_id)
+ return 1;
+ return 0;
+}
+
+/**
+ * @brief Add a next hop if it is not already among the next hops
+ */
+static_always_inline int
+hicn_mapme_nh_add (hicn_mapme_tfib_t *tfib, hicn_face_id_t face_id)
+{
+ if (hicn_mapme_nh_has (tfib, face_id))
+ return 0;
+
+  /* Add the next hop to vrf 0, which will also add it to the entry in the
+   * hICN vrf */
+ hicn_dpo_ctx_t *strategy_ctx = (hicn_dpo_ctx_t *) tfib;
+ const fib_prefix_t *prefix =
+ fib_entry_get_prefix (strategy_ctx->fib_entry_index);
+ hicn_face_t *face = hicn_dpoi_get_from_idx (face_id);
+
+ if (dpo_is_udp_encap (&face->dpo))
+ {
+ ip_nh_udp_tunnel_add_del_helper ((fib_protocol_t) face->dpo.dpoi_proto,
+ prefix, face->dpo.dpoi_index,
+ face->dpo.dpoi_proto, 1);
+ }
+ else
+ {
+ ip_nh_adj_add_del_helper ((fib_protocol_t) face->dpo.dpoi_proto, prefix,
+ &face->nat_addr, face->sw_if, 1);
+ }
+
+ return 0;
+}
+
+/**
+ * @brief Add a route to the fib.
+ *
+ */
+static_always_inline int
+hicn_mapme_add_fib_entry (const fib_prefix_t *prefix,
+ hicn_face_id_t in_face_id,
+ fib_node_index_t *hicn_fib_node_index)
+{
+ int ret = HICN_ERROR_NONE;
+ dpo_proto_t dpo_proto = DPO_PROTO_NONE;
+
+ hicn_face_t *face = hicn_dpoi_get_from_idx (in_face_id);
+
+ if (face->sw_if == ~0)
+ {
+ // UDP encap case
+ if (face->flags & HICN_FACE_FLAGS_UDP4)
+ {
+ dpo_proto = DPO_PROTO_IP4;
+ }
+ else if (face->flags & HICN_FACE_FLAGS_UDP6)
+ {
+ dpo_proto = DPO_PROTO_IP6;
+ }
+ else
+ {
+ // Invalid
+ return HICN_ERROR_FACE_NOT_VALID;
+ }
+
+ ret = ip_nh_udp_tunnel_add_del_helper (
+ prefix->fp_proto, prefix, face->dpo.dpoi_index, dpo_proto, 1);
+ }
+ else
+ {
+ ret = ip_nh_adj_add_del_helper (prefix->fp_proto, prefix,
+ &face->nat_addr, face->sw_if, 1);
+ }
+
+ if (ret != HICN_ERROR_NONE)
+ {
+ return ret;
+ }
+
+  // Now trigger the sync of the main table with the hicn table
+ hicn_face_id_t *vec_faces = NULL;
+ ret = hicn_route_enable (prefix, hicn_fib_node_index, &vec_faces);
+
+ if (vec_faces[0] != in_face_id)
+ {
+ HICN_ERROR ("Created new face: new face id: %d, in_face id: %d",
+ vec_faces[0], in_face_id);
+ ret = HICN_ERROR_MAPME_WRONG_FACE_CREATED;
+ }
+
+ return ret;
+}
+
+/**
+ * Convert hicn prefix to fib prefix
+ */
+static_always_inline void
+hicn_prefix_to_fib_prefix (const hicn_prefix_t *prefix_in,
+ fib_prefix_t *prefix_out)
+{
+ clib_memcpy (&prefix_out->fp_addr, &prefix_in->name.as_ip46,
+ sizeof (prefix_out->fp_addr));
+ prefix_out->fp_len = (u16) prefix_in->len;
+ prefix_out->fp_proto = ip46_address_is_ip4 (&prefix_out->fp_addr) ?
+ FIB_PROTOCOL_IP4 :
+ FIB_PROTOCOL_IP6;
+}
+
/*
* @brief Process incoming control messages (Interest Update)
* @param vm vlib main data structure
@@ -57,73 +236,141 @@ static char *hicn_mapme_ctrl_error_strings[] = {
* NOTE:
* - this function answers locally to the IU interest by replying with a Ack
* (Data) packet, unless in case of outdated information, in which we can
- * consider the interest is dropped, and another IU (aka ICMP error) is sent so
- * that retransmissions stop.
+ * consider the interest is dropped, and another IU (aka ICMP error) is sent
+ * so that retransmissions stop.
*/
static_always_inline bool
-hicn_mapme_process_ctrl (vlib_main_t * vm, vlib_buffer_t * b,
- hicn_face_id_t in_face_id)
+hicn_mapme_process_ctrl (vlib_main_t *vm, vlib_buffer_t *b,
+ hicn_face_id_t in_face_id, hicn_prefix_t *prefix,
+ u32 *seq, hicn_mapme_type_t *type)
{
seq_t fib_seq;
- const dpo_id_t *dpo;
- hicn_prefix_t prefix;
+ const dpo_id_t *dpo, *dpo_mapme_default_route;
+ fib_prefix_t fib_prefix;
mapme_params_t params;
+ hicn_mapme_tfib_t *tfib;
int rc;
+ hicn_mapme_main_t *mm;
+#ifdef HICN_MAPME_ALLOW_NONEXISTING_FIB_ENTRY
+ fib_node_index_t fib_node_index;
+ hicn_mapme_tfib_t *tfib_less_specific;
+#endif
+
+ mm = hicn_mapme_get_main ();
/* Parse incoming message */
- rc =
- hicn_mapme_parse_packet (vlib_buffer_get_current (b), &prefix, &params);
+ rc = hicn_mapme_parse_packet (vlib_buffer_get_current (b), prefix, &params);
if (rc < 0)
goto ERR_PARSE;
- vlib_cli_output (vm, "IU - type:%d seq:%d len:%d", params.type, params.seq,
- prefix.len);
-
- /* if (params.seq == INVALID_SEQ) */
- /* { */
- /* vlib_log_warn (mapme_main.log_class, */
- /* "Invalid sequence number found in IU"); */
+ *seq = params.seq;
+ *type = params.type;
- /* return true; */
- /* } */
+ HICN_DEBUG ("IU - type:%d seq:%d prefix:%U len:%d", params.type, params.seq,
+ format_ip46_address, &prefix->name, IP46_TYPE_ANY, prefix->len);
/* We forge the ACK which will be the packet forwarded by the node */
hicn_mapme_create_ack (vlib_buffer_get_current (b), &params);
- dpo = fib_epm_lookup (&prefix.name, prefix.len);
+ dpo = fib_lookup (&prefix->name.as_ip46, prefix->len,
+ HICN_MAPME_FIB_LOOKUP_TYPE_EPM);
if (!dpo)
{
#ifdef HICN_MAPME_ALLOW_NONEXISTING_FIB_ENTRY
- /*
- * This might happen for a node hosting a producer which has moved.
- * Destroying the face has led to removing all corresponding FIB
- * entries. In that case, we need to correctly restore the FIB entries.
- */
- DEBUG ("Re-creating FIB entry with next hop on connection")
-#error "not implemented"
+      // The EPM lookup failed even though we received the IU, so there must
+      // be another, more or less specific, prefix to which the IU should be
+      // forwarded. We do not update the NHs for this prefix, and we use the
+      // default mapme route to forward the IU.
+
+ dpo_mapme_default_route =
+ fib_lookup (&mm->default_route.fp_addr, mm->default_route.fp_len,
+ HICN_MAPME_FIB_LOOKUP_TYPE_EPM);
+
+ if (!dpo_mapme_default_route)
+ {
+ // No path for mapme default route.
+ HICN_ERROR (
+ "No path for mapme default route (%U). Giving up IU forwarding.",
+ format_fib_prefix, &mm->default_route);
+ return false;
+ }
+
+ hicn_prefix_to_fib_prefix (prefix, &fib_prefix);
+ HICN_DEBUG ("Re-creating FIB entry with next hop on connection");
+ rc = hicn_mapme_add_fib_entry (&fib_prefix, in_face_id, &fib_node_index);
+
+ if (rc != HICN_ERROR_NONE)
+ {
+ return false;
+ }
+
+ // Get the DPO from the fib node index
+ dpo = dpo_from_fib_node_index (fib_node_index);
+
+ // This cannot fail
+ ASSERT (dpo);
+
+// Make sure DPO is hicn
+#ifdef HICN_MAPME_ALLOW_LOCATORS
+ if (!dpo_is_hicn ((dpo)))
+ {
+ /* We have an IP DPO */
+ HICN_ERROR ("Not implemented yet.");
+ return false;
+ }
+#endif
+
+ u32 hicn_dpo_ctx_index = dpo->dpoi_index;
+ u32 hicn_dpo_ctx_index_less_specific_route =
+ dpo_mapme_default_route->dpoi_index;
+
+ tfib_less_specific = TFIB (
+ hicn_strategy_dpo_ctx_get (hicn_dpo_ctx_index_less_specific_route));
+ tfib = TFIB (hicn_strategy_dpo_ctx_get (hicn_dpo_ctx_index));
+
+ for (u8 pos = 0; pos < tfib_less_specific->entry_count; pos++)
+ {
+ HICN_DEBUG (
+ "Adding nexthop to the tfib, dpo index in_face %d, dpo index "
+ "tfib %d",
+ in_face_id, tfib_less_specific->next_hops[pos]);
+ hicn_mapme_tfib_add (tfib, tfib_less_specific->next_hops[pos]);
+ }
+
+ // Update sequence number
+ tfib->seq = params.seq;
+
+ retx_t *retx = vlib_process_signal_event_data (
+ vm, hicn_mapme_eventmgr_process_node.index,
+ HICN_MAPME_EVENT_FACE_NH_SET, 1, sizeof (retx_t));
+ *retx = (retx_t){ .prefix = *prefix,
+ .dpo = {
+ .dpoi_index = hicn_dpo_ctx_index,
+ .dpoi_type = DPO_FIRST,
+ } };
+
+ return true;
#else
- //ERROR("Received IU for non-existing FIB entry");
+ HICN_ERROR ("Received IU for non-existing FIB entry");
return false;
#endif /* HICN_MAPME_ALLOW_NONEXISTING_FIB_ENTRY */
-
}
#ifdef HICN_MAPME_ALLOW_LOCATORS
if (!dpo_is_hicn ((dpo)))
{
/* We have an IP DPO */
- WARN ("Not implemented yet.");
+ HICN_ERROR ("Not implemented yet.");
return false;
}
#endif
/* Process the hICN DPO */
- hicn_mapme_tfib_t *tfib =
- TFIB (hicn_strategy_dpo_ctx_get (dpo->dpoi_index));
+ tfib = TFIB (hicn_strategy_dpo_ctx_get (dpo->dpoi_index));
if (tfib == NULL)
{
- WARN ("Unable to get strategy ctx.");
+ HICN_ERROR ("Unable to get strategy ctx.");
return false;
}
@@ -131,9 +378,9 @@ hicn_mapme_process_ctrl (vlib_main_t * vm, vlib_buffer_t * b,
if (params.seq > fib_seq)
{
- DEBUG
- ("Higher sequence number than FIB %d > %d, updating seq and next hops",
- params.seq, fib_seq);
+ HICN_DEBUG (
+ "Higher sequence number than FIB %d > %d, updating seq and next hops",
+ params.seq, fib_seq);
/* This has to be done first to allow processing ack */
tfib->seq = params.seq;
@@ -143,89 +390,89 @@ hicn_mapme_process_ctrl (vlib_main_t * vm, vlib_buffer_t * b,
/* Remove ingress face from TFIB in case it was present */
hicn_mapme_tfib_del (tfib, in_face_id);
+ HICN_DEBUG ("Locks on face %d: %d", in_face_id,
+ hicn_dpoi_get_from_idx (in_face_id)->locks);
+
/* Move next hops to TFIB... but in_face... */
for (u8 pos = 0; pos < tfib->entry_count; pos++)
{
- hicn_face_t * face = hicn_dpoi_get_from_idx(tfib->next_hops[pos]);
- hicn_face_t * in_face = hicn_dpoi_get_from_idx(in_face_id);
- if (dpo_is_adj(&face->dpo))
- {
- ip_adjacency_t * adj = adj_get (dpo->dpoi_index);
- if (ip46_address_cmp(&(adj->sub_type.nbr.next_hop), &(in_face->nat_addr))== 0)
- break;
- }
- DEBUG
- ("Adding nexthop to the tfib, dpo index in_face %d, dpo index tfib %d",
- in_face_id, tfib->next_hops[pos]);
+ if (tfib->next_hops[pos] == in_face_id)
+ continue;
+ HICN_DEBUG (
+ "Adding nexthop to the tfib, dpo index in_face %d, dpo index "
+ "tfib %d",
+ in_face_id, tfib->next_hops[pos]);
hicn_mapme_tfib_add (tfib, tfib->next_hops[pos]);
}
- hicn_mapme_nh_set (tfib, in_face_id);
-
- /* We transmit both the prefix and the full dpo (type will be needed to pick the right transmit node */
- retx_t *retx = vlib_process_signal_event_data (vm,
- hicn_mapme_eventmgr_process_node.
- index,
- HICN_MAPME_EVENT_FACE_NH_SET,
- 1,
- sizeof (retx_t));
- *retx = (retx_t)
- {
- .prefix = prefix,
- .dpo = *dpo
- };
+ int ret = hicn_mapme_nh_set (tfib, in_face_id);
+ HICN_DEBUG ("Locks on face %d: %d", in_face_id,
+ hicn_dpoi_get_from_idx (in_face_id)->locks);
+ if (ret == HICN_ERROR_MAPME_NEXT_HOP_ADDED &&
+ hicn_get_buffer (b)->flags & HICN_BUFFER_FLAGS_NEW_FACE)
+ {
+ hicn_face_unlock_with_id (in_face_id);
+ }
+      /* We transmit both the prefix and the full dpo (the type will be
+       * needed to pick the right transmit node) */
+ retx_t *retx = vlib_process_signal_event_data (
+ vm, hicn_mapme_eventmgr_process_node.index,
+ HICN_MAPME_EVENT_FACE_NH_SET, 1, sizeof (retx_t));
+ *retx = (retx_t){ .prefix = *prefix, .dpo = *dpo };
}
else if (params.seq == fib_seq)
{
- DEBUG ("Same sequence number than FIB %d > %d, adding next hop",
- params.seq, fib_seq);
+      HICN_DEBUG ("Same sequence number as FIB (%d == %d), adding next hop",
+ params.seq, fib_seq);
- /* Remove ingress face from TFIB in case it was present */
- hicn_mapme_tfib_del (tfib, in_face_id);
+ /**
+       * Add the nh BEFORE removing the face from the tfib: if the last lock
+       * is held by the tfib, deleting it first would also delete the face,
+       * resulting in undefined behavior afterwards (Debug mode -> SIGABRT,
+       * Release mode -> corrupted memory / SIGSEGV).
+ **/
/* Add ingress face to next hops */
hicn_mapme_nh_add (tfib, in_face_id);
+ /* Remove ingress face from TFIB in case it was present */
+ hicn_mapme_tfib_del (tfib, in_face_id);
+
/* Multipath, multihoming, multiple producers or duplicate interest */
- retx_t *retx = vlib_process_signal_event_data (vm,
- hicn_mapme_eventmgr_process_node.
- index,
- HICN_MAPME_EVENT_FACE_NH_ADD,
- 1,
- sizeof (retx_t));
- *retx = (retx_t)
- {
- .prefix = prefix,
- .dpo = *dpo
- };
+ retx_t *retx = vlib_process_signal_event_data (
+ vm, hicn_mapme_eventmgr_process_node.index,
+ HICN_MAPME_EVENT_FACE_NH_ADD, 1, sizeof (retx_t));
+ *retx = (retx_t){ .prefix = *prefix, .dpo = *dpo };
}
- else // params.seq < fib_seq
+ else // params.seq < fib_seq
{
/*
* face is propagating outdated information, we can just consider it as a
- * prevHops
+ * prevHops, unless it is the current nexthop.
*/
+ if (hicn_mapme_nh_has (tfib, in_face_id))
+ {
+ HICN_DEBUG ("Ignored seq %d < fib_seq %d from current nexthop",
+ params.seq, fib_seq);
+ return true;
+ }
+ HICN_DEBUG ("Received seq %d < fib_seq %d, sending backwards",
+ params.seq, fib_seq);
+
hicn_mapme_tfib_add (tfib, in_face_id);
- retx_t *retx = vlib_process_signal_event_data (vm,
- hicn_mapme_eventmgr_process_node.
- index,
- HICN_MAPME_EVENT_FACE_PH_ADD,
- 1,
- sizeof (retx_t));
- *retx = (retx_t)
- {
- .prefix = prefix,
- .dpo = *dpo
- };
+ retx_t *retx = vlib_process_signal_event_data (
+ vm, hicn_mapme_eventmgr_process_node.index,
+ HICN_MAPME_EVENT_FACE_PH_ADD, 1, sizeof (retx_t));
+ *retx = (retx_t){ .prefix = *prefix, .dpo = *dpo };
}
/* We just raise events, the event_mgr is in charge of forging packet. */
return true;
-//ERR_ACK_CREATE:
+// ERR_ACK_CREATE:
ERR_PARSE:
return false;
}
@@ -233,25 +480,27 @@ ERR_PARSE:
vlib_node_registration_t hicn_mapme_ctrl_node;
static uword
-hicn_mapme_ctrl_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
- vlib_frame_t * frame)
+hicn_mapme_ctrl_node_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
+ vlib_frame_t *frame)
{
hicn_buffer_t *hb;
hicn_mapme_ctrl_next_t next_index;
u32 n_left_from, *from, *to_next;
n_left_from = frame->n_vectors;
- //hicn_face_id_t in_face;
+ hicn_prefix_t prefix;
+ u32 seq;
+ hicn_mapme_type_t type = UPDATE;
+ int ret;
from = vlib_frame_vector_args (frame);
n_left_from = frame->n_vectors;
next_index = node->cached_next_index;
- while (n_left_from > 0) // buffers in the current frame
+ while (n_left_from > 0) // buffers in the current frame
{
u32 n_left_to_next;
vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
-
while (n_left_from > 0 && n_left_to_next > 0)
{
u32 bi0;
@@ -267,42 +516,61 @@ hicn_mapme_ctrl_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
b0 = vlib_get_buffer (vm, bi0);
hb = hicn_get_buffer (b0);
- /* This determines the next node on which the ack will be sent back */
- u32 next0 = hicn_mapme_ctrl_get_iface_node (hb->face_id);
-
- hicn_mapme_process_ctrl (vm, b0, hb->face_id);
-
- vnet_buffer (b0)->ip.adj_index[VLIB_TX] = hb->face_id;
+ /* This determines the next node on which the ack will be sent back
+ */
+ u32 next0 = HICN_MAPME_CTRL_NEXT_IP6_OUTPUT;
+
+ ret = hicn_mapme_process_ctrl (vm, b0, hb->face_id, &prefix, &seq,
+ &type);
+
+ if (PREDICT_FALSE (ret == 0))
+ {
+ next0 = HICN_MAPME_CTRL_NEXT_ERROR_DROP;
+ seq = ~0;
+ type = UNKNOWN;
+ }
+ else
+ {
+ vnet_buffer (b0)->ip.adj_index[VLIB_TX] = hb->face_id;
+ }
+
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) &&
+ (b0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ hicn_mapme_ctrl_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->prefix = prefix;
+ t->next_index = next0;
+ t->seq = seq;
+ t->type = type;
+ }
vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
n_left_to_next, bi0, next0);
-
}
vlib_put_next_frame (vm, node, next_index, n_left_to_next);
}
- // vlib_node_increment_counter (vm, hicn_mapme_ctrl_node.index,
- // HICN_MAPME_CTRL_ERROR_SWAPPED, pkts_swapped);
+
return frame->n_vectors;
}
/* packet trace format function */
static u8 *
-hicn_mapme_ctrl_format_trace (u8 * s, va_list * args)
+hicn_mapme_ctrl_format_trace (u8 *s, va_list *args)
{
CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
hicn_mapme_ctrl_trace_t *t = va_arg (*args, hicn_mapme_ctrl_trace_t *);
- s = format (s, "MAPME_CTRL: pkt: %d, sw_if_index %d, next index %d",
- (int) t->pkt_type, t->sw_if_index, t->next_index);
+ s = format (s, "MAPME_CTRL: prefix: %U/%d, next_index %u, seq %u, type %u",
+ format_ip46_address, &t->prefix.name, IP46_TYPE_ANY,
+ t->prefix.len, t->next_index, t->seq, t->type);
return (s);
}
-
/*
* Node registration for the MAP-Me node processing special interests
*/
-/* *INDENT-OFF* */
VLIB_REGISTER_NODE (hicn_mapme_ctrl_node) =
{
.function = hicn_mapme_ctrl_node_fn,
@@ -326,7 +594,6 @@ VLIB_REGISTER_NODE (hicn_mapme_ctrl_node) =
[HICN_MAPME_CTRL_NEXT_ERROR_DROP] = "error-drop",
},
};
-/* *INDENT-ON* */
/*
* fd.io coding-style-patch-verification: ON
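For readability, a condensed restatement of the IU sequence-number policy implemented in hicn_mapme_process_ctrl above. The event names are those used in the hunk; the helper name and the ~0 "no event" convention are illustrative only, not part of the patch:

static inline u32
mapme_iu_event_sketch (u32 iu_seq, u32 fib_seq, int in_face_is_nexthop)
{
  if (iu_seq > fib_seq)
    return HICN_MAPME_EVENT_FACE_NH_SET; /* newer info: replace next hops */
  if (iu_seq == fib_seq)
    return HICN_MAPME_EVENT_FACE_NH_ADD; /* same epoch: multipath, add hop */
  if (in_face_is_nexthop)
    return ~0; /* stale IU from the current next hop: ignored */
  return HICN_MAPME_EVENT_FACE_PH_ADD; /* stale IU: track as previous hop */
}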
diff --git a/hicn-plugin/src/mapme_eventmgr.c b/hicn-plugin/src/mapme_eventmgr.c
index d8b7562f8..bb654edf8 100644
--- a/hicn-plugin/src/mapme_eventmgr.c
+++ b/hicn-plugin/src/mapme_eventmgr.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Copyright (c) 2021-2022 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -22,7 +22,14 @@
#include <vnet/fib/ip4_fib.h>
#include <vnet/fib/ip6_fib.h>
-#define DEFAULT_TIMEOUT 1.0 /* s */
+#include <hicn/mapme.h>
+
+#define DEFAULT_TIMEOUT 1.0 /* s */
+
+/**
+ * @brief This is a process node reacting to face events.
+ */
+vlib_node_registration_t hicn_mapme_eventmgr_process_node;
hicn_mapme_main_t mapme_main;
@@ -30,10 +37,17 @@ hicn_prefix_t *retx_pool;
uword *retx_hash;
void
-hicn_mapme_init (vlib_main_t * vm)
+hicn_mapme_init (vlib_main_t *vm)
{
mapme_main.vm = vm;
- mapme_main.log_class = vlib_log_register_class ("hicn_mapme", 0);
+ clib_memset_u8 (&mapme_main.default_route, 0,
+ sizeof (mapme_main.default_route));
+}
+
+hicn_mapme_main_t *
+hicn_mapme_get_main ()
+{
+ return &mapme_main;
}
/* borrowed from vnet/fib/ip4_fib.c */
@@ -71,13 +85,12 @@ ip6_fib_table_show_walk (fib_node_index_t fib_entry_index, void *arg)
}
void
-hicn_mapme_process_fib_entry (vlib_main_t * vm, hicn_face_id_t face,
- const fib_node_index_t * fib_entry_index)
+hicn_mapme_process_fib_entry (vlib_main_t *vm, hicn_face_id_t face,
+ const fib_node_index_t *fib_entry_index)
{
const dpo_id_t *load_balance_dpo_id;
load_balance_t *lb;
dpo_id_t *dpo_id;
- fib_entry_t *fib_entry;
load_balance_dpo_id = fib_entry_contribute_ip_forwarding (*fib_entry_index);
@@ -92,53 +105,47 @@ hicn_mapme_process_fib_entry (vlib_main_t * vm, hicn_face_id_t face,
{
/* un-const */
dpo_id = (dpo_id_t *) load_balance_get_bucket_i (lb, i);
-
if (dpo_is_hicn (dpo_id))
{
+#ifdef HICN_DDEBUG
+ fib_entry_t *fib_entry;
fib_entry = fib_entry_get (*fib_entry_index);
- vlib_cli_output (vm, "set face pending %U", format_fib_prefix,
- &fib_entry->fe_prefix);
+ HICN_DEBUG ("set face pending %U", format_fib_prefix,
+ &fib_entry->fe_prefix);
+#endif
}
}
}
void
-hicn_mapme_process_ip4_fib (vlib_main_t * vm, hicn_face_id_t face)
+hicn_mapme_process_ip4_fib (vlib_main_t *vm, hicn_face_id_t face)
{
ip4_main_t *im4 = &ip4_main;
fib_table_t *fib_table;
- int table_id = -1, fib_index = ~0;
- /* *INDENT-OFF* */
- pool_foreach (fib_table, im4->fibs,
- ({
- ip4_fib_t *fib = pool_elt_at_index(im4->v4_fibs, fib_table->ft_index);
-
- if (table_id >= 0 && table_id != (int)fib->table_id)
- continue;
- if (fib_index != ~0 && fib_index != (int)fib->index)
- continue;
+ pool_foreach (fib_table, im4->fibs)
+ {
+ ip4_fib_t *fib = pool_elt_at_index (ip4_fibs, fib_table->ft_index);
- fib_node_index_t *fib_entry_index;
- ip4_fib_show_walk_ctx_t ctx = {
- .ifsw_indicies = NULL,
- };
+ fib_node_index_t *fib_entry_index;
+ ip4_fib_show_walk_ctx_t ctx = {
+ .ifsw_indicies = NULL,
+ };
- ip4_fib_table_walk(fib, ip4_fib_show_walk_cb, &ctx);
- //vec_sort_with_function(ctx.ifsw_indicies, fib_entry_cmp_for_sort);
+ ip4_fib_table_walk (fib, ip4_fib_show_walk_cb, &ctx);
+ // vec_sort_with_function(ctx.ifsw_indicies, fib_entry_cmp_for_sort);
- vec_foreach(fib_entry_index, ctx.ifsw_indicies)
- {
- hicn_mapme_process_fib_entry(vm, face, fib_entry_index);
- }
+ vec_foreach (fib_entry_index, ctx.ifsw_indicies)
+ {
+ hicn_mapme_process_fib_entry (vm, face, fib_entry_index);
+ }
- vec_free(ctx.ifsw_indicies);
- }));
- /* *INDENT-ON* */
+ vec_free (ctx.ifsw_indicies);
+ }
}
void
-hicn_mapme_process_ip6_fib (vlib_main_t * vm, hicn_face_id_t face)
+hicn_mapme_process_ip6_fib (vlib_main_t *vm, hicn_face_id_t face)
{
/* Walk IPv6 FIB */
ip6_main_t *im6 = &ip6_main;
@@ -146,43 +153,39 @@ hicn_mapme_process_ip6_fib (vlib_main_t * vm, hicn_face_id_t face)
ip6_fib_t *fib;
int table_id = -1, fib_index = ~0;
- /* *INDENT-OFF* */
- pool_foreach (fib_table, im6->fibs,
- ({
- fib = pool_elt_at_index(im6->v6_fibs, fib_table->ft_index);
-
- if (table_id >= 0 && table_id != (int)fib->table_id)
- continue;
- if (fib_index != ~0 && fib_index != (int)fib->index)
- continue;
- if (fib_table->ft_flags & FIB_TABLE_FLAG_IP6_LL)
- continue;
+ pool_foreach (fib_table, im6->fibs)
+ {
+ fib = pool_elt_at_index (im6->v6_fibs, fib_table->ft_index);
- fib_node_index_t *fib_entry_index;
- ip6_fib_show_ctx_t ctx = {
- .entries = NULL,
- };
+ if (table_id >= 0 && table_id != (int) fib->table_id)
+ continue;
+ if (fib_index != ~0 && fib_index != (int) fib->index)
+ continue;
+ if (fib_table->ft_flags & FIB_TABLE_FLAG_IP6_LL)
+ continue;
- ip6_fib_table_walk(fib->index, ip6_fib_table_show_walk, &ctx);
- //vec_sort_with_function(ctx.entries, fib_entry_cmp_for_sort);
+ fib_node_index_t *fib_entry_index;
+ ip6_fib_show_ctx_t ctx = {
+ .entries = NULL,
+ };
- vec_foreach(fib_entry_index, ctx.entries)
- {
- hicn_mapme_process_fib_entry(vm, face, fib_entry_index);
- }
+ ip6_fib_table_walk (fib->index, ip6_fib_table_show_walk, &ctx);
+ // vec_sort_with_function(ctx.entries, fib_entry_cmp_for_sort);
- vec_free(ctx.entries);
+ vec_foreach (fib_entry_index, ctx.entries)
+ {
+ hicn_mapme_process_fib_entry (vm, face, fib_entry_index);
+ }
- }));
- /* *INDENT-ON* */
+ vec_free (ctx.entries);
+ }
}
-
/**
* Callback called everytime a new face is created (not including app faces)
*/
void
-hicn_mapme_on_face_added (vlib_main_t * vm, hicn_face_id_t face)
+hicn_mapme_on_face_added (vlib_main_t *vm, hicn_face_id_t face)
{
hicn_mapme_process_ip4_fib (vm, face);
hicn_mapme_process_ip6_fib (vm, face);
@@ -194,36 +197,45 @@ hicn_mapme_on_face_added (vlib_main_t * vm, hicn_face_id_t face)
* it.
*/
#define NUM_RETX_ENTRIES 100
-#define NUM_RETX_SLOT 2
-#define NEXT_SLOT(cur) (1-cur)
-#define CUR retx_array[cur]
-#define NXT retx_array[NEXT_SLOT(cur)]
-#define CURLEN retx_len[cur]
-#define NXTLEN retx_len[NEXT_SLOT(cur)]
-
-static_always_inline void *
-get_packet_buffer (vlib_main_t * vm, u32 node_index, u32 dpoi_index,
- ip46_address_t * addr, hicn_type_t type)
+#define NUM_RETX_SLOT 2
+#define NEXT_SLOT(cur) (1 - cur)
+#define CUR retx_array[cur]
+#define NXT retx_array[NEXT_SLOT (cur)]
+#define CURLEN retx_len[cur]
+#define NXTLEN retx_len[NEXT_SLOT (cur)]
+
+static_always_inline bool
+create_mapme_packet_buffer (vlib_main_t *vm, u32 node_index, u32 dpoi_index,
+ const hicn_prefix_t *prefix,
+ const mapme_params_t *params)
{
vlib_frame_t *f;
- vlib_buffer_t *b; // for newly created packet
+ vlib_buffer_t *b; // for newly created packet
u32 *to_next;
u32 bi;
u8 *buffer;
+ size_t n;
+ hicn_packet_format_t format;
if (vlib_buffer_alloc (vm, &bi, 1) != 1)
{
- clib_warning ("buffer allocation failure");
+ HICN_ERROR ("buffer allocation failure");
return NULL;
}
+ format = (params->protocol == IPPROTO_IPV6) ? HICN_PACKET_FORMAT_IPV6_ICMP :
+ HICN_PACKET_FORMAT_IPV4_ICMP;
+
/* Create a new packet from scratch */
b = vlib_get_buffer (vm, bi);
ASSERT (b->current_data == 0);
/* Face information for next hop node index */
vnet_buffer (b)->ip.adj_index[VLIB_TX] = dpoi_index;
- hicn_get_buffer (b)->type = type;
+
+ hicn_packet_buffer_t *pkbuf = &hicn_get_buffer (b)->pkbuf;
+ hicn_packet_set_format (pkbuf, format);
+ hicn_packet_init_header (pkbuf, 0);
/* Enqueue the packet right now */
f = vlib_get_frame_to_node (vm, node_index);
@@ -234,75 +246,72 @@ get_packet_buffer (vlib_main_t * vm, u32 node_index, u32 dpoi_index,
// pointer to IP layer ? do we need to prepare for ethernet ???
buffer = vlib_buffer_get_current (b);
- b->current_length =
- (type.l1 == IPPROTO_IPV6) ? HICN_MAPME_V6_HDRLEN : HICN_MAPME_V4_HDRLEN;
+ b->current_length = HICN_PACKET_FORMAT_IS_IPV6 (format) ?
+ EXPECTED_MAPME_V6_HDRLEN :
+ EXPECTED_MAPME_V4_HDRLEN;
+
+ n = hicn_mapme_create_packet (buffer, prefix, params);
+
+ if (n <= 0)
+ {
+ HICN_ERROR ("Could not create MAP-Me packet");
+ return false;
+ }
+
+ hicn_packet_set_buffer (pkbuf, vlib_buffer_get_current (b),
+ b->current_length, b->current_length);
+ hicn_packet_analyze (&hicn_get_buffer (b)->pkbuf);
return buffer;
}
static_always_inline bool
-hicn_mapme_send_message (vlib_main_t * vm, const hicn_prefix_t * prefix,
- mapme_params_t * params, hicn_face_id_t face)
+hicn_mapme_send_message (vlib_main_t *vm, const hicn_prefix_t *prefix,
+ mapme_params_t *params, hicn_face_id_t face)
{
- size_t n;
-
/* This should be retrieved from face information */
- DEBUG ("Retransmission for prefix %U seq=%d", format_ip46_address,
- &prefix->name, IP46_TYPE_ANY, params->seq);
+ HICN_DEBUG ("Retransmission for prefix %U/%d seq=%d", format_ip46_address,
+ &prefix->name, IP46_TYPE_ANY, prefix->len, params->seq);
char *node_name = hicn_mapme_get_dpo_face_node (face);
if (!node_name)
{
- clib_warning
- ("Could not determine next node for sending MAP-Me packet");
+ clib_warning ("Could not determine next node for sending MAP-Me packet");
return false;
}
vlib_node_t *node = vlib_get_node_by_name (vm, (u8 *) node_name);
u32 node_index = node->index;
- u8 *buffer = get_packet_buffer (vm, node_index, face,
- (ip46_address_t *) prefix,
- (params->protocol ==
- IPPROTO_IPV6) ? HICN_TYPE_IPV6_ICMP :
- HICN_TYPE_IPV4_ICMP);
- n = hicn_mapme_create_packet (buffer, prefix, params);
- if (n <= 0)
- {
- clib_warning ("Could not create MAP-Me packet");
- return false;
- }
-
- return true;
+ return create_mapme_packet_buffer (vm, node_index, face, prefix, params);
}
static_always_inline void
-hicn_mapme_send_updates (vlib_main_t * vm, hicn_prefix_t * prefix,
- dpo_id_t dpo, bool send_all)
+hicn_mapme_send_updates (vlib_main_t *vm, hicn_prefix_t *prefix, dpo_id_t dpo,
+ bool send_all)
{
hicn_mapme_tfib_t *tfib = TFIB (hicn_strategy_dpo_ctx_get (dpo.dpoi_index));
if (!tfib)
{
- DEBUG ("NULL TFIB entry id=%d", dpo.dpoi_index);
+ HICN_DEBUG ("NULL TFIB entry id=%d", dpo.dpoi_index);
return;
}
u8 tfib_last_idx = HICN_PARAM_FIB_ENTRY_NHOPS_MAX - tfib->tfib_entry_count;
mapme_params_t params = {
- .protocol = ip46_address_is_ip4 (&prefix->name)
- ? IPPROTO_IP : IPPROTO_IPV6,
+ .protocol =
+ ip46_address_is_ip4 (&prefix->name.as_ip46) ? IPPROTO_IP : IPPROTO_IPV6,
.type = UPDATE,
.seq = tfib->seq,
};
if (send_all)
{
- for (u8 pos = tfib_last_idx; pos < HICN_PARAM_FIB_ENTRY_NHOPS_MAX;
- pos++)
+ u8 pos;
+ for (pos = tfib_last_idx; pos < HICN_PARAM_FIB_ENTRY_NHOPS_MAX; pos++)
{
- hicn_mapme_send_message (vm, prefix, &params,
- tfib->next_hops[pos]);
+ hicn_mapme_send_message (vm, prefix, &params, tfib->next_hops[pos]);
}
}
else
@@ -313,17 +322,17 @@ hicn_mapme_send_updates (vlib_main_t * vm, hicn_prefix_t * prefix,
}
static uword
-hicn_mapme_eventmgr_process (vlib_main_t * vm,
- vlib_node_runtime_t * rt, vlib_frame_t * f)
+hicn_mapme_eventmgr_process (vlib_main_t *vm, vlib_node_runtime_t *rt,
+ vlib_frame_t *f)
{
- f64 timeout = 0; /* By default, no timer is run */
+ f64 timeout = 0; /* By default, no timer is run */
f64 current_time, due_time;
u8 idle = 0;
retx_t retx_array[NUM_RETX_SLOT][NUM_RETX_ENTRIES];
memset (retx_array, 0, NUM_RETX_SLOT * NUM_RETX_ENTRIES);
u8 retx_len[NUM_RETX_SLOT] = { 0 };
- u8 cur = 0; /* current slot */
+ u8 cur = 0; /* current slot */
hicn_mapme_init (vm);
@@ -333,8 +342,8 @@ hicn_mapme_eventmgr_process (vlib_main_t * vm,
* instead of get_event, and we thus need to reimplement timeout
* management on top, as done elsewhere in VPP code.
*
- * The most probable event. For simplicity, for new faces, we pass the same retx_t with no
- * prefix
+ * The most probable event. For simplicity, for new faces, we pass the
+ * same retx_t with no prefix
*/
if (timeout != 0)
{
@@ -346,9 +355,8 @@ hicn_mapme_eventmgr_process (vlib_main_t * vm,
* management with no error correction accounting for elapsed time.
* Also, we only run a timer when there are pending retransmissions.
*/
- timeout =
- (due_time >
- current_time) ? due_time - current_time : DEFAULT_TIMEOUT;
+ timeout = (due_time > current_time) ? due_time - current_time :
+ DEFAULT_TIMEOUT;
due_time = current_time + timeout;
}
else
@@ -365,10 +373,12 @@ hicn_mapme_eventmgr_process (vlib_main_t * vm,
{
/*
* A face has been added:
- * - In case of a local app face, we need to advertise a new prefix
+ * - In case of a local app face, we need to advertise a new
+ * prefix
* - For another local face type, we need to advertise local
* prefixes and schedule retransmissions
*/
+ HICN_DEBUG ("Mapme Event: HICN_MAPME_EVENT_FACE_ADD");
retx_t *retx_events = event_data;
for (u8 i = 0; i < vec_len (retx_events); i++)
{
@@ -384,11 +394,12 @@ hicn_mapme_eventmgr_process (vlib_main_t * vm,
case HICN_MAPME_EVENT_FACE_NH_SET:
{
+ HICN_DEBUG ("Mapme Event: HICN_MAPME_EVENT_FACE_NH_SET");
/*
* An hICN FIB entry has been modified. All operations so far
* have been procedded in the nodes. Here we need to track
- * retransmissions upon timeout: we mark the FIB entry as pending in
- * the second-to-next slot
+ * retransmissions upon timeout: we mark the FIB entry as pending
+ * in the second-to-next slot
*/
/* Mark FIB entry as pending for second-to-next slot */
@@ -413,20 +424,22 @@ hicn_mapme_eventmgr_process (vlib_main_t * vm,
* Since we retransmit to all prev hops, we can remove this
* (T)FIB entry for the check at the end of the current slot.
*/
- retx_t *retx = (retx_t *) & retx_events[i];
+ retx_t *retx = (retx_t *) &retx_events[i];
retx->rtx_count = 0;
/*
* Transmit IU for all TFIB entries with latest seqno (we have
* at least one for sure!)
*/
+ HICN_DEBUG ("Sending mapme message upon NH_SET event");
hicn_mapme_send_updates (vm, &retx->prefix, retx->dpo, true);
- /* Delete entry_id from retransmissions in the current slot (if present) ... */
+ /* Delete entry_id from retransmissions in the current slot (if
+ * present) ... */
for (u8 j = 0; j < CURLEN; j++)
if (!dpo_cmp (&(CUR[j].dpo), &retx->dpo))
{
- CUR[j].dpo.dpoi_index = ~0; /* sufficient */
+ CUR[j].dpo.dpoi_index = ~0; /* sufficient */
}
/* ... and schedule it for next slot (if not already) */
@@ -434,7 +447,7 @@ hicn_mapme_eventmgr_process (vlib_main_t * vm,
for (j = 0; j < NXTLEN; j++)
if (!dpo_cmp (&NXT[j].dpo, &retx->dpo))
break;
- if (j == NXTLEN) /* not found */
+ if (j == NXTLEN) /* not found */
NXT[NXTLEN++] = *retx;
}
idle = 0;
@@ -442,14 +455,15 @@ hicn_mapme_eventmgr_process (vlib_main_t * vm,
break;
case HICN_MAPME_EVENT_FACE_NH_ADD:
+ HICN_DEBUG ("Mapme Event: HICN_MAPME_EVENT_FACE_NH_ADD");
/*
* As per the description of states, this event should add the face
* to the list of next hops, and eventually remove it from TFIB.
* This corresponds to the multipath case.
*
- * In all cases, we assume the propagation was already done when the first
- * interest with the same sequence number was received, so we stop here
- * No change in TFIB = no IU to send
+ * In all cases, we assume the propagation was already done when the
+ * first interest with the same sequence number was received, so we
+ * stop here No change in TFIB = no IU to send
*
* No change in timers.
*/
@@ -461,6 +475,7 @@ hicn_mapme_eventmgr_process (vlib_main_t * vm,
break;
case HICN_MAPME_EVENT_FACE_PH_ADD:
+ HICN_DEBUG ("Mapme Event: HICN_MAPME_EVENT_FACE_PH_ADD");
/* Back-propagation, interesting even for IN (desync) */
{
retx_t *retx_events = event_data;
@@ -474,6 +489,7 @@ hicn_mapme_eventmgr_process (vlib_main_t * vm,
break;
case HICN_MAPME_EVENT_FACE_PH_DEL:
+ HICN_DEBUG ("Mapme Event: HICN_MAPME_EVENT_FACE_PH_DEL");
/* Ack : remove an element from TFIB */
break;
@@ -495,27 +511,29 @@ hicn_mapme_eventmgr_process (vlib_main_t * vm,
{
retx_t *retx = &CUR[pos];
- if (retx->dpo.dpoi_index == ~0) /* deleted entry */
+ if (retx->dpo.dpoi_index == ~0) /* deleted entry */
continue;
hicn_mapme_tfib_t *tfib =
TFIB (hicn_strategy_dpo_ctx_get (retx->dpo.dpoi_index));
if (!tfib)
{
- DEBUG ("NULL TFIB entry for dpoi_index=%d",
- retx->dpo.dpoi_index);
+ HICN_ERROR ("NULL TFIB entry for dpoi_index=%d",
+ retx->dpo.dpoi_index);
continue;
}
hicn_mapme_send_updates (vm, &retx->prefix, retx->dpo, true);
retx->rtx_count++;
- // If we exceed the numver of retransmittion it means that all tfib entries have seens at least HICN_PARAM_RTX_MAX of retransmission
+		  // If we exceed the number of retransmissions, it means that
+		  // all tfib entries have seen at least HICN_PARAM_RTX_MAX
+		  // retransmissions
if (retx->rtx_count < HICN_PARAM_RTX_MAX)
{
/*
- * We did some retransmissions, so let's reschedule a check in the
- * next slot
+ * We did some retransmissions, so let's reschedule a check
+ * in the next slot
*/
NXT[NXTLEN++] = CUR[pos];
idle = 0;
@@ -541,7 +559,6 @@ hicn_mapme_eventmgr_process (vlib_main_t * vm,
timeout = (idle > 1) ? 0 : DEFAULT_TIMEOUT;
// if (vlib_process_suspend_time_is_zero (timeout)) { ... }
-
}
/* NOTREACHED */
@@ -549,14 +566,13 @@ hicn_mapme_eventmgr_process (vlib_main_t * vm,
}
/* Not static as we need to access it from hicn_face */
-/* *INDENT-OFF* */
-VLIB_REGISTER_NODE (hicn_mapme_eventmgr_process_node) = { //,static) = {
- .function = hicn_mapme_eventmgr_process,
- .type = VLIB_NODE_TYPE_PROCESS,
- .name = "mapme-eventmgr-process",
- .process_log2_n_stack_bytes = 16,
+VLIB_REGISTER_NODE (hicn_mapme_eventmgr_process_node) = {
+ //,static) = {
+ .function = hicn_mapme_eventmgr_process,
+ .type = VLIB_NODE_TYPE_PROCESS,
+ .name = "mapme-eventmgr-process",
+ .process_log2_n_stack_bytes = 16,
};
-/* *INDENT-ON* */
/*
* fd.io coding-style-patch-verification: ON
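The event manager above keeps retransmission state in two alternating slots driven by the CUR/NXT/CURLEN/NXTLEN macros. A minimal, self-contained sketch of that bookkeeping (names and the entry type below are illustrative stand-ins, not the plugin's own symbols):

#define N_SLOTS	  2
#define N_ENTRIES 100
typedef struct { int rtx_count; } entry_t; /* stands in for retx_t */

static entry_t slots[N_SLOTS][N_ENTRIES];
static int slot_len[N_SLOTS];
static int cur; /* current slot index, 0 or 1 */

static void
on_timer_sketch (int rtx_max)
{
  for (int i = 0; i < slot_len[cur]; i++)
    {
      entry_t *e = &slots[cur][i];
      /* retransmit e here, then re-schedule it if it has retries left */
      if (++e->rtx_count < rtx_max)
	slots[1 - cur][slot_len[1 - cur]++] = *e;
    }
  slot_len[cur] = 0;
  cur = 1 - cur; /* NEXT_SLOT (cur) */
}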
diff --git a/hicn-plugin/src/mapme_eventmgr.h b/hicn-plugin/src/mapme_eventmgr.h
index b63d16805..559ba2e75 100644
--- a/hicn-plugin/src/mapme_eventmgr.h
+++ b/hicn-plugin/src/mapme_eventmgr.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Copyright (c) 2021 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -13,7 +13,7 @@
* limitations under the License.
*/
-#include <vlib/vlib.h> // vlib_node_registration_t (vlib/node.h)
+#include <vlib/vlib.h> // vlib_node_registration_t (vlib/node.h)
#include <hicn/name.h>
@@ -34,19 +34,18 @@ typedef struct
u8 rtx_count; // Number of retransmissions since last tfib addition
} retx_t;
-#define HASH32(x) ((u16)x ^ (x << 16))
+#define HASH32(x) ((u16) x ^ (x << 16))
/**
* @brief This is a process node reacting to face events.
*/
-// not static !
-vlib_node_registration_t hicn_mapme_eventmgr_process_node;
+extern vlib_node_registration_t hicn_mapme_eventmgr_process_node;
/**
* @brief Initialize MAP-Me on forwarder
* @params vm - vlib_main_t pointer
*/
-void hicn_mapme_init (vlib_main_t * vm);
+void hicn_mapme_init (vlib_main_t *vm);
/*
* fd.io coding-style-patch-verification: ON
diff --git a/hicn-plugin/src/mgmt.c b/hicn-plugin/src/mgmt.c
index cfeef6cb6..6e9ce9fd8 100644
--- a/hicn-plugin/src/mgmt.c
+++ b/hicn-plugin/src/mgmt.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Copyright (c) 2021 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -21,11 +21,11 @@
#include "mgmt.h"
/* define message IDs */
-#include "hicn_msg_enum.h"
+#include <vpp_plugins/hicn/hicn_msg_enum.h>
/* shared routine betweeen API and CLI, leveraging API message structure */
int
-hicn_mgmt_node_stats_get (vl_api_hicn_api_node_stats_get_reply_t * rmp)
+hicn_mgmt_node_stats_get (vl_api_hicn_api_node_stats_get_reply_t *rmp)
{
rmp->pkts_processed = 0;
rmp->pkts_interest_count = 0;
@@ -46,46 +46,32 @@ hicn_mgmt_node_stats_get (vl_api_hicn_api_node_stats_get_reply_t * rmp)
vlib_error_main_t *em;
vlib_node_t *n;
- foreach_vlib_main ((
- {
- em = &this_vlib_main->error_main;
- n =
- vlib_get_node (this_vlib_main,
- hicn_interest_pcslookup_node.index);
- u32 node_cntr_base_idx = n->error_heap_index;
- rmp->pkts_processed +=
- clib_host_to_net_u64 (em->counters[node_cntr_base_idx +
- HICNFWD_ERROR_PROCESSED]);
- rmp->pkts_interest_count +=
- clib_host_to_net_u64 (em->counters[node_cntr_base_idx +
- HICNFWD_ERROR_INTERESTS]);
- n =
- vlib_get_node (this_vlib_main,
- hicn_data_pcslookup_node.index);
- node_cntr_base_idx = n->error_heap_index;
- rmp->pkts_processed +=
- clib_host_to_net_u64 (em->counters[node_cntr_base_idx +
- HICNFWD_ERROR_PROCESSED]);
- rmp->pkts_data_count +=
- clib_host_to_net_u64 (em->counters[node_cntr_base_idx +
- HICNFWD_ERROR_DATAS]);
- n =
- vlib_get_node (this_vlib_main,
- hicn_interest_hitcs_node.index);
- node_cntr_base_idx = n->error_heap_index;
- rmp->pkts_from_cache_count +=
- clib_host_to_net_u64 (em->counters[node_cntr_base_idx +
- HICNFWD_ERROR_CACHED]);
- n =
- vlib_get_node (this_vlib_main,
- hicn_interest_hitpit_node.index);
- node_cntr_base_idx = n->error_heap_index;
- rmp->interests_aggregated +=
- clib_host_to_net_u64 (em->counters[node_cntr_base_idx +
- HICNFWD_ERROR_INTEREST_AGG]);
- rmp->interests_retx +=
- clib_host_to_net_u64 (em->counters[node_cntr_base_idx +
- HICNFWD_ERROR_INT_RETRANS]);}));
+ foreach_vlib_main ()
+ {
+ em = &this_vlib_main->error_main;
+ n = vlib_get_node (this_vlib_main, hicn_interest_pcslookup_node.index);
+ u32 node_cntr_base_idx = n->error_heap_index;
+ rmp->pkts_processed += clib_host_to_net_u64 (
+ em->counters[node_cntr_base_idx + HICNFWD_ERROR_PROCESSED]);
+ rmp->pkts_interest_count += clib_host_to_net_u64 (
+ em->counters[node_cntr_base_idx + HICNFWD_ERROR_INTERESTS]);
+ n = vlib_get_node (this_vlib_main, hicn_data_pcslookup_node.index);
+ node_cntr_base_idx = n->error_heap_index;
+ rmp->pkts_processed += clib_host_to_net_u64 (
+ em->counters[node_cntr_base_idx + HICNFWD_ERROR_PROCESSED]);
+ rmp->pkts_data_count += clib_host_to_net_u64 (
+ em->counters[node_cntr_base_idx + HICNFWD_ERROR_DATAS]);
+ n = vlib_get_node (this_vlib_main, hicn_interest_hitcs_node.index);
+ node_cntr_base_idx = n->error_heap_index;
+ rmp->pkts_from_cache_count += clib_host_to_net_u64 (
+ em->counters[node_cntr_base_idx + HICNFWD_ERROR_CACHED]);
+ n = vlib_get_node (this_vlib_main, hicn_interest_hitpit_node.index);
+ node_cntr_base_idx = n->error_heap_index;
+ rmp->interests_aggregated += clib_host_to_net_u64 (
+ em->counters[node_cntr_base_idx + HICNFWD_ERROR_INTEREST_AGG]);
+ rmp->interests_retx += clib_host_to_net_u64 (
+ em->counters[node_cntr_base_idx + HICNFWD_ERROR_INT_RETRANS]);
+ }
return (HICN_ERROR_NONE);
}
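The hunk above moves from the old callback-style foreach_vlib_main macro to the block form and sums per-node error counters across worker threads. A minimal sketch of that aggregation pattern, using the same VPP symbols as the code above (the helper name is illustrative):

static u64
sum_node_counter_sketch (u32 node_index, u32 counter_offset)
{
  u64 total = 0;
  foreach_vlib_main ()
    {
      vlib_error_main_t *em = &this_vlib_main->error_main;
      vlib_node_t *n = vlib_get_node (this_vlib_main, node_index);
      /* per-node counters start at the node's error_heap_index */
      total += em->counters[n->error_heap_index + counter_offset];
    }
  return total;
}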
diff --git a/hicn-plugin/src/mgmt.h b/hicn-plugin/src/mgmt.h
index 6db0fe0c1..03e73999f 100644
--- a/hicn-plugin/src/mgmt.h
+++ b/hicn-plugin/src/mgmt.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Copyright (c) 2021 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -18,7 +18,7 @@
#include <vppinfra/error.h>
#include "faces/face.h"
-#include "hicn_api.h"
+#include <vpp_plugins/hicn/hicn_api.h>
/**
* @file mgmt.h
@@ -70,7 +70,7 @@ typedef enum
* Utility to update error counters in all hICN nodes
*/
always_inline void
-update_node_counter (vlib_main_t * vm, u32 node_idx, u32 counter_idx, u64 val)
+update_node_counter (vlib_main_t *vm, u32 node_idx, u32 counter_idx, u64 val)
{
vlib_node_t *node = vlib_get_node (vm, node_idx);
vlib_error_main_t *em = &(vm->error_main);
@@ -79,29 +79,28 @@ update_node_counter (vlib_main_t * vm, u32 node_idx, u32 counter_idx, u64 val)
em->counters[base_idx + counter_idx] = val;
}
-
/*
* Stats for the forwarding node, which end up called "error" even though
* they aren't...
*/
-#define foreach_hicnfwd_error \
- _(PROCESSED, "hICN packets processed") \
- _(INTERESTS, "hICN interests forwarded") \
- _(DATAS, "hICN data msgs forwarded") \
- _(CACHED, "Cached data ") \
- _(NO_PIT, "hICN no PIT entry drops") \
- _(PIT_EXPIRED, "hICN expired PIT entries") \
- _(CS_EXPIRED, "hICN expired CS entries") \
- _(CS_LRU, "hICN LRU CS entries freed") \
- _(NO_BUFS, "No packet buffers") \
- _(INTEREST_AGG, "Interests aggregated") \
- _(INTEREST_AGG_ENTRY, "Interest aggregated per entry") \
- _(INT_RETRANS, "Interest retransmissions") \
- _(INT_COUNT, "Interests in PIT") \
- _(CS_COUNT, "CS total entries") \
- _(CS_NTW_COUNT, "CS ntw entries") \
- _(CS_APP_COUNT, "CS app entries") \
- _(HASH_COLL_HASHTB_COUNT, "Collisions in Hash table")
+#define foreach_hicnfwd_error \
+ _ (PROCESSED, "hICN packets processed") \
+ _ (INTERESTS, "hICN interests forwarded") \
+ _ (DATAS, "hICN data msgs forwarded") \
+ _ (CACHED, "Cached data ") \
+ _ (NO_PIT, "hICN no PIT entry drops") \
+ _ (PIT_EXPIRED, "hICN expired PIT entries") \
+ _ (CS_EXPIRED, "hICN expired CS entries") \
+ _ (CS_LRU, "hICN LRU CS entries freed") \
+ _ (NO_BUFS, "No packet buffers") \
+ _ (INTEREST_AGG, "Interests aggregated") \
+ _ (INTEREST_AGG_ENTRY, "Interest aggregated per entry") \
+ _ (INT_RETRANS, "Interest retransmissions") \
+ _ (INT_COUNT, "Interests in PIT") \
+ _ (CS_COUNT, "CS total entries") \
+ _ (CS_NTW_COUNT, "CS ntw entries") \
+ _ (CS_APP_COUNT, "CS app entries") \
+ _ (HASH_COLL_HASHTB_COUNT, "Collisions in Hash table")
typedef enum
{
@@ -114,9 +113,9 @@ typedef enum
/*
* Declarations
*/
-clib_error_t *hicn_api_plugin_hookup (vlib_main_t * vm);
+clib_error_t *hicn_api_plugin_hookup (vlib_main_t *vm);
-int hicn_mgmt_node_stats_get (vl_api_hicn_api_node_stats_get_reply_t * rmp);
+int hicn_mgmt_node_stats_get (vl_api_hicn_api_node_stats_get_reply_t *rmp);
#endif /* // __HICN_MGMT_H__ */
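For reference, the foreach_hicnfwd_error X-macro above is expanded into the error counter enum roughly as sketched below; the enum itself follows in mgmt.h but is outside this hunk, so the terminator name is an assumption:

typedef enum
{
#define _(sym, str) HICNFWD_ERROR_##sym,
  foreach_hicnfwd_error
#undef _
    HICNFWD_N_ERROR, /* assumed name of the count terminator */
} hicnfwd_error_t;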
diff --git a/hicn-plugin/src/params.h b/hicn-plugin/src/params.h
index 606d50771..324429abe 100644
--- a/hicn-plugin/src/params.h
+++ b/hicn-plugin/src/params.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Copyright (c) 2021 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -23,11 +23,10 @@
*
*/
-
/*
* Features
*/
-#define HICN_FEATURE_CS 1 //1 enable 0 disable
+#define HICN_FEATURE_CS 1 // 1 enable 0 disable
/*
* Face compile-time parameters
@@ -40,81 +39,84 @@ STATIC_ASSERT ((HICN_PARAM_FACES_MAX & (HICN_PARAM_FACES_MAX - 1)) == 0,
/*
* Max length for hICN names
*/
-#define HICN_PARAM_HICN_NAME_LEN_MAX 20 //bytes
+#define HICN_PARAM_HICN_NAME_LEN_MAX 20 // bytes
// Max next - hops supported in a FIB entry
-#define HICN_PARAM_FIB_ENTRY_NHOPS_MAX 10
+#define HICN_PARAM_FIB_ENTRY_NHOPS_MAX 10
// Default and limit on weight, whatever weight means
-#define HICN_PARAM_FIB_ENTRY_NHOP_WGHT_DFLT 0x10
-#define HICN_PARAM_FIB_ENTRY_NHOP_WGHT_MAX 0xff
+#define HICN_PARAM_FIB_ENTRY_NHOP_WGHT_DFLT 0x10
+#define HICN_PARAM_FIB_ENTRY_NHOP_WGHT_MAX 0xff
/*
* PIT compile-time parameters
*/
-#define HICN_PARAM_PIT_ENTRIES_MIN 1024
-#define HICN_PARAM_PIT_ENTRIES_DFLT 1024 * 128
-#define HICN_PARAM_PIT_ENTRIES_MAX 2 * 1024 * 1024
+#define HICN_PARAM_PIT_ENTRIES_MIN 1024
+#define HICN_PARAM_PIT_ENTRIES_DFLT 1024 * 128
+#define HICN_PARAM_PIT_ENTRIES_MAX 2 * 1024 * 1024
// aggregation limit(interest previous hops)
// Supported up to 516. For more than 4 faces this param must
// HICN_PARAM_PIT_ENTRY_PHOPS_MAX - 4 must be a power of two
#define HICN_PARAM_PIT_ENTRY_PHOPS_MAX 20
-STATIC_ASSERT ((ceil (log2 ((HICN_PARAM_PIT_ENTRY_PHOPS_MAX - 4)))) ==
- (floor (log2 ((HICN_PARAM_PIT_ENTRY_PHOPS_MAX - 4)))),
- "HICN_PARAM_PIT_ENTRY_PHOPS_MAX - 4 must be a power of two");
+// STATIC_ASSERT ((ceil (log2 ((HICN_PARAM_PIT_ENTRY_PHOPS_MAX - 4)))) ==
+// (floor (log2 ((HICN_PARAM_PIT_ENTRY_PHOPS_MAX - 4)))),
+// "HICN_PARAM_PIT_ENTRY_PHOPS_MAX - 4 must be a power of two");
-STATIC_ASSERT ((HICN_PARAM_PIT_ENTRY_PHOPS_MAX <= HICN_PARAM_FACES_MAX),
- "HICN_PARAM_PIT_ENTRY_PHOP_MAX must be <= than HICN_PARAM_FACES_MAX");
+STATIC_ASSERT (
+ (HICN_PARAM_PIT_ENTRY_PHOPS_MAX <= HICN_PARAM_FACES_MAX),
+ "HICN_PARAM_PIT_ENTRY_PHOP_MAX must be <= than HICN_PARAM_FACES_MAX");
-//tFIB parameters
+// tFIB parameters
#define HICN_PARAM_RTX_MAX 10
// PIT lifetime limits on API override this(in seconds, integer type)
-#define HICN_PARAM_PIT_LIFETIME_BOUND_MIN_SEC 0
-#define HICN_PARAM_PIT_LIFETIME_BOUND_MAX_SEC 200
+#define HICN_PARAM_PIT_LIFETIME_BOUND_MIN_SEC 0
+#define HICN_PARAM_PIT_LIFETIME_BOUND_MAX_SEC 200
-//PIT lifetime params if not set at API(in mseconds, integer type)
-#define HICN_PARAM_PIT_LIFETIME_DFLT_MAX_MS 20000
+// PIT lifetime params if not set at API(in mseconds, integer type)
+#define HICN_PARAM_PIT_LIFETIME_DFLT_MAX_MS 20000
// Face CS reservation params
-#define HICN_PARAM_FACE_MAX_CS_RESERVED 20000 //packets
-#define HICN_PARAM_FACE_MIN_CS_RESERVED 0 //packets
-#define HICN_PARAM_FACE_DFT_CS_RESERVED 20000 //packets
+#define HICN_PARAM_FACE_MAX_CS_RESERVED 20000 // packets
+#define HICN_PARAM_FACE_MIN_CS_RESERVED 0 // packets
+#define HICN_PARAM_FACE_DFT_CS_RESERVED 20000 // packets
/*
* CS compile-time parameters
*/
-#define HICN_PARAM_CS_ENTRIES_MIN 0 // can disable CS
-#define HICN_PARAM_CS_ENTRIES_DFLT 4 * 1024
-#define HICN_PARAM_CS_ENTRIES_MAX 1024 * 1024
+#define HICN_PARAM_CS_ENTRIES_MIN 0 // can disable CS
+#define HICN_PARAM_CS_ENTRIES_DFLT 4 * 1024
+#define HICN_PARAM_CS_ENTRIES_MAX 1024 * 1024
-#define HICN_PARAM_CS_LRU_DEFAULT (16 * 1024)
+#define HICN_PARAM_CS_LRU_DEFAULT (16 * 1024)
/* CS lifetime defines, in mseconds, integer type */
-#define HICN_PARAM_CS_LIFETIME_MIN 0
-#define HICN_PARAM_CS_LIFETIME_DFLT (5 * 60 * 1000) // 300 seconds
-#define HICN_PARAM_CS_LIFETIME_MAX (24 * 3600 * 1000) //24 hours...
+#define HICN_PARAM_CS_LIFETIME_MIN 0
+#define HICN_PARAM_CS_LIFETIME_DFLT (5 * 60 * 1000) // 300 seconds
+#define HICN_PARAM_CS_LIFETIME_MAX (24 * 3600 * 1000) // 24 hours...
/* CS reserved portion for applications */
-#define HICN_PARAM_CS_RESERVED_APP 50 //%
-#define HICN_PARAM_CS_MIN_MBUF 4096 //this seems to be the minumim default number of mbuf we can have in vpp
+#define HICN_PARAM_CS_RESERVED_APP 50 //%
+#define HICN_PARAM_CS_MIN_MBUF \
+  4096 // this seems to be the minimum default number of mbufs we can have in
+ // vpp
/* Cloning parameters */
/* ip4 */
#define HICN_IP4_VERSION_HEADER_LENGTH 0x45
-#define HICN_IP4_PROTOCOL IP_PROTOCOL_TCP
-#define HICN_IP4_TTL_DEFAULT 128
+#define HICN_IP4_PROTOCOL IP_PROTOCOL_TCP
+#define HICN_IP4_TTL_DEFAULT 128
/* ip6 */
-#define IPV6_DEFAULT_VERSION 6
-#define IPV6_DEFAULT_TRAFFIC_CLASS 0
-#define IPV6_DEFAULT_FLOW_LABEL 0
-#define HCIN_IP6_VERSION_TRAFFIC_FLOW (IPV6_DEFAULT_VERSION << 28) | \
- (IPV6_DEFAULT_TRAFFIC_CLASS << 20) | \
- (IPV6_DEFAULT_FLOW_LABEL & 0xfffff)
-#define HICN_IP6_PROTOCOL IP_PROTOCOL_TCP
+#define IPV6_DEFAULT_VERSION 6
+#define IPV6_DEFAULT_TRAFFIC_CLASS 0
+#define IPV6_DEFAULT_FLOW_LABEL 0
+#define HICN_IP6_VERSION_TRAFFIC_FLOW \
+  ((IPV6_DEFAULT_VERSION << 28) | (IPV6_DEFAULT_TRAFFIC_CLASS << 20) |       \
+   (IPV6_DEFAULT_FLOW_LABEL & 0xfffff))
+#define HICN_IP6_PROTOCOL IP_PROTOCOL_TCP
#define HICN_IP6_HOP_LIMIT 0x40
#endif /* // __HICN_PARAM_H__ */
diff --git a/hicn-plugin/src/parser.h b/hicn-plugin/src/parser.h
index e79d65831..e9f709481 100644
--- a/hicn-plugin/src/parser.h
+++ b/hicn-plugin/src/parser.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Copyright (c) 2021-2022 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -20,99 +20,92 @@
#include "hicn.h"
#include "error.h"
+#include "infra.h"
/**
* @file parser.h
*/
-/*
- * Key type codes for header, header tlvs, body tlvs, and child tlvs
- */
-
-// FIXME(reuse lib struct, no more control ?)
-enum hicn_pkt_type_e
+always_inline int
+parse (vlib_buffer_t *pkt, uword size)
{
- HICN_PKT_TYPE_INTEREST = 0,
- HICN_PKT_TYPE_CONTENT = 1,
-};
+ if (pkt == NULL)
+ return HICN_ERROR_PARSER_PKT_INVAL;
+
+ int ret = HICN_ERROR_NONE;
+
+ hicn_lifetime_t *lifetime;
+ hicn_payload_type_t payload_type;
+
+ hicn_packet_buffer_t *pkbuf = &hicn_get_buffer (pkt)->pkbuf;
+
+ hicn_packet_set_buffer (pkbuf, vlib_buffer_get_current (pkt), size, size);
+ hicn_packet_analyze (&hicn_get_buffer (pkt)->pkbuf);
+
+ /* get lifetime*/
+ lifetime = &hicn_get_buffer (pkt)->lifetime;
+ hicn_packet_get_lifetime (pkbuf, lifetime);
+
+ if (*lifetime > hicn_main.pit_lifetime_max_ms)
+ *lifetime = hicn_main.pit_lifetime_max_ms;
+
+ /* get payload type */
+ hicn_packet_get_payload_type (pkbuf, &payload_type);
+ hicn_get_buffer (pkt)->payload_type = (u16) (payload_type);
+ return ret;
+
+#if 0
+  hicn_name_t *name;
+
+ /* get name and name length*/
+ name = &hicn_get_buffer (pkt)->name;
+ ret = hicn_##PACKET_TYPE##_get_name (pkbuf, name);
+ if (PREDICT_FALSE (ret))
+ {
+ if (type.l2 == IPPROTO_ICMPV4 || type.l2 == IPPROTO_ICMPV6)
+ {
+ return HICN_ERROR_PARSER_MAPME_PACKET;
+ }
+ return HICN_ERROR_PARSER_PKT_INVAL;
+ }
+#endif
+}
/**
- * @brief Parse an interest packet
+ * @brief Parse an interest packet
*
* @param pkt vlib buffer holding the interest
- * @param name return variable that will point to the hicn name
- * @param namelen return valiable that will hold the length of the name
- * @param pkt_hdrp return valiable that will point to the packet header
- * @param isv6 return variable that will be equale to 1 is the header is ipv6
+ * @param size size of the packet data in the vlib buffer
*/
always_inline int
-hicn_interest_parse_pkt (vlib_buffer_t * pkt, hicn_name_t * name,
- u16 * namelen, hicn_header_t ** pkt_hdrp, u8 * isv6)
+hicn_interest_parse_pkt (vlib_buffer_t *pkt, uword size)
{
- if (pkt == NULL)
- return HICN_ERROR_PARSER_PKT_INVAL;
- hicn_header_t *pkt_hdr = vlib_buffer_get_current (pkt);
- *pkt_hdrp = pkt_hdr;
- u8 *ip_pkt = vlib_buffer_get_current (pkt);
- *isv6 = hicn_is_v6 (pkt_hdr);
- u8 ip_proto = (*isv6) * IPPROTO_IPV6;
- u8 next_proto_offset = 6 + (1 - *isv6) * 3;
- //in the ipv6 header the next header field is at byte 6
- // in the ipv4 header the protocol field is at byte 9
- hicn_type_t type = (hicn_type_t) { {
- .l4 = IPPROTO_NONE,.l3 =
- IPPROTO_NONE,.l2 =
- ip_pkt[next_proto_offset],.l1 =
- ip_proto}
- };
- hicn_get_buffer (pkt)->type = type;
-
- hicn_ops_vft[type.l1]->get_interest_name (type, &pkt_hdr->protocol, name);
- *namelen = (1 - (*isv6)) * HICN_V4_NAME_LEN + (*isv6) * HICN_V6_NAME_LEN;
-
- return HICN_ERROR_NONE;
+ return parse (pkt, size);
}
/**
* @brief Parse a data packet
*
- * @param pkt vlib buffer holding the interest
- * @param name return variable that will point to the hicn name
- * @param namelen return valiable that will hold the length of the name
- * @param pkt_hdrp return valiable that will point to the packet header
- * @param isv6 return variable that will be equale to 1 is the header is ipv6
+ * @param pkt vlib buffer holding the data
+ * @param name [RETURNED] variable that will point to the hicn name
+ * @param namelen [RETURNED] variable that will hold the length of the name
+ * @param port [RETURNED] variable that will hold the source port of the packet
+ * @param pkt_hdrp [RETURNED] valiable that will point to the packet header
+ * @param isv6 [RETURNED] variable that will be equale to 1 is the header is
+ * ipv6
*/
always_inline int
-hicn_data_parse_pkt (vlib_buffer_t * pkt, hicn_name_t * name,
- u16 * namelen, hicn_header_t ** pkt_hdrp, u8 * isv6)
+hicn_data_parse_pkt (vlib_buffer_t *pkt, uword size)
{
- if (pkt == NULL)
- return HICN_ERROR_PARSER_PKT_INVAL;
- hicn_header_t *pkt_hdr = vlib_buffer_get_current (pkt);
- *pkt_hdrp = pkt_hdr;
- *pkt_hdrp = pkt_hdr;
- u8 *ip_pkt = vlib_buffer_get_current (pkt);
- *isv6 = hicn_is_v6 (pkt_hdr);
- u8 ip_proto = (*isv6) * IPPROTO_IPV6;
- /*
- * in the ipv6 header the next header field is at byte 6 in the ipv4
- * header the protocol field is at byte 9
- */
- u8 next_proto_offset = 6 + (1 - *isv6) * 3;
- hicn_type_t type = (hicn_type_t) { {.l4 = IPPROTO_NONE,.l3 =
- IPPROTO_NONE,.l2 =
- ip_pkt[next_proto_offset],.l1 =
- ip_proto}
- };
- hicn_get_buffer (pkt)->type = type;
- hicn_ops_vft[type.l1]->get_data_name (type, &pkt_hdr->protocol, name);
- *namelen = (1 - (*isv6)) * HICN_V4_NAME_LEN + (*isv6) * HICN_V6_NAME_LEN;
-
- return HICN_ERROR_NONE;
+ return parse (pkt, size);
}
-
-#endif /* // __HICN_PARSER_H__ */
+#endif /* __HICN_PARSER_H__ */
/*
* fd.io coding-style-patch-verification: ON
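
A minimal usage sketch (not part of the patch) of how a node could drive the reworked parser above: the two public entry points now take only the buffer and its size, and both funnel into the shared parse() helper. Only hicn_interest_parse_pkt(), hicn_data_parse_pkt() and the HICN_ERROR_* codes come from the header; the buffer-length helper and the surrounding node logic are assumptions.

static_always_inline int
example_parse (vlib_main_t *vm, vlib_buffer_t *b0, int is_interest)
{
  /* Length of the packet starting at the current buffer position. */
  uword size = vlib_buffer_length_in_chain (vm, b0);

  /* parse() caches the (clamped) lifetime and the payload type in the
   * hicn buffer metadata instead of returning name/header pointers. */
  int rv = is_interest ? hicn_interest_parse_pkt (b0, size) :
			 hicn_data_parse_pkt (b0, size);

  return rv; /* HICN_ERROR_NONE or HICN_ERROR_PARSER_PKT_INVAL */
}
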
diff --git a/hicn-plugin/src/pcs.c b/hicn-plugin/src/pcs.c
index 6c44b9d83..564a435a0 100644
--- a/hicn-plugin/src/pcs.c
+++ b/hicn-plugin/src/pcs.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Copyright (c) 2021 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -16,36 +16,41 @@
#include <stdlib.h>
#include <vlib/vlib.h>
-#include "hashtb.h"
#include "pcs.h"
#include "cache_policies/cs_lru.h"
-int
-hicn_pit_create (hicn_pit_cs_t * p, u32 num_elems)
+void
+hicn_pit_create (hicn_pit_cs_t *p, u32 max_pit_elt, u32 max_cs_elt)
{
- int ret =
- hicn_hashtb_alloc (&p->pcs_table, num_elems, sizeof (hicn_pcs_entry_t));
- p->pcs_table->ht_flags |= HICN_HASHTB_FLAG_KEY_FMT_NAME;
+ // Allocate PCS hash table. KEY=Name, VALUE=pool_idx
+ clib_bihash_24_8_t *pcs_table = &p->pcs_table;
+ u32 n_elements = max_pit_elt / BIHASH_KVP_PER_PAGE;
+ clib_bihash_init_24_8 (pcs_table, "hicn_pcs_table", n_elements, 512 << 20);
- p->pcs_pit_count = p->pcs_cs_count = 0;
+ // Allocate pool of PIT/CS entries
+ pool_alloc (p->pcs_entries_pool, max_pit_elt);
- p->policy_state.max =
- HICN_PARAM_CS_LRU_DEFAULT -
- (HICN_PARAM_CS_LRU_DEFAULT * HICN_PARAM_CS_RESERVED_APP / 100);
- p->policy_state.count = 0;
- p->policy_state.head = p->policy_state.tail = 0;
+ // Init counters
+ p->max_pit_size = max_pit_elt;
+ p->pcs_pit_count = p->pcs_cs_count = 0;
+ p->policy_state = hicn_cs_lru_create (max_cs_elt);
+ p->pcs_cs_count = 0;
+ p->pcs_pcs_alloc = 0;
+ p->pcs_pcs_dealloc = 0;
+ p->pcs_pit_count = 0;
+}
- p->policy_vft.hicn_cs_insert = hicn_cs_lru.hicn_cs_insert;
- p->policy_vft.hicn_cs_update = hicn_cs_lru.hicn_cs_update;
- p->policy_vft.hicn_cs_dequeue = hicn_cs_lru.hicn_cs_dequeue;
- p->policy_vft.hicn_cs_delete_get = hicn_cs_lru.hicn_cs_delete_get;
- p->policy_vft.hicn_cs_trim = hicn_cs_lru.hicn_cs_trim;
- p->policy_vft.hicn_cs_flush = hicn_cs_lru.hicn_cs_flush;
+void
+hicn_pit_destroy (hicn_pit_cs_t *p)
+{
+ // Deallocate PCS hash table.
+ clib_bihash_24_8_t *pcs_table = &p->pcs_table;
+ clib_bihash_free_24_8 (pcs_table);
- return (ret);
+ // Deallocate pool of PIT/CS entries
+ pool_free (p->pcs_entries_pool);
}
-
/*
* fd.io coding-style-patch-verification: ON
*
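
For illustration, a hedged sketch of the new PIT/CS lifecycle exposed by pcs.c: creation now sizes a bihash plus an entry pool and wires in the LRU policy, and destruction releases both. The element counts below are arbitrary; only hicn_pit_create() and hicn_pit_destroy() come from the file above.

static void
example_pit_lifecycle (void)
{
  hicn_pit_cs_t pcs;

  /* 1M PIT entries and 256K CS entries; the CS limit is handed to the
   * LRU policy allocated inside hicn_pit_create(). */
  hicn_pit_create (&pcs, 1 << 20, 1 << 18);

  /* ... forwarding path uses the bihash + entry pool in pcs ... */

  hicn_pit_destroy (&pcs);
}
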
diff --git a/hicn-plugin/src/pcs.h b/hicn-plugin/src/pcs.h
index a9e1ae5a0..2019ddb73 100644
--- a/hicn-plugin/src/pcs.h
+++ b/hicn-plugin/src/pcs.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Copyright (c) 2021 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -16,821 +16,811 @@
#ifndef __HICN_PCS_H__
#define __HICN_PCS_H__
-#include "hashtb.h"
-#include "face_db.h"
#include "strategy_dpo_manager.h"
#include "error.h"
#include "cache_policies/cs_policy.h"
#include "faces/face.h"
+#include <vppinfra/bihash_24_8.h>
+
/**
* @file pcs.h
*
* This file implement the PIT and CS which are collapsed in the same
- * structure, thereore an entry is either a PIT entry of a CS entry.
- * The implementation consist of a hash table where each entry of the
- * hash table contains a PIT or CS entry, some counters to maintain the
- * status of the PIT/CS and the reference to the eviction policy for
- * the CS. The default eviction policy id FIFO.
+ * structure, therefore an entry is either a PIT entry or a CS entry.
+ * The implementation consists of a hash table where each entry of the
+ * hash table contains an index to a pool of PIT/CS entries. Each entry
+ * contains some counters to maintain the status of the PIT/CS and the
+ * reference to the eviction policy for the CS.
+ * The default eviction policy is LRU.
*/
-/* The PIT and CS are stored as a union */
+/*
+ * We need a definition of invalid index. ~0 is reasonable as we don't expect
+ * to reach that many elements in the PIT.
+ */
+#define HICN_PCS_INVALID_INDEX ((u32) (~0))
+
+/*
+ * The PIT and CS are stored as a union
+ */
#define HICN_PIT_NULL_TYPE 0
-#define HICN_PIT_TYPE 1
-#define HICN_CS_TYPE 2
+#define HICN_PIT_TYPE 1
+#define HICN_CS_TYPE 2
/*
* Definitions and Forward refs for the time counters we're trying out.
* Counters are maintained by the background process. TODO.
*/
-#define SEC_MS 1000
+#define SEC_MS 1000
#define HICN_INFRA_FAST_TIMER_SECS 1
#define HICN_INFRA_FAST_TIMER_MSECS (HICN_INFRA_FAST_TIMER_SECS * SEC_MS)
#define HICN_INFRA_SLOW_TIMER_SECS 60
#define HICN_INFRA_SLOW_TIMER_MSECS (HICN_INFRA_SLOW_TIMER_SECS * SEC_MS)
+#define HICN_CS_ENTRY_OPAQUE_SIZE 32
+
+#define HICN_FACE_DB_INLINE_FACES 8
+
+#define HICN_PIT_BITMAP_SIZE_U64   (HICN_PARAM_FACES_MAX / 64)
+#define HICN_PIT_N_HOP_BITMAP_SIZE HICN_PARAM_FACES_MAX
+
/*
- * Note that changing this may change alignment within the PIT struct, so be careful.
+ * PCS entry. We expect this to fit in 3 cache lines, with a maximum of 8
+ * output inline faces and a bitmap of 512 bits. If more faces are needed, a
+ * vector will be allocated, but it will end up outside the 3 cache lines.
*/
-typedef struct __attribute__ ((packed)) hicn_pcs_shared_s
+typedef struct hicn_pcs_entry_s
{
+ /*
+ * First cache line - shared data
+ */
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
- /* Installation/creation time (vpp float units, for now) */
+ /*
+ * Installation/creation time (vpp float units, for now).
+ * 8 Bytes
+ */
f64 create_time;
- /* Expiration time (vpp float units, for now) */
+ /*
+ * Expiration time (vpp float units, for now)
+ * 8 Bytes
+ */
f64 expire_time;
- /* Shared 'flags' octet */
- u8 entry_flags;
-
- /* Needed to align for the pit or cs portion */
- u8 padding;
-} hicn_pcs_shared_t;
-
-#define HICN_PCS_ENTRY_CS_FLAG 0x01
-
-/*
- * PIT entry, unioned with a CS entry below
- */
-typedef struct __attribute__ ((packed)) hicn_pit_entry_s
-{
+ /*
+ * Name
+ * 24 bytes
+ */
+ hicn_name_t name;
- /* Shared size 8 + 8 + 2 = 18B */
+ /*
+ * Cached hash of the name
+ * 8 bytes
+ */
+ u64 name_hash;
/*
- * Egress next hop (containes the egress face) This id refers to the
- * position of the choosen face in the next_hops array of the dpo */
- /* 18B + 1B = 19B */
- u8 pe_txnh;
+   * Shared 'flags' field
+   * 2 Bytes
+ */
+ u16 flags;
- /* Array of incoming ifaces */
- /* 24B + 32B (8B*4) =56B */
- hicn_face_db_t faces;
+ /*
+ * Number of locks on the PCS entry
+ * 2 Bytes
+ */
+ u16 locks;
-} hicn_pit_entry_t;
+ /*
+ * Second cache line - PIT or CS data
+ */
+ CLIB_ALIGN_MARK (second_part, 64);
-#define HICN_CS_ENTRY_OPAQUE_SIZE HICN_HASH_NODE_APP_DATA_SIZE - 36
+ union
+ {
+ struct
+ {
+ /*
+ * Bitmap used to check if interests are retransmission
+ */
+ u64 bitmap[HICN_PIT_BITMAP_SIZE_U64];
-/*
- * CS entry, unioned with a PIT entry below
- */
-typedef struct __attribute__ ((packed)) hicn_cs_entry_s
-{
- /* 18B + 2B = 20B */
- u16 align;
+ CLIB_ALIGN_MARK (third_part, 64);
- /* Packet buffer, if held */
- /* 20B + 4B = 24B */
- u32 cs_pkt_buf;
+ /*
+ * Total number of faces
+ */
+ u32 n_faces;
- /* Ingress face */
- /* 24B + 4B = 28B */
- hicn_face_id_t cs_rxface;
+ /*
+ * Array of indexes of virtual faces
+ */
+ hicn_face_id_t inline_faces[HICN_FACE_DB_INLINE_FACES];
- /* Linkage for LRU, in the form of hashtable node indexes */
- /* 28B + 8B = 36B */
- u32 cs_lru_prev;
- u32 cs_lru_next;
+ /*
+ * VPP vector of indexes of additional virtual faces, allocated iff
+ * needed
+ */
+ hicn_face_id_t *faces;
+ } pit;
+ struct
+  {
+    /*
+ * Packet buffer, if held
+ * 4 Bytes
+ */
+ u32 cs_pkt_buf;
- /* Reserved for implementing cache policy different than LRU */
- /* 36B + (64 - 36)B = 64B */
- u8 opaque[HICN_CS_ENTRY_OPAQUE_SIZE];
+ /*
+ * Linkage for LRU, in the form of hashtable node indexes
+ * 8 Bytes
+ */
+ u32 cs_lru_prev;
+ u32 cs_lru_next;
+ } cs;
+ } u;
+} hicn_pcs_entry_t;
+STATIC_ASSERT (sizeof (hicn_pcs_entry_t) <= 3 * CLIB_CACHE_LINE_BYTES,
+ "hicn_pcs_entry_t does not fit in 3 cache lines.");
+
+STATIC_ASSERT (0 == offsetof (hicn_pcs_entry_t, cacheline0),
+ "Cacheline0 must be at the beginning of hicn_pcs_entry_t");
+STATIC_ASSERT (64 == offsetof (hicn_pcs_entry_t, second_part),
+ "second_part must be at byte 64 of hicn_pcs_entry_t");
+STATIC_ASSERT (64 == offsetof (hicn_pcs_entry_t, u.pit.bitmap),
+ "u.pit.bitmap must be at byte 64 of hicn_pcs_entry_t");
+STATIC_ASSERT (64 == offsetof (hicn_pcs_entry_t, u.cs.cs_pkt_buf),
+ "cs_pkt_buf must be at byte 64 of hicn_pcs_entry_t");
+STATIC_ASSERT (128 == offsetof (hicn_pcs_entry_t, u.pit.third_part),
+ "third_part must be at byte 128 of hicn_pcs_entry_t");
+STATIC_ASSERT (128 == offsetof (hicn_pcs_entry_t, u.pit.n_faces),
+ "u.pit.n_faces must be at byte 128 of hicn_pcs_entry_t");
-} __attribute__ ((packed)) hicn_cs_entry_t;
+#define HICN_PCS_ENTRY_CS_FLAG 0x01
/*
- * Combined PIT/CS entry data structure, embedded in a hashtable entry after
- * the common hashtable preamble struct. This MUST fit in the available
- * (fixed) space in a hashtable node.
+ * Forward declarations
*/
-typedef struct hicn_pcs_entry_s
-{
-
- hicn_pcs_shared_t shared;
-
- union
- {
- hicn_pit_entry_t pit;
- hicn_cs_entry_t cs;
- } u;
-} hicn_pcs_entry_t;
+always_inline void hicn_pcs_delete_internal (hicn_pit_cs_t *pitcs,
+ hicn_pcs_entry_t *pcs_entry);
+always_inline void hicn_pcs_entry_remove_lock (hicn_pit_cs_t *pitcs,
+ hicn_pcs_entry_t *pcs_entry);
/*
- * Overall PIT/CS table, based on the common hashtable
+ * Overall PIT/CS table.
*/
typedef struct hicn_pit_cs_s
{
+ // Hash table mapping name to hash entry index
+ clib_bihash_24_8_t pcs_table;
- hicn_hashtb_t *pcs_table;
+ // Total size of PCS
+ u32 max_pit_size;
+
+ // Pool of pcs entries
+ hicn_pcs_entry_t *pcs_entries_pool;
/* Counters for PIT/CS sentries */
u32 pcs_pit_count;
u32 pcs_cs_count;
- u32 pcs_cs_dealloc;
- u32 pcs_pit_dealloc;
-
- /* Total size of PCS */
- u32 pcs_size;
+ u32 pcs_pcs_alloc;
+ u32 pcs_pcs_dealloc;
hicn_cs_policy_t policy_state;
- hicn_cs_policy_vft_t policy_vft;
-
} hicn_pit_cs_t;
-/* Functions declarations */
-int hicn_pit_create (hicn_pit_cs_t * p, u32 num_elems);
+/************************************************************************
+ **************************** Create / Destroy **************************
+ ************************************************************************/
-always_inline void
-hicn_pit_to_cs (vlib_main_t * vm, hicn_pit_cs_t * pitcs,
- hicn_pcs_entry_t * pcs_entry, hicn_hash_entry_t * hash_entry,
- hicn_hash_node_t * node, const hicn_dpo_vft_t * dpo_vft,
- dpo_id_t * hicn_dpo_id, hicn_face_id_t inface_id, u8 is_appface);
+void hicn_pit_create (hicn_pit_cs_t *p, u32 max_pit_elt, u32 max_cs_elt);
+void hicn_pit_destroy (hicn_pit_cs_t *p);
-always_inline void
-hicn_pcs_cs_update (vlib_main_t * vm, hicn_pit_cs_t * pitcs,
- hicn_pcs_entry_t * old_entry, hicn_pcs_entry_t * entry,
- hicn_hash_node_t * node);
-
-always_inline void
-hicn_pcs_cs_delete (vlib_main_t * vm, hicn_pit_cs_t * pitcs,
- hicn_pcs_entry_t ** pcs_entry, hicn_hash_node_t ** node,
- hicn_hash_entry_t * hash_entry,
- const hicn_dpo_vft_t * dpo_vft, dpo_id_t * hicn_dpo_id);
+/************************************************************************
+ **************************** Counters getters **************************
+ ************************************************************************/
-always_inline int
-hicn_pcs_cs_insert (vlib_main_t * vm, hicn_pit_cs_t * pitcs,
- hicn_pcs_entry_t * entry, hicn_hash_node_t * node,
- hicn_hash_entry_t ** hash_entry, u64 hashval,
- u32 * node_id, index_t * dpo_ctx_id, u8 * vft_id,
- u8 * is_cs, u8 * hash_entry_id, u32 * bucket_id,
- u8 * bucket_is_overflow);
+always_inline u32
+hicn_pcs_get_pit_count (const hicn_pit_cs_t *pcs)
+{
+ return pcs->pcs_pit_count;
+}
-always_inline int
-hicn_pcs_cs_insert_update (vlib_main_t * vm, hicn_pit_cs_t * pitcs,
- hicn_pcs_entry_t * entry, hicn_hash_node_t * node,
- hicn_hash_entry_t ** hash_entry, u64 hashval,
- u32 * node_id, index_t * dpo_ctx_id, u8 * vft_id,
- u8 * is_cs, u8 * hash_entry_id, u32 * bucket_id,
- u8 * bucket_is_overflow, hicn_face_id_t inface);
+always_inline u32
+hicn_pcs_get_cs_count (const hicn_pit_cs_t *pcs)
+{
+ return pcs->pcs_cs_count;
+}
-always_inline int
-hicn_pcs_pit_insert (hicn_pit_cs_t * pitcs, hicn_pcs_entry_t * entry,
- hicn_hash_node_t * node, hicn_hash_entry_t ** hash_entry,
- u64 hashval, u32 * node_id, index_t * dpo_ctx_id,
- u8 * vft_id, u8 * is_cs, u8 * hash_entry_id,
- u32 * bucket_id, u8 * bucket_is_overflow);
+always_inline u32
+hicn_pcs_get_pcs_alloc (const hicn_pit_cs_t *pcs)
+{
+ return pcs->pcs_pcs_alloc;
+}
-always_inline void
-hicn_pcs_pit_delete (hicn_pit_cs_t * pitcs, hicn_pcs_entry_t ** pcs_entryp,
- hicn_hash_node_t ** node, vlib_main_t * vm,
- hicn_hash_entry_t * hash_entry,
- const hicn_dpo_vft_t * dpo_vft, dpo_id_t * hicn_dpo_id);
+always_inline u32
+hicn_pcs_get_pcs_dealloc (const hicn_pit_cs_t *pcs)
+{
+ return pcs->pcs_pcs_dealloc;
+}
-always_inline int
-hicn_pcs_insert (vlib_main_t * vm, hicn_pit_cs_t * pitcs,
- hicn_pcs_entry_t * entry, hicn_hash_node_t * node,
- hicn_hash_entry_t ** hash_entry, u64 hashval, u32 * node_id,
- index_t * dpo_ctx_id, u8 * vft_id, u8 * is_cs,
- u8 * hash_entry_id, u32 * bucket_id,
- u8 * bucket_is_overflow);
+always_inline f64
+hicn_pcs_get_exp_time (f64 cur_time_sec, u64 lifetime_msec)
+{
+ return (cur_time_sec + ((f64) lifetime_msec) / SEC_MS);
+}
+/*
+ * Create key from the name struct.
+ */
always_inline void
-hicn_pcs_delete (hicn_pit_cs_t * pitcs, hicn_pcs_entry_t ** pcs_entryp,
- hicn_hash_node_t ** node, vlib_main_t * vm,
- hicn_hash_entry_t * hash_entry,
- const hicn_dpo_vft_t * dpo_vft, dpo_id_t * hicn_dpo_id);
+hicn_pcs_get_key_from_name (clib_bihash_kv_24_8_t *kv, const hicn_name_t *name)
+{
+ kv->key[0] = name->prefix.v6.as_u64[0];
+ kv->key[1] = name->prefix.v6.as_u64[1];
+ kv->key[2] = name->suffix;
+}
-always_inline void
-hicn_pcs_remove_lock (hicn_pit_cs_t * pitcs, hicn_pcs_entry_t ** pcs_entryp,
- hicn_hash_node_t ** node, vlib_main_t * vm,
- hicn_hash_entry_t * hash_entry,
- const hicn_dpo_vft_t * dpo_vft, dpo_id_t * hicn_dpo_id);
+/************************************************************************
+ **************************** LRU Helpers *******************************
+ ************************************************************************/
-always_inline void
-hicn_cs_delete_trimmed (hicn_pit_cs_t * pitcs, hicn_pcs_entry_t ** pcs_entryp,
- hicn_hash_entry_t * hash_entry,
- hicn_hash_node_t ** node, vlib_main_t * vm);
+always_inline hicn_cs_policy_t *
+hicn_pcs_get_policy_state (hicn_pit_cs_t *pcs)
+{
+ return &pcs->policy_state;
+}
-/* Function implementation */
-/* Accessor for pit/cs data inside hash table node */
-static inline hicn_pcs_entry_t *
-hicn_pit_get_data (hicn_hash_node_t * node)
+/*
+ * Update the CS LRU, moving this item to the head
+ */
+always_inline void
+hicn_pcs_cs_update_lru (hicn_pit_cs_t *pitcs, hicn_pcs_entry_t *entry)
{
- return (hicn_pcs_entry_t *) (hicn_hashtb_node_data (node));
+ hicn_cs_policy_t *policy_state = hicn_pcs_get_policy_state (pitcs);
+ hicn_cs_policy_update (policy_state, pitcs, entry);
}
-/* Init pit/cs data block (usually inside hash table node) */
-static inline void
-hicn_pit_init_data (hicn_pcs_entry_t * p)
+/*
+ * Update the CS LRU, inserting a new item and checking if we need to evict
+ */
+always_inline void
+hicn_pcs_cs_insert_lru (hicn_pit_cs_t *pitcs, hicn_pcs_entry_t *entry)
{
- p->shared.entry_flags = 0;
- p->u.pit.faces.n_faces = 0;
- p->u.pit.faces.is_overflow = 0;
- hicn_face_bucket_t *face_bkt;
- pool_get (hicn_face_bucket_pool, face_bkt);
+ hicn_cs_policy_t *policy_state = hicn_pcs_get_policy_state (pitcs);
+ hicn_cs_policy_insert (policy_state, pitcs, entry);
+ pitcs->pcs_cs_count++;
- p->u.pit.faces.next_bucket = face_bkt - hicn_face_bucket_pool;
+ // If we reached the MAX size of the CS, let's evict one
+ if (policy_state->count > policy_state->max)
+ {
+      // We reached the max number of CS entries. We need to trim one.
+ hicn_pcs_entry_t *pcs_entry;
+ hicn_cs_policy_delete_get (policy_state, pitcs, &pcs_entry);
+
+ // Delete evicted entry from hash table
+ hicn_pcs_entry_remove_lock (pitcs, pcs_entry);
+ }
}
-/* Init pit/cs data block (usually inside hash table node) */
-static inline void
-hicn_cs_init_data (hicn_pcs_entry_t * p)
+/*
+ * Dequeue an entry from the CS LRU
+ */
+always_inline void
+hicn_pcs_cs_dequeue_lru (hicn_pit_cs_t *pitcs, hicn_pcs_entry_t *entry)
{
- p->shared.entry_flags = 0;
- p->u.pit.faces.n_faces = 0;
- p->u.pit.faces.is_overflow = 0;
+ // Dequeue the CS entry
+ hicn_cs_policy_t *policy_state = hicn_pcs_get_policy_state (pitcs);
+ hicn_cs_policy_dequeue (policy_state, pitcs, entry);
}
+/************************************************************************
+ **************************** PCS Entry APIs ****************************
+ ************************************************************************/
-static inline f64
-hicn_pcs_get_exp_time (f64 cur_time_sec, u64 lifetime_msec)
+/*
+ * Create new PCS entry
+ */
+always_inline hicn_pcs_entry_t *
+_hicn_pcs_entry_get (hicn_pit_cs_t *pitcs)
{
- return (cur_time_sec + ((f64) lifetime_msec) / SEC_MS);
+ hicn_pcs_entry_t *e;
+ pool_get (pitcs->pcs_entries_pool, e);
+ pitcs->pcs_pcs_alloc++;
+
+ return e;
}
/*
- * Configure CS LRU limit. Zero is accepted, means 'no limit', probably not a
- * good choice.
+ * Init pit/cs data block
*/
-static inline void
-hicn_pit_set_lru_max (hicn_pit_cs_t * p, u32 limit)
+always_inline void
+hicn_pcs_entry_init_data (hicn_pcs_entry_t *p, f64 tnow)
{
- p->policy_state.max = limit;
+ p->flags = 0;
+ p->u.pit.n_faces = 0;
+ p->locks = 1;
+ p->create_time = tnow;
}
/*
- * Accessor for PIT interest counter.
+ * Free PCS entry
*/
-static inline u32
-hicn_pit_get_int_count (const hicn_pit_cs_t * pitcs)
+always_inline void
+hicn_pcs_entry_put (hicn_pit_cs_t *pitcs, const hicn_pcs_entry_t *entry)
{
- return (pitcs->pcs_pit_count);
+ pitcs->pcs_pcs_dealloc++;
+ pool_put (pitcs->pcs_entries_pool, entry);
}
/*
- * Accessor for PIT cs entries counter.
+ * Get index from the entry.
*/
-static inline u32
-hicn_pit_get_cs_count (const hicn_pit_cs_t * pitcs)
+always_inline u32
+hicn_pcs_entry_get_index (const hicn_pit_cs_t *pitcs,
+ const hicn_pcs_entry_t *entry)
{
- return (pitcs->pcs_cs_count);
+ ASSERT (!pool_is_free (pitcs->pcs_entries_pool, entry));
+ return (u32) (entry - pitcs->pcs_entries_pool);
}
-static inline u32
-hicn_pcs_get_ntw_count (const hicn_pit_cs_t * pitcs)
+/*
+ * Get entry from the index.
+ */
+always_inline hicn_pcs_entry_t *
+hicn_pcs_entry_get_entry_from_index (const hicn_pit_cs_t *pitcs, u32 index)
{
- return (pitcs->policy_state.count);
+ ASSERT (!pool_is_free_index (pitcs->pcs_entries_pool, index));
+ return pool_elt_at_index (pitcs->pcs_entries_pool, index);
}
-static inline u32
-hicn_pit_get_htb_bucket_count (const hicn_pit_cs_t * pitcs)
+always_inline hicn_pcs_entry_t *
+hicn_pcs_entry_get_entry_from_index_safe (const hicn_pit_cs_t *pitcs,
+ u32 index)
{
- return (pitcs->pcs_table->ht_overflow_buckets_used);
+ if (!pool_is_free_index (pitcs->pcs_entries_pool, index))
+ return pool_elt_at_index (pitcs->pcs_entries_pool, index);
+
+ return NULL;
}
-static inline int
-hicn_cs_enabled (hicn_pit_cs_t * pit)
+/*
+ * Check if pcs entry is a content store entry
+ */
+always_inline int
+hicn_pcs_entry_is_cs (const hicn_pcs_entry_t *entry)
{
- switch (HICN_FEATURE_CS)
- {
- case 0:
- default:
- return (0);
- case 1:
- return (pit->policy_state.max > 0);
- }
+ ASSERT (entry);
+ return (entry->flags & HICN_PCS_ENTRY_CS_FLAG);
}
/*
- * Delete a PIT/CS entry from the hashtable, freeing the hash node struct.
- * The caller's pointers are zeroed! If cs_trim is true, entry has already
- * been removed from lru list The main purpose of this wrapper is helping
- * maintain the per-PIT stats.
+ * Add a lock to the PCS entry
*/
always_inline void
-hicn_pcs_delete_internal (hicn_pit_cs_t * pitcs,
- hicn_pcs_entry_t ** pcs_entryp,
- hicn_hash_entry_t * hash_entry,
- hicn_hash_node_t ** node, vlib_main_t * vm,
- const hicn_dpo_vft_t * dpo_vft,
- dpo_id_t * hicn_dpo_id)
+hicn_pcs_entry_add_lock (hicn_pcs_entry_t *pcs_entry)
{
- hicn_pcs_entry_t *pcs = *pcs_entryp;
+ pcs_entry->locks++;
+}
- ASSERT (pcs == hicn_hashtb_node_data (*node));
+/*
+ * Get/Set expire time from the entry
+ */
+always_inline f64
+hicn_pcs_entry_get_expire_time (hicn_pcs_entry_t *pcs_entry)
+{
+ return pcs_entry->expire_time;
+}
- if (hash_entry->he_flags & HICN_HASH_ENTRY_FLAG_CS_ENTRY)
- {
- pitcs->pcs_cs_dealloc++;
- /* Free any associated packet buffer */
- vlib_buffer_free_one (vm, pcs->u.cs.cs_pkt_buf);
- pcs->u.cs.cs_pkt_buf = ~0;
- ASSERT ((pcs->u.cs.cs_lru_prev == 0)
- && (pcs->u.cs.cs_lru_prev == pcs->u.cs.cs_lru_next));
- }
- else
- {
- pitcs->pcs_pit_dealloc++;
- hicn_strategy_dpo_ctx_unlock (hicn_dpo_id);
+always_inline void
+hicn_pcs_entry_set_expire_time (hicn_pcs_entry_t *pcs_entry, f64 expire_time)
+{
+ pcs_entry->expire_time = expire_time;
+}
- /* Flush faces */
- hicn_faces_flush (&(pcs->u.pit.faces));
- }
+/*
+ * Get/Set create time from the entry
+ */
+always_inline f64
+hicn_pcs_entry_get_create_time (hicn_pcs_entry_t *pcs_entry)
+{
+ return pcs_entry->create_time;
+}
- hicn_hashtb_delete (pitcs->pcs_table, node, hash_entry->he_msb64);
- *pcs_entryp = NULL;
+always_inline void
+hicn_pcs_entry_set_create_time (hicn_pcs_entry_t *pcs_entry, f64 create_time)
+{
+ pcs_entry->create_time = create_time;
}
/*
- * Convert a PIT entry into a CS entry (assumes that the entry is already in
- * the hashtable.) This is primarily here to maintain the internal counters.
+ * Remove a lock from the entry and delete it once there are no pending
+ * locks left
*/
always_inline void
-hicn_pit_to_cs (vlib_main_t * vm, hicn_pit_cs_t * pitcs,
- hicn_pcs_entry_t * pcs_entry, hicn_hash_entry_t * hash_entry,
- hicn_hash_node_t * node, const hicn_dpo_vft_t * dpo_vft,
- dpo_id_t * hicn_dpo_id, hicn_face_id_t inface_id, u8 is_appface)
+hicn_pcs_entry_remove_lock (hicn_pit_cs_t *pitcs, hicn_pcs_entry_t *pcs_entry)
{
+ // Make sure we are removing a lock on a valid entry
+ ASSERT (pcs_entry->locks > 0);
- /*
- * Different from the insert node. In here we don't need to add a new
- * hash entry.
- */
- pitcs->pcs_pit_count--;
- hicn_strategy_dpo_ctx_unlock (hicn_dpo_id);
- /* Flush faces */
- hicn_faces_flush (&(pcs_entry->u.pit.faces));
-
- hash_entry->he_flags |= HICN_HASH_ENTRY_FLAG_CS_ENTRY;
- node->hn_flags |= HICN_HASH_NODE_CS_FLAGS;
- pcs_entry->shared.entry_flags |= HICN_PCS_ENTRY_CS_FLAG;
-
- pcs_entry->u.cs.cs_rxface = inface_id;
-
- /* Update the CS according to the policy */
- hicn_cs_policy_t *policy_state;
- hicn_cs_policy_vft_t *policy_vft;
-
- policy_state = &pitcs->policy_state;
- policy_vft = &pitcs->policy_vft;
-
- policy_vft->hicn_cs_insert (pitcs, node, pcs_entry, policy_state);
- pitcs->pcs_cs_count++;
-
- if (policy_state->count > policy_state->max)
+ if (--pcs_entry->locks == 0)
{
- hicn_hash_node_t *node;
- hicn_pcs_entry_t *pcs_entry;
- hicn_hash_entry_t *hash_entry;
- policy_vft->hicn_cs_delete_get (pitcs, policy_state,
- &node, &pcs_entry, &hash_entry);
-
-
- /*
- * We don't have to decrease the lock (therefore we cannot
- * use hicn_pcs_cs_delete function)
- */
- policy_vft->hicn_cs_dequeue (pitcs, node, pcs_entry, policy_state);
-
- hicn_cs_delete_trimmed (pitcs, &pcs_entry, hash_entry, &node, vm);
-
- /* Update the global CS counter */
- pitcs->pcs_cs_count--;
+ hicn_pcs_delete_internal (pitcs, pcs_entry);
}
}
-/* Functions specific for PIT or CS */
+/************************************************************************
+ **************************** CS Entry APIs *****************************
+ ************************************************************************/
-always_inline void
-hicn_pcs_cs_update (vlib_main_t * vm, hicn_pit_cs_t * pitcs,
- hicn_pcs_entry_t * old_entry, hicn_pcs_entry_t * entry,
- hicn_hash_node_t * node)
+/*
+ * Create new CS entry
+ */
+always_inline hicn_pcs_entry_t *
+hicn_pcs_entry_cs_get (hicn_pit_cs_t *pitcs, f64 tnow, u32 buffer_index)
{
- hicn_cs_policy_t *policy_state;
- hicn_cs_policy_vft_t *policy_vft;
+ hicn_pcs_entry_t *ret = _hicn_pcs_entry_get (pitcs);
+ hicn_pcs_entry_init_data (ret, tnow);
+ ret->flags = HICN_PCS_ENTRY_CS_FLAG;
+ ret->u.cs.cs_lru_next = HICN_CS_POLICY_END_OF_CHAIN;
+ ret->u.cs.cs_lru_prev = HICN_CS_POLICY_END_OF_CHAIN;
- policy_state = &pitcs->policy_state;
- policy_vft = &pitcs->policy_vft;
+ return ret;
+}
- if (entry->u.cs.cs_rxface != old_entry->u.cs.cs_rxface)
- {
- /* Dequeue content from the old queue */
- policy_vft->hicn_cs_dequeue (pitcs, node, old_entry, policy_state);
-
- old_entry->u.cs.cs_rxface = entry->u.cs.cs_rxface;
- policy_state = &pitcs->policy_state;
- policy_vft = &pitcs->policy_vft;
-
- policy_vft->hicn_cs_insert (pitcs, node, old_entry, policy_state);
-
- if (policy_state->count > policy_state->max)
- {
- hicn_hash_node_t *node;
- hicn_pcs_entry_t *pcs_entry;
- hicn_hash_entry_t *hash_entry;
- policy_vft->hicn_cs_delete_get (pitcs, policy_state,
- &node, &pcs_entry, &hash_entry);
-
- /*
- * We don't have to decrease the lock (therefore we cannot
- * use hicn_pcs_cs_delete function)
- */
- policy_vft->hicn_cs_dequeue (pitcs, node, pcs_entry, policy_state);
-
- hicn_cs_delete_trimmed (pitcs, &pcs_entry, hash_entry, &node, vm);
-
- /* Update the global CS counter */
- pitcs->pcs_cs_count--;
- }
- }
- else
- /* Update the CS LRU, moving this item to the head */
- policy_vft->hicn_cs_update (pitcs, node, old_entry, policy_state);
+always_inline u32
+hicn_pcs_entry_cs_get_buffer (hicn_pcs_entry_t *pcs_entry)
+{
+ return pcs_entry->u.cs.cs_pkt_buf;
}
always_inline void
-hicn_pcs_cs_delete (vlib_main_t * vm, hicn_pit_cs_t * pitcs,
- hicn_pcs_entry_t ** pcs_entryp, hicn_hash_node_t ** nodep,
- hicn_hash_entry_t * hash_entry,
- const hicn_dpo_vft_t * dpo_vft, dpo_id_t * hicn_dpo_id)
+hicn_pcs_entry_cs_set_buffer (hicn_pcs_entry_t *pcs_entry, u32 buffer_index)
{
- if (!(hash_entry->he_flags & HICN_HASH_ENTRY_FLAG_DELETED))
- {
- hicn_cs_policy_t *policy_state;
- hicn_cs_policy_vft_t *policy_vft;
+ pcs_entry->u.cs.cs_pkt_buf = buffer_index;
+}
- policy_state = &pitcs->policy_state;
- policy_vft = &pitcs->policy_vft;
+always_inline u32
+hicn_pcs_entry_cs_get_next (hicn_pcs_entry_t *pcs_entry)
+{
+ return pcs_entry->u.cs.cs_lru_next;
+}
- policy_vft->hicn_cs_dequeue (pitcs, (*nodep), (*pcs_entryp),
- policy_state);
+always_inline void
+hicn_pcs_entry_cs_set_next (hicn_pcs_entry_t *pcs_entry, u32 next)
+{
+ pcs_entry->u.cs.cs_lru_next = next;
+}
- /* Update the global CS counter */
- pitcs->pcs_cs_count--;
- }
+always_inline u32
+hicn_pcs_entry_cs_get_prev (hicn_pcs_entry_t *pcs_entry)
+{
+ return pcs_entry->u.cs.cs_lru_prev;
+}
- /* A data could have been inserted in the CS through a push. In this case locks == 0 */
- hash_entry->locks--;
- if (hash_entry->locks == 0)
- {
- hicn_pcs_delete_internal
- (pitcs, pcs_entryp, hash_entry, nodep, vm, dpo_vft, hicn_dpo_id);
- }
- else
- {
- hash_entry->he_flags |= HICN_HASH_ENTRY_FLAG_DELETED;
- }
+always_inline void
+hicn_pcs_entry_cs_set_prev (hicn_pcs_entry_t *pcs_entry, u32 prev)
+{
+ pcs_entry->u.cs.cs_lru_prev = prev;
}
-always_inline int
-hicn_pcs_cs_insert (vlib_main_t * vm, hicn_pit_cs_t * pitcs,
- hicn_pcs_entry_t * entry, hicn_hash_node_t * node,
- hicn_hash_entry_t ** hash_entry, u64 hashval,
- u32 * node_id, index_t * dpo_ctx_id, u8 * vft_id,
- u8 * is_cs, u8 * hash_entry_id, u32 * bucket_id,
- u8 * bucket_is_overflow)
+/* Free the CS data block, releasing the held packet buffer */
+always_inline void
+hicn_pcs_entry_cs_free_data (hicn_pcs_entry_t *p)
{
- ASSERT (entry == hicn_hashtb_node_data (node));
+ CLIB_UNUSED (u32 bi) = hicn_pcs_entry_cs_get_buffer (p);
- int ret =
- hicn_hashtb_insert (pitcs->pcs_table, node, hash_entry, hashval, node_id,
- dpo_ctx_id, vft_id, is_cs, hash_entry_id, bucket_id,
- bucket_is_overflow);
+#ifndef HICN_PCS_TESTING
+ // Release buffer
+ vlib_buffer_free_one (vlib_get_main (), bi);
+#endif
- if (PREDICT_TRUE (ret == HICN_ERROR_NONE))
- {
- /* Mark the entry as a CS entry */
- node->hn_flags |= HICN_HASH_NODE_CS_FLAGS;
- entry->shared.entry_flags |= HICN_PCS_ENTRY_CS_FLAG;
- (*hash_entry)->he_flags |= HICN_HASH_ENTRY_FLAG_CS_ENTRY;
-
- hicn_cs_policy_t *policy_state;
- hicn_cs_policy_vft_t *policy_vft;
-
- policy_state = &pitcs->policy_state;
- policy_vft = &pitcs->policy_vft;
-
- policy_vft->hicn_cs_insert (pitcs, node, entry, policy_state);
- pitcs->pcs_cs_count++;
-
- if (policy_state->count > policy_state->max)
- {
- hicn_hash_node_t *node;
- hicn_pcs_entry_t *pcs_entry;
- hicn_hash_entry_t *hash_entry;
- policy_vft->hicn_cs_delete_get (pitcs, policy_state,
- &node, &pcs_entry, &hash_entry);
-
- /*
- * We don't have to decrease the lock (therefore we cannot
- * use hicn_pcs_cs_delete function)
- */
- policy_vft->hicn_cs_dequeue (pitcs, node, pcs_entry, policy_state);
-
- hicn_cs_delete_trimmed (pitcs, &pcs_entry, hash_entry, &node, vm);
-
- /* Update the global CS counter */
- pitcs->pcs_cs_count--;
- }
- }
- return ret;
+ // Reset the vlib_buffer index
+ hicn_pcs_entry_cs_set_buffer (p, ~0);
}
+/************************************************************************
+ **************************** PIT Entry APIs ****************************
+ ************************************************************************/
+
/*
- * Insert CS entry into the hashtable The main purpose of this wrapper is
- * helping maintain the per-PIT stats.
+ * Create and initialize a new PIT entry
*/
-always_inline int
-hicn_pcs_cs_insert_update (vlib_main_t * vm, hicn_pit_cs_t * pitcs,
- hicn_pcs_entry_t * entry, hicn_hash_node_t * node,
- hicn_hash_entry_t ** hash_entry, u64 hashval,
- u32 * node_id, index_t * dpo_ctx_id, u8 * vft_id,
- u8 * is_cs, u8 * hash_entry_id, u32 * bucket_id,
- u8 * bucket_is_overflow, hicn_face_id_t inface)
+always_inline hicn_pcs_entry_t *
+hicn_pcs_entry_pit_get (hicn_pit_cs_t *pitcs, f64 tnow,
+ hicn_lifetime_t lifetime)
{
- int ret;
-
- ASSERT (entry == hicn_hashtb_node_data (node));
-
- entry->u.cs.cs_rxface = inface;
- ret =
- hicn_pcs_cs_insert (vm, pitcs, entry, node, hash_entry, hashval, node_id,
- dpo_ctx_id, vft_id, is_cs, hash_entry_id, bucket_id,
- bucket_is_overflow);
+ hicn_pcs_entry_t *ret = _hicn_pcs_entry_get (pitcs);
+ hicn_pcs_entry_init_data (ret, tnow);
+ clib_memset_u64 (ret->u.pit.bitmap, 0, HICN_PIT_BITMAP_SIZE_U64);
+ ret->u.pit.n_faces = 0;
+ ret->expire_time = hicn_pcs_get_exp_time (tnow, lifetime);
- /* A content already exists in CS with the same name */
- if (ret == HICN_ERROR_HASHTB_EXIST && *is_cs)
- {
- /* Update the entry */
- hicn_hash_node_t *existing_node =
- hicn_hashtb_node_from_idx (pitcs->pcs_table, *node_id);
- hicn_pcs_entry_t *pitp = hicn_pit_get_data (existing_node);
-
- /* Free associated packet buffer and update counter */
- pitcs->pcs_cs_dealloc++;
- vlib_buffer_free_one (vm, pitp->u.cs.cs_pkt_buf);
-
- pitp->shared.create_time = entry->shared.create_time;
- pitp->shared.expire_time = entry->shared.expire_time;
- pitp->u.cs.cs_pkt_buf = entry->u.cs.cs_pkt_buf;
+ return ret;
+}
- hicn_pcs_cs_update (vm, pitcs, pitp, entry, existing_node);
- }
+/*
+ * Free pit/cs data block
+ */
+always_inline void
+hicn_pcs_entry_pit_free_data (hicn_pcs_entry_t *p)
+{
+ // Nothing to do for the moment
+}
- return (ret);
+always_inline u32
+hicn_pcs_entry_pit_get_n_faces (hicn_pcs_entry_t *p)
+{
+ return p->u.pit.n_faces;
}
/*
- * Insert PIT entry into the hashtable The main purpose of this wrapper is
- * helping maintain the per-PIT stats.
+ * Get the face id stored at position 'index' in the PIT entry face database
*/
-always_inline int
-hicn_pcs_pit_insert (hicn_pit_cs_t * pitcs, hicn_pcs_entry_t * entry,
- hicn_hash_node_t * node, hicn_hash_entry_t ** hash_entry,
- u64 hashval, u32 * node_id, index_t * dpo_ctx_id,
- u8 * vft_id, u8 * is_cs, u8 * hash_entry_id,
- u32 * bucket_id, u8 * bucket_is_overflow)
+always_inline hicn_face_id_t
+hicn_pcs_entry_pit_get_dpo_face (const hicn_pcs_entry_t *pit_entry, u32 index)
{
- ASSERT (entry == hicn_hashtb_node_data (node));
-
- int ret =
- hicn_hashtb_insert (pitcs->pcs_table, node, hash_entry, hashval, node_id,
- dpo_ctx_id, vft_id, is_cs, hash_entry_id, bucket_id,
- bucket_is_overflow);
+ // Make sure the entry is PIT
+ ASSERT (!hicn_pcs_entry_is_cs (pit_entry));
- if (PREDICT_TRUE (ret == HICN_ERROR_NONE))
- pitcs->pcs_pit_count++;
+ // Make sure the index is valid
+ ASSERT (index < pit_entry->u.pit.n_faces);
- return ret;
+ if (index < HICN_FACE_DB_INLINE_FACES)
+ return pit_entry->u.pit.inline_faces[index];
+ else
+ return pit_entry->u.pit.faces[index - HICN_FACE_DB_INLINE_FACES];
}
always_inline void
-hicn_pcs_pit_delete (hicn_pit_cs_t * pitcs, hicn_pcs_entry_t ** pcs_entryp,
- hicn_hash_node_t ** node, vlib_main_t * vm,
- hicn_hash_entry_t * hash_entry,
- const hicn_dpo_vft_t * dpo_vft, dpo_id_t * hicn_dpo_id)
+hicn_pcs_entry_pit_add_face (hicn_pcs_entry_t *pit_entry,
+ hicn_face_id_t face_id)
{
- hash_entry->locks--;
- if (hash_entry->locks == 0)
+ ASSERT (face_id < HICN_PARAM_FACES_MAX);
+
+ if (pit_entry->u.pit.n_faces < HICN_FACE_DB_INLINE_FACES)
{
- pitcs->pcs_pit_count--;
- hicn_pcs_delete_internal
- (pitcs, pcs_entryp, hash_entry, node, vm, dpo_vft, hicn_dpo_id);
+ pit_entry->u.pit.inline_faces[pit_entry->u.pit.n_faces] = face_id;
}
else
{
- hash_entry->he_flags |= HICN_HASH_ENTRY_FLAG_DELETED;
+ vec_validate_aligned (pit_entry->u.pit.faces,
+ pit_entry->u.pit.n_faces -
+ HICN_FACE_DB_INLINE_FACES,
+ CLIB_CACHE_LINE_BYTES);
+ pit_entry->u.pit
+ .faces[pit_entry->u.pit.n_faces - HICN_FACE_DB_INLINE_FACES] = face_id;
}
-}
+ pit_entry->u.pit.n_faces++;
-/* Generic functions for PIT/CS */
+ clib_bitmap_set_no_check (pit_entry->u.pit.bitmap, face_id, 1);
+}
/*
- * Insert PIT/CS entry into the hashtable The main purpose of this wrapper is
- * helping maintain the per-PIT stats.
+ * Search face in db
+ */
+always_inline u8
+hicn_pcs_entry_pit_search (const hicn_pcs_entry_t *pit_entry,
+ hicn_face_id_t face_id)
+{
+ ASSERT (face_id < HICN_PARAM_FACES_MAX);
+ return clib_bitmap_get_no_check ((uword *) pit_entry->u.pit.bitmap, face_id);
+}
+
+/************************************************************************
+ **************************** Lookup API ********************************
+ ************************************************************************/
+
+/**
+ * @brief Perform one lookup in the PIT/CS table using the provided name.
+ *
+ * @param pitcs the PIT/CS table
+ * @param name the name to lookup
+ * @param pcs_entry [RETURN] if the entry exists, the entry is returned
+ * @return HICN_ERROR_NONE if the entry is found, HICN_ERROR_PCS_NOT_FOUND
+ * otherwise
*/
always_inline int
-hicn_pcs_insert (vlib_main_t * vm, hicn_pit_cs_t * pitcs,
- hicn_pcs_entry_t * entry, hicn_hash_node_t * node,
- hicn_hash_entry_t ** hash_entry, u64 hashval, u32 * node_id,
- index_t * dpo_ctx_id, u8 * vft_id, u8 * is_cs,
- u8 * hash_entry_id, u32 * bucket_id, u8 * bucket_is_overflow)
+hicn_pcs_lookup_one (hicn_pit_cs_t *pitcs, const hicn_name_t *name,
+ hicn_pcs_entry_t **pcs_entry)
{
int ret;
- if ((*hash_entry)->he_flags & HICN_HASH_ENTRY_FLAG_CS_ENTRY)
- {
- ret =
- hicn_pcs_cs_insert (vm, pitcs, entry, node, hash_entry, hashval,
- node_id, dpo_ctx_id, vft_id, is_cs, hash_entry_id,
- bucket_id, bucket_is_overflow);
- }
- else
+ // Construct the lookup key
+ clib_bihash_kv_24_8_t kv;
+ hicn_pcs_get_key_from_name (&kv, name);
+
+  // Do a search in the hash table
+ ret = clib_bihash_search_inline_24_8 (&pitcs->pcs_table, &kv);
+
+ if (PREDICT_FALSE (ret != 0))
{
- ret =
- hicn_pcs_pit_insert (pitcs, entry, node, hash_entry, hashval, node_id,
- dpo_ctx_id, vft_id, is_cs, hash_entry_id,
- bucket_id, bucket_is_overflow);
+ *pcs_entry = NULL;
+ return HICN_ERROR_PCS_NOT_FOUND;
}
- return (ret);
-}
+ // Retrieve entry from pool
+ *pcs_entry = hicn_pcs_entry_get_entry_from_index (pitcs, (u32) (kv.value));
+ // If the search is successful, we MUST find the entry in the pool.
+  ALWAYS_ASSERT (*pcs_entry);
-/*
- * Delete entry if there are no pending lock on the entry, otherwise mark it
- * as to delete.
- */
-always_inline void
-hicn_pcs_delete (hicn_pit_cs_t * pitcs, hicn_pcs_entry_t ** pcs_entryp,
- hicn_hash_node_t ** nodep, vlib_main_t * vm,
- hicn_hash_entry_t * hash_entry,
- const hicn_dpo_vft_t * dpo_vft, dpo_id_t * hicn_dpo_id)
-{
- /*
- * If the entry has already been marked as deleted, it has already
- * been dequeue
- */
- if (hash_entry->he_flags & HICN_HASH_ENTRY_FLAG_CS_ENTRY)
- {
- hicn_pcs_cs_delete (vm, pitcs, pcs_entryp, nodep, hash_entry,
- dpo_vft, hicn_dpo_id);
- }
- else
+ // If entry found and it is a CS entry, let's update the LRU
+ if (hicn_pcs_entry_is_cs (*pcs_entry))
{
- hicn_pcs_pit_delete (pitcs, pcs_entryp, nodep, vm,
- hash_entry, dpo_vft, hicn_dpo_id);
+ hicn_pcs_cs_update_lru (pitcs, *pcs_entry);
}
-}
-/*
- * Remove a lock in the entry and delete it if there are no pending lock and
- * the entry is marked as to be deleted
- */
-always_inline void
-hicn_pcs_remove_lock (hicn_pit_cs_t * pitcs, hicn_pcs_entry_t ** pcs_entryp,
- hicn_hash_node_t ** node, vlib_main_t * vm,
- hicn_hash_entry_t * hash_entry,
- const hicn_dpo_vft_t * dpo_vft, dpo_id_t * hicn_dpo_id)
-{
- hash_entry->locks--;
- if (hash_entry->locks == 0
- && (hash_entry->he_flags & HICN_HASH_ENTRY_FLAG_DELETED))
- {
- hicn_pcs_delete_internal
- (pitcs, pcs_entryp, hash_entry, node, vm, dpo_vft, hicn_dpo_id);
- }
+ // If the entry is found, return it
+ return HICN_ERROR_NONE;
}
+/************************************************************************
+ **************************** PCS Delete API ****************************
+ ************************************************************************/
+
/*
- * Delete entry which has already been bulk-removed from lru list
+ * Delete a PIT/CS entry: dequeue it from the LRU and free its buffer (CS
+ * only), remove it from the hashtable and return the entry to the pool.
+ * The main purpose of this wrapper is helping maintain the per-PIT stats.
*/
always_inline void
-hicn_cs_delete_trimmed (hicn_pit_cs_t * pitcs, hicn_pcs_entry_t ** pcs_entryp,
- hicn_hash_entry_t * hash_entry,
- hicn_hash_node_t ** node, vlib_main_t * vm)
+hicn_pcs_delete_internal (hicn_pit_cs_t *pitcs, hicn_pcs_entry_t *pcs_entry)
{
+ if (pcs_entry->flags & HICN_PCS_ENTRY_CS_FLAG)
+ {
+ // Remove entry from LRU list
+ hicn_pcs_cs_dequeue_lru (pitcs, pcs_entry);
+ // Update counters
+ pitcs->pcs_cs_count--;
- if (hash_entry->locks == 0)
- {
- const hicn_dpo_vft_t *dpo_vft = hicn_dpo_get_vft (hash_entry->vft_id);
- dpo_id_t hicn_dpo_id =
- { dpo_vft->hicn_dpo_get_type (), 0, 0, hash_entry->dpo_ctx_id };
+ // Free data
+ hicn_pcs_entry_cs_free_data (pcs_entry);
- hicn_pcs_delete_internal
- (pitcs, pcs_entryp, hash_entry, node, vm, dpo_vft, &hicn_dpo_id);
+ // Sanity check
+ ASSERT ((pcs_entry->u.cs.cs_lru_prev == HICN_CS_POLICY_END_OF_CHAIN) &&
+ (pcs_entry->u.cs.cs_lru_prev == pcs_entry->u.cs.cs_lru_next));
}
else
{
- hash_entry->he_flags |= HICN_HASH_ENTRY_FLAG_DELETED;
+ // Update counters
+ pitcs->pcs_pit_count--;
+ // Flush faces
+ // hicn_faces_flush (&(pcs_entry->u.pit.faces));
}
-}
-/*
- * wrappable counter math (assumed uint16_t): return sum of addends
- */
-always_inline u16
-hicn_infra_seq16_sum (u16 addend1, u16 addend2)
-{
- return (addend1 + addend2);
+ // Delete entry from hash table
+ clib_bihash_kv_24_8_t kv;
+ hicn_pcs_get_key_from_name (&kv, &pcs_entry->name);
+ clib_bihash_add_del_24_8 (&pitcs->pcs_table, &kv, 0 /* is_add */);
+
+ // Free pool entry
+ hicn_pcs_entry_put (pitcs, pcs_entry);
}
-/*
- * for comparing wrapping numbers, return lt,eq,gt 0 for a lt,eq,gt b
- */
+/************************************************************************
+ **************************** PCS Insert API ****************************
+ ************************************************************************/
+
always_inline int
-hicn_infra_seq16_cmp (u16 a, u16 b)
+hicn_pcs_insert (hicn_pit_cs_t *pitcs, hicn_pcs_entry_t *entry,
+ const hicn_name_t *name)
{
- return ((int16_t) (a - b));
+ clib_bihash_kv_24_8_t kv;
+ u32 index = hicn_pcs_entry_get_index (pitcs, entry);
+
+ // Construct KV pair and try to add it to hash table
+ hicn_pcs_get_key_from_name (&kv, name);
+ kv.value = index;
+
+ // Get name hash
+ entry->name_hash = clib_bihash_hash_24_8 (&kv);
+ entry->name = *name;
+
+ return clib_bihash_add_del_24_8 (&pitcs->pcs_table, &kv,
+ 2 /* add_but_not_replace */);
}
-/*
- * below are wrappers for lt, le, gt, ge seq16 comparators
+/**
+ * @brief Insert a CS entry in the PIT/CS table. This function DOES NOT check
+ * if the KV is already present in the table. It expects the caller to check
+ * this before trying to insert the new entry.
+ *
+ * @param pitcs the PIT/CS table
+ * @param entry the entry to insert
+ * @param name the name to use to compute the key
+ * @return HICN_ERROR_NONE on success
*/
always_inline int
-hicn_infra_seq16_lt (u16 a, u16 b)
+hicn_pcs_cs_insert (hicn_pit_cs_t *pitcs, hicn_pcs_entry_t *entry,
+ const hicn_name_t *name)
{
- return (hicn_infra_seq16_cmp (a, b) < 0);
-}
+ // Make sure this is a CS entry
+ ASSERT (hicn_pcs_entry_is_cs (entry));
-always_inline int
-hicn_infra_seq16_le (u16 a, u16 b)
-{
- return (hicn_infra_seq16_cmp (a, b) <= 0);
-}
+ int ret = hicn_pcs_insert (pitcs, entry, name);
-always_inline int
-hicn_infra_seq16_gt (u16 a, u16 b)
-{
- return (hicn_infra_seq16_cmp (a, b) > 0);
+ // Make sure insertion happened
+ ASSERT (ret == 0);
+
+ // New entry, update LRU
+ hicn_pcs_cs_insert_lru (pitcs, entry);
+
+ return HICN_ERROR_NONE;
}
+/**
+ * @brief Insert a PIT entry in the PIT/CS table. This function DOES NOT check
+ * if the KV is already present in the table. It is expected the caller checks
+ * this before trying to insert the new entry.
+ *
+ * @param pitcs the PIT/CS table
+ * @param entry the entry to insert
+ * @param name the name to use to compute the key
+ * @return HICN_ERROR_NONE on success
+ */
always_inline int
-hicn_infra_seq16_ge (u16 a, u16 b)
+hicn_pcs_pit_insert (hicn_pit_cs_t *pitcs, hicn_pcs_entry_t *entry,
+ const hicn_name_t *name)
{
- return (hicn_infra_seq16_cmp (a, b) >= 0);
-}
-
+ // Insert entry into hash table
+ int ret = hicn_pcs_insert (pitcs, entry, name);
-extern u16 hicn_infra_fast_timer; /* Counts at 1 second intervals */
-extern u16 hicn_infra_slow_timer; /* Counts at 1 minute intervals */
+ // Make sure insertion happened
+ ASSERT (ret == 0);
-/*
- * Utilities to convert lifetime into expiry time based on compressed clock,
- * suitable for the opportunistic hashtable entry timeout processing.
- */
+ // Increment the number of PIT entries if insertion happened
+ pitcs->pcs_pit_count++;
-//convert time in msec to time in clicks
-always_inline u16
-hicn_infra_ms2clicks (u64 time_ms, u64 ms_per_click)
-{
- f64 time_clicks =
- ((f64) (time_ms + ms_per_click - 1)) / ((f64) ms_per_click);
- return ((u16) time_clicks);
+ return HICN_ERROR_NONE;
}
-always_inline u16
-hicn_infra_get_fast_exp_time (u64 lifetime_ms)
-{
- u16 lifetime_clicks =
- hicn_infra_ms2clicks (lifetime_ms, HICN_INFRA_FAST_TIMER_MSECS);
- return (hicn_infra_seq16_sum (hicn_infra_fast_timer, lifetime_clicks));
-}
+/************************************************************************
+ ************************** PCS Conversion API **************************
+ ************************************************************************/
-always_inline u16
-hicn_infra_get_slow_exp_time (u64 lifetime_ms)
+/**
+ * @brief Convert a PIT entry to a CS entry.
+ *
+ * @param pitcs the PIT/CS table
+ * @param pit_entry the PIT entry to convert in place
+ * @param buffer_index vlib buffer holding the data to be cached
+ */
+always_inline void
+hicn_pit_to_cs (hicn_pit_cs_t *pitcs, hicn_pcs_entry_t *pit_entry,
+ u32 buffer_index)
{
- u16 lifetime_clicks =
- hicn_infra_ms2clicks (lifetime_ms, HICN_INFRA_SLOW_TIMER_MSECS);
- return (hicn_infra_seq16_sum (hicn_infra_slow_timer, lifetime_clicks));
+ // Different from the insert node. In here we don't need to add a new
+ // hash entry.
+ pitcs->pcs_pit_count--;
+
+ // Flush faces
+ // hicn_faces_flush (&(pit_entry->u.pit.faces));
+
+ // Set the flags
+ pit_entry->flags = HICN_PCS_ENTRY_CS_FLAG;
+
+ // Set the buffer index
+ pit_entry->u.cs.cs_pkt_buf = buffer_index;
+
+ hicn_pcs_cs_insert_lru (pitcs, pit_entry);
}
-#endif /* // __HICN_PCS_H__ */
+#endif /* __HICN_PCS_H__ */
/*
* fd.io coding-style-patch-verification: ON
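
To tie the new pcs.h API together, a hedged sketch of the expected interest/data call flow; the surrounding node logic, error handling and variable names are illustrative assumptions, only the hicn_pcs_* and hicn_pit_to_cs calls come from the header above.

static_always_inline void
example_pcs_flow (hicn_pit_cs_t *pitcs, const hicn_name_t *name,
		  hicn_face_id_t in_face, f64 tnow, hicn_lifetime_t lifetime,
		  u32 data_buffer_index)
{
  hicn_pcs_entry_t *entry = NULL;

  if (hicn_pcs_lookup_one (pitcs, name, &entry) == HICN_ERROR_PCS_NOT_FOUND)
    {
      /* Interest path: take an entry from the pool, record the ingress
       * face and publish it in the bihash. */
      entry = hicn_pcs_entry_pit_get (pitcs, tnow, lifetime);
      hicn_pcs_entry_pit_add_face (entry, in_face);
      hicn_pcs_pit_insert (pitcs, entry, name);
    }
  else if (!hicn_pcs_entry_is_cs (entry))
    {
      /* Data path: the pending PIT entry is converted in place into a CS
       * entry holding the data buffer; the LRU insertion (and possible
       * eviction) happens inside hicn_pit_to_cs(). */
      hicn_pit_to_cs (pitcs, entry, data_buffer_index);
    }
}
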
diff --git a/hicn-plugin/src/pg.c b/hicn-plugin/src/pg.c
index 9938e85ba..e3dda0520 100644
--- a/hicn-plugin/src/pg.c
+++ b/hicn-plugin/src/pg.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Copyright (c) 2021 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -18,385 +18,54 @@
#include <vnet/pg/pg.h>
#include <vnet/ip/ip.h>
#include <vnet/ethernet/ethernet.h>
+#include <vnet/fib/fib_entry_track.h>
#include "hicn.h"
#include "pg.h"
#include "parser.h"
#include "infra.h"
+#include "route.h"
-/* Registration struct for a graph node */
-vlib_node_registration_t hicn_pg_interest_node;
-vlib_node_registration_t hicn_pg_data_node;
+hicnpg_main_t hicnpg_main = { .index = (u32) 0,
+ .index_ifaces = (u32) 1,
+ .max_seq_number = (u32) ~0,
+ .interest_lifetime = 4,
+ .n_flows = (u32) 0,
+ .n_ifaces = (u32) 1,
+ .sw_if = (u32) 0 };
-/* Stats, which end up called "error" even though they aren't... */
-#define foreach_hicnpg_error \
- _(PROCESSED, "hICN PG packets processed") \
- _(DROPPED, "hICN PG packets dropped") \
- _(INTEREST_MSGS_GENERATED, "hICN PG Interests generated") \
- _(CONTENT_MSGS_RECEIVED, "hICN PG Content msgs received")
-
-typedef enum
-{
-#define _(sym,str) HICNPG_ERROR_##sym,
- foreach_hicnpg_error
-#undef _
- HICNPG_N_ERROR,
-} hicnpg_error_t;
-
-static char *hicnpg_error_strings[] = {
-#define _(sym,string) string,
- foreach_hicnpg_error
-#undef _
-};
-
-/*
- * Next graph nodes, which reference the list in the actual registration
- * block below
+/**
+ * Pool of hicnpg_server_t
*/
-typedef enum
-{
- HICNPG_INTEREST_NEXT_V4_LOOKUP,
- HICNPG_INTEREST_NEXT_V6_LOOKUP,
- HICNPG_INTEREST_NEXT_DROP,
- HICNPG_N_NEXT,
-} hicnpg_interest_next_t;
+hicnpg_server_t *hicnpg_server_pool;
-/* Trace context struct */
-typedef struct
-{
- u32 next_index;
- u32 sw_if_index;
- u8 pkt_type;
- u16 msg_type;
-} hicnpg_trace_t;
-
-hicnpg_main_t hicnpg_main = {
- .index = (u32) 0,
- .index_ifaces = (u32) 1,
- .max_seq_number = (u32) ~ 0,
- .interest_lifetime = 4,
- .n_flows = (u32) 0,
- .n_ifaces = (u32) 1,
- .sw_if = (u32) 0
-};
-
-hicnpg_server_main_t hicnpg_server_main = {
- .node_index = 0,
-};
-
-/* packet trace format function */
-static u8 *
-format_hicnpg_trace (u8 * s, va_list * args)
-{
- CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
- CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
- hicnpg_trace_t *t = va_arg (*args, hicnpg_trace_t *);
-
- s = format (s, "HICNPG: pkt: %d, msg %d, sw_if_index %d, next index %d",
- (int) t->pkt_type, (int) t->msg_type,
- t->sw_if_index, t->next_index);
- return (s);
-}
-
-always_inline void
-hicn_rewrite_interestv4 (vlib_main_t * vm, vlib_buffer_t * b0, u32 seq_number,
- u16 lifetime, u32 next_flow, u32 iface);
-
-always_inline void
-hicn_rewrite_interestv6 (vlib_main_t * vm, vlib_buffer_t * b0, u32 seq_number,
- u16 lifetime, u32 next_flow, u32 iface);
-
-always_inline void
-convert_interest_to_data_v4 (vlib_main_t * vm, vlib_buffer_t * b0,
- vlib_buffer_t * rb, u32 bi0);
-
-always_inline void
-convert_interest_to_data_v6 (vlib_main_t * vm, vlib_buffer_t * b0,
- vlib_buffer_t * rb, u32 bi0);
-
-always_inline void
-calculate_tcp_checksum_v4 (vlib_main_t * vm, vlib_buffer_t * b0);
-
-always_inline void
-calculate_tcp_checksum_v6 (vlib_main_t * vm, vlib_buffer_t * b0);
/*
- * Node function for the icn packet-generator client. The goal here is to
- * manipulate/tweak a stream of packets that have been injected by the vpp
- * packet generator to generate icn request traffic.
+ * hicnpg server FIB node type
*/
-static uword
-hicnpg_client_interest_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
- vlib_frame_t * frame)
-{
- u32 n_left_from, *from, *to_next;
- hicnpg_interest_next_t next_index;
- u32 pkts_processed = 0, pkts_dropped = 0;
- u32 interest_msgs_generated = 0;
- u32 bi0, bi1;
- vlib_buffer_t *b0, *b1;
- u8 pkt_type0 = 0, pkt_type1 = 0;
- u16 msg_type0 = 0, msg_type1 = 0;
- hicn_header_t *hicn0 = NULL, *hicn1 = NULL;
- hicn_name_t name0, name1;
- u16 namelen0, namelen1;
- hicnpg_main_t *hpgm = &hicnpg_main;
- int iface = 0;
-
- from = vlib_frame_vector_args (frame);
- n_left_from = frame->n_vectors;
- next_index = node->cached_next_index;
-
- while (n_left_from > 0)
- {
- u32 n_left_to_next;
-
- vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
-
- while (n_left_from >= 4 && n_left_to_next >= 2)
- {
- u32 next0 = HICNPG_INTEREST_NEXT_DROP;
- u32 next1 = HICNPG_INTEREST_NEXT_DROP;
- u32 sw_if_index0 = ~0, sw_if_index1 = ~0;
- u8 isv6_0;
- u8 isv6_1;
-
- /* Prefetch next iteration. */
- {
- vlib_buffer_t *p2, *p3;
+fib_node_type_t hicnpg_server_fib_node_type;
- p2 = vlib_get_buffer (vm, from[2]);
- p3 = vlib_get_buffer (vm, from[3]);
-
- vlib_prefetch_buffer_header (p2, LOAD);
- vlib_prefetch_buffer_header (p3, LOAD);
-
- CLIB_PREFETCH (p2->data, (2 * CLIB_CACHE_LINE_BYTES), STORE);
- CLIB_PREFETCH (p3->data, (2 * CLIB_CACHE_LINE_BYTES), STORE);
- }
-
- /*
- * speculatively enqueue b0 and b1 to the current
- * next frame
- */
- to_next[0] = bi0 = from[0];
- to_next[1] = bi1 = from[1];
- from += 2;
- to_next += 2;
- n_left_from -= 2;
- n_left_to_next -= 2;
-
- b0 = vlib_get_buffer (vm, bi0);
- b1 = vlib_get_buffer (vm, bi1);
-
- sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
- sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
- vnet_buffer (b0)->sw_if_index[VLIB_RX] = hpgm->sw_if;
- vnet_buffer (b1)->sw_if_index[VLIB_RX] = hpgm->sw_if;
-
- /* Check icn packets, locate names */
- if (hicn_interest_parse_pkt (b0, &name0, &namelen0, &hicn0, &isv6_0)
- == HICN_ERROR_NONE)
- {
- /* this node grabs only interests */
-
- /* Increment the appropriate message counter */
- interest_msgs_generated++;
-
- iface = (hpgm->index_ifaces % hpgm->n_ifaces);
- /* Rewrite and send */
- isv6_0 ? hicn_rewrite_interestv6 (vm, b0,
- (hpgm->index /
- hpgm->n_flows) %
- hpgm->max_seq_number,
- hpgm->interest_lifetime,
- hpgm->index % hpgm->n_flows,
- iface) :
- hicn_rewrite_interestv4 (vm, b0,
- (hpgm->index / hpgm->n_flows) %
- hpgm->max_seq_number,
- hpgm->interest_lifetime,
- hpgm->index % hpgm->n_flows, iface);
-
- hpgm->index_ifaces++;
- if (iface == (hpgm->n_ifaces - 1))
- hpgm->index++;
-
- next0 =
- isv6_0 ? HICNPG_INTEREST_NEXT_V6_LOOKUP :
- HICNPG_INTEREST_NEXT_V4_LOOKUP;
- }
- if (hicn_interest_parse_pkt (b1, &name1, &namelen1, &hicn1, &isv6_1)
- == HICN_ERROR_NONE)
- {
- /* this node grabs only interests */
-
- /* Increment the appropriate message counter */
- interest_msgs_generated++;
-
- iface = (hpgm->index_ifaces % hpgm->n_ifaces);
- /* Rewrite and send */
- isv6_1 ? hicn_rewrite_interestv6 (vm, b1,
- (hpgm->index /
- hpgm->n_flows) %
- hpgm->max_seq_number,
- hpgm->interest_lifetime,
- hpgm->index % hpgm->n_flows,
- iface) :
- hicn_rewrite_interestv4 (vm, b1,
- (hpgm->index / hpgm->n_flows) %
- hpgm->max_seq_number,
- hpgm->interest_lifetime,
- hpgm->index % hpgm->n_flows, iface);
-
- hpgm->index_ifaces++;
- if (iface == (hpgm->n_ifaces - 1))
- hpgm->index++;
-
- next1 =
- isv6_1 ? HICNPG_INTEREST_NEXT_V6_LOOKUP :
- HICNPG_INTEREST_NEXT_V4_LOOKUP;
- }
- /* Send pkt to next node */
- vnet_buffer (b0)->sw_if_index[VLIB_TX] = ~0;
- vnet_buffer (b1)->sw_if_index[VLIB_TX] = ~0;
-
- pkts_processed += 2;
-
- if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
- {
- if (b0->flags & VLIB_BUFFER_IS_TRACED)
- {
- hicnpg_trace_t *t =
- vlib_add_trace (vm, node, b0, sizeof (*t));
- t->pkt_type = pkt_type0;
- t->msg_type = msg_type0;
- t->sw_if_index = sw_if_index0;
- t->next_index = next0;
- }
- if (b1->flags & VLIB_BUFFER_IS_TRACED)
- {
- hicnpg_trace_t *t =
- vlib_add_trace (vm, node, b1, sizeof (*t));
- t->pkt_type = pkt_type1;
- t->msg_type = msg_type1;
- t->sw_if_index = sw_if_index1;
- t->next_index = next1;
- }
- }
- if (next0 == HICNPG_INTEREST_NEXT_DROP)
- {
- pkts_dropped++;
- }
- if (next1 == HICNPG_INTEREST_NEXT_DROP)
- {
- pkts_dropped++;
- }
- /*
- * verify speculative enqueues, maybe switch current
- * next frame
- */
- vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
- to_next, n_left_to_next,
- bi0, bi1, next0, next1);
- }
-
- while (n_left_from > 0 && n_left_to_next > 0)
- {
- u32 next0 = HICNPG_INTEREST_NEXT_DROP;
- u32 sw_if_index0;
- u8 isv6_0;
-
- /* speculatively enqueue b0 to the current next frame */
- bi0 = from[0];
- to_next[0] = bi0;
- from += 1;
- to_next += 1;
- n_left_from -= 1;
- n_left_to_next -= 1;
-
- b0 = vlib_get_buffer (vm, bi0);
-
- sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
- vnet_buffer (b0)->sw_if_index[VLIB_RX] = hpgm->sw_if;
-
- /* Check icn packets, locate names */
- if (hicn_interest_parse_pkt (b0, &name0, &namelen0, &hicn0, &isv6_0)
- == HICN_ERROR_NONE)
- {
- /* this node grabs only interests */
-
- /* Increment the appropriate message counter */
- interest_msgs_generated++;
-
- iface = (hpgm->index_ifaces % hpgm->n_ifaces);
-
- /* Rewrite and send */
- isv6_0 ? hicn_rewrite_interestv6 (vm, b0,
- (hpgm->index /
- hpgm->n_flows) %
- hpgm->max_seq_number,
- hpgm->interest_lifetime,
- hpgm->index % hpgm->n_flows,
- iface) :
- hicn_rewrite_interestv4 (vm, b0,
- (hpgm->index / hpgm->n_flows) %
- hpgm->max_seq_number,
- hpgm->interest_lifetime,
- hpgm->index % hpgm->n_flows, iface);
-
- hpgm->index_ifaces++;
- if (iface == (hpgm->n_ifaces - 1))
- hpgm->index++;
-
- next0 =
- isv6_0 ? HICNPG_INTEREST_NEXT_V6_LOOKUP :
- HICNPG_INTEREST_NEXT_V4_LOOKUP;
- }
- /* Send pkt to ip lookup */
- vnet_buffer (b0)->sw_if_index[VLIB_TX] = ~0;
-
- if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
- && (b0->flags & VLIB_BUFFER_IS_TRACED)))
- {
- hicnpg_trace_t *t = vlib_add_trace (vm, node, b0, sizeof (*t));
- t->pkt_type = pkt_type0;
- t->msg_type = msg_type0;
- t->sw_if_index = sw_if_index0;
- t->next_index = next0;
- }
- pkts_processed += 1;
-
- if (next0 == HICNPG_INTEREST_NEXT_DROP)
- {
- pkts_dropped++;
- }
- /*
- * verify speculative enqueue, maybe switch current
- * next frame
- */
- vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
- to_next, n_left_to_next,
- bi0, next0);
- }
-
- vlib_put_next_frame (vm, node, next_index, n_left_to_next);
- }
-
- vlib_node_increment_counter (vm, hicn_pg_interest_node.index,
- HICNPG_ERROR_PROCESSED, pkts_processed);
- vlib_node_increment_counter (vm, hicn_pg_interest_node.index,
- HICNPG_ERROR_DROPPED, pkts_dropped);
- vlib_node_increment_counter (vm, hicn_pg_interest_node.index,
- HICNPG_ERROR_INTEREST_MSGS_GENERATED,
- interest_msgs_generated);
+/**
+ * Registered DPO type.
+ */
+dpo_type_t hicnpg_server_dpo_type;
- return (frame->n_vectors);
+static void
+hicnpg_server_restack (hicnpg_server_t *hicnpg_server)
+{
+ dpo_stack (
+ hicnpg_server_dpo_type, fib_proto_to_dpo (hicnpg_server->prefix.fp_proto),
+ &hicnpg_server->dpo,
+ fib_entry_contribute_ip_forwarding (hicnpg_server->fib_entry_index));
}
-void
-hicn_rewrite_interestv4 (vlib_main_t * vm, vlib_buffer_t * b0, u32 seq_number,
- u16 interest_lifetime, u32 next_flow, u32 iface)
+static hicnpg_server_t *
+hicnpg_server_from_fib_node (fib_node_t *node)
{
+#if 1
+ ASSERT (hicnpg_server_fib_node_type == node->fn_type);
+ return ((hicnpg_server_t *) (((char *) node) -
+ STRUCT_OFFSET_OF (hicnpg_server_t, fib_node)));
+#else
hicn_header_t *h0 = vlib_buffer_get_current (b0);
/* Generate the right src and dst corresponding to flow and iface */
@@ -404,16 +73,17 @@ hicn_rewrite_interestv4 (vlib_main_t * vm, vlib_buffer_t * b0, u32 seq_number,
.ip4 = hicnpg_main.pgen_clt_src_addr.ip4,
};
hicn_name_t dst_name = {
- .ip4.prefix_as_ip4 = hicnpg_main.pgen_clt_hicn_name->fp_addr.ip4,
- .ip4.suffix = seq_number,
+ .prefix.v4.as_u32 = hicnpg_main.pgen_clt_hicn_name->fp_addr.ip4.as_u32,
+ .suffix = seq_number,
};
src_addr.ip4.as_u32 += clib_host_to_net_u32 (iface);
- dst_name.ip4.prefix_as_ip4.as_u32 += clib_net_to_host_u32 (next_flow);
+ dst_name.prefix.v4.as_u32 += clib_net_to_host_u32 (next_flow);
/* Update locator and name */
hicn_type_t type = hicn_get_buffer (b0)->type;
- HICN_OPS4->set_interest_locator (type, &h0->protocol, &src_addr);
+ HICN_OPS4->set_interest_locator (type, &h0->protocol,
+ (hicn_ip_address_t *) &src_addr);
HICN_OPS4->set_interest_name (type, &h0->protocol, &dst_name);
/* Update lifetime (currently L4 checksum is not updated) */
@@ -421,44 +91,38 @@ hicn_rewrite_interestv4 (vlib_main_t * vm, vlib_buffer_t * b0, u32 seq_number,
/* Update checksums */
HICN_OPS4->update_checksums (type, &h0->protocol, 0, 0);
+#endif
}
/**
- * @brief Rewrite the IPv6 header as the next generated packet
- *
- * Set up a name prefix
- * - etiher generate interest in which the name varies only after the prefix
- * (inc : seq_number), then the flow acts on the prefix (CHECK)
- * seq_number => TCP, FLOW =>
- *
- * SRC : pgen_clt_src_addr.ip6 DST = generate name (pgen_clt_hicn_name.ip6)
- * ffff:ffff:ffff:ffff ffff:ffff:ffff:ffff
- * \__/ \__/
- * +iface + flow
- * Source is used to emulate different consumers.
- * FIXME iface is ill-named, better name it consumer id
- * Destination is used to iterate on the content.
+ * Function definition to backwalk a FIB node
*/
-void
-hicn_rewrite_interestv6 (vlib_main_t * vm, vlib_buffer_t * b0, u32 seq_number,
- u16 interest_lifetime, u32 next_flow, u32 iface)
+static fib_node_back_walk_rc_t
+hicnpg_server_fib_back_walk (fib_node_t *node, fib_node_back_walk_ctx_t *ctx)
{
- hicn_header_t *h0 = vlib_buffer_get_current (b0);
+ hicnpg_server_restack (hicnpg_server_from_fib_node (node));
+#if 1
+ return FIB_NODE_BACK_WALK_CONTINUE;
+#else
/* Generate the right src and dst corresponding to flow and iface */
ip46_address_t src_addr = {
.ip6 = hicnpg_main.pgen_clt_src_addr.ip6,
};
hicn_name_t dst_name = {
- .ip6.prefix_as_ip6 = hicnpg_main.pgen_clt_hicn_name->fp_addr.ip6,
- .ip6.suffix = seq_number,
+ .prefix.v6.as_u64 = {
+ hicnpg_main.pgen_clt_hicn_name->fp_addr.ip6.as_u64[0],
+ hicnpg_main.pgen_clt_hicn_name->fp_addr.ip6.as_u64[1],
+ },
+ .suffix = seq_number,
};
src_addr.ip6.as_u32[3] += clib_host_to_net_u32 (iface);
- dst_name.ip6.prefix_as_ip6.as_u32[3] += clib_net_to_host_u32 (next_flow);
+ dst_name.prefix.v6.as_u32[3] += clib_net_to_host_u32 (next_flow);
/* Update locator and name */
hicn_type_t type = hicn_get_buffer (b0)->type;
- HICN_OPS6->set_interest_locator (type, &h0->protocol, &src_addr);
+ HICN_OPS6->set_interest_locator (type, &h0->protocol,
+ (hicn_ip_address_t *) &src_addr);
HICN_OPS6->set_interest_name (type, &h0->protocol, &dst_name);
/* Update lifetime */
@@ -466,859 +130,240 @@ hicn_rewrite_interestv6 (vlib_main_t * vm, vlib_buffer_t * b0, u32 seq_number,
/* Update checksums */
calculate_tcp_checksum_v6 (vm, b0);
+#endif
}
-
-
-void
-calculate_tcp_checksum_v4 (vlib_main_t * vm, vlib_buffer_t * b0)
-{
- ip4_header_t *ip0;
- tcp_header_t *tcp0;
- ip_csum_t sum0;
- u32 tcp_len0;
-
- ip0 = (ip4_header_t *) (vlib_buffer_get_current (b0));
- tcp0 =
- (tcp_header_t *) (vlib_buffer_get_current (b0) + sizeof (ip4_header_t));
- tcp_len0 = clib_net_to_host_u16 (ip0->length) - sizeof (ip4_header_t);
-
- /* Initialize checksum with header. */
- if (BITS (sum0) == 32)
- {
- sum0 = clib_mem_unaligned (&ip0->src_address, u32);
- sum0 =
- ip_csum_with_carry (sum0,
- clib_mem_unaligned (&ip0->dst_address, u32));
- }
- else
- sum0 = clib_mem_unaligned (&ip0->src_address, u64);
-
- sum0 = ip_csum_with_carry
- (sum0, clib_host_to_net_u32 (tcp_len0 + (ip0->protocol << 16)));
-
- /* Invalidate possibly old checksum. */
- tcp0->checksum = 0;
-
- u32 tcp_offset = sizeof (ip4_header_t);
- sum0 = ip_incremental_checksum_buffer (vm, b0, tcp_offset, tcp_len0, sum0);
-
- tcp0->checksum = ~ip_csum_fold (sum0);
-}
-
-void
-calculate_tcp_checksum_v6 (vlib_main_t * vm, vlib_buffer_t * b0)
+/**
+ * Function definition to get a FIB node from its index
+ */
+static fib_node_t *
+hicnpg_server_fib_node_get (fib_node_index_t index)
{
- ip6_header_t *ip0;
- tcp_header_t *tcp0;
- ip_csum_t sum0;
- u32 tcp_len0;
-
- ip0 = (ip6_header_t *) (vlib_buffer_get_current (b0));
- tcp0 =
- (tcp_header_t *) (vlib_buffer_get_current (b0) + sizeof (ip6_header_t));
- tcp_len0 = clib_net_to_host_u16 (ip0->payload_length);
-
- /* Initialize checksum with header. */
- if (BITS (sum0) == 32)
- {
- sum0 = clib_mem_unaligned (&ip0->src_address, u32);
- sum0 =
- ip_csum_with_carry (sum0,
- clib_mem_unaligned (&ip0->dst_address, u32));
- }
- else
- sum0 = clib_mem_unaligned (&ip0->src_address, u64);
-
- sum0 = ip_csum_with_carry
- (sum0, clib_host_to_net_u32 (tcp_len0 + (ip0->protocol << 16)));
-
- /* Invalidate possibly old checksum. */
- tcp0->checksum = 0;
+ hicnpg_server_t *hpg_server;
- u32 tcp_offset = sizeof (ip6_header_t);
- sum0 = ip_incremental_checksum_buffer (vm, b0, tcp_offset, tcp_len0, sum0);
+ hpg_server = pool_elt_at_index (hicnpg_server_pool, index);
- tcp0->checksum = ~ip_csum_fold (sum0);
+ return (&hpg_server->fib_node);
}
-/* *INDENT-OFF* */
-VLIB_REGISTER_NODE(hicn_pg_interest_node) ={
- .function = hicnpg_client_interest_node_fn,
- .name = "hicnpg-interest",
- .vector_size = sizeof(u32),
- .format_trace = format_hicnpg_trace,
- .type = VLIB_NODE_TYPE_INTERNAL,
- .n_errors = ARRAY_LEN(hicnpg_error_strings),
- .error_strings = hicnpg_error_strings,
- .n_next_nodes = HICNPG_N_NEXT,
- .next_nodes =
- {
- [HICNPG_INTEREST_NEXT_V4_LOOKUP] = "ip4-lookup",
- [HICNPG_INTEREST_NEXT_V6_LOOKUP] = "ip6-lookup",
- [HICNPG_INTEREST_NEXT_DROP] = "error-drop"
- },
-};
-/* *INDENT-ON* */
-
-/*
- * Next graph nodes, which reference the list in the actual registration
- * block below
+/**
+ * Function definition to inform the FIB node that its last lock has gone.
*/
-typedef enum
+static void
+hicnpg_server_fib_last_lock_gone (fib_node_t *node)
{
- HICNPG_DATA_NEXT_DROP,
- HICNPG_DATA_NEXT_LOOKUP4,
- HICNPG_DATA_NEXT_LOOKUP6,
- HICNPG_DATA_N_NEXT,
-} hicnpg_data_next_t;
+ hicnpg_server_t *hpg_server;
-/* Trace context struct */
-typedef struct
-{
- u32 next_index;
- u32 sw_if_index;
- u8 pkt_type;
- u16 msg_type;
-} icnpg_data_trace_t;
+ hpg_server = hicnpg_server_from_fib_node (node);
-/* packet trace format function */
-static u8 *
-format_hicnpg_data_trace (u8 * s, va_list * args)
-{
- CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
- CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
- hicnpg_trace_t *t = va_arg (*args, hicnpg_trace_t *);
+ /**
+ * reset the stacked DPO to unlock it
+ */
+ dpo_reset (&hpg_server->dpo);
- s = format (s, "HICNPG: pkt: %d, msg %d, sw_if_index %d, next index %d",
- (int) t->pkt_type, (int) t->msg_type,
- t->sw_if_index, t->next_index);
- return (s);
+ pool_put (hicnpg_server_pool, hpg_server);
}
-
-static_always_inline int
-match_ip4_name (u32 * name, fib_prefix_t * prefix)
+static void
+hicnpg_server_dpo_lock (dpo_id_t *dpo)
{
- u32 xor = 0;
-
- xor = *name & prefix->fp_addr.ip4.data_u32;
-
- return xor == prefix->fp_addr.ip4.data_u32;
+ hicnpg_server_t *hpg_server;
+ hpg_server = hicnpg_server_get (dpo->dpoi_index);
+ fib_node_lock (&hpg_server->fib_node);
}
-static_always_inline int
-match_ip6_name (u8 * name, fib_prefix_t * prefix)
+static void
+hicnpg_server_dpo_unlock (dpo_id_t *dpo)
{
- union
- {
- u32x4 as_u32x4;
- u64 as_u64[2];
- u32 as_u32[4];
- } xor_sum __attribute__ ((aligned (sizeof (u32x4))));
-
- xor_sum.as_u64[0] = ((u64 *) name)[0] & prefix->fp_addr.ip6.as_u64[0];
- xor_sum.as_u64[1] = ((u64 *) name)[1] & prefix->fp_addr.ip6.as_u64[1];
-
- return (xor_sum.as_u64[0] == prefix->fp_addr.ip6.as_u64[0]) &&
- (xor_sum.as_u64[1] == prefix->fp_addr.ip6.as_u64[1]);
+ hicnpg_server_t *hpg_server;
+ hpg_server = hicnpg_server_get (dpo->dpoi_index);
+ fib_node_unlock (&hpg_server->fib_node);
}
-
-/*
- * Return 0,1,2.
- * 0 matches
- * 1 does not match and the prefix is ip4
- * 2 does not match and the prefix is ip6
- */
-static_always_inline u32
-match_data (vlib_buffer_t * b, fib_prefix_t * prefix)
+static u8 *
+format_hicnpg_server_i (u8 *s, va_list *args)
{
- u8 *ptr = vlib_buffer_get_current (b);
- u8 v = *ptr & 0xf0;
- u32 next = 0;
+ index_t hicnpg_server_i = va_arg (*args, index_t);
+ // u32 indent = va_arg (*args, u32);
+ u32 details = va_arg (*args, u32);
+ // vlib_counter_t to;
+ hicnpg_server_t *hpg;
- if (PREDICT_TRUE (v == 0x40 && ip46_address_is_ip4 (&prefix->fp_addr)))
- {
- if (!match_ip4_name ((u32 *) & (ptr[12]), prefix))
- next = 1;
- }
- else
- if (PREDICT_TRUE (v == 0x60 && !ip46_address_is_ip4 (&prefix->fp_addr)))
- {
- if (!match_ip6_name (&(ptr[8]), prefix))
- next = 2;
- }
+ hpg = hicnpg_server_get (hicnpg_server_i);
- return next;
-}
+ // FIXME
+ s = format (s, "dpo-hicnpg-server:[%d]: ip-fib-index:%d ", hicnpg_server_i,
+ hpg->fib_index);
-/*
- * Return 0,1,2.
- * 0 matches
- * 1 does not match and the prefix is ip4
- * 2 does not match and the prefix is ip6
- */
-static_always_inline u32
-match_interest (vlib_buffer_t * b, fib_prefix_t * prefix)
-{
- u8 *ptr = vlib_buffer_get_current (b);
- u8 v = *ptr & 0xf0;
- u32 next = 0;
-
- if (PREDICT_TRUE (v == 0x40 && ip46_address_is_ip4 (&prefix->fp_addr)))
+ if (FIB_PROTOCOL_IP4 == hpg->prefix.fp_proto)
{
- if (!match_ip4_name ((u32 *) & (ptr[16]), prefix))
- next = 1;
+ s = format (s, "protocol:FIB_PROTOCOL_IP4 prefix: %U",
+ format_fib_prefix, &hpg->prefix);
}
else
- if (PREDICT_TRUE (v == 0x60 && !ip46_address_is_ip4 (&prefix->fp_addr)))
{
- if (!match_ip6_name (&(ptr[24]), prefix))
- next = 2;
+ s = format (s, "protocol:FIB_PROTOCOL_IP6 prefix: %U",
+ format_fib_prefix, &hpg->prefix);
}
- return next;
-}
-
-
+#if 0
+ vlib_get_combined_counter (&(udp_encap_counters), uei, &to);
+  s = format (s, " to:[%Ld:%Ld]", to.packets, to.bytes);
+#endif
-
-/*
- * Node function for the icn packet-generator client. The goal here is to
- * manipulate/tweak a stream of packets that have been injected by the vpp
- * packet generator to generate icn request traffic.
- */
-static uword
-hicnpg_client_data_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
- vlib_frame_t * frame)
-{
- u32 n_left_from, *from, *to_next;
- hicnpg_data_next_t next_index;
- u32 pkts_processed = 0;
- u32 content_msgs_received = 0;
- u32 bi0, bi1;
- vlib_buffer_t *b0, *b1;
- u8 pkt_type0 = 0, pkt_type1 = 0;
- u16 msg_type0 = 1, msg_type1 = 1;
- hicnpg_main_t *hpgm = &hicnpg_main;
-
- from = vlib_frame_vector_args (frame);
- n_left_from = frame->n_vectors;
- next_index = node->cached_next_index;
-
- while (n_left_from > 0)
+ if (details)
{
- u32 n_left_to_next;
- vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
-
- while (n_left_from >= 4 && n_left_to_next >= 2)
- {
- u32 next0 = HICNPG_DATA_NEXT_DROP;
- u32 next1 = HICNPG_DATA_NEXT_DROP;
- u32 sw_if_index0, sw_if_index1;
-
- /* Prefetch next iteration. */
- {
- vlib_buffer_t *p2, *p3;
-
- p2 = vlib_get_buffer (vm, from[2]);
- p3 = vlib_get_buffer (vm, from[3]);
-
- vlib_prefetch_buffer_header (p2, LOAD);
- vlib_prefetch_buffer_header (p3, LOAD);
-
- CLIB_PREFETCH (p2->data, (2 * CLIB_CACHE_LINE_BYTES), STORE);
- CLIB_PREFETCH (p3->data, (2 * CLIB_CACHE_LINE_BYTES), STORE);
- }
-
- /*
- * speculatively enqueue b0 and b1 to the current
- * next frame
- */
- to_next[0] = bi0 = from[0];
- to_next[1] = bi1 = from[1];
- from += 2;
- to_next += 2;
- n_left_from -= 2;
- n_left_to_next -= 2;
-
- b0 = vlib_get_buffer (vm, bi0);
- b1 = vlib_get_buffer (vm, bi1);
-
- sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
- sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
-
- next0 =
- HICNPG_DATA_NEXT_DROP + match_data (b0, hpgm->pgen_clt_hicn_name);
- next1 =
- HICNPG_DATA_NEXT_DROP + match_data (b1, hpgm->pgen_clt_hicn_name);
-
- if (PREDICT_FALSE (vnet_get_feature_count
- (vnet_buffer (b0)->feature_arc_index,
- vnet_buffer (b0)->sw_if_index[VLIB_RX]) > 1))
- vnet_feature_next (&next0, b0);
-
- if (PREDICT_FALSE (vnet_get_feature_count
- (vnet_buffer (b1)->feature_arc_index,
- vnet_buffer (b1)->sw_if_index[VLIB_RX]) > 1))
- vnet_feature_next (&next1, b1);
-
-
- if (next0 == HICNPG_DATA_NEXT_DROP)
- {
- /* Increment a counter */
- content_msgs_received++;
- }
-
- if (next1 == HICNPG_DATA_NEXT_DROP)
- {
- /* Increment a counter */
- content_msgs_received++;
- }
-
- if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
- {
- if (b0->flags & VLIB_BUFFER_IS_TRACED)
- {
- icnpg_data_trace_t *t =
- vlib_add_trace (vm, node, b0, sizeof (*t));
- t->pkt_type = pkt_type0;
- t->msg_type = msg_type0;
- t->sw_if_index = sw_if_index0;
- t->next_index = next0;
- }
- if (b1->flags & VLIB_BUFFER_IS_TRACED)
- {
- icnpg_data_trace_t *t =
- vlib_add_trace (vm, node, b1, sizeof (*t));
- t->pkt_type = pkt_type1;
- t->msg_type = msg_type1;
- t->sw_if_index = sw_if_index1;
- t->next_index = next1;
- }
- }
-
- vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
- to_next, n_left_to_next,
- bi0, bi1, next0, next1);
- pkts_processed += 2;
- }
-
- while (n_left_from > 0 && n_left_to_next > 0)
- {
- u32 next0 = HICNPG_DATA_NEXT_DROP;
- u32 sw_if_index0;
-
- /* speculatively enqueue b0 to the current next frame */
- bi0 = from[0];
- to_next[0] = bi0;
- from += 1;
- to_next += 1;
- n_left_from -= 1;
- n_left_to_next -= 1;
-
- b0 = vlib_get_buffer (vm, bi0);
-
- sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
-
- next0 =
- HICNPG_DATA_NEXT_DROP + match_data (b0, hpgm->pgen_clt_hicn_name);
-
- if (PREDICT_FALSE (vnet_get_feature_count
- (vnet_buffer (b0)->feature_arc_index,
- vnet_buffer (b0)->sw_if_index[VLIB_RX]) > 1))
- vnet_feature_next (&next0, b0);
-
- if (next0 == HICNPG_DATA_NEXT_DROP)
- {
- /* Increment a counter */
- content_msgs_received++;
- }
-
- if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
- && (b0->flags & VLIB_BUFFER_IS_TRACED)))
- {
- icnpg_data_trace_t *t =
- vlib_add_trace (vm, node, b0, sizeof (*t));
- t->pkt_type = pkt_type0;
- t->msg_type = msg_type0;
- t->sw_if_index = sw_if_index0;
- t->next_index = next0;
- }
-
- vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
- n_left_to_next, bi0, next0);
-
- pkts_processed++;
- }
- vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ s = format (s, " locks:%d", hpg->fib_node.fn_locks);
+ // s = format (s, "\n%UStacked on:", format_white_space, indent +
+ // 1); s = format (s, "\n%U%U", format_white_space, indent + 2,
+ // format_dpo_id,
+ // &hpg->dpo, indent + 3);
}
- vlib_node_increment_counter (vm, hicn_pg_data_node.index,
- HICNPG_ERROR_PROCESSED, pkts_processed);
- vlib_node_increment_counter (vm, hicn_pg_data_node.index,
- HICNPG_ERROR_CONTENT_MSGS_RECEIVED,
- content_msgs_received);
- return (frame->n_vectors);
+ return s;
}
-/* *INDENT-OFF* */
-VLIB_REGISTER_NODE(hicn_pg_data_node) =
+static u8 *
+format_hicnpg_server_dpo (u8 *s, va_list *args)
{
- .function = hicnpg_client_data_node_fn,
- .name = "hicnpg-data",
- .vector_size = sizeof(u32),
- .format_trace = format_hicnpg_data_trace,
- .type = VLIB_NODE_TYPE_INTERNAL,
- .n_errors = ARRAY_LEN(hicnpg_error_strings),
- .error_strings = hicnpg_error_strings,
- .n_next_nodes = HICNPG_DATA_N_NEXT,
- .next_nodes =
- {
- [HICNPG_DATA_NEXT_DROP] = "error-drop",
- [HICNPG_DATA_NEXT_LOOKUP4] = "ip4-lookup",
- [HICNPG_DATA_NEXT_LOOKUP6] = "ip6-lookup",
- },
-};
-/* *INDENT-ON* */
+ index_t hpg_server_i = va_arg (*args, index_t);
+ u32 indent = va_arg (*args, u32);
-/* *INDENT-OFF* */
-VNET_FEATURE_INIT(hicn_data_input_ip4_arc, static)=
- {
- .arc_name = "ip4-unicast",
- .node_name = "hicnpg-data",
- .runs_before = VNET_FEATURES("ip4-inacl"),
- };
-/* *INDENT-ON* */
-
-
-/* *INDENT-OFF* */
-VNET_FEATURE_INIT(hicn_data_input_ip6_arc, static)=
- {
- .arc_name = "ip6-unicast",
- .node_name = "hicnpg-data",
- .runs_before = VNET_FEATURES("ip6-inacl"),
- };
-/* *INDENT-ON* */
-
-
-/*
- * End of packet-generator client node
- */
+ return (format (s, "%U", format_hicnpg_server_i, hpg_server_i, indent, 1));
+}
/*
- * Beginning of packet-generation server node
+ * Virtual function table registered by hicn pg server
+ * for participation in the FIB object graph.
*/
-
-/* Registration struct for a graph node */
-vlib_node_registration_t hicn_pg_server_node;
-
-/* Stats, which end up called "error" even though they aren't... */
-#define foreach_icnpg_server_error \
-_(PROCESSED, "hICN PG Server packets processed") \
-_(DROPPED, "hICN PG Server packets dropped")
-
-typedef enum
-{
-#define _(sym,str) HICNPG_SERVER_ERROR_##sym,
- foreach_icnpg_server_error
-#undef _
- HICNPG_SERVER_N_ERROR,
-} icnpg_server_error_t;
-
-static char *icnpg_server_error_strings[] = {
-#define _(sym,string) string,
- foreach_icnpg_server_error
-#undef _
+const static fib_node_vft_t hicnpg_server_fib_vft = {
+ .fnv_get = hicnpg_server_fib_node_get,
+ .fnv_last_lock = hicnpg_server_fib_last_lock_gone,
+ .fnv_back_walk = hicnpg_server_fib_back_walk,
};
-/*
- * Next graph nodes, which reference the list in the actual registration
- * block below
- */
-typedef enum
-{
- HICNPG_SERVER_NEXT_V4_LOOKUP,
- HICNPG_SERVER_NEXT_V6_LOOKUP,
- HICNPG_SERVER_NEXT_DROP,
- HICNPG_SERVER_N_NEXT,
-} icnpg_server_next_t;
+const static dpo_vft_t hicnpg_server_dpo_vft = {
+ .dv_lock = hicnpg_server_dpo_lock,
+ .dv_unlock = hicnpg_server_dpo_unlock,
+ .dv_format = format_hicnpg_server_dpo,
+};
-/* Trace context struct */
-typedef struct
-{
- u32 next_index;
- u32 sw_if_index;
- u8 pkt_type;
- u16 msg_type;
-} hicnpg_server_trace_t;
+const static char *const hicnpg_server_ip4_nodes[] = {
+ "hicnpg-server-4",
+ NULL,
+};
-/* packet trace format function */
-static u8 *
-format_icnpg_server_trace (u8 * s, va_list * args)
-{
- CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
- CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
- hicnpg_server_trace_t *t = va_arg (*args, hicnpg_server_trace_t *);
+const static char *const hicnpg_server_ip6_nodes[] = {
+ "hicnpg-server-6",
+ NULL,
+};
- s =
- format (s,
- "HICNPG SERVER: pkt: %d, msg %d, sw_if_index %d, next index %d",
- (int) t->pkt_type, (int) t->msg_type, t->sw_if_index,
- t->next_index);
- return (s);
-}
+const static char *const *const hicnpg_server_nodes[DPO_PROTO_NUM] = {
+ [DPO_PROTO_IP4] = hicnpg_server_ip4_nodes,
+ [DPO_PROTO_IP6] = hicnpg_server_ip6_nodes
+};
-/*
- * Node function for the icn packet-generator server.
- */
-static uword
-hicnpg_node_server_fn (vlib_main_t * vm,
- vlib_node_runtime_t * node, vlib_frame_t * frame)
+clib_error_t *
+hicnpg_server_add_and_lock (fib_prefix_t *prefix, u32 *hicnpg_server_index,
+ ip46_address_t *locator, size_t payload_size)
{
- u32 n_left_from, *from, *to_next;
- icnpg_server_next_t next_index;
- u32 pkts_processed = 0, pkts_dropped = 0;
- u32 bi0, bi1;
- vlib_buffer_t *b0, *b1;
- u8 pkt_type0 = 0, pkt_type1 = 0;
- u16 msg_type0 = 0, msg_type1 = 0;
- hicn_header_t *hicn0 = NULL, *hicn1 = NULL;
- hicn_name_t name0, name1;
- u16 namelen0, namelen1;
-
- hicnpg_server_main_t *hpgsm = &hicnpg_server_main;
+ hicnpg_server_t *hpg;
+ index_t hpgi;
+ u32 fib_index;
+ fib_node_index_t fib_entry_index;
+ u32 buffer_index;
+ vlib_buffer_t *rb = NULL;
- from = vlib_frame_vector_args (frame);
+ // Retrieve hicn fib table
+ fib_index =
+ fib_table_find_or_create_and_lock (prefix->fp_proto, 0, hicn_fib_src);
- n_left_from = frame->n_vectors;
- next_index = node->cached_next_index;
+ // Check the prefix we are adding is not already in the table
+ fib_entry_index = fib_table_lookup_exact_match (fib_index, prefix);
- while (n_left_from > 0)
+ if (fib_entry_index != FIB_NODE_INDEX_INVALID)
{
- u32 n_left_to_next;
-
- vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
-
-
- while (n_left_from >= 4 && n_left_to_next >= 2)
- {
- u32 next0 = HICNPG_SERVER_NEXT_DROP;
- u32 next1 = HICNPG_SERVER_NEXT_DROP;
- u8 isv6_0 = 0;
- u8 isv6_1 = 0;
- u32 sw_if_index0, sw_if_index1;
-
- /* Prefetch next iteration. */
- {
- vlib_buffer_t *p2, *p3;
-
- p2 = vlib_get_buffer (vm, from[2]);
- p3 = vlib_get_buffer (vm, from[3]);
-
- vlib_prefetch_buffer_header (p2, LOAD);
- vlib_prefetch_buffer_header (p3, LOAD);
-
- CLIB_PREFETCH (p2->data, (2 * CLIB_CACHE_LINE_BYTES), STORE);
- CLIB_PREFETCH (p3->data, (2 * CLIB_CACHE_LINE_BYTES), STORE);
- }
-
- /*
- * speculatively enqueue b0 and b1 to the current
- * next frame
- */
- to_next[0] = bi0 = from[0];
- to_next[1] = bi1 = from[1];
- from += 2;
- to_next += 2;
- n_left_from -= 2;
- n_left_to_next -= 2;
-
- b0 = vlib_get_buffer (vm, bi0);
- b1 = vlib_get_buffer (vm, bi1);
-
- sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
- sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
-
- vnet_buffer (b0)->sw_if_index[VLIB_TX] = ~0;
- vnet_buffer (b1)->sw_if_index[VLIB_TX] = ~0;
-
- u32 match0 = match_interest (b0, hpgsm->pgen_srv_hicn_name);
- u32 match1 = match_interest (b1, hpgsm->pgen_srv_hicn_name);
-
- if (match0)
- {
- next0 = match0 - 1;
- }
- else
- if (hicn_interest_parse_pkt
- (b0, &name0, &namelen0, &hicn0, &isv6_0) == HICN_ERROR_NONE)
- {
- /* this node grabs only interests */
- vlib_buffer_t *rb = NULL;
- rb = vlib_get_buffer (vm, hpgsm->pgen_svr_buffer_idx);
-
- isv6_0 ? convert_interest_to_data_v6 (vm, b0, rb,
- bi0) :
- convert_interest_to_data_v4 (vm, b0, rb, bi0);
-
- next0 =
- isv6_0 ? HICNPG_SERVER_NEXT_V6_LOOKUP :
- HICNPG_SERVER_NEXT_V4_LOOKUP;
- }
-
- if (match1)
- {
- next1 = match1 - 1;
- }
- else
- if (hicn_interest_parse_pkt
- (b1, &name1, &namelen1, &hicn1, &isv6_1) == HICN_ERROR_NONE)
- {
- /* this node grabs only interests */
- vlib_buffer_t *rb = NULL;
- rb = vlib_get_buffer (vm, hpgsm->pgen_svr_buffer_idx);
-
- isv6_1 ? convert_interest_to_data_v6 (vm, b1, rb,
- bi1) :
- convert_interest_to_data_v4 (vm, b1, rb, bi1);
-
- next1 =
- isv6_1 ? HICNPG_SERVER_NEXT_V6_LOOKUP :
- HICNPG_SERVER_NEXT_V4_LOOKUP;
- }
- pkts_processed += 2;
-
- if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
- {
- if (b0->flags & VLIB_BUFFER_IS_TRACED)
- {
- hicnpg_server_trace_t *t =
- vlib_add_trace (vm, node, b0, sizeof (*t));
- t->pkt_type = pkt_type0;
- t->msg_type = msg_type0;
- t->sw_if_index = sw_if_index0;
- t->next_index = next0;
- }
- if (b1->flags & VLIB_BUFFER_IS_TRACED)
- {
- hicnpg_server_trace_t *t =
- vlib_add_trace (vm, node, b1, sizeof (*t));
- t->pkt_type = pkt_type1;
- t->msg_type = msg_type1;
- t->sw_if_index = sw_if_index1;
- t->next_index = next1;
- }
- }
- if (next0 == HICNPG_SERVER_NEXT_DROP)
- {
- pkts_dropped++;
- }
- if (next1 == HICNPG_SERVER_NEXT_DROP)
- {
- pkts_dropped++;
- }
- /*
- * verify speculative enqueues, maybe switch current
- * next frame
- */
- vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
- to_next, n_left_to_next,
- bi0, bi1, next0, next1);
- }
-
- while (n_left_from > 0 && n_left_to_next > 0)
- {
- u32 next0 = HICNPG_SERVER_NEXT_DROP;
- u32 sw_if_index0 = ~0;
- u8 isv6_0 = 0;
-
- /* speculatively enqueue b0 to the current next frame */
- bi0 = from[0];
- to_next[0] = bi0;
- from += 1;
- to_next += 1;
- n_left_from -= 1;
- n_left_to_next -= 1;
-
- b0 = vlib_get_buffer (vm, bi0);
-
- sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
- vnet_buffer (b0)->sw_if_index[VLIB_TX] = ~0;
-
- u32 match0 = match_interest (b0, hpgsm->pgen_srv_hicn_name);
-
- if (match0)
- {
- next0 = match0 - 1;
- }
- else
- if (hicn_interest_parse_pkt
- (b0, &name0, &namelen0, &hicn0, &isv6_0) == HICN_ERROR_NONE)
- {
- /* this node grabs only interests */
- vlib_buffer_t *rb = NULL;
- rb = vlib_get_buffer (vm, hpgsm->pgen_svr_buffer_idx);
-
- isv6_0 ? convert_interest_to_data_v6 (vm, b0, rb,
- bi0) :
- convert_interest_to_data_v4 (vm, b0, rb, bi0);
-
- next0 =
- isv6_0 ? HICNPG_SERVER_NEXT_V6_LOOKUP :
- HICNPG_SERVER_NEXT_V4_LOOKUP;
- }
- if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
- && (b0->flags & VLIB_BUFFER_IS_TRACED)))
- {
- hicnpg_server_trace_t *t =
- vlib_add_trace (vm, node, b0, sizeof (*t));
- t->pkt_type = pkt_type0;
- t->msg_type = msg_type0;
- t->sw_if_index = sw_if_index0;
- t->next_index = next0;
- }
- pkts_processed += 1;
-
- if (next0 == HICNPG_SERVER_NEXT_DROP)
- {
- pkts_dropped++;
- }
- /*
- * verify speculative enqueue, maybe switch current
- * next frame
- */
- vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
- to_next, n_left_to_next,
- bi0, next0);
- }
-
- vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ // Route already exists.
+ return clib_error_return (0, "Route already exists.");
}
- vlib_node_increment_counter (vm, hicn_pg_server_node.index,
- HICNPG_SERVER_ERROR_PROCESSED, pkts_processed);
- vlib_node_increment_counter (vm, hicn_pg_server_node.index,
- HICNPG_SERVER_ERROR_DROPPED, pkts_dropped);
-
- return (frame->n_vectors);
-}
-
-void
-convert_interest_to_data_v4 (vlib_main_t * vm, vlib_buffer_t * b0,
- vlib_buffer_t * rb, u32 bi0)
-{
- hicn_header_t *h0 = vlib_buffer_get_current (b0);
+ // Allocate packet buffer
+ int n_buf = vlib_buffer_alloc (vlib_get_main (), &buffer_index, 1);
- /* Get the packet length */
- u16 pkt_len = clib_net_to_host_u16 (h0->v4.ip.len);
-
- /*
- * Rule of thumb: We want the size of the IP packet to be <= 1500 bytes
- */
- u16 bytes_to_copy = rb->current_length;
- if ((bytes_to_copy + pkt_len) > 1500)
+ if (n_buf == 0)
{
- bytes_to_copy = 1500 - pkt_len;
+ return clib_error_return (0, "Failed to allocate payload buffer.");
}
- /* Add content to the data packet */
- vlib_buffer_add_data (vm, &bi0, rb->data, bytes_to_copy);
- b0 = vlib_get_buffer (vm, bi0);
-
- h0 = vlib_buffer_get_current (b0);
-
- ip4_address_t src_addr = h0->v4.ip.saddr;
- h0->v4.ip.saddr = h0->v4.ip.daddr;
- h0->v4.ip.daddr = src_addr;
-
- h0->v4.ip.len = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0));
- h0->v4.ip.csum = ip4_header_checksum ((ip4_header_t *) & (h0->v4.ip));
- calculate_tcp_checksum_v4 (vm, b0);
+ // Initialize the buffer data with zeros
+ rb = vlib_get_buffer (vlib_get_main (), buffer_index);
+ memset (rb->data, 0, payload_size);
+ rb->current_length = payload_size;
+
+ // We can proceed. Get a new hicnpg_server_t
+ pool_get_aligned_zero (hicnpg_server_pool, hpg, 2 * CLIB_CACHE_LINE_BYTES);
+ hpgi = hpg - hicnpg_server_pool;
+
+ // Set DPO
+ dpo_set (
+ &hpg->dpo, hicnpg_server_dpo_type,
+ (ip46_address_is_ip4 (&prefix->fp_addr) ? DPO_PROTO_IP4 : DPO_PROTO_IP6),
+ hpgi);
+
+ // Add the route via the hicnpg_server_dpo_type. In this way packets will
+ // end up in the hicnpg-server node
+ CLIB_UNUSED (fib_node_index_t new_fib_node_index) =
+ fib_table_entry_special_dpo_add (
+ fib_index, prefix, hicn_fib_src,
+ (FIB_ENTRY_FLAG_EXCLUSIVE | FIB_ENTRY_FLAG_LOOSE_URPF_EXEMPT),
+ &hpg->dpo);
+
+#if 0
+ vlib_validate_combined_counter (&(udp_encap_counters), uei);
+ vlib_zero_combined_counter (&(udp_encap_counters), uei);
+#endif
+
+ // Init remaining struct fields
+ fib_node_init (&hpg->fib_node, hicnpg_server_fib_node_type);
+ fib_node_lock (&hpg->fib_node);
+ hpg->fib_index = fib_index;
+ hpg->prefix = *prefix;
+ hpg->buffer_index = buffer_index;
+ hpg->fib_entry_index = fib_entry_index;
+ hpg->hicn_locator = *locator;
+
+ // track the destination address
+ // hpg->fib_entry_index =
+ // fib_entry_track (fib_index, &hpg->prefix,
+ // hicnpg_server_fib_node_type, hpgi, &hpg->fib_sibling);
+ // hicnpg_server_restack (hpg);
+
+ HICN_DEBUG ("Calling hicn enable for pg-server face");
+
+ hicn_face_id_t *vec_faces = NULL;
+ fib_node_index_t hicn_fib_node_index;
+ hicn_route_enable (prefix, &hicn_fib_node_index, &vec_faces);
+ if (vec_faces != NULL)
+ vec_free (vec_faces);
+
+ // Return the index of the hicnpg_server_t
+ *hicnpg_server_index = hpgi;
+
+ return NULL;
}
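Editorial note: to make the call contract of hicnpg_server_add_and_lock concrete, a minimal usage sketch follows (not part of the patch). The prefix, locator and payload size are illustrative assumptions; only hicnpg_server_add_and_lock and its signature come from this change.

/* Sketch: register a pg server for an illustrative b001::/64 prefix that
 * answers interests with a 1200-byte zero-filled payload. */
static clib_error_t *
hicnpg_server_setup_example (void)
{
  fib_prefix_t prefix = { .fp_proto = FIB_PROTOCOL_IP6, .fp_len = 64 };
  ip46_address_t locator = { 0 };
  u32 server_index = ~0;

  prefix.fp_addr.ip6.as_u16[0] = clib_host_to_net_u16 (0xb001);
  locator.ip6 = prefix.fp_addr.ip6; /* reuse the prefix address as locator */

  return hicnpg_server_add_and_lock (&prefix, &server_index, &locator, 1200);
}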
-void
-convert_interest_to_data_v6 (vlib_main_t * vm, vlib_buffer_t * b0,
- vlib_buffer_t * rb, u32 bi0)
+clib_error_t *
+hicn_pg_init (vlib_main_t *vm)
{
- hicn_header_t *h0 = vlib_buffer_get_current (b0);
-
- /* Get the packet length */
- uint16_t pkt_len =
- clib_net_to_host_u16 (h0->v6.ip.len) + sizeof (ip6_header_t);
-
- /*
- * Figure out how many bytes we can add to the content
- *
- * Rule of thumb: We want the size of the IP packet to be <= 1400 bytes
- */
- u16 bytes_to_copy = rb->current_length;
- if ((bytes_to_copy + pkt_len) > 1500)
- {
- bytes_to_copy = 1500 - pkt_len;
- }
- /* Add content to the data packet */
- vlib_buffer_add_data (vm, &bi0, rb->data, bytes_to_copy);
-
- b0 = vlib_get_buffer (vm, bi0);
-
- h0 = vlib_buffer_get_current (b0);
- ip6_address_t src_addr = h0->v6.ip.saddr;
- h0->v6.ip.saddr = h0->v6.ip.daddr;
- h0->v6.ip.daddr = src_addr;
+ hicnpg_server_fib_node_type = fib_node_register_new_type (
+ "hicnpg_server_fib_node", &hicnpg_server_fib_vft);
- h0->v6.ip.len = clib_host_to_net_u16 (vlib_buffer_length_in_chain
- (vm, b0) - sizeof (ip6_header_t));
- h0->v6.tcp.data_offset_and_reserved |= 0x0f;
- h0->v6.tcp.urg_ptr = htons (0xffff);
+ hicnpg_server_dpo_type =
+ dpo_register_new_type (&hicnpg_server_dpo_vft, hicnpg_server_nodes);
- calculate_tcp_checksum_v6 (vm, b0);
+ return NULL;
}
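Editorial note: hicn_pg_init only registers the FIB node type and the DPO type used by the pg server; it still has to be invoked once at plugin startup. A hedged sketch of one possible call site follows; the wrapper function is hypothetical and the real init wiring lives outside this diff.

/* Sketch: invoke hicn_pg_init from the plugin's own init path. */
static clib_error_t *
hicn_plugin_init_example (vlib_main_t *vm)
{
  clib_error_t *error = hicn_pg_init (vm);
  if (error)
    return error;

  /* ... rest of the plugin initialization ... */
  return NULL;
}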
-/* *INDENT-OFF* */
-VLIB_REGISTER_NODE(hicn_pg_server_node) =
-{
- .function = hicnpg_node_server_fn,
- .name = "hicnpg-server",
- .vector_size = sizeof(u32),
- .format_trace = format_icnpg_server_trace,
- .type = VLIB_NODE_TYPE_INTERNAL,
- .n_errors = ARRAY_LEN(icnpg_server_error_strings),
- .error_strings = icnpg_server_error_strings,
- .n_next_nodes = HICNPG_SERVER_N_NEXT,
- /* edit / add dispositions here */
- .next_nodes =
- {
- [HICNPG_SERVER_NEXT_V4_LOOKUP] = "ip4-lookup",
- [HICNPG_SERVER_NEXT_V6_LOOKUP] = "ip6-lookup",
- [HICNPG_SERVER_NEXT_DROP] = "error-drop",
- },
-};
-/* *INDENT-ON* */
-
-/* *INDENT-OFF* */
-VNET_FEATURE_INIT(hicn_pg_server_ip6, static)=
- {
- .arc_name = "ip6-unicast",
- .node_name = "hicnpg-server",
- .runs_before = VNET_FEATURES("ip6-inacl"),
- };
-/* *INDENT-ON* */
-
-/* *INDENT-OFF* */
-VNET_FEATURE_INIT(hicn_pg_server_ip4, static)=
- {
- .arc_name = "ip4-unicast",
- .node_name = "hicnpg-server",
- .runs_before = VNET_FEATURES("ip4-inacl"),
- };
-/* *INDENT-ON* */
-
-/*
- * End of packet-generator server node
- */
-
/*
* fd.io coding-style-patch-verification: ON
*
diff --git a/hicn-plugin/src/pg.h b/hicn-plugin/src/pg.h
index 84a391d43..deb585714 100644
--- a/hicn-plugin/src/pg.h
+++ b/hicn-plugin/src/pg.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Copyright (c) 2021 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -16,6 +16,7 @@
#ifndef __HICN_PG_H__
#define __HICN_PG_H__
+#include <vppinfra/pool.h>
/**
* @file pg.h
@@ -25,25 +26,28 @@
* interest it receives with the corresponding data.
* The packet generator is made of three nodes:
* - hicnpg-interest that receives packets from a packet generator interface
- * and manipulate them to generate interests based on the given configuration.
- * This node runs at the client side.
+ * and manipulates them to generate interests based on the given
+ * configuration. This node runs at the client side.
* - hicnpg-data that receives data packets at the client side and counts them.
* This is useful for statistics. The "show err" command will give the number
* of interest issued and data received at the client side
- * - hicnpg-server that recevies and interest and replies with the corresponding
- * data. The data is generated from the interest switching the src and destination
- * address in the packet and appending a payload to the packet.
+ * - hicnpg-server that receives an interest and replies with the
+ * corresponding data. The data is generated from the interest by swapping
+ * the src and destination addresses in the packet and appending a payload
+ * to the packet.
*
*
* These three nodes are inserted in the vlib graph in the following manner:
- * - hicnpg-interest is added as a possible next node of the pg-input node. The packet
- * generator stream then specifies it as next node.
- * - hicnpg-data is added as next hop of the ip4/6-unicast node exploiting the corresponding
- * feature and it runs before the ip4/6-inacl node. In this way, every packet that is
- * received through an interface on which this feature is enabled is sent to this node.
- * - hicnpg-server is added as next hop of the ip4/6-unicast using the corresponding
- * feature and it runs before the ip4/6-inacl node. In this way, every packet that is
- * received through an interface on which this feature is enabled is sent to this node.
+ * - hicnpg-interest is added as a possible next node of the pg-input node. The
+ * packet generator stream then specifies it as next node.
+ * - hicnpg-data is added as next hop of the ip4/6-unicast node exploiting the
+ * corresponding feature and it runs before the ip4/6-inacl node. In this way,
+ * every packet that is received through an interface on which this feature is
+ * enabled is sent to this node.
+ * - hicnpg-server is added as next hop of the ip4/6-unicast using the
+ * corresponding feature and it runs before the ip4/6-inacl node. In this way,
+ * every packet that is received through an interface on which this feature is
+ * enabled is sent to this node.
*
* An example of how to use the pg for hicn is available in the documentation.
*/
@@ -55,19 +59,58 @@
*/
typedef struct hicnpg_main_s
{
- u32 index; //used to compute the sequence number
- fib_prefix_t *pgen_clt_hicn_name; //hICN name to put in the destiantion addess of an interest
- u32 index_ifaces; /* used to mimic interests coming from different consumer */
- u32 n_ifaces; /* The source address will change from interest to interest */
- /* index_ifaces is used to keep a global reference to the iface used */
- /* and it is incremented when we want to change "consumer" */
- /* n_ifaces identifies how many consumers to simulate */
- u32 max_seq_number; //Use to limit the max sequence number
- u32 n_flows; //Use to simulate multiple flows (a flow always have the same hICN name)
- ip46_address_t pgen_clt_src_addr; //Source addess base to use in the interest
-
- u16 interest_lifetime; // Interest lifetime
- u32 sw_if; //Interface where to send interest and receives data
+ /*
+ * used to compute the sequence number
+ */
+ u32 index;
+
+ /*
+ * hICN name to put in the destination address of an interest
+ */
+ fib_prefix_t *pgen_clt_hicn_name;
+
+ /*
+ * Used to mimic interests coming from different consumers. The source
+ * address changes from interest to interest. index_ifaces keeps a global
+ * reference to the iface in use and is incremented when we want to change
+ * "consumer".
+ */
+ u32 index_ifaces;
+
+ /*
+ * n_ifaces identifies how many consumers to simulate
+ */
+ u32 n_ifaces;
+
+ /*
+ * Used to limit the maximum sequence number
+ */
+ u32 max_seq_number;
+
+ /*
+ * Used to simulate multiple flows (a flow always has the same hICN name)
+ */
+ u32 n_flows;
+
+ /*
+ * Source address base to use in the interest
+ */
+ ip46_address_t pgen_clt_src_addr;
+
+ /*
+ * Interest lifetime
+ */
+ u16 interest_lifetime;
+
+ /*
+ * Interface used to send interests and receive data.
+ */
+ u32 sw_if;
+
+ /*
+ * Fib node type
+ */
+ fib_node_type_t hicn_fib_node_type;
} hicnpg_main_t;
extern hicnpg_main_t hicnpg_main;
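Editorial note: the interplay of index, n_flows, max_seq_number, index_ifaces and n_ifaces is easiest to read as a small helper. The sketch below is not part of the patch; the function name is hypothetical, but the arithmetic mirrors what hicnpg_client_interest_node_fn in pg_node.c does per packet.

/* Sketch: derive the per-interest sequence number, flow and consumer iface
 * from the hicnpg_main_t counters, then advance them. */
static inline void
hicnpg_next_interest_params_example (hicnpg_main_t *hpgm, u32 *seq_number,
				     u32 *flow, u32 *iface)
{
  *seq_number = (hpgm->index / hpgm->n_flows) % hpgm->max_seq_number;
  *flow = hpgm->index % hpgm->n_flows;
  *iface = hpgm->index_ifaces % hpgm->n_ifaces;

  hpgm->index_ifaces++;
  /* The name advances only after every simulated consumer has used it */
  if (*iface == hpgm->n_ifaces - 1)
    hpgm->index++;
}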
@@ -77,18 +120,75 @@ extern hicnpg_main_t hicnpg_main;
*
 * It stores the configuration and makes it available to the pg server node.
*/
-typedef struct hicnpg_server_main_s
+typedef struct hicnpg_server_s
{
- u32 node_index;
- /* Arbitrary content */
- u32 pgen_svr_buffer_idx;
- fib_prefix_t *pgen_srv_hicn_name;
-} hicnpg_server_main_t;
+ /*
+ * Prefix served by this packet generator server
+ */
+ fib_prefix_t prefix;
+
+ /*
+ * IP address to put in the destination address of the data
+ */
+ ip46_address_t hicn_locator;
+
+ /*
+ * Buffer index
+ */
+ u32 buffer_index;
+
+ /**
+ * The DPO used to forward to the next node in the VLIB graph
+ */
+ dpo_id_t dpo;
-extern hicnpg_server_main_t hicnpg_server_main;
+ /*
+ * linkage into the FIB graph
+ */
+ fib_node_t fib_node;
+ /*
+ * Tracking information for the IP destination
+ */
+ fib_node_index_t fib_entry_index;
+
+ /*
+ * The FIB index
+ */
+ index_t fib_index;
+} hicnpg_server_t;
+
+STATIC_ASSERT (sizeof (hicnpg_server_t) <= 2 * CLIB_CACHE_LINE_BYTES,
+ "hicnpg_server_t is too large");
+
+extern hicnpg_server_t hicnpg_server_main;
extern vlib_node_registration_t hicn_pg_interest_node;
extern vlib_node_registration_t hicn_pg_data_node;
+extern dpo_type_t hicnpg_server_dpo_type;
+
+/**
+ * Pool of hicnpg_servers
+ */
+extern hicnpg_server_t *hicnpg_server_pool;
+
+always_inline hicnpg_server_t *
+hicnpg_server_get (index_t hpgi)
+{
+ return pool_elt_at_index (hicnpg_server_pool, hpgi);
+}
+
+always_inline u8
+dpo_is_pgserver (const dpo_id_t *dpo)
+{
+ return (dpo->dpoi_type == hicnpg_server_dpo_type);
+}
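Editorial note: dpo_is_pgserver and hicnpg_server_get are meant to be used together when a node is handed a DPO and needs the backing pool entry; a hypothetical combination is sketched below (not part of the patch).

/* Sketch: map a DPO back to its hicnpg_server_t, or NULL if the DPO is not
 * of the pg-server type. */
static inline hicnpg_server_t *
hicnpg_server_from_dpo_example (const dpo_id_t *dpo)
{
  if (!dpo_is_pgserver (dpo))
    return NULL;

  return hicnpg_server_get (dpo->dpoi_index);
}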
+
+clib_error_t *hicnpg_server_add_and_lock (fib_prefix_t *prefix,
+ u32 *hicnpg_server_index,
+ ip46_address_t *locator,
+ size_t payload_size);
+
+clib_error_t *hicn_pg_init (vlib_main_t *vm);
#endif // __HICN_PG_H__
diff --git a/hicn-plugin/src/pg_node.c b/hicn-plugin/src/pg_node.c
new file mode 100644
index 000000000..3d94c1cd1
--- /dev/null
+++ b/hicn-plugin/src/pg_node.c
@@ -0,0 +1,1121 @@
+/*
+ * Copyright (c) 2021-2022 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vnet/pg/pg.h>
+#include <vnet/ip/ip.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/ip/ip4_packet.h>
+#include <vnet/ip/ip6_packet.h>
+#include <vnet/tcp/tcp_packet.h>
+
+#include "hicn.h"
+#include "pg.h"
+#include "parser.h"
+#include "infra.h"
+
+/* Registration struct for a graph node */
+vlib_node_registration_t hicn_pg_interest_node;
+vlib_node_registration_t hicn_pg_data_node;
+
+/* Stats, which end up called "error" even though they aren't... */
+#define foreach_hicnpg_error \
+ _ (PROCESSED, "hICN PG packets processed") \
+ _ (DROPPED, "hICN PG packets dropped") \
+ _ (INTEREST_MSGS_GENERATED, "hICN PG Interests generated") \
+ _ (CONTENT_MSGS_RECEIVED, "hICN PG Content msgs received")
+
+typedef enum
+{
+#define _(sym, str) HICNPG_ERROR_##sym,
+ foreach_hicnpg_error
+#undef _
+ HICNPG_N_ERROR,
+} hicnpg_error_t;
+
+static char *hicnpg_error_strings[] = {
+#define _(sym, string) string,
+ foreach_hicnpg_error
+#undef _
+};
+
+/*
+ * Next graph nodes, which reference the list in the actual registration
+ * block below
+ */
+typedef enum
+{
+ HICNPG_INTEREST_NEXT_V4_LOOKUP,
+ HICNPG_INTEREST_NEXT_V6_LOOKUP,
+ HICNPG_INTEREST_NEXT_DROP,
+ HICNPG_N_NEXT,
+} hicnpg_interest_next_t;
+
+/* Trace context struct */
+typedef struct
+{
+ u32 next_index;
+ u32 sw_if_index;
+ u8 pkt_type;
+ u16 msg_type;
+} hicnpg_trace_t;
+
+/* packet trace format function */
+static u8 *
+format_hicnpg_trace (u8 *s, va_list *args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ hicnpg_trace_t *t = va_arg (*args, hicnpg_trace_t *);
+
+ s = format (s, "HICNPG: pkt: %d, msg %d, sw_if_index %d, next index %d",
+ (int) t->pkt_type, (int) t->msg_type, t->sw_if_index,
+ t->next_index);
+ return (s);
+}
+
+always_inline void hicn_rewrite_interestv4 (vlib_main_t *vm, vlib_buffer_t *b0,
+ u32 seq_number, u16 lifetime,
+ u32 next_flow, u32 iface);
+
+always_inline void hicn_rewrite_interestv6 (vlib_main_t *vm, vlib_buffer_t *b0,
+ u32 seq_number, u16 lifetime,
+ u32 next_flow, u32 iface);
+
+always_inline void convert_interest_to_data_v4 (vlib_main_t *vm,
+ vlib_buffer_t *b0,
+ vlib_buffer_t *rb, u32 bi0);
+
+always_inline void convert_interest_to_data_v6 (vlib_main_t *vm,
+ vlib_buffer_t *b0,
+ vlib_buffer_t *rb, u32 bi0);
+
+always_inline void calculate_tcp_checksum_v4 (vlib_main_t *vm,
+ vlib_buffer_t *b0);
+
+always_inline void calculate_tcp_checksum_v6 (vlib_main_t *vm,
+ vlib_buffer_t *b0);
+/*
+ * Node function for the icn packet-generator client. The goal here is to
+ * manipulate/tweak a stream of packets that have been injected by the vpp
+ * packet generator to generate icn request traffic.
+ */
+static uword
+hicnpg_client_interest_node_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
+ vlib_frame_t *frame)
+{
+ u32 n_left_from, *from, *to_next;
+ hicnpg_interest_next_t next_index;
+ u32 pkts_processed = 0, pkts_dropped = 0;
+ u32 interest_msgs_generated = 0;
+ u32 bi0, bi1;
+ vlib_buffer_t *b0, *b1;
+ u8 pkt_type0 = 0, pkt_type1 = 0;
+ u16 msg_type0 = 0, msg_type1 = 0;
+ hicnpg_main_t *hpgm = &hicnpg_main;
+ int iface = 0;
+ u32 next0 = HICNPG_INTEREST_NEXT_DROP;
+ u32 next1 = HICNPG_INTEREST_NEXT_DROP;
+ u32 sw_if_index0 = ~0, sw_if_index1 = ~0;
+ u8 isv6_0;
+ u8 isv6_1;
+ u32 n_left_to_next;
+ uword size;
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t *p2, *p3;
+
+ p2 = vlib_get_buffer (vm, from[2]);
+ p3 = vlib_get_buffer (vm, from[3]);
+
+ vlib_prefetch_buffer_header (p2, LOAD);
+ vlib_prefetch_buffer_header (p3, LOAD);
+
+ CLIB_PREFETCH (p2->data, (2 * CLIB_CACHE_LINE_BYTES), STORE);
+ CLIB_PREFETCH (p3->data, (2 * CLIB_CACHE_LINE_BYTES), STORE);
+ }
+
+ /*
+ * speculatively enqueue b0 and b1 to the current
+ * next frame
+ */
+ to_next[0] = bi0 = from[0];
+ to_next[1] = bi1 = from[1];
+ from += 2;
+ to_next += 2;
+ n_left_from -= 2;
+ n_left_to_next -= 2;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+
+ hicn_buffer_set_flags (b0, HICN_BUFFER_FLAGS_FROM_PG);
+ hicn_buffer_set_flags (b1, HICN_BUFFER_FLAGS_FROM_PG);
+
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+ sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
+ vnet_buffer (b0)->sw_if_index[VLIB_RX] = hpgm->sw_if;
+ vnet_buffer (b1)->sw_if_index[VLIB_RX] = hpgm->sw_if;
+
+ /* Check icn packets, locate names */
+ size = vlib_buffer_length_in_chain (vm, b0);
+ if (hicn_interest_parse_pkt (b0, size) == HICN_ERROR_NONE)
+ {
+ /* this node grabs only interests */
+ isv6_0 = hicn_buffer_is_v6 (b0);
+
+ /* Increment the appropriate message counter */
+ interest_msgs_generated++;
+
+ iface = (hpgm->index_ifaces % hpgm->n_ifaces);
+ /* Rewrite and send */
+ isv6_0 ?
+ hicn_rewrite_interestv6 (
+ vm, b0, (hpgm->index / hpgm->n_flows) % hpgm->max_seq_number,
+ hpgm->interest_lifetime, hpgm->index % hpgm->n_flows,
+ iface) :
+ hicn_rewrite_interestv4 (
+ vm, b0, (hpgm->index / hpgm->n_flows) % hpgm->max_seq_number,
+ hpgm->interest_lifetime, hpgm->index % hpgm->n_flows, iface);
+
+ hpgm->index_ifaces++;
+ if (iface == (hpgm->n_ifaces - 1))
+ hpgm->index++;
+
+ next0 = isv6_0 ? HICNPG_INTEREST_NEXT_V6_LOOKUP :
+ HICNPG_INTEREST_NEXT_V4_LOOKUP;
+ }
+ size = vlib_buffer_length_in_chain (vm, b1);
+ if (hicn_interest_parse_pkt (b1, size) == HICN_ERROR_NONE)
+ {
+ /* this node grabs only interests */
+ isv6_1 = hicn_buffer_is_v6 (b1);
+
+ /* Increment the appropriate message counter */
+ interest_msgs_generated++;
+
+ iface = (hpgm->index_ifaces % hpgm->n_ifaces);
+ /* Rewrite and send */
+ isv6_1 ?
+ hicn_rewrite_interestv6 (
+ vm, b1, (hpgm->index / hpgm->n_flows) % hpgm->max_seq_number,
+ hpgm->interest_lifetime, hpgm->index % hpgm->n_flows,
+ iface) :
+ hicn_rewrite_interestv4 (
+ vm, b1, (hpgm->index / hpgm->n_flows) % hpgm->max_seq_number,
+ hpgm->interest_lifetime, hpgm->index % hpgm->n_flows, iface);
+
+ hpgm->index_ifaces++;
+ if (iface == (hpgm->n_ifaces - 1))
+ hpgm->index++;
+
+ next1 = isv6_1 ? HICNPG_INTEREST_NEXT_V6_LOOKUP :
+ HICNPG_INTEREST_NEXT_V4_LOOKUP;
+ }
+ /* Send pkt to next node */
+ vnet_buffer (b0)->sw_if_index[VLIB_TX] = ~0;
+ vnet_buffer (b1)->sw_if_index[VLIB_TX] = ~0;
+
+ pkts_processed += 2;
+
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
+ {
+ if (b0->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ hicnpg_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->pkt_type = pkt_type0;
+ t->msg_type = msg_type0;
+ t->sw_if_index = sw_if_index0;
+ t->next_index = next0;
+ }
+ if (b1->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ hicnpg_trace_t *t =
+ vlib_add_trace (vm, node, b1, sizeof (*t));
+ t->pkt_type = pkt_type1;
+ t->msg_type = msg_type1;
+ t->sw_if_index = sw_if_index1;
+ t->next_index = next1;
+ }
+ }
+ if (next0 == HICNPG_INTEREST_NEXT_DROP)
+ {
+ pkts_dropped++;
+ }
+ if (next1 == HICNPG_INTEREST_NEXT_DROP)
+ {
+ pkts_dropped++;
+ }
+ /*
+ * verify speculative enqueues, maybe switch current
+ * next frame
+ */
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
+ n_left_to_next, bi0, bi1, next0,
+ next1);
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ /* speculatively enqueue b0 to the current next frame */
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ hicn_buffer_set_flags (b0, HICN_BUFFER_FLAGS_FROM_PG);
+
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+ vnet_buffer (b0)->sw_if_index[VLIB_RX] = hpgm->sw_if;
+
+ /* Check icn packets, locate names */
+ size = vlib_buffer_length_in_chain (vm, b0);
+ if (hicn_interest_parse_pkt (b0, size) == HICN_ERROR_NONE)
+ {
+ /* this node grabs only interests */
+ isv6_0 = hicn_buffer_is_v6 (b0);
+
+ /* Increment the appropriate message counter */
+ interest_msgs_generated++;
+
+ iface = (hpgm->index_ifaces % hpgm->n_ifaces);
+
+ /* Rewrite and send */
+ isv6_0 ?
+ hicn_rewrite_interestv6 (
+ vm, b0, (hpgm->index / hpgm->n_flows) % hpgm->max_seq_number,
+ hpgm->interest_lifetime, hpgm->index % hpgm->n_flows,
+ iface) :
+ hicn_rewrite_interestv4 (
+ vm, b0, (hpgm->index / hpgm->n_flows) % hpgm->max_seq_number,
+ hpgm->interest_lifetime, hpgm->index % hpgm->n_flows, iface);
+
+ hpgm->index_ifaces++;
+ if (iface == (hpgm->n_ifaces - 1))
+ hpgm->index++;
+
+ next0 = isv6_0 ? HICNPG_INTEREST_NEXT_V6_LOOKUP :
+ HICNPG_INTEREST_NEXT_V4_LOOKUP;
+ }
+ /* Send pkt to ip lookup */
+ vnet_buffer (b0)->sw_if_index[VLIB_TX] = ~0;
+
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) &&
+ (b0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ hicnpg_trace_t *t = vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->pkt_type = pkt_type0;
+ t->msg_type = msg_type0;
+ t->sw_if_index = sw_if_index0;
+ t->next_index = next0;
+ }
+ pkts_processed += 1;
+
+ if (next0 == HICNPG_INTEREST_NEXT_DROP)
+ {
+ pkts_dropped++;
+ }
+ /*
+ * verify speculative enqueue, maybe switch current
+ * next frame
+ */
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+ n_left_to_next, bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ vlib_node_increment_counter (vm, hicn_pg_interest_node.index,
+ HICNPG_ERROR_PROCESSED, pkts_processed);
+ vlib_node_increment_counter (vm, hicn_pg_interest_node.index,
+ HICNPG_ERROR_DROPPED, pkts_dropped);
+ vlib_node_increment_counter (vm, hicn_pg_interest_node.index,
+ HICNPG_ERROR_INTEREST_MSGS_GENERATED,
+ interest_msgs_generated);
+
+ return (frame->n_vectors);
+}
+
+void
+hicn_rewrite_interestv4 (vlib_main_t *vm, vlib_buffer_t *b0, u32 seq_number,
+ u16 interest_lifetime, u32 next_flow, u32 iface)
+{
+ hicn_packet_buffer_t *pkbuf = &hicn_get_buffer (b0)->pkbuf;
+ /* Generate the right src and dst corresponding to flow and iface */
+ ip46_address_t src_addr = {
+ .ip4 = hicnpg_main.pgen_clt_src_addr.ip4,
+ };
+ hicn_name_t dst_name = {
+ .prefix.v4.as_u32 = hicnpg_main.pgen_clt_hicn_name->fp_addr.ip4.as_u32,
+ .suffix = seq_number,
+ };
+
+ src_addr.ip4.as_u32 += clib_host_to_net_u32 (iface);
+ dst_name.prefix.v4.as_u32 += clib_net_to_host_u32 (next_flow);
+
+ /* Update locator and name */
+ hicn_interest_set_locator (pkbuf, (hicn_ip_address_t *) &src_addr);
+ hicn_interest_set_name (pkbuf, &dst_name);
+
+ /* Update lifetime (currently L4 checksum is not updated) */
+ hicn_interest_set_lifetime (pkbuf, interest_lifetime);
+
+ /* Update checksums */
+ hicn_packet_compute_checksum (pkbuf);
+}
+
+/**
+ * @brief Rewrite the IPv6 header as the next generated packet
+ *
+ * Set up a name prefix
+ * - either generate interests in which the name varies only after the prefix
+ * (inc : seq_number), then the flow acts on the prefix (CHECK)
+ * seq_number => TCP, FLOW =>
+ *
+ * SRC : pgen_clt_src_addr.ip6 DST = generate name (pgen_clt_hicn_name.ip6)
+ * ffff:ffff:ffff:ffff ffff:ffff:ffff:ffff
+ * \__/ \__/
+ * +iface + flow
+ * Source is used to emulate different consumers.
+ * FIXME iface is ill-named, better name it consumer id
+ * Destination is used to iterate on the content.
+ */
+void
+hicn_rewrite_interestv6 (vlib_main_t *vm, vlib_buffer_t *b0, u32 seq_number,
+ u16 interest_lifetime, u32 next_flow, u32 iface)
+{
+ hicn_packet_buffer_t *pkbuf = &hicn_get_buffer (b0)->pkbuf;
+
+ /* Generate the right src and dst corresponding to flow and iface */
+ ip46_address_t src_addr = {
+ .ip6 = hicnpg_main.pgen_clt_src_addr.ip6,
+ };
+ hicn_name_t dst_name = {
+ .prefix = (hicn_ip_address_t) (hicnpg_main.pgen_clt_hicn_name->fp_addr),
+ .suffix = seq_number,
+ };
+ src_addr.ip6.as_u32[3] += clib_host_to_net_u32 (iface);
+ dst_name.prefix.v6.as_u32[3] += clib_net_to_host_u32 (next_flow);
+
+ /* Update locator and name */
+ hicn_interest_set_locator (pkbuf, (hicn_ip_address_t *) &src_addr);
+ hicn_interest_set_name (pkbuf, &dst_name);
+ /* Update lifetime */
+ hicn_interest_set_lifetime (pkbuf, interest_lifetime);
+
+ /* Update checksums */
+ hicn_packet_compute_checksum (pkbuf);
+ calculate_tcp_checksum_v6 (vm, b0);
+}
+
+void
+calculate_tcp_checksum_v4 (vlib_main_t *vm, vlib_buffer_t *b0)
+{
+ ip4_header_t *ip0;
+ tcp_header_t *tcp0;
+ ip_csum_t sum0;
+ u32 tcp_len0;
+
+ ip0 = (ip4_header_t *) (vlib_buffer_get_current (b0));
+ tcp0 =
+ (tcp_header_t *) (vlib_buffer_get_current (b0) + sizeof (ip4_header_t));
+ tcp_len0 = clib_net_to_host_u16 (ip0->length) - sizeof (ip4_header_t);
+
+ /* Initialize checksum with header. */
+ if (BITS (sum0) == 32)
+ {
+ sum0 = clib_mem_unaligned (&ip0->src_address, u32);
+ sum0 =
+ ip_csum_with_carry (sum0, clib_mem_unaligned (&ip0->dst_address, u32));
+ }
+ else
+ sum0 = clib_mem_unaligned (&ip0->src_address, u64);
+
+ sum0 = ip_csum_with_carry (
+ sum0, clib_host_to_net_u32 (tcp_len0 + (ip0->protocol << 16)));
+
+ /* Invalidate possibly old checksum. */
+ tcp0->checksum = 0;
+
+ u32 tcp_offset = sizeof (ip4_header_t);
+ sum0 = ip_incremental_checksum_buffer (vm, b0, tcp_offset, tcp_len0, sum0);
+
+ tcp0->checksum = ~ip_csum_fold (sum0);
+}
+
+void
+calculate_tcp_checksum_v6 (vlib_main_t *vm, vlib_buffer_t *b0)
+{
+ ip6_header_t *ip0;
+ tcp_header_t *tcp0;
+ ip_csum_t sum0;
+ u32 tcp_len0;
+
+ ip0 = (ip6_header_t *) (vlib_buffer_get_current (b0));
+ tcp0 =
+ (tcp_header_t *) (vlib_buffer_get_current (b0) + sizeof (ip6_header_t));
+ tcp_len0 = clib_net_to_host_u16 (ip0->payload_length);
+
+ /* Initialize checksum with header. */
+ if (BITS (sum0) == 32)
+ {
+ sum0 = clib_mem_unaligned (&ip0->src_address, u32);
+ sum0 =
+ ip_csum_with_carry (sum0, clib_mem_unaligned (&ip0->dst_address, u32));
+ }
+ else
+ sum0 = clib_mem_unaligned (&ip0->src_address, u64);
+
+ sum0 = ip_csum_with_carry (
+ sum0, clib_host_to_net_u32 (tcp_len0 + (ip0->protocol << 16)));
+
+ /* Invalidate possibly old checksum. */
+ tcp0->checksum = 0;
+
+ u32 tcp_offset = sizeof (ip6_header_t);
+ sum0 = ip_incremental_checksum_buffer (vm, b0, tcp_offset, tcp_len0, sum0);
+
+ tcp0->checksum = ~ip_csum_fold (sum0);
+}
+
+VLIB_REGISTER_NODE (hicn_pg_interest_node) = {
+ .function = hicnpg_client_interest_node_fn,
+ .name = "hicnpg-interest",
+ .vector_size = sizeof (u32),
+ .format_trace = format_hicnpg_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+ .n_errors = ARRAY_LEN (hicnpg_error_strings),
+ .error_strings = hicnpg_error_strings,
+ .n_next_nodes = HICNPG_N_NEXT,
+ .next_nodes = { [HICNPG_INTEREST_NEXT_V4_LOOKUP] = "ip4-lookup",
+ [HICNPG_INTEREST_NEXT_V6_LOOKUP] = "ip6-lookup",
+ [HICNPG_INTEREST_NEXT_DROP] = "error-drop" },
+};
+
+/*
+ * Next graph nodes, which reference the list in the actual registration
+ * block below
+ */
+typedef enum
+{
+ HICNPG_DATA_NEXT_DROP,
+ HICNPG_DATA_NEXT_LOOKUP4,
+ HICNPG_DATA_NEXT_LOOKUP6,
+ HICNPG_DATA_N_NEXT,
+} hicnpg_data_next_t;
+
+/* Trace context struct */
+typedef struct
+{
+ u32 next_index;
+ u32 sw_if_index;
+ u8 pkt_type;
+ u16 msg_type;
+} icnpg_data_trace_t;
+
+/* packet trace format function */
+static u8 *
+format_hicnpg_data_trace (u8 *s, va_list *args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ hicnpg_trace_t *t = va_arg (*args, hicnpg_trace_t *);
+
+ s = format (s, "HICNPG: pkt: %d, msg %d, sw_if_index %d, next index %d",
+ (int) t->pkt_type, (int) t->msg_type, t->sw_if_index,
+ t->next_index);
+ return (s);
+}
+
+/*
+ * Node function for the icn packet-generator client data node. It receives
+ * data packets at the client side and counts them; the totals are exported
+ * through the node counters for statistics.
+ */
+static uword
+hicnpg_client_data_node_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
+ vlib_frame_t *frame)
+{
+ u32 n_left_from, *from, *to_next;
+ hicnpg_data_next_t next_index;
+ u32 pkts_processed = 0;
+ u32 content_msgs_received = 0;
+ u32 bi0, bi1;
+ vlib_buffer_t *b0, *b1;
+ u8 pkt_type0 = 0, pkt_type1 = 0;
+ u16 msg_type0 = 1, msg_type1 = 1;
+ u32 next0 = HICNPG_DATA_NEXT_DROP;
+ u32 next1 = HICNPG_DATA_NEXT_DROP;
+ u32 sw_if_index0, sw_if_index1;
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t *p2, *p3;
+
+ p2 = vlib_get_buffer (vm, from[2]);
+ p3 = vlib_get_buffer (vm, from[3]);
+
+ vlib_prefetch_buffer_header (p2, LOAD);
+ vlib_prefetch_buffer_header (p3, LOAD);
+
+ CLIB_PREFETCH (p2->data, (2 * CLIB_CACHE_LINE_BYTES), STORE);
+ CLIB_PREFETCH (p3->data, (2 * CLIB_CACHE_LINE_BYTES), STORE);
+ }
+
+ /*
+ * speculatively enqueue b0 and b1 to the current
+ * next frame
+ */
+ to_next[0] = bi0 = from[0];
+ to_next[1] = bi1 = from[1];
+ from += 2;
+ to_next += 2;
+ n_left_from -= 2;
+ n_left_to_next -= 2;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+ sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
+
+ next0 = HICNPG_DATA_NEXT_DROP;
+ next1 = HICNPG_DATA_NEXT_DROP;
+
+ // Increment counter
+ content_msgs_received += 2;
+
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
+ {
+ if (b0->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ icnpg_data_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->pkt_type = pkt_type0;
+ t->msg_type = msg_type0;
+ t->sw_if_index = sw_if_index0;
+ t->next_index = next0;
+ }
+ if (b1->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ icnpg_data_trace_t *t =
+ vlib_add_trace (vm, node, b1, sizeof (*t));
+ t->pkt_type = pkt_type1;
+ t->msg_type = msg_type1;
+ t->sw_if_index = sw_if_index1;
+ t->next_index = next1;
+ }
+ }
+
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
+ n_left_to_next, bi0, bi1, next0,
+ next1);
+ pkts_processed += 2;
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ /* speculatively enqueue b0 to the current next frame */
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+
+ next0 = HICNPG_DATA_NEXT_DROP;
+
+ // Increment a counter
+ content_msgs_received += 1;
+
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) &&
+ (b0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ icnpg_data_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->pkt_type = pkt_type0;
+ t->msg_type = msg_type0;
+ t->sw_if_index = sw_if_index0;
+ t->next_index = next0;
+ }
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+ n_left_to_next, bi0, next0);
+
+ pkts_processed++;
+ }
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ vlib_node_increment_counter (vm, hicn_pg_data_node.index,
+ HICNPG_ERROR_PROCESSED, pkts_processed);
+ vlib_node_increment_counter (vm, hicn_pg_data_node.index,
+ HICNPG_ERROR_CONTENT_MSGS_RECEIVED,
+ content_msgs_received);
+ return (frame->n_vectors);
+}
+
+VLIB_REGISTER_NODE(hicn_pg_data_node) =
+{
+ .function = hicnpg_client_data_node_fn,
+ .name = "hicnpg-data",
+ .vector_size = sizeof(u32),
+ .format_trace = format_hicnpg_data_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+ .n_errors = ARRAY_LEN(hicnpg_error_strings),
+ .error_strings = hicnpg_error_strings,
+ .n_next_nodes = HICNPG_DATA_N_NEXT,
+ .next_nodes =
+ {
+ [HICNPG_DATA_NEXT_DROP] = "error-drop",
+ [HICNPG_DATA_NEXT_LOOKUP4] = "ip4-lookup",
+ [HICNPG_DATA_NEXT_LOOKUP6] = "ip6-lookup",
+ },
+};
+
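+/*
+ * Insert the hicnpg-data node in the ip4/ip6 unicast feature arcs, before the
+ * input ACL nodes, so that data packets coming back to the packet-generator
+ * client are intercepted by this node.
+ */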
+VNET_FEATURE_INIT (hicn_data_input_ip4_arc, static) = {
+ .arc_name = "ip4-unicast",
+ .node_name = "hicnpg-data",
+ .runs_before = VNET_FEATURES ("ip4-inacl"),
+};
+
+VNET_FEATURE_INIT (hicn_data_input_ip6_arc, static) = {
+ .arc_name = "ip6-unicast",
+ .node_name = "hicnpg-data",
+ .runs_before = VNET_FEATURES ("ip6-inacl"),
+};
+
+/*
+ * End of packet-generator client node
+ */
+
+/*
+ * Beginning of packet-generation server node
+ */
+
+/* Registration struct for a graph node */
+vlib_node_registration_t hicn_pg_server_node;
+
+/* Stats, which end up called "error" even though they aren't... */
+#define foreach_icnpg_server_error \
+ _ (PROCESSED, "hICN PG Server packets processed") \
+ _ (DROPPED, "hICN PG Server packets dropped")
+
+typedef enum
+{
+#define _(sym, str) HICNPG_SERVER_ERROR_##sym,
+ foreach_icnpg_server_error
+#undef _
+ HICNPG_SERVER_N_ERROR,
+} icnpg_server_error_t;
+
+static char *icnpg_server_error_strings[] = {
+#define _(sym, string) string,
+ foreach_icnpg_server_error
+#undef _
+};
+
+/*
+ * Next graph nodes, which reference the list in the actual registration
+ * block below
+ */
+typedef enum
+{
+ HICNPG_SERVER_NEXT_V4_LOOKUP,
+ HICNPG_SERVER_NEXT_V6_LOOKUP,
+ HICNPG_SERVER_NEXT_DROP,
+ HICNPG_SERVER_N_NEXT,
+} icnpg_server_next_t;
+
+/* Trace context struct */
+typedef struct
+{
+ u32 next_index;
+ u32 sw_if_index;
+ u8 pkt_type;
+ u16 msg_type;
+} hicnpg_server_trace_t;
+
+/* packet trace format function */
+static u8 *
+format_icnpg_server_trace (u8 *s, va_list *args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ hicnpg_server_trace_t *t = va_arg (*args, hicnpg_server_trace_t *);
+
+ s = format (
+ s, "HICNPG SERVER: pkt: %d, msg %d, sw_if_index %d, next index %d",
+ (int) t->pkt_type, (int) t->msg_type, t->sw_if_index, t->next_index);
+ return (s);
+}
+
+/*
+ * Node function for the icn packet-generator server. Incoming interests are
+ * parsed and converted in place into data packets, then sent back through
+ * the ip4/ip6 lookup nodes; packets that do not parse as interests are
+ * dropped.
+ */
+static uword
+hicnpg_node_server_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
+ vlib_frame_t *frame, int isv6)
+{
+ u32 n_left_from, *from, *to_next;
+ icnpg_server_next_t next_index;
+ u32 pkts_processed = 0, pkts_dropped = 0;
+ u32 bi0, bi1;
+ vlib_buffer_t *b0, *b1;
+ u8 pkt_type0 = 0, pkt_type1 = 0;
+ u16 msg_type0 = 0, msg_type1 = 0;
+ u32 next0 = HICNPG_SERVER_NEXT_DROP;
+ u32 next1 = HICNPG_SERVER_NEXT_DROP;
+ u32 sw_if_index0, sw_if_index1;
+ u32 hpgi0, hpgi1;
+ hicnpg_server_t *hpg0, *hpg1;
+ u32 n_left_to_next;
+ uword size;
+
+ from = vlib_frame_vector_args (frame);
+
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t *p2, *p3;
+
+ p2 = vlib_get_buffer (vm, from[2]);
+ p3 = vlib_get_buffer (vm, from[3]);
+
+ vlib_prefetch_buffer_header (p2, LOAD);
+ vlib_prefetch_buffer_header (p3, LOAD);
+
+ CLIB_PREFETCH (p2->data, (2 * CLIB_CACHE_LINE_BYTES), STORE);
+ CLIB_PREFETCH (p3->data, (2 * CLIB_CACHE_LINE_BYTES), STORE);
+ }
+
+ /*
+ * speculatively enqueue b0 and b1 to the current
+ * next frame
+ */
+ to_next[0] = bi0 = from[0];
+ to_next[1] = bi1 = from[1];
+ from += 2;
+ to_next += 2;
+ n_left_from -= 2;
+ n_left_to_next -= 2;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+ sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
+
+ vnet_buffer (b0)->sw_if_index[VLIB_TX] = ~0;
+ vnet_buffer (b1)->sw_if_index[VLIB_TX] = ~0;
+
+ hpgi0 = vnet_buffer (b0)->ip.adj_index[VLIB_TX];
+ hpgi1 = vnet_buffer (b1)->ip.adj_index[VLIB_TX];
+
+ hpg0 = hicnpg_server_get (hpgi0);
+ hpg1 = hicnpg_server_get (hpgi1);
+
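+	  /* Parse b0/b1 as hICN interests; on success, turn each interest
+	   * into a data packet in place and forward it to the ip lookup node,
+	   * otherwise leave the packet on the drop path. */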
+ size = vlib_buffer_length_in_chain (vm, b0);
+ if (hicn_interest_parse_pkt (b0, size) == HICN_ERROR_NONE)
+ {
+ vlib_buffer_t *rb = NULL;
+ rb = vlib_get_buffer (vm, hpg0->buffer_index);
+
+ isv6 ? convert_interest_to_data_v6 (vm, b0, rb, bi0) :
+ convert_interest_to_data_v4 (vm, b0, rb, bi0);
+
+ next0 = isv6 ? HICNPG_SERVER_NEXT_V6_LOOKUP :
+ HICNPG_SERVER_NEXT_V4_LOOKUP;
+ }
+
+ size = vlib_buffer_length_in_chain (vm, b1);
+ if (hicn_interest_parse_pkt (b1, size) == HICN_ERROR_NONE)
+ {
+ vlib_buffer_t *rb = NULL;
+ rb = vlib_get_buffer (vm, hpg1->buffer_index);
+
+ isv6 ? convert_interest_to_data_v6 (vm, b1, rb, bi1) :
+ convert_interest_to_data_v4 (vm, b1, rb, bi1);
+
+ next1 = isv6 ? HICNPG_SERVER_NEXT_V6_LOOKUP :
+ HICNPG_SERVER_NEXT_V4_LOOKUP;
+ }
+ pkts_processed += 2;
+
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
+ {
+ if (b0->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ hicnpg_server_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->pkt_type = pkt_type0;
+ t->msg_type = msg_type0;
+ t->sw_if_index = sw_if_index0;
+ t->next_index = next0;
+ }
+ if (b1->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ hicnpg_server_trace_t *t =
+ vlib_add_trace (vm, node, b1, sizeof (*t));
+ t->pkt_type = pkt_type1;
+ t->msg_type = msg_type1;
+ t->sw_if_index = sw_if_index1;
+ t->next_index = next1;
+ }
+ }
+ if (next0 == HICNPG_SERVER_NEXT_DROP)
+ {
+ pkts_dropped++;
+ }
+ if (next1 == HICNPG_SERVER_NEXT_DROP)
+ {
+ pkts_dropped++;
+ }
+ /*
+ * verify speculative enqueues, maybe switch current
+ * next frame
+ */
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
+ n_left_to_next, bi0, bi1, next0,
+ next1);
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ /* speculatively enqueue b0 to the current next frame */
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+ vnet_buffer (b0)->sw_if_index[VLIB_TX] = ~0;
+
+ hpgi0 = vnet_buffer (b0)->ip.adj_index[VLIB_TX];
+ hpg0 = hicnpg_server_get (hpgi0);
+
+ size = vlib_buffer_length_in_chain (vm, b0);
+ if (hicn_interest_parse_pkt (b0, size) == HICN_ERROR_NONE)
+ {
+ /* this node grabs only interests */
+ vlib_buffer_t *rb = NULL;
+ rb = vlib_get_buffer (vm, hpg0->buffer_index);
+
+ isv6 ? convert_interest_to_data_v6 (vm, b0, rb, bi0) :
+ convert_interest_to_data_v4 (vm, b0, rb, bi0);
+
+ next0 = isv6 ? HICNPG_SERVER_NEXT_V6_LOOKUP :
+ HICNPG_SERVER_NEXT_V4_LOOKUP;
+ }
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) &&
+ (b0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ hicnpg_server_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->pkt_type = pkt_type0;
+ t->msg_type = msg_type0;
+ t->sw_if_index = sw_if_index0;
+ t->next_index = next0;
+ }
+ pkts_processed += 1;
+
+ if (next0 == HICNPG_SERVER_NEXT_DROP)
+ {
+ pkts_dropped++;
+ }
+ /*
+ * verify speculative enqueue, maybe switch current
+ * next frame
+ */
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+ n_left_to_next, bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ vlib_node_increment_counter (vm, hicn_pg_server_node.index,
+ HICNPG_SERVER_ERROR_PROCESSED, pkts_processed);
+ vlib_node_increment_counter (vm, hicn_pg_server_node.index,
+ HICNPG_SERVER_ERROR_DROPPED, pkts_dropped);
+
+ return (frame->n_vectors);
+}
+
+void
+convert_interest_to_data_v4 (vlib_main_t *vm, vlib_buffer_t *b0,
+ vlib_buffer_t *rb, u32 bi0)
+{
+ ip4_header_t *ip4 = vlib_buffer_get_current (b0);
+
+ /* Get the packet length */
+ u16 pkt_len = clib_net_to_host_u16 (ip4->length);
+
+ /*
+ * Rule of thumb: We want the size of the IP packet to be <= 1500 bytes
+ */
+ u16 bytes_to_copy = rb->current_length;
+ if ((bytes_to_copy + pkt_len) > 1500)
+ {
+ bytes_to_copy = 1500 - pkt_len;
+ }
+ /* Add content to the data packet */
+ vlib_buffer_add_data (vm, &bi0, rb->data, bytes_to_copy);
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ ip4 = vlib_buffer_get_current (b0);
+
+ ip4_address_t src_addr = ip4->src_address;
+ ip4->src_address = ip4->dst_address;
+ ip4->dst_address = src_addr;
+
+ ip4->length = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0));
+ ip4->checksum = ip4_header_checksum (ip4);
+ calculate_tcp_checksum_v4 (vm, b0);
+}
+
+void
+convert_interest_to_data_v6 (vlib_main_t *vm, vlib_buffer_t *b0,
+ vlib_buffer_t *rb, u32 bi0)
+{
+ ip6_header_t *ip6 = vlib_buffer_get_current (b0);
+
+ /* Get the packet length */
+ uint16_t pkt_len =
+ clib_net_to_host_u16 (ip6->payload_length) + sizeof (ip6_header_t);
+
+ /*
+ * Figure out how many bytes we can add to the content
+ *
+   * Rule of thumb: We want the size of the IP packet to be <= 1500 bytes
+ */
+ u16 bytes_to_copy = rb->current_length;
+ if ((bytes_to_copy + pkt_len) > 1500)
+ {
+ bytes_to_copy = 1500 - pkt_len;
+ }
+ /* Add content to the data packet */
+ vlib_buffer_add_data (vm, &bi0, rb->data, bytes_to_copy);
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ ip6 = vlib_buffer_get_current (b0);
+ ip6_address_t src_addr = ip6->src_address;
+ ip6->src_address = ip6->dst_address;
+ ip6->dst_address = src_addr;
+
+ ip6->payload_length = clib_host_to_net_u16 (
+ vlib_buffer_length_in_chain (vm, b0) - sizeof (ip6_header_t));
+
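+  /* The TCP header here carries hICN-specific fields: bump the data offset
+   * and set the urgent pointer before recomputing the checksum (the exact
+   * semantics of these fields are defined by the hICN packet format). */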
+ tcp_header_t *tcp = (tcp_header_t *) (ip6 + 1);
+ tcp->data_offset_and_reserved |= 0x0f;
+ tcp->urgent_pointer = htons (0xffff);
+
+ calculate_tcp_checksum_v6 (vm, b0);
+}
+
+VLIB_NODE_FN (hicn_pg_server6_node)
+(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
+{
+ return hicnpg_node_server_fn (vm, node, frame, 1 /* is_v6 */);
+}
+
+VLIB_NODE_FN (hicn_pg_server4_node)
+(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
+{
+ return hicnpg_node_server_fn (vm, node, frame, 0 /* is_v6 */);
+}
+
+VLIB_REGISTER_NODE(hicn_pg_server6_node) =
+{
+ .name = "hicnpg-server-6",
+ .vector_size = sizeof(u32),
+ .format_trace = format_icnpg_server_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+ .n_errors = ARRAY_LEN(icnpg_server_error_strings),
+ .error_strings = icnpg_server_error_strings,
+ .n_next_nodes = HICNPG_SERVER_N_NEXT,
+ /* edit / add dispositions here */
+ .next_nodes =
+ {
+ [HICNPG_SERVER_NEXT_V4_LOOKUP] = "ip4-lookup",
+ [HICNPG_SERVER_NEXT_V6_LOOKUP] = "ip6-lookup",
+ [HICNPG_SERVER_NEXT_DROP] = "error-drop",
+ },
+};
+
+VLIB_REGISTER_NODE(hicn_pg_server4_node) =
+{
+ .name = "hicnpg-server-4",
+ .vector_size = sizeof(u32),
+ .format_trace = format_icnpg_server_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+ .n_errors = ARRAY_LEN(icnpg_server_error_strings),
+ .error_strings = icnpg_server_error_strings,
+ .n_next_nodes = HICNPG_SERVER_N_NEXT,
+ /* edit / add dispositions here */
+ .next_nodes =
+ {
+ [HICNPG_SERVER_NEXT_V4_LOOKUP] = "ip4-lookup",
+ [HICNPG_SERVER_NEXT_V6_LOOKUP] = "ip6-lookup",
+ [HICNPG_SERVER_NEXT_DROP] = "error-drop",
+ },
+};
diff --git a/hicn-plugin/src/route.c b/hicn-plugin/src/route.c
index b569d431e..fa95f7265 100644
--- a/hicn-plugin/src/route.c
+++ b/hicn-plugin/src/route.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Copyright (c) 2021-2023 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -33,21 +33,23 @@
#include "strategies/dpo_mw.h"
#include "infra.h"
#include "udp_tunnels/udp_tunnel.h"
+#include "mapme.h"
+#include "pg.h"
-#define FIB_SOURCE_HICN 0x04 //Right after the FIB_SOURCE_INTERFACE priority
+#define FIB_SOURCE_HICN 0x04 // Right after the FIB_SOURCE_INTERFACE priority
fib_source_t hicn_fib_src;
fib_node_type_t hicn_fib_node_type;
-ip4_address_t localhost4 = {0};
-ip6_address_t localhost6 = {0};
+ip4_address_t localhost4 = { 0 };
+ip6_address_t localhost6 = { 0 };
int
-hicn_route_get_dpo (const fib_prefix_t * prefix,
- const dpo_id_t ** hicn_dpo, u32 * fib_index)
+hicn_route_get_dpo (const fib_prefix_t *prefix, const dpo_id_t **hicn_dpo,
+ u32 *fib_index)
{
- //fib_prefix_t fib_pfx;
+ // fib_prefix_t fib_pfx;
const dpo_id_t *load_balance_dpo_id;
const dpo_id_t *former_dpo_id;
int found = 0, ret = HICN_ERROR_ROUTE_NOT_FOUND;
@@ -58,9 +60,8 @@ hicn_route_get_dpo (const fib_prefix_t * prefix,
* ASSUMPTION: we use table 0 which is the default table and it is
* already existing and locked
*/
- *fib_index = fib_table_find_or_create_and_lock (prefix->fp_proto,
- HICN_FIB_TABLE,
- hicn_fib_src);
+ *fib_index = fib_table_find_or_create_and_lock (
+ prefix->fp_proto, HICN_FIB_TABLE, hicn_fib_src);
fib_entry_index = fib_table_lookup_exact_match (*fib_index, prefix);
if (fib_entry_index != FIB_NODE_INDEX_INVALID)
@@ -103,288 +104,276 @@ hicn_route_get_dpo (const fib_prefix_t * prefix,
}
int
-hicn_route_set_strategy (fib_prefix_t * prefix, u8 strategy_id)
+hicn_route_set_strategy (fib_prefix_t *prefix, u8 strategy_id)
{
const dpo_id_t *hicn_dpo_id;
- dpo_id_t new_dpo_id = DPO_INVALID;
int ret;
- hicn_dpo_ctx_t *old_hicn_dpo_ctx;
- const hicn_dpo_vft_t *new_dpo_vft;
- index_t new_hicn_dpo_idx;
+ hicn_dpo_ctx_t *hicn_dpo_ctx;
u32 fib_index;
ret = hicn_route_get_dpo (prefix, &hicn_dpo_id, &fib_index);
if (ret == HICN_ERROR_NONE)
{
- old_hicn_dpo_ctx = hicn_strategy_dpo_ctx_get (hicn_dpo_id->dpoi_index);
+ hicn_dpo_ctx = hicn_strategy_dpo_ctx_get (hicn_dpo_id->dpoi_index);
+ const hicn_dpo_vft_t *dpo_vft = hicn_dpo_get_vft_from_id (strategy_id);
- new_dpo_vft = hicn_dpo_get_vft_from_id (strategy_id);
-
- if (new_dpo_vft == NULL || old_hicn_dpo_ctx == NULL)
+ if (hicn_dpo_ctx == NULL || dpo_vft == NULL)
return HICN_ERROR_STRATEGY_NOT_FOUND;
- /* Create a new dpo for the new strategy */
- new_dpo_vft->hicn_dpo_create (hicn_dpo_id->dpoi_proto,
- old_hicn_dpo_ctx->next_hops,
- old_hicn_dpo_ctx->entry_count,
- &new_hicn_dpo_idx);
-
- /* the value we got when we registered */
- dpo_set (&new_dpo_id,
- new_dpo_vft->hicn_dpo_get_type (),
- (ip46_address_is_ip4 (&prefix->fp_addr) ? DPO_PROTO_IP4 :
- DPO_PROTO_IP6), new_hicn_dpo_idx);
-
- /* Here is where we create the "via" like route */
- /*
- * For the moment we use the global one the prefix you want
- * to match Neale suggested -- FIB_SOURCE_HICN the client
- * that is adding them -- no easy explanation at this time…
- */
- fib_node_index_t new_fib_node_index =
- fib_table_entry_special_dpo_update (fib_index,
- prefix,
- hicn_fib_src,
- FIB_ENTRY_FLAG_EXCLUSIVE,
- &new_dpo_id);
-
- dpo_unlock (&new_dpo_id);
- ret =
- (new_fib_node_index !=
- FIB_NODE_INDEX_INVALID) ? HICN_ERROR_NONE :
- HICN_ERROR_ROUTE_NOT_UPDATED;
+ dpo_vft->hicn_dpo_update_type (hicn_dpo_ctx);
}
- //Remember to remove the lock from the table when removing the entry
+ // Remember to remove the lock from the table when removing the entry
return ret;
-
}
-int
-ip_nh_add_helper (fib_protocol_t fib_proto, const fib_prefix_t * rpfx, ip46_address_t * nh, u32 sw_if)
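+/*
+ * Shared helper: build a single fib_route_path_t from either an ip next hop
+ * (nh + sw_if) or a udp-encap id, and add it to or remove it from the vrf 0
+ * entry for the prefix, using the FIB_SOURCE_API source.
+ */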
+static int
+ip_nh_add_del_helper (fib_protocol_t fib_proto, const fib_prefix_t *rpfx,
+ ip46_address_t *nh, u32 sw_if, u32 udp_encap_id,
+ dpo_proto_t proto, u8 is_add)
{
fib_route_path_t *rpaths = NULL, rpath;
- u32 fib_index = fib_table_find(fib_proto, 0);
+ u32 fib_index = fib_table_find (fib_proto, 0);
+ clib_memset (&rpath, 0, sizeof (rpath));
+
+ if (nh)
+ {
+ rpath.frp_addr = *nh;
+ rpath.frp_sw_if_index = sw_if;
+ }
+ else if (udp_encap_id != ~0)
+ {
+ rpath.frp_udp_encap_id = udp_encap_id;
+ rpath.frp_flags |= FIB_ROUTE_PATH_UDP_ENCAP;
+ }
- clib_memset(&rpath, 0, sizeof(rpath));
rpath.frp_weight = 1;
- rpath.frp_sw_if_index = sw_if;
- rpath.frp_addr = *nh;
- rpath.frp_proto = ip46_address_is_ip4(nh) ? DPO_PROTO_IP4 : DPO_PROTO_IP6;
+ rpath.frp_proto = proto;
- vec_add1(rpaths, rpath);
+ vec_add1 (rpaths, rpath);
+
+ if (is_add)
+ fib_table_entry_path_add2 (fib_index, rpfx, FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE, rpaths);
+ else
+ fib_table_entry_path_remove2 (fib_index, rpfx, FIB_SOURCE_API, rpaths);
- fib_table_entry_path_add2 (fib_index,
- rpfx,
- FIB_SOURCE_CLI,
- FIB_ENTRY_FLAG_NONE, rpaths);
return 0;
}
int
-ip_nh_del_helper (fib_protocol_t fib_proto, const fib_prefix_t * rpfx, ip46_address_t * nh, u32 sw_if)
+ip_nh_adj_add_del_helper (fib_protocol_t fib_proto, const fib_prefix_t *rpfx,
+ ip46_address_t *nh, u32 sw_if, u8 is_add)
{
- fib_route_path_t *rpaths = NULL, rpath;
-
- u32 fib_index = fib_table_find(fib_proto, 0);
-
- clib_memset(&rpath, 0, sizeof(rpath));
- rpath.frp_weight = 1;
- rpath.frp_sw_if_index = sw_if;
- rpath.frp_addr = *nh;
- rpath.frp_proto = ip46_address_is_ip4(nh) ? DPO_PROTO_IP4 : DPO_PROTO_IP6;
-
- vec_add1(rpaths, rpath);
-
- fib_table_entry_path_remove2 (fib_index,
- rpfx,
- FIB_SOURCE_CLI,
- rpaths);
- return 0;
+ return ip_nh_add_del_helper (
+ fib_proto, rpfx, nh, sw_if, ~0,
+ ip46_address_is_ip4 (nh) ? DPO_PROTO_IP4 : DPO_PROTO_IP6, is_add);
}
+int
+ip_nh_udp_tunnel_add_del_helper (fib_protocol_t fib_proto,
+ const fib_prefix_t *rpfx, u32 uei,
+ dpo_proto_t proto, u8 is_add)
+{
+ return ip_nh_add_del_helper (fib_proto, rpfx, NULL, ~0, uei, proto, is_add);
+}
-static ip46_address_t * get_address(ip46_address_t * nh, u32 sw_if, fib_protocol_t proto)
+static ip46_address_t *
+get_address (ip46_address_t *nh, u32 sw_if, fib_protocol_t proto)
{
- ip46_address_t * local_address = calloc(1, sizeof(ip46_address_t));
+ ip46_address_t *local_address = calloc (1, sizeof (ip46_address_t));
if (proto == FIB_PROTOCOL_IP4)
{
ip_interface_address_t *interface_address;
- ip4_address_t *addr =
- ip4_interface_address_matching_destination (&ip4_main,
- &nh->ip4,
- sw_if,
- &interface_address);
+ ip4_address_t *addr = ip4_interface_address_matching_destination (
+ &ip4_main, &nh->ip4, sw_if, &interface_address);
if (addr == NULL)
- addr = ip4_interface_first_address (&ip4_main,
- sw_if,
- &interface_address);
+ addr =
+ ip4_interface_first_address (&ip4_main, sw_if, &interface_address);
if (addr != NULL)
- ip46_address_set_ip4 (local_address, addr);
+ ip46_address_set_ip4 (local_address, addr);
}
else if (proto == FIB_PROTOCOL_IP6)
{
ip_interface_address_t *interface_address;
- ip6_interface_address_matching_destination (&ip6_main,
- &nh->ip6,
- sw_if,
- &interface_address);
+ ip6_interface_address_matching_destination (&ip6_main, &nh->ip6, sw_if,
+ &interface_address);
ip6_address_t *addr = NULL;
if (interface_address != NULL)
- addr =
- (ip6_address_t *)
- ip_interface_address_get_address (&ip6_main.lookup_main,
- interface_address);
+ addr = (ip6_address_t *) ip_interface_address_get_address (
+ &ip6_main.lookup_main, interface_address);
if (addr == NULL)
- addr = ip6_interface_first_address (&ip6_main, sw_if);
+ addr = ip6_interface_first_address (&ip6_main, sw_if);
if (addr != NULL)
- ip46_address_set_ip6 (local_address, addr);
+ ip46_address_set_ip6 (local_address, addr);
}
return local_address;
}
static void
-sync_hicn_fib_entry(hicn_dpo_ctx_t *fib_entry)
+sync_hicn_fib_entry (hicn_dpo_ctx_t *fib_entry, hicn_face_id_t **pvec_faces)
{
- const dpo_id_t * dpo_loadbalance = fib_entry_contribute_ip_forwarding (fib_entry->fib_entry_index);
- const load_balance_t *lb0 = load_balance_get(dpo_loadbalance->dpoi_index);
- index_t hicn_fib_entry_index = hicn_strategy_dpo_ctx_get_index(fib_entry);
- hicn_face_id_t * vec_faces = 0;
+ hicn_face_id_t *vec_faces = NULL;
+ const dpo_id_t *dpo_loadbalance =
+ fib_entry_contribute_ip_forwarding (fib_entry->fib_entry_index);
+ const fib_entry_t *_fib_entry = fib_entry_get (fib_entry->fib_entry_index);
+ const load_balance_t *lb0 = load_balance_get (dpo_loadbalance->dpoi_index);
+ index_t hicn_fib_entry_index = hicn_strategy_dpo_ctx_get_index (fib_entry);
dpo_id_t temp = DPO_INVALID;
const dpo_id_t *former_dpo = &temp;
int index = 0;
- for (int j = 0; j < lb0->lb_n_buckets; j++) {
- const dpo_id_t * dpo = load_balance_get_bucket_i(lb0,j);
-
- int dpo_comparison = dpo_cmp(former_dpo, dpo);
- former_dpo = dpo;
- /*
- * Loadbalancing in ip replicate the dpo in multiple buckets
- * in order to honor the assigned weights.
- */
- if (dpo_comparison == 0)
- continue;
-
- u32 sw_if = ~0;
- ip46_address_t * nh = NULL;
- hicn_face_id_t face_id = HICN_FACE_NULL;
-
- if (dpo_is_adj(dpo))
- {
- ip_adjacency_t * adj = adj_get (dpo->dpoi_index);
- sw_if = adj->rewrite_header.sw_if_index;
- nh = get_address (&(adj->sub_type.nbr.next_hop), sw_if, fib_entry->proto);
- }
- else if (dpo->dpoi_type == dpo_type_udp_ip4 || dpo->dpoi_type == dpo_type_udp_ip6)
- {
- u8 proto = dpo->dpoi_type == dpo_type_udp_ip4 ? FIB_PROTOCOL_IP4 : FIB_PROTOCOL_IP6;
- nh = calloc (1, sizeof(ip46_address_t));
- switch (dpo->dpoi_proto)
- {
- case FIB_PROTOCOL_IP6:
- nh = calloc (1, sizeof(ip46_address_t));
- ip46_address_set_ip6(nh, &localhost6);
- break;
- case FIB_PROTOCOL_IP4:
- nh = calloc (1, sizeof(ip46_address_t));
- ip46_address_set_ip4(nh, &localhost4);
- break;
- default:
- nh = calloc (1, sizeof(ip46_address_t));
- }
- udp_tunnel_add_existing (dpo->dpoi_index, proto);
- }
- else //if (dpo_is_drop(dpo))
- {
- sw_if = dpo_get_urpf(dpo);
- nh = calloc (1, sizeof(ip46_address_t));
- }
-
- /* Careful, this adds a lock on the face if it exists */
- hicn_face_add(dpo, nh, sw_if, &face_id, 0);
-
- vec_validate(vec_faces, index);
- vec_faces[index] = face_id;
- index++;
-
- /* Face creation can realloc load_balance_t? Seem the fib_tracking does so. */
- dpo_loadbalance = fib_entry_contribute_ip_forwarding (fib_entry->fib_entry_index);
- lb0 = load_balance_get(dpo_loadbalance->dpoi_index);
- }
-
- const hicn_dpo_vft_t * strategy_vft = hicn_dpo_get_vft(fib_entry->dpo_type);
+
+#define ADD_FACE(nh, dpo_proto) \
+ do \
+ { \
+ /* Careful, this adds a lock on the face if it exists */ \
+ hicn_face_add (dpo, nh, sw_if, &face_id, dpo_proto); \
+ ASSERT (face_id != HICN_FACE_NULL); \
+ vec_validate (vec_faces, index); \
+ vec_faces[index] = face_id; \
+ (index)++; \
+ \
+ /* Face creation can realloc load_balance_t? Seem the fib_tracking does \
+ * so. */ \
+ dpo_loadbalance = \
+ fib_entry_contribute_ip_forwarding (fib_entry->fib_entry_index); \
+ lb0 = load_balance_get (dpo_loadbalance->dpoi_index); \
+ } \
+ while (0)
+
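+  /* Walk the load balance buckets of the tracked vrf 0 entry and create (or
+   * lock) one hICN face per distinct dpo: adjacency, udp-encap or packet
+   * generator server. */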
+ for (int j = 0; j < lb0->lb_n_buckets; j++)
+ {
+ const dpo_id_t *dpo = load_balance_get_bucket_i (lb0, j);
+
+ int dpo_comparison = dpo_cmp (former_dpo, dpo);
+ former_dpo = dpo;
+ /*
+ * Loadbalancing in ip replicate the dpo in multiple buckets
+ * in order to honor the assigned weights.
+ */
+ if (dpo_comparison == 0)
+ continue;
+
+ u32 sw_if = ~0;
+ ip46_address_t *nh = NULL;
+ hicn_face_id_t face_id = HICN_FACE_NULL;
+
+ if (dpo_is_adj (dpo))
+ {
+ ip_adjacency_t *adj = adj_get (dpo->dpoi_index);
+ sw_if = adj->rewrite_header.sw_if_index;
+ nh = get_address (&(adj->sub_type.nbr.next_hop), sw_if,
+ fib_entry->proto);
+ ADD_FACE (nh, dpo->dpoi_proto);
+ HICN_DEBUG ("Added new HICN face: %d because of route prefix %U",
+ face_id, format_fib_prefix, &_fib_entry->fe_prefix);
+ }
+ else if (dpo_is_udp_encap (dpo))
+ {
+ dpo_proto_t proto = dpo_udp_encap_get_proto (dpo);
+ ip46_address_t _nh = { 0 };
+ nh = &_nh;
+ switch (_fib_entry->fe_prefix.fp_proto)
+ {
+ case FIB_PROTOCOL_IP6:
+ ip46_address_set_ip6 (nh, &localhost6);
+ ADD_FACE (nh, DPO_PROTO_IP6);
+ break;
+ case FIB_PROTOCOL_IP4:
+ ip46_address_set_ip4 (nh, &localhost4);
+ ADD_FACE (nh, DPO_PROTO_IP4);
+ break;
+ default:
+ continue;
+ }
+ HICN_DEBUG ("Added new UDP face: %d because of route prefix %U",
+ face_id, format_fib_prefix, &_fib_entry->fe_prefix);
+ udp_tunnel_add_existing (dpo->dpoi_index, proto);
+ udp_tunnel_set_face (face_id, proto == DPO_PROTO_IP4);
+ }
+ else if (dpo_is_pgserver (dpo))
+ {
+ hicnpg_server_t *pg_server = hicnpg_server_get (dpo->dpoi_index);
+ ADD_FACE (&pg_server->hicn_locator, dpo->dpoi_proto);
+ }
+ }
+
+ const hicn_dpo_vft_t *strategy_vft = hicn_dpo_get_vft (fib_entry->dpo_type);
int i = 0;
while (i < fib_entry->entry_count)
{
- u32 idx_nh = vec_search(vec_faces, fib_entry->next_hops[i]);
+ u32 idx_nh = vec_search (vec_faces, fib_entry->next_hops[i]);
if (idx_nh == ~0)
- {
- strategy_vft->hicn_dpo_del_nh(fib_entry->next_hops[i], hicn_fib_entry_index);
- }
+ {
+ strategy_vft->hicn_dpo_del_nh (fib_entry->next_hops[i],
+ hicn_fib_entry_index);
+ }
else
- {
- vec_del1(vec_faces, idx_nh);
+ {
+ vec_del1 (vec_faces, idx_nh);
- /* Remove the lock added by hicn_face_add */
- hicn_face_unlock_with_id (fib_entry->next_hops[i]);
- i++;
- }
+ /* Remove the lock added by hicn_face_add */
+ hicn_face_unlock_with_id (fib_entry->next_hops[i]);
+ i++;
+ }
}
hicn_face_id_t *face_id;
- vec_foreach(face_id, vec_faces)
+ vec_foreach (face_id, vec_faces)
{
- strategy_vft->hicn_dpo_add_update_nh(*face_id, hicn_fib_entry_index);
+ strategy_vft->hicn_dpo_add_update_nh (*face_id, hicn_fib_entry_index);
/* Remove the lock added by hicn_face_add */
hicn_face_unlock_with_id (*face_id);
-
}
- vec_free(vec_faces);
+
+ *pvec_faces = vec_faces;
}
static void
enable_disable_data_receiving (fib_protocol_t proto, u32 sw_if, u8 is_enable)
{
if (proto == FIB_PROTOCOL_IP4 && sw_if != ~0)
- vnet_feature_enable_disable ("ip4-local", "hicn-data-input-ip4",
- sw_if, is_enable, 0, 0);
+ vnet_feature_enable_disable ("ip4-local", "hicn-data-input-ip4", sw_if,
+ is_enable, 0, 0);
else if (proto == FIB_PROTOCOL_IP6 && sw_if != ~0)
- vnet_feature_enable_disable ("ip6-local", "hicn-data-input-ip6",
- sw_if, is_enable, 0, 0);
-
+ vnet_feature_enable_disable ("ip6-local", "hicn-data-input-ip6", sw_if,
+ is_enable, 0, 0);
}
-walk_rc_t enable_data_receiving_new_fib_entry (vnet_main_t * vnm,
- vnet_sw_interface_t * si,
- void *ctx)
+walk_rc_t
+enable_data_receiving_new_fib_entry (vnet_main_t *vnm, vnet_sw_interface_t *si,
+ void *ctx)
{
fib_protocol_t *proto = (fib_protocol_t *) ctx;
- enable_disable_data_receiving(*proto, si->sw_if_index, 1);
+ enable_disable_data_receiving (*proto, si->sw_if_index, 1);
return (WALK_CONTINUE);
}
-walk_rc_t disable_data_receiving_rm_fib_entry (vnet_main_t * vnm,
- vnet_sw_interface_t * si,
- void *ctx)
+walk_rc_t
+disable_data_receiving_rm_fib_entry (vnet_main_t *vnm, vnet_sw_interface_t *si,
+ void *ctx)
{
fib_protocol_t *proto = (fib_protocol_t *) ctx;
- enable_disable_data_receiving(*proto, si->sw_if_index, 0);
+ enable_disable_data_receiving (*proto, si->sw_if_index, 0);
return (WALK_CONTINUE);
- }
+}
int
-hicn_route_enable (fib_prefix_t *prefix) {
+hicn_route_enable (const fib_prefix_t *prefix,
+ fib_node_index_t *hicn_fib_node_index,
+ hicn_face_id_t **pvec_faces)
+{
int ret = HICN_ERROR_NONE;
fib_node_index_t fib_entry_index;
@@ -394,7 +383,7 @@ hicn_route_enable (fib_prefix_t *prefix) {
* ASSUMPTION: we use table 0 which is the default table and it is
* already existing and locked
*/
- u32 fib_index = fib_table_find(prefix->fp_proto, 0);
+ u32 fib_index = fib_table_find (prefix->fp_proto, 0);
fib_entry_index = fib_table_lookup_exact_match (fib_index, prefix);
@@ -402,43 +391,51 @@ hicn_route_enable (fib_prefix_t *prefix) {
{
fib_entry_index = fib_table_lookup (fib_index, prefix);
- fib_route_path_t * paths = fib_entry_encode(fib_entry_index);
+ fib_route_path_t *paths = fib_entry_encode (fib_entry_index);
- fib_table_entry_path_add2(fib_index, prefix, FIB_SOURCE_CLI, FIB_ENTRY_FLAG_NONE, paths);
+ fib_table_entry_path_add2 (fib_index, prefix, FIB_SOURCE_CLI,
+ FIB_ENTRY_FLAG_NONE, paths);
}
/* Check if the prefix is already enabled */
- u32 fib_hicn_index = fib_table_find(prefix->fp_proto, HICN_FIB_TABLE);
+ u32 fib_hicn_index = fib_table_find (prefix->fp_proto, HICN_FIB_TABLE);
- fib_node_index_t fib_hicn_entry_index = fib_table_lookup_exact_match (fib_hicn_index, prefix);
+ *hicn_fib_node_index = fib_table_lookup_exact_match (fib_hicn_index, prefix);
- if (fib_hicn_entry_index == FIB_NODE_INDEX_INVALID)
+ if (*hicn_fib_node_index == FIB_NODE_INDEX_INVALID)
{
+ HICN_DEBUG (
+ "No route found for %U. Creating DPO and tracking fib prefix.",
+ format_fib_prefix, prefix);
dpo_id_t dpo = DPO_INVALID;
- index_t dpo_idx;
- default_dpo.hicn_dpo_create (prefix->fp_proto, 0, NEXT_HOP_INVALID,
- &dpo_idx);
+ index_t dpo_idx = ~0;
+ default_dpo.hicn_dpo_create (prefix->fp_proto, 0, 0, &dpo_idx);
+ HICN_DEBUG ("Created new DPO_MW_CTX_T: %d.", dpo_idx);
/* the value we got when we registered */
/*
* This should be taken from the name?!? the index of the
* object
*/
- dpo_set (&dpo,
- default_dpo.hicn_dpo_get_type (),
- (ip46_address_is_ip4 (&prefix->fp_addr) ? DPO_PROTO_IP4 :
- DPO_PROTO_IP6), dpo_idx);
+ dpo_set (&dpo, default_dpo.hicn_dpo_get_type (),
+ (ip46_address_is_ip4 (&prefix->fp_addr) ? DPO_PROTO_IP4 :
+ DPO_PROTO_IP6),
+ dpo_idx);
+ HICN_DEBUG (
+ "dpo_set called with parameters: type=%d, proto=%s, index=%d",
+ default_dpo.hicn_dpo_get_type (),
+ ip46_address_is_ip4 (&prefix->fp_addr) ? "DPO_PROTO_IP4" :
+ "DPO_PROTO_IP6",
+ dpo_idx);
- hicn_dpo_ctx_t * fib_entry = hicn_strategy_dpo_ctx_get(dpo_idx);
+ hicn_dpo_ctx_t *fib_entry = hicn_strategy_dpo_ctx_get (dpo_idx);
fib_node_init (&fib_entry->fib_node, hicn_fib_node_type);
fib_node_lock (&fib_entry->fib_node);
- fib_entry->fib_entry_index = fib_entry_track (fib_index,
- prefix,
- hicn_fib_node_type,
- dpo_idx, &fib_entry->fib_sibling);
-
+ fib_entry->fib_entry_index =
+ fib_entry_track (fib_index, prefix, hicn_fib_node_type, dpo_idx,
+ &fib_entry->fib_sibling);
/* Here is where we create the "via" like route */
/*
@@ -446,94 +443,104 @@ hicn_route_enable (fib_prefix_t *prefix) {
* to match Neale suggested -- FIB_SOURCE_HICN the client
* that is adding them -- no easy explanation at this time…
*/
- CLIB_UNUSED (fib_node_index_t new_fib_node_index) =
- fib_table_entry_special_dpo_add (fib_hicn_index,
- prefix,
- hicn_fib_src,
- (FIB_ENTRY_FLAG_EXCLUSIVE |
- FIB_ENTRY_FLAG_LOOSE_URPF_EXEMPT),
- &dpo);
+ *hicn_fib_node_index = fib_table_entry_special_dpo_add (
+ fib_hicn_index, prefix, hicn_fib_src,
+ (FIB_ENTRY_FLAG_EXCLUSIVE | FIB_ENTRY_FLAG_LOOSE_URPF_EXEMPT), &dpo);
- sync_hicn_fib_entry(fib_entry);
+ HICN_DEBUG ("Calling sync_hicn_fib_entry");
+ sync_hicn_fib_entry (fib_entry, pvec_faces);
/* We added a route, therefore add one lock to the table */
fib_table_lock (fib_index, prefix->fp_proto, hicn_fib_src);
- /* Enable the feature to punt data packet every time we enable a new hicn route
- * For each enable there must be a disable to defenitely disable the feature
+      /* Enable the feature to punt data packets every time we enable a new
+       * hicn route. For each enable there must be a disable to definitely
+       * disable the feature.
*
- * We cannot enable only the interfaces on which we send out interest because
- * Data packet might be coming on in different interfaces, as in che case of mpls
- * tunnels (packets are received from the physical nic, not the mpls tunnel interface).
+       * We cannot enable only the interfaces on which we send out interests
+       * because data packets might come in on different interfaces, as in the
+       * case of mpls tunnels (packets are received from the physical nic, not
+       * the mpls tunnel interface).
*/
- vnet_main_t * vnm = vnet_get_main ();
- vnet_sw_interface_walk(vnm, enable_data_receiving_new_fib_entry, &(prefix->fp_proto));
+ vnet_main_t *vnm = vnet_get_main ();
+ vnet_sw_interface_walk (vnm, enable_data_receiving_new_fib_entry,
+ (void *) (&prefix->fp_proto));
dpo_unlock (&dpo);
}
else
{
+ HICN_DEBUG ("Found a route for %U. Updating DPO.", format_fib_prefix,
+ prefix);
const dpo_id_t *load_balance_dpo_id;
const dpo_id_t *strategy_dpo_id;
/* Route already existing. We need to update the dpo. */
load_balance_dpo_id =
- fib_entry_contribute_ip_forwarding (fib_hicn_entry_index);
+ fib_entry_contribute_ip_forwarding (*hicn_fib_node_index);
/* The dpo is not a load balance dpo as expected */
if (load_balance_dpo_id->dpoi_type != DPO_LOAD_BALANCE)
- {
- ret = HICN_ERROR_ROUTE_NO_LD;
- goto done;
- }
+ {
+ ret = HICN_ERROR_ROUTE_NO_LD;
+ HICN_ERROR ("DPO is not a load balance.");
+ goto done;
+ }
else
{
load_balance_t *lb =
load_balance_get (load_balance_dpo_id->dpoi_index);
- strategy_dpo_id = load_balance_get_bucket_i (lb, 0);
+ strategy_dpo_id = load_balance_get_bucket_i (lb, 0);
- if (!dpo_is_hicn (strategy_dpo_id))
- {
- ret = HICN_ERROR_ROUTE_DPO_NO_HICN;
- goto done;
- }
+ if (!dpo_is_hicn (strategy_dpo_id))
+ {
+ ret = HICN_ERROR_ROUTE_DPO_NO_HICN;
+ HICN_ERROR ("DPO is not hicn.");
+ goto done;
+ }
- if (lb->lb_n_buckets > 1)
- {
- ret = HICN_ERROR_ROUTE_MLT_LD;
- goto done;
- }
+ if (lb->lb_n_buckets > 1)
+ {
+ ret = HICN_ERROR_ROUTE_MLT_LD;
+ HICN_ERROR ("Too many load balance buckets.");
+ goto done;
+ }
- hicn_dpo_ctx_t * hicn_fib_entry = hicn_strategy_dpo_ctx_get(strategy_dpo_id->dpoi_index);
+ hicn_dpo_ctx_t *hicn_fib_entry =
+ hicn_strategy_dpo_ctx_get (strategy_dpo_id->dpoi_index);
- sync_hicn_fib_entry(hicn_fib_entry);
- }
+ HICN_DEBUG ("Calling sync_hicn_fib_entry");
+ sync_hicn_fib_entry (hicn_fib_entry, pvec_faces);
+ }
}
- done:
+done:
return ret;
}
int
-hicn_route_disable (fib_prefix_t *prefix) {
+hicn_route_disable (fib_prefix_t *prefix)
+{
int ret = HICN_ERROR_NONE;
/* Check if the prefix is already enabled */
- u32 fib_hicn_index = fib_table_find(prefix->fp_proto, HICN_FIB_TABLE);
+ u32 fib_hicn_index = fib_table_find (prefix->fp_proto, HICN_FIB_TABLE);
- fib_node_index_t fib_hicn_entry_index = fib_table_lookup_exact_match (fib_hicn_index, prefix);
+ fib_node_index_t fib_hicn_entry_index =
+ fib_table_lookup_exact_match (fib_hicn_index, prefix);
if (fib_hicn_entry_index == FIB_NODE_INDEX_INVALID)
{
- return HICN_ERROR_ROUTE_NOT_FOUND;
+ HICN_ERROR ("Route %U not found", format_fib_prefix, prefix);
+ ret = HICN_ERROR_ROUTE_NOT_FOUND;
}
else
{
const dpo_id_t *load_balance_dpo_id;
const dpo_id_t *strategy_dpo_id;
- hicn_dpo_ctx_t * hicn_fib_entry;
+ hicn_dpo_ctx_t *hicn_fib_entry;
/* Route already existing. We need to update the dpo. */
load_balance_dpo_id =
@@ -541,57 +548,87 @@ hicn_route_disable (fib_prefix_t *prefix) {
/* The dpo is not a load balance dpo as expected */
if (load_balance_dpo_id->dpoi_type != DPO_LOAD_BALANCE)
- {
- ret = HICN_ERROR_ROUTE_NO_LD;
- goto done;
- }
+ {
+ HICN_ERROR ("DPO for route %U is not a load balance.",
+ format_ip_prefix, prefix);
+ ret = HICN_ERROR_ROUTE_NO_LD;
+ goto done;
+ }
else
{
load_balance_t *lb =
load_balance_get (load_balance_dpo_id->dpoi_index);
- strategy_dpo_id = load_balance_get_bucket_i (lb, 0);
+ strategy_dpo_id = load_balance_get_bucket_i (lb, 0);
+
+ if (!dpo_is_hicn (strategy_dpo_id))
+ {
+ HICN_ERROR ("ERROR: DPO for route %U is not a hicn.",
+ format_ip_prefix, prefix);
+ ret = HICN_ERROR_ROUTE_DPO_NO_HICN;
+ goto done;
+ }
- if (!dpo_is_hicn (strategy_dpo_id))
- {
- ret = HICN_ERROR_ROUTE_DPO_NO_HICN;
- goto done;
- }
+ if (lb->lb_n_buckets > 1)
+ {
+ HICN_ERROR ("DPO for route %U contains multiple next hops.",
+ format_ip_prefix, prefix);
+ ret = HICN_ERROR_ROUTE_MLT_LD;
+ goto done;
+ }
+
+ hicn_fib_entry =
+ hicn_strategy_dpo_ctx_get (strategy_dpo_id->dpoi_index);
+
+ HICN_DEBUG ("Found from hicn_strategy_dpo_ctx_get with index %d: %p",
+ strategy_dpo_id->dpoi_index, hicn_fib_entry);
+
+ if (PREDICT_FALSE (!hicn_fib_entry))
+ {
+ HICN_ERROR (
+ "hicn_strategy_dpo_ctx_get for index %d returned NULL",
+ strategy_dpo_id->dpoi_index);
+ ret = HICN_ERROR_ROUTE_DPO_NO_HICN;
+ goto done;
+ }
- if (lb->lb_n_buckets > 1)
- {
- ret = HICN_ERROR_ROUTE_MLT_LD;
- goto done;
- }
+ for (int i = 0; i < hicn_fib_entry->entry_count; i++)
+ {
+ hicn_strategy_dpo_ctx_del_nh (hicn_fib_entry->next_hops[i],
+ hicn_fib_entry);
+ }
- hicn_fib_entry = hicn_strategy_dpo_ctx_get(strategy_dpo_id->dpoi_index);
+ hicn_mapme_tfib_clear ((hicn_mapme_tfib_t *) hicn_fib_entry);
+ }
- for (int i = 0; i < hicn_fib_entry->entry_count; i++)
- {
- hicn_strategy_dpo_ctx_del_nh(hicn_fib_entry->next_hops[i], hicn_fib_entry);
- }
- }
+ HICN_DEBUG (
+ "Calling fib_entry_untrack and fib_table_entry_special_remove "
+ "for route %U.",
+ format_fib_prefix, prefix);
- fib_entry_untrack(hicn_fib_entry->fib_entry_index, hicn_fib_entry->fib_sibling);
+ fib_entry_untrack (hicn_fib_entry->fib_entry_index,
+ hicn_fib_entry->fib_sibling);
fib_table_entry_special_remove (fib_hicn_index, prefix, hicn_fib_src);
+ fib_node_unlock (&hicn_fib_entry->fib_node);
- /* Disable the feature to punt data packet every time we enable a new hicn route */
- vnet_main_t * vnm = vnet_get_main ();
- vnet_sw_interface_walk(vnm, disable_data_receiving_rm_fib_entry, &(prefix->fp_proto));
+ /* Disable the feature to punt data packet every time we enable a new
+ * hicn route */
+ vnet_main_t *vnm = vnet_get_main ();
+ vnet_sw_interface_walk (vnm, disable_data_receiving_rm_fib_entry,
+ &(prefix->fp_proto));
}
- done:
+done:
return ret;
}
-
static fib_node_t *
hicn_ctx_node_get (fib_node_index_t index)
{
- hicn_dpo_ctx_t * hicn_ctx;
+ hicn_dpo_ctx_t *hicn_ctx;
- hicn_ctx = hicn_strategy_dpo_ctx_get(index);
+ hicn_ctx = hicn_strategy_dpo_ctx_get (index);
return (&hicn_ctx->fib_node);
}
@@ -602,20 +639,23 @@ hicn_fib_last_lock_gone (fib_node_t *node)
}
static hicn_dpo_ctx_t *
-hicn_ctx_from_fib_node (fib_node_t * node)
+hicn_ctx_from_fib_node (fib_node_t *node)
{
return ((hicn_dpo_ctx_t *) (((char *) node) -
- STRUCT_OFFSET_OF (hicn_dpo_ctx_t, fib_node)));
+ STRUCT_OFFSET_OF (hicn_dpo_ctx_t, fib_node)));
}
static fib_node_back_walk_rc_t
-hicn_fib_back_walk_notify (fib_node_t *node,
- fib_node_back_walk_ctx_t *ctx)
+hicn_fib_back_walk_notify (fib_node_t *node, fib_node_back_walk_ctx_t *ctx)
{
hicn_dpo_ctx_t *fib_entry = hicn_ctx_from_fib_node (node);
- sync_hicn_fib_entry(fib_entry);
+ hicn_face_id_t *vec_faces = NULL;
+ HICN_DEBUG ("Calling sync_hicn_fib_entry from hicn_fib_back_walk_notify");
+ sync_hicn_fib_entry (fib_entry, &vec_faces);
+ if (vec_faces != NULL)
+ vec_free (vec_faces);
return (FIB_NODE_BACK_WALK_CONTINUE);
}
@@ -625,25 +665,22 @@ hicn_fib_show_memory (void)
{
}
-
-static const fib_node_vft_t hicn_fib_vft =
-{
- .fnv_get = hicn_ctx_node_get,
- .fnv_last_lock = hicn_fib_last_lock_gone,
- .fnv_back_walk = hicn_fib_back_walk_notify,
- .fnv_mem_show = hicn_fib_show_memory,
+static const fib_node_vft_t hicn_fib_vft = {
+ .fnv_get = hicn_ctx_node_get,
+ .fnv_last_lock = hicn_fib_last_lock_gone,
+ .fnv_back_walk = hicn_fib_back_walk_notify,
+ .fnv_mem_show = hicn_fib_show_memory,
};
-fib_table_walk_rc_t enable_data_on_existing_hicn(fib_node_index_t fei,
- void *ctx)
+fib_table_walk_rc_t
+enable_data_on_existing_hicn (fib_node_index_t fei, void *ctx)
{
- u32 sw_if = *(u32 *)ctx;
+ u32 sw_if = *(u32 *) ctx;
const dpo_id_t *load_balance_dpo_id;
const dpo_id_t *strategy_dpo_id;
/* Route already existing. We need to update the dpo. */
- load_balance_dpo_id =
- fib_entry_contribute_ip_forwarding (fei);
+ load_balance_dpo_id = fib_entry_contribute_ip_forwarding (fei);
/* The dpo is not a load balance dpo as expected */
if (load_balance_dpo_id->dpoi_type != DPO_LOAD_BALANCE)
@@ -652,92 +689,89 @@ fib_table_walk_rc_t enable_data_on_existing_hicn(fib_node_index_t fei,
}
else
{
- load_balance_t *lb =
- load_balance_get (load_balance_dpo_id->dpoi_index);
+ load_balance_t *lb = load_balance_get (load_balance_dpo_id->dpoi_index);
strategy_dpo_id = load_balance_get_bucket_i (lb, 0);
if (!dpo_is_hicn (strategy_dpo_id))
- {
- goto done;
- }
+ {
+ goto done;
+ }
- enable_disable_data_receiving (strategy_dpo_id->dpoi_proto, sw_if, 1);
+ enable_disable_data_receiving (
+ (fib_protocol_t) strategy_dpo_id->dpoi_proto, sw_if, 1);
}
- done:
+done:
return (FIB_TABLE_WALK_CONTINUE);
}
static clib_error_t *
-set_table_interface_add_del (vnet_main_t * vnm, u32 sw_if_index, u32 is_add)
+set_table_interface_add_del (vnet_main_t *vnm, u32 sw_if_index, u32 is_add)
{
if (!is_add)
- return HICN_ERROR_NONE;
+ return NULL;
- int rv = ip_table_bind (FIB_PROTOCOL_IP4, sw_if_index, HICN_FIB_TABLE, 1);
+ int rv = ip_table_bind (FIB_PROTOCOL_IP4, sw_if_index, HICN_FIB_TABLE);
if (!rv)
{
- rv = ip_table_bind (FIB_PROTOCOL_IP6, sw_if_index, HICN_FIB_TABLE, 1);
+ rv = ip_table_bind (FIB_PROTOCOL_IP6, sw_if_index, HICN_FIB_TABLE);
if (rv)
- {
- /* An error occurred. Bind the interface back to the default fib */
- ip_table_bind (FIB_PROTOCOL_IP4, sw_if_index, 0, 1);
- }
+ {
+ /* An error occurred. Bind the interface back to the default fib */
+ ip_table_bind (FIB_PROTOCOL_IP4, sw_if_index, 0);
+ }
}
- u32 fib_index = fib_table_find(FIB_PROTOCOL_IP4,
- HICN_FIB_TABLE);
+ u32 fib_index = fib_table_find (FIB_PROTOCOL_IP4, HICN_FIB_TABLE);
if (fib_index != ~0)
{
/*
* Walk the ip4 and ip6 fib tables to discover existing hicn fib entries.
* For each of them we need to enable the feature to punt data packets.
*/
- fib_table_walk(fib_index,
- FIB_PROTOCOL_IP4,
- enable_data_on_existing_hicn,
- &sw_if_index);
+ fib_table_walk (fib_index, FIB_PROTOCOL_IP4,
+ enable_data_on_existing_hicn, &sw_if_index);
}
- fib_index = fib_table_find(FIB_PROTOCOL_IP6,
- HICN_FIB_TABLE);
+ fib_index = fib_table_find (FIB_PROTOCOL_IP6, HICN_FIB_TABLE);
if (fib_index != ~0)
{
- fib_table_walk(fib_index,
- FIB_PROTOCOL_IP6,
- enable_data_on_existing_hicn,
- &sw_if_index);
+ fib_table_walk (fib_index, FIB_PROTOCOL_IP6,
+ enable_data_on_existing_hicn, &sw_if_index);
}
- return rv ? clib_error_return (0, "unable to add hicn table to interface") : 0;
+ return rv ? clib_error_return (0, "unable to add hicn table to interface") :
+ 0;
}
-VNET_SW_INTERFACE_ADD_DEL_FUNCTION (set_table_interface_add_del);
+VNET_SW_INTERFACE_ADD_DEL_FUNCTION_PRIO (set_table_interface_add_del,
+ VNET_ITF_FUNC_PRIORITY_HIGH);
void
hicn_route_init ()
{
- vnet_main_t * vnm = vnet_get_main ();
- vlib_main_t * vm = vlib_get_main ();
- hicn_fib_src = fib_source_allocate ("hicn",
- FIB_SOURCE_HICN, FIB_SOURCE_BH_API);
+ vnet_main_t *vnm = vnet_get_main ();
+ vlib_main_t *vm = vlib_get_main ();
+ hicn_fib_src =
+ fib_source_allocate ("hicn", FIB_SOURCE_HICN, FIB_SOURCE_BH_API);
- hicn_fib_node_type = fib_node_register_new_type(&hicn_fib_vft);
+ hicn_fib_node_type =
+ fib_node_register_new_type ("hicn_route_fib_node", &hicn_fib_vft);
- ip_table_create(FIB_PROTOCOL_IP4, HICN_FIB_TABLE, 1, (const u8 *)"hicn4");
- ip_table_create(FIB_PROTOCOL_IP6, HICN_FIB_TABLE, 1, (const u8 *)"hicn6");
+ ip_table_create (FIB_PROTOCOL_IP4, HICN_FIB_TABLE, 1, (const u8 *) "hicn4");
+ ip_table_create (FIB_PROTOCOL_IP6, HICN_FIB_TABLE, 1, (const u8 *) "hicn6");
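+
+  /* Create a plugin-owned loopback interface and assign it the IPv4/IPv6
+   * localhost addresses; these are used as local next hops for udp-encap
+   * faces (see sync_hicn_fib_entry). */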
u32 sw_if_index;
u8 mac_address[6];
u8 is_specified = 0;
u32 user_instance = 0;
- vnet_create_loopback_interface (&sw_if_index, mac_address,
- is_specified, user_instance);
+ vnet_create_loopback_interface (&sw_if_index, mac_address, is_specified,
+ user_instance);
localhost4.as_u8[0] = 127;
localhost4.as_u8[3] = 1;
@@ -745,8 +779,10 @@ hicn_route_init ()
localhost6.as_u8[15] = 1;
- ip4_add_del_interface_address (vm, sw_if_index, &localhost4, length4, is_del);
- ip6_add_del_interface_address (vm, sw_if_index, &localhost6, length6, is_del);
+ ip4_add_del_interface_address (vm, sw_if_index, &localhost4, length4,
+ is_del);
+ ip6_add_del_interface_address (vm, sw_if_index, &localhost6, length6,
+ is_del);
flags |= VNET_SW_INTERFACE_FLAG_ADMIN_UP;
vnet_sw_interface_set_flags (vnm, sw_if_index, flags);
diff --git a/hicn-plugin/src/route.h b/hicn-plugin/src/route.h
index a1ba86b3d..4fcb219c8 100644
--- a/hicn-plugin/src/route.h
+++ b/hicn-plugin/src/route.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Copyright (c) 2021 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -24,27 +24,30 @@
/**
* @file route.h
*
- * hICN uses a specific vrf to install the routes for a prefix has been enabled to
- * be hicn. It considers the vrf 0 (the default vrf) as the dominating vrf on
- * which every route is stored. Enabling a prefix to be hICN will copy all the routes
- * in the vrf 0 for the given prefi, in the vrf HICN. Every modification made on the
- * vrf 0 on an hICN enabled prefix is reflected in the vrf hICN (through the use of
- * the fib entry tracking functionality). Moreover, we use the lookup in the vrf hICN
- * as a way for punting packet that must be processed as hICN. The implementation will
- * install a special dpo as a single next hop for the vpp load balancer for each entry
- * in the vrf hICN that we enabled. Such dpo will have two purposes: 1) to punt packets
- * to the hICN forwarding pipeline, 2) to point to the righe strategy (the dpoi_index will
- * be an index to the strategy context while the dpoi_type will be an index to the strategy vft).
+ * hICN uses a specific vrf to install the routes for a prefix that has been
+ * enabled to be hicn. It considers the vrf 0 (the default vrf) as the
+ * dominating vrf on which every route is stored. Enabling a prefix to be hICN
+ * copies all the routes in the vrf 0 for the given prefix into the vrf HICN.
+ * Every modification made on the vrf 0 on an hICN enabled prefix is reflected
+ * in the vrf hICN (through the use of the fib entry tracking functionality).
+ * Moreover, we use the lookup in the vrf hICN as a way of punting packets that
+ * must be processed as hICN. The implementation will install a special dpo as
+ * a single next hop for the vpp load balancer for each entry in the vrf hICN
+ * that we enabled. Such dpo will have two purposes: 1) to punt packets to the
+ * hICN forwarding pipeline, 2) to point to the right strategy (the dpoi_index
+ * will be an index to the strategy context while the dpoi_type will be an
+ * index to the strategy vft).
*
- * Additionally, hICN assign each interface to the vrf hICN; this is required for
- * the interest lookup. Vpp performs a lookup in the vrf assigned to the interface,
- * therefore if an interface is not assigned to the hICN vrf, the lookup will be done
- * on the vrf 0 and the packet won't be processed through the hicn forwarding pipeline.
+ * Additionally, hICN assigns each interface to the vrf hICN; this is required
+ * for the interest lookup. Vpp performs a lookup in the vrf assigned to the
+ * interface, therefore if an interface is not assigned to the hICN vrf, the
+ * lookup will be done on the vrf 0 and the packet won't be processed through
+ * the hicn forwarding pipeline.
*/
/*
- * Adding each interface to the vrf hICN has the side effect that to ping you need to
- * specify the vrf hICN in the command.
+ * Adding each interface to the vrf hICN has the side effect that, to ping,
+ * you need to specify the vrf hICN in the command.
*/
extern fib_source_t hicn_fib_src;
@@ -58,10 +61,8 @@ extern dpo_type_t udp_encap_dpo_types[FIB_PROTOCOL_MAX];
* @param hicn_dpo return value with the hicn_dpo
* @param fib_index return value with the fib index corresponding to the prefix
*/
-int
-hicn_route_get_dpo (const fib_prefix_t * prefix,
- const dpo_id_t ** hicn_dpo, u32 * fib_index);
-
+int hicn_route_get_dpo (const fib_prefix_t *prefix, const dpo_id_t **hicn_dpo,
+ u32 *fib_index);
/**
* @Brief Set the strategy for a given prefix
@@ -69,44 +70,48 @@ hicn_route_get_dpo (const fib_prefix_t * prefix,
* @param prefix Prefix for which we set the strategy
* @param stretegy_id Index of the strategy to set
*/
-int
-hicn_route_set_strategy (fib_prefix_t * prefix, u32 strategy_id);
+int hicn_route_set_strategy (fib_prefix_t *prefix, u32 strategy_id);
/**
- * @Brief Helper to add a nex hop in the vrf 0. If there are no entries in the
- * vrf 0 that matches with the prefix (epm), a new one is created.
+ * @Brief Helper to add or remove an adjacency next hop in the vrf 0. When
+ * adding, if no entry in the vrf 0 matches the prefix (epm), a new one is
+ * created.
*
* @param fib_proto FIB_PROTOCOL_IP6 or FIB_PROTOCOL_IP4 (mpls not supported)
- * @param pfx Prefix for which to add a next hop
+ * @param rpfx Prefix for which to add a next hop
* @param nh Next hop to add
* @param sw_if Software interface index to add in the next hop
*/
-int
-ip_nh_add_helper (fib_protocol_t fib_proto, const fib_prefix_t * pfx, ip46_address_t * nh, u32 sw_if);
+int ip_nh_adj_add_del_helper (fib_protocol_t fib_proto,
+ const fib_prefix_t *rpfx, ip46_address_t *nh,
+ u32 sw_if, u8 is_add);
/**
- * @Brief Helper to remove a nex hop in the vrf 0. If there are no entries in the
- * vrf 0 nothing happens.
+ * @Brief Helper to add or remove a udp-tunnel next hop in the vrf 0. When
+ * adding, if no entry in the vrf 0 matches the prefix (epm), a new one is
+ * created.
*
* @param fib_proto FIB_PROTOCOL_IP6 or FIB_PROTOCOL_IP4 (mpls not supported)
- * @param pfx Prefix for which to remove a next hop
- * @param nh Next hop to remove
- * @param sw_if Software interface index in the next hop definition
+ * @param rpfx Prefix for which to add a next hop
+ * @param uei The UDP ENCAP ID
+ * @param proto The payload proto for this encap
*/
-int
-ip_nh_del_helper (fib_protocol_t fib_proto, const fib_prefix_t * rpfx, ip46_address_t * nh, u32 sw_if);
+int ip_nh_udp_tunnel_add_del_helper (fib_protocol_t fib_proto,
+ const fib_prefix_t *rpfx, u32 uei,
+ dpo_proto_t proto, u8 is_add);
/**
* @Brief Enable an hICN for an ip prefix
*
* @param prefix Prefix for which we enable hICN
* @return HICN_ERROR_NONE if hICN was enabled on the prefix
- * HICN_ERROR_ROUTE_NO_LD if the first dpo for the fib entry corresponding to the prefix is not a load_balancer
- * HICN_ERROR_ROUTE_DPO_NO_HICN if the loadbalancer in the vrf HICN already contains a dpo which is not an hICN one
+ * HICN_ERROR_ROUTE_NO_LD if the first dpo for the fib entry corresponding to
+ * the prefix is not a load_balancer
+ * HICN_ERROR_ROUTE_DPO_NO_HICN if the loadbalancer in the vrf HICN already
+ * contains a dpo which is not an hICN one
* HICN_ERROR_ROUTE_MLT_LD if there are more than a dpo in the vpp loadbalancer
*/
-int
-hicn_route_enable (fib_prefix_t *prefix);
+int hicn_route_enable (const fib_prefix_t *prefix,
+ fib_node_index_t *hicn_fib_node_index,
+ hicn_face_id_t **vec_faces);
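+
+/*
+ * Illustrative usage sketch (not a normative example): given a populated
+ * fib_prefix_t 'prefix',
+ *
+ *   fib_node_index_t hicn_fib_node_index;
+ *   hicn_face_id_t *faces = NULL;
+ *   int rv = hicn_route_enable (&prefix, &hicn_fib_node_index, &faces);
+ *
+ * mirrors the vrf 0 routes for the prefix in the vrf hICN and returns the
+ * faces created for its next hops in 'faces'.
+ */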
/**
* @Brief Disable an hICN for an ip prefix. If hICN wasn't enable on the prefix
@@ -114,13 +119,10 @@ hicn_route_enable (fib_prefix_t *prefix);
*
* @param prefix Prefix for which we disable hICN
*/
-int
-hicn_route_disable (fib_prefix_t *prefix);
-
+int hicn_route_disable (fib_prefix_t *prefix);
/* Init route internal structures */
-void
-hicn_route_init();
+void hicn_route_init ();
#endif /* //__HICN_ROUTE__ */
/*
diff --git a/hicn-plugin/src/state.h b/hicn-plugin/src/state.h
index 37003d0ae..e80e81e29 100644
--- a/hicn-plugin/src/state.h
+++ b/hicn-plugin/src/state.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Copyright (c) 2021 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -21,7 +21,6 @@
#include "hicn.h"
#include "pcs.h"
-#include "hashtb.h"
#include "strategy.h"
#include "strategy_dpo_ctx.h"
#include "strategy_dpo_manager.h"
@@ -29,11 +28,12 @@
/**
* @file plugin_state
*
- * Helper functions to hicn state (hash node, hash entry, strategy vft, dpo vft and dpo context id)
+ * Helper functions to access the hicn state carried in the hicn buffer
+ * (pcs entry index, strategy vft, dpo vft and dpo context id)
*
*/
-//TODO exploit this state to prefetch hash nodes and entries.
+// TODO exploit this state to prefetch hash nodes and entries.
/**
* @brief Retrieve the hicn state
@@ -41,34 +41,23 @@
* @param hicnb hicn buffer used to retrieve the hicn state
* @param pitcs pointer to PIT/CS
* @param node node in the hash table referring to the buffer
- * @param strategy_vft return value pointing to the strategy vft corresponding to the buffer
- * @param dpo_vft return value pointing to the dpo vft corresponding to the buffer
- * @param dpo_ctx_id return value pointing to the dpo context id corresponding to the buffer
+ * @param strategy_vft return value pointing to the strategy vft corresponding
+ * to the buffer
+ * @param dpo_vft return value pointing to the dpo vft corresponding to the
+ * buffer
+ * @param dpo_ctx_id return value pointing to the dpo context id corresponding
+ * to the buffer
* @param hash_entry entry in the hash table referring to the buffer
*/
always_inline void
-hicn_get_internal_state (hicn_buffer_t * hicnb, hicn_pit_cs_t * pitcs,
- hicn_hash_node_t ** node,
- const hicn_strategy_vft_t ** strategy_vft,
- const hicn_dpo_vft_t ** dpo_vft, u8 * dpo_ctx_id,
- hicn_hash_entry_t ** hash_entry)
+hicn_get_internal_state (hicn_buffer_t *hicnb, u32 *pit_entry_index,
+ const hicn_strategy_vft_t **strategy_vft,
+ const hicn_dpo_vft_t **dpo_vft, u8 *dpo_ctx_id)
{
- *node = pool_elt_at_index (pitcs->pcs_table->ht_nodes, hicnb->node_id);
+ *pit_entry_index = hicnb->pcs_entry_id;
*strategy_vft = hicn_dpo_get_strategy_vft (hicnb->vft_id);
*dpo_vft = hicn_dpo_get_vft (hicnb->vft_id);
*dpo_ctx_id = hicnb->dpo_ctx_id;
-
- hicn_hash_bucket_t *bucket;
- if (hicnb->hash_bucket_flags & HICN_HASH_NODE_OVERFLOW_BUCKET)
- bucket =
- pool_elt_at_index (pitcs->pcs_table->ht_overflow_buckets,
- hicnb->bucket_id);
- else
- bucket =
- (hicn_hash_bucket_t *) (pitcs->pcs_table->ht_buckets +
- hicnb->bucket_id);
-
- *hash_entry = &(bucket->hb_entries[hicnb->hash_entry_id]);
}
/*
@@ -81,29 +70,25 @@ hicn_get_internal_state (hicn_buffer_t * hicnb, hicn_pit_cs_t * pitcs,
* @brief Store the hicn state in the hicn buffer
*
* @param b vlib buffer holding the hICN packet
- * @param name_hash hash of the hICN name
- * @param node_id id of the node in the hash table referring to the buffer
- * @param dpo_ctx_id id of the dpo context id corresponding to the buffer
- * @param vft_id id of the strategy vft corresponding to the buffer
- * @param hash_entry_id id of the entry in the hash table referring to the buffer
- * @param bucket_id id of the hasth table bucket that holds the hash entry
- * @param bucket_is_overflow 1 if the bucket is from the ht_overflow_buckets pool
- * 0 if the bucket is from the ht_buckets pool
+ * @param pcs_entry_index index of the PCS entry
*/
-always_inline void
-hicn_store_internal_state (vlib_buffer_t * b, u64 name_hash, u32 node_id,
- u8 dpo_ctx_id, u8 vft_id, u8 hash_entry_id,
- u32 bucket_id, u8 bucket_is_overflow)
+always_inline int
+hicn_store_internal_state (vlib_buffer_t *b, u32 pcs_entry_index,
+ u32 dpo_ctx_id)
{
hicn_buffer_t *hicnb = hicn_get_buffer (b);
- hicnb->name_hash = name_hash;
- hicnb->node_id = node_id;
+
hicnb->dpo_ctx_id = dpo_ctx_id;
- hicnb->vft_id = vft_id;
- hicnb->hash_entry_id = hash_entry_id;
- hicnb->bucket_id = bucket_id;
- hicnb->hash_bucket_flags =
- HICN_HASH_NODE_OVERFLOW_BUCKET * bucket_is_overflow;
+ const hicn_dpo_ctx_t *dpo_ctx =
+ hicn_strategy_dpo_ctx_get (hicnb->dpo_ctx_id);
+
+ if (PREDICT_FALSE (dpo_ctx == NULL))
+ return HICN_ERROR_DPO_CTX_NOT_FOUND;
+
+ hicnb->vft_id = dpo_ctx->dpo_type;
+ hicnb->pcs_entry_id = pcs_entry_index;
+
+ return HICN_ERROR_NONE;
}
#endif /* // __HICN_STATE__ */
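The two helpers above shrink the per-packet state to a PCS entry index and a dpo context id kept in the hicn buffer metadata; the heavier objects (pool entries, vfts) are resolved from those indices only when needed. The standalone sketch below illustrates that pattern with plain arrays and hypothetical names; it is an illustration of the idea, not plugin code.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the plugin's pools and buffer metadata. */
typedef struct { int hits; } pcs_entry_t;
typedef struct { const char *name; } dpo_ctx_t;

typedef struct
{
  uint32_t pcs_entry_id; /* index into the PCS entry pool */
  uint32_t dpo_ctx_id;   /* index into the dpo context pool */
} pkt_meta_t;

static pcs_entry_t pcs_pool[16];
static dpo_ctx_t dpo_pool[4] = { { "hicn-lr" }, { "hicn-mw" },
                                 { "hicn-rr" }, { "hicn-rp" } };

/* Store phase: keep only small indices in the per-packet metadata. */
static int
store_state (pkt_meta_t *m, uint32_t pcs_idx, uint32_t dpo_idx)
{
  if (dpo_idx >= 4)
    return -1; /* mirrors the HICN_ERROR_DPO_CTX_NOT_FOUND check above */
  m->pcs_entry_id = pcs_idx;
  m->dpo_ctx_id = dpo_idx;
  return 0;
}

/* Retrieve phase: resolve the indices back to pool entries later on. */
static void
get_state (const pkt_meta_t *m, pcs_entry_t **e, dpo_ctx_t **c)
{
  *e = &pcs_pool[m->pcs_entry_id];
  *c = &dpo_pool[m->dpo_ctx_id];
}

int
main (void)
{
  pkt_meta_t meta;
  pcs_entry_t *entry;
  dpo_ctx_t *ctx;

  if (store_state (&meta, 7, 2) == 0)
    {
      get_state (&meta, &entry, &ctx);
      entry->hits++;
      printf ("pcs entry %u via %s\n", (unsigned) meta.pcs_entry_id,
              ctx->name);
    }
  return 0;
}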
diff --git a/hicn-plugin/src/strategies/dpo_lr.c b/hicn-plugin/src/strategies/dpo_lr.c
new file mode 100644
index 000000000..59409e25e
--- /dev/null
+++ b/hicn-plugin/src/strategies/dpo_lr.c
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) 2023 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "dpo_lr.h"
+#include "strategy_lr.h"
+#include "../strategy_dpo_manager.h"
+#include "../strategy_dpo_ctx.h"
+
+/**
+ * @brief DPO type value for the lr_strategy
+ */
+static dpo_type_t hicn_dpo_type_lr;
+
+static const hicn_dpo_vft_t hicn_dpo_lr_vft = {
+ .hicn_dpo_is_type = &hicn_dpo_is_type_strategy_lr,
+ .hicn_dpo_get_type = &hicn_dpo_strategy_lr_get_type,
+ .hicn_dpo_module_init = &hicn_dpo_strategy_lr_module_init,
+ .hicn_dpo_create = &hicn_strategy_lr_ctx_create,
+ .hicn_dpo_update_type = &hicn_strategy_lr_update_ctx_type,
+ .hicn_dpo_add_update_nh = &hicn_strategy_lr_ctx_add_nh,
+ .hicn_dpo_del_nh = &hicn_strategy_lr_ctx_del_nh,
+ .hicn_dpo_format = &hicn_dpo_strategy_lr_format
+};
+
+const static dpo_vft_t dpo_strategy_lr_ctx_vft = {
+ .dv_lock = hicn_strategy_dpo_ctx_lock,
+ .dv_unlock = hicn_strategy_dpo_ctx_unlock,
+ .dv_format = hicn_strategy_dpo_format,
+};
+
+int
+hicn_dpo_is_type_strategy_lr (const dpo_id_t *dpo)
+{
+ return dpo->dpoi_type == hicn_dpo_type_lr;
+}
+
+void
+hicn_dpo_strategy_lr_module_init (void)
+{
+ /*
+ * Register our type of dpo
+ */
+ hicn_dpo_type_lr = hicn_dpo_register_new_type (
+ hicn_nodes_strategy, &hicn_dpo_lr_vft, hicn_lr_strategy_get_vft (),
+ &dpo_strategy_lr_ctx_vft);
+}
+
+dpo_type_t
+hicn_dpo_strategy_lr_get_type (void)
+{
+ return hicn_dpo_type_lr;
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+u8 *
+hicn_dpo_strategy_lr_format (u8 *s, hicn_dpo_ctx_t *dpo_ctx, u32 indent)
+{
+ int i = 0;
+
+ s = format (s, "hicn-lr");
+
+ for (i = 0; i < HICN_PARAM_FIB_ENTRY_NHOPS_MAX; i++)
+ {
+ u8 *buf = NULL;
+ if (i < dpo_ctx->entry_count)
+ buf = format (NULL, "FIB");
+ else if (i >= HICN_PARAM_FIB_ENTRY_NHOPS_MAX - dpo_ctx->tfib_entry_count)
+ buf = format (NULL, "TFIB");
+ else
+ continue;
+
+ s = format (s, "\n");
+ s = format (s, "%U ", format_hicn_face, dpo_ctx->next_hops[i], indent);
+ s = format (s, " %s", buf);
+ }
+
+ return (s);
+}
+
+void
+hicn_strategy_lr_ctx_create (fib_protocol_t proto,
+ const hicn_face_id_t *next_hop, int nh_len,
+ index_t *dpo_idx)
+{
+ hicn_dpo_ctx_t *hicn_strategy_ctx;
+
+ /* Allocate a hicn_dpo_ctx on the vpp pool and initialize it */
+ hicn_strategy_ctx = hicn_strategy_dpo_ctx_alloc ();
+
+ *dpo_idx = hicn_strategy_dpo_ctx_get_index (hicn_strategy_ctx);
+
+ init_dpo_ctx (hicn_strategy_ctx, next_hop, nh_len, hicn_dpo_type_lr,
+ (dpo_proto_t) proto);
+}
+
+void
+hicn_strategy_lr_update_ctx_type (hicn_dpo_ctx_t *hicn_strategy_ctx)
+{
+ hicn_strategy_ctx->dpo_type = hicn_dpo_type_lr;
+  // no need to reset the data, it is not used
+}
+
+int
+hicn_strategy_lr_ctx_add_nh (hicn_face_id_t nh, index_t dpo_idx)
+{
+ hicn_dpo_ctx_t *hicn_strategy_dpo_ctx = hicn_strategy_dpo_ctx_get (dpo_idx);
+ u8 pos = 0;
+
+ if (hicn_strategy_dpo_ctx == NULL)
+ {
+ return HICN_ERROR_STRATEGY_NOT_FOUND;
+ }
+
+ hicn_strategy_dpo_ctx_add_nh (nh, hicn_strategy_dpo_ctx, &pos);
+ // nothing else to initialize in this strategy
+ return HICN_ERROR_NONE;
+}
+
+int
+hicn_strategy_lr_ctx_del_nh (hicn_face_id_t face_id, index_t dpo_idx)
+{
+ hicn_dpo_ctx_t *hicn_strategy_dpo_ctx = hicn_strategy_dpo_ctx_get (dpo_idx);
+  // Nothing else to update in this strategy when a next hop is removed.
+ return hicn_strategy_dpo_ctx_del_nh (face_id, hicn_strategy_dpo_ctx);
+}
diff --git a/hicn-plugin/src/strategies/dpo_lr.h b/hicn-plugin/src/strategies/dpo_lr.h
new file mode 100644
index 000000000..757b96db6
--- /dev/null
+++ b/hicn-plugin/src/strategies/dpo_lr.h
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2023 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HICN_DPO_LR_H__
+#define __HICN_DPO_LR_H__
+
+#include <vnet/dpo/dpo.h>
+#include "../strategy_dpo_ctx.h"
+
+/**
+ * @file dpo_lr.h
+ *
+ * This file implements the strategy vft (see strategy.h) and
+ * the dpo vft (see strategy_dpo_manager.h) for the strategy
+ * local-remote.
+ */
+
+typedef struct hicn_strategy_lr_ctx_s
+{
+} hicn_strategy_lr_ctx_t;
+
+/**
+ * @brief Format the dpo ctx for a human-readable string
+ *
+ * @param s String to which to append the formatted dpo ctx
+ * @param dpo_ctx DPO context
+ * @param indent Indentation
+ *
+ * @result The string with the formatted dpo ctx
+ */
+u8 *hicn_dpo_strategy_lr_format (u8 *s, hicn_dpo_ctx_t *dpo_ctx, u32 indent);
+
+/**
+ * @brief Retrieve an hicn_strategy_lr_ctx object
+ *
+ * @param index Index of the hicn_dpo_ctx to retrieve
+ * @return The hicn_dpo_ctx object or NULL
+ */
+hicn_dpo_ctx_t *hicn_strategy_lr_ctx_get (index_t index);
+
+/**
+ * @brief Create a new local-remote ctx
+ *
+ * @param proto The protocol to which the dpo is meant for (see vpp docs)
+ * @param next_hop A list of next hops to be inserted in the dpo ctx
+ * @param nh_len Size of the list
+ * @param dpo_idx index_t that will hold the index of the created dpo ctx
+ * @return HICN_ERROR_NONE if the creation was fine, otherwise EINVAL
+ */
+void hicn_strategy_lr_ctx_create (fib_protocol_t proto,
+ const hicn_face_id_t *next_hop, int nh_len,
+ index_t *dpo_idx);
+
+/**
+ * @brief Update existing ctx setting it to local-remote
+ *
+ * @param hicn_strategy_ctx pointer to the ctx to update
+ */
+void hicn_strategy_lr_update_ctx_type (hicn_dpo_ctx_t *hicn_strategy_ctx);
+
+/**
+ * @brief Add or update a next hop in the dpo ctx.
+ *
+ * This function is meant to be used in the control plane and not in the data
+ * plane, as it is not optimized for the latter.
+ *
+ * @param nh Next hop to insert in the dpo ctx
+ * @param dpo_idx Index of the dpo ctx to update with the new or updated next
+ * hop
+ * @return HICN_ERROR_NONE if the update or insert was fine,
+ * otherwise HICN_ERROR_DPO_CTX_NOT_FOUND
+ */
+int hicn_strategy_lr_ctx_add_nh (hicn_face_id_t nh, index_t dpo_idx);
+
+/**
+ * @brief Delete a next hop in the dpo ctx.
+ *
+ * @param face_id Face identifier of the next hop
+ * @param dpo_idx Index of the dpo ctx to update with the new or updated next
+ * hop
+ * @return HICN_ERROR_NONE if the update or insert was fine,
+ * otherwise HICN_ERROR_DPO_CTX_NOT_FOUND
+ */
+int hicn_strategy_lr_ctx_del_nh (hicn_face_id_t face_id, index_t dpo_idx);
+
+/**
+ * @brief Prefetch a dpo
+ *
+ * @param dpo_idx Index of the dpo ctx to prefetch
+ */
+void hicn_strategy_lr_ctx_prefetch (index_t dpo_idx);
+
+/**
+ * @brief Return true if the dpo is of type strategy local-remote
+ *
+ * @param dpo Dpo to check the type
+ */
+int hicn_dpo_is_type_strategy_lr (const dpo_id_t *dpo);
+
+/**
+ * @brief Initialize the local-remote strategy
+ */
+void hicn_dpo_strategy_lr_module_init (void);
+
+/**
+ * @brief Return the dpo type for the local-remote strategy
+ */
+dpo_type_t hicn_dpo_strategy_lr_get_type (void);
+
+#endif // __HICN_DPO_LR_H__
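Taken together, the declarations above form the control-plane lifecycle of a local-remote dpo context: create it for a prefix, then add or remove next hops as faces come and go. A minimal sketch of that sequence follows, assuming the hicn plugin build environment; the function names and signatures are the ones declared above, while the helper name, the faces and the protocol are placeholders.

/* Sketch only: assumes the hicn plugin headers and two existing faces. */
#include "dpo_lr.h"

static void
example_lr_ctx_setup (hicn_face_id_t local_face, hicn_face_id_t remote_face)
{
  index_t dpo_idx;
  hicn_face_id_t next_hops[1] = { local_face };

  /* Create the ctx with one initial next hop. */
  hicn_strategy_lr_ctx_create (FIB_PROTOCOL_IP6, next_hops, 1, &dpo_idx);

  /* Add a second next hop; failures come back as HICN_ERROR_* codes. */
  if (hicn_strategy_lr_ctx_add_nh (remote_face, dpo_idx) != HICN_ERROR_NONE)
    return;

  /* Later, remove a next hop when its face goes away. */
  hicn_strategy_lr_ctx_del_nh (local_face, dpo_idx);
}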
diff --git a/hicn-plugin/src/strategies/dpo_mw.c b/hicn-plugin/src/strategies/dpo_mw.c
index 12c77bce8..9283f6a43 100644
--- a/hicn-plugin/src/strategies/dpo_mw.c
+++ b/hicn-plugin/src/strategies/dpo_mw.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Copyright (c) 2021 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -28,13 +28,20 @@ static const hicn_dpo_vft_t hicn_dpo_mw_vft = {
.hicn_dpo_get_type = &hicn_dpo_strategy_mw_get_type,
.hicn_dpo_module_init = &hicn_dpo_strategy_mw_module_init,
.hicn_dpo_create = &hicn_strategy_mw_ctx_create,
+ .hicn_dpo_update_type = &hicn_strategy_mw_update_ctx_type,
.hicn_dpo_add_update_nh = &hicn_strategy_mw_ctx_add_nh,
.hicn_dpo_del_nh = &hicn_strategy_mw_ctx_del_nh,
- .hicn_dpo_format = &hicn_strategy_mw_format_ctx
+ .hicn_dpo_format = &hicn_dpo_strategy_mw_format
+};
+
+const static dpo_vft_t dpo_strategy_mw_ctx_vft = {
+ .dv_lock = &hicn_strategy_dpo_ctx_lock,
+ .dv_unlock = &hicn_strategy_dpo_ctx_unlock,
+ .dv_format = &hicn_strategy_dpo_format
};
int
-hicn_dpo_is_type_strategy_mw (const dpo_id_t * dpo)
+hicn_dpo_is_type_strategy_mw (const dpo_id_t *dpo)
{
return dpo->dpoi_type == hicn_dpo_type_mw;
}
@@ -45,10 +52,9 @@ hicn_dpo_strategy_mw_module_init (void)
/*
* Register our type of dpo
*/
- hicn_dpo_type_mw =
- hicn_dpo_register_new_type (hicn_nodes_strategy, &hicn_dpo_mw_vft,
- hicn_mw_strategy_get_vft (),
- &dpo_strategy_mw_ctx_vft);
+ hicn_dpo_type_mw = hicn_dpo_register_new_type (
+ hicn_nodes_strategy, &hicn_dpo_mw_vft, hicn_mw_strategy_get_vft (),
+ &dpo_strategy_mw_ctx_vft);
}
dpo_type_t
@@ -59,28 +65,11 @@ hicn_dpo_strategy_mw_get_type (void)
//////////////////////////////////////////////////////////////////////////////////////////////////
-
-u8 *
-hicn_strategy_mw_format_ctx (u8 * s, int n, ...)
-{
- va_list args;
- va_start (args, n);
- s = format_hicn_strategy_mw_ctx (s, &args);
- return s;
-}
-
u8 *
-format_hicn_strategy_mw_ctx (u8 * s, va_list * ap)
+hicn_dpo_strategy_mw_format (u8 *s, hicn_dpo_ctx_t *dpo_ctx, u32 indent)
{
- int i = 0;
- index_t index = va_arg (*ap, index_t);
- hicn_dpo_ctx_t *dpo_ctx = NULL;
hicn_strategy_mw_ctx_t *mw_dpo_ctx = NULL;
- u32 indent = va_arg (*ap, u32);;
-
- dpo_ctx = hicn_strategy_dpo_ctx_get (index);
- if (dpo_ctx == NULL)
- return s;
+ int i = 0;
mw_dpo_ctx = (hicn_strategy_mw_ctx_t *) dpo_ctx->data;
@@ -90,16 +79,13 @@ format_hicn_strategy_mw_ctx (u8 * s, va_list * ap)
u8 *buf = NULL;
if (i < dpo_ctx->entry_count)
buf = format (NULL, "FIB");
- else if (i >=
- HICN_PARAM_FIB_ENTRY_NHOPS_MAX - dpo_ctx->tfib_entry_count)
+ else if (i >= HICN_PARAM_FIB_ENTRY_NHOPS_MAX - dpo_ctx->tfib_entry_count)
buf = format (NULL, "TFIB");
else
continue;
s = format (s, "\n");
- s =
- format (s, "%U ", format_hicn_face, dpo_ctx->next_hops[i],
- indent);
+ s = format (s, "%U ", format_hicn_face, dpo_ctx->next_hops[i], indent);
s = format (s, "weight %u", mw_dpo_ctx->weight[i]);
s = format (s, " %s", buf);
}
@@ -108,8 +94,9 @@ format_hicn_strategy_mw_ctx (u8 * s, va_list * ap)
}
void
-hicn_strategy_mw_ctx_create (fib_protocol_t proto, const hicn_face_id_t * next_hop,
- int nh_len, index_t * dpo_idx)
+hicn_strategy_mw_ctx_create (fib_protocol_t proto,
+ const hicn_face_id_t *next_hop, int nh_len,
+ index_t *dpo_idx)
{
hicn_strategy_mw_ctx_t *hicn_strategy_mw_ctx;
hicn_dpo_ctx_t *hicn_strategy_ctx;
@@ -120,8 +107,22 @@ hicn_strategy_mw_ctx_create (fib_protocol_t proto, const hicn_face_id_t * next_h
*dpo_idx = hicn_strategy_dpo_ctx_get_index (hicn_strategy_ctx);
- init_dpo_ctx (hicn_strategy_ctx, next_hop, nh_len, hicn_dpo_type_mw, proto);
+ HICN_DEBUG ("Successful hicn_strategy_dpo_ctx_alloc with index %d",
+ *dpo_idx);
+
+ init_dpo_ctx (hicn_strategy_ctx, next_hop, nh_len, hicn_dpo_type_mw,
+ (dpo_proto_t) proto);
+
+ memset (hicn_strategy_mw_ctx->weight, 0, HICN_PARAM_FIB_ENTRY_NHOPS_MAX);
+}
+
+void
+hicn_strategy_mw_update_ctx_type (hicn_dpo_ctx_t *hicn_strategy_ctx)
+{
+ hicn_strategy_mw_ctx_t *hicn_strategy_mw_ctx;
+ hicn_strategy_mw_ctx = (hicn_strategy_mw_ctx_t *) hicn_strategy_ctx->data;
+ hicn_strategy_ctx->dpo_type = hicn_dpo_type_mw;
memset (hicn_strategy_mw_ctx->weight, 0, HICN_PARAM_FIB_ENTRY_NHOPS_MAX);
}
@@ -138,7 +139,7 @@ hicn_strategy_mw_ctx_add_nh (hicn_face_id_t nh, index_t dpo_idx)
hicn_strategy_dpo_ctx_add_nh (nh, hicn_strategy_dpo_ctx, &pos);
hicn_strategy_mw_ctx_t *hicn_strategy_mw_ctx =
- (hicn_strategy_mw_ctx_t *) & hicn_strategy_dpo_ctx->data;
+ (hicn_strategy_mw_ctx_t *) &hicn_strategy_dpo_ctx->data;
hicn_strategy_mw_ctx->weight[pos] = DEFAULT_WEIGHT;
return HICN_ERROR_NONE;
@@ -148,7 +149,8 @@ int
hicn_strategy_mw_ctx_del_nh (hicn_face_id_t face_id, index_t dpo_idx)
{
hicn_dpo_ctx_t *hicn_strategy_dpo_ctx = hicn_strategy_dpo_ctx_get (dpo_idx);
- //No need to flush the weights, they are initialized when a dpo_ctx is created;
+ // No need to flush the weights, they are initialized when a dpo_ctx is
+ // created;
return hicn_strategy_dpo_ctx_del_nh (face_id, hicn_strategy_dpo_ctx);
}
diff --git a/hicn-plugin/src/strategies/dpo_mw.h b/hicn-plugin/src/strategies/dpo_mw.h
index 433c415fb..f3ccf7e30 100644
--- a/hicn-plugin/src/strategies/dpo_mw.h
+++ b/hicn-plugin/src/strategies/dpo_mw.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Copyright (c) 2021 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -38,17 +38,12 @@ typedef struct hicn_strategy_mw_ctx_s
* @brief Format the dpo ctx for a human-readable string
*
* @param s String to which to append the formatted dpo ctx
- * @param ap List of parameters for the formatting
+ * @param dpo_ctx DPO context
+ * @param indent Indentation
*
* @result The string with the formatted dpo ctx
*/
-u8 *format_hicn_strategy_mw_ctx (u8 * s, va_list * ap);
-
-const static dpo_vft_t dpo_strategy_mw_ctx_vft = {
- .dv_lock = hicn_strategy_dpo_ctx_lock,
- .dv_unlock = hicn_strategy_dpo_ctx_unlock,
- .dv_format = format_hicn_strategy_mw_ctx,
-};
+u8 *hicn_dpo_strategy_mw_format (u8 *s, hicn_dpo_ctx_t *dpo_ctx, u32 indent);
/**
* @brief Retrieve an hicn_strategy_mw_ctx object
@@ -67,15 +62,22 @@ hicn_dpo_ctx_t *hicn_strategy_mw_ctx_get (index_t index);
* @param dpo_idx index_t that will hold the index of the created dpo ctx
* @return HICN_ERROR_NONE if the creation was fine, otherwise EINVAL
*/
-void
-hicn_strategy_mw_ctx_create (fib_protocol_t proto, const hicn_face_id_t * next_hop,
- int nh_len, index_t * dpo_idx);
+void hicn_strategy_mw_ctx_create (fib_protocol_t proto,
+ const hicn_face_id_t *next_hop, int nh_len,
+ index_t *dpo_idx);
+
+/**
+ * @brief Update existing ctx setting it to mw
+ *
+ * @param hicn_strategy_ctx pointer to the ctx to update
+ */
+void hicn_strategy_mw_update_ctx_type (hicn_dpo_ctx_t *hicn_strategy_ctx);
/**
* @brief Add or update a next hop in the dpo ctx.
*
- * This function is meant to be used in the control plane and not in the data plane,
- * as it is not optimized for the latter.
+ * This function is meant to be used in the control plane and not in the data
+ * plane, as it is not optimized for the latter.
*
* @param nh Next hop to insert in the dpo ctx
* @param dpo_idx Index of the dpo ctx to update with the new or updated next
@@ -108,7 +110,7 @@ void hicn_strategy_mw_ctx_prefetch (index_t dpo_idx);
*
* @param dpo Dpo to check the type
*/
-int hicn_dpo_is_type_strategy_mw (const dpo_id_t * dpo);
+int hicn_dpo_is_type_strategy_mw (const dpo_id_t *dpo);
/**
* @brief Initialize the Maximum Weight strategy
@@ -120,24 +122,6 @@ void hicn_dpo_strategy_mw_module_init (void);
*/
dpo_type_t hicn_dpo_strategy_mw_get_type (void);
-/**
- * @brief Format the dpo ctx for the strategy Maximum Weight
- *
- * @param s String to append the formatted dpo ctx
- * @param ap List of arguments to format
- */
-u8 *format_hicn_dpo_strategy_mw (u8 * s, va_list * ap);
-
-/**
- * @brief Format the dpo ctx for the strategy Maximum Weight. To
- * call from other functions
- *
- * @param s String to append the formatted dpo ctx
- * @param ... List of arguments to format
- */
-u8 *hicn_strategy_mw_format_ctx (u8 * s, int n, ...);
-
-
#endif // __HICN_DPO_MW_H__
/*
diff --git a/hicn-plugin/src/strategies/dpo_rp.c b/hicn-plugin/src/strategies/dpo_rp.c
new file mode 100644
index 000000000..eb32083b9
--- /dev/null
+++ b/hicn-plugin/src/strategies/dpo_rp.c
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) 2021 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "dpo_rp.h"
+#include "strategy_rp.h"
+#include "../strategy_dpo_manager.h"
+#include "../strategy_dpo_ctx.h"
+
+/**
+ * @brief DPO type value for the rp_strategy
+ */
+static dpo_type_t hicn_dpo_type_rp;
+
+static const hicn_dpo_vft_t hicn_dpo_rp_vft = {
+ .hicn_dpo_is_type = &hicn_dpo_is_type_strategy_rp,
+ .hicn_dpo_get_type = &hicn_dpo_strategy_rp_get_type,
+ .hicn_dpo_module_init = &hicn_dpo_strategy_rp_module_init,
+ .hicn_dpo_create = &hicn_strategy_rp_ctx_create,
+ .hicn_dpo_update_type = &hicn_strategy_rp_update_ctx_type,
+ .hicn_dpo_add_update_nh = &hicn_strategy_rp_ctx_add_nh,
+ .hicn_dpo_del_nh = &hicn_strategy_rp_ctx_del_nh,
+ .hicn_dpo_format = &hicn_dpo_strategy_rp_format
+};
+
+const static dpo_vft_t dpo_strategy_rp_ctx_vft = {
+ .dv_lock = hicn_strategy_dpo_ctx_lock,
+ .dv_unlock = hicn_strategy_dpo_ctx_unlock,
+ .dv_format = hicn_strategy_dpo_format,
+};
+
+int
+hicn_dpo_is_type_strategy_rp (const dpo_id_t *dpo)
+{
+ return dpo->dpoi_type == hicn_dpo_type_rp;
+}
+
+void
+hicn_dpo_strategy_rp_module_init (void)
+{
+ /*
+ * Register our type of dpo
+ */
+ hicn_dpo_type_rp = hicn_dpo_register_new_type (
+ hicn_nodes_strategy, &hicn_dpo_rp_vft, hicn_rp_strategy_get_vft (),
+ &dpo_strategy_rp_ctx_vft);
+}
+
+dpo_type_t
+hicn_dpo_strategy_rp_get_type (void)
+{
+ return hicn_dpo_type_rp;
+}
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+u8 *
+hicn_dpo_strategy_rp_format (u8 *s, hicn_dpo_ctx_t *dpo_ctx, u32 indent)
+{
+ int i = 0;
+
+ for (i = 0; i < HICN_PARAM_FIB_ENTRY_NHOPS_MAX; i++)
+ {
+ u8 *buf = NULL;
+ if (i < dpo_ctx->entry_count)
+ buf = format (NULL, "FIB");
+ else if (i >= HICN_PARAM_FIB_ENTRY_NHOPS_MAX - dpo_ctx->tfib_entry_count)
+ buf = format (NULL, "TFIB");
+ else
+ continue;
+
+ s = format (s, "\n");
+ s = format (s, "%U ", format_hicn_face, dpo_ctx->next_hops[i], indent);
+ s = format (s, " %s", buf);
+ }
+
+ return (s);
+}
+
+void
+hicn_strategy_rp_ctx_create (fib_protocol_t proto,
+ const hicn_face_id_t *next_hop, int nh_len,
+ index_t *dpo_idx)
+{
+ hicn_dpo_ctx_t *hicn_strategy_ctx;
+
+ /* Allocate a hicn_dpo_ctx on the vpp pool and initialize it */
+ hicn_strategy_ctx = hicn_strategy_dpo_ctx_alloc ();
+
+ *dpo_idx = hicn_strategy_dpo_ctx_get_index (hicn_strategy_ctx);
+
+ init_dpo_ctx (hicn_strategy_ctx, next_hop, nh_len, hicn_dpo_type_rp,
+ (dpo_proto_t) proto);
+}
+
+void
+hicn_strategy_rp_update_ctx_type (hicn_dpo_ctx_t *hicn_strategy_ctx)
+{
+ hicn_strategy_ctx->dpo_type = hicn_dpo_type_rp;
+  // no need to reset the data, it is not used
+}
+
+int
+hicn_strategy_rp_ctx_add_nh (hicn_face_id_t nh, index_t dpo_idx)
+{
+ hicn_dpo_ctx_t *hicn_strategy_dpo_ctx = hicn_strategy_dpo_ctx_get (dpo_idx);
+ u8 pos = 0;
+
+ if (hicn_strategy_dpo_ctx == NULL)
+ {
+ return HICN_ERROR_STRATEGY_NOT_FOUND;
+ }
+
+ hicn_strategy_dpo_ctx_add_nh (nh, hicn_strategy_dpo_ctx, &pos);
+ // nothing else to initialize in this strategy
+ return HICN_ERROR_NONE;
+}
+
+int
+hicn_strategy_rp_ctx_del_nh (hicn_face_id_t face_id, index_t dpo_idx)
+{
+ hicn_dpo_ctx_t *hicn_strategy_dpo_ctx = hicn_strategy_dpo_ctx_get (dpo_idx);
+  // Nothing else to update in this strategy when a next hop is removed.
+ return hicn_strategy_dpo_ctx_del_nh (face_id, hicn_strategy_dpo_ctx);
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/hicn-plugin/src/strategies/dpo_rp.h b/hicn-plugin/src/strategies/dpo_rp.h
new file mode 100644
index 000000000..bc3384b63
--- /dev/null
+++ b/hicn-plugin/src/strategies/dpo_rp.h
@@ -0,0 +1,130 @@
+/*
+ * Copyright (c) 2021 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HICN_DPO_RP_H__
+#define __HICN_DPO_RP_H__
+
+#include <vnet/dpo/dpo.h>
+#include "../strategy_dpo_ctx.h"
+
+/**
+ * @file dpo_rp.h
+ *
+ * This file implements the strategy vft (see strategy.h) and
+ * the dpo vft (see strategy_dpo_manager.h) for the strategy
+ * replication.
+ */
+
+typedef struct hicn_strategy_rp_ctx_s
+{
+} hicn_strategy_rp_ctx_t;
+
+/**
+ * @brief Format the dpo ctx for a human-readable string
+ *
+ * @param s String to which to append the formatted dpo ctx
+ * @param dpo_ctx DPO context
+ * @param indent Indentation
+ *
+ * @result The string with the formatted dpo ctx
+ */
+u8 *hicn_dpo_strategy_rp_format (u8 *s, hicn_dpo_ctx_t *dpo_ctx, u32 indent);
+
+/**
+ * @brief Retrieve an hicn_strategy_rp_ctx object
+ *
+ * @param index Index of the hicn_dpo_ctx to retrieve
+ * @return The hicn_dpo_ctx object or NULL
+ */
+hicn_dpo_ctx_t *hicn_strategy_rp_ctx_get (index_t index);
+
+/**
+ * @brief Create a new replication ctx
+ *
+ * @param proto The protocol to which the dpo is meant for (see vpp docs)
+ * @param next_hop A list of next hops to be inserted in the dpo ctx
+ * @param nh_len Size of the list
+ * @param dpo_idx index_t that will hold the index of the created dpo ctx
+ * @return HICN_ERROR_NONE if the creation was fine, otherwise EINVAL
+ */
+void hicn_strategy_rp_ctx_create (fib_protocol_t proto,
+ const hicn_face_id_t *next_hop, int nh_len,
+ index_t *dpo_idx);
+
+/**
+ * @brief Update existing ctx setting it to rp
+ *
+ * @param hicn_strategy_ctx pointer to the ctx to update
+ */
+void hicn_strategy_rp_update_ctx_type (hicn_dpo_ctx_t *hicn_strategy_ctx);
+
+/**
+ * @brief Add or update a next hop in the dpo ctx.
+ *
+ * This function is meant to be used in the control plane and not in the data
+ * plane, as it is not optimized for the latter.
+ *
+ * @param nh Next hop to insert in the dpo ctx
+ * @param dpo_idx Index of the dpo ctx to update with the new or updated next
+ * hop
+ * @return HICN_ERROR_NONE if the update or insert was fine,
+ * otherwise HICN_ERROR_DPO_CTX_NOT_FOUND
+ */
+int hicn_strategy_rp_ctx_add_nh (hicn_face_id_t nh, index_t dpo_idx);
+
+/**
+ * @brief Delete a next hop in the dpo ctx.
+ *
+ * @param face_id Face identifier of the next hop
+ * @param dpo_idx Index of the dpo ctx to update with the new or updated next
+ * hop
+ * @return HICN_ERROR_NONE if the update or insert was fine,
+ * otherwise HICN_ERROR_DPO_CTX_NOT_FOUND
+ */
+int hicn_strategy_rp_ctx_del_nh (hicn_face_id_t face_id, index_t dpo_idx);
+
+/**
+ * @brief Prefetch a dpo
+ *
+ * @param dpo_idx Index of the dpo ctx to prefetch
+ */
+void hicn_strategy_rp_ctx_prefetch (index_t dpo_idx);
+
+/**
+ * @brief Return true if the dpo is of type strategy rp
+ *
+ * @param dpo Dpo to check the type
+ */
+int hicn_dpo_is_type_strategy_rp (const dpo_id_t *dpo);
+
+/**
+ * @brief Initialize the Replication strategy
+ */
+void hicn_dpo_strategy_rp_module_init (void);
+
+/**
+ * @brief Return the dpo type for the Replication strategy
+ */
+dpo_type_t hicn_dpo_strategy_rp_get_type (void);
+
+#endif // __HICN_DPO_RP_H__
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/hicn-plugin/src/strategies/dpo_rr.c b/hicn-plugin/src/strategies/dpo_rr.c
index adb7e1025..1fb72b953 100644
--- a/hicn-plugin/src/strategies/dpo_rr.c
+++ b/hicn-plugin/src/strategies/dpo_rr.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Copyright (c) 2021 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -28,13 +28,20 @@ static const hicn_dpo_vft_t hicn_dpo_rr_vft = {
.hicn_dpo_get_type = &hicn_dpo_strategy_rr_get_type,
.hicn_dpo_module_init = &hicn_dpo_strategy_rr_module_init,
.hicn_dpo_create = &hicn_strategy_rr_ctx_create,
+ .hicn_dpo_update_type = &hicn_strategy_rr_update_ctx_type,
.hicn_dpo_add_update_nh = &hicn_strategy_rr_ctx_add_nh,
.hicn_dpo_del_nh = &hicn_strategy_rr_ctx_del_nh,
- .hicn_dpo_format = &hicn_strategy_rr_format_ctx
+ .hicn_dpo_format = &hicn_dpo_strategy_rr_format
+};
+
+const static dpo_vft_t dpo_strategy_rr_ctx_vft = {
+ .dv_lock = hicn_strategy_dpo_ctx_lock,
+ .dv_unlock = hicn_strategy_dpo_ctx_unlock,
+ .dv_format = hicn_strategy_dpo_format,
};
int
-hicn_dpo_is_type_strategy_rr (const dpo_id_t * dpo)
+hicn_dpo_is_type_strategy_rr (const dpo_id_t *dpo)
{
return dpo->dpoi_type == hicn_dpo_type_rr;
}
@@ -45,10 +52,9 @@ hicn_dpo_strategy_rr_module_init (void)
/*
* Register our type of dpo
*/
- hicn_dpo_type_rr =
- hicn_dpo_register_new_type (hicn_nodes_strategy, &hicn_dpo_rr_vft,
- hicn_rr_strategy_get_vft (),
- &dpo_strategy_rr_ctx_vft);
+ hicn_dpo_type_rr = hicn_dpo_register_new_type (
+ hicn_nodes_strategy, &hicn_dpo_rr_vft, hicn_rr_strategy_get_vft (),
+ &dpo_strategy_rr_ctx_vft);
}
dpo_type_t
@@ -59,50 +65,29 @@ hicn_dpo_strategy_rr_get_type (void)
//////////////////////////////////////////////////////////////////////////////////////////////////
-
-u8 *
-hicn_strategy_rr_format_ctx (u8 * s, int n, ...)
-{
- va_list args;
- va_start (args, n);
- s = format_hicn_strategy_rr_ctx (s, &args);
- return s;
-}
-
u8 *
-format_hicn_strategy_rr_ctx (u8 * s, va_list * ap)
+hicn_dpo_strategy_rr_format (u8 *s, hicn_dpo_ctx_t *dpo_ctx, u32 indent)
{
int i = 0;
- index_t index = va_arg (*ap, index_t);
- hicn_dpo_ctx_t *dpo_ctx = NULL;
hicn_strategy_rr_ctx_t *rr_dpo_ctx = NULL;
- u32 indent = va_arg (*ap, u32);
-
- dpo_ctx = hicn_strategy_dpo_ctx_get (index);
- if (dpo_ctx == NULL)
- return s;
rr_dpo_ctx = (hicn_strategy_rr_ctx_t *) dpo_ctx->data;
- s =
- format (s, "hicn-rr, next hop Face %d",
- dpo_ctx->next_hops[rr_dpo_ctx->current_nhop]);
+ s = format (s, "hicn-rr, next hop Face %d",
+ dpo_ctx->next_hops[rr_dpo_ctx->current_nhop]);
for (i = 0; i < HICN_PARAM_FIB_ENTRY_NHOPS_MAX; i++)
{
u8 *buf = NULL;
if (i < dpo_ctx->entry_count)
buf = format (NULL, "FIB");
- else if (i >=
- HICN_PARAM_FIB_ENTRY_NHOPS_MAX - dpo_ctx->tfib_entry_count)
+ else if (i >= HICN_PARAM_FIB_ENTRY_NHOPS_MAX - dpo_ctx->tfib_entry_count)
buf = format (NULL, "TFIB");
else
continue;
s = format (s, "\n");
- s =
- format (s, "%U ", format_hicn_face, dpo_ctx->next_hops[i],
- indent);
+ s = format (s, "%U ", format_hicn_face, dpo_ctx->next_hops[i], indent);
s = format (s, " %s", buf);
}
@@ -110,8 +95,9 @@ format_hicn_strategy_rr_ctx (u8 * s, va_list * ap)
}
void
-hicn_strategy_rr_ctx_create (fib_protocol_t proto, const hicn_face_id_t * next_hop,
- int nh_len, index_t * dpo_idx)
+hicn_strategy_rr_ctx_create (fib_protocol_t proto,
+ const hicn_face_id_t *next_hop, int nh_len,
+ index_t *dpo_idx)
{
hicn_strategy_rr_ctx_t *hicn_strategy_rr_ctx;
hicn_dpo_ctx_t *hicn_strategy_ctx;
@@ -122,8 +108,19 @@ hicn_strategy_rr_ctx_create (fib_protocol_t proto, const hicn_face_id_t * next_h
*dpo_idx = hicn_strategy_dpo_ctx_get_index (hicn_strategy_ctx);
- init_dpo_ctx (hicn_strategy_ctx, next_hop, nh_len, hicn_dpo_type_rr, proto);
+ init_dpo_ctx (hicn_strategy_ctx, next_hop, nh_len, hicn_dpo_type_rr,
+ (dpo_proto_t) proto);
+
+ hicn_strategy_rr_ctx->current_nhop = 0;
+}
+
+void
+hicn_strategy_rr_update_ctx_type (hicn_dpo_ctx_t *hicn_strategy_ctx)
+{
+ hicn_strategy_rr_ctx_t *hicn_strategy_rr_ctx;
+ hicn_strategy_rr_ctx = (hicn_strategy_rr_ctx_t *) hicn_strategy_ctx->data;
+ hicn_strategy_ctx->dpo_type = hicn_dpo_type_rr;
hicn_strategy_rr_ctx->current_nhop = 0;
}
@@ -139,7 +136,7 @@ hicn_strategy_rr_ctx_add_nh (hicn_face_id_t nh, index_t dpo_idx)
}
hicn_strategy_dpo_ctx_add_nh (nh, hicn_strategy_dpo_ctx, &pos);
- //nothing else to initialize in this strategy
+ // nothing else to initialize in this strategy
return HICN_ERROR_NONE;
}
@@ -147,7 +144,8 @@ int
hicn_strategy_rr_ctx_del_nh (hicn_face_id_t face_id, index_t dpo_idx)
{
hicn_dpo_ctx_t *hicn_strategy_dpo_ctx = hicn_strategy_dpo_ctx_get (dpo_idx);
- //No need to change the current_nhop. It will be updated at the next selection.
+ // No need to change the current_nhop. It will be updated at the next
+ // selection.
return hicn_strategy_dpo_ctx_del_nh (face_id, hicn_strategy_dpo_ctx);
}
diff --git a/hicn-plugin/src/strategies/dpo_rr.h b/hicn-plugin/src/strategies/dpo_rr.h
index e4e5b5372..363174bb2 100644
--- a/hicn-plugin/src/strategies/dpo_rr.h
+++ b/hicn-plugin/src/strategies/dpo_rr.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Copyright (c) 2021 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -27,7 +27,6 @@
* round robin.
*/
-
/**
* Context for the Round Robin strategy
*/
@@ -41,17 +40,12 @@ typedef struct hicn_strategy_rr_ctx_s
* @brief Format the dpo ctx for a human-readable string
*
* @param s String to which to append the formatted dpo ctx
- * @param ap List of parameters for the formatting
+ * @param dpo_ctx DPO context
+ * @param indent Indentation
*
* @result The string with the formatted dpo ctx
*/
-u8 *format_hicn_strategy_rr_ctx (u8 * s, va_list * ap);
-
-const static dpo_vft_t dpo_strategy_rr_ctx_vft = {
- .dv_lock = hicn_strategy_dpo_ctx_lock,
- .dv_unlock = hicn_strategy_dpo_ctx_unlock,
- .dv_format = format_hicn_strategy_rr_ctx,
-};
+u8 *hicn_dpo_strategy_rr_format (u8 *s, hicn_dpo_ctx_t *dpo_ctx, u32 indent);
/**
* @brief Retrieve an hicn_strategy_rr_ctx object
@@ -70,15 +64,22 @@ hicn_dpo_ctx_t *hicn_strategy_rr_ctx_get (index_t index);
* @param dpo_idx index_t that will hold the index of the created dpo ctx
* @return HICN_ERROR_NONE if the creation was fine, otherwise EINVAL
*/
-void
-hicn_strategy_rr_ctx_create (fib_protocol_t proto, const hicn_face_id_t * next_hop,
- int nh_len, index_t * dpo_idx);
+void hicn_strategy_rr_ctx_create (fib_protocol_t proto,
+ const hicn_face_id_t *next_hop, int nh_len,
+ index_t *dpo_idx);
+
+/**
+ * @brief Update existing ctx setting it to rr
+ *
+ * @param hicn_strategy_ctx pointer to the ctx to update
+ */
+void hicn_strategy_rr_update_ctx_type (hicn_dpo_ctx_t *hicn_strategy_ctx);
/**
* @brief Add or update a next hop in the dpo ctx.
*
- * This function is meant to be used in the control plane and not in the data plane,
- * as it is not optimized for the latter.
+ * This function is meant to be used in the control plane and not in the data
+ * plane, as it is not optimized for the latter.
*
* @param nh Next hop to insert in the dpo ctx
* @param dpo_idx Index of the dpo ctx to update with the new or updated next
@@ -111,7 +112,7 @@ void hicn_strategy_rr_ctx_prefetch (index_t dpo_idx);
*
* @param dpo Dpo to check the type
*/
-int hicn_dpo_is_type_strategy_rr (const dpo_id_t * dpo);
+int hicn_dpo_is_type_strategy_rr (const dpo_id_t *dpo);
/**
* @brief Initialize the Round Robin strategy
@@ -123,24 +124,6 @@ void hicn_dpo_strategy_rr_module_init (void);
*/
dpo_type_t hicn_dpo_strategy_rr_get_type (void);
-/**
- * @brief Format the dpo ctx for the strategy Round Robin
- *
- * @param s String to append the formatted dpo ctx
- * @param ap List of arguments to format
- */
-u8 *format_hicn_dpo_strategy_rr (u8 * s, va_list * ap);
-
-/**
- * @brief Format the dpo ctx for the strategy Round Robin. To
- * call from other functions
- *
- * @param s String to append the formatted dpo ctx
- * @param ... List of arguments to format
- */
-u8 *hicn_strategy_rr_format_ctx (u8 * s, int n, ...);
-
-
#endif // __HICN_DPO_RR_H__
/*
diff --git a/hicn-plugin/src/strategies/strategy_lr.c b/hicn-plugin/src/strategies/strategy_lr.c
new file mode 100644
index 000000000..6bb4097ec
--- /dev/null
+++ b/hicn-plugin/src/strategies/strategy_lr.c
@@ -0,0 +1,136 @@
+/*
+ * Copyright (c) 2021 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "dpo_lr.h"
+#include "../strategy.h"
+#include "../strategy_dpo_ctx.h"
+#include "../faces/face.h"
+#include "../strategy_dpo_manager.h"
+
+/* Strategy that forwards interests between local and remote faces */
+/* It does not require extending the hicn_dpo */
+void hicn_receive_data_lr (index_t dpo_idx, int nh_idx);
+void hicn_add_interest_lr (index_t dpo_idx);
+int hicn_send_after_aggregation_lr (index_t dpo_idx, hicn_face_id_t in_face);
+void hicn_on_interest_timeout_lr (index_t dpo_idx);
+u32 hicn_select_next_hop_lr (index_t dpo_idx, hicn_face_id_t in_face,
+ hicn_face_id_t *outfaces, u16 *len);
+u8 *hicn_strategy_format_trace_lr (u8 *s, hicn_strategy_trace_t *t);
+u8 *hicn_strategy_format_lr (u8 *s, va_list *ap);
+
+static hicn_strategy_vft_t hicn_strategy_lr_vft = {
+ .hicn_receive_data = &hicn_receive_data_lr,
+ .hicn_add_interest = &hicn_add_interest_lr,
+ .hicn_send_after_aggregation = &hicn_send_after_aggregation_lr,
+ .hicn_on_interest_timeout = &hicn_on_interest_timeout_lr,
+ .hicn_select_next_hop = &hicn_select_next_hop_lr,
+ .hicn_format_strategy_trace = &hicn_strategy_format_trace_lr,
+ .hicn_format_strategy = &hicn_strategy_format_lr
+};
+
+/*
+ * Return the vft of the strategy.
+ */
+hicn_strategy_vft_t *
+hicn_lr_strategy_get_vft (void)
+{
+ return &hicn_strategy_lr_vft;
+}
+
+/* The DPO should be given as input as it contains all the information needed
+ * to calculate the next hops */
+u32
+hicn_select_next_hop_lr (index_t dpo_idx, hicn_face_id_t in_face,
+ hicn_face_id_t *outfaces, u16 *len)
+{
+ hicn_dpo_ctx_t *dpo_ctx = hicn_strategy_dpo_ctx_get (dpo_idx);
+
+ *len = 0;
+
+ if (dpo_ctx == NULL || dpo_ctx->entry_count == 0)
+ {
+ return HICN_ERROR_STRATEGY_NOT_FOUND;
+ }
+
+ // Check if input face is local face
+ int in_is_local = hicn_face_is_local (in_face);
+
+ int i = 0;
+ while (i < MAX_OUT_FACES && i < dpo_ctx->entry_count)
+ {
+ if (hicn_face_is_local (dpo_ctx->next_hops[i]) != in_is_local)
+ {
+ outfaces[0] = dpo_ctx->next_hops[i];
+ *len = 1;
+ break;
+ }
+
+ i++;
+ }
+
+ return HICN_ERROR_NONE;
+}
+
+void
+hicn_add_interest_lr (index_t dpo_ctx_idx)
+{
+ /* Nothing to do */
+}
+
+int
+hicn_send_after_aggregation_lr (index_t dpo_idx, hicn_face_id_t in_face)
+{
+ if (hicn_face_is_local (in_face))
+ {
+ return true;
+ }
+
+ return false;
+}
+
+void
+hicn_on_interest_timeout_lr (index_t dpo_idx)
+{
+  /* Nothing to do in the lr strategy when an interest times out */
+}
+
+void
+hicn_receive_data_lr (index_t dpo_idx, int nh_idx)
+{
+ /* nothing to do */
+}
+
+/* packet trace format function */
+u8 *
+hicn_strategy_format_trace_lr (u8 *s, hicn_strategy_trace_t *t)
+{
+ s = format (s,
+ "Strategy_lr: pkt: %d, sw_if_index %d, next index %d, dpo_type "
+ "%d, out_face %d",
+ (int) t->pkt_type, t->sw_if_index, t->next_index, t->dpo_type,
+ t->out_face);
+ return (s);
+}
+
+u8 *
+hicn_strategy_format_lr (u8 *s, va_list *ap)
+{
+
+ u32 indent = va_arg (*ap, u32);
+ s = format (
+    s, "Local-Remote: send from local face to remote only and vice versa \n",
+ indent);
+ return (s);
+}
diff --git a/hicn-plugin/src/strategies/strategy_lr.h b/hicn-plugin/src/strategies/strategy_lr.h
new file mode 100644
index 000000000..826eb89a0
--- /dev/null
+++ b/hicn-plugin/src/strategies/strategy_lr.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2021 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __HICN_STRATEGY_LR_H__
+#define __HICN_STRATEGY_LR_H__
+
+#include "../strategy.h"
+
+/**
+ * @file strategy_lr.h
+ *
+ * This file implements the local-remote strategy. In this
+ * strategy, if the interest is received from an application (local)
+ * face, it is then sent to a remote next hop. Vice versa, when an interest
+ * is received from a remote face, it is then sent to a local face.
+ */
+
+/**
+ * @brief Return the vft for the local-remote strategy
+ */
+hicn_strategy_vft_t *hicn_lr_strategy_get_vft (void);
+
+#endif // __HICN_STRATEGY_LR_H__
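The rule described above, and implemented by hicn_select_next_hop_lr earlier in this patch, amounts to picking the first next hop whose locality differs from that of the incoming face. A standalone illustration of the rule with a plain array and a hypothetical is_local flag (not plugin code):

#include <stdio.h>

typedef struct { int id; int is_local; } face_t;

/* Return the first candidate whose locality differs from the input face,
   or -1 if none exists (the "*len = 0" case in the plugin). */
static int
select_local_remote (const face_t *in, const face_t *cand, int n)
{
  for (int i = 0; i < n; i++)
    if (cand[i].is_local != in->is_local)
      return cand[i].id;
  return -1;
}

int
main (void)
{
  face_t in = { 1, 1 }; /* interest arriving on a local application face */
  face_t nh[] = { { 2, 1 }, { 3, 0 }, { 4, 0 } };
  printf ("out face %d\n", select_local_remote (&in, nh, 3)); /* prints 3 */
  return 0;
}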
diff --git a/hicn-plugin/src/strategies/strategy_mw.c b/hicn-plugin/src/strategies/strategy_mw.c
index fe4d5896a..1f0210fbe 100644
--- a/hicn-plugin/src/strategies/strategy_mw.c
+++ b/hicn-plugin/src/strategies/strategy_mw.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Copyright (c) 2021 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -16,24 +16,24 @@
#include "../strategy.h"
#include "../strategy_dpo_ctx.h"
#include "../faces/face.h"
-#include "../hashtb.h"
#include "../strategy_dpo_manager.h"
/* Simple strategy that chooses the next hop with the maximum weight */
/* It does not require to exend the hicn_dpo */
void hicn_receive_data_mw (index_t dpo_idx, int nh_idx);
-void hicn_add_interest_mw (index_t dpo_idx, hicn_hash_entry_t * pit_entry);
+void hicn_add_interest_mw (index_t dpo_idx);
+int hicn_send_after_aggregation_mw (index_t dpo_idx, hicn_face_id_t in_face);
void hicn_on_interest_timeout_mw (index_t dpo_idx);
-u32 hicn_select_next_hop_mw (index_t dpo_idx, int *nh_idx,
- hicn_face_id_t* outface);
+u32 hicn_select_next_hop_mw (index_t dpo_idx, hicn_face_id_t in_face,
+ hicn_face_id_t *outfaces, u16 *len);
u32 get_strategy_node_index_mw (void);
-u8 *hicn_strategy_format_trace_mw (u8 * s, hicn_strategy_trace_t * t);
-u8 *hicn_strategy_format_mw (u8 * s, va_list * ap);
-
+u8 *hicn_strategy_format_trace_mw (u8 *s, hicn_strategy_trace_t *t);
+u8 *hicn_strategy_format_mw (u8 *s, va_list *ap);
static hicn_strategy_vft_t hicn_strategy_mw_vft = {
.hicn_receive_data = &hicn_receive_data_mw,
.hicn_add_interest = &hicn_add_interest_mw,
+ .hicn_send_after_aggregation = &hicn_send_after_aggregation_mw,
.hicn_on_interest_timeout = &hicn_on_interest_timeout_mw,
.hicn_select_next_hop = &hicn_select_next_hop_mw,
.hicn_format_strategy_trace = hicn_strategy_format_trace_mw,
@@ -49,14 +49,19 @@ hicn_mw_strategy_get_vft (void)
return &hicn_strategy_mw_vft;
}
-/* DPO should be give in input as it containes all the information to calculate the next hops*/
+/* The DPO should be given as input as it contains all the information needed
+ * to calculate the next hops */
u32
-hicn_select_next_hop_mw (index_t dpo_idx, int *nh_idx, hicn_face_id_t* outface)
+hicn_select_next_hop_mw (index_t dpo_idx, hicn_face_id_t in_face,
+ hicn_face_id_t *outfaces, u16 *len)
{
hicn_dpo_ctx_t *dpo_ctx = hicn_strategy_dpo_ctx_get (dpo_idx);
if (dpo_ctx == NULL)
- return HICN_ERROR_STRATEGY_NOT_FOUND;
+ {
+ *len = 0;
+ return HICN_ERROR_STRATEGY_NOT_FOUND;
+ }
hicn_strategy_mw_ctx_t *hicn_strategy_mw_ctx =
(hicn_strategy_mw_ctx_t *) dpo_ctx->data;
@@ -65,25 +70,27 @@ hicn_select_next_hop_mw (index_t dpo_idx, int *nh_idx, hicn_face_id_t* outface)
for (int i = 0; i < dpo_ctx->entry_count; i++)
{
if (hicn_strategy_mw_ctx->weight[next_hop_index] <
- hicn_strategy_mw_ctx->weight[i])
- {
- next_hop_index = i;
- }
+ hicn_strategy_mw_ctx->weight[i])
+ {
+ next_hop_index = i;
+ }
}
- *outface = dpo_ctx->next_hops[next_hop_index];
+ outfaces[0] = dpo_ctx->next_hops[next_hop_index];
+ *len = 1;
return HICN_ERROR_NONE;
}
void
-hicn_add_interest_mw (index_t dpo_ctx_idx, hicn_hash_entry_t * hash_entry)
+hicn_add_interest_mw (index_t dpo_ctx_idx)
{
- hash_entry->dpo_ctx_id = dpo_ctx_idx;
- dpo_id_t hicn_dpo_id =
- { hicn_dpo_strategy_mw_get_type (), 0, 0, dpo_ctx_idx };
- hicn_strategy_dpo_ctx_lock (&hicn_dpo_id);
- hash_entry->vft_id = hicn_dpo_get_vft_id (&hicn_dpo_id);
+}
+
+int
+hicn_send_after_aggregation_mw (index_t dpo_idx, hicn_face_id_t in_face)
+{
+ return false;
}
void
@@ -97,10 +104,9 @@ hicn_receive_data_mw (index_t dpo_idx, int nh_idx)
{
}
-
/* packet trace format function */
u8 *
-hicn_strategy_format_trace_mw (u8 * s, hicn_strategy_trace_t * t)
+hicn_strategy_format_trace_mw (u8 *s, hicn_strategy_trace_t *t)
{
s = format (s, "Strategy_mw: pkt: %d, sw_if_index %d, next index %d",
(int) t->pkt_type, t->sw_if_index, t->next_index);
@@ -108,14 +114,14 @@ hicn_strategy_format_trace_mw (u8 * s, hicn_strategy_trace_t * t)
}
u8 *
-hicn_strategy_format_mw (u8 * s, va_list * ap)
+hicn_strategy_format_mw (u8 *s, va_list *ap)
{
u32 indent = va_arg (*ap, u32);
- s =
- format (s,
- "Static Weights: weights are updated by the control plane, next hop is the one with the maximum weight.\n",
- indent);
+ s = format (s,
+ "Static Weights: weights are updated by the control plane, next "
+ "hop is the one with the maximum weight.\n",
+ indent);
return (s);
}
diff --git a/hicn-plugin/src/strategies/strategy_mw.h b/hicn-plugin/src/strategies/strategy_mw.h
index 9e0078b23..186e8c5ab 100644
--- a/hicn-plugin/src/strategies/strategy_mw.h
+++ b/hicn-plugin/src/strategies/strategy_mw.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Copyright (c) 2021 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
diff --git a/hicn-plugin/src/strategies/strategy_mw_cli.c b/hicn-plugin/src/strategies/strategy_mw_cli.c
index 636d7effa..4ace68423 100644
--- a/hicn-plugin/src/strategies/strategy_mw_cli.c
+++ b/hicn-plugin/src/strategies/strategy_mw_cli.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Copyright (c) 2021 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -26,9 +26,9 @@
#include "dpo_mw.h"
static clib_error_t *
-hicn_mw_strategy_cli_set_weight_command_fn (vlib_main_t * vm,
- unformat_input_t * main_input,
- vlib_cli_command_t * cmd)
+hicn_mw_strategy_cli_set_weight_command_fn (vlib_main_t *vm,
+ unformat_input_t *main_input,
+ vlib_cli_command_t *cmd)
{
clib_error_t *cl_err = 0;
int ret = HICN_ERROR_NONE;
@@ -54,19 +54,17 @@ hicn_mw_strategy_cli_set_weight_command_fn (vlib_main_t * vm,
;
else
{
- return clib_error_return (0, "%s",
- get_error_string
- (HICN_ERROR_CLI_INVAL));
+ return clib_error_return (
+ 0, "%s", get_error_string (HICN_ERROR_CLI_INVAL));
}
-
}
}
if (((weight < 0) || (weight > HICN_PARAM_FIB_ENTRY_NHOP_WGHT_MAX)))
{
- cl_err = clib_error_return (0,
- "Next-hop weight must be between 0 and %d",
- (int) HICN_PARAM_FIB_ENTRY_NHOP_WGHT_MAX);
+ cl_err =
+ clib_error_return (0, "Next-hop weight must be between 0 and %d",
+ (int) HICN_PARAM_FIB_ENTRY_NHOP_WGHT_MAX);
goto done;
}
@@ -77,24 +75,22 @@ hicn_mw_strategy_cli_set_weight_command_fn (vlib_main_t * vm,
goto done;
}
- prefix.fp_proto =
- ip46_address_is_ip4 (&prefix.
- fp_addr) ? FIB_PROTOCOL_IP4 : FIB_PROTOCOL_IP6;
+ prefix.fp_proto = ip46_address_is_ip4 (&prefix.fp_addr) ? FIB_PROTOCOL_IP4 :
+ FIB_PROTOCOL_IP6;
ret = hicn_route_get_dpo (&prefix, &hicn_dpo_id, &fib_index);
if (ret == HICN_ERROR_NONE)
{
hicn_dpo_ctx = hicn_strategy_dpo_ctx_get (hicn_dpo_id->dpoi_index);
- if (hicn_dpo_ctx == NULL
- || hicn_dpo_id->dpoi_type != hicn_dpo_strategy_mw_get_type ())
+ if (hicn_dpo_ctx == NULL ||
+ hicn_dpo_id->dpoi_type != hicn_dpo_strategy_mw_get_type ())
{
cl_err = clib_error_return (0, get_error_string (ret));
goto done;
}
- hicn_strategy_mw_ctx_t *mw_dpo =
- (hicn_strategy_mw_ctx_t *) hicn_dpo_ctx;
+ hicn_strategy_mw_ctx_t *mw_dpo = (hicn_strategy_mw_ctx_t *) hicn_dpo_ctx;
int idx = ~0;
for (int i = 0; i < hicn_dpo_ctx->entry_count; i++)
if (hicn_dpo_ctx->next_hops[i] == faceid)
@@ -102,10 +98,8 @@ hicn_mw_strategy_cli_set_weight_command_fn (vlib_main_t * vm,
if (idx == ~0)
{
- cl_err =
- clib_error_return (0,
- get_error_string
- (HICN_ERROR_STRATEGY_NH_NOT_FOUND));
+ cl_err = clib_error_return (
+ 0, get_error_string (HICN_ERROR_STRATEGY_NH_NOT_FOUND));
goto done;
}
@@ -114,24 +108,21 @@ hicn_mw_strategy_cli_set_weight_command_fn (vlib_main_t * vm,
else
{
cl_err = clib_error_return (0, get_error_string (ret));
-
}
done:
return (cl_err);
-
}
/* cli declaration for 'strategy mw' */
-/* *INDENT-OFF* */
-VLIB_CLI_COMMAND(hicn_mw_strategy_cli_set_weight_command, static)=
-{
+
+VLIB_CLI_COMMAND (hicn_mw_strategy_cli_set_weight_command, static) = {
.path = "hicn strategy mw set",
- .short_help = "hicn strategy mw set prefix <prefix> face <face_id> weight <weight>",
+ .short_help =
+ "hicn strategy mw set prefix <prefix> face <face_id> weight <weight>",
.function = hicn_mw_strategy_cli_set_weight_command_fn,
};
-/* *INDENT-ON* */
/*
* fd.io coding-style-patch-verification: ON
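For reference, the command registered above is issued from the VPP CLI as in the line below; the prefix, face id and weight are placeholder values. hicn_select_next_hop_mw then forwards interests matching that prefix on the face holding the largest configured weight.

vpp# hicn strategy mw set prefix b001::/64 face 1 weight 3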
diff --git a/hicn-plugin/src/strategies/strategy_rp.c b/hicn-plugin/src/strategies/strategy_rp.c
new file mode 100644
index 000000000..a44da1323
--- /dev/null
+++ b/hicn-plugin/src/strategies/strategy_rp.c
@@ -0,0 +1,124 @@
+/*
+ * Copyright (c) 2021 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "dpo_rp.h"
+#include "../strategy.h"
+#include "../strategy_dpo_ctx.h"
+#include "../faces/face.h"
+#include "../strategy_dpo_manager.h"
+
+/* Simple strategy that forwards interests to all next hops */
+/* It does not require extending the hicn_dpo */
+void hicn_receive_data_rp (index_t dpo_idx, int nh_idx);
+void hicn_add_interest_rp (index_t dpo_idx);
+int hicn_send_after_aggregation_rp (index_t dpo_idx, hicn_face_id_t in_face);
+void hicn_on_interest_timeout_rp (index_t dpo_idx);
+u32 hicn_select_next_hop_rp (index_t dpo_idx, hicn_face_id_t in_face,
+ hicn_face_id_t *outfaces, u16 *len);
+u8 *hicn_strategy_format_trace_rp (u8 *s, hicn_strategy_trace_t *t);
+u8 *hicn_strategy_format_rp (u8 *s, va_list *ap);
+
+static hicn_strategy_vft_t hicn_strategy_rp_vft = {
+ .hicn_receive_data = &hicn_receive_data_rp,
+ .hicn_add_interest = &hicn_add_interest_rp,
+ .hicn_on_interest_timeout = &hicn_on_interest_timeout_rp,
+ .hicn_select_next_hop = &hicn_select_next_hop_rp,
+ .hicn_format_strategy_trace = &hicn_strategy_format_trace_rp,
+ .hicn_format_strategy = &hicn_strategy_format_rp
+};
+
+/*
+ * Return the vft of the strategy.
+ */
+hicn_strategy_vft_t *
+hicn_rp_strategy_get_vft (void)
+{
+ return &hicn_strategy_rp_vft;
+}
+
+/* The DPO should be given as input as it contains all the information needed
+ * to calculate the next hops */
+u32
+hicn_select_next_hop_rp (index_t dpo_idx, hicn_face_id_t in_face,
+ hicn_face_id_t *outfaces, u16 *len)
+{
+ hicn_dpo_ctx_t *dpo_ctx = hicn_strategy_dpo_ctx_get (dpo_idx);
+
+ if (dpo_ctx == NULL || dpo_ctx->entry_count == 0)
+ {
+ *len = 0;
+ return HICN_ERROR_STRATEGY_NOT_FOUND;
+ }
+
+ int i = 0;
+ while (i < MAX_OUT_FACES && i < dpo_ctx->entry_count)
+ {
+ outfaces[i] = dpo_ctx->next_hops[i];
+ i++;
+ }
+ *len = i;
+
+ return HICN_ERROR_NONE;
+}
+
+void
+hicn_add_interest_rp (index_t dpo_ctx_idx)
+{
+ /* Nothing to do */
+}
+
+int
+hicn_send_after_aggregation_rp (index_t dpo_idx, hicn_face_id_t in_face)
+{
+ return false;
+}
+
+void
+hicn_on_interest_timeout_rp (index_t dpo_idx)
+{
+  /* Nothing to do in the rp strategy when an interest times out */
+}
+
+void
+hicn_receive_data_rp (index_t dpo_idx, int nh_idx)
+{
+ /* nothing to do */
+}
+
+/* packet trace format function */
+u8 *
+hicn_strategy_format_trace_rp (u8 *s, hicn_strategy_trace_t *t)
+{
+ s = format (s, "Strategy_rp: pkt: %d, sw_if_index %d, next index %d",
+ (int) t->pkt_type, t->sw_if_index, t->next_index);
+ return (s);
+}
+
+u8 *
+hicn_strategy_format_rp (u8 *s, va_list *ap)
+{
+
+ u32 indent = va_arg (*ap, u32);
+ s = format (s, "Replication: send to all the next hops \n", indent);
+ return (s);
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/hicn-plugin/src/hicn_msg_enum.h b/hicn-plugin/src/strategies/strategy_rp.h
index fcf2a1e87..c308497cc 100644
--- a/hicn-plugin/src/hicn_msg_enum.h
+++ b/hicn-plugin/src/strategies/strategy_rp.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Copyright (c) 2021 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -13,27 +13,29 @@
* limitations under the License.
*/
-#ifndef __HICN_MSG_ENUM_H__
-#define __HICN_MSG_ENUM_H__
+#ifndef __HICN_STRATEGY_RP_H__
+#define __HICN_STRATEGY_RP_H__
-#include <vppinfra/byte_order.h>
+#include "../strategy.h"
/**
- * @file
+ * @file strategy_rp.h
+ *
+ * This file implements the replication strategy. In this
+ * strategy, all the next hops are used to send an interest.
+ */
+
+/**
+ * @brief Return the vft for the Replication strategy
*/
-#define vl_msg_id(n, h) n,
-typedef enum
-{
-#include <hicn/hicn_all_api_h.h>
- /* We'll want to know how many messages IDs we need... */
- VL_MSG_FIRST_AVAILABLE,
-} vl_msg_id_t;
-#undef vl_msg_id
+hicn_strategy_vft_t *hicn_rp_strategy_get_vft (void);
-#endif /* __HICN_MSG_ENUM_H__ */
+#endif // __HICN_STRATEGY_RP_H__
/*
* fd.io coding-style-patch-verification: ON
*
- * Local Variables: eval: (c-set-style "gnu") End:
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
*/
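As the header notes, replication copies every configured next hop into the output array, capped at MAX_OUT_FACES, which is exactly what hicn_select_next_hop_rp does earlier in this patch. A standalone sketch of that fan-out with hypothetical names and sizes (not plugin code):

#include <stdio.h>

#define MAX_OUT 8 /* stand-in for MAX_OUT_FACES */

/* Copy every next hop into the output array and return how many were used. */
static int
replicate (const int *next_hops, int count, int *out)
{
  int n = count < MAX_OUT ? count : MAX_OUT;
  for (int i = 0; i < n; i++)
    out[i] = next_hops[i]; /* each next hop receives a copy of the interest */
  return n;
}

int
main (void)
{
  int nh[] = { 10, 11, 12 };
  int out[MAX_OUT];
  int n = replicate (nh, 3, out);
  for (int i = 0; i < n; i++)
    printf ("send on face %d\n", out[i]);
  return 0;
}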
diff --git a/hicn-plugin/src/strategies/strategy_rr.c b/hicn-plugin/src/strategies/strategy_rr.c
index 4c65ce52a..af3cb320a 100644
--- a/hicn-plugin/src/strategies/strategy_rr.c
+++ b/hicn-plugin/src/strategies/strategy_rr.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Copyright (c) 2021 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -17,19 +17,18 @@
#include "../strategy.h"
#include "../strategy_dpo_ctx.h"
#include "../faces/face.h"
-#include "../hashtb.h"
#include "../strategy_dpo_manager.h"
/* Simple strategy that chooses the next hop with the maximum weight */
/* It does not require to exend the hicn_dpo */
void hicn_receive_data_rr (index_t dpo_idx, int nh_idx);
-void hicn_add_interest_rr (index_t dpo_idx, hicn_hash_entry_t * pit_entry);
+void hicn_add_interest_rr (index_t dpo_idx);
+int hicn_send_after_aggregation_rr (index_t dpo_idx, hicn_face_id_t in_face);
void hicn_on_interest_timeout_rr (index_t dpo_idx);
-u32 hicn_select_next_hop_rr (index_t dpo_idx, int *nh_idx,
- hicn_face_id_t* outface);
-u8 *hicn_strategy_format_trace_rr (u8 * s, hicn_strategy_trace_t * t);
-u8 *hicn_strategy_format_rr (u8 * s, va_list * ap);
-
+u32 hicn_select_next_hop_rr (index_t dpo_idx, hicn_face_id_t in_face,
+ hicn_face_id_t *outfaces, u16 *len);
+u8 *hicn_strategy_format_trace_rr (u8 *s, hicn_strategy_trace_t *t);
+u8 *hicn_strategy_format_rr (u8 *s, va_list *ap);
static hicn_strategy_vft_t hicn_strategy_rr_vft = {
.hicn_receive_data = &hicn_receive_data_rr,
@@ -49,20 +48,25 @@ hicn_rr_strategy_get_vft (void)
return &hicn_strategy_rr_vft;
}
-/* DPO should be give in input as it containes all the information to calculate the next hops*/
+/* The DPO should be given as input as it contains all the information needed
+ * to calculate the next hops */
u32
-hicn_select_next_hop_rr (index_t dpo_idx, int *nh_idx, hicn_face_id_t* outface)
+hicn_select_next_hop_rr (index_t dpo_idx, hicn_face_id_t in_face,
+ hicn_face_id_t *outfaces, u16 *len)
{
hicn_dpo_ctx_t *dpo_ctx = hicn_strategy_dpo_ctx_get (dpo_idx);
if (dpo_ctx == NULL)
- return HICN_ERROR_STRATEGY_NOT_FOUND;
+ {
+ *len = 0;
+ return HICN_ERROR_STRATEGY_NOT_FOUND;
+ }
hicn_strategy_rr_ctx_t *hicn_strategy_rr_ctx =
(hicn_strategy_rr_ctx_t *) dpo_ctx->data;
- *outface =
- dpo_ctx->next_hops[hicn_strategy_rr_ctx->current_nhop];
+ outfaces[0] = dpo_ctx->next_hops[hicn_strategy_rr_ctx->current_nhop];
+ *len = 1;
hicn_strategy_rr_ctx->current_nhop =
(hicn_strategy_rr_ctx->current_nhop + 1) % dpo_ctx->entry_count;
@@ -71,13 +75,14 @@ hicn_select_next_hop_rr (index_t dpo_idx, int *nh_idx, hicn_face_id_t* outface)
}
void
-hicn_add_interest_rr (index_t dpo_ctx_idx, hicn_hash_entry_t * hash_entry)
+hicn_add_interest_rr (index_t dpo_ctx_idx)
{
- hash_entry->dpo_ctx_id = dpo_ctx_idx;
- dpo_id_t hicn_dpo_id =
- { hicn_dpo_strategy_rr_get_type (), 0, 0, dpo_ctx_idx };
- hicn_strategy_dpo_ctx_lock (&hicn_dpo_id);
- hash_entry->vft_id = hicn_dpo_get_vft_id (&hicn_dpo_id);
+}
+
+int
+hicn_send_after_aggregation_rr (index_t dpo_idx, hicn_face_id_t in_face)
+{
+ return false;
}
void
@@ -91,10 +96,9 @@ hicn_receive_data_rr (index_t dpo_idx, int nh_idx)
{
}
-
/* packet trace format function */
u8 *
-hicn_strategy_format_trace_rr (u8 * s, hicn_strategy_trace_t * t)
+hicn_strategy_format_trace_rr (u8 *s, hicn_strategy_trace_t *t)
{
s = format (s, "Strategy_rr: pkt: %d, sw_if_index %d, next index %d",
(int) t->pkt_type, t->sw_if_index, t->next_index);
@@ -102,14 +106,14 @@ hicn_strategy_format_trace_rr (u8 * s, hicn_strategy_trace_t * t)
}
u8 *
-hicn_strategy_format_rr (u8 * s, va_list * ap)
+hicn_strategy_format_rr (u8 *s, va_list *ap)
{
u32 indent = va_arg (*ap, u32);
- s =
- format (s,
- "Round Robin: next hop is chosen ciclying between all the available next hops, one after the other.\n",
- indent);
+ s = format (s,
+	      "Round Robin: next hop is chosen cycling through all the "
+ "available next hops, one after the other.\n",
+ indent);
return (s);
}
diff --git a/hicn-plugin/src/strategies/strategy_rr.h b/hicn-plugin/src/strategies/strategy_rr.h
index 4dfe76b43..fb7520180 100644
--- a/hicn-plugin/src/strategies/strategy_rr.h
+++ b/hicn-plugin/src/strategies/strategy_rr.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Copyright (c) 2021 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
diff --git a/hicn-plugin/src/strategy.h b/hicn-plugin/src/strategy.h
index d949f38a4..135aeea7b 100644
--- a/hicn-plugin/src/strategy.h
+++ b/hicn-plugin/src/strategy.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Copyright (c) 2021 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -17,25 +17,24 @@
#define __HICN_STRATEGY__
#include "hicn.h"
-#include "hashtb.h"
#include "mgmt.h"
#include "faces/face.h"
/**
* @file strategy.h
*
- * A strategy is defined as a dpo and a set of function (vft) that will be called
- * during the packet processing. A strategy is associated to an entry in the fib by
- * assigning the corresponding dpo to the fib entry. The dpo points to a hICN dpo
- * context (ctx) which contains the information needed by the strategy to compute
- * the next hop. Each strategy hash its own dpo type, which means that the dpo_type
- * uniquely identifies a strategy and its vft. The strategy node will use the dpo_type
- * to retrieve the corresponding vft.
- * Here we provide:
- * - a template for the callbacks to implement in order to create a new strategy
- * (hicn_fwd_strategy_t)
- * - a default implementation for the strategy node which will call the strategy
- * functions while processing the interest packets
+ * A strategy is defined as a dpo and a set of functions (vft) that are
+ * called during packet processing. A strategy is associated with an entry in
+ * the fib by assigning the corresponding dpo to the fib entry. The dpo points
+ * to a hICN dpo context (ctx) which contains the information needed by the
+ * strategy to compute the next hop. Each strategy has its own dpo type, which
+ * means that the dpo_type uniquely identifies a strategy and its vft. The
+ * strategy node uses the dpo_type to retrieve the corresponding vft. Here
+ * we provide:
+ * - a template for the callbacks to implement in order to create a new
+ * strategy (hicn_fwd_strategy_t)
+ * - a default implementation for the strategy node, which calls the
+ * strategy functions while processing the interest packets
*/
/* Trace context struct */
@@ -45,17 +44,19 @@ typedef struct
u32 sw_if_index;
u8 pkt_type;
dpo_type_t dpo_type;
+ hicn_face_id_t out_face;
} hicn_strategy_trace_t;
typedef struct hicn_strategy_vft_s
{
void (*hicn_receive_data) (index_t dpo_idx, int nh_idx);
void (*hicn_on_interest_timeout) (index_t dpo_idx);
- void (*hicn_add_interest) (index_t dpo_idx, hicn_hash_entry_t * pit_entry);
- u32 (*hicn_select_next_hop) (index_t dpo_idx, int *nh_idx,
- hicn_face_id_t* outface);
+ void (*hicn_add_interest) (index_t dpo_idx);
+ int (*hicn_send_after_aggregation) (index_t dpo_idx, hicn_face_id_t in_face);
+ u32 (*hicn_select_next_hop) (index_t dpo_idx, hicn_face_id_t in_face,
+ hicn_face_id_t *outfaces, u16 *len);
u8 *(*hicn_format_strategy_trace) (u8 *, hicn_strategy_trace_t *);
- u8 *(*hicn_format_strategy) (u8 * s, va_list * ap);
+ u8 *(*hicn_format_strategy) (u8 *s, va_list *ap);
/**< Format an hICN dpo*/
} hicn_strategy_vft_t;
@@ -69,25 +70,23 @@ typedef enum
HICN_STRATEGY_N_NEXT,
} hicn_strategy_next_t;
-const static char *const hicn_ip6_nodes[] =
-{
- "hicn6-iface-input", // this is the name you give your node in VLIB_REGISTER_NODE
- NULL,
+const static char *const hicn_ip6_nodes[] = {
+ "hicn6-iface-input", // this is the name you give your node in
+ // VLIB_REGISTER_NODE
+ NULL,
};
-const static char *const hicn_ip4_nodes[] =
-{
- "hicn4-iface-input", // this is the name you give your node in VLIB_REGISTER_NODE
- NULL,
+const static char *const hicn_ip4_nodes[] = {
+ "hicn4-iface-input", // this is the name you give your node in
+ // VLIB_REGISTER_NODE
+ NULL,
};
-const static char *const *const hicn_nodes_strategy[DPO_PROTO_NUM] =
-{
- [DPO_PROTO_IP6] = hicn_ip6_nodes,
- [DPO_PROTO_IP4] = hicn_ip4_nodes,
+const static char *const *const hicn_nodes_strategy[DPO_PROTO_NUM] = {
+ [DPO_PROTO_IP6] = hicn_ip6_nodes,
+ [DPO_PROTO_IP4] = hicn_ip4_nodes,
};
-
extern vlib_node_registration_t hicn_strategy_node;
#endif /* //__HICN_STRATEGY__ */
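As a rough illustration of the template mentioned in the header comment above,
a new strategy bundles its callbacks into a hicn_strategy_vft_t, mirroring the
round-robin vft shown earlier in this diff. The *_xx names below are
placeholders, not existing symbols:

  /* hypothetical skeleton for a new strategy "xx" (sketch only) */
  static hicn_strategy_vft_t hicn_strategy_xx_vft = {
    .hicn_receive_data = &hicn_receive_data_xx,
    .hicn_add_interest = &hicn_add_interest_xx,
    .hicn_send_after_aggregation = &hicn_send_after_aggregation_xx,
    .hicn_on_interest_timeout = &hicn_on_interest_timeout_xx,
    .hicn_select_next_hop = &hicn_select_next_hop_xx,
    .hicn_format_strategy_trace = &hicn_strategy_format_trace_xx,
    .hicn_format_strategy = &hicn_strategy_format_xx,
  };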
diff --git a/hicn-plugin/src/strategy_dpo_ctx.c b/hicn-plugin/src/strategy_dpo_ctx.c
index 342c78bb5..eb4173944 100644
--- a/hicn-plugin/src/strategy_dpo_ctx.c
+++ b/hicn-plugin/src/strategy_dpo_ctx.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020 Cisco and/or its affiliates.
+ * Copyright (c) 2021-2023 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -21,56 +21,55 @@ hicn_dpo_ctx_t *hicn_strategy_dpo_ctx_pool;
void
hicn_strategy_init_dpo_ctx_pool ()
{
- pool_init_fixed (hicn_strategy_dpo_ctx_pool, 256);
-
+ pool_alloc_aligned (hicn_strategy_dpo_ctx_pool, 256,
+ 2 * CLIB_CACHE_LINE_BYTES);
}
void
-hicn_strategy_dpo_ctx_lock (dpo_id_t * dpo)
+hicn_strategy_dpo_ctx_lock (dpo_id_t *dpo)
{
hicn_dpo_ctx_t *dpo_ctx = hicn_strategy_dpo_ctx_get (dpo->dpoi_index);
- if (dpo_ctx != NULL)
+ if (PREDICT_TRUE (dpo_ctx != NULL))
{
dpo_ctx->locks++;
+ HICN_DEBUG ("Locking DPO CTX with index %d. Lock now: %d",
+ dpo->dpoi_index, dpo_ctx->locks);
+ }
+ else
+ {
+      HICN_ERROR ("Trying to lock NULL dpo_ctx with index %d",
+ dpo->dpoi_index);
}
}
void
-hicn_strategy_dpo_ctx_unlock (dpo_id_t * dpo)
+hicn_strategy_dpo_ctx_unlock (dpo_id_t *dpo)
{
hicn_dpo_ctx_t *hicn_strategy_dpo_ctx =
(hicn_dpo_ctx_t *) hicn_strategy_dpo_ctx_get (dpo->dpoi_index);
- if (hicn_strategy_dpo_ctx != NULL)
+ if (PREDICT_TRUE (hicn_strategy_dpo_ctx != NULL))
{
hicn_strategy_dpo_ctx->locks--;
+      HICN_DEBUG ("Unlocking DPO CTX with index %d. Lock now: %d",
+ dpo->dpoi_index, hicn_strategy_dpo_ctx->locks);
if (0 == hicn_strategy_dpo_ctx->locks)
{
+ HICN_DEBUG ("Releasing DPO CTX %d", dpo->dpoi_index);
pool_put (hicn_strategy_dpo_ctx_pool, hicn_strategy_dpo_ctx);
}
}
-}
-
-u8 *
-hicn_strategy_dpo_format_ctx (u8 * s, va_list * ap)
-{
- index_t index = va_arg (*ap, index_t);
- hicn_dpo_ctx_t *dpo = NULL;
- u32 indent = va_arg (*ap, u32);
-
- dpo = (hicn_dpo_ctx_t *) hicn_strategy_dpo_ctx_get (index);
-
- const hicn_dpo_vft_t *dpo_vft = hicn_dpo_get_vft (dpo->dpo_type);
-
- s = dpo_vft->hicn_dpo_format (s, 2, index, indent);
-
- return (s);
+ else
+ {
+      HICN_ERROR ("Trying to unlock NULL dpo_ctx with index %d",
+ dpo->dpoi_index);
+ }
}
index_t
-hicn_strategy_dpo_ctx_get_index (hicn_dpo_ctx_t * cd)
+hicn_strategy_dpo_ctx_get_index (hicn_dpo_ctx_t *cd)
{
return (cd - hicn_strategy_dpo_ctx_pool);
}
@@ -91,26 +90,29 @@ hicn_strategy_dpo_ctx_get (index_t index)
hicn_dpo_ctx_t *
hicn_strategy_dpo_ctx_alloc ()
{
+ HICN_DEBUG ("Allocating new DPO CTX");
hicn_dpo_ctx_t *dpo_ctx;
- pool_get (hicn_strategy_dpo_ctx_pool, dpo_ctx);
+ pool_get_aligned (hicn_strategy_dpo_ctx_pool, dpo_ctx,
+ 2 * CLIB_CACHE_LINE_BYTES);
+ dpo_ctx->locks = 0;
return dpo_ctx;
}
int
-hicn_strategy_dpo_ctx_add_nh (hicn_face_id_t nh, hicn_dpo_ctx_t * dpo_ctx,
- u8 * pos)
+hicn_strategy_dpo_ctx_add_nh (hicn_face_id_t nh, hicn_dpo_ctx_t *dpo_ctx,
+ u8 *pos)
{
int empty = dpo_ctx->entry_count;
- /* Iterate through the list of faces to find if the face is already a next hop */
+ /* Iterate through the list of faces to find if the face is already a next
+ * hop */
for (int i = 0; i < dpo_ctx->entry_count; i++)
{
if (nh == dpo_ctx->next_hops[i])
{
/* If face is marked as deleted, ignore it */
- hicn_face_t *face =
- hicn_dpoi_get_from_idx (dpo_ctx->next_hops[i]);
+ hicn_face_t *face = hicn_dpoi_get_from_idx (dpo_ctx->next_hops[i]);
if (face->flags & HICN_FACE_FLAGS_DELETED)
{
continue;
@@ -134,8 +136,7 @@ hicn_strategy_dpo_ctx_add_nh (hicn_face_id_t nh, hicn_dpo_ctx_t * dpo_ctx,
}
int
-hicn_strategy_dpo_ctx_del_nh (hicn_face_id_t face_id,
- hicn_dpo_ctx_t * dpo_ctx)
+hicn_strategy_dpo_ctx_del_nh (hicn_face_id_t face_id, hicn_dpo_ctx_t *dpo_ctx)
{
int ret = HICN_ERROR_DPO_CTX_NOT_FOUND;
hicn_face_id_t invalid = NEXT_HOP_INVALID;
@@ -154,7 +155,6 @@ hicn_strategy_dpo_ctx_del_nh (hicn_face_id_t face_id,
}
return ret;
-
}
/*
diff --git a/hicn-plugin/src/strategy_dpo_ctx.h b/hicn-plugin/src/strategy_dpo_ctx.h
index 214ed88ad..ae642a350 100644
--- a/hicn-plugin/src/strategy_dpo_ctx.h
+++ b/hicn-plugin/src/strategy_dpo_ctx.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Copyright (c) 2021 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -25,16 +25,19 @@
/**
* @file strategy_dpo_ctx.h
*
- * This file implements the general hICN DPO ctx (shared among all the strategies).
+ * This file implements the general hICN DPO ctx (shared among all the
+ * strategies).
*
- * An hICN DPO ctx contains the list of next hops, auxiliaries fields to maintain the dpo, map-me
- * specifics (tfib_entry_count and seq), the dpo_type and 64B to let each strategy to store additional
- * information. Each next hop is an hicn_face_id_t that refers to an index for an hICN face. The
- * dpo_type is used to identify the strategy and to retrieve the vft corresponding to the strategy
- * (see strategy.h) and to the dpo ctx (see strategy_dpo_manager.h)
+ * An hICN DPO ctx contains the list of next hops, auxiliary fields to
+ * maintain the dpo, map-me specifics (tfib_entry_count and seq), the dpo_type
+ * and 64B that let each strategy store additional information. Each next hop
+ * is an hicn_face_id_t that refers to an index for an hICN face. The dpo_type
+ * is used to identify the strategy and to retrieve the vft corresponding to
+ * the strategy (see strategy.h) and to the dpo ctx (see
+ * strategy_dpo_manager.h)
*/
-//FIB table for hicn. 0 is the default one used by ip
+// FIB table for hicn. 0 is the default one used by ip
#define HICN_FIB_TABLE 10
#define NEXT_HOP_INVALID ~0
@@ -57,7 +60,7 @@ typedef struct __attribute__ ((packed)) hicn_dpo_ctx_s
dpo_type_t dpo_type;
/* 46B + 2B = 48B */
- u8 padding; /* To align to 8B */
+ u8 padding; /* To align to 8B */
/* 48 + 4B = 52; last sequence number */
u32 seq;
@@ -92,10 +95,11 @@ extern hicn_dpo_ctx_t *hicn_strategy_dpo_ctx_pool;
* @param dpo_type Type of dpo. It identifies the strategy.
*/
always_inline void
-init_dpo_ctx (hicn_dpo_ctx_t * dpo_ctx, const hicn_face_id_t * next_hop,
+init_dpo_ctx (hicn_dpo_ctx_t *dpo_ctx, const hicn_face_id_t *next_hop,
int nh_len, dpo_type_t dpo_type, dpo_proto_t proto)
{
hicn_face_id_t invalid = NEXT_HOP_INVALID;
+ int i = 0;
dpo_ctx->entry_count = 0;
dpo_ctx->locks = 0;
@@ -105,20 +109,18 @@ init_dpo_ctx (hicn_dpo_ctx_t * dpo_ctx, const hicn_face_id_t * next_hop,
dpo_ctx->seq = INIT_SEQ;
dpo_ctx->dpo_type = dpo_type;
- dpo_ctx->proto = proto;
+ dpo_ctx->proto = (fib_protocol_t) proto;
- for (int i = 0; i < HICN_PARAM_FIB_ENTRY_NHOPS_MAX && i < nh_len; i++)
+ for (i = 0; i < HICN_PARAM_FIB_ENTRY_NHOPS_MAX && i < nh_len; i++)
{
dpo_ctx->next_hops[i] = next_hop[i];
dpo_ctx->entry_count++;
}
-
- for (int i = nh_len; i < HICN_PARAM_FIB_ENTRY_NHOPS_MAX; i++)
+ for (; i < HICN_PARAM_FIB_ENTRY_NHOPS_MAX; i++)
{
dpo_ctx->next_hops[i] = invalid;
}
-
}
/**
@@ -140,27 +142,27 @@ hicn_dpo_ctx_t *hicn_strategy_dpo_ctx_get (index_t index);
/**
* @brief Retrieve the index of the hICN dpo ctx
*/
-index_t hicn_strategy_dpo_ctx_get_index (hicn_dpo_ctx_t * cd);
+index_t hicn_strategy_dpo_ctx_get_index (hicn_dpo_ctx_t *cd);
/**
* @brief Lock the dpo of a strategy ctx
*
* @param dpo Identifier of the dpo of the strategy ctx
*/
-void hicn_strategy_dpo_ctx_lock (dpo_id_t * dpo);
+void hicn_strategy_dpo_ctx_lock (dpo_id_t *dpo);
/**
* @brief Unlock the dpo of a strategy ctx
*
* @param dpo Identifier of the dpo of the strategy ctx
*/
-void hicn_strategy_dpo_ctx_unlock (dpo_id_t * dpo);
+void hicn_strategy_dpo_ctx_unlock (dpo_id_t *dpo);
/**
* @brief Add or update a next hop in the dpo ctx.
*
- * This function is meant to be used in the control plane and not in the data plane,
- * as it is not optimized for the latter.
+ * This function is meant to be used in the control plane and not in the data
+ * plane, as it is not optimized for the latter.
*
* @param nh Next hop to insert in the dpo ctx
* @param dpo_ctx Dpo ctx to update with the new or updated next hop
@@ -168,9 +170,8 @@ void hicn_strategy_dpo_ctx_unlock (dpo_id_t * dpo);
* @return HICN_ERROR_NONE if the update or insert was fine,
* otherwise HICN_ERROR_DPO_CTX_NOT_FOUND
*/
-int
-hicn_strategy_dpo_ctx_add_nh (hicn_face_id_t nh, hicn_dpo_ctx_t * dpo_ctx,
- u8 * pos);
+int hicn_strategy_dpo_ctx_add_nh (hicn_face_id_t nh, hicn_dpo_ctx_t *dpo_ctx,
+ u8 *pos);
/**
* @brief Delete a next hop in the dpo ctx.
@@ -180,10 +181,8 @@ hicn_strategy_dpo_ctx_add_nh (hicn_face_id_t nh, hicn_dpo_ctx_t * dpo_ctx,
* @return HICN_ERROR_NONE if the update or insert was fine,
 * otherwise HICN_ERROR_DPO_CTX_NOT_FOUND
*/
-int
-hicn_strategy_dpo_ctx_del_nh (hicn_face_id_t face_id,
- hicn_dpo_ctx_t * dpo_ctx);
-
+int hicn_strategy_dpo_ctx_del_nh (hicn_face_id_t face_id,
+ hicn_dpo_ctx_t *dpo_ctx);
STATIC_ASSERT (sizeof (hicn_dpo_ctx_t) <= 2 * CLIB_CACHE_LINE_BYTES,
"sizeof hicn_dpo_ctx_t is greater than 128B");
diff --git a/hicn-plugin/src/strategy_dpo_manager.c b/hicn-plugin/src/strategy_dpo_manager.c
index f8d41a372..1e136b47b 100644
--- a/hicn-plugin/src/strategy_dpo_manager.c
+++ b/hicn-plugin/src/strategy_dpo_manager.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Copyright (c) 2021 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -19,6 +19,8 @@
#include "strategy_dpo_ctx.h"
#include "strategies/dpo_mw.h"
#include "strategies/dpo_rr.h"
+#include "strategies/dpo_rp.h"
+#include "strategies/dpo_lr.h"
#include "strategy.h"
#include "faces/face.h"
@@ -33,9 +35,9 @@ hicn_dpo_vft_t default_dpo;
dpo_type_t
hicn_dpo_register_new_type (const char *const *const *hicn_nodes,
- const hicn_dpo_vft_t * hicn_dpo_vft,
- const hicn_strategy_vft_t * hicn_strategy_vft,
- const dpo_vft_t * dpo_ctx_vft)
+ const hicn_dpo_vft_t *hicn_dpo_vft,
+ const hicn_strategy_vft_t *hicn_strategy_vft,
+ const dpo_vft_t *dpo_ctx_vft)
{
dpo_type_t dpo_type = dpo_register_new_type (dpo_ctx_vft, hicn_nodes);
vec_validate (hicn_dpo_vfts, dpo_type);
@@ -52,7 +54,7 @@ hicn_dpo_register_new_type (const char *const *const *hicn_nodes,
}
u32
-dpo_is_hicn (const dpo_id_t * dpo)
+dpo_is_hicn (const dpo_id_t *dpo)
{
for (int i = 0; i < hicn_strategies; i++)
{
@@ -63,7 +65,7 @@ dpo_is_hicn (const dpo_id_t * dpo)
}
dpo_type_t
-hicn_dpo_get_vft_id (const dpo_id_t * dpo)
+hicn_dpo_get_vft_id (const dpo_id_t *dpo)
{
return dpo->dpoi_type;
}
@@ -92,24 +94,35 @@ hicn_dpo_get_strategy_vft_from_id (u8 vfts_id)
return hicn_strategy_vfts[strategies_id[vfts_id]];
}
+u8 *
+hicn_strategy_dpo_format (u8 *s, va_list *ap)
+{
+ index_t index = va_arg (*ap, index_t);
+ hicn_dpo_ctx_t *dpo_ctx = NULL;
+ u32 indent = va_arg (*ap, u32);
+
+ dpo_ctx = hicn_strategy_dpo_ctx_get (index);
+ if (dpo_ctx == NULL)
+ return s;
+
+ return hicn_dpo_vfts[dpo_ctx->dpo_type]->hicn_dpo_format (s, dpo_ctx,
+ indent);
+}
+
void
hicn_dpos_init (void)
{
hicn_strategy_init_dpo_ctx_pool ();
hicn_dpo_strategy_mw_module_init ();
hicn_dpo_strategy_rr_module_init ();
+ hicn_dpo_strategy_rp_module_init ();
+ hicn_dpo_strategy_lr_module_init ();
- default_dpo.hicn_dpo_is_type = &hicn_dpo_is_type_strategy_mw;
- default_dpo.hicn_dpo_get_type = &hicn_dpo_strategy_mw_get_type;
- default_dpo.hicn_dpo_module_init = &hicn_dpo_strategy_mw_module_init;
- default_dpo.hicn_dpo_create = &hicn_strategy_mw_ctx_create;
- default_dpo.hicn_dpo_add_update_nh = &hicn_strategy_mw_ctx_add_nh;
- default_dpo.hicn_dpo_del_nh = &hicn_strategy_mw_ctx_del_nh;
- default_dpo.hicn_dpo_format = &hicn_strategy_mw_format_ctx;
+ default_dpo = *hicn_dpo_vfts[hicn_dpo_strategy_mw_get_type ()];
}
u8 *
-format_hicn_strategy_list (u8 * s, int n, ...)
+format_hicn_strategy_list (u8 *s, int n, ...)
{
va_list ap;
va_start (ap, n);
@@ -120,19 +133,20 @@ format_hicn_strategy_list (u8 * s, int n, ...)
indent += 4;
int i;
vec_foreach_index (i, strategies_id)
- {
- s = format (s, "%U (%d) ", format_white_space, indent, i);
- s = hicn_strategy_vfts[strategies_id[i]]->hicn_format_strategy (s, &ap);
- }
+ {
+ s = format (s, "%U (%d) ", format_white_space, indent, i);
+ s = hicn_strategy_vfts[strategies_id[i]]->hicn_format_strategy (s, &ap);
+ }
return (s);
}
-u8
+int
hicn_dpo_strategy_id_is_valid (int strategy_id)
{
return vec_len (strategies_id) > strategy_id ?
- HICN_ERROR_NONE : HICN_ERROR_DPO_MGR_ID_NOT_VALID;
+ HICN_ERROR_NONE :
+ HICN_ERROR_DPO_MGR_ID_NOT_VALID;
}
int
@@ -148,7 +162,7 @@ hicn_strategy_get_all_available (void)
* time.
*/
void
-hicn_dpo_register (const hicn_dpo_vft_t * hicn_dpo)
+hicn_dpo_register (const hicn_dpo_vft_t *hicn_dpo)
{
hicn_dpo->hicn_dpo_module_init ();
}
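The new hicn_strategy_dpo_format above replaces the per-strategy
hicn_strategy_dpo_format_ctx and follows the standard vppinfra formatter
signature, so it is presumably meant to be called through the %U specifier;
dpo_index below is a placeholder:

  u8 *s = NULL;
  /* index of the hICN dpo ctx, followed by the indent argument */
  s = format (s, "%U", hicn_strategy_dpo_format, dpo_index, (u32) 2);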
diff --git a/hicn-plugin/src/strategy_dpo_manager.h b/hicn-plugin/src/strategy_dpo_manager.h
index e96e050d9..0163cd679 100644
--- a/hicn-plugin/src/strategy_dpo_manager.h
+++ b/hicn-plugin/src/strategy_dpo_manager.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Copyright (c) 2021 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -28,34 +28,39 @@
* information to choose the next hop,
* - a dpo vft that specify how to update the hICN DPO ctx when a next hop is
* added, deleted or updated,
- * - a strategy containing (see strategy.h): (i) the vpp node that processes Interest packets
- * subjected to such strategy, (ii) the definition of the vft that defines
- * the hICN strategy functions
- * An hICN DPO is places as the sole next hop in the vpp loadbalancer, and it containes
- * a list of next hops that will be used by the associated strategy when forwarding
- * interest packets.
+ * - a strategy containing (see strategy.h): (i) the vpp node that processes
+ * Interest packets subjected to such strategy, (ii) the definition of the vft
+ * that defines the hICN strategy functions. An hICN DPO is placed as the sole
+ * next hop in the vpp load balancer, and it contains a list of next hops that
+ * will be used by the associated strategy when forwarding interest packets.
*/
/**
* @brief Definition of the virtual function table for a hICN DPO.
*
* The following virtual function table
- * template that glues together the fuction to interact with the context and the
- * creating the dpo
+ * is a template that glues together the functions used to interact with the
+ * context and to create the dpo
*/
typedef struct hicn_dpo_vft_s
{
- int (*hicn_dpo_is_type) (const dpo_id_t * dpo);
- /**< Check if the type of the
- hICN DPO is the expected */
- dpo_type_t (*hicn_dpo_get_type) (void);
- /**< Return the type of the hICN dpo */
- void (*hicn_dpo_module_init) (void); /**< Initialize the hICN dpo */
- void (*hicn_dpo_create) (fib_protocol_t proto, const hicn_face_id_t * nh, int nh_len, index_t * dpo_idx); /**< Create the context of the hICN dpo */
- int (*hicn_dpo_add_update_nh) (hicn_face_id_t nh, index_t dpo_idx); /**< Add a next hop to the hICN dpo context */
+ int (*hicn_dpo_is_type) (const dpo_id_t *dpo);
+ /**< Check if the type of the
+     hICN DPO is the expected one */
+ dpo_type_t (*hicn_dpo_get_type) (void);
+ /**< Return the type of the hICN dpo */
+ void (*hicn_dpo_module_init) (void); /**< Initialize the hICN dpo */
+ void (*hicn_dpo_create) (
+ fib_protocol_t proto, const hicn_face_id_t *nh, int nh_len,
+ index_t *dpo_idx); /**< Create the context of the hICN dpo */
+ void (*hicn_dpo_update_type) (
+    hicn_dpo_ctx_t *hicn_strategy_ctx); /**< Change the dpo type */
+ int (*hicn_dpo_add_update_nh) (
+ hicn_face_id_t nh,
+ index_t dpo_idx); /**< Add a next hop to the hICN dpo context */
int (*hicn_dpo_del_nh) (hicn_face_id_t face_id, index_t dpo_idx);
- u8 *(*hicn_dpo_format) (u8 * s, int, ...);
- /**< Format an hICN dpo*/
+ u8 *(*hicn_dpo_format) (u8 *s, hicn_dpo_ctx_t *dpo_ctx, u32 indent);
+ /**< Format an hICN dpo*/
} hicn_dpo_vft_t;
/*
@@ -74,19 +79,17 @@ extern hicn_dpo_vft_t default_dpo;
* the FIB entry to which the hICN DPO is applied. This list must contain the
* name of the strategy node (or nodes in case of differentiation between IPv4
* and IPv6). Unless really needed otherwise (i.e., different implementation of
- * iface input), the list of node to use should be one provided in the strategy.h
- * (hicn_nodes_strategy)
+ * iface input), the list of nodes to use should be the one provided in
+ * strategy.h (hicn_nodes_strategy)
* @param hicn_dpo_vft The structure holding the virtual function table to
* interact with the hICN dpo and its context.
* @param hicn_strategy_vft The structure holding the virtual function table
* containing the hICN strategy functions.
* @return the dpo type registered in the VPP Data plane graph.
*/
-dpo_type_t
-hicn_dpo_register_new_type (const char *const *const *hicn_nodes,
- const hicn_dpo_vft_t * hicn_dpo_vft,
- const hicn_strategy_vft_t *
- hicn_strategy_vft, const dpo_vft_t * dpo_ctx_vft);
+dpo_type_t hicn_dpo_register_new_type (
+ const char *const *const *hicn_nodes, const hicn_dpo_vft_t *hicn_dpo_vft,
+ const hicn_strategy_vft_t *hicn_strategy_vft, const dpo_vft_t *dpo_ctx_vft);
/**
* @brief Check if the type of the dpo is among the list of hicn dpo types
@@ -96,7 +99,7 @@ hicn_dpo_register_new_type (const char *const *const *hicn_nodes,
* @param dpo The id of the dpo to which check the type
* @return 1 if there is a match, 0 otherwise.
*/
-u32 dpo_is_hicn (const dpo_id_t * dpo);
+u32 dpo_is_hicn (const dpo_id_t *dpo);
/**
* @brief Return the dpo_vtf and strategy_vtf identifier
@@ -105,9 +108,10 @@ u32 dpo_is_hicn (const dpo_id_t * dpo);
* retrieve the corresponding dpo_vtf/strategy_vtf identifier.
*
* @param dpo The id of the dpo to which check the type
- * @return the dpo_vft/strategy_vft id or HICN_ERROR_DPO_NOT_FOUND in case the dpo is not an hICN dpo.
+ * @return the dpo_vft/strategy_vft id or HICN_ERROR_DPO_NOT_FOUND in case the
+ * dpo is not an hICN dpo.
*/
-u8 hicn_dpo_get_vft_id (const dpo_id_t * dpo);
+u8 hicn_dpo_get_vft_id (const dpo_id_t *dpo);
/**
* @brief Get the vft to manage the dpo context.
@@ -159,16 +163,17 @@ void hicn_dpos_init (void);
*
* @result The string with the list of hICN DPO (strategies)
*/
-u8 *format_hicn_strategy_list (u8 * s, int n, ...);
+u8 *format_hicn_strategy_list (u8 *s, int n, ...);
/**
- * @brief Check if a given id points to a strategy and the corresponding dpo ctx
+ * @brief Check if a given id points to a strategy and the corresponding dpo
+ * ctx
*
* @param The id of the strategy to check.
*
 * @result HICN_ERROR_NONE if the id is valid, otherwise EINVAL
*/
-u8 hicn_dpo_strategy_id_is_valid (int strategy_id);
+int hicn_dpo_strategy_id_is_valid (int strategy_id);
/**
* @brief Return the number of available strategies. This number can be used to
@@ -182,7 +187,12 @@ int hicn_strategy_get_all_available (void);
* @brief Registers a module at compilation time to be initialized as part of
* the ctor.
*/
-void hicn_dpo_register (const hicn_dpo_vft_t * hicn_dpo);
+void hicn_dpo_register (const hicn_dpo_vft_t *hicn_dpo);
+
+/**
+ * @brief Format strategy DPO.
+ */
+u8 *hicn_strategy_dpo_format (u8 *s, va_list *ap);
#endif /* // __HICN_STRATEGY_DPO_MANAGER_H__ */
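Tying the pieces together, a new strategy module would typically register its
dpo and strategy vfts at init time with hicn_dpo_register_new_type, reusing the
node list from strategy.h. The *_xx names continue the placeholder naming used
in the earlier sketch:

  /* hypothetical module init for strategy "xx" (sketch only) */
  dpo_type_t hicn_dpo_type_xx =
    hicn_dpo_register_new_type (hicn_nodes_strategy, &hicn_dpo_xx_vft,
				&hicn_strategy_xx_vft, &dpo_ctx_xx_vft);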
diff --git a/hicn-plugin/src/strategy_node.c b/hicn-plugin/src/strategy_node.c
index 0659a871a..add5772a4 100644
--- a/hicn-plugin/src/strategy_node.c
+++ b/hicn-plugin/src/strategy_node.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Copyright (c) 2021-2022 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -20,11 +20,11 @@
#include "parser.h"
#include "strategy.h"
#include "strategy_dpo_ctx.h"
-#include "face_db.h"
#include "infra.h"
#include "mgmt.h"
#include "pcs.h"
#include "state.h"
+#include "data_fwd.h"
#include "strategies/strategy_mw.h"
/* Registration struct for a graph node */
@@ -49,7 +49,7 @@ static char *hicn_strategy_error_strings[] = {
/* packet trace format function */
u8 *
-hicn_strategy_format_trace (u8 * s, va_list * args)
+hicn_strategy_format_trace (u8 *s, va_list *args)
{
CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
@@ -60,104 +60,32 @@ hicn_strategy_format_trace (u8 * s, va_list * args)
return vft->hicn_format_strategy_trace (s, t);
}
-
-always_inline int
-hicn_new_interest (hicn_strategy_runtime_t * rt, vlib_buffer_t * b0,
- u32 * next, f64 tnow, u8 * nameptr,
- u16 namelen, hicn_face_id_t outface, int nh_idx,
- index_t dpo_ctx_id0, const hicn_strategy_vft_t * strategy,
- dpo_type_t dpo_type, u8 isv6,
- vl_api_hicn_api_node_stats_get_reply_t * stats)
+always_inline void
+drop_packet (vlib_main_t *vm, u32 bi0, u32 *n_left_to_next, u32 *next0,
+ u32 **to_next, u32 *next_index, vlib_node_runtime_t *node)
{
- int ret;
- hicn_hash_node_t *nodep;
- hicn_pcs_entry_t *pitp;
- hicn_header_t *hicn0;
- hicn_main_t *sm = &hicn_main;
- hicn_buffer_t *hicnb0 = hicn_get_buffer (b0);
- u32 node_id0 = 0;
- u8 vft_id0 = dpo_type;
- u8 is_cs0 = 0;
- u8 hash_entry_id = 0;
- u8 bucket_is_overflow = 0;
- u32 bucket_id = ~0;
-
-
- /* Create PIT node and init PIT entry */
- nodep = hicn_hashtb_alloc_node (rt->pitcs->pcs_table);
- if (PREDICT_FALSE (nodep == NULL))
- {
- /* Nothing we can do - no mem */
- *next = HICN_STRATEGY_NEXT_ERROR_DROP;
- return HICN_ERROR_HASHTB_NOMEM;
- }
- pitp = hicn_pit_get_data (nodep);
- hicn_pit_init_data (pitp);
- pitp->shared.create_time = tnow;
+ vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
- hicn0 = vlib_buffer_get_current (b0);
- hicn_lifetime_t imsg_lifetime;
- hicn_type_t type = hicnb0->type;
- hicn_ops_vft[type.l1]->get_lifetime (type, &hicn0->protocol,
- &imsg_lifetime);
+ *next0 = HICN_STRATEGY_NEXT_ERROR_DROP;
- if (imsg_lifetime > sm->pit_lifetime_max_ms)
- {
- imsg_lifetime = sm->pit_lifetime_max_ms;
- }
- pitp->shared.expire_time = hicn_pcs_get_exp_time (tnow, imsg_lifetime);
-
- /* Set up the hash node and insert it */
- hicn_hash_entry_t *hash_entry;
- hicn_hashtb_init_node (rt->pitcs->pcs_table, nodep, nameptr, namelen);
+ (*to_next)[0] = bi0;
+ *to_next += 1;
+ *n_left_to_next -= 1;
- ret =
- hicn_pcs_pit_insert (rt->pitcs, pitp, nodep, &hash_entry,
- hicnb0->name_hash, &node_id0, &dpo_ctx_id0, &vft_id0,
- &is_cs0, &hash_entry_id, &bucket_id,
- &bucket_is_overflow);
-
- if (ret == HICN_ERROR_NONE)
+ // Maybe trace
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) &&
+ (b0->flags & VLIB_BUFFER_IS_TRACED)))
{
- strategy->hicn_add_interest (vnet_buffer (b0)->ip.adj_index[VLIB_TX],
- hash_entry);
-
- /* Add face */
- hicn_face_db_add_face (hicnb0->face_id, &(pitp->u.pit.faces));
-
- *next = isv6 ? HICN_STRATEGY_NEXT_INTEREST_FACE6 :
- HICN_STRATEGY_NEXT_INTEREST_FACE4;
-
- vnet_buffer (b0)->ip.adj_index[VLIB_TX] = outface;
- stats->pkts_interest_count++;
- pitp->u.pit.pe_txnh = nh_idx;
- }
- else
- {
- /* Interest aggregate in PIT */
- if (ret == HICN_ERROR_HASHTB_EXIST)
- {
- hicn_store_internal_state (b0, hicnb0->name_hash, node_id0,
- dpo_ctx_id0, vft_id0, hash_entry_id,
- bucket_id, bucket_is_overflow);
- // We need to take a lock as the lock is not taken on the hash
- // entry because it is a CS entry (hash_insert function).
- hash_entry->locks++;
- *next =
- is_cs0 ? HICN_STRATEGY_NEXT_INTEREST_HITCS :
- HICN_STRATEGY_NEXT_INTEREST_HITPIT;
- }
- else
- {
- /* Send the packet to the interest-hitpit node */
- *next = HICN_STRATEGY_NEXT_ERROR_DROP;
- }
- hicn_faces_flush (&(pitp->u.pit.faces));
- hicn_hashtb_free_node (rt->pitcs->pcs_table, nodep);
+ hicn_strategy_trace_t *t = vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->pkt_type = HICN_PACKET_TYPE_INTEREST;
+ t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+ t->next_index = *next0;
+ t->dpo_type = hicn_get_buffer (b0)->vft_id;
+ t->out_face = -1;
}
- return (ret);
-
+ vlib_validate_buffer_enqueue_x1 (vm, node, *next_index, *to_next,
+ *n_left_to_next, bi0, *next0);
}
/*
@@ -165,15 +93,23 @@ hicn_new_interest (hicn_strategy_runtime_t * rt, vlib_buffer_t * b0,
* ipv6/tcp
*/
uword
-hicn_strategy_fn (vlib_main_t * vm,
- vlib_node_runtime_t * node, vlib_frame_t * frame)
+hicn_strategy_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
+ vlib_frame_t *frame)
{
+ int ret;
u32 n_left_from, *from, *to_next, n_left_to_next;
hicn_strategy_next_t next_index;
hicn_strategy_runtime_t *rt = NULL;
vl_api_hicn_api_node_stats_get_reply_t stats = { 0 };
- f64 tnow;
+ vlib_buffer_t *b0;
+ u32 bi0;
+ hicn_face_id_t outfaces[MAX_OUT_FACES];
+ u32 clones[MAX_OUT_FACES];
+ u16 outfaces_len;
+ u32 next0;
+ const hicn_strategy_vft_t *strategy;
+ hicn_buffer_t *hicnb0;
from = vlib_frame_vector_args (frame);
n_left_from = frame->n_vectors;
@@ -181,101 +117,108 @@ hicn_strategy_fn (vlib_main_t * vm,
rt = vlib_node_get_runtime_data (vm, hicn_strategy_node.index);
rt->pitcs = &hicn_main.pitcs;
/* Capture time in vpp terms */
- tnow = vlib_time_now (vm);
+ next0 = next_index;
while (n_left_from > 0)
{
-
vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
while (n_left_from > 0 && n_left_to_next > 0)
{
- u8 isv6;
- u8 *nameptr;
- u16 namelen;
- hicn_name_t name;
- hicn_header_t *hicn0;
- vlib_buffer_t *b0;
- u32 bi0;
- hicn_face_id_t outface;
- int nh_idx;
- u32 next0 = next_index;
- int ret;
-
- /* Prefetch for next iteration. */
+ // Prefetch for next iteration
if (n_left_from > 1)
{
vlib_buffer_t *b1;
b1 = vlib_get_buffer (vm, from[1]);
- CLIB_PREFETCH (b1, CLIB_CACHE_LINE_BYTES, LOAD);
- CLIB_PREFETCH (&b1->trace_handle, 2 * CLIB_CACHE_LINE_BYTES,
- STORE);
+ CLIB_PREFETCH (b1, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
}
- /* Dequeue a packet buffer */
+
+ // Dequeue a packet buffer
bi0 = from[0];
from += 1;
n_left_from -= 1;
- to_next[0] = bi0;
- to_next += 1;
- n_left_to_next -= 1;
-
b0 = vlib_get_buffer (vm, bi0);
- next0 = HICN_STRATEGY_NEXT_ERROR_DROP;
- hicn_dpo_ctx_t *dpo_ctx =
- hicn_strategy_dpo_ctx_get (vnet_buffer (b0)->ip.
- adj_index[VLIB_TX]);
- const hicn_strategy_vft_t *strategy =
- hicn_dpo_get_strategy_vft (dpo_ctx->dpo_type);
+ // Drop by default
+ next0 = HICN_STRATEGY_NEXT_ERROR_DROP;
- ret = hicn_interest_parse_pkt (b0, &name, &namelen, &hicn0, &isv6);
+ // Increment counters
stats.pkts_processed++;
- /* Select next hop */
- /*
- * Double check that the interest has been through
- * the interest-pcslookup node due to misconfiguration in
- * the punting rules.
- */
- if (PREDICT_TRUE
- (ret == HICN_ERROR_NONE && HICN_IS_NAMEHASH_CACHED (b0)
- && strategy->hicn_select_next_hop (vnet_buffer (b0)->
- ip.adj_index[VLIB_TX],
- &nh_idx,
- &outface) ==
- HICN_ERROR_NONE))
+
+ hicnb0 = hicn_get_buffer (b0);
+
+ // Get the strategy VFT
+ strategy = hicn_dpo_get_strategy_vft (hicnb0->vft_id);
+
+ // Check we have at least one next hop for the packet
+ ret = strategy->hicn_select_next_hop (
+ hicnb0->dpo_ctx_id, hicnb0->face_id, outfaces, &outfaces_len);
+
+ if (PREDICT_FALSE (ret != HICN_ERROR_NONE || outfaces_len == 0))
+ {
+ drop_packet (vm, bi0, &n_left_to_next, &next0, &to_next,
+ &next_index, node);
+ continue;
+ }
+
+ // Set next node
+ next0 = hicn_buffer_is_v6 (b0) ? HICN_STRATEGY_NEXT_INTEREST_FACE6 :
+ HICN_STRATEGY_NEXT_INTEREST_FACE4;
+
+ if (PREDICT_TRUE (ret == HICN_ERROR_NONE))
{
- /*
- * No need to check if parsing was successful
- * here. Already checked in the interest_pcslookup
- * node
- */
- nameptr = (u8 *) (&name);
- hicn_new_interest (rt, b0, &next0, tnow, nameptr, namelen,
- outface, nh_idx,
- vnet_buffer (b0)->ip.adj_index[VLIB_TX],
- strategy, dpo_ctx->dpo_type, isv6, &stats);
+ // Clone interest if needed
+ if (outfaces_len > 1)
+ {
+ ret = vlib_buffer_clone (vm, bi0, clones, (u16) outfaces_len,
+ CLIB_CACHE_LINE_BYTES * 2);
+ ASSERT (ret == outfaces_len);
+ }
+ else
+ {
+ clones[0] = bi0;
+ }
+
+ // Send interest to next hops
+ for (u32 nh = 0; nh < outfaces_len; nh++)
+ {
+ vlib_buffer_t *local_b0 = vlib_get_buffer (vm, clones[nh]);
+
+ to_next[0] = clones[nh];
+ to_next += 1;
+ n_left_to_next -= 1;
+
+ vnet_buffer (local_b0)->ip.adj_index[VLIB_TX] = outfaces[nh];
+ stats.pkts_interest_count++;
+
+ // Maybe trace
+ if (PREDICT_FALSE (
+ (node->flags & VLIB_NODE_FLAG_TRACE) &&
+ (local_b0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ hicn_strategy_trace_t *t =
+ vlib_add_trace (vm, node, local_b0, sizeof (*t));
+ t->pkt_type = HICN_PACKET_TYPE_INTEREST;
+ t->sw_if_index =
+ vnet_buffer (local_b0)->sw_if_index[VLIB_RX];
+ t->next_index = next0;
+ t->dpo_type = hicnb0->vft_id;
+ t->out_face = outfaces[nh];
+ }
+
+ /*
+ * Fix in case of a wrong speculation. Needed for
+ * cloning the data in the right frame
+ */
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ clones[nh], next0);
+ }
}
- /* Maybe trace */
- if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) &&
- (b0->flags & VLIB_BUFFER_IS_TRACED)))
+ else
{
- hicn_strategy_trace_t *t =
- vlib_add_trace (vm, node, b0, sizeof (*t));
- t->pkt_type = HICN_PKT_TYPE_CONTENT;
- t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
- t->next_index = next0;
- t->dpo_type = dpo_ctx->dpo_type;
+	      drop_packet (vm, bi0, &n_left_to_next, &next0, &to_next,
+ &next_index, node);
}
- /*
- * Verify speculative enqueue, maybe switch current
- * next frame
- */
- /*
- * Fix in case of a wrong speculation. Needed for
- * cloning the data in the right frame
- */
- vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
- to_next, n_left_to_next,
- bi0, next0);
}
vlib_put_next_frame (vm, node, next_index, n_left_to_next);
@@ -293,7 +236,6 @@ hicn_strategy_fn (vlib_main_t * vm,
/*
* Node registration for the forwarder node
*/
-/* *INDENT-OFF* */
VLIB_REGISTER_NODE (hicn_strategy_node) =
{
.name = "hicn-strategy",
@@ -315,7 +257,6 @@ VLIB_REGISTER_NODE (hicn_strategy_node) =
},
};
-
/*
* fd.io coding-style-patch-verification: ON
*
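Since the trace struct now carries out_face, a strategy trace formatter would
presumably print it too; a sketch in the style of hicn_strategy_format_trace_rr
(the _xx name is a placeholder):

  u8 *
  hicn_strategy_format_trace_xx (u8 *s, hicn_strategy_trace_t *t)
  {
    /* sketch: include the new out_face field in the trace line */
    s = format (s,
		"Strategy_xx: pkt: %d, sw_if_index %d, next index %d, "
		"out face %d",
		(int) t->pkt_type, t->sw_if_index, t->next_index,
		(int) t->out_face);
    return s;
  }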
diff --git a/hicn-plugin/src/test/CMakeLists.txt b/hicn-plugin/src/test/CMakeLists.txt
new file mode 100644
index 000000000..89ad24fbb
--- /dev/null
+++ b/hicn-plugin/src/test/CMakeLists.txt
@@ -0,0 +1,67 @@
+# Copyright (c) 2022 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#############################################################
+# Dependencies
+#############################################################
+
+# We need a pure C test framework for the hicn-plugin.
+include (UnityTestFrameworkImport)
+find_package(Threads REQUIRED)
+
+##############################################################
+# Test sources
+##############################################################
+list(APPEND TESTS_SRC
+ main.c
+ vpp.c
+ test_pcs.c
+)
+
+
+##############################################################
+# Link libraries
+##############################################################
+set(TEST_LIBRARIES
+ ${VPP_LIBRARIES}
+ ${UNITY_LIBRARIES}
+ ${LIBHICN_SHARED}
+ ${HICNPLUGIN_SHARED}
+ Threads::Threads
+)
+
+set (
+ LINK_FLAGS
+ "-Wl,-unresolved-symbols=ignore-all"
+)
+
+
+##############################################################
+# Build single unit test executable and add it to test list
+##############################################################
+build_executable(hicnplugin_tests
+ NO_INSTALL
+ SOURCES ${TESTS_SRC}
+ LINK_LIBRARIES
+ ${TEST_LIBRARIES}
+ INCLUDE_DIRS
+ $<TARGET_PROPERTY:${HICNPLUGIN_SHARED},INCLUDE_DIRECTORIES>
+ ${UNITY_INCLUDE_DIRS}
+ DEPENDS unity ${HICNPLUGIN_SHARED}
+ COMPONENT ${HICN_PLUGIN}
+ DEFINITIONS ${COMPILER_DEFINITIONS}
+ COMPILE_OPTIONS ${COMPILER_OPTIONS}
+ LINK_FLAGS ${LINK_FLAGS}
+)
+
+unity_add_test_internal(hicnplugin_tests)
diff --git a/hicn-plugin/src/hicn_api.h b/hicn-plugin/src/test/main.c
index ec10a6bbd..82caba18a 100644
--- a/hicn-plugin/src/hicn_api.h
+++ b/hicn-plugin/src/test/main.c
@@ -1,5 +1,6 @@
+
/*
- * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Copyright (c) 2022 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -13,26 +14,18 @@
* limitations under the License.
*/
-#ifndef __HICN_API_H__
-#define __HICN_API_H__
-
-/**
- * @file
- */
-
-
-#define HICN_STRATEGY_NULL ~0
-#define HICN_FIB_TABLE 10
+#include <unity_fixture.h>
+#include "vpp.h"
-/* define message structures */
-#define vl_typedefs
-#include <hicn/hicn_all_api_h.h>
-#undef vl_typedefs
+static void
+RunAllTests (void)
+{
+ RUN_TEST_GROUP (PCS);
+}
-#endif /* // __HICN_API_H___ */
-
-/*
- * fd.io coding-style-patch-verification: ON
- *
- * Local Variables: eval: (c-set-style "gnu") End:
- */
+int
+main (int argc, const char *argv[])
+{
+ vpp_init ();
+ return UnityMain (argc, argv, RunAllTests);
+} \ No newline at end of file
diff --git a/hicn-plugin/src/test/test_pcs.c b/hicn-plugin/src/test/test_pcs.c
new file mode 100644
index 000000000..8e7416b8b
--- /dev/null
+++ b/hicn-plugin/src/test/test_pcs.c
@@ -0,0 +1,622 @@
+/*
+ * Copyright (c) 2022 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define HICN_PCS_TESTING
+#include "vpp.h"
+#include <pcs.h>
+
+#include <unity.h>
+#include <unity_fixture.h>
+
+/*
+ * Global PCS instance common to each test
+ */
+static hicn_pit_cs_t global_pcs;
+
+TEST_GROUP (PCS);
+
+/**
+ * Default PIT elements
+ */
+#define MAX_PIT_ELEMENTS 1000000
+
+/**
+ * Default CS elements
+ */
+#define MAX_CS_ELEMENTS (MAX_PIT_ELEMENTS / 10)
+
+TEST_SETUP (PCS)
+{
+ hicn_pit_cs_t *pcs = &global_pcs;
+ hicn_pit_create (pcs, MAX_PIT_ELEMENTS, MAX_CS_ELEMENTS);
+}
+
+TEST_TEAR_DOWN (PCS)
+{
+ hicn_pit_cs_t *pcs = &global_pcs;
+ hicn_pit_destroy (pcs);
+}
+
+TEST (PCS, Create)
+{
+ hicn_pit_cs_t *pcs = &global_pcs;
+
+ // Check counters
+ TEST_ASSERT_EQUAL (0, pcs->pcs_pit_count);
+ TEST_ASSERT_EQUAL (0, pcs->pcs_cs_count);
+ TEST_ASSERT_EQUAL (0, pcs->pcs_pcs_alloc);
+ TEST_ASSERT_EQUAL (0, pcs->pcs_pcs_dealloc);
+ TEST_ASSERT_EQUAL (MAX_PIT_ELEMENTS, pcs->max_pit_size);
+ TEST_ASSERT_EQUAL (MAX_CS_ELEMENTS, pcs->policy_state.max);
+
+  printf ("PIT entry size: %lu\n", sizeof (hicn_pcs_entry_t));
+}
+
+TEST (PCS, Destroy)
+{
+ // Global PCS instance
+}
+
+TEST (PCS, LookupEmpty)
+{
+ hicn_pit_cs_t *pcs = &global_pcs;
+
+ hicn_name_t name;
+ int ret = hicn_name_create ("b001::abcd", 0, &name);
+ TEST_ASSERT_EQUAL (0, ret);
+
+ hicn_pcs_entry_t *pcs_entry;
+ ret = hicn_pcs_lookup_one (pcs, &name, &pcs_entry);
+
+ TEST_ASSERT_EQUAL (HICN_ERROR_PCS_NOT_FOUND, ret);
+ TEST_ASSERT_EQUAL (NULL, pcs_entry);
+}
+
+TEST (PCS, InsertPITEntryAndLookup)
+{
+ hicn_pit_cs_t *pcs = &global_pcs;
+
+ // Add entry to the PCS
+ int ret = 0;
+
+ // Allocate name
+ hicn_name_t name;
+ hicn_name_create ("b001::1234", 0, &name);
+
+ // Create PCS entry
+ hicn_pcs_entry_t *pcs_entry;
+ ret = hicn_pcs_lookup_one (pcs, &name, &pcs_entry);
+
+ // We will not find the entry
+ TEST_ASSERT_EQUAL (ret, HICN_ERROR_PCS_NOT_FOUND);
+ TEST_ASSERT_EQUAL (NULL, pcs_entry);
+
+ // Get a new PIT entry from the pool
+ // TODO Check if the hicn_pcs_entry_pit_get is needed here
+ pcs_entry = hicn_pcs_entry_pit_get (pcs, 0, 0);
+ TEST_ASSERT_NOT_NULL (pcs_entry);
+ TEST_ASSERT_EQUAL (hicn_pcs_get_pcs_alloc (pcs), 1);
+
+ // Insert PIT entry
+ ret = hicn_pcs_pit_insert (pcs, pcs_entry, &name);
+ TEST_ASSERT_EQUAL (HICN_ERROR_NONE, ret);
+ TEST_ASSERT_EQUAL (hicn_pcs_get_pit_count (pcs), 1);
+ TEST_ASSERT_EQUAL (hicn_pcs_get_cs_count (pcs), 0);
+
+ // Lookup PIT entry
+ hicn_pcs_entry_t *pcs_entry_ret = NULL;
+ ret = hicn_pcs_lookup_one (pcs, &name, &pcs_entry_ret);
+ TEST_ASSERT_EQUAL (HICN_ERROR_NONE, ret);
+ TEST_ASSERT_NOT_NULL (pcs_entry_ret);
+ TEST_ASSERT_EQUAL (pcs_entry, pcs_entry_ret);
+
+ // Release PIT entry
+ hicn_pcs_entry_remove_lock (pcs, pcs_entry);
+ TEST_ASSERT_EQUAL (hicn_pcs_get_pcs_dealloc (pcs), 1);
+ TEST_ASSERT_EQUAL (hicn_pcs_get_pit_count (pcs), 0);
+ TEST_ASSERT_EQUAL (hicn_pcs_get_cs_count (pcs), 0);
+
+ // Lookup PIT entry again, we should not find it
+ ret = hicn_pcs_lookup_one (pcs, &name, &pcs_entry_ret);
+ TEST_ASSERT_EQUAL (HICN_ERROR_PCS_NOT_FOUND, ret);
+ TEST_ASSERT_EQUAL (NULL, pcs_entry_ret);
+}
+
+TEST (PCS, InsertCSEntryAndLookup)
+{
+ hicn_pit_cs_t *pcs = &global_pcs;
+
+ // Add entry to the PCS
+ int ret = 0;
+
+ // Allocate name
+ hicn_name_t name;
+ hicn_name_create ("b008::5555", 0, &name);
+
+ // Create PCS entry
+ hicn_pcs_entry_t *pcs_entry;
+ ret = hicn_pcs_lookup_one (pcs, &name, &pcs_entry);
+
+ // We will not find the entry
+ TEST_ASSERT_EQUAL (ret, HICN_ERROR_PCS_NOT_FOUND);
+ TEST_ASSERT_EQUAL (NULL, pcs_entry);
+
+ // Get a buffer
+ u32 buffer_index = 198274;
+
+ // Get a new entry from the pool
+ pcs_entry = hicn_pcs_entry_cs_get (pcs, 0, buffer_index);
+ TEST_ASSERT_NOT_NULL (pcs_entry);
+ TEST_ASSERT_EQUAL (1, hicn_pcs_get_pcs_alloc (pcs));
+
+ // Insert CS entry
+ ret = hicn_pcs_cs_insert (pcs, pcs_entry, &name);
+ TEST_ASSERT_EQUAL (HICN_ERROR_NONE, ret);
+ TEST_ASSERT_EQUAL (hicn_pcs_get_pit_count (pcs), 0);
+ TEST_ASSERT_EQUAL (hicn_pcs_get_cs_count (pcs), 1);
+
+ // Lookup CS entry
+ hicn_pcs_entry_t *pcs_entry_ret = NULL;
+ ret = hicn_pcs_lookup_one (pcs, &name, &pcs_entry_ret);
+ TEST_ASSERT_EQUAL (HICN_ERROR_NONE, ret);
+ TEST_ASSERT_TRUE (pcs_entry_ret->flags & HICN_PCS_ENTRY_CS_FLAG);
+ TEST_ASSERT_NOT_NULL (pcs_entry_ret);
+ TEST_ASSERT_EQUAL (pcs_entry, pcs_entry_ret);
+
+ // Release CS entry
+ hicn_pcs_entry_remove_lock (pcs, pcs_entry);
+ TEST_ASSERT_EQUAL (1, hicn_pcs_get_pcs_dealloc (pcs));
+ TEST_ASSERT_EQUAL (0, hicn_pcs_get_pit_count (pcs));
+ TEST_ASSERT_EQUAL (0, hicn_pcs_get_cs_count (pcs));
+
+ // Lookup CS entry again, we should not find it
+ ret = hicn_pcs_lookup_one (pcs, &name, &pcs_entry_ret);
+ TEST_ASSERT_EQUAL (HICN_ERROR_PCS_NOT_FOUND, ret);
+ TEST_ASSERT_EQUAL (NULL, pcs_entry_ret);
+}
+
+TEST (PCS, PitToCS)
+{
+ hicn_pit_cs_t *pcs = &global_pcs;
+
+ // Add entry to the PCS
+ int ret = 0;
+
+ // Allocate name
+ hicn_name_t name;
+ hicn_name_create ("b001::1234", 0, &name);
+
+ // Create PCS entry
+ hicn_pcs_entry_t *pcs_entry;
+ ret = hicn_pcs_lookup_one (pcs, &name, &pcs_entry);
+
+ // We will not find the entry
+ TEST_ASSERT_EQUAL (ret, HICN_ERROR_PCS_NOT_FOUND);
+ TEST_ASSERT_EQUAL (NULL, pcs_entry);
+
+ // Get a new entry from the pool
+ // TODO Check if the hicn_pcs_entry_pit_get is needed here
+ pcs_entry = hicn_pcs_entry_pit_get (pcs, 0, 0);
+ TEST_ASSERT_NOT_NULL (pcs_entry);
+ TEST_ASSERT_EQUAL (hicn_pcs_get_pcs_alloc (pcs), 1);
+
+ // Insert PIT entry
+ ret = hicn_pcs_pit_insert (pcs, pcs_entry, &name);
+ TEST_ASSERT_EQUAL (HICN_ERROR_NONE, ret);
+ TEST_ASSERT_EQUAL (hicn_pcs_get_pit_count (pcs), 1);
+ TEST_ASSERT_EQUAL (hicn_pcs_get_cs_count (pcs), 0);
+
+ // Lookup PIT entry
+ hicn_pcs_entry_t *pcs_entry_ret = NULL;
+ ret = hicn_pcs_lookup_one (pcs, &name, &pcs_entry_ret);
+ TEST_ASSERT_EQUAL (HICN_ERROR_NONE, ret);
+ TEST_ASSERT_NOT_NULL (pcs_entry_ret);
+ TEST_ASSERT_EQUAL (pcs_entry, pcs_entry_ret);
+
+ // Found the PIT entry we inserted before.
+ // Double check is not a CS
+ TEST_ASSERT_FALSE (pcs_entry_ret->flags & HICN_PCS_ENTRY_CS_FLAG);
+
+ // Turn the PIT entry into a CS
+ hicn_pit_to_cs (pcs, pcs_entry, /* random buffer index */ 12345);
+
+ // Check counters
+ TEST_ASSERT_EQUAL (hicn_pcs_get_pit_count (pcs), 0);
+ TEST_ASSERT_EQUAL (hicn_pcs_get_cs_count (pcs), 1);
+
+ // Make sure entry is now a CS
+ TEST_ASSERT_TRUE (pcs_entry_ret->flags & HICN_PCS_ENTRY_CS_FLAG);
+}
+
+TEST (PCS, CheckCSLruConsistency)
+{
+ hicn_pit_cs_t *pcs = &global_pcs;
+
+ // Add entry to the PCS
+ int ret = 0;
+
+ // Allocate name
+ hicn_name_t name;
+ hicn_name_create ("b001::1234", 0, &name);
+
+ // Create CS entry
+ hicn_pcs_entry_t *pcs_entry;
+ // Get a new entry from the pool
+ // TODO Check if the hicn_pcs_entry_pit_get is needed here
+ pcs_entry = hicn_pcs_entry_cs_get (pcs, 0, 0);
+ TEST_ASSERT_NOT_NULL (pcs_entry);
+ TEST_ASSERT_EQUAL (hicn_pcs_get_pcs_alloc (pcs), 1);
+
+ // Insert CS entry
+ ret = hicn_pcs_cs_insert (pcs, pcs_entry, &name);
+ TEST_ASSERT_EQUAL (HICN_ERROR_NONE, ret);
+ TEST_ASSERT_EQUAL (hicn_pcs_get_cs_count (pcs), 1);
+ TEST_ASSERT_EQUAL (hicn_pcs_get_pit_count (pcs), 0);
+
+ // Get pcs_entry index
+ uint32_t pcs_entry_index = hicn_pcs_entry_get_index (pcs, pcs_entry);
+
+ // Check LRU
+ hicn_cs_policy_t *policy_state = hicn_pcs_get_policy_state (pcs);
+
+ // Make sure MAX corresponds to what we set
+ TEST_ASSERT_EQUAL (MAX_CS_ELEMENTS, hicn_cs_policy_get_max (policy_state));
+
+ TEST_ASSERT_EQUAL (pcs_entry_index, hicn_cs_policy_get_head (policy_state));
+ TEST_ASSERT_EQUAL (pcs_entry_index, hicn_cs_policy_get_tail (policy_state));
+ TEST_ASSERT_EQUAL (1, hicn_cs_policy_get_count (policy_state));
+
+ // Check pointers of the entry
+ TEST_ASSERT_EQUAL (HICN_CS_POLICY_END_OF_CHAIN,
+ hicn_pcs_entry_cs_get_next (pcs_entry));
+ TEST_ASSERT_EQUAL (HICN_CS_POLICY_END_OF_CHAIN,
+ hicn_pcs_entry_cs_get_prev (pcs_entry));
+
+ // Lookup the entry itself
+ ret = hicn_pcs_lookup_one (pcs, &name, &pcs_entry);
+
+ // Check again the pointers of the entry
+ TEST_ASSERT_EQUAL (HICN_CS_POLICY_END_OF_CHAIN,
+ hicn_pcs_entry_cs_get_next (pcs_entry));
+ TEST_ASSERT_EQUAL (HICN_CS_POLICY_END_OF_CHAIN,
+ hicn_pcs_entry_cs_get_prev (pcs_entry));
+
+ // Remove CS entry
+ hicn_pcs_entry_remove_lock (pcs, pcs_entry);
+ TEST_ASSERT_EQUAL (1, hicn_pcs_get_pcs_dealloc (pcs));
+ TEST_ASSERT_EQUAL (0, hicn_pcs_get_pit_count (pcs));
+ TEST_ASSERT_EQUAL (0, hicn_pcs_get_cs_count (pcs));
+
+ // Check again LRU
+ TEST_ASSERT_EQUAL (HICN_CS_POLICY_END_OF_CHAIN,
+ hicn_cs_policy_get_head (policy_state));
+ TEST_ASSERT_EQUAL (HICN_CS_POLICY_END_OF_CHAIN,
+ hicn_cs_policy_get_tail (policy_state));
+ TEST_ASSERT_EQUAL (0, hicn_cs_policy_get_count (policy_state));
+
+ // Let's insert now 2 entries
+ hicn_pcs_entry_t *pcs_entry0;
+ hicn_pcs_entry_t *pcs_entry1;
+
+ pcs_entry0 = hicn_pcs_entry_cs_get (pcs, 0, 0);
+ TEST_ASSERT_NOT_NULL (pcs_entry0);
+ hicn_name_t name0;
+ hicn_name_create ("b001::abcd", 123, &name0);
+ u32 index0 = hicn_pcs_entry_get_index (pcs, pcs_entry0);
+
+ pcs_entry1 = hicn_pcs_entry_cs_get (pcs, 0, 0);
+ TEST_ASSERT_NOT_NULL (pcs_entry1);
+ hicn_name_t name1;
+ hicn_name_create ("b001::9999", 321, &name1);
+ u32 index1 = hicn_pcs_entry_get_index (pcs, pcs_entry1);
+
+ // Insert CS entry
+ ret = hicn_pcs_cs_insert (pcs, pcs_entry0, &name0);
+ ret = hicn_pcs_cs_insert (pcs, pcs_entry1, &name1);
+
+ // Check LRU. index1 was inserted last, so it should be at the head
+ TEST_ASSERT_EQUAL (index1, hicn_cs_policy_get_head (policy_state));
+ // index0 was inserted first, so it should be at the tail
+ TEST_ASSERT_EQUAL (index0, hicn_cs_policy_get_tail (policy_state));
+  // And the count should be 2
+ TEST_ASSERT_EQUAL (2, hicn_cs_policy_get_count (policy_state));
+
+ // Check pointers of the entries
+
+ // pcs_entry0 should be at the tail
+ TEST_ASSERT_EQUAL (HICN_CS_POLICY_END_OF_CHAIN,
+ hicn_pcs_entry_cs_get_next (pcs_entry0));
+ TEST_ASSERT_EQUAL (index1, hicn_pcs_entry_cs_get_prev (pcs_entry0));
+
+ // pcs_entry1 should be at the head
+ TEST_ASSERT_EQUAL (index0, hicn_pcs_entry_cs_get_next (pcs_entry1));
+ TEST_ASSERT_EQUAL (HICN_CS_POLICY_END_OF_CHAIN,
+ hicn_pcs_entry_cs_get_prev (pcs_entry1));
+
+ // Let's lookup for entry 0 and check if the LRU is updated correctly
+ ret = hicn_pcs_lookup_one (pcs, &name0, &pcs_entry);
+ TEST_ASSERT_EQUAL (HICN_ERROR_NONE, ret);
+ TEST_ASSERT_EQUAL (index0, hicn_pcs_entry_get_index (pcs, pcs_entry));
+
+ // Check pointers of the entries
+
+ // pcs_entry1 should be at the tail
+ TEST_ASSERT_EQUAL (HICN_CS_POLICY_END_OF_CHAIN,
+ hicn_pcs_entry_cs_get_next (pcs_entry1));
+ TEST_ASSERT_EQUAL (index0, hicn_pcs_entry_cs_get_prev (pcs_entry1));
+
+ // pcs_entry0 should be at the head
+ TEST_ASSERT_EQUAL (index1, hicn_pcs_entry_cs_get_next (pcs_entry0));
+ TEST_ASSERT_EQUAL (HICN_CS_POLICY_END_OF_CHAIN,
+ hicn_pcs_entry_cs_get_prev (pcs_entry0));
+
+ // index0 should be now the head
+ TEST_ASSERT_EQUAL (index0, hicn_cs_policy_get_head (policy_state));
+ // index1 should be now the tail
+ TEST_ASSERT_EQUAL (index1, hicn_cs_policy_get_tail (policy_state));
+}
+
+TEST (PCS, CheckCSLruMax)
+{
+ hicn_pit_cs_t *pcs = &global_pcs;
+  int i, ret = 0;
+ u32 pcs_entry_index = 0;
+ u32 pcs_entry_index0 = 0;
+ u32 pcs_entry_index1 = 0;
+ hicn_pcs_entry_t *pcs_entry = NULL;
+ hicn_name_t name;
+
+ const hicn_cs_policy_t *policy_state = hicn_pcs_get_policy_state (pcs);
+
+ for (i = 0; i < MAX_CS_ELEMENTS; i++)
+ {
+ // Allocate name
+ ret = hicn_name_create ("b004::aaaa", i, &name);
+ TEST_ASSERT_EQUAL (0, ret);
+
+ // Create CS entry
+ // Get a new entry from the pool
+ // TODO Check if the hicn_pcs_entry_pit_get is needed here
+ pcs_entry = hicn_pcs_entry_cs_get (pcs, 0, i);
+ TEST_ASSERT_NOT_NULL (pcs_entry);
+ TEST_ASSERT_EQUAL (i + 1, hicn_pcs_get_pcs_alloc (pcs));
+
+ pcs_entry_index = hicn_pcs_entry_get_index (pcs, pcs_entry);
+
+ if (i == 0)
+ {
+ pcs_entry_index0 = pcs_entry_index;
+ }
+
+ if (i == 1)
+ {
+ pcs_entry_index1 = pcs_entry_index;
+ }
+
+ // Insert CS entry
+ ret = hicn_pcs_cs_insert (pcs, pcs_entry, &name);
+ TEST_ASSERT_EQUAL (HICN_ERROR_NONE, ret);
+ TEST_ASSERT_EQUAL (hicn_pcs_get_cs_count (pcs), i + 1);
+ TEST_ASSERT_EQUAL (hicn_pcs_get_pit_count (pcs), 0);
+
+ // Check LRU
+ TEST_ASSERT_EQUAL (pcs_entry_index,
+ hicn_cs_policy_get_head (policy_state));
+ TEST_ASSERT_EQUAL (pcs_entry_index0,
+ hicn_cs_policy_get_tail (policy_state));
+ TEST_ASSERT_EQUAL (i + 1, hicn_cs_policy_get_count (policy_state));
+ }
+
+  // At this point the CS should be full
+ TEST_ASSERT_EQUAL (hicn_cs_policy_get_max (policy_state),
+ hicn_cs_policy_get_count (policy_state));
+
+ // Next insertion should:
+ // - evict the tail
+ // - update the head
+ // - make a coffee because I am tired
+ ret = hicn_name_create ("b004::aaaa", i, &name);
+ TEST_ASSERT_EQUAL (0, ret);
+
+ pcs_entry = hicn_pcs_entry_cs_get (pcs, 0, i);
+ TEST_ASSERT_NOT_NULL (pcs_entry);
+ TEST_ASSERT_EQUAL (i + 1, hicn_pcs_get_pcs_alloc (pcs));
+
+ pcs_entry_index = hicn_pcs_entry_get_index (pcs, pcs_entry);
+
+ // Insert CS entry
+ ret = hicn_pcs_cs_insert (pcs, pcs_entry, &name);
+ TEST_ASSERT_EQUAL (HICN_ERROR_NONE, ret);
+ TEST_ASSERT_EQUAL (hicn_pcs_get_cs_count (pcs), i);
+ TEST_ASSERT_EQUAL (hicn_pcs_get_pit_count (pcs), 0);
+
+ // Check LRU
+ TEST_ASSERT_EQUAL (pcs_entry_index, hicn_cs_policy_get_head (policy_state));
+  // pcs_entry_index0 should have been evicted, and pcs_entry_index1 should
+  // now be the tail
+ TEST_ASSERT_EQUAL (pcs_entry_index1, hicn_cs_policy_get_tail (policy_state));
+
+  // Make sure pcs_entry_index0 was freed.
+ TEST_ASSERT_TRUE (
+ pool_is_free_index (pcs->pcs_entries_pool, pcs_entry_index0));
+}
+
+TEST (PCS, AddIngressFacesToPITEntry)
+{
+ hicn_pit_cs_t *pcs = &global_pcs;
+
+ // Add entry to the PCS
+ int ret = 0;
+
+ // Allocate name
+ hicn_name_t name;
+ hicn_name_create ("b001::9876", 0, &name);
+
+ // Create PCS entry
+ hicn_pcs_entry_t *pcs_entry;
+ ret = hicn_pcs_lookup_one (pcs, &name, &pcs_entry);
+
+ // We will not find the entry
+ TEST_ASSERT_EQUAL (ret, HICN_ERROR_PCS_NOT_FOUND);
+ TEST_ASSERT_EQUAL (NULL, pcs_entry);
+
+ // Get a new entry from the pool
+ // TODO Check if the hicn_pcs_entry_pit_get is needed here
+ f64 tnow = 10.0;
+ pcs_entry = hicn_pcs_entry_pit_get (pcs, tnow, 0);
+ TEST_ASSERT_NOT_NULL (pcs_entry);
+ TEST_ASSERT_EQUAL (hicn_pcs_get_pcs_alloc (pcs), 1);
+
+ const u32 faceid = 20;
+
+ // The face should not be in the PIT entry
+ TEST_ASSERT_EQUAL (0, hicn_pcs_entry_pit_search (pcs_entry, faceid));
+
+ // Add ingress face to pit entry
+ hicn_pcs_entry_pit_add_face (pcs_entry, faceid);
+
+ // Insert PIT entry
+ ret = hicn_pcs_pit_insert (pcs, pcs_entry, &name);
+ TEST_ASSERT_EQUAL (HICN_ERROR_NONE, ret);
+ TEST_ASSERT_EQUAL (hicn_pcs_get_pit_count (pcs), 1);
+ TEST_ASSERT_EQUAL (hicn_pcs_get_cs_count (pcs), 0);
+
+ // Lookup PIT entry
+ hicn_pcs_entry_t *pcs_entry_ret = NULL;
+ ret = hicn_pcs_lookup_one (pcs, &name, &pcs_entry_ret);
+ TEST_ASSERT_EQUAL (HICN_ERROR_NONE, ret);
+ TEST_ASSERT_NOT_NULL (pcs_entry_ret);
+ TEST_ASSERT_EQUAL (pcs_entry, pcs_entry_ret);
+
+ // Check everything is fine
+ ret = hicn_pcs_entry_pit_search (pcs_entry_ret, faceid);
+ // Face 20 should be in the entry
+ TEST_ASSERT_EQUAL (ret, 1);
+
+ // Get faces and make sure
+ // - there is only one face
+ // - the face is 20
+ TEST_ASSERT_EQUAL (1, hicn_pcs_entry_pit_get_n_faces (pcs_entry_ret));
+ TEST_ASSERT_EQUAL (20, hicn_pcs_entry_pit_get_dpo_face (pcs_entry_ret, 0));
+
+ // Release PIT entry
+ hicn_pcs_entry_remove_lock (pcs, pcs_entry_ret);
+ TEST_ASSERT_EQUAL (hicn_pcs_get_pcs_dealloc (pcs), 1);
+ TEST_ASSERT_EQUAL (hicn_pcs_get_pit_count (pcs), 0);
+ TEST_ASSERT_EQUAL (hicn_pcs_get_cs_count (pcs), 0);
+
+ // Lookup PIT entry again, we should not find it
+ ret = hicn_pcs_lookup_one (pcs, &name, &pcs_entry_ret);
+ TEST_ASSERT_EQUAL (HICN_ERROR_PCS_NOT_FOUND, ret);
+ TEST_ASSERT_EQUAL (NULL, pcs_entry_ret);
+}
+
+TEST (PCS, AddIngressFacesToPitEntryCornerCases)
+{
+ hicn_pit_cs_t *pcs = &global_pcs;
+
+ // Add entry to the PCS
+ int ret = 0;
+
+ // Allocate name
+ hicn_name_t name;
+ hicn_name_create ("b001::9876", 0, &name);
+
+ // Create PCS entry
+ hicn_pcs_entry_t *pcs_entry;
+ ret = hicn_pcs_lookup_one (pcs, &name, &pcs_entry);
+
+ // We will not find the entry
+ TEST_ASSERT_EQUAL (ret, HICN_ERROR_PCS_NOT_FOUND);
+ TEST_ASSERT_EQUAL (NULL, pcs_entry);
+
+ // Get a new entry from the pool
+ // TODO Check if the hicn_pcs_entry_pit_get is needed here
+ f64 tnow = 10.0;
+ pcs_entry = hicn_pcs_entry_pit_get (pcs, tnow, 0);
+ TEST_ASSERT_NOT_NULL (pcs_entry);
+ TEST_ASSERT_EQUAL (hicn_pcs_get_pcs_alloc (pcs), 1);
+
+ // Let's initialize HICN_FACE_DB_INLINE_FACES + 1 face IDs
+ u32 faceids[HICN_FACE_DB_INLINE_FACES + 1];
+ for (u32 i = 0; i < HICN_FACE_DB_INLINE_FACES + 1; i++)
+ faceids[i] = rand () % HICN_PARAM_FACES_MAX;
+
+ // The faces should not be in the PIT entry
+ for (u32 i = 0; i < HICN_FACE_DB_INLINE_FACES + 1; i++)
+ TEST_ASSERT_EQUAL (0, hicn_pcs_entry_pit_search (pcs_entry, faceids[i]));
+
+ // Add ingress faces to pit entry
+ for (u32 i = 0; i < HICN_FACE_DB_INLINE_FACES + 1; i++)
+ hicn_pcs_entry_pit_add_face (pcs_entry, faceids[i]);
+
+ // Insert PIT entry
+ ret = hicn_pcs_pit_insert (pcs, pcs_entry, &name);
+ TEST_ASSERT_EQUAL (HICN_ERROR_NONE, ret);
+ TEST_ASSERT_EQUAL (hicn_pcs_get_pit_count (pcs), 1);
+ TEST_ASSERT_EQUAL (hicn_pcs_get_cs_count (pcs), 0);
+
+ // Lookup PIT entry
+ hicn_pcs_entry_t *pcs_entry_ret = NULL;
+ ret = hicn_pcs_lookup_one (pcs, &name, &pcs_entry_ret);
+ TEST_ASSERT_EQUAL (HICN_ERROR_NONE, ret);
+ TEST_ASSERT_NOT_NULL (pcs_entry_ret);
+ TEST_ASSERT_EQUAL (pcs_entry, pcs_entry_ret);
+
+ // Check everything is fine
+ for (u32 i = 0; i < HICN_FACE_DB_INLINE_FACES + 1; i++)
+ {
+ ret = hicn_pcs_entry_pit_search (pcs_entry_ret, faceids[i]);
+      // The face should be in the entry
+ TEST_ASSERT_EQUAL (1, ret);
+ }
+
+ // Get faces and make sure
+ // - there are HICN_FACE_DB_INLINE_FACES + 1 faces
+ // - the first HICN_FACE_DB_INLINE_FACES are stored in the PIT entry
+ // - the face HICN_FACE_DB_INLINE_FACES + 1 is stored in the array of
+ // additional faces, so outside PIT entry
+ TEST_ASSERT_EQUAL (HICN_FACE_DB_INLINE_FACES + 1,
+ hicn_pcs_entry_pit_get_n_faces (pcs_entry_ret));
+ for (u32 i = 0; i < HICN_FACE_DB_INLINE_FACES + 1; i++)
+ TEST_ASSERT_EQUAL (faceids[i],
+ hicn_pcs_entry_pit_get_dpo_face (pcs_entry_ret, i));
+
+ // Release PIT entry
+ hicn_pcs_entry_remove_lock (pcs, pcs_entry_ret);
+ TEST_ASSERT_EQUAL (hicn_pcs_get_pcs_dealloc (pcs), 1);
+ TEST_ASSERT_EQUAL (hicn_pcs_get_pit_count (pcs), 0);
+ TEST_ASSERT_EQUAL (hicn_pcs_get_cs_count (pcs), 0);
+
+ // Lookup PIT entry again, we should not find it
+ ret = hicn_pcs_lookup_one (pcs, &name, &pcs_entry_ret);
+ TEST_ASSERT_EQUAL (HICN_ERROR_PCS_NOT_FOUND, ret);
+ TEST_ASSERT_EQUAL (NULL, pcs_entry_ret);
+}
+
+TEST_GROUP_RUNNER (PCS)
+{
+ RUN_TEST_CASE (PCS, Create)
+ RUN_TEST_CASE (PCS, Destroy)
+ RUN_TEST_CASE (PCS, LookupEmpty)
+ RUN_TEST_CASE (PCS, InsertPITEntryAndLookup)
+ RUN_TEST_CASE (PCS, InsertCSEntryAndLookup)
+ RUN_TEST_CASE (PCS, PitToCS)
+ RUN_TEST_CASE (PCS, CheckCSLruConsistency)
+ RUN_TEST_CASE (PCS, CheckCSLruMax)
+ RUN_TEST_CASE (PCS, AddIngressFacesToPITEntry)
+ RUN_TEST_CASE (PCS, AddIngressFacesToPitEntryCornerCases)
+}
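[Editor's note] A minimal sketch of how this Unity test group could be driven from a runner; the function below is an assumption and not part of this patch, while RUN_TEST_GROUP and UnityMain are the standard unity_fixture entry points:

#include "unity_fixture.h"

/* Invoke every test case registered by TEST_GROUP_RUNNER (PCS) above. */
static void
run_all_tests (void)
{
  RUN_TEST_GROUP (PCS);
}

int
main (int argc, const char *argv[])
{
  /* UnityMain runs the fixture runner and returns the number of failures. */
  return UnityMain (argc, argv, run_all_tests);
}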
diff --git a/hicn-plugin/src/test/vpp.c b/hicn-plugin/src/test/vpp.c
new file mode 100644
index 000000000..e1f38e23d
--- /dev/null
+++ b/hicn-plugin/src/test/vpp.c
@@ -0,0 +1,570 @@
+/*
+ * Copyright (c) 2022 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define _GNU_SOURCE
+#include <pthread.h>
+#include <sched.h>
+
+#include <vppinfra/cpu.h>
+#include <vlib/vlib.h>
+#include <vlib/unix/unix.h>
+#include <vlib/threads.h>
+#include <vnet/plugin/plugin.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vpp/vnet/config.h>
+#include <vlibmemory/memclnt.api_enum.h> /* To get the last static message id */
+#include <limits.h>
+
+/*
+ * Plugin paths are resolved at runtime, relative to the executable
+ * location (see vpp_find_plugin_path below), unless overridden via the
+ * plugin_path / test_plugin_path startup parameters.
+ */
+char *vlib_plugin_path = NULL;
+char *vat_plugin_path = NULL;
+
+static void
+vpp_find_plugin_path ()
+{
+ extern char *vat_plugin_path;
+ char *p, path[PATH_MAX];
+ int rv;
+ u8 *s;
+
+ /* find executable path */
+ if ((rv = readlink ("/proc/self/exe", path, PATH_MAX - 1)) == -1)
+ return;
+
+ /* readlink doesn't provide null termination */
+ path[rv] = 0;
+
+ /* strip filename */
+ if ((p = strrchr (path, '/')) == 0)
+ return;
+ *p = 0;
+
+ /* strip bin/ */
+ if ((p = strrchr (path, '/')) == 0)
+ return;
+ *p = 0;
+
+ s = format (0, "%s/" CLIB_LIB_DIR "/vpp_plugins", path, path);
+ vec_add1 (s, 0);
+ vlib_plugin_path = (char *) s;
+
+ s = format (0, "%s/" CLIB_LIB_DIR "/vpp_api_test_plugins", path, path);
+ vec_add1 (s, 0);
+ vat_plugin_path = (char *) s;
+}
+
+static void
+vpe_main_init (vlib_main_t *vm)
+{
+#if VPP_API_TEST_BUILTIN > 0
+ void vat_plugin_hash_create (void);
+#endif
+
+ if (CLIB_DEBUG > 0)
+ vlib_unix_cli_set_prompt ("DBGvpp# ");
+ else
+ vlib_unix_cli_set_prompt ("vpp# ");
+
+ /* Turn off network stack components which we don't want */
+ vlib_mark_init_function_complete (vm, srp_init);
+
+ /*
+ * Create the binary api plugin hashes before loading plugins
+ */
+#if VPP_API_TEST_BUILTIN > 0
+ vat_plugin_hash_create ();
+#endif
+
+ if (!vlib_plugin_path)
+ vpp_find_plugin_path ();
+}
+
+/*
+ * Default path for runtime data
+ */
+char *vlib_default_runtime_dir = "vpp";
+
+int
+vpp_init_internal (int argc, char *argv[])
+{
+ int i;
+ void vl_msg_api_set_first_available_msg_id (u16);
+ uword main_heap_size = (1ULL << 30);
+ u8 *sizep;
+ u32 size;
+ clib_mem_page_sz_t main_heap_log2_page_sz = CLIB_MEM_PAGE_SZ_DEFAULT;
+ clib_mem_page_sz_t default_log2_hugepage_sz = CLIB_MEM_PAGE_SZ_UNKNOWN;
+ unformat_input_t input, sub_input;
+ u8 *s = 0, *v = 0;
+ int main_core = 1;
+ cpu_set_t cpuset;
+ void *main_heap;
+
+#if __x86_64__
+ CLIB_UNUSED (const char *msg) =
+ "ERROR: This binary requires CPU with %s extensions.\n";
+#define _(a, b) \
+ if (!clib_cpu_supports_##a ()) \
+ { \
+ fprintf (stderr, msg, b); \
+ exit (1); \
+ }
+
+#if __AVX2__
+ _ (avx2, "AVX2")
+#endif
+#if __AVX__
+ _ (avx, "AVX")
+#endif
+#if __SSE4_2__
+ _ (sse42, "SSE4.2")
+#endif
+#if __SSE4_1__
+ _ (sse41, "SSE4.1")
+#endif
+#if __SSSE3__
+ _ (ssse3, "SSSE3")
+#endif
+#if __SSE3__
+ _ (sse3, "SSE3")
+#endif
+#undef _
+#endif
+ /*
+ * Load startup config from file.
+ * usage: vpp -c /etc/vpp/startup.conf
+ */
+ if ((argc == 3) && !strncmp (argv[1], "-c", 2))
+ {
+ FILE *fp;
+ char inbuf[4096];
+ int argc_ = 1;
+ char **argv_ = NULL;
+ char *arg = NULL;
+ char *p;
+
+ fp = fopen (argv[2], "r");
+ if (fp == NULL)
+ {
+ fprintf (stderr, "open configuration file '%s' failed\n", argv[2]);
+ return 1;
+ }
+ argv_ = calloc (1, sizeof (char *));
+ if (argv_ == NULL)
+ {
+ fclose (fp);
+ return 1;
+ }
+ arg = strndup (argv[0], 1024);
+ if (arg == NULL)
+ {
+ fclose (fp);
+ free (argv_);
+ return 1;
+ }
+ argv_[0] = arg;
+
+ while (1)
+ {
+ if (fgets (inbuf, 4096, fp) == 0)
+ break;
+ p = strtok (inbuf, " \t\n");
+ while (p != NULL)
+ {
+ if (*p == '#')
+ break;
+ argc_++;
+ char **tmp = realloc (argv_, argc_ * sizeof (char *));
+ if (tmp == NULL)
+ {
+ fclose (fp);
+ return 1;
+ }
+ argv_ = tmp;
+ arg = strndup (p, 1024);
+ if (arg == NULL)
+ {
+ free (argv_);
+ fclose (fp);
+ return 1;
+ }
+ argv_[argc_ - 1] = arg;
+ p = strtok (NULL, " \t\n");
+ }
+ }
+
+ fclose (fp);
+
+ char **tmp = realloc (argv_, (argc_ + 1) * sizeof (char *));
+ if (tmp == NULL)
+ {
+ free (argv_);
+ return 1;
+ }
+ argv_ = tmp;
+ argv_[argc_] = NULL;
+
+ argc = argc_;
+ argv = argv_;
+ }
+
+ /*
+ * Look for and parse the "heapsize" config parameter.
+ * Manual since none of the clib infra has been bootstrapped yet.
+ *
+ * Format: heapsize <nn>[mM][gG]
+ */
+
+ for (i = 1; i < (argc - 1); i++)
+ {
+ if (!strncmp (argv[i], "plugin_path", 11))
+ {
+ if (i < (argc - 1))
+ vlib_plugin_path = argv[++i];
+ }
+ if (!strncmp (argv[i], "test_plugin_path", 16))
+ {
+ if (i < (argc - 1))
+ vat_plugin_path = argv[++i];
+ }
+ else if (!strncmp (argv[i], "heapsize", 8))
+ {
+ sizep = (u8 *) argv[i + 1];
+ size = 0;
+ while (sizep && *sizep >= '0' && *sizep <= '9')
+ {
+ size *= 10;
+ size += *sizep++ - '0';
+ }
+ if (size == 0)
+ {
+ fprintf (
+ stderr,
+ "warning: heapsize parse error '%s', use default %lld\n",
+ argv[i], (long long int) main_heap_size);
+ goto defaulted;
+ }
+
+ main_heap_size = size;
+
+ if (*sizep == 'g' || *sizep == 'G')
+ main_heap_size <<= 30;
+ else if (*sizep == 'm' || *sizep == 'M')
+ main_heap_size <<= 20;
+ }
+ else if (!strncmp (argv[i], "main-core", 9))
+ {
+ if (i < (argc - 1))
+ {
+ errno = 0;
+ unsigned long x = strtol (argv[++i], 0, 0);
+ if (errno == 0)
+ main_core = x;
+ }
+ }
+ }
+defaulted:
+
+ /* temporary heap */
+ clib_mem_init (0, 1 << 20);
+ unformat_init_command_line (&input, (char **) argv);
+
+ while (unformat_check_input (&input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (&input, "memory %v", &v))
+ {
+ unformat_init_vector (&sub_input, v);
+ v = 0;
+ while (unformat_check_input (&sub_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (&sub_input, "main-heap-size %U",
+ unformat_memory_size, &main_heap_size))
+ ;
+ else if (unformat (&sub_input, "main-heap-page-size %U",
+ unformat_log2_page_size,
+ &main_heap_log2_page_sz))
+ ;
+ else if (unformat (&sub_input, "default-hugepage-size %U",
+ unformat_log2_page_size,
+ &default_log2_hugepage_sz))
+ ;
+ else
+ {
+ fformat (stderr, "unknown 'memory' config input '%U'\n",
+ format_unformat_error, &sub_input);
+ exit (1);
+ }
+ }
+ unformat_free (&sub_input);
+ }
+ else if (!unformat (&input, "%s %v", &s, &v))
+ break;
+
+ vec_reset_length (s);
+ vec_reset_length (v);
+ }
+ vec_free (s);
+ vec_free (v);
+
+ unformat_free (&input);
+
+ /* set process affinity for main thread */
+ CPU_ZERO (&cpuset);
+ CPU_SET (main_core, &cpuset);
+ pthread_setaffinity_np (pthread_self (), sizeof (cpu_set_t), &cpuset);
+
+ /* Set up the plugin message ID allocator right now... */
+ vl_msg_api_set_first_available_msg_id (VL_MSG_MEMCLNT_LAST + 1);
+
+ /* destroy temporary heap and create main one */
+ clib_mem_destroy ();
+
+ if ((main_heap = clib_mem_init_with_page_size (main_heap_size,
+ main_heap_log2_page_sz)))
+ {
+ /* Figure out which numa runs the main thread */
+ __os_numa_index = clib_get_current_numa_node ();
+
+ if (default_log2_hugepage_sz != CLIB_MEM_PAGE_SZ_UNKNOWN)
+ clib_mem_set_log2_default_hugepage_size (default_log2_hugepage_sz);
+
+ /* and use the main heap as that numa's numa heap */
+ clib_mem_set_per_numa_heap (main_heap);
+ vlib_main_init ();
+ vpe_main_init (vlib_get_first_main ());
+ return 0;
+ }
+ else
+ {
+ {
+ int rv __attribute__ ((unused)) =
+ write (2, "Main heap allocation failure!\r\n", 31);
+ }
+ return 1;
+ }
+}
+
+static clib_error_t *
+memory_config (vlib_main_t *vm, unformat_input_t *input)
+{
+ return 0;
+}
+
+VLIB_CONFIG_FUNCTION (memory_config, "memory");
+
+static clib_error_t *
+heapsize_config (vlib_main_t *vm, unformat_input_t *input)
+{
+ return 0;
+}
+
+VLIB_CONFIG_FUNCTION (heapsize_config, "heapsize");
+
+static clib_error_t *
+placeholder_path_config (vlib_main_t *vm, unformat_input_t *input)
+{
+ u8 *junk;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "%s", &junk))
+ {
+ vec_free (junk);
+ return 0;
+ }
+ else
+ return clib_error_return (0, "unknown input '%U'",
+ format_unformat_error, input);
+ }
+ return 0;
+}
+
+static clib_error_t *
+plugin_path_config (vlib_main_t *vm, unformat_input_t *input)
+{
+ return placeholder_path_config (vm, input);
+}
+
+VLIB_CONFIG_FUNCTION (plugin_path_config, "plugin_path");
+
+static clib_error_t *
+test_plugin_path_config (vlib_main_t *vm, unformat_input_t *input)
+{
+ return placeholder_path_config (vm, input);
+}
+
+VLIB_CONFIG_FUNCTION (test_plugin_path_config, "test_plugin_path");
+
+void vl_msg_api_post_mortem_dump (void);
+void vlib_post_mortem_dump (void);
+
+void
+os_panic (void)
+{
+ vl_msg_api_post_mortem_dump ();
+ vlib_post_mortem_dump ();
+ abort ();
+}
+
+void vhost_user_unmap_all (void) __attribute__ ((weak));
+void
+vhost_user_unmap_all (void)
+{
+}
+
+void
+os_exit (int code)
+{
+ static int recursion_block;
+
+ if (code)
+ {
+ if (recursion_block)
+ abort ();
+
+ recursion_block = 1;
+
+ vl_msg_api_post_mortem_dump ();
+ vlib_post_mortem_dump ();
+ vhost_user_unmap_all ();
+ abort ();
+ }
+ exit (code);
+}
+
+#ifdef BARRIER_TRACING
+void
+vl_msg_api_barrier_trace_context (const char *context)
+{
+ vlib_worker_threads[0].barrier_context = context;
+}
+#endif
+
+void
+vl_msg_api_barrier_sync (void)
+{
+ vlib_worker_thread_barrier_sync (vlib_get_main ());
+}
+
+void
+vl_msg_api_barrier_release (void)
+{
+ vlib_worker_thread_barrier_release (vlib_get_main ());
+}
+
+/* This application needs 1 thread stack for the stats pthread */
+u32
+vlib_app_num_thread_stacks_needed (void)
+{
+ return 1;
+}
+
+/*
+ * Depending on the configuration selected above,
+ * it may be necessary to generate stub graph nodes.
+ * It is never OK to ignore "node 'x' refers to unknown node 'y'"
+ * messages!
+ */
+
+#include <vppinfra/bihash_8_8.h>
+
+static clib_error_t *
+show_bihash_command_fn (vlib_main_t *vm, unformat_input_t *input,
+ vlib_cli_command_t *cmd)
+{
+ int i;
+ clib_bihash_8_8_t *h;
+ int verbose = 0;
+
+ if (unformat (input, "verbose"))
+ verbose = 1;
+
+ for (i = 0; i < vec_len (clib_all_bihashes); i++)
+ {
+ h = (clib_bihash_8_8_t *) clib_all_bihashes[i];
+ vlib_cli_output (vm, "\n%U", h->fmt_fn, h, verbose);
+ }
+
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (show_bihash_command, static) = {
+ .path = "show bihash",
+ .short_help = "show bihash",
+ .function = show_bihash_command_fn,
+};
+/* *INDENT-ON* */
+
+#ifdef CLIB_SANITIZE_ADDR
+/* default options for Address Sanitizer */
+const char *
+__asan_default_options (void)
+{
+ return VPP_SANITIZE_ADDR_OPTIONS;
+}
+#endif /* CLIB_SANITIZE_ADDR */
+
+/***********************************
+ * Missing Symbol in vpp libraries
+ ***********************************/
+
+typedef struct stat_segment_directory_entry_s stat_segment_directory_entry_t;
+typedef void (*stat_segment_update_fn) (stat_segment_directory_entry_t *e,
+ u32 i);
+clib_error_t *
+stat_segment_register_gauge (u8 *name, stat_segment_update_fn update_fn,
+ u32 caller_index)
+{
+ return NULL;
+}
+
+u8 *
+format_vl_api_address_union (u8 *s, va_list *args)
+{
+ return NULL;
+}
+
+int
+vpp_init ()
+{
+#define N_ARGS 3
+#define BUFFER_LEN 1024
+ // Get executable path
+ char buffer[BUFFER_LEN];
+ // readlink returns -1 on failure and does not null-terminate the buffer
+ int ret = readlink ("/proc/self/exe", buffer, BUFFER_LEN - 1);
+
+ ASSERT (ret >= 0 && ret < BUFFER_LEN);
+
+ if (ret < 0 || ret >= BUFFER_LEN)
+ {
+ return -1;
+ }
+
+ buffer[ret] = '\0';
+
+ char *argv[N_ARGS] = { buffer, "unix { nodaemon }", NULL };
+ return vpp_init_internal (N_ARGS - 1, argv);
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
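[Editor's note] As a usage sketch (not part of the patch), the bootstrap above can also be driven through the "vpp -c <file>" branch it parses; the wrapper name and configuration path below are illustrative only:

/* Forward declaration; defined in test/vpp.c above. */
int vpp_init_internal (int argc, char *argv[]);

static int
start_vpp_from_config (void)
{
  /* Mirrors the "-c <file>" branch handled in vpp_init_internal (); the
   * recognized startup keys include heapsize, main-core, plugin_path,
   * test_plugin_path and the memory { ... } section. */
  char *args[] = { "hicn_tests", "-c", "/etc/vpp/startup.conf", NULL };
  return vpp_init_internal (3, args);
}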
diff --git a/hicn-plugin/src/test/vpp.h b/hicn-plugin/src/test/vpp.h
new file mode 100644
index 000000000..fe680f6b6
--- /dev/null
+++ b/hicn-plugin/src/test/vpp.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright (c) 2022 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __VPP__
+#define __VPP__
+
+int vpp_init ();
+
+#endif
\ No newline at end of file
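[Editor's note] A minimal sketch of a test entry point consuming this header; the main () wiring is an assumption, only vpp_init () itself is declared here:

#include "vpp.h"

int
main (void)
{
  /* Bring up the embedded VPP instance before any VPP-dependent test runs;
   * vpp_init () returns 0 on success, non-zero otherwise. */
  if (vpp_init () != 0)
    return 1;

  /* ... run the Unity test groups here ... */
  return 0;
}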
diff --git a/hicn-plugin/src/udp_tunnels/udp_decap.h b/hicn-plugin/src/udp_tunnels/udp_decap.h
index 9ddb8a73b..7dc13f272 100644
--- a/hicn-plugin/src/udp_tunnels/udp_decap.h
+++ b/hicn-plugin/src/udp_tunnels/udp_decap.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020 Cisco and/or its affiliates.
+ * Copyright (c) 2021 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
diff --git a/hicn-plugin/src/udp_tunnels/udp_decap_node.c b/hicn-plugin/src/udp_tunnels/udp_decap_node.c
index 5603f20f9..2ac1b1ecb 100644
--- a/hicn-plugin/src/udp_tunnels/udp_decap_node.c
+++ b/hicn-plugin/src/udp_tunnels/udp_decap_node.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020 Cisco and/or its affiliates.
+ * Copyright (c) 2021 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -37,9 +37,9 @@ typedef enum
typedef enum
{
- UDP6_DECAP_NEXT_LOOKUP_IP4,
- UDP6_DECAP_NEXT_LOOKUP_IP6,
- UDP6_DECAP_N_NEXT,
+ UDP6_DECAP_NEXT_LOOKUP_IP4,
+ UDP6_DECAP_NEXT_LOOKUP_IP6,
+ UDP6_DECAP_N_NEXT,
} udp6_decap_next_t;
typedef struct udp4_decap_trace_t_
@@ -66,9 +66,8 @@ typedef struct udp_decap_trace_t_
u8 ishicn;
} udp_decap_trace_t;
-
static u8 *
-format_udp_decap_trace (u8 * s, va_list * args)
+format_udp_decap_trace (u8 *s, va_list *args)
{
CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
@@ -78,51 +77,55 @@ format_udp_decap_trace (u8 * s, va_list * args)
if (t->isv6)
{
- s = format (s, "%U\n %U \n %s",
- format_ip4_header, &t->udp6.ip, sizeof (t->udp6.ip),
- format_udp_header, &t->udp6.udp, sizeof (t->udp6.udp),
- t->ishicn ? "hICN udp tunnel" : "");
+ s = format (s, "%U\n %U \n %s", format_ip4_header, &t->udp6.ip,
+ sizeof (t->udp6.ip), format_udp_header, &t->udp6.udp,
+ sizeof (t->udp6.udp), t->ishicn ? "hICN udp tunnel" : "");
}
else
{
- s = format (s, "%U\n %U \n %s",
- format_ip4_header, &t->udp4.ip, sizeof (t->udp4.ip),
- format_udp_header, &t->udp4.udp, sizeof (t->udp4.udp),
- t->ishicn ? "hICN udp tunnel" : "");
+ s = format (s, "%U\n %U \n %s", format_ip4_header, &t->udp4.ip,
+ sizeof (t->udp4.ip), format_udp_header, &t->udp4.udp,
+ sizeof (t->udp4.udp), t->ishicn ? "hICN udp tunnel" : "");
}
return (s);
}
static_always_inline void
-udp_decap_trace_buffer (vlib_main_t * vm, vlib_node_runtime_t * node,
- u8 isv6, vlib_buffer_t * b)
+udp_decap_trace_buffer (vlib_main_t *vm, vlib_node_runtime_t *node, u8 isv6,
+ vlib_buffer_t *b)
{
if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) &&
(b->flags & VLIB_BUFFER_IS_TRACED)))
{
- udp_decap_trace_t *t =
- vlib_add_trace (vm, node, b, sizeof (*t));
+ udp_decap_trace_t *t = vlib_add_trace (vm, node, b, sizeof (*t));
t->isv6 = isv6;
- hicn_buffer_t *hb = hicn_get_buffer(b);
+ hicn_buffer_t *hb = hicn_get_buffer (b);
if (isv6)
- {
- clib_memcpy(&(t->udp6.udp), vlib_buffer_get_current(b) + sizeof(ip6_header_t), sizeof(udp_header_t));
- clib_memcpy(&(t->udp6.ip), vlib_buffer_get_current(b), sizeof(ip6_header_t));
- t->ishicn = hb->flags & hb->flags & HICN_BUFFER_FLAGS_FROM_UDP6_TUNNEL;
- }
+ {
+ clib_memcpy (&(t->udp6.udp),
+ vlib_buffer_get_current (b) + sizeof (ip6_header_t),
+ sizeof (udp_header_t));
+ clib_memcpy (&(t->udp6.ip), vlib_buffer_get_current (b),
+ sizeof (ip6_header_t));
+ t->ishicn = hb->flags & HICN_BUFFER_FLAGS_FROM_UDP6_TUNNEL;
+ }
else
- {
- clib_memcpy(&(t->udp4.udp), vlib_buffer_get_current(b) + sizeof(ip4_header_t), sizeof(udp_header_t));
- clib_memcpy(&(t->udp4.ip), vlib_buffer_get_current(b), sizeof(ip4_header_t));
- t->ishicn = hb->flags & HICN_BUFFER_FLAGS_FROM_UDP4_TUNNEL;
- }
+ {
+ clib_memcpy (&(t->udp4.udp),
+ vlib_buffer_get_current (b) + sizeof (ip4_header_t),
+ sizeof (udp_header_t));
+ clib_memcpy (&(t->udp4.ip), vlib_buffer_get_current (b),
+ sizeof (ip4_header_t));
+ t->ishicn = hb->flags & HICN_BUFFER_FLAGS_FROM_UDP4_TUNNEL;
+ }
}
}
static uword
-udp4_decap_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
- vlib_frame_t * frame)
+udp4_decap_node_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
+ vlib_frame_t *frame)
{
u32 n_left_from, *from, *to_next, next_index;
@@ -138,11 +141,11 @@ udp4_decap_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
/* Dual loop, X2 */
while (n_left_from >= 8 && n_left_to_next >= 4)
{
- vlib_buffer_t *b0, *b1, *b2, *b3;
+ vlib_buffer_t *b0, *b1, *b2, *b3;
u32 bi0, bi1, bi2, bi3;
u32 next0, next1, next2, next3;
- {
+ {
vlib_buffer_t *b4, *b5, *b6, *b7;
b4 = vlib_get_buffer (vm, from[4]);
b5 = vlib_get_buffer (vm, from[5]);
@@ -172,108 +175,133 @@ udp4_decap_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
b0 = vlib_get_buffer (vm, bi0);
b1 = vlib_get_buffer (vm, bi1);
b2 = vlib_get_buffer (vm, bi2);
- b3 = vlib_get_buffer (vm, bi3);
+ b3 = vlib_get_buffer (vm, bi3);
u8 *ptr0 = vlib_buffer_get_current (b0);
- u8 *ptr1 = vlib_buffer_get_current (b1);
- u8 *ptr2 = vlib_buffer_get_current (b2);
- u8 *ptr3 = vlib_buffer_get_current (b3);
+ u8 *ptr1 = vlib_buffer_get_current (b1);
+ u8 *ptr2 = vlib_buffer_get_current (b2);
+ u8 *ptr3 = vlib_buffer_get_current (b3);
u8 v0 = *ptr0 & 0xf0;
u8 v1 = *ptr1 & 0xf0;
u8 v2 = *ptr2 & 0xf0;
u8 v3 = *ptr3 & 0xf0;
- u8 advance = sizeof(ip4_header_t) + sizeof(udp_header_t);
+ u8 advance = sizeof (ip4_header_t) + sizeof (udp_header_t);
- vlib_buffer_advance(b0, -advance);
- vlib_buffer_advance(b1, -advance);
- vlib_buffer_advance(b2, -advance);
- vlib_buffer_advance(b3, -advance);
+ vlib_buffer_advance (b0, -advance);
+ vlib_buffer_advance (b1, -advance);
+ vlib_buffer_advance (b2, -advance);
+ vlib_buffer_advance (b3, -advance);
- u8 *outer_ptr0 = vlib_buffer_get_current (b0);
- u8 *outer_ptr1 = vlib_buffer_get_current (b1);
- u8 *outer_ptr2 = vlib_buffer_get_current (b2);
- u8 *outer_ptr3 = vlib_buffer_get_current (b3);
- u8 outer_v0 = *outer_ptr0 & 0xf0;
+ u8 *outer_ptr0 = vlib_buffer_get_current (b0);
+ u8 *outer_ptr1 = vlib_buffer_get_current (b1);
+ u8 *outer_ptr2 = vlib_buffer_get_current (b2);
+ u8 *outer_ptr3 = vlib_buffer_get_current (b3);
+ u8 outer_v0 = *outer_ptr0 & 0xf0;
u8 outer_v1 = *outer_ptr1 & 0xf0;
u8 outer_v2 = *outer_ptr2 & 0xf0;
u8 outer_v3 = *outer_ptr3 & 0xf0;
- ip46_address_t src0 = {0};
- ip46_address_t src1 = {0};
- ip46_address_t src2 = {0};
- ip46_address_t src3 = {0};
-
- ip46_address_t dst0 = {0};
- ip46_address_t dst1 = {0};
- ip46_address_t dst2 = {0};
- ip46_address_t dst3 = {0};
-
- udp_header_t * udp0 = NULL;
- udp_header_t * udp1 = NULL;
- udp_header_t * udp2 = NULL;
- udp_header_t * udp3 = NULL;
-
- ip46_address_set_ip4(&src0, &((ip4_header_t *)outer_ptr0)->src_address);
- ip46_address_set_ip4(&dst0, &((ip4_header_t *)outer_ptr0)->dst_address);
- udp0 = (udp_header_t *)(outer_ptr0 + sizeof(ip4_header_t));
- next0 = v0 == 0x40? UDP4_DECAP_NEXT_LOOKUP_IP4 : UDP4_DECAP_NEXT_LOOKUP_IP6;
-
- ip46_address_set_ip4(&src1, &((ip4_header_t *)outer_ptr1)->src_address);
- ip46_address_set_ip4(&dst1, &((ip4_header_t *)outer_ptr1)->dst_address);
- udp1 = (udp_header_t *)(outer_ptr1 + sizeof(ip4_header_t));
- next1 = v1 == 0x40? UDP4_DECAP_NEXT_LOOKUP_IP4 : UDP4_DECAP_NEXT_LOOKUP_IP6;
-
- ip46_address_set_ip4(&src2, &((ip4_header_t *)outer_ptr2)->src_address);
- ip46_address_set_ip4(&dst2, &((ip4_header_t *)outer_ptr2)->dst_address);
- udp2 = (udp_header_t *)(outer_ptr2 + sizeof(ip4_header_t));
- next2 = v2 == 0x40? UDP4_DECAP_NEXT_LOOKUP_IP4 : UDP4_DECAP_NEXT_LOOKUP_IP6;
-
- ip46_address_set_ip4(&src3, &((ip4_header_t *)outer_ptr3)->src_address);
- ip46_address_set_ip4(&dst3, &((ip4_header_t *)outer_ptr3)->dst_address);
- udp3 = (udp_header_t *)(outer_ptr3 + sizeof(ip4_header_t));
- next3 = v3 == 0x40? UDP4_DECAP_NEXT_LOOKUP_IP4 : UDP4_DECAP_NEXT_LOOKUP_IP6;
-
- hicn_buffer_t *hicnb0, *hicnb1, *hicnb2, *hicnb3;
- hicnb0 = hicn_get_buffer(b0);
- hicnb1 = hicn_get_buffer(b1);
- hicnb2 = hicn_get_buffer(b2);
- hicnb3 = hicn_get_buffer(b3);
-
-
- /* Udp encap-decap tunnels have dst and src addresses and port swapped */
- vnet_buffer (b0)->ip.adj_index[VLIB_RX] = udp_tunnel_get(&dst0, &src0, udp0->dst_port, udp0->src_port);
- vnet_buffer (b1)->ip.adj_index[VLIB_RX] = udp_tunnel_get(&dst1, &src1, udp1->dst_port, udp1->src_port);
- vnet_buffer (b2)->ip.adj_index[VLIB_RX] = udp_tunnel_get(&dst2, &src2, udp2->dst_port, udp2->src_port);
- vnet_buffer (b3)->ip.adj_index[VLIB_RX] = udp_tunnel_get(&dst3, &src3, udp3->dst_port, udp3->src_port);
-
- if (vnet_buffer (b0)->ip.adj_index[VLIB_RX] !=
- UDP_TUNNEL_INVALID)
- hicnb0->flags |= (outer_v0 == 0x40? HICN_BUFFER_FLAGS_FROM_UDP4_TUNNEL : HICN_BUFFER_FLAGS_FROM_UDP6_TUNNEL);
-
- if (vnet_buffer (b1)->ip.adj_index[VLIB_RX] !=
- UDP_TUNNEL_INVALID)
- hicnb1->flags |= (outer_v1 == 0x40? HICN_BUFFER_FLAGS_FROM_UDP4_TUNNEL : HICN_BUFFER_FLAGS_FROM_UDP6_TUNNEL);
-
- if (vnet_buffer (b2)->ip.adj_index[VLIB_RX] !=
- UDP_TUNNEL_INVALID)
- hicnb2->flags |= (outer_v2 == 0x40? HICN_BUFFER_FLAGS_FROM_UDP4_TUNNEL : HICN_BUFFER_FLAGS_FROM_UDP6_TUNNEL);
-
- if (vnet_buffer (b3)->ip.adj_index[VLIB_RX] !=
- UDP_TUNNEL_INVALID)
- hicnb3->flags |= (outer_v3 == 0x40? HICN_BUFFER_FLAGS_FROM_UDP4_TUNNEL : HICN_BUFFER_FLAGS_FROM_UDP6_TUNNEL);
-
- udp_decap_trace_buffer (vm, node, 1, b0);
- udp_decap_trace_buffer (vm, node, 1, b1);
- udp_decap_trace_buffer (vm, node, 1, b2);
- udp_decap_trace_buffer (vm, node, 1, b3);
-
- vlib_buffer_advance(b0, advance);
- vlib_buffer_advance(b1, advance);
- vlib_buffer_advance(b2, advance);
- vlib_buffer_advance(b3, advance);
-
- vlib_validate_buffer_enqueue_x4 (vm, node, next_index, to_next,
+ ip46_address_t src0 = { 0 };
+ ip46_address_t src1 = { 0 };
+ ip46_address_t src2 = { 0 };
+ ip46_address_t src3 = { 0 };
+
+ ip46_address_t dst0 = { 0 };
+ ip46_address_t dst1 = { 0 };
+ ip46_address_t dst2 = { 0 };
+ ip46_address_t dst3 = { 0 };
+
+ udp_header_t *udp0 = NULL;
+ udp_header_t *udp1 = NULL;
+ udp_header_t *udp2 = NULL;
+ udp_header_t *udp3 = NULL;
+
+ ip46_address_set_ip4 (&src0,
+ &((ip4_header_t *) outer_ptr0)->src_address);
+ ip46_address_set_ip4 (&dst0,
+ &((ip4_header_t *) outer_ptr0)->dst_address);
+ udp0 = (udp_header_t *) (outer_ptr0 + sizeof (ip4_header_t));
+ next0 = v0 == 0x40 ? UDP4_DECAP_NEXT_LOOKUP_IP4 :
+ UDP4_DECAP_NEXT_LOOKUP_IP6;
+
+ ip46_address_set_ip4 (&src1,
+ &((ip4_header_t *) outer_ptr1)->src_address);
+ ip46_address_set_ip4 (&dst1,
+ &((ip4_header_t *) outer_ptr1)->dst_address);
+ udp1 = (udp_header_t *) (outer_ptr1 + sizeof (ip4_header_t));
+ next1 = v1 == 0x40 ? UDP4_DECAP_NEXT_LOOKUP_IP4 :
+ UDP4_DECAP_NEXT_LOOKUP_IP6;
+
+ ip46_address_set_ip4 (&src2,
+ &((ip4_header_t *) outer_ptr2)->src_address);
+ ip46_address_set_ip4 (&dst2,
+ &((ip4_header_t *) outer_ptr2)->dst_address);
+ udp2 = (udp_header_t *) (outer_ptr2 + sizeof (ip4_header_t));
+ next2 = v2 == 0x40 ? UDP4_DECAP_NEXT_LOOKUP_IP4 :
+ UDP4_DECAP_NEXT_LOOKUP_IP6;
+
+ ip46_address_set_ip4 (&src3,
+ &((ip4_header_t *) outer_ptr3)->src_address);
+ ip46_address_set_ip4 (&dst3,
+ &((ip4_header_t *) outer_ptr3)->dst_address);
+ udp3 = (udp_header_t *) (outer_ptr3 + sizeof (ip4_header_t));
+ next3 = v3 == 0x40 ? UDP4_DECAP_NEXT_LOOKUP_IP4 :
+ UDP4_DECAP_NEXT_LOOKUP_IP6;
+
+ hicn_buffer_t *hicnb0, *hicnb1, *hicnb2, *hicnb3;
+ hicnb0 = hicn_get_buffer (b0);
+ hicnb1 = hicn_get_buffer (b1);
+ hicnb2 = hicn_get_buffer (b2);
+ hicnb3 = hicn_get_buffer (b3);
+
+ /* Udp encap-decap tunnels have dst and src addresses and ports
+ * swapped */
+ vnet_buffer (b0)->ip.adj_index[VLIB_RX] = udp_tunnel_get_create (
+ &dst0, &src0, udp0->dst_port, udp0->src_port);
+ vnet_buffer (b1)->ip.adj_index[VLIB_RX] = udp_tunnel_get_create (
+ &dst1, &src1, udp1->dst_port, udp1->src_port);
+ vnet_buffer (b2)->ip.adj_index[VLIB_RX] = udp_tunnel_get_create (
+ &dst2, &src2, udp2->dst_port, udp2->src_port);
+ vnet_buffer (b3)->ip.adj_index[VLIB_RX] = udp_tunnel_get_create (
+ &dst3, &src3, udp3->dst_port, udp3->src_port);
+
+ ASSERT (vnet_buffer (b0)->ip.adj_index[VLIB_RX] !=
+ UDP_TUNNEL_INVALID);
+ ASSERT (vnet_buffer (b1)->ip.adj_index[VLIB_RX] !=
+ UDP_TUNNEL_INVALID);
+ ASSERT (vnet_buffer (b2)->ip.adj_index[VLIB_RX] !=
+ UDP_TUNNEL_INVALID);
+ ASSERT (vnet_buffer (b3)->ip.adj_index[VLIB_RX] !=
+ UDP_TUNNEL_INVALID);
+
+ hicnb0->flags =
+ (outer_v0 == 0x40 ? HICN_BUFFER_FLAGS_FROM_UDP4_TUNNEL :
+ HICN_BUFFER_FLAGS_FROM_UDP6_TUNNEL);
+
+ hicnb1->flags =
+ (outer_v1 == 0x40 ? HICN_BUFFER_FLAGS_FROM_UDP4_TUNNEL :
+ HICN_BUFFER_FLAGS_FROM_UDP6_TUNNEL);
+
+ hicnb2->flags =
+ (outer_v2 == 0x40 ? HICN_BUFFER_FLAGS_FROM_UDP4_TUNNEL :
+ HICN_BUFFER_FLAGS_FROM_UDP6_TUNNEL);
+
+ hicnb3->flags =
+ (outer_v3 == 0x40 ? HICN_BUFFER_FLAGS_FROM_UDP4_TUNNEL :
+ HICN_BUFFER_FLAGS_FROM_UDP6_TUNNEL);
+
+ udp_decap_trace_buffer (vm, node, 0, b0);
+ udp_decap_trace_buffer (vm, node, 0, b1);
+ udp_decap_trace_buffer (vm, node, 0, b2);
+ udp_decap_trace_buffer (vm, node, 0, b3);
+
+ vlib_buffer_advance (b0, advance);
+ vlib_buffer_advance (b1, advance);
+ vlib_buffer_advance (b2, advance);
+ vlib_buffer_advance (b3, advance);
+
+ vlib_validate_buffer_enqueue_x4 (vm, node, next_index, to_next,
n_left_to_next, bi0, bi1, bi2, bi3,
next0, next1, next2, next3);
}
@@ -283,7 +311,7 @@ udp4_decap_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
{
vlib_buffer_t *b0;
u32 bi0;
- /* udp_encap_t *udp_tunnel0 = NULL; */
+ /* udp_encap_t *udp_tunnel0 = NULL; */
u32 next0;
if (n_left_from > 1)
@@ -300,42 +328,49 @@ udp4_decap_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
to_next += 1;
n_left_to_next -= 1;
- b0 = vlib_get_buffer (vm, bi0);
+ b0 = vlib_get_buffer (vm, bi0);
u8 *ptr0 = vlib_buffer_get_current (b0);
u8 v0 = *ptr0 & 0xf0;
- u8 advance = sizeof(ip4_header_t) + sizeof(udp_header_t);;
+ u8 advance = sizeof (ip4_header_t) + sizeof (udp_header_t);
- vlib_buffer_advance(b0, -advance);
+ vlib_buffer_advance (b0, -advance);
- u8 *outer_ptr0 = vlib_buffer_get_current (b0);
- u8 outer_v0 = *outer_ptr0 & 0xf0;
+ u8 *outer_ptr0 = vlib_buffer_get_current (b0);
+ u8 outer_v0 = *outer_ptr0 & 0xf0;
- ip46_address_t src0 = {0};
- ip46_address_t dst0 = {0};
- udp_header_t * udp0 = NULL;
+ ip46_address_t src0 = { 0 };
+ ip46_address_t dst0 = { 0 };
+ udp_header_t *udp0 = NULL;
- ip46_address_set_ip4(&src0, &((ip4_header_t *)outer_ptr0)->src_address);
- ip46_address_set_ip4(&dst0, &((ip4_header_t *)outer_ptr0)->dst_address);
- udp0 = (udp_header_t *)(outer_ptr0 + sizeof(ip4_header_t));
- next0 = v0 == 0x40 ? UDP4_DECAP_NEXT_LOOKUP_IP4: UDP4_DECAP_NEXT_LOOKUP_IP6;
+ ip46_address_set_ip4 (&src0,
+ &((ip4_header_t *) outer_ptr0)->src_address);
+ ip46_address_set_ip4 (&dst0,
+ &((ip4_header_t *) outer_ptr0)->dst_address);
+ udp0 = (udp_header_t *) (outer_ptr0 + sizeof (ip4_header_t));
+ next0 = v0 == 0x40 ? UDP4_DECAP_NEXT_LOOKUP_IP4 :
+ UDP4_DECAP_NEXT_LOOKUP_IP6;
- hicn_buffer_t *hicnb0 = hicn_get_buffer(b0);
+ hicn_buffer_t *hicnb0 = hicn_get_buffer (b0);
- vnet_buffer (b0)->ip.adj_index[VLIB_RX] = udp_tunnel_get(&dst0, &src0, udp0->dst_port, udp0->src_port);
+ vnet_buffer (b0)->ip.adj_index[VLIB_RX] = udp_tunnel_get_create (
+ &dst0, &src0, udp0->dst_port, udp0->src_port);
- if (vnet_buffer (b0)->ip.adj_index[VLIB_RX] !=
- UDP_TUNNEL_INVALID)
- hicnb0->flags |= (outer_v0 == 0x40 ? HICN_BUFFER_FLAGS_FROM_UDP4_TUNNEL : HICN_BUFFER_FLAGS_FROM_UDP6_TUNNEL);
+ ASSERT (vnet_buffer (b0)->ip.adj_index[VLIB_RX] !=
+ UDP_TUNNEL_INVALID);
- udp_decap_trace_buffer (vm, node, 1, b0);
+ hicnb0->flags |=
+ (outer_v0 == 0x40 ? HICN_BUFFER_FLAGS_FROM_UDP4_TUNNEL :
+ HICN_BUFFER_FLAGS_FROM_UDP6_TUNNEL);
- vlib_buffer_advance(b0, advance);
+ udp_decap_trace_buffer (vm, node, 0, b0);
- vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
- n_left_to_next, bi0, next0);
+ vlib_buffer_advance (b0, advance);
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+ n_left_to_next, bi0, next0);
}
vlib_put_next_frame (vm, node, next_index, n_left_to_next);
}
@@ -343,33 +378,26 @@ udp4_decap_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
return (frame->n_vectors);
}
-
/*
* Node registration for the interest forwarder node
*/
-/* *INDENT-OFF* */
-VLIB_REGISTER_NODE(udp4_decap_node) =
-{
+VLIB_REGISTER_NODE (udp4_decap_node) = {
.function = udp4_decap_node_fn,
.name = "udp4-decap",
- .vector_size = sizeof(u32),
+ .vector_size = sizeof (u32),
.format_trace = format_udp_decap_trace,
.type = VLIB_NODE_TYPE_INTERNAL,
- .n_errors = ARRAY_LEN(udp_decap_error_strings),
+ .n_errors = ARRAY_LEN (udp_decap_error_strings),
.error_strings = udp_decap_error_strings,
.n_next_nodes = UDP4_DECAP_N_NEXT,
/* edit / add dispositions here */
- .next_nodes =
- {
- [UDP4_DECAP_NEXT_LOOKUP_IP4] = "ip4-lookup",
- [UDP4_DECAP_NEXT_LOOKUP_IP6] = "ip6-lookup"
- },
+ .next_nodes = { [UDP4_DECAP_NEXT_LOOKUP_IP4] = "ip4-lookup",
+ [UDP4_DECAP_NEXT_LOOKUP_IP6] = "ip6-lookup" },
};
-/* *INDENT-ON* */
static uword
-udp6_decap_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
- vlib_frame_t * frame)
+udp6_decap_node_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
+ vlib_frame_t *frame)
{
u32 n_left_from, *from, *to_next, next_index;
@@ -385,11 +413,11 @@ udp6_decap_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
/* Dual loop, X2 */
while (n_left_from >= 8 && n_left_to_next >= 4)
{
- vlib_buffer_t *b0, *b1, *b2, *b3;
+ vlib_buffer_t *b0, *b1, *b2, *b3;
u32 bi0, bi1, bi2, bi3;
u32 next0, next1, next2, next3;
- {
+ {
vlib_buffer_t *b4, *b5, *b6, *b7;
b4 = vlib_get_buffer (vm, from[4]);
b5 = vlib_get_buffer (vm, from[5]);
@@ -422,105 +450,125 @@ udp6_decap_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
b3 = vlib_get_buffer (vm, bi3);
u8 *ptr0 = vlib_buffer_get_current (b0);
- u8 *ptr1 = vlib_buffer_get_current (b1);
- u8 *ptr2 = vlib_buffer_get_current (b2);
- u8 *ptr3 = vlib_buffer_get_current (b3);
+ u8 *ptr1 = vlib_buffer_get_current (b1);
+ u8 *ptr2 = vlib_buffer_get_current (b2);
+ u8 *ptr3 = vlib_buffer_get_current (b3);
u8 v0 = *ptr0 & 0xf0;
u8 v1 = *ptr1 & 0xf0;
u8 v2 = *ptr2 & 0xf0;
u8 v3 = *ptr3 & 0xf0;
- u8 advance = sizeof(ip6_header_t) + sizeof(udp_header_t);
+ u8 advance = sizeof (ip6_header_t) + sizeof (udp_header_t);
- vlib_buffer_advance(b0, -advance);
- vlib_buffer_advance(b1, -advance);
- vlib_buffer_advance(b2, -advance);
- vlib_buffer_advance(b3, -advance);
+ vlib_buffer_advance (b0, -advance);
+ vlib_buffer_advance (b1, -advance);
+ vlib_buffer_advance (b2, -advance);
+ vlib_buffer_advance (b3, -advance);
- u8 *outer_ptr0 = vlib_buffer_get_current (b0);
- u8 *outer_ptr1 = vlib_buffer_get_current (b1);
- u8 *outer_ptr2 = vlib_buffer_get_current (b2);
- u8 *outer_ptr3 = vlib_buffer_get_current (b3);
- u8 outer_v0 = *outer_ptr0 & 0xf0;
+ u8 *outer_ptr0 = vlib_buffer_get_current (b0);
+ u8 *outer_ptr1 = vlib_buffer_get_current (b1);
+ u8 *outer_ptr2 = vlib_buffer_get_current (b2);
+ u8 *outer_ptr3 = vlib_buffer_get_current (b3);
+ u8 outer_v0 = *outer_ptr0 & 0xf0;
u8 outer_v1 = *outer_ptr1 & 0xf0;
u8 outer_v2 = *outer_ptr2 & 0xf0;
u8 outer_v3 = *outer_ptr3 & 0xf0;
- ip46_address_t src0 = {0};
- ip46_address_t src1 = {0};
- ip46_address_t src2 = {0};
- ip46_address_t src3 = {0};
-
- ip46_address_t dst0 = {0};
- ip46_address_t dst1 = {0};
- ip46_address_t dst2 = {0};
- ip46_address_t dst3 = {0};
-
- udp_header_t * udp0 = NULL;
- udp_header_t * udp1 = NULL;
- udp_header_t * udp2 = NULL;
- udp_header_t * udp3 = NULL;
-
- ip46_address_set_ip6(&src0, &((ip6_header_t *)outer_ptr0)->src_address);
- ip46_address_set_ip6(&dst0, &((ip6_header_t *)outer_ptr0)->dst_address);
- udp0 = (udp_header_t *)(outer_ptr0 + sizeof(ip6_header_t));
- next0 = v0 == 0x40 ? UDP6_DECAP_NEXT_LOOKUP_IP4 : UDP6_DECAP_NEXT_LOOKUP_IP6;
-
- ip46_address_set_ip6(&src1, &((ip6_header_t *)outer_ptr1)->src_address);
- ip46_address_set_ip6(&dst1, &((ip6_header_t *)outer_ptr1)->dst_address);
- udp1 = (udp_header_t *)(outer_ptr1 + sizeof(ip6_header_t));
- next1 = v1 == 0x40 ? UDP6_DECAP_NEXT_LOOKUP_IP4 : UDP6_DECAP_NEXT_LOOKUP_IP6;
-
- ip46_address_set_ip6(&src2, &((ip6_header_t *)outer_ptr2)->src_address);
- ip46_address_set_ip6(&dst2, &((ip6_header_t *)outer_ptr2)->dst_address);
- udp2 = (udp_header_t *)(outer_ptr2 + sizeof(ip6_header_t));
- next2 = v2 == 0x40 ? UDP6_DECAP_NEXT_LOOKUP_IP4 : UDP6_DECAP_NEXT_LOOKUP_IP6;
-
- ip46_address_set_ip6(&src3, &((ip6_header_t *)outer_ptr3)->src_address);
- ip46_address_set_ip6(&dst3, &((ip6_header_t *)outer_ptr3)->dst_address);
- udp3 = (udp_header_t *)(outer_ptr3 + sizeof(ip6_header_t));
- next3 = v3 == 0x40 ? UDP6_DECAP_NEXT_LOOKUP_IP4 : UDP6_DECAP_NEXT_LOOKUP_IP6;
-
- hicn_buffer_t *hicnb0, *hicnb1, *hicnb2, *hicnb3;
- hicnb0 = hicn_get_buffer(b0);
- hicnb1 = hicn_get_buffer(b1);
- hicnb2 = hicn_get_buffer(b2);
- hicnb3 = hicn_get_buffer(b3);
-
-
- /* Udp encap-decap tunnels have dst and src addresses and port swapped */
- vnet_buffer (b0)->ip.adj_index[VLIB_RX] = udp_tunnel_get(&dst0, &src0, udp0->dst_port, udp0->src_port);
- vnet_buffer (b1)->ip.adj_index[VLIB_RX] = udp_tunnel_get(&dst1, &src1, udp1->dst_port, udp1->src_port);
- vnet_buffer (b2)->ip.adj_index[VLIB_RX] = udp_tunnel_get(&dst2, &src2, udp2->dst_port, udp2->src_port);
- vnet_buffer (b3)->ip.adj_index[VLIB_RX] = udp_tunnel_get(&dst3, &src3, udp3->dst_port, udp3->src_port);
-
- if (vnet_buffer (b0)->ip.adj_index[VLIB_RX] !=
- UDP_TUNNEL_INVALID)
- hicnb0->flags |= (outer_v0 == 0x40? HICN_BUFFER_FLAGS_FROM_UDP4_TUNNEL : HICN_BUFFER_FLAGS_FROM_UDP6_TUNNEL);
-
- if (vnet_buffer (b1)->ip.adj_index[VLIB_RX] !=
- UDP_TUNNEL_INVALID)
- hicnb1->flags |= (outer_v1 == 0x40? HICN_BUFFER_FLAGS_FROM_UDP4_TUNNEL : HICN_BUFFER_FLAGS_FROM_UDP6_TUNNEL);
-
- if (vnet_buffer (b2)->ip.adj_index[VLIB_RX] !=
- UDP_TUNNEL_INVALID)
- hicnb2->flags |= (outer_v2 == 0x40? HICN_BUFFER_FLAGS_FROM_UDP4_TUNNEL : HICN_BUFFER_FLAGS_FROM_UDP6_TUNNEL);
-
- if (vnet_buffer (b3)->ip.adj_index[VLIB_RX] !=
- UDP_TUNNEL_INVALID)
- hicnb3->flags |= (outer_v3 == 0x40? HICN_BUFFER_FLAGS_FROM_UDP4_TUNNEL : HICN_BUFFER_FLAGS_FROM_UDP6_TUNNEL);
-
- udp_decap_trace_buffer (vm, node, 0, b0);
- udp_decap_trace_buffer (vm, node, 0, b1);
- udp_decap_trace_buffer (vm, node, 0, b2);
- udp_decap_trace_buffer (vm, node, 0, b3);
-
- vlib_buffer_advance(b0, advance);
- vlib_buffer_advance(b1, advance);
- vlib_buffer_advance(b2, advance);
- vlib_buffer_advance(b3, advance);
-
- vlib_validate_buffer_enqueue_x4 (vm, node, next_index, to_next,
+ ip46_address_t src0 = { 0 };
+ ip46_address_t src1 = { 0 };
+ ip46_address_t src2 = { 0 };
+ ip46_address_t src3 = { 0 };
+
+ ip46_address_t dst0 = { 0 };
+ ip46_address_t dst1 = { 0 };
+ ip46_address_t dst2 = { 0 };
+ ip46_address_t dst3 = { 0 };
+
+ udp_header_t *udp0 = NULL;
+ udp_header_t *udp1 = NULL;
+ udp_header_t *udp2 = NULL;
+ udp_header_t *udp3 = NULL;
+
+ ip46_address_set_ip6 (&src0,
+ &((ip6_header_t *) outer_ptr0)->src_address);
+ ip46_address_set_ip6 (&dst0,
+ &((ip6_header_t *) outer_ptr0)->dst_address);
+ udp0 = (udp_header_t *) (outer_ptr0 + sizeof (ip6_header_t));
+ next0 = v0 == 0x40 ? UDP6_DECAP_NEXT_LOOKUP_IP4 :
+ UDP6_DECAP_NEXT_LOOKUP_IP6;
+
+ ip46_address_set_ip6 (&src1,
+ &((ip6_header_t *) outer_ptr1)->src_address);
+ ip46_address_set_ip6 (&dst1,
+ &((ip6_header_t *) outer_ptr1)->dst_address);
+ udp1 = (udp_header_t *) (outer_ptr1 + sizeof (ip6_header_t));
+ next1 = v1 == 0x40 ? UDP6_DECAP_NEXT_LOOKUP_IP4 :
+ UDP6_DECAP_NEXT_LOOKUP_IP6;
+
+ ip46_address_set_ip6 (&src2,
+ &((ip6_header_t *) outer_ptr2)->src_address);
+ ip46_address_set_ip6 (&dst2,
+ &((ip6_header_t *) outer_ptr2)->dst_address);
+ udp2 = (udp_header_t *) (outer_ptr2 + sizeof (ip6_header_t));
+ next2 = v2 == 0x40 ? UDP6_DECAP_NEXT_LOOKUP_IP4 :
+ UDP6_DECAP_NEXT_LOOKUP_IP6;
+
+ ip46_address_set_ip6 (&src3,
+ &((ip6_header_t *) outer_ptr3)->src_address);
+ ip46_address_set_ip6 (&dst3,
+ &((ip6_header_t *) outer_ptr3)->dst_address);
+ udp3 = (udp_header_t *) (outer_ptr3 + sizeof (ip6_header_t));
+ next3 = v3 == 0x40 ? UDP6_DECAP_NEXT_LOOKUP_IP4 :
+ UDP6_DECAP_NEXT_LOOKUP_IP6;
+
+ hicn_buffer_t *hicnb0, *hicnb1, *hicnb2, *hicnb3;
+ hicnb0 = hicn_get_buffer (b0);
+ hicnb1 = hicn_get_buffer (b1);
+ hicnb2 = hicn_get_buffer (b2);
+ hicnb3 = hicn_get_buffer (b3);
+
+ /* Udp encap-decap tunnels have dst and src addresses and ports
+ * swapped */
+ vnet_buffer (b0)->ip.adj_index[VLIB_RX] =
+ udp_tunnel_get (&dst0, &src0, udp0->dst_port, udp0->src_port);
+ vnet_buffer (b1)->ip.adj_index[VLIB_RX] =
+ udp_tunnel_get (&dst1, &src1, udp1->dst_port, udp1->src_port);
+ vnet_buffer (b2)->ip.adj_index[VLIB_RX] =
+ udp_tunnel_get (&dst2, &src2, udp2->dst_port, udp2->src_port);
+ vnet_buffer (b3)->ip.adj_index[VLIB_RX] =
+ udp_tunnel_get (&dst3, &src3, udp3->dst_port, udp3->src_port);
+
+ if (vnet_buffer (b0)->ip.adj_index[VLIB_RX] != UDP_TUNNEL_INVALID)
+ hicnb0->flags |=
+ (outer_v0 == 0x40 ? HICN_BUFFER_FLAGS_FROM_UDP4_TUNNEL :
+ HICN_BUFFER_FLAGS_FROM_UDP6_TUNNEL);
+
+ if (vnet_buffer (b1)->ip.adj_index[VLIB_RX] != UDP_TUNNEL_INVALID)
+ hicnb1->flags |=
+ (outer_v1 == 0x40 ? HICN_BUFFER_FLAGS_FROM_UDP4_TUNNEL :
+ HICN_BUFFER_FLAGS_FROM_UDP6_TUNNEL);
+
+ if (vnet_buffer (b2)->ip.adj_index[VLIB_RX] != UDP_TUNNEL_INVALID)
+ hicnb2->flags |=
+ (outer_v2 == 0x40 ? HICN_BUFFER_FLAGS_FROM_UDP4_TUNNEL :
+ HICN_BUFFER_FLAGS_FROM_UDP6_TUNNEL);
+
+ if (vnet_buffer (b3)->ip.adj_index[VLIB_RX] != UDP_TUNNEL_INVALID)
+ hicnb3->flags |=
+ (outer_v3 == 0x40 ? HICN_BUFFER_FLAGS_FROM_UDP4_TUNNEL :
+ HICN_BUFFER_FLAGS_FROM_UDP6_TUNNEL);
+
+ udp_decap_trace_buffer (vm, node, 1, b0);
+ udp_decap_trace_buffer (vm, node, 1, b1);
+ udp_decap_trace_buffer (vm, node, 1, b2);
+ udp_decap_trace_buffer (vm, node, 1, b3);
+
+ vlib_buffer_advance (b0, advance);
+ vlib_buffer_advance (b1, advance);
+ vlib_buffer_advance (b2, advance);
+ vlib_buffer_advance (b3, advance);
+
+ vlib_validate_buffer_enqueue_x4 (vm, node, next_index, to_next,
n_left_to_next, bi0, bi1, bi2, bi3,
next0, next1, next2, next3);
}
@@ -530,7 +578,7 @@ udp6_decap_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
{
vlib_buffer_t *b0;
u32 bi0;
- /* udp_encap_t *udp_tunnel0 = NULL; */
+ /* udp_encap_t *udp_tunnel0 = NULL; */
u32 next0;
if (n_left_from > 1)
@@ -547,42 +595,46 @@ udp6_decap_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
to_next += 1;
n_left_to_next -= 1;
- b0 = vlib_get_buffer (vm, bi0);
+ b0 = vlib_get_buffer (vm, bi0);
u8 *ptr0 = vlib_buffer_get_current (b0);
u8 v0 = *ptr0 & 0xf0;
- u8 advance = sizeof(ip6_header_t) + sizeof(udp_header_t);
-
- vlib_buffer_advance(b0, -advance);
+ u8 advance = sizeof (ip6_header_t) + sizeof (udp_header_t);
- u8 *outer_ptr0 = vlib_buffer_get_current (b0);
- u8 outer_v0 = *outer_ptr0 & 0xf0;
+ vlib_buffer_advance (b0, -advance);
- ip46_address_t src0 = {0};
- ip46_address_t dst0 = {0};
- udp_header_t * udp0 = NULL;
+ u8 *outer_ptr0 = vlib_buffer_get_current (b0);
+ u8 outer_v0 = *outer_ptr0 & 0xf0;
- ip46_address_set_ip6(&src0, &((ip6_header_t *)outer_ptr0)->src_address);
- ip46_address_set_ip6(&dst0, &((ip6_header_t *)outer_ptr0)->dst_address);
- udp0 = (udp_header_t *)(outer_ptr0 + sizeof(ip6_header_t));
- next0 = v0 == 0x40? UDP6_DECAP_NEXT_LOOKUP_IP4 : UDP6_DECAP_NEXT_LOOKUP_IP6;
+ ip46_address_t src0 = { 0 };
+ ip46_address_t dst0 = { 0 };
+ udp_header_t *udp0 = NULL;
- hicn_buffer_t *hicnb0 = hicn_get_buffer(b0);
+ ip46_address_set_ip6 (&src0,
+ &((ip6_header_t *) outer_ptr0)->src_address);
+ ip46_address_set_ip6 (&dst0,
+ &((ip6_header_t *) outer_ptr0)->dst_address);
+ udp0 = (udp_header_t *) (outer_ptr0 + sizeof (ip6_header_t));
+ next0 = v0 == 0x40 ? UDP6_DECAP_NEXT_LOOKUP_IP4 :
+ UDP6_DECAP_NEXT_LOOKUP_IP6;
- vnet_buffer (b0)->ip.adj_index[VLIB_RX] = udp_tunnel_get(&dst0, &src0, udp0->dst_port, udp0->src_port);
+ hicn_buffer_t *hicnb0 = hicn_get_buffer (b0);
- if (vnet_buffer (b0)->ip.adj_index[VLIB_RX] !=
- UDP_TUNNEL_INVALID)
- hicnb0->flags |= (outer_v0 == 0x40? HICN_BUFFER_FLAGS_FROM_UDP4_TUNNEL : HICN_BUFFER_FLAGS_FROM_UDP6_TUNNEL);
+ vnet_buffer (b0)->ip.adj_index[VLIB_RX] =
+ udp_tunnel_get (&dst0, &src0, udp0->dst_port, udp0->src_port);
- udp_decap_trace_buffer (vm, node, 0, b0);
+ if (vnet_buffer (b0)->ip.adj_index[VLIB_RX] != UDP_TUNNEL_INVALID)
+ hicnb0->flags |=
+ (outer_v0 == 0x40 ? HICN_BUFFER_FLAGS_FROM_UDP4_TUNNEL :
+ HICN_BUFFER_FLAGS_FROM_UDP6_TUNNEL);
- vlib_buffer_advance(b0, advance);
+ udp_decap_trace_buffer (vm, node, 1, b0);
- vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
- n_left_to_next, bi0, next0);
+ vlib_buffer_advance (b0, advance);
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+ n_left_to_next, bi0, next0);
}
vlib_put_next_frame (vm, node, next_index, n_left_to_next);
}
@@ -590,29 +642,22 @@ udp6_decap_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
return (frame->n_vectors);
}
-
/*
* Node registration for the interest forwarder node
*/
-/* *INDENT-OFF* */
-VLIB_REGISTER_NODE(udp6_decap_node) =
-{
+VLIB_REGISTER_NODE (udp6_decap_node) = {
.function = udp6_decap_node_fn,
.name = "udp6-decap",
- .vector_size = sizeof(u32),
+ .vector_size = sizeof (u32),
.format_trace = format_udp_decap_trace,
.type = VLIB_NODE_TYPE_INTERNAL,
- .n_errors = ARRAY_LEN(udp_decap_error_strings),
+ .n_errors = ARRAY_LEN (udp_decap_error_strings),
.error_strings = udp_decap_error_strings,
.n_next_nodes = UDP6_DECAP_N_NEXT,
/* edit / add dispositions here */
- .next_nodes =
- {
- [UDP6_DECAP_NEXT_LOOKUP_IP4] = "ip4-lookup",
- [UDP6_DECAP_NEXT_LOOKUP_IP6] = "ip6-lookup"
- },
+ .next_nodes = { [UDP6_DECAP_NEXT_LOOKUP_IP4] = "ip4-lookup",
+ [UDP6_DECAP_NEXT_LOOKUP_IP6] = "ip6-lookup" },
};
-/* *INDENT-ON* */
/*
* fd.io coding-style-patch-verification: ON
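[Editor's note] Illustrative sketch (not part of the patch) of the dispatch rule both decap nodes above apply: the first IP version nibble of the inner packet selects the next lookup node, 0x4 meaning IPv4 and anything else being treated as IPv6:

#include <vppinfra/types.h>

/* Same rule as udp4-decap/udp6-decap: inspect the version nibble of the
 * inner header and pick the corresponding ip4/ip6 lookup next index. */
static inline u32
inner_version_to_next (const u8 *inner, u32 next_lookup_ip4,
                       u32 next_lookup_ip6)
{
  return ((inner[0] & 0xf0) == 0x40) ? next_lookup_ip4 : next_lookup_ip6;
}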
diff --git a/hicn-plugin/src/udp_tunnels/udp_tunnel.c b/hicn-plugin/src/udp_tunnels/udp_tunnel.c
index 872e4cd82..5b0c0a9ef 100644
--- a/hicn-plugin/src/udp_tunnels/udp_tunnel.c
+++ b/hicn-plugin/src/udp_tunnels/udp_tunnel.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020 Cisco and/or its affiliates.
+ * Copyright (c) 2021 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -16,6 +16,8 @@
#include <vnet/vnet.h>
#include <vppinfra/bihash_40_8.h>
#include <vnet/fib/fib_table.h>
+#include <vnet/udp/udp_local.h>
+#include "../faces/iface_node.h"
#include "../error.h"
#include "../strategy_dpo_ctx.h"
@@ -25,63 +27,67 @@ clib_bihash_40_8_t udp_tunnels_hashtb;
dpo_type_t dpo_type_udp_ip4;
dpo_type_t dpo_type_udp_ip6;
-u32 udp_tunnel_add (fib_protocol_t proto,
- index_t fib_index,
- const ip46_address_t * src_ip,
- const ip46_address_t * dst_ip,
- u16 src_port,
- u16 dst_port,
- udp_encap_fixup_flags_t flags)
+u32
+udp_tunnel_add (fib_protocol_t proto, index_t fib_index,
+ const ip46_address_t *src_ip, const ip46_address_t *dst_ip,
+ u16 src_port, u16 dst_port, udp_encap_fixup_flags_t flags)
{
- vlib_main_t *vm = vlib_get_main();
+ vlib_main_t *vm = vlib_get_main ();
clib_bihash_kv_40_8_t kv;
- clib_memcpy(&kv.key[0], src_ip, sizeof(ip46_address_t));
- clib_memcpy(&kv.key[2], dst_ip, sizeof(ip46_address_t));
- kv.key[4] = (clib_host_to_net_u16(src_port) << 16) + clib_host_to_net_u16(dst_port);
+ clib_memcpy (&kv.key[0], src_ip, sizeof (ip46_address_t));
+ clib_memcpy (&kv.key[2], dst_ip, sizeof (ip46_address_t));
+ kv.key[4] =
+ (clib_host_to_net_u16 (src_port) << 16) + clib_host_to_net_u16 (dst_port);
clib_bihash_kv_40_8_t value;
int rv = clib_bihash_search_40_8 (&udp_tunnels_hashtb, &kv, &value);
if (rv != 0)
{
- u32 uei = udp_encap_add_and_lock(proto, fib_index, src_ip, dst_ip, src_port, dst_port, flags);
+ u32 uei = udp_encap_add_and_lock (proto, fib_index, src_ip, dst_ip,
+ src_port, dst_port, flags);
kv.value = uei;
- clib_bihash_add_del_40_8(&udp_tunnels_hashtb, &kv, 1);
+ clib_bihash_add_del_40_8 (&udp_tunnels_hashtb, &kv, 1);
value.value = kv.value;
if (proto == FIB_PROTOCOL_IP4)
- {
- udp_register_dst_port(vm, src_port, udp4_decap_node.index, 1);
- }
+ {
+ udp_register_dst_port (vm, src_port, udp4_decap_node.index, 1);
+ }
else
- {
- udp_register_dst_port(vm, src_port, udp6_decap_node.index, 0);
- }
+ {
+ udp_register_dst_port (vm, src_port, udp6_decap_node.index, 0);
+ }
}
return value.value;
}
-void udp_tunnel_add_existing (index_t uei, dpo_proto_t proto)
+void
+udp_tunnel_add_existing (index_t uei, dpo_proto_t proto)
{
- vlib_main_t *vm = vlib_get_main();
- udp_encap_t * udp_encap = udp_encap_get(uei);
+ vlib_main_t *vm = vlib_get_main ();
+ udp_encap_t *udp_encap = udp_encap_get (uei);
clib_bihash_kv_40_8_t kv;
- ip46_address_t src = {0};
- ip46_address_t dst = {0};
+ ip46_address_t src = { 0 };
+ ip46_address_t dst = { 0 };
u16 src_port = 0, dst_port = 0;
switch (proto)
{
case DPO_PROTO_IP4:
- ip46_address_set_ip4(&src, &(udp_encap->ue_hdrs.ip4.ue_ip4.src_address));
- ip46_address_set_ip4(&dst, &(udp_encap->ue_hdrs.ip4.ue_ip4.dst_address));
+ ip46_address_set_ip4 (&src,
+ &(udp_encap->ue_hdrs.ip4.ue_ip4.src_address));
+ ip46_address_set_ip4 (&dst,
+ &(udp_encap->ue_hdrs.ip4.ue_ip4.dst_address));
src_port = udp_encap->ue_hdrs.ip4.ue_udp.src_port;
dst_port = udp_encap->ue_hdrs.ip4.ue_udp.dst_port;
break;
case DPO_PROTO_IP6:
- ip46_address_set_ip6(&src, &(udp_encap->ue_hdrs.ip6.ue_ip6.src_address));
- ip46_address_set_ip6(&dst, &(udp_encap->ue_hdrs.ip6.ue_ip6.dst_address));
+ ip46_address_set_ip6 (&src,
+ &(udp_encap->ue_hdrs.ip6.ue_ip6.src_address));
+ ip46_address_set_ip6 (&dst,
+ &(udp_encap->ue_hdrs.ip6.ue_ip6.dst_address));
src_port = udp_encap->ue_hdrs.ip6.ue_udp.src_port;
dst_port = udp_encap->ue_hdrs.ip6.ue_udp.dst_port;
break;
@@ -89,43 +95,43 @@ void udp_tunnel_add_existing (index_t uei, dpo_proto_t proto)
break;
}
- clib_memcpy(&kv.key[0], &src, sizeof(ip46_address_t));
- clib_memcpy(&kv.key[2], &dst, sizeof(ip46_address_t));
- kv.key[4] = (src_port << 16) + dst_port ;
+ clib_memcpy (&kv.key[0], &src, sizeof (ip46_address_t));
+ clib_memcpy (&kv.key[2], &dst, sizeof (ip46_address_t));
+ kv.key[4] = (src_port << 16) + dst_port;
kv.value = uei;
- clib_bihash_add_del_40_8(&udp_tunnels_hashtb, &kv, 1);
+ clib_bihash_add_del_40_8 (&udp_tunnels_hashtb, &kv, 1);
if (proto == DPO_PROTO_IP4)
{
- udp_register_dst_port(vm, clib_net_to_host_u16(src_port), udp4_decap_node.index, 1);
+ udp_register_dst_port (vm, clib_net_to_host_u16 (src_port),
+ udp4_decap_node.index, 1);
}
else
{
- udp_register_dst_port(vm, clib_net_to_host_u16(src_port), udp6_decap_node.index, 0);
+ udp_register_dst_port (vm, clib_net_to_host_u16 (src_port),
+ udp6_decap_node.index, 0);
}
}
-int udp_tunnel_del (fib_protocol_t proto,
- index_t fib_index,
- const ip46_address_t * src_ip,
- const ip46_address_t * dst_ip,
- u16 src_port,
- u16 dst_port,
- udp_encap_fixup_flags_t flags)
+int
+udp_tunnel_del (fib_protocol_t proto, index_t fib_index,
+ const ip46_address_t *src_ip, const ip46_address_t *dst_ip,
+ u16 src_port, u16 dst_port, udp_encap_fixup_flags_t flags)
{
clib_bihash_kv_40_8_t kv;
- clib_memcpy(&kv.key[0], src_ip, sizeof(ip46_address_t));
- clib_memcpy(&kv.key[2], dst_ip, sizeof(ip46_address_t));
- kv.key[4] = (clib_host_to_net_u16(src_port) << 16) + clib_host_to_net_u16(dst_port);
+ clib_memcpy (&kv.key[0], src_ip, sizeof (ip46_address_t));
+ clib_memcpy (&kv.key[2], dst_ip, sizeof (ip46_address_t));
+ kv.key[4] =
+ (clib_host_to_net_u16 (src_port) << 16) + clib_host_to_net_u16 (dst_port);
clib_bihash_kv_40_8_t value;
int ret = clib_bihash_search_40_8 (&udp_tunnels_hashtb, &kv, &value);
if (ret == 0)
{
- udp_encap_unlock((u32)value.value);
- clib_bihash_add_del_40_8(&udp_tunnels_hashtb, &kv, 0);
+ udp_encap_unlock ((u32) value.value);
+ clib_bihash_add_del_40_8 (&udp_tunnels_hashtb, &kv, 0);
ret = HICN_ERROR_NONE;
}
else
@@ -136,62 +142,95 @@ int udp_tunnel_del (fib_protocol_t proto,
return ret;
}
-u32 udp_tunnel_get(const ip46_address_t * src_ip,
- const ip46_address_t * dst_ip,
- u16 src_port,
- u16 dst_port)
+u32
+udp_tunnel_get (const ip46_address_t *src_ip, const ip46_address_t *dst_ip,
+ u16 src_port, u16 dst_port)
{
clib_bihash_kv_40_8_t kv;
- clib_memcpy(&kv.key[0], src_ip, sizeof(ip46_address_t));
- clib_memcpy(&kv.key[2], dst_ip, sizeof(ip46_address_t));
+ clib_memcpy (&kv.key[0], src_ip, sizeof (ip46_address_t));
+ clib_memcpy (&kv.key[2], dst_ip, sizeof (ip46_address_t));
kv.key[4] = (src_port << 16) + dst_port;
clib_bihash_kv_40_8_t value;
int ret = clib_bihash_search_40_8 (&udp_tunnels_hashtb, &kv, &value);
- return ret == 0 ? (u32)value.value : UDP_TUNNEL_INVALID;
+ return ret == 0 ? (u32) value.value : UDP_TUNNEL_INVALID;
}
+u32
+udp_tunnel_get_create (const ip46_address_t *src_ip,
+ const ip46_address_t *dst_ip, u16 src_port,
+ u16 dst_port)
+{
+ u32 ret = udp_tunnel_get (src_ip, dst_ip, src_port, dst_port);
+ if (ret == UDP_TUNNEL_INVALID)
+ {
+ fib_protocol_t proto =
+ ip46_address_is_ip4 (src_ip) ? FIB_PROTOCOL_IP4 : FIB_PROTOCOL_IP6;
+
+ index_t fib_index = fib_table_find (proto, HICN_FIB_TABLE);
+
+ ret = udp_tunnel_add (
+ proto, fib_index, src_ip, dst_ip, clib_net_to_host_u16 (src_port),
+ clib_net_to_host_u16 (dst_port), UDP_ENCAP_FIXUP_NONE);
+ }
-void udp_tunnel_init()
+ return ret;
+}
+
+void
+udp_tunnel_set_face (hicn_face_id_t face_id, int isv4)
{
- clib_bihash_init_40_8(&udp_tunnels_hashtb, "udp encap table",
- 2048, 256 << 20);
+ hicn_face_t *face = NULL;
+ face = hicn_dpoi_get_from_idx (face_id);
+ ASSERT (face);
+ ASSERT (dpo_is_udp_encap (&face->dpo));
+
+ face->iface_next = isv4 ? HICN4_IFACE_OUTPUT_NEXT_UDP4_ENCAP :
+ HICN4_IFACE_OUTPUT_NEXT_UDP6_ENCAP;
+}
+
+void
+udp_tunnel_init ()
+{
+ clib_bihash_init_40_8 (&udp_tunnels_hashtb, "udp encap table", 2048,
+ 256 << 20);
/*
* Udp encap does not expose the dpo type when it registers.
* In the following we understand what is the dpo type for a udp_encap dpo.
*/
- ip46_address_t src = {0};
- ip46_address_t dst = {0};
+ ip46_address_t src = { 0 };
+ ip46_address_t dst = { 0 };
src.ip6.as_u8[15] = 1;
dst.ip6.as_u8[15] = 2;
u32 fib_index = fib_table_find (FIB_PROTOCOL_IP6, HICN_FIB_TABLE);
- u32 uei = udp_encap_add_and_lock(FIB_PROTOCOL_IP6, fib_index, &src, &dst, 4444, 4444, UDP_ENCAP_FIXUP_NONE);
+ u32 uei = udp_encap_add_and_lock (FIB_PROTOCOL_IP6, fib_index, &src, &dst,
+ 4444, 4444, UDP_ENCAP_FIXUP_NONE);
dpo_id_t temp = DPO_INVALID;
- udp_encap_contribute_forwarding(uei, DPO_PROTO_IP6, &temp);
+ udp_encap_contribute_forwarding (uei, DPO_PROTO_IP6, &temp);
dpo_type_udp_ip6 = temp.dpoi_type;
- udp_encap_unlock(uei);
+ udp_encap_unlock (uei);
dpo_id_t temp2 = DPO_INVALID;
fib_index = fib_table_find (FIB_PROTOCOL_IP4, HICN_FIB_TABLE);
- uei = udp_encap_add_and_lock(FIB_PROTOCOL_IP4, fib_index, &src, &dst, 4444, 4444, UDP_ENCAP_FIXUP_NONE);
- udp_encap_contribute_forwarding(uei, DPO_PROTO_IP4, &temp2);
+ uei = udp_encap_add_and_lock (FIB_PROTOCOL_IP4, fib_index, &src, &dst, 4444,
+ 4444, UDP_ENCAP_FIXUP_NONE);
+ udp_encap_contribute_forwarding (uei, DPO_PROTO_IP4, &temp2);
dpo_type_udp_ip4 = temp2.dpoi_type;
- udp_encap_unlock(uei);
+ udp_encap_unlock (uei);
}
static clib_error_t *
-udp_tunnel_command_fn (vlib_main_t * vm,
- unformat_input_t * main_input,
- vlib_cli_command_t * cmd)
+udp_tunnel_command_fn (vlib_main_t *vm, unformat_input_t *main_input,
+ vlib_cli_command_t *cmd)
{
unformat_input_t _line_input, *line_input = &_line_input;
clib_error_t *error = NULL;
- ip46_address_t src_ip = {0}, dst_ip = {0};
+ ip46_address_t src_ip = { 0 }, dst_ip = { 0 };
u32 table_id, src_port, dst_port;
fib_protocol_t fproto;
u8 is_del;
@@ -207,29 +246,27 @@ udp_tunnel_command_fn (vlib_main_t * vm,
{
while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
{
- if (unformat (line_input, "index %d", &uei))
- ;
- else if (unformat (line_input, "add"))
- is_del = 0;
- else if (unformat (line_input, "del"))
- is_del = 1;
- else if (unformat (line_input, "%U %U",
- unformat_ip4_address,
- &src_ip.ip4, unformat_ip4_address, &dst_ip.ip4))
- fproto = FIB_PROTOCOL_IP4;
- else if (unformat (line_input, "%U %U",
- unformat_ip6_address,
- &src_ip.ip6, unformat_ip6_address, &dst_ip.ip6))
- fproto = FIB_PROTOCOL_IP6;
- else if (unformat (line_input, "%d %d", &src_port, &dst_port))
- ;
- else if (unformat (line_input, "table-id %d", &table_id))
- ;
- else
- {
- error = unformat_parse_error (line_input);
- goto done;
- }
+ if (unformat (line_input, "index %d", &uei))
+ ;
+ else if (unformat (line_input, "add"))
+ is_del = 0;
+ else if (unformat (line_input, "del"))
+ is_del = 1;
+ else if (unformat (line_input, "%U %U", unformat_ip4_address,
+ &src_ip.ip4, unformat_ip4_address, &dst_ip.ip4))
+ fproto = FIB_PROTOCOL_IP4;
+ else if (unformat (line_input, "%U %U", unformat_ip6_address,
+ &src_ip.ip6, unformat_ip6_address, &dst_ip.ip6))
+ fproto = FIB_PROTOCOL_IP6;
+ else if (unformat (line_input, "%d %d", &src_port, &dst_port))
+ ;
+ else if (unformat (line_input, "table-id %d", &table_id))
+ ;
+ else
+ {
+ error = unformat_parse_error (line_input);
+ goto done;
+ }
}
}
@@ -242,37 +279,35 @@ udp_tunnel_command_fn (vlib_main_t * vm,
if (!is_del && fproto != FIB_PROTOCOL_MAX)
{
- uei = udp_tunnel_add(fproto, fib_index, &src_ip, &dst_ip, src_port, dst_port, UDP_ENCAP_FIXUP_NONE);
+ uei = udp_tunnel_add (fproto, fib_index, &src_ip, &dst_ip, src_port,
+ dst_port, UDP_ENCAP_FIXUP_NONE);
vlib_cli_output (vm, "udp-encap: %d\n", uei);
}
else if (is_del)
{
- int ret = udp_tunnel_del(fproto, fib_index, &src_ip, &dst_ip, src_port, dst_port, UDP_ENCAP_FIXUP_NONE);
- error = (ret == HICN_ERROR_NONE) ? 0 : clib_error_return (0, "%s\n",
- get_error_string
- (ret));
+ int ret = udp_tunnel_del (fproto, fib_index, &src_ip, &dst_ip, src_port,
+ dst_port, UDP_ENCAP_FIXUP_NONE);
+ error = (ret == HICN_ERROR_NONE) ?
+ 0 :
+ clib_error_return (0, "%s\n", get_error_string (ret));
}
else
{
error = clib_error_return (0, "specify some IP addresses");
}
- done:
+done:
unformat_free (line_input);
return error;
-
}
-/* *INDENT-OFF* */
-VLIB_CLI_COMMAND (udp_tunnel_command, static) =
- {
- .path = "udp tunnel",
- .short_help = "udp tunnel [add/del] src_address dst_address src_port dst_port",
- .function = udp_tunnel_command_fn,
- };
-/* *INDENT-ON* */
-
+VLIB_CLI_COMMAND (udp_tunnel_command, static) = {
+ .path = "udp tunnel",
+ .short_help =
+ "udp tunnel [add/del] src_address dst_address src_port dst_port",
+ .function = udp_tunnel_command_fn,
+};
/*
* fd.io coding-style-patch-verification: ON
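[Editor's note] For reference, the "udp tunnel" CLI registered above accepts commands of the following shape; the addresses, ports and optional table-id are examples only:

vpp# udp tunnel add 10.0.0.1 10.0.0.2 4444 4444
vpp# udp tunnel add b001::1 b001::2 5555 5555 table-id 0
vpp# udp tunnel del 10.0.0.1 10.0.0.2 4444 4444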
diff --git a/hicn-plugin/src/udp_tunnels/udp_tunnel.h b/hicn-plugin/src/udp_tunnels/udp_tunnel.h
index 2ec92056c..f7865f58a 100644
--- a/hicn-plugin/src/udp_tunnels/udp_tunnel.h
+++ b/hicn-plugin/src/udp_tunnels/udp_tunnel.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020 Cisco and/or its affiliates.
+ * Copyright (c) 2021 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -20,15 +20,17 @@
#include <vppinfra/error.h>
#include <vnet/udp/udp_encap.h>
+#include "../faces/face.h"
+
/**
* @file udp_tunnel.h
*
* This file implements bidirectional udp tunnels. Udp tunnels exploit
* the udp encap functionality in vpp. In particular, a udp tunnel creates
* an udp encap object with the information for encapsulating packets and it
- * implements the udp decap node. The udp decap node checks if a udp tunnel exists
- * before performing the decapsulation. If the tunnel does not exist the packet
- * is dropped.
+ * implements the udp decap node. The udp decap node checks if a udp tunnel
+ * exists before performing the decapsulation. If the tunnel does not exist,
+ * the packet is dropped.
*/
#define UDP_TUNNEL_INVALID ~0
@@ -52,13 +54,9 @@ extern vlib_node_registration_t udp6_decap_node;
*
* @return return the id of the tunnel
*/
-u32 udp_tunnel_add (fib_protocol_t proto,
- index_t fib_index,
- const ip46_address_t * src_ip,
- const ip46_address_t * dst_ip,
- u16 src_port,
- u16 dst_port,
- udp_encap_fixup_flags_t flags);
+u32 udp_tunnel_add (fib_protocol_t proto, index_t fib_index,
+ const ip46_address_t *src_ip, const ip46_address_t *dst_ip,
+ u16 src_port, u16 dst_port, udp_encap_fixup_flags_t flags);
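As a minimal sketch of how plugin code could drive this API to set up an IPv4
tunnel (the endpoint addresses, the ports, and the use of the default FIB index
0 are assumptions for illustration; only the udp_tunnel_add signature itself
comes from this header):

#include "udp_tunnel.h"

/* Hypothetical helper: create an IPv4 hICN UDP tunnel in the default FIB. */
static u32
example_create_udp4_tunnel (void)
{
  ip46_address_t src = { 0 }, dst = { 0 };

  /* Illustrative endpoints only. */
  src.ip4.as_u32 = clib_host_to_net_u32 (0x0a000001); /* 10.0.0.1 */
  dst.ip4.as_u32 = clib_host_to_net_u32 (0x0a000002); /* 10.0.0.2 */

  /* Same fixup flags the CLI handler above passes. */
  return udp_tunnel_add (FIB_PROTOCOL_IP4, /* fib_index */ 0, &src, &dst,
                         4444, 4444, UDP_ENCAP_FIXUP_NONE);
}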
/**
* @brief Retrieve the index of a udp tunnel (same id of the udp encap)
@@ -70,10 +68,21 @@ u32 udp_tunnel_add (fib_protocol_t proto,
*
* @return id of the udp tunnel/encap
*/
-u32 udp_tunnel_get(const ip46_address_t * src_ip,
- const ip46_address_t * dst_ip,
- u16 src_port,
- u16 dst_port);
+u32 udp_tunnel_get (const ip46_address_t *src_ip, const ip46_address_t *dst_ip,
+ u16 src_port, u16 dst_port);
+
+/**
+ * @brief Get udp tunnel UEI. Creates the tunnel if it does not already exist.
+ *
+ * @param src_ip source address of the tunnel
+ * @param dst_ip destination address of the tunnel
+ * @param src_port source port
+ * @param dst_port destination port
+ * @return id of the udp tunnel/encap
+ */
+u32 udp_tunnel_get_create (const ip46_address_t *src_ip,
+ const ip46_address_t *dst_ip, u16 src_port,
+ u16 dst_port);
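A sketch of the intended get-or-create usage (the wrapper name and the
UDP_TUNNEL_INVALID check are assumptions; only the declaration above is taken
from this header):

/* Hypothetical wrapper: resolve the encap index for a pair of endpoints,
 * creating the tunnel on first use. */
static u32
example_resolve_tunnel (const ip46_address_t *src, const ip46_address_t *dst,
                        u16 sport, u16 dport)
{
  u32 uei = udp_tunnel_get_create (src, dst, sport, dport);
  ASSERT (uei != UDP_TUNNEL_INVALID);
  return uei;
}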
/**
* @brief Delete a udp tunnel
@@ -89,13 +98,9 @@ u32 udp_tunnel_get(const ip46_address_t * src_ip,
* @return HICN_ERROR_UDP_TUNNEL_NOT_FOUND if the tunnel was not found
* or HICN_ERROR_NONE if the tunnel has been deleted
*/
-int udp_tunnel_del (fib_protocol_t proto,
- index_t fib_index,
- const ip46_address_t * src_ip,
- const ip46_address_t * dst_ip,
- u16 src_port,
- u16 dst_port,
- udp_encap_fixup_flags_t flags);
+int udp_tunnel_del (fib_protocol_t proto, index_t fib_index,
+ const ip46_address_t *src_ip, const ip46_address_t *dst_ip,
+ u16 src_port, u16 dst_port, udp_encap_fixup_flags_t flags);
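And a matching teardown sketch, using the error codes documented above
(endpoints, ports, and the default FIB index are again illustrative):

/* Hypothetical cleanup: delete the tunnel, tolerating the not-found case. */
static void
example_delete_udp4_tunnel (const ip46_address_t *src,
                            const ip46_address_t *dst)
{
  int rv = udp_tunnel_del (FIB_PROTOCOL_IP4, /* fib_index */ 0, src, dst,
                           4444, 4444, UDP_ENCAP_FIXUP_NONE);
  if (rv != HICN_ERROR_NONE && rv != HICN_ERROR_UDP_TUNNEL_NOT_FOUND)
    clib_warning ("udp_tunnel_del failed: %d", rv);
}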
/**
* @brief Add a udp tunnel from an existing udp encap
@@ -106,9 +111,27 @@ int udp_tunnel_del (fib_protocol_t proto,
void udp_tunnel_add_existing (index_t uei, dpo_proto_t proto);
/**
+ * @brief Check if DPO is UDP encap
+ */
+always_inline int
+dpo_is_udp_encap (const dpo_id_t *dpo)
+{
+ return dpo->dpoi_type == dpo_type_udp_ip4 ||
+ dpo->dpoi_type == dpo_type_udp_ip6;
+}
+
+always_inline dpo_proto_t
+dpo_udp_encap_get_proto (const dpo_id_t *dpo)
+{
+ return dpo->dpoi_type == dpo_type_udp_ip4 ? DPO_PROTO_IP4 : DPO_PROTO_IP6;
+}
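To illustrate how the two inline helpers above compose (a sketch; the predicate
name is hypothetical, and dpo_id_t comes from vnet/dpo/dpo.h):

/* Hypothetical predicate: true only for a UDP encap DPO carrying IPv4. */
always_inline int
example_dpo_is_udp4_encap (const dpo_id_t *dpo)
{
  return dpo_is_udp_encap (dpo) &&
         dpo_udp_encap_get_proto (dpo) == DPO_PROTO_IP4;
}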
+
+void udp_tunnel_set_face (hicn_face_id_t face_id, int isv4);
+
+/**
* @brief Init the udp tunnel module
*
*/
-void udp_tunnel_init();
+void udp_tunnel_init ();
#endif
diff --git a/hicn-plugin/src/utils.h b/hicn-plugin/src/utils.h
index 689942ab6..7a3492732 100644
--- a/hicn-plugin/src/utils.h
+++ b/hicn-plugin/src/utils.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Copyright (c) 2021 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -30,7 +30,7 @@
* @param name hicn name to print
*/
always_inline void
-hicn_print_name6 (hicn_name_t * name)
+hicn_print_name6 (hicn_name_t *name)
{
u8 *s0;
s0 = format (0, "Source addr %U, seq_number %u", format_ip6_address,
@@ -46,14 +46,13 @@ hicn_print_name6 (hicn_name_t * name)
* @param hicn0 hICN header to print
*/
always_inline void
-hicn_print6 (hicn_header_t * hicn0)
+hicn_print6 (hicn_header_t *hicn0)
{
vlib_main_t *vm = vlib_get_main ();
u8 *s0;
s0 = format (0, "Source addr %U:%u, dest addr %U:%u", format_ip6_address,
- &(hicn0->v6.ip.saddr),
- clib_net_to_host_u32 (hicn0->v6.tcp.seq), format_ip6_address,
- &(hicn0->v6.ip.daddr),
+ &(hicn0->v6.ip.saddr), clib_net_to_host_u32 (hicn0->v6.tcp.seq),
+ format_ip6_address, &(hicn0->v6.ip.daddr),
clib_net_to_host_u32 (hicn0->v6.tcp.seq));
vlib_cli_output (vm, "%s\n", s0);
@@ -65,13 +64,12 @@ hicn_print6 (hicn_header_t * hicn0)
* @param hicn0 hICN header to print
*/
always_inline void
-hicn_print4 (hicn_header_t * hicn0)
+hicn_print4 (hicn_header_t *hicn0)
{
u8 *s0;
s0 = format (0, "Source addr %U:%u, dest addr %U:%u", format_ip4_address,
- &(hicn0->v4.ip.saddr),
- clib_net_to_host_u32 (hicn0->v4.tcp.seq), format_ip4_address,
- &(hicn0->v4.ip.daddr),
+ &(hicn0->v4.ip.saddr), clib_net_to_host_u32 (hicn0->v4.tcp.seq),
+ format_ip4_address, &(hicn0->v4.ip.daddr),
clib_net_to_host_u32 (hicn0->v4.tcp.seq));
printf ("%s\n", s0);
@@ -79,7 +77,6 @@ hicn_print4 (hicn_header_t * hicn0)
#endif /* // __HICN_UTILS_H__ */
-
/*
* fd.io coding-style-patch-verification: ON
*