Diffstat (limited to 'libtransport/src/io_modules/memif')
-rw-r--r--  libtransport/src/io_modules/memif/CMakeLists.txt            56
-rw-r--r--  libtransport/src/io_modules/memif/hicn_vapi.c               229
-rw-r--r--  libtransport/src/io_modules/memif/hicn_vapi.h               82
-rw-r--r--  libtransport/src/io_modules/memif/memif_connector.cc        493
-rw-r--r--  libtransport/src/io_modules/memif/memif_connector.h         130
-rw-r--r--  libtransport/src/io_modules/memif/memif_vapi.c              127
-rw-r--r--  libtransport/src/io_modules/memif/memif_vapi.h              54
-rw-r--r--  libtransport/src/io_modules/memif/vpp_forwarder_module.cc   263
-rw-r--r--  libtransport/src/io_modules/memif/vpp_forwarder_module.h    83
9 files changed, 1517 insertions, 0 deletions
diff --git a/libtransport/src/io_modules/memif/CMakeLists.txt b/libtransport/src/io_modules/memif/CMakeLists.txt
new file mode 100644
index 000000000..c8a930e7b
--- /dev/null
+++ b/libtransport/src/io_modules/memif/CMakeLists.txt
@@ -0,0 +1,56 @@
+# Copyright (c) 2021 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+cmake_minimum_required(VERSION 3.5 FATAL_ERROR)
+
+find_package(Vpp REQUIRED)
+find_package(Libmemif REQUIRED)
+
+if(CMAKE_SOURCE_DIR STREQUAL PROJECT_SOURCE_DIR)
+ find_package(HicnPlugin REQUIRED)
+ find_package(SafeVapi REQUIRED)
+else()
+ list(APPEND DEPENDENCIES
+ ${SAFE_VAPI_SHARED}
+ )
+endif()
+
+list(APPEND MODULE_HEADER_FILES
+ ${CMAKE_CURRENT_SOURCE_DIR}/hicn_vapi.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/memif_connector.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/memif_vapi.h
+ ${CMAKE_CURRENT_SOURCE_DIR}/vpp_forwarder_module.h
+)
+
+list(APPEND MODULE_SOURCE_FILES
+ ${CMAKE_CURRENT_SOURCE_DIR}/hicn_vapi.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/memif_connector.cc
+ ${CMAKE_CURRENT_SOURCE_DIR}/memif_vapi.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/vpp_forwarder_module.cc
+)
+
+build_module(memif_module
+ SHARED
+ SOURCES ${MODULE_SOURCE_FILES}
+ DEPENDS ${DEPENDENCIES}
+ COMPONENT lib${LIBTRANSPORT}
+ LINK_LIBRARIES ${LIBMEMIF_LIBRARIES} ${SAFE_VAPI_LIBRARIES}
+ INCLUDE_DIRS
+ ${LIBTRANSPORT_INCLUDE_DIRS}
+ ${LIBTRANSPORT_INTERNAL_INCLUDE_DIRS}
+ ${VPP_INCLUDE_DIRS}
+ ${LIBMEMIF_INCLUDE_DIRS}
+ ${SAFE_VAPI_INCLUDE_DIRS}
+ DEFINITIONS ${COMPILER_DEFINITIONS}
+ COMPILE_OPTIONS ${COMPILE_FLAGS}
+)
diff --git a/libtransport/src/io_modules/memif/hicn_vapi.c b/libtransport/src/io_modules/memif/hicn_vapi.c
new file mode 100644
index 000000000..b83a36b47
--- /dev/null
+++ b/libtransport/src/io_modules/memif/hicn_vapi.c
@@ -0,0 +1,229 @@
+/*
+ * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <hicn/transport/config.h>
+#include <hicn/transport/utils/log.h>
+#include <io_modules/memif/hicn_vapi.h>
+
+#define HICN_VPP_PLUGIN
+#include <hicn/name.h>
+#undef HICN_VPP_PLUGIN
+
+#include <vapi/hicn.api.vapi.h>
+#include <vapi/ip.api.vapi.h>
+#include <vapi/vapi_safe.h>
+#include <vlib/vlib.h>
+#include <vlibapi/api.h>
+#include <vlibmemory/api.h>
+#include <vnet/ip/format.h>
+#include <vnet/ip/ip4_packet.h>
+#include <vnet/ip/ip6_packet.h>
+#include <vpp_plugins/hicn/error.h>
+#include <vppinfra/error.h>
+
+/////////////////////////////////////////////////////
+const char *HICN_ERROR_STRING[] = {
+#define _(a, b, c) c,
+ foreach_hicn_error
+#undef _
+};
+/////////////////////////////////////////////////////
+
+/*********** Missing symbol in the VPP libraries: provide a stub. ***********/
+u8 *format_vl_api_address_union(u8 *s, va_list *args) { return NULL; }
+
+/*********************************************************************************/
+
+DEFINE_VAPI_MSG_IDS_HICN_API_JSON
+DEFINE_VAPI_MSG_IDS_IP_API_JSON
+
+static vapi_error_e register_prod_app_cb(
+ vapi_ctx_t ctx, void *callback_ctx, vapi_error_e rv, bool is_last,
+ vapi_payload_hicn_api_register_prod_app_reply *reply) {
+ hicn_producer_output_params *output_params =
+ (hicn_producer_output_params *)callback_ctx;
+
+ if (reply == NULL) return rv;
+
+ output_params->cs_reserved = reply->cs_reserved;
+ output_params->prod_addr = (ip_address_t *)malloc(sizeof(ip_address_t));
+ memset(output_params->prod_addr, 0, sizeof(ip_address_t));
+ if (reply->prod_addr.af == ADDRESS_IP6)
+ memcpy(&output_params->prod_addr->v6, reply->prod_addr.un.ip6,
+ sizeof(ip6_address_t));
+ else
+ memcpy(&output_params->prod_addr->v4, reply->prod_addr.un.ip4,
+ sizeof(ip4_address_t));
+ output_params->face_id = reply->faceid;
+
+ return reply->retval;
+}
+
+int hicn_vapi_register_prod_app(vapi_ctx_t ctx,
+ hicn_producer_input_params *input_params,
+ hicn_producer_output_params *output_params) {
+ vapi_lock();
+ vapi_msg_hicn_api_register_prod_app *msg =
+ vapi_alloc_hicn_api_register_prod_app(ctx);
+
+ if (ip46_address_is_ip4((ip46_address_t *)&input_params->prefix->address)) {
+ memcpy(&msg->payload.prefix.address.un.ip4, &input_params->prefix->address,
+ sizeof(ip4_address_t));
+ msg->payload.prefix.address.af = ADDRESS_IP4;
+ } else {
+ memcpy(&msg->payload.prefix.address.un.ip6, &input_params->prefix->address,
+ sizeof(ip6_address_t));
+ msg->payload.prefix.address.af = ADDRESS_IP6;
+ }
+ msg->payload.prefix.len = input_params->prefix->len;
+
+ msg->payload.swif = input_params->swif;
+ msg->payload.cs_reserved = input_params->cs_reserved;
+
+ int ret = vapi_hicn_api_register_prod_app(ctx, msg, register_prod_app_cb,
+ output_params);
+ vapi_unlock();
+ return ret;
+}
+
+static vapi_error_e face_prod_del_cb(
+ vapi_ctx_t ctx, void *callback_ctx, vapi_error_e rv, bool is_last,
+ vapi_payload_hicn_api_face_prod_del_reply *reply) {
+ if (reply == NULL) return rv;
+
+ return reply->retval;
+}
+
+int hicn_vapi_face_prod_del(vapi_ctx_t ctx,
+ hicn_del_face_app_input_params *input_params) {
+ vapi_lock();
+ vapi_msg_hicn_api_face_prod_del *msg = vapi_alloc_hicn_api_face_prod_del(ctx);
+
+ msg->payload.faceid = input_params->face_id;
+
+ int ret = vapi_hicn_api_face_prod_del(ctx, msg, face_prod_del_cb, NULL);
+ vapi_unlock();
+ return ret;
+}
+
+static vapi_error_e register_cons_app_cb(
+ vapi_ctx_t ctx, void *callback_ctx, vapi_error_e rv, bool is_last,
+ vapi_payload_hicn_api_register_cons_app_reply *reply) {
+ hicn_consumer_output_params *output_params =
+ (hicn_consumer_output_params *)callback_ctx;
+
+ if (reply == NULL) return rv;
+
+ output_params->src6 = (ip_address_t *)malloc(sizeof(ip_address_t));
+ output_params->src4 = (ip_address_t *)malloc(sizeof(ip_address_t));
+ memset(output_params->src6, 0, sizeof(ip_address_t));
+ memset(output_params->src4, 0, sizeof(ip_address_t));
+ memcpy(&output_params->src6->v6, &reply->src_addr6.un.ip6,
+ sizeof(ip6_address_t));
+ memcpy(&output_params->src4->v4, &reply->src_addr4.un.ip4,
+ sizeof(ip4_address_t));
+
+ output_params->face_id1 = reply->faceid1;
+ output_params->face_id2 = reply->faceid2;
+
+ return reply->retval;
+}
+
+int hicn_vapi_register_cons_app(vapi_ctx_t ctx,
+ hicn_consumer_input_params *input_params,
+ hicn_consumer_output_params *output_params) {
+ vapi_lock();
+ vapi_msg_hicn_api_register_cons_app *msg =
+ vapi_alloc_hicn_api_register_cons_app(ctx);
+
+ msg->payload.swif = input_params->swif;
+
+ int ret = vapi_hicn_api_register_cons_app(ctx, msg, register_cons_app_cb,
+ output_params);
+ vapi_unlock();
+ return ret;
+}
+
+static vapi_error_e face_cons_del_cb(
+ vapi_ctx_t ctx, void *callback_ctx, vapi_error_e rv, bool is_last,
+ vapi_payload_hicn_api_face_cons_del_reply *reply) {
+ if (reply == NULL) return rv;
+
+ return reply->retval;
+}
+
+int hicn_vapi_face_cons_del(vapi_ctx_t ctx,
+ hicn_del_face_app_input_params *input_params) {
+ vapi_lock();
+ vapi_msg_hicn_api_face_cons_del *msg = vapi_alloc_hicn_api_face_cons_del(ctx);
+
+ msg->payload.faceid = input_params->face_id;
+
+ int ret = vapi_hicn_api_face_cons_del(ctx, msg, face_cons_del_cb, NULL);
+ vapi_unlock();
+ return ret;
+}
+
+static vapi_error_e register_route_cb(
+ vapi_ctx_t ctx, void *callback_ctx, vapi_error_e rv, bool is_last,
+ vapi_payload_ip_route_add_del_reply *reply) {
+ if (reply == NULL) return rv;
+
+ return reply->retval;
+}
+
+int hicn_vapi_register_route(vapi_ctx_t ctx,
+ hicn_producer_set_route_params *input_params) {
+ vapi_lock();
+ vapi_msg_ip_route_add_del *msg = vapi_alloc_ip_route_add_del(ctx, 1);
+
+ msg->payload.is_add = 1;
+ if (ip46_address_is_ip4((ip46_address_t *)(input_params->prod_addr))) {
+ memcpy(&msg->payload.route.prefix.address.un.ip4,
+ &input_params->prefix->address.v4, sizeof(ip4_address_t));
+ msg->payload.route.prefix.address.af = ADDRESS_IP4;
+ msg->payload.route.prefix.len = input_params->prefix->len;
+ } else {
+ memcpy(&msg->payload.route.prefix.address.un.ip6,
+ &input_params->prefix->address.v6, sizeof(ip6_address_t));
+ msg->payload.route.prefix.address.af = ADDRESS_IP6;
+ msg->payload.route.prefix.len = input_params->prefix->len;
+ }
+
+ msg->payload.route.paths[0].sw_if_index = ~0;
+ msg->payload.route.paths[0].table_id = 0;
+ if (ip46_address_is_ip4((ip46_address_t *)(input_params->prod_addr))) {
+ memcpy(&(msg->payload.route.paths[0].nh.address.ip4),
+ input_params->prod_addr->v4.as_u8, sizeof(ip4_address_t));
+ msg->payload.route.paths[0].proto = FIB_API_PATH_NH_PROTO_IP4;
+ } else {
+ memcpy(&(msg->payload.route.paths[0].nh.address.ip6),
+ input_params->prod_addr->v6.as_u8, sizeof(ip6_address_t));
+ msg->payload.route.paths[0].proto = FIB_API_PATH_NH_PROTO_IP6;
+ }
+
+ msg->payload.route.paths[0].type = FIB_API_PATH_FLAG_NONE;
+ msg->payload.route.paths[0].flags = FIB_API_PATH_FLAG_NONE;
+
+  int ret = vapi_ip_route_add_del(ctx, msg, register_route_cb, NULL);
+
+ vapi_unlock();
+ return ret;
+}
+
+char *hicn_vapi_get_error_string(int ret_val) {
+ return get_error_string(ret_val);
+}
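
For orientation, a minimal consumer-side sketch of the wrappers implemented above (not part of the patch). It assumes a vapi_ctx_t already connected through vapi_connect_safe(), as done in vpp_forwarder_module.cc further below; the reply callback allocates output.src4/src6, which the caller should eventually free.

    // Hypothetical sketch: register a consumer application on memif interface
    // `swif`, then remove its two faces (IPv4 and IPv6) again.
    #include <io_modules/memif/hicn_vapi.h>

    static int consumer_example(vapi_ctx_t ctx, uint32_t swif) {
      hicn_consumer_input_params input = {0};
      hicn_consumer_output_params output = {0};
      input.swif = swif;

      int ret = hicn_vapi_register_cons_app(ctx, &input, &output);
      if (ret < 0) return ret;

      // ... use output.src4 / output.src6 as the consumer locators ...

      hicn_del_face_app_input_params del = {0};
      del.face_id = output.face_id1;
      hicn_vapi_face_cons_del(ctx, &del);
      del.face_id = output.face_id2;
      return hicn_vapi_face_cons_del(ctx, &del);
    }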
diff --git a/libtransport/src/io_modules/memif/hicn_vapi.h b/libtransport/src/io_modules/memif/hicn_vapi.h
new file mode 100644
index 000000000..e94c97749
--- /dev/null
+++ b/libtransport/src/io_modules/memif/hicn_vapi.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2017-2020 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <hicn/transport/config.h>
+#include <hicn/util/ip_address.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <vapi/vapi.h>
+
+#include "stdint.h"
+
+typedef struct {
+ ip_prefix_t* prefix;
+ uint32_t swif;
+ uint32_t cs_reserved;
+} hicn_producer_input_params;
+
+typedef struct {
+ uint32_t swif;
+} hicn_consumer_input_params;
+
+typedef struct {
+ uint32_t face_id;
+} hicn_del_face_app_input_params;
+
+typedef struct {
+ uint32_t cs_reserved;
+ ip_address_t* prod_addr;
+ uint32_t face_id;
+} hicn_producer_output_params;
+
+typedef struct {
+ ip_address_t* src4;
+ ip_address_t* src6;
+ uint32_t face_id1;
+ uint32_t face_id2;
+} hicn_consumer_output_params;
+
+typedef struct {
+ ip_prefix_t* prefix;
+ ip_address_t* prod_addr;
+} hicn_producer_set_route_params;
+
+int hicn_vapi_register_prod_app(vapi_ctx_t ctx,
+ hicn_producer_input_params* input_params,
+ hicn_producer_output_params* output_params);
+
+int hicn_vapi_register_cons_app(vapi_ctx_t ctx,
+ hicn_consumer_input_params* input_params,
+ hicn_consumer_output_params* output_params);
+
+int hicn_vapi_register_route(vapi_ctx_t ctx,
+ hicn_producer_set_route_params* input_params);
+
+int hicn_vapi_face_cons_del(vapi_ctx_t ctx,
+ hicn_del_face_app_input_params* input_params);
+
+int hicn_vapi_face_prod_del(vapi_ctx_t ctx,
+ hicn_del_face_app_input_params* input_params);
+
+char* hicn_vapi_get_error_string(int ret_val);
+
+#ifdef __cplusplus
+}
+#endif
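
To complement the declarations above, here is a hedged producer-side sketch mirroring how VPPForwarderModule::registerRoute() drives these calls (not part of the patch). It assumes a connected vapi_ctx_t and a zero-initialized hicn_producer_output_params owned by the caller; prod_addr is allocated by the reply callback.

    // Hypothetical sketch: announce `prefix` on memif interface `swif`.
    // The first call registers the producer application face; subsequent
    // calls only add a route towards the locator returned by the plugin.
    #include <io_modules/memif/hicn_vapi.h>

    static int announce_prefix(vapi_ctx_t ctx, ip_prefix_t *prefix,
                               uint32_t swif,
                               hicn_producer_output_params *prod /* zeroed */) {
      if (prod->prod_addr == nullptr) {
        hicn_producer_input_params input = {0};
        input.prefix = prefix;
        input.swif = swif;
        input.cs_reserved = 0;
        return hicn_vapi_register_prod_app(ctx, &input, prod);
      }

      hicn_producer_set_route_params route = {0};
      route.prefix = prefix;
      route.prod_addr = prod->prod_addr;
      return hicn_vapi_register_route(ctx, &route);
    }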
diff --git a/libtransport/src/io_modules/memif/memif_connector.cc b/libtransport/src/io_modules/memif/memif_connector.cc
new file mode 100644
index 000000000..4a688d68f
--- /dev/null
+++ b/libtransport/src/io_modules/memif/memif_connector.cc
@@ -0,0 +1,493 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <hicn/transport/errors/not_implemented_exception.h>
+#include <io_modules/memif/memif_connector.h>
+#include <sys/epoll.h>
+
+#include <cstdlib>
+
+extern "C" {
+#include <memif/libmemif.h>
+};
+
+#define CANCEL_TIMER 1
+
+namespace transport {
+
+namespace core {
+
+struct memif_connection {
+ uint16_t index;
+  /* memif connection handle */
+ memif_conn_handle_t conn;
+ /* transmit queue id */
+ uint16_t tx_qid;
+ /* tx buffers */
+ memif_buffer_t *tx_bufs;
+ /* allocated tx buffers counter */
+ /* number of tx buffers pointing to shared memory */
+ uint16_t tx_buf_num;
+ /* rx buffers */
+ memif_buffer_t *rx_bufs;
+  /* allocated rx buffers counter */
+ /* number of rx buffers pointing to shared memory */
+ uint16_t rx_buf_num;
+ /* interface ip address */
+ uint8_t ip_addr[4];
+};
+
+std::once_flag MemifConnector::flag_;
+utils::EpollEventReactor MemifConnector::main_event_reactor_;
+
+MemifConnector::MemifConnector(PacketReceivedCallback &&receive_callback,
+ PacketSentCallback &&packet_sent,
+ OnCloseCallback &&close_callback,
+ OnReconnectCallback &&on_reconnect,
+ asio::io_service &io_service,
+ std::string app_name)
+ : Connector(std::move(receive_callback), std::move(packet_sent),
+ std::move(close_callback), std::move(on_reconnect)),
+ memif_worker_(nullptr),
+ timer_set_(false),
+ send_timer_(std::make_unique<utils::FdDeadlineTimer>(event_reactor_)),
+ disconnect_timer_(
+ std::make_unique<utils::FdDeadlineTimer>(event_reactor_)),
+ io_service_(io_service),
+ memif_connection_(std::make_unique<memif_connection_t>()),
+ tx_buf_counter_(0),
+ is_reconnection_(false),
+ data_available_(false),
+ app_name_(app_name),
+ socket_filename_("") {
+ std::call_once(MemifConnector::flag_, &MemifConnector::init, this);
+}
+
+MemifConnector::~MemifConnector() { close(); }
+
+void MemifConnector::init() {
+ /* initialize memory interface */
+ int err = memif_init(controlFdUpdate, const_cast<char *>(app_name_.c_str()),
+ nullptr, nullptr, nullptr);
+
+ if (TRANSPORT_EXPECT_FALSE(err != MEMIF_ERR_SUCCESS)) {
+ TRANSPORT_LOGE("memif_init: %s", memif_strerror(err));
+ }
+}
+
+void MemifConnector::connect(uint32_t memif_id, long memif_mode) {
+ state_ = State::CONNECTING;
+
+ memif_id_ = memif_id;
+ socket_filename_ = "/run/vpp/memif.sock";
+
+ createMemif(memif_id, memif_mode, nullptr);
+
+ work_ = std::make_unique<asio::io_service::work>(io_service_);
+
+ while (state_ != State::CONNECTED) {
+ MemifConnector::main_event_reactor_.runOneEvent();
+ }
+
+ int err;
+
+ /* get interrupt queue id */
+ int fd = -1;
+ err = memif_get_queue_efd(memif_connection_->conn, 0, &fd);
+ if (TRANSPORT_EXPECT_FALSE(err != MEMIF_ERR_SUCCESS)) {
+ TRANSPORT_LOGE("memif_get_queue_efd: %s", memif_strerror(err));
+ return;
+ }
+
+ // Remove fd from main epoll
+ main_event_reactor_.delFileDescriptor(fd);
+
+ // Add fd to epoll of instance
+ event_reactor_.addFileDescriptor(
+ fd, EPOLLIN, [this](const utils::Event &evt) -> int {
+ return onInterrupt(memif_connection_->conn, this, 0);
+ });
+
+ memif_worker_ = std::make_unique<std::thread>(
+ std::bind(&MemifConnector::threadMain, this));
+}
+
+int MemifConnector::createMemif(uint32_t index, uint8_t mode, char *s) {
+ memif_connection_t *c = memif_connection_.get();
+
+ /* setting memif connection arguments */
+ memif_conn_args_t args;
+ memset(&args, 0, sizeof(args));
+
+ args.is_master = mode;
+ args.log2_ring_size = MEMIF_LOG2_RING_SIZE;
+ args.buffer_size = MEMIF_BUF_SIZE;
+ args.num_s2m_rings = 1;
+ args.num_m2s_rings = 1;
+ strncpy((char *)args.interface_name, IF_NAME, strlen(IF_NAME) + 1);
+ args.mode = memif_interface_mode_t::MEMIF_INTERFACE_MODE_IP;
+
+ int err;
+
+ err = memif_create_socket(&args.socket, socket_filename_.c_str(), nullptr);
+
+ if (TRANSPORT_EXPECT_FALSE(err != MEMIF_ERR_SUCCESS)) {
+ throw errors::RuntimeException(memif_strerror(err));
+ }
+
+ args.interface_id = index;
+  /* The last argument of memif_create (void *private_ctx) is used by the user
+     to identify the connection; this context is returned with the callbacks. */
+
+ /* default interrupt */
+ if (s == nullptr) {
+ err = memif_create(&c->conn, &args, onConnect, onDisconnect, onInterrupt,
+ this);
+
+ if (TRANSPORT_EXPECT_FALSE(err != MEMIF_ERR_SUCCESS)) {
+ throw errors::RuntimeException(memif_strerror(err));
+ }
+ }
+
+ c->index = (uint16_t)index;
+ c->tx_qid = 0;
+ /* alloc memif buffers */
+ c->rx_buf_num = 0;
+ c->rx_bufs = static_cast<memif_buffer_t *>(
+ malloc(sizeof(memif_buffer_t) * MAX_MEMIF_BUFS));
+ c->tx_buf_num = 0;
+ c->tx_bufs = static_cast<memif_buffer_t *>(
+ malloc(sizeof(memif_buffer_t) * MAX_MEMIF_BUFS));
+
+ // memif_set_rx_mode (c->conn, MEMIF_RX_MODE_POLLING, 0);
+
+ return 0;
+}
+
+int MemifConnector::deleteMemif() {
+ memif_connection_t *c = memif_connection_.get();
+
+ if (c->rx_bufs) {
+ free(c->rx_bufs);
+ }
+
+ c->rx_bufs = nullptr;
+ c->rx_buf_num = 0;
+
+ if (c->tx_bufs) {
+ free(c->tx_bufs);
+ }
+
+ c->tx_bufs = nullptr;
+ c->tx_buf_num = 0;
+
+ int err;
+  /* disconnect, then delete the memif connection */
+ err = memif_delete(&c->conn);
+
+ if (TRANSPORT_EXPECT_FALSE(err != MEMIF_ERR_SUCCESS)) {
+ TRANSPORT_LOGE("memif_delete: %s", memif_strerror(err));
+ }
+
+ if (TRANSPORT_EXPECT_FALSE(c->conn != nullptr)) {
+ TRANSPORT_LOGE("memif delete fail");
+ }
+
+ return 0;
+}
+
+int MemifConnector::controlFdUpdate(int fd, uint8_t events, void *private_ctx) {
+ /* convert memif event definitions to epoll events */
+ if (events & MEMIF_FD_EVENT_DEL) {
+ return MemifConnector::main_event_reactor_.delFileDescriptor(fd);
+ }
+
+ uint32_t evt = 0;
+
+ if (events & MEMIF_FD_EVENT_READ) {
+ evt |= EPOLLIN;
+ }
+
+ if (events & MEMIF_FD_EVENT_WRITE) {
+ evt |= EPOLLOUT;
+ }
+
+ if (events & MEMIF_FD_EVENT_MOD) {
+ return MemifConnector::main_event_reactor_.modFileDescriptor(fd, evt);
+ }
+
+ return MemifConnector::main_event_reactor_.addFileDescriptor(
+ fd, evt, [](const utils::Event &evt) -> int {
+ uint32_t event = 0;
+ int memif_err = 0;
+
+ if (evt.events & EPOLLIN) {
+ event |= MEMIF_FD_EVENT_READ;
+ }
+
+ if (evt.events & EPOLLOUT) {
+ event |= MEMIF_FD_EVENT_WRITE;
+ }
+
+ if (evt.events & EPOLLERR) {
+ event |= MEMIF_FD_EVENT_ERROR;
+ }
+
+ memif_err = memif_control_fd_handler(evt.data.fd, event);
+
+ if (TRANSPORT_EXPECT_FALSE(memif_err != MEMIF_ERR_SUCCESS)) {
+ TRANSPORT_LOGE("memif_control_fd_handler: %s",
+ memif_strerror(memif_err));
+ }
+
+ return 0;
+ });
+}
+
+int MemifConnector::bufferAlloc(long n, uint16_t qid) {
+ memif_connection_t *c = memif_connection_.get();
+ int err;
+ uint16_t r;
+  /* set data pointer to shared memory and set buffer_len to shared memory
+   * buffer len */
+ err = memif_buffer_alloc(c->conn, qid, c->tx_bufs, n, &r, 2000);
+
+ if (TRANSPORT_EXPECT_FALSE(err != MEMIF_ERR_SUCCESS)) {
+ TRANSPORT_LOGE("memif_buffer_alloc: %s", memif_strerror(err));
+ return -1;
+ }
+
+ c->tx_buf_num += r;
+ return r;
+}
+
+int MemifConnector::txBurst(uint16_t qid) {
+ memif_connection_t *c = memif_connection_.get();
+ int err;
+ uint16_t r;
+ /* inform peer memif interface about data in shared memory buffers */
+ /* mark memif buffers as free */
+ err = memif_tx_burst(c->conn, qid, c->tx_bufs, c->tx_buf_num, &r);
+
+  if (TRANSPORT_EXPECT_FALSE(err != MEMIF_ERR_SUCCESS)) {
+    TRANSPORT_LOGE("memif_tx_burst: %s", memif_strerror(err));
+    c->tx_buf_num -= r;
+    return -1;
+  }
+
+  // err = memif_refill_queue(c->conn, qid, r, 0);
+
+ c->tx_buf_num -= r;
+ return 0;
+}
+
+void MemifConnector::sendCallback(const std::error_code &ec) {
+ timer_set_ = false;
+
+ if (TRANSPORT_EXPECT_TRUE(!ec && state_ == State::CONNECTED)) {
+ doSend();
+ }
+}
+
+void MemifConnector::processInputBuffer(std::uint16_t total_packets) {
+ utils::MemBuf::Ptr ptr;
+
+ for (; total_packets > 0; total_packets--) {
+ if (input_buffer_.pop(ptr)) {
+ receive_callback_(this, *ptr, std::make_error_code(std::errc(0)));
+ }
+ }
+}
+
+/* informs user about connected status. private_ctx is used by user to identify
+ connection (multiple connections WIP) */
+int MemifConnector::onConnect(memif_conn_handle_t conn, void *private_ctx) {
+ MemifConnector *connector = (MemifConnector *)private_ctx;
+ connector->state_ = State::CONNECTED;
+ memif_refill_queue(conn, 0, -1, 0);
+
+ return 0;
+}
+
+/* informs user about disconnected status. private_ctx is used by user to
+ identify connection (multiple connections WIP) */
+int MemifConnector::onDisconnect(memif_conn_handle_t conn, void *private_ctx) {
+ MemifConnector *connector = (MemifConnector *)private_ctx;
+ connector->state_ = State::CLOSED;
+ return 0;
+}
+
+void MemifConnector::threadMain() { event_reactor_.runEventLoop(1000); }
+
+int MemifConnector::onInterrupt(memif_conn_handle_t conn, void *private_ctx,
+ uint16_t qid) {
+ MemifConnector *connector = (MemifConnector *)private_ctx;
+
+ memif_connection_t *c = connector->memif_connection_.get();
+ int err = MEMIF_ERR_SUCCESS, ret_val;
+ uint16_t total_packets = 0;
+ uint16_t rx;
+
+ do {
+ err = memif_rx_burst(conn, qid, c->rx_bufs, MAX_MEMIF_BUFS, &rx);
+ ret_val = err;
+
+ if (TRANSPORT_EXPECT_FALSE(err != MEMIF_ERR_SUCCESS &&
+ err != MEMIF_ERR_NOBUF)) {
+ TRANSPORT_LOGE("memif_rx_burst: %s", memif_strerror(err));
+ goto error;
+ }
+
+ c->rx_buf_num += rx;
+
+ if (TRANSPORT_EXPECT_FALSE(connector->io_service_.stopped())) {
+ TRANSPORT_LOGE("socket stopped: ignoring %u packets", rx);
+ goto error;
+ }
+
+ std::size_t packet_length;
+ for (int i = 0; i < rx; i++) {
+ auto buffer = connector->getRawBuffer();
+ packet_length = (c->rx_bufs + i)->len;
+ std::memcpy(buffer.first, (c->rx_bufs + i)->data, packet_length);
+ auto packet = connector->getPacketFromBuffer(buffer.first, packet_length);
+
+ if (!connector->input_buffer_.push(std::move(packet))) {
+ TRANSPORT_LOGE("Error pushing packet. Ring buffer full.");
+
+      // TODO: Consider signaling the congestion to the application, so that
+      // it can react properly (e.g. by slowing down).
+ }
+ }
+
+ /* mark memif buffers and shared memory buffers as free */
+ /* free processed buffers */
+
+ err = memif_refill_queue(conn, qid, rx, 0);
+
+ if (TRANSPORT_EXPECT_FALSE(err != MEMIF_ERR_SUCCESS)) {
+ TRANSPORT_LOGE("memif_buffer_free: %s", memif_strerror(err));
+ }
+
+ c->rx_buf_num -= rx;
+ total_packets += rx;
+
+ } while (ret_val == MEMIF_ERR_NOBUF);
+
+ connector->io_service_.post(
+ std::bind(&MemifConnector::processInputBuffer, connector, total_packets));
+
+ return 0;
+
+error:
+ err = memif_refill_queue(c->conn, qid, rx, 0);
+
+ if (TRANSPORT_EXPECT_FALSE(err != MEMIF_ERR_SUCCESS)) {
+ TRANSPORT_LOGE("memif_buffer_free: %s", memif_strerror(err));
+ }
+ c->rx_buf_num -= rx;
+
+ return 0;
+}
+
+void MemifConnector::close() {
+ if (state_ != State::CLOSED) {
+ disconnect_timer_->expiresFromNow(std::chrono::microseconds(50));
+ disconnect_timer_->asyncWait([this](const std::error_code &ec) {
+ deleteMemif();
+ event_reactor_.stop();
+ work_.reset();
+ });
+
+ if (memif_worker_ && memif_worker_->joinable()) {
+ memif_worker_->join();
+ }
+ }
+}
+
+void MemifConnector::send(Packet &packet) {
+ {
+ utils::SpinLock::Acquire locked(write_msgs_lock_);
+ output_buffer_.push_back(packet.shared_from_this());
+ }
+#if CANCEL_TIMER
+ if (!timer_set_) {
+ timer_set_ = true;
+ send_timer_->expiresFromNow(std::chrono::microseconds(50));
+ send_timer_->asyncWait(
+ std::bind(&MemifConnector::sendCallback, this, std::placeholders::_1));
+ }
+#endif
+}
+
+int MemifConnector::doSend() {
+ std::size_t max = 0;
+ int32_t n = 0;
+ std::size_t size = 0;
+
+ {
+ utils::SpinLock::Acquire locked(write_msgs_lock_);
+ size = output_buffer_.size();
+ }
+
+ do {
+ max = size < MAX_MEMIF_BUFS ? size : MAX_MEMIF_BUFS;
+ n = bufferAlloc(max, memif_connection_->tx_qid);
+
+ if (TRANSPORT_EXPECT_FALSE(n < 0)) {
+ TRANSPORT_LOGE("Error allocating buffers.");
+ return -1;
+ }
+
+ for (uint16_t i = 0; i < n; i++) {
+ utils::SpinLock::Acquire locked(write_msgs_lock_);
+
+ auto packet = output_buffer_.front().get();
+ const utils::MemBuf *current = packet;
+ std::size_t offset = 0;
+ uint8_t *shared_buffer =
+ reinterpret_cast<uint8_t *>(memif_connection_->tx_bufs[i].data);
+ do {
+ std::memcpy(shared_buffer + offset, current->data(), current->length());
+ offset += current->length();
+ current = current->next();
+ } while (current != packet);
+
+ memif_connection_->tx_bufs[i].len = uint32_t(offset);
+
+ output_buffer_.pop_front();
+ }
+
+ txBurst(memif_connection_->tx_qid);
+
+ utils::SpinLock::Acquire locked(write_msgs_lock_);
+ size = output_buffer_.size();
+ } while (size > 0);
+
+ return 0;
+}
+
+void MemifConnector::send(const uint8_t *packet, std::size_t len) {
+ throw errors::NotImplementedException();
+}
+
+} // end namespace core
+
+} // end namespace transport
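
A side note on the TX path above: send() enqueues circularly-chained utils::MemBuf packets and doSend() flattens each chain into a single memif buffer. The standalone helper below (illustrative only, built from the data()/length()/next() accessors used above) makes that copy loop explicit.

    // Illustrative helper, not part of the patch: flatten a circularly-chained
    // utils::MemBuf into a contiguous buffer, as doSend() does when filling a
    // memif TX buffer. The caller guarantees dst can hold the whole chain
    // (at most MEMIF_BUF_SIZE bytes here).
    #include <io_modules/memif/memif_connector.h>

    #include <cstring>

    static std::size_t flattenChain(const utils::MemBuf *head, uint8_t *dst) {
      const utils::MemBuf *current = head;
      std::size_t offset = 0;
      do {
        std::memcpy(dst + offset, current->data(), current->length());
        offset += current->length();
        current = current->next();  // the chain is circular: the tail's next()
      } while (current != head);    // points back to the head
      return offset;
    }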
diff --git a/libtransport/src/io_modules/memif/memif_connector.h b/libtransport/src/io_modules/memif/memif_connector.h
new file mode 100644
index 000000000..bed3516dc
--- /dev/null
+++ b/libtransport/src/io_modules/memif/memif_connector.h
@@ -0,0 +1,130 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <hicn/transport/config.h>
+#include <hicn/transport/core/connector.h>
+#include <hicn/transport/portability/portability.h>
+#include <hicn/transport/utils/ring_buffer.h>
+//#include <hicn/transport/core/hicn_vapi.h>
+#include <utils/epoll_event_reactor.h>
+#include <utils/fd_deadline_timer.h>
+
+#include <asio.hpp>
+#include <deque>
+#include <mutex>
+#include <thread>
+
+#define _Static_assert static_assert
+
+namespace transport {
+
+namespace core {
+
+typedef struct memif_connection memif_connection_t;
+
+#define APP_NAME "libtransport"
+#define IF_NAME "vpp_connection"
+
+#define MEMIF_BUF_SIZE 2048
+#define MEMIF_LOG2_RING_SIZE 13
+#define MAX_MEMIF_BUFS (1 << MEMIF_LOG2_RING_SIZE)
+
+class MemifConnector : public Connector {
+ using memif_conn_handle_t = void *;
+ using PacketRing = utils::CircularFifo<utils::MemBuf::Ptr, queue_size>;
+
+ public:
+ MemifConnector(PacketReceivedCallback &&receive_callback,
+ PacketSentCallback &&packet_sent,
+ OnCloseCallback &&close_callback,
+ OnReconnectCallback &&on_reconnect,
+ asio::io_service &io_service,
+ std::string app_name = "Libtransport");
+
+ ~MemifConnector() override;
+
+ void send(Packet &packet) override;
+
+ void send(const uint8_t *packet, std::size_t len) override;
+
+ void close() override;
+
+ void connect(uint32_t memif_id, long memif_mode);
+
+ TRANSPORT_ALWAYS_INLINE uint32_t getMemifId() { return memif_id_; };
+
+ private:
+ void init();
+
+ int doSend();
+
+ int createMemif(uint32_t index, uint8_t mode, char *s);
+
+ uint32_t getMemifConfiguration();
+
+ int deleteMemif();
+
+ static int controlFdUpdate(int fd, uint8_t events, void *private_ctx);
+
+ static int onConnect(memif_conn_handle_t conn, void *private_ctx);
+
+ static int onDisconnect(memif_conn_handle_t conn, void *private_ctx);
+
+ static int onInterrupt(memif_conn_handle_t conn, void *private_ctx,
+ uint16_t qid);
+
+ void threadMain();
+
+ int txBurst(uint16_t qid);
+
+ int bufferAlloc(long n, uint16_t qid);
+
+ void sendCallback(const std::error_code &ec);
+
+ void processInputBuffer(std::uint16_t total_packets);
+
+ private:
+ static utils::EpollEventReactor main_event_reactor_;
+ static std::unique_ptr<std::thread> main_worker_;
+
+ int epfd;
+ std::unique_ptr<std::thread> memif_worker_;
+ utils::EpollEventReactor event_reactor_;
+ std::atomic_bool timer_set_;
+ std::unique_ptr<utils::FdDeadlineTimer> send_timer_;
+ std::unique_ptr<utils::FdDeadlineTimer> disconnect_timer_;
+ asio::io_service &io_service_;
+ std::unique_ptr<asio::io_service::work> work_;
+ std::unique_ptr<memif_connection_t> memif_connection_;
+ uint16_t tx_buf_counter_;
+
+ PacketRing input_buffer_;
+ bool is_reconnection_;
+ bool data_available_;
+ uint32_t memif_id_;
+ uint8_t memif_mode_;
+ std::string app_name_;
+ uint16_t transmission_index_;
+ utils::SpinLock write_msgs_lock_;
+ std::string socket_filename_;
+
+ static std::once_flag flag_;
+};
+
+} // end namespace core
+
+} // end namespace transport
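
A hedged construction sketch for the class declared above (in practice VPPForwarderModule builds it, passing null send/close callbacks). It assumes a VPP-side master memif is listening on /run/vpp/memif.sock and that the Connector callback aliases are void-returning std::function types, so the generic lambdas below convert implicitly; names are illustrative.

    // Hypothetical sketch, not part of the patch: build a MemifConnector
    // directly and attach it to memif id 0 in slave mode.
    #include <io_modules/memif/memif_connector.h>

    #include <asio.hpp>

    void connect_example() {
      asio::io_service io_service;

      transport::core::MemifConnector connector(
          [](auto &&...) { /* packet received */ },  // PacketReceivedCallback
          nullptr,                                   // PacketSentCallback (unused)
          nullptr,                                   // OnCloseCallback (unused)
          [](auto &&...) { /* reconnected */ },      // OnReconnectCallback
          io_service, "demo_app");

      connector.connect(/* memif_id = */ 0, /* memif_mode = */ 0);
      io_service.run();  // run deferred packet processing until stopped
    }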
diff --git a/libtransport/src/io_modules/memif/memif_vapi.c b/libtransport/src/io_modules/memif/memif_vapi.c
new file mode 100644
index 000000000..b3da2b012
--- /dev/null
+++ b/libtransport/src/io_modules/memif/memif_vapi.c
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <fcntl.h>
+#include <hicn/transport/config.h>
+#include <inttypes.h>
+#include <io_modules/memif/memif_vapi.h>
+#include <semaphore.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <vapi/vapi_safe.h>
+#include <vppinfra/clib.h>
+
+DEFINE_VAPI_MSG_IDS_MEMIF_API_JSON
+
+static vapi_error_e memif_details_cb(vapi_ctx_t ctx, void *callback_ctx,
+ vapi_error_e rv, bool is_last,
+ vapi_payload_memif_details *reply) {
+ uint32_t *last_memif_id = (uint32_t *)callback_ctx;
+ uint32_t current_memif_id = 0;
+ if (reply != NULL) {
+ current_memif_id = reply->id;
+ } else {
+ return rv;
+ }
+
+ if (current_memif_id >= *last_memif_id) {
+ *last_memif_id = current_memif_id + 1;
+ }
+
+ return rv;
+}
+
+int memif_vapi_get_next_memif_id(vapi_ctx_t ctx, uint32_t *memif_id) {
+ vapi_lock();
+ vapi_msg_memif_dump *msg = vapi_alloc_memif_dump(ctx);
+ int ret = vapi_memif_dump(ctx, msg, memif_details_cb, memif_id);
+ vapi_unlock();
+ return ret;
+}
+
+static vapi_error_e memif_create_cb(vapi_ctx_t ctx, void *callback_ctx,
+ vapi_error_e rv, bool is_last,
+ vapi_payload_memif_create_reply *reply) {
+ memif_output_params_t *output_params = (memif_output_params_t *)callback_ctx;
+
+ if (reply == NULL) return rv;
+
+ output_params->sw_if_index = reply->sw_if_index;
+
+ return rv;
+}
+
+int memif_vapi_create_memif(vapi_ctx_t ctx, memif_create_params_t *input_params,
+ memif_output_params_t *output_params) {
+ vapi_lock();
+ vapi_msg_memif_create *msg = vapi_alloc_memif_create(ctx);
+
+ int ret = 0;
+ if (input_params->socket_id == ~0) {
+ // invalid socket-id
+ ret = -1;
+ goto END;
+ }
+
+ if (!is_pow2(input_params->ring_size)) {
+ // ring size must be power of 2
+ ret = -1;
+ goto END;
+ }
+
+ if (input_params->rx_queues > 255 || input_params->rx_queues < 1) {
+    // rx_queues must be between 1 and 255
+ ret = -1;
+ goto END;
+ }
+
+ if (input_params->tx_queues > 255 || input_params->tx_queues < 1) {
+    // tx_queues must be between 1 and 255
+ ret = -1;
+ goto END;
+ }
+
+ msg->payload.role = input_params->role;
+ msg->payload.mode = input_params->mode;
+ msg->payload.rx_queues = input_params->rx_queues;
+ msg->payload.tx_queues = input_params->tx_queues;
+ msg->payload.id = input_params->id;
+ msg->payload.socket_id = input_params->socket_id;
+ msg->payload.ring_size = input_params->ring_size;
+ msg->payload.buffer_size = input_params->buffer_size;
+
+ ret = vapi_memif_create(ctx, msg, memif_create_cb, output_params);
+END:
+ vapi_unlock();
+ return ret;
+}
+
+static vapi_error_e memif_delete_cb(vapi_ctx_t ctx, void *callback_ctx,
+ vapi_error_e rv, bool is_last,
+ vapi_payload_memif_delete_reply *reply) {
+ if (reply == NULL) return rv;
+
+ return reply->retval;
+}
+
+int memif_vapi_delete_memif(vapi_ctx_t ctx, uint32_t sw_if_index) {
+ vapi_lock();
+ vapi_msg_memif_delete *msg = vapi_alloc_memif_delete(ctx);
+
+ msg->payload.sw_if_index = sw_if_index;
+
+ int ret = vapi_memif_delete(ctx, msg, memif_delete_cb, NULL);
+ vapi_unlock();
+ return ret;
+}
diff --git a/libtransport/src/io_modules/memif/memif_vapi.h b/libtransport/src/io_modules/memif/memif_vapi.h
new file mode 100644
index 000000000..bcf06ed43
--- /dev/null
+++ b/libtransport/src/io_modules/memif/memif_vapi.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <hicn/transport/config.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <vapi/memif.api.vapi.h>
+
+#include "stdint.h"
+
+typedef struct memif_create_params_s {
+ uint8_t role;
+ uint8_t mode;
+ uint8_t rx_queues;
+ uint8_t tx_queues;
+ uint32_t id;
+ uint32_t socket_id;
+ uint8_t secret[24];
+ uint32_t ring_size;
+ uint16_t buffer_size;
+ uint8_t hw_addr[6];
+} memif_create_params_t;
+
+typedef struct memif_output_params_s {
+ uint32_t sw_if_index;
+} memif_output_params_t;
+
+int memif_vapi_get_next_memif_id(vapi_ctx_t ctx, uint32_t *memif_id);
+
+int memif_vapi_create_memif(vapi_ctx_t ctx, memif_create_params_t *input_params,
+ memif_output_params_t *output_params);
+
+int memif_vapi_delete_memif(vapi_ctx_t ctx, uint32_t sw_if_index);
+
+#ifdef __cplusplus
+}
+#endif
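
For completeness, a hedged sketch of driving these wrappers directly, mirroring VPPForwarderModule::getMemifConfiguration() below. It assumes a connected vapi_ctx_t and reuses the defaults from vpp_forwarder_module.cc; the role/mode values (0 = master, 1 = IP mode) are assumptions taken from the memif API conventions.

    // Hypothetical sketch, not part of the patch: create and delete a memif
    // interface in VPP through the VAPI wrappers declared above.
    #include <io_modules/memif/memif_vapi.h>

    static int create_and_delete_memif(vapi_ctx_t ctx) {
      uint32_t memif_id = 0;
      if (memif_vapi_get_next_memif_id(ctx, &memif_id) < 0) return -1;

      memif_create_params_t input = {0};
      input.id = memif_id;
      input.role = 0;           // assumed: 0 = master (VPP owns the rings)
      input.mode = 1;           // assumed: 1 = IP mode, as used by the connector
      input.rx_queues = 1;
      input.tx_queues = 1;
      input.ring_size = 2048;   // must be a power of two
      input.buffer_size = 2048;
      input.socket_id = 0;      // default memif socket

      memif_output_params_t output = {0};
      if (memif_vapi_create_memif(ctx, &input, &output) < 0) return -1;

      // ... hand output.sw_if_index to the rest of the module ...

      return memif_vapi_delete_memif(ctx, output.sw_if_index);
    }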
diff --git a/libtransport/src/io_modules/memif/vpp_forwarder_module.cc b/libtransport/src/io_modules/memif/vpp_forwarder_module.cc
new file mode 100644
index 000000000..dcbcd7ed0
--- /dev/null
+++ b/libtransport/src/io_modules/memif/vpp_forwarder_module.cc
@@ -0,0 +1,263 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <hicn/transport/config.h>
+#include <hicn/transport/errors/not_implemented_exception.h>
+#include <io_modules/memif/hicn_vapi.h>
+#include <io_modules/memif/memif_connector.h>
+#include <io_modules/memif/memif_vapi.h>
+#include <io_modules/memif/vpp_forwarder_module.h>
+
+extern "C" {
+#include <memif/libmemif.h>
+};
+
+typedef enum { MASTER = 0, SLAVE = 1 } memif_role_t;
+
+#define MEMIF_DEFAULT_RING_SIZE 2048
+#define MEMIF_DEFAULT_RX_QUEUES 1
+#define MEMIF_DEFAULT_TX_QUEUES 1
+#define MEMIF_DEFAULT_BUFFER_SIZE 2048
+
+namespace transport {
+
+namespace core {
+
+VPPForwarderModule::VPPForwarderModule()
+ : IoModule(),
+ connector_(nullptr),
+ sw_if_index_(~0),
+ face_id1_(~0),
+ face_id2_(~0),
+ is_consumer_(false) {}
+
+VPPForwarderModule::~VPPForwarderModule() { delete connector_; }
+
+void VPPForwarderModule::init(
+ Connector::PacketReceivedCallback &&receive_callback,
+ Connector::OnReconnectCallback &&reconnect_callback,
+ asio::io_service &io_service, const std::string &app_name) {
+ if (!connector_) {
+ connector_ =
+ new MemifConnector(std::move(receive_callback), 0, 0,
+ std::move(reconnect_callback), io_service, app_name);
+ }
+}
+
+void VPPForwarderModule::processControlMessageReply(
+ utils::MemBuf &packet_buffer) {
+ throw errors::NotImplementedException();
+}
+
+bool VPPForwarderModule::isControlMessage(const uint8_t *message) {
+ return false;
+}
+
+bool VPPForwarderModule::isConnected() { return connector_->isConnected(); };
+
+void VPPForwarderModule::send(Packet &packet) {
+ IoModule::send(packet);
+ connector_->send(packet);
+}
+
+void VPPForwarderModule::send(const uint8_t *packet, std::size_t len) {
+ counters_.tx_packets++;
+ counters_.tx_bytes += len;
+
+ // Perfect forwarding
+ connector_->send(packet, len);
+}
+
+std::uint32_t VPPForwarderModule::getMtu() { return interface_mtu; }
+
+/**
+ * @brief Create a memif interface in the local VPP forwarder.
+ */
+uint32_t VPPForwarderModule::getMemifConfiguration() {
+ memif_create_params_t input_params = {0};
+
+ int ret = memif_vapi_get_next_memif_id(VPPForwarderModule::sock_, &memif_id_);
+
+ if (ret < 0) {
+ throw errors::RuntimeException(
+ "Error getting next memif id. Could not create memif interface.");
+ }
+
+ input_params.id = memif_id_;
+ input_params.role = memif_role_t::MASTER;
+ input_params.mode = memif_interface_mode_t::MEMIF_INTERFACE_MODE_IP;
+ input_params.rx_queues = MEMIF_DEFAULT_RX_QUEUES;
+ input_params.tx_queues = MEMIF_DEFAULT_TX_QUEUES;
+ input_params.ring_size = MEMIF_DEFAULT_RING_SIZE;
+ input_params.buffer_size = MEMIF_DEFAULT_BUFFER_SIZE;
+
+ memif_output_params_t output_params = {0};
+
+ ret = memif_vapi_create_memif(VPPForwarderModule::sock_, &input_params,
+ &output_params);
+
+ if (ret < 0) {
+ throw errors::RuntimeException(
+ "Error creating memif interface in the local VPP forwarder.");
+ }
+
+ return output_params.sw_if_index;
+}
+
+void VPPForwarderModule::consumerConnection() {
+ hicn_consumer_input_params input = {0};
+ hicn_consumer_output_params output = {0};
+ ip_address_t ip4_address;
+ ip_address_t ip6_address;
+
+ output.src4 = &ip4_address;
+ output.src6 = &ip6_address;
+ input.swif = sw_if_index_;
+
+ int ret =
+ hicn_vapi_register_cons_app(VPPForwarderModule::sock_, &input, &output);
+
+ if (ret < 0) {
+ throw errors::RuntimeException(hicn_vapi_get_error_string(ret));
+ }
+
+ face_id1_ = output.face_id1;
+ face_id2_ = output.face_id2;
+
+ std::memcpy(inet_address_.v4.as_u8, output.src4->v4.as_u8, IPV4_ADDR_LEN);
+
+ std::memcpy(inet6_address_.v6.as_u8, output.src6->v6.as_u8, IPV6_ADDR_LEN);
+}
+
+void VPPForwarderModule::producerConnection() {
+ // Producer connection will be set when we set the first route.
+}
+
+void VPPForwarderModule::connect(bool is_consumer) {
+ int retry = 20;
+
+ TRANSPORT_LOGI("Connecting to VPP through vapi.");
+ vapi_error_e ret = vapi_connect_safe(&sock_, 0);
+
+ while (ret != VAPI_OK && retry > 0) {
+ TRANSPORT_LOGE("Error connecting to VPP through vapi. Retrying..");
+ --retry;
+ ret = vapi_connect_safe(&sock_, 0);
+ }
+
+ if (ret != VAPI_OK) {
+ throw std::runtime_error(
+ "Impossible to connect to forwarder. Is VPP running?");
+ }
+
+ TRANSPORT_LOGI("Connected to VPP through vapi.");
+
+ sw_if_index_ = getMemifConfiguration();
+
+ is_consumer_ = is_consumer;
+ if (is_consumer_) {
+ consumerConnection();
+ }
+
+ connector_->connect(memif_id_, 0);
+ connector_->setRole(is_consumer_ ? Connector::Role::CONSUMER
+ : Connector::Role::PRODUCER);
+}
+
+void VPPForwarderModule::registerRoute(const Prefix &prefix) {
+ const ip_prefix_t &addr = prefix.toIpPrefixStruct();
+
+ ip_prefix_t producer_prefix;
+ ip_address_t producer_locator;
+
+ if (face_id1_ == uint32_t(~0)) {
+ hicn_producer_input_params input;
+ std::memset(&input, 0, sizeof(input));
+
+ hicn_producer_output_params output;
+ std::memset(&output, 0, sizeof(output));
+
+ input.prefix = &producer_prefix;
+ output.prod_addr = &producer_locator;
+
+      // Here we should ask the actual connector for the memif_id, since this
+      // function is expected to be called after the memif creation.
+ input.swif = sw_if_index_;
+ input.prefix->address = addr.address;
+ input.prefix->family = addr.family;
+ input.prefix->len = addr.len;
+ input.cs_reserved = content_store_reserved_;
+
+ int ret =
+ hicn_vapi_register_prod_app(VPPForwarderModule::sock_, &input, &output);
+
+ if (ret < 0) {
+ throw errors::RuntimeException(hicn_vapi_get_error_string(ret));
+ }
+
+ inet6_address_ = *output.prod_addr;
+
+ face_id1_ = output.face_id;
+ } else {
+ hicn_producer_set_route_params params;
+ params.prefix = &producer_prefix;
+ params.prefix->address = addr.address;
+ params.prefix->family = addr.family;
+ params.prefix->len = addr.len;
+ params.prod_addr = &producer_locator;
+
+ int ret = hicn_vapi_register_route(VPPForwarderModule::sock_, &params);
+
+ if (ret < 0) {
+ throw errors::RuntimeException(hicn_vapi_get_error_string(ret));
+ }
+ }
+}
+
+void VPPForwarderModule::closeConnection() {
+ if (VPPForwarderModule::sock_) {
+ connector_->close();
+
+ if (is_consumer_) {
+ hicn_del_face_app_input_params params;
+ params.face_id = face_id1_;
+ hicn_vapi_face_cons_del(VPPForwarderModule::sock_, &params);
+ params.face_id = face_id2_;
+ hicn_vapi_face_cons_del(VPPForwarderModule::sock_, &params);
+ } else {
+ hicn_del_face_app_input_params params;
+ params.face_id = face_id1_;
+ hicn_vapi_face_prod_del(VPPForwarderModule::sock_, &params);
+ }
+
+ if (sw_if_index_ != uint32_t(~0)) {
+ int ret =
+ memif_vapi_delete_memif(VPPForwarderModule::sock_, sw_if_index_);
+ if (ret < 0) {
+ TRANSPORT_LOGE("Error deleting memif with sw idx %u.", sw_if_index_);
+ }
+ }
+
+ vapi_disconnect_safe();
+ VPPForwarderModule::sock_ = nullptr;
+ }
+}
+
+extern "C" IoModule *create_module(void) { return new VPPForwarderModule(); }
+
+} // namespace core
+
+} // namespace transport
diff --git a/libtransport/src/io_modules/memif/vpp_forwarder_module.h b/libtransport/src/io_modules/memif/vpp_forwarder_module.h
new file mode 100644
index 000000000..8c4114fed
--- /dev/null
+++ b/libtransport/src/io_modules/memif/vpp_forwarder_module.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <hicn/transport/core/io_module.h>
+#include <hicn/transport/core/prefix.h>
+
+#ifdef always_inline
+#undef always_inline
+#endif
+extern "C" {
+#include <vapi/vapi_safe.h>
+};
+
+namespace transport {
+
+namespace core {
+
+class MemifConnector;
+
+class VPPForwarderModule : public IoModule {
+ static constexpr std::uint16_t interface_mtu = 1500;
+
+ public:
+ VPPForwarderModule();
+ ~VPPForwarderModule();
+
+ void connect(bool is_consumer) override;
+
+ void send(Packet &packet) override;
+ void send(const uint8_t *packet, std::size_t len) override;
+
+ bool isConnected() override;
+
+ void init(Connector::PacketReceivedCallback &&receive_callback,
+ Connector::OnReconnectCallback &&reconnect_callback,
+ asio::io_service &io_service,
+ const std::string &app_name = "Libtransport") override;
+
+ void registerRoute(const Prefix &prefix) override;
+
+ std::uint32_t getMtu() override;
+
+ bool isControlMessage(const uint8_t *message) override;
+
+ void processControlMessageReply(utils::MemBuf &packet_buffer) override;
+
+ void closeConnection() override;
+
+ private:
+ uint32_t getMemifConfiguration();
+ void consumerConnection();
+ void producerConnection();
+
+ private:
+ MemifConnector *connector_;
+ uint32_t memif_id_;
+ uint32_t sw_if_index_;
+  // A consumer socket in VPP has two faces (IPv4 and IPv6)
+ uint32_t face_id1_;
+ uint32_t face_id2_;
+ bool is_consumer_;
+ vapi_ctx_t sock_;
+};
+
+extern "C" IoModule *create_module(void);
+
+} // namespace core
+
+} // namespace transport
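
Finally, a hedged end-to-end sketch of the module lifecycle declared above (not part of the patch). It assumes a running VPP with the hicn and memif plugins, that the Connector callback aliases accept the generic lambdas below (i.e. they are void-returning std::function types), and that Prefix can be built from a textual prefix as elsewhere in libtransport; all names are illustrative.

    // Hypothetical lifecycle sketch, not part of the patch.
    #include <io_modules/memif/vpp_forwarder_module.h>

    #include <asio.hpp>

    int run_producer_example() {
      asio::io_service io_service;
      transport::core::VPPForwarderModule module;

      // Wire receive/reconnect callbacks; real code would parse packets here.
      module.init([](auto &&...) { /* packet received */ },
                  [](auto &&...) { /* reconnected */ },
                  io_service, "demo_app");

      module.connect(false);  // producer side: create memif + VAPI session
      module.registerRoute(transport::core::Prefix("b001::/64"));  // assumed ctor

      io_service.run();          // serve until the io_service is stopped

      module.closeConnection();  // delete the faces and the memif interface
      return 0;
    }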