author    Luca Muscariello <muscariello@ieee.org>  2021-04-15 09:05:46 +0200
committer Mauro Sardara <msardara@cisco.com>       2021-04-15 16:36:16 +0200
commit    e92e9e839ca2cf42b56322b2489ccc0d8bf767af (patch)
tree      9f1647c83a87fbf982ae329e800af25dbfb226b5 /libtransport/src/io_modules/memif/vpp_forwarder_module.cc
parent    3e541d7c947cc2f9db145f26c9274efd29a6fb56 (diff)
[HICN-690] Transport Library Major Refactoring
The current patch provides a major refactoring of the transport library.
A summary of the components that underwent major modifications is reported below.

- Transport protocol updates
  The class hierarchy has been optimized to share common transport services
  across different transport protocols. This makes it possible to customize a
  transport protocol with new features.

- A new real-time communication protocol
  The RTC protocol algorithms have been optimized to reduce consumer-producer
  synchronization latency.

- A novel socket API
  The API has been reworked to be easier to consume and to integrate more
  efficiently into L4 proxies.

- Several performance improvements
  A large number of performance improvements have been included, in particular
  to make the entire stack zero-copy and to reduce cache misses.

- New memory buffer framework
  Memory management has been reworked entirely to provide a more efficient
  infrastructure with a richer API. Buffers are now allocated in blocks, and a
  single buffer holds the memory for (1) the shared_ptr control block, (2) the
  packet metadata (e.g. name, pointers to other buffers if the buffer is
  chained, and relevant offsets), and (3) the packet itself, as it is
  sent/received over the network.

- A new slab allocator
  Dynamic memory allocation is now managed by a novel slab allocator optimized
  for packet processing and connection management. Memory is organized in pools
  of blocks, all of the same size, which are used during the processing of
  outgoing/incoming packets. When a memory block is allocated it is always
  taken from a global pool, and when it is deallocated it is returned to the
  pool, thus avoiding the cost of any heap allocation in the data path. (A
  minimal sketch of the buffer layout and pool recycling follows the sign-off
  below.)

- New transport connectors
  Consumer and producer endpoints can communicate either through an hICN packet
  forwarder or with a direct connector based on shared memory or sockets.
  Transport connectors are typically used for unit and functional testing, but
  may have additional uses.

- Support for FEC/ECC for transport services
  FEC/ECC via Reed-Solomon is supported by default and made available to
  transport services as a modular component. Reed-Solomon block codes are the
  default FEC model and can be replaced in a modular way by many other codes,
  including RLNC, which is not available in this distribution. The current FEC
  framework supports variable-size padding and efficiently uses the infra
  memory buffers to avoid additional copies.

- Secure transport framework for signature computation and verification
  Crypto support is natively used in hICN for integrity and authenticity. New
  support, including for RTC, has been implemented and made modular and
  reusable across different transport protocols.

- TLS - Transport layer security over hICN
  Point-to-point confidentiality is provided by integrating TLS on top of hICN
  reliable and non-reliable transport. The integration is common to both and
  makes a different use of the TLS record.

- MLS - Messaging layer security over hICN
  MLS integration on top of hICN is based on the MLSPP implementation open
  sourced by Cisco. Instrumentation tools are included to run performance and
  functional tests on groups of endpoints.

- Android support
  The overall code has been heavily tested in Android environments and has
  received significant rework to run better natively on recent Android OS
  versions.
Co-authored-by: Mauro Sardara <msardara@cisco.com>
Co-authored-by: Michele Papalini <micpapal@cisco.com>
Co-authored-by: Olivier Roques <oroques+fdio@cisco.com>
Co-authored-by: Giulio Grassi <gigrassi@cisco.com>
Change-Id: If477ba2fa686e6f47bdf96307ac60938766aef69
Signed-off-by: Luca Muscariello <muscariello@ieee.org>
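As a minimal, self-contained sketch of the two memory-management ideas above
(the single-allocation buffer layout and the slab-style block pool), the
following C++ snippet may help. It is not the libtransport implementation:
PacketBlock and BlockPool are hypothetical names, the metadata fields are
illustrative, and the real library additionally co-locates the shared_ptr
control block in the same allocation and uses a global pool.

#include <cstddef>
#include <cstdint>
#include <vector>

// One contiguous allocation: metadata first, packet payload right after it.
struct PacketBlock {
  std::size_t head_offset = 0;   // offset of the packet start inside data[]
  std::size_t length = 0;        // number of valid payload bytes
  PacketBlock *next = nullptr;   // chaining pointer for chained buffers
  std::uint8_t data[2048];       // payload region, same allocation as metadata
};

// Pool of equally sized blocks: allocate() takes from the pool, release()
// returns to it, so the steady-state data path does no heap allocation.
class BlockPool {
 public:
  explicit BlockPool(std::size_t n) {
    for (std::size_t i = 0; i < n; i++) free_.push_back(new PacketBlock());
  }
  ~BlockPool() {
    for (auto *b : free_) delete b;
  }
  PacketBlock *allocate() {
    if (free_.empty()) return new PacketBlock();  // fallback if pool is empty
    PacketBlock *b = free_.back();
    free_.pop_back();
    return b;
  }
  void release(PacketBlock *b) {
    b->head_offset = 0;
    b->length = 0;
    b->next = nullptr;
    free_.push_back(b);  // recycled, never freed in the data path
  }

 private:
  std::vector<PacketBlock *> free_;
};

int main() {
  BlockPool pool(128);                // pre-allocate 128 equal-size blocks
  PacketBlock *pkt = pool.allocate(); // taken from the pool, not the heap
  pkt->length = 100;                  // pretend a 100-byte packet was received
  pool.release(pkt);                  // goes back to the pool for reuse
  return 0;
}

The point illustrated here is that release() never frees memory: blocks go back
to the pool, so once the pool is warm the per-packet path performs no heap
allocation.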
Diffstat (limited to 'libtransport/src/io_modules/memif/vpp_forwarder_module.cc')
-rw-r--r--  libtransport/src/io_modules/memif/vpp_forwarder_module.cc  263
1 file changed, 263 insertions, 0 deletions
diff --git a/libtransport/src/io_modules/memif/vpp_forwarder_module.cc b/libtransport/src/io_modules/memif/vpp_forwarder_module.cc
new file mode 100644
index 000000000..dcbcd7ed0
--- /dev/null
+++ b/libtransport/src/io_modules/memif/vpp_forwarder_module.cc
@@ -0,0 +1,263 @@
+/*
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <hicn/transport/config.h>
+#include <hicn/transport/errors/not_implemented_exception.h>
+#include <io_modules/memif/hicn_vapi.h>
+#include <io_modules/memif/memif_connector.h>
+#include <io_modules/memif/memif_vapi.h>
+#include <io_modules/memif/vpp_forwarder_module.h>
+
+extern "C" {
+#include <memif/libmemif.h>
+};
+
+typedef enum { MASTER = 0, SLAVE = 1 } memif_role_t;
+
+#define MEMIF_DEFAULT_RING_SIZE 2048
+#define MEMIF_DEFAULT_RX_QUEUES 1
+#define MEMIF_DEFAULT_TX_QUEUES 1
+#define MEMIF_DEFAULT_BUFFER_SIZE 2048
+
+namespace transport {
+
+namespace core {
+
+VPPForwarderModule::VPPForwarderModule()
+    : IoModule(),
+      connector_(nullptr),
+      sw_if_index_(~0),
+      face_id1_(~0),
+      face_id2_(~0),
+      is_consumer_(false) {}
+
+VPPForwarderModule::~VPPForwarderModule() { delete connector_; }
+
+void VPPForwarderModule::init(
+    Connector::PacketReceivedCallback &&receive_callback,
+    Connector::OnReconnectCallback &&reconnect_callback,
+    asio::io_service &io_service, const std::string &app_name) {
+  if (!connector_) {
+    connector_ =
+        new MemifConnector(std::move(receive_callback), 0, 0,
+                           std::move(reconnect_callback), io_service, app_name);
+  }
+}
+
+void VPPForwarderModule::processControlMessageReply(
+    utils::MemBuf &packet_buffer) {
+  throw errors::NotImplementedException();
+}
+
+bool VPPForwarderModule::isControlMessage(const uint8_t *message) {
+  return false;
+}
+
+bool VPPForwarderModule::isConnected() { return connector_->isConnected(); };
+
+void VPPForwarderModule::send(Packet &packet) {
+  IoModule::send(packet);
+  connector_->send(packet);
+}
+
+void VPPForwarderModule::send(const uint8_t *packet, std::size_t len) {
+  counters_.tx_packets++;
+  counters_.tx_bytes += len;
+
+  // Perfect forwarding
+  connector_->send(packet, len);
+}
+
+std::uint32_t VPPForwarderModule::getMtu() { return interface_mtu; }
+
+/**
+ * @brief Create a memif interface in the local VPP forwarder.
+ * @return The sw_if_index of the newly created memif interface.
+ */
+uint32_t VPPForwarderModule::getMemifConfiguration() {
+  memif_create_params_t input_params = {0};
+
+  int ret = memif_vapi_get_next_memif_id(VPPForwarderModule::sock_, &memif_id_);
+
+  if (ret < 0) {
+    throw errors::RuntimeException(
+        "Error getting next memif id. Could not create memif interface.");
+  }
+
+  input_params.id = memif_id_;
+  input_params.role = memif_role_t::MASTER;
+  input_params.mode = memif_interface_mode_t::MEMIF_INTERFACE_MODE_IP;
+  input_params.rx_queues = MEMIF_DEFAULT_RX_QUEUES;
+  input_params.tx_queues = MEMIF_DEFAULT_TX_QUEUES;
+  input_params.ring_size = MEMIF_DEFAULT_RING_SIZE;
+  input_params.buffer_size = MEMIF_DEFAULT_BUFFER_SIZE;
+
+  memif_output_params_t output_params = {0};
+
+  ret = memif_vapi_create_memif(VPPForwarderModule::sock_, &input_params,
+                                &output_params);
+
+  if (ret < 0) {
+    throw errors::RuntimeException(
+        "Error creating memif interface in the local VPP forwarder.");
+  }
+
+  return output_params.sw_if_index;
+}
+
+void VPPForwarderModule::consumerConnection() {
+  hicn_consumer_input_params input = {0};
+  hicn_consumer_output_params output = {0};
+  ip_address_t ip4_address;
+  ip_address_t ip6_address;
+
+  output.src4 = &ip4_address;
+  output.src6 = &ip6_address;
+  input.swif = sw_if_index_;
+
+  int ret =
+      hicn_vapi_register_cons_app(VPPForwarderModule::sock_, &input, &output);
+
+  if (ret < 0) {
+    throw errors::RuntimeException(hicn_vapi_get_error_string(ret));
+  }
+
+  face_id1_ = output.face_id1;
+  face_id2_ = output.face_id2;
+
+  std::memcpy(inet_address_.v4.as_u8, output.src4->v4.as_u8, IPV4_ADDR_LEN);
+
+  std::memcpy(inet6_address_.v6.as_u8, output.src6->v6.as_u8, IPV6_ADDR_LEN);
+}
+
+void VPPForwarderModule::producerConnection() {
+  // Producer connection will be set when we set the first route.
+}
+
+void VPPForwarderModule::connect(bool is_consumer) {
+  int retry = 20;
+
+  TRANSPORT_LOGI("Connecting to VPP through vapi.");
+  vapi_error_e ret = vapi_connect_safe(&sock_, 0);
+
+  while (ret != VAPI_OK && retry > 0) {
+    TRANSPORT_LOGE("Error connecting to VPP through vapi. Retrying..");
+    --retry;
+    ret = vapi_connect_safe(&sock_, 0);
+  }
+
+  if (ret != VAPI_OK) {
+    throw std::runtime_error(
+        "Impossible to connect to forwarder. Is VPP running?");
+  }
+
+  TRANSPORT_LOGI("Connected to VPP through vapi.");
+
+  sw_if_index_ = getMemifConfiguration();
+
+  is_consumer_ = is_consumer;
+  if (is_consumer_) {
+    consumerConnection();
+  }
+
+  connector_->connect(memif_id_, 0);
+  connector_->setRole(is_consumer_ ? Connector::Role::CONSUMER
+                                   : Connector::Role::PRODUCER);
+}
+
+void VPPForwarderModule::registerRoute(const Prefix &prefix) {
+  const ip_prefix_t &addr = prefix.toIpPrefixStruct();
+
+  ip_prefix_t producer_prefix;
+  ip_address_t producer_locator;
+
+  if (face_id1_ == uint32_t(~0)) {
+    hicn_producer_input_params input;
+    std::memset(&input, 0, sizeof(input));
+
+    hicn_producer_output_params output;
+    std::memset(&output, 0, sizeof(output));
+
+    input.prefix = &producer_prefix;
+    output.prod_addr = &producer_locator;
+
+    // Here we have to ask the actual connector for the memif_id, since
+    // this function should be called after the memif creation.
+    input.swif = sw_if_index_;
+    input.prefix->address = addr.address;
+    input.prefix->family = addr.family;
+    input.prefix->len = addr.len;
+    input.cs_reserved = content_store_reserved_;
+
+    int ret =
+        hicn_vapi_register_prod_app(VPPForwarderModule::sock_, &input, &output);
+
+    if (ret < 0) {
+      throw errors::RuntimeException(hicn_vapi_get_error_string(ret));
+    }
+
+    inet6_address_ = *output.prod_addr;
+
+    face_id1_ = output.face_id;
+  } else {
+    hicn_producer_set_route_params params;
+    params.prefix = &producer_prefix;
+    params.prefix->address = addr.address;
+    params.prefix->family = addr.family;
+    params.prefix->len = addr.len;
+    params.prod_addr = &producer_locator;
+
+    int ret = hicn_vapi_register_route(VPPForwarderModule::sock_, &params);
+
+    if (ret < 0) {
+      throw errors::RuntimeException(hicn_vapi_get_error_string(ret));
+    }
+  }
+}
+
+void VPPForwarderModule::closeConnection() {
+  if (VPPForwarderModule::sock_) {
+    connector_->close();
+
+    if (is_consumer_) {
+      hicn_del_face_app_input_params params;
+      params.face_id = face_id1_;
+      hicn_vapi_face_cons_del(VPPForwarderModule::sock_, &params);
+      params.face_id = face_id2_;
+      hicn_vapi_face_cons_del(VPPForwarderModule::sock_, &params);
+    } else {
+      hicn_del_face_app_input_params params;
+      params.face_id = face_id1_;
+      hicn_vapi_face_prod_del(VPPForwarderModule::sock_, &params);
+    }
+
+    if (sw_if_index_ != uint32_t(~0)) {
+      int ret =
+          memif_vapi_delete_memif(VPPForwarderModule::sock_, sw_if_index_);
+      if (ret < 0) {
+        TRANSPORT_LOGE("Error deleting memif with sw idx %u.", sw_if_index_);
+      }
+    }
+
+    vapi_disconnect_safe();
+    VPPForwarderModule::sock_ = nullptr;
+  }
+}
+
+extern "C" IoModule *create_module(void) { return new VPPForwarderModule(); }
+
+} // namespace core
+
+} // namespace transport