path: root/src/plugins/flowperpkt/l2_node.c
Diffstat (limited to 'src/plugins/flowperpkt/l2_node.c')
-rw-r--r--  src/plugins/flowperpkt/l2_node.c | 561
1 file changed, 561 insertions(+), 0 deletions(-)
diff --git a/src/plugins/flowperpkt/l2_node.c b/src/plugins/flowperpkt/l2_node.c
new file mode 100644
index 00000000000..1c2f681e1e1
--- /dev/null
+++ b/src/plugins/flowperpkt/l2_node.c
@@ -0,0 +1,561 @@
+/*
+ * l2_node.c - l2 ipfix-per-packet graph node
+ *
+ * Copyright (c) <current-year> <your-organization>
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vnet/pg/pg.h>
+#include <vppinfra/error.h>
+#include <flowperpkt/flowperpkt.h>
+
+/**
+ * @file l2 flow record generator graph node
+ */
+
+typedef struct
+{
+ /** interface handle */
+ u32 rx_sw_if_index;
+ u32 tx_sw_if_index;
+ /** src and dst L2 addresses */
+ u8 src_mac[6];
+ u8 dst_mac[6];
+ /** Ethertype */
+ u16 ethertype;
+ /** packet timestamp */
+ u64 timestamp;
+ /** size of the buffer */
+ u16 buffer_size;
+} flowperpkt_l2_trace_t;
+
+/* packet trace format function */
+static u8 *
+format_flowperpkt_l2_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ flowperpkt_l2_trace_t *t = va_arg (*args, flowperpkt_l2_trace_t *);
+
+ s = format (s,
+	      "FLOWPERPKT-L2: rx_sw_if_index %d, tx_sw_if_index %d, src %U dst %U ethertype 0x%04x, timestamp %lld, size %d",
+ t->rx_sw_if_index, t->tx_sw_if_index,
+ format_ethernet_address, &t->src_mac,
+ format_ethernet_address, &t->dst_mac,
+ t->ethertype, t->timestamp, t->buffer_size);
+ return s;
+}
+
+vlib_node_registration_t flowperpkt_l2_node;
+
+/* No counters at the moment */
+#define foreach_flowperpkt_l2_error
+
+typedef enum
+{
+#define _(sym,str) FLOWPERPKT_ERROR_##sym,
+ foreach_flowperpkt_l2_error
+#undef _
+ FLOWPERPKT_N_ERROR,
+} flowperpkt_l2_error_t;
+
+static char *flowperpkt_l2_error_strings[] = {
+#define _(sym,string) string,
+ foreach_flowperpkt_l2_error
+#undef _
+};
+
+typedef enum
+{
+ FLOWPERPKT_L2_NEXT_DROP,
+ FLOWPERPKT_L2_NEXT_IP4_LOOKUP,
+ FLOWPERPKT_L2_N_NEXT,
+} flowperpkt_l2_next_t;
+
+/**
+ * @brief add an entry to the flow record under construction
+ * @param vm vlib_main_t * current worker thread main structure pointer
+ * @param node vlib_node_runtime_t * runtime data for this node
+ * @param fm flowperpkt_main_t * flow-per-packet main structure pointer
+ * @param rx_sw_if_index u32 ingress interface handle
+ * @param tx_sw_if_index u32 egress interface handle
+ * @param src_mac u8 * source MAC address from the packet
+ * @param dst_mac u8 * destination MAC address from the packet
+ * @param ethertype u16 Ethertype from the packet
+ * @param timestamp u64 timestamp, nanoseconds since 1/1/70
+ * @param length u16 L2 frame length of the packet
+ * @param do_flush int 1 = flush all cached records, 0 = construct a record
+ */
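+
+/*
+ * For reference, a sketch of one data record as laid out by the copies
+ * below (field names are illustrative only; the corresponding IPFIX
+ * template is defined elsewhere in the plugin):
+ *
+ *   u32 ingress_interface;   - rx_sw_if_index, network byte order
+ *   u32 egress_interface;    - tx_sw_if_index, network byte order
+ *   u8  src_mac[6];          - source MAC address
+ *   u8  dst_mac[6];          - destination MAC address
+ *   u16 ethertype;           - stored as two octets
+ *   u64 timestamp;           - nanoseconds since 1/1/70
+ *   u16 packet_length;       - network byte order
+ *
+ * i.e. 32 octets per record, matching the current_length increment below.
+ */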
+
+static inline void
+add_to_flow_record_l2 (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ flowperpkt_main_t * fm,
+ u32 rx_sw_if_index, u32 tx_sw_if_index,
+ u8 * src_mac, u8 * dst_mac,
+ u16 ethertype, u64 timestamp, u16 length, int do_flush)
+{
+ u32 my_cpu_number = vm->cpu_index;
+ flow_report_main_t *frm = &flow_report_main;
+ ip4_header_t *ip;
+ udp_header_t *udp;
+ ip4_ipfix_template_packet_t *tp;
+ ipfix_message_header_t *h;
+ ipfix_set_header_t *s;
+ vlib_frame_t *f;
+ vlib_buffer_t *b0;
+ u16 offset;
+ u32 bi0;
+ vlib_buffer_free_list_t *fl;
+
+ /* Find or allocate a buffer */
+ b0 = fm->l2_buffers_per_worker[my_cpu_number];
+
+ /* Need to allocate a buffer? */
+ if (PREDICT_FALSE (b0 == 0))
+ {
+ /* Nothing to flush */
+ if (do_flush)
+ return;
+
+ /* $$$$ drop counter? */
+ if (vlib_buffer_alloc (vm, &bi0, 1) != 1)
+ return;
+
+ /* Initialize the buffer */
+ b0 = fm->l2_buffers_per_worker[my_cpu_number] =
+ vlib_get_buffer (vm, bi0);
+ fl =
+ vlib_buffer_get_free_list (vm, VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
+ vlib_buffer_init_for_free_list (b0, fl);
+ VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);
+ offset = 0;
+ }
+ else
+ {
+ /* use the current buffer */
+ bi0 = vlib_get_buffer_index (vm, b0);
+ offset = fm->l2_next_record_offset_per_worker[my_cpu_number];
+ }
+
+ /* Find or allocate a frame */
+ f = fm->l2_frames_per_worker[my_cpu_number];
+ if (PREDICT_FALSE (f == 0))
+ {
+ u32 *to_next;
+ f = vlib_get_frame_to_node (vm, ip4_lookup_node.index);
+ fm->l2_frames_per_worker[my_cpu_number] = f;
+
+ /* Enqueue the buffer */
+ to_next = vlib_frame_vector_args (f);
+ to_next[0] = bi0;
+ f->n_vectors = 1;
+ }
+
+ /* Fresh packet, construct header */
+ if (PREDICT_FALSE (offset == 0))
+ {
+ flow_report_stream_t *stream;
+
+ stream = &frm->streams[0];
+
+ b0->current_data = 0;
+ b0->current_length = sizeof (*ip) + sizeof (*udp) + sizeof (*h) +
+ sizeof (*s);
+ b0->flags |= (VLIB_BUFFER_TOTAL_LENGTH_VALID | VLIB_BUFFER_FLOW_REPORT);
+ vnet_buffer (b0)->sw_if_index[VLIB_RX] = 0;
+ vnet_buffer (b0)->sw_if_index[VLIB_TX] = frm->fib_index;
+
+ tp = vlib_buffer_get_current (b0);
+ ip = (ip4_header_t *) & tp->ip4;
+ udp = (udp_header_t *) (ip + 1);
+ h = (ipfix_message_header_t *) (udp + 1);
+ s = (ipfix_set_header_t *) (h + 1);
+
+ ip->ip_version_and_header_length = 0x45;
+ ip->ttl = 254;
+ ip->protocol = IP_PROTOCOL_UDP;
+ ip->flags_and_fragment_offset = 0;
+ ip->src_address.as_u32 = frm->src_address.as_u32;
+ ip->dst_address.as_u32 = frm->ipfix_collector.as_u32;
+ udp->src_port = clib_host_to_net_u16 (UDP_DST_PORT_ipfix);
+ udp->dst_port = clib_host_to_net_u16 (UDP_DST_PORT_ipfix);
+ udp->checksum = 0;
+
+ /* FIXUP: message header export_time */
+ h->export_time = (u32)
+ (((f64) frm->unix_time_0) +
+ (vlib_time_now (frm->vlib_main) - frm->vlib_time_0));
+ h->export_time = clib_host_to_net_u32 (h->export_time);
+ h->domain_id = clib_host_to_net_u32 (stream->domain_id);
+
+ /* FIXUP: message header sequence_number */
+ h->sequence_number = stream->sequence_number++;
+ h->sequence_number = clib_host_to_net_u32 (h->sequence_number);
+
+ offset = (u32) (((u8 *) (s + 1)) - (u8 *) tp);
+ }
+
+ /* Add data, unless we're flushing stale data */
+ if (PREDICT_TRUE (do_flush == 0))
+ {
+
+ /* Add data */
+ /* Ingress interface */
+ {
+ u32 ingress_interface = clib_host_to_net_u32 (rx_sw_if_index);
+ clib_memcpy (b0->data + offset, &ingress_interface,
+ sizeof (ingress_interface));
+ offset += sizeof (ingress_interface);
+ }
+ /* Egress interface */
+ {
+ u32 egress_interface = clib_host_to_net_u32 (tx_sw_if_index);
+ clib_memcpy (b0->data + offset, &egress_interface,
+ sizeof (egress_interface));
+ offset += sizeof (egress_interface);
+ }
+ /* src mac address */
+ {
+ clib_memcpy (b0->data + offset, src_mac, 6);
+ offset += 6;
+ }
+ /* dst mac address */
+ {
+ clib_memcpy (b0->data + offset, dst_mac, 6);
+ offset += 6;
+ }
+
+ /* ethertype */
+ b0->data[offset++] = ethertype >> 8;
+ b0->data[offset++] = ethertype & 0xFF;
+
+ /* Timestamp */
+ clib_memcpy (b0->data + offset, &timestamp, sizeof (f64));
+ offset += sizeof (f64);
+
+ /* pkt size */
+ {
+ u16 pkt_size = clib_host_to_net_u16 (length);
+ clib_memcpy (b0->data + offset, &pkt_size, sizeof (pkt_size));
+ offset += sizeof (pkt_size);
+ }
+
+ b0->current_length +=
+ /* 2*sw_if_index + 2*mac + ethertype + timestamp + length = 32 */
+ 2 * sizeof (u32) + 12 + sizeof (u16) + sizeof (f64) + sizeof (u16);
+
+ }
+ /* Time to flush the buffer? */
+ if (PREDICT_FALSE
+      (do_flush || (offset + 2 * sizeof (u32) + 12 + sizeof (u16) +
+		    sizeof (f64) + sizeof (u16)) > frm->path_mtu))
+ {
+ tp = vlib_buffer_get_current (b0);
+ ip = (ip4_header_t *) & tp->ip4;
+ udp = (udp_header_t *) (ip + 1);
+ h = (ipfix_message_header_t *) (udp + 1);
+ s = (ipfix_set_header_t *) (h + 1);
+
+ s->set_id_length = ipfix_set_id_length (fm->l2_report_id,
+ b0->current_length -
+ (sizeof (*ip) + sizeof (*udp) +
+ sizeof (*h)));
+ h->version_length = version_length (b0->current_length -
+ (sizeof (*ip) + sizeof (*udp)));
+
+ ip->length = clib_host_to_net_u16 (b0->current_length);
+
+ ip->checksum = ip4_header_checksum (ip);
+ udp->length = clib_host_to_net_u16 (b0->current_length - sizeof (*ip));
+
+ if (frm->udp_checksum)
+ {
+ /* RFC 7011 section 10.3.2. */
+ udp->checksum = ip4_tcp_udp_compute_checksum (vm, b0, ip);
+ if (udp->checksum == 0)
+ udp->checksum = 0xffff;
+ }
+
+ ASSERT (ip->checksum == ip4_header_checksum (ip));
+
+ if (PREDICT_FALSE (vlib_get_trace_count (vm, node) > 0))
+ {
+ vlib_trace_buffer (vm, node, FLOWPERPKT_L2_NEXT_IP4_LOOKUP, b0,
+ 0 /* follow chain */ );
+ flowperpkt_l2_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ memset (t, 0, sizeof (*t));
+ t->rx_sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+ t->tx_sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_TX];
+ t->buffer_size = b0->current_length;
+ }
+
+ vlib_put_frame_to_node (vm, ip4_lookup_node.index,
+ fm->l2_frames_per_worker[my_cpu_number]);
+ fm->l2_frames_per_worker[my_cpu_number] = 0;
+ fm->l2_buffers_per_worker[my_cpu_number] = 0;
+ offset = 0;
+ }
+
+ fm->l2_next_record_offset_per_worker[my_cpu_number] = offset;
+}
+
+void
+flowperpkt_flush_callback_l2 (void)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ flowperpkt_main_t *fm = &flowperpkt_main;
+ vlib_node_runtime_t *node;
+ node = vlib_node_get_runtime (vm, flowperpkt_l2_node.index);
+
+ add_to_flow_record_l2 (vm, node, fm, 0 /* rx_sw_if_index */ ,
+ 0 /* tx_sw_if_index */ ,
+ 0 /* src mac */ ,
+ 0 /* dst mac */ ,
+ 0 /* ethertype */ ,
+ 0ULL /* timestamp */ ,
+ 0 /* length */ ,
+ 1 /* do_flush */ );
+}
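+
+/*
+ * Note: the flush callback above is expected to be invoked from the
+ * plugin's flow-report machinery (outside this file) so that a partially
+ * filled buffer is eventually exported rather than held indefinitely;
+ * the exact call site is not shown here and is an assumption.
+ */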
+
+
+static uword
+flowperpkt_l2_node_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ u32 n_left_from, *from, *to_next;
+ flowperpkt_l2_next_t next_index;
+ flowperpkt_main_t *fm = &flowperpkt_main;
+ u64 now;
+
+ now = (u64) ((vlib_time_now (vm) - fm->vlib_time_0) * 1e9);
+ now += fm->nanosecond_time_0;
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ u32 next0 = FLOWPERPKT_L2_NEXT_DROP;
+ u32 next1 = FLOWPERPKT_L2_NEXT_DROP;
+ ethernet_header_t *eh0, *eh1;
+ u16 len0, len1;
+ u32 bi0, bi1;
+ vlib_buffer_t *b0, *b1;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t *p2, *p3;
+
+ p2 = vlib_get_buffer (vm, from[2]);
+ p3 = vlib_get_buffer (vm, from[3]);
+
+ vlib_prefetch_buffer_header (p2, LOAD);
+ vlib_prefetch_buffer_header (p3, LOAD);
+
+ CLIB_PREFETCH (p2->data, CLIB_CACHE_LINE_BYTES, STORE);
+ CLIB_PREFETCH (p3->data, CLIB_CACHE_LINE_BYTES, STORE);
+ }
+
+ /* speculatively enqueue b0 and b1 to the current next frame */
+ to_next[0] = bi0 = from[0];
+ to_next[1] = bi1 = from[1];
+ from += 2;
+ to_next += 2;
+ n_left_from -= 2;
+ n_left_to_next -= 2;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+
+ vnet_feature_next (vnet_buffer (b0)->sw_if_index[VLIB_TX],
+ &next0, b0);
+ vnet_feature_next (vnet_buffer (b1)->sw_if_index[VLIB_TX],
+ &next1, b1);
+
+ eh0 = vlib_buffer_get_current (b0);
+ len0 = vlib_buffer_length_in_chain (vm, b0);
+
+ if (PREDICT_TRUE ((b0->flags & VLIB_BUFFER_FLOW_REPORT) == 0))
+ add_to_flow_record_l2 (vm, node, fm,
+ vnet_buffer (b0)->sw_if_index[VLIB_RX],
+ vnet_buffer (b0)->sw_if_index[VLIB_TX],
+ eh0->src_address,
+ eh0->dst_address,
+ eh0->type, now, len0, 0 /* flush */ );
+
+	  eh1 = vlib_buffer_get_current (b1);
+	  len1 = vlib_buffer_length_in_chain (vm, b1);
+
+ if (PREDICT_TRUE ((b1->flags & VLIB_BUFFER_FLOW_REPORT) == 0))
+ add_to_flow_record_l2 (vm, node, fm,
+ vnet_buffer (b1)->sw_if_index[VLIB_RX],
+ vnet_buffer (b1)->sw_if_index[VLIB_TX],
+ eh1->src_address,
+ eh1->dst_address,
+ eh1->type, now, len1, 0 /* flush */ );
+
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
+ {
+ if (b0->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ flowperpkt_l2_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->rx_sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+ t->tx_sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_TX];
+ clib_memcpy (t->src_mac, eh0->src_address, 6);
+ clib_memcpy (t->dst_mac, eh0->dst_address, 6);
+ t->ethertype = clib_net_to_host_u16 (eh0->type);
+ t->timestamp = now;
+ t->buffer_size = len0;
+ }
+ if (b1->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ flowperpkt_l2_trace_t *t =
+ vlib_add_trace (vm, node, b1, sizeof (*t));
+ t->rx_sw_if_index = vnet_buffer (b1)->sw_if_index[VLIB_RX];
+ t->tx_sw_if_index = vnet_buffer (b1)->sw_if_index[VLIB_TX];
+ clib_memcpy (t->src_mac, eh1->src_address, 6);
+ clib_memcpy (t->dst_mac, eh1->dst_address, 6);
+ t->ethertype = clib_net_to_host_u16 (eh1->type);
+ t->timestamp = now;
+ t->buffer_size = len1;
+ }
+ }
+
+ /* verify speculative enqueues, maybe switch current next frame */
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, bi1, next0, next1);
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t *b0;
+ u32 next0 = FLOWPERPKT_L2_NEXT_DROP;
+ ethernet_header_t *eh0;
+ u16 len0;
+
+ /* speculatively enqueue b0 to the current next frame */
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ vnet_feature_next (vnet_buffer (b0)->sw_if_index[VLIB_TX],
+ &next0, b0);
+
+ eh0 = vlib_buffer_get_current (b0);
+ len0 = vlib_buffer_length_in_chain (vm, b0);
+
+ if (PREDICT_TRUE ((b0->flags & VLIB_BUFFER_FLOW_REPORT) == 0))
+ add_to_flow_record_l2 (vm, node, fm,
+ vnet_buffer (b0)->sw_if_index[VLIB_RX],
+ vnet_buffer (b0)->sw_if_index[VLIB_TX],
+ eh0->src_address,
+ eh0->dst_address,
+ eh0->type, now, len0, 0 /* flush */ );
+
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
+ && (b0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ flowperpkt_l2_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->rx_sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+ t->tx_sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_TX];
+ clib_memcpy (t->src_mac, eh0->src_address, 6);
+ clib_memcpy (t->dst_mac, eh0->dst_address, 6);
+ t->ethertype = clib_net_to_host_u16 (eh0->type);
+ t->timestamp = now;
+ t->buffer_size = len0;
+ }
+
+ /* verify speculative enqueue, maybe switch current next frame */
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+ return frame->n_vectors;
+}
+
+/**
+ * @brief IPFIX l2 flow-per-packet graph node
+ * @node flowperpkt-l2
+ *
+ * This is the IPFIX flow-record-per-packet node.
+ *
+ * @param vm vlib_main_t corresponding to the current thread.
+ * @param node vlib_node_runtime_t data for this node.
+ * @param frame vlib_frame_t whose contents should be dispatched.
+ *
+ * @par Graph mechanics: buffer metadata, next index usage
+ *
+ * <em>Uses:</em>
+ * - <code>vnet_buffer(b)->sw_if_index[VLIB_RX]</code>,
+ *   <code>vnet_buffer(b)->sw_if_index[VLIB_TX]</code>
+ *   - ingress and egress interface handles copied into each flow record;
+ *     the L2 header fields are read directly at the buffer's current data,
+ *     so no rewrite-length bookkeeping is needed.
+ * - <code>b->flags & VLIB_BUFFER_FLOW_REPORT</code>
+ *   - Used to suppress flow record generation for flow record packets.
+ *
+ * <em>Sets:</em>
+ * - <code>b->flags & VLIB_BUFFER_FLOW_REPORT</code>
+ *   - To suppress flow record generation for flow record packets
+ *
+ * <em>Next Index:</em>
+ * - Next configured output feature on the interface, usually
+ * "interface-output." Generated flow records head for ip4-lookup
+ */
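+
+/*
+ * Illustrative sketch only: the node is expected to be placed on the
+ * "interface-output" feature arc, roughly as below.  The actual
+ * registration (names, arc, ordering) lives elsewhere in the plugin and
+ * may differ.
+ *
+ *   VNET_FEATURE_INIT (flowperpkt_l2, static) = {
+ *     .arc_name = "interface-output",
+ *     .node_name = "flowperpkt-l2",
+ *     .runs_before = VNET_FEATURES ("interface-tx"),
+ *   };
+ */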
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (flowperpkt_l2_node) = {
+ .function = flowperpkt_l2_node_fn,
+ .name = "flowperpkt-l2",
+ .vector_size = sizeof (u32),
+ .format_trace = format_flowperpkt_l2_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(flowperpkt_l2_error_strings),
+ .error_strings = flowperpkt_l2_error_strings,
+
+ .n_next_nodes = FLOWPERPKT_L2_N_NEXT,
+
+ /* edit / add dispositions here */
+ .next_nodes = {
+ [FLOWPERPKT_L2_NEXT_DROP] = "error-drop",
+ [FLOWPERPKT_L2_NEXT_IP4_LOOKUP] = "ip4-lookup",
+ },
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */