author     Dave Barach <dave@barachs.net>              2016-12-20 17:59:27 -0500
committer  Damjan Marion <dmarion.lists@gmail.com>     2016-12-22 19:04:16 +0000
commit     504b83009a00d70c777c04014967494642074d52
tree       8159edc93c0027b0dac200875edf1f07481bbbff /plugins
parent     abea9664983b723023e692d77fd4fe3ca29c1e34
Add L2 flow-record generation
Change-Id: I32a9513f2b66d35e0abe2aeb6eb86a4729ba7f74
Signed-off-by: Dave Barach <dave@barachs.net>
Diffstat (limited to 'plugins')
-rw-r--r--  plugins/flowperpkt-plugin/Makefile.am                   |   1
-rw-r--r--  plugins/flowperpkt-plugin/flowperpkt/flowperpkt.api     |   2
-rw-r--r--  plugins/flowperpkt-plugin/flowperpkt/flowperpkt.c       | 299
-rw-r--r--  plugins/flowperpkt-plugin/flowperpkt/flowperpkt.h       |  32
-rw-r--r--  plugins/flowperpkt-plugin/flowperpkt/flowperpkt_test.c  |   5
-rw-r--r--  plugins/flowperpkt-plugin/flowperpkt/l2_node.c          | 544
-rw-r--r--  plugins/flowperpkt-plugin/flowperpkt/node.c             | 159
7 files changed, 881 insertions(+), 161 deletions(-)
diff --git a/plugins/flowperpkt-plugin/Makefile.am b/plugins/flowperpkt-plugin/Makefile.am
index 1df758ed6f4..9354e26faa2 100644
--- a/plugins/flowperpkt-plugin/Makefile.am
+++ b/plugins/flowperpkt-plugin/Makefile.am
@@ -24,6 +24,7 @@ vppplugins_LTLIBRARIES = flowperpkt_plugin.la
vppapitestplugins_LTLIBRARIES = flowperpkt_test_plugin.la
flowperpkt_plugin_la_SOURCES = flowperpkt/flowperpkt.c \
+ flowperpkt/l2_node.c \
flowperpkt/node.c \
flowperpkt/flowperpkt_plugin.api.h
flowperpkt_plugin_la_LDFLAGS = -module
diff --git a/plugins/flowperpkt-plugin/flowperpkt/flowperpkt.api b/plugins/flowperpkt-plugin/flowperpkt/flowperpkt.api
index 8933b5852f4..fa878f21ed3 100644
--- a/plugins/flowperpkt-plugin/flowperpkt/flowperpkt.api
+++ b/plugins/flowperpkt-plugin/flowperpkt/flowperpkt.api
@@ -22,7 +22,7 @@ manual_print define flowperpkt_tx_interface_add_del
/* Enable / disable the feature */
u8 is_add;
- u8 is_ipv6;
+ u8 which; /* 0 = ipv4, 1 = l2, 2 = ipv6 (not yet implemented) */
/* Interface handle */
u32 sw_if_index;
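
The API change above replaces the old is_ipv6 flag with a three-way "which" selector. Below is a minimal, self-contained sketch (not the generated VPP API code) of how a client would fill the updated message; the struct models only the fields shown in flowperpkt.api, and the interface index is hypothetical.

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

/* Model of the updated message body, for illustration only */
typedef struct
{
  uint8_t is_add;		/* enable (1) / disable (0) the feature */
  uint8_t which;		/* 0 = ipv4, 1 = l2, 2 = ipv6 (not yet implemented) */
  uint32_t sw_if_index;		/* interface handle, network byte order on the wire */
} flowperpkt_tx_interface_add_del_model_t;

int
main (void)
{
  flowperpkt_tx_interface_add_del_model_t mp;

  mp.is_add = 1;		/* enable the feature */
  mp.which = 1;			/* request the new L2 variant */
  mp.sw_if_index = htonl (5);	/* hypothetical sw_if_index */

  printf ("is_add=%u which=%u sw_if_index=%u\n",
	  (unsigned) mp.is_add, (unsigned) mp.which,
	  (unsigned) ntohl (mp.sw_if_index));
  return 0;
}

Per the handler in flowperpkt.c below, which = 2 (ipv6) is rejected with VNET_API_ERROR_UNIMPLEMENTED.
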
diff --git a/plugins/flowperpkt-plugin/flowperpkt/flowperpkt.c b/plugins/flowperpkt-plugin/flowperpkt/flowperpkt.c
index 96c930ae599..fb71d5b0ffc 100644
--- a/plugins/flowperpkt-plugin/flowperpkt/flowperpkt.c
+++ b/plugins/flowperpkt-plugin/flowperpkt/flowperpkt.c
@@ -57,13 +57,21 @@ flowperpkt_main_t flowperpkt_main;
#include <flowperpkt/flowperpkt_all_api_h.h>
#undef vl_api_version
-/* Define the per-interface configurable feature */
+/* Define the per-interface configurable features */
/* *INDENT-OFF* */
-VNET_FEATURE_INIT (flow_perpacket, static) = {
+VNET_FEATURE_INIT (flow_perpacket_ipv4, static) =
+{
.arc_name = "ip4-output",
- .node_name = "flowperpkt",
+ .node_name = "flowperpkt-ipv4",
.runs_before = VNET_FEATURES ("interface-output"),
};
+
+VNET_FEATURE_INIT (flow_perpacket_l2, static) =
+{
+ .arc_name = "interface-output",
+ .node_name = "flowperpkt-l2",
+ .runs_before = VNET_FEATURES ("interface-tx"),
+};
/* *INDENT-ON* */
/*
@@ -120,11 +128,12 @@ bad_sw_if_index: \
* @param collector_port u16 the collector port we should use, host byte order
* @returns u8 * vector containing the indicated IPFIX template packet
*/
-u8 *
-flowperpkt_template_rewrite (flow_report_main_t * frm,
- flow_report_t * fr,
- ip4_address_t * collector_address,
- ip4_address_t * src_address, u16 collector_port)
+static inline u8 *
+flowperpkt_template_rewrite_inline (flow_report_main_t * frm,
+ flow_report_t * fr,
+ ip4_address_t * collector_address,
+ ip4_address_t * src_address,
+ u16 collector_port, int variant)
{
ip4_header_t *ip;
udp_header_t *udp;
@@ -137,32 +146,64 @@ flowperpkt_template_rewrite (flow_report_main_t * frm,
ip4_ipfix_template_packet_t *tp;
u32 field_count = 0;
flow_report_stream_t *stream;
+ flowperpkt_main_t *fm = &flowperpkt_main;
stream = &frm->streams[fr->stream_index];
- /*
- * Supported Fields:
- *
- * ingressInterface, TLV type 10, u32
- * egressInterface, TLV type 14, u32
- * sourceIpv4Address, TLV type 8, u32
- * destinationIPv4Address, TLV type 12, u32
- * ipClassOfService, TLV type 5, u8
- * flowStartNanoseconds, TLV type 156, dateTimeNanoseconds (f64)
- * Implementation: f64 nanoseconds since VPP started
- * warning: wireshark doesn't really understand this TLV
- * dataLinkFrameSize, TLV type 312, u16
- * warning: wireshark doesn't understand this TLV at all
- */
-
- /* Currently 7 fields */
- field_count += 7;
-
- /* allocate rewrite space */
- vec_validate_aligned (rewrite,
- sizeof (ip4_ipfix_template_packet_t)
- + field_count * sizeof (ipfix_field_specifier_t) - 1,
- CLIB_CACHE_LINE_BYTES);
+ if (variant == FLOW_VARIANT_IPV4)
+ {
+ /*
+ * ip4 Supported Fields:
+ *
+ * ingressInterface, TLV type 10, u32
+ * egressInterface, TLV type 14, u32
+ * sourceIpv4Address, TLV type 8, u32
+ * destinationIPv4Address, TLV type 12, u32
+ * ipClassOfService, TLV type 5, u8
+ * flowStartNanoseconds, TLV type 156, dateTimeNanoseconds (f64)
+ * Implementation: f64 nanoseconds since VPP started
+ * warning: wireshark doesn't really understand this TLV
+ * dataLinkFrameSize, TLV type 312, u16
+ * warning: wireshark doesn't understand this TLV at all
+ */
+
+ /* Currently 7 fields */
+ field_count += 7;
+
+ /* allocate rewrite space */
+ vec_validate_aligned
+ (rewrite,
+ sizeof (ip4_ipfix_template_packet_t)
+ + field_count * sizeof (ipfix_field_specifier_t) - 1,
+ CLIB_CACHE_LINE_BYTES);
+ }
+ else if (variant == FLOW_VARIANT_L2)
+ {
+ /*
+ * L2 Supported Fields:
+ *
+ * ingressInterface, TLV type 10, u32
+ * egressInterface, TLV type 14, u32
+ * sourceMacAddress, TLV type 56, u8[6] we hope
+ * destinationMacAddress, TLV type 57, u8[6] we hope
+ * ethernetType, TLV type 256, u16
+ * flowStartNanoseconds, TLV type 156, dateTimeNanoseconds (f64)
+ * Implementation: f64 nanoseconds since VPP started
+ * warning: wireshark doesn't really understand this TLV
+ * dataLinkFrameSize, TLV type 312, u16
+ * warning: wireshark doesn't understand this TLV at all
+ */
+
+ /* Currently 7 fields */
+ field_count += 7;
+
+ /* allocate rewrite space */
+ vec_validate_aligned
+ (rewrite,
+ sizeof (ip4_ipfix_template_packet_t)
+ + field_count * sizeof (ipfix_field_specifier_t) - 1,
+ CLIB_CACHE_LINE_BYTES);
+ }
tp = (ip4_ipfix_template_packet_t *) rewrite;
ip = (ip4_header_t *) & tp->ip4;
@@ -186,28 +227,66 @@ flowperpkt_template_rewrite (flow_report_main_t * frm,
h->domain_id = clib_host_to_net_u32 (stream->domain_id);
/* Add TLVs to the template */
- f->e_id_length = ipfix_e_id_length (0 /* enterprise */ , ingressInterface,
- 4);
- f++;
- f->e_id_length = ipfix_e_id_length (0 /* enterprise */ , egressInterface,
- 4);
- f++;
- f->e_id_length = ipfix_e_id_length (0 /* enterprise */ , sourceIPv4Address,
- 4);
- f++;
- f->e_id_length = ipfix_e_id_length (0 /* enterprise */ ,
- destinationIPv4Address, 4);
- f++;
- f->e_id_length = ipfix_e_id_length (0 /* enterprise */ , ipClassOfService,
- 1);
- f++;
- f->e_id_length =
- ipfix_e_id_length (0 /* enterprise */ , flowStartNanoseconds,
- 8);
- f++;
- f->e_id_length = ipfix_e_id_length (0 /* enterprise */ , dataLinkFrameSize,
- 2);
- f++;
+ if (variant == FLOW_VARIANT_IPV4)
+ {
+ f->e_id_length =
+ ipfix_e_id_length (0 /* enterprise */ , ingressInterface,
+ 4);
+ f++;
+ f->e_id_length =
+ ipfix_e_id_length (0 /* enterprise */ , egressInterface,
+ 4);
+ f++;
+ f->e_id_length =
+ ipfix_e_id_length (0 /* enterprise */ , sourceIPv4Address,
+ 4);
+ f++;
+ f->e_id_length =
+ ipfix_e_id_length (0 /* enterprise */ , destinationIPv4Address, 4);
+ f++;
+ f->e_id_length =
+ ipfix_e_id_length (0 /* enterprise */ , ipClassOfService,
+ 1);
+ f++;
+ f->e_id_length =
+ ipfix_e_id_length (0 /* enterprise */ , flowStartNanoseconds,
+ 8);
+ f++;
+ f->e_id_length =
+ ipfix_e_id_length (0 /* enterprise */ , dataLinkFrameSize,
+ 2);
+ f++;
+ }
+ else if (variant == FLOW_VARIANT_L2)
+ {
+ f->e_id_length =
+ ipfix_e_id_length (0 /* enterprise */ , ingressInterface,
+ 4);
+ f++;
+ f->e_id_length =
+ ipfix_e_id_length (0 /* enterprise */ , egressInterface,
+ 4);
+ f++;
+ f->e_id_length =
+ ipfix_e_id_length (0 /* enterprise */ , sourceMacAddress,
+ 6);
+ f++;
+ f->e_id_length =
+ ipfix_e_id_length (0 /* enterprise */ , destinationMacAddress, 6);
+ f++;
+ f->e_id_length = ipfix_e_id_length (0 /* enterprise */ , ethernetType,
+ 2);
+ f++;
+ f->e_id_length =
+ ipfix_e_id_length (0 /* enterprise */ , flowStartNanoseconds,
+ 8);
+ f++;
+ f->e_id_length =
+ ipfix_e_id_length (0 /* enterprise */ , dataLinkFrameSize,
+ 2);
+ f++;
+ }
+
/* Extend in the obvious way, right here... */
/* Back to the template packet... */
@@ -218,6 +297,11 @@ flowperpkt_template_rewrite (flow_report_main_t * frm,
/* Field count in this template */
t->id_count = ipfix_id_count (fr->template_id, f - first_field);
+ if (variant == FLOW_VARIANT_IPV4)
+ fm->ipv4_report_id = fr->template_id;
+ else if (variant == FLOW_VARIANT_L2)
+ fm->l2_report_id = fr->template_id;
+
/* set length in octets */
s->set_id_length =
ipfix_set_id_length (2 /* set_id */ , (u8 *) f - (u8 *) s);
@@ -231,6 +315,31 @@ flowperpkt_template_rewrite (flow_report_main_t * frm,
return rewrite;
}
+u8 *
+flowperpkt_template_rewrite_ipv4 (flow_report_main_t * frm,
+ flow_report_t * fr,
+ ip4_address_t * collector_address,
+ ip4_address_t * src_address,
+ u16 collector_port)
+{
+ return flowperpkt_template_rewrite_inline
+ (frm, fr, collector_address, src_address, collector_port,
+ FLOW_VARIANT_IPV4);
+}
+
+u8 *
+flowperpkt_template_rewrite_l2 (flow_report_main_t * frm,
+ flow_report_t * fr,
+ ip4_address_t * collector_address,
+ ip4_address_t * src_address,
+ u16 collector_port)
+{
+ return flowperpkt_template_rewrite_inline
+ (frm, fr, collector_address, src_address, collector_port,
+ FLOW_VARIANT_L2);
+}
+
+
/**
* @brief Flush accumulated data
* @param frm flow_report_main_t *
@@ -242,11 +351,21 @@ flowperpkt_template_rewrite (flow_report_main_t * frm,
* will be sent.
*/
vlib_frame_t *
-flowperpkt_data_callback (flow_report_main_t * frm,
- flow_report_t * fr,
- vlib_frame_t * f, u32 * to_next, u32 node_index)
+flowperpkt_data_callback_ipv4 (flow_report_main_t * frm,
+ flow_report_t * fr,
+ vlib_frame_t * f, u32 * to_next,
+ u32 node_index)
+{
+ flowperpkt_flush_callback_ipv4 ();
+ return f;
+}
+
+vlib_frame_t *
+flowperpkt_data_callback_l2 (flow_report_main_t * frm,
+ flow_report_t * fr,
+ vlib_frame_t * f, u32 * to_next, u32 node_index)
{
- flowperpkt_flush_callback ();
+ flowperpkt_flush_callback_l2 ();
return f;
}
@@ -259,21 +378,21 @@ flowperpkt_data_callback (flow_report_main_t * frm,
*/
static int flowperpkt_tx_interface_add_del_feature
- (flowperpkt_main_t * fm, u32 sw_if_index, int is_add)
+ (flowperpkt_main_t * fm, u32 sw_if_index, int which, int is_add)
{
flow_report_main_t *frm = &flow_report_main;
vnet_flow_report_add_del_args_t _a, *a = &_a;
int rv;
- if (!fm->report_created)
+ if (which == FLOW_VARIANT_IPV4 && !fm->ipv4_report_created)
{
memset (a, 0, sizeof (*a));
- a->rewrite_callback = flowperpkt_template_rewrite;
- a->flow_data_callback = flowperpkt_data_callback;
+ a->rewrite_callback = flowperpkt_template_rewrite_ipv4;
+ a->flow_data_callback = flowperpkt_data_callback_ipv4;
a->is_add = 1;
a->domain_id = 1; /*$$$$ config parameter */
a->src_port = 4739; /*$$$$ config parameter */
- fm->report_created = 1;
+ fm->ipv4_report_created = 1;
rv = vnet_flow_report_add_del (frm, a);
if (rv)
@@ -282,9 +401,30 @@ static int flowperpkt_tx_interface_add_del_feature
return -1;
}
}
+ else if (which == FLOW_VARIANT_L2 && !fm->l2_report_created)
+ {
+ memset (a, 0, sizeof (*a));
+ a->rewrite_callback = flowperpkt_template_rewrite_l2;
+ a->flow_data_callback = flowperpkt_data_callback_l2;
+ a->is_add = 1;
+ a->domain_id = 1; /*$$$$ config parameter */
+ a->src_port = 4739; /*$$$$ config parameter */
+ fm->l2_report_created = 1;
- vnet_feature_enable_disable ("ip4-output", "flowperpkt", sw_if_index,
- is_add, 0, 0);
+ rv = vnet_flow_report_add_del (frm, a);
+ if (rv)
+ {
+ clib_warning ("vnet_flow_report_add_del returned %d", rv);
+ return -1;
+ }
+ }
+
+ if (which == FLOW_VARIANT_IPV4)
+ vnet_feature_enable_disable ("ip4-output", "flowperpkt-ipv4",
+ sw_if_index, is_add, 0, 0);
+ else if (which == FLOW_VARIANT_L2)
+ vnet_feature_enable_disable ("interface-output", "flowperpkt-l2",
+ sw_if_index, is_add, 0, 0);
return 0;
}
@@ -303,8 +443,15 @@ void vl_api_flowperpkt_tx_interface_add_del_t_handler
VALIDATE_SW_IF_INDEX (mp);
- rv = flowperpkt_tx_interface_add_del_feature (fm, sw_if_index, mp->is_add);
+ if (mp->which != FLOW_VARIANT_IPV4 && mp->which != FLOW_VARIANT_L2)
+ {
+ rv = VNET_API_ERROR_UNIMPLEMENTED;
+ goto out;
+ }
+ rv = flowperpkt_tx_interface_add_del_feature (fm, sw_if_index, mp->which,
+ mp->is_add);
+out:
BAD_SW_IF_INDEX_LABEL;
REPLY_MACRO (VL_API_FLOWPERPKT_TX_INTERFACE_ADD_DEL_REPLY);
@@ -322,9 +469,9 @@ static void *vl_api_flowperpkt_tx_interface_add_del_t_print
u8 *s;
s = format (0, "SCRIPT: flowperpkt_tx_interface_add_del ");
- s = format (s, "sw_if_index %d is_add %d is_ipv6 %d ",
+ s = format (s, "sw_if_index %d is_add %d which %d ",
clib_host_to_net_u32 (mp->sw_if_index),
- (int) mp->is_add, (int) mp->is_ipv6);
+ (int) mp->is_add, (int) mp->which);
FINISH;
}
@@ -365,6 +512,7 @@ flowperpkt_tx_interface_add_del_feature_command_fn (vlib_main_t * vm,
flowperpkt_main_t *fm = &flowperpkt_main;
u32 sw_if_index = ~0;
int is_add = 1;
+ u8 which = FLOW_VARIANT_IPV4;
int rv;
@@ -373,8 +521,9 @@ flowperpkt_tx_interface_add_del_feature_command_fn (vlib_main_t * vm,
if (unformat (input, "disable"))
is_add = 0;
else if (unformat (input, "%U", unformat_vnet_sw_interface,
- fm->vnet_main, &sw_if_index))
- ;
+ fm->vnet_main, &sw_if_index));
+ else if (unformat (input, "l2"))
+ which = FLOW_VARIANT_L2;
else
break;
}
@@ -382,7 +531,8 @@ flowperpkt_tx_interface_add_del_feature_command_fn (vlib_main_t * vm,
if (sw_if_index == ~0)
return clib_error_return (0, "Please specify an interface...");
- rv = flowperpkt_tx_interface_add_del_feature (fm, sw_if_index, is_add);
+ rv =
+ flowperpkt_tx_interface_add_del_feature (fm, sw_if_index, which, is_add);
switch (rv)
{
case 0:
@@ -496,9 +646,12 @@ flowperpkt_init (vlib_main_t * vm)
num_threads = 1 /* main thread */ + tm->n_eal_threads;
/* Allocate per worker thread vectors */
- vec_validate (fm->buffers_per_worker, num_threads - 1);
- vec_validate (fm->frames_per_worker, num_threads - 1);
- vec_validate (fm->next_record_offset_per_worker, num_threads - 1);
+ vec_validate (fm->ipv4_buffers_per_worker, num_threads - 1);
+ vec_validate (fm->l2_buffers_per_worker, num_threads - 1);
+ vec_validate (fm->ipv4_frames_per_worker, num_threads - 1);
+ vec_validate (fm->l2_frames_per_worker, num_threads - 1);
+ vec_validate (fm->ipv4_next_record_offset_per_worker, num_threads - 1);
+ vec_validate (fm->l2_next_record_offset_per_worker, num_threads - 1);
/* Set up time reference pair */
fm->vlib_time_0 = vlib_time_now (vm);
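
As a cross-check on the template logic above, here is a self-contained sketch (not VPP code) that tallies the seven L2 fields and the IPFIX element IDs listed in the comment in flowperpkt_template_rewrite_inline(); the 32-octet total is the per-packet data-record size that l2_node.c accounts for.

#include <stdio.h>

typedef struct
{
  const char *name;
  unsigned id;			/* IPFIX information element ID (TLV type) */
  unsigned len;			/* field length in octets */
} l2_field_t;

int
main (void)
{
  /* The seven L2 template fields from the comment in flowperpkt.c */
  static const l2_field_t l2_fields[] = {
    {"ingressInterface", 10, 4},
    {"egressInterface", 14, 4},
    {"sourceMacAddress", 56, 6},
    {"destinationMacAddress", 57, 6},
    {"ethernetType", 256, 2},
    {"flowStartNanoseconds", 156, 8},
    {"dataLinkFrameSize", 312, 2},
  };
  unsigned i, total = 0;

  for (i = 0; i < sizeof (l2_fields) / sizeof (l2_fields[0]); i++)
    total += l2_fields[i].len;

  /* prints 32: the per-packet L2 data record size */
  printf ("L2 data record length: %u octets\n", total);
  return 0;
}
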
diff --git a/plugins/flowperpkt-plugin/flowperpkt/flowperpkt.h b/plugins/flowperpkt-plugin/flowperpkt/flowperpkt.h
index 31e685eb68d..20f6939dda5 100644
--- a/plugins/flowperpkt-plugin/flowperpkt/flowperpkt.h
+++ b/plugins/flowperpkt-plugin/flowperpkt/flowperpkt.h
@@ -35,15 +35,25 @@ typedef struct
/** API message ID base */
u16 msg_id_base;
- /** Has the report been created? */
- int report_created;
+ /** Have the reports [templates] been created? */
+ int ipv4_report_created;
+ int l2_report_created;
+
+ /** stream/template IDs */
+ u16 ipv4_report_id;
+ u16 l2_report_id;
/** ipfix buffers under construction, per-worker thread */
- vlib_buffer_t **buffers_per_worker;
+ vlib_buffer_t **ipv4_buffers_per_worker;
+ vlib_buffer_t **l2_buffers_per_worker;
+
/** frames containing ipfix buffers, per-worker thread */
- vlib_frame_t **frames_per_worker;
+ vlib_frame_t **ipv4_frames_per_worker;
+ vlib_frame_t **l2_frames_per_worker;
+
/** next record offset, per worker thread */
- u16 *next_record_offset_per_worker;
+ u16 *ipv4_next_record_offset_per_worker;
+ u16 *l2_next_record_offset_per_worker;
/** Time reference pair */
u64 nanosecond_time_0;
@@ -55,11 +65,19 @@ typedef struct
vnet_main_t *vnet_main;
} flowperpkt_main_t;
+typedef enum
+{
+ FLOW_VARIANT_IPV4,
+ FLOW_VARIANT_L2,
+ FLOW_N_VARIANTS,
+} flowperpkt_variant_t;
+
extern flowperpkt_main_t flowperpkt_main;
-vlib_node_registration_t flowperpkt_node;
+extern vlib_node_registration_t flowperpkt_ipv4_node;
-void flowperpkt_flush_callback (void);
+void flowperpkt_flush_callback_ipv4 (void);
+void flowperpkt_flush_callback_l2 (void);
#endif /* __included_flowperpkt_h__ */
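
The variant enum above selects both the feature arc and the graph node: ip4-output / flowperpkt-ipv4 for FLOW_VARIANT_IPV4 and interface-output / flowperpkt-l2 for FLOW_VARIANT_L2, as wired up in flowperpkt_tx_interface_add_del_feature() earlier in this patch. A self-contained sketch (not VPP code) of that mapping:

#include <stdio.h>

typedef enum
{
  FLOW_VARIANT_IPV4,
  FLOW_VARIANT_L2,
  FLOW_N_VARIANTS,
} flowperpkt_variant_t;

int
main (void)
{
  /* Arc and node names as registered by the plugin */
  static const char *arc_names[FLOW_N_VARIANTS] =
    { "ip4-output", "interface-output" };
  static const char *node_names[FLOW_N_VARIANTS] =
    { "flowperpkt-ipv4", "flowperpkt-l2" };
  int v;

  for (v = 0; v < FLOW_N_VARIANTS; v++)
    printf ("variant %d: enable node %s on arc %s\n",
	    v, node_names[v], arc_names[v]);
  return 0;
}
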
diff --git a/plugins/flowperpkt-plugin/flowperpkt/flowperpkt_test.c b/plugins/flowperpkt-plugin/flowperpkt/flowperpkt_test.c
index 3c1cd227f57..716818ffe0a 100644
--- a/plugins/flowperpkt-plugin/flowperpkt/flowperpkt_test.c
+++ b/plugins/flowperpkt-plugin/flowperpkt/flowperpkt_test.c
@@ -132,6 +132,7 @@ api_flowperpkt_tx_interface_add_del (vat_main_t * vam)
unformat_input_t *i = vam->input;
f64 timeout;
int enable_disable = 1;
+ u8 which = 0; /* ipv4 by default */
u32 sw_if_index = ~0;
vl_api_flowperpkt_tx_interface_add_del_t *mp;
@@ -144,6 +145,8 @@ api_flowperpkt_tx_interface_add_del (vat_main_t * vam)
;
else if (unformat (i, "disable"))
enable_disable = 0;
+ else if (unformat (i, "l2"))
+ which = 1;
else
break;
}
@@ -158,7 +161,7 @@ api_flowperpkt_tx_interface_add_del (vat_main_t * vam)
M (FLOWPERPKT_TX_INTERFACE_ADD_DEL, flowperpkt_tx_interface_add_del);
mp->sw_if_index = ntohl (sw_if_index);
mp->is_add = enable_disable;
- mp->is_ipv6 = 0; /* $$$$ */
+ mp->which = which;
/* send it... */
S;
diff --git a/plugins/flowperpkt-plugin/flowperpkt/l2_node.c b/plugins/flowperpkt-plugin/flowperpkt/l2_node.c
new file mode 100644
index 00000000000..ba87d431b88
--- /dev/null
+++ b/plugins/flowperpkt-plugin/flowperpkt/l2_node.c
@@ -0,0 +1,544 @@
+/*
+ * l2_node.c - l2 ipfix-per-packet graph node
+ *
+ * Copyright (c) <current-year> <your-organization>
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vnet/pg/pg.h>
+#include <vppinfra/error.h>
+#include <flowperpkt/flowperpkt.h>
+
+/**
+ * @file l2 flow record generator graph node
+ */
+
+typedef struct
+{
+ /** interface handle */
+ u32 rx_sw_if_index;
+ u32 tx_sw_if_index;
+ /** src and dst L2 addresses */
+ u8 src_mac[6];
+ u8 dst_mac[6];
+ /** Ethertype */
+ u16 ethertype;
+ /** packet timestamp */
+ u64 timestamp;
+ /** size of the buffer */
+ u16 buffer_size;
+} flowperpkt_l2_trace_t;
+
+/* packet trace format function */
+static u8 *
+format_flowperpkt_l2_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ flowperpkt_l2_trace_t *t = va_arg (*args, flowperpkt_l2_trace_t *);
+
+ s = format (s,
+ "FLOWPERPKT-L2: rx_sw_if_index %d, tx_sw_if_index %d, src %U dst %U ethertype %0x2, timestamp %lld, size %d",
+ t->rx_sw_if_index, t->tx_sw_if_index,
+ format_ethernet_address, &t->src_mac,
+ format_ethernet_address, &t->dst_mac,
+ t->ethertype, t->timestamp, t->buffer_size);
+ return s;
+}
+
+vlib_node_registration_t flowperpkt_l2_node;
+
+/* No counters at the moment */
+#define foreach_flowperpkt_l2_error
+
+typedef enum
+{
+#define _(sym,str) FLOWPERPKT_ERROR_##sym,
+ foreach_flowperpkt_l2_error
+#undef _
+ FLOWPERPKT_N_ERROR,
+} flowperpkt_l2_error_t;
+
+static char *flowperpkt_l2_error_strings[] = {
+#define _(sym,string) string,
+ foreach_flowperpkt_l2_error
+#undef _
+};
+
+typedef enum
+{
+ FLOWPERPKT_L2_NEXT_DROP,
+ FLOWPERPKT_L2_N_NEXT,
+} flowperpkt_l2_next_t;
+
+/**
+ * @brief add an entry to the flow record under construction
+ * @param vm vlib_main_t * current worker thread main structure pointer
+ * @param fm flowperpkt_main_t * flow-per-packet main structure pointer
+ * @param sw_if_index u32 interface handle
+ * @param tos u8 ToS bits from the packet
+ * @param timestamp u64 timestamp, nanoseconds since 1/1/70
+ * @param length u16 ip length of the packet
+ * @param do_flush int 1 = flush all cached records, 0 = construct a record
+ */
+
+static inline void
+add_to_flow_record_l2 (vlib_main_t * vm,
+ flowperpkt_main_t * fm,
+ u32 rx_sw_if_index, u32 tx_sw_if_index,
+ u8 * src_mac, u8 * dst_mac,
+ u16 ethertype, u64 timestamp, u16 length, int do_flush)
+{
+ u32 my_cpu_number = vm->cpu_index;
+ flow_report_main_t *frm = &flow_report_main;
+ ip4_header_t *ip;
+ udp_header_t *udp;
+ ip4_ipfix_template_packet_t *tp;
+ ipfix_message_header_t *h;
+ ipfix_set_header_t *s;
+ vlib_frame_t *f;
+ vlib_buffer_t *b0;
+ u16 offset;
+ u32 bi0;
+ vlib_buffer_free_list_t *fl;
+
+ /* Find or allocate a buffer */
+ b0 = fm->l2_buffers_per_worker[my_cpu_number];
+
+ /* Need to allocate a buffer? */
+ if (PREDICT_FALSE (b0 == 0))
+ {
+ /* Nothing to flush */
+ if (do_flush)
+ return;
+
+ /* $$$$ drop counter? */
+ if (vlib_buffer_alloc (vm, &bi0, 1) != 1)
+ return;
+
+ /* Initialize the buffer */
+ b0 = fm->l2_buffers_per_worker[my_cpu_number] =
+ vlib_get_buffer (vm, bi0);
+ fl =
+ vlib_buffer_get_free_list (vm, VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
+ vlib_buffer_init_for_free_list (b0, fl);
+ VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);
+ offset = 0;
+ }
+ else
+ {
+ /* use the current buffer */
+ bi0 = vlib_get_buffer_index (vm, b0);
+ offset = fm->l2_next_record_offset_per_worker[my_cpu_number];
+ }
+
+ /* Find or allocate a frame */
+ f = fm->l2_frames_per_worker[my_cpu_number];
+ if (PREDICT_FALSE (f == 0))
+ {
+ u32 *to_next;
+ f = vlib_get_frame_to_node (vm, ip4_lookup_node.index);
+ fm->l2_frames_per_worker[my_cpu_number] = f;
+
+ /* Enqueue the buffer */
+ to_next = vlib_frame_vector_args (f);
+ to_next[0] = bi0;
+ f->n_vectors = 1;
+ }
+
+ /* Fresh packet, construct header */
+ if (PREDICT_FALSE (offset == 0))
+ {
+ flow_report_stream_t *stream;
+
+ stream = &frm->streams[0];
+
+ b0->current_data = 0;
+ b0->current_length = sizeof (*ip) + sizeof (*udp) + sizeof (*h) +
+ sizeof (*s);
+ b0->flags |= (VLIB_BUFFER_TOTAL_LENGTH_VALID | VLIB_BUFFER_FLOW_REPORT);
+ vnet_buffer (b0)->sw_if_index[VLIB_RX] = 0;
+ vnet_buffer (b0)->sw_if_index[VLIB_TX] = frm->fib_index;
+
+ tp = vlib_buffer_get_current (b0);
+ ip = (ip4_header_t *) & tp->ip4;
+ udp = (udp_header_t *) (ip + 1);
+ h = (ipfix_message_header_t *) (udp + 1);
+ s = (ipfix_set_header_t *) (h + 1);
+
+ ip->ip_version_and_header_length = 0x45;
+ ip->ttl = 254;
+ ip->protocol = IP_PROTOCOL_UDP;
+ ip->flags_and_fragment_offset = 0;
+ ip->src_address.as_u32 = frm->src_address.as_u32;
+ ip->dst_address.as_u32 = frm->ipfix_collector.as_u32;
+ udp->src_port = clib_host_to_net_u16 (UDP_DST_PORT_ipfix);
+ udp->dst_port = clib_host_to_net_u16 (UDP_DST_PORT_ipfix);
+ udp->checksum = 0;
+
+ /* FIXUP: message header export_time */
+ h->export_time = (u32)
+ (((f64) frm->unix_time_0) +
+ (vlib_time_now (frm->vlib_main) - frm->vlib_time_0));
+ h->export_time = clib_host_to_net_u32 (h->export_time);
+ h->domain_id = clib_host_to_net_u32 (stream->domain_id);
+
+ /* FIXUP: message header sequence_number */
+ h->sequence_number = stream->sequence_number++;
+ h->sequence_number = clib_host_to_net_u32 (h->sequence_number);
+
+ offset = (u32) (((u8 *) (s + 1)) - (u8 *) tp);
+ }
+
+ /* Add data, unless we're flushing stale data */
+ if (PREDICT_TRUE (do_flush == 0))
+ {
+
+ /* Add data */
+ /* Ingress interface */
+ {
+ u32 ingress_interface = clib_host_to_net_u32 (rx_sw_if_index);
+ clib_memcpy (b0->data + offset, &ingress_interface,
+ sizeof (ingress_interface));
+ offset += sizeof (ingress_interface);
+ }
+ /* Egress interface */
+ {
+ u32 egress_interface = clib_host_to_net_u32 (tx_sw_if_index);
+ clib_memcpy (b0->data + offset, &egress_interface,
+ sizeof (egress_interface));
+ offset += sizeof (egress_interface);
+ }
+ /* src mac address */
+ {
+ clib_memcpy (b0->data + offset, src_mac, 6);
+ offset += 6;
+ }
+ /* dst mac address */
+ {
+ clib_memcpy (b0->data + offset, dst_mac, 6);
+ offset += 6;
+ }
+
+ /* ethertype */
+ b0->data[offset++] = ethertype >> 8;
+ b0->data[offset++] = ethertype & 0xFF;
+
+ /* Timestamp */
+ clib_memcpy (b0->data + offset, &timestamp, sizeof (f64));
+ offset += sizeof (f64);
+
+ /* pkt size */
+ {
+ u16 pkt_size = clib_host_to_net_u16 (length);
+ clib_memcpy (b0->data + offset, &pkt_size, sizeof (pkt_size));
+ offset += sizeof (pkt_size);
+ }
+
+ b0->current_length +=
+ /* 2*sw_if_index + 2*mac + ethertype + timestamp + length = 32 */
+ 2 * sizeof (u32) + 12 + sizeof (u16) + sizeof (f64) + sizeof (u16);
+
+ }
+ /* Time to flush the buffer? */
+ if (PREDICT_FALSE
+ (do_flush || (offset + 2 * sizeof (u32) + 12 + sizeof (u16) +
+ sizeof (f64) + sizeof (u16)) > frm->path_mtu))
+ {
+ tp = vlib_buffer_get_current (b0);
+ ip = (ip4_header_t *) & tp->ip4;
+ udp = (udp_header_t *) (ip + 1);
+ h = (ipfix_message_header_t *) (udp + 1);
+ s = (ipfix_set_header_t *) (h + 1);
+
+ s->set_id_length = ipfix_set_id_length (fm->l2_report_id,
+ b0->current_length -
+ (sizeof (*ip) + sizeof (*udp) +
+ sizeof (*h)));
+ h->version_length = version_length (b0->current_length -
+ (sizeof (*ip) + sizeof (*udp)));
+
+ ip->length = clib_host_to_net_u16 (b0->current_length);
+
+ ip->checksum = ip4_header_checksum (ip);
+ udp->length = clib_host_to_net_u16 (b0->current_length - sizeof (*ip));
+
+ if (frm->udp_checksum)
+ {
+ /* RFC 7011 section 10.3.2. */
+ udp->checksum = ip4_tcp_udp_compute_checksum (vm, b0, ip);
+ if (udp->checksum == 0)
+ udp->checksum = 0xffff;
+ }
+
+ ASSERT (ip->checksum == ip4_header_checksum (ip));
+
+ vlib_put_frame_to_node (vm, ip4_lookup_node.index,
+ fm->l2_frames_per_worker[my_cpu_number]);
+ fm->l2_frames_per_worker[my_cpu_number] = 0;
+ fm->l2_buffers_per_worker[my_cpu_number] = 0;
+ offset = 0;
+ }
+
+ fm->l2_next_record_offset_per_worker[my_cpu_number] = offset;
+}
+
+void
+flowperpkt_flush_callback_l2 (void)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ flowperpkt_main_t *fm = &flowperpkt_main;
+
+ add_to_flow_record_l2 (vm, fm, 0 /* rx_sw_if_index */ ,
+ 0 /* tx_sw_if_index */ ,
+ 0 /* src mac */ ,
+ 0 /* dst mac */ ,
+ 0 /* ethertype */ ,
+ 0ULL /* timestamp */ ,
+ 0 /* length */ ,
+ 1 /* do_flush */ );
+}
+
+
+static uword
+flowperpkt_l2_node_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ u32 n_left_from, *from, *to_next;
+ flowperpkt_l2_next_t next_index;
+ flowperpkt_main_t *fm = &flowperpkt_main;
+ u64 now;
+
+ now = (u64) ((vlib_time_now (vm) - fm->vlib_time_0) * 1e9);
+ now += fm->nanosecond_time_0;
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ u32 next0 = FLOWPERPKT_L2_NEXT_DROP;
+ u32 next1 = FLOWPERPKT_L2_NEXT_DROP;
+ ethernet_header_t *eh0, *eh1;
+ u16 len0, len1;
+ u32 bi0, bi1;
+ vlib_buffer_t *b0, *b1;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t *p2, *p3;
+
+ p2 = vlib_get_buffer (vm, from[2]);
+ p3 = vlib_get_buffer (vm, from[3]);
+
+ vlib_prefetch_buffer_header (p2, LOAD);
+ vlib_prefetch_buffer_header (p3, LOAD);
+
+ CLIB_PREFETCH (p2->data, CLIB_CACHE_LINE_BYTES, STORE);
+ CLIB_PREFETCH (p3->data, CLIB_CACHE_LINE_BYTES, STORE);
+ }
+
+ /* speculatively enqueue b0 and b1 to the current next frame */
+ to_next[0] = bi0 = from[0];
+ to_next[1] = bi1 = from[1];
+ from += 2;
+ to_next += 2;
+ n_left_from -= 2;
+ n_left_to_next -= 2;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+
+ vnet_feature_next (vnet_buffer (b0)->sw_if_index[VLIB_TX],
+ &next0, b0);
+ vnet_feature_next (vnet_buffer (b1)->sw_if_index[VLIB_TX],
+ &next1, b1);
+
+ eh0 = vlib_buffer_get_current (b0);
+ len0 = vlib_buffer_length_in_chain (vm, b0);
+
+ if (PREDICT_TRUE ((b0->flags & VLIB_BUFFER_FLOW_REPORT) == 0))
+ add_to_flow_record_l2 (vm, fm,
+ vnet_buffer (b0)->sw_if_index[VLIB_RX],
+ vnet_buffer (b0)->sw_if_index[VLIB_TX],
+ eh0->src_address,
+ eh0->dst_address,
+ eh0->type, now, len0, 0 /* flush */ );
+
+ eh1 = vlib_buffer_get_current (b1);
+ len1 = vlib_buffer_length_in_chain (vm, b1);
+
+ if (PREDICT_TRUE ((b1->flags & VLIB_BUFFER_FLOW_REPORT) == 0))
+ add_to_flow_record_l2 (vm, fm,
+ vnet_buffer (b1)->sw_if_index[VLIB_RX],
+ vnet_buffer (b1)->sw_if_index[VLIB_TX],
+ eh1->src_address,
+ eh1->dst_address,
+ eh1->type, now, len1, 0 /* flush */ );
+
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
+ {
+ if (b0->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ flowperpkt_l2_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->rx_sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+ t->tx_sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_TX];
+ clib_memcpy (t->src_mac, eh0->src_address, 6);
+ clib_memcpy (t->dst_mac, eh0->dst_address, 6);
+ t->ethertype = clib_net_to_host_u16 (eh0->type);
+ t->timestamp = now;
+ t->buffer_size = len0;
+ }
+ if (b1->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ flowperpkt_l2_trace_t *t =
+ vlib_add_trace (vm, node, b1, sizeof (*t));
+ t->rx_sw_if_index = vnet_buffer (b1)->sw_if_index[VLIB_RX];
+ t->tx_sw_if_index = vnet_buffer (b1)->sw_if_index[VLIB_TX];
+ clib_memcpy (t->src_mac, eh1->src_address, 6);
+ clib_memcpy (t->dst_mac, eh1->dst_address, 6);
+ t->ethertype = clib_net_to_host_u16 (eh1->type);
+ t->timestamp = now;
+ t->buffer_size = len1;
+ }
+ }
+
+ /* verify speculative enqueues, maybe switch current next frame */
+ vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, bi1, next0, next1);
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t *b0;
+ u32 next0 = FLOWPERPKT_L2_NEXT_DROP;
+ ethernet_header_t *eh0;
+ u16 len0;
+
+ /* speculatively enqueue b0 to the current next frame */
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ vnet_feature_next (vnet_buffer (b0)->sw_if_index[VLIB_TX],
+ &next0, b0);
+
+ eh0 = vlib_buffer_get_current (b0);
+ len0 = vlib_buffer_length_in_chain (vm, b0);
+
+ if (PREDICT_TRUE ((b0->flags & VLIB_BUFFER_FLOW_REPORT) == 0))
+ add_to_flow_record_l2 (vm, fm,
+ vnet_buffer (b0)->sw_if_index[VLIB_RX],
+ vnet_buffer (b0)->sw_if_index[VLIB_TX],
+ eh0->src_address,
+ eh0->dst_address,
+ eh0->type, now, len0, 0 /* flush */ );
+
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
+ && (b0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ flowperpkt_l2_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->rx_sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+ t->tx_sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_TX];
+ clib_memcpy (t->src_mac, eh0->src_address, 6);
+ clib_memcpy (t->dst_mac, eh0->dst_address, 6);
+ t->ethertype = clib_net_to_host_u16 (eh0->type);
+ t->timestamp = now;
+ t->buffer_size = len0;
+ }
+
+ /* verify speculative enqueue, maybe switch current next frame */
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+ return frame->n_vectors;
+}
+
+/**
+ * @brief IPFIX l2 flow-per-packet graph node
+ * @node flowperpkt-l2
+ *
+ * This is the IPFIX flow-record-per-packet node.
+ *
+ * @param vm vlib_main_t corresponding to the current thread.
+ * @param node vlib_node_runtime_t data for this node.
+ * @param frame vlib_frame_t whose contents should be dispatched.
+ *
+ * @par Graph mechanics: buffer metadata, next index usage
+ *
+ * <em>Uses:</em>
+ * - <code>vnet_buffer(b)->ip.save_rewrite_length</code>
+ * - tells the node the length of the rewrite which was applied in
+ * ip4/6_rewrite_inline, allows the code to find the IP header without
+ * having to parse L2 headers, or make stupid assumptions about their
+ * length.
+ * - <code>vnet_buffer(b)->flags & VLIB_BUFFER_FLOW_REPORT</code>
+ * - Used to suppress flow record generation for flow record packets.
+ *
+ * <em>Sets:</em>
+ * - <code>vnet_buffer(b)->flags & VLIB_BUFFER_FLOW_REPORT</code>
+ * - To suppress flow record generation for flow record packets
+ *
+ * <em>Next Index:</em>
+ * - Next configured output feature on the interface, usually
+ * "interface-output." Generated flow records head for ip4-lookup
+ */
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (flowperpkt_l2_node) = {
+ .function = flowperpkt_l2_node_fn,
+ .name = "flowperpkt-l2",
+ .vector_size = sizeof (u32),
+ .format_trace = format_flowperpkt_l2_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(flowperpkt_l2_error_strings),
+ .error_strings = flowperpkt_l2_error_strings,
+
+ .n_next_nodes = FLOWPERPKT_L2_N_NEXT,
+
+ /* edit / add dispositions here */
+ .next_nodes = {
+ [FLOWPERPKT_L2_NEXT_DROP] = "error-drop",
+ },
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
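
To make the buffering in add_to_flow_record_l2() concrete, here is a rough, self-contained model (not VPP code) of its flush decision: records accumulate in the per-worker IPFIX buffer until the next 32-octet record would push the datagram past the exporter path MTU. The header sizes and the MTU value below are assumptions for illustration.

#include <stdio.h>

int
main (void)
{
  /* Export packet overhead: IPv4 (20) + UDP (8) + IPFIX message
     header (16) + set header (4). Standard sizes, assumed here. */
  const unsigned header_bytes = 20 + 8 + 16 + 4;

  /* One L2 record: 2 interface indices + 2 MACs + ethertype
     + f64 timestamp + frame size = 32 octets */
  const unsigned record_bytes = 2 * 4 + 2 * 6 + 2 + 8 + 2;

  const unsigned path_mtu = 512;	/* hypothetical frm->path_mtu */
  unsigned offset = header_bytes, records = 0;

  /* Same shape as the flush check: stop before the next record
     would exceed the path MTU */
  while (offset + record_bytes <= path_mtu)
    {
      offset += record_bytes;
      records++;
    }

  printf ("%u L2 records per export packet at path MTU %u\n",
	  records, path_mtu);
  return 0;
}
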
diff --git a/plugins/flowperpkt-plugin/flowperpkt/node.c b/plugins/flowperpkt-plugin/flowperpkt/node.c
index b1806fc4a82..460aa3be387 100644
--- a/plugins/flowperpkt-plugin/flowperpkt/node.c
+++ b/plugins/flowperpkt-plugin/flowperpkt/node.c
@@ -1,5 +1,5 @@
/*
- * node.c - skeleton vpp engine plug-in dual-loop node skeleton
+ * node.c - ipv4 ipfix-per-packet graph node
*
* Copyright (c) <current-year> <your-organization>
* Licensed under the Apache License, Version 2.0 (the "License");
@@ -21,7 +21,7 @@
#include <flowperpkt/flowperpkt.h>
/**
- * @file flow record generator graph node
+ * @file ipv4 flow record generator graph node
*/
typedef struct
@@ -37,18 +37,18 @@ typedef struct
u64 timestamp;
/** size of the buffer */
u16 buffer_size;
-} flowperpkt_trace_t;
+} flowperpkt_ipv4_trace_t;
/* packet trace format function */
static u8 *
-format_flowperpkt_trace (u8 * s, va_list * args)
+format_flowperpkt_ipv4_trace (u8 * s, va_list * args)
{
CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
- flowperpkt_trace_t *t = va_arg (*args, flowperpkt_trace_t *);
+ flowperpkt_ipv4_trace_t *t = va_arg (*args, flowperpkt_ipv4_trace_t *);
s = format (s,
- "FLOWPERPKT: rx_sw_if_index %d, tx_sw_if_index %d, src %U dst %U tos %0x2, timestamp %lld, size %d",
+ "FLOWPERPKT-V4: rx_sw_if_index %d, tx_sw_if_index %d, src %U dst %U tos %0x2, timestamp %lld, size %d",
t->rx_sw_if_index, t->tx_sw_if_index,
format_ip4_address, &t->src_address,
format_ip4_address, &t->dst_address,
@@ -56,30 +56,30 @@ format_flowperpkt_trace (u8 * s, va_list * args)
return s;
}
-vlib_node_registration_t flowperpkt_node;
+vlib_node_registration_t flowperpkt_ipv4_node;
-#define foreach_flowperpkt_error \
-_(SWAPPED, "Mac swap packets processed")
+/* No counters at the moment */
+#define foreach_flowperpkt_ipv4_error
typedef enum
{
#define _(sym,str) FLOWPERPKT_ERROR_##sym,
- foreach_flowperpkt_error
+ foreach_flowperpkt_ipv4_error
#undef _
FLOWPERPKT_N_ERROR,
-} flowperpkt_error_t;
+} flowperpkt_ipv4_error_t;
-static char *flowperpkt_error_strings[] = {
+static char *flowperpkt_ipv4_error_strings[] = {
#define _(sym,string) string,
- foreach_flowperpkt_error
+ foreach_flowperpkt_ipv4_error
#undef _
};
typedef enum
{
- FLOWPERPKT_NEXT_DROP,
- FLOWPERPKT_N_NEXT,
-} flowperpkt_next_t;
+ FLOWPERPKT_IPV4_NEXT_DROP,
+ FLOWPERPKT_IPV4_N_NEXT,
+} flowperpkt_ipv4_next_t;
/**
* @brief add an entry to the flow record under construction
@@ -93,11 +93,11 @@ typedef enum
*/
static inline void
-add_to_flow_record (vlib_main_t * vm,
- flowperpkt_main_t * fm,
- u32 rx_sw_if_index, u32 tx_sw_if_index,
- u32 src_address, u32 dst_address,
- u8 tos, u64 timestamp, u16 length, int do_flush)
+add_to_flow_record_ipv4 (vlib_main_t * vm,
+ flowperpkt_main_t * fm,
+ u32 rx_sw_if_index, u32 tx_sw_if_index,
+ u32 src_address, u32 dst_address,
+ u8 tos, u64 timestamp, u16 length, int do_flush)
{
u32 my_cpu_number = vm->cpu_index;
flow_report_main_t *frm = &flow_report_main;
@@ -113,7 +113,7 @@ add_to_flow_record (vlib_main_t * vm,
vlib_buffer_free_list_t *fl;
/* Find or allocate a buffer */
- b0 = fm->buffers_per_worker[my_cpu_number];
+ b0 = fm->ipv4_buffers_per_worker[my_cpu_number];
/* Need to allocate a buffer? */
if (PREDICT_FALSE (b0 == 0))
@@ -127,7 +127,8 @@ add_to_flow_record (vlib_main_t * vm,
return;
/* Initialize the buffer */
- b0 = fm->buffers_per_worker[my_cpu_number] = vlib_get_buffer (vm, bi0);
+ b0 = fm->ipv4_buffers_per_worker[my_cpu_number] =
+ vlib_get_buffer (vm, bi0);
fl =
vlib_buffer_get_free_list (vm, VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
vlib_buffer_init_for_free_list (b0, fl);
@@ -138,16 +139,16 @@ add_to_flow_record (vlib_main_t * vm,
{
/* use the current buffer */
bi0 = vlib_get_buffer_index (vm, b0);
- offset = fm->next_record_offset_per_worker[my_cpu_number];
+ offset = fm->ipv4_next_record_offset_per_worker[my_cpu_number];
}
/* Find or allocate a frame */
- f = fm->frames_per_worker[my_cpu_number];
+ f = fm->ipv4_frames_per_worker[my_cpu_number];
if (PREDICT_FALSE (f == 0))
{
u32 *to_next;
f = vlib_get_frame_to_node (vm, ip4_lookup_node.index);
- fm->frames_per_worker[my_cpu_number] = f;
+ fm->ipv4_frames_per_worker[my_cpu_number] = f;
/* Enqueue the buffer */
to_next = vlib_frame_vector_args (f);
@@ -250,8 +251,8 @@ add_to_flow_record (vlib_main_t * vm,
}
/* Time to flush the buffer? */
if (PREDICT_FALSE
- (do_flush || (offset + sizeof (u32) + sizeof (u8)
- + sizeof (f64)) > frm->path_mtu))
+ (do_flush || (offset + 4 * sizeof (u32) + sizeof (u8)
+ + sizeof (f64) + sizeof (u16)) > frm->path_mtu))
{
tp = vlib_buffer_get_current (b0);
ip = (ip4_header_t *) & tp->ip4;
@@ -259,7 +260,7 @@ add_to_flow_record (vlib_main_t * vm,
h = (ipfix_message_header_t *) (udp + 1);
s = (ipfix_set_header_t *) (h + 1);
- s->set_id_length = ipfix_set_id_length (256,
+ s->set_id_length = ipfix_set_id_length (fm->ipv4_report_id,
b0->current_length -
(sizeof (*ip) + sizeof (*udp) +
sizeof (*h)));
@@ -282,38 +283,38 @@ add_to_flow_record (vlib_main_t * vm,
ASSERT (ip->checksum == ip4_header_checksum (ip));
vlib_put_frame_to_node (vm, ip4_lookup_node.index,
- fm->frames_per_worker[my_cpu_number]);
- fm->frames_per_worker[my_cpu_number] = 0;
- fm->buffers_per_worker[my_cpu_number] = 0;
+ fm->ipv4_frames_per_worker[my_cpu_number]);
+ fm->ipv4_frames_per_worker[my_cpu_number] = 0;
+ fm->ipv4_buffers_per_worker[my_cpu_number] = 0;
offset = 0;
}
- fm->next_record_offset_per_worker[my_cpu_number] = offset;
+ fm->ipv4_next_record_offset_per_worker[my_cpu_number] = offset;
}
void
-flowperpkt_flush_callback (void)
+flowperpkt_flush_callback_ipv4 (void)
{
vlib_main_t *vm = vlib_get_main ();
flowperpkt_main_t *fm = &flowperpkt_main;
- add_to_flow_record (vm, fm, 0 /* rx_sw_if_index */ ,
- 0 /* tx_sw_if_index */ ,
- 0 /* src_address */ ,
- 0 /* dst_address */ ,
- 0 /* ToS */ ,
- 0ULL /* timestamp */ ,
- 0 /* length */ ,
- 1 /* do_flush */ );
+ add_to_flow_record_ipv4 (vm, fm, 0 /* rx_sw_if_index */ ,
+ 0 /* tx_sw_if_index */ ,
+ 0 /* src_address */ ,
+ 0 /* dst_address */ ,
+ 0 /* ToS */ ,
+ 0ULL /* timestamp */ ,
+ 0 /* length */ ,
+ 1 /* do_flush */ );
}
static uword
-flowperpkt_node_fn (vlib_main_t * vm,
- vlib_node_runtime_t * node, vlib_frame_t * frame)
+flowperpkt_ipv4_node_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
{
u32 n_left_from, *from, *to_next;
- flowperpkt_next_t next_index;
+ flowperpkt_ipv4_next_t next_index;
flowperpkt_main_t *fm = &flowperpkt_main;
u64 now;
@@ -332,8 +333,8 @@ flowperpkt_node_fn (vlib_main_t * vm,
while (n_left_from >= 4 && n_left_to_next >= 2)
{
- u32 next0 = FLOWPERPKT_NEXT_DROP;
- u32 next1 = FLOWPERPKT_NEXT_DROP;
+ u32 next0 = FLOWPERPKT_IPV4_NEXT_DROP;
+ u32 next1 = FLOWPERPKT_IPV4_NEXT_DROP;
ip4_header_t *ip0, *ip1;
u16 len0, len1;
u32 bi0, bi1;
@@ -375,30 +376,30 @@ flowperpkt_node_fn (vlib_main_t * vm,
len0 = vlib_buffer_length_in_chain (vm, b0);
if (PREDICT_TRUE ((b0->flags & VLIB_BUFFER_FLOW_REPORT) == 0))
- add_to_flow_record (vm, fm,
- vnet_buffer (b0)->sw_if_index[VLIB_RX],
- vnet_buffer (b0)->sw_if_index[VLIB_TX],
- ip0->src_address.as_u32,
- ip0->dst_address.as_u32,
- ip0->tos, now, len0, 0 /* flush */ );
+ add_to_flow_record_ipv4 (vm, fm,
+ vnet_buffer (b0)->sw_if_index[VLIB_RX],
+ vnet_buffer (b0)->sw_if_index[VLIB_TX],
+ ip0->src_address.as_u32,
+ ip0->dst_address.as_u32,
+ ip0->tos, now, len0, 0 /* flush */ );
ip1 = (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b1) +
vnet_buffer (b1)->ip.save_rewrite_length);
len1 = vlib_buffer_length_in_chain (vm, b1);
if (PREDICT_TRUE ((b1->flags & VLIB_BUFFER_FLOW_REPORT) == 0))
- add_to_flow_record (vm, fm,
- vnet_buffer (b1)->sw_if_index[VLIB_RX],
- vnet_buffer (b1)->sw_if_index[VLIB_TX],
- ip1->src_address.as_u32,
- ip1->dst_address.as_u32,
- ip1->tos, now, len1, 0 /* flush */ );
+ add_to_flow_record_ipv4 (vm, fm,
+ vnet_buffer (b1)->sw_if_index[VLIB_RX],
+ vnet_buffer (b1)->sw_if_index[VLIB_TX],
+ ip1->src_address.as_u32,
+ ip1->dst_address.as_u32,
+ ip1->tos, now, len1, 0 /* flush */ );
if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
{
if (b0->flags & VLIB_BUFFER_IS_TRACED)
{
- flowperpkt_trace_t *t =
+ flowperpkt_ipv4_trace_t *t =
vlib_add_trace (vm, node, b0, sizeof (*t));
t->rx_sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
t->tx_sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_TX];
@@ -410,7 +411,7 @@ flowperpkt_node_fn (vlib_main_t * vm,
}
if (b1->flags & VLIB_BUFFER_IS_TRACED)
{
- flowperpkt_trace_t *t =
+ flowperpkt_ipv4_trace_t *t =
vlib_add_trace (vm, node, b1, sizeof (*t));
t->rx_sw_if_index = vnet_buffer (b1)->sw_if_index[VLIB_RX];
t->tx_sw_if_index = vnet_buffer (b1)->sw_if_index[VLIB_TX];
@@ -432,7 +433,7 @@ flowperpkt_node_fn (vlib_main_t * vm,
{
u32 bi0;
vlib_buffer_t *b0;
- u32 next0 = FLOWPERPKT_NEXT_DROP;
+ u32 next0 = FLOWPERPKT_IPV4_NEXT_DROP;
ip4_header_t *ip0;
u16 len0;
@@ -461,17 +462,17 @@ flowperpkt_node_fn (vlib_main_t * vm,
len0 = vlib_buffer_length_in_chain (vm, b0);
if (PREDICT_TRUE ((b0->flags & VLIB_BUFFER_FLOW_REPORT) == 0))
- add_to_flow_record (vm, fm,
- vnet_buffer (b0)->sw_if_index[VLIB_RX],
- vnet_buffer (b0)->sw_if_index[VLIB_TX],
- ip0->src_address.as_u32,
- ip0->dst_address.as_u32,
- ip0->tos, now, len0, 0 /* flush */ );
+ add_to_flow_record_ipv4 (vm, fm,
+ vnet_buffer (b0)->sw_if_index[VLIB_RX],
+ vnet_buffer (b0)->sw_if_index[VLIB_TX],
+ ip0->src_address.as_u32,
+ ip0->dst_address.as_u32,
+ ip0->tos, now, len0, 0 /* flush */ );
if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
&& (b0->flags & VLIB_BUFFER_IS_TRACED)))
{
- flowperpkt_trace_t *t =
+ flowperpkt_ipv4_trace_t *t =
vlib_add_trace (vm, node, b0, sizeof (*t));
t->rx_sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
t->tx_sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_TX];
@@ -494,8 +495,8 @@ flowperpkt_node_fn (vlib_main_t * vm,
}
/**
- * @brief IPFIX flow-per-packet graph node
- * @node flowperpkt
+ * @brief IPFIX ipv4 flow-per-packet graph node
+ * @node flowperpkt-ipv4
*
* This is the IPFIX flow-record-per-packet node.
*
@@ -524,21 +525,21 @@ flowperpkt_node_fn (vlib_main_t * vm,
*/
/* *INDENT-OFF* */
-VLIB_REGISTER_NODE (flowperpkt_node) = {
- .function = flowperpkt_node_fn,
- .name = "flowperpkt",
+VLIB_REGISTER_NODE (flowperpkt_ipv4_node) = {
+ .function = flowperpkt_ipv4_node_fn,
+ .name = "flowperpkt-ipv4",
.vector_size = sizeof (u32),
- .format_trace = format_flowperpkt_trace,
+ .format_trace = format_flowperpkt_ipv4_trace,
.type = VLIB_NODE_TYPE_INTERNAL,
- .n_errors = ARRAY_LEN(flowperpkt_error_strings),
- .error_strings = flowperpkt_error_strings,
+ .n_errors = ARRAY_LEN(flowperpkt_ipv4_error_strings),
+ .error_strings = flowperpkt_ipv4_error_strings,
- .n_next_nodes = FLOWPERPKT_N_NEXT,
+ .n_next_nodes = FLOWPERPKT_IPV4_N_NEXT,
/* edit / add dispositions here */
.next_nodes = {
- [FLOWPERPKT_NEXT_DROP] = "error-drop",
+ [FLOWPERPKT_IPV4_NEXT_DROP] = "error-drop",
},
};
/* *INDENT-ON* */
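
Both the ipv4 and l2 nodes stamp records with the same clock: the per-dispatch vlib time delta scaled to nanoseconds and rebased onto the nanosecond epoch captured in flowperpkt_init() (the vlib_time_0 / nanosecond_time_0 reference pair). A self-contained sketch (not VPP code) of that arithmetic, with made-up values:

#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  /* Reference pair captured at plugin init; values are illustrative */
  double vlib_time_0 = 100.0;	/* vlib time at init, seconds */
  uint64_t nanosecond_time_0 = 1482192000ULL * 1000000000ULL;

  double vlib_time_now = 100.25;	/* current vlib time, seconds */

  /* Same arithmetic as the top of both node functions */
  uint64_t now = (uint64_t) ((vlib_time_now - vlib_time_0) * 1e9);
  now += nanosecond_time_0;

  printf ("flowStartNanoseconds: %llu\n", (unsigned long long) now);
  return 0;
}
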