summaryrefslogtreecommitdiffstats
path: root/src
diff options
context:
space:
mode:
authorNeale Ranns <nranns@cisco.com>2018-09-12 09:42:50 -0400
committerDamjan Marion <dmarion@me.com>2018-09-13 00:25:20 +0000
commitb1232555e91b286feab5667b5a22f29aa8e96626 (patch)
treeb64e383bfce6458392550618aa147991df0bbfa9 /src
parent48a30d3038f91f7ef0ee81488573ecdb1803c074 (diff)
L2-input: use vlib_buffer_enqueue_to_next
use the same trick as l2-output to group the processing of packets on the same interface. Change-Id: Ib2a6a1b5f362372936197f5bb2fdd0fe9439226b Signed-off-by: Neale Ranns <nranns@cisco.com>
Diffstat (limited to 'src')
-rw-r--r--src/vnet/ethernet/packet.h6
-rw-r--r--src/vnet/l2/l2_input.c556
2 files changed, 320 insertions, 242 deletions
diff --git a/src/vnet/ethernet/packet.h b/src/vnet/ethernet/packet.h
index d70960b0f7b..04ce420c07e 100644
--- a/src/vnet/ethernet/packet.h
+++ b/src/vnet/ethernet/packet.h
@@ -64,20 +64,20 @@ typedef struct
/* I/G bit: individual (unicast)/group (broadcast/multicast). */
always_inline uword
-ethernet_address_cast (u8 * a)
+ethernet_address_cast (const u8 * a)
{
return (a[0] >> 0) & 1;
}
always_inline int
-ethernet_address_is_broadcast (u8 * a)
+ethernet_address_is_broadcast (const u8 * a)
{
return clib_mem_unaligned (a, u32) == 0xffffffff &&
clib_mem_unaligned (a + 4, u16) == 0xffff;
}
always_inline uword
-ethernet_address_is_locally_administered (u8 * a)
+ethernet_address_is_locally_administered (const u8 * a)
{
return (a[0] >> 1) & 1;
}
diff --git a/src/vnet/l2/l2_input.c b/src/vnet/l2/l2_input.c
index 7e41c886dec..fbf4a6b499a 100644
--- a/src/vnet/l2/l2_input.c
+++ b/src/vnet/l2/l2_input.c
@@ -138,9 +138,85 @@ typedef enum
L2INPUT_N_NEXT,
} l2input_next_t;
+static_always_inline u32
+l2_input_classiy_unicast (vlib_buffer_t * b, const ethernet_header_t * h0)
+{
+ u32 feat_mask = ~0;
+ u8 *l3h0 = (u8 *) h0 + vnet_buffer (b)->l2.l2_len;
+
+#define get_u16(addr) ( *((u16 *)(addr)) )
+ u16 ethertype = clib_net_to_host_u16 (get_u16 (l3h0 - 2));
+ u8 protocol = ((ip6_header_t *) l3h0)->protocol;
+
+ /* Disable bridge forwarding (flooding will execute instead if not xconnect) */
+ feat_mask &= ~(L2INPUT_FEAT_FWD |
+ L2INPUT_FEAT_UU_FLOOD | L2INPUT_FEAT_GBP_FWD);
+
+ /* Disable ARP-term for non-ARP and non-ICMP6 packet */
+ if (ethertype != ETHERNET_TYPE_ARP &&
+ (ethertype != ETHERNET_TYPE_IP6 || protocol != IP_PROTOCOL_ICMP6))
+ feat_mask &= ~(L2INPUT_FEAT_ARP_TERM);
+
+ /*
+ * For packet from BVI - set SHG of ARP request or ICMPv6 neighbor
+ * solicitation packet from BVI to 0 so it can also flood to VXLAN
+ * tunnels or other ports with the same SHG as that of the BVI.
+ */
+ else if (PREDICT_FALSE (vnet_buffer (b)->sw_if_index[VLIB_TX] ==
+ L2INPUT_BVI))
+ {
+ if (ethertype == ETHERNET_TYPE_ARP)
+ {
+ ethernet_arp_header_t *arp0 = (ethernet_arp_header_t *) l3h0;
+ if (arp0->opcode ==
+ clib_host_to_net_u16 (ETHERNET_ARP_OPCODE_request))
+ vnet_buffer (b)->l2.shg = 0;
+ }
+ else /* must be ICMPv6 */
+ {
+ ip6_header_t *iph0 = (ip6_header_t *) l3h0;
+ icmp6_neighbor_solicitation_or_advertisement_header_t *ndh0;
+ ndh0 = ip6_next_header (iph0);
+ if (ndh0->icmp.type == ICMP6_neighbor_solicitation)
+ vnet_buffer (b)->l2.shg = 0;
+ }
+ }
+
+ return (feat_mask);
+}
static_always_inline void
-classify_and_dispatch (l2input_main_t * msm, vlib_buffer_t * b0, u32 * next0)
+l2_input_classify_bridge (l2_bridge_domain_t * bd_config,
+ u16 bd_index,
+ u32 sw_if_index, vlib_buffer_t * b, u32 * feat_mask)
+{
+ /* save BD ID for next feature graph nodes */
+ vnet_buffer (b)->l2.bd_index = bd_index;
+
+ /* Save bridge domain and interface seq_num */
+ /* *INDENT-OFF* */
+ l2fib_seq_num_t sn = {
+ .swif = *l2fib_swif_seq_num(sw_if_index),
+ .bd = bd_config->seq_num,
+ };
+ /* *INDENT-ON* */
+  vnet_buffer (b)->l2.l2fib_sn = sn.as_u16;
+ vnet_buffer (b)->l2.bd_age = bd_config->mac_age;
+
+ /*
+ * Process bridge domain feature enables.
+ * To perform learning/flooding/forwarding, the corresponding bit
+ * must be enabled in both the input interface config and in the
+ * bridge domain config. In the bd_bitmap, bits for features other
+ * than learning/flooding/forwarding should always be set.
+ */
+ *feat_mask = *feat_mask & bd_config->feature_bitmap;
+}
+
+static_always_inline void
+classify_and_dispatch (l2input_main_t * msm,
+ u16 n_left,
+ u32 sw_if_index, vlib_buffer_t ** b, u16 * next)
{
/*
* Load L2 input feature struct
@@ -156,119 +232,202 @@ classify_and_dispatch (l2input_main_t * msm, vlib_buffer_t * b0, u32 * next0)
* set tx sw-if-handle
*/
- u32 feat_mask = ~0;
- u32 sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
- ethernet_header_t *h0 = vlib_buffer_get_current (b0);
+ l2_input_config_t *config;
/* Get config for the input interface */
- l2_input_config_t *config = vec_elt_at_index (msm->configs, sw_if_index0);
+ config = vec_elt_at_index (msm->configs, sw_if_index);
- /* Save split horizon group */
- vnet_buffer (b0)->l2.shg = config->shg;
-
- /* determine layer2 kind for stat and mask */
- if (PREDICT_FALSE (ethernet_address_cast (h0->dst_address)))
+ while (n_left > 8)
{
- u8 *l3h0 = (u8 *) h0 + vnet_buffer (b0)->l2.l2_len;
+ const ethernet_header_t *h0, *h1, *h2, *h3;
+ u32 fm0, fm1, fm2, fm3;
+ u32 fb0, fb1, fb2, fb3;
-#define get_u16(addr) ( *((u16 *)(addr)) )
- u16 ethertype = clib_net_to_host_u16 (get_u16 (l3h0 - 2));
- u8 protocol = ((ip6_header_t *) l3h0)->protocol;
+ if (n_left >= 4)
+ {
+ vlib_prefetch_buffer_header (b[0], LOAD);
+ vlib_prefetch_buffer_header (b[1], LOAD);
+ vlib_prefetch_buffer_header (b[2], LOAD);
+ vlib_prefetch_buffer_header (b[3], LOAD);
+ }
- /* Disable bridge forwarding (flooding will execute instead if not xconnect) */
- feat_mask &= ~(L2INPUT_FEAT_FWD |
- L2INPUT_FEAT_UU_FLOOD | L2INPUT_FEAT_GBP_FWD);
+ fm0 = fm1 = fm2 = fm3 = ~0;
+ h0 = vlib_buffer_get_current (b[0]);
+ h1 = vlib_buffer_get_current (b[1]);
+ h2 = vlib_buffer_get_current (b[2]);
+ h3 = vlib_buffer_get_current (b[3]);
- /* Disable ARP-term for non-ARP and non-ICMP6 packet */
- if (ethertype != ETHERNET_TYPE_ARP &&
- (ethertype != ETHERNET_TYPE_IP6 || protocol != IP_PROTOCOL_ICMP6))
- feat_mask &= ~(L2INPUT_FEAT_ARP_TERM);
+ /* Save split horizon group */
+ vnet_buffer (b[0])->l2.shg = config->shg;
+ vnet_buffer (b[1])->l2.shg = config->shg;
+ vnet_buffer (b[2])->l2.shg = config->shg;
+ vnet_buffer (b[3])->l2.shg = config->shg;
- /*
- * For packet from BVI - set SHG of ARP request or ICMPv6 neighbor
- * solicitation packet from BVI to 0 so it can also flood to VXLAN
- * tunnels or other ports with the same SHG as that of the BVI.
- */
- else if (PREDICT_FALSE (vnet_buffer (b0)->sw_if_index[VLIB_TX] ==
- L2INPUT_BVI))
+ /* determine layer2 kind for stat and mask */
+ if (PREDICT_FALSE (ethernet_address_cast (h0->dst_address)))
{
- if (ethertype == ETHERNET_TYPE_ARP)
- {
- ethernet_arp_header_t *arp0 = (ethernet_arp_header_t *) l3h0;
- if (arp0->opcode ==
- clib_host_to_net_u16 (ETHERNET_ARP_OPCODE_request))
- vnet_buffer (b0)->l2.shg = 0;
- }
- else /* must be ICMPv6 */
- {
- ip6_header_t *iph0 = (ip6_header_t *) l3h0;
- icmp6_neighbor_solicitation_or_advertisement_header_t *ndh0;
- ndh0 = ip6_next_header (iph0);
- if (ndh0->icmp.type == ICMP6_neighbor_solicitation)
- vnet_buffer (b0)->l2.shg = 0;
- }
+ fm0 = l2_input_classiy_unicast (b[0], h0);
+ }
+ else
+ {
+ if (PREDICT_FALSE (sw_if_index == L2INPUT_BVI))
+ vnet_buffer (b[0])->l2.shg = 0;
+ }
+ if (PREDICT_FALSE (ethernet_address_cast (h1->dst_address)))
+ {
+ fm1 = l2_input_classiy_unicast (b[1], h1);
+ }
+ else
+ {
+ if (PREDICT_FALSE (sw_if_index == L2INPUT_BVI))
+ vnet_buffer (b[1])->l2.shg = 0;
+ }
+ if (PREDICT_FALSE (ethernet_address_cast (h2->dst_address)))
+ {
+ fm2 = l2_input_classiy_unicast (b[2], h2);
+ }
+ else
+ {
+ if (PREDICT_FALSE (sw_if_index == L2INPUT_BVI))
+	  vnet_buffer (b[2])->l2.shg = 0;
+ }
+ if (PREDICT_FALSE (ethernet_address_cast (h3->dst_address)))
+ {
+ fm3 = l2_input_classiy_unicast (b[3], h3);
+ }
+ else
+ {
+ if (PREDICT_FALSE (sw_if_index == L2INPUT_BVI))
+	  vnet_buffer (b[3])->l2.shg = 0;
+ }
+
+ if (config->bridge)
+ {
+ /* Do bridge-domain processing */
+ l2_bridge_domain_t *bd_config;
+ u16 bd_index;
+
+ bd_index = config->bd_index;
+ bd_config = vec_elt_at_index (msm->bd_configs, bd_index);
+
+ l2_input_classify_bridge (bd_config, bd_index, sw_if_index,
+ b[0], &fm0);
+ l2_input_classify_bridge (bd_config, bd_index, sw_if_index,
+ b[1], &fm1);
+ l2_input_classify_bridge (bd_config, bd_index, sw_if_index,
+ b[2], &fm2);
+ l2_input_classify_bridge (bd_config, bd_index, sw_if_index,
+ b[3], &fm3);
+ }
+ else if (config->xconnect)
+ {
+ /* Set the output interface */
+ vnet_buffer (b[0])->sw_if_index[VLIB_TX] =
+ config->output_sw_if_index;
+ vnet_buffer (b[1])->sw_if_index[VLIB_TX] =
+ config->output_sw_if_index;
+ vnet_buffer (b[2])->sw_if_index[VLIB_TX] =
+ config->output_sw_if_index;
+ vnet_buffer (b[3])->sw_if_index[VLIB_TX] =
+ config->output_sw_if_index;
}
+ else
+ {
+ fm0 = L2INPUT_FEAT_DROP;
+ fm1 = L2INPUT_FEAT_DROP;
+ fm2 = L2INPUT_FEAT_DROP;
+ fm3 = L2INPUT_FEAT_DROP;
+ }
+
+ /* mask out features from bitmap using packet type and bd config */
+ fb0 = config->feature_bitmap & fm0;
+ fb1 = config->feature_bitmap & fm1;
+ fb2 = config->feature_bitmap & fm2;
+ fb3 = config->feature_bitmap & fm3;
+
+ /* save for next feature graph nodes */
+ vnet_buffer (b[0])->l2.feature_bitmap = fb0;
+ vnet_buffer (b[1])->l2.feature_bitmap = fb1;
+ vnet_buffer (b[2])->l2.feature_bitmap = fb2;
+ vnet_buffer (b[3])->l2.feature_bitmap = fb3;
+
+ /* Determine the next node */
+ *next = feat_bitmap_get_next_node_index (msm->feat_next_node_index,
+ fb0);
+ next++;
+ *next = feat_bitmap_get_next_node_index (msm->feat_next_node_index,
+ fb1);
+ next++;
+ *next = feat_bitmap_get_next_node_index (msm->feat_next_node_index,
+ fb2);
+ next++;
+ *next = feat_bitmap_get_next_node_index (msm->feat_next_node_index,
+ fb3);
+ next++;
+
+ b += 4;
+ n_left -= 4;
}
- else
+ while (n_left)
{
- /*
- * For packet from BVI - set SHG of unicast packet from BVI to 0 so it
- * is not dropped on output to VXLAN tunnels or other ports with the
- * same SHG as that of the BVI.
- */
- if (PREDICT_FALSE (vnet_buffer (b0)->sw_if_index[VLIB_TX] ==
- L2INPUT_BVI))
- vnet_buffer (b0)->l2.shg = 0;
- }
+ const ethernet_header_t *h0;
+ u32 fm0, fb0;
+ fm0 = ~0;
+ h0 = vlib_buffer_get_current (b[0]);
- if (config->bridge)
- {
- /* Do bridge-domain processing */
- u16 bd_index0 = config->bd_index;
- /* save BD ID for next feature graph nodes */
- vnet_buffer (b0)->l2.bd_index = bd_index0;
+ /* Save split horizon group */
+ vnet_buffer (b[0])->l2.shg = config->shg;
- /* Get config for the bridge domain interface */
- l2_bridge_domain_t *bd_config =
- vec_elt_at_index (msm->bd_configs, bd_index0);
+ /* determine layer2 kind for stat and mask */
+ if (PREDICT_FALSE (ethernet_address_cast (h0->dst_address)))
+ {
+ fm0 = l2_input_classiy_unicast (b[0], h0);
+ }
+ else
+ {
+ /*
+ * For packet from BVI - set SHG of unicast packet from BVI to 0 so it
+ * is not dropped on output to VXLAN tunnels or other ports with the
+ * same SHG as that of the BVI.
+ */
+ if (PREDICT_FALSE (sw_if_index == L2INPUT_BVI))
+ vnet_buffer (b[0])->l2.shg = 0;
+ }
- /* Save bridge domain and interface seq_num */
- /* *INDENT-OFF* */
- l2fib_seq_num_t sn = {
- .swif = *l2fib_swif_seq_num(sw_if_index0),
- .bd = bd_config->seq_num,
- };
- /* *INDENT-ON* */
- vnet_buffer (b0)->l2.l2fib_sn = sn.as_u16;;
- vnet_buffer (b0)->l2.bd_age = bd_config->mac_age;
+ if (config->bridge)
+ {
+ /* Do bridge-domain processing */
+ u16 bd_index = config->bd_index;
+ l2_bridge_domain_t *bd_config =
+ vec_elt_at_index (msm->bd_configs, bd_index);
- /*
- * Process bridge domain feature enables.
- * To perform learning/flooding/forwarding, the corresponding bit
- * must be enabled in both the input interface config and in the
- * bridge domain config. In the bd_bitmap, bits for features other
- * than learning/flooding/forwarding should always be set.
- */
- feat_mask = feat_mask & bd_config->feature_bitmap;
- }
- else if (config->xconnect)
- {
- /* Set the output interface */
- vnet_buffer (b0)->sw_if_index[VLIB_TX] = config->output_sw_if_index;
- }
- else
- feat_mask = L2INPUT_FEAT_DROP;
+ l2_input_classify_bridge (bd_config, bd_index, sw_if_index,
+ b[0], &fm0);
+ }
+ else if (config->xconnect)
+ {
+ /* Set the output interface */
+ vnet_buffer (b[0])->sw_if_index[VLIB_TX] =
+ config->output_sw_if_index;
+ }
+ else
+ fm0 = L2INPUT_FEAT_DROP;
+
+ /* mask out features from bitmap using packet type and bd config */
+ fb0 = config->feature_bitmap & fm0;
- /* mask out features from bitmap using packet type and bd config */
- u32 feature_bitmap = config->feature_bitmap & feat_mask;
+ /* save for next feature graph nodes */
+ vnet_buffer (b[0])->l2.feature_bitmap = fb0;
- /* save for next feature graph nodes */
- vnet_buffer (b0)->l2.feature_bitmap = feature_bitmap;
+ /* Determine the next node */
+ *next = feat_bitmap_get_next_node_index (msm->feat_next_node_index,
+ fb0);
- /* Determine the next node */
- *next0 = feat_bitmap_get_next_node_index (msm->feat_next_node_index,
- feature_bitmap);
+ next += 1;
+ b += 1;
+ n_left -= 1;
+ }
}
static_always_inline uword
@@ -276,169 +435,88 @@ l2input_node_inline (vlib_main_t * vm,
vlib_node_runtime_t * node, vlib_frame_t * frame,
int do_trace)
{
- u32 n_left_from, *from, *to_next;
- l2input_next_t next_index;
+ u32 n_left, *from;
l2input_main_t *msm = &l2input_main;
+ vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
+ u16 nexts[VLIB_FRAME_SIZE];
+ u32 sw_if_indices[VLIB_FRAME_SIZE], *sw_if_index;
from = vlib_frame_vector_args (frame);
- n_left_from = frame->n_vectors; /* number of packets to process */
- next_index = node->cached_next_index;
+ n_left = frame->n_vectors; /* number of packets to process */
+ vlib_get_buffers (vm, from, bufs, n_left);
+ b = bufs;
+ sw_if_index = sw_if_indices;
- while (n_left_from > 0)
+ /* extract data from buffer metadata */
+ while (n_left >= 8)
+ {
+ /* Prefetch the buffer header for the N+2 loop iteration */
+ vlib_prefetch_buffer_header (b[4], LOAD);
+ vlib_prefetch_buffer_header (b[5], LOAD);
+ vlib_prefetch_buffer_header (b[6], LOAD);
+ vlib_prefetch_buffer_header (b[7], LOAD);
+
+ sw_if_index[0] = vnet_buffer (b[0])->sw_if_index[VLIB_RX];
+ sw_if_index[1] = vnet_buffer (b[1])->sw_if_index[VLIB_RX];
+ sw_if_index[2] = vnet_buffer (b[2])->sw_if_index[VLIB_RX];
+ sw_if_index[3] = vnet_buffer (b[3])->sw_if_index[VLIB_RX];
+
+ /* next */
+ sw_if_index += 4;
+ n_left -= 4;
+ b += 4;
+ }
+ while (n_left)
{
- u32 n_left_to_next;
+ sw_if_index[0] = vnet_buffer (b[0])->sw_if_index[VLIB_RX];
- /* get space to enqueue frame to graph node "next_index" */
- vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+ /* next */
+ sw_if_index += 1;
+ n_left -= 1;
+ b += 1;
+ }
- while (n_left_from >= 8 && n_left_to_next >= 4)
- {
- u32 bi0, bi1, bi2, bi3;
- vlib_buffer_t *b0, *b1, *b2, *b3;
- u32 next0, next1, next2, next3;
- u32 sw_if_index0, sw_if_index1, sw_if_index2, sw_if_index3;
-
- /* Prefetch next iteration. */
- {
- vlib_buffer_t *p4, *p5, *p6, *p7;
-
- p4 = vlib_get_buffer (vm, from[4]);
- p5 = vlib_get_buffer (vm, from[5]);
- p6 = vlib_get_buffer (vm, from[6]);
- p7 = vlib_get_buffer (vm, from[7]);
-
- /* Prefetch the buffer header and packet for the N+2 loop iteration */
- vlib_prefetch_buffer_header (p4, LOAD);
- vlib_prefetch_buffer_header (p5, LOAD);
- vlib_prefetch_buffer_header (p6, LOAD);
- vlib_prefetch_buffer_header (p7, LOAD);
-
- CLIB_PREFETCH (p4->data, CLIB_CACHE_LINE_BYTES, STORE);
- CLIB_PREFETCH (p5->data, CLIB_CACHE_LINE_BYTES, STORE);
- CLIB_PREFETCH (p6->data, CLIB_CACHE_LINE_BYTES, STORE);
- CLIB_PREFETCH (p7->data, CLIB_CACHE_LINE_BYTES, STORE);
-
- /*
- * Don't bother prefetching the bridge-domain config (which
- * depends on the input config above). Only a small number of
- * bridge domains are expected. Plus the structure is small
- * and several fit in a cache line.
- */
- }
-
- /* speculatively enqueue b0 and b1 to the current next frame */
- /* bi is "buffer index", b is pointer to the buffer */
- to_next[0] = bi0 = from[0];
- to_next[1] = bi1 = from[1];
- to_next[2] = bi2 = from[2];
- to_next[3] = bi3 = from[3];
- from += 4;
- to_next += 4;
- n_left_from -= 4;
- n_left_to_next -= 4;
-
- b0 = vlib_get_buffer (vm, bi0);
- b1 = vlib_get_buffer (vm, bi1);
- b2 = vlib_get_buffer (vm, bi2);
- b3 = vlib_get_buffer (vm, bi3);
-
- if (do_trace)
- {
- /* RX interface handles */
- sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
- sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
- sw_if_index2 = vnet_buffer (b2)->sw_if_index[VLIB_RX];
- sw_if_index3 = vnet_buffer (b3)->sw_if_index[VLIB_RX];
+ n_left = frame->n_vectors;
- if (b0->flags & VLIB_BUFFER_IS_TRACED)
- {
- ethernet_header_t *h0 = vlib_buffer_get_current (b0);
- l2input_trace_t *t =
- vlib_add_trace (vm, node, b0, sizeof (*t));
- t->sw_if_index = sw_if_index0;
- clib_memcpy (t->src, h0->src_address, 6);
- clib_memcpy (t->dst, h0->dst_address, 6);
- }
- if (b1->flags & VLIB_BUFFER_IS_TRACED)
- {
- ethernet_header_t *h1 = vlib_buffer_get_current (b1);
- l2input_trace_t *t =
- vlib_add_trace (vm, node, b1, sizeof (*t));
- t->sw_if_index = sw_if_index1;
- clib_memcpy (t->src, h1->src_address, 6);
- clib_memcpy (t->dst, h1->dst_address, 6);
- }
- if (b2->flags & VLIB_BUFFER_IS_TRACED)
- {
- ethernet_header_t *h2 = vlib_buffer_get_current (b2);
- l2input_trace_t *t =
- vlib_add_trace (vm, node, b2, sizeof (*t));
- t->sw_if_index = sw_if_index2;
- clib_memcpy (t->src, h2->src_address, 6);
- clib_memcpy (t->dst, h2->dst_address, 6);
- }
- if (b3->flags & VLIB_BUFFER_IS_TRACED)
- {
- ethernet_header_t *h3 = vlib_buffer_get_current (b3);
- l2input_trace_t *t =
- vlib_add_trace (vm, node, b3, sizeof (*t));
- t->sw_if_index = sw_if_index3;
- clib_memcpy (t->src, h3->src_address, 6);
- clib_memcpy (t->dst, h3->dst_address, 6);
- }
- }
+ while (n_left)
+ {
+ u16 count, *next;
+ u16 off = frame->n_vectors - n_left;
+ b = bufs + off;
- classify_and_dispatch (msm, b0, &next0);
- classify_and_dispatch (msm, b1, &next1);
- classify_and_dispatch (msm, b2, &next2);
- classify_and_dispatch (msm, b3, &next3);
-
- /* verify speculative enqueues, maybe switch current next frame */
- /* if next0==next1==next_index then nothing special needs to be done */
- vlib_validate_buffer_enqueue_x4 (vm, node, next_index,
- to_next, n_left_to_next,
- bi0, bi1, bi2, bi3,
- next0, next1, next2, next3);
- }
+ sw_if_index = sw_if_indices + off;
+ next = nexts + off;
+
+ count = clib_count_equal_u32 (sw_if_index, n_left);
+ n_left -= count;
+
+ classify_and_dispatch (msm, count, sw_if_index[0], b, next);
+ }
- while (n_left_from > 0 && n_left_to_next > 0)
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
+ {
+ n_left = frame->n_vectors; /* number of packets to process */
+ b = bufs;
+
+ while (n_left)
{
- u32 bi0;
- vlib_buffer_t *b0;
- u32 next0;
- u32 sw_if_index0;
-
- /* speculatively enqueue b0 to the current next frame */
- bi0 = from[0];
- to_next[0] = bi0;
- from += 1;
- to_next += 1;
- n_left_from -= 1;
- n_left_to_next -= 1;
-
- b0 = vlib_get_buffer (vm, bi0);
-
- if (do_trace && PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+ if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
{
- ethernet_header_t *h0 = vlib_buffer_get_current (b0);
- l2input_trace_t *t = vlib_add_trace (vm, node, b0, sizeof (*t));
- sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
- t->sw_if_index = sw_if_index0;
+ ethernet_header_t *h0 = vlib_buffer_get_current (b[0]);
+ l2input_trace_t *t =
+ vlib_add_trace (vm, node, b[0], sizeof (*t));
+
+ t->sw_if_index = vnet_buffer (b[0])->sw_if_index[VLIB_RX];
clib_memcpy (t->src, h0->src_address, 6);
clib_memcpy (t->dst, h0->dst_address, 6);
}
-
- classify_and_dispatch (msm, b0, &next0);
-
- /* verify speculative enqueue, maybe switch current next frame */
- vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
- to_next, n_left_to_next,
- bi0, next0);
+ /* next */
+ n_left--;
+ b++;
}
-
- vlib_put_next_frame (vm, node, next_index, n_left_to_next);
}
+ vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
vlib_node_increment_counter (vm, l2input_node.index,
L2INPUT_ERROR_L2INPUT, frame->n_vectors);
pan> <vnet/pg/pg.h> static u8 * format_ip6_icmp_type_and_code (u8 * s, va_list * args) { icmp6_type_t type = va_arg (*args, int); u8 code = va_arg (*args, int); char *t = 0; #define _(n,f) case n: t = #f; break; switch (type) { foreach_icmp6_type; default: break; } #undef _ if (!t) return format (s, "unknown 0x%x", type); s = format (s, "%s", t); t = 0; switch ((type << 8) | code) { #define _(a,n,f) case (ICMP6_##a << 8) | (n): t = #f; break; foreach_icmp6_code; #undef _ } if (t) s = format (s, " %s", t); return s; } static u8 * format_icmp6_header (u8 * s, va_list * args) { icmp46_header_t *icmp = va_arg (*args, icmp46_header_t *); u32 max_header_bytes = va_arg (*args, u32); /* Nothing to do. */ if (max_header_bytes < sizeof (icmp[0])) return format (s, "ICMP header truncated"); s = format (s, "ICMP %U checksum 0x%x", format_ip6_icmp_type_and_code, icmp->type, icmp->code, clib_net_to_host_u16 (icmp->checksum)); if (max_header_bytes >= sizeof (icmp6_neighbor_solicitation_or_advertisement_header_t) && (icmp->type == ICMP6_neighbor_solicitation || icmp->type == ICMP6_neighbor_advertisement)) { icmp6_neighbor_solicitation_or_advertisement_header_t *icmp6_nd = (icmp6_neighbor_solicitation_or_advertisement_header_t *) icmp; s = format (s, "\n target address %U", format_ip6_address, &icmp6_nd->target_address); } return s; } u8 * format_icmp6_input_trace (u8 * s, va_list * va) { CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *); CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *); icmp6_input_trace_t *t = va_arg (*va, icmp6_input_trace_t *); s = format (s, "%U", format_ip6_header, t->packet_data, sizeof (t->packet_data)); return s; } static char *icmp_error_strings[] = { #define _(f,s) s, foreach_icmp6_error #undef _ }; typedef enum { ICMP_INPUT_NEXT_DROP, ICMP_INPUT_N_NEXT, } icmp_input_next_t; typedef struct { uword *type_and_code_by_name; uword *type_by_name; /* Vector dispatch table indexed by [icmp type]. 
*/ u8 input_next_index_by_type[256]; /* Max valid code indexed by icmp type. */ u8 max_valid_code_by_type[256]; /* hop_limit must be >= this value for this icmp type. */ u8 min_valid_hop_limit_by_type[256]; u8 min_valid_length_by_type[256]; } icmp6_main_t; icmp6_main_t icmp6_main; static uword ip6_icmp_input (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) { icmp6_main_t *im = &icmp6_main; u32 *from, *to_next; u32 n_left_from, n_left_to_next, next_index; from = vlib_frame_vector_args (frame); n_left_from = frame->n_vectors; next_index = node->cached_next_index; if (node->flags & VLIB_NODE_FLAG_TRACE) vlib_trace_frame_buffers_only (vm, node, from, frame->n_vectors, /* stride */ 1, sizeof (icmp6_input_trace_t)); while (n_left_from > 0) { vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); while (n_left_from > 0 && n_left_to_next > 0) { vlib_buffer_t *b0; ip6_header_t *ip0; icmp46_header_t *icmp0; icmp6_type_t type0; u32 bi0, next0, error0, len0; bi0 = to_next[0] = from[0]; from += 1; n_left_from -= 1; to_next += 1; n_left_to_next -= 1; b0 = vlib_get_buffer (vm, bi0); ip0 = vlib_buffer_get_current (b0); icmp0 = ip6_next_header (ip0); type0 = icmp0->type; error0 = ICMP6_ERROR_NONE; next0 = im->input_next_index_by_type[type0]; error0 = next0 == ICMP_INPUT_NEXT_DROP ? ICMP6_ERROR_UNKNOWN_TYPE : error0; /* Check code is valid for type. */ error0 = icmp0->code > im->max_valid_code_by_type[type0] ? ICMP6_ERROR_INVALID_CODE_FOR_TYPE : error0; /* Checksum is already validated by ip6_local node so we don't need to check that. */ /* Check that hop limit == 255 for certain types. */ error0 = ip0->hop_limit < im->min_valid_hop_limit_by_type[type0] ? ICMP6_ERROR_INVALID_HOP_LIMIT_FOR_TYPE : error0; len0 = clib_net_to_host_u16 (ip0->payload_length); error0 = len0 < im->min_valid_length_by_type[type0] ? ICMP6_ERROR_LENGTH_TOO_SMALL_FOR_TYPE : error0; b0->error = node->errors[error0]; next0 = error0 != ICMP6_ERROR_NONE ? 
ICMP_INPUT_NEXT_DROP : next0; vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next, n_left_to_next, bi0, next0); } vlib_put_next_frame (vm, node, next_index, n_left_to_next); } return frame->n_vectors; } /* *INDENT-OFF* */ VLIB_REGISTER_NODE (ip6_icmp_input_node) = { .function = ip6_icmp_input, .name = "ip6-icmp-input", .vector_size = sizeof (u32), .format_trace = format_icmp6_input_trace, .n_errors = ARRAY_LEN (icmp_error_strings), .error_strings = icmp_error_strings, .n_next_nodes = 1, .next_nodes = { [ICMP_INPUT_NEXT_DROP] = "error-drop", }, }; /* *INDENT-ON* */ typedef enum { ICMP6_ECHO_REQUEST_NEXT_LOOKUP, ICMP6_ECHO_REQUEST_NEXT_OUTPUT, ICMP6_ECHO_REQUEST_N_NEXT, } icmp6_echo_request_next_t; static uword ip6_icmp_echo_request (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) { u32 *from, *to_next; u32 n_left_from, n_left_to_next, next_index; ip6_main_t *im = &ip6_main; from = vlib_frame_vector_args (frame); n_left_from = frame->n_vectors; next_index = node->cached_next_index; if (node->flags & VLIB_NODE_FLAG_TRACE) vlib_trace_frame_buffers_only (vm, node, from, frame->n_vectors, /* stride */ 1, sizeof (icmp6_input_trace_t)); while (n_left_from > 0) { vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); while (n_left_from > 2 && n_left_to_next > 2) { vlib_buffer_t *p0, *p1; ip6_header_t *ip0, *ip1; icmp46_header_t *icmp0, *icmp1; ip6_address_t tmp0, tmp1; ip_csum_t sum0, sum1; u32 bi0, bi1; u32 fib_index0, fib_index1; u32 next0 = ICMP6_ECHO_REQUEST_NEXT_LOOKUP; u32 next1 = ICMP6_ECHO_REQUEST_NEXT_LOOKUP; bi0 = to_next[0] = from[0]; bi1 = to_next[1] = from[1]; from += 2; n_left_from -= 2; to_next += 2; n_left_to_next -= 2; p0 = vlib_get_buffer (vm, bi0); p1 = vlib_get_buffer (vm, bi1); ip0 = vlib_buffer_get_current (p0); ip1 = vlib_buffer_get_current (p1); icmp0 = ip6_next_header (ip0); icmp1 = ip6_next_header (ip1); /* Check icmp type to echo reply and update icmp checksum. 
*/ sum0 = icmp0->checksum; sum1 = icmp1->checksum; ASSERT (icmp0->type == ICMP6_echo_request); ASSERT (icmp1->type == ICMP6_echo_request); sum0 = ip_csum_update (sum0, ICMP6_echo_request, ICMP6_echo_reply, icmp46_header_t, type); sum1 = ip_csum_update (sum1, ICMP6_echo_request, ICMP6_echo_reply, icmp46_header_t, type); icmp0->checksum = ip_csum_fold (sum0); icmp1->checksum = ip_csum_fold (sum1); icmp0->type = ICMP6_echo_reply; icmp1->type = ICMP6_echo_reply; /* Swap source and destination address. */ tmp0 = ip0->src_address; tmp1 = ip1->src_address; ip0->src_address = ip0->dst_address; ip1->src_address = ip1->dst_address; ip0->dst_address = tmp0; ip1->dst_address = tmp1; /* New hop count. */ ip0->hop_limit = im->host_config.ttl; ip1->hop_limit = im->host_config.ttl; if (ip6_address_is_link_local_unicast (&ip0->dst_address)) { ethernet_header_t *eth0; u8 tmp_mac[6]; /* For link local, reuse current MAC header by sawpping * SMAC to DMAC instead of IP6 lookup since link local * is not in the IP6 FIB */ vlib_buffer_reset (p0); eth0 = vlib_buffer_get_current (p0); clib_memcpy (tmp_mac, eth0->dst_address, 6); clib_memcpy (eth0->dst_address, eth0->src_address, 6); clib_memcpy (eth0->src_address, tmp_mac, 6); vnet_buffer (p0)->sw_if_index[VLIB_TX] = vnet_buffer (p0)->sw_if_index[VLIB_RX]; next0 = ICMP6_ECHO_REQUEST_NEXT_OUTPUT; } else { /* Determine the correct lookup fib indices... 
*/ fib_index0 = vec_elt (im->fib_index_by_sw_if_index, vnet_buffer (p0)->sw_if_index[VLIB_RX]); vnet_buffer (p0)->sw_if_index[VLIB_TX] = fib_index0; } if (ip6_address_is_link_local_unicast (&ip1->dst_address)) { ethernet_header_t *eth1; u8 tmp_mac[6]; /* For link local, reuse current MAC header by sawpping * SMAC to DMAC instead of IP6 lookup since link local * is not in the IP6 FIB */ vlib_buffer_reset (p1); eth1 = vlib_buffer_get_current (p1); clib_memcpy (tmp_mac, eth1->dst_address, 6); clib_memcpy (eth1->dst_address, eth1->src_address, 6); clib_memcpy (eth1->src_address, tmp_mac, 6); vnet_buffer (p1)->sw_if_index[VLIB_TX] = vnet_buffer (p1)->sw_if_index[VLIB_RX]; next1 = ICMP6_ECHO_REQUEST_NEXT_OUTPUT; } else { /* Determine the correct lookup fib indices... */ fib_index1 = vec_elt (im->fib_index_by_sw_if_index, vnet_buffer (p1)->sw_if_index[VLIB_RX]); vnet_buffer (p1)->sw_if_index[VLIB_TX] = fib_index1; } vnet_buffer (p0)->sw_if_index[VLIB_RX] = vnet_main.local_interface_sw_if_index; vnet_buffer (p1)->sw_if_index[VLIB_RX] = vnet_main.local_interface_sw_if_index; /* verify speculative enqueues, maybe switch current next frame */ /* if next0==next1==next_index then nothing special needs to be done */ vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next, n_left_to_next, bi0, bi1, next0, next1); } while (n_left_from > 0 && n_left_to_next > 0) { vlib_buffer_t *p0; ip6_header_t *ip0; icmp46_header_t *icmp0; u32 bi0; ip6_address_t tmp0; ip_csum_t sum0; u32 fib_index0; u32 next0 = ICMP6_ECHO_REQUEST_NEXT_LOOKUP; bi0 = to_next[0] = from[0]; from += 1; n_left_from -= 1; to_next += 1; n_left_to_next -= 1; p0 = vlib_get_buffer (vm, bi0); ip0 = vlib_buffer_get_current (p0); icmp0 = ip6_next_header (ip0); /* Check icmp type to echo reply and update icmp checksum. 
*/ sum0 = icmp0->checksum; ASSERT (icmp0->type == ICMP6_echo_request); sum0 = ip_csum_update (sum0, ICMP6_echo_request, ICMP6_echo_reply, icmp46_header_t, type); icmp0->checksum = ip_csum_fold (sum0); icmp0->type = ICMP6_echo_reply; /* Swap source and destination address. */ tmp0 = ip0->src_address; ip0->src_address = ip0->dst_address; ip0->dst_address = tmp0; ip0->hop_limit = im->host_config.ttl; if (ip6_address_is_link_local_unicast (&ip0->dst_address)) { ethernet_header_t *eth0; u8 tmp_mac[6]; /* For link local, reuse current MAC header by sawpping * SMAC to DMAC instead of IP6 lookup since link local * is not in the IP6 FIB */ vlib_buffer_reset (p0); eth0 = vlib_buffer_get_current (p0); clib_memcpy (tmp_mac, eth0->dst_address, 6); clib_memcpy (eth0->dst_address, eth0->src_address, 6); clib_memcpy (eth0->src_address, tmp_mac, 6); vnet_buffer (p0)->sw_if_index[VLIB_TX] = vnet_buffer (p0)->sw_if_index[VLIB_RX]; next0 = ICMP6_ECHO_REQUEST_NEXT_OUTPUT; } else { fib_index0 = vec_elt (im->fib_index_by_sw_if_index, vnet_buffer (p0)->sw_if_index[VLIB_RX]); vnet_buffer (p0)->sw_if_index[VLIB_TX] = fib_index0; } vnet_buffer (p0)->sw_if_index[VLIB_RX] = vnet_main.local_interface_sw_if_index; /* Verify speculative enqueue, maybe switch current next frame */ vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next, n_left_to_next, bi0, next0); } vlib_put_next_frame (vm, node, next_index, n_left_to_next); } vlib_error_count (vm, ip6_icmp_input_node.index, ICMP6_ERROR_ECHO_REPLIES_SENT, frame->n_vectors); return frame->n_vectors; } /* *INDENT-OFF* */ VLIB_REGISTER_NODE (ip6_icmp_echo_request_node,static) = { .function = ip6_icmp_echo_request, .name = "ip6-icmp-echo-request", .vector_size = sizeof (u32), .format_trace = format_icmp6_input_trace, .n_next_nodes = ICMP6_ECHO_REQUEST_N_NEXT, .next_nodes = { [ICMP6_ECHO_REQUEST_NEXT_LOOKUP] = "ip6-lookup", [ICMP6_ECHO_REQUEST_NEXT_OUTPUT] = "interface-output", }, }; /* *INDENT-ON* */ typedef enum { IP6_ICMP_ERROR_NEXT_DROP, 
IP6_ICMP_ERROR_NEXT_LOOKUP, IP6_ICMP_ERROR_N_NEXT, } ip6_icmp_error_next_t; void icmp6_error_set_vnet_buffer (vlib_buffer_t * b, u8 type, u8 code, u32 data) { vnet_buffer (b)->ip.icmp.type = type; vnet_buffer (b)->ip.icmp.code = code; vnet_buffer (b)->ip.icmp.data = data; } static u8 icmp6_icmp_type_to_error (u8 type) { switch (type) { case ICMP6_destination_unreachable: return ICMP6_ERROR_DEST_UNREACH_SENT; case ICMP6_packet_too_big: return ICMP6_ERROR_PACKET_TOO_BIG_SENT; case ICMP6_time_exceeded: return ICMP6_ERROR_TTL_EXPIRE_SENT; case ICMP6_parameter_problem: return ICMP6_ERROR_PARAM_PROBLEM_SENT; default: return ICMP6_ERROR_DROP; } } static uword ip6_icmp_error (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) { u32 *from, *to_next; uword n_left_from, n_left_to_next; ip6_icmp_error_next_t next_index; ip6_main_t *im = &ip6_main; ip_lookup_main_t *lm = &im->lookup_main; from = vlib_frame_vector_args (frame); n_left_from = frame->n_vectors; next_index = node->cached_next_index; if (node->flags & VLIB_NODE_FLAG_TRACE) vlib_trace_frame_buffers_only (vm, node, from, frame->n_vectors, /* stride */ 1, sizeof (icmp6_input_trace_t)); while (n_left_from > 0) { vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); while (n_left_from > 0 && n_left_to_next > 0) { u32 pi0 = from[0]; u32 next0 = IP6_ICMP_ERROR_NEXT_LOOKUP; u8 error0 = ICMP6_ERROR_NONE; vlib_buffer_t *p0; ip6_header_t *ip0, *out_ip0; icmp46_header_t *icmp0; u32 sw_if_index0, if_add_index0; int bogus_length; /* Speculatively enqueue p0 to the current next frame */ to_next[0] = pi0; from += 1; to_next += 1; n_left_from -= 1; n_left_to_next -= 1; p0 = vlib_get_buffer (vm, pi0); ip0 = vlib_buffer_get_current (p0); sw_if_index0 = vnet_buffer (p0)->sw_if_index[VLIB_RX]; /* RFC4443 says to keep as much of the original packet as possible * within the minimum MTU. 
We cheat "a little" here by keeping whatever fits * in the first buffer, to be more efficient */ if (PREDICT_FALSE (p0->total_length_not_including_first_buffer)) { /* clear current_length of all other buffers in chain */ vlib_buffer_t *b = p0; p0->total_length_not_including_first_buffer = 0; while (b->flags & VLIB_BUFFER_NEXT_PRESENT) { b = vlib_get_buffer (vm, b->next_buffer); b->current_length = 0; } } p0->current_length = p0->current_length > 1280 ? 1280 : p0->current_length; /* Add IP header and ICMPv6 header including a 4 byte data field */ vlib_buffer_advance (p0, -sizeof (ip6_header_t) - sizeof (icmp46_header_t) - 4); out_ip0 = vlib_buffer_get_current (p0); icmp0 = (icmp46_header_t *) & out_ip0[1]; /* Fill ip header fields */ out_ip0->ip_version_traffic_class_and_flow_label = clib_host_to_net_u32 (0x6 << 28); out_ip0->payload_length = clib_host_to_net_u16 (p0->current_length - sizeof (ip6_header_t)); out_ip0->protocol = IP_PROTOCOL_ICMP6; out_ip0->hop_limit = 0xff; out_ip0->dst_address = ip0->src_address; if_add_index0 = lm->if_address_pool_index_by_sw_if_index[sw_if_index0]; if (PREDICT_TRUE (if_add_index0 != ~0)) { ip_interface_address_t *if_add = pool_elt_at_index (lm->if_address_pool, if_add_index0); ip6_address_t *if_ip = ip_interface_address_get_address (lm, if_add); out_ip0->src_address = *if_ip; } else /* interface has no IP6 address - should not happen */ { next0 = IP6_ICMP_ERROR_NEXT_DROP; error0 = ICMP6_ERROR_DROP; } /* Fill icmp header fields */ icmp0->type = vnet_buffer (p0)->ip.icmp.type; icmp0->code = vnet_buffer (p0)->ip.icmp.code; *((u32 *) (icmp0 + 1)) = clib_host_to_net_u32 (vnet_buffer (p0)->ip.icmp.data); icmp0->checksum = 0; icmp0->checksum = ip6_tcp_udp_icmp_compute_checksum (vm, p0, out_ip0, &bogus_length); /* Update error status */ if (error0 == ICMP6_ERROR_NONE) error0 = icmp6_icmp_type_to_error (icmp0->type); vlib_error_count (vm, node->node_index, error0, 1); /* Verify speculative enqueue, maybe switch current next frame */ 
vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next, n_left_to_next, pi0, next0); } vlib_put_next_frame (vm, node, next_index, n_left_to_next); } return frame->n_vectors; } /* *INDENT-OFF* */ VLIB_REGISTER_NODE (ip6_icmp_error_node) = { .function = ip6_icmp_error, .name = "ip6-icmp-error", .vector_size = sizeof (u32), .n_errors = ARRAY_LEN (icmp_error_strings), .error_strings = icmp_error_strings, .n_next_nodes = IP6_ICMP_ERROR_N_NEXT, .next_nodes = { [IP6_ICMP_ERROR_NEXT_DROP] = "error-drop", [IP6_ICMP_ERROR_NEXT_LOOKUP] = "ip6-lookup", }, .format_trace = format_icmp6_input_trace, }; /* *INDENT-ON* */ static uword unformat_icmp_type_and_code (unformat_input_t * input, va_list * args) { icmp46_header_t *h = va_arg (*args, icmp46_header_t *); icmp6_main_t *cm = &icmp6_main; u32 i; if (unformat_user (input, unformat_vlib_number_by_name, cm->type_and_code_by_name, &i)) { h->type = (i >> 8) & 0xff; h->code = (i >> 0) & 0xff; } else if (unformat_user (input, unformat_vlib_number_by_name, cm->type_by_name, &i)) { h->type = i; h->code = 0; } else return 0; return 1; } static void icmp6_pg_edit_function (pg_main_t * pg, pg_stream_t * s, pg_edit_group_t * g, u32 * packets, u32 n_packets) { vlib_main_t *vm = vlib_get_main (); u32 ip_offset, icmp_offset; int bogus_length; icmp_offset = g->start_byte_offset; ip_offset = (g - 1)->start_byte_offset; while (n_packets >= 1) { vlib_buffer_t *p0; ip6_header_t *ip0; icmp46_header_t *icmp0; p0 = vlib_get_buffer (vm, packets[0]); n_packets -= 1; packets += 1; ASSERT (p0->current_data == 0); ip0 = (void *) (p0->data + ip_offset); icmp0 = (void *) (p0->data + icmp_offset); icmp0->checksum = ip6_tcp_udp_icmp_compute_checksum (vm, p0, ip0, &bogus_length); ASSERT (bogus_length == 0); } } typedef struct { pg_edit_t type, code; pg_edit_t checksum; } pg_icmp46_header_t; always_inline void pg_icmp_header_init (pg_icmp46_header_t * p) { /* Initialize fields that are not bit fields in the IP header. 
*/ #define _(f) pg_edit_init (&p->f, icmp46_header_t, f); _(type); _(code); _(checksum); #undef _ } static uword unformat_pg_icmp_header (unformat_input_t * input, va_list * args) { pg_stream_t *s = va_arg (*args, pg_stream_t *); pg_icmp46_header_t *p; u32 group_index; p = pg_create_edit_group (s, sizeof (p[0]), sizeof (icmp46_header_t), &group_index); pg_icmp_header_init (p); p->checksum.type = PG_EDIT_UNSPECIFIED; { icmp46_header_t tmp; if (!unformat (input, "ICMP %U", unformat_icmp_type_and_code, &tmp)) goto error; pg_edit_set_fixed (&p->type, tmp.type); pg_edit_set_fixed (&p->code, tmp.code); } /* Parse options. */ while (1) { if (unformat (input, "checksum %U", unformat_pg_edit, unformat_pg_number, &p->checksum)) ; /* Can't parse input: try next protocol level. */ else break; } if (!unformat_user (input, unformat_pg_payload, s)) goto error; if (p->checksum.type == PG_EDIT_UNSPECIFIED) { pg_edit_group_t *g = pg_stream_get_group (s, group_index); g->edit_function = icmp6_pg_edit_function; g->edit_function_opaque = 0; } return 1; error: /* Free up any edits we may have added. 
*/ pg_free_edit_group (s); return 0; } void icmp6_register_type (vlib_main_t * vm, icmp6_type_t type, u32 node_index) { icmp6_main_t *im = &icmp6_main; ASSERT ((int) type < ARRAY_LEN (im->input_next_index_by_type)); im->input_next_index_by_type[type] = vlib_node_add_next (vm, ip6_icmp_input_node.index, node_index); } static clib_error_t * icmp6_init (vlib_main_t * vm) { ip_main_t *im = &ip_main; ip_protocol_info_t *pi; icmp6_main_t *cm = &icmp6_main; clib_error_t *error; error = vlib_call_init_function (vm, ip_main_init); if (error) return error; pi = ip_get_protocol_info (im, IP_PROTOCOL_ICMP6); pi->format_header = format_icmp6_header; pi->unformat_pg_edit = unformat_pg_icmp_header; cm->type_by_name = hash_create_string (0, sizeof (uword)); #define _(n,t) hash_set_mem (cm->type_by_name, #t, (n)); foreach_icmp6_type; #undef _ cm->type_and_code_by_name = hash_create_string (0, sizeof (uword)); #define _(a,n,t) hash_set_mem (cm->type_by_name, #t, (n) | (ICMP6_##a << 8)); foreach_icmp6_code; #undef _ memset (cm->input_next_index_by_type, ICMP_INPUT_NEXT_DROP, sizeof (cm->input_next_index_by_type)); memset (cm->max_valid_code_by_type, 0, sizeof (cm->max_valid_code_by_type)); #define _(a,n,t) cm->max_valid_code_by_type[ICMP6_##a] = clib_max (cm->max_valid_code_by_type[ICMP6_##a], n); foreach_icmp6_code; #undef _ memset (cm->min_valid_hop_limit_by_type, 0, sizeof (cm->min_valid_hop_limit_by_type)); cm->min_valid_hop_limit_by_type[ICMP6_router_solicitation] = 255; cm->min_valid_hop_limit_by_type[ICMP6_router_advertisement] = 255; cm->min_valid_hop_limit_by_type[ICMP6_neighbor_solicitation] = 255; cm->min_valid_hop_limit_by_type[ICMP6_neighbor_advertisement] = 255; cm->min_valid_hop_limit_by_type[ICMP6_redirect] = 255; memset (cm->min_valid_length_by_type, sizeof (icmp46_header_t), sizeof (cm->min_valid_length_by_type)); cm->min_valid_length_by_type[ICMP6_router_solicitation] = sizeof (icmp6_neighbor_discovery_header_t); 
cm->min_valid_length_by_type[ICMP6_router_advertisement] = sizeof (icmp6_router_advertisement_header_t); cm->min_valid_length_by_type[ICMP6_neighbor_solicitation] = sizeof (icmp6_neighbor_solicitation_or_advertisement_header_t); cm->min_valid_length_by_type[ICMP6_neighbor_advertisement] = sizeof (icmp6_neighbor_solicitation_or_advertisement_header_t); cm->min_valid_length_by_type[ICMP6_redirect] = sizeof (icmp6_redirect_header_t); icmp6_register_type (vm, ICMP6_echo_request, ip6_icmp_echo_request_node.index); return vlib_call_init_function (vm, ip6_neighbor_init); } VLIB_INIT_FUNCTION (icmp6_init); /* * fd.io coding-style-patch-verification: ON * * Local Variables: * eval: (c-set-style "gnu") * End: */