-rw-r--r--   src/vnet/ip/ip4_forward.c   571
-rw-r--r--   src/vnet/ip/ip6_forward.c     1
2 files changed, 319 insertions, 253 deletions
diff --git a/src/vnet/ip/ip4_forward.c b/src/vnet/ip/ip4_forward.c
index 04cfac0d6be..6d5f07bda2c 100644
--- a/src/vnet/ip/ip4_forward.c
+++ b/src/vnet/ip/ip4_forward.c
@@ -1143,8 +1143,9 @@ VNET_FEATURE_ARC_INIT (ip4_local) =
/* *INDENT-ON* */
static inline void
-ip4_local_validate_l4 (vlib_main_t * vm, vlib_buffer_t * p, ip4_header_t * ip,
- u8 is_udp, u8 * error, u8 * good_tcp_udp)
+ip4_local_l4_csum_validate (vlib_main_t * vm, vlib_buffer_t * p,
+ ip4_header_t * ip, u8 is_udp, u8 * error,
+ u8 * good_tcp_udp)
{
u32 flags0;
flags0 = ip4_tcp_udp_validate_checksum (vm, p);
@@ -1165,10 +1166,247 @@ ip4_local_validate_l4 (vlib_main_t * vm, vlib_buffer_t * p, ip4_header_t * ip,
}
}
-#define ip4_local_do_l4_check(is_tcp_udp, flags) \
- (is_tcp_udp && !(flags & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED \
- || flags & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM \
- || flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM))
+#define ip4_local_csum_is_offloaded(_b) \
+ _b->flags & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM \
+ || _b->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM
+
+#define ip4_local_need_csum_check(is_tcp_udp, _b) \
+ (is_tcp_udp && !(_b->flags & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED \
+ || ip4_local_csum_is_offloaded (_b)))
+
+#define ip4_local_csum_is_valid(_b) \
+ (_b->flags & VNET_BUFFER_F_L4_CHECKSUM_CORRECT \
+ || (ip4_local_csum_is_offloaded (_b))) != 0
+
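
The three macros above reduce the per-buffer checksum state to two questions: does this TCP/UDP packet still need a software checksum pass (nothing has computed it and no offload flag is set), and can its checksum be trusted as-is (a prior pass marked it correct, or an offload flag is present). Below is a minimal standalone sketch of that decision logic; the flag names and values are made up for illustration and are not the real VNET_BUFFER_F_* definitions.

/* Sketch only: illustrative flag bits, not VPP's vlib_buffer_t flags. */
#include <stdint.h>
#include <stdio.h>

#define F_L4_CHECKSUM_COMPUTED (1 << 0)   /* software already ran the check */
#define F_L4_CHECKSUM_CORRECT  (1 << 1)   /* ...and it passed */
#define F_OFFLOAD_TCP_CKSUM    (1 << 2)   /* NIC verifies/fills the TCP csum */
#define F_OFFLOAD_UDP_CKSUM    (1 << 3)   /* NIC verifies/fills the UDP csum */

static int
need_csum_check (int is_tcp_udp, uint32_t flags)
{
  /* Only validate in software when nothing else vouches for the packet. */
  return is_tcp_udp
         && !(flags & (F_L4_CHECKSUM_COMPUTED
                       | F_OFFLOAD_TCP_CKSUM | F_OFFLOAD_UDP_CKSUM));
}

static int
csum_is_valid (uint32_t flags)
{
  /* Trust either a prior successful software check or hardware offload. */
  return (flags & (F_L4_CHECKSUM_CORRECT
                   | F_OFFLOAD_TCP_CKSUM | F_OFFLOAD_UDP_CKSUM)) != 0;
}

int
main (void)
{
  uint32_t flags = F_L4_CHECKSUM_COMPUTED | F_L4_CHECKSUM_CORRECT;
  printf ("need check: %d, valid: %d\n",
          need_csum_check (1, flags), csum_is_valid (flags));
  return 0;
}
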
+static inline void
+ip4_local_check_l4_csum (vlib_main_t * vm, vlib_buffer_t * b,
+ ip4_header_t * ih, u8 * error)
+{
+ u8 is_udp, is_tcp_udp, good_tcp_udp;
+
+ is_udp = ih->protocol == IP_PROTOCOL_UDP;
+ is_tcp_udp = is_udp || ih->protocol == IP_PROTOCOL_TCP;
+
+ if (PREDICT_FALSE (ip4_local_need_csum_check (is_tcp_udp, b)))
+ ip4_local_l4_csum_validate (vm, b, ih, is_udp, error, &good_tcp_udp);
+ else
+ good_tcp_udp = ip4_local_csum_is_valid (b);
+
+ ASSERT (IP4_ERROR_TCP_CHECKSUM + 1 == IP4_ERROR_UDP_CHECKSUM);
+ *error = (is_tcp_udp && !good_tcp_udp
+ ? IP4_ERROR_TCP_CHECKSUM + is_udp : *error);
+}
+
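
ip4_local_check_l4_csum picks the error code without a branch: because the UDP checksum error immediately follows the TCP one in the error enum (hence the ASSERT), IP4_ERROR_TCP_CHECKSUM + is_udp selects the right counter. A standalone sketch of the same trick, with an illustrative enum rather than the real ip4_error_t layout:

/* Sketch only: "base + is_udp" relies on the two error codes being adjacent. */
#include <assert.h>
#include <stdio.h>

enum { ERR_UNKNOWN_PROTOCOL, ERR_TCP_CHECKSUM, ERR_UDP_CHECKSUM };

static int
classify (int is_tcp_udp, int is_udp, int good, int error)
{
  assert (ERR_TCP_CHECKSUM + 1 == ERR_UDP_CHECKSUM);
  return (is_tcp_udp && !good) ? ERR_TCP_CHECKSUM + is_udp : error;
}

int
main (void)
{
  /* A bad UDP checksum maps to ERR_UDP_CHECKSUM (2). */
  printf ("%d\n", classify (1, 1, 0, ERR_UNKNOWN_PROTOCOL));
  return 0;
}
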
+static inline void
+ip4_local_check_l4_csum_x2 (vlib_main_t * vm, vlib_buffer_t ** b,
+ ip4_header_t ** ih, u8 * error)
+{
+ u8 is_udp[2], is_tcp_udp[2], good_tcp_udp[2];
+
+ is_udp[0] = ih[0]->protocol == IP_PROTOCOL_UDP;
+ is_udp[1] = ih[1]->protocol == IP_PROTOCOL_UDP;
+
+ is_tcp_udp[0] = is_udp[0] || ih[0]->protocol == IP_PROTOCOL_TCP;
+ is_tcp_udp[1] = is_udp[1] || ih[1]->protocol == IP_PROTOCOL_TCP;
+
+ good_tcp_udp[0] = ip4_local_csum_is_valid (b[0]);
+ good_tcp_udp[1] = ip4_local_csum_is_valid (b[1]);
+
+ if (PREDICT_FALSE (ip4_local_need_csum_check (is_tcp_udp[0], b[0])
+ || ip4_local_need_csum_check (is_tcp_udp[1], b[1])))
+ {
+ if (is_tcp_udp[0])
+ ip4_local_l4_csum_validate (vm, b[0], ih[0], is_udp[0], &error[0],
+ &good_tcp_udp[0]);
+ if (is_tcp_udp[1])
+ ip4_local_l4_csum_validate (vm, b[1], ih[1], is_udp[1], &error[1],
+ &good_tcp_udp[1]);
+ }
+
+ error[0] = (is_tcp_udp[0] && !good_tcp_udp[0] ?
+ IP4_ERROR_TCP_CHECKSUM + is_udp[0] : error[0]);
+ error[1] = (is_tcp_udp[1] && !good_tcp_udp[1] ?
+ IP4_ERROR_TCP_CHECKSUM + is_udp[1] : error[1]);
+}
+
+static inline void
+ip4_local_set_next_and_error (vlib_node_runtime_t * error_node,
+ vlib_buffer_t * b, u16 * next, u8 error,
+ u8 head_of_feature_arc)
+{
+ u8 arc_index = vnet_feat_arc_ip4_local.feature_arc_index;
+ u32 next_index;
+
+ *next = error != IP4_ERROR_UNKNOWN_PROTOCOL ? IP_LOCAL_NEXT_DROP : *next;
+ b->error = error ? error_node->errors[error] : 0;
+ if (head_of_feature_arc)
+ {
+ next_index = *next;
+ if (PREDICT_TRUE (error == (u8) IP4_ERROR_UNKNOWN_PROTOCOL))
+ {
+ vnet_feature_arc_start (arc_index,
+ vnet_buffer (b)->sw_if_index[VLIB_RX],
+ &next_index, b);
+ *next = next_index;
+ }
+ }
+}
+
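
ip4_local_set_next_and_error folds the drop decision and the feature-arc hand-off into one place: any error forces the drop next and records it on the buffer, while an error-free packet at the head of the ip4-local arc is handed to vnet_feature_arc_start so configured features can redirect it. A rough standalone sketch of that control flow, with placeholder enums and a stand-in for the feature-arc call:

/* Sketch only: start_feature_arc() is a placeholder, not vnet_feature_arc_start(). */
#include <stdint.h>
#include <stdio.h>

enum { ERROR_NONE, ERROR_BAD_CSUM };
enum { NEXT_DROP, NEXT_PUNT, NEXT_FEATURE = 42 };

static uint16_t
start_feature_arc (uint16_t next)
{
  (void) next;                  /* a real feature arc may rewrite this */
  return NEXT_FEATURE;
}

static uint16_t
pick_next (uint16_t next, uint8_t error, int head_of_arc)
{
  next = (error != ERROR_NONE) ? NEXT_DROP : next;
  if (head_of_arc && error == ERROR_NONE)
    next = start_feature_arc (next);    /* features decide where to go next */
  return next;
}

int
main (void)
{
  printf ("%u %u\n",
          (unsigned) pick_next (NEXT_PUNT, ERROR_BAD_CSUM, 1),
          (unsigned) pick_next (NEXT_PUNT, ERROR_NONE, 1));
  return 0;
}
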
+typedef struct
+{
+ ip4_address_t src;
+ u32 lbi;
+ u8 error;
+} ip4_local_last_check_t;
+
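
ip4_local_last_check_t is a one-entry cache: packets arriving back-to-back at ip4-local very often share a source address, so the result of the previous source lookup (the load-balance index and the verdict derived from it) is remembered and reused whenever the next packet's source matches. A simplified standalone sketch of the idea; fib_lookup() below is a stand-in for the real mtrie walk, not a VPP API:

/* Sketch only: a one-entry source cache in front of an "expensive" lookup. */
#include <stdint.h>
#include <stdio.h>

typedef struct
{
  uint32_t src;    /* last source address checked */
  uint32_t lbi;    /* load-balance index it resolved to */
  uint8_t error;   /* verdict computed for that source */
} last_check_t;

/* Stand-in for the real mtrie walk; returns 0 for "no route". */
static uint32_t
fib_lookup (uint32_t src)
{
  return (src & 0xff000000) == 0x0a000000 ? 7 : 0;
}

static uint32_t
check_src (uint32_t src, last_check_t * lc, uint8_t * error)
{
  if (src != lc->src)           /* miss: do the lookup and remember it */
    {
      uint32_t lbi = fib_lookup (src);
      *error = (lbi == 0) ? 1 /* e.g. "source lookup miss" */ : *error;
      lc->src = src;
      lc->lbi = lbi;
      lc->error = *error;
    }
  else                          /* hit: reuse the previous verdict */
    *error = lc->error;
  return lc->lbi;
}

int
main (void)
{
  last_check_t lc = {.src = 0,.lbi = ~0u,.error = 0 };
  uint8_t e0 = 0, e1 = 0;
  uint32_t l0 = check_src (0x0a000001, &lc, &e0);   /* full lookup */
  uint32_t l1 = check_src (0x0a000001, &lc, &e1);   /* served from the cache */
  printf ("%u/%u %u/%u\n", (unsigned) l0, (unsigned) e0,
          (unsigned) l1, (unsigned) e1);
  return 0;
}
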
+static inline void
+ip4_local_check_src (vlib_buffer_t * b, ip4_header_t * ip0,
+ ip4_local_last_check_t * last_check, u8 * error0)
+{
+ ip4_fib_mtrie_leaf_t leaf0;
+ ip4_fib_mtrie_t *mtrie0;
+ const dpo_id_t *dpo0;
+ load_balance_t *lb0;
+ u32 lbi0;
+
+ vnet_buffer (b)->ip.fib_index =
+ vnet_buffer (b)->sw_if_index[VLIB_TX] != ~0 ?
+ vnet_buffer (b)->sw_if_index[VLIB_TX] : vnet_buffer (b)->ip.fib_index;
+
+ if (PREDICT_FALSE (last_check->src.as_u32 != ip0->src_address.as_u32))
+ {
+ mtrie0 = &ip4_fib_get (vnet_buffer (b)->ip.fib_index)->mtrie;
+ leaf0 = ip4_fib_mtrie_lookup_step_one (mtrie0, &ip0->src_address);
+ leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, &ip0->src_address, 2);
+ leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, &ip0->src_address, 3);
+ lbi0 = ip4_fib_mtrie_leaf_get_adj_index (leaf0);
+
+ vnet_buffer (b)->ip.adj_index[VLIB_TX] = lbi0;
+ vnet_buffer (b)->ip.adj_index[VLIB_RX] = lbi0;
+
+ lb0 = load_balance_get (lbi0);
+ dpo0 = load_balance_get_bucket_i (lb0, 0);
+
+ /*
+ * Must have a route to source otherwise we drop the packet.
+ * ip4 broadcasts are accepted, e.g. to make dhcp client work
+ *
+ * The checks are:
+ * - the source is a receive => it's from us => bogus, do this
+ * first since it sets a different error code.
+ * - uRPF check for any route to source - accept if passes.
+ * - allow packets destined to the broadcast address from unknown sources
+ */
+
+ *error0 = ((*error0 == IP4_ERROR_UNKNOWN_PROTOCOL
+ && dpo0->dpoi_type == DPO_RECEIVE) ?
+ IP4_ERROR_SPOOFED_LOCAL_PACKETS : *error0);
+ *error0 = ((*error0 == IP4_ERROR_UNKNOWN_PROTOCOL
+ && !fib_urpf_check_size (lb0->lb_urpf)
+ && ip0->dst_address.as_u32 != 0xFFFFFFFF) ?
+ IP4_ERROR_SRC_LOOKUP_MISS : *error0);
+
+ last_check->src.as_u32 = ip0->src_address.as_u32;
+ last_check->lbi = lbi0;
+ last_check->error = *error0;
+ }
+ else
+ {
+ vnet_buffer (b)->ip.adj_index[VLIB_TX] = last_check->lbi;
+ vnet_buffer (b)->ip.adj_index[VLIB_RX] = last_check->lbi;
+ *error0 = last_check->error;
+ }
+}
+
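
The source checks in ip4_local_check_src are written as a cascade of conditional overwrites: each test only replaces the verdict while it is still IP4_ERROR_UNKNOWN_PROTOCOL, so the first failing check wins and later ones become no-ops. A standalone sketch of that pattern with illustrative error codes (a locally-owned source is rejected first, then loose uRPF, with an exception for the IPv4 broadcast destination):

/* Sketch only: illustrative error codes, not the real ip4_error_t values. */
#include <stdint.h>
#include <stdio.h>

enum { OK, SPOOFED_LOCAL, SRC_LOOKUP_MISS };

static int
src_verdict (int src_is_local, int urpf_paths, uint32_t dst)
{
  int error = OK;

  /* A source that resolves to one of our own (receive) addresses is bogus. */
  error = (error == OK && src_is_local) ? SPOOFED_LOCAL : error;

  /* Otherwise require some route back to the source (loose uRPF),
   * except for packets addressed to the IPv4 broadcast address. */
  error = (error == OK && urpf_paths == 0 && dst != 0xFFFFFFFF)
    ? SRC_LOOKUP_MISS : error;

  return error;
}

int
main (void)
{
  printf ("%d %d %d\n",
          src_verdict (1, 3, 0x0a000001),    /* spoofed local source */
          src_verdict (0, 0, 0xFFFFFFFF),    /* no route, but broadcast: OK */
          src_verdict (0, 0, 0x0a000001));   /* no route: lookup miss */
  return 0;
}
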
+static inline void
+ip4_local_check_src_x2 (vlib_buffer_t ** b, ip4_header_t ** ip,
+ ip4_local_last_check_t * last_check, u8 * error)
+{
+ ip4_fib_mtrie_leaf_t leaf[2];
+ ip4_fib_mtrie_t *mtrie[2];
+ const dpo_id_t *dpo[2];
+ load_balance_t *lb[2];
+ u32 not_last_hit = 0;
+ u32 lbi[2];
+
+ not_last_hit |= ip[0]->src_address.as_u32 ^ last_check->src.as_u32;
+ not_last_hit |= ip[1]->src_address.as_u32 ^ last_check->src.as_u32;
+
+ vnet_buffer (b[0])->ip.fib_index =
+ vnet_buffer (b[0])->sw_if_index[VLIB_TX] != ~0 ?
+ vnet_buffer (b[0])->sw_if_index[VLIB_TX] :
+ vnet_buffer (b[0])->ip.fib_index;
+
+ vnet_buffer (b[1])->ip.fib_index =
+ vnet_buffer (b[1])->sw_if_index[VLIB_TX] != ~0 ?
+ vnet_buffer (b[1])->sw_if_index[VLIB_TX] :
+ vnet_buffer (b[1])->ip.fib_index;
+
+ if (PREDICT_FALSE (not_last_hit))
+ {
+ mtrie[0] = &ip4_fib_get (vnet_buffer (b[0])->ip.fib_index)->mtrie;
+ mtrie[1] = &ip4_fib_get (vnet_buffer (b[1])->ip.fib_index)->mtrie;
+
+ leaf[0] = ip4_fib_mtrie_lookup_step_one (mtrie[0], &ip[0]->src_address);
+ leaf[1] = ip4_fib_mtrie_lookup_step_one (mtrie[1], &ip[1]->src_address);
+
+ leaf[0] = ip4_fib_mtrie_lookup_step (mtrie[0], leaf[0],
+ &ip[0]->src_address, 2);
+ leaf[1] = ip4_fib_mtrie_lookup_step (mtrie[1], leaf[1],
+ &ip[1]->src_address, 2);
+
+ leaf[0] = ip4_fib_mtrie_lookup_step (mtrie[0], leaf[0],
+ &ip[0]->src_address, 3);
+ leaf[1] = ip4_fib_mtrie_lookup_step (mtrie[1], leaf[1],
+ &ip[1]->src_address, 3);
+
+ lbi[0] = ip4_fib_mtrie_leaf_get_adj_index (leaf[0]);
+ lbi[1] = ip4_fib_mtrie_leaf_get_adj_index (leaf[1]);
+
+ vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = lbi[0];
+ vnet_buffer (b[0])->ip.adj_index[VLIB_RX] = lbi[0];
+
+ vnet_buffer (b[1])->ip.adj_index[VLIB_TX] = lbi[1];
+ vnet_buffer (b[1])->ip.adj_index[VLIB_RX] = lbi[1];
+
+ lb[0] = load_balance_get (lbi[0]);
+ lb[1] = load_balance_get (lbi[1]);
+
+ dpo[0] = load_balance_get_bucket_i (lb[0], 0);
+ dpo[1] = load_balance_get_bucket_i (lb[1], 0);
+
+ error[0] = ((error[0] == IP4_ERROR_UNKNOWN_PROTOCOL &&
+ dpo[0]->dpoi_type == DPO_RECEIVE) ?
+ IP4_ERROR_SPOOFED_LOCAL_PACKETS : error[0]);
+ error[0] = ((error[0] == IP4_ERROR_UNKNOWN_PROTOCOL &&
+ !fib_urpf_check_size (lb[0]->lb_urpf) &&
+ ip[0]->dst_address.as_u32 != 0xFFFFFFFF)
+ ? IP4_ERROR_SRC_LOOKUP_MISS : error[0]);
+
+ error[1] = ((error[1] == IP4_ERROR_UNKNOWN_PROTOCOL &&
+ dpo[1]->dpoi_type == DPO_RECEIVE) ?
+ IP4_ERROR_SPOOFED_LOCAL_PACKETS : error[1]);
+ error[1] = ((error[1] == IP4_ERROR_UNKNOWN_PROTOCOL &&
+ !fib_urpf_check_size (lb[1]->lb_urpf) &&
+ ip[1]->dst_address.as_u32 != 0xFFFFFFFF)
+ ? IP4_ERROR_SRC_LOOKUP_MISS : error[1]);
+
+ last_check->src.as_u32 = ip[1]->src_address.as_u32;
+ last_check->lbi = lbi[1];
+ last_check->error = error[1];
+ }
+ else
+ {
+ vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = last_check->lbi;
+ vnet_buffer (b[0])->ip.adj_index[VLIB_RX] = last_check->lbi;
+
+ vnet_buffer (b[1])->ip.adj_index[VLIB_TX] = last_check->lbi;
+ vnet_buffer (b[1])->ip.adj_index[VLIB_RX] = last_check->lbi;
+
+ error[0] = last_check->error;
+ error[1] = last_check->error;
+ }
+}
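
The x2 variant decides whether both packets can reuse the cached source check by OR-ing XOR differences instead of branching per packet: the accumulator stays zero only if both source addresses equal the cached one. A tiny standalone sketch of that test:

/* Sketch only: XOR accumulation as a branch-free "did anything differ" test. */
#include <stdint.h>
#include <stdio.h>

static int
both_match_cache (uint32_t src0, uint32_t src1, uint32_t cached)
{
  uint32_t not_last_hit = 0;

  /* XOR is zero only when the operands are equal, so the accumulator
   * stays zero only if *both* sources equal the cached one. */
  not_last_hit |= src0 ^ cached;
  not_last_hit |= src1 ^ cached;

  return not_last_hit == 0;
}

int
main (void)
{
  printf ("%d %d\n",
          both_match_cache (0x0a000001, 0x0a000001, 0x0a000001),   /* 1 */
          both_match_cache (0x0a000001, 0x0a000002, 0x0a000001));  /* 0 */
  return 0;
}
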
static inline uword
ip4_local_inline (vlib_main_t * vm,
@@ -1177,288 +1415,115 @@ ip4_local_inline (vlib_main_t * vm,
{
ip4_main_t *im = &ip4_main;
ip_lookup_main_t *lm = &im->lookup_main;
- ip_local_next_t next_index;
- u32 *from, *to_next, n_left_from, n_left_to_next;
+ u32 *from, n_left_from;
vlib_node_runtime_t *error_node =
vlib_node_get_runtime (vm, ip4_input_node.index);
- u8 arc_index = vnet_feat_arc_ip4_local.feature_arc_index;
+ u16 nexts[VLIB_FRAME_SIZE], *next;
+ vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
+ ip4_header_t *ip[2];
+ u8 error[2];
+
+ ip4_local_last_check_t last_check = {
+ .src = {.as_u32 = 0},
+ .lbi = ~0,
+ .error = IP4_ERROR_UNKNOWN_PROTOCOL
+ };
from = vlib_frame_vector_args (frame);
n_left_from = frame->n_vectors;
- next_index = node->cached_next_index;
if (node->flags & VLIB_NODE_FLAG_TRACE)
ip4_forward_next_trace (vm, node, frame, VLIB_TX);
- while (n_left_from > 0)
- {
- vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
-
- while (n_left_from >= 4 && n_left_to_next >= 2)
- {
- vlib_buffer_t *p0, *p1;
- ip4_header_t *ip0, *ip1;
- ip4_fib_mtrie_t *mtrie0, *mtrie1;
- ip4_fib_mtrie_leaf_t leaf0, leaf1;
- const dpo_id_t *dpo0, *dpo1;
- const load_balance_t *lb0, *lb1;
- u32 pi0, next0, lbi0;
- u32 pi1, next1, lbi1;
- u8 error0, is_udp0, is_tcp_udp0, good_tcp_udp0, proto0;
- u8 error1, is_udp1, is_tcp_udp1, good_tcp_udp1, proto1;
- u32 sw_if_index0, sw_if_index1;
-
- pi0 = to_next[0] = from[0];
- pi1 = to_next[1] = from[1];
- from += 2;
- n_left_from -= 2;
- to_next += 2;
- n_left_to_next -= 2;
-
- next0 = next1 = IP_LOCAL_NEXT_DROP;
- error0 = error1 = IP4_ERROR_UNKNOWN_PROTOCOL;
+ vlib_get_buffers (vm, from, bufs, n_left_from);
+ b = bufs;
+ next = nexts;
- p0 = vlib_get_buffer (vm, pi0);
- p1 = vlib_get_buffer (vm, pi1);
+ while (n_left_from >= 6)
+ {
+ u32 is_nat, not_batch = 0;
- ip0 = vlib_buffer_get_current (p0);
- ip1 = vlib_buffer_get_current (p1);
+ /* Prefetch next iteration. */
+ {
+ vlib_prefetch_buffer_header (b[4], LOAD);
+ vlib_prefetch_buffer_header (b[5], LOAD);
- vnet_buffer (p0)->l3_hdr_offset = p0->current_data;
- vnet_buffer (p1)->l3_hdr_offset = p1->current_data;
+ CLIB_PREFETCH (b[4]->data, CLIB_CACHE_LINE_BYTES, LOAD);
+ CLIB_PREFETCH (b[5]->data, CLIB_CACHE_LINE_BYTES, LOAD);
+ }
- sw_if_index0 = vnet_buffer (p0)->sw_if_index[VLIB_RX];
- sw_if_index1 = vnet_buffer (p1)->sw_if_index[VLIB_RX];
+ error[0] = error[1] = IP4_ERROR_UNKNOWN_PROTOCOL;
- proto0 = ip0->protocol;
- proto1 = ip1->protocol;
+ ip[0] = vlib_buffer_get_current (b[0]);
+ ip[1] = vlib_buffer_get_current (b[1]);
- if (head_of_feature_arc == 0)
- goto skip_checks;
+ vnet_buffer (b[0])->l3_hdr_offset = b[0]->current_data;
+ vnet_buffer (b[1])->l3_hdr_offset = b[1]->current_data;
- is_udp0 = proto0 == IP_PROTOCOL_UDP;
- is_udp1 = proto1 == IP_PROTOCOL_UDP;
- is_tcp_udp0 = is_udp0 || proto0 == IP_PROTOCOL_TCP;
- is_tcp_udp1 = is_udp1 || proto1 == IP_PROTOCOL_TCP;
+ is_nat = b[0]->flags & VNET_BUFFER_F_IS_NATED;
+ not_batch |= is_nat ^ (b[1]->flags & VNET_BUFFER_F_IS_NATED);
- good_tcp_udp0 =
- (p0->flags & VNET_BUFFER_F_L4_CHECKSUM_CORRECT
- || (p0->flags & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM
- || p0->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM)) != 0;
- good_tcp_udp1 = (p1->flags & VNET_BUFFER_F_L4_CHECKSUM_CORRECT
- || (p1->flags & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM
- || p1->flags &
- VNET_BUFFER_F_OFFLOAD_UDP_CKSUM)) != 0;
+ if (head_of_feature_arc == 0 || (is_nat && not_batch == 0))
+ goto skip_checks;
- if (PREDICT_FALSE (ip4_local_do_l4_check (is_tcp_udp0, p0->flags)
- || ip4_local_do_l4_check (is_tcp_udp1,
- p1->flags)))
+ if (PREDICT_TRUE (not_batch == 0))
+ {
+ ip4_local_check_l4_csum_x2 (vm, b, ip, error);
+ ip4_local_check_src_x2 (b, ip, &last_check, error);
+ }
+ else
+ {
+ if (!(b[0]->flags & VNET_BUFFER_F_IS_NATED))
{
- if (is_tcp_udp0)
- ip4_local_validate_l4 (vm, p0, ip0, is_udp0, &error0,
- &good_tcp_udp0);
- if (is_tcp_udp1)
- ip4_local_validate_l4 (vm, p1, ip1, is_udp1, &error1,
- &good_tcp_udp1);
+ ip4_local_check_l4_csum (vm, b[0], ip[0], &error[0]);
+ ip4_local_check_src (b[0], ip[0], &last_check, &error[0]);
}
-
- ASSERT (IP4_ERROR_TCP_CHECKSUM + 1 == IP4_ERROR_UDP_CHECKSUM);
- error0 = (is_tcp_udp0 && !good_tcp_udp0
- ? IP4_ERROR_TCP_CHECKSUM + is_udp0 : error0);
- error1 = (is_tcp_udp1 && !good_tcp_udp1
- ? IP4_ERROR_TCP_CHECKSUM + is_udp1 : error1);
-
- vnet_buffer (p0)->ip.fib_index =
- vnet_buffer (p0)->sw_if_index[VLIB_TX] != ~0 ?
- vnet_buffer (p0)->sw_if_index[VLIB_TX] :
- vnet_buffer (p0)->ip.fib_index;
-
- vnet_buffer (p1)->ip.fib_index =
- vnet_buffer (p1)->sw_if_index[VLIB_TX] != ~0 ?
- vnet_buffer (p1)->sw_if_index[VLIB_TX] :
- vnet_buffer (p1)->ip.fib_index;
-
-
- mtrie0 = &ip4_fib_get (vnet_buffer (p0)->ip.fib_index)->mtrie;
- mtrie1 = &ip4_fib_get (vnet_buffer (p1)->ip.fib_index)->mtrie;
-
- leaf0 = ip4_fib_mtrie_lookup_step_one (mtrie0, &ip0->src_address);
- leaf1 = ip4_fib_mtrie_lookup_step_one (mtrie1, &ip1->src_address);
- leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, &ip0->src_address,
- 2);
- leaf1 = ip4_fib_mtrie_lookup_step (mtrie1, leaf1, &ip1->src_address,
- 2);
- leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, &ip0->src_address,
- 3);
- leaf1 = ip4_fib_mtrie_lookup_step (mtrie1, leaf1, &ip1->src_address,
- 3);
-
- vnet_buffer (p0)->ip.adj_index[VLIB_RX] = lbi0 =
- ip4_fib_mtrie_leaf_get_adj_index (leaf0);
- vnet_buffer (p0)->ip.adj_index[VLIB_TX] = lbi0;
-
- vnet_buffer (p1)->ip.adj_index[VLIB_RX] = lbi1 =
- ip4_fib_mtrie_leaf_get_adj_index (leaf1);
- vnet_buffer (p1)->ip.adj_index[VLIB_TX] = lbi1;
-
- lb0 = load_balance_get (lbi0);
- lb1 = load_balance_get (lbi1);
- dpo0 = load_balance_get_bucket_i (lb0, 0);
- dpo1 = load_balance_get_bucket_i (lb1, 0);
-
- /*
- * Must have a route to source otherwise we drop the packet.
- * ip4 broadcasts are accepted, e.g. to make dhcp client work
- *
- * The checks are:
- * - the source is a recieve => it's from us => bogus, do this
- * first since it sets a different error code.
- * - uRPF check for any route to source - accept if passes.
- * - allow packets destined to the broadcast address from unknown sources
- */
- if (p0->flags & VNET_BUFFER_F_IS_NATED)
- goto skip_check0;
-
- error0 = ((error0 == IP4_ERROR_UNKNOWN_PROTOCOL &&
- dpo0->dpoi_type == DPO_RECEIVE) ?
- IP4_ERROR_SPOOFED_LOCAL_PACKETS : error0);
- error0 = ((error0 == IP4_ERROR_UNKNOWN_PROTOCOL &&
- !fib_urpf_check_size (lb0->lb_urpf) &&
- ip0->dst_address.as_u32 != 0xFFFFFFFF)
- ? IP4_ERROR_SRC_LOOKUP_MISS : error0);
-
- skip_check0:
- if (p1->flags & VNET_BUFFER_F_IS_NATED)
- goto skip_checks;
-
- error1 = ((error1 == IP4_ERROR_UNKNOWN_PROTOCOL &&
- dpo1->dpoi_type == DPO_RECEIVE) ?
- IP4_ERROR_SPOOFED_LOCAL_PACKETS : error1);
- error1 = ((error1 == IP4_ERROR_UNKNOWN_PROTOCOL &&
- !fib_urpf_check_size (lb1->lb_urpf) &&
- ip1->dst_address.as_u32 != 0xFFFFFFFF)
- ? IP4_ERROR_SRC_LOOKUP_MISS : error1);
-
- skip_checks:
-
- next0 = lm->local_next_by_ip_protocol[proto0];
- next1 = lm->local_next_by_ip_protocol[proto1];
-
- next0 =
- error0 != IP4_ERROR_UNKNOWN_PROTOCOL ? IP_LOCAL_NEXT_DROP : next0;
- next1 =
- error1 != IP4_ERROR_UNKNOWN_PROTOCOL ? IP_LOCAL_NEXT_DROP : next1;
-
- p0->error = error0 ? error_node->errors[error0] : 0;
- p1->error = error1 ? error_node->errors[error1] : 0;
-
- if (head_of_feature_arc)
+ if (!(b[1]->flags & VNET_BUFFER_F_IS_NATED))
{
- if (PREDICT_TRUE (error0 == (u8) IP4_ERROR_UNKNOWN_PROTOCOL))
- vnet_feature_arc_start (arc_index, sw_if_index0, &next0, p0);
- if (PREDICT_TRUE (error1 == (u8) IP4_ERROR_UNKNOWN_PROTOCOL))
- vnet_feature_arc_start (arc_index, sw_if_index1, &next1, p1);
+ ip4_local_check_l4_csum (vm, b[1], ip[1], &error[1]);
+ ip4_local_check_src (b[1], ip[1], &last_check, &error[1]);
}
-
- vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
- n_left_to_next, pi0, pi1,
- next0, next1);
}
- while (n_left_from > 0 && n_left_to_next > 0)
- {
- vlib_buffer_t *p0;
- ip4_header_t *ip0;
- ip4_fib_mtrie_t *mtrie0;
- ip4_fib_mtrie_leaf_t leaf0;
- u32 pi0, next0, lbi0;
- u8 error0, is_udp0, is_tcp_udp0, good_tcp_udp0, proto0;
- load_balance_t *lb0;
- const dpo_id_t *dpo0;
- u32 sw_if_index0;
-
- pi0 = to_next[0] = from[0];
- from += 1;
- n_left_from -= 1;
- to_next += 1;
- n_left_to_next -= 1;
+ skip_checks:
- next0 = IP_LOCAL_NEXT_DROP;
- error0 = IP4_ERROR_UNKNOWN_PROTOCOL;
+ next[0] = lm->local_next_by_ip_protocol[ip[0]->protocol];
+ next[1] = lm->local_next_by_ip_protocol[ip[1]->protocol];
+ ip4_local_set_next_and_error (error_node, b[0], &next[0], error[0],
+ head_of_feature_arc);
+ ip4_local_set_next_and_error (error_node, b[1], &next[1], error[1],
+ head_of_feature_arc);
- p0 = vlib_get_buffer (vm, pi0);
- ip0 = vlib_buffer_get_current (p0);
- vnet_buffer (p0)->l3_hdr_offset = p0->current_data;
- sw_if_index0 = vnet_buffer (p0)->sw_if_index[VLIB_RX];
-
- proto0 = ip0->protocol;
-
- if (head_of_feature_arc == 0 || p0->flags & VNET_BUFFER_F_IS_NATED)
- goto skip_check;
-
- is_udp0 = proto0 == IP_PROTOCOL_UDP;
- is_tcp_udp0 = is_udp0 || proto0 == IP_PROTOCOL_TCP;
-
- good_tcp_udp0 =
- (p0->flags & VNET_BUFFER_F_L4_CHECKSUM_CORRECT
- || (p0->flags & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM
- || p0->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM)) != 0;
-
- if (PREDICT_FALSE (ip4_local_do_l4_check (is_tcp_udp0, p0->flags)))
- {
- ip4_local_validate_l4 (vm, p0, ip0, is_udp0, &error0,
- &good_tcp_udp0);
- }
+ b += 2;
+ next += 2;
+ n_left_from -= 2;
+ }
- ASSERT (IP4_ERROR_TCP_CHECKSUM + 1 == IP4_ERROR_UDP_CHECKSUM);
- error0 = (is_tcp_udp0 && !good_tcp_udp0
- ? IP4_ERROR_TCP_CHECKSUM + is_udp0 : error0);
-
- vnet_buffer (p0)->ip.fib_index =
- vnet_buffer (p0)->sw_if_index[VLIB_TX] != ~0 ?
- vnet_buffer (p0)->sw_if_index[VLIB_TX] :
- vnet_buffer (p0)->ip.fib_index;
-
- mtrie0 = &ip4_fib_get (vnet_buffer (p0)->ip.fib_index)->mtrie;
- leaf0 = ip4_fib_mtrie_lookup_step_one (mtrie0, &ip0->src_address);
- leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, &ip0->src_address,
- 2);
- leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, &ip0->src_address,
- 3);
- lbi0 = ip4_fib_mtrie_leaf_get_adj_index (leaf0);
- vnet_buffer (p0)->ip.adj_index[VLIB_TX] = lbi0;
- vnet_buffer (p0)->ip.adj_index[VLIB_RX] = lbi0;
+ while (n_left_from > 0)
+ {
+ error[0] = IP4_ERROR_UNKNOWN_PROTOCOL;
- lb0 = load_balance_get (lbi0);
- dpo0 = load_balance_get_bucket_i (lb0, 0);
+ ip[0] = vlib_buffer_get_current (b[0]);
+ vnet_buffer (b[0])->l3_hdr_offset = b[0]->current_data;
- error0 = ((error0 == IP4_ERROR_UNKNOWN_PROTOCOL &&
- dpo0->dpoi_type == DPO_RECEIVE) ?
- IP4_ERROR_SPOOFED_LOCAL_PACKETS : error0);
- error0 = ((error0 == IP4_ERROR_UNKNOWN_PROTOCOL &&
- !fib_urpf_check_size (lb0->lb_urpf) &&
- ip0->dst_address.as_u32 != 0xFFFFFFFF)
- ? IP4_ERROR_SRC_LOOKUP_MISS : error0);
+ if (head_of_feature_arc == 0 || (b[0]->flags & VNET_BUFFER_F_IS_NATED))
+ goto skip_check;
- skip_check:
- next0 = lm->local_next_by_ip_protocol[proto0];
- next0 =
- error0 != IP4_ERROR_UNKNOWN_PROTOCOL ? IP_LOCAL_NEXT_DROP : next0;
+ ip4_local_check_l4_csum (vm, b[0], ip[0], &error[0]);
+ ip4_local_check_src (b[0], ip[0], &last_check, &error[0]);
- p0->error = error0 ? error_node->errors[error0] : 0;
+ skip_check:
- if (head_of_feature_arc)
- {
- if (PREDICT_TRUE (error0 == (u8) IP4_ERROR_UNKNOWN_PROTOCOL))
- vnet_feature_arc_start (arc_index, sw_if_index0, &next0, p0);
- }
+ next[0] = lm->local_next_by_ip_protocol[ip[0]->protocol];
+ ip4_local_set_next_and_error (error_node, b[0], &next[0], error[0],
+ head_of_feature_arc);
- vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
- n_left_to_next, pi0, next0);
- }
- vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ b += 1;
+ next += 1;
+ n_left_from -= 1;
}
+ vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
return frame->n_vectors;
}
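
The rewritten ip4_local_inline drops the old cached_next_index / vlib_get_next_frame / vlib_validate_buffer_enqueue_x1 bookkeeping: it fetches all buffer pointers up front with vlib_get_buffers, fills a per-frame nexts[] array while processing packets two at a time (plus a single-packet tail), and hands the whole batch to vlib_buffer_enqueue_to_next once at the end. A minimal standalone sketch of that shape, with placeholder types and a print loop standing in for the vlib enqueue (prefetching and the actual checks are omitted):

/* Sketch only: "fill a per-packet next array, enqueue the frame once". */
#include <stdint.h>
#include <stdio.h>

#define FRAME_SIZE 256

typedef struct { uint8_t protocol; } pkt_t;

static void
batch_enqueue (uint16_t * nexts, uint32_t n)
{
  /* Stand-in for vlib_buffer_enqueue_to_next (). */
  for (uint32_t i = 0; i < n; i++)
    printf ("pkt %u -> next %u\n", (unsigned) i, (unsigned) nexts[i]);
}

static void
process_frame (pkt_t * pkts, uint32_t n_left)
{
  uint16_t nexts[FRAME_SIZE], *next = nexts;
  pkt_t *b = pkts;
  uint32_t n = n_left;

  while (n_left >= 2)           /* dual-packet main loop */
    {
      next[0] = b[0].protocol;  /* pretend protocol selects the next node */
      next[1] = b[1].protocol;
      b += 2; next += 2; n_left -= 2;
    }
  while (n_left > 0)            /* single-packet tail */
    {
      next[0] = b[0].protocol;
      b += 1; next += 1; n_left -= 1;
    }
  batch_enqueue (nexts, n);
}

int
main (void)
{
  pkt_t pkts[3] = { {6}, {17}, {1} };
  process_frame (pkts, 3);
  return 0;
}
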
diff --git a/src/vnet/ip/ip6_forward.c b/src/vnet/ip/ip6_forward.c
index 275bcc67b69..a66070302c0 100644
--- a/src/vnet/ip/ip6_forward.c
+++ b/src/vnet/ip/ip6_forward.c
@@ -1335,6 +1335,7 @@ ip6_local_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
error0 = (!ip6_urpf_loose_check (im, p0, ip0)
? IP6_ERROR_SRC_LOOKUP_MISS : error0);
}
+
vnet_buffer (p0)->ip.fib_index =
vnet_buffer (p0)->sw_if_index[VLIB_TX] != ~0 ?
vnet_buffer (p0)->sw_if_index[VLIB_TX] :