Diffstat (limited to 'src/vnet')
-rw-r--r--   src/vnet/ip/lookup.c            |   8
-rw-r--r--   src/vnet/udp/udp_encap.c        |  11
-rw-r--r--   src/vnet/udp/udp_encap.h        |   2
-rw-r--r--   src/vnet/udp/udp_encap_node.c   | 101
-rw-r--r--   src/vnet/udp/udp_inlines.h      |  30
-rw-r--r--   src/vnet/vxlan-gpe/encap.c      |  49
6 files changed, 137 insertions, 64 deletions
diff --git a/src/vnet/ip/lookup.c b/src/vnet/ip/lookup.c
index 92310e418c2..4da20df57a0 100644
--- a/src/vnet/ip/lookup.c
+++ b/src/vnet/ip/lookup.c
@@ -588,7 +588,13 @@ VLIB_CLI_COMMAND (vlib_cli_show_ip6_command, static) = {
 /* *INDENT-OFF* */
 VLIB_CLI_COMMAND (ip_route_command, static) = {
   .path = "ip route",
-  .short_help = "ip route [add|del] [count <n>] <dst-ip-addr>/<width> [table <table-id>] via [next-hop-address] [next-hop-interface] [next-hop-table <value>] [weight <value>] [preference <value>] [udp-encap-id <value>] [ip4-lookup-in-table <value>] [ip6-lookup-in-table <value>] [mpls-lookup-in-table <value>] [resolve-via-host] [resolve-via-connected] [rx-ip4 <interface>] [out-labels <value value value>]",
+  .short_help = "ip route [add|del] [count <n>] <dst-ip-addr>/<width> [table "
+		"<table-id>] via [next-hop-address] [next-hop-interface] "
+		"[next-hop-table <value>] [weight <value>] [preference "
+		"<value>] [udp-encap <value>] [ip4-lookup-in-table <value>] "
+		"[ip6-lookup-in-table <value>] [mpls-lookup-in-table <value>] "
+		"[resolve-via-host] [resolve-via-connected] [rx-ip4 "
+		"<interface>] [out-labels <value value value>]",
   .function = vnet_ip_route_cmd,
   .is_mp_safe = 1,
 };
diff --git a/src/vnet/udp/udp_encap.c b/src/vnet/udp/udp_encap.c
index cb93adb8d39..a0f5a50c223 100644
--- a/src/vnet/udp/udp_encap.c
+++ b/src/vnet/udp/udp_encap.c
@@ -47,8 +47,7 @@ static void
 udp_encap_restack (udp_encap_t * ue)
 {
   dpo_stack (udp_encap_dpo_types[ue->ue_ip_proto],
-	     fib_proto_to_dpo (ue->ue_ip_proto),
-	     &ue->ue_dpo,
+	     fib_proto_to_dpo (ue->ue_ip_proto), &ue->ue_dpo,
 	     fib_entry_contribute_ip_forwarding (ue->ue_fib_entry_index));
 }
 
@@ -325,12 +324,12 @@ udp_encap_fib_last_lock_gone (fib_node_t * node)
 }
 
 const static char *const udp4_encap_ip4_nodes[] = {
-  "udp4-encap",
+  "udp4o4-encap",
   NULL,
 };
 
 const static char *const udp4_encap_ip6_nodes[] = {
-  "udp4-encap",
+  "udp6o4-encap",
   NULL,
 };
 
@@ -345,12 +344,12 @@ const static char *const udp4_encap_bier_nodes[] = {
 };
 
 const static char *const udp6_encap_ip4_nodes[] = {
-  "udp6-encap",
+  "udp4o6-encap",
   NULL,
 };
 
 const static char *const udp6_encap_ip6_nodes[] = {
-  "udp6-encap",
+  "udp6o6-encap",
   NULL,
 };
 
diff --git a/src/vnet/udp/udp_encap.h b/src/vnet/udp/udp_encap.h
index b096e0f5c09..648e3b59e6d 100644
--- a/src/vnet/udp/udp_encap.h
+++ b/src/vnet/udp/udp_encap.h
@@ -85,7 +85,7 @@ typedef struct udp_encap_t_
   /**
    * The second cacheline contains control-plane data
    */
-    CLIB_CACHE_LINE_ALIGN_MARK (cacheline1);
+  CLIB_CACHE_LINE_ALIGN_MARK (cacheline1);
 
   /**
    * linkage into the FIB graph
diff --git a/src/vnet/udp/udp_encap_node.c b/src/vnet/udp/udp_encap_node.c
index 5b9fc0bf34b..1ebe79532f4 100644
--- a/src/vnet/udp/udp_encap_node.c
+++ b/src/vnet/udp/udp_encap_node.c
@@ -61,9 +61,9 @@ format_udp6_encap_trace (u8 * s, va_list * args)
 }
 
 always_inline uword
-udp_encap_inline (vlib_main_t * vm,
-		  vlib_node_runtime_t * node,
-		  vlib_frame_t * frame, int is_encap_v6)
+udp_encap_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
+		  vlib_frame_t *frame, ip_address_family_t encap_family,
+		  ip_address_family_t payload_family)
 {
   vlib_combined_counter_main_t *cm = &udp_encap_counters;
   u32 *from = vlib_frame_vector_args (frame);
@@ -121,12 +121,13 @@ udp_encap_inline (vlib_main_t * vm,
 	  ue1 = udp_encap_get (uei1);
 
 	  /* Paint */
-	  if (is_encap_v6)
+	  if (encap_family == AF_IP6)
 	    {
 	      const u8 n_bytes =
 		sizeof (udp_header_t) + sizeof (ip6_header_t);
-	      ip_udp_encap_two (vm, b0, b1, (u8 *) & ue0->ue_hdrs,
-				(u8 *) & ue1->ue_hdrs, n_bytes, 0);
+	      ip_udp_encap_two (vm, b0, b1, (u8 *) &ue0->ue_hdrs,
+				(u8 *) &ue1->ue_hdrs, n_bytes, encap_family,
+				payload_family);
 	      if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
 		{
 		  udp6_encap_trace_t *tr =
@@ -147,9 +148,9 @@ udp_encap_inline (vlib_main_t * vm,
 	      const u8 n_bytes =
 		sizeof (udp_header_t) + sizeof (ip4_header_t);
 
-	      ip_udp_encap_two (vm, b0, b1,
-				(u8 *) & ue0->ue_hdrs,
-				(u8 *) & ue1->ue_hdrs, n_bytes, 1);
+	      ip_udp_encap_two (vm, b0, b1, (u8 *) &ue0->ue_hdrs,
+				(u8 *) &ue1->ue_hdrs, n_bytes, encap_family,
+				payload_family);
 
 	      if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
 		{
@@ -202,12 +203,12 @@ udp_encap_inline (vlib_main_t * vm,
 							   b0));
 
 	  /* Paint */
-	  if (is_encap_v6)
+	  if (encap_family == AF_IP6)
 	    {
 	      const u8 n_bytes =
 		sizeof (udp_header_t) + sizeof (ip6_header_t);
-	      ip_udp_encap_one (vm, b0, (u8 *) & ue0->ue_hdrs.ip6, n_bytes,
-				0);
+	      ip_udp_encap_one (vm, b0, (u8 *) &ue0->ue_hdrs.ip6, n_bytes,
+				encap_family, payload_family);
 
 	      if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
 		{
@@ -222,8 +223,8 @@ udp_encap_inline (vlib_main_t * vm,
 	      const u8 n_bytes =
 		sizeof (udp_header_t) + sizeof (ip4_header_t);
 
-	      ip_udp_encap_one (vm, b0, (u8 *) & ue0->ue_hdrs.ip4, n_bytes,
-				1);
+	      ip_udp_encap_one (vm, b0, (u8 *) &ue0->ue_hdrs.ip4, n_bytes,
+				encap_family, payload_family);
 
 	      if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
 		{
@@ -248,37 +249,87 @@ udp_encap_inline (vlib_main_t * vm,
   return frame->n_vectors;
 }
 
-VLIB_NODE_FN (udp4_encap_node) (vlib_main_t * vm,
-				vlib_node_runtime_t * node,
-				vlib_frame_t * frame)
+VLIB_NODE_FN (udp4o4_encap_node)
+(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
 {
-  return udp_encap_inline (vm, node, frame, 0);
+  return udp_encap_inline (vm, node, frame, AF_IP4, AF_IP4);
 }
 
-VLIB_NODE_FN (udp6_encap_node) (vlib_main_t * vm,
-				vlib_node_runtime_t * node,
-				vlib_frame_t * frame)
+VLIB_NODE_FN (udp6o4_encap_node)
+(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
 {
-  return udp_encap_inline (vm, node, frame, 1);
+  return udp_encap_inline (vm, node, frame, AF_IP4, AF_IP6);
+}
+
+VLIB_NODE_FN (udp4_encap_node)
+(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
+{
+  return udp_encap_inline (vm, node, frame, AF_IP4, N_AF);
+}
+
+VLIB_NODE_FN (udp6o6_encap_node)
+(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
+{
+  return udp_encap_inline (vm, node, frame, AF_IP6, AF_IP6);
+}
+
+VLIB_NODE_FN (udp4o6_encap_node)
+(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
+{
+  return udp_encap_inline (vm, node, frame, AF_IP6, AF_IP4);
+}
+
+VLIB_NODE_FN (udp6_encap_node)
+(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
+{
+  return udp_encap_inline (vm, node, frame, AF_IP6, N_AF);
 }
 
 /* *INDENT-OFF* */
+VLIB_REGISTER_NODE (udp4o4_encap_node) = {
+  .name = "udp4o4-encap",
+  .vector_size = sizeof (u32),
+  .format_trace = format_udp4_encap_trace,
+  .n_next_nodes = 0,
+};
+
+VLIB_REGISTER_NODE (udp6o4_encap_node) = {
+  .name = "udp6o4-encap",
+  .vector_size = sizeof (u32),
+  .format_trace = format_udp6_encap_trace,
+  .n_next_nodes = 0,
+  .sibling_of = "udp4o4-encap",
+};
+
 VLIB_REGISTER_NODE (udp4_encap_node) = {
   .name = "udp4-encap",
   .vector_size = sizeof (u32),
-  .format_trace = format_udp4_encap_trace,
+  .n_next_nodes = 0,
+  .sibling_of = "udp4o4-encap",
+};
+VLIB_REGISTER_NODE (udp6o6_encap_node) = {
+  .name = "udp6o6-encap",
+  .vector_size = sizeof (u32),
+  .format_trace = format_udp6_encap_trace,
+  .n_next_nodes = 0,
+};
+
+VLIB_REGISTER_NODE (udp4o6_encap_node) = {
+  .name = "udp4o6-encap",
+  .vector_size = sizeof (u32),
+  .format_trace = format_udp4_encap_trace,
   .n_next_nodes = 0,
+  .sibling_of = "udp6o6-encap",
 };
 
 VLIB_REGISTER_NODE (udp6_encap_node) = {
   .name = "udp6-encap",
   .vector_size = sizeof (u32),
-  .format_trace = format_udp6_encap_trace,
-  .n_next_nodes = 0,
+  .sibling_of = "udp6o6-encap",
 };
 /* *INDENT-ON* */
diff --git a/src/vnet/udp/udp_inlines.h b/src/vnet/udp/udp_inlines.h
index e4eb0c88e83..d79dc9a2bad 100644
--- a/src/vnet/udp/udp_inlines.h
+++ b/src/vnet/udp/udp_inlines.h
@@ -97,14 +97,20 @@ ip_udp_fixup_one (vlib_main_t * vm, vlib_buffer_t * b0, u8 is_ip4)
 }
 
 always_inline void
-ip_udp_encap_one (vlib_main_t * vm, vlib_buffer_t * b0, u8 * ec0, word ec_len,
-		  u8 is_ip4)
+ip_udp_encap_one (vlib_main_t *vm, vlib_buffer_t *b0, u8 *ec0, word ec_len,
+		  ip_address_family_t encap_family,
+		  ip_address_family_t payload_family)
 {
-  vnet_calc_checksums_inline (vm, b0, is_ip4, !is_ip4);
+
+  if (payload_family < N_AF)
+    {
+      vnet_calc_checksums_inline (vm, b0, payload_family == AF_IP4,
+				  payload_family == AF_IP6);
+    }
 
   vlib_buffer_advance (b0, -ec_len);
 
-  if (is_ip4)
+  if (encap_family == AF_IP4)
     {
       ip4_header_t *ip0;
 
@@ -127,21 +133,27 @@ ip_udp_encap_one (vlib_main_t * vm, vlib_buffer_t * b0, u8 * ec0, word ec_len,
 }
 
 always_inline void
-ip_udp_encap_two (vlib_main_t * vm, vlib_buffer_t * b0, vlib_buffer_t * b1,
-		  u8 * ec0, u8 * ec1, word ec_len, u8 is_v4)
+ip_udp_encap_two (vlib_main_t *vm, vlib_buffer_t *b0, vlib_buffer_t *b1,
+		  u8 *ec0, u8 *ec1, word ec_len,
+		  ip_address_family_t encap_family,
+		  ip_address_family_t payload_family)
 {
   u16 new_l0, new_l1;
   udp_header_t *udp0, *udp1;
+  int payload_ip4 = (payload_family == AF_IP4);
 
   ASSERT (_vec_len (ec0) == _vec_len (ec1));
 
-  vnet_calc_checksums_inline (vm, b0, is_v4, !is_v4);
-  vnet_calc_checksums_inline (vm, b1, is_v4, !is_v4);
+  if (payload_family < N_AF)
+    {
+      vnet_calc_checksums_inline (vm, b0, payload_ip4, !payload_ip4);
+      vnet_calc_checksums_inline (vm, b1, payload_ip4, !payload_ip4);
+    }
 
   vlib_buffer_advance (b0, -ec_len);
   vlib_buffer_advance (b1, -ec_len);
 
-  if (is_v4)
+  if (encap_family == AF_IP4)
     {
       ip4_header_t *ip0, *ip1;
       ip_csum_t sum0, sum1;
diff --git a/src/vnet/vxlan-gpe/encap.c b/src/vnet/vxlan-gpe/encap.c
index daa0381c4bb..35a5529e80b 100644
--- a/src/vnet/vxlan-gpe/encap.c
+++ b/src/vnet/vxlan-gpe/encap.c
@@ -88,13 +88,15 @@ format_vxlan_gpe_encap_trace (u8 * s, va_list * args)
  *
  */
 always_inline void
-vxlan_gpe_encap_one_inline (vxlan_gpe_main_t * ngm, vlib_buffer_t * b0,
-			    vxlan_gpe_tunnel_t * t0, u32 * next0, u8 is_v4)
+vxlan_gpe_encap_one_inline (vxlan_gpe_main_t *ngm, vlib_buffer_t *b0,
+			    vxlan_gpe_tunnel_t *t0, u32 *next0,
+			    ip_address_family_t af)
 {
   ASSERT (sizeof (ip4_vxlan_gpe_header_t) == 36);
   ASSERT (sizeof (ip6_vxlan_gpe_header_t) == 56);
 
-  ip_udp_encap_one (ngm->vlib_main, b0, t0->rewrite, t0->rewrite_size, is_v4);
+  ip_udp_encap_one (ngm->vlib_main, b0, t0->rewrite, t0->rewrite_size, af,
+		    N_AF);
   next0[0] = t0->encap_next_node;
 }
 
@@ -112,16 +114,18 @@ vxlan_gpe_encap_one_inline (vxlan_gpe_main_t * ngm, vlib_buffer_t * b0,
  *
  */
 always_inline void
-vxlan_gpe_encap_two_inline (vxlan_gpe_main_t * ngm, vlib_buffer_t * b0,
-			    vlib_buffer_t * b1, vxlan_gpe_tunnel_t * t0,
-			    vxlan_gpe_tunnel_t * t1, u32 * next0,
-			    u32 * next1, u8 is_v4)
+vxlan_gpe_encap_two_inline (vxlan_gpe_main_t *ngm, vlib_buffer_t *b0,
+			    vlib_buffer_t *b1, vxlan_gpe_tunnel_t *t0,
+			    vxlan_gpe_tunnel_t *t1, u32 *next0, u32 *next1,
+			    ip_address_family_t af)
 {
   ASSERT (sizeof (ip4_vxlan_gpe_header_t) == 36);
   ASSERT (sizeof (ip6_vxlan_gpe_header_t) == 56);
 
-  ip_udp_encap_one (ngm->vlib_main, b0, t0->rewrite, t0->rewrite_size, is_v4);
-  ip_udp_encap_one (ngm->vlib_main, b1, t1->rewrite, t1->rewrite_size, is_v4);
+  ip_udp_encap_one (ngm->vlib_main, b0, t0->rewrite, t0->rewrite_size, af,
+		    N_AF);
+  ip_udp_encap_one (ngm->vlib_main, b1, t1->rewrite, t1->rewrite_size, af,
+		    N_AF);
 
   next0[0] = next1[0] = t0->encap_next_node;
 }
 
@@ -170,7 +174,7 @@ vxlan_gpe_encap (vlib_main_t * vm,
       u32 sw_if_index0 = ~0, sw_if_index1 = ~0, len0, len1;
       vnet_hw_interface_t *hi0, *hi1;
       vxlan_gpe_tunnel_t *t0 = NULL, *t1 = NULL;
-      u8 is_ip4_0 = 0, is_ip4_1 = 0;
+      ip_address_family_t af_0 = AF_IP4, af_1 = AF_IP4;
 
       vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
 
@@ -201,7 +205,7 @@ vxlan_gpe_encap (vlib_main_t * vm,
 	  n_left_to_next -= 2;
 	  n_left_from -= 2;
 
-	  /* get the flag "is_ip4" */
+	  /* get "af_0" */
 	  if (sw_if_index0 != vnet_buffer (b[0])->sw_if_index[VLIB_TX])
 	    {
 	      sw_if_index0 = vnet_buffer (b[0])->sw_if_index[VLIB_TX];
@@ -210,10 +214,10 @@ vxlan_gpe_encap (vlib_main_t * vm,
					   vnet_buffer (b[0])->sw_if_index
					   [VLIB_TX]);
 	      t0 = pool_elt_at_index (ngm->tunnels, hi0->dev_instance);
-	      is_ip4_0 = (t0->flags & VXLAN_GPE_TUNNEL_IS_IPV4);
+	      af_0 = (t0->flags & VXLAN_GPE_TUNNEL_IS_IPV4 ? AF_IP4 : AF_IP6);
 	    }
 
-	  /* get the flag "is_ip4" */
+	  /* get "af_1" */
 	  if (sw_if_index1 != vnet_buffer (b[1])->sw_if_index[VLIB_TX])
 	    {
 	      if (sw_if_index0 == vnet_buffer (b[1])->sw_if_index[VLIB_TX])
@@ -221,7 +225,7 @@ vxlan_gpe_encap (vlib_main_t * vm,
 		  sw_if_index1 = sw_if_index0;
 		  hi1 = hi0;
 		  t1 = t0;
-		  is_ip4_1 = is_ip4_0;
+		  af_1 = af_0;
 		}
 	      else
 		{
@@ -231,19 +235,20 @@ vxlan_gpe_encap (vlib_main_t * vm,
					       vnet_buffer (b[1])->sw_if_index
					       [VLIB_TX]);
 		  t1 = pool_elt_at_index (ngm->tunnels, hi1->dev_instance);
-		  is_ip4_1 = (t1->flags & VXLAN_GPE_TUNNEL_IS_IPV4);
+		  af_1 =
+		    (t1->flags & VXLAN_GPE_TUNNEL_IS_IPV4 ? AF_IP4 : AF_IP6);
 		}
 	    }
 
-	  if (PREDICT_TRUE (is_ip4_0 == is_ip4_1))
+	  if (PREDICT_TRUE (af_0 == af_1))
 	    {
 	      vxlan_gpe_encap_two_inline (ngm, b[0], b[1], t0, t1, &next0,
-					  &next1, is_ip4_0);
+					  &next1, af_0);
 	    }
 	  else
 	    {
-	      vxlan_gpe_encap_one_inline (ngm, b[0], t0, &next0, is_ip4_0);
-	      vxlan_gpe_encap_one_inline (ngm, b[1], t1, &next1, is_ip4_1);
+	      vxlan_gpe_encap_one_inline (ngm, b[0], t0, &next0, af_0);
+	      vxlan_gpe_encap_one_inline (ngm, b[1], t1, &next1, af_1);
 	    }
 
 	  /* Reset to look up tunnel partner in the configured FIB */
@@ -325,7 +330,7 @@ vxlan_gpe_encap (vlib_main_t * vm,
 	  n_left_from -= 1;
 	  n_left_to_next -= 1;
 
-	  /* get the flag "is_ip4" */
+	  /* get "af_0" */
 	  if (sw_if_index0 != vnet_buffer (b[0])->sw_if_index[VLIB_TX])
 	    {
 	      sw_if_index0 = vnet_buffer (b[0])->sw_if_index[VLIB_TX];
@@ -336,10 +341,10 @@ vxlan_gpe_encap (vlib_main_t * vm,
					   [VLIB_TX]);
 
 	      t0 = pool_elt_at_index (ngm->tunnels, hi0->dev_instance);
-	      is_ip4_0 = (t0->flags & VXLAN_GPE_TUNNEL_IS_IPV4);
+	      af_0 = (t0->flags & VXLAN_GPE_TUNNEL_IS_IPV4 ? AF_IP4 : AF_IP6);
 	    }
 
-	  vxlan_gpe_encap_one_inline (ngm, b[0], t0, &next0, is_ip4_0);
+	  vxlan_gpe_encap_one_inline (ngm, b[0], t0, &next0, af_0);
 
 	  /* Reset to look up tunnel partner in the configured FIB */
 	  vnet_buffer (b[0])->sw_if_index[VLIB_TX] = t0->encap_fib_index;
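For reference, the reworked ip_udp_encap_one() above takes two ip_address_family_t arguments: encap_family selects the outer IP version being prepended, and payload_family tells the helper which inner checksum pass to run (N_AF, as passed by vxlan-gpe, skips it). A minimal caller sketch, assuming a VPP build tree; the example_* names are illustrative and not part of this patch, only the ip_udp_encap_one() signature and the AF_IP4/AF_IP6/N_AF values come from the change itself:

/* Sketch only: IPv4 payload carried in an IPv6/UDP rewrite, i.e. the
 * combination served by the new udp4o6-encap node.  example_* names are
 * hypothetical. */
#include <vnet/udp/udp_inlines.h>

always_inline void
example_encap_ip4_over_ip6 (vlib_main_t *vm, vlib_buffer_t *b,
			    u8 *rewrite, word rewrite_len)
{
  /* AF_IP6 = outer (encap) family, AF_IP4 = inner (payload) family, so the
   * helper finishes any offloaded inner IPv4/TCP/UDP checksums before the
   * rewrite is prepended.  Passing N_AF instead would skip that step, which
   * is what the vxlan-gpe callers above do. */
  ip_udp_encap_one (vm, b, rewrite, rewrite_len, AF_IP6, AF_IP4);
}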