author | Mohsin Kazmi <sykazmi@cisco.com> | 2021-10-21 20:20:24 +0000 |
---|---|---|
committer | Benoît Ganne <bganne@cisco.com> | 2021-11-22 10:26:14 +0000 |
commit | d431d7404d0bf8568e86f145288ed5d76ce40530 (patch) | |
tree | f1864246d94df5e310f8d69df63eea7eb61d8530 /src | |
parent | bd46907b3f27b480e5f8b0be9cb4c5b5f1fa08ab (diff) |
gso: implement gso segmentation copy with checksum
Type: improvement
Signed-off-by: Mohsin Kazmi <sykazmi@cisco.com>
Change-Id: Iec9dacde170533ca16e8117787e62da8af69ae96
Diffstat (limited to 'src')
-rw-r--r-- | src/plugins/unittest/gso_test.c | 4 |
-rw-r--r-- | src/vnet/gso/gso.h | 237 |
-rw-r--r-- | src/vnet/gso/node.c | 17 |
3 files changed, 240 insertions, 18 deletions
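
The new `gso_segment_buffer_inline` path copies each payload chunk into the segment buffers with `clib_ip_csum_and_copy_chunk`, accumulating the ones' complement sum during the copy; `gso_fixup_segmented_buf` then adds the pseudo-header and TCP header contributions and folds the final checksum per segment, with the last segment keeping the original FIN/PSH flags. The sketch below illustrates only the copy-with-checksum idea; `csum_and_copy` and `csum_fold` are simplified stand-ins written for this note, not the VPP helpers (they assume the odd trailing byte only occurs at the end of a chunk, whereas the real `clib_ip_csum_t` tracks odd bytes across chunks).

```c
/* Simplified sketch of copy-with-checksum (illustrative only, not VPP code).
 * Data is assumed to be in network byte order. */
#include <stdint.h>
#include <stddef.h>
#include <string.h>

static uint32_t
csum_and_copy (uint8_t *dst, const uint8_t *src, size_t len, uint32_t sum)
{
  memcpy (dst, src, len);	/* single pass over the payload */
  for (size_t i = 0; i + 1 < len; i += 2)
    sum += (uint32_t) ((src[i] << 8) | src[i + 1]);
  if (len & 1)
    sum += (uint32_t) src[len - 1] << 8;	/* pad the odd trailing byte */
  return sum;
}

static uint16_t
csum_fold (uint32_t sum)
{
  while (sum >> 16)		/* fold carries back into the low 16 bits */
    sum = (sum & 0xffff) + (sum >> 16);
  return (uint16_t) ~sum;	/* ones' complement of the folded sum */
}
```

In the patch the same accumulator also receives the pseudo-header words before the fold, which is why `gso_fixup_segmented_buf` adds `ip4->src_address`/`ip4->dst_address` (or the `ip6_psh_t` block) into `c->sum`.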
diff --git a/src/plugins/unittest/gso_test.c b/src/plugins/unittest/gso_test.c
index c7b047f7bff..54eb7422c87 100644
--- a/src/plugins/unittest/gso_test.c
+++ b/src/plugins/unittest/gso_test.c
@@ -170,14 +170,12 @@ gso_segment_buffer_test (vlib_main_t *vm, u32 bi,
 {
   vlib_buffer_t *b = vlib_get_buffer (vm, bi);
   generic_header_offset_t gho = { 0 };
-  u32 n_bytes_b = vlib_buffer_length_in_chain (vm, b);
   u32 n_tx_bytes = 0;

   if (PREDICT_TRUE (b->flags & VNET_BUFFER_F_GSO))
     {
       vnet_generic_header_offset_parser (b, &gho, is_l2, !is_ip6, is_ip6);
-      n_tx_bytes =
-        gso_segment_buffer (vm, ptd, bi, b, &gho, n_bytes_b, is_l2, is_ip6);
+      n_tx_bytes = gso_segment_buffer_inline (vm, ptd, b, &gho, is_l2, is_ip6);
     }

   return n_tx_bytes;
diff --git a/src/vnet/gso/gso.h b/src/vnet/gso/gso.h
index 926ce634fd0..041fab3bcc4 100644
--- a/src/vnet/gso/gso.h
+++ b/src/vnet/gso/gso.h
@@ -18,6 +18,7 @@

 #include <vnet/vnet.h>
 #include <vnet/gso/hdr_offset_parser.h>
+#include <vnet/ip/ip_psh_cksum.h>

 typedef struct
 {
@@ -33,6 +34,242 @@ u32 gso_segment_buffer (vlib_main_t *vm, vnet_interface_per_thread_data_t *ptd,
                         u32 bi, vlib_buffer_t *b, generic_header_offset_t *gho,
                         u32 n_bytes_b, u8 is_l2, u8 is_ip6);

+static_always_inline void
+gso_init_bufs_from_template_base (vlib_buffer_t **bufs, vlib_buffer_t *b0,
+                                  u32 flags, u16 n_bufs, u16 hdr_sz)
+{
+  u32 i = n_bufs;
+  while (i >= 4)
+    {
+      /* prefetches */
+      CLIB_PREFETCH (bufs[2], 2 * CLIB_CACHE_LINE_BYTES, LOAD);
+      CLIB_PREFETCH (bufs[3], 2 * CLIB_CACHE_LINE_BYTES, LOAD);
+      vlib_prefetch_buffer_data (bufs[2], LOAD);
+      vlib_prefetch_buffer_data (bufs[3], LOAD);
+
+      /* copying objects from cacheline 0 */
+      bufs[0]->current_data = 0;
+      bufs[1]->current_data = 0;
+
+      bufs[0]->current_length = hdr_sz;
+      bufs[1]->current_length = hdr_sz;
+
+      bufs[0]->flags = bufs[1]->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID | flags;
+      bufs[0]->flow_id = bufs[1]->flow_id = b0->flow_id;
+      bufs[0]->error = bufs[1]->error = b0->error;
+      bufs[0]->current_config_index = bufs[1]->current_config_index =
+        b0->current_config_index;
+
+      clib_memcpy_fast (&bufs[0]->opaque, &b0->opaque, sizeof (b0->opaque));
+      clib_memcpy_fast (&bufs[1]->opaque, &b0->opaque, sizeof (b0->opaque));
+
+      /* copying objects from cacheline 1 */
+      bufs[0]->trace_handle = b0->trace_handle;
+      bufs[1]->trace_handle = b0->trace_handle;
+
+      bufs[0]->total_length_not_including_first_buffer = 0;
+      bufs[1]->total_length_not_including_first_buffer = 0;
+
+      /* copying data */
+      clib_memcpy_fast (bufs[0]->data, vlib_buffer_get_current (b0), hdr_sz);
+      clib_memcpy_fast (bufs[1]->data, vlib_buffer_get_current (b0), hdr_sz);
+
+      bufs += 2;
+      i -= 2;
+    }
+
+  while (i > 0)
+    {
+      /* copying objects from cacheline 0 */
+      bufs[0]->current_data = 0;
+      bufs[0]->current_length = hdr_sz;
+      bufs[0]->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID | flags;
+      bufs[0]->flow_id = b0->flow_id;
+      bufs[0]->error = b0->error;
+      bufs[0]->current_config_index = b0->current_config_index;
+      clib_memcpy_fast (&bufs[0]->opaque, &b0->opaque, sizeof (b0->opaque));
+
+      /* copying objects from cacheline 1 */
+      bufs[0]->trace_handle = b0->trace_handle;
+      bufs[0]->total_length_not_including_first_buffer = 0;
+
+      /* copying data */
+      clib_memcpy_fast (bufs[0]->data, vlib_buffer_get_current (b0), hdr_sz);
+
+      bufs++;
+      i--;
+    }
+}
+
+static_always_inline void
+gso_fixup_segmented_buf (vlib_main_t *vm, vlib_buffer_t *b0, u32 next_tcp_seq,
+                         int is_l2, int is_ip6, generic_header_offset_t *gho,
+                         clib_ip_csum_t *c, u8 tcp_flags)
+{
+
+  ip4_header_t *ip4 =
+    (ip4_header_t *) (vlib_buffer_get_current (b0) + gho->l3_hdr_offset +
+                      gho->outer_hdr_sz);
+  ip6_header_t *ip6 =
+    (ip6_header_t *) (vlib_buffer_get_current (b0) + gho->l3_hdr_offset +
+                      gho->outer_hdr_sz);
+  tcp_header_t *tcp =
+    (tcp_header_t *) (vlib_buffer_get_current (b0) + gho->l4_hdr_offset +
+                      gho->outer_hdr_sz);
+
+  tcp->flags = tcp_flags;
+  tcp->seq_number = clib_host_to_net_u32 (next_tcp_seq);
+
+  if (is_ip6)
+    {
+      ip6->payload_length = clib_host_to_net_u16 (
+        b0->current_length - gho->l4_hdr_offset - gho->outer_hdr_sz);
+      vnet_buffer_offload_flags_clear (b0, VNET_BUFFER_OFFLOAD_F_TCP_CKSUM);
+      ip6_psh_t psh = { 0 };
+      u32 *p = (u32 *) &psh;
+      psh.src = ip6->src_address;
+      psh.dst = ip6->dst_address;
+      psh.l4len = ip6->payload_length;
+      psh.proto = clib_host_to_net_u32 ((u32) ip6->protocol);
+      for (int i = 0; i < 10; i++)
+        c->sum += p[i];
+    }
+  else
+    {
+      ip4->length = clib_host_to_net_u16 (
+        b0->current_length - gho->l3_hdr_offset - gho->outer_hdr_sz);
+      if (gho->gho_flags & GHO_F_IP4)
+        ip4->checksum = ip4_header_checksum (ip4);
+      vnet_buffer_offload_flags_clear (b0, (VNET_BUFFER_OFFLOAD_F_IP_CKSUM |
+                                            VNET_BUFFER_OFFLOAD_F_TCP_CKSUM));
+      c->sum += clib_mem_unaligned (&ip4->src_address, u32);
+      c->sum += clib_mem_unaligned (&ip4->dst_address, u32);
+      c->sum += clib_host_to_net_u32 (
+        (clib_net_to_host_u16 (ip4->length) - ip4_header_bytes (ip4)) +
+        (ip4->protocol << 16));
+    }
+  clib_ip_csum_chunk (c, (u8 *) tcp, gho->l4_hdr_sz);
+  tcp->checksum = clib_ip_csum_fold (c);
+
+  if (!is_l2 && ((gho->gho_flags & GHO_F_TUNNEL) == 0))
+    {
+      u32 adj_index0 = vnet_buffer (b0)->ip.adj_index[VLIB_TX];
+
+      ip_adjacency_t *adj0 = adj_get (adj_index0);
+
+      if (adj0->lookup_next_index == IP_LOOKUP_NEXT_MIDCHAIN &&
+          adj0->sub_type.midchain.fixup_func)
+        /* calls e.g. ipip44_fixup */
+        adj0->sub_type.midchain.fixup_func (
+          vm, adj0, b0, adj0->sub_type.midchain.fixup_data);
+    }
+}
+
+static_always_inline u32
+gso_segment_buffer_inline (vlib_main_t *vm,
+                           vnet_interface_per_thread_data_t *ptd,
+                           vlib_buffer_t *b, generic_header_offset_t *gho,
+                           int is_l2, int is_ip6)
+{
+  vlib_buffer_t **bufs = 0;
+  u32 n_tx_bytes = 0;
+  u16 gso_size = vnet_buffer2 (b)->gso_size;
+  u8 tcp_flags = 0, tcp_flags_no_fin_psh = 0;
+  u32 default_bflags =
+    b->flags & ~(VNET_BUFFER_F_GSO | VLIB_BUFFER_NEXT_PRESENT);
+  u16 hdr_sz = gho->hdr_sz + gho->outer_hdr_sz;
+  u32 next_tcp_seq = 0, tcp_seq = 0;
+  u32 data_size = vlib_buffer_length_in_chain (vm, b) - hdr_sz;
+  u16 size =
+    clib_min (gso_size, vlib_buffer_get_default_data_size (vm) - hdr_sz);
+  u16 n_alloc = 0, n_bufs = ((data_size + size - 1) / size);
+  clib_ip_csum_t c = { .sum = 0, .odd = 0 };
+  u8 *src_ptr, *dst_ptr;
+  u16 src_left, dst_left, bytes_to_copy;
+  u32 i = 0;
+
+  vec_validate (ptd->split_buffers, n_bufs - 1);
+  n_alloc = vlib_buffer_alloc (vm, ptd->split_buffers, n_bufs);
+  if (n_alloc < n_bufs)
+    {
+      vlib_buffer_free (vm, ptd->split_buffers, n_alloc);
+      return 0;
+    }
+
+  vec_validate (bufs, n_bufs - 1);
+  vlib_get_buffers (vm, ptd->split_buffers, bufs, n_bufs);
+
+  tcp_header_t *tcp =
+    (tcp_header_t *) (vlib_buffer_get_current (b) + gho->l4_hdr_offset +
+                      gho->outer_hdr_sz);
+  tcp_seq = next_tcp_seq = clib_net_to_host_u32 (tcp->seq_number);
+  /* store original flags for last packet and reset FIN and PSH */
+  tcp_flags = tcp->flags;
+  tcp_flags_no_fin_psh = tcp->flags & ~(TCP_FLAG_FIN | TCP_FLAG_PSH);
+  tcp->checksum = 0;
+
+  gso_init_bufs_from_template_base (bufs, b, default_bflags, n_bufs, hdr_sz);
+
+  src_ptr = vlib_buffer_get_current (b) + hdr_sz;
+  src_left = b->current_length - hdr_sz;
+  dst_ptr = vlib_buffer_get_current (bufs[i]) + hdr_sz;
+  dst_left = size;
+
+  while (data_size)
+    {
+      bytes_to_copy = clib_min (src_left, dst_left);
+      clib_ip_csum_and_copy_chunk (&c, src_ptr, dst_ptr, bytes_to_copy);
+
+      src_left -= bytes_to_copy;
+      src_ptr += bytes_to_copy;
+      data_size -= bytes_to_copy;
+      dst_left -= bytes_to_copy;
+      dst_ptr += bytes_to_copy;
+      next_tcp_seq += bytes_to_copy;
+      bufs[i]->current_length += bytes_to_copy;
+
+      if (0 == src_left)
+        {
+          /* init src to the next buffer in chain */
+          if (b->flags & VLIB_BUFFER_NEXT_PRESENT)
+            {
+              b = vlib_get_buffer (vm, b->next_buffer);
+              src_left = b->current_length;
+              src_ptr = vlib_buffer_get_current (b);
+            }
+          else
+            {
+              ASSERT (data_size == 0);
+              break;
+            }
+        }
+      if (0 == dst_left && data_size)
+        {
+          vlib_prefetch_buffer_header (bufs[i + 1], LOAD);
+          vlib_prefetch_buffer_data (bufs[i + 1], LOAD);
+
+          n_tx_bytes += bufs[i]->current_length;
+          gso_fixup_segmented_buf (vm, bufs[i], tcp_seq, is_l2, is_ip6, gho,
+                                   &c, tcp_flags_no_fin_psh);
+          i++;
+          dst_left = size;
+          dst_ptr = vlib_buffer_get_current (bufs[i]) + hdr_sz;
+          tcp_seq = next_tcp_seq;
+          // reset clib_ip_csum_t
+          c.odd = 0;
+          c.sum = 0;
+        }
+    }
+
+  ASSERT ((i + 1) == n_alloc);
+  n_tx_bytes += bufs[i]->current_length;
+  gso_fixup_segmented_buf (vm, bufs[i], tcp_seq, is_l2, is_ip6, gho, &c,
+                           tcp_flags);
+
+  vec_free (bufs);
+  return n_tx_bytes;
+}
+
 #endif /* included_gso_h */

 /*
diff --git a/src/vnet/gso/node.c b/src/vnet/gso/node.c
index c48d8fefe16..109d178c33c 100644
--- a/src/vnet/gso/node.c
+++ b/src/vnet/gso/node.c
@@ -92,9 +92,6 @@ tso_segment_ipip_tunnel_fixup (vlib_main_t * vm,
   while (i < n_tx_bufs)
     {
       vlib_buffer_t *b0 = vlib_get_buffer (vm, ptd->split_buffers[i]);
-      vnet_get_outer_header (b0, gho);
-      clib_memcpy_fast (vlib_buffer_get_current (b0),
-                        vlib_buffer_get_current (sb0), gho->outer_hdr_sz);

       ip4_header_t *ip4 =
         (ip4_header_t *) (vlib_buffer_get_current (b0) +
@@ -184,9 +181,6 @@ tso_segment_vxlan_tunnel_fixup (vlib_main_t * vm,
   while (i < n_tx_bufs)
     {
       vlib_buffer_t *b0 = vlib_get_buffer (vm, ptd->split_buffers[i]);
-      vnet_get_outer_header (b0, gho);
-      clib_memcpy_fast (vlib_buffer_get_current (b0),
-                        vlib_buffer_get_current (sb0), gho->outer_hdr_sz);

       tso_segment_vxlan_tunnel_headers_fixup (vm, b0, gho);
       n_tx_bytes += gho->outer_hdr_sz;
@@ -686,7 +680,6 @@ vnet_gso_node_inline (vlib_main_t * vm,
          n_left_to_next += 1;  /* undo the counting. */
          generic_header_offset_t gho = { 0 };
-         u32 n_bytes_b0 = vlib_buffer_length_in_chain (vm, b[0]);
          u32 n_tx_bytes = 0;
          u32 inner_is_ip6 = is_ip6;
@@ -707,15 +700,11 @@ vnet_gso_node_inline (vlib_main_t * vm,
                  continue;
                }

-             vnet_get_inner_header (b[0], &gho);
-
-             n_bytes_b0 -= gho.outer_hdr_sz;
              inner_is_ip6 = (gho.gho_flags & GHO_F_IP6) != 0;
            }

-         n_tx_bytes =
-           tso_segment_buffer (vm, ptd, bi0, b[0], &gho, n_bytes_b0,
-                               is_l2, inner_is_ip6);
+         n_tx_bytes = gso_segment_buffer_inline (vm, ptd, b[0], &gho,
+                                                 is_l2, inner_is_ip6);

          if (PREDICT_FALSE (n_tx_bytes == 0))
            {
@@ -729,7 +718,6 @@ vnet_gso_node_inline (vlib_main_t * vm,
          if (PREDICT_FALSE (gho.gho_flags & GHO_F_VXLAN_TUNNEL))
            {
-             vnet_get_outer_header (b[0], &gho);
              n_tx_bytes +=
                tso_segment_vxlan_tunnel_fixup (vm, ptd, b[0], &gho);
            }
@@ -738,7 +726,6 @@ vnet_gso_node_inline (vlib_main_t * vm,
                   (gho.gho_flags & (GHO_F_IPIP_TUNNEL | GHO_F_IPIP6_TUNNEL)))
            {
-             vnet_get_outer_header (b[0], &gho);
              n_tx_bytes +=
                tso_segment_ipip_tunnel_fixup (vm, ptd, b[0], &gho);
            }
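
For orientation, the buffer sizing at the top of `gso_segment_buffer_inline` is a ceiling division of the payload by the per-segment payload size. The sketch below restates that arithmetic; the function name and example values are made up for this note, not taken from the patch.

```c
/* Restates the n_bufs computation from gso_segment_buffer_inline;
 * names and example values are illustrative only. */
#include <stdint.h>

static uint16_t
gso_n_segments (uint32_t chain_len, uint16_t hdr_sz, uint16_t gso_size,
                uint16_t buf_data_sz)
{
  uint32_t data_size = chain_len - hdr_sz;          /* TCP payload bytes       */
  uint16_t max_payload = buf_data_sz - hdr_sz;      /* what fits in one buffer */
  uint16_t size = gso_size < max_payload ? gso_size : max_payload;
  return (uint16_t) ((data_size + size - 1) / size); /* ceiling division       */
}

/* Example: chain_len = 65614, hdr_sz = 54 (Ethernet + IPv4 + TCP),
 * gso_size = 1448, buf_data_sz = 2048
 *   -> data_size = 65560, size = 1448, 46 segment buffers. */
```

Each of those segments except the last is emitted with `tcp_flags_no_fin_psh`; the final segment carries the remaining bytes and the original TCP flags.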