From c1fd17bd10fd7441d1f4d7c6f0ecf5761c4e00b2 Mon Sep 17 00:00:00 2001
From: Mohsin Kazmi
Date: Tue, 22 Mar 2022 21:40:04 +0000
Subject: devices: add support for offloads

Type: improvement

This patch adds support for:
1) GSO
2) checksum offload

Signed-off-by: Mohsin Kazmi
Change-Id: Ib00629888c62da04d58db36ce021993769e736c9
---
 src/vnet/devices/af_packet/node.c | 234 ++++++++++++++++++++++++--------------
 1 file changed, 147 insertions(+), 87 deletions(-)

diff --git a/src/vnet/devices/af_packet/node.c b/src/vnet/devices/af_packet/node.c
index 229e0501af7..a42e3e07cd9 100644
--- a/src/vnet/devices/af_packet/node.c
+++ b/src/vnet/devices/af_packet/node.c
@@ -28,6 +28,7 @@
 #include
 #include
+#include
 
 #define foreach_af_packet_input_error \
   _(PARTIAL_PKT, "partial packet")
@@ -51,10 +52,11 @@ typedef struct
   u32 next_index;
   u32 hw_if_index;
   int block;
-  u32 num_pkts;
+  u32 pkt_num;
   void *block_start;
   block_desc_t bd;
   tpacket3_hdr_t tph;
+  vnet_virtio_net_hdr_t vnet_hdr;
 } af_packet_input_trace_t;
 static u8 *
 format_af_packet_input_trace (u8 * s, va_list * args)
@@ -68,12 +70,10 @@ format_af_packet_input_trace (u8 * s, va_list * args)
   s = format (s, "af_packet: hw_if_index %d next-index %d", t->hw_if_index,
               t->next_index);
 
-  s = format (s,
-              "\n%Ublock %u:\n%Uaddress %p version %u seq_num %lu"
-              " num_pkts %u",
-              format_white_space, indent + 2, t->block, format_white_space,
-              indent + 4, t->block_start, t->bd.version, t->bd.hdr.bh1.seq_num,
-              t->num_pkts);
+  s = format (
+    s, "\n%Ublock %u:\n%Uaddress %p version %u seq_num %lu pkt_num %u",
+    format_white_space, indent + 2, t->block, format_white_space, indent + 4,
+    t->block_start, t->bd.version, t->bd.hdr.bh1.seq_num, t->pkt_num);
 
   s = format (s,
               "\n%Utpacket3_hdr:\n%Ustatus 0x%x len %u snaplen %u mac %u net %u"
@@ -91,16 +91,21 @@ format_af_packet_input_trace (u8 * s, va_list * args)
               t->tph.hv1.tp_vlan_tpid
 #endif
               );
+
+  s = format (s,
+              "\n%Uvnet-hdr:\n%Uflags 0x%02x gso_type 0x%02x hdr_len %u"
+              "\n%Ugso_size %u csum_start %u csum_offset %u",
+              format_white_space, indent + 2, format_white_space, indent + 4,
+              t->vnet_hdr.flags, t->vnet_hdr.gso_type, t->vnet_hdr.hdr_len,
+              format_white_space, indent + 4, t->vnet_hdr.gso_size,
+              t->vnet_hdr.csum_start, t->vnet_hdr.csum_offset);
   return s;
 }
 
 always_inline void
-buffer_add_to_chain (vlib_main_t * vm, u32 bi, u32 first_bi, u32 prev_bi)
+buffer_add_to_chain (vlib_buffer_t *b, vlib_buffer_t *first_b,
+                     vlib_buffer_t *prev_b, u32 bi)
 {
-  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
-  vlib_buffer_t *first_b = vlib_get_buffer (vm, first_bi);
-  vlib_buffer_t *prev_b = vlib_get_buffer (vm, prev_bi);
-
   /* update first buffer */
   first_b->total_length_not_including_first_buffer += b->current_length;
@@ -109,11 +114,11 @@ buffer_add_to_chain (vlib_main_t * vm, u32 bi, u32 first_bi, u32 prev_bi)
   prev_b->flags |= VLIB_BUFFER_NEXT_PRESENT;
 
   /* update current buffer */
-  b->next_buffer = 0;
+  b->next_buffer = ~0;
 }
 
 static_always_inline void
-fill_gso_buffer_flags (vlib_buffer_t *b, u32 gso_size, u8 l4_hdr_sz)
+fill_gso_offload (vlib_buffer_t *b, u32 gso_size, u8 l4_hdr_sz)
 {
   b->flags |= VNET_BUFFER_F_GSO;
   vnet_buffer2 (b)->gso_size = gso_size;
@@ -121,43 +126,66 @@
 }
 
 static_always_inline void
-mark_tcp_udp_cksum_calc (vlib_buffer_t *b, u8 *l4_hdr_sz)
+fill_cksum_offload (vlib_buffer_t *b, u8 *l4_hdr_sz, u8 is_ip)
 {
-  ethernet_header_t *eth = vlib_buffer_get_current (b);
   vnet_buffer_oflags_t oflags = 0;
-  if (clib_net_to_host_u16 (eth->type) == ETHERNET_TYPE_IP4)
+  u16 l2hdr_sz = 0;
+  u16 ethertype = 0;
+  u8 l4_proto = 0;
+
+  if (is_ip)
     {
-      ip4_header_t *ip4 =
-        (vlib_buffer_get_current (b) + sizeof (ethernet_header_t));
-      b->flags |= VNET_BUFFER_F_IS_IP4;
-      if (ip4->protocol == IP_PROTOCOL_TCP)
+      switch (b->data[0] & 0xf0)
        {
-          oflags |= VNET_BUFFER_OFFLOAD_F_TCP_CKSUM;
-          tcp_header_t *tcp = (tcp_header_t *) (vlib_buffer_get_current (b) +
-                                                sizeof (ethernet_header_t) +
-                                                ip4_header_bytes (ip4));
-          *l4_hdr_sz = tcp_header_bytes (tcp);
+       case 0x40:
+         ethertype = ETHERNET_TYPE_IP4;
+         break;
+       case 0x60:
+         ethertype = ETHERNET_TYPE_IP6;
+         break;
        }
-      else if (ip4->protocol == IP_PROTOCOL_UDP)
+    }
+  else
+    {
+      ethernet_header_t *eth = vlib_buffer_get_current (b);
+      ethertype = clib_net_to_host_u16 (eth->type);
+      l2hdr_sz = sizeof (ethernet_header_t);
+      if (ethernet_frame_is_tagged (ethertype))
        {
-          oflags |= VNET_BUFFER_OFFLOAD_F_UDP_CKSUM;
-          udp_header_t *udp = (udp_header_t *) (vlib_buffer_get_current (b) +
-                                                sizeof (ethernet_header_t) +
-                                                ip4_header_bytes (ip4));
-          *l4_hdr_sz = sizeof (*udp);
+         ethernet_vlan_header_t *vlan = (ethernet_vlan_header_t *) (eth + 1);
+
+         ethertype = clib_net_to_host_u16 (vlan->type);
+         l2hdr_sz += sizeof (*vlan);
+         if (ethertype == ETHERNET_TYPE_VLAN)
+           {
+             vlan++;
+             ethertype = clib_net_to_host_u16 (vlan->type);
+             l2hdr_sz += sizeof (*vlan);
+           }
        }
-      vnet_buffer (b)->l3_hdr_offset = sizeof (ethernet_header_t);
-      vnet_buffer (b)->l4_hdr_offset =
-        sizeof (ethernet_header_t) + ip4_header_bytes (ip4);
-      if (oflags)
-        vnet_buffer_offload_flags_set (b, oflags);
     }
-  else if (clib_net_to_host_u16 (eth->type) == ETHERNET_TYPE_IP6)
+
+  vnet_buffer (b)->l2_hdr_offset = 0;
+  vnet_buffer (b)->l3_hdr_offset = l2hdr_sz;
+
+  if (ethertype == ETHERNET_TYPE_IP4)
+    {
+      ip4_header_t *ip4 = (vlib_buffer_get_current (b) + l2hdr_sz);
+      vnet_buffer (b)->l4_hdr_offset = l2hdr_sz + ip4_header_bytes (ip4);
+      b->flags |= (VNET_BUFFER_F_IS_IP4 | VNET_BUFFER_F_L2_HDR_OFFSET_VALID |
+                   VNET_BUFFER_F_L3_HDR_OFFSET_VALID |
+                   VNET_BUFFER_F_L4_HDR_OFFSET_VALID);
+
+      l4_proto = ip4->protocol;
+    }
+  else if (ethertype == ETHERNET_TYPE_IP6)
     {
-      ip6_header_t *ip6 =
-        (vlib_buffer_get_current (b) + sizeof (ethernet_header_t));
-      b->flags |= VNET_BUFFER_F_IS_IP6;
+      ip6_header_t *ip6 = (vlib_buffer_get_current (b) + l2hdr_sz);
+      b->flags |= (VNET_BUFFER_F_IS_IP6 | VNET_BUFFER_F_L2_HDR_OFFSET_VALID |
+                   VNET_BUFFER_F_L3_HDR_OFFSET_VALID |
+                   VNET_BUFFER_F_L4_HDR_OFFSET_VALID);
       u16 ip6_hdr_len = sizeof (ip6_header_t);
+
       if (ip6_ext_hdr (ip6->protocol))
        {
         ip6_ext_header_t *p = (void *) (ip6 + 1);
@@ -167,34 +195,34 @@ mark_tcp_udp_cksum_calc (vlib_buffer_t *b, u8 *l4_hdr_sz)
             ip6_hdr_len += ip6_ext_header_len (p);
             p = ip6_ext_next_header (p);
           }
+         l4_proto = p->next_hdr;
        }
-      if (ip6->protocol == IP_PROTOCOL_TCP)
-       {
-         oflags |= VNET_BUFFER_OFFLOAD_F_TCP_CKSUM;
-         tcp_header_t *tcp =
-           (tcp_header_t *) (vlib_buffer_get_current (b) +
-                             sizeof (ethernet_header_t) + ip6_hdr_len);
-         *l4_hdr_sz = tcp_header_bytes (tcp);
-       }
-      else if (ip6->protocol == IP_PROTOCOL_UDP)
-       {
-         oflags |= VNET_BUFFER_OFFLOAD_F_UDP_CKSUM;
-         udp_header_t *udp =
-           (udp_header_t *) (vlib_buffer_get_current (b) +
-                             sizeof (ethernet_header_t) + ip6_hdr_len);
-         *l4_hdr_sz = sizeof (*udp);
-       }
-      vnet_buffer (b)->l3_hdr_offset = sizeof (ethernet_header_t);
-      vnet_buffer (b)->l4_hdr_offset =
-        sizeof (ethernet_header_t) + ip6_hdr_len;
-      if (oflags)
-        vnet_buffer_offload_flags_set (b, oflags);
+      else
+       l4_proto = ip6->protocol;
+      vnet_buffer (b)->l4_hdr_offset = l2hdr_sz + ip6_hdr_len;
+    }
+
+  if (l4_proto == IP_PROTOCOL_TCP)
+    {
+      oflags |= VNET_BUFFER_OFFLOAD_F_TCP_CKSUM;
+      tcp_header_t *tcp = (tcp_header_t *) (vlib_buffer_get_current (b) +
+                                            vnet_buffer (b)->l4_hdr_offset);
+      *l4_hdr_sz = tcp_header_bytes (tcp);
+    }
+  else if (l4_proto == IP_PROTOCOL_UDP)
+    {
+      oflags |= VNET_BUFFER_OFFLOAD_F_UDP_CKSUM;
+      *l4_hdr_sz = sizeof (udp_header_t);
     }
+
+  if (oflags)
+    vnet_buffer_offload_flags_set (b, oflags);
 }
 
 always_inline uword
-af_packet_device_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
-                           vlib_frame_t * frame, af_packet_if_t * apif)
+af_packet_device_input_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
+                           vlib_frame_t *frame, af_packet_if_t *apif,
+                           u8 is_cksum_gso_enabled)
 {
   af_packet_main_t *apm = &af_packet_main;
   tpacket3_hdr_t *tph;
@@ -210,19 +238,16 @@ af_packet_device_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
   u32 thread_index = vm->thread_index;
   u32 n_buffer_bytes = vlib_buffer_get_default_data_size (vm);
   u32 min_bufs = apif->rx_req->tp_frame_size / n_buffer_bytes;
-  u32 eth_header_size = 0;
   u32 num_pkts = 0;
   u32 rx_frame_offset = 0;
   block_desc_t *bd = 0;
   vlib_buffer_t bt;
+  u8 is_ip = (apif->mode == AF_PACKET_IF_MODE_IP);
 
-  if (apif->mode == AF_PACKET_IF_MODE_IP)
-    {
-      next_index = VNET_DEVICE_INPUT_NEXT_IP4_INPUT;
-    }
+  if (is_ip)
+    next_index = VNET_DEVICE_INPUT_NEXT_IP4_INPUT;
   else
     {
-      eth_header_size = sizeof (ethernet_header_t);
       next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
       if (PREDICT_FALSE (apif->per_interface_next_index != ~0))
        next_index = apif->per_interface_next_index;
@@ -257,10 +282,11 @@ af_packet_device_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
 
   while (num_pkts && (n_free_bufs > min_bufs))
     {
-      vlib_buffer_t *b0 = 0, *first_b0 = 0;
       u32 next0 = next_index;
      u32 n_left_to_next;
+
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
      while (num_pkts && n_left_to_next && (n_free_bufs > min_bufs))
        {
          tph = (tpacket3_hdr_t *) (block_start + rx_frame_offset);
@@ -269,19 +295,36 @@ af_packet_device_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
          CLIB_PREFETCH (block_start + rx_frame_offset + tph->tp_next_offset,
                         2 * CLIB_CACHE_LINE_BYTES, LOAD);
+
+         vlib_buffer_t *b0 = 0, *first_b0 = 0, *prev_b0 = 0;
+         vnet_virtio_net_hdr_t *vnet_hdr = 0;
          u32 data_len = tph->tp_snaplen;
          u32 offset = 0;
-         u32 bi0 = 0, first_bi0 = 0, prev_bi0;
+         u32 bi0 = ~0, first_bi0 = ~0;
          u8 l4_hdr_sz = 0;
 
+         if (is_cksum_gso_enabled)
+           vnet_hdr =
+             (vnet_virtio_net_hdr_t *) ((u8 *) tph + tph->tp_mac -
+                                        sizeof (vnet_virtio_net_hdr_t));
+
+         if (PREDICT_FALSE (((data_len / n_buffer_bytes) + 1) >
+                            vec_len (apm->rx_buffers[thread_index])))
+           {
+             vec_validate (apm->rx_buffers[thread_index],
+                           VLIB_FRAME_SIZE + n_free_bufs - 1);
+             n_free_bufs += vlib_buffer_alloc (
+               vm, &apm->rx_buffers[thread_index][n_free_bufs],
+               VLIB_FRAME_SIZE);
+             _vec_len (apm->rx_buffers[thread_index]) = n_free_bufs;
+           }
+
          while (data_len)
            {
             /* grab free buffer */
             u32 last_empty_buffer =
               vec_len (apm->rx_buffers[thread_index]) - 1;
-            prev_bi0 = bi0;
             bi0 = apm->rx_buffers[thread_index][last_empty_buffer];
-            b0 = vlib_get_buffer (vm, bi0);
             _vec_len (apm->rx_buffers[thread_index]) = last_empty_buffer;
             n_free_bufs--;
@@ -290,7 +333,10 @@ af_packet_device_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
               data_len > n_buffer_bytes ? n_buffer_bytes : data_len;
             u32 vlan_len = 0;
             u32 bytes_copied = 0;
+
+            b0 = vlib_get_buffer (vm, bi0);
             b0->current_data = 0;
+
             /* Kernel removes VLAN headers, so reconstruct VLAN */
             if (PREDICT_FALSE (tph->tp_status & TP_STATUS_VLAN_VALID))
               {
@@ -328,20 +374,22 @@ af_packet_device_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
                 vnet_buffer (b0)->sw_if_index[VLIB_RX] = apif->sw_if_index;
                 vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~0;
 
+                first_b0 = b0;
                 first_bi0 = bi0;
-                first_b0 = vlib_get_buffer (vm, first_bi0);
-                if (tph->tp_status & TP_STATUS_CSUMNOTREADY)
-                  mark_tcp_udp_cksum_calc (first_b0, &l4_hdr_sz);
-                /* This is a trade-off for GSO. As kernel isn't passing
-                 * us the GSO state or size, we guess it by comparing it
-                 * to the host MTU of the interface */
-                if (tph->tp_snaplen > (apif->host_mtu + eth_header_size))
-                  fill_gso_buffer_flags (first_b0, apif->host_mtu,
-                                         l4_hdr_sz);
+                if (is_cksum_gso_enabled)
+                  {
+                    if (vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
+                      fill_cksum_offload (first_b0, &l4_hdr_sz, is_ip);
+                    if (vnet_hdr->gso_type & (VIRTIO_NET_HDR_GSO_TCPV4 |
+                                              VIRTIO_NET_HDR_GSO_TCPV6))
+                      fill_gso_offload (first_b0, vnet_hdr->gso_size,
+                                        l4_hdr_sz);
+                  }
               }
             else
-              buffer_add_to_chain (vm, bi0, first_bi0, prev_bi0);
+              buffer_add_to_chain (b0, first_b0, prev_b0, bi0);
 
+            prev_b0 = b0;
             offset += bytes_to_copy;
             data_len -= bytes_to_copy;
           }
@@ -398,9 +446,15 @@ af_packet_device_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
                 tr->hw_if_index = apif->hw_if_index;
                 tr->block = block;
                 tr->block_start = bd;
-                tr->num_pkts = num_pkts;
+                tr->pkt_num = bd->hdr.bh1.num_pkts - num_pkts;
                 clib_memcpy_fast (&tr->bd, bd, sizeof (block_desc_t));
                 clib_memcpy_fast (&tr->tph, tph, sizeof (tpacket3_hdr_t));
+                if (is_cksum_gso_enabled)
+                  clib_memcpy_fast (&tr->vnet_hdr, vnet_hdr,
+                                    sizeof (vnet_virtio_net_hdr_t));
+                else
+                  clib_memset_u8 (&tr->vnet_hdr, 0,
+                                  sizeof (vnet_virtio_net_hdr_t));
               }
 
             /* enque and take next packet */
@@ -446,9 +500,15 @@ VLIB_NODE_FN (af_packet_input_node) (vlib_main_t * vm,
       af_packet_if_t *apif;
       apif = vec_elt_at_index (apm->interfaces, pv[i].dev_instance);
       if (apif->is_admin_up)
-       n_rx_packets += af_packet_device_input_fn (vm, node, frame, apif);
+       {
+         if (apif->is_cksum_gso_enabled)
+           n_rx_packets +=
+             af_packet_device_input_fn (vm, node, frame, apif, 1);
+         else
+           n_rx_packets +=
+             af_packet_device_input_fn (vm, node, frame, apif, 0);
+       }
     }
-
   return n_rx_packets;
 }
--
cgit 1.2.3-korg
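
Note on the vnet header consumed above: the kernel only places a virtio-net header in front of
each RX ring frame when the PACKET_VNET_HDR option has been enabled on the underlying AF_PACKET
socket (presumably done in the companion af_packet.c change, which is not part of this diff).
A minimal userspace sketch of that setup, assuming "fd" is an AF_PACKET socket whose TPACKET_V3
RX ring is already configured; the helper name is illustrative only:

#include <linux/if_packet.h>
#include <sys/socket.h>

static int
request_vnet_hdr (int fd)
{
  /* Illustrative sketch, not part of the patch: ask the kernel to prepend a
     struct virtio_net_hdr (checksum/GSO metadata) to every ring frame. */
  int one = 1;
  return setsockopt (fd, SOL_PACKET, PACKET_VNET_HDR, &one, sizeof (one));
}

With the option set, the header sits immediately before the packet data, which is why the input
node reads it at tph->tp_mac - sizeof (vnet_virtio_net_hdr_t).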