author     Mohsin Kazmi <sykazmi@cisco.com>            2022-03-22 21:40:04 +0000
committer  Damjan Marion <dmarion@me.com>              2022-03-23 18:47:15 +0000
commit     c1fd17bd10fd7441d1f4d7c6f0ecf5761c4e00b2 (patch)
tree       0baa5437415bdebaa89724cd06982a5a459acb26 /src/vnet
parent     219cbcb29fdcd9d76d7fe00c5a5e074f8e149083 (diff)
devices: add support for offloads
Type: improvement
This patch adds support for the following offloads on af_packet (host) interfaces:
1) GSO
2) checksum offload
Both rely on the PACKET_VNET_HDR socket option: when it can be enabled, every frame
exchanged with the kernel over the TPACKET_V3 rings carries a virtio_net_hdr with the
checksum/GSO metadata, and the interface advertises the corresponding capabilities
(see the socket-level sketch after the change trailers below).
Signed-off-by: Mohsin Kazmi <sykazmi@cisco.com>
Change-Id: Ib00629888c62da04d58db36ce021993769e736c9
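The offloads hinge on a Linux socket option rather than on new VPP infrastructure, so a short standalone illustration may help before reading the diff. The sketch below is not part of the commit; the interface handling, error paths and missing ring setup are simplifications. It opens an AF_PACKET socket, selects TPACKET_V3 and enables PACKET_VNET_HDR, which makes the kernel prepend a struct virtio_net_hdr (carrying checksum/GSO metadata) to every frame it delivers and expect one in front of every frame it is asked to send.

/* Minimal sketch: enable PACKET_VNET_HDR on a TPACKET_V3 AF_PACKET socket.
 * Standalone illustration of the kernel API the patch relies on, not VPP
 * code. Needs CAP_NET_RAW; ring setup and binding are omitted, and whether
 * PACKET_VNET_HDR can be combined with a given ring setup is kernel
 * version dependent. */
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <linux/if_packet.h>
#include <linux/if_ether.h>
#include <linux/virtio_net.h>

int
main (void)
{
  int fd = socket (AF_PACKET, SOCK_RAW, htons (ETH_P_ALL));
  if (fd < 0)
    {
      perror ("socket");
      return 1;
    }

  int ver = TPACKET_V3;
  if (setsockopt (fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof (ver)) < 0)
    perror ("PACKET_VERSION");

  /* With this option set, the kernel prepends a struct virtio_net_hdr to
   * every received frame and expects one in front of every transmitted
   * frame; that header is what carries the checksum/GSO metadata. */
  int on = 1;
  if (setsockopt (fd, SOL_PACKET, PACKET_VNET_HDR, &on, sizeof (on)) < 0)
    fprintf (stderr, "PACKET_VNET_HDR not available: %s\n", strerror (errno));
  else
    printf ("vnet hdr enabled, header size %zu bytes\n",
            sizeof (struct virtio_net_hdr));

  close (fd);
  return 0;
}

The patch follows the same pattern in create_packet_v3_sock(): if the setsockopt() fails (for example on older kernels), it only logs a debug message and leaves is_cksum_gso_enabled at 0, so the interface silently falls back to the previous behaviour without advertising the new capabilities.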
Diffstat (limited to 'src/vnet')
-rw-r--r--  src/vnet/devices/af_packet/FEATURE.yaml |   1
-rw-r--r--  src/vnet/devices/af_packet/af_packet.c  |  43
-rw-r--r--  src/vnet/devices/af_packet/af_packet.h  |   1
-rw-r--r--  src/vnet/devices/af_packet/device.c     | 139
-rw-r--r--  src/vnet/devices/af_packet/node.c       | 234
5 files changed, 301 insertions, 117 deletions
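Most of the new code in device.c and node.c reads or writes vnet_virtio_net_hdr_t, VPP's counterpart of the standard virtio_net_hdr. As a reading aid for the hunks below, the sketch restates that 10-byte layout and the TX-side placement arithmetic the patch uses; the struct name vnet_hdr_sketch and the program around it are illustrative only, while the field layout follows the virtio specification.

/* Reference sketch of the header the TX/RX paths read and write.
 * The field layout follows the virtio spec (little-endian, 10 bytes);
 * VPP's vnet_virtio_net_hdr_t in vnet/devices/virtio/virtio_std.h mirrors
 * it. This is an aid for reading the diff, not code from the commit. */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>
#include <linux/if_packet.h> /* TPACKET_ALIGN, struct tpacket3_hdr */

struct vnet_hdr_sketch
{
  uint8_t flags;        /* e.g. VIRTIO_NET_HDR_F_NEEDS_CSUM */
  uint8_t gso_type;     /* VIRTIO_NET_HDR_GSO_NONE / _TCPV4 / _TCPV6 */
  uint16_t hdr_len;     /* bytes from frame start to end of the L4 header */
  uint16_t gso_size;    /* TCP payload bytes per segment (MSS) */
  uint16_t csum_start;  /* where the kernel starts checksumming */
  uint16_t csum_offset; /* where the result goes, relative to csum_start */
};

int
main (void)
{
  /* On TX the patch places the vnet hdr right after the (aligned)
   * tpacket3_hdr and copies the packet data behind it, so the data
   * offset inside a TX frame becomes: */
  size_t data_off = TPACKET_ALIGN (sizeof (struct tpacket3_hdr)) +
                    sizeof (struct vnet_hdr_sketch);
  printf ("vnet hdr %zu bytes, packet data starts at offset %zu\n",
          sizeof (struct vnet_hdr_sketch), data_off);
  return 0;
}

On RX the kernel places the same header immediately in front of the packet data, which is why node.c locates it at (u8 *) tph + tph->tp_mac - sizeof (vnet_virtio_net_hdr_t).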
diff --git a/src/vnet/devices/af_packet/FEATURE.yaml b/src/vnet/devices/af_packet/FEATURE.yaml
index 25d8b2b5964..4a11ea2beb5 100644
--- a/src/vnet/devices/af_packet/FEATURE.yaml
+++ b/src/vnet/devices/af_packet/FEATURE.yaml
@@ -3,6 +3,7 @@ name: host-interface Device AF_PACKET
 maintainer: Damjan Marion <damarion@cisco.com>
 features:
   - L4 checksum offload
+  - GSO offload
 description: "Create a host interface that will attach to a linux AF_PACKET
               interface, one side of a veth pair. The veth pair must already
               exist. Once created, a new host interface will
diff --git a/src/vnet/devices/af_packet/af_packet.c b/src/vnet/devices/af_packet/af_packet.c
index 60eadf2b0dc..4fb54637a67 100644
--- a/src/vnet/devices/af_packet/af_packet.c
+++ b/src/vnet/devices/af_packet/af_packet.c
@@ -44,7 +44,7 @@ VNET_HW_INTERFACE_CLASS (af_packet_ip_device_hw_interface_class, static) = {
 };
 
 #define AF_PACKET_DEFAULT_TX_FRAMES_PER_BLOCK 1024
-#define AF_PACKET_DEFAULT_TX_FRAME_SIZE       (2048 * 5)
+#define AF_PACKET_DEFAULT_TX_FRAME_SIZE       (2048 * 33) // GSO packet of 64KB
 #define AF_PACKET_TX_BLOCK_NR                 1
 
 #define AF_PACKET_DEFAULT_RX_FRAMES_PER_BLOCK 256
@@ -130,7 +130,7 @@ is_bridge (const u8 * host_if_name)
 static int
 create_packet_v3_sock (int host_if_index, tpacket_req3_t *rx_req,
                        tpacket_req3_t *tx_req, int *fd, u8 **ring,
-                       u32 *hdrlen_ptr)
+                       u32 *hdrlen_ptr, u8 *is_cksum_gso_enabled)
 {
   af_packet_main_t *apm = &af_packet_main;
   struct sockaddr_ll sll;
@@ -198,6 +198,17 @@ create_packet_v3_sock (int host_if_index, tpacket_req3_t *rx_req,
       goto error;
     }
 
+  int opt2 = 1;
+  if (setsockopt (*fd, SOL_PACKET, PACKET_VNET_HDR, &opt2, sizeof (opt2)) < 0)
+    {
+      vlib_log_debug (
+        apm->log_class,
+        "Failed to set packet vnet hdr error handling option: %s (errno %d)",
+        strerror (errno), errno);
+    }
+  else
+    *is_cksum_gso_enabled = 1;
+
 #if defined(PACKET_QDISC_BYPASS)
   /* Introduced with Linux 3.14 so the ifdef should eventually be removed */
   if (setsockopt (*fd, SOL_PACKET, PACKET_QDISC_BYPASS, &opt, sizeof (opt)) <
@@ -263,6 +274,7 @@ af_packet_create_if (af_packet_create_if_arg_t *arg)
   vnet_sw_interface_t *sw;
   vlib_thread_main_t *tm = vlib_get_thread_main ();
   vnet_main_t *vnm = vnet_get_main ();
+  vnet_hw_if_caps_t caps = VNET_HW_IF_CAP_INT_MODE;
   uword *p;
   uword if_index;
   u8 *host_if_name_dup = 0;
@@ -271,6 +283,7 @@ af_packet_create_if (af_packet_create_if_arg_t *arg)
   u32 rx_frame_size, tx_frame_size;
   u32 hdrlen = 0;
   u32 i = 0;
+  u8 is_cksum_gso_enabled = 0;
 
   p = mhash_get (&apm->if_index_by_host_if_name, arg->host_if_name);
   if (p)
@@ -365,8 +378,8 @@ af_packet_create_if (af_packet_create_if_arg_t *arg)
       fd2 = -1;
     }
 
-  ret =
-    create_packet_v3_sock (host_if_index, rx_req, tx_req, &fd, &ring, &hdrlen);
+  ret = create_packet_v3_sock (host_if_index, rx_req, tx_req, &fd, &ring,
+                               &hdrlen, &is_cksum_gso_enabled);
 
   if (ret != 0)
     goto error;
@@ -405,6 +418,7 @@ af_packet_create_if (af_packet_create_if_arg_t *arg)
   apif->next_rx_block = 0;
   apif->mode = arg->mode;
   apif->hdrlen = hdrlen;
+  apif->is_cksum_gso_enabled = is_cksum_gso_enabled;
 
   ret = af_packet_read_mtu (apif);
   if (ret != 0)
@@ -450,7 +464,11 @@ af_packet_create_if (af_packet_create_if_arg_t *arg)
     apif->queue_index = vnet_hw_if_register_rx_queue (
       vnm, apif->hw_if_index, 0, VNET_HW_IF_RXQ_THREAD_ANY);
 
-  vnet_hw_if_set_caps (vnm, apif->hw_if_index, VNET_HW_IF_CAP_INT_MODE);
+  if (apif->is_cksum_gso_enabled)
+    caps |= VNET_HW_IF_CAP_TCP_GSO | VNET_HW_IF_CAP_TX_IP4_CKSUM |
+            VNET_HW_IF_CAP_TX_TCP_CKSUM | VNET_HW_IF_CAP_TX_UDP_CKSUM;
+
+  vnet_hw_if_set_caps (vnm, apif->hw_if_index, caps);
   vnet_hw_interface_set_flags (vnm, apif->hw_if_index,
                                VNET_HW_INTERFACE_FLAG_LINK_UP);
@@ -554,20 +572,7 @@ af_packet_delete_if (u8 *host_if_name)
 int
 af_packet_set_l4_cksum_offload (u32 sw_if_index, u8 set)
 {
-  vnet_main_t *vnm = vnet_get_main ();
-  vnet_hw_interface_t *hw;
-  vnet_hw_if_caps_t caps =
-    VNET_HW_IF_CAP_TX_TCP_CKSUM | VNET_HW_IF_CAP_TX_UDP_CKSUM;
-  hw = vnet_get_sup_hw_interface (vnm, sw_if_index);
-
-  if (hw->dev_class_index != af_packet_device_class.index)
-    return VNET_API_ERROR_INVALID_INTERFACE;
-
-  if (set)
-    vnet_hw_if_set_caps (vnm, hw->hw_if_index, caps);
-  else
-    vnet_hw_if_unset_caps (vnm, hw->hw_if_index, caps);
-
+  // deprecated ...
   return 0;
 }
diff --git a/src/vnet/devices/af_packet/af_packet.h b/src/vnet/devices/af_packet/af_packet.h
index ed3f10bb092..dd38e1bb266 100644
--- a/src/vnet/devices/af_packet/af_packet.h
+++ b/src/vnet/devices/af_packet/af_packet.h
@@ -49,6 +49,7 @@ typedef struct
   tpacket_req3_t *tx_req;
   u8 **rx_ring;
   u8 **tx_ring;
+  u8 is_cksum_gso_enabled;
   u32 hdrlen;
   u32 hw_if_index;
   u32 sw_if_index;
diff --git a/src/vnet/devices/af_packet/device.c b/src/vnet/devices/af_packet/device.c
index 23b18832cc8..1aa1e0823c1 100644
--- a/src/vnet/devices/af_packet/device.c
+++ b/src/vnet/devices/af_packet/device.c
@@ -27,8 +27,14 @@
 #include <vlib/unix/unix.h>
 #include <vnet/ip/ip.h>
 #include <vnet/ethernet/ethernet.h>
+#include <vnet/ip/ip4_packet.h>
+#include <vnet/ip/ip6_packet.h>
+#include <vnet/ip/ip_psh_cksum.h>
+#include <vnet/tcp/tcp_packet.h>
+#include <vnet/udp/udp_packet.h>
 
 #include <vnet/devices/af_packet/af_packet.h>
+#include <vnet/devices/virtio/virtio_std.h>
 
 #define foreach_af_packet_tx_func_error \
   _(FRAME_NOT_READY, "tx frame not ready") \
@@ -100,7 +106,7 @@ format_af_packet_device (u8 * s, va_list * args)
   int n_send_req = 0, n_avail = 0, n_sending = 0, n_tot = 0, n_wrong = 0;
   do
     {
-      tph = (struct tpacket3_hdr *) (tx_block_start + tx_frame * tx_frame_sz);
+      tph = (tpacket3_hdr_t *) (tx_block_start + tx_frame * tx_frame_sz);
       tx_frame = (tx_frame + 1) % tx_frame_nr;
       if (tph->tp_status == 0)
        n_avail++;
@@ -128,6 +134,93 @@ format_af_packet_tx_trace (u8 * s, va_list * args)
   return s;
 }
 
+static_always_inline void
+fill_gso_offload (vlib_buffer_t *b0, vnet_virtio_net_hdr_t *vnet_hdr)
+{
+  vnet_buffer_oflags_t oflags = vnet_buffer (b0)->oflags;
+  if (b0->flags & VNET_BUFFER_F_IS_IP4)
+    {
+      ip4_header_t *ip4;
+      vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
+      vnet_hdr->gso_size = vnet_buffer2 (b0)->gso_size;
+      vnet_hdr->hdr_len =
+        vnet_buffer (b0)->l4_hdr_offset + vnet_buffer2 (b0)->gso_l4_hdr_sz;
+      vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
+      vnet_hdr->csum_start = vnet_buffer (b0)->l4_hdr_offset; // 0x22;
+      vnet_hdr->csum_offset = STRUCT_OFFSET_OF (tcp_header_t, checksum);
+      ip4 = (ip4_header_t *) (vlib_buffer_get_current (b0) +
                              vnet_buffer (b0)->l3_hdr_offset);
+      if (oflags & VNET_BUFFER_OFFLOAD_F_IP_CKSUM)
+        ip4->checksum = ip4_header_checksum (ip4);
+    }
+  else if (b0->flags & VNET_BUFFER_F_IS_IP6)
+    {
+      vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
+      vnet_hdr->gso_size = vnet_buffer2 (b0)->gso_size;
+      vnet_hdr->hdr_len =
+        vnet_buffer (b0)->l4_hdr_offset + vnet_buffer2 (b0)->gso_l4_hdr_sz;
+      vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
+      vnet_hdr->csum_start = vnet_buffer (b0)->l4_hdr_offset; // 0x36;
+      vnet_hdr->csum_offset = STRUCT_OFFSET_OF (tcp_header_t, checksum);
+    }
+}
+
+static_always_inline void
+fill_cksum_offload (vlib_buffer_t *b0, vnet_virtio_net_hdr_t *vnet_hdr)
+{
+  vnet_buffer_oflags_t oflags = vnet_buffer (b0)->oflags;
+  if (b0->flags & VNET_BUFFER_F_IS_IP4)
+    {
+      ip4_header_t *ip4;
+      ip4 = (ip4_header_t *) (vlib_buffer_get_current (b0) +
+                              vnet_buffer (b0)->l3_hdr_offset);
+      if (oflags & VNET_BUFFER_OFFLOAD_F_IP_CKSUM)
+        ip4->checksum = ip4_header_checksum (ip4);
+      vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
+      vnet_hdr->csum_start = 0x22;
+      if (oflags & VNET_BUFFER_OFFLOAD_F_TCP_CKSUM)
+        {
+          tcp_header_t *tcp =
+            (tcp_header_t *) (vlib_buffer_get_current (b0) +
+                              vnet_buffer (b0)->l4_hdr_offset);
+          tcp->checksum = ip4_pseudo_header_cksum (ip4);
+          vnet_hdr->csum_offset = STRUCT_OFFSET_OF (tcp_header_t, checksum);
+        }
+      else if (oflags & VNET_BUFFER_OFFLOAD_F_UDP_CKSUM)
+        {
+          udp_header_t *udp =
+            (udp_header_t *) (vlib_buffer_get_current (b0) +
+                              vnet_buffer (b0)->l4_hdr_offset);
+          udp->checksum = ip4_pseudo_header_cksum (ip4);
+          vnet_hdr->csum_offset = STRUCT_OFFSET_OF (udp_header_t, checksum);
+        }
+    }
+  else if (b0->flags & VNET_BUFFER_F_IS_IP6)
+    {
+      ip6_header_t *ip6;
+      vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
+      vnet_hdr->csum_start = 0x36;
+      ip6 = (ip6_header_t *) (vlib_buffer_get_current (b0) +
+                              vnet_buffer (b0)->l3_hdr_offset);
+      if (oflags & VNET_BUFFER_OFFLOAD_F_TCP_CKSUM)
+        {
+          tcp_header_t *tcp =
+            (tcp_header_t *) (vlib_buffer_get_current (b0) +
+                              vnet_buffer (b0)->l4_hdr_offset);
+          tcp->checksum = ip6_pseudo_header_cksum (ip6);
+          vnet_hdr->csum_offset = STRUCT_OFFSET_OF (tcp_header_t, checksum);
+        }
+      else if (oflags & VNET_BUFFER_OFFLOAD_F_UDP_CKSUM)
+        {
+          udp_header_t *udp =
+            (udp_header_t *) (vlib_buffer_get_current (b0) +
+                              vnet_buffer (b0)->l4_hdr_offset);
+          udp->checksum = ip6_pseudo_header_cksum (ip6);
+          vnet_hdr->csum_offset = STRUCT_OFFSET_OF (udp_header_t, checksum);
+        }
+    }
+}
+
 VNET_DEVICE_CLASS_TX_FN (af_packet_device_class) (vlib_main_t * vm,
                                                   vlib_node_runtime_t * node,
                                                   vlib_frame_t * frame)
@@ -147,17 +240,20 @@ VNET_DEVICE_CLASS_TX_FN (af_packet_device_class) (vlib_main_t * vm,
   u32 tx_frame = apif->next_tx_frame;
   tpacket3_hdr_t *tph;
   u32 frame_not_ready = 0;
+  u8 is_cksum_gso_enabled = (apif->is_cksum_gso_enabled == 1) ? 1 : 0;
 
   while (n_left)
     {
       u32 len;
+      vnet_virtio_net_hdr_t *vnet_hdr = 0;
       u32 offset = 0;
-      vlib_buffer_t *b0;
-      n_left--;
+      vlib_buffer_t *b0 = 0;
       u32 bi = buffers[0];
+
+      n_left--;
       buffers++;
 
-      tph = (struct tpacket3_hdr *) (block_start + tx_frame * frame_size);
+      tph = (tpacket3_hdr_t *) (block_start + tx_frame * frame_size);
       if (PREDICT_FALSE (tph->tp_status &
                          (TP_STATUS_SEND_REQUEST | TP_STATUS_SENDING)))
        {
@@ -165,17 +261,38 @@ VNET_DEVICE_CLASS_TX_FN (af_packet_device_class) (vlib_main_t * vm,
          goto next;
        }
 
-      do
+      b0 = vlib_get_buffer (vm, bi);
+
+      if (is_cksum_gso_enabled)
+       {
+         vnet_hdr =
+           (vnet_virtio_net_hdr_t *) ((u8 *) tph + TPACKET_ALIGN (sizeof (
+                                        tpacket3_hdr_t)));
+
+         clib_memset_u8 (vnet_hdr, 0, sizeof (vnet_virtio_net_hdr_t));
+         offset = sizeof (vnet_virtio_net_hdr_t);
+
+         if (b0->flags & VNET_BUFFER_F_GSO)
+           fill_gso_offload (b0, vnet_hdr);
+         else if (b0->flags & VNET_BUFFER_F_OFFLOAD)
+           fill_cksum_offload (b0, vnet_hdr);
+       }
+
+      len = b0->current_length;
+      clib_memcpy_fast ((u8 *) tph + TPACKET_ALIGN (sizeof (tpacket3_hdr_t)) +
+                          offset,
+                        vlib_buffer_get_current (b0), len);
+      offset += len;
+
+      while (b0->flags & VLIB_BUFFER_NEXT_PRESENT)
        {
-         b0 = vlib_get_buffer (vm, bi);
+         b0 = vlib_get_buffer (vm, b0->next_buffer);
          len = b0->current_length;
-         clib_memcpy_fast (
-           (u8 *) tph + TPACKET_ALIGN (sizeof (struct tpacket3_hdr)) + offset,
-           vlib_buffer_get_current (b0), len);
+         clib_memcpy_fast ((u8 *) tph +
+                             TPACKET_ALIGN (sizeof (tpacket3_hdr_t)) + offset,
+                           vlib_buffer_get_current (b0), len);
          offset += len;
        }
-      while ((bi =
-               (b0->flags & VLIB_BUFFER_NEXT_PRESENT) ? b0->next_buffer : 0));
 
       tph->tp_len = tph->tp_snaplen = offset;
       tph->tp_status = TP_STATUS_SEND_REQUEST;
diff --git a/src/vnet/devices/af_packet/node.c b/src/vnet/devices/af_packet/node.c
index 229e0501af7..a42e3e07cd9 100644
--- a/src/vnet/devices/af_packet/node.c
+++ b/src/vnet/devices/af_packet/node.c
@@ -28,6 +28,7 @@
 #include <vnet/ethernet/packet.h>
 
 #include <vnet/devices/af_packet/af_packet.h>
+#include <vnet/devices/virtio/virtio_std.h>
 
 #define foreach_af_packet_input_error \
   _(PARTIAL_PKT, "partial packet")
@@ -51,10 +52,11 @@ typedef struct
   u32 next_index;
   u32 hw_if_index;
   int block;
-  u32 num_pkts;
+  u32 pkt_num;
   void *block_start;
   block_desc_t bd;
   tpacket3_hdr_t tph;
+  vnet_virtio_net_hdr_t vnet_hdr;
 } af_packet_input_trace_t;
 
 static u8 *
@@ -68,12 +70,10 @@ format_af_packet_input_trace (u8 * s, va_list * args)
   s = format (s, "af_packet: hw_if_index %d next-index %d", t->hw_if_index,
              t->next_index);
 
-  s = format (s,
-             "\n%Ublock %u:\n%Uaddress %p version %u seq_num %lu"
-             " num_pkts %u",
-             format_white_space, indent + 2, t->block, format_white_space,
-             indent + 4, t->block_start, t->bd.version, t->bd.hdr.bh1.seq_num,
-             t->num_pkts);
+  s = format (
+    s, "\n%Ublock %u:\n%Uaddress %p version %u seq_num %lu pkt_num %u",
+    format_white_space, indent + 2, t->block, format_white_space, indent + 4,
+    t->block_start, t->bd.version, t->bd.hdr.bh1.seq_num, t->pkt_num);
   s = format (s,
              "\n%Utpacket3_hdr:\n%Ustatus 0x%x len %u snaplen %u mac %u net %u"
@@ -91,16 +91,21 @@ format_af_packet_input_trace (u8 * s, va_list * args)
              t->tph.hv1.tp_vlan_tpid
 #endif
   );
+
+  s = format (s,
+             "\n%Uvnet-hdr:\n%Uflags 0x%02x gso_type 0x%02x hdr_len %u"
+             "\n%Ugso_size %u csum_start %u csum_offset %u",
+             format_white_space, indent + 2, format_white_space, indent + 4,
+             t->vnet_hdr.flags, t->vnet_hdr.gso_type, t->vnet_hdr.hdr_len,
+             format_white_space, indent + 4, t->vnet_hdr.gso_size,
+             t->vnet_hdr.csum_start, t->vnet_hdr.csum_offset);
   return s;
 }
 
 always_inline void
-buffer_add_to_chain (vlib_main_t * vm, u32 bi, u32 first_bi, u32 prev_bi)
+buffer_add_to_chain (vlib_buffer_t *b, vlib_buffer_t *first_b,
+                     vlib_buffer_t *prev_b, u32 bi)
 {
-  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
-  vlib_buffer_t *first_b = vlib_get_buffer (vm, first_bi);
-  vlib_buffer_t *prev_b = vlib_get_buffer (vm, prev_bi);
-
   /* update first buffer */
   first_b->total_length_not_including_first_buffer += b->current_length;
 
@@ -109,11 +114,11 @@ buffer_add_to_chain (vlib_main_t * vm, u32 bi, u32 first_bi, u32 prev_bi)
   prev_b->flags |= VLIB_BUFFER_NEXT_PRESENT;
 
   /* update current buffer */
-  b->next_buffer = 0;
+  b->next_buffer = ~0;
 }
 
 static_always_inline void
-fill_gso_buffer_flags (vlib_buffer_t *b, u32 gso_size, u8 l4_hdr_sz)
+fill_gso_offload (vlib_buffer_t *b, u32 gso_size, u8 l4_hdr_sz)
 {
   b->flags |= VNET_BUFFER_F_GSO;
   vnet_buffer2 (b)->gso_size = gso_size;
@@ -121,43 +126,66 @@ fill_gso_buffer_flags (vlib_buffer_t *b, u32 gso_size, u8 l4_hdr_sz)
 }
 
 static_always_inline void
-mark_tcp_udp_cksum_calc (vlib_buffer_t *b, u8 *l4_hdr_sz)
+fill_cksum_offload (vlib_buffer_t *b, u8 *l4_hdr_sz, u8 is_ip)
 {
-  ethernet_header_t *eth = vlib_buffer_get_current (b);
   vnet_buffer_oflags_t oflags = 0;
-  if (clib_net_to_host_u16 (eth->type) == ETHERNET_TYPE_IP4)
+  u16 l2hdr_sz = 0;
+  u16 ethertype = 0;
+  u8 l4_proto = 0;
+
+  if (is_ip)
     {
-      ip4_header_t *ip4 =
-       (vlib_buffer_get_current (b) + sizeof (ethernet_header_t));
-      b->flags |= VNET_BUFFER_F_IS_IP4;
-      if (ip4->protocol == IP_PROTOCOL_TCP)
+      switch (b->data[0] & 0xf0)
        {
-         oflags |= VNET_BUFFER_OFFLOAD_F_TCP_CKSUM;
-         tcp_header_t *tcp = (tcp_header_t *) (vlib_buffer_get_current (b) +
-                                               sizeof (ethernet_header_t) +
-                                               ip4_header_bytes (ip4));
-         *l4_hdr_sz = tcp_header_bytes (tcp);
+       case 0x40:
+         ethertype = ETHERNET_TYPE_IP4;
+         break;
+       case 0x60:
+         ethertype = ETHERNET_TYPE_IP6;
+         break;
        }
-      else if (ip4->protocol == IP_PROTOCOL_UDP)
+    }
+  else
+    {
+      ethernet_header_t *eth = vlib_buffer_get_current (b);
+      ethertype = clib_net_to_host_u16 (eth->type);
+      l2hdr_sz = sizeof (ethernet_header_t);
+      if (ethernet_frame_is_tagged (ethertype))
        {
-         oflags |= VNET_BUFFER_OFFLOAD_F_UDP_CKSUM;
-         udp_header_t *udp = (udp_header_t *) (vlib_buffer_get_current (b) +
-                                               sizeof (ethernet_header_t) +
-                                               ip4_header_bytes (ip4));
-         *l4_hdr_sz = sizeof (*udp);
+         ethernet_vlan_header_t *vlan = (ethernet_vlan_header_t *) (eth + 1);
+
+         ethertype = clib_net_to_host_u16 (vlan->type);
+         l2hdr_sz += sizeof (*vlan);
+         if (ethertype == ETHERNET_TYPE_VLAN)
+           {
+             vlan++;
+             ethertype = clib_net_to_host_u16 (vlan->type);
+             l2hdr_sz += sizeof (*vlan);
+           }
        }
-      vnet_buffer (b)->l3_hdr_offset = sizeof (ethernet_header_t);
-      vnet_buffer (b)->l4_hdr_offset =
-       sizeof (ethernet_header_t) + ip4_header_bytes (ip4);
-      if (oflags)
-       vnet_buffer_offload_flags_set (b, oflags);
     }
-  else if (clib_net_to_host_u16 (eth->type) == ETHERNET_TYPE_IP6)
+
+  vnet_buffer (b)->l2_hdr_offset = 0;
+  vnet_buffer (b)->l3_hdr_offset = l2hdr_sz;
+
+  if (ethertype == ETHERNET_TYPE_IP4)
+    {
+      ip4_header_t *ip4 = (vlib_buffer_get_current (b) + l2hdr_sz);
+      vnet_buffer (b)->l4_hdr_offset = l2hdr_sz + ip4_header_bytes (ip4);
+      b->flags |= (VNET_BUFFER_F_IS_IP4 | VNET_BUFFER_F_L2_HDR_OFFSET_VALID |
+                  VNET_BUFFER_F_L3_HDR_OFFSET_VALID |
+                  VNET_BUFFER_F_L4_HDR_OFFSET_VALID);
+
+      l4_proto = ip4->protocol;
+    }
+  else if (ethertype == ETHERNET_TYPE_IP6)
     {
-      ip6_header_t *ip6 =
-       (vlib_buffer_get_current (b) + sizeof (ethernet_header_t));
-      b->flags |= VNET_BUFFER_F_IS_IP6;
+      ip6_header_t *ip6 = (vlib_buffer_get_current (b) + l2hdr_sz);
+      b->flags |= (VNET_BUFFER_F_IS_IP6 | VNET_BUFFER_F_L2_HDR_OFFSET_VALID |
+                  VNET_BUFFER_F_L3_HDR_OFFSET_VALID |
+                  VNET_BUFFER_F_L4_HDR_OFFSET_VALID);
       u16 ip6_hdr_len = sizeof (ip6_header_t);
+
       if (ip6_ext_hdr (ip6->protocol))
        {
          ip6_ext_header_t *p = (void *) (ip6 + 1);
@@ -167,34 +195,34 @@ mark_tcp_udp_cksum_calc (vlib_buffer_t *b, u8 *l4_hdr_sz)
              ip6_hdr_len += ip6_ext_header_len (p);
              p = ip6_ext_next_header (p);
            }
+         l4_proto = p->next_hdr;
        }
-      if (ip6->protocol == IP_PROTOCOL_TCP)
-       {
-         oflags |= VNET_BUFFER_OFFLOAD_F_TCP_CKSUM;
-         tcp_header_t *tcp =
-           (tcp_header_t *) (vlib_buffer_get_current (b) +
-                             sizeof (ethernet_header_t) + ip6_hdr_len);
-         *l4_hdr_sz = tcp_header_bytes (tcp);
-       }
-      else if (ip6->protocol == IP_PROTOCOL_UDP)
-       {
-         oflags |= VNET_BUFFER_OFFLOAD_F_UDP_CKSUM;
-         udp_header_t *udp =
-           (udp_header_t *) (vlib_buffer_get_current (b) +
-                             sizeof (ethernet_header_t) + ip6_hdr_len);
-         *l4_hdr_sz = sizeof (*udp);
-       }
-      vnet_buffer (b)->l3_hdr_offset = sizeof (ethernet_header_t);
-      vnet_buffer (b)->l4_hdr_offset =
-       sizeof (ethernet_header_t) + ip6_hdr_len;
-      if (oflags)
-       vnet_buffer_offload_flags_set (b, oflags);
+      else
+       l4_proto = ip6->protocol;
+      vnet_buffer (b)->l4_hdr_offset = l2hdr_sz + ip6_hdr_len;
+    }
+
+  if (l4_proto == IP_PROTOCOL_TCP)
+    {
+      oflags |= VNET_BUFFER_OFFLOAD_F_TCP_CKSUM;
+      tcp_header_t *tcp = (tcp_header_t *) (vlib_buffer_get_current (b) +
+                                            vnet_buffer (b)->l4_hdr_offset);
+      *l4_hdr_sz = tcp_header_bytes (tcp);
+    }
+  else if (l4_proto == IP_PROTOCOL_UDP)
+    {
+      oflags |= VNET_BUFFER_OFFLOAD_F_UDP_CKSUM;
+      *l4_hdr_sz = sizeof (udp_header_t);
     }
+
+  if (oflags)
+    vnet_buffer_offload_flags_set (b, oflags);
 }
 
 always_inline uword
-af_packet_device_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
-                          vlib_frame_t * frame, af_packet_if_t * apif)
+af_packet_device_input_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
+                          vlib_frame_t *frame, af_packet_if_t *apif,
+                          u8 is_cksum_gso_enabled)
 {
   af_packet_main_t *apm = &af_packet_main;
   tpacket3_hdr_t *tph;
@@ -210,19 +238,16 @@ af_packet_device_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
   u32 thread_index = vm->thread_index;
   u32 n_buffer_bytes = vlib_buffer_get_default_data_size (vm);
   u32 min_bufs = apif->rx_req->tp_frame_size / n_buffer_bytes;
-  u32 eth_header_size = 0;
   u32 num_pkts = 0;
   u32 rx_frame_offset = 0;
   block_desc_t *bd = 0;
   vlib_buffer_t bt;
+  u8 is_ip = (apif->mode == AF_PACKET_IF_MODE_IP);
 
-  if (apif->mode == AF_PACKET_IF_MODE_IP)
-    {
-      next_index = VNET_DEVICE_INPUT_NEXT_IP4_INPUT;
-    }
+  if (is_ip)
+    next_index = VNET_DEVICE_INPUT_NEXT_IP4_INPUT;
   else
     {
-      eth_header_size = sizeof (ethernet_header_t);
       next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
       if (PREDICT_FALSE (apif->per_interface_next_index != ~0))
        next_index = apif->per_interface_next_index;
@@ -257,10 +282,11 @@ af_packet_device_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
 
   while (num_pkts && (n_free_bufs > min_bufs))
     {
-      vlib_buffer_t *b0 = 0, *first_b0 = 0;
       u32 next0 = next_index;
       u32 n_left_to_next;
+
       vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
       while (num_pkts && n_left_to_next && (n_free_bufs > min_bufs))
        {
          tph = (tpacket3_hdr_t *) (block_start + rx_frame_offset);
@@ -269,19 +295,36 @@ af_packet_device_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
          CLIB_PREFETCH (block_start + rx_frame_offset + tph->tp_next_offset,
                         2 * CLIB_CACHE_LINE_BYTES, LOAD);
+
+         vlib_buffer_t *b0 = 0, *first_b0 = 0, *prev_b0 = 0;
+         vnet_virtio_net_hdr_t *vnet_hdr = 0;
          u32 data_len = tph->tp_snaplen;
          u32 offset = 0;
-         u32 bi0 = 0, first_bi0 = 0, prev_bi0;
+         u32 bi0 = ~0, first_bi0 = ~0;
          u8 l4_hdr_sz = 0;
 
+         if (is_cksum_gso_enabled)
+           vnet_hdr =
+             (vnet_virtio_net_hdr_t *) ((u8 *) tph + tph->tp_mac -
+                                        sizeof (vnet_virtio_net_hdr_t));
+
+         if (PREDICT_FALSE (((data_len / n_buffer_bytes) + 1) >
+                            vec_len (apm->rx_buffers[thread_index])))
+           {
+             vec_validate (apm->rx_buffers[thread_index],
+                           VLIB_FRAME_SIZE + n_free_bufs - 1);
+             n_free_bufs += vlib_buffer_alloc (
+               vm, &apm->rx_buffers[thread_index][n_free_bufs],
+               VLIB_FRAME_SIZE);
+             _vec_len (apm->rx_buffers[thread_index]) = n_free_bufs;
+           }
+
          while (data_len)
            {
              /* grab free buffer */
              u32 last_empty_buffer =
                vec_len (apm->rx_buffers[thread_index]) - 1;
-             prev_bi0 = bi0;
              bi0 = apm->rx_buffers[thread_index][last_empty_buffer];
-             b0 = vlib_get_buffer (vm, bi0);
              _vec_len (apm->rx_buffers[thread_index]) = last_empty_buffer;
              n_free_bufs--;
@@ -290,7 +333,10 @@ af_packet_device_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
                data_len > n_buffer_bytes ? n_buffer_bytes : data_len;
              u32 vlan_len = 0;
              u32 bytes_copied = 0;
+
+             b0 = vlib_get_buffer (vm, bi0);
              b0->current_data = 0;
+
              /* Kernel removes VLAN headers, so reconstruct VLAN */
              if (PREDICT_FALSE (tph->tp_status & TP_STATUS_VLAN_VALID))
                {
@@ -328,20 +374,22 @@ af_packet_device_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
                  vnet_buffer (b0)->sw_if_index[VLIB_RX] = apif->sw_if_index;
                  vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~0;
 
+                 first_b0 = b0;
                  first_bi0 = bi0;
-                 first_b0 = vlib_get_buffer (vm, first_bi0);
-                 if (tph->tp_status & TP_STATUS_CSUMNOTREADY)
-                   mark_tcp_udp_cksum_calc (first_b0, &l4_hdr_sz);
-                 /* This is a trade-off for GSO. As kernel isn't passing
-                  * us the GSO state or size, we guess it by comparing it
-                  * to the host MTU of the interface */
-                 if (tph->tp_snaplen > (apif->host_mtu + eth_header_size))
-                   fill_gso_buffer_flags (first_b0, apif->host_mtu,
-                                          l4_hdr_sz);
+
+                 if (is_cksum_gso_enabled)
+                   {
+                     if (vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
+                       fill_cksum_offload (first_b0, &l4_hdr_sz, is_ip);
+                     if (vnet_hdr->gso_type & (VIRTIO_NET_HDR_GSO_TCPV4 |
+                                               VIRTIO_NET_HDR_GSO_TCPV6))
+                       fill_gso_offload (first_b0, vnet_hdr->gso_size,
+                                         l4_hdr_sz);
+                   }
                }
              else
-               buffer_add_to_chain (vm, bi0, first_bi0, prev_bi0);
+               buffer_add_to_chain (b0, first_b0, prev_b0, bi0);
 
+             prev_b0 = b0;
              offset += bytes_to_copy;
              data_len -= bytes_to_copy;
            }
@@ -398,9 +446,15 @@ af_packet_device_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
              tr->hw_if_index = apif->hw_if_index;
              tr->block = block;
              tr->block_start = bd;
-             tr->num_pkts = num_pkts;
+             tr->pkt_num = bd->hdr.bh1.num_pkts - num_pkts;
              clib_memcpy_fast (&tr->bd, bd, sizeof (block_desc_t));
              clib_memcpy_fast (&tr->tph, tph, sizeof (tpacket3_hdr_t));
+             if (is_cksum_gso_enabled)
+               clib_memcpy_fast (&tr->vnet_hdr, vnet_hdr,
+                                 sizeof (vnet_virtio_net_hdr_t));
+             else
+               clib_memset_u8 (&tr->vnet_hdr, 0,
+                               sizeof (vnet_virtio_net_hdr_t));
            }
 
          /* enque and take next packet */
@@ -446,9 +500,15 @@ VLIB_NODE_FN (af_packet_input_node) (vlib_main_t * vm,
       af_packet_if_t *apif;
       apif = vec_elt_at_index (apm->interfaces, pv[i].dev_instance);
       if (apif->is_admin_up)
-       n_rx_packets += af_packet_device_input_fn (vm, node, frame, apif);
+       {
+         if (apif->is_cksum_gso_enabled)
+           n_rx_packets +=
+             af_packet_device_input_fn (vm, node, frame, apif, 1);
+         else
+           n_rx_packets +=
+             af_packet_device_input_fn (vm, node, frame, apif, 0);
+       }
     }
-
   return n_rx_packets;
 }
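One consequence of GSO support that is easy to miss in the diff is the TX frame size change from 2048 * 5 to 2048 * 33: a coalesced GSO packet of up to 64 KB must now fit into a single TPACKET_V3 TX frame together with its tpacket3_hdr and the virtio header. The arithmetic check below is an illustration of that sizing, not code from the patch.

/* Sanity-check sketch for the new AF_PACKET_DEFAULT_TX_FRAME_SIZE value.
 * The constants mirror the patch; the check itself is illustrative. */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>
#include <linux/if_packet.h>
#include <linux/virtio_net.h>

#define TX_FRAME_SIZE     (2048 * 33) /* new default from the patch */
#define MAX_GSO_PACKET_SZ 65536       /* 64 KB coalesced TCP packet */

int
main (void)
{
  /* per-frame overhead: aligned kernel descriptor plus the vnet header */
  size_t overhead = TPACKET_ALIGN (sizeof (struct tpacket3_hdr)) +
                    sizeof (struct virtio_net_hdr);
  size_t needed = overhead + MAX_GSO_PACKET_SZ;

  assert (needed <= TX_FRAME_SIZE); /* 2048 * 33 = 67584 bytes */
  printf ("frame %d bytes, %zu needed, %zu spare\n", TX_FRAME_SIZE, needed,
          (size_t) (TX_FRAME_SIZE - needed));
  return 0;
}

The previous default of 2048 * 5 = 10240 bytes was comfortably above any normal MTU but far too small for a 64 KB GSO packet, which is why the constant grows alongside the offload support.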