summary refs log tree commit diff stats
path: root/src/vnet/devices/virtio
diff options
context:
space:
mode:
author: Mohsin Kazmi <sykazmi@cisco.com> 2020-01-03 15:11:53 +0000
committer: Andrew Yourtchenko <ayourtch@gmail.com> 2020-01-08 17:08:50 +0000
commit: 6d4af8918c8d14aa609d6488f0793152f9ed1927 (patch)
tree: 78772559db16acd57a73ec834aaf0b798ebe2f26 /src/vnet/devices/virtio
parent: 0d74dd1f823b170d623316cf07eee70851494470 (diff)
virtio: split gso and checksum offload functionality
Type: refactor
Change-Id: I897e36bd5db593b417c2bac9f739bc51cf45bc08
Signed-off-by: Mohsin Kazmi <sykazmi@cisco.com>
Diffstat (limited to 'src/vnet/devices/virtio')
-rw-r--r--src/vnet/devices/virtio/cli.c66
-rw-r--r--src/vnet/devices/virtio/device.c88
-rw-r--r--src/vnet/devices/virtio/node.c12
-rw-r--r--src/vnet/devices/virtio/pci.c174
-rw-r--r--src/vnet/devices/virtio/pci.h6
-rw-r--r--src/vnet/devices/virtio/virtio.api2
-rw-r--r--src/vnet/devices/virtio/virtio.c2
-rw-r--r--src/vnet/devices/virtio/virtio.h1
-rw-r--r--src/vnet/devices/virtio/virtio_api.c5
9 files changed, 312 insertions, 44 deletions
diff --git a/src/vnet/devices/virtio/cli.c b/src/vnet/devices/virtio/cli.c
index 7291f17e645..1b37338af1f 100644
--- a/src/vnet/devices/virtio/cli.c
+++ b/src/vnet/devices/virtio/cli.c
@@ -44,6 +44,8 @@ virtio_pci_create_command_fn (vlib_main_t * vm, unformat_input_t * input,
args.features = feature_mask;
else if (unformat (line_input, "gso-enabled"))
args.gso_enabled = 1;
+ else if (unformat (line_input, "csum-enabled"))
+ args.checksum_offload_enabled = 1;
else
return clib_error_return (0, "unknown input `%U'",
format_unformat_error, input);
@@ -59,7 +61,7 @@ virtio_pci_create_command_fn (vlib_main_t * vm, unformat_input_t * input,
VLIB_CLI_COMMAND (virtio_pci_create_command, static) = {
.path = "create interface virtio",
.short_help = "create interface virtio <pci-address> "
- "[feature-mask <hex-mask>] [gso-enabled]",
+ "[feature-mask <hex-mask>] [gso-enabled] [csum-enabled]",
.function = virtio_pci_create_command_fn,
};
/* *INDENT-ON* */
@@ -118,6 +120,68 @@ VLIB_CLI_COMMAND (virtio_pci_delete_command, static) = {
/* *INDENT-ON* */
static clib_error_t *
+virtio_pci_enable_command_fn (vlib_main_t * vm, unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ u32 sw_if_index = ~0;
+ vnet_hw_interface_t *hw;
+ virtio_main_t *vim = &virtio_main;
+ virtio_if_t *vif;
+ vnet_main_t *vnm = vnet_get_main ();
+ int gso_enabled = 0, checksum_offload_enabled = 0;
+ int offloads_disabled = 0;
+
+ /* Get a line of input. */
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "sw_if_index %d", &sw_if_index))
+ ;
+ else if (unformat (line_input, "%U", unformat_vnet_sw_interface,
+ vnm, &sw_if_index))
+ ;
+ else if (unformat (line_input, "gso-enabled"))
+ gso_enabled = 1;
+ else if (unformat (line_input, "csum-offload-enabled"))
+ checksum_offload_enabled = 1;
+ else if (unformat (line_input, "offloads-disabled"))
+ offloads_disabled = 1;
+ else
+ return clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
+ }
+ unformat_free (line_input);
+
+ if (sw_if_index == ~0)
+ return clib_error_return (0,
+ "please specify interface name or sw_if_index");
+
+ hw = vnet_get_sup_hw_interface_api_visible_or_null (vnm, sw_if_index);
+ if (hw == NULL || virtio_device_class.index != hw->dev_class_index)
+ return clib_error_return (0, "not a virtio interface");
+
+ vif = pool_elt_at_index (vim->interfaces, hw->dev_instance);
+
+ if (virtio_pci_enable_disable_offloads
+ (vm, vif, gso_enabled, checksum_offload_enabled, offloads_disabled) < 0)
+ return clib_error_return (0, "not able to enable/disable offloads");
+
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (virtio_pci_enable_command, static) = {
+ .path = "set virtio pci",
+ .short_help = "set virtio pci {<interface> | sw_if_index <sw_idx>}"
+ " [gso-enabled | csum-offload-enabled | offloads-disabled]",
+ .function = virtio_pci_enable_command_fn,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
show_virtio_pci_fn (vlib_main_t * vm, unformat_input_t * input,
vlib_cli_command_t * cmd)
{
diff --git a/src/vnet/devices/virtio/device.c b/src/vnet/devices/virtio/device.c
index 16d1c808643..ecc8b8c1aa7 100644
--- a/src/vnet/devices/virtio/device.c
+++ b/src/vnet/devices/virtio/device.c
@@ -22,8 +22,11 @@
#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
#include <vnet/ethernet/ethernet.h>
+#include <vnet/gso/gso.h>
#include <vnet/ip/ip4_packet.h>
#include <vnet/ip/ip6_packet.h>
+#include <vnet/tcp/tcp_packet.h>
+#include <vnet/udp/udp_packet.h>
#include <vnet/devices/virtio/virtio.h>
#define foreach_virtio_tx_func_error \
@@ -114,10 +117,46 @@ virtio_free_used_device_desc (vlib_main_t * vm, virtio_vring_t * vring)
vring->last_used_idx = last;
}
+static_always_inline void
+set_checksum_offsets (vlib_main_t * vm, virtio_if_t * vif, vlib_buffer_t * b,
+ struct virtio_net_hdr_v1 *hdr)
+{
+ if (b->flags & VNET_BUFFER_F_IS_IP4)
+ {
+ ip4_header_t *ip4;
+ gso_header_offset_t gho = vnet_gso_header_offset_parser (b, 0);
+ hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
+ hdr->csum_start = gho.l4_hdr_offset; // 0x22;
+ if (b->flags & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM)
+ hdr->csum_offset = STRUCT_OFFSET_OF (tcp_header_t, checksum);
+ else if (b->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM)
+ hdr->csum_offset = STRUCT_OFFSET_OF (udp_header_t, checksum);
+
+ /*
+ * virtio devices do not support IP4 checksum offload. So driver takes care
+ * of it while doing tx.
+ */
+ ip4 =
+ (ip4_header_t *) (vlib_buffer_get_current (b) + gho.l3_hdr_offset);
+ if (b->flags & VNET_BUFFER_F_OFFLOAD_IP_CKSUM)
+ ip4->checksum = ip4_header_checksum (ip4);
+ }
+ else if (b->flags & VNET_BUFFER_F_IS_IP6)
+ {
+ gso_header_offset_t gho = vnet_gso_header_offset_parser (b, 1);
+ hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
+ hdr->csum_start = gho.l4_hdr_offset; // 0x36;
+ if (b->flags & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM)
+ hdr->csum_offset = STRUCT_OFFSET_OF (tcp_header_t, checksum);
+ else if (b->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM)
+ hdr->csum_offset = STRUCT_OFFSET_OF (udp_header_t, checksum);
+ }
+}
+
static_always_inline u16
add_buffer_to_slot (vlib_main_t * vm, virtio_if_t * vif,
virtio_vring_t * vring, u32 bi, u16 avail, u16 next,
- u16 mask, int do_gso)
+ u16 mask, int do_gso, int csum_offload)
{
u16 n_added = 0;
int hdr_sz = vif->virtio_net_hdr_sz;
@@ -127,25 +166,46 @@ add_buffer_to_slot (vlib_main_t * vm, virtio_if_t * vif,
struct virtio_net_hdr_v1 *hdr = vlib_buffer_get_current (b) - hdr_sz;
clib_memset (hdr, 0, hdr_sz);
+
if (do_gso && (b->flags & VNET_BUFFER_F_GSO))
{
if (b->flags & VNET_BUFFER_F_IS_IP4)
{
+ ip4_header_t *ip4;
+ gso_header_offset_t gho = vnet_gso_header_offset_parser (b, 0);
hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
hdr->gso_size = vnet_buffer2 (b)->gso_size;
+ hdr->hdr_len = gho.l4_hdr_offset + gho.l4_hdr_sz;
hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
- hdr->csum_start = vnet_buffer (b)->l4_hdr_offset; // 0x22;
- hdr->csum_offset = 0x10;
+ hdr->csum_start = gho.l4_hdr_offset; // 0x22;
+ hdr->csum_offset = STRUCT_OFFSET_OF (tcp_header_t, checksum);
+ ip4 =
+ (ip4_header_t *) (vlib_buffer_get_current (b) +
+ gho.l3_hdr_offset);
+ /*
+ * virtio devices do not support IP4 checksum offload. So driver takes care
+ * of it while doing tx.
+ */
+ if (b->flags & VNET_BUFFER_F_OFFLOAD_IP_CKSUM)
+ ip4->checksum = ip4_header_checksum (ip4);
}
- else
+ else if (b->flags & VNET_BUFFER_F_IS_IP6)
{
+ gso_header_offset_t gho = vnet_gso_header_offset_parser (b, 1);
hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
hdr->gso_size = vnet_buffer2 (b)->gso_size;
+ hdr->hdr_len = gho.l4_hdr_offset + gho.l4_hdr_sz;
hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
- hdr->csum_start = vnet_buffer (b)->l4_hdr_offset; // 0x36;
- hdr->csum_offset = 0x10;
+ hdr->csum_start = gho.l4_hdr_offset; // 0x36;
+ hdr->csum_offset = STRUCT_OFFSET_OF (tcp_header_t, checksum);
}
}
+ else if (csum_offload
+ && (b->flags & (VNET_BUFFER_F_OFFLOAD_TCP_CKSUM |
+ VNET_BUFFER_F_OFFLOAD_UDP_CKSUM)))
+ {
+ set_checksum_offsets (vm, vif, b, hdr);
+ }
if (PREDICT_TRUE ((b->flags & VLIB_BUFFER_NEXT_PRESENT) == 0))
{
@@ -245,7 +305,7 @@ add_buffer_to_slot (vlib_main_t * vm, virtio_if_t * vif,
static_always_inline uword
virtio_interface_tx_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
vlib_frame_t * frame, virtio_if_t * vif,
- int do_gso)
+ int do_gso, int csum_offload)
{
u16 n_left = frame->n_vectors;
virtio_vring_t *vring;
@@ -274,7 +334,7 @@ virtio_interface_tx_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
u16 n_added = 0;
n_added =
add_buffer_to_slot (vm, vif, vring, buffers[0], avail, next, mask,
- do_gso);
+ do_gso, csum_offload);
if (!n_added)
break;
avail += n_added;
@@ -310,15 +370,21 @@ VNET_DEVICE_CLASS_TX_FN (virtio_device_class) (vlib_main_t * vm,
vlib_node_runtime_t * node,
vlib_frame_t * frame)
{
+ vnet_main_t *vnm = vnet_get_main ();
virtio_main_t *nm = &virtio_main;
vnet_interface_output_runtime_t *rund = (void *) node->runtime_data;
virtio_if_t *vif = pool_elt_at_index (nm->interfaces, rund->dev_instance);
+ vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, vif->hw_if_index);
- if (vif->gso_enabled > 0)
- return virtio_interface_tx_inline (vm, node, frame, vif, 1 /* do_gso */ );
+ if (hw->flags & VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO)
+ return virtio_interface_tx_inline (vm, node, frame, vif, 1 /* do_gso */ ,
+ 1);
+ else if (hw->flags & VNET_HW_INTERFACE_FLAG_SUPPORTS_TX_L4_CKSUM_OFFLOAD)
+ return virtio_interface_tx_inline (vm, node, frame, vif,
+ 0 /* no do_gso */ , 1);
else
return virtio_interface_tx_inline (vm, node, frame, vif,
- 0 /* no do_gso */ );
+ 0 /* no do_gso */ , 0);
}
static void
diff --git a/src/vnet/devices/virtio/node.c b/src/vnet/devices/virtio/node.c
index ba17075d48c..d31fb144b8f 100644
--- a/src/vnet/devices/virtio/node.c
+++ b/src/vnet/devices/virtio/node.c
@@ -242,7 +242,7 @@ fill_gso_buffer_flags (vlib_buffer_t * b0, struct virtio_net_hdr_v1 *hdr,
static_always_inline uword
virtio_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
vlib_frame_t * frame, virtio_if_t * vif, u16 qid,
- int gso_enabled)
+ int gso_enabled, int checksum_offload_enabled)
{
vnet_main_t *vnm = vnet_get_main ();
u32 thread_index = vm->thread_index;
@@ -289,7 +289,8 @@ virtio_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
b0->total_length_not_including_first_buffer = 0;
b0->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID;
- virtio_needs_csum (b0, hdr, &l4_proto, &l4_hdr_sz);
+ if (checksum_offload_enabled)
+ virtio_needs_csum (b0, hdr, &l4_proto, &l4_hdr_sz);
if (gso_enabled)
fill_gso_buffer_flags (b0, hdr, l4_proto, l4_hdr_sz);
@@ -396,10 +397,13 @@ VLIB_NODE_FN (virtio_input_node) (vlib_main_t * vm,
{
if (vif->gso_enabled)
n_rx += virtio_device_input_inline (vm, node, frame, vif,
- dq->queue_id, 1);
+ dq->queue_id, 1, 1);
+ else if (vif->csum_offload_enabled)
+ n_rx += virtio_device_input_inline (vm, node, frame, vif,
+ dq->queue_id, 0, 1);
else
n_rx += virtio_device_input_inline (vm, node, frame, vif,
- dq->queue_id, 0);
+ dq->queue_id, 0, 0);
}
}
diff --git a/src/vnet/devices/virtio/pci.c b/src/vnet/devices/virtio/pci.c
index ab21147aca9..c956e7b75fa 100644
--- a/src/vnet/devices/virtio/pci.c
+++ b/src/vnet/devices/virtio/pci.c
@@ -110,7 +110,7 @@ virtio_pci_legacy_write_config (vlib_main_t * vm, virtio_if_t * vif,
}
static u64
-virtio_pci_legacy_get_features (vlib_main_t * vm, virtio_if_t * vif)
+virtio_pci_legacy_get_host_features (vlib_main_t * vm, virtio_if_t * vif)
{
u32 features;
vlib_pci_read_io_u32 (vm, vif->pci_dev_handle, VIRTIO_PCI_HOST_FEATURES,
@@ -119,8 +119,18 @@ virtio_pci_legacy_get_features (vlib_main_t * vm, virtio_if_t * vif)
}
static u32
-virtio_pci_legacy_set_features (vlib_main_t * vm, virtio_if_t * vif,
- u64 features)
+virtio_pci_legacy_get_guest_features (vlib_main_t * vm, virtio_if_t * vif)
+{
+ u32 feature = 0;
+ vlib_pci_read_io_u32 (vm, vif->pci_dev_handle, VIRTIO_PCI_GUEST_FEATURES,
+ &feature);
+ vif->features = feature;
+ return feature;
+}
+
+static u32
+virtio_pci_legacy_set_guest_features (vlib_main_t * vm, virtio_if_t * vif,
+ u64 features)
{
if ((features >> 32) != 0)
{
@@ -555,6 +565,47 @@ virtio_pci_send_ctrl_msg (vlib_main_t * vm, virtio_if_t * vif,
}
static int
+virtio_pci_disable_offload (vlib_main_t * vm, virtio_if_t * vif)
+{
+ struct virtio_ctrl_msg offload_hdr;
+ virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
+
+ offload_hdr.ctrl.class = VIRTIO_NET_CTRL_GUEST_OFFLOADS;
+ offload_hdr.ctrl.cmd = VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET;
+ offload_hdr.status = VIRTIO_NET_ERR;
+ u64 offloads = 0ULL;
+ clib_memcpy (offload_hdr.data, &offloads, sizeof (offloads));
+
+ status =
+ virtio_pci_send_ctrl_msg (vm, vif, &offload_hdr, sizeof (offloads));
+ virtio_log_debug (vif, "disable offloads");
+ vif->remote_features = virtio_pci_legacy_get_host_features (vm, vif);
+ virtio_pci_legacy_get_guest_features (vm, vif);
+ return status;
+}
+
+static int
+virtio_pci_enable_checksum_offload (vlib_main_t * vm, virtio_if_t * vif)
+{
+ struct virtio_ctrl_msg csum_offload_hdr;
+ virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
+
+ csum_offload_hdr.ctrl.class = VIRTIO_NET_CTRL_GUEST_OFFLOADS;
+ csum_offload_hdr.ctrl.cmd = VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET;
+ csum_offload_hdr.status = VIRTIO_NET_ERR;
+ u64 offloads = 0ULL;
+ offloads |= VIRTIO_FEATURE (VIRTIO_NET_F_GUEST_CSUM);
+ clib_memcpy (csum_offload_hdr.data, &offloads, sizeof (offloads));
+
+ status =
+ virtio_pci_send_ctrl_msg (vm, vif, &csum_offload_hdr, sizeof (offloads));
+ virtio_log_debug (vif, "enable checksum offload");
+ vif->remote_features = virtio_pci_legacy_get_host_features (vm, vif);
+ virtio_pci_legacy_get_guest_features (vm, vif);
+ return status;
+}
+
+static int
virtio_pci_enable_gso (vlib_main_t * vm, virtio_if_t * vif)
{
struct virtio_ctrl_msg gso_hdr;
@@ -565,16 +616,78 @@ virtio_pci_enable_gso (vlib_main_t * vm, virtio_if_t * vif)
gso_hdr.status = VIRTIO_NET_ERR;
u64 offloads = VIRTIO_FEATURE (VIRTIO_NET_F_GUEST_CSUM)
| VIRTIO_FEATURE (VIRTIO_NET_F_GUEST_TSO4)
- | VIRTIO_FEATURE (VIRTIO_NET_F_GUEST_TSO6)
- | VIRTIO_FEATURE (VIRTIO_NET_F_GUEST_UFO);
+ | VIRTIO_FEATURE (VIRTIO_NET_F_GUEST_TSO6);
clib_memcpy (gso_hdr.data, &offloads, sizeof (offloads));
status = virtio_pci_send_ctrl_msg (vm, vif, &gso_hdr, sizeof (offloads));
virtio_log_debug (vif, "enable gso");
+ vif->remote_features = virtio_pci_legacy_get_host_features (vm, vif);
+ virtio_pci_legacy_get_guest_features (vm, vif);
return status;
}
static int
+virtio_pci_offloads (vlib_main_t * vm, virtio_if_t * vif, int gso_enabled,
+ int csum_offload_enabled)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, vif->hw_if_index);
+
+ if ((vif->features & VIRTIO_FEATURE (VIRTIO_NET_F_CTRL_VQ)) &&
+ (vif->features & VIRTIO_FEATURE (VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)))
+ {
+ if (gso_enabled
+ && (vif->features & (VIRTIO_FEATURE (VIRTIO_NET_F_HOST_TSO4) |
+ VIRTIO_FEATURE (VIRTIO_NET_F_HOST_TSO6))))
+ {
+ if (virtio_pci_enable_gso (vm, vif))
+ {
+ virtio_log_warning (vif, "gso is not enabled");
+ }
+ else
+ {
+ vif->gso_enabled = 1;
+ vif->csum_offload_enabled = 0;
+ hw->flags |= VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO |
+ VNET_HW_INTERFACE_FLAG_SUPPORTS_TX_L4_CKSUM_OFFLOAD;
+ }
+ }
+ else if (csum_offload_enabled
+ && (vif->features & VIRTIO_FEATURE (VIRTIO_NET_F_CSUM)))
+ {
+ if (virtio_pci_enable_checksum_offload (vm, vif))
+ {
+ virtio_log_warning (vif, "checksum offload is not enabled");
+ }
+ else
+ {
+ vif->csum_offload_enabled = 1;
+ vif->gso_enabled = 0;
+ hw->flags &= ~VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO;
+ hw->flags |=
+ VNET_HW_INTERFACE_FLAG_SUPPORTS_TX_L4_CKSUM_OFFLOAD;
+ }
+ }
+ else
+ {
+ if (virtio_pci_disable_offload (vm, vif))
+ {
+ virtio_log_warning (vif, "offloads are not disabled");
+ }
+ else
+ {
+ vif->csum_offload_enabled = 0;
+ vif->gso_enabled = 0;
+ hw->flags &= ~(VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO |
+ VNET_HW_INTERFACE_FLAG_SUPPORTS_TX_L4_CKSUM_OFFLOAD);
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int
virtio_pci_enable_multiqueue (vlib_main_t * vm, virtio_if_t * vif,
u16 num_queues)
{
@@ -770,13 +883,14 @@ virtio_negotiate_features (vlib_main_t * vm, virtio_if_t * vif,
vif->features &= ~VIRTIO_FEATURE (VIRTIO_NET_F_MTU);
}
- vif->features = virtio_pci_legacy_set_features (vm, vif, vif->features);
+ vif->features =
+ virtio_pci_legacy_set_guest_features (vm, vif, vif->features);
}
void
virtio_pci_read_device_feature (vlib_main_t * vm, virtio_if_t * vif)
{
- vif->remote_features = virtio_pci_legacy_get_features (vm, vif);
+ vif->remote_features = virtio_pci_legacy_get_host_features (vm, vif);
}
int
@@ -1208,30 +1322,17 @@ virtio_pci_create_if (vlib_main_t * vm, virtio_pci_create_if_args_t * args)
else
vnet_hw_interface_set_flags (vnm, vif->hw_if_index, 0);
- if (vif->features & VIRTIO_FEATURE (VIRTIO_NET_F_CTRL_VQ))
+ virtio_pci_offloads (vm, vif, args->gso_enabled,
+ args->checksum_offload_enabled);
+
+ if ((vif->features & VIRTIO_FEATURE (VIRTIO_NET_F_CTRL_VQ)) &&
+ (vif->features & VIRTIO_FEATURE (VIRTIO_NET_F_MQ)))
{
- if (vif->features & VIRTIO_FEATURE (VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) &&
- args->gso_enabled)
- {
- if (virtio_pci_enable_gso (vm, vif))
- {
- virtio_log_warning (vif, "gso is not enabled");
- }
- else
- {
- vif->gso_enabled = 1;
- hw->flags |= VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO;
- }
- }
- if (vif->features & VIRTIO_FEATURE (VIRTIO_NET_F_MQ))
- {
- if (virtio_pci_enable_multiqueue (vm, vif, vif->max_queue_pairs))
- virtio_log_warning (vif, "multiqueue is not set");
- }
+ if (virtio_pci_enable_multiqueue (vm, vif, vif->max_queue_pairs))
+ virtio_log_warning (vif, "multiqueue is not set");
}
return;
-
error:
virtio_pci_delete_if (vm, vif);
args->rv = VNET_API_ERROR_INVALID_INTERFACE;
@@ -1325,6 +1426,25 @@ virtio_pci_delete_if (vlib_main_t * vm, virtio_if_t * vif)
return 0;
}
+int
+virtio_pci_enable_disable_offloads (vlib_main_t * vm, virtio_if_t * vif,
+ int gso_enabled,
+ int checksum_offload_enabled,
+ int offloads_disabled)
+{
+ if (vif->type != VIRTIO_IF_TYPE_PCI)
+ return VNET_API_ERROR_INVALID_INTERFACE;
+
+ if (gso_enabled)
+ virtio_pci_offloads (vm, vif, 1, 0);
+ else if (checksum_offload_enabled)
+ virtio_pci_offloads (vm, vif, 0, 1);
+ else if (offloads_disabled)
+ virtio_pci_offloads (vm, vif, 0, 0);
+
+ return 0;
+}
+
/*
* fd.io coding-style-patch-verification: ON
*
diff --git a/src/vnet/devices/virtio/pci.h b/src/vnet/devices/virtio/pci.h
index 4c62885adbf..8ec1360bc59 100644
--- a/src/vnet/devices/virtio/pci.h
+++ b/src/vnet/devices/virtio/pci.h
@@ -225,6 +225,7 @@ typedef struct
u8 mac_addr[6];
u64 features;
u8 gso_enabled;
+ u8 checksum_offload_enabled;
clib_error_t *error;
} virtio_pci_create_if_args_t;
@@ -233,7 +234,10 @@ extern void device_status (vlib_main_t * vm, virtio_if_t * vif);
void virtio_pci_create_if (vlib_main_t * vm,
virtio_pci_create_if_args_t * args);
int virtio_pci_delete_if (vlib_main_t * vm, virtio_if_t * ad);
-
+int virtio_pci_enable_disable_offloads (vlib_main_t * vm, virtio_if_t * vif,
+ int gso_enabled,
+ int checksum_offload_enabled,
+ int offloads_disabled);
#endif /* __included_virtio_pci_h__ */
/*
* fd.io coding-style-patch-verification: ON
diff --git a/src/vnet/devices/virtio/virtio.api b/src/vnet/devices/virtio/virtio.api
index b191810d71b..efbcba8fa43 100644
--- a/src/vnet/devices/virtio/virtio.api
+++ b/src/vnet/devices/virtio/virtio.api
@@ -27,6 +27,7 @@ import "vnet/pci/pci_types.api";
@param use_random_mac - let the system generate a unique mac address
@param mac_address - mac addr to assign to the interface if use_random not set
@param gso_enabled - enable gso feature if available, 1 to enable
+ @param checksum_offload_enabled - enable checksum feature if available, 1 to enable
@param features - the virtio features which driver should negotiate with device
*/
define virtio_pci_create
@@ -37,6 +38,7 @@ define virtio_pci_create
bool use_random_mac;
vl_api_mac_address_t mac_address;
bool gso_enabled;
+ bool checksum_offload_enabled;
u64 features;
};
diff --git a/src/vnet/devices/virtio/virtio.c b/src/vnet/devices/virtio/virtio.c
index d3a6e15ac56..6504b2ae762 100644
--- a/src/vnet/devices/virtio/virtio.c
+++ b/src/vnet/devices/virtio/virtio.c
@@ -288,6 +288,8 @@ virtio_show (vlib_main_t * vm, u32 * hw_if_indices, u8 show_descr, u32 type)
{
vlib_cli_output (vm, " PCI Address: %U", format_vlib_pci_addr,
&vif->pci_addr);
+ vlib_cli_output (vm, " csum-enabled %d",
+ vif->csum_offload_enabled);
}
if (type == VIRTIO_IF_TYPE_TAP)
{
diff --git a/src/vnet/devices/virtio/virtio.h b/src/vnet/devices/virtio/virtio.h
index 27ecc2238ad..4b8662699da 100644
--- a/src/vnet/devices/virtio/virtio.h
+++ b/src/vnet/devices/virtio/virtio.h
@@ -174,6 +174,7 @@ typedef struct
u8 host_ip6_prefix_len;
u32 host_mtu_size;
int gso_enabled;
+ int csum_offload_enabled;
int ifindex;
virtio_vring_t *cxq_vring;
} virtio_if_t;
diff --git a/src/vnet/devices/virtio/virtio_api.c b/src/vnet/devices/virtio/virtio_api.c
index ac167e827c6..4d98e725fde 100644
--- a/src/vnet/devices/virtio/virtio_api.c
+++ b/src/vnet/devices/virtio/virtio_api.c
@@ -71,6 +71,11 @@ vl_api_virtio_pci_create_t_handler (vl_api_virtio_pci_create_t * mp)
ap->gso_enabled = 1;
else
ap->gso_enabled = 0;
+ if (mp->checksum_offload_enabled)
+ ap->checksum_offload_enabled = 1;
+ else
+ ap->checksum_offload_enabled = 0;
+
ap->features = clib_net_to_host_u64 (mp->features);
virtio_pci_create_if (vm, ap);
/*
 * NOTE(review): this region arrived with all formatting stripped — each
 * physical line below is a whole run of statements collapsed onto one line,
 * and preprocessor directives (#if/#elif/#ifdef/#endif) sit inline with code.
 * As written it will not compile until those directives are restored to their
 * own lines; the token stream itself is kept byte-identical here.  The first
 * definition below is truncated (its signature begins outside this chunk), so
 * it is preserved verbatim rather than documented.
 */
n>, void (*two_buffers) (vlib_main_t * vm, void *opaque1, uword opaque2, vlib_buffer_t * b0, vlib_buffer_t * b1, u32 * next0, u32 * next1), void (*one_buffer) (vlib_main_t * vm, void *opaque1, uword opaque2, vlib_buffer_t * b0, u32 * next0)) { u32 n_left_from, *from, *to_next; u32 next_index; from = vlib_frame_vector_args (frame); n_left_from = frame->n_vectors; next_index = node->cached_next_index; if (node->flags & VLIB_NODE_FLAG_TRACE) vlib_trace_frame_buffers_only (vm, node, from, frame->n_vectors, /* stride */ 1, sizeof_trace); while (n_left_from > 0) { u32 n_left_to_next; vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); while (n_left_from >= 4 && n_left_to_next >= 2) { vlib_buffer_t *p0, *p1; u32 pi0, next0; u32 pi1, next1; /* Prefetch next iteration. */ { vlib_buffer_t *p2, *p3; p2 = vlib_get_buffer (vm, from[2]); p3 = vlib_get_buffer (vm, from[3]); vlib_prefetch_buffer_header (p2, LOAD); vlib_prefetch_buffer_header (p3, LOAD); CLIB_PREFETCH (p2->data, 64, LOAD); CLIB_PREFETCH (p3->data, 64, LOAD); } pi0 = to_next[0] = from[0]; pi1 = to_next[1] = from[1]; from += 2; to_next += 2; n_left_from -= 2; n_left_to_next -= 2; p0 = vlib_get_buffer (vm, pi0); p1 = vlib_get_buffer (vm, pi1); two_buffers (vm, opaque1, opaque2, p0, p1, &next0, &next1); vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next, n_left_to_next, pi0, pi1, next0, next1); } while (n_left_from > 0 && n_left_to_next > 0) { vlib_buffer_t *p0; u32 pi0, next0; pi0 = from[0]; to_next[0] = pi0; from += 1; to_next += 1; n_left_from -= 1; n_left_to_next -= 1; p0 = vlib_get_buffer (vm, pi0); one_buffer (vm, opaque1, opaque2, p0, &next0); vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next, n_left_to_next, pi0, next0); } vlib_put_next_frame (vm, node, next_index, n_left_to_next); } return frame->n_vectors; } static_always_inline void vlib_buffer_enqueue_to_next (vlib_main_t * vm, vlib_node_runtime_t * node, u32 * buffers, u16 * nexts, uword count) { u32 *to_next, 
/*
 * vlib_buffer_enqueue_to_next (signature started at the end of the previous
 * physical line) batches buffer indices into next-node frames.  The code
 * below measures how many consecutive entries of `nexts` equal the current
 * next_index using whichever SIMD width is compiled in (CLIB_HAVE_VEC512 /
 * VEC256 / VEC128), with a scalar 4-at-a-time XOR fallback, then bulk-copies
 * that run with clib_memcpy_fast in descending chunk sizes.
 */
n_left_to_next, max; u16 next_index; next_index = nexts[0]; vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); max = clib_min (n_left_to_next, count); while (count) { u32 n_enqueued; if ((nexts[0] != next_index) || n_left_to_next == 0) { vlib_put_next_frame (vm, node, next_index, n_left_to_next); next_index = nexts[0]; vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); max = clib_min (n_left_to_next, count); } #if defined(CLIB_HAVE_VEC512) u16x32 next32 = u16x32_load_unaligned (nexts); next32 = (next32 == u16x32_splat (next32[0])); u64 bitmap = u16x32_msb_mask (next32); n_enqueued = count_trailing_zeros (~bitmap); #elif defined(CLIB_HAVE_VEC256) u16x16 next16 = u16x16_load_unaligned (nexts); next16 = (next16 == u16x16_splat (next16[0])); u64 bitmap = u8x32_msb_mask ((u8x32) next16); n_enqueued = count_trailing_zeros (~bitmap) / 2; #elif defined(CLIB_HAVE_VEC128) && defined(CLIB_HAVE_VEC128_MSB_MASK) u16x8 next8 = u16x8_load_unaligned (nexts); next8 = (next8 == u16x8_splat (next8[0])); u64 bitmap = u8x16_msb_mask ((u8x16) next8); n_enqueued = count_trailing_zeros (~bitmap) / 2; #else u16 x = 0; if (count + 3 < max) { x |= next_index ^ nexts[1]; x |= next_index ^ nexts[2]; x |= next_index ^ nexts[3]; n_enqueued = (x == 0) ? 
/*
 * (continuation: the chunked copy tails of vlib_buffer_enqueue_to_next,
 * followed by vlib_buffer_enqueue_to_single_next, which memcpy's the whole
 * batch into a single next-node frame and refills the frame via the `next:`
 * label when the current frame cannot hold the remaining count.)
 */
4 : 1; } else n_enqueued = 1; #endif if (PREDICT_FALSE (n_enqueued > max)) n_enqueued = max; #ifdef CLIB_HAVE_VEC512 if (n_enqueued >= 32) { clib_memcpy_fast (to_next, buffers, 32 * sizeof (u32)); nexts += 32; to_next += 32; buffers += 32; n_left_to_next -= 32; count -= 32; max -= 32; continue; } #endif #ifdef CLIB_HAVE_VEC256 if (n_enqueued >= 16) { clib_memcpy_fast (to_next, buffers, 16 * sizeof (u32)); nexts += 16; to_next += 16; buffers += 16; n_left_to_next -= 16; count -= 16; max -= 16; continue; } #endif #ifdef CLIB_HAVE_VEC128 if (n_enqueued >= 8) { clib_memcpy_fast (to_next, buffers, 8 * sizeof (u32)); nexts += 8; to_next += 8; buffers += 8; n_left_to_next -= 8; count -= 8; max -= 8; continue; } #endif if (n_enqueued >= 4) { clib_memcpy_fast (to_next, buffers, 4 * sizeof (u32)); nexts += 4; to_next += 4; buffers += 4; n_left_to_next -= 4; count -= 4; max -= 4; continue; } /* copy */ to_next[0] = buffers[0]; /* next */ nexts += 1; to_next += 1; buffers += 1; n_left_to_next -= 1; count -= 1; max -= 1; } vlib_put_next_frame (vm, node, next_index, n_left_to_next); } static_always_inline void vlib_buffer_enqueue_to_single_next (vlib_main_t * vm, vlib_node_runtime_t * node, u32 * buffers, u16 next_index, u32 count) { u32 *to_next, n_left_to_next, n_enq; vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); if (PREDICT_TRUE (n_left_to_next >= count)) { clib_memcpy_fast (to_next, buffers, count * sizeof (u32)); n_left_to_next -= count; vlib_put_next_frame (vm, node, next_index, n_left_to_next); return; } n_enq = n_left_to_next; next: clib_memcpy_fast (to_next, buffers, n_enq * sizeof (u32)); n_left_to_next -= n_enq; if (PREDICT_FALSE (count > n_enq)) { count -= n_enq; buffers += n_enq; vlib_put_next_frame (vm, node, next_index, n_left_to_next); vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); n_enq = clib_min (n_left_to_next, count); goto next; } vlib_put_next_frame (vm, node, next_index, n_left_to_next); } static_always_inline 
/*
 * vlib_buffer_enqueue_to_thread: hands buffer indices off to per-thread
 * frame queues.  When drop_on_congestion is non-zero, buffers headed for a
 * congested queue are collected in drop_list and freed at the end with
 * vlib_buffer_free.  Returns the number of packets actually handed off
 * (n_packets - n_drop).  NOTE(review): a block comment opened on the next
 * physical line stays open across the following line boundary, so no
 * annotation can be inserted between those two lines.
 */
u32 vlib_buffer_enqueue_to_thread (vlib_main_t * vm, u32 frame_queue_index, u32 * buffer_indices, u16 * thread_indices, u32 n_packets, int drop_on_congestion) { vlib_thread_main_t *tm = vlib_get_thread_main (); vlib_frame_queue_main_t *fqm; vlib_frame_queue_per_thread_data_t *ptd; u32 n_left = n_packets; u32 drop_list[VLIB_FRAME_SIZE], *dbi = drop_list, n_drop = 0; vlib_frame_queue_elt_t *hf = 0; u32 n_left_to_next_thread = 0, *to_next_thread = 0; u32 next_thread_index, current_thread_index = ~0; int i; fqm = vec_elt_at_index (tm->frame_queue_mains, frame_queue_index); ptd = vec_elt_at_index (fqm->per_thread_data, vm->thread_index); while (n_left) { next_thread_index = thread_indices[0]; if (next_thread_index != current_thread_index) { if (drop_on_congestion && is_vlib_frame_queue_congested (frame_queue_index, next_thread_index, fqm->queue_hi_thresh, ptd->congested_handoff_queue_by_thread_index)) { dbi[0] = buffer_indices[0]; dbi++; n_drop++; goto next; } if (hf) hf->n_vectors = VLIB_FRAME_SIZE - n_left_to_next_thread; hf = vlib_get_worker_handoff_queue_elt (frame_queue_index, next_thread_index, ptd->handoff_queue_elt_by_thread_index); n_left_to_next_thread = VLIB_FRAME_SIZE - hf->n_vectors; to_next_thread = &hf->buffer_index[hf->n_vectors]; current_thread_index = next_thread_index; } to_next_thread[0] = buffer_indices[0]; to_next_thread++; n_left_to_next_thread--; if (n_left_to_next_thread == 0) { hf->n_vectors = VLIB_FRAME_SIZE; vlib_put_frame_queue_elt (hf); current_thread_index = ~0; ptd->handoff_queue_elt_by_thread_index[next_thread_index] = 0; hf = 0; } /* next */ next: thread_indices += 1; buffer_indices += 1; n_left -= 1; } if (hf) hf->n_vectors = VLIB_FRAME_SIZE - n_left_to_next_thread; /* Ship frames to the thread nodes */ for (i = 0; i < vec_len (ptd->handoff_queue_elt_by_thread_index); i++) { if (ptd->handoff_queue_elt_by_thread_index[i]) { hf = ptd->handoff_queue_elt_by_thread_index[i]; /* * It works better to let the handoff node * rate-adapt, always 
ship the handoff queue element. */ if (1 || hf->n_vectors == hf->last_n_vectors) { vlib_put_frame_queue_elt (hf); ptd->handoff_queue_elt_by_thread_index[i] = 0; } else hf->last_n_vectors = hf->n_vectors; } ptd->congested_handoff_queue_by_thread_index[i] = (vlib_frame_queue_t *) (~0); } if (drop_on_congestion && n_drop) vlib_buffer_free (vm, drop_list, n_drop); return n_packets - n_drop; } #endif /* included_vlib_buffer_node_h */ /* * fd.io coding-style-patch-verification: ON * * Local Variables: * eval: (c-set-style "gnu") * End: */