author    Steven Luong <sluong@cisco.com>    2019-05-06 08:51:56 -0700
committer Dave Barach <openvpp@barachs.net>  2019-07-31 16:33:15 +0000
commit    4208a4ce8d72d3fb6428527cde1fba7b397bd6f7 (patch)
tree      3a04e358925ab7215c7d358fbf62b6c1eb33b25f
parent    83832e7ced8be8b7de394415feaba70c32e3c38d (diff)
devices interface tests: vhost GSO support
Add a gso option to create vhost interface to support GSO and checksum offload.

Tested with the following startup options in qemu:
csum=on,gso=on,guest_csum=on,guest_tso4=on,guest_tso6=on,guest_ufo=on,
host_tso4=on,host_tso6=on,host_ufo=on

Type: feature
Change-Id: I9ba1ee33677a694c4a0dfe66e745b098995902b8
Signed-off-by: Steven Luong <sluong@cisco.com>
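For reference, a minimal end-to-end sketch of exercising the new option (the socket path, chardev/netdev ids, and the exact qemu command line are illustrative assumptions; the virtio-net properties are the ones listed above):

    vpp# create vhost-user socket /var/run/vpp/vhost1.sock server gso
    qemu-system-x86_64 ... \
      -chardev socket,id=chr0,path=/var/run/vpp/vhost1.sock \
      -netdev type=vhost-user,id=net0,chardev=chr0 \
      -device virtio-net-pci,netdev=net0,csum=on,gso=on,guest_csum=on,guest_tso4=on,guest_tso6=on,guest_ufo=on,host_tso4=on,host_tso6=on,host_ufo=on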
-rw-r--r--  src/vat/api_format.c                          12
-rw-r--r--  src/vnet/devices/virtio/vhost_user.api         6
-rw-r--r--  src/vnet/devices/virtio/vhost_user.c          55
-rw-r--r--  src/vnet/devices/virtio/vhost_user.h          35
-rw-r--r--  src/vnet/devices/virtio/vhost_user_api.c      22
-rw-r--r--  src/vnet/devices/virtio/vhost_user_inline.h   22
-rw-r--r--  src/vnet/devices/virtio/vhost_user_input.c   122
-rw-r--r--  src/vnet/devices/virtio/vhost_user_output.c   53
-rw-r--r--  src/vnet/interface_output.c                    4
-rw-r--r--  src/vpp/api/custom_dump.c                      4
-rw-r--r--  test/vpp_vhost_interface.py                    4
11 files changed, 313 insertions, 26 deletions
diff --git a/src/vat/api_format.c b/src/vat/api_format.c
index a1cd9fee527..ec21c045f5c 100644
--- a/src/vat/api_format.c
+++ b/src/vat/api_format.c
@@ -13300,6 +13300,7 @@ api_create_vhost_user_if (vat_main_t * vam)
u8 disable_mrg_rxbuf = 0;
u8 disable_indirect_desc = 0;
u8 *tag = 0;
+ u8 enable_gso = 0;
int ret;
/* Shut up coverity */
@@ -13321,6 +13322,8 @@ api_create_vhost_user_if (vat_main_t * vam)
disable_mrg_rxbuf = 1;
else if (unformat (i, "disable_indirect_desc"))
disable_indirect_desc = 1;
+ else if (unformat (i, "gso"))
+ enable_gso = 1;
else if (unformat (i, "tag %s", &tag))
;
else
@@ -13345,6 +13348,7 @@ api_create_vhost_user_if (vat_main_t * vam)
mp->is_server = is_server;
mp->disable_mrg_rxbuf = disable_mrg_rxbuf;
mp->disable_indirect_desc = disable_indirect_desc;
+ mp->enable_gso = enable_gso;
clib_memcpy (mp->sock_filename, file_name, vec_len (file_name));
vec_free (file_name);
if (custom_dev_instance != ~0)
@@ -13375,6 +13379,7 @@ api_modify_vhost_user_if (vat_main_t * vam)
u32 custom_dev_instance = ~0;
u8 sw_if_index_set = 0;
u32 sw_if_index = (u32) ~ 0;
+ u8 enable_gso = 0;
int ret;
while (unformat_check_input (i) != UNFORMAT_END_OF_INPUT)
@@ -13391,6 +13396,8 @@ api_modify_vhost_user_if (vat_main_t * vam)
;
else if (unformat (i, "server"))
is_server = 1;
+ else if (unformat (i, "gso"))
+ enable_gso = 1;
else
break;
}
@@ -13418,6 +13425,7 @@ api_modify_vhost_user_if (vat_main_t * vam)
mp->sw_if_index = ntohl (sw_if_index);
mp->is_server = is_server;
+ mp->enable_gso = enable_gso;
clib_memcpy (mp->sock_filename, file_name, vec_len (file_name));
vec_free (file_name);
if (custom_dev_instance != ~0)
@@ -22126,11 +22134,11 @@ _(l2_interface_vlan_tag_rewrite, \
"[translate-2-[1|2]] [push_dot1q 0] tag1 <nn> tag2 <nn>") \
_(create_vhost_user_if, \
"socket <filename> [server] [renumber <dev_instance>] " \
- "[disable_mrg_rxbuf] [disable_indirect_desc] " \
+ "[disable_mrg_rxbuf] [disable_indirect_desc] [gso] " \
"[mac <mac_address>]") \
_(modify_vhost_user_if, \
"<intfc> | sw_if_index <nn> socket <filename>\n" \
- "[server] [renumber <dev_instance>]") \
+ "[server] [renumber <dev_instance>] [gso]") \
_(delete_vhost_user_if, "<intfc> | sw_if_index <nn>") \
_(sw_interface_vhost_user_dump, "") \
_(show_version, "") \
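For reference, the updated VAT syntax with the new keyword (socket path and sw_if_index are illustrative):

    create_vhost_user_if socket /var/run/vpp/vhost1.sock server gso
    modify_vhost_user_if sw_if_index 1 socket /var/run/vpp/vhost1.sock gso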
diff --git a/src/vnet/devices/virtio/vhost_user.api b/src/vnet/devices/virtio/vhost_user.api
index c745b3d616b..e08c7351ddf 100644
--- a/src/vnet/devices/virtio/vhost_user.api
+++ b/src/vnet/devices/virtio/vhost_user.api
@@ -13,7 +13,7 @@
* limitations under the License.
*/
-option version = "2.0.0";
+option version = "3.0.0";
/** \brief vhost-user interface create request
@param client_index - opaque cookie to identify the sender
@@ -22,6 +22,7 @@ option version = "2.0.0";
@param use_custom_mac - enable or disable the use of the provided hardware address
@param disable_mrg_rxbuf - disable the use of merge receive buffers
@param disable_indirect_desc - disable the use of indirect descriptors which driver can use
+ @param enable_gso - enable gso support (default 0)
@param mac_address - hardware address to use if 'use_custom_mac' is set
*/
define create_vhost_user_if
@@ -33,6 +34,7 @@ define create_vhost_user_if
u8 renumber;
u8 disable_mrg_rxbuf;
u8 disable_indirect_desc;
+ u8 enable_gso;
u32 custom_dev_instance;
u8 use_custom_mac;
u8 mac_address[6];
@@ -55,6 +57,7 @@ define create_vhost_user_if_reply
@param client_index - opaque cookie to identify the sender
@param is_server - our side is socket server
@param sock_filename - unix socket filename, used to speak with frontend
+ @param enable_gso - enable gso support (default 0)
*/
autoreply define modify_vhost_user_if
{
@@ -64,6 +67,7 @@ autoreply define modify_vhost_user_if
u8 is_server;
u8 sock_filename[256];
u8 renumber;
+ u8 enable_gso;
u32 custom_dev_instance;
};
diff --git a/src/vnet/devices/virtio/vhost_user.c b/src/vnet/devices/virtio/vhost_user.c
index e26cfdfd8c8..788b348cc0b 100644
--- a/src/vnet/devices/virtio/vhost_user.c
+++ b/src/vnet/devices/virtio/vhost_user.c
@@ -463,6 +463,10 @@ vhost_user_socket_read (clib_file_t * uf)
(1ULL << FEAT_VHOST_USER_F_PROTOCOL_FEATURES) |
(1ULL << FEAT_VIRTIO_F_VERSION_1);
msg.u64 &= vui->feature_mask;
+
+ if (vui->enable_gso)
+ msg.u64 |= FEATURE_VIRTIO_NET_F_HOST_GUEST_TSO_FEATURE_BITS;
+
msg.size = sizeof (msg.u64);
vu_log_debug (vui, "if %d msg VHOST_USER_GET_FEATURES - reply "
"0x%016llx", vui->hw_if_index, msg.u64);
@@ -492,6 +496,12 @@ vhost_user_socket_read (clib_file_t * uf)
(vui->features & (1 << FEAT_VIRTIO_F_ANY_LAYOUT)) ? 1 : 0;
ASSERT (vui->virtio_net_hdr_sz < VLIB_BUFFER_PRE_DATA_SIZE);
+ vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, vui->hw_if_index);
+ if (vui->enable_gso &&
+ (vui->features & (1ULL << FEAT_VIRTIO_NET_F_GUEST_CSUM)))
+ hw->flags |= VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO;
+ else
+ hw->flags &= ~VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO;
vnet_hw_interface_set_flags (vnm, vui->hw_if_index, 0);
vui->is_ready = 0;
vhost_user_update_iface_state (vui);
@@ -1202,6 +1212,7 @@ vhost_user_term_if (vhost_user_intf_t * vui)
// disconnect interface sockets
vhost_user_if_disconnect (vui);
+ vhost_user_update_gso_interface_count (vui, 0 /* delete */ );
vhost_user_update_iface_state (vui);
for (q = 0; q < VHOST_VRING_MAX_N; q++)
@@ -1403,7 +1414,7 @@ vhost_user_vui_init (vnet_main_t * vnm,
vhost_user_intf_t * vui,
int server_sock_fd,
const char *sock_filename,
- u64 feature_mask, u32 * sw_if_index)
+ u64 feature_mask, u32 * sw_if_index, u8 enable_gso)
{
vnet_sw_interface_t *sw;
int q;
@@ -1434,6 +1445,23 @@ vhost_user_vui_init (vnet_main_t * vnm,
vui->clib_file_index = ~0;
vui->log_base_addr = 0;
vui->if_index = vui - vum->vhost_user_interfaces;
+ vui->enable_gso = enable_gso;
+ /*
+ * enable_gso takes precedence over configurable feature mask if there
+ * is a clash.
+ * if the feature mask disables gso but enable_gso is configured,
+ * then gso is enabled
+ * if the feature mask enables gso but enable_gso is not configured,
+ * then gso is enabled
+ *
+ * if gso is enabled via the feature mask, the mask must enable both the
+ * host and guest gso feature bits; we don't support one-sided or partial GSO.
+ */
+ if ((vui->enable_gso == 0) &&
+ ((feature_mask & FEATURE_VIRTIO_NET_F_HOST_GUEST_TSO_FEATURE_BITS) ==
+ (FEATURE_VIRTIO_NET_F_HOST_GUEST_TSO_FEATURE_BITS)))
+ vui->enable_gso = 1;
+ vhost_user_update_gso_interface_count (vui, 1 /* add */ );
mhash_set_mem (&vum->if_index_by_sock_name, vui->sock_filename,
&vui->if_index, 0);
@@ -1464,7 +1492,8 @@ vhost_user_create_if (vnet_main_t * vnm, vlib_main_t * vm,
u8 is_server,
u32 * sw_if_index,
u64 feature_mask,
- u8 renumber, u32 custom_dev_instance, u8 * hwaddr)
+ u8 renumber, u32 custom_dev_instance, u8 * hwaddr,
+ u8 enable_gso)
{
vhost_user_intf_t *vui = NULL;
u32 sw_if_idx = ~0;
@@ -1505,7 +1534,7 @@ vhost_user_create_if (vnet_main_t * vnm, vlib_main_t * vm,
vlib_worker_thread_barrier_release (vm);
vhost_user_vui_init (vnm, vui, server_sock_fd, sock_filename,
- feature_mask, &sw_if_idx);
+ feature_mask, &sw_if_idx, enable_gso);
vnet_sw_interface_set_mtu (vnm, vui->sw_if_index, 9000);
vhost_user_rx_thread_placement (vui, 1);
@@ -1526,7 +1555,8 @@ vhost_user_modify_if (vnet_main_t * vnm, vlib_main_t * vm,
const char *sock_filename,
u8 is_server,
u32 sw_if_index,
- u64 feature_mask, u8 renumber, u32 custom_dev_instance)
+ u64 feature_mask, u8 renumber, u32 custom_dev_instance,
+ u8 enable_gso)
{
vhost_user_main_t *vum = &vhost_user_main;
vhost_user_intf_t *vui = NULL;
@@ -1563,7 +1593,7 @@ vhost_user_modify_if (vnet_main_t * vnm, vlib_main_t * vm,
vhost_user_term_if (vui);
vhost_user_vui_init (vnm, vui, server_sock_fd,
- sock_filename, feature_mask, &sw_if_idx);
+ sock_filename, feature_mask, &sw_if_idx, enable_gso);
if (renumber)
vnet_interface_name_renumber (sw_if_idx, custom_dev_instance);
@@ -1589,17 +1619,22 @@ vhost_user_connect_command_fn (vlib_main_t * vm,
u8 hwaddr[6];
u8 *hw = NULL;
clib_error_t *error = NULL;
+ u8 enable_gso = 0;
/* Get a line of input. */
if (!unformat_user (input, unformat_line_input, line_input))
return 0;
+ /* GSO feature is disabled by default */
+ feature_mask &= ~FEATURE_VIRTIO_NET_F_HOST_GUEST_TSO_FEATURE_BITS;
while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
{
if (unformat (line_input, "socket %s", &sock_filename))
;
else if (unformat (line_input, "server"))
is_server = 1;
+ else if (unformat (line_input, "gso"))
+ enable_gso = 1;
else if (unformat (line_input, "feature-mask 0x%llx", &feature_mask))
;
else
@@ -1623,7 +1658,8 @@ vhost_user_connect_command_fn (vlib_main_t * vm,
int rv;
if ((rv = vhost_user_create_if (vnm, vm, (char *) sock_filename,
is_server, &sw_if_index, feature_mask,
- renumber, custom_dev_instance, hw)))
+ renumber, custom_dev_instance, hw,
+ enable_gso)))
{
error = clib_error_return (0, "vhost_user_create_if returned %d", rv);
goto done;
@@ -1809,8 +1845,9 @@ show_vhost_user_command_fn (vlib_main_t * vm,
vlib_cli_output (vm, "Virtio vhost-user interfaces");
vlib_cli_output (vm, "Global:\n coalesce frames %d time %e",
vum->coalesce_frames, vum->coalesce_time);
- vlib_cli_output (vm, " number of rx virtqueues in interrupt mode: %d",
+ vlib_cli_output (vm, " Number of rx virtqueues in interrupt mode: %d",
vum->ifq_count);
+ vlib_cli_output (vm, " Number of GSO interfaces: %d", vum->gso_count);
for (i = 0; i < vec_len (hw_if_indices); i++)
{
@@ -1819,6 +1856,8 @@ show_vhost_user_command_fn (vlib_main_t * vm,
vlib_cli_output (vm, "Interface: %U (ifindex %d)",
format_vnet_hw_if_index_name, vnm, hw_if_indices[i],
hw_if_indices[i]);
+ if (vui->enable_gso)
+ vlib_cli_output (vm, "  GSO enabled");
vlib_cli_output (vm, "virtio_net_hdr_sz %d\n"
" features mask (0x%llx): \n"
@@ -2025,7 +2064,7 @@ done:
VLIB_CLI_COMMAND (vhost_user_connect_command, static) = {
.path = "create vhost-user",
.short_help = "create vhost-user socket <socket-filename> [server] "
- "[feature-mask <hex>] [hwaddr <mac-addr>] [renumber <dev_instance>] ",
+ "[feature-mask <hex>] [hwaddr <mac-addr>] [renumber <dev_instance>] [gso]",
.function = vhost_user_connect_command_fn,
.is_mp_safe = 1,
};
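As a worked example of the precedence comment in vhost_user_vui_init() above: with the bit positions added in vhost_user.h below, FEATURE_VIRTIO_NET_F_HOST_GUEST_TSO_FEATURE_BITS works out to 0x5d83 (bits 0, 1, 7, 8, 10, 11, 12 and 14). The CLI clears those bits from the default feature mask, so GSO stays off unless the gso keyword is given; a user-supplied feature-mask covering all of them (for example feature-mask 0xffffffffffffffff) enables GSO even without the keyword, while a mask covering only the host half or only the guest half does not.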
diff --git a/src/vnet/devices/virtio/vhost_user.h b/src/vnet/devices/virtio/vhost_user.h
index 7dadfed2334..c7a4206fa1d 100644
--- a/src/vnet/devices/virtio/vhost_user.h
+++ b/src/vnet/devices/virtio/vhost_user.h
@@ -85,6 +85,14 @@ typedef enum
} virtio_trace_flag_t;
#define foreach_virtio_net_feature \
+ _ (VIRTIO_NET_F_CSUM, 0) \
+ _ (VIRTIO_NET_F_GUEST_CSUM, 1) \
+ _ (VIRTIO_NET_F_GUEST_TSO4, 7) \
+ _ (VIRTIO_NET_F_GUEST_TSO6, 8) \
+ _ (VIRTIO_NET_F_GUEST_UFO, 10) \
+ _ (VIRTIO_NET_F_HOST_TSO4, 11) \
+ _ (VIRTIO_NET_F_HOST_TSO6, 12) \
+ _ (VIRTIO_NET_F_HOST_UFO, 14) \
_ (VIRTIO_NET_F_MRG_RXBUF, 15) \
_ (VIRTIO_NET_F_CTRL_VQ, 17) \
_ (VIRTIO_NET_F_GUEST_ANNOUNCE, 21) \
@@ -102,14 +110,32 @@ typedef enum
#undef _
} virtio_net_feature_t;
+#define FEATURE_VIRTIO_NET_F_HOST_TSO_FEATURE_BITS \
+ ((1ULL << FEAT_VIRTIO_NET_F_CSUM) | \
+ (1ULL << FEAT_VIRTIO_NET_F_HOST_UFO) | \
+ (1ULL << FEAT_VIRTIO_NET_F_HOST_TSO4) | \
+ (1ULL << FEAT_VIRTIO_NET_F_HOST_TSO6))
+
+#define FEATURE_VIRTIO_NET_F_GUEST_TSO_FEATURE_BITS \
+ ((1ULL << FEAT_VIRTIO_NET_F_GUEST_CSUM) | \
+ (1ULL << FEAT_VIRTIO_NET_F_GUEST_UFO) | \
+ (1ULL << FEAT_VIRTIO_NET_F_GUEST_TSO4) | \
+ (1ULL << FEAT_VIRTIO_NET_F_GUEST_TSO6))
+
+#define FEATURE_VIRTIO_NET_F_HOST_GUEST_TSO_FEATURE_BITS \
+ (FEATURE_VIRTIO_NET_F_HOST_TSO_FEATURE_BITS | \
+ FEATURE_VIRTIO_NET_F_GUEST_TSO_FEATURE_BITS)
+
int vhost_user_create_if (vnet_main_t * vnm, vlib_main_t * vm,
const char *sock_filename, u8 is_server,
u32 * sw_if_index, u64 feature_mask,
- u8 renumber, u32 custom_dev_instance, u8 * hwaddr);
+ u8 renumber, u32 custom_dev_instance, u8 * hwaddr,
+ u8 enable_gso);
int vhost_user_modify_if (vnet_main_t * vnm, vlib_main_t * vm,
const char *sock_filename, u8 is_server,
u32 sw_if_index, u64 feature_mask,
- u8 renumber, u32 custom_dev_instance);
+ u8 renumber, u32 custom_dev_instance,
+ u8 enable_gso);
int vhost_user_delete_if (vnet_main_t * vnm, vlib_main_t * vm,
u32 sw_if_index);
@@ -301,6 +327,8 @@ typedef struct
/* Whether to use spinlock or per_cpu_tx_qid assignment */
u8 use_tx_spinlock;
u16 *per_cpu_tx_qid;
+
+ u8 enable_gso;
} vhost_user_intf_t;
typedef struct
@@ -357,6 +385,9 @@ typedef struct
/* logging */
vlib_log_class_t log_default;
+
+ /* gso interface count */
+ u32 gso_count;
} vhost_user_main_t;
typedef struct
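A minimal sketch (not part of the patch; the helper name is hypothetical) of how the composite mask above is meant to be tested, all-or-nothing, against a negotiated 64-bit feature word:

    static inline int
    vhost_features_have_full_gso (u64 features)
    {
      /* all host and guest TSO/CSUM bits must be present; partial GSO is not supported */
      return (features & FEATURE_VIRTIO_NET_F_HOST_GUEST_TSO_FEATURE_BITS) ==
             FEATURE_VIRTIO_NET_F_HOST_GUEST_TSO_FEATURE_BITS;
    }

This mirrors the all-bits check performed in vhost_user_vui_init() when deriving enable_gso from a user-supplied feature mask.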
diff --git a/src/vnet/devices/virtio/vhost_user_api.c b/src/vnet/devices/virtio/vhost_user_api.c
index 4c765f30961..7899fa2ae78 100644
--- a/src/vnet/devices/virtio/vhost_user_api.c
+++ b/src/vnet/devices/virtio/vhost_user_api.c
@@ -65,12 +65,18 @@ vl_api_create_vhost_user_if_t_handler (vl_api_create_vhost_user_if_t * mp)
if (mp->disable_indirect_desc)
disabled_features |= (1ULL << FEAT_VIRTIO_F_INDIRECT_DESC);
+ /*
+ * The feature mask is not supported via the binary API. We disable the GSO
+ * feature in the feature mask; it may be enabled via the enable_gso argument.
+ */
+ disabled_features |= FEATURE_VIRTIO_NET_F_HOST_GUEST_TSO_FEATURE_BITS;
features &= ~disabled_features;
rv = vhost_user_create_if (vnm, vm, (char *) mp->sock_filename,
mp->is_server, &sw_if_index, features,
mp->renumber, ntohl (mp->custom_dev_instance),
- (mp->use_custom_mac) ? mp->mac_address : NULL);
+ (mp->use_custom_mac) ? mp->mac_address : NULL,
+ mp->enable_gso);
/* Remember an interface tag for the new interface */
if (rv == 0)
@@ -99,13 +105,23 @@ vl_api_modify_vhost_user_if_t_handler (vl_api_modify_vhost_user_if_t * mp)
int rv = 0;
vl_api_modify_vhost_user_if_reply_t *rmp;
u32 sw_if_index = ntohl (mp->sw_if_index);
+ u64 features = (u64) ~ (0ULL);
+ u64 disabled_features = (u64) (0ULL);
vnet_main_t *vnm = vnet_get_main ();
vlib_main_t *vm = vlib_get_main ();
+ /*
+ * The feature mask is not supported via the binary API. We disable the GSO
+ * feature in the feature mask; it may be enabled via the enable_gso argument.
+ */
+ disabled_features |= FEATURE_VIRTIO_NET_F_HOST_GUEST_TSO_FEATURE_BITS;
+ features &= ~disabled_features;
+
rv = vhost_user_modify_if (vnm, vm, (char *) mp->sock_filename,
- mp->is_server, sw_if_index, (u64) ~ 0,
- mp->renumber, ntohl (mp->custom_dev_instance));
+ mp->is_server, sw_if_index, features,
+ mp->renumber, ntohl (mp->custom_dev_instance),
+ mp->enable_gso);
REPLY_MACRO (VL_API_MODIFY_VHOST_USER_IF_REPLY);
}
diff --git a/src/vnet/devices/virtio/vhost_user_inline.h b/src/vnet/devices/virtio/vhost_user_inline.h
index 544e2d36952..27048c66186 100644
--- a/src/vnet/devices/virtio/vhost_user_inline.h
+++ b/src/vnet/devices/virtio/vhost_user_inline.h
@@ -270,6 +270,28 @@ vui_is_link_up (vhost_user_intf_t * vui)
return vui->admin_up && vui->is_ready;
}
+static_always_inline void
+vhost_user_update_gso_interface_count (vhost_user_intf_t * vui, u8 add)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ vhost_user_main_t *vum = &vhost_user_main;
+
+ if (vui->enable_gso)
+ {
+ if (add)
+ {
+ vnm->interface_main.gso_interface_count++;
+ vum->gso_count++;
+ }
+ else
+ {
+ ASSERT (vnm->interface_main.gso_interface_count > 0);
+ vnm->interface_main.gso_interface_count--;
+ ASSERT (vum->gso_count > 0);
+ vum->gso_count--;
+ }
+ }
+}
#endif
/*
diff --git a/src/vnet/devices/virtio/vhost_user_input.c b/src/vnet/devices/virtio/vhost_user_input.c
index c4ea32814b3..22f79e60555 100644
--- a/src/vnet/devices/virtio/vhost_user_input.c
+++ b/src/vnet/devices/virtio/vhost_user_input.c
@@ -39,6 +39,7 @@
#include <vnet/devices/devices.h>
#include <vnet/feature/feature.h>
+#include <vnet/devices/virtio/virtio.h>
#include <vnet/devices/virtio/vhost_user.h>
#include <vnet/devices/virtio/vhost_user_inline.h>
@@ -243,12 +244,97 @@ vhost_user_input_rewind_buffers (vlib_main_t * vm,
cpu->rx_buffers_len++;
}
+static_always_inline void
+vhost_user_handle_rx_offload (vlib_buffer_t * b0, u8 * b0_data,
+ virtio_net_hdr_t * hdr)
+{
+ u8 l4_hdr_sz = 0;
+
+ if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
+ {
+ u8 l4_proto = 0;
+ ethernet_header_t *eh = (ethernet_header_t *) b0_data;
+ u16 ethertype = clib_net_to_host_u16 (eh->type);
+ u16 l2hdr_sz = sizeof (ethernet_header_t);
+
+ if (ethernet_frame_is_tagged (ethertype))
+ {
+ ethernet_vlan_header_t *vlan = (ethernet_vlan_header_t *) (eh + 1);
+
+ ethertype = clib_net_to_host_u16 (vlan->type);
+ l2hdr_sz += sizeof (*vlan);
+ if (ethertype == ETHERNET_TYPE_VLAN)
+ {
+ vlan++;
+ ethertype = clib_net_to_host_u16 (vlan->type);
+ l2hdr_sz += sizeof (*vlan);
+ }
+ }
+ vnet_buffer (b0)->l2_hdr_offset = 0;
+ vnet_buffer (b0)->l3_hdr_offset = l2hdr_sz;
+ vnet_buffer (b0)->l4_hdr_offset = hdr->csum_start;
+ b0->flags |= (VNET_BUFFER_F_L2_HDR_OFFSET_VALID |
+ VNET_BUFFER_F_L3_HDR_OFFSET_VALID |
+ VNET_BUFFER_F_L4_HDR_OFFSET_VALID |
+ VNET_BUFFER_F_OFFLOAD_IP_CKSUM);
+
+ if (PREDICT_TRUE (ethertype == ETHERNET_TYPE_IP4))
+ {
+ ip4_header_t *ip4 = (ip4_header_t *) (b0_data + l2hdr_sz);
+ l4_proto = ip4->protocol;
+ b0->flags |= VNET_BUFFER_F_IS_IP4;
+ }
+ else if (PREDICT_TRUE (ethertype == ETHERNET_TYPE_IP6))
+ {
+ ip6_header_t *ip6 = (ip6_header_t *) (b0_data + l2hdr_sz);
+ l4_proto = ip6->protocol;
+ b0->flags |= VNET_BUFFER_F_IS_IP6;
+ }
+
+ if (l4_proto == IP_PROTOCOL_TCP)
+ {
+ tcp_header_t *tcp = (tcp_header_t *)
+ (b0_data + vnet_buffer (b0)->l4_hdr_offset);
+ l4_hdr_sz = tcp_header_bytes (tcp);
+ tcp->checksum = 0;
+ b0->flags |= VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;
+ }
+ else if (l4_proto == IP_PROTOCOL_UDP)
+ {
+ udp_header_t *udp =
+ (udp_header_t *) (b0_data + vnet_buffer (b0)->l4_hdr_offset);
+ l4_hdr_sz = sizeof (*udp);
+ udp->checksum = 0;
+ b0->flags |= VNET_BUFFER_F_OFFLOAD_UDP_CKSUM;
+ }
+ }
+
+ if (hdr->gso_type == VIRTIO_NET_HDR_GSO_UDP)
+ {
+ vnet_buffer2 (b0)->gso_size = hdr->gso_size;
+ vnet_buffer2 (b0)->gso_l4_hdr_sz = l4_hdr_sz;
+ b0->flags |= VNET_BUFFER_F_GSO;
+ }
+ else if (hdr->gso_type == VIRTIO_NET_HDR_GSO_TCPV4)
+ {
+ vnet_buffer2 (b0)->gso_size = hdr->gso_size;
+ vnet_buffer2 (b0)->gso_l4_hdr_sz = l4_hdr_sz;
+ b0->flags |= (VNET_BUFFER_F_GSO | VNET_BUFFER_F_IS_IP4);
+ }
+ else if (hdr->gso_type == VIRTIO_NET_HDR_GSO_TCPV6)
+ {
+ vnet_buffer2 (b0)->gso_size = hdr->gso_size;
+ vnet_buffer2 (b0)->gso_l4_hdr_sz = l4_hdr_sz;
+ b0->flags |= (VNET_BUFFER_F_GSO | VNET_BUFFER_F_IS_IP6);
+ }
+}
+
static_always_inline u32
vhost_user_if_input (vlib_main_t * vm,
vhost_user_main_t * vum,
vhost_user_intf_t * vui,
u16 qid, vlib_node_runtime_t * node,
- vnet_hw_interface_rx_mode mode)
+ vnet_hw_interface_rx_mode mode, u8 enable_csum)
{
vhost_user_vring_t *txvq = &vui->vrings[VHOST_VRING_IDX_TX (qid)];
vnet_feature_main_t *fm = &feature_main;
@@ -446,7 +532,6 @@ vhost_user_if_input (vlib_main_t * vm,
if (PREDICT_FALSE (n_trace))
{
- //TODO: next_index is not exactly known at that point
vlib_trace_buffer (vm, node, next_index, b_head,
/* follow_chain */ 0);
vhost_trace_t *t0 =
@@ -459,11 +544,13 @@ vhost_user_if_input (vlib_main_t * vm,
/* This depends on the setup but is very consistent
* So I think the CPU branch predictor will make a pretty good job
* at optimizing the decision. */
+ u8 indirect = 0;
if (txvq->desc[desc_current].flags & VIRTQ_DESC_F_INDIRECT)
{
desc_table = map_guest_mem (vui, txvq->desc[desc_current].addr,
&map_hint);
desc_current = 0;
+ indirect = 1;
if (PREDICT_FALSE (desc_table == 0))
{
vlib_error_count (vm, node->node_index,
@@ -484,6 +571,27 @@ vhost_user_if_input (vlib_main_t * vm,
desc_data_offset = desc_table[desc_current].len;
}
+ if (enable_csum)
+ {
+ virtio_net_hdr_mrg_rxbuf_t *hdr;
+ u8 *b_data;
+ u16 current = desc_current;
+ u32 data_offset = desc_data_offset;
+
+ if ((data_offset == desc_table[current].len) &&
+ (desc_table[current].flags & VIRTQ_DESC_F_NEXT))
+ {
+ current = desc_table[current].next;
+ data_offset = 0;
+ }
+ hdr = map_guest_mem (vui, desc_table[current].addr, &map_hint);
+ b_data = (u8 *) hdr + data_offset;
+ if (indirect)
+ hdr = map_guest_mem (vui, desc_table[desc_current].addr,
+ &map_hint);
+ vhost_user_handle_rx_offload (b_head, b_data, &hdr->hdr);
+ }
+
while (1)
{
/* Get more input if necessary. Or end of packet. */
@@ -653,8 +761,14 @@ VLIB_NODE_FN (vhost_user_input_node) (vlib_main_t * vm,
{
vui =
pool_elt_at_index (vum->vhost_user_interfaces, dq->dev_instance);
- n_rx_packets += vhost_user_if_input (vm, vum, vui, dq->queue_id, node,
- dq->mode);
+ if (vui->features & (1ULL << FEAT_VIRTIO_NET_F_CSUM))
+ n_rx_packets +=
+ vhost_user_if_input (vm, vum, vui, dq->queue_id, node, dq->mode,
+ 1);
+ else
+ n_rx_packets +=
+ vhost_user_if_input (vm, vum, vui, dq->queue_id, node, dq->mode,
+ 0);
}
}
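The dispatch loop above passes enable_csum as a literal 0 or 1, presumably so that after inlining the compiler can drop the offload branch entirely from the non-checksum variant. A self-contained sketch of that pattern (names and bodies are stand-ins, not VPP code):

    static inline unsigned
    process (unsigned n, int enable_csum)
    {
      unsigned done = 0;
      while (n--)
        {
          if (enable_csum)      /* constant-folded per call site */
            done += 2;          /* stand-in for the rx offload work */
          else
            done += 1;          /* stand-in for the plain rx path */
        }
      return done;
    }

    /* callers pass a literal, so two specialized bodies are generated */
    unsigned rx_csum (unsigned n)    { return process (n, 1); }
    unsigned rx_no_csum (unsigned n) { return process (n, 0); }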
diff --git a/src/vnet/devices/virtio/vhost_user_output.c b/src/vnet/devices/virtio/vhost_user_output.c
index c0c54d1b13a..797c1c5ff92 100644
--- a/src/vnet/devices/virtio/vhost_user_output.c
+++ b/src/vnet/devices/virtio/vhost_user_output.c
@@ -17,6 +17,7 @@
*------------------------------------------------------------------
*/
+#include <stddef.h>
#include <fcntl.h> /* for open */
#include <sys/ioctl.h>
#include <sys/socket.h>
@@ -39,6 +40,7 @@
#include <vnet/devices/devices.h>
#include <vnet/feature/feature.h>
+#include <vnet/devices/virtio/virtio.h>
#include <vnet/devices/virtio/vhost_user.h>
#include <vnet/devices/virtio/vhost_user_inline.h>
@@ -226,6 +228,51 @@ vhost_user_tx_copy (vhost_user_intf_t * vui, vhost_copy_t * cpy,
return 0;
}
+static_always_inline void
+vhost_user_handle_tx_offload (vhost_user_intf_t * vui, vlib_buffer_t * b,
+ virtio_net_hdr_t * hdr)
+{
+ /* checksum offload */
+ if (b->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM)
+ {
+ hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
+ hdr->csum_start = vnet_buffer (b)->l4_hdr_offset;
+ hdr->csum_offset = offsetof (udp_header_t, checksum);
+ }
+ else if (b->flags & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM)
+ {
+ hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
+ hdr->csum_start = vnet_buffer (b)->l4_hdr_offset;
+ hdr->csum_offset = offsetof (tcp_header_t, checksum);
+ }
+
+ /* GSO offload */
+ if (b->flags & VNET_BUFFER_F_GSO)
+ {
+ if (b->flags & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM)
+ {
+ if ((b->flags & VNET_BUFFER_F_IS_IP4) &&
+ (vui->features & (1ULL << FEAT_VIRTIO_NET_F_GUEST_TSO4)))
+ {
+ hdr->gso_size = vnet_buffer2 (b)->gso_size;
+ hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
+ }
+ else if ((b->flags & VNET_BUFFER_F_IS_IP6) &&
+ (vui->features & (1ULL << FEAT_VIRTIO_NET_F_GUEST_TSO6)))
+ {
+ hdr->gso_size = vnet_buffer2 (b)->gso_size;
+ hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
+ }
+ }
+ else if ((vui->features & (1ULL << FEAT_VIRTIO_NET_F_GUEST_UFO)) &&
+ (b->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM))
+ {
+ hdr->gso_size = vnet_buffer2 (b)->gso_size;
+ hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
+ }
+ }
+}
+
VNET_DEVICE_CLASS_TX_FN (vhost_user_device_class) (vlib_main_t * vm,
vlib_node_runtime_t *
node, vlib_frame_t * frame)
@@ -335,9 +382,13 @@ retry:
virtio_net_hdr_mrg_rxbuf_t *hdr = &cpu->tx_headers[tx_headers_len];
tx_headers_len++;
hdr->hdr.flags = 0;
- hdr->hdr.gso_type = 0;
+ hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
hdr->num_buffers = 1; //This is local, no need to check
+ /* Guest supports csum offload? */
+ if (vui->features & (1ULL << FEAT_VIRTIO_NET_F_GUEST_CSUM))
+ vhost_user_handle_tx_offload (vui, b0, &hdr->hdr);
+
// Prepare a copy order executed later for the header
vhost_copy_t *cpy = &cpu->copy[copy_len];
copy_len++;
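To make the resulting header concrete, this is roughly what vhost_user_handle_tx_offload() above produces for an untagged IPv4 TCP buffer flagged VNET_BUFFER_F_GSO with a 1448-byte gso_size, assuming the guest negotiated GUEST_CSUM and GUEST_TSO4 (the offsets are illustrative; hdr_len is not touched by this patch):

    virtio_net_hdr_t hdr = {
      .flags = VIRTIO_NET_HDR_F_NEEDS_CSUM,
      .csum_start = 14 + 20,    /* l4_hdr_offset: Ethernet + 20-byte IPv4 header */
      .csum_offset = offsetof (tcp_header_t, checksum),
      .gso_type = VIRTIO_NET_HDR_GSO_TCPV4,
      .gso_size = 1448,
    };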
diff --git a/src/vnet/interface_output.c b/src/vnet/interface_output.c
index 1a9f5dbab8f..884c00d93cf 100644
--- a/src/vnet/interface_output.c
+++ b/src/vnet/interface_output.c
@@ -224,10 +224,6 @@ calc_checksums (vlib_main_t * vm, vlib_buffer_t * b)
ip6_tcp_udp_icmp_compute_checksum (vm, b, ip6, &bogus);
}
}
-
- b->flags &= ~VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;
- b->flags &= ~VNET_BUFFER_F_OFFLOAD_UDP_CKSUM;
- b->flags &= ~VNET_BUFFER_F_OFFLOAD_IP_CKSUM;
}
static_always_inline u16
diff --git a/src/vpp/api/custom_dump.c b/src/vpp/api/custom_dump.c
index e53d46259dc..c75837209d5 100644
--- a/src/vpp/api/custom_dump.c
+++ b/src/vpp/api/custom_dump.c
@@ -1920,6 +1920,8 @@ static void *vl_api_create_vhost_user_if_t_print
s = format (s, "disable_indirect_desc ");
if (mp->tag[0])
s = format (s, "tag %s", mp->tag);
+ if (mp->enable_gso)
+ s = format (s, "gso");
FINISH;
}
@@ -1937,6 +1939,8 @@ static void *vl_api_modify_vhost_user_if_t_print
s = format (s, "server ");
if (mp->renumber)
s = format (s, "renumber %d ", ntohl (mp->custom_dev_instance));
+ if (mp->enable_gso)
+ s = format (s, "gso");
FINISH;
}
diff --git a/test/vpp_vhost_interface.py b/test/vpp_vhost_interface.py
index 0a5f6b72b2f..569fe36d1d6 100644
--- a/test/vpp_vhost_interface.py
+++ b/test/vpp_vhost_interface.py
@@ -5,7 +5,7 @@ class VppVhostInterface(VppInterface):
"""VPP vhost interface."""
def __init__(self, test, sock_filename, is_server=0, renumber=0,
- disable_mrg_rxbuf=0, disable_indirect_desc=0,
+ disable_mrg_rxbuf=0, disable_indirect_desc=0, gso=0,
custom_dev_instance=0, use_custom_mac=0, mac_address='',
tag=''):
@@ -16,6 +16,7 @@ class VppVhostInterface(VppInterface):
self.renumber = renumber
self.disable_mrg_rxbuf = disable_mrg_rxbuf
self.disable_indirect_desc = disable_indirect_desc
+ self.gso = gso
self.custom_dev_instance = custom_dev_instance
self.use_custom_mac = use_custom_mac
self.mac_address = mac_address
@@ -27,6 +28,7 @@ class VppVhostInterface(VppInterface):
self.renumber,
self.disable_mrg_rxbuf,
self.disable_indirect_desc,
+ self.gso,
self.custom_dev_instance,
self.use_custom_mac,
self.mac_address,