author    Steven Luong <sluong@cisco.com>    2020-11-17 13:30:44 -0800
committer Benoît Ganne <bganne@cisco.com>    2021-01-08 13:10:58 +0000
commit 27ba5008a16eddccc0b285272de7f89fd0aa3a24 (patch)
tree   3065040b62ee15ac45b1c1d77dd7421dcb558a5d /src/vnet/devices
parent ed4b38e868c7cabb8e88cc0f1254c95a8f1c5939 (diff)
vhost: Add event index for interrupt notification to driver
VPP supports only a poor man's approach for interrupt notification to the driver: a simple binary flag for "interrupt needed" or "interrupt not needed". Most drivers already support the more sophisticated event index mechanism. This patch adds that long-overdue feature and makes it configurable, off by default.

Type: feature

Signed-off-by: Steven Luong <sluong@cisco.com>
Change-Id: I68dab7dd07045cafb49af97b7f70db9b8131ae03
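For context, the difference between the two notification schemes can be sketched as follows: with the legacy flag the driver can only say "interrupt me" or "don't", while with VIRTIO_RING_F_EVENT_IDX it publishes the exact used-ring index at which it wants the next interrupt, letting the device skip redundant kicks. A minimal, self-contained illustration of the two checks (names are illustrative, not VPP's; the comparison mirrors the spec's vring_need_event() and the vhost_user_need_event() helper added below):

#include <stdbool.h>
#include <stdint.h>

#define VRING_AVAIL_F_NO_INTERRUPT 1

/* Legacy scheme: a single on/off bit in the avail ring flags. */
static bool
need_interrupt_flag (uint16_t avail_flags)
{
  return !(avail_flags & VRING_AVAIL_F_NO_INTERRUPT);
}

/* Event-index scheme: the driver writes the used-ring index at which it
 * wants the next interrupt; kick only when the used index crosses it.
 * The unsigned-wrap comparison is the spec's vring_need_event() rule. */
static bool
need_interrupt_event_idx (uint16_t event_idx, uint16_t new_used_idx,
                          uint16_t old_used_idx)
{
  return (uint16_t) (new_used_idx - event_idx - 1) <
         (uint16_t) (new_used_idx - old_used_idx);
}

In the patch, vhost_user_send_call() chooses between these strategies based on the negotiated features, falling back to a plain kick when VIRTIO_RING_F_EVENT_IDX was not negotiated.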
Diffstat (limited to 'src/vnet/devices')
-rw-r--r--  src/vnet/devices/virtio/vhost_user.api          73
-rw-r--r--  src/vnet/devices/virtio/vhost_user.c           261
-rw-r--r--  src/vnet/devices/virtio/vhost_user.h            31
-rw-r--r--  src/vnet/devices/virtio/vhost_user_api.c       158
-rw-r--r--  src/vnet/devices/virtio/vhost_user_inline.h    133
-rw-r--r--  src/vnet/devices/virtio/vhost_user_input.c      15
-rw-r--r--  src/vnet/devices/virtio/vhost_user_output.c     13
-rw-r--r--  src/vnet/devices/virtio/virtio_std.h            14
8 files changed, 506 insertions, 192 deletions
diff --git a/src/vnet/devices/virtio/vhost_user.api b/src/vnet/devices/virtio/vhost_user.api
index 338fd710bc0..b026ba768a9 100644
--- a/src/vnet/devices/virtio/vhost_user.api
+++ b/src/vnet/devices/virtio/vhost_user.api
@@ -13,7 +13,7 @@
* limitations under the License.
*/
-option version = "4.0.1";
+option version = "4.1.1";
import "vnet/interface_types.api";
import "vnet/ethernet/ethernet_types.api";
@@ -32,6 +32,7 @@ import "vnet/devices/virtio/virtio_types.api";
*/
define create_vhost_user_if
{
+ option deprecated;
u32 client_index;
u32 context;
bool is_server;
@@ -54,6 +55,7 @@ define create_vhost_user_if
*/
define create_vhost_user_if_reply
{
+ option deprecated;
u32 context;
i32 retval;
vl_api_interface_index_t sw_if_index;
@@ -68,6 +70,7 @@ define create_vhost_user_if_reply
*/
autoreply define modify_vhost_user_if
{
+ option deprecated;
u32 client_index;
u32 context;
vl_api_interface_index_t sw_if_index;
@@ -79,6 +82,74 @@ autoreply define modify_vhost_user_if
u32 custom_dev_instance;
};
+/** \brief vhost-user interface create request
+ @param client_index - opaque cookie to identify the sender
+ @param is_server - our side is socket server
+ @param sock_filename - unix socket filename, used to speak with frontend
+ @param use_custom_mac - enable or disable the use of the provided hardware address
+ @param disable_mrg_rxbuf - disable the use of merge receive buffers
+ @param disable_indirect_desc - disable the use of indirect descriptors which the driver can use
+ @param enable_gso - enable gso support (default 0)
+ @param enable_packed - enable packed ring support (default 0)
+ @param enable_event_idx - enable event_idx support (default 0)
+ @param mac_address - hardware address to use if 'use_custom_mac' is set
+ @param renumber - if true, custom_dev_instance is valid
+ @param custom_dev_instance - custom device instance number
+*/
+define create_vhost_user_if_v2
+{
+ u32 client_index;
+ u32 context;
+ bool is_server;
+ string sock_filename[256];
+ bool renumber;
+ bool disable_mrg_rxbuf;
+ bool disable_indirect_desc;
+ bool enable_gso;
+ bool enable_packed;
+ bool enable_event_idx;
+ u32 custom_dev_instance;
+ bool use_custom_mac;
+ vl_api_mac_address_t mac_address;
+ string tag[64];
+};
+
+/** \brief vhost-user interface create response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the request
+ @param sw_if_index - interface the operation is applied to
+*/
+define create_vhost_user_if_v2_reply
+{
+ u32 context;
+ i32 retval;
+ vl_api_interface_index_t sw_if_index;
+};
+
+/** \brief vhost-user interface modify request
+ @param client_index - opaque cookie to identify the sender
+ @param is_server - our side is socket server
+ @param sock_filename - unix socket filename, used to speak with frontend
+ @param enable_gso - enable gso support (default 0)
+ @param enable_packed - enable packed ring support (default 0)
+ @param enable_event_idx - enable event idx support (default 0)
+ @param renumber - if true, custom_dev_instance is valid
+ @param custom_dev_instance - custom device instance number
+*/
+autoreply define modify_vhost_user_if_v2
+{
+ u32 client_index;
+ u32 context;
+ vl_api_interface_index_t sw_if_index;
+ bool is_server;
+ string sock_filename[256];
+ bool renumber;
+ bool enable_gso;
+ bool enable_packed;
+ bool enable_event_idx;
+ u32 custom_dev_instance;
+};
+
/** \brief vhost-user interface delete request
@param client_index - opaque cookie to identify the sender
*/
diff --git a/src/vnet/devices/virtio/vhost_user.c b/src/vnet/devices/virtio/vhost_user.c
index 573d6579cea..daa126064c5 100644
--- a/src/vnet/devices/virtio/vhost_user.c
+++ b/src/vnet/devices/virtio/vhost_user.c
@@ -467,6 +467,8 @@ vhost_user_socket_read (clib_file_t * uf)
VIRTIO_FEATURE (VIRTIO_F_VERSION_1);
msg.u64 &= vui->feature_mask;
+ if (vui->enable_event_idx)
+ msg.u64 |= VIRTIO_FEATURE (VIRTIO_RING_F_EVENT_IDX);
if (vui->enable_gso)
msg.u64 |= FEATURE_VIRTIO_NET_F_HOST_GUEST_TSO_FEATURE_BITS;
if (vui->enable_packed)
@@ -664,6 +666,8 @@ vhost_user_socket_read (clib_file_t * uf)
vui->vrings[msg.state.index].last_used_idx =
vui->vrings[msg.state.index].last_avail_idx =
vui->vrings[msg.state.index].used->idx;
+ vui->vrings[msg.state.index].last_kick =
+ vui->vrings[msg.state.index].last_used_idx;
/* tell driver that we don't want interrupts */
if (vhost_user_is_packed_ring_supported (vui))
@@ -865,6 +869,8 @@ vhost_user_socket_read (clib_file_t * uf)
*/
vui->vrings[msg.state.index].last_used_idx =
vui->vrings[msg.state.index].last_avail_idx;
+ vui->vrings[msg.state.index].last_kick =
+ vui->vrings[msg.state.index].last_used_idx;
vui->vrings[msg.state.index].used_wrap_counter =
vui->vrings[msg.state.index].avail_wrap_counter;
@@ -1196,7 +1202,7 @@ vhost_user_send_interrupt_process (vlib_main_t * vm,
if (txvq->n_since_last_int)
{
if (now >= txvq->int_deadline)
- vhost_user_send_call (vm, txvq);
+ vhost_user_send_call (vm, vui, txvq);
else
next_timeout = txvq->int_deadline - now;
}
@@ -1204,7 +1210,7 @@ vhost_user_send_interrupt_process (vlib_main_t * vm,
if (rxvq->n_since_last_int)
{
if (now >= rxvq->int_deadline)
- vhost_user_send_call (vm, rxvq);
+ vhost_user_send_call (vm, vui, rxvq);
else
next_timeout = rxvq->int_deadline - now;
}
@@ -1560,12 +1566,9 @@ vhost_user_create_ethernet (vnet_main_t * vnm, vlib_main_t * vm,
* Initialize vui with specified attributes
*/
static void
-vhost_user_vui_init (vnet_main_t * vnm,
- vhost_user_intf_t * vui,
- int server_sock_fd,
- const char *sock_filename,
- u64 feature_mask, u32 * sw_if_index, u8 enable_gso,
- u8 enable_packed)
+vhost_user_vui_init (vnet_main_t * vnm, vhost_user_intf_t * vui,
+ int server_sock_fd, vhost_user_create_if_args_t * args,
+ u32 * sw_if_index)
{
vnet_sw_interface_t *sw;
int q;
@@ -1589,16 +1592,17 @@ vhost_user_vui_init (vnet_main_t * vnm,
}
vui->sw_if_index = sw->sw_if_index;
- strncpy (vui->sock_filename, sock_filename,
+ strncpy (vui->sock_filename, args->sock_filename,
ARRAY_LEN (vui->sock_filename) - 1);
vui->sock_errno = 0;
vui->is_ready = 0;
- vui->feature_mask = feature_mask;
+ vui->feature_mask = args->feature_mask;
vui->clib_file_index = ~0;
vui->log_base_addr = 0;
vui->if_index = vui - vum->vhost_user_interfaces;
- vui->enable_gso = enable_gso;
- vui->enable_packed = enable_packed;
+ vui->enable_gso = args->enable_gso;
+ vui->enable_event_idx = args->enable_event_idx;
+ vui->enable_packed = args->enable_packed;
/*
* enable_gso takes precedence over configurable feature mask if there
* is a clash.
@@ -1611,8 +1615,8 @@ vhost_user_vui_init (vnet_main_t * vnm,
* gso feature mask, we don't support one sided GSO or partial GSO.
*/
if ((vui->enable_gso == 0) &&
- ((feature_mask & FEATURE_VIRTIO_NET_F_HOST_GUEST_TSO_FEATURE_BITS) ==
- (FEATURE_VIRTIO_NET_F_HOST_GUEST_TSO_FEATURE_BITS)))
+ ((args->feature_mask & FEATURE_VIRTIO_NET_F_HOST_GUEST_TSO_FEATURE_BITS)
+ == (FEATURE_VIRTIO_NET_F_HOST_GUEST_TSO_FEATURE_BITS)))
vui->enable_gso = 1;
vhost_user_update_gso_interface_count (vui, 1 /* add */ );
mhash_set_mem (&vum->if_index_by_sock_name, vui->sock_filename,
@@ -1637,12 +1641,7 @@ vhost_user_vui_init (vnet_main_t * vnm,
int
vhost_user_create_if (vnet_main_t * vnm, vlib_main_t * vm,
- const char *sock_filename,
- u8 is_server,
- u32 * sw_if_index,
- u64 feature_mask,
- u8 renumber, u32 custom_dev_instance, u8 * hwaddr,
- u8 enable_gso, u8 enable_packed)
+ vhost_user_create_if_args_t * args)
{
vhost_user_intf_t *vui = NULL;
u32 sw_if_idx = ~0;
@@ -1651,26 +1650,25 @@ vhost_user_create_if (vnet_main_t * vnm, vlib_main_t * vm,
vhost_user_main_t *vum = &vhost_user_main;
uword *if_index;
- if (sock_filename == NULL || !(strlen (sock_filename) > 0))
+ if (args->sock_filename == NULL || !(strlen (args->sock_filename) > 0))
{
return VNET_API_ERROR_INVALID_ARGUMENT;
}
- if_index = mhash_get (&vum->if_index_by_sock_name, (void *) sock_filename);
+ if_index = mhash_get (&vum->if_index_by_sock_name,
+ (void *) args->sock_filename);
if (if_index)
{
- if (sw_if_index)
- {
- vui = &vum->vhost_user_interfaces[*if_index];
- *sw_if_index = vui->sw_if_index;
- }
+ vui = &vum->vhost_user_interfaces[*if_index];
+ args->sw_if_index = vui->sw_if_index;
return VNET_API_ERROR_IF_ALREADY_EXISTS;
}
- if (is_server)
+ if (args->is_server)
{
if ((rv =
- vhost_user_init_server_sock (sock_filename, &server_sock_fd)) != 0)
+ vhost_user_init_server_sock (args->sock_filename,
+ &server_sock_fd)) != 0)
{
return rv;
}
@@ -1679,19 +1677,17 @@ vhost_user_create_if (vnet_main_t * vnm, vlib_main_t * vm,
/* Protect the uninitialized vui from being dispatched by rx/tx */
vlib_worker_thread_barrier_sync (vm);
pool_get (vhost_user_main.vhost_user_interfaces, vui);
- vhost_user_create_ethernet (vnm, vm, vui, hwaddr);
+ vhost_user_create_ethernet (vnm, vm, vui, args->hwaddr);
vlib_worker_thread_barrier_release (vm);
- vhost_user_vui_init (vnm, vui, server_sock_fd, sock_filename,
- feature_mask, &sw_if_idx, enable_gso, enable_packed);
+ vhost_user_vui_init (vnm, vui, server_sock_fd, args, &sw_if_idx);
vnet_sw_interface_set_mtu (vnm, vui->sw_if_index, 9000);
vhost_user_rx_thread_placement (vui, 1);
- if (renumber)
- vnet_interface_name_renumber (sw_if_idx, custom_dev_instance);
+ if (args->renumber)
+ vnet_interface_name_renumber (sw_if_idx, args->custom_dev_instance);
- if (sw_if_index)
- *sw_if_index = sw_if_idx;
+ args->sw_if_index = sw_if_idx;
// Process node must connect
vlib_process_signal_event (vm, vhost_user_process_node.index, 0, 0);
@@ -1701,11 +1697,7 @@ vhost_user_create_if (vnet_main_t * vnm, vlib_main_t * vm,
int
vhost_user_modify_if (vnet_main_t * vnm, vlib_main_t * vm,
- const char *sock_filename,
- u8 is_server,
- u32 sw_if_index,
- u64 feature_mask, u8 renumber, u32 custom_dev_instance,
- u8 enable_gso, u8 enable_packed)
+ vhost_user_create_if_args_t * args)
{
vhost_user_main_t *vum = &vhost_user_main;
vhost_user_intf_t *vui = NULL;
@@ -1715,13 +1707,12 @@ vhost_user_modify_if (vnet_main_t * vnm, vlib_main_t * vm,
vnet_hw_interface_t *hwif;
uword *if_index;
- if (!
- (hwif =
- vnet_get_sup_hw_interface_api_visible_or_null (vnm, sw_if_index))
+ if (!(hwif = vnet_get_sup_hw_interface_api_visible_or_null (vnm,
+ args->sw_if_index))
|| hwif->dev_class_index != vhost_user_device_class.index)
return VNET_API_ERROR_INVALID_SW_IF_INDEX;
- if (sock_filename == NULL || !(strlen (sock_filename) > 0))
+ if (args->sock_filename == NULL || !(strlen (args->sock_filename) > 0))
return VNET_API_ERROR_INVALID_ARGUMENT;
vui = vec_elt_at_index (vum->vhost_user_interfaces, hwif->dev_instance);
@@ -1730,23 +1721,22 @@ vhost_user_modify_if (vnet_main_t * vnm, vlib_main_t * vm,
* Disallow changing the interface to have the same path name
* as other interface
*/
- if_index = mhash_get (&vum->if_index_by_sock_name, (void *) sock_filename);
+ if_index = mhash_get (&vum->if_index_by_sock_name,
+ (void *) args->sock_filename);
if (if_index && (*if_index != vui->if_index))
return VNET_API_ERROR_IF_ALREADY_EXISTS;
// First try to open server socket
- if (is_server)
- if ((rv = vhost_user_init_server_sock (sock_filename,
+ if (args->is_server)
+ if ((rv = vhost_user_init_server_sock (args->sock_filename,
&server_sock_fd)) != 0)
return rv;
vhost_user_term_if (vui);
- vhost_user_vui_init (vnm, vui, server_sock_fd,
- sock_filename, feature_mask, &sw_if_idx, enable_gso,
- enable_packed);
+ vhost_user_vui_init (vnm, vui, server_sock_fd, args, &sw_if_idx);
- if (renumber)
- vnet_interface_name_renumber (sw_if_idx, custom_dev_instance);
+ if (args->renumber)
+ vnet_interface_name_renumber (sw_if_idx, args->custom_dev_instance);
// Process node must connect
vlib_process_signal_event (vm, vhost_user_process_node.index, 0, 0);
@@ -1759,46 +1749,46 @@ vhost_user_connect_command_fn (vlib_main_t * vm,
unformat_input_t * input,
vlib_cli_command_t * cmd)
{
+ vnet_main_t *vnm = vnet_get_main ();
unformat_input_t _line_input, *line_input = &_line_input;
- u8 *sock_filename = NULL;
- u32 sw_if_index;
- u8 is_server = 0;
- u64 feature_mask = (u64) ~ (0ULL);
- u8 renumber = 0;
- u32 custom_dev_instance = ~0;
- u8 hwaddr[6];
- u8 *hw = NULL;
clib_error_t *error = NULL;
- u8 enable_gso = 0, enable_packed = 0;
+ vhost_user_create_if_args_t args = { 0 };
+ int rv;
/* Get a line of input. */
if (!unformat_user (input, unformat_line_input, line_input))
return 0;
+ args.feature_mask = (u64) ~ (0ULL);
+ args.custom_dev_instance = ~0;
/* GSO feature is disable by default */
- feature_mask &= ~FEATURE_VIRTIO_NET_F_HOST_GUEST_TSO_FEATURE_BITS;
+ args.feature_mask &= ~FEATURE_VIRTIO_NET_F_HOST_GUEST_TSO_FEATURE_BITS;
/* packed-ring feature is disable by default */
- feature_mask &= ~VIRTIO_FEATURE (VIRTIO_F_RING_PACKED);
+ args.feature_mask &= ~VIRTIO_FEATURE (VIRTIO_F_RING_PACKED);
+ /* event_idx feature is disabled by default */
+ args.feature_mask &= ~VIRTIO_FEATURE (VIRTIO_RING_F_EVENT_IDX);
+
while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
{
- if (unformat (line_input, "socket %s", &sock_filename))
+ if (unformat (line_input, "socket %s", &args.sock_filename))
;
else if (unformat (line_input, "server"))
- is_server = 1;
+ args.is_server = 1;
else if (unformat (line_input, "gso"))
- enable_gso = 1;
+ args.enable_gso = 1;
else if (unformat (line_input, "packed"))
- enable_packed = 1;
- else if (unformat (line_input, "feature-mask 0x%llx", &feature_mask))
+ args.enable_packed = 1;
+ else if (unformat (line_input, "event-idx"))
+ args.enable_event_idx = 1;
+ else if (unformat (line_input, "feature-mask 0x%llx",
+ &args.feature_mask))
;
- else
- if (unformat
- (line_input, "hwaddr %U", unformat_ethernet_address, hwaddr))
- hw = hwaddr;
- else if (unformat (line_input, "renumber %d", &custom_dev_instance))
- {
- renumber = 1;
- }
+ else if (unformat (line_input, "hwaddr %U", unformat_ethernet_address,
+ args.hwaddr))
+ ;
+ else if (unformat (line_input, "renumber %d",
+ &args.custom_dev_instance))
+ args.renumber = 1;
else
{
error = clib_error_return (0, "unknown input `%U'",
@@ -1807,23 +1797,17 @@ vhost_user_connect_command_fn (vlib_main_t * vm,
}
}
- vnet_main_t *vnm = vnet_get_main ();
-
- int rv;
- if ((rv = vhost_user_create_if (vnm, vm, (char *) sock_filename,
- is_server, &sw_if_index, feature_mask,
- renumber, custom_dev_instance, hw,
- enable_gso, enable_packed)))
+ if ((rv = vhost_user_create_if (vnm, vm, &args)))
{
error = clib_error_return (0, "vhost_user_create_if returned %d", rv);
goto done;
}
- vlib_cli_output (vm, "%U\n", format_vnet_sw_if_index_name, vnet_get_main (),
- sw_if_index);
+ vlib_cli_output (vm, "%U\n", format_vnet_sw_if_index_name, vnm,
+ args.sw_if_index);
done:
- vec_free (sock_filename);
+ vec_free (args.sock_filename);
unformat_free (line_input);
return error;
@@ -1937,26 +1921,14 @@ format_vhost_user_desc (u8 * s, va_list * args)
return s;
}
-static u8 *
-format_vhost_user_vring (u8 * s, va_list * args)
-{
- char *fmt = va_arg (*args, char *);
- vhost_user_intf_t *vui = va_arg (*args, vhost_user_intf_t *);
- int q = va_arg (*args, int);
-
- s = format (s, fmt, vui->vrings[q].avail->flags, vui->vrings[q].avail->idx,
- vui->vrings[q].used->flags, vui->vrings[q].used->idx);
- return s;
-}
-
static void
-vhost_user_show_fds (vlib_main_t * vm, vhost_user_intf_t * vui, int q)
+vhost_user_show_fds (vlib_main_t * vm, vhost_user_vring_t * vq)
{
- int kickfd = UNIX_GET_FD (vui->vrings[q].kickfd_idx);
- int callfd = UNIX_GET_FD (vui->vrings[q].callfd_idx);
+ int kickfd = UNIX_GET_FD (vq->kickfd_idx);
+ int callfd = UNIX_GET_FD (vq->callfd_idx);
vlib_cli_output (vm, " kickfd %d callfd %d errfd %d\n", kickfd, callfd,
- vui->vrings[q].errfd);
+ vq->errfd);
}
static void
@@ -1968,13 +1940,15 @@ vhost_user_show_desc (vlib_main_t * vm, vhost_user_intf_t * vui, int q,
u32 idx;
u32 n_entries;
vring_desc_t *desc_table;
+ vhost_user_vring_t *vq = &vui->vrings[q];
- if (vui->vrings[q].avail && vui->vrings[q].used)
- vlib_cli_output (vm, "%U", format_vhost_user_vring,
- " avail.flags %x avail.idx %d used.flags %x used.idx %d\n",
- vui, q);
+ if (vq->avail && vq->used)
+ vlib_cli_output (vm, " avail.flags %x avail event idx %u avail.idx %d "
+ "used event idx %u used.idx %d\n", vq->avail->flags,
+ vhost_user_avail_event_idx (vq), vq->avail->idx,
+ vhost_user_used_event_idx (vq), vq->used->idx);
- vhost_user_show_fds (vm, vui, q);
+ vhost_user_show_fds (vm, vq);
if (show_descr)
{
@@ -1985,9 +1959,9 @@ vhost_user_show_desc (vlib_main_t * vm, vhost_user_intf_t * vui, int q,
vlib_cli_output (vm,
" ===== ================== ===== ====== ===== "
"==================\n");
- for (j = 0; j < vui->vrings[q].qsz_mask + 1; j++)
+ for (j = 0; j < vq->qsz_mask + 1; j++)
{
- desc_table = vui->vrings[q].desc;
+ desc_table = vq->desc;
vlib_cli_output (vm, "%U", format_vhost_user_desc,
" %-5d 0x%016lx %-5d 0x%04x %-5d 0x%016lx\n", vui,
desc_table, j, &mem_hint);
@@ -2030,18 +2004,25 @@ format_vhost_user_packed_desc (u8 * s, va_list * args)
}
static u8 *
-format_vhost_user_vring_packed (u8 * s, va_list * args)
+format_vhost_user_event_idx_flags (u8 * s, va_list * args)
{
- char *fmt = va_arg (*args, char *);
- vhost_user_intf_t *vui = va_arg (*args, vhost_user_intf_t *);
- int q = va_arg (*args, int);
-
- s = format (s, fmt, vui->vrings[q].avail_event->flags,
- vui->vrings[q].avail_event->off_wrap,
- vui->vrings[q].used_event->flags,
- vui->vrings[q].used_event->off_wrap,
- vui->vrings[q].avail_wrap_counter,
- vui->vrings[q].used_wrap_counter);
+ u32 flags = va_arg (*args, u32);
+ typedef struct
+ {
+ u8 value;
+ char *str;
+ } event_idx_flags;
+ static event_idx_flags event_idx_array[] = {
+#define _(s,v) { .str = #s, .value = v, },
+ foreach_virtio_event_idx_flags
+#undef _
+ };
+ u32 num_entries = sizeof (event_idx_array) / sizeof (event_idx_flags);
+
+ if (flags < num_entries)
+ s = format (s, "%s", event_idx_array[flags].str);
+ else
+ s = format (s, "%u", flags);
return s;
}
@@ -2054,15 +2035,25 @@ vhost_user_show_desc_packed (vlib_main_t * vm, vhost_user_intf_t * vui, int q,
u32 idx;
u32 n_entries;
vring_packed_desc_t *desc_table;
+ vhost_user_vring_t *vq = &vui->vrings[q];
+ u16 off_wrap, event_idx;
+
+ off_wrap = vq->avail_event->off_wrap;
+ event_idx = off_wrap & 0x7fff;
+ vlib_cli_output (vm, " avail_event.flags %U avail_event.off_wrap %u "
+ "avail event idx %u\n", format_vhost_user_event_idx_flags,
+ (u32) vq->avail_event->flags, off_wrap, event_idx);
+
+ off_wrap = vq->used_event->off_wrap;
+ event_idx = off_wrap & 0x7fff;
+ vlib_cli_output (vm, " used_event.flags %U used_event.off_wrap %u "
+ "used event idx %u\n", format_vhost_user_event_idx_flags,
+ (u32) vq->used_event->flags, off_wrap, event_idx);
- if (vui->vrings[q].avail_event && vui->vrings[q].used_event)
- vlib_cli_output (vm, "%U", format_vhost_user_vring_packed,
- " avail_event.flags %x avail_event.off_wrap %u "
- "used_event.flags %x used_event.off_wrap %u\n"
- " avail wrap counter %u, used wrap counter %u\n",
- vui, q);
+ vlib_cli_output (vm, " avail wrap counter %u, used wrap counter %u\n",
+ vq->avail_wrap_counter, vq->used_wrap_counter);
- vhost_user_show_fds (vm, vui, q);
+ vhost_user_show_fds (vm, vq);
if (show_descr)
{
@@ -2073,9 +2064,9 @@ vhost_user_show_desc_packed (vlib_main_t * vm, vhost_user_intf_t * vui, int q,
vlib_cli_output (vm,
" ===== ================== ===== ====== ===== "
"==================\n");
- for (j = 0; j < vui->vrings[q].qsz_mask + 1; j++)
+ for (j = 0; j < vq->qsz_mask + 1; j++)
{
- desc_table = vui->vrings[q].packed_desc;
+ desc_table = vq->packed_desc;
vlib_cli_output (vm, "%U", format_vhost_user_packed_desc,
" %-5u 0x%016lx %-5u 0x%04x %-5u 0x%016lx\n", vui,
desc_table, j, &mem_hint);
@@ -2191,6 +2182,8 @@ show_vhost_user_command_fn (vlib_main_t * vm,
vlib_cli_output (vm, " GSO enable");
if (vui->enable_packed)
vlib_cli_output (vm, " Packed ring enable");
+ if (vui->enable_event_idx)
+ vlib_cli_output (vm, " Event index enable");
vlib_cli_output (vm, "virtio_net_hdr_sz %d\n"
" features mask (0x%llx): \n"
@@ -2287,10 +2280,12 @@ show_vhost_user_command_fn (vlib_main_t * vm,
vui->vrings[q].enabled ? "" : " disabled");
vlib_cli_output (vm,
- " qsz %d last_avail_idx %d last_used_idx %d\n",
+ " qsz %d last_avail_idx %d last_used_idx %d"
+ " last_kick %u\n",
vui->vrings[q].qsz_mask + 1,
vui->vrings[q].last_avail_idx,
- vui->vrings[q].last_used_idx);
+ vui->vrings[q].last_used_idx,
+ vui->vrings[q].last_kick);
if (vhost_user_is_packed_ring_supported (vui))
vhost_user_show_desc_packed (vm, vui, q, show_descr,
@@ -2368,7 +2363,7 @@ VLIB_CLI_COMMAND (vhost_user_connect_command, static) = {
.path = "create vhost-user",
.short_help = "create vhost-user socket <socket-filename> [server] "
"[feature-mask <hex>] [hwaddr <mac-addr>] [renumber <dev_instance>] [gso] "
- "[packed]",
+ "[packed] [event-idx]",
.function = vhost_user_connect_command_fn,
.is_mp_safe = 1,
};
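With the CLI change above, event index can be requested at interface creation time. A typical debug-CLI session might look like this (the socket path and the resulting interface name are illustrative):

vpp# create vhost-user socket /tmp/vhost1.sock server event-idx
VirtualEthernet0/0/0

Once the interface is up, "show vhost-user" reports "Event index enable" for the interface and a per-queue last_kick value, per the changes to show_vhost_user_command_fn above.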
diff --git a/src/vnet/devices/virtio/vhost_user.h b/src/vnet/devices/virtio/vhost_user.h
index eecfd2d60e5..604e5571141 100644
--- a/src/vnet/devices/virtio/vhost_user.h
+++ b/src/vnet/devices/virtio/vhost_user.h
@@ -97,16 +97,27 @@ typedef enum
(FEATURE_VIRTIO_NET_F_HOST_TSO_FEATURE_BITS | \
FEATURE_VIRTIO_NET_F_GUEST_TSO_FEATURE_BITS)
+
+typedef struct
+{
+ char *sock_filename;
+ u64 feature_mask;
+ u32 custom_dev_instance;
+ u8 hwaddr[6];
+ u8 renumber;
+ u8 is_server;
+ u8 enable_gso;
+ u8 enable_packed;
+ u8 enable_event_idx;
+
+ /* return */
+ u32 sw_if_index;
+} vhost_user_create_if_args_t;
+
int vhost_user_create_if (vnet_main_t * vnm, vlib_main_t * vm,
- const char *sock_filename, u8 is_server,
- u32 * sw_if_index, u64 feature_mask,
- u8 renumber, u32 custom_dev_instance, u8 * hwaddr,
- u8 enable_gso, u8 enable_packed);
+ vhost_user_create_if_args_t * args);
int vhost_user_modify_if (vnet_main_t * vnm, vlib_main_t * vm,
- const char *sock_filename, u8 is_server,
- u32 sw_if_index, u64 feature_mask,
- u8 renumber, u32 custom_dev_instance,
- u8 enable_gso, u8 enable_packed);
+ vhost_user_create_if_args_t * args);
int vhost_user_delete_if (vnet_main_t * vnm, vlib_main_t * vm,
u32 sw_if_index);
@@ -216,6 +227,9 @@ typedef struct
u16 used_wrap_counter;
u16 avail_wrap_counter;
+
+ u16 last_kick;
+ u8 first_kick;
} vhost_user_vring_t;
#define VHOST_USER_EVENT_START_TIMER 1
@@ -272,6 +286,7 @@ typedef struct
/* Packed ring configured */
u8 enable_packed;
+ u8 enable_event_idx;
} vhost_user_intf_t;
typedef struct
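The new vhost_user_create_if_args_t structure replaces the long parameter lists of vhost_user_create_if() and vhost_user_modify_if(). A caller inside VPP would fill it in much like the CLI handler does; a minimal sketch (the socket path is an assumption, error handling elided):

vhost_user_create_if_args_t args = { 0 };
int rv;

args.sock_filename = "/tmp/vhost1.sock";   /* illustrative path */
args.is_server = 1;
args.feature_mask = (u64) ~(0ULL);
/* mirror the CLI defaults: GSO, packed ring and event index start masked off */
args.feature_mask &= ~FEATURE_VIRTIO_NET_F_HOST_GUEST_TSO_FEATURE_BITS;
args.feature_mask &= ~VIRTIO_FEATURE (VIRTIO_F_RING_PACKED);
args.feature_mask &= ~VIRTIO_FEATURE (VIRTIO_RING_F_EVENT_IDX);
args.custom_dev_instance = ~0;
args.enable_event_idx = 1;   /* offer VIRTIO_RING_F_EVENT_IDX during negotiation */

rv = vhost_user_create_if (vnet_get_main (), vlib_get_main (), &args);
if (rv == 0)
  {
    /* args.sw_if_index now holds the sw_if_index of the new interface */
  }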
diff --git a/src/vnet/devices/virtio/vhost_user_api.c b/src/vnet/devices/virtio/vhost_user_api.c
index ec335c529f2..a4e027f214e 100644
--- a/src/vnet/devices/virtio/vhost_user_api.c
+++ b/src/vnet/devices/virtio/vhost_user_api.c
@@ -48,6 +48,8 @@
#define foreach_vpe_api_msg \
_(CREATE_VHOST_USER_IF, create_vhost_user_if) \
_(MODIFY_VHOST_USER_IF, modify_vhost_user_if) \
+_(CREATE_VHOST_USER_IF_V2, create_vhost_user_if_v2) \
+_(MODIFY_VHOST_USER_IF_V2, modify_vhost_user_if_v2) \
_(DELETE_VHOST_USER_IF, delete_vhost_user_if) \
_(SW_INTERFACE_VHOST_USER_DUMP, sw_interface_vhost_user_dump)
@@ -56,14 +58,13 @@ vl_api_create_vhost_user_if_t_handler (vl_api_create_vhost_user_if_t * mp)
{
int rv = 0;
vl_api_create_vhost_user_if_reply_t *rmp;
- u32 sw_if_index = (u32) ~ 0;
vnet_main_t *vnm = vnet_get_main ();
vlib_main_t *vm = vlib_get_main ();
- u64 features = (u64) ~ (0ULL);
u64 disabled_features = (u64) (0ULL);
- mac_address_t mac;
- u8 *mac_p = NULL;
+ vhost_user_create_if_args_t args = { 0 };
+ args.sw_if_index = (u32) ~ 0;
+ args.feature_mask = (u64) ~ (0ULL);
if (mp->disable_mrg_rxbuf)
disabled_features = VIRTIO_FEATURE (VIRTIO_NET_F_MRG_RXBUF);
@@ -77,18 +78,21 @@ vl_api_create_vhost_user_if_t_handler (vl_api_create_vhost_user_if_t * mp)
*/
disabled_features |= FEATURE_VIRTIO_NET_F_HOST_GUEST_TSO_FEATURE_BITS |
VIRTIO_FEATURE (VIRTIO_F_RING_PACKED);
- features &= ~disabled_features;
+
+ /* EVENT_IDX is disabled by default */
+ disabled_features |= VIRTIO_FEATURE (VIRTIO_RING_F_EVENT_IDX);
+ args.feature_mask &= ~disabled_features;
if (mp->use_custom_mac)
- {
- mac_address_decode (mp->mac_address, &mac);
- mac_p = (u8 *) & mac;
- }
+ mac_address_decode (mp->mac_address, (mac_address_t *) args.hwaddr);
- rv = vhost_user_create_if (vnm, vm, (char *) mp->sock_filename,
- mp->is_server, &sw_if_index, features,
- mp->renumber, ntohl (mp->custom_dev_instance),
- mac_p, mp->enable_gso, mp->enable_packed);
+ args.is_server = mp->is_server;
+ args.sock_filename = (char *) mp->sock_filename;
+ args.renumber = mp->renumber;
+ args.custom_dev_instance = ntohl (mp->custom_dev_instance);
+ args.enable_gso = mp->enable_gso;
+ args.enable_packed = mp->enable_packed;
+ rv = vhost_user_create_if (vnm, vm, &args);
/* Remember an interface tag for the new interface */
if (rv == 0)
@@ -99,14 +103,14 @@ vl_api_create_vhost_user_if_t_handler (vl_api_create_vhost_user_if_t * mp)
/* Make sure it's a proper C-string */
mp->tag[ARRAY_LEN (mp->tag) - 1] = 0;
u8 *tag = format (0, "%s%c", mp->tag, 0);
- vnet_set_sw_interface_tag (vnm, tag, sw_if_index);
+ vnet_set_sw_interface_tag (vnm, tag, args.sw_if_index);
}
}
/* *INDENT-OFF* */
REPLY_MACRO2(VL_API_CREATE_VHOST_USER_IF_REPLY,
({
- rmp->sw_if_index = ntohl (sw_if_index);
+ rmp->sw_if_index = ntohl (args.sw_if_index);
}));
/* *INDENT-ON* */
}
@@ -116,13 +120,12 @@ vl_api_modify_vhost_user_if_t_handler (vl_api_modify_vhost_user_if_t * mp)
{
int rv = 0;
vl_api_modify_vhost_user_if_reply_t *rmp;
- u32 sw_if_index = ntohl (mp->sw_if_index);
- u64 features = (u64) ~ (0ULL);
u64 disabled_features = (u64) (0ULL);
-
+ vhost_user_create_if_args_t args = { 0 };
vnet_main_t *vnm = vnet_get_main ();
vlib_main_t *vm = vlib_get_main ();
+ args.feature_mask = (u64) ~ (0ULL);
/*
* GSO and PACKED are not supported by feature mask via binary API. We
* disable GSO and PACKED feature in the feature mask. They may be enabled
@@ -130,17 +133,125 @@ vl_api_modify_vhost_user_if_t_handler (vl_api_modify_vhost_user_if_t * mp)
*/
disabled_features |= FEATURE_VIRTIO_NET_F_HOST_GUEST_TSO_FEATURE_BITS |
VIRTIO_FEATURE (VIRTIO_F_RING_PACKED);
- features &= ~disabled_features;
- rv = vhost_user_modify_if (vnm, vm, (char *) mp->sock_filename,
- mp->is_server, sw_if_index, features,
- mp->renumber, ntohl (mp->custom_dev_instance),
- mp->enable_gso, mp->enable_packed);
+ /* EVENT_IDX is disabled by default */
+ disabled_features |= VIRTIO_FEATURE (VIRTIO_RING_F_EVENT_IDX);
+ args.feature_mask &= ~disabled_features;
+
+ args.sw_if_index = ntohl (mp->sw_if_index);
+ args.sock_filename = (char *) mp->sock_filename;
+ args.is_server = mp->is_server;
+ args.renumber = mp->renumber;
+ args.custom_dev_instance = ntohl (mp->custom_dev_instance);
+ args.enable_gso = mp->enable_gso;
+ args.enable_packed = mp->enable_packed;
+ rv = vhost_user_modify_if (vnm, vm, &args);
REPLY_MACRO (VL_API_MODIFY_VHOST_USER_IF_REPLY);
}
static void
+vl_api_create_vhost_user_if_v2_t_handler (vl_api_create_vhost_user_if_v2_t *
+ mp)
+{
+ int rv = 0;
+ vl_api_create_vhost_user_if_v2_reply_t *rmp;
+ vnet_main_t *vnm = vnet_get_main ();
+ vlib_main_t *vm = vlib_get_main ();
+ u64 disabled_features = (u64) (0ULL);
+ vhost_user_create_if_args_t args = { 0 };
+
+ args.sw_if_index = (u32) ~ 0;
+ args.feature_mask = (u64) ~ (0ULL);
+ if (mp->disable_mrg_rxbuf)
+ disabled_features = VIRTIO_FEATURE (VIRTIO_NET_F_MRG_RXBUF);
+
+ if (mp->disable_indirect_desc)
+ disabled_features |= VIRTIO_FEATURE (VIRTIO_RING_F_INDIRECT_DESC);
+
+ /*
+ * GSO and PACKED are not supported by feature mask via binary API. We
+ * disable GSO and PACKED feature in the feature mask. They may be enabled
+ * explicitly via enable_gso and enable_packed argument
+ */
+ disabled_features |= FEATURE_VIRTIO_NET_F_HOST_GUEST_TSO_FEATURE_BITS |
+ VIRTIO_FEATURE (VIRTIO_F_RING_PACKED);
+
+ /* EVENT_IDX is disabled by default */
+ disabled_features |= VIRTIO_FEATURE (VIRTIO_RING_F_EVENT_IDX);
+ args.feature_mask &= ~disabled_features;
+
+ if (mp->use_custom_mac)
+ mac_address_decode (mp->mac_address, (mac_address_t *) args.hwaddr);
+
+ args.is_server = mp->is_server;
+ args.sock_filename = (char *) mp->sock_filename;
+ args.renumber = mp->renumber;
+ args.custom_dev_instance = ntohl (mp->custom_dev_instance);
+ args.enable_gso = mp->enable_gso;
+ args.enable_packed = mp->enable_packed;
+ args.enable_event_idx = mp->enable_event_idx;
+ rv = vhost_user_create_if (vnm, vm, &args);
+
+ /* Remember an interface tag for the new interface */
+ if (rv == 0)
+ {
+ /* If a tag was supplied... */
+ if (mp->tag[0])
+ {
+ /* Make sure it's a proper C-string */
+ mp->tag[ARRAY_LEN (mp->tag) - 1] = 0;
+ u8 *tag = format (0, "%s%c", mp->tag, 0);
+ vnet_set_sw_interface_tag (vnm, tag, args.sw_if_index);
+ }
+ }
+
+ /* *INDENT-OFF* */
+ REPLY_MACRO2(VL_API_CREATE_VHOST_USER_IF_V2_REPLY,
+ ({
+ rmp->sw_if_index = ntohl (args.sw_if_index);
+ }));
+ /* *INDENT-ON* */
+}
+
+static void
+vl_api_modify_vhost_user_if_v2_t_handler (vl_api_modify_vhost_user_if_v2_t *
+ mp)
+{
+ int rv = 0;
+ vl_api_modify_vhost_user_if_v2_reply_t *rmp;
+ u64 disabled_features = (u64) (0ULL);
+ vhost_user_create_if_args_t args = { 0 };
+ vnet_main_t *vnm = vnet_get_main ();
+ vlib_main_t *vm = vlib_get_main ();
+
+ args.feature_mask = (u64) ~ (0ULL);
+ /*
+ * GSO and PACKED are not supported by feature mask via binary API. We
+ * disable GSO and PACKED feature in the feature mask. They may be enabled
+ * explicitly via enable_gso and enable_packed argument
+ */
+ disabled_features |= FEATURE_VIRTIO_NET_F_HOST_GUEST_TSO_FEATURE_BITS |
+ VIRTIO_FEATURE (VIRTIO_F_RING_PACKED);
+
+ /* EVENT_IDX is disabled by default */
+ disabled_features |= VIRTIO_FEATURE (VIRTIO_RING_F_EVENT_IDX);
+ args.feature_mask &= ~disabled_features;
+
+ args.sw_if_index = ntohl (mp->sw_if_index);
+ args.sock_filename = (char *) mp->sock_filename;
+ args.is_server = mp->is_server;
+ args.renumber = mp->renumber;
+ args.custom_dev_instance = ntohl (mp->custom_dev_instance);
+ args.enable_gso = mp->enable_gso;
+ args.enable_packed = mp->enable_packed;
+ args.enable_event_idx = mp->enable_event_idx;
+ rv = vhost_user_modify_if (vnm, vm, &args);
+
+ REPLY_MACRO (VL_API_MODIFY_VHOST_USER_IF_V2_REPLY);
+}
+
+static void
vl_api_delete_vhost_user_if_t_handler (vl_api_delete_vhost_user_if_t * mp)
{
int rv = 0;
@@ -263,6 +374,7 @@ vhost_user_api_hookup (vlib_main_t * vm)
/* Mark CREATE_VHOST_USER_IF as mp safe */
am->is_mp_safe[VL_API_CREATE_VHOST_USER_IF] = 1;
+ am->is_mp_safe[VL_API_CREATE_VHOST_USER_IF_V2] = 1;
/*
* Set up the (msg_name, crc, message-id) table
diff --git a/src/vnet/devices/virtio/vhost_user_inline.h b/src/vnet/devices/virtio/vhost_user_inline.h
index 17b6a90618f..5297453c317 100644
--- a/src/vnet/devices/virtio/vhost_user_inline.h
+++ b/src/vnet/devices/virtio/vhost_user_inline.h
@@ -248,8 +248,20 @@ format_vhost_trace (u8 * s, va_list * va)
return s;
}
+static_always_inline u64
+vhost_user_is_packed_ring_supported (vhost_user_intf_t * vui)
+{
+ return (vui->features & VIRTIO_FEATURE (VIRTIO_F_RING_PACKED));
+}
+
+static_always_inline u64
+vhost_user_is_event_idx_supported (vhost_user_intf_t * vui)
+{
+ return (vui->features & VIRTIO_FEATURE (VIRTIO_RING_F_EVENT_IDX));
+}
+
static_always_inline void
-vhost_user_send_call (vlib_main_t * vm, vhost_user_vring_t * vq)
+vhost_user_kick (vlib_main_t * vm, vhost_user_vring_t * vq)
{
vhost_user_main_t *vum = &vhost_user_main;
u64 x = 1;
@@ -257,7 +269,7 @@ vhost_user_send_call (vlib_main_t * vm, vhost_user_vring_t * vq)
int rv;
rv = write (fd, &x, sizeof (x));
- if (rv <= 0)
+ if (PREDICT_FALSE (rv <= 0))
{
clib_unix_warning
("Error: Could not write to unix socket for callfd %d", fd);
@@ -268,6 +280,101 @@ vhost_user_send_call (vlib_main_t * vm, vhost_user_vring_t * vq)
vq->int_deadline = vlib_time_now (vm) + vum->coalesce_time;
}
+static_always_inline u16
+vhost_user_avail_event_idx (vhost_user_vring_t * vq)
+{
+ volatile u16 *event_idx = (u16 *) & (vq->used->ring[vq->qsz_mask + 1]);
+
+ return *event_idx;
+}
+
+static_always_inline u16
+vhost_user_used_event_idx (vhost_user_vring_t * vq)
+{
+ volatile u16 *event_idx = (u16 *) & (vq->avail->ring[vq->qsz_mask + 1]);
+
+ return *event_idx;
+}
+
+static_always_inline u16
+vhost_user_need_event (u16 event_idx, u16 new_idx, u16 old_idx)
+{
+ return ((u16) (new_idx - event_idx - 1) < (u16) (new_idx - old_idx));
+}
+
+static_always_inline void
+vhost_user_send_call_event_idx (vlib_main_t * vm, vhost_user_vring_t * vq)
+{
+ vhost_user_main_t *vum = &vhost_user_main;
+ u8 first_kick = vq->first_kick;
+ u16 event_idx = vhost_user_used_event_idx (vq);
+
+ vq->first_kick = 1;
+ if (vhost_user_need_event (event_idx, vq->last_used_idx, vq->last_kick) ||
+ PREDICT_FALSE (!first_kick))
+ {
+ vhost_user_kick (vm, vq);
+ vq->last_kick = event_idx;
+ }
+ else
+ {
+ vq->n_since_last_int = 0;
+ vq->int_deadline = vlib_time_now (vm) + vum->coalesce_time;
+ }
+}
+
+static_always_inline void
+vhost_user_send_call_event_idx_packed (vlib_main_t * vm,
+ vhost_user_vring_t * vq)
+{
+ vhost_user_main_t *vum = &vhost_user_main;
+ u8 first_kick = vq->first_kick;
+ u16 off_wrap;
+ u16 event_idx;
+ u16 new_idx = vq->last_used_idx;
+ u16 old_idx = vq->last_kick;
+
+ if (PREDICT_TRUE (vq->avail_event->flags == VRING_EVENT_F_DESC))
+ {
+ CLIB_COMPILER_BARRIER ();
+ off_wrap = vq->avail_event->off_wrap;
+ event_idx = off_wrap & 0x7fff;
+ if (vq->used_wrap_counter != (off_wrap >> 15))
+ event_idx -= (vq->qsz_mask + 1);
+
+ if (new_idx <= old_idx)
+ old_idx -= (vq->qsz_mask + 1);
+
+ vq->first_kick = 1;
+ vq->last_kick = event_idx;
+ if (vhost_user_need_event (event_idx, new_idx, old_idx) ||
+ PREDICT_FALSE (!first_kick))
+ vhost_user_kick (vm, vq);
+ else
+ {
+ vq->n_since_last_int = 0;
+ vq->int_deadline = vlib_time_now (vm) + vum->coalesce_time;
+ }
+ }
+ else
+ vhost_user_kick (vm, vq);
+}
+
+static_always_inline void
+vhost_user_send_call (vlib_main_t * vm, vhost_user_intf_t * vui,
+ vhost_user_vring_t * vq)
+{
+ if (vhost_user_is_event_idx_supported (vui))
+ {
+ if (vhost_user_is_packed_ring_supported (vui))
+ vhost_user_send_call_event_idx_packed (vm, vq);
+ else
+ vhost_user_send_call_event_idx (vm, vq);
+ }
+ else
+ vhost_user_kick (vm, vq);
+}
+
static_always_inline u8
vui_is_link_up (vhost_user_intf_t * vui)
{
@@ -305,7 +412,10 @@ vhost_user_advance_last_avail_idx (vhost_user_vring_t * vring)
{
vring->last_avail_idx++;
if (PREDICT_FALSE ((vring->last_avail_idx & vring->qsz_mask) == 0))
- vring->avail_wrap_counter ^= VRING_DESC_F_AVAIL;
+ {
+ vring->avail_wrap_counter ^= VRING_DESC_F_AVAIL;
+ vring->last_avail_idx = 0;
+ }
}
static_always_inline void
@@ -331,7 +441,11 @@ vhost_user_undo_advanced_last_avail_idx (vhost_user_vring_t * vring)
{
if (PREDICT_FALSE ((vring->last_avail_idx & vring->qsz_mask) == 0))
vring->avail_wrap_counter ^= VRING_DESC_F_AVAIL;
- vring->last_avail_idx--;
+
+ if (PREDICT_FALSE (vring->last_avail_idx == 0))
+ vring->last_avail_idx = vring->qsz_mask;
+ else
+ vring->last_avail_idx--;
}
static_always_inline void
@@ -362,13 +476,10 @@ vhost_user_advance_last_used_idx (vhost_user_vring_t * vring)
{
vring->last_used_idx++;
if (PREDICT_FALSE ((vring->last_used_idx & vring->qsz_mask) == 0))
- vring->used_wrap_counter ^= 1;
-}
-
-static_always_inline u64
-vhost_user_is_packed_ring_supported (vhost_user_intf_t * vui)
-{
- return (vui->features & VIRTIO_FEATURE (VIRTIO_F_RING_PACKED));
+ {
+ vring->used_wrap_counter ^= 1;
+ vring->last_used_idx = 0;
+ }
}
#endif
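The unsigned 16-bit arithmetic in vhost_user_need_event() is what keeps the comparison correct across index wrap-around. A small standalone check, reproducing only the comparison itself for illustration, shows the behaviour with concrete values:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Local copy of the comparison used by vhost_user_need_event(). */
static uint16_t
need_event (uint16_t event_idx, uint16_t new_idx, uint16_t old_idx)
{
  return ((uint16_t) (new_idx - event_idx - 1) < (uint16_t) (new_idx - old_idx));
}

int
main (void)
{
  /* Driver asked to be notified once used idx passes 105; we last kicked at
   * 100 and have now completed up to 106 -> notify. */
  assert (need_event (105, 106, 100));

  /* Used idx has not yet reached the requested event index -> stay quiet. */
  assert (!need_event (110, 106, 100));

  /* Same logic holds across the 16-bit wrap: last kick at 0xfff0, driver
   * wants 0xfffa, used idx has wrapped to 0x0005 -> notify. */
  assert (need_event (0xfffa, 0x0005, 0xfff0));

  printf ("event index comparison behaves as expected\n");
  return 0;
}

The new first_kick flag in the vring serves a related purpose: it forces one unconditional kick so that last_kick has a meaningful baseline before the comparison is relied upon.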
diff --git a/src/vnet/devices/virtio/vhost_user_input.c b/src/vnet/devices/virtio/vhost_user_input.c
index 53230a61bc7..7ea70c629f8 100644
--- a/src/vnet/devices/virtio/vhost_user_input.c
+++ b/src/vnet/devices/virtio/vhost_user_input.c
@@ -320,16 +320,17 @@ vhost_user_handle_rx_offload (vlib_buffer_t * b0, u8 * b0_data,
}
static_always_inline void
-vhost_user_input_do_interrupt (vlib_main_t * vm, vhost_user_vring_t * txvq,
+vhost_user_input_do_interrupt (vlib_main_t * vm, vhost_user_intf_t * vui,
+ vhost_user_vring_t * txvq,
vhost_user_vring_t * rxvq)
{
f64 now = vlib_time_now (vm);
if ((txvq->n_since_last_int) && (txvq->int_deadline < now))
- vhost_user_send_call (vm, txvq);
+ vhost_user_send_call (vm, vui, txvq);
if ((rxvq->n_since_last_int) && (rxvq->int_deadline < now))
- vhost_user_send_call (vm, rxvq);
+ vhost_user_send_call (vm, vui, rxvq);
}
static_always_inline void
@@ -400,7 +401,7 @@ vhost_user_if_input (vlib_main_t * vm,
{
/* do we have pending interrupts ? */
vhost_user_vring_t *rxvq = &vui->vrings[VHOST_VRING_IDX_RX (qid)];
- vhost_user_input_do_interrupt (vm, txvq, rxvq);
+ vhost_user_input_do_interrupt (vm, vui, txvq, rxvq);
}
/*
@@ -742,7 +743,7 @@ stop:
txvq->n_since_last_int += n_rx_packets;
if (txvq->n_since_last_int > vum->coalesce_frames)
- vhost_user_send_call (vm, txvq);
+ vhost_user_send_call (vm, vui, txvq);
}
/* increase rx counters */
@@ -1116,7 +1117,7 @@ vhost_user_if_input_packed (vlib_main_t * vm, vhost_user_main_t * vum,
/* do we have pending interrupts ? */
vhost_user_vring_t *rxvq = &vui->vrings[VHOST_VRING_IDX_RX (qid)];
- vhost_user_input_do_interrupt (vm, txvq, rxvq);
+ vhost_user_input_do_interrupt (vm, vui, txvq, rxvq);
/*
* For adaptive mode, it is optimized to reduce interrupts.
@@ -1389,7 +1390,7 @@ vhost_user_if_input_packed (vlib_main_t * vm, vhost_user_main_t * vum,
{
txvq->n_since_last_int += n_rx_packets;
if (txvq->n_since_last_int > vum->coalesce_frames)
- vhost_user_send_call (vm, txvq);
+ vhost_user_send_call (vm, vui, txvq);
}
/* increase rx counters */
diff --git a/src/vnet/devices/virtio/vhost_user_output.c b/src/vnet/devices/virtio/vhost_user_output.c
index 80eefa6d9ed..465c0ea0903 100644
--- a/src/vnet/devices/virtio/vhost_user_output.c
+++ b/src/vnet/devices/virtio/vhost_user_output.c
@@ -279,7 +279,8 @@ vhost_user_handle_tx_offload (vhost_user_intf_t * vui, vlib_buffer_t * b,
}
static_always_inline void
-vhost_user_mark_desc_available (vlib_main_t * vm, vhost_user_vring_t * rxvq,
+vhost_user_mark_desc_available (vlib_main_t * vm, vhost_user_intf_t * vui,
+ vhost_user_vring_t * rxvq,
u16 * n_descs_processed, u8 chained,
vlib_frame_t * frame, u32 n_left)
{
@@ -334,7 +335,7 @@ vhost_user_mark_desc_available (vlib_main_t * vm, vhost_user_vring_t * rxvq,
rxvq->n_since_last_int += frame->n_vectors - n_left;
if (rxvq->n_since_last_int > vum->coalesce_frames)
- vhost_user_send_call (vm, rxvq);
+ vhost_user_send_call (vm, vui, rxvq);
}
}
@@ -645,7 +646,7 @@ retry:
copy_len = 0;
/* give buffers back to driver */
- vhost_user_mark_desc_available (vm, rxvq, &n_descs_processed,
+ vhost_user_mark_desc_available (vm, vui, rxvq, &n_descs_processed,
chained, frame, n_left);
}
@@ -660,8 +661,8 @@ done:
vlib_error_count (vm, node->node_index,
VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL, 1);
- vhost_user_mark_desc_available (vm, rxvq, &n_descs_processed, chained,
- frame, n_left);
+ vhost_user_mark_desc_available (vm, vui, rxvq, &n_descs_processed,
+ chained, frame, n_left);
}
/*
@@ -1019,7 +1020,7 @@ done:
rxvq->n_since_last_int += frame->n_vectors - n_left;
if (rxvq->n_since_last_int > vum->coalesce_frames)
- vhost_user_send_call (vm, rxvq);
+ vhost_user_send_call (vm, vui, rxvq);
}
vhost_user_vring_unlock (vui, qid);
diff --git a/src/vnet/devices/virtio/virtio_std.h b/src/vnet/devices/virtio/virtio_std.h
index 98befb5c820..619dd66d5ed 100644
--- a/src/vnet/devices/virtio/virtio_std.h
+++ b/src/vnet/devices/virtio/virtio_std.h
@@ -77,9 +77,17 @@ typedef enum
#define VRING_DESC_F_AVAIL (1 << 7)
#define VRING_DESC_F_USED (1 << 15)
-#define VRING_EVENT_F_ENABLE 0x0
-#define VRING_EVENT_F_DISABLE 0x1
-#define VRING_EVENT_F_DESC 0x2
+#define foreach_virtio_event_idx_flags \
+ _ (VRING_EVENT_F_ENABLE, 0) \
+ _ (VRING_EVENT_F_DISABLE, 1) \
+ _ (VRING_EVENT_F_DESC, 2)
+
+typedef enum
+{
+#define _(f,n) f = n,
+ foreach_virtio_event_idx_flags
+#undef _
+} virtio_event_idx_flags_t;
#define VRING_USED_F_NO_NOTIFY 1
#define VRING_AVAIL_F_NO_INTERRUPT 1
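For reference, the X-macro above expands to the same three values the replaced #defines provided, now usable both as an enum and, via format_vhost_user_event_idx_flags() in vhost_user.c, as printable names in the show output. The expansion is shown below; the comments reflect the virtio spec semantics for the packed-ring driver event flags:

typedef enum
{
  VRING_EVENT_F_ENABLE = 0,   /* notifications enabled unconditionally */
  VRING_EVENT_F_DISABLE = 1,  /* notifications suppressed */
  VRING_EVENT_F_DESC = 2,     /* notify only at the descriptor given by off_wrap */
} virtio_event_idx_flags_t;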