author | Steven Luong <sluong@cisco.com> | 2021-04-21 09:54:34 -0700 |
---|---|---|
committer | Benoît Ganne <bganne@cisco.com> | 2021-08-18 12:25:34 +0000 |
commit | 38071b1331b44746679997f6e66081c4936d087c (patch) | |
tree | 2c25d05051510603a21528a7d7fcd15754a0ae09 /src/vnet/devices | |
parent | 91144fbf54764feb005a33419d0129febd2365d4 (diff) |
vhost: interrupt mode support with mq
Vhost interrupt mode support with mq does not work when coalesce-frames
is not configured to 0.
When packed-ring is configured, we were also setting the wrong flag
for want-interrupt.
There is no need to trigger an interrupt to the RX infra if at least one
queue on the same thread is doing polling (sketched below).
Type: fix
Signed-off-by: Steven Luong <sluong@cisco.com>
Change-Id: I8e64250f2aa6bf611a777a53e4d2c1076d83305f
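
A minimal standalone sketch of the third point above, the heart of this change: each thread keeps a count of its queues that run in polling mode, and a kick only raises an interrupt to the RX infra when that count is zero. The types here (thread_state_t, rx_mode_t, queue_t) are simplified stand-ins for VPP's vhost_cpu_t and vhost_user_vring_t, not the real structures; the actual bookkeeping is in the diff below.

#include <stdbool.h>
#include <stdint.h>

/* Per-thread state: how many queues on this thread run in polling mode
 * (mirrors the polling_q_count field added to vhost_cpu_t). */
typedef struct
{
  uint32_t polling_q_count;
} thread_state_t;

typedef enum
{
  MODE_POLLING,
  MODE_INTERRUPT,
  MODE_ADAPTIVE,
} rx_mode_t;

/* Per-queue state: its rx mode and the thread that services it
 * (mirrors the thread_index field added to vhost_user_vring_t). */
typedef struct
{
  rx_mode_t mode;
  uint32_t thread_index;
} queue_t;

/* The guest kicked the queue: wake the RX node only if no queue on the
 * same thread is polling, since a polling thread finds the work anyway. */
static bool
should_trigger_interrupt (const thread_state_t *threads, const queue_t *vq)
{
  if (vq->mode == MODE_POLLING)
    return false; /* polling queues never need a wake-up */
  return threads[vq->thread_index].polling_q_count == 0;
}

The counter must be adjusted everywhere a queue enters or leaves polling mode; the diff below does that in queue placement, in the rx-mode change handler, and on interface delete.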
Diffstat (limited to 'src/vnet/devices')
-rw-r--r-- | src/vnet/devices/virtio/vhost_user.c | 299
-rw-r--r-- | src/vnet/devices/virtio/vhost_user.h | 10
-rw-r--r-- | src/vnet/devices/virtio/vhost_user_output.c | 38
3 files changed, 209 insertions, 138 deletions
diff --git a/src/vnet/devices/virtio/vhost_user.c b/src/vnet/devices/virtio/vhost_user.c
index 3217c72f95d..7ea7cbef995 100644
--- a/src/vnet/devices/virtio/vhost_user.c
+++ b/src/vnet/devices/virtio/vhost_user.c
@@ -106,13 +106,13 @@ unmap_all_mem_regions (vhost_user_intf_t * vui)
     }
   vui->nregions = 0;
 
-  for (q = 0; q < vui->num_qid; q++)
-    {
-      vq = &vui->vrings[q];
-      vq->avail = 0;
-      vq->used = 0;
-      vq->desc = 0;
-    }
+  FOR_ALL_VHOST_RX_TXQ (q, vui)
+  {
+    vq = &vui->vrings[q];
+    vq->avail = 0;
+    vq->used = 0;
+    vq->desc = 0;
+  }
 }
 
 static_always_inline void
@@ -163,16 +163,28 @@ vhost_user_rx_thread_placement (vhost_user_intf_t * vui, u32 qid)
   vnet_main_t *vnm = vnet_get_main ();
   int rv;
   u32 q = qid >> 1;
+  vhost_user_main_t *vum = &vhost_user_main;
 
   ASSERT ((qid & 1) == 1);	// should be odd
   // Assign new queue mappings for the interface
+  if (txvq->queue_index != ~0)
+    return;
   vnet_hw_if_set_input_node (vnm, vui->hw_if_index,
                              vhost_user_input_node.index);
   txvq->queue_index = vnet_hw_if_register_rx_queue (vnm, vui->hw_if_index, q,
                                                     VNET_HW_IF_RXQ_THREAD_ANY);
+  txvq->thread_index =
+    vnet_hw_if_get_rx_queue_thread_index (vnm, txvq->queue_index);
+
   if (txvq->mode == VNET_HW_IF_RX_MODE_UNKNOWN)
     /* Set polling as the default */
     txvq->mode = VNET_HW_IF_RX_MODE_POLLING;
+  if (txvq->mode == VNET_HW_IF_RX_MODE_POLLING)
+    {
+      vhost_cpu_t *cpu = vec_elt_at_index (vum->cpus, txvq->thread_index);
+      /* Keep a polling queue count for each thread */
+      cpu->polling_q_count++;
+    }
   txvq->qid = q;
   rv = vnet_hw_if_set_rx_queue_mode (vnm, txvq->queue_index, txvq->mode);
   if (rv)
@@ -227,7 +239,7 @@ vhost_user_thread_placement (vhost_user_intf_t * vui, u32 qid)
 {
   if (qid & 1)			// RX is odd, TX is even
     {
-      if (vui->vrings[qid].qid == -1)
+      if (vui->vrings[qid].queue_index == ~0)
         vhost_user_rx_thread_placement (vui, qid);
     }
   else
@@ -258,10 +270,17 @@ vhost_user_kickfd_read_ready (clib_file_t * uf)
                       vq->kickfd_idx);
     }
 
-  if (is_txq && (vhost_user_intf_ready (vui) &&
-                 ((vq->mode == VNET_HW_IF_RX_MODE_ADAPTIVE) ||
-                  (vq->mode == VNET_HW_IF_RX_MODE_INTERRUPT))))
-    vnet_hw_if_rx_queue_set_int_pending (vnm, vq->queue_index);
+  if (is_txq && (vq->mode != VNET_HW_IF_RX_MODE_POLLING) &&
+      vhost_user_intf_ready (vui))
+    {
+      vhost_cpu_t *cpu = vec_elt_at_index (vum->cpus, vq->thread_index);
+      /*
+       * If the thread has more than 1 queue and the other queue is in polling
+       * mode, there is no need to trigger an interrupt
+       */
+      if (cpu->polling_q_count == 0)
+        vnet_hw_if_rx_queue_set_int_pending (vnm, vq->queue_index);
+    }
 
   return 0;
 }
@@ -276,6 +295,9 @@ vhost_user_vring_init (vhost_user_intf_t * vui, u32 qid)
   vring->callfd_idx = ~0;
   vring->errfd = -1;
   vring->qid = -1;
+  vring->queue_index = ~0;
+  vring->thread_index = ~0;
+  vring->mode = VNET_HW_IF_RX_MODE_POLLING;
 
   clib_spinlock_init (&vring->vring_lock);
 
@@ -319,11 +341,16 @@ vhost_user_vring_close (vhost_user_intf_t * vui, u32 qid)
 
   clib_spinlock_free (&vring->vring_lock);
 
-  // save the qid so that we don't need to unassign and assign_rx_thread
-  // when the interface comes back up. They are expensive calls.
+  // save the needed information in vrings prior to being wiped out
   u16 q = vui->vrings[qid].qid;
+  u32 queue_index = vui->vrings[qid].queue_index;
+  u32 mode = vui->vrings[qid].mode;
+  u32 thread_index = vui->vrings[qid].thread_index;
   vhost_user_vring_init (vui, qid);
   vui->vrings[qid].qid = q;
+  vui->vrings[qid].queue_index = queue_index;
+  vui->vrings[qid].mode = mode;
+  vui->vrings[qid].thread_index = thread_index;
 }
 
 static_always_inline void
@@ -342,13 +369,38 @@ vhost_user_if_disconnect (vhost_user_intf_t * vui)
 
   vui->is_ready = 0;
 
-  for (q = 0; q < vui->num_qid; q++)
-    vhost_user_vring_close (vui, q);
+  FOR_ALL_VHOST_RX_TXQ (q, vui) { vhost_user_vring_close (vui, q); }
 
   unmap_all_mem_regions (vui);
   vu_log_debug (vui, "interface ifindex %d disconnected", vui->sw_if_index);
 }
 
+void
+vhost_user_set_operation_mode (vhost_user_intf_t *vui,
+                               vhost_user_vring_t *txvq)
+{
+  if (vhost_user_is_packed_ring_supported (vui))
+    {
+      if (txvq->used_event)
+        {
+          if (txvq->mode == VNET_HW_IF_RX_MODE_POLLING)
+            txvq->used_event->flags = VRING_EVENT_F_DISABLE;
+          else
+            txvq->used_event->flags = 0;
+        }
+    }
+  else
+    {
+      if (txvq->used)
+        {
+          if (txvq->mode == VNET_HW_IF_RX_MODE_POLLING)
+            txvq->used->flags = VRING_USED_F_NO_NOTIFY;
+          else
+            txvq->used->flags = 0;
+        }
+    }
+}
+
 static clib_error_t *
 vhost_user_socket_read (clib_file_t * uf)
 {
@@ -579,19 +631,19 @@ vhost_user_socket_read (clib_file_t * uf)
        * Re-compute desc, used, and avail descriptor table if vring address
        * is set.
        */
-      for (q = 0; q < vui->num_qid; q++)
-        {
-          if (vui->vrings[q].desc_user_addr &&
-              vui->vrings[q].used_user_addr && vui->vrings[q].avail_user_addr)
-            {
-              vui->vrings[q].desc =
-                map_user_mem (vui, vui->vrings[q].desc_user_addr);
-              vui->vrings[q].used =
-                map_user_mem (vui, vui->vrings[q].used_user_addr);
-              vui->vrings[q].avail =
-                map_user_mem (vui, vui->vrings[q].avail_user_addr);
-            }
-        }
+      FOR_ALL_VHOST_RX_TXQ (q, vui)
+      {
+        if (vui->vrings[q].desc_user_addr && vui->vrings[q].used_user_addr &&
+            vui->vrings[q].avail_user_addr)
+          {
+            vui->vrings[q].desc =
+              map_user_mem (vui, vui->vrings[q].desc_user_addr);
+            vui->vrings[q].used =
+              map_user_mem (vui, vui->vrings[q].used_user_addr);
+            vui->vrings[q].avail =
+              map_user_mem (vui, vui->vrings[q].avail_user_addr);
+          }
+      }
       vlib_worker_thread_barrier_release (vm);
       break;
 
@@ -665,12 +717,8 @@ vhost_user_socket_read (clib_file_t * uf)
           vui->vrings[msg.state.index].last_kick =
             vui->vrings[msg.state.index].last_used_idx;
 
-          /* tell driver that we don't want interrupts */
-          if (vhost_user_is_packed_ring_supported (vui))
-            vui->vrings[msg.state.index].used_event->flags =
-              VRING_EVENT_F_DISABLE;
-          else
-            vui->vrings[msg.state.index].used->flags = VRING_USED_F_NO_NOTIFY;
+          /* tell driver that we want interrupts or not */
+          vhost_user_set_operation_mode (vui, &vui->vrings[msg.state.index]);
           vlib_worker_thread_barrier_release (vm);
           vhost_user_update_iface_state (vui);
           break;
@@ -1180,6 +1228,7 @@ vhost_user_send_interrupt_process (vlib_main_t * vm,
 
         case VHOST_USER_EVENT_START_TIMER:
           stop_timer = 0;
+          timeout = 1e-3;
           if (!vlib_process_suspend_time_is_zero (poll_time_remaining))
             break;
           /* fall through */
@@ -1188,32 +1237,23 @@ vhost_user_send_interrupt_process (vlib_main_t * vm,
           /* *INDENT-OFF* */
           pool_foreach (vui, vum->vhost_user_interfaces) {
               next_timeout = timeout;
-              for (qid = 0; qid < vui->num_qid / 2; qid += 2)
-                {
-                  vhost_user_vring_t *rxvq = &vui->vrings[qid];
-                  vhost_user_vring_t *txvq = &vui->vrings[qid + 1];
-
-                  if (txvq->qid == -1)
-                    continue;
-                  if (txvq->n_since_last_int)
-                    {
-                      if (now >= txvq->int_deadline)
-                        vhost_user_send_call (vm, vui, txvq);
-                      else
-                        next_timeout = txvq->int_deadline - now;
-                    }
-
-                  if (rxvq->n_since_last_int)
-                    {
-                      if (now >= rxvq->int_deadline)
-                        vhost_user_send_call (vm, vui, rxvq);
-                      else
-                        next_timeout = rxvq->int_deadline - now;
-                    }
-
-                  if ((next_timeout < timeout) && (next_timeout > 0.0))
-                    timeout = next_timeout;
-                }
+              FOR_ALL_VHOST_RX_TXQ (qid, vui)
+                {
+                  vhost_user_vring_t *vq = &vui->vrings[qid];
+
+                  if (vq->started == 0)
+                    continue;
+                  if (vq->n_since_last_int)
+                    {
+                      if (now >= vq->int_deadline)
+                        vhost_user_send_call (vm, vui, vq);
+                      else
+                        next_timeout = vq->int_deadline - now;
+                    }
+
+                  if ((next_timeout < timeout) && (next_timeout > 0.0))
+                    timeout = next_timeout;
+                }
           }
           /* *INDENT-ON* */
           break;
@@ -1364,10 +1404,10 @@ vhost_user_term_if (vhost_user_intf_t * vui)
   vhost_user_update_gso_interface_count (vui, 0 /* delete */ );
   vhost_user_update_iface_state (vui);
 
-  for (q = 0; q < vui->num_qid; q++)
-    {
-      clib_spinlock_free (&vui->vrings[q].vring_lock);
-    }
+  FOR_ALL_VHOST_RX_TXQ (q, vui)
+  {
+    clib_spinlock_free (&vui->vrings[q].vring_lock);
+  }
 
   if (vui->unix_server_index != ~0)
     {
@@ -1403,28 +1443,33 @@ vhost_user_delete_if (vnet_main_t * vnm, vlib_main_t * vm, u32 sw_if_index)
   vu_log_debug (vui, "Deleting vhost-user interface %s (instance %d)",
                 hwif->name, hwif->dev_instance);
 
-  for (qid = 1; qid < vui->num_qid / 2; qid += 2)
-    {
-      vhost_user_vring_t *txvq = &vui->vrings[qid];
+  FOR_ALL_VHOST_TXQ (qid, vui)
+  {
+    vhost_user_vring_t *txvq = &vui->vrings[qid];
 
-      if (txvq->qid == -1)
-        continue;
-      if ((vum->ifq_count > 0) &&
-          ((txvq->mode == VNET_HW_IF_RX_MODE_INTERRUPT) ||
-           (txvq->mode == VNET_HW_IF_RX_MODE_ADAPTIVE)))
-        {
-          vum->ifq_count--;
-          // Stop the timer if there is no more interrupt interface/queue
-          if ((vum->ifq_count == 0) &&
-              (vum->coalesce_time > 0.0) && (vum->coalesce_frames > 0))
-            {
-              vlib_process_signal_event (vm,
-                                         vhost_user_send_interrupt_node.index,
-                                         VHOST_USER_EVENT_STOP_TIMER, 0);
-              break;
-            }
-        }
-    }
+    if ((txvq->mode == VNET_HW_IF_RX_MODE_POLLING) &&
+        (txvq->thread_index != ~0))
+      {
+        vhost_cpu_t *cpu = vec_elt_at_index (vum->cpus, txvq->thread_index);
+        ASSERT (cpu->polling_q_count != 0);
+        cpu->polling_q_count--;
+      }
+
+    if ((vum->ifq_count > 0) &&
+        ((txvq->mode == VNET_HW_IF_RX_MODE_INTERRUPT) ||
+         (txvq->mode == VNET_HW_IF_RX_MODE_ADAPTIVE)))
+      {
+        vum->ifq_count--;
+        // Stop the timer if there is no more interrupt interface/queue
+        if (vum->ifq_count == 0)
+          {
+            vlib_process_signal_event (vm,
                                        vhost_user_send_interrupt_node.index,
+                                       VHOST_USER_EVENT_STOP_TIMER, 0);
+            break;
+          }
+      }
+  }
 
   // Disable and reset interface
   vhost_user_term_if (vui);
@@ -1922,9 +1967,11 @@ vhost_user_show_desc (vlib_main_t * vm, vhost_user_intf_t * vui, int q,
   vhost_user_vring_t *vq = &vui->vrings[q];
 
   if (vq->avail && vq->used)
-    vlib_cli_output (vm, "  avail.flags %x avail event idx %u avail.idx %d "
-                     "used event idx %u used.idx %d\n", vq->avail->flags,
-                     vhost_user_avail_event_idx (vq), vq->avail->idx,
+    vlib_cli_output (vm,
                      "  avail.flags %x avail event idx %u avail.idx %d "
+                     "used.flags %x used event idx %u used.idx %d\n",
+                     vq->avail->flags, vhost_user_avail_event_idx (vq),
+                     vq->avail->idx, vq->used->flags,
                      vhost_user_used_event_idx (vq), vq->used->idx);
 
   vhost_user_show_fds (vm, vq);
@@ -2148,6 +2195,12 @@ show_vhost_user_command_fn (vlib_main_t * vm,
       vlib_cli_output (vm, "  Number of rx virtqueues in interrupt mode: %d",
                        vum->ifq_count);
       vlib_cli_output (vm, "  Number of GSO interfaces: %d", vum->gso_count);
+      for (u32 tid = 0; tid <= vlib_num_workers (); tid++)
+        {
+          vhost_cpu_t *cpu = vec_elt_at_index (vum->cpus, tid);
+          vlib_cli_output (vm, "  Thread %u: Polling queue count %u", tid,
+                           cpu->polling_q_count);
+        }
 
   for (i = 0; i < vec_len (hw_if_indices); i++)
     {
@@ -2199,19 +2252,16 @@ show_vhost_user_command_fn (vlib_main_t * vm,
 
       vlib_cli_output (vm, " rx placement: ");
 
-      for (qid = 1; qid < vui->num_qid / 2; qid += 2)
-        {
-          vnet_main_t *vnm = vnet_get_main ();
-          uword thread_index;
-          vhost_user_vring_t *txvq = &vui->vrings[qid];
+      FOR_ALL_VHOST_TXQ (qid, vui)
+      {
+        vhost_user_vring_t *txvq = &vui->vrings[qid];
 
-          if (txvq->qid == -1)
-            continue;
-          thread_index =
-            vnet_hw_if_get_rx_queue_thread_index (vnm, txvq->queue_index);
-          vlib_cli_output (vm, "   thread %d on vring %d, %U\n", thread_index,
-                           qid, format_vnet_hw_if_rx_mode, txvq->mode);
-        }
+        if (txvq->qid == -1)
+          continue;
+        vlib_cli_output (vm, "   thread %d on vring %d, %U\n",
+                         txvq->thread_index, qid, format_vnet_hw_if_rx_mode,
+                         txvq->mode);
+      }
 
       vlib_cli_output (vm, " tx placement: %s\n",
                        vui->use_tx_spinlock ? "spin-lock" : "lock-free");
@@ -2244,29 +2294,30 @@ show_vhost_user_command_fn (vlib_main_t * vm,
                            vui->regions[j].mmap_offset,
                            pointer_to_uword (vui->region_mmap_addr[j]));
         }
-      for (q = 0; q < vui->num_qid; q++)
-        {
-          if (!vui->vrings[q].started)
-            continue;
-
-          vlib_cli_output (vm, "\n Virtqueue %d (%s%s)\n", q,
-                           (q & 1) ? "RX" : "TX",
-                           vui->vrings[q].enabled ? "" : " disabled");
-
-          vlib_cli_output (vm,
-                           "  qsz %d last_avail_idx %d last_used_idx %d"
-                           " last_kick %u\n",
-                           vui->vrings[q].qsz_mask + 1,
-                           vui->vrings[q].last_avail_idx,
-                           vui->vrings[q].last_used_idx,
-                           vui->vrings[q].last_kick);
-
-          if (vhost_user_is_packed_ring_supported (vui))
-            vhost_user_show_desc_packed (vm, vui, q, show_descr,
-                                         show_verbose);
-          else
-            vhost_user_show_desc (vm, vui, q, show_descr, show_verbose);
-        }
+      FOR_ALL_VHOST_RX_TXQ (q, vui)
+      {
+        if (!vui->vrings[q].started)
+          continue;
+
+        vlib_cli_output (vm, "\n Virtqueue %d (%s%s)\n", q,
+                         (q & 1) ? "RX" : "TX",
+                         vui->vrings[q].enabled ? "" : " disabled");
+        if (q & 1)
+          vlib_cli_output (vm, "  global RX queue index %u\n",
+                           vui->vrings[q].queue_index);
+
+        vlib_cli_output (
+          vm,
+          "  qsz %d last_avail_idx %d last_used_idx %d"
+          " last_kick %u\n",
+          vui->vrings[q].qsz_mask + 1, vui->vrings[q].last_avail_idx,
+          vui->vrings[q].last_used_idx, vui->vrings[q].last_kick);
+
+        if (vhost_user_is_packed_ring_supported (vui))
+          vhost_user_show_desc_packed (vm, vui, q, show_descr, show_verbose);
+        else
+          vhost_user_show_desc (vm, vui, q, show_descr, show_verbose);
+      }
       vlib_cli_output (vm, "\n");
     }
 done:
diff --git a/src/vnet/devices/virtio/vhost_user.h b/src/vnet/devices/virtio/vhost_user.h
index 63a0bc0da48..8ecf4a3aca7 100644
--- a/src/vnet/devices/virtio/vhost_user.h
+++ b/src/vnet/devices/virtio/vhost_user.h
@@ -231,6 +231,7 @@ typedef struct
   u16 last_kick;
   u8 first_kick;
   u32 queue_index;
+  u32 thread_index;
 } vhost_user_vring_t;
 
 #define VHOST_USER_EVENT_START_TIMER 1
@@ -290,6 +291,12 @@ typedef struct
   u8 enable_event_idx;
 } vhost_user_intf_t;
 
+#define FOR_ALL_VHOST_TXQ(qid, vui) for (qid = 1; qid < vui->num_qid; qid += 2)
+
+#define FOR_ALL_VHOST_RXQ(qid, vui) for (qid = 0; qid < vui->num_qid; qid += 2)
+
+#define FOR_ALL_VHOST_RX_TXQ(qid, vui) for (qid = 0; qid < vui->num_qid; qid++)
+
 typedef struct
 {
   uword dst;
@@ -323,6 +330,7 @@ typedef struct
   u32 *to_next_list;
   vlib_buffer_t **rx_buffers_pdesc;
+  u32 polling_q_count;
 } vhost_cpu_t;
 
 typedef struct
@@ -365,6 +373,8 @@ typedef struct
 
 int vhost_user_dump_ifs (vnet_main_t * vnm, vlib_main_t * vm,
                          vhost_user_intf_details_t ** out_vuids);
+void vhost_user_set_operation_mode (vhost_user_intf_t *vui,
+                                    vhost_user_vring_t *txvq);
 
 extern vlib_node_registration_t vhost_user_send_interrupt_node;
 extern vnet_device_class_t vhost_user_device_class;
diff --git a/src/vnet/devices/virtio/vhost_user_output.c b/src/vnet/devices/virtio/vhost_user_output.c
index 40faefadb42..d3e38bfa04e 100644
--- a/src/vnet/devices/virtio/vhost_user_output.c
+++ b/src/vnet/devices/virtio/vhost_user_output.c
@@ -1046,7 +1046,24 @@ vhost_user_interface_rx_mode_change (vnet_main_t * vnm, u32 hw_if_index,
   vhost_user_intf_t *vui =
     pool_elt_at_index (vum->vhost_user_interfaces, hif->dev_instance);
   vhost_user_vring_t *txvq = &vui->vrings[VHOST_VRING_IDX_TX (qid)];
+  vhost_cpu_t *cpu;
 
+  if (mode == txvq->mode)
+    return 0;
+
+  if ((mode != VNET_HW_IF_RX_MODE_POLLING) &&
+      (mode != VNET_HW_IF_RX_MODE_ADAPTIVE) &&
+      (mode != VNET_HW_IF_RX_MODE_INTERRUPT))
+    {
+      vu_log_err (vui, "unhandled mode %d changed for if %d queue %d", mode,
+                  hw_if_index, qid);
+      return clib_error_return (0, "unsupported");
+    }
+
+  if (txvq->thread_index == ~0)
+    return clib_error_return (0, "Queue initialization is not finished yet");
+
+  cpu = vec_elt_at_index (vum->cpus, txvq->thread_index);
   if ((mode == VNET_HW_IF_RX_MODE_INTERRUPT) ||
       (mode == VNET_HW_IF_RX_MODE_ADAPTIVE))
     {
@@ -1057,11 +1074,14 @@ vhost_user_interface_rx_mode_change (vnet_main_t * vnm, u32 hw_if_index,
         }
       if (txvq->mode == VNET_HW_IF_RX_MODE_POLLING)
         {
+          ASSERT (cpu->polling_q_count != 0);
+          if (cpu->polling_q_count)
+            cpu->polling_q_count--;
          vum->ifq_count++;
           // Start the timer if this is the first encounter on interrupt
           // interface/queue
           if ((vum->ifq_count == 1) &&
-              (vum->coalesce_time > 0.0) && (vum->coalesce_frames > 0))
+              ((vum->coalesce_time > 0.0) || (vum->coalesce_frames > 0)))
             vlib_process_signal_event (vm,
                                        vhost_user_send_interrupt_node.index,
                                        VHOST_USER_EVENT_START_TIMER, 0);
@@ -1072,10 +1092,10 @@ vhost_user_interface_rx_mode_change (vnet_main_t * vnm, u32 hw_if_index,
       if (((txvq->mode == VNET_HW_IF_RX_MODE_INTERRUPT) ||
            (txvq->mode == VNET_HW_IF_RX_MODE_ADAPTIVE)) && vum->ifq_count)
         {
+          cpu->polling_q_count++;
           vum->ifq_count--;
           // Stop the timer if there is no more interrupt interface/queue
-          if ((vum->ifq_count == 0) &&
-              (vum->coalesce_time > 0.0) && (vum->coalesce_frames > 0))
+          if (vum->ifq_count == 0)
             vlib_process_signal_event (vm,
                                        vhost_user_send_interrupt_node.index,
                                        VHOST_USER_EVENT_STOP_TIMER, 0);
@@ -1083,17 +1103,7 @@ vhost_user_interface_rx_mode_change (vnet_main_t * vnm, u32 hw_if_index,
         }
 
   txvq->mode = mode;
-  if (mode == VNET_HW_IF_RX_MODE_POLLING)
-    txvq->used->flags = VRING_USED_F_NO_NOTIFY;
-  else if ((mode == VNET_HW_IF_RX_MODE_ADAPTIVE) ||
-           (mode == VNET_HW_IF_RX_MODE_INTERRUPT))
-    txvq->used->flags = 0;
-  else
-    {
-      vu_log_err (vui, "unhandled mode %d changed for if %d queue %d", mode,
-                  hw_if_index, qid);
-      return clib_error_return (0, "unsupported");
-    }
+  vhost_user_set_operation_mode (vui, txvq);
 
   return 0;
 }
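
For reference, a small standalone demo of the iteration pattern behind the three FOR_ALL_VHOST_* macros added to vhost_user.h above (macro bodies copied from the diff; vui_t is an illustrative stand-in for vhost_user_intf_t): TX vrings sit at the odd indices, RX vrings at the even ones, and num_qid counts both.

#include <stdint.h>
#include <stdio.h>

/* Macro bodies as added to vhost_user.h */
#define FOR_ALL_VHOST_TXQ(qid, vui) for (qid = 1; qid < vui->num_qid; qid += 2)
#define FOR_ALL_VHOST_RXQ(qid, vui) for (qid = 0; qid < vui->num_qid; qid += 2)
#define FOR_ALL_VHOST_RX_TXQ(qid, vui) for (qid = 0; qid < vui->num_qid; qid++)

typedef struct
{
  uint32_t num_qid; /* total vring count, e.g. 6 for 3 queue pairs */
} vui_t;

int
main (void)
{
  vui_t intf = { .num_qid = 6 };
  vui_t *vui = &intf; /* the macros dereference a pointer */
  uint32_t qid;

  FOR_ALL_VHOST_TXQ (qid, vui)
    printf ("TX vring %u\n", qid); /* prints 1, 3, 5 */

  FOR_ALL_VHOST_RXQ (qid, vui)
    printf ("RX vring %u\n", qid); /* prints 0, 2, 4 */

  FOR_ALL_VHOST_RX_TXQ (qid, vui)
    printf ("vring %u\n", qid); /* prints 0 through 5 */

  return 0;
}

Note that the driver code above still filters out vrings the guest never allocated (the txvq->qid == -1 checks); the macros themselves do not.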