author | Steven Luong <sluong@cisco.com> | 2021-08-23 14:31:16 -0700
committer | Damjan Marion <dmarion@me.com> | 2021-08-26 14:48:54 +0000
commit | ce5075823f8c3629d8233994d129a6835e7226a0
tree | bb6841667774512f2cb473147d8a918d6272d5c4 /src/vnet/devices
parent | 1eaaba41982861b59f0b70470604f6d111c68fa6
vhost: migrate to new TX infra
Take advantage of the new TX infra and support manual thread placement
Type: improvement
Signed-off-by: Steven Luong <sluong@cisco.com>
Change-Id: Id8023846a2eb59125fcf2f80f4f11eb681cf14dc
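The "new TX infra" referenced above is vnet's generic TX-queue layer, which takes over the queue-to-thread mapping this driver previously kept in its private per_cpu_tx_qid vector; once queues are registered there, manual thread placement comes from the generic infrastructure rather than from driver code. Below is a minimal sketch of the registration and round-robin placement pattern the patch adopts — my_iface_t and my_place_tx_queues are illustrative stand-ins, not part of the patch, while the vnet_hw_if_* calls are the ones used in the diff that follows:

```c
#include <vnet/vnet.h>
#include <vnet/interface/tx_queue_funcs.h>

typedef struct
{
  u32 hw_if_index;
  u32 *tx_queue_indices; /* per device queue; ~0 means not yet registered */
} my_iface_t;

static void
my_place_tx_queues (vnet_main_t *vnm, my_iface_t *mif, u32 n_queues)
{
  /* Register each device queue once with the interface layer. */
  for (u32 q = 0; q < n_queues; q++)
    if (mif->tx_queue_indices[q] == ~0)
      mif->tx_queue_indices[q] =
        vnet_hw_if_register_tx_queue (vnm, mif->hw_if_index, q);

  /* Drop any stale thread assignments... */
  for (u32 q = 0; q < n_queues; q++)
    for (u32 t = 0; t < vlib_get_n_threads (); t++)
      vnet_hw_if_tx_queue_unassign_thread (vnm, mif->tx_queue_indices[q], t);

  /* ...then spread threads over the available queues round-robin; a queue
     that ends up with more than one thread becomes a shared queue, and the
     TX path is told to take a lock for it. */
  for (u32 t = 0; t < vlib_get_n_threads (); t++)
    vnet_hw_if_tx_queue_assign_thread (vnm,
                                       mif->tx_queue_indices[t % n_queues], t);

  /* Rebuild interface runtime data so TX frames carry the new queue ids. */
  vnet_hw_if_update_runtime_data (vnm, mif->hw_if_index);
}
```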
Diffstat (limited to 'src/vnet/devices')
-rw-r--r-- | src/vnet/devices/virtio/vhost_user.c | 95
-rw-r--r-- | src/vnet/devices/virtio/vhost_user.h | 8
-rw-r--r-- | src/vnet/devices/virtio/vhost_user_output.c | 52
3 files changed, 67 insertions(+), 88 deletions(-)
diff --git a/src/vnet/devices/virtio/vhost_user.c b/src/vnet/devices/virtio/vhost_user.c
index 7ea7cbef995..089e08aabb4 100644
--- a/src/vnet/devices/virtio/vhost_user.c
+++ b/src/vnet/devices/virtio/vhost_user.c
@@ -37,6 +37,7 @@
 #include <vnet/devices/devices.h>
 #include <vnet/feature/feature.h>
 #include <vnet/interface/rx_queue_funcs.h>
+#include <vnet/interface/tx_queue_funcs.h>
 #include <vnet/devices/virtio/vhost_user.h>
 #include <vnet/devices/virtio/vhost_user_inline.h>
 
@@ -116,40 +117,45 @@ unmap_all_mem_regions (vhost_user_intf_t * vui)
 }
 
 static_always_inline void
-vhost_user_tx_thread_placement (vhost_user_intf_t * vui)
+vhost_user_tx_thread_placement (vhost_user_intf_t *vui, u32 qid)
 {
-  //Let's try to assign one queue to each thread
-  u32 qid;
-  u32 thread_index = 0;
+  vnet_main_t *vnm = vnet_get_main ();
+  vhost_user_vring_t *rxvq = &vui->vrings[qid];
+  u32 q = qid >> 1, rxvq_count;
 
-  vui->use_tx_spinlock = 0;
-  while (1)
+  ASSERT ((qid & 1) == 0);
+  if (!rxvq->started || !rxvq->enabled)
+    return;
+
+  rxvq_count = (qid >> 1) + 1;
+  if (rxvq->queue_index == ~0)
     {
-      for (qid = 0; qid < vui->num_qid / 2; qid++)
-	{
-	  vhost_user_vring_t *rxvq = &vui->vrings[VHOST_VRING_IDX_RX (qid)];
-	  if (!rxvq->started || !rxvq->enabled)
-	    continue;
-
-	  vui->per_cpu_tx_qid[thread_index] = qid;
-	  thread_index++;
-	  if (thread_index == vlib_get_thread_main ()->n_vlib_mains)
-	    return;
-	}
-      //We need to loop, meaning the spinlock has to be used
-      vui->use_tx_spinlock = 1;
-      if (thread_index == 0)
-	{
-	  //Could not find a single valid one
-	  for (thread_index = 0;
-	       thread_index < vlib_get_thread_main ()->n_vlib_mains;
-	       thread_index++)
-	    {
-	      vui->per_cpu_tx_qid[thread_index] = 0;
-	    }
-	  return;
-	}
+      rxvq->queue_index =
+	vnet_hw_if_register_tx_queue (vnm, vui->hw_if_index, q);
+      rxvq->qid = q;
+    }
+
+  FOR_ALL_VHOST_RXQ (q, vui)
+    {
+      vhost_user_vring_t *rxvq = &vui->vrings[q];
+      u32 qi = rxvq->queue_index;
+
+      if (rxvq->queue_index == ~0)
+	break;
+      for (u32 i = 0; i < vlib_get_n_threads (); i++)
+	vnet_hw_if_tx_queue_unassign_thread (vnm, qi, i);
+    }
+
+  for (u32 i = 0; i < vlib_get_n_threads (); i++)
+    {
+      vhost_user_vring_t *rxvq =
+	&vui->vrings[VHOST_VRING_IDX_RX (i % rxvq_count)];
+      u32 qi = rxvq->queue_index;
+
+      vnet_hw_if_tx_queue_assign_thread (vnm, qi, i);
     }
+
+  vnet_hw_if_update_runtime_data (vnm, vui->hw_if_index);
 }
 
 /**
@@ -243,7 +249,7 @@ vhost_user_thread_placement (vhost_user_intf_t * vui, u32 qid)
       vhost_user_rx_thread_placement (vui, qid);
     }
   else
-    vhost_user_tx_thread_placement (vui);
+    vhost_user_tx_thread_placement (vui, qid);
 }
 
 static clib_error_t *
@@ -1657,10 +1663,6 @@ vhost_user_vui_init (vnet_main_t * vnm, vhost_user_intf_t * vui,
 
   if (sw_if_index)
     *sw_if_index = vui->sw_if_index;
-
-  vec_validate (vui->per_cpu_tx_qid,
-		vlib_get_thread_main ()->n_vlib_mains - 1);
-  vhost_user_tx_thread_placement (vui);
 }
 
 int
@@ -2130,7 +2132,6 @@ show_vhost_user_command_fn (vlib_main_t * vm,
   u32 hw_if_index, *hw_if_indices = 0;
   vnet_hw_interface_t *hi;
   u16 qid;
-  u32 ci;
   int i, j, q;
   int show_descr = 0;
   int show_verbose = 0;
@@ -2263,13 +2264,20 @@ show_vhost_user_command_fn (vlib_main_t * vm,
			       txvq->mode);
	    }
 
-	  vlib_cli_output (vm, " tx placement: %s\n",
-			   vui->use_tx_spinlock ? "spin-lock" : "lock-free");
+	  vlib_cli_output (vm, " tx placement\n");
 
-	  vec_foreach_index (ci, vui->per_cpu_tx_qid)
+	  FOR_ALL_VHOST_RXQ (qid, vui)
	    {
-	      vlib_cli_output (vm, "   thread %d on vring %d\n", ci,
-			       VHOST_VRING_IDX_RX (vui->per_cpu_tx_qid[ci]));
+	      vhost_user_vring_t *rxvq = &vui->vrings[qid];
+	      vnet_hw_if_tx_queue_t *txq;
+
+	      if (rxvq->queue_index == ~0)
+		continue;
+	      txq = vnet_hw_if_get_tx_queue (vnm, rxvq->queue_index);
+	      if (txq->threads)
+		vlib_cli_output (vm, "   threads %U on vring %u: %s\n",
+				 format_bitmap_list, txq->threads, qid,
+				 txq->shared_queue ? "spin-lock" : "lock-free");
	    }
 
	  vlib_cli_output (vm, "\n");
@@ -2302,9 +2310,8 @@ show_vhost_user_command_fn (vlib_main_t * vm,
	  vlib_cli_output (vm, "\n Virtqueue %d (%s%s)\n", q,
			   (q & 1) ? "RX" : "TX",
			   vui->vrings[q].enabled ? "" : " disabled");
-	  if (q & 1)
-	    vlib_cli_output (vm, "  global RX queue index %u\n",
-			     vui->vrings[q].queue_index);
+	  vlib_cli_output (vm, "  global %s queue index %u\n",
+			   (q & 1) ? "RX" : "TX", vui->vrings[q].queue_index);
 
	  vlib_cli_output (
	    vm,
diff --git a/src/vnet/devices/virtio/vhost_user.h b/src/vnet/devices/virtio/vhost_user.h
index 8ecf4a3aca7..59db5b4c592 100644
--- a/src/vnet/devices/virtio/vhost_user.h
+++ b/src/vnet/devices/virtio/vhost_user.h
@@ -30,8 +30,8 @@
  * The max number for q pair is naturally 128.
  */
 #define VHOST_VRING_MAX_MQ_PAIR_SZ 128
-#define VHOST_VRING_IDX_RX(qid) (2*qid)
-#define VHOST_VRING_IDX_TX(qid) (2*qid + 1)
+#define VHOST_VRING_IDX_RX(qid) (2 * (qid))
+#define VHOST_VRING_IDX_TX(qid) (2 * (qid) + 1)
 
 #define VHOST_USER_VRING_NOFD_MASK 0x100
 
@@ -279,10 +279,6 @@ typedef struct
   void *log_base_addr;
   u64 log_size;
 
-  /* Whether to use spinlock or per_cpu_tx_qid assignment */
-  u8 use_tx_spinlock;
-  u16 *per_cpu_tx_qid;
-
   u8 enable_gso;
 
   /* Packed ring configured */
diff --git a/src/vnet/devices/virtio/vhost_user_output.c b/src/vnet/devices/virtio/vhost_user_output.c
index d3e38bfa04e..4efafa85333 100644
--- a/src/vnet/devices/virtio/vhost_user_output.c
+++ b/src/vnet/devices/virtio/vhost_user_output.c
@@ -118,24 +118,6 @@ vhost_user_name_renumber (vnet_hw_interface_t * hi, u32 new_dev_instance)
   return 0;
 }
 
-/**
- * @brief Spin until the vring is successfully locked
- */
-static_always_inline void
-vhost_user_vring_lock (vhost_user_intf_t * vui, u32 qid)
-{
-  clib_spinlock_lock_if_init (&vui->vrings[qid].vring_lock);
-}
-
-/**
- * @brief Unlock the vring lock
- */
-static_always_inline void
-vhost_user_vring_unlock (vhost_user_intf_t * vui, u32 qid)
-{
-  clib_spinlock_unlock_if_init (&vui->vrings[qid].vring_lock);
-}
-
 static_always_inline void
 vhost_user_tx_trace (vhost_trace_t * t,
		      vhost_user_intf_t * vui, u16 qid,
@@ -377,17 +359,14 @@ vhost_user_tx_trace_packed (vhost_trace_t * t, vhost_user_intf_t * vui,
 }
 
 static_always_inline uword
-vhost_user_device_class_packed (vlib_main_t * vm, vlib_node_runtime_t * node,
-				vlib_frame_t * frame)
+vhost_user_device_class_packed (vlib_main_t *vm, vlib_node_runtime_t *node,
+				vlib_frame_t *frame, vhost_user_intf_t *vui,
+				vhost_user_vring_t *rxvq)
 {
   u32 *buffers = vlib_frame_vector_args (frame);
   u32 n_left = frame->n_vectors;
   vhost_user_main_t *vum = &vhost_user_main;
-  vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
-  vhost_user_intf_t *vui =
-    pool_elt_at_index (vum->vhost_user_interfaces, rd->dev_instance);
-  u32 qid;
-  vhost_user_vring_t *rxvq;
+  u32 qid = rxvq->qid;
   u8 error;
   u32 thread_index = vm->thread_index;
   vhost_cpu_t *cpu = &vum->cpus[thread_index];
@@ -401,10 +380,6 @@ vhost_user_device_class_packed (vlib_main_t * vm, vlib_node_runtime_t * node,
   u16 n_descs_processed;
   u8 indirect, chained;
 
-  qid = VHOST_VRING_IDX_RX (*vec_elt_at_index (vui->per_cpu_tx_qid,
-					       thread_index));
-  rxvq = &vui->vrings[qid];
-
 retry:
   error = VHOST_USER_TX_FUNC_ERROR_NONE;
   tx_headers_len = 0;
@@ -682,7 +657,7 @@ done:
       goto retry;
     }
 
-  vhost_user_vring_unlock (vui, qid);
+  clib_spinlock_unlock (&rxvq->vring_lock);
 
   if (PREDICT_FALSE (n_left && error != VHOST_USER_TX_FUNC_ERROR_NONE))
     {
@@ -706,7 +681,7 @@ VNET_DEVICE_CLASS_TX_FN (vhost_user_device_class) (vlib_main_t * vm,
   vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
   vhost_user_intf_t *vui =
     pool_elt_at_index (vum->vhost_user_interfaces, rd->dev_instance);
-  u32 qid = ~0;
+  u32 qid;
   vhost_user_vring_t *rxvq;
   u8 error;
   u32 thread_index = vm->thread_index;
@@ -716,6 +691,7 @@ VNET_DEVICE_CLASS_TX_FN (vhost_user_device_class) (vlib_main_t * vm,
   u16 copy_len;
   u16 tx_headers_len;
   u32 or_flags;
+  vnet_hw_if_tx_frame_t *tf = vlib_frame_scalar_args (frame);
 
   if (PREDICT_FALSE (!vui->admin_up))
     {
@@ -729,20 +705,20 @@ VNET_DEVICE_CLASS_TX_FN (vhost_user_device_class) (vlib_main_t * vm,
       goto done3;
     }
 
-  qid = VHOST_VRING_IDX_RX (*vec_elt_at_index (vui->per_cpu_tx_qid,
-					       thread_index));
+  qid = VHOST_VRING_IDX_RX (tf->queue_id);
   rxvq = &vui->vrings[qid];
+  ASSERT (tf->queue_id == rxvq->qid);
+
   if (PREDICT_FALSE (rxvq->avail == 0))
     {
       error = VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL;
       goto done3;
     }
-
-  if (PREDICT_FALSE (vui->use_tx_spinlock))
-    vhost_user_vring_lock (vui, qid);
+  if (tf->shared_queue)
+    clib_spinlock_lock (&rxvq->vring_lock);
 
   if (vhost_user_is_packed_ring_supported (vui))
-    return (vhost_user_device_class_packed (vm, node, frame));
+    return (vhost_user_device_class_packed (vm, node, frame, vui, rxvq));
 
 retry:
   error = VHOST_USER_TX_FUNC_ERROR_NONE;
@@ -1020,7 +996,7 @@ done:
       vhost_user_send_call (vm, vui, rxvq);
     }
 
-  vhost_user_vring_unlock (vui, qid);
+  clib_spinlock_unlock (&rxvq->vring_lock);
 
 done3:
   if (PREDICT_FALSE (n_left && error != VHOST_USER_TX_FUNC_ERROR_NONE))
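On the output side, the per-thread per_cpu_tx_qid lookup disappears entirely: the interface layer stamps every TX frame with the queue to use and whether that queue is shared, which the patch reads via vlib_frame_scalar_args (frame). Here is a sketch of that consumer pattern under the same assumptions — my_vring_t and my_vrings are hypothetical stand-ins for the driver's vring state, and the sketch pairs lock and unlock under the same shared_queue test:

```c
#include <vnet/vnet.h>
#include <vppinfra/lock.h>

#define MY_MAX_QUEUES 8

typedef struct
{
  clib_spinlock_t vring_lock;
  /* ... descriptor ring state would live here ... */
} my_vring_t;

static my_vring_t my_vrings[MY_MAX_QUEUES];

static uword
my_tx_fn (vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
{
  /* queue_id and shared_queue are stamped on the frame by the interface
     layer, based on the thread placement registered at setup time. */
  vnet_hw_if_tx_frame_t *tf = vlib_frame_scalar_args (frame);
  my_vring_t *vring = &my_vrings[tf->queue_id];

  /* Lock only when the placement assigned several threads to this queue. */
  if (tf->shared_queue)
    clib_spinlock_lock (&vring->vring_lock);

  /* ... enqueue frame->n_vectors buffers from vlib_frame_vector_args () ... */

  if (tf->shared_queue)
    clib_spinlock_unlock (&vring->vring_lock);

  return frame->n_vectors;
}
```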