author | Steven Luong <sluong@cisco.com> | 2021-06-01 14:09:28 -0700
committer | Damjan Marion <dmarion@me.com> | 2021-08-31 15:27:37 +0000
commit | 29dc67b5cbc96c988d6f859728ee040ff4991cf4 (patch)
tree | d5958947222370ee02954ed07bcb2149e54d63ae /src/plugins/vmxnet3
parent | 6b3eeebacf8ee5c5be56b98c62a696f19e518e84 (diff)
vmxnet3: set RX interrupt pending only when needed
When an RX thread handles more than one RX queue and has a mix of
queues in interrupt mode and polling mode, the RX input routine
naturally runs in polling mode. In that case, there is no need to set
RX interrupt pending when a descriptor becomes available in a queue
that is in interrupt mode.
Type: fix
Signed-off-by: Steven Luong <sluong@cisco.com>
Change-Id: Iedbe57941eca3152c0e8ab9096cc81f315e0a915
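In other words, each worker thread keeps a count of the RX queues it polls, and the MSI-X handler raises the interrupt-pending flag only for an interrupt-mode queue whose thread polls nothing. The sketch below is a minimal standalone model of that logic in plain C; the type and function names are condensed stand-ins for the VPP structures shown in the diff, not the actual plugin code.

```c
#include <assert.h>
#include <stdint.h>

/* Hypothetical, simplified model of the bookkeeping this patch adds. */
typedef struct
{
  uint32_t polling_q_count; /* RX queues in polling mode on this thread */
} per_thread_data_t;

typedef struct
{
  uint32_t mode;          /* RX_MODE_POLLING or RX_MODE_INTERRUPT */
  uint32_t thread_index;  /* thread that services this queue */
} rxq_t;

enum
{
  RX_MODE_POLLING = 0,
  RX_MODE_INTERRUPT = 1,
};

/* On an RX mode change, keep the per-thread polling-queue count exact. */
static void
rx_mode_change (per_thread_data_t *ptd_by_thread, rxq_t *rxq, uint32_t mode)
{
  per_thread_data_t *ptd = &ptd_by_thread[rxq->thread_index];

  if (mode == rxq->mode)
    return;
  rxq->mode = mode;
  if (mode == RX_MODE_POLLING)
    ptd->polling_q_count++;
  else
    {
      assert (ptd->polling_q_count != 0);
      ptd->polling_q_count--;
    }
}

/* IRQ handler decision: mark "RX interrupt pending" only when the queue is
   in interrupt mode AND its thread polls no other queue; a polling thread
   will pick up the new descriptors on its next iteration anyway. */
static int
need_rx_int_pending (per_thread_data_t *ptd_by_thread, rxq_t *rxq)
{
  return rxq->mode != RX_MODE_POLLING &&
         ptd_by_thread[rxq->thread_index].polling_q_count == 0;
}
```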
Diffstat (limited to 'src/plugins/vmxnet3')
-rw-r--r-- | src/plugins/vmxnet3/cli.c | 12
-rw-r--r-- | src/plugins/vmxnet3/vmxnet3.c | 43
-rw-r--r-- | src/plugins/vmxnet3/vmxnet3.h | 10
3 files changed, 59 insertions, 6 deletions
diff --git a/src/plugins/vmxnet3/cli.c b/src/plugins/vmxnet3/cli.c
index 71342bd535c..574d86fe220 100644
--- a/src/plugins/vmxnet3/cli.c
+++ b/src/plugins/vmxnet3/cli.c
@@ -213,6 +213,15 @@ show_vmxnet3 (vlib_main_t * vm, u32 * hw_if_indices, u8 show_descr,
   vmxnet3_tx_comp *tx_comp;
   u16 qid;
 
+  vlib_cli_output (vm, "Global:");
+  for (u32 tid = 0; tid <= vlib_num_workers (); tid++)
+    {
+      vmxnet3_per_thread_data_t *ptd =
+        vec_elt_at_index (vmxm->per_thread_data, tid);
+      vlib_cli_output (vm, "  Thread %u: polling queue count %u", tid,
+                       ptd->polling_q_count);
+    }
+
   if (!hw_if_indices)
     return;
 
@@ -581,11 +590,14 @@ clib_error_t *
 vmxnet3_cli_init (vlib_main_t * vm)
 {
   vmxnet3_main_t *vmxm = &vmxnet3_main;
+  vlib_thread_main_t *tm = vlib_get_thread_main ();
 
   /* initialize binary API */
   vmxnet3_plugin_api_hookup (vm);
 
   vmxm->log_default = vlib_log_register_class ("vmxnet3", 0);
+
+  vec_validate (vmxm->per_thread_data, tm->n_vlib_mains - 1);
 
   return 0;
 }
diff --git a/src/plugins/vmxnet3/vmxnet3.c b/src/plugins/vmxnet3/vmxnet3.c
index ff0a7dc706b..16d7d86fb3a 100644
--- a/src/plugins/vmxnet3/vmxnet3.c
+++ b/src/plugins/vmxnet3/vmxnet3.c
@@ -69,11 +69,23 @@ vmxnet3_interface_rx_mode_change (vnet_main_t * vnm, u32 hw_if_index, u32 qid,
   vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
   vmxnet3_device_t *vd = pool_elt_at_index (vmxm->devices, hw->dev_instance);
   vmxnet3_rxq_t *rxq = vec_elt_at_index (vd->rxqs, qid);
+  vmxnet3_per_thread_data_t *ptd;
 
-  if (mode == VNET_HW_IF_RX_MODE_POLLING)
-    rxq->int_mode = 0;
+  if (mode == rxq->mode)
+    return 0;
+  if ((mode != VNET_HW_IF_RX_MODE_POLLING) &&
+      (mode != VNET_HW_IF_RX_MODE_INTERRUPT))
+    return clib_error_return (0, "Rx mode %U not supported",
+                              format_vnet_hw_if_rx_mode, mode);
+  rxq->mode = mode;
+  ptd = vec_elt_at_index (vmxm->per_thread_data, rxq->thread_index);
+  if (rxq->mode == VNET_HW_IF_RX_MODE_POLLING)
+    ptd->polling_q_count++;
   else
-    rxq->int_mode = 1;
+    {
+      ASSERT (ptd->polling_q_count != 0);
+      ptd->polling_q_count--;
+    }
 
   return 0;
 }
@@ -288,6 +300,7 @@ vmxnet3_rxq_init (vlib_main_t * vm, vmxnet3_device_t * vd, u16 qid, u16 qsz)
   rxq = vec_elt_at_index (vd->rxqs, qid);
   clib_memset (rxq, 0, sizeof (*rxq));
   rxq->size = qsz;
+  rxq->mode = VNET_HW_IF_RX_MODE_POLLING;
   for (rid = 0; rid < VMXNET3_RX_RING_SIZE; rid++)
     {
       rxq->rx_desc[rid] = vlib_physmem_alloc_aligned_on_numa
@@ -534,8 +547,13 @@ vmxnet3_rxq_irq_handler (vlib_main_t * vm, vlib_pci_dev_handle_t h, u16 line)
   u16 qid = line;
   vmxnet3_rxq_t *rxq = vec_elt_at_index (vd->rxqs, qid);
 
-  if (vec_len (vd->rxqs) > qid && vd->rxqs[qid].int_mode != 0)
-    vnet_hw_if_rx_queue_set_int_pending (vnm, rxq->queue_index);
+  if (vec_len (vd->rxqs) > qid && (rxq->mode != VNET_HW_IF_RX_MODE_POLLING))
+    {
+      vmxnet3_per_thread_data_t *ptd =
+        vec_elt_at_index (vmxm->per_thread_data, rxq->thread_index);
+      if (ptd->polling_q_count == 0)
+        vnet_hw_if_rx_queue_set_int_pending (vnm, rxq->queue_index);
+    }
 }
 
 static void
@@ -815,12 +833,20 @@ vmxnet3_create_if (vlib_main_t * vm, vmxnet3_create_if_args_t * args)
     {
       vmxnet3_rxq_t *rxq = vec_elt_at_index (vd->rxqs, qid);
       u32 qi, fi;
+      vmxnet3_per_thread_data_t *ptd;
 
       qi = vnet_hw_if_register_rx_queue (vnm, vd->hw_if_index, qid,
                                          VNET_HW_IF_RXQ_THREAD_ANY);
       fi = vlib_pci_get_msix_file_index (vm, vd->pci_dev_handle, qid);
       vnet_hw_if_set_rx_queue_file_index (vnm, qi, fi);
       rxq->queue_index = qi;
+      rxq->thread_index =
+        vnet_hw_if_get_rx_queue_thread_index (vnm, rxq->queue_index);
+      if (rxq->mode == VNET_HW_IF_RX_MODE_POLLING)
+        {
+          ptd = vec_elt_at_index (vmxm->per_thread_data, rxq->thread_index);
+          ptd->polling_q_count++;
+        }
       rxq->buffer_pool_index =
         vnet_hw_if_get_rx_queue_numa_node (vnm, rxq->queue_index);
       vmxnet3_rxq_refill_ring0 (vm, vd, rxq);
@@ -886,7 +912,14 @@ vmxnet3_delete_if (vlib_main_t * vm, vmxnet3_device_t * vd)
       vmxnet3_rxq_t *rxq = vec_elt_at_index (vd->rxqs, i);
       u16 mask = rxq->size - 1;
       u16 rid;
+      vmxnet3_per_thread_data_t *ptd =
+        vec_elt_at_index (vmxm->per_thread_data, rxq->thread_index);
 
+      if (rxq->mode == VNET_HW_IF_RX_MODE_POLLING)
+        {
+          ASSERT (ptd->polling_q_count != 0);
+          ptd->polling_q_count--;
+        }
       for (rid = 0; rid < VMXNET3_RX_RING_SIZE; rid++)
         {
           vmxnet3_rx_ring *ring;
diff --git a/src/plugins/vmxnet3/vmxnet3.h b/src/plugins/vmxnet3/vmxnet3.h
index 75107689443..81aeec6f5c5 100644
--- a/src/plugins/vmxnet3/vmxnet3.h
+++ b/src/plugins/vmxnet3/vmxnet3.h
@@ -513,10 +513,17 @@ typedef struct
 typedef struct
 {
   CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+  u32 polling_q_count;
+} vmxnet3_per_thread_data_t;
+
+typedef struct
+{
+  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
   u16 size;
-  u8 int_mode;
+  u32 mode;
   u8 buffer_pool_index;
   u32 queue_index;
+  u32 thread_index;
   vmxnet3_rx_ring rx_ring[VMXNET3_RX_RING_SIZE];
   vmxnet3_rx_desc *rx_desc[VMXNET3_RX_RING_SIZE];
   vmxnet3_rx_comp *rx_comp;
@@ -594,6 +601,7 @@ typedef struct
   vmxnet3_device_t *devices;
   u16 msg_id_base;
   vlib_log_class_t log_default;
+  vmxnet3_per_thread_data_t *per_thread_data;
 } vmxnet3_main_t;
 
 extern vmxnet3_main_t vmxnet3_main;
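For reference, the new per-thread counter is surfaced through the existing `show vmxnet3` CLI via the `Global:` block added in cli.c. The transcript below is purely illustrative (the thread count and values are made up), constructed from the format strings in the diff:

```
vpp# show vmxnet3
Global:
  Thread 0: polling queue count 0
  Thread 1: polling queue count 1
```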