author | Maxime Peim <mpeim@cisco.com> | 2021-05-06 12:17:25 +0200
---|---|---
committer | Mohammed Hawari <mohammed@hawari.fr> | 2021-09-07 15:48:37 +0200
commit | 5b22339973feaa3eba48ead1e9fb428b1816c0cc |
tree | a00efedefde3ddec7693bde6b109267e61b8ad83 /src/vnet |
parent | 277ae8ba43cdfcf9c22adfb675cf5b15b8b4f898 |
interface: full poll vector on adaptive mode
When an input node supports adaptive mode and enters polling, some rx queues may be missing from the node's rx_poll_vector. To fix this, a full poll vector is now computed in vnet_hw_if_update_runtime_data and returned by vnet_hw_if_get_rxq_poll_vector when the node is polling in adaptive mode.
Type: fix
Signed-off-by: Maxime Peim <mpeim@cisco.com>
Change-Id: I249bcb20ae0dd28afb0a5ca32993092bafd2f6b1
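
For context, a device input node consumes this API by calling vnet_hw_if_get_rxq_poll_vector() once per dispatch and servicing every (dev_instance, queue_id) entry it returns, whatever mode the node is in. The sketch below is a minimal illustration of that calling pattern under the fixed behaviour; my_device_input_fn and my_device_rx_one_queue are hypothetical names standing in for a real driver's node function and per-queue receive routine.

```c
#include <vlib/vlib.h>
#include <vnet/interface/rx_queue_funcs.h>

/* Hypothetical per-queue receive helper; a real driver would pull
   descriptors for (dev_instance, queue_id) here. */
static uword
my_device_rx_one_queue (vlib_main_t *vm, u32 dev_instance, u32 queue_id)
{
  return 0;
}

static uword
my_device_input_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
                    vlib_frame_t *frame)
{
  vnet_hw_if_rxq_poll_vector_t *pv;
  uword n_rx = 0;

  /* In interrupt state this builds the vector from pending interrupts;
     with this fix, a node polling in adaptive mode gets the prebuilt
     full poll vector (rxq_vector_poll) instead of the interrupt one. */
  pv = vnet_hw_if_get_rxq_poll_vector (vm, node);

  for (u32 i = 0; i < vec_len (pv); i++)
    n_rx += my_device_rx_one_queue (vm, pv[i].dev_instance, pv[i].queue_id);

  return n_rx;
}
```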
Diffstat (limited to 'src/vnet')
-rw-r--r-- | src/vnet/interface.h | 3
-rw-r--r-- | src/vnet/interface/runtime.c | 40
-rw-r--r-- | src/vnet/interface/rx_queue.c | 4
-rw-r--r-- | src/vnet/interface/rx_queue_funcs.h | 5
4 files changed, 42 insertions, 10 deletions
diff --git a/src/vnet/interface.h b/src/vnet/interface.h
index a761b04aff8..92a43c51958 100644
--- a/src/vnet/interface.h
+++ b/src/vnet/interface.h
@@ -751,7 +751,8 @@ typedef struct
 
 typedef struct
 {
-  vnet_hw_if_rxq_poll_vector_t *rxq_poll_vector;
+  vnet_hw_if_rxq_poll_vector_t *rxq_vector_int;
+  vnet_hw_if_rxq_poll_vector_t *rxq_vector_poll;
   void *rxq_interrupts;
 } vnet_hw_if_rx_node_runtime_t;
 
diff --git a/src/vnet/interface/runtime.c b/src/vnet/interface/runtime.c
index 462f7bbfba7..d507a88f792 100644
--- a/src/vnet/interface/runtime.c
+++ b/src/vnet/interface/runtime.c
@@ -62,7 +62,7 @@ vnet_hw_if_update_runtime_data (vnet_main_t *vnm, u32 hw_if_index)
   vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, hw_if_index);
   u32 node_index = hi->input_node_index;
   vnet_hw_if_rx_queue_t *rxq;
-  vnet_hw_if_rxq_poll_vector_t *pv, **d = 0;
+  vnet_hw_if_rxq_poll_vector_t *pv, **d = 0, **a = 0;
   vnet_hw_if_output_node_runtime_t *new_out_runtimes = 0;
   vlib_node_state_t *per_thread_node_state = 0;
   u32 n_threads = vlib_get_n_threads ();
@@ -76,6 +76,7 @@ vnet_hw_if_update_runtime_data (vnet_main_t *vnm, u32 hw_if_index)
                   format_vlib_node_name, vm, node_index, hi->name);
 
   vec_validate (d, n_threads - 1);
+  vec_validate (a, n_threads - 1);
   vec_validate_init_empty (per_thread_node_state, n_threads - 1,
                            VLIB_NODE_STATE_DISABLED);
   vec_validate_init_empty (per_thread_node_adaptive, n_threads - 1, 0);
@@ -126,6 +127,13 @@ vnet_hw_if_update_runtime_data (vnet_main_t *vnm, u32 hw_if_index)
           rxq->mode == VNET_HW_IF_RX_MODE_ADAPTIVE)
         last_int = clib_max (last_int, rxq - im->hw_if_rx_queues);
 
+      if (per_thread_node_adaptive[ti])
+        {
+          vec_add2_aligned (a[ti], pv, 1, CLIB_CACHE_LINE_BYTES);
+          pv->dev_instance = rxq->dev_instance;
+          pv->queue_id = rxq->queue_id;
+        }
+
       if (per_thread_node_state[ti] != VLIB_NODE_STATE_POLLING)
         continue;
 
@@ -157,13 +165,22 @@ vnet_hw_if_update_runtime_data (vnet_main_t *vnm, u32 hw_if_index)
         {
           vnet_hw_if_rx_node_runtime_t *rt;
           rt = vlib_node_get_runtime_data (ovm, node_index);
-          if (vec_len (rt->rxq_poll_vector) != vec_len (d[i]))
+          if (vec_len (rt->rxq_vector_int) != vec_len (d[i]))
             something_changed_on_rx = 1;
-          else if (memcmp (d[i], rt->rxq_poll_vector,
+          else if (memcmp (d[i], rt->rxq_vector_int,
                            vec_len (d[i]) * sizeof (**d)))
             something_changed_on_rx = 1;
           if (clib_interrupt_get_n_int (rt->rxq_interrupts) != last_int + 1)
             something_changed_on_rx = 1;
+
+          if (something_changed_on_rx == 0 && per_thread_node_adaptive[i])
+            {
+              if (vec_len (rt->rxq_vector_poll) != vec_len (a[i]))
+                something_changed_on_rx = 1;
+              else if (memcmp (a[i], rt->rxq_vector_poll,
+                               vec_len (a[i]) * sizeof (*a)))
+                something_changed_on_rx = 1;
+            }
         }
@@ -223,10 +240,17 @@ vnet_hw_if_update_runtime_data (vnet_main_t *vnm, u32 hw_if_index)
           vlib_main_t *vm = vlib_get_main_by_index (i);
           vnet_hw_if_rx_node_runtime_t *rt;
           rt = vlib_node_get_runtime_data (vm, node_index);
-          pv = rt->rxq_poll_vector;
-          rt->rxq_poll_vector = d[i];
+          pv = rt->rxq_vector_int;
+          rt->rxq_vector_int = d[i];
           d[i] = pv;
 
+          if (per_thread_node_adaptive[i])
+            {
+              pv = rt->rxq_vector_poll;
+              rt->rxq_vector_poll = a[i];
+              a[i] = pv;
+            }
+
           if (rt->rxq_interrupts)
             {
               void *in = rt->rxq_interrupts;
@@ -276,9 +300,13 @@ vnet_hw_if_update_runtime_data (vnet_main_t *vnm, u32 hw_if_index)
     }
 
   for (int i = 0; i < n_threads; i++)
-    vec_free (d[i]);
+    {
+      vec_free (d[i]);
+      vec_free (a[i]);
+    }
 
   vec_free (d);
+  vec_free (a);
   vec_free (per_thread_node_state);
   vec_free (per_thread_node_adaptive);
   vec_free (new_out_runtimes);
diff --git a/src/vnet/interface/rx_queue.c b/src/vnet/interface/rx_queue.c
index 1099a0ba0f9..7632b190705 100644
--- a/src/vnet/interface/rx_queue.c
+++ b/src/vnet/interface/rx_queue.c
@@ -238,7 +238,7 @@ vnet_hw_if_generate_rxq_int_poll_vector (vlib_main_t *vm,
 
   ASSERT (node->state == VLIB_NODE_STATE_INTERRUPT);
 
-  vec_reset_length (rt->rxq_poll_vector);
+  vec_reset_length (rt->rxq_vector_int);
 
   while ((int_num = clib_interrupt_get_next (rt->rxq_interrupts, int_num)) !=
          -1)
@@ -248,7 +248,7 @@ vnet_hw_if_generate_rxq_int_poll_vector (vlib_main_t *vm,
 
      clib_interrupt_clear (rt->rxq_interrupts, int_num);
 
-      vec_add2 (rt->rxq_poll_vector, pv, 1);
+      vec_add2 (rt->rxq_vector_int, pv, 1);
       pv->dev_instance = rxq->dev_instance;
       pv->queue_id = rxq->queue_id;
     }
diff --git a/src/vnet/interface/rx_queue_funcs.h b/src/vnet/interface/rx_queue_funcs.h
index 26dc1b8777f..83f1bac4e94 100644
--- a/src/vnet/interface/rx_queue_funcs.h
+++ b/src/vnet/interface/rx_queue_funcs.h
@@ -69,11 +69,14 @@ static_always_inline vnet_hw_if_rxq_poll_vector_t *
 vnet_hw_if_get_rxq_poll_vector (vlib_main_t *vm, vlib_node_runtime_t *node)
 {
   vnet_hw_if_rx_node_runtime_t *rt = (void *) node->runtime_data;
+  vnet_hw_if_rxq_poll_vector_t *pv = rt->rxq_vector_int;
 
   if (PREDICT_FALSE (node->state == VLIB_NODE_STATE_INTERRUPT))
     vnet_hw_if_generate_rxq_int_poll_vector (vm, node);
+  else if (node->flags & VLIB_NODE_FLAG_ADAPTIVE_MODE)
+    pv = rt->rxq_vector_poll;
 
-  return rt->rxq_poll_vector;
+  return pv;
 }
 
 static_always_inline u8
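
A note on the approach in runtime.c: both per-thread vectors are built into temporaries first (d[i] for the interrupt-mode vector, a[i] for the full adaptive poll vector) and only then published by swapping pointers in each thread's node runtime. Threads where the node is not adaptive never populate a[i], so only the interrupt vector is swapped for them; the displaced vectors land back in d[i]/a[i] and are released in the vec_free loop at the end of the function.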