diff options
author     Florin Coras <fcoras@cisco.com>        2018-10-30 10:21:59 -0700
committer  Marco Varlese <marco.varlese@suse.de>  2018-10-31 08:17:33 +0000
commit     5f56d736cbd752216357357b8de6a120a41c08eb (patch)
tree       783abee8a2c3e77513940734716775a88ac6bdeb
parent     5a7ca7bde104b12f8dc4a751fd911819d4bd6c8e (diff)
session: prioritize postponed sessions
If sessions cannot be handled during the current dispatch loop
iteration, ensure that they are first to be handled in the next.
Change-Id: Ifc6215900f8cfd530d4886b58641189f0ccf9bb7
Signed-off-by: Florin Coras <fcoras@cisco.com>
-rw-r--r--  src/vnet/session/session.c       | 10
-rw-r--r--  src/vnet/session/session.h       |  3
-rw-r--r--  src/vnet/session/session_node.c  | 65
3 files changed, 33 insertions(+), 45 deletions(-)
diff --git a/src/vnet/session/session.c b/src/vnet/session/session.c index 1802c0efd26..3dd80ad337a 100644 --- a/src/vnet/session/session.c +++ b/src/vnet/session/session.c @@ -1344,7 +1344,7 @@ session_manager_main_enable (vlib_main_t * vm) if (num_threads < 1) return clib_error_return (0, "n_thread_stacks not set"); - /* configure per-thread ** vectors */ + /* Allocate cache line aligned worker contexts */ vec_validate_aligned (smm->wrk, num_threads - 1, CLIB_CACHE_LINE_BYTES); for (i = 0; i < TRANSPORT_N_PROTO; i++) @@ -1356,12 +1356,14 @@ session_manager_main_enable (vlib_main_t * vm) for (i = 0; i < num_threads; i++) { wrk = &smm->wrk[i]; - vec_validate (wrk->free_event_vector, 0); + vec_validate (wrk->free_event_vector, 128); _vec_len (wrk->free_event_vector) = 0; - vec_validate (wrk->pending_event_vector, 0); + vec_validate (wrk->pending_event_vector, 128); _vec_len (wrk->pending_event_vector) = 0; - vec_validate (wrk->pending_disconnects, 0); + vec_validate (wrk->pending_disconnects, 128); _vec_len (wrk->pending_disconnects) = 0; + vec_validate (wrk->postponed_event_vector, 128); + _vec_len (wrk->postponed_event_vector) = 0; wrk->last_vlib_time = vlib_time_now (vlib_mains[i]); diff --git a/src/vnet/session/session.h b/src/vnet/session/session.h index a98986124d7..131652a8c6e 100644 --- a/src/vnet/session/session.h +++ b/src/vnet/session/session.h @@ -223,6 +223,9 @@ typedef struct session_manager_worker_ /** Vector of postponed disconnects */ session_event_t *pending_disconnects; + /** Vector of postponed events */ + session_event_t *postponed_event_vector; + /** Peekers rw lock */ clib_rwlock_t peekers_rw_locks; diff --git a/src/vnet/session/session_node.c b/src/vnet/session/session_node.c index 04a5622c835..6ef461a5f34 100644 --- a/src/vnet/session/session_node.c +++ b/src/vnet/session/session_node.c @@ -764,8 +764,7 @@ session_queue_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node, session_manager_main_t *smm = vnet_get_session_manager_main (); u32 
thread_index = vm->thread_index, n_to_dequeue, n_events; session_manager_worker_t *wrk = &smm->wrk[thread_index]; - session_event_t *pending_events, *e; - session_event_t *fifo_events; + session_event_t *e, *fifo_events; svm_msg_q_msg_t _msg, *msg = &_msg; f64 now = vlib_time_now (vm); int n_tx_packets = 0, i, rv; @@ -782,55 +781,39 @@ session_queue_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node, session_update_dispatch_period (wrk, now, thread_index); transport_update_time (now, thread_index); - /* - * Get vpp queue events that we can dequeue without blocking - */ - mq = wrk->vpp_event_queue; - fifo_events = wrk->free_event_vector; - n_to_dequeue = svm_msg_q_size (mq); - pending_events = wrk->pending_event_vector; - - if (!n_to_dequeue && !vec_len (pending_events) - && !vec_len (wrk->pending_disconnects)) - return 0; - SESSION_EVT_DBG (SESSION_EVT_DEQ_NODE, 0); - /* - * If we didn't manage to process previous events try going - * over them again without dequeuing new ones. - * XXX: Handle senders to sessions that can't keep up - */ - if (0 && vec_len (pending_events) >= 100) - { - clib_warning ("too many fifo events unsolved"); - goto skip_dequeue; - } + /* Make sure postponed events are handled first */ + fifo_events = wrk->free_event_vector; + vec_append (fifo_events, wrk->postponed_event_vector); + _vec_len (wrk->pending_disconnects) = 0; - /* See you in the next life, don't be late + /* Try to dequeue what is available. Don't wait for lock. 
* XXX: we may need priorities here */ - if (svm_msg_q_try_lock (mq)) - return 0; - - for (i = 0; i < n_to_dequeue; i++) + mq = wrk->vpp_event_queue; + n_to_dequeue = svm_msg_q_size (mq); + if (n_to_dequeue && svm_msg_q_try_lock (mq) == 0) { - vec_add2 (fifo_events, e, 1); - svm_msg_q_sub_w_lock (mq, msg); - clib_memcpy (e, svm_msg_q_msg_data (mq, msg), sizeof (*e)); - svm_msg_q_free_msg (mq, msg); + for (i = 0; i < n_to_dequeue; i++) + { + vec_add2 (fifo_events, e, 1); + svm_msg_q_sub_w_lock (mq, msg); + clib_memcpy (e, svm_msg_q_msg_data (mq, msg), sizeof (*e)); + svm_msg_q_free_msg (mq, msg); + } + svm_msg_q_unlock (mq); } - svm_msg_q_unlock (mq); - - vec_append (fifo_events, pending_events); + vec_append (fifo_events, wrk->pending_event_vector); vec_append (fifo_events, wrk->pending_disconnects); - _vec_len (pending_events) = 0; - wrk->pending_event_vector = pending_events; - _vec_len (wrk->pending_disconnects) = 0; + _vec_len (wrk->postponed_event_vector) = 0; + _vec_len (wrk->pending_event_vector) = 0; -skip_dequeue: n_events = vec_len (fifo_events); + if (PREDICT_FALSE (!n_events)) + return 0; + for (i = 0; i < n_events; i++) { stream_session_t *s; /* $$$ prefetch 1 ahead maybe */ @@ -844,7 +827,7 @@ skip_dequeue: /* Don't try to send more that one frame per dispatch cycle */ if (n_tx_packets == VLIB_FRAME_SIZE) { - vec_add1 (wrk->pending_event_vector, *e); + vec_add1 (wrk->postponed_event_vector, *e); break; } |