author     2024-12-25 06:49:46 +0100
committer  2025-01-20 22:58:02 +0000
commit     bf49b4ae0834dc917300a4d74e17d3cf6c54093c
tree       d2410ce8ba028e557223213bbb48d1e6a00257bc
parent     bf3e683584fb114cb3effab96b578845feb467a8
tls: async event handling enhancement
This patch updates the async processing logic by adding separate async event
handlers for the handshake, read, and write events.
Type: improvement
Change-Id: I6366689fec7e29fa9850cb22e20ab3659bf5245a
Signed-off-by: Varun Rapelly <vrapelly@marvell.com>
-rw-r--r--  src/plugins/tlsopenssl/tls_async.c    | 820
-rw-r--r--  src/plugins/tlsopenssl/tls_openssl.c  | 107
-rw-r--r--  src/plugins/tlsopenssl/tls_openssl.h  |  17
3 files changed, 345 insertions(+), 599 deletions(-)
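
Before the diff itself, the overall flow the patch introduces can be summarized as: each SSL async event type (handshake, read, write) is registered with its own handler via vpp_tls_async_init_event(), and the generic completion callback (event_handler() in tls_async.c) simply dispatches to whichever handler was stored with the event. The sketch below is a minimal, self-contained illustration of that idea only; the stubbed types and helper names (evt_t, init_event, on_async_done) are invented for the example and do not exist in the plugin, while the three handler roles loosely mirror tls_async_handshake_event_handler, tls_async_read_event_handler and tls_async_write_event_handler from the patch.

```c
/*
 * Simplified sketch of the per-event handler dispatch this patch adds.
 * All types here are stubs for illustration; the real code is in
 * src/plugins/tlsopenssl/tls_async.c.
 */
#include <stdio.h>

typedef enum { EVT_INIT, EVT_RD, EVT_WR, EVT_MAX } evt_type_t;
typedef int (*evt_handler_fn) (void *event, void *session);

typedef struct
{
  evt_type_t type;
  evt_handler_fn handler;	/* stored once at registration time */
} evt_t;

/* One handler per event type: handshake, read, write */
static int
handshake_handler (void *event, void *session)
{
  (void) event; (void) session;
  printf ("resume handshake (SSL_do_handshake)\n");
  return 1;
}

static int
read_handler (void *event, void *session)
{
  (void) event; (void) session;
  printf ("resume read (SSL into app rx fifo)\n");
  return 1;
}

static int
write_handler (void *event, void *session)
{
  (void) event; (void) session;
  printf ("resume write (app tx fifo into SSL)\n");
  return 1;
}

/* Registration: attach the handler to the event
 * (conceptually what vpp_tls_async_init_event does) */
static void
init_event (evt_t *e, evt_type_t type, evt_handler_fn h)
{
  e->type = type;
  e->handler = h;
}

/* Completion callback: dispatch to whichever handler was registered
 * (conceptually what event_handler in tls_async.c does) */
static void
on_async_done (evt_t *e, void *session)
{
  if (e->handler)
    e->handler (e, session);
}

int
main (void)
{
  evt_t evts[EVT_MAX];

  init_event (&evts[EVT_INIT], EVT_INIT, handshake_handler);
  init_event (&evts[EVT_RD], EVT_RD, read_handler);
  init_event (&evts[EVT_WR], EVT_WR, write_handler);

  /* simulate the crypto engine completing each async job */
  for (int i = 0; i < EVT_MAX; i++)
    on_async_done (&evts[i], NULL);
  return 0;
}
```

The key design point this models is that the event no longer carries a single generic resume path: the handler is chosen at registration time per event type, so handshake, read and write completions each resume their own logic, as the diff below implements.
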
diff --git a/src/plugins/tlsopenssl/tls_async.c b/src/plugins/tlsopenssl/tls_async.c index c6d2b2fe9e1..cd08da5d9ea 100644 --- a/src/plugins/tlsopenssl/tls_async.c +++ b/src/plugins/tlsopenssl/tls_async.c @@ -31,13 +31,15 @@ [SSL_CLIENT_HELLO_CB] = "SSL_CLIENT_HELLO_CB", \ } -static const char *ssl_want[] = SSL_WANT_NAMES; +const char *ssl_want[] = SSL_WANT_NAMES; #define foreach_ssl_evt_status_type_ \ _ (INVALID_STATUS, "Async event invalid status") \ _ (INFLIGHT, "Async event inflight") \ _ (READY, "Async event ready") \ _ (REENTER, "Async event reenter") \ + _ (DEQ_DONE, "Async event dequeued") \ + _ (CB_EXECUTED, "Async callback executed") \ _ (MAX_STATUS, "Async event max status") typedef enum ssl_evt_status_type_ @@ -51,6 +53,8 @@ typedef struct openssl_tls_callback_arg_ { int thread_index; int event_index; + ssl_async_evt_type_t async_evt_type; + openssl_resume_handler *evt_handler; } openssl_tls_callback_arg_t; typedef struct openssl_event_ @@ -58,12 +62,13 @@ typedef struct openssl_event_ u32 ctx_index; int session_index; ssl_evt_status_type_t status; - ssl_async_evt_type_t type; - - openssl_resume_handler *handler; + transport_send_params_t *tran_sp; openssl_tls_callback_arg_t cb_args; + #define thread_idx cb_args.thread_index #define event_idx cb_args.event_index +#define async_event_type cb_args.async_evt_type +#define async_evt_handler cb_args.evt_handler int next; } openssl_evt_t; @@ -72,7 +77,6 @@ typedef struct openssl_async_queue_ int evt_run_head; int evt_run_tail; int depth; - int max_depth; } openssl_async_queue_t; typedef struct openssl_async_ @@ -136,12 +140,10 @@ evt_pool_init (vlib_main_t * vm) om->queue[i].evt_run_head = -1; om->queue[i].evt_run_tail = -1; om->queue[i].depth = 0; - om->queue[i].max_depth = 0; om->queue_in_init[i].evt_run_head = -1; om->queue_in_init[i].evt_run_tail = -1; om->queue_in_init[i].depth = 0; - om->queue_in_init[i].max_depth = 0; } om->polling = NULL; @@ -284,17 +286,19 @@ tls_async_openssl_callback (SSL * s, void *cb_arg) openssl_tls_callback_arg_t *args = (openssl_tls_callback_arg_t *) cb_arg; int thread_index = args->thread_index; int event_index = args->event_index; + ssl_async_evt_type_t evt_type = args->async_evt_type; + int *evt_run_tail, *evt_run_head; TLS_DBG (2, "Set event %d to run\n", event_index); event = openssl_evt_get_w_thread (event_index, thread_index); - if (event->type == SSL_ASYNC_EVT_INIT) + if (evt_type == SSL_ASYNC_EVT_INIT) queue = om->queue_in_init; else queue = om->queue; - int *evt_run_tail = &queue[thread_index].evt_run_tail; - int *evt_run_head = &queue[thread_index].evt_run_head; + evt_run_tail = &queue[thread_index].evt_run_tail; + evt_run_head = &queue[thread_index].evt_run_head; /* Happend when a recursive case, especially in SW simulation */ if (PREDICT_FALSE (event->status == SSL_ASYNC_READY)) @@ -305,16 +309,17 @@ tls_async_openssl_callback (SSL * s, void *cb_arg) event->status = SSL_ASYNC_READY; event->next = -1; - if (*evt_run_tail >= 0) + if (*evt_run_head < 0) + *evt_run_head = event_index; + else if (*evt_run_tail >= 0) { event_tail = openssl_evt_get_w_thread (*evt_run_tail, thread_index); event_tail->next = event_index; } + + queue[thread_index].depth++; + *evt_run_tail = event_index; - if (*evt_run_head < 0) - { - *evt_run_head = event_index; - } return 1; } @@ -344,42 +349,6 @@ openssl_async_write_from_fifo_into_ssl (svm_fifo_t *f, SSL *ssl, return wrote; } -/* - * Perform SSL_write from TX FIFO head. - * On successful write, TLS context total_async_write bytes are updated. 
- */ -static_always_inline int -openssl_write_from_fifo_head_into_ssl (svm_fifo_t *f, SSL *ssl, - openssl_ctx_t *oc, u32 max_len) -{ - int wrote = 0, rv, i = 0, len; - u32 n_segs = 2; - svm_fifo_seg_t fs[n_segs]; - - max_len = clib_min (oc->total_async_write, max_len); - - len = svm_fifo_segments (f, 0, fs, &n_segs, max_len); - if (len <= 0) - return 0; - - while (wrote < len && i < n_segs) - { - rv = SSL_write (ssl, fs[i].data, fs[i].len); - wrote += (rv > 0) ? rv : 0; - if (rv < (int) fs[i].len) - break; - i++; - } - - if (wrote) - { - oc->total_async_write -= wrote; - svm_fifo_dequeue_drop (f, wrote); - } - - return wrote; -} - static int openssl_async_read_from_ssl_into_fifo (svm_fifo_t *f, SSL *ssl) { @@ -394,455 +363,53 @@ openssl_async_read_from_ssl_into_fifo (svm_fifo_t *f, SSL *ssl) return read; } -/* - * Pop the current event from queue and update tail if needed - */ -static void -tls_async_dequeue_update (openssl_evt_t *event, int *evt_run_head, - int *evt_run_tail, int *queue_depth) -{ - /* remove the event from queue head */ - *evt_run_head = event->next; - event->status = SSL_ASYNC_INVALID_STATUS; - event->next = -1; - - (*queue_depth)--; - - if (*evt_run_head < 0) - { - *evt_run_tail = -1; - if (*queue_depth) - clib_warning ("queue empty but depth:%d\n", *queue_depth); - } -} - -static int -tls_async_dequeue_event (int thread_index) -{ - openssl_evt_t *event; - openssl_async_t *om = &openssl_async_main; - openssl_async_queue_t *queue = om->queue; - int *evt_run_tail = &queue[thread_index].evt_run_tail; - int *evt_run_head = &queue[thread_index].evt_run_head; - int dequeue_cnt = clib_min (queue[thread_index].depth, MAX_VECTOR_ASYNC); - const u32 max_len = 128 << 10; - - /* dequeue all pending events, events enqueued during this routine call, - * will be handled next time tls_async_dequeue_event is invoked */ - while (*evt_run_head >= 0 && dequeue_cnt--) - { - session_t *app_session, *tls_session; - openssl_ctx_t *oc; - tls_ctx_t *ctx; - SSL *ssl; - - event = openssl_evt_get_w_thread (*evt_run_head, thread_index); - ctx = openssl_ctx_get_w_thread (event->ctx_index, thread_index); - oc = (openssl_ctx_t *) ctx; - ssl = oc->ssl; - - if (event->type == SSL_ASYNC_EVT_RD) - { - /* read event */ - svm_fifo_t *app_rx_fifo, *tls_rx_fifo; - int read; - - app_session = session_get_from_handle (ctx->app_session_handle); - app_rx_fifo = app_session->rx_fifo; - - tls_session = session_get_from_handle (ctx->tls_session_handle); - tls_rx_fifo = tls_session->rx_fifo; - - /* continue the paused job */ - read = openssl_async_read_from_ssl_into_fifo (app_rx_fifo, ssl); - if (read < 0) - { - if (SSL_want_async (ssl)) - goto handle_later; - - tls_async_dequeue_update (event, evt_run_head, evt_run_tail, - &queue[thread_index].depth); - goto ev_rd_done; - } - - /* read finished or in error, remove the event from queue */ - tls_async_dequeue_update (event, evt_run_head, evt_run_tail, - &queue[thread_index].depth); - - /* Unrecoverable protocol error. Reset connection */ - if (PREDICT_FALSE ((read < 0) && - (SSL_get_error (ssl, read) == SSL_ERROR_SSL))) - { - tls_notify_app_io_error (ctx); - goto ev_rd_done; - } - - /* - * Managed to read some data. If handshake just completed, session - * may still be in accepting state. 
- */ - if (app_session->session_state >= SESSION_STATE_READY) - tls_notify_app_enqueue (ctx, app_session); - - /* managed to read, try to read more */ - while (read > 0) - { - read = - openssl_read_from_ssl_into_fifo (app_rx_fifo, ctx, max_len); - if (read < 0) - { - if (SSL_want_async (ssl)) - { - vpp_tls_async_enqueue_event (oc, SSL_ASYNC_EVT_RD, NULL, - 0); - goto ev_rd_queued; - } - } - - /* Unrecoverable protocol error. Reset connection */ - if (PREDICT_FALSE ((read < 0) && - (SSL_get_error (ssl, read) == SSL_ERROR_SSL))) - { - tls_notify_app_io_error (ctx); - goto ev_rd_done; - } - - /* If handshake just completed, session may still be in accepting - * state */ - if (read >= 0 && - app_session->session_state >= SESSION_STATE_READY) - tls_notify_app_enqueue (ctx, app_session); - } - - ev_rd_done: - /* read done */ - ctx->flags &= ~TLS_CONN_F_ASYNC_RD; - - if ((SSL_pending (ssl) > 0) || - svm_fifo_max_dequeue_cons (tls_rx_fifo)) - { - tls_add_vpp_q_builtin_rx_evt (tls_session); - } - - ev_rd_queued: - continue; - } - else if (event->type == SSL_ASYNC_EVT_WR) - { - /* write event */ - int wrote, wrote_sum = 0; - u32 space, enq_buf; - svm_fifo_t *app_tx_fifo, *tls_tx_fifo; - transport_send_params_t *sp = - (transport_send_params_t *) event->handler; - - app_session = session_get_from_handle (ctx->app_session_handle); - app_tx_fifo = app_session->tx_fifo; - - /* continue the paused job */ - wrote = - openssl_async_write_from_fifo_into_ssl (app_tx_fifo, ssl, oc); - if (wrote < 0) - { - if (SSL_want_async (ssl)) - /* paused job not ready, wait */ - goto handle_later; - clib_warning ("[wrote:%d want:%s ctx:%d]\n", wrote, - ssl_want[SSL_want (ssl)], oc->openssl_ctx_index); - } - wrote_sum += wrote; - - /* paused job done, remove event, update queue */ - tls_async_dequeue_update (event, evt_run_head, evt_run_tail, - &queue[thread_index].depth); - - /* Unrecoverable protocol error. Reset connection */ - if (PREDICT_FALSE (wrote < 0)) - { - tls_notify_app_io_error (ctx); - clib_warning ( - "Unrecoverable protocol error. Reset connection\n"); - goto ev_in_queue; - } - - tls_session = session_get_from_handle (ctx->tls_session_handle); - tls_tx_fifo = tls_session->tx_fifo; - - /* prepare for remaining write(s) */ - space = svm_fifo_max_enqueue_prod (tls_tx_fifo); - /* Leave a bit of extra space for tls ctrl data, if any needed */ - space = clib_max ((int) space - TLSO_CTRL_BYTES, 0); - - /* continue remaining openssl_ctx_write request */ - while (oc->total_async_write) - { - int rv; - u32 deq_max = svm_fifo_max_dequeue_cons (app_tx_fifo); - - deq_max = clib_min (deq_max, space); - deq_max = clib_min (deq_max, sp->max_burst_size); - if (!deq_max) - goto check_tls_fifo; - - /* Make sure tcp's tx fifo can actually buffer all bytes to - * be dequeued. If under memory pressure, tls's fifo segment - * might not be able to allocate the chunks needed. This also - * avoids errors from the underlying custom bio to the ssl - * infra which at times can get stuck. */ - if (svm_fifo_provision_chunks (tls_tx_fifo, 0, 0, - deq_max + TLSO_CTRL_BYTES)) - goto check_tls_fifo; - - rv = openssl_write_from_fifo_head_into_ssl (app_tx_fifo, ssl, oc, - deq_max); - - /* Unrecoverable protocol error. Reset connection */ - if (PREDICT_FALSE (rv < 0)) - { - tls_notify_app_io_error (ctx); - clib_warning ( - "Unrecoverable protocol error. 
Reset connection\n"); - goto ev_in_queue; - } - - if (!rv) - { - if (SSL_want_async (ssl)) - { - /* new paused job, add queue event and wait */ - vpp_tls_async_enqueue_event (oc, SSL_ASYNC_EVT_WR, sp, - 0); - goto ev_in_queue; - } - clib_warning ("[rv:%d want:%s ctx:%d]\n", rv, - ssl_want[SSL_want (ssl)], - oc->openssl_ctx_index); - break; - } - wrote_sum += rv; - } - - if (svm_fifo_needs_deq_ntf (app_tx_fifo, wrote_sum)) - session_dequeue_notify (app_session); - - check_tls_fifo: - /* we got here, async write is done or not possible */ - oc->total_async_write = 0; - - if (PREDICT_FALSE (BIO_ctrl_pending (oc->rbio) <= 0)) - tls_notify_app_io_error (ctx); - - /* Deschedule and wait for deq notification if fifo is almost full */ - enq_buf = - clib_min (svm_fifo_size (tls_tx_fifo) / 2, TLSO_MIN_ENQ_SPACE); - if (space < wrote_sum + enq_buf) - { - svm_fifo_add_want_deq_ntf (tls_tx_fifo, SVM_FIFO_WANT_DEQ_NOTIF); - transport_connection_deschedule (&ctx->connection); - sp->flags |= TRANSPORT_SND_F_DESCHED; - } - else - { - /* Request tx reschedule of the app session */ - app_session->flags |= SESSION_F_CUSTOM_TX; - transport_connection_reschedule (&ctx->connection); - } - - ev_in_queue: - /* job removed, openssl_ctx_write will resume */ - continue; - } - else - { - /* wrong event type */ - clib_warning ("goto remove_event [event->type:%d]\n", event->type); - tls_async_dequeue_update (event, evt_run_head, evt_run_tail, - &queue[thread_index].depth); - } - } - -handle_later: - return 1; -} - -static int -tls_async_dequeue_event_in_init (int thread_index) +int +vpp_tls_async_init_event (tls_ctx_t *ctx, openssl_resume_handler *handler, + session_t *session, ssl_async_evt_type_t evt_type, + transport_send_params_t *sp, int wr_size) { - openssl_evt_t *event; - openssl_async_t *om = &openssl_async_main; - openssl_async_queue_t *queue = om->queue_in_init; - int *evt_run_tail = &queue[thread_index].evt_run_tail; - int *evt_run_head = &queue[thread_index].evt_run_head; + u32 eidx; + openssl_evt_t *event = NULL; + openssl_ctx_t *oc = (openssl_ctx_t *) ctx; + u32 thread_id = ctx->c_thread_index; - /* dequeue events if exists */ - while (*evt_run_head >= 0) + if (oc->evt_alloc_flag[evt_type]) { - openssl_ctx_t *oc; - tls_ctx_t *ctx; - int rv, err; - - event = openssl_evt_get_w_thread (*evt_run_head, thread_index); - ctx = openssl_ctx_get_w_thread (event->ctx_index, thread_index); - oc = (openssl_ctx_t *) ctx; - - if (event->type != SSL_ASYNC_EVT_INIT) - { - /* wrong event type */ - clib_warning ("goto remove_event [event->type:%d]\n", event->type); - goto remove_event; - } - - if (!SSL_in_init (oc->ssl)) - { - clib_warning ("[!SSL_in_init() != ev->type:%d] th:%d ev:%d\n", - event->type, event->cb_args.thread_index, - event->cb_args.event_index); - goto remove_event; - } - - rv = SSL_do_handshake (oc->ssl); - err = SSL_get_error (oc->ssl, rv); - - /* Do not remove session from tail */ - if (err == SSL_ERROR_WANT_ASYNC) - goto handle_later; - - if (err == SSL_ERROR_SSL) + eidx = oc->evt_index[evt_type]; + if (evt_type == SSL_ASYNC_EVT_WR) { - char buf[512]; - ERR_error_string (ERR_get_error (), buf); - clib_warning ("Err: %s\n", buf); - openssl_handle_handshake_failure (ctx); - goto remove_event; - } - - if (err == SSL_ERROR_WANT_WRITE || err == SSL_ERROR_WANT_READ) - goto handle_later; - - /* client not supported */ - if (!SSL_is_server (oc->ssl)) - { - clib_warning ("goto remove_event [!SSL_is_server]\n"); - goto remove_event; - } - - if (tls_notify_app_accept (ctx)) - { - ctx->c_s_index = 
SESSION_INVALID_INDEX; - tls_disconnect_transport (ctx); - } - - TLS_DBG (1, "Handshake for %u complete. TLS cipher is %s", - oc->openssl_ctx_index, SSL_get_cipher (oc->ssl)); - - remove_event: - *evt_run_head = event->next; - queue[thread_index].depth--; - - if (*evt_run_head < 0) - { - /* queue empty, bail out */ - *evt_run_tail = -1; - if (queue[thread_index].depth) - clib_warning ("queue empty but depth:%d\n", - queue[thread_index].depth); - break; + event = openssl_evt_get (eidx); + goto update_wr_evnt; } + return 1; } - -handle_later: - return 1; -} - -int -vpp_tls_async_enqueue_event (openssl_ctx_t *ctx, int evt_type, - transport_send_params_t *sp, int size) -{ - openssl_evt_t *event; - openssl_async_t *om = &openssl_async_main; - openssl_async_queue_t *queue; - int thread_index; - int event_index; - int *evt_run_tail; - int *evt_run_head; - - event = openssl_evt_get (ctx->evt_index[evt_type]); - - thread_index = event->thread_idx; - event_index = event->event_idx; - - /* set queue to be used */ - if (SSL_in_init (ctx->ssl)) - queue = om->queue_in_init; else - queue = om->queue; - - evt_run_tail = &queue[thread_index].evt_run_tail; - evt_run_head = &queue[thread_index].evt_run_head; - - event->type = evt_type; - event->handler = (openssl_resume_handler *) sp; - event->next = -1; - - /* first we enqueue the request */ - if (*evt_run_tail >= 0) { - openssl_evt_t *event_tail; - - /* queue not empty, append to tail event */ - event_tail = openssl_evt_get_w_thread (*evt_run_tail, thread_index); - event_tail->next = event_index; + eidx = openssl_evt_alloc (); + oc->evt_alloc_flag[evt_type] = true; } - /* set tail to use new event index */ - *evt_run_tail = event_index; - - if (*evt_run_head < 0) - /* queue is empty, update head */ - *evt_run_head = event_index; - - queue[thread_index].depth++; - if (queue[thread_index].depth > queue[thread_index].max_depth) - queue[thread_index].max_depth = queue[thread_index].depth; - - return 1; -} - -static int -vpp_tls_async_init_event (tls_ctx_t *ctx, openssl_resume_handler *handler, - session_t *session, ssl_async_evt_type_t evt_type) -{ - u32 eidx; - openssl_evt_t *event; - openssl_ctx_t *oc = (openssl_ctx_t *) ctx; - u32 thread_id = ctx->c_thread_index; - - eidx = openssl_evt_alloc (); event = openssl_evt_get (eidx); event->ctx_index = oc->openssl_ctx_index; + /* async call back args */ event->event_idx = eidx; event->thread_idx = thread_id; - event->handler = handler; + event->async_event_type = evt_type; + event->async_evt_handler = handler; event->session_index = session->session_index; - event->type = evt_type; event->status = SSL_ASYNC_INVALID_STATUS; oc->evt_index[evt_type] = eidx; - event->next = -1; #ifdef HAVE_OPENSSL_ASYNC SSL_set_async_callback_arg (oc->ssl, &event->cb_args); #endif - - return 1; -} - -int -vpp_tls_async_init_events (tls_ctx_t *ctx, openssl_resume_handler *handler, - session_t *session) -{ - vpp_tls_async_init_event (ctx, handler, session, SSL_ASYNC_EVT_INIT); - vpp_tls_async_init_event (ctx, handler, session, SSL_ASYNC_EVT_RD); - vpp_tls_async_init_event (ctx, handler, session, SSL_ASYNC_EVT_WR); - +update_wr_evnt: + if (evt_type == SSL_ASYNC_EVT_WR) + { + transport_connection_deschedule (&ctx->connection); + sp->flags |= TRANSPORT_SND_F_DESCHED; + oc->total_async_write = wr_size; + } + event->tran_sp = sp; return 1; } @@ -866,23 +433,6 @@ vpp_openssl_is_inflight (tls_ctx_t *ctx) return 0; } -int -vpp_tls_async_update_event (tls_ctx_t *ctx, int eagain, - ssl_async_evt_type_t type) -{ - u32 eidx; - openssl_ctx_t *oc = 
(openssl_ctx_t *) ctx; - openssl_evt_t *event; - - eidx = oc->evt_index[type]; - event = openssl_evt_get (eidx); - event->status = SSL_ASYNC_INFLIGHT; - if (eagain) - return tls_async_openssl_callback (0, &event->cb_args); - - return 1; -} - void event_handler (void *tls_async) { @@ -890,17 +440,16 @@ event_handler (void *tls_async) openssl_evt_t *event; session_t *session; int thread_index; - tls_ctx_t *ctx; event = (openssl_evt_t *) tls_async; thread_index = event->thread_idx; - ctx = openssl_ctx_get_w_thread (event->ctx_index, thread_index); - handler = event->handler; + handler = event->async_evt_handler; session = session_get (event->session_index, thread_index); if (handler) { - (*handler) (ctx, session); + (*handler) (event, session); + event->status = SSL_ASYNC_CB_EXECUTED; } return; @@ -997,35 +546,33 @@ tls_async_do_job (int eidx, u32 thread_index) } int -tls_resume_from_crypto (int thread_index) +handle_async_cb_events (openssl_async_queue_t *queue, int thread_index) { int i; - - openssl_async_t *om = &openssl_async_main; openssl_evt_t *event; - int *evt_run_head = &om->queue[thread_index].evt_run_head; - int *evt_run_tail = &om->queue[thread_index].evt_run_tail; + + int *evt_run_head = &queue[thread_index].evt_run_head; + int *evt_run_tail = &queue[thread_index].evt_run_tail; if (*evt_run_head < 0) return 0; for (i = 0; i < MAX_VECTOR_ASYNC; i++) { - if (*evt_run_head >= 0) + if (*evt_run_head >= 0 && queue[thread_index].depth) { event = openssl_evt_get_w_thread (*evt_run_head, thread_index); - tls_async_do_job (*evt_run_head, thread_index); if (PREDICT_FALSE (event->status == SSL_ASYNC_REENTER)) - { - /* recusive event triggered */ - event->status = SSL_ASYNC_READY; - continue; - } + /* recusive event triggered */ + goto deq_event; + tls_async_do_job (*evt_run_head, thread_index); - event->status = SSL_ASYNC_INVALID_STATUS; + deq_event: *evt_run_head = event->next; + event->status = SSL_ASYNC_DEQ_DONE; + queue[thread_index].depth--; - if (event->next < 0) + if (*evt_run_head < 0) { *evt_run_tail = -1; break; @@ -1034,7 +581,32 @@ tls_resume_from_crypto (int thread_index) } return 0; +} +void +resume_handshake_events (int thread_index) +{ + openssl_async_t *om = &openssl_async_main; + + openssl_async_queue_t *queue = om->queue_in_init; + handle_async_cb_events (queue, thread_index); +} + +void +resume_read_write_events (int thread_index) +{ + openssl_async_t *om = &openssl_async_main; + + openssl_async_queue_t *queue = om->queue; + handle_async_cb_events (queue, thread_index); +} + +int +tls_resume_from_crypto (int thread_index) +{ + resume_read_write_events (thread_index); + resume_handshake_events (thread_index); + return 0; } static clib_error_t * @@ -1044,6 +616,221 @@ tls_async_init (vlib_main_t * vm) return 0; } +int +tls_async_handshake_event_handler (void *async_evt, void *unused) +{ + openssl_evt_t *event = (openssl_evt_t *) async_evt; + int thread_index = event->thread_idx; + openssl_ctx_t *oc; + tls_ctx_t *ctx; + int rv, err; + + ASSERT (thread_index == vlib_get_thread_index ()); + ctx = openssl_ctx_get_w_thread (event->ctx_index, thread_index); + oc = (openssl_ctx_t *) ctx; + session_t *tls_session = session_get_from_handle (ctx->tls_session_handle); + + if (!SSL_in_init (oc->ssl)) + { + TLS_DBG (2, "[!SSL_in_init]==>CTX: %p EVT: %p EIDX: %d", ctx, event, + event->event_idx); + return 0; + } + + if (ctx->flags & TLS_CONN_F_RESUME) + { + ctx->flags &= ~TLS_CONN_F_RESUME; + } + else if (!svm_fifo_max_dequeue_cons (tls_session->rx_fifo)) + return 0; + + rv = 
SSL_do_handshake (oc->ssl); + err = SSL_get_error (oc->ssl, rv); + + if (err == SSL_ERROR_WANT_ASYNC) + return 0; + + if (err == SSL_ERROR_SSL) + { + char buf[512]; + ERR_error_string (ERR_get_error (), buf); + TLS_DBG (2, "[SSL_ERROR_SSL]==>CTX: %p EVT: %p EIDX: %d Buf: %s", ctx, + event, event->event_idx, buf); + openssl_handle_handshake_failure (ctx); + return 0; + } + + if (err == SSL_ERROR_WANT_WRITE || err == SSL_ERROR_WANT_READ) + return 0; + + /* client not supported */ + if (!SSL_is_server (oc->ssl)) + return 0; + + /* Need to check transport status */ + if (ctx->flags & TLS_CONN_F_PASSIVE_CLOSE) + { + openssl_handle_handshake_failure (ctx); + return 0; + } + + if (tls_notify_app_accept (ctx)) + { + ctx->c_s_index = SESSION_INVALID_INDEX; + tls_disconnect_transport (ctx); + } + + TLS_DBG (1, + "<=====Handshake for %u complete. TLS cipher is %s EVT: %p =====>", + oc->openssl_ctx_index, SSL_get_cipher (oc->ssl), event); + + ctx->flags |= TLS_CONN_F_HS_DONE; + + return 1; +} + +int +tls_async_read_event_handler (void *async_evt, void *unused) +{ + openssl_evt_t *event = (openssl_evt_t *) async_evt; + int thread_index = event->thread_idx; + session_t *app_session, *tls_session; + openssl_ctx_t *oc; + tls_ctx_t *ctx; + SSL *ssl; + + ASSERT (thread_index == vlib_get_thread_index ()); + ctx = openssl_ctx_get_w_thread (event->ctx_index, thread_index); + oc = (openssl_ctx_t *) ctx; + ssl = oc->ssl; + + ctx->flags |= TLS_CONN_F_ASYNC_RD; + /* read event */ + svm_fifo_t *app_rx_fifo, *tls_rx_fifo; + int read, err; + + app_session = session_get_from_handle (ctx->app_session_handle); + app_rx_fifo = app_session->rx_fifo; + + tls_session = session_get_from_handle (ctx->tls_session_handle); + tls_rx_fifo = tls_session->rx_fifo; + + /* continue the paused job */ + read = openssl_async_read_from_ssl_into_fifo (app_rx_fifo, ssl); + err = SSL_get_error (oc->ssl, read); + + if (err == SSL_ERROR_WANT_ASYNC) + return 0; + + if (read <= 0) + { + if (SSL_want_async (ssl)) + return 0; + goto ev_rd_done; + } + + /* Unrecoverable protocol error. Reset connection */ + if (PREDICT_FALSE ((read <= 0) && (err == SSL_ERROR_SSL))) + { + tls_notify_app_io_error (ctx); + goto ev_rd_done; + } + + /* + * Managed to read some data. If handshake just completed, session + * may still be in accepting state. 
+ */ + if (app_session->session_state >= SESSION_STATE_READY) + tls_notify_app_enqueue (ctx, app_session); + +ev_rd_done: + /* read done */ + ctx->flags &= ~TLS_CONN_F_ASYNC_RD; + + if ((SSL_pending (ssl) > 0) || svm_fifo_max_dequeue_cons (tls_rx_fifo)) + tls_add_vpp_q_builtin_rx_evt (tls_session); + + return 1; +} + +int +tls_async_write_event_handler (void *async_evt, void *unused) +{ + openssl_evt_t *event = (openssl_evt_t *) async_evt; + int thread_index = event->thread_idx; + session_t *app_session, *tls_session; + openssl_ctx_t *oc; + tls_ctx_t *ctx; + SSL *ssl; + + ASSERT (thread_index == vlib_get_thread_index ()); + ctx = openssl_ctx_get_w_thread (event->ctx_index, thread_index); + oc = (openssl_ctx_t *) ctx; + ssl = oc->ssl; + + /* write event */ + int wrote = 0; + u32 space, enq_buf; + svm_fifo_t *app_tx_fifo, *tls_tx_fifo; + transport_send_params_t *sp = event->tran_sp; + + app_session = session_get_from_handle (ctx->app_session_handle); + app_tx_fifo = app_session->tx_fifo; + + /* Check if already data write is completed or not */ + if (oc->total_async_write == 0) + return 0; + + wrote = openssl_async_write_from_fifo_into_ssl (app_tx_fifo, ssl, oc); + if (PREDICT_FALSE (!wrote)) + { + if (SSL_want_async (ssl)) + return 0; + } + + /* Unrecoverable protocol error. Reset connection */ + if (PREDICT_FALSE (wrote < 0)) + { + tls_notify_app_io_error (ctx); + return 0; + } + + tls_session = session_get_from_handle (ctx->tls_session_handle); + tls_tx_fifo = tls_session->tx_fifo; + + /* prepare for remaining write(s) */ + space = svm_fifo_max_enqueue_prod (tls_tx_fifo); + /* Leave a bit of extra space for tls ctrl data, if any needed */ + space = clib_max ((int) space - TLSO_CTRL_BYTES, 0); + + if (svm_fifo_needs_deq_ntf (app_tx_fifo, wrote)) + session_dequeue_notify (app_session); + + /* we got here, async write is done */ + oc->total_async_write = 0; + + if (PREDICT_FALSE (ctx->flags & TLS_CONN_F_APP_CLOSED && + BIO_ctrl_pending (oc->rbio) <= 0)) + openssl_confirm_app_close (ctx); + + /* Deschedule and wait for deq notification if fifo is almost full */ + enq_buf = clib_min (svm_fifo_size (tls_tx_fifo) / 2, TLSO_MIN_ENQ_SPACE); + if (space < wrote + enq_buf) + { + svm_fifo_add_want_deq_ntf (tls_tx_fifo, SVM_FIFO_WANT_DEQ_NOTIF); + transport_connection_deschedule (&ctx->connection); + sp->flags |= TRANSPORT_SND_F_DESCHED; + } + else + { + /* Request tx reschedule of the app session */ + app_session->flags |= SESSION_F_CUSTOM_TX; + transport_connection_reschedule (&ctx->connection); + } + + return 1; +} + static uword tls_async_process (vlib_main_t * vm, vlib_node_runtime_t * rt, vlib_frame_t * f) @@ -1055,8 +842,7 @@ tls_async_process (vlib_main_t * vm, vlib_node_runtime_t * rt, if (pool_elts (om->evt_pool[thread_index]) > 0) { openssl_async_polling (); - tls_async_dequeue_event_in_init (thread_index); - tls_async_dequeue_event (thread_index); + tls_resume_from_crypto (thread_index); } return 0; diff --git a/src/plugins/tlsopenssl/tls_openssl.c b/src/plugins/tlsopenssl/tls_openssl.c index d7adbed3269..f0be025a207 100644 --- a/src/plugins/tlsopenssl/tls_openssl.c +++ b/src/plugins/tlsopenssl/tls_openssl.c @@ -166,31 +166,6 @@ openssl_lctx_get (u32 lctx_index) return pool_elt_at_index (openssl_main.lctx_pool, lctx_index); } -static int -openssl_handle_want_async (tls_ctx_t *ctx, int evt_type, - transport_send_params_t *sp, int size) -{ - int ret; - openssl_ctx_t *oc = (openssl_ctx_t *) ctx; - - if (evt_type >= SSL_ASYNC_EVT_MAX || evt_type == 0) - { - clib_warning ("return 0 [illegal 
evt_type value:%d]\n", evt_type); - return 0; - } - - if (evt_type == SSL_ASYNC_EVT_WR) - { - /* de-schedule transport connection */ - transport_connection_deschedule (&ctx->connection); - sp->flags |= TRANSPORT_SND_F_DESCHED; - oc->total_async_write = size; - } - ret = vpp_tls_async_enqueue_event (oc, evt_type, sp, size); - - return ret; -} - int openssl_read_from_ssl_into_fifo (svm_fifo_t *f, tls_ctx_t *ctx, u32 max_len) { @@ -219,8 +194,10 @@ openssl_read_from_ssl_into_fifo (svm_fifo_t *f, tls_ctx_t *ctx, u32 max_len) { if (openssl_main.async && SSL_want_async (oc->ssl)) { - ctx->flags |= TLS_CONN_F_ASYNC_RD; - openssl_handle_want_async (ctx, SSL_ASYNC_EVT_RD, NULL, 0); + session_t *tls_session = + session_get_from_handle (ctx->tls_session_handle); + vpp_tls_async_init_event (ctx, tls_async_read_event_handler, + tls_session, SSL_ASYNC_EVT_RD, NULL, 0); return 0; } ossl_check_err_is_fatal (ssl, read); @@ -264,53 +241,39 @@ openssl_write_from_fifo_into_ssl (svm_fifo_t *f, tls_ctx_t *ctx, { rv = SSL_write (ssl, fs[i].data, fs[i].len); wrote += (rv > 0) ? rv : 0; + if (rv < (int) fs[i].len) { - ossl_check_err_is_fatal (ssl, rv); + if (rv < 0) + { + int err = SSL_get_error (ssl, rv); + if (err == SSL_ERROR_SSL) + return -1; + + if (err == SSL_ERROR_WANT_WRITE) + break; + + if (openssl_main.async && SSL_want_async (ssl)) + { + session_t *ts = + session_get_from_handle (ctx->tls_session_handle); + vpp_tls_async_init_event (ctx, tls_async_write_event_handler, + ts, SSL_ASYNC_EVT_WR, sp, + sp->max_burst_size); + return 0; + } + } break; } i++; } - if (openssl_main.async && SSL_want_async (ssl)) - { - openssl_handle_want_async (ctx, SSL_ASYNC_EVT_WR, sp, max_len); - return 0; - } if (wrote) svm_fifo_dequeue_drop (f, wrote); return wrote; } -#ifdef HAVE_OPENSSL_ASYNC -static int -openssl_check_async_status (tls_ctx_t * ctx, openssl_resume_handler * handler, - session_t * session) -{ - openssl_ctx_t *oc = (openssl_ctx_t *) ctx; - int estatus; - - SSL_get_async_status (oc->ssl, &estatus); - if (estatus == ASYNC_STATUS_EAGAIN) - { - vpp_tls_async_update_event (ctx, 1, SSL_ASYNC_EVT_INIT); - vpp_tls_async_update_event (ctx, 1, SSL_ASYNC_EVT_RD); - vpp_tls_async_update_event (ctx, 1, SSL_ASYNC_EVT_WR); - } - else - { - vpp_tls_async_update_event (ctx, 0, SSL_ASYNC_EVT_INIT); - vpp_tls_async_update_event (ctx, 0, SSL_ASYNC_EVT_RD); - vpp_tls_async_update_event (ctx, 0, SSL_ASYNC_EVT_WR); - } - - return 1; - -} - -#endif - void openssl_handle_handshake_failure (tls_ctx_t *ctx) { @@ -338,7 +301,7 @@ openssl_handle_handshake_failure (tls_ctx_t *ctx) } int -openssl_ctx_handshake_rx (tls_ctx_t * ctx, session_t * tls_session) +openssl_ctx_handshake_rx (tls_ctx_t *ctx, session_t *tls_session) { openssl_ctx_t *oc = (openssl_ctx_t *) ctx; int rv = 0, err; @@ -357,7 +320,8 @@ openssl_ctx_handshake_rx (tls_ctx_t * ctx, session_t * tls_session) if (openssl_main.async && err == SSL_ERROR_WANT_ASYNC) { - openssl_handle_want_async (ctx, SSL_ASYNC_EVT_INIT, NULL, 0); + vpp_tls_async_init_event (ctx, tls_async_handshake_event_handler, + tls_session, SSL_ASYNC_EVT_INIT, NULL, 0); return -1; } @@ -868,7 +832,7 @@ openssl_ctx_init_client (tls_ctx_t * ctx) #ifdef HAVE_OPENSSL_ASYNC session_t *tls_session = session_get_from_handle (ctx->tls_session_handle); - vpp_tls_async_init_events (ctx, openssl_ctx_handshake_rx, tls_session); + openssl_ctx_handshake_rx (ctx, tls_session); #endif while (1) { @@ -876,11 +840,7 @@ openssl_ctx_init_client (tls_ctx_t * ctx) err = SSL_get_error (oc->ssl, rv); #ifdef HAVE_OPENSSL_ASYNC if (err 
== SSL_ERROR_WANT_ASYNC) - { - openssl_check_async_status (ctx, openssl_ctx_handshake_rx, - tls_session); - break; - } + break; #endif if (err != SSL_ERROR_WANT_WRITE) break; @@ -1107,7 +1067,7 @@ openssl_ctx_init_server (tls_ctx_t * ctx) { session_t *tls_session = session_get_from_handle (ctx->tls_session_handle); - vpp_tls_async_init_events (ctx, openssl_ctx_handshake_rx, tls_session); + openssl_ctx_handshake_rx (ctx, tls_session); } while (1) @@ -1115,16 +1075,13 @@ openssl_ctx_init_server (tls_ctx_t * ctx) rv = SSL_do_handshake (oc->ssl); err = SSL_get_error (oc->ssl, rv); if (openssl_main.async && err == SSL_ERROR_WANT_ASYNC) - { - openssl_handle_want_async (ctx, SSL_ASYNC_EVT_INIT, NULL, 0); + break; - break; - } if (err != SSL_ERROR_WANT_WRITE) break; } - TLS_DBG (2, "tls state for [%u]%u is su", ctx->c_thread_index, + TLS_DBG (2, "tls state for [%u]%u is %s", ctx->c_thread_index, oc->openssl_ctx_index, SSL_state_string_long (oc->ssl)); return 0; } diff --git a/src/plugins/tlsopenssl/tls_openssl.h b/src/plugins/tlsopenssl/tls_openssl.h index 8f6c6652a52..1a566f35fa6 100644 --- a/src/plugins/tlsopenssl/tls_openssl.h +++ b/src/plugins/tlsopenssl/tls_openssl.h @@ -40,6 +40,7 @@ typedef struct tls_ctx_openssl_ SSL_CTX *client_ssl_ctx; SSL *ssl; u32 evt_index[SSL_ASYNC_EVT_MAX]; + bool evt_alloc_flag[SSL_ASYNC_EVT_MAX]; u32 total_async_write; BIO *rbio; BIO *wbio; @@ -74,15 +75,14 @@ typedef struct openssl_main_ u32 max_pipelines; } openssl_main_t; -typedef int openssl_resume_handler (tls_ctx_t * ctx, session_t * tls_session); +typedef int openssl_resume_handler (void *event, void *session); +typedef int (*async_handlers) (void *event, void *session); tls_ctx_t *openssl_ctx_get_w_thread (u32 ctx_index, u8 thread_index); -int vpp_tls_async_init_events (tls_ctx_t *ctx, openssl_resume_handler *handler, - session_t *session); -int vpp_tls_async_update_event (tls_ctx_t *ctx, int eagain, - ssl_async_evt_type_t type); -int vpp_tls_async_enqueue_event (openssl_ctx_t *ctx, int evt_type, - transport_send_params_t *sp, int size); +int vpp_tls_async_init_event (tls_ctx_t *ctx, openssl_resume_handler *handler, + session_t *session, + ssl_async_evt_type_t evt_type, + transport_send_params_t *sp, int wr_size); int tls_async_openssl_callback (SSL * s, void *evt); int openssl_evt_free (int event_idx, u8 thread_index); void openssl_polling_start (ENGINE * engine); @@ -96,6 +96,9 @@ int openssl_read_from_ssl_into_fifo (svm_fifo_t *f, tls_ctx_t *ctx, void openssl_handle_handshake_failure (tls_ctx_t *ctx); void openssl_confirm_app_close (tls_ctx_t *ctx); +int tls_async_write_event_handler (void *event, void *session); +int tls_async_read_event_handler (void *event, void *session); +int tls_async_handshake_event_handler (void *event, void *session); #endif /* SRC_PLUGINS_TLSOPENSSL_TLS_OPENSSL_H_ */ /* |