diff options
author | Florin Coras <fcoras@cisco.com> | 2019-03-04 10:56:23 -0800 |
---|---|---|
committer | Dave Barach <openvpp@barachs.net> | 2019-03-06 17:53:39 +0000 |
commit | 653e43f06a974121343b2c1f0e4533926020877b (patch) | |
tree | 6ab92561e8eccbda6b29316f794de531032a1259 /src/tests/vnet/session | |
parent | a55df1081762b4e40698ef7d9196551851be646a (diff) |
session: use vpp to switch io events for ct sessions
Instead of allocating pairs of message queues per cut-thru session and
having the applications map them, this uses vpp as an io event message
switch.
Change-Id: I51db1c7564df479a7d1a3288342394251fd188bb
Signed-off-by: Florin Coras <fcoras@cisco.com>
Diffstat (limited to 'src/tests/vnet/session')
-rw-r--r-- | src/tests/vnet/session/udp_echo.c | 81 |
1 file changed, 17 insertions(+), 64 deletions(-)
diff --git a/src/tests/vnet/session/udp_echo.c b/src/tests/vnet/session/udp_echo.c index 462e113dbbd..9fda73d1307 100644 --- a/src/tests/vnet/session/udp_echo.c +++ b/src/tests/vnet/session/udp_echo.c @@ -510,39 +510,14 @@ session_accepted_handler (session_accepted_msg_t * mp) session_index = session - utm->sessions; session->session_index = session_index; - /* Cut-through case */ - if (mp->server_event_queue_address) - { - clib_warning ("cut-through session"); - session->vpp_evt_q = uword_to_pointer (mp->client_event_queue_address, - svm_msg_q_t *); - sleep (1); - rx_fifo->master_session_index = session_index; - tx_fifo->master_session_index = session_index; - utm->cut_through_session_index = session_index; - session->rx_fifo = rx_fifo; - session->tx_fifo = tx_fifo; - session->is_dgram = 0; - - rv = pthread_create (&utm->cut_through_thread_handle, - NULL /*attr */ , cut_through_thread_fn, 0); - if (rv) - { - clib_warning ("pthread_create returned %d", rv); - rv = VNET_API_ERROR_SYSCALL_ERROR_1; - } - } - else - { - rx_fifo->client_session_index = session_index; - tx_fifo->client_session_index = session_index; - session->rx_fifo = rx_fifo; - session->tx_fifo = tx_fifo; - clib_memcpy_fast (&session->transport.rmt_ip, mp->ip, - sizeof (ip46_address_t)); - session->transport.is_ip4 = mp->is_ip4; - session->transport.rmt_port = mp->port; - } + rx_fifo->client_session_index = session_index; + tx_fifo->client_session_index = session_index; + session->rx_fifo = rx_fifo; + session->tx_fifo = tx_fifo; + clib_memcpy_fast (&session->transport.rmt_ip, mp->ip, + sizeof (ip46_address_t)); + session->transport.is_ip4 = mp->is_ip4; + session->transport.rmt_port = mp->port; hash_set (utm->session_index_by_vpp_handles, mp->handle, session_index); if (pool_elts (utm->sessions) && (pool_elts (utm->sessions) % 20000) == 0) @@ -623,18 +598,17 @@ session_connected_handler (session_connected_msg_t * mp) session->tx_fifo = uword_to_pointer (mp->server_tx_fifo, svm_fifo_t *); /* Cut-through 
case */ - if (mp->client_event_queue_address) + if (mp->ct_rx_fifo) { clib_warning ("cut-through session"); - session->vpp_evt_q = uword_to_pointer (mp->server_event_queue_address, + session->vpp_evt_q = uword_to_pointer (mp->vpp_event_queue_address, svm_msg_q_t *); - utm->ct_event_queue = uword_to_pointer (mp->client_event_queue_address, - svm_msg_q_t *); utm->cut_through_session_index = session->session_index; session->is_dgram = 0; sleep (1); session->rx_fifo->client_session_index = session->session_index; session->tx_fifo->client_session_index = session->session_index; + /* TODO use ct fifos */ } else { @@ -744,7 +718,6 @@ send_test_chunk (udp_echo_main_t * utm, app_session_t * s, u32 bytes) u8 *test_data = utm->connect_test_data; u32 bytes_to_snd, enq_space, min_chunk; - session_evt_type_t et = FIFO_EVENT_APP_TX; int written; test_buf_len = vec_len (test_data); @@ -753,17 +726,9 @@ send_test_chunk (udp_echo_main_t * utm, app_session_t * s, u32 bytes) utm->bytes_to_send); enq_space = svm_fifo_max_enqueue (s->tx_fifo); bytes_this_chunk = clib_min (bytes_this_chunk, enq_space); - et += (s->session_index == utm->cut_through_session_index); - - if (s->is_dgram) - written = app_send_dgram_raw (s->tx_fifo, &s->transport, s->vpp_evt_q, - test_data + test_buf_offset, - bytes_this_chunk, et, SVM_Q_WAIT); - else - written = app_send_stream_raw (s->tx_fifo, s->vpp_evt_q, - test_data + test_buf_offset, - bytes_this_chunk, et, SVM_Q_WAIT); + written = app_send (s, test_data + test_buf_offset, bytes_this_chunk, + SVM_Q_WAIT); if (written > 0) { utm->bytes_to_send -= written; @@ -1004,15 +969,12 @@ server_handle_fifo_event_rx (udp_echo_main_t * utm, u32 session_index) app_session_t *session; int rv; u32 max_dequeue, offset, max_transfer, rx_buf_len; - session_evt_type_t et = FIFO_EVENT_APP_TX; session = pool_elt_at_index (utm->sessions, session_index); rx_buf_len = vec_len (utm->rx_buf); rx_fifo = session->rx_fifo; tx_fifo = session->tx_fifo; - et += (session->session_index 
== utm->cut_through_session_index); - max_dequeue = svm_fifo_max_dequeue (rx_fifo); /* Allow enqueuing of a new event */ svm_fifo_unset_event (rx_fifo); @@ -1040,15 +1002,8 @@ server_handle_fifo_event_rx (udp_echo_main_t * utm, u32 session_index) offset = 0; do { - if (session->is_dgram) - rv = app_send_dgram_raw (tx_fifo, &session->transport, - session->vpp_evt_q, - &utm->rx_buf[offset], n_read, et, - SVM_Q_WAIT); - else - rv = app_send_stream_raw (tx_fifo, session->vpp_evt_q, - &utm->rx_buf[offset], n_read, et, - SVM_Q_WAIT); + rv = app_send (session, &utm->rx_buf[offset], n_read, + SVM_Q_WAIT); if (rv > 0) { n_read -= rv; @@ -1060,7 +1015,7 @@ server_handle_fifo_event_rx (udp_echo_main_t * utm, u32 session_index) /* If event wasn't set, add one */ if (svm_fifo_set_event (tx_fifo)) app_send_io_evt_to_vpp (session->vpp_evt_q, tx_fifo, - et, SVM_Q_WAIT); + SESSION_IO_EVT_TX, SVM_Q_WAIT); } } while ((n_read < 0 || max_dequeue > 0) && !utm->time_to_stop); @@ -1087,11 +1042,9 @@ server_handle_event_queue (udp_echo_main_t * utm) e = svm_msg_q_msg_data (mq, &msg); switch (e->event_type) { - case FIFO_EVENT_APP_RX: + case SESSION_IO_EVT_RX: server_handle_fifo_event_rx (utm, e->fifo->client_session_index); break; - case SESSION_IO_EVT_CT_TX: - break; default: handle_mq_event (e); |