From 7fb0fe1f6972a7a35146fa9115b866ba29a6fbb7 Mon Sep 17 00:00:00 2001
From: Florin Coras
Date: Mon, 9 Apr 2018 09:24:52 -0700
Subject: udp/session: refactor to support dgram mode

- adds session layer support for datagram-based protocols
- updates udp to work in pure connectionless and datagram mode. The
  existing connected mode is now accessible to apps as a dummy UDPC
  (connected udp) protocol.
- updates udp_echo, echo client and echo server code to work in
  datagram mode.

Change-Id: I2960c0d2d246cb166005f545794ec31fe0d546dd
Signed-off-by: Florin Coras
---
 src/vnet/session-apps/echo_client.c | 114 +++++++++++++++++++++---------------
 src/vnet/session-apps/echo_client.h |   9 ++-
 src/vnet/session-apps/echo_server.c | 104 +++++++++++++++++++-------------
 3 files changed, 132 insertions(+), 95 deletions(-)

diff --git a/src/vnet/session-apps/echo_client.c b/src/vnet/session-apps/echo_client.c
index 2cfb471413c..11528ef1899 100644
--- a/src/vnet/session-apps/echo_client.c
+++ b/src/vnet/session-apps/echo_client.c
@@ -46,23 +46,21 @@ static void
 send_data_chunk (echo_client_main_t * ecm, session_t * s)
 {
   u8 *test_data = ecm->connect_test_data;
-  int test_buf_offset;
+  int test_buf_len, test_buf_offset, rv;
   u32 bytes_this_chunk;
-  session_fifo_event_t evt;
-  svm_fifo_t *txf;
-  int rv;
 
   ASSERT (vec_len (test_data) > 0);
-
-  test_buf_offset = s->bytes_sent % vec_len (test_data);
-  bytes_this_chunk = vec_len (test_data) - test_buf_offset;
-
-  bytes_this_chunk = bytes_this_chunk < s->bytes_to_send
-    ? bytes_this_chunk : s->bytes_to_send;
-
-  txf = s->server_tx_fifo;
-  rv = svm_fifo_enqueue_nowait (txf, bytes_this_chunk,
-				test_data + test_buf_offset);
+  test_buf_len = vec_len (test_data);
+  test_buf_offset = s->bytes_sent % test_buf_len;
+  bytes_this_chunk = clib_min (test_buf_len - test_buf_offset,
+			       s->bytes_to_send);
+
+  if (!ecm->is_dgram)
+    rv = app_send_stream (&s->data, test_data + test_buf_offset,
+			  bytes_this_chunk, 0);
+  else
+    rv = app_send_dgram (&s->data, test_data + test_buf_offset,
+			 bytes_this_chunk, 0);
 
   /* If we managed to enqueue data... 
*/ if (rv > 0) @@ -89,35 +87,24 @@ send_data_chunk (echo_client_main_t * ecm, session_t * s) ed->data[1] = s->bytes_sent; ed->data[2] = s->bytes_to_send; } - - /* Poke the session layer */ - if (svm_fifo_set_event (txf)) - { - /* Fabricate TX event, send to vpp */ - evt.fifo = txf; - evt.event_type = FIFO_EVENT_APP_TX; - - if (svm_queue_add - (ecm->vpp_event_queue[txf->master_thread_index], (u8 *) & evt, - 0 /* do wait for mutex */ )) - clib_warning ("could not enqueue event"); - } } } static void receive_data_chunk (echo_client_main_t * ecm, session_t * s) { - svm_fifo_t *rx_fifo = s->server_rx_fifo; - u32 my_thread_index = vlib_get_thread_index (); + svm_fifo_t *rx_fifo = s->data.rx_fifo; + u32 thread_index = vlib_get_thread_index (); int n_read, i; if (ecm->test_bytes) { - n_read = svm_fifo_dequeue_nowait (rx_fifo, - vec_len (ecm->rx_buf - [my_thread_index]), - ecm->rx_buf[my_thread_index]); + if (!ecm->is_dgram) + n_read = app_recv_stream (&s->data, ecm->rx_buf[thread_index], + vec_len (ecm->rx_buf[thread_index])); + else + n_read = app_recv_dgram (&s->data, ecm->rx_buf[thread_index], + vec_len (ecm->rx_buf[thread_index])); } else { @@ -148,17 +135,18 @@ receive_data_chunk (echo_client_main_t * ecm, session_t * s) { for (i = 0; i < n_read; i++) { - if (ecm->rx_buf[my_thread_index][i] + if (ecm->rx_buf[thread_index][i] != ((s->bytes_received + i) & 0xff)) { clib_warning ("read %d error at byte %lld, 0x%x not 0x%x", n_read, s->bytes_received + i, - ecm->rx_buf[my_thread_index][i], + ecm->rx_buf[thread_index][i], ((s->bytes_received + i) & 0xff)); ecm->test_failed = 1; } } } + ASSERT (n_read <= s->bytes_to_receive); s->bytes_to_receive -= n_read; s->bytes_received += n_read; } @@ -230,20 +218,15 @@ echo_client_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node, } if (sp->bytes_to_receive > 0) { - receive_data_chunk (ecm, sp); delete_session = 0; } if (PREDICT_FALSE (delete_session == 1)) { - u32 index, thread_index; stream_session_t *s; __sync_fetch_and_add (&ecm->tx_total, sp->bytes_sent); __sync_fetch_and_add (&ecm->rx_total, sp->bytes_received); - - session_parse_handle (sp->vpp_session_handle, - &index, &thread_index); - s = session_get_if_valid (index, thread_index); + s = session_get_from_handle_if_valid (sp->vpp_session_handle); if (s) { @@ -312,8 +295,8 @@ echo_clients_init (vlib_main_t * vm) num_threads = 1 /* main thread */ + vtm->n_threads; - /* Init test data. Bigecmuffer */ - vec_validate (ecm->connect_test_data, 1024 * 1024 - 1); + /* Init test data. Big buffer */ + vec_validate (ecm->connect_test_data, 4 * 1024 * 1024 - 1); for (i = 0; i < vec_len (ecm->connect_test_data); i++) ecm->connect_test_data[i] = i & 0xff; @@ -363,12 +346,22 @@ echo_clients_session_connected_callback (u32 app_index, u32 api_context, session_index = session - ecm->sessions; session->bytes_to_send = ecm->bytes_to_send; session->bytes_to_receive = ecm->no_return ? 
0ULL : ecm->bytes_to_send; - session->server_rx_fifo = s->server_rx_fifo; - session->server_rx_fifo->client_session_index = session_index; - session->server_tx_fifo = s->server_tx_fifo; - session->server_tx_fifo->client_session_index = session_index; + session->data.rx_fifo = s->server_rx_fifo; + session->data.rx_fifo->client_session_index = session_index; + session->data.tx_fifo = s->server_tx_fifo; + session->data.tx_fifo->client_session_index = session_index; + session->data.vpp_evt_q = ecm->vpp_event_queue[thread_index]; session->vpp_session_handle = session_handle (s); + if (ecm->is_dgram) + { + transport_connection_t *tc; + tc = session_get_transport (s); + clib_memcpy (&session->data.transport, tc, + sizeof (session->data.transport)); + session->data.is_dgram = 1; + } + vec_add1 (ecm->connection_index_by_thread[thread_index], session_index); __sync_fetch_and_add (&ecm->ready_connections, 1); if (ecm->ready_connections == ecm->expected_connections) @@ -410,6 +403,28 @@ echo_clients_session_disconnect_callback (stream_session_t * s) static int echo_clients_rx_callback (stream_session_t * s) { + echo_client_main_t *ecm = &echo_client_main; + session_t *sp; + + sp = pool_elt_at_index (ecm->sessions, + s->server_rx_fifo->client_session_index); + receive_data_chunk (ecm, sp); + + if (svm_fifo_max_dequeue (s->server_rx_fifo)) + { + session_fifo_event_t evt; + svm_queue_t *q; + if (svm_fifo_set_event (s->server_rx_fifo)) + { + evt.fifo = s->server_rx_fifo; + evt.event_type = FIFO_EVENT_BUILTIN_RX; + q = session_manager_get_vpp_event_queue (s->thread_index); + if (PREDICT_FALSE (q->cursize == q->maxsize)) + clib_warning ("out of event queue space"); + else if (svm_queue_add (q, (u8 *) & evt, 0)) + clib_warning ("failed to enqueue self-tap"); + } + } return 0; } @@ -544,7 +559,7 @@ echo_clients_connect (vlib_main_t * vm, u32 n_clients) } #define ec_cli_output(_fmt, _args...) 
\ - if (!ecm->no_output) \ + if (!ecm->no_output) \ vlib_cli_output(vm, _fmt, ##_args) static clib_error_t * @@ -663,6 +678,9 @@ echo_clients_command_fn (vlib_main_t * vm, ecm->connect_uri = format (0, "%s%c", default_uri, 0); } + if (ecm->connect_uri[0] == 'u' && ecm->connect_uri[3] != 'c') + ecm->is_dgram = 1; + #if ECHO_CLIENT_PTHREAD echo_clients_start_tx_pthread (); #endif diff --git a/src/vnet/session-apps/echo_client.h b/src/vnet/session-apps/echo_client.h index 5712da5b8c8..344e43865b5 100644 --- a/src/vnet/session-apps/echo_client.h +++ b/src/vnet/session-apps/echo_client.h @@ -30,15 +30,13 @@ typedef struct { + app_session_t data; u64 bytes_to_send; u64 bytes_sent; u64 bytes_to_receive; u64 bytes_received; - - svm_fifo_t *server_rx_fifo; - svm_fifo_t *server_tx_fifo; - u64 vpp_session_handle; + u8 thread_index; } session_t; typedef struct @@ -46,7 +44,7 @@ typedef struct /* * Application setup parameters */ - svm_queue_t *vl_input_queue; /**< vpe input queue */ + svm_queue_t *vl_input_queue; /**< vpe input queue */ svm_queue_t **vpp_event_queue; u32 cli_node_index; /**< cli process node index */ @@ -65,6 +63,7 @@ typedef struct u32 private_segment_count; /**< Number of private fifo segs */ u32 private_segment_size; /**< size of private fifo segs */ u32 tls_engine; /**< TLS engine mbedtls/openssl */ + u8 is_dgram; /* * Test state variables */ diff --git a/src/vnet/session-apps/echo_server.c b/src/vnet/session-apps/echo_server.c index 0bde2e4995f..85e6c299919 100644 --- a/src/vnet/session-apps/echo_server.c +++ b/src/vnet/session-apps/echo_server.c @@ -34,13 +34,14 @@ typedef struct * Config params */ u8 no_echo; /**< Don't echo traffic */ - u32 fifo_size; /**< Fifo size */ + u32 fifo_size; /**< Fifo size */ u32 rcv_buffer_size; /**< Rcv buffer size */ u32 prealloc_fifos; /**< Preallocate fifos */ u32 private_segment_count; /**< Number of private segments */ u32 private_segment_size; /**< Size of private segments */ char *server_uri; /**< Server URI */ u32 tls_engine; /**< TLS engine: mbedtls/openssl */ + u8 is_dgram; /**< set if transport is dgram */ /* * Test state */ @@ -126,25 +127,13 @@ test_bytes (echo_server_main_t * esm, int actual_transfer) } /* - * If no-echo, just read the data and be done with it + * If no-echo, just drop the data and be done with it. 
*/ int echo_server_builtin_server_rx_callback_no_echo (stream_session_t * s) { - echo_server_main_t *esm = &echo_server_main; - u32 my_thread_id = vlib_get_thread_index (); - int actual_transfer; - svm_fifo_t *rx_fifo; - - rx_fifo = s->server_rx_fifo; - - do - { - actual_transfer = - svm_fifo_dequeue_nowait (rx_fifo, esm->rcv_buffer_size, - esm->rx_buf[my_thread_id]); - } - while (actual_transfer > 0); + svm_fifo_t *rx_fifo = s->server_rx_fifo; + svm_fifo_dequeue_drop (rx_fifo, svm_fifo_max_dequeue (rx_fifo)); return 0; } @@ -157,6 +146,8 @@ echo_server_rx_callback (stream_session_t * s) echo_server_main_t *esm = &echo_server_main; session_fifo_event_t evt; u32 thread_index = vlib_get_thread_index (); + app_session_transport_t at; + svm_queue_t *q; ASSERT (s->thread_index == thread_index); @@ -166,14 +157,29 @@ echo_server_rx_callback (stream_session_t * s) ASSERT (rx_fifo->master_thread_index == thread_index); ASSERT (tx_fifo->master_thread_index == thread_index); - max_dequeue = svm_fifo_max_dequeue (s->server_rx_fifo); - max_enqueue = svm_fifo_max_enqueue (s->server_tx_fifo); + max_enqueue = svm_fifo_max_enqueue (tx_fifo); + if (!esm->is_dgram) + { + max_dequeue = svm_fifo_max_dequeue (rx_fifo); + } + else + { + session_dgram_pre_hdr_t ph; + svm_fifo_peek (rx_fifo, 0, sizeof (ph), (u8 *) & ph); + max_dequeue = ph.data_length - ph.data_offset; + if (!esm->vpp_queue[s->thread_index]) + { + q = session_manager_get_vpp_event_queue (s->thread_index); + esm->vpp_queue[s->thread_index] = q; + } + max_enqueue -= sizeof (session_dgram_hdr_t); + } if (PREDICT_FALSE (max_dequeue == 0)) return 0; /* Number of bytes we're going to copy */ - max_transfer = (max_dequeue < max_enqueue) ? max_dequeue : max_enqueue; + max_transfer = clib_min (max_dequeue, max_enqueue); /* No space in tx fifo */ if (PREDICT_FALSE (max_transfer == 0)) @@ -184,16 +190,16 @@ echo_server_rx_callback (stream_session_t * s) /* Program self-tap to retry */ if (svm_fifo_set_event (rx_fifo)) { - svm_queue_t *q; evt.fifo = rx_fifo; evt.event_type = FIFO_EVENT_BUILTIN_RX; - q = esm->vpp_queue[thread_index]; + q = esm->vpp_queue[s->thread_index]; if (PREDICT_FALSE (q->cursize == q->maxsize)) clib_warning ("out of event queue space"); else if (svm_queue_add (q, (u8 *) & evt, 0)) clib_warning ("failed to enqueue self-tap"); + vec_validate (esm->rx_retries[s->thread_index], s->session_index); if (esm->rx_retries[thread_index][s->session_index] == 500000) { clib_warning ("session stuck: %U", format_stream_session, s, 2); @@ -205,36 +211,47 @@ echo_server_rx_callback (stream_session_t * s) return 0; } - _vec_len (esm->rx_buf[thread_index]) = max_transfer; - - actual_transfer = svm_fifo_dequeue_nowait (rx_fifo, max_transfer, - esm->rx_buf[thread_index]); + vec_validate (esm->rx_buf[thread_index], max_transfer); + if (!esm->is_dgram) + { + actual_transfer = app_recv_stream_raw (rx_fifo, + esm->rx_buf[thread_index], + max_transfer, + 0 /* don't clear event */ ); + } + else + { + actual_transfer = app_recv_dgram_raw (rx_fifo, + esm->rx_buf[thread_index], + max_transfer, &at, + 0 /* don't clear event */ ); + } ASSERT (actual_transfer == max_transfer); - -// test_bytes (esm, actual_transfer); + /* test_bytes (esm, actual_transfer); */ /* * Echo back */ - n_written = svm_fifo_enqueue_nowait (tx_fifo, actual_transfer, - esm->rx_buf[thread_index]); - - if (n_written != max_transfer) - clib_warning ("short trout!"); - - if (svm_fifo_set_event (tx_fifo)) + if (!esm->is_dgram) { - /* Fabricate TX event, send to vpp */ - evt.fifo = tx_fifo; - 
evt.event_type = FIFO_EVENT_APP_TX;
-
-      if (svm_queue_add (esm->vpp_queue[s->thread_index],
-			 (u8 *) & evt, 0 /* do wait for mutex */ ))
-	clib_warning ("failed to enqueue tx evt");
+      n_written = app_send_stream_raw (tx_fifo,
+				       esm->vpp_queue[thread_index],
+				       esm->rx_buf[thread_index],
+				       actual_transfer, 0);
     }
+  else
+    {
+      n_written = app_send_dgram_raw (tx_fifo, &at,
+				      esm->vpp_queue[s->thread_index],
+				      esm->rx_buf[thread_index],
+				      actual_transfer, 0);
+    }
+
+  if (n_written != max_transfer)
+    clib_warning ("short trout! written %u read %u", n_written, max_transfer);
 
-  if (PREDICT_FALSE (n_written < max_dequeue))
+  if (PREDICT_FALSE (svm_fifo_max_dequeue (rx_fifo)))
     goto rx_event;
 
   return 0;
@@ -411,6 +428,7 @@ echo_server_create_command_fn (vlib_main_t * vm, unformat_input_t * input,
   esm->private_segment_count = 0;
   esm->private_segment_size = 0;
   esm->tls_engine = TLS_ENGINE_OPENSSL;
+  esm->is_dgram = 0;
   vec_free (esm->server_uri);
 
   while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
@@ -479,6 +497,8 @@ echo_server_create_command_fn (vlib_main_t * vm, unformat_input_t * input,
       clib_warning ("No uri provided! Using default: %s", default_uri);
       esm->server_uri = (char *) format (0, "%s%c", default_uri, 0);
     }
+  if (esm->server_uri[0] == 'u' && esm->server_uri[3] != 'c')
+    esm->is_dgram = 1;
 
   rv = echo_server_create (vm, appns_id, appns_flags, appns_secret);
   vec_free (appns_id);
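
Editor's note: for readers new to the app_session_t helpers used above, the
rx-side pattern both echo apps converge on is sketched below. This is an
illustration only, not part of the commit: builtin_echo_rx, rx_buf and
buf_len are hypothetical names, and only the helpers actually exercised in
the diff (app_recv_stream_raw/app_recv_dgram_raw,
app_send_stream_raw/app_send_dgram_raw, session_manager_get_vpp_event_queue)
are assumed. Fifo-full handling and the self-tap retry logic of the real
echo server are omitted.

/*
 * Hedged sketch: echo one chunk back in stream or dgram mode. In dgram
 * mode every datagram in the rx fifo is prefixed by a session_dgram_hdr_t
 * carrying its length and the peer's transport endpoint; the _raw helpers
 * consume/produce that header and report the peer via 'at'.
 */
#include <vnet/session/application_interface.h>

static int
builtin_echo_rx (stream_session_t * s, u8 * rx_buf, u32 buf_len,
		 u8 is_dgram)
{
  svm_fifo_t *rx_fifo = s->server_rx_fifo, *tx_fifo = s->server_tx_fifo;
  svm_queue_t *q = session_manager_get_vpp_event_queue (s->thread_index);
  app_session_transport_t at;
  int n_read, n_written = 0;

  if (!is_dgram)
    {
      /* Stream mode: fifo bytes are raw payload */
      n_read = app_recv_stream_raw (rx_fifo, rx_buf, buf_len,
				    0 /* don't clear event */ );
      if (n_read > 0)
	n_written = app_send_stream_raw (tx_fifo, q, rx_buf, n_read, 0);
    }
  else
    {
      /* Dgram mode: recv strips the header, send rebuilds it so the
       * reply is addressed back to the datagram's source */
      n_read = app_recv_dgram_raw (rx_fifo, rx_buf, buf_len, &at,
				   0 /* don't clear event */ );
      if (n_read > 0)
	n_written = app_send_dgram_raw (tx_fifo, &at, q, rx_buf, n_read, 0);
    }

  if (n_written < n_read)
    clib_warning ("tx fifo full: echoed %d of %d bytes", n_written, n_read);
  return 0;
}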