-rw-r--r-- | src/svm/svm_fifo.c                       |  19
-rw-r--r-- | src/svm/svm_fifo.h                       |   6
-rw-r--r-- | src/tests/vnet/session/udp_echo.c        | 475
-rw-r--r-- | src/vnet/session-apps/echo_client.c      | 114
-rw-r--r-- | src/vnet/session-apps/echo_client.h      |   9
-rw-r--r-- | src/vnet/session-apps/echo_server.c      | 104
-rw-r--r-- | src/vnet/session/application.c           |  13
-rw-r--r-- | src/vnet/session/application_interface.h | 157
-rw-r--r-- | src/vnet/session/session.api             |  28
-rw-r--r-- | src/vnet/session/session.c               |  95
-rw-r--r-- | src/vnet/session/session.h               |  40
-rwxr-xr-x | src/vnet/session/session_api.c           |  41
-rwxr-xr-x | src/vnet/session/session_cli.c           |   5
-rw-r--r-- | src/vnet/session/session_node.c          | 134
-rw-r--r-- | src/vnet/session/stream_session.h        |  27
-rw-r--r-- | src/vnet/session/transport.c             |  16
-rw-r--r-- | src/vnet/session/transport.h             |  19
-rw-r--r-- | src/vnet/session/transport_interface.h   |   6
-rw-r--r-- | src/vnet/udp/udp.c                       |  56
-rw-r--r-- | src/vnet/udp/udp.h                       |   8
-rw-r--r-- | src/vnet/udp/udp_input.c                 | 180
21 files changed, 1105 insertions, 447 deletions
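
The bulk of this change replaces per-app fifo bookkeeping with the app_session_t abstraction added in application_interface.h: app_send()/app_recv() dispatch to the stream or datagram helpers based on the session's is_dgram flag, and the datagram path carries per-packet addressing in a session_dgram_hdr_t written ahead of the payload. The sketch below (illustrative only, not part of the patch; the function name and buffer handling are invented) shows how a built-in application could drive the new helpers once the attach/connect handlers have filled in the session's fifos, event queue and transport info.

/*
 * Illustrative sketch only: echo one chunk of data using the new
 * app_session_t helpers. Assumes s->rx_fifo, s->tx_fifo, s->vpp_evt_q,
 * s->transport and s->is_dgram were already set up by the app's
 * attach/accept/connect handlers, as done in udp_echo.c.
 */
static void
example_echo_once (app_session_t * s, u8 * buf, u32 buf_len)
{
  int n_read, n_sent;

  /* Dispatches to app_recv_dgram()/app_recv_stream() based on
   * s->is_dgram and clears the rx fifo event. */
  n_read = app_recv (s, buf, buf_len);
  if (n_read <= 0)
    return;

  /* app_send() picks the dgram or stream path; the dgram path prepends
   * a session_dgram_hdr_t and posts FIFO_EVENT_APP_TX to s->vpp_evt_q. */
  n_sent = app_send (s, buf, n_read, 0 /* noblock */ );
  if (n_sent < n_read)
    clib_warning ("short write: sent %d of %d", n_sent, n_read);
}
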
diff --git a/src/svm/svm_fifo.c b/src/svm/svm_fifo.c index b2c8e5bdb16..3552192a768 100644 --- a/src/svm/svm_fifo.c +++ b/src/svm/svm_fifo.c @@ -169,6 +169,9 @@ format_svm_fifo (u8 * s, va_list * args) svm_fifo_t *f = va_arg (*args, svm_fifo_t *); int verbose = va_arg (*args, int); + if (!s) + return s; + s = format (s, "cursize %u nitems %u has_event %d\n", f->cursize, f->nitems, f->has_event); s = format (s, " head %d tail %d\n", f->head, f->tail); @@ -459,7 +462,7 @@ svm_fifo_enqueue_internal (svm_fifo_t * f, u32 max_bytes, f->ooos_newest = OOO_SEGMENT_INVALID_INDEX; if (PREDICT_FALSE (cursize == f->nitems)) - return -2; /* fifo stuffed */ + return SVM_FIFO_FULL; nitems = f->nitems; @@ -615,6 +618,20 @@ svm_fifo_enqueue_with_offset (svm_fifo_t * f, copy_from_here); } +void +svm_fifo_overwrite_head (svm_fifo_t * f, u8 * data, u32 len) +{ + u32 first_chunk; + ASSERT (len <= f->nitems); + if (len < f->nitems - f->head) + clib_memcpy (&f->data[f->head], data, len); + else + { + first_chunk = len - (f->nitems - f->head); + clib_memcpy (&f->data[f->head], data, first_chunk); + clib_memcpy (f->data, data + first_chunk, len - first_chunk); + } +} static int svm_fifo_dequeue_internal (svm_fifo_t * f, u32 max_bytes, u8 * copy_here) diff --git a/src/svm/svm_fifo.h b/src/svm/svm_fifo.h index d06d77a42f7..39cdcc06a0c 100644 --- a/src/svm/svm_fifo.h +++ b/src/svm/svm_fifo.h @@ -80,6 +80,11 @@ typedef struct _svm_fifo CLIB_CACHE_LINE_ALIGN_MARK (data); } svm_fifo_t; +typedef enum +{ + SVM_FIFO_FULL = -2, +} svm_fifo_err_t; + #if SVM_FIFO_TRACE #define svm_fifo_trace_add(_f, _s, _l, _t) \ { \ @@ -150,6 +155,7 @@ int svm_fifo_dequeue_drop (svm_fifo_t * f, u32 max_bytes); u32 svm_fifo_number_ooo_segments (svm_fifo_t * f); ooo_segment_t *svm_fifo_first_ooo_segment (svm_fifo_t * f); void svm_fifo_init_pointers (svm_fifo_t * f, u32 pointer); +void svm_fifo_overwrite_head (svm_fifo_t * f, u8 * data, u32 len); format_function_t format_svm_fifo; diff --git a/src/tests/vnet/session/udp_echo.c b/src/tests/vnet/session/udp_echo.c index d1363fc7104..fe5461d2c59 100644 --- a/src/tests/vnet/session/udp_echo.c +++ b/src/tests/vnet/session/udp_echo.c @@ -58,12 +58,6 @@ typedef enum typedef struct { - svm_fifo_t *server_rx_fifo; - svm_fifo_t *server_tx_fifo; -} session_t; - -typedef struct -{ /* vpe input queue */ svm_queue_t *vl_input_queue; @@ -71,10 +65,13 @@ typedef struct u32 my_client_index; /* The URI we're playing with */ - u8 *uri; + u8 *listen_uri; + + /* URI for connect */ + u8 *connect_uri; /* Session pool */ - session_t *sessions; + app_session_t *sessions; /* Hash table for disconnect processing */ uword *session_index_by_vpp_handles; @@ -85,10 +82,9 @@ typedef struct /* intermediate rx buffer */ u8 *rx_buf; - /* URI for connect */ - u8 *connect_uri; - - int i_am_master; + u32 fifo_size; + int i_am_server; + u8 is_connected; /* Our event queue */ svm_queue_t *our_event_queue; @@ -128,7 +124,8 @@ typedef struct u8 *connect_test_data; uword *segments_table; -} uri_udp_test_main_t; + u8 do_echo; +} udp_echo_main_t; #if CLIB_DEBUG > 0 #define NITER 10000 @@ -136,12 +133,12 @@ typedef struct #define NITER 4000000 #endif -uri_udp_test_main_t uri_udp_test_main; +udp_echo_main_t udp_echo_main; static void stop_signal (int signum) { - uri_udp_test_main_t *um = &uri_udp_test_main; + udp_echo_main_t *um = &udp_echo_main; um->time_to_stop = 1; } @@ -149,7 +146,7 @@ stop_signal (int signum) static void stats_signal (int signum) { - uri_udp_test_main_t *um = &uri_udp_test_main; + udp_echo_main_t *um = &udp_echo_main; 
um->time_to_print_stats = 1; } @@ -164,11 +161,146 @@ setup_signal_handlers (void) return 0; } +uword +unformat_ip4_address (unformat_input_t * input, va_list * args) +{ + u8 *result = va_arg (*args, u8 *); + unsigned a[4]; + + if (!unformat (input, "%d.%d.%d.%d", &a[0], &a[1], &a[2], &a[3])) + return 0; + + if (a[0] >= 256 || a[1] >= 256 || a[2] >= 256 || a[3] >= 256) + return 0; + + result[0] = a[0]; + result[1] = a[1]; + result[2] = a[2]; + result[3] = a[3]; + + return 1; +} + +uword +unformat_ip6_address (unformat_input_t * input, va_list * args) +{ + ip6_address_t *result = va_arg (*args, ip6_address_t *); + u16 hex_quads[8]; + uword hex_quad, n_hex_quads, hex_digit, n_hex_digits; + uword c, n_colon, double_colon_index; + + n_hex_quads = hex_quad = n_hex_digits = n_colon = 0; + double_colon_index = ARRAY_LEN (hex_quads); + while ((c = unformat_get_input (input)) != UNFORMAT_END_OF_INPUT) + { + hex_digit = 16; + if (c >= '0' && c <= '9') + hex_digit = c - '0'; + else if (c >= 'a' && c <= 'f') + hex_digit = c + 10 - 'a'; + else if (c >= 'A' && c <= 'F') + hex_digit = c + 10 - 'A'; + else if (c == ':' && n_colon < 2) + n_colon++; + else + { + unformat_put_input (input); + break; + } + + /* Too many hex quads. */ + if (n_hex_quads >= ARRAY_LEN (hex_quads)) + return 0; + + if (hex_digit < 16) + { + hex_quad = (hex_quad << 4) | hex_digit; + + /* Hex quad must fit in 16 bits. */ + if (n_hex_digits >= 4) + return 0; + + n_colon = 0; + n_hex_digits++; + } + + /* Save position of :: */ + if (n_colon == 2) + { + /* More than one :: ? */ + if (double_colon_index < ARRAY_LEN (hex_quads)) + return 0; + double_colon_index = n_hex_quads; + } + + if (n_colon > 0 && n_hex_digits > 0) + { + hex_quads[n_hex_quads++] = hex_quad; + hex_quad = 0; + n_hex_digits = 0; + } + } + + if (n_hex_digits > 0) + hex_quads[n_hex_quads++] = hex_quad; + + { + word i; + + /* Expand :: to appropriate number of zero hex quads. */ + if (double_colon_index < ARRAY_LEN (hex_quads)) + { + word n_zero = ARRAY_LEN (hex_quads) - n_hex_quads; + + for (i = n_hex_quads - 1; i >= (signed) double_colon_index; i--) + hex_quads[n_zero + i] = hex_quads[i]; + + for (i = 0; i < n_zero; i++) + hex_quads[double_colon_index + i] = 0; + + n_hex_quads = ARRAY_LEN (hex_quads); + } + + /* Too few hex quads given. 
*/ + if (n_hex_quads < ARRAY_LEN (hex_quads)) + return 0; + + for (i = 0; i < ARRAY_LEN (hex_quads); i++) + result->as_u16[i] = clib_host_to_net_u16 (hex_quads[i]); + + return 1; + } +} + +uword +unformat_uri (unformat_input_t * input, va_list * args) +{ + session_endpoint_extended_t *sep = va_arg (*args, + session_endpoint_extended_t *); + u32 port; + char *tmp; + + if (unformat (input, "%s://%U/%d", &tmp, unformat_ip4_address, &sep->ip.ip4, + &port)) + { + sep->port = clib_host_to_net_u16 (port); + sep->is_ip4 = 1; + return 1; + } + else if (unformat (input, "%s://%U/%d", &tmp, unformat_ip6_address, + &sep->ip.ip6, &port)) + { + sep->port = clib_host_to_net_u16 (port); + sep->is_ip4 = 0; + return 1; + } + return 0; +} + void -application_send_attach (uri_udp_test_main_t * utm) +application_send_attach (udp_echo_main_t * utm) { vl_api_application_attach_t *bmp; - u32 fifo_size = 1 << 20; bmp = vl_msg_api_alloc (sizeof (*bmp)); memset (bmp, 0, sizeof (*bmp)); @@ -179,8 +311,8 @@ application_send_attach (uri_udp_test_main_t * utm) bmp->options[APP_OPTIONS_FLAGS] |= APP_OPTIONS_FLAGS_USE_GLOBAL_SCOPE; bmp->options[APP_OPTIONS_FLAGS] |= APP_OPTIONS_FLAGS_USE_LOCAL_SCOPE; bmp->options[APP_OPTIONS_PREALLOC_FIFO_PAIRS] = 2; - bmp->options[APP_OPTIONS_RX_FIFO_SIZE] = fifo_size; - bmp->options[APP_OPTIONS_TX_FIFO_SIZE] = fifo_size; + bmp->options[APP_OPTIONS_RX_FIFO_SIZE] = utm->fifo_size; + bmp->options[APP_OPTIONS_TX_FIFO_SIZE] = utm->fifo_size; bmp->options[APP_OPTIONS_ADD_SEGMENT_SIZE] = 128 << 20; bmp->options[APP_OPTIONS_SEGMENT_SIZE] = 256 << 20; bmp->options[APP_OPTIONS_EVT_QUEUE_SIZE] = 16768; @@ -188,7 +320,7 @@ application_send_attach (uri_udp_test_main_t * utm) } void -application_detach (uri_udp_test_main_t * utm) +application_detach (udp_echo_main_t * utm) { vl_api_application_detach_t *bmp; bmp = vl_msg_api_alloc (sizeof (*bmp)); @@ -204,7 +336,7 @@ static void vl_api_application_attach_reply_t_handler (vl_api_application_attach_reply_t * mp) { - uri_udp_test_main_t *utm = &uri_udp_test_main; + udp_echo_main_t *utm = &udp_echo_main; svm_fifo_segment_create_args_t _a = { 0 }, *a = &_a; int rv; @@ -250,7 +382,7 @@ vl_api_application_detach_reply_t_handler (vl_api_application_detach_reply_t * u8 * format_api_error (u8 * s, va_list * args) { - uri_udp_test_main_t *utm = va_arg (*args, uri_udp_test_main_t *); + udp_echo_main_t *utm = va_arg (*args, udp_echo_main_t *); i32 error = va_arg (*args, u32); uword *p; @@ -264,7 +396,7 @@ format_api_error (u8 * s, va_list * args) } int -wait_for_state_change (uri_udp_test_main_t * utm, connection_state_t state) +wait_for_state_change (udp_echo_main_t * utm, connection_state_t state) { #if CLIB_DEBUG > 0 #define TIMEOUT 600.0 @@ -287,11 +419,11 @@ u64 server_bytes_received, server_bytes_sent; static void * cut_through_thread_fn (void *arg) { - session_t *s; + app_session_t *s; svm_fifo_t *rx_fifo; svm_fifo_t *tx_fifo; u8 *my_copy_buffer = 0; - uri_udp_test_main_t *utm = &uri_udp_test_main; + udp_echo_main_t *utm = &udp_echo_main; i32 actual_transfer; int rv; u32 buffer_offset; @@ -301,8 +433,8 @@ cut_through_thread_fn (void *arg) s = pool_elt_at_index (utm->sessions, utm->cut_through_session_index); - rx_fifo = s->server_rx_fifo; - tx_fifo = s->server_tx_fifo; + rx_fifo = s->rx_fifo; + tx_fifo = s->tx_fifo; vec_validate (my_copy_buffer, 64 * 1024 - 1); @@ -340,7 +472,7 @@ cut_through_thread_fn (void *arg) } static void -udp_client_connect (uri_udp_test_main_t * utm) +udp_client_connect (udp_echo_main_t * utm) { vl_api_connect_uri_t *cmp; cmp = 
vl_msg_api_alloc (sizeof (*cmp)); @@ -354,7 +486,7 @@ udp_client_connect (uri_udp_test_main_t * utm) } static void -client_send_cut_through (uri_udp_test_main_t * utm, session_t * session) +client_send_cut_through (udp_echo_main_t * utm, app_session_t * session) { int i; u8 *test_data = 0; @@ -372,8 +504,8 @@ client_send_cut_through (uri_udp_test_main_t * utm, session_t * session) for (i = 0; i < vec_len (test_data); i++) test_data[i] = i & 0xff; - rx_fifo = session->server_rx_fifo; - tx_fifo = session->server_tx_fifo; + rx_fifo = session->rx_fifo; + tx_fifo = session->tx_fifo; before = clib_time_now (&utm->clib_time); @@ -456,15 +588,12 @@ client_send_cut_through (uri_udp_test_main_t * utm, session_t * session) } static void -send_test_chunk (uri_udp_test_main_t * utm, svm_fifo_t * tx_fifo, int mypid, - u32 bytes) +send_test_chunk (udp_echo_main_t * utm, app_session_t * s, u32 bytes) { u8 *test_data = utm->connect_test_data; - u64 bytes_sent = 0; int test_buf_offset = 0; + u64 bytes_sent = 0; u32 bytes_to_snd; - u32 queue_max_chunk = 128 << 10, actual_write; - session_fifo_event_t evt; int rv; bytes_to_snd = (bytes == 0) ? vec_len (test_data) : bytes; @@ -473,62 +602,27 @@ send_test_chunk (uri_udp_test_main_t * utm, svm_fifo_t * tx_fifo, int mypid, while (bytes_to_snd > 0 && !utm->time_to_stop) { - actual_write = (bytes_to_snd > queue_max_chunk) ? - queue_max_chunk : bytes_to_snd; - rv = svm_fifo_enqueue_nowait (tx_fifo, actual_write, - test_data + test_buf_offset); - + rv = app_send (s, test_data + test_buf_offset, bytes_to_snd, 0); if (rv > 0) { bytes_to_snd -= rv; test_buf_offset += rv; bytes_sent += rv; - - if (svm_fifo_set_event (tx_fifo)) - { - /* Fabricate TX event, send to vpp */ - evt.fifo = tx_fifo; - evt.event_type = FIFO_EVENT_APP_TX; - - svm_queue_add (utm->vpp_event_queue, - (u8 *) & evt, 0 /* do wait for mutex */ ); - } } } } static void -recv_test_chunk (uri_udp_test_main_t * utm, session_t * session) +recv_test_chunk (udp_echo_main_t * utm, app_session_t * s) { - svm_fifo_t *rx_fifo; - int buffer_offset, bytes_to_read = 0, rv; - - rx_fifo = session->server_rx_fifo; - bytes_to_read = svm_fifo_max_dequeue (rx_fifo); - bytes_to_read = - vec_len (utm->rx_buf) > bytes_to_read ? 
- bytes_to_read : vec_len (utm->rx_buf); - - buffer_offset = 0; - while (bytes_to_read > 0) - { - rv = svm_fifo_dequeue_nowait (rx_fifo, bytes_to_read, - utm->rx_buf + buffer_offset); - if (rv > 0) - { - bytes_to_read -= rv; - buffer_offset += rv; - } - } + app_recv (s, utm->rx_buf, vec_len (utm->rx_buf)); } void -client_send_data (uri_udp_test_main_t * utm) +client_send_data (udp_echo_main_t * utm) { u8 *test_data; - int mypid = getpid (); - session_t *session; - svm_fifo_t *tx_fifo; + app_session_t *session; u32 n_iterations; int i; @@ -538,8 +632,6 @@ client_send_data (uri_udp_test_main_t * utm) test_data = utm->connect_test_data; session = pool_elt_at_index (utm->sessions, utm->connected_session); - tx_fifo = session->server_tx_fifo; - ASSERT (vec_len (test_data) > 0); vec_validate (utm->rx_buf, vec_len (test_data) - 1); @@ -547,7 +639,7 @@ client_send_data (uri_udp_test_main_t * utm) for (i = 0; i < n_iterations; i++) { - send_test_chunk (utm, tx_fifo, mypid, 0); + send_test_chunk (utm, session, 0); recv_test_chunk (utm, session); if (utm->time_to_stop) break; @@ -562,9 +654,9 @@ client_send_data (uri_udp_test_main_t * utm) } static void -client_test (uri_udp_test_main_t * utm) +client_test (udp_echo_main_t * utm) { - session_t *session; + app_session_t *session; application_send_attach (utm); udp_client_connect (utm); @@ -593,7 +685,10 @@ client_test (uri_udp_test_main_t * utm) static void vl_api_bind_uri_reply_t_handler (vl_api_bind_uri_reply_t * mp) { - uri_udp_test_main_t *utm = &uri_udp_test_main; + udp_echo_main_t *utm = &udp_echo_main; + svm_fifo_t *rx_fifo, *tx_fifo; + app_session_t *session; + u32 session_index; if (mp->retval) { @@ -602,13 +697,30 @@ vl_api_bind_uri_reply_t_handler (vl_api_bind_uri_reply_t * mp) return; } - utm->state = STATE_BOUND; + rx_fifo = uword_to_pointer (mp->rx_fifo, svm_fifo_t *); + tx_fifo = uword_to_pointer (mp->tx_fifo, svm_fifo_t *); + + pool_get (utm->sessions, session); + memset (session, 0, sizeof (*session)); + session_index = session - utm->sessions; + + rx_fifo->client_session_index = session_index; + tx_fifo->client_session_index = session_index; + session->rx_fifo = rx_fifo; + session->tx_fifo = tx_fifo; + clib_memcpy (&session->transport.lcl_ip, mp->lcl_ip, + sizeof (ip46_address_t)); + session->transport.is_ip4 = mp->lcl_is_ip4; + session->transport.lcl_port = mp->lcl_port; + session->vpp_evt_q = uword_to_pointer (mp->vpp_evt_q, svm_queue_t *); + + utm->state = utm->is_connected ? 
STATE_BOUND : STATE_READY; } static void vl_api_map_another_segment_t_handler (vl_api_map_another_segment_t * mp) { - uri_udp_test_main_t *utm = &uri_udp_test_main; + udp_echo_main_t *utm = &udp_echo_main; svm_fifo_segment_create_args_t _a, *a = &_a; svm_fifo_segment_private_t *seg; u8 *seg_name; @@ -636,7 +748,7 @@ vl_api_map_another_segment_t_handler (vl_api_map_another_segment_t * mp) static void vl_api_unmap_segment_t_handler (vl_api_unmap_segment_t * mp) { - uri_udp_test_main_t *utm = &uri_udp_test_main; + udp_echo_main_t *utm = &udp_echo_main; svm_fifo_segment_private_t *seg; u64 *seg_indexp; u8 *seg_name; @@ -663,13 +775,13 @@ static void vl_api_connect_uri_t_handler (vl_api_connect_uri_t * mp) { u32 segment_index; - uri_udp_test_main_t *utm = &uri_udp_test_main; + udp_echo_main_t *utm = &udp_echo_main; svm_fifo_segment_main_t *sm = &svm_fifo_segment_main; svm_fifo_segment_create_args_t _a, *a = &_a; svm_fifo_segment_private_t *seg; svm_queue_t *client_q; vl_api_connect_session_reply_t *rmp; - session_t *session = 0; + app_session_t *session = 0; int rv = 0; /* Create the segment */ @@ -692,16 +804,16 @@ vl_api_connect_uri_t_handler (vl_api_connect_uri_t * mp) pool_get (utm->sessions, session); - session->server_rx_fifo = svm_fifo_segment_alloc_fifo + session->rx_fifo = svm_fifo_segment_alloc_fifo (utm->seg, 128 * 1024, FIFO_SEGMENT_RX_FREELIST); - ASSERT (session->server_rx_fifo); + ASSERT (session->rx_fifo); - session->server_tx_fifo = svm_fifo_segment_alloc_fifo + session->tx_fifo = svm_fifo_segment_alloc_fifo (utm->seg, 128 * 1024, FIFO_SEGMENT_TX_FREELIST); - ASSERT (session->server_tx_fifo); + ASSERT (session->tx_fifo); - session->server_rx_fifo->master_session_index = session - utm->sessions; - session->server_tx_fifo->master_session_index = session - utm->sessions; + session->rx_fifo->master_session_index = session - utm->sessions; + session->tx_fifo->master_session_index = session - utm->sessions; utm->cut_through_session_index = session - utm->sessions; rv = pthread_create (&utm->cut_through_thread_handle, @@ -722,8 +834,8 @@ send_reply: rmp->segment_name_length = vec_len (a->segment_name); if (session) { - rmp->server_rx_fifo = pointer_to_uword (session->server_rx_fifo); - rmp->server_tx_fifo = pointer_to_uword (session->server_tx_fifo); + rmp->server_rx_fifo = pointer_to_uword (session->rx_fifo); + rmp->server_tx_fifo = pointer_to_uword (session->tx_fifo); } memcpy (rmp->segment_name, a->segment_name, vec_len (a->segment_name)); @@ -737,7 +849,7 @@ send_reply: static void vl_api_unbind_uri_reply_t_handler (vl_api_unbind_uri_reply_t * mp) { - uri_udp_test_main_t *utm = &uri_udp_test_main; + udp_echo_main_t *utm = &udp_echo_main; if (mp->retval != 0) clib_warning ("returned %d", ntohl (mp->retval)); @@ -748,10 +860,10 @@ vl_api_unbind_uri_reply_t_handler (vl_api_unbind_uri_reply_t * mp) static void vl_api_accept_session_t_handler (vl_api_accept_session_t * mp) { - uri_udp_test_main_t *utm = &uri_udp_test_main; + udp_echo_main_t *utm = &udp_echo_main; vl_api_accept_session_reply_t *rmp; svm_fifo_t *rx_fifo, *tx_fifo; - session_t *session; + app_session_t *session; static f64 start_time; u32 session_index; int rv = 0; @@ -759,9 +871,8 @@ vl_api_accept_session_t_handler (vl_api_accept_session_t * mp) if (start_time == 0.0) start_time = clib_time_now (&utm->clib_time); - utm->vpp_event_queue = - uword_to_pointer (mp->vpp_event_queue_address, svm_queue_t *); - + utm->vpp_event_queue = uword_to_pointer (mp->vpp_event_queue_address, + svm_queue_t *); rx_fifo = uword_to_pointer 
(mp->server_rx_fifo, svm_fifo_t *); tx_fifo = uword_to_pointer (mp->server_tx_fifo, svm_fifo_t *); @@ -778,8 +889,8 @@ vl_api_accept_session_t_handler (vl_api_accept_session_t * mp) rx_fifo->master_session_index = session_index; tx_fifo->master_session_index = session_index; utm->cut_through_session_index = session_index; - session->server_rx_fifo = rx_fifo; - session->server_tx_fifo = tx_fifo; + session->rx_fifo = rx_fifo; + session->tx_fifo = tx_fifo; rv = pthread_create (&utm->cut_through_thread_handle, NULL /*attr */ , cut_through_thread_fn, 0); @@ -788,13 +899,18 @@ vl_api_accept_session_t_handler (vl_api_accept_session_t * mp) clib_warning ("pthread_create returned %d", rv); rv = VNET_API_ERROR_SYSCALL_ERROR_1; } + utm->do_echo = 1; } else { rx_fifo->client_session_index = session_index; tx_fifo->client_session_index = session_index; - session->server_rx_fifo = rx_fifo; - session->server_tx_fifo = tx_fifo; + session->rx_fifo = rx_fifo; + session->tx_fifo = tx_fifo; + clib_memcpy (&session->transport.rmt_ip, mp->ip, + sizeof (ip46_address_t)); + session->transport.is_ip4 = mp->is_ip4; + session->transport.rmt_port = mp->port; } hash_set (utm->session_index_by_vpp_handles, mp->handle, session_index); @@ -821,8 +937,8 @@ vl_api_accept_session_t_handler (vl_api_accept_session_t * mp) static void vl_api_disconnect_session_t_handler (vl_api_disconnect_session_t * mp) { - uri_udp_test_main_t *utm = &uri_udp_test_main; - session_t *session; + udp_echo_main_t *utm = &udp_echo_main; + app_session_t *session; vl_api_disconnect_session_reply_t *rmp; uword *p; int rv = 0; @@ -853,10 +969,12 @@ vl_api_disconnect_session_t_handler (vl_api_disconnect_session_t * mp) static void vl_api_connect_session_reply_t_handler (vl_api_connect_session_reply_t * mp) { - uri_udp_test_main_t *utm = &uri_udp_test_main; - session_t *session; + udp_echo_main_t *utm = &udp_echo_main; + unformat_input_t _input, *input = &_input; + session_endpoint_extended_t _sep, *sep = &_sep; + app_session_t *session; - ASSERT (utm->i_am_master == 0); + ASSERT (utm->i_am_server == 0); if (mp->retval) { @@ -864,14 +982,13 @@ vl_api_connect_session_reply_t_handler (vl_api_connect_session_reply_t * mp) return; } - pool_get (utm->sessions, session); - session->server_rx_fifo = uword_to_pointer (mp->server_rx_fifo, - svm_fifo_t *); - ASSERT (session->server_rx_fifo); - session->server_tx_fifo = uword_to_pointer (mp->server_tx_fifo, - svm_fifo_t *); - ASSERT (session->server_tx_fifo); + ASSERT (mp->server_rx_fifo && mp->server_tx_fifo); + pool_get (utm->sessions, session); + session->rx_fifo = uword_to_pointer (mp->server_rx_fifo, svm_fifo_t *); + session->tx_fifo = uword_to_pointer (mp->server_tx_fifo, svm_fifo_t *); + session->vpp_evt_q = uword_to_pointer (mp->vpp_event_queue_address, + svm_queue_t *); /* Cut-through case */ if (mp->client_event_queue_address) { @@ -881,12 +998,32 @@ vl_api_connect_session_reply_t_handler (vl_api_connect_session_reply_t * mp) svm_queue_t *); utm->our_event_queue = uword_to_pointer (mp->client_event_queue_address, svm_queue_t *); + utm->do_echo = 1; } else { utm->connected_session = session - utm->sessions; utm->vpp_event_queue = uword_to_pointer (mp->vpp_event_queue_address, svm_queue_t *); + + clib_memcpy (&session->transport.lcl_ip, mp->lcl_ip, + sizeof (ip46_address_t)); + session->transport.is_ip4 = mp->is_ip4; + session->transport.lcl_port = mp->lcl_port; + + unformat_init_vector (input, utm->connect_uri); + if (!unformat (input, "%U", unformat_uri, sep)) + { + clib_warning ("can't figure out remote 
ip and port"); + utm->state = STATE_FAILED; + unformat_free (input); + return; + } + unformat_free (input); + clib_memcpy (&session->transport.rmt_ip, &sep->ip, + sizeof (ip46_address_t)); + session->transport.rmt_port = sep->port; + session->is_dgram = !utm->is_connected; } utm->state = STATE_READY; } @@ -904,7 +1041,7 @@ _(APPLICATION_ATTACH_REPLY, application_attach_reply) \ _(APPLICATION_DETACH_REPLY, application_detach_reply) \ void -tcp_echo_api_hookup (uri_udp_test_main_t * utm) +tcp_echo_api_hookup (udp_echo_main_t * utm) { #define _(N,n) \ vl_msg_api_set_handlers(VL_API_##N, #n, \ @@ -921,7 +1058,7 @@ tcp_echo_api_hookup (uri_udp_test_main_t * utm) int connect_to_vpp (char *name) { - uri_udp_test_main_t *utm = &uri_udp_test_main; + udp_echo_main_t *utm = &udp_echo_main; api_main_t *am = &api_main; if (vl_client_connect_to_vlib ("/vpe-api", name, 32) < 0) @@ -940,7 +1077,7 @@ vlib_cli_output (struct vlib_main_t *vm, char *fmt, ...) } static void -init_error_string_table (uri_udp_test_main_t * utm) +init_error_string_table (udp_echo_main_t * utm) { utm->error_string_by_error_number = hash_create (0, sizeof (uword)); @@ -952,43 +1089,26 @@ init_error_string_table (uri_udp_test_main_t * utm) } void -server_handle_fifo_event_rx (uri_udp_test_main_t * utm, - session_fifo_event_t * e) +server_handle_fifo_event_rx (udp_echo_main_t * utm, session_fifo_event_t * e) { - svm_fifo_t *rx_fifo, *tx_fifo; - int nbytes; - session_fifo_event_t evt; - svm_queue_t *q; + app_session_t *s; int rv; - rx_fifo = e->fifo; - tx_fifo = utm->sessions[rx_fifo->client_session_index].server_tx_fifo; - svm_fifo_unset_event (rx_fifo); - - do - { - nbytes = svm_fifo_dequeue_nowait (rx_fifo, vec_len (utm->rx_buf), - utm->rx_buf); - } - while (nbytes <= 0); - do - { - rv = svm_fifo_enqueue_nowait (tx_fifo, nbytes, utm->rx_buf); - } - while (rv == -2); + s = pool_elt_at_index (utm->sessions, e->fifo->client_session_index); + app_recv (s, utm->rx_buf, vec_len (utm->rx_buf)); - if (svm_fifo_set_event (tx_fifo)) + if (utm->do_echo) { - /* Fabricate TX event, send to vpp */ - evt.fifo = tx_fifo; - evt.event_type = FIFO_EVENT_APP_TX; - q = utm->vpp_event_queue; - svm_queue_add (q, (u8 *) & evt, 0 /* do wait for mutex */ ); + do + { + rv = app_send_stream (s, utm->rx_buf, vec_len (utm->rx_buf), 0); + } + while (rv == SVM_FIFO_FULL); } } void -server_handle_event_queue (uri_udp_test_main_t * utm) +server_handle_event_queue (udp_echo_main_t * utm) { session_fifo_event_t _e, *e = &_e; @@ -1022,7 +1142,7 @@ server_handle_event_queue (uri_udp_test_main_t * utm) } static void -server_unbind (uri_udp_test_main_t * utm) +server_unbind (udp_echo_main_t * utm) { vl_api_unbind_uri_t *ump; @@ -1031,12 +1151,12 @@ server_unbind (uri_udp_test_main_t * utm) ump->_vl_msg_id = ntohs (VL_API_UNBIND_URI); ump->client_index = utm->my_client_index; - memcpy (ump->uri, utm->uri, vec_len (utm->uri)); + memcpy (ump->uri, utm->listen_uri, vec_len (utm->listen_uri)); vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & ump); } static void -server_bind (uri_udp_test_main_t * utm) +server_bind (udp_echo_main_t * utm) { vl_api_bind_uri_t *bmp; @@ -1046,22 +1166,22 @@ server_bind (uri_udp_test_main_t * utm) bmp->_vl_msg_id = ntohs (VL_API_BIND_URI); bmp->client_index = utm->my_client_index; bmp->context = ntohl (0xfeedface); - memcpy (bmp->uri, utm->uri, vec_len (utm->uri)); + memcpy (bmp->uri, utm->listen_uri, vec_len (utm->listen_uri)); vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & bmp); } void -udp_server_test (uri_udp_test_main_t * utm) 
+udp_server_test (udp_echo_main_t * utm) { - + u8 wait_for_state = utm->is_connected ? STATE_BOUND : STATE_READY; application_send_attach (utm); /* Bind to uri */ server_bind (utm); - if (wait_for_state_change (utm, STATE_BOUND)) + if (wait_for_state_change (utm, wait_for_state)) { - clib_warning ("timeout waiting for STATE_BOUND"); + clib_warning ("timeout waiting for state change"); return; } @@ -1084,11 +1204,11 @@ udp_server_test (uri_udp_test_main_t * utm) int main (int argc, char **argv) { - uri_udp_test_main_t *utm = &uri_udp_test_main; - u8 *bind_name = (u8 *) "udp://0.0.0.0/1234"; + udp_echo_main_t *utm = &udp_echo_main; + u8 *uri = (u8 *) "udp://0.0.0.0/1234"; unformat_input_t _argv, *a = &_argv; - int i_am_master = 1; - session_t *session; + int i_am_server = 1; + app_session_t *session; u8 *chroot_prefix; char *app_name; mheap_t *h; @@ -1116,39 +1236,50 @@ main (int argc, char **argv) svm_fifo_segment_main_init (0x200000000ULL, 20); unformat_init_command_line (a, argv); + utm->fifo_size = 128 << 10; + while (unformat_check_input (a) != UNFORMAT_END_OF_INPUT) { if (unformat (a, "chroot prefix %s", &chroot_prefix)) { vl_set_memory_root_path ((char *) chroot_prefix); } - else if (unformat (a, "uri %s", &bind_name)) + else if (unformat (a, "uri %s", &uri)) ; else if (unformat (a, "segment-size %dM", &tmp)) utm->configured_segment_size = tmp << 20; else if (unformat (a, "segment-size %dG", &tmp)) utm->configured_segment_size = tmp << 30; - else if (unformat (a, "master")) - i_am_master = 1; - else if (unformat (a, "slave")) - i_am_master = 0; + else if (unformat (a, "server")) + i_am_server = 1; + else if (unformat (a, "client")) + i_am_server = 0; else { - fformat (stderr, "%s: usage [master|slave]\n"); + fformat (stderr, "%s: usage [server|client]\n"); exit (1); } } utm->cut_through_session_index = ~0; - utm->uri = format (0, "%s%c", bind_name, 0); - utm->i_am_master = i_am_master; + utm->i_am_server = i_am_server; utm->segment_main = &svm_fifo_segment_main; - utm->connect_uri = format (0, "udp://6.0.1.2/1234%c", 0); setup_signal_handlers (); tcp_echo_api_hookup (utm); - app_name = i_am_master ? "udp_echo_master" : "udp_echo_slave"; + if (i_am_server) + { + utm->listen_uri = format (0, "%s%c", uri, 0); + utm->is_connected = (utm->listen_uri[4] == 'c'); + app_name = "udp_echo_server"; + } + else + { + app_name = "udp_echo_client"; + utm->connect_uri = format (0, "%s%c", uri, 0); + utm->is_connected = (utm->connect_uri[4] == 'c'); + } if (connect_to_vpp (app_name) < 0) { svm_region_exit (); @@ -1156,7 +1287,7 @@ main (int argc, char **argv) exit (1); } - if (i_am_master == 0) + if (i_am_server == 0) { client_test (utm); exit (0); diff --git a/src/vnet/session-apps/echo_client.c b/src/vnet/session-apps/echo_client.c index 2cfb471413c..11528ef1899 100644 --- a/src/vnet/session-apps/echo_client.c +++ b/src/vnet/session-apps/echo_client.c @@ -46,23 +46,21 @@ static void send_data_chunk (echo_client_main_t * ecm, session_t * s) { u8 *test_data = ecm->connect_test_data; - int test_buf_offset; + int test_buf_len, test_buf_offset, rv; u32 bytes_this_chunk; - session_fifo_event_t evt; - svm_fifo_t *txf; - int rv; ASSERT (vec_len (test_data) > 0); - - test_buf_offset = s->bytes_sent % vec_len (test_data); - bytes_this_chunk = vec_len (test_data) - test_buf_offset; - - bytes_this_chunk = bytes_this_chunk < s->bytes_to_send - ? 
bytes_this_chunk : s->bytes_to_send; - - txf = s->server_tx_fifo; - rv = svm_fifo_enqueue_nowait (txf, bytes_this_chunk, - test_data + test_buf_offset); + test_buf_len = vec_len (test_data); + test_buf_offset = s->bytes_sent % test_buf_len; + bytes_this_chunk = clib_min (test_buf_len - test_buf_offset, + s->bytes_to_send); + + if (!ecm->is_dgram) + rv = app_send_stream (&s->data, test_data + test_buf_offset, + bytes_this_chunk, 0); + else + rv = app_send_dgram (&s->data, test_data + test_buf_offset, + bytes_this_chunk, 0); /* If we managed to enqueue data... */ if (rv > 0) @@ -89,35 +87,24 @@ send_data_chunk (echo_client_main_t * ecm, session_t * s) ed->data[1] = s->bytes_sent; ed->data[2] = s->bytes_to_send; } - - /* Poke the session layer */ - if (svm_fifo_set_event (txf)) - { - /* Fabricate TX event, send to vpp */ - evt.fifo = txf; - evt.event_type = FIFO_EVENT_APP_TX; - - if (svm_queue_add - (ecm->vpp_event_queue[txf->master_thread_index], (u8 *) & evt, - 0 /* do wait for mutex */ )) - clib_warning ("could not enqueue event"); - } } } static void receive_data_chunk (echo_client_main_t * ecm, session_t * s) { - svm_fifo_t *rx_fifo = s->server_rx_fifo; - u32 my_thread_index = vlib_get_thread_index (); + svm_fifo_t *rx_fifo = s->data.rx_fifo; + u32 thread_index = vlib_get_thread_index (); int n_read, i; if (ecm->test_bytes) { - n_read = svm_fifo_dequeue_nowait (rx_fifo, - vec_len (ecm->rx_buf - [my_thread_index]), - ecm->rx_buf[my_thread_index]); + if (!ecm->is_dgram) + n_read = app_recv_stream (&s->data, ecm->rx_buf[thread_index], + vec_len (ecm->rx_buf[thread_index])); + else + n_read = app_recv_dgram (&s->data, ecm->rx_buf[thread_index], + vec_len (ecm->rx_buf[thread_index])); } else { @@ -148,17 +135,18 @@ receive_data_chunk (echo_client_main_t * ecm, session_t * s) { for (i = 0; i < n_read; i++) { - if (ecm->rx_buf[my_thread_index][i] + if (ecm->rx_buf[thread_index][i] != ((s->bytes_received + i) & 0xff)) { clib_warning ("read %d error at byte %lld, 0x%x not 0x%x", n_read, s->bytes_received + i, - ecm->rx_buf[my_thread_index][i], + ecm->rx_buf[thread_index][i], ((s->bytes_received + i) & 0xff)); ecm->test_failed = 1; } } } + ASSERT (n_read <= s->bytes_to_receive); s->bytes_to_receive -= n_read; s->bytes_received += n_read; } @@ -230,20 +218,15 @@ echo_client_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node, } if (sp->bytes_to_receive > 0) { - receive_data_chunk (ecm, sp); delete_session = 0; } if (PREDICT_FALSE (delete_session == 1)) { - u32 index, thread_index; stream_session_t *s; __sync_fetch_and_add (&ecm->tx_total, sp->bytes_sent); __sync_fetch_and_add (&ecm->rx_total, sp->bytes_received); - - session_parse_handle (sp->vpp_session_handle, - &index, &thread_index); - s = session_get_if_valid (index, thread_index); + s = session_get_from_handle_if_valid (sp->vpp_session_handle); if (s) { @@ -312,8 +295,8 @@ echo_clients_init (vlib_main_t * vm) num_threads = 1 /* main thread */ + vtm->n_threads; - /* Init test data. Bigecmuffer */ - vec_validate (ecm->connect_test_data, 1024 * 1024 - 1); + /* Init test data. Big buffer */ + vec_validate (ecm->connect_test_data, 4 * 1024 * 1024 - 1); for (i = 0; i < vec_len (ecm->connect_test_data); i++) ecm->connect_test_data[i] = i & 0xff; @@ -363,12 +346,22 @@ echo_clients_session_connected_callback (u32 app_index, u32 api_context, session_index = session - ecm->sessions; session->bytes_to_send = ecm->bytes_to_send; session->bytes_to_receive = ecm->no_return ? 
0ULL : ecm->bytes_to_send; - session->server_rx_fifo = s->server_rx_fifo; - session->server_rx_fifo->client_session_index = session_index; - session->server_tx_fifo = s->server_tx_fifo; - session->server_tx_fifo->client_session_index = session_index; + session->data.rx_fifo = s->server_rx_fifo; + session->data.rx_fifo->client_session_index = session_index; + session->data.tx_fifo = s->server_tx_fifo; + session->data.tx_fifo->client_session_index = session_index; + session->data.vpp_evt_q = ecm->vpp_event_queue[thread_index]; session->vpp_session_handle = session_handle (s); + if (ecm->is_dgram) + { + transport_connection_t *tc; + tc = session_get_transport (s); + clib_memcpy (&session->data.transport, tc, + sizeof (session->data.transport)); + session->data.is_dgram = 1; + } + vec_add1 (ecm->connection_index_by_thread[thread_index], session_index); __sync_fetch_and_add (&ecm->ready_connections, 1); if (ecm->ready_connections == ecm->expected_connections) @@ -410,6 +403,28 @@ echo_clients_session_disconnect_callback (stream_session_t * s) static int echo_clients_rx_callback (stream_session_t * s) { + echo_client_main_t *ecm = &echo_client_main; + session_t *sp; + + sp = pool_elt_at_index (ecm->sessions, + s->server_rx_fifo->client_session_index); + receive_data_chunk (ecm, sp); + + if (svm_fifo_max_dequeue (s->server_rx_fifo)) + { + session_fifo_event_t evt; + svm_queue_t *q; + if (svm_fifo_set_event (s->server_rx_fifo)) + { + evt.fifo = s->server_rx_fifo; + evt.event_type = FIFO_EVENT_BUILTIN_RX; + q = session_manager_get_vpp_event_queue (s->thread_index); + if (PREDICT_FALSE (q->cursize == q->maxsize)) + clib_warning ("out of event queue space"); + else if (svm_queue_add (q, (u8 *) & evt, 0)) + clib_warning ("failed to enqueue self-tap"); + } + } return 0; } @@ -544,7 +559,7 @@ echo_clients_connect (vlib_main_t * vm, u32 n_clients) } #define ec_cli_output(_fmt, _args...) 
\ - if (!ecm->no_output) \ + if (!ecm->no_output) \ vlib_cli_output(vm, _fmt, ##_args) static clib_error_t * @@ -663,6 +678,9 @@ echo_clients_command_fn (vlib_main_t * vm, ecm->connect_uri = format (0, "%s%c", default_uri, 0); } + if (ecm->connect_uri[0] == 'u' && ecm->connect_uri[3] != 'c') + ecm->is_dgram = 1; + #if ECHO_CLIENT_PTHREAD echo_clients_start_tx_pthread (); #endif diff --git a/src/vnet/session-apps/echo_client.h b/src/vnet/session-apps/echo_client.h index 5712da5b8c8..344e43865b5 100644 --- a/src/vnet/session-apps/echo_client.h +++ b/src/vnet/session-apps/echo_client.h @@ -30,15 +30,13 @@ typedef struct { + app_session_t data; u64 bytes_to_send; u64 bytes_sent; u64 bytes_to_receive; u64 bytes_received; - - svm_fifo_t *server_rx_fifo; - svm_fifo_t *server_tx_fifo; - u64 vpp_session_handle; + u8 thread_index; } session_t; typedef struct @@ -46,7 +44,7 @@ typedef struct /* * Application setup parameters */ - svm_queue_t *vl_input_queue; /**< vpe input queue */ + svm_queue_t *vl_input_queue; /**< vpe input queue */ svm_queue_t **vpp_event_queue; u32 cli_node_index; /**< cli process node index */ @@ -65,6 +63,7 @@ typedef struct u32 private_segment_count; /**< Number of private fifo segs */ u32 private_segment_size; /**< size of private fifo segs */ u32 tls_engine; /**< TLS engine mbedtls/openssl */ + u8 is_dgram; /* * Test state variables */ diff --git a/src/vnet/session-apps/echo_server.c b/src/vnet/session-apps/echo_server.c index 0bde2e4995f..85e6c299919 100644 --- a/src/vnet/session-apps/echo_server.c +++ b/src/vnet/session-apps/echo_server.c @@ -34,13 +34,14 @@ typedef struct * Config params */ u8 no_echo; /**< Don't echo traffic */ - u32 fifo_size; /**< Fifo size */ + u32 fifo_size; /**< Fifo size */ u32 rcv_buffer_size; /**< Rcv buffer size */ u32 prealloc_fifos; /**< Preallocate fifos */ u32 private_segment_count; /**< Number of private segments */ u32 private_segment_size; /**< Size of private segments */ char *server_uri; /**< Server URI */ u32 tls_engine; /**< TLS engine: mbedtls/openssl */ + u8 is_dgram; /**< set if transport is dgram */ /* * Test state */ @@ -126,25 +127,13 @@ test_bytes (echo_server_main_t * esm, int actual_transfer) } /* - * If no-echo, just read the data and be done with it + * If no-echo, just drop the data and be done with it. 
*/ int echo_server_builtin_server_rx_callback_no_echo (stream_session_t * s) { - echo_server_main_t *esm = &echo_server_main; - u32 my_thread_id = vlib_get_thread_index (); - int actual_transfer; - svm_fifo_t *rx_fifo; - - rx_fifo = s->server_rx_fifo; - - do - { - actual_transfer = - svm_fifo_dequeue_nowait (rx_fifo, esm->rcv_buffer_size, - esm->rx_buf[my_thread_id]); - } - while (actual_transfer > 0); + svm_fifo_t *rx_fifo = s->server_rx_fifo; + svm_fifo_dequeue_drop (rx_fifo, svm_fifo_max_dequeue (rx_fifo)); return 0; } @@ -157,6 +146,8 @@ echo_server_rx_callback (stream_session_t * s) echo_server_main_t *esm = &echo_server_main; session_fifo_event_t evt; u32 thread_index = vlib_get_thread_index (); + app_session_transport_t at; + svm_queue_t *q; ASSERT (s->thread_index == thread_index); @@ -166,14 +157,29 @@ echo_server_rx_callback (stream_session_t * s) ASSERT (rx_fifo->master_thread_index == thread_index); ASSERT (tx_fifo->master_thread_index == thread_index); - max_dequeue = svm_fifo_max_dequeue (s->server_rx_fifo); - max_enqueue = svm_fifo_max_enqueue (s->server_tx_fifo); + max_enqueue = svm_fifo_max_enqueue (tx_fifo); + if (!esm->is_dgram) + { + max_dequeue = svm_fifo_max_dequeue (rx_fifo); + } + else + { + session_dgram_pre_hdr_t ph; + svm_fifo_peek (rx_fifo, 0, sizeof (ph), (u8 *) & ph); + max_dequeue = ph.data_length - ph.data_offset; + if (!esm->vpp_queue[s->thread_index]) + { + q = session_manager_get_vpp_event_queue (s->thread_index); + esm->vpp_queue[s->thread_index] = q; + } + max_enqueue -= sizeof (session_dgram_hdr_t); + } if (PREDICT_FALSE (max_dequeue == 0)) return 0; /* Number of bytes we're going to copy */ - max_transfer = (max_dequeue < max_enqueue) ? max_dequeue : max_enqueue; + max_transfer = clib_min (max_dequeue, max_enqueue); /* No space in tx fifo */ if (PREDICT_FALSE (max_transfer == 0)) @@ -184,16 +190,16 @@ echo_server_rx_callback (stream_session_t * s) /* Program self-tap to retry */ if (svm_fifo_set_event (rx_fifo)) { - svm_queue_t *q; evt.fifo = rx_fifo; evt.event_type = FIFO_EVENT_BUILTIN_RX; - q = esm->vpp_queue[thread_index]; + q = esm->vpp_queue[s->thread_index]; if (PREDICT_FALSE (q->cursize == q->maxsize)) clib_warning ("out of event queue space"); else if (svm_queue_add (q, (u8 *) & evt, 0)) clib_warning ("failed to enqueue self-tap"); + vec_validate (esm->rx_retries[s->thread_index], s->session_index); if (esm->rx_retries[thread_index][s->session_index] == 500000) { clib_warning ("session stuck: %U", format_stream_session, s, 2); @@ -205,36 +211,47 @@ echo_server_rx_callback (stream_session_t * s) return 0; } - _vec_len (esm->rx_buf[thread_index]) = max_transfer; - - actual_transfer = svm_fifo_dequeue_nowait (rx_fifo, max_transfer, - esm->rx_buf[thread_index]); + vec_validate (esm->rx_buf[thread_index], max_transfer); + if (!esm->is_dgram) + { + actual_transfer = app_recv_stream_raw (rx_fifo, + esm->rx_buf[thread_index], + max_transfer, + 0 /* don't clear event */ ); + } + else + { + actual_transfer = app_recv_dgram_raw (rx_fifo, + esm->rx_buf[thread_index], + max_transfer, &at, + 0 /* don't clear event */ ); + } ASSERT (actual_transfer == max_transfer); - -// test_bytes (esm, actual_transfer); + /* test_bytes (esm, actual_transfer); */ /* * Echo back */ - n_written = svm_fifo_enqueue_nowait (tx_fifo, actual_transfer, - esm->rx_buf[thread_index]); - - if (n_written != max_transfer) - clib_warning ("short trout!"); - - if (svm_fifo_set_event (tx_fifo)) + if (!esm->is_dgram) { - /* Fabricate TX event, send to vpp */ - evt.fifo = tx_fifo; - 
evt.event_type = FIFO_EVENT_APP_TX; - - if (svm_queue_add (esm->vpp_queue[s->thread_index], - (u8 *) & evt, 0 /* do wait for mutex */ )) - clib_warning ("failed to enqueue tx evt"); + n_written = app_send_stream_raw (tx_fifo, + esm->vpp_queue[thread_index], + esm->rx_buf[thread_index], + actual_transfer, 0); } + else + { + n_written = app_send_dgram_raw (tx_fifo, &at, + esm->vpp_queue[s->thread_index], + esm->rx_buf[thread_index], + actual_transfer, 0); + } + + if (n_written != max_transfer) + clib_warning ("short trout! written %u read %u", n_written, max_transfer); - if (PREDICT_FALSE (n_written < max_dequeue)) + if (PREDICT_FALSE (svm_fifo_max_dequeue (rx_fifo))) goto rx_event; return 0; @@ -411,6 +428,7 @@ echo_server_create_command_fn (vlib_main_t * vm, unformat_input_t * input, esm->private_segment_count = 0; esm->private_segment_size = 0; esm->tls_engine = TLS_ENGINE_OPENSSL; + esm->is_dgram = 0; vec_free (esm->server_uri); while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) @@ -479,6 +497,8 @@ echo_server_create_command_fn (vlib_main_t * vm, unformat_input_t * input, clib_warning ("No uri provided! Using default: %s", default_uri); esm->server_uri = (char *) format (0, "%s%c", default_uri, 0); } + if (esm->server_uri[0] == 'u' && esm->server_uri[3] != 'c') + esm->is_dgram = 1; rv = echo_server_create (vm, appns_id, appns_flags, appns_secret); vec_free (appns_id); diff --git a/src/vnet/session/application.c b/src/vnet/session/application.c index 1c46e786e13..68bbd59098e 100644 --- a/src/vnet/session/application.c +++ b/src/vnet/session/application.c @@ -445,13 +445,9 @@ application_start_listen (application_t * srv, session_endpoint_t * sep, s = listen_session_new (0, sst); s->app_index = srv->index; - if (stream_session_listen (s, sep)) - goto err; - /* Allocate segment manager. All sessions derived out of a listen session * have fifos allocated by the same segment manager. */ - sm = application_alloc_segment_manager (srv); - if (sm == 0) + if (!(sm = application_alloc_segment_manager (srv))) goto err; /* Add to app's listener table. 
Useful to find all child listeners @@ -459,6 +455,13 @@ application_start_listen (application_t * srv, session_endpoint_t * sep, handle = listen_session_get_handle (s); hash_set (srv->listeners_table, handle, segment_manager_index (sm)); + if (stream_session_listen (s, sep)) + { + segment_manager_del (sm); + hash_unset (srv->listeners_table, handle); + goto err; + } + *res = handle; return 0; diff --git a/src/vnet/session/application_interface.h b/src/vnet/session/application_interface.h index 5dc237f6e7b..5fd218533dc 100644 --- a/src/vnet/session/application_interface.h +++ b/src/vnet/session/application_interface.h @@ -173,6 +173,163 @@ extern const u32 test_srv_crt_rsa_len; extern const char test_srv_key_rsa[]; extern const u32 test_srv_key_rsa_len; +typedef struct app_session_transport_ +{ + ip46_address_t rmt_ip; /**< remote ip */ + ip46_address_t lcl_ip; /**< local ip */ + u16 rmt_port; /**< remote port */ + u16 lcl_port; /**< local port */ + u8 is_ip4; /**< set if uses ip4 networking */ +} app_session_transport_t; + +typedef struct app_session_ +{ + svm_fifo_t *rx_fifo; /**< rx fifo */ + svm_fifo_t *tx_fifo; /**< tx fifo */ + session_type_t session_type; /**< session type */ + volatile u8 session_state; /**< session state */ + u32 session_index; /**< index in owning pool */ + app_session_transport_t transport; /**< transport info */ + svm_queue_t *vpp_evt_q; /**< vpp event queue for session */ + u8 is_dgram; /**< set if it works in dgram mode */ +} app_session_t; + +always_inline int +app_send_dgram_raw (svm_fifo_t * f, app_session_transport_t * at, + svm_queue_t * vpp_evt_q, u8 * data, u32 len, u8 noblock) +{ + u32 max_enqueue, actual_write; + session_dgram_hdr_t hdr; + session_fifo_event_t evt; + int rv; + + max_enqueue = svm_fifo_max_enqueue (f); + if (svm_fifo_max_enqueue (f) <= sizeof (session_dgram_hdr_t)) + return 0; + + max_enqueue -= sizeof (session_dgram_hdr_t); + actual_write = clib_min (len, max_enqueue); + hdr.data_length = actual_write; + hdr.data_offset = 0; + clib_memcpy (&hdr.rmt_ip, &at->rmt_ip, sizeof (ip46_address_t)); + hdr.is_ip4 = at->is_ip4; + hdr.rmt_port = at->rmt_port; + clib_memcpy (&hdr.lcl_ip, &at->lcl_ip, sizeof (ip46_address_t)); + hdr.lcl_port = at->lcl_port; + rv = svm_fifo_enqueue_nowait (f, sizeof (hdr), (u8 *) & hdr); + if (rv <= 0) + return 0; + + ASSERT (rv == sizeof (hdr)); + + if ((rv = svm_fifo_enqueue_nowait (f, actual_write, data)) > 0) + { + if (svm_fifo_set_event (f)) + { + evt.fifo = f; + evt.event_type = FIFO_EVENT_APP_TX; + svm_queue_add (vpp_evt_q, (u8 *) & evt, noblock); + } + } + return rv; +} + +always_inline int +app_send_dgram (app_session_t * s, u8 * data, u32 len, u8 noblock) +{ + return app_send_dgram_raw (s->tx_fifo, &s->transport, s->vpp_evt_q, data, + len, noblock); +} + +always_inline int +app_send_stream_raw (svm_fifo_t * f, svm_queue_t * vpp_evt_q, u8 * data, + u32 len, u8 noblock) +{ + session_fifo_event_t evt; + int rv; + + if ((rv = svm_fifo_enqueue_nowait (f, len, data)) > 0) + { + if (svm_fifo_set_event (f)) + { + evt.fifo = f; + evt.event_type = FIFO_EVENT_APP_TX; + svm_queue_add (vpp_evt_q, (u8 *) & evt, noblock); + } + } + return rv; +} + +always_inline int +app_send_stream (app_session_t * s, u8 * data, u32 len, u8 noblock) +{ + return app_send_stream_raw (s->tx_fifo, s->vpp_evt_q, data, len, noblock); +} + +always_inline int +app_send (app_session_t * s, u8 * data, u32 len, u8 noblock) +{ + if (s->is_dgram) + return app_send_dgram (s, data, len, noblock); + return app_send_stream (s, data, len, noblock); +} + 
+always_inline int +app_recv_dgram_raw (svm_fifo_t * f, u8 * buf, u32 len, + app_session_transport_t * at, u8 clear_evt) +{ + session_dgram_pre_hdr_t ph; + u32 max_deq; + int rv; + + if (clear_evt) + svm_fifo_unset_event (f); + max_deq = svm_fifo_max_dequeue (f); + if (max_deq < sizeof (session_dgram_hdr_t)) + return 0; + + svm_fifo_peek (f, 0, sizeof (ph), (u8 *) & ph); + ASSERT (ph.data_length >= ph.data_offset); + if (!ph.data_offset) + svm_fifo_peek (f, sizeof (ph), sizeof (*at), (u8 *) at); + len = clib_min (len, ph.data_length - ph.data_offset); + rv = svm_fifo_peek (f, ph.data_offset + SESSION_CONN_HDR_LEN, len, buf); + ph.data_offset += rv; + if (ph.data_offset == ph.data_length) + svm_fifo_dequeue_drop (f, ph.data_length + SESSION_CONN_HDR_LEN); + else + svm_fifo_overwrite_head (f, (u8 *) & ph, sizeof (ph)); + return rv; +} + +always_inline int +app_recv_dgram (app_session_t * s, u8 * buf, u32 len) +{ + return app_recv_dgram_raw (s->rx_fifo, buf, len, &s->transport, 1); +} + +always_inline int +app_recv_stream_raw (svm_fifo_t * f, u8 * buf, u32 len, u8 clear_evt) +{ + if (clear_evt) + svm_fifo_unset_event (f); + return svm_fifo_dequeue_nowait (f, len, buf); +} + +always_inline int +app_recv_stream (app_session_t * s, u8 * buf, u32 len) +{ + return app_recv_stream_raw (s->rx_fifo, buf, len, 1); +} + +always_inline int +app_recv (app_session_t * s, u8 * data, u32 len) +{ + if (s->is_dgram) + return app_recv_dgram (s, data, len); + return app_recv_stream (s, data, len); +} + #endif /* __included_uri_h__ */ /* diff --git a/src/vnet/session/session.api b/src/vnet/session/session.api index bf88e82f336..98748d8fbbb 100644 --- a/src/vnet/session/session.api +++ b/src/vnet/session/session.api @@ -13,7 +13,7 @@ * limitations under the License. */ -option version = "1.0.2"; +option version = "1.0.3"; /** \brief client->vpp, attach application to session layer @param client_index - opaque cookie to identify the sender @@ -119,13 +119,25 @@ autoreply define unmap_segment { "tcp://::/0/80" [ipv6] etc. @param options - socket options, fifo sizes, etc. 
*/ -autoreply define bind_uri { +define bind_uri { u32 client_index; u32 context; u32 accept_cookie; u8 uri[128]; }; +define bind_uri_reply { + u32 context; + u64 handle; + i32 retval; + u64 rx_fifo; + u64 tx_fifo; + u8 lcl_is_ip4; + u8 lcl_ip[16]; + u16 lcl_port; + u64 vpp_evt_q; +}; + /** \brief Unbind a given URI @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -314,8 +326,12 @@ autoreply define connect_sock { @param context - sender context, to match reply w/ request @param handle - bind handle @param retval - return code for the request - @param event_queue_address - vpp event queue address or 0 if this - connection shouldn't send events + @param lcl_is_ip4 - local ip address type + @param lcl_ip6 - local ip address + @param lcl_port - local port + @param rx_fifo - rx fifo address if allocated (connectionless) + @param tx_fifo - tx fifo address if allocated (connectionless) + @param vpp_evt_q - vpp event queue address (connectionless) @param segment_name_length - length of segment name @param segment_name - name of segment client needs to attach to */ @@ -323,10 +339,12 @@ define bind_sock_reply { u32 context; u64 handle; i32 retval; - u64 server_event_queue_address; u8 lcl_is_ip4; u8 lcl_ip[16]; u16 lcl_port; + u64 rx_fifo; + u64 tx_fifo; + u64 vpp_evt_q; u32 segment_size; u8 segment_name_length; u8 segment_name[128]; diff --git a/src/vnet/session/session.c b/src/vnet/session/session.c index d258b82c983..dfc967b12dc 100644 --- a/src/vnet/session/session.c +++ b/src/vnet/session/session.c @@ -355,14 +355,19 @@ session_enqueue_stream_connection (transport_connection_t * tc, return enqueued; } + int -session_enqueue_dgram_connection (stream_session_t * s, vlib_buffer_t * b, - u8 proto, u8 queue_event) +session_enqueue_dgram_connection (stream_session_t * s, + session_dgram_hdr_t * hdr, + vlib_buffer_t * b, u8 proto, u8 queue_event) { int enqueued = 0, rv, in_order_off; - if (svm_fifo_max_enqueue (s->server_rx_fifo) < b->current_length) - return -1; + ASSERT (svm_fifo_max_enqueue (s->server_rx_fifo) + >= b->current_length + sizeof (*hdr)); + + svm_fifo_enqueue_nowait (s->server_rx_fifo, sizeof (session_dgram_hdr_t), + (u8 *) hdr); enqueued = svm_fifo_enqueue_nowait (s->server_rx_fifo, b->current_length, vlib_buffer_get_current (b)); if (PREDICT_FALSE ((b->flags & VLIB_BUFFER_NEXT_PRESENT) && enqueued >= 0)) @@ -530,6 +535,16 @@ session_manager_flush_enqueue_events (u8 transport_proto, u32 thread_index) return errors; } +int +session_manager_flush_all_enqueue_events (u8 transport_proto) +{ + vlib_thread_main_t *vtm = vlib_get_thread_main (); + int i, errors = 0; + for (i = 0; i < 1 + vtm->n_threads; i++) + errors += session_manager_flush_enqueue_events (transport_proto, i); + return errors; +} + /** * Init fifo tail and head pointers * @@ -825,7 +840,7 @@ session_open_cl (u32 app_index, session_endpoint_t * rmt, u32 opaque) if (session_alloc_and_init (sm, tc, 1, &s)) return -1; s->app_index = app->index; - s->session_state = SESSION_STATE_CONNECTING_READY; + s->session_state = SESSION_STATE_OPENED; /* Tell the app about the new event fifo for this session */ app->cb_fns.session_connected_callback (app->index, opaque, s, 0); @@ -841,10 +856,6 @@ session_open_vc (u32 app_index, session_endpoint_t * rmt, u32 opaque) u64 handle; int rv; - /* TODO until udp is fixed */ - if (rmt->transport_proto == TRANSPORT_PROTO_UDP) - return session_open_cl (app_index, rmt, opaque); - tep = session_endpoint_to_transport (rmt); rv = 
tp_vfts[rmt->transport_proto].open (tep); if (rv < 0) @@ -912,14 +923,6 @@ session_open (u32 app_index, session_endpoint_t * rmt, u32 opaque) return session_open_srv_fns[tst] (app_index, rmt, opaque); } -/** - * Ask transport to listen on local transport endpoint. - * - * @param s Session for which listen will be called. Note that unlike - * established sessions, listen sessions are not associated to a - * thread. - * @param tep Local endpoint to be listened on. - */ int session_listen_vc (stream_session_t * s, session_endpoint_t * sep) { @@ -948,6 +951,40 @@ session_listen_vc (stream_session_t * s, session_endpoint_t * sep) } int +session_listen_cl (stream_session_t * s, session_endpoint_t * sep) +{ + transport_connection_t *tc; + application_t *server; + segment_manager_t *sm; + u32 tci; + + /* Transport bind/listen */ + tci = tp_vfts[sep->transport_proto].bind (s->session_index, + session_endpoint_to_transport + (sep)); + + if (tci == (u32) ~ 0) + return -1; + + /* Attach transport to session */ + s->connection_index = tci; + tc = tp_vfts[sep->transport_proto].get_listener (tci); + + /* Weird but handle it ... */ + if (tc == 0) + return -1; + + server = application_get (s->app_index); + sm = application_get_listen_segment_manager (server, s); + if (session_alloc_fifos (sm, s)) + return -1; + + /* Add to the main lookup table */ + session_lookup_add_connection (tc, s->session_index); + return 0; +} + +int session_listen_app (stream_session_t * s, session_endpoint_t * sep) { session_endpoint_extended_t esep; @@ -965,11 +1002,19 @@ typedef int (*session_listen_service_fn) (stream_session_t *, static session_listen_service_fn session_listen_srv_fns[TRANSPORT_N_SERVICES] = { session_listen_vc, - session_listen_vc, + session_listen_cl, session_listen_app, }; /* *INDENT-ON* */ +/** + * Ask transport to listen on local transport endpoint. + * + * @param s Session for which listen will be called. Note that unlike + * established sessions, listen sessions are not associated to a + * thread. + * @param tep Local endpoint to be listened on. 
+ */ int stream_session_listen (stream_session_t * s, session_endpoint_t * sep) { @@ -1125,7 +1170,8 @@ session_manager_get_evt_q_segment (void) static session_fifo_rx_fn *session_tx_fns[TRANSPORT_TX_N_FNS] = { session_tx_fifo_peek_and_snd, session_tx_fifo_dequeue_and_snd, - session_tx_fifo_dequeue_internal + session_tx_fifo_dequeue_internal, + session_tx_fifo_dequeue_and_snd }; /* *INDENT-ON* */ @@ -1228,11 +1274,12 @@ session_manager_main_enable (vlib_main_t * vm) vec_validate (smm->peekers_rw_locks, num_threads - 1); for (i = 0; i < TRANSPORT_N_PROTO; i++) - for (j = 0; j < num_threads; j++) - { - vec_validate (smm->session_to_enqueue[i], num_threads - 1); - vec_validate (smm->current_enqueue_epoch[i], num_threads - 1); - } + { + vec_validate (smm->current_enqueue_epoch[i], num_threads - 1); + vec_validate (smm->session_to_enqueue[i], num_threads - 1); + for (j = 0; j < num_threads; j++) + smm->current_enqueue_epoch[i][j] = 1; + } for (i = 0; i < num_threads; i++) { diff --git a/src/vnet/session/session.h b/src/vnet/session/session.h index 0b53f61a10a..9d534aec19b 100644 --- a/src/vnet/session/session.h +++ b/src/vnet/session/session.h @@ -204,6 +204,31 @@ struct _session_manager_main }; +typedef struct session_dgram_pre_hdr_ +{ + u32 data_length; + u32 data_offset; +} session_dgram_pre_hdr_t; + +/* *INDENT-OFF* */ +typedef CLIB_PACKED (struct session_dgram_header_ +{ + u32 data_length; + u32 data_offset; + ip46_address_t rmt_ip; + ip46_address_t lcl_ip; + u16 rmt_port; + u16 lcl_port; + u8 is_ip4; +}) session_dgram_hdr_t; +/* *INDENT-ON* */ + +#define SESSION_CONN_ID_LEN 37 +#define SESSION_CONN_HDR_LEN 45 + +STATIC_ASSERT (sizeof (session_dgram_hdr_t) == (SESSION_CONN_ID_LEN + 8), + "session conn id wrong length"); + extern session_manager_main_t session_manager_main; extern vlib_node_registration_t session_queue_node; @@ -342,6 +367,14 @@ session_has_transport (stream_session_t * s) return (session_get_transport_proto (s) != TRANSPORT_PROTO_NONE); } +always_inline transport_service_type_t +session_transport_service_type (stream_session_t * s) +{ + transport_proto_t tp; + tp = session_get_transport_proto (s); + return transport_protocol_service_type (tp); +} + /** * Acquires a lock that blocks a session pool from expanding. 
* @@ -442,8 +475,10 @@ int session_enqueue_stream_connection (transport_connection_t * tc, vlib_buffer_t * b, u32 offset, u8 queue_event, u8 is_in_order); -int session_enqueue_dgram_connection (stream_session_t * s, vlib_buffer_t * b, - u8 proto, u8 queue_event); +int session_enqueue_dgram_connection (stream_session_t * s, + session_dgram_hdr_t * hdr, + vlib_buffer_t * b, u8 proto, + u8 queue_event); int stream_session_peek_bytes (transport_connection_t * tc, u8 * buffer, u32 offset, u32 max_bytes); u32 stream_session_dequeue_drop (transport_connection_t * tc, u32 max_bytes); @@ -490,6 +525,7 @@ session_manager_get_vpp_event_queue (u32 thread_index) } int session_manager_flush_enqueue_events (u8 proto, u32 thread_index); +int session_manager_flush_all_enqueue_events (u8 transport_proto); always_inline u64 listen_session_get_handle (stream_session_t * s) diff --git a/src/vnet/session/session_api.c b/src/vnet/session/session_api.c index 2a74a196201..67c42faf692 100755 --- a/src/vnet/session/session_api.c +++ b/src/vnet/session/session_api.c @@ -480,9 +480,12 @@ done: static void vl_api_bind_uri_t_handler (vl_api_bind_uri_t * mp) { - vl_api_bind_uri_reply_t *rmp; + transport_connection_t *tc = 0; vnet_bind_args_t _a, *a = &_a; - application_t *app; + vl_api_bind_uri_reply_t *rmp; + stream_session_t *s; + application_t *app = 0; + svm_queue_t *vpp_evt_q; int rv; if (session_manager_is_enabled () == 0) @@ -505,7 +508,30 @@ vl_api_bind_uri_t_handler (vl_api_bind_uri_t * mp) } done: - REPLY_MACRO (VL_API_BIND_URI_REPLY); + + /* *INDENT-OFF* */ + REPLY_MACRO2 (VL_API_BIND_URI_REPLY, ({ + if (!rv) + { + rmp->handle = a->handle; + rmp->lcl_is_ip4 = tc->is_ip4; + rmp->lcl_port = tc->lcl_port; + if (app && application_has_global_scope (app)) + { + s = listen_session_get_from_handle (a->handle); + tc = listen_session_get_transport (s); + clib_memcpy (rmp->lcl_ip, &tc->lcl_ip, sizeof(tc->lcl_ip)); + if (session_transport_service_type (s) == TRANSPORT_SERVICE_CL) + { + rmp->rx_fifo = pointer_to_uword (s->server_rx_fifo); + rmp->tx_fifo = pointer_to_uword (s->server_tx_fifo); + vpp_evt_q = session_manager_get_vpp_event_queue (0); + rmp->vpp_evt_q = pointer_to_uword (vpp_evt_q); + } + } + } + })); + /* *INDENT-ON* */ } static void @@ -733,6 +759,7 @@ vl_api_bind_sock_t_handler (vl_api_bind_sock_t * mp) stream_session_t *s; transport_connection_t *tc = 0; ip46_address_t *ip46; + svm_queue_t *vpp_evt_q; if (session_manager_is_enabled () == 0) { @@ -775,8 +802,14 @@ done: { s = listen_session_get_from_handle (a->handle); tc = listen_session_get_transport (s); - rmp->lcl_is_ip4 = tc->is_ip4; clib_memcpy (rmp->lcl_ip, &tc->lcl_ip, sizeof (tc->lcl_ip)); + if (session_transport_service_type (s) == TRANSPORT_SERVICE_CL) + { + rmp->rx_fifo = pointer_to_uword (s->server_rx_fifo); + rmp->tx_fifo = pointer_to_uword (s->server_tx_fifo); + vpp_evt_q = session_manager_get_vpp_event_queue (0); + rmp->vpp_evt_q = pointer_to_uword (vpp_evt_q); + } } } })); diff --git a/src/vnet/session/session_cli.c b/src/vnet/session/session_cli.c index 52833554a53..201f6f1d66e 100755 --- a/src/vnet/session/session_cli.c +++ b/src/vnet/session/session_cli.c @@ -23,6 +23,9 @@ format_stream_session_fifos (u8 * s, va_list * args) session_fifo_event_t _e, *e = &_e; u8 found; + if (!ss->server_rx_fifo || !ss->server_tx_fifo) + return s; + s = format (s, " Rx fifo: %U", format_svm_fifo, ss->server_rx_fifo, 1); if (verbose > 2 && ss->server_rx_fifo->has_event) { @@ -76,6 +79,8 @@ format_stream_session (u8 * s, va_list * args) { s = format (s, 
"%-40U%v", tp_vft->format_listener, ss->connection_index, str); + if (verbose > 1) + s = format (s, "\n%U", format_stream_session_fifos, ss, verbose); } else if (ss->session_state == SESSION_STATE_CONNECTING) { diff --git a/src/vnet/session/session_node.c b/src/vnet/session/session_node.c index b8f429eb1d7..14716965547 100644 --- a/src/vnet/session/session_node.c +++ b/src/vnet/session/session_node.c @@ -70,7 +70,7 @@ session_tx_fifo_chain_tail (session_manager_main_t * smm, vlib_main_t * vm, vlib_buffer_t * b0, u32 bi0, u8 n_bufs_per_seg, u32 left_from_seg, u32 * left_to_snd0, u16 * n_bufs, u32 * tx_offset, u16 deq_per_buf, - u8 peek_data) + u8 peek_data, transport_tx_fn_type_t tx_type) { vlib_buffer_t *chain_b0, *prev_b0; u32 chain_bi0, to_deq; @@ -102,7 +102,23 @@ session_tx_fifo_chain_tail (session_manager_main_t * smm, vlib_main_t * vm, } else { - n_bytes_read = svm_fifo_dequeue_nowait (fifo, len_to_deq0, data0); + if (tx_type == TRANSPORT_TX_DGRAM) + { + session_dgram_hdr_t *hdr; + u16 deq_now; + hdr = (session_dgram_hdr_t *) svm_fifo_head (fifo); + deq_now = clib_min (hdr->data_length - hdr->data_offset, + len_to_deq0); + n_bytes_read = svm_fifo_peek (fifo, hdr->data_offset, deq_now, + data0); + ASSERT (n_bytes_read > 0); + + hdr->data_offset += n_bytes_read; + if (hdr->data_offset == hdr->data_length) + svm_fifo_dequeue_drop (fifo, hdr->data_length); + } + else + n_bytes_read = svm_fifo_dequeue_nowait (fifo, len_to_deq0, data0); } ASSERT (n_bytes_read == len_to_deq0); chain_b0->current_length = n_bytes_read; @@ -145,12 +161,35 @@ session_tx_fifo_read_and_snd_i (vlib_main_t * vm, vlib_node_runtime_t * node, int i, n_bytes_read; u32 n_bytes_per_buf, deq_per_buf, deq_per_first_buf; u32 bufs_alloc, bufs_now; + session_dgram_hdr_t hdr; next_index = next0 = smm->session_type_to_next[s0->session_type]; - tp = session_get_transport_proto (s0); transport_vft = transport_protocol_get_vft (tp); - tc0 = transport_vft->get_connection (s0->connection_index, thread_index); + if (peek_data) + { + if (PREDICT_FALSE (s0->session_state < SESSION_STATE_READY)) + { + /* Can retransmit for closed sessions but can't send new data if + * session is not ready or closed */ + vec_add1 (smm->pending_event_vector[thread_index], *e0); + return 0; + } + tc0 = + transport_vft->get_connection (s0->connection_index, thread_index); + } + else + { + if (s0->session_state == SESSION_STATE_LISTENING) + tc0 = transport_vft->get_listener (s0->connection_index); + else + { + if (PREDICT_FALSE (s0->session_state == SESSION_STATE_CLOSED)) + return 0; + tc0 = transport_vft->get_connection (s0->connection_index, + thread_index); + } + } /* Make sure we have space to send and there's something to dequeue */ snd_mss0 = transport_vft->send_mss (tc0); @@ -168,20 +207,26 @@ session_tx_fifo_read_and_snd_i (vlib_main_t * vm, vlib_node_runtime_t * node, /* Check how much we can pull. 
*/ max_dequeue0 = svm_fifo_max_dequeue (s0->server_tx_fifo); - if (peek_data) { /* Offset in rx fifo from where to peek data */ tx_offset = transport_vft->tx_fifo_offset (tc0); if (PREDICT_FALSE (tx_offset >= max_dequeue0)) - max_dequeue0 = 0; - else - max_dequeue0 -= tx_offset; + return 0; + max_dequeue0 -= tx_offset; } - - /* Nothing to read return */ - if (max_dequeue0 == 0) - return 0; + else + { + if (transport_vft->tx_type == TRANSPORT_TX_DGRAM) + { + if (max_dequeue0 < sizeof (hdr)) + return 0; + svm_fifo_peek (s0->server_tx_fifo, 0, sizeof (hdr), (u8 *) & hdr); + ASSERT (hdr.data_length > hdr.data_offset); + max_dequeue0 = hdr.data_length - hdr.data_offset; + } + } + ASSERT (max_dequeue0 > 0); /* Ensure we're not writing more than transport window allows */ if (max_dequeue0 < snd_space0) @@ -286,14 +331,42 @@ session_tx_fifo_read_and_snd_i (vlib_main_t * vm, vlib_node_runtime_t * node, } else { - n_bytes_read = svm_fifo_dequeue_nowait (s0->server_tx_fifo, - len_to_deq0, data0); - if (n_bytes_read <= 0) - goto dequeue_fail; + if (transport_vft->tx_type == TRANSPORT_TX_DGRAM) + { + svm_fifo_t *f = s0->server_tx_fifo; + u16 deq_now; + u32 offset; + + ASSERT (hdr.data_length > hdr.data_offset); + deq_now = clib_min (hdr.data_length - hdr.data_offset, + len_to_deq0); + offset = hdr.data_offset + SESSION_CONN_HDR_LEN; + n_bytes_read = svm_fifo_peek (f, offset, deq_now, data0); + if (PREDICT_FALSE (n_bytes_read <= 0)) + goto dequeue_fail; + + if (s0->session_state == SESSION_STATE_LISTENING) + { + ip_copy (&tc0->rmt_ip, &hdr.rmt_ip, tc0->is_ip4); + tc0->rmt_port = hdr.rmt_port; + } + hdr.data_offset += n_bytes_read; + if (hdr.data_offset == hdr.data_length) + { + offset = hdr.data_length + SESSION_CONN_HDR_LEN; + svm_fifo_dequeue_drop (f, offset); + } + } + else + { + n_bytes_read = svm_fifo_dequeue_nowait (s0->server_tx_fifo, + len_to_deq0, data0); + if (n_bytes_read <= 0) + goto dequeue_fail; + } } b0->current_length = n_bytes_read; - left_to_snd0 -= n_bytes_read; *n_tx_packets = *n_tx_packets + 1; @@ -307,7 +380,8 @@ session_tx_fifo_read_and_snd_i (vlib_main_t * vm, vlib_node_runtime_t * node, s0->server_tx_fifo, b0, bi0, n_bufs_per_seg, left_for_seg, &left_to_snd0, &n_bufs, &tx_offset, - deq_per_buf, peek_data); + deq_per_buf, peek_data, + transport_vft->tx_type); } /* Ask transport to push header after current_length and @@ -345,12 +419,18 @@ session_tx_fifo_read_and_snd_i (vlib_main_t * vm, vlib_node_runtime_t * node, /* If we couldn't dequeue all bytes mark as partially read */ if (max_len_to_snd0 < max_dequeue0) + if (svm_fifo_set_event (s0->server_tx_fifo)) + vec_add1 (smm->pending_event_vector[thread_index], *e0); + + if (!peek_data && transport_vft->tx_type == TRANSPORT_TX_DGRAM) { - /* If we don't already have new event */ - if (svm_fifo_set_event (s0->server_tx_fifo)) - { - vec_add1 (smm->pending_event_vector[thread_index], *e0); - } + /* Fix dgram pre header */ + if (max_len_to_snd0 < max_dequeue0) + svm_fifo_overwrite_head (s0->server_tx_fifo, (u8 *) & hdr, + sizeof (session_dgram_pre_hdr_t)); + /* More data needs to be read */ + else if (svm_fifo_max_dequeue (s0->server_tx_fifo) > 0) + vec_add1 (smm->pending_event_vector[thread_index], *e0); } return 0; @@ -360,7 +440,6 @@ dequeue_fail: * read, return buff to free list and return */ clib_warning ("dequeue fail"); - if (svm_fifo_set_event (s0->server_tx_fifo)) { vec_add1 (smm->pending_event_vector[thread_index], *e0); @@ -638,13 +717,6 @@ skip_dequeue: clib_warning ("It's dead, Jim!"); continue; } - /* Can retransmit for 
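/*
 * Minimal, condensed sketch (not part of the patch) of the datagram tx
 * dequeue step added above, assuming the fifo layout from session.h:
 * header at the fifo head, payload at offset SESSION_CONN_HDR_LEN.  The
 * real code spreads this over the send loop; this only illustrates the
 * bookkeeping.
 */
static u32
dgram_tx_step_sketch (svm_fifo_t * f, u8 * dst, u32 want)
{
  session_dgram_hdr_t hdr;
  u32 deq_now, read;

  /* The header stays at the head until the datagram is fully sent. */
  svm_fifo_peek (f, 0, sizeof (hdr), (u8 *) & hdr);
  deq_now = clib_min (hdr.data_length - hdr.data_offset, want);
  read = svm_fifo_peek (f, hdr.data_offset + SESSION_CONN_HDR_LEN,
                        deq_now, dst);
  hdr.data_offset += read;
  if (hdr.data_offset == hdr.data_length)
    /* Fully sent: drop header and payload in one go. */
    svm_fifo_dequeue_drop (f, hdr.data_length + SESSION_CONN_HDR_LEN);
  else
    /* Partial send: persist the new offset back into the in-fifo header. */
    svm_fifo_overwrite_head (f, (u8 *) & hdr,
                             sizeof (session_dgram_pre_hdr_t));
  return read;
}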
closed sessions but can't do anything if - * session is not ready or closed */ - if (PREDICT_FALSE (s0->session_state < SESSION_STATE_READY)) - { - vec_add1 (smm->pending_event_vector[my_thread_index], *e0); - continue; - } /* Spray packets in per session type frames, since they go to * different nodes */ rv = (smm->session_tx_fns[s0->session_type]) (vm, node, smm, e0, s0, diff --git a/src/vnet/session/stream_session.h b/src/vnet/session/stream_session.h index b7a5eee4b12..9e0e4d98990 100644 --- a/src/vnet/session/stream_session.h +++ b/src/vnet/session/stream_session.h @@ -31,30 +31,19 @@ typedef enum SESSION_STATE_CONNECTING, SESSION_STATE_ACCEPTING, SESSION_STATE_READY, - SESSION_STATE_CONNECTING_READY, + SESSION_STATE_OPENED, SESSION_STATE_CLOSED, SESSION_STATE_N_STATES, } stream_session_state_t; -/* TODO convert to macro once cleanup completed */ -typedef struct app_session_ +typedef struct generic_session_ { - /** fifo pointers. Once allocated, these do not move */ - svm_fifo_t *server_rx_fifo; - svm_fifo_t *server_tx_fifo; - - /** Type */ - session_type_t session_type; - - /** State */ - volatile u8 session_state; - - /** Session index in owning pool */ - u32 session_index; - - /** Application index */ - u32 app_index; -} app_session_t; + svm_fifo_t *rx_fifo; /**< rx fifo */ + svm_fifo_t *tx_fifo; /**< tx fifo */ + session_type_t session_type; /**< session type */ + volatile u8 session_state; /**< session state */ + u32 session_index; /**< index in owning pool */ +} generic_session_t; typedef struct _stream_session_t { diff --git a/src/vnet/session/transport.c b/src/vnet/session/transport.c index 797bdad1eaa..20b912929b4 100644 --- a/src/vnet/session/transport.c +++ b/src/vnet/session/transport.c @@ -57,6 +57,9 @@ format_transport_proto (u8 * s, va_list * args) case TRANSPORT_PROTO_SCTP: s = format (s, "SCTP"); break; + case TRANSPORT_PROTO_UDPC: + s = format (s, "UDPC"); + break; } return s; } @@ -76,6 +79,9 @@ format_transport_proto_short (u8 * s, va_list * args) case TRANSPORT_PROTO_SCTP: s = format (s, "S"); break; + case TRANSPORT_PROTO_UDPC: + s = format (s, "U"); + break; } return s; } @@ -100,6 +106,10 @@ unformat_transport_proto (unformat_input_t * input, va_list * args) *proto = TRANSPORT_PROTO_TLS; else if (unformat (input, "TLS")) *proto = TRANSPORT_PROTO_TLS; + else if (unformat (input, "udpc")) + *proto = TRANSPORT_PROTO_UDPC; + else if (unformat (input, "UDPC")) + *proto = TRANSPORT_PROTO_UDPC; else return 0; return 1; @@ -185,6 +195,12 @@ transport_protocol_get_vft (transport_proto_t transport_proto) return &tp_vfts[transport_proto]; } +transport_service_type_t +transport_protocol_service_type (transport_proto_t tp) +{ + return tp_vfts[tp].service_type; +} + #define PORT_MASK ((1 << 16)- 1) void diff --git a/src/vnet/session/transport.h b/src/vnet/session/transport.h index ed9eb02754e..8340fd859ac 100644 --- a/src/vnet/session/transport.h +++ b/src/vnet/session/transport.h @@ -35,10 +35,10 @@ typedef struct _transport_connection { ip46_address_t rmt_ip; /**< Remote IP */ ip46_address_t lcl_ip; /**< Local IP */ - u16 lcl_port; /**< Local port */ u16 rmt_port; /**< Remote port */ - u8 proto; /**< Protocol id */ + u16 lcl_port; /**< Local port */ u8 is_ip4; /**< Flag if IP4 connection */ + u8 proto; /**< Protocol id */ u32 fib_index; /**< Network namespace */ }; /* @@ -88,6 +88,7 @@ typedef enum _transport_proto TRANSPORT_PROTO_SCTP, TRANSPORT_PROTO_NONE, TRANSPORT_PROTO_TLS, + TRANSPORT_PROTO_UDPC, TRANSPORT_N_PROTO } transport_proto_t; @@ -99,7 +100,7 @@ uword 
unformat_transport_proto (unformat_input_t * input, va_list * args); _(u32, sw_if_index) /**< interface endpoint is associated with */ \ _(ip46_address_t, ip) /**< ip address */ \ _(u32, fib_index) /**< fib table endpoint is associated with */ \ - _(u8, is_ip4) /**< 1 if ip4 */ \ + _(u8, is_ip4) /**< set if ip4 */ \ _(u16, port) /**< port in net order */ \ typedef struct _transport_endpoint @@ -125,18 +126,6 @@ transport_endpoint_fib_proto (transport_endpoint_t * tep) return tep->is_ip4 ? FIB_PROTOCOL_IP4 : FIB_PROTOCOL_IP6; } -always_inline u8 -transport_is_stream (u8 proto) -{ - return ((proto == TRANSPORT_PROTO_TCP) || (proto == TRANSPORT_PROTO_SCTP)); -} - -always_inline u8 -transport_is_dgram (u8 proto) -{ - return (proto == TRANSPORT_PROTO_UDP); -} - int transport_alloc_local_port (u8 proto, ip46_address_t * ip); int transport_alloc_local_endpoint (u8 proto, transport_endpoint_t * rmt, ip46_address_t * lcl_addr, diff --git a/src/vnet/session/transport_interface.h b/src/vnet/session/transport_interface.h index 04a5ff263b1..f21e483c715 100644 --- a/src/vnet/session/transport_interface.h +++ b/src/vnet/session/transport_interface.h @@ -23,7 +23,8 @@ typedef enum transport_dequeue_type_ { TRANSPORT_TX_PEEK, /**< reliable transport protos */ TRANSPORT_TX_DEQUEUE, /**< unreliable transport protos */ - TRANSPORT_TX_INTERNAL, /**< apps acting as transports */ + TRANSPORT_TX_INTERNAL, /**< apps acting as transports */ + TRANSPORT_TX_DGRAM, /**< datagram mode */ TRANSPORT_TX_N_FNS } transport_tx_fn_type_t; @@ -31,7 +32,7 @@ typedef enum transport_service_type_ { TRANSPORT_SERVICE_VC, /**< virtual circuit service */ TRANSPORT_SERVICE_CL, /**< connectionless service */ - TRANSPORT_SERVICE_APP, /**< app transport service */ + TRANSPORT_SERVICE_APP, /**< app transport service */ TRANSPORT_N_SERVICES } transport_service_type_t; @@ -96,6 +97,7 @@ void transport_register_protocol (transport_proto_t transport_proto, const transport_proto_vft_t * vft, fib_protocol_t fib_proto, u32 output_node); transport_proto_vft_t *transport_protocol_get_vft (transport_proto_t tp); +transport_service_type_t transport_protocol_service_type (transport_proto_t); void transport_update_time (f64 time_now, u8 thread_index); void transport_enable_disable (vlib_main_t * vm, u8 is_en); diff --git a/src/vnet/udp/udp.c b/src/vnet/udp/udp.c index 3b8b707abe1..947cc1e38e1 100644 --- a/src/vnet/udp/udp.c +++ b/src/vnet/udp/udp.c @@ -49,6 +49,7 @@ udp_connection_alloc (u32 thread_index) uc->c_c_index = uc - um->connections[thread_index]; uc->c_thread_index = thread_index; uc->c_proto = TRANSPORT_PROTO_UDP; + clib_spinlock_init (&uc->rx_lock); return uc; } @@ -92,6 +93,7 @@ udp_session_bind (u32 session_index, transport_endpoint_t * lcl) listener->c_proto = TRANSPORT_PROTO_UDP; listener->c_s_index = session_index; listener->c_fib_index = lcl->fib_index; + clib_spinlock_init (&listener->rx_lock); node_index = lcl->is_ip4 ? 
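/*
 * Note on the removal of transport_is_stream()/transport_is_dgram()
 * above: callers are expected to key off the service type a transport
 * registered instead.  Illustrative helper, not part of this patch.
 */
static u8
transport_is_connectionless_sketch (transport_proto_t tp)
{
  return transport_protocol_service_type (tp) == TRANSPORT_SERVICE_CL;
}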
udp4_input_node.index : udp6_input_node.index; udp_register_dst_port (vm, clib_net_to_host_u16 (lcl->port), node_index, @@ -140,7 +142,7 @@ udp_push_header (transport_connection_t * tc, vlib_buffer_t * b) vnet_buffer (b)->l3_hdr_offset = (u8 *) ih - b->data; } vnet_buffer (b)->sw_if_index[VLIB_RX] = 0; - vnet_buffer (b)->sw_if_index[VLIB_TX] = ~0; + vnet_buffer (b)->sw_if_index[VLIB_TX] = uc->c_fib_index; b->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED; return 0; @@ -210,7 +212,7 @@ format_udp_connection (u8 * s, va_list * args) if (verbose) { if (verbose == 1) - s = format (s, "%-15s", "-"); + s = format (s, "%-15s\n", "-"); else s = format (s, "\n"); } @@ -322,8 +324,52 @@ const static transport_proto_vft_t udp_proto = { .format_connection = format_udp_session, .format_half_open = format_udp_half_open_session, .format_listener = format_udp_listener_session, + .tx_type = TRANSPORT_TX_DGRAM, + .service_type = TRANSPORT_SERVICE_CL, +}; +/* *INDENT-ON* */ + + +int +udpc_connection_open (transport_endpoint_t * rmt) +{ + udp_connection_t *uc; + u32 uc_index; + uc_index = udp_open_connection (rmt); + uc = udp_connection_get (uc_index, vlib_get_thread_index ()); + uc->is_connected = 1; + return uc_index; +} + +u32 +udpc_connection_listen (u32 session_index, transport_endpoint_t * lcl) +{ + udp_connection_t *listener; + u32 li; + li = udp_session_bind (session_index, lcl); + listener = udp_listener_get (li); + listener->is_connected = 1; + return li; +} + +/* *INDENT-OFF* */ +const static transport_proto_vft_t udpc_proto = { + .bind = udpc_connection_listen, + .open = udpc_connection_open, + .unbind = udp_session_unbind, + .push_header = udp_push_header, + .get_connection = udp_session_get, + .get_listener = udp_session_get_listener, + .get_half_open = udp_half_open_session_get_transport, + .close = udp_session_close, + .cleanup = udp_session_cleanup, + .send_mss = udp_send_mss, + .send_space = udp_send_space, + .format_connection = format_udp_session, + .format_half_open = format_udp_half_open_session, + .format_listener = format_udp_listener_session, .tx_type = TRANSPORT_TX_DEQUEUE, - .service_type = TRANSPORT_SERVICE_VC, + .service_type = TRANSPORT_SERVICE_CL, }; /* *INDENT-ON* */ @@ -361,6 +407,10 @@ udp_init (vlib_main_t * vm) FIB_PROTOCOL_IP4, ip4_lookup_node.index); transport_register_protocol (TRANSPORT_PROTO_UDP, &udp_proto, FIB_PROTOCOL_IP6, ip6_lookup_node.index); + transport_register_protocol (TRANSPORT_PROTO_UDPC, &udpc_proto, + FIB_PROTOCOL_IP4, ip4_lookup_node.index); + transport_register_protocol (TRANSPORT_PROTO_UDPC, &udpc_proto, + FIB_PROTOCOL_IP6, ip6_lookup_node.index); /* * Initialize data structures diff --git a/src/vnet/udp/udp.h b/src/vnet/udp/udp.h index af6c6b82c6a..8e3ab9ec59e 100644 --- a/src/vnet/udp/udp.h +++ b/src/vnet/udp/udp.h @@ -36,9 +36,9 @@ typedef enum typedef struct { - transport_connection_t connection; /** must be first */ - /** ersatz MTU to limit fifo pushes to test data size */ - u32 mtu; + transport_connection_t connection; /**< must be first */ + clib_spinlock_t rx_lock; /**< rx fifo lock */ + u8 is_connected; /**< connected mode */ } udp_connection_t; #define foreach_udp4_dst_port \ @@ -207,7 +207,7 @@ udp_pool_remove_peeker (u32 thread_index) } always_inline udp_connection_t * -udp_conenction_clone_safe (u32 connection_index, u32 thread_index) +udp_connection_clone_safe (u32 connection_index, u32 thread_index) { udp_connection_t *old_c, *new_c; u32 current_thread_index = vlib_get_thread_index (); diff --git a/src/vnet/udp/udp_input.c 
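/*
 * Minimal sketch (not part of the patch) of the application-visible
 * difference between the two vfts registered above: same wire protocol,
 * different uri scheme and service.  The uris, ports and error handling
 * are illustrative; app attach is assumed to have happened elsewhere.
 */
static int
bind_udp_vs_udpc_sketch (u32 app_index)
{
  vnet_bind_args_t a = { .app_index = app_index };

  a.uri = "udp://0.0.0.0/1234";  /* connectionless: datagrams land on the
                                    listener fifo, TRANSPORT_TX_DGRAM */
  if (vnet_bind_uri (&a))
    return -1;

  a.uri = "udpc://0.0.0.0/1235"; /* connected: a per-peer session is
                                    accepted on the first datagram,
                                    TRANSPORT_TX_DEQUEUE */
  return vnet_bind_uri (&a);
}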
b/src/vnet/udp/udp_input.c index 8170cfbe49a..ad469f69667 100644 --- a/src/vnet/udp/udp_input.c +++ b/src/vnet/udp/udp_input.c @@ -13,20 +13,19 @@ * limitations under the License. */ +#include <vlibmemory/api.h> #include <vlib/vlib.h> -#include <vnet/vnet.h> -#include <vnet/pg/pg.h> -#include <vnet/ip/ip.h> -#include <vnet/udp/udp.h> #include <vppinfra/hash.h> #include <vppinfra/error.h> #include <vppinfra/elog.h> +#include <vnet/vnet.h> +#include <vnet/pg/pg.h> +#include <vnet/ip/ip.h> +#include <vnet/udp/udp.h> #include <vnet/udp/udp_packet.h> - -#include <vlibmemory/api.h> -#include "../session/application_interface.h" +#include <vnet/session/session.h> static char *udp_error_strings[] = { #define udp_error(n,s) s, @@ -106,9 +105,11 @@ udp46_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node, ip6_header_t *ip60; u8 *data0; stream_session_t *s0; - transport_connection_t *tc0 = 0; - udp_connection_t *child0, *new_uc0; - int written0; + udp_connection_t *uc0, *child0, *new_uc0; + transport_connection_t *tc0; + int wrote0; + void *rmt_addr, *lcl_addr; + session_dgram_hdr_t hdr0; /* speculatively enqueue b0 to the current next frame */ bi0 = from[0]; @@ -127,11 +128,14 @@ udp46_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node, if (is_ip4) { - /* $$$$ fixme: udp_local doesn't do ip options correctly anyhow */ + /* TODO: must fix once udp_local does ip options correctly */ ip40 = (ip4_header_t *) (((u8 *) udp0) - sizeof (*ip40)); s0 = session_lookup_safe4 (fib_index0, &ip40->dst_address, &ip40->src_address, udp0->dst_port, udp0->src_port, TRANSPORT_PROTO_UDP); + lcl_addr = &ip40->dst_address; + rmt_addr = &ip40->src_address; + } else { @@ -139,67 +143,83 @@ udp46_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node, s0 = session_lookup_safe6 (fib_index0, &ip60->dst_address, &ip60->src_address, udp0->dst_port, udp0->src_port, TRANSPORT_PROTO_UDP); + lcl_addr = &ip60->dst_address; + rmt_addr = &ip60->src_address; } - if (PREDICT_FALSE (s0 == 0)) + if (PREDICT_FALSE (!s0)) { error0 = UDP_ERROR_NO_LISTENER; goto trace0; } - if (PREDICT_TRUE (s0->session_state == SESSION_STATE_READY)) + if (s0->session_state == SESSION_STATE_OPENED) { + /* TODO optimization: move cl session to right thread + * However, since such a move would affect the session handle, + * which we pass 'raw' to the app, we'd also have notify the + * app of the change or change the way we pass handles to apps. + */ tc0 = session_get_transport (s0); + uc0 = udp_get_connection_from_transport (tc0); + if (uc0->is_connected) + { + /* + * Clone the transport. It will be cleaned up with the + * session once we notify the session layer. + */ + new_uc0 = udp_connection_clone_safe (s0->connection_index, + s0->thread_index); + ASSERT (s0->session_index == new_uc0->c_s_index); + + /* + * Drop the 'lock' on pool resize + */ + session_pool_remove_peeker (s0->thread_index); + session_dgram_connect_notify (&new_uc0->connection, + s0->thread_index, &s0); + tc0 = &new_uc0->connection; + } } - else if (s0->session_state == SESSION_STATE_CONNECTING_READY) + else if (s0->session_state == SESSION_STATE_READY) { - /* - * Clone the transport. It will be cleaned up with the - * session once we notify the session layer. 
- */ - new_uc0 = udp_conenction_clone_safe (s0->connection_index, - s0->thread_index); - ASSERT (s0->session_index == new_uc0->c_s_index); - - /* - * Drop the 'lock' on pool resize - */ - session_pool_remove_peeker (s0->thread_index); - session_dgram_connect_notify (&new_uc0->connection, - s0->thread_index, &s0); - tc0 = &new_uc0->connection; + tc0 = session_get_transport (s0); + uc0 = udp_get_connection_from_transport (tc0); } else if (s0->session_state == SESSION_STATE_LISTENING) { tc0 = listen_session_get_transport (s0); - - child0 = udp_connection_alloc (my_thread_index); - if (is_ip4) - { - ip_set (&child0->c_lcl_ip, &ip40->dst_address, 1); - ip_set (&child0->c_rmt_ip, &ip40->src_address, 1); - } - else - { - ip_set (&child0->c_lcl_ip, &ip60->dst_address, 0); - ip_set (&child0->c_rmt_ip, &ip60->src_address, 0); - } - child0->c_lcl_port = udp0->dst_port; - child0->c_rmt_port = udp0->src_port; - child0->c_is_ip4 = is_ip4; - child0->mtu = 1460; /* $$$$ policy */ - - if (stream_session_accept - (&child0->connection, tc0->s_index, 1)) + uc0 = udp_get_connection_from_transport (tc0); + if (uc0->is_connected) { - error0 = UDP_ERROR_CREATE_SESSION; - goto trace0; + child0 = udp_connection_alloc (my_thread_index); + if (is_ip4) + { + ip_set (&child0->c_lcl_ip, &ip40->dst_address, 1); + ip_set (&child0->c_rmt_ip, &ip40->src_address, 1); + } + else + { + ip_set (&child0->c_lcl_ip, &ip60->dst_address, 0); + ip_set (&child0->c_rmt_ip, &ip60->src_address, 0); + } + child0->c_lcl_port = udp0->dst_port; + child0->c_rmt_port = udp0->src_port; + child0->c_is_ip4 = is_ip4; + + if (stream_session_accept (&child0->connection, + tc0->s_index, 1)) + { + error0 = UDP_ERROR_CREATE_SESSION; + goto trace0; + } + s0 = + session_get (child0->c_s_index, child0->c_thread_index); + s0->session_state = SESSION_STATE_READY; + tc0 = &child0->connection; + uc0 = udp_get_connection_from_transport (tc0); + error0 = UDP_ERROR_LISTENER; } - s0 = session_get (child0->c_s_index, child0->c_thread_index); - s0->session_state = SESSION_STATE_READY; - tc0 = &child0->connection; - - error0 = UDP_ERROR_LISTENER; } else { @@ -207,15 +227,48 @@ udp46_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node, goto trace0; } - written0 = session_enqueue_dgram_connection (s0, b0, tc0->proto, - 1 /* queue evt */ ); - if (PREDICT_FALSE (written0 < 0)) + if (!uc0->is_connected) { - error0 = UDP_ERROR_FIFO_FULL; - goto trace0; + if (svm_fifo_max_enqueue (s0->server_rx_fifo) + < b0->current_length + sizeof (session_dgram_hdr_t)) + { + error0 = UDP_ERROR_FIFO_FULL; + goto trace0; + } + hdr0.data_length = b0->current_length; + hdr0.data_offset = 0; + ip_set (&hdr0.lcl_ip, lcl_addr, is_ip4); + ip_set (&hdr0.rmt_ip, rmt_addr, is_ip4); + hdr0.lcl_port = udp0->dst_port; + hdr0.rmt_port = udp0->src_port; + hdr0.is_ip4 = is_ip4; + + clib_spinlock_lock (&uc0->rx_lock); + wrote0 = session_enqueue_dgram_connection (s0, &hdr0, b0, + TRANSPORT_PROTO_UDP, + 1 /* queue evt */ ); + clib_spinlock_unlock (&uc0->rx_lock); + ASSERT (wrote0 > 0); + + if (s0->session_state != SESSION_STATE_LISTENING) + session_pool_remove_peeker (s0->thread_index); + } + else + { + if (svm_fifo_max_enqueue (s0->server_rx_fifo) + < b0->current_length) + { + error0 = UDP_ERROR_FIFO_FULL; + goto trace0; + } + wrote0 = session_enqueue_stream_connection (tc0, b0, 0, + 1 /* queue evt */ , + 1 /* in order */ ); + ASSERT (wrote0 > 0); } trace0: + b0->error = node->errors[error0]; if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) @@ -224,7 +277,7 @@ udp46_input_inline (vlib_main_t 
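/*
 * Minimal sketch (not part of the patch) of how an application might
 * drain one datagram that the input node above enqueued: header first,
 * then data_length bytes of payload.  Names and buffer management are
 * illustrative only.
 */
static int
app_recv_dgram_sketch (svm_fifo_t * rx_fifo, u8 * buf, u32 buf_len,
                       session_dgram_hdr_t * hdr)
{
  u32 max_deq = svm_fifo_max_dequeue (rx_fifo);

  if (max_deq < sizeof (*hdr))
    return 0;
  svm_fifo_peek (rx_fifo, 0, sizeof (*hdr), (u8 *) hdr);
  if (max_deq < hdr->data_length + sizeof (*hdr))
    return 0;                   /* datagram not fully enqueued yet */
  svm_fifo_dequeue_drop (rx_fifo, sizeof (*hdr));
  ASSERT (hdr->data_length <= buf_len);
  /* A reply can reuse hdr's rmt_ip/rmt_port as the destination. */
  return svm_fifo_dequeue_nowait (rx_fifo, hdr->data_length, buf);
}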
* vm, vlib_node_runtime_t * node, udp_input_trace_t *t = vlib_add_trace (vm, node, b0, sizeof (*t)); - t->connection = tc0 ? tc0->c_index : ~0; + t->connection = s0 ? s0->connection_index : ~0; t->disposition = error0; t->thread_index = my_thread_index; } @@ -237,14 +290,11 @@ udp46_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_put_next_frame (vm, node, next_index, n_left_to_next); } - errors = session_manager_flush_enqueue_events (TRANSPORT_PROTO_UDP, - my_thread_index); + errors = session_manager_flush_all_enqueue_events (TRANSPORT_PROTO_UDP); udp_input_inc_counter (vm, is_ip4, UDP_ERROR_EVENT_FIFO_FULL, errors); return frame->n_vectors; } -vlib_node_registration_t udp4_input_node; -vlib_node_registration_t udp6_input_node; static uword udp4_input (vlib_main_t * vm, vlib_node_runtime_t * node, |