From 68b0fb0c620c7451ef1a6380c43c39de6614db51 Mon Sep 17 00:00:00 2001 From: Dave Barach Date: Tue, 28 Feb 2017 15:15:56 -0500 Subject: VPP-598: tcp stack initial commit Change-Id: I49e5ce0aae6e4ff634024387ceaf7dbc432a0351 Signed-off-by: Dave Barach Signed-off-by: Florin Coras --- src/vnet/session/application_interface.c | 459 +++++++++++++++++++++++++++++++ 1 file changed, 459 insertions(+) create mode 100644 src/vnet/session/application_interface.c (limited to 'src/vnet/session/application_interface.c') diff --git a/src/vnet/session/application_interface.c b/src/vnet/session/application_interface.c new file mode 100644 index 00000000..0ea77fd8 --- /dev/null +++ b/src/vnet/session/application_interface.c @@ -0,0 +1,459 @@ +/* + * Copyright (c) 2016 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include + +#include +#include +#include +#include + +/** @file + VPP's application/session API bind/unbind/connect/disconnect calls +*/ + +static u8 +ip_is_zero (ip46_address_t * ip46_address, u8 is_ip4) +{ + if (is_ip4) + return (ip46_address->ip4.as_u32 == 0); + else + return (ip46_address->as_u64[0] == 0 && ip46_address->as_u64[1] == 0); +} + +static u8 +ip_is_local (ip46_address_t * ip46_address, u8 is_ip4) +{ + fib_node_index_t fei; + fib_entry_flag_t flags; + fib_prefix_t prefix; + + /* Check if requester is local */ + if (is_ip4) + { + prefix.fp_len = 32; + prefix.fp_proto = FIB_PROTOCOL_IP4; + } + else + { + prefix.fp_len = 128; + prefix.fp_proto = FIB_PROTOCOL_IP6; + } + + clib_memcpy (&prefix.fp_addr, ip46_address, sizeof (ip46_address)); + fei = fib_table_lookup (0, &prefix); + flags = fib_entry_get_flags (fei); + + return (flags & FIB_ENTRY_FLAG_LOCAL); +} + +int +api_parse_session_handle (u64 handle, u32 * session_index, u32 * thread_index) +{ + session_manager_main_t *smm = vnet_get_session_manager_main (); + stream_session_t *pool; + + *thread_index = handle & 0xFFFFFFFF; + *session_index = handle >> 32; + + if (*thread_index >= vec_len (smm->sessions)) + return VNET_API_ERROR_INVALID_VALUE; + + pool = smm->sessions[*thread_index]; + + if (pool_is_free_index (pool, *session_index)) + return VNET_API_ERROR_INVALID_VALUE_2; + + return 0; +} + +int +vnet_bind_i (u32 api_client_index, ip46_address_t * ip46, u16 port_host_order, + session_type_t sst, u64 * options, session_cb_vft_t * cb_fns, + application_t ** app, u32 * len_seg_name, char *seg_name) +{ + u8 *segment_name = 0; + application_t *server = 0; + stream_session_t *listener; + u8 is_ip4; + + listener = + stream_session_lookup_listener (ip46, + clib_host_to_net_u16 (port_host_order), + sst); + + if (listener) + return VNET_API_ERROR_ADDRESS_IN_USE; + + if (application_lookup (api_client_index)) + { + clib_warning ("Only one bind supported for now"); + return VNET_API_ERROR_ADDRESS_IN_USE; + } + + is_ip4 = SESSION_TYPE_IP4_UDP == sst || SESSION_TYPE_IP4_TCP == sst; + if (!ip_is_zero (ip46, is_ip4) && !ip_is_local (ip46, is_ip4)) + return VNET_API_ERROR_INVALID_VALUE; + + /* Allocate and 
initialize stream server */ + server = application_new (APP_SERVER, sst, api_client_index, + options[SESSION_OPTIONS_FLAGS], cb_fns); + + application_server_init (server, options[SESSION_OPTIONS_SEGMENT_SIZE], + options[SESSION_OPTIONS_ADD_SEGMENT_SIZE], + options[SESSION_OPTIONS_RX_FIFO_SIZE], + options[SESSION_OPTIONS_TX_FIFO_SIZE], + &segment_name); + + /* Setup listen path down to transport */ + stream_session_start_listen (server->index, ip46, port_host_order); + + /* + * Return values + */ + + ASSERT (vec_len (segment_name) <= 128); + *len_seg_name = vec_len (segment_name); + memcpy (seg_name, segment_name, *len_seg_name); + *app = server; + + return 0; +} + +int +vnet_unbind_i (u32 api_client_index) +{ + application_t *server; + + /* + * Find the stream_server_t corresponding to the api client + */ + server = application_lookup (api_client_index); + if (!server) + return VNET_API_ERROR_INVALID_VALUE_2; + + /* Clear the listener */ + stream_session_stop_listen (server->index); + application_del (server); + + return 0; +} + +int +vnet_connect_i (u32 api_client_index, u32 api_context, session_type_t sst, + ip46_address_t * ip46, u16 port, u64 * options, void *mp, + session_cb_vft_t * cb_fns) +{ + stream_session_t *listener; + application_t *server, *app; + + /* + * Figure out if connecting to a local server + */ + listener = stream_session_lookup_listener (ip46, + clib_host_to_net_u16 (port), + sst); + if (listener) + { + server = application_get (listener->app_index); + + /* + * Server is willing to have a direct fifo connection created + * instead of going through the state machine, etc. + */ + if (server->flags & SESSION_OPTIONS_FLAGS_USE_FIFO) + return server->cb_fns. + redirect_connect_callback (server->api_client_index, mp); + } + + /* Create client app */ + app = application_new (APP_CLIENT, sst, api_client_index, + options[SESSION_OPTIONS_FLAGS], cb_fns); + + app->api_context = api_context; + + /* + * Not connecting to a local server. Create regular session + */ + stream_session_open (sst, ip46, port, app->index); + + return 0; +} + +/** + * unformat a vnet URI + * + * fifo://name + * tcp://ip46-addr:port + * udp://ip46-addr:port + * + * u8 ip46_address[16]; + * u16 port_in_host_byte_order; + * stream_session_type_t sst; + * u8 *fifo_name; + * + * if (unformat (input, "%U", unformat_vnet_uri, &ip46_address, + * &sst, &port, &fifo_name)) + * etc... 
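+ *
+ * Note: as committed, unformat_vnet_uri consumes only the address,
+ * session-type and port arguments; the fifo://name form listed above
+ * is not yet parsed, so the fifo_name argument shown in this example
+ * goes unused.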
+ * + */ +uword +unformat_vnet_uri (unformat_input_t * input, va_list * args) +{ + ip46_address_t *address = va_arg (*args, ip46_address_t *); + session_type_t *sst = va_arg (*args, session_type_t *); + u16 *port = va_arg (*args, u16 *); + + if (unformat (input, "tcp://%U/%d", unformat_ip4_address, &address->ip4, + port)) + { + *sst = SESSION_TYPE_IP4_TCP; + return 1; + } + if (unformat (input, "udp://%U/%d", unformat_ip4_address, &address->ip4, + port)) + { + *sst = SESSION_TYPE_IP4_UDP; + return 1; + } + if (unformat (input, "udp://%U/%d", unformat_ip6_address, &address->ip6, + port)) + { + *sst = SESSION_TYPE_IP6_UDP; + return 1; + } + if (unformat (input, "tcp://%U/%d", unformat_ip6_address, &address->ip6, + port)) + { + *sst = SESSION_TYPE_IP6_TCP; + return 1; + } + + return 0; +} + +int +parse_uri (char *uri, session_type_t * sst, ip46_address_t * addr, + u16 * port_number_host_byte_order) +{ + unformat_input_t _input, *input = &_input; + + /* Make sure */ + uri = (char *) format (0, "%s%c", uri, 0); + + /* Parse uri */ + unformat_init_string (input, uri, strlen (uri)); + if (!unformat (input, "%U", unformat_vnet_uri, addr, sst, + port_number_host_byte_order)) + { + unformat_free (input); + return VNET_API_ERROR_INVALID_VALUE; + } + unformat_free (input); + + return 0; +} + +int +vnet_bind_uri (vnet_bind_args_t * a) +{ + application_t *server = 0; + u16 port_host_order; + session_type_t sst = SESSION_N_TYPES; + ip46_address_t ip46; + int rv; + + memset (&ip46, 0, sizeof (ip46)); + rv = parse_uri (a->uri, &sst, &ip46, &port_host_order); + if (rv) + return rv; + + if ((rv = vnet_bind_i (a->api_client_index, &ip46, port_host_order, sst, + a->options, a->session_cb_vft, &server, + &a->segment_name_length, a->segment_name))) + return rv; + + a->server_event_queue_address = (u64) server->event_queue; + return 0; +} + +session_type_t +session_type_from_proto_and_ip (session_api_proto_t proto, u8 is_ip4) +{ + if (proto == SESSION_PROTO_TCP) + { + if (is_ip4) + return SESSION_TYPE_IP4_TCP; + else + return SESSION_TYPE_IP6_TCP; + } + else + { + if (is_ip4) + return SESSION_TYPE_IP4_UDP; + else + return SESSION_TYPE_IP6_UDP; + } + + return SESSION_N_TYPES; +} + +int +vnet_unbind_uri (char *uri, u32 api_client_index) +{ + u16 port_number_host_byte_order; + session_type_t sst = SESSION_N_TYPES; + ip46_address_t ip46_address; + stream_session_t *listener; + int rv; + + rv = parse_uri (uri, &sst, &ip46_address, &port_number_host_byte_order); + if (rv) + return rv; + + listener = + stream_session_lookup_listener (&ip46_address, + clib_host_to_net_u16 + (port_number_host_byte_order), sst); + + if (!listener) + return VNET_API_ERROR_ADDRESS_NOT_IN_USE; + + /* External client? 
*/ + if (api_client_index != ~0) + { + ASSERT (vl_api_client_index_to_registration (api_client_index)); + } + + return vnet_unbind_i (api_client_index); +} + +int +vnet_connect_uri (vnet_connect_args_t * a) +{ + ip46_address_t ip46_address; + u16 port; + session_type_t sst; + application_t *app; + int rv; + + app = application_lookup (a->api_client_index); + if (app) + { + clib_warning ("Already have a connect from this app"); + return VNET_API_ERROR_INVALID_VALUE_2; + } + + /* Parse uri */ + rv = parse_uri (a->uri, &sst, &ip46_address, &port); + if (rv) + return rv; + + return vnet_connect_i (a->api_client_index, a->api_context, sst, + &ip46_address, port, a->options, a->mp, + a->session_cb_vft); +} + +int +vnet_disconnect_session (u32 client_index, u32 session_index, + u32 thread_index) +{ + stream_session_t *session; + + session = stream_session_get (session_index, thread_index); + stream_session_disconnect (session); + + return 0; +} + + +int +vnet_bind (vnet_bind_args_t * a) +{ + application_t *server = 0; + session_type_t sst = SESSION_N_TYPES; + int rv; + + sst = session_type_from_proto_and_ip (a->proto, a->tep.is_ip4); + if ((rv = vnet_bind_i (a->api_client_index, &a->tep.ip, a->tep.port, sst, + a->options, a->session_cb_vft, &server, + &a->segment_name_length, a->segment_name))) + return rv; + + a->server_event_queue_address = (u64) server->event_queue; + a->handle = (u64) a->tep.vrf << 32 | (u64) server->session_index; + return 0; +} + +int +vnet_unbind (vnet_unbind_args_t * a) +{ + application_t *server; + + if (a->api_client_index != ~0) + { + ASSERT (vl_api_client_index_to_registration (a->api_client_index)); + } + + /* Make sure this is the right one */ + server = application_lookup (a->api_client_index); + ASSERT (server->session_index == (0xFFFFFFFF & a->handle)); + + /* TODO use handle to disambiguate namespaces/vrfs */ + return vnet_unbind_i (a->api_client_index); +} + +int +vnet_connect (vnet_connect_args_t * a) +{ + session_type_t sst; + application_t *app; + + app = application_lookup (a->api_client_index); + if (app) + { + clib_warning ("Already have a connect from this app"); + return VNET_API_ERROR_INVALID_VALUE_2; + } + + sst = session_type_from_proto_and_ip (a->proto, a->tep.is_ip4); + return vnet_connect_i (a->api_client_index, a->api_context, sst, &a->tep.ip, + a->tep.port, a->options, a->mp, a->session_cb_vft); +} + +int +vnet_disconnect (vnet_disconnect_args_t * a) +{ + stream_session_t *session; + u32 session_index, thread_index; + + if (api_parse_session_handle (a->handle, &session_index, &thread_index)) + { + clib_warning ("Invalid handle"); + return -1; + } + + session = stream_session_get (session_index, thread_index); + stream_session_disconnect (session); + + return 0; +} + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ -- cgit 1.2.3-korg From e04c29942af6a130591059679531c9ffa3d7237a Mon Sep 17 00:00:00 2001 From: Florin Coras Date: Wed, 1 Mar 2017 08:17:34 -0800 Subject: Cleanup URI code and TCP bugfixing - Add CLI/API to enable session layer, by default it's disabled - Improve rcv wnd computation - Improvements to tx path - URI code cleanup - Builtin test tcp server - Improve src port allocation Change-Id: I2ace498e76a0771d4c31a8075cc14fe33d7dfa38 Signed-off-by: Florin Coras --- src/scripts/vnet/uri/dummy_app.py | 65 +++ src/scripts/vnet/uri/tcp_server | 1 + src/svm/svm_fifo.c | 6 +- src/uri.am | 10 +- src/uri/uri_tcp_test.c | 792 +++++++++++++------------ src/uri/uri_udp_test.c | 
442 +++++++++++++- src/uri/uri_udp_test2.c | 954 ------------------------------- src/uri/uritest.c | 484 ---------------- src/vnet.am | 1 + src/vnet/api_errno.h | 3 +- src/vnet/session/application.c | 27 +- src/vnet/session/application.h | 1 + src/vnet/session/application_interface.c | 6 +- src/vnet/session/node.c | 57 +- src/vnet/session/session.api | 22 + src/vnet/session/session.c | 86 ++- src/vnet/session/session.h | 23 +- src/vnet/session/session_api.c | 59 +- src/vnet/session/session_cli.c | 63 +- src/vnet/tcp/builtin_server.c | 135 +++++ src/vnet/tcp/tcp.c | 48 +- src/vnet/tcp/tcp.h | 4 +- src/vnet/tcp/tcp_input.c | 56 +- src/vnet/tcp/tcp_output.c | 90 ++- 24 files changed, 1460 insertions(+), 1975 deletions(-) create mode 100644 src/scripts/vnet/uri/dummy_app.py delete mode 100644 src/uri/uri_udp_test2.c delete mode 100644 src/uri/uritest.c create mode 100644 src/vnet/tcp/builtin_server.c (limited to 'src/vnet/session/application_interface.c') diff --git a/src/scripts/vnet/uri/dummy_app.py b/src/scripts/vnet/uri/dummy_app.py new file mode 100644 index 00000000..b80fbb28 --- /dev/null +++ b/src/scripts/vnet/uri/dummy_app.py @@ -0,0 +1,65 @@ +#!/usr/bin/env python + +import socket +import sys +import bitstring + +# action can be reflect or drop +action = "drop" + +def handle_connection (connection, client_address): + print("Received connection from {}".format(repr(client_address))) + try: + while True: + data = connection.recv(4096) + if not data: + break; + if (action != "drop"): + connection.sendall(data) + finally: + connection.close() + +def run_server(ip, port): + print("Starting server {}:{}".format(repr(ip), repr(port))) + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + server_address = (ip, int(port)) + sock.bind(server_address) + sock.listen(1) + + while True: + connection, client_address = sock.accept() + handle_connection (connection, client_address) + +def prepare_data(): + buf = [] + for i in range (0, pow(2, 16)): + buf.append(i & 0xff) + return bytearray(buf) + +def run_client(ip, port): + print("Starting client {}:{}".format(repr(ip), repr(port))) + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + server_address = ("6.0.1.1", 1234) + sock.connect(server_address) + + data = prepare_data() + try: + sock.sendall(data) + finally: + sock.close() + +def run(mode, ip, port): + if (mode == "server"): + run_server (ip, port) + elif (mode == "client"): + run_client (ip, port) + else: + raise Exception("Unknown mode. Only client and server supported") + +if __name__ == "__main__": + if (len(sys.argv)) < 4: + raise Exception("Usage: ./dummy_app []") + if (len(sys.argv) == 5): + action = sys.argv[4] + + run (sys.argv[1], sys.argv[2], sys.argv[3]) diff --git a/src/scripts/vnet/uri/tcp_server b/src/scripts/vnet/uri/tcp_server index 7f5a86de..c29afc6f 100644 --- a/src/scripts/vnet/uri/tcp_server +++ b/src/scripts/vnet/uri/tcp_server @@ -2,3 +2,4 @@ create host-interface name vpp1 set int state host-vpp1 up set int ip address host-vpp1 6.0.1.1/24 trace add af-packet-input 10 +session enable diff --git a/src/svm/svm_fifo.c b/src/svm/svm_fifo.c index 11f90193..e3f534b1 100644 --- a/src/svm/svm_fifo.c +++ b/src/svm/svm_fifo.c @@ -508,9 +508,9 @@ svm_fifo_peek (svm_fifo_t * f, int pid, u32 offset, u32 max_bytes, { /* Number of bytes in first copy segment */ first_copy_bytes = - ((nitems - f->head) < total_copy_bytes) ? 
- (nitems - f->head) : total_copy_bytes; - clib_memcpy (copy_here, &f->data[f->head], first_copy_bytes); + ((nitems - f->head + offset) < total_copy_bytes) ? + (nitems - f->head + offset) : total_copy_bytes; + clib_memcpy (copy_here, &f->data[f->head + offset], first_copy_bytes); /* Number of bytes in second copy segment, if any */ second_copy_bytes = total_copy_bytes - first_copy_bytes; diff --git a/src/uri.am b/src/uri.am index 8cdd77c6..09b5b15b 100644 --- a/src/uri.am +++ b/src/uri.am @@ -11,12 +11,12 @@ # See the License for the specific language governing permissions and # limitations under the License. -noinst_PROGRAMS += uri_udp_test2 uri_tcp_test +noinst_PROGRAMS += uri_udp_test uri_tcp_test -uri_udp_test2_SOURCES = uri/uri_udp_test2.c -uri_udp_test2_LDADD = libvlibmemoryclient.la libvlibapi.la libsvm.la \ - libvppinfra.la -lpthread -lm -lrt +uri_udp_test_SOURCES = uri/uri_udp_test.c +uri_udp_test_LDADD = libvlibmemoryclient.la libvlibapi.la libsvm.la \ + libvppinfra.la -lpthread -lm -lrt uri_tcp_test_SOURCES = uri/uri_tcp_test.c uri_tcp_test_LDADD = libvlibmemoryclient.la libvlibapi.la libsvm.la \ - libvppinfra.la -lpthread -lm -lrt + libvppinfra.la -lpthread -lm -lrt diff --git a/src/uri/uri_tcp_test.c b/src/uri/uri_tcp_test.c index ed5a37d8..6c9cf1db 100644 --- a/src/uri/uri_tcp_test.c +++ b/src/uri/uri_tcp_test.c @@ -20,16 +20,15 @@ #include #include #include +#include -#include "../vnet/session/application_interface.h" - -#define vl_typedefs /* define message structures */ +#define vl_typedefs /* define message structures */ #include #undef vl_typedefs /* declare message handlers for each api */ -#define vl_endianfun /* define message structures */ +#define vl_endianfun /* define message structures */ #include #undef vl_endianfun @@ -45,8 +44,8 @@ vlib_main_t **vlib_mains; typedef struct { - svm_fifo_t * server_rx_fifo; - svm_fifo_t * server_tx_fifo; + svm_fifo_t *server_rx_fifo; + svm_fifo_t *server_tx_fifo; u32 vpp_session_index; u32 vpp_session_thread; @@ -69,19 +68,19 @@ typedef struct u32 my_client_index; /* The URI we're playing with */ - u8 * uri; + u8 *uri; /* Session pool */ - session_t * sessions; + session_t *sessions; /* Hash table for disconnect processing */ - uword * session_index_by_vpp_handles; + uword *session_index_by_vpp_handles; /* intermediate rx buffer */ - u8 * rx_buf; + u8 *rx_buf; /* URI for slave's connect */ - u8 * connect_uri; + u8 *connect_uri; u32 connected_session_index; @@ -91,10 +90,10 @@ typedef struct int drop_packets; /* Our event queue */ - unix_shared_memory_queue_t * our_event_queue; + unix_shared_memory_queue_t *our_event_queue; /* $$$ single thread only for the moment */ - unix_shared_memory_queue_t * vpp_event_queue; + unix_shared_memory_queue_t *vpp_event_queue; pid_t my_pid; @@ -111,12 +110,15 @@ typedef struct u32 configured_segment_size; /* VNET_API_ERROR_FOO -> "Foo" hash table */ - uword * error_string_by_error_number; - - /* convenience */ - svm_fifo_segment_main_t * segment_main; + uword *error_string_by_error_number; u8 *connect_test_data; + pthread_t client_rx_thread_handle; + u32 client_bytes_received; + u8 test_return_packets; + + /* convenience */ + svm_fifo_segment_main_t *segment_main; } uri_tcp_test_main_t; uri_tcp_test_main_t uri_tcp_test_main; @@ -141,7 +143,7 @@ wait_for_state_change (uri_tcp_test_main_t * utm, connection_state_t state) while (clib_time_now (&utm->clib_time) < timeout) { if (utm->state == state) - return 0; + return 0; if (utm->state == STATE_FAILED) return -1; } @@ -209,7 +211,7 @@ connect_to_vpp 
(char *name) } static void -vl_api_map_another_segment_t_handler (vl_api_map_another_segment_t *mp) +vl_api_map_another_segment_t_handler (vl_api_map_another_segment_t * mp) { svm_fifo_segment_create_args_t _a, *a = &_a; int rv; @@ -221,24 +223,24 @@ vl_api_map_another_segment_t_handler (vl_api_map_another_segment_t *mp) if (rv) { clib_warning ("svm_fifo_segment_attach ('%s') failed", - mp->segment_name); + mp->segment_name); return; } clib_warning ("Mapped new segment '%s' size %d", mp->segment_name, - mp->segment_size); + mp->segment_size); } static void vl_api_disconnect_session_t_handler (vl_api_disconnect_session_t * mp) { uri_tcp_test_main_t *utm = &uri_tcp_test_main; - session_t * session; - vl_api_disconnect_session_reply_t * rmp; - uword * p; + session_t *session; + vl_api_disconnect_session_reply_t *rmp; + uword *p; int rv = 0; u64 key; - key = (((u64)mp->session_thread_index) << 32) | (u64)mp->session_index; + key = (((u64) mp->session_thread_index) << 32) | (u64) mp->session_index; p = hash_get (utm->session_index_by_vpp_handles, key); @@ -254,6 +256,8 @@ vl_api_disconnect_session_t_handler (vl_api_disconnect_session_t * mp) rv = -11; } + utm->time_to_stop = 1; + rmp = vl_msg_api_alloc (sizeof (*rmp)); memset (rmp, 0, sizeof (*rmp)); @@ -261,32 +265,32 @@ vl_api_disconnect_session_t_handler (vl_api_disconnect_session_t * mp) rmp->retval = rv; rmp->session_index = mp->session_index; rmp->session_thread_index = mp->session_thread_index; - vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *)&rmp); + vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & rmp); } static void vl_api_reset_session_t_handler (vl_api_reset_session_t * mp) { uri_tcp_test_main_t *utm = &uri_tcp_test_main; - session_t * session; - vl_api_reset_session_reply_t * rmp; - uword * p; + session_t *session; + vl_api_reset_session_reply_t *rmp; + uword *p; int rv = 0; u64 key; - key = (((u64)mp->session_thread_index) << 32) | (u64)mp->session_index; + key = (((u64) mp->session_thread_index) << 32) | (u64) mp->session_index; - p = hash_get(utm->session_index_by_vpp_handles, key); + p = hash_get (utm->session_index_by_vpp_handles, key); if (p) { - session = pool_elt_at_index(utm->sessions, p[0]); - hash_unset(utm->session_index_by_vpp_handles, key); - pool_put(utm->sessions, session); + session = pool_elt_at_index (utm->sessions, p[0]); + hash_unset (utm->session_index_by_vpp_handles, key); + pool_put (utm->sessions, session); } else { - clib_warning("couldn't find session key %llx", key); + clib_warning ("couldn't find session key %llx", key); rv = -11; } @@ -296,301 +300,95 @@ vl_api_reset_session_t_handler (vl_api_reset_session_t * mp) rmp->retval = rv; rmp->session_index = mp->session_index; rmp->session_thread_index = mp->session_thread_index; - vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *)&rmp); + vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & rmp); } void -handle_fifo_event_connect_rx (uri_tcp_test_main_t *utm, session_fifo_event_t * e) +client_handle_fifo_event_rx (uri_tcp_test_main_t * utm, + session_fifo_event_t * e) { - svm_fifo_t * rx_fifo; - int n_read, bytes; + svm_fifo_t *rx_fifo; + int n_read, bytes, i; rx_fifo = e->fifo; bytes = e->enqueue_length; do { - n_read = svm_fifo_dequeue_nowait (rx_fifo, 0, vec_len(utm->rx_buf), - utm->rx_buf); + n_read = svm_fifo_dequeue_nowait (rx_fifo, 0, vec_len (utm->rx_buf), + utm->rx_buf); if (n_read > 0) - bytes -= n_read; + { + bytes -= n_read; + for (i = 0; i < n_read; i++) + { + if (utm->rx_buf[i] != ((utm->client_bytes_received + i) & 0xff)) + { + 
clib_warning ("error at byte %lld, 0x%x not 0x%x", + utm->client_bytes_received + i, + utm->rx_buf[i], + ((utm->client_bytes_received + i) & 0xff)); + } + } + utm->client_bytes_received += n_read; + } + } while (n_read < 0 || bytes > 0); - - // bytes_to_read = svm_fifo_max_dequeue (rx_fifo); - // - // bytes_to_read = vec_len(utm->rx_buf) > bytes_to_read ? - // bytes_to_read : vec_len(utm->rx_buf); - // - // buffer_offset = 0; - // while (bytes_to_read > 0) - // { - // rv = svm_fifo_dequeue_nowait2 (rx_fifo, mypid, - // bytes_to_read, - // utm->rx_buf + buffer_offset); - // if (rv > 0) - // { - // bytes_to_read -= rv; - // buffer_offset += rv; - // bytes_received += rv; - // } - // } - - - // while (bytes_received < bytes_sent) - // { - // rv = svm_fifo_dequeue_nowait2 (rx_fifo, mypid, - // vec_len (utm->rx_buf), - // utm->rx_buf); - // if (rv > 0) - // { - //#if CLIB_DEBUG > 0 - // int j; - // for (j = 0; j < rv; j++) - // { - // if (utm->rx_buf[j] != ((bytes_received + j) & 0xff)) - // { - // clib_warning ("error at byte %lld, 0x%x not 0x%x", - // bytes_received + j, - // utm->rx_buf[j], - // ((bytes_received + j )&0xff)); - // } - // } - //#endif - // bytes_received += (u64) rv; - // } - // } } void -handle_connect_event_queue (uri_tcp_test_main_t * utm) +client_handle_event_queue (uri_tcp_test_main_t * utm) { session_fifo_event_t _e, *e = &_e;; - unix_shared_memory_queue_sub (utm->our_event_queue, (u8 *) e, 0 /* nowait */); + unix_shared_memory_queue_sub (utm->our_event_queue, (u8 *) e, + 0 /* nowait */ ); switch (e->event_type) { case FIFO_EVENT_SERVER_RX: - handle_fifo_event_connect_rx (utm, e); + client_handle_fifo_event_rx (utm, e); break; case FIFO_EVENT_SERVER_EXIT: return; default: - clib_warning("unknown event type %d", e->event_type); + clib_warning ("unknown event type %d", e->event_type); break; } } -void -uri_tcp_connect_send (uri_tcp_test_main_t *utm) -{ - u8 *test_data = utm->connect_test_data; - u64 bytes_sent = 0; - int rv; - int mypid = getpid(); - session_t * session; - svm_fifo_t *tx_fifo; - int buffer_offset, bytes_to_send = 0; - session_fifo_event_t evt; - static int serial_number = 0; - int i; - u32 max_chunk = 64 << 10, write; - - session = pool_elt_at_index (utm->sessions, utm->connected_session_index); - tx_fifo = session->server_tx_fifo; - - vec_validate (utm->rx_buf, vec_len (test_data) - 1); - - for (i = 0; i < 10; i++) - { - bytes_to_send = vec_len (test_data); - buffer_offset = 0; - while (bytes_to_send > 0) - { - write = bytes_to_send > max_chunk ? 
max_chunk : bytes_to_send; - rv = svm_fifo_enqueue_nowait (tx_fifo, mypid, write, - test_data + buffer_offset); - - if (rv > 0) - { - bytes_to_send -= rv; - buffer_offset += rv; - bytes_sent += rv; - - /* Fabricate TX event, send to vpp */ - evt.fifo = tx_fifo; - evt.event_type = FIFO_EVENT_SERVER_TX; - /* $$$$ for event logging */ - evt.enqueue_length = rv; - evt.event_id = serial_number++; - - unix_shared_memory_queue_add (utm->vpp_event_queue, (u8 *) &evt, - 0 /* do wait for mutex */); - } - } - } -} - -static void -uri_tcp_client_test (uri_tcp_test_main_t * utm) -{ - vl_api_connect_uri_t * cmp; - vl_api_disconnect_session_t *dmp; - session_t *connected_session; - int i; - - cmp = vl_msg_api_alloc (sizeof (*cmp)); - memset (cmp, 0, sizeof (*cmp)); - - cmp->_vl_msg_id = ntohs (VL_API_CONNECT_URI); - cmp->client_index = utm->my_client_index; - cmp->context = ntohl(0xfeedface); - memcpy (cmp->uri, utm->connect_uri, vec_len (utm->connect_uri)); - vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *)&cmp); - - if (wait_for_state_change (utm, STATE_READY)) - { - return; - } - - /* Init test data */ - vec_validate (utm->connect_test_data, 64 * 1024 - 1); - for (i = 0; i < vec_len (utm->connect_test_data); i++) - utm->connect_test_data[i] = i & 0xff; - - /* Start reader thread */ - /* handle_connect_event_queue (utm); */ - - /* Start send */ - uri_tcp_connect_send (utm); - - /* Disconnect */ - connected_session = pool_elt_at_index(utm->sessions, - utm->connected_session_index); - dmp = vl_msg_api_alloc (sizeof (*dmp)); - memset (dmp, 0, sizeof (*dmp)); - dmp->_vl_msg_id = ntohs (VL_API_DISCONNECT_SESSION); - dmp->client_index = utm->my_client_index; - dmp->session_index = connected_session->vpp_session_index; - dmp->session_thread_index = connected_session->vpp_session_thread; - vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *)&dmp); -} - -void -handle_fifo_event_server_rx (uri_tcp_test_main_t *utm, session_fifo_event_t * e) -{ - svm_fifo_t * rx_fifo, * tx_fifo; - int n_read; - - session_fifo_event_t evt; - unix_shared_memory_queue_t *q; - int rv, bytes; - - rx_fifo = e->fifo; - tx_fifo = utm->sessions[rx_fifo->client_session_index].server_tx_fifo; - - bytes = e->enqueue_length; - do - { - n_read = svm_fifo_dequeue_nowait (rx_fifo, 0, vec_len(utm->rx_buf), - utm->rx_buf); - - /* Reflect if a non-drop session */ - if (!utm->drop_packets && n_read > 0) - { - do - { - rv = svm_fifo_enqueue_nowait (tx_fifo, 0, n_read, utm->rx_buf); - } - while (rv == -2); - - /* Fabricate TX event, send to vpp */ - evt.fifo = tx_fifo; - evt.event_type = FIFO_EVENT_SERVER_TX; - /* $$$$ for event logging */ - evt.enqueue_length = n_read; - evt.event_id = e->event_id; - q = utm->vpp_event_queue; - unix_shared_memory_queue_add (q, (u8 *) &evt, 0 /* do wait for mutex */); - } - - if (n_read > 0) - bytes -= n_read; - } - while (n_read < 0 || bytes > 0); -} - -void -handle_event_queue (uri_tcp_test_main_t * utm) +static void * +client_rx_thread_fn (void *arg) { - session_fifo_event_t _e, *e = &_e;; + session_fifo_event_t _e, *e = &_e; + uri_tcp_test_main_t *utm = &uri_tcp_test_main; + utm->client_bytes_received = 0; while (1) { - unix_shared_memory_queue_sub (utm->our_event_queue, (u8 *)e, - 0 /* nowait */); + unix_shared_memory_queue_sub (utm->our_event_queue, (u8 *) e, + 0 /* nowait */ ); switch (e->event_type) - { - case FIFO_EVENT_SERVER_RX: - handle_fifo_event_server_rx (utm, e); - break; - - case FIFO_EVENT_SERVER_EXIT: - return; - - default: - clib_warning ("unknown event type %d", e->event_type); - break; - } - if 
(PREDICT_FALSE(utm->time_to_stop == 1)) - break; - if (PREDICT_FALSE(utm->time_to_print_stats == 1)) - { - utm->time_to_print_stats = 0; - fformat(stdout, "%d connections\n", pool_elts (utm->sessions)); - } + { + case FIFO_EVENT_SERVER_RX: + client_handle_fifo_event_rx (utm, e); + break; + + case FIFO_EVENT_SERVER_EXIT: + return 0; + default: + clib_warning ("unknown event type %d", e->event_type); + break; + } + + if (PREDICT_FALSE (utm->time_to_stop == 1)) + break; } + pthread_exit (0); } -static void -vl_api_bind_uri_reply_t_handler (vl_api_bind_uri_reply_t * mp) -{ - uri_tcp_test_main_t *utm = &uri_tcp_test_main; - svm_fifo_segment_create_args_t _a, *a = &_a; - int rv; - - if (mp->retval) - { - clib_warning("bind failed: %d", mp->retval); - return; - } - - if (mp->segment_name_length == 0) - { - clib_warning("segment_name_length zero"); - return; - } - - a->segment_name = (char *) mp->segment_name; - a->segment_size = mp->segment_size; - - ASSERT(mp->server_event_queue_address); - - /* Attach to the segment vpp created */ - rv = svm_fifo_segment_attach (a); - if (rv) - { - clib_warning("svm_fifo_segment_attach ('%s') failed", mp->segment_name); - return; - } - - utm->our_event_queue = - (unix_shared_memory_queue_t *) mp->server_event_queue_address; - - utm->state = STATE_READY; -} static void vl_api_connect_uri_reply_t_handler (vl_api_connect_uri_reply_t * mp) @@ -601,6 +399,7 @@ vl_api_connect_uri_reply_t_handler (vl_api_connect_uri_reply_t * mp) u32 session_index; svm_fifo_t *rx_fifo, *tx_fifo; int rv; + u64 key; if (mp->retval) { @@ -608,6 +407,7 @@ vl_api_connect_uri_reply_t_handler (vl_api_connect_uri_reply_t * mp) utm->state = STATE_FAILED; return; } + /* * Attatch to segment */ @@ -622,14 +422,14 @@ vl_api_connect_uri_reply_t_handler (vl_api_connect_uri_reply_t * mp) a->segment_name = (char *) mp->segment_name; a->segment_size = mp->segment_size; - ASSERT(mp->client_event_queue_address); + ASSERT (mp->client_event_queue_address); /* Attach to the segment vpp created */ rv = svm_fifo_segment_attach (a); if (rv) { clib_warning ("svm_fifo_segment_attach ('%s') failed", - mp->segment_name); + mp->segment_name); return; } @@ -650,9 +450,9 @@ vl_api_connect_uri_reply_t_handler (vl_api_connect_uri_reply_t * mp) pool_get (utm->sessions, session); session_index = session - utm->sessions; - rx_fifo = (svm_fifo_t *)mp->server_rx_fifo; + rx_fifo = (svm_fifo_t *) mp->server_rx_fifo; rx_fifo->client_session_index = session_index; - tx_fifo = (svm_fifo_t *)mp->server_tx_fifo; + tx_fifo = (svm_fifo_t *) mp->server_tx_fifo; tx_fifo->client_session_index = session_index; session->server_rx_fifo = rx_fifo; @@ -662,54 +462,193 @@ vl_api_connect_uri_reply_t_handler (vl_api_connect_uri_reply_t * mp) /* Save handle */ utm->connected_session_index = session_index; - utm->state = STATE_READY; + + /* Add it to lookup table */ + key = (((u64) mp->session_thread_index) << 32) | (u64) mp->session_index; + hash_set (utm->session_index_by_vpp_handles, key, session_index); + + /* Start RX thread */ + rv = pthread_create (&utm->client_rx_thread_handle, + NULL /*attr */ , client_rx_thread_fn, 0); + if (rv) + { + clib_warning ("pthread_create returned %d", rv); + rv = VNET_API_ERROR_SYSCALL_ERROR_1; + } } void -uri_tcp_bind (uri_tcp_test_main_t *utm) +client_send_data (uri_tcp_test_main_t * utm) { - vl_api_bind_uri_t * bmp; - u32 fifo_size = 3 << 20; - bmp = vl_msg_api_alloc (sizeof (*bmp)); - memset (bmp, 0, sizeof (*bmp)); + u8 *test_data = utm->connect_test_data; + u64 bytes_sent = 0; + int rv; + int mypid = 
getpid (); + session_t *session; + svm_fifo_t *tx_fifo; + int buffer_offset, bytes_to_send = 0; + session_fifo_event_t evt; + static int serial_number = 0; + int i; + u32 max_chunk = 64 << 10, write; - bmp->_vl_msg_id = ntohs (VL_API_BIND_URI); - bmp->client_index = utm->my_client_index; - bmp->context = ntohl(0xfeedface); - bmp->initial_segment_size = 256<<20; /* size of initial segment */ - bmp->options[SESSION_OPTIONS_FLAGS] = - SESSION_OPTIONS_FLAGS_USE_FIFO | SESSION_OPTIONS_FLAGS_ADD_SEGMENT; - bmp->options[SESSION_OPTIONS_RX_FIFO_SIZE] = fifo_size; - bmp->options[SESSION_OPTIONS_TX_FIFO_SIZE] = fifo_size; - bmp->options[SESSION_OPTIONS_ADD_SEGMENT_SIZE] = 128<<20; - memcpy (bmp->uri, utm->uri, vec_len (utm->uri)); - vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *)&bmp); + session = pool_elt_at_index (utm->sessions, utm->connected_session_index); + tx_fifo = session->server_tx_fifo; + + vec_validate (utm->rx_buf, vec_len (test_data) - 1); + + for (i = 0; i < 1; i++) + { + bytes_to_send = vec_len (test_data); + buffer_offset = 0; + while (bytes_to_send > 0) + { + write = bytes_to_send > max_chunk ? max_chunk : bytes_to_send; + rv = svm_fifo_enqueue_nowait (tx_fifo, mypid, write, + test_data + buffer_offset); + + if (rv > 0) + { + bytes_to_send -= rv; + buffer_offset += rv; + bytes_sent += rv; + + /* Fabricate TX event, send to vpp */ + evt.fifo = tx_fifo; + evt.event_type = FIFO_EVENT_SERVER_TX; + /* $$$$ for event logging */ + evt.enqueue_length = rv; + evt.event_id = serial_number++; + + unix_shared_memory_queue_add (utm->vpp_event_queue, + (u8 *) & evt, + 0 /* do wait for mutex */ ); + } + } + } + + if (utm->test_return_packets) + { + f64 timeout = clib_time_now (&utm->clib_time) + 2; + + /* Wait for the outstanding packets */ + while (utm->client_bytes_received < vec_len (test_data)) + { + if (clib_time_now (&utm->clib_time) > timeout) + { + clib_warning ("timed out waiting for the missing packets"); + break; + } + } + + utm->time_to_stop = 1; + } +} + +void +client_connect (uri_tcp_test_main_t * utm) +{ + vl_api_connect_uri_t *cmp; + cmp = vl_msg_api_alloc (sizeof (*cmp)); + memset (cmp, 0, sizeof (*cmp)); + + cmp->_vl_msg_id = ntohs (VL_API_CONNECT_URI); + cmp->client_index = utm->my_client_index; + cmp->context = ntohl (0xfeedface); + memcpy (cmp->uri, utm->connect_uri, vec_len (utm->connect_uri)); + vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & cmp); +} + +void +client_disconnect (uri_tcp_test_main_t * utm) +{ + session_t *connected_session; + vl_api_disconnect_session_t *dmp; + connected_session = pool_elt_at_index (utm->sessions, + utm->connected_session_index); + dmp = vl_msg_api_alloc (sizeof (*dmp)); + memset (dmp, 0, sizeof (*dmp)); + dmp->_vl_msg_id = ntohs (VL_API_DISCONNECT_SESSION); + dmp->client_index = utm->my_client_index; + dmp->session_index = connected_session->vpp_session_index; + dmp->session_thread_index = connected_session->vpp_session_thread; + vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & dmp); +} + +static void +client_test (uri_tcp_test_main_t * utm) +{ + int i; + + client_connect (utm); + + if (wait_for_state_change (utm, STATE_READY)) + { + return; + } + + /* Init test data */ + vec_validate (utm->connect_test_data, 64 * 1024 - 1); + for (i = 0; i < vec_len (utm->connect_test_data); i++) + utm->connect_test_data[i] = i & 0xff; + + /* Start send */ + client_send_data (utm); + + /* Disconnect */ + client_disconnect (utm); } static void -vl_api_unbind_uri_reply_t_handler (vl_api_unbind_uri_reply_t *mp) +vl_api_bind_uri_reply_t_handler 
(vl_api_bind_uri_reply_t * mp) { uri_tcp_test_main_t *utm = &uri_tcp_test_main; + svm_fifo_segment_create_args_t _a, *a = &_a; + int rv; - if (mp->retval != 0) - clib_warning ("returned %d", ntohl(mp->retval)); + if (mp->retval) + { + clib_warning ("bind failed: %d", mp->retval); + utm->state = STATE_FAILED; + return; + } - utm->state = STATE_START; + if (mp->segment_name_length == 0) + { + clib_warning ("segment_name_length zero"); + return; + } + + a->segment_name = (char *) mp->segment_name; + a->segment_size = mp->segment_size; + + ASSERT (mp->server_event_queue_address); + + /* Attach to the segment vpp created */ + rv = svm_fifo_segment_attach (a); + if (rv) + { + clib_warning ("svm_fifo_segment_attach ('%s') failed", + mp->segment_name); + return; + } + + utm->our_event_queue = + (unix_shared_memory_queue_t *) mp->server_event_queue_address; + + utm->state = STATE_READY; } -void -uri_tcp_unbind (uri_tcp_test_main_t *utm) +static void +vl_api_unbind_uri_reply_t_handler (vl_api_unbind_uri_reply_t * mp) { - vl_api_unbind_uri_t * ump; + uri_tcp_test_main_t *utm = &uri_tcp_test_main; - ump = vl_msg_api_alloc (sizeof (*ump)); - memset (ump, 0, sizeof (*ump)); + if (mp->retval != 0) + clib_warning ("returned %d", ntohl (mp->retval)); - ump->_vl_msg_id = ntohs (VL_API_UNBIND_URI); - ump->client_index = utm->my_client_index; - memcpy (ump->uri, utm->uri, vec_len (utm->uri)); - vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *)&ump); + utm->state = STATE_START; } static void @@ -717,14 +656,14 @@ vl_api_accept_session_t_handler (vl_api_accept_session_t * mp) { uri_tcp_test_main_t *utm = &uri_tcp_test_main; vl_api_accept_session_reply_t *rmp; - svm_fifo_t * rx_fifo, * tx_fifo; - session_t * session; + svm_fifo_t *rx_fifo, *tx_fifo; + session_t *session; static f64 start_time; u64 key; u32 session_index; if (start_time == 0.0) - start_time = clib_time_now (&utm->clib_time); + start_time = clib_time_now (&utm->clib_time); utm->vpp_event_queue = (unix_shared_memory_queue_t *) mp->vpp_event_queue_address; @@ -733,45 +672,159 @@ vl_api_accept_session_t_handler (vl_api_accept_session_t * mp) pool_get (utm->sessions, session); session_index = session - utm->sessions; - rx_fifo = (svm_fifo_t *)mp->server_rx_fifo; + rx_fifo = (svm_fifo_t *) mp->server_rx_fifo; rx_fifo->client_session_index = session_index; - tx_fifo = (svm_fifo_t *)mp->server_tx_fifo; + tx_fifo = (svm_fifo_t *) mp->server_tx_fifo; tx_fifo->client_session_index = session_index; session->server_rx_fifo = rx_fifo; session->server_tx_fifo = tx_fifo; /* Add it to lookup table */ - key = (((u64)mp->session_thread_index) << 32) | (u64)mp->session_index; + key = (((u64) mp->session_thread_index) << 32) | (u64) mp->session_index; hash_set (utm->session_index_by_vpp_handles, key, session_index); utm->state = STATE_READY; /* Stats printing */ - if (pool_elts (utm->sessions) && (pool_elts(utm->sessions) % 20000) == 0) + if (pool_elts (utm->sessions) && (pool_elts (utm->sessions) % 20000) == 0) { f64 now = clib_time_now (&utm->clib_time); fformat (stdout, "%d active sessions in %.2f seconds, %.2f/sec...\n", - pool_elts(utm->sessions), now - start_time, - (f64)pool_elts(utm->sessions) / (now - start_time)); + pool_elts (utm->sessions), now - start_time, + (f64) pool_elts (utm->sessions) / (now - start_time)); } - /* Send accept reply to vpp */ + /* + * Send accept reply to vpp + */ rmp = vl_msg_api_alloc (sizeof (*rmp)); memset (rmp, 0, sizeof (*rmp)); rmp->_vl_msg_id = ntohs (VL_API_ACCEPT_SESSION_REPLY); rmp->session_type = mp->session_type; 
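  /* A (thread index, session index) pair identifies the session on the
     vpp side, so both fields are echoed back verbatim in the reply. */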
rmp->session_index = mp->session_index; rmp->session_thread_index = mp->session_thread_index; - vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *)&rmp); + vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & rmp); } void -uri_tcp_server_test (uri_tcp_test_main_t * utm) +server_handle_fifo_event_rx (uri_tcp_test_main_t * utm, + session_fifo_event_t * e) { + svm_fifo_t *rx_fifo, *tx_fifo; + int n_read; + + session_fifo_event_t evt; + unix_shared_memory_queue_t *q; + int rv, bytes; + + rx_fifo = e->fifo; + tx_fifo = utm->sessions[rx_fifo->client_session_index].server_tx_fifo; + + bytes = e->enqueue_length; + do + { + n_read = svm_fifo_dequeue_nowait (rx_fifo, 0, vec_len (utm->rx_buf), + utm->rx_buf); + + /* Reflect if a non-drop session */ + if (!utm->drop_packets && n_read > 0) + { + do + { + rv = svm_fifo_enqueue_nowait (tx_fifo, 0, n_read, utm->rx_buf); + } + while (rv == -2); + + /* Fabricate TX event, send to vpp */ + evt.fifo = tx_fifo; + evt.event_type = FIFO_EVENT_SERVER_TX; + /* $$$$ for event logging */ + evt.enqueue_length = n_read; + evt.event_id = e->event_id; + q = utm->vpp_event_queue; + unix_shared_memory_queue_add (q, (u8 *) & evt, + 0 /* do wait for mutex */ ); + } + + if (n_read > 0) + bytes -= n_read; + } + while (n_read < 0 || bytes > 0); +} + +void +server_handle_event_queue (uri_tcp_test_main_t * utm) +{ + session_fifo_event_t _e, *e = &_e;; + while (1) + { + unix_shared_memory_queue_sub (utm->our_event_queue, (u8 *) e, + 0 /* nowait */ ); + switch (e->event_type) + { + case FIFO_EVENT_SERVER_RX: + server_handle_fifo_event_rx (utm, e); + break; + + case FIFO_EVENT_SERVER_EXIT: + return; + + default: + clib_warning ("unknown event type %d", e->event_type); + break; + } + if (PREDICT_FALSE (utm->time_to_stop == 1)) + break; + if (PREDICT_FALSE (utm->time_to_print_stats == 1)) + { + utm->time_to_print_stats = 0; + fformat (stdout, "%d connections\n", pool_elts (utm->sessions)); + } + } +} + +void +server_bind (uri_tcp_test_main_t * utm) +{ + vl_api_bind_uri_t *bmp; + u32 fifo_size = 3 << 20; + bmp = vl_msg_api_alloc (sizeof (*bmp)); + memset (bmp, 0, sizeof (*bmp)); + + bmp->_vl_msg_id = ntohs (VL_API_BIND_URI); + bmp->client_index = utm->my_client_index; + bmp->context = ntohl (0xfeedface); + bmp->initial_segment_size = 256 << 20; /* size of initial segment */ + bmp->options[SESSION_OPTIONS_FLAGS] = + SESSION_OPTIONS_FLAGS_USE_FIFO | SESSION_OPTIONS_FLAGS_ADD_SEGMENT; + bmp->options[SESSION_OPTIONS_RX_FIFO_SIZE] = fifo_size; + bmp->options[SESSION_OPTIONS_TX_FIFO_SIZE] = fifo_size; + bmp->options[SESSION_OPTIONS_ADD_SEGMENT_SIZE] = 128 << 20; + memcpy (bmp->uri, utm->uri, vec_len (utm->uri)); + vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & bmp); +} + +void +server_unbind (uri_tcp_test_main_t * utm) +{ + vl_api_unbind_uri_t *ump; + + ump = vl_msg_api_alloc (sizeof (*ump)); + memset (ump, 0, sizeof (*ump)); + + ump->_vl_msg_id = ntohs (VL_API_UNBIND_URI); + ump->client_index = utm->my_client_index; + memcpy (ump->uri, utm->uri, vec_len (utm->uri)); + vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & ump); +} + +void +server_test (uri_tcp_test_main_t * utm) +{ /* Bind to uri */ - uri_tcp_bind (utm); + server_bind (utm); if (wait_for_state_change (utm, STATE_READY)) { @@ -780,10 +833,10 @@ uri_tcp_server_test (uri_tcp_test_main_t * utm) } /* Enter handle event loop */ - handle_event_queue (utm); + server_handle_event_queue (utm); /* Cleanup */ - uri_tcp_unbind (utm); + server_unbind (utm); if (wait_for_state_change (utm, STATE_START)) { @@ -824,12 +877,12 @@ main 
(int argc, char **argv) unformat_input_t _argv, *a = &_argv; u8 *chroot_prefix; u8 *heap; - u8 * bind_name = (u8 *) "tcp://0.0.0.0/1234"; + u8 *bind_name = (u8 *) "tcp://0.0.0.0/1234"; u32 tmp; mheap_t *h; - session_t * session; + session_t *session; int i; - int i_am_master = 1, drop_packets = 0; + int i_am_master = 1, drop_packets = 0, test_return_packets = 0; clib_mem_init (0, 256 << 20); @@ -841,53 +894,54 @@ main (int argc, char **argv) vec_validate (utm->rx_buf, 65536); - utm->session_index_by_vpp_handles = - hash_create (0, sizeof(uword)); + utm->session_index_by_vpp_handles = hash_create (0, sizeof (uword)); - utm->my_pid = getpid(); - utm->configured_segment_size = 1<<20; + utm->my_pid = getpid (); + utm->configured_segment_size = 1 << 20; clib_time_init (&utm->clib_time); init_error_string_table (utm); - svm_fifo_segment_init(0x200000000ULL, 20); + svm_fifo_segment_init (0x200000000ULL, 20); unformat_init_command_line (a, argv); while (unformat_check_input (a) != UNFORMAT_END_OF_INPUT) { if (unformat (a, "chroot prefix %s", &chroot_prefix)) - { - vl_set_memory_root_path ((char *) chroot_prefix); - } + { + vl_set_memory_root_path ((char *) chroot_prefix); + } else if (unformat (a, "uri %s", &bind_name)) - ; + ; else if (unformat (a, "segment-size %dM", &tmp)) - utm->configured_segment_size = tmp<<20; + utm->configured_segment_size = tmp << 20; else if (unformat (a, "segment-size %dG", &tmp)) - utm->configured_segment_size = tmp<<30; + utm->configured_segment_size = tmp << 30; else if (unformat (a, "master")) - i_am_master = 1; + i_am_master = 1; else if (unformat (a, "slave")) - i_am_master = 0; + i_am_master = 0; else if (unformat (a, "drop")) - drop_packets = 1; + drop_packets = 1; + else if (unformat (a, "test")) + test_return_packets = 1; else - { - fformat (stderr, "%s: usage [master|slave]\n"); - exit (1); - } + { + fformat (stderr, "%s: usage [master|slave]\n"); + exit (1); + } } utm->uri = format (0, "%s%c", bind_name, 0); utm->i_am_master = i_am_master; utm->segment_main = &svm_fifo_segment_main; utm->drop_packets = drop_packets; - + utm->test_return_packets = test_return_packets; utm->connect_uri = format (0, "tcp://6.0.1.2/1234%c", 0); - setup_signal_handlers(); + setup_signal_handlers (); uri_api_hookup (utm); - if (connect_to_vpp (i_am_master? "uri_tcp_server":"uri_tcp_client") < 0) + if (connect_to_vpp (i_am_master ? 
"uri_tcp_server" : "uri_tcp_client") < 0) { svm_region_exit (); fformat (stderr, "Couldn't connect to vpe, exiting...\n"); @@ -896,7 +950,7 @@ main (int argc, char **argv) if (i_am_master == 0) { - uri_tcp_client_test (utm); + client_test (utm); exit (0); } @@ -909,8 +963,16 @@ main (int argc, char **argv) for (i = 0; i < 200000; i++) pool_put_index (utm->sessions, i); - uri_tcp_server_test (utm); + server_test (utm); vl_client_disconnect_from_vlib (); exit (0); } + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/uri/uri_udp_test.c b/src/uri/uri_udp_test.c index 6f5284c9..54625d64 100644 --- a/src/uri/uri_udp_test.c +++ b/src/uri/uri_udp_test.c @@ -26,25 +26,25 @@ #include #include #include -#include -#include - -#include +#include +#include +#include +#include #define vl_typedefs /* define message structures */ -#include +#include #undef vl_typedefs /* declare message handlers for each api */ #define vl_endianfun /* define message structures */ -#include +#include #undef vl_endianfun /* instantiate all the print functions we know about */ #define vl_print(handle, ...) #define vl_printfun -#include +#include #undef vl_printfun /* Satisfy external references when not linking with -lvlib */ @@ -87,12 +87,28 @@ typedef struct /* intermediate rx buffer */ u8 *rx_buf; + /* URI for connect */ + u8 *connect_uri; + + int i_am_master; + /* Our event queue */ unix_shared_memory_queue_t *our_event_queue; /* $$$ single thread only for the moment */ unix_shared_memory_queue_t *vpp_event_queue; + /* $$$$ hack: cut-through session index */ + volatile u32 cut_through_session_index; + + /* unique segment name counter */ + u32 unique_segment_index; + + pid_t my_pid; + + /* pthread handle */ + pthread_t cut_through_thread_handle; + /* For deadman timers */ clib_time_t clib_time; @@ -102,14 +118,20 @@ typedef struct volatile int time_to_stop; volatile int time_to_print_stats; + u32 configured_segment_size; + /* VNET_API_ERROR_FOO -> "Foo" hash table */ uword *error_string_by_error_number; + + /* convenience */ + svm_fifo_segment_main_t *segment_main; + } uri_udp_test_main_t; #if CLIB_DEBUG > 0 -#define NITER 1000 +#define NITER 10000 #else -#define NITER 1000000 +#define NITER 4000000 #endif uri_udp_test_main_t uri_udp_test_main; @@ -159,7 +181,13 @@ format_api_error (u8 * s, va_list * args) int wait_for_state_change (uri_udp_test_main_t * utm, connection_state_t state) { - f64 timeout = clib_time_now (&utm->clib_time) + 5.0; +#if CLIB_DEBUG > 0 +#define TIMEOUT 600.0 +#else +#define TIMEOUT 600.0 +#endif + + f64 timeout = clib_time_now (&utm->clib_time) + TIMEOUT; while (clib_time_now (&utm->clib_time) < timeout) { @@ -169,6 +197,183 @@ wait_for_state_change (uri_udp_test_main_t * utm, connection_state_t state) return -1; } +u64 server_bytes_received, server_bytes_sent; + +static void * +cut_through_thread_fn (void *arg) +{ + session_t *s; + svm_fifo_t *rx_fifo; + svm_fifo_t *tx_fifo; + u8 *my_copy_buffer = 0; + uri_udp_test_main_t *utm = &uri_udp_test_main; + i32 actual_transfer; + int rv; + u32 buffer_offset; + + while (utm->cut_through_session_index == ~0) + ; + + s = pool_elt_at_index (utm->sessions, utm->cut_through_session_index); + + rx_fifo = s->server_rx_fifo; + tx_fifo = s->server_tx_fifo; + + vec_validate (my_copy_buffer, 64 * 1024 - 1); + + while (true) + { + /* We read from the tx fifo and write to the rx fifo */ + do + { + actual_transfer = svm_fifo_dequeue_nowait (tx_fifo, 0, + vec_len (my_copy_buffer), + 
my_copy_buffer); + } + while (actual_transfer <= 0); + + server_bytes_received += actual_transfer; + + buffer_offset = 0; + while (actual_transfer > 0) + { + rv = svm_fifo_enqueue_nowait (rx_fifo, 0, actual_transfer, + my_copy_buffer + buffer_offset); + if (rv > 0) + { + actual_transfer -= rv; + buffer_offset += rv; + server_bytes_sent += rv; + } + + } + if (PREDICT_FALSE (utm->time_to_stop)) + break; + } + + pthread_exit (0); +} + +static void +uri_udp_slave_test (uri_udp_test_main_t * utm) +{ + vl_api_connect_uri_t *cmp; + int i; + u8 *test_data = 0; + u64 bytes_received = 0, bytes_sent = 0; + i32 bytes_to_read; + int rv; + int mypid = getpid (); + f64 before, after, delta, bytes_per_second; + session_t *session; + svm_fifo_t *rx_fifo, *tx_fifo; + int buffer_offset, bytes_to_send = 0; + + vec_validate (test_data, 64 * 1024 - 1); + for (i = 0; i < vec_len (test_data); i++) + test_data[i] = i & 0xff; + + cmp = vl_msg_api_alloc (sizeof (*cmp)); + memset (cmp, 0, sizeof (*cmp)); + + cmp->_vl_msg_id = ntohs (VL_API_CONNECT_URI); + cmp->client_index = utm->my_client_index; + cmp->context = ntohl (0xfeedface); + memcpy (cmp->uri, utm->connect_uri, vec_len (utm->connect_uri)); + vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & cmp); + + if (wait_for_state_change (utm, STATE_READY)) + { + clib_warning ("timeout waiting for STATE_READY"); + return; + } + + session = pool_elt_at_index (utm->sessions, utm->cut_through_session_index); + rx_fifo = session->server_rx_fifo; + tx_fifo = session->server_tx_fifo; + + before = clib_time_now (&utm->clib_time); + + vec_validate (utm->rx_buf, vec_len (test_data) - 1); + + for (i = 0; i < NITER; i++) + { + bytes_to_send = vec_len (test_data); + buffer_offset = 0; + while (bytes_to_send > 0) + { + rv = svm_fifo_enqueue_nowait (tx_fifo, mypid, + bytes_to_send, + test_data + buffer_offset); + + if (rv > 0) + { + bytes_to_send -= rv; + buffer_offset += rv; + bytes_sent += rv; + } + } + + bytes_to_read = svm_fifo_max_dequeue (rx_fifo); + + bytes_to_read = vec_len (utm->rx_buf) > bytes_to_read ? 
+ bytes_to_read : vec_len (utm->rx_buf); + + buffer_offset = 0; + while (bytes_to_read > 0) + { + rv = svm_fifo_dequeue_nowait (rx_fifo, mypid, + bytes_to_read, + utm->rx_buf + buffer_offset); + if (rv > 0) + { + bytes_to_read -= rv; + buffer_offset += rv; + bytes_received += rv; + } + } + } + while (bytes_received < bytes_sent) + { + rv = svm_fifo_dequeue_nowait (rx_fifo, mypid, + vec_len (utm->rx_buf), utm->rx_buf); + if (rv > 0) + { +#if CLIB_DEBUG > 0 + int j; + for (j = 0; j < rv; j++) + { + if (utm->rx_buf[j] != ((bytes_received + j) & 0xff)) + { + clib_warning ("error at byte %lld, 0x%x not 0x%x", + bytes_received + j, + utm->rx_buf[j], + ((bytes_received + j) & 0xff)); + } + } +#endif + bytes_received += (u64) rv; + } + } + + after = clib_time_now (&utm->clib_time); + delta = after - before; + bytes_per_second = 0.0; + + if (delta > 0.0) + bytes_per_second = (f64) bytes_received / delta; + + fformat (stdout, + "Done: %lld recv bytes in %.2f seconds, %.2f bytes/sec...\n\n", + bytes_received, delta, bytes_per_second); + fformat (stdout, + "Done: %lld sent bytes in %.2f seconds, %.2f bytes/sec...\n\n", + bytes_sent, delta, bytes_per_second); + fformat (stdout, + "client -> server -> client round trip: %.2f Gbit/sec \n\n", + (bytes_per_second * 8.0) / 1e9); +} + static void vl_api_bind_uri_reply_t_handler (vl_api_bind_uri_reply_t * mp) { @@ -183,12 +388,16 @@ vl_api_bind_uri_reply_t_handler (vl_api_bind_uri_reply_t * mp) } a->segment_name = (char *) mp->segment_name; + a->segment_size = mp->segment_size; + + ASSERT (mp->server_event_queue_address); /* Attach to the segment vpp created */ rv = svm_fifo_segment_attach (a); if (rv) { - clib_warning ("sm_fifo_segment_create ('%s') failed", mp->segment_name); + clib_warning ("svm_fifo_segment_attach ('%s') failed", + mp->segment_name); return; } @@ -198,6 +407,101 @@ vl_api_bind_uri_reply_t_handler (vl_api_bind_uri_reply_t * mp) utm->state = STATE_READY; } +static void +vl_api_map_another_segment_t_handler (vl_api_map_another_segment_t * mp) +{ + svm_fifo_segment_create_args_t _a, *a = &_a; + int rv; + + a->segment_name = (char *) mp->segment_name; + a->segment_size = mp->segment_size; + /* Attach to the segment vpp created */ + rv = svm_fifo_segment_attach (a); + if (rv) + { + clib_warning ("svm_fifo_segment_attach ('%s') failed", + mp->segment_name); + return; + } + clib_warning ("Mapped new segment '%s' size %d", mp->segment_name, + mp->segment_size); +} + +static void +vl_api_connect_uri_t_handler (vl_api_connect_uri_t * mp) +{ + u32 segment_index; + uri_udp_test_main_t *utm = &uri_udp_test_main; + svm_fifo_segment_main_t *sm = &svm_fifo_segment_main; + svm_fifo_segment_create_args_t _a, *a = &_a; + svm_fifo_segment_private_t *seg; + unix_shared_memory_queue_t *client_q; + vl_api_connect_uri_reply_t *rmp; + session_t *session; + int rv = 0; + + /* Create the segment */ + a->segment_name = (char *) format (0, "%d:segment%d%c", utm->my_pid, + utm->unique_segment_index++, 0); + a->segment_size = utm->configured_segment_size; + + rv = svm_fifo_segment_create (a); + if (rv) + { + clib_warning ("sm_fifo_segment_create ('%s') failed", a->segment_name); + rv = VNET_API_ERROR_URI_FIFO_CREATE_FAILED; + goto send_reply; + } + + vec_add2 (utm->seg, seg, 1); + + segment_index = vec_len (sm->segments) - 1; + + memcpy (seg, sm->segments + segment_index, sizeof (utm->seg[0])); + + pool_get (utm->sessions, session); + + /* + * By construction the master's idea of the rx fifo ends up in + * fsh->fifos[0], and the master's idea of the tx fifo ends up in + 
* fsh->fifos[1]. + */ + session->server_rx_fifo = svm_fifo_segment_alloc_fifo (utm->seg, + 128 * 1024); + ASSERT (session->server_rx_fifo); + + session->server_tx_fifo = svm_fifo_segment_alloc_fifo (utm->seg, + 128 * 1024); + ASSERT (session->server_tx_fifo); + + session->server_rx_fifo->server_session_index = session - utm->sessions; + session->server_tx_fifo->server_session_index = session - utm->sessions; + utm->cut_through_session_index = session - utm->sessions; + + rv = pthread_create (&utm->cut_through_thread_handle, + NULL /*attr */ , cut_through_thread_fn, 0); + if (rv) + { + clib_warning ("pthread_create returned %d", rv); + rv = VNET_API_ERROR_SYSCALL_ERROR_1; + } + +send_reply: + rmp = vl_msg_api_alloc (sizeof (*rmp)); + memset (rmp, 0, sizeof (*rmp)); + + rmp->_vl_msg_id = ntohs (VL_API_CONNECT_URI_REPLY); + rmp->context = mp->context; + rmp->retval = ntohl (rv); + rmp->segment_name_length = vec_len (a->segment_name); + memcpy (rmp->segment_name, a->segment_name, vec_len (a->segment_name)); + + vec_free (a->segment_name); + + client_q = (unix_shared_memory_queue_t *) mp->client_queue_address; + vl_msg_api_send_shmem (client_q, (u8 *) & rmp); +} + static void vl_api_unbind_uri_reply_t_handler (vl_api_unbind_uri_reply_t * mp) { @@ -293,18 +597,79 @@ vl_api_disconnect_session_t_handler (vl_api_disconnect_session_t * mp) vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & rmp); } +static void +vl_api_connect_uri_reply_t_handler (vl_api_connect_uri_reply_t * mp) +{ + svm_fifo_segment_main_t *sm = &svm_fifo_segment_main; + uri_udp_test_main_t *utm = &uri_udp_test_main; + svm_fifo_segment_create_args_t _a, *a = &_a; + ssvm_shared_header_t *sh; + svm_fifo_segment_private_t *seg; + svm_fifo_segment_header_t *fsh; + session_t *session; + u32 segment_index; + int rv; + + ASSERT (utm->i_am_master == 0); + + if (mp->segment_name_length == 0) + { + clib_warning ("segment_name_length zero"); + return; + } + + memset (a, 0, sizeof (*a)); + + a->segment_name = (char *) mp->segment_name; + + sleep (1); + + rv = svm_fifo_segment_attach (a); + if (rv) + { + clib_warning ("sm_fifo_segment_create ('%v') failed", mp->segment_name); + return; + } + + segment_index = vec_len (sm->segments) - 1; + + vec_add2 (utm->seg, seg, 1); + + memcpy (seg, sm->segments + segment_index, sizeof (*seg)); + sh = seg->ssvm.sh; + fsh = (svm_fifo_segment_header_t *) sh->opaque[0]; + + while (vec_len (fsh->fifos) < 2) + sleep (1); + + pool_get (utm->sessions, session); + utm->cut_through_session_index = session - utm->sessions; + + session->server_rx_fifo = (svm_fifo_t *) fsh->fifos[0]; + ASSERT (session->server_rx_fifo); + session->server_tx_fifo = (svm_fifo_t *) fsh->fifos[1]; + ASSERT (session->server_tx_fifo); + + /* security: could unlink /dev/shm/segment_name> here, maybe */ + + utm->state = STATE_READY; +} + #define foreach_uri_msg \ _(BIND_URI_REPLY, bind_uri_reply) \ +_(CONNECT_URI, connect_uri) \ +_(CONNECT_URI_REPLY, connect_uri_reply) \ _(UNBIND_URI_REPLY, unbind_uri_reply) \ _(ACCEPT_SESSION, accept_session) \ -_(DISCONNECT_SESSION, disconnect_session) +_(DISCONNECT_SESSION, disconnect_session) \ +_(MAP_ANOTHER_SEGMENT, map_another_segment) void uri_api_hookup (uri_udp_test_main_t * utm) { #define _(N,n) \ vl_msg_api_set_handlers(VL_API_##N, #n, \ - vl_api_##n##_t_handler, \ + vl_api_##n##_t_handler, \ vl_noop_handler, \ vl_api_##n##_t_endian, \ vl_api_##n##_t_print, \ @@ -349,7 +714,7 @@ init_error_string_table (uri_udp_test_main_t * utm) } void -handle_fifo_event_server_rx (uri_udp_test_main_t * utm, 
+server_handle_fifo_event_rx (uri_udp_test_main_t * utm, session_fifo_event_t * e) { svm_fifo_t *rx_fifo, *tx_fifo; @@ -385,7 +750,7 @@ handle_fifo_event_server_rx (uri_udp_test_main_t * utm, } void -handle_event_queue (uri_udp_test_main_t * utm) +server_handle_event_queue (uri_udp_test_main_t * utm) { session_fifo_event_t _e, *e = &_e;; @@ -396,7 +761,7 @@ handle_event_queue (uri_udp_test_main_t * utm) switch (e->event_type) { case FIFO_EVENT_SERVER_RX: - handle_fifo_event_server_rx (utm, e); + server_handle_fifo_event_rx (utm, e); break; case FIFO_EVENT_SERVER_EXIT: @@ -428,7 +793,12 @@ uri_udp_test (uri_udp_test_main_t * utm) bmp->_vl_msg_id = ntohs (VL_API_BIND_URI); bmp->client_index = utm->my_client_index; bmp->context = ntohl (0xfeedface); - bmp->segment_size = 2 << 30; + bmp->initial_segment_size = 256 << 20; /* size of initial segment */ + bmp->options[SESSION_OPTIONS_FLAGS] = + SESSION_OPTIONS_FLAGS_USE_FIFO | SESSION_OPTIONS_FLAGS_ADD_SEGMENT; + bmp->options[SESSION_OPTIONS_RX_FIFO_SIZE] = 16 << 10; + bmp->options[SESSION_OPTIONS_TX_FIFO_SIZE] = 16 << 10; + bmp->options[SESSION_OPTIONS_ADD_SEGMENT_SIZE] = 128 << 20; memcpy (bmp->uri, utm->uri, vec_len (utm->uri)); vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & bmp); @@ -438,7 +808,7 @@ uri_udp_test (uri_udp_test_main_t * utm) return; } - handle_event_queue (utm); + server_handle_event_queue (utm); ump = vl_msg_api_alloc (sizeof (*ump)); memset (ump, 0, sizeof (*ump)); @@ -464,10 +834,12 @@ main (int argc, char **argv) unformat_input_t _argv, *a = &_argv; u8 *chroot_prefix; u8 *heap; - u8 *bind_name = (u8 *) "udp4:1234"; + u8 *bind_name = (u8 *) "udp://0.0.0.0/1234"; + u32 tmp; mheap_t *h; session_t *session; int i; + int i_am_master = 1; clib_mem_init (0, 256 << 20); @@ -481,6 +853,9 @@ main (int argc, char **argv) utm->session_index_by_vpp_handles = hash_create (0, sizeof (uword)); + utm->my_pid = getpid (); + utm->configured_segment_size = 1 << 20; + clib_time_init (&utm->clib_time); init_error_string_table (utm); svm_fifo_segment_init (0x200000000ULL, 20); @@ -494,6 +869,14 @@ main (int argc, char **argv) } else if (unformat (a, "uri %s", &bind_name)) ; + else if (unformat (a, "segment-size %dM", &tmp)) + utm->configured_segment_size = tmp << 20; + else if (unformat (a, "segment-size %dG", &tmp)) + utm->configured_segment_size = tmp << 30; + else if (unformat (a, "master")) + i_am_master = 1; + else if (unformat (a, "slave")) + i_am_master = 0; else { fformat (stderr, "%s: usage [master|slave]\n"); @@ -501,19 +884,30 @@ main (int argc, char **argv) } } + utm->cut_through_session_index = ~0; utm->uri = format (0, "%s%c", bind_name, 0); + utm->i_am_master = i_am_master; + utm->segment_main = &svm_fifo_segment_main; + + utm->connect_uri = format (0, "udp://10.0.0.1/1234%c", 0); setup_signal_handlers (); uri_api_hookup (utm); - if (connect_to_vpp ("uri_udp_test") < 0) + if (connect_to_vpp (i_am_master ? 
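
server_handle_event_queue above blocks on the shared-memory event queue and dispatches on event_type, and the bind options use shift notation for sizes (16 << 10 is 16 KiB, 128 << 20 is 128 MiB, 256 << 20 is 256 MiB; likewise the segment-size parser scales %dM by << 20 and %dG by << 30). A stripped-down shape of that dispatch loop, with stand-in types instead of the real unix_shared_memory_queue API:

#include <stdio.h>

typedef enum { EV_RX, EV_EXIT } event_type_t;
typedef struct { event_type_t type; int len; } event_t;

/* Stand-in for unix_shared_memory_queue_sub: replays a canned script. */
static int
queue_sub (event_t * e)
{
  static const event_t script[] =
    { { EV_RX, 64 }, { EV_RX, 128 }, { EV_EXIT, 0 } };
  static unsigned i;
  if (i >= sizeof (script) / sizeof (script[0]))
    return -1;
  *e = script[i++];
  return 0;
}

static void
handle_event_queue (void)
{
  event_t e;
  while (queue_sub (&e) == 0)
    {
      switch (e.type)
        {
        case EV_RX:
          printf ("rx event, %d bytes\n", e.len);
          break;
        case EV_EXIT:
          return;
        default:
          printf ("unknown event type %d\n", e.type);
          break;
        }
    }
}

int main (void) { handle_event_queue (); return 0; }
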
"uri_udp_master" : "uri_udp_slave") < 0) { svm_region_exit (); fformat (stderr, "Couldn't connect to vpe, exiting...\n"); exit (1); } + if (i_am_master == 0) + { + uri_udp_slave_test (utm); + exit (0); + } + /* $$$$ hack preallocation */ for (i = 0; i < 200000; i++) { @@ -531,7 +925,7 @@ main (int argc, char **argv) #undef vl_api_version #define vl_api_version(n,v) static u32 vpe_api_version = v; -#include +#include #undef vl_api_version void @@ -544,6 +938,12 @@ vl_client_add_api_signatures (vl_api_memclnt_create_t * mp) mp->api_versions[0] = clib_host_to_net_u32 (vpe_api_version); } +u32 +vl (void *p) +{ + return vec_len (p); +} + /* * fd.io coding-style-patch-verification: ON * diff --git a/src/uri/uri_udp_test2.c b/src/uri/uri_udp_test2.c deleted file mode 100644 index ddfffaa6..00000000 --- a/src/uri/uri_udp_test2.c +++ /dev/null @@ -1,954 +0,0 @@ -/* - * Copyright (c) 2016 Cisco and/or its affiliates. - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "../vnet/session/application_interface.h" - -#define vl_typedefs /* define message structures */ -#include -#undef vl_typedefs - -/* declare message handlers for each api */ - -#define vl_endianfun /* define message structures */ -#include -#undef vl_endianfun - -/* instantiate all the print functions we know about */ -#define vl_print(handle, ...) 
-#define vl_printfun -#include -#undef vl_printfun - -/* Satisfy external references when not linking with -lvlib */ -vlib_main_t vlib_global_main; -vlib_main_t **vlib_mains; - -typedef enum -{ - STATE_START, - STATE_READY, - STATE_DISCONNECTING, -} connection_state_t; - -typedef struct -{ - svm_fifo_t *server_rx_fifo; - svm_fifo_t *server_tx_fifo; -} session_t; - -typedef struct -{ - /* vpe input queue */ - unix_shared_memory_queue_t *vl_input_queue; - - /* API client handle */ - u32 my_client_index; - - /* The URI we're playing with */ - u8 *uri; - - /* Session pool */ - session_t *sessions; - - /* Hash table for disconnect processing */ - uword *session_index_by_vpp_handles; - - /* fifo segment */ - svm_fifo_segment_private_t *seg; - - /* intermediate rx buffer */ - u8 *rx_buf; - - /* URI for connect */ - u8 *connect_uri; - - int i_am_master; - - /* Our event queue */ - unix_shared_memory_queue_t *our_event_queue; - - /* $$$ single thread only for the moment */ - unix_shared_memory_queue_t *vpp_event_queue; - - /* $$$$ hack: cut-through session index */ - volatile u32 cut_through_session_index; - - /* unique segment name counter */ - u32 unique_segment_index; - - pid_t my_pid; - - /* pthread handle */ - pthread_t cut_through_thread_handle; - - /* For deadman timers */ - clib_time_t clib_time; - - /* State of the connection, shared between msg RX thread and main thread */ - volatile connection_state_t state; - - volatile int time_to_stop; - volatile int time_to_print_stats; - - u32 configured_segment_size; - - /* VNET_API_ERROR_FOO -> "Foo" hash table */ - uword *error_string_by_error_number; - - /* convenience */ - svm_fifo_segment_main_t *segment_main; - -} uri_udp_test_main_t; - -#if CLIB_DEBUG > 0 -#define NITER 10000 -#else -#define NITER 4000000 -#endif - -uri_udp_test_main_t uri_udp_test_main; - -static void -stop_signal (int signum) -{ - uri_udp_test_main_t *um = &uri_udp_test_main; - - um->time_to_stop = 1; -} - -static void -stats_signal (int signum) -{ - uri_udp_test_main_t *um = &uri_udp_test_main; - - um->time_to_print_stats = 1; -} - -static clib_error_t * -setup_signal_handlers (void) -{ - signal (SIGINT, stats_signal); - signal (SIGQUIT, stop_signal); - signal (SIGTERM, stop_signal); - - return 0; -} - -u8 * -format_api_error (u8 * s, va_list * args) -{ - uri_udp_test_main_t *utm = va_arg (*args, uri_udp_test_main_t *); - i32 error = va_arg (*args, u32); - uword *p; - - p = hash_get (utm->error_string_by_error_number, -error); - - if (p) - s = format (s, "%s", p[0]); - else - s = format (s, "%d", error); - return s; -} - -int -wait_for_state_change (uri_udp_test_main_t * utm, connection_state_t state) -{ -#if CLIB_DEBUG > 0 -#define TIMEOUT 600.0 -#else -#define TIMEOUT 600.0 -#endif - - f64 timeout = clib_time_now (&utm->clib_time) + TIMEOUT; - - while (clib_time_now (&utm->clib_time) < timeout) - { - if (utm->state == state) - return 0; - } - return -1; -} - -u64 server_bytes_received, server_bytes_sent; - -static void * -cut_through_thread_fn (void *arg) -{ - session_t *s; - svm_fifo_t *rx_fifo; - svm_fifo_t *tx_fifo; - u8 *my_copy_buffer = 0; - uri_udp_test_main_t *utm = &uri_udp_test_main; - i32 actual_transfer; - int rv; - u32 buffer_offset; - - while (utm->cut_through_session_index == ~0) - ; - - s = pool_elt_at_index (utm->sessions, utm->cut_through_session_index); - - rx_fifo = s->server_rx_fifo; - tx_fifo = s->server_tx_fifo; - - vec_validate (my_copy_buffer, 64 * 1024 - 1); - - while (true) - { - /* We read from the tx fifo and write to the rx fifo */ - do - { 
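
wait_for_state_change above is a deliberate busy-wait: the API reply handlers run in the message RX thread and flip utm->state, while the main thread spins on the volatile field until it matches or a deadline passes. The same shape in a standalone sketch (coarse time() granularity instead of clib_time_now):

#include <stdio.h>
#include <time.h>

typedef enum { STATE_START, STATE_READY } state_t;

/* Set by the message-handler thread, polled by the main thread. */
static volatile state_t current_state = STATE_START;

static int
wait_for_state (state_t want, double timeout_sec)
{
  time_t deadline = time (NULL) + (time_t) timeout_sec;
  while (time (NULL) < deadline)
    if (current_state == want)
      return 0;
  return -1; /* timed out */
}

int
main (void)
{
  current_state = STATE_READY; /* normally done by the RX thread */
  printf ("wait: %d\n", wait_for_state (STATE_READY, 1.0));
  return 0;
}
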
- actual_transfer = svm_fifo_dequeue_nowait (tx_fifo, 0, - vec_len (my_copy_buffer), - my_copy_buffer); - } - while (actual_transfer <= 0); - - server_bytes_received += actual_transfer; - - buffer_offset = 0; - while (actual_transfer > 0) - { - rv = svm_fifo_enqueue_nowait (rx_fifo, 0, actual_transfer, - my_copy_buffer + buffer_offset); - if (rv > 0) - { - actual_transfer -= rv; - buffer_offset += rv; - server_bytes_sent += rv; - } - - } - if (PREDICT_FALSE (utm->time_to_stop)) - break; - } - - pthread_exit (0); -} - -static void -uri_udp_slave_test (uri_udp_test_main_t * utm) -{ - vl_api_connect_uri_t *cmp; - int i; - u8 *test_data = 0; - u64 bytes_received = 0, bytes_sent = 0; - i32 bytes_to_read; - int rv; - int mypid = getpid (); - f64 before, after, delta, bytes_per_second; - session_t *session; - svm_fifo_t *rx_fifo, *tx_fifo; - int buffer_offset, bytes_to_send = 0; - - vec_validate (test_data, 64 * 1024 - 1); - for (i = 0; i < vec_len (test_data); i++) - test_data[i] = i & 0xff; - - cmp = vl_msg_api_alloc (sizeof (*cmp)); - memset (cmp, 0, sizeof (*cmp)); - - cmp->_vl_msg_id = ntohs (VL_API_CONNECT_URI); - cmp->client_index = utm->my_client_index; - cmp->context = ntohl (0xfeedface); - memcpy (cmp->uri, utm->connect_uri, vec_len (utm->connect_uri)); - vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & cmp); - - if (wait_for_state_change (utm, STATE_READY)) - { - clib_warning ("timeout waiting for STATE_READY"); - return; - } - - session = pool_elt_at_index (utm->sessions, utm->cut_through_session_index); - rx_fifo = session->server_rx_fifo; - tx_fifo = session->server_tx_fifo; - - before = clib_time_now (&utm->clib_time); - - vec_validate (utm->rx_buf, vec_len (test_data) - 1); - - for (i = 0; i < NITER; i++) - { - bytes_to_send = vec_len (test_data); - buffer_offset = 0; - while (bytes_to_send > 0) - { - rv = svm_fifo_enqueue_nowait (tx_fifo, mypid, - bytes_to_send, - test_data + buffer_offset); - - if (rv > 0) - { - bytes_to_send -= rv; - buffer_offset += rv; - bytes_sent += rv; - } - } - - bytes_to_read = svm_fifo_max_dequeue (rx_fifo); - - bytes_to_read = vec_len (utm->rx_buf) > bytes_to_read ? 
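
The cut-through loop above never assumes a fifo call moves everything it was offered: a dequeue may return less than the buffer, and an enqueue may accept only part of a chunk, so the tail is resubmitted at an advancing offset until the chunk drains. A compilable model of that resubmission logic, with stand-in ring_read/ring_write functions in place of the svm_fifo calls:

#include <stdio.h>
#include <string.h>

static const char source[] = "hello cut-through";
static int src_off;
static char sink[64];
static int sink_off;

/* Stand-in for svm_fifo_dequeue_nowait: returns bytes moved, 0 if empty. */
static int
ring_read (char *dst, int want)
{
  int avail = (int) sizeof (source) - 1 - src_off;
  int n = want < avail ? want : avail;
  memcpy (dst, source + src_off, n);
  src_off += n;
  return n;
}

/* Stand-in for svm_fifo_enqueue_nowait: accepts at most 4 bytes per
 * call, to force the partial-write path. */
static int
ring_write (const char *src, int len)
{
  int n = len < 4 ? len : 4;
  memcpy (sink + sink_off, src, n);
  sink_off += n;
  return n;
}

int
main (void)
{
  char buf[64];
  int got = ring_read (buf, sizeof (buf));
  int off = 0;

  /* Resubmit the unwritten tail after each partial enqueue, the way
   * cut_through_thread_fn drains the tx fifo into the rx fifo. */
  while (got > 0)
    {
      int rv = ring_write (buf + off, got);
      if (rv > 0)
        {
          got -= rv;
          off += rv;
        }
    }
  printf ("copied: %.*s\n", sink_off, sink);
  return 0;
}
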
- bytes_to_read : vec_len (utm->rx_buf); - - buffer_offset = 0; - while (bytes_to_read > 0) - { - rv = svm_fifo_dequeue_nowait (rx_fifo, mypid, - bytes_to_read, - utm->rx_buf + buffer_offset); - if (rv > 0) - { - bytes_to_read -= rv; - buffer_offset += rv; - bytes_received += rv; - } - } - } - while (bytes_received < bytes_sent) - { - rv = svm_fifo_dequeue_nowait (rx_fifo, mypid, - vec_len (utm->rx_buf), utm->rx_buf); - if (rv > 0) - { -#if CLIB_DEBUG > 0 - int j; - for (j = 0; j < rv; j++) - { - if (utm->rx_buf[j] != ((bytes_received + j) & 0xff)) - { - clib_warning ("error at byte %lld, 0x%x not 0x%x", - bytes_received + j, - utm->rx_buf[j], - ((bytes_received + j) & 0xff)); - } - } -#endif - bytes_received += (u64) rv; - } - } - - after = clib_time_now (&utm->clib_time); - delta = after - before; - bytes_per_second = 0.0; - - if (delta > 0.0) - bytes_per_second = (f64) bytes_received / delta; - - fformat (stdout, - "Done: %lld recv bytes in %.2f seconds, %.2f bytes/sec...\n\n", - bytes_received, delta, bytes_per_second); - fformat (stdout, - "Done: %lld sent bytes in %.2f seconds, %.2f bytes/sec...\n\n", - bytes_sent, delta, bytes_per_second); - fformat (stdout, - "client -> server -> client round trip: %.2f Gbit/sec \n\n", - (bytes_per_second * 8.0) / 1e9); -} - -static void -vl_api_bind_uri_reply_t_handler (vl_api_bind_uri_reply_t * mp) -{ - uri_udp_test_main_t *utm = &uri_udp_test_main; - svm_fifo_segment_create_args_t _a, *a = &_a; - int rv; - - if (mp->segment_name_length == 0) - { - clib_warning ("segment_name_length zero"); - return; - } - - a->segment_name = (char *) mp->segment_name; - a->segment_size = mp->segment_size; - - ASSERT (mp->server_event_queue_address); - - /* Attach to the segment vpp created */ - rv = svm_fifo_segment_attach (a); - if (rv) - { - clib_warning ("svm_fifo_segment_attach ('%s') failed", - mp->segment_name); - return; - } - - utm->our_event_queue = (unix_shared_memory_queue_t *) - mp->server_event_queue_address; - - utm->state = STATE_READY; -} - -static void -vl_api_map_another_segment_t_handler (vl_api_map_another_segment_t * mp) -{ - svm_fifo_segment_create_args_t _a, *a = &_a; - int rv; - - a->segment_name = (char *) mp->segment_name; - a->segment_size = mp->segment_size; - /* Attach to the segment vpp created */ - rv = svm_fifo_segment_attach (a); - if (rv) - { - clib_warning ("svm_fifo_segment_attach ('%s') failed", - mp->segment_name); - return; - } - clib_warning ("Mapped new segment '%s' size %d", mp->segment_name, - mp->segment_size); -} - -static void -vl_api_connect_uri_t_handler (vl_api_connect_uri_t * mp) -{ - u32 segment_index; - uri_udp_test_main_t *utm = &uri_udp_test_main; - svm_fifo_segment_main_t *sm = &svm_fifo_segment_main; - svm_fifo_segment_create_args_t _a, *a = &_a; - svm_fifo_segment_private_t *seg; - unix_shared_memory_queue_t *client_q; - vl_api_connect_uri_reply_t *rmp; - session_t *session; - int rv = 0; - - /* Create the segment */ - a->segment_name = (char *) format (0, "%d:segment%d%c", utm->my_pid, - utm->unique_segment_index++, 0); - a->segment_size = utm->configured_segment_size; - - rv = svm_fifo_segment_create (a); - if (rv) - { - clib_warning ("sm_fifo_segment_create ('%s') failed", a->segment_name); - rv = VNET_API_ERROR_URI_FIFO_CREATE_FAILED; - goto send_reply; - } - - vec_add2 (utm->seg, seg, 1); - - segment_index = vec_len (sm->segments) - 1; - - memcpy (seg, sm->segments + segment_index, sizeof (utm->seg[0])); - - pool_get (utm->sessions, session); - - /* - * By construction the master's idea of the rx fifo 
ends up in - * fsh->fifos[0], and the master's idea of the tx fifo ends up in - * fsh->fifos[1]. - */ - session->server_rx_fifo = svm_fifo_segment_alloc_fifo (utm->seg, - 128 * 1024); - ASSERT (session->server_rx_fifo); - - session->server_tx_fifo = svm_fifo_segment_alloc_fifo (utm->seg, - 128 * 1024); - ASSERT (session->server_tx_fifo); - - session->server_rx_fifo->server_session_index = session - utm->sessions; - session->server_tx_fifo->server_session_index = session - utm->sessions; - utm->cut_through_session_index = session - utm->sessions; - - rv = pthread_create (&utm->cut_through_thread_handle, - NULL /*attr */ , cut_through_thread_fn, 0); - if (rv) - { - clib_warning ("pthread_create returned %d", rv); - rv = VNET_API_ERROR_SYSCALL_ERROR_1; - } - -send_reply: - rmp = vl_msg_api_alloc (sizeof (*rmp)); - memset (rmp, 0, sizeof (*rmp)); - - rmp->_vl_msg_id = ntohs (VL_API_CONNECT_URI_REPLY); - rmp->context = mp->context; - rmp->retval = ntohl (rv); - rmp->segment_name_length = vec_len (a->segment_name); - memcpy (rmp->segment_name, a->segment_name, vec_len (a->segment_name)); - - vec_free (a->segment_name); - - client_q = (unix_shared_memory_queue_t *) mp->client_queue_address; - vl_msg_api_send_shmem (client_q, (u8 *) & rmp); -} - -static void -vl_api_unbind_uri_reply_t_handler (vl_api_unbind_uri_reply_t * mp) -{ - uri_udp_test_main_t *utm = &uri_udp_test_main; - - if (mp->retval != 0) - clib_warning ("returned %d", ntohl (mp->retval)); - - utm->state = STATE_START; -} - -static void -vl_api_accept_session_t_handler (vl_api_accept_session_t * mp) -{ - uri_udp_test_main_t *utm = &uri_udp_test_main; - vl_api_accept_session_reply_t *rmp; - svm_fifo_t *rx_fifo, *tx_fifo; - session_t *session; - static f64 start_time; - u64 key; - - if (start_time == 0.0) - start_time = clib_time_now (&utm->clib_time); - - utm->vpp_event_queue = (unix_shared_memory_queue_t *) - mp->vpp_event_queue_address; - - pool_get (utm->sessions, session); - - rx_fifo = (svm_fifo_t *) mp->server_rx_fifo; - rx_fifo->client_session_index = session - utm->sessions; - tx_fifo = (svm_fifo_t *) mp->server_tx_fifo; - tx_fifo->client_session_index = session - utm->sessions; - - session->server_rx_fifo = rx_fifo; - session->server_tx_fifo = tx_fifo; - - key = (((u64) mp->session_thread_index) << 32) | (u64) mp->session_index; - - hash_set (utm->session_index_by_vpp_handles, key, session - utm->sessions); - - utm->state = STATE_READY; - - if (pool_elts (utm->sessions) && (pool_elts (utm->sessions) % 20000) == 0) - { - f64 now = clib_time_now (&utm->clib_time); - fformat (stdout, "%d active sessions in %.2f seconds, %.2f/sec...\n", - pool_elts (utm->sessions), now - start_time, - (f64) pool_elts (utm->sessions) / (now - start_time)); - } - - rmp = vl_msg_api_alloc (sizeof (*rmp)); - memset (rmp, 0, sizeof (*rmp)); - rmp->_vl_msg_id = ntohs (VL_API_ACCEPT_SESSION_REPLY); - rmp->session_type = mp->session_type; - rmp->session_index = mp->session_index; - rmp->session_thread_index = mp->session_thread_index; - vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & rmp); -} - -static void -vl_api_disconnect_session_t_handler (vl_api_disconnect_session_t * mp) -{ - uri_udp_test_main_t *utm = &uri_udp_test_main; - session_t *session; - vl_api_disconnect_session_reply_t *rmp; - uword *p; - int rv = 0; - u64 key; - - key = (((u64) mp->session_thread_index) << 32) | (u64) mp->session_index; - - p = hash_get (utm->session_index_by_vpp_handles, key); - - if (p) - { - session = pool_elt_at_index (utm->sessions, p[0]); - hash_unset 
(utm->session_index_by_vpp_handles, key); - pool_put (utm->sessions, session); - } - else - { - clib_warning ("couldn't find session key %llx", key); - rv = -11; - } - - rmp = vl_msg_api_alloc (sizeof (*rmp)); - memset (rmp, 0, sizeof (*rmp)); - rmp->_vl_msg_id = ntohs (VL_API_DISCONNECT_SESSION_REPLY); - rmp->retval = rv; - rmp->session_index = mp->session_index; - rmp->session_thread_index = mp->session_thread_index; - vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & rmp); -} - -static void -vl_api_connect_uri_reply_t_handler (vl_api_connect_uri_reply_t * mp) -{ - svm_fifo_segment_main_t *sm = &svm_fifo_segment_main; - uri_udp_test_main_t *utm = &uri_udp_test_main; - svm_fifo_segment_create_args_t _a, *a = &_a; - ssvm_shared_header_t *sh; - svm_fifo_segment_private_t *seg; - svm_fifo_segment_header_t *fsh; - session_t *session; - u32 segment_index; - int rv; - - ASSERT (utm->i_am_master == 0); - - if (mp->segment_name_length == 0) - { - clib_warning ("segment_name_length zero"); - return; - } - - memset (a, 0, sizeof (*a)); - - a->segment_name = (char *) mp->segment_name; - - sleep (1); - - rv = svm_fifo_segment_attach (a); - if (rv) - { - clib_warning ("sm_fifo_segment_create ('%v') failed", mp->segment_name); - return; - } - - segment_index = vec_len (sm->segments) - 1; - - vec_add2 (utm->seg, seg, 1); - - memcpy (seg, sm->segments + segment_index, sizeof (*seg)); - sh = seg->ssvm.sh; - fsh = (svm_fifo_segment_header_t *) sh->opaque[0]; - - while (vec_len (fsh->fifos) < 2) - sleep (1); - - pool_get (utm->sessions, session); - utm->cut_through_session_index = session - utm->sessions; - - session->server_rx_fifo = (svm_fifo_t *) fsh->fifos[0]; - ASSERT (session->server_rx_fifo); - session->server_tx_fifo = (svm_fifo_t *) fsh->fifos[1]; - ASSERT (session->server_tx_fifo); - - /* security: could unlink /dev/shm/segment_name> here, maybe */ - - utm->state = STATE_READY; -} - -#define foreach_uri_msg \ -_(BIND_URI_REPLY, bind_uri_reply) \ -_(CONNECT_URI, connect_uri) \ -_(CONNECT_URI_REPLY, connect_uri_reply) \ -_(UNBIND_URI_REPLY, unbind_uri_reply) \ -_(ACCEPT_SESSION, accept_session) \ -_(DISCONNECT_SESSION, disconnect_session) \ -_(MAP_ANOTHER_SEGMENT, map_another_segment) - -void -uri_api_hookup (uri_udp_test_main_t * utm) -{ -#define _(N,n) \ - vl_msg_api_set_handlers(VL_API_##N, #n, \ - vl_api_##n##_t_handler, \ - vl_noop_handler, \ - vl_api_##n##_t_endian, \ - vl_api_##n##_t_print, \ - sizeof(vl_api_##n##_t), 1); - foreach_uri_msg; -#undef _ - -} - - -int -connect_to_vpp (char *name) -{ - uri_udp_test_main_t *utm = &uri_udp_test_main; - api_main_t *am = &api_main; - - if (vl_client_connect_to_vlib ("/vpe-api", name, 32) < 0) - return -1; - - utm->vl_input_queue = am->shmem_hdr->vl_input_queue; - utm->my_client_index = am->my_client_index; - - return 0; -} - -void -vlib_cli_output (struct vlib_main_t *vm, char *fmt, ...) 
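
The disconnect path above identifies a session by a single u64 hash key with the vpp thread index in the high 32 bits and the per-thread session index in the low 32 bits. Packing and unpacking that key looks like this (standalone sketch):

#include <stdio.h>
#include <stdint.h>

/* Thread index in the high half, session index in the low half. */
static uint64_t
make_key (uint32_t thread_index, uint32_t session_index)
{
  return ((uint64_t) thread_index << 32) | (uint64_t) session_index;
}

static void
split_key (uint64_t key, uint32_t * thread_index, uint32_t * session_index)
{
  *thread_index = (uint32_t) (key >> 32);
  *session_index = (uint32_t) (key & 0xffffffff);
}

int
main (void)
{
  uint32_t t, s;
  uint64_t key = make_key (2, 77);
  split_key (key, &t, &s);
  printf ("key=0x%llx thread=%u session=%u\n",
          (unsigned long long) key, t, s);
  return 0;
}
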
-{ - clib_warning ("BUG"); -} - -static void -init_error_string_table (uri_udp_test_main_t * utm) -{ - utm->error_string_by_error_number = hash_create (0, sizeof (uword)); - -#define _(n,v,s) hash_set (utm->error_string_by_error_number, -v, s); - foreach_vnet_api_error; -#undef _ - - hash_set (utm->error_string_by_error_number, 99, "Misc"); -} - -void -handle_fifo_event_server_rx (uri_udp_test_main_t * utm, - session_fifo_event_t * e) -{ - svm_fifo_t *rx_fifo, *tx_fifo; - int nbytes; - - session_fifo_event_t evt; - unix_shared_memory_queue_t *q; - int rv; - - rx_fifo = e->fifo; - tx_fifo = utm->sessions[rx_fifo->client_session_index].server_tx_fifo; - - do - { - nbytes = svm_fifo_dequeue_nowait (rx_fifo, 0, - vec_len (utm->rx_buf), utm->rx_buf); - } - while (nbytes <= 0); - do - { - rv = svm_fifo_enqueue_nowait (tx_fifo, 0, nbytes, utm->rx_buf); - } - while (rv == -2); - - /* Fabricate TX event, send to vpp */ - evt.fifo = tx_fifo; - evt.event_type = FIFO_EVENT_SERVER_TX; - /* $$$$ for event logging */ - evt.enqueue_length = nbytes; - evt.event_id = e->event_id; - q = utm->vpp_event_queue; - unix_shared_memory_queue_add (q, (u8 *) & evt, 0 /* do wait for mutex */ ); -} - -void -handle_event_queue (uri_udp_test_main_t * utm) -{ - session_fifo_event_t _e, *e = &_e;; - - while (1) - { - unix_shared_memory_queue_sub (utm->our_event_queue, (u8 *) e, - 0 /* nowait */ ); - switch (e->event_type) - { - case FIFO_EVENT_SERVER_RX: - handle_fifo_event_server_rx (utm, e); - break; - - case FIFO_EVENT_SERVER_EXIT: - return; - - default: - clib_warning ("unknown event type %d", e->event_type); - break; - } - if (PREDICT_FALSE (utm->time_to_stop == 1)) - break; - if (PREDICT_FALSE (utm->time_to_print_stats == 1)) - { - utm->time_to_print_stats = 0; - fformat (stdout, "%d connections\n", pool_elts (utm->sessions)); - } - } -} - -void -uri_udp_test (uri_udp_test_main_t * utm) -{ - vl_api_bind_uri_t *bmp; - vl_api_unbind_uri_t *ump; - - bmp = vl_msg_api_alloc (sizeof (*bmp)); - memset (bmp, 0, sizeof (*bmp)); - - bmp->_vl_msg_id = ntohs (VL_API_BIND_URI); - bmp->client_index = utm->my_client_index; - bmp->context = ntohl (0xfeedface); - bmp->initial_segment_size = 256 << 20; /* size of initial segment */ - bmp->options[SESSION_OPTIONS_FLAGS] = - SESSION_OPTIONS_FLAGS_USE_FIFO | SESSION_OPTIONS_FLAGS_ADD_SEGMENT; - bmp->options[SESSION_OPTIONS_RX_FIFO_SIZE] = 16 << 10; - bmp->options[SESSION_OPTIONS_TX_FIFO_SIZE] = 16 << 10; - bmp->options[SESSION_OPTIONS_ADD_SEGMENT_SIZE] = 128 << 20; - memcpy (bmp->uri, utm->uri, vec_len (utm->uri)); - vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & bmp); - - if (wait_for_state_change (utm, STATE_READY)) - { - clib_warning ("timeout waiting for STATE_READY"); - return; - } - - handle_event_queue (utm); - - ump = vl_msg_api_alloc (sizeof (*ump)); - memset (ump, 0, sizeof (*ump)); - - ump->_vl_msg_id = ntohs (VL_API_UNBIND_URI); - ump->client_index = utm->my_client_index; - memcpy (ump->uri, utm->uri, vec_len (utm->uri)); - vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & ump); - - if (wait_for_state_change (utm, STATE_START)) - { - clib_warning ("timeout waiting for STATE_START"); - return; - } - - fformat (stdout, "Test complete...\n"); -} - -int -main (int argc, char **argv) -{ - uri_udp_test_main_t *utm = &uri_udp_test_main; - unformat_input_t _argv, *a = &_argv; - u8 *chroot_prefix; - u8 *heap; - u8 *bind_name = (u8 *) "udp://0.0.0.0/1234"; - u32 tmp; - mheap_t *h; - session_t *session; - int i; - int i_am_master = 1; - - clib_mem_init (0, 256 << 20); - - heap 
= clib_mem_get_per_cpu_heap (); - h = mheap_header (heap); - - /* make the main heap thread-safe */ - h->flags |= MHEAP_FLAG_THREAD_SAFE; - - vec_validate (utm->rx_buf, 8192); - - utm->session_index_by_vpp_handles = hash_create (0, sizeof (uword)); - - utm->my_pid = getpid (); - utm->configured_segment_size = 1 << 20; - - clib_time_init (&utm->clib_time); - init_error_string_table (utm); - svm_fifo_segment_init (0x200000000ULL, 20); - unformat_init_command_line (a, argv); - - while (unformat_check_input (a) != UNFORMAT_END_OF_INPUT) - { - if (unformat (a, "chroot prefix %s", &chroot_prefix)) - { - vl_set_memory_root_path ((char *) chroot_prefix); - } - else if (unformat (a, "uri %s", &bind_name)) - ; - else if (unformat (a, "segment-size %dM", &tmp)) - utm->configured_segment_size = tmp << 20; - else if (unformat (a, "segment-size %dG", &tmp)) - utm->configured_segment_size = tmp << 30; - else if (unformat (a, "master")) - i_am_master = 1; - else if (unformat (a, "slave")) - i_am_master = 0; - else - { - fformat (stderr, "%s: usage [master|slave]\n"); - exit (1); - } - } - - utm->cut_through_session_index = ~0; - utm->uri = format (0, "%s%c", bind_name, 0); - utm->i_am_master = i_am_master; - utm->segment_main = &svm_fifo_segment_main; - - utm->connect_uri = format (0, "udp://10.0.0.1/1234%c", 0); - - setup_signal_handlers (); - - uri_api_hookup (utm); - - if (connect_to_vpp (i_am_master ? "uri_udp_master" : "uri_udp_slave") < 0) - { - svm_region_exit (); - fformat (stderr, "Couldn't connect to vpe, exiting...\n"); - exit (1); - } - - if (i_am_master == 0) - { - uri_udp_slave_test (utm); - exit (0); - } - - /* $$$$ hack preallocation */ - for (i = 0; i < 200000; i++) - { - pool_get (utm->sessions, session); - memset (session, 0, sizeof (*session)); - } - for (i = 0; i < 200000; i++) - pool_put_index (utm->sessions, i); - - uri_udp_test (utm); - - vl_client_disconnect_from_vlib (); - exit (0); -} - -#undef vl_api_version -#define vl_api_version(n,v) static u32 vpe_api_version = v; -#include -#undef vl_api_version - -void -vl_client_add_api_signatures (vl_api_memclnt_create_t * mp) -{ - /* - * Send the main API signature in slot 0. This bit of code must - * match the checks in ../vpe/api/api.c: vl_msg_api_version_check(). - */ - mp->api_versions[0] = clib_host_to_net_u32 (vpe_api_version); -} - -u32 -vl (void *p) -{ - return vec_len (p); -} - -/* - * fd.io coding-style-patch-verification: ON - * - * Local Variables: - * eval: (c-set-style "gnu") - * End: - */ diff --git a/src/uri/uritest.c b/src/uri/uritest.c deleted file mode 100644 index edcdb3ad..00000000 --- a/src/uri/uritest.c +++ /dev/null @@ -1,484 +0,0 @@ -/* - * Copyright (c) 2016 Cisco and/or its affiliates. - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define vl_typedefs /* define message structures */ -#include -#undef vl_typedefs - -/* declare message handlers for each api */ - -#define vl_endianfun /* define message structures */ -#include -#undef vl_endianfun - -/* instantiate all the print functions we know about */ -#define vl_print(handle, ...) -#define vl_printfun -#include -#undef vl_printfun - -typedef enum -{ - STATE_START, - STATE_READY, - STATE_DISCONNECTING, -} connection_state_t; - -typedef struct -{ - /* vpe input queue */ - unix_shared_memory_queue_t *vl_input_queue; - - /* API client handle */ - u32 my_client_index; - - /* role */ - int i_am_master; - - /* The URI we're playing with */ - u8 *uri; - - /* fifo segment */ - svm_fifo_segment_private_t *seg; - - svm_fifo_t *rx_fifo; - svm_fifo_t *tx_fifo; - - /* For deadman timers */ - clib_time_t clib_time; - - /* State of the connection, shared between msg RX thread and main thread */ - volatile connection_state_t state; - - /* VNET_API_ERROR_FOO -> "Foo" hash table */ - uword *error_string_by_error_number; -} uritest_main_t; - -#if CLIB_DEBUG > 0 -#define NITER 1000 -#else -#define NITER 1000000 -#endif - -uritest_main_t uritest_main; - -u8 * -format_api_error (u8 * s, va_list * args) -{ - uritest_main_t *utm = va_arg (*args, uritest_main_t *); - i32 error = va_arg (*args, u32); - uword *p; - - p = hash_get (utm->error_string_by_error_number, -error); - - if (p) - s = format (s, "%s", p[0]); - else - s = format (s, "%d", error); - return s; -} - -int -wait_for_state_change (uritest_main_t * utm, connection_state_t state) -{ - f64 timeout = clib_time_now (&utm->clib_time) + 1.0; - - while (clib_time_now (&utm->clib_time) < timeout) - { - if (utm->state == state) - return 0; - } - return -1; -} - -static void -vl_api_bind_uri_reply_t_handler (vl_api_bind_uri_reply_t * mp) -{ - uritest_main_t *utm = &uritest_main; - svm_fifo_segment_create_args_t _a, *a = &_a; - int rv; - - ASSERT (utm->i_am_master); - - if (mp->segment_name_length == 0) - { - clib_warning ("segment_name_length zero"); - return; - } - - a->segment_name = (char *) mp->segment_name; - a->segment_size = mp->segment_size; - - /* Create the segment */ - rv = svm_fifo_segment_create (a); - if (rv) - { - clib_warning ("sm_fifo_segment_create ('%s') failed", mp->segment_name); - return; - } - - vec_validate (utm->seg, 0); - - memcpy (utm->seg, a->rv, sizeof (*utm->seg)); - - /* - * By construction the master's idea of the rx fifo ends up in - * fsh->fifos[0], and the master's idea of the tx fifo ends up in - * fsh->fifos[1]. 
- */ - utm->rx_fifo = svm_fifo_segment_alloc_fifo (utm->seg, 10240); - ASSERT (utm->rx_fifo); - - utm->tx_fifo = svm_fifo_segment_alloc_fifo (utm->seg, 10240); - ASSERT (utm->tx_fifo); - - utm->state = STATE_READY; -} - -static void -vl_api_connect_uri_reply_t_handler (vl_api_connect_uri_reply_t * mp) -{ - uritest_main_t *utm = &uritest_main; - svm_fifo_segment_create_args_t _a, *a = &_a; - ssvm_shared_header_t *sh; - svm_fifo_segment_header_t *fsh; - int rv; - - ASSERT (utm->i_am_master == 0); - - if (mp->segment_name_length == 0) - { - clib_warning ("segment_name_length zero"); - return; - } - - memset (a, 0, sizeof (*a)); - - a->segment_name = (char *) mp->segment_name; - - rv = svm_fifo_segment_attach (a); - if (rv) - { - clib_warning ("sm_fifo_segment_create ('%s') failed", mp->segment_name); - return; - } - - vec_validate (utm->seg, 0); - - memcpy (utm->seg, a->rv, sizeof (*utm->seg)); - sh = utm->seg->ssvm.sh; - fsh = (svm_fifo_segment_header_t *) sh->opaque[0]; - - while (vec_len (fsh->fifos) < 2) - sleep (1); - - utm->rx_fifo = (svm_fifo_t *) fsh->fifos[1]; - ASSERT (utm->rx_fifo); - utm->tx_fifo = (svm_fifo_t *) fsh->fifos[0]; - ASSERT (utm->tx_fifo); - - /* security: could unlink /dev/shm/segment_name> here, maybe */ - - utm->state = STATE_READY; -} - -static void -vl_api_unbind_uri_reply_t_handler (vl_api_unbind_uri_reply_t * mp) -{ - uritest_main_t *utm = &uritest_main; - - if (mp->retval != 0) - clib_warning ("returned %d", ntohl (mp->retval)); - - utm->state = STATE_START; -} - -#define foreach_uri_msg \ -_(BIND_URI_REPLY, bind_uri_reply) \ -_(CONNECT_URI_REPLY, connect_uri_reply) \ -_(UNBIND_URI_REPLY, unbind_uri_reply) - -void -uri_api_hookup (uritest_main_t * utm) -{ -#define _(N,n) \ - vl_msg_api_set_handlers(VL_API_##N, #n, \ - vl_api_##n##_t_handler, \ - vl_noop_handler, \ - vl_api_##n##_t_endian, \ - vl_api_##n##_t_print, \ - sizeof(vl_api_##n##_t), 1); - foreach_uri_msg; -#undef _ - -} - - -int -connect_to_vpp (char *name) -{ - uritest_main_t *utm = &uritest_main; - api_main_t *am = &api_main; - - if (vl_client_connect_to_vlib ("/vpe-api", name, 32) < 0) - return -1; - - utm->vl_input_queue = am->shmem_hdr->vl_input_queue; - utm->my_client_index = am->my_client_index; - - return 0; -} - -void -vlib_cli_output (struct vlib_main_t *vm, char *fmt, ...) 
-{ - clib_warning ("BUG"); -} - -static void -init_error_string_table (uritest_main_t * utm) -{ - utm->error_string_by_error_number = hash_create (0, sizeof (uword)); - -#define _(n,v,s) hash_set (utm->error_string_by_error_number, -v, s); - foreach_vnet_api_error; -#undef _ - - hash_set (utm->error_string_by_error_number, 99, "Misc"); -} - -void -uritest_master (uritest_main_t * utm) -{ - vl_api_bind_uri_t *bmp; - vl_api_unbind_uri_t *ump; - int i; - u8 *test_data = 0; - u8 *reply = 0; - u32 reply_len; - int mypid = getpid (); - - for (i = 0; i < 2048; i++) - vec_add1 (test_data, 'a' + (i % 32)); - - bmp = vl_msg_api_alloc (sizeof (*bmp)); - memset (bmp, 0, sizeof (*bmp)); - - bmp->_vl_msg_id = ntohs (VL_API_BIND_URI); - bmp->client_index = utm->my_client_index; - bmp->context = ntohl (0xfeedface); - bmp->segment_size = 256 << 10; - memcpy (bmp->uri, utm->uri, vec_len (utm->uri)); - vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & bmp); - - if (wait_for_state_change (utm, STATE_READY)) - { - clib_warning ("timeout waiting for STATE_READY"); - return; - } - - for (i = 0; i < NITER; i++) - svm_fifo_enqueue (utm->tx_fifo, mypid, vec_len (test_data), test_data); - - vec_validate (reply, 0); - - reply_len = svm_fifo_dequeue (utm->rx_fifo, mypid, vec_len (reply), reply); - - if (reply_len != 1) - clib_warning ("reply length %d", reply_len); - - if (reply[0] == 1) - fformat (stdout, "Test OK..."); - - ump = vl_msg_api_alloc (sizeof (*ump)); - memset (ump, 0, sizeof (*ump)); - - ump->_vl_msg_id = ntohs (VL_API_UNBIND_URI); - ump->client_index = utm->my_client_index; - memcpy (ump->uri, utm->uri, vec_len (utm->uri)); - vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & ump); - - if (wait_for_state_change (utm, STATE_START)) - { - clib_warning ("timeout waiting for STATE_READY"); - return; - } - - fformat (stdout, "Master done...\n"); -} - -void -uritest_slave (uritest_main_t * utm) -{ - vl_api_connect_uri_t *cmp; - int i, j; - u8 *test_data = 0; - u8 *reply = 0; - u32 bytes_received = 0; - u32 actual_bytes; - int mypid = getpid (); - u8 ok; - f64 before, after, delta, bytes_per_second; - - vec_validate (test_data, 4095); - - cmp = vl_msg_api_alloc (sizeof (*cmp)); - memset (cmp, 0, sizeof (*cmp)); - - cmp->_vl_msg_id = ntohs (VL_API_CONNECT_URI); - cmp->client_index = utm->my_client_index; - cmp->context = ntohl (0xfeedface); - memcpy (cmp->uri, utm->uri, vec_len (utm->uri)); - vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & cmp); - - if (wait_for_state_change (utm, STATE_READY)) - { - clib_warning ("timeout waiting for STATE_READY"); - return; - } - - ok = 1; - before = clib_time_now (&utm->clib_time); - for (i = 0; i < NITER; i++) - { - actual_bytes = svm_fifo_dequeue (utm->rx_fifo, mypid, - vec_len (test_data), test_data); - j = 0; - while (j < actual_bytes) - { - if (test_data[j] != ('a' + (bytes_received % 32))) - ok = 0; - bytes_received++; - j++; - } - if (bytes_received == NITER * 2048) - break; - } - - vec_add1 (reply, ok); - - svm_fifo_enqueue (utm->tx_fifo, mypid, vec_len (reply), reply); - after = clib_time_now (&utm->clib_time); - delta = after - before; - bytes_per_second = 0.0; - - if (delta > 0.0) - bytes_per_second = (f64) bytes_received / delta; - - fformat (stdout, - "Slave done, %d bytes in %.2f seconds, %.2f bytes/sec...\n", - bytes_received, delta, bytes_per_second); -} - -int -main (int argc, char **argv) -{ - uritest_main_t *utm = &uritest_main; - unformat_input_t _argv, *a = &_argv; - u8 *chroot_prefix; - u8 *heap; - char *bind_name = "fifo:uritest"; - mheap_t 
*h; - int i_am_master = 0; - - clib_mem_init (0, 128 << 20); - - heap = clib_mem_get_per_cpu_heap (); - h = mheap_header (heap); - - /* make the main heap thread-safe */ - h->flags |= MHEAP_FLAG_THREAD_SAFE; - - clib_time_init (&utm->clib_time); - init_error_string_table (utm); - svm_fifo_segment_init (0x200000000ULL, 20); - unformat_init_command_line (a, argv); - - utm->uri = format (0, "%s%c", bind_name, 0); - - while (unformat_check_input (a) != UNFORMAT_END_OF_INPUT) - { - if (unformat (a, "master")) - i_am_master = 1; - else if (unformat (a, "slave")) - i_am_master = 0; - else if (unformat (a, "chroot prefix %s", &chroot_prefix)) - { - vl_set_memory_root_path ((char *) chroot_prefix); - } - else - { - fformat (stderr, "%s: usage [master|slave]\n"); - exit (1); - } - } - - uri_api_hookup (utm); - - if (connect_to_vpp (i_am_master ? "uritest_master" : "uritest_slave") < 0) - { - svm_region_exit (); - fformat (stderr, "Couldn't connect to vpe, exiting...\n"); - exit (1); - } - - utm->i_am_master = i_am_master; - - if (i_am_master) - uritest_master (utm); - else - uritest_slave (utm); - - vl_client_disconnect_from_vlib (); - exit (0); -} - -#undef vl_api_version -#define vl_api_version(n,v) static u32 vpe_api_version = v; -#include -#undef vl_api_version - -void -vl_client_add_api_signatures (vl_api_memclnt_create_t * mp) -{ - /* - * Send the main API signature in slot 0. This bit of code must - * match the checks in ../vpe/api/api.c: vl_msg_api_version_check(). - */ - mp->api_versions[0] = clib_host_to_net_u32 (vpe_api_version); -} - -/* - * fd.io coding-style-patch-verification: ON - * - * Local Variables: - * eval: (c-set-style "gnu") - * End: - */ diff --git a/src/vnet.am b/src/vnet.am index 7125a122..4e30ee92 100644 --- a/src/vnet.am +++ b/src/vnet.am @@ -461,6 +461,7 @@ libvnet_la_SOURCES += \ vnet/tcp/tcp_output.c \ vnet/tcp/tcp_input.c \ vnet/tcp/tcp_newreno.c \ + vnet/tcp/builtin_server.c \ vnet/tcp/tcp.c nobase_include_HEADERS += \ diff --git a/src/vnet/api_errno.h b/src/vnet/api_errno.h index 5e65ac7b..74d39bdb 100644 --- a/src/vnet/api_errno.h +++ b/src/vnet/api_errno.h @@ -103,7 +103,8 @@ _(LISP_RLOC_LOCAL, -110, "RLOC address is local") \ _(BFD_EAGAIN, -111, "BFD object cannot be manipulated at this time") \ _(INVALID_GPE_MODE, -112, "Invalid GPE mode") \ _(LISP_GPE_ENTRIES_PRESENT, -113, "LISP GPE entries are present") \ -_(ADDRESS_FOUND_FOR_INTERFACE, -114, "Address found for interface") +_(ADDRESS_FOUND_FOR_INTERFACE, -114, "Address found for interface") \ +_(SESSION_CONNECT_FAIL, -115, "Session failed to connect") typedef enum { diff --git a/src/vnet/session/application.c b/src/vnet/session/application.c index a561e7d1..a542eebe 100644 --- a/src/vnet/session/application.c +++ b/src/vnet/session/application.c @@ -154,6 +154,15 @@ application_get (u32 index) return pool_elt_at_index (app_pool, index); } +application_t * +application_get_if_valid (u32 index) +{ + if (pool_is_free_index (app_pool, index)) + return 0; + + return pool_elt_at_index (app_pool, index); +} + u32 application_get_index (application_t * app) { @@ -209,7 +218,7 @@ format_application_server (u8 * s, va_list * args) regp = vl_api_client_index_to_registration (srv->api_client_index); if (!regp) - server_name = format (0, "%s%c", regp->name, 0); + server_name = format (0, "builtin-%d%c", srv->index, 0); else server_name = regp->name; @@ -269,11 +278,17 @@ static clib_error_t * show_app_command_fn (vlib_main_t * vm, unformat_input_t * input, vlib_cli_command_t * cmd) { + session_manager_main_t *smm = 
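
application_get_if_valid, added above, differs from application_get by tolerating stale indices: a freed pool slot yields a NULL return instead of tripping an assert, which the disconnect and delete paths need because the application may already be gone. A generic version of the guard over a toy pool (the real check is pool_is_free_index):

#include <stdio.h>
#include <stddef.h>

typedef struct { int live; int value; } elt_t;

static elt_t pool[8];

static elt_t *
elt_get_if_valid (size_t index)
{
  if (index >= sizeof (pool) / sizeof (pool[0]) || !pool[index].live)
    return NULL; /* freed or never allocated: caller must cope */
  return &pool[index];
}

int
main (void)
{
  pool[3].live = 1;
  pool[3].value = 42;
  printf ("%p %p\n", (void *) elt_get_if_valid (3),
          (void *) elt_get_if_valid (5));
  return 0;
}
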
&session_manager_main; application_t *app; int do_server = 0; int do_client = 0; int verbose = 0; + if (!smm->is_enabled) + { + clib_error_return (0, "session layer is not enabled"); + } + while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) { if (unformat (input, "server")) @@ -323,16 +338,20 @@ show_app_command_fn (vlib_main_t * vm, unformat_input_t * input, /* *INDENT-ON* */ } else - vlib_cli_output (vm, "No active server bindings"); + vlib_cli_output (vm, "No active client bindings"); } return 0; } +/* *INDENT-OFF* */ VLIB_CLI_COMMAND (show_app_command, static) = { -.path = "show app",.short_help = - "show app [server|client] [verbose]",.function = show_app_command_fn,}; + .path = "show app", + .short_help = "show app [server|client] [verbose]", + .function = show_app_command_fn, +}; +/* *INDENT-ON* */ /* * fd.io coding-style-patch-verification: ON diff --git a/src/vnet/session/application.h b/src/vnet/session/application.h index 027d6967..480828f7 100644 --- a/src/vnet/session/application.h +++ b/src/vnet/session/application.h @@ -100,6 +100,7 @@ application_t *application_new (application_type_t type, session_type_t sst, session_cb_vft_t * cb_fns); void application_del (application_t * app); application_t *application_get (u32 index); +application_t *application_get_if_valid (u32 index); application_t *application_lookup (u32 api_client_index); u32 application_get_index (application_t * app); diff --git a/src/vnet/session/application_interface.c b/src/vnet/session/application_interface.c index 0ea77fd8..6ddfb70f 100644 --- a/src/vnet/session/application_interface.c +++ b/src/vnet/session/application_interface.c @@ -51,7 +51,7 @@ ip_is_local (ip46_address_t * ip46_address, u8 is_ip4) prefix.fp_proto = FIB_PROTOCOL_IP6; } - clib_memcpy (&prefix.fp_addr, ip46_address, sizeof (ip46_address)); + clib_memcpy (&prefix.fp_addr, ip46_address, sizeof (ip46_address_t)); fei = fib_table_lookup (0, &prefix); flags = fib_entry_get_flags (fei); @@ -186,9 +186,7 @@ vnet_connect_i (u32 api_client_index, u32 api_context, session_type_t sst, /* * Not connecting to a local server. 
Create regular session */ - stream_session_open (sst, ip46, port, app->index); - - return 0; + return stream_session_open (sst, ip46, port, app->index); } /** diff --git a/src/vnet/session/node.c b/src/vnet/session/node.c index e467f4e9..399077de 100644 --- a/src/vnet/session/node.c +++ b/src/vnet/session/node.c @@ -104,9 +104,13 @@ session_fifo_rx_i (vlib_main_t * vm, vlib_node_runtime_t * node, snd_space0 = transport_vft->send_space (tc0); snd_mss0 = transport_vft->send_mss (tc0); + /* Can't make any progress */ if (snd_space0 == 0 || svm_fifo_max_dequeue (s0->server_tx_fifo) == 0 || snd_mss0 == 0) - return 0; + { + vec_add1 (smm->evts_partially_read[thread_index], *e0); + return 0; + } ASSERT (e0->enqueue_length > 0); @@ -143,7 +147,12 @@ session_fifo_rx_i (vlib_main_t * vm, vlib_node_runtime_t * node, if (PREDICT_FALSE (n_bufs < 0.9 * VLIB_FRAME_SIZE)) { /* Keep track of how much we've dequeued and exit */ - e0->enqueue_length -= max_len_to_snd0 - left_to_snd0; + if (left_to_snd0 != max_len_to_snd0) + { + e0->enqueue_length -= max_len_to_snd0 - left_to_snd0; + vec_add1 (smm->evts_partially_read[thread_index], *e0); + } + return -1; } @@ -185,12 +194,13 @@ session_fifo_rx_i (vlib_main_t * vm, vlib_node_runtime_t * node, t0->server_thread_index = s0->thread_index; } + /* *INDENT-OFF* */ if (1) { - ELOG_TYPE_DECLARE (e) = - { - .format = "evt-dequeue: id %d length %d",.format_args = - "i4i4",}; + ELOG_TYPE_DECLARE (e) = { + .format = "evt-dequeue: id %d length %d", + .format_args = "i4i4", + }; struct { u32 data[2]; @@ -199,6 +209,7 @@ session_fifo_rx_i (vlib_main_t * vm, vlib_node_runtime_t * node, ed->data[0] = e0->event_id; ed->data[1] = e0->enqueue_length; } + /* *INDENT-ON* */ len_to_deq0 = (left_to_snd0 < snd_mss0) ? left_to_snd0 : snd_mss0; @@ -289,7 +300,7 @@ session_queue_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node, { session_manager_main_t *smm = vnet_get_session_manager_main (); session_fifo_event_t *my_fifo_events, *e; - u32 n_to_dequeue; + u32 n_to_dequeue, n_events; unix_shared_memory_queue_t *q; int n_tx_packets = 0; u32 my_thread_index = vm->cpu_index; @@ -309,14 +320,16 @@ session_queue_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node, /* min number of events we can dequeue without blocking */ n_to_dequeue = q->cursize; - if (n_to_dequeue == 0) - return 0; - my_fifo_events = smm->fifo_events[my_thread_index]; - /* If we didn't manage to process previous events try going + if (n_to_dequeue == 0 && vec_len (my_fifo_events) == 0) + return 0; + + /* + * If we didn't manage to process previous events try going * over them again without dequeuing new ones. 
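
The node.c changes above stop losing work when the transport cannot make progress: an event whose enqueue_length is not fully consumed is appended to the per-thread evts_partially_read vector and retried on the next dispatch instead of being dropped. A compact model of that bookkeeping, with plain arrays in place of clib vectors:

#include <stdio.h>

typedef struct { int id; int bytes_left; } evt_t;

static evt_t pending[16]; /* evts_partially_read, in miniature */
static int n_pending;

/* Try to send; pretend only 100 bytes fit per round. */
static void
process (evt_t e)
{
  int sent = e.bytes_left < 100 ? e.bytes_left : 100;
  e.bytes_left -= sent;
  if (e.bytes_left > 0)
    pending[n_pending++] = e; /* retry on the next dispatch */
  printf ("evt %d: sent %d, left %d\n", e.id, sent, e.bytes_left);
}

int
main (void)
{
  process ((evt_t) { 1, 250 });
  while (n_pending)
    {
      evt_t e = pending[--n_pending];
      process (e);
    }
  return 0;
}
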
- * XXX: Block senders to sessions that can't keep up */ + */ + /* XXX: Block senders to sessions that can't keep up */ if (vec_len (my_fifo_events) >= 100) goto skip_dequeue; @@ -338,8 +351,8 @@ session_queue_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node, smm->fifo_events[my_thread_index] = my_fifo_events; skip_dequeue: - - for (i = 0; i < n_to_dequeue; i++) + n_events = vec_len (my_fifo_events); + for (i = 0; i < n_events; i++) { svm_fifo_t *f0; /* $$$ prefetch 1 ahead maybe */ stream_session_t *s0; @@ -354,8 +367,13 @@ skip_dequeue: /* $$$ add multiple event queues, per vpp worker thread */ ASSERT (server_thread_index0 == my_thread_index); - s0 = pool_elt_at_index (smm->sessions[my_thread_index], - server_session_index0); + s0 = stream_session_get_if_valid (server_session_index0, + my_thread_index); + if (!s0) + { + clib_warning ("It's dead Jim!"); + continue; + } ASSERT (s0->thread_index == my_thread_index); @@ -380,11 +398,11 @@ skip_dequeue: done: /* Couldn't process all events. Probably out of buffers */ - if (PREDICT_FALSE (i < n_to_dequeue)) + if (PREDICT_FALSE (i < n_events)) { session_fifo_event_t *partially_read = smm->evts_partially_read[my_thread_index]; - vec_add (partially_read, &my_fifo_events[i], n_to_dequeue - i); + vec_add (partially_read, &my_fifo_events[i], n_events - i); vec_free (my_fifo_events); smm->fifo_events[my_thread_index] = partially_read; smm->evts_partially_read[my_thread_index] = 0; @@ -413,8 +431,7 @@ VLIB_REGISTER_NODE (session_queue_node) = .n_errors = ARRAY_LEN (session_queue_error_strings), .error_strings = session_queue_error_strings, .n_next_nodes = SESSION_QUEUE_N_NEXT, - /* .state = VLIB_NODE_STATE_DISABLED, enable on-demand? */ - /* edit / add dispositions here */ + .state = VLIB_NODE_STATE_DISABLED, .next_nodes = { [SESSION_QUEUE_NEXT_DROP] = "error-drop", diff --git a/src/vnet/session/session.api b/src/vnet/session/session.api index a7b28c1d..582765b5 100644 --- a/src/vnet/session/session.api +++ b/src/vnet/session/session.api @@ -422,6 +422,28 @@ define reset_sock_reply { i32 retval; u64 handle; }; + +/** \brief enable/disable session layer + @param client_index - opaque cookie to identify the sender + client to vpp direction only + @param context - sender context, to match reply w/ request + @param is_enable - disable session layer if 0, enable otherwise +*/ +define session_enable_disable { + u32 client_index; + u32 context; + u8 is_enable; +}; + +/** \brief Reply for session enable/disable + @param context - returned sender context, to match reply w/ request + @param retval - return code +*/ +define session_enable_disable_reply { + u32 context; + i32 retval; +}; + /* * Local Variables: * eval: (c-set-style "gnu") diff --git a/src/vnet/session/session.c b/src/vnet/session/session.c index 539da613..422527e0 100644 --- a/src/vnet/session/session.c +++ b/src/vnet/session/session.c @@ -311,11 +311,11 @@ stream_session_half_open_lookup (session_manager_main_t * smm, } transport_connection_t * -stream_session_lookup_transport4 (session_manager_main_t * smm, - ip4_address_t * lcl, ip4_address_t * rmt, +stream_session_lookup_transport4 (ip4_address_t * lcl, ip4_address_t * rmt, u16 lcl_port, u16 rmt_port, u8 proto, u32 my_thread_index) { + session_manager_main_t *smm = &session_manager_main; session_kv4_t kv4; stream_session_t *s; int rv; @@ -345,11 +345,11 @@ stream_session_lookup_transport4 (session_manager_main_t * smm, } transport_connection_t * -stream_session_lookup_transport6 (session_manager_main_t * smm, - ip6_address_t * lcl, 
ip6_address_t * rmt, +stream_session_lookup_transport6 (ip6_address_t * lcl, ip6_address_t * rmt, u16 lcl_port, u16 rmt_port, u8 proto, u32 my_thread_index) { + session_manager_main_t *smm = &session_manager_main; stream_session_t *s; session_kv6_t kv6; int rv; @@ -554,7 +554,7 @@ session_manager_allocate_session_fifos (session_manager_main_t * smm, u8 * added_a_segment) { svm_fifo_segment_private_t *fifo_segment; - u32 fifo_size, default_fifo_size = 8192 /* TODO config */ ; + u32 fifo_size, default_fifo_size = 128 << 10; /* TODO config */ int i; *added_a_segment = 0; @@ -948,7 +948,7 @@ void connects_session_manager_init (session_manager_main_t * smm, u8 session_type) { session_manager_t *sm; - u32 connect_fifo_size = 8 << 10; /* Config? */ + u32 connect_fifo_size = 256 << 10; /* Config? */ u32 default_segment_size = 1 << 20; pool_get (smm->session_managers, sm); @@ -1055,10 +1055,15 @@ stream_session_delete (stream_session_t * s) svm_fifo_segment_free_fifo (fifo_segment, s->server_rx_fifo); svm_fifo_segment_free_fifo (fifo_segment, s->server_tx_fifo); - /* Cleanup app if client */ - app = application_get (s->app_index); + app = application_get_if_valid (s->app_index); + + /* No app. A possibility: after disconnect application called unbind */ + if (!app) + return; + if (app->mode == APP_CLIENT) { + /* Cleanup app if client */ application_del (app); } else if (app->mode == APP_SERVER) @@ -1068,6 +1073,7 @@ stream_session_delete (stream_session_t * s) svm_fifo_t **fifos; u32 fifo_index; + /* For server, see if any segments can be removed */ sm = session_manager_get (app->session_manager_index); /* Delete fifo */ @@ -1096,10 +1102,10 @@ stream_session_delete_notify (transport_connection_t * tc) { stream_session_t *s; + /* App might've been removed already */ s = stream_session_get_if_valid (tc->s_index, tc->thread_index); if (!s) { - clib_warning ("Surprised!"); return; } stream_session_delete (s); @@ -1151,16 +1157,24 @@ stream_session_accept (transport_connection_t * tc, u32 listener_index, return 0; } -void +int stream_session_open (u8 sst, ip46_address_t * addr, u16 port_host_byte_order, u32 app_index) { transport_connection_t *tc; u32 tci; u64 value; + int rv; /* Ask transport to open connection */ - tci = tp_vfts[sst].open (addr, port_host_byte_order); + rv = tp_vfts[sst].open (addr, port_host_byte_order); + if (rv < 0) + { + clib_warning ("Transport failed to open connection."); + return VNET_API_ERROR_SESSION_CONNECT_FAIL; + } + + tci = rv; /* Get transport connection */ tc = tp_vfts[sst].get_half_open (tci); @@ -1170,6 +1184,8 @@ stream_session_open (u8 sst, ip46_address_t * addr, u16 port_host_byte_order, /* Add to the half-open lookup table */ stream_session_half_open_table_add (sst, tc, value); + + return 0; } /** @@ -1216,16 +1232,13 @@ session_get_transport_vft (u8 type) } static clib_error_t * -session_manager_main_init (vlib_main_t * vm) +session_manager_main_enable (vlib_main_t * vm) { - u32 num_threads; - vlib_thread_main_t *vtm = vlib_get_thread_main (); session_manager_main_t *smm = &session_manager_main; + vlib_thread_main_t *vtm = vlib_get_thread_main (); + u32 num_threads; int i; - smm->vlib_main = vm; - smm->vnet_main = vnet_get_main (); - num_threads = 1 /* main thread */ + vtm->n_threads; if (num_threads < 1) @@ -1272,11 +1285,48 @@ session_manager_main_init (vlib_main_t * vm) for (i = 0; i < SESSION_N_TYPES; i++) smm->connect_manager_index[i] = INVALID_INDEX; + smm->is_enabled = 1; + return 0; } -VLIB_INIT_FUNCTION (session_manager_main_init); +clib_error_t * 
+vnet_session_enable_disable (vlib_main_t * vm, u8 is_en) +{ + if (is_en) + { + if (session_manager_main.is_enabled) + return 0; + + vlib_node_set_state (vm, session_queue_node.index, + VLIB_NODE_STATE_POLLING); + + return session_manager_main_enable (vm); + } + else + { + session_manager_main.is_enabled = 0; + vlib_node_set_state (vm, session_queue_node.index, + VLIB_NODE_STATE_DISABLED); + } + + return 0; +} + + +clib_error_t * +session_manager_main_init (vlib_main_t * vm) +{ + session_manager_main_t *smm = &session_manager_main; + + smm->vlib_main = vm; + smm->vnet_main = vnet_get_main (); + smm->is_enabled = 0; + + return 0; +} +VLIB_INIT_FUNCTION (session_manager_main_init) /* * fd.io coding-style-patch-verification: ON * diff --git a/src/vnet/session/session.h b/src/vnet/session/session.h index cf14cca9..46e5ce2c 100644 --- a/src/vnet/session/session.h +++ b/src/vnet/session/session.h @@ -213,12 +213,15 @@ struct _session_manager_main /** Per transport rx function that can either dequeue or peek */ session_fifo_rx_fn *session_rx_fns[SESSION_N_TYPES]; + u8 is_enabled; + /* Convenience */ vlib_main_t *vlib_main; vnet_main_t *vnet_main; }; extern session_manager_main_t session_manager_main; +extern vlib_node_registration_t session_queue_node; /* * Session manager function @@ -276,14 +279,12 @@ stream_session_t *stream_session_lookup6 (ip6_address_t * lcl, ip6_address_t * rmt, u16 lcl_port, u16 rmt_port, u8, u32 thread_index); transport_connection_t - * stream_session_lookup_transport4 (session_manager_main_t * smm, - ip4_address_t * lcl, + * stream_session_lookup_transport4 (ip4_address_t * lcl, ip4_address_t * rmt, u16 lcl_port, u16 rmt_port, u8 proto, u32 thread_index); transport_connection_t - * stream_session_lookup_transport6 (session_manager_main_t * smm, - ip6_address_t * lcl, + * stream_session_lookup_transport6 (ip6_address_t * lcl, ip6_address_t * rmt, u16 lcl_port, u16 rmt_port, u8 proto, u32 thread_index); @@ -338,6 +339,14 @@ stream_session_max_enqueue (transport_connection_t * tc) return svm_fifo_max_enqueue (s->server_rx_fifo); } +always_inline u32 +stream_session_fifo_size (transport_connection_t * tc) +{ + stream_session_t *s = stream_session_get (tc->s_index, tc->thread_index); + return s->server_rx_fifo->nitems; +} + + int stream_session_enqueue_data (transport_connection_t * tc, u8 * data, u16 len, u8 queue_event); @@ -356,8 +365,8 @@ void stream_session_reset_notify (transport_connection_t * tc); int stream_session_accept (transport_connection_t * tc, u32 listener_index, u8 sst, u8 notify); -void stream_session_open (u8 sst, ip46_address_t * addr, - u16 port_host_byte_order, u32 api_client_index); +int stream_session_open (u8 sst, ip46_address_t * addr, + u16 port_host_byte_order, u32 api_client_index); void stream_session_disconnect (stream_session_t * s); void stream_session_cleanup (stream_session_t * s); int @@ -369,6 +378,8 @@ u8 *format_stream_session (u8 * s, va_list * args); void session_register_transport (u8 type, const transport_proto_vft_t * vft); transport_proto_vft_t *session_get_transport_vft (u8 type); +clib_error_t *vnet_session_enable_disable (vlib_main_t * vm, u8 is_en); + #endif /* __included_session_h__ */ /* diff --git a/src/vnet/session/session_api.c b/src/vnet/session/session_api.c index 9d068684..8852fc6e 100644 --- a/src/vnet/session/session_api.c +++ b/src/vnet/session/session_api.c @@ -52,6 +52,8 @@ _(DISCONNECT_SOCK, disconnect_sock) \ _(DISCONNECT_SOCK_REPLY, disconnect_sock_reply) \ _(ACCEPT_SOCK_REPLY, accept_sock_reply) \ 
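
vnet_session_enable_disable above makes the session layer opt-in: enabling flips the session queue node to VLIB_NODE_STATE_POLLING and runs the heavyweight per-thread setup only on the first call, while disabling just parks the node again. The control flow reduced to a skeleton with stand-in functions:

#include <stdio.h>

static int is_enabled;

static void set_node_state (const char *s) { printf ("node -> %s\n", s); }
static int  heavy_init (void) { printf ("allocating per-thread state\n"); return 0; }

static int
session_enable_disable (int is_en)
{
  if (is_en)
    {
      if (is_enabled)
        return 0; /* already up: nothing to do */
      set_node_state ("polling");
      is_enabled = 1;
      return heavy_init ();
    }
  is_enabled = 0;
  set_node_state ("disabled");
  return 0;
}

int
main (void)
{
  session_enable_disable (1);
  session_enable_disable (1); /* idempotent */
  session_enable_disable (0);
  return 0;
}
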
_(RESET_SOCK_REPLY, reset_sock_reply) \ +_(SESSION_ENABLE_DISABLE, session_enable_disable) \ + static int send_add_segment_callback (u32 api_client_index, const u8 * segment_name, @@ -146,7 +148,6 @@ send_session_connected_uri_callback (u32 api_client_index, mp = vl_msg_api_alloc (sizeof (*mp)); mp->_vl_msg_id = clib_host_to_net_u16 (VL_API_CONNECT_URI_REPLY); mp->context = app->api_context; - mp->retval = is_fail; if (!is_fail) { vpp_queue = session_manager_get_vpp_event_queue (s->thread_index); @@ -157,6 +158,7 @@ send_session_connected_uri_callback (u32 api_client_index, mp->session_type = s->session_type; mp->vpp_event_queue_address = (u64) vpp_queue; mp->client_event_queue_address = (u64) app->event_queue; + mp->retval = 0; session_manager_get_segment_info (s->server_segment_index, &seg_name, &mp->segment_size); @@ -164,12 +166,22 @@ send_session_connected_uri_callback (u32 api_client_index, if (mp->segment_name_length) clib_memcpy (mp->segment_name, seg_name, mp->segment_name_length); } + else + { + mp->retval = VNET_API_ERROR_SESSION_CONNECT_FAIL; + } vl_msg_api_send_shmem (q, (u8 *) & mp); /* Remove client if connect failed */ if (is_fail) - application_del (app); + { + application_del (app); + } + else + { + s->session_state = SESSION_STATE_READY; + } return 0; } @@ -431,6 +443,17 @@ api_session_not_valid (u32 session_index, u32 thread_index) return 0; } +static void +vl_api_session_enable_disable_t_handler (vl_api_session_enable_disable_t * mp) +{ + vl_api_session_enable_disable_reply_t *rmp; + vlib_main_t *vm = vlib_get_main (); + int rv = 0; + + vnet_session_enable_disable (vm, mp->is_enable); + REPLY_MACRO (VL_API_SESSION_ENABLE_DISABLE_REPLY); +} + static void vl_api_bind_uri_t_handler (vl_api_bind_uri_t * mp) { @@ -476,7 +499,6 @@ vl_api_bind_uri_t_handler (vl_api_bind_uri_t * mp) } })); /* *INDENT-ON* */ - } static void @@ -493,7 +515,9 @@ vl_api_unbind_uri_t_handler (vl_api_unbind_uri_t * mp) static void vl_api_connect_uri_t_handler (vl_api_connect_uri_t * mp) { + vl_api_connect_uri_reply_t *rmp; vnet_connect_args_t _a, *a = &_a; + int rv; a->uri = (char *) mp->uri; a->api_client_index = mp->client_index; @@ -501,7 +525,19 @@ vl_api_connect_uri_t_handler (vl_api_connect_uri_t * mp) a->options = mp->options; a->session_cb_vft = &uri_session_cb_vft; a->mp = mp; - vnet_connect_uri (a); + + rv = vnet_connect_uri (a); + + if (rv == 0 || rv == VNET_CONNECT_REDIRECTED) + return; + + /* Got some error, relay it */ + + /* *INDENT-OFF* */ + REPLY_MACRO2 (VL_API_CONNECT_URI_REPLY, ({ + rmp->retval = rv; + })); + /* *INDENT-ON* */ } static void @@ -662,7 +698,9 @@ vl_api_unbind_sock_t_handler (vl_api_unbind_sock_t * mp) static void vl_api_connect_sock_t_handler (vl_api_connect_sock_t * mp) { + vl_api_connect_sock_reply_t *rmp; vnet_connect_args_t _a, *a = &_a; + int rv; clib_memcpy (&a->tep.ip, mp->ip, (mp->is_ip4 ? 
sizeof (ip4_address_t) : @@ -675,7 +713,18 @@ vl_api_connect_sock_t_handler (vl_api_connect_sock_t * mp) a->api_context = mp->context; a->mp = mp; - vnet_connect (a); + rv = vnet_connect (a); + + if (rv == 0 || rv == VNET_CONNECT_REDIRECTED) + return; + + /* Got some error, relay it */ + + /* *INDENT-OFF* */ + REPLY_MACRO2 (VL_API_CONNECT_SOCK_REPLY, ({ + rmp->retval = rv; + })); + /* *INDENT-ON* */ } static void diff --git a/src/vnet/session/session_cli.c b/src/vnet/session/session_cli.c index b2943a1c..b029ee65 100644 --- a/src/vnet/session/session_cli.c +++ b/src/vnet/session/session_cli.c @@ -60,7 +60,7 @@ format_stream_session (u8 * s, va_list * args) } else { - clib_warning ("Session in unknown state!"); + clib_warning ("Session in state: %d!", ss->session_state); } vec_free (str); @@ -78,6 +78,11 @@ show_session_command_fn (vlib_main_t * vm, unformat_input_t * input, stream_session_t *s; u8 *str = 0; + if (!smm->is_enabled) + { + return clib_error_return (0, "session layer is not enabled"); + } + while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) { if (unformat (input, "verbose")) @@ -126,11 +131,14 @@ show_session_command_fn (vlib_main_t * vm, unformat_input_t * input, return 0; } -VLIB_CLI_COMMAND (show_uri_command, static) = +/* *INDENT-OFF* */ +VLIB_CLI_COMMAND (show_session_command, static) = { -.path = "show session",.short_help = "show session [verbose]",.function = - show_session_command_fn,}; - + .path = "show session", + .short_help = "show session [verbose]", + .function = show_session_command_fn, +}; +/* *INDENT-ON* */ static clib_error_t * clear_session_command_fn (vlib_main_t * vm, unformat_input_t * input, @@ -142,6 +150,11 @@ clear_session_command_fn (vlib_main_t * vm, unformat_input_t * input, stream_session_t *pool, *session; application_t *server; + if (!smm->is_enabled) + { + return clib_error_return (0, "session layer is not enabled"); + } + while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) { if (unformat (input, "thread %d", &thread_index)) @@ -174,11 +187,43 @@ clear_session_command_fn (vlib_main_t * vm, unformat_input_t * input, return 0; } -VLIB_CLI_COMMAND (clear_uri_session_command, static) = +/* *INDENT-OFF* */ +VLIB_CLI_COMMAND (clear_session_command, static) = +{ + .path = "clear session", + .short_help = "clear session thread <thread> session <index>", + .function = clear_session_command_fn, +}; +/* *INDENT-ON* */ + +static clib_error_t * +session_enable_disable_fn (vlib_main_t * vm, unformat_input_t * input, + vlib_cli_command_t * cmd) +{ + u8 is_en = 1; + + while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) + { + if (unformat (input, "enable")) + is_en = 1; + else if (unformat (input, "disable")) + is_en = 0; + else + return clib_error_return (0, "unknown input `%U'", + format_unformat_error, input); + } + + return vnet_session_enable_disable (vm, is_en); +} + +/* *INDENT-OFF* */ +VLIB_CLI_COMMAND (session_enable_disable_command, static) = { -.path = "clear session",.short_help = - "clear session thread <thread> session <index>",.function = - clear_session_command_fn,}; + .path = "session", + .short_help = "session [enable|disable]", + .function = session_enable_disable_fn, +}; +/* *INDENT-ON* */ /* * fd.io coding-style-patch-verification: ON diff --git a/src/vnet/tcp/builtin_server.c b/src/vnet/tcp/builtin_server.c new file mode 100644 index 00000000..be65642a --- /dev/null +++ b/src/vnet/tcp/builtin_server.c @@ -0,0 +1,135 @@ +/* +* Copyright (c) 2015-2017 Cisco and/or its affiliates.
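The additions above put the whole session layer behind an explicit opt-in: nothing polls until "session enable" runs or an application asks for it. A minimal sketch of driving the same switch programmatically; demo_enable_sessions is a hypothetical wrapper, while vnet_session_enable_disable is the function this patch exports from session.h:

static clib_error_t *
demo_enable_sessions (vlib_main_t * vm)
{
  /* Idempotent: returns 0 immediately when the session layer is
   * already enabled; otherwise flips the session queue node to
   * POLLING and runs session_manager_main_enable (). */
  return vnet_session_enable_disable (vm, 1 /* is_en */ );
}

VLIB_INIT_FUNCTION (demo_enable_sessions);

The builtin test server later in this series does effectively the same thing from its CLI handler before binding.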
+* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at: +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ + +#include +#include +#include +#include + +int +builtin_session_accept_callback (stream_session_t * s) +{ + clib_warning ("called..."); + s->session_state = SESSION_STATE_READY; + return 0; +} + +void +builtin_session_disconnect_callback (stream_session_t * s) +{ + clib_warning ("called..."); +} + +int +builtin_session_connected_callback (u32 client_index, + stream_session_t * s, u8 is_fail) +{ + clib_warning ("called..."); + return -1; +} + +int +builtin_add_segment_callback (u32 client_index, + const u8 * seg_name, u32 seg_size) +{ + clib_warning ("called..."); + return -1; +} + +int +builtin_redirect_connect_callback (u32 client_index, void *mp) +{ + clib_warning ("called..."); + return -1; +} + +int +builtin_server_rx_callback (stream_session_t * s) +{ + clib_warning ("called..."); + return 0; +} + +static session_cb_vft_t builtin_session_cb_vft = { + .session_accept_callback = builtin_session_accept_callback, + .session_disconnect_callback = builtin_session_disconnect_callback, + .session_connected_callback = builtin_session_connected_callback, + .add_segment_callback = builtin_add_segment_callback, + .redirect_connect_callback = builtin_redirect_connect_callback, + .builtin_server_rx_callback = builtin_server_rx_callback +}; + +static int +server_create (vlib_main_t * vm) +{ + vnet_bind_args_t _a, *a = &_a; + u64 options[SESSION_OPTIONS_N_OPTIONS]; + char segment_name[128]; + + memset (a, 0, sizeof (*a)); + memset (options, 0, sizeof (options)); + + a->uri = "tcp://0.0.0.0/80"; + a->api_client_index = ~0; + a->session_cb_vft = &builtin_session_cb_vft; + a->options = options; + a->options[SESSION_OPTIONS_SEGMENT_SIZE] = 256 << 10; + a->options[SESSION_OPTIONS_RX_FIFO_SIZE] = 64 << 10; + a->options[SESSION_OPTIONS_TX_FIFO_SIZE] = 64 << 10; + a->segment_name = segment_name; + a->segment_name_length = ARRAY_LEN (segment_name); + + return vnet_bind_uri (a); +} + +static clib_error_t * +server_create_command_fn (vlib_main_t * vm, + unformat_input_t * input, vlib_cli_command_t * cmd) +{ + int rv; +#if 0 + while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) + { + if (unformat (input, "whatever %d", &whatever)) + ; + else + return clib_error_return (0, "unknown input `%U'", + format_unformat_error, input); + } +#endif + + rv = server_create (vm); + switch (rv) + { + case 0: + break; + default: + return clib_error_return (0, "server_create returned %d", rv); + } + return 0; +} + +VLIB_CLI_COMMAND (server_create_command, static) = +{ +.path = "test server",.short_help = "test server",.function = + server_create_command_fn,}; + +/* +* fd.io coding-style-patch-verification: ON +* +* Local Variables: +* eval: (c-set-style "gnu") +* End: +*/ diff --git a/src/vnet/tcp/tcp.c b/src/vnet/tcp/tcp.c index 0f9b7097..e5feaeb1 100644 --- a/src/vnet/tcp/tcp.c +++ b/src/vnet/tcp/tcp.c @@ -217,6 +217,7 @@ ip_interface_get_first_ip (u32 sw_if_index, u8 is_ip4) return 0; } +#define PORT_MASK ((1 << 16)- 1) /** * Allocate local port and add if successful 
add entry to local endpoint * table to mark the pair as used. @@ -224,7 +225,6 @@ ip_interface_get_first_ip (u32 sw_if_index, u8 is_ip4) u16 tcp_allocate_local_port (tcp_main_t * tm, ip46_address_t * ip) { - u8 unique = 0; transport_endpoint_t *tep; u32 time_now, tei; u16 min = 1024, max = 65535, tries; /* XXX configurable ? */ @@ -235,37 +235,34 @@ tcp_allocate_local_port (tcp_main_t * tm, ip46_address_t * ip) /* Start at random point or max */ pool_get (tm->local_endpoints, tep); clib_memcpy (&tep->ip, ip, sizeof (*ip)); - tep->port = random_u32 (&time_now) << 16; - tep->port = tep->port < min ? max : tep->port; /* Search for first free slot */ - while (tries) + for (; tries >= 0; tries--) { + u16 port = 0; + + /* Find a port in the specified range */ + while (1) + { + port = random_u32 (&time_now) & PORT_MASK; + if (PREDICT_TRUE (port >= min && port < max)) + break; + } + + tep->port = port; + + /* Look it up */ tei = transport_endpoint_lookup (&tm->local_endpoints_table, &tep->ip, tep->port); + /* If not found, we're done */ if (tei == TRANSPORT_ENDPOINT_INVALID_INDEX) { - unique = 1; - break; + transport_endpoint_table_add (&tm->local_endpoints_table, tep, + tep - tm->local_endpoints); + return tep->port; } - - tep->port--; - - if (tep->port < min) - tep->port = max; - - tries--; } - - if (unique) - { - transport_endpoint_table_add (&tm->local_endpoints_table, tep, - tep - tm->local_endpoints); - - return tep->port; - } - - /* Failed */ + /* No free ports */ pool_put (tm->local_endpoints, tep); return -1; } @@ -360,7 +357,10 @@ tcp_connection_open (ip46_address_t * rmt_addr, u16 rmt_port, u8 is_ip4) /* Allocate source port */ lcl_port = tcp_allocate_local_port (tm, &lcl_addr); if (lcl_port < 1) - return -1; + { + clib_warning ("Failed to allocate src port"); + return -1; + } /* * Create connection and send SYN diff --git a/src/vnet/tcp/tcp.h b/src/vnet/tcp/tcp.h index 22f00a63..3560509d 100644 --- a/src/vnet/tcp/tcp.h +++ b/src/vnet/tcp/tcp.h @@ -30,7 +30,8 @@ #define TCP_MAX_OPTION_SPACE 40 #define TCP_DUPACK_THRESHOLD 3 -#define TCP_DEFAULT_RX_FIFO_SIZE 64 << 10 +#define TCP_MAX_RX_FIFO_SIZE 2 << 20 +#define TCP_IW_N_SEGMENTS 10 /** TCP FSM state definitions as per RFC793. 
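The rewritten allocator gives up the old decrement-and-wrap walk for random probing: draw 16-bit candidates, discard those outside [min, max), and take the first one missing from the local endpoint table. A condensed restatement with two liberties called out: the random_port_probe helper name is invented, and tries is an int here, which the series itself adopts in a follow-up commit, since a u16 can never go negative and a "tries >= 0" guard would spin forever:

static int
random_port_probe (tcp_main_t * tm, transport_endpoint_t * tep,
                   u16 min, u16 max, u32 time_now)
{
  int tries;

  for (tries = max - min; tries >= 0; tries--)
    {
      u16 port = random_u32 (&time_now) & PORT_MASK;
      if (port < min || port >= max)
        continue;               /* out of range, redraw */
      if (transport_endpoint_lookup (&tm->local_endpoints_table, &tep->ip,
                                     port)
          == TRANSPORT_ENDPOINT_INVALID_INDEX)
        return port;            /* free: caller records it in the table */
    }
  return -1;                    /* no free ports; caller pool_put()s tep */
}

Unlike the patch's inner while (1), an out-of-range draw here consumes a try, so the bound is slightly tighter; either way the caller distinguishes a usable port from -1.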
*/ #define foreach_tcp_fsm_state \ @@ -590,7 +591,6 @@ vlib_buffer_push_tcp_net_order (vlib_buffer_t * b, u16 sp, u16 dp, u32 seq, /** * Push TCP header to buffer * - * @param vm - vlib_main * @param b - buffer to write the header to * @param sp_net - source port net order * @param dp_net - destination port net order diff --git a/src/vnet/tcp/tcp_input.c b/src/vnet/tcp/tcp_input.c index daa0683b..0a907d0a 100644 --- a/src/vnet/tcp/tcp_input.c +++ b/src/vnet/tcp/tcp_input.c @@ -711,7 +711,7 @@ tcp_rcv_ack (tcp_connection_t * tc, vlib_buffer_t * b, if (tcp_opts_sack_permitted (&tc->opt)) tcp_rcv_sacks (tc, vnet_buffer (b)->tcp.ack_number); - new_snd_wnd = clib_net_to_host_u32 (th->window) << tc->snd_wscale; + new_snd_wnd = clib_net_to_host_u16 (th->window) << tc->snd_wscale; if (tcp_ack_is_dupack (tc, b, new_snd_wnd)) { @@ -1320,7 +1320,6 @@ tcp46_syn_sent_inline (vlib_main_t * vm, vlib_node_runtime_t * node, /* Parse options */ tcp_options_parse (tcp0, &new_tc0->opt); - tcp_connection_init_vars (new_tc0); if (tcp_opts_tstamp (&new_tc0->opt)) { @@ -1331,11 +1330,13 @@ tcp46_syn_sent_inline (vlib_main_t * vm, vlib_node_runtime_t * node, if (tcp_opts_wscale (&new_tc0->opt)) new_tc0->snd_wscale = new_tc0->opt.wscale; - new_tc0->snd_wnd = clib_net_to_host_u32 (tcp0->window) - << new_tc0->snd_wscale; + /* No scaling */ + new_tc0->snd_wnd = clib_net_to_host_u16 (tcp0->window); new_tc0->snd_wl1 = seq0; new_tc0->snd_wl2 = ack0; + tcp_connection_init_vars (new_tc0); + /* SYN-ACK: See if we can switch to ESTABLISHED state */ if (tcp_ack (tcp0)) { @@ -1345,6 +1346,9 @@ tcp46_syn_sent_inline (vlib_main_t * vm, vlib_node_runtime_t * node, new_tc0->snd_una = ack0; new_tc0->state = TCP_STATE_ESTABLISHED; + /* Make sure las is initialized for the wnd computation */ + new_tc0->rcv_las = new_tc0->rcv_nxt; + /* Notify app that we have connection */ stream_session_connect_notify (&new_tc0->connection, sst, 0); @@ -1575,7 +1579,7 @@ tcp46_rcv_process_inline (vlib_main_t * vm, vlib_node_runtime_t * node, /* Initialize session variables */ tc0->snd_una = vnet_buffer (b0)->tcp.ack_number; - tc0->snd_wnd = clib_net_to_host_u32 (tcp0->window) + tc0->snd_wnd = clib_net_to_host_u16 (tcp0->window) << tc0->opt.wscale; tc0->snd_wl1 = vnet_buffer (b0)->tcp.seq_number; tc0->snd_wl2 = vnet_buffer (b0)->tcp.ack_number; @@ -1899,7 +1903,6 @@ tcp46_listen_inline (vlib_main_t * vm, vlib_node_runtime_t * node, } tcp_options_parse (th0, &child0->opt); - tcp_connection_init_vars (child0); child0->irs = vnet_buffer (b0)->tcp.seq_number; child0->rcv_nxt = vnet_buffer (b0)->tcp.seq_number + 1; @@ -1913,6 +1916,16 @@ tcp46_listen_inline (vlib_main_t * vm, vlib_node_runtime_t * node, child0->tsval_recent_age = tcp_time_now (); } + if (tcp_opts_wscale (&child0->opt)) + child0->snd_wscale = child0->opt.wscale; + + /* No scaling */ + child0->snd_wnd = clib_net_to_host_u16 (th0->window); + child0->snd_wl1 = vnet_buffer (b0)->tcp.seq_number; + child0->snd_wl2 = vnet_buffer (b0)->tcp.ack_number; + + tcp_connection_init_vars (child0); + /* Reuse buffer to make syn-ack and send */ tcp_make_synack (child0, b0); next0 = tcp_next_output (is_ip4); @@ -1923,7 +1936,7 @@ tcp46_listen_inline (vlib_main_t * vm, vlib_node_runtime_t * node, } - b0->error = error0 ? 
node->errors[error0] : 0; + b0->error = node->errors[error0]; vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next, n_left_to_next, bi0, next0); @@ -2069,7 +2082,6 @@ tcp46_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node, u32 n_left_from, next_index, *from, *to_next; u32 my_thread_index = vm->cpu_index; tcp_main_t *tm = vnet_get_tcp_main (); - session_manager_main_t *ssm = vnet_get_session_manager_main (); from = vlib_frame_vector_args (from_frame); n_left_from = from_frame->n_vectors; @@ -2109,26 +2121,26 @@ tcp46_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node, /* lookup session */ tc0 = - (tcp_connection_t *) stream_session_lookup_transport4 (ssm, - &ip40->dst_address, - &ip40->src_address, - tcp0->dst_port, - tcp0->src_port, - SESSION_TYPE_IP4_TCP, - my_thread_index); + (tcp_connection_t *) + stream_session_lookup_transport4 (&ip40->dst_address, + &ip40->src_address, + tcp0->dst_port, + tcp0->src_port, + SESSION_TYPE_IP4_TCP, + my_thread_index); } else { ip60 = vlib_buffer_get_current (b0); tcp0 = ip6_next_header (ip60); tc0 = - (tcp_connection_t *) stream_session_lookup_transport6 (ssm, - &ip60->src_address, - &ip60->dst_address, - tcp0->src_port, - tcp0->dst_port, - SESSION_TYPE_IP6_TCP, - my_thread_index); + (tcp_connection_t *) + stream_session_lookup_transport6 (&ip60->src_address, + &ip60->dst_address, + tcp0->src_port, + tcp0->dst_port, + SESSION_TYPE_IP6_TCP, + my_thread_index); } /* Session exists */ diff --git a/src/vnet/tcp/tcp_output.c b/src/vnet/tcp/tcp_output.c index dbcf1f74..7e431cd0 100644 --- a/src/vnet/tcp/tcp_output.c +++ b/src/vnet/tcp/tcp_output.c @@ -90,6 +90,15 @@ tcp_window_compute_scale (u32 available_space) return wnd_scale; } +/** + * TCP's IW as recommended by RFC6928 + */ +always_inline u32 +tcp_initial_wnd_unscaled (tcp_connection_t * tc) +{ + return TCP_IW_N_SEGMENTS * dummy_mtu; +} + /** * Compute initial window and scale factor. As per RFC1323, window field in * SYN and SYN-ACK segments is never scaled. @@ -97,18 +106,15 @@ tcp_window_compute_scale (u32 available_space) u32 tcp_initial_window_to_advertise (tcp_connection_t * tc) { - u32 available_space; + u32 max_fifo; /* Initial wnd for SYN. Fifos are not allocated yet. - * Use some predefined value */ - if (tc->state != TCP_STATE_SYN_RCVD) - { - return TCP_DEFAULT_RX_FIFO_SIZE; - } + * Use some predefined value. 
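Two window fixes travel together in these hunks. First, th->window is a 16-bit field, so the earlier clib_net_to_host_u32 reads were folding the adjacent checksum bytes into the peer's window before scaling. Second, the initial advertisement no longer pretends to know the fifo size. A comment-style walk-through of the arithmetic the new code performs, using only symbols defined in this patch:

/* SYN/SYN-ACK advertisement, per tcp_initial_window_to_advertise ():
 *
 *   tc->rcv_wscale = tcp_window_compute_scale (TCP_MAX_RX_FIFO_SIZE)
 *       scale derived from the largest fifo we may ever allocate
 *       (2 << 20 = 2 MB), so the same wscale stays valid once real
 *       fifos exist;
 *   tc->rcv_wnd = TCP_IW_N_SEGMENTS * dummy_mtu
 *       the RFC6928-style initial window of 10 segments noted in
 *       the comment above;
 *   advertised = clib_min (tc->rcv_wnd, TCP_WND_MAX)
 *       per RFC1323 the window field in a SYN is never scaled, so
 *       the value is clamped to the unscaled maximum.
 */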
For SYN-ACK we still want the + * scale to be computed in the same way */ + max_fifo = TCP_MAX_RX_FIFO_SIZE; - available_space = stream_session_max_enqueue (&tc->connection); - tc->rcv_wscale = tcp_window_compute_scale (available_space); - tc->rcv_wnd = clib_min (available_space, TCP_WND_MAX << tc->rcv_wscale); + tc->rcv_wscale = tcp_window_compute_scale (max_fifo); + tc->rcv_wnd = tcp_initial_wnd_unscaled (tc); return clib_min (tc->rcv_wnd, TCP_WND_MAX); } @@ -119,23 +125,43 @@ tcp_initial_window_to_advertise (tcp_connection_t * tc) u32 tcp_window_to_advertise (tcp_connection_t * tc, tcp_state_t state) { - u32 available_space, wnd, scaled_space; + u32 available_space, max_fifo, observed_wnd; - if (state != TCP_STATE_ESTABLISHED) + if (state < TCP_STATE_ESTABLISHED) return tcp_initial_window_to_advertise (tc); + /* + * Figure out how much space we have available + */ available_space = stream_session_max_enqueue (&tc->connection); - scaled_space = available_space >> tc->rcv_wscale; + max_fifo = stream_session_fifo_size (&tc->connection); + + ASSERT (tc->opt.mss < max_fifo); + + if (available_space < tc->opt.mss && available_space < max_fifo / 8) + available_space = 0; - /* Need to update scale */ - if (PREDICT_FALSE ((scaled_space == 0 && available_space != 0)) - || (scaled_space >= TCP_WND_MAX)) - tc->rcv_wscale = tcp_window_compute_scale (available_space); + /* + * Use the above and what we know about what we've previously advertised + * to compute the new window + */ + observed_wnd = tc->rcv_wnd - (tc->rcv_nxt - tc->rcv_las); - wnd = clib_min (available_space, TCP_WND_MAX << tc->rcv_wscale); - tc->rcv_wnd = wnd; + /* Bad. Thou shalt not shrink */ + if (available_space < observed_wnd) + { + if (available_space == 0) + clib_warning ("Didn't shrink rcv window despite not having space"); + } + + tc->rcv_wnd = clib_min (available_space, TCP_WND_MAX << tc->rcv_wscale); + + if (tc->rcv_wnd == 0) + { + tc->flags |= TCP_CONN_SENT_RCV_WND0; + } - return wnd >> tc->rcv_wscale; + return tc->rcv_wnd >> tc->rcv_wscale; } /** @@ -225,7 +251,7 @@ tcp_options_write (u8 * data, tcp_options_t * opts) } always_inline int -tcp_make_syn_options (tcp_options_t * opts, u32 initial_wnd) +tcp_make_syn_options (tcp_options_t * opts, u8 wnd_scale) { u8 len = 0; @@ -234,7 +260,7 @@ tcp_make_syn_options (tcp_options_t * opts, u32 initial_wnd) len += TCP_OPTION_LEN_MSS; opts->flags |= TCP_OPTS_FLAG_WSCALE; - opts->wscale = tcp_window_compute_scale (initial_wnd); + opts->wscale = wnd_scale; len += TCP_OPTION_LEN_WINDOW_SCALE; opts->flags |= TCP_OPTS_FLAG_TSTAMP; @@ -327,8 +353,7 @@ tcp_make_options (tcp_connection_t * tc, tcp_options_t * opts, case TCP_STATE_SYN_RCVD: return tcp_make_synack_options (tc, opts); case TCP_STATE_SYN_SENT: - return tcp_make_syn_options (opts, - tcp_initial_window_to_advertise (tc)); + return tcp_make_syn_options (opts, tc->rcv_wscale); default: clib_warning ("Not handled!"); return 0; @@ -732,7 +757,7 @@ tcp_send_syn (tcp_connection_t * tc) /* Make and write options */ memset (&snd_opts, 0, sizeof (snd_opts)); - tcp_opts_len = tcp_make_syn_options (&snd_opts, initial_wnd); + tcp_opts_len = tcp_make_syn_options (&snd_opts, tc->rcv_wscale); tcp_hdr_opts_len = tcp_opts_len + sizeof (tcp_header_t); th = vlib_buffer_push_tcp (b, tc->c_lcl_port, tc->c_rmt_port, tc->iss, @@ -900,7 +925,7 @@ tcp_prepare_retransmit_segment (tcp_connection_t * tc, vlib_buffer_t * b, tcp_reuse_buffer (vm, b); - ASSERT (tc->state == TCP_STATE_ESTABLISHED); + ASSERT (tc->state >= TCP_STATE_ESTABLISHED); ASSERT 
(max_bytes != 0); if (tcp_opts_sack_permitted (&tc->opt)) @@ -929,7 +954,6 @@ tcp_prepare_retransmit_segment (tcp_connection_t * tc, vlib_buffer_t * b, max_bytes); ASSERT (n_bytes != 0); - tc->snd_nxt += n_bytes; tcp_push_hdr_i (tc, b, tc->state); return n_bytes; @@ -967,7 +991,7 @@ tcp_timer_retransmit_handler_i (u32 index, u8 is_syn) tcp_get_free_buffer_index (tm, &bi); b = vlib_get_buffer (vm, bi); - if (tc->state == TCP_STATE_ESTABLISHED) + if (tc->state >= TCP_STATE_ESTABLISHED) { tcp_fastrecovery_off (tc); @@ -977,6 +1001,12 @@ tcp_timer_retransmit_handler_i (u32 index, u8 is_syn) /* Figure out what and how many bytes we can send */ snd_space = tcp_available_snd_space (tc); max_bytes = clib_min (tc->snd_mss, snd_space); + + if (max_bytes == 0) + { + clib_warning ("no wnd to retransmit"); + return; + } tcp_prepare_retransmit_segment (tc, b, max_bytes); tc->rtx_bytes += max_bytes; @@ -996,7 +1026,11 @@ tcp_timer_retransmit_handler_i (u32 index, u8 is_syn) tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX); vlib_buffer_make_headroom (b, MAX_HDRS_LEN); + tcp_push_hdr_i (tc, b, tc->state); + + /* Account for the SYN */ + tc->snd_nxt += 1; } if (!is_syn) @@ -1163,8 +1197,8 @@ tcp46_output_inline (vlib_main_t * vm, if (PREDICT_FALSE (vnet_buffer (b0)->tcp.flags & TCP_BUF_FLAG_DUPACK)) { + ASSERT (tc0->snt_dupacks > 0); tc0->snt_dupacks--; - ASSERT (tc0->snt_dupacks >= 0); if (!tcp_session_has_ooo_data (tc0)) { error0 = TCP_ERROR_FILTERED_DUPACKS; -- cgit 1.2.3-korg From d79b41e993981df80245b0e6d90eb691bdaae648 Mon Sep 17 00:00:00 2001 From: Florin Coras Date: Sat, 4 Mar 2017 05:37:52 -0800 Subject: VPP-659 TCP improvements - builtin test echo server - fix SYN-ACK retransmit canceling - avoid sending spurious ACK if in LAST_ACK - improved client dummy test app - renamed tx fifo dequeuing and sending functions to avoid confusion - improved RST handling Change-Id: Ia14aad3df319540dcf6e6a4e18a9f8d423a4b83b Signed-off-by: Florin Coras Signed-off-by: Dave Barach --- src/scripts/vnet/uri/afp_setup.cli | 5 ++ src/scripts/vnet/uri/dummy_app.py | 26 ++++++++- src/scripts/vnet/uri/tap_setup.cli | 5 ++ src/scripts/vnet/uri/tcp_server | 5 -- src/uri/uri_tcp_test.c | 7 ++- src/vnet/session/application.c | 16 ++++++ src/vnet/session/application.h | 3 +- src/vnet/session/application_interface.c | 5 +- src/vnet/session/application_interface.h | 4 +- src/vnet/session/node.c | 56 +++++++++++-------- src/vnet/session/session.c | 38 +++++++++---- src/vnet/session/session.h | 28 +++++----- src/vnet/session/session_api.c | 50 +++++++++++++++-- src/vnet/session/transport.h | 66 +++++++++++----------- src/vnet/tcp/builtin_server.c | 94 ++++++++++++++++++++++++++++++-- src/vnet/tcp/tcp.c | 63 +++++++++++++++++---- src/vnet/tcp/tcp.h | 14 +++-- src/vnet/tcp/tcp_error.def | 11 ++-- src/vnet/tcp/tcp_input.c | 63 ++++++++++++++------- src/vnet/tcp/tcp_output.c | 47 +++++++++++++--- src/vnet/udp/builtin_server.c | 2 +- src/vnet/udp/udp_input.c | 14 ++--- src/vnet/unix/tapcli.c | 3 +- 23 files changed, 459 insertions(+), 166 deletions(-) create mode 100644 src/scripts/vnet/uri/afp_setup.cli create mode 100644 src/scripts/vnet/uri/tap_setup.cli delete mode 100644 src/scripts/vnet/uri/tcp_server (limited to 'src/vnet/session/application_interface.c') diff --git a/src/scripts/vnet/uri/afp_setup.cli b/src/scripts/vnet/uri/afp_setup.cli new file mode 100644 index 00000000..c29afc6f --- /dev/null +++ b/src/scripts/vnet/uri/afp_setup.cli @@ -0,0 +1,5 @@ +create host-interface name vpp1 +set int state host-vpp1 up +set int ip address 
host-vpp1 6.0.1.1/24 +trace add af-packet-input 10 +session enable diff --git a/src/scripts/vnet/uri/dummy_app.py b/src/scripts/vnet/uri/dummy_app.py index b80fbb28..50333923 100644 --- a/src/scripts/vnet/uri/dummy_app.py +++ b/src/scripts/vnet/uri/dummy_app.py @@ -2,7 +2,7 @@ import socket import sys -import bitstring +import time # action can be reflect or drop action = "drop" @@ -22,6 +22,7 @@ def handle_connection (connection, client_address): def run_server(ip, port): print("Starting server {}:{}".format(repr(ip), repr(port))) sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) server_address = (ip, int(port)) sock.bind(server_address) sock.listen(1) @@ -39,12 +40,31 @@ def prepare_data(): def run_client(ip, port): print("Starting client {}:{}".format(repr(ip), repr(port))) sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - server_address = ("6.0.1.1", 1234) + server_address = (ip, port) sock.connect(server_address) data = prepare_data() + n_rcvd = 0 + n_sent = len (data) try: sock.sendall(data) + + timeout = time.time() + 2 + while n_rcvd < n_sent and time.time() < timeout: + tmp = sock.recv(1500) + tmp = bytearray (tmp) + n_read = len(tmp) + for i in range(n_read): + if (data[n_rcvd + i] != tmp[i]): + print("Difference at byte {}. Sent {} got {}" + .format(n_rcvd + i, data[n_rcvd + i], tmp[i])) + n_rcvd += n_read + + if (n_rcvd < n_sent or n_rcvd > n_sent): + print("Sent {} and got back {}".format(n_sent, n_rcvd)) + else: + print("Got back what we've sent!!"); + finally: sock.close() @@ -62,4 +82,4 @@ if __name__ == "__main__": if (len(sys.argv) == 5): action = sys.argv[4] - run (sys.argv[1], sys.argv[2], sys.argv[3]) + run (sys.argv[1], sys.argv[2], int(sys.argv[3])) diff --git a/src/scripts/vnet/uri/tap_setup.cli b/src/scripts/vnet/uri/tap_setup.cli new file mode 100644 index 00000000..1d9a1b36 --- /dev/null +++ b/src/scripts/vnet/uri/tap_setup.cli @@ -0,0 +1,5 @@ +tap connect tap0 address 6.0.1.2/24 +set int ip addr tap-0 6.0.1.1/24 +set int state tap-0 up +trace add tapcli-rx 10 +session enable diff --git a/src/scripts/vnet/uri/tcp_server b/src/scripts/vnet/uri/tcp_server deleted file mode 100644 index c29afc6f..00000000 --- a/src/scripts/vnet/uri/tcp_server +++ /dev/null @@ -1,5 +0,0 @@ -create host-interface name vpp1 -set int state host-vpp1 up -set int ip address host-vpp1 6.0.1.1/24 -trace add af-packet-input 10 -session enable diff --git a/src/uri/uri_tcp_test.c b/src/uri/uri_tcp_test.c index 6c9cf1db..261fd288 100644 --- a/src/uri/uri_tcp_test.c +++ b/src/uri/uri_tcp_test.c @@ -287,6 +287,7 @@ vl_api_reset_session_t_handler (vl_api_reset_session_t * mp) session = pool_elt_at_index (utm->sessions, p[0]); hash_unset (utm->session_index_by_vpp_handles, key); pool_put (utm->sessions, session); + utm->time_to_stop = 1; } else { @@ -296,7 +297,7 @@ vl_api_reset_session_t_handler (vl_api_reset_session_t * mp) rmp = vl_msg_api_alloc (sizeof (*rmp)); memset (rmp, 0, sizeof (*rmp)); - rmp->_vl_msg_id = ntohs (VL_API_DISCONNECT_SESSION_REPLY); + rmp->_vl_msg_id = ntohs (VL_API_RESET_SESSION_REPLY); rmp->retval = rv; rmp->session_index = mp->session_index; rmp->session_thread_index = mp->session_thread_index; @@ -734,7 +735,7 @@ server_handle_fifo_event_rx (uri_tcp_test_main_t * utm, { rv = svm_fifo_enqueue_nowait (tx_fifo, 0, n_read, utm->rx_buf); } - while (rv == -2); + while (rv == -2 && !utm->time_to_stop); /* Fabricate TX event, send to vpp */ evt.fifo = tx_fifo; @@ -750,7 +751,7 @@ 
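The reset handling added above threads a time_to_stop flag through the test app's busy loops so a peer reset can actually terminate the echo test. The pattern, condensed, with one assumption called out: in the svm fifo code of this era, svm_fifo_enqueue_nowait returns -2 while the fifo is full, which is why that value is retried rather than treated as fatal:

/* Echo back what was read; retry while the tx fifo is full, but
 * give up as soon as a reset has set time_to_stop. */
do
  {
    rv = svm_fifo_enqueue_nowait (tx_fifo, 0 /* pid */ , n_read,
                                  utm->rx_buf);
  }
while (rv == -2 && !utm->time_to_stop);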
server_handle_fifo_event_rx (uri_tcp_test_main_t * utm, if (n_read > 0) bytes -= n_read; } - while (n_read < 0 || bytes > 0); + while ((n_read < 0 || bytes > 0) && !utm->time_to_stop); } void diff --git a/src/vnet/session/application.c b/src/vnet/session/application.c index a542eebe..513e5fac 100644 --- a/src/vnet/session/application.c +++ b/src/vnet/session/application.c @@ -92,6 +92,19 @@ application_del (application_t * app) pool_put (app_pool, app); } +static void +application_verify_cb_fns (application_type_t type, session_cb_vft_t * cb_fns) +{ + if (type == APP_SERVER && cb_fns->session_accept_callback == 0) + clib_warning ("No accept callback function provided"); + if (type == APP_CLIENT && cb_fns->session_connected_callback == 0) + clib_warning ("No session connected callback function provided"); + if (cb_fns->session_disconnect_callback == 0) + clib_warning ("No session disconnect callback function provided"); + if (cb_fns->session_reset_callback == 0) + clib_warning ("No session reset callback function provided"); +} + application_t * application_new (application_type_t type, session_type_t sst, u32 api_client_index, u32 flags, session_cb_vft_t * cb_fns) @@ -142,6 +155,9 @@ application_new (application_type_t type, session_type_t sst, app->flags = flags; app->cb_fns = *cb_fns; + /* Check that the obvious things are properly set up */ + application_verify_cb_fns (type, cb_fns); + /* Add app to lookup by api_client_index table */ application_table_add (app); diff --git a/src/vnet/session/application.h b/src/vnet/session/application.h index 480828f7..a60a8b8b 100644 --- a/src/vnet/session/application.h +++ b/src/vnet/session/application.h @@ -45,7 +45,8 @@ typedef struct _stream_session_cb_vft void (*session_reset_callback) (stream_session_t * s); /* Direct RX callback, for built-in servers */ - int (*builtin_server_rx_callback) (stream_session_t * session); + int (*builtin_server_rx_callback) (stream_session_t * session, + session_fifo_event_t * ep); /* Redirect connection to local server */ int (*redirect_connect_callback) (u32 api_client_index, void *mp); diff --git a/src/vnet/session/application_interface.c b/src/vnet/session/application_interface.c index 6ddfb70f..4b30bd87 100644 --- a/src/vnet/session/application_interface.c +++ b/src/vnet/session/application_interface.c @@ -98,7 +98,7 @@ vnet_bind_i (u32 api_client_index, ip46_address_t * ip46, u16 port_host_order, if (application_lookup (api_client_index)) { - clib_warning ("Only one bind supported for now"); + clib_warning ("Only one connection supported for now"); return VNET_API_ERROR_ADDRESS_IN_USE; } @@ -364,8 +364,7 @@ vnet_connect_uri (vnet_connect_args_t * a) } int -vnet_disconnect_session (u32 client_index, u32 session_index, - u32 thread_index) +vnet_disconnect_session (u32 session_index, u32 thread_index) { stream_session_t *session; diff --git a/src/vnet/session/application_interface.h b/src/vnet/session/application_interface.h index 8d87c067..a5f2b9a6 100644 --- a/src/vnet/session/application_interface.h +++ b/src/vnet/session/application_interface.h @@ -112,9 +112,7 @@ typedef enum int vnet_bind_uri (vnet_bind_args_t *); int vnet_unbind_uri (char *uri, u32 api_client_index); int vnet_connect_uri (vnet_connect_args_t * a); -int -vnet_disconnect_session (u32 client_index, u32 session_index, - u32 thread_index); +int vnet_disconnect_session (u32 session_index, u32 thread_index); int vnet_bind (vnet_bind_args_t * a); int vnet_connect (vnet_connect_args_t * a); diff --git a/src/vnet/session/node.c 
b/src/vnet/session/node.c index 399077de..7fd7e0b7 100644 --- a/src/vnet/session/node.c +++ b/src/vnet/session/node.c @@ -78,10 +78,11 @@ static u32 session_type_to_next[] = { }; always_inline int -session_fifo_rx_i (vlib_main_t * vm, vlib_node_runtime_t * node, - session_manager_main_t * smm, session_fifo_event_t * e0, - stream_session_t * s0, u32 thread_index, int *n_tx_packets, - u8 peek_data) +session_tx_fifo_read_and_snd_i (vlib_main_t * vm, vlib_node_runtime_t * node, + session_manager_main_t * smm, + session_fifo_event_t * e0, + stream_session_t * s0, u32 thread_index, + int *n_tx_packets, u8 peek_data) { u32 n_trace = vlib_get_trace_count (vm, node); u32 left_to_snd0, max_len_to_snd0, len_to_deq0, n_bufs, snd_space0; @@ -120,7 +121,7 @@ session_fifo_rx_i (vlib_main_t * vm, vlib_node_runtime_t * node, if (peek_data) { /* Offset in rx fifo from where to peek data */ - rx_offset = transport_vft->rx_fifo_offset (tc0); + rx_offset = transport_vft->tx_fifo_offset (tc0); } /* TODO check if transport is willing to send len_to_snd0 @@ -194,25 +195,27 @@ session_fifo_rx_i (vlib_main_t * vm, vlib_node_runtime_t * node, t0->server_thread_index = s0->thread_index; } + len_to_deq0 = (left_to_snd0 < snd_mss0) ? left_to_snd0 : snd_mss0; + /* *INDENT-OFF* */ if (1) { ELOG_TYPE_DECLARE (e) = { - .format = "evt-dequeue: id %d length %d", - .format_args = "i4i4", + .format = "evt-deq: id %d len %d rd %d wnd %d", + .format_args = "i4i4i4i4", }; struct { - u32 data[2]; + u32 data[4]; } *ed; ed = ELOG_DATA (&vm->elog_main, e); ed->data[0] = e0->event_id; ed->data[1] = e0->enqueue_length; + ed->data[2] = len_to_deq0; + ed->data[3] = left_to_snd0; } /* *INDENT-ON* */ - len_to_deq0 = (left_to_snd0 < snd_mss0) ? left_to_snd0 : snd_mss0; - /* Make room for headers */ data0 = vlib_buffer_make_headroom (b0, MAX_HDRS_LEN); @@ -276,22 +279,25 @@ dequeue_fail: } int -session_fifo_rx_peek (vlib_main_t * vm, vlib_node_runtime_t * node, - session_manager_main_t * smm, session_fifo_event_t * e0, - stream_session_t * s0, u32 thread_index, int *n_tx_pkts) +session_tx_fifo_peek_and_snd (vlib_main_t * vm, vlib_node_runtime_t * node, + session_manager_main_t * smm, + session_fifo_event_t * e0, + stream_session_t * s0, u32 thread_index, + int *n_tx_pkts) { - return session_fifo_rx_i (vm, node, smm, e0, s0, thread_index, n_tx_pkts, - 1); + return session_tx_fifo_read_and_snd_i (vm, node, smm, e0, s0, thread_index, + n_tx_pkts, 1); } int -session_fifo_rx_dequeue (vlib_main_t * vm, vlib_node_runtime_t * node, - session_manager_main_t * smm, - session_fifo_event_t * e0, stream_session_t * s0, - u32 thread_index, int *n_tx_pkts) +session_tx_fifo_dequeue_and_snd (vlib_main_t * vm, vlib_node_runtime_t * node, + session_manager_main_t * smm, + session_fifo_event_t * e0, + stream_session_t * s0, u32 thread_index, + int *n_tx_pkts) { - return session_fifo_rx_i (vm, node, smm, e0, s0, thread_index, n_tx_pkts, - 0); + return session_tx_fifo_read_and_snd_i (vm, node, smm, e0, s0, thread_index, + n_tx_pkts, 0); } static uword @@ -369,12 +375,16 @@ skip_dequeue: s0 = stream_session_get_if_valid (server_session_index0, my_thread_index); - if (!s0) + + if (CLIB_DEBUG && !s0) { - clib_warning ("It's dead Jim!"); + clib_warning ("It's dead, Jim!"); continue; } + if (PREDICT_FALSE (s0->session_state == SESSION_STATE_CLOSED)) + continue; + ASSERT (s0->thread_index == my_thread_index); switch (e0->event_type) diff --git a/src/vnet/session/session.c b/src/vnet/session/session.c index b5a168ca..8867e794 100644 --- a/src/vnet/session/session.c +++ 
b/src/vnet/session/session.c @@ -373,7 +373,7 @@ stream_session_lookup_transport6 (ip6_address_t * lcl, ip6_address_t * rmt, /* Finally, try half-open connections */ rv = clib_bihash_search_inline_48_8 (&smm->v6_half_open_hash, &kv6); if (rv == 0) - return tp_vfts[s->session_type].get_half_open (kv6.value & 0xFFFFFFFF); + return tp_vfts[proto].get_half_open (kv6.value & 0xFFFFFFFF); return 0; } @@ -617,7 +617,10 @@ again: goto again; } else - return SESSION_ERROR_NO_SPACE; + { + clib_warning ("No space to allocate fifos!"); + return SESSION_ERROR_NO_SPACE; + } } return 0; } @@ -806,6 +809,10 @@ stream_session_enqueue_notify (stream_session_t * s, u8 block) evt.event_id = serial_number++; evt.enqueue_length = svm_fifo_max_dequeue (s->server_rx_fifo); + /* Built-in server? Hand event to the callback... */ + if (app->cb_fns.builtin_server_rx_callback) + return app->cb_fns.builtin_server_rx_callback (s, &evt); + /* Add event to server's event queue */ q = app->event_queue; @@ -1043,13 +1050,9 @@ stream_session_delete (stream_session_t * s) session_manager_main_t *smm = vnet_get_session_manager_main (); svm_fifo_segment_private_t *fifo_segment; application_t *app; - int rv; - /* delete from the main lookup table */ - rv = stream_session_table_del (smm, s); - - if (rv) - clib_warning ("hash delete error, rv %d", rv); + /* Delete from the main lookup table. */ + stream_session_table_del (smm, s); /* Cleanup fifo segments */ fifo_segment = svm_fifo_get_segment (s->server_segment_index); @@ -1197,18 +1200,30 @@ stream_session_open (u8 sst, ip46_address_t * addr, u16 port_host_byte_order, void stream_session_disconnect (stream_session_t * s) { - tp_vfts[s->session_type].close (s->connection_index, s->thread_index); s->session_state = SESSION_STATE_CLOSED; + tp_vfts[s->session_type].close (s->connection_index, s->thread_index); } /** * Cleanup transport and session state. + * + * Notify transport of the cleanup, wait for a delete notify to actually + * remove the session state. */ void stream_session_cleanup (stream_session_t * s) { + session_manager_main_t *smm = &session_manager_main; + int rv; + + s->session_state = SESSION_STATE_CLOSED; + + /* Delete from the main lookup table to avoid more enqueues */ + rv = stream_session_table_del (smm, s); + if (rv) + clib_warning ("hash delete error, rv %d", rv); + tp_vfts[s->session_type].cleanup (s->connection_index, s->thread_index); - stream_session_delete (s); } void @@ -1221,7 +1236,8 @@ session_register_transport (u8 type, const transport_proto_vft_t * vft) /* If an offset function is provided, then peek instead of dequeue */ smm->session_rx_fns[type] = - (vft->rx_fifo_offset) ? session_fifo_rx_peek : session_fifo_rx_dequeue; + (vft->tx_fifo_offset) ? session_tx_fifo_peek_and_snd : + session_tx_fifo_dequeue_and_snd; } transport_proto_vft_t * diff --git a/src/vnet/session/session.h b/src/vnet/session/session.h index 46e5ce2c..1b712e2e 100644 --- a/src/vnet/session/session.h +++ b/src/vnet/session/session.h @@ -102,33 +102,33 @@ typedef CLIB_PACKED (struct typedef struct _stream_session_t { + /** fifo pointers. 
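The tx-path renames above are anchored by the dispatch rule in session_register_transport (): a transport that provides tx_fifo_offset keeps sent-but-unacknowledged bytes in the fifo and gets the peeking sender; one that does not (UDP) gets the dequeueing sender. What a reliable transport reports is just its in-flight span; a sketch mirroring the TCP version that appears later in this series (example_tx_fifo_offset and the numbers are illustrative):

static u32
example_tx_fifo_offset (transport_connection_t * trans_conn)
{
  tcp_connection_t *tc = (tcp_connection_t *) trans_conn;

  /* Bytes in [snd_una, snd_nxt) were transmitted but not yet
   * acknowledged; they must stay in the fifo for retransmit, so a
   * fresh send peeks starting past them. E.g. snd_una = 1000,
   * snd_nxt = 1400 (hypothetical): peek from offset 400. */
  return tc->snd_nxt - tc->snd_una;
}

Only an ACK moving snd_una forward lets the session layer actually dequeue, which is exactly the property the retransmit path relies on.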
Once allocated, these do not move */ + svm_fifo_t *server_rx_fifo; + svm_fifo_t *server_tx_fifo; + /** Type */ u8 session_type; /** State */ u8 session_state; + u8 thread_index; + + /** used during unbind processing */ + u8 is_deleted; + + /** To avoid n**2 "one event per frame" check */ + u8 enqueue_epoch; + /** Session index in per_thread pool */ u32 session_index; /** Transport specific */ u32 connection_index; - u8 thread_index; - /** Application specific */ u32 pid; - /** fifo pointers. Once allocated, these do not move */ - svm_fifo_t *server_rx_fifo; - svm_fifo_t *server_tx_fifo; - - /** To avoid n**2 "one event per frame" check */ - u8 enqueue_epoch; - - /** used during unbind processing */ - u8 is_deleted; - /** stream server pool index */ u32 app_index; @@ -162,8 +162,8 @@ typedef int session_fifo_event_t * e0, stream_session_t * s0, u32 thread_index, int *n_tx_pkts); -extern session_fifo_rx_fn session_fifo_rx_peek; -extern session_fifo_rx_fn session_fifo_rx_dequeue; +extern session_fifo_rx_fn session_tx_fifo_peek_and_snd; +extern session_fifo_rx_fn session_tx_fifo_dequeue_and_snd; struct _session_manager_main { diff --git a/src/vnet/session/session_api.c b/src/vnet/session/session_api.c index 8852fc6e..9c38428a 100644 --- a/src/vnet/session/session_api.c +++ b/src/vnet/session/session_api.c @@ -130,6 +130,27 @@ send_session_disconnect_uri_callback (stream_session_t * s) vl_msg_api_send_shmem (q, (u8 *) & mp); } +static void +send_session_reset_uri_callback (stream_session_t * s) +{ + vl_api_reset_session_t *mp; + unix_shared_memory_queue_t *q; + application_t *app = application_get (s->app_index); + + q = vl_api_client_index_to_input_queue (app->api_client_index); + + if (!q) + return; + + mp = vl_msg_api_alloc (sizeof (*mp)); + memset (mp, 0, sizeof (*mp)); + mp->_vl_msg_id = clib_host_to_net_u16 (VL_API_RESET_SESSION); + + mp->session_thread_index = s->thread_index; + mp->session_index = s->session_index; + vl_msg_api_send_shmem (q, (u8 *) & mp); +} + static int send_session_connected_uri_callback (u32 api_client_index, stream_session_t * s, u8 is_fail) @@ -347,6 +368,26 @@ send_session_disconnect_callback (stream_session_t * s) vl_msg_api_send_shmem (q, (u8 *) & mp); } +static void +send_session_reset_callback (stream_session_t * s) +{ + vl_api_reset_sock_t *mp; + unix_shared_memory_queue_t *q; + application_t *app = application_get (s->app_index); + + q = vl_api_client_index_to_input_queue (app->api_client_index); + + if (!q) + return; + + mp = vl_msg_api_alloc (sizeof (*mp)); + memset (mp, 0, sizeof (*mp)); + mp->_vl_msg_id = clib_host_to_net_u16 (VL_API_RESET_SOCK); + + mp->handle = make_session_handle (s); + vl_msg_api_send_shmem (q, (u8 *) & mp); +} + /** * Redirect a connect_uri message to the indicated server. 
* Only sent if the server has bound the related port with @@ -414,6 +455,7 @@ static session_cb_vft_t uri_session_cb_vft = { .session_accept_callback = send_session_accept_uri_callback, .session_disconnect_callback = send_session_disconnect_uri_callback, .session_connected_callback = send_session_connected_uri_callback, + .session_reset_callback = send_session_reset_uri_callback, .add_segment_callback = send_add_segment_callback, .redirect_connect_callback = redirect_connect_uri_callback }; @@ -422,6 +464,7 @@ static session_cb_vft_t session_cb_vft = { .session_accept_callback = send_session_accept_callback, .session_disconnect_callback = send_session_disconnect_callback, .session_connected_callback = send_session_connected_callback, + .session_reset_callback = send_session_reset_callback, .add_segment_callback = send_add_segment_callback, .redirect_connect_callback = redirect_connect_callback }; @@ -548,8 +591,8 @@ vl_api_disconnect_session_t_handler (vl_api_disconnect_session_t * mp) rv = api_session_not_valid (mp->session_index, mp->session_thread_index); if (!rv) - rv = vnet_disconnect_session (mp->client_index, mp->session_index, - mp->session_thread_index); + rv = + vnet_disconnect_session (mp->session_index, mp->session_thread_index); REPLY_MACRO (VL_API_DISCONNECT_SESSION_REPLY); } @@ -572,8 +615,7 @@ vl_api_disconnect_session_reply_t_handler (vl_api_disconnect_session_reply_t * } /* Disconnect has been confirmed. Confirm close to transport */ - vnet_disconnect_session (mp->client_index, mp->session_index, - mp->session_thread_index); + vnet_disconnect_session (mp->session_index, mp->session_thread_index); } static void diff --git a/src/vnet/session/transport.h b/src/vnet/session/transport.h index f486dbb2..0da30261 100644 --- a/src/vnet/session/transport.h +++ b/src/vnet/session/transport.h @@ -74,7 +74,7 @@ typedef struct _transport_proto_vft u32 (*push_header) (transport_connection_t * tconn, vlib_buffer_t * b); u16 (*send_mss) (transport_connection_t * tc); u32 (*send_space) (transport_connection_t * tc); - u32 (*rx_fifo_offset) (transport_connection_t * tc); + u32 (*tx_fifo_offset) (transport_connection_t * tc); /* * Connection retrieval @@ -92,39 +92,39 @@ typedef struct _transport_proto_vft } transport_proto_vft_t; +/* *INDENT-OFF* */ /* 16 octets */ -typedef CLIB_PACKED (struct - { - union - { - struct - { - ip4_address_t src; ip4_address_t dst; - u16 src_port; - u16 dst_port; - /* align by making this 4 octets even though its a 1-bit field - * NOTE: avoid key overlap with other transports that use 5 tuples for - * session identification. - */ - u32 proto; - }; - u64 as_u64[2]; - }; - }) v4_connection_key_t; - -typedef CLIB_PACKED (struct - { - union - { - struct - { - /* 48 octets */ - ip6_address_t src; ip6_address_t dst; - u16 src_port; - u16 dst_port; u32 proto; u8 unused_for_now[8]; - }; u64 as_u64[6]; - }; - }) v6_connection_key_t; +typedef CLIB_PACKED (struct { + union + { + struct + { + ip4_address_t src; ip4_address_t dst; + u16 src_port; + u16 dst_port; + /* align by making this 4 octets even though its a 1-bit field + * NOTE: avoid key overlap with other transports that use 5 tuples for + * session identification. 
+ */ + u32 proto; + }; + u64 as_u64[2]; + }; +}) v4_connection_key_t; + +typedef CLIB_PACKED (struct { + union + { + struct + { + /* 48 octets */ + ip6_address_t src; ip6_address_t dst; + u16 src_port; + u16 dst_port; u32 proto; u8 unused_for_now[8]; + }; u64 as_u64[6]; + }; +}) v6_connection_key_t; +/* *INDENT-ON* */ typedef clib_bihash_kv_16_8_t session_kv4_t; typedef clib_bihash_kv_48_8_t session_kv6_t; diff --git a/src/vnet/tcp/builtin_server.c b/src/vnet/tcp/builtin_server.c index be65642a..9b697a01 100644 --- a/src/vnet/tcp/builtin_server.c +++ b/src/vnet/tcp/builtin_server.c @@ -18,10 +18,24 @@ #include #include +typedef struct +{ + u8 *rx_buf; + unix_shared_memory_queue_t **vpp_queue; + vlib_main_t *vlib_main; +} builtin_server_main_t; + +builtin_server_main_t builtin_server_main; + + int builtin_session_accept_callback (stream_session_t * s) { + builtin_server_main_t *bsm = &builtin_server_main; clib_warning ("called..."); + + bsm->vpp_queue[s->thread_index] = + session_manager_get_vpp_event_queue (s->thread_index); s->session_state = SESSION_STATE_READY; return 0; } @@ -30,8 +44,19 @@ void builtin_session_disconnect_callback (stream_session_t * s) { clib_warning ("called..."); + + vnet_disconnect_session (s->session_index, s->thread_index); } +void +builtin_session_reset_callback (stream_session_t * s) +{ + clib_warning ("called.. "); + + stream_session_cleanup (s); +} + + int builtin_session_connected_callback (u32 client_index, stream_session_t * s, u8 is_fail) @@ -56,9 +81,57 @@ builtin_redirect_connect_callback (u32 client_index, void *mp) } int -builtin_server_rx_callback (stream_session_t * s) +builtin_server_rx_callback (stream_session_t * s, session_fifo_event_t * e) { - clib_warning ("called..."); + int n_written, bytes, total_copy_bytes; + int n_read; + svm_fifo_t *tx_fifo; + builtin_server_main_t *bsm = &builtin_server_main; + session_fifo_event_t evt; + static int serial_number = 0; + + bytes = e->enqueue_length; + if (PREDICT_FALSE (bytes <= 0)) + { + clib_warning ("bizarre rx callback: bytes %d", bytes); + return 0; + } + + tx_fifo = s->server_tx_fifo; + + /* Number of bytes we're going to copy */ + total_copy_bytes = (bytes < (tx_fifo->nitems - tx_fifo->cursize)) ? 
bytes : + tx_fifo->nitems - tx_fifo->cursize; + + if (PREDICT_FALSE (total_copy_bytes <= 0)) + { + clib_warning ("no space in tx fifo, event had %d bytes", bytes); + return 0; + } + + vec_validate (bsm->rx_buf, total_copy_bytes - 1); + _vec_len (bsm->rx_buf) = total_copy_bytes; + + n_read = svm_fifo_dequeue_nowait (s->server_rx_fifo, 0, total_copy_bytes, + bsm->rx_buf); + ASSERT (n_read == total_copy_bytes); + + /* + * Echo back + */ + + n_written = svm_fifo_enqueue_nowait (tx_fifo, 0, n_read, bsm->rx_buf); + ASSERT (n_written == total_copy_bytes); + + /* Fabricate TX event, send to vpp */ + evt.fifo = tx_fifo; + evt.event_type = FIFO_EVENT_SERVER_TX; + evt.enqueue_length = total_copy_bytes; + evt.event_id = serial_number++; + + unix_shared_memory_queue_add (bsm->vpp_queue[s->thread_index], (u8 *) & evt, + 0 /* do wait for mutex */ ); + return 0; } @@ -68,7 +141,8 @@ static session_cb_vft_t builtin_session_cb_vft = { .session_connected_callback = builtin_session_connected_callback, .add_segment_callback = builtin_add_segment_callback, .redirect_connect_callback = builtin_redirect_connect_callback, - .builtin_server_rx_callback = builtin_server_rx_callback + .builtin_server_rx_callback = builtin_server_rx_callback, + .session_reset_callback = builtin_session_reset_callback }; static int @@ -77,6 +151,11 @@ server_create (vlib_main_t * vm) vnet_bind_args_t _a, *a = &_a; u64 options[SESSION_OPTIONS_N_OPTIONS]; char segment_name[128]; + u32 num_threads; + vlib_thread_main_t *vtm = vlib_get_thread_main (); + + num_threads = 1 /* main thread */ + vtm->n_threads; + vec_validate (builtin_server_main.vpp_queue, num_threads - 1); memset (a, 0, sizeof (*a)); memset (options, 0, sizeof (options)); @@ -110,6 +189,7 @@ server_create_command_fn (vlib_main_t * vm, } #endif + vnet_session_enable_disable (vm, 1 /* turn on TCP, etc. */ ); rv = server_create (vm); switch (rv) { @@ -121,10 +201,14 @@ server_create_command_fn (vlib_main_t * vm, return 0; } +/* *INDENT-OFF* */ VLIB_CLI_COMMAND (server_create_command, static) = { -.path = "test server",.short_help = "test server",.function = - server_create_command_fn,}; + .path = "test server", + .short_help = "test server", + .function = server_create_command_fn, +}; +/* *INDENT-ON* */ /* * fd.io coding-style-patch-verification: ON diff --git a/src/vnet/tcp/tcp.c b/src/vnet/tcp/tcp.c index 69433e26..d2df5c3e 100644 --- a/src/vnet/tcp/tcp.c +++ b/src/vnet/tcp/tcp.c @@ -139,6 +139,20 @@ tcp_connection_del (tcp_connection_t * tc) tcp_connection_cleanup (tc); } +/** Notify session that connection has been reset. + * + * Switch state to closed and wait for session to call cleanup. + */ +void +tcp_connection_reset (tcp_connection_t * tc) +{ + if (tc->state == TCP_STATE_CLOSED) + return; + + tc->state = TCP_STATE_CLOSED; + stream_session_reset_notify (&tc->connection); +} + /** * Begin connection closing procedure. * @@ -149,6 +163,8 @@ tcp_connection_del (tcp_connection_t * tc) * calls cleanup. * 2) TIME_WAIT (active close) whereby after 2MSL the 2MSL timer triggers * and cleanup is called. + * + * N.B. 
Half-close connections are not supported */ void tcp_connection_close (tcp_connection_t * tc) @@ -166,9 +182,9 @@ tcp_connection_close (tcp_connection_t * tc) else if (tc->state == TCP_STATE_CLOSE_WAIT) tc->state = TCP_STATE_LAST_ACK; - /* Half-close connections are not supported XXX */ - - if (tc->state == TCP_STATE_CLOSED) + /* If in CLOSED and WAITCLOSE timer is not set, delete connection now */ + if (tc->timers[TCP_TIMER_WAITCLOSE] == TCP_TIMER_HANDLE_INVALID + && tc->state == TCP_STATE_CLOSED) tcp_connection_del (tc); } @@ -185,7 +201,10 @@ tcp_session_cleanup (u32 conn_index, u32 thread_index) { tcp_connection_t *tc; tc = tcp_connection_get (conn_index, thread_index); - tcp_connection_cleanup (tc); + + /* Wait for the session tx events to clear */ + tc->state = TCP_STATE_CLOSED; + tcp_timer_update (tc, TCP_TIMER_WAITCLOSE, TCP_CLEANUP_TIME); } void * @@ -227,7 +246,8 @@ tcp_allocate_local_port (tcp_main_t * tm, ip46_address_t * ip) { transport_endpoint_t *tep; u32 time_now, tei; - u16 min = 1024, max = 65535, tries; /* XXX configurable ? */ + u16 min = 1024, max = 65535; /* XXX configurable ? */ + int tries; tries = max - min; time_now = tcp_time_now (); @@ -505,10 +525,10 @@ tcp_session_send_space (transport_connection_t * trans_conn) } u32 -tcp_session_rx_fifo_offset (transport_connection_t * trans_conn) +tcp_session_tx_fifo_offset (transport_connection_t * trans_conn) { tcp_connection_t *tc = (tcp_connection_t *) trans_conn; - return (tc->snd_una_max - tc->snd_una); + return (tc->snd_nxt - tc->snd_una); } /* *INDENT-OFF* */ @@ -524,7 +544,7 @@ const static transport_proto_vft_t tcp4_proto = { .cleanup = tcp_session_cleanup, .send_mss = tcp_session_send_mss, .send_space = tcp_session_send_space, - .rx_fifo_offset = tcp_session_rx_fifo_offset, + .tx_fifo_offset = tcp_session_tx_fifo_offset, .format_connection = format_tcp_session_ip4, .format_listener = format_tcp_listener_session_ip4, .format_half_open = format_tcp_half_open_session_ip4 @@ -542,7 +562,7 @@ const static transport_proto_vft_t tcp6_proto = { .cleanup = tcp_session_cleanup, .send_mss = tcp_session_send_mss, .send_space = tcp_session_send_space, - .rx_fifo_offset = tcp_session_rx_fifo_offset, + .tx_fifo_offset = tcp_session_tx_fifo_offset, .format_connection = format_tcp_session_ip6, .format_listener = format_tcp_listener_session_ip6, .format_half_open = format_tcp_half_open_session_ip6 @@ -579,13 +599,32 @@ tcp_timer_establish_handler (u32 conn_index) } void -tcp_timer_2msl_handler (u32 conn_index) +tcp_timer_waitclose_handler (u32 conn_index) { u32 cpu_index = os_get_cpu_number (); tcp_connection_t *tc; tc = tcp_connection_get (conn_index, cpu_index); - tc->timers[TCP_TIMER_2MSL] = TCP_TIMER_HANDLE_INVALID; + tc->timers[TCP_TIMER_WAITCLOSE] = TCP_TIMER_HANDLE_INVALID; + + /* Session didn't come back with a close(). Send FIN either way + * and switch to LAST_ACK. */ + if (tc->state == TCP_STATE_CLOSE_WAIT) + { + if (tc->flags & TCP_CONN_FINSNT) + { + clib_warning ("FIN was sent and still in CLOSE WAIT. 
Weird!"); + } + + tcp_send_fin (tc); + tc->state = TCP_STATE_LAST_ACK; + + /* Make sure we don't wait in LAST ACK forever */ + tcp_timer_set (tc, TCP_TIMER_WAITCLOSE, TCP_2MSL_TIME); + + /* Don't delete the connection yet */ + return; + } tcp_connection_del (tc); } @@ -597,7 +636,7 @@ static timer_expiration_handler *timer_expiration_handlers[TCP_N_TIMERS] = tcp_timer_delack_handler, 0, tcp_timer_keep_handler, - tcp_timer_2msl_handler, + tcp_timer_waitclose_handler, tcp_timer_retransmit_syn_handler, tcp_timer_establish_handler }; diff --git a/src/vnet/tcp/tcp.h b/src/vnet/tcp/tcp.h index 7d443433..3b3d8fc7 100644 --- a/src/vnet/tcp/tcp.h +++ b/src/vnet/tcp/tcp.h @@ -63,8 +63,8 @@ format_function_t format_tcp_state; _(DELACK, "DELAYED ACK") \ _(PERSIST, "PERSIST") \ _(KEEP, "KEEP") \ - _(2MSL, "2MSL") \ - _(RETRANSMIT_SYN, "RETRANSMIT_SYN") \ + _(WAITCLOSE, "WAIT CLOSE") \ + _(RETRANSMIT_SYN, "RETRANSMIT SYN") \ _(ESTABLISH, "ESTABLISH") typedef enum _tcp_timers @@ -89,6 +89,8 @@ extern timer_expiration_handler tcp_timer_retransmit_syn_handler; #define TCP_DELACK_TIME 1 /* 0.1s */ #define TCP_ESTABLISH_TIME 750 /* 75s */ #define TCP_2MSL_TIME 300 /* 30s */ +#define TCP_CLOSEWAIT_TIME 1 /* 0.1s */ +#define TCP_CLEANUP_TIME 5 /* 0.5s Time to wait before cleanup */ #define TCP_RTO_MAX 60 * THZ /* Min max RTO (60s) as per RFC6298 */ #define TCP_RTT_MAX 30 * THZ /* 30s (probably too much) */ @@ -102,6 +104,7 @@ void tcp_update_time (f64 now, u32 thread_index); _(DELACK, "Delay ACK") \ _(SNDACK, "Send ACK") \ _(BURSTACK, "Burst ACK set") \ + _(FINSNT, "FIN sent") \ _(SENT_RCV_WND0, "Sent 0 receive window") \ _(RECOVERY, "Recovery on") \ _(FAST_RECOVERY, "Fast Recovery on") @@ -331,6 +334,8 @@ clib_error_t *vnet_tcp_enable_disable (vlib_main_t * vm, u8 is_en); always_inline tcp_connection_t * tcp_connection_get (u32 conn_index, u32 thread_index) { + if (pool_is_free_index (tcp_main.connections[thread_index], conn_index)) + return 0; return pool_elt_at_index (tcp_main.connections[thread_index], conn_index); } @@ -347,6 +352,7 @@ tcp_connection_get_if_valid (u32 conn_index, u32 thread_index) void tcp_connection_close (tcp_connection_t * tc); void tcp_connection_cleanup (tcp_connection_t * tc); void tcp_connection_del (tcp_connection_t * tc); +void tcp_connection_reset (tcp_connection_t * tc); always_inline tcp_connection_t * tcp_listener_get (u32 tli) @@ -361,7 +367,7 @@ tcp_half_open_connection_get (u32 conn_index) } void tcp_make_ack (tcp_connection_t * ts, vlib_buffer_t * b); -void tcp_make_finack (tcp_connection_t * tc, vlib_buffer_t * b); +void tcp_make_fin (tcp_connection_t * tc, vlib_buffer_t * b); void tcp_make_synack (tcp_connection_t * ts, vlib_buffer_t * b); void tcp_send_reset (vlib_buffer_t * pkt, u8 is_ip4); void tcp_send_syn (tcp_connection_t * tc); @@ -467,7 +473,7 @@ tcp_timer_set (tcp_connection_t * tc, u8 timer_id, u32 interval) } always_inline void -tcp_retransmit_timer_set (tcp_main_t * tm, tcp_connection_t * tc) +tcp_retransmit_timer_set (tcp_connection_t * tc) { /* XXX Switch to faster TW */ tcp_timer_set (tc, TCP_TIMER_RETRANSMIT, diff --git a/src/vnet/tcp/tcp_error.def b/src/vnet/tcp/tcp_error.def index cff5ec13..2dbdd9b3 100644 --- a/src/vnet/tcp/tcp_error.def +++ b/src/vnet/tcp/tcp_error.def @@ -17,13 +17,13 @@ tcp_error (NONE, "no error") tcp_error (NO_LISTENER, "no listener for dst port") tcp_error (LOOKUP_DROPS, "lookup drops") tcp_error (DISPATCH, "Dispatch error") -tcp_error (ENQUEUED, "Packets pushed into rx fifo") +tcp_error (ENQUEUED, "Packets pushed into rx fifo") 
tcp_error (PURE_ACK, "Pure acks") tcp_error (SYNS_RCVD, "SYNs received") tcp_error (SYN_ACKS_RCVD, "SYN-ACKs received") -tcp_error (NOT_READY, "Session not ready for packets") -tcp_error (FIFO_FULL, "Packets dropped for lack of rx fifo space") -tcp_error (EVENT_FIFO_FULL, "Events not sent for lack of event fifo space") +tcp_error (NOT_READY, "Session not ready for packets") +tcp_error (FIFO_FULL, "Packets dropped for lack of rx fifo space") +tcp_error (EVENT_FIFO_FULL, "Events not sent for lack of event fifo space") tcp_error (API_QUEUE_FULL, "Sessions not created for lack of API queue space") tcp_error (CREATE_SESSION_FAIL, "Sessions couldn't be allocated") tcp_error (SEGMENT_INVALID, "Invalid segment") @@ -32,4 +32,5 @@ tcp_error (ACK_DUP, "Duplicate ACK") tcp_error (ACK_OLD, "Old ACK") tcp_error (PKTS_SENT, "Packets sent") tcp_error (FILTERED_DUPACKS, "Filtered duplicate ACKs") -tcp_error (RST_SENT, "Resets sent") \ No newline at end of file +tcp_error (RST_SENT, "Resets sent") +tcp_error (INVALID_CONNECTION, "Invalid connection") diff --git a/src/vnet/tcp/tcp_input.c b/src/vnet/tcp/tcp_input.c index 0a907d0a..f19fbf87 100644 --- a/src/vnet/tcp/tcp_input.c +++ b/src/vnet/tcp/tcp_input.c @@ -274,10 +274,7 @@ tcp_segment_validate (vlib_main_t * vm, tcp_connection_t * tc0, /* 2nd: check the RST bit */ if (tcp_rst (th0)) { - /* Notify session that connection has been reset. Switch - * state to closed and await for session to do the cleanup. */ - stream_session_reset_notify (&tc0->connection); - tc0->state = TCP_STATE_CLOSED; + tcp_connection_reset (tc0); return -1; } @@ -1023,6 +1020,12 @@ tcp46_established_inline (vlib_main_t * vm, vlib_node_runtime_t * node, tc0 = tcp_connection_get (vnet_buffer (b0)->tcp.connection_index, my_thread_index); + if (PREDICT_FALSE (tc0 == 0)) + { + error0 = TCP_ERROR_INVALID_CONNECTION; + goto drop; + } + /* Checksum computed by ipx_local no need to compute again */ if (is_ip4) @@ -1072,12 +1075,12 @@ tcp46_established_inline (vlib_main_t * vm, vlib_node_runtime_t * node, /* 8: check the FIN bit */ if (tcp_fin (th0)) { - /* Send ACK and enter CLOSE-WAIT */ - tcp_make_ack (tc0, b0); - tcp_connection_force_ack (tc0, b0); - next0 = tcp_next_output (tc0->c_is_ip4); + /* Enter CLOSE-WAIT and notify session. Don't send ACK, instead + * wait for session to call close. To avoid lingering + * in CLOSE-WAIT, set timer (reuse WAITCLOSE). */ tc0->state = TCP_STATE_CLOSE_WAIT; stream_session_disconnect_notify (&tc0->connection); + tcp_timer_set (tc0, TCP_TIMER_WAITCLOSE, TCP_CLOSEWAIT_TIME); } drop: @@ -1468,7 +1471,7 @@ VLIB_REGISTER_NODE (tcp6_syn_sent_node) = VLIB_NODE_FUNCTION_MULTIARCH (tcp6_syn_sent_node, tcp6_syn_sent_rcv); /** - * Handles reception for all states except LISTEN, SYN-SEND and ESTABLISHED + * Handles reception for all states except LISTEN, SYN-SENT and ESTABLISHED * as per RFC793 p. 
64 */ always_inline uword @@ -1511,6 +1514,11 @@ tcp46_rcv_process_inline (vlib_main_t * vm, vlib_node_runtime_t * node, b0 = vlib_get_buffer (vm, bi0); tc0 = tcp_connection_get (vnet_buffer (b0)->tcp.connection_index, my_thread_index); + if (PREDICT_FALSE (tc0 == 0)) + { + error0 = TCP_ERROR_INVALID_CONNECTION; + goto drop; + } /* Checksum computed by ipx_local no need to compute again */ @@ -1587,7 +1595,8 @@ tcp46_rcv_process_inline (vlib_main_t * vm, vlib_node_runtime_t * node, /* Shoulder tap the server */ stream_session_accept_notify (&tc0->connection); - tcp_timer_reset (tc0, TCP_TIMER_RETRANSMIT_SYN); + /* Reset SYN-ACK retransmit timer */ + tcp_timer_reset (tc0, TCP_TIMER_RETRANSMIT); break; case TCP_STATE_ESTABLISHED: /* We can get packets in established state here because they @@ -1602,9 +1611,14 @@ tcp46_rcv_process_inline (vlib_main_t * vm, vlib_node_runtime_t * node, * continue processing in that state. */ if (tcp_rcv_ack (tc0, b0, tcp0, &next0, &error0)) goto drop; - tc0->state = TCP_STATE_FIN_WAIT_2; - /* Stop all timers, 2MSL will be set lower */ - tcp_connection_timers_reset (tc0); + + /* If FIN is ACKed */ + if (tc0->snd_una == tc0->snd_una_max) + { + tc0->state = TCP_STATE_FIN_WAIT_2; + /* Stop all timers, 2MSL will be set lower */ + tcp_connection_timers_reset (tc0); + } break; case TCP_STATE_FIN_WAIT_2: /* In addition to the processing for the ESTABLISHED state, if @@ -1639,7 +1653,17 @@ tcp46_rcv_process_inline (vlib_main_t * vm, vlib_node_runtime_t * node, if (!tcp_rcv_ack_is_acceptable (tc0, b0)) goto drop; - tcp_connection_del (tc0); + tc0->state = TCP_STATE_CLOSED; + + /* Don't delete the connection/session yet. Instead, wait a + * reasonable amount of time until the pipes are cleared. In + * particular, this makes sure that we won't have dead sessions + * when processing events on the tx path */ + tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE, TCP_CLEANUP_TIME); + + /* Stop retransmit */ + tcp_timer_reset (tc0, TCP_TIMER_RETRANSMIT); + goto drop; break; @@ -1684,7 +1708,7 @@ tcp46_rcv_process_inline (vlib_main_t * vm, vlib_node_runtime_t * node, case TCP_STATE_SYN_RCVD: /* Send FIN-ACK notify app and enter CLOSE-WAIT */ tcp_connection_timers_reset (tc0); - tcp_make_finack (tc0, b0); + tcp_make_fin (tc0, b0); next0 = tcp_next_output (tc0->c_is_ip4); stream_session_disconnect_notify (&tc0->connection); tc0->state = TCP_STATE_CLOSE_WAIT; @@ -1697,12 +1721,12 @@ tcp46_rcv_process_inline (vlib_main_t * vm, vlib_node_runtime_t * node, case TCP_STATE_FIN_WAIT_1: tc0->state = TCP_STATE_TIME_WAIT; tcp_connection_timers_reset (tc0); - tcp_timer_set (tc0, TCP_TIMER_2MSL, TCP_2MSL_TIME); + tcp_timer_set (tc0, TCP_TIMER_WAITCLOSE, TCP_2MSL_TIME); break; case TCP_STATE_FIN_WAIT_2: /* Got FIN, send ACK! */ tc0->state = TCP_STATE_TIME_WAIT; - tcp_timer_set (tc0, TCP_TIMER_2MSL, TCP_2MSL_TIME); + tcp_timer_set (tc0, TCP_TIMER_WAITCLOSE, TCP_2MSL_TIME); tcp_make_ack (tc0, b0); next0 = tcp_next_output (is_ip4); break; @@ -1710,7 +1734,7 @@ tcp46_rcv_process_inline (vlib_main_t * vm, vlib_node_runtime_t * node, /* Remain in the TIME-WAIT state. Restart the 2 MSL time-wait * timeout. 
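A related subtlety fixed above: an acceptable ACK in FIN_WAIT_1 is no longer enough to advance, because it may only cover data sent before the FIN. Since the FIN consumes one unit of sequence space, "our FIN was ACKed" reduces to a marker comparison (a restatement of the patch's condition):

/* snd_una_max covers everything sent, including the FIN's sequence
 * slot; only when snd_una catches up has the peer ACKed the FIN. */
if (tc0->snd_una == tc0->snd_una_max)
  {
    tc0->state = TCP_STATE_FIN_WAIT_2;
    tcp_connection_timers_reset (tc0);  /* 2MSL is set later, on FIN */
  }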
*/ - tcp_timer_update (tc0, TCP_TIMER_2MSL, TCP_2MSL_TIME); + tcp_timer_update (tc0, TCP_TIMER_WAITCLOSE, TCP_2MSL_TIME); break; } @@ -2113,6 +2137,7 @@ tcp46_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node, n_left_to_next -= 1; b0 = vlib_get_buffer (vm, bi0); + vnet_buffer (b0)->tcp.flags = 0; if (is_ip4) { @@ -2168,7 +2193,6 @@ tcp46_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node, /* Send reset */ next0 = TCP_INPUT_NEXT_RESET; error0 = TCP_ERROR_NO_LISTENER; - vnet_buffer (b0)->tcp.flags = 0; } b0->error = error0 ? node->errors[error0] : 0; @@ -2288,6 +2312,7 @@ do { \ _(ESTABLISHED, TCP_FLAG_FIN, TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE); _(ESTABLISHED, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE); + _(ESTABLISHED, TCP_FLAG_RST, TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE); /* ACK or FIN-ACK to our FIN */ _(FIN_WAIT_1, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE); _(FIN_WAIT_1, TCP_FLAG_ACK | TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, diff --git a/src/vnet/tcp/tcp_output.c b/src/vnet/tcp/tcp_output.c index 7e431cd0..aa43e9f3 100644 --- a/src/vnet/tcp/tcp_output.c +++ b/src/vnet/tcp/tcp_output.c @@ -396,6 +396,7 @@ tcp_reuse_buffer (vlib_main_t * vm, vlib_buffer_t * b) /* Leave enough space for headers */ vlib_buffer_make_headroom (b, MAX_HDRS_LEN); + vnet_buffer (b)->tcp.flags = 0; } /** @@ -443,16 +444,22 @@ tcp_make_ack (tcp_connection_t * tc, vlib_buffer_t * b) * Convert buffer to FIN-ACK */ void -tcp_make_finack (tcp_connection_t * tc, vlib_buffer_t * b) +tcp_make_fin (tcp_connection_t * tc, vlib_buffer_t * b) { tcp_main_t *tm = vnet_get_tcp_main (); vlib_main_t *vm = tm->vlib_main; + u8 flags = 0; tcp_reuse_buffer (vm, b); - tcp_make_ack_i (tc, b, TCP_STATE_ESTABLISHED, TCP_FLAG_ACK | TCP_FLAG_FIN); + + if (tc->rcv_las == tc->rcv_nxt) + flags = TCP_FLAG_FIN; + else + flags = TCP_FLAG_FIN | TCP_FLAG_ACK; + + tcp_make_ack_i (tc, b, TCP_STATE_ESTABLISHED, flags); /* Reset flags, make sure ack is sent */ - tc->flags = TCP_CONN_SNDACK; vnet_buffer (b)->tcp.flags &= ~TCP_BUF_FLAG_DUPACK; tc->snd_nxt += 1; @@ -500,7 +507,7 @@ tcp_make_synack (tcp_connection_t * tc, vlib_buffer_t * b) vnet_buffer (b)->tcp.flags = TCP_BUF_FLAG_ACK; /* Init retransmit timer */ - tcp_retransmit_timer_set (tm, tc); + tcp_retransmit_timer_set (tc); } always_inline void @@ -818,9 +825,9 @@ tcp_send_fin (tcp_connection_t * tc) /* Leave enough space for headers */ vlib_buffer_make_headroom (b, MAX_HDRS_LEN); - tcp_make_finack (tc, b); - + tcp_make_fin (tc, b); tcp_enqueue_to_output (vm, b, bi, tc->c_is_ip4); + tc->flags |= TCP_CONN_FINSNT; } always_inline u8 @@ -1038,7 +1045,7 @@ tcp_timer_retransmit_handler_i (u32 index, u8 is_syn) tcp_enqueue_to_output (vm, b, bi, tc->c_is_ip4); /* Re-enable retransmit timer */ - tcp_retransmit_timer_set (tm, tc); + tcp_retransmit_timer_set (tc); } else { @@ -1139,7 +1146,6 @@ tcp46_output_inline (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * from_frame, int is_ip4) { - tcp_main_t *tm = vnet_get_tcp_main (); u32 n_left_from, next_index, *from, *to_next; u32 my_thread_index = vm->cpu_index; @@ -1172,6 +1178,13 @@ tcp46_output_inline (vlib_main_t * vm, b0 = vlib_get_buffer (vm, bi0); tc0 = tcp_connection_get (vnet_buffer (b0)->tcp.connection_index, my_thread_index); + if (PREDICT_FALSE (tc0 == 0 || tc0->state == TCP_STATE_CLOSED)) + { + error0 = TCP_ERROR_INVALID_CONNECTION; + next0 = TCP_OUTPUT_NEXT_DROP; + goto done; + } + th0 = vlib_buffer_get_current (b0); if (is_ip4) @@ -1229,6 +1242,22 @@ 
tcp46_output_inline (vlib_main_t * vm, tc0->rtt_ts = tcp_time_now (); tc0->rtt_seq = tc0->snd_nxt; } + + if (1) + { + ELOG_TYPE_DECLARE (e) = + { + .format = + "output: snd_una %u snd_una_max %u",.format_args = + "i4i4",}; + struct + { + u32 data[2]; + } *ed; + ed = ELOG_DATA (&vm->elog_main, e); + ed->data[0] = tc0->snd_una - tc0->iss; + ed->data[1] = tc0->snd_una_max - tc0->iss; + } } /* Set the retransmit timer if not set already and not @@ -1236,7 +1265,7 @@ tcp46_output_inline (vlib_main_t * vm, if (!tcp_timer_is_active (tc0, TCP_TIMER_RETRANSMIT) && tc0->snd_nxt != tc0->snd_una) { - tcp_retransmit_timer_set (tm, tc0); + tcp_retransmit_timer_set (tc0); tc0->rto_boff = 0; } diff --git a/src/vnet/udp/builtin_server.c b/src/vnet/udp/builtin_server.c index afa66ba4..46c8e734 100644 --- a/src/vnet/udp/builtin_server.c +++ b/src/vnet/udp/builtin_server.c @@ -39,7 +39,7 @@ builtin_session_disconnect_callback (stream_session_t * s) } static int -builtin_server_rx_callback (stream_session_t * s) +builtin_server_rx_callback (stream_session_t * s, session_fifo_event_t * ep) { svm_fifo_t *rx_fifo, *tx_fifo; u32 this_transfer; diff --git a/src/vnet/udp/udp_input.c b/src/vnet/udp/udp_input.c index 4d509335..88278735 100644 --- a/src/vnet/udp/udp_input.c +++ b/src/vnet/udp/udp_input.c @@ -244,19 +244,19 @@ udp4_uri_input_node_fn (vlib_main_t * vm, /* Get session's server */ server0 = application_get (s0->app_index); - /* Built-in server? Deliver the goods... */ - if (server0->cb_fns.builtin_server_rx_callback) - { - server0->cb_fns.builtin_server_rx_callback (s0); - continue; - } - /* Fabricate event */ evt.fifo = s0->server_rx_fifo; evt.event_type = FIFO_EVENT_SERVER_RX; evt.event_id = serial_number++; evt.enqueue_length = svm_fifo_max_dequeue (s0->server_rx_fifo); + /* Built-in server? Deliver the goods... 
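 */

/* A minimal sketch, not part of the patch: a built-in rx handler
 * matching the new callback signature. The buffer size, the echo
 * behavior and the exact signature of svm_fifo_dequeue_nowait are
 * assumptions for illustration. */
static int
example_builtin_rx_callback (stream_session_t * s, session_fifo_event_t * ep)
{
  u8 buf[1024];
  int n_read;

  /* pid of 0: built-in apps run inside the vpp process itself */
  n_read = svm_fifo_dequeue_nowait (s->server_rx_fifo, 0,
				    sizeof (buf), buf);
  if (n_read > 0)
    svm_fifo_enqueue_nowait (s->server_tx_fifo, 0, n_read, buf);
  return 0;
}

/* Deliver directly when a built-in rx callback is registered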
*/ + if (server0->cb_fns.builtin_server_rx_callback) + { + server0->cb_fns.builtin_server_rx_callback (s0, &evt); + continue; + } + /* Add event to server's event queue */ q = server0->event_queue; diff --git a/src/vnet/unix/tapcli.c b/src/vnet/unix/tapcli.c index 496f3885..fb1a8bac 100644 --- a/src/vnet/unix/tapcli.c +++ b/src/vnet/unix/tapcli.c @@ -1435,7 +1435,8 @@ done: VLIB_CLI_COMMAND (tap_connect_command, static) = { .path = "tap connect", - .short_help = "tap connect [hwaddr ]", + .short_help = + "tap connect [address /mw] [hwaddr ]", .function = tap_connect_command_fn, }; -- cgit 1.2.3-korg From 6cf30adc2cd3aa818e5d97cf71ea8b2fc2aaefa7 Mon Sep 17 00:00:00 2001 From: Florin Coras Date: Tue, 4 Apr 2017 23:08:23 -0700 Subject: Session layer refactoring Major refactoring of the session layer api - Add attach api for application binding to the session layer - Simplify listen/connect calls - Update application CLI - Add transport endpoint to accept callback - Associate segment manager to application and allow for multiple binds/connects per app Additional: - svm fifo cleanup - add fifo free, format fns - add fifo offset enqueue unit test Change-Id: Id93a65047de61afc2bf3d58c9b544339c02065af Signed-off-by: Florin Coras Signed-off-by: Dave Barach --- src/scripts/vnet/uri/udp | 3 +- src/svm/svm_fifo.c | 66 ++- src/svm/svm_fifo.h | 32 +- src/svm/svm_fifo_segment.h | 14 +- src/uri/uri_tcp_test.c | 315 +++++++++----- src/uri/uri_udp_test.c | 326 +++++++++------ src/vnet.am | 2 + src/vnet/api_errno.h | 4 +- src/vnet/session/application.c | 458 +++++++++++++++------ src/vnet/session/application.h | 77 ++-- src/vnet/session/application_interface.c | 278 ++++++------- src/vnet/session/application_interface.h | 45 +- src/vnet/session/segment_manager.c | 342 ++++++++++++++++ src/vnet/session/segment_manager.h | 106 +++++ src/vnet/session/session.api | 237 +++++------ src/vnet/session/session.c | 564 +++++++------------------ src/vnet/session/session.h | 175 +++++--- src/vnet/session/session_api.c | 678 ++++++++++++------------------- src/vnet/session/transport.h | 23 +- src/vnet/tcp/builtin_client.c | 161 ++++++-- src/vnet/tcp/builtin_client.h | 7 +- src/vnet/tcp/builtin_server.c | 206 +++++++++- src/vnet/tcp/tcp.c | 20 +- src/vnet/tcp/tcp.h | 9 +- src/vnet/tcp/tcp_input.c | 7 +- src/vnet/tcp/tcp_test.c | 127 +++++- src/vnet/udp/builtin_server.c | 34 +- 27 files changed, 2601 insertions(+), 1715 deletions(-) create mode 100644 src/vnet/session/segment_manager.c create mode 100644 src/vnet/session/segment_manager.h (limited to 'src/vnet/session/application_interface.c') diff --git a/src/scripts/vnet/uri/udp b/src/scripts/vnet/uri/udp index ca13b83c..c7628f49 100644 --- a/src/scripts/vnet/uri/udp +++ b/src/scripts/vnet/uri/udp @@ -1,5 +1,5 @@ loop create -set int ip address loop0 10.0.0.1/32 +set int ip address loop0 6.0.0.1/32 set int state loop0 up packet-generator new { @@ -17,3 +17,4 @@ packet-generator new { incrementing 100 } } +session enable diff --git a/src/svm/svm_fifo.c b/src/svm/svm_fifo.c index cc84feb9..097bab77 100644 --- a/src/svm/svm_fifo.c +++ b/src/svm/svm_fifo.c @@ -20,8 +20,6 @@ svm_fifo_t * svm_fifo_create (u32 data_size_in_bytes) { svm_fifo_t *f; - pthread_mutexattr_t attr; - pthread_condattr_t cattr; f = clib_mem_alloc_aligned_or_null (sizeof (*f) + data_size_in_bytes, CLIB_CACHE_LINE_BYTES); @@ -32,29 +30,16 @@ svm_fifo_create (u32 data_size_in_bytes) f->nitems = data_size_in_bytes; f->ooos_list_head = OOO_SEGMENT_INVALID_INDEX; - memset (&attr, 0, sizeof (attr)); - 
memset (&cattr, 0, sizeof (cattr)); - - if (pthread_mutexattr_init (&attr)) - clib_unix_warning ("mutexattr_init"); - if (pthread_mutexattr_setpshared (&attr, PTHREAD_PROCESS_SHARED)) - clib_unix_warning ("pthread_mutexattr_setpshared"); - if (pthread_mutex_init (&f->mutex, &attr)) - clib_unix_warning ("mutex_init"); - if (pthread_mutexattr_destroy (&attr)) - clib_unix_warning ("mutexattr_destroy"); - if (pthread_condattr_init (&cattr)) - clib_unix_warning ("condattr_init"); - if (pthread_condattr_setpshared (&cattr, PTHREAD_PROCESS_SHARED)) - clib_unix_warning ("condattr_setpshared"); - if (pthread_cond_init (&f->condvar, &cattr)) - clib_unix_warning ("cond_init1"); - if (pthread_condattr_destroy (&cattr)) - clib_unix_warning ("cond_init2"); - return (f); } +void +svm_fifo_free (svm_fifo_t * f) +{ + pool_free (f->ooo_segments); + clib_mem_free (f); +} + always_inline ooo_segment_t * ooo_segment_new (svm_fifo_t * f, u32 start, u32 length) { @@ -567,6 +552,43 @@ svm_fifo_dequeue_drop (svm_fifo_t * f, int pid, u32 max_bytes) return total_drop_bytes; } +u8 * +format_svm_fifo (u8 * s, va_list * args) +{ + svm_fifo_t *f = va_arg (*args, svm_fifo_t *); + int verbose = va_arg (*args, int); + + s = format (s, "cursize %u nitems %u has_event %d\n", + f->cursize, f->nitems, f->has_event); + s = format (s, "head %d tail %d\n", f->head, f->tail); + + if (verbose > 1) + s = format + (s, "server session %d thread %d client session %d thread %d\n", + f->server_session_index, f->server_thread_index, + f->client_session_index, f->client_thread_index); + + if (verbose) + { + ooo_segment_t *seg; + u32 seg_index; + + s = + format (s, "ooo pool %d active elts\n", pool_elts (f->ooo_segments)); + + seg_index = f->ooos_list_head; + + while (seg_index != OOO_SEGMENT_INVALID_INDEX) + { + seg = pool_elt_at_index (f->ooo_segments, seg_index); + s = format (s, " pos %u, len %u next %d\n", + seg->fifo_position, seg->length, seg->next); + seg_index = seg->next; + } + } + return s; +} + /* * fd.io coding-style-patch-verification: ON * diff --git a/src/svm/svm_fifo.h b/src/svm/svm_fifo.h index 80e5b0f2..9beb63f5 100644 --- a/src/svm/svm_fifo.h +++ b/src/svm/svm_fifo.h @@ -48,10 +48,6 @@ typedef struct u32 nitems; CLIB_CACHE_LINE_ALIGN_MARK (end_cursize); - pthread_mutex_t mutex; /* 8 bytes */ - pthread_cond_t condvar; /* 8 bytes */ - svm_lock_tag_t tag; - volatile u8 has_event; /**< non-zero if deq event exists */ u32 owner_pid; @@ -60,6 +56,7 @@ typedef struct u32 client_session_index; u8 server_thread_index; u8 client_thread_index; + u32 segment_manager; CLIB_CACHE_LINE_ALIGN_MARK (end_shared); u32 head; CLIB_CACHE_LINE_ALIGN_MARK (end_consumer); @@ -74,30 +71,6 @@ typedef struct CLIB_CACHE_LINE_ALIGN_MARK (data); } svm_fifo_t; -static inline int -svm_fifo_lock (svm_fifo_t * f, u32 pid, u32 tag, int nowait) -{ - if (PREDICT_TRUE (nowait == 0)) - pthread_mutex_lock (&f->mutex); - else - { - if (pthread_mutex_trylock (&f->mutex)) - return -1; - } - f->owner_pid = pid; - f->tag = tag; - return 0; -} - -static inline void -svm_fifo_unlock (svm_fifo_t * f) -{ - f->owner_pid = 0; - f->tag = 0; - CLIB_MEMORY_BARRIER (); - pthread_mutex_unlock (&f->mutex); -} - static inline u32 svm_fifo_max_dequeue (svm_fifo_t * f) { @@ -139,6 +112,7 @@ svm_fifo_unset_event (svm_fifo_t * f) } svm_fifo_t *svm_fifo_create (u32 data_size_in_bytes); +void svm_fifo_free (svm_fifo_t * f); int svm_fifo_enqueue_nowait (svm_fifo_t * f, int pid, u32 max_bytes, u8 * copy_from_here); @@ -154,6 +128,8 @@ int svm_fifo_peek (svm_fifo_t * f, int pid, u32 
offset, u32 max_bytes, u8 * copy_here); int svm_fifo_dequeue_drop (svm_fifo_t * f, int pid, u32 max_bytes); +format_function_t format_svm_fifo; + always_inline ooo_segment_t * svm_fifo_newest_ooo_segment (svm_fifo_t * f) { diff --git a/src/svm/svm_fifo_segment.h b/src/svm/svm_fifo_segment.h index ecb5653a..9ab47a4c 100644 --- a/src/svm/svm_fifo_segment.h +++ b/src/svm/svm_fifo_segment.h @@ -55,6 +55,18 @@ svm_fifo_get_segment (u32 segment_index) return vec_elt_at_index (ssm->segments, segment_index); } +static inline u8 +svm_fifo_segment_has_fifos (svm_fifo_segment_private_t * fifo_segment) +{ + return vec_len ((svm_fifo_t **) fifo_segment->h->fifos) != 0; +} + +static inline svm_fifo_t ** +svm_fifo_segment_get_fifos (svm_fifo_segment_private_t * fifo_segment) +{ + return (svm_fifo_t **) fifo_segment->h->fifos; +} + #define foreach_ssvm_fifo_segment_api_error \ _(OUT_OF_SPACE, "Out of space in segment", -200) @@ -73,9 +85,7 @@ svm_fifo_t *svm_fifo_segment_alloc_fifo (svm_fifo_segment_private_t * s, u32 data_size_in_bytes); void svm_fifo_segment_free_fifo (svm_fifo_segment_private_t * s, svm_fifo_t * f); - void svm_fifo_segment_init (u64 baseva, u32 timeout_in_seconds); - u32 svm_fifo_segment_index (svm_fifo_segment_private_t * s); #endif /* __included_ssvm_fifo_segment_h__ */ diff --git a/src/uri/uri_tcp_test.c b/src/uri/uri_tcp_test.c index e2834817..c057e06e 100644 --- a/src/uri/uri_tcp_test.c +++ b/src/uri/uri_tcp_test.c @@ -15,8 +15,6 @@ #include #include -#include -#include #include #include #include @@ -47,8 +45,7 @@ typedef struct svm_fifo_t *server_rx_fifo; svm_fifo_t *server_tx_fifo; - u32 vpp_session_index; - u32 vpp_session_thread; + u32 vpp_session_handle; } session_t; typedef enum @@ -116,7 +113,7 @@ typedef struct pthread_t client_rx_thread_handle; u32 client_bytes_received; u8 test_return_packets; - u32 bytes_to_send; + u64 bytes_to_send; /* convenience */ svm_fifo_segment_main_t *segment_main; @@ -152,6 +149,88 @@ wait_for_state_change (uri_tcp_test_main_t * utm, connection_state_t state) return -1; } +void +application_attach (uri_tcp_test_main_t * utm) +{ + vl_api_application_attach_t *bmp; + u32 fifo_size = 3 << 20; + bmp = vl_msg_api_alloc (sizeof (*bmp)); + memset (bmp, 0, sizeof (*bmp)); + + bmp->_vl_msg_id = ntohs (VL_API_APPLICATION_ATTACH); + bmp->client_index = utm->my_client_index; + bmp->context = ntohl (0xfeedface); + bmp->options[SESSION_OPTIONS_FLAGS] = + SESSION_OPTIONS_FLAGS_USE_FIFO | SESSION_OPTIONS_FLAGS_ADD_SEGMENT; + bmp->options[SESSION_OPTIONS_RX_FIFO_SIZE] = fifo_size; + bmp->options[SESSION_OPTIONS_TX_FIFO_SIZE] = fifo_size; + bmp->options[SESSION_OPTIONS_ADD_SEGMENT_SIZE] = 128 << 20; + bmp->options[SESSION_OPTIONS_SEGMENT_SIZE] = 256 << 20; + vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & bmp); +} + +void +application_detach (uri_tcp_test_main_t * utm) +{ + vl_api_application_detach_t *bmp; + bmp = vl_msg_api_alloc (sizeof (*bmp)); + memset (bmp, 0, sizeof (*bmp)); + + bmp->_vl_msg_id = ntohs (VL_API_APPLICATION_DETACH); + bmp->client_index = utm->my_client_index; + bmp->context = ntohl (0xfeedface); + vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & bmp); +} + +static void +vl_api_application_attach_reply_t_handler (vl_api_application_attach_reply_t * + mp) +{ + uri_tcp_test_main_t *utm = &uri_tcp_test_main; + svm_fifo_segment_create_args_t _a, *a = &_a; + int rv; + + if (mp->retval) + { + uword *errp = hash_get (utm->error_string_by_error_number, mp->retval); + clib_warning ("attach failed: %s", *errp); + utm->state = STATE_FAILED; + 
return; + } + + if (mp->segment_name_length == 0) + { + clib_warning ("segment_name_length zero"); + return; + } + + a->segment_name = (char *) mp->segment_name; + a->segment_size = mp->segment_size; + + ASSERT (mp->app_event_queue_address); + + /* Attach to the segment vpp created */ + rv = svm_fifo_segment_attach (a); + if (rv) + { + clib_warning ("svm_fifo_segment_attach ('%s') failed", + mp->segment_name); + return; + } + + utm->our_event_queue = + (unix_shared_memory_queue_t *) mp->app_event_queue_address; + +} + +static void +vl_api_application_detach_reply_t_handler (vl_api_application_detach_reply_t * + mp) +{ + if (mp->retval) + clib_warning ("detach returned with err: %d", mp->retval); +} + static void init_error_string_table (uri_tcp_test_main_t * utm) { @@ -239,21 +318,18 @@ vl_api_disconnect_session_t_handler (vl_api_disconnect_session_t * mp) vl_api_disconnect_session_reply_t *rmp; uword *p; int rv = 0; - u64 key; - - key = (((u64) mp->session_thread_index) << 32) | (u64) mp->session_index; - p = hash_get (utm->session_index_by_vpp_handles, key); + p = hash_get (utm->session_index_by_vpp_handles, mp->handle); if (p) { session = pool_elt_at_index (utm->sessions, p[0]); - hash_unset (utm->session_index_by_vpp_handles, key); + hash_unset (utm->session_index_by_vpp_handles, mp->handle); pool_put (utm->sessions, session); } else { - clib_warning ("couldn't find session key %llx", key); + clib_warning ("couldn't find session key %llx", mp->handle); rv = -11; } @@ -264,8 +340,7 @@ vl_api_disconnect_session_t_handler (vl_api_disconnect_session_t * mp) rmp->_vl_msg_id = ntohs (VL_API_DISCONNECT_SESSION_REPLY); rmp->retval = rv; - rmp->session_index = mp->session_index; - rmp->session_thread_index = mp->session_thread_index; + rmp->handle = mp->handle; vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & rmp); } @@ -277,22 +352,19 @@ vl_api_reset_session_t_handler (vl_api_reset_session_t * mp) vl_api_reset_session_reply_t *rmp; uword *p; int rv = 0; - u64 key; - key = (((u64) mp->session_thread_index) << 32) | (u64) mp->session_index; - - p = hash_get (utm->session_index_by_vpp_handles, key); + p = hash_get (utm->session_index_by_vpp_handles, mp->handle); if (p) { session = pool_elt_at_index (utm->sessions, p[0]); - hash_unset (utm->session_index_by_vpp_handles, key); + hash_unset (utm->session_index_by_vpp_handles, mp->handle); pool_put (utm->sessions, session); utm->time_to_stop = 1; } else { - clib_warning ("couldn't find session key %llx", key); + clib_warning ("couldn't find session key %llx", mp->handle); rv = -11; } @@ -300,8 +372,7 @@ vl_api_reset_session_t_handler (vl_api_reset_session_t * mp) memset (rmp, 0, sizeof (*rmp)); rmp->_vl_msg_id = ntohs (VL_API_RESET_SESSION_REPLY); rmp->retval = rv; - rmp->session_index = mp->session_index; - rmp->session_thread_index = mp->session_thread_index; + rmp->handle = mp->handle; vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & rmp); } @@ -343,7 +414,7 @@ client_handle_fifo_event_rx (uri_tcp_test_main_t * utm, { if (n_read == -2) { - clib_warning ("weird!"); +// clib_warning ("weird!"); break; } } @@ -409,52 +480,19 @@ static void vl_api_connect_uri_reply_t_handler (vl_api_connect_uri_reply_t * mp) { uri_tcp_test_main_t *utm = &uri_tcp_test_main; - svm_fifo_segment_create_args_t _a, *a = &_a; session_t *session; u32 session_index; svm_fifo_t *rx_fifo, *tx_fifo; int rv; - u64 key; if (mp->retval) { - clib_warning ("connection failed with code: %d", mp->retval); - utm->state = STATE_FAILED; - return; - } - - /* - * Attatch to segment - 
*/ - - if (mp->segment_name_length == 0) - { - clib_warning ("segment_name_length zero"); + uword *errp = hash_get (utm->error_string_by_error_number, -mp->retval); + clib_warning ("connection failed with code: %s", *errp); utm->state = STATE_FAILED; return; } - a->segment_name = (char *) mp->segment_name; - a->segment_size = mp->segment_size; - - ASSERT (mp->client_event_queue_address); - - /* Attach to the segment vpp created */ - rv = svm_fifo_segment_attach (a); - if (rv) - { - clib_warning ("svm_fifo_segment_attach ('%s') failed", - mp->segment_name); - return; - } - - /* - * Save the queues - */ - - utm->our_event_queue = (unix_shared_memory_queue_t *) - mp->client_event_queue_address; - utm->vpp_event_queue = (unix_shared_memory_queue_t *) mp->vpp_event_queue_address; @@ -472,16 +510,14 @@ vl_api_connect_uri_reply_t_handler (vl_api_connect_uri_reply_t * mp) session->server_rx_fifo = rx_fifo; session->server_tx_fifo = tx_fifo; - session->vpp_session_index = mp->session_index; - session->vpp_session_thread = mp->session_thread_index; + session->vpp_session_handle = mp->handle; /* Save handle */ utm->connected_session_index = session_index; utm->state = STATE_READY; /* Add it to lookup table */ - key = (((u64) mp->session_thread_index) << 32) | (u64) mp->session_index; - hash_set (utm->session_index_by_vpp_handles, key, session_index); + hash_set (utm->session_index_by_vpp_handles, mp->handle, session_index); /* Start RX thread */ rv = pthread_create (&utm->client_rx_thread_handle, @@ -606,8 +642,7 @@ client_disconnect (uri_tcp_test_main_t * utm) memset (dmp, 0, sizeof (*dmp)); dmp->_vl_msg_id = ntohs (VL_API_DISCONNECT_SESSION); dmp->client_index = utm->my_client_index; - dmp->session_index = connected_session->vpp_session_index; - dmp->session_thread_index = connected_session->vpp_session_thread; + dmp->handle = connected_session->vpp_session_handle; vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & dmp); } @@ -616,6 +651,7 @@ client_test (uri_tcp_test_main_t * utm) { int i; + application_attach (utm); client_connect (utm); if (wait_for_state_change (utm, STATE_READY)) @@ -636,47 +672,26 @@ client_test (uri_tcp_test_main_t * utm) if (wait_for_state_change (utm, STATE_START)) { + clib_warning ("Disconnect failed"); return; } + application_detach (utm); } static void vl_api_bind_uri_reply_t_handler (vl_api_bind_uri_reply_t * mp) { uri_tcp_test_main_t *utm = &uri_tcp_test_main; - svm_fifo_segment_create_args_t _a, *a = &_a; - int rv; if (mp->retval) { - clib_warning ("bind failed: %d", mp->retval); + uword *errp = hash_get (utm->error_string_by_error_number, + -clib_net_to_host_u32 (mp->retval)); + clib_warning ("bind failed: %s", (char *) *errp); utm->state = STATE_FAILED; return; } - if (mp->segment_name_length == 0) - { - clib_warning ("segment_name_length zero"); - return; - } - - a->segment_name = (char *) mp->segment_name; - a->segment_size = mp->segment_size; - - ASSERT (mp->server_event_queue_address); - - /* Attach to the segment vpp created */ - rv = svm_fifo_segment_attach (a); - if (rv) - { - clib_warning ("svm_fifo_segment_attach ('%s') failed", - mp->segment_name); - return; - } - - utm->our_event_queue = - (unix_shared_memory_queue_t *) mp->server_event_queue_address; - utm->state = STATE_READY; } @@ -691,6 +706,89 @@ vl_api_unbind_uri_reply_t_handler (vl_api_unbind_uri_reply_t * mp) utm->state = STATE_START; } +u8 * +format_ip4_address (u8 * s, va_list * args) +{ + u8 *a = va_arg (*args, u8 *); + return format (s, "%d.%d.%d.%d", a[0], a[1], a[2], a[3]); +} + +u8 * 
+format_ip6_address (u8 * s, va_list * args) +{ + ip6_address_t *a = va_arg (*args, ip6_address_t *); + u32 i, i_max_n_zero, max_n_zeros, i_first_zero, n_zeros, last_double_colon; + + i_max_n_zero = ARRAY_LEN (a->as_u16); + max_n_zeros = 0; + i_first_zero = i_max_n_zero; + n_zeros = 0; + for (i = 0; i < ARRAY_LEN (a->as_u16); i++) + { + u32 is_zero = a->as_u16[i] == 0; + if (is_zero && i_first_zero >= ARRAY_LEN (a->as_u16)) + { + i_first_zero = i; + n_zeros = 0; + } + n_zeros += is_zero; + if ((!is_zero && n_zeros > max_n_zeros) + || (i + 1 >= ARRAY_LEN (a->as_u16) && n_zeros > max_n_zeros)) + { + i_max_n_zero = i_first_zero; + max_n_zeros = n_zeros; + i_first_zero = ARRAY_LEN (a->as_u16); + n_zeros = 0; + } + } + + last_double_colon = 0; + for (i = 0; i < ARRAY_LEN (a->as_u16); i++) + { + if (i == i_max_n_zero && max_n_zeros > 1) + { + s = format (s, "::"); + i += max_n_zeros - 1; + last_double_colon = 1; + } + else + { + s = format (s, "%s%x", + (last_double_colon || i == 0) ? "" : ":", + clib_net_to_host_u16 (a->as_u16[i])); + last_double_colon = 0; + } + } + + return s; +} + +/* Format an IP46 address. */ +u8 * +format_ip46_address (u8 * s, va_list * args) +{ + ip46_address_t *ip46 = va_arg (*args, ip46_address_t *); + ip46_type_t type = va_arg (*args, ip46_type_t); + int is_ip4 = 1; + + switch (type) + { + case IP46_TYPE_ANY: + is_ip4 = ip46_address_is_ip4 (ip46); + break; + case IP46_TYPE_IP4: + is_ip4 = 1; + break; + case IP46_TYPE_IP6: + is_ip4 = 0; + break; + } + + return is_ip4 ? + format (s, "%U", format_ip4_address, &ip46->ip4) : + format (s, "%U", format_ip6_address, &ip46->ip6); +} + static void vl_api_accept_session_t_handler (vl_api_accept_session_t * mp) { @@ -699,12 +797,15 @@ vl_api_accept_session_t_handler (vl_api_accept_session_t * mp) svm_fifo_t *rx_fifo, *tx_fifo; session_t *session; static f64 start_time; - u64 key; u32 session_index; + u8 *ip_str; if (start_time == 0.0) start_time = clib_time_now (&utm->clib_time); + ip_str = format (0, "%U", format_ip46_address, &mp->ip, mp->is_ip4); + clib_warning ("Accepted session from: %s:%d", ip_str, + clib_net_to_host_u16 (mp->port)); utm->vpp_event_queue = (unix_shared_memory_queue_t *) mp->vpp_event_queue_address; @@ -721,8 +822,7 @@ vl_api_accept_session_t_handler (vl_api_accept_session_t * mp) session->server_tx_fifo = tx_fifo; /* Add it to lookup table */ - key = (((u64) mp->session_thread_index) << 32) | (u64) mp->session_index; - hash_set (utm->session_index_by_vpp_handles, key, session_index); + hash_set (utm->session_index_by_vpp_handles, mp->handle, session_index); utm->state = STATE_READY; @@ -741,9 +841,7 @@ vl_api_accept_session_t_handler (vl_api_accept_session_t * mp) rmp = vl_msg_api_alloc (sizeof (*rmp)); memset (rmp, 0, sizeof (*rmp)); rmp->_vl_msg_id = ntohs (VL_API_ACCEPT_SESSION_REPLY); - rmp->session_type = mp->session_type; - rmp->session_index = mp->session_index; - rmp->session_thread_index = mp->session_thread_index; + rmp->handle = mp->handle; vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & rmp); } @@ -837,22 +935,15 @@ server_handle_event_queue (uri_tcp_test_main_t * utm) } void -server_bind (uri_tcp_test_main_t * utm) +server_listen (uri_tcp_test_main_t * utm) { vl_api_bind_uri_t *bmp; - u32 fifo_size = 3 << 20; bmp = vl_msg_api_alloc (sizeof (*bmp)); memset (bmp, 0, sizeof (*bmp)); bmp->_vl_msg_id = ntohs (VL_API_BIND_URI); bmp->client_index = utm->my_client_index; bmp->context = ntohl (0xfeedface); - bmp->initial_segment_size = 256 << 20; /* size of initial segment */ - 
bmp->options[SESSION_OPTIONS_FLAGS] = - SESSION_OPTIONS_FLAGS_USE_FIFO | SESSION_OPTIONS_FLAGS_ADD_SEGMENT; - bmp->options[SESSION_OPTIONS_RX_FIFO_SIZE] = fifo_size; - bmp->options[SESSION_OPTIONS_TX_FIFO_SIZE] = fifo_size; - bmp->options[SESSION_OPTIONS_ADD_SEGMENT_SIZE] = 128 << 20; memcpy (bmp->uri, utm->uri, vec_len (utm->uri)); vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & bmp); } @@ -874,8 +965,10 @@ server_unbind (uri_tcp_test_main_t * utm) void server_test (uri_tcp_test_main_t * utm) { + application_attach (utm); + /* Bind to uri */ - server_bind (utm); + server_listen (utm); if (wait_for_state_change (utm, STATE_READY)) { @@ -895,6 +988,8 @@ server_test (uri_tcp_test_main_t * utm) return; } + application_detach (utm); + fformat (stdout, "Test complete...\n"); } @@ -916,7 +1011,9 @@ _(CONNECT_URI_REPLY, connect_uri_reply) \ _(DISCONNECT_SESSION, disconnect_session) \ _(DISCONNECT_SESSION_REPLY, disconnect_session_reply) \ _(RESET_SESSION, reset_session) \ -_(MAP_ANOTHER_SEGMENT, map_another_segment) +_(APPLICATION_ATTACH_REPLY, application_attach_reply) \ +_(APPLICATION_DETACH_REPLY, application_detach_reply) \ +_(MAP_ANOTHER_SEGMENT, map_another_segment) \ void uri_api_hookup (uri_tcp_test_main_t * utm) @@ -941,7 +1038,7 @@ main (int argc, char **argv) u8 *heap, *uri = 0; u8 *bind_uri = (u8 *) "tcp://0.0.0.0/1234"; u8 *connect_uri = (u8 *) "tcp://6.0.1.2/1234"; - u32 bytes_to_send = 64 << 10, mbytes; + u64 bytes_to_send = 64 << 10, mbytes; u32 tmp; mheap_t *h; session_t *session; @@ -988,10 +1085,14 @@ main (int argc, char **argv) drop_packets = 1; else if (unformat (a, "test")) test_return_packets = 1; - else if (unformat (a, "mbytes %d", &mbytes)) + else if (unformat (a, "mbytes %lld", &mbytes)) { bytes_to_send = mbytes << 20; } + else if (unformat (a, "gbytes %lld", &mbytes)) + { + bytes_to_send = mbytes << 30; + } else { fformat (stderr, "%s: usage [master|slave]\n"); diff --git a/src/uri/uri_udp_test.c b/src/uri/uri_udp_test.c index e6c239c1..598052bc 100644 --- a/src/uri/uri_udp_test.c +++ b/src/uri/uri_udp_test.c @@ -55,6 +55,7 @@ typedef enum { STATE_START, STATE_READY, + STATE_FAILED, STATE_DISCONNECTING, } connection_state_t; @@ -162,6 +163,86 @@ setup_signal_handlers (void) return 0; } +void +application_attach (uri_udp_test_main_t * utm) +{ + vl_api_application_attach_t *bmp; + u32 fifo_size = 3 << 20; + bmp = vl_msg_api_alloc (sizeof (*bmp)); + memset (bmp, 0, sizeof (*bmp)); + + bmp->_vl_msg_id = ntohs (VL_API_APPLICATION_ATTACH); + bmp->client_index = utm->my_client_index; + bmp->context = ntohl (0xfeedface); + bmp->options[SESSION_OPTIONS_FLAGS] = + SESSION_OPTIONS_FLAGS_USE_FIFO | SESSION_OPTIONS_FLAGS_ADD_SEGMENT; + bmp->options[SESSION_OPTIONS_RX_FIFO_SIZE] = fifo_size; + bmp->options[SESSION_OPTIONS_TX_FIFO_SIZE] = fifo_size; + bmp->options[SESSION_OPTIONS_ADD_SEGMENT_SIZE] = 128 << 20; + bmp->options[SESSION_OPTIONS_SEGMENT_SIZE] = 256 << 20; + vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & bmp); +} + +void +application_detach (uri_udp_test_main_t * utm) +{ + vl_api_application_detach_t *bmp; + bmp = vl_msg_api_alloc (sizeof (*bmp)); + memset (bmp, 0, sizeof (*bmp)); + + bmp->_vl_msg_id = ntohs (VL_API_APPLICATION_DETACH); + bmp->client_index = utm->my_client_index; + bmp->context = ntohl (0xfeedface); + vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & bmp); +} + +static void +vl_api_application_attach_reply_t_handler (vl_api_application_attach_reply_t * + mp) +{ + uri_udp_test_main_t *utm = &uri_udp_test_main; + svm_fifo_segment_create_args_t 
_a, *a = &_a; + int rv; + + if (mp->retval) + { + clib_warning ("attach failed: %d", mp->retval); + utm->state = STATE_FAILED; + return; + } + + if (mp->segment_name_length == 0) + { + clib_warning ("segment_name_length zero"); + return; + } + + a->segment_name = (char *) mp->segment_name; + a->segment_size = mp->segment_size; + + ASSERT (mp->app_event_queue_address); + + /* Attach to the segment vpp created */ + rv = svm_fifo_segment_attach (a); + if (rv) + { + clib_warning ("svm_fifo_segment_attach ('%s') failed", + mp->segment_name); + return; + } + + utm->our_event_queue = + (unix_shared_memory_queue_t *) mp->app_event_queue_address; +} + +static void +vl_api_application_detach_reply_t_handler (vl_api_application_detach_reply_t * + mp) +{ + if (mp->retval) + clib_warning ("detach returned with err: %d", mp->retval); +} + u8 * format_api_error (u8 * s, va_list * args) { @@ -255,9 +336,22 @@ cut_through_thread_fn (void *arg) } static void -uri_udp_slave_test (uri_udp_test_main_t * utm) +udp_client_connect (uri_udp_test_main_t * utm) { vl_api_connect_uri_t *cmp; + cmp = vl_msg_api_alloc (sizeof (*cmp)); + memset (cmp, 0, sizeof (*cmp)); + + cmp->_vl_msg_id = ntohs (VL_API_CONNECT_URI); + cmp->client_index = utm->my_client_index; + cmp->context = ntohl (0xfeedface); + memcpy (cmp->uri, utm->connect_uri, vec_len (utm->connect_uri)); + vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & cmp); +} + +static void +client_send (uri_udp_test_main_t * utm, session_t * session) +{ int i; u8 *test_data = 0; u64 bytes_received = 0, bytes_sent = 0; @@ -265,30 +359,16 @@ uri_udp_slave_test (uri_udp_test_main_t * utm) int rv; int mypid = getpid (); f64 before, after, delta, bytes_per_second; - session_t *session; svm_fifo_t *rx_fifo, *tx_fifo; int buffer_offset, bytes_to_send = 0; + /* + * Prepare test data + */ vec_validate (test_data, 64 * 1024 - 1); for (i = 0; i < vec_len (test_data); i++) test_data[i] = i & 0xff; - cmp = vl_msg_api_alloc (sizeof (*cmp)); - memset (cmp, 0, sizeof (*cmp)); - - cmp->_vl_msg_id = ntohs (VL_API_CONNECT_URI); - cmp->client_index = utm->my_client_index; - cmp->context = ntohl (0xfeedface); - memcpy (cmp->uri, utm->connect_uri, vec_len (utm->connect_uri)); - vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & cmp); - - if (wait_for_state_change (utm, STATE_READY)) - { - clib_warning ("timeout waiting for STATE_READY"); - return; - } - - session = pool_elt_at_index (utm->sessions, utm->cut_through_session_index); rx_fifo = session->server_rx_fifo; tx_fifo = session->server_tx_fifo; @@ -375,35 +455,38 @@ uri_udp_slave_test (uri_udp_test_main_t * utm) } static void -vl_api_bind_uri_reply_t_handler (vl_api_bind_uri_reply_t * mp) +uri_udp_client_test (uri_udp_test_main_t * utm) { - uri_udp_test_main_t *utm = &uri_udp_test_main; - svm_fifo_segment_create_args_t _a, *a = &_a; - int rv; + session_t *session; - if (mp->segment_name_length == 0) + application_attach (utm); + udp_client_connect (utm); + + if (wait_for_state_change (utm, STATE_READY)) { - clib_warning ("segment_name_length zero"); + clib_warning ("timeout waiting for STATE_READY"); return; } - a->segment_name = (char *) mp->segment_name; - a->segment_size = mp->segment_size; + /* Only works with cut through sessions */ + session = pool_elt_at_index (utm->sessions, utm->cut_through_session_index); - ASSERT (mp->server_event_queue_address); + client_send (utm, session); + application_detach (utm); +} - /* Attach to the segment vpp created */ - rv = svm_fifo_segment_attach (a); - if (rv) +static void 
+vl_api_bind_uri_reply_t_handler (vl_api_bind_uri_reply_t * mp) +{ + uri_udp_test_main_t *utm = &uri_udp_test_main; + + if (mp->retval) { - clib_warning ("svm_fifo_segment_attach ('%s') failed", - mp->segment_name); + clib_warning ("bind failed: %d", mp->retval); + utm->state = STATE_FAILED; return; } - utm->our_event_queue = (unix_shared_memory_queue_t *) - mp->server_event_queue_address; - utm->state = STATE_READY; } @@ -427,6 +510,9 @@ vl_api_map_another_segment_t_handler (vl_api_map_another_segment_t * mp) mp->segment_size); } +/** + * Acting as server for redirected connect requests + */ static void vl_api_connect_uri_t_handler (vl_api_connect_uri_t * mp) { @@ -456,7 +542,6 @@ vl_api_connect_uri_t_handler (vl_api_connect_uri_t * mp) vec_add2 (utm->seg, seg, 1); segment_index = vec_len (sm->segments) - 1; - memcpy (seg, sm->segments + segment_index, sizeof (utm->seg[0])); pool_get (utm->sessions, session); @@ -521,7 +606,6 @@ vl_api_accept_session_t_handler (vl_api_accept_session_t * mp) svm_fifo_t *rx_fifo, *tx_fifo; session_t *session; static f64 start_time; - u64 key; if (start_time == 0.0) start_time = clib_time_now (&utm->clib_time); @@ -539,9 +623,8 @@ vl_api_accept_session_t_handler (vl_api_accept_session_t * mp) session->server_rx_fifo = rx_fifo; session->server_tx_fifo = tx_fifo; - key = (((u64) mp->session_thread_index) << 32) | (u64) mp->session_index; - - hash_set (utm->session_index_by_vpp_handles, key, session - utm->sessions); + hash_set (utm->session_index_by_vpp_handles, mp->handle, + session - utm->sessions); utm->state = STATE_READY; @@ -556,9 +639,7 @@ vl_api_accept_session_t_handler (vl_api_accept_session_t * mp) rmp = vl_msg_api_alloc (sizeof (*rmp)); memset (rmp, 0, sizeof (*rmp)); rmp->_vl_msg_id = ntohs (VL_API_ACCEPT_SESSION_REPLY); - rmp->session_type = mp->session_type; - rmp->session_index = mp->session_index; - rmp->session_thread_index = mp->session_thread_index; + rmp->handle = mp->handle; vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & rmp); } @@ -570,21 +651,18 @@ vl_api_disconnect_session_t_handler (vl_api_disconnect_session_t * mp) vl_api_disconnect_session_reply_t *rmp; uword *p; int rv = 0; - u64 key; - - key = (((u64) mp->session_thread_index) << 32) | (u64) mp->session_index; - p = hash_get (utm->session_index_by_vpp_handles, key); + p = hash_get (utm->session_index_by_vpp_handles, mp->handle); if (p) { session = pool_elt_at_index (utm->sessions, p[0]); - hash_unset (utm->session_index_by_vpp_handles, key); + hash_unset (utm->session_index_by_vpp_handles, mp->handle); pool_put (utm->sessions, session); } else { - clib_warning ("couldn't find session key %llx", key); + clib_warning ("couldn't find session key %llx", mp->handle); rv = -11; } @@ -592,77 +670,76 @@ vl_api_disconnect_session_t_handler (vl_api_disconnect_session_t * mp) memset (rmp, 0, sizeof (*rmp)); rmp->_vl_msg_id = ntohs (VL_API_DISCONNECT_SESSION_REPLY); rmp->retval = rv; - rmp->session_index = mp->session_index; - rmp->session_thread_index = mp->session_thread_index; + rmp->handle = mp->handle; vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & rmp); } static void vl_api_connect_uri_reply_t_handler (vl_api_connect_uri_reply_t * mp) { - svm_fifo_segment_main_t *sm = &svm_fifo_segment_main; uri_udp_test_main_t *utm = &uri_udp_test_main; - svm_fifo_segment_create_args_t _a, *a = &_a; - ssvm_shared_header_t *sh; - svm_fifo_segment_private_t *seg; - svm_fifo_segment_header_t *fsh; - session_t *session; - u32 segment_index; - int rv; ASSERT (utm->i_am_master == 0); - if 
(mp->segment_name_length == 0) + /* We've been redirected */ + if (mp->segment_name_length > 0) { - clib_warning ("segment_name_length zero"); - return; - } - - memset (a, 0, sizeof (*a)); - - a->segment_name = (char *) mp->segment_name; - - sleep (1); - - rv = svm_fifo_segment_attach (a); - if (rv) - { - clib_warning ("sm_fifo_segment_create ('%v') failed", mp->segment_name); - return; - } - - segment_index = vec_len (sm->segments) - 1; + svm_fifo_segment_main_t *sm = &svm_fifo_segment_main; + svm_fifo_segment_create_args_t _a, *a = &_a; + u32 segment_index; + session_t *session; + ssvm_shared_header_t *sh; + svm_fifo_segment_private_t *seg; + svm_fifo_segment_header_t *fsh; + int rv; + + memset (a, 0, sizeof (*a)); + a->segment_name = (char *) mp->segment_name; + + sleep (1); + + rv = svm_fifo_segment_attach (a); + if (rv) + { + clib_warning ("sm_fifo_segment_create ('%v') failed", + mp->segment_name); + return; + } - vec_add2 (utm->seg, seg, 1); + segment_index = vec_len (sm->segments) - 1; + vec_add2 (utm->seg, seg, 1); - memcpy (seg, sm->segments + segment_index, sizeof (*seg)); - sh = seg->ssvm.sh; - fsh = (svm_fifo_segment_header_t *) sh->opaque[0]; + memcpy (seg, sm->segments + segment_index, sizeof (*seg)); + sh = seg->ssvm.sh; + fsh = (svm_fifo_segment_header_t *) sh->opaque[0]; - while (vec_len (fsh->fifos) < 2) - sleep (1); + while (vec_len (fsh->fifos) < 2) + sleep (1); - pool_get (utm->sessions, session); - utm->cut_through_session_index = session - utm->sessions; + pool_get (utm->sessions, session); + utm->cut_through_session_index = session - utm->sessions; - session->server_rx_fifo = (svm_fifo_t *) fsh->fifos[0]; - ASSERT (session->server_rx_fifo); - session->server_tx_fifo = (svm_fifo_t *) fsh->fifos[1]; - ASSERT (session->server_tx_fifo); + session->server_rx_fifo = (svm_fifo_t *) fsh->fifos[0]; + ASSERT (session->server_rx_fifo); + session->server_tx_fifo = (svm_fifo_t *) fsh->fifos[1]; + ASSERT (session->server_tx_fifo); + } /* security: could unlink /dev/shm/segment_name> here, maybe */ utm->state = STATE_READY; } -#define foreach_uri_msg \ -_(BIND_URI_REPLY, bind_uri_reply) \ -_(CONNECT_URI, connect_uri) \ -_(CONNECT_URI_REPLY, connect_uri_reply) \ -_(UNBIND_URI_REPLY, unbind_uri_reply) \ -_(ACCEPT_SESSION, accept_session) \ -_(DISCONNECT_SESSION, disconnect_session) \ -_(MAP_ANOTHER_SEGMENT, map_another_segment) +#define foreach_uri_msg \ +_(BIND_URI_REPLY, bind_uri_reply) \ +_(CONNECT_URI, connect_uri) \ +_(CONNECT_URI_REPLY, connect_uri_reply) \ +_(UNBIND_URI_REPLY, unbind_uri_reply) \ +_(ACCEPT_SESSION, accept_session) \ +_(DISCONNECT_SESSION, disconnect_session) \ +_(MAP_ANOTHER_SEGMENT, map_another_segment) \ +_(APPLICATION_ATTACH_REPLY, application_attach_reply) \ +_(APPLICATION_DETACH_REPLY, application_detach_reply) \ void uri_api_hookup (uri_udp_test_main_t * utm) @@ -679,7 +756,6 @@ uri_api_hookup (uri_udp_test_main_t * utm) } - int connect_to_vpp (char *name) { @@ -784,26 +860,43 @@ server_handle_event_queue (uri_udp_test_main_t * utm) } } -void -uri_udp_test (uri_udp_test_main_t * utm) +static void +server_unbind (uri_udp_test_main_t * utm) { - vl_api_bind_uri_t *bmp; vl_api_unbind_uri_t *ump; + ump = vl_msg_api_alloc (sizeof (*ump)); + memset (ump, 0, sizeof (*ump)); + + ump->_vl_msg_id = ntohs (VL_API_UNBIND_URI); + ump->client_index = utm->my_client_index; + memcpy (ump->uri, utm->uri, vec_len (utm->uri)); + vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & ump); +} + +static void +server_listen (uri_udp_test_main_t * utm) +{ + vl_api_bind_uri_t 
*bmp; + bmp = vl_msg_api_alloc (sizeof (*bmp)); memset (bmp, 0, sizeof (*bmp)); bmp->_vl_msg_id = ntohs (VL_API_BIND_URI); bmp->client_index = utm->my_client_index; bmp->context = ntohl (0xfeedface); - bmp->initial_segment_size = 256 << 20; /* size of initial segment */ - bmp->options[SESSION_OPTIONS_FLAGS] = - SESSION_OPTIONS_FLAGS_USE_FIFO | SESSION_OPTIONS_FLAGS_ADD_SEGMENT; - bmp->options[SESSION_OPTIONS_RX_FIFO_SIZE] = 16 << 10; - bmp->options[SESSION_OPTIONS_TX_FIFO_SIZE] = 16 << 10; - bmp->options[SESSION_OPTIONS_ADD_SEGMENT_SIZE] = 128 << 20; memcpy (bmp->uri, utm->uri, vec_len (utm->uri)); vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & bmp); +} + +void +udp_server_test (uri_udp_test_main_t * utm) +{ + + application_attach (utm); + + /* Bind to uri */ + server_listen (utm); if (wait_for_state_change (utm, STATE_READY)) { @@ -813,13 +906,8 @@ uri_udp_test (uri_udp_test_main_t * utm) server_handle_event_queue (utm); - ump = vl_msg_api_alloc (sizeof (*ump)); - memset (ump, 0, sizeof (*ump)); - - ump->_vl_msg_id = ntohs (VL_API_UNBIND_URI); - ump->client_index = utm->my_client_index; - memcpy (ump->uri, utm->uri, vec_len (utm->uri)); - vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & ump); + /* Cleanup */ + server_unbind (utm); if (wait_for_state_change (utm, STATE_START)) { @@ -827,6 +915,8 @@ uri_udp_test (uri_udp_test_main_t * utm) return; } + application_detach (utm); + fformat (stdout, "Test complete...\n"); } @@ -892,7 +982,7 @@ main (int argc, char **argv) utm->i_am_master = i_am_master; utm->segment_main = &svm_fifo_segment_main; - utm->connect_uri = format (0, "udp://10.0.0.1/1234%c", 0); + utm->connect_uri = format (0, "udp://6.0.0.1/1234%c", 0); setup_signal_handlers (); @@ -907,7 +997,7 @@ main (int argc, char **argv) if (i_am_master == 0) { - uri_udp_slave_test (utm); + uri_udp_client_test (utm); exit (0); } @@ -920,7 +1010,7 @@ main (int argc, char **argv) for (i = 0; i < 200000; i++) pool_put_index (utm->sessions, i); - uri_udp_test (utm); + udp_server_test (utm); vl_client_disconnect_from_vlib (); exit (0); diff --git a/src/vnet.am b/src/vnet.am index bed4902b..25b84616 100644 --- a/src/vnet.am +++ b/src/vnet.am @@ -827,6 +827,7 @@ libvnet_la_SOURCES += \ vnet/session/session_cli.c \ vnet/session/hashes.c \ vnet/session/application_interface.c \ + vnet/session/segment_manager.c \ vnet/session/session_api.c nobase_include_HEADERS += \ @@ -835,6 +836,7 @@ nobase_include_HEADERS += \ vnet/session/transport.h \ vnet/session/application_interface.h \ vnet/session/session_debug.h \ + vnet/session/segment_manager.h \ vnet/session/session.api.h API_FILES += vnet/session/session.api diff --git a/src/vnet/api_errno.h b/src/vnet/api_errno.h index f3ffd2a6..e939404b 100644 --- a/src/vnet/api_errno.h +++ b/src/vnet/api_errno.h @@ -105,7 +105,9 @@ _(INVALID_GPE_MODE, -112, "Invalid GPE mode") \ _(LISP_GPE_ENTRIES_PRESENT, -113, "LISP GPE entries are present") \ _(ADDRESS_FOUND_FOR_INTERFACE, -114, "Address found for interface") \ _(SESSION_CONNECT_FAIL, -115, "Session failed to connect") \ -_(ENTRY_ALREADY_EXISTS, -116, "Entry already exists") +_(ENTRY_ALREADY_EXISTS, -116, "Entry already exists") \ +_(SVM_SEGMENT_CREATE_FAIL, -117, "svm segment create fail") \ +_(APPLICATION_NOT_ATTACHED, -118, "application not attached") typedef enum { diff --git a/src/vnet/session/application.c b/src/vnet/session/application.c index 513e5fac..5a45537b 100644 --- a/src/vnet/session/application.c +++ b/src/vnet/session/application.c @@ -14,18 +14,24 @@ */ #include +#include #include -/* 
+/** * Pool from which we allocate all applications */ static application_t *app_pool; -/* +/** * Hash table of apps by api client index */ static uword *app_by_api_client_index; +/** + * Default application event queue size + */ +static u32 default_app_evt_queue_size = 128; + int application_api_queue_is_full (application_t * app) { @@ -67,37 +73,71 @@ application_lookup (u32 api_client_index) return 0; } +application_t * +application_new () +{ + application_t *app; + pool_get (app_pool, app); + memset (app, 0, sizeof (*app)); + app->index = application_get_index (app); + app->connects_seg_manager = ~0; + return app; +} + void application_del (application_t * app) { - session_manager_main_t *smm = vnet_get_session_manager_main (); api_main_t *am = &api_main; void *oldheap; - session_manager_t *sm; + segment_manager_t *sm; + u64 handle; + u32 index, *handles = 0; + int i; + vnet_unbind_args_t _a, *a = &_a; + + /* + * Cleanup segment managers + */ + if (app->connects_seg_manager != (u32) ~ 0) + { + sm = segment_manager_get (app->connects_seg_manager); + segment_manager_del (sm); + } - if (app->mode == APP_SERVER) + /* *INDENT-OFF* */ + hash_foreach (handle, index, app->listeners_table, + ({ + vec_add1 (handles, handle); + })); + /* *INDENT-ON* */ + + /* Actual listener cleanup */ + for (i = 0; i < vec_len (handles); i++) { - sm = session_manager_get (app->session_manager_index); - session_manager_del (smm, sm); + a->app_index = app->api_client_index; + a->handle = handles[i]; + /* seg manager is removed when unbind completes */ + vnet_unbind (a); } - /* Free the event fifo in the /vpe-api shared-memory segment */ + /* + * Free the event fifo in the /vpe-api shared-memory segment + */ oldheap = svm_push_data_heap (am->vlib_rp); if (app->event_queue) unix_shared_memory_queue_free (app->event_queue); svm_pop_heap (oldheap); application_table_del (app); - pool_put (app_pool, app); } static void -application_verify_cb_fns (application_type_t type, session_cb_vft_t * cb_fns) +application_verify_cb_fns (session_cb_vft_t * cb_fns) { - if (type == APP_SERVER && cb_fns->session_accept_callback == 0) + if (cb_fns->session_accept_callback == 0) clib_warning ("No accept callback function provided"); - if (type == APP_CLIENT && cb_fns->session_connected_callback == 0) + if (cb_fns->session_connected_callback == 0) clib_warning ("No session connected callback function provided"); if (cb_fns->session_disconnect_callback == 0) clib_warning ("No session disconnect callback function provided"); @@ -105,25 +145,26 @@ application_verify_cb_fns (application_type_t type, session_cb_vft_t * cb_fns) clib_warning ("No session reset callback function provided"); } -application_t * -application_new (application_type_t type, session_type_t sst, - u32 api_client_index, u32 flags, session_cb_vft_t * cb_fns) +int +application_init (application_t * app, u32 api_client_index, u64 * options, + session_cb_vft_t * cb_fns) { - session_manager_main_t *smm = vnet_get_session_manager_main (); api_main_t *am = &api_main; - application_t *app; + segment_manager_t *sm; + segment_manager_properties_t *props; void *oldheap; - session_manager_t *sm; + u32 app_evt_queue_size; + int rv; - pool_get (app_pool, app); - memset (app, 0, sizeof (*app)); + app_evt_queue_size = options[APP_EVT_QUEUE_SIZE] > 0 ? 
+ options[APP_EVT_QUEUE_SIZE] : default_app_evt_queue_size; /* Allocate event fifo in the /vpe-api shared-memory segment */ oldheap = svm_push_data_heap (am->vlib_rp); /* Allocate server event queue */ app->event_queue = - unix_shared_memory_queue_init (128 /* nels $$$$ config */ , + unix_shared_memory_queue_init (app_evt_queue_size, sizeof (session_fifo_event_t), 0 /* consumer pid */ , 0 @@ -132,36 +173,31 @@ application_new (application_type_t type, session_type_t sst, svm_pop_heap (oldheap); - /* If a server, allocate session manager */ - if (type == APP_SERVER) - { - pool_get (smm->session_managers, sm); - memset (sm, 0, sizeof (*sm)); + /* Setup segment manager */ + sm = segment_manager_new (); + sm->app_index = app->index; + props = &app->sm_properties; + props->add_segment_size = options[SESSION_OPTIONS_ADD_SEGMENT_SIZE]; + props->rx_fifo_size = options[SESSION_OPTIONS_RX_FIFO_SIZE]; + props->tx_fifo_size = options[SESSION_OPTIONS_TX_FIFO_SIZE]; + props->add_segment = props->add_segment_size != 0; - app->session_manager_index = sm - smm->session_managers; - } - else if (type == APP_CLIENT) - { - /* Allocate connect session manager if needed */ - if (smm->connect_manager_index[sst] == INVALID_INDEX) - connects_session_manager_init (smm, sst); - app->session_manager_index = smm->connect_manager_index[sst]; - } + if ((rv = segment_manager_init (sm, props, + options[SESSION_OPTIONS_SEGMENT_SIZE]))) + return rv; - app->mode = type; - app->index = application_get_index (app); - app->session_type = sst; + app->first_segment_manager = segment_manager_index (sm); app->api_client_index = api_client_index; - app->flags = flags; + app->flags = options[SESSION_OPTIONS_FLAGS]; app->cb_fns = *cb_fns; /* Check that the obvious things are properly set up */ - application_verify_cb_fns (type, cb_fns); + application_verify_cb_fns (cb_fns); /* Add app to lookup by api_client_index table */ application_table_add (app); - return app; + return 0; } application_t * @@ -185,108 +221,286 @@ application_get_index (application_t * app) return app - app_pool; } +static segment_manager_t * +application_alloc_segment_manager (application_t * app) +{ + segment_manager_t *sm = 0; + + if (app->first_segment_manager != (u32) ~ 0) + { + sm = segment_manager_get (app->first_segment_manager); + app->first_segment_manager = ~0; + return sm; + } + + sm = segment_manager_new (); + if (segment_manager_init (sm, &app->sm_properties, 0)) + return 0; + return sm; +} + +/** + * Start listening local transport endpoint for requested transport. + * + * Creates a 'dummy' stream session with state LISTENING to be used in session + * lookups, prior to establishing connection. Requests transport to build + * its own specific listening connection. + */ int -application_server_init (application_t * server, u32 segment_size, - u32 add_segment_size, u32 rx_fifo_size, - u32 tx_fifo_size, u8 ** segment_name) +application_start_listen (application_t * srv, session_type_t session_type, + transport_endpoint_t * tep, u64 * res) { - session_manager_main_t *smm = vnet_get_session_manager_main (); - session_manager_t *sm; - int rv; + segment_manager_t *sm; + stream_session_t *s; + u64 handle; + + s = listen_session_new (session_type); + s->app_index = srv->index; + + if (stream_session_listen (s, tep)) + goto err; + + /* Allocate segment manager. All sessions derived out of a listen session + * have fifos allocated by the same segment manager. 
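 */

/* A minimal sketch, not part of the patch: how a built-in app might
 * drive the refactored attach + listen path. The options array bound,
 * the literal sizes and the port are illustrative assumptions; the
 * calls and option keys are the ones introduced in this commit, and
 * error cleanup is elided. */
static int
example_app_listen (u32 api_client_index, session_cb_vft_t * cb_fns)
{
  u64 options[16] = { 0 };	/* assumed large enough for all keys */
  transport_endpoint_t tep = { 0 };	/* zero address: listen on any */
  application_t *app;
  u64 handle;
  int rv;

  options[SESSION_OPTIONS_SEGMENT_SIZE] = 256 << 20;
  options[SESSION_OPTIONS_RX_FIFO_SIZE] = 16 << 10;
  options[SESSION_OPTIONS_TX_FIFO_SIZE] = 16 << 10;

  app = application_new ();
  if ((rv = application_init (app, api_client_index, options, cb_fns)))
    return rv;

  tep.is_ip4 = 1;
  tep.port = 1234;		/* host byte order, as in vnet_bind_i */
  return application_start_listen (app, SESSION_TYPE_IP4_TCP, &tep,
				   &handle);
}

/* Back in application_start_listen: allocate the shared segment
 * manager and record the listener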
*/ + sm = application_alloc_segment_manager (srv); + if (sm == 0) + goto err; + + /* Add to app's listener table. Useful to find all child listeners + * when app goes down, although, just for unbinding this is not needed */ + handle = listen_session_get_handle (s); + hash_set (srv->listeners_table, handle, segment_manager_index (sm)); - sm = session_manager_get (server->session_manager_index); + *res = handle; + return 0; + +err: + listen_session_del (s); + return -1; +} + +/** + * Stop listening on session associated to handle + */ +int +application_stop_listen (application_t * srv, u64 handle) +{ + stream_session_t *listener; + uword *indexp; + segment_manager_t *sm; - /* Add first segment */ - if ((rv = session_manager_add_first_segment (smm, sm, segment_size, - segment_name))) + if (srv && hash_get (srv->listeners_table, handle) == 0) { - return rv; + clib_warning ("app doesn't own handle %llu!", handle); + return -1; } - /* Setup session manager */ - sm->add_segment_size = add_segment_size; - sm->rx_fifo_size = rx_fifo_size; - sm->tx_fifo_size = tx_fifo_size; - sm->add_segment = sm->add_segment_size != 0; + listener = listen_session_get_from_handle (handle); + stream_session_stop_listen (listener); + + indexp = hash_get (srv->listeners_table, handle); + ASSERT (indexp); + + sm = segment_manager_get (*indexp); + segment_manager_del (sm); + hash_unset (srv->listeners_table, handle); + listen_session_del (listener); + return 0; } +int +application_open_session (application_t * app, session_type_t sst, + transport_endpoint_t * tep, u32 api_context) +{ + segment_manager_t *sm; + transport_connection_t *tc = 0; + int rv; + + /* Make sure we have a segment manager for connects */ + if (app->connects_seg_manager == (u32) ~ 0) + { + sm = application_alloc_segment_manager (app); + if (sm == 0) + return -1; + app->connects_seg_manager = segment_manager_index (sm); + } + + if ((rv = stream_session_open (app->index, sst, tep, &tc))) + return rv; + + /* Store api_context for when the reply comes. Not the nicest thing + * but better allocating a separate half-open pool. 
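 */

/* A minimal sketch, not part of the patch: an active open through the
 * new call chain. SESSION_TYPE_IP6_TCP and the helper's arguments are
 * assumptions for illustration; a real caller passes its own request
 * context as api_context. */
static int
example_app_connect (u32 app_index, ip46_address_t * ip, u16 port,
		     u32 api_context)
{
  application_t *app = application_get (app_index);
  transport_endpoint_t tep = { 0 };

  tep.ip = *ip;
  tep.is_ip4 = ip46_address_is_ip4 (ip);
  tep.port = port;
  return application_open_session (app, tep.is_ip4 ? SESSION_TYPE_IP4_TCP
				   : SESSION_TYPE_IP6_TCP, &tep,
				   api_context);
}

/* Stash the caller's context in the half-open transport connection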
*/ + tc->s_index = api_context; + + return 0; +} + +segment_manager_t * +application_get_connect_segment_manager (application_t * app) +{ + ASSERT (app->connects_seg_manager != (u32) ~ 0); + return segment_manager_get (app->connects_seg_manager); +} + +segment_manager_t * +application_get_listen_segment_manager (application_t * app, + stream_session_t * s) +{ + uword *smp; + smp = hash_get (app->listeners_table, listen_session_get_handle (s)); + ASSERT (smp != 0); + return segment_manager_get (*smp); +} + +static u8 * +app_get_name_from_reg_index (application_t * app) +{ + u8 *app_name; + + vl_api_registration_t *regp; + regp = vl_api_client_index_to_registration (app->api_client_index); + if (!regp) + app_name = format (0, "builtin-%d%c", app->index, 0); + else + app_name = format (0, "%s%c", regp->name, 0); + + return app_name; +} + u8 * -format_application_server (u8 * s, va_list * args) +format_application_listener (u8 * s, va_list * args) { - application_t *srv = va_arg (*args, application_t *); + application_t *app = va_arg (*args, application_t *); + u64 handle = va_arg (*args, u64); + u32 index = va_arg (*args, u32); int verbose = va_arg (*args, int); - vl_api_registration_t *regp; stream_session_t *listener; - u8 *server_name, *str, *seg_name; - u32 segment_size; + u8 *app_name, *str; - if (srv == 0) + if (app == 0) { if (verbose) - s = format (s, "%-40s%-20s%-15s%-15s%-10s", "Connection", "Server", - "Segment", "API Client", "Cookie"); + s = format (s, "%-40s%-20s%-15s%-15s%-10s", "Connection", "App", + "API Client", "ListenerID", "SegManager"); else - s = format (s, "%-40s%-20s", "Connection", "Server"); + s = format (s, "%-40s%-20s", "Connection", "App"); return s; } - regp = vl_api_client_index_to_registration (srv->api_client_index); - if (!regp) - server_name = format (0, "builtin-%d%c", srv->index, 0); - else - server_name = regp->name; - - listener = stream_session_listener_get (srv->session_type, - srv->session_index); + app_name = app_get_name_from_reg_index (app); + listener = listen_session_get_from_handle (handle); str = format (0, "%U", format_stream_session, listener, verbose); - session_manager_get_segment_info (listener->server_segment_index, &seg_name, - &segment_size); if (verbose) { - s = format (s, "%-40s%-20s%-20s%-10d%-10d", str, server_name, - seg_name, srv->api_client_index, srv->accept_cookie); + s = format (s, "%-40s%-20s%-15u%-15u%-10u", str, app_name, + app->api_client_index, handle, index); } else - s = format (s, "%-40s%-20s", str, server_name); + s = format (s, "%-40s%-20s", str, app_name); + + vec_free (app_name); return s; } -u8 * -format_application_client (u8 * s, va_list * args) +void +application_format_connects (application_t * app, int verbose) { - application_t *client = va_arg (*args, application_t *); - int verbose = va_arg (*args, int); - stream_session_t *session; - u8 *str, *seg_name; - u32 segment_size; + vlib_main_t *vm = vlib_get_main (); + segment_manager_t *sm; + u8 *app_name, *s = 0; + int i, j; - if (client == 0) + /* Header */ + if (app == 0) { if (verbose) - s = - format (s, "%-40s%-20s%-10s", "Connection", "Segment", - "API Client"); + vlib_cli_output (vm, "%-40s%-20s%-15s%-10s", "Connection", "App", + "API Client", "SegManager"); else - s = format (s, "%-40s", "Connection"); + vlib_cli_output (vm, "%-40s%-20s", "Connection", "App"); + return; + } - return s; + /* make sure */ + if (app->connects_seg_manager == (u32) ~ 0) + return; + + app_name = app_get_name_from_reg_index (app); + + /* Across all fifo segments */ + sm = 
segment_manager_get (app->connects_seg_manager); + for (j = 0; j < vec_len (sm->segment_indices); j++) + { + svm_fifo_segment_private_t *fifo_segment; + svm_fifo_t **fifos; + u8 *str; + + fifo_segment = svm_fifo_get_segment (sm->segment_indices[j]); + fifos = svm_fifo_segment_get_fifos (fifo_segment); + for (i = 0; i < vec_len (fifos); i++) + { + svm_fifo_t *fifo; + u32 session_index, thread_index; + stream_session_t *session; + + /* There are 2 fifos/session. Avoid printing twice. */ + if (i % 2) + continue; + + fifo = fifos[i]; + session_index = fifo->server_session_index; + thread_index = fifo->server_thread_index; + + session = stream_session_get (session_index, thread_index); + str = format (0, "%U", format_stream_session, session, verbose); + + if (verbose) + s = format (s, "%-40s%-20s%-15u%-10u", str, app_name, + app->api_client_index, app->connects_seg_manager); + else + s = format (s, "%-40s%-20s", str, app_name); + + vlib_cli_output (vm, "%v", s); + + vec_reset_length (s); + vec_free (str); + } + vec_free (s); } - session = stream_session_get (client->session_index, client->thread_index); - str = format (0, "%U", format_stream_session, session, verbose); + vec_free (app_name); +} - session_manager_get_segment_info (session->server_segment_index, &seg_name, - &segment_size); - if (verbose) +u8 * +format_application (u8 * s, va_list * args) +{ + application_t *app = va_arg (*args, application_t *); + CLIB_UNUSED (int verbose) = va_arg (*args, int); + u8 *app_name; + + if (app == 0) { - s = format (s, "%-40s%-20s%-10d%", str, seg_name, - client->api_client_index); + if (verbose) + s = format (s, "%-10s%-20s%-15s%-15s%-15s%-15s", "Index", "Name", + "API Client", "Add seg size", "Rx fifo size", + "Tx fifo size"); + else + s = format (s, "%-10s%-20s%-20s", "Index", "Name", "API Client"); + return s; } + + app_name = app_get_name_from_reg_index (app); + if (verbose) + s = format (s, "%-10d%-20s%-15d%-15d%-15d%-15d", app->index, app_name, + app->api_client_index, app->sm_properties.add_segment_size, + app->sm_properties.rx_fifo_size, + app->sm_properties.tx_fifo_size); else - s = format (s, "%-40s", str); + s = format (s, "%-10d%-20s%-20d", app->index, app_name, + app->api_client_index); return s; } @@ -294,13 +508,12 @@ static clib_error_t * show_app_command_fn (vlib_main_t * vm, unformat_input_t * input, vlib_cli_command_t * cmd) { - session_manager_main_t *smm = &session_manager_main; application_t *app; int do_server = 0; int do_client = 0; int verbose = 0; - if (!smm->is_enabled) + if (!session_manager_is_enabled ()) { clib_error_return (0, "session layer is not enabled"); } @@ -319,17 +532,24 @@ show_app_command_fn (vlib_main_t * vm, unformat_input_t * input, if (do_server) { + u64 handle; + u32 index; if (pool_elts (app_pool)) { - vlib_cli_output (vm, "%U", format_application_server, - 0 /* header */ , + vlib_cli_output (vm, "%U", format_application_listener, + 0 /* header */ , 0, 0, verbose); /* *INDENT-OFF* */ pool_foreach (app, app_pool, ({ - if (app->mode == APP_SERVER) - vlib_cli_output (vm, "%U", format_application_server, app, - verbose); + /* App's listener sessions */ + if (hash_elts (app->listeners_table) == 0) + continue; + hash_foreach (handle, index, app->listeners_table, + ({ + vlib_cli_output (vm, "%U", format_application_listener, app, + handle, index, verbose); + })); })); /* *INDENT-ON* */ } @@ -341,15 +561,14 @@ show_app_command_fn (vlib_main_t * vm, unformat_input_t * input, { if (pool_elts (app_pool)) { - vlib_cli_output (vm, "%U", format_application_client, - 
0 /* header */ , - verbose); + application_format_connects (0, verbose); + /* *INDENT-OFF* */ pool_foreach (app, app_pool, ({ - if (app->mode == APP_CLIENT) - vlib_cli_output (vm, "%U", format_application_client, app, - verbose); + if (app->connects_seg_manager == (u32)~0) + continue; + application_format_connects (app, verbose); })); /* *INDENT-ON* */ } @@ -357,6 +576,19 @@ show_app_command_fn (vlib_main_t * vm, unformat_input_t * input, vlib_cli_output (vm, "No active client bindings"); } + /* Print app related info */ + if (!do_server && !do_client) + { + vlib_cli_output (vm, "%U", format_application, 0, verbose); + pool_foreach (app, app_pool, ( + { + vlib_cli_output (vm, "%U", + format_application, app, + verbose); + } + )); + } + return 0; } diff --git a/src/vnet/session/application.h b/src/vnet/session/application.h index 480828f7..6bcee9d3 100644 --- a/src/vnet/session/application.h +++ b/src/vnet/session/application.h @@ -18,11 +18,13 @@ #include #include +#include typedef enum { APP_SERVER, - APP_CLIENT + APP_CLIENT, + APP_N_TYPES } application_type_t; typedef struct _stream_session_cb_vft @@ -35,7 +37,7 @@ typedef struct _stream_session_cb_vft int (*session_accept_callback) (stream_session_t * new_session); /* Connection request callback */ - int (*session_connected_callback) (u32 api_client_index, + int (*session_connected_callback) (u32 app_index, u32 api_context, stream_session_t * s, u8 code); /** Notify app that session is closing */ @@ -59,45 +61,52 @@ typedef struct _application /** Flags */ u32 flags; + /* Stream server mode: accept or connect + * TODO REMOVE*/ + u8 mode; + + /** Index of the listen session or connect session + * TODO REMOVE*/ + u32 session_index; + + /** Session thread index for client connect sessions + * TODO REMOVE */ + u32 thread_index; + + /* + * Binary API interface to external app + */ + /** Binary API connection index, ~0 if internal */ u32 api_client_index; - /* */ - u32 api_context; - /** Application listens for events on this svm queue */ unix_shared_memory_queue_t *event_queue; - /** Stream session type */ - u8 session_type; - - /* Stream server mode: accept or connect */ - u8 mode; + /* + * Callbacks: shoulder-taps for the server/client + */ - u32 session_manager_index; + session_cb_vft_t cb_fns; /* - * Bind/Listen specific + * svm segment management */ + u32 connects_seg_manager; - /** Accept cookie, for multiple session flavors ($$$ maybe) */ - u32 accept_cookie; + /* Lookup tables for listeners. Value is segment manager index */ + uword *listeners_table; - /** Index of the listen session or connect session */ - u32 session_index; + u32 first_segment_manager; - /** Session thread index for client connect sessions */ - u32 thread_index; - - /* - * Callbacks: shoulder-taps for the server/client - */ - session_cb_vft_t cb_fns; + /** Segment manager properties. 
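Fifo sizes and segment growth policy live here.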
Shared by all segment managers */ + segment_manager_properties_t sm_properties; } application_t; -application_t *application_new (application_type_t type, session_type_t sst, - u32 api_client_index, u32 flags, - session_cb_vft_t * cb_fns); +application_t *application_new (); +int +application_init (application_t * app, u32 api_client_index, u64 * options, + session_cb_vft_t * cb_fns); void application_del (application_t * app); application_t *application_get (u32 index); application_t *application_get_if_valid (u32 index); @@ -105,11 +114,21 @@ application_t *application_lookup (u32 api_client_index); u32 application_get_index (application_t * app); int -application_server_init (application_t * server, u32 segment_size, - u32 add_segment_size, u32 rx_fifo_size, - u32 tx_fifo_size, u8 ** segment_name); +application_start_listen (application_t * app, session_type_t session_type, + transport_endpoint_t * tep, u64 * handle); +int application_stop_listen (application_t * srv, u64 handle); +int +application_open_session (application_t * app, session_type_t sst, + transport_endpoint_t * tep, u32 api_context); int application_api_queue_is_full (application_t * app); +segment_manager_t *application_get_listen_segment_manager (application_t * + app, + stream_session_t * + s); +segment_manager_t *application_get_connect_segment_manager (application_t * + app); + #endif /* SRC_VNET_SESSION_APPLICATION_H_ */ /* diff --git a/src/vnet/session/application_interface.c b/src/vnet/session/application_interface.c index 4b30bd87..96d2c621 100644 --- a/src/vnet/session/application_interface.c +++ b/src/vnet/session/application_interface.c @@ -79,81 +79,51 @@ api_parse_session_handle (u64 handle, u32 * session_index, u32 * thread_index) } int -vnet_bind_i (u32 api_client_index, ip46_address_t * ip46, u16 port_host_order, - session_type_t sst, u64 * options, session_cb_vft_t * cb_fns, - application_t ** app, u32 * len_seg_name, char *seg_name) +vnet_bind_i (u32 app_index, session_type_t sst, + transport_endpoint_t * tep, u64 * handle) { - u8 *segment_name = 0; - application_t *server = 0; + application_t *app; stream_session_t *listener; - u8 is_ip4; - - listener = - stream_session_lookup_listener (ip46, - clib_host_to_net_u16 (port_host_order), - sst); - - if (listener) - return VNET_API_ERROR_ADDRESS_IN_USE; - if (application_lookup (api_client_index)) + app = application_get_if_valid (app_index); + if (!app) { - clib_warning ("Only one connection supported for now"); - return VNET_API_ERROR_ADDRESS_IN_USE; + clib_warning ("app not attached"); + return VNET_API_ERROR_APPLICATION_NOT_ATTACHED; } - is_ip4 = SESSION_TYPE_IP4_UDP == sst || SESSION_TYPE_IP4_TCP == sst; - if (!ip_is_zero (ip46, is_ip4) && !ip_is_local (ip46, is_ip4)) - return VNET_API_ERROR_INVALID_VALUE; - - /* Allocate and initialize stream server */ - server = application_new (APP_SERVER, sst, api_client_index, - options[SESSION_OPTIONS_FLAGS], cb_fns); + listener = stream_session_lookup_listener (&tep->ip, + clib_host_to_net_u16 (tep->port), + sst); + if (listener) + return VNET_API_ERROR_ADDRESS_IN_USE; - application_server_init (server, options[SESSION_OPTIONS_SEGMENT_SIZE], - options[SESSION_OPTIONS_ADD_SEGMENT_SIZE], - options[SESSION_OPTIONS_RX_FIFO_SIZE], - options[SESSION_OPTIONS_TX_FIFO_SIZE], - &segment_name); + if (!ip_is_zero (&tep->ip, tep->is_ip4) + && !ip_is_local (&tep->ip, tep->is_ip4)) + return VNET_API_ERROR_INVALID_VALUE_2; /* Setup listen path down to transport */ - stream_session_start_listen (server->index, ip46, 
port_host_order); - - /* - * Return values - */ - - ASSERT (vec_len (segment_name) <= 128); - *len_seg_name = vec_len (segment_name); - memcpy (seg_name, segment_name, *len_seg_name); - *app = server; - - return 0; + return application_start_listen (app, sst, tep, handle); } int -vnet_unbind_i (u32 api_client_index) +vnet_unbind_i (u32 app_index, u64 handle) { - application_t *server; + application_t *app = application_get_if_valid (app_index); - /* - * Find the stream_server_t corresponding to the api client - */ - server = application_lookup (api_client_index); - if (!server) - return VNET_API_ERROR_INVALID_VALUE_2; + if (!app) + { + clib_warning ("app not attached"); + return VNET_API_ERROR_APPLICATION_NOT_ATTACHED; + } /* Clear the listener */ - stream_session_stop_listen (server->index); - application_del (server); - - return 0; + return application_stop_listen (app, handle); } int -vnet_connect_i (u32 api_client_index, u32 api_context, session_type_t sst, - ip46_address_t * ip46, u16 port, u64 * options, void *mp, - session_cb_vft_t * cb_fns) +vnet_connect_i (u32 app_index, u32 api_context, session_type_t sst, + transport_endpoint_t * tep, void *mp) { stream_session_t *listener; application_t *server, *app; @@ -161,8 +131,8 @@ vnet_connect_i (u32 api_client_index, u32 api_context, session_type_t sst, /* * Figure out if connecting to a local server */ - listener = stream_session_lookup_listener (ip46, - clib_host_to_net_u16 (port), + listener = stream_session_lookup_listener (&tep->ip, + clib_host_to_net_u16 (tep->port), sst); if (listener) { @@ -177,16 +147,11 @@ vnet_connect_i (u32 api_client_index, u32 api_context, session_type_t sst, redirect_connect_callback (server->api_client_index, mp); } - /* Create client app */ - app = application_new (APP_CLIENT, sst, api_client_index, - options[SESSION_OPTIONS_FLAGS], cb_fns); - - app->api_context = api_context; - /* * Not connecting to a local server. 
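The connect is handed down to the transport below and the session itself is allocated only once the connection is established.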
Create regular session */ - return stream_session_open (sst, ip46, port, app->index); + app = application_get (app_index); + return application_open_session (app, sst, tep, api_context); } /** @@ -209,30 +174,31 @@ vnet_connect_i (u32 api_client_index, u32 api_context, session_type_t sst, uword unformat_vnet_uri (unformat_input_t * input, va_list * args) { - ip46_address_t *address = va_arg (*args, ip46_address_t *); session_type_t *sst = va_arg (*args, session_type_t *); - u16 *port = va_arg (*args, u16 *); + transport_endpoint_t *tep = va_arg (*args, transport_endpoint_t *); - if (unformat (input, "tcp://%U/%d", unformat_ip4_address, &address->ip4, - port)) + if (unformat (input, "tcp://%U/%d", unformat_ip4_address, &tep->ip.ip4, + &tep->port)) { *sst = SESSION_TYPE_IP4_TCP; + tep->is_ip4 = 1; return 1; } - if (unformat (input, "udp://%U/%d", unformat_ip4_address, &address->ip4, - port)) + if (unformat (input, "udp://%U/%d", unformat_ip4_address, &tep->ip.ip4, + &tep->port)) { *sst = SESSION_TYPE_IP4_UDP; + tep->is_ip4 = 1; return 1; } - if (unformat (input, "udp://%U/%d", unformat_ip6_address, &address->ip6, - port)) + if (unformat (input, "udp://%U/%d", unformat_ip6_address, &tep->ip.ip6, + &tep->port)) { *sst = SESSION_TYPE_IP6_UDP; return 1; } - if (unformat (input, "tcp://%U/%d", unformat_ip6_address, &address->ip6, - port)) + if (unformat (input, "tcp://%U/%d", unformat_ip6_address, &tep->ip.ip6, + &tep->port)) { *sst = SESSION_TYPE_IP6_TCP; return 1; @@ -242,8 +208,7 @@ unformat_vnet_uri (unformat_input_t * input, va_list * args) } int -parse_uri (char *uri, session_type_t * sst, ip46_address_t * addr, - u16 * port_number_host_byte_order) +parse_uri (char *uri, session_type_t * sst, transport_endpoint_t * tep) { unformat_input_t _input, *input = &_input; @@ -252,8 +217,7 @@ parse_uri (char *uri, session_type_t * sst, ip46_address_t * addr, /* Parse uri */ unformat_init_string (input, uri, strlen (uri)); - if (!unformat (input, "%U", unformat_vnet_uri, addr, sst, - port_number_host_byte_order)) + if (!unformat (input, "%U", unformat_vnet_uri, sst, tep)) { unformat_free (input); return VNET_API_ERROR_INVALID_VALUE; @@ -263,26 +227,51 @@ parse_uri (char *uri, session_type_t * sst, ip46_address_t * addr, return 0; } +/** + * Attaches application. + * + * Allocates a vpp app, i.e., a structure that keeps back pointers + * to external app and a segment manager for shared memory fifo based + * communication with the external app. 
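+ *
+ * A minimal usage sketch (hypothetical caller, error handling elided;
+ * my_client_index and my_cb_vft are assumed, not part of this API):
+ *
+ *   u64 options[SESSION_OPTIONS_N_OPTIONS] = { 0 };
+ *   vnet_app_attach_args_t a = { 0 };
+ *
+ *   options[SESSION_OPTIONS_SEGMENT_SIZE] = 1 << 20;
+ *   a.api_client_index = my_client_index;
+ *   a.options = options;
+ *   a.session_cb_vft = &my_cb_vft;
+ *   if (vnet_application_attach (&a) == 0)
+ *     a.app_index, a.segment_name and a.app_event_queue_address are set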
+ */ int -vnet_bind_uri (vnet_bind_args_t * a) +vnet_application_attach (vnet_app_attach_args_t * a) { - application_t *server = 0; - u16 port_host_order; - session_type_t sst = SESSION_N_TYPES; - ip46_address_t ip46; + application_t *app = 0; + segment_manager_t *sm; + u8 *seg_name; int rv; - memset (&ip46, 0, sizeof (ip46)); - rv = parse_uri (a->uri, &sst, &ip46, &port_host_order); - if (rv) + app = application_new (); + if ((rv = application_init (app, a->api_client_index, a->options, + a->session_cb_vft))) return rv; - if ((rv = vnet_bind_i (a->api_client_index, &ip46, port_host_order, sst, - a->options, a->session_cb_vft, &server, - &a->segment_name_length, a->segment_name))) - return rv; + a->app_event_queue_address = (u64) app->event_queue; + sm = segment_manager_get (app->first_segment_manager); + segment_manager_get_segment_info (sm->segment_indices[0], + &seg_name, &a->segment_size); - a->server_event_queue_address = (u64) server->event_queue; + a->segment_name_length = vec_len (seg_name); + a->segment_name = seg_name; + ASSERT (vec_len (a->segment_name) <= 128); + a->app_index = app->index; + return 0; +} + +int +vnet_application_detach (vnet_app_detach_args_t * a) +{ + application_t *app; + app = application_get_if_valid (a->app_index); + + if (!app) + { + clib_warning ("app not attached"); + return VNET_API_ERROR_APPLICATION_NOT_ATTACHED; + } + + application_del (app); return 0; } @@ -308,125 +297,102 @@ session_type_from_proto_and_ip (session_api_proto_t proto, u8 is_ip4) } int -vnet_unbind_uri (char *uri, u32 api_client_index) +vnet_bind_uri (vnet_bind_args_t * a) { - u16 port_number_host_byte_order; session_type_t sst = SESSION_N_TYPES; - ip46_address_t ip46_address; - stream_session_t *listener; + transport_endpoint_t tep; int rv; - rv = parse_uri (uri, &sst, &ip46_address, &port_number_host_byte_order); + memset (&tep, 0, sizeof (tep)); + rv = parse_uri (a->uri, &sst, &tep); if (rv) return rv; - listener = - stream_session_lookup_listener (&ip46_address, - clib_host_to_net_u16 - (port_number_host_byte_order), sst); + if ((rv = vnet_bind_i (a->app_index, sst, &tep, &a->handle))) + return rv; + + return 0; +} + +int +vnet_unbind_uri (vnet_unbind_args_t * a) +{ + session_type_t sst = SESSION_N_TYPES; + stream_session_t *listener; + transport_endpoint_t tep; + int rv; + + rv = parse_uri (a->uri, &sst, &tep); + if (rv) + return rv; + listener = stream_session_lookup_listener (&tep.ip, + clib_host_to_net_u16 (tep.port), + sst); if (!listener) return VNET_API_ERROR_ADDRESS_NOT_IN_USE; - /* External client? 
*/ - if (api_client_index != ~0) - { - ASSERT (vl_api_client_index_to_registration (api_client_index)); - } - - return vnet_unbind_i (api_client_index); + return vnet_unbind_i (a->app_index, listen_session_get_handle (listener)); } int vnet_connect_uri (vnet_connect_args_t * a) { - ip46_address_t ip46_address; - u16 port; + transport_endpoint_t tep; session_type_t sst; - application_t *app; int rv; - app = application_lookup (a->api_client_index); - if (app) - { - clib_warning ("Already have a connect from this app"); - return VNET_API_ERROR_INVALID_VALUE_2; - } - /* Parse uri */ - rv = parse_uri (a->uri, &sst, &ip46_address, &port); + memset (&tep, 0, sizeof (tep)); + rv = parse_uri (a->uri, &sst, &tep); if (rv) return rv; - return vnet_connect_i (a->api_client_index, a->api_context, sst, - &ip46_address, port, a->options, a->mp, - a->session_cb_vft); + return vnet_connect_i (a->app_index, a->api_context, sst, &tep, a->mp); } int -vnet_disconnect_session (u32 session_index, u32 thread_index) +vnet_disconnect_session (vnet_disconnect_args_t * a) { - stream_session_t *session; + u32 index, thread_index; + stream_session_t *s; - session = stream_session_get (session_index, thread_index); - stream_session_disconnect (session); + stream_session_parse_handle (a->handle, &index, &thread_index); + s = stream_session_get_if_valid (index, thread_index); + + if (!s || s->app_index != a->app_index) + return VNET_API_ERROR_INVALID_VALUE; + stream_session_disconnect (s); return 0; } - int vnet_bind (vnet_bind_args_t * a) { - application_t *server = 0; session_type_t sst = SESSION_N_TYPES; int rv; sst = session_type_from_proto_and_ip (a->proto, a->tep.is_ip4); - if ((rv = vnet_bind_i (a->api_client_index, &a->tep.ip, a->tep.port, sst, - a->options, a->session_cb_vft, &server, - &a->segment_name_length, a->segment_name))) + if ((rv = vnet_bind_i (a->app_index, sst, &a->tep, &a->handle))) return rv; - a->server_event_queue_address = (u64) server->event_queue; - a->handle = (u64) a->tep.vrf << 32 | (u64) server->session_index; return 0; } int vnet_unbind (vnet_unbind_args_t * a) { - application_t *server; - - if (a->api_client_index != ~0) - { - ASSERT (vl_api_client_index_to_registration (a->api_client_index)); - } - - /* Make sure this is the right one */ - server = application_lookup (a->api_client_index); - ASSERT (server->session_index == (0xFFFFFFFF & a->handle)); - - /* TODO use handle to disambiguate namespaces/vrfs */ - return vnet_unbind_i (a->api_client_index); + return vnet_unbind_i (a->app_index, a->handle); } int vnet_connect (vnet_connect_args_t * a) { session_type_t sst; - application_t *app; - - app = application_lookup (a->api_client_index); - if (app) - { - clib_warning ("Already have a connect from this app"); - return VNET_API_ERROR_INVALID_VALUE_2; - } sst = session_type_from_proto_and_ip (a->proto, a->tep.is_ip4); - return vnet_connect_i (a->api_client_index, a->api_context, sst, &a->tep.ip, - a->tep.port, a->options, a->mp, a->session_cb_vft); + return vnet_connect_i (a->app_index, a->api_context, sst, &a->tep, a->mp); } int diff --git a/src/vnet/session/application_interface.h b/src/vnet/session/application_interface.h index a5f2b9a6..2c497531 100644 --- a/src/vnet/session/application_interface.h +++ b/src/vnet/session/application_interface.h @@ -28,6 +28,27 @@ typedef enum _session_api_proto SESSION_PROTO_UDP } session_api_proto_t; +typedef struct _vnet_app_attach_args_t +{ + u32 api_client_index; + u64 *options; + session_cb_vft_t *session_cb_vft; + + /* + * Results + */ + u8 
*segment_name; + u32 segment_name_length; + u32 segment_size; + u64 app_event_queue_address; + u32 app_index; +} vnet_app_attach_args_t; + +typedef struct _vnet_app_detach_args_t +{ + u32 app_index; +} vnet_app_detach_args_t; + typedef struct _vnet_bind_args_t { union @@ -40,9 +61,7 @@ typedef struct _vnet_bind_args_t }; }; - u32 api_client_index; - u64 *options; - session_cb_vft_t *session_cb_vft; + u32 app_index; /* * Results @@ -60,7 +79,7 @@ typedef struct _vnet_unbind_args_t char *uri; u64 handle; }; - u32 api_client_index; + u32 app_index; } vnet_unbind_args_t; typedef struct _vnet_connect_args @@ -74,10 +93,8 @@ typedef struct _vnet_connect_args session_api_proto_t proto; }; }; - u32 api_client_index; + u32 app_index; u32 api_context; - u64 *options; - session_cb_vft_t *session_cb_vft; /* Used for redirects */ void *mp; @@ -86,12 +103,13 @@ typedef struct _vnet_connect_args typedef struct _vnet_disconnect_args_t { u64 handle; - u32 api_client_index; + u32 app_index; } vnet_disconnect_args_t; -/* Bind / connect options */ +/* Application attach options */ typedef enum { + APP_EVT_QUEUE_SIZE, SESSION_OPTIONS_FLAGS, SESSION_OPTIONS_SEGMENT_SIZE, SESSION_OPTIONS_ADD_SEGMENT_SIZE, @@ -99,7 +117,7 @@ typedef enum SESSION_OPTIONS_TX_FIFO_SIZE, SESSION_OPTIONS_ACCEPT_COOKIE, SESSION_OPTIONS_N_OPTIONS -} session_options_index_t; +} app_attach_options_index_t; /** Server can handle delegated connect requests from local clients */ #define SESSION_OPTIONS_FLAGS_USE_FIFO (1<<0) @@ -109,10 +127,13 @@ typedef enum #define VNET_CONNECT_REDIRECTED 123 +int vnet_application_attach (vnet_app_attach_args_t * a); +int vnet_application_detach (vnet_app_detach_args_t * a); + int vnet_bind_uri (vnet_bind_args_t *); -int vnet_unbind_uri (char *uri, u32 api_client_index); +int vnet_unbind_uri (vnet_unbind_args_t * a); int vnet_connect_uri (vnet_connect_args_t * a); -int vnet_disconnect_session (u32 session_index, u32 thread_index); +int vnet_disconnect_session (vnet_disconnect_args_t * a); int vnet_bind (vnet_bind_args_t * a); int vnet_connect (vnet_connect_args_t * a); diff --git a/src/vnet/session/segment_manager.c b/src/vnet/session/segment_manager.c new file mode 100644 index 00000000..16e5bc56 --- /dev/null +++ b/src/vnet/session/segment_manager.c @@ -0,0 +1,342 @@ +/* + * Copyright (c) 2017 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include + +/** + * Counter used to build segment names + */ +u32 segment_name_counter = 0; + +/** + * Pool of segment managers + */ +segment_manager_t *segment_managers = 0; + +/** + * Default fifo and segment size. TODO config. 
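+ * Currently 64kB fifos and 1MB segments.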
+ */
+u32 default_fifo_size = 1 << 16;
+u32 default_segment_size = 1 << 20;
+
+void
+segment_manager_get_segment_info (u32 index, u8 ** name, u32 * size)
+{
+  svm_fifo_segment_private_t *s;
+  s = svm_fifo_get_segment (index);
+  *name = s->h->segment_name;
+  *size = s->ssvm.ssvm_size;
+}
+
+always_inline int
+session_manager_add_segment_i (segment_manager_t * sm, u32 segment_size,
+			       u8 * segment_name)
+{
+  svm_fifo_segment_create_args_t _ca, *ca = &_ca;
+  int rv;
+
+  memset (ca, 0, sizeof (*ca));
+
+  ca->segment_name = (char *) segment_name;
+  ca->segment_size = segment_size;
+
+  rv = svm_fifo_segment_create (ca);
+  if (rv)
+    {
+      clib_warning ("svm_fifo_segment_create ('%s', %d) failed",
+		    ca->segment_name, ca->segment_size);
+      vec_free (segment_name);
+      return VNET_API_ERROR_SVM_SEGMENT_CREATE_FAIL;
+    }
+
+  vec_add1 (sm->segment_indices, ca->new_segment_index);
+
+  return 0;
+}
+
+int
+session_manager_add_segment (segment_manager_t * sm)
+{
+  u8 *segment_name;
+  svm_fifo_segment_create_args_t _ca, *ca = &_ca;
+  u32 add_segment_size;
+  int rv;
+
+  memset (ca, 0, sizeof (*ca));
+  segment_name = format (0, "%d-%d%c", getpid (), segment_name_counter++, 0);
+  add_segment_size = sm->properties->add_segment_size ?
+    sm->properties->add_segment_size : default_segment_size;
+
+  rv = session_manager_add_segment_i (sm, add_segment_size, segment_name);
+  vec_free (segment_name);
+  return rv;
+}
+
+int
+session_manager_add_first_segment (segment_manager_t * sm, u32 segment_size)
+{
+  svm_fifo_segment_create_args_t _ca, *ca = &_ca;
+  u8 *segment_name;
+  int rv;
+
+  memset (ca, 0, sizeof (*ca));
+  segment_name = format (0, "%d-%d%c", getpid (), segment_name_counter++, 0);
+  rv = session_manager_add_segment_i (sm, segment_size, segment_name);
+  vec_free (segment_name);
+  return rv;
+}
+
+/**
+ * Initializes segment manager based on options provided.
+ * Returns error if svm segment allocation fails.
+ */
+int
+segment_manager_init (segment_manager_t * sm,
+		      segment_manager_properties_t * properties,
+		      u32 first_seg_size)
+{
+  int rv;
+
+  /* app allocates these */
+  sm->properties = properties;
+
+  if (first_seg_size > 0)
+    {
+      rv = session_manager_add_first_segment (sm, first_seg_size);
+      if (rv)
+	{
+	  clib_warning ("Failed to allocate segment");
+	  return rv;
+	}
+    }
+
+  return 0;
+}
+
+/**
+ * Removes segment manager.
+ *
+ * Since the fifos allocated in the segment keep backpointers to the sessions
+ * prior to removing the segment, we call session disconnect. This
+ * subsequently propagates into transport.
+ */
+void
+segment_manager_del (segment_manager_t * sm)
+{
+  u32 *deleted_sessions = 0;
+  u32 *deleted_thread_indices = 0;
+  int i, j;
+
+  /* Across all fifo segments used by the server */
+  for (j = 0; j < vec_len (sm->segment_indices); j++)
+    {
+      svm_fifo_segment_private_t *fifo_segment;
+      svm_fifo_t **fifos;
+      /* Vector of fifos allocated in the segment */
+      fifo_segment = svm_fifo_get_segment (sm->segment_indices[j]);
+      fifos = svm_fifo_segment_get_fifos (fifo_segment);
+
+      /*
+       * Remove any residual sessions from the session lookup table
+       * Don't bother deleting the individual fifos, we're going to
+       * throw away the fifo segment in a minute.
+       */
+      for (i = 0; i < vec_len (fifos); i++)
+	{
+	  svm_fifo_t *fifo;
+	  u32 session_index, thread_index;
+	  stream_session_t *session;
+
+	  fifo = fifos[i];
+	  session_index = fifo->server_session_index;
+	  thread_index = fifo->server_thread_index;
+
+	  session = stream_session_get (session_index, thread_index);
+
+	  /* Add to the deleted_sessions vector (once!)
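Each session owns an rx and a tx fifo, so it can be visited twice here.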
*/ + if (!session->is_deleted) + { + session->is_deleted = 1; + vec_add1 (deleted_sessions, session_index); + vec_add1 (deleted_thread_indices, thread_index); + } + } + + for (i = 0; i < vec_len (deleted_sessions); i++) + { + stream_session_t *session; + session = stream_session_get (deleted_sessions[i], + deleted_thread_indices[i]); + + /* Instead of directly removing the session call disconnect */ + stream_session_disconnect (session); + + /* + stream_session_table_del (smm, session); + pool_put(smm->sessions[deleted_thread_indices[i]], session); + */ + } + + vec_reset_length (deleted_sessions); + vec_reset_length (deleted_thread_indices); + + /* Instead of removing the segment, test when removing the session if + * the segment can be removed + */ + /* svm_fifo_segment_delete (fifo_segment); */ + } + + vec_free (deleted_sessions); + vec_free (deleted_thread_indices); + pool_put (segment_managers, sm); +} + +static int +segment_manager_notify_app_seg_add (segment_manager_t * sm, + u32 fifo_segment_index) +{ + application_t *app = application_get (sm->app_index); + u32 seg_size = 0; + u8 *seg_name; + + /* Send an API message to the external app, to map new segment */ + ASSERT (app->cb_fns.add_segment_callback); + + segment_manager_get_segment_info (fifo_segment_index, &seg_name, &seg_size); + return app->cb_fns.add_segment_callback (app->api_client_index, seg_name, + seg_size); +} + +int +segment_manager_alloc_session_fifos (segment_manager_t * sm, + svm_fifo_t ** server_rx_fifo, + svm_fifo_t ** server_tx_fifo, + u32 * fifo_segment_index) +{ + svm_fifo_segment_private_t *fifo_segment; + u32 fifo_size, sm_index; + u8 added_a_segment = 0; + int i; + + /* Allocate svm fifos */ + ASSERT (vec_len (sm->segment_indices)); + +again: + for (i = 0; i < vec_len (sm->segment_indices); i++) + { + *fifo_segment_index = sm->segment_indices[i]; + fifo_segment = svm_fifo_get_segment (*fifo_segment_index); + + fifo_size = sm->properties->rx_fifo_size; + fifo_size = (fifo_size == 0) ? default_fifo_size : fifo_size; + *server_rx_fifo = svm_fifo_segment_alloc_fifo (fifo_segment, fifo_size); + + fifo_size = sm->properties->tx_fifo_size; + fifo_size = (fifo_size == 0) ? default_fifo_size : fifo_size; + *server_tx_fifo = svm_fifo_segment_alloc_fifo (fifo_segment, fifo_size); + + if (*server_rx_fifo == 0) + { + /* This would be very odd, but handle it... 
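The rx fifo alloc failed; free any tx fifo and retry in the next segment.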
*/
+	  if (*server_tx_fifo != 0)
+	    {
+	      svm_fifo_segment_free_fifo (fifo_segment, *server_tx_fifo);
+	      *server_tx_fifo = 0;
+	    }
+	  continue;
+	}
+      if (*server_tx_fifo == 0)
+	{
+	  if (*server_rx_fifo != 0)
+	    {
+	      svm_fifo_segment_free_fifo (fifo_segment, *server_rx_fifo);
+	      *server_rx_fifo = 0;
+	    }
+	  continue;
+	}
+      break;
+    }
+
+  /* See if we're supposed to create another segment */
+  if (*server_rx_fifo == 0)
+    {
+      if (sm->properties->add_segment)
+	{
+	  if (added_a_segment)
+	    {
+	      clib_warning ("added a segment, still can't allocate a fifo");
+	      return SESSION_ERROR_NEW_SEG_NO_SPACE;
+	    }
+
+	  if (session_manager_add_segment (sm))
+	    return VNET_API_ERROR_URI_FIFO_CREATE_FAILED;
+
+	  added_a_segment = 1;
+	  goto again;
+	}
+      else
+	{
+	  clib_warning ("No space to allocate fifos!");
+	  return SESSION_ERROR_NO_SPACE;
+	}
+    }
+
+  if (added_a_segment)
+    return segment_manager_notify_app_seg_add (sm, *fifo_segment_index);
+
+  /* Backpointers to segment manager */
+  sm_index = segment_manager_index (sm);
+  (*server_tx_fifo)->segment_manager = sm_index;
+  (*server_rx_fifo)->segment_manager = sm_index;
+
+  return 0;
+}
+
+void
+segment_manager_dealloc_fifos (u32 svm_segment_index, svm_fifo_t * rx_fifo,
+			       svm_fifo_t * tx_fifo)
+{
+  segment_manager_t *sm;
+  svm_fifo_segment_private_t *fifo_segment;
+
+  fifo_segment = svm_fifo_get_segment (svm_segment_index);
+  svm_fifo_segment_free_fifo (fifo_segment, rx_fifo);
+  svm_fifo_segment_free_fifo (fifo_segment, tx_fifo);
+
+  /* If we have segment manager, try doing some cleanup.
+   * It's possible to have no segment manager if the session was removed
+   * as a result of a detach */
+  sm = segment_manager_get_if_valid (rx_fifo->segment_manager);
+  if (sm)
+    {
+      /* Remove segment only if it holds no fifos and not the first */
+      if (sm->segment_indices[0] != svm_segment_index
+	  && !svm_fifo_segment_has_fifos (fifo_segment))
+	{
+	  svm_fifo_segment_delete (fifo_segment);
+	  vec_del1 (sm->segment_indices, svm_segment_index);
+	}
+    }
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/session/segment_manager.h b/src/vnet/session/segment_manager.h
new file mode 100644
index 00000000..778d6040
--- /dev/null
+++ b/src/vnet/session/segment_manager.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef SRC_VNET_SESSION_SEGMENT_MANAGER_H_
+#define SRC_VNET_SESSION_SEGMENT_MANAGER_H_
+
+#include
+#include
+
+typedef struct _segment_manager_properties
+{
+  /** Session fifo sizes. */
+  u32 rx_fifo_size;
+  u32 tx_fifo_size;
+
+  /** Configured additional segment size */
+  u32 add_segment_size;
+
+  /** Flag that indicates if additional segments should be created */
+  u8 add_segment;
+
+} segment_manager_properties_t;
+
+typedef struct _segment_manager
+{
+  /** segments mapped by this manager */
+  u32 *segment_indices;
+
+  /** Owner app index */
+  u32 app_index;
+
+  /** Pointer to manager properties.
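The owning app allocates these.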
Could be shared among all of
+   * an app's segment managers */
+  segment_manager_properties_t *properties;
+} segment_manager_t;
+
+/** Pool of segment managers */
+extern segment_manager_t *segment_managers;
+
+always_inline segment_manager_t *
+segment_manager_new ()
+{
+  segment_manager_t *sm;
+  pool_get (segment_managers, sm);
+  memset (sm, 0, sizeof (*sm));
+  return sm;
+}
+
+always_inline segment_manager_t *
+segment_manager_get (u32 index)
+{
+  return pool_elt_at_index (segment_managers, index);
+}
+
+always_inline segment_manager_t *
+segment_manager_get_if_valid (u32 index)
+{
+  if (pool_is_free_index (segment_managers, index))
+    return 0;
+  return pool_elt_at_index (segment_managers, index);
+}
+
+always_inline u32
+segment_manager_index (segment_manager_t * sm)
+{
+  return sm - segment_managers;
+}
+
+int
+segment_manager_init (segment_manager_t * sm,
+		      segment_manager_properties_t * properties,
+		      u32 seg_size);
+
+void segment_manager_get_segment_info (u32 index, u8 ** name, u32 * size);
+int
+session_manager_add_first_segment (segment_manager_t * sm, u32 segment_size);
+int session_manager_add_segment (segment_manager_t * sm);
+void segment_manager_del (segment_manager_t * sm);
+int
+segment_manager_alloc_session_fifos (segment_manager_t * sm,
+				     svm_fifo_t ** server_rx_fifo,
+				     svm_fifo_t ** server_tx_fifo,
+				     u32 * fifo_segment_index);
+void
+segment_manager_dealloc_fifos (u32 svm_segment_index, svm_fifo_t * rx_fifo,
+			       svm_fifo_t * tx_fifo);
+
+#endif /* SRC_VNET_SESSION_SEGMENT_MANAGER_H_ */
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/session/session.api b/src/vnet/session/session.api
index 582765b5..e207e46f 100644
--- a/src/vnet/session/session.api
+++ b/src/vnet/session/session.api
@@ -13,6 +13,68 @@
  * limitations under the License.
  */
 
+/** \brief client->vpp, attach application to session layer
+    @param client_index - opaque cookie to identify the sender
+    @param context - sender context, to match reply w/ request
+    @param initial_segment_size - size of the initial shm segment to be
+                                  allocated
+    @param options - segment size, fifo sizes, etc.
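+                     (see app_attach_options_index_t)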
+*/
+ define application_attach {
+    u32 client_index;
+    u32 context;
+    u32 initial_segment_size;
+    u64 options[16];
+ };
+
+ /** \brief Application attach reply
+     @param context - sender context, to match reply w/ request
+     @param retval - return code for the request
+     @param app_event_queue_address - vpp event queue address or 0 if this
+                                      connection shouldn't send events
+     @param segment_size - size of first shm segment
+     @param segment_name_length - length of segment name
+     @param segment_name - name of segment client needs to attach to
+*/
+define application_attach_reply {
+    u32 context;
+    i32 retval;
+    u64 app_event_queue_address;
+    u32 segment_size;
+    u8 segment_name_length;
+    u8 segment_name[128];
+};
+
+ /** \brief client->vpp, detach application from session layer
+    @param client_index - opaque cookie to identify the sender
+    @param context - sender context, to match reply w/ request
+*/
+ define application_detach {
+    u32 client_index;
+    u32 context;
+ };
+
+ /** \brief detach reply
+    @param context - sender context, to match reply w/ request
+    @param retval - return code for the request
+*/
+define application_detach_reply {
+    u32 context;
+    i32 retval;
+};
+
+/** \brief vpp->client, please map an additional shared memory segment
+    @param client_index - opaque cookie to identify the sender
+    @param context - sender context, to match reply w/ request
+    @param segment_name -
+*/
+define map_another_segment {
+    u32 client_index;
+    u32 context;
+    u32 segment_size;
+    u8 segment_name[128];
+};
+
 /** \brief Bind to a given URI
   @param client_index - opaque cookie to identify the sender
   @param context - sender context, to match reply w/ request
@@ -25,9 +87,7 @@ define bind_uri {
   u32 client_index;
   u32 context;
   u32 accept_cookie;
-  u32 initial_segment_size;
   u8 uri[128];
-  u64 options[16];
 };
 
 /** \brief Unbind a given URI
@@ -49,7 +109,10 @@ define unbind_uri {
   @param accept_cookie - sender accept cookie, to identify this bind flavor
   @param uri - a URI, e.g. "tcp4://0.0.0.0/0/80"
                "tcp6://::/0/80" [ipv6], etc.
-  @param options - socket options, fifo sizes, etc.
+  @param options - socket options, fifo sizes, etc. passed by vpp to the
+                   server when redirecting connects
+  @param client_queue_address - binary API client queue address. Used by
+                                local server when connect was redirected.
*/ define connect_uri { u32 client_index; @@ -62,18 +125,10 @@ define connect_uri { /** \brief Bind reply @param context - sender context, to match reply w/ request @param retval - return code for the request - @param event_queue_address - vpp event queue address or 0 if this - connection shouldn't send events - @param segment_name_length - length of segment name - @param segment_name - name of segment client needs to attach to */ define bind_uri_reply { u32 context; i32 retval; - u64 server_event_queue_address; - u8 segment_name_length; - u32 segment_size; - u8 segment_name[128]; }; /** \brief unbind reply @@ -88,43 +143,28 @@ define unbind_uri_reply { /** \brief vpp->client, connect reply @param context - sender context, to match reply w/ request @param retval - return code for the request + @param handle - session handle @param server_rx_fifo - rx (vpp -> vpp-client) fifo address @param server_tx_fifo - tx (vpp-client -> vpp) fifo address - @param session_index - session index; - @param session_thread_index - session thread index - @param session_type - session thread type @param vpp_event_queue_address - vpp's event queue address - @param client_event_queue_address - client's event queue address + @param segment_size - size of segment to be attached. Only for redirects. @param segment_name_length - non-zero if the client needs to attach to - the fifo segment + the fifo segment. This should only happen + if session was redirected. @param segment_name - set if the client needs to attach to the segment */ define connect_uri_reply { u32 context; i32 retval; + u64 handle; u64 server_rx_fifo; u64 server_tx_fifo; - u32 session_index; - u32 session_thread_index; - u8 session_type; - u64 client_event_queue_address; u64 vpp_event_queue_address; u32 segment_size; u8 segment_name_length; u8 segment_name[128]; }; -/** \brief vpp->client, please map an additional shared memory segment - @param context - sender context, to match reply w/ request - @param segment_name - -*/ -define map_another_segment { - u32 client_index; - u32 context; - u32 segment_size; - u8 segment_name[128]; -}; - /** \brief client->vpp @param context - sender context, to match reply w/ request @param retval - return code for the request @@ -136,25 +176,27 @@ define map_another_segment_reply { /** \brief vpp->client, accept this session @param context - sender context, to match reply w/ request - @param accept_cookie - tells client which bind flavor just occurred + @param listener_handle - tells client which listener this pertains to + @param handle - unique session identifier + @param session_thread_index - thread index of new session @param rx_fifo_address - rx (vpp -> vpp-client) fifo address @param tx_fifo_address - tx (vpp-client -> vpp) fifo address - @param session_index - index of new session - @param session_thread_index - thread index of new session @param vpp_event_queue_address - vpp's event queue address - @param session_type - type of session - + @param port - remote port + @param is_ip4 - 1 if the ip is ip4 + @param ip - remote ip */ define accept_session { u32 client_index; u32 context; - u32 accept_cookie; + u64 listener_handle; + u64 handle; u64 server_rx_fifo; u64 server_tx_fifo; - u32 session_index; - u32 session_thread_index; u64 vpp_event_queue_address; - u8 session_type; + u16 port; + u8 is_ip4; + u8 ip[16]; }; /** \brief client->vpp, reply to an accept message @@ -167,23 +209,19 @@ define accept_session { define accept_session_reply { u32 context; i32 retval; - u8 session_type; - u8 session_thread_index; 
- u32 session_index; + u64 handle; }; /** \brief bidirectional disconnect API @param client_index - opaque cookie to identify the sender client to vpp direction only @param context - sender context, to match reply w/ request - @param session_index - cookie #1 from accept_session / connect_reply - @param session_thread_index - cookie #2 + @param handle - session handle obtained from accept/connect */ define disconnect_session { u32 client_index; u32 context; - u32 session_index; - u32 session_thread_index; + u64 handle; }; /** \brief bidirectional disconnect reply API @@ -191,31 +229,25 @@ define disconnect_session { client to vpp direction only @param context - sender context, to match reply w/ request @param retval - return code for the request - @param session_index - session index from accept_session / connect_reply - @param session_thread_index - thread index from accept_session / - connect_reply + @param handle - session handle */ define disconnect_session_reply { u32 client_index; u32 context; i32 retval; - u32 session_index; - u32 session_thread_index; + u64 handle; }; /** \brief vpp->client reset session API @param client_index - opaque cookie to identify the sender client to vpp direction only @param context - sender context, to match reply w/ request - @param session_index - session index from accept_session / connect_reply - @param session_thread_index - thread index from accept_session / - connect_reply + @param handle - session handle obtained via accept/connects */ define reset_session { u32 client_index; u32 context; - u32 session_index; - u32 session_thread_index; + u64 handle; }; /** \brief client->vpp reset session reply @@ -223,16 +255,13 @@ define reset_session { client to vpp direction only @param context - sender context, to match reply w/ request @param retval - return code for the request - @param session_index - session index from accept_session / connect_reply - @param session_thread_index - thread index from accept_session / - connect_reply + @param handle - session handle obtained via accept/connect */ define reset_session_reply { u32 client_index; u32 context; i32 retval; - u32 session_index; - u32 session_thread_index; + u64 handle; }; /** \brief Bind to an ip:port pair for a given transport protocol @@ -277,7 +306,7 @@ define unbind_sock { @param proto - protocol 0 - TCP 1 - UDP @param client_queue_address - client's API queue address. Non-zero when used to perform redirects - @param options - socket options, fifo sizes, etc. + @param options - socket options, fifo sizes, etc. when doing redirects */ define connect_sock { u32 client_index; @@ -326,7 +355,7 @@ define unbind_sock_reply { @param server_rx_fifo - rx (vpp -> vpp-client) fifo address @param server_tx_fifo - tx (vpp-client -> vpp) fifo address @param vpp_event_queue_address - vpp's event queue address - @param client_event_queue_address - client's event queue address + @param segment_size - size of segment to be attached. Only for redirects. 
@param segment_name_length - non-zero if the client needs to attach to the fifo segment @param segment_name - set if the client needs to attach to the segment @@ -337,92 +366,12 @@ define connect_sock_reply { u64 handle; u64 server_rx_fifo; u64 server_tx_fifo; - u64 client_event_queue_address; u64 vpp_event_queue_address; u32 segment_size; u8 segment_name_length; u8 segment_name[128]; }; -/** \brief bidirectional disconnect API - @param client_index - opaque cookie to identify the sender - client to vpp direction only - @param context - sender context, to match reply w/ request - @param handle - session handle obtained through accept/connect -*/ -define disconnect_sock { - u32 client_index; - u32 context; - u64 handle; -}; - -/** \brief bidirectional disconnect reply API - @param client_index - opaque cookie to identify the sender - client to vpp direction only - @param client_context - sender context, to match reply w/ request - @param handle - session handle obtained through accept/connect -*/ -define disconnect_sock_reply { - u32 client_index; - u32 context; - i32 retval; - u64 handle; -}; - -/** \brief vpp->client, accept this session - @param context - sender context, to match reply w/ request - @param accept_cookie - tells client which bind flavor just occurred - @param handle - session handle obtained through accept/connect - @param rx_fifo_address - rx (vpp -> vpp-client) fifo address - @param tx_fifo_address - tx (vpp-client -> vpp) fifo address - @param vpp_event_queue_address - vpp's event queue address -*/ -define accept_sock { - u32 client_index; - u32 context; - u32 accept_cookie; - u64 handle; - u64 server_rx_fifo; - u64 server_tx_fifo; - u64 vpp_event_queue_address; -}; - -/** \brief client->vpp, reply to an accept message - @param context - sender context, to match reply w/ request - @param retval - return code for the request - @param handle - session handle obtained through accept/connect -*/ -define accept_sock_reply { - u32 context; - i32 retval; - u64 handle; -}; - -/** \brief vpp->client reset session API - @param client_index - opaque cookie to identify the sender - client to vpp direction only - @param context - sender context, to match reply w/ request - @param handle - session handle obtained through accept/connect -*/ -define reset_sock { - u32 client_index; - u32 context; - u64 handle; -}; - -/** \brief client->vpp reset session reply - @param client_index - opaque cookie to identify the sender - client to vpp direction only - @param context - sender context, to match reply w/ request - @param handle - session handle obtained through accept/connect -*/ -define reset_sock_reply { - u32 client_index; - u32 context; - i32 retval; - u64 handle; -}; - /** \brief enable/disable session layer @param client_index - opaque cookie to identify the sender client to vpp direction only diff --git a/src/vnet/session/session.c b/src/vnet/session/session.c index 8e2b2616..e6cfe7da 100644 --- a/src/vnet/session/session.c +++ b/src/vnet/session/session.c @@ -36,15 +36,14 @@ session_manager_main_t session_manager_main; * Session lookup key; (src-ip, dst-ip, src-port, dst-port, session-type) * Value: (owner thread index << 32 | session_index); */ -static void -stream_session_table_add_for_tc (u8 sst, transport_connection_t * tc, - u64 value) +void +stream_session_table_add_for_tc (transport_connection_t * tc, u64 value) { session_manager_main_t *smm = &session_manager_main; session_kv4_t kv4; session_kv6_t kv6; - switch (sst) + switch (tc->proto) { case SESSION_TYPE_IP4_UDP: case 
SESSION_TYPE_IP4_TCP: @@ -72,12 +71,12 @@ stream_session_table_add (session_manager_main_t * smm, stream_session_t * s, tc = tp_vfts[s->session_type].get_connection (s->connection_index, s->thread_index); - stream_session_table_add_for_tc (s->session_type, tc, value); + stream_session_table_add_for_tc (tc, value); } static void -stream_session_half_open_table_add (u8 sst, transport_connection_t * tc, - u64 value) +stream_session_half_open_table_add (session_type_t sst, + transport_connection_t * tc, u64 value) { session_manager_main_t *smm = &session_manager_main; session_kv4_t kv4; @@ -105,14 +104,13 @@ stream_session_half_open_table_add (u8 sst, transport_connection_t * tc, } } -static int -stream_session_table_del_for_tc (session_manager_main_t * smm, u8 sst, - transport_connection_t * tc) +int +stream_session_table_del_for_tc (transport_connection_t * tc) { + session_manager_main_t *smm = &session_manager_main; session_kv4_t kv4; session_kv6_t kv6; - - switch (sst) + switch (tc->proto) { case SESSION_TYPE_IP4_UDP: case SESSION_TYPE_IP4_TCP: @@ -141,7 +139,7 @@ stream_session_table_del (session_manager_main_t * smm, stream_session_t * s) ts = tp_vfts[s->session_type].get_connection (s->connection_index, s->thread_index); - return stream_session_table_del_for_tc (smm, s->session_type, ts); + return stream_session_table_del_for_tc (ts); } static void @@ -383,7 +381,7 @@ stream_session_lookup_transport6 (ip6_address_t * lcl, ip6_address_t * rmt, * Allocate vpp event queue (once) per worker thread */ void -vpp_session_event_queue_allocate (session_manager_main_t * smm, +session_vpp_event_queue_allocate (session_manager_main_t * smm, u32 thread_index) { api_main_t *am = &api_main; @@ -406,266 +404,24 @@ vpp_session_event_queue_allocate (session_manager_main_t * smm, } } -void -session_manager_get_segment_info (u32 index, u8 ** name, u32 * size) -{ - svm_fifo_segment_private_t *s; - s = svm_fifo_get_segment (index); - *name = s->h->segment_name; - *size = s->ssvm.ssvm_size; -} - -always_inline int -session_manager_add_segment_i (session_manager_main_t * smm, - session_manager_t * sm, - u32 segment_size, u8 * segment_name) -{ - svm_fifo_segment_create_args_t _ca, *ca = &_ca; - int rv; - - memset (ca, 0, sizeof (*ca)); - - ca->segment_name = (char *) segment_name; - ca->segment_size = segment_size; - - rv = svm_fifo_segment_create (ca); - if (rv) - { - clib_warning ("svm_fifo_segment_create ('%s', %d) failed", - ca->segment_name, ca->segment_size); - vec_free (segment_name); - return -1; - } - - vec_add1 (sm->segment_indices, ca->new_segment_index); - - return 0; -} - -static int -session_manager_add_segment (session_manager_main_t * smm, - session_manager_t * sm) -{ - u8 *segment_name; - svm_fifo_segment_create_args_t _ca, *ca = &_ca; - u32 add_segment_size; - u32 default_segment_size = 128 << 10; - - memset (ca, 0, sizeof (*ca)); - segment_name = format (0, "%d-%d%c", getpid (), - smm->unique_segment_name_counter++, 0); - add_segment_size = - sm->add_segment_size ? 
sm->add_segment_size : default_segment_size; - - return session_manager_add_segment_i (smm, sm, add_segment_size, - segment_name); -} - -int -session_manager_add_first_segment (session_manager_main_t * smm, - session_manager_t * sm, u32 segment_size, - u8 ** segment_name) -{ - svm_fifo_segment_create_args_t _ca, *ca = &_ca; - memset (ca, 0, sizeof (*ca)); - *segment_name = format (0, "%d-%d%c", getpid (), - smm->unique_segment_name_counter++, 0); - return session_manager_add_segment_i (smm, sm, segment_size, *segment_name); -} - -void -session_manager_del (session_manager_main_t * smm, session_manager_t * sm) -{ - u32 *deleted_sessions = 0; - u32 *deleted_thread_indices = 0; - int i, j; - - /* Across all fifo segments used by the server */ - for (j = 0; j < vec_len (sm->segment_indices); j++) - { - svm_fifo_segment_private_t *fifo_segment; - svm_fifo_t **fifos; - /* Vector of fifos allocated in the segment */ - fifo_segment = svm_fifo_get_segment (sm->segment_indices[j]); - fifos = (svm_fifo_t **) fifo_segment->h->fifos; - - /* - * Remove any residual sessions from the session lookup table - * Don't bother deleting the individual fifos, we're going to - * throw away the fifo segment in a minute. - */ - for (i = 0; i < vec_len (fifos); i++) - { - svm_fifo_t *fifo; - u32 session_index, thread_index; - stream_session_t *session; - - fifo = fifos[i]; - session_index = fifo->server_session_index; - thread_index = fifo->server_thread_index; - - session = pool_elt_at_index (smm->sessions[thread_index], - session_index); - - /* Add to the deleted_sessions vector (once!) */ - if (!session->is_deleted) - { - session->is_deleted = 1; - vec_add1 (deleted_sessions, - session - smm->sessions[thread_index]); - vec_add1 (deleted_thread_indices, thread_index); - } - } - - for (i = 0; i < vec_len (deleted_sessions); i++) - { - stream_session_t *session; - - session = - pool_elt_at_index (smm->sessions[deleted_thread_indices[i]], - deleted_sessions[i]); - - /* Instead of directly removing the session call disconnect */ - stream_session_disconnect (session); - - /* - stream_session_table_del (smm, session); - pool_put(smm->sessions[deleted_thread_indices[i]], session); - */ - } - - vec_reset_length (deleted_sessions); - vec_reset_length (deleted_thread_indices); - - /* Instead of removing the segment, test when removing the session if - * the segment can be removed - */ - /* svm_fifo_segment_delete (fifo_segment); */ - } - - vec_free (deleted_sessions); - vec_free (deleted_thread_indices); -} - -int -session_manager_allocate_session_fifos (session_manager_main_t * smm, - session_manager_t * sm, - svm_fifo_t ** server_rx_fifo, - svm_fifo_t ** server_tx_fifo, - u32 * fifo_segment_index, - u8 * added_a_segment) -{ - svm_fifo_segment_private_t *fifo_segment; - u32 fifo_size, default_fifo_size = 1 << 16; /* TODO config */ - int i; - - *added_a_segment = 0; - - /* Allocate svm fifos */ - ASSERT (vec_len (sm->segment_indices)); - -again: - for (i = 0; i < vec_len (sm->segment_indices); i++) - { - *fifo_segment_index = sm->segment_indices[i]; - fifo_segment = svm_fifo_get_segment (*fifo_segment_index); - - fifo_size = sm->rx_fifo_size; - fifo_size = (fifo_size == 0) ? default_fifo_size : fifo_size; - *server_rx_fifo = svm_fifo_segment_alloc_fifo (fifo_segment, fifo_size); - - fifo_size = sm->tx_fifo_size; - fifo_size = (fifo_size == 0) ? 
default_fifo_size : fifo_size; - *server_tx_fifo = svm_fifo_segment_alloc_fifo (fifo_segment, fifo_size); - - if (*server_rx_fifo == 0) - { - /* This would be very odd, but handle it... */ - if (*server_tx_fifo != 0) - { - svm_fifo_segment_free_fifo (fifo_segment, *server_tx_fifo); - *server_tx_fifo = 0; - } - continue; - } - if (*server_tx_fifo == 0) - { - if (*server_rx_fifo != 0) - { - svm_fifo_segment_free_fifo (fifo_segment, *server_rx_fifo); - *server_rx_fifo = 0; - } - continue; - } - break; - } - - /* See if we're supposed to create another segment */ - if (*server_rx_fifo == 0) - { - if (sm->add_segment) - { - if (*added_a_segment) - { - clib_warning ("added a segment, still cant allocate a fifo"); - return SESSION_ERROR_NEW_SEG_NO_SPACE; - } - - if (session_manager_add_segment (smm, sm)) - return VNET_API_ERROR_URI_FIFO_CREATE_FAILED; - - *added_a_segment = 1; - goto again; - } - else - { - clib_warning ("No space to allocate fifos!"); - return SESSION_ERROR_NO_SPACE; - } - } - return 0; -} - int -stream_session_create_i (session_manager_main_t * smm, application_t * app, - transport_connection_t * tc, +stream_session_create_i (segment_manager_t * sm, transport_connection_t * tc, stream_session_t ** ret_s) { - int rv; + session_manager_main_t *smm = &session_manager_main; svm_fifo_t *server_rx_fifo = 0, *server_tx_fifo = 0; u32 fifo_segment_index; - u32 pool_index, seg_size; + u32 pool_index; stream_session_t *s; u64 value; u32 thread_index = tc->thread_index; - session_manager_t *sm; - u8 segment_added; - u8 *seg_name; - - sm = session_manager_get (app->session_manager_index); - - /* Check the API queue */ - if (app->mode == APP_SERVER && application_api_queue_is_full (app)) - return SESSION_ERROR_API_QUEUE_FULL; + int rv; - if ((rv = session_manager_allocate_session_fifos (smm, sm, &server_rx_fifo, - &server_tx_fifo, - &fifo_segment_index, - &segment_added))) + if ((rv = segment_manager_alloc_session_fifos (sm, &server_rx_fifo, + &server_tx_fifo, + &fifo_segment_index))) return rv; - if (segment_added && app->mode == APP_SERVER) - { - /* Send an API message to the external server, to map new segment */ - ASSERT (app->cb_fns.add_segment_callback); - - session_manager_get_segment_info (fifo_segment_index, &seg_name, - &seg_size); - if (app->cb_fns.add_segment_callback (app->api_client_index, seg_name, - seg_size)) - return VNET_API_ERROR_URI_FIFO_CREATE_FAILED; - } - /* Create the session */ pool_get (smm->sessions[thread_index], s); memset (s, 0, sizeof (*s)); @@ -682,10 +438,9 @@ stream_session_create_i (session_manager_main_t * smm, application_t * app, s->server_tx_fifo = server_tx_fifo; /* Initialize state machine, such as it is... */ - s->session_type = app->session_type; + s->session_type = tc->proto; s->session_state = SESSION_STATE_CONNECTING; - s->app_index = application_get_index (app); - s->server_segment_index = fifo_segment_index; + s->svm_segment_index = fifo_segment_index; s->thread_index = thread_index; s->session_index = pool_index; @@ -697,7 +452,7 @@ stream_session_create_i (session_manager_main_t * smm, application_t * app, /* Add to the main lookup table */ value = (((u64) thread_index) << 32) | (u64) s->session_index; - stream_session_table_add_for_tc (app->session_type, tc, value); + stream_session_table_add_for_tc (tc, value); *ret_s = s; @@ -881,94 +636,6 @@ session_manager_flush_enqueue_events (u32 thread_index) return errors; } -/* - * Start listening on server's ip/port pair for requested transport. 
- * - * Creates a 'dummy' stream session with state LISTENING to be used in session - * lookups, prior to establishing connection. Requests transport to build - * it's own specific listening connection. - */ -int -stream_session_start_listen (u32 server_index, ip46_address_t * ip, u16 port) -{ - session_manager_main_t *smm = &session_manager_main; - stream_session_t *s; - transport_connection_t *tc; - application_t *srv; - u32 tci; - - srv = application_get (server_index); - - pool_get (smm->listen_sessions[srv->session_type], s); - memset (s, 0, sizeof (*s)); - - s->session_type = srv->session_type; - s->session_state = SESSION_STATE_LISTENING; - s->session_index = s - smm->listen_sessions[srv->session_type]; - s->app_index = srv->index; - - /* Transport bind/listen */ - tci = tp_vfts[srv->session_type].bind (s->session_index, ip, port); - - /* Attach transport to session */ - s->connection_index = tci; - tc = tp_vfts[srv->session_type].get_listener (tci); - - srv->session_index = s->session_index; - - /* Add to the main lookup table */ - stream_session_table_add_for_tc (s->session_type, tc, s->session_index); - - return 0; -} - -void -stream_session_stop_listen (u32 server_index) -{ - session_manager_main_t *smm = &session_manager_main; - stream_session_t *listener; - transport_connection_t *tc; - application_t *srv; - - srv = application_get (server_index); - listener = pool_elt_at_index (smm->listen_sessions[srv->session_type], - srv->session_index); - - tc = tp_vfts[srv->session_type].get_listener (listener->connection_index); - stream_session_table_del_for_tc (smm, listener->session_type, tc); - - tp_vfts[srv->session_type].unbind (listener->connection_index); - pool_put (smm->listen_sessions[srv->session_type], listener); -} - -int -connect_server_add_segment_cb (application_t * ss, char *segment_name, - u32 segment_size) -{ - /* Does exactly nothing, but die */ - ASSERT (0); - return 0; -} - -void -connects_session_manager_init (session_manager_main_t * smm, u8 session_type) -{ - session_manager_t *sm; - u32 connect_fifo_size = 256 << 10; /* Config? 
*/ - u32 default_segment_size = 1 << 20; - - pool_get (smm->session_managers, sm); - memset (sm, 0, sizeof (*sm)); - - sm->add_segment_size = default_segment_size; - sm->rx_fifo_size = connect_fifo_size; - sm->tx_fifo_size = connect_fifo_size; - sm->add_segment = 1; - - session_manager_add_segment (smm, sm); - smm->connect_manager_index[session_type] = sm - smm->session_managers; -} - void stream_session_connect_notify (transport_connection_t * tc, u8 sst, u8 is_fail) @@ -976,34 +643,36 @@ stream_session_connect_notify (transport_connection_t * tc, u8 sst, session_manager_main_t *smm = &session_manager_main; application_t *app; stream_session_t *new_s = 0; - u64 value; + u64 handle; + u32 api_context = 0; - value = stream_session_half_open_lookup (smm, &tc->lcl_ip, &tc->rmt_ip, - tc->lcl_port, tc->rmt_port, - tc->proto); - if (value == HALF_OPEN_LOOKUP_INVALID_VALUE) + handle = stream_session_half_open_lookup (smm, &tc->lcl_ip, &tc->rmt_ip, + tc->lcl_port, tc->rmt_port, + tc->proto); + if (handle == HALF_OPEN_LOOKUP_INVALID_VALUE) { clib_warning ("This can't be good!"); return; } - app = application_get (value >> 32); + /* Get the app's index from the handle we stored when opening connection */ + app = application_get (handle >> 32); + api_context = tc->s_index; if (!is_fail) { - /* Create new session (server segments are allocated if needed) */ - if (stream_session_create_i (smm, app, tc, &new_s)) - return; + segment_manager_t *sm; + sm = application_get_connect_segment_manager (app); - app->session_index = stream_session_get_index (new_s); - app->thread_index = new_s->thread_index; + /* Create new session (svm segments are allocated if needed) */ + if (stream_session_create_i (sm, tc, &new_s)) + return; - /* Allocate vpp event queue for this thread if needed */ - vpp_session_event_queue_allocate (smm, tc->thread_index); + new_s->app_index = app->index; } /* Notify client */ - app->cb_fns.session_connected_callback (app->api_client_index, new_s, + app->cb_fns.session_connected_callback (app->index, api_context, new_s, is_fail); /* Cleanup session lookup */ @@ -1046,48 +715,13 @@ void stream_session_delete (stream_session_t * s) { session_manager_main_t *smm = vnet_get_session_manager_main (); - svm_fifo_segment_private_t *fifo_segment; - application_t *app; /* Delete from the main lookup table. */ stream_session_table_del (smm, s); /* Cleanup fifo segments */ - fifo_segment = svm_fifo_get_segment (s->server_segment_index); - svm_fifo_segment_free_fifo (fifo_segment, s->server_rx_fifo); - svm_fifo_segment_free_fifo (fifo_segment, s->server_tx_fifo); - - app = application_get_if_valid (s->app_index); - - /* No app. 
A possibility: after disconnect application called unbind */ - if (!app) - return; - - if (app->mode == APP_CLIENT) - { - /* Cleanup app if client */ - application_del (app); - } - else if (app->mode == APP_SERVER) - { - session_manager_t *sm; - svm_fifo_segment_private_t *fifo_segment; - svm_fifo_t **fifos; - u32 fifo_index; - - /* For server, see if any segments can be removed */ - sm = session_manager_get (app->session_manager_index); - - /* Delete fifo */ - fifo_segment = svm_fifo_get_segment (s->server_segment_index); - fifos = (svm_fifo_t **) fifo_segment->h->fifos; - - fifo_index = svm_fifo_segment_index (fifo_segment); - - /* Remove segment only if it holds no fifos and not the first */ - if (sm->segment_indices[0] != fifo_index && vec_len (fifos) == 0) - svm_fifo_segment_delete (fifo_segment); - } + segment_manager_dealloc_fifos (s->svm_segment_index, s->server_rx_fifo, + s->server_tx_fifo); pool_put (smm->sessions[s->thread_index], s); } @@ -1134,21 +768,22 @@ int stream_session_accept (transport_connection_t * tc, u32 listener_index, u8 sst, u8 notify) { - session_manager_main_t *smm = &session_manager_main; application_t *server; stream_session_t *s, *listener; + segment_manager_t *sm; int rv; /* Find the server */ - listener = pool_elt_at_index (smm->listen_sessions[sst], listener_index); + listener = listen_session_get (sst, listener_index); server = application_get (listener->app_index); - if ((rv = stream_session_create_i (smm, server, tc, &s))) + sm = application_get_listen_segment_manager (server, listener); + if ((rv = stream_session_create_i (sm, tc, &s))) return rv; - /* Allocate vpp event queue for this thread if needed */ - vpp_session_event_queue_allocate (smm, tc->thread_index); + s->app_index = server->index; + s->listener_index = listener_index; /* Shoulder-tap the server */ if (notify) @@ -1159,37 +794,111 @@ stream_session_accept (transport_connection_t * tc, u32 listener_index, return 0; } +/** + * Ask transport to open connection to remote transport endpoint. + * + * Stores handle for matching request with reply since the call can be + * asynchronous. For instance, for TCP the 3-way handshake must complete + * before the reply comes. The session is only created once the connection + * is established. + * + * @param app_index Index of the application requesting the connect. + * @param st Session type requested. + * @param tep Remote transport endpoint. + * @param res Resulting transport connection. + */ int -stream_session_open (u8 sst, ip46_address_t * addr, u16 port_host_byte_order, - u32 app_index) +stream_session_open (u32 app_index, session_type_t st, + transport_endpoint_t * tep, + transport_connection_t ** res) { transport_connection_t *tc; - u32 tci; - u64 value; int rv; + u64 handle; - /* Ask transport to open connection */ - rv = tp_vfts[sst].open (addr, port_host_byte_order); + rv = tp_vfts[st].open (&tep->ip, tep->port); if (rv < 0) { clib_warning ("Transport failed to open connection."); return VNET_API_ERROR_SESSION_CONNECT_FAIL; } - tci = rv; + tc = tp_vfts[st].get_half_open ((u32) rv); - /* Get transport connection */ - tc = tp_vfts[sst].get_half_open (tci); - - /* Store api_client_index and transport connection index */ - value = (((u64) app_index) << 32) | (u64) tc->c_index; + /* Save app and tc index.
The latter is needed to help establish the + * connection while the former is needed when the connect notify comes + * and we have to notify the external app */ + handle = (((u64) app_index) << 32) | (u64) tc->c_index; /* Add to the half-open lookup table */ - stream_session_half_open_table_add (sst, tc, value); + stream_session_half_open_table_add (st, tc, handle); + + *res = tc; + + return 0; +} + +/** + * Ask transport to listen on local transport endpoint. + * + * @param s Session for which listen will be called. Note that unlike + * established sessions, listen sessions are not associated to a + * thread. + * @param tep Local endpoint to be listened on. + */ +int +stream_session_listen (stream_session_t * s, transport_endpoint_t * tep) +{ + transport_connection_t *tc; + u32 tci; + + /* Transport bind/listen */ + tci = tp_vfts[s->session_type].bind (s->session_index, &tep->ip, tep->port); + + if (tci == (u32) ~ 0) + return -1; + + /* Attach transport to session */ + s->connection_index = tci; + tc = tp_vfts[s->session_type].get_listener (tci); + + /* Weird but handle it ... */ + if (tc == 0) + return -1; + + /* Add to the main lookup table */ + stream_session_table_add_for_tc (tc, s->session_index); return 0; } +/** + * Ask transport to stop listening on local transport endpoint. + * + * @param s Session to stop listening on. It must be in state LISTENING. + */ +int +stream_session_stop_listen (stream_session_t * s) +{ + transport_connection_t *tc; + + if (s->session_state != SESSION_STATE_LISTENING) + { + clib_warning ("not a listening session"); + return -1; + } + + tc = tp_vfts[s->session_type].get_listener (s->connection_index); + if (!tc) + { + clib_warning ("no transport"); + return VNET_API_ERROR_ADDRESS_NOT_IN_USE; + } + + stream_session_table_del_for_tc (tc); + tp_vfts[s->session_type].unbind (s->connection_index); + return 0; +} + /** * Disconnect session and propagate to transport. This should eventually * result in a delete notification that allows us to cleanup session state. 
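Illustrative sketch (hypothetical helpers, not part of the patch): the half-open handle packed by stream_session_open () above and unpacked by stream_session_connect_notify () keeps the application index in the upper 32 bits and the transport connection index in the lower 32.

always_inline u64
half_open_handle_make (u32 app_index, u32 tc_index)
{
  /* App index in the upper word, transport connection index in the
   * lower word, exactly as stream_session_open () stores it */
  return (((u64) app_index) << 32) | (u64) tc_index;
}

always_inline u32
half_open_handle_app_index (u64 handle)
{
  return handle >> 32;
}

always_inline u32
half_open_handle_tc_index (u64 handle)
{
  return handle & 0xFFFFFFFF;
}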
@@ -1297,6 +1006,10 @@ session_manager_main_enable (vlib_main_t * vm) vec_validate (smm->last_event_poll_by_thread, num_threads - 1); #endif + /* Allocate vpp event queues */ + for (i = 0; i < vec_len (smm->vpp_event_queues); i++) + session_vpp_event_queue_allocate (smm, i); + /* $$$$ preallocate hack config parameter */ for (i = 0; i < 200000; i++) { @@ -1322,9 +1035,6 @@ session_manager_main_enable (vlib_main_t * vm) 200000 /* $$$$ config parameter nbuckets */ , (64 << 20) /*$$$ config parameter table size */ ); - for (i = 0; i < SESSION_N_TYPES; i++) - smm->connect_manager_index[i] = INVALID_INDEX; - smm->is_enabled = 1; /* Enable TCP transport */ diff --git a/src/vnet/session/session.h b/src/vnet/session/session.h index 6878b4d2..6e4ea96d 100644 --- a/src/vnet/session/session.h +++ b/src/vnet/session/session.h @@ -21,6 +21,7 @@ #include #include #include +#include #define HALF_OPEN_LOOKUP_INVALID_VALUE ((u64)~0) #define INVALID_INDEX ((u32)~0) @@ -107,6 +108,9 @@ typedef struct _stream_session_t svm_fifo_t *server_rx_fifo; svm_fifo_t *server_tx_fifo; + /** svm segment index where fifos were allocated */ + u32 svm_segment_index; + /** Type */ u8 session_type; @@ -133,27 +137,10 @@ typedef struct _stream_session_t /** stream server pool index */ u32 app_index; - /** svm segment index */ - u32 server_segment_index; + /** Parent listener session if the result of an accept */ + u32 listener_index; } stream_session_t; -typedef struct _session_manager -{ - /** segments mapped by this server */ - u32 *segment_indices; - - /** Session fifo sizes. They are provided for binds and take default - * values for connects */ - u32 rx_fifo_size; - u32 tx_fifo_size; - - /** Configured additional segment size */ - u32 add_segment_size; - - /** Flag that indicates if additional segments should be created */ - u8 add_segment; -} session_manager_t; - /* Forward definition */ typedef struct _session_manager_main session_manager_main_t; @@ -206,11 +193,6 @@ struct _session_manager_main /** Unique segment name counter */ u32 unique_segment_name_counter; - /* Connection manager used by incoming connects */ - u32 connect_manager_index[SESSION_N_TYPES]; - - session_manager_t *session_managers; - /** Per transport rx function that can either dequeue or peek */ session_fifo_rx_fn *session_tx_fns[SESSION_N_TYPES]; @@ -242,37 +224,6 @@ vnet_get_session_manager_main () return &session_manager_main; } -always_inline session_manager_t * -session_manager_get (u32 index) -{ - return pool_elt_at_index (session_manager_main.session_managers, index); -} - -always_inline unix_shared_memory_queue_t * -session_manager_get_vpp_event_queue (u32 thread_index) -{ - return session_manager_main.vpp_event_queues[thread_index]; -} - -always_inline session_manager_t * -connects_session_manager_get (session_manager_main_t * smm, - session_type_t session_type) -{ - return pool_elt_at_index (smm->session_managers, - smm->connect_manager_index[session_type]); -} - -void session_manager_get_segment_info (u32 index, u8 ** name, u32 * size); -int session_manager_flush_enqueue_events (u32 thread_index); -int -session_manager_add_first_segment (session_manager_main_t * smm, - session_manager_t * sm, u32 segment_size, - u8 ** segment_name); -void -session_manager_del (session_manager_main_t * smm, session_manager_t * sm); -void -connects_session_manager_init (session_manager_main_t * smm, u8 session_type); - /* * Stream session functions */ @@ -300,6 +251,8 @@ transport_connection_t u32 thread_index); stream_session_t 
*stream_session_lookup_listener (ip46_address_t * lcl, u16 lcl_port, u8 proto); +void stream_session_table_add_for_tc (transport_connection_t * tc, u64 value); +int stream_session_table_del_for_tc (transport_connection_t * tc); always_inline stream_session_t * stream_session_get_tsi (u64 ti_and_si, u32 thread_index) @@ -310,7 +263,7 @@ stream_session_get_tsi (u64 ti_and_si, u32 thread_index) } always_inline stream_session_t * -stream_session_get (u64 si, u32 thread_index) +stream_session_get (u32 si, u32 thread_index) { return pool_elt_at_index (session_manager_main.sessions[thread_index], si); } @@ -327,6 +280,40 @@ stream_session_get_if_valid (u64 si, u32 thread_index) return pool_elt_at_index (session_manager_main.sessions[thread_index], si); } +always_inline u64 +stream_session_handle (stream_session_t * s) +{ + return ((u64) s->thread_index << 32) | (u64) s->session_index; +} + +always_inline u32 +stream_session_index_from_handle (u64 handle) +{ + return handle & 0xFFFFFFFF; +} + +always_inline u32 +stream_session_thread_from_handle (u64 handle) +{ + return handle >> 32; +} + +always_inline void +stream_session_parse_handle (u64 handle, u32 * index, u32 * thread_index) +{ + *index = stream_session_index_from_handle (handle); + *thread_index = stream_session_thread_from_handle (handle); +} + +always_inline stream_session_t * +stream_session_get_from_handle (u64 handle) +{ + session_manager_main_t *smm = &session_manager_main; + return pool_elt_at_index (smm->sessions[stream_session_thread_from_handle + (handle)], + stream_session_index_from_handle (handle)); +} + always_inline stream_session_t * stream_session_listener_get (u8 sst, u64 si) { @@ -375,13 +362,14 @@ void stream_session_reset_notify (transport_connection_t * tc); int stream_session_accept (transport_connection_t * tc, u32 listener_index, u8 sst, u8 notify); -int stream_session_open (u8 sst, ip46_address_t * addr, - u16 port_host_byte_order, u32 api_client_index); +int +stream_session_open (u32 app_index, session_type_t st, + transport_endpoint_t * tep, + transport_connection_t ** tc); +int stream_session_listen (stream_session_t * s, transport_endpoint_t * tep); +int stream_session_stop_listen (stream_session_t * s); void stream_session_disconnect (stream_session_t * s); void stream_session_cleanup (stream_session_t * s); -int -stream_session_start_listen (u32 server_index, ip46_address_t * ip, u16 port); -void stream_session_stop_listen (u32 server_index); u8 *format_stream_session (u8 * s, va_list * args); @@ -390,6 +378,71 @@ transport_proto_vft_t *session_get_transport_vft (u8 type); clib_error_t *vnet_session_enable_disable (vlib_main_t * vm, u8 is_en); +always_inline unix_shared_memory_queue_t * +session_manager_get_vpp_event_queue (u32 thread_index) +{ + return session_manager_main.vpp_event_queues[thread_index]; +} + +int session_manager_flush_enqueue_events (u32 thread_index); + +always_inline u64 +listen_session_get_handle (stream_session_t * s) +{ + ASSERT (s->session_state == SESSION_STATE_LISTENING); + return ((u64) s->session_type << 32) | s->session_index; +} + +always_inline stream_session_t * +listen_session_get_from_handle (u64 handle) +{ + session_manager_main_t *smm = &session_manager_main; + stream_session_t *s; + u32 type, index; + type = handle >> 32; + index = handle & 0xFFFFFFFF; + + if (pool_is_free_index (smm->listen_sessions[type], index)) + return 0; + + s = pool_elt_at_index (smm->listen_sessions[type], index); + ASSERT (s->session_state == SESSION_STATE_LISTENING); + return s; +} + 
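Illustrative note (hypothetical code, not part of the patch): the two handle formats above are deliberately asymmetric. Established-session handles carry the owning thread index in the upper word, while listen-session handles carry the session type there instead, since listeners are not bound to a thread. A round trip through the stream-session helpers, assuming s is a valid established session:

static void
session_handle_roundtrip (stream_session_t * s)
{
  u64 handle = stream_session_handle (s);
  u32 index, thread_index;

  /* Unpack the two 32-bit halves and check them against the session */
  stream_session_parse_handle (handle, &index, &thread_index);
  ASSERT (index == s->session_index);
  ASSERT (thread_index == s->thread_index);
  ASSERT (s == stream_session_get_from_handle (handle));
}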
+always_inline stream_session_t * +listen_session_new (session_type_t type) +{ + stream_session_t *s; + pool_get (session_manager_main.listen_sessions[type], s); + memset (s, 0, sizeof (*s)); + + s->session_type = type; + s->session_state = SESSION_STATE_LISTENING; + s->session_index = s - session_manager_main.listen_sessions[type]; + + return s; +} + +always_inline stream_session_t * +listen_session_get (session_type_t type, u32 index) +{ + return pool_elt_at_index (session_manager_main.listen_sessions[type], + index); +} + +always_inline void +listen_session_del (stream_session_t * s) +{ + pool_put (session_manager_main.listen_sessions[s->session_type], s); +} + +always_inline u8 +session_manager_is_enabled () +{ + return session_manager_main.is_enabled == 1; +} + #endif /* __included_session_h__ */ /* diff --git a/src/vnet/session/session_api.c b/src/vnet/session/session_api.c index 9c38428a..a82dfe0b 100644 --- a/src/vnet/session/session_api.c +++ b/src/vnet/session/session_api.c @@ -38,6 +38,8 @@ #define foreach_session_api_msg \ _(MAP_ANOTHER_SEGMENT_REPLY, map_another_segment_reply) \ +_(APPLICATION_ATTACH, application_attach) \ +_(APPLICATION_DETACH, application_detach) \ _(BIND_URI, bind_uri) \ _(UNBIND_URI, unbind_uri) \ _(CONNECT_URI, connect_uri) \ @@ -48,13 +50,8 @@ _(RESET_SESSION_REPLY, reset_session_reply) \ _(BIND_SOCK, bind_sock) \ _(UNBIND_SOCK, unbind_sock) \ _(CONNECT_SOCK, connect_sock) \ -_(DISCONNECT_SOCK, disconnect_sock) \ -_(DISCONNECT_SOCK_REPLY, disconnect_sock_reply) \ -_(ACCEPT_SOCK_REPLY, accept_sock_reply) \ -_(RESET_SOCK_REPLY, reset_sock_reply) \ _(SESSION_ENABLE_DISABLE, session_enable_disable) \ - static int send_add_segment_callback (u32 api_client_index, const u8 * segment_name, u32 segment_size) @@ -80,11 +77,14 @@ send_add_segment_callback (u32 api_client_index, const u8 * segment_name, } static int -send_session_accept_uri_callback (stream_session_t * s) +send_session_accept_callback (stream_session_t * s) { vl_api_accept_session_t *mp; unix_shared_memory_queue_t *q, *vpp_queue; application_t *server = application_get (s->app_index); + transport_connection_t *tc; + transport_proto_vft_t *tp_vft; + stream_session_t *listener; q = vl_api_client_index_to_input_queue (server->api_client_index); vpp_queue = session_manager_get_vpp_event_queue (s->thread_index); @@ -93,24 +93,28 @@ send_session_accept_uri_callback (stream_session_t * s) return -1; mp = vl_msg_api_alloc (sizeof (*mp)); - mp->_vl_msg_id = clib_host_to_net_u16 (VL_API_ACCEPT_SESSION); + memset (mp, 0, sizeof (*mp)); - /* Note: session_type is the first octet in all types of sessions */ + mp->_vl_msg_id = clib_host_to_net_u16 (VL_API_ACCEPT_SESSION); - mp->accept_cookie = server->accept_cookie; + listener = listen_session_get (s->session_type, s->listener_index); + tp_vft = session_get_transport_vft (s->session_type); + tc = tp_vft->get_connection (s->connection_index, s->thread_index); + mp->listener_handle = listen_session_get_handle (listener); + mp->handle = stream_session_handle (s); mp->server_rx_fifo = (u64) s->server_rx_fifo; mp->server_tx_fifo = (u64) s->server_tx_fifo; - mp->session_thread_index = s->thread_index; - mp->session_index = s->session_index; - mp->session_type = s->session_type; mp->vpp_event_queue_address = (u64) vpp_queue; + mp->port = tc->rmt_port; + mp->is_ip4 = tc->is_ip4; + clib_memcpy (&mp->ip, &tc->rmt_ip, sizeof (tc->rmt_ip)); vl_msg_api_send_shmem (q, (u8 *) & mp); return 0; } static void -send_session_disconnect_uri_callback (stream_session_t * s) 
+send_session_disconnect_callback (stream_session_t * s) { vl_api_disconnect_session_t *mp; unix_shared_memory_queue_t *q; @@ -124,14 +128,12 @@ send_session_disconnect_uri_callback (stream_session_t * s) mp = vl_msg_api_alloc (sizeof (*mp)); memset (mp, 0, sizeof (*mp)); mp->_vl_msg_id = clib_host_to_net_u16 (VL_API_DISCONNECT_SESSION); - - mp->session_thread_index = s->thread_index; - mp->session_index = s->session_index; + mp->handle = stream_session_handle (s); vl_msg_api_send_shmem (q, (u8 *) & mp); } static void -send_session_reset_uri_callback (stream_session_t * s) +send_session_reset_callback (stream_session_t * s) { vl_api_reset_session_t *mp; unix_shared_memory_queue_t *q; @@ -145,22 +147,20 @@ send_session_reset_uri_callback (stream_session_t * s) mp = vl_msg_api_alloc (sizeof (*mp)); memset (mp, 0, sizeof (*mp)); mp->_vl_msg_id = clib_host_to_net_u16 (VL_API_RESET_SESSION); - - mp->session_thread_index = s->thread_index; - mp->session_index = s->session_index; + mp->handle = stream_session_handle (s); vl_msg_api_send_shmem (q, (u8 *) & mp); } static int -send_session_connected_uri_callback (u32 api_client_index, - stream_session_t * s, u8 is_fail) +send_session_connected_callback (u32 app_index, u32 api_context, + stream_session_t * s, u8 is_fail) { vl_api_connect_uri_reply_t *mp; unix_shared_memory_queue_t *q; - application_t *app = application_lookup (api_client_index); - u8 *seg_name; + application_t *app; unix_shared_memory_queue_t *vpp_queue; + app = application_get (app_index); q = vl_api_client_index_to_input_queue (app->api_client_index); if (!q) @@ -168,24 +168,15 @@ send_session_connected_uri_callback (u32 api_client_index, mp = vl_msg_api_alloc (sizeof (*mp)); mp->_vl_msg_id = clib_host_to_net_u16 (VL_API_CONNECT_URI_REPLY); - mp->context = app->api_context; + mp->context = api_context; if (!is_fail) { vpp_queue = session_manager_get_vpp_event_queue (s->thread_index); mp->server_rx_fifo = (u64) s->server_rx_fifo; mp->server_tx_fifo = (u64) s->server_tx_fifo; - mp->session_thread_index = s->thread_index; - mp->session_index = s->session_index; - mp->session_type = s->session_type; + mp->handle = stream_session_handle (s); mp->vpp_event_queue_address = (u64) vpp_queue; - mp->client_event_queue_address = (u64) app->event_queue; mp->retval = 0; - - session_manager_get_segment_info (s->server_segment_index, &seg_name, - &mp->segment_size); - mp->segment_name_length = vec_len (seg_name); - if (mp->segment_name_length) - clib_memcpy (mp->segment_name, seg_name, mp->segment_name_length); } else { @@ -195,199 +186,14 @@ send_session_connected_uri_callback (u32 api_client_index, vl_msg_api_send_shmem (q, (u8 *) & mp); /* Remove client if connect failed */ - if (is_fail) - { - application_del (app); - } - else - { - s->session_state = SESSION_STATE_READY; - } - - return 0; -} - -/** - * Redirect a connect_uri message to the indicated server. 
- * Only sent if the server has bound the related port with - * URI_OPTIONS_FLAGS_USE_FIFO - */ -static int -redirect_connect_uri_callback (u32 server_api_client_index, void *mp_arg) -{ - vl_api_connect_uri_t *mp = mp_arg; - unix_shared_memory_queue_t *server_q, *client_q; - vlib_main_t *vm = vlib_get_main (); - f64 timeout = vlib_time_now (vm) + 0.5; - int rv = 0; - - server_q = vl_api_client_index_to_input_queue (server_api_client_index); - - if (!server_q) - { - rv = VNET_API_ERROR_INVALID_VALUE; - goto out; - } - - client_q = vl_api_client_index_to_input_queue (mp->client_index); - if (!client_q) - { - rv = VNET_API_ERROR_INVALID_VALUE_2; - goto out; - } - - /* Tell the server the client's API queue address, so it can reply */ - mp->client_queue_address = (u64) client_q; - - /* - * Bounce message handlers MUST NOT block the data-plane. - * Spin waiting for the queue lock, but - */ - - while (vlib_time_now (vm) < timeout) - { - rv = - unix_shared_memory_queue_add (server_q, (u8 *) & mp, 1 /*nowait */ ); - switch (rv) - { - /* correctly enqueued */ - case 0: - return VNET_CONNECT_REDIRECTED; - - /* continue spinning, wait for pthread_mutex_trylock to work */ - case -1: - continue; - - /* queue stuffed, drop the msg */ - case -2: - rv = VNET_API_ERROR_QUEUE_FULL; - goto out; - } - } -out: - /* Dispose of the message */ - vl_msg_api_free (mp); - return rv; -} - -static u64 -make_session_handle (stream_session_t * s) -{ - return (u64) s->session_index << 32 | (u64) s->thread_index; -} - -static int -send_session_accept_callback (stream_session_t * s) -{ - vl_api_accept_sock_t *mp; - unix_shared_memory_queue_t *q, *vpp_queue; - application_t *server = application_get (s->app_index); - - q = vl_api_client_index_to_input_queue (server->api_client_index); - vpp_queue = session_manager_get_vpp_event_queue (s->thread_index); - - if (!q) - return -1; - - mp = vl_msg_api_alloc (sizeof (*mp)); - mp->_vl_msg_id = clib_host_to_net_u16 (VL_API_ACCEPT_SOCK); - - /* Note: session_type is the first octet in all types of sessions */ - - mp->accept_cookie = server->accept_cookie; - mp->server_rx_fifo = (u64) s->server_rx_fifo; - mp->server_tx_fifo = (u64) s->server_tx_fifo; - mp->handle = make_session_handle (s); - mp->vpp_event_queue_address = (u64) vpp_queue; - vl_msg_api_send_shmem (q, (u8 *) & mp); - - return 0; -} - -static int -send_session_connected_callback (u32 api_client_index, stream_session_t * s, - u8 is_fail) -{ - vl_api_connect_sock_reply_t *mp; - unix_shared_memory_queue_t *q; - application_t *app = application_lookup (api_client_index); - u8 *seg_name; - unix_shared_memory_queue_t *vpp_queue; - - q = vl_api_client_index_to_input_queue (app->api_client_index); - - if (!q) - return -1; - - mp = vl_msg_api_alloc (sizeof (*mp)); - mp->_vl_msg_id = clib_host_to_net_u16 (VL_API_CONNECT_SOCK_REPLY); - mp->context = app->api_context; - mp->retval = is_fail; if (!is_fail) { - vpp_queue = session_manager_get_vpp_event_queue (s->thread_index); - mp->server_rx_fifo = (u64) s->server_rx_fifo; - mp->server_tx_fifo = (u64) s->server_tx_fifo; - mp->handle = make_session_handle (s); - mp->vpp_event_queue_address = (u64) vpp_queue; - mp->client_event_queue_address = (u64) app->event_queue; - - session_manager_get_segment_info (s->server_segment_index, &seg_name, - &mp->segment_size); - mp->segment_name_length = vec_len (seg_name); - if (mp->segment_name_length) - clib_memcpy (mp->segment_name, seg_name, mp->segment_name_length); + s->session_state = SESSION_STATE_READY; } - vl_msg_api_send_shmem (q, (u8 *) & 
mp); - - /* Remove client if connect failed */ - if (is_fail) - application_del (app); - return 0; } -static void -send_session_disconnect_callback (stream_session_t * s) -{ - vl_api_disconnect_sock_t *mp; - unix_shared_memory_queue_t *q; - application_t *app = application_get (s->app_index); - - q = vl_api_client_index_to_input_queue (app->api_client_index); - - if (!q) - return; - - mp = vl_msg_api_alloc (sizeof (*mp)); - memset (mp, 0, sizeof (*mp)); - mp->_vl_msg_id = clib_host_to_net_u16 (VL_API_DISCONNECT_SOCK); - - mp->handle = make_session_handle (s); - vl_msg_api_send_shmem (q, (u8 *) & mp); -} - -static void -send_session_reset_callback (stream_session_t * s) -{ - vl_api_reset_sock_t *mp; - unix_shared_memory_queue_t *q; - application_t *app = application_get (s->app_index); - - q = vl_api_client_index_to_input_queue (app->api_client_index); - - if (!q) - return; - - mp = vl_msg_api_alloc (sizeof (*mp)); - memset (mp, 0, sizeof (*mp)); - mp->_vl_msg_id = clib_host_to_net_u16 (VL_API_RESET_SOCK); - - mp->handle = make_session_handle (s); - vl_msg_api_send_shmem (q, (u8 *) & mp); -} - /** * Redirect a connect_uri message to the indicated server. * Only sent if the server has bound the related port with @@ -396,10 +202,11 @@ send_session_reset_callback (stream_session_t * s) static int redirect_connect_callback (u32 server_api_client_index, void *mp_arg) { - vl_api_connect_sock_t *mp = mp_arg; + vl_api_connect_uri_t *mp = mp_arg; unix_shared_memory_queue_t *server_q, *client_q; vlib_main_t *vm = vlib_get_main (); f64 timeout = vlib_time_now (vm) + 0.5; + application_t *app; int rv = 0; server_q = vl_api_client_index_to_input_queue (server_api_client_index); @@ -419,6 +226,9 @@ redirect_connect_callback (u32 server_api_client_index, void *mp_arg) /* Tell the server the client's API queue address, so it can reply */ mp->client_queue_address = (u64) client_q; + app = application_lookup (mp->client_index); + mp->options[SESSION_OPTIONS_RX_FIFO_SIZE] = app->sm_properties.rx_fifo_size; + mp->options[SESSION_OPTIONS_TX_FIFO_SIZE] = app->sm_properties.tx_fifo_size; /* * Bounce message handlers MUST NOT block the data-plane. 
@@ -452,15 +262,6 @@ out: } static session_cb_vft_t uri_session_cb_vft = { - .session_accept_callback = send_session_accept_uri_callback, - .session_disconnect_callback = send_session_disconnect_uri_callback, - .session_connected_callback = send_session_connected_uri_callback, - .session_reset_callback = send_session_reset_uri_callback, - .add_segment_callback = send_add_segment_callback, - .redirect_connect_callback = redirect_connect_uri_callback -}; - -static session_cb_vft_t session_cb_vft = { .session_accept_callback = send_session_accept_callback, .session_disconnect_callback = send_session_disconnect_callback, .session_connected_callback = send_session_connected_callback, @@ -498,60 +299,134 @@ vl_api_session_enable_disable_t_handler (vl_api_session_enable_disable_t * mp) } static void -vl_api_bind_uri_t_handler (vl_api_bind_uri_t * mp) +vl_api_application_attach_t_handler (vl_api_application_attach_t * mp) { - vl_api_bind_uri_reply_t *rmp; - vnet_bind_args_t _a, *a = &_a; - char segment_name[128]; - u32 segment_name_length; + vl_api_application_attach_reply_t *rmp; + vnet_app_attach_args_t _a, *a = &_a; int rv; - _Static_assert (sizeof (u64) * SESSION_OPTIONS_N_OPTIONS <= - sizeof (mp->options), - "Out of options, fix api message definition"); + if (session_manager_is_enabled () == 0) + { + rv = VNET_API_ERROR_FEATURE_DISABLED; + goto done; + } - segment_name_length = ARRAY_LEN (segment_name); + STATIC_ASSERT (sizeof (u64) * SESSION_OPTIONS_N_OPTIONS <= + sizeof (mp->options), + "Out of options, fix api message definition"); memset (a, 0, sizeof (*a)); - a->uri = (char *) mp->uri; a->api_client_index = mp->client_index; a->options = mp->options; - a->segment_name = segment_name; - a->segment_name_length = segment_name_length; a->session_cb_vft = &uri_session_cb_vft; - a->options[SESSION_OPTIONS_SEGMENT_SIZE] = mp->initial_segment_size; - a->options[SESSION_OPTIONS_ACCEPT_COOKIE] = mp->accept_cookie; - rv = vnet_bind_uri (a); + rv = vnet_application_attach (a); +done: /* *INDENT-OFF* */ - REPLY_MACRO2 (VL_API_BIND_URI_REPLY, ({ + REPLY_MACRO2 (VL_API_APPLICATION_ATTACH_REPLY, ({ rmp->retval = rv; if (!rv) { rmp->segment_name_length = 0; /* $$$$ policy? 
*/ - rmp->segment_size = mp->initial_segment_size; - if (segment_name_length) + rmp->segment_size = a->segment_size; + if (a->segment_name_length) { - memcpy (rmp->segment_name, segment_name, segment_name_length); - rmp->segment_name_length = segment_name_length; + memcpy (rmp->segment_name, a->segment_name, + a->segment_name_length); + rmp->segment_name_length = a->segment_name_length; } - rmp->server_event_queue_address = a->server_event_queue_address; + rmp->app_event_queue_address = a->app_event_queue_address; } })); /* *INDENT-ON* */ } +static void +vl_api_application_detach_t_handler (vl_api_application_detach_t * mp) +{ + vl_api_application_detach_reply_t *rmp; + int rv = VNET_API_ERROR_INVALID_VALUE_2; + vnet_app_detach_args_t _a, *a = &_a; + application_t *app; + + if (session_manager_is_enabled () == 0) + { + rv = VNET_API_ERROR_FEATURE_DISABLED; + goto done; + } + + app = application_lookup (mp->client_index); + if (app) + { + a->app_index = app->index; + rv = vnet_application_detach (a); + } + +done: + REPLY_MACRO (VL_API_APPLICATION_DETACH_REPLY); +} + +static void +vl_api_bind_uri_t_handler (vl_api_bind_uri_t * mp) +{ + vl_api_bind_uri_reply_t *rmp; + vnet_bind_args_t _a, *a = &_a; + application_t *app; + int rv; + + if (session_manager_is_enabled () == 0) + { + rv = VNET_API_ERROR_FEATURE_DISABLED; + goto done; + } + + app = application_lookup (mp->client_index); + if (app) + { + memset (a, 0, sizeof (*a)); + a->uri = (char *) mp->uri; + a->app_index = app->index; + rv = vnet_bind_uri (a); + } + else + { + rv = VNET_API_ERROR_APPLICATION_NOT_ATTACHED; + } + +done: + REPLY_MACRO (VL_API_BIND_URI_REPLY); +} + static void vl_api_unbind_uri_t_handler (vl_api_unbind_uri_t * mp) { vl_api_unbind_uri_reply_t *rmp; + application_t *app; + vnet_unbind_args_t _a, *a = &_a; int rv; - rv = vnet_unbind_uri ((char *) mp->uri, mp->client_index); + if (session_manager_is_enabled () == 0) + { + rv = VNET_API_ERROR_FEATURE_DISABLED; + goto done; + } + + app = application_lookup (mp->client_index); + if (app) + { + a->uri = (char *) mp->uri; + a->app_index = app->index; + rv = vnet_unbind_uri (a); + } + else + { + rv = VNET_API_ERROR_APPLICATION_NOT_ATTACHED; + } +done: REPLY_MACRO (VL_API_UNBIND_URI_REPLY); } @@ -560,26 +435,37 @@ vl_api_connect_uri_t_handler (vl_api_connect_uri_t * mp) { vl_api_connect_uri_reply_t *rmp; vnet_connect_args_t _a, *a = &_a; + application_t *app; int rv; - a->uri = (char *) mp->uri; - a->api_client_index = mp->client_index; - a->api_context = mp->context; - a->options = mp->options; - a->session_cb_vft = &uri_session_cb_vft; - a->mp = mp; + if (session_manager_is_enabled () == 0) + { + rv = VNET_API_ERROR_FEATURE_DISABLED; + goto done; + } - rv = vnet_connect_uri (a); + app = application_lookup (mp->client_index); + if (app) + { + a->uri = (char *) mp->uri; + a->api_context = mp->context; + a->app_index = app->index; + a->mp = mp; + rv = vnet_connect_uri (a); + } + else + { + rv = VNET_API_ERROR_APPLICATION_NOT_ATTACHED; + } if (rv == 0 || rv == VNET_CONNECT_REDIRECTED) return; /* Got some error, relay it */ +done: /* *INDENT-OFF* */ - REPLY_MACRO2 (VL_API_CONNECT_URI_REPLY, ({ - rmp->retval = rv; - })); + REPLY_MACRO (VL_API_CONNECT_URI_REPLY); /* *INDENT-ON* */ } @@ -587,13 +473,29 @@ static void vl_api_disconnect_session_t_handler (vl_api_disconnect_session_t * mp) { vl_api_disconnect_session_reply_t *rmp; - int rv; + vnet_disconnect_args_t _a, *a = &_a; + application_t *app; + int rv = 0; - rv = api_session_not_valid (mp->session_index, 
mp->session_thread_index); - if (!rv) - rv = - vnet_disconnect_session (mp->session_index, mp->session_thread_index); + if (session_manager_is_enabled () == 0) + { + rv = VNET_API_ERROR_FEATURE_DISABLED; + goto done; + } + + app = application_lookup (mp->client_index); + if (app) + { + a->handle = mp->handle; + a->app_index = app->index; + rv = vnet_disconnect_session (a); + } + else + { + rv = VNET_API_ERROR_APPLICATION_NOT_ATTACHED; + } +done: REPLY_MACRO (VL_API_DISCONNECT_SESSION_REPLY); } @@ -601,11 +503,8 @@ static void vl_api_disconnect_session_reply_t_handler (vl_api_disconnect_session_reply_t * mp) { - if (api_session_not_valid (mp->session_index, mp->session_thread_index)) - { - clib_warning ("Invalid session!"); - return; - } + vnet_disconnect_args_t _a, *a = &_a; + application_t *app; /* Client objected to disconnecting the session, log and continue */ if (mp->retval) @@ -615,15 +514,29 @@ vl_api_disconnect_session_reply_t_handler (vl_api_disconnect_session_reply_t * } /* Disconnect has been confirmed. Confirm close to transport */ - vnet_disconnect_session (mp->session_index, mp->session_thread_index); + app = application_lookup (mp->client_index); + if (app) + { + a->handle = mp->handle; + a->app_index = app->index; + vnet_disconnect_session (a); + } } static void vl_api_reset_session_reply_t_handler (vl_api_reset_session_reply_t * mp) { + application_t *app; stream_session_t *s; + u32 index, thread_index; + + app = application_lookup (mp->client_index); + if (!app) + return; - if (api_session_not_valid (mp->session_index, mp->session_thread_index)) + stream_session_parse_handle (mp->handle, &index, &thread_index); + s = stream_session_get_if_valid (index, thread_index); + if (s == 0 || app->index != s->app_index) { clib_warning ("Invalid session!"); return; @@ -636,8 +549,6 @@ vl_api_reset_session_reply_t_handler (vl_api_reset_session_reply_t * mp) return; } - s = stream_session_get (mp->session_index, mp->session_thread_index); - /* This comes as a response to a reset, transport only waiting for * confirmation to remove connection state, no need to disconnect */ stream_session_cleanup (s); @@ -648,11 +559,13 @@ vl_api_accept_session_reply_t_handler (vl_api_accept_session_reply_t * mp) { stream_session_t *s; int rv; - - if (api_session_not_valid (mp->session_index, mp->session_thread_index)) + u32 session_index, thread_index; + session_index = stream_session_index_from_handle (mp->handle); + thread_index = stream_session_thread_from_handle (mp->handle); + if (api_session_not_valid (session_index, thread_index)) return; - s = stream_session_get (mp->session_index, mp->session_thread_index); + s = stream_session_get (session_index, thread_index); rv = mp->retval; if (rv) @@ -677,49 +590,31 @@ vl_api_bind_sock_t_handler (vl_api_bind_sock_t * mp) { vl_api_bind_sock_reply_t *rmp; vnet_bind_args_t _a, *a = &_a; - char segment_name[128]; - u32 segment_name_length; - int rv; - - STATIC_ASSERT (sizeof (u64) * SESSION_OPTIONS_N_OPTIONS <= - sizeof (mp->options), - "Out of options, fix api message definition"); - - segment_name_length = ARRAY_LEN (segment_name); - - memset (a, 0, sizeof (*a)); - - clib_memcpy (&a->tep.ip, mp->ip, - (mp->is_ip4 ? 
sizeof (ip4_address_t) : - sizeof (ip6_address_t))); - a->tep.is_ip4 = mp->is_ip4; - a->tep.port = mp->port; - a->tep.vrf = mp->vrf; - - a->api_client_index = mp->client_index; - a->options = mp->options; - a->segment_name = segment_name; - a->segment_name_length = segment_name_length; - a->session_cb_vft = &session_cb_vft; + int rv = VNET_API_ERROR_APPLICATION_NOT_ATTACHED; + application_t *app; - rv = vnet_bind_uri (a); + if (session_manager_is_enabled () == 0) + { + rv = VNET_API_ERROR_FEATURE_DISABLED; + goto done; + } - /* *INDENT-OFF* */ - REPLY_MACRO2 (VL_API_BIND_SOCK_REPLY, ({ - rmp->retval = rv; - if (!rv) - { - rmp->segment_name_length = 0; - rmp->segment_size = mp->options[SESSION_OPTIONS_SEGMENT_SIZE]; - if (segment_name_length) - { - memcpy(rmp->segment_name, segment_name, segment_name_length); - rmp->segment_name_length = segment_name_length; - } - rmp->server_event_queue_address = a->server_event_queue_address; - } - })); - /* *INDENT-ON* */ + app = application_lookup (mp->client_index); + if (app) + { + memset (a, 0, sizeof (*a)); + clib_memcpy (&a->tep.ip, mp->ip, (mp->is_ip4 ? + sizeof (ip4_address_t) : + sizeof (ip6_address_t))); + a->tep.is_ip4 = mp->is_ip4; + a->tep.port = mp->port; + a->tep.vrf = mp->vrf; + a->app_index = app->index; + + rv = vnet_bind (a); + } +done: + REPLY_MACRO (VL_API_BIND_SOCK_REPLY); } static void @@ -727,13 +622,24 @@ vl_api_unbind_sock_t_handler (vl_api_unbind_sock_t * mp) { vl_api_unbind_sock_reply_t *rmp; vnet_unbind_args_t _a, *a = &_a; - int rv; + application_t *app; + int rv = VNET_API_ERROR_APPLICATION_NOT_ATTACHED; - a->api_client_index = mp->client_index; - a->handle = mp->handle; + if (session_manager_is_enabled () == 0) + { + rv = VNET_API_ERROR_FEATURE_DISABLED; + goto done; + } - rv = vnet_unbind (a); + app = application_lookup (mp->client_index); + if (app) + { + a->app_index = app->index; + a->handle = mp->handle; + rv = vnet_unbind (a); + } +done: REPLY_MACRO (VL_API_UNBIND_SOCK_REPLY); } @@ -742,114 +648,55 @@ vl_api_connect_sock_t_handler (vl_api_connect_sock_t * mp) { vl_api_connect_sock_reply_t *rmp; vnet_connect_args_t _a, *a = &_a; + application_t *app; int rv; - clib_memcpy (&a->tep.ip, mp->ip, - (mp->is_ip4 ?
sizeof (ip4_address_t) : - sizeof (ip6_address_t))); - a->tep.is_ip4 = mp->is_ip4; - a->tep.port = mp->port; - a->tep.vrf = mp->vrf; - a->options = mp->options; - a->session_cb_vft = &session_cb_vft; - a->api_context = mp->context; - a->mp = mp; - - rv = vnet_connect (a); - - if (rv == 0 || rv == VNET_CONNECT_REDIRECTED) - return; - - /* Got some error, relay it */ - - /* *INDENT-OFF* */ - REPLY_MACRO2 (VL_API_CONNECT_URI_REPLY, ({ - rmp->retval = rv; - })); - /* *INDENT-ON* */ -} - -static void -vl_api_disconnect_sock_t_handler (vl_api_disconnect_sock_t * mp) -{ - vnet_disconnect_args_t _a, *a = &_a; - vl_api_disconnect_sock_reply_t *rmp; - int rv; - - a->api_client_index = mp->client_index; - a->handle = mp->handle; - rv = vnet_disconnect (a); - - REPLY_MACRO (VL_API_DISCONNECT_SOCK_REPLY); -} - -static void -vl_api_disconnect_sock_reply_t_handler (vl_api_disconnect_sock_reply_t * mp) -{ - vnet_disconnect_args_t _a, *a = &_a; - - /* Client objected to disconnecting the session, log and continue */ - if (mp->retval) + if (session_manager_is_enabled () == 0) { - clib_warning ("client retval %d", mp->retval); - return; + rv = VNET_API_ERROR_FEATURE_DISABLED; + goto done; } - a->api_client_index = mp->client_index; - a->handle = mp->handle; - - vnet_disconnect (a); -} - -static void -vl_api_reset_sock_reply_t_handler (vl_api_reset_sock_reply_t * mp) -{ - stream_session_t *s; - u32 session_index, thread_index; - - /* Client objected to resetting the session, log and continue */ - if (mp->retval) + app = application_lookup (mp->client_index); + if (app) { - clib_warning ("client retval %d", mp->retval); - return; + clib_memcpy (&a->tep.ip, mp->ip, + (mp->is_ip4 ? sizeof (ip4_address_t) : + sizeof (ip6_address_t))); + a->api_context = mp->context; + a->app_index = app->index; + a->mp = mp; + rv = vnet_connect (a); } - - if (api_parse_session_handle (mp->handle, &session_index, &thread_index)) + else { - clib_warning ("Invalid handle"); - return; + rv = VNET_API_ERROR_APPLICATION_NOT_ATTACHED; } - s = stream_session_get (session_index, thread_index); + if (rv == 0 || rv == VNET_CONNECT_REDIRECTED) + return; - /* This comes as a response to a reset, transport only waiting for - * confirmation to remove connection state, no need to disconnect */ - stream_session_cleanup (s); + /* Got some error, relay it */ + +done: + REPLY_MACRO (VL_API_CONNECT_URI_REPLY); } -static void -vl_api_accept_sock_reply_t_handler (vl_api_accept_sock_reply_t * mp) +static clib_error_t * +application_reaper_cb (u32 client_index) { - stream_session_t *s; - u32 session_index, thread_index; - - if (api_parse_session_handle (mp->handle, &session_index, &thread_index)) - { - clib_warning ("Invalid handle"); - return; - } - s = stream_session_get (session_index, thread_index); - - if (mp->retval) + application_t *app = application_lookup (client_index); + vnet_app_detach_args_t _a, *a = &_a; + if (app) { - /* Server isn't interested, kill the session */ - stream_session_disconnect (s); - return; + a->app_index = app->index; + vnet_application_detach (a); } - - s->session_state = SESSION_STATE_READY; + return 0; } +VL_MSG_API_REAPER_FUNCTION (application_reaper_cb); + #define vl_msg_name_crc_list #include #undef vl_msg_name_crc_list @@ -903,6 +750,7 @@ session_api_hookup (vlib_main_t * vm) } VLIB_API_INIT_FUNCTION (session_api_hookup); + /* * fd.io coding-style-patch-verification: ON * diff --git a/src/vnet/session/transport.h b/src/vnet/session/transport.h index 2f912cbc..7ea7af15 100644 --- a/src/vnet/session/transport.h +++ 
b/src/vnet/session/transport.h @@ -30,7 +30,7 @@ typedef struct _transport_connection ip46_address_t lcl_ip; /**< Local IP */ u16 lcl_port; /**< Local port */ u16 rmt_port; /**< Remote port */ - u8 proto; /**< Transport protocol id */ + u8 proto; /**< Transport protocol id (also session type) */ u32 s_index; /**< Parent session index */ u32 c_index; /**< Connection index in transport pool */ @@ -103,7 +103,8 @@ typedef CLIB_PACKED (struct { { struct { - ip4_address_t src; ip4_address_t dst; + ip4_address_t src; + ip4_address_t dst; u16 src_port; u16 dst_port; /* align by making this 4 octets even though its a 1-bit field @@ -122,10 +123,14 @@ struct { /* 48 octets */ - ip6_address_t src; ip6_address_t dst; + ip6_address_t src; + ip6_address_t dst; u16 src_port; - u16 dst_port; u32 proto; u8 unused_for_now[8]; - }; u64 as_u64[6]; + u16 dst_port; + u32 proto; + u8 unused_for_now[8]; + }; + u64 as_u64[6]; }; }) v6_connection_key_t; /* *INDENT-ON* */ @@ -233,10 +238,10 @@ make_v6_ss_kv_from_tc (session_kv6_t * kv, transport_connection_t * t) typedef struct _transport_endpoint { - ip46_address_t ip; - u16 port; - u8 is_ip4; - u32 vrf; + ip46_address_t ip; /**< IP address */ + u16 port; /**< Port in host order */ + u8 is_ip4; /**< 1 if ip4 */ + u32 vrf; /**< FIB table the endpoint is associated with */ } transport_endpoint_t; typedef clib_bihash_24_8_t transport_endpoint_table_t; diff --git a/src/vnet/tcp/builtin_client.c b/src/vnet/tcp/builtin_client.c index 9e8e1561..f8fbf28c 100644 --- a/src/vnet/tcp/builtin_client.c +++ b/src/vnet/tcp/builtin_client.c @@ -237,8 +237,7 @@ tclient_thread_fn (void *arg) memset (dmp, 0, sizeof (*dmp)); dmp->_vl_msg_id = ntohs (VL_API_DISCONNECT_SESSION); dmp->client_index = tm->my_client_index; - dmp->session_index = sp->vpp_session_index; - dmp->session_thread_index = sp->vpp_session_thread; + dmp->handle = sp->vpp_session_handle; vl_msg_api_send_shmem (tm->vl_input_queue, (u8 *) & dmp); pool_put (tm->sessions, sp); } @@ -253,9 +252,10 @@ tclient_thread_fn (void *arg) static void vl_api_memclnt_create_reply_t_handler (vl_api_memclnt_create_reply_t * mp) { + vlib_main_t *vm = vlib_get_main (); tclient_main_t *tm = &tclient_main; - tm->my_client_index = mp->index; + vlib_process_signal_event (vm, tm->node_index, 1 /* evt */ , 0 /* data */ ); } static void @@ -264,7 +264,6 @@ vl_api_connect_uri_reply_t_handler (vl_api_connect_uri_reply_t * mp) tclient_main_t *tm = &tclient_main; session_t *session; u32 session_index; - u64 key; i32 retval = /* clib_net_to_host_u32 ( */ mp->retval /*) */ ; if (retval < 0) @@ -291,24 +290,24 @@ vl_api_connect_uri_reply_t_handler (vl_api_connect_uri_reply_t * mp) session->server_rx_fifo->client_session_index = session_index; session->server_tx_fifo = (svm_fifo_t *) mp->server_tx_fifo; session->server_tx_fifo->client_session_index = session_index; - - session->vpp_session_index = mp->session_index; - session->vpp_session_thread = mp->session_thread_index; + session->vpp_session_handle = mp->handle; /* Add it to the session lookup table */ - key = (((u64) mp->session_thread_index) << 32) | (u64) mp->session_index; - hash_set (tm->session_index_by_vpp_handles, key, session_index); + hash_set (tm->session_index_by_vpp_handles, mp->handle, session_index); tm->ready_connections++; } -static void +static int create_api_loopback (tclient_main_t * tm) { + vlib_main_t *vm = vlib_get_main (); vl_api_memclnt_create_t _m, *mp = &_m; extern void vl_api_memclnt_create_t_handler (vl_api_memclnt_create_t *); api_main_t *am =
&api_main; vl_shmem_hdr_t *shmem_hdr; + uword *event_data = 0, event_type; + int resolved = 0; /* * Create a "loopback" API client connection @@ -324,6 +323,25 @@ create_api_loopback (tclient_main_t * tm) strncpy ((char *) mp->name, "tcp_tester", sizeof (mp->name) - 1); vl_api_memclnt_create_t_handler (mp); + + /* Wait for reply */ + tm->node_index = vlib_get_current_process (vm)->node_runtime.node_index; + vlib_process_wait_for_event_or_clock (vm, 1.0); + event_type = vlib_process_get_events (vm, &event_data); + switch (event_type) + { + case 1: + resolved = 1; + break; + case ~0: + /* timed out */ + break; + default: + clib_warning ("unknown event_type %d", event_type); + } + if (!resolved) + return -1; + return 0; } #define foreach_tclient_static_api_msg \ @@ -333,17 +351,7 @@ _(CONNECT_URI_REPLY, connect_uri_reply) static clib_error_t * tclient_api_hookup (vlib_main_t * vm) { - tclient_main_t *tm = &tclient_main; vl_msg_api_msg_config_t _c, *c = &_c; - int i; - - /* Init test data */ - vec_validate (tm->connect_test_data, 64 * 1024 - 1); - for (i = 0; i < vec_len (tm->connect_test_data); i++) - tm->connect_test_data[i] = i & 0xff; - - tm->session_index_by_vpp_handles = hash_create (0, sizeof (uword)); - vec_validate (tm->rx_buf, vec_len (tm->connect_test_data) - 1); /* Hook up client-side static APIs to our handlers */ #define _(N,n) do { \ @@ -365,18 +373,105 @@ tclient_api_hookup (vlib_main_t * vm) return 0; } -VLIB_API_INIT_FUNCTION (tclient_api_hookup); +static int +tcp_test_clients_init (vlib_main_t * vm) +{ + tclient_main_t *tm = &tclient_main; + int i; + + tclient_api_hookup (vm); + if (create_api_loopback (tm)) + return -1; + + /* Init test data */ + vec_validate (tm->connect_test_data, 64 * 1024 - 1); + for (i = 0; i < vec_len (tm->connect_test_data); i++) + tm->connect_test_data[i] = i & 0xff; + + tm->session_index_by_vpp_handles = hash_create (0, sizeof (uword)); + vec_validate (tm->rx_buf, vec_len (tm->connect_test_data) - 1); + + tm->is_init = 1; + + return 0; +} + +static void +builtin_session_reset_callback (stream_session_t * s) +{ + return; +} + +static int +builtin_session_connected_callback (u32 app_index, u32 api_context, + stream_session_t * s, u8 code) +{ + return 0; +} + +static int +builtin_session_create_callback (stream_session_t * s) +{ + return 0; +} + +static void +builtin_session_disconnect_callback (stream_session_t * s) +{ + return; +} + +static int +builtin_server_rx_callback (stream_session_t * s) +{ + return 0; +} + +/* *INDENT-OFF* */ +static session_cb_vft_t builtin_clients = { + .session_reset_callback = builtin_session_reset_callback, + .session_connected_callback = builtin_session_connected_callback, + .session_accept_callback = builtin_session_create_callback, + .session_disconnect_callback = builtin_session_disconnect_callback, + .builtin_server_rx_callback = builtin_server_rx_callback +}; +/* *INDENT-ON* */ + +static int +attach_builtin_test_clients () +{ + vnet_app_attach_args_t _a, *a = &_a; + u8 segment_name[128]; + u32 segment_name_length; + u64 options[16]; + + segment_name_length = ARRAY_LEN (segment_name); + + memset (a, 0, sizeof (*a)); + memset (options, 0, sizeof (options)); + + a->api_client_index = ~0; + a->segment_name = segment_name; + a->segment_name_length = segment_name_length; + a->session_cb_vft = &builtin_clients; + + options[SESSION_OPTIONS_ACCEPT_COOKIE] = 0x12345678; + options[SESSION_OPTIONS_SEGMENT_SIZE] = (2 << 30); /*$$$$ config / arg */ + a->options = options; + + return vnet_application_attach (a); +} static 
clib_error_t * test_tcp_clients_command_fn (vlib_main_t * vm, unformat_input_t * input, vlib_cli_command_t * cmd) { + tclient_main_t *tm = &tclient_main; u8 *connect_uri = (u8 *) "tcp://6.0.1.1/1234"; u8 *uri; - tclient_main_t *tm = &tclient_main; - int i; u32 n_clients = 1; + int i; tm->bytes_to_send = 8192; tm->n_iterations = 1; @@ -397,14 +492,19 @@ test_tcp_clients_command_fn (vlib_main_t * vm, format_unformat_error, input); } + if (tm->is_init == 0) + { + if (tcp_test_clients_init (vm)) + return clib_error_return (0, "failed init"); + } + tm->ready_connections = 0; tm->expected_connections = n_clients; + uri = connect_uri; if (tm->connect_uri) uri = tm->connect_uri; - create_api_loopback (tm); - #if TCP_BUILTIN_CLIENT_PTHREAD /* Start a transmit thread */ if (tm->client_thread_handle == 0) @@ -420,6 +520,7 @@ test_tcp_clients_command_fn (vlib_main_t * vm, } #endif vnet_session_enable_disable (vm, 1 /* turn on TCP, etc. */ ); + attach_builtin_test_clients (); /* Fire off connect requests, in something approaching a normal manner */ for (i = 0; i < n_clients; i++) @@ -461,6 +562,16 @@ VLIB_CLI_COMMAND (test_clients_command, static) = }; /* *INDENT-ON* */ +clib_error_t * +tcp_test_clients_main_init (vlib_main_t * vm) +{ + tclient_main_t *tm = &tclient_main; + tm->is_init = 0; + return 0; +} + +VLIB_INIT_FUNCTION (tcp_test_clients_main_init); + /* * fd.io coding-style-patch-verification: ON * diff --git a/src/vnet/tcp/builtin_client.h b/src/vnet/tcp/builtin_client.h index 64030302..2bd87c07 100644 --- a/src/vnet/tcp/builtin_client.h +++ b/src/vnet/tcp/builtin_client.h @@ -39,8 +39,7 @@ typedef struct svm_fifo_t *server_rx_fifo; svm_fifo_t *server_tx_fifo; - u32 vpp_session_index; - u32 vpp_session_thread; + u64 vpp_session_handle; } session_t; typedef struct @@ -110,6 +109,10 @@ typedef struct u32 client_bytes_received; u8 test_return_packets; + u8 is_init; + + u32 node_index; + /* convenience */ vlib_main_t *vlib_main; vnet_main_t *vnet_main; diff --git a/src/vnet/tcp/builtin_server.c b/src/vnet/tcp/builtin_server.c index 917d4bd3..8308e3d9 100644 --- a/src/vnet/tcp/builtin_server.c +++ b/src/vnet/tcp/builtin_server.c @@ -18,17 +18,46 @@ #include #include +/* define message IDs */ +#include + +/* define message structures */ +#define vl_typedefs +#include +#undef vl_typedefs + +/* define generated endian-swappers */ +#define vl_endianfun +#include +#undef vl_endianfun + +/* instantiate all the print functions we know about */ +#define vl_print(handle, ...) 
vlib_cli_output (handle, __VA_ARGS__) +#define vl_printfun +#include +#undef vl_printfun + typedef struct { u8 *rx_buf; unix_shared_memory_queue_t **vpp_queue; - u32 byte_index; + u64 byte_index; + + /* Server's event queue */ + unix_shared_memory_queue_t *vl_input_queue; + + /* API client handle */ + u32 my_client_index; + + u32 app_index; + + /* process node index for event scheduling */ + u32 node_index; vlib_main_t *vlib_main; } builtin_server_main_t; builtin_server_main_t builtin_server_main; - int builtin_session_accept_callback (stream_session_t * s) { @@ -45,9 +74,13 @@ builtin_session_accept_callback (stream_session_t * s) void builtin_session_disconnect_callback (stream_session_t * s) { + builtin_server_main_t *bsm = &builtin_server_main; + vnet_disconnect_args_t _a, *a = &_a; clib_warning ("called..."); - vnet_disconnect_session (s->session_index, s->thread_index); + a->handle = stream_session_handle (s); + a->app_index = bsm->app_index; + vnet_disconnect_session (a); } void @@ -60,7 +93,7 @@ builtin_session_reset_callback (stream_session_t * s) int -builtin_session_connected_callback (u32 client_index, +builtin_session_connected_callback (u32 app_index, u32 api_context, stream_session_t * s, u8 is_fail) { clib_warning ("called..."); @@ -91,7 +124,7 @@ test_bytes (builtin_server_main_t * bsm, int actual_transfer) { if (bsm->rx_buf[i] != ((bsm->byte_index + i) & 0xff)) { - clib_warning ("at %d expected %d got %d", bsm->byte_index + i, + clib_warning ("at %lld expected %d got %d", bsm->byte_index + i, (bsm->byte_index + i) & 0xff, bsm->rx_buf[i]); } } @@ -190,23 +223,66 @@ static session_cb_vft_t builtin_session_cb_vft = { .session_reset_callback = builtin_session_reset_callback }; +/* Abuse VPP's input queue */ static int -server_create (vlib_main_t * vm) +create_api_loopback (vlib_main_t * vm) { - vnet_bind_args_t _a, *a = &_a; - u64 options[SESSION_OPTIONS_N_OPTIONS]; - char segment_name[128]; - u32 num_threads; - vlib_thread_main_t *vtm = vlib_get_thread_main (); + builtin_server_main_t *bsm = &builtin_server_main; + vl_api_memclnt_create_t _m, *mp = &_m; + extern void vl_api_memclnt_create_t_handler (vl_api_memclnt_create_t *); + api_main_t *am = &api_main; + vl_shmem_hdr_t *shmem_hdr; + uword *event_data = 0, event_type; + int resolved = 0; - num_threads = 1 /* main thread */ + vtm->n_threads; - vec_validate (builtin_server_main.vpp_queue, num_threads - 1); + /* + * Create a "loopback" API client connection + * Don't do things like this unless you know what you're doing...
+ */ + + shmem_hdr = am->shmem_hdr; + bsm->vl_input_queue = shmem_hdr->vl_input_queue; + memset (mp, 0, sizeof (*mp)); + mp->_vl_msg_id = VL_API_MEMCLNT_CREATE; + mp->context = 0xFEEDFACE; + mp->input_queue = (u64) bsm->vl_input_queue; + strncpy ((char *) mp->name, "tcp_test_server", sizeof (mp->name) - 1); + + vl_api_memclnt_create_t_handler (mp); + + /* Wait for reply */ + bsm->node_index = vlib_get_current_process (vm)->node_runtime.node_index; + vlib_process_wait_for_event_or_clock (vm, 1.0); + event_type = vlib_process_get_events (vm, &event_data); + switch (event_type) + { + case 1: + resolved = 1; + break; + case ~0: + /* timed out */ + break; + default: + clib_warning ("unknown event_type %d", event_type); + } + if (!resolved) + return -1; + + return 0; +} + +static int +server_attach () +{ + builtin_server_main_t *bsm = &builtin_server_main; + u8 segment_name[128]; + u64 options[SESSION_OPTIONS_N_OPTIONS]; + vnet_app_attach_args_t _a, *a = &_a; memset (a, 0, sizeof (*a)); memset (options, 0, sizeof (options)); - a->uri = "tcp://0.0.0.0/1234"; - a->api_client_index = ~0; + a->api_client_index = bsm->my_client_index; a->session_cb_vft = &builtin_session_cb_vft; a->options = options; a->options[SESSION_OPTIONS_SEGMENT_SIZE] = 128 << 20; @@ -215,9 +291,94 @@ server_create (vlib_main_t * vm) a->segment_name = segment_name; a->segment_name_length = ARRAY_LEN (segment_name); + if (vnet_application_attach (a)) + { + clib_warning ("failed to attach server"); + return -1; + } + bsm->app_index = a->app_index; + return 0; +} + +static int +server_listen () +{ + builtin_server_main_t *bsm = &builtin_server_main; + vnet_bind_args_t _a, *a = &_a; + memset (a, 0, sizeof (*a)); + a->app_index = bsm->app_index; + a->uri = "tcp://0.0.0.0/1234"; return vnet_bind_uri (a); } +static int +server_create (vlib_main_t * vm) +{ + builtin_server_main_t *bsm = &builtin_server_main; + u32 num_threads; + vlib_thread_main_t *vtm = vlib_get_thread_main (); + + if (bsm->my_client_index == (u32) ~ 0) + { + if (create_api_loopback (vm)) + return -1; + } + + num_threads = 1 /* main thread */ + vtm->n_threads; + vec_validate (builtin_server_main.vpp_queue, num_threads - 1); + + if (server_attach ()) + { + clib_warning ("failed to attach server"); + return -1; + } + if (server_listen ()) + { + clib_warning ("failed to start listening"); + return -1; + } + return 0; +} + +/* Get our api client index */ +static void +vl_api_memclnt_create_reply_t_handler (vl_api_memclnt_create_reply_t * mp) +{ + vlib_main_t *vm = vlib_get_main (); + builtin_server_main_t *bsm = &builtin_server_main; + bsm->my_client_index = mp->index; + vlib_process_signal_event (vm, bsm->node_index, 1 /* evt */ , + 0 /* data */ ); +} + +#define foreach_tcp_builtin_server_api_msg \ +_(MEMCLNT_CREATE_REPLY, memclnt_create_reply) \ + +static clib_error_t * +tcp_builtin_server_api_hookup (vlib_main_t * vm) +{ + vl_msg_api_msg_config_t _c, *c = &_c; + + /* Hook up client-side static APIs to our handlers */ +#define _(N,n) do { \ + c->id = VL_API_##N; \ + c->name = #n; \ + c->handler = vl_api_##n##_t_handler; \ + c->cleanup = vl_noop_handler; \ + c->endian = vl_api_##n##_t_endian; \ + c->print = vl_api_##n##_t_print; \ + c->size = sizeof(vl_api_##n##_t); \ + c->traced = 1; /* trace, so these msgs print */ \ + c->replay = 0; /* don't replay client create/delete msgs */ \ + c->message_bounce = 0; /* don't bounce this message */ \ + vl_msg_api_config(c);} while (0); + + foreach_tcp_builtin_server_api_msg; +#undef _ + + return 0; +} + static clib_error_t * 
server_create_command_fn (vlib_main_t * vm, unformat_input_t * input, vlib_cli_command_t * cmd) @@ -234,6 +395,7 @@ server_create_command_fn (vlib_main_t * vm, } #endif + tcp_builtin_server_api_hookup (vm); vnet_session_enable_disable (vm, 1 /* turn on TCP, etc. */ ); rv = server_create (vm); switch (rv) @@ -249,12 +411,22 @@ server_create_command_fn (vlib_main_t * vm, /* *INDENT-OFF* */ VLIB_CLI_COMMAND (server_create_command, static) = { - .path = "test server", - .short_help = "test server", + .path = "test tcp server", + .short_help = "test tcp server", .function = server_create_command_fn, }; /* *INDENT-ON* */ +clib_error_t * +builtin_tcp_server_main_init (vlib_main_t * vm) +{ + builtin_server_main_t *bsm = &builtin_server_main; + bsm->my_client_index = ~0; + return 0; +} + +VLIB_INIT_FUNCTION (builtin_tcp_server_main_init); + /* * fd.io coding-style-patch-verification: ON * diff --git a/src/vnet/tcp/tcp.c b/src/vnet/tcp/tcp.c index b6c34828..a0c66b9f 100644 --- a/src/vnet/tcp/tcp.c +++ b/src/vnet/tcp/tcp.c @@ -34,14 +34,19 @@ tcp_connection_bind (u32 session_index, ip46_address_t * ip, listener->c_lcl_port = clib_host_to_net_u16 (port_host_byte_order); if (is_ip4) - listener->c_lcl_ip4.as_u32 = ip->ip4.as_u32; + { + listener->c_lcl_ip4.as_u32 = ip->ip4.as_u32; + listener->c_is_ip4 = 1; + listener->c_proto = SESSION_TYPE_IP4_TCP; + } else - clib_memcpy (&listener->c_lcl_ip6, &ip->ip6, sizeof (ip6_address_t)); + { + clib_memcpy (&listener->c_lcl_ip6, &ip->ip6, sizeof (ip6_address_t)); + listener->c_proto = SESSION_TYPE_IP6_TCP; + } listener->c_s_index = session_index; - listener->c_proto = SESSION_TYPE_IP4_TCP; listener->state = TCP_STATE_LISTEN; - listener->c_is_ip4 = 1; tcp_connection_timers_init (listener); @@ -62,7 +67,6 @@ tcp_session_bind_ip6 (u32 session_index, ip46_address_t * ip, u16 port_host_byte_order) { return tcp_connection_bind (session_index, ip, port_host_byte_order, 0); - } static void @@ -397,6 +401,7 @@ tcp_connection_open (ip46_address_t * rmt_addr, u16 rmt_port, u8 is_ip4) tc->c_lcl_port = clib_host_to_net_u16 (lcl_port); tc->c_c_index = tc - tm->half_open_connections; tc->c_is_ip4 = is_ip4; + tc->c_proto = is_ip4 ? 
SESSION_TYPE_IP4_TCP : SESSION_TYPE_IP6_TCP; /* The other connection vars will be initialized after SYN ACK */ tcp_connection_timers_init (tc); @@ -518,7 +523,10 @@ format_tcp_session (u8 * s, va_list * args) tcp_connection_t *tc; tc = tcp_connection_get (tci, thread_index); - return format (s, "%U", format_tcp_connection, tc); + if (tc) + return format (s, "%U", format_tcp_connection, tc); + else + return format (s, "empty"); } u8 * diff --git a/src/vnet/tcp/tcp.h b/src/vnet/tcp/tcp.h index 2f5da108..93f3245d 100644 --- a/src/vnet/tcp/tcp.h +++ b/src/vnet/tcp/tcp.h @@ -100,8 +100,6 @@ extern timer_expiration_handler tcp_timer_retransmit_syn_handler; #define TCP_RTO_SYN_RETRIES 3 /* SYN retries without doubling RTO */ #define TCP_RTO_INIT 1 * THZ /* Initial retransmit timer */ -void tcp_update_time (f64 now, u32 thread_index); - /** TCP connection flags */ #define foreach_tcp_connection_flag \ _(SNDACK, "Send ACK") \ @@ -481,6 +479,13 @@ tcp_time_now (void) return clib_cpu_time_now () * tcp_main.tstamp_ticks_per_clock; } +always_inline void +tcp_update_time (f64 now, u32 thread_index) +{ + tw_timer_expire_timers_16t_2w_512sl (&tcp_main.timer_wheels[thread_index], + now); +} + u32 tcp_push_header (transport_connection_t * tconn, vlib_buffer_t * b); u32 diff --git a/src/vnet/tcp/tcp_input.c b/src/vnet/tcp/tcp_input.c index 7e9fa47b..ae1f92d5 100644 --- a/src/vnet/tcp/tcp_input.c +++ b/src/vnet/tcp/tcp_input.c @@ -1841,6 +1841,7 @@ tcp46_rcv_process_inline (vlib_main_t * vm, vlib_node_runtime_t * node, case TCP_STATE_ESTABLISHED: case TCP_STATE_FIN_WAIT_1: case TCP_STATE_FIN_WAIT_2: + vlib_buffer_advance (b0, n_advance_bytes0); error0 = tcp_segment_rcv (tm, tc0, b0, n_data_bytes0, &next0); break; case TCP_STATE_CLOSE_WAIT: @@ -2410,12 +2411,6 @@ VLIB_REGISTER_NODE (tcp6_input_node) = /* *INDENT-ON* */ VLIB_NODE_FUNCTION_MULTIARCH (tcp6_input_node, tcp6_input); -void -tcp_update_time (f64 now, u32 thread_index) -{ - tcp_main_t *tm = vnet_get_tcp_main (); - tw_timer_expire_timers_16t_2w_512sl (&tm->timer_wheels[thread_index], now); -} static void tcp_dispatch_table_init (tcp_main_t * tm) diff --git a/src/vnet/tcp/tcp_test.c b/src/vnet/tcp/tcp_test.c index 0725bb04..3dbbdf6f 100644 --- a/src/vnet/tcp/tcp_test.c +++ b/src/vnet/tcp/tcp_test.c @@ -12,7 +12,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - #include #define TCP_TEST_I(_cond, _comment, _args...) \ @@ -174,6 +173,118 @@ tcp_test_sack () return 0; } +static int +tcp_test_fifo (vlib_main_t * vm, unformat_input_t * input) +{ + svm_fifo_t *f; + u32 fifo_size = 1 << 20; + u32 *test_data = 0; + u32 offset; + int i, rv; + u32 data_word, test_data_len; + + /* $$$ parse args */ + test_data_len = fifo_size / sizeof (u32); + vec_validate (test_data, test_data_len - 1); + + for (i = 0; i < vec_len (test_data); i++) + test_data[i] = i; + + f = svm_fifo_create (fifo_size); + + /* Paint fifo data vector with -1's */ + memset (f->data, 0xFF, test_data_len); + + /* Enqueue an initial (un-dequeued) chunk */ + rv = svm_fifo_enqueue_nowait (f, 0 /* pid */ , + sizeof (u32), (u8 *) test_data); + + if (rv != sizeof (u32)) + { + clib_warning ("enqueue returned %d", rv); + goto out; + } + + /* + * Create 3 chunks in the future. 
The offsets are relative + * to the current fifo tail + */ + for (i = 0; i < 3; i++) + { + offset = (2 * i + 1) * sizeof (u32); + vlib_cli_output (vm, "add offset %d", offset); + + rv = svm_fifo_enqueue_with_offset + (f, 0 /* pid */ , offset, sizeof (u32), + (u8 *) (test_data + ((offset + sizeof (u32)) / sizeof (u32)))); + + if (rv) + { + clib_warning ("enqueue returned %d", rv); + goto out; + } + } + + /* Paint missing data backwards */ + for (i = 3; i > 0; i--) + { + offset = (2 * i + 0) * sizeof (u32); + + vlib_cli_output (vm, "add offset %d", offset); + + rv = svm_fifo_enqueue_with_offset + (f, 0 /* pid */ , offset, sizeof (u32), + (u8 *) (test_data + ((offset + sizeof (u32)) / sizeof (u32)))); + + if (rv) + { + clib_warning ("enqueue returned %d", rv); + goto out; + } + } + + vlib_cli_output (vm, "fifo before missing link: %U", + format_svm_fifo, f, 1 /* verbose */ ); + + /* Enqueue the missing u32 */ + rv = svm_fifo_enqueue_nowait (f, 0 /* pid */ , + sizeof (u32), (u8 *) (test_data + 1)); + if (rv != 7 * sizeof (u32)) + { + clib_warning ("enqueue returned %d", rv); + goto out; + } + + vlib_cli_output (vm, "fifo after missing link: %U", + format_svm_fifo, f, 1 /* verbose */ ); + + /* Collect results */ + for (i = 0; i < 7; i++) + { + rv = svm_fifo_dequeue_nowait (f, 0 /* pid */ , sizeof (u32), + (u8 *) & data_word); + if (rv != sizeof (u32)) + { + clib_warning ("dequeue returned %d", rv); + goto out; + } + if (data_word != test_data[i]) + { + clib_warning ("recovered data %d not %d", data_word, test_data[i]); + goto out; + } + } + + clib_warning ("test complete..."); + +out: + svm_fifo_free (f); + vec_free (test_data); + return 0; +} + + + static clib_error_t * tcp_test (vlib_main_t * vm, unformat_input_t * input, vlib_cli_command_t * cmd_arg) @@ -186,6 +297,10 @@ tcp_test (vlib_main_t * vm, { res = tcp_test_sack (); } + else if (unformat (input, "fifo")) + { + res = tcp_test_fifo (vm, input); + } else { return clib_error_return (0, "unknown input `%U'", @@ -203,10 +318,16 @@ tcp_test (vlib_main_t * vm, } } +/* *INDENT-OFF* */ VLIB_CLI_COMMAND (tcp_test_command, static) = { -.path = "test tcp",.short_help = "internal tcp unit tests",.function = - tcp_test,}; + .path = "test tcp", + .short_help = "internal tcp unit tests", + .function = tcp_test, +}; +/* *INDENT-ON* */ + + /* * fd.io coding-style-patch-verification: ON * diff --git a/src/vnet/udp/builtin_server.c b/src/vnet/udp/builtin_server.c index 57f774c5..8565f04c 100644 --- a/src/vnet/udp/builtin_server.c +++ b/src/vnet/udp/builtin_server.c @@ -91,12 +91,11 @@ static session_cb_vft_t builtin_server = { /* *INDENT-ON* */ static int -bind_builtin_uri_server (u8 * uri) +attach_builtin_uri_server () { - vnet_bind_args_t _a, *a = &_a; - char segment_name[128]; + vnet_app_attach_args_t _a, *a = &_a; + u8 segment_name[128]; u32 segment_name_length; - int rv; u64 options[16]; segment_name_length = ARRAY_LEN (segment_name); @@ -104,8 +103,7 @@ bind_builtin_uri_server (u8 * uri) memset (a, 0, sizeof (*a)); memset (options, 0, sizeof (options)); - a->uri = (char *) uri; - a->api_client_index = ~0; /* built-in server */ + a->api_client_index = ~0; a->segment_name = segment_name; a->segment_name_length = segment_name_length; a->session_cb_vft = &builtin_server; @@ -114,6 +112,23 @@ bind_builtin_uri_server (u8 * uri) options[SESSION_OPTIONS_SEGMENT_SIZE] = (2 << 30); /*$$$$ config / arg */ a->options = options; + return vnet_application_attach (a); +} + +static int +bind_builtin_uri_server (u8 * uri) +{ + vnet_bind_args_t _a, *a = &_a; + int 
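A condensed sketch of the out-of-order enqueue semantics the fifo test above exercises, with a single hole instead of six. The expected return values follow the checks in the test; note the next patch in this series drops the pid argument from all of these calls.

#include <svm/svm_fifo.h>

static int
ooo_enqueue_demo (void)
{
  svm_fifo_t *f = svm_fifo_create (4096);
  u32 w[3] = { 0, 1, 2 };
  int rv;

  /* In-order enqueue advances the tail, returns bytes enqueued */
  rv = svm_fifo_enqueue_nowait (f, 0 /* pid */ , sizeof (u32),
				(u8 *) & w[0]);
  ASSERT (rv == sizeof (u32));

  /* Enqueue w[2] one word ahead of the tail: leaves a hole where w[1]
   * belongs, returns 0 on success and does not move the tail */
  rv = svm_fifo_enqueue_with_offset (f, 0 /* pid */ ,
				     sizeof (u32) /* offset from tail */ ,
				     sizeof (u32), (u8 *) & w[2]);
  ASSERT (rv == 0);
  ASSERT (svm_fifo_max_dequeue (f) == sizeof (u32));

  /* Filling the hole coalesces the ooo segment: per the
   * 7 * sizeof (u32) check in the test above, the return value covers
   * the new bytes plus everything they made contiguous */
  rv = svm_fifo_enqueue_nowait (f, 0 /* pid */ , sizeof (u32),
				(u8 *) & w[1]);
  ASSERT (rv == 2 * sizeof (u32));

  svm_fifo_free (f);
  return 0;
}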
rv;
+
+  rv = attach_builtin_uri_server ();
+  if (rv)
+    return rv;
+
+  memset (a, 0, sizeof (*a));
+  a->uri = (char *) uri;
+  a->app_index = ~0;		/* built-in server */
+
   rv = vnet_bind_uri (a);
 
   return rv;
@@ -122,11 +137,12 @@ bind_builtin_uri_server (u8 * uri)
 static int
 unbind_builtin_uri_server (u8 * uri)
 {
-  int rv;
+  vnet_unbind_args_t _a, *a = &_a;
 
-  rv = vnet_unbind_uri ((char *) uri, ~0 /* client_index */ );
+  a->app_index = ~0;
+  a->uri = (char *) uri;
 
-  return rv;
+  return vnet_unbind_uri (a);
 }
 
 static clib_error_t *
--
cgit 1.2.3-korg


From a5464817522c7a7dc760af4612f1d6a68ed0afc8 Mon Sep 17 00:00:00 2001
From: Florin Coras
Date: Wed, 19 Apr 2017 13:00:05 -0700
Subject: Session layer improvements

Among others:
- Moved app event queue to shared memory segment
- Use private memory segment for builtin apps
- Remove pid from svm fifo
- Protect session fifo (de)allocation
- Use fifo event for session disconnects
- Have session queue node poll in all wk threads

Change-Id: I89dbf7fdfebef12f5ef2b34ba3ef3c2c07f49ff2
Signed-off-by: Florin Coras
---
 src/svm/svm_fifo.c                       |  30 ++---
 src/svm/svm_fifo.h                       |  31 ++---
 src/svm/svm_fifo_segment.c               |  50 +++++++-
 src/svm/svm_fifo_segment.h               |   5 +
 src/svm/test_svm_fifo1.c                 |  27 ++---
 src/uri/uri_tcp_test.c                   | 189 ++++++++++++++++++-----------
 src/uri/uri_udp_test.c                   |  40 ++++---
 src/vnet/session/application.c           |  48 +++-----
 src/vnet/session/application.h           |  12 --
 src/vnet/session/application_interface.c |  26 +----
 src/vnet/session/application_interface.h |  38 +++++-
 src/vnet/session/node.c                  |  63 ++++++-----
 src/vnet/session/segment_manager.c       | 134 ++++++++++++++++----
 src/vnet/session/segment_manager.h       |  12 ++
 src/vnet/session/session.c               | 138 ++++++++++++----------
 src/vnet/session/session.h               |  19 ++--
 src/vnet/session/session_api.c           |  58 ++++------
 src/vnet/tcp/builtin_client.c            |   9 +-
 src/vnet/tcp/builtin_server.c            |   8 +-
 src/vnet/tcp/tcp.c                       |  13 ++-
 src/vnet/tcp/tcp_input.c                 |   8 +-
 src/vnet/tcp/tcp_output.c                |   6 -
 src/vnet/tcp/tcp_test.c                  |  43 ++++---
 src/vnet/udp/builtin_server.c            |   8 +-
 src/vnet/udp/udp_input.c                 |   5 +-
 25 files changed, 604 insertions(+), 416 deletions(-)

(limited to 'src/vnet/session/application_interface.c')

diff --git a/src/svm/svm_fifo.c b/src/svm/svm_fifo.c
index f428d3ec..8f2ed0c9 100644
--- a/src/svm/svm_fifo.c
+++ b/src/svm/svm_fifo.c
@@ -57,7 +57,7 @@ format_svm_fifo (u8 * s, va_list * args)
   if (verbose > 1)
     s = format (s,
 		"server session %d thread %d client session %d thread %d\n",
-		f->server_session_index, f->server_thread_index,
+		f->master_session_index, f->master_thread_index,
 		f->client_session_index, f->client_thread_index);
 
   if (verbose)
@@ -353,8 +353,7 @@ ooo_segment_try_collect (svm_fifo_t * f, u32 n_bytes_enqueued)
 }
 
 static int
-svm_fifo_enqueue_internal (svm_fifo_t * f,
-			   int pid, u32 max_bytes, u8 * copy_from_here)
+svm_fifo_enqueue_internal (svm_fifo_t * f, u32 max_bytes, u8 * copy_from_here)
 {
   u32 total_copy_bytes, first_copy_bytes, second_copy_bytes;
   u32 cursize, nitems;
@@ -411,10 +410,9 @@ svm_fifo_enqueue_internal (svm_fifo_t * f,
 }
 
 int
-svm_fifo_enqueue_nowait (svm_fifo_t * f,
-			 int pid, u32 max_bytes, u8 * copy_from_here)
+svm_fifo_enqueue_nowait (svm_fifo_t * f, u32 max_bytes, u8 * copy_from_here)
 {
-  return svm_fifo_enqueue_internal (f, pid, max_bytes, copy_from_here);
+  return svm_fifo_enqueue_internal (f, max_bytes, copy_from_here);
 }
 
 /**
@@ -426,7 +424,6 @@ svm_fifo_enqueue_nowait (svm_fifo_t * f,
 */
 static int
 svm_fifo_enqueue_with_offset_internal (svm_fifo_t * f,
-				       int pid,
 				       u32 offset,
 				       u32 required_bytes,
 				       u8 *
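The mechanical consequence of the "Remove pid from svm fifo" item above is visible at every call site: the pid argument simply disappears, since ownership now travels with the fifo's master/client session back-pointers. A minimal post-change sketch; echo_once is an illustrative name, the two fifo calls use the new signatures from this patch.

#include <svm/svm_fifo.h>

static int
echo_once (svm_fifo_t * rx_fifo, svm_fifo_t * tx_fifo, u8 * buf,
	   u32 buf_len)
{
  int n_read;

  /* Dequeue whatever is available, up to buf_len bytes */
  n_read = svm_fifo_dequeue_nowait (rx_fifo, buf_len, buf);
  if (n_read <= 0)
    return n_read;

  /* Echo it back on the tx fifo */
  return svm_fifo_enqueue_nowait (tx_fifo, n_read, buf);
}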
copy_from_here) @@ -439,7 +436,7 @@ svm_fifo_enqueue_with_offset_internal (svm_fifo_t * f, /* Users would do well to avoid this */ if (PREDICT_FALSE (f->tail == (offset % f->nitems))) { - rv = svm_fifo_enqueue_internal (f, pid, required_bytes, copy_from_here); + rv = svm_fifo_enqueue_internal (f, required_bytes, copy_from_here); if (rv > 0) return 0; return -1; @@ -484,18 +481,16 @@ svm_fifo_enqueue_with_offset_internal (svm_fifo_t * f, int svm_fifo_enqueue_with_offset (svm_fifo_t * f, - int pid, u32 offset, u32 required_bytes, u8 * copy_from_here) { - return svm_fifo_enqueue_with_offset_internal - (f, pid, offset, required_bytes, copy_from_here); + return svm_fifo_enqueue_with_offset_internal (f, offset, required_bytes, + copy_from_here); } static int -svm_fifo_dequeue_internal (svm_fifo_t * f, - int pid, u32 max_bytes, u8 * copy_here) +svm_fifo_dequeue_internal (svm_fifo_t * f, u32 max_bytes, u8 * copy_here) { u32 total_copy_bytes, first_copy_bytes, second_copy_bytes; u32 cursize, nitems; @@ -545,14 +540,13 @@ svm_fifo_dequeue_internal (svm_fifo_t * f, } int -svm_fifo_dequeue_nowait (svm_fifo_t * f, - int pid, u32 max_bytes, u8 * copy_here) +svm_fifo_dequeue_nowait (svm_fifo_t * f, u32 max_bytes, u8 * copy_here) { - return svm_fifo_dequeue_internal (f, pid, max_bytes, copy_here); + return svm_fifo_dequeue_internal (f, max_bytes, copy_here); } int -svm_fifo_peek (svm_fifo_t * f, int pid, u32 relative_offset, u32 max_bytes, +svm_fifo_peek (svm_fifo_t * f, u32 relative_offset, u32 max_bytes, u8 * copy_here) { u32 total_copy_bytes, first_copy_bytes, second_copy_bytes; @@ -590,7 +584,7 @@ svm_fifo_peek (svm_fifo_t * f, int pid, u32 relative_offset, u32 max_bytes, } int -svm_fifo_dequeue_drop (svm_fifo_t * f, int pid, u32 max_bytes) +svm_fifo_dequeue_drop (svm_fifo_t * f, u32 max_bytes) { u32 total_drop_bytes, first_drop_bytes, second_drop_bytes; u32 cursize, nitems; diff --git a/src/svm/svm_fifo.h b/src/svm/svm_fifo.h index 0fff2577..d67237c6 100644 --- a/src/svm/svm_fifo.h +++ b/src/svm/svm_fifo.h @@ -23,13 +23,6 @@ #include #include -typedef enum -{ - SVM_FIFO_TAG_NOT_HELD = 0, - SVM_FIFO_TAG_DEQUEUE, - SVM_FIFO_TAG_ENQUEUE, -} svm_lock_tag_t; - /** Out-of-order segment */ typedef struct { @@ -37,7 +30,7 @@ typedef struct u32 prev; /**< Previous linked-list element pool index */ u32 start; /**< Start of segment, normalized*/ - u32 length; /**< Length of segment */ + u32 length; /**< Length of segment */ } ooo_segment_t; format_function_t format_ooo_segment; @@ -52,12 +45,11 @@ typedef struct CLIB_CACHE_LINE_ALIGN_MARK (end_cursize); volatile u8 has_event; /**< non-zero if deq event exists */ - u32 owner_pid; /* Backpointers */ - u32 server_session_index; + u32 master_session_index; u32 client_session_index; - u8 server_thread_index; + u8 master_thread_index; u8 client_thread_index; u32 segment_manager; CLIB_CACHE_LINE_ALIGN_MARK (end_shared); @@ -117,19 +109,14 @@ svm_fifo_unset_event (svm_fifo_t * f) svm_fifo_t *svm_fifo_create (u32 data_size_in_bytes); void svm_fifo_free (svm_fifo_t * f); -int svm_fifo_enqueue_nowait (svm_fifo_t * f, int pid, u32 max_bytes, +int svm_fifo_enqueue_nowait (svm_fifo_t * f, u32 max_bytes, u8 * copy_from_here); +int svm_fifo_enqueue_with_offset (svm_fifo_t * f, u32 offset, + u32 required_bytes, u8 * copy_from_here); +int svm_fifo_dequeue_nowait (svm_fifo_t * f, u32 max_bytes, u8 * copy_here); -int svm_fifo_enqueue_with_offset (svm_fifo_t * f, int pid, - u32 offset, u32 required_bytes, - u8 * copy_from_here); - -int svm_fifo_dequeue_nowait (svm_fifo_t * f, int 
pid, u32 max_bytes, - u8 * copy_here); - -int svm_fifo_peek (svm_fifo_t * f, int pid, u32 offset, u32 max_bytes, - u8 * copy_here); -int svm_fifo_dequeue_drop (svm_fifo_t * f, int pid, u32 max_bytes); +int svm_fifo_peek (svm_fifo_t * f, u32 offset, u32 max_bytes, u8 * copy_here); +int svm_fifo_dequeue_drop (svm_fifo_t * f, u32 max_bytes); u32 svm_fifo_number_ooo_segments (svm_fifo_t * f); ooo_segment_t *svm_fifo_first_ooo_segment (svm_fifo_t * f); diff --git a/src/svm/svm_fifo_segment.c b/src/svm/svm_fifo_segment.c index acabb3bd..281fae27 100644 --- a/src/svm/svm_fifo_segment.c +++ b/src/svm/svm_fifo_segment.c @@ -70,6 +70,44 @@ svm_fifo_segment_create (svm_fifo_segment_create_args_t * a) return (0); } +/** Create an svm fifo segment in process-private memory */ +int +svm_fifo_segment_create_process_private (svm_fifo_segment_create_args_t * a) +{ + svm_fifo_segment_private_t *s; + svm_fifo_segment_main_t *sm = &svm_fifo_segment_main; + ssvm_shared_header_t *sh; + svm_fifo_segment_header_t *fsh; + + /* Allocate a fresh segment */ + pool_get (sm->segments, s); + memset (s, 0, sizeof (*s)); + + s->ssvm.ssvm_size = ~0; + s->ssvm.i_am_master = 1; + s->ssvm.my_pid = getpid (); + s->ssvm.name = (u8 *) a->segment_name; + s->ssvm.requested_va = ~0; + + /* Allocate a [sic] shared memory header, in process memory... */ + sh = clib_mem_alloc_aligned (sizeof (*sh), CLIB_CACHE_LINE_BYTES); + s->ssvm.sh = sh; + + memset (sh, 0, sizeof (*sh)); + sh->heap = clib_mem_get_heap (); + + /* Set up svm_fifo_segment shared header */ + fsh = clib_mem_alloc (sizeof (*fsh)); + memset (fsh, 0, sizeof (*fsh)); + sh->opaque[0] = fsh; + s->h = fsh; + fsh->segment_name = format (0, "%s%c", a->segment_name, 0); + + sh->ready = 1; + a->new_segment_index = s - sm->segments; + return (0); +} + /** (slave) attach to an svm fifo segment */ int svm_fifo_segment_attach (svm_fifo_segment_create_args_t * a) @@ -82,7 +120,6 @@ svm_fifo_segment_attach (svm_fifo_segment_create_args_t * a) /* Allocate a fresh segment */ pool_get (sm->segments, s); - memset (s, 0, sizeof (*s)); s->ssvm.ssvm_size = a->segment_size; @@ -126,19 +163,22 @@ svm_fifo_segment_alloc_fifo (svm_fifo_segment_private_t * s, sh = s->ssvm.sh; fsh = (svm_fifo_segment_header_t *) sh->opaque[0]; + + ssvm_lock (sh, 1, 0); oldheap = ssvm_push_heap (sh); /* Note: this can fail, in which case: create another segment */ f = svm_fifo_create (data_size_in_bytes); - if (f == 0) + if (PREDICT_FALSE (f == 0)) { ssvm_pop_heap (oldheap); + ssvm_unlock (sh); return (0); } vec_add1 (fsh->fifos, f); - ssvm_pop_heap (oldheap); + ssvm_unlock (sh); return (f); } @@ -152,8 +192,9 @@ svm_fifo_segment_free_fifo (svm_fifo_segment_private_t * s, svm_fifo_t * f) sh = s->ssvm.sh; fsh = (svm_fifo_segment_header_t *) sh->opaque[0]; - oldheap = ssvm_push_heap (sh); + ssvm_lock (sh, 1, 0); + oldheap = ssvm_push_heap (sh); for (i = 0; i < vec_len (fsh->fifos); i++) { if (fsh->fifos[i] == f) @@ -167,6 +208,7 @@ svm_fifo_segment_free_fifo (svm_fifo_segment_private_t * s, svm_fifo_t * f) found: clib_mem_free (f); ssvm_pop_heap (oldheap); + ssvm_unlock (sh); } void diff --git a/src/svm/svm_fifo_segment.h b/src/svm/svm_fifo_segment.h index 9ab47a4c..4218013a 100644 --- a/src/svm/svm_fifo_segment.h +++ b/src/svm/svm_fifo_segment.h @@ -17,6 +17,7 @@ #include #include +#include typedef struct { @@ -32,6 +33,8 @@ typedef struct typedef struct { + volatile u32 lock; + /** pool of segments */ svm_fifo_segment_private_t *segments; /* Where to put the next one */ @@ -78,6 +81,8 @@ typedef enum } 
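A minimal sketch of driving the new process-private path directly, mirroring what the segment manager does later in this series; the segment name is illustrative. Because the shared header's heap points at the process heap and ssvm_size is ~0, nothing here is mapped for another process to attach to.

#include <svm/svm_fifo_segment.h>

static int
create_private_segment_demo (void)
{
  svm_fifo_segment_create_args_t _a, *a = &_a;

  memset (a, 0, sizeof (*a));
  a->segment_name = "demo-private-segment";	/* illustrative */
  a->segment_size = ~0;
  a->new_segment_index = ~0;

  if (svm_fifo_segment_create_process_private (a))
    return -1;

  /* a->new_segment_index now names the segment; fifos are carved out
   * of it with svm_fifo_segment_alloc_fifo (), same as for shared
   * segments */
  return a->new_segment_index;
}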
ssvm_fifo_segment_api_error_enum_t; int svm_fifo_segment_create (svm_fifo_segment_create_args_t * a); +int svm_fifo_segment_create_process_private (svm_fifo_segment_create_args_t + * a); int svm_fifo_segment_attach (svm_fifo_segment_create_args_t * a); void svm_fifo_segment_delete (svm_fifo_segment_private_t * s); diff --git a/src/svm/test_svm_fifo1.c b/src/svm/test_svm_fifo1.c index 355653df..398dd6d7 100644 --- a/src/svm/test_svm_fifo1.c +++ b/src/svm/test_svm_fifo1.c @@ -25,7 +25,6 @@ hello_world (int verbose) u8 *test_data; u8 *retrieved_data = 0; clib_error_t *error = 0; - int pid = getpid (); memset (a, 0, sizeof (*a)); @@ -48,18 +47,16 @@ hello_world (int verbose) vec_validate (retrieved_data, vec_len (test_data) - 1); while (svm_fifo_max_enqueue (f) >= vec_len (test_data)) - svm_fifo_enqueue_nowait (f, pid, vec_len (test_data), test_data); + svm_fifo_enqueue_nowait (f, vec_len (test_data), test_data); while (svm_fifo_max_dequeue (f) >= vec_len (test_data)) - svm_fifo_dequeue_nowait (f, pid, vec_len (retrieved_data), - retrieved_data); + svm_fifo_dequeue_nowait (f, vec_len (retrieved_data), retrieved_data); while (svm_fifo_max_enqueue (f) >= vec_len (test_data)) - svm_fifo_enqueue_nowait (f, pid, vec_len (test_data), test_data); + svm_fifo_enqueue_nowait (f, vec_len (test_data), test_data); while (svm_fifo_max_dequeue (f) >= vec_len (test_data)) - svm_fifo_dequeue_nowait (f, pid, vec_len (retrieved_data), - retrieved_data); + svm_fifo_dequeue_nowait (f, vec_len (retrieved_data), retrieved_data); if (!memcmp (retrieved_data, test_data, vec_len (test_data))) error = clib_error_return (0, "data test OK, got '%s'", retrieved_data); @@ -81,7 +78,6 @@ master (int verbose) u8 *test_data; u8 *retrieved_data = 0; int i; - int pid = getpid (); memset (a, 0, sizeof (*a)); @@ -104,7 +100,7 @@ master (int verbose) vec_validate (retrieved_data, vec_len (test_data) - 1); for (i = 0; i < 1000; i++) - svm_fifo_enqueue_nowait (f, pid, vec_len (test_data), test_data); + svm_fifo_enqueue_nowait (f, vec_len (test_data), test_data); return clib_error_return (0, "master (enqueue) done"); } @@ -176,7 +172,6 @@ offset (int verbose) u32 *test_data = 0; u32 *recovered_data = 0; int i; - int pid = getpid (); memset (a, 0, sizeof (*a)); @@ -199,19 +194,19 @@ offset (int verbose) vec_add1 (test_data, i); /* Enqueue the first 1024 u32's */ - svm_fifo_enqueue_nowait (f, pid, 4096 /* bytes to enqueue */ , + svm_fifo_enqueue_nowait (f, 4096 /* bytes to enqueue */ , (u8 *) test_data); /* Enqueue the third 1024 u32's 2048 ahead of the current tail */ - svm_fifo_enqueue_with_offset (f, pid, 4096, 4096, (u8 *) & test_data[2048]); + svm_fifo_enqueue_with_offset (f, 4096, 4096, (u8 *) & test_data[2048]); /* Enqueue the second 1024 u32's at the current tail */ - svm_fifo_enqueue_nowait (f, pid, 4096 /* bytes to enqueue */ , + svm_fifo_enqueue_nowait (f, 4096 /* bytes to enqueue */ , (u8 *) & test_data[1024]); vec_validate (recovered_data, (3 * 1024) - 1); - svm_fifo_dequeue_nowait (f, pid, 3 * 4096, (u8 *) recovered_data); + svm_fifo_dequeue_nowait (f, 3 * 4096, (u8 *) recovered_data); for (i = 0; i < (3 * 1024); i++) { @@ -237,7 +232,6 @@ slave (int verbose) int rv; u8 *test_data; u8 *retrieved_data = 0; - int pid = getpid (); int i; memset (a, 0, sizeof (*a)); @@ -262,8 +256,7 @@ slave (int verbose) for (i = 0; i < 1000; i++) { - svm_fifo_dequeue_nowait (f, pid, vec_len (retrieved_data), - retrieved_data); + svm_fifo_dequeue_nowait (f, vec_len (retrieved_data), retrieved_data); if (memcmp (retrieved_data, test_data, 
vec_len (retrieved_data))) return clib_error_return (0, "retrieved data incorrect, '%s'", retrieved_data); diff --git a/src/uri/uri_tcp_test.c b/src/uri/uri_tcp_test.c index 2e15d36c..686c93f9 100755 --- a/src/uri/uri_tcp_test.c +++ b/src/uri/uri_tcp_test.c @@ -45,12 +45,13 @@ typedef struct svm_fifo_t *server_rx_fifo; svm_fifo_t *server_tx_fifo; - u32 vpp_session_handle; + u64 vpp_session_handle; } session_t; typedef enum { STATE_START, + STATE_ATTACHED, STATE_READY, STATE_DISCONNECTING, STATE_FAILED @@ -127,6 +128,34 @@ uri_tcp_test_main_t uri_tcp_test_main; #define NITER 4000000 #endif +static u8 * +format_api_error (u8 * s, va_list * args) +{ + uri_tcp_test_main_t *utm = &uri_tcp_test_main; + i32 error = va_arg (*args, u32); + uword *p; + + p = hash_get (utm->error_string_by_error_number, -error); + + if (p) + s = format (s, "%s", p[0]); + else + s = format (s, "%d", error); + return s; +} + +static void +init_error_string_table (uri_tcp_test_main_t * utm) +{ + utm->error_string_by_error_number = hash_create (0, sizeof (uword)); + +#define _(n,v,s) hash_set (utm->error_string_by_error_number, -v, s); + foreach_vnet_api_error; +#undef _ + + hash_set (utm->error_string_by_error_number, 99, "Misc"); +} + int wait_for_state_change (uri_tcp_test_main_t * utm, connection_state_t state) { @@ -150,7 +179,7 @@ wait_for_state_change (uri_tcp_test_main_t * utm, connection_state_t state) } void -application_attach (uri_tcp_test_main_t * utm) +application_send_attach (uri_tcp_test_main_t * utm) { vl_api_application_attach_t *bmp; u32 fifo_size = 3 << 20; @@ -160,8 +189,8 @@ application_attach (uri_tcp_test_main_t * utm) bmp->_vl_msg_id = ntohs (VL_API_APPLICATION_ATTACH); bmp->client_index = utm->my_client_index; bmp->context = ntohl (0xfeedface); - bmp->options[SESSION_OPTIONS_FLAGS] = - SESSION_OPTIONS_FLAGS_USE_FIFO | SESSION_OPTIONS_FLAGS_ADD_SEGMENT; + bmp->options[APP_OPTIONS_FLAGS] = + APP_OPTIONS_FLAGS_USE_FIFO | APP_OPTIONS_FLAGS_ADD_SEGMENT; bmp->options[SESSION_OPTIONS_RX_FIFO_SIZE] = fifo_size; bmp->options[SESSION_OPTIONS_TX_FIFO_SIZE] = fifo_size; bmp->options[SESSION_OPTIONS_ADD_SEGMENT_SIZE] = 128 << 20; @@ -169,6 +198,18 @@ application_attach (uri_tcp_test_main_t * utm) vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & bmp); } +int +application_attach (uri_tcp_test_main_t * utm) +{ + application_send_attach (utm); + if (wait_for_state_change (utm, STATE_ATTACHED)) + { + clib_warning ("timeout waiting for STATE_ATTACHED"); + return -1; + } + return 0; +} + void application_detach (uri_tcp_test_main_t * utm) { @@ -192,8 +233,8 @@ vl_api_application_attach_reply_t_handler (vl_api_application_attach_reply_t * if (mp->retval) { - uword *errp = hash_get (utm->error_string_by_error_number, -mp->retval); - clib_warning ("attach failed: %s", *errp); + clib_warning ("attach failed: %U", format_api_error, + clib_net_to_host_u32 (mp->retval)); utm->state = STATE_FAILED; return; } @@ -220,7 +261,7 @@ vl_api_application_attach_reply_t_handler (vl_api_application_attach_reply_t * utm->our_event_queue = (unix_shared_memory_queue_t *) mp->app_event_queue_address; - + utm->state = STATE_ATTACHED; } static void @@ -231,18 +272,6 @@ vl_api_application_detach_reply_t_handler (vl_api_application_detach_reply_t * clib_warning ("detach returned with err: %d", mp->retval); } -static void -init_error_string_table (uri_tcp_test_main_t * utm) -{ - utm->error_string_by_error_number = hash_create (0, sizeof (uword)); - -#define _(n,v,s) hash_set (utm->error_string_by_error_number, -v, s); - 
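format_api_error above is a thin wrapper around this lookup: the table is keyed by the negated error code, so a (negative) retval is negated again before the hash_get. A standalone sketch of the same pattern; api_error_to_string is an illustrative name.

static const char *
api_error_to_string (uri_tcp_test_main_t * utm, i32 retval)
{
  /* table entries were installed as hash_set (h, -v, s) */
  uword *p = hash_get (utm->error_string_by_error_number, -retval);
  return p ? (const char *) p[0] : "unknown error";
}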
foreach_vnet_api_error; -#undef _ - - hash_set (utm->error_string_by_error_number, 99, "Misc"); -} - static void stop_signal (int signum) { @@ -392,7 +421,7 @@ client_handle_fifo_event_rx (uri_tcp_test_main_t * utm, /* Read the bytes */ do { - n_read = svm_fifo_dequeue_nowait (rx_fifo, 0, + n_read = svm_fifo_dequeue_nowait (rx_fifo, clib_min (vec_len (utm->rx_buf), bytes), utm->rx_buf); if (n_read > 0) @@ -432,11 +461,11 @@ client_handle_event_queue (uri_tcp_test_main_t * utm) 0 /* nowait */ ); switch (e->event_type) { - case FIFO_EVENT_SERVER_RX: + case FIFO_EVENT_APP_RX: client_handle_fifo_event_rx (utm, e); break; - case FIFO_EVENT_SERVER_EXIT: + case FIFO_EVENT_DISCONNECT: return; default: @@ -458,11 +487,11 @@ client_rx_thread_fn (void *arg) 0 /* nowait */ ); switch (e->event_type) { - case FIFO_EVENT_SERVER_RX: + case FIFO_EVENT_APP_RX: client_handle_fifo_event_rx (utm, e); break; - case FIFO_EVENT_SERVER_EXIT: + case FIFO_EVENT_DISCONNECT: return 0; default: clib_warning ("unknown event type %d", e->event_type); @@ -487,9 +516,8 @@ vl_api_connect_uri_reply_t_handler (vl_api_connect_uri_reply_t * mp) if (mp->retval) { - uword *errp = hash_get (utm->error_string_by_error_number, - -clib_net_to_host_u32 (mp->retval)); - clib_warning ("connection failed with code: %s", *errp); + clib_warning ("connection failed with code: %U", format_api_error, + clib_net_to_host_u32 (mp->retval)); utm->state = STATE_FAILED; return; } @@ -551,7 +579,7 @@ send_test_chunk (uri_tcp_test_main_t * utm, svm_fifo_t * tx_fifo, int mypid, { actual_write = bytes_to_snd > queue_max_chunk ? queue_max_chunk : bytes_to_snd; - rv = svm_fifo_enqueue_nowait (tx_fifo, mypid, actual_write, + rv = svm_fifo_enqueue_nowait (tx_fifo, actual_write, test_data + test_buf_offset); if (rv > 0) @@ -564,7 +592,7 @@ send_test_chunk (uri_tcp_test_main_t * utm, svm_fifo_t * tx_fifo, int mypid, { /* Fabricate TX event, send to vpp */ evt.fifo = tx_fifo; - evt.event_type = FIFO_EVENT_SERVER_TX; + evt.event_type = FIFO_EVENT_APP_TX; evt.event_id = serial_number++; unix_shared_memory_queue_add (utm->vpp_event_queue, @@ -619,7 +647,7 @@ client_send_data (uri_tcp_test_main_t * utm) } void -client_connect (uri_tcp_test_main_t * utm) +client_send_connect (uri_tcp_test_main_t * utm) { vl_api_connect_uri_t *cmp; cmp = vl_msg_api_alloc (sizeof (*cmp)); @@ -632,8 +660,20 @@ client_connect (uri_tcp_test_main_t * utm) vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & cmp); } +int +client_connect (uri_tcp_test_main_t * utm) +{ + client_send_connect (utm); + if (wait_for_state_change (utm, STATE_READY)) + { + clib_warning ("Connect failed"); + return -1; + } + return 0; +} + void -client_disconnect (uri_tcp_test_main_t * utm) +client_send_disconnect (uri_tcp_test_main_t * utm) { session_t *connected_session; vl_api_disconnect_session_t *dmp; @@ -647,16 +687,29 @@ client_disconnect (uri_tcp_test_main_t * utm) vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & dmp); } +int +client_disconnect (uri_tcp_test_main_t * utm) +{ + client_send_disconnect (utm); + if (wait_for_state_change (utm, STATE_START)) + { + clib_warning ("Disconnect failed"); + return -1; + } + return 0; +} + static void client_test (uri_tcp_test_main_t * utm) { int i; - application_attach (utm); - client_connect (utm); + if (application_attach (utm)) + return; - if (wait_for_state_change (utm, STATE_READY)) + if (client_connect (utm)) { + application_detach (utm); return; } @@ -671,11 +724,6 @@ client_test (uri_tcp_test_main_t * utm) /* Disconnect */ client_disconnect (utm); - if 
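application_attach, client_connect and client_disconnect in this file all follow the same send-then-wait shape; factored out, the pattern looks like the sketch below. request_and_wait is illustrative, not part of the patch; a caller would write, e.g., request_and_wait (utm, client_send_connect, STATE_READY, "connect").

static int
request_and_wait (uri_tcp_test_main_t * utm,
		  void (*send_fn) (uri_tcp_test_main_t *),
		  connection_state_t wanted, const char *what)
{
  /* fire the async binary-api request... */
  send_fn (utm);

  /* ...then spin until the reply handler moves utm->state */
  if (wait_for_state_change (utm, wanted))
    {
      clib_warning ("%s: timeout waiting for state %d", what, wanted);
      return -1;
    }
  return 0;
}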
(wait_for_state_change (utm, STATE_START)) - { - clib_warning ("Disconnect failed"); - return; - } application_detach (utm); } @@ -686,9 +734,8 @@ vl_api_bind_uri_reply_t_handler (vl_api_bind_uri_reply_t * mp) if (mp->retval) { - uword *errp = hash_get (utm->error_string_by_error_number, - -clib_net_to_host_u32 (mp->retval)); - clib_warning ("bind failed: %s", (char *) *errp); + clib_warning ("bind failed: %s", format_api_error, + clib_net_to_host_u32 (mp->retval)); utm->state = STATE_FAILED; return; } @@ -869,7 +916,7 @@ server_handle_fifo_event_rx (uri_tcp_test_main_t * utm, /* Read the bytes */ do { - n_read = svm_fifo_dequeue_nowait (rx_fifo, 0, vec_len (utm->rx_buf), + n_read = svm_fifo_dequeue_nowait (rx_fifo, vec_len (utm->rx_buf), utm->rx_buf); if (n_read > 0) bytes -= n_read; @@ -882,7 +929,7 @@ server_handle_fifo_event_rx (uri_tcp_test_main_t * utm, { do { - rv = svm_fifo_enqueue_nowait (tx_fifo, 0, n_read, utm->rx_buf); + rv = svm_fifo_enqueue_nowait (tx_fifo, n_read, utm->rx_buf); } while (rv <= 0 && !utm->time_to_stop); @@ -891,7 +938,7 @@ server_handle_fifo_event_rx (uri_tcp_test_main_t * utm, { /* Fabricate TX event, send to vpp */ evt.fifo = tx_fifo; - evt.event_type = FIFO_EVENT_SERVER_TX; + evt.event_type = FIFO_EVENT_APP_TX; evt.event_id = e->event_id; q = utm->vpp_event_queue; @@ -914,11 +961,11 @@ server_handle_event_queue (uri_tcp_test_main_t * utm) 0 /* nowait */ ); switch (e->event_type) { - case FIFO_EVENT_SERVER_RX: + case FIFO_EVENT_APP_RX: server_handle_fifo_event_rx (utm, e); break; - case FIFO_EVENT_SERVER_EXIT: + case FIFO_EVENT_DISCONNECT: return; default: @@ -936,7 +983,7 @@ server_handle_event_queue (uri_tcp_test_main_t * utm) } void -server_listen (uri_tcp_test_main_t * utm) +server_send_listen (uri_tcp_test_main_t * utm) { vl_api_bind_uri_t *bmp; bmp = vl_msg_api_alloc (sizeof (*bmp)); @@ -949,8 +996,20 @@ server_listen (uri_tcp_test_main_t * utm) vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & bmp); } +int +server_listen (uri_tcp_test_main_t * utm) +{ + server_send_listen (utm); + if (wait_for_state_change (utm, STATE_READY)) + { + clib_warning ("timeout waiting for STATE_READY"); + return -1; + } + return 0; +} + void -server_unbind (uri_tcp_test_main_t * utm) +server_send_unbind (uri_tcp_test_main_t * utm) { vl_api_unbind_uri_t *ump; @@ -963,31 +1022,33 @@ server_unbind (uri_tcp_test_main_t * utm) vl_msg_api_send_shmem (utm->vl_input_queue, (u8 *) & ump); } +int +server_unbind (uri_tcp_test_main_t * utm) +{ + server_send_unbind (utm); + if (wait_for_state_change (utm, STATE_START)) + { + clib_warning ("timeout waiting for STATE_START"); + return -1; + } + return 0; +} + void server_test (uri_tcp_test_main_t * utm) { - application_attach (utm); + if (application_attach (utm)) + return; /* Bind to uri */ - server_listen (utm); - - if (wait_for_state_change (utm, STATE_READY)) - { - clib_warning ("timeout waiting for STATE_READY"); - return; - } + if (server_listen (utm)) + return; /* Enter handle event loop */ server_handle_event_queue (utm); /* Cleanup */ - server_unbind (utm); - - if (wait_for_state_change (utm, STATE_START)) - { - clib_warning ("timeout waiting for STATE_START"); - return; - } + server_send_unbind (utm); application_detach (utm); diff --git a/src/uri/uri_udp_test.c b/src/uri/uri_udp_test.c index 598052bc..266215c8 100644 --- a/src/uri/uri_udp_test.c +++ b/src/uri/uri_udp_test.c @@ -164,7 +164,7 @@ setup_signal_handlers (void) } void -application_attach (uri_udp_test_main_t * utm) +application_send_attach (uri_udp_test_main_t * 
utm) { vl_api_application_attach_t *bmp; u32 fifo_size = 3 << 20; @@ -174,8 +174,8 @@ application_attach (uri_udp_test_main_t * utm) bmp->_vl_msg_id = ntohs (VL_API_APPLICATION_ATTACH); bmp->client_index = utm->my_client_index; bmp->context = ntohl (0xfeedface); - bmp->options[SESSION_OPTIONS_FLAGS] = - SESSION_OPTIONS_FLAGS_USE_FIFO | SESSION_OPTIONS_FLAGS_ADD_SEGMENT; + bmp->options[APP_OPTIONS_FLAGS] = + APP_OPTIONS_FLAGS_USE_FIFO | APP_OPTIONS_FLAGS_ADD_SEGMENT; bmp->options[SESSION_OPTIONS_RX_FIFO_SIZE] = fifo_size; bmp->options[SESSION_OPTIONS_TX_FIFO_SIZE] = fifo_size; bmp->options[SESSION_OPTIONS_ADD_SEGMENT_SIZE] = 128 << 20; @@ -307,7 +307,7 @@ cut_through_thread_fn (void *arg) /* We read from the tx fifo and write to the rx fifo */ do { - actual_transfer = svm_fifo_dequeue_nowait (tx_fifo, 0, + actual_transfer = svm_fifo_dequeue_nowait (tx_fifo, vec_len (my_copy_buffer), my_copy_buffer); } @@ -318,7 +318,7 @@ cut_through_thread_fn (void *arg) buffer_offset = 0; while (actual_transfer > 0) { - rv = svm_fifo_enqueue_nowait (rx_fifo, 0, actual_transfer, + rv = svm_fifo_enqueue_nowait (rx_fifo, actual_transfer, my_copy_buffer + buffer_offset); if (rv > 0) { @@ -357,7 +357,6 @@ client_send (uri_udp_test_main_t * utm, session_t * session) u64 bytes_received = 0, bytes_sent = 0; i32 bytes_to_read; int rv; - int mypid = getpid (); f64 before, after, delta, bytes_per_second; svm_fifo_t *rx_fifo, *tx_fifo; int buffer_offset, bytes_to_send = 0; @@ -382,8 +381,7 @@ client_send (uri_udp_test_main_t * utm, session_t * session) buffer_offset = 0; while (bytes_to_send > 0) { - rv = svm_fifo_enqueue_nowait (tx_fifo, mypid, - bytes_to_send, + rv = svm_fifo_enqueue_nowait (tx_fifo, bytes_to_send, test_data + buffer_offset); if (rv > 0) @@ -402,7 +400,7 @@ client_send (uri_udp_test_main_t * utm, session_t * session) buffer_offset = 0; while (bytes_to_read > 0) { - rv = svm_fifo_dequeue_nowait (rx_fifo, mypid, + rv = svm_fifo_dequeue_nowait (rx_fifo, bytes_to_read, utm->rx_buf + buffer_offset); if (rv > 0) @@ -415,8 +413,8 @@ client_send (uri_udp_test_main_t * utm, session_t * session) } while (bytes_received < bytes_sent) { - rv = svm_fifo_dequeue_nowait (rx_fifo, mypid, - vec_len (utm->rx_buf), utm->rx_buf); + rv = + svm_fifo_dequeue_nowait (rx_fifo, vec_len (utm->rx_buf), utm->rx_buf); if (rv > 0) { #if CLIB_DEBUG > 0 @@ -459,7 +457,7 @@ uri_udp_client_test (uri_udp_test_main_t * utm) { session_t *session; - application_attach (utm); + application_send_attach (utm); udp_client_connect (utm); if (wait_for_state_change (utm, STATE_READY)) @@ -559,8 +557,8 @@ vl_api_connect_uri_t_handler (vl_api_connect_uri_t * mp) 128 * 1024); ASSERT (session->server_tx_fifo); - session->server_rx_fifo->server_session_index = session - utm->sessions; - session->server_tx_fifo->server_session_index = session - utm->sessions; + session->server_rx_fifo->master_session_index = session - utm->sessions; + session->server_tx_fifo->master_session_index = session - utm->sessions; utm->cut_through_session_index = session - utm->sessions; rv = pthread_create (&utm->cut_through_thread_handle, @@ -805,19 +803,19 @@ server_handle_fifo_event_rx (uri_udp_test_main_t * utm, do { - nbytes = svm_fifo_dequeue_nowait (rx_fifo, 0, - vec_len (utm->rx_buf), utm->rx_buf); + nbytes = svm_fifo_dequeue_nowait (rx_fifo, vec_len (utm->rx_buf), + utm->rx_buf); } while (nbytes <= 0); do { - rv = svm_fifo_enqueue_nowait (tx_fifo, 0, nbytes, utm->rx_buf); + rv = svm_fifo_enqueue_nowait (tx_fifo, nbytes, utm->rx_buf); } while (rv == -2); /* 
Fabricate TX event, send to vpp */ evt.fifo = tx_fifo; - evt.event_type = FIFO_EVENT_SERVER_TX; + evt.event_type = FIFO_EVENT_APP_TX; evt.event_id = e->event_id; if (svm_fifo_set_event (tx_fifo)) @@ -839,11 +837,11 @@ server_handle_event_queue (uri_udp_test_main_t * utm) 0 /* nowait */ ); switch (e->event_type) { - case FIFO_EVENT_SERVER_RX: + case FIFO_EVENT_APP_RX: server_handle_fifo_event_rx (utm, e); break; - case FIFO_EVENT_SERVER_EXIT: + case FIFO_EVENT_DISCONNECT: return; default: @@ -893,7 +891,7 @@ void udp_server_test (uri_udp_test_main_t * utm) { - application_attach (utm); + application_send_attach (utm); /* Bind to uri */ server_listen (utm); diff --git a/src/vnet/session/application.c b/src/vnet/session/application.c index 5a45537b..ccf9837f 100644 --- a/src/vnet/session/application.c +++ b/src/vnet/session/application.c @@ -87,14 +87,17 @@ application_new () void application_del (application_t * app) { - api_main_t *am = &api_main; - void *oldheap; segment_manager_t *sm; u64 handle; u32 index, *handles = 0; int i; vnet_unbind_args_t _a, *a = &_a; + /* + * The app event queue allocated in first segment is cleared with + * the segment manager. No need to explicitly free it. + */ + /* * Cleanup segment managers */ @@ -120,14 +123,6 @@ application_del (application_t * app) vnet_unbind (a); } - /* - * Free the event fifo in the /vpe-api shared-memory segment - */ - oldheap = svm_push_data_heap (am->vlib_rp); - if (app->event_queue) - unix_shared_memory_queue_free (app->event_queue); - svm_pop_heap (oldheap); - application_table_del (app); pool_put (app_pool, app); } @@ -149,30 +144,14 @@ int application_init (application_t * app, u32 api_client_index, u64 * options, session_cb_vft_t * cb_fns) { - api_main_t *am = &api_main; segment_manager_t *sm; segment_manager_properties_t *props; - void *oldheap; - u32 app_evt_queue_size; + u32 app_evt_queue_size, first_seg_size; int rv; app_evt_queue_size = options[APP_EVT_QUEUE_SIZE] > 0 ? 
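The fabricate-a-TX-event step above recurs in every app in this series; condensed, and judging from the call sites shown, svm_fifo_set_event() returns nonzero only when no event was already pending on the fifo, so at most one event per fifo sits in the queue until vpp clears it.

static void
notify_vpp_of_tx (unix_shared_memory_queue_t * vpp_evt_q,
		  svm_fifo_t * tx_fifo, u16 event_id)
{
  session_fifo_event_t evt;

  /* only signal vpp if the fifo had no event outstanding */
  if (svm_fifo_set_event (tx_fifo))
    {
      evt.fifo = tx_fifo;
      evt.event_type = FIFO_EVENT_APP_TX;
      evt.event_id = event_id;
      unix_shared_memory_queue_add (vpp_evt_q, (u8 *) & evt,
				    0 /* do wait for mutex */ );
    }
}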
options[APP_EVT_QUEUE_SIZE] : default_app_evt_queue_size; - /* Allocate event fifo in the /vpe-api shared-memory segment */ - oldheap = svm_push_data_heap (am->vlib_rp); - - /* Allocate server event queue */ - app->event_queue = - unix_shared_memory_queue_init (app_evt_queue_size, - sizeof (session_fifo_event_t), - 0 /* consumer pid */ , - 0 - /* (do not) signal when queue non-empty */ - ); - - svm_pop_heap (oldheap); - /* Setup segment manager */ sm = segment_manager_new (); sm->app_index = app->index; @@ -181,16 +160,21 @@ application_init (application_t * app, u32 api_client_index, u64 * options, props->rx_fifo_size = options[SESSION_OPTIONS_RX_FIFO_SIZE]; props->tx_fifo_size = options[SESSION_OPTIONS_TX_FIFO_SIZE]; props->add_segment = props->add_segment_size != 0; + props->use_private_segment = options[APP_OPTIONS_FLAGS] + & APP_OPTIONS_FLAGS_BUILTIN_APP; - if ((rv = segment_manager_init (sm, props, - options[SESSION_OPTIONS_SEGMENT_SIZE]))) + first_seg_size = options[SESSION_OPTIONS_SEGMENT_SIZE]; + if ((rv = segment_manager_init (sm, props, first_seg_size))) return rv; app->first_segment_manager = segment_manager_index (sm); app->api_client_index = api_client_index; - app->flags = options[SESSION_OPTIONS_FLAGS]; + app->flags = options[APP_OPTIONS_FLAGS]; app->cb_fns = *cb_fns; + /* Allocate app event queue in the first shared-memory segment */ + app->event_queue = segment_manager_alloc_queue (sm, app_evt_queue_size); + /* Check that the obvious things are properly set up */ application_verify_cb_fns (cb_fns); @@ -451,8 +435,8 @@ application_format_connects (application_t * app, int verbose) continue; fifo = fifos[i]; - session_index = fifo->server_session_index; - thread_index = fifo->server_thread_index; + session_index = fifo->master_session_index; + thread_index = fifo->master_thread_index; session = stream_session_get (session_index, thread_index); str = format (0, "%U", format_stream_session, session, verbose); diff --git a/src/vnet/session/application.h b/src/vnet/session/application.h index 6bcee9d3..35caae85 100644 --- a/src/vnet/session/application.h +++ b/src/vnet/session/application.h @@ -61,18 +61,6 @@ typedef struct _application /** Flags */ u32 flags; - /* Stream server mode: accept or connect - * TODO REMOVE*/ - u8 mode; - - /** Index of the listen session or connect session - * TODO REMOVE*/ - u32 session_index; - - /** Session thread index for client connect sessions - * TODO REMOVE */ - u32 thread_index; - /* * Binary API interface to external app */ diff --git a/src/vnet/session/application_interface.c b/src/vnet/session/application_interface.c index 96d2c621..ad44baa1 100644 --- a/src/vnet/session/application_interface.c +++ b/src/vnet/session/application_interface.c @@ -142,7 +142,7 @@ vnet_connect_i (u32 app_index, u32 api_context, session_type_t sst, * Server is willing to have a direct fifo connection created * instead of going through the state machine, etc. */ - if (server->flags & SESSION_OPTIONS_FLAGS_USE_FIFO) + if (server->flags & APP_OPTIONS_FLAGS_USE_FIFO) return server->cb_fns. redirect_connect_callback (server->api_client_index, mp); } @@ -363,7 +363,11 @@ vnet_disconnect_session (vnet_disconnect_args_t * a) if (!s || s->app_index != a->app_index) return VNET_API_ERROR_INVALID_VALUE; - stream_session_disconnect (s); + /* We're peeking into another's thread pool. 
Make sure */ + ASSERT (s->session_index == index); + + session_send_session_evt_to_thread (a->handle, FIFO_EVENT_DISCONNECT, + thread_index); return 0; } @@ -395,24 +399,6 @@ vnet_connect (vnet_connect_args_t * a) return vnet_connect_i (a->app_index, a->api_context, sst, &a->tep, a->mp); } -int -vnet_disconnect (vnet_disconnect_args_t * a) -{ - stream_session_t *session; - u32 session_index, thread_index; - - if (api_parse_session_handle (a->handle, &session_index, &thread_index)) - { - clib_warning ("Invalid handle"); - return -1; - } - - session = stream_session_get (session_index, thread_index); - stream_session_disconnect (session); - - return 0; -} - /* * fd.io coding-style-patch-verification: ON * diff --git a/src/vnet/session/application_interface.h b/src/vnet/session/application_interface.h index 2c497531..7d924c14 100644 --- a/src/vnet/session/application_interface.h +++ b/src/vnet/session/application_interface.h @@ -30,10 +30,18 @@ typedef enum _session_api_proto typedef struct _vnet_app_attach_args_t { + /** Binary API client index */ u32 api_client_index; + + /** Application and segment manager options */ u64 *options; + + /** Session to application callback functions */ session_cb_vft_t *session_cb_vft; + /** Flag that indicates if app is builtin */ + u8 builtin; + /* * Results */ @@ -110,7 +118,7 @@ typedef struct _vnet_disconnect_args_t typedef enum { APP_EVT_QUEUE_SIZE, - SESSION_OPTIONS_FLAGS, + APP_OPTIONS_FLAGS, SESSION_OPTIONS_SEGMENT_SIZE, SESSION_OPTIONS_ADD_SEGMENT_SIZE, SESSION_OPTIONS_RX_FIFO_SIZE, @@ -119,11 +127,30 @@ typedef enum SESSION_OPTIONS_N_OPTIONS } app_attach_options_index_t; -/** Server can handle delegated connect requests from local clients */ -#define SESSION_OPTIONS_FLAGS_USE_FIFO (1<<0) +#define foreach_app_options_flags \ + _(USE_FIFO, "Use FIFO with redirects") \ + _(ADD_SEGMENT, "Add segment and signal app if needed") \ + _(BUILTIN_APP, "Application is builtin") \ + +typedef enum _app_options +{ +#define _(sym, str) APP_OPTIONS_##sym, + foreach_app_options_flags +#undef _ +} app_options_t; + +typedef enum _app_options_flags +{ +#define _(sym, str) APP_OPTIONS_FLAGS_##sym = 1 << APP_OPTIONS_##sym, + foreach_app_options_flags +#undef _ +} app_options_flags_t; -/** Server wants vpp to add segments when out of memory for fifos */ -#define SESSION_OPTIONS_FLAGS_ADD_SEGMENT (1<<1) +///** Server can handle delegated connect requests from local clients */ +//#define APP_OPTIONS_FLAGS_USE_FIFO (1<<0) +// +///** Server wants vpp to add segments when out of memory for fifos */ +//#define APP_OPTIONS_FLAGS_ADD_SEGMENT (1<<1) #define VNET_CONNECT_REDIRECTED 123 @@ -138,7 +165,6 @@ int vnet_disconnect_session (vnet_disconnect_args_t * a); int vnet_bind (vnet_bind_args_t * a); int vnet_connect (vnet_connect_args_t * a); int vnet_unbind (vnet_unbind_args_t * a); -int vnet_disconnect (vnet_disconnect_args_t * a); int api_parse_session_handle (u64 handle, u32 * session_index, diff --git a/src/vnet/session/node.c b/src/vnet/session/node.c index dd211c51..210754fa 100644 --- a/src/vnet/session/node.c +++ b/src/vnet/session/node.c @@ -218,8 +218,8 @@ session_tx_fifo_read_and_snd_i (vlib_main_t * vm, vlib_node_runtime_t * node, * 2) buffer chains */ if (peek_data) { - n_bytes_read = svm_fifo_peek (s0->server_tx_fifo, s0->pid, - rx_offset, len_to_deq0, data0); + n_bytes_read = svm_fifo_peek (s0->server_tx_fifo, rx_offset, + len_to_deq0, data0); if (n_bytes_read <= 0) goto dequeue_fail; @@ -230,8 +230,7 @@ session_tx_fifo_read_and_snd_i (vlib_main_t * vm, 
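Spelled out, the foreach_app_options_flags expansion above generates bit positions in app_options_t (APP_OPTIONS_USE_FIFO = 0, APP_OPTIONS_ADD_SEGMENT = 1, APP_OPTIONS_BUILTIN_APP = 2) and the matching masks in app_options_flags_t (1, 2 and 4). A one-line consumer sketch; app_is_builtin is an illustrative name.

#include <vnet/session/application_interface.h>

static int
app_is_builtin (u64 * options)
{
  /* APP_OPTIONS_FLAGS indexes the options array; the mask tests one
   * of the generated flag bits */
  return (options[APP_OPTIONS_FLAGS] & APP_OPTIONS_FLAGS_BUILTIN_APP) != 0;
}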
vlib_node_runtime_t * node, else { n_bytes_read = svm_fifo_dequeue_nowait (s0->server_tx_fifo, - s0->pid, len_to_deq0, - data0); + len_to_deq0, data0); if (n_bytes_read <= 0) goto dequeue_fail; } @@ -301,6 +300,26 @@ session_tx_fifo_dequeue_and_snd (vlib_main_t * vm, vlib_node_runtime_t * node, n_tx_pkts, 0); } +stream_session_t * +session_event_get_session (session_fifo_event_t * e0, u8 thread_index) +{ + svm_fifo_t *f0; + stream_session_t *s0; + u32 session_index0; + + f0 = e0->fifo; + session_index0 = f0->master_session_index; + + /* $$$ add multiple event queues, per vpp worker thread */ + ASSERT (f0->master_thread_index == thread_index); + + s0 = stream_session_get_if_valid (session_index0, thread_index); + + ASSERT (s0->thread_index == thread_index); + + return s0; +} + static uword session_queue_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) @@ -370,34 +389,24 @@ skip_dequeue: n_events = vec_len (my_fifo_events); for (i = 0; i < n_events; i++) { - svm_fifo_t *f0; /* $$$ prefetch 1 ahead maybe */ - stream_session_t *s0; - u32 session_index0; + stream_session_t *s0; /* $$$ prefetch 1 ahead maybe */ session_fifo_event_t *e0; e0 = &my_fifo_events[i]; - f0 = e0->fifo; - session_index0 = f0->server_session_index; - - /* $$$ add multiple event queues, per vpp worker thread */ - ASSERT (f0->server_thread_index == my_thread_index); - s0 = stream_session_get_if_valid (session_index0, my_thread_index); - - if (CLIB_DEBUG && !s0) + switch (e0->event_type) { - clib_warning ("It's dead, Jim!"); - continue; - } - - if (PREDICT_FALSE (s0->session_state == SESSION_STATE_CLOSED)) - continue; + case FIFO_EVENT_APP_TX: + s0 = session_event_get_session (e0, my_thread_index); - ASSERT (s0->thread_index == my_thread_index); + if (CLIB_DEBUG && !s0) + { + clib_warning ("It's dead, Jim!"); + continue; + } - switch (e0->event_type) - { - case FIFO_EVENT_SERVER_TX: + if (PREDICT_FALSE (s0->session_state == SESSION_STATE_CLOSED)) + continue; /* Spray packets in per session type frames, since they go to * different nodes */ rv = (smm->session_tx_fns[s0->session_type]) (vm, node, smm, e0, s0, @@ -408,10 +417,12 @@ skip_dequeue: goto done; break; - case FIFO_EVENT_SERVER_EXIT: + case FIFO_EVENT_DISCONNECT: + s0 = stream_session_get_from_handle (e0->session_handle); stream_session_disconnect (s0); break; case FIFO_EVENT_BUILTIN_RX: + s0 = session_event_get_session (e0, my_thread_index); svm_fifo_unset_event (s0->server_rx_fifo); /* Get session's server */ app = application_get (s0->app_index); diff --git a/src/vnet/session/segment_manager.c b/src/vnet/session/segment_manager.c index 16e5bc56..e0532320 100644 --- a/src/vnet/session/segment_manager.c +++ b/src/vnet/session/segment_manager.c @@ -27,6 +27,11 @@ u32 segment_name_counter = 0; */ segment_manager_t *segment_managers = 0; +/** + * Process private segment index + */ +u32 private_segment_index = ~0; + /** * Default fifo and segment size. TODO config. 
*/ @@ -100,6 +105,26 @@ session_manager_add_first_segment (segment_manager_t * sm, u32 segment_size) return rv; } +static void +segment_manager_alloc_process_private_segment () +{ + svm_fifo_segment_create_args_t _a, *a = &_a; + + if (private_segment_index != ~0) + return; + + memset (a, 0, sizeof (*a)); + a->segment_name = "process-private-segment"; + a->segment_size = ~0; + a->new_segment_index = ~0; + + if (svm_fifo_segment_create_process_private (a)) + clib_warning ("Failed to create process private segment"); + + private_segment_index = a->new_segment_index; + ASSERT (private_segment_index != ~0); +} + /** * Initializes segment manager based on options provided. * Returns error if svm segment allocation fails. @@ -114,7 +139,9 @@ segment_manager_init (segment_manager_t * sm, /* app allocates these */ sm->properties = properties; - if (first_seg_size > 0) + first_seg_size = first_seg_size > 0 ? first_seg_size : default_segment_size; + + if (sm->properties->use_private_segment == 0) { rv = session_manager_add_first_segment (sm, first_seg_size); if (rv) @@ -123,7 +150,15 @@ segment_manager_init (segment_manager_t * sm, return rv; } } + else + { + if (private_segment_index == ~0) + segment_manager_alloc_process_private_segment (); + ASSERT (private_segment_index != ~0); + vec_add1 (sm->segment_indices, private_segment_index); + } + clib_spinlock_init (&sm->lockp); return 0; } @@ -162,8 +197,8 @@ segment_manager_del (segment_manager_t * sm) stream_session_t *session; fifo = fifos[i]; - session_index = fifo->server_session_index; - thread_index = fifo->server_thread_index; + session_index = fifo->master_session_index; + thread_index = fifo->master_thread_index; session = stream_session_get (session_index, thread_index); @@ -183,7 +218,9 @@ segment_manager_del (segment_manager_t * sm) deleted_thread_indices[i]); /* Instead of directly removing the session call disconnect */ - stream_session_disconnect (session); + session_send_session_evt_to_thread (stream_session_handle (session), + FIFO_EVENT_DISCONNECT, + deleted_thread_indices[i]); /* stream_session_table_del (smm, session); @@ -200,6 +237,7 @@ segment_manager_del (segment_manager_t * sm) /* svm_fifo_segment_delete (fifo_segment); */ } + clib_spinlock_free (&sm->lockp); vec_free (deleted_sessions); vec_free (deleted_thread_indices); pool_put (segment_managers, sm); @@ -232,9 +270,13 @@ segment_manager_alloc_session_fifos (segment_manager_t * sm, u8 added_a_segment = 0; int i; - /* Allocate svm fifos */ ASSERT (vec_len (sm->segment_indices)); + /* Make sure we don't have multiple threads trying to allocate segments + * at the same time. 
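The "Protect session fifo (de)allocation" item from the commit message reduces to bracketing the segment walk with the new per-manager spinlock; a condensed sketch of the pattern, where do_alloc stands in for the loop above and is purely illustrative. Note the unlock deliberately happens before any app callback runs, matching the reordering at the end of segment_manager_alloc_session_fifos, and that this sketch unlocks on the failure path too, which the add-segment error return above does not.

#include <vnet/session/segment_manager.h>

static int
alloc_fifos_locked (segment_manager_t * sm,
		    int (*do_alloc) (segment_manager_t *))
{
  int rv;

  /* serialize allocation (and possible segment growth) across threads */
  clib_spinlock_lock (&sm->lockp);
  rv = do_alloc (sm);
  clib_spinlock_unlock (&sm->lockp);

  return rv;
}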
*/ + clib_spinlock_lock (&sm->lockp); + + /* Allocate svm fifos */ again: for (i = 0; i < vec_len (sm->segment_indices); i++) { @@ -283,7 +325,9 @@ again: } if (session_manager_add_segment (sm)) - return VNET_API_ERROR_URI_FIFO_CREATE_FAILED; + { + return VNET_API_ERROR_URI_FIFO_CREATE_FAILED; + } added_a_segment = 1; goto again; @@ -295,14 +339,16 @@ again: } } - if (added_a_segment) - return segment_manager_notify_app_seg_add (sm, *fifo_segment_index); - /* Backpointers to segment manager */ sm_index = segment_manager_index (sm); (*server_tx_fifo)->segment_manager = sm_index; (*server_rx_fifo)->segment_manager = sm_index; + clib_spinlock_unlock (&sm->lockp); + + if (added_a_segment) + return segment_manager_notify_app_seg_add (sm, *fifo_segment_index); + return 0; } @@ -313,26 +359,72 @@ segment_manager_dealloc_fifos (u32 svm_segment_index, svm_fifo_t * rx_fifo, segment_manager_t *sm; svm_fifo_segment_private_t *fifo_segment; + sm = segment_manager_get_if_valid (rx_fifo->segment_manager); + + /* It's possible to have no segment manager if the session was removed + * as result of a detach */ + if (!sm) + return; + fifo_segment = svm_fifo_get_segment (svm_segment_index); svm_fifo_segment_free_fifo (fifo_segment, rx_fifo); svm_fifo_segment_free_fifo (fifo_segment, tx_fifo); - /* If we have segment manager, try doing some cleanup. - * It's possible to have no segment manager if the session was removed - * as result of a detach */ - sm = segment_manager_get_if_valid (rx_fifo->segment_manager); - if (sm) + /* Remove segment only if it holds no fifos and not the first */ + if (sm->segment_indices[0] != svm_segment_index + && !svm_fifo_segment_has_fifos (fifo_segment)) { - /* Remove segment only if it holds no fifos and not the first */ - if (sm->segment_indices[0] != svm_segment_index - && !svm_fifo_segment_has_fifos (fifo_segment)) - { - svm_fifo_segment_delete (fifo_segment); - vec_del1 (sm->segment_indices, svm_segment_index); - } + svm_fifo_segment_delete (fifo_segment); + vec_del1 (sm->segment_indices, svm_segment_index); } } +/** + * Allocates shm queue in the first segment + */ +unix_shared_memory_queue_t * +segment_manager_alloc_queue (segment_manager_t * sm, u32 queue_size) +{ + ssvm_shared_header_t *sh; + svm_fifo_segment_private_t *segment; + unix_shared_memory_queue_t *q; + void *oldheap; + + ASSERT (sm->segment_indices != 0); + + segment = svm_fifo_get_segment (sm->segment_indices[0]); + sh = segment->ssvm.sh; + + oldheap = ssvm_push_heap (sh); + q = + unix_shared_memory_queue_init (queue_size, sizeof (session_fifo_event_t), + 0 /* consumer pid */ , 0 + /* signal when queue non-empty */ ); + ssvm_pop_heap (oldheap); + return q; +} + +/** + * Frees shm queue allocated in the first segment + */ +void +segment_manager_dealloc_queue (segment_manager_t * sm, + unix_shared_memory_queue_t * q) +{ + ssvm_shared_header_t *sh; + svm_fifo_segment_private_t *segment; + void *oldheap; + + ASSERT (sm->segment_indices != 0); + + segment = svm_fifo_get_segment (sm->segment_indices[0]); + sh = segment->ssvm.sh; + + oldheap = ssvm_push_heap (sh); + unix_shared_memory_queue_free (q); + ssvm_pop_heap (oldheap); +} + /* * fd.io coding-style-patch-verification: ON * diff --git a/src/vnet/session/segment_manager.h b/src/vnet/session/segment_manager.h index 778d6040..2710bb54 100644 --- a/src/vnet/session/segment_manager.h +++ b/src/vnet/session/segment_manager.h @@ -18,6 +18,10 @@ #include #include +#include +#include +#include + typedef struct _segment_manager_properties { /** Session fifo sizes. 
*/ @@ -30,10 +34,14 @@ typedef struct _segment_manager_properties /** Flag that indicates if additional segments should be created */ u8 add_segment; + /** Use private memory segment instead of shared memory */ + u8 use_private_segment; } segment_manager_properties_t; typedef struct _segment_manager { + clib_spinlock_t lockp; + /** segments mapped by this manager */ u32 *segment_indices; @@ -95,6 +103,10 @@ segment_manager_alloc_session_fifos (segment_manager_t * sm, void segment_manager_dealloc_fifos (u32 svm_segment_index, svm_fifo_t * rx_fifo, svm_fifo_t * tx_fifo); +unix_shared_memory_queue_t *segment_manager_alloc_queue (segment_manager_t * + sm, u32 queue_size); +void segment_manager_dealloc_queue (segment_manager_t * sm, + unix_shared_memory_queue_t * q); #endif /* SRC_VNET_SESSION_SEGMENT_MANAGER_H_ */ /* diff --git a/src/vnet/session/session.c b/src/vnet/session/session.c index e6cfe7da..d17c93f8 100644 --- a/src/vnet/session/session.c +++ b/src/vnet/session/session.c @@ -377,33 +377,6 @@ stream_session_lookup_transport6 (ip6_address_t * lcl, ip6_address_t * rmt, return 0; } -/** - * Allocate vpp event queue (once) per worker thread - */ -void -session_vpp_event_queue_allocate (session_manager_main_t * smm, - u32 thread_index) -{ - api_main_t *am = &api_main; - void *oldheap; - - if (smm->vpp_event_queues[thread_index] == 0) - { - /* Allocate event fifo in the /vpe-api shared-memory segment */ - oldheap = svm_push_data_heap (am->vlib_rp); - - smm->vpp_event_queues[thread_index] = - unix_shared_memory_queue_init (2048 /* nels $$$$ config */ , - sizeof (session_fifo_event_t), - 0 /* consumer pid */ , - 0 - /* (do not) send signal when queue non-empty */ - ); - - svm_pop_heap (oldheap); - } -} - int stream_session_create_i (segment_manager_t * sm, transport_connection_t * tc, stream_session_t ** ret_s) @@ -428,11 +401,11 @@ stream_session_create_i (segment_manager_t * sm, transport_connection_t * tc, /* Initialize backpointers */ pool_index = s - smm->sessions[thread_index]; - server_rx_fifo->server_session_index = pool_index; - server_rx_fifo->server_thread_index = thread_index; + server_rx_fifo->master_session_index = pool_index; + server_rx_fifo->master_thread_index = thread_index; - server_tx_fifo->server_session_index = pool_index; - server_tx_fifo->server_thread_index = thread_index; + server_tx_fifo->master_session_index = pool_index; + server_tx_fifo->master_thread_index = thread_index; s->server_rx_fifo = server_rx_fifo; s->server_tx_fifo = server_tx_fifo; @@ -485,7 +458,7 @@ stream_session_enqueue_data (transport_connection_t * tc, u8 * data, u16 len, if (PREDICT_FALSE (len > svm_fifo_max_enqueue (s->server_rx_fifo))) return -1; - enqueued = svm_fifo_enqueue_nowait (s->server_rx_fifo, s->pid, len, data); + enqueued = svm_fifo_enqueue_nowait (s->server_rx_fifo, len, data); if (queue_event) { @@ -527,14 +500,14 @@ stream_session_peek_bytes (transport_connection_t * tc, u8 * buffer, u32 offset, u32 max_bytes) { stream_session_t *s = stream_session_get (tc->s_index, tc->thread_index); - return svm_fifo_peek (s->server_tx_fifo, s->pid, offset, max_bytes, buffer); + return svm_fifo_peek (s->server_tx_fifo, offset, max_bytes, buffer); } u32 stream_session_dequeue_drop (transport_connection_t * tc, u32 max_bytes) { stream_session_t *s = stream_session_get (tc->s_index, tc->thread_index); - return svm_fifo_dequeue_drop (s->server_tx_fifo, s->pid, max_bytes); + return svm_fifo_dequeue_drop (s->server_tx_fifo, max_bytes); } /** @@ -568,7 +541,7 @@ stream_session_enqueue_notify 
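stream_session_peek_bytes and stream_session_dequeue_drop above exist as a pair because the tx fifo doubles as the retransmit buffer: data is read without being consumed, and discarded only once acknowledged. A sketch of the intended calling pattern; the function name and the acked parameter are illustrative.

static void
retransmit_ack_sketch (svm_fifo_t * tx_fifo, u8 * buf, u32 len, u32 acked)
{
  /* read without consuming -- the bytes stay in the fifo in case a
   * retransmit is needed */
  svm_fifo_peek (tx_fifo, 0 /* relative offset */ , len, buf);

  /* once the peer acks, drop the acked bytes for good */
  svm_fifo_dequeue_drop (tx_fifo, acked);
}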
(stream_session_t * s, u8 block) { /* Fabricate event */ evt.fifo = s->server_rx_fifo; - evt.event_type = FIFO_EVENT_SERVER_RX; + evt.event_type = FIFO_EVENT_APP_RX; evt.event_id = serial_number++; /* Add event to server's event queue */ @@ -899,37 +872,45 @@ stream_session_stop_listen (stream_session_t * s) return 0; } +void +session_send_session_evt_to_thread (u64 session_handle, + fifo_event_type_t evt_type, + u32 thread_index) +{ + static u16 serial_number = 0; + session_fifo_event_t evt; + unix_shared_memory_queue_t *q; + + /* Fabricate event */ + evt.session_handle = session_handle; + evt.event_type = evt_type; + evt.event_id = serial_number++; + + q = session_manager_get_vpp_event_queue (thread_index); + + /* Based on request block (or not) for lack of space */ + if (PREDICT_TRUE (q->cursize < q->maxsize)) + unix_shared_memory_queue_add (q, (u8 *) & evt, + 0 /* do wait for mutex */ ); + else + { + clib_warning ("queue full"); + return; + } +} + /** * Disconnect session and propagate to transport. This should eventually * result in a delete notification that allows us to cleanup session state. * Called for both active/passive disconnects. + * + * Should be called from the session's thread. */ void stream_session_disconnect (stream_session_t * s) { -// session_fifo_event_t evt; - s->session_state = SESSION_STATE_CLOSED; - /* RPC to vpp evt queue in the right thread */ - tp_vfts[s->session_type].close (s->connection_index, s->thread_index); - -// { -// /* Fabricate event */ -// evt.fifo = s->server_rx_fifo; -// evt.event_type = FIFO_EVENT_SERVER_RX; -// evt.event_id = serial_number++; -// -// /* Based on request block (or not) for lack of space */ -// if (PREDICT_TRUE(q->cursize < q->maxsize)) -// unix_shared_memory_queue_add (app->event_queue, (u8 *) &evt, -// 0 /* do wait for mutex */); -// else -// { -// clib_warning("fifo full"); -// return -1; -// } -// } } /** @@ -976,6 +957,33 @@ session_get_transport_vft (u8 type) return &tp_vfts[type]; } +/** + * Allocate vpp event queue (once) per worker thread + */ +void +session_vpp_event_queue_allocate (session_manager_main_t * smm, + u32 thread_index) +{ + api_main_t *am = &api_main; + void *oldheap; + + if (smm->vpp_event_queues[thread_index] == 0) + { + /* Allocate event fifo in the /vpe-api shared-memory segment */ + oldheap = svm_push_data_heap (am->vlib_rp); + + smm->vpp_event_queues[thread_index] = + unix_shared_memory_queue_init (2048 /* nels $$$$ config */ , + sizeof (session_fifo_event_t), + 0 /* consumer pid */ , + 0 + /* (do not) send signal when queue non-empty */ + ); + + svm_pop_heap (oldheap); + } +} + static clib_error_t * session_manager_main_enable (vlib_main_t * vm) { @@ -1043,6 +1051,18 @@ session_manager_main_enable (vlib_main_t * vm) return 0; } +void +session_node_enable_disable (u8 is_en) +{ + u8 state = is_en ? 
VLIB_NODE_STATE_POLLING : VLIB_NODE_STATE_DISABLED; + /* *INDENT-OFF* */ + foreach_vlib_main (({ + vlib_node_set_state (this_vlib_main, session_queue_node.index, + state); + })); + /* *INDENT-ON* */ +} + clib_error_t * vnet_session_enable_disable (vlib_main_t * vm, u8 is_en) { @@ -1051,16 +1071,14 @@ vnet_session_enable_disable (vlib_main_t * vm, u8 is_en) if (session_manager_main.is_enabled) return 0; - vlib_node_set_state (vm, session_queue_node.index, - VLIB_NODE_STATE_POLLING); + session_node_enable_disable (is_en); return session_manager_main_enable (vm); } else { session_manager_main.is_enabled = 0; - vlib_node_set_state (vm, session_queue_node.index, - VLIB_NODE_STATE_DISABLED); + session_node_enable_disable (is_en); } return 0; diff --git a/src/vnet/session/session.h b/src/vnet/session/session.h index 6e4ea96d..8cd72f35 100644 --- a/src/vnet/session/session.h +++ b/src/vnet/session/session.h @@ -17,9 +17,6 @@ #include #include -#include -#include -#include #include #include @@ -31,10 +28,10 @@ typedef enum { - FIFO_EVENT_SERVER_RX, - FIFO_EVENT_SERVER_TX, + FIFO_EVENT_APP_RX, + FIFO_EVENT_APP_TX, FIFO_EVENT_TIMEOUT, - FIFO_EVENT_SERVER_EXIT, + FIFO_EVENT_DISCONNECT, FIFO_EVENT_BUILTIN_RX } fifo_event_type_t; @@ -96,7 +93,11 @@ typedef enum /* *INDENT-OFF* */ typedef CLIB_PACKED (struct { - svm_fifo_t * fifo; + union + { + svm_fifo_t * fifo; + u64 session_handle; + }; u8 event_type; u16 event_id; }) session_fifo_event_t; @@ -370,7 +371,9 @@ int stream_session_listen (stream_session_t * s, transport_endpoint_t * tep); int stream_session_stop_listen (stream_session_t * s); void stream_session_disconnect (stream_session_t * s); void stream_session_cleanup (stream_session_t * s); - +void session_send_session_evt_to_thread (u64 session_handle, + fifo_event_type_t evt_type, + u32 thread_index); u8 *format_stream_session (u8 * s, va_list * args); void session_register_transport (u8 type, const transport_proto_vft_t * vft); diff --git a/src/vnet/session/session_api.c b/src/vnet/session/session_api.c index 8116b673..79d67a2f 100755 --- a/src/vnet/session/session_api.c +++ b/src/vnet/session/session_api.c @@ -96,7 +96,7 @@ send_session_accept_callback (stream_session_t * s) memset (mp, 0, sizeof (*mp)); mp->_vl_msg_id = clib_host_to_net_u16 (VL_API_ACCEPT_SESSION); - + mp->context = server->index; listener = listen_session_get (s->session_type, s->listener_index); tp_vft = session_get_transport_vft (s->session_type); tc = tp_vft->get_connection (s->connection_index, s->thread_index); @@ -270,23 +270,6 @@ static session_cb_vft_t uri_session_cb_vft = { .redirect_connect_callback = redirect_connect_callback }; -static int -api_session_not_valid (u32 session_index, u32 thread_index) -{ - session_manager_main_t *smm = vnet_get_session_manager_main (); - stream_session_t *pool; - - if (thread_index >= vec_len (smm->sessions)) - return VNET_API_ERROR_INVALID_VALUE; - - pool = smm->sessions[thread_index]; - - if (pool_is_free_index (pool, session_index)) - return VNET_API_ERROR_INVALID_VALUE_2; - - return 0; -} - static void vl_api_session_enable_disable_t_handler (vl_api_session_enable_disable_t * mp) { @@ -324,9 +307,9 @@ vl_api_application_attach_t_handler (vl_api_application_attach_t * mp) rv = vnet_application_attach (a); done: + /* *INDENT-OFF* */ REPLY_MACRO2 (VL_API_APPLICATION_ATTACH_REPLY, ({ - rmp->retval = rv; if (!rv) { rmp->segment_name_length = 0; @@ -558,24 +541,33 @@ static void vl_api_accept_session_reply_t_handler (vl_api_accept_session_reply_t * mp) { stream_session_t *s; - int 
rv; u32 session_index, thread_index; - session_index = stream_session_index_from_handle (mp->handle); - thread_index = stream_session_thread_from_handle (mp->handle); - if (api_session_not_valid (session_index, thread_index)) - return; - - s = stream_session_get (session_index, thread_index); - rv = mp->retval; + vnet_disconnect_args_t _a, *a = &_a; - if (rv) + /* Server isn't interested, kill the session */ + if (mp->retval) { - /* Server isn't interested, kill the session */ - stream_session_disconnect (s); - return; + a->app_index = mp->context; + a->handle = mp->handle; + vnet_disconnect_session (a); + } + else + { + stream_session_parse_handle (mp->handle, &session_index, &thread_index); + s = stream_session_get_if_valid (session_index, thread_index); + if (!s) + { + clib_warning ("session doesn't exist"); + return; + } + if (s->app_index != mp->context) + { + clib_warning ("app doesn't own session"); + return; + } + /* XXX volatile? */ + s->session_state = SESSION_STATE_READY; } - - s->session_state = SESSION_STATE_READY; } static void diff --git a/src/vnet/tcp/builtin_client.c b/src/vnet/tcp/builtin_client.c index f8fbf28c..276beb21 100644 --- a/src/vnet/tcp/builtin_client.c +++ b/src/vnet/tcp/builtin_client.c @@ -62,8 +62,7 @@ send_test_chunk (tclient_main_t * tm, session_t * s) bytes_this_chunk = bytes_this_chunk < s->bytes_to_send ? bytes_this_chunk : s->bytes_to_send; - rv = svm_fifo_enqueue_nowait (s->server_tx_fifo, 0 /*pid */ , - bytes_this_chunk, + rv = svm_fifo_enqueue_nowait (s->server_tx_fifo, bytes_this_chunk, test_data + test_buf_offset); /* If we managed to enqueue data... */ @@ -95,7 +94,7 @@ send_test_chunk (tclient_main_t * tm, session_t * s) { /* Fabricate TX event, send to vpp */ evt.fifo = s->server_tx_fifo; - evt.event_type = FIFO_EVENT_SERVER_TX; + evt.event_type = FIFO_EVENT_APP_TX; evt.event_id = serial_number++; unix_shared_memory_queue_add (tm->vpp_event_queue, (u8 *) & evt, @@ -113,7 +112,7 @@ receive_test_chunk (tclient_main_t * tm, session_t * s) /* Allow enqueuing of new event */ // svm_fifo_unset_event (rx_fifo); - n_read = svm_fifo_dequeue_nowait (rx_fifo, 0, vec_len (tm->rx_buf), + n_read = svm_fifo_dequeue_nowait (rx_fifo, vec_len (tm->rx_buf), tm->rx_buf); if (n_read > 0) { @@ -457,6 +456,8 @@ attach_builtin_test_clients () options[SESSION_OPTIONS_ACCEPT_COOKIE] = 0x12345678; options[SESSION_OPTIONS_SEGMENT_SIZE] = (2 << 30); /*$$$$ config / arg */ + options[APP_OPTIONS_FLAGS] = APP_OPTIONS_FLAGS_BUILTIN_APP; + a->options = options; return vnet_application_attach (a); diff --git a/src/vnet/tcp/builtin_server.c b/src/vnet/tcp/builtin_server.c index 8308e3d9..34682699 100644 --- a/src/vnet/tcp/builtin_server.c +++ b/src/vnet/tcp/builtin_server.c @@ -180,7 +180,7 @@ builtin_server_rx_callback (stream_session_t * s) vec_validate (bsm->rx_buf, max_transfer - 1); _vec_len (bsm->rx_buf) = max_transfer; - actual_transfer = svm_fifo_dequeue_nowait (rx_fifo, 0, max_transfer, + actual_transfer = svm_fifo_dequeue_nowait (rx_fifo, max_transfer, bsm->rx_buf); ASSERT (actual_transfer == max_transfer); @@ -190,8 +190,7 @@ builtin_server_rx_callback (stream_session_t * s) * Echo back */ - n_written = - svm_fifo_enqueue_nowait (tx_fifo, 0, actual_transfer, bsm->rx_buf); + n_written = svm_fifo_enqueue_nowait (tx_fifo, actual_transfer, bsm->rx_buf); if (n_written != max_transfer) clib_warning ("short trout!"); @@ -200,7 +199,7 @@ builtin_server_rx_callback (stream_session_t * s) { /* Fabricate TX event, send to vpp */ evt.fifo = tx_fifo; - evt.event_type = 
FIFO_EVENT_SERVER_TX; + evt.event_type = FIFO_EVENT_APP_TX; evt.event_id = serial_number++; unix_shared_memory_queue_add (bsm->vpp_queue[s->thread_index], @@ -288,6 +287,7 @@ server_attach () a->options[SESSION_OPTIONS_SEGMENT_SIZE] = 128 << 20; a->options[SESSION_OPTIONS_RX_FIFO_SIZE] = 1 << 16; a->options[SESSION_OPTIONS_TX_FIFO_SIZE] = 1 << 16; + a->options[APP_OPTIONS_FLAGS] = APP_OPTIONS_FLAGS_BUILTIN_APP; a->segment_name = segment_name; a->segment_name_length = ARRAY_LEN (segment_name); diff --git a/src/vnet/tcp/tcp.c b/src/vnet/tcp/tcp.c index 12982589..245a35ab 100644 --- a/src/vnet/tcp/tcp.c +++ b/src/vnet/tcp/tcp.c @@ -487,7 +487,8 @@ u8 * format_tcp_connection (u8 * s, va_list * args) { tcp_connection_t *tc = va_arg (*args, tcp_connection_t *); - + if (!tc) + return s; if (tc->c_is_ip4) { s = format (s, "[#%d][%s] %U:%d->%U:%d", tc->c_thread_index, "T", @@ -747,12 +748,14 @@ void tcp_initialize_timer_wheels (tcp_main_t * tm) { tw_timer_wheel_16t_2w_512sl_t *tw; - vec_foreach (tw, tm->timer_wheels) - { + /* *INDENT-OFF* */ + foreach_vlib_main (({ + tw = &tm->timer_wheels[ii]; tw_timer_wheel_init_16t_2w_512sl (tw, tcp_expired_timers_dispatch, 100e-3 /* timer period 100ms */ , ~0); - tw->last_run_time = vlib_time_now (tm->vlib_main); - } + tw->last_run_time = vlib_time_now (this_vlib_main); + })); + /* *INDENT-ON* */ } clib_error_t * diff --git a/src/vnet/tcp/tcp_input.c b/src/vnet/tcp/tcp_input.c index 97679aaf..3bd53878 100644 --- a/src/vnet/tcp/tcp_input.c +++ b/src/vnet/tcp/tcp_input.c @@ -1011,8 +1011,8 @@ tcp_session_enqueue_ooo (tcp_connection_t * tc, vlib_buffer_t * b, clib_warning ("ooo: offset %d len %d", offset, data_len); - rv = svm_fifo_enqueue_with_offset (s0->server_rx_fifo, s0->pid, offset, - data_len, vlib_buffer_get_current (b)); + rv = svm_fifo_enqueue_with_offset (s0->server_rx_fifo, offset, data_len, + vlib_buffer_get_current (b)); /* Nothing written */ if (rv) @@ -2392,8 +2392,8 @@ tcp46_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node, { t0 = vlib_add_trace (vm, node, b0, sizeof (*t0)); clib_memcpy (&t0->tcp_header, tcp0, sizeof (t0->tcp_header)); - clib_memcpy (&t0->tcp_connection, tc0, - sizeof (t0->tcp_connection)); + if (tc0) + clib_memcpy (&t0->tcp_connection, tc0, sizeof (*tc0)); } vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next, diff --git a/src/vnet/tcp/tcp_output.c b/src/vnet/tcp/tcp_output.c index a7be8bd5..4e1a7aa5 100644 --- a/src/vnet/tcp/tcp_output.c +++ b/src/vnet/tcp/tcp_output.c @@ -1558,7 +1558,6 @@ tcp46_send_reset_inline (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_buffer_t *b0; tcp_tx_trace_t *t0; tcp_header_t *th0; - tcp_connection_t *tc0; u32 error0 = TCP_ERROR_RST_SENT, next0 = TCP_RESET_NEXT_IP_LOOKUP; bi0 = from[0]; @@ -1592,13 +1591,8 @@ tcp46_send_reset_inline (vlib_main_t * vm, vlib_node_runtime_t * node, th0 = ip4_next_header ((ip4_header_t *) th0); else th0 = ip6_next_header ((ip6_header_t *) th0); - tc0 = - tcp_connection_get (vnet_buffer (b0)->tcp.connection_index, - my_thread_index); t0 = vlib_add_trace (vm, node, b0, sizeof (*t0)); clib_memcpy (&t0->tcp_header, th0, sizeof (t0->tcp_header)); - clib_memcpy (&t0->tcp_connection, tc0, - sizeof (t0->tcp_connection)); } vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next, diff --git a/src/vnet/tcp/tcp_test.c b/src/vnet/tcp/tcp_test.c index 890e50b9..0146154b 100644 --- a/src/vnet/tcp/tcp_test.c +++ b/src/vnet/tcp/tcp_test.c @@ -351,8 +351,7 @@ tcp_test_fifo1 (vlib_main_t * vm, unformat_input_t * input) /* * Enqueue an initial 
(un-dequeued) chunk */ - rv = svm_fifo_enqueue_nowait (f, 0 /* pid */ , - sizeof (u32), (u8 *) test_data); + rv = svm_fifo_enqueue_nowait (f, sizeof (u32), (u8 *) test_data); TCP_TEST ((rv == sizeof (u32)), "enqueued %d", rv); TCP_TEST ((f->tail == 4), "fifo tail %u", f->tail); @@ -364,7 +363,7 @@ tcp_test_fifo1 (vlib_main_t * vm, unformat_input_t * input) { offset = (2 * i + 1) * sizeof (u32); data = (u8 *) (test_data + (2 * i + 1)); - rv = svm_fifo_enqueue_with_offset (f, 0, offset, sizeof (u32), data); + rv = svm_fifo_enqueue_with_offset (f, offset, sizeof (u32), data); if (verbose) vlib_cli_output (vm, "add [%d] [%d, %d]", 2 * i + 1, offset, offset + sizeof (u32)); @@ -393,7 +392,7 @@ tcp_test_fifo1 (vlib_main_t * vm, unformat_input_t * input) { offset = (2 * i + 0) * sizeof (u32); data = (u8 *) (test_data + (2 * i + 0)); - rv = svm_fifo_enqueue_with_offset (f, 0, offset, sizeof (u32), data); + rv = svm_fifo_enqueue_with_offset (f, offset, sizeof (u32), data); if (verbose) vlib_cli_output (vm, "add [%d] [%d, %d]", 2 * i, offset, offset + sizeof (u32)); @@ -418,8 +417,7 @@ tcp_test_fifo1 (vlib_main_t * vm, unformat_input_t * input) /* * Enqueue the missing u32 */ - rv = svm_fifo_enqueue_nowait (f, 0 /* pid */ , sizeof (u32), - (u8 *) (test_data + 2)); + rv = svm_fifo_enqueue_nowait (f, sizeof (u32), (u8 *) (test_data + 2)); if (verbose) vlib_cli_output (vm, "fifo after missing link: %U", format_svm_fifo, f, 1); @@ -432,8 +430,7 @@ tcp_test_fifo1 (vlib_main_t * vm, unformat_input_t * input) */ for (i = 0; i < 7; i++) { - rv = svm_fifo_dequeue_nowait (f, 0 /* pid */ , sizeof (u32), - (u8 *) & data_word); + rv = svm_fifo_dequeue_nowait (f, sizeof (u32), (u8 *) & data_word); if (rv != sizeof (u32)) { clib_warning ("bytes dequeues %u", rv); @@ -457,7 +454,7 @@ tcp_test_fifo1 (vlib_main_t * vm, unformat_input_t * input) { offset = (2 * i + 1) * sizeof (u32); data = (u8 *) (test_data + (2 * i + 1)); - rv = svm_fifo_enqueue_with_offset (f, 0, offset, sizeof (u32), data); + rv = svm_fifo_enqueue_with_offset (f, offset, sizeof (u32), data); if (verbose) vlib_cli_output (vm, "add [%d] [%d, %d]", 2 * i + 1, offset, offset + sizeof (u32)); @@ -468,13 +465,13 @@ tcp_test_fifo1 (vlib_main_t * vm, unformat_input_t * input) } } - rv = svm_fifo_enqueue_with_offset (f, 0, 8, 21, data); + rv = svm_fifo_enqueue_with_offset (f, 8, 21, data); TCP_TEST ((rv == 0), "ooo enqueued %u", rv); TCP_TEST ((svm_fifo_number_ooo_segments (f) == 1), "number of ooo segments %u", svm_fifo_number_ooo_segments (f)); vec_validate (data_buf, vec_len (data)); - svm_fifo_peek (f, 0, 0, vec_len (data), data_buf); + svm_fifo_peek (f, 0, vec_len (data), data_buf); if (compare_data (data_buf, data, 8, vec_len (data), &j)) { TCP_TEST (0, "[%d] peeked %u expected %u", j, data_buf[j], data[j]); @@ -491,7 +488,7 @@ tcp_test_fifo1 (vlib_main_t * vm, unformat_input_t * input) { offset = (2 * i + 1) * sizeof (u32); data = (u8 *) (test_data + (2 * i + 1)); - rv = svm_fifo_enqueue_with_offset (f, 0, offset, sizeof (u32), data); + rv = svm_fifo_enqueue_with_offset (f, offset, sizeof (u32), data); if (verbose) vlib_cli_output (vm, "add [%d] [%d, %d]", 2 * i + 1, offset, offset + sizeof (u32)); @@ -502,13 +499,13 @@ tcp_test_fifo1 (vlib_main_t * vm, unformat_input_t * input) } } - rv = svm_fifo_enqueue_nowait (f, 0, 29, data); + rv = svm_fifo_enqueue_nowait (f, 29, data); TCP_TEST ((rv == 32), "ooo enqueued %u", rv); TCP_TEST ((svm_fifo_number_ooo_segments (f) == 0), "number of ooo segments %u", svm_fifo_number_ooo_segments (f)); vec_validate 
(data_buf, vec_len (data)); - svm_fifo_peek (f, 0, 0, vec_len (data), data_buf); + svm_fifo_peek (f, 0, vec_len (data), data_buf); if (compare_data (data_buf, data, 0, vec_len (data), &j)) { TCP_TEST (0, "[%d] peeked %u expected %u", j, data_buf[j], data[j]); @@ -551,7 +548,7 @@ tcp_test_fifo2 (vlib_main_t * vm) { tp = vp + i; data64 = tp->offset; - rv = svm_fifo_enqueue_with_offset (f, 0, tp->offset, tp->len, + rv = svm_fifo_enqueue_with_offset (f, tp->offset, tp->len, (u8 *) & data64); } @@ -565,7 +562,7 @@ tcp_test_fifo2 (vlib_main_t * vm) "first ooo seg length %u", ooo_seg->length); data64 = 0; - rv = svm_fifo_enqueue_nowait (f, 0, sizeof (u32), (u8 *) & data64); + rv = svm_fifo_enqueue_nowait (f, sizeof (u32), (u8 *) & data64); TCP_TEST ((rv == 3000), "bytes to be enqueued %u", rv); svm_fifo_free (f); @@ -581,7 +578,7 @@ tcp_test_fifo2 (vlib_main_t * vm) { tp = &test_data[i]; data64 = tp->offset; - rv = svm_fifo_enqueue_with_offset (f, 0, tp->offset, tp->len, + rv = svm_fifo_enqueue_with_offset (f, tp->offset, tp->len, (u8 *) & data64); if (rv) { @@ -599,7 +596,7 @@ tcp_test_fifo2 (vlib_main_t * vm) "first ooo seg length %u", ooo_seg->length); data64 = 0; - rv = svm_fifo_enqueue_nowait (f, 0, sizeof (u32), (u8 *) & data64); + rv = svm_fifo_enqueue_nowait (f, sizeof (u32), (u8 *) & data64); TCP_TEST ((rv == 3000), "bytes to be enqueued %u", rv); @@ -755,7 +752,7 @@ tcp_test_fifo3 (vlib_main_t * vm, unformat_input_t * input) for (i = 0; i < vec_len (generate); i++) { tp = generate + i; - rv = svm_fifo_enqueue_with_offset (f, 0, fifo_initial_offset + rv = svm_fifo_enqueue_with_offset (f, fifo_initial_offset + tp->offset, tp->len, (u8 *) data_pattern + tp->offset); } @@ -776,7 +773,7 @@ tcp_test_fifo3 (vlib_main_t * vm, unformat_input_t * input) u32 bytes_to_enq = 1; if (in_seq_all) bytes_to_enq = total_size; - rv = svm_fifo_enqueue_nowait (f, 0, bytes_to_enq, data_pattern + 0); + rv = svm_fifo_enqueue_nowait (f, bytes_to_enq, data_pattern + 0); if (verbose) vlib_cli_output (vm, "in-order enqueue returned %d", rv); @@ -793,7 +790,7 @@ tcp_test_fifo3 (vlib_main_t * vm, unformat_input_t * input) * Test if peeked data is the same as original data */ vec_validate (data_buf, vec_len (data_pattern)); - svm_fifo_peek (f, 0, 0, vec_len (data_pattern), data_buf); + svm_fifo_peek (f, 0, vec_len (data_pattern), data_buf); if (compare_data (data_buf, data_pattern, 0, vec_len (data_pattern), &j)) { TCP_TEST (0, "[%d] peeked %u expected %u", j, data_buf[j], @@ -806,11 +803,11 @@ tcp_test_fifo3 (vlib_main_t * vm, unformat_input_t * input) */ if (drop) { - svm_fifo_dequeue_drop (f, 0, vec_len (data_pattern)); + svm_fifo_dequeue_drop (f, vec_len (data_pattern)); } else { - svm_fifo_dequeue_nowait (f, 0, vec_len (data_pattern), data_buf); + svm_fifo_dequeue_nowait (f, vec_len (data_pattern), data_buf); if (compare_data (data_buf, data_pattern, 0, vec_len (data_pattern), &j)) { diff --git a/src/vnet/udp/builtin_server.c b/src/vnet/udp/builtin_server.c index 8565f04c..18684d54 100644 --- a/src/vnet/udp/builtin_server.c +++ b/src/vnet/udp/builtin_server.c @@ -59,10 +59,10 @@ builtin_server_rx_callback (stream_session_t * s) vec_validate (my_copy_buffer, this_transfer - 1); _vec_len (my_copy_buffer) = this_transfer; - actual_transfer = svm_fifo_dequeue_nowait (rx_fifo, 0, this_transfer, + actual_transfer = svm_fifo_dequeue_nowait (rx_fifo, this_transfer, my_copy_buffer); ASSERT (actual_transfer == this_transfer); - actual_transfer = svm_fifo_enqueue_nowait (tx_fifo, 0, this_transfer, + actual_transfer = 
svm_fifo_enqueue_nowait (tx_fifo, this_transfer,
                                             my_copy_buffer);
   ASSERT (actual_transfer == this_transfer);
 
@@ -72,7 +72,7 @@ builtin_server_rx_callback (stream_session_t * s)
     {
       /* Fabricate TX event, send to ourselves */
       evt.fifo = tx_fifo;
-      evt.event_type = FIFO_EVENT_SERVER_TX;
+      evt.event_type = FIFO_EVENT_APP_TX;
       evt.event_id = 0;
       q = session_manager_get_vpp_event_queue (s->thread_index);
       unix_shared_memory_queue_add (q, (u8 *) & evt,
@@ -110,6 +110,8 @@ attach_builtin_uri_server ()
   options[SESSION_OPTIONS_ACCEPT_COOKIE] = 0x12345678;
   options[SESSION_OPTIONS_SEGMENT_SIZE] = (2 << 30);	/*$$$$ config / arg */
 
+  options[APP_OPTIONS_FLAGS] = APP_OPTIONS_FLAGS_BUILTIN_APP;
+
   a->options = options;
 
   return vnet_application_attach (a);
diff --git a/src/vnet/udp/udp_input.c b/src/vnet/udp/udp_input.c
index 810278e6..e6b4f8fc 100644
--- a/src/vnet/udp/udp_input.c
+++ b/src/vnet/udp/udp_input.c
@@ -145,8 +145,7 @@ udp4_uri_input_node_fn (vlib_main_t * vm,
 	      goto trace0;
 	    }
 
-	  svm_fifo_enqueue_nowait (f0, 0 /* pid */ ,
-				   udp_len0 - sizeof (*udp0),
+	  svm_fifo_enqueue_nowait (f0, udp_len0 - sizeof (*udp0),
 				   (u8 *) (udp0 + 1));
 
 	  b0->error = node->errors[SESSION_ERROR_ENQUEUED];
@@ -255,7 +254,7 @@ udp4_uri_input_node_fn (vlib_main_t * vm,
 	    {
 	      /* Fabricate event */
 	      evt.fifo = s0->server_rx_fifo;
-	      evt.event_type = FIFO_EVENT_SERVER_RX;
+	      evt.event_type = FIFO_EVENT_APP_RX;
 	      evt.event_id = serial_number++;
 
 	      /* Add event to server's event queue */
-- 
cgit 1.2.3-korg


From 7bee80c823ca77de3aca803fdede77e4c7385a52 Mon Sep 17 00:00:00 2001
From: Damjan Marion
Date: Wed, 26 Apr 2017 15:32:12 +0200
Subject: Fix remaining 32-bit compile issues

Change-Id: I9664214652229b663c3e3ba7406b4ede96bfb123
Signed-off-by: Damjan Marion
---
 Makefile                                 |  8 ++++----
 build-data/platforms/vpp.mk              |  5 +++++
 src/plugins/dpdk/buffer.c                |  6 +++---
 src/plugins/ixge/ixge.c                  |  5 +++--
 src/svm/svm.c                            | 11 ++++++-----
 src/svm/svmtool.c                        |  4 ++--
 src/tools/vppapigen/gram.y               |  6 +++---
 src/tools/vppapigen/node.c               |  4 ++--
 src/uri/uri_tcp_test.c                   | 21 ++++++++++++---------
 src/uri/uri_udp_test.c                   | 15 +++++++++------
 src/vat/api_format.c                     | 10 +++++-----
 src/vlib/threads.c                       |  2 +-
 src/vlibmemory/memory_client.c           |  2 +-
 src/vlibmemory/memory_vlib.c             |  3 ++-
 src/vnet/devices/virtio/vhost-user.c     |  2 +-
 src/vnet/session/application_interface.c |  2 +-
 src/vnet/session/session_api.c           | 14 +++++++-------
 src/vnet/tcp/builtin_client.c            | 19 +++++++++++--------
 src/vnet/tcp/builtin_server.c            |  2 +-
 src/vppinfra/mheap.c                     |  2 +-
 20 files changed, 80 insertions(+), 63 deletions(-)

(limited to 'src/vnet/session/application_interface.c')

diff --git a/Makefile b/Makefile
index 8240e789..b344f377 100644
--- a/Makefile
+++ b/Makefile
@@ -263,9 +263,9 @@ define test
 	TEST_DIR=$(WS_ROOT)/test \
 	VPP_TEST_BUILD_DIR=$(BR)/build-$(2)-native \
 	VPP_TEST_BIN=$(BR)/install-$(2)-native/vpp/bin/vpp \
-	VPP_TEST_PLUGIN_PATH=$(BR)/install-$(2)-native/vpp/lib64/vpp_plugins \
+	VPP_TEST_PLUGIN_PATH=$(wildcard $(BR)/install-$(2)-native/vpp/lib*/vpp_plugins) \
 	VPP_TEST_INSTALL_PATH=$(BR)/install-$(2)-native/ \
-	LD_LIBRARY_PATH=$(BR)/install-$(2)-native/vpp/lib64/ \
+	LD_LIBRARY_PATH=$(subst $(subst ,, ),:,$(wildcard $(BR)/install-$(2)-native/vpp/lib*/)) \
 	EXTENDED_TESTS=$(EXTENDED_TESTS) \
 	PYTHON=$(PYTHON) \
 	$(3)
@@ -325,12 +325,12 @@ define run
 	@echo "WARNING: STARTUP_CONF not defined or file doesn't exist."
@echo " Running with minimal startup config: $(MINIMAL_STARTUP_CONF)\n" @cd $(STARTUP_DIR) && \ - sudo $(2) $(1)/vpp/bin/vpp $(MINIMAL_STARTUP_CONF) plugin_path $(1)/vpp/lib64/vpp_plugins + sudo $(2) $(1)/vpp/bin/vpp $(MINIMAL_STARTUP_CONF) plugin_path $(wildcard $(1)/vpp/lib*/vpp_plugins) endef else define run @cd $(STARTUP_DIR) && \ - sudo $(2) $(1)/vpp/bin/vpp $(shell cat $(STARTUP_CONF) | sed -e 's/#.*//') plugin_path $(1)/vpp/lib64/vpp_plugins + sudo $(2) $(1)/vpp/bin/vpp $(shell cat $(STARTUP_CONF) | sed -e 's/#.*//') plugin_path $(wildcard $(1)/vpp/lib*/vpp_plugins) endef endif diff --git a/build-data/platforms/vpp.mk b/build-data/platforms/vpp.mk index 5aafdd76..4577fa2e 100644 --- a/build-data/platforms/vpp.mk +++ b/build-data/platforms/vpp.mk @@ -46,6 +46,11 @@ vpp_root_packages = vpp gmod # vpp_dpdk_lib_dir = /usr/lib # vpp_dpdk_shared_lib = yes +# load balancer plugin is not portable on 32 bit platform +ifeq ($(MACHINE),i686) +vpp_configure_args_vpp = --disable-lb-plugin +endif + vpp_debug_TAG_CFLAGS = -g -O0 -DCLIB_DEBUG -DFORTIFY_SOURCE=2 -march=$(MARCH) \ -fstack-protector-all -fPIC -Werror vpp_debug_TAG_LDFLAGS = -g -O0 -DCLIB_DEBUG -DFORTIFY_SOURCE=2 -march=$(MARCH) \ diff --git a/src/plugins/dpdk/buffer.c b/src/plugins/dpdk/buffer.c index c80b3fa8..2d4762ab 100644 --- a/src/plugins/dpdk/buffer.c +++ b/src/plugins/dpdk/buffer.c @@ -455,8 +455,8 @@ vlib_buffer_pool_create (vlib_main_t * vm, unsigned num_mbufs, uword save_vpm_start, save_vpm_end, save_vpm_size; struct rte_mempool_memhdr *memhdr; - this_pool_start = ~0ULL; - this_pool_end = 0LL; + this_pool_start = ~0; + this_pool_end = 0; STAILQ_FOREACH (memhdr, &rmp->mem_list, next) { @@ -465,7 +465,7 @@ vlib_buffer_pool_create (vlib_main_t * vm, unsigned num_mbufs, if (((uword) memhdr->addr) < this_pool_start) this_pool_start = (uword) (memhdr->addr); } - ASSERT (this_pool_start < ~0ULL && this_pool_end > 0); + ASSERT (this_pool_start < ~0 && this_pool_end > 0); this_pool_size = this_pool_end - this_pool_start; if (CLIB_DEBUG > 1) diff --git a/src/plugins/ixge/ixge.c b/src/plugins/ixge/ixge.c index 08f5b692..0d287250 100644 --- a/src/plugins/ixge/ixge.c +++ b/src/plugins/ixge/ixge.c @@ -20,7 +20,7 @@ * Please use supported DPDK driver instead. */ -#if __x86_64__ +#if __x86_64__ || __i386__ #include #ifndef CLIB_HAVE_VEC128 @@ -2929,7 +2929,6 @@ ixge_set_next_node (ixge_rx_next_t next, char *name) break; } } -#endif /* *INDENT-OFF* */ VLIB_PLUGIN_REGISTER () = { @@ -2937,8 +2936,10 @@ VLIB_PLUGIN_REGISTER () = { .default_disabled = 1, .description = "Intel 82599 Family Native Driver (experimental)", }; +#endif /* *INDENT-ON* */ + /* * fd.io coding-style-patch-verification: ON * diff --git a/src/svm/svm.c b/src/svm/svm.c index 97add5a7..c96135cf 100644 --- a/src/svm/svm.c +++ b/src/svm/svm.c @@ -491,7 +491,7 @@ svm_map_region (svm_map_region_args_t * a) return (0); } - rp = mmap ((void *) a->baseva, a->size, + rp = mmap (uword_to_pointer (a->baseva, void *), a->size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, svm_fd, 0); if (rp == (svm_region_t *) MAP_FAILED) @@ -533,9 +533,10 @@ svm_map_region (svm_map_region_args_t * a) rp->virtual_size = a->size; rp->region_heap = - mheap_alloc_with_flags ((void *) (a->baseva + MMAP_PAGESIZE), - (a->pvt_heap_size != 0) ? - a->pvt_heap_size : SVM_PVT_MHEAP_SIZE, + mheap_alloc_with_flags (uword_to_pointer + (a->baseva + MMAP_PAGESIZE, void *), + (a->pvt_heap_size != + 0) ? 
a->pvt_heap_size : SVM_PVT_MHEAP_SIZE, MHEAP_FLAG_DISABLE_VM); oldheap = svm_push_pvt_heap (rp); @@ -661,7 +662,7 @@ svm_map_region (svm_map_region_args_t * a) a->size = rp->virtual_size; munmap (rp, MMAP_PAGESIZE); - rp = (void *) mmap ((void *) a->baseva, a->size, + rp = (void *) mmap (uword_to_pointer (a->baseva, void *), a->size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, svm_fd, 0); if ((uword) rp == (uword) MAP_FAILED) diff --git a/src/svm/svmtool.c b/src/svm/svmtool.c index b3195514..01ae4221 100644 --- a/src/svm/svmtool.c +++ b/src/svm/svmtool.c @@ -172,7 +172,7 @@ svm_map_region_nolock (svm_map_region_args_t * a) a->size = rp->virtual_size; munmap (rp, MMAP_PAGESIZE); - rp = (void *) mmap ((void *) a->baseva, a->size, + rp = (void *) mmap (uword_to_pointer (a->baseva, void *), a->size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, svm_fd, 0); if ((uword) rp == (uword) MAP_FAILED) @@ -401,7 +401,7 @@ repair (char *chroot_path, int crash_root_region) a->size = root_rp->virtual_size; munmap (root_rp, MMAP_PAGESIZE); - root_rp = (void *) mmap ((void *) a->baseva, a->size, + root_rp = (void *) mmap (uword_to_pointer (a->baseva, void *), a->size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, svm_fd, 0); if ((uword) root_rp == (uword) MAP_FAILED) diff --git a/src/tools/vppapigen/gram.y b/src/tools/vppapigen/gram.y index 9cea6023..52bb65c5 100644 --- a/src/tools/vppapigen/gram.y +++ b/src/tools/vppapigen/gram.y @@ -53,9 +53,9 @@ stmt: flist defn {$$ = set_flags($1, $2);} | defn {$$ = $1;} ; -flist: flist flag {$$ = (YYSTYPE)(unsigned long long) - ((unsigned long long) $1 - | (unsigned long long) $2);} +flist: flist flag {$$ = (YYSTYPE)(unsigned long) + ((unsigned long) $1 + | (unsigned long) $2);} | flag {$$ = $1;} ; diff --git a/src/tools/vppapigen/node.c b/src/tools/vppapigen/node.c index 9f234037..15868ee5 100644 --- a/src/tools/vppapigen/node.c +++ b/src/tools/vppapigen/node.c @@ -397,7 +397,7 @@ void node_define_generate (node_t *this, enum passid which, FILE *fp) fprintf(fp, ",\n"); } indent_me(fp); - fprintf (fp, "{\"crc\" : \"0x%08x\"}\n", (u32)(u64)CDATA3); + fprintf (fp, "{\"crc\" : \"0x%08x\"}\n", (u32)(uword)CDATA3); indent -= 4; indent_me(fp); fprintf(fp, "]"); @@ -1219,7 +1219,7 @@ void generate_msg_name_crc_list (YYSTYPE a1, FILE *fp) if (!(np->flags & NODE_FLAG_TYPEONLY)) { fprintf (fp, "\\\n_(VL_API_%s, %s, %08x) ", uppercase (np->data[0]), (i8 *) np->data[0], - (u32)(u64)np->data[3]); + (u32)(uword)np->data[3]); } } np = np->peer; diff --git a/src/uri/uri_tcp_test.c b/src/uri/uri_tcp_test.c index b15fd6ce..22f246e5 100755 --- a/src/uri/uri_tcp_test.c +++ b/src/uri/uri_tcp_test.c @@ -262,7 +262,8 @@ vl_api_application_attach_reply_t_handler (vl_api_application_attach_reply_t * } utm->our_event_queue = - (unix_shared_memory_queue_t *) mp->app_event_queue_address; + uword_to_pointer (mp->app_event_queue_address, + unix_shared_memory_queue_t *); utm->state = STATE_ATTACHED; } @@ -524,8 +525,9 @@ vl_api_connect_uri_reply_t_handler (vl_api_connect_uri_reply_t * mp) return; } - utm->vpp_event_queue = (unix_shared_memory_queue_t *) - mp->vpp_event_queue_address; + utm->vpp_event_queue = + uword_to_pointer (mp->vpp_event_queue_address, + unix_shared_memory_queue_t *); /* * Setup session @@ -534,9 +536,9 @@ vl_api_connect_uri_reply_t_handler (vl_api_connect_uri_reply_t * mp) pool_get (utm->sessions, session); session_index = session - utm->sessions; - rx_fifo = (svm_fifo_t *) mp->server_rx_fifo; + rx_fifo = uword_to_pointer (mp->server_rx_fifo, svm_fifo_t *); 
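/* A minimal sketch of the pointer/uword round-trip these hunks adopt.
 * The API message fields (e.g. mp->server_rx_fifo) are u64 on the wire,
 * so a bare (svm_fifo_t *) cast truncates on 32-bit builds; going through
 * uword, the native pointer-width integer, is what pointer_to_uword() and
 * uword_to_pointer() do. The names come from the surrounding code; the
 * assertion itself is illustrative and not part of the patch. */
u64 wire_addr = pointer_to_uword (rx_fifo);	/* widen for the API message */
svm_fifo_t *decoded = uword_to_pointer (wire_addr, svm_fifo_t *);
ASSERT (decoded == rx_fifo);			/* lossless on 32- and 64-bit */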
rx_fifo->client_session_index = session_index; - tx_fifo = (svm_fifo_t *) mp->server_tx_fifo; + tx_fifo = uword_to_pointer (mp->server_tx_fifo, svm_fifo_t *); tx_fifo->client_session_index = session_index; session->server_rx_fifo = rx_fifo; @@ -858,16 +860,17 @@ vl_api_accept_session_t_handler (vl_api_accept_session_t * mp) ip_str = format (0, "%U", format_ip46_address, &mp->ip, mp->is_ip4); clib_warning ("Accepted session from: %s:%d", ip_str, clib_net_to_host_u16 (mp->port)); - utm->vpp_event_queue = (unix_shared_memory_queue_t *) - mp->vpp_event_queue_address; + utm->vpp_event_queue = + uword_to_pointer (mp->vpp_event_queue_address, + unix_shared_memory_queue_t *); /* Allocate local session and set it up */ pool_get (utm->sessions, session); session_index = session - utm->sessions; - rx_fifo = (svm_fifo_t *) mp->server_rx_fifo; + rx_fifo = uword_to_pointer (mp->server_rx_fifo, svm_fifo_t *); rx_fifo->client_session_index = session_index; - tx_fifo = (svm_fifo_t *) mp->server_tx_fifo; + tx_fifo = uword_to_pointer (mp->server_tx_fifo, svm_fifo_t *); tx_fifo->client_session_index = session_index; session->server_rx_fifo = rx_fifo; diff --git a/src/uri/uri_udp_test.c b/src/uri/uri_udp_test.c index 266215c8..8fb12ed2 100644 --- a/src/uri/uri_udp_test.c +++ b/src/uri/uri_udp_test.c @@ -232,7 +232,8 @@ vl_api_application_attach_reply_t_handler (vl_api_application_attach_reply_t * } utm->our_event_queue = - (unix_shared_memory_queue_t *) mp->app_event_queue_address; + uword_to_pointer (mp->app_event_queue_address, + unix_shared_memory_queue_t *); } static void @@ -581,7 +582,8 @@ send_reply: vec_free (a->segment_name); - client_q = (unix_shared_memory_queue_t *) mp->client_queue_address; + client_q = + uword_to_pointer (mp->client_queue_address, unix_shared_memory_queue_t *); vl_msg_api_send_shmem (client_q, (u8 *) & rmp); } @@ -608,14 +610,15 @@ vl_api_accept_session_t_handler (vl_api_accept_session_t * mp) if (start_time == 0.0) start_time = clib_time_now (&utm->clib_time); - utm->vpp_event_queue = (unix_shared_memory_queue_t *) - mp->vpp_event_queue_address; + utm->vpp_event_queue = + uword_to_pointer (mp->vpp_event_queue_address, + unix_shared_memory_queue_t *); pool_get (utm->sessions, session); - rx_fifo = (svm_fifo_t *) mp->server_rx_fifo; + rx_fifo = uword_to_pointer (mp->server_rx_fifo, svm_fifo_t *); rx_fifo->client_session_index = session - utm->sessions; - tx_fifo = (svm_fifo_t *) mp->server_tx_fifo; + tx_fifo = uword_to_pointer (mp->server_tx_fifo, svm_fifo_t *); tx_fifo->client_session_index = session - utm->sessions; session->server_rx_fifo = rx_fifo; diff --git a/src/vat/api_format.c b/src/vat/api_format.c index 28b227b4..495b660e 100644 --- a/src/vat/api_format.c +++ b/src/vat/api_format.c @@ -1037,7 +1037,7 @@ vl_api_cli_reply_t_handler (vl_api_cli_reply_t * mp) i32 retval = ntohl (mp->retval); vam->retval = retval; - vam->shmem_result = (u8 *) mp->reply_in_shmem; + vam->shmem_result = uword_to_pointer (mp->reply_in_shmem, u8 *); vam->result_ready = 1; } @@ -1058,7 +1058,7 @@ vl_api_cli_reply_t_handler_json (vl_api_cli_reply_t * mp) pthread_mutex_lock (&am->vlib_rp->mutex); oldheap = svm_push_data_heap (am->vlib_rp); - reply = (u8 *) (mp->reply_in_shmem); + reply = uword_to_pointer (mp->reply_in_shmem, u8 *); vec_free (reply); svm_pop_heap (oldheap); @@ -2405,7 +2405,7 @@ static void vl_api_get_node_graph_reply_t_handler if (retval != 0) return; - reply = (u8 *) (mp->reply_in_shmem); + reply = uword_to_pointer (mp->reply_in_shmem, u8 *); pvt_copy = vec_dup (reply); /* Toss the 
shared-memory original... */ @@ -2456,7 +2456,7 @@ static void vl_api_get_node_graph_reply_t_handler_json vat_json_object_add_int (&node, "retval", ntohl (mp->retval)); vat_json_object_add_uint (&node, "reply_in_shmem", mp->reply_in_shmem); - reply = (u8 *) (mp->reply_in_shmem); + reply = uword_to_pointer (mp->reply_in_shmem, u8 *); /* Toss the shared-memory original... */ pthread_mutex_lock (&am->vlib_rp->mutex); @@ -4959,7 +4959,7 @@ exec (vat_main_t * vam) svm_pop_heap (oldheap); pthread_mutex_unlock (&am->vlib_rp->mutex); - mp->cmd_in_shmem = (u64) cmd; + mp->cmd_in_shmem = pointer_to_uword (cmd); S (mp); timeout = vat_time_now (vam) + 10.0; diff --git a/src/vlib/threads.c b/src/vlib/threads.c index 4a111f8d..9ccfd3a2 100644 --- a/src/vlib/threads.c +++ b/src/vlib/threads.c @@ -1125,7 +1125,7 @@ cpu_config (vlib_main_t * vm, unformat_input_t * input) VLIB_EARLY_CONFIG_FUNCTION (cpu_config, "cpu"); -#if !defined (__x86_64__) && !defined (__aarch64__) && !defined (__powerpc64__) && !defined(__arm__) +#if !defined (__x86_64__) && !defined (__i386__) && !defined (__aarch64__) && !defined (__powerpc64__) && !defined(__arm__) void __sync_fetch_and_add_8 (void) { diff --git a/src/vlibmemory/memory_client.c b/src/vlibmemory/memory_client.c index d48a4fa1..a162d6bb 100644 --- a/src/vlibmemory/memory_client.c +++ b/src/vlibmemory/memory_client.c @@ -137,7 +137,7 @@ vl_api_memclnt_create_reply_t_handler (vl_api_memclnt_create_reply_t * mp) am->msg_index_by_name_and_crc = hash_create_string (0, sizeof (uword)); /* Recreate the vnet-side API message handler table */ - tblv = (u8 *) mp->message_table; + tblv = uword_to_pointer (mp->message_table, u8 *); serialize_open_vector (sm, tblv); unserialize_integer (sm, &nmsgs, sizeof (u32)); diff --git a/src/vlibmemory/memory_vlib.c b/src/vlibmemory/memory_vlib.c index 29a5c2c2..acba8b3f 100644 --- a/src/vlibmemory/memory_vlib.c +++ b/src/vlibmemory/memory_vlib.c @@ -216,7 +216,8 @@ vl_api_memclnt_create_t_handler (vl_api_memclnt_create_t * mp) am->shmem_hdr->application_restarts); rp->context = mp->context; rp->response = ntohl (rv); - rp->message_table = (u64) am->serialized_message_table_in_shmem; + rp->message_table = + pointer_to_uword (am->serialized_message_table_in_shmem); vl_msg_api_send_shmem (q, (u8 *) & rp); } diff --git a/src/vnet/devices/virtio/vhost-user.c b/src/vnet/devices/virtio/vhost-user.c index acc7bf82..6ccc0d87 100644 --- a/src/vnet/devices/virtio/vhost-user.c +++ b/src/vnet/devices/virtio/vhost-user.c @@ -719,7 +719,7 @@ vhost_user_log_dirty_pages_2 (vhost_user_intf_t * vui, } if (is_host_address) { - addr = (u64) map_user_mem (vui, (uword) addr); + addr = pointer_to_uword (map_user_mem (vui, (uword) addr)); } if (PREDICT_FALSE ((addr + len - 1) / VHOST_LOG_PAGE / 8 >= vui->log_size)) { diff --git a/src/vnet/session/application_interface.c b/src/vnet/session/application_interface.c index ad44baa1..f74b0cfe 100644 --- a/src/vnet/session/application_interface.c +++ b/src/vnet/session/application_interface.c @@ -247,7 +247,7 @@ vnet_application_attach (vnet_app_attach_args_t * a) a->session_cb_vft))) return rv; - a->app_event_queue_address = (u64) app->event_queue; + a->app_event_queue_address = pointer_to_uword (app->event_queue); sm = segment_manager_get (app->first_segment_manager); segment_manager_get_segment_info (sm->segment_indices[0], &seg_name, &a->segment_size); diff --git a/src/vnet/session/session_api.c b/src/vnet/session/session_api.c index 5a02a08e..8266922c 100755 --- a/src/vnet/session/session_api.c +++ 
b/src/vnet/session/session_api.c @@ -102,9 +102,9 @@ send_session_accept_callback (stream_session_t * s) tc = tp_vft->get_connection (s->connection_index, s->thread_index); mp->listener_handle = listen_session_get_handle (listener); mp->handle = stream_session_handle (s); - mp->server_rx_fifo = (u64) s->server_rx_fifo; - mp->server_tx_fifo = (u64) s->server_tx_fifo; - mp->vpp_event_queue_address = (u64) vpp_queue; + mp->server_rx_fifo = pointer_to_uword (s->server_rx_fifo); + mp->server_tx_fifo = pointer_to_uword (s->server_tx_fifo); + mp->vpp_event_queue_address = pointer_to_uword (vpp_queue); mp->port = tc->rmt_port; mp->is_ip4 = tc->is_ip4; clib_memcpy (&mp->ip, &tc->rmt_ip, sizeof (tc->rmt_ip)); @@ -172,10 +172,10 @@ send_session_connected_callback (u32 app_index, u32 api_context, if (!is_fail) { vpp_queue = session_manager_get_vpp_event_queue (s->thread_index); - mp->server_rx_fifo = (u64) s->server_rx_fifo; - mp->server_tx_fifo = (u64) s->server_tx_fifo; + mp->server_rx_fifo = pointer_to_uword (s->server_rx_fifo); + mp->server_tx_fifo = pointer_to_uword (s->server_tx_fifo); mp->handle = stream_session_handle (s); - mp->vpp_event_queue_address = (u64) vpp_queue; + mp->vpp_event_queue_address = pointer_to_uword (vpp_queue); mp->retval = 0; } else @@ -225,7 +225,7 @@ redirect_connect_callback (u32 server_api_client_index, void *mp_arg) } /* Tell the server the client's API queue address, so it can reply */ - mp->client_queue_address = (u64) client_q; + mp->client_queue_address = pointer_to_uword (client_q); app = application_lookup (mp->client_index); if (!app) { diff --git a/src/vnet/tcp/builtin_client.c b/src/vnet/tcp/builtin_client.c index 32d69a96..6f890874 100644 --- a/src/vnet/tcp/builtin_client.c +++ b/src/vnet/tcp/builtin_client.c @@ -274,11 +274,12 @@ vl_api_connect_uri_reply_t_handler (vl_api_connect_uri_reply_t * mp) return; } - tm->our_event_queue = (unix_shared_memory_queue_t *) - mp->vpp_event_queue_address; - - tm->vpp_event_queue = (unix_shared_memory_queue_t *) - mp->vpp_event_queue_address; + tm->our_event_queue = + uword_to_pointer (mp->vpp_event_queue_address, + unix_shared_memory_queue_t *); + tm->vpp_event_queue = + uword_to_pointer (mp->vpp_event_queue_address, + unix_shared_memory_queue_t *); /* * Setup session @@ -288,9 +289,11 @@ vl_api_connect_uri_reply_t_handler (vl_api_connect_uri_reply_t * mp) session_index = session - tm->sessions; session->bytes_to_receive = session->bytes_to_send = tm->bytes_to_send; - session->server_rx_fifo = (svm_fifo_t *) mp->server_rx_fifo; + session->server_rx_fifo = + uword_to_pointer (mp->server_rx_fifo, svm_fifo_t *); session->server_rx_fifo->client_session_index = session_index; - session->server_tx_fifo = (svm_fifo_t *) mp->server_tx_fifo; + session->server_tx_fifo = + uword_to_pointer (mp->server_tx_fifo, svm_fifo_t *); session->server_tx_fifo->client_session_index = session_index; session->vpp_session_handle = mp->handle; @@ -321,7 +324,7 @@ create_api_loopback (tclient_main_t * tm) memset (mp, 0, sizeof (*mp)); mp->_vl_msg_id = VL_API_MEMCLNT_CREATE; mp->context = 0xFEEDFACE; - mp->input_queue = (u64) tm->vl_input_queue; + mp->input_queue = pointer_to_uword (tm->vl_input_queue); strncpy ((char *) mp->name, "tcp_tester", sizeof (mp->name) - 1); vl_api_memclnt_create_t_handler (mp); diff --git a/src/vnet/tcp/builtin_server.c b/src/vnet/tcp/builtin_server.c index 34682699..621ce02a 100644 --- a/src/vnet/tcp/builtin_server.c +++ b/src/vnet/tcp/builtin_server.c @@ -244,7 +244,7 @@ create_api_loopback (vlib_main_t * vm) memset 
(mp, 0, sizeof (*mp)); mp->_vl_msg_id = VL_API_MEMCLNT_CREATE; mp->context = 0xFEEDFACE; - mp->input_queue = (u64) bsm->vl_input_queue; + mp->input_queue = pointer_to_uword (bsm->vl_input_queue); strncpy ((char *) mp->name, "tcp_test_server", sizeof (mp->name) - 1); vl_api_memclnt_create_t_handler (mp); diff --git a/src/vppinfra/mheap.c b/src/vppinfra/mheap.c index b8828f9e..192732db 100644 --- a/src/vppinfra/mheap.c +++ b/src/vppinfra/mheap.c @@ -304,7 +304,7 @@ mheap_small_object_cache_mask (mheap_small_object_cache_t * c, uword bin) uword mask; /* $$$$ ELIOT FIXME: add Altivec version of this routine */ -#if !defined (CLIB_HAVE_VEC128) || defined (__ALTIVEC__) +#if !defined (CLIB_HAVE_VEC128) || defined (__ALTIVEC__) || defined (__i386__) mask = 0; #else u8x16 b = u8x16_splat (bin); -- cgit 1.2.3-korg From 7b749fe890a4acb23431148859c25643a3597d2a Mon Sep 17 00:00:00 2001 From: Dave Wallace Date: Wed, 5 Jul 2017 14:30:46 -0400 Subject: Unlink /dev/shm files on application detach. Change-Id: Ieb5522cd5cb27bcbce808d4cd6df5248716da43c Signed-off-by: Dave Wallace --- src/vnet/session/application.c | 13 ++++++++++++- src/vnet/session/application_interface.c | 2 +- src/vnet/session/segment_manager.c | 25 ++++++++++++++++++++++--- src/vnet/session/segment_manager.h | 1 + 4 files changed, 36 insertions(+), 5 deletions(-) (limited to 'src/vnet/session/application_interface.c') diff --git a/src/vnet/session/application.c b/src/vnet/session/application.c index 4bdb1027..3cc56f37 100644 --- a/src/vnet/session/application.c +++ b/src/vnet/session/application.c @@ -81,6 +81,9 @@ application_new () memset (app, 0, sizeof (*app)); app->index = application_get_index (app); app->connects_seg_manager = ~0; + app->first_segment_manager = ~0; + if (CLIB_DEBUG > 1) + clib_warning ("[%d] New app (%d)", getpid (), app->index); return app; } @@ -97,6 +100,8 @@ application_del (application_t * app) * The app event queue allocated in first segment is cleared with * the segment manager. No need to explicitly free it. 
 */
+  if (CLIB_DEBUG > 1)
+    clib_warning ("[%d] Delete app (%d)", getpid (), app->index);
 
   /*
    * Cleanup segment managers
@@ -123,6 +128,12 @@ application_del (application_t * app)
       vnet_unbind (a);
     }
 
+  if (app->first_segment_manager != ~0)
+    {
+      sm = segment_manager_get (app->first_segment_manager);
+      segment_manager_first_segment_maybe_del (sm);
+    }
+
   application_table_del (app);
   pool_put (app_pool, app);
 }
@@ -214,7 +225,6 @@ application_alloc_segment_manager (application_t * app)
   if (app->first_segment_manager != (u32) ~ 0)
     {
       sm = segment_manager_get (app->first_segment_manager);
-      app->first_segment_manager = ~0;
       return sm;
     }
 
@@ -288,6 +298,7 @@ application_stop_listen (application_t * srv, u64 handle)
 
   sm = segment_manager_get (*indexp);
   segment_manager_del (sm);
+  srv->first_segment_manager = ~0;
 
   hash_unset (srv->listeners_table, handle);
   listen_session_del (listener);
diff --git a/src/vnet/session/application_interface.c b/src/vnet/session/application_interface.c
index f74b0cfe..338ae857 100644
--- a/src/vnet/session/application_interface.c
+++ b/src/vnet/session/application_interface.c
@@ -113,7 +113,7 @@ vnet_unbind_i (u32 app_index, u64 handle)
 
   if (!app)
     {
-      clib_warning ("app not attached");
+      clib_warning ("app (%d) not attached", app_index);
       return VNET_API_ERROR_APPLICATION_NOT_ATTACHED;
     }
diff --git a/src/vnet/session/segment_manager.c b/src/vnet/session/segment_manager.c
index bf571963..dcef6261 100644
--- a/src/vnet/session/segment_manager.c
+++ b/src/vnet/session/segment_manager.c
@@ -67,7 +67,6 @@ session_manager_add_segment_i (segment_manager_t * sm, u32 segment_size,
     {
       clib_warning ("svm_fifo_segment_create ('%s', %d) failed",
		     ca->segment_name, ca->segment_size);
-      vec_free (segment_name);
       return VNET_API_ERROR_SVM_SEGMENT_CREATE_FAIL;
     }
 
@@ -167,7 +166,24 @@ segment_manager_init (segment_manager_t * sm,
   return 0;
 }
 
-/**
+void
+segment_manager_first_segment_maybe_del (segment_manager_t * sm)
+{
+  svm_fifo_segment_private_t *fifo_segment;
+
+  /* If the first segment has no fifos, then delete the 1st segment
+   */
+  fifo_segment = svm_fifo_get_segment (sm->segment_indices[0]);
+  if (!svm_fifo_segment_has_fifos (fifo_segment))
+    {
+      clib_spinlock_lock (&sm->lockp);
+      svm_fifo_segment_delete (fifo_segment);
+      vec_del1 (sm->segment_indices, 0);
+      clib_spinlock_unlock (&sm->lockp);
+    }
+}
+
+ /**
 * Removes segment manager.
 *
 * Since the fifos allocated in the segment keep backpointers to the sessions
@@ -178,11 +194,12 @@ void
 segment_manager_del (segment_manager_t * sm)
 {
   int j;
+  svm_fifo_segment_private_t *fifo_segment;
+
+  ASSERT (vec_len (sm->segment_indices));
 
   /* Across all fifo segments used by the server */
   for (j = 0; j < vec_len (sm->segment_indices); j++)
     {
-      svm_fifo_segment_private_t *fifo_segment;
       svm_fifo_t *fifo;
 
       /* Vector of fifos allocated in the segment */
@@ -216,6 +233,8 @@ segment_manager_del (segment_manager_t * sm)
        */
     }
 
+  segment_manager_first_segment_maybe_del (sm);
+
   clib_spinlock_free (&sm->lockp);
   pool_put (segment_managers, sm);
 }
diff --git a/src/vnet/session/segment_manager.h b/src/vnet/session/segment_manager.h
index d4b73208..df38d2b3 100644
--- a/src/vnet/session/segment_manager.h
+++ b/src/vnet/session/segment_manager.h
@@ -96,6 +96,7 @@ segment_manager_init (segment_manager_t * sm,
 void segment_manager_get_segment_info (u32 index, u8 ** name, u32 * size);
 int session_manager_add_first_segment (segment_manager_t * sm,
				       u32 segment_size);
+void segment_manager_first_segment_maybe_del (segment_manager_t * sm);
 int session_manager_add_segment (segment_manager_t * sm);
 void segment_manager_del (segment_manager_t * sm);
 int
-- 
cgit 1.2.3-korg


From 2c25a62cc1cc4937165de740a3b32d78429c72d6 Mon Sep 17 00:00:00 2001
From: Dave Barach
Date: Mon, 26 Jun 2017 11:35:07 -0400
Subject: Horizontal (nSessions) scaling draft

- Data structure preallocation.
- Input state machine fixes for mid-stream 3-way handshake retries.
- Batch connections in the builtin_client
- Multiple private fifo segment support
- Fix elog simultaneous event type registration
- Fix sacks when segment hole is added after highest sacked
- Add "accepting" session state for sessions pending accept
- Add ssvm non-recursive locking
- Estimate RTT for syn-ack
- Don't init fifo pointers.
We're using relative offsets for ooo segments - CLI to dump individual session Change-Id: Ie0598563fd246537bafba4feed7985478ea1d415 Signed-off-by: Dave Barach Signed-off-by: Florin Coras --- src/svm/ssvm.h | 17 +++ src/svm/svm_fifo.c | 56 +++++--- src/svm/svm_fifo.h | 16 ++- src/svm/svm_fifo_segment.c | 114 +++++++++++----- src/svm/svm_fifo_segment.h | 4 +- src/svm/test_svm_fifo1.c | 10 +- src/uri/uri_udp_test.c | 2 +- src/vnet/session/application.c | 2 + src/vnet/session/application_interface.c | 21 --- src/vnet/session/application_interface.h | 12 +- src/vnet/session/node.c | 23 +--- src/vnet/session/segment_manager.c | 26 ++-- src/vnet/session/segment_manager.h | 4 + src/vnet/session/session.c | 72 +++++++--- src/vnet/session/session.h | 30 ++++- src/vnet/session/session_cli.c | 99 +++++++++++--- src/vnet/session/transport.h | 6 + src/vnet/tcp/builtin_client.c | 118 +++++++++++----- src/vnet/tcp/builtin_client.h | 7 +- src/vnet/tcp/builtin_server.c | 66 +++++++-- src/vnet/tcp/tcp.c | 225 ++++++++++++++++++++++++++++--- src/vnet/tcp/tcp.h | 13 ++ src/vnet/tcp/tcp_debug.h | 13 +- src/vnet/tcp/tcp_input.c | 97 ++++++++----- src/vnet/tcp/tcp_newreno.c | 4 +- src/vnet/tcp/tcp_output.c | 53 +++++--- src/vnet/tcp/tcp_packet.h | 1 + src/vnet/tcp/tcp_test.c | 10 +- src/vnet/udp/udp_input.c | 2 +- 29 files changed, 838 insertions(+), 285 deletions(-) (limited to 'src/vnet/session/application_interface.c') diff --git a/src/svm/ssvm.h b/src/svm/ssvm.h index bccfc164..8466e155 100644 --- a/src/svm/ssvm.h +++ b/src/svm/ssvm.h @@ -101,6 +101,15 @@ ssvm_lock (ssvm_shared_header_t * h, u32 my_pid, u32 tag) h->tag = tag; } +always_inline void +ssvm_lock_non_recursive (ssvm_shared_header_t * h, u32 tag) +{ + while (__sync_lock_test_and_set (&h->lock, 1)) + ; + + h->tag = tag; +} + always_inline void ssvm_unlock (ssvm_shared_header_t * h) { @@ -113,6 +122,14 @@ ssvm_unlock (ssvm_shared_header_t * h) } } +always_inline void +ssvm_unlock_non_recursive (ssvm_shared_header_t * h) +{ + h->tag = 0; + CLIB_MEMORY_BARRIER (); + h->lock = 0; +} + static inline void * ssvm_push_heap (ssvm_shared_header_t * sh) { diff --git a/src/svm/svm_fifo.c b/src/svm/svm_fifo.c index aed5d6a7..da60fee5 100644 --- a/src/svm/svm_fifo.c +++ b/src/svm/svm_fifo.c @@ -19,29 +19,29 @@ static inline u8 position_lt (svm_fifo_t * f, u32 a, u32 b) { - return (ooo_segment_distance_to_tail (f, a) - < ooo_segment_distance_to_tail (f, b)); + return (ooo_segment_distance_from_tail (f, a) + < ooo_segment_distance_from_tail (f, b)); } static inline u8 position_leq (svm_fifo_t * f, u32 a, u32 b) { - return (ooo_segment_distance_to_tail (f, a) - <= ooo_segment_distance_to_tail (f, b)); + return (ooo_segment_distance_from_tail (f, a) + <= ooo_segment_distance_from_tail (f, b)); } static inline u8 position_gt (svm_fifo_t * f, u32 a, u32 b) { - return (ooo_segment_distance_to_tail (f, a) - > ooo_segment_distance_to_tail (f, b)); + return (ooo_segment_distance_from_tail (f, a) + > ooo_segment_distance_from_tail (f, b)); } static inline u32 position_diff (svm_fifo_t * f, u32 posa, u32 posb) { - return ooo_segment_distance_to_tail (f, posa) - - ooo_segment_distance_to_tail (f, posb); + return ooo_segment_distance_from_tail (f, posa) + - ooo_segment_distance_from_tail (f, posb); } static inline u32 @@ -113,7 +113,7 @@ svm_fifo_create (u32 data_size_in_bytes) if (f == 0) return 0; - memset (f, 0, sizeof (*f) + data_size_in_bytes); + memset (f, 0, sizeof (*f)); f->nitems = data_size_in_bytes; f->ooos_list_head = OOO_SEGMENT_INVALID_INDEX; @@ -204,7 +204,19 
@@ ooo_segment_add (svm_fifo_t * f, u32 offset, u32 length) { s = prev; s_end_pos = ooo_segment_end_pos (f, s); - goto merge; + + /* Check head and tail now since segment may be wider at both ends so + * merge tests lower won't work */ + if (position_lt (f, normalized_position, s->start)) + { + s->start = normalized_position; + s->length = position_diff (f, s_end_pos, s->start); + } + if (position_gt (f, normalized_end_position, s_end_pos)) + { + s->length = position_diff (f, normalized_end_position, s->start); + } + goto check_tail; } s_index = s - f->ooo_segments; @@ -257,8 +269,6 @@ ooo_segment_add (svm_fifo_t * f, u32 offset, u32 length) * Merge needed */ -merge: - /* Merge at head */ if (position_lt (f, normalized_position, s->start)) { @@ -278,6 +288,7 @@ merge: goto done; } +check_tail: /* The new segment's tail may cover multiple smaller ones */ if (position_gt (f, normalized_end_position, s_end_pos)) { @@ -296,7 +307,8 @@ merge: /* If partial overlap with last, merge */ if (it && position_leq (f, it->start, normalized_end_position)) { - s->length = ooo_segment_end_pos (f, it) - s->start; + s->length = + position_diff (f, ooo_segment_end_pos (f, it), s->start); ooo_segment_del (f, it - f->ooo_segments); } } @@ -319,9 +331,9 @@ ooo_segment_try_collect (svm_fifo_t * f, u32 n_bytes_enqueued) i32 diff; s = pool_elt_at_index (f->ooo_segments, f->ooos_list_head); + diff = ooo_segment_distance_to_tail (f, s->start); - diff = (f->tail >= s->start) ? - f->tail - s->start : f->nitems + f->tail - s->start; + ASSERT (diff != n_bytes_enqueued); if (diff > n_bytes_enqueued) return 0; @@ -345,8 +357,7 @@ ooo_segment_try_collect (svm_fifo_t * f, u32 n_bytes_enqueued) if (s->next != OOO_SEGMENT_INVALID_INDEX) { s = pool_elt_at_index (f->ooo_segments, s->next); - diff = (f->tail >= s->start) ? - f->tail - s->start : f->nitems + f->tail - s->start; + diff = ooo_segment_distance_to_tail (f, s->start); ooo_segment_del (f, index); } /* End of search */ @@ -357,6 +368,7 @@ ooo_segment_try_collect (svm_fifo_t * f, u32 n_bytes_enqueued) } } + ASSERT (bytes >= 0 && bytes <= f->nitems); return bytes; } @@ -401,6 +413,8 @@ svm_fifo_enqueue_internal (svm_fifo_t * f, u32 max_bytes, u8 * copy_from_here) } else { + ASSERT (0); + /* Account for a zero-copy enqueue done elsewhere */ ASSERT (max_bytes <= (nitems - cursize)); f->tail += max_bytes; @@ -413,6 +427,7 @@ svm_fifo_enqueue_internal (svm_fifo_t * f, u32 max_bytes, u8 * copy_from_here) total_copy_bytes += ooo_segment_try_collect (f, total_copy_bytes); /* Atomically increase the queue length */ + ASSERT (cursize + total_copy_bytes <= nitems); __sync_fetch_and_add (&f->cursize, total_copy_bytes); return (total_copy_bytes); @@ -475,6 +490,8 @@ svm_fifo_enqueue_with_offset_internal (svm_fifo_t * f, cursize = svm_fifo_max_dequeue (f); nitems = f->nitems; + ASSERT (required_bytes < nitems); + normalized_offset = (f->tail + offset) % nitems; /* Will this request fit? */ @@ -557,6 +574,7 @@ svm_fifo_dequeue_internal (svm_fifo_t * f, u32 max_bytes, u8 * copy_here) } else { + ASSERT (0); /* Account for a zero-copy dequeue done elsewhere */ ASSERT (max_bytes <= cursize); f->head += max_bytes; @@ -565,6 +583,8 @@ svm_fifo_dequeue_internal (svm_fifo_t * f, u32 max_bytes, u8 * copy_here) total_copy_bytes = max_bytes; } + ASSERT (f->head <= nitems); + ASSERT (cursize >= total_copy_bytes); __sync_fetch_and_sub (&f->cursize, total_copy_bytes); return (total_copy_bytes); @@ -702,6 +722,8 @@ svm_fifo_dequeue_drop (svm_fifo_t * f, u32 max_bytes) f->head = (f->head == nitems) ? 
0 : f->head; } + ASSERT (f->head <= nitems); + ASSERT (cursize >= total_drop_bytes); __sync_fetch_and_sub (&f->cursize, total_drop_bytes); return total_drop_bytes; diff --git a/src/svm/svm_fifo.h b/src/svm/svm_fifo.h index f32ef41d..fe21de47 100644 --- a/src/svm/svm_fifo.h +++ b/src/svm/svm_fifo.h @@ -133,25 +133,31 @@ svm_fifo_newest_ooo_segment (svm_fifo_t * f) } always_inline u32 -ooo_segment_distance_to_tail (svm_fifo_t * f, u32 a) +ooo_segment_distance_from_tail (svm_fifo_t * f, u32 pos) { /* Ambiguous. Assumption is that ooo segments don't touch tail */ - if (a == f->tail && f->tail == f->head) + if (PREDICT_FALSE (pos == f->tail && f->tail == f->head)) return f->nitems; - return ((f->nitems + a - f->tail) % f->nitems); + return (((f->nitems + pos) - f->tail) % f->nitems); +} + +always_inline u32 +ooo_segment_distance_to_tail (svm_fifo_t * f, u32 pos) +{ + return (((f->nitems + f->tail) - pos) % f->nitems); } always_inline u32 ooo_segment_offset (svm_fifo_t * f, ooo_segment_t * s) { - return ooo_segment_distance_to_tail (f, s->start); + return ooo_segment_distance_from_tail (f, s->start); } always_inline u32 ooo_segment_end_offset (svm_fifo_t * f, ooo_segment_t * s) { - return ooo_segment_distance_to_tail (f, s->start) + s->length; + return ooo_segment_distance_from_tail (f, s->start) + s->length; } always_inline u32 diff --git a/src/svm/svm_fifo_segment.c b/src/svm/svm_fifo_segment.c index c4ac2352..69d4ecb9 100644 --- a/src/svm/svm_fifo_segment.c +++ b/src/svm/svm_fifo_segment.c @@ -35,6 +35,11 @@ preallocate_fifo_pairs (svm_fifo_segment_header_t * fsh, rx_fifo_size = (sizeof (*f) + a->rx_fifo_size) * a->preallocated_fifo_pairs; tx_fifo_size = (sizeof (*f) + a->tx_fifo_size) * a->preallocated_fifo_pairs; + if (0) + clib_warning ("rx_fifo_size %u (%d mb), tx_fifo_size %u (%d mb)", + rx_fifo_size, rx_fifo_size >> 20, + tx_fifo_size, tx_fifo_size >> 20); + /* Allocate rx fifo space. May fail. */ rx_fifo_space = clib_mem_alloc_aligned_at_offset (rx_fifo_size, CLIB_CACHE_LINE_BYTES, 0 /* align_offset */ , @@ -129,7 +134,7 @@ svm_fifo_segment_create (svm_fifo_segment_create_args_t * a) ssvm_pop_heap (oldheap); sh->ready = 1; - a->new_segment_index = s - sm->segments; + vec_add1 (a->new_segment_indices, s - sm->segments); return (0); } @@ -141,35 +146,81 @@ svm_fifo_segment_create_process_private (svm_fifo_segment_create_args_t * a) svm_fifo_segment_main_t *sm = &svm_fifo_segment_main; ssvm_shared_header_t *sh; svm_fifo_segment_header_t *fsh; + void *oldheap; + u8 **heaps = 0; + mheap_t *heap_header; + int segment_count = 1; + int i; - /* Allocate a fresh segment */ - pool_get (sm->segments, s); - memset (s, 0, sizeof (*s)); - - s->ssvm.ssvm_size = ~0; - s->ssvm.i_am_master = 1; - s->ssvm.my_pid = getpid (); - s->ssvm.name = (u8 *) a->segment_name; - s->ssvm.requested_va = ~0; - - /* Allocate a [sic] shared memory header, in process memory... 
*/ - sh = clib_mem_alloc_aligned (sizeof (*sh), CLIB_CACHE_LINE_BYTES); - s->ssvm.sh = sh; + if (a->private_segment_count && a->private_segment_size) + { + void *mem; + u8 *heap; + u32 pagesize = clib_mem_get_page_size (); + u32 rnd_size; - memset (sh, 0, sizeof (*sh)); - sh->heap = clib_mem_get_heap (); + for (i = 0; i < a->private_segment_count; i++) + { + rnd_size = (a->private_segment_size + (pagesize - 1)) & ~pagesize; + + mem = mmap (0, rnd_size, PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_ANONYMOUS, + -1 /* fd */ , 0 /* offset */ ); + + if (mem == MAP_FAILED) + { + clib_unix_warning ("mmap"); + return -1; + } + heap = mheap_alloc (mem, rnd_size); + heap_header = mheap_header (heap); + heap_header->flags |= MHEAP_FLAG_THREAD_SAFE; + vec_add1 (heaps, heap); + } + segment_count = a->private_segment_count; + } - /* Set up svm_fifo_segment shared header */ - fsh = clib_mem_alloc (sizeof (*fsh)); - memset (fsh, 0, sizeof (*fsh)); - sh->opaque[0] = fsh; - s->h = fsh; - fsh->segment_name = format (0, "%s%c", a->segment_name, 0); + /* Spread preallocated fifo pairs across segments */ + a->preallocated_fifo_pairs /= segment_count; - preallocate_fifo_pairs (fsh, a); + /* Allocate segments */ + for (i = 0; i < segment_count; i++) + { + pool_get (sm->segments, s); + memset (s, 0, sizeof (*s)); + + s->ssvm.ssvm_size = ~0; + s->ssvm.i_am_master = 1; + s->ssvm.my_pid = getpid (); + s->ssvm.name = (u8 *) a->segment_name; + s->ssvm.requested_va = ~0; + + /* Allocate a [sic] shared memory header, in process memory... */ + sh = clib_mem_alloc_aligned (sizeof (*sh), CLIB_CACHE_LINE_BYTES); + s->ssvm.sh = sh; + + memset (sh, 0, sizeof (*sh)); + sh->heap = a->private_segment_count ? heaps[i] : clib_mem_get_heap (); + + /* Set up svm_fifo_segment shared header */ + fsh = clib_mem_alloc (sizeof (*fsh)); + memset (fsh, 0, sizeof (*fsh)); + sh->opaque[0] = fsh; + s->h = fsh; + fsh->segment_name = format (0, "%s%c", a->segment_name, 0); + + if (a->private_segment_count) + { + oldheap = clib_mem_get_heap (); + clib_mem_set_heap (sh->heap); + preallocate_fifo_pairs (fsh, a); + clib_mem_set_heap (oldheap); + } - sh->ready = 1; - a->new_segment_index = s - sm->segments; + sh->ready = 1; + vec_add1 (a->new_segment_indices, s - sm->segments); + } + vec_free (heaps); return (0); } @@ -205,7 +256,7 @@ svm_fifo_segment_attach (svm_fifo_segment_create_args_t * a) fsh = (svm_fifo_segment_header_t *) sh->opaque[0]; s->h = fsh; - a->new_segment_index = s - sm->segments; + vec_add1 (a->new_segment_indices, s - sm->segments); return (0); } @@ -230,7 +281,7 @@ svm_fifo_segment_alloc_fifo (svm_fifo_segment_private_t * s, sh = s->ssvm.sh; fsh = (svm_fifo_segment_header_t *) sh->opaque[0]; - ssvm_lock (sh, 1, 0); + ssvm_lock_non_recursive (sh, 1); oldheap = ssvm_push_heap (sh); switch (list_index) @@ -261,7 +312,7 @@ svm_fifo_segment_alloc_fifo (svm_fifo_segment_private_t * s, if (PREDICT_FALSE (f == 0)) { ssvm_pop_heap (oldheap); - ssvm_unlock (sh); + ssvm_unlock_non_recursive (sh); return (0); } @@ -281,7 +332,7 @@ found: } ssvm_pop_heap (oldheap); - ssvm_unlock (sh); + ssvm_unlock_non_recursive (sh); return (f); } @@ -293,10 +344,11 @@ svm_fifo_segment_free_fifo (svm_fifo_segment_private_t * s, svm_fifo_t * f, svm_fifo_segment_header_t *fsh; void *oldheap; + sh = s->ssvm.sh; fsh = (svm_fifo_segment_header_t *) sh->opaque[0]; - ssvm_lock (sh, 1, 0); + ssvm_lock_non_recursive (sh, 2); oldheap = ssvm_push_heap (sh); switch (list_index) @@ -325,7 +377,7 @@ svm_fifo_segment_free_fifo (svm_fifo_segment_private_t * s, svm_fifo_t * 
f, } ssvm_pop_heap (oldheap); - ssvm_unlock (sh); + ssvm_unlock_non_recursive (sh); } void diff --git a/src/svm/svm_fifo_segment.h b/src/svm/svm_fifo_segment.h index 31e14db5..a7a3f469 100644 --- a/src/svm/svm_fifo_segment.h +++ b/src/svm/svm_fifo_segment.h @@ -57,10 +57,12 @@ typedef struct { char *segment_name; u32 segment_size; - u32 new_segment_index; + u32 *new_segment_indices; u32 rx_fifo_size; u32 tx_fifo_size; u32 preallocated_fifo_pairs; + u32 private_segment_count; + u32 private_segment_size; } svm_fifo_segment_create_args_t; static inline svm_fifo_segment_private_t * diff --git a/src/svm/test_svm_fifo1.c b/src/svm/test_svm_fifo1.c index 63b4a9b7..63d75845 100644 --- a/src/svm/test_svm_fifo1.c +++ b/src/svm/test_svm_fifo1.c @@ -39,7 +39,7 @@ hello_world (int verbose) if (rv) return clib_error_return (0, "svm_fifo_segment_create returned %d", rv); - sp = svm_fifo_get_segment (a->new_segment_index); + sp = svm_fifo_get_segment (a->new_segment_indices[0]); f = svm_fifo_segment_alloc_fifo (sp, 4096, FIFO_SEGMENT_RX_FREELIST); @@ -92,7 +92,7 @@ master (int verbose) if (rv) return clib_error_return (0, "svm_fifo_segment_create returned %d", rv); - sp = svm_fifo_get_segment (a->new_segment_index); + sp = svm_fifo_get_segment (a->new_segment_indices[0]); f = svm_fifo_segment_alloc_fifo (sp, 4096, FIFO_SEGMENT_RX_FREELIST); @@ -128,7 +128,7 @@ mempig (int verbose) if (rv) return clib_error_return (0, "svm_fifo_segment_create returned %d", rv); - sp = svm_fifo_get_segment (a->new_segment_index); + sp = svm_fifo_get_segment (a->new_segment_indices[0]); for (i = 0; i < 1000; i++) { @@ -186,7 +186,7 @@ offset (int verbose) if (rv) return clib_error_return (0, "svm_fifo_segment_create returned %d", rv); - sp = svm_fifo_get_segment (a->new_segment_index); + sp = svm_fifo_get_segment (a->new_segment_indices[0]); f = svm_fifo_segment_alloc_fifo (sp, 200 << 10, FIFO_SEGMENT_RX_FREELIST); @@ -246,7 +246,7 @@ slave (int verbose) if (rv) return clib_error_return (0, "svm_fifo_segment_attach returned %d", rv); - sp = svm_fifo_get_segment (a->new_segment_index); + sp = svm_fifo_get_segment (a->new_segment_indices[0]); sh = sp->ssvm.sh; fsh = (svm_fifo_segment_header_t *) sh->opaque[0]; diff --git a/src/uri/uri_udp_test.c b/src/uri/uri_udp_test.c index 45ad35a4..a8e39eaa 100644 --- a/src/uri/uri_udp_test.c +++ b/src/uri/uri_udp_test.c @@ -707,7 +707,7 @@ vl_api_connect_uri_reply_t_handler (vl_api_connect_uri_reply_t * mp) return; } - segment_index = a->new_segment_index; + segment_index = a->new_segment_indices[0]; vec_add2 (utm->seg, seg, 1); memcpy (seg, sm->segments + segment_index, sizeof (*seg)); sleep (1); diff --git a/src/vnet/session/application.c b/src/vnet/session/application.c index 3cc56f37..8a953719 100644 --- a/src/vnet/session/application.c +++ b/src/vnet/session/application.c @@ -174,6 +174,8 @@ application_init (application_t * app, u32 api_client_index, u64 * options, props->preallocated_fifo_pairs = options[APP_OPTIONS_PREALLOC_FIFO_PAIRS]; props->use_private_segment = options[APP_OPTIONS_FLAGS] & APP_OPTIONS_FLAGS_BUILTIN_APP; + props->private_segment_count = options[APP_OPTIONS_PRIVATE_SEGMENT_COUNT]; + props->private_segment_size = options[APP_OPTIONS_PRIVATE_SEGMENT_SIZE]; first_seg_size = options[SESSION_OPTIONS_SEGMENT_SIZE]; if ((rv = segment_manager_init (sm, props, first_seg_size))) diff --git a/src/vnet/session/application_interface.c b/src/vnet/session/application_interface.c index 338ae857..566a52d7 100644 --- a/src/vnet/session/application_interface.c +++ 
b/src/vnet/session/application_interface.c @@ -275,27 +275,6 @@ vnet_application_detach (vnet_app_detach_args_t * a) return 0; } -session_type_t -session_type_from_proto_and_ip (session_api_proto_t proto, u8 is_ip4) -{ - if (proto == SESSION_PROTO_TCP) - { - if (is_ip4) - return SESSION_TYPE_IP4_TCP; - else - return SESSION_TYPE_IP6_TCP; - } - else - { - if (is_ip4) - return SESSION_TYPE_IP4_UDP; - else - return SESSION_TYPE_IP6_UDP; - } - - return SESSION_N_TYPES; -} - int vnet_bind_uri (vnet_bind_args_t * a) { diff --git a/src/vnet/session/application_interface.h b/src/vnet/session/application_interface.h index 4d6f9def..ed9f89b3 100644 --- a/src/vnet/session/application_interface.h +++ b/src/vnet/session/application_interface.h @@ -22,12 +22,6 @@ #include #include -typedef enum _session_api_proto -{ - SESSION_PROTO_TCP, - SESSION_PROTO_UDP -} session_api_proto_t; - typedef struct _vnet_app_attach_args_t { /** Binary API client index */ @@ -65,7 +59,7 @@ typedef struct _vnet_bind_args_t struct { transport_endpoint_t tep; - session_api_proto_t proto; + transport_proto_t proto; }; }; @@ -98,7 +92,7 @@ typedef struct _vnet_connect_args struct { transport_endpoint_t tep; - session_api_proto_t proto; + transport_proto_t proto; }; }; u32 app_index; @@ -120,6 +114,8 @@ typedef enum APP_EVT_QUEUE_SIZE, APP_OPTIONS_FLAGS, APP_OPTIONS_PREALLOC_FIFO_PAIRS, + APP_OPTIONS_PRIVATE_SEGMENT_COUNT, + APP_OPTIONS_PRIVATE_SEGMENT_SIZE, SESSION_OPTIONS_SEGMENT_SIZE, SESSION_OPTIONS_ADD_SEGMENT_SIZE, SESSION_OPTIONS_RX_FIFO_SIZE, diff --git a/src/vnet/session/node.c b/src/vnet/session/node.c index b24f5fd9..56e62637 100644 --- a/src/vnet/session/node.c +++ b/src/vnet/session/node.c @@ -378,24 +378,12 @@ session_tx_fifo_dequeue_and_snd (vlib_main_t * vm, vlib_node_runtime_t * node, n_tx_pkts, 0); } -stream_session_t * -session_event_get_session (session_fifo_event_t * e0, u8 thread_index) +always_inline stream_session_t * +session_event_get_session (session_fifo_event_t * e, u8 thread_index) { - svm_fifo_t *f0; - stream_session_t *s0; - u32 session_index0; - - f0 = e0->fifo; - session_index0 = f0->master_session_index; - - /* $$$ add multiple event queues, per vpp worker thread */ - ASSERT (f0->master_thread_index == thread_index); - - s0 = stream_session_get_if_valid (session_index0, thread_index); - - ASSERT (s0 == 0 || s0->thread_index == thread_index); - - return s0; + ASSERT (e->fifo->master_thread_index == thread_index); + return stream_session_get_if_valid (e->fifo->master_session_index, + thread_index); } void @@ -569,7 +557,6 @@ skip_dequeue: case FIFO_EVENT_BUILTIN_RX: s0 = session_event_get_session (e0, my_thread_index); svm_fifo_unset_event (s0->server_rx_fifo); - /* Get session's server */ app = application_get (s0->app_index); app->cb_fns.builtin_server_rx_callback (s0); break; diff --git a/src/vnet/session/segment_manager.c b/src/vnet/session/segment_manager.c index dcef6261..262b7faa 100644 --- a/src/vnet/session/segment_manager.c +++ b/src/vnet/session/segment_manager.c @@ -30,7 +30,7 @@ segment_manager_t *segment_managers = 0; /** * Process private segment index */ -u32 private_segment_index = ~0; +u32 *private_segment_indices; /** * Default fifo and segment size. TODO config. 
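The private-segment path in svm_fifo_segment_create_process_private () above carves each segment out of anonymous memory and turns it into a thread-safe mheap before fifo preallocation runs against it. Below is a minimal standalone sketch of that allocation step, assuming only the Linux mmap and clib mheap calls already used in the hunk; error handling is trimmed. One observation on the page rounding: the hunk masks with ~pagesize, which clears just the single page bit, whereas the conventional idiom is ~(pagesize - 1), used here.

#include <sys/mman.h>

/* Sketch: round up to a whole page and build one thread-safe
 * private heap, mirroring the per-segment loop above. */
static u8 *
alloc_one_private_heap (u32 requested_size)
{
  u32 pagesize = clib_mem_get_page_size ();
  u32 rnd_size = (requested_size + pagesize - 1) & ~(pagesize - 1);
  void *mem;
  u8 *heap;
  mheap_t *hdr;

  mem = mmap (0, rnd_size, PROT_READ | PROT_WRITE,
              MAP_PRIVATE | MAP_ANONYMOUS, -1 /* fd */ , 0 /* offset */ );
  if (mem == MAP_FAILED)
    return 0;

  heap = mheap_alloc (mem, rnd_size);
  hdr = mheap_header (heap);
  hdr->flags |= MHEAP_FLAG_THREAD_SAFE;	/* heap is shared by workers */
  return heap;
}

Because one create call can now yield several such segments, the create args return a new_segment_indices vector rather than the old scalar index; callers either read new_segment_indices[0] or vec_append the whole vector, as the segment_manager hunk below does.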
@@ -70,7 +70,8 @@ session_manager_add_segment_i (segment_manager_t * sm, u32 segment_size, return VNET_API_ERROR_SVM_SEGMENT_CREATE_FAIL; } - vec_add1 (sm->segment_indices, ca->new_segment_index); + vec_append (sm->segment_indices, ca->new_segment_indices); + vec_free (ca->new_segment_indices); return 0; } @@ -111,22 +112,23 @@ static void { svm_fifo_segment_create_args_t _a, *a = &_a; - if (private_segment_index != ~0) + if (private_segment_indices) return; memset (a, 0, sizeof (*a)); a->segment_name = "process-private-segment"; a->segment_size = ~0; - a->new_segment_index = ~0; a->rx_fifo_size = props->rx_fifo_size; a->tx_fifo_size = props->tx_fifo_size; a->preallocated_fifo_pairs = props->preallocated_fifo_pairs; + a->private_segment_count = props->private_segment_count; + a->private_segment_size = props->private_segment_size; if (svm_fifo_segment_create_process_private (a)) clib_warning ("Failed to create process private segment"); - private_segment_index = a->new_segment_index; - ASSERT (private_segment_index != ~0); + private_segment_indices = a->new_segment_indices; + ASSERT (vec_len (private_segment_indices)); } /** @@ -156,10 +158,10 @@ segment_manager_init (segment_manager_t * sm, } else { - if (private_segment_index == ~0) + if (vec_len (private_segment_indices) == 0) segment_manager_alloc_process_private_segment (properties); - ASSERT (private_segment_index != ~0); - vec_add1 (sm->segment_indices, private_segment_index); + ASSERT (vec_len (private_segment_indices)); + vec_append (sm->segment_indices, private_segment_indices); } clib_spinlock_init (&sm->lockp); @@ -320,7 +322,7 @@ again: /* See if we're supposed to create another segment */ if (*server_rx_fifo == 0) { - if (sm->properties->add_segment) + if (sm->properties->add_segment && !sm->properties->use_private_segment) { if (added_a_segment) { @@ -379,6 +381,10 @@ segment_manager_dealloc_fifos (u32 svm_segment_index, svm_fifo_t * rx_fifo, svm_fifo_segment_free_fifo (fifo_segment, tx_fifo, FIFO_SEGMENT_TX_FREELIST); + /* Don't try to delete process-private segments */ + if (sm->properties->private_segment_count > 0) + return; + /* Remove segment only if it holds no fifos and not the first */ if (sm->segment_indices[0] != svm_segment_index && !svm_fifo_segment_has_fifos (fifo_segment)) diff --git a/src/vnet/session/segment_manager.h b/src/vnet/session/segment_manager.h index df38d2b3..41abeb22 100644 --- a/src/vnet/session/segment_manager.h +++ b/src/vnet/session/segment_manager.h @@ -39,6 +39,10 @@ typedef struct _segment_manager_properties /** Use private memory segment instead of shared memory */ u8 use_private_segment; + + /** Use one or more private mheaps, instead of the global heap */ + u32 private_segment_count; + u32 private_segment_size; } segment_manager_properties_t; typedef struct _segment_manager diff --git a/src/vnet/session/session.c b/src/vnet/session/session.c index fe198044..0a86d563 100644 --- a/src/vnet/session/session.c +++ b/src/vnet/session/session.c @@ -198,21 +198,28 @@ stream_session_lookup_listener4 (ip4_address_t * lcl, u16 lcl_port, u8 proto) */ stream_session_t * stream_session_lookup4 (ip4_address_t * lcl, ip4_address_t * rmt, - u16 lcl_port, u16 rmt_port, u8 proto, - u32 my_thread_index) + u16 lcl_port, u16 rmt_port, u8 proto) { session_manager_main_t *smm = &session_manager_main; session_kv4_t kv4; + stream_session_t *s; int rv; /* Lookup session amongst established ones */ make_v4_ss_kv (&kv4, lcl, rmt, lcl_port, rmt_port, proto); rv = clib_bihash_search_inline_16_8 (&smm->v4_session_hash, 
&kv4); if (rv == 0) - return stream_session_get_tsi (kv4.value, my_thread_index); + return stream_session_get_from_handle (kv4.value); /* If nothing is found, check if any listener is available */ - return stream_session_lookup_listener4 (lcl, lcl_port, proto); + if ((s = stream_session_lookup_listener4 (lcl, lcl_port, proto))) + return s; + + /* Finally, try half-open connections */ + rv = clib_bihash_search_inline_16_8 (&smm->v4_half_open_hash, &kv4); + if (rv == 0) + return stream_session_get_from_handle (kv4.value); + return 0; } stream_session_t * @@ -242,20 +249,27 @@ stream_session_lookup_listener6 (ip6_address_t * lcl, u16 lcl_port, u8 proto) * wildcarded local source (listener bound to all interfaces) */ stream_session_t * stream_session_lookup6 (ip6_address_t * lcl, ip6_address_t * rmt, - u16 lcl_port, u16 rmt_port, u8 proto, - u32 my_thread_index) + u16 lcl_port, u16 rmt_port, u8 proto) { session_manager_main_t *smm = vnet_get_session_manager_main (); session_kv6_t kv6; + stream_session_t *s; int rv; make_v6_ss_kv (&kv6, lcl, rmt, lcl_port, rmt_port, proto); rv = clib_bihash_search_inline_48_8 (&smm->v6_session_hash, &kv6); if (rv == 0) - return stream_session_get_tsi (kv6.value, my_thread_index); + return stream_session_get_from_handle (kv6.value); /* If nothing is found, check if any listener is available */ - return stream_session_lookup_listener6 (lcl, lcl_port, proto); + if ((s = stream_session_lookup_listener6 (lcl, lcl_port, proto))) + return s; + + /* Finally, try half-open connections */ + rv = clib_bihash_search_inline_48_8 (&smm->v6_half_open_hash, &kv6); + if (rv == 0) + return stream_session_get_from_handle (kv6.value); + return 0; } stream_session_t * @@ -340,7 +354,6 @@ stream_session_lookup_transport4 (ip4_address_t * lcl, ip4_address_t * rmt, rv = clib_bihash_search_inline_16_8 (&smm->v4_half_open_hash, &kv4); if (rv == 0) return tp_vfts[proto].get_half_open (kv4.value & 0xFFFFFFFF); - return 0; } @@ -390,6 +403,8 @@ stream_session_create_i (segment_manager_t * sm, transport_connection_t * tc, u32 thread_index = tc->thread_index; int rv; + ASSERT (thread_index == vlib_get_thread_index ()); + if ((rv = segment_manager_alloc_session_fifos (sm, &server_rx_fifo, &server_tx_fifo, &fifo_segment_index))) @@ -854,6 +869,7 @@ stream_session_accept (transport_connection_t * tc, u32 listener_index, s->app_index = server->index; s->listener_index = listener_index; + s->session_state = SESSION_STATE_ACCEPTING; /* Shoulder-tap the server */ if (notify) @@ -1088,6 +1104,27 @@ session_vpp_event_queue_allocate (session_manager_main_t * smm, } } +session_type_t +session_type_from_proto_and_ip (transport_proto_t proto, u8 is_ip4) +{ + if (proto == TRANSPORT_PROTO_TCP) + { + if (is_ip4) + return SESSION_TYPE_IP4_TCP; + else + return SESSION_TYPE_IP6_TCP; + } + else + { + if (is_ip4) + return SESSION_TYPE_IP4_UDP; + else + return SESSION_TYPE_IP6_UDP; + } + + return SESSION_N_TYPES; +} + static clib_error_t * session_manager_main_enable (vlib_main_t * vm) { @@ -1131,14 +1168,13 @@ session_manager_main_enable (vlib_main_t * vm) session_vpp_event_queue_allocate (smm, i); /* $$$$ preallocate hack config parameter */ - for (i = 0; i < 200000; i++) + for (i = 0; i < smm->preallocated_sessions; i++) { - stream_session_t *ss; + stream_session_t *ss __attribute__ ((unused)); pool_get_aligned (smm->sessions[0], ss, CLIB_CACHE_LINE_BYTES); - memset (ss, 0, sizeof (*ss)); } - for (i = 0; i < 200000; i++) + for (i = 0; i < smm->preallocated_sessions; i++) pool_put_index (smm->sessions[0], i); 
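The hard-coded 200000-session warm-up becomes a configurable preallocated_sessions count here, and the same get-then-put idiom reappears later in this patch in tcp_main_enable () for the connection and half-open pools: allocating and immediately returning n pool elements forces the pool to size its backing memory once at startup, so it never grows (and copies) under traffic. A generic sketch of the idiom, with my_elt_t as a placeholder type:

/* Warm-up: grab n elements so the pool reaches its final size,
 * then put them all back onto the free list. Assumes the pool
 * starts empty, so the freshly-allocated indices are 0..n-1. */
static void
pool_warm_up (my_elt_t ** pool, u32 n)
{
  my_elt_t *e __attribute__ ((unused));
  u32 i;

  for (i = 0; i < n; i++)
    pool_get_aligned (pool[0], e, CLIB_CACHE_LINE_BYTES);
  for (i = 0; i < n; i++)
    pool_put_index (pool[0], i);
}

Note the warm-up loop above also drops the old memset: zeroing is pointless for elements that are returned to the free list immediately. The count itself is parsed by session_config_fn below ("preallocated-sessions %d"), presumably under a session { ... } startup stanza; the registration is outside this diff.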
clib_bihash_init_16_8 (&smm->v4_session_hash, "v4 session table", @@ -1208,9 +1244,10 @@ session_manager_main_init (vlib_main_t * vm) return 0; } -VLIB_INIT_FUNCTION (session_manager_main_init) - static clib_error_t *session_config_fn (vlib_main_t * vm, - unformat_input_t * input) +VLIB_INIT_FUNCTION (session_manager_main_init); + +static clib_error_t * +session_config_fn (vlib_main_t * vm, unformat_input_t * input) { session_manager_main_t *smm = &session_manager_main; u32 nitems; @@ -1224,6 +1261,9 @@ VLIB_INIT_FUNCTION (session_manager_main_init) else clib_warning ("event queue length %d too small, ignored", nitems); } + if (unformat (input, "preallocated-sessions %d", + &smm->preallocated_sessions)) + ; else return clib_error_return (0, "unknown input `%U'", format_unformat_error, input); diff --git a/src/vnet/session/session.h b/src/vnet/session/session.h index 5fa4225c..b4507d4e 100644 --- a/src/vnet/session/session.h +++ b/src/vnet/session/session.h @@ -80,6 +80,10 @@ typedef enum SESSION_N_TYPES, } session_type_t; + +session_type_t +session_type_from_proto_and_ip (transport_proto_t proto, u8 is_ip4); + /* * Application session state */ @@ -87,6 +91,7 @@ typedef enum { SESSION_STATE_LISTENING, SESSION_STATE_CONNECTING, + SESSION_STATE_ACCEPTING, SESSION_STATE_READY, SESSION_STATE_CLOSED, SESSION_STATE_N_STATES, @@ -211,8 +216,12 @@ struct _session_manager_main /** Per transport rx function that can either dequeue or peek */ session_fifo_rx_fn *session_tx_fns[SESSION_N_TYPES]; + /** Session manager is enabled */ u8 is_enabled; + /** Preallocate session config parameter */ + u32 preallocated_sessions; + /* Convenience */ vlib_main_t *vlib_main; vnet_main_t *vnet_main; @@ -247,13 +256,12 @@ stream_session_t *stream_session_lookup_listener4 (ip4_address_t * lcl, u16 lcl_port, u8 proto); stream_session_t *stream_session_lookup4 (ip4_address_t * lcl, ip4_address_t * rmt, u16 lcl_port, - u16 rmt_port, u8 proto, - u32 thread_index); + u16 rmt_port, u8 proto); stream_session_t *stream_session_lookup_listener6 (ip6_address_t * lcl, u16 lcl_port, u8 proto); stream_session_t *stream_session_lookup6 (ip6_address_t * lcl, ip6_address_t * rmt, u16 lcl_port, - u16 rmt_port, u8, u32 thread_index); + u16 rmt_port, u8 proto); transport_connection_t * stream_session_lookup_transport4 (ip4_address_t * lcl, ip4_address_t * rmt, u16 lcl_port, @@ -277,9 +285,24 @@ stream_session_get_tsi (u64 ti_and_si, u32 thread_index) ti_and_si & 0xFFFFFFFFULL); } +always_inline u8 +stream_session_is_valid (u32 si, u8 thread_index) +{ + stream_session_t *s; + s = pool_elt_at_index (session_manager_main.sessions[thread_index], si); + if (s->thread_index != thread_index || s->session_index != si + || s->server_rx_fifo->master_session_index != si + || s->server_tx_fifo->master_session_index != si + || s->server_rx_fifo->master_thread_index != thread_index + || s->server_tx_fifo->master_thread_index != thread_index) + return 0; + return 1; +} + always_inline stream_session_t * stream_session_get (u32 si, u32 thread_index) { + ASSERT (stream_session_is_valid (si, thread_index)); return pool_elt_at_index (session_manager_main.sessions[thread_index], si); } @@ -292,6 +315,7 @@ stream_session_get_if_valid (u64 si, u32 thread_index) if (pool_is_free_index (session_manager_main.sessions[thread_index], si)) return 0; + ASSERT (stream_session_is_valid (si, thread_index)); return pool_elt_at_index (session_manager_main.sessions[thread_index], si); } diff --git a/src/vnet/session/session_cli.c b/src/vnet/session/session_cli.c index 
6b8341aa..e06bc586 100755 --- a/src/vnet/session/session_cli.c +++ b/src/vnet/session/session_cli.c @@ -47,7 +47,8 @@ format_stream_session (u8 * s, va_list * args) svm_fifo_max_enqueue (ss->server_tx_fifo), stream_session_get_index (ss)); - if (ss->session_state == SESSION_STATE_READY) + if (ss->session_state == SESSION_STATE_READY + || ss->session_state == SESSION_STATE_ACCEPTING) { s = format (s, "%U", tp_vft->format_connection, ss->connection_index, ss->thread_index, verbose); @@ -68,8 +69,9 @@ format_stream_session (u8 * s, va_list * args) } else if (ss->session_state == SESSION_STATE_CLOSED) { - s = format (s, "[CL] %-40U", tp_vft->format_connection, - ss->connection_index, ss->thread_index, verbose); + s = + format (s, "[CL] %U", tp_vft->format_connection, ss->connection_index, + ss->thread_index, verbose); if (verbose == 1) s = format (s, "%v", str); if (verbose > 1) @@ -93,7 +95,13 @@ show_session_command_fn (vlib_main_t * vm, unformat_input_t * input, int verbose = 0, i; stream_session_t *pool; stream_session_t *s; - u8 *str = 0; + u8 *str = 0, one_session = 0, proto_set = 0, proto = 0; + u8 is_ip4 = 0, s_type = 0; + ip4_address_t lcl_ip4, rmt_ip4; + u32 lcl_port = 0, rmt_port = 0; + + memset (&lcl_ip4, 0, sizeof (lcl_ip4)); + memset (&rmt_ip4, 0, sizeof (rmt_ip4)); if (!smm->is_enabled) { @@ -106,10 +114,43 @@ show_session_command_fn (vlib_main_t * vm, unformat_input_t * input, ; else if (unformat (input, "verbose")) verbose = 1; + else if (unformat (input, "tcp")) + { + proto_set = 1; + proto = TRANSPORT_PROTO_TCP; + } + else if (unformat (input, "%U:%d->%U:%d", + unformat_ip4_address, &lcl_ip4, &lcl_port, + unformat_ip4_address, &rmt_ip4, &rmt_port)) + { + one_session = 1; + is_ip4 = 1; + } + else break; } + if (one_session) + { + if (!proto_set) + { + vlib_cli_output (vm, "proto not set"); + return clib_error_return (0, "proto not set"); + } + + s_type = session_type_from_proto_and_ip (proto, is_ip4); + s = stream_session_lookup4 (&lcl_ip4, &rmt_ip4, + clib_host_to_net_u16 (lcl_port), + clib_host_to_net_u16 (rmt_port), s_type); + if (s) + vlib_cli_output (vm, "%U", format_stream_session, s, 2); + else + vlib_cli_output (vm, "session does not exist"); + + return 0; + } + for (i = 0; i < vec_len (smm->sessions); i++) { u32 once_per_pool; @@ -146,6 +187,7 @@ show_session_command_fn (vlib_main_t * vm, unformat_input_t * input, } else vlib_cli_output (vm, "Thread %d: no active sessions", i); + vec_reset_length (str); } vec_free (str); @@ -161,15 +203,22 @@ VLIB_CLI_COMMAND (show_session_command, static) = }; /* *INDENT-ON* */ +static int +clear_session (stream_session_t * s) +{ + application_t *server = application_get (s->app_index); + server->cb_fns.session_disconnect_callback (s); + return 0; +} + static clib_error_t * clear_session_command_fn (vlib_main_t * vm, unformat_input_t * input, vlib_cli_command_t * cmd) { session_manager_main_t *smm = &session_manager_main; - u32 thread_index = 0; + u32 thread_index = 0, clear_all = 0; u32 session_index = ~0; - stream_session_t *pool, *session; - application_t *server; + stream_session_t **pool, *session; if (!smm->is_enabled) { @@ -182,28 +231,36 @@ clear_session_command_fn (vlib_main_t * vm, unformat_input_t * input, ; else if (unformat (input, "session %d", &session_index)) ; + else if (unformat (input, "all")) + clear_all = 1; else return clib_error_return (0, "unknown input `%U'", format_unformat_error, input); } - if (session_index == ~0) + if (!clear_all && session_index == ~0) return clib_error_return (0, "session required, 
but not set."); - if (thread_index > vec_len (smm->sessions)) - return clib_error_return (0, "thread %d out of range [0-%d]", - thread_index, vec_len (smm->sessions)); - - pool = smm->sessions[thread_index]; - - if (pool_is_free_index (pool, session_index)) - return clib_error_return (0, "session %d not active", session_index); - - session = pool_elt_at_index (pool, session_index); - server = application_get (session->app_index); + if (session_index != ~0) + { + session = stream_session_get_if_valid (session_index, thread_index); + if (!session) + return clib_error_return (0, "no session %d on thread %d", + session_index, thread_index); + clear_session (session); + } - /* Disconnect both app and transport */ - server->cb_fns.session_disconnect_callback (session); + if (clear_all) + { + /* *INDENT-OFF* */ + vec_foreach (pool, smm->sessions) + { + pool_foreach(session, *pool, ({ + clear_session (session); + })); + }; + /* *INDENT-ON* */ + } return 0; } diff --git a/src/vnet/session/transport.h b/src/vnet/session/transport.h index 561a9257..9c38bab9 100644 --- a/src/vnet/session/transport.h +++ b/src/vnet/session/transport.h @@ -225,6 +225,12 @@ make_v6_ss_kv_from_tc (session_kv6_t * kv, transport_connection_t * t) t->rmt_port, t->proto); } +typedef enum _transport_proto +{ + TRANSPORT_PROTO_TCP, + TRANSPORT_PROTO_UDP +} transport_proto_t; + typedef struct _transport_endpoint { ip46_address_t ip; /** ip address */ diff --git a/src/vnet/tcp/builtin_client.c b/src/vnet/tcp/builtin_client.c index 6f8be082..a6c8a235 100644 --- a/src/vnet/tcp/builtin_client.c +++ b/src/vnet/tcp/builtin_client.c @@ -170,62 +170,90 @@ builtin_client_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node, { tclient_main_t *tm = &tclient_main; int my_thread_index = vlib_get_thread_index (); - vl_api_disconnect_session_t *dmp; session_t *sp; int i; int delete_session; u32 *connection_indices; - u32 tx_quota = 0; - u32 delta, prev_bytes_received_this_session; + u32 *connections_this_batch; + u32 nconnections_this_batch; connection_indices = tm->connection_index_by_thread[my_thread_index]; + connections_this_batch = + tm->connections_this_batch_by_thread[my_thread_index]; - if (tm->run_test == 0 || vec_len (connection_indices) == 0) + if ((tm->run_test == 0) || + ((vec_len (connection_indices) == 0) + && vec_len (connections_this_batch) == 0)) return 0; - for (i = 0; i < vec_len (connection_indices); i++) + /* Grab another pile of connections */ + if (PREDICT_FALSE (vec_len (connections_this_batch) == 0)) + { + nconnections_this_batch = + clib_min (tm->connections_per_batch, vec_len (connection_indices)); + + ASSERT (nconnections_this_batch > 0); + vec_validate (connections_this_batch, nconnections_this_batch - 1); + clib_memcpy (connections_this_batch, + connection_indices + vec_len (connection_indices) + - nconnections_this_batch, + nconnections_this_batch * sizeof (u32)); + _vec_len (connection_indices) -= nconnections_this_batch; + } + + if (PREDICT_FALSE (tm->prev_conns != tm->connections_per_batch + && tm->prev_conns == vec_len (connections_this_batch))) + { + tm->repeats++; + tm->prev_conns = vec_len (connections_this_batch); + if (tm->repeats == 500000) + { + clib_warning ("stuck clients"); + } + } + else + { + tm->prev_conns = vec_len (connections_this_batch); + tm->repeats = 0; + } + + for (i = 0; i < vec_len (connections_this_batch); i++) { delete_session = 1; - sp = pool_elt_at_index (tm->sessions, connection_indices[i]); + sp = pool_elt_at_index (tm->sessions, connections_this_batch[i]); - if ((tm->no_return 
|| tx_quota < 60) && sp->bytes_to_send > 0) + if (sp->bytes_to_send > 0) { send_test_chunk (tm, sp); delete_session = 0; - tx_quota++; } - if (!tm->no_return && sp->bytes_to_receive > 0) + if (sp->bytes_to_receive > 0) { - prev_bytes_received_this_session = sp->bytes_received; receive_test_chunk (tm, sp); - delta = sp->bytes_received - prev_bytes_received_this_session; - if (delta > 0) - tx_quota--; delete_session = 0; } if (PREDICT_FALSE (delete_session == 1)) { - __sync_fetch_and_add (&tm->tx_total, tm->bytes_to_send); + u32 index, thread_index; + stream_session_t *s; + + __sync_fetch_and_add (&tm->tx_total, sp->bytes_sent); __sync_fetch_and_add (&tm->rx_total, sp->bytes_received); - dmp = vl_msg_api_alloc_as_if_client (sizeof (*dmp)); - memset (dmp, 0, sizeof (*dmp)); - dmp->_vl_msg_id = ntohs (VL_API_DISCONNECT_SESSION); - dmp->client_index = tm->my_client_index; - dmp->handle = sp->vpp_session_handle; - if (!unix_shared_memory_queue_add (tm->vl_input_queue, (u8 *) & dmp, - 1)) + stream_session_parse_handle (sp->vpp_session_handle, + &index, &thread_index); + s = stream_session_get_if_valid (index, thread_index); + + if (s) { - vec_delete (connection_indices, 1, i); - tm->connection_index_by_thread[my_thread_index] = - connection_indices; + stream_session_disconnect (s); + vec_delete (connections_this_batch, 1, i); + i--; __sync_fetch_and_add (&tm->ready_connections, -1); } else - { - vl_msg_api_free (dmp); - } + clib_warning ("session AWOL?"); /* Kick the debug CLI process */ if (tm->ready_connections == 0) @@ -236,6 +264,10 @@ builtin_client_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node, } } } + + tm->connection_index_by_thread[my_thread_index] = connection_indices; + tm->connections_this_batch_by_thread[my_thread_index] = + connections_this_batch; return 0; } @@ -356,6 +388,8 @@ tcp_test_clients_init (vlib_main_t * vm) tm->vlib_main = vm; vec_validate (tm->connection_index_by_thread, thread_main->n_vlib_mains); + vec_validate (tm->connections_this_batch_by_thread, + thread_main->n_vlib_mains); return 0; } @@ -388,7 +422,8 @@ builtin_session_connected_callback (u32 app_index, u32 api_context, pool_get (tm->sessions, session); memset (session, 0, sizeof (*session)); session_index = session - tm->sessions; - session->bytes_to_receive = session->bytes_to_send = tm->bytes_to_send; + session->bytes_to_send = tm->bytes_to_send; + session->bytes_to_receive = tm->no_return ? 
0ULL : tm->bytes_to_send; session->server_rx_fifo = s->server_rx_fifo; session->server_rx_fifo->client_session_index = session_index; session->server_tx_fifo = s->server_tx_fifo; @@ -485,6 +520,8 @@ attach_builtin_test_clients_app (void) options[SESSION_OPTIONS_SEGMENT_SIZE] = (2ULL << 32); options[SESSION_OPTIONS_RX_FIFO_SIZE] = tm->fifo_size; options[SESSION_OPTIONS_TX_FIFO_SIZE] = tm->fifo_size / 2; + options[APP_OPTIONS_PRIVATE_SEGMENT_COUNT] = tm->private_segment_count; + options[APP_OPTIONS_PRIVATE_SEGMENT_SIZE] = tm->private_segment_size; options[APP_OPTIONS_PREALLOC_FIFO_PAIRS] = prealloc_fifos; options[APP_OPTIONS_FLAGS] = APP_OPTIONS_FLAGS_BUILTIN_APP; @@ -561,6 +598,9 @@ test_tcp_clients_command_fn (vlib_main_t * vm, tm->bytes_to_send = 8192; tm->no_return = 0; tm->fifo_size = 64 << 10; + tm->connections_per_batch = 1000; + tm->private_segment_count = 0; + tm->private_segment_size = 0; vec_free (tm->connect_uri); @@ -582,6 +622,20 @@ test_tcp_clients_command_fn (vlib_main_t * vm, tm->no_return = 1; else if (unformat (input, "fifo-size %d", &tm->fifo_size)) tm->fifo_size <<= 10; + else if (unformat (input, "private-segment-count %d", + &tm->private_segment_count)) + ; + else if (unformat (input, "private-segment-size %dm", &tmp)) + tm->private_segment_size = tmp << 20; + else if (unformat (input, "private-segment-size %dg", &tmp)) + tm->private_segment_size = tmp << 30; + else if (unformat (input, "private-segment-size %d", &tmp)) + tm->private_segment_size = tmp; + else if (unformat (input, "preallocate-fifos")) + tm->prealloc_fifos = 1; + else + if (unformat (input, "client-batch %d", &tm->connections_per_batch)) + ; else return clib_error_return (0, "unknown input `%U'", format_unformat_error, input); @@ -688,9 +742,13 @@ test_tcp_clients_command_fn (vlib_main_t * vm, vlib_cli_output (vm, "zero delta-t?"); cleanup: - pool_free (tm->sessions); + tm->run_test = 0; for (i = 0; i < vec_len (tm->connection_index_by_thread); i++) - vec_reset_length (tm->connection_index_by_thread[i]); + { + vec_reset_length (tm->connection_index_by_thread[i]); + vec_reset_length (tm->connections_this_batch_by_thread[i]); + } + pool_free (tm->sessions); return 0; } diff --git a/src/vnet/tcp/builtin_client.h b/src/vnet/tcp/builtin_client.h index 3462e0ee..38af231d 100644 --- a/src/vnet/tcp/builtin_client.h +++ b/src/vnet/tcp/builtin_client.h @@ -63,6 +63,9 @@ typedef struct u32 configured_segment_size; u32 fifo_size; u32 expected_connections; /**< Number of clients/connections */ + u32 connections_per_batch; /**< Connections to rx/tx at once */ + u32 private_segment_count; /**< Number of private fifo segs */ + u32 private_segment_size; /**< size of private fifo segs */ /* * Test state variables @@ -72,6 +75,7 @@ typedef struct uword *session_index_by_vpp_handles; /**< Hash table for disconnecting */ u8 *connect_test_data; /**< Pre-computed test data */ u32 **connection_index_by_thread; + u32 **connections_this_batch_by_thread; /**< active connection batch */ pthread_t client_thread_handle; volatile u32 ready_connections; @@ -82,7 +86,8 @@ typedef struct f64 test_start_time; f64 test_end_time; - + u32 prev_conns; + u32 repeats; /* * Flags */ diff --git a/src/vnet/tcp/builtin_server.c b/src/vnet/tcp/builtin_server.c index 775bfc26..8e958ac0 100644 --- a/src/vnet/tcp/builtin_server.c +++ b/src/vnet/tcp/builtin_server.c @@ -56,12 +56,15 @@ typedef struct u32 fifo_size; /**< Fifo size */ u32 rcv_buffer_size; /**< Rcv buffer size */ u32 prealloc_fifos; /**< Preallocate fifos */ + u32 private_segment_count; 
/**< Number of private segments */ + u32 private_segment_size; /**< Size of private segments */ /* * Test state */ u8 **rx_buf; /**< Per-thread RX buffer */ u64 byte_index; + u32 **rx_retries; vlib_main_t *vlib_main; } builtin_server_main_t; @@ -77,6 +80,8 @@ builtin_session_accept_callback (stream_session_t * s) session_manager_get_vpp_event_queue (s->thread_index); s->session_state = SESSION_STATE_READY; bsm->byte_index = 0; + vec_validate (bsm->rx_retries[s->thread_index], s->session_index); + bsm->rx_retries[s->thread_index][s->session_index] = 0; return 0; } @@ -173,11 +178,16 @@ builtin_server_rx_callback (stream_session_t * s) builtin_server_main_t *bsm = &builtin_server_main; session_fifo_event_t evt; static int serial_number = 0; - u32 my_thread_id = vlib_get_thread_index (); + u32 thread_index = vlib_get_thread_index (); + + ASSERT (s->thread_index == thread_index); rx_fifo = s->server_rx_fifo; tx_fifo = s->server_tx_fifo; + ASSERT (rx_fifo->master_thread_index == thread_index); + ASSERT (tx_fifo->master_thread_index == thread_index); + max_dequeue = svm_fifo_max_dequeue (s->server_rx_fifo); max_enqueue = svm_fifo_max_enqueue (s->server_tx_fifo); @@ -201,21 +211,31 @@ builtin_server_rx_callback (stream_session_t * s) evt.event_type = FIFO_EVENT_BUILTIN_RX; evt.event_id = 0; - q = bsm->vpp_queue[s->thread_index]; + q = bsm->vpp_queue[thread_index]; if (PREDICT_FALSE (q->cursize == q->maxsize)) clib_warning ("out of event queue space"); - else - unix_shared_memory_queue_add (q, (u8 *) & evt, - 0 /* don't wait for mutex */ ); + else if (unix_shared_memory_queue_add (q, (u8 *) & evt, 0 /* don't wait for mutex */ + )) + clib_warning ("failed to enqueue self-tap"); + + bsm->rx_retries[thread_index][s->session_index]++; + if (bsm->rx_retries[thread_index][s->session_index] == 500000) + { + clib_warning ("session stuck: %U", format_stream_session, s, 2); + } + } + else + { + bsm->rx_retries[thread_index][s->session_index] = 0; } return 0; } - _vec_len (bsm->rx_buf[my_thread_id]) = max_transfer; + _vec_len (bsm->rx_buf[thread_index]) = max_transfer; actual_transfer = svm_fifo_dequeue_nowait (rx_fifo, max_transfer, - bsm->rx_buf[my_thread_id]); + bsm->rx_buf[thread_index]); ASSERT (actual_transfer == max_transfer); // test_bytes (bsm, actual_transfer); @@ -225,7 +245,7 @@ builtin_server_rx_callback (stream_session_t * s) */ n_written = svm_fifo_enqueue_nowait (tx_fifo, actual_transfer, - bsm->rx_buf[my_thread_id]); + bsm->rx_buf[thread_index]); if (n_written != max_transfer) clib_warning ("short trout!"); @@ -237,11 +257,13 @@ builtin_server_rx_callback (stream_session_t * s) evt.event_type = FIFO_EVENT_APP_TX; evt.event_id = serial_number++; - unix_shared_memory_queue_add (bsm->vpp_queue[s->thread_index], - (u8 *) & evt, 0 /* do wait for mutex */ ); + if (unix_shared_memory_queue_add (bsm->vpp_queue[s->thread_index], + (u8 *) & evt, + 0 /* do wait for mutex */ )) + clib_warning ("failed to enqueue tx evt"); } - if (PREDICT_FALSE (max_enqueue < max_dequeue)) + if (PREDICT_FALSE (n_written < max_dequeue)) goto rx_event; return 0; @@ -328,9 +350,13 @@ server_attach () a->options[SESSION_OPTIONS_SEGMENT_SIZE] = 512 << 20; a->options[SESSION_OPTIONS_RX_FIFO_SIZE] = bsm->fifo_size; a->options[SESSION_OPTIONS_TX_FIFO_SIZE] = bsm->fifo_size; - a->options[APP_OPTIONS_FLAGS] = APP_OPTIONS_FLAGS_BUILTIN_APP; + a->options[APP_OPTIONS_PRIVATE_SEGMENT_COUNT] = bsm->private_segment_count; + a->options[APP_OPTIONS_PRIVATE_SEGMENT_SIZE] = bsm->private_segment_size; 
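Both test apps grow matching private-segment knobs, and their size parsers accept raw bytes or m/g suffixes (shifted by 20 and 30 bits respectively, per the unformat branches in this patch). Assuming the debug CLI paths these handlers are registered under are "test tcp server" and "test tcp clients" (the command registrations are outside this diff), a run exercising private segments might look like:

test tcp server private-segment-count 2 private-segment-size 512m prealloc-fifos 16384
test tcp clients client-batch 100 private-segment-count 2 private-segment-size 512m

With private segments in play, note that segment_manager_dealloc_fifos in the earlier hunk deliberately skips segment deletion: process-private mheaps live for the lifetime of the process and are not reclaimed per-connection.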
a->options[APP_OPTIONS_PREALLOC_FIFO_PAIRS] = bsm->prealloc_fifos ? bsm->prealloc_fifos : 1; + + a->options[APP_OPTIONS_FLAGS] = APP_OPTIONS_FLAGS_BUILTIN_APP; + a->segment_name = segment_name; a->segment_name_length = ARRAY_LEN (segment_name); @@ -374,6 +400,8 @@ server_create (vlib_main_t * vm) num_threads = 1 /* main thread */ + vtm->n_threads; vec_validate (builtin_server_main.vpp_queue, num_threads - 1); vec_validate (bsm->rx_buf, num_threads - 1); + vec_validate (bsm->rx_retries, num_threads - 1); + for (i = 0; i < num_threads; i++) vec_validate (bsm->rx_buf[i], bsm->rcv_buffer_size); @@ -435,11 +463,14 @@ server_create_command_fn (vlib_main_t * vm, unformat_input_t * input, { builtin_server_main_t *bsm = &builtin_server_main; int rv; + u32 tmp; bsm->no_echo = 0; bsm->fifo_size = 64 << 10; bsm->rcv_buffer_size = 128 << 10; bsm->prealloc_fifos = 0; + bsm->private_segment_count = 0; + bsm->private_segment_size = 0; while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) { @@ -449,8 +480,17 @@ server_create_command_fn (vlib_main_t * vm, unformat_input_t * input, bsm->fifo_size <<= 10; else if (unformat (input, "rcv-buf-size %d", &bsm->rcv_buffer_size)) ; - else if (unformat (input, "prealloc-fifos", &bsm->prealloc_fifos)) + else if (unformat (input, "prealloc-fifos %d", &bsm->prealloc_fifos)) + ; + else if (unformat (input, "private-segment-count %d", + &bsm->private_segment_count)) ; + else if (unformat (input, "private-segment-size %dm", &tmp)) + bsm->private_segment_size = tmp << 20; + else if (unformat (input, "private-segment-size %dg", &tmp)) + bsm->private_segment_size = tmp << 30; + else if (unformat (input, "private-segment-size %d", &tmp)) + bsm->private_segment_size = tmp; else return clib_error_return (0, "unknown input `%U'", format_unformat_error, input); diff --git a/src/vnet/tcp/tcp.c b/src/vnet/tcp/tcp.c index 4e85eb3f..f379e699 100644 --- a/src/vnet/tcp/tcp.c +++ b/src/vnet/tcp/tcp.c @@ -74,8 +74,16 @@ static void tcp_connection_unbind (u32 listener_index) { tcp_main_t *tm = vnet_get_tcp_main (); - TCP_EVT_DBG (TCP_EVT_UNBIND, - pool_elt_at_index (tm->listener_pool, listener_index)); + tcp_connection_t *tc; + + tc = pool_elt_at_index (tm->listener_pool, listener_index); + + TCP_EVT_DBG (TCP_EVT_UNBIND, tc); + + /* Poison the entry */ + if (CLIB_DEBUG > 0) + memset (tc, 0xFA, sizeof (*tc)); + pool_put_index (tm->listener_pool, listener_index); } @@ -124,9 +132,20 @@ tcp_connection_cleanup (tcp_connection_t * tc) /* Check if half-open */ if (tc->state == TCP_STATE_SYN_SENT) - pool_put (tm->half_open_connections, tc); + { + /* Poison the entry */ + if (CLIB_DEBUG > 0) + memset (tc, 0xFA, sizeof (*tc)); + pool_put (tm->half_open_connections, tc); + } else - pool_put (tm->connections[tc->c_thread_index], tc); + { + int thread_index = tc->c_thread_index; + /* Poison the entry */ + if (CLIB_DEBUG > 0) + memset (tc, 0xFA, sizeof (*tc)); + pool_put (tm->connections[thread_index], tc); + } } /** @@ -168,13 +187,14 @@ tcp_connection_reset (tcp_connection_t * tc) /* Make sure all timers are cleared */ tcp_connection_timers_reset (tc); - stream_session_reset_notify (&tc->connection); + + /* Wait for cleanup from session layer but not forever */ + tcp_timer_set (tc, TCP_TIMER_WAITCLOSE, TCP_CLEANUP_TIME); break; case TCP_STATE_CLOSED: return; } - } /** @@ -278,6 +298,9 @@ tcp_allocate_local_port (tcp_main_t * tm, ip46_address_t * ip) tries = max - min; time_now = tcp_time_now (); + /* Only support active opens from thread 0 */ + ASSERT (vlib_get_thread_index () == 0); + /* 
Start at random point or max */ pool_get (tm->local_endpoints, tep); clib_memcpy (&tep->ip, ip, sizeof (*ip)); @@ -343,6 +366,7 @@ tcp_connection_timers_reset (tcp_connection_t * tc) } } +#if 0 typedef struct ip4_tcp_hdr { ip4_header_t ip; @@ -435,6 +459,7 @@ tcp_connection_fib_attach (tcp_connection_t * tc) tcp_connection_stack_on_fib_entry (tc); } +#endif /* 0 */ /** Initialize tcp connection variables * @@ -447,7 +472,7 @@ tcp_connection_init_vars (tcp_connection_t * tc) tcp_init_mss (tc); scoreboard_init (&tc->sack_sb); tcp_cc_init (tc); - tcp_connection_fib_attach (tc); + // tcp_connection_fib_attach (tc); } int @@ -485,14 +510,38 @@ tcp_connection_open (ip46_address_t * rmt_addr, u16 rmt_port, u8 is_ip4) if (is_ip4) { ip4_address_t *ip4; - ip4 = ip_interface_get_first_ip (sw_if_index, 1); - lcl_addr.ip4.as_u32 = ip4->as_u32; + int index; + if (vec_len (tm->ip4_src_addresses)) + { + index = tm->last_v4_address_rotor++; + if (tm->last_v4_address_rotor >= vec_len (tm->ip4_src_addresses)) + tm->last_v4_address_rotor = 0; + lcl_addr.ip4.as_u32 = tm->ip4_src_addresses[index].as_u32; + } + else + { + ip4 = ip_interface_get_first_ip (sw_if_index, 1); + lcl_addr.ip4.as_u32 = ip4->as_u32; + } } else { ip6_address_t *ip6; - ip6 = ip_interface_get_first_ip (sw_if_index, 0); - clib_memcpy (&lcl_addr.ip6, ip6, sizeof (*ip6)); + int index; + + if (vec_len (tm->ip6_src_addresses)) + { + index = tm->last_v6_address_rotor++; + if (tm->last_v6_address_rotor >= vec_len (tm->ip6_src_addresses)) + tm->last_v6_address_rotor = 0; + clib_memcpy (&lcl_addr.ip6, &tm->ip6_src_addresses[index], + sizeof (*ip6)); + } + else + { + ip6 = ip_interface_get_first_ip (sw_if_index, 0); + clib_memcpy (&lcl_addr.ip6, ip6, sizeof (*ip6)); + } } /* Allocate source port */ @@ -614,7 +663,7 @@ u8 * format_tcp_vars (u8 * s, va_list * args) { tcp_connection_t *tc = va_arg (*args, tcp_connection_t *); - s = format (s, " snd_una %u snd_nxt %u snd_una_max %u\n", + s = format (s, " snd_una %u snd_nxt %u snd_una_max %u", tc->snd_una - tc->iss, tc->snd_nxt - tc->iss, tc->snd_una_max - tc->iss); s = format (s, " rcv_nxt %u rcv_las %u\n", @@ -628,12 +677,17 @@ format_tcp_vars (u8 * s, va_list * args) s = format (s, " cong %U ", format_tcp_congestion_status, tc); s = format (s, "cwnd %u ssthresh %u rtx_bytes %u bytes_acked %u\n", tc->cwnd, tc->ssthresh, tc->snd_rxt_bytes, tc->bytes_acked); - s = format (s, " prev_ssthresh %u snd_congestion %u dupack %u\n", + s = format (s, " prev_ssthresh %u snd_congestion %u dupack %u", tc->prev_ssthresh, tc->snd_congestion - tc->iss, tc->rcv_dupacks); + s = format (s, " limited_transmit %u\n", tc->limited_transmit - tc->iss); + s = format (s, " tsecr %u tsecr_last_ack %u\n", tc->rcv_opts.tsecr, + tc->tsecr_last_ack); s = format (s, " rto %u rto_boff %u srtt %u rttvar %u rtt_ts %u ", tc->rto, tc->rto_boff, tc->srtt, tc->rttvar, tc->rtt_ts); s = format (s, "rtt_seq %u\n", tc->rtt_seq); + s = format (s, " tsval_recent %u tsval_recent_age %u\n", tc->tsval_recent, + tcp_time_now () - tc->tsval_recent_age); s = format (s, " scoreboard: %U\n", format_tcp_scoreboard, &tc->sack_sb); if (vec_len (tc->snd_sacks)) s = format (s, " sacks tx: %U\n", format_tcp_sacks, tc); @@ -719,11 +773,21 @@ format_tcp_sacks (u8 * s, va_list * args) tcp_connection_t *tc = va_arg (*args, tcp_connection_t *); sack_block_t *sacks = tc->snd_sacks; sack_block_t *block; - vec_foreach (block, sacks) - { - s = format (s, " start %u end %u\n", block->start - tc->irs, - block->end - tc->irs); - } + int i, len = 0; + + len = vec_len 
(sacks); + for (i = 0; i < len - 1; i++) + { + block = &sacks[i]; + s = format (s, " start %u end %u\n", block->start - tc->irs, + block->end - tc->irs); + } + if (len) + { + block = &sacks[len - 1]; + s = format (s, " start %u end %u", block->start - tc->irs, + block->end - tc->irs); + } return s; } @@ -796,14 +860,18 @@ tcp_session_send_mss (transport_connection_t * trans_conn) always_inline u32 tcp_round_snd_space (tcp_connection_t * tc, u32 snd_space) { - if (tc->snd_wnd < tc->snd_mss) + if (PREDICT_FALSE (tc->snd_wnd < tc->snd_mss)) { return tc->snd_wnd <= snd_space ? tc->snd_wnd : 0; } /* If we can't write at least a segment, don't try at all */ - if (snd_space < tc->snd_mss) - return 0; + if (PREDICT_FALSE (snd_space < tc->snd_mss)) + { + if (snd_space > clib_min (tc->mss, tc->rcv_opts.mss) - TCP_HDR_LEN_MAX) + return snd_space; + return 0; + } /* round down to mss multiple */ return snd_space - (snd_space % tc->snd_mss); @@ -1042,6 +1110,8 @@ tcp_main_enable (vlib_main_t * vm) vlib_thread_main_t *vtm = vlib_get_thread_main (); clib_error_t *error = 0; u32 num_threads; + int thread, i; + tcp_connection_t *tc __attribute__ ((unused)); if ((error = vlib_call_init_function (vm, ip_main_init))) return error; @@ -1074,6 +1144,27 @@ tcp_main_enable (vlib_main_t * vm) num_threads = 1 /* main thread */ + vtm->n_threads; vec_validate (tm->connections, num_threads - 1); + /* + * Preallocate connections + */ + for (thread = 0; thread < num_threads; thread++) + { + for (i = 0; i < tm->preallocated_connections; i++) + pool_get (tm->connections[thread], tc); + + for (i = 0; i < tm->preallocated_connections; i++) + pool_put_index (tm->connections[thread], i); + } + + /* + * Preallocate half-open connections + */ + for (i = 0; i < tm->preallocated_half_open_connections; i++) + pool_get (tm->half_open_connections, tc); + + for (i = 0; i < tm->preallocated_half_open_connections; i++) + pool_put_index (tm->half_open_connections, i); + /* Initialize per worker thread tx buffers (used for control messages) */ vec_validate (tm->tx_buffers, num_threads - 1); @@ -1116,7 +1207,6 @@ tcp_init (vlib_main_t * vm) { tcp_main_t *tm = vnet_get_tcp_main (); - tm->vlib_main = vm; tm->vnet_main = vnet_get_main (); tm->is_enabled = 0; @@ -1125,6 +1215,97 @@ tcp_init (vlib_main_t * vm) VLIB_INIT_FUNCTION (tcp_init); + +static clib_error_t * +tcp_config_fn (vlib_main_t * vm, unformat_input_t * input) +{ + tcp_main_t *tm = vnet_get_tcp_main (); + + while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) + { + if (unformat + (input, "preallocated-connections %d", + &tm->preallocated_connections)) + ; + else if (unformat (input, "preallocated-half-open-connections %d", + &tm->preallocated_half_open_connections)) + ; + else + return clib_error_return (0, "unknown input `%U'", + format_unformat_error, input); + } + return 0; +} + +VLIB_CONFIG_FUNCTION (tcp_config_fn, "tcp"); + +static clib_error_t * +tcp_src_address (vlib_main_t * vm, + unformat_input_t * input, vlib_cli_command_t * cmd_arg) +{ + tcp_main_t *tm = vnet_get_tcp_main (); + ip4_address_t v4start, v4end; + ip6_address_t v6start, v6end; + int v4set = 0; + int v6set = 0; + + while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) + { + if (unformat (input, "%U - %U", unformat_ip4_address, &v4start, + unformat_ip4_address, &v4end)) + v4set = 1; + else if (unformat (input, "%U", unformat_ip4_address, &v4start)) + { + memcpy (&v4end, &v4start, sizeof (v4start)); + v4set = 1; + } + else if (unformat (input, "%U - %U", unformat_ip6_address, &v6start, + 
unformat_ip4_address, &v6end)) + v6set = 1; + else if (unformat (input, "%U", unformat_ip6_address, &v6start)) + { + memcpy (&v6end, &v6start, sizeof (v4start)); + v6set = 1; + } + else + break; + } + + if (!v4set && !v6set) + return clib_error_return (0, "at least one v4 or v6 address required"); + + if (v4set) + { + u32 tmp; + + do + { + vec_add1 (tm->ip4_src_addresses, v4start); + tmp = clib_net_to_host_u32 (v4start.as_u32); + tmp++; + v4start.as_u32 = clib_host_to_net_u32 (tmp); + } + while (clib_host_to_net_u32 (v4start.as_u32) <= + clib_host_to_net_u32 (v4end.as_u32)); + } + if (v6set) + { + clib_warning ("v6 src address list unimplemented..."); + } + return 0; +} + +/* *INDENT-OFF* */ +VLIB_CLI_COMMAND (tcp_src_address_command, static) = +{ + .path = "tcp src-address", + .short_help = "tcp src-address [- ] add src address range", + .function = tcp_src_address, +}; +/* *INDENT-ON* */ + + + /* * fd.io coding-style-patch-verification: ON * diff --git a/src/vnet/tcp/tcp.h b/src/vnet/tcp/tcp.h index 12d804b8..37b10fd4 100644 --- a/src/vnet/tcp/tcp.h +++ b/src/vnet/tcp/tcp.h @@ -348,6 +348,16 @@ typedef struct _tcp_main /* Flag that indicates if stack is on or off */ u8 is_enabled; + /** Number of preallocated connections */ + u32 preallocated_connections; + u32 preallocated_half_open_connections; + + /** Vectors of src addresses. Optional unless one needs > 63K active-opens */ + ip4_address_t *ip4_src_addresses; + u32 last_v4_address_rotor; + u32 last_v6_address_rotor; + ip6_address_t *ip6_src_addresses; + /* convenience */ vlib_main_t *vlib_main; vnet_main_t *vnet_main; @@ -569,6 +579,7 @@ tcp_connection_force_ack (tcp_connection_t * tc, vlib_buffer_t * b) always_inline void tcp_timer_set (tcp_connection_t * tc, u8 timer_id, u32 interval) { + ASSERT (tc->c_thread_index == vlib_get_thread_index ()); tc->timers[timer_id] = tw_timer_start_16t_2w_512sl (&tcp_main.timer_wheels[tc->c_thread_index], tc->c_c_index, timer_id, interval); @@ -577,6 +588,7 @@ tcp_timer_set (tcp_connection_t * tc, u8 timer_id, u32 interval) always_inline void tcp_timer_reset (tcp_connection_t * tc, u8 timer_id) { + ASSERT (tc->c_thread_index == vlib_get_thread_index ()); if (tc->timers[timer_id] == TCP_TIMER_HANDLE_INVALID) return; @@ -588,6 +600,7 @@ tcp_timer_reset (tcp_connection_t * tc, u8 timer_id) always_inline void tcp_timer_update (tcp_connection_t * tc, u8 timer_id, u32 interval) { + ASSERT (tc->c_thread_index == vlib_get_thread_index ()); if (tc->timers[timer_id] != TCP_TIMER_HANDLE_INVALID) tw_timer_stop_16t_2w_512sl (&tcp_main.timer_wheels[tc->c_thread_index], tc->timers[timer_id]); diff --git a/src/vnet/tcp/tcp_debug.h b/src/vnet/tcp/tcp_debug.h index ae68ad1b..be51bca2 100755 --- a/src/vnet/tcp/tcp_debug.h +++ b/src/vnet/tcp/tcp_debug.h @@ -383,9 +383,16 @@ typedef enum _tcp_dbg_evt "establish", \ }, \ }; \ - DECLARE_ETD(_tc, _e, 2); \ - ed->data[0] = _timer_id; \ - ed->data[1] = _timer_id; \ + if (_tc) \ + { \ + DECLARE_ETD(_tc, _e, 2); \ + ed->data[0] = _timer_id; \ + ed->data[1] = _timer_id; \ + } \ + else \ + { \ + clib_warning ("pop for unexisting connection %d", _tc_index); \ + } \ } #define TCP_EVT_SEG_INVALID_HANDLER(_tc, _seq, _end, ...) 
\ diff --git a/src/vnet/tcp/tcp_input.c b/src/vnet/tcp/tcp_input.c index a2e6dad1..45db0da6 100644 --- a/src/vnet/tcp/tcp_input.c +++ b/src/vnet/tcp/tcp_input.c @@ -251,6 +251,7 @@ tcp_update_timestamp (tcp_connection_t * tc, u32 seq, u32 seq_end) if (tcp_opts_tstamp (&tc->rcv_opts) && tc->tsval_recent && seq_leq (seq, tc->rcv_las) && seq_leq (tc->rcv_las, seq_end)) { + ASSERT (timestamp_leq (tc->tsval_recent, tc->rcv_opts.tsval)); tc->tsval_recent = tc->rcv_opts.tsval; tc->tsval_recent_age = tcp_time_now (); } @@ -383,12 +384,9 @@ tcp_estimate_rtt (tcp_connection_t * tc, u32 mrtt) if (tc->srtt != 0) { err = mrtt - tc->srtt; -// tc->srtt += err >> 3; /* XXX Drop in RTT results in RTTVAR increase and bigger RTO. * The increase should be bound */ -// tc->rttvar += ((int) clib_abs (err) - (int) tc->rttvar) >> 2; - tc->srtt = clib_max ((int) tc->srtt + (err >> 3), 1); diff = (clib_abs (err) - (int) tc->rttvar) >> 2; tc->rttvar = clib_max ((int) tc->rttvar + diff, 1); @@ -491,6 +489,14 @@ tcp_ack_is_dupack (tcp_connection_t * tc, vlib_buffer_t * b, u32 prev_snd_wnd, && (prev_snd_wnd == tc->snd_wnd)); } +static u8 +tcp_is_lost_fin (tcp_connection_t * tc) +{ + if ((tc->flags & TCP_CONN_FINSNT) && tc->snd_una_max - tc->snd_una == 1) + return 1; + return 0; +} + /** * Checks if ack is a congestion control event. */ @@ -503,7 +509,7 @@ tcp_ack_is_cc_event (tcp_connection_t * tc, vlib_buffer_t * b, *is_dack = tc->sack_sb.last_sacked_bytes || tcp_ack_is_dupack (tc, b, prev_snd_wnd, prev_snd_una); - return (*is_dack || tcp_in_cong_recovery (tc)); + return ((*is_dack || tcp_in_cong_recovery (tc)) && !tcp_is_lost_fin (tc)); } void @@ -750,10 +756,20 @@ tcp_rcv_sacks (tcp_connection_t * tc, u32 ack) * last hole end */ tmp = tc->rcv_opts.sacks[vec_len (tc->rcv_opts.sacks) - 1]; last_hole = scoreboard_last_hole (sb); - if (seq_gt (tc->snd_una_max, sb->high_sacked) - && seq_gt (tc->snd_una_max, last_hole->end)) - last_hole->end = tc->snd_una_max; - /* keep track of max byte sacked for when the last hole + if (seq_gt (tc->snd_una_max, last_hole->end)) + { + if (seq_geq (last_hole->start, sb->high_sacked)) + { + last_hole->end = tc->snd_una_max; + } + /* New hole after high sacked block */ + else if (seq_lt (sb->high_sacked, tc->snd_una_max)) + { + scoreboard_insert_hole (sb, sb->tail, sb->high_sacked, + tc->snd_una_max); + } + } + /* Keep track of max byte sacked for when the last hole * is acked */ if (seq_gt (tmp.end, sb->high_sacked)) sb->high_sacked = tmp.end; @@ -764,7 +780,6 @@ tcp_rcv_sacks (tcp_connection_t * tc, u32 ack) while (hole && blk_index < vec_len (tc->rcv_opts.sacks)) { blk = &tc->rcv_opts.sacks[blk_index]; - if (seq_leq (blk->start, hole->start)) { /* Block covers hole. 
Remove hole */ @@ -784,6 +799,7 @@ tcp_rcv_sacks (tcp_connection_t * tc, u32 ack) } else if (!next_hole) { + ASSERT (seq_geq (sb->high_sacked, ack)); sb->snd_una_adv = sb->high_sacked - ack; sb->last_bytes_delivered += sb->high_sacked - hole->end; } @@ -819,7 +835,6 @@ tcp_rcv_sacks (tcp_connection_t * tc, u32 ack) { hole->end = blk->start; } - hole = scoreboard_next_hole (sb, hole); } } @@ -827,10 +842,13 @@ tcp_rcv_sacks (tcp_connection_t * tc, u32 ack) scoreboard_update_bytes (tc, sb); sb->last_sacked_bytes = sb->sacked_bytes - (old_sacked_bytes - sb->last_bytes_delivered); + ASSERT (sb->last_sacked_bytes <= sb->sacked_bytes); ASSERT (sb->sacked_bytes == 0 || sb->sacked_bytes < tc->snd_una_max - seq_max (tc->snd_una, ack)); ASSERT (sb->last_sacked_bytes + sb->lost_bytes <= tc->snd_una_max - seq_max (tc->snd_una, ack)); + ASSERT (sb->head == TCP_INVALID_SACK_HOLE_INDEX || tcp_in_recovery (tc) + || sb->holes[sb->head].start == ack + sb->snd_una_adv); } /** @@ -916,7 +934,8 @@ tcp_cc_congestion_undo (tcp_connection_t * tc) static u8 tcp_cc_is_spurious_retransmit (tcp_connection_t * tc) { - return (tc->snd_rxt_ts + return (tcp_in_recovery (tc) + && tc->snd_rxt_ts && tcp_opts_tstamp (&tc->rcv_opts) && timestamp_lt (tc->rcv_opts.tsecr, tc->snd_rxt_ts)); } @@ -994,6 +1013,7 @@ tcp_cc_handle_event (tcp_connection_t * tc, u32 is_dack) { ASSERT (tc->snd_una != tc->snd_una_max || tc->sack_sb.last_sacked_bytes); + tc->rcv_dupacks++; if (tc->rcv_dupacks > TCP_DUPACK_THRESHOLD && !tc->bytes_acked) @@ -1012,17 +1032,20 @@ tcp_cc_handle_event (tcp_connection_t * tc, u32 is_dack) goto partial_ack_test; } - /* If of of the two conditions lower hold, reset dupacks - * 1) Cumulative ack does not cover more than congestion threshold, - * and the following doesn't hold: the congestion window is - * greater than SMSS bytes and the difference between highest_ack - * and prev_highest_ack is at most 4*SMSS bytes (XXX) - * 2) RFC6582 heuristic to avoid multiple fast retransmits + /* If of of the two conditions lower hold, reset dupacks because + * we're probably after timeout (RFC6582 heuristics). 
+ * If Cumulative ack does not cover more than congestion threshold, + * and: + * 1) The following doesn't hold: The congestion window is greater + * than SMSS bytes and the difference between highest_ack + * and prev_highest_ack is at most 4*SMSS bytes + * 2) Echoed timestamp in the last non-dup ack does not equal the + * stored timestamp */ - if ((seq_gt (tc->snd_una, tc->snd_congestion) - || !(tc->cwnd > tc->snd_mss - && tc->bytes_acked <= 4 * tc->snd_mss)) - || tc->rcv_opts.tsecr != tc->tsecr_last_ack) + if (seq_leq (tc->snd_una, tc->snd_congestion) + && ((!(tc->cwnd > tc->snd_mss + && tc->bytes_acked <= 4 * tc->snd_mss)) + || (tc->rcv_opts.tsecr != tc->tsecr_last_ack))) { tc->rcv_dupacks = 0; return; @@ -1038,6 +1061,7 @@ tcp_cc_handle_event (tcp_connection_t * tc, u32 is_dack) * three segments that have left the network and should've been * buffered at the receiver XXX */ tc->cwnd = tc->ssthresh + tc->rcv_dupacks * tc->snd_mss; + ASSERT (tc->cwnd >= tc->snd_mss); /* If cwnd allows, send more data */ if (tcp_opts_sack_permitted (&tc->rcv_opts) @@ -1112,7 +1136,7 @@ partial_ack: >= tc->sack_sb.last_bytes_delivered); rxt_delivered = tc->bytes_acked + tc->sack_sb.snd_una_adv - tc->sack_sb.last_bytes_delivered; - if (rxt_delivered && seq_gt (tc->sack_sb.high_rxt, tc->snd_una)) + if (0 && rxt_delivered && seq_gt (tc->sack_sb.high_rxt, tc->snd_una)) { /* If we have sacks and we haven't gotten an ack beyond high_rxt, * remove sacked bytes delivered */ @@ -1301,6 +1325,8 @@ tcp_session_enqueue_data (tcp_connection_t * tc, vlib_buffer_t * b, { int written; + ASSERT (seq_geq (vnet_buffer (b)->tcp.seq_number, tc->rcv_nxt)); + /* Pure ACK. Update rcv_nxt and be done. */ if (PREDICT_FALSE (data_len == 0)) { @@ -1450,6 +1476,7 @@ tcp_segment_rcv (tcp_main_t * tm, tcp_connection_t * tc, vlib_buffer_t * b, /* Chop off the bytes in the past */ n_bytes_to_drop = tc->rcv_nxt - vnet_buffer (b)->tcp.seq_number; n_data_bytes -= n_bytes_to_drop; + vnet_buffer (b)->tcp.seq_number = tc->rcv_nxt; vlib_buffer_advance (b, n_bytes_to_drop); goto in_order; @@ -1912,11 +1939,12 @@ tcp46_syn_sent_inline (vlib_main_t * vm, vlib_node_runtime_t * node, goto drop; } - stream_session_init_fifos_pointers (&new_tc0->connection, - new_tc0->irs + 1, - new_tc0->iss + 1); /* Make sure after data segment processing ACK is sent */ new_tc0->flags |= TCP_CONN_SNDACK; + + /* Update rtt with the syn-ack sample */ + new_tc0->bytes_acked = 1; + tcp_update_rtt (new_tc0, vnet_buffer (b0)->tcp.ack_number); } /* SYN: Simultaneous open. 
Change state to SYN-RCVD and send SYN-ACK */ else @@ -1932,9 +1960,8 @@ tcp46_syn_sent_inline (vlib_main_t * vm, vlib_node_runtime_t * node, goto drop; } - stream_session_init_fifos_pointers (&new_tc0->connection, - new_tc0->irs + 1, - new_tc0->iss + 1); + tc0->rtt_ts = 0; + tcp_make_synack (new_tc0, b0); next0 = tcp_next_output (is_ip4); @@ -2151,8 +2178,6 @@ tcp46_rcv_process_inline (vlib_main_t * vm, vlib_node_runtime_t * node, << tc0->rcv_opts.wscale; tc0->snd_wl1 = vnet_buffer (b0)->tcp.seq_number; tc0->snd_wl2 = vnet_buffer (b0)->tcp.ack_number; - - /* Shoulder tap the server */ stream_session_accept_notify (&tc0->connection); /* Reset SYN-ACK retransmit timer */ @@ -2175,6 +2200,7 @@ tcp46_rcv_process_inline (vlib_main_t * vm, vlib_node_runtime_t * node, /* If FIN is ACKed */ if (tc0->snd_una == tc0->snd_una_max) { + ASSERT (tcp_fin (tcp0)); tc0->state = TCP_STATE_FIN_WAIT_2; /* Stop all timers, 2MSL will be set lower */ tcp_connection_timers_reset (tc0); @@ -2545,10 +2571,6 @@ tcp46_listen_inline (vlib_main_t * vm, vlib_node_runtime_t * node, tcp_make_synack (child0, b0); next0 = tcp_next_output (is_ip4); - /* Init fifo pointers after we have iss */ - stream_session_init_fifos_pointers (&child0->connection, - child0->irs + 1, - child0->iss + 1); drop: if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED)) { @@ -2886,9 +2908,12 @@ do { \ _(LISTEN, TCP_FLAG_SYN, TCP_INPUT_NEXT_LISTEN, TCP_ERROR_NONE); _(LISTEN, TCP_FLAG_ACK, TCP_INPUT_NEXT_RESET, TCP_ERROR_NONE); _(LISTEN, TCP_FLAG_RST, TCP_INPUT_NEXT_DROP, TCP_ERROR_NONE); + _(LISTEN, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RESET, + TCP_ERROR_NONE); /* ACK for for a SYN-ACK -> tcp-rcv-process. */ _(SYN_RCVD, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE); _(SYN_RCVD, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE); + _(SYN_RCVD, TCP_FLAG_SYN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE); /* SYN-ACK for a SYN */ _(SYN_SENT, TCP_FLAG_SYN | TCP_FLAG_ACK, TCP_INPUT_NEXT_SYN_SENT, TCP_ERROR_NONE); @@ -2905,12 +2930,14 @@ do { \ _(ESTABLISHED, TCP_FLAG_RST, TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE); _(ESTABLISHED, TCP_FLAG_RST | TCP_FLAG_ACK, TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE); + _(ESTABLISHED, TCP_FLAG_SYN, TCP_INPUT_NEXT_ESTABLISHED, TCP_ERROR_NONE); /* ACK or FIN-ACK to our FIN */ _(FIN_WAIT_1, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE); _(FIN_WAIT_1, TCP_FLAG_ACK | TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE); /* FIN in reply to our FIN from the other side */ _(FIN_WAIT_1, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE); + _(FIN_WAIT_1, TCP_FLAG_RST, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE); /* FIN confirming that the peer (app) has closed */ _(FIN_WAIT_2, TCP_FLAG_FIN, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE); _(FIN_WAIT_2, TCP_FLAG_ACK, TCP_INPUT_NEXT_RCV_PROCESS, TCP_ERROR_NONE); @@ -2929,6 +2956,8 @@ do { \ TCP_ERROR_NONE); _(CLOSED, TCP_FLAG_ACK, TCP_INPUT_NEXT_RESET, TCP_ERROR_CONNECTION_CLOSED); _(CLOSED, TCP_FLAG_RST, TCP_INPUT_NEXT_DROP, TCP_ERROR_CONNECTION_CLOSED); + _(CLOSED, TCP_FLAG_FIN | TCP_FLAG_ACK, TCP_INPUT_NEXT_RESET, + TCP_ERROR_CONNECTION_CLOSED); #undef _ } diff --git a/src/vnet/tcp/tcp_newreno.c b/src/vnet/tcp/tcp_newreno.c index c825e952..103fea4c 100644 --- a/src/vnet/tcp/tcp_newreno.c +++ b/src/vnet/tcp/tcp_newreno.c @@ -63,8 +63,8 @@ newreno_rcv_cong_ack (tcp_connection_t * tc, tcp_cc_ack_t ack_type) * window deflation" attempts to ensure that, when fast recovery * eventually ends, approximately ssthresh amount of data will be * 
outstanding in the network.*/ - tc->cwnd = (tc->cwnd > tc->bytes_acked) ? - tc->cwnd - tc->bytes_acked : 0; + tc->cwnd = (tc->cwnd > tc->bytes_acked + tc->snd_mss) ? + tc->cwnd - tc->bytes_acked : tc->snd_mss; if (tc->bytes_acked > tc->snd_mss) tc->cwnd += tc->snd_mss; } diff --git a/src/vnet/tcp/tcp_output.c b/src/vnet/tcp/tcp_output.c index 41bebcb3..b418e8ba 100644 --- a/src/vnet/tcp/tcp_output.c +++ b/src/vnet/tcp/tcp_output.c @@ -19,17 +19,20 @@ vlib_node_registration_t tcp4_output_node; vlib_node_registration_t tcp6_output_node; -typedef enum _tcp_output_nect +typedef enum _tcp_output_next { TCP_OUTPUT_NEXT_DROP, + TCP_OUTPUT_NEXT_IP_LOOKUP, TCP_OUTPUT_N_NEXT } tcp_output_next_t; #define foreach_tcp4_output_next \ _ (DROP, "error-drop") \ + _ (IP_LOOKUP, "ip4-lookup") #define foreach_tcp6_output_next \ _ (DROP, "error-drop") \ + _ (IP_LOOKUP, "ip6-lookup") static char *tcp_error_strings[] = { #define tcp_error(n,s) s, @@ -427,16 +430,16 @@ tcp_init_mss (tcp_connection_t * tc) #define tcp_get_free_buffer_index(tm, bidx) \ do { \ u32 *my_tx_buffers, n_free_buffers; \ - u32 thread_index = vlib_get_thread_index(); \ - my_tx_buffers = tm->tx_buffers[thread_index]; \ + u32 thread_index = vlib_get_thread_index(); \ + my_tx_buffers = tm->tx_buffers[thread_index]; \ if (PREDICT_FALSE(vec_len (my_tx_buffers) == 0)) \ { \ n_free_buffers = 32; /* TODO config or macro */ \ vec_validate (my_tx_buffers, n_free_buffers - 1); \ _vec_len(my_tx_buffers) = vlib_buffer_alloc_from_free_list ( \ - tm->vlib_main, my_tx_buffers, n_free_buffers, \ + vlib_get_main(), my_tx_buffers, n_free_buffers, \ VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX); \ - tm->tx_buffers[thread_index] = my_tx_buffers; \ + tm->tx_buffers[thread_index] = my_tx_buffers; \ } \ /* buffer shortage */ \ if (PREDICT_FALSE (vec_len (my_tx_buffers) == 0)) \ @@ -445,12 +448,12 @@ do { \ _vec_len (my_tx_buffers) -= 1; \ } while (0) -#define tcp_return_buffer(tm) \ -do { \ - u32 *my_tx_buffers; \ - u32 thread_index = vlib_get_thread_index(); \ - my_tx_buffers = tm->tx_buffers[thread_index]; \ - _vec_len (my_tx_buffers) +=1; \ +#define tcp_return_buffer(tm) \ +do { \ + u32 *my_tx_buffers; \ + u32 thread_index = vlib_get_thread_index(); \ + my_tx_buffers = tm->tx_buffers[thread_index]; \ + _vec_len (my_tx_buffers) +=1; \ } while (0) always_inline void @@ -757,23 +760,22 @@ void tcp_push_ip_hdr (tcp_main_t * tm, tcp_connection_t * tc, vlib_buffer_t * b) { tcp_header_t *th = vlib_buffer_get_current (b); - + vlib_main_t *vm = vlib_get_main (); if (tc->c_is_ip4) { ip4_header_t *ih; - ih = vlib_buffer_push_ip4 (tm->vlib_main, b, &tc->c_lcl_ip4, + ih = vlib_buffer_push_ip4 (vm, b, &tc->c_lcl_ip4, &tc->c_rmt_ip4, IP_PROTOCOL_TCP); - th->checksum = ip4_tcp_udp_compute_checksum (tm->vlib_main, b, ih); + th->checksum = ip4_tcp_udp_compute_checksum (vm, b, ih); } else { ip6_header_t *ih; int bogus = ~0; - ih = vlib_buffer_push_ip6 (tm->vlib_main, b, &tc->c_lcl_ip6, + ih = vlib_buffer_push_ip6 (vm, b, &tc->c_lcl_ip6, &tc->c_rmt_ip6, IP_PROTOCOL_TCP); - th->checksum = ip6_tcp_udp_icmp_compute_checksum (tm->vlib_main, b, ih, - &bogus); + th->checksum = ip6_tcp_udp_icmp_compute_checksum (vm, b, ih, &bogus); ASSERT (!bogus); } } @@ -851,6 +853,13 @@ tcp_enqueue_to_output (vlib_main_t * vm, vlib_buffer_t * b, u32 bi, u8 is_ip4) /* Decide where to send the packet */ next_index = is_ip4 ? 
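The NewReno hunk above tightens partial-ACK window deflation: instead of letting cwnd deflate all the way to zero, it is floored at one snd_mss so the sender can always keep at least one segment in flight. A minimal sketch of the rule, using a reduced stand-in struct rather than the real tcp_connection_t:

#include <stdint.h>

typedef uint32_t u32;

/* Reduced connection state: only the fields the deflation rule touches */
typedef struct
{
  u32 cwnd;			/* congestion window, bytes */
  u32 bytes_acked;		/* bytes newly acked by this partial ACK */
  u32 snd_mss;			/* sender maximum segment size */
} mini_tc_t;

/* RFC 6582 partial window deflation, floored at one MSS as in the
 * hunk above: never let cwnd collapse to zero. */
static void
deflate_cwnd (mini_tc_t * tc)
{
  tc->cwnd = (tc->cwnd > tc->bytes_acked + tc->snd_mss) ?
    tc->cwnd - tc->bytes_acked : tc->snd_mss;
  if (tc->bytes_acked > tc->snd_mss)
    tc->cwnd += tc->snd_mss;
}

With cwnd = 1000, bytes_acked = 1000 and snd_mss = 1460, the old expression left cwnd = 0 and fast recovery could stall; the floored version leaves cwnd = 1460.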
tcp4_output_node.index : tcp6_output_node.index; + /* Initialize the trajectory trace, if configured */ + if (VLIB_BUFFER_TRACE_TRAJECTORY > 0) + { + b->pre_data[0] = 1; + b->pre_data[1] = next_index; + } + /* Enqueue the packet */ f = vlib_get_frame_to_node (vm, next_index); to_next = vlib_frame_vector_args (f); @@ -1144,6 +1153,7 @@ tcp_timer_retransmit_handler_i (u32 index, u8 is_syn) /* Account for the SYN */ tc->snd_nxt += 1; + tc->rtt_ts = 0; } else { @@ -1232,7 +1242,7 @@ tcp_timer_persist_handler (u32 index) /* Nothing to send */ if (n_bytes <= 0) { - clib_warning ("persist found nothing to send"); + // clib_warning ("persist found nothing to send"); tcp_return_buffer (tm); return; } @@ -1448,7 +1458,7 @@ tcp46_output_inline (vlib_main_t * vm, tcp_connection_t *tc0; tcp_tx_trace_t *t0; tcp_header_t *th0 = 0; - u32 error0 = TCP_ERROR_PKTS_SENT, next0 = TCP_OUTPUT_NEXT_DROP; + u32 error0 = TCP_ERROR_PKTS_SENT, next0 = TCP_OUTPUT_NEXT_IP_LOOKUP; bi0 = from[0]; to_next[0] = bi0; @@ -1527,6 +1537,7 @@ tcp46_output_inline (vlib_main_t * vm, tc0->rto_boff = 0; } +#if 0 /* Make sure we haven't lost route to our peer */ if (PREDICT_FALSE (tc0->last_fib_check < tc0->snd_opts.tsval + TCP_FIB_RECHECK_PERIOD)) @@ -1547,6 +1558,10 @@ tcp46_output_inline (vlib_main_t * vm, /* Use pre-computed dpo to set next node */ next0 = tc0->c_rmt_dpo.dpoi_next_node; vnet_buffer (b0)->ip.adj_index[VLIB_TX] = tc0->c_rmt_dpo.dpoi_index; +#endif + + vnet_buffer (b0)->sw_if_index[VLIB_RX] = 0; + vnet_buffer (b0)->sw_if_index[VLIB_TX] = ~0; b0->flags |= VNET_BUFFER_LOCALLY_ORIGINATED; done: diff --git a/src/vnet/tcp/tcp_packet.h b/src/vnet/tcp/tcp_packet.h index a6f62ee1..9ccfe655 100644 --- a/src/vnet/tcp/tcp_packet.h +++ b/src/vnet/tcp/tcp_packet.h @@ -168,6 +168,7 @@ typedef struct #define TCP_OPTION_LEN_TIMESTAMP 10 #define TCP_OPTION_LEN_SACK_BLOCK 8 +#define TCP_HDR_LEN_MAX 60 #define TCP_WND_MAX 65535U #define TCP_MAX_WND_SCALE 14 /* See RFC 1323 */ #define TCP_OPTS_ALIGN 4 diff --git a/src/vnet/tcp/tcp_test.c b/src/vnet/tcp/tcp_test.c index a461e3b8..510deb4f 100644 --- a/src/vnet/tcp/tcp_test.c +++ b/src/vnet/tcp/tcp_test.c @@ -290,7 +290,7 @@ tcp_test_sack_tx (vlib_main_t * vm, unformat_input_t * input) { tcp_connection_t _tc, *tc = &_tc; sack_block_t *sacks; - int i, verbose = 0; + int i, verbose = 0, expected; while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) { @@ -326,8 +326,12 @@ tcp_test_sack_tx (vlib_main_t * vm, unformat_input_t * input) sacks = vec_dup (tc->snd_sacks); tcp_update_sack_list (tc, 1100, 1200); - TCP_TEST ((vec_len (tc->snd_sacks) == 5), "sack blocks %d expected %d", - vec_len (tc->snd_sacks), 5); + if (verbose) + vlib_cli_output (vm, "add new segment [1100, 1200]\n%U", + format_tcp_sacks, tc); + expected = 5 < TCP_MAX_SACK_BLOCKS ? 
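Besides fixing the _tcp_output_nect typo, the tcp_output.c hunks above add an IP_LOOKUP next node so output packets are handed to ip4-lookup/ip6-lookup. The foreach_..._output_next lists use the usual VPP X-macro idiom to keep the enum and the next-node name strings in sync; a self-contained sketch of that pattern with hypothetical names (the real node registration machinery is omitted):

#include <stdio.h>

/* Each entry: (enum suffix, graph-node name) */
#define foreach_demo_next   \
  _ (DROP, "error-drop")    \
  _ (IP_LOOKUP, "ip4-lookup")

typedef enum
{
#define _(sym, str) DEMO_NEXT_##sym,
  foreach_demo_next
#undef _
    DEMO_N_NEXT,
} demo_next_t;

static char *demo_next_nodes[] = {
#define _(sym, str) str,
  foreach_demo_next
#undef _
};

int
main (void)
{
  int i;
  for (i = 0; i < DEMO_N_NEXT; i++)
    printf ("next %d -> %s\n", i, demo_next_nodes[i]);
  return 0;
}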
6 : 5; + TCP_TEST ((vec_len (tc->snd_sacks) == expected), + "sack blocks %d expected %d", vec_len (tc->snd_sacks), expected); TCP_TEST ((tc->snd_sacks[0].start == 1100), "first sack block start %u expected %u", tc->snd_sacks[0].start, 1100); diff --git a/src/vnet/udp/udp_input.c b/src/vnet/udp/udp_input.c index e6b4f8fc..9a8ff076 100644 --- a/src/vnet/udp/udp_input.c +++ b/src/vnet/udp/udp_input.c @@ -123,7 +123,7 @@ udp4_uri_input_node_fn (vlib_main_t * vm, /* lookup session */ s0 = stream_session_lookup4 (&ip0->dst_address, &ip0->src_address, udp0->dst_port, udp0->src_port, - SESSION_TYPE_IP4_UDP, my_thread_index); + SESSION_TYPE_IP4_UDP); /* no listener */ if (PREDICT_FALSE (s0 == 0)) -- cgit 1.2.3-korg From b7f1faa7fbd4575f28766e552a73810c6de0ace3 Mon Sep 17 00:00:00 2001 From: Dave Barach Date: Tue, 29 Aug 2017 11:43:37 -0400 Subject: Add fixed-size, preallocated pool support Simply call pool_init_fixed(...) before using the pool. Note that fixed, preallocated pools live in individually-mmap'ed address segments, except for the free element bitmap. A large fixed pool can exceed 4gb. Fix tcp buffer allocator leak, remove broken assert Change-Id: I4421082e12a77c41c6e20f7747f3150dcd01fc26 Signed-off-by: Dave Barach --- src/svm/svm_fifo_segment.c | 2 +- src/vnet/session/application_interface.c | 19 +++++ src/vnet/session/session.c | 32 +++----- src/vnet/session/session_cli.c | 4 +- src/vnet/tcp/tcp.c | 24 +++--- src/vnet/tcp/tcp_output.c | 42 ++++++---- src/vppinfra.am | 26 +++--- src/vppinfra/bihash_24_8.h | 2 +- src/vppinfra/pool.c | 131 +++++++++++++++++++++++++++++++ src/vppinfra/pool.h | 119 +++++++++++++++++++++------- src/vppinfra/test_fpool.c | 69 ++++++++++++++++ src/vppinfra/tw_timer_16t_1w_2048sl.h | 1 + src/vppinfra/tw_timer_16t_2w_512sl.h | 3 +- src/vppinfra/tw_timer_1t_3w_1024sl_ov.h | 1 + src/vppinfra/tw_timer_2t_1w_2048sl.h | 1 + src/vppinfra/tw_timer_4t_3w_256sl.h | 1 + src/vppinfra/tw_timer_4t_3w_4sl_ov.h | 1 + src/vppinfra/tw_timer_template.c | 116 +++++++++++++++++++++++++-- src/vppinfra/tw_timer_template.h | 24 +++++- 19 files changed, 520 insertions(+), 98 deletions(-) create mode 100644 src/vppinfra/pool.c create mode 100644 src/vppinfra/test_fpool.c (limited to 'src/vnet/session/application_interface.c') diff --git a/src/svm/svm_fifo_segment.c b/src/svm/svm_fifo_segment.c index 6600a423..97d9976b 100644 --- a/src/svm/svm_fifo_segment.c +++ b/src/svm/svm_fifo_segment.c @@ -461,7 +461,7 @@ svm_fifo_segment_free_fifo (svm_fifo_segment_private_t * s, svm_fifo_t * f, freelist_index = f->freelist_index; - ASSERT (freelist_index > 0 && freelist_index < vec_len (fsh->free_fifos)); + ASSERT (freelist_index < vec_len (fsh->free_fifos)); ssvm_lock_non_recursive (sh, 2); oldheap = ssvm_push_heap (sh); diff --git a/src/vnet/session/application_interface.c b/src/vnet/session/application_interface.c index 566a52d7..8dbc3a1a 100644 --- a/src/vnet/session/application_interface.c +++ b/src/vnet/session/application_interface.c @@ -207,11 +207,22 @@ unformat_vnet_uri (unformat_input_t * input, va_list * args) return 0; } +static u8 *cache_uri; +static session_type_t cache_sst; +static transport_endpoint_t *cache_tep; + int parse_uri (char *uri, session_type_t * sst, transport_endpoint_t * tep) { unformat_input_t _input, *input = &_input; + if (cache_uri && !strncmp (uri, (char *) cache_uri, vec_len (cache_uri))) + { + *sst = cache_sst; + *tep = *cache_tep; + return 0; + } + /* Make sure */ uri = (char *) format (0, "%s%c", uri, 0); @@ -224,6 +235,14 @@ parse_uri (char *uri, 
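Per the commit message above, fixed pools keep the normal pool API: call pool_init_fixed() once, then pool_get()/pool_put() as usual. A minimal sketch of the intended call sequence, assuming a program that has already done clib_mem_init() (the new test_fpool.c later in this patch exercises the same flow):

#include <vppinfra/pool.h>

typedef struct
{
  u32 a, b;
} elt_t;

static void
fixed_pool_demo (void)
{
  elt_t *pool = 0;
  elt_t *e;

  pool_init_fixed (pool, 1024);	/* one mmap'ed segment, 1024 elements */
  pool_get (pool, e);		/* hands out preallocated slots */
  e->a = 1;
  pool_put (pool, e);		/* returns the slot to the free list */
  pool_free (pool);		/* munmap's the whole segment */
}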
session_type_t * sst, transport_endpoint_t * tep) } unformat_free (input); + vec_free (cache_uri); + cache_uri = (u8 *) uri; + cache_sst = *sst; + if (cache_tep) + clib_mem_free (cache_tep); + cache_tep = clib_mem_alloc (sizeof (*tep)); + *cache_tep = *tep; + return 0; } diff --git a/src/vnet/session/session.c b/src/vnet/session/session.c index dcd141f1..17644e29 100644 --- a/src/vnet/session/session.c +++ b/src/vnet/session/session.c @@ -889,32 +889,24 @@ session_manager_main_enable (vlib_main_t * vm) session_vpp_event_queue_allocate (smm, i); /* Preallocate sessions */ - if (num_threads == 1) + if (smm->preallocated_sessions) { - for (i = 0; i < smm->preallocated_sessions; i++) + if (num_threads == 1) { - stream_session_t *ss __attribute__ ((unused)); - pool_get_aligned (smm->sessions[0], ss, CLIB_CACHE_LINE_BYTES); + pool_init_fixed (smm->sessions[0], smm->preallocated_sessions); } - - for (i = 0; i < smm->preallocated_sessions; i++) - pool_put_index (smm->sessions[0], i); - } - else - { - int j; - preallocated_sessions_per_worker = smm->preallocated_sessions / - (num_threads - 1); - - for (j = 1; j < num_threads; j++) + else { - for (i = 0; i < preallocated_sessions_per_worker; i++) + int j; + preallocated_sessions_per_worker = + (1.1 * (f64) smm->preallocated_sessions / + (f64) (num_threads - 1)); + + for (j = 1; j < num_threads; j++) { - stream_session_t *ss __attribute__ ((unused)); - pool_get_aligned (smm->sessions[j], ss, CLIB_CACHE_LINE_BYTES); + pool_init_fixed (smm->sessions[j], + preallocated_sessions_per_worker); } - for (i = 0; i < preallocated_sessions_per_worker; i++) - pool_put_index (smm->sessions[j], i); } } diff --git a/src/vnet/session/session_cli.c b/src/vnet/session/session_cli.c index 028dc9d8..d9f516be 100755 --- a/src/vnet/session/session_cli.c +++ b/src/vnet/session/session_cli.c @@ -115,8 +115,8 @@ unformat_stream_session_id (unformat_input_t * input, va_list * args) { *proto = TRANSPORT_PROTO_UDP; } - else if (unformat (input, "%U:%d->%U:%d", unformat_ip4_address, &lcl->ip4, - lcl_port, unformat_ip4_address, &rmt->ip4, rmt_port)) + if (unformat (input, "%U:%d->%U:%d", unformat_ip4_address, &lcl->ip4, + lcl_port, unformat_ip4_address, &rmt->ip4, rmt_port)) { *is_ip4 = 1; tuple_is_set = 1; diff --git a/src/vnet/tcp/tcp.c b/src/vnet/tcp/tcp.c index 0a826a52..a4c13084 100644 --- a/src/vnet/tcp/tcp.c +++ b/src/vnet/tcp/tcp.c @@ -1150,6 +1150,10 @@ tcp_timer_establish_handler (u32 conn_index) else { tc = tcp_connection_get (conn_index, vlib_get_thread_index ()); + /* note: the connection may have already disappeared */ + if (PREDICT_FALSE (tc == 0)) + return; + ASSERT (tc->state == TCP_STATE_SYN_RCVD); } tc->timers[TCP_TIMER_ESTABLISH] = TCP_TIMER_HANDLE_INVALID; @@ -1244,7 +1248,7 @@ tcp_main_enable (vlib_main_t * vm) vlib_thread_main_t *vtm = vlib_get_thread_main (); clib_error_t *error = 0; u32 num_threads; - int i, thread; + int thread; tcp_connection_t *tc __attribute__ ((unused)); u32 preallocated_connections_per_thread; @@ -1297,21 +1301,17 @@ tcp_main_enable (vlib_main_t * vm) } for (; thread < num_threads; thread++) { - for (i = 0; i < preallocated_connections_per_thread; i++) - pool_get (tm->connections[thread], tc); - - for (i = 0; i < preallocated_connections_per_thread; i++) - pool_put_index (tm->connections[thread], i); + if (preallocated_connections_per_thread) + pool_init_fixed (tm->connections[thread], + preallocated_connections_per_thread); } /* - * Preallocate half-open connections + * Use a preallocated half-open connection pool? 
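The parse_uri() change above adds a one-entry memo: bind and connect paths frequently re-parse the same URI string, so the last successful result is cached in file-scope statics (which assumes single-threaded use on the API/CLI path). A reduced sketch of the same pattern, with plain C strings and a stand-in parser instead of clib vectors and unformat:

#include <stdlib.h>
#include <string.h>

typedef struct
{
  int proto;
  unsigned short port;
} parsed_t;

static char *cache_uri;		/* last successfully parsed URI */
static parsed_t cache_val;	/* ... and its parse result */

static int
do_parse (const char *uri, parsed_t * out)
{
  /* stand-in for the real unformat-based parser */
  out->proto = (uri[0] == 't');
  out->port = 80;
  return 0;
}

int
parse_uri_cached (const char *uri, parsed_t * out)
{
  if (cache_uri && strcmp (uri, cache_uri) == 0)
    {
      *out = cache_val;		/* cache hit: skip re-parsing */
      return 0;
    }
  if (do_parse (uri, out))
    return -1;			/* parse failure: cache unchanged */
  free (cache_uri);		/* replace the single cache entry */
  cache_uri = strdup (uri);
  cache_val = *out;
  return 0;
}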
*/ - for (i = 0; i < tm->preallocated_half_open_connections; i++) - pool_get (tm->half_open_connections, tc); - - for (i = 0; i < tm->preallocated_half_open_connections; i++) - pool_put_index (tm->half_open_connections, i); + if (tm->preallocated_half_open_connections) + pool_init_fixed (tm->half_open_connections, + tm->preallocated_half_open_connections); /* Initialize per worker thread tx buffers (used for control messages) */ vec_validate (tm->tx_buffers, num_threads - 1); diff --git a/src/vnet/tcp/tcp_output.c b/src/vnet/tcp/tcp_output.c index 02555513..15a9dcb4 100644 --- a/src/vnet/tcp/tcp_output.c +++ b/src/vnet/tcp/tcp_output.c @@ -440,13 +440,16 @@ tcp_init_mss (tcp_connection_t * tc) always_inline int tcp_alloc_tx_buffers (tcp_main_t * tm, u8 thread_index, u32 n_free_buffers) { + u32 current_length = vec_len (tm->tx_buffers[thread_index]); + vec_validate (tm->tx_buffers[thread_index], - vec_len (tm->tx_buffers[thread_index]) + n_free_buffers - 1); + current_length + n_free_buffers - 1); _vec_len (tm->tx_buffers[thread_index]) = - vlib_buffer_alloc_from_free_list (vlib_get_main (), - tm->tx_buffers[thread_index], - n_free_buffers, - VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX); + current_length + vlib_buffer_alloc_from_free_list (vlib_get_main (), + tm->tx_buffers + [thread_index], + n_free_buffers, + VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX); /* buffer shortage, report failure */ if (vec_len (tm->tx_buffers[thread_index]) == 0) { @@ -1293,11 +1296,17 @@ tcp_timer_retransmit_handler_i (u32 index, u8 is_syn) if (is_syn) { tc = tcp_half_open_connection_get (index); + /* Note: the connection may have transitioned to ESTABLISHED... */ + if (PREDICT_FALSE (tc == 0)) + return; tc->timers[TCP_TIMER_RETRANSMIT_SYN] = TCP_TIMER_HANDLE_INVALID; } else { tc = tcp_connection_get (index, thread_index); + /* Note: the connection may have been closed and pool_put */ + if (PREDICT_FALSE (tc == 0)) + return; tc->timers[TCP_TIMER_RETRANSMIT] = TCP_TIMER_HANDLE_INVALID; } @@ -1332,25 +1341,27 @@ tcp_timer_retransmit_handler_i (u32 index, u8 is_syn) TCP_EVT_DBG (TCP_EVT_CC_EVT, tc, 1); - /* Send one segment */ + /* Send one segment. 
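The tcp_alloc_tx_buffers() hunk above is the buffer allocator leak fix named in the commit message: the old code set the vector length to only the count returned by the current allocation, orphaning any buffer indices already cached for the thread. A reduced, self-contained sketch of the corrected accounting, with a stand-in for vlib_buffer_alloc_from_free_list() (which likewise returns how many buffers it actually allocated):

#include <stdint.h>

typedef uint32_t u32;

#define CACHE_MAX 64

static u32 tx_cache[CACHE_MAX];	/* per-thread cached buffer indices */
static u32 tx_cache_len;	/* in-use length of tx_cache */

/* Stand-in allocator: returns the number of indices actually written
 * to dst, which may be fewer than n under buffer shortage. */
static u32
alloc_indices (u32 * dst, u32 n)
{
  u32 i;
  for (i = 0; i < n; i++)
    dst[i] = 1000 + i;
  return n;
}

static void
refill_tx_cache (u32 n_free_buffers)
{
  u32 current_length = tx_cache_len;

  /* The fix: new length = old length + newly allocated. Setting the
   * length to just the new allocation (the old code) dropped every
   * index already cached, i.e. leaked buffers. */
  tx_cache_len = current_length
    + alloc_indices (tx_cache + current_length, n_free_buffers);
}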
Note that n_bytes may be zero due to buffer shortfall */ n_bytes = tcp_prepare_retransmit_segment (tc, 0, tc->snd_mss, &b); - ASSERT (n_bytes); - bi = vlib_get_buffer_index (vm, b); + /* TODO be less aggressive about this */ scoreboard_clear (&tc->sack_sb); if (n_bytes == 0) { - clib_warning ("could not retransmit anything"); - clib_warning ("%U", format_tcp_connection, tc, 2); - + if (b) + { + clib_warning ("retransmit fail: %U", format_tcp_connection, tc, + 2); + ASSERT (tc->rto_boff > 1 && tc->snd_una == tc->snd_congestion); + } /* Try again eventually */ tcp_retransmit_timer_set (tc); - ASSERT (0 || (tc->rto_boff > 1 - && tc->snd_una == tc->snd_congestion)); return; } + bi = vlib_get_buffer_index (vm, b); + /* For first retransmit, record timestamp (Eifel detection RFC3522) */ if (tc->rto_boff == 1) tc->snd_rxt_ts = tcp_time_now (); @@ -1378,7 +1389,10 @@ tcp_timer_retransmit_handler_i (u32 index, u8 is_syn) tc->rto = clib_min (tc->rto << 1, TCP_RTO_MAX); if (PREDICT_FALSE (tcp_get_free_buffer_index (tm, &bi))) - return; + { + clib_warning ("tcp_get_free_buffer_index FAIL"); + return; + } b = vlib_get_buffer (vm, bi); tcp_init_buffer (vm, b); tcp_push_hdr_i (tc, b, tc->state, 1); diff --git a/src/vppinfra.am b/src/vppinfra.am index 533bacd6..8f01114c 100644 --- a/src/vppinfra.am +++ b/src/vppinfra.am @@ -18,10 +18,11 @@ TESTS = if ENABLE_TESTS TESTS += test_bihash_template \ test_dlist \ - test_elog \ test_elf \ + test_elog \ test_fifo \ test_format \ + test_fpool \ test_hash \ test_heap \ test_longjmp \ @@ -42,17 +43,16 @@ TESTS += test_bihash_template \ test_zvec endif -TESTS += test_bihash_template - noinst_PROGRAMS = $(TESTS) check_PROGRAMS = $(TESTS) test_bihash_template_SOURCES = vppinfra/test_bihash_template.c test_dlist_SOURCES = vppinfra/test_dlist.c -test_elog_SOURCES = vppinfra/test_elog.c test_elf_SOURCES = vppinfra/test_elf.c +test_elog_SOURCES = vppinfra/test_elog.c test_fifo_SOURCES = vppinfra/test_fifo.c test_format_SOURCES = vppinfra/test_format.c +test_fpool_SOURCES = vppinfra/test_fpool.c test_hash_SOURCES = vppinfra/test_hash.c test_heap_SOURCES = vppinfra/test_heap.c test_longjmp_SOURCES = vppinfra/test_longjmp.c @@ -61,8 +61,8 @@ test_md5_SOURCES = vppinfra/test_md5.c test_mheap_SOURCES = vppinfra/test_mheap.c test_pool_iterate_SOURCES = vppinfra/test_pool_iterate.c test_ptclosure_SOURCES = vppinfra/test_ptclosure.c -test_random_SOURCES = vppinfra/test_random.c test_random_isaac_SOURCES = vppinfra/test_random_isaac.c +test_random_SOURCES = vppinfra/test_random.c test_serialize_SOURCES = vppinfra/test_serialize.c test_slist_SOURCES = vppinfra/test_slist.c test_socket_SOURCES = vppinfra/test_socket.c @@ -76,10 +76,11 @@ test_zvec_SOURCES = vppinfra/test_zvec.c # So we'll need -DDEBUG to enable ASSERTs test_bihash_template_CPPFLAGS = $(AM_CPPFLAGS) -DCLIB_DEBUG test_dlist_CPPFLAGS = $(AM_CPPFLAGS) -DCLIB_DEBUG -test_elog_CPPFLAGS = $(AM_CPPFLAGS) -DCLIB_DEBUG test_elf_CPPFLAGS = $(AM_CPPFLAGS) -DCLIB_DEBUG +test_elog_CPPFLAGS = $(AM_CPPFLAGS) -DCLIB_DEBUG test_fifo_CPPFLAGS = $(AM_CPPFLAGS) -DCLIB_DEBUG test_format_CPPFLAGS = $(AM_CPPFLAGS) -DCLIB_DEBUG +test_fpool_CPPFLAGS = $(AM_CPPFLAGS) -DCLIB_DEBUG test_hash_CPPFLAGS = $(AM_CPPFLAGS) -DCLIB_DEBUG test_heap_CPPFLAGS = $(AM_CPPFLAGS) -DCLIB_DEBUG test_longjmp_CPPFLAGS = $(AM_CPPFLAGS) -DCLIB_DEBUG @@ -90,9 +91,9 @@ test_pool_iterate_CPPFLAGS = $(AM_CPPFLAGS) -DCLIB_DEBUG test_ptclosure_CPPFLAGS = $(AM_CPPFLAGS) -DCLIB_DEBUG test_random_CPPFLAGS = $(AM_CPPFLAGS) -DCLIB_DEBUG test_random_isaac_CPPFLAGS = 
$(AM_CPPFLAGS) -DCLIB_DEBUG -test_socket_CPPFLAGS = $(AM_CPPFLAGS) -DCLIB_DEBUG test_serialize_CPPFLAGS = $(AM_CPPFLAGS) -DCLIB_DEBUG test_slist_CPPFLAGS = $(AM_CPPFLAGS) -DCLIB_DEBUG +test_socket_CPPFLAGS = $(AM_CPPFLAGS) -DCLIB_DEBUG test_time_CPPFLAGS = $(AM_CPPFLAGS) -DCLIB_DEBUG test_timing_wheel_CPPFLAGS = $(AM_CPPFLAGS) -DCLIB_DEBUG test_tw_timer_CPPFLAGS = $(AM_CPPFLAGS) -DCLIB_DEBUG @@ -101,10 +102,11 @@ test_zvec_CPPFLAGS = $(AM_CPPFLAGS) -DCLIB_DEBUG test_bihash_template_LDADD = libvppinfra.la test_dlist_LDADD = libvppinfra.la -test_elog_LDADD = libvppinfra.la test_elf_LDADD = libvppinfra.la +test_elog_LDADD = libvppinfra.la test_fifo_LDADD = libvppinfra.la test_format_LDADD = libvppinfra.la +test_fpool_LDADD = libvppinfra.la test_hash_LDADD = libvppinfra.la test_heap_LDADD = libvppinfra.la test_longjmp_LDADD = libvppinfra.la @@ -113,8 +115,8 @@ test_md5_LDADD = libvppinfra.la test_mheap_LDADD = libvppinfra.la test_pool_iterate_LDADD = libvppinfra.la test_ptclosure_LDADD = libvppinfra.la -test_random_LDADD = libvppinfra.la test_random_isaac_LDADD = libvppinfra.la +test_random_LDADD = libvppinfra.la test_serialize_LDADD = libvppinfra.la test_slist_LDADD = libvppinfra.la test_socket_LDADD = libvppinfra.la @@ -126,10 +128,11 @@ test_zvec_LDADD = libvppinfra.la test_bihash_template_LDFLAGS = -static test_dlist_LDFLAGS = -static -test_elog_LDFLAGS = -static test_elf_LDFLAGS = -static +test_elog_LDFLAGS = -static test_fifo_LDFLAGS = -static test_format_LDFLAGS = -static +test_fpool_LDFLAGS = -static test_hash_LDFLAGS = -static test_heap_LDFLAGS = -static test_longjmp_LDFLAGS = -static @@ -138,8 +141,8 @@ test_md5_LDFLAGS = -static test_mheap_LDFLAGS = -static test_pool_iterate_LDFLAGS = -static test_ptclosure_LDFLAGS = -static -test_random_LDFLAGS = -static test_random_isaac_LDFLAGS = -static +test_random_LDFLAGS = -static test_serialize_LDFLAGS = -static test_slist_LDFLAGS = -static test_socket_LDFLAGS = -static @@ -247,6 +250,7 @@ CLIB_CORE = \ vppinfra/fifo.c \ vppinfra/fheap.c \ vppinfra/format.c \ + vppinfra/pool.c \ vppinfra/graph.c \ vppinfra/hash.c \ vppinfra/heap.c \ diff --git a/src/vppinfra/bihash_24_8.h b/src/vppinfra/bihash_24_8.h index d0be028c..173168fe 100644 --- a/src/vppinfra/bihash_24_8.h +++ b/src/vppinfra/bihash_24_8.h @@ -18,7 +18,7 @@ #define BIHASH_TYPE _24_8 #define BIHASH_KVP_PER_PAGE 4 -#define BIHASH_KVP_CACHE_SIZE 3 +#define BIHASH_KVP_CACHE_SIZE 0 #ifndef __included_bihash_24_8_h__ #define __included_bihash_24_8_h__ diff --git a/src/vppinfra/pool.c b/src/vppinfra/pool.c new file mode 100644 index 00000000..ed83b41a --- /dev/null +++ b/src/vppinfra/pool.c @@ -0,0 +1,131 @@ +/* + * Copyright (c) 2017 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +/* + Copyright (c) 2001, 2002, 2003, 2004 Eliot Dresselhaus + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + "Software"), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE + LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ + +#include + +void +_pool_init_fixed (void **pool_ptr, u32 elt_size, u32 max_elts) +{ + u8 *mmap_base; + u64 vector_size; + u64 free_index_size; + u64 total_size; + u64 page_size; + pool_header_t *fh; + vec_header_t *vh; + u8 *v; + u32 *fi; + u32 i; + u32 set_bits; + + ASSERT (elt_size); + ASSERT (max_elts); + + vector_size = pool_aligned_header_bytes + vec_header_bytes (0) + + (u64) elt_size *max_elts; + + free_index_size = vec_header_bytes (0) + sizeof (u32) * max_elts; + + /* Round up to a cache line boundary */ + vector_size = (vector_size + CLIB_CACHE_LINE_BYTES - 1) + & ~(CLIB_CACHE_LINE_BYTES - 1); + + free_index_size = (free_index_size + CLIB_CACHE_LINE_BYTES - 1) + & ~(CLIB_CACHE_LINE_BYTES - 1); + + total_size = vector_size + free_index_size; + + /* Round up to an even number of pages */ + page_size = clib_mem_get_page_size (); + total_size = (total_size + page_size - 1) & ~(page_size - 1); + + /* mmap demand zero memory */ + + mmap_base = mmap (0, total_size, PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); + + if (mmap_base == MAP_FAILED) + { + clib_unix_warning ("mmap"); + *pool_ptr = 0; + } + + /* First comes the pool header */ + fh = (pool_header_t *) mmap_base; + /* Find the user vector pointer */ + v = (u8 *) (mmap_base + pool_aligned_header_bytes); + /* Finally, the vector header */ + vh = _vec_find (v); + + fh->free_bitmap = 0; /* No free elts (yet) */ + fh->max_elts = max_elts; + fh->mmap_base = mmap_base; + fh->mmap_size = total_size; + + vh->len = max_elts; + + /* Build the free-index vector */ + vh = (vec_header_t *) (v + vector_size); + vh->len = max_elts; + fi = (u32 *) (vh + 1); + + fh->free_indices = fi; + + /* Set the entire free bitmap */ + clib_bitmap_alloc (fh->free_bitmap, max_elts); + memset (fh->free_bitmap, 0xff, vec_len (fh->free_bitmap) * sizeof (uword)); + + /* Clear any extraneous set bits */ + set_bits = vec_len (fh->free_bitmap) * BITS (uword); + + for (i = max_elts; i < set_bits; i++) + fh->free_bitmap = clib_bitmap_set (fh->free_bitmap, i, 0); + + /* Create the initial free vector */ + for (i = 0; i < max_elts; i++) + fi[i] = (max_elts - 1) - i; + + *pool_ptr = v; +} + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/vppinfra/pool.h b/src/vppinfra/pool.h index 56536b77..62d5b54e 100644 --- a/src/vppinfra/pool.h +++ b/src/vppinfra/pool.h @@ 
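_pool_init_fixed() above carves a single anonymous mmap segment into pool header, vector header, element array, and a second vector header plus the free-index array; only the free bitmap lives on the regular heap, as the commit message notes. A self-contained sketch of the size arithmetic, where hdr and vec_hdr are placeholder values, not the real pool_aligned_header_bytes / vec_header_bytes:

#include <stdio.h>
#include <stdint.h>

typedef uint64_t u64;
typedef uint32_t u32;

#define CACHE_LINE 64		/* assumed CLIB_CACHE_LINE_BYTES */

static u64
round_up (u64 x, u64 align)
{
  return (x + align - 1) & ~(align - 1);
}

int
main (void)
{
  u64 elt_size = 64, max_elts = 1 << 20, page = 4096;
  u64 hdr = 128;		/* placeholder pool + vector header bytes */
  u64 vec_hdr = 16;		/* placeholder vec_header_bytes (0) */
  u64 vector_size = round_up (hdr + elt_size * max_elts, CACHE_LINE);
  u64 free_index_size =
    round_up (vec_hdr + sizeof (u32) * max_elts, CACHE_LINE);
  u64 total = round_up (vector_size + free_index_size, page);

  printf ("segment: %llu bytes (~%llu MB)\n",
	  (unsigned long long) total, (unsigned long long) (total >> 20));
  return 0;
}

One million 64-byte elements come to roughly 68 MB here; because the arithmetic is done in u64, a fixed pool can exceed 4 GB, as the commit message points out.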
-56,6 +56,16 @@ typedef struct /** Vector of free indices. One element for each set bit in bitmap. */ u32 *free_indices; + + /* The following fields are set for fixed-size, preallocated pools */ + + /** Maximum size of the pool, in elements */ + u32 max_elts; + + /** mmap segment info: base + length */ + u8 *mmap_base; + u64 mmap_size; + } pool_header_t; /** Align pool header so that pointers are naturally aligned. */ @@ -69,6 +79,15 @@ pool_header (void *v) return vec_aligned_header (v, sizeof (pool_header_t), sizeof (void *)); } +extern void _pool_init_fixed (void **, u32, u32); +extern void fpool_free (void *); + +/** initialize a fixed-size, preallocated pool */ +#define pool_init_fixed(pool,max_elts) \ +{ \ + _pool_init_fixed((void **)&(pool),sizeof(pool[0]),max_elts); \ +} + /** Validate a pool */ always_inline void pool_validate (void *v) @@ -98,7 +117,7 @@ pool_header_validate_index (void *v, uword index) do { \ uword __pool_validate_index = (i); \ vec_validate_ha ((v), __pool_validate_index, \ - pool_aligned_header_bytes, /* align */ 0); \ + pool_aligned_header_bytes, /* align */ 0); \ pool_header_validate_index ((v), __pool_validate_index); \ } while (0) @@ -166,34 +185,40 @@ pool_free_elts (void *v) First search free list. If nothing is free extend vector of objects. */ -#define pool_get_aligned(P,E,A) \ -do { \ - pool_header_t * _pool_var (p) = pool_header (P); \ - uword _pool_var (l); \ - \ - _pool_var (l) = 0; \ - if (P) \ - _pool_var (l) = vec_len (_pool_var (p)->free_indices); \ - \ - if (_pool_var (l) > 0) \ - { \ - /* Return free element from free list. */ \ +#define pool_get_aligned(P,E,A) \ +do { \ + pool_header_t * _pool_var (p) = pool_header (P); \ + uword _pool_var (l); \ + \ + _pool_var (l) = 0; \ + if (P) \ + _pool_var (l) = vec_len (_pool_var (p)->free_indices); \ + \ + if (_pool_var (l) > 0) \ + { \ + /* Return free element from free list. */ \ uword _pool_var (i) = _pool_var (p)->free_indices[_pool_var (l) - 1]; \ - (E) = (P) + _pool_var (i); \ - _pool_var (p)->free_bitmap = \ + (E) = (P) + _pool_var (i); \ + _pool_var (p)->free_bitmap = \ clib_bitmap_andnoti (_pool_var (p)->free_bitmap, _pool_var (i)); \ - _vec_len (_pool_var (p)->free_indices) = _pool_var (l) - 1; \ - } \ - else \ - { \ - /* Nothing on free list, make a new element and return it. */ \ - P = _vec_resize (P, \ - /* length_increment */ 1, \ + _vec_len (_pool_var (p)->free_indices) = _pool_var (l) - 1; \ + } \ + else \ + { \ + /* fixed-size, preallocated pools cannot expand */ \ + if ((P) && _pool_var(p)->max_elts) \ + { \ + clib_warning ("can't expand fixed-size pool"); \ + os_out_of_memory(); \ + } \ + /* Nothing on free list, make a new element and return it. */ \ + P = _vec_resize (P, \ + /* length_increment */ 1, \ /* new size */ (vec_len (P) + 1) * sizeof (P[0]), \ - pool_aligned_header_bytes, \ - /* align */ (A)); \ - E = vec_end (P) - 1; \ - } \ + pool_aligned_header_bytes, \ + /* align */ (A)); \ + E = vec_end (P) - 1; \ + } \ } while (0) /** Allocate an object E from a pool P (unspecified alignment). */ @@ -207,7 +232,11 @@ do { \ \ _pool_var (l) = 0; \ if (P) \ + { \ + if (_pool_var (p)->max_elts) \ + return 0; \ _pool_var (l) = vec_len (_pool_var (p)->free_indices); \ + } \ \ /* Free elements, certainly won't expand */ \ if (_pool_var (l) > 0) \ @@ -248,7 +277,16 @@ do { \ /* Add element to free bitmap and to free list. 
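A consequence of the pool_get_aligned() rework above: when the free list is empty, a dynamic pool falls through to _vec_resize(), but a fixed pool must not, since its element array is a fixed-size mmap segment. A reduced model of that guard, with abort() standing in for os_out_of_memory():

#include <stdio.h>
#include <stdlib.h>

typedef unsigned int u32;

/* Returns nonzero if an element can be produced; mirrors the branch
 * structure of pool_get_aligned() above. */
static int
pool_can_get (u32 n_free, u32 max_elts)
{
  if (n_free > 0)
    return 1;			/* recycle a slot from the free list */
  if (max_elts != 0)
    {
      /* fixed-size, preallocated pools cannot expand */
      fprintf (stderr, "can't expand fixed-size pool\n");
      abort ();			/* stands in for os_out_of_memory() */
    }
  return 1;			/* dynamic pool: _vec_resize() path */
}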
*/ \ _pool_var (p)->free_bitmap = \ clib_bitmap_ori (_pool_var (p)->free_bitmap, _pool_var (l)); \ - vec_add1 (_pool_var (p)->free_indices, _pool_var (l)); \ + /* Preallocated pool? */ \ + if (_pool_var (p)->max_elts) \ + { \ + ASSERT(_pool_var(l) < _pool_var (p)->max_elts); \ + _pool_var(p)->free_indices[_vec_len(_pool_var(p)->free_indices)] = \ + _pool_var(l); \ + _vec_len(_pool_var(p)->free_indices) += 1; \ + } \ + else \ + vec_add1 (_pool_var (p)->free_indices, _pool_var (l)); \ } while (0) /** Free pool element with given index. */ @@ -262,6 +300,17 @@ do { \ #define pool_alloc_aligned(P,N,A) \ do { \ pool_header_t * _p; \ + \ + if ((P)) \ + { \ + _p = pool_header (P); \ + if (_p->max_elts) \ + { \ + clib_warning ("Can't expand fixed-size pool"); \ + os_out_of_memory(); \ + } \ + } \ + \ (P) = _vec_resize ((P), 0, (vec_len (P) + (N)) * sizeof (P[0]), \ pool_aligned_header_bytes, \ (A)); \ @@ -281,8 +330,20 @@ _pool_free (void *v) if (!v) return v; clib_bitmap_free (p->free_bitmap); - vec_free (p->free_indices); - vec_free_h (v, pool_aligned_header_bytes); + + if (p->max_elts) + { + int rv; + + rv = munmap (p->mmap_base, p->mmap_size); + if (rv) + clib_unix_warning ("munmap"); + } + else + { + vec_free (p->free_indices); + vec_free_h (v, pool_aligned_header_bytes); + } return 0; } diff --git a/src/vppinfra/test_fpool.c b/src/vppinfra/test_fpool.c new file mode 100644 index 00000000..e2d67f16 --- /dev/null +++ b/src/vppinfra/test_fpool.c @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2017 Cisco and/or its affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+*/ + +#include + +/* can be a very large size */ +#define NELTS 1024 + +int +main (int argc, char *argv[]) +{ + u32 *junk = 0; + int i; + u32 *tp = 0; + u32 *indices = 0; + + clib_mem_init (0, 3ULL << 30); + + vec_validate (indices, NELTS - 1); + _vec_len (indices) = 0; + + pool_init_fixed (tp, NELTS); + + for (i = 0; i < NELTS; i++) + { + pool_get (tp, junk); + vec_add1 (indices, junk - tp); + *junk = i; + } + + for (i = 0; i < NELTS; i++) + { + junk = pool_elt_at_index (tp, indices[i]); + ASSERT (*junk == i); + } + + fformat (stdout, "%d pool elts before deletes\n", pool_elts (tp)); + + pool_put_index (tp, indices[12]); + pool_put_index (tp, indices[43]); + + fformat (stdout, "%d pool elts after deletes\n", pool_elts (tp)); + + pool_validate (tp); + + pool_free (tp); + return 0; +} + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/vppinfra/tw_timer_16t_1w_2048sl.h b/src/vppinfra/tw_timer_16t_1w_2048sl.h index 66cf7d37..761646b3 100644 --- a/src/vppinfra/tw_timer_16t_1w_2048sl.h +++ b/src/vppinfra/tw_timer_16t_1w_2048sl.h @@ -27,6 +27,7 @@ #undef TW_OVERFLOW_VECTOR #undef TW_FAST_WHEEL_BITMAP #undef TW_TIMER_ALLOW_DUPLICATE_STOP +#undef TW_START_STOP_TRACE_SIZE #define TW_TIMER_WHEELS 1 #define TW_SLOTS_PER_RING 2048 diff --git a/src/vppinfra/tw_timer_16t_2w_512sl.h b/src/vppinfra/tw_timer_16t_2w_512sl.h index 00587b8e..029f529d 100644 --- a/src/vppinfra/tw_timer_16t_2w_512sl.h +++ b/src/vppinfra/tw_timer_16t_2w_512sl.h @@ -27,6 +27,7 @@ #undef TW_OVERFLOW_VECTOR #undef TW_FAST_WHEEL_BITMAP #undef TW_TIMER_ALLOW_DUPLICATE_STOP +#undef TW_START_STOP_TRACE_SIZE #define TW_TIMER_WHEELS 2 #define TW_SLOTS_PER_RING 512 @@ -36,7 +37,7 @@ #define LOG2_TW_TIMERS_PER_OBJECT 4 #define TW_SUFFIX _16t_2w_512sl #define TW_FAST_WHEEL_BITMAP 0 -#define TW_TIMER_ALLOW_DUPLICATE_STOP 0 +#define TW_TIMER_ALLOW_DUPLICATE_STOP 1 #include diff --git a/src/vppinfra/tw_timer_1t_3w_1024sl_ov.h b/src/vppinfra/tw_timer_1t_3w_1024sl_ov.h index e5e4cc19..0b455e02 100644 --- a/src/vppinfra/tw_timer_1t_3w_1024sl_ov.h +++ b/src/vppinfra/tw_timer_1t_3w_1024sl_ov.h @@ -27,6 +27,7 @@ #undef TW_OVERFLOW_VECTOR #undef TW_FAST_WHEEL_BITMAP #undef TW_TIMER_ALLOW_DUPLICATE_STOP +#undef TW_START_STOP_TRACE_SIZE #define TW_TIMER_WHEELS 3 #define TW_SLOTS_PER_RING 1024 diff --git a/src/vppinfra/tw_timer_2t_1w_2048sl.h b/src/vppinfra/tw_timer_2t_1w_2048sl.h index 98b548b3..6ae86688 100644 --- a/src/vppinfra/tw_timer_2t_1w_2048sl.h +++ b/src/vppinfra/tw_timer_2t_1w_2048sl.h @@ -27,6 +27,7 @@ #undef TW_OVERFLOW_VECTOR #undef TW_FAST_WHEEL_BITMAP #undef TW_TIMER_ALLOW_DUPLICATE_STOP +#undef TW_START_STOP_TRACE_SIZE #define TW_TIMER_WHEELS 1 #define TW_SLOTS_PER_RING 2048 diff --git a/src/vppinfra/tw_timer_4t_3w_256sl.h b/src/vppinfra/tw_timer_4t_3w_256sl.h index 07203de8..16c41bcd 100644 --- a/src/vppinfra/tw_timer_4t_3w_256sl.h +++ b/src/vppinfra/tw_timer_4t_3w_256sl.h @@ -27,6 +27,7 @@ #undef TW_OVERFLOW_VECTOR #undef TW_FAST_WHEEL_BITMAP #undef TW_TIMER_ALLOW_DUPLICATE_STOP +#undef TW_START_STOP_TRACE_SIZE #define TW_TIMER_WHEELS 3 #define TW_SLOTS_PER_RING 256 diff --git a/src/vppinfra/tw_timer_4t_3w_4sl_ov.h b/src/vppinfra/tw_timer_4t_3w_4sl_ov.h index 20a01d05..845ffeac 100644 --- a/src/vppinfra/tw_timer_4t_3w_4sl_ov.h +++ b/src/vppinfra/tw_timer_4t_3w_4sl_ov.h @@ -27,6 +27,7 @@ #undef TW_OVERFLOW_VECTOR #undef TW_FAST_WHEEL_BITMAP #undef TW_TIMER_ALLOW_DUPLICATE_STOP +#undef TW_START_STOP_TRACE_SIZE #define TW_TIMER_WHEELS 3 #define 
TW_SLOTS_PER_RING 4 diff --git a/src/vppinfra/tw_timer_template.c b/src/vppinfra/tw_timer_template.c index c0a9685a..aba00142 100644 --- a/src/vppinfra/tw_timer_template.c +++ b/src/vppinfra/tw_timer_template.c @@ -18,6 +18,87 @@ * * */ +#if TW_START_STOP_TRACE_SIZE > 0 + +void TW (tw_timer_trace) (TWT (tw_timer_wheel) * tw, u32 timer_id, + u32 pool_index, u32 handle) +{ + TWT (trace) * t = &tw->traces[tw->trace_index]; + + t->timer_id = timer_id; + t->pool_index = pool_index; + t->handle = handle; + + tw->trace_index++; + if (tw->trace_index == TW_START_STOP_TRACE_SIZE) + { + tw->trace_index = 0; + tw->trace_wrapped++; + } +} + +void TW (tw_search_trace) (TWT (tw_timer_wheel) * tw, u32 handle) +{ + u32 i, start_pos; + TWT (trace) * t; + char *s = "bogus!"; + + /* reverse search for the supplied handle */ + + start_pos = tw->trace_index; + if (start_pos == 0) + start_pos = TW_START_STOP_TRACE_SIZE - 1; + else + start_pos--; + + for (i = start_pos; i > 0; i--) + { + t = &tw->traces[i]; + if (t->handle == handle) + { + switch (t->timer_id) + { + case 0xFF: + s = "stopped"; + break; + case 0xFE: + s = "expired"; + break; + default: + s = "started"; + break; + } + fformat (stderr, "handle 0x%x (%d) %s at trace %d\n", + handle, handle, s, i); + } + } + if (tw->trace_wrapped > 0) + { + for (i = TW_START_STOP_TRACE_SIZE; i >= tw->trace_index; i--) + { + t = &tw->traces[i]; + if (t->handle == handle) + { + switch (t->timer_id) + { + case 0xFF: + s = "stopped"; + break; + case 0xFE: + s = "expired"; + break; + default: + s = "started"; + break; + } + fformat (stderr, "handle 0x%x (%d) %s at trace %d\n", + handle, handle, s, i); + } + } + } +} +#endif /* TW_START_STOP_TRACE_SIZE > 0 */ + static inline u32 TW (make_internal_timer_handle) (u32 pool_index, u32 timer_id) { @@ -127,6 +208,9 @@ TW (tw_timer_start) (TWT (tw_timer_wheel) * tw, u32 pool_index, u32 timer_id, t->expiration_time = tw->current_tick + interval; ts = &tw->overflow; timer_addhead (tw->timers, ts->head_index, t - tw->timers); +#if TW_START_STOP_TRACE_SIZE > 0 + TW (tw_timer_trace) (tw, timer_id, pool_index, t - tw->timers); +#endif return t - tw->timers; } #endif @@ -177,7 +261,9 @@ TW (tw_timer_start) (TWT (tw_timer_wheel) * tw, u32 pool_index, u32 timer_id, ts = &tw->w[TW_TIMER_RING_GLACIER][glacier_ring_offset]; timer_addhead (tw->timers, ts->head_index, t - tw->timers); - +#if TW_START_STOP_TRACE_SIZE > 0 + TW (tw_timer_trace) (tw, timer_id, pool_index, t - tw->timers); +#endif return t - tw->timers; } #endif @@ -193,7 +279,9 @@ TW (tw_timer_start) (TWT (tw_timer_wheel) * tw, u32 pool_index, u32 timer_id, ts = &tw->w[TW_TIMER_RING_SLOW][slow_ring_offset]; timer_addhead (tw->timers, ts->head_index, t - tw->timers); - +#if TW_START_STOP_TRACE_SIZE > 0 + TW (tw_timer_trace) (tw, timer_id, pool_index, t - tw->timers); +#endif return t - tw->timers; } #else @@ -208,6 +296,9 @@ TW (tw_timer_start) (TWT (tw_timer_wheel) * tw, u32 pool_index, u32 timer_id, #if TW_FAST_WHEEL_BITMAP tw->fast_slot_bitmap = clib_bitmap_set (tw->fast_slot_bitmap, fast_ring_offset, 1); +#endif +#if TW_START_STOP_TRACE_SIZE > 0 + TW (tw_timer_trace) (tw, timer_id, pool_index, t - tw->timers); #endif return t - tw->timers; } @@ -265,6 +356,9 @@ void TW (tw_timer_stop) (TWT (tw_timer_wheel) * tw, u32 handle) if (pool_is_free_index (tw->timers, handle)) return; #endif +#if TW_START_STOP_TRACE_SIZE > 0 + TW (tw_timer_trace) (tw, ~0, ~0, handle); +#endif t = pool_elt_at_index (tw->timers, handle); @@ -302,6 +396,7 @@ TW (tw_timer_wheel_init) (TWT (tw_timer_wheel) * 
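The template additions above log every timer start (its real timer_id), stop (0xff) and expiry (0xfe) into a fixed-size circular trace that tw_search_trace() then walks backwards, wrap-around included. A minimal self-contained sketch of such a ring, where TRACE_SIZE is an assumed small value rather than the real TW_START_STOP_TRACE_SIZE:

#include <stdio.h>
#include <stdint.h>

typedef uint32_t u32;
typedef uint8_t u8;

#define TRACE_SIZE 8		/* assumed; compile-time like the template */

typedef struct
{
  u8 timer_id;			/* 0xff = stopped, 0xfe = expired */
  u32 handle;
} trace_t;

static trace_t traces[TRACE_SIZE];
static u32 trace_index, trace_wrapped;

static void
trace_event (u8 timer_id, u32 handle)
{
  traces[trace_index].timer_id = timer_id;
  traces[trace_index].handle = handle;
  if (++trace_index == TRACE_SIZE)
    {
      trace_index = 0;		/* wrap; older entries still searchable */
      trace_wrapped++;
    }
}

int
main (void)
{
  trace_event (0, 42);		/* handle 42 started */
  trace_event (0xff, 42);	/* handle 42 stopped */
  trace_event (0xfe, 43);	/* handle 43 expired */
  printf ("index %u, wrapped %u\n", trace_index, trace_wrapped);
  return 0;
}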
tw, tw->timer_interval = timer_interval_in_seconds; tw->ticks_per_second = 1.0 / timer_interval_in_seconds; tw->first_expires_tick = ~0ULL; + vec_validate (tw->expired_timer_handles, 0); _vec_len (tw->expired_timer_handles) = 0; @@ -476,6 +571,9 @@ static inline new_glacier_ring_offset == 0)) { vec_add1 (callback_vector, t->user_handle); +#if TW_START_STOP_TRACE_SIZE > 0 + TW (tw_timer_trace) (tw, 0xfe, ~0, t - tw->timers); +#endif pool_put (tw->timers, t); } /* Timer moves to the glacier ring */ @@ -536,6 +634,9 @@ static inline t->fast_ring_offset == 0)) { vec_add1 (callback_vector, t->user_handle); +#if TW_START_STOP_TRACE_SIZE > 0 + TW (tw_timer_trace) (tw, 0xfe, ~0, t - tw->timers); +#endif pool_put (tw->timers, t); } /* Timer expires during slow-wheel tick 0 */ @@ -587,6 +688,9 @@ static inline if (PREDICT_FALSE (t->fast_ring_offset == 0)) { vec_add1 (callback_vector, t->user_handle); +#if TW_START_STOP_TRACE_SIZE > 0 + TW (tw_timer_trace) (tw, 0xfe, ~0, t - tw->timers); +#endif pool_put (tw->timers, t); } else /* typical case */ @@ -620,6 +724,9 @@ static inline t = pool_elt_at_index (tw->timers, next_index); next_index = t->next; vec_add1 (callback_vector, t->user_handle); +#if TW_START_STOP_TRACE_SIZE > 0 + TW (tw_timer_trace) (tw, 0xfe, ~0, t - tw->timers); +#endif pool_put (tw->timers, t); } @@ -628,10 +735,7 @@ static inline { /* The callback is optional. We return the u32 * handle vector */ if (tw->expired_timer_callback) - { - tw->expired_timer_callback (callback_vector); - _vec_len (callback_vector) = 0; - } + tw->expired_timer_callback (callback_vector); tw->expired_timer_handles = callback_vector; } diff --git a/src/vppinfra/tw_timer_template.h b/src/vppinfra/tw_timer_template.h index 0404e3f4..0217644d 100644 --- a/src/vppinfra/tw_timer_template.h +++ b/src/vppinfra/tw_timer_template.h @@ -170,6 +170,13 @@ typedef enum } tw_ring_index_t; #endif /* __defined_tw_timer_wheel_slot__ */ +typedef CLIB_PACKED (struct + { + u8 timer_id; + u32 pool_index; + u32 handle; + }) TWT (trace); + typedef struct { /** Timer pool */ @@ -211,11 +218,20 @@ typedef struct /** expired timer callback, receives a vector of handles */ void (*expired_timer_callback) (u32 * expired_timer_handles); - /** vector of expired timers */ + /** vectors of expired timers */ u32 *expired_timer_handles; /** maximum expirations */ u32 max_expirations; + + /** current trace index */ +#if TW_START_STOP_TRACE_SIZE > 0 + /* Start/stop/expire tracing */ + u32 trace_index; + u32 trace_wrapped; + TWT (trace) traces[TW_START_STOP_TRACE_SIZE]; +#endif + } TWT (tw_timer_wheel); u32 TW (tw_timer_start) (TWT (tw_timer_wheel) * tw, @@ -236,6 +252,12 @@ u32 *TW (tw_timer_expire_timers_vec) (TWT (tw_timer_wheel) * tw, f64 now, u32 TW (tw_timer_first_expires_in_ticks) (TWT (tw_timer_wheel) * tw); #endif +#if TW_START_STOP_TRACE_SIZE > 0 +void TW (tw_search_trace) (TWT (tw_timer_wheel) * tw, u32 handle); +void TW (tw_timer_trace) (TWT (tw_timer_wheel) * tw, u32 timer_id, + u32 pool_index, u32 handle); +#endif + /* * fd.io coding-style-patch-verification: ON * -- cgit 1.2.3-korg From 0e4956869b7b919957433f04cc8fe52a2f391251 Mon Sep 17 00:00:00 2001 From: Florin Coras Date: Tue, 19 Sep 2017 22:27:18 -0700 Subject: session: store tep port in net order Change-Id: Ie3a99f09f44ec081d9b88a213bdb8d987fb462de Signed-off-by: Florin Coras --- src/vnet/session/application_interface.c | 12 ++++++------ src/vnet/session/transport.h | 2 +- src/vnet/tcp/tcp.c | 4 ++-- src/vnet/udp/udp.c | 10 +++++----- 4 files changed, 14 insertions(+), 14 
deletions(-) (limited to 'src/vnet/session/application_interface.c') diff --git a/src/vnet/session/application_interface.c b/src/vnet/session/application_interface.c index 8dbc3a1a..7e7449aa 100644 --- a/src/vnet/session/application_interface.c +++ b/src/vnet/session/application_interface.c @@ -92,9 +92,7 @@ vnet_bind_i (u32 app_index, session_type_t sst, return VNET_API_ERROR_APPLICATION_NOT_ATTACHED; } - listener = stream_session_lookup_listener (&tep->ip, - clib_host_to_net_u16 (tep->port), - sst); + listener = stream_session_lookup_listener (&tep->ip, tep->port, sst); if (listener) return VNET_API_ERROR_ADDRESS_IN_USE; @@ -131,9 +129,7 @@ vnet_connect_i (u32 app_index, u32 api_context, session_type_t sst, /* * Figure out if connecting to a local server */ - listener = stream_session_lookup_listener (&tep->ip, - clib_host_to_net_u16 (tep->port), - sst); + listener = stream_session_lookup_listener (&tep->ip, tep->port, sst); if (listener) { server = application_get (listener->app_index); @@ -181,6 +177,7 @@ unformat_vnet_uri (unformat_input_t * input, va_list * args) &tep->port)) { *sst = SESSION_TYPE_IP4_TCP; + tep->port = clib_host_to_net_u16 (tep->port); tep->is_ip4 = 1; return 1; } @@ -188,6 +185,7 @@ unformat_vnet_uri (unformat_input_t * input, va_list * args) &tep->port)) { *sst = SESSION_TYPE_IP4_UDP; + tep->port = clib_host_to_net_u16 (tep->port); tep->is_ip4 = 1; return 1; } @@ -195,12 +193,14 @@ unformat_vnet_uri (unformat_input_t * input, va_list * args) &tep->port)) { *sst = SESSION_TYPE_IP6_UDP; + tep->port = clib_host_to_net_u16 (tep->port); return 1; } if (unformat (input, "tcp://%U/%d", unformat_ip6_address, &tep->ip.ip6, &tep->port)) { *sst = SESSION_TYPE_IP6_TCP; + tep->port = clib_host_to_net_u16 (tep->port); return 1; } diff --git a/src/vnet/session/transport.h b/src/vnet/session/transport.h index e56be338..e2c47949 100644 --- a/src/vnet/session/transport.h +++ b/src/vnet/session/transport.h @@ -78,7 +78,7 @@ typedef enum _transport_proto typedef struct _transport_endpoint { ip46_address_t ip; /** ip address */ - u16 port; /** port in host order */ + u16 port; /** port in net order */ u8 is_ip4; /** 1 if ip4 */ u32 vrf; /** fib table the endpoint is associated with */ } transport_endpoint_t; diff --git a/src/vnet/tcp/tcp.c b/src/vnet/tcp/tcp.c index d43fb149..38a21dbb 100644 --- a/src/vnet/tcp/tcp.c +++ b/src/vnet/tcp/tcp.c @@ -38,7 +38,7 @@ tcp_connection_bind (u32 session_index, transport_endpoint_t * lcl) memset (listener, 0, sizeof (*listener)); listener->c_c_index = listener - tm->listener_pool; - listener->c_lcl_port = clib_host_to_net_u16 (lcl->port); + listener->c_lcl_port = lcl->port; if (lcl->is_ip4) { @@ -701,7 +701,7 @@ tcp_connection_open (transport_endpoint_t * rmt) tc = tcp_half_open_connection_new (); clib_memcpy (&tc->c_rmt_ip, &rmt->ip, sizeof (ip46_address_t)); clib_memcpy (&tc->c_lcl_ip, &lcl_addr, sizeof (ip46_address_t)); - tc->c_rmt_port = clib_host_to_net_u16 (rmt->port); + tc->c_rmt_port = rmt->port; tc->c_lcl_port = clib_host_to_net_u16 (lcl_port); tc->c_is_ip4 = rmt->is_ip4; tc->c_transport_proto = TRANSPORT_PROTO_TCP; diff --git a/src/vnet/udp/udp.c b/src/vnet/udp/udp.c index fedf2cc0..0e0336b5 100644 --- a/src/vnet/udp/udp.c +++ b/src/vnet/udp/udp.c @@ -32,11 +32,11 @@ udp_session_bind_ip4 (u32 session_index, transport_endpoint_t * lcl) pool_get (um->udp_listeners, listener); memset (listener, 0, sizeof (udp_connection_t)); - listener->c_lcl_port = clib_host_to_net_u16 (lcl->port); + listener->c_lcl_port = lcl->port; 
listener->c_lcl_ip4.as_u32 = lcl->ip.ip4.as_u32; listener->c_transport_proto = TRANSPORT_PROTO_UDP; - udp_register_dst_port (um->vlib_main, lcl->port, udp4_uri_input_node.index, - 1 /* is_ipv4 */ ); + udp_register_dst_port (um->vlib_main, clib_net_to_host_u16 (lcl->port), + udp4_uri_input_node.index, 1 /* is_ipv4 */ ); return 0; } @@ -47,10 +47,10 @@ udp_session_bind_ip6 (u32 session_index, transport_endpoint_t * lcl) udp_connection_t *listener; pool_get (um->udp_listeners, listener); - listener->c_lcl_port = clib_host_to_net_u16 (lcl->port); + listener->c_lcl_port = lcl->port; clib_memcpy (&listener->c_lcl_ip6, &lcl->ip.ip6, sizeof (ip6_address_t)); listener->c_transport_proto = TRANSPORT_PROTO_UDP; - udp_register_dst_port (um->vlib_main, lcl->port, + udp_register_dst_port (um->vlib_main, clib_net_to_host_u16 (lcl->port), udp4_uri_input_node.index, 0 /* is_ipv4 */ ); return 0; } -- cgit 1.2.3-korg
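The byte-order convention in this last patch is worth pinning down, since it is easy to convert twice or not at all: transport_endpoint_t.port is now stored in network order, unformat_vnet_uri() converts exactly once at parse time, the tcp/udp bind and session lookup paths use the stored value as-is, and only udp_register_dst_port(), which expects host order, converts back. A small sketch using the libc equivalents of clib_host_to_net_u16 / clib_net_to_host_u16:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>		/* htons/ntohs */

typedef struct
{
  uint16_t port;		/* stored in NET order after this patch */
} endpoint_t;

int
main (void)
{
  endpoint_t tep;

  /* unformat_vnet_uri() converts once, at parse time */
  tep.port = htons (80);

  /* bind/lookup paths now use tep.port directly ... */
  uint16_t lcl_port = tep.port;

  /* ... while the udp bind path converts back for the host-order
   * udp_register_dst_port() API */
  uint16_t host_port = ntohs (tep.port);

  printf ("stored 0x%04x, host %u\n", lcl_port, host_port);
  return 0;
}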