path: root/src/vnet/session
author     Florin Coras <fcoras@cisco.com>       2017-10-02 00:18:51 -0700
committer  Dave Barach <openvpp@barachs.net>     2017-10-10 20:42:50 +0000
commit     cea194d8f973a2f2b5ef72d212533057174cc70a (patch)
tree       6fdd2e8a929c62625d1ad35bfbec342129989aef /src/vnet/session
parent     1f36a93d3d68f5ba6dcda08809394ce757cefd72 (diff)
session: add support for application namespacing
Applications are now provided the option to select the namespace they are to
be attached to and the scope of their attachment. Application namespaces are
meant to:

1) constrain the scope of communication through the network by association
   with source interfaces and/or fib tables that provide the source ips to be
   used and limit the scope of routing

2) provide a namespace-local scope to session layer communication, as opposed
   to the global scope provided by 1). That is, sessions can be established
   without assistance from transport and network layers, although zero/local-host
   ip addresses must still be provided in session establishment messages due to
   existing application idiosyncrasies. This mode of communication uses
   shared-memory fifos (cut-through sessions) exclusively.

If applications request no namespace, they are assigned to the default one,
which in turn uses the default fib. Applications can request access to both
local and global scopes for a namespace. If no scope is specified, the session
layer defaults to the global one. When a sw_if_index is provided for a
namespace, zero-ip (INADDR_ANY) binds are converted to binds to the requested
interface.

Change-Id: Ia0f660bbf7eec7f89673f75b4821fc7c3d58e3d1
Signed-off-by: Florin Coras <fcoras@cisco.com>
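For illustration only (not part of this patch), a minimal sketch of how a
built-in application might use the new attach options. The structure and the
option/flag names (vnet_app_attach_args_t, APP_OPTIONS_NAMESPACE_SECRET,
APP_OPTIONS_FLAGS_USE_LOCAL_SCOPE, APP_OPTIONS_FLAGS_USE_GLOBAL_SCOPE) come
from the diff below; the namespace id "red", the secret value and the helper
function itself are hypothetical.

/* Sketch: built-in app attaching to a pre-created namespace. The namespace
 * would be created beforehand, e.g. "app ns add id red secret 1234
 * sw_if_index 1" (CLI added by this patch). */
#include <vnet/session/application_interface.h>

static clib_error_t *
demo_attach_to_namespace (session_cb_vft_t * cb_vft)
{
  u64 options[16];              /* sized to match the binary API's options[16] */
  vnet_app_attach_args_t a;

  memset (&a, 0, sizeof (a));
  memset (options, 0, sizeof (options));

  /* Request attachment to the "red" namespace using its shared secret */
  a.namespace_id = format (0, "red");
  options[APP_OPTIONS_NAMESPACE_SECRET] = 1234;

  /* Ask for both scopes: cut-through fifos for local peers, regular
   * transport sessions otherwise. If neither flag is set, the session
   * layer defaults to global scope. */
  options[APP_OPTIONS_FLAGS] = APP_OPTIONS_FLAGS_USE_LOCAL_SCOPE
    | APP_OPTIONS_FLAGS_USE_GLOBAL_SCOPE | APP_OPTIONS_FLAGS_BUILTIN_APP;

  a.options = options;
  a.session_cb_vft = cb_fns_placeholder (cb_vft);   /* hypothetical: pass app callbacks */
  a.session_cb_vft = cb_vft;
  /* remaining fields (e.g. api_client_index) omitted for brevity */
  return vnet_application_attach (&a);
}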
Diffstat (limited to 'src/vnet/session')
-rw-r--r--  src/vnet/session/application.c             158
-rw-r--r--  src/vnet/session/application.h              34
-rw-r--r--  src/vnet/session/application_interface.c   380
-rw-r--r--  src/vnet/session/application_interface.h    36
-rw-r--r--  src/vnet/session/application_namespace.c   293
-rw-r--r--  src/vnet/session/application_namespace.h    83
-rw-r--r--  src/vnet/session/segment_manager.c           2
-rw-r--r--  src/vnet/session/session.api                51
-rw-r--r--  src/vnet/session/session.c                  91
-rw-r--r--  src/vnet/session/session.h                  41
-rwxr-xr-x  src/vnet/session/session_api.c             153
-rwxr-xr-x  src/vnet/session/session_cli.c              36
-rw-r--r--  src/vnet/session/session_debug.h            17
-rw-r--r--  src/vnet/session/session_lookup.c          859
-rw-r--r--  src/vnet/session/session_lookup.h          126
-rw-r--r--  src/vnet/session/session_node.c              6
-rw-r--r--  src/vnet/session/session_table.c           124
-rw-r--r--  src/vnet/session/session_table.h            61
-rw-r--r--  src/vnet/session/session_test.c            473
-rw-r--r--  src/vnet/session/stream_session.h           39
-rw-r--r--  src/vnet/session/transport.h                29
21 files changed, 2462 insertions, 630 deletions
diff --git a/src/vnet/session/application.c b/src/vnet/session/application.c
index 2b789c5f420..75d3cfb2e33 100644
--- a/src/vnet/session/application.c
+++ b/src/vnet/session/application.c
@@ -15,6 +15,7 @@
#include <vnet/session/application.h>
#include <vnet/session/application_interface.h>
+#include <vnet/session/application_namespace.h>
#include <vnet/session/session.h>
/**
@@ -32,6 +33,46 @@ static uword *app_by_api_client_index;
*/
static u32 default_app_evt_queue_size = 128;
+static u8 *
+app_get_name_from_reg_index (application_t * app)
+{
+ u8 *app_name;
+
+ vl_api_registration_t *regp;
+ regp = vl_api_client_index_to_registration (app->api_client_index);
+ if (!regp)
+ app_name = format (0, "builtin-%d%c", app->index, 0);
+ else
+ app_name = format (0, "%s%c", regp->name, 0);
+
+ return app_name;
+}
+
+u32
+application_session_table (application_t * app, u8 fib_proto)
+{
+ app_namespace_t *app_ns;
+ app_ns = app_namespace_get (app->ns_index);
+ if (!application_has_global_scope (app))
+ return APP_INVALID_INDEX;
+ if (fib_proto == FIB_PROTOCOL_IP4)
+ return session_lookup_get_index_for_fib (fib_proto,
+ app_ns->ip4_fib_index);
+ else
+ return session_lookup_get_index_for_fib (fib_proto,
+ app_ns->ip6_fib_index);
+}
+
+u32
+application_local_session_table (application_t * app)
+{
+ app_namespace_t *app_ns;
+ if (!application_has_local_scope (app))
+ return APP_INVALID_INDEX;
+ app_ns = app_namespace_get (app->ns_index);
+ return app_ns->local_table_index;
+}
+
int
application_api_queue_is_full (application_t * app)
{
@@ -50,6 +91,21 @@ application_api_queue_is_full (application_t * app)
return 0;
}
+/**
+ * Returns app name
+ *
+ * Since the name is not stored per app, we generate it on the fly. It is
+ * the caller's responsibility to free the vector
+ */
+u8 *
+application_name_from_index (u32 app_index)
+{
+ application_t *app = application_get (app_index);
+ if (!app)
+ return 0;
+ return app_get_name_from_reg_index (app);
+}
+
static void
application_table_add (application_t * app)
{
@@ -135,7 +191,6 @@ application_del (application_t * app)
segment_manager_init_del (sm);
}
-
/* If first segment manager is used by a listener */
if (app->first_segment_manager != APP_INVALID_SEGMENT_MANAGER_INDEX
&& app->first_segment_manager != app->connects_seg_manager)
@@ -180,7 +235,9 @@ application_init (application_t * app, u32 api_client_index, u64 * options,
app_evt_queue_size = options[APP_EVT_QUEUE_SIZE] > 0 ?
options[APP_EVT_QUEUE_SIZE] : default_app_evt_queue_size;
- /* Setup segment manager */
+ /*
+ * Setup segment manager
+ */
sm = segment_manager_new ();
sm->app_index = app->index;
props = &app->sm_properties;
@@ -203,10 +260,19 @@ application_init (application_t * app, u32 api_client_index, u64 * options,
return rv;
sm->first_is_protected = 1;
+ /*
+ * Setup application
+ */
app->first_segment_manager = segment_manager_index (sm);
app->api_client_index = api_client_index;
app->flags = options[APP_OPTIONS_FLAGS];
app->cb_fns = *cb_fns;
+ app->ns_index = options[APP_OPTIONS_NAMESPACE];
+
+ /* If no scope enabled, default to global */
+ if (!application_has_global_scope (app)
+ && !application_has_local_scope (app))
+ app->flags |= APP_OPTIONS_FLAGS_USE_GLOBAL_SCOPE;
/* Allocate app event queue in the first shared-memory segment */
app->event_queue = segment_manager_alloc_queue (sm, app_evt_queue_size);
@@ -223,6 +289,8 @@ application_init (application_t * app, u32 api_client_index, u64 * options,
application_t *
application_get (u32 index)
{
+ if (index == APP_INVALID_INDEX)
+ return 0;
return pool_elt_at_index (app_pool, index);
}
@@ -269,17 +337,19 @@ application_alloc_segment_manager (application_t * app)
* it's own specific listening connection.
*/
int
-application_start_listen (application_t * srv, session_type_t session_type,
- transport_endpoint_t * tep, u64 * res)
+application_start_listen (application_t * srv, session_endpoint_t * sep,
+ u64 * res)
{
segment_manager_t *sm;
stream_session_t *s;
u64 handle;
+ session_type_t sst;
- s = listen_session_new (session_type);
+ sst = session_type_from_proto_and_ip (sep->transport_proto, sep->is_ip4);
+ s = listen_session_new (sst);
s->app_index = srv->index;
- if (stream_session_listen (s, tep))
+ if (stream_session_listen (s, sep))
goto err;
/* Allocate segment manager. All sessions derived out of a listen session
@@ -341,15 +411,15 @@ application_stop_listen (application_t * srv, u64 handle)
}
int
-application_open_session (application_t * app, session_type_t sst,
- transport_endpoint_t * tep, u32 api_context)
+application_open_session (application_t * app, session_endpoint_t * sep,
+ u32 api_context)
{
segment_manager_t *sm;
transport_connection_t *tc = 0;
int rv;
/* Make sure we have a segment manager for connects */
- if (app->connects_seg_manager == (u32) ~ 0)
+ if (app->connects_seg_manager == APP_INVALID_SEGMENT_MANAGER_INDEX)
{
sm = application_alloc_segment_manager (app);
if (sm == 0)
@@ -357,7 +427,7 @@ application_open_session (application_t * app, session_type_t sst,
app->connects_seg_manager = segment_manager_index (sm);
}
- if ((rv = stream_session_open (app->index, sst, tep, &tc)))
+ if ((rv = stream_session_open (app->index, sep, &tc)))
return rv;
/* Store api_context for when the reply comes. Not the nicest thing
@@ -384,21 +454,6 @@ application_get_listen_segment_manager (application_t * app,
return segment_manager_get (*smp);
}
-static u8 *
-app_get_name_from_reg_index (application_t * app)
-{
- u8 *app_name;
-
- vl_api_registration_t *regp;
- regp = vl_api_client_index_to_registration (app->api_client_index);
- if (!regp)
- app_name = format (0, "builtin-%d%c", app->index, 0);
- else
- app_name = format (0, "%s%c", regp->name, 0);
-
- return app_name;
-}
-
int
application_is_proxy (application_t * app)
{
@@ -420,6 +475,18 @@ application_add_segment_notify (u32 app_index, u32 fifo_segment_index)
seg_size);
}
+u8
+application_has_local_scope (application_t * app)
+{
+ return app->flags & APP_OPTIONS_FLAGS_USE_LOCAL_SCOPE;
+}
+
+u8
+application_has_global_scope (application_t * app)
+{
+ return app->flags & APP_OPTIONS_FLAGS_USE_GLOBAL_SCOPE;
+}
+
u8 *
format_application_listener (u8 * s, va_list * args)
{
@@ -500,7 +567,7 @@ application_format_connects (application_t * app, int verbose)
session_index = fifo->master_session_index;
thread_index = fifo->master_thread_index;
- session = stream_session_get (session_index, thread_index);
+ session = session_get (session_index, thread_index);
str = format (0, "%U", format_stream_session, session, verbose);
if (verbose)
@@ -526,27 +593,33 @@ format_application (u8 * s, va_list * args)
{
application_t *app = va_arg (*args, application_t *);
CLIB_UNUSED (int verbose) = va_arg (*args, int);
+ const u8 *app_ns_name;
u8 *app_name;
if (app == 0)
{
if (verbose)
- s = format (s, "%-10s%-20s%-15s%-15s%-15s%-15s", "Index", "Name",
- "API Client", "Add seg size", "Rx fifo size",
+ s = format (s, "%-10s%-20s%-15s%-15s%-15s%-15s%-15s", "Index", "Name",
+ "Namespace", "API Client", "Add seg size", "Rx fifo size",
"Tx fifo size");
else
- s = format (s, "%-10s%-20s%-20s", "Index", "Name", "API Client");
+ s =
+ format (s, "%-10s%-20s%-15s%-20s", "Index", "Name", "Namespace",
+ "API Client");
return s;
}
app_name = app_get_name_from_reg_index (app);
+ app_ns_name = app_namespace_id_from_index (app->ns_index);
if (verbose)
- s = format (s, "%-10d%-20s%-15d%-15d%-15d%-15d", app->index, app_name,
- app->api_client_index, app->sm_properties.add_segment_size,
- app->sm_properties.rx_fifo_size,
- app->sm_properties.tx_fifo_size);
+ s =
+ format (s, "%-10d%-20s%-15s%-15d%-15d%-15d%-15d", app->index, app_name,
+ app_ns_name, app->api_client_index,
+ app->sm_properties.add_segment_size,
+ app->sm_properties.rx_fifo_size,
+ app->sm_properties.tx_fifo_size);
else
- s = format (s, "%-10d%-20s%-20d", app->index, app_name,
+ s = format (s, "%-10d%-20s%-15s%-20d", app->index, app_name, app_ns_name,
app->api_client_index);
return s;
}
@@ -560,10 +633,7 @@ show_app_command_fn (vlib_main_t * vm, unformat_input_t * input,
int do_client = 0;
int verbose = 0;
- if (!session_manager_is_enabled ())
- {
- clib_error_return (0, "session layer is not enabled");
- }
+ session_cli_return_if_not_enabled ();
while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
{
@@ -627,13 +697,11 @@ show_app_command_fn (vlib_main_t * vm, unformat_input_t * input,
if (!do_server && !do_client)
{
vlib_cli_output (vm, "%U", format_application, 0, verbose);
- pool_foreach (app, app_pool, (
- {
- vlib_cli_output (vm, "%U",
- format_application, app,
- verbose);
- }
- ));
+ /* *INDENT-OFF* */
+ pool_foreach (app, app_pool, ({
+ vlib_cli_output (vm, "%U", format_application, app, verbose);
+ }));
+ /* *INDENT-ON* */
}
return 0;
diff --git a/src/vnet/session/application.h b/src/vnet/session/application.h
index e030c376d83..00a5554dd23 100644
--- a/src/vnet/session/application.h
+++ b/src/vnet/session/application.h
@@ -19,7 +19,7 @@
#include <vnet/vnet.h>
#include <vnet/session/session.h>
#include <vnet/session/segment_manager.h>
-
+#include <vnet/session/application_namespace.h>
typedef enum
{
APP_SERVER,
@@ -36,7 +36,7 @@ typedef struct _stream_session_cb_vft
/** Notify server of newly accepted session */
int (*session_accept_callback) (stream_session_t * new_session);
- /* Connection request callback */
+ /** Connection request callback */
int (*session_connected_callback) (u32 app_index, u32 opaque,
stream_session_t * s, u8 code);
@@ -46,10 +46,10 @@ typedef struct _stream_session_cb_vft
/** Notify app that session was reset */
void (*session_reset_callback) (stream_session_t * s);
- /* Direct RX callback, for built-in servers */
+ /** Direct RX callback, for built-in servers */
int (*builtin_server_rx_callback) (stream_session_t * session);
- /* Redirect connection to local server */
+ /** Redirect connection to local server */
int (*redirect_connect_callback) (u32 api_client_index, void *mp);
} session_cb_vft_t;
@@ -68,6 +68,9 @@ typedef struct _application
/** Binary API connection index, ~0 if internal */
u32 api_client_index;
+ /** Namespace the application belongs to */
+ u32 ns_index;
+
/** Application listens for events on this svm queue */
unix_shared_memory_queue_t *event_queue;
@@ -95,25 +98,24 @@ typedef struct _application
segment_manager_properties_t sm_properties;
} application_t;
+#define APP_INVALID_INDEX ((u32)~0)
+#define APP_NS_INVALID_INDEX ((u32)~0)
#define APP_INVALID_SEGMENT_MANAGER_INDEX ((u32) ~0)
application_t *application_new ();
-int
-application_init (application_t * app, u32 api_client_index, u64 * options,
- session_cb_vft_t * cb_fns);
+int application_init (application_t * app, u32 api_client_index,
+ u64 * options, session_cb_vft_t * cb_fns);
void application_del (application_t * app);
application_t *application_get (u32 index);
application_t *application_get_if_valid (u32 index);
application_t *application_lookup (u32 api_client_index);
u32 application_get_index (application_t * app);
-int
-application_start_listen (application_t * app, session_type_t session_type,
- transport_endpoint_t * tep, u64 * handle);
+int application_start_listen (application_t * app,
+ session_endpoint_t * tep, u64 * handle);
int application_stop_listen (application_t * srv, u64 handle);
-int
-application_open_session (application_t * app, session_type_t sst,
- transport_endpoint_t * tep, u32 api_context);
+int application_open_session (application_t * app, session_endpoint_t * tep,
+ u32 api_context);
int application_api_queue_is_full (application_t * app);
segment_manager_t *application_get_listen_segment_manager (application_t *
@@ -124,6 +126,12 @@ segment_manager_t *application_get_connect_segment_manager (application_t *
app);
int application_is_proxy (application_t * app);
int application_add_segment_notify (u32 app_index, u32 fifo_segment_index);
+u32 application_session_table (application_t * app, u8 fib_proto);
+u32 application_local_session_table (application_t * app);
+u8 *application_name_from_index (u32 app_index);
+
+u8 application_has_local_scope (application_t * app);
+u8 application_has_global_scope (application_t * app);
#endif /* SRC_VNET_SESSION_APPLICATION_H_ */
diff --git a/src/vnet/session/application_interface.c b/src/vnet/session/application_interface.c
index 7e7449aa161..a0dff90565a 100644
--- a/src/vnet/session/application_interface.c
+++ b/src/vnet/session/application_interface.c
@@ -17,45 +17,37 @@
#include <vnet/session/session.h>
#include <vlibmemory/api.h>
#include <vnet/dpo/load_balance.h>
-#include <vnet/fib/ip4_fib.h>
/** @file
VPP's application/session API bind/unbind/connect/disconnect calls
*/
static u8
-ip_is_zero (ip46_address_t * ip46_address, u8 is_ip4)
+session_endpoint_is_local (session_endpoint_t * sep)
{
- if (is_ip4)
- return (ip46_address->ip4.as_u32 == 0);
- else
- return (ip46_address->as_u64[0] == 0 && ip46_address->as_u64[1] == 0);
+ return (ip_is_zero (&sep->ip, sep->is_ip4)
+ || ip_is_local_host (&sep->ip, sep->is_ip4));
}
static u8
-ip_is_local (ip46_address_t * ip46_address, u8 is_ip4)
+session_endpoint_is_zero (session_endpoint_t * sep)
{
- fib_node_index_t fei;
- fib_entry_flag_t flags;
- fib_prefix_t prefix;
+ return ip_is_zero (&sep->ip, sep->is_ip4);
+}
- /* Check if requester is local */
- if (is_ip4)
- {
- prefix.fp_len = 32;
- prefix.fp_proto = FIB_PROTOCOL_IP4;
- }
- else
+u8
+session_endpoint_in_ns (session_endpoint_t * sep)
+{
+ u8 is_zero = ip_is_zero (&sep->ip, sep->is_ip4);
+ if (!is_zero && sep->sw_if_index != ENDPOINT_INVALID_INDEX
+ && !ip_interface_has_address (sep->sw_if_index, &sep->ip, sep->is_ip4))
{
- prefix.fp_len = 128;
- prefix.fp_proto = FIB_PROTOCOL_IP6;
+ clib_warning ("sw_if_index %u not configured with ip %U",
+ sep->sw_if_index, format_ip46_address, &sep->ip,
+ sep->is_ip4);
+ return 0;
}
-
- clib_memcpy (&prefix.fp_addr, ip46_address, sizeof (ip46_address_t));
- fei = fib_table_lookup (0, &prefix);
- flags = fib_entry_get_flags (fei);
-
- return (flags & FIB_ENTRY_FLAG_LOCAL);
+ return (is_zero || ip_is_local (sep->fib_index, &sep->ip, sep->is_ip4));
}
int
@@ -78,76 +70,190 @@ api_parse_session_handle (u64 handle, u32 * session_index, u32 * thread_index)
return 0;
}
-int
-vnet_bind_i (u32 app_index, session_type_t sst,
- transport_endpoint_t * tep, u64 * handle)
+static void
+session_endpoint_update_for_app (session_endpoint_t * sep,
+ application_t * app)
+{
+ app_namespace_t *app_ns;
+ app_ns = app_namespace_get (app->ns_index);
+ if (app_ns)
+ {
+ /* Ask transport and network to bind to/connect using local interface
+ * that "supports" app's namespace. This will fix our local connection
+ * endpoint.
+ */
+ sep->sw_if_index = app_ns->sw_if_index;
+ sep->fib_index =
+ sep->is_ip4 ? app_ns->ip4_fib_index : app_ns->ip6_fib_index;
+ }
+}
+
+static int
+vnet_bind_i (u32 app_index, session_endpoint_t * sep, u64 * handle)
{
application_t *app;
- stream_session_t *listener;
+ u32 table_index, listener_index;
+ int rv, have_local = 0;
app = application_get_if_valid (app_index);
if (!app)
{
- clib_warning ("app not attached");
+ SESSION_DBG ("app not attached");
return VNET_API_ERROR_APPLICATION_NOT_ATTACHED;
}
- listener = stream_session_lookup_listener (&tep->ip, tep->port, sst);
- if (listener)
+ session_endpoint_update_for_app (sep, app);
+ if (!session_endpoint_in_ns (sep))
+ return VNET_API_ERROR_INVALID_VALUE_2;
+
+ table_index = application_session_table (app,
+ session_endpoint_fib_proto (sep));
+ listener_index = session_lookup_session_endpoint (table_index, sep);
+ if (listener_index != SESSION_INVALID_INDEX)
return VNET_API_ERROR_ADDRESS_IN_USE;
- if (!ip_is_zero (&tep->ip, tep->is_ip4)
- && !ip_is_local (&tep->ip, tep->is_ip4))
- return VNET_API_ERROR_INVALID_VALUE_2;
+ /*
+ * Add session endpoint to local session table. Only binds to "inaddr_any"
+ * (i.e., zero address) are added to local scope table.
+ */
+ if (application_has_local_scope (app) && session_endpoint_is_zero (sep))
+ {
+ table_index = application_local_session_table (app);
+ listener_index = session_lookup_session_endpoint (table_index, sep);
+ if (listener_index != SESSION_INVALID_INDEX)
+ return VNET_API_ERROR_ADDRESS_IN_USE;
+ session_lookup_add_session_endpoint (table_index, sep, app->index);
+ *handle = session_lookup_local_listener_make_handle (sep);
+ have_local = 1;
+ }
+
+ if (!application_has_global_scope (app))
+ return (have_local - 1);
+
+ /*
+ * Add session endpoint to global session table
+ */
/* Setup listen path down to transport */
- return application_start_listen (app, sst, tep, handle);
+ rv = application_start_listen (app, sep, handle);
+ if (rv && have_local)
+ session_lookup_del_session_endpoint (table_index, sep);
+ return rv;
}
int
vnet_unbind_i (u32 app_index, u64 handle)
{
application_t *app = application_get_if_valid (app_index);
+ stream_session_t *listener = 0;
+ u32 table_index;
if (!app)
{
- clib_warning ("app (%d) not attached", app_index);
+ SESSION_DBG ("app (%d) not attached", app_index);
return VNET_API_ERROR_APPLICATION_NOT_ATTACHED;
}
- /* Clear the listener */
- return application_stop_listen (app, handle);
+ /*
+ * Clean up local session table. If we have a listener session use it to
+ * find the port and proto. If not, the handle must be a local table handle
+ * so parse it.
+ */
+
+ if (application_has_local_scope (app))
+ {
+ session_endpoint_t sep = SESSION_ENDPOINT_NULL;
+ if (!session_lookup_local_is_handle (handle))
+ listener = listen_session_get_from_handle (handle);
+ if (listener)
+ {
+ if (listen_session_get_local_session_endpoint (listener, &sep))
+ {
+ clib_warning ("broken listener");
+ return -1;
+ }
+ }
+ else
+ {
+ if (session_lookup_local_listener_parse_handle (handle, &sep))
+ {
+ clib_warning ("can't parse handle");
+ return -1;
+ }
+ }
+ table_index = application_local_session_table (app);
+ session_lookup_del_session_endpoint (table_index, &sep);
+ }
+
+ /*
+ * Clear the global scope table of the listener
+ */
+ if (application_has_global_scope (app))
+ return application_stop_listen (app, handle);
+ return 0;
+}
+
+static int
+app_connect_redirect (application_t * server, void *mp)
+{
+ return server->cb_fns.redirect_connect_callback (server->api_client_index,
+ mp);
}
int
-vnet_connect_i (u32 app_index, u32 api_context, session_type_t sst,
- transport_endpoint_t * tep, void *mp)
+vnet_connect_i (u32 app_index, u32 api_context, session_endpoint_t * sep,
+ void *mp)
{
- stream_session_t *listener;
application_t *server, *app;
+ u32 table_index;
+
+ if (session_endpoint_is_zero (sep))
+ return VNET_API_ERROR_INVALID_VALUE;
+
+ app = application_get (app_index);
+ session_endpoint_update_for_app (sep, app);
/*
- * Figure out if connecting to a local server
+ * First check the the local scope for locally attached destinations.
+ * If we have local scope, we pass *all* connects through it since we may
+ * have special policy rules even for non-local destinations, think proxy.
*/
- listener = stream_session_lookup_listener (&tep->ip, tep->port, sst);
- if (listener)
+ if (application_has_local_scope (app))
{
- server = application_get (listener->app_index);
-
+ table_index = application_local_session_table (app);
+ app_index = session_lookup_local_session_endpoint (table_index, sep);
+ server = application_get (app_index);
/*
* Server is willing to have a direct fifo connection created
* instead of going through the state machine, etc.
*/
- if (server->flags & APP_OPTIONS_FLAGS_USE_FIFO)
- return server->cb_fns.
- redirect_connect_callback (server->api_client_index, mp);
+ if (server && (server->flags & APP_OPTIONS_FLAGS_ACCEPT_REDIRECT))
+ return app_connect_redirect (server, mp);
}
/*
- * Not connecting to a local server. Create regular session
+ * If nothing found, check the global scope for locally attached
+ * destinations. Make sure first that we're allowed to.
*/
- app = application_get (app_index);
- return application_open_session (app, sst, tep, api_context);
+ if (session_endpoint_is_local (sep))
+ return VNET_API_ERROR_SESSION_CONNECT;
+
+ if (!application_has_global_scope (app))
+ return VNET_API_ERROR_APP_CONNECT_SCOPE;
+
+ table_index = application_session_table (app,
+ session_endpoint_fib_proto (sep));
+ app_index = session_lookup_session_endpoint (table_index, sep);
+ server = application_get (app_index);
+ if (server && (server->flags & APP_OPTIONS_FLAGS_ACCEPT_REDIRECT))
+ return app_connect_redirect (server, mp);
+
+ /*
+ * Not connecting to a local server, propagate to transport
+ */
+ if (application_open_session (app, sep, api_context))
+ return VNET_API_ERROR_SESSION_CONNECT;
+ return 0;
}
/**
@@ -170,37 +276,38 @@ vnet_connect_i (u32 app_index, u32 api_context, session_type_t sst,
uword
unformat_vnet_uri (unformat_input_t * input, va_list * args)
{
- session_type_t *sst = va_arg (*args, session_type_t *);
- transport_endpoint_t *tep = va_arg (*args, transport_endpoint_t *);
+ session_endpoint_t *sep = va_arg (*args, session_endpoint_t *);
- if (unformat (input, "tcp://%U/%d", unformat_ip4_address, &tep->ip.ip4,
- &tep->port))
+ if (unformat (input, "tcp://%U/%d", unformat_ip4_address, &sep->ip.ip4,
+ &sep->port))
{
- *sst = SESSION_TYPE_IP4_TCP;
- tep->port = clib_host_to_net_u16 (tep->port);
- tep->is_ip4 = 1;
+ sep->transport_proto = TRANSPORT_PROTO_TCP;
+ sep->port = clib_host_to_net_u16 (sep->port);
+ sep->is_ip4 = 1;
return 1;
}
- if (unformat (input, "udp://%U/%d", unformat_ip4_address, &tep->ip.ip4,
- &tep->port))
+ if (unformat (input, "udp://%U/%d", unformat_ip4_address, &sep->ip.ip4,
+ &sep->port))
{
- *sst = SESSION_TYPE_IP4_UDP;
- tep->port = clib_host_to_net_u16 (tep->port);
- tep->is_ip4 = 1;
+ sep->transport_proto = TRANSPORT_PROTO_UDP;
+ sep->port = clib_host_to_net_u16 (sep->port);
+ sep->is_ip4 = 1;
return 1;
}
- if (unformat (input, "udp://%U/%d", unformat_ip6_address, &tep->ip.ip6,
- &tep->port))
+ if (unformat (input, "udp://%U/%d", unformat_ip6_address, &sep->ip.ip6,
+ &sep->port))
{
- *sst = SESSION_TYPE_IP6_UDP;
- tep->port = clib_host_to_net_u16 (tep->port);
+ sep->transport_proto = TRANSPORT_PROTO_UDP;
+ sep->port = clib_host_to_net_u16 (sep->port);
+ sep->is_ip4 = 0;
return 1;
}
- if (unformat (input, "tcp://%U/%d", unformat_ip6_address, &tep->ip.ip6,
- &tep->port))
+ if (unformat (input, "tcp://%U/%d", unformat_ip6_address, &sep->ip.ip6,
+ &sep->port))
{
- *sst = SESSION_TYPE_IP6_TCP;
- tep->port = clib_host_to_net_u16 (tep->port);
+ sep->transport_proto = TRANSPORT_PROTO_TCP;
+ sep->port = clib_host_to_net_u16 (sep->port);
+ sep->is_ip4 = 0;
return 1;
}
@@ -208,18 +315,16 @@ unformat_vnet_uri (unformat_input_t * input, va_list * args)
}
static u8 *cache_uri;
-static session_type_t cache_sst;
-static transport_endpoint_t *cache_tep;
+static session_endpoint_t *cache_sep;
int
-parse_uri (char *uri, session_type_t * sst, transport_endpoint_t * tep)
+parse_uri (char *uri, session_endpoint_t * sep)
{
unformat_input_t _input, *input = &_input;
if (cache_uri && !strncmp (uri, (char *) cache_uri, vec_len (cache_uri)))
{
- *sst = cache_sst;
- *tep = *cache_tep;
+ *sep = *cache_sep;
return 0;
}
@@ -228,7 +333,7 @@ parse_uri (char *uri, session_type_t * sst, transport_endpoint_t * tep)
/* Parse uri */
unformat_init_string (input, uri, strlen (uri));
- if (!unformat (input, "%U", unformat_vnet_uri, sst, tep))
+ if (!unformat (input, "%U", unformat_vnet_uri, sep))
{
unformat_free (input);
return VNET_API_ERROR_INVALID_VALUE;
@@ -237,34 +342,67 @@ parse_uri (char *uri, session_type_t * sst, transport_endpoint_t * tep)
vec_free (cache_uri);
cache_uri = (u8 *) uri;
- cache_sst = *sst;
- if (cache_tep)
- clib_mem_free (cache_tep);
- cache_tep = clib_mem_alloc (sizeof (*tep));
- *cache_tep = *tep;
+ if (cache_sep)
+ clib_mem_free (cache_sep);
+ cache_sep = clib_mem_alloc (sizeof (*sep));
+ *cache_sep = *sep;
return 0;
}
+static int
+session_validate_namespace (u8 * namespace_id, u64 secret, u32 * app_ns_index)
+{
+ app_namespace_t *app_ns;
+ if (vec_len (namespace_id) == 0)
+ {
+ /* Use default namespace */
+ *app_ns_index = 0;
+ return 0;
+ }
+
+ *app_ns_index = app_namespace_index_from_id (namespace_id);
+ if (*app_ns_index == APP_NAMESPACE_INVALID_INDEX)
+ return VNET_API_ERROR_APP_INVALID_NS;
+ app_ns = app_namespace_get (*app_ns_index);
+ if (!app_ns)
+ return VNET_API_ERROR_APP_INVALID_NS;
+ if (app_ns->ns_secret != secret)
+ return VNET_API_ERROR_APP_WRONG_NS_SECRET;
+ return 0;
+}
+
/**
- * Attaches application.
+ * Attach application to vpp
*
* Allocates a vpp app, i.e., a structure that keeps back pointers
* to external app and a segment manager for shared memory fifo based
* communication with the external app.
*/
-int
+clib_error_t *
vnet_application_attach (vnet_app_attach_args_t * a)
{
application_t *app = 0;
segment_manager_t *sm;
u8 *seg_name;
+ u64 secret;
+ u32 app_ns_index = 0;
int rv;
+ app = application_lookup (a->api_client_index);
+ if (app)
+ return clib_error_return_code (0, VNET_API_ERROR_APP_ALREADY_ATTACHED,
+ 0, "app already attached");
+
+ secret = a->options[APP_OPTIONS_NAMESPACE_SECRET];
+ if ((rv = session_validate_namespace (a->namespace_id, secret,
+ &app_ns_index)))
+ return clib_error_return_code (0, rv, 0, "namespace validation: %d", rv);
+ a->options[APP_OPTIONS_NAMESPACE] = app_ns_index;
app = application_new ();
if ((rv = application_init (app, a->api_client_index, a->options,
a->session_cb_vft)))
- return rv;
+ return clib_error_return_code (0, rv, 0, "app init: %d", rv);
a->app_event_queue_address = pointer_to_uword (app->event_queue);
sm = segment_manager_get (app->first_segment_manager);
@@ -278,6 +416,9 @@ vnet_application_attach (vnet_app_attach_args_t * a)
return 0;
}
+/**
+ * Detach application from vpp
+ */
int
vnet_application_detach (vnet_app_detach_args_t * a)
{
@@ -297,56 +438,48 @@ vnet_application_detach (vnet_app_detach_args_t * a)
int
vnet_bind_uri (vnet_bind_args_t * a)
{
- session_type_t sst = SESSION_N_TYPES;
- transport_endpoint_t tep;
+ session_endpoint_t sep = SESSION_ENDPOINT_NULL;
int rv;
- memset (&tep, 0, sizeof (tep));
- rv = parse_uri (a->uri, &sst, &tep);
+ rv = parse_uri (a->uri, &sep);
if (rv)
return rv;
- if ((rv = vnet_bind_i (a->app_index, sst, &tep, &a->handle)))
- return rv;
-
- return 0;
+ return vnet_bind_i (a->app_index, &sep, &a->handle);
}
int
vnet_unbind_uri (vnet_unbind_args_t * a)
{
- session_type_t sst = SESSION_N_TYPES;
stream_session_t *listener;
- transport_endpoint_t tep;
+ session_endpoint_t sep = SESSION_ENDPOINT_NULL;
int rv;
- rv = parse_uri (a->uri, &sst, &tep);
+ rv = parse_uri (a->uri, &sep);
if (rv)
return rv;
- listener = stream_session_lookup_listener (&tep.ip,
- clib_host_to_net_u16 (tep.port),
- sst);
+ /* NOTE: only default table supported for uri */
+ listener = session_lookup_listener (0, &sep);
if (!listener)
return VNET_API_ERROR_ADDRESS_NOT_IN_USE;
return vnet_unbind_i (a->app_index, listen_session_get_handle (listener));
}
-int
+clib_error_t *
vnet_connect_uri (vnet_connect_args_t * a)
{
- transport_endpoint_t tep;
- session_type_t sst;
+ session_endpoint_t sep = SESSION_ENDPOINT_NULL;
int rv;
/* Parse uri */
- memset (&tep, 0, sizeof (tep));
- rv = parse_uri (a->uri, &sst, &tep);
+ rv = parse_uri (a->uri, &sep);
if (rv)
- return rv;
-
- return vnet_connect_i (a->app_index, a->api_context, sst, &tep, a->mp);
+ return clib_error_return_code (0, rv, 0, "app init: %d", rv);
+ if ((rv = vnet_connect_i (a->app_index, a->api_context, &sep, a->mp)))
+ return clib_error_return_code (0, rv, 0, "connect failed");
+ return 0;
}
int
@@ -355,7 +488,7 @@ vnet_disconnect_session (vnet_disconnect_args_t * a)
u32 index, thread_index;
stream_session_t *s;
- stream_session_parse_handle (a->handle, &index, &thread_index);
+ session_parse_handle (a->handle, &index, &thread_index);
s = stream_session_get_if_valid (index, thread_index);
if (!s || s->app_index != a->app_index)
@@ -369,32 +502,31 @@ vnet_disconnect_session (vnet_disconnect_args_t * a)
return 0;
}
-int
+clib_error_t *
vnet_bind (vnet_bind_args_t * a)
{
- session_type_t sst = SESSION_N_TYPES;
int rv;
-
- sst = session_type_from_proto_and_ip (a->proto, a->tep.is_ip4);
- if ((rv = vnet_bind_i (a->app_index, sst, &a->tep, &a->handle)))
- return rv;
-
+ if ((rv = vnet_bind_i (a->app_index, &a->sep, &a->handle)))
+ return clib_error_return_code (0, rv, 0, "bind failed");
return 0;
}
-int
+clib_error_t *
vnet_unbind (vnet_unbind_args_t * a)
{
- return vnet_unbind_i (a->app_index, a->handle);
+ int rv;
+ if ((rv = vnet_unbind_i (a->app_index, a->handle)))
+ return clib_error_return_code (0, rv, 0, "unbind failed");
+ return 0;
}
-int
+clib_error_t *
vnet_connect (vnet_connect_args_t * a)
{
- session_type_t sst;
-
- sst = session_type_from_proto_and_ip (a->proto, a->tep.is_ip4);
- return vnet_connect_i (a->app_index, a->api_context, sst, &a->tep, a->mp);
+ int rv;
+ if ((rv = vnet_connect_i (a->app_index, a->api_context, &a->sep, a->mp)))
+ return clib_error_return_code (0, rv, 0, "connect failed");
+ return 0;
}
/*
diff --git a/src/vnet/session/application_interface.h b/src/vnet/session/application_interface.h
index 1d63f6cc102..5e1fe8ee528 100644
--- a/src/vnet/session/application_interface.h
+++ b/src/vnet/session/application_interface.h
@@ -30,12 +30,12 @@ typedef struct _vnet_app_attach_args_t
/** Application and segment manager options */
u64 *options;
+ /* Namespace id */
+ u8 *namespace_id;
+
/** Session to application callback functions */
session_cb_vft_t *session_cb_vft;
- /** Flag that indicates if app is builtin */
- u8 builtin;
-
/*
* Results
*/
@@ -58,7 +58,7 @@ typedef struct _vnet_bind_args_t
char *uri;
struct
{
- transport_endpoint_t tep;
+ session_endpoint_t sep;
transport_proto_t proto;
};
};
@@ -91,7 +91,7 @@ typedef struct _vnet_connect_args
char *uri;
struct
{
- transport_endpoint_t tep;
+ session_endpoint_t sep;
transport_proto_t proto;
};
};
@@ -119,6 +119,8 @@ typedef enum
APP_OPTIONS_PREALLOC_FIFO_PAIRS,
APP_OPTIONS_PRIVATE_SEGMENT_COUNT,
APP_OPTIONS_PRIVATE_SEGMENT_SIZE,
+ APP_OPTIONS_NAMESPACE,
+ APP_OPTIONS_NAMESPACE_SECRET,
SESSION_OPTIONS_SEGMENT_SIZE,
SESSION_OPTIONS_ADD_SEGMENT_SIZE,
SESSION_OPTIONS_RX_FIFO_SIZE,
@@ -129,10 +131,12 @@ typedef enum
} app_attach_options_index_t;
#define foreach_app_options_flags \
- _(USE_FIFO, "Use FIFO with redirects") \
+ _(ACCEPT_REDIRECT, "Use FIFO with redirects") \
_(ADD_SEGMENT, "Add segment and signal app if needed") \
_(BUILTIN_APP, "Application is builtin") \
- _(IS_PROXY, "Application is proxying")
+ _(IS_PROXY, "Application is proxying") \
+ _(USE_GLOBAL_SCOPE, "App can use global session scope") \
+ _(USE_LOCAL_SCOPE, "App can use local session scope")
typedef enum _app_options
{
@@ -148,25 +152,17 @@ typedef enum _app_options_flags
#undef _
} app_options_flags_t;
-///** Server can handle delegated connect requests from local clients */
-//#define APP_OPTIONS_FLAGS_USE_FIFO (1<<0)
-//
-///** Server wants vpp to add segments when out of memory for fifos */
-//#define APP_OPTIONS_FLAGS_ADD_SEGMENT (1<<1)
-
-#define VNET_CONNECT_REDIRECTED 123
-
-int vnet_application_attach (vnet_app_attach_args_t * a);
+clib_error_t *vnet_application_attach (vnet_app_attach_args_t * a);
int vnet_application_detach (vnet_app_detach_args_t * a);
int vnet_bind_uri (vnet_bind_args_t *);
int vnet_unbind_uri (vnet_unbind_args_t * a);
-int vnet_connect_uri (vnet_connect_args_t * a);
+clib_error_t *vnet_connect_uri (vnet_connect_args_t * a);
int vnet_disconnect_session (vnet_disconnect_args_t * a);
-int vnet_bind (vnet_bind_args_t * a);
-int vnet_connect (vnet_connect_args_t * a);
-int vnet_unbind (vnet_unbind_args_t * a);
+clib_error_t *vnet_bind (vnet_bind_args_t * a);
+clib_error_t *vnet_connect (vnet_connect_args_t * a);
+clib_error_t *vnet_unbind (vnet_unbind_args_t * a);
int
api_parse_session_handle (u64 handle, u32 * session_index,
diff --git a/src/vnet/session/application_namespace.c b/src/vnet/session/application_namespace.c
new file mode 100644
index 00000000000..7f90943f9cd
--- /dev/null
+++ b/src/vnet/session/application_namespace.c
@@ -0,0 +1,293 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/session/application_namespace.h>
+#include <vnet/session/session_table.h>
+#include <vnet/session/session.h>
+#include <vnet/fib/fib_table.h>
+
+/**
+ * Hash table of application namespaces by app ns ids
+ */
+uword *app_namespace_lookup_table;
+
+/**
+ * Pool of application namespaces
+ */
+static app_namespace_t *app_namespace_pool;
+
+app_namespace_t *
+app_namespace_get (u32 index)
+{
+ return pool_elt_at_index (app_namespace_pool, index);
+}
+
+app_namespace_t *
+app_namespace_get_from_id (const u8 * ns_id)
+{
+ u32 index = app_namespace_index_from_id (ns_id);
+ if (index == APP_NAMESPACE_INVALID_INDEX)
+ return 0;
+ return app_namespace_get (index);
+}
+
+u32
+app_namespace_index (app_namespace_t * app_ns)
+{
+ return (app_ns - app_namespace_pool);
+}
+
+app_namespace_t *
+app_namespace_alloc (u8 * ns_id)
+{
+ app_namespace_t *app_ns;
+ pool_get (app_namespace_pool, app_ns);
+ memset (app_ns, 0, sizeof (*app_ns));
+ app_ns->ns_id = vec_dup (ns_id);
+ hash_set_mem (app_namespace_lookup_table, app_ns->ns_id,
+ app_ns - app_namespace_pool);
+ return app_ns;
+}
+
+clib_error_t *
+vnet_app_namespace_add_del (vnet_app_namespace_add_del_args_t * a)
+{
+ app_namespace_t *app_ns;
+ session_table_t *st;
+
+ if (a->is_add)
+ {
+ if (a->sw_if_index != APP_NAMESPACE_INVALID_INDEX
+ && !vnet_get_sw_interface_safe (vnet_get_main (), a->sw_if_index))
+ return clib_error_return_code (0, VNET_API_ERROR_INVALID_SW_IF_INDEX,
+ 0, "sw_if_index %u doesn't exist",
+ a->sw_if_index);
+
+ if (a->sw_if_index != APP_NAMESPACE_INVALID_INDEX)
+ {
+ a->ip4_fib_id =
+ fib_table_get_table_id_for_sw_if_index (FIB_PROTOCOL_IP4,
+ a->sw_if_index);
+ a->ip6_fib_id =
+ fib_table_get_table_id_for_sw_if_index (FIB_PROTOCOL_IP4,
+ a->sw_if_index);
+ }
+ if (a->sw_if_index == APP_NAMESPACE_INVALID_INDEX
+ && a->ip4_fib_id == APP_NAMESPACE_INVALID_INDEX)
+ return clib_error_return_code (0, VNET_API_ERROR_INVALID_VALUE, 0,
+ "sw_if_index or fib_id must be "
+ "configured");
+ st = session_table_alloc ();
+ session_table_init (st);
+
+ app_ns = app_namespace_get_from_id (a->ns_id);
+ if (!app_ns)
+ app_ns = app_namespace_alloc (a->ns_id);
+ app_ns->ns_secret = a->secret;
+ app_ns->sw_if_index = a->sw_if_index;
+ app_ns->local_table_index = session_table_index (st);
+ app_ns->ip4_fib_index =
+ fib_table_find (FIB_PROTOCOL_IP4, a->ip4_fib_id);
+ app_ns->ip6_fib_index =
+ fib_table_find (FIB_PROTOCOL_IP6, a->ip6_fib_id);
+ }
+ else
+ {
+ return clib_error_return_code (0, VNET_API_ERROR_UNIMPLEMENTED, 0,
+ "namespace deletion not supported");
+ }
+ return 0;
+}
+
+const u8 *
+app_namespace_id (app_namespace_t * app_ns)
+{
+ return app_ns->ns_id;
+}
+
+u32
+app_namespace_index_from_id (const u8 * ns_id)
+{
+ uword *indexp;
+ indexp = hash_get_mem (app_namespace_lookup_table, ns_id);
+ if (!indexp)
+ return APP_NAMESPACE_INVALID_INDEX;
+ return *indexp;
+}
+
+const u8 *
+app_namespace_id_from_index (u32 index)
+{
+ app_namespace_t *app_ns;
+
+ app_ns = app_namespace_get (index);
+ return app_namespace_id (app_ns);
+}
+
+void
+app_namespaces_init (void)
+{
+ u8 *ns_id = format (0, "default");
+ app_namespace_lookup_table =
+ hash_create_vec (0, sizeof (u8), sizeof (uword));
+
+ /*
+ * Allocate default namespace
+ */
+ vnet_app_namespace_add_del_args_t a = {
+ .ns_id = ns_id,
+ .secret = 0,
+ .sw_if_index = APP_NAMESPACE_INVALID_INDEX,
+ .is_add = 1
+ };
+ vnet_app_namespace_add_del (&a);
+ vec_free (ns_id);
+}
+
+static clib_error_t *
+app_ns_fn (vlib_main_t * vm, unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ u8 is_add = 0, *ns_id = 0, secret_set = 0, sw_if_index_set = 0;
+ u32 sw_if_index, fib_id = APP_NAMESPACE_INVALID_INDEX;
+ u64 secret;
+ clib_error_t *error = 0;
+
+ session_cli_return_if_not_enabled ();
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "add"))
+ is_add = 1;
+ else if (unformat (input, "id %_%v%_", &ns_id))
+ ;
+ else if (unformat (input, "secret %lu", &secret))
+ secret_set = 1;
+ else if (unformat (input, "sw_if_index %u", &sw_if_index))
+ sw_if_index_set = 1;
+ else if (unformat (input, "fib_id", &fib_id))
+ ;
+ else
+ return clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
+ }
+
+ if (!ns_id || !secret_set || !sw_if_index_set)
+ {
+ vlib_cli_output (vm, "namespace-id, secret and sw_if_index must be "
+ "provided");
+ return 0;
+ }
+
+ if (is_add)
+ {
+ vnet_app_namespace_add_del_args_t args = {
+ .ns_id = ns_id,
+ .secret = secret,
+ .sw_if_index = sw_if_index,
+ .ip4_fib_id = fib_id,
+ .is_add = 1
+ };
+ error = vnet_app_namespace_add_del (&args);
+ }
+
+ return error;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (app_ns_command, static) =
+{
+ .path = "app ns",
+ .short_help = "app ns [add] id <namespace-id> secret <secret> "
+ "sw_if_index <sw_if_index>",
+ .function = app_ns_fn,
+};
+/* *INDENT-ON* */
+
+u8 *
+format_app_namespace (u8 * s, va_list * args)
+{
+ app_namespace_t *app_ns = va_arg (*args, app_namespace_t *);
+ s = format (s, "%-20v%-20lu%-20u", app_ns->ns_id, app_ns->ns_secret,
+ app_ns->sw_if_index);
+ return s;
+}
+
+static clib_error_t *
+show_app_ns_fn (vlib_main_t * vm, unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ app_namespace_t *app_ns;
+ session_table_t *st;
+ u8 *ns_id, do_table = 0;
+
+ session_cli_return_if_not_enabled ();
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "table %_%v%_", &ns_id))
+ do_table = 1;
+ else
+ return clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
+ }
+
+ if (do_table)
+ {
+ app_ns = app_namespace_get_from_id (ns_id);
+ if (!app_ns)
+ {
+ vlib_cli_output (vm, "ns %v not found", ns_id);
+ return 0;
+ }
+ st = session_table_get (app_ns->local_table_index);
+ if (!st)
+ {
+ vlib_cli_output (vm, "table for ns %v could not be found", ns_id);
+ return 0;
+ }
+ session_lookup_show_table_entries (vm, st, 0, 1);
+ vec_free (ns_id);
+ return 0;
+ }
+
+ vlib_cli_output (vm, "%-20s%-20s%-20s", "Namespace", "Secret",
+ "sw_if_index");
+
+ /* *INDENT-OFF* */
+ pool_foreach (app_ns, app_namespace_pool, ({
+ vlib_cli_output (vm, "%U", format_app_namespace, app_ns);
+ }));
+ /* *INDENT-ON* */
+
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (show_app_ns_command, static) =
+{
+ .path = "show app ns",
+ .short_help = "show app ns",
+ .function = show_app_ns_fn,
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/session/application_namespace.h b/src/vnet/session/application_namespace.h
new file mode 100644
index 00000000000..da3f6017961
--- /dev/null
+++ b/src/vnet/session/application_namespace.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/vnet.h>
+
+#ifndef SRC_VNET_SESSION_APPLICATION_NAMESPACE_H_
+#define SRC_VNET_SESSION_APPLICATION_NAMESPACE_H_
+
+typedef struct _app_namespace
+{
+ /**
+ * Local sw_if_index that supports transport connections for this namespace
+ */
+ u32 sw_if_index;
+
+ /**
+ * Network namespace (e.g., fib_index associated to the sw_if_index)
+ * wherein connections are to be established. Since v4 and v6 fibs are
+ * separate, we actually need to keep pointers to both.
+ */
+ u32 ip4_fib_index;
+ u32 ip6_fib_index;
+
+ /**
+ * Local session table associated to ns
+ */
+ u32 local_table_index;
+
+ /**
+ * Secret apps need to provide to authorize attachment to the namespace
+ */
+ u64 ns_secret;
+
+ /**
+ * Application namespace id
+ */
+ u8 *ns_id;
+} app_namespace_t;
+
+typedef struct _vnet_app_namespace_add_del_args
+{
+ u8 *ns_id;
+ u64 secret;
+ u32 sw_if_index;
+ u32 ip4_fib_id;
+ u32 ip6_fib_id;
+ u8 is_add;
+} vnet_app_namespace_add_del_args_t;
+
+#define APP_NAMESPACE_INVALID_INDEX ((u32)~0)
+
+app_namespace_t *app_namespace_alloc (u8 * ns_id);
+app_namespace_t *app_namespace_get (u32 index);
+app_namespace_t *app_namespace_get_from_id (const u8 * ns_id);
+u32 app_namespace_index (app_namespace_t * app_ns);
+const u8 *app_namespace_id (app_namespace_t * app_ns);
+const u8 *app_namespace_id_from_index (u32 index);
+u32 app_namespace_index_from_id (const u8 * ns_id);
+void app_namespaces_init (void);
+clib_error_t *vnet_app_namespace_add_del (vnet_app_namespace_add_del_args_t *
+ a);
+
+#endif /* SRC_VNET_SESSION_APPLICATION_NAMESPACE_H_ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/session/segment_manager.c b/src/vnet/session/segment_manager.c
index 48d027553b1..f35dec72d88 100644
--- a/src/vnet/session/segment_manager.c
+++ b/src/vnet/session/segment_manager.c
@@ -267,7 +267,7 @@ segment_manager_del_sessions (segment_manager_t * sm)
session_index = fifo->master_session_index;
thread_index = fifo->master_thread_index;
- session = stream_session_get (session_index, thread_index);
+ session = session_get (session_index, thread_index);
/* Instead of directly removing the session call disconnect */
if (session->session_state != SESSION_STATE_CLOSED)
diff --git a/src/vnet/session/session.api b/src/vnet/session/session.api
index 992a0638698..12a5d10429c 100644
--- a/src/vnet/session/session.api
+++ b/src/vnet/session/session.api
@@ -21,12 +21,16 @@ vl_api_version 1.0.0
@param initial_segment_size - size of the initial shm segment to be
allocated
@param options - segment size, fifo sizes, etc.
+ @param namespace_id_len - length of the namespace id c-string
+ @param namespace_id - 0 terminted c-string
*/
define application_attach {
u32 client_index;
u32 context;
u32 initial_segment_size;
u64 options[16];
+ u8 namespace_id_len;
+ u8 namespace_id [64];
};
/** \brief Application attach reply
@@ -99,20 +103,19 @@ autoreply define unbind_uri {
/** \brief Connect to a given URI
@param client_index - opaque cookie to identify the sender
@param context - sender context, to match reply w/ request
- @param accept_cookie - sender accept cookie, to identify this bind flavor
- @param uri - a URI, e.g. "tcp4://0.0.0.0/0/80"
- "tcp6://::/0/80" [ipv6], etc.
- @param options - socket options, fifo sizes, etc. passed by vpp to the
- server when redirecting connects
@param client_queue_address - binary API client queue address. Used by
local server when connect was redirected.
+ @param options - socket options, fifo sizes, etc. passed by vpp to the
+ server when redirecting connects
+ @param uri - a URI, e.g. "tcp4://0.0.0.0/0/80"
+ "tcp6://::/0/80" [ipv6], etc.
*/
autoreply define connect_uri {
u32 client_index;
u32 context;
- u8 uri[128];
u64 client_queue_address;
u64 options[16];
+ u8 uri[128];
};
/** \brief vpp->client, accept this session
@@ -240,26 +243,25 @@ autoreply define unbind_sock {
/** \brief Connect to a remote peer
@param client_index - opaque cookie to identify the sender
@param context - sender context, to match reply w/ request
- @param app_connect - application connection id to be returned in reply
+ @param client_queue_address - client's API queue address. Non-zero when
+ used to perform redirects
+ @param options - socket options, fifo sizes, etc. when doing redirects
@param vrf - connection namespace
@param is_ip4 - flag that is 1 if ip address family is IPv4
@param ip - ip address
@param port - port
@param proto - protocol 0 - TCP 1 - UDP
- @param client_queue_address - client's API queue address. Non-zero when
- used to perform redirects
- @param options - socket options, fifo sizes, etc. when doing redirects
*/
autoreply define connect_sock {
u32 client_index;
u32 context;
+ u64 client_queue_address;
+ u64 options[16];
u32 vrf;
u8 is_ip4;
u8 ip[16];
u16 port;
u8 proto;
- u64 client_queue_address;
- u64 options[16];
};
/** \brief Bind reply
@@ -326,6 +328,31 @@ autoreply define session_enable_disable {
u8 is_enable;
};
+/** \brief add/del application namespace
+ @param client_index - opaque cookie to identify the sender
+ client to vpp direction only
+ @param context - sender context, to match reply w/ request
+ @param secret - secret shared between app and vpp
+ @param sw_if_index - local interface that "supports" namespace. Set to
+ ~0 if no preference
+ @param ip4_fib_id - id of ip4 fib that "supports" the namespace. Ignored
+ if sw_if_index set.
+ @param ip6_fib_id - id of ip6 fib that "supports" the namespace. Ignored
+ if sw_if_index set.
+ @param namespace_id_len - length of namespace id lower
+ @param namespace_id - namespace id
+*/
+autoreply define app_namespace_add_del {
+ u32 client_index;
+ u32 context;
+ u64 secret;
+ u32 sw_if_index;
+ u32 ip4_fib_id;
+ u32 ip6_fib_id;
+ u8 namespace_id_len;
+ u8 namespace_id[64];
+};
+
/*
* Local Variables:
* eval: (c-set-style "gnu")
diff --git a/src/vnet/session/session.c b/src/vnet/session/session.c
index dc930ce87d3..88b38f15a61 100644
--- a/src/vnet/session/session.c
+++ b/src/vnet/session/session.c
@@ -85,7 +85,7 @@ stream_session_create_i (segment_manager_t * sm, transport_connection_t * tc,
/* Add to the main lookup table */
value = stream_session_handle (s);
- stream_session_table_add_for_tc (tc, value);
+ session_lookup_add_connection (tc, value);
*ret_s = s;
@@ -223,7 +223,7 @@ stream_session_enqueue_data (transport_connection_t * tc, vlib_buffer_t * b,
stream_session_t *s;
int enqueued = 0, rv, in_order_off;
- s = stream_session_get (tc->s_index, tc->thread_index);
+ s = session_get (tc->s_index, tc->thread_index);
if (is_in_order)
{
@@ -275,7 +275,7 @@ u8
stream_session_no_space (transport_connection_t * tc, u32 thread_index,
u16 data_len)
{
- stream_session_t *s = stream_session_get (tc->s_index, thread_index);
+ stream_session_t *s = session_get (tc->s_index, thread_index);
if (PREDICT_FALSE (s->session_state != SESSION_STATE_READY))
return 1;
@@ -289,7 +289,7 @@ stream_session_no_space (transport_connection_t * tc, u32 thread_index,
u32
stream_session_tx_fifo_max_dequeue (transport_connection_t * tc)
{
- stream_session_t *s = stream_session_get (tc->s_index, tc->thread_index);
+ stream_session_t *s = session_get (tc->s_index, tc->thread_index);
if (!s->server_tx_fifo)
return 0;
return svm_fifo_max_dequeue (s->server_tx_fifo);
@@ -299,14 +299,14 @@ int
stream_session_peek_bytes (transport_connection_t * tc, u8 * buffer,
u32 offset, u32 max_bytes)
{
- stream_session_t *s = stream_session_get (tc->s_index, tc->thread_index);
+ stream_session_t *s = session_get (tc->s_index, tc->thread_index);
return svm_fifo_peek (s->server_tx_fifo, offset, max_bytes, buffer);
}
u32
stream_session_dequeue_drop (transport_connection_t * tc, u32 max_bytes)
{
- stream_session_t *s = stream_session_get (tc->s_index, tc->thread_index);
+ stream_session_t *s = session_get (tc->s_index, tc->thread_index);
return svm_fifo_dequeue_drop (s->server_tx_fifo, max_bytes);
}
@@ -432,7 +432,7 @@ stream_session_init_fifos_pointers (transport_connection_t * tc,
u32 rx_pointer, u32 tx_pointer)
{
stream_session_t *s;
- s = stream_session_get (tc->s_index, tc->thread_index);
+ s = session_get (tc->s_index, tc->thread_index);
svm_fifo_init_pointers (s->server_rx_fifo, rx_pointer);
svm_fifo_init_pointers (s->server_tx_fifo, tx_pointer);
}
@@ -445,20 +445,16 @@ stream_session_connect_notify (transport_connection_t * tc, u8 is_fail)
u64 handle;
u32 opaque = 0;
int error = 0;
- u8 st;
- st = session_type_from_proto_and_ip (tc->transport_proto, tc->is_ip4);
- handle = stream_session_half_open_lookup_handle (&tc->lcl_ip, &tc->rmt_ip,
- tc->lcl_port, tc->rmt_port,
- st);
+ handle = session_lookup_half_open_handle (tc);
if (handle == HALF_OPEN_LOOKUP_INVALID_VALUE)
{
- TCP_DBG ("half-open was removed!");
+ SESSION_DBG ("half-open was removed!");
return -1;
}
/* Cleanup half-open table */
- stream_session_half_open_table_del (tc);
+ session_lookup_del_half_open (tc);
/* Get the app's index from the handle we stored when opening connection
* and the opaque (api_context for external apps) from transport session
@@ -489,7 +485,7 @@ stream_session_connect_notify (transport_connection_t * tc, u8 is_fail)
if (app->cb_fns.session_connected_callback (app->index, opaque, new_s,
is_fail))
{
- clib_warning ("failed to notify app");
+ SESSION_DBG ("failed to notify app");
if (!is_fail)
stream_session_disconnect (new_s);
}
@@ -508,7 +504,7 @@ stream_session_accept_notify (transport_connection_t * tc)
application_t *server;
stream_session_t *s;
- s = stream_session_get (tc->s_index, tc->thread_index);
+ s = session_get (tc->s_index, tc->thread_index);
server = application_get (s->app_index);
server->cb_fns.session_accept_callback (s);
}
@@ -526,7 +522,7 @@ stream_session_disconnect_notify (transport_connection_t * tc)
application_t *server;
stream_session_t *s;
- s = stream_session_get (tc->s_index, tc->thread_index);
+ s = session_get (tc->s_index, tc->thread_index);
server = application_get (s->app_index);
server->cb_fns.session_disconnect_callback (s);
}
@@ -541,7 +537,7 @@ stream_session_delete (stream_session_t * s)
int rv;
/* Delete from the main lookup table. */
- if ((rv = stream_session_table_del (s)))
+ if ((rv = session_lookup_del_session (s)))
clib_warning ("hash delete error, rv %d", rv);
/* Cleanup fifo segments */
@@ -581,7 +577,7 @@ stream_session_reset_notify (transport_connection_t * tc)
{
stream_session_t *s;
application_t *app;
- s = stream_session_get (tc->s_index, tc->thread_index);
+ s = session_get (tc->s_index, tc->thread_index);
app = application_get (s->app_index);
app->cb_fns.session_reset_callback (s);
@@ -592,14 +588,16 @@ stream_session_reset_notify (transport_connection_t * tc)
*/
int
stream_session_accept (transport_connection_t * tc, u32 listener_index,
- u8 sst, u8 notify)
+ u8 notify)
{
application_t *server;
stream_session_t *s, *listener;
segment_manager_t *sm;
-
+ session_type_t sst;
int rv;
+ sst = session_type_from_proto_and_ip (tc->transport_proto, tc->is_ip4);
+
/* Find the server */
listener = listen_session_get (sst, listener_index);
server = application_get (listener->app_index);
@@ -634,22 +632,23 @@ stream_session_accept (transport_connection_t * tc, u32 listener_index,
* @param res Resulting transport connection .
*/
int
-stream_session_open (u32 app_index, session_type_t st,
- transport_endpoint_t * rmt,
+stream_session_open (u32 app_index, session_endpoint_t * rmt,
transport_connection_t ** res)
{
transport_connection_t *tc;
+ session_type_t sst;
int rv;
u64 handle;
- rv = tp_vfts[st].open (rmt);
+ sst = session_type_from_proto_and_ip (rmt->transport_proto, rmt->is_ip4);
+ rv = tp_vfts[sst].open (session_endpoint_to_transport (rmt));
if (rv < 0)
{
clib_warning ("Transport failed to open connection.");
- return VNET_API_ERROR_SESSION_CONNECT_FAIL;
+ return VNET_API_ERROR_SESSION_CONNECT;
}
- tc = tp_vfts[st].get_half_open ((u32) rv);
+ tc = tp_vfts[sst].get_half_open ((u32) rv);
/* Save app and tc index. The latter is needed to help establish the
* connection while the former is needed when the connect notify comes
@@ -657,7 +656,7 @@ stream_session_open (u32 app_index, session_type_t st,
handle = (((u64) app_index) << 32) | (u64) tc->c_index;
/* Add to the half-open lookup table */
- stream_session_half_open_table_add (tc, handle);
+ session_lookup_add_half_open (tc, handle);
*res = tc;
@@ -673,13 +672,14 @@ stream_session_open (u32 app_index, session_type_t st,
* @param tep Local endpoint to be listened on.
*/
int
-stream_session_listen (stream_session_t * s, transport_endpoint_t * tep)
+stream_session_listen (stream_session_t * s, session_endpoint_t * tep)
{
transport_connection_t *tc;
u32 tci;
/* Transport bind/listen */
- tci = tp_vfts[s->session_type].bind (s->session_index, tep);
+ tci = tp_vfts[s->session_type].bind (s->session_index,
+ session_endpoint_to_transport (tep));
if (tci == (u32) ~ 0)
return -1;
@@ -693,7 +693,7 @@ stream_session_listen (stream_session_t * s, transport_endpoint_t * tep)
return -1;
/* Add to the main lookup table */
- stream_session_table_add_for_tc (tc, s->session_index);
+ session_lookup_add_connection (tc, s->session_index);
return 0;
}
@@ -721,7 +721,7 @@ stream_session_stop_listen (stream_session_t * s)
return VNET_API_ERROR_ADDRESS_NOT_IN_USE;
}
- stream_session_table_del_for_tc (tc);
+ session_lookup_del_connection (tc);
tp_vfts[s->session_type].unbind (s->connection_index);
return 0;
}
@@ -780,7 +780,7 @@ stream_session_cleanup (stream_session_t * s)
s->session_state = SESSION_STATE_CLOSED;
/* Delete from the main lookup table to avoid more enqueues */
- rv = stream_session_table_del (s);
+ rv = session_lookup_del_session (s);
if (rv)
clib_warning ("hash delete error, rv %d", rv);
@@ -837,6 +837,26 @@ session_type_from_proto_and_ip (transport_proto_t proto, u8 is_ip4)
return SESSION_N_TYPES;
}
+int
+listen_session_get_local_session_endpoint (stream_session_t * listener,
+ session_endpoint_t * sep)
+{
+ transport_connection_t *tc;
+ tc =
+ tp_vfts[listener->session_type].get_listener (listener->connection_index);
+ if (!tc)
+ {
+ clib_warning ("no transport");
+ return -1;
+ }
+
+ /* N.B. The ip should not be copied because this is the local endpoint */
+ sep->port = tc->lcl_port;
+ sep->transport_proto = tc->transport_proto;
+ sep->is_ip4 = tc->is_ip4;
+ return 0;
+}
+
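A minimal sketch, not part of the patch, of how the helper above can pair with the session-endpoint table API added later in this diff: recover the listener's local port/proto and register it in a namespace-local table. The example_ wrapper name and the table-index/value parameters are illustrative assumptions.

/* Sketch only: example_* names are hypothetical. */
static int
example_register_local_listener (stream_session_t * listener,
				 u32 local_table_index, u64 value)
{
  session_endpoint_t sep;
  memset (&sep, 0, sizeof (sep));
  /* Fills port, transport_proto and is_ip4; the ip stays zeroed because
   * this is the local endpoint. */
  if (listen_session_get_local_session_endpoint (listener, &sep))
    return -1;
  return session_lookup_add_session_endpoint (local_table_index, &sep, value);
}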
static clib_error_t *
session_manager_main_enable (vlib_main_t * vm)
{
@@ -903,6 +923,7 @@ session_manager_main_enable (vlib_main_t * vm)
}
session_lookup_init ();
+ app_namespaces_init ();
smm->is_enabled = 1;
@@ -927,14 +948,14 @@ session_node_enable_disable (u8 is_en)
clib_error_t *
vnet_session_enable_disable (vlib_main_t * vm, u8 is_en)
{
+ clib_error_t *error = 0;
if (is_en)
{
if (session_manager_main.is_enabled)
return 0;
session_node_enable_disable (is_en);
-
- return session_manager_main_enable (vm);
+ error = session_manager_main_enable (vm);
}
else
{
@@ -942,7 +963,7 @@ vnet_session_enable_disable (vlib_main_t * vm, u8 is_en)
session_node_enable_disable (is_en);
}
- return 0;
+ return error;
}
clib_error_t *
diff --git a/src/vnet/session/session.h b/src/vnet/session/session.h
index 83addec2744..b1a03d213e9 100644
--- a/src/vnet/session/session.h
+++ b/src/vnet/session/session.h
@@ -214,7 +214,7 @@ stream_session_is_valid (u32 si, u8 thread_index)
}
always_inline stream_session_t *
-stream_session_get (u32 si, u32 thread_index)
+session_get (u32 si, u32 thread_index)
{
ASSERT (stream_session_is_valid (si, thread_index));
return pool_elt_at_index (session_manager_main.sessions[thread_index], si);
@@ -240,31 +240,31 @@ stream_session_handle (stream_session_t * s)
}
always_inline u32
-stream_session_index_from_handle (u64 handle)
+session_index_from_handle (u64 handle)
{
return handle & 0xFFFFFFFF;
}
always_inline u32
-stream_session_thread_from_handle (u64 handle)
+session_thread_from_handle (u64 handle)
{
return handle >> 32;
}
always_inline void
-stream_session_parse_handle (u64 handle, u32 * index, u32 * thread_index)
+session_parse_handle (u64 handle, u32 * index, u32 * thread_index)
{
- *index = stream_session_index_from_handle (handle);
- *thread_index = stream_session_thread_from_handle (handle);
+ *index = session_index_from_handle (handle);
+ *thread_index = session_thread_from_handle (handle);
}
always_inline stream_session_t *
-stream_session_get_from_handle (u64 handle)
+session_get_from_handle (u64 handle)
{
session_manager_main_t *smm = &session_manager_main;
- return pool_elt_at_index (smm->sessions[stream_session_thread_from_handle
- (handle)],
- stream_session_index_from_handle (handle));
+ return
+ pool_elt_at_index (smm->sessions[session_thread_from_handle (handle)],
+ session_index_from_handle (handle));
}
always_inline stream_session_t *
@@ -285,14 +285,14 @@ stream_session_get_index (stream_session_t * s)
always_inline u32
stream_session_max_rx_enqueue (transport_connection_t * tc)
{
- stream_session_t *s = stream_session_get (tc->s_index, tc->thread_index);
+ stream_session_t *s = session_get (tc->s_index, tc->thread_index);
return svm_fifo_max_enqueue (s->server_rx_fifo);
}
always_inline u32
stream_session_rx_fifo_size (transport_connection_t * tc)
{
- stream_session_t *s = stream_session_get (tc->s_index, tc->thread_index);
+ stream_session_t *s = session_get (tc->s_index, tc->thread_index);
return s->server_rx_fifo->nitems;
}
@@ -316,12 +316,11 @@ void stream_session_delete_notify (transport_connection_t * tc);
void stream_session_reset_notify (transport_connection_t * tc);
int
stream_session_accept (transport_connection_t * tc, u32 listener_index,
- u8 sst, u8 notify);
+ u8 notify);
int
-stream_session_open (u32 app_index, session_type_t st,
- transport_endpoint_t * tep,
+stream_session_open (u32 app_index, session_endpoint_t * tep,
transport_connection_t ** tc);
-int stream_session_listen (stream_session_t * s, transport_endpoint_t * tep);
+int stream_session_listen (stream_session_t * s, session_endpoint_t * tep);
int stream_session_stop_listen (stream_session_t * s);
void stream_session_disconnect (stream_session_t * s);
void stream_session_cleanup (stream_session_t * s);
@@ -401,6 +400,10 @@ listen_session_del (stream_session_t * s)
pool_put (session_manager_main.listen_sessions[s->session_type], s);
}
+int
+listen_session_get_local_session_endpoint (stream_session_t * listener,
+ session_endpoint_t * sep);
+
always_inline stream_session_t *
session_manager_get_listener (u8 type, u32 index)
{
@@ -425,6 +428,12 @@ session_manager_is_enabled ()
return session_manager_main.is_enabled == 1;
}
+#define session_cli_return_if_not_enabled() \
+do { \
+ if (!session_manager_main.is_enabled) \
+ return clib_error_return(0, "session layer is not enabled"); \
+} while (0)
+
#endif /* __included_session_h__ */
/*
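A minimal sketch, not part of the patch, of the handle layout the renamed helpers above rely on: the upper 32 bits carry the owning thread index, the lower 32 bits the per-thread session pool index. The example_ function name is an illustrative assumption.

static void
example_handle_roundtrip (u32 thread_index, u32 session_index)
{
  u64 handle = ((u64) thread_index << 32) | (u64) session_index;
  u32 si, ti;
  session_parse_handle (handle, &si, &ti);
  ASSERT (si == session_index && ti == thread_index);
}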
diff --git a/src/vnet/session/session_api.c b/src/vnet/session/session_api.c
index 60d9b4de51b..5bfca7be6fc 100755
--- a/src/vnet/session/session_api.c
+++ b/src/vnet/session/session_api.c
@@ -47,10 +47,11 @@ _(DISCONNECT_SESSION, disconnect_session) \
_(DISCONNECT_SESSION_REPLY, disconnect_session_reply) \
_(ACCEPT_SESSION_REPLY, accept_session_reply) \
_(RESET_SESSION_REPLY, reset_session_reply) \
-_(BIND_SOCK, bind_sock) \
+_(BIND_SOCK, bind_sock) \
_(UNBIND_SOCK, unbind_sock) \
_(CONNECT_SOCK, connect_sock) \
_(SESSION_ENABLE_DISABLE, session_enable_disable) \
+_(APP_NAMESPACE_ADD_DEL, app_namespace_add_del) \
static int
send_add_segment_callback (u32 api_client_index, const u8 * segment_name,
@@ -180,7 +181,7 @@ send_session_connected_callback (u32 app_index, u32 api_context,
}
else
{
- mp->retval = clib_host_to_net_u32 (VNET_API_ERROR_SESSION_CONNECT_FAIL);
+ mp->retval = clib_host_to_net_u32 (VNET_API_ERROR_SESSION_CONNECT);
}
vl_msg_api_send_shmem (q, (u8 *) & mp);
@@ -195,7 +196,7 @@ send_session_connected_callback (u32 app_index, u32 api_context,
static int
redirect_connect_callback (u32 server_api_client_index, void *mp_arg)
{
- vl_api_connect_uri_t *mp = mp_arg;
+ vl_api_connect_sock_t *mp = mp_arg;
unix_shared_memory_queue_t *server_q, *client_q;
vlib_main_t *vm = vlib_get_main ();
f64 timeout = vlib_time_now (vm) + 0.5;
@@ -242,7 +243,7 @@ redirect_connect_callback (u32 server_api_client_index, void *mp_arg)
{
/* correctly enqueued */
case 0:
- return VNET_CONNECT_REDIRECTED;
+ return VNET_API_ERROR_SESSION_REDIRECT;
/* continue spinning, wait for pthread_mutex_trylock to work */
case -1:
@@ -260,7 +261,7 @@ out:
return rv;
}
-static session_cb_vft_t uri_session_cb_vft = {
+static session_cb_vft_t session_cb_vft = {
.session_accept_callback = send_session_accept_callback,
.session_disconnect_callback = send_session_disconnect_callback,
.session_connected_callback = send_session_connected_callback,
@@ -285,7 +286,8 @@ vl_api_application_attach_t_handler (vl_api_application_attach_t * mp)
{
vl_api_application_attach_reply_t *rmp;
vnet_app_attach_args_t _a, *a = &_a;
- int rv;
+ clib_error_t *error = 0;
+ int rv = 0;
if (session_manager_is_enabled () == 0)
{
@@ -298,12 +300,28 @@ vl_api_application_attach_t_handler (vl_api_application_attach_t * mp)
"Out of options, fix api message definition");
memset (a, 0, sizeof (*a));
-
a->api_client_index = mp->client_index;
a->options = mp->options;
- a->session_cb_vft = &uri_session_cb_vft;
+ a->session_cb_vft = &session_cb_vft;
- rv = vnet_application_attach (a);
+ if (mp->namespace_id_len > 64)
+ {
+ rv = VNET_API_ERROR_INVALID_VALUE;
+ goto done;
+ }
+
+ if (mp->namespace_id_len)
+ {
+ vec_validate (a->namespace_id, mp->namespace_id_len);
+ clib_memcpy (a->namespace_id, mp->namespace_id, mp->namespace_id_len);
+ }
+
+ if ((error = vnet_application_attach (a)))
+ {
+ rv = clib_error_get_code (error);
+ clib_error_report (error);
+ }
+ vec_free (a->namespace_id);
done:
@@ -312,7 +330,6 @@ done:
if (!rv)
{
rmp->segment_name_length = 0;
- /* $$$$ policy? */
rmp->segment_size = a->segment_size;
if (a->segment_name_length)
{
@@ -418,7 +435,8 @@ vl_api_connect_uri_t_handler (vl_api_connect_uri_t * mp)
vl_api_connect_session_reply_t *rmp;
vnet_connect_args_t _a, *a = &_a;
application_t *app;
- int rv;
+ clib_error_t *error = 0;
+ int rv = 0;
if (session_manager_is_enabled () == 0)
{
@@ -433,14 +451,19 @@ vl_api_connect_uri_t_handler (vl_api_connect_uri_t * mp)
a->api_context = mp->context;
a->app_index = app->index;
a->mp = mp;
- rv = vnet_connect_uri (a);
+ if ((error = vnet_connect_uri (a)))
+ {
+ rv = clib_error_get_code (error);
+ if (rv != VNET_API_ERROR_SESSION_REDIRECT)
+ clib_error_report (error);
+ }
}
else
{
rv = VNET_API_ERROR_APPLICATION_NOT_ATTACHED;
}
- if (rv == 0 || rv == VNET_CONNECT_REDIRECTED)
+ if (rv == 0 || rv == VNET_API_ERROR_SESSION_REDIRECT)
return;
/* Got some error, relay it */
@@ -516,7 +539,7 @@ vl_api_reset_session_reply_t_handler (vl_api_reset_session_reply_t * mp)
if (!app)
return;
- stream_session_parse_handle (mp->handle, &index, &thread_index);
+ session_parse_handle (mp->handle, &index, &thread_index);
s = stream_session_get_if_valid (index, thread_index);
if (s == 0 || app->index != s->app_index)
{
@@ -552,7 +575,7 @@ vl_api_accept_session_reply_t_handler (vl_api_accept_session_reply_t * mp)
}
else
{
- stream_session_parse_handle (mp->handle, &session_index, &thread_index);
+ session_parse_handle (mp->handle, &session_index, &thread_index);
s = stream_session_get_if_valid (session_index, thread_index);
if (!s)
{
@@ -564,7 +587,6 @@ vl_api_accept_session_reply_t_handler (vl_api_accept_session_reply_t * mp)
clib_warning ("app doesn't own session");
return;
}
- /* XXX volatile? */
s->session_state = SESSION_STATE_READY;
}
}
@@ -581,7 +603,8 @@ vl_api_bind_sock_t_handler (vl_api_bind_sock_t * mp)
{
vl_api_bind_sock_reply_t *rmp;
vnet_bind_args_t _a, *a = &_a;
- int rv = VNET_API_ERROR_APPLICATION_NOT_ATTACHED;
+ int rv = 0;
+ clib_error_t *error;
application_t *app;
if (session_manager_is_enabled () == 0)
@@ -594,18 +617,28 @@ vl_api_bind_sock_t_handler (vl_api_bind_sock_t * mp)
if (app)
{
ip46_address_t *ip46 = (ip46_address_t *) mp->ip;
-
memset (a, 0, sizeof (*a));
- a->tep.is_ip4 = mp->is_ip4;
- a->tep.ip = *ip46;
- a->tep.port = mp->port;
- a->tep.vrf = mp->vrf;
+ a->sep.is_ip4 = mp->is_ip4;
+ a->sep.ip = *ip46;
+ a->sep.port = mp->port;
+ a->sep.fib_index = mp->vrf;
+ a->sep.sw_if_index = ENDPOINT_INVALID_INDEX;
a->app_index = app->index;
+ a->proto = mp->proto;
- rv = vnet_bind (a);
+ if ((error = vnet_bind (a)))
+ {
+ rv = clib_error_get_code (error);
+ clib_error_report (error);
+ }
}
done:
- REPLY_MACRO (VL_API_BIND_SOCK_REPLY);
+ /* *INDENT-OFF* */
+ REPLY_MACRO2 (VL_API_BIND_SOCK_REPLY,({
+ if (!rv)
+ rmp->handle = a->handle;
+ }));
+ /* *INDENT-ON* */
}
static void
@@ -614,7 +647,8 @@ vl_api_unbind_sock_t_handler (vl_api_unbind_sock_t * mp)
vl_api_unbind_sock_reply_t *rmp;
vnet_unbind_args_t _a, *a = &_a;
application_t *app;
- int rv = VNET_API_ERROR_APPLICATION_NOT_ATTACHED;
+ clib_error_t *error;
+ int rv = 0;
if (session_manager_is_enabled () == 0)
{
@@ -627,7 +661,11 @@ vl_api_unbind_sock_t_handler (vl_api_unbind_sock_t * mp)
{
a->app_index = mp->client_index;
a->handle = mp->handle;
- rv = vnet_unbind (a);
+ if ((error = vnet_unbind (a)))
+ {
+ rv = clib_error_get_code (error);
+ clib_error_report (error);
+ }
}
done:
@@ -640,7 +678,8 @@ vl_api_connect_sock_t_handler (vl_api_connect_sock_t * mp)
vl_api_connect_session_reply_t *rmp;
vnet_connect_args_t _a, *a = &_a;
application_t *app;
- int rv;
+ clib_error_t *error = 0;
+ int rv = 0;
if (session_manager_is_enabled () == 0)
{
@@ -656,22 +695,28 @@ vl_api_connect_sock_t_handler (vl_api_connect_sock_t * mp)
client_q = vl_api_client_index_to_input_queue (mp->client_index);
mp->client_queue_address = pointer_to_uword (client_q);
- a->tep.is_ip4 = mp->is_ip4;
- a->tep.ip = *ip46;
- a->tep.port = mp->port;
- a->tep.vrf = mp->vrf;
+ a->sep.is_ip4 = mp->is_ip4;
+ a->sep.ip = *ip46;
+ a->sep.port = mp->port;
+ a->sep.transport_proto = mp->proto;
+ a->sep.fib_index = mp->vrf;
+ a->sep.sw_if_index = ENDPOINT_INVALID_INDEX;
a->api_context = mp->context;
a->app_index = app->index;
- a->proto = mp->proto;
a->mp = mp;
- rv = vnet_connect (a);
+ if ((error = vnet_connect (a)))
+ {
+ rv = clib_error_get_code (error);
+ if (rv != VNET_API_ERROR_SESSION_REDIRECT)
+ clib_error_report (error);
+ }
}
else
{
rv = VNET_API_ERROR_APPLICATION_NOT_ATTACHED;
}
- if (rv == 0 || rv == VNET_CONNECT_REDIRECTED)
+ if (rv == 0 || rv == VNET_API_ERROR_SESSION_REDIRECT)
return;
/* Got some error, relay it */
@@ -680,6 +725,46 @@ done:
REPLY_MACRO (VL_API_CONNECT_SESSION_REPLY);
}
+static void
+vl_api_app_namespace_add_del_t_handler (vl_api_app_namespace_add_del_t * mp)
+{
+ vl_api_app_namespace_add_del_reply_t *rmp;
+ u8 *ns_id = 0;
+ clib_error_t *error = 0;
+ int rv = 0;
+ if (!session_manager_is_enabled ())
+ {
+ rv = VNET_API_ERROR_FEATURE_DISABLED;
+ goto done;
+ }
+
+ if (mp->namespace_id_len > ARRAY_LEN (mp->namespace_id))
+ {
+ rv = VNET_API_ERROR_INVALID_VALUE;
+ goto done;
+ }
+
+ vec_validate (ns_id, mp->namespace_id_len - 1);
+ clib_memcpy (ns_id, mp->namespace_id, mp->namespace_id_len);
+ vnet_app_namespace_add_del_args_t args = {
+ .ns_id = ns_id,
+ .secret = mp->secret,
+ .sw_if_index = clib_net_to_host_u32 (mp->sw_if_index),
+ .ip4_fib_id = clib_net_to_host_u32 (mp->ip4_fib_id),
+ .ip6_fib_id = clib_net_to_host_u32 (mp->ip6_fib_id),
+ .is_add = 1
+ };
+ error = vnet_app_namespace_add_del (&args);
+ if (error)
+ {
+ rv = clib_error_get_code (error);
+ clib_error_report (error);
+ }
+ vec_free (ns_id);
+done:
+ REPLY_MACRO (VL_API_APP_NAMESPACE_ADD_DEL_REPLY);
+}
+
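A minimal sketch, not part of the patch, of an in-VPP caller (e.g., a plugin or CLI handler) driving the same code path as the API handler above; the example_ name and all values are illustrative assumptions.

static clib_error_t *
example_add_namespace (void)
{
  u8 *ns_id = format (0, "red");
  vnet_app_namespace_add_del_args_t args = {
    .ns_id = ns_id,
    .secret = 1234,
    .sw_if_index = 1,		/* source interface for the namespace */
    .ip4_fib_id = 0,		/* illustrative fib ids */
    .ip6_fib_id = 0,
    .is_add = 1
  };
  clib_error_t *error = vnet_app_namespace_add_del (&args);
  vec_free (ns_id);
  return error;
}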
static clib_error_t *
application_reaper_cb (u32 client_index)
{
diff --git a/src/vnet/session/session_cli.c b/src/vnet/session/session_cli.c
index 8c30a1df7eb..588cb603d39 100755
--- a/src/vnet/session/session_cli.c
+++ b/src/vnet/session/session_cli.c
@@ -138,22 +138,21 @@ unformat_stream_session (unformat_input_t * input, va_list * args)
stream_session_t *s;
u8 proto = ~0;
ip46_address_t lcl, rmt;
- u32 lcl_port = 0, rmt_port = 0;
- u8 is_ip4 = 0, s_type = ~0;
+ u32 lcl_port = 0, rmt_port = 0, fib_index = 0;
+ u8 is_ip4 = 0;
if (!unformat (input, "%U", unformat_stream_session_id, &proto, &lcl, &rmt,
&lcl_port, &rmt_port, &is_ip4))
return 0;
- s_type = session_type_from_proto_and_ip (proto, is_ip4);
if (is_ip4)
- s = stream_session_lookup4 (&lcl.ip4, &rmt.ip4,
- clib_host_to_net_u16 (lcl_port),
- clib_host_to_net_u16 (rmt_port), s_type);
+ s = session_lookup4 (fib_index, &lcl.ip4, &rmt.ip4,
+ clib_host_to_net_u16 (lcl_port),
+ clib_host_to_net_u16 (rmt_port), proto);
else
- s = stream_session_lookup6 (&lcl.ip6, &rmt.ip6,
- clib_host_to_net_u16 (lcl_port),
- clib_host_to_net_u16 (rmt_port), s_type);
+ s = session_lookup6 (fib_index, &lcl.ip6, &rmt.ip6,
+ clib_host_to_net_u16 (lcl_port),
+ clib_host_to_net_u16 (rmt_port), proto);
if (s)
{
*result = s;
@@ -170,8 +169,8 @@ unformat_transport_connection (unformat_input_t * input, va_list * args)
transport_connection_t *tc;
u8 proto = ~0;
ip46_address_t lcl, rmt;
- u32 lcl_port = 0, rmt_port = 0;
- u8 is_ip4 = 0, s_type = ~0;
+ u32 lcl_port = 0, rmt_port = 0, fib_index = 0;
+ u8 is_ip4 = 0;
if (!unformat (input, "%U", unformat_stream_session_id, &proto, &lcl, &rmt,
&lcl_port, &rmt_port, &is_ip4))
@@ -180,17 +179,14 @@ unformat_transport_connection (unformat_input_t * input, va_list * args)
proto = (proto == (u8) ~ 0) ? suggested_proto : proto;
if (proto == (u8) ~ 0)
return 0;
- s_type = session_type_from_proto_and_ip (proto, is_ip4);
if (is_ip4)
- tc = stream_session_lookup_transport4 (&lcl.ip4, &rmt.ip4,
- clib_host_to_net_u16 (lcl_port),
- clib_host_to_net_u16 (rmt_port),
- s_type);
+ tc = session_lookup_connection4 (fib_index, &lcl.ip4, &rmt.ip4,
+ clib_host_to_net_u16 (lcl_port),
+ clib_host_to_net_u16 (rmt_port), proto);
else
- tc = stream_session_lookup_transport6 (&lcl.ip6, &rmt.ip6,
- clib_host_to_net_u16 (lcl_port),
- clib_host_to_net_u16 (rmt_port),
- s_type);
+ tc = session_lookup_connection6 (fib_index, &lcl.ip6, &rmt.ip6,
+ clib_host_to_net_u16 (lcl_port),
+ clib_host_to_net_u16 (rmt_port), proto);
if (tc)
{
diff --git a/src/vnet/session/session_debug.h b/src/vnet/session/session_debug.h
index eb11f1a00b9..d9b71035314 100644
--- a/src/vnet/session/session_debug.h
+++ b/src/vnet/session/session_debug.h
@@ -31,11 +31,13 @@ typedef enum _session_evt_dbg
#undef _
} session_evt_dbg_e;
-#define SESSION_DBG (0)
+#define SESSION_DEBUG (0 && TRANSPORT_DEBUG)
#define SESSION_DEQ_NODE_EVTS (0)
#define SESSION_EVT_POLL_DBG (1)
-#if TRANSPORT_DEBUG && SESSION_DBG
+#if SESSION_DEBUG
+
+#define SESSION_DBG(_fmt, _args...) clib_warning (_fmt, ##_args)
#define DEC_SESSION_ETD(_s, _e, _size) \
struct \
@@ -78,7 +80,7 @@ typedef enum _session_evt_dbg
do { _body; } while (0); \
}
-#if SESSION_DEQ_NODE_EVTS
+#if SESSION_DEQ_NODE_EVTS && SESSION_DEBUG > 1
#define SESSION_EVT_DEQ_NODE_HANDLER(_node_evt) \
{ \
ELOG_TYPE_DECLARE (_e) = \
@@ -96,9 +98,9 @@ typedef enum _session_evt_dbg
}
#else
#define SESSION_EVT_DEQ_NODE_HANDLER(_node_evt)
-#endif
+#endif /* SESSION_DEQ_NODE_EVTS */
-#if SESSION_DBG && SESSION_EVT_POLL_DBG
+#if SESSION_EVT_POLL_DBG && SESSION_DEBUG > 1
#define SESSION_EVT_POLL_GAP(_smm, _my_thread_index) \
{ \
ELOG_TYPE_DECLARE (_e) = \
@@ -122,7 +124,7 @@ typedef enum _session_evt_dbg
#else
#define SESSION_EVT_POLL_GAP(_smm, _my_thread_index)
#define SESSION_EVT_POLL_GAP_TRACK_HANDLER(_smm, _my_thread_index)
-#endif
+#endif /* SESSION_EVT_POLL_DBG */
#define CONCAT_HELPER(_a, _b) _a##_b
#define CC(_a, _b) CONCAT_HELPER(_a, _b)
@@ -130,7 +132,8 @@ typedef enum _session_evt_dbg
#else
#define SESSION_EVT_DBG(_evt, _args...)
-#endif
+#define SESSION_DBG(_fmt, _args...)
+#endif /* SESSION_DEBUG */
#endif /* SRC_VNET_SESSION_SESSION_DEBUG_H_ */
/*
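A one-line usage sketch, not part of the patch: with SESSION_DEBUG enabled the new SESSION_DBG macro expands to a clib_warning, otherwise it compiles away. The variable rv is assumed to be in scope in the calling function.

  SESSION_DBG ("connection lookup failed, rv %d", rv);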
diff --git a/src/vnet/session/session_lookup.c b/src/vnet/session/session_lookup.c
index 4487b1c305f..796d93ec33e 100644
--- a/src/vnet/session/session_lookup.c
+++ b/src/vnet/session/session_lookup.c
@@ -27,10 +27,19 @@
#include <vppinfra/bihash_template.c>
#include <vnet/session/session_lookup.h>
#include <vnet/session/session.h>
+#include <vnet/session/application.h>
-static session_lookup_t session_lookup;
+/**
+ * External vector of per transport virtual functions table
+ */
extern transport_proto_vft_t *tp_vfts;
+/**
+ * Network namespace index (i.e., fib index) to session lookup table. We
+ * should have one per network protocol type but for now we only support IP4/6
+ */
+static u32 *fib_index_to_table_index[2];
+
/* *INDENT-OFF* */
/* 16 octets */
typedef CLIB_PACKED (struct {
@@ -153,461 +162,859 @@ make_v6_ss_kv_from_tc (session_kv6_t * kv, transport_connection_t * t)
session_type_from_proto_and_ip (t->transport_proto, 0));
}
-/*
- * Session lookup key; (src-ip, dst-ip, src-port, dst-port, session-type)
- * Value: (owner thread index << 32 | session_index);
+
+static session_table_t *
+session_table_get_or_alloc_for_connection (transport_connection_t * tc)
+{
+ session_table_t *st;
+ u32 table_index, fib_proto = transport_connection_fib_proto (tc);
+ if (vec_len (fib_index_to_table_index[fib_proto]) <= tc->fib_index)
+ {
+ st = session_table_alloc ();
+ table_index = session_table_index (st);
+ vec_validate (fib_index_to_table_index[fib_proto], tc->fib_index);
+ fib_index_to_table_index[fib_proto][tc->fib_index] = table_index;
+ return st;
+ }
+ else
+ {
+ table_index = fib_index_to_table_index[fib_proto][tc->fib_index];
+ return session_table_get (table_index);
+ }
+}
+
+static session_table_t *
+session_table_get_for_connection (transport_connection_t * tc)
+{
+ u32 fib_proto = transport_connection_fib_proto (tc);
+ if (vec_len (fib_index_to_table_index[fib_proto]) <= tc->fib_index)
+ return 0;
+ return
+ session_table_get (fib_index_to_table_index[fib_proto][tc->fib_index]);
+}
+
+static session_table_t *
+session_table_get_for_fib_index (u32 fib_proto, u32 fib_index)
+{
+ if (vec_len (fib_index_to_table_index[fib_proto]) <= fib_index)
+ return 0;
+ return session_table_get (fib_index_to_table_index[fib_proto][fib_index]);
+}
+
+u32
+session_lookup_get_index_for_fib (u32 fib_proto, u32 fib_index)
+{
+ if (vec_len (fib_index_to_table_index[fib_proto]) <= fib_index)
+ return SESSION_TABLE_INVALID_INDEX;
+ return fib_index_to_table_index[fib_proto][fib_index];
+}
+
+/**
+ * Add transport connection to a session table
+ *
+ * Session lookup 5-tuple (src-ip, dst-ip, src-port, dst-port, session-type)
+ * is added to requested session table.
+ *
+ * @param tc transport connection to be added
+ * @param value value to be stored
+ *
+ * @return non-zero if failure
*/
-void
-stream_session_table_add_for_tc (transport_connection_t * tc, u64 value)
+int
+session_lookup_add_connection (transport_connection_t * tc, u64 value)
{
- session_lookup_t *sl = &session_lookup;
+ session_table_t *st;
session_kv4_t kv4;
session_kv6_t kv6;
+ st = session_table_get_or_alloc_for_connection (tc);
+ if (!st)
+ return -1;
if (tc->is_ip4)
{
make_v4_ss_kv_from_tc (&kv4, tc);
kv4.value = value;
- clib_bihash_add_del_16_8 (&sl->v4_session_hash, &kv4, 1 /* is_add */ );
+ return clib_bihash_add_del_16_8 (&st->v4_session_hash, &kv4,
+ 1 /* is_add */ );
}
else
{
make_v6_ss_kv_from_tc (&kv6, tc);
kv6.value = value;
- clib_bihash_add_del_48_8 (&sl->v6_session_hash, &kv6, 1 /* is_add */ );
+ return clib_bihash_add_del_48_8 (&st->v6_session_hash, &kv6,
+ 1 /* is_add */ );
}
}
-void
-stream_session_table_add (session_manager_main_t * smm, stream_session_t * s,
- u64 value)
+int
+session_lookup_add_session_endpoint (u32 table_index,
+ session_endpoint_t * sep, u64 value)
{
- transport_connection_t *tc;
+ session_table_t *st;
+ session_kv4_t kv4;
+ session_kv6_t kv6;
- tc = tp_vfts[s->session_type].get_connection (s->connection_index,
- s->thread_index);
- stream_session_table_add_for_tc (tc, value);
+ st = session_table_get (table_index);
+ if (!st)
+ return -1;
+ if (sep->is_ip4)
+ {
+ make_v4_listener_kv (&kv4, &sep->ip.ip4, sep->port,
+ sep->transport_proto);
+ kv4.value = value;
+ return clib_bihash_add_del_16_8 (&st->v4_session_hash, &kv4, 1);
+ }
+ else
+ {
+ make_v6_listener_kv (&kv6, &sep->ip.ip6, sep->port,
+ sep->transport_proto);
+ kv6.value = value;
+ return clib_bihash_add_del_48_8 (&st->v6_session_hash, &kv6, 1);
+ }
}
int
-stream_session_table_del_for_tc (transport_connection_t * tc)
+session_lookup_del_session_endpoint (u32 table_index,
+ session_endpoint_t * sep)
{
- session_lookup_t *sl = &session_lookup;
+ session_table_t *st;
session_kv4_t kv4;
session_kv6_t kv6;
+ st = session_table_get (table_index);
+ if (!st)
+ return -1;
+ if (sep->is_ip4)
+ {
+ make_v4_listener_kv (&kv4, &sep->ip.ip4, sep->port,
+ sep->transport_proto);
+ return clib_bihash_add_del_16_8 (&st->v4_session_hash, &kv4, 0);
+ }
+ else
+ {
+ make_v6_listener_kv (&kv6, &sep->ip.ip6, sep->port,
+ sep->transport_proto);
+ return clib_bihash_add_del_48_8 (&st->v6_session_hash, &kv6, 0);
+ }
+}
+
+/**
+ * Delete transport connection from session table
+ *
+ * @param tc transport connection to be removed
+ *
+ * @return non-zero if failure
+ */
+int
+session_lookup_del_connection (transport_connection_t * tc)
+{
+ session_table_t *st;
+ session_kv4_t kv4;
+ session_kv6_t kv6;
+
+ st = session_table_get_for_connection (tc);
+ if (!st)
+ return -1;
if (tc->is_ip4)
{
make_v4_ss_kv_from_tc (&kv4, tc);
- return clib_bihash_add_del_16_8 (&sl->v4_session_hash, &kv4,
+ return clib_bihash_add_del_16_8 (&st->v4_session_hash, &kv4,
0 /* is_add */ );
}
else
{
make_v6_ss_kv_from_tc (&kv6, tc);
- return clib_bihash_add_del_48_8 (&sl->v6_session_hash, &kv6,
+ return clib_bihash_add_del_48_8 (&st->v6_session_hash, &kv6,
0 /* is_add */ );
}
-
- return 0;
}
int
-stream_session_table_del (stream_session_t * s)
+session_lookup_del_session (stream_session_t * s)
{
transport_connection_t *ts;
ts = tp_vfts[s->session_type].get_connection (s->connection_index,
s->thread_index);
- return stream_session_table_del_for_tc (ts);
+ return session_lookup_del_connection (ts);
}
-
-void
-stream_session_half_open_table_add (transport_connection_t * tc, u64 value)
+u32
+session_lookup_session_endpoint (u32 table_index, session_endpoint_t * sep)
{
- session_lookup_t *sl = &session_lookup;
+ session_table_t *st;
session_kv4_t kv4;
session_kv6_t kv6;
+ int rv;
- if (tc->is_ip4)
+ st = session_table_get (table_index);
+ if (!st)
+ return SESSION_INVALID_INDEX;
+ if (sep->is_ip4)
{
- make_v4_ss_kv_from_tc (&kv4, tc);
- kv4.value = value;
- (void) clib_bihash_add_del_16_8 (&sl->v4_half_open_hash, &kv4,
- 1 /* is_add */ );
+ make_v4_listener_kv (&kv4, &sep->ip.ip4, sep->port,
+ sep->transport_proto);
+ rv = clib_bihash_search_inline_16_8 (&st->v4_session_hash, &kv4);
+ if (rv == 0)
+ return (u32) kv4.value;
}
else
{
- make_v6_ss_kv_from_tc (&kv6, tc);
- kv6.value = value;
- (void) clib_bihash_add_del_48_8 (&sl->v6_half_open_hash, &kv6,
- 1 /* is_add */ );
+ make_v6_listener_kv (&kv6, &sep->ip.ip6, sep->port,
+ sep->transport_proto);
+ rv = clib_bihash_search_inline_48_8 (&st->v6_session_hash, &kv6);
+ if (rv == 0)
+ return (u32) kv6.value;
}
+ return SESSION_INVALID_INDEX;
}
-void
-stream_session_half_open_table_del (transport_connection_t * tc)
+u32
+session_lookup_local_session_endpoint (u32 table_index,
+ session_endpoint_t * sep)
{
- session_lookup_t *sl = &session_lookup;
+ session_table_t *st;
session_kv4_t kv4;
session_kv6_t kv6;
+ int rv;
- if (tc->is_ip4)
+ st = session_table_get (table_index);
+ if (!st)
+ return SESSION_INVALID_INDEX;
+ if (sep->is_ip4)
{
- make_v4_ss_kv_from_tc (&kv4, tc);
- clib_bihash_add_del_16_8 (&sl->v4_half_open_hash, &kv4,
- 0 /* is_add */ );
+ make_v4_listener_kv (&kv4, &sep->ip.ip4, sep->port,
+ sep->transport_proto);
+ rv = clib_bihash_search_inline_16_8 (&st->v4_session_hash, &kv4);
+ if (rv == 0)
+ return (u32) kv4.value;
+
+ /*
+       * Zero out the ip. The logic is that a connect to a local ip, say
+       * 127.0.0.1:port, can match a listener bound to 0.0.0.0:port
+ */
+ kv4.key[0] = 0;
+ rv = clib_bihash_search_inline_16_8 (&st->v4_session_hash, &kv4);
+ if (rv == 0)
+ return (u32) kv4.value;
}
else
{
- make_v6_ss_kv_from_tc (&kv6, tc);
- clib_bihash_add_del_48_8 (&sl->v6_half_open_hash, &kv6,
- 0 /* is_add */ );
+ make_v6_listener_kv (&kv6, &sep->ip.ip6, sep->port,
+ sep->transport_proto);
+ rv = clib_bihash_search_inline_48_8 (&st->v6_session_hash, &kv6);
+ if (rv == 0)
+ return (u32) kv6.value;
+
+ /*
+ * Zero out the ip. Same logic as above.
+ */
+ kv6.key[0] = kv6.key[1] = 0;
+ rv = clib_bihash_search_inline_48_8 (&st->v6_session_hash, &kv6);
+ if (rv == 0)
+ return (u32) kv6.value;
}
+ return SESSION_INVALID_INDEX;
}
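A minimal sketch, not part of the patch, of the zero-ip fallback above: an application bound to 0.0.0.0 on some port in a namespace's local table is still found when a peer connects to 127.0.0.1 on that port. The example_ name is an illustrative assumption, and the port must use the same byte order as the one stored when the listener was added.

static u32
example_local_lookup (u32 local_table_index, u16 port)
{
  session_endpoint_t sep;
  memset (&sep, 0, sizeof (sep));
  sep.is_ip4 = 1;
  sep.ip.ip4.as_u32 = clib_host_to_net_u32 (0x7f000001);	/* 127.0.0.1 */
  sep.port = port;
  sep.transport_proto = TRANSPORT_PROTO_TCP;
  return session_lookup_local_session_endpoint (local_table_index, &sep);
}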
-stream_session_t *
-stream_session_lookup_listener4 (ip4_address_t * lcl, u16 lcl_port, u8 proto)
+static stream_session_t *
+session_lookup_listener4_i (session_table_t * st, ip4_address_t * lcl,
+ u16 lcl_port, u8 proto)
{
- session_lookup_t *sl = &session_lookup;
session_kv4_t kv4;
int rv;
make_v4_listener_kv (&kv4, lcl, lcl_port, proto);
- rv = clib_bihash_search_inline_16_8 (&sl->v4_session_hash, &kv4);
+ rv = clib_bihash_search_inline_16_8 (&st->v4_session_hash, &kv4);
if (rv == 0)
return session_manager_get_listener (proto, (u32) kv4.value);
/* Zero out the lcl ip */
kv4.key[0] = 0;
- rv = clib_bihash_search_inline_16_8 (&sl->v4_session_hash, &kv4);
+ rv = clib_bihash_search_inline_16_8 (&st->v4_session_hash, &kv4);
if (rv == 0)
return session_manager_get_listener (proto, (u32) kv4.value);
return 0;
}
-/** Looks up a session based on the 5-tuple passed as argument.
- *
- * First it tries to find an established session, if this fails, it tries
- * finding a listener session if this fails, it tries a lookup with a
- * wildcarded local source (listener bound to all interfaces)
- */
stream_session_t *
-stream_session_lookup4 (ip4_address_t * lcl, ip4_address_t * rmt,
- u16 lcl_port, u16 rmt_port, u8 proto)
+session_lookup_listener4 (u32 fib_index, ip4_address_t * lcl, u16 lcl_port,
+ u8 proto)
{
- session_lookup_t *sl = &session_lookup;
- session_kv4_t kv4;
- stream_session_t *s;
- int rv;
-
- /* Lookup session amongst established ones */
- make_v4_ss_kv (&kv4, lcl, rmt, lcl_port, rmt_port, proto);
- rv = clib_bihash_search_inline_16_8 (&sl->v4_session_hash, &kv4);
- if (rv == 0)
- return stream_session_get_from_handle (kv4.value);
-
- /* If nothing is found, check if any listener is available */
- if ((s = stream_session_lookup_listener4 (lcl, lcl_port, proto)))
- return s;
-
- /* Finally, try half-open connections */
- rv = clib_bihash_search_inline_16_8 (&sl->v4_half_open_hash, &kv4);
- if (rv == 0)
- return stream_session_get_from_handle (kv4.value);
- return 0;
+ session_table_t *st;
+ st = session_table_get_for_fib_index (FIB_PROTOCOL_IP4, fib_index);
+ if (!st)
+ return 0;
+ return session_lookup_listener4_i (st, lcl, lcl_port, proto);
}
-stream_session_t *
-stream_session_lookup_listener6 (ip6_address_t * lcl, u16 lcl_port, u8 proto)
+static stream_session_t *
+session_lookup_listener6_i (session_table_t * st, ip6_address_t * lcl,
+ u16 lcl_port, u8 proto)
{
- session_lookup_t *sl = &session_lookup;
session_kv6_t kv6;
int rv;
make_v6_listener_kv (&kv6, lcl, lcl_port, proto);
- rv = clib_bihash_search_inline_48_8 (&sl->v6_session_hash, &kv6);
+ rv = clib_bihash_search_inline_48_8 (&st->v6_session_hash, &kv6);
if (rv == 0)
return session_manager_get_listener (proto, (u32) kv6.value);
/* Zero out the lcl ip */
kv6.key[0] = kv6.key[1] = 0;
- rv = clib_bihash_search_inline_48_8 (&sl->v6_session_hash, &kv6);
+ rv = clib_bihash_search_inline_48_8 (&st->v6_session_hash, &kv6);
if (rv == 0)
return session_manager_get_listener (proto, (u32) kv6.value);
return 0;
}
-/* Looks up a session based on the 5-tuple passed as argument.
- * First it tries to find an established session, if this fails, it tries
- * finding a listener session if this fails, it tries a lookup with a
- * wildcarded local source (listener bound to all interfaces) */
stream_session_t *
-stream_session_lookup6 (ip6_address_t * lcl, ip6_address_t * rmt,
- u16 lcl_port, u16 rmt_port, u8 proto)
+session_lookup_listener6 (u32 fib_index, ip6_address_t * lcl, u16 lcl_port,
+ u8 proto)
{
- session_lookup_t *sl = &session_lookup;
- session_kv6_t kv6;
- stream_session_t *s;
- int rv;
+ session_table_t *st;
+ st = session_table_get_for_fib_index (FIB_PROTOCOL_IP6, fib_index);
+ if (!st)
+ return 0;
+ return session_lookup_listener6_i (st, lcl, lcl_port, proto);
+}
- make_v6_ss_kv (&kv6, lcl, rmt, lcl_port, rmt_port, proto);
- rv = clib_bihash_search_inline_48_8 (&sl->v6_session_hash, &kv6);
- if (rv == 0)
- return stream_session_get_from_handle (kv6.value);
+stream_session_t *
+session_lookup_listener (u32 table_index, session_endpoint_t * sep)
+{
+ session_table_t *st;
+ st = session_table_get (table_index);
+ if (!st)
+ return 0;
+ if (sep->is_ip4)
+ return session_lookup_listener4_i (st, &sep->ip.ip4, sep->port,
+ sep->transport_proto);
+ else
+ return session_lookup_listener6_i (st, &sep->ip.ip6, sep->port,
+ sep->transport_proto);
+ return 0;
+}
- /* If nothing is found, check if any listener is available */
- if ((s = stream_session_lookup_listener6 (lcl, lcl_port, proto)))
- return s;
+int
+session_lookup_add_half_open (transport_connection_t * tc, u64 value)
+{
+ session_table_t *st;
+ session_kv4_t kv4;
+ session_kv6_t kv6;
- /* Finally, try half-open connections */
- rv = clib_bihash_search_inline_48_8 (&sl->v6_half_open_hash, &kv6);
- if (rv == 0)
- return stream_session_get_from_handle (kv6.value);
- return 0;
+ st = session_table_get_or_alloc_for_connection (tc);
+ if (!st)
+ return 0;
+ if (tc->is_ip4)
+ {
+ make_v4_ss_kv_from_tc (&kv4, tc);
+ kv4.value = value;
+ return clib_bihash_add_del_16_8 (&st->v4_half_open_hash, &kv4,
+ 1 /* is_add */ );
+ }
+ else
+ {
+ make_v6_ss_kv_from_tc (&kv6, tc);
+ kv6.value = value;
+ return clib_bihash_add_del_48_8 (&st->v6_half_open_hash, &kv6,
+ 1 /* is_add */ );
+ }
}
-stream_session_t *
-stream_session_lookup_listener (ip46_address_t * lcl, u16 lcl_port, u8 proto)
+int
+session_lookup_del_half_open (transport_connection_t * tc)
{
- switch (proto)
+ session_table_t *st;
+ session_kv4_t kv4;
+ session_kv6_t kv6;
+
+ st = session_table_get_for_connection (tc);
+ if (!st)
+ return -1;
+ if (tc->is_ip4)
{
- case SESSION_TYPE_IP4_UDP:
- case SESSION_TYPE_IP4_TCP:
- return stream_session_lookup_listener4 (&lcl->ip4, lcl_port, proto);
- break;
- case SESSION_TYPE_IP6_UDP:
- case SESSION_TYPE_IP6_TCP:
- return stream_session_lookup_listener6 (&lcl->ip6, lcl_port, proto);
- break;
+ make_v4_ss_kv_from_tc (&kv4, tc);
+ return clib_bihash_add_del_16_8 (&st->v4_half_open_hash, &kv4,
+ 0 /* is_add */ );
+ }
+ else
+ {
+ make_v6_ss_kv_from_tc (&kv6, tc);
+ return clib_bihash_add_del_48_8 (&st->v6_half_open_hash, &kv6,
+ 0 /* is_add */ );
}
- return 0;
}
u64
-stream_session_half_open_lookup_handle (ip46_address_t * lcl,
- ip46_address_t * rmt, u16 lcl_port,
- u16 rmt_port, u8 proto)
+session_lookup_half_open_handle (transport_connection_t * tc)
{
- session_lookup_t *sl = &session_lookup;
+ session_table_t *st;
session_kv4_t kv4;
session_kv6_t kv6;
int rv;
- switch (proto)
+ st = session_table_get_for_fib_index (transport_connection_fib_proto (tc),
+ tc->fib_index);
+ if (!st)
+ return HALF_OPEN_LOOKUP_INVALID_VALUE;
+ if (tc->is_ip4)
{
- case SESSION_TYPE_IP4_UDP:
- case SESSION_TYPE_IP4_TCP:
- make_v4_ss_kv (&kv4, &lcl->ip4, &rmt->ip4, lcl_port, rmt_port, proto);
- rv = clib_bihash_search_inline_16_8 (&sl->v4_half_open_hash, &kv4);
-
+ make_v4_ss_kv (&kv4, &tc->lcl_ip.ip4, &tc->rmt_ip.ip4, tc->lcl_port,
+ tc->rmt_port, tc->transport_proto);
+ rv = clib_bihash_search_inline_16_8 (&st->v4_half_open_hash, &kv4);
if (rv == 0)
return kv4.value;
-
- return HALF_OPEN_LOOKUP_INVALID_VALUE;
- break;
- case SESSION_TYPE_IP6_UDP:
- case SESSION_TYPE_IP6_TCP:
- make_v6_ss_kv (&kv6, &lcl->ip6, &rmt->ip6, lcl_port, rmt_port, proto);
- rv = clib_bihash_search_inline_48_8 (&sl->v6_half_open_hash, &kv6);
-
+ }
+ else
+ {
+ make_v6_ss_kv (&kv6, &tc->lcl_ip.ip6, &tc->rmt_ip.ip6, tc->lcl_port,
+ tc->rmt_port, tc->transport_proto);
+ rv = clib_bihash_search_inline_48_8 (&st->v6_half_open_hash, &kv6);
if (rv == 0)
return kv6.value;
-
- return HALF_OPEN_LOOKUP_INVALID_VALUE;
- break;
}
return HALF_OPEN_LOOKUP_INVALID_VALUE;
}
transport_connection_t *
-stream_session_half_open_lookup (ip46_address_t * lcl, ip46_address_t * rmt,
- u16 lcl_port, u16 rmt_port, u8 proto)
+session_lookup_half_open_connection (u64 handle, u8 proto, u8 is_ip4)
{
- u64 handle;
- handle =
- stream_session_half_open_lookup_handle (lcl, rmt, lcl_port, rmt_port,
- proto);
+ u32 sst;
+
if (handle != HALF_OPEN_LOOKUP_INVALID_VALUE)
- return tp_vfts[proto].get_half_open (handle & 0xFFFFFFFF);
+ {
+ sst = session_type_from_proto_and_ip (proto, is_ip4);
+ return tp_vfts[sst].get_half_open (handle & 0xFFFFFFFF);
+ }
return 0;
}
-always_inline stream_session_t *
-stream_session_get_tsi (u64 ti_and_si, u32 thread_index)
-{
- ASSERT ((u32) (ti_and_si >> 32) == thread_index);
- return pool_elt_at_index (session_manager_main.sessions[thread_index],
- ti_and_si & 0xFFFFFFFFULL);
-}
-
+/**
+ * Lookup connection with ip4 and transport layer information
+ *
+ * This is used on the fast path, so it needs to be fast. Therefore, code
+ * duplication and 'hacks' are allowed.
+ *
+ * The lookup is incremental and returns whenever something is matched. The
+ * steps are:
+ * - Try to find an established session
+ * - Try to find a fully-formed or local source wildcarded (listener bound to
+ * all interfaces) listener session
+ * - Try to find a half-open connection
+ * - return 0
+ *
+ * @param fib_index index of fib wherein the connection was received
+ * @param lcl local ip4 address
+ * @param rmt remote ip4 address
+ * @param lcl_port local port
+ * @param rmt_port remote port
+ * @param proto transport protocol (e.g., tcp, udp)
+ * @param thread_index thread index for request
+ *
+ * @return pointer to transport connection, if one is found, 0 otherwise
+ */
transport_connection_t *
-stream_session_lookup_transport_wt4 (ip4_address_t * lcl, ip4_address_t * rmt,
- u16 lcl_port, u16 rmt_port, u8 proto,
- u32 my_thread_index)
+session_lookup_connection_wt4 (u32 fib_index, ip4_address_t * lcl,
+ ip4_address_t * rmt, u16 lcl_port,
+ u16 rmt_port, u8 proto, u32 thread_index)
{
- session_lookup_t *sl = &session_lookup;
+ session_table_t *st;
session_kv4_t kv4;
stream_session_t *s;
int rv;
+ st = session_table_get_for_fib_index (FIB_PROTOCOL_IP4, fib_index);
+ if (PREDICT_FALSE (!st))
+ return 0;
+
/* Lookup session amongst established ones */
make_v4_ss_kv (&kv4, lcl, rmt, lcl_port, rmt_port, proto);
- rv = clib_bihash_search_inline_16_8 (&sl->v4_session_hash, &kv4);
+ rv = clib_bihash_search_inline_16_8 (&st->v4_session_hash, &kv4);
if (rv == 0)
{
- s = stream_session_get_tsi (kv4.value, my_thread_index);
+ ASSERT ((u32) (kv4.value >> 32) == thread_index);
+ s = session_get (kv4.value & 0xFFFFFFFFULL, thread_index);
return tp_vfts[s->session_type].get_connection (s->connection_index,
- my_thread_index);
+ thread_index);
}
/* If nothing is found, check if any listener is available */
- s = stream_session_lookup_listener4 (lcl, lcl_port, proto);
+ s = session_lookup_listener4_i (st, lcl, lcl_port, proto);
if (s)
return tp_vfts[s->session_type].get_listener (s->connection_index);
/* Finally, try half-open connections */
- rv = clib_bihash_search_inline_16_8 (&sl->v4_half_open_hash, &kv4);
+ rv = clib_bihash_search_inline_16_8 (&st->v4_half_open_hash, &kv4);
if (rv == 0)
- return tp_vfts[proto].get_half_open (kv4.value & 0xFFFFFFFF);
+ {
+ u32 sst = session_type_from_proto_and_ip (proto, 1);
+ return tp_vfts[sst].get_half_open (kv4.value & 0xFFFFFFFF);
+ }
return 0;
}
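A minimal sketch, not part of the patch, of how a transport input path might resolve the owner of a received ip4 packet with the thread-aware lookup defined above; the example_ name is an illustrative assumption.

static transport_connection_t *
example_rx_lookup (u32 rx_fib_index, ip4_header_t * ip4,
		   tcp_header_t * tcp, u32 thread_index)
{
  /* For a received packet the local endpoint is the packet's destination */
  return session_lookup_connection_wt4 (rx_fib_index, &ip4->dst_address,
					&ip4->src_address, tcp->dst_port,
					tcp->src_port, TRANSPORT_PROTO_TCP,
					thread_index);
}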
+/**
+ * Lookup connection with ip4 and transport layer information
+ *
+ * Not optimized. Lookup logic is identical to that of
+ * @ref session_lookup_connection_wt4, but no caller thread index is
+ * required.
+ *
+ * @param fib_index index of the fib wherein the connection was received
+ * @param lcl local ip4 address
+ * @param rmt remote ip4 address
+ * @param lcl_port local port
+ * @param rmt_port remote port
+ * @param proto transport protocol (e.g., tcp, udp)
+ *
+ * @return pointer to transport connection, if one is found, 0 otherwise
+ */
transport_connection_t *
-stream_session_lookup_transport4 (ip4_address_t * lcl, ip4_address_t * rmt,
- u16 lcl_port, u16 rmt_port, u8 proto)
+session_lookup_connection4 (u32 fib_index, ip4_address_t * lcl,
+ ip4_address_t * rmt, u16 lcl_port, u16 rmt_port,
+ u8 proto)
{
- session_lookup_t *sl = &session_lookup;
+ session_table_t *st;
session_kv4_t kv4;
stream_session_t *s;
int rv;
+ st = session_table_get_for_fib_index (FIB_PROTOCOL_IP4, fib_index);
+ if (PREDICT_FALSE (!st))
+ return 0;
+
/* Lookup session amongst established ones */
make_v4_ss_kv (&kv4, lcl, rmt, lcl_port, rmt_port, proto);
- rv = clib_bihash_search_inline_16_8 (&sl->v4_session_hash, &kv4);
+ rv = clib_bihash_search_inline_16_8 (&st->v4_session_hash, &kv4);
if (rv == 0)
{
- s = stream_session_get_from_handle (kv4.value);
+ s = session_get_from_handle (kv4.value);
return tp_vfts[s->session_type].get_connection (s->connection_index,
s->thread_index);
}
/* If nothing is found, check if any listener is available */
- s = stream_session_lookup_listener4 (lcl, lcl_port, proto);
+ s = session_lookup_listener4_i (st, lcl, lcl_port, proto);
if (s)
return tp_vfts[s->session_type].get_listener (s->connection_index);
/* Finally, try half-open connections */
- rv = clib_bihash_search_inline_16_8 (&sl->v4_half_open_hash, &kv4);
+ rv = clib_bihash_search_inline_16_8 (&st->v4_half_open_hash, &kv4);
if (rv == 0)
- return tp_vfts[proto].get_half_open (kv4.value & 0xFFFFFFFF);
+ {
+ u32 sst = session_type_from_proto_and_ip (proto, 1);
+ return tp_vfts[sst].get_half_open (kv4.value & 0xFFFFFFFF);
+ }
return 0;
}
+/**
+ * Lookup session with ip4 and transport layer information
+ *
+ * Lookup logic is identical to that of @ref session_lookup_connection_wt4 but
+ * this returns a session as opposed to a transport connection;
+ */
+stream_session_t *
+session_lookup4 (u32 fib_index, ip4_address_t * lcl, ip4_address_t * rmt,
+ u16 lcl_port, u16 rmt_port, u8 proto)
+{
+ session_table_t *st;
+ session_kv4_t kv4;
+ stream_session_t *s;
+ int rv;
+
+ st = session_table_get_for_fib_index (FIB_PROTOCOL_IP4, fib_index);
+ if (PREDICT_FALSE (!st))
+ return 0;
+
+ /* Lookup session amongst established ones */
+ make_v4_ss_kv (&kv4, lcl, rmt, lcl_port, rmt_port, proto);
+ rv = clib_bihash_search_inline_16_8 (&st->v4_session_hash, &kv4);
+ if (rv == 0)
+ return session_get_from_handle (kv4.value);
+
+ /* If nothing is found, check if any listener is available */
+ if ((s = session_lookup_listener4_i (st, lcl, lcl_port, proto)))
+ return s;
+
+ /* Finally, try half-open connections */
+ rv = clib_bihash_search_inline_16_8 (&st->v4_half_open_hash, &kv4);
+ if (rv == 0)
+ return session_get_from_handle (kv4.value);
+ return 0;
+}
+
+/**
+ * Lookup connection with ip6 and transport layer information
+ *
+ * This is used on the fast path, so it needs to be fast. Therefore, code
+ * duplication and 'hacks' are allowed.
+ *
+ * The lookup is incremental and returns whenever something is matched. The
+ * steps are:
+ * - Try to find an established session
+ * - Try to find a fully-formed or local source wildcarded (listener bound to
+ * all interfaces) listener session
+ * - Try to find a half-open connection
+ * - return 0
+ *
+ * @param fib_index index of the fib wherein the connection was received
+ * @param lcl local ip6 address
+ * @param rmt remote ip6 address
+ * @param lcl_port local port
+ * @param rmt_port remote port
+ * @param proto transport protocol (e.g., tcp, udp)
+ * @param thread_index thread index for request
+ *
+ * @return pointer to transport connection, if one is found, 0 otherwise
+ */
transport_connection_t *
-stream_session_lookup_transport_wt6 (ip6_address_t * lcl, ip6_address_t * rmt,
- u16 lcl_port, u16 rmt_port, u8 proto,
- u32 my_thread_index)
+session_lookup_connection_wt6 (u32 fib_index, ip6_address_t * lcl,
+ ip6_address_t * rmt, u16 lcl_port,
+ u16 rmt_port, u8 proto, u32 thread_index)
{
- session_lookup_t *sl = &session_lookup;
+ session_table_t *st;
stream_session_t *s;
session_kv6_t kv6;
int rv;
+ st = session_table_get_for_fib_index (FIB_PROTOCOL_IP6, fib_index);
+ if (PREDICT_FALSE (!st))
+ return 0;
+
make_v6_ss_kv (&kv6, lcl, rmt, lcl_port, rmt_port, proto);
- rv = clib_bihash_search_inline_48_8 (&sl->v6_session_hash, &kv6);
+ rv = clib_bihash_search_inline_48_8 (&st->v6_session_hash, &kv6);
if (rv == 0)
{
- s = stream_session_get_tsi (kv6.value, my_thread_index);
+ ASSERT ((u32) (kv6.value >> 32) == thread_index);
+ s = session_get (kv6.value & 0xFFFFFFFFULL, thread_index);
return tp_vfts[s->session_type].get_connection (s->connection_index,
- my_thread_index);
+ thread_index);
}
/* If nothing is found, check if any listener is available */
- s = stream_session_lookup_listener6 (lcl, lcl_port, proto);
+ s = session_lookup_listener6_i (st, lcl, lcl_port, proto);
if (s)
return tp_vfts[s->session_type].get_listener (s->connection_index);
/* Finally, try half-open connections */
- rv = clib_bihash_search_inline_48_8 (&sl->v6_half_open_hash, &kv6);
+ rv = clib_bihash_search_inline_48_8 (&st->v6_half_open_hash, &kv6);
if (rv == 0)
- return tp_vfts[proto].get_half_open (kv6.value & 0xFFFFFFFF);
+ {
+ u32 sst = session_type_from_proto_and_ip (proto, 1);
+ return tp_vfts[sst].get_half_open (kv6.value & 0xFFFFFFFF);
+ }
return 0;
}
+/**
+ * Lookup connection with ip6 and transport layer information
+ *
+ * Not optimized. Lookup logic is identical to that of
+ * @ref session_lookup_connection_wt6, but no caller thread index is
+ * required.
+ *
+ * @param fib_index index of the fib wherein the connection was received
+ * @param lcl local ip6 address
+ * @param rmt remote ip6 address
+ * @param lcl_port local port
+ * @param rmt_port remote port
+ * @param proto transport protocol (e.g., tcp, udp)
+ *
+ * @return pointer to transport connection, if one is found, 0 otherwise
+ */
transport_connection_t *
-stream_session_lookup_transport6 (ip6_address_t * lcl, ip6_address_t * rmt,
- u16 lcl_port, u16 rmt_port, u8 proto)
+session_lookup_connection6 (u32 fib_index, ip6_address_t * lcl,
+ ip6_address_t * rmt, u16 lcl_port, u16 rmt_port,
+ u8 proto)
{
- session_lookup_t *sl = &session_lookup;
+ session_table_t *st;
stream_session_t *s;
session_kv6_t kv6;
int rv;
+ st = session_table_get_for_fib_index (FIB_PROTOCOL_IP6, fib_index);
+ if (PREDICT_FALSE (!st))
+ return 0;
+
make_v6_ss_kv (&kv6, lcl, rmt, lcl_port, rmt_port, proto);
- rv = clib_bihash_search_inline_48_8 (&sl->v6_session_hash, &kv6);
+ rv = clib_bihash_search_inline_48_8 (&st->v6_session_hash, &kv6);
if (rv == 0)
{
- s = stream_session_get_from_handle (kv6.value);
+ s = session_get_from_handle (kv6.value);
return tp_vfts[s->session_type].get_connection (s->connection_index,
s->thread_index);
}
/* If nothing is found, check if any listener is available */
- s = stream_session_lookup_listener6 (lcl, lcl_port, proto);
+ s = session_lookup_listener6 (fib_index, lcl, lcl_port, proto);
if (s)
return tp_vfts[s->session_type].get_listener (s->connection_index);
/* Finally, try half-open connections */
- rv = clib_bihash_search_inline_48_8 (&sl->v6_half_open_hash, &kv6);
+ rv = clib_bihash_search_inline_48_8 (&st->v6_half_open_hash, &kv6);
+ if (rv == 0)
+ {
+ u32 sst = session_type_from_proto_and_ip (proto, 1);
+ return tp_vfts[sst].get_half_open (kv6.value & 0xFFFFFFFF);
+ }
+
+ return 0;
+}
+
+/**
+ * Lookup session with ip6 and transport layer information
+ *
+ * Lookup logic is identical to that of @ref session_lookup_connection_wt6 but
+ * this returns a session as opposed to a transport connection;
+ */
+stream_session_t *
+session_lookup6 (u32 fib_index, ip6_address_t * lcl, ip6_address_t * rmt,
+ u16 lcl_port, u16 rmt_port, u8 proto)
+{
+ session_table_t *st;
+ session_kv6_t kv6;
+ stream_session_t *s;
+ int rv;
+
+ st = session_table_get_for_fib_index (FIB_PROTOCOL_IP6, fib_index);
+ if (PREDICT_FALSE (!st))
+ return 0;
+
+ make_v6_ss_kv (&kv6, lcl, rmt, lcl_port, rmt_port, proto);
+ rv = clib_bihash_search_inline_48_8 (&st->v6_session_hash, &kv6);
+ if (rv == 0)
+ return session_get_from_handle (kv6.value);
+
+ /* If nothing is found, check if any listener is available */
+ if ((s = session_lookup_listener6_i (st, lcl, lcl_port, proto)))
+ return s;
+
+ /* Finally, try half-open connections */
+ rv = clib_bihash_search_inline_48_8 (&st->v6_half_open_hash, &kv6);
if (rv == 0)
- return tp_vfts[proto].get_half_open (kv6.value & 0xFFFFFFFF);
+ return session_get_from_handle (kv6.value);
+ return 0;
+}
+u64
+session_lookup_local_listener_make_handle (session_endpoint_t * sep)
+{
+ return ((u64) SESSION_LOCAL_TABLE_PREFIX << 32
+ | (u32) sep->port << 16 | (u32) sep->transport_proto << 8
+ | (u32) sep->is_ip4);
+}
+
+u8
+session_lookup_local_is_handle (u64 handle)
+{
+ if (handle >> 32 == SESSION_LOCAL_TABLE_PREFIX)
+ return 1;
return 0;
}
-#define foreach_hash_table_parameter \
- _(v4,session,buckets,20000) \
- _(v4,session,memory,(64<<20)) \
- _(v6,session,buckets,20000) \
- _(v6,session,memory,(64<<20)) \
- _(v4,halfopen,buckets,20000) \
- _(v4,halfopen,memory,(64<<20)) \
- _(v6,halfopen,buckets,20000) \
- _(v6,halfopen,memory,(64<<20))
+int
+session_lookup_local_listener_parse_handle (u64 handle,
+ session_endpoint_t * sep)
+{
+ u32 local_table_handle;
+ if (handle >> 32 != SESSION_LOCAL_TABLE_PREFIX)
+ return -1;
+ local_table_handle = handle & 0xFFFFFFFFULL;
+ sep->is_ip4 = local_table_handle & 0xff;
+ local_table_handle >>= 8;
+ sep->transport_proto = local_table_handle & 0xff;
+ sep->port = local_table_handle >> 8;
+ return 0;
+}
+
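A minimal sketch, not part of the patch, of the round trip through the local-listener handle encoding above; only port, transport_proto and is_ip4 survive the round trip by design. The example_ name is an illustrative assumption.

static void
example_local_handle_roundtrip (session_endpoint_t * sep)
{
  session_endpoint_t out;
  u64 handle = session_lookup_local_listener_make_handle (sep);
  ASSERT (session_lookup_local_is_handle (handle));
  memset (&out, 0, sizeof (out));
  session_lookup_local_listener_parse_handle (handle, &out);
  ASSERT (out.port == sep->port && out.is_ip4 == sep->is_ip4
	  && out.transport_proto == sep->transport_proto);
}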
+u8 *
+format_ip4_session_lookup_kvp (u8 * s, va_list * args)
+{
+ clib_bihash_kv_16_8_t *kvp = va_arg (*args, clib_bihash_kv_16_8_t *);
+ u32 is_local = va_arg (*args, u32);
+ u8 *app_name, *str = 0;
+ stream_session_t *session;
+ v4_connection_key_t *key = (v4_connection_key_t *) kvp->key;
+
+ char *proto = key->proto == TRANSPORT_PROTO_TCP ? "T" : "U";
+ if (!is_local)
+ {
+ session = session_get_from_handle (kvp->value);
+ app_name = application_name_from_index (session->app_index);
+ str = format (0, "[%s] %U:%d->%U:%d", proto, format_ip4_address,
+ &key->src, clib_net_to_host_u16 (key->src_port),
+ format_ip4_address, &key->dst,
+ clib_net_to_host_u16 (key->dst_port));
+ s = format (s, "%-40v%-30v", str, app_name);
+ }
+ else
+ {
+ app_name = application_name_from_index (kvp->value);
+ str = format (0, "[%s] %U:%d", proto, format_ip4_address,
+ &key->src, clib_net_to_host_u16 (key->src_port));
+ s = format (s, "%-30v%-30v", str, app_name);
+ }
+ vec_free (app_name);
+ return s;
+}
+
+typedef struct _ip4_session_table_show_ctx_t
+{
+ vlib_main_t *vm;
+ u8 is_local;
+} ip4_session_table_show_ctx_t;
+
+static int
+ip4_session_table_show (clib_bihash_kv_16_8_t * kvp, void *arg)
+{
+ ip4_session_table_show_ctx_t *ctx = arg;
+ vlib_cli_output (ctx->vm, "%U", format_ip4_session_lookup_kvp, kvp,
+ ctx->is_local);
+ return 1;
+}
+
+void
+session_lookup_show_table_entries (vlib_main_t * vm, session_table_t * table,
+ u8 type, u8 is_local)
+{
+ ip4_session_table_show_ctx_t ctx = {
+ .vm = vm,
+ .is_local = is_local,
+ };
+ if (!is_local)
+ vlib_cli_output (vm, "%-40s%-30s", "Session", "Application");
+ else
+ vlib_cli_output (vm, "%-30s%-30s", "Listener", "Application");
+ switch (type)
+ {
+ /* main table v4 */
+ case 0:
+ ip4_session_table_walk (&table->v4_session_hash, ip4_session_table_show,
+ &ctx);
+ break;
+ default:
+ clib_warning ("not supported");
+ }
+}
void
session_lookup_init (void)
{
- session_lookup_t *sl = &session_lookup;
-
-#define _(af,table,parm,value) \
- u32 configured_##af##_##table##_table_##parm = value;
- foreach_hash_table_parameter;
-#undef _
-
-#define _(af,table,parm,value) \
- if (session_manager_main.configured_##af##_##table##_table_##parm) \
- configured_##af##_##table##_table_##parm = \
- session_manager_main.configured_##af##_##table##_table_##parm;
- foreach_hash_table_parameter;
-#undef _
-
- clib_bihash_init_16_8 (&sl->v4_session_hash, "v4 session table",
- configured_v4_session_table_buckets,
- configured_v4_session_table_memory);
- clib_bihash_init_48_8 (&sl->v6_session_hash, "v6 session table",
- configured_v6_session_table_buckets,
- configured_v6_session_table_memory);
- clib_bihash_init_16_8 (&sl->v4_half_open_hash, "v4 half-open table",
- configured_v4_halfopen_table_buckets,
- configured_v4_halfopen_table_memory);
- clib_bihash_init_48_8 (&sl->v6_half_open_hash, "v6 half-open table",
- configured_v6_halfopen_table_buckets,
- configured_v6_halfopen_table_memory);
+ /*
+ * Allocate default table and map it to fib_index 0
+ */
+ session_table_t *st = session_table_alloc ();
+ vec_validate (fib_index_to_table_index[FIB_PROTOCOL_IP4], 0);
+ fib_index_to_table_index[FIB_PROTOCOL_IP4][0] = session_table_index (st);
+ session_table_init (st);
+ st = session_table_alloc ();
+ vec_validate (fib_index_to_table_index[FIB_PROTOCOL_IP6], 0);
+ fib_index_to_table_index[FIB_PROTOCOL_IP6][0] = session_table_index (st);
+ session_table_init (st);
}
/*
diff --git a/src/vnet/session/session_lookup.h b/src/vnet/session/session_lookup.h
index cf1dc01356e..20cbaf2acd6 100644
--- a/src/vnet/session/session_lookup.h
+++ b/src/vnet/session/session_lookup.h
@@ -16,77 +16,73 @@
#ifndef SRC_VNET_SESSION_SESSION_LOOKUP_H_
#define SRC_VNET_SESSION_SESSION_LOOKUP_H_
+#include <vnet/session/session_table.h>
#include <vnet/session/stream_session.h>
#include <vnet/session/transport.h>
-typedef struct _session_lookup
-{
- /** Lookup tables for established sessions and listeners */
- clib_bihash_16_8_t v4_session_hash;
- clib_bihash_48_8_t v6_session_hash;
-
- /** Lookup tables for half-open sessions */
- clib_bihash_16_8_t v4_half_open_hash;
- clib_bihash_48_8_t v6_half_open_hash;
-} session_lookup_t;
-
-stream_session_t *stream_session_lookup_listener4 (ip4_address_t * lcl,
- u16 lcl_port, u8 proto);
-stream_session_t *stream_session_lookup4 (ip4_address_t * lcl,
- ip4_address_t * rmt, u16 lcl_port,
- u16 rmt_port, u8 proto);
-stream_session_t *stream_session_lookup_listener6 (ip6_address_t * lcl,
- u16 lcl_port, u8 proto);
-stream_session_t *stream_session_lookup6 (ip6_address_t * lcl,
- ip6_address_t * rmt, u16 lcl_port,
- u16 rmt_port, u8 proto);
-transport_connection_t *stream_session_lookup_transport_wt4 (ip4_address_t *
- lcl,
- ip4_address_t *
- rmt,
- u16 lcl_port,
- u16 rmt_port,
- u8 proto,
- u32
- thread_index);
-transport_connection_t *stream_session_lookup_transport4 (ip4_address_t * lcl,
- ip4_address_t * rmt,
- u16 lcl_port,
- u16 rmt_port,
- u8 proto);
-transport_connection_t *stream_session_lookup_transport_wt6 (ip6_address_t *
- lcl,
- ip6_address_t *
- rmt,
- u16 lcl_port,
- u16 rmt_port,
+stream_session_t *session_lookup4 (u32 fib_index, ip4_address_t * lcl,
+ ip4_address_t * rmt, u16 lcl_port,
+ u16 rmt_port, u8 proto);
+stream_session_t *session_lookup6 (u32 fib_index, ip6_address_t * lcl,
+ ip6_address_t * rmt, u16 lcl_port,
+ u16 rmt_port, u8 proto);
+transport_connection_t *session_lookup_connection_wt4 (u32 fib_index,
+ ip4_address_t * lcl,
+ ip4_address_t * rmt,
+ u16 lcl_port,
+ u16 rmt_port, u8 proto,
+ u32 thread_index);
+transport_connection_t *session_lookup_connection4 (u32 fib_index,
+ ip4_address_t * lcl,
+ ip4_address_t * rmt,
+ u16 lcl_port,
+ u16 rmt_port, u8 proto);
+transport_connection_t *session_lookup_connection_wt6 (u32 fib_index,
+ ip6_address_t * lcl,
+ ip6_address_t * rmt,
+ u16 lcl_port,
+ u16 rmt_port, u8 proto,
+ u32 thread_index);
+transport_connection_t *session_lookup_connection6 (u32 fib_index,
+ ip6_address_t * lcl,
+ ip6_address_t * rmt,
+ u16 lcl_port,
+ u16 rmt_port, u8 proto);
+stream_session_t *session_lookup_listener4 (u32 fib_index,
+ ip4_address_t * lcl, u16 lcl_port,
+ u8 proto);
+stream_session_t *session_lookup_listener6 (u32 fib_index,
+ ip6_address_t * lcl, u16 lcl_port,
+ u8 proto);
+stream_session_t *session_lookup_listener (u32 table_index,
+ session_endpoint_t * sep);
+int session_lookup_add_connection (transport_connection_t * tc, u64 value);
+int session_lookup_del_connection (transport_connection_t * tc);
+u32 session_lookup_session_endpoint (u32 table_index,
+ session_endpoint_t * sep);
+u32 session_lookup_local_session_endpoint (u32 table_index,
+ session_endpoint_t * sep);
+int session_lookup_add_session_endpoint (u32 table_index,
+ session_endpoint_t * sep, u64 value);
+int session_lookup_del_session_endpoint (u32 table_index,
+ session_endpoint_t * sep);
+int session_lookup_del_session (stream_session_t * s);
+int session_lookup_del_half_open (transport_connection_t * tc);
+int session_lookup_add_half_open (transport_connection_t * tc, u64 value);
+u64 session_lookup_half_open_handle (transport_connection_t * tc);
+transport_connection_t *session_lookup_half_open_connection (u64 handle,
u8 proto,
- u32
- thread_index);
-transport_connection_t *stream_session_lookup_transport6 (ip6_address_t * lcl,
- ip6_address_t * rmt,
- u16 lcl_port,
- u16 rmt_port,
- u8 proto);
+ u8 is_ip4);
+u32 session_lookup_get_index_for_fib (u32 fib_proto, u32 fib_index);
-stream_session_t *stream_session_lookup_listener (ip46_address_t * lcl,
- u16 lcl_port, u8 proto);
-u64 stream_session_half_open_lookup_handle (ip46_address_t * lcl,
- ip46_address_t * rmt,
- u16 lcl_port,
- u16 rmt_port, u8 proto);
-transport_connection_t *stream_session_half_open_lookup (ip46_address_t * lcl,
- ip46_address_t * rmt,
- u16 lcl_port,
- u16 rmt_port,
- u8 proto);
-void stream_session_table_add_for_tc (transport_connection_t * tc, u64 value);
-int stream_session_table_del_for_tc (transport_connection_t * tc);
-int stream_session_table_del (stream_session_t * s);
-void stream_session_half_open_table_del (transport_connection_t * tc);
-void stream_session_half_open_table_add (transport_connection_t * tc,
- u64 value);
+u64 session_lookup_local_listener_make_handle (session_endpoint_t * sep);
+u8 session_lookup_local_is_handle (u64 handle);
+int session_lookup_local_listener_parse_handle (u64 handle,
+ session_endpoint_t * sep);
+void session_lookup_show_table_entries (vlib_main_t * vm,
+ session_table_t * table, u8 type,
+ u8 is_local);
void session_lookup_init (void);
#endif /* SRC_VNET_SESSION_SESSION_LOOKUP_H_ */
diff --git a/src/vnet/session/session_node.c b/src/vnet/session/session_node.c
index d015584990e..d2291fa38de 100644
--- a/src/vnet/session/session_node.c
+++ b/src/vnet/session/session_node.c
@@ -434,7 +434,7 @@ dump_thread_0_event_queue (void)
break;
case FIFO_EVENT_DISCONNECT:
- s0 = stream_session_get_from_handle (e->session_handle);
+ s0 = session_get_from_handle (e->session_handle);
fformat (stdout, "[%04d] disconnect session %d\n", i,
s0->session_index);
break;
@@ -477,7 +477,7 @@ session_node_cmp_event (session_fifo_event_t * e, svm_fifo_t * f)
case FIFO_EVENT_DISCONNECT:
break;
case FIFO_EVENT_RPC:
- s = stream_session_get_from_handle (e->session_handle);
+ s = session_get_from_handle (e->session_handle);
if (!s)
{
clib_warning ("session has event but doesn't exist!");
@@ -644,7 +644,7 @@ skip_dequeue:
}
break;
case FIFO_EVENT_DISCONNECT:
- s0 = stream_session_get_from_handle (e0->session_handle);
+ s0 = session_get_from_handle (e0->session_handle);
stream_session_disconnect (s0);
break;
case FIFO_EVENT_BUILTIN_RX:
diff --git a/src/vnet/session/session_table.c b/src/vnet/session/session_table.c
new file mode 100644
index 00000000000..04c0c816ab0
--- /dev/null
+++ b/src/vnet/session/session_table.c
@@ -0,0 +1,124 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/session/session_table.h>
+#include <vnet/session/session.h>
+
+/**
+ * Pool of session tables
+ */
+static session_table_t *lookup_tables;
+
+session_table_t *
+session_table_alloc (void)
+{
+ session_table_t *slt;
+ pool_get_aligned (lookup_tables, slt, CLIB_CACHE_LINE_BYTES);
+ memset (slt, 0, sizeof (*slt));
+ return slt;
+}
+
+u32
+session_table_index (session_table_t * slt)
+{
+ return (slt - lookup_tables);
+}
+
+session_table_t *
+session_table_get (u32 table_index)
+{
+ if (vec_len (lookup_tables) <= table_index)
+ return 0;
+ return vec_elt_at_index (lookup_tables, table_index);
+}
+
+#define foreach_hash_table_parameter \
+ _(v4,session,buckets,20000) \
+ _(v4,session,memory,(64<<20)) \
+ _(v6,session,buckets,20000) \
+ _(v6,session,memory,(64<<20)) \
+ _(v4,halfopen,buckets,20000) \
+ _(v4,halfopen,memory,(64<<20)) \
+ _(v6,halfopen,buckets,20000) \
+ _(v6,halfopen,memory,(64<<20))
+
+/**
+ * Initialize session table hash tables
+ *
+ * If vpp is configured with a set of table parameters, those are used;
+ * otherwise the defaults above apply.
+ */
+void
+session_table_init (session_table_t * slt)
+{
+#define _(af,table,parm,value) \
+ u32 configured_##af##_##table##_table_##parm = value;
+ foreach_hash_table_parameter;
+#undef _
+
+#define _(af,table,parm,value) \
+ if (session_manager_main.configured_##af##_##table##_table_##parm) \
+ configured_##af##_##table##_table_##parm = \
+ session_manager_main.configured_##af##_##table##_table_##parm;
+ foreach_hash_table_parameter;
+#undef _
+
+ clib_bihash_init_16_8 (&slt->v4_session_hash, "v4 session table",
+ configured_v4_session_table_buckets,
+ configured_v4_session_table_memory);
+ clib_bihash_init_48_8 (&slt->v6_session_hash, "v6 session table",
+ configured_v6_session_table_buckets,
+ configured_v6_session_table_memory);
+ clib_bihash_init_16_8 (&slt->v4_half_open_hash, "v4 half-open table",
+ configured_v4_halfopen_table_buckets,
+ configured_v4_halfopen_table_memory);
+ clib_bihash_init_48_8 (&slt->v6_half_open_hash, "v6 half-open table",
+ configured_v6_halfopen_table_buckets,
+ configured_v6_halfopen_table_memory);
+}
+
+typedef struct _ip4_session_table_walk_ctx_t
+{
+ ip4_session_table_walk_fn_t fn;
+ void *ctx;
+} ip4_session_table_walk_ctx_t;
+
+void
+ip4_session_table_walk_cb (clib_bihash_kv_16_8_t * kvp, void *arg)
+{
+ ip4_session_table_walk_ctx_t *ctx = arg;
+ ctx->fn (kvp, ctx->ctx);
+}
+
+void
+ip4_session_table_walk (clib_bihash_16_8_t * hash,
+ ip4_session_table_walk_fn_t fn, void *arg)
+{
+ ip4_session_table_walk_ctx_t ctx = {
+ .fn = fn,
+ .ctx = arg,
+ };
+ clib_bihash_foreach_key_value_pair_16_8 (hash, ip4_session_table_walk_cb,
+ &ctx);
+}
+
+/* *INDENT-ON* */
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
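[Editor's note, not part of the patch] For orientation, the walk helper defined above is meant to be used with a small callback; count_v4_entries and count_v4_entries_cb below are hypothetical names used only to show the shape of such a caller:

#include <vnet/session/session_table.h>

/* Invoked once per v4 session-table entry; the return value is
 * ignored by ip4_session_table_walk_cb above. */
static int
count_v4_entries_cb (clib_bihash_kv_16_8_t * kvp, void *arg)
{
  u32 *count = arg;
  *count += 1;
  return 1;
}

static u32
count_v4_entries (u32 table_index)
{
  session_table_t *st = session_table_get (table_index);
  u32 count = 0;

  if (!st)
    return 0;
  ip4_session_table_walk (&st->v4_session_hash, count_v4_entries_cb, &count);
  return count;
}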
diff --git a/src/vnet/session/session_table.h b/src/vnet/session/session_table.h
new file mode 100644
index 00000000000..ce0b4a2ff25
--- /dev/null
+++ b/src/vnet/session/session_table.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SRC_VNET_SESSION_SESSION_TABLE_H_
+#define SRC_VNET_SESSION_SESSION_TABLE_H_
+
+#include <vppinfra/bihash_16_8.h>
+#include <vppinfra/bihash_48_8.h>
+
+typedef struct _session_lookup_table
+{
+ /**
+ * Lookup tables for established sessions and listeners
+ */
+ clib_bihash_16_8_t v4_session_hash;
+ clib_bihash_48_8_t v6_session_hash;
+
+ /**
+ * Lookup tables for half-open sessions
+ */
+ clib_bihash_16_8_t v4_half_open_hash;
+ clib_bihash_48_8_t v6_half_open_hash;
+} session_table_t;
+
+#define SESSION_TABLE_INVALID_INDEX ((u32)~0)
+#define SESSION_LOCAL_TABLE_PREFIX ((u32)~0)
+#define SESSION_INVALID_INDEX ((u32)~0)
+
+typedef int (*ip4_session_table_walk_fn_t) (clib_bihash_kv_16_8_t * kvp,
+ void *ctx);
+
+void ip4_session_table_walk_cb (clib_bihash_kv_16_8_t * kvp, void *arg);
+void ip4_session_table_walk (clib_bihash_16_8_t * hash,
+ ip4_session_table_walk_fn_t fn, void *arg);
+
+session_table_t *session_table_alloc (void);
+session_table_t *session_table_get (u32 table_index);
+u32 session_table_index (session_table_t * slt);
+void session_table_init (session_table_t * slt);
+
+#endif /* SRC_VNET_SESSION_SESSION_TABLE_H_ */
+/* *INDENT-ON* */
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
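[Editor's note, not part of the patch] A minimal sketch of the intended lifecycle, based only on the declarations above (example_make_table is illustrative): tables are pool-allocated, sized from configuration or the built-in defaults, and referenced by index rather than by pointer.

#include <vnet/session/session_table.h>

static u32
example_make_table (void)
{
  session_table_t *st = session_table_alloc ();
  session_table_init (st);           /* sizes from config or defaults */
  return session_table_index (st);   /* callers store the index */
}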
diff --git a/src/vnet/session/session_test.c b/src/vnet/session/session_test.c
new file mode 100644
index 00000000000..b46b33d1396
--- /dev/null
+++ b/src/vnet/session/session_test.c
@@ -0,0 +1,473 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/session/application_namespace.h>
+#include <vnet/session/application_interface.h>
+#include <vnet/session/application.h>
+#include <vnet/session/session.h>
+
+#define SESSION_TEST_I(_cond, _comment, _args...) \
+({ \
+ int _evald = (_cond); \
+ if (!(_evald)) { \
+ fformat(stderr, "FAIL:%d: " _comment "\n", \
+ __LINE__, ##_args); \
+ } else { \
+ fformat(stderr, "PASS:%d: " _comment "\n", \
+ __LINE__, ##_args); \
+ } \
+ _evald; \
+})
+
+#define SESSION_TEST(_cond, _comment, _args...) \
+{ \
+ if (!SESSION_TEST_I(_cond, _comment, ##_args)) { \
+ return 1; \
+ } \
+}
+
+void
+dummy_session_reset_callback (stream_session_t * s)
+{
+ clib_warning ("called...");
+}
+
+int
+dummy_session_connected_callback (u32 app_index, u32 api_context,
+ stream_session_t * s, u8 is_fail)
+{
+ clib_warning ("called...");
+ return -1;
+}
+
+int
+dummy_add_segment_callback (u32 client_index, const u8 * seg_name,
+ u32 seg_size)
+{
+ clib_warning ("called...");
+ return -1;
+}
+
+int
+dummy_redirect_connect_callback (u32 client_index, void *mp)
+{
+ return VNET_API_ERROR_SESSION_REDIRECT;
+}
+
+void
+dummy_session_disconnect_callback (stream_session_t * s)
+{
+ clib_warning ("called...");
+}
+
+int
+dummy_session_accept_callback (stream_session_t * s)
+{
+ clib_warning ("called...");
+ return -1;
+}
+
+int
+dummy_server_rx_callback (stream_session_t * s)
+{
+ clib_warning ("called...");
+ return -1;
+}
+
+/* *INDENT-OFF* */
+static session_cb_vft_t dummy_session_cbs = {
+ .session_reset_callback = dummy_session_reset_callback,
+ .session_connected_callback = dummy_session_connected_callback,
+ .session_accept_callback = dummy_session_accept_callback,
+ .session_disconnect_callback = dummy_session_disconnect_callback,
+ .builtin_server_rx_callback = dummy_server_rx_callback,
+ .redirect_connect_callback = dummy_redirect_connect_callback,
+};
+/* *INDENT-ON* */
+
+static int
+session_test_namespace (vlib_main_t * vm, unformat_input_t * input)
+{
+ u64 options[SESSION_OPTIONS_N_OPTIONS], dummy_secret = 1234;
+ u32 server_index, server_st_index, server_local_st_index;
+ u32 dummy_port = 1234, local_listener, client_index;
+ u32 dummy_api_context = 4321, dummy_client_api_index = 1234;
+ u32 dummy_server_api_index = ~0, sw_if_index = 0;
+ session_endpoint_t server_sep = SESSION_ENDPOINT_NULL;
+ session_endpoint_t client_sep = SESSION_ENDPOINT_NULL;
+ session_endpoint_t intf_sep = SESSION_ENDPOINT_NULL;
+ clib_error_t *error = 0;
+ u8 *ns_id = format (0, "appns1"), intf_mac[6];
+ app_namespace_t *app_ns;
+ u8 segment_name[128];
+ application_t *server;
+ stream_session_t *s;
+ int code;
+
+ server_sep.is_ip4 = 1;
+ server_sep.port = dummy_port;
+ client_sep.is_ip4 = 1;
+ client_sep.port = dummy_port;
+ memset (options, 0, sizeof (options));
+ memset (intf_mac, 0, sizeof (intf_mac));
+
+ options[APP_OPTIONS_FLAGS] = APP_OPTIONS_FLAGS_BUILTIN_APP;
+ options[APP_OPTIONS_FLAGS] |= APP_OPTIONS_FLAGS_ACCEPT_REDIRECT;
+ vnet_app_attach_args_t attach_args = {
+ .api_client_index = ~0,
+ .options = options,
+ .namespace_id = 0,
+ .session_cb_vft = &dummy_session_cbs,
+ .segment_name = segment_name,
+ };
+
+ vnet_bind_args_t bind_args = {
+ .sep = server_sep,
+ .app_index = 0,
+ };
+
+ vnet_connect_args_t connect_args = {
+ .sep = client_sep,
+ .app_index = 0,
+ .api_context = 0,
+ };
+
+ vnet_unbind_args_t unbind_args = {
+ .handle = bind_args.handle,
+ .app_index = 0,
+ };
+
+ vnet_app_detach_args_t detach_args = {
+ .app_index = 0,
+ };
+
+ ip4_address_t intf_addr = {
+ .as_u32 = clib_host_to_net_u32 (0x06000105),
+ };
+
+ intf_sep.ip.ip4 = intf_addr;
+ intf_sep.is_ip4 = 1;
+ intf_sep.port = dummy_port;
+
+ /*
+ * Insert namespace and lookup
+ */
+
+ vnet_app_namespace_add_del_args_t ns_args = {
+ .ns_id = ns_id,
+ .secret = dummy_secret,
+ .sw_if_index = APP_NAMESPACE_INVALID_INDEX,
+ .is_add = 1
+ };
+ error = vnet_app_namespace_add_del (&ns_args);
+ SESSION_TEST ((error == 0), "app ns insertion should succeed: %d",
+ clib_error_get_code (error));
+
+ app_ns = app_namespace_get_from_id (ns_id);
+  SESSION_TEST ((app_ns != 0), "should find ns %v", ns_id);
+ SESSION_TEST ((app_ns->ns_secret == dummy_secret), "secret should be %d",
+ dummy_secret);
+ SESSION_TEST ((app_ns->sw_if_index == APP_NAMESPACE_INVALID_INDEX),
+ "sw_if_index should be invalid");
+
+ /*
+ * Try application attach with wrong secret
+ */
+
+ options[APP_OPTIONS_FLAGS] |= APP_OPTIONS_FLAGS_USE_GLOBAL_SCOPE;
+ options[APP_OPTIONS_FLAGS] |= APP_OPTIONS_FLAGS_USE_LOCAL_SCOPE;
+ options[APP_OPTIONS_NAMESPACE_SECRET] = dummy_secret - 1;
+ attach_args.namespace_id = ns_id;
+ attach_args.api_client_index = dummy_server_api_index;
+
+ error = vnet_application_attach (&attach_args);
+ SESSION_TEST ((error != 0), "app attachment should fail");
+ code = clib_error_get_code (error);
+ SESSION_TEST ((code == VNET_API_ERROR_APP_WRONG_NS_SECRET),
+ "code should be wrong ns secret: %d", code);
+
+ /*
+ * Attach server with global default scope
+ */
+ options[APP_OPTIONS_FLAGS] &= ~APP_OPTIONS_FLAGS_USE_GLOBAL_SCOPE;
+ options[APP_OPTIONS_FLAGS] &= ~APP_OPTIONS_FLAGS_USE_LOCAL_SCOPE;
+ options[APP_OPTIONS_NAMESPACE_SECRET] = 0;
+ attach_args.namespace_id = 0;
+ attach_args.api_client_index = dummy_server_api_index;
+ error = vnet_application_attach (&attach_args);
+ SESSION_TEST ((error == 0), "server attachment should work");
+ server_index = attach_args.app_index;
+ server = application_get (server_index);
+ SESSION_TEST ((server->ns_index == 0),
+ "server should be in the default ns");
+
+ bind_args.app_index = server_index;
+ error = vnet_bind (&bind_args);
+ SESSION_TEST ((error == 0), "server bind should work");
+
+ server_st_index = application_session_table (server, FIB_PROTOCOL_IP4);
+ s = session_lookup_listener (server_st_index, &server_sep);
+ SESSION_TEST ((s != 0), "listener should exist in global table");
+ SESSION_TEST ((s->app_index == server_index), "app_index should be that of "
+ "the server");
+ server_local_st_index = application_local_session_table (server);
+ SESSION_TEST ((server_local_st_index == APP_INVALID_INDEX),
+ "server shouldn't have access to local table");
+
+  unbind_args.handle = bind_args.handle;
+  unbind_args.app_index = server_index;
+ error = vnet_unbind (&unbind_args);
+ SESSION_TEST ((error == 0), "unbind should work");
+
+ s = session_lookup_listener (server_st_index, &server_sep);
+ SESSION_TEST ((s == 0), "listener should not exist in global table");
+
+ detach_args.app_index = server_index;
+ vnet_application_detach (&detach_args);
+
+ /*
+ * Attach server with local and global scope
+ */
+ options[APP_OPTIONS_FLAGS] |= APP_OPTIONS_FLAGS_USE_GLOBAL_SCOPE;
+ options[APP_OPTIONS_FLAGS] |= APP_OPTIONS_FLAGS_USE_LOCAL_SCOPE;
+ options[APP_OPTIONS_NAMESPACE_SECRET] = dummy_secret;
+ attach_args.namespace_id = ns_id;
+ attach_args.api_client_index = dummy_server_api_index;
+ error = vnet_application_attach (&attach_args);
+ SESSION_TEST ((error == 0), "server attachment should work");
+ server_index = attach_args.app_index;
+ server = application_get (server_index);
+ SESSION_TEST ((server->ns_index == app_namespace_index (app_ns)),
+ "server should be in the right ns");
+
+ bind_args.app_index = server_index;
+ error = vnet_bind (&bind_args);
+ SESSION_TEST ((error == 0), "bind should work");
+ server_st_index = application_session_table (server, FIB_PROTOCOL_IP4);
+ s = session_lookup_listener (server_st_index, &server_sep);
+ SESSION_TEST ((s != 0), "listener should exist in global table");
+ SESSION_TEST ((s->app_index == server_index), "app_index should be that of "
+ "the server");
+ server_local_st_index = application_local_session_table (server);
+ local_listener = session_lookup_session_endpoint (server_local_st_index,
+ &server_sep);
+ SESSION_TEST ((local_listener != SESSION_INVALID_INDEX),
+ "listener should exist in local table");
+
+ /*
+ * Try client connect with 1) local scope 2) global scope
+ */
+ options[APP_OPTIONS_FLAGS] &= ~APP_OPTIONS_FLAGS_USE_GLOBAL_SCOPE;
+ attach_args.api_client_index = dummy_client_api_index;
+ error = vnet_application_attach (&attach_args);
+ SESSION_TEST ((error == 0), "client attachment should work");
+ client_index = attach_args.app_index;
+ connect_args.api_context = dummy_api_context;
+ connect_args.app_index = client_index;
+ error = vnet_connect (&connect_args);
+ SESSION_TEST ((error != 0), "client connect should return error code");
+ code = clib_error_get_code (error);
+ SESSION_TEST ((code == VNET_API_ERROR_INVALID_VALUE),
+ "error code should be invalid value (zero ip)");
+ connect_args.sep.ip.ip4.as_u8[0] = 127;
+ error = vnet_connect (&connect_args);
+ SESSION_TEST ((error != 0), "client connect should return error code");
+ code = clib_error_get_code (error);
+ SESSION_TEST ((code == VNET_API_ERROR_SESSION_REDIRECT),
+ "error code should be redirect");
+ detach_args.app_index = client_index;
+ vnet_application_detach (&detach_args);
+
+ options[APP_OPTIONS_FLAGS] &= ~APP_OPTIONS_FLAGS_USE_LOCAL_SCOPE;
+ options[APP_OPTIONS_FLAGS] |= APP_OPTIONS_FLAGS_USE_GLOBAL_SCOPE;
+ attach_args.api_client_index = dummy_client_api_index;
+ error = vnet_application_attach (&attach_args);
+ SESSION_TEST ((error == 0), "client attachment should work");
+ error = vnet_connect (&connect_args);
+ SESSION_TEST ((error != 0), "client connect should return error code");
+ code = clib_error_get_code (error);
+ SESSION_TEST ((code == VNET_API_ERROR_SESSION_CONNECT),
+ "error code should be connect (nothing in local scope)");
+ detach_args.app_index = client_index;
+ vnet_application_detach (&detach_args);
+
+ /*
+ * Unbind and detach server and then re-attach with local scope only
+ */
+ unbind_args.handle = bind_args.handle;
+ unbind_args.app_index = server_index;
+ error = vnet_unbind (&unbind_args);
+ SESSION_TEST ((error == 0), "unbind should work");
+
+ s = session_lookup_listener (server_st_index, &server_sep);
+ SESSION_TEST ((s == 0), "listener should not exist in global table");
+ local_listener = session_lookup_session_endpoint (server_local_st_index,
+ &server_sep);
+ SESSION_TEST ((s == 0), "listener should not exist in local table");
+
+ detach_args.app_index = server_index;
+ vnet_application_detach (&detach_args);
+
+ options[APP_OPTIONS_FLAGS] &= ~APP_OPTIONS_FLAGS_USE_GLOBAL_SCOPE;
+ options[APP_OPTIONS_FLAGS] |= APP_OPTIONS_FLAGS_USE_LOCAL_SCOPE;
+ attach_args.api_client_index = dummy_server_api_index;
+ error = vnet_application_attach (&attach_args);
+ SESSION_TEST ((error == 0), "app attachment should work");
+ server_index = attach_args.app_index;
+ server = application_get (server_index);
+ SESSION_TEST ((server->ns_index == app_namespace_index (app_ns)),
+ "app should be in the right ns");
+
+ bind_args.app_index = server_index;
+ error = vnet_bind (&bind_args);
+ SESSION_TEST ((error == 0), "bind should work");
+
+ server_st_index = application_session_table (server, FIB_PROTOCOL_IP4);
+ s = session_lookup_listener (server_st_index, &server_sep);
+ SESSION_TEST ((s == 0), "listener should not exist in global table");
+ server_local_st_index = application_local_session_table (server);
+ local_listener = session_lookup_session_endpoint (server_local_st_index,
+ &server_sep);
+ SESSION_TEST ((local_listener != SESSION_INVALID_INDEX),
+ "listener should exist in local table");
+
+ unbind_args.handle = bind_args.handle;
+ error = vnet_unbind (&unbind_args);
+ SESSION_TEST ((error == 0), "unbind should work");
+
+ local_listener = session_lookup_session_endpoint (server_local_st_index,
+ &server_sep);
+ SESSION_TEST ((local_listener == SESSION_INVALID_INDEX),
+ "listener should not exist in local table");
+
+ /*
+ * Client attach + connect in default ns with local scope
+ */
+ options[APP_OPTIONS_FLAGS] &= ~APP_OPTIONS_FLAGS_USE_GLOBAL_SCOPE;
+ options[APP_OPTIONS_FLAGS] |= APP_OPTIONS_FLAGS_USE_LOCAL_SCOPE;
+ attach_args.namespace_id = 0;
+ attach_args.api_client_index = dummy_client_api_index;
+  error = vnet_application_attach (&attach_args);
+  SESSION_TEST ((error == 0), "client attachment should work");
+  client_index = attach_args.app_index;
+  connect_args.app_index = client_index;
+ error = vnet_connect (&connect_args);
+ SESSION_TEST ((error != 0), "client connect should return error code");
+ code = clib_error_get_code (error);
+ SESSION_TEST ((code == VNET_API_ERROR_SESSION_CONNECT),
+ "error code should be connect (not in same ns)");
+ detach_args.app_index = client_index;
+ vnet_application_detach (&detach_args);
+
+ /*
+ * Detach server
+ */
+ detach_args.app_index = server_index;
+ vnet_application_detach (&detach_args);
+
+ /*
+ * Create loopback interface
+ */
+ if (vnet_create_loopback_interface (&sw_if_index, intf_mac, 0, 0))
+ {
+ clib_warning ("couldn't create loopback. stopping the test!");
+ return 0;
+ }
+ vnet_sw_interface_set_flags (vnet_get_main (), sw_if_index,
+ VNET_SW_INTERFACE_FLAG_ADMIN_UP);
+ ip4_add_del_interface_address (vlib_get_main (), sw_if_index, &intf_addr,
+ 24, 0);
+
+ /*
+ * Update namespace
+ */
+ ns_args.sw_if_index = sw_if_index;
+ error = vnet_app_namespace_add_del (&ns_args);
+ SESSION_TEST ((error == 0), "app ns insertion should succeed: %d",
+ clib_error_get_code (error));
+
+ /*
+ * Attach server with local and global scope
+ */
+ options[APP_OPTIONS_FLAGS] |= APP_OPTIONS_FLAGS_USE_GLOBAL_SCOPE;
+ options[APP_OPTIONS_FLAGS] |= APP_OPTIONS_FLAGS_USE_LOCAL_SCOPE;
+ options[APP_OPTIONS_NAMESPACE_SECRET] = dummy_secret;
+ attach_args.namespace_id = ns_id;
+ attach_args.api_client_index = dummy_server_api_index;
+ error = vnet_application_attach (&attach_args);
+ SESSION_TEST ((error == 0), "server attachment should work");
+  server_index = attach_args.app_index;
+  server = application_get (server_index);
+
+ bind_args.app_index = server_index;
+  error = vnet_bind (&bind_args);
+  SESSION_TEST ((error == 0), "bind should work");
+ server_st_index = application_session_table (server, FIB_PROTOCOL_IP4);
+ s = session_lookup_listener (server_st_index, &server_sep);
+ SESSION_TEST ((s == 0), "zero listener should not exist in global table");
+
+ s = session_lookup_listener (server_st_index, &intf_sep);
+ SESSION_TEST ((s != 0), "intf listener should exist in global table");
+ SESSION_TEST ((s->app_index == server_index), "app_index should be that of "
+ "the server");
+ server_local_st_index = application_local_session_table (server);
+ local_listener = session_lookup_session_endpoint (server_local_st_index,
+ &server_sep);
+ SESSION_TEST ((local_listener != SESSION_INVALID_INDEX),
+ "zero listener should exist in local table");
+ detach_args.app_index = server_index;
+ vnet_application_detach (&detach_args);
+
+ /*
+ * Cleanup
+ */
+ vec_free (ns_id);
+ vnet_delete_loopback_interface (sw_if_index);
+ return 0;
+}
+
+static clib_error_t *
+session_test (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd_arg)
+{
+ int res = 0;
+
+ vnet_session_enable_disable (vm, 1);
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "namespace"))
+ {
+ res = session_test_namespace (vm, input);
+ }
+ else
+ break;
+ }
+
+ if (res)
+ return clib_error_return (0, "Session unit test failed");
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (tcp_test_command, static) =
+{
+ .path = "test session",
+ .short_help = "internal session unit tests",
+ .function = session_test,
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
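[Editor's note, not part of the patch] Usage note: the unit test above is wired to the debug CLI, so in a build that includes it the namespace checks can be run with "test session namespace". The handler first enables the session layer via vnet_session_enable_disable() and prints one PASS/FAIL line per assertion on stderr, returning a CLI error only if an assertion fails.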
diff --git a/src/vnet/session/stream_session.h b/src/vnet/session/stream_session.h
index 275052d3ee5..1ed6e0b9eec 100644
--- a/src/vnet/session/stream_session.h
+++ b/src/vnet/session/stream_session.h
@@ -18,6 +18,7 @@
#include <vnet/vnet.h>
#include <svm/svm_fifo.h>
+#include <vnet/session/transport.h>
#define foreach_session_type \
_(IP4_TCP, ip4_tcp) \
@@ -81,6 +82,44 @@ typedef struct _stream_session_t
CLIB_CACHE_LINE_ALIGN_MARK (pad);
} stream_session_t;
+typedef struct _session_endpoint
+{
+ /*
+ * Network specific
+ */
+#define _(type, name) type name;
+ foreach_transport_connection_fields
+#undef _
+ /*
+ * Session specific
+ */
+ u8 transport_proto; /**< transport protocol for session */
+} session_endpoint_t;
+
+#define SESSION_IP46_ZERO \
+{ \
+ .ip6 = { \
+ { 0, 0, }, \
+ }, \
+}
+#define SESSION_ENDPOINT_NULL \
+{ \
+ .sw_if_index = ENDPOINT_INVALID_INDEX, \
+ .ip = SESSION_IP46_ZERO, \
+ .fib_index = ENDPOINT_INVALID_INDEX, \
+ .is_ip4 = 0, \
+ .port = 0, \
+ .transport_proto = 0, \
+}
+
+#define session_endpoint_to_transport(_sep) ((transport_endpoint_t *)_sep)
+
+always_inline u8
+session_endpoint_fib_proto (session_endpoint_t * sep)
+{
+ return sep->is_ip4 ? FIB_PROTOCOL_IP4 : FIB_PROTOCOL_IP6;
+}
+
#endif /* SRC_VNET_SESSION_STREAM_SESSION_H_ */
/*
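[Editor's note, not part of the patch] As a quick illustration of the new endpoint type (example_endpoint below is hypothetical), a session_endpoint_t is typically stack-initialized from SESSION_ENDPOINT_NULL, filled in, and handed to code expecting a transport_endpoint_t via the cast macro, which is valid because the leading fields mirror transport_endpoint_t:

#include <vnet/session/stream_session.h>

static void
example_endpoint (void)
{
  session_endpoint_t sep = SESSION_ENDPOINT_NULL;
  transport_endpoint_t *tep;

  sep.is_ip4 = 1;
  sep.ip.ip4.as_u32 = clib_host_to_net_u32 (0x7f000001); /* 127.0.0.1 */
  sep.port = clib_host_to_net_u16 (1234);  /* net order, per transport.h */
  sep.transport_proto = TRANSPORT_PROTO_TCP;

  ASSERT (session_endpoint_fib_proto (&sep) == FIB_PROTOCOL_IP4);

  /* Only the network-specific prefix is visible through the cast. */
  tep = session_endpoint_to_transport (&sep);
  (void) tep;
}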
diff --git a/src/vnet/session/transport.h b/src/vnet/session/transport.h
index e2c479494ca..8c299c46490 100644
--- a/src/vnet/session/transport.h
+++ b/src/vnet/session/transport.h
@@ -18,8 +18,6 @@
#include <vnet/vnet.h>
#include <vnet/ip/ip.h>
-#include <vppinfra/bihash_16_8.h>
-#include <vppinfra/bihash_48_8.h>
#include <vnet/tcp/tcp_debug.h>
/*
@@ -33,7 +31,7 @@ typedef struct _transport_connection
u16 rmt_port; /**< Remote port */
u8 transport_proto; /**< Protocol id */
u8 is_ip4; /**< Flag if IP4 connection */
- u32 vrf; /**< FIB table id */
+ u32 fib_index; /**< Network namespace */
u32 s_index; /**< Parent session index */
u32 c_index; /**< Connection index in transport pool */
@@ -57,8 +55,7 @@ typedef struct _transport_connection
#define c_lcl_port connection.lcl_port
#define c_rmt_port connection.rmt_port
#define c_transport_proto connection.transport_proto
-#define c_vrf connection.vrf
-#define c_state connection.state
+#define c_fib_index connection.fib_index
#define c_s_index connection.s_index
#define c_c_index connection.c_index
#define c_is_ip4 connection.is_ip4
@@ -75,14 +72,28 @@ typedef enum _transport_proto
TRANSPORT_PROTO_UDP
} transport_proto_t;
+#define foreach_transport_connection_fields \
+ _(u32, sw_if_index) /**< interface endpoint is associated with */ \
+ _(ip46_address_t, ip) /**< ip address */ \
+ _(u32, fib_index) /**< fib table endpoint is associated with */ \
+ _(u8, is_ip4) /**< 1 if ip4 */ \
+ _(u16, port) /**< port in net order */ \
+
typedef struct _transport_endpoint
{
- ip46_address_t ip; /** ip address */
- u16 port; /** port in net order */
- u8 is_ip4; /** 1 if ip4 */
- u32 vrf; /** fib table the endpoint is associated with */
+#define _(type, name) type name;
+ foreach_transport_connection_fields
+#undef _
} transport_endpoint_t;
+#define ENDPOINT_INVALID_INDEX ((u32)~0)
+
+always_inline u8
+transport_connection_fib_proto (transport_connection_t * tc)
+{
+ return tc->is_ip4 ? FIB_PROTOCOL_IP4 : FIB_PROTOCOL_IP6;
+}
+
#endif /* VNET_VNET_URI_TRANSPORT_H_ */
/*