summaryrefslogtreecommitdiffstats
path: root/src/vnet/session-apps
diff options
context:
space:
mode:
Diffstat (limited to 'src/vnet/session-apps')
-rw-r--r--src/vnet/session-apps/echo_client.c821
-rw-r--r--src/vnet/session-apps/echo_client.h118
-rw-r--r--src/vnet/session-apps/echo_server.c485
-rw-r--r--src/vnet/session-apps/http_server.c668
-rw-r--r--src/vnet/session-apps/proxy.c611
-rw-r--r--src/vnet/session-apps/proxy.h93
6 files changed, 2796 insertions, 0 deletions
diff --git a/src/vnet/session-apps/echo_client.c b/src/vnet/session-apps/echo_client.c
new file mode 100644
index 00000000000..b8a4fb31f26
--- /dev/null
+++ b/src/vnet/session-apps/echo_client.c
@@ -0,0 +1,821 @@
+/*
+ * echo_client.c - vpp built-in echo client code
+ *
+ * Copyright (c) 2017 by Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/vnet.h>
+#include <vlibapi/api.h>
+#include <vlibmemory/api.h>
+#include <vnet/session-apps/echo_client.h>
+
+echo_client_main_t echo_client_main;
+
+#define ECHO_CLIENT_DBG (0)
+
+/* Signal event 'code' to the CLI process node. Must run on the main
+ * thread (asserted) since it touches the main-thread process graph. */
+static void
+signal_evt_to_cli_i (int *code)
+{
+  echo_client_main_t *ecm = &echo_client_main;
+  ASSERT (vlib_get_thread_index () == 0);
+  vlib_process_signal_event (ecm->vlib_main, ecm->cli_node_index, *code, 0);
+}
+
+/* Thread-safe wrapper: signal the CLI process directly when on the main
+ * thread, otherwise marshal the call to the main thread via the API RPC. */
+static void
+signal_evt_to_cli (int code)
+{
+  if (vlib_get_thread_index () != 0)
+    vl_api_rpc_call_main_thread (signal_evt_to_cli_i, (u8 *) & code,
+				 sizeof (code));
+  else
+    signal_evt_to_cli_i (&code);
+}
+
+/* Enqueue the next chunk of pre-computed test data on session s's tx fifo
+ * and notify the session layer. Updates s->bytes_sent / s->bytes_to_send
+ * by however many bytes the fifo actually accepted (may be partial). */
+static void
+send_data_chunk (echo_client_main_t * ecm, session_t * s)
+{
+  u8 *test_data = ecm->connect_test_data;
+  int test_buf_offset;
+  u32 bytes_this_chunk;
+  session_fifo_event_t evt;
+  svm_fifo_t *txf;
+  int rv;
+
+  ASSERT (vec_len (test_data) > 0);
+
+  /* Resume inside the test pattern where the last chunk left off */
+  test_buf_offset = s->bytes_sent % vec_len (test_data);
+  bytes_this_chunk = vec_len (test_data) - test_buf_offset;
+
+  /* Don't send past the configured total */
+  bytes_this_chunk = bytes_this_chunk < s->bytes_to_send
+    ? bytes_this_chunk : s->bytes_to_send;
+
+  txf = s->server_tx_fifo;
+  rv = svm_fifo_enqueue_nowait (txf, bytes_this_chunk,
+				test_data + test_buf_offset);
+
+  /* If we managed to enqueue data... */
+  if (rv > 0)
+    {
+      /* Account for it... */
+      s->bytes_to_send -= rv;
+      s->bytes_sent += rv;
+
+      if (ECHO_CLIENT_DBG)
+	{
+	  /* *INDENT-OFF* */
+	  ELOG_TYPE_DECLARE (e) =
+	    {
+	      .format = "tx-enq: xfer %d bytes, sent %u remain %u",
+	      .format_args = "i4i4i4",
+	    };
+	  /* *INDENT-ON* */
+	  struct
+	  {
+	    u32 data[3];
+	  } *ed;
+	  ed = ELOG_DATA (&vlib_global_main.elog_main, e);
+	  ed->data[0] = rv;
+	  ed->data[1] = s->bytes_sent;
+	  ed->data[2] = s->bytes_to_send;
+	}
+
+      /* Poke the session layer */
+      if (svm_fifo_set_event (txf))
+	{
+	  /* Fabricate TX event, send to vpp */
+	  evt.fifo = txf;
+	  evt.event_type = FIFO_EVENT_APP_TX;
+
+	  if (svm_queue_add
+	      (ecm->vpp_event_queue[txf->master_thread_index], (u8 *) & evt,
+	       0 /* do wait for mutex */ ))
+	    clib_warning ("could not enqueue event");
+	}
+    }
+}
+
+/* Drain pending data from session s's rx fifo. In test-bytes mode the
+ * payload is dequeued into the per-thread rx buffer and verified against
+ * the expected (bytes_received + i) & 0xff pattern; otherwise the data is
+ * simply dropped. Updates the session's receive accounting. */
+static void
+receive_data_chunk (echo_client_main_t * ecm, session_t * s)
+{
+  svm_fifo_t *rx_fifo = s->server_rx_fifo;
+  u32 my_thread_index = vlib_get_thread_index ();
+  int n_read, i;
+
+  if (ecm->test_bytes)
+    {
+      n_read = svm_fifo_dequeue_nowait (rx_fifo,
+					vec_len (ecm->rx_buf
+						 [my_thread_index]),
+					ecm->rx_buf[my_thread_index]);
+    }
+  else
+    {
+      /* Not verifying content: drop everything currently enqueued */
+      n_read = svm_fifo_max_dequeue (rx_fifo);
+      svm_fifo_dequeue_drop (rx_fifo, n_read);
+    }
+
+  if (n_read > 0)
+    {
+      if (ECHO_CLIENT_DBG)
+	{
+	  /* *INDENT-OFF* */
+	  ELOG_TYPE_DECLARE (e) =
+	    {
+	      .format = "rx-deq: %d bytes",
+	      .format_args = "i4",
+	    };
+	  /* *INDENT-ON* */
+	  struct
+	  {
+	    u32 data[1];
+	  } *ed;
+	  ed = ELOG_DATA (&vlib_global_main.elog_main, e);
+	  ed->data[0] = n_read;
+	}
+
+      if (ecm->test_bytes)
+	{
+	  /* Verify the echoed bytes match the generated pattern */
+	  for (i = 0; i < n_read; i++)
+	    {
+	      if (ecm->rx_buf[my_thread_index][i]
+		  != ((s->bytes_received + i) & 0xff))
+		{
+		  clib_warning ("read %d error at byte %lld, 0x%x not 0x%x",
+				n_read, s->bytes_received + i,
+				ecm->rx_buf[my_thread_index][i],
+				((s->bytes_received + i) & 0xff));
+		  ecm->test_failed = 1;
+		}
+	    }
+	}
+      s->bytes_to_receive -= n_read;
+      s->bytes_received += n_read;
+    }
+}
+
+/* Per-thread input-node function driving the echo test. Works through the
+ * thread's connections in batches of ecm->connections_per_batch: sends and
+ * receives a chunk on each live session, and disconnects sessions that
+ * have finished both directions. Signals the CLI process (event 2) when
+ * the last connection completes. */
+static uword
+echo_client_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
+		     vlib_frame_t * frame)
+{
+  echo_client_main_t *ecm = &echo_client_main;
+  int my_thread_index = vlib_get_thread_index ();
+  session_t *sp;
+  int i;
+  int delete_session;
+  u32 *connection_indices;
+  u32 *connections_this_batch;
+  u32 nconnections_this_batch;
+
+  connection_indices = ecm->connection_index_by_thread[my_thread_index];
+  connections_this_batch =
+    ecm->connections_this_batch_by_thread[my_thread_index];
+
+  /* Nothing to do until the test is running and we own connections */
+  if ((ecm->run_test == 0) ||
+      ((vec_len (connection_indices) == 0)
+       && vec_len (connections_this_batch) == 0))
+    return 0;
+
+  /* Grab another pile of connections */
+  if (PREDICT_FALSE (vec_len (connections_this_batch) == 0))
+    {
+      nconnections_this_batch =
+	clib_min (ecm->connections_per_batch, vec_len (connection_indices));
+
+      ASSERT (nconnections_this_batch > 0);
+      vec_validate (connections_this_batch, nconnections_this_batch - 1);
+      /* Move the tail of connection_indices into the working batch */
+      clib_memcpy (connections_this_batch,
+		   connection_indices + vec_len (connection_indices)
+		   - nconnections_this_batch,
+		   nconnections_this_batch * sizeof (u32));
+      _vec_len (connection_indices) -= nconnections_this_batch;
+    }
+
+  /* Detect a batch that makes no progress across many invocations */
+  if (PREDICT_FALSE (ecm->prev_conns != ecm->connections_per_batch
+		     && ecm->prev_conns == vec_len (connections_this_batch)))
+    {
+      ecm->repeats++;
+      ecm->prev_conns = vec_len (connections_this_batch);
+      if (ecm->repeats == 500000)
+	{
+	  clib_warning ("stuck clients");
+	}
+    }
+  else
+    {
+      ecm->prev_conns = vec_len (connections_this_batch);
+      ecm->repeats = 0;
+    }
+
+  for (i = 0; i < vec_len (connections_this_batch); i++)
+    {
+      delete_session = 1;
+
+      sp = pool_elt_at_index (ecm->sessions, connections_this_batch[i]);
+
+      if (sp->bytes_to_send > 0)
+	{
+	  send_data_chunk (ecm, sp);
+	  delete_session = 0;
+	}
+      if (sp->bytes_to_receive > 0)
+	{
+	  receive_data_chunk (ecm, sp);
+	  delete_session = 0;
+	}
+      /* Both directions done: tear the session down */
+      if (PREDICT_FALSE (delete_session == 1))
+	{
+	  u32 index, thread_index;
+	  stream_session_t *s;
+
+	  __sync_fetch_and_add (&ecm->tx_total, sp->bytes_sent);
+	  __sync_fetch_and_add (&ecm->rx_total, sp->bytes_received);
+
+	  session_parse_handle (sp->vpp_session_handle,
+				&index, &thread_index);
+	  s = session_get_if_valid (index, thread_index);
+
+	  if (s)
+	    {
+	      vnet_disconnect_args_t _a, *a = &_a;
+	      a->handle = session_handle (s);
+	      a->app_index = ecm->app_index;
+	      vnet_disconnect_session (a);
+
+	      /* Remove from the batch; compensate the loop index */
+	      vec_delete (connections_this_batch, 1, i);
+	      i--;
+	      __sync_fetch_and_add (&ecm->ready_connections, -1);
+	    }
+	  else
+	    clib_warning ("session AWOL?");
+
+	  /* Kick the debug CLI process */
+	  if (ecm->ready_connections == 0)
+	    {
+	      signal_evt_to_cli (2);
+	    }
+	}
+    }
+
+  /* Write back vectors: vec_* ops above may have reallocated them */
+  ecm->connection_index_by_thread[my_thread_index] = connection_indices;
+  ecm->connections_this_batch_by_thread[my_thread_index] =
+    connections_this_batch;
+  return 0;
+}
+
+/* *INDENT-OFF* */
+/* Input node, disabled until a test starts (see echo_clients_command_fn) */
+VLIB_REGISTER_NODE (echo_clients_node) =
+{
+  .function = echo_client_node_fn,
+  .name = "echo-clients",
+  .type = VLIB_NODE_TYPE_INPUT,
+  .state = VLIB_NODE_STATE_DISABLED,
+};
+/* *INDENT-ON* */
+
+/* Register an internal (in-process) API client so the echo client can
+ * attach to the session layer without an external API connection.
+ * Always returns 0. */
+static int
+create_api_loopback (echo_client_main_t * ecm)
+{
+  api_main_t *am = &api_main;
+  vl_shmem_hdr_t *shmem_hdr;
+
+  shmem_hdr = am->shmem_hdr;
+  ecm->vl_input_queue = shmem_hdr->vl_input_queue;
+  ecm->my_client_index = vl_api_memclnt_create_internal ("echo_client",
+							 ecm->vl_input_queue);
+  return 0;
+}
+
+/* One-time initialization: create the API loopback client, build the 1MB
+ * test-data pattern, and size the per-thread buffers/vectors.
+ * Returns 0 on success, -1 if the API loopback could not be created. */
+static int
+echo_clients_init (vlib_main_t * vm)
+{
+  echo_client_main_t *ecm = &echo_client_main;
+  vlib_thread_main_t *vtm = vlib_get_thread_main ();
+  u32 num_threads;
+  int i;
+
+  if (create_api_loopback (ecm))
+    return -1;
+
+  num_threads = 1 /* main thread */ + vtm->n_threads;
+
+  /* Init test data. Big buffer */
+  vec_validate (ecm->connect_test_data, 1024 * 1024 - 1);
+  for (i = 0; i < vec_len (ecm->connect_test_data); i++)
+    ecm->connect_test_data[i] = i & 0xff;
+
+  /* One rx buffer per thread, each as large as the test pattern */
+  vec_validate (ecm->rx_buf, num_threads - 1);
+  for (i = 0; i < num_threads; i++)
+    vec_validate (ecm->rx_buf[i], vec_len (ecm->connect_test_data) - 1);
+
+  ecm->is_init = 1;
+
+  vec_validate (ecm->connection_index_by_thread, vtm->n_vlib_mains);
+  vec_validate (ecm->connections_this_batch_by_thread, vtm->n_vlib_mains);
+  vec_validate (ecm->vpp_event_queue, vtm->n_vlib_mains);
+
+  return 0;
+}
+
+/* Session-layer callback: a connect attempt completed. On failure, wake
+ * the CLI process with an error event. On success, allocate and populate
+ * an app-level session, bind it to the stream session's fifos, and add it
+ * to this thread's connection list. When the last expected connection is
+ * up, start the test and signal the CLI (event 1). Always returns 0. */
+static int
+echo_clients_session_connected_callback (u32 app_index, u32 api_context,
+					 stream_session_t * s, u8 is_fail)
+{
+  echo_client_main_t *ecm = &echo_client_main;
+  session_t *session;
+  u32 session_index;
+  u8 thread_index = vlib_get_thread_index ();
+
+  if (is_fail)
+    {
+      clib_warning ("connection %d failed!", api_context);
+      signal_evt_to_cli (-1);
+      return 0;
+    }
+
+  ASSERT (s->thread_index == thread_index);
+
+  /* Lazily cache this thread's vpp event queue */
+  if (!ecm->vpp_event_queue[thread_index])
+    ecm->vpp_event_queue[thread_index] =
+      session_manager_get_vpp_event_queue (thread_index);
+
+  /*
+   * Setup session
+   */
+  /* Pool is shared across threads, hence the lock around pool_get */
+  clib_spinlock_lock_if_init (&ecm->sessions_lock);
+  pool_get (ecm->sessions, session);
+  clib_spinlock_unlock_if_init (&ecm->sessions_lock);
+
+  memset (session, 0, sizeof (*session));
+  session_index = session - ecm->sessions;
+  session->bytes_to_send = ecm->bytes_to_send;
+  session->bytes_to_receive = ecm->no_return ? 0ULL : ecm->bytes_to_send;
+  session->server_rx_fifo = s->server_rx_fifo;
+  session->server_rx_fifo->client_session_index = session_index;
+  session->server_tx_fifo = s->server_tx_fifo;
+  session->server_tx_fifo->client_session_index = session_index;
+  session->vpp_session_handle = session_handle (s);
+
+  vec_add1 (ecm->connection_index_by_thread[thread_index], session_index);
+  __sync_fetch_and_add (&ecm->ready_connections, 1);
+  if (ecm->ready_connections == ecm->expected_connections)
+    {
+      ecm->run_test = 1;
+      /* Signal the CLI process that the action is starting... */
+      signal_evt_to_cli (1);
+    }
+
+  return 0;
+}
+
+/* Session-layer callback: peer reset the connection. Warn if the session
+ * was still active, then clean it up. */
+static void
+echo_clients_session_reset_callback (stream_session_t * s)
+{
+  if (s->session_state == SESSION_STATE_READY)
+    clib_warning ("Reset active connection %U", format_stream_session, s, 2);
+  stream_session_cleanup (s);
+  return;
+}
+
+/* Accept callback: the echo client never listens, so nothing to do. */
+static int
+echo_clients_session_create_callback (stream_session_t * s)
+{
+  return 0;
+}
+
+/* Session-layer callback: peer closed; complete the disconnect locally. */
+static void
+echo_clients_session_disconnect_callback (stream_session_t * s)
+{
+  echo_client_main_t *ecm = &echo_client_main;
+  vnet_disconnect_args_t _a, *a = &_a;
+  a->handle = session_handle (s);
+  a->app_index = ecm->app_index;
+  vnet_disconnect_session (a);
+  return;
+}
+
+/* Builtin rx callback: rx is polled from echo_client_node_fn, so being
+ * called here indicates a session-layer bug. */
+static int
+echo_clients_rx_callback (stream_session_t * s)
+{
+  clib_warning ("BUG");
+  return 0;
+}
+
+/* *INDENT-OFF* */
+/* Callback table handed to the session layer at attach time */
+static session_cb_vft_t echo_clients = {
+  .session_reset_callback = echo_clients_session_reset_callback,
+  .session_connected_callback = echo_clients_session_connected_callback,
+  .session_accept_callback = echo_clients_session_create_callback,
+  .session_disconnect_callback = echo_clients_session_disconnect_callback,
+  .builtin_server_rx_callback = echo_clients_rx_callback
+};
+/* *INDENT-ON* */
+
+/* Attach the echo client application to the session layer, translating
+ * the CLI-configured parameters (fifo sizes, segment size/count, optional
+ * app namespace) into attachment options.
+ * Returns 0 on success or the vnet_application_attach() error. */
+static clib_error_t *
+echo_clients_attach (u8 * appns_id, u64 appns_flags, u64 appns_secret)
+{
+  u32 prealloc_fifos, segment_size = 2 << 20;	/* default 4MB segment */
+  echo_client_main_t *ecm = &echo_client_main;
+  vnet_app_attach_args_t _a, *a = &_a;
+  u64 options[16];
+  clib_error_t *error = 0;
+
+  memset (a, 0, sizeof (*a));
+  memset (options, 0, sizeof (options));
+
+  a->api_client_index = ecm->my_client_index;
+  a->session_cb_vft = &echo_clients;
+
+  /* Preallocate one fifo pair per expected connection if requested */
+  prealloc_fifos = ecm->prealloc_fifos ? ecm->expected_connections : 1;
+
+  if (ecm->private_segment_size)
+    segment_size = ecm->private_segment_size;
+
+  options[APP_OPTIONS_ACCEPT_COOKIE] = 0x12345678;
+  options[APP_OPTIONS_SEGMENT_SIZE] = segment_size;
+  options[APP_OPTIONS_RX_FIFO_SIZE] = ecm->fifo_size;
+  options[APP_OPTIONS_TX_FIFO_SIZE] = ecm->fifo_size;
+  options[APP_OPTIONS_PRIVATE_SEGMENT_COUNT] = ecm->private_segment_count;
+  options[APP_OPTIONS_PREALLOC_FIFO_PAIRS] = prealloc_fifos;
+
+  options[APP_OPTIONS_FLAGS] = APP_OPTIONS_FLAGS_IS_BUILTIN;
+  if (appns_id)
+    {
+      options[APP_OPTIONS_FLAGS] |= appns_flags;
+      options[APP_OPTIONS_NAMESPACE_SECRET] = appns_secret;
+    }
+  a->options = options;
+  a->namespace_id = appns_id;
+
+  if ((error = vnet_application_attach (a)))
+    return error;
+
+  ecm->app_index = a->app_index;
+  return 0;
+}
+
+/* Detach the application from the session layer and reset attach state so
+ * a subsequent run can attach with different parameters.
+ * Returns the vnet_application_detach() result. */
+static int
+echo_clients_detach ()
+{
+  echo_client_main_t *ecm = &echo_client_main;
+  vnet_app_detach_args_t _da, *da = &_da;
+  int rv;
+
+  da->app_index = ecm->app_index;
+  rv = vnet_application_detach (da);
+  ecm->test_client_attached = 0;
+  ecm->app_index = ~0;
+  return rv;
+}
+
+/* Placeholder pthread body for the optional tx-thread mode; does nothing. */
+static void *
+echo_client_thread_fn (void *arg)
+{
+  return 0;
+}
+
+/** Start a transmit thread (at most one; no-op if already running).
+ * Returns 0 on success or if the thread already exists, -1 on
+ * pthread_create failure. */
+int
+echo_clients_start_tx_pthread (echo_client_main_t * ecm)
+{
+  if (ecm->client_thread_handle == 0)
+    {
+      int rv = pthread_create (&ecm->client_thread_handle,
+			       NULL /*attr */ ,
+			       echo_client_thread_fn, 0);
+      if (rv)
+	{
+	  ecm->client_thread_handle = 0;
+	  return -1;
+	}
+    }
+  return 0;
+}
+
+/* Fire off n_clients connect requests against ecm->connect_uri, pacing
+ * the call setups: brief suspend every 4 connects, and back off whenever
+ * more than 1000 connects are outstanding (not yet ready).
+ * Returns 0 or the first vnet_connect_uri() error. */
+clib_error_t *
+echo_clients_connect (vlib_main_t * vm, u32 n_clients)
+{
+  echo_client_main_t *ecm = &echo_client_main;
+  vnet_connect_args_t _a, *a = &_a;
+  clib_error_t *error = 0;
+  int i;
+  for (i = 0; i < n_clients; i++)
+    {
+      memset (a, 0, sizeof (*a));
+
+      a->uri = (char *) ecm->connect_uri;
+      a->api_context = i;	/* echoed back in the connected callback */
+      a->app_index = ecm->app_index;
+      a->mp = 0;
+
+      if ((error = vnet_connect_uri (a)))
+	return error;
+
+      /* Crude pacing for call setups */
+      if ((i % 4) == 0)
+	vlib_process_suspend (vm, 10e-6);
+      ASSERT (i + 1 >= ecm->ready_connections);
+      while (i + 1 - ecm->ready_connections > 1000)
+	{
+	  vlib_process_suspend (vm, 100e-6);
+	}
+    }
+  return 0;
+}
+
+/* CLI output helper, silenced when the user passed "no-output" */
+#define ec_cli_output(_fmt, _args...) 			\
+  if (!ecm->no_output)  				\
+    vlib_cli_output(vm, _fmt, ##_args)
+
+/* CLI handler for "test echo clients": parses test parameters, attaches
+ * the app, enables the per-thread input nodes, launches the connects,
+ * then parks waiting first for session establishment (event 1 or syn
+ * timeout) and then for test completion (event 2 or test timeout),
+ * finally reporting throughput and cleaning up.
+ * Returns 0 on success or a clib error describing the failure. */
+static clib_error_t *
+echo_clients_command_fn (vlib_main_t * vm,
+			 unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+  echo_client_main_t *ecm = &echo_client_main;
+  vlib_thread_main_t *thread_main = vlib_get_thread_main ();
+  uword *event_data = 0, event_type;
+  u8 *default_uri = (u8 *) "tcp://6.0.1.1/1234", *appns_id = 0;
+  u64 tmp, total_bytes, appns_flags = 0, appns_secret = 0;
+  f64 test_timeout = 20.0, syn_timeout = 20.0, delta;
+  f64 time_before_connects;
+  u32 n_clients = 1;
+  int preallocate_sessions = 0;
+  char *transfer_type;
+  clib_error_t *error = 0;
+  int i;
+
+  /* Reset configuration to defaults for each invocation */
+  ecm->bytes_to_send = 8192;
+  ecm->no_return = 0;
+  ecm->fifo_size = 64 << 10;
+  ecm->connections_per_batch = 1000;
+  ecm->private_segment_count = 0;
+  ecm->private_segment_size = 0;
+  ecm->no_output = 0;
+  ecm->test_bytes = 0;
+  ecm->test_failed = 0;
+  ecm->vlib_main = vm;
+  if (thread_main->n_vlib_mains > 1)
+    clib_spinlock_init (&ecm->sessions_lock);
+  /* NOTE(review): if the previous run fell back to default_uri (a string
+   * literal, below), this vec_free operates on a non-vector pointer —
+   * verify connect_uri is always heap-allocated before freeing. */
+  vec_free (ecm->connect_uri);
+
+  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+    {
+      if (unformat (input, "uri %s", &ecm->connect_uri))
+	;
+      else if (unformat (input, "nclients %d", &n_clients))
+	;
+      else if (unformat (input, "mbytes %lld", &tmp))
+	ecm->bytes_to_send = tmp << 20;
+      else if (unformat (input, "gbytes %lld", &tmp))
+	ecm->bytes_to_send = tmp << 30;
+      else if (unformat (input, "bytes %lld", &ecm->bytes_to_send))
+	;
+      else if (unformat (input, "test-timeout %f", &test_timeout))
+	;
+      else if (unformat (input, "syn-timeout %f", &syn_timeout))
+	;
+      else if (unformat (input, "no-return"))
+	ecm->no_return = 1;
+      else if (unformat (input, "fifo-size %d", &ecm->fifo_size))
+	ecm->fifo_size <<= 10;	/* fifo-size is given in KB */
+      else if (unformat (input, "private-segment-count %d",
+			 &ecm->private_segment_count))
+	;
+      else if (unformat (input, "private-segment-size %U",
+			 unformat_memory_size, &tmp))
+	{
+	  if (tmp >= 0x100000000ULL)
+	    return clib_error_return
+	      (0, "private segment size %lld (%llu) too large", tmp, tmp);
+	  ecm->private_segment_size = tmp;
+	}
+      else if (unformat (input, "preallocate-fifos"))
+	ecm->prealloc_fifos = 1;
+      else if (unformat (input, "preallocate-sessions"))
+	preallocate_sessions = 1;
+      else
+	if (unformat (input, "client-batch %d", &ecm->connections_per_batch))
+	;
+      else if (unformat (input, "appns %_%v%_", &appns_id))
+	;
+      else if (unformat (input, "all-scope"))
+	appns_flags |= (APP_OPTIONS_FLAGS_USE_GLOBAL_SCOPE
+			| APP_OPTIONS_FLAGS_USE_LOCAL_SCOPE);
+      else if (unformat (input, "local-scope"))
+	appns_flags = APP_OPTIONS_FLAGS_USE_LOCAL_SCOPE;
+      else if (unformat (input, "global-scope"))
+	appns_flags = APP_OPTIONS_FLAGS_USE_GLOBAL_SCOPE;
+      else if (unformat (input, "secret %lu", &appns_secret))
+	;
+      else if (unformat (input, "no-output"))
+	ecm->no_output = 1;
+      else if (unformat (input, "test-bytes"))
+	ecm->test_bytes = 1;
+      else
+	return clib_error_return (0, "unknown input `%U'",
+				  format_unformat_error, input);
+    }
+
+  /* Store cli process node index for signalling */
+  ecm->cli_node_index =
+    vlib_get_current_process (vm)->node_runtime.node_index;
+
+  if (ecm->is_init == 0)
+    {
+      if (echo_clients_init (vm))
+	return clib_error_return (0, "failed init");
+    }
+
+
+  ecm->ready_connections = 0;
+  ecm->expected_connections = n_clients;
+  ecm->rx_total = 0;
+  ecm->tx_total = 0;
+
+  if (!ecm->connect_uri)
+    {
+      clib_warning ("No uri provided. Using default: %v", default_uri);
+      /* NOTE(review): stores a string literal; see vec_free note above */
+      ecm->connect_uri = default_uri;
+    }
+
+#if ECHO_CLIENT_PTHREAD
+  /* NOTE(review): echo_clients_start_tx_pthread takes an ecm argument;
+   * this call would not compile if ECHO_CLIENT_PTHREAD were enabled. */
+  echo_clients_start_tx_pthread ();
+#endif
+
+  /* Session layer must be enabled with workers quiesced */
+  vlib_worker_thread_barrier_sync (vm);
+  vnet_session_enable_disable (vm, 1 /* turn on session and transports */ );
+  vlib_worker_thread_barrier_release (vm);
+
+  if (ecm->test_client_attached == 0)
+    {
+      if ((error = echo_clients_attach (appns_id, appns_flags, appns_secret)))
+	{
+	  vec_free (appns_id);
+	  clib_error_report (error);
+	  return error;
+	}
+      vec_free (appns_id);
+    }
+  ecm->test_client_attached = 1;
+
+  /* Turn on the builtin client input nodes */
+  for (i = 0; i < thread_main->n_vlib_mains; i++)
+    vlib_node_set_state (vlib_mains[i], echo_clients_node.index,
+			 VLIB_NODE_STATE_POLLING);
+
+  /* Warm the session pool so pool_get never expands mid-test */
+  if (preallocate_sessions)
+    {
+      session_t *sp __attribute__ ((unused));
+      for (i = 0; i < n_clients; i++)
+	pool_get (ecm->sessions, sp);
+      for (i = 0; i < n_clients; i++)
+	pool_put_index (ecm->sessions, i);
+    }
+
+  /* Fire off connect requests */
+  time_before_connects = vlib_time_now (vm);
+  if ((error = echo_clients_connect (vm, n_clients)))
+    return error;
+
+  /* Park until the sessions come up, or ten seconds elapse... */
+  vlib_process_wait_for_event_or_clock (vm, syn_timeout);
+  event_type = vlib_process_get_events (vm, &event_data);
+  switch (event_type)
+    {
+    case ~0:
+      ec_cli_output ("Timeout with only %d sessions active...",
+		     ecm->ready_connections);
+      error = clib_error_return (0, "failed: syn timeout with %d sessions",
+				 ecm->ready_connections);
+      goto cleanup;
+
+    case 1:
+      /* All sessions connected: report handshake rate, start the clock */
+      delta = vlib_time_now (vm) - time_before_connects;
+      if (delta != 0.0)
+	ec_cli_output ("%d three-way handshakes in %.2f seconds %.2f/s",
+		       n_clients, delta, ((f64) n_clients) / delta);
+
+      ecm->test_start_time = vlib_time_now (ecm->vlib_main);
+      ec_cli_output ("Test started at %.6f", ecm->test_start_time);
+      break;
+
+    default:
+      ec_cli_output ("unexpected event(1): %d", event_type);
+      error = clib_error_return (0, "failed: unexpected event(1): %d",
+				 event_type);
+      goto cleanup;
+    }
+
+  /* Now wait for the sessions to finish... */
+  vlib_process_wait_for_event_or_clock (vm, test_timeout);
+  event_type = vlib_process_get_events (vm, &event_data);
+  switch (event_type)
+    {
+    case ~0:
+      ec_cli_output ("Timeout with %d sessions still active...",
+		     ecm->ready_connections);
+      error = clib_error_return (0, "failed: timeout with %d sessions",
+				 ecm->ready_connections);
+      goto cleanup;
+
+    case 2:
+      /* Event 2: all connections drained (see echo_client_node_fn) */
+      ecm->test_end_time = vlib_time_now (vm);
+      ec_cli_output ("Test finished at %.6f", ecm->test_end_time);
+      break;
+
+    default:
+      ec_cli_output ("unexpected event(2): %d", event_type);
+      error = clib_error_return (0, "failed: unexpected event(2): %d",
+				 event_type);
+      goto cleanup;
+    }
+
+  /* Report throughput */
+  delta = ecm->test_end_time - ecm->test_start_time;
+  if (delta != 0.0)
+    {
+      total_bytes = (ecm->no_return ? ecm->tx_total : ecm->rx_total);
+      transfer_type = ecm->no_return ? "half-duplex" : "full-duplex";
+      ec_cli_output ("%lld bytes (%lld mbytes, %lld gbytes) in %.2f seconds",
+		     total_bytes, total_bytes / (1ULL << 20),
+		     total_bytes / (1ULL << 30), delta);
+      ec_cli_output ("%.2f bytes/second %s", ((f64) total_bytes) / (delta),
+		     transfer_type);
+      ec_cli_output ("%.4f gbit/second %s",
+		     (((f64) total_bytes * 8.0) / delta / 1e9),
+		     transfer_type);
+    }
+  else
+    {
+      ec_cli_output ("zero delta-t?");
+      error = clib_error_return (0, "failed: zero delta-t");
+      goto cleanup;
+    }
+
+  if (ecm->test_bytes && ecm->test_failed)
+    error = clib_error_return (0, "failed: test bytes");
+
+cleanup:
+  ecm->run_test = 0;
+  for (i = 0; i < vec_len (ecm->connection_index_by_thread); i++)
+    {
+      vec_reset_length (ecm->connection_index_by_thread[i]);
+      vec_reset_length (ecm->connections_this_batch_by_thread[i]);
+    }
+
+  pool_free (ecm->sessions);
+
+  /* Detach the application, so we can use different fifo sizes next time */
+  if (ecm->test_client_attached)
+    {
+      if (echo_clients_detach ())
+	{
+	  error = clib_error_return (0, "failed: app detach");
+	  ec_cli_output ("WARNING: app detach failed...");
+	}
+    }
+  if (error)
+    ec_cli_output ("test failed");
+  return error;
+}
+
+/* *INDENT-OFF* */
+/* CLI command registration; mp-safe since the handler does its own
+ * worker-thread barrier synchronization. */
+VLIB_CLI_COMMAND (echo_clients_command, static) =
+{
+  .path = "test echo clients",
+  .short_help = "test echo clients [nclients %d][[m|g]bytes <bytes>]"
+      "[test-timeout <time>][syn-timeout <time>][no-return][fifo-size <size>]"
+      "[private-segment-count <count>][private-segment-size <bytes>[m|g]]"
+      "[preallocate-fifos][preallocate-sessions][client-batch <batch-size>]"
+      "[uri <tcp://ip/port>][test-bytes][no-output]",
+  .function = echo_clients_command_fn,
+  .is_mp_safe = 1,
+};
+/* *INDENT-ON* */
+
+/* Init function: mark the client uninitialized; real setup is deferred to
+ * echo_clients_init() on first CLI use. */
+clib_error_t *
+echo_clients_main_init (vlib_main_t * vm)
+{
+  echo_client_main_t *ecm = &echo_client_main;
+  ecm->is_init = 0;
+  return 0;
+}
+
+VLIB_INIT_FUNCTION (echo_clients_main_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/session-apps/echo_client.h b/src/vnet/session-apps/echo_client.h
new file mode 100644
index 00000000000..4ae63ecee1f
--- /dev/null
+++ b/src/vnet/session-apps/echo_client.h
@@ -0,0 +1,118 @@
+
+/*
+ * echo_client.h - built-in application layer echo client
+ *
+ * Copyright (c) 2017 by Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef __included_echo_client_h__
+#define __included_echo_client_h__
+
+#include <vnet/vnet.h>
+#include <vnet/ip/ip.h>
+#include <vnet/ethernet/ethernet.h>
+
+#include <vppinfra/hash.h>
+#include <vppinfra/error.h>
+#include <svm/svm_fifo_segment.h>
+#include <vnet/session/session.h>
+#include <vnet/session/application_interface.h>
+
+/* Per-connection state for one echo client session */
+typedef struct
+{
+  u64 bytes_to_send;		/**< Remaining bytes to transmit */
+  u64 bytes_sent;		/**< Bytes transmitted so far */
+  u64 bytes_to_receive;		/**< Remaining bytes expected back */
+  u64 bytes_received;		/**< Bytes received so far */
+
+  svm_fifo_t *server_rx_fifo;	/**< Underlying stream session rx fifo */
+  svm_fifo_t *server_tx_fifo;	/**< Underlying stream session tx fifo */
+
+  u64 vpp_session_handle;	/**< Handle of the vpp stream session */
+} session_t;
+
+/* Global echo client state (one instance: echo_client_main) */
+typedef struct
+{
+  /*
+   * Application setup parameters
+   */
+  svm_queue_t *vl_input_queue;	/**< vpe input queue */
+  svm_queue_t **vpp_event_queue;	/**< per-thread vpp event queues */
+
+  u32 cli_node_index;		/**< cli process node index */
+  u32 my_client_index;		/**< loopback API client handle */
+  u32 app_index;		/**< app index after attach */
+
+  /*
+   * Configuration params
+   */
+  u8 *connect_uri;		/**< URI for slave's connect */
+  u64 bytes_to_send;		/**< Bytes to send */
+  u32 configured_segment_size;
+  u32 fifo_size;		/**< rx/tx fifo size in bytes */
+  u32 expected_connections;	/**< Number of clients/connections */
+  u32 connections_per_batch;	/**< Connections to rx/tx at once */
+  u32 private_segment_count;	/**< Number of private fifo segs */
+  u32 private_segment_size;	/**< size of private fifo segs */
+
+  /*
+   * Test state variables
+   */
+  session_t *sessions;		/**< Session pool, shared */
+  clib_spinlock_t sessions_lock;	/**< protects the session pool */
+  u8 **rx_buf;			/**< intermediate rx buffers */
+  u8 *connect_test_data;	/**< Pre-computed test data */
+  u32 **connection_index_by_thread;	/**< pending conns, per thread */
+  u32 **connections_this_batch_by_thread;	/**< active connection batch */
+  pthread_t client_thread_handle;	/**< optional tx pthread */
+
+  volatile u32 ready_connections;	/**< established, not yet done */
+  volatile u32 finished_connections;
+  volatile u64 rx_total;	/**< total bytes received, all sessions */
+  volatile u64 tx_total;	/**< total bytes sent, all sessions */
+  volatile int run_test;	/**< Signal start of test */
+
+  f64 test_start_time;
+  f64 test_end_time;
+  u32 prev_conns;		/**< batch size seen last poll (stall check) */
+  u32 repeats;			/**< consecutive polls without progress */
+  /*
+   * Flags
+   */
+  u8 is_init;
+  u8 test_client_attached;
+  u8 no_return;			/**< half-duplex: don't expect echo back */
+  u8 test_return_packets;
+  int i_am_master;
+  int drop_packets;		/**< drop all packets */
+  u8 prealloc_fifos;		/**< Request fifo preallocation */
+  u8 no_output;			/**< suppress CLI output */
+  u8 test_bytes;		/**< verify echoed payload contents */
+  u8 test_failed;		/**< set on payload verification error */
+
+  vlib_main_t *vlib_main;
+} echo_client_main_t;
+
+extern echo_client_main_t echo_client_main;
+
+vlib_node_registration_t echo_clients_node;
+
+#endif /* __included_echo_client_h__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/session-apps/echo_server.c b/src/vnet/session-apps/echo_server.c
new file mode 100644
index 00000000000..37a51d507fc
--- /dev/null
+++ b/src/vnet/session-apps/echo_server.c
@@ -0,0 +1,485 @@
+/*
+* Copyright (c) 2015-2017 Cisco and/or its affiliates.
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at:
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+#include <vnet/vnet.h>
+#include <vlibmemory/api.h>
+#include <vnet/session/application.h>
+#include <vnet/session/application_interface.h>
+
/** Global state for the built-in echo server application. */
typedef struct
{
  /*
   * Server app parameters
   */
  svm_queue_t **vpp_queue;	/**< Per-thread vpp event queues */
  svm_queue_t *vl_input_queue;	/**< Server's event queue */

  u32 app_index;		/**< Server app index */
  u32 my_client_index;		/**< API client handle */
  u32 node_index;		/**< process node index for event scheduling */

  /*
   * Config params
   */
  u8 no_echo;			/**< Don't echo traffic */
  u32 fifo_size;		/**< Fifo size */
  u32 rcv_buffer_size;		/**< Rcv buffer size */
  u32 prealloc_fifos;		/**< Preallocate fifos */
  u32 private_segment_count;	/**< Number of private segments */
  u32 private_segment_size;	/**< Size of private segments */
  char *server_uri;		/**< Server URI */

  /*
   * Test state
   */
  u8 **rx_buf;			/**< Per-thread RX buffer */
  u64 byte_index;		/**< test_bytes() cursor; reset on every accept */
  u32 **rx_retries;		/**< Per-thread, per-session self-tap counters */

  vlib_main_t *vlib_main;	/**< Convenience vlib_main pointer */
} echo_server_main_t;
+
+echo_server_main_t echo_server_main;
+
+int
+echo_server_session_accept_callback (stream_session_t * s)
+{
+ echo_server_main_t *esm = &echo_server_main;
+
+ esm->vpp_queue[s->thread_index] =
+ session_manager_get_vpp_event_queue (s->thread_index);
+ s->session_state = SESSION_STATE_READY;
+ esm->byte_index = 0;
+ vec_validate (esm->rx_retries[s->thread_index], s->session_index);
+ esm->rx_retries[s->thread_index][s->session_index] = 0;
+ return 0;
+}
+
+void
+echo_server_session_disconnect_callback (stream_session_t * s)
+{
+ echo_server_main_t *esm = &echo_server_main;
+ vnet_disconnect_args_t _a, *a = &_a;
+
+ a->handle = session_handle (s);
+ a->app_index = esm->app_index;
+ vnet_disconnect_session (a);
+}
+
/** Transport reset: log the event and free the session immediately. */
void
echo_server_session_reset_callback (stream_session_t * s)
{
  clib_warning ("Reset session %U", format_stream_session, s, 2);
  stream_session_cleanup (s);
}
+
/** Not expected: the echo server never opens outbound connections.
 *  Stub returns -1 so a stray connect is rejected. */
int
echo_server_session_connected_callback (u32 app_index, u32 api_context,
					stream_session_t * s, u8 is_fail)
{
  clib_warning ("called...");
  return -1;
}
+
/** Not expected for a builtin app; stub rejects with -1. */
int
echo_server_add_segment_callback (u32 client_index, const ssvm_private_t * sp)
{
  clib_warning ("called...");
  return -1;
}
+
/** Not expected for a builtin app; stub rejects with -1. */
int
echo_server_redirect_connect_callback (u32 client_index, void *mp)
{
  clib_warning ("called...");
  return -1;
}
+
+void
+test_bytes (echo_server_main_t * esm, int actual_transfer)
+{
+ int i;
+ u32 my_thread_id = vlib_get_thread_index ();
+
+ for (i = 0; i < actual_transfer; i++)
+ {
+ if (esm->rx_buf[my_thread_id][i] != ((esm->byte_index + i) & 0xff))
+ {
+ clib_warning ("at %lld expected %d got %d", esm->byte_index + i,
+ (esm->byte_index + i) & 0xff,
+ esm->rx_buf[my_thread_id][i]);
+ }
+ }
+ esm->byte_index += actual_transfer;
+}
+
+/*
+ * If no-echo, just read the data and be done with it
+ */
+int
+echo_server_builtin_server_rx_callback_no_echo (stream_session_t * s)
+{
+ echo_server_main_t *esm = &echo_server_main;
+ u32 my_thread_id = vlib_get_thread_index ();
+ int actual_transfer;
+ svm_fifo_t *rx_fifo;
+
+ rx_fifo = s->server_rx_fifo;
+
+ do
+ {
+ actual_transfer =
+ svm_fifo_dequeue_nowait (rx_fifo, esm->rcv_buffer_size,
+ esm->rx_buf[my_thread_id]);
+ }
+ while (actual_transfer > 0);
+ return 0;
+}
+
/**
 * Echo rx callback: dequeue as much as fits in the tx fifo and enqueue
 * it straight back.  When the tx fifo is full, or the echo falls behind
 * the rx fifo, a FIFO_EVENT_BUILTIN_RX "self-tap" event is queued so
 * this callback runs again later.  Always returns 0; failures are only
 * logged.
 */
int
echo_server_rx_callback (stream_session_t * s)
{
  u32 n_written, max_dequeue, max_enqueue, max_transfer;
  int actual_transfer;
  svm_fifo_t *tx_fifo, *rx_fifo;
  echo_server_main_t *esm = &echo_server_main;
  session_fifo_event_t evt;
  u32 thread_index = vlib_get_thread_index ();

  ASSERT (s->thread_index == thread_index);

  rx_fifo = s->server_rx_fifo;
  tx_fifo = s->server_tx_fifo;

  ASSERT (rx_fifo->master_thread_index == thread_index);
  ASSERT (tx_fifo->master_thread_index == thread_index);

  max_dequeue = svm_fifo_max_dequeue (s->server_rx_fifo);
  max_enqueue = svm_fifo_max_enqueue (s->server_tx_fifo);

  if (PREDICT_FALSE (max_dequeue == 0))
    return 0;

  /* Number of bytes we're going to copy */
  max_transfer = (max_dequeue < max_enqueue) ? max_dequeue : max_enqueue;

  /* No space in tx fifo */
  if (PREDICT_FALSE (max_transfer == 0))
    {
      /* XXX timeout for session that are stuck */

    rx_event:
      /* Program self-tap to retry */
      if (svm_fifo_set_event (rx_fifo))
	{
	  svm_queue_t *q;
	  evt.fifo = rx_fifo;
	  evt.event_type = FIFO_EVENT_BUILTIN_RX;

	  q = esm->vpp_queue[thread_index];
	  if (PREDICT_FALSE (q->cursize == q->maxsize))
	    clib_warning ("out of event queue space");
	  else if (svm_queue_add (q, (u8 *) & evt, 0))
	    clib_warning ("failed to enqueue self-tap");

	  /* Warn once if a session has retried 500k times */
	  if (esm->rx_retries[thread_index][s->session_index] == 500000)
	    {
	      clib_warning ("session stuck: %U", format_stream_session, s, 2);
	    }
	  if (esm->rx_retries[thread_index][s->session_index] < 500001)
	    esm->rx_retries[thread_index][s->session_index]++;
	}

      return 0;
    }

  _vec_len (esm->rx_buf[thread_index]) = max_transfer;

  actual_transfer = svm_fifo_dequeue_nowait (rx_fifo, max_transfer,
					     esm->rx_buf[thread_index]);
  ASSERT (actual_transfer == max_transfer);

// test_bytes (esm, actual_transfer);

  /*
   * Echo back
   */

  n_written = svm_fifo_enqueue_nowait (tx_fifo, actual_transfer,
				       esm->rx_buf[thread_index]);

  /* Should not happen: we sized max_transfer by max_enqueue above */
  if (n_written != max_transfer)
    clib_warning ("short trout!");

  if (svm_fifo_set_event (tx_fifo))
    {
      /* Fabricate TX event, send to vpp */
      evt.fifo = tx_fifo;
      evt.event_type = FIFO_EVENT_APP_TX;

      if (svm_queue_add (esm->vpp_queue[s->thread_index],
			 (u8 *) & evt, 0 /* do wait for mutex */ ))
	clib_warning ("failed to enqueue tx evt");
    }

  /* Data still left in the rx fifo: self-tap to echo the rest later */
  if (PREDICT_FALSE (n_written < max_dequeue))
    goto rx_event;

  return 0;
}
+
/** Session-layer callbacks for the echo server.  builtin_server_rx_callback
 *  is overwritten at attach time when "no-echo" is configured. */
static session_cb_vft_t echo_server_session_cb_vft = {
  .session_accept_callback = echo_server_session_accept_callback,
  .session_disconnect_callback = echo_server_session_disconnect_callback,
  .session_connected_callback = echo_server_session_connected_callback,
  .add_segment_callback = echo_server_add_segment_callback,
  .redirect_connect_callback = echo_server_redirect_connect_callback,
  .builtin_server_rx_callback = echo_server_rx_callback,
  .session_reset_callback = echo_server_session_reset_callback
};
+
+/* Abuse VPP's input queue */
+static int
+create_api_loopback (vlib_main_t * vm)
+{
+ echo_server_main_t *esm = &echo_server_main;
+ api_main_t *am = &api_main;
+ vl_shmem_hdr_t *shmem_hdr;
+
+ shmem_hdr = am->shmem_hdr;
+ esm->vl_input_queue = shmem_hdr->vl_input_queue;
+ esm->my_client_index = vl_api_memclnt_create_internal ("echo_server",
+ esm->vl_input_queue);
+ return 0;
+}
+
+static int
+echo_server_attach (u8 * appns_id, u64 appns_flags, u64 appns_secret)
+{
+ echo_server_main_t *esm = &echo_server_main;
+ u64 options[APP_OPTIONS_N_OPTIONS];
+ vnet_app_attach_args_t _a, *a = &_a;
+ u32 segment_size = 512 << 20;
+
+ memset (a, 0, sizeof (*a));
+ memset (options, 0, sizeof (options));
+
+ if (esm->no_echo)
+ echo_server_session_cb_vft.builtin_server_rx_callback =
+ echo_server_builtin_server_rx_callback_no_echo;
+ else
+ echo_server_session_cb_vft.builtin_server_rx_callback =
+ echo_server_rx_callback;
+
+ if (esm->private_segment_size)
+ segment_size = esm->private_segment_size;
+
+ a->api_client_index = esm->my_client_index;
+ a->session_cb_vft = &echo_server_session_cb_vft;
+ a->options = options;
+ a->options[APP_OPTIONS_SEGMENT_SIZE] = segment_size;
+ a->options[APP_OPTIONS_RX_FIFO_SIZE] = esm->fifo_size;
+ a->options[APP_OPTIONS_TX_FIFO_SIZE] = esm->fifo_size;
+ a->options[APP_OPTIONS_PRIVATE_SEGMENT_COUNT] = esm->private_segment_count;
+ a->options[APP_OPTIONS_PREALLOC_FIFO_PAIRS] =
+ esm->prealloc_fifos ? esm->prealloc_fifos : 1;
+
+ a->options[APP_OPTIONS_FLAGS] = APP_OPTIONS_FLAGS_IS_BUILTIN;
+ if (appns_id)
+ {
+ a->namespace_id = appns_id;
+ a->options[APP_OPTIONS_FLAGS] |= appns_flags;
+ a->options[APP_OPTIONS_NAMESPACE_SECRET] = appns_secret;
+ }
+
+ if (vnet_application_attach (a))
+ {
+ clib_warning ("failed to attach server");
+ return -1;
+ }
+ esm->app_index = a->app_index;
+ return 0;
+}
+
+static int
+echo_server_detach (void)
+{
+ echo_server_main_t *esm = &echo_server_main;
+ vnet_app_detach_args_t _da, *da = &_da;
+ int rv;
+
+ da->app_index = esm->app_index;
+ rv = vnet_application_detach (da);
+ esm->app_index = ~0;
+ return rv;
+}
+
+static int
+echo_server_listen ()
+{
+ echo_server_main_t *esm = &echo_server_main;
+ vnet_bind_args_t _a, *a = &_a;
+ memset (a, 0, sizeof (*a));
+ a->app_index = esm->app_index;
+ a->uri = esm->server_uri;
+ return vnet_bind_uri (a);
+}
+
+static int
+echo_server_create (vlib_main_t * vm, u8 * appns_id, u64 appns_flags,
+ u64 appns_secret)
+{
+ echo_server_main_t *esm = &echo_server_main;
+ vlib_thread_main_t *vtm = vlib_get_thread_main ();
+ u32 num_threads;
+ int i;
+
+ if (esm->my_client_index == (u32) ~ 0)
+ {
+ if (create_api_loopback (vm))
+ {
+ clib_warning ("failed to create api loopback");
+ return -1;
+ }
+ }
+
+ num_threads = 1 /* main thread */ + vtm->n_threads;
+ vec_validate (echo_server_main.vpp_queue, num_threads - 1);
+ vec_validate (esm->rx_buf, num_threads - 1);
+ vec_validate (esm->rx_retries, num_threads - 1);
+
+ for (i = 0; i < num_threads; i++)
+ vec_validate (esm->rx_buf[i], esm->rcv_buffer_size);
+
+ if (echo_server_attach (appns_id, appns_flags, appns_secret))
+ {
+ clib_warning ("failed to attach server");
+ return -1;
+ }
+ if (echo_server_listen ())
+ {
+ clib_warning ("failed to start listening");
+ if (echo_server_detach ())
+ clib_warning ("failed to detach");
+ return -1;
+ }
+ return 0;
+}
+
+static clib_error_t *
+echo_server_create_command_fn (vlib_main_t * vm, unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ echo_server_main_t *esm = &echo_server_main;
+ u8 server_uri_set = 0, *appns_id = 0;
+ u64 tmp, appns_flags = 0, appns_secret = 0;
+ char *default_uri = "tcp://0.0.0.0/1234";
+ int rv;
+
+ esm->no_echo = 0;
+ esm->fifo_size = 64 << 10;
+ esm->rcv_buffer_size = 128 << 10;
+ esm->prealloc_fifos = 0;
+ esm->private_segment_count = 0;
+ esm->private_segment_size = 0;
+ vec_free (esm->server_uri);
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "uri %s", &esm->server_uri))
+ server_uri_set = 1;
+ else if (unformat (input, "no-echo"))
+ esm->no_echo = 1;
+ else if (unformat (input, "fifo-size %d", &esm->fifo_size))
+ esm->fifo_size <<= 10;
+ else if (unformat (input, "rcv-buf-size %d", &esm->rcv_buffer_size))
+ ;
+ else if (unformat (input, "prealloc-fifos %d", &esm->prealloc_fifos))
+ ;
+ else if (unformat (input, "private-segment-count %d",
+ &esm->private_segment_count))
+ ;
+ else if (unformat (input, "private-segment-size %U",
+ unformat_memory_size, &tmp))
+ {
+ if (tmp >= 0x100000000ULL)
+ return clib_error_return
+ (0, "private segment size %lld (%llu) too large", tmp, tmp);
+ esm->private_segment_size = tmp;
+ }
+ else if (unformat (input, "appns %_%v%_", &appns_id))
+ ;
+ else if (unformat (input, "all-scope"))
+ appns_flags |= (APP_OPTIONS_FLAGS_USE_GLOBAL_SCOPE
+ | APP_OPTIONS_FLAGS_USE_LOCAL_SCOPE);
+ else if (unformat (input, "local-scope"))
+ appns_flags |= APP_OPTIONS_FLAGS_USE_LOCAL_SCOPE;
+ else if (unformat (input, "global-scope"))
+ appns_flags |= APP_OPTIONS_FLAGS_USE_GLOBAL_SCOPE;
+ else if (unformat (input, "secret %lu", &appns_secret))
+ ;
+ else
+ return clib_error_return (0, "failed: unknown input `%U'",
+ format_unformat_error, input);
+ }
+
+ vnet_session_enable_disable (vm, 1 /* turn on TCP, etc. */ );
+
+ if (!server_uri_set)
+ {
+ clib_warning ("No uri provided! Using default: %s", default_uri);
+ esm->server_uri = (char *) format (0, "%s%c", default_uri, 0);
+ }
+
+ rv = echo_server_create (vm, appns_id, appns_flags, appns_secret);
+ vec_free (appns_id);
+ if (rv)
+ {
+ vec_free (esm->server_uri);
+ return clib_error_return (0, "failed: server_create returned %d", rv);
+ }
+
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (echo_server_create_command, static) =
+{
+ .path = "test echo server",
+ .short_help = "test echo server proto <proto> [no echo][fifo-size <mbytes>]"
+ "[rcv-buf-size <bytes>][prealloc-fifos <count>]"
+ "[private-segment-count <count>][private-segment-size <bytes[m|g]>]"
+ "[uri <tcp://ip/port>]",
+ .function = echo_server_create_command_fn,
+};
+/* *INDENT-ON* */
+
+clib_error_t *
+echo_server_main_init (vlib_main_t * vm)
+{
+ echo_server_main_t *esm = &echo_server_main;
+ esm->my_client_index = ~0;
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (echo_server_main_init);
+
+/*
+* fd.io coding-style-patch-verification: ON
+*
+* Local Variables:
+* eval: (c-set-style "gnu")
+* End:
+*/
diff --git a/src/vnet/session-apps/http_server.c b/src/vnet/session-apps/http_server.c
new file mode 100644
index 00000000000..07eaab4c1ae
--- /dev/null
+++ b/src/vnet/session-apps/http_server.c
@@ -0,0 +1,668 @@
+/*
+* Copyright (c) 2015-2017 Cisco and/or its affiliates.
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at:
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+#include <vnet/vnet.h>
+#include <vnet/session/application.h>
+#include <vnet/session/application_interface.h>
+
/** Events signalled to http-cli process nodes. */
typedef enum
{
  EVENT_WAKEUP = 1,		/**< Wake a suspended process node */
} http_process_event_t;
+
/** Work item handed to an http-cli process node; allocated by the rx
 *  callback and freed by free_http_process(). */
typedef struct
{
  u64 session_handle;		/**< Session the request arrived on */
  u64 node_index;		/**< Process node servicing this request */
  u8 *data;			/**< Copy of the request bytes (vector) */
} http_server_args;
+
/** Global state for the built-in http server application. */
typedef struct
{
  u8 **rx_buf;			/**< Per-thread request receive buffers */
  svm_queue_t **vpp_queue;	/**< Per-thread vpp event queues */
  u64 byte_index;		/* reset on accept; otherwise unused here */

  uword *handler_by_get_request;	/* NOTE(review): declared but never
					   referenced in this file */

  u32 *free_http_cli_process_node_indices;	/**< Recyclable process nodes */

  /* Server's event queue */
  svm_queue_t *vl_input_queue;

  /* API client handle */
  u32 my_client_index;

  u32 app_index;		/**< App index after attach */

  /* process node index for event scheduling */
  u32 node_index;

  u32 prealloc_fifos;		/**< Number of fifo pairs to preallocate */
  u32 private_segment_size;	/**< Segment size; 0 => 128MB default */
  u32 fifo_size;		/**< Fifo size; 0 => defaults at attach */
  vlib_main_t *vlib_main;	/**< Convenience vlib_main pointer */
} http_server_main_t;
+
+http_server_main_t http_server_main;
+
/**
 * Retire an http-cli process node after it served a request: free the
 * request args, disable the node and park its index on the freelist so
 * alloc_http_process() can recycle it instead of registering a new node.
 */
static void
free_http_process (http_server_args * args)
{
  vlib_node_runtime_t *rt;
  vlib_main_t *vm = &vlib_global_main;
  http_server_main_t *hsm = &http_server_main;
  vlib_node_t *n;
  u32 node_index;
  http_server_args **save_args;

  node_index = args->node_index;
  ASSERT (node_index != 0);

  n = vlib_get_node (vm, node_index);
  rt = vlib_node_get_runtime (vm, n->index);
  save_args = vlib_node_get_runtime_data (vm, n->index);

  /* Reset process session pointer; this also frees args itself,
   * since *save_args was set to args by alloc_http_process() */
  clib_mem_free (*save_args);
  *save_args = 0;

  /* Turn off the process node */
  vlib_node_set_state (vm, rt->node_index, VLIB_NODE_STATE_DISABLED);

  /* add node index to the freelist */
  vec_add1 (hsm->free_http_cli_process_node_indices, node_index);
}
+
/* 200 reply template: %d = content length, %s = body */
static const char
  *http_response = "HTTP/1.1 200 OK\r\n"
  "Content-Type: text/html\r\n"
  "Expires: Mon, 11 Jan 1970 10:10:10 GMT\r\n"
  "Connection: close\r\n"
  "Pragma: no-cache\r\n" "Content-Length: %d\r\n\r\n%s";

/* Error reply template: %s = status line, e.g. "400 Bad Request" */
static const char
  *http_error_template = "HTTP/1.1 %s\r\n"
  "Content-Type: text/html\r\n"
  "Expires: Mon, 11 Jan 1970 10:10:10 GMT\r\n"
  "Connection: close\r\n" "Pragma: no-cache\r\n" "Content-Length: 0\r\n\r\n";

/* Header, including incantation to suppress favicon.ico requests */
static const char
  *html_header_template = "<html><head><title>%v</title>"
  "</head><link rel=\"icon\" href=\"data:,\"><body><pre>";

static const char *html_footer = "</pre></body></html>\r\n";

/* Canned page body used in "static" mode */
static const char
  *html_header_static = "<html><head><title>static reply</title></head>"
  "<link rel=\"icon\" href=\"data:,\"><body><pre>hello</pre></body>"
  "</html>\r\n";

/* Pre-built static-mode http reply; composed in the CLI handler */
static u8 *static_http;
+
+static void
+http_cli_output (uword arg, u8 * buffer, uword buffer_bytes)
+{
+ u8 **output_vecp = (u8 **) arg;
+ u8 *output_vec;
+ u32 offset;
+
+ output_vec = *output_vecp;
+
+ offset = vec_len (output_vec);
+ vec_validate (output_vec, offset + buffer_bytes - 1);
+ clib_memcpy (output_vec + offset, buffer, buffer_bytes);
+
+ *output_vecp = output_vec;
+}
+
/**
 * Push @data into the session's tx fifo, suspending the calling process
 * between attempts with exponential backoff (10ms doubling, capped near
 * 1s) and giving up after 10s with no progress.  Must run in process
 * context (uses vlib_process_suspend).
 */
void
send_data (stream_session_t * s, u8 * data)
{
  session_fifo_event_t evt;
  u32 offset, bytes_to_send;
  f64 delay = 10e-3;
  http_server_main_t *hsm = &http_server_main;
  vlib_main_t *vm = hsm->vlib_main;
  f64 last_sent_timer = vlib_time_now (vm);

  bytes_to_send = vec_len (data);
  offset = 0;

  while (bytes_to_send > 0)
    {
      int actual_transfer;

      actual_transfer = svm_fifo_enqueue_nowait
	(s->server_tx_fifo, bytes_to_send, data + offset);

      /* Made any progress? */
      if (actual_transfer <= 0)
	{
	  vlib_process_suspend (vm, delay);
	  /* 10s deadman timer */
	  if (vlib_time_now (vm) > last_sent_timer + 10.0)
	    {
	      /* $$$$ FC: reset transport session here? */
	      break;
	    }
	  /* Exponential backoff, within reason */
	  if (delay < 1.0)
	    delay = delay * 2.0;
	}
      else
	{
	  last_sent_timer = vlib_time_now (vm);
	  offset += actual_transfer;
	  bytes_to_send -= actual_transfer;

	  if (svm_fifo_set_event (s->server_tx_fifo))
	    {
	      /* Fabricate TX event, send to vpp */
	      evt.fifo = s->server_tx_fifo;
	      evt.event_type = FIFO_EVENT_APP_TX;

	      svm_queue_add (hsm->vpp_queue[s->thread_index],
			     (u8 *) & evt, 0 /* do wait for mutex */ );
	    }
	  /* Progress made: reset backoff */
	  delay = 10e-3;
	}
    }
}
+
+static void
+send_error (stream_session_t * s, char *str)
+{
+ u8 *data;
+
+ data = format (0, http_error_template, str);
+ send_data (s, data);
+ vec_free (data);
+}
+
/**
 * Process node body that services one http request: extract the GET
 * target, run it as a debug-CLI command, wrap the output in html and
 * send the reply.  The node is recycled via free_http_process() at the
 * end, which also frees @args.
 */
static uword
http_cli_process (vlib_main_t * vm,
		  vlib_node_runtime_t * rt, vlib_frame_t * f)
{
  http_server_main_t *hsm = &http_server_main;
  u8 *request = 0, *reply = 0;
  http_server_args **save_args;
  http_server_args *args;
  stream_session_t *s;
  unformat_input_t input;
  int i;
  u8 *http = 0, *html = 0;

  save_args = vlib_node_get_runtime_data (hsm->vlib_main, rt->node_index);
  args = *save_args;
  s = session_get_from_handle (args->session_handle);
  ASSERT (s);

  request = (u8 *) (void *) (args->data);
  /* Minimum plausible request: "GET /..." */
  if (vec_len (request) < 7)
    {
      send_error (s, "400 Bad Request");
      goto out;
    }

  /* Scan for the "GET " verb anywhere in the request */
  for (i = 0; i < vec_len (request) - 4; i++)
    {
      if (request[i] == 'G' &&
	  request[i + 1] == 'E' &&
	  request[i + 2] == 'T' && request[i + 3] == ' ')
	goto found;
    }
bad_request:
  send_error (s, "400 Bad Request");
  goto out;

found:
  /* Lose "GET " */
  vec_delete (request, i + 5, 0);

  /* Replace slashes with spaces, stop at the end of the path */
  i = 0;
  while (1)
    {
      if (request[i] == '/')
	request[i] = ' ';
      else if (request[i] == ' ')
	{
	  /* vlib_cli_input is vector-based, no need for a NULL */
	  _vec_len (request) = i;
	  break;
	}
      i++;
      /* Should never happen */
      if (i == vec_len (request))
	goto bad_request;
    }

  /* Generate the html header */
  html = format (0, html_header_template, request /* title */ );

  /* Run the command */
  unformat_init_vector (&input, request);
  vlib_cli_input (vm, &input, http_cli_output, (uword) & reply);
  unformat_free (&input);
  /* request's storage now belongs to the unformat machinery */
  request = 0;

  /* Generate the html page */
  html = format (html, "%v", reply);
  html = format (html, html_footer);
  /* And the http reply */
  http = format (0, http_response, vec_len (html), html);

  /* Send it */
  send_data (s, http);

out:
  /* Cleanup */
  vec_free (request);
  vec_free (reply);
  vec_free (html);
  vec_free (http);

  free_http_process (args);
  return (0);
}
+
/**
 * Find a process node to service an http request: reuse one from the
 * freelist when possible, otherwise register a brand new process node.
 * Runs on the main thread (node registration is not thread-safe).
 */
static void
alloc_http_process (http_server_args * args)
{
  char *name;
  vlib_node_t *n;
  http_server_main_t *hsm = &http_server_main;
  vlib_main_t *vm = hsm->vlib_main;
  uword l = vec_len (hsm->free_http_cli_process_node_indices);
  http_server_args **save_args;

  if (vec_len (hsm->free_http_cli_process_node_indices) > 0)
    {
      /* Recycle: re-enable the most recently freed process node */
      n = vlib_get_node (vm, hsm->free_http_cli_process_node_indices[l - 1]);
      vlib_node_set_state (vm, n->index, VLIB_NODE_STATE_POLLING);
      _vec_len (hsm->free_http_cli_process_node_indices) = l - 1;
    }
  else
    {
      /* static: the registration template is reused for every new node */
      static vlib_node_registration_t r = {
	.function = http_cli_process,
	.type = VLIB_NODE_TYPE_PROCESS,
	.process_log2_n_stack_bytes = 16,
	.runtime_data_bytes = sizeof (void *),
      };

      name = (char *) format (0, "http-cli-%d", l);
      r.name = name;
      vlib_register_node (vm, &r);
      vec_free (name);

      n = vlib_get_node (vm, r.index);
    }

  /* Save the node index in the args. It won't be zero. */
  args->node_index = n->index;

  /* Save the args (pointer) in the node runtime */
  save_args = vlib_node_get_runtime_data (vm, n->index);
  *save_args = args;

  vlib_start_process (vm, n->runtime_index);
}
+
+static void
+alloc_http_process_callback (void *cb_args)
+{
+ alloc_http_process ((http_server_args *) cb_args);
+}
+
+static int
+session_rx_request (stream_session_t * s)
+{
+ http_server_main_t *hsm = &http_server_main;
+ svm_fifo_t *rx_fifo;
+ u32 max_dequeue;
+ int actual_transfer;
+
+ rx_fifo = s->server_rx_fifo;
+ max_dequeue = svm_fifo_max_dequeue (rx_fifo);
+ svm_fifo_unset_event (rx_fifo);
+ if (PREDICT_FALSE (max_dequeue == 0))
+ return -1;
+
+ vec_validate (hsm->rx_buf[s->thread_index], max_dequeue - 1);
+ _vec_len (hsm->rx_buf[s->thread_index]) = max_dequeue;
+
+ actual_transfer = svm_fifo_dequeue_nowait (rx_fifo, max_dequeue,
+ hsm->rx_buf[s->thread_index]);
+ ASSERT (actual_transfer > 0);
+ _vec_len (hsm->rx_buf[s->thread_index]) = actual_transfer;
+ return 0;
+}
+
/**
 * Session rx callback: copy the request out of the fifo and hand it to
 * an http-cli process node.  Node allocation must happen on thread 0,
 * so worker threads forward the work via an RPC event.
 */
static int
http_server_rx_callback (stream_session_t * s)
{
  http_server_main_t *hsm = &http_server_main;
  http_server_args *args;
  int rv;

  rv = session_rx_request (s);
  if (rv)
    return rv;

  /* send the command to a new/recycled vlib process */
  args = clib_mem_alloc (sizeof (*args));	/* freed by free_http_process */
  args->data = vec_dup (hsm->rx_buf[s->thread_index]);
  args->session_handle = session_handle (s);

  /* Send an RPC request via the thread-0 input node */
  if (vlib_get_thread_index () != 0)
    {
      session_fifo_event_t evt;
      evt.rpc_args.fp = alloc_http_process_callback;
      evt.rpc_args.arg = args;
      evt.event_type = FIFO_EVENT_RPC;
      svm_queue_add
	(session_manager_get_vpp_event_queue (0 /* main thread */ ),
	 (u8 *) & evt, 0 /* do wait for mutex */ );
    }
  else
    alloc_http_process (args);
  return 0;
}
+
/**
 * "static" mode rx callback: any request containing "GET " gets the
 * canned static_http reply; everything else gets 400.  No process node
 * is involved — the reply is sent inline.
 */
static int
http_server_rx_callback_static (stream_session_t * s)
{
  http_server_main_t *hsm = &http_server_main;
  u8 *request = 0;
  int i;
  int rv;

  rv = session_rx_request (s);
  if (rv)
    return rv;

  /* Alias the per-thread rx buffer; see cleanup note below */
  request = hsm->rx_buf[s->thread_index];
  if (vec_len (request) < 7)
    {
      send_error (s, "400 Bad Request");
      goto out;
    }

  for (i = 0; i < vec_len (request) - 4; i++)
    {
      if (request[i] == 'G' &&
	  request[i + 1] == 'E' &&
	  request[i + 2] == 'T' && request[i + 3] == ' ')
	goto found;
    }
  send_error (s, "400 Bad Request");
  goto out;

found:

  /* Send it */
  send_data (s, static_http);

out:
  /* Cleanup: vec_free zeroes `request`, so this resets the shared
   * per-thread buffer to NULL; session_rx_request reallocates it on
   * the next request */
  vec_free (request);
  hsm->rx_buf[s->thread_index] = request;
  return 0;
}
+
+static int
+http_server_session_accept_callback (stream_session_t * s)
+{
+ http_server_main_t *bsm = &http_server_main;
+
+ bsm->vpp_queue[s->thread_index] =
+ session_manager_get_vpp_event_queue (s->thread_index);
+ s->session_state = SESSION_STATE_READY;
+ bsm->byte_index = 0;
+ return 0;
+}
+
+static void
+http_server_session_disconnect_callback (stream_session_t * s)
+{
+ http_server_main_t *bsm = &http_server_main;
+ vnet_disconnect_args_t _a, *a = &_a;
+
+ a->handle = session_handle (s);
+ a->app_index = bsm->app_index;
+ vnet_disconnect_session (a);
+}
+
/** Transport reset: log and free the session immediately. */
static void
http_server_session_reset_callback (stream_session_t * s)
{
  clib_warning ("called.. ");
  stream_session_cleanup (s);
}
+
/** Not expected: the http server never opens outbound connections.
 *  Stub rejects with -1. */
static int
http_server_session_connected_callback (u32 app_index, u32 api_context,
					stream_session_t * s, u8 is_fail)
{
  clib_warning ("called...");
  return -1;
}
+
/** Not expected for a builtin app; stub rejects with -1. */
static int
http_server_add_segment_callback (u32 client_index, const ssvm_private_t * sp)
{
  clib_warning ("called...");
  return -1;
}
+
/** Not expected for a builtin app; stub rejects with -1. */
static int
http_server_redirect_connect_callback (u32 client_index, void *mp)
{
  clib_warning ("called...");
  return -1;
}
+
/** Session-layer callbacks for the http server.  builtin_server_rx_callback
 *  is overwritten with the static-mode handler when "static" is configured. */
static session_cb_vft_t http_server_session_cb_vft = {
  .session_accept_callback = http_server_session_accept_callback,
  .session_disconnect_callback = http_server_session_disconnect_callback,
  .session_connected_callback = http_server_session_connected_callback,
  .add_segment_callback = http_server_add_segment_callback,
  .redirect_connect_callback = http_server_redirect_connect_callback,
  .builtin_server_rx_callback = http_server_rx_callback,
  .session_reset_callback = http_server_session_reset_callback
};
+
+/* Abuse VPP's input queue */
+static int
+create_api_loopback (vlib_main_t * vm)
+{
+ http_server_main_t *hsm = &http_server_main;
+ api_main_t *am = &api_main;
+ vl_shmem_hdr_t *shmem_hdr;
+
+ shmem_hdr = am->shmem_hdr;
+ hsm->vl_input_queue = shmem_hdr->vl_input_queue;
+ hsm->my_client_index =
+ vl_api_memclnt_create_internal ("http_server", hsm->vl_input_queue);
+ return 0;
+}
+
+static int
+server_attach ()
+{
+ http_server_main_t *hsm = &http_server_main;
+ u64 options[APP_OPTIONS_N_OPTIONS];
+ vnet_app_attach_args_t _a, *a = &_a;
+ u32 segment_size = 128 << 20;
+
+ memset (a, 0, sizeof (*a));
+ memset (options, 0, sizeof (options));
+
+ if (hsm->private_segment_size)
+ segment_size = hsm->private_segment_size;
+
+ a->api_client_index = hsm->my_client_index;
+ a->session_cb_vft = &http_server_session_cb_vft;
+ a->options = options;
+ a->options[APP_OPTIONS_SEGMENT_SIZE] = segment_size;
+ a->options[APP_OPTIONS_RX_FIFO_SIZE] =
+ hsm->fifo_size ? hsm->fifo_size : 8 << 10;
+ a->options[APP_OPTIONS_TX_FIFO_SIZE] =
+ hsm->fifo_size ? hsm->fifo_size : 32 << 10;
+ a->options[APP_OPTIONS_FLAGS] = APP_OPTIONS_FLAGS_IS_BUILTIN;
+ a->options[APP_OPTIONS_PREALLOC_FIFO_PAIRS] = hsm->prealloc_fifos;
+
+ if (vnet_application_attach (a))
+ {
+ clib_warning ("failed to attach server");
+ return -1;
+ }
+ hsm->app_index = a->app_index;
+ return 0;
+}
+
+static int
+http_server_listen ()
+{
+ http_server_main_t *hsm = &http_server_main;
+ vnet_bind_args_t _a, *a = &_a;
+ memset (a, 0, sizeof (*a));
+ a->app_index = hsm->app_index;
+ a->uri = "tcp://0.0.0.0/80";
+ return vnet_bind_uri (a);
+}
+
+static int
+http_server_create (vlib_main_t * vm)
+{
+ http_server_main_t *hsm = &http_server_main;
+ u32 num_threads;
+ vlib_thread_main_t *vtm = vlib_get_thread_main ();
+
+ ASSERT (hsm->my_client_index == (u32) ~ 0);
+ if (create_api_loopback (vm))
+ return -1;
+
+ num_threads = 1 /* main thread */ + vtm->n_threads;
+ vec_validate (http_server_main.vpp_queue, num_threads - 1);
+
+ if (server_attach ())
+ {
+ clib_warning ("failed to attach server");
+ return -1;
+ }
+ if (http_server_listen ())
+ {
+ clib_warning ("failed to start listening");
+ return -1;
+ }
+ return 0;
+}
+
+static clib_error_t *
+http_server_create_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ http_server_main_t *hsm = &http_server_main;
+ int rv, is_static = 0;
+ u64 seg_size;
+ u8 *html;
+
+ hsm->prealloc_fifos = 0;
+ hsm->private_segment_size = 0;
+ hsm->fifo_size = 0;
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "static"))
+ is_static = 1;
+ else if (unformat (input, "prealloc-fifos %d", &hsm->prealloc_fifos))
+ ;
+ else if (unformat (input, "private-segment-size %U",
+ unformat_memory_size, &seg_size))
+ {
+ if (seg_size >= 0x100000000ULL)
+ {
+ vlib_cli_output (vm, "private segment size %llu, too large",
+ seg_size);
+ return 0;
+ }
+ hsm->private_segment_size = seg_size;
+ }
+ else if (unformat (input, "fifo-size %d", &hsm->fifo_size))
+ hsm->fifo_size <<= 10;
+ else
+ return clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
+ }
+ if (hsm->my_client_index != (u32) ~ 0)
+ return clib_error_return (0, "test http server is already running");
+
+ vnet_session_enable_disable (vm, 1 /* turn on TCP, etc. */ );
+
+ if (is_static)
+ {
+ http_server_session_cb_vft.builtin_server_rx_callback =
+ http_server_rx_callback_static;
+ html = format (0, html_header_static);
+ static_http = format (0, http_response, vec_len (html), html);
+ }
+ rv = http_server_create (vm);
+ switch (rv)
+ {
+ case 0:
+ break;
+ default:
+ return clib_error_return (0, "server_create returned %d", rv);
+ }
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (http_server_create_command, static) =
+{
+ .path = "test http server",
+ .short_help = "test http server",
+ .function = http_server_create_command_fn,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+http_server_main_init (vlib_main_t * vm)
+{
+ http_server_main_t *hsm = &http_server_main;
+ vlib_thread_main_t *vtm = vlib_get_thread_main ();
+ u32 num_threads;
+
+ hsm->my_client_index = ~0;
+ hsm->vlib_main = vm;
+ num_threads = 1 /* main thread */ + vtm->n_threads;
+ vec_validate (hsm->rx_buf, num_threads - 1);
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (http_server_main_init);
+
+/*
+* fd.io coding-style-patch-verification: ON
+*
+* Local Variables:
+* eval: (c-set-style "gnu")
+* End:
+*/
diff --git a/src/vnet/session-apps/proxy.c b/src/vnet/session-apps/proxy.c
new file mode 100644
index 00000000000..2fdb63f1780
--- /dev/null
+++ b/src/vnet/session-apps/proxy.c
@@ -0,0 +1,611 @@
+/*
+* Copyright (c) 2015-2017 Cisco and/or its affiliates.
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at:
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+#include <vnet/vnet.h>
+#include <vlibmemory/api.h>
+#include <vnet/session/application.h>
+#include <vnet/session/application_interface.h>
+#include <vnet/session-apps/proxy.h>
+
+proxy_main_t proxy_main;
+
+static void
+delete_proxy_session (stream_session_t * s, int is_active_open)
+{
+ proxy_main_t *pm = &proxy_main;
+ proxy_session_t *ps = 0;
+ vnet_disconnect_args_t _a, *a = &_a;
+ stream_session_t *active_open_session = 0;
+ stream_session_t *server_session = 0;
+ uword *p;
+ u64 handle;
+
+ handle = session_handle (s);
+
+ clib_spinlock_lock_if_init (&pm->sessions_lock);
+ if (is_active_open)
+ {
+ active_open_session = s;
+
+ p = hash_get (pm->proxy_session_by_active_open_handle, handle);
+ if (p == 0)
+ {
+ clib_warning ("proxy session for %s handle %lld (%llx) AWOL",
+ is_active_open ? "active open" : "server",
+ handle, handle);
+ }
+ else
+ {
+ ps = pool_elt_at_index (pm->sessions, p[0]);
+ if (ps->vpp_server_handle != ~0)
+ server_session = session_get_from_handle (ps->vpp_server_handle);
+ else
+ server_session = 0;
+ }
+ }
+ else
+ {
+ server_session = s;
+
+ p = hash_get (pm->proxy_session_by_server_handle, handle);
+ if (p == 0)
+ {
+ clib_warning ("proxy session for %s handle %lld (%llx) AWOL",
+ is_active_open ? "active open" : "server",
+ handle, handle);
+ }
+ else
+ {
+ ps = pool_elt_at_index (pm->sessions, p[0]);
+ if (ps->vpp_server_handle != ~0)
+ active_open_session = session_get_from_handle
+ (ps->vpp_server_handle);
+ else
+ active_open_session = 0;
+ }
+ }
+
+ if (ps)
+ {
+ if (CLIB_DEBUG > 0)
+ memset (ps, 0xFE, sizeof (*ps));
+ pool_put (pm->sessions, ps);
+ }
+
+ clib_spinlock_unlock_if_init (&pm->sessions_lock);
+
+ if (active_open_session)
+ {
+ a->handle = session_handle (active_open_session);
+ a->app_index = pm->active_open_app_index;
+ hash_unset (pm->proxy_session_by_active_open_handle,
+ session_handle (active_open_session));
+ vnet_disconnect_session (a);
+ }
+
+ if (server_session)
+ {
+ a->handle = session_handle (server_session);
+ a->app_index = pm->server_app_index;
+ hash_unset (pm->proxy_session_by_server_handle,
+ session_handle (server_session));
+ vnet_disconnect_session (a);
+ }
+}
+
+static int
+proxy_accept_callback (stream_session_t * s)
+{
+ proxy_main_t *pm = &proxy_main;
+
+ s->session_state = SESSION_STATE_READY;
+
+ clib_spinlock_lock_if_init (&pm->sessions_lock);
+
+ return 0;
+}
+
/** Server-side disconnect: tear down the whole proxy session pair. */
static void
proxy_disconnect_callback (stream_session_t * s)
{
  delete_proxy_session (s, 0 /* is_active_open */ );
}
+
/** Server-side reset: log it, then tear down the proxy session pair. */
static void
proxy_reset_callback (stream_session_t * s)
{
  clib_warning ("Reset session %U", format_stream_session, s, 2);
  delete_proxy_session (s, 0 /* is_active_open */ );
}
+
/** Not expected on the passive (server) app -- connects are issued by the
 * active-open app, which has its own connected callback. Stub. */
static int
proxy_connected_callback (u32 app_index, u32 api_context,
			  stream_session_t * s, u8 is_fail)
{
  clib_warning ("called...");
  return -1;
}
+
/** Segment-add notification: not supported by this built-in app. Stub. */
static int
proxy_add_segment_callback (u32 client_index, const ssvm_private_t * sp)
{
  clib_warning ("called...");
  return -1;
}
+
/** Connect redirection: not supported by this built-in app. Stub. */
static int
proxy_redirect_connect_callback (u32 client_index, void *mp)
{
  clib_warning ("called...");
  return -1;
}
+
+static int
+proxy_rx_callback (stream_session_t * s)
+{
+ u32 max_dequeue;
+ int actual_transfer __attribute__ ((unused));
+ svm_fifo_t *tx_fifo, *rx_fifo;
+ proxy_main_t *pm = &proxy_main;
+ u32 thread_index = vlib_get_thread_index ();
+ vnet_connect_args_t _a, *a = &_a;
+ proxy_session_t *ps;
+ int proxy_index;
+ uword *p;
+ svm_fifo_t *active_open_tx_fifo;
+ session_fifo_event_t evt;
+
+ ASSERT (s->thread_index == thread_index);
+
+ clib_spinlock_lock_if_init (&pm->sessions_lock);
+ p = hash_get (pm->proxy_session_by_server_handle, session_handle (s));
+
+ if (PREDICT_TRUE (p != 0))
+ {
+ clib_spinlock_unlock_if_init (&pm->sessions_lock);
+ active_open_tx_fifo = s->server_rx_fifo;
+
+ /*
+ * Send event for active open tx fifo
+ */
+ if (svm_fifo_set_event (active_open_tx_fifo))
+ {
+ evt.fifo = active_open_tx_fifo;
+ evt.event_type = FIFO_EVENT_APP_TX;
+ if (svm_queue_add
+ (pm->active_open_event_queue[thread_index], (u8 *) & evt,
+ 0 /* do wait for mutex */ ))
+ clib_warning ("failed to enqueue tx evt");
+ }
+ }
+ else
+ {
+ rx_fifo = s->server_rx_fifo;
+ tx_fifo = s->server_tx_fifo;
+
+ ASSERT (rx_fifo->master_thread_index == thread_index);
+ ASSERT (tx_fifo->master_thread_index == thread_index);
+
+ max_dequeue = svm_fifo_max_dequeue (s->server_rx_fifo);
+
+ if (PREDICT_FALSE (max_dequeue == 0))
+ return 0;
+
+ actual_transfer = svm_fifo_peek (rx_fifo, 0 /* relative_offset */ ,
+ max_dequeue, pm->rx_buf[thread_index]);
+
+ /* $$$ your message in this space: parse url, etc. */
+
+ memset (a, 0, sizeof (*a));
+
+ clib_spinlock_lock_if_init (&pm->sessions_lock);
+ pool_get (pm->sessions, ps);
+ memset (ps, 0, sizeof (*ps));
+ ps->server_rx_fifo = rx_fifo;
+ ps->server_tx_fifo = tx_fifo;
+ ps->vpp_server_handle = session_handle (s);
+
+ proxy_index = ps - pm->sessions;
+
+ hash_set (pm->proxy_session_by_server_handle, ps->vpp_server_handle,
+ proxy_index);
+
+ clib_spinlock_unlock_if_init (&pm->sessions_lock);
+
+ a->uri = (char *) pm->client_uri;
+ a->api_context = proxy_index;
+ a->app_index = pm->active_open_app_index;
+ a->mp = 0;
+ vnet_connect_uri (a);
+ }
+
+ return 0;
+}
+
/* *INDENT-OFF* */
/** Session-layer callbacks for the passive (server-facing) proxy app */
static session_cb_vft_t proxy_session_cb_vft = {
  .session_accept_callback = proxy_accept_callback,
  .session_disconnect_callback = proxy_disconnect_callback,
  .session_connected_callback = proxy_connected_callback,
  .add_segment_callback = proxy_add_segment_callback,
  .redirect_connect_callback = proxy_redirect_connect_callback,
  .builtin_server_rx_callback = proxy_rx_callback,
  .session_reset_callback = proxy_reset_callback
};
/* *INDENT-ON* */
+
/**
 * Active open completed (or failed). On success, cross-wire the fifos:
 * the active-open session transmits from the server session's rx fifo
 * and receives into the server session's tx fifo, so data is proxied
 * with zero copies. Then kick the tx path in case data arrived on the
 * server side before the connect completed.
 *
 * NOTE(review): on failure the proxy_session_t allocated by
 * proxy_rx_callback is not freed and the server session is left up --
 * confirm whether a teardown is intended here.
 *
 * NOTE(review): thread_index is narrowed to u8; fine below 256 workers,
 * but the other callbacks use u32 -- confirm.
 */
static int
active_open_connected_callback (u32 app_index, u32 opaque,
				stream_session_t * s, u8 is_fail)
{
  proxy_main_t *pm = &proxy_main;
  proxy_session_t *ps;
  u8 thread_index = vlib_get_thread_index ();
  session_fifo_event_t evt;

  if (is_fail)
    {
      clib_warning ("connection %d failed!", opaque);
      return 0;
    }

  /*
   * Setup proxy session handle.
   */
  clib_spinlock_lock_if_init (&pm->sessions_lock);

  /* opaque is the api_context set in proxy_rx_callback: the pool index */
  ps = pool_elt_at_index (pm->sessions, opaque);
  ps->vpp_active_open_handle = session_handle (s);

  /* Swap in the server session's fifos (note the rx/tx crossover) */
  s->server_tx_fifo = ps->server_rx_fifo;
  s->server_rx_fifo = ps->server_tx_fifo;

  /*
   * Reset the active-open tx-fifo master indices so the active-open session
   * will receive data, etc.
   */
  s->server_tx_fifo->master_session_index = s->session_index;
  s->server_tx_fifo->master_thread_index = s->thread_index;

  /*
   * Account for the active-open session's use of the fifos
   * so they won't disappear until the last session which uses
   * them disappears
   */
  s->server_tx_fifo->refcnt++;
  s->server_rx_fifo->refcnt++;

  hash_set (pm->proxy_session_by_active_open_handle,
	    ps->vpp_active_open_handle, opaque);

  clib_spinlock_unlock_if_init (&pm->sessions_lock);

  /*
   * Send event for active open tx fifo
   */
  if (svm_fifo_set_event (s->server_tx_fifo))
    {
      evt.fifo = s->server_tx_fifo;
      evt.event_type = FIFO_EVENT_APP_TX;
      if (svm_queue_add
	  (pm->active_open_event_queue[thread_index], (u8 *) & evt,
	   0 /* do wait for mutex */ ))
	clib_warning ("failed to enqueue tx evt");
    }

  return 0;
}
+
/** Active-open side was reset: tear down the proxy session pair. */
static void
active_open_reset_callback (stream_session_t * s)
{
  delete_proxy_session (s, 1 /* is_active_open */ );
}
+
/** Accept callback slot for the active-open app; never expected to run
 * (the active-open app does not listen), so nothing to do. */
static int
active_open_create_callback (stream_session_t * s)
{
  return 0;
}
+
/** Active-open side disconnected: tear down the proxy session pair. */
static void
active_open_disconnect_callback (stream_session_t * s)
{
  delete_proxy_session (s, 1 /* is_active_open */ );
}
+
+static int
+active_open_rx_callback (stream_session_t * s)
+{
+ proxy_main_t *pm = &proxy_main;
+ session_fifo_event_t evt;
+ svm_fifo_t *server_rx_fifo;
+ u32 thread_index = vlib_get_thread_index ();
+
+ server_rx_fifo = s->server_rx_fifo;
+
+ /*
+ * Send event for server tx fifo
+ */
+ if (svm_fifo_set_event (server_rx_fifo))
+ {
+ evt.fifo = server_rx_fifo;
+ evt.event_type = FIFO_EVENT_APP_TX;
+ if (svm_queue_add
+ (pm->server_event_queue[thread_index], (u8 *) & evt,
+ 0 /* do wait for mutex */ ))
+ clib_warning ("failed to enqueue server rx evt");
+ }
+
+ return 0;
+}
+
/* *INDENT-OFF* */
/** Session-layer callbacks for the active-open (client-facing) proxy app */
static session_cb_vft_t active_open_clients = {
  .session_reset_callback = active_open_reset_callback,
  .session_connected_callback = active_open_connected_callback,
  .session_accept_callback = active_open_create_callback,
  .session_disconnect_callback = active_open_disconnect_callback,
  .builtin_server_rx_callback = active_open_rx_callback
};
/* *INDENT-ON* */
+
+
/**
 * Create the two internal API (memclnt) clients the proxy needs -- one
 * for the server-facing app, one for the active-open app. Both share
 * the vpp shared-memory input queue.
 */
static void
create_api_loopbacks (vlib_main_t * vm)
{
  proxy_main_t *pm = &proxy_main;
  api_main_t *am = &api_main;
  vl_shmem_hdr_t *shmem_hdr;

  shmem_hdr = am->shmem_hdr;
  pm->vl_input_queue = shmem_hdr->vl_input_queue;
  pm->server_client_index =
    vl_api_memclnt_create_internal ("proxy_server", pm->vl_input_queue);
  pm->active_open_client_index =
    vl_api_memclnt_create_internal ("proxy_active_open", pm->vl_input_queue);
}
+
+static int
+proxy_server_attach ()
+{
+ proxy_main_t *pm = &proxy_main;
+ u64 options[APP_OPTIONS_N_OPTIONS];
+ vnet_app_attach_args_t _a, *a = &_a;
+ u32 segment_size = 512 << 20;
+
+ memset (a, 0, sizeof (*a));
+ memset (options, 0, sizeof (options));
+
+ if (pm->private_segment_size)
+ segment_size = pm->private_segment_size;
+ a->api_client_index = pm->server_client_index;
+ a->session_cb_vft = &proxy_session_cb_vft;
+ a->options = options;
+ a->options[APP_OPTIONS_SEGMENT_SIZE] = segment_size;
+ a->options[APP_OPTIONS_RX_FIFO_SIZE] = pm->fifo_size;
+ a->options[APP_OPTIONS_TX_FIFO_SIZE] = pm->fifo_size;
+ a->options[APP_OPTIONS_PRIVATE_SEGMENT_COUNT] = pm->private_segment_count;
+ a->options[APP_OPTIONS_PREALLOC_FIFO_PAIRS] =
+ pm->prealloc_fifos ? pm->prealloc_fifos : 1;
+
+ a->options[APP_OPTIONS_FLAGS] = APP_OPTIONS_FLAGS_IS_BUILTIN;
+
+ if (vnet_application_attach (a))
+ {
+ clib_warning ("failed to attach server");
+ return -1;
+ }
+ pm->server_app_index = a->app_index;
+
+ return 0;
+}
+
+static int
+active_open_attach (void)
+{
+ proxy_main_t *pm = &proxy_main;
+ vnet_app_attach_args_t _a, *a = &_a;
+ u64 options[16];
+
+ memset (a, 0, sizeof (*a));
+ memset (options, 0, sizeof (options));
+
+ a->api_client_index = pm->active_open_client_index;
+ a->session_cb_vft = &active_open_clients;
+
+ options[APP_OPTIONS_ACCEPT_COOKIE] = 0x12345678;
+ options[APP_OPTIONS_SEGMENT_SIZE] = 512 << 20;
+ options[APP_OPTIONS_RX_FIFO_SIZE] = pm->fifo_size;
+ options[APP_OPTIONS_TX_FIFO_SIZE] = pm->fifo_size;
+ options[APP_OPTIONS_PRIVATE_SEGMENT_COUNT] = pm->private_segment_count;
+ options[APP_OPTIONS_PREALLOC_FIFO_PAIRS] =
+ pm->prealloc_fifos ? pm->prealloc_fifos : 1;
+
+ options[APP_OPTIONS_FLAGS] = APP_OPTIONS_FLAGS_IS_BUILTIN
+ | APP_OPTIONS_FLAGS_IS_PROXY;
+
+ a->options = options;
+
+ if (vnet_application_attach (a))
+ return -1;
+
+ pm->active_open_app_index = a->app_index;
+
+ return 0;
+}
+
+static int
+proxy_server_listen ()
+{
+ proxy_main_t *pm = &proxy_main;
+ vnet_bind_args_t _a, *a = &_a;
+ memset (a, 0, sizeof (*a));
+ a->app_index = pm->server_app_index;
+ a->uri = (char *) pm->server_uri;
+ return vnet_bind_uri (a);
+}
+
/**
 * Bring up the whole proxy: API loopback clients, per-thread state,
 * both app attaches, and the server listen.
 *
 * @return 0 on success, -1 if any attach/listen step fails.
 */
static int
proxy_server_create (vlib_main_t * vm)
{
  proxy_main_t *pm = &proxy_main;
  vlib_thread_main_t *vtm = vlib_get_thread_main ();
  u32 num_threads;
  int i;

  /* First invocation: create the internal API clients */
  if (pm->server_client_index == (u32) ~ 0)
    create_api_loopbacks (vm);

  num_threads = 1 /* main thread */ + vtm->n_threads;
  vec_validate (proxy_main.server_event_queue, num_threads - 1);
  vec_validate (proxy_main.active_open_event_queue, num_threads - 1);
  vec_validate (pm->rx_buf, num_threads - 1);

  /* Per-thread intermediate rx buffers, sized by rcv-buf-size */
  for (i = 0; i < num_threads; i++)
    vec_validate (pm->rx_buf[i], pm->rcv_buffer_size);

  if (proxy_server_attach ())
    {
      clib_warning ("failed to attach server app");
      return -1;
    }
  if (proxy_server_listen ())
    {
      clib_warning ("failed to start listening");
      return -1;
    }
  if (active_open_attach ())
    {
      clib_warning ("failed to attach active open app");
      return -1;
    }

  /* Both vectors end up pointing at the same per-thread vpp event
   * queues; they are kept separate for clarity of intent */
  for (i = 0; i < num_threads; i++)
    {
      pm->active_open_event_queue[i] =
	session_manager_get_vpp_event_queue (i);

      ASSERT (pm->active_open_event_queue[i]);

      pm->server_event_queue[i] = session_manager_get_vpp_event_queue (i);
    }

  return 0;
}
+
+static clib_error_t *
+proxy_server_create_command_fn (vlib_main_t * vm, unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ proxy_main_t *pm = &proxy_main;
+ char *default_server_uri = "tcp://0.0.0.0/23";
+ char *default_client_uri = "tcp://6.0.2.2/23";
+ int rv;
+ u64 tmp;
+
+ pm->fifo_size = 64 << 10;
+ pm->rcv_buffer_size = 1024;
+ pm->prealloc_fifos = 0;
+ pm->private_segment_count = 0;
+ pm->private_segment_size = 0;
+ pm->server_uri = 0;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "fifo-size %d", &pm->fifo_size))
+ pm->fifo_size <<= 10;
+ else if (unformat (input, "rcv-buf-size %d", &pm->rcv_buffer_size))
+ ;
+ else if (unformat (input, "prealloc-fifos %d", &pm->prealloc_fifos))
+ ;
+ else if (unformat (input, "private-segment-count %d",
+ &pm->private_segment_count))
+ ;
+ else if (unformat (input, "private-segment-size %U",
+ unformat_memory_size, &tmp))
+ {
+ if (tmp >= 0x100000000ULL)
+ return clib_error_return
+ (0, "private segment size %lld (%llu) too large", tmp, tmp);
+ pm->private_segment_size = tmp;
+ }
+ else if (unformat (input, "server-uri %s", &pm->server_uri))
+ ;
+ else if (unformat (input, "client-uri %s", &pm->client_uri))
+ ;
+ else
+ return clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
+ }
+
+ if (!pm->server_uri)
+ {
+ clib_warning ("No server-uri provided, Using default: %s",
+ default_server_uri);
+ pm->server_uri = format (0, "%s%c", default_server_uri, 0);
+ }
+ if (!pm->client_uri)
+ {
+ clib_warning ("No client-uri provided, Using default: %s",
+ default_client_uri);
+ pm->client_uri = format (0, "%s%c", default_client_uri, 0);
+ }
+
+ vnet_session_enable_disable (vm, 1 /* turn on session and transport */ );
+
+ rv = proxy_server_create (vm);
+ switch (rv)
+ {
+ case 0:
+ break;
+ default:
+ return clib_error_return (0, "server_create returned %d", rv);
+ }
+
+ return 0;
+}
+
/* *INDENT-OFF* */
/** CLI command "test proxy server" -- starts the built-in tcp proxy. */
VLIB_CLI_COMMAND (proxy_create_command, static) =
{
  .path = "test proxy server",
  .short_help = "test proxy server [server-uri <tcp://ip/port>]"
      "[client-uri <tcp://ip/port>][fifo-size <nn>][rcv-buf-size <nn>]"
      "[prealloc-fifos <nn>][private-segment-size <mem>]"
      "[private-segment-count <nn>]",
  .function = proxy_server_create_command_fn,
};
/* *INDENT-ON* */
+
/**
 * One-time module init: mark the API clients as not yet created and
 * allocate the handle -> proxy-session lookup tables.
 */
clib_error_t *
proxy_main_init (vlib_main_t * vm)
{
  proxy_main_t *pm = &proxy_main;
  /* ~0 == "API loopback clients not yet created" (see proxy_server_create) */
  pm->server_client_index = ~0;
  pm->active_open_client_index = ~0;
  pm->proxy_session_by_active_open_handle = hash_create (0, sizeof (uword));
  pm->proxy_session_by_server_handle = hash_create (0, sizeof (uword));

  /* NOTE(review): pm->sessions_lock is never clib_spinlock_init'd, so the
   * clib_spinlock_*_if_init calls in this file are all no-ops; confirm
   * whether the lock should be initialized when workers are configured. */
  return 0;
}

VLIB_INIT_FUNCTION (proxy_main_init);
+
+/*
+* fd.io coding-style-patch-verification: ON
+*
+* Local Variables:
+* eval: (c-set-style "gnu")
+* End:
+*/
diff --git a/src/vnet/session-apps/proxy.h b/src/vnet/session-apps/proxy.h
new file mode 100644
index 00000000000..4bca0a02cd8
--- /dev/null
+++ b/src/vnet/session-apps/proxy.h
@@ -0,0 +1,93 @@
+
+/*
 * proxy.h - vpp built-in proxy server header
 *
 * Copyright (c) 2015-2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef __included_proxy_h__
+#define __included_proxy_h__
+
+#include <vnet/vnet.h>
+#include <vnet/ip/ip.h>
+
+#include <vppinfra/hash.h>
+#include <vppinfra/error.h>
+#include <svm/svm_fifo_segment.h>
+#include <vnet/session/session.h>
+#include <vnet/session/application_interface.h>
+
/** Per-proxied-connection state. The server (passive) session and its
 * active-open peer share fifos -- the server's rx fifo is the peer's tx
 * fifo and vice versa -- so both handles and the fifos live here. */
typedef struct
{
  svm_fifo_t *server_rx_fifo;	/**< server rx == active-open tx fifo */
  svm_fifo_t *server_tx_fifo;	/**< server tx == active-open rx fifo */

  u64 vpp_server_handle;	/**< session handle, server (passive) side */
  u64 vpp_active_open_handle;	/**< session handle, active-open side */
} proxy_session_t;
+
/** Global state of the built-in proxy application (singleton). */
typedef struct
{
  svm_queue_t *vl_input_queue;	/**< vpe input queue */
  /** per-thread vectors */
  svm_queue_t **server_event_queue;	/**< server app event queues */
  svm_queue_t **active_open_event_queue;	/**< active-open event queues */
  u8 **rx_buf;			/**< intermediate rx buffers */

  u32 cli_node_index;		/**< cli process node index */
  u32 server_client_index;	/**< server API client handle */
  u32 server_app_index;		/**< server app index */
  u32 active_open_client_index;	/**< active open API client handle */
  u32 active_open_app_index;	/**< active open index after attach */

  uword *proxy_session_by_server_handle;	/**< handle -> session pool index */
  uword *proxy_session_by_active_open_handle;	/**< handle -> session pool index */

  /*
   * Configuration params
   */
  u8 *connect_uri;		/**< URI for slave's connect */
  u32 configured_segment_size;
  u32 fifo_size;		/**< fifo size in bytes (CLI value is KB) */
  u32 private_segment_count;	/**< Number of private fifo segs */
  u32 private_segment_size;	/**< size of private fifo segs */
  int rcv_buffer_size;		/**< size of per-thread rx buffers */
  u8 *server_uri;		/**< URI to listen on (proxied side) */
  u8 *client_uri;		/**< URI active opens connect to */

  /*
   * Test state variables
   */
  proxy_session_t *sessions;	/**< Session pool, shared */
  clib_spinlock_t sessions_lock;	/**< guards sessions pool + hashes (when initialized) */
  u32 **connection_index_by_thread;
  pthread_t client_thread_handle;

  /*
   * Flags
   */
  u8 is_init;
  u8 prealloc_fifos;		/**< Request fifo preallocation */
} proxy_main_t;
+
+extern proxy_main_t proxy_main;
+
+#endif /* __included_proxy_h__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */