summaryrefslogtreecommitdiffstats
path: root/src/plugins/hs_apps
diff options
context:
space:
mode:
Diffstat (limited to 'src/plugins/hs_apps')
-rw-r--r--src/plugins/hs_apps/CMakeLists.txt21
-rw-r--r--src/plugins/hs_apps/echo_client.c1038
-rw-r--r--src/plugins/hs_apps/echo_client.h128
-rw-r--r--src/plugins/hs_apps/echo_server.c588
-rw-r--r--src/plugins/hs_apps/hs_apps.c34
-rw-r--r--src/plugins/hs_apps/http_server.c984
-rw-r--r--src/plugins/hs_apps/proxy.c626
-rw-r--r--src/plugins/hs_apps/proxy.h92
8 files changed, 3511 insertions, 0 deletions
diff --git a/src/plugins/hs_apps/CMakeLists.txt b/src/plugins/hs_apps/CMakeLists.txt
new file mode 100644
index 00000000000..d27c8512306
--- /dev/null
+++ b/src/plugins/hs_apps/CMakeLists.txt
@@ -0,0 +1,21 @@
+# Copyright (c) 2019 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+add_vpp_plugin(hs_apps
+ SOURCES
+ echo_client.c
+ echo_server.c
+ hs_apps.c
+ http_server.c
+ proxy.c
+)
diff --git a/src/plugins/hs_apps/echo_client.c b/src/plugins/hs_apps/echo_client.c
new file mode 100644
index 00000000000..bcf4176ca71
--- /dev/null
+++ b/src/plugins/hs_apps/echo_client.c
@@ -0,0 +1,1038 @@
+/*
+ * echo_client.c - vpp built-in echo client code
+ *
+ * Copyright (c) 2017-2019 by Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/vnet.h>
+#include <vlibapi/api.h>
+#include <vlibmemory/api.h>
+#include <hs_apps/echo_client.h>
+
+echo_client_main_t echo_client_main;
+
+#define ECHO_CLIENT_DBG (0)
+#define DBG(_fmt, _args...) \
+ if (ECHO_CLIENT_DBG) \
+ clib_warning (_fmt, ##_args)
+
+/* Deliver an event code to the waiting CLI process. Must run on the
+ * main thread: vlib_process_signal_event is not thread safe. */
+static void
+signal_evt_to_cli_i (int *code)
+{
+  echo_client_main_t *ecm = &echo_client_main;
+  ASSERT (vlib_get_thread_index () == 0);
+  vlib_process_signal_event (ecm->vlib_main, ecm->cli_node_index, *code, 0);
+}
+
+/* Thread-safe wrapper around signal_evt_to_cli_i: workers RPC the
+ * signal over to the main thread, the main thread delivers directly. */
+static void
+signal_evt_to_cli (int code)
+{
+  if (vlib_get_thread_index () != 0)
+    vl_api_rpc_call_main_thread (signal_evt_to_cli_i, (u8 *) & code,
+				 sizeof (code));
+  else
+    signal_evt_to_cli_i (&code);
+}
+
+/* Enqueue the next chunk of the repeating test pattern on session s.
+ * Handles the four combinations of stream/datagram transport and
+ * copy/no-copy mode; in no-copy mode fifo space is claimed without
+ * touching the pattern buffer and vpp is kicked with an explicit TX
+ * event. Accounting (bytes_sent / bytes_to_send) is updated only for
+ * what was actually enqueued. */
+static void
+send_data_chunk (echo_client_main_t * ecm, eclient_session_t * s)
+{
+  u8 *test_data = ecm->connect_test_data;
+  int test_buf_len, test_buf_offset, rv;
+  u32 bytes_this_chunk;
+
+  test_buf_len = vec_len (test_data);
+  ASSERT (test_buf_len > 0);
+  /* Resume the byte pattern where the previous chunk left off */
+  test_buf_offset = s->bytes_sent % test_buf_len;
+  bytes_this_chunk = clib_min (test_buf_len - test_buf_offset,
+			       s->bytes_to_send);
+
+  if (!ecm->is_dgram)
+    {
+      if (ecm->no_copy)
+	{
+	  svm_fifo_t *f = s->data.tx_fifo;
+	  rv = clib_min (svm_fifo_max_enqueue_prod (f), bytes_this_chunk);
+	  svm_fifo_enqueue_nocopy (f, rv);
+	  session_send_io_evt_to_thread_custom (&f->master_session_index,
+						s->thread_index,
+						SESSION_IO_EVT_TX);
+	}
+      else
+	rv = app_send_stream (&s->data, test_data + test_buf_offset,
+			      bytes_this_chunk, 0);
+    }
+  else
+    {
+      if (ecm->no_copy)
+	{
+	  session_dgram_hdr_t hdr;
+	  svm_fifo_t *f = s->data.tx_fifo;
+	  app_session_transport_t *at = &s->data.transport;
+	  u32 max_enqueue = svm_fifo_max_enqueue_prod (f);
+
+	  /* Not even room for the per-datagram header: try again later */
+	  if (max_enqueue <= sizeof (session_dgram_hdr_t))
+	    return;
+
+	  max_enqueue -= sizeof (session_dgram_hdr_t);
+	  rv = clib_min (max_enqueue, bytes_this_chunk);
+
+	  /* Hand-build the datagram header the session layer expects
+	   * ahead of the payload in the tx fifo */
+	  hdr.data_length = rv;
+	  hdr.data_offset = 0;
+	  clib_memcpy_fast (&hdr.rmt_ip, &at->rmt_ip,
+			    sizeof (ip46_address_t));
+	  hdr.is_ip4 = at->is_ip4;
+	  hdr.rmt_port = at->rmt_port;
+	  clib_memcpy_fast (&hdr.lcl_ip, &at->lcl_ip,
+			    sizeof (ip46_address_t));
+	  hdr.lcl_port = at->lcl_port;
+	  svm_fifo_enqueue (f, sizeof (hdr), (u8 *) & hdr);
+	  svm_fifo_enqueue_nocopy (f, rv);
+	  session_send_io_evt_to_thread_custom (&f->master_session_index,
+						s->thread_index,
+						SESSION_IO_EVT_TX);
+	}
+      else
+	rv = app_send_dgram (&s->data, test_data + test_buf_offset,
+			     bytes_this_chunk, 0);
+    }
+
+  /* If we managed to enqueue data... */
+  if (rv > 0)
+    {
+      /* Account for it... */
+      s->bytes_to_send -= rv;
+      s->bytes_sent += rv;
+
+      if (ECHO_CLIENT_DBG)
+	{
+	  /* *INDENT-OFF* */
+	  ELOG_TYPE_DECLARE (e) =
+	    {
+	      .format = "tx-enq: xfer %d bytes, sent %u remain %u",
+	      .format_args = "i4i4i4",
+	    };
+	  /* *INDENT-ON* */
+	  struct
+	  {
+	    u32 data[3];
+	  } *ed;
+	  ed = ELOG_DATA (&vlib_global_main.elog_main, e);
+	  ed->data[0] = rv;
+	  ed->data[1] = s->bytes_sent;
+	  ed->data[2] = s->bytes_to_send;
+	}
+    }
+}
+
+/* Drain whatever is available on session s' rx fifo. When test_bytes
+ * is set the data is copied out and verified against the expected
+ * repeating pattern; otherwise it is dropped without copying. Updates
+ * the per-session receive accounting. */
+static void
+receive_data_chunk (echo_client_main_t * ecm, eclient_session_t * s)
+{
+  svm_fifo_t *rx_fifo = s->data.rx_fifo;
+  u32 thread_index = vlib_get_thread_index ();
+  int n_read, i;
+
+  if (ecm->test_bytes)
+    {
+      if (!ecm->is_dgram)
+	n_read = app_recv_stream (&s->data, ecm->rx_buf[thread_index],
+				  vec_len (ecm->rx_buf[thread_index]));
+      else
+	n_read = app_recv_dgram (&s->data, ecm->rx_buf[thread_index],
+				 vec_len (ecm->rx_buf[thread_index]));
+    }
+  else
+    {
+      /* Fast path: drop everything pending without copying it out */
+      n_read = svm_fifo_max_dequeue_cons (rx_fifo);
+      svm_fifo_dequeue_drop (rx_fifo, n_read);
+    }
+
+  if (n_read > 0)
+    {
+      if (ECHO_CLIENT_DBG)
+	{
+	  /* *INDENT-OFF* */
+	  ELOG_TYPE_DECLARE (e) =
+	    {
+	      .format = "rx-deq: %d bytes",
+	      .format_args = "i4",
+	    };
+	  /* *INDENT-ON* */
+	  struct
+	  {
+	    u32 data[1];
+	  } *ed;
+	  ed = ELOG_DATA (&vlib_global_main.elog_main, e);
+	  ed->data[0] = n_read;
+	}
+
+      if (ecm->test_bytes)
+	{
+	  /* Each echoed byte must equal (total offset & 0xff) */
+	  for (i = 0; i < n_read; i++)
+	    {
+	      if (ecm->rx_buf[thread_index][i]
+		  != ((s->bytes_received + i) & 0xff))
+		{
+		  clib_warning ("read %d error at byte %lld, 0x%x not 0x%x",
+				n_read, s->bytes_received + i,
+				ecm->rx_buf[thread_index][i],
+				((s->bytes_received + i) & 0xff));
+		  ecm->test_failed = 1;
+		}
+	    }
+	}
+      ASSERT (n_read <= s->bytes_to_receive);
+      s->bytes_to_receive -= n_read;
+      s->bytes_received += n_read;
+    }
+}
+
+/* Per-thread input node driving the test: pulls a batch of ready
+ * connections, transmits/accounts data on each, and disconnects and
+ * retires sessions that have finished both directions. Signals event 2
+ * to the CLI process when the last connection completes. */
+static uword
+echo_client_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
+		     vlib_frame_t * frame)
+{
+  echo_client_main_t *ecm = &echo_client_main;
+  int my_thread_index = vlib_get_thread_index ();
+  eclient_session_t *sp;
+  int i;
+  int delete_session;
+  u32 *connection_indices;
+  u32 *connections_this_batch;
+  u32 nconnections_this_batch;
+
+  connection_indices = ecm->connection_index_by_thread[my_thread_index];
+  connections_this_batch =
+    ecm->connections_this_batch_by_thread[my_thread_index];
+
+  if ((ecm->run_test != ECHO_CLIENTS_RUNNING) ||
+      ((vec_len (connection_indices) == 0)
+       && vec_len (connections_this_batch) == 0))
+    return 0;
+
+  /* Grab another pile of connections */
+  if (PREDICT_FALSE (vec_len (connections_this_batch) == 0))
+    {
+      nconnections_this_batch =
+	clib_min (ecm->connections_per_batch, vec_len (connection_indices));
+
+      ASSERT (nconnections_this_batch > 0);
+      vec_validate (connections_this_batch, nconnections_this_batch - 1);
+      /* Move the tail of connection_indices into the working batch */
+      clib_memcpy_fast (connections_this_batch,
+			connection_indices + vec_len (connection_indices)
+			- nconnections_this_batch,
+			nconnections_this_batch * sizeof (u32));
+      _vec_len (connection_indices) -= nconnections_this_batch;
+    }
+
+  /* Detect a batch that makes no progress across many node invocations */
+  if (PREDICT_FALSE (ecm->prev_conns != ecm->connections_per_batch
+		     && ecm->prev_conns == vec_len (connections_this_batch)))
+    {
+      ecm->repeats++;
+      ecm->prev_conns = vec_len (connections_this_batch);
+      if (ecm->repeats == 500000)
+	{
+	  clib_warning ("stuck clients");
+	}
+    }
+  else
+    {
+      ecm->prev_conns = vec_len (connections_this_batch);
+      ecm->repeats = 0;
+    }
+
+  for (i = 0; i < vec_len (connections_this_batch); i++)
+    {
+      delete_session = 1;
+
+      sp = pool_elt_at_index (ecm->sessions, connections_this_batch[i]);
+
+      if (sp->bytes_to_send > 0)
+	{
+	  send_data_chunk (ecm, sp);
+	  delete_session = 0;
+	}
+      if (sp->bytes_to_receive > 0)
+	{
+	  delete_session = 0;
+	}
+      if (PREDICT_FALSE (delete_session == 1))
+	{
+	  session_t *s;
+
+	  clib_atomic_fetch_add (&ecm->tx_total, sp->bytes_sent);
+	  clib_atomic_fetch_add (&ecm->rx_total, sp->bytes_received);
+	  s = session_get_from_handle_if_valid (sp->vpp_session_handle);
+
+	  if (s)
+	    {
+	      vnet_disconnect_args_t _a, *a = &_a;
+	      a->handle = session_handle (s);
+	      a->app_index = ecm->app_index;
+	      vnet_disconnect_session (a);
+
+	      vec_delete (connections_this_batch, 1, i);
+	      i--;
+	      clib_atomic_fetch_add (&ecm->ready_connections, -1);
+	    }
+	  else
+	    {
+	      clib_warning ("session AWOL?");
+	      vec_delete (connections_this_batch, 1, i);
+	      /* BUGFIX: keep the index in sync with the shrunken vector,
+	       * as the branch above does; otherwise the element that
+	       * slid into slot i is skipped this pass. */
+	      i--;
+	    }
+
+	  /* Kick the debug CLI process */
+	  if (ecm->ready_connections == 0)
+	    {
+	      signal_evt_to_cli (2);
+	    }
+	}
+    }
+
+  ecm->connection_index_by_thread[my_thread_index] = connection_indices;
+  ecm->connections_this_batch_by_thread[my_thread_index] =
+    connections_this_batch;
+  return 0;
+}
+
+/* *INDENT-OFF* */
+/* Input node is disabled until a test starts; the CLI handler flips it
+ * to polling on every worker. */
+VLIB_REGISTER_NODE (echo_clients_node) =
+{
+  .function = echo_client_node_fn,
+  .name = "echo-clients",
+  .type = VLIB_NODE_TYPE_INPUT,
+  .state = VLIB_NODE_STATE_DISABLED,
+};
+/* *INDENT-ON* */
+
+/* Register an internal (shared-memory loopback) API client so the
+ * plugin can attach to the session layer without an external client. */
+static int
+create_api_loopback (echo_client_main_t * ecm)
+{
+  api_main_t *am = &api_main;
+  vl_shmem_hdr_t *shmem_hdr;
+
+  shmem_hdr = am->shmem_hdr;
+  ecm->vl_input_queue = shmem_hdr->vl_input_queue;
+  ecm->my_client_index = vl_api_memclnt_create_internal ("echo_client",
+							 ecm->vl_input_queue);
+  return 0;
+}
+
+/* One-time initialization: API loopback client, 4MB repeating test
+ * pattern, per-thread rx buffers and per-thread bookkeeping vectors.
+ * Guarded by ecm->is_init so it runs once per vpp lifetime. */
+static int
+echo_clients_init (vlib_main_t * vm)
+{
+  echo_client_main_t *ecm = &echo_client_main;
+  vlib_thread_main_t *vtm = vlib_get_thread_main ();
+  u32 num_threads;
+  int i;
+
+  if (create_api_loopback (ecm))
+    return -1;
+
+  num_threads = 1 /* main thread */  + vtm->n_threads;
+
+  /* Init test data. Big buffer */
+  vec_validate (ecm->connect_test_data, 4 * 1024 * 1024 - 1);
+  for (i = 0; i < vec_len (ecm->connect_test_data); i++)
+    ecm->connect_test_data[i] = i & 0xff;
+
+  /* One rx staging buffer per thread, sized to the whole pattern */
+  vec_validate (ecm->rx_buf, num_threads - 1);
+  for (i = 0; i < num_threads; i++)
+    vec_validate (ecm->rx_buf[i], vec_len (ecm->connect_test_data) - 1);
+
+  ecm->is_init = 1;
+
+  vec_validate (ecm->connection_index_by_thread, vtm->n_vlib_mains);
+  vec_validate (ecm->connections_this_batch_by_thread, vtm->n_vlib_mains);
+  vec_validate (ecm->quic_session_index_by_thread, vtm->n_vlib_mains);
+  vec_validate (ecm->vpp_event_queue, vtm->n_vlib_mains);
+
+  return 0;
+}
+
+/* Called when the QUIC connection (quic session) itself comes up: open
+ * ecm->quic_streams stream sessions over it by re-connecting with the
+ * quic session handle in transport_opts and port 0 (create-stream flag),
+ * then remember the quic session for later cleanup. */
+static int
+quic_echo_clients_qsession_connected_callback (u32 app_index, u32 api_context,
+					       session_t * s, u8 is_fail)
+{
+  echo_client_main_t *ecm = &echo_client_main;
+  vnet_connect_args_t *a = 0;
+  int rv;
+  u8 thread_index = vlib_get_thread_index ();
+  session_endpoint_cfg_t sep = SESSION_ENDPOINT_CFG_NULL;
+  u32 stream_n;
+
+  DBG ("QUIC Connection handle %d", session_handle (s));
+
+  /* Heap-allocate the (large) connect args; one element is enough */
+  vec_validate (a, 0);
+  a->uri = (char *) ecm->connect_uri;
+  parse_uri (a->uri, &sep);
+  sep.transport_opts = session_handle (s);
+  sep.port = 0;			/* QUIC: create a stream flag */
+
+  for (stream_n = 0; stream_n < ecm->quic_streams; stream_n++)
+    {
+      clib_memset (a, 0, sizeof (*a));
+      a->app_index = ecm->app_index;
+      a->api_context = -1 - api_context;
+      clib_memcpy (&a->sep_ext, &sep, sizeof (sep));
+
+      DBG ("QUIC opening stream %d", stream_n);
+      if ((rv = vnet_connect (a)))
+	{
+	  clib_error ("Stream session %d opening failed: %d", stream_n, rv);
+	  /* BUGFIX: don't leak the connect args on the error path */
+	  vec_free (a);
+	  return -1;
+	}
+      DBG ("QUIC stream %d connected", stream_n);
+    }
+  vec_add1 (ecm->quic_session_index_by_thread[thread_index],
+	    session_handle (s));
+  vec_free (a);
+  return 0;
+}
+
+/* QUIC connected callback. Quic (listener-less) sessions are diverted
+ * to the qsession handler to open streams; stream sessions are set up
+ * as regular echo sessions. NOTE(review): the stream-setup tail of this
+ * function duplicates echo_clients_session_connected_callback almost
+ * line for line — a shared helper would keep the two in sync. */
+static int
+quic_echo_clients_session_connected_callback (u32 app_index, u32 api_context,
+					      session_t * s, u8 is_fail)
+{
+  echo_client_main_t *ecm = &echo_client_main;
+  eclient_session_t *session;
+  u32 session_index;
+  u8 thread_index;
+
+  if (PREDICT_FALSE (ecm->run_test != ECHO_CLIENTS_STARTING))
+    return -1;
+
+  if (is_fail)
+    {
+      clib_warning ("connection %d failed!", api_context);
+      ecm->run_test = ECHO_CLIENTS_EXITING;
+      signal_evt_to_cli (-1);
+      return 0;
+    }
+
+  /* No listener: this is the quic connection itself, not a stream */
+  if (s->listener_handle == SESSION_INVALID_HANDLE)
+    return quic_echo_clients_qsession_connected_callback (app_index,
+							  api_context, s,
+							  is_fail);
+  DBG ("STREAM Connection callback %d", api_context);
+
+  thread_index = s->thread_index;
+  ASSERT (thread_index == vlib_get_thread_index ()
+	  || session_transport_service_type (s) == TRANSPORT_SERVICE_CL);
+
+  if (!ecm->vpp_event_queue[thread_index])
+    ecm->vpp_event_queue[thread_index] =
+      session_main_get_vpp_event_queue (thread_index);
+
+  /*
+   * Setup session
+   */
+  clib_spinlock_lock_if_init (&ecm->sessions_lock);
+  pool_get (ecm->sessions, session);
+  clib_spinlock_unlock_if_init (&ecm->sessions_lock);
+
+  clib_memset (session, 0, sizeof (*session));
+  session_index = session - ecm->sessions;
+  session->bytes_to_send = ecm->bytes_to_send;
+  session->bytes_to_receive = ecm->no_return ? 0ULL : ecm->bytes_to_send;
+  session->data.rx_fifo = s->rx_fifo;
+  session->data.rx_fifo->client_session_index = session_index;
+  session->data.tx_fifo = s->tx_fifo;
+  session->data.tx_fifo->client_session_index = session_index;
+  session->data.vpp_evt_q = ecm->vpp_event_queue[thread_index];
+  session->vpp_session_handle = session_handle (s);
+
+  if (ecm->is_dgram)
+    {
+      transport_connection_t *tc;
+      tc = session_get_transport (s);
+      clib_memcpy_fast (&session->data.transport, tc,
+			sizeof (session->data.transport));
+      session->data.is_dgram = 1;
+    }
+
+  vec_add1 (ecm->connection_index_by_thread[thread_index], session_index);
+  clib_atomic_fetch_add (&ecm->ready_connections, 1);
+  if (ecm->ready_connections == ecm->expected_connections)
+    {
+      ecm->run_test = ECHO_CLIENTS_RUNNING;
+      /* Signal the CLI process that the action is starting... */
+      signal_evt_to_cli (1);
+    }
+
+  return 0;
+}
+
+/* Default connected callback (TCP/UDP/TLS): allocate an eclient
+ * session, wire its fifos back to the pool index, and when the last
+ * expected connection is up flip the test to RUNNING and wake the CLI. */
+static int
+echo_clients_session_connected_callback (u32 app_index, u32 api_context,
+					 session_t * s, u8 is_fail)
+{
+  echo_client_main_t *ecm = &echo_client_main;
+  eclient_session_t *session;
+  u32 session_index;
+  u8 thread_index;
+
+  if (PREDICT_FALSE (ecm->run_test != ECHO_CLIENTS_STARTING))
+    return -1;
+
+  if (is_fail)
+    {
+      clib_warning ("connection %d failed!", api_context);
+      ecm->run_test = ECHO_CLIENTS_EXITING;
+      signal_evt_to_cli (-1);
+      return 0;
+    }
+
+  thread_index = s->thread_index;
+  ASSERT (thread_index == vlib_get_thread_index ()
+	  || session_transport_service_type (s) == TRANSPORT_SERVICE_CL);
+
+  /* Cache the per-thread vpp event queue on first use */
+  if (!ecm->vpp_event_queue[thread_index])
+    ecm->vpp_event_queue[thread_index] =
+      session_main_get_vpp_event_queue (thread_index);
+
+  /*
+   * Setup session
+   */
+  clib_spinlock_lock_if_init (&ecm->sessions_lock);
+  pool_get (ecm->sessions, session);
+  clib_spinlock_unlock_if_init (&ecm->sessions_lock);
+
+  clib_memset (session, 0, sizeof (*session));
+  session_index = session - ecm->sessions;
+  session->bytes_to_send = ecm->bytes_to_send;
+  session->bytes_to_receive = ecm->no_return ? 0ULL : ecm->bytes_to_send;
+  /* Fifos carry the pool index so the rx callback can find us */
+  session->data.rx_fifo = s->rx_fifo;
+  session->data.rx_fifo->client_session_index = session_index;
+  session->data.tx_fifo = s->tx_fifo;
+  session->data.tx_fifo->client_session_index = session_index;
+  session->data.vpp_evt_q = ecm->vpp_event_queue[thread_index];
+  session->vpp_session_handle = session_handle (s);
+
+  if (ecm->is_dgram)
+    {
+      transport_connection_t *tc;
+      tc = session_get_transport (s);
+      clib_memcpy_fast (&session->data.transport, tc,
+			sizeof (session->data.transport));
+      session->data.is_dgram = 1;
+    }
+
+  vec_add1 (ecm->connection_index_by_thread[thread_index], session_index);
+  clib_atomic_fetch_add (&ecm->ready_connections, 1);
+  if (ecm->ready_connections == ecm->expected_connections)
+    {
+      ecm->run_test = ECHO_CLIENTS_RUNNING;
+      /* Signal the CLI process that the action is starting... */
+      signal_evt_to_cli (1);
+    }
+
+  return 0;
+}
+
+/* Transport reset: warn if the connection was still active, then
+ * disconnect it so the session layer can clean up. */
+static void
+echo_clients_session_reset_callback (session_t * s)
+{
+  echo_client_main_t *ecm = &echo_client_main;
+  vnet_disconnect_args_t _a = { 0 }, *a = &_a;
+
+  if (s->session_state == SESSION_STATE_READY)
+    clib_warning ("Reset active connection %U", format_session, s, 2);
+
+  a->handle = session_handle (s);
+  a->app_index = ecm->app_index;
+  vnet_disconnect_session (a);
+  return;
+}
+
+/* Accept callback: the client never listens, so nothing to do */
+static int
+echo_clients_session_create_callback (session_t * s)
+{
+  return 0;
+}
+
+/* Peer-initiated disconnect: confirm it back to the session layer */
+static void
+echo_clients_session_disconnect_callback (session_t * s)
+{
+  echo_client_main_t *ecm = &echo_client_main;
+  vnet_disconnect_args_t _a = { 0 }, *a = &_a;
+  a->handle = session_handle (s);
+  a->app_index = ecm->app_index;
+  vnet_disconnect_session (a);
+  return;
+}
+
+/* App-initiated disconnect helper (same body as the disconnect
+ * callback, exported for use outside the vft) */
+void
+echo_clients_session_disconnect (session_t * s)
+{
+  echo_client_main_t *ecm = &echo_client_main;
+  vnet_disconnect_args_t _a = { 0 }, *a = &_a;
+  a->handle = session_handle (s);
+  a->app_index = ecm->app_index;
+  vnet_disconnect_session (a);
+}
+
+/* Builtin rx callback: drain/verify one chunk; if data remains in the
+ * fifo re-post a BUILTIN_RX event so we get called again. Sessions that
+ * receive data outside the RUNNING state are torn down. */
+static int
+echo_clients_rx_callback (session_t * s)
+{
+  echo_client_main_t *ecm = &echo_client_main;
+  eclient_session_t *sp;
+
+  if (PREDICT_FALSE (ecm->run_test != ECHO_CLIENTS_RUNNING))
+    {
+      echo_clients_session_disconnect (s);
+      return -1;
+    }
+
+  /* client_session_index was stashed on the fifo at connect time */
+  sp = pool_elt_at_index (ecm->sessions, s->rx_fifo->client_session_index);
+  receive_data_chunk (ecm, sp);
+
+  if (svm_fifo_max_dequeue_cons (s->rx_fifo))
+    {
+      if (svm_fifo_set_event (s->rx_fifo))
+	session_send_io_evt_to_thread (s->rx_fifo, SESSION_IO_EVT_BUILTIN_RX);
+    }
+  return 0;
+}
+
+/* Segment-added notification: nothing to track, accept it */
+int
+echo_client_add_segment_callback (u32 client_index, u64 segment_handle)
+{
+  /* New heaps may be added */
+  return 0;
+}
+
+/* *INDENT-OFF* */
+/* Session-layer callback table. NOTE(review): not const — attach()
+ * rewrites session_connected_callback for QUIC runs. */
+static session_cb_vft_t echo_clients = {
+  .session_reset_callback = echo_clients_session_reset_callback,
+  .session_connected_callback = echo_clients_session_connected_callback,
+  .session_accept_callback = echo_clients_session_create_callback,
+  .session_disconnect_callback = echo_clients_session_disconnect_callback,
+  .builtin_app_rx_callback = echo_clients_rx_callback,
+  .add_segment_callback = echo_client_add_segment_callback
+};
+/* *INDENT-ON* */
+
+/* Attach the echo client application to the session layer with the
+ * configured segment/fifo parameters and optional app namespace.
+ * Selects the connected callback to match the transport for this run. */
+static clib_error_t *
+echo_clients_attach (u8 * appns_id, u64 appns_flags, u64 appns_secret)
+{
+  u32 prealloc_fifos, segment_size = 256 << 20;
+  echo_client_main_t *ecm = &echo_client_main;
+  vnet_app_attach_args_t _a, *a = &_a;
+  u64 options[16];
+  int rv;
+
+  clib_memset (a, 0, sizeof (*a));
+  clib_memset (options, 0, sizeof (options));
+
+  a->api_client_index = ecm->my_client_index;
+  /* BUGFIX: echo_clients is static and survives across test runs, so
+   * always set the callback explicitly; previously a QUIC run left the
+   * QUIC callback installed for subsequent non-QUIC runs. */
+  if (ecm->transport_proto == TRANSPORT_PROTO_QUIC)
+    echo_clients.session_connected_callback =
+      quic_echo_clients_session_connected_callback;
+  else
+    echo_clients.session_connected_callback =
+      echo_clients_session_connected_callback;
+  a->session_cb_vft = &echo_clients;
+
+  prealloc_fifos = ecm->prealloc_fifos ? ecm->expected_connections : 1;
+
+  if (ecm->private_segment_size)
+    segment_size = ecm->private_segment_size;
+
+  options[APP_OPTIONS_ACCEPT_COOKIE] = 0x12345678;
+  options[APP_OPTIONS_SEGMENT_SIZE] = segment_size;
+  options[APP_OPTIONS_ADD_SEGMENT_SIZE] = segment_size;
+  options[APP_OPTIONS_RX_FIFO_SIZE] = ecm->fifo_size;
+  options[APP_OPTIONS_TX_FIFO_SIZE] = ecm->fifo_size;
+  options[APP_OPTIONS_PRIVATE_SEGMENT_COUNT] = ecm->private_segment_count;
+  options[APP_OPTIONS_PREALLOC_FIFO_PAIRS] = prealloc_fifos;
+  options[APP_OPTIONS_FLAGS] = APP_OPTIONS_FLAGS_IS_BUILTIN;
+  options[APP_OPTIONS_TLS_ENGINE] = ecm->tls_engine;
+  if (appns_id)
+    {
+      options[APP_OPTIONS_FLAGS] |= appns_flags;
+      options[APP_OPTIONS_NAMESPACE_SECRET] = appns_secret;
+    }
+  a->options = options;
+  a->namespace_id = appns_id;
+
+  if ((rv = vnet_application_attach (a)))
+    return clib_error_return (0, "attach returned %d", rv);
+
+  ecm->app_index = a->app_index;
+  return 0;
+}
+
+/* Detach the application so the next run can use different fifo and
+ * segment sizes; clears the attached flag unconditionally. */
+static int
+echo_clients_detach ()
+{
+  echo_client_main_t *ecm = &echo_client_main;
+  vnet_app_detach_args_t _da, *da = &_da;
+  int rv;
+
+  da->app_index = ecm->app_index;
+  da->api_client_index = ~0;
+  rv = vnet_application_detach (da);
+  ecm->test_client_attached = 0;
+  ecm->app_index = ~0;
+  return rv;
+}
+
+/* Placeholder body for the optional tx pthread (ECHO_CLIENT_PTHREAD) */
+static void *
+echo_client_thread_fn (void *arg)
+{
+  return 0;
+}
+
+/** Start a transmit thread (one-shot: no-op if already started).
+ *  Returns 0 on success, -1 if pthread_create fails. */
+int
+echo_clients_start_tx_pthread (echo_client_main_t * ecm)
+{
+  if (ecm->client_thread_handle == 0)
+    {
+      int rv = pthread_create (&ecm->client_thread_handle,
+			       NULL /*attr */ ,
+			       echo_client_thread_fn, 0);
+      if (rv)
+	{
+	  ecm->client_thread_handle = 0;
+	  return -1;
+	}
+    }
+  return 0;
+}
+
+/* Fire off n_clients connects against the configured URI, pacing the
+ * setups (suspend every 16 calls, back off if more than 128 connects
+ * are outstanding) so the connect machinery is not overrun. */
+clib_error_t *
+echo_clients_connect (vlib_main_t * vm, u32 n_clients)
+{
+  echo_client_main_t *ecm = &echo_client_main;
+  vnet_connect_args_t _a, *a = &_a;
+  int i, rv;
+
+  clib_memset (a, 0, sizeof (*a));
+
+  for (i = 0; i < n_clients; i++)
+    {
+      a->uri = (char *) ecm->connect_uri;
+      a->api_context = i;
+      a->app_index = ecm->app_index;
+      if ((rv = vnet_connect_uri (a)))
+	return clib_error_return (0, "connect returned: %d", rv);
+
+      /* Crude pacing for call setups */
+      if ((i % 16) == 0)
+	vlib_process_suspend (vm, 100e-6);
+      ASSERT (i + 1 >= ecm->ready_connections);
+      while (i + 1 - ecm->ready_connections > 128)
+	vlib_process_suspend (vm, 1e-3);
+    }
+  return 0;
+}
+
+#define ec_cli_output(_fmt, _args...) \
+ if (!ecm->no_output) \
+ vlib_cli_output(vm, _fmt, ##_args)
+
+/* CLI handler for "test echo clients": parses test parameters, attaches
+ * the app, enables the per-thread input nodes, launches the connects and
+ * then parks twice — first for event 1 (all sessions up) within
+ * syn_timeout, then for event 2 (all sessions done) within test_timeout —
+ * before printing throughput and tearing everything down. */
+static clib_error_t *
+echo_clients_command_fn (vlib_main_t * vm,
+			 unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+  echo_client_main_t *ecm = &echo_client_main;
+  vlib_thread_main_t *thread_main = vlib_get_thread_main ();
+  u64 tmp, total_bytes, appns_flags = 0, appns_secret = 0;
+  f64 test_timeout = 20.0, syn_timeout = 20.0, delta;
+  char *default_uri = "tcp://6.0.1.1/1234";
+  uword *event_data = 0, event_type;
+  f64 time_before_connects;
+  u32 n_clients = 1;
+  int preallocate_sessions = 0;
+  char *transfer_type;
+  clib_error_t *error = 0;
+  u8 *appns_id = 0;
+  int i;
+  session_endpoint_cfg_t sep = SESSION_ENDPOINT_CFG_NULL;
+  int rv;
+
+  /* Reset per-run defaults; a previous run may have changed any of these */
+  ecm->quic_streams = 1;
+  ecm->bytes_to_send = 8192;
+  ecm->no_return = 0;
+  ecm->fifo_size = 64 << 10;
+  ecm->connections_per_batch = 1000;
+  ecm->private_segment_count = 0;
+  ecm->private_segment_size = 0;
+  ecm->no_output = 0;
+  ecm->test_bytes = 0;
+  ecm->test_failed = 0;
+  ecm->vlib_main = vm;
+  ecm->tls_engine = TLS_ENGINE_OPENSSL;
+  ecm->no_copy = 0;
+  ecm->run_test = ECHO_CLIENTS_STARTING;
+
+  if (thread_main->n_vlib_mains > 1)
+    clib_spinlock_init (&ecm->sessions_lock);
+  vec_free (ecm->connect_uri);
+
+  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+    {
+      if (unformat (input, "uri %s", &ecm->connect_uri))
+	;
+      else if (unformat (input, "nclients %d", &n_clients))
+	;
+      else if (unformat (input, "quic-streams %d", &ecm->quic_streams))
+	;
+      else if (unformat (input, "mbytes %lld", &tmp))
+	ecm->bytes_to_send = tmp << 20;
+      else if (unformat (input, "gbytes %lld", &tmp))
+	ecm->bytes_to_send = tmp << 30;
+      else if (unformat (input, "bytes %lld", &ecm->bytes_to_send))
+	;
+      else if (unformat (input, "test-timeout %f", &test_timeout))
+	;
+      else if (unformat (input, "syn-timeout %f", &syn_timeout))
+	;
+      else if (unformat (input, "no-return"))
+	ecm->no_return = 1;
+      else if (unformat (input, "fifo-size %d", &ecm->fifo_size))
+	ecm->fifo_size <<= 10;
+      else if (unformat (input, "private-segment-count %d",
+			 &ecm->private_segment_count))
+	;
+      else if (unformat (input, "private-segment-size %U",
+			 unformat_memory_size, &tmp))
+	{
+	  if (tmp >= 0x100000000ULL)
+	    return clib_error_return
+	      (0, "private segment size %lld (%llu) too large", tmp, tmp);
+	  ecm->private_segment_size = tmp;
+	}
+      else if (unformat (input, "preallocate-fifos"))
+	ecm->prealloc_fifos = 1;
+      else if (unformat (input, "preallocate-sessions"))
+	preallocate_sessions = 1;
+      else
+	if (unformat (input, "client-batch %d", &ecm->connections_per_batch))
+	;
+      else if (unformat (input, "appns %_%v%_", &appns_id))
+	;
+      else if (unformat (input, "all-scope"))
+	appns_flags |= (APP_OPTIONS_FLAGS_USE_GLOBAL_SCOPE
+			| APP_OPTIONS_FLAGS_USE_LOCAL_SCOPE);
+      else if (unformat (input, "local-scope"))
+	appns_flags = APP_OPTIONS_FLAGS_USE_LOCAL_SCOPE;
+      else if (unformat (input, "global-scope"))
+	appns_flags = APP_OPTIONS_FLAGS_USE_GLOBAL_SCOPE;
+      else if (unformat (input, "secret %lu", &appns_secret))
+	;
+      else if (unformat (input, "no-output"))
+	ecm->no_output = 1;
+      else if (unformat (input, "test-bytes"))
+	ecm->test_bytes = 1;
+      else if (unformat (input, "tls-engine %d", &ecm->tls_engine))
+	;
+      else
+	return clib_error_return (0, "failed: unknown input `%U'",
+				  format_unformat_error, input);
+    }
+
+  /* Store cli process node index for signalling */
+  ecm->cli_node_index =
+    vlib_get_current_process (vm)->node_runtime.node_index;
+
+  if (ecm->is_init == 0)
+    {
+      if (echo_clients_init (vm))
+	return clib_error_return (0, "failed init");
+    }
+
+
+  ecm->ready_connections = 0;
+  ecm->expected_connections = n_clients * ecm->quic_streams;
+  ecm->rx_total = 0;
+  ecm->tx_total = 0;
+
+  if (!ecm->connect_uri)
+    {
+      clib_warning ("No uri provided. Using default: %s", default_uri);
+      ecm->connect_uri = format (0, "%s%c", default_uri, 0);
+    }
+
+  if ((rv = parse_uri ((char *) ecm->connect_uri, &sep)))
+    return clib_error_return (0, "Uri parse error: %d", rv);
+  ecm->transport_proto = sep.transport_proto;
+  ecm->is_dgram = (sep.transport_proto == TRANSPORT_PROTO_UDP);
+
+#if ECHO_CLIENT_PTHREAD
+  /* NOTE(review): echo_clients_start_tx_pthread takes an ecm argument;
+   * this call would not compile if ECHO_CLIENT_PTHREAD were defined —
+   * confirm the intended call is echo_clients_start_tx_pthread (ecm). */
+  echo_clients_start_tx_pthread ();
+#endif
+
+  vlib_worker_thread_barrier_sync (vm);
+  vnet_session_enable_disable (vm, 1 /* turn on session and transports */ );
+  vlib_worker_thread_barrier_release (vm);
+
+  if (ecm->test_client_attached == 0)
+    {
+      if ((error = echo_clients_attach (appns_id, appns_flags, appns_secret)))
+	{
+	  vec_free (appns_id);
+	  /* NOTE(review): clib_error_report may dispose of the error it
+	   * prints; returning the same error afterwards should be
+	   * double-checked against the clib error ownership rules. */
+	  clib_error_report (error);
+	  return error;
+	}
+      vec_free (appns_id);
+    }
+  ecm->test_client_attached = 1;
+
+  /* Turn on the builtin client input nodes */
+  for (i = 0; i < thread_main->n_vlib_mains; i++)
+    vlib_node_set_state (vlib_mains[i], echo_clients_node.index,
+			 VLIB_NODE_STATE_POLLING);
+
+  if (preallocate_sessions)
+    pool_init_fixed (ecm->sessions, 1.1 * n_clients);
+
+  /* Fire off connect requests */
+  time_before_connects = vlib_time_now (vm);
+  if ((error = echo_clients_connect (vm, n_clients)))
+    goto cleanup;
+
+  /* Park until the sessions come up, or ten seconds elapse... */
+  vlib_process_wait_for_event_or_clock (vm, syn_timeout);
+  event_type = vlib_process_get_events (vm, &event_data);
+  switch (event_type)
+    {
+    case ~0:
+      ec_cli_output ("Timeout with only %d sessions active...",
+		     ecm->ready_connections);
+      error = clib_error_return (0, "failed: syn timeout with %d sessions",
+				 ecm->ready_connections);
+      goto cleanup;
+
+    case 1:
+      /* Event 1: all expected connections established */
+      delta = vlib_time_now (vm) - time_before_connects;
+      if (delta != 0.0)
+	ec_cli_output ("%d three-way handshakes in %.2f seconds %.2f/s",
+		       n_clients, delta, ((f64) n_clients) / delta);
+
+      ecm->test_start_time = vlib_time_now (ecm->vlib_main);
+      ec_cli_output ("Test started at %.6f", ecm->test_start_time);
+      break;
+
+    default:
+      ec_cli_output ("unexpected event(1): %d", event_type);
+      error = clib_error_return (0, "failed: unexpected event(1): %d",
+				 event_type);
+      goto cleanup;
+    }
+
+  /* Now wait for the sessions to finish... */
+  vlib_process_wait_for_event_or_clock (vm, test_timeout);
+  event_type = vlib_process_get_events (vm, &event_data);
+  switch (event_type)
+    {
+    case ~0:
+      ec_cli_output ("Timeout with %d sessions still active...",
+		     ecm->ready_connections);
+      error = clib_error_return (0, "failed: timeout with %d sessions",
+				 ecm->ready_connections);
+      goto cleanup;
+
+    case 2:
+      /* Event 2: the last session retired its data */
+      ecm->test_end_time = vlib_time_now (vm);
+      ec_cli_output ("Test finished at %.6f", ecm->test_end_time);
+      break;
+
+    default:
+      ec_cli_output ("unexpected event(2): %d", event_type);
+      error = clib_error_return (0, "failed: unexpected event(2): %d",
+				 event_type);
+      goto cleanup;
+    }
+
+  delta = ecm->test_end_time - ecm->test_start_time;
+  if (delta != 0.0)
+    {
+      /* half-duplex counts tx only; full-duplex counts the echoed rx */
+      total_bytes = (ecm->no_return ? ecm->tx_total : ecm->rx_total);
+      transfer_type = ecm->no_return ? "half-duplex" : "full-duplex";
+      ec_cli_output ("%lld bytes (%lld mbytes, %lld gbytes) in %.2f seconds",
+		     total_bytes, total_bytes / (1ULL << 20),
+		     total_bytes / (1ULL << 30), delta);
+      ec_cli_output ("%.2f bytes/second %s", ((f64) total_bytes) / (delta),
+		     transfer_type);
+      ec_cli_output ("%.4f gbit/second %s",
+		     (((f64) total_bytes * 8.0) / delta / 1e9),
+		     transfer_type);
+    }
+  else
+    {
+      ec_cli_output ("zero delta-t?");
+      error = clib_error_return (0, "failed: zero delta-t");
+      goto cleanup;
+    }
+
+  if (ecm->test_bytes && ecm->test_failed)
+    error = clib_error_return (0, "failed: test bytes");
+
+cleanup:
+  /* Stop the input nodes' work, give in-flight events a moment, then
+   * drop all per-thread state and detach so fifo sizes can change */
+  ecm->run_test = ECHO_CLIENTS_EXITING;
+  vlib_process_wait_for_event_or_clock (vm, 10e-3);
+  for (i = 0; i < vec_len (ecm->connection_index_by_thread); i++)
+    {
+      vec_reset_length (ecm->connection_index_by_thread[i]);
+      vec_reset_length (ecm->connections_this_batch_by_thread[i]);
+      vec_reset_length (ecm->quic_session_index_by_thread[i]);
+    }
+
+  pool_free (ecm->sessions);
+
+  /* Detach the application, so we can use different fifo sizes next time */
+  if (ecm->test_client_attached)
+    {
+      if (echo_clients_detach ())
+	{
+	  error = clib_error_return (0, "failed: app detach");
+	  ec_cli_output ("WARNING: app detach failed...");
+	}
+    }
+  if (error)
+    ec_cli_output ("test failed");
+  vec_free (ecm->connect_uri);
+  return error;
+}
+
+/* *INDENT-OFF* */
+/* CLI registration; mp_safe because the handler manages its own
+ * synchronization with the worker threads. */
+VLIB_CLI_COMMAND (echo_clients_command, static) =
+{
+  .path = "test echo clients",
+  .short_help = "test echo clients [nclients %d][[m|g]bytes <bytes>]"
+      "[test-timeout <time>][syn-timeout <time>][no-return][fifo-size <size>]"
+      "[private-segment-count <count>][private-segment-size <bytes>[m|g]]"
+      "[preallocate-fifos][preallocate-sessions][client-batch <batch-size>]"
+      "[uri <tcp://ip/port>][test-bytes][no-output]",
+  .function = echo_clients_command_fn,
+  .is_mp_safe = 1,
+};
+/* *INDENT-ON* */
+
+/* Plugin init: defer real setup to first CLI use via is_init */
+clib_error_t *
+echo_clients_main_init (vlib_main_t * vm)
+{
+  echo_client_main_t *ecm = &echo_client_main;
+  ecm->is_init = 0;
+  return 0;
+}
+
+VLIB_INIT_FUNCTION (echo_clients_main_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/hs_apps/echo_client.h b/src/plugins/hs_apps/echo_client.h
new file mode 100644
index 00000000000..34cf0bd4764
--- /dev/null
+++ b/src/plugins/hs_apps/echo_client.h
@@ -0,0 +1,128 @@
+
+/*
+ * echo_client.h - built-in application layer echo client
+ *
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef __included_echo_client_h__
+#define __included_echo_client_h__
+
+#include <vnet/vnet.h>
+#include <vnet/ip/ip.h>
+#include <vnet/ethernet/ethernet.h>
+
+#include <vppinfra/hash.h>
+#include <vppinfra/error.h>
+#include <vnet/session/session.h>
+#include <vnet/session/application_interface.h>
+
/** Per-connection state tracked by the echo client. */
typedef struct
{
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  app_session_t data;		/**< Embedded app session (fifos, transport) */
  u64 bytes_to_send;		/**< Remaining bytes to transmit */
  u64 bytes_sent;		/**< Bytes transmitted so far */
  u64 bytes_to_receive;		/**< Remaining bytes expected back */
  u64 bytes_received;		/**< Bytes received so far */
  u64 vpp_session_handle;	/**< Handle of the underlying vpp session */
  u8 thread_index;		/**< Owning worker thread */
} eclient_session_t;
+
/** Global state for the built-in echo client application. */
typedef struct
{
  /*
   * Application setup parameters
   */
  svm_queue_t *vl_input_queue;	/**< vpe input queue */
  svm_msg_q_t **vpp_event_queue;	/**< per-thread vpp event queues */

  u32 cli_node_index;		/**< cli process node index */
  u32 my_client_index;		/**< loopback API client handle */
  u32 app_index;		/**< app index after attach */

  /*
   * Configuration params
   */
  u8 *connect_uri;		/**< URI the client connects to */
  u64 bytes_to_send;		/**< Bytes to send */
  u32 configured_segment_size;
  u32 fifo_size;
  u32 expected_connections;	/**< Number of clients/connections */
  u32 connections_per_batch;	/**< Connections to rx/tx at once */
  u32 private_segment_count;	/**< Number of private fifo segs */
  u32 private_segment_size;	/**< size of private fifo segs */
  u32 tls_engine;		/**< TLS engine mbedtls/openssl */
  u8 is_dgram;			/**< set if transport is datagram based */
  u32 no_copy;			/**< Don't memcpy data to tx fifo */
  u32 quic_streams;		/**< QUIC streams per connection */

  /*
   * Test state variables
   */
  eclient_session_t *sessions;	/**< Session pool, shared */
  clib_spinlock_t sessions_lock;	/**< Protects the session pool */
  u8 **rx_buf;			/**< intermediate rx buffers */
  u8 *connect_test_data;	/**< Pre-computed test data */
  u32 **quic_session_index_by_thread;
  u32 **connection_index_by_thread;
  u32 **connections_this_batch_by_thread;	/**< active connection batch */
  pthread_t client_thread_handle;

  volatile u32 ready_connections;	/**< Connections fully established */
  volatile u32 finished_connections;	/**< Connections done with the test */
  volatile u64 rx_total;
  volatile u64 tx_total;
  volatile int run_test;	/**< Signal start of test; see state enum */

  f64 test_start_time;
  f64 test_end_time;
  u32 prev_conns;
  u32 repeats;
  /*
   * Flags
   */
  u8 is_init;			/**< Set once lazily initialized */
  u8 test_client_attached;	/**< Set while attached to session layer */
  u8 no_return;			/**< Half-duplex: don't expect echo back */
  u8 test_return_packets;
  int i_am_master;
  int drop_packets;		/**< drop all packets */
  u8 prealloc_fifos;		/**< Request fifo preallocation */
  u8 no_output;
  u8 test_bytes;		/**< Verify payload contents on rx */
  u8 test_failed;
  u8 transport_proto;

  vlib_main_t *vlib_main;
} echo_client_main_t;
+
/**
 * Echo client test run states, stored in echo_client_main_t.run_test.
 *
 * The original declaration was `enum { ... } echo_clients_test_state_e;`,
 * which defines a *variable* of anonymous enum type in every translation
 * unit that includes this header (a tentative definition that breaks under
 * -fno-common). The name is clearly meant to be a type, so make it one.
 */
typedef enum
{
  ECHO_CLIENTS_STARTING,
  ECHO_CLIENTS_RUNNING,
  ECHO_CLIENTS_EXITING
} echo_clients_test_state_e;
+extern echo_client_main_t echo_client_main;
+
+vlib_node_registration_t echo_clients_node;
+
+#endif /* __included_echo_client_h__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/hs_apps/echo_server.c b/src/plugins/hs_apps/echo_server.c
new file mode 100644
index 00000000000..b7a74818cca
--- /dev/null
+++ b/src/plugins/hs_apps/echo_server.c
@@ -0,0 +1,588 @@
+/*
+* Copyright (c) 2017-2019 Cisco and/or its affiliates.
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at:
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+#include <vnet/vnet.h>
+#include <vlibmemory/api.h>
+#include <vnet/session/application.h>
+#include <vnet/session/application_interface.h>
+#include <vnet/session/session.h>
+
+#define ECHO_SERVER_DBG (0)
+#define DBG(_fmt, _args...) \
+ if (ECHO_SERVER_DBG) \
+ clib_warning (_fmt, ##_args)
+
+typedef struct
+{
+ /*
+ * Server app parameters
+ */
+ svm_msg_q_t **vpp_queue;
+ svm_queue_t *vl_input_queue; /**< Sever's event queue */
+
+ u32 app_index; /**< Server app index */
+ u32 my_client_index; /**< API client handle */
+ u32 node_index; /**< process node index for event scheduling */
+
+ /*
+ * Config params
+ */
+ u8 no_echo; /**< Don't echo traffic */
+ u32 fifo_size; /**< Fifo size */
+ u32 rcv_buffer_size; /**< Rcv buffer size */
+ u32 prealloc_fifos; /**< Preallocate fifos */
+ u32 private_segment_count; /**< Number of private segments */
+ u32 private_segment_size; /**< Size of private segments */
+ char *server_uri; /**< Server URI */
+ u32 tls_engine; /**< TLS engine: mbedtls/openssl */
+ u8 is_dgram; /**< set if transport is dgram */
+ /*
+ * Test state
+ */
+ u8 **rx_buf; /**< Per-thread RX buffer */
+ u64 byte_index;
+ u32 **rx_retries;
+ u8 transport_proto;
+ u64 listener_handle; /**< Session handle of the root listener */
+
+ vlib_main_t *vlib_main;
+} echo_server_main_t;
+
+echo_server_main_t echo_server_main;
+
/** Accept handler for QUIC connection-level (quad) sessions: nothing to do,
 *  data only flows on stream sessions. */
int
quic_echo_server_qsession_accept_callback (session_t * s)
{
  DBG ("QSession %u accept w/opaque %d", s->session_index, s->opaque);
  return 0;
}
+
+int
+quic_echo_server_session_accept_callback (session_t * s)
+{
+ echo_server_main_t *esm = &echo_server_main;
+ if (s->listener_handle == esm->listener_handle)
+ return quic_echo_server_qsession_accept_callback (s);
+ DBG ("SSESSION %u accept w/opaque %d", s->session_index, s->opaque);
+
+ esm->vpp_queue[s->thread_index] =
+ session_main_get_vpp_event_queue (s->thread_index);
+ s->session_state = SESSION_STATE_READY;
+ esm->byte_index = 0;
+ ASSERT (vec_len (esm->rx_retries) > s->thread_index);
+ vec_validate (esm->rx_retries[s->thread_index], s->session_index);
+ esm->rx_retries[s->thread_index][s->session_index] = 0;
+ return 0;
+}
+
/**
 * Standard accept handler: cache the worker's vpp event queue, mark the
 * session ready and reset the retry counter for it.
 */
int
echo_server_session_accept_callback (session_t * s)
{
  echo_server_main_t *esm = &echo_server_main;
  esm->vpp_queue[s->thread_index] =
    session_main_get_vpp_event_queue (s->thread_index);
  s->session_state = SESSION_STATE_READY;
  /* NOTE(review): byte_index is global, not per-session — resetting it here
   * assumes a single active connection when byte checking is used; confirm. */
  esm->byte_index = 0;
  ASSERT (vec_len (esm->rx_retries) > s->thread_index);
  vec_validate (esm->rx_retries[s->thread_index], s->session_index);
  esm->rx_retries[s->thread_index][s->session_index] = 0;
  return 0;
}
+
+void
+echo_server_session_disconnect_callback (session_t * s)
+{
+ echo_server_main_t *esm = &echo_server_main;
+ vnet_disconnect_args_t _a = { 0 }, *a = &_a;
+
+ a->handle = session_handle (s);
+ a->app_index = esm->app_index;
+ vnet_disconnect_session (a);
+}
+
+void
+echo_server_session_reset_callback (session_t * s)
+{
+ echo_server_main_t *esm = &echo_server_main;
+ vnet_disconnect_args_t _a = { 0 }, *a = &_a;
+ clib_warning ("Reset session %U", format_session, s, 2);
+ a->handle = session_handle (s);
+ a->app_index = esm->app_index;
+ vnet_disconnect_session (a);
+}
+
/** The echo server never initiates connections, so this should not fire. */
int
echo_server_session_connected_callback (u32 app_index, u32 api_context,
					session_t * s, u8 is_fail)
{
  clib_warning ("called...");
  return -1;
}
+
/** Accept new fifo segments unconditionally. */
int
echo_server_add_segment_callback (u32 client_index, u64 segment_handle)
{
  /* New heaps may be added */
  return 0;
}
+
/** Connect redirection is not supported by the echo server. */
int
echo_server_redirect_connect_callback (u32 client_index, void *mp)
{
  clib_warning ("called...");
  return -1;
}
+
+void
+test_bytes (echo_server_main_t * esm, int actual_transfer)
+{
+ int i;
+ u32 my_thread_id = vlib_get_thread_index ();
+
+ for (i = 0; i < actual_transfer; i++)
+ {
+ if (esm->rx_buf[my_thread_id][i] != ((esm->byte_index + i) & 0xff))
+ {
+ clib_warning ("at %lld expected %d got %d", esm->byte_index + i,
+ (esm->byte_index + i) & 0xff,
+ esm->rx_buf[my_thread_id][i]);
+ }
+ }
+ esm->byte_index += actual_transfer;
+}
+
+/*
+ * If no-echo, just drop the data and be done with it.
+ */
+int
+echo_server_builtin_server_rx_callback_no_echo (session_t * s)
+{
+ svm_fifo_t *rx_fifo = s->rx_fifo;
+ svm_fifo_dequeue_drop (rx_fifo, svm_fifo_max_dequeue_cons (rx_fifo));
+ return 0;
+}
+
/**
 * Main rx handler: copy whatever fits from the rx fifo into a per-thread
 * buffer and enqueue it back on the tx fifo (echo). Handles both stream
 * and datagram transports. If the tx fifo is full, schedules a self-tap
 * rx event to retry later.
 */
int
echo_server_rx_callback (session_t * s)
{
  u32 n_written, max_dequeue, max_enqueue, max_transfer;
  int actual_transfer;
  svm_fifo_t *tx_fifo, *rx_fifo;
  echo_server_main_t *esm = &echo_server_main;
  u32 thread_index = vlib_get_thread_index ();
  app_session_transport_t at;

  ASSERT (s->thread_index == thread_index);

  rx_fifo = s->rx_fifo;
  tx_fifo = s->tx_fifo;

  ASSERT (rx_fifo->master_thread_index == thread_index);
  ASSERT (tx_fifo->master_thread_index == thread_index);

  max_enqueue = svm_fifo_max_enqueue_prod (tx_fifo);
  if (!esm->is_dgram)
    {
      max_dequeue = svm_fifo_max_dequeue_cons (rx_fifo);
    }
  else
    {
      /* Datagram fifos carry a per-dgram header; peek it to size the read
       * and reserve header space on the tx side. */
      session_dgram_pre_hdr_t ph;
      svm_fifo_peek (rx_fifo, 0, sizeof (ph), (u8 *) & ph);
      max_dequeue = ph.data_length - ph.data_offset;
      if (!esm->vpp_queue[s->thread_index])
	{
	  svm_msg_q_t *mq;
	  mq = session_main_get_vpp_event_queue (s->thread_index);
	  esm->vpp_queue[s->thread_index] = mq;
	}
      max_enqueue -= sizeof (session_dgram_hdr_t);
    }

  if (PREDICT_FALSE (max_dequeue == 0))
    return 0;

  /* Number of bytes we're going to copy */
  max_transfer = clib_min (max_dequeue, max_enqueue);

  /* No space in tx fifo */
  if (PREDICT_FALSE (max_transfer == 0))
    {
      /* XXX timeout for session that are stuck */

      /* NB: this label lives inside the if-block but is also the target of
       * the goto at the bottom of the function (legal C, easy to miss). */
    rx_event:
      /* Program self-tap to retry */
      if (svm_fifo_set_event (rx_fifo))
	{
	  if (session_send_io_evt_to_thread (rx_fifo,
					     SESSION_IO_EVT_BUILTIN_RX))
	    clib_warning ("failed to enqueue self-tap");

	  vec_validate (esm->rx_retries[s->thread_index], s->session_index);
	  /* Warn exactly once when a session appears wedged */
	  if (esm->rx_retries[thread_index][s->session_index] == 500000)
	    {
	      clib_warning ("session stuck: %U", format_session, s, 2);
	    }
	  if (esm->rx_retries[thread_index][s->session_index] < 500001)
	    esm->rx_retries[thread_index][s->session_index]++;
	}

      return 0;
    }

  vec_validate (esm->rx_buf[thread_index], max_transfer);
  if (!esm->is_dgram)
    {
      actual_transfer = app_recv_stream_raw (rx_fifo,
					     esm->rx_buf[thread_index],
					     max_transfer,
					     0 /* don't clear event */ ,
					     0 /* peek */ );
    }
  else
    {
      actual_transfer = app_recv_dgram_raw (rx_fifo,
					    esm->rx_buf[thread_index],
					    max_transfer, &at,
					    0 /* don't clear event */ ,
					    0 /* peek */ );
    }
  ASSERT (actual_transfer == max_transfer);
  /* test_bytes (esm, actual_transfer); */

  /*
   * Echo back
   */

  if (!esm->is_dgram)
    {
      n_written = app_send_stream_raw (tx_fifo,
				       esm->vpp_queue[thread_index],
				       esm->rx_buf[thread_index],
				       actual_transfer, SESSION_IO_EVT_TX,
				       1 /* do_evt */ , 0);
    }
  else
    {
      n_written = app_send_dgram_raw (tx_fifo, &at,
				      esm->vpp_queue[s->thread_index],
				      esm->rx_buf[thread_index],
				      actual_transfer, SESSION_IO_EVT_TX,
				      1 /* do_evt */ , 0);
    }

  if (n_written != max_transfer)
    clib_warning ("short trout! written %u read %u", n_written, max_transfer);

  /* More data arrived while we were echoing: retry via the self-tap path */
  if (PREDICT_FALSE (svm_fifo_max_dequeue_cons (rx_fifo)))
    goto rx_event;

  return 0;
}
+
/* Session-layer callback table. Note: the accept and rx members are
 * overwritten at attach time depending on no_echo/QUIC config (see
 * echo_server_attach). */
static session_cb_vft_t echo_server_session_cb_vft = {
  .session_accept_callback = echo_server_session_accept_callback,
  .session_disconnect_callback = echo_server_session_disconnect_callback,
  .session_connected_callback = echo_server_session_connected_callback,
  .add_segment_callback = echo_server_add_segment_callback,
  .builtin_app_rx_callback = echo_server_rx_callback,
  .session_reset_callback = echo_server_session_reset_callback
};
+
+/* Abuse VPP's input queue */
+static int
+create_api_loopback (vlib_main_t * vm)
+{
+ echo_server_main_t *esm = &echo_server_main;
+ api_main_t *am = &api_main;
+ vl_shmem_hdr_t *shmem_hdr;
+
+ shmem_hdr = am->shmem_hdr;
+ esm->vl_input_queue = shmem_hdr->vl_input_queue;
+ esm->my_client_index = vl_api_memclnt_create_internal ("echo_server",
+ esm->vl_input_queue);
+ return 0;
+}
+
/**
 * Attach the echo server to the session layer and install a test TLS
 * cert/key pair. Patches the callback vft first, based on config
 * (no-echo rx handler, QUIC accept handler).
 *
 * @param appns_id     optional app namespace id vector (not consumed)
 * @param appns_flags  scope flags applied when appns_id is set
 * @param appns_secret namespace secret applied when appns_id is set
 * @return 0 on success, -1 on attach failure
 */
static int
echo_server_attach (u8 * appns_id, u64 appns_flags, u64 appns_secret)
{
  vnet_app_add_tls_cert_args_t _a_cert, *a_cert = &_a_cert;
  vnet_app_add_tls_key_args_t _a_key, *a_key = &_a_key;
  echo_server_main_t *esm = &echo_server_main;
  vnet_app_attach_args_t _a, *a = &_a;
  u64 options[APP_OPTIONS_N_OPTIONS];
  u32 segment_size = 512 << 20;	/* default 512MB unless configured */

  clib_memset (a, 0, sizeof (*a));
  clib_memset (options, 0, sizeof (options));

  /* Select callbacks to match the current configuration; this mutates the
   * static vft shared by all attaches. */
  if (esm->no_echo)
    echo_server_session_cb_vft.builtin_app_rx_callback =
      echo_server_builtin_server_rx_callback_no_echo;
  else
    echo_server_session_cb_vft.builtin_app_rx_callback =
      echo_server_rx_callback;
  if (esm->transport_proto == TRANSPORT_PROTO_QUIC)
    echo_server_session_cb_vft.session_accept_callback =
      quic_echo_server_session_accept_callback;

  if (esm->private_segment_size)
    segment_size = esm->private_segment_size;

  a->api_client_index = esm->my_client_index;
  a->session_cb_vft = &echo_server_session_cb_vft;
  a->options = options;
  a->options[APP_OPTIONS_SEGMENT_SIZE] = segment_size;
  a->options[APP_OPTIONS_ADD_SEGMENT_SIZE] = segment_size;
  a->options[APP_OPTIONS_RX_FIFO_SIZE] = esm->fifo_size;
  a->options[APP_OPTIONS_TX_FIFO_SIZE] = esm->fifo_size;
  a->options[APP_OPTIONS_PRIVATE_SEGMENT_COUNT] = esm->private_segment_count;
  a->options[APP_OPTIONS_TLS_ENGINE] = esm->tls_engine;
  a->options[APP_OPTIONS_PREALLOC_FIFO_PAIRS] =
    esm->prealloc_fifos ? esm->prealloc_fifos : 1;

  a->options[APP_OPTIONS_FLAGS] = APP_OPTIONS_FLAGS_IS_BUILTIN;
  if (appns_id)
    {
      a->namespace_id = appns_id;
      a->options[APP_OPTIONS_FLAGS] |= appns_flags;
      a->options[APP_OPTIONS_NAMESPACE_SECRET] = appns_secret;
    }

  if (vnet_application_attach (a))
    {
      clib_warning ("failed to attach server");
      return -1;
    }
  esm->app_index = a->app_index;

  /* Install the bundled test certificate/key so TLS/QUIC URIs work.
   * NOTE(review): return values are ignored here — confirm intentional. */
  clib_memset (a_cert, 0, sizeof (*a_cert));
  a_cert->app_index = a->app_index;
  vec_validate (a_cert->cert, test_srv_crt_rsa_len);
  clib_memcpy_fast (a_cert->cert, test_srv_crt_rsa, test_srv_crt_rsa_len);
  vnet_app_add_tls_cert (a_cert);

  clib_memset (a_key, 0, sizeof (*a_key));
  a_key->app_index = a->app_index;
  vec_validate (a_key->key, test_srv_key_rsa_len);
  clib_memcpy_fast (a_key->key, test_srv_key_rsa, test_srv_key_rsa_len);
  vnet_app_add_tls_key (a_key);
  return 0;
}
+
+static int
+echo_server_detach (void)
+{
+ echo_server_main_t *esm = &echo_server_main;
+ vnet_app_detach_args_t _da, *da = &_da;
+ int rv;
+
+ da->app_index = esm->app_index;
+ rv = vnet_application_detach (da);
+ esm->app_index = ~0;
+ return rv;
+}
+
+static int
+echo_server_listen ()
+{
+ int rv;
+ echo_server_main_t *esm = &echo_server_main;
+ vnet_listen_args_t _a, *a = &_a;
+ clib_memset (a, 0, sizeof (*a));
+ a->app_index = esm->app_index;
+ a->uri = esm->server_uri;
+ rv = vnet_bind_uri (a);
+ esm->listener_handle = a->handle;
+ return rv;
+}
+
/**
 * Create the echo server: set up the API loopback client (once), size the
 * per-thread scratch state, attach to the session layer and start
 * listening. Detaches again if listening fails.
 *
 * @return 0 on success, -1 on any failure
 */
static int
echo_server_create (vlib_main_t * vm, u8 * appns_id, u64 appns_flags,
		    u64 appns_secret)
{
  echo_server_main_t *esm = &echo_server_main;
  vlib_thread_main_t *vtm = vlib_get_thread_main ();
  u32 num_threads;
  int i;

  if (esm->my_client_index == (u32) ~ 0)
    {
      if (create_api_loopback (vm))
	{
	  clib_warning ("failed to create api loopback");
	  return -1;
	}
    }

  num_threads = 1 /* main thread */  + vtm->n_threads;
  vec_validate (echo_server_main.vpp_queue, num_threads - 1);
  vec_validate (esm->rx_buf, num_threads - 1);
  vec_validate (esm->rx_retries, num_threads - 1);
  /* Pre-size retry counters for sessions that already exist on each worker */
  for (i = 0; i < vec_len (esm->rx_retries); i++)
    vec_validate (esm->rx_retries[i],
		  pool_elts (session_main.wrk[i].sessions));
  /* rx buffer must hold at least a full fifo's worth of data */
  esm->rcv_buffer_size = clib_max (esm->rcv_buffer_size, esm->fifo_size);
  for (i = 0; i < num_threads; i++)
    vec_validate (esm->rx_buf[i], esm->rcv_buffer_size);

  if (echo_server_attach (appns_id, appns_flags, appns_secret))
    {
      clib_warning ("failed to attach server");
      return -1;
    }
  if (echo_server_listen ())
    {
      clib_warning ("failed to start listening");
      if (echo_server_detach ())
	clib_warning ("failed to detach");
      return -1;
    }
  return 0;
}
+
/**
 * CLI handler for "test echo server": parses config options, then either
 * stops a running server ("stop") or enables the session layer and starts
 * one on the given (or default) URI.
 */
static clib_error_t *
echo_server_create_command_fn (vlib_main_t * vm, unformat_input_t * input,
			       vlib_cli_command_t * cmd)
{
  echo_server_main_t *esm = &echo_server_main;
  u8 server_uri_set = 0, *appns_id = 0;
  u64 tmp, appns_flags = 0, appns_secret = 0;
  char *default_uri = "tcp://0.0.0.0/1234";
  int rv, is_stop = 0;
  session_endpoint_cfg_t sep = SESSION_ENDPOINT_CFG_NULL;

  /* Reset config to defaults on every invocation */
  esm->no_echo = 0;
  esm->fifo_size = 64 << 10;
  esm->rcv_buffer_size = 128 << 10;
  esm->prealloc_fifos = 0;
  esm->private_segment_count = 0;
  esm->private_segment_size = 0;
  esm->tls_engine = TLS_ENGINE_OPENSSL;
  vec_free (esm->server_uri);

  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (input, "uri %s", &esm->server_uri))
	server_uri_set = 1;
      else if (unformat (input, "no-echo"))
	esm->no_echo = 1;
      else if (unformat (input, "fifo-size %d", &esm->fifo_size))
	esm->fifo_size <<= 10;	/* fifo-size is given in KB */
      else if (unformat (input, "rcv-buf-size %d", &esm->rcv_buffer_size))
	;
      else if (unformat (input, "prealloc-fifos %d", &esm->prealloc_fifos))
	;
      else if (unformat (input, "private-segment-count %d",
			 &esm->private_segment_count))
	;
      else if (unformat (input, "private-segment-size %U",
			 unformat_memory_size, &tmp))
	{
	  /* segment size option is stored in a u32 */
	  if (tmp >= 0x100000000ULL)
	    return clib_error_return
	      (0, "private segment size %lld (%llu) too large", tmp, tmp);
	  esm->private_segment_size = tmp;
	}
      else if (unformat (input, "appns %_%v%_", &appns_id))
	;
      else if (unformat (input, "all-scope"))
	appns_flags |= (APP_OPTIONS_FLAGS_USE_GLOBAL_SCOPE
			| APP_OPTIONS_FLAGS_USE_LOCAL_SCOPE);
      else if (unformat (input, "local-scope"))
	appns_flags |= APP_OPTIONS_FLAGS_USE_LOCAL_SCOPE;
      else if (unformat (input, "global-scope"))
	appns_flags |= APP_OPTIONS_FLAGS_USE_GLOBAL_SCOPE;
      else if (unformat (input, "secret %lu", &appns_secret))
	;
      else if (unformat (input, "stop"))
	is_stop = 1;
      else if (unformat (input, "tls-engine %d", &esm->tls_engine))
	;
      else
	return clib_error_return (0, "failed: unknown input `%U'",
				  format_unformat_error, input);
    }

  if (is_stop)
    {
      if (esm->app_index == (u32) ~ 0)
	{
	  clib_warning ("server not running");
	  return clib_error_return (0, "failed: server not running");
	}
      rv = echo_server_detach ();
      if (rv)
	{
	  clib_warning ("failed: detach");
	  return clib_error_return (0, "failed: server detach %d", rv);
	}
      return 0;
    }

  vnet_session_enable_disable (vm, 1 /* turn on TCP, etc. */ );

  if (!server_uri_set)
    {
      clib_warning ("No uri provided! Using default: %s", default_uri);
      esm->server_uri = (char *) format (0, "%s%c", default_uri, 0);
    }

  /* Derive transport/dgram mode from the URI scheme */
  if ((rv = parse_uri ((char *) esm->server_uri, &sep)))
    return clib_error_return (0, "Uri parse error: %d", rv);
  esm->transport_proto = sep.transport_proto;
  esm->is_dgram = (sep.transport_proto == TRANSPORT_PROTO_UDP);

  rv = echo_server_create (vm, appns_id, appns_flags, appns_secret);
  vec_free (appns_id);
  if (rv)
    {
      vec_free (esm->server_uri);
      return clib_error_return (0, "failed: server_create returned %d", rv);
    }

  return 0;
}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (echo_server_create_command, static) =
+{
+ .path = "test echo server",
+ .short_help = "test echo server proto <proto> [no echo][fifo-size <mbytes>]"
+ "[rcv-buf-size <bytes>][prealloc-fifos <count>]"
+ "[private-segment-count <count>][private-segment-size <bytes[m|g]>]"
+ "[uri <tcp://ip/port>]",
+ .function = echo_server_create_command_fn,
+};
+/* *INDENT-ON* */
+
+clib_error_t *
+echo_server_main_init (vlib_main_t * vm)
+{
+ echo_server_main_t *esm = &echo_server_main;
+ esm->my_client_index = ~0;
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (echo_server_main_init);
+
+/*
+* fd.io coding-style-patch-verification: ON
+*
+* Local Variables:
+* eval: (c-set-style "gnu")
+* End:
+*/
diff --git a/src/plugins/hs_apps/hs_apps.c b/src/plugins/hs_apps/hs_apps.c
new file mode 100644
index 00000000000..5067919cc28
--- /dev/null
+++ b/src/plugins/hs_apps/hs_apps.c
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/vnet.h>
+#include <vnet/plugin/plugin.h>
+#include <vpp/app/version.h>
+
/* *INDENT-OFF* */
/* Register the hs_apps plugin with vpp. */
VLIB_PLUGIN_REGISTER () =
{
  .version = VPP_BUILD_VER,
  .description = "Host Stack Applications",
};
/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/hs_apps/http_server.c b/src/plugins/hs_apps/http_server.c
new file mode 100644
index 00000000000..ace3c75ebb8
--- /dev/null
+++ b/src/plugins/hs_apps/http_server.c
@@ -0,0 +1,984 @@
+/*
+* Copyright (c) 2017-2019 Cisco and/or its affiliates.
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at:
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+#include <vnet/vnet.h>
+#include <vnet/session/application.h>
+#include <vnet/session/application_interface.h>
+#include <vnet/session/session.h>
+#include <vppinfra/tw_timer_2t_1w_2048sl.h>
+
/** Events signalled to the http server process node. */
typedef enum
{
  EVENT_WAKEUP = 1,
} http_process_event_t;
+
/** Arguments handed to a spawned http-cli worker process node. */
typedef struct
{
  u32 hs_index;			/**< http session pool index */
  u32 thread_index;		/**< thread owning the session */
  u64 node_index;		/**< worker process node index
				     (NOTE(review): node indices are u32;
				     u64 width looks unnecessary — confirm) */
} http_server_args;
+
/** HTTP session lifecycle states.
 *  NOTE(review): no visible user in this chunk — confirm it is used. */
typedef enum
{
  HTTP_STATE_CLOSED,
  HTTP_STATE_ESTABLISHED,
  HTTP_STATE_OK_SENT,
} http_session_state_t;
/** Per-connection http server state; embeds the app session fields. */
typedef struct
{
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
#define _(type, name) type name;
  foreach_app_session_field
#undef _
  u32 thread_index;		/**< Owning thread */
  u8 *rx_buf;			/**< Accumulated request bytes (vector) */
  u32 vpp_session_index;	/**< Underlying vpp session index */
  u64 vpp_session_handle;	/**< Underlying vpp session handle */
  u32 timer_handle;		/**< Cleanup timer, ~0 if not armed */
} http_session_t;
+
/** Global state for the built-in http server application. */
typedef struct
{
  http_session_t **sessions;	/**< Per-thread session pools */
  clib_rwlock_t sessions_lock;	/**< Guards the session pools */
  u32 **session_to_http_session;	/**< Per-thread vpp->http index map */

  svm_msg_q_t **vpp_queue;	/**< Per-thread vpp event queues */

  uword *handler_by_get_request;

  u32 *free_http_cli_process_node_indices;	/**< Recycled worker nodes */

  /* Server's event queue */
  svm_queue_t *vl_input_queue;

  /* API client handle */
  u32 my_client_index;

  u32 app_index;		/**< App index after attach */

  /* process node index for event scheduling */
  u32 node_index;

  tw_timer_wheel_2t_1w_2048sl_t tw;	/**< Session expiry timer wheel */
  clib_spinlock_t tw_lock;	/**< Guards the timer wheel */

  u32 prealloc_fifos;
  u32 private_segment_size;
  u32 fifo_size;
  u8 *uri;			/**< Listen URI */
  u32 is_static;		/**< Serve canned static reply only */
  vlib_main_t *vlib_main;
} http_server_main_t;
+
+http_server_main_t http_server_main;
+
/* Thin wrappers around the global sessions rwlock; readers are the data
 * path and worker processes, writers create/destroy sessions. */
static void
http_server_sessions_reader_lock (void)
{
  clib_rwlock_reader_lock (&http_server_main.sessions_lock);
}

static void
http_server_sessions_reader_unlock (void)
{
  clib_rwlock_reader_unlock (&http_server_main.sessions_lock);
}

static void
http_server_sessions_writer_lock (void)
{
  clib_rwlock_writer_lock (&http_server_main.sessions_lock);
}

static void
http_server_sessions_writer_unlock (void)
{
  clib_rwlock_writer_unlock (&http_server_main.sessions_lock);
}
+
+static http_session_t *
+http_server_session_alloc (u32 thread_index)
+{
+ http_server_main_t *hsm = &http_server_main;
+ http_session_t *hs;
+ pool_get (hsm->sessions[thread_index], hs);
+ memset (hs, 0, sizeof (*hs));
+ hs->session_index = hs - hsm->sessions[thread_index];
+ hs->thread_index = thread_index;
+ hs->timer_handle = ~0;
+ return hs;
+}
+
+static http_session_t *
+http_server_session_get (u32 thread_index, u32 hs_index)
+{
+ http_server_main_t *hsm = &http_server_main;
+ if (pool_is_free_index (hsm->sessions[thread_index], hs_index))
+ return 0;
+ return pool_elt_at_index (hsm->sessions[thread_index], hs_index);
+}
+
+static void
+http_server_session_free (http_session_t * hs)
+{
+ http_server_main_t *hsm = &http_server_main;
+ pool_put (hsm->sessions[hs->thread_index], hs);
+ if (CLIB_DEBUG)
+ memset (hs, 0xfa, sizeof (*hs));
+}
+
+static void
+http_server_session_lookup_add (u32 thread_index, u32 s_index, u32 hs_index)
+{
+ http_server_main_t *hsm = &http_server_main;
+ vec_validate (hsm->session_to_http_session[thread_index], s_index);
+ hsm->session_to_http_session[thread_index][s_index] = hs_index;
+}
+
+static void
+http_server_session_lookup_del (u32 thread_index, u32 s_index)
+{
+ http_server_main_t *hsm = &http_server_main;
+ hsm->session_to_http_session[thread_index][s_index] = ~0;
+}
+
+static http_session_t *
+http_server_session_lookup (u32 thread_index, u32 s_index)
+{
+ http_server_main_t *hsm = &http_server_main;
+ u32 hs_index;
+
+ if (s_index < vec_len (hsm->session_to_http_session[thread_index]))
+ {
+ hs_index = hsm->session_to_http_session[thread_index][s_index];
+ return http_server_session_get (thread_index, hs_index);
+ }
+ return 0;
+}
+
+
/**
 * Arm a 60-tick cleanup timer for the session. The timer opaque packs
 * thread index in the top 8 bits and session index in the low 24
 * (assumes session_index < 2^24 — TODO confirm).
 */
static void
http_server_session_timer_start (http_session_t * hs)
{
  u32 hs_handle;
  hs_handle = hs->thread_index << 24 | hs->session_index;
  clib_spinlock_lock (&http_server_main.tw_lock);
  hs->timer_handle = tw_timer_start_2t_1w_2048sl (&http_server_main.tw,
						  hs_handle, 0, 60);
  clib_spinlock_unlock (&http_server_main.tw_lock);
}
+
+static void
+http_server_session_timer_stop (http_session_t * hs)
+{
+ if (hs->timer_handle == ~0)
+ return;
+ clib_spinlock_lock (&http_server_main.tw_lock);
+ tw_timer_stop_2t_1w_2048sl (&http_server_main.tw, hs->timer_handle);
+ clib_spinlock_unlock (&http_server_main.tw_lock);
+}
+
/**
 * Tear down all local state for a session: lookup entry, rx buffer,
 * cleanup timer, and finally the pool element itself.
 */
static void
http_server_session_cleanup (http_session_t * hs)
{
  if (!hs)
    return;
  http_server_session_lookup_del (hs->thread_index, hs->vpp_session_index);
  vec_free (hs->rx_buf);
  http_server_session_timer_stop (hs);
  http_server_session_free (hs);
}
+
+static void
+http_server_session_disconnect (http_session_t * hs)
+{
+ vnet_disconnect_args_t _a = { 0 }, *a = &_a;
+ a->handle = hs->vpp_session_handle;
+ a->app_index = http_server_main.app_index;
+ vnet_disconnect_session (a);
+}
+
/**
 * Retire a finished http-cli worker process node: free its saved args,
 * disable the node and push its index onto the freelist for reuse by
 * alloc_http_process.
 */
static void
http_process_free (http_server_args * args)
{
  vlib_node_runtime_t *rt;
  /* NOTE(review): uses the global main vlib_main directly — presumably
   * worker processes only run on the main thread; confirm. */
  vlib_main_t *vm = &vlib_global_main;
  http_server_main_t *hsm = &http_server_main;
  vlib_node_t *n;
  u32 node_index;
  http_server_args **save_args;

  node_index = args->node_index;
  ASSERT (node_index != 0);

  n = vlib_get_node (vm, node_index);
  rt = vlib_node_get_runtime (vm, n->index);
  save_args = vlib_node_get_runtime_data (vm, n->index);

  /* Reset process session pointer */
  clib_mem_free (*save_args);
  *save_args = 0;

  /* Turn off the process node */
  vlib_node_set_state (vm, rt->node_index, VLIB_NODE_STATE_DISABLED);

  /* add node index to the freelist */
  vec_add1 (hsm->free_http_cli_process_node_indices, node_index);
}
+
/* *INDENT-OFF* */
/* Canned HTTP/HTML fragments used to assemble replies. */
static const char *http_ok =
    "HTTP/1.1 200 OK\r\n";

/* format template: takes content length (int) and body (string) */
static const char *http_response =
    "Content-Type: text/html\r\n"
    "Expires: Mon, 11 Jan 1970 10:10:10 GMT\r\n"
    "Connection: close \r\n"
    "Pragma: no-cache\r\n"
    "Content-Length: %d\r\n\r\n%s";

/* format template: takes the status line text (e.g. "400 Bad Request") */
static const char *http_error_template =
    "HTTP/1.1 %s\r\n"
    "Content-Type: text/html\r\n"
    "Expires: Mon, 11 Jan 1970 10:10:10 GMT\r\n"
    "Connection: close\r\n"
    "Pragma: no-cache\r\n"
    "Content-Length: 0\r\n\r\n";

/* Header, including incantation to suppress favicon.ico requests */
static const char *html_header_template =
    "<html><head><title>%v</title></head>"
    "<link rel=\"icon\" href=\"data:,\">"
    "<body><pre>";

static const char *html_footer =
    "</pre></body></html>\r\n";

static const char *html_header_static =
    "<html><head><title>static reply</title></head>"
    "<link rel=\"icon\" href=\"data:,\">"
    "<body><pre>hello</pre></body></html>\r\n";
/* *INDENT-ON* */

/* Pre-built static-mode reply, presumably assembled at server create time
 * from the templates above — built outside this chunk, TODO confirm. */
static u8 *static_http;
static u8 *static_ok;
+
+static void
+http_cli_output (uword arg, u8 * buffer, uword buffer_bytes)
+{
+ u8 **output_vecp = (u8 **) arg;
+ u8 *output_vec;
+ u32 offset;
+
+ output_vec = *output_vecp;
+
+ offset = vec_len (output_vec);
+ vec_validate (output_vec, offset + buffer_bytes - 1);
+ clib_memcpy_fast (output_vec + offset, buffer, buffer_bytes);
+
+ *output_vecp = output_vec;
+}
+
/**
 * Enqueue a reply vector onto the session's tx fifo, suspending (with the
 * sessions reader lock dropped!) when the fifo is full. Gives up and
 * disconnects the session after 10s without progress.
 *
 * Caller must hold the sessions reader lock; it is released and
 * re-acquired across vlib_process_suspend, so `hs` may only be used if
 * it cannot be freed concurrently.
 */
void
send_data (http_session_t * hs, u8 * data)
{
  http_server_main_t *hsm = &http_server_main;
  vnet_disconnect_args_t _a = { 0 }, *a = &_a;
  vlib_main_t *vm = vlib_get_main ();
  f64 last_sent_timer = vlib_time_now (vm);
  u32 offset, bytes_to_send;
  f64 delay = 10e-3;

  bytes_to_send = vec_len (data);
  offset = 0;

  while (bytes_to_send > 0)
    {
      int actual_transfer;

      actual_transfer = svm_fifo_enqueue
	(hs->tx_fifo, bytes_to_send, data + offset);

      /* Made any progress? */
      if (actual_transfer <= 0)
	{
	  /* Drop the lock while sleeping so the data path can advance */
	  http_server_sessions_reader_unlock ();
	  vlib_process_suspend (vm, delay);
	  http_server_sessions_reader_lock ();

	  /* 10s deadman timer */
	  if (vlib_time_now (vm) > last_sent_timer + 10.0)
	    {
	      a->handle = hs->vpp_session_handle;
	      a->app_index = hsm->app_index;
	      vnet_disconnect_session (a);
	      break;
	    }
	  /* Exponential backoff, within reason */
	  if (delay < 1.0)
	    delay = delay * 2.0;
	}
      else
	{
	  last_sent_timer = vlib_time_now (vm);
	  offset += actual_transfer;
	  bytes_to_send -= actual_transfer;

	  if (svm_fifo_set_event (hs->tx_fifo))
	    session_send_io_evt_to_thread (hs->tx_fifo,
					   SESSION_IO_EVT_TX_FLUSH);
	  delay = 10e-3;	/* reset backoff after progress */
	}
    }
}
+
+static void
+send_error (http_session_t * hs, char *str)
+{
+ u8 *data;
+
+ data = format (0, http_error_template, str);
+ send_data (hs, data);
+ vec_free (data);
+}
+
/**
 * Worker process node body: parse the buffered GET request, run its path
 * (with '/' mapped to ' ') as a debug CLI command, wrap the output in
 * HTML and send it back, then retire this worker node.
 */
static uword
http_cli_process (vlib_main_t * vm, vlib_node_runtime_t * rt,
		  vlib_frame_t * f)
{
  u8 *request = 0, *reply = 0, *http = 0, *html = 0;
  http_server_main_t *hsm = &http_server_main;
  http_server_args **save_args;
  http_server_args *args;
  unformat_input_t input;
  http_session_t *hs;
  int i;

  save_args = vlib_node_get_runtime_data (hsm->vlib_main, rt->node_index);
  args = *save_args;

  http_server_sessions_reader_lock ();

  hs = http_server_session_get (args->thread_index, args->hs_index);
  ASSERT (hs);

  /* request aliases the session's rx buffer; it is mutated in place below */
  request = hs->rx_buf;
  if (vec_len (request) < 7)
    {
      send_error (hs, "400 Bad Request");
      goto out;
    }

  /* Find the "GET " verb anywhere in the buffered bytes */
  for (i = 0; i < vec_len (request) - 4; i++)
    {
      if (request[i] == 'G' &&
	  request[i + 1] == 'E' &&
	  request[i + 2] == 'T' && request[i + 3] == ' ')
	goto found;
    }
bad_request:
  send_error (hs, "400 Bad Request");
  goto out;

found:
  /* Lose "GET " */
  vec_delete (request, i + 5, 0);

  /* Replace slashes with spaces, stop at the end of the path */
  i = 0;
  while (1)
    {
      if (request[i] == '/')
	request[i] = ' ';
      else if (request[i] == ' ')
	{
	  /* vlib_cli_input is vector-based, no need for a NULL */
	  _vec_len (request) = i;
	  break;
	}
      i++;
      /* Should never happen */
      if (i == vec_len (request))
	goto bad_request;
    }

  /* Generate the html header */
  html = format (0, html_header_template, request /* title */ );

  /* Run the command */
  unformat_init_vector (&input, vec_dup (request));
  vlib_cli_input (vm, &input, http_cli_output, (uword) & reply);
  unformat_free (&input);
  request = 0;

  /* Generate the html page */
  html = format (html, "%v", reply);
  html = format (html, html_footer);
  /* And the http reply.
   * NOTE(review): http_ok is a plain C string, not a vector — vec_len()
   * on it reads out-of-bounds header bytes. Harmless only because http_ok
   * contains no format directives; confirm and clean up. */
  http = format (0, http_ok, vec_len (http_ok));
  http = format (http, http_response, vec_len (html), html);

  /* Send it */
  send_data (hs, http);

out:
  /* Cleanup */
  http_server_sessions_reader_unlock ();
  vec_free (reply);
  vec_free (html);
  vec_free (http);

  http_process_free (args);
  return (0);
}
+
+/*
+ * Obtain a vlib process node to run http_cli_process for one request:
+ * recycle one from the free list if available, otherwise register a brand
+ * new process node. The caller's args are heap-copied into the node's
+ * runtime data and the process is kicked off. Main thread only.
+ */
+static void
+alloc_http_process (http_server_args * args)
+{
+  char *name;
+  vlib_node_t *n;
+  http_server_main_t *hsm = &http_server_main;
+  vlib_main_t *vm = hsm->vlib_main;
+  uword l = vec_len (hsm->free_http_cli_process_node_indices);
+  http_server_args **save_args;
+
+  if (vec_len (hsm->free_http_cli_process_node_indices) > 0)
+    {
+      /* Reuse the most recently freed process node */
+      n = vlib_get_node (vm, hsm->free_http_cli_process_node_indices[l - 1]);
+      vlib_node_set_state (vm, n->index, VLIB_NODE_STATE_POLLING);
+      _vec_len (hsm->free_http_cli_process_node_indices) = l - 1;
+    }
+  else
+    {
+      static vlib_node_registration_t r = {
+	.function = http_cli_process,
+	.type = VLIB_NODE_TYPE_PROCESS,
+	.process_log2_n_stack_bytes = 16,
+	.runtime_data_bytes = sizeof (void *),
+      };
+
+      /* Unique name per registration; l counts previously created nodes */
+      name = (char *) format (0, "http-cli-%d", l);
+      r.name = name;
+      vlib_register_node (vm, &r);
+      vec_free (name);
+
+      n = vlib_get_node (vm, r.index);
+    }
+
+  /* Save the node index in the args. It won't be zero. */
+  args->node_index = n->index;
+
+  /* Save the args (pointer) in the node runtime */
+  save_args = vlib_node_get_runtime_data (vm, n->index);
+  *save_args = clib_mem_alloc (sizeof (*args));
+  clib_memcpy_fast (*save_args, args, sizeof (*args));
+
+  vlib_start_process (vm, n->runtime_index);
+}
+
+/* RPC trampoline: lets worker threads run alloc_http_process on main. */
+static void
+alloc_http_process_callback (void *cb_args)
+{
+  alloc_http_process ((http_server_args *) cb_args);
+}
+
+/*
+ * Drain everything currently in the session's rx fifo, appending it to
+ * hs->rx_buf. Returns 0 on success, -1 if the fifo was empty.
+ */
+static int
+session_rx_request (http_session_t * hs)
+{
+  u32 max_dequeue, cursize;
+  int n_read;
+
+  cursize = vec_len (hs->rx_buf);
+  max_dequeue = svm_fifo_max_dequeue_cons (hs->rx_fifo);
+  if (PREDICT_FALSE (max_dequeue == 0))
+    return -1;
+
+  /* Grow rx_buf so the whole fifo contents fit after the existing data */
+  vec_validate (hs->rx_buf, cursize + max_dequeue - 1);
+  n_read = app_recv_stream_raw (hs->rx_fifo, hs->rx_buf + cursize,
+				max_dequeue, 0, 0 /* peek */ );
+  ASSERT (n_read == max_dequeue);
+  /* Clear the io event if we consumed everything */
+  if (svm_fifo_is_empty_cons (hs->rx_fifo))
+    svm_fifo_unset_event (hs->rx_fifo);
+
+  _vec_len (hs->rx_buf) = cursize + n_read;
+  return 0;
+}
+
+/*
+ * Builtin rx callback (CLI mode): buffer the request and dispatch it to a
+ * new/recycled http_cli_process on the main thread.
+ *
+ * Returns 0 on success, non-zero if the session is gone / not established
+ * or no data was available.
+ */
+static int
+http_server_rx_callback (session_t * s)
+{
+  http_server_args args;
+  http_session_t *hs;
+  int rv;
+
+  http_server_sessions_reader_lock ();
+
+  hs = http_server_session_lookup (s->thread_index, s->session_index);
+  if (!hs || hs->session_state != HTTP_STATE_ESTABLISHED)
+    {
+      /* BUG FIX: drop the reader lock before bailing out, otherwise it
+       * is held forever and writers deadlock */
+      http_server_sessions_reader_unlock ();
+      return -1;
+    }
+
+  rv = session_rx_request (hs);
+  if (rv)
+    {
+      /* BUG FIX: same as above — release the lock on this early return */
+      http_server_sessions_reader_unlock ();
+      return rv;
+    }
+
+  /* send the command to a new/recycled vlib process */
+  args.hs_index = hs->session_index;
+  args.thread_index = hs->thread_index;
+
+  http_server_sessions_reader_unlock ();
+
+  /* Send RPC request to main thread */
+  if (vlib_get_thread_index () != 0)
+    vlib_rpc_call_main_thread (alloc_http_process_callback, (u8 *) & args,
+			       sizeof (args));
+  else
+    alloc_http_process (&args);
+  return 0;
+}
+
+/*
+ * Builtin rx callback (static mode): small state machine that waits for a
+ * complete "GET ... \r\n\r\n" request, answers "200 OK" first, then sends
+ * the pre-rendered static page on the self-posted follow-up rx event, and
+ * finally tears the session down.
+ */
+static int
+http_server_rx_callback_static (session_t * s)
+{
+  http_session_t *hs;
+  u32 request_len;
+  u8 *request = 0;
+  int i, rv;
+
+  hs = http_server_session_lookup (s->thread_index, s->session_index);
+  if (!hs || hs->session_state == HTTP_STATE_CLOSED)
+    return 0;
+
+  /* ok 200 was sent */
+  if (hs->session_state == HTTP_STATE_OK_SENT)
+    goto send_data;
+
+  rv = session_rx_request (hs);
+  if (rv)
+    goto wait_for_data;
+
+  request = hs->rx_buf;
+  request_len = vec_len (request);
+  if (vec_len (request) < 7)
+    {
+      send_error (hs, "400 Bad Request");
+      goto close_session;
+    }
+
+  /* Look for the "GET " verb anywhere in what we have so far */
+  for (i = 0; i < request_len - 4; i++)
+    {
+      if (request[i] == 'G' &&
+	  request[i + 1] == 'E' &&
+	  request[i + 2] == 'T' && request[i + 3] == ' ')
+	goto find_end;
+    }
+  send_error (hs, "400 Bad Request");
+  goto close_session;
+
+find_end:
+
+  /* check for the end sequence: /r/n/r/n */
+  if (request[request_len - 1] != 0xa || request[request_len - 3] != 0xa
+      || request[request_len - 2] != 0xd || request[request_len - 4] != 0xd)
+    goto wait_for_data;
+
+  /* send 200 OK first */
+  send_data (hs, static_ok);
+  hs->session_state = HTTP_STATE_OK_SENT;
+  goto postpone;
+
+send_data:
+  send_data (hs, static_http);
+
+close_session:
+  http_server_session_disconnect (hs);
+  http_server_session_cleanup (hs);
+  return 0;
+
+postpone:
+  /* Re-arm a builtin rx event so we get called again to send the page */
+  (void) svm_fifo_set_event (hs->rx_fifo);
+  session_send_io_evt_to_thread (hs->rx_fifo, SESSION_IO_EVT_BUILTIN_RX);
+  return 0;
+
+wait_for_data:
+  return 0;
+}
+
+/*
+ * Session-layer accept callback: allocate an http session, map the vpp
+ * session to it, cache the fifos and start the idle timer. The session
+ * table is only shared (and thus locked) in CLI mode; static mode keeps
+ * everything on the owning thread.
+ */
+static int
+http_server_session_accept_callback (session_t * s)
+{
+  http_server_main_t *hsm = &http_server_main;
+  http_session_t *hs;
+
+  hsm->vpp_queue[s->thread_index] =
+    session_main_get_vpp_event_queue (s->thread_index);
+
+  if (!hsm->is_static)
+    http_server_sessions_writer_lock ();
+
+  hs = http_server_session_alloc (s->thread_index);
+  http_server_session_lookup_add (s->thread_index, s->session_index,
+				  hs->session_index);
+  hs->rx_fifo = s->rx_fifo;
+  hs->tx_fifo = s->tx_fifo;
+  hs->vpp_session_index = s->session_index;
+  hs->vpp_session_handle = session_handle (s);
+  hs->session_state = HTTP_STATE_ESTABLISHED;
+  http_server_session_timer_start (hs);
+
+  if (!hsm->is_static)
+    http_server_sessions_writer_unlock ();
+
+  s->session_state = SESSION_STATE_READY;
+  return 0;
+}
+
+/*
+ * Peer closed: free our http session state and confirm the disconnect
+ * back to the session layer.
+ */
+static void
+http_server_session_disconnect_callback (session_t * s)
+{
+  http_server_main_t *hsm = &http_server_main;
+  vnet_disconnect_args_t _a = { 0 }, *a = &_a;
+  http_session_t *hs;
+
+  if (!hsm->is_static)
+    http_server_sessions_writer_lock ();
+
+  hs = http_server_session_lookup (s->thread_index, s->session_index);
+  http_server_session_cleanup (hs);
+
+  if (!hsm->is_static)
+    http_server_sessions_writer_unlock ();
+
+  a->handle = session_handle (s);
+  a->app_index = hsm->app_index;
+  vnet_disconnect_session (a);
+}
+
+/*
+ * Transport reset: same cleanup path as a regular disconnect
+ * (intentionally mirrors http_server_session_disconnect_callback).
+ */
+static void
+http_server_session_reset_callback (session_t * s)
+{
+  http_server_main_t *hsm = &http_server_main;
+  vnet_disconnect_args_t _a = { 0 }, *a = &_a;
+  http_session_t *hs;
+
+  if (!hsm->is_static)
+    http_server_sessions_writer_lock ();
+
+  hs = http_server_session_lookup (s->thread_index, s->session_index);
+  http_server_session_cleanup (hs);
+
+  if (!hsm->is_static)
+    http_server_sessions_writer_unlock ();
+
+  a->handle = session_handle (s);
+  a->app_index = hsm->app_index;
+  vnet_disconnect_session (a);
+}
+
+/* Server never connects out; this callback should not fire. */
+static int
+http_server_session_connected_callback (u32 app_index, u32 api_context,
+					session_t * s, u8 is_fail)
+{
+  clib_warning ("called...");
+  return -1;
+}
+
+/* Builtin app: no API segments expected; this callback should not fire. */
+static int
+http_server_add_segment_callback (u32 client_index, u64 segment_handle)
+{
+  clib_warning ("called...");
+  return -1;
+}
+
+/* Session-layer callbacks for the http server app. builtin_app_rx_callback
+ * is overridden with the static-mode handler at CLI "static" setup time. */
+static session_cb_vft_t http_server_session_cb_vft = {
+  .session_accept_callback = http_server_session_accept_callback,
+  .session_disconnect_callback = http_server_session_disconnect_callback,
+  .session_connected_callback = http_server_session_connected_callback,
+  .add_segment_callback = http_server_add_segment_callback,
+  .builtin_app_rx_callback = http_server_rx_callback,
+  .session_reset_callback = http_server_session_reset_callback
+};
+
+/*
+ * Attach the http server as a builtin app and install the test TLS
+ * cert/key so the server can also be reached over TLS transports.
+ * Returns 0 on success, -1 on attach failure.
+ */
+static int
+http_server_attach ()
+{
+  vnet_app_add_tls_cert_args_t _a_cert, *a_cert = &_a_cert;
+  vnet_app_add_tls_key_args_t _a_key, *a_key = &_a_key;
+  http_server_main_t *hsm = &http_server_main;
+  u64 options[APP_OPTIONS_N_OPTIONS];
+  vnet_app_attach_args_t _a, *a = &_a;
+  u32 segment_size = 128 << 20;
+
+  clib_memset (a, 0, sizeof (*a));
+  clib_memset (options, 0, sizeof (options));
+
+  if (hsm->private_segment_size)
+    segment_size = hsm->private_segment_size;
+
+  a->api_client_index = ~0;
+  a->name = format (0, "test_http_server");
+  a->session_cb_vft = &http_server_session_cb_vft;
+  a->options = options;
+  a->options[APP_OPTIONS_SEGMENT_SIZE] = segment_size;
+  /* Defaults: 8k rx / 32k tx unless fifo-size was configured */
+  a->options[APP_OPTIONS_RX_FIFO_SIZE] =
+    hsm->fifo_size ? hsm->fifo_size : 8 << 10;
+  a->options[APP_OPTIONS_TX_FIFO_SIZE] =
+    hsm->fifo_size ? hsm->fifo_size : 32 << 10;
+  a->options[APP_OPTIONS_FLAGS] = APP_OPTIONS_FLAGS_IS_BUILTIN;
+  a->options[APP_OPTIONS_PREALLOC_FIFO_PAIRS] = hsm->prealloc_fifos;
+
+  if (vnet_application_attach (a))
+    {
+      vec_free (a->name);
+      clib_warning ("failed to attach server");
+      return -1;
+    }
+  vec_free (a->name);
+  hsm->app_index = a->app_index;
+
+  /* NOTE(review): vec_validate(v, len) allocates len+1 bytes, one more
+   * than the cert/key lengths copied below — presumably harmless slack;
+   * confirm against vnet_app_add_tls_* expectations */
+  clib_memset (a_cert, 0, sizeof (*a_cert));
+  a_cert->app_index = a->app_index;
+  vec_validate (a_cert->cert, test_srv_crt_rsa_len);
+  clib_memcpy_fast (a_cert->cert, test_srv_crt_rsa, test_srv_crt_rsa_len);
+  vnet_app_add_tls_cert (a_cert);
+
+  clib_memset (a_key, 0, sizeof (*a_key));
+  a_key->app_index = a->app_index;
+  vec_validate (a_key->key, test_srv_key_rsa_len);
+  clib_memcpy_fast (a_key->key, test_srv_key_rsa, test_srv_key_rsa_len);
+  vnet_app_add_tls_key (a_key);
+
+  return 0;
+}
+
+/* Start listening on the configured uri, or tcp port 80 by default. */
+static int
+http_server_listen ()
+{
+  http_server_main_t *hsm = &http_server_main;
+  vnet_listen_args_t _a, *a = &_a;
+  clib_memset (a, 0, sizeof (*a));
+  a->app_index = hsm->app_index;
+  a->uri = "tcp://0.0.0.0/80";
+  if (hsm->uri)
+    a->uri = (char *) hsm->uri;
+  return vnet_bind_uri (a);
+}
+
+/*
+ * RPC target run on the session's owning thread when its timer expires:
+ * disconnect and free the http session. The handle packs thread index in
+ * the top bits and session index in the low 24 bits.
+ */
+static void
+http_server_session_cleanup_cb (void *hs_handlep)
+{
+  http_session_t *hs;
+  uword hs_handle;
+  hs_handle = pointer_to_uword (hs_handlep);
+  hs = http_server_session_get (hs_handle >> 24, hs_handle & 0x00FFFFFF);
+  if (!hs)
+    return;
+  /* Timer already popped; make sure cleanup doesn't try to stop it */
+  hs->timer_handle = ~0;
+  http_server_session_disconnect (hs);
+  http_server_session_cleanup (hs);
+}
+
+/* Timer-wheel expiry hook: fan each expired session handle out to its
+ * owning thread via RPC for cleanup. */
+static void
+http_expired_timers_dispatch (u32 * expired_timers)
+{
+  u32 hs_handle;
+  int i;
+
+  for (i = 0; i < vec_len (expired_timers); i++)
+    {
+      /* Get session handle. The first bit is the timer id */
+      hs_handle = expired_timers[i] & 0x7FFFFFFF;
+      session_send_rpc_evt_to_thread (hs_handle >> 24,
+				      http_server_session_cleanup_cb,
+				      uword_to_pointer (hs_handle, void *));
+    }
+}
+
+/*
+ * Background process: wakes up about once a second to advance the session
+ * timer wheel, which in turn dispatches expired-session cleanups.
+ */
+static uword
+http_server_process (vlib_main_t * vm, vlib_node_runtime_t * rt,
+		     vlib_frame_t * f)
+{
+  http_server_main_t *hsm = &http_server_main;
+  f64 now, timeout = 1.0;
+  uword *event_data = 0;
+  uword __clib_unused event_type;
+
+  while (1)
+    {
+      vlib_process_wait_for_event_or_clock (vm, timeout);
+      now = vlib_time_now (vm);
+      /* Events are drained but otherwise ignored; only the clock matters */
+      event_type = vlib_process_get_events (vm, (uword **) & event_data);
+
+      /* expire timers */
+      clib_spinlock_lock (&http_server_main.tw_lock);
+      tw_timer_expire_timers_2t_1w_2048sl (&hsm->tw, now);
+      clib_spinlock_unlock (&http_server_main.tw_lock);
+
+      vec_reset_length (event_data);
+    }
+  return 0;
+}
+
+/* *INDENT-OFF* */
+/* Process node driving the http session timer wheel; enabled on server
+ * creation. */
+VLIB_REGISTER_NODE (http_server_process_node) =
+{
+  .function = http_server_process,
+  .type = VLIB_NODE_TYPE_PROCESS,
+  .name = "http-server-process",
+  .state = VLIB_NODE_STATE_DISABLED,
+};
+/* *INDENT-ON* */
+
+/*
+ * Bring the http server up: size the per-thread vectors, init locks,
+ * attach + listen, then start the timer wheel and its driver process.
+ * Returns 0 on success, -1 if attach or listen fails.
+ */
+static int
+http_server_create (vlib_main_t * vm)
+{
+  vlib_thread_main_t *vtm = vlib_get_thread_main ();
+  http_server_main_t *hsm = &http_server_main;
+  u32 num_threads;
+  vlib_node_t *n;
+
+  num_threads = 1 /* main thread */  + vtm->n_threads;
+  vec_validate (hsm->vpp_queue, num_threads - 1);
+  vec_validate (hsm->sessions, num_threads - 1);
+  vec_validate (hsm->session_to_http_session, num_threads - 1);
+
+  clib_rwlock_init (&hsm->sessions_lock);
+  clib_spinlock_init (&hsm->tw_lock);
+
+  if (http_server_attach ())
+    {
+      clib_warning ("failed to attach server");
+      return -1;
+    }
+  if (http_server_listen ())
+    {
+      clib_warning ("failed to start listening");
+      return -1;
+    }
+
+  /* Init timer wheel and process */
+  tw_timer_wheel_init_2t_1w_2048sl (&hsm->tw, http_expired_timers_dispatch,
+				    1 /* timer interval */ , ~0);
+  vlib_node_set_state (vm, http_server_process_node.index,
+		       VLIB_NODE_STATE_POLLING);
+  n = vlib_get_node (vm, http_server_process_node.index);
+  vlib_start_process (vm, n->runtime_index);
+
+  return 0;
+}
+
+/*
+ * CLI handler for "test http server": parse options, optionally switch
+ * to static mode (pre-rendered page), enable the session layer and start
+ * the server.
+ */
+static clib_error_t *
+http_server_create_command_fn (vlib_main_t * vm,
+			       unformat_input_t * input,
+			       vlib_cli_command_t * cmd)
+{
+  http_server_main_t *hsm = &http_server_main;
+  unformat_input_t _line_input, *line_input = &_line_input;
+  clib_error_t *error = 0;
+  u64 seg_size;
+  u8 *html;
+  int rv;
+
+  hsm->prealloc_fifos = 0;
+  hsm->private_segment_size = 0;
+  hsm->fifo_size = 0;
+  hsm->is_static = 0;
+
+  /* Get a line of input. */
+  if (!unformat_user (input, unformat_line_input, line_input))
+    goto start_server;
+
+  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+    {
+      if (unformat (line_input, "static"))
+	hsm->is_static = 1;
+      else
+	if (unformat (line_input, "prealloc-fifos %d", &hsm->prealloc_fifos))
+	;
+      else if (unformat (line_input, "private-segment-size %U",
+			 unformat_memory_size, &seg_size))
+	{
+	  if (seg_size >= 0x100000000ULL)
+	    {
+	      vlib_cli_output (vm, "private segment size %llu, too large",
+			       seg_size);
+	      /* BUG FIX: free line_input before the early return */
+	      unformat_free (line_input);
+	      return 0;
+	    }
+	  hsm->private_segment_size = seg_size;
+	}
+      else if (unformat (line_input, "fifo-size %d", &hsm->fifo_size))
+	hsm->fifo_size <<= 10;
+      else if (unformat (line_input, "uri %s", &hsm->uri))
+	;
+      else
+	{
+	  /* BUG FIX: format the error while line_input is still valid,
+	   * then free it — the original leaked line_input here */
+	  error = clib_error_return (0, "unknown input `%U'",
+				     format_unformat_error, line_input);
+	  unformat_free (line_input);
+	  return error;
+	}
+    }
+  unformat_free (line_input);
+
+start_server:
+
+  if (hsm->my_client_index != (u32) ~ 0)
+    return clib_error_return (0, "test http server is already running");
+
+  vnet_session_enable_disable (vm, 1 /* turn on TCP, etc. */ );
+
+  if (hsm->is_static)
+    {
+      /* Static mode: swap in the static rx callback and pre-render the
+       * "200 OK" and page replies once */
+      http_server_session_cb_vft.builtin_app_rx_callback =
+	http_server_rx_callback_static;
+      html = format (0, html_header_static);
+      static_http = format (0, http_response, vec_len (html), html);
+      static_ok = format (0, http_ok);
+    }
+  rv = http_server_create (vm);
+  switch (rv)
+    {
+    case 0:
+      break;
+    default:
+      return clib_error_return (0, "server_create returned %d", rv);
+    }
+  return 0;
+}
+
+/* *INDENT-OFF* */
+/* CLI: "test http server [static] [uri <uri>] [fifo-size <n>] ..." */
+VLIB_CLI_COMMAND (http_server_create_command, static) =
+{
+  .path = "test http server",
+  .short_help = "test http server",
+  .function = http_server_create_command_fn,
+};
+/* *INDENT-ON* */
+
+/* Plugin init: stash vlib_main and mark the server as not yet running. */
+static clib_error_t *
+http_server_main_init (vlib_main_t * vm)
+{
+  http_server_main_t *hsm = &http_server_main;
+
+  hsm->my_client_index = ~0;
+  hsm->vlib_main = vm;
+  return 0;
+}
+
+VLIB_INIT_FUNCTION (http_server_main_init);
+
+/*
+* fd.io coding-style-patch-verification: ON
+*
+* Local Variables:
+* eval: (c-set-style "gnu")
+* End:
+*/
diff --git a/src/plugins/hs_apps/proxy.c b/src/plugins/hs_apps/proxy.c
new file mode 100644
index 00000000000..31dbfc5969f
--- /dev/null
+++ b/src/plugins/hs_apps/proxy.c
@@ -0,0 +1,626 @@
+/*
+* Copyright (c) 2017-2019 Cisco and/or its affiliates.
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at:
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+#include <vnet/vnet.h>
+#include <vlibmemory/api.h>
+#include <vnet/session/application.h>
+#include <vnet/session/application_interface.h>
+#include <hs_apps/proxy.h>
+
+proxy_main_t proxy_main;
+
+/* Flat (pointer-free) copy of a connect request, safe to ship to the
+ * main thread over the binary-API RPC channel. */
+typedef struct
+{
+  char uri[128];		/* NUL-terminated destination uri */
+  u32 app_index;		/* active-open app performing the connect */
+  u32 api_context;		/* proxy session pool index, echoed back */
+} proxy_connect_args_t;
+
+/* Main-thread RPC target: rebuild the connect args and open the
+ * active-open side of the proxy session. */
+static void
+proxy_cb_fn (void *data, u32 data_len)
+{
+  proxy_connect_args_t *pa = (proxy_connect_args_t *) data;
+  vnet_connect_args_t a;
+
+  /* Consistency: use clib_memset like the rest of this file */
+  clib_memset (&a, 0, sizeof (a));
+  a.api_context = pa->api_context;
+  a.app_index = pa->app_index;
+  a.uri = pa->uri;
+  vnet_connect_uri (&a);
+}
+
+/*
+ * Issue a connect. Session-layer connects must run on the main thread,
+ * so workers marshal the args into a flat struct and RPC them over.
+ */
+static void
+proxy_call_main_thread (vnet_connect_args_t * a)
+{
+  if (vlib_get_thread_index () == 0)
+    {
+      vnet_connect_uri (a);
+    }
+  else
+    {
+      proxy_connect_args_t args;
+      u32 uri_len;
+
+      args.api_context = a->api_context;
+      args.app_index = a->app_index;
+      /* BUG FIX: bound the copy to the 128-byte uri buffer and make sure
+       * it stays NUL-terminated — a long client-uri would otherwise
+       * overflow the stack */
+      uri_len = clib_min (vec_len (a->uri), sizeof (args.uri) - 1);
+      clib_memcpy (args.uri, a->uri, uri_len);
+      args.uri[uri_len] = 0;
+      vl_api_rpc_call_main_thread (proxy_cb_fn, (u8 *) & args, sizeof (args));
+    }
+}
+
+/*
+ * Tear down a proxy session pair given either of its two legs. Looks up
+ * the proxy_session_t from the appropriate hash (server or active-open),
+ * resolves the opposite leg, frees the pool entry under the lock, then
+ * disconnects both vpp sessions outside the lock.
+ */
+static void
+delete_proxy_session (session_t * s, int is_active_open)
+{
+  proxy_main_t *pm = &proxy_main;
+  proxy_session_t *ps = 0;
+  vnet_disconnect_args_t _a, *a = &_a;
+  session_t *active_open_session = 0;
+  session_t *server_session = 0;
+  uword *p;
+  u64 handle;
+
+  handle = session_handle (s);
+
+  clib_spinlock_lock_if_init (&pm->sessions_lock);
+  if (is_active_open)
+    {
+      active_open_session = s;
+
+      p = hash_get (pm->proxy_session_by_active_open_handle, handle);
+      if (p == 0)
+	{
+	  clib_warning ("proxy session for %s handle %lld (%llx) AWOL",
+			is_active_open ? "active open" : "server",
+			handle, handle);
+	}
+      else if (!pool_is_free_index (pm->sessions, p[0]))
+	{
+	  ps = pool_elt_at_index (pm->sessions, p[0]);
+	  if (ps->vpp_server_handle != ~0)
+	    server_session = session_get_from_handle (ps->vpp_server_handle);
+	  else
+	    server_session = 0;
+	}
+    }
+  else
+    {
+      server_session = s;
+
+      p = hash_get (pm->proxy_session_by_server_handle, handle);
+      if (p == 0)
+	{
+	  clib_warning ("proxy session for %s handle %lld (%llx) AWOL",
+			is_active_open ? "active open" : "server",
+			handle, handle);
+	}
+      else if (!pool_is_free_index (pm->sessions, p[0]))
+	{
+	  ps = pool_elt_at_index (pm->sessions, p[0]);
+	  if (ps->vpp_active_open_handle != ~0)
+	    active_open_session = session_get_from_handle
+	      (ps->vpp_active_open_handle);
+	  else
+	    active_open_session = 0;
+	}
+    }
+
+  if (ps)
+    {
+      /* Poison the element in debug images to catch use-after-free */
+      if (CLIB_DEBUG > 0)
+	clib_memset (ps, 0xFE, sizeof (*ps));
+      pool_put (pm->sessions, ps);
+    }
+
+  clib_spinlock_unlock_if_init (&pm->sessions_lock);
+
+  if (active_open_session)
+    {
+      a->handle = session_handle (active_open_session);
+      a->app_index = pm->active_open_app_index;
+      hash_unset (pm->proxy_session_by_active_open_handle,
+		  session_handle (active_open_session));
+      vnet_disconnect_session (a);
+    }
+
+  if (server_session)
+    {
+      a->handle = session_handle (server_session);
+      a->app_index = pm->server_app_index;
+      hash_unset (pm->proxy_session_by_server_handle,
+		  session_handle (server_session));
+      vnet_disconnect_session (a);
+    }
+}
+
+/* Accept callback for the server-facing side of the proxy: just mark the
+ * session ready. The real session state is built lazily on first rx. */
+static int
+proxy_accept_callback (session_t * s)
+{
+  proxy_main_t *pm = &proxy_main;
+
+  s->session_state = SESSION_STATE_READY;
+
+  clib_spinlock_lock_if_init (&pm->sessions_lock);
+  /* BUG FIX: the lock was acquired and never released, deadlocking the
+   * next locker (e.g. proxy_rx_callback). Keep the acquire/release pair
+   * so accept still serializes against in-flight table updates. */
+  clib_spinlock_unlock_if_init (&pm->sessions_lock);
+
+  return 0;
+}
+
+/* Server-side leg closed by the peer: tear down the whole proxy pair. */
+static void
+proxy_disconnect_callback (session_t * s)
+{
+  delete_proxy_session (s, 0 /* is_active_open */ );
+}
+
+/* Server-side leg reset by transport: log it and tear down the pair. */
+static void
+proxy_reset_callback (session_t * s)
+{
+  clib_warning ("Reset session %U", format_session, s, 2);
+  delete_proxy_session (s, 0 /* is_active_open */ );
+}
+
+/* Server-side app never connects out; this callback should not fire. */
+static int
+proxy_connected_callback (u32 app_index, u32 api_context,
+			  session_t * s, u8 is_fail)
+{
+  clib_warning ("called...");
+  return -1;
+}
+
+/* Builtin app: no API segments expected; this callback should not fire. */
+static int
+proxy_add_segment_callback (u32 client_index, u64 segment_handle)
+{
+  clib_warning ("called...");
+  return -1;
+}
+
+/*
+ * Rx on the server-facing leg. Two cases:
+ *  - known session: the fifos are cross-connected, so fresh rx data is
+ *    simply signalled as tx work to the active-open leg;
+ *  - first rx: allocate a proxy_session_t, register it by server handle
+ *    and kick off the active-open connect (on the main thread).
+ */
+static int
+proxy_rx_callback (session_t * s)
+{
+  u32 max_dequeue;
+  int actual_transfer __attribute__ ((unused));
+  svm_fifo_t *tx_fifo, *rx_fifo;
+  proxy_main_t *pm = &proxy_main;
+  u32 thread_index = vlib_get_thread_index ();
+  vnet_connect_args_t _a, *a = &_a;
+  proxy_session_t *ps;
+  int proxy_index;
+  uword *p;
+  svm_fifo_t *active_open_tx_fifo;
+
+  ASSERT (s->thread_index == thread_index);
+
+  clib_spinlock_lock_if_init (&pm->sessions_lock);
+  p = hash_get (pm->proxy_session_by_server_handle, session_handle (s));
+
+  if (PREDICT_TRUE (p != 0))
+    {
+      clib_spinlock_unlock_if_init (&pm->sessions_lock);
+      /* Fifos are shared: our rx fifo is the active-open leg's tx fifo */
+      active_open_tx_fifo = s->rx_fifo;
+
+      /*
+       * Send event for active open tx fifo
+       */
+      if (svm_fifo_set_event (active_open_tx_fifo))
+	{
+	  u32 ao_thread_index = active_open_tx_fifo->master_thread_index;
+	  u32 ao_session_index = active_open_tx_fifo->master_session_index;
+	  if (session_send_io_evt_to_thread_custom (&ao_session_index,
+						    ao_thread_index,
+						    SESSION_IO_EVT_TX))
+	    clib_warning ("failed to enqueue tx evt");
+	}
+    }
+  else
+    {
+      /* NOTE(review): this path returns at "max_dequeue == 0" while still
+       * holding sessions_lock from above — verify lock discipline */
+      rx_fifo = s->rx_fifo;
+      tx_fifo = s->tx_fifo;
+
+      ASSERT (rx_fifo->master_thread_index == thread_index);
+      ASSERT (tx_fifo->master_thread_index == thread_index);
+
+      max_dequeue = svm_fifo_max_dequeue_cons (s->rx_fifo);
+
+      if (PREDICT_FALSE (max_dequeue == 0))
+	return 0;
+
+      /* Peek (don't drain) so the data is still there for forwarding */
+      actual_transfer = svm_fifo_peek (rx_fifo, 0 /* relative_offset */ ,
+				       max_dequeue, pm->rx_buf[thread_index]);
+
+      /* $$$ your message in this space: parse url, etc. */
+
+      clib_memset (a, 0, sizeof (*a));
+
+      clib_spinlock_lock_if_init (&pm->sessions_lock);
+      pool_get (pm->sessions, ps);
+      clib_memset (ps, 0, sizeof (*ps));
+      ps->server_rx_fifo = rx_fifo;
+      ps->server_tx_fifo = tx_fifo;
+      ps->vpp_server_handle = session_handle (s);
+
+      proxy_index = ps - pm->sessions;
+
+      hash_set (pm->proxy_session_by_server_handle, ps->vpp_server_handle,
+		proxy_index);
+
+      clib_spinlock_unlock_if_init (&pm->sessions_lock);
+
+      /* Connect out; proxy_index comes back as opaque in the
+       * connected callback */
+      a->uri = (char *) pm->client_uri;
+      a->api_context = proxy_index;
+      a->app_index = pm->active_open_app_index;
+      proxy_call_main_thread (a);
+    }
+
+  return 0;
+}
+
+/* Session-layer callbacks for the server-facing proxy app. */
+static session_cb_vft_t proxy_session_cb_vft = {
+  .session_accept_callback = proxy_accept_callback,
+  .session_disconnect_callback = proxy_disconnect_callback,
+  .session_connected_callback = proxy_connected_callback,
+  .add_segment_callback = proxy_add_segment_callback,
+  .builtin_app_rx_callback = proxy_rx_callback,
+  .session_reset_callback = proxy_reset_callback
+};
+
+/*
+ * Active-open connect completed: cross-connect the new session onto the
+ * server leg's fifos (server rx becomes our tx and vice versa), bump the
+ * fifo refcounts, index the session by handle, and kick tx so any data
+ * buffered while connecting gets forwarded.
+ */
+static int
+active_open_connected_callback (u32 app_index, u32 opaque,
+				session_t * s, u8 is_fail)
+{
+  proxy_main_t *pm = &proxy_main;
+  proxy_session_t *ps;
+  u8 thread_index = vlib_get_thread_index ();
+
+  if (is_fail)
+    {
+      clib_warning ("connection %d failed!", opaque);
+      return 0;
+    }
+
+  /*
+   * Setup proxy session handle.
+   */
+  clib_spinlock_lock_if_init (&pm->sessions_lock);
+
+  /* opaque is the proxy session pool index set at connect time */
+  ps = pool_elt_at_index (pm->sessions, opaque);
+  ps->vpp_active_open_handle = session_handle (s);
+
+  s->tx_fifo = ps->server_rx_fifo;
+  s->rx_fifo = ps->server_tx_fifo;
+
+  /*
+   * Reset the active-open tx-fifo master indices so the active-open session
+   * will receive data, etc.
+   */
+  s->tx_fifo->master_session_index = s->session_index;
+  s->tx_fifo->master_thread_index = s->thread_index;
+
+  /*
+   * Account for the active-open session's use of the fifos
+   * so they won't disappear until the last session which uses
+   * them disappears
+   */
+  s->tx_fifo->refcnt++;
+  s->rx_fifo->refcnt++;
+
+  hash_set (pm->proxy_session_by_active_open_handle,
+	    ps->vpp_active_open_handle, opaque);
+
+  clib_spinlock_unlock_if_init (&pm->sessions_lock);
+
+  /*
+   * Send event for active open tx fifo
+   */
+  ASSERT (s->thread_index == thread_index);
+  if (svm_fifo_set_event (s->tx_fifo))
+    session_send_io_evt_to_thread (s->tx_fifo, SESSION_IO_EVT_TX);
+
+  return 0;
+}
+
+/* Active-open leg reset by transport: tear down the proxy pair. */
+static void
+active_open_reset_callback (session_t * s)
+{
+  delete_proxy_session (s, 1 /* is_active_open */ );
+}
+
+/* Accept callback for the active-open app: nothing to do (it only
+ * connects out), but the vft slot must be populated. */
+static int
+active_open_create_callback (session_t * s)
+{
+  return 0;
+}
+
+/* Active-open leg closed by the peer: tear down the proxy pair. */
+static void
+active_open_disconnect_callback (session_t * s)
+{
+  delete_proxy_session (s, 1 /* is_active_open */ );
+}
+
+/*
+ * Rx on the active-open leg. Because the fifos are cross-connected, our
+ * rx fifo is the server leg's tx fifo — just signal tx work to the
+ * server session's owning thread.
+ */
+static int
+active_open_rx_callback (session_t * s)
+{
+  svm_fifo_t *proxy_tx_fifo;
+
+  proxy_tx_fifo = s->rx_fifo;
+
+  /*
+   * Send event for server tx fifo
+   */
+  if (svm_fifo_set_event (proxy_tx_fifo))
+    {
+      u8 thread_index = proxy_tx_fifo->master_thread_index;
+      u32 session_index = proxy_tx_fifo->master_session_index;
+      return session_send_io_evt_to_thread_custom (&session_index,
+						   thread_index,
+						   SESSION_IO_EVT_TX);
+    }
+
+  return 0;
+}
+
+/* *INDENT-OFF* */
+/* Session-layer callbacks for the client-facing (active open) app. */
+static session_cb_vft_t active_open_clients = {
+  .session_reset_callback = active_open_reset_callback,
+  .session_connected_callback = active_open_connected_callback,
+  .session_accept_callback = active_open_create_callback,
+  .session_disconnect_callback = active_open_disconnect_callback,
+  .builtin_app_rx_callback = active_open_rx_callback
+};
+/* *INDENT-ON* */
+
+
+/* Create internal binary-API clients for the two proxy apps so they can
+ * talk to the API over the shared-memory input queue. */
+static void
+create_api_loopbacks (vlib_main_t * vm)
+{
+  proxy_main_t *pm = &proxy_main;
+  api_main_t *am = &api_main;
+  vl_shmem_hdr_t *shmem_hdr;
+
+  shmem_hdr = am->shmem_hdr;
+  pm->vl_input_queue = shmem_hdr->vl_input_queue;
+  pm->server_client_index =
+    vl_api_memclnt_create_internal ("proxy_server", pm->vl_input_queue);
+  pm->active_open_client_index =
+    vl_api_memclnt_create_internal ("proxy_active_open", pm->vl_input_queue);
+}
+
+/*
+ * Attach the server-facing proxy app to the session layer.
+ * Returns 0 on success, -1 on attach failure.
+ */
+static int
+proxy_server_attach ()
+{
+  proxy_main_t *pm = &proxy_main;
+  u64 options[APP_OPTIONS_N_OPTIONS];
+  vnet_app_attach_args_t _a, *a = &_a;
+  u32 segment_size = 512 << 20;
+
+  clib_memset (a, 0, sizeof (*a));
+  clib_memset (options, 0, sizeof (options));
+
+  if (pm->private_segment_size)
+    segment_size = pm->private_segment_size;
+  a->api_client_index = pm->server_client_index;
+  a->session_cb_vft = &proxy_session_cb_vft;
+  a->options = options;
+  a->options[APP_OPTIONS_SEGMENT_SIZE] = segment_size;
+  a->options[APP_OPTIONS_RX_FIFO_SIZE] = pm->fifo_size;
+  a->options[APP_OPTIONS_TX_FIFO_SIZE] = pm->fifo_size;
+  a->options[APP_OPTIONS_PRIVATE_SEGMENT_COUNT] = pm->private_segment_count;
+  /* Simplified: "x ? x : 0" is just x */
+  a->options[APP_OPTIONS_PREALLOC_FIFO_PAIRS] = pm->prealloc_fifos;
+
+  a->options[APP_OPTIONS_FLAGS] = APP_OPTIONS_FLAGS_IS_BUILTIN;
+
+  if (vnet_application_attach (a))
+    {
+      clib_warning ("failed to attach server");
+      return -1;
+    }
+  pm->server_app_index = a->app_index;
+
+  return 0;
+}
+
+/*
+ * Attach the client-facing (active open) proxy app to the session layer.
+ * Returns 0 on success, -1 on attach failure.
+ */
+static int
+active_open_attach (void)
+{
+  proxy_main_t *pm = &proxy_main;
+  vnet_app_attach_args_t _a, *a = &_a;
+  /* BUG FIX: size the array with APP_OPTIONS_N_OPTIONS (as in
+   * proxy_server_attach) rather than a magic 16, which would overflow
+   * if the options enum ever grows past 16 entries */
+  u64 options[APP_OPTIONS_N_OPTIONS];
+
+  clib_memset (a, 0, sizeof (*a));
+  clib_memset (options, 0, sizeof (options));
+
+  a->api_client_index = pm->active_open_client_index;
+  a->session_cb_vft = &active_open_clients;
+
+  options[APP_OPTIONS_ACCEPT_COOKIE] = 0x12345678;
+  options[APP_OPTIONS_SEGMENT_SIZE] = 512 << 20;
+  options[APP_OPTIONS_RX_FIFO_SIZE] = pm->fifo_size;
+  options[APP_OPTIONS_TX_FIFO_SIZE] = pm->fifo_size;
+  options[APP_OPTIONS_PRIVATE_SEGMENT_COUNT] = pm->private_segment_count;
+  /* Simplified: "x ? x : 0" is just x */
+  options[APP_OPTIONS_PREALLOC_FIFO_PAIRS] = pm->prealloc_fifos;
+
+  options[APP_OPTIONS_FLAGS] = APP_OPTIONS_FLAGS_IS_BUILTIN
+    | APP_OPTIONS_FLAGS_IS_PROXY;
+
+  a->options = options;
+
+  if (vnet_application_attach (a))
+    return -1;
+
+  pm->active_open_app_index = a->app_index;
+
+  return 0;
+}
+
+/* Start the server-facing app listening on the configured server uri. */
+static int
+proxy_server_listen ()
+{
+  proxy_main_t *pm = &proxy_main;
+  vnet_listen_args_t _a, *a = &_a;
+  clib_memset (a, 0, sizeof (*a));
+  a->app_index = pm->server_app_index;
+  a->uri = (char *) pm->server_uri;
+  return vnet_bind_uri (a);
+}
+
+/*
+ * Bring the proxy up: create internal API clients on first use, size the
+ * per-thread buffers/queues, attach both apps and start listening.
+ * Returns 0 on success, -1 on any failure.
+ */
+static int
+proxy_server_create (vlib_main_t * vm)
+{
+  proxy_main_t *pm = &proxy_main;
+  vlib_thread_main_t *vtm = vlib_get_thread_main ();
+  u32 num_threads;
+  int i;
+
+  if (pm->server_client_index == (u32) ~ 0)
+    create_api_loopbacks (vm);
+
+  num_threads = 1 /* main thread */  + vtm->n_threads;
+  vec_validate (proxy_main.server_event_queue, num_threads - 1);
+  vec_validate (proxy_main.active_open_event_queue, num_threads - 1);
+  vec_validate (pm->rx_buf, num_threads - 1);
+
+  /* Per-thread staging buffer used by proxy_rx_callback's peek */
+  for (i = 0; i < num_threads; i++)
+    vec_validate (pm->rx_buf[i], pm->rcv_buffer_size);
+
+  if (proxy_server_attach ())
+    {
+      clib_warning ("failed to attach server app");
+      return -1;
+    }
+  if (proxy_server_listen ())
+    {
+      clib_warning ("failed to start listening");
+      return -1;
+    }
+  if (active_open_attach ())
+    {
+      clib_warning ("failed to attach active open app");
+      return -1;
+    }
+
+  for (i = 0; i < num_threads; i++)
+    {
+      pm->active_open_event_queue[i] = session_main_get_vpp_event_queue (i);
+
+      ASSERT (pm->active_open_event_queue[i]);
+
+      pm->server_event_queue[i] = session_main_get_vpp_event_queue (i);
+    }
+
+  return 0;
+}
+
+/*
+ * CLI handler for "test proxy server": parse options, fill in defaults
+ * for the server/client uris, enable the session layer and start the
+ * proxy.
+ */
+static clib_error_t *
+proxy_server_create_command_fn (vlib_main_t * vm, unformat_input_t * input,
+				vlib_cli_command_t * cmd)
+{
+  proxy_main_t *pm = &proxy_main;
+  char *default_server_uri = "tcp://0.0.0.0/23";
+  char *default_client_uri = "tcp://6.0.2.2/23";
+  int rv;
+  u64 tmp;
+
+  pm->fifo_size = 64 << 10;
+  pm->rcv_buffer_size = 1024;
+  pm->prealloc_fifos = 0;
+  pm->private_segment_count = 0;
+  pm->private_segment_size = 0;
+  pm->server_uri = 0;
+  /* BUG FIX: reset client_uri with the rest of the config, otherwise a
+   * second invocation silently reuses the previous run's uri */
+  pm->client_uri = 0;
+
+  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+    {
+      if (unformat (input, "fifo-size %d", &pm->fifo_size))
+	pm->fifo_size <<= 10;
+      else if (unformat (input, "rcv-buf-size %d", &pm->rcv_buffer_size))
+	;
+      else if (unformat (input, "prealloc-fifos %d", &pm->prealloc_fifos))
+	;
+      else if (unformat (input, "private-segment-count %d",
+			 &pm->private_segment_count))
+	;
+      else if (unformat (input, "private-segment-size %U",
+			 unformat_memory_size, &tmp))
+	{
+	  if (tmp >= 0x100000000ULL)
+	    return clib_error_return
+	      (0, "private segment size %lld (%llu) too large", tmp, tmp);
+	  pm->private_segment_size = tmp;
+	}
+      else if (unformat (input, "server-uri %s", &pm->server_uri))
+	/* BUG FIX: NUL-terminate like client-uri below; unformat %s
+	 * vectors are not terminated and vnet_bind_uri expects a
+	 * C string */
+	pm->server_uri = format (0, "%s%c", pm->server_uri, 0);
+      else if (unformat (input, "client-uri %s", &pm->client_uri))
+	pm->client_uri = format (0, "%s%c", pm->client_uri, 0);
+      else
+	return clib_error_return (0, "unknown input `%U'",
+				  format_unformat_error, input);
+    }
+
+  if (!pm->server_uri)
+    {
+      clib_warning ("No server-uri provided, Using default: %s",
+		    default_server_uri);
+      pm->server_uri = format (0, "%s%c", default_server_uri, 0);
+    }
+  if (!pm->client_uri)
+    {
+      clib_warning ("No client-uri provided, Using default: %s",
+		    default_client_uri);
+      pm->client_uri = format (0, "%s%c", default_client_uri, 0);
+    }
+
+  vnet_session_enable_disable (vm, 1 /* turn on session and transport */ );
+
+  rv = proxy_server_create (vm);
+  switch (rv)
+    {
+    case 0:
+      break;
+    default:
+      return clib_error_return (0, "server_create returned %d", rv);
+    }
+
+  return 0;
+}
+
+/* *INDENT-OFF* */
+/* CLI: "test proxy server ..." — see short_help for options. */
+VLIB_CLI_COMMAND (proxy_create_command, static) =
+{
+  .path = "test proxy server",
+  .short_help = "test proxy server [server-uri <tcp://ip/port>]"
+      "[client-uri <tcp://ip/port>][fifo-size <nn>][rcv-buf-size <nn>]"
+      "[prealloc-fifos <nn>][private-segment-size <mem>]"
+      "[private-segment-count <nn>]",
+  .function = proxy_server_create_command_fn,
+};
+/* *INDENT-ON* */
+
+/* Plugin init: mark API clients as not-yet-created and set up the two
+ * session-handle -> proxy-session hash tables. */
+clib_error_t *
+proxy_main_init (vlib_main_t * vm)
+{
+  proxy_main_t *pm = &proxy_main;
+  pm->server_client_index = ~0;
+  pm->active_open_client_index = ~0;
+  pm->proxy_session_by_active_open_handle = hash_create (0, sizeof (uword));
+  pm->proxy_session_by_server_handle = hash_create (0, sizeof (uword));
+
+  return 0;
+}
+
+VLIB_INIT_FUNCTION (proxy_main_init);
+
+/*
+* fd.io coding-style-patch-verification: ON
+*
+* Local Variables:
+* eval: (c-set-style "gnu")
+* End:
+*/
diff --git a/src/plugins/hs_apps/proxy.h b/src/plugins/hs_apps/proxy.h
new file mode 100644
index 00000000000..9b80b3d1105
--- /dev/null
+++ b/src/plugins/hs_apps/proxy.h
@@ -0,0 +1,92 @@
+
+/*
+ * proxy.h - built-in vpp proxy application header file
+ *
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef __included_proxy_h__
+#define __included_proxy_h__
+
+#include <vnet/vnet.h>
+#include <vnet/ip/ip.h>
+
+#include <vppinfra/hash.h>
+#include <vppinfra/error.h>
+#include <vnet/session/session.h>
+#include <vnet/session/application_interface.h>
+
+typedef struct
+{
+  svm_fifo_t *server_rx_fifo;	/**< rx fifo of the server-side session */
+  svm_fifo_t *server_tx_fifo;	/**< tx fifo of the server-side session */
+
+  u64 vpp_server_handle;	/**< handle of the accepted (server) session */
+  u64 vpp_active_open_handle;	/**< handle of the active-open session */
+} proxy_session_t;
+
+typedef struct
+{
+  svm_queue_t *vl_input_queue;	/**< vpe input queue */
+  /** per-thread vectors */
+  svm_msg_q_t **server_event_queue;	/**< server app event queue, per thread */
+  svm_msg_q_t **active_open_event_queue;	/**< active-open event queue, per thread */
+  u8 **rx_buf;			/**< intermediate rx buffers */
+
+  u32 cli_node_index;		/**< cli process node index */
+  u32 server_client_index;	/**< server API client handle */
+  u32 server_app_index;		/**< server app index */
+  u32 active_open_client_index;	/**< active open API client handle */
+  u32 active_open_app_index;	/**< active open index after attach */
+
+  uword *proxy_session_by_server_handle;	/**< lookup by server session handle */
+  uword *proxy_session_by_active_open_handle;	/**< lookup by active-open handle */
+
+  /*
+   * Configuration params
+   */
+  u8 *connect_uri;		/**< URI for active-open ("slave") connects */
+  u32 configured_segment_size;	/**< requested segment size */
+  u32 fifo_size;		/**< size of session rx/tx fifos */
+  u32 private_segment_count;	/**< Number of private fifo segs */
+  u32 private_segment_size;	/**< size of private fifo segs */
+  int rcv_buffer_size;		/**< intermediate rx buffer size */
+  u8 *server_uri;		/**< URI the proxy listens on (CLI or default) */
+  u8 *client_uri;		/**< URI for upstream connects (CLI or default) */
+
+  /*
+   * Test state variables
+   */
+  proxy_session_t *sessions;	/**< Session pool, shared */
+  clib_spinlock_t sessions_lock;	/**< protects sessions pool/hashes */
+  u32 **connection_index_by_thread;	/**< per-thread connection indices */
+  pthread_t client_thread_handle;	/**< client pthread, if spawned */
+
+  /*
+   * Flags
+   */
+  u8 is_init;			/**< presumably set once attached — TODO confirm */
+  u8 prealloc_fifos;		/**< Request fifo preallocation */
+} proxy_main_t;
+
+extern proxy_main_t proxy_main;
+
+#endif /* __included_proxy_h__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */