aboutsummaryrefslogtreecommitdiffstats
path: root/src/plugins/hs_apps
diff options
context:
space:
mode:
Diffstat (limited to 'src/plugins/hs_apps')
-rw-r--r--src/plugins/hs_apps/CMakeLists.txt9
-rw-r--r--src/plugins/hs_apps/echo_client.c1348
-rw-r--r--src/plugins/hs_apps/echo_client.h116
-rw-r--r--src/plugins/hs_apps/echo_server.c515
-rw-r--r--src/plugins/hs_apps/hs_apps.c2
-rw-r--r--src/plugins/hs_apps/hs_test.h212
-rw-r--r--src/plugins/hs_apps/http_cli.c683
-rw-r--r--src/plugins/hs_apps/http_cli.h27
-rw-r--r--src/plugins/hs_apps/http_client_cli.c575
-rw-r--r--src/plugins/hs_apps/http_server.c1004
-rw-r--r--src/plugins/hs_apps/http_tps.c839
-rw-r--r--src/plugins/hs_apps/proxy.c265
-rw-r--r--src/plugins/hs_apps/proxy.h29
-rw-r--r--src/plugins/hs_apps/sapi/vpp_echo.c184
-rw-r--r--src/plugins/hs_apps/sapi/vpp_echo_bapi.c40
-rw-r--r--src/plugins/hs_apps/sapi/vpp_echo_common.h15
-rw-r--r--src/plugins/hs_apps/sapi/vpp_echo_proto_quic.c2
-rw-r--r--src/plugins/hs_apps/sapi/vpp_echo_sapi.c330
-rw-r--r--src/plugins/hs_apps/vcl/sock_test_client.c160
-rw-r--r--src/plugins/hs_apps/vcl/sock_test_server.c74
-rw-r--r--src/plugins/hs_apps/vcl/vcl_test.h217
-rw-r--r--src/plugins/hs_apps/vcl/vcl_test_client.c731
-rw-r--r--src/plugins/hs_apps/vcl/vcl_test_protos.c109
-rw-r--r--src/plugins/hs_apps/vcl/vcl_test_server.c103
24 files changed, 5037 insertions, 2552 deletions
diff --git a/src/plugins/hs_apps/CMakeLists.txt b/src/plugins/hs_apps/CMakeLists.txt
index 1f474828b15..179c9c7a4c4 100644
--- a/src/plugins/hs_apps/CMakeLists.txt
+++ b/src/plugins/hs_apps/CMakeLists.txt
@@ -19,7 +19,9 @@ add_vpp_plugin(hs_apps
echo_client.c
echo_server.c
hs_apps.c
- http_server.c
+ http_cli.c
+ http_client_cli.c
+ http_tps.c
proxy.c
)
@@ -33,6 +35,7 @@ if(VPP_BUILD_HS_SAPI_APPS)
sapi/vpp_echo.c
sapi/vpp_echo_common.c
sapi/vpp_echo_bapi.c
+ sapi/vpp_echo_sapi.c
sapi/vpp_echo_proto_quic.c
sapi/vpp_echo_proto_tcp.c
sapi/vpp_echo_proto_udp.c
@@ -52,7 +55,7 @@ if(VPP_BUILD_VCL_TESTS)
)
add_vpp_executable(${test}
SOURCES "vcl/${test}.c"
- LINK_LIBRARIES vppcom pthread
+ LINK_LIBRARIES vppcom pthread ${EPOLL_LIB}
NO_INSTALL
)
endforeach()
@@ -65,7 +68,7 @@ if(VPP_BUILD_VCL_TESTS)
SOURCES
"vcl/${test}.c"
vcl/vcl_test_protos.c
- LINK_LIBRARIES vppcom pthread
+ LINK_LIBRARIES vppcom pthread ${EPOLL_LIB}
NO_INSTALL
)
endforeach()
diff --git a/src/plugins/hs_apps/echo_client.c b/src/plugins/hs_apps/echo_client.c
index d641a9ec14e..d1443e75e80 100644
--- a/src/plugins/hs_apps/echo_client.c
+++ b/src/plugins/hs_apps/echo_client.c
@@ -15,38 +15,69 @@
* limitations under the License.
*/
-#include <vnet/vnet.h>
-#include <vlibapi/api.h>
-#include <vlibmemory/api.h>
#include <hs_apps/echo_client.h>
-echo_client_main_t echo_client_main;
+static ec_main_t ec_main;
-#define ECHO_CLIENT_DBG (0)
-#define DBG(_fmt, _args...) \
- if (ECHO_CLIENT_DBG) \
- clib_warning (_fmt, ##_args)
+#define ec_err(_fmt, _args...) clib_warning (_fmt, ##_args);
+
+#define ec_dbg(_fmt, _args...) \
+ do \
+ { \
+ if (ec_main.cfg.verbose) \
+ ec_err (_fmt, ##_args); \
+ } \
+ while (0)
+
+#define ec_cli(_fmt, _args...) vlib_cli_output (vm, _fmt, ##_args)
static void
-signal_evt_to_cli_i (int *code)
+signal_evt_to_cli_i (void *codep)
{
- echo_client_main_t *ecm = &echo_client_main;
+ ec_main_t *ecm = &ec_main;
+ int code;
+
ASSERT (vlib_get_thread_index () == 0);
- vlib_process_signal_event (ecm->vlib_main, ecm->cli_node_index, *code, 0);
+ code = pointer_to_uword (codep);
+ vlib_process_signal_event (ecm->vlib_main, ecm->cli_node_index, code, 0);
}
static void
signal_evt_to_cli (int code)
{
if (vlib_get_thread_index () != 0)
- vl_api_rpc_call_main_thread (signal_evt_to_cli_i, (u8 *) & code,
- sizeof (code));
+ session_send_rpc_evt_to_thread_force (
+ 0, signal_evt_to_cli_i, uword_to_pointer ((uword) code, void *));
else
- signal_evt_to_cli_i (&code);
+ signal_evt_to_cli_i (uword_to_pointer ((uword) code, void *));
+}
+
+static inline ec_worker_t *
+ec_worker_get (u32 thread_index)
+{
+ return vec_elt_at_index (ec_main.wrk, thread_index);
+}
+
+static inline ec_session_t *
+ec_session_alloc (ec_worker_t *wrk)
+{
+ ec_session_t *ecs;
+
+ pool_get_zero (wrk->sessions, ecs);
+ ecs->session_index = ecs - wrk->sessions;
+ ecs->thread_index = wrk->thread_index;
+
+ return ecs;
+}
+
+static inline ec_session_t *
+ec_session_get (ec_worker_t *wrk, u32 ec_index)
+{
+ return pool_elt_at_index (wrk->sessions, ec_index);
}
static void
-send_data_chunk (echo_client_main_t * ecm, eclient_session_t * s)
+send_data_chunk (ec_main_t *ecm, ec_session_t *es)
{
u8 *test_data = ecm->connect_test_data;
int test_buf_len, test_buf_offset, rv;
@@ -54,27 +85,28 @@ send_data_chunk (echo_client_main_t * ecm, eclient_session_t * s)
test_buf_len = vec_len (test_data);
ASSERT (test_buf_len > 0);
- test_buf_offset = s->bytes_sent % test_buf_len;
- bytes_this_chunk = clib_min (test_buf_len - test_buf_offset,
- s->bytes_to_send);
+ test_buf_offset = es->bytes_sent % test_buf_len;
+ bytes_this_chunk =
+ clib_min (test_buf_len - test_buf_offset, es->bytes_to_send);
- if (!ecm->is_dgram)
+ if (!es->is_dgram)
{
if (ecm->no_copy)
{
- svm_fifo_t *f = s->data.tx_fifo;
+ svm_fifo_t *f = es->tx_fifo;
rv = clib_min (svm_fifo_max_enqueue_prod (f), bytes_this_chunk);
svm_fifo_enqueue_nocopy (f, rv);
session_send_io_evt_to_thread_custom (
- &f->shr->master_session_index, s->thread_index, SESSION_IO_EVT_TX);
+ &es->vpp_session_index, es->thread_index, SESSION_IO_EVT_TX);
}
else
- rv = app_send_stream (&s->data, test_data + test_buf_offset,
- bytes_this_chunk, 0);
+ rv =
+ app_send_stream ((app_session_t *) es, test_data + test_buf_offset,
+ bytes_this_chunk, 0);
}
else
{
- svm_fifo_t *f = s->data.tx_fifo;
+ svm_fifo_t *f = es->tx_fifo;
u32 max_enqueue = svm_fifo_max_enqueue_prod (f);
if (max_enqueue < sizeof (session_dgram_hdr_t))
@@ -85,7 +117,7 @@ send_data_chunk (echo_client_main_t * ecm, eclient_session_t * s)
if (ecm->no_copy)
{
session_dgram_hdr_t hdr;
- app_session_transport_t *at = &s->data.transport;
+ app_session_transport_t *at = &es->transport;
rv = clib_min (max_enqueue, bytes_this_chunk);
@@ -101,13 +133,15 @@ send_data_chunk (echo_client_main_t * ecm, eclient_session_t * s)
svm_fifo_enqueue (f, sizeof (hdr), (u8 *) & hdr);
svm_fifo_enqueue_nocopy (f, rv);
session_send_io_evt_to_thread_custom (
- &f->shr->master_session_index, s->thread_index, SESSION_IO_EVT_TX);
+ &es->vpp_session_index, es->thread_index, SESSION_IO_EVT_TX);
}
else
{
bytes_this_chunk = clib_min (bytes_this_chunk, max_enqueue);
- rv = app_send_dgram (&s->data, test_data + test_buf_offset,
- bytes_this_chunk, 0);
+ bytes_this_chunk = clib_min (bytes_this_chunk, 1460);
+ rv =
+ app_send_dgram ((app_session_t *) es, test_data + test_buf_offset,
+ bytes_this_chunk, 0);
}
}
@@ -115,45 +149,39 @@ send_data_chunk (echo_client_main_t * ecm, eclient_session_t * s)
if (rv > 0)
{
/* Account for it... */
- s->bytes_to_send -= rv;
- s->bytes_sent += rv;
+ es->bytes_to_send -= rv;
+ es->bytes_sent += rv;
- if (ECHO_CLIENT_DBG)
+ if (ecm->cfg.verbose)
{
- /* *INDENT-OFF* */
ELOG_TYPE_DECLARE (e) =
{
.format = "tx-enq: xfer %d bytes, sent %u remain %u",
.format_args = "i4i4i4",
};
- /* *INDENT-ON* */
struct
{
u32 data[3];
} *ed;
ed = ELOG_DATA (&vlib_global_main.elog_main, e);
ed->data[0] = rv;
- ed->data[1] = s->bytes_sent;
- ed->data[2] = s->bytes_to_send;
+ ed->data[1] = es->bytes_sent;
+ ed->data[2] = es->bytes_to_send;
}
}
}
static void
-receive_data_chunk (echo_client_main_t * ecm, eclient_session_t * s)
+receive_data_chunk (ec_worker_t *wrk, ec_session_t *es)
{
- svm_fifo_t *rx_fifo = s->data.rx_fifo;
- u32 thread_index = vlib_get_thread_index ();
+ ec_main_t *ecm = &ec_main;
+ svm_fifo_t *rx_fifo = es->rx_fifo;
int n_read, i;
- if (ecm->test_bytes)
+ if (ecm->cfg.test_bytes)
{
- if (!ecm->is_dgram)
- n_read = app_recv_stream (&s->data, ecm->rx_buf[thread_index],
- vec_len (ecm->rx_buf[thread_index]));
- else
- n_read = app_recv_dgram (&s->data, ecm->rx_buf[thread_index],
- vec_len (ecm->rx_buf[thread_index]));
+ n_read =
+ app_recv ((app_session_t *) es, wrk->rx_buf, vec_len (wrk->rx_buf));
}
else
{
@@ -163,15 +191,13 @@ receive_data_chunk (echo_client_main_t * ecm, eclient_session_t * s)
if (n_read > 0)
{
- if (ECHO_CLIENT_DBG)
+ if (ecm->cfg.verbose)
{
- /* *INDENT-OFF* */
ELOG_TYPE_DECLARE (e) =
{
.format = "rx-deq: %d bytes",
.format_args = "i4",
};
- /* *INDENT-ON* */
struct
{
u32 data[1];
@@ -180,102 +206,104 @@ receive_data_chunk (echo_client_main_t * ecm, eclient_session_t * s)
ed->data[0] = n_read;
}
- if (ecm->test_bytes)
+ if (ecm->cfg.test_bytes)
{
for (i = 0; i < n_read; i++)
{
- if (ecm->rx_buf[thread_index][i]
- != ((s->bytes_received + i) & 0xff))
+ if (wrk->rx_buf[i] != ((es->bytes_received + i) & 0xff))
{
- clib_warning ("read %d error at byte %lld, 0x%x not 0x%x",
- n_read, s->bytes_received + i,
- ecm->rx_buf[thread_index][i],
- ((s->bytes_received + i) & 0xff));
+ ec_err ("read %d error at byte %lld, 0x%x not 0x%x", n_read,
+ es->bytes_received + i, wrk->rx_buf[i],
+ ((es->bytes_received + i) & 0xff));
ecm->test_failed = 1;
}
}
}
- ASSERT (n_read <= s->bytes_to_receive);
- s->bytes_to_receive -= n_read;
- s->bytes_received += n_read;
+ ASSERT (n_read <= es->bytes_to_receive);
+ es->bytes_to_receive -= n_read;
+ es->bytes_received += n_read;
}
}
static uword
-echo_client_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
- vlib_frame_t * frame)
+ec_node_fn (vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
{
- echo_client_main_t *ecm = &echo_client_main;
- int my_thread_index = vlib_get_thread_index ();
- eclient_session_t *sp;
- int i;
- int delete_session;
- u32 *connection_indices;
- u32 *connections_this_batch;
- u32 nconnections_this_batch;
-
- connection_indices = ecm->connection_index_by_thread[my_thread_index];
- connections_this_batch =
- ecm->connections_this_batch_by_thread[my_thread_index];
-
- if ((ecm->run_test != ECHO_CLIENTS_RUNNING) ||
- ((vec_len (connection_indices) == 0)
- && vec_len (connections_this_batch) == 0))
+ u32 *conn_indices, *conns_this_batch, nconns_this_batch;
+ int thread_index = vm->thread_index, i, delete_session;
+ ec_main_t *ecm = &ec_main;
+ ec_worker_t *wrk;
+ ec_session_t *es;
+ session_t *s;
+
+ if (ecm->run_test != EC_RUNNING)
+ return 0;
+
+ wrk = ec_worker_get (thread_index);
+ conn_indices = wrk->conn_indices;
+ conns_this_batch = wrk->conns_this_batch;
+
+ if (((vec_len (conn_indices) == 0) && vec_len (conns_this_batch) == 0))
return 0;
/* Grab another pile of connections */
- if (PREDICT_FALSE (vec_len (connections_this_batch) == 0))
+ if (PREDICT_FALSE (vec_len (conns_this_batch) == 0))
{
- nconnections_this_batch =
- clib_min (ecm->connections_per_batch, vec_len (connection_indices));
-
- ASSERT (nconnections_this_batch > 0);
- vec_validate (connections_this_batch, nconnections_this_batch - 1);
- clib_memcpy_fast (connections_this_batch,
- connection_indices + vec_len (connection_indices)
- - nconnections_this_batch,
- nconnections_this_batch * sizeof (u32));
- _vec_len (connection_indices) -= nconnections_this_batch;
+ nconns_this_batch =
+ clib_min (ecm->connections_per_batch, vec_len (conn_indices));
+
+ ASSERT (nconns_this_batch > 0);
+ vec_validate (conns_this_batch, nconns_this_batch - 1);
+ clib_memcpy_fast (conns_this_batch,
+ conn_indices + vec_len (conn_indices) -
+ nconns_this_batch,
+ nconns_this_batch * sizeof (u32));
+ vec_dec_len (conn_indices, nconns_this_batch);
}
- if (PREDICT_FALSE (ecm->prev_conns != ecm->connections_per_batch
- && ecm->prev_conns == vec_len (connections_this_batch)))
+ /*
+ * Track progress
+ */
+ if (PREDICT_FALSE (ecm->prev_conns != ecm->connections_per_batch &&
+ ecm->prev_conns == vec_len (conns_this_batch)))
{
ecm->repeats++;
- ecm->prev_conns = vec_len (connections_this_batch);
+ ecm->prev_conns = vec_len (conns_this_batch);
if (ecm->repeats == 500000)
{
- clib_warning ("stuck clients");
+ ec_err ("stuck clients");
}
}
else
{
- ecm->prev_conns = vec_len (connections_this_batch);
+ ecm->prev_conns = vec_len (conns_this_batch);
ecm->repeats = 0;
}
- for (i = 0; i < vec_len (connections_this_batch); i++)
+ /*
+ * Handle connections in this batch
+ */
+ for (i = 0; i < vec_len (conns_this_batch); i++)
{
- delete_session = 1;
+ es = ec_session_get (wrk, conns_this_batch[i]);
- sp = pool_elt_at_index (ecm->sessions, connections_this_batch[i]);
+ delete_session = 1;
- if (sp->bytes_to_send > 0)
+ if (es->bytes_to_send > 0)
{
- send_data_chunk (ecm, sp);
+ send_data_chunk (ecm, es);
delete_session = 0;
}
- if (sp->bytes_to_receive > 0)
+
+ if (es->bytes_to_receive > 0)
{
delete_session = 0;
}
+
if (PREDICT_FALSE (delete_session == 1))
{
- session_t *s;
-
- clib_atomic_fetch_add (&ecm->tx_total, sp->bytes_sent);
- clib_atomic_fetch_add (&ecm->rx_total, sp->bytes_received);
- s = session_get_from_handle_if_valid (sp->vpp_session_handle);
+ clib_atomic_fetch_add (&ecm->tx_total, es->bytes_sent);
+ clib_atomic_fetch_add (&ecm->rx_total, es->bytes_received);
+ s = session_get_from_handle_if_valid (es->vpp_session_handle);
if (s)
{
@@ -284,205 +312,327 @@ echo_client_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
a->app_index = ecm->app_index;
vnet_disconnect_session (a);
- vec_delete (connections_this_batch, 1, i);
+ vec_delete (conns_this_batch, 1, i);
i--;
clib_atomic_fetch_add (&ecm->ready_connections, -1);
}
else
{
- clib_warning ("session AWOL?");
- vec_delete (connections_this_batch, 1, i);
+ ec_err ("session AWOL?");
+ vec_delete (conns_this_batch, 1, i);
}
/* Kick the debug CLI process */
if (ecm->ready_connections == 0)
{
- signal_evt_to_cli (2);
+ signal_evt_to_cli (EC_CLI_TEST_DONE);
}
}
}
- ecm->connection_index_by_thread[my_thread_index] = connection_indices;
- ecm->connections_this_batch_by_thread[my_thread_index] =
- connections_this_batch;
+ wrk->conn_indices = conn_indices;
+ wrk->conns_this_batch = conns_this_batch;
return 0;
}
-/* *INDENT-OFF* */
-VLIB_REGISTER_NODE (echo_clients_node) =
-{
- .function = echo_client_node_fn,
+VLIB_REGISTER_NODE (echo_clients_node) = {
+ .function = ec_node_fn,
.name = "echo-clients",
.type = VLIB_NODE_TYPE_INPUT,
.state = VLIB_NODE_STATE_DISABLED,
};
-/* *INDENT-ON* */
+
+static void
+ec_reset_runtime_config (ec_main_t *ecm)
+{
+ hs_test_cfg_init (&ecm->cfg);
+ ecm->n_clients = 1;
+ ecm->quic_streams = 1;
+ ecm->bytes_to_send = 8192;
+ ecm->echo_bytes = 0;
+ ecm->fifo_size = 64 << 10;
+ ecm->connections_per_batch = 1000;
+ ecm->private_segment_count = 0;
+ ecm->private_segment_size = 256 << 20;
+ ecm->test_failed = 0;
+ ecm->tls_engine = CRYPTO_ENGINE_OPENSSL;
+ ecm->no_copy = 0;
+ ecm->run_test = EC_STARTING;
+ ecm->ready_connections = 0;
+ ecm->connect_conn_index = 0;
+ ecm->rx_total = 0;
+ ecm->tx_total = 0;
+ ecm->barrier_acq_needed = 0;
+ ecm->prealloc_sessions = 0;
+ ecm->prealloc_fifos = 0;
+ ecm->appns_id = 0;
+ ecm->appns_secret = 0;
+ ecm->attach_flags = 0;
+ ecm->syn_timeout = 20.0;
+ ecm->test_timeout = 20.0;
+ vec_free (ecm->connect_uri);
+}
static int
-echo_clients_init (vlib_main_t * vm)
+ec_init (vlib_main_t *vm)
{
- echo_client_main_t *ecm = &echo_client_main;
- vlib_thread_main_t *vtm = vlib_get_thread_main ();
+ ec_main_t *ecm = &ec_main;
+ ec_worker_t *wrk;
u32 num_threads;
int i;
- num_threads = 1 /* main thread */ + vtm->n_threads;
+ ec_reset_runtime_config (ecm);
+
+ /* Store cli process node index for signaling */
+ ecm->cli_node_index = vlib_get_current_process (vm)->node_runtime.node_index;
+ ecm->vlib_main = vm;
+
+ if (vlib_num_workers ())
+ {
+ /* The request came over the binary api and the inband cli handler
+ * is not mp_safe. Drop the barrier to make sure the workers are not
+ * blocked.
+ */
+ if (vlib_thread_is_main_w_barrier ())
+ {
+ ecm->barrier_acq_needed = 1;
+ vlib_worker_thread_barrier_release (vm);
+ }
+ /*
+ * There's a good chance that both the client and the server echo
+ * apps will be enabled so make sure the session queue node polls on
+ * the main thread as connections will probably be established on it.
+ */
+ vlib_node_set_state (vm, session_queue_node.index,
+ VLIB_NODE_STATE_POLLING);
+ }
+
+ /* App init done only once */
+ if (ecm->app_is_init)
+ return 0;
+
/* Init test data. Big buffer */
vec_validate (ecm->connect_test_data, 4 * 1024 * 1024 - 1);
for (i = 0; i < vec_len (ecm->connect_test_data); i++)
ecm->connect_test_data[i] = i & 0xff;
- vec_validate (ecm->rx_buf, num_threads - 1);
- for (i = 0; i < num_threads; i++)
- vec_validate (ecm->rx_buf[i], vec_len (ecm->connect_test_data) - 1);
+ num_threads = 1 /* main thread */ + vlib_num_workers ();
+ vec_validate (ecm->wrk, num_threads - 1);
+ vec_foreach (wrk, ecm->wrk)
+ {
+ vec_validate (wrk->rx_buf, vec_len (ecm->connect_test_data) - 1);
+ wrk->thread_index = wrk - ecm->wrk;
+ wrk->vpp_event_queue =
+ session_main_get_vpp_event_queue (wrk->thread_index);
+ }
- ecm->is_init = 1;
+ ecm->app_is_init = 1;
- vec_validate (ecm->connection_index_by_thread, vtm->n_vlib_mains);
- vec_validate (ecm->connections_this_batch_by_thread, vtm->n_vlib_mains);
- vec_validate (ecm->quic_session_index_by_thread, vtm->n_vlib_mains);
- vec_validate (ecm->vpp_event_queue, vtm->n_vlib_mains);
+ vlib_worker_thread_barrier_sync (vm);
+ vnet_session_enable_disable (vm, 1 /* turn on session and transports */);
+
+ /* Turn on the builtin client input nodes */
+ foreach_vlib_main ()
+ vlib_node_set_state (this_vlib_main, echo_clients_node.index,
+ VLIB_NODE_STATE_POLLING);
+
+ vlib_worker_thread_barrier_release (vm);
return 0;
}
+static void
+ec_prealloc_sessions (ec_main_t *ecm)
+{
+ u32 sessions_per_wrk, n_wrks;
+ ec_worker_t *wrk;
+
+ n_wrks = vlib_num_workers () ? vlib_num_workers () : 1;
+
+ sessions_per_wrk = ecm->n_clients / n_wrks;
+ vec_foreach (wrk, ecm->wrk)
+ pool_init_fixed (wrk->sessions, 1.1 * sessions_per_wrk);
+}
+
+static void
+ec_worker_cleanup (ec_worker_t *wrk)
+{
+ pool_free (wrk->sessions);
+ vec_free (wrk->conn_indices);
+ vec_free (wrk->conns_this_batch);
+}
+
+static void
+ec_cleanup (ec_main_t *ecm)
+{
+ ec_worker_t *wrk;
+
+ vec_foreach (wrk, ecm->wrk)
+ ec_worker_cleanup (wrk);
+
+ vec_free (ecm->connect_uri);
+ vec_free (ecm->appns_id);
+
+ if (ecm->barrier_acq_needed)
+ vlib_worker_thread_barrier_sync (ecm->vlib_main);
+}
+
static int
-quic_echo_clients_qsession_connected_callback (u32 app_index, u32 api_context,
- session_t * s,
- session_error_t err)
+quic_ec_qsession_connected_callback (u32 app_index, u32 api_context,
+ session_t *s, session_error_t err)
{
- echo_client_main_t *ecm = &echo_client_main;
- vnet_connect_args_t *a = 0;
- int rv;
- u8 thread_index = vlib_get_thread_index ();
session_endpoint_cfg_t sep = SESSION_ENDPOINT_CFG_NULL;
+ ec_main_t *ecm = &ec_main;
+ vnet_connect_args_t _a, *a = &_a;
u32 stream_n;
- session_handle_t handle;
+ int rv;
- DBG ("QUIC Connection handle %d", session_handle (s));
+ ec_dbg ("QUIC Connection handle %d", session_handle (s));
- vec_validate (a, 1);
a->uri = (char *) ecm->connect_uri;
if (parse_uri (a->uri, &sep))
return -1;
- sep.parent_handle = handle = session_handle (s);
+ sep.parent_handle = session_handle (s);
for (stream_n = 0; stream_n < ecm->quic_streams; stream_n++)
{
clib_memset (a, 0, sizeof (*a));
a->app_index = ecm->app_index;
- a->api_context = -1 - api_context;
+ a->api_context = -2 - api_context;
clib_memcpy (&a->sep_ext, &sep, sizeof (sep));
- DBG ("QUIC opening stream %d", stream_n);
+ ec_dbg ("QUIC opening stream %d", stream_n);
if ((rv = vnet_connect (a)))
{
clib_error ("Stream session %d opening failed: %d", stream_n, rv);
return -1;
}
- DBG ("QUIC stream %d connected", stream_n);
+ ec_dbg ("QUIC stream %d connected", stream_n);
}
- /*
- * 's' is no longer valid, its underlying pool could have been moved in
- * vnet_connect()
- */
- vec_add1 (ecm->quic_session_index_by_thread[thread_index], handle);
- vec_free (a);
return 0;
}
static int
-quic_echo_clients_session_connected_callback (u32 app_index, u32 api_context,
- session_t * s,
- session_error_t err)
+ec_ctrl_send (hs_test_cmd_t cmd)
+{
+ ec_main_t *ecm = &ec_main;
+ session_t *s;
+ int rv;
+
+ ecm->cfg.cmd = cmd;
+ if (ecm->ctrl_session_handle == SESSION_INVALID_HANDLE)
+ {
+ ec_dbg ("ctrl session went away");
+ return -1;
+ }
+
+ s = session_get_from_handle_if_valid (ecm->ctrl_session_handle);
+ if (!s)
+ {
+ ec_err ("ctrl session not found");
+ return -1;
+ }
+
+  ec_dbg ("sending test parameters to the server..");
+ if (ecm->cfg.verbose)
+ hs_test_cfg_dump (&ecm->cfg, 1);
+
+ rv = svm_fifo_enqueue (s->tx_fifo, sizeof (ecm->cfg), (u8 *) &ecm->cfg);
+ ASSERT (rv == sizeof (ecm->cfg));
+ session_send_io_evt_to_thread (s->tx_fifo, SESSION_IO_EVT_TX);
+ return 0;
+}
+
+static int
+ec_ctrl_session_connected_callback (session_t *s)
+{
+ ec_main_t *ecm = &ec_main;
+
+ s->opaque = HS_CTRL_HANDLE;
+ ecm->ctrl_session_handle = session_handle (s);
+
+ /* send test parameters to the server */
+ ec_ctrl_send (HS_TEST_CMD_SYNC);
+ return 0;
+}
+
+static int
+quic_ec_session_connected_callback (u32 app_index, u32 api_context,
+ session_t *s, session_error_t err)
{
- echo_client_main_t *ecm = &echo_client_main;
- eclient_session_t *session;
- u32 session_index;
- u8 thread_index;
+ ec_main_t *ecm = &ec_main;
+ ec_session_t *es;
+ ec_worker_t *wrk;
+ u32 thread_index;
- if (PREDICT_FALSE (ecm->run_test != ECHO_CLIENTS_STARTING))
+ if (PREDICT_FALSE (api_context == HS_CTRL_HANDLE))
+ return ec_ctrl_session_connected_callback (s);
+
+ if (PREDICT_FALSE (ecm->run_test != EC_STARTING))
return -1;
if (err)
{
- clib_warning ("connection %d failed!", api_context);
- ecm->run_test = ECHO_CLIENTS_EXITING;
- signal_evt_to_cli (-1);
+ ec_err ("connection %d failed!", api_context);
+ ecm->run_test = EC_EXITING;
+ signal_evt_to_cli (EC_CLI_CONNECTS_FAILED);
return 0;
}
if (s->listener_handle == SESSION_INVALID_HANDLE)
- return quic_echo_clients_qsession_connected_callback (app_index,
- api_context, s,
- err);
- DBG ("STREAM Connection callback %d", api_context);
+ return quic_ec_qsession_connected_callback (app_index, api_context, s,
+ err);
+ ec_dbg ("STREAM Connection callback %d", api_context);
thread_index = s->thread_index;
ASSERT (thread_index == vlib_get_thread_index ()
|| session_transport_service_type (s) == TRANSPORT_SERVICE_CL);
- if (!ecm->vpp_event_queue[thread_index])
- ecm->vpp_event_queue[thread_index] =
- session_main_get_vpp_event_queue (thread_index);
+ wrk = ec_worker_get (thread_index);
/*
* Setup session
*/
- clib_spinlock_lock_if_init (&ecm->sessions_lock);
- pool_get (ecm->sessions, session);
- clib_spinlock_unlock_if_init (&ecm->sessions_lock);
-
- clib_memset (session, 0, sizeof (*session));
- session_index = session - ecm->sessions;
- session->bytes_to_send = ecm->bytes_to_send;
- session->bytes_to_receive = ecm->no_return ? 0ULL : ecm->bytes_to_send;
- session->data.rx_fifo = s->rx_fifo;
- session->data.rx_fifo->shr->client_session_index = session_index;
- session->data.tx_fifo = s->tx_fifo;
- session->data.tx_fifo->shr->client_session_index = session_index;
- session->data.vpp_evt_q = ecm->vpp_event_queue[thread_index];
- session->vpp_session_handle = session_handle (s);
-
- if (ecm->is_dgram)
- {
- transport_connection_t *tc;
- tc = session_get_transport (s);
- clib_memcpy_fast (&session->data.transport, tc,
- sizeof (session->data.transport));
- session->data.is_dgram = 1;
- }
+ es = ec_session_alloc (wrk);
+ hs_test_app_session_init (es, s);
- vec_add1 (ecm->connection_index_by_thread[thread_index], session_index);
+ es->bytes_to_send = ecm->bytes_to_send;
+ es->bytes_to_receive = ecm->echo_bytes ? ecm->bytes_to_send : 0ULL;
+ es->vpp_session_handle = session_handle (s);
+ es->vpp_session_index = s->session_index;
+ s->opaque = es->session_index;
+
+ vec_add1 (wrk->conn_indices, es->session_index);
clib_atomic_fetch_add (&ecm->ready_connections, 1);
if (ecm->ready_connections == ecm->expected_connections)
{
- ecm->run_test = ECHO_CLIENTS_RUNNING;
+ ecm->run_test = EC_RUNNING;
/* Signal the CLI process that the action is starting... */
- signal_evt_to_cli (1);
+ signal_evt_to_cli (EC_CLI_CONNECTS_DONE);
}
return 0;
}
static int
-echo_clients_session_connected_callback (u32 app_index, u32 api_context,
- session_t * s, session_error_t err)
+ec_session_connected_callback (u32 app_index, u32 api_context, session_t *s,
+ session_error_t err)
{
- echo_client_main_t *ecm = &echo_client_main;
- eclient_session_t *session;
- u32 session_index;
- u8 thread_index;
+ ec_main_t *ecm = &ec_main;
+ ec_session_t *es;
+ u32 thread_index;
+ ec_worker_t *wrk;
- if (PREDICT_FALSE (ecm->run_test != ECHO_CLIENTS_STARTING))
+ if (PREDICT_FALSE (ecm->run_test != EC_STARTING))
return -1;
if (err)
{
- clib_warning ("connection %d failed!", api_context);
- ecm->run_test = ECHO_CLIENTS_EXITING;
- signal_evt_to_cli (-1);
+ ec_err ("connection %d failed! %U", api_context, format_session_error,
+ err);
+ ecm->run_test = EC_EXITING;
+ signal_evt_to_cli (EC_CLI_CONNECTS_FAILED);
return 0;
}
@@ -490,57 +640,43 @@ echo_clients_session_connected_callback (u32 app_index, u32 api_context,
ASSERT (thread_index == vlib_get_thread_index ()
|| session_transport_service_type (s) == TRANSPORT_SERVICE_CL);
- if (!ecm->vpp_event_queue[thread_index])
- ecm->vpp_event_queue[thread_index] =
- session_main_get_vpp_event_queue (thread_index);
+ if (PREDICT_FALSE (api_context == HS_CTRL_HANDLE))
+ return ec_ctrl_session_connected_callback (s);
+
+ wrk = ec_worker_get (thread_index);
/*
* Setup session
*/
- clib_spinlock_lock_if_init (&ecm->sessions_lock);
- pool_get (ecm->sessions, session);
- clib_spinlock_unlock_if_init (&ecm->sessions_lock);
-
- clib_memset (session, 0, sizeof (*session));
- session_index = session - ecm->sessions;
- session->bytes_to_send = ecm->bytes_to_send;
- session->bytes_to_receive = ecm->no_return ? 0ULL : ecm->bytes_to_send;
- session->data.rx_fifo = s->rx_fifo;
- session->data.rx_fifo->shr->client_session_index = session_index;
- session->data.tx_fifo = s->tx_fifo;
- session->data.tx_fifo->shr->client_session_index = session_index;
- session->data.vpp_evt_q = ecm->vpp_event_queue[thread_index];
- session->vpp_session_handle = session_handle (s);
-
- if (ecm->is_dgram)
- {
- transport_connection_t *tc;
- tc = session_get_transport (s);
- clib_memcpy_fast (&session->data.transport, tc,
- sizeof (session->data.transport));
- session->data.is_dgram = 1;
- }
+ es = ec_session_alloc (wrk);
+ hs_test_app_session_init (es, s);
+
+ es->bytes_to_send = ecm->bytes_to_send;
+ es->bytes_to_receive = ecm->echo_bytes ? ecm->bytes_to_send : 0ULL;
+ es->vpp_session_handle = session_handle (s);
+ es->vpp_session_index = s->session_index;
+ s->opaque = es->session_index;
- vec_add1 (ecm->connection_index_by_thread[thread_index], session_index);
+ vec_add1 (wrk->conn_indices, es->session_index);
clib_atomic_fetch_add (&ecm->ready_connections, 1);
if (ecm->ready_connections == ecm->expected_connections)
{
- ecm->run_test = ECHO_CLIENTS_RUNNING;
+ ecm->run_test = EC_RUNNING;
/* Signal the CLI process that the action is starting... */
- signal_evt_to_cli (1);
+ signal_evt_to_cli (EC_CLI_CONNECTS_DONE);
}
return 0;
}
static void
-echo_clients_session_reset_callback (session_t * s)
+ec_session_reset_callback (session_t *s)
{
- echo_client_main_t *ecm = &echo_client_main;
+ ec_main_t *ecm = &ec_main;
vnet_disconnect_args_t _a = { 0 }, *a = &_a;
if (s->session_state == SESSION_STATE_READY)
- clib_warning ("Reset active connection %U", format_session, s, 2);
+ ec_err ("Reset active connection %U", format_session, s, 2);
a->handle = session_handle (s);
a->app_index = ecm->app_index;
@@ -549,16 +685,23 @@ echo_clients_session_reset_callback (session_t * s)
}
static int
-echo_clients_session_create_callback (session_t * s)
+ec_session_accept_callback (session_t *s)
{
return 0;
}
static void
-echo_clients_session_disconnect_callback (session_t * s)
+ec_session_disconnect_callback (session_t *s)
{
- echo_client_main_t *ecm = &echo_client_main;
+ ec_main_t *ecm = &ec_main;
vnet_disconnect_args_t _a = { 0 }, *a = &_a;
+
+ if (session_handle (s) == ecm->ctrl_session_handle)
+ {
+ ec_dbg ("ctrl session disconnect");
+ ecm->ctrl_session_handle = SESSION_INVALID_HANDLE;
+ }
+
a->handle = session_handle (s);
a->app_index = ecm->app_index;
vnet_disconnect_session (a);
@@ -566,9 +709,9 @@ echo_clients_session_disconnect_callback (session_t * s)
}
void
-echo_clients_session_disconnect (session_t * s)
+ec_session_disconnect (session_t *s)
{
- echo_client_main_t *ecm = &echo_client_main;
+ ec_main_t *ecm = &ec_main;
vnet_disconnect_args_t _a = { 0 }, *a = &_a;
a->handle = session_handle (s);
a->app_index = ecm->app_index;
@@ -576,54 +719,124 @@ echo_clients_session_disconnect (session_t * s)
}
static int
-echo_clients_rx_callback (session_t * s)
+ec_ctrl_session_rx_callback (session_t *s)
{
- echo_client_main_t *ecm = &echo_client_main;
- eclient_session_t *sp;
+ ec_main_t *ecm = &ec_main;
+ int rx_bytes;
+ hs_test_cfg_t cfg = { 0 };
- if (PREDICT_FALSE (ecm->run_test != ECHO_CLIENTS_RUNNING))
+ rx_bytes = svm_fifo_dequeue (s->rx_fifo, sizeof (cfg), (u8 *) &cfg);
+ if (rx_bytes != sizeof (cfg))
{
- echo_clients_session_disconnect (s);
+ ec_err ("invalid cfg length %d (expected %d)", rx_bytes, sizeof (cfg));
+ signal_evt_to_cli (EC_CLI_CONNECTS_FAILED);
return -1;
}
- sp =
- pool_elt_at_index (ecm->sessions, s->rx_fifo->shr->client_session_index);
- receive_data_chunk (ecm, sp);
+ ec_dbg ("control message received:");
+ if (ecm->cfg.verbose)
+ hs_test_cfg_dump (&cfg, 1);
- if (svm_fifo_max_dequeue_cons (s->rx_fifo))
+ switch (cfg.cmd)
{
- if (svm_fifo_set_event (s->rx_fifo))
- session_send_io_evt_to_thread (s->rx_fifo, SESSION_IO_EVT_BUILTIN_RX);
+ case HS_TEST_CMD_SYNC:
+ switch (ecm->run_test)
+ {
+ case EC_STARTING:
+ if (!hs_test_cfg_verify (&cfg, &ecm->cfg))
+ {
+ ec_err ("invalid config received from server!");
+ signal_evt_to_cli (EC_CLI_CONNECTS_FAILED);
+ return -1;
+ }
+ signal_evt_to_cli (EC_CLI_CFG_SYNC);
+ break;
+
+ case EC_RUNNING:
+ ec_dbg ("test running..");
+ break;
+
+ case EC_EXITING:
+ /* post test sync */
+ signal_evt_to_cli (EC_CLI_CFG_SYNC);
+ break;
+
+ default:
+ ec_err ("unexpected test state! %d", ecm->run_test);
+ break;
+ }
+ break;
+ case HS_TEST_CMD_START:
+ signal_evt_to_cli (EC_CLI_START);
+ break;
+ case HS_TEST_CMD_STOP:
+ signal_evt_to_cli (EC_CLI_STOP);
+ break;
+ default:
+ ec_err ("unexpected cmd! %d", cfg.cmd);
+ break;
}
+
return 0;
}
-int
-echo_client_add_segment_callback (u32 client_index, u64 segment_handle)
+static int
+ec_session_rx_callback (session_t *s)
{
- /* New heaps may be added */
+ ec_main_t *ecm = &ec_main;
+ ec_worker_t *wrk;
+ ec_session_t *es;
+
+ if (PREDICT_FALSE (s->opaque == HS_CTRL_HANDLE))
+ return ec_ctrl_session_rx_callback (s);
+
+ if (PREDICT_FALSE (ecm->run_test != EC_RUNNING))
+ {
+ ec_session_disconnect (s);
+ return -1;
+ }
+
+ wrk = ec_worker_get (s->thread_index);
+ es = ec_session_get (wrk, s->opaque);
+
+ receive_data_chunk (wrk, es);
+
+ if (svm_fifo_max_dequeue_cons (s->rx_fifo))
+ session_enqueue_notify (s);
+
return 0;
}
-/* *INDENT-OFF* */
-static session_cb_vft_t echo_clients = {
- .session_reset_callback = echo_clients_session_reset_callback,
- .session_connected_callback = echo_clients_session_connected_callback,
- .session_accept_callback = echo_clients_session_create_callback,
- .session_disconnect_callback = echo_clients_session_disconnect_callback,
- .builtin_app_rx_callback = echo_clients_rx_callback,
- .add_segment_callback = echo_client_add_segment_callback
+static int
+ec_add_segment_callback (u32 app_index, u64 segment_handle)
+{
+ /* New segments may be added */
+ return 0;
+}
+
+static int
+ec_del_segment_callback (u32 app_index, u64 segment_handle)
+{
+ return 0;
+}
+
+static session_cb_vft_t ec_cb_vft = {
+ .session_reset_callback = ec_session_reset_callback,
+ .session_connected_callback = ec_session_connected_callback,
+ .session_accept_callback = ec_session_accept_callback,
+ .session_disconnect_callback = ec_session_disconnect_callback,
+ .builtin_app_rx_callback = ec_session_rx_callback,
+ .add_segment_callback = ec_add_segment_callback,
+ .del_segment_callback = ec_del_segment_callback,
};
-/* *INDENT-ON* */
static clib_error_t *
-echo_clients_attach (u8 * appns_id, u64 appns_flags, u64 appns_secret)
+ec_attach ()
{
vnet_app_add_cert_key_pair_args_t _ck_pair, *ck_pair = &_ck_pair;
- u32 prealloc_fifos, segment_size = 256 << 20;
- echo_client_main_t *ecm = &echo_client_main;
+ ec_main_t *ecm = &ec_main;
vnet_app_attach_args_t _a, *a = &_a;
+ u32 prealloc_fifos;
u64 options[18];
int rv;
@@ -633,18 +846,14 @@ echo_clients_attach (u8 * appns_id, u64 appns_flags, u64 appns_secret)
a->api_client_index = ~0;
a->name = format (0, "echo_client");
if (ecm->transport_proto == TRANSPORT_PROTO_QUIC)
- echo_clients.session_connected_callback =
- quic_echo_clients_session_connected_callback;
- a->session_cb_vft = &echo_clients;
+ ec_cb_vft.session_connected_callback = quic_ec_session_connected_callback;
+ a->session_cb_vft = &ec_cb_vft;
prealloc_fifos = ecm->prealloc_fifos ? ecm->expected_connections : 1;
- if (ecm->private_segment_size)
- segment_size = ecm->private_segment_size;
-
options[APP_OPTIONS_ACCEPT_COOKIE] = 0x12345678;
- options[APP_OPTIONS_SEGMENT_SIZE] = segment_size;
- options[APP_OPTIONS_ADD_SEGMENT_SIZE] = segment_size;
+ options[APP_OPTIONS_SEGMENT_SIZE] = ecm->private_segment_size;
+ options[APP_OPTIONS_ADD_SEGMENT_SIZE] = ecm->private_segment_size;
options[APP_OPTIONS_RX_FIFO_SIZE] = ecm->fifo_size;
options[APP_OPTIONS_TX_FIFO_SIZE] = ecm->fifo_size;
options[APP_OPTIONS_PRIVATE_SEGMENT_COUNT] = ecm->private_segment_count;
@@ -652,13 +861,13 @@ echo_clients_attach (u8 * appns_id, u64 appns_flags, u64 appns_secret)
options[APP_OPTIONS_FLAGS] = APP_OPTIONS_FLAGS_IS_BUILTIN;
options[APP_OPTIONS_TLS_ENGINE] = ecm->tls_engine;
options[APP_OPTIONS_PCT_FIRST_ALLOC] = 100;
- if (appns_id)
+ options[APP_OPTIONS_FLAGS] |= ecm->attach_flags;
+ if (ecm->appns_id)
{
- options[APP_OPTIONS_FLAGS] |= appns_flags;
- options[APP_OPTIONS_NAMESPACE_SECRET] = appns_secret;
+ options[APP_OPTIONS_NAMESPACE_SECRET] = ecm->appns_secret;
+ a->namespace_id = ecm->appns_id;
}
a->options = options;
- a->namespace_id = appns_id;
if ((rv = vnet_application_attach (a)))
return clib_error_return (0, "attach returned %d", rv);
@@ -674,16 +883,21 @@ echo_clients_attach (u8 * appns_id, u64 appns_flags, u64 appns_secret)
vnet_app_add_cert_key_pair (ck_pair);
ecm->ckpair_index = ck_pair->index;
+ ecm->test_client_attached = 1;
+
return 0;
}
static int
-echo_clients_detach ()
+ec_detach ()
{
- echo_client_main_t *ecm = &echo_client_main;
+ ec_main_t *ecm = &ec_main;
vnet_app_detach_args_t _da, *da = &_da;
int rv;
+ if (!ecm->test_client_attached)
+ return 0;
+
da->app_index = ecm->app_index;
da->api_client_index = ~0;
rv = vnet_application_detach (da);
@@ -694,412 +908,450 @@ echo_clients_detach ()
return rv;
}
-static void *
-echo_client_thread_fn (void *arg)
-{
- return 0;
-}
-
-/** Start a transmit thread */
-int
-echo_clients_start_tx_pthread (echo_client_main_t * ecm)
-{
- if (ecm->client_thread_handle == 0)
- {
- int rv = pthread_create (&ecm->client_thread_handle,
- NULL /*attr */ ,
- echo_client_thread_fn, 0);
- if (rv)
- {
- ecm->client_thread_handle = 0;
- return -1;
- }
- }
- return 0;
-}
-
static int
-echo_client_transport_needs_crypto (transport_proto_t proto)
+ec_transport_needs_crypto (transport_proto_t proto)
{
return proto == TRANSPORT_PROTO_TLS || proto == TRANSPORT_PROTO_DTLS ||
proto == TRANSPORT_PROTO_QUIC;
}
-clib_error_t *
-echo_clients_connect (vlib_main_t * vm, u32 n_clients)
+static int
+ec_connect_rpc (void *args)
{
- session_endpoint_cfg_t sep = SESSION_ENDPOINT_CFG_NULL;
- echo_client_main_t *ecm = &echo_client_main;
- vnet_connect_args_t _a, *a = &_a;
- int i, rv;
-
- clib_memset (a, 0, sizeof (*a));
+ ec_main_t *ecm = &ec_main;
+ vnet_connect_args_t _a = {}, *a = &_a;
+ int rv, needs_crypto;
+ u32 n_clients, ci;
+
+ n_clients = ecm->n_clients;
+ needs_crypto = ec_transport_needs_crypto (ecm->transport_proto);
+ clib_memcpy (&a->sep_ext, &ecm->connect_sep, sizeof (ecm->connect_sep));
+ a->sep_ext.transport_flags |= TRANSPORT_CFG_F_CONNECTED;
+ a->app_index = ecm->app_index;
- if (parse_uri ((char *) ecm->connect_uri, &sep))
- return clib_error_return (0, "invalid uri");
+ ci = ecm->connect_conn_index;
- for (i = 0; i < n_clients; i++)
+ while (ci < n_clients)
{
- clib_memcpy (&a->sep_ext, &sep, sizeof (sep));
- a->api_context = i;
- a->app_index = ecm->app_index;
- if (echo_client_transport_needs_crypto (a->sep_ext.transport_proto))
+ /* Crude pacing for call setups */
+ if (ci - ecm->ready_connections > 128)
+ {
+ ecm->connect_conn_index = ci;
+ break;
+ }
+
+ a->api_context = ci;
+ if (needs_crypto)
{
session_endpoint_alloc_ext_cfg (&a->sep_ext,
TRANSPORT_ENDPT_EXT_CFG_CRYPTO);
a->sep_ext.ext_cfg->crypto.ckpair_index = ecm->ckpair_index;
}
- vlib_worker_thread_barrier_sync (vm);
rv = vnet_connect (a);
- if (a->sep_ext.ext_cfg)
+
+ if (needs_crypto)
clib_mem_free (a->sep_ext.ext_cfg);
+
if (rv)
{
- vlib_worker_thread_barrier_release (vm);
- return clib_error_return (0, "connect returned: %d", rv);
+ ec_err ("connect returned: %U", format_session_error, rv);
+ ecm->run_test = EC_EXITING;
+ signal_evt_to_cli (EC_CLI_CONNECTS_FAILED);
+ break;
}
- vlib_worker_thread_barrier_release (vm);
- /* Crude pacing for call setups */
- if ((i % 16) == 0)
- vlib_process_suspend (vm, 100e-6);
- ASSERT (i + 1 >= ecm->ready_connections);
- while (i + 1 - ecm->ready_connections > 128)
- vlib_process_suspend (vm, 1e-3);
+ ci += 1;
}
+
+ if (ci < ecm->expected_connections && ecm->run_test != EC_EXITING)
+ ec_program_connects ();
+
return 0;
}
-#define ec_cli_output(_fmt, _args...) \
- if (!ecm->no_output) \
- vlib_cli_output(vm, _fmt, ##_args)
+void
+ec_program_connects (void)
+{
+ session_send_rpc_evt_to_thread_force (transport_cl_thread (), ec_connect_rpc,
+ 0);
+}
static clib_error_t *
-echo_clients_command_fn (vlib_main_t * vm,
- unformat_input_t * input, vlib_cli_command_t * cmd)
+ec_ctrl_connect_rpc ()
{
- echo_client_main_t *ecm = &echo_client_main;
- vlib_thread_main_t *thread_main = vlib_get_thread_main ();
- u64 tmp, total_bytes, appns_flags = 0, appns_secret = 0;
- session_endpoint_cfg_t sep = SESSION_ENDPOINT_CFG_NULL;
- f64 test_timeout = 20.0, syn_timeout = 20.0, delta;
- char *default_uri = "tcp://6.0.1.1/1234";
- u8 *appns_id = 0, barrier_acq_needed = 0;
- int preallocate_sessions = 0, i, rv;
+ session_error_t rv;
+ ec_main_t *ecm = &ec_main;
+ vnet_connect_args_t _a = {}, *a = &_a;
+
+ a->api_context = HS_CTRL_HANDLE;
+ ecm->cfg.cmd = HS_TEST_CMD_SYNC;
+ clib_memcpy (&a->sep_ext, &ecm->connect_sep, sizeof (ecm->connect_sep));
+ a->sep_ext.transport_proto = TRANSPORT_PROTO_TCP;
+ a->app_index = ecm->app_index;
+
+ rv = vnet_connect (a);
+ if (rv)
+ {
+ ec_err ("ctrl connect returned: %U", format_session_error, rv);
+ ecm->run_test = EC_EXITING;
+ signal_evt_to_cli (EC_CLI_CONNECTS_FAILED);
+ }
+ return 0;
+}
+
+static void
+ec_ctrl_connect (void)
+{
+ session_send_rpc_evt_to_thread_force (transport_cl_thread (),
+ ec_ctrl_connect_rpc, 0);
+}
+
+static void
+ec_ctrl_session_disconnect ()
+{
+ ec_main_t *ecm = &ec_main;
+ vnet_disconnect_args_t _a, *a = &_a;
+ session_error_t err;
+
+ a->handle = ecm->ctrl_session_handle;
+ a->app_index = ecm->app_index;
+ err = vnet_disconnect_session (a);
+ if (err)
+ ec_err ("vnet_disconnect_session: %U", format_session_error, err);
+}
+
+static int
+ec_ctrl_test_sync ()
+{
+ ec_main_t *ecm = &ec_main;
+ ecm->cfg.test = HS_TEST_TYPE_ECHO;
+ return ec_ctrl_send (HS_TEST_CMD_SYNC);
+}
+
+static int
+ec_ctrl_test_start ()
+{
+ return ec_ctrl_send (HS_TEST_CMD_START);
+}
+
+static int
+ec_ctrl_test_stop ()
+{
+ return ec_ctrl_send (HS_TEST_CMD_STOP);
+}
+
+#define ec_wait_for_signal(_sig) \
+ vlib_process_wait_for_event_or_clock (vm, ecm->syn_timeout); \
+ event_type = vlib_process_get_events (vm, &event_data); \
+ switch (event_type) \
+ { \
+ case ~0: \
+ ec_cli ("Timeout while waiting for " #_sig); \
+ error = \
+ clib_error_return (0, "failed: timeout while waiting for " #_sig); \
+ goto cleanup; \
+ case _sig: \
+ break; \
+ default: \
+ ec_cli ("unexpected event while waiting for " #_sig ": %d", \
+ event_type); \
+ error = \
+ clib_error_return (0, "failed: unexpected event: %d", event_type); \
+ goto cleanup; \
+ }
+
+static clib_error_t *
+ec_command_fn (vlib_main_t *vm, unformat_input_t *input,
+ vlib_cli_command_t *cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ char *default_uri = "tcp://6.0.1.1/1234", *transfer_type;
+ ec_main_t *ecm = &ec_main;
uword *event_data = 0, event_type;
- f64 time_before_connects;
- u32 n_clients = 1;
- char *transfer_type;
clib_error_t *error = 0;
+ int rv, had_config = 1;
+ u64 tmp, total_bytes;
+ f64 delta;
- ecm->quic_streams = 1;
- ecm->bytes_to_send = 8192;
- ecm->no_return = 0;
- ecm->fifo_size = 64 << 10;
- ecm->connections_per_batch = 1000;
- ecm->private_segment_count = 0;
- ecm->private_segment_size = 0;
- ecm->no_output = 0;
- ecm->test_bytes = 0;
- ecm->test_failed = 0;
- ecm->vlib_main = vm;
- ecm->tls_engine = CRYPTO_ENGINE_OPENSSL;
- ecm->no_copy = 0;
- ecm->run_test = ECHO_CLIENTS_STARTING;
+ if (ecm->test_client_attached)
+ return clib_error_return (0, "failed: already running!");
- if (vlib_num_workers ())
+ if (ec_init (vm))
{
- /* The request came over the binary api and the inband cli handler
- * is not mp_safe. Drop the barrier to make sure the workers are not
- * blocked.
- */
- if (vlib_thread_is_main_w_barrier ())
- {
- barrier_acq_needed = 1;
- vlib_worker_thread_barrier_release (vm);
- }
- /*
- * There's a good chance that both the client and the server echo
- * apps will be enabled so make sure the session queue node polls on
- * the main thread as connections will probably be established on it.
- */
- vlib_node_set_state (vm, session_queue_node.index,
- VLIB_NODE_STATE_POLLING);
+ error = clib_error_return (0, "failed init");
+ goto cleanup;
}
- if (thread_main->n_vlib_mains > 1)
- clib_spinlock_init (&ecm->sessions_lock);
- vec_free (ecm->connect_uri);
+ if (!unformat_user (input, unformat_line_input, line_input))
+ {
+ had_config = 0;
+ goto parse_config;
+ }
- while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
{
- if (unformat (input, "uri %s", &ecm->connect_uri))
+ if (unformat (line_input, "uri %s", &ecm->connect_uri))
;
- else if (unformat (input, "nclients %d", &n_clients))
+ else if (unformat (line_input, "nclients %d", &ecm->n_clients))
;
- else if (unformat (input, "quic-streams %d", &ecm->quic_streams))
+ else if (unformat (line_input, "quic-streams %d", &ecm->quic_streams))
;
- else if (unformat (input, "mbytes %lld", &tmp))
+ else if (unformat (line_input, "mbytes %lld", &tmp))
ecm->bytes_to_send = tmp << 20;
- else if (unformat (input, "gbytes %lld", &tmp))
+ else if (unformat (line_input, "gbytes %lld", &tmp))
ecm->bytes_to_send = tmp << 30;
- else if (unformat (input, "bytes %lld", &ecm->bytes_to_send))
+ else if (unformat (line_input, "bytes %U", unformat_memory_size,
+ &ecm->bytes_to_send))
+ ;
+ else if (unformat (line_input, "test-timeout %f", &ecm->test_timeout))
;
- else if (unformat (input, "test-timeout %f", &test_timeout))
+ else if (unformat (line_input, "syn-timeout %f", &ecm->syn_timeout))
;
- else if (unformat (input, "syn-timeout %f", &syn_timeout))
+ else if (unformat (line_input, "echo-bytes"))
+ ecm->echo_bytes = 1;
+ else if (unformat (line_input, "fifo-size %U", unformat_memory_size,
+ &ecm->fifo_size))
;
- else if (unformat (input, "no-return"))
- ecm->no_return = 1;
- else if (unformat (input, "fifo-size %d", &ecm->fifo_size))
- ecm->fifo_size <<= 10;
- else if (unformat (input, "private-segment-count %d",
+ else if (unformat (line_input, "private-segment-count %d",
&ecm->private_segment_count))
;
- else if (unformat (input, "private-segment-size %U",
- unformat_memory_size, &tmp))
- {
- if (tmp >= 0x100000000ULL)
- {
- error = clib_error_return (
- 0, "private segment size %lld (%llu) too large", tmp, tmp);
- goto cleanup;
- }
- ecm->private_segment_size = tmp;
- }
- else if (unformat (input, "preallocate-fifos"))
+ else if (unformat (line_input, "private-segment-size %U",
+ unformat_memory_size, &ecm->private_segment_size))
+ ;
+ else if (unformat (line_input, "preallocate-fifos"))
ecm->prealloc_fifos = 1;
- else if (unformat (input, "preallocate-sessions"))
- preallocate_sessions = 1;
- else
- if (unformat (input, "client-batch %d", &ecm->connections_per_batch))
+ else if (unformat (line_input, "preallocate-sessions"))
+ ecm->prealloc_sessions = 1;
+ else if (unformat (line_input, "client-batch %d",
+ &ecm->connections_per_batch))
;
- else if (unformat (input, "appns %_%v%_", &appns_id))
+ else if (unformat (line_input, "appns %_%v%_", &ecm->appns_id))
;
- else if (unformat (input, "all-scope"))
- appns_flags |= (APP_OPTIONS_FLAGS_USE_GLOBAL_SCOPE
- | APP_OPTIONS_FLAGS_USE_LOCAL_SCOPE);
- else if (unformat (input, "local-scope"))
- appns_flags = APP_OPTIONS_FLAGS_USE_LOCAL_SCOPE;
- else if (unformat (input, "global-scope"))
- appns_flags = APP_OPTIONS_FLAGS_USE_GLOBAL_SCOPE;
- else if (unformat (input, "secret %lu", &appns_secret))
+ else if (unformat (line_input, "all-scope"))
+ ecm->attach_flags |= (APP_OPTIONS_FLAGS_USE_GLOBAL_SCOPE |
+ APP_OPTIONS_FLAGS_USE_LOCAL_SCOPE);
+ else if (unformat (line_input, "local-scope"))
+ ecm->attach_flags = APP_OPTIONS_FLAGS_USE_LOCAL_SCOPE;
+ else if (unformat (line_input, "global-scope"))
+ ecm->attach_flags = APP_OPTIONS_FLAGS_USE_GLOBAL_SCOPE;
+ else if (unformat (line_input, "secret %lu", &ecm->appns_secret))
;
- else if (unformat (input, "no-output"))
- ecm->no_output = 1;
- else if (unformat (input, "test-bytes"))
- ecm->test_bytes = 1;
- else if (unformat (input, "tls-engine %d", &ecm->tls_engine))
+ else if (unformat (line_input, "verbose"))
+ ecm->cfg.verbose = 1;
+ else if (unformat (line_input, "test-bytes"))
+ ecm->cfg.test_bytes = 1;
+ else if (unformat (line_input, "tls-engine %d", &ecm->tls_engine))
;
else
{
error = clib_error_return (0, "failed: unknown input `%U'",
- format_unformat_error, input);
+ format_unformat_error, line_input);
goto cleanup;
}
}
- /* Store cli process node index for signalling */
- ecm->cli_node_index =
- vlib_get_current_process (vm)->node_runtime.node_index;
-
- if (ecm->is_init == 0)
- {
- if (echo_clients_init (vm))
- {
- error = clib_error_return (0, "failed init");
- goto cleanup;
- }
- }
+parse_config:
-
- ecm->ready_connections = 0;
- ecm->expected_connections = n_clients * ecm->quic_streams;
- ecm->rx_total = 0;
- ecm->tx_total = 0;
+ ecm->cfg.num_test_sessions = ecm->expected_connections =
+ ecm->n_clients * ecm->quic_streams;
if (!ecm->connect_uri)
{
- clib_warning ("No uri provided. Using default: %s", default_uri);
+ ec_cli ("No uri provided. Using default: %s", default_uri);
ecm->connect_uri = format (0, "%s%c", default_uri, 0);
}
- if ((rv = parse_uri ((char *) ecm->connect_uri, &sep)))
+ if ((rv = parse_uri ((char *) ecm->connect_uri, &ecm->connect_sep)))
{
error = clib_error_return (0, "Uri parse error: %d", rv);
goto cleanup;
}
- ecm->transport_proto = sep.transport_proto;
- ecm->is_dgram = (sep.transport_proto == TRANSPORT_PROTO_UDP);
+ ecm->transport_proto = ecm->connect_sep.transport_proto;
-#if ECHO_CLIENT_PTHREAD
- echo_clients_start_tx_pthread ();
-#endif
+ if (ecm->prealloc_sessions)
+ ec_prealloc_sessions (ecm);
- vlib_worker_thread_barrier_sync (vm);
- vnet_session_enable_disable (vm, 1 /* turn on session and transports */ );
- vlib_worker_thread_barrier_release (vm);
-
- if (ecm->test_client_attached == 0)
+ if ((error = ec_attach ()))
{
- if ((error = echo_clients_attach (appns_id, appns_flags, appns_secret)))
- {
- vec_free (appns_id);
- clib_error_report (error);
- goto cleanup;
- }
- vec_free (appns_id);
+ clib_error_report (error);
+ goto cleanup;
}
- ecm->test_client_attached = 1;
- /* Turn on the builtin client input nodes */
- for (i = 0; i < thread_main->n_vlib_mains; i++)
- vlib_node_set_state (vlib_get_main_by_index (i), echo_clients_node.index,
- VLIB_NODE_STATE_POLLING);
+ if (ecm->echo_bytes)
+ ecm->cfg.test = HS_TEST_TYPE_BI;
+ else
+ ecm->cfg.test = HS_TEST_TYPE_UNI;
- if (preallocate_sessions)
- pool_init_fixed (ecm->sessions, 1.1 * n_clients);
+ ec_ctrl_connect ();
+ ec_wait_for_signal (EC_CLI_CFG_SYNC);
- /* Fire off connect requests */
- time_before_connects = vlib_time_now (vm);
- if ((error = echo_clients_connect (vm, n_clients)))
+ if (ec_ctrl_test_start () < 0)
{
+ ec_cli ("failed to send start command");
goto cleanup;
}
+ ec_wait_for_signal (EC_CLI_START);
- /* Park until the sessions come up, or ten seconds elapse... */
- vlib_process_wait_for_event_or_clock (vm, syn_timeout);
+ /*
+ * Start. Fire off connect requests
+ */
+
+ /* update data port */
+ ecm->connect_sep.port = hs_make_data_port (ecm->connect_sep.port);
+
+ ecm->syn_start_time = vlib_time_now (vm);
+ ec_program_connects ();
+
+ /*
+ * Park until the sessions come up, or syn_timeout seconds pass
+ */
+
+ vlib_process_wait_for_event_or_clock (vm, ecm->syn_timeout);
event_type = vlib_process_get_events (vm, &event_data);
switch (event_type)
{
case ~0:
- ec_cli_output ("Timeout with only %d sessions active...",
- ecm->ready_connections);
+ ec_cli ("Timeout with only %d sessions active...",
+ ecm->ready_connections);
error = clib_error_return (0, "failed: syn timeout with %d sessions",
ecm->ready_connections);
- goto cleanup;
+ goto stop_test;
- case 1:
- delta = vlib_time_now (vm) - time_before_connects;
+ case EC_CLI_CONNECTS_DONE:
+ delta = vlib_time_now (vm) - ecm->syn_start_time;
if (delta != 0.0)
- ec_cli_output ("%d three-way handshakes in %.2f seconds %.2f/s",
- n_clients, delta, ((f64) n_clients) / delta);
-
- ecm->test_start_time = vlib_time_now (ecm->vlib_main);
- ec_cli_output ("Test started at %.6f", ecm->test_start_time);
+ ec_cli ("%d three-way handshakes in %.2f seconds %.2f/s",
+ ecm->n_clients, delta, ((f64) ecm->n_clients) / delta);
break;
+ case EC_CLI_CONNECTS_FAILED:
+ error = clib_error_return (0, "failed: connect returned");
+ goto stop_test;
+
default:
- ec_cli_output ("unexpected event(1): %d", event_type);
- error = clib_error_return (0, "failed: unexpected event(1): %d",
- event_type);
- goto cleanup;
+ ec_cli ("unexpected event(2): %d", event_type);
+ error =
+ clib_error_return (0, "failed: unexpected event(2): %d", event_type);
+ goto stop_test;
}
- /* Now wait for the sessions to finish... */
- vlib_process_wait_for_event_or_clock (vm, test_timeout);
+ /*
+ * Wait for the sessions to finish or test_timeout seconds pass
+ */
+ ecm->test_start_time = vlib_time_now (ecm->vlib_main);
+ ec_cli ("Test started at %.6f", ecm->test_start_time);
+ vlib_process_wait_for_event_or_clock (vm, ecm->test_timeout);
event_type = vlib_process_get_events (vm, &event_data);
switch (event_type)
{
case ~0:
- ec_cli_output ("Timeout with %d sessions still active...",
- ecm->ready_connections);
+ ec_cli ("Timeout at %.6f with %d sessions still active...",
+ vlib_time_now (ecm->vlib_main), ecm->ready_connections);
error = clib_error_return (0, "failed: timeout with %d sessions",
ecm->ready_connections);
- goto cleanup;
+ goto stop_test;
- case 2:
+ case EC_CLI_TEST_DONE:
ecm->test_end_time = vlib_time_now (vm);
- ec_cli_output ("Test finished at %.6f", ecm->test_end_time);
+ ec_cli ("Test finished at %.6f", ecm->test_end_time);
break;
default:
- ec_cli_output ("unexpected event(2): %d", event_type);
- error = clib_error_return (0, "failed: unexpected event(2): %d",
- event_type);
- goto cleanup;
+ ec_cli ("unexpected event(3): %d", event_type);
+ error =
+ clib_error_return (0, "failed: unexpected event(3): %d", event_type);
+ goto stop_test;
}
+ /*
+ * Done. Compute stats
+ */
delta = ecm->test_end_time - ecm->test_start_time;
- if (delta != 0.0)
+ if (delta == 0.0)
{
- total_bytes = (ecm->no_return ? ecm->tx_total : ecm->rx_total);
- transfer_type = ecm->no_return ? "half-duplex" : "full-duplex";
- ec_cli_output ("%lld bytes (%lld mbytes, %lld gbytes) in %.2f seconds",
- total_bytes, total_bytes / (1ULL << 20),
- total_bytes / (1ULL << 30), delta);
- ec_cli_output ("%.2f bytes/second %s", ((f64) total_bytes) / (delta),
- transfer_type);
- ec_cli_output ("%.4f gbit/second %s",
- (((f64) total_bytes * 8.0) / delta / 1e9),
- transfer_type);
- }
- else
- {
- ec_cli_output ("zero delta-t?");
+ ec_cli ("zero delta-t?");
error = clib_error_return (0, "failed: zero delta-t");
- goto cleanup;
+ goto stop_test;
}
- if (ecm->test_bytes && ecm->test_failed)
+ total_bytes = (ecm->echo_bytes ? ecm->rx_total : ecm->tx_total);
+ transfer_type = ecm->echo_bytes ? "full-duplex" : "half-duplex";
+ ec_cli ("%lld bytes (%lld mbytes, %lld gbytes) in %.2f seconds", total_bytes,
+ total_bytes / (1ULL << 20), total_bytes / (1ULL << 30), delta);
+ ec_cli ("%.2f bytes/second %s", ((f64) total_bytes) / (delta),
+ transfer_type);
+ ec_cli ("%.4f gbit/second %s", (((f64) total_bytes * 8.0) / delta / 1e9),
+ transfer_type);
+
+ if (ecm->cfg.test_bytes && ecm->test_failed)
error = clib_error_return (0, "failed: test bytes");
-cleanup:
- ecm->run_test = ECHO_CLIENTS_EXITING;
- vlib_process_wait_for_event_or_clock (vm, 10e-3);
- for (i = 0; i < vec_len (ecm->connection_index_by_thread); i++)
+stop_test:
+ ecm->run_test = EC_EXITING;
+
+ /* send stop test command to the server */
+ if (ec_ctrl_test_stop () < 0)
{
- vec_reset_length (ecm->connection_index_by_thread[i]);
- vec_reset_length (ecm->connections_this_batch_by_thread[i]);
- vec_reset_length (ecm->quic_session_index_by_thread[i]);
+ ec_cli ("failed to send stop command");
+ goto cleanup;
}
+ ec_wait_for_signal (EC_CLI_STOP);
- pool_free (ecm->sessions);
+ /* post test sync */
+ if (ec_ctrl_test_sync () < 0)
+ {
+ ec_cli ("failed to send post sync command");
+ goto cleanup;
+ }
+ ec_wait_for_signal (EC_CLI_CFG_SYNC);
+
+ /* disconnect control session */
+ ec_ctrl_session_disconnect ();
+
+cleanup:
+
+ ecm->run_test = EC_EXITING;
+ vlib_process_wait_for_event_or_clock (vm, 10e-3);
/* Detach the application, so we can use different fifo sizes next time */
- if (ecm->test_client_attached)
+ if (ec_detach ())
{
- if (echo_clients_detach ())
- {
- error = clib_error_return (0, "failed: app detach");
- ec_cli_output ("WARNING: app detach failed...");
- }
+ error = clib_error_return (0, "failed: app detach");
+ ec_cli ("WARNING: app detach failed...");
}
- if (error)
- ec_cli_output ("test failed");
- vec_free (ecm->connect_uri);
- clib_spinlock_free (&ecm->sessions_lock);
- if (barrier_acq_needed)
- vlib_worker_thread_barrier_sync (vm);
+ ec_cleanup (ecm);
+ if (had_config)
+ unformat_free (line_input);
+
+ if (error)
+ ec_cli ("test failed");
return error;
}
-/* *INDENT-OFF* */
-VLIB_CLI_COMMAND (echo_clients_command, static) =
-{
+VLIB_CLI_COMMAND (ec_command, static) = {
.path = "test echo clients",
- .short_help = "test echo clients [nclients %d][[m|g]bytes <bytes>]"
- "[test-timeout <time>][syn-timeout <time>][no-return][fifo-size <size>]"
- "[private-segment-count <count>][private-segment-size <bytes>[m|g]]"
- "[preallocate-fifos][preallocate-sessions][client-batch <batch-size>]"
- "[uri <tcp://ip/port>][test-bytes][no-output]",
- .function = echo_clients_command_fn,
+ .short_help =
+ "test echo clients [nclients %d][[m|g]bytes <bytes>]"
+ "[test-timeout <time>][syn-timeout <time>][echo-bytes][fifo-size <size>]"
+ "[private-segment-count <count>][private-segment-size <bytes>[m|g]]"
+ "[preallocate-fifos][preallocate-sessions][client-batch <batch-size>]"
+ "[uri <tcp://ip/port>][test-bytes][verbose]",
+ .function = ec_command_fn,
.is_mp_safe = 1,
};
-/* *INDENT-ON* */
clib_error_t *
-echo_clients_main_init (vlib_main_t * vm)
+ec_main_init (vlib_main_t *vm)
{
- echo_client_main_t *ecm = &echo_client_main;
- ecm->is_init = 0;
+ ec_main_t *ecm = &ec_main;
+ ecm->app_is_init = 0;
return 0;
}
-VLIB_INIT_FUNCTION (echo_clients_main_init);
+VLIB_INIT_FUNCTION (ec_main_init);
/*
* fd.io coding-style-patch-verification: ON
diff --git a/src/plugins/hs_apps/echo_client.h b/src/plugins/hs_apps/echo_client.h
index c4983ca78d8..5868c3652ce 100644
--- a/src/plugins/hs_apps/echo_client.h
+++ b/src/plugins/hs_apps/echo_client.h
@@ -18,105 +18,121 @@
#ifndef __included_echo_client_h__
#define __included_echo_client_h__
-#include <vnet/vnet.h>
-#include <vnet/ip/ip.h>
-#include <vnet/ethernet/ethernet.h>
-
-#include <vppinfra/hash.h>
-#include <vppinfra/error.h>
+#include <hs_apps/hs_test.h>
#include <vnet/session/session.h>
#include <vnet/session/application_interface.h>
-typedef struct
+typedef struct ec_session_
{
CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
- app_session_t data;
+#define _(type, name) type name;
+ foreach_app_session_field
+#undef _
+ u32 vpp_session_index;
+ u32 thread_index;
u64 bytes_to_send;
u64 bytes_sent;
u64 bytes_to_receive;
u64 bytes_received;
u64 vpp_session_handle;
- u8 thread_index;
-} eclient_session_t;
+} ec_session_t;
+
+typedef struct ec_worker_
+{
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+ ec_session_t *sessions; /**< session pool */
+ u8 *rx_buf; /**< prealloced rx buffer */
+ u32 *conn_indices; /**< sessions handled by worker */
+ u32 *conns_this_batch; /**< sessions handled in batch */
+ svm_msg_q_t *vpp_event_queue; /**< session layer worker mq */
+ u32 thread_index; /**< thread index for worker */
+} ec_worker_t;
typedef struct
{
+ ec_worker_t *wrk; /**< Per-thread state */
+ u8 *connect_test_data; /**< Pre-computed test data */
+
+ volatile u32 ready_connections;
+ volatile u64 rx_total;
+ volatile u64 tx_total;
+ volatile int run_test; /**< Signal start of test */
+
+ f64 syn_start_time;
+ f64 test_start_time;
+ f64 test_end_time;
+ u32 prev_conns;
+ u32 repeats;
+
+ u32 connect_conn_index; /**< Connects attempted progress */
+
/*
* Application setup parameters
*/
- svm_queue_t *vl_input_queue; /**< vpe input queue */
- svm_msg_q_t **vpp_event_queue;
u32 cli_node_index; /**< cli process node index */
- u32 my_client_index; /**< loopback API client handle */
u32 app_index; /**< app index after attach */
+ session_handle_t ctrl_session_handle; /**< control session handle */
/*
* Configuration params
*/
+ hs_test_cfg_t cfg;
+ u32 n_clients; /**< Number of clients */
u8 *connect_uri; /**< URI for slave's connect */
+ session_endpoint_cfg_t connect_sep; /**< Sever session endpoint */
u64 bytes_to_send; /**< Bytes to send */
u32 configured_segment_size;
u32 fifo_size;
u32 expected_connections; /**< Number of clients/connections */
u32 connections_per_batch; /**< Connections to rx/tx at once */
u32 private_segment_count; /**< Number of private fifo segs */
- u32 private_segment_size; /**< size of private fifo segs */
+ u64 private_segment_size; /**< size of private fifo segs */
u32 tls_engine; /**< TLS engine mbedtls/openssl */
- u8 is_dgram;
u32 no_copy; /**< Don't memcpy data to tx fifo */
u32 quic_streams; /**< QUIC streams per connection */
u32 ckpair_index; /**< Cert key pair for tls/quic */
+ u64 attach_flags; /**< App attach flags */
+ u8 *appns_id; /**< App namespaces id */
+ u64 appns_secret; /**< App namespace secret */
+ f64 syn_timeout; /**< Test syn timeout (s) */
+ f64 test_timeout; /**< Test timeout (s) */
/*
- * Test state variables
- */
- eclient_session_t *sessions; /**< Session pool, shared */
- clib_spinlock_t sessions_lock;
- u8 **rx_buf; /**< intermediate rx buffers */
- u8 *connect_test_data; /**< Pre-computed test data */
- u32 **quic_session_index_by_thread;
- u32 **connection_index_by_thread;
- u32 **connections_this_batch_by_thread; /**< active connection batch */
- pthread_t client_thread_handle;
-
- volatile u32 ready_connections;
- volatile u32 finished_connections;
- volatile u64 rx_total;
- volatile u64 tx_total;
- volatile int run_test; /**< Signal start of test */
-
- f64 test_start_time;
- f64 test_end_time;
- u32 prev_conns;
- u32 repeats;
- /*
* Flags
*/
- u8 is_init;
+ u8 app_is_init;
u8 test_client_attached;
- u8 no_return;
+ u8 echo_bytes;
u8 test_return_packets;
- int i_am_master;
int drop_packets; /**< drop all packets */
u8 prealloc_fifos; /**< Request fifo preallocation */
- u8 no_output;
- u8 test_bytes;
+ u8 prealloc_sessions;
u8 test_failed;
u8 transport_proto;
+ u8 barrier_acq_needed;
vlib_main_t *vlib_main;
-} echo_client_main_t;
+} ec_main_t;
+
+typedef enum ec_state_
+{
+ EC_STARTING,
+ EC_RUNNING,
+ EC_EXITING
+} ec_state_t;
-enum
+typedef enum ec_cli_signal_
{
- ECHO_CLIENTS_STARTING,
- ECHO_CLIENTS_RUNNING,
- ECHO_CLIENTS_EXITING
-} echo_clients_test_state_e;
-extern echo_client_main_t echo_client_main;
+ EC_CLI_CONNECTS_DONE = 1,
+ EC_CLI_CONNECTS_FAILED,
+ EC_CLI_CFG_SYNC,
+ EC_CLI_START,
+ EC_CLI_STOP,
+ EC_CLI_TEST_DONE
+} ec_cli_signal_t;
-vlib_node_registration_t echo_clients_node;
+void ec_program_connects (void);
#endif /* __included_echo_client_h__ */
diff --git a/src/plugins/hs_apps/echo_server.c b/src/plugins/hs_apps/echo_server.c
index b75a3667e83..0243252434a 100644
--- a/src/plugins/hs_apps/echo_server.c
+++ b/src/plugins/hs_apps/echo_server.c
@@ -13,79 +13,143 @@
* limitations under the License.
*/
+#include <hs_apps/hs_test.h>
#include <vnet/vnet.h>
#include <vlibmemory/api.h>
#include <vnet/session/application.h>
#include <vnet/session/application_interface.h>
#include <vnet/session/session.h>
-#define ECHO_SERVER_DBG (0)
-#define DBG(_fmt, _args...) \
- if (ECHO_SERVER_DBG) \
- clib_warning (_fmt, ##_args)
+static void es_set_echo_rx_callbacks (u8 no_echo);
typedef struct
{
- /*
- * Server app parameters
- */
- svm_msg_q_t **vpp_queue;
- svm_queue_t *vl_input_queue; /**< Sever's event queue */
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+#define _(type, name) type name;
+ foreach_app_session_field
+#undef _
+ u64 vpp_session_handle;
+ u32 vpp_session_index;
+ u32 rx_retries;
+ u8 byte_index;
+} es_session_t;
+
+typedef struct
+{
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+ es_session_t *sessions;
+ u8 *rx_buf; /**< Per-thread RX buffer */
+ svm_msg_q_t *vpp_event_queue;
+ u32 thread_index;
+} es_worker_t;
+typedef struct
+{
u32 app_index; /**< Server app index */
- u32 my_client_index; /**< API client handle */
- u32 node_index; /**< process node index for event scheduling */
/*
* Config params
*/
- u8 no_echo; /**< Don't echo traffic */
+ hs_test_cfg_t cfg;
u32 fifo_size; /**< Fifo size */
u32 rcv_buffer_size; /**< Rcv buffer size */
u32 prealloc_fifos; /**< Preallocate fifos */
u32 private_segment_count; /**< Number of private segments */
- u32 private_segment_size; /**< Size of private segments */
+ u64 private_segment_size; /**< Size of private segments */
char *server_uri; /**< Server URI */
u32 tls_engine; /**< TLS engine: mbedtls/openssl */
u32 ckpair_index; /**< Cert and key for tls/quic */
- u8 is_dgram; /**< set if transport is dgram */
/*
* Test state
*/
- u8 **rx_buf; /**< Per-thread RX buffer */
- u64 byte_index;
- u32 **rx_retries;
+ es_worker_t *wrk;
+ int (*rx_callback) (session_t *session);
u8 transport_proto;
u64 listener_handle; /**< Session handle of the root listener */
+ u64 ctrl_listener_handle;
vlib_main_t *vlib_main;
} echo_server_main_t;
echo_server_main_t echo_server_main;
+#define es_err(_fmt, _args...) clib_warning (_fmt, ##_args);
+
+#define es_dbg(_fmt, _args...) \
+ do \
+ { \
+ if (PREDICT_FALSE (echo_server_main.cfg.verbose)) \
+ es_err (_fmt, ##_args); \
+ } \
+ while (0)
+
+#define es_cli(_fmt, _args...) vlib_cli_output (vm, _fmt, ##_args)
+
+static inline es_worker_t *
+es_worker_get (u32 thread_index)
+{
+ return vec_elt_at_index (echo_server_main.wrk, thread_index);
+}
+
+static inline es_session_t *
+es_session_alloc (es_worker_t *wrk)
+{
+ es_session_t *es;
+
+ pool_get_zero (wrk->sessions, es);
+ es->session_index = es - wrk->sessions;
+ return es;
+}
+
+static inline es_session_t *
+es_session_get (es_worker_t *wrk, u32 es_index)
+{
+ return pool_elt_at_index (wrk->sessions, es_index);
+}
+
int
quic_echo_server_qsession_accept_callback (session_t * s)
{
- DBG ("QSession %u accept w/opaque %d", s->session_index, s->opaque);
+ es_dbg ("QSession %u accept w/opaque %d", s->session_index, s->opaque);
return 0;
}
+static int
+echo_server_ctrl_session_accept_callback (session_t *s)
+{
+ s->session_state = SESSION_STATE_READY;
+ return 0;
+}
+
+static void
+es_session_alloc_and_init (session_t *s)
+{
+ es_session_t *es;
+ es_worker_t *wrk = es_worker_get (s->thread_index);
+
+ es = es_session_alloc (wrk);
+ hs_test_app_session_init (es, s);
+ es->vpp_session_index = s->session_index;
+ es->vpp_session_handle = session_handle (s);
+ s->opaque = es->session_index;
+}
+
int
quic_echo_server_session_accept_callback (session_t * s)
{
echo_server_main_t *esm = &echo_server_main;
+
+ if (PREDICT_FALSE (esm->ctrl_listener_handle == s->listener_handle))
+ return echo_server_ctrl_session_accept_callback (s);
+
if (s->listener_handle == esm->listener_handle)
return quic_echo_server_qsession_accept_callback (s);
- DBG ("SSESSION %u accept w/opaque %d", s->session_index, s->opaque);
- esm->vpp_queue[s->thread_index] =
- session_main_get_vpp_event_queue (s->thread_index);
+ es_dbg ("SSESSION %u accept w/opaque %d", s->session_index, s->opaque);
+
s->session_state = SESSION_STATE_READY;
- esm->byte_index = 0;
- ASSERT (vec_len (esm->rx_retries) > s->thread_index);
- vec_validate (esm->rx_retries[s->thread_index], s->session_index);
- esm->rx_retries[s->thread_index][s->session_index] = 0;
+ es_session_alloc_and_init (s);
return 0;
}
@@ -93,13 +157,12 @@ int
echo_server_session_accept_callback (session_t * s)
{
echo_server_main_t *esm = &echo_server_main;
- esm->vpp_queue[s->thread_index] =
- session_main_get_vpp_event_queue (s->thread_index);
+
+ if (PREDICT_FALSE (esm->ctrl_listener_handle == s->listener_handle))
+ return echo_server_ctrl_session_accept_callback (s);
+
s->session_state = SESSION_STATE_READY;
- esm->byte_index = 0;
- ASSERT (vec_len (esm->rx_retries) > s->thread_index);
- vec_validate (esm->rx_retries[s->thread_index], s->session_index);
- esm->rx_retries[s->thread_index][s->session_index] = 0;
+ es_session_alloc_and_init (s);
return 0;
}
@@ -119,7 +182,7 @@ echo_server_session_reset_callback (session_t * s)
{
echo_server_main_t *esm = &echo_server_main;
vnet_disconnect_args_t _a = { 0 }, *a = &_a;
- clib_warning ("Reset session %U", format_session, s, 2);
+ es_dbg ("Reset session %U", format_session, s, 2);
a->handle = session_handle (s);
a->app_index = esm->app_index;
vnet_disconnect_session (a);
@@ -129,7 +192,7 @@ int
echo_server_session_connected_callback (u32 app_index, u32 api_context,
session_t * s, session_error_t err)
{
- clib_warning ("called...");
+ es_err ("called...");
return -1;
}
@@ -143,26 +206,135 @@ echo_server_add_segment_callback (u32 client_index, u64 segment_handle)
int
echo_server_redirect_connect_callback (u32 client_index, void *mp)
{
- clib_warning ("called...");
+ es_err ("called...");
return -1;
}
-void
-test_bytes (echo_server_main_t * esm, int actual_transfer)
+static void
+es_foreach_thread (void *fp)
{
- int i;
- u32 my_thread_id = vlib_get_thread_index ();
+ echo_server_main_t *esm = &echo_server_main;
+ uword thread_index;
+ for (thread_index = 0; thread_index < vec_len (esm->wrk); thread_index++)
+ {
+ session_send_rpc_evt_to_thread (thread_index, fp,
+ uword_to_pointer (thread_index, void *));
+ }
+}
- for (i = 0; i < actual_transfer; i++)
+static int
+es_wrk_prealloc_sessions (void *args)
+{
+ echo_server_main_t *esm = &echo_server_main;
+ u32 sessions_per_wrk, n_wrks, thread_index;
+
+ thread_index = pointer_to_uword (args);
+ es_worker_t *wrk = es_worker_get (thread_index);
+ n_wrks = vlib_num_workers () ? vlib_num_workers () : 1;
+ sessions_per_wrk = esm->cfg.num_test_sessions / n_wrks;
+ pool_alloc (wrk->sessions, 1.1 * sessions_per_wrk);
+ return 0;
+}
+
+static int
+echo_server_setup_test (hs_test_cfg_t *c)
+{
+ if (c->test == HS_TEST_TYPE_UNI)
+ es_set_echo_rx_callbacks (1 /* no echo */);
+ else
+ es_set_echo_rx_callbacks (0 /* no echo */);
+
+ es_foreach_thread (es_wrk_prealloc_sessions);
+ return 0;
+}
+
+static void
+echo_server_ctrl_reply (session_t *s)
+{
+ echo_server_main_t *esm = &echo_server_main;
+ int rv;
+
+ rv = svm_fifo_enqueue (s->tx_fifo, sizeof (esm->cfg), (u8 *) &esm->cfg);
+ ASSERT (rv == sizeof (esm->cfg));
+ session_send_io_evt_to_thread_custom (&s->session_index, s->thread_index,
+ SESSION_IO_EVT_TX);
+}
+
+static int
+es_test_cmd_sync (echo_server_main_t *esm, session_t *s)
+{
+ int rv;
+
+ rv = echo_server_setup_test (&esm->cfg);
+ if (rv)
+ es_err ("setup test error!");
+
+ echo_server_ctrl_reply (s);
+ return 0;
+}
+
+static int
+es_wrk_cleanup_sessions (void *args)
+{
+ echo_server_main_t *esm = &echo_server_main;
+ vnet_disconnect_args_t _a = {}, *a = &_a;
+ u32 thread_index = pointer_to_uword (args);
+ es_session_t *es;
+ es_worker_t *wrk;
+
+ wrk = es_worker_get (thread_index);
+ a->app_index = esm->app_index;
+
+ pool_foreach (es, wrk->sessions)
+ {
+ a->handle = es->vpp_session_handle;
+ vnet_disconnect_session (a);
+ }
+ pool_free (wrk->sessions);
+
+ return 0;
+}
+
+static int
+echo_server_rx_ctrl_callback (session_t *s)
+{
+ echo_server_main_t *esm = &echo_server_main;
+ int rv;
+
+ rv = svm_fifo_dequeue (s->rx_fifo, sizeof (esm->cfg), (u8 *) &esm->cfg);
+ ASSERT (rv == sizeof (esm->cfg));
+
+ es_dbg ("control message received:");
+ if (esm->cfg.verbose)
+ hs_test_cfg_dump (&esm->cfg, 0);
+
+ switch (esm->cfg.cmd)
{
- if (esm->rx_buf[my_thread_id][i] != ((esm->byte_index + i) & 0xff))
+ case HS_TEST_CMD_SYNC:
+ switch (esm->cfg.test)
{
- clib_warning ("at %lld expected %d got %d", esm->byte_index + i,
- (esm->byte_index + i) & 0xff,
- esm->rx_buf[my_thread_id][i]);
+ case HS_TEST_TYPE_ECHO:
+ case HS_TEST_TYPE_NONE:
+ es_foreach_thread (es_wrk_cleanup_sessions);
+ echo_server_ctrl_reply (s);
+ break;
+ case HS_TEST_TYPE_UNI:
+ case HS_TEST_TYPE_BI:
+ return es_test_cmd_sync (esm, s);
+ break;
+ default:
+ es_err ("unknown command type! %d", esm->cfg.cmd);
}
+ break;
+ case HS_TEST_CMD_START:
+ case HS_TEST_CMD_STOP:
+ echo_server_ctrl_reply (s);
+ break;
+ default:
+ es_err ("unknown command! %d", esm->cfg.cmd);
+ break;
}
- esm->byte_index += actual_transfer;
+ return 0;
}
/*
@@ -171,11 +343,30 @@ test_bytes (echo_server_main_t * esm, int actual_transfer)
int
echo_server_builtin_server_rx_callback_no_echo (session_t * s)
{
+ echo_server_main_t *esm = &echo_server_main;
+ if (PREDICT_FALSE (esm->ctrl_listener_handle == s->listener_handle))
+ return echo_server_rx_ctrl_callback (s);
+
svm_fifo_t *rx_fifo = s->rx_fifo;
svm_fifo_dequeue_drop (rx_fifo, svm_fifo_max_dequeue_cons (rx_fifo));
return 0;
}
+static void
+es_test_bytes (es_worker_t *wrk, es_session_t *es, int actual_transfer)
+{
+ int i;
+ for (i = 0; i < actual_transfer; i++)
+ {
+ if (wrk->rx_buf[i] != ((es->byte_index + i) & 0xff))
+ {
+ es_err ("at %lld expected %d got %d", es->byte_index + i,
+ (es->byte_index + i) & 0xff, wrk->rx_buf[i]);
+ }
+ }
+ es->byte_index += actual_transfer;
+}
+
int
echo_server_rx_callback (session_t * s)
{
@@ -184,7 +375,8 @@ echo_server_rx_callback (session_t * s)
svm_fifo_t *tx_fifo, *rx_fifo;
echo_server_main_t *esm = &echo_server_main;
u32 thread_index = vlib_get_thread_index ();
- app_session_transport_t at;
+ es_worker_t *wrk;
+ es_session_t *es;
ASSERT (s->thread_index == thread_index);
@@ -194,24 +386,25 @@ echo_server_rx_callback (session_t * s)
ASSERT (rx_fifo->master_thread_index == thread_index);
ASSERT (tx_fifo->master_thread_index == thread_index);
+ if (PREDICT_FALSE (esm->ctrl_listener_handle == s->listener_handle))
+ return echo_server_rx_ctrl_callback (s);
+
+ wrk = es_worker_get (thread_index);
max_enqueue = svm_fifo_max_enqueue_prod (tx_fifo);
- if (!esm->is_dgram)
- {
- max_dequeue = svm_fifo_max_dequeue_cons (rx_fifo);
- }
- else
+ es = es_session_get (wrk, s->opaque);
+
+ if (es->is_dgram)
{
session_dgram_pre_hdr_t ph;
svm_fifo_peek (rx_fifo, 0, sizeof (ph), (u8 *) & ph);
max_dequeue = ph.data_length - ph.data_offset;
- if (!esm->vpp_queue[s->thread_index])
- {
- svm_msg_q_t *mq;
- mq = session_main_get_vpp_event_queue (s->thread_index);
- esm->vpp_queue[s->thread_index] = mq;
- }
+ ASSERT (wrk->vpp_event_queue);
max_enqueue -= sizeof (session_dgram_hdr_t);
}
+ else
+ {
+ max_dequeue = svm_fifo_max_dequeue_cons (rx_fifo);
+ }
if (PREDICT_FALSE (max_dequeue == 0))
return 0;
@@ -228,65 +421,40 @@ echo_server_rx_callback (session_t * s)
/* Program self-tap to retry */
if (svm_fifo_set_event (rx_fifo))
{
+ /* TODO should be session_enqueue_notify(s) but quic tests seem
+ * to fail if that's the case */
if (session_send_io_evt_to_thread (rx_fifo,
SESSION_IO_EVT_BUILTIN_RX))
- clib_warning ("failed to enqueue self-tap");
+ es_err ("failed to enqueue self-tap");
- vec_validate (esm->rx_retries[s->thread_index], s->session_index);
- if (esm->rx_retries[thread_index][s->session_index] == 500000)
+ if (es->rx_retries == 500000)
{
- clib_warning ("session stuck: %U", format_session, s, 2);
+ es_err ("session stuck: %U", format_session, s, 2);
}
- if (esm->rx_retries[thread_index][s->session_index] < 500001)
- esm->rx_retries[thread_index][s->session_index]++;
+ if (es->rx_retries < 500001)
+ es->rx_retries++;
}
return 0;
}
- vec_validate (esm->rx_buf[thread_index], max_transfer);
- if (!esm->is_dgram)
- {
- actual_transfer = app_recv_stream_raw (rx_fifo,
- esm->rx_buf[thread_index],
- max_transfer,
- 0 /* don't clear event */ ,
- 0 /* peek */ );
- }
- else
+ vec_validate (wrk->rx_buf, max_transfer);
+ actual_transfer = app_recv ((app_session_t *) es, wrk->rx_buf, max_transfer);
+ ASSERT (actual_transfer == max_transfer);
+
+ if (esm->cfg.test_bytes)
{
- actual_transfer = app_recv_dgram_raw (rx_fifo,
- esm->rx_buf[thread_index],
- max_transfer, &at,
- 0 /* don't clear event */ ,
- 0 /* peek */ );
+ es_test_bytes (wrk, es, actual_transfer);
}
- ASSERT (actual_transfer == max_transfer);
- /* test_bytes (esm, actual_transfer); */
/*
* Echo back
*/
- if (!esm->is_dgram)
- {
- n_written = app_send_stream_raw (tx_fifo,
- esm->vpp_queue[thread_index],
- esm->rx_buf[thread_index],
- actual_transfer, SESSION_IO_EVT_TX,
- 1 /* do_evt */ , 0);
- }
- else
- {
- n_written = app_send_dgram_raw (tx_fifo, &at,
- esm->vpp_queue[s->thread_index],
- esm->rx_buf[thread_index],
- actual_transfer, SESSION_IO_EVT_TX,
- 1 /* do_evt */ , 0);
- }
+ n_written = app_send ((app_session_t *) es, wrk->rx_buf, actual_transfer, 0);
if (n_written != max_transfer)
- clib_warning ("short trout! written %u read %u", n_written, max_transfer);
+ es_err ("short trout! written %u read %u", n_written, max_transfer);
if (PREDICT_FALSE (svm_fifo_max_dequeue_cons (rx_fifo)))
goto rx_event;
@@ -294,15 +462,32 @@ echo_server_rx_callback (session_t * s)
return 0;
}
+int
+echo_server_rx_callback_common (session_t *s)
+{
+ echo_server_main_t *esm = &echo_server_main;
+ return esm->rx_callback (s);
+}
+
static session_cb_vft_t echo_server_session_cb_vft = {
.session_accept_callback = echo_server_session_accept_callback,
.session_disconnect_callback = echo_server_session_disconnect_callback,
.session_connected_callback = echo_server_session_connected_callback,
.add_segment_callback = echo_server_add_segment_callback,
- .builtin_app_rx_callback = echo_server_rx_callback,
+ .builtin_app_rx_callback = echo_server_rx_callback_common,
.session_reset_callback = echo_server_session_reset_callback
};
+static void
+es_set_echo_rx_callbacks (u8 no_echo)
+{
+ echo_server_main_t *esm = &echo_server_main;
+ if (no_echo)
+ esm->rx_callback = echo_server_builtin_server_rx_callback_no_echo;
+ else
+ esm->rx_callback = echo_server_rx_callback;
+}
+
static int
echo_server_attach (u8 * appns_id, u64 appns_flags, u64 appns_secret)
{
@@ -310,30 +495,22 @@ echo_server_attach (u8 * appns_id, u64 appns_flags, u64 appns_secret)
echo_server_main_t *esm = &echo_server_main;
vnet_app_attach_args_t _a, *a = &_a;
u64 options[APP_OPTIONS_N_OPTIONS];
- u32 segment_size = 512 << 20;
clib_memset (a, 0, sizeof (*a));
clib_memset (options, 0, sizeof (options));
- if (esm->no_echo)
- echo_server_session_cb_vft.builtin_app_rx_callback =
- echo_server_builtin_server_rx_callback_no_echo;
- else
- echo_server_session_cb_vft.builtin_app_rx_callback =
- echo_server_rx_callback;
+ esm->rx_callback = echo_server_rx_callback;
+
if (esm->transport_proto == TRANSPORT_PROTO_QUIC)
echo_server_session_cb_vft.session_accept_callback =
quic_echo_server_session_accept_callback;
- if (esm->private_segment_size)
- segment_size = esm->private_segment_size;
-
a->api_client_index = ~0;
a->name = format (0, "echo_server");
a->session_cb_vft = &echo_server_session_cb_vft;
a->options = options;
- a->options[APP_OPTIONS_SEGMENT_SIZE] = segment_size;
- a->options[APP_OPTIONS_ADD_SEGMENT_SIZE] = segment_size;
+ a->options[APP_OPTIONS_SEGMENT_SIZE] = esm->private_segment_size;
+ a->options[APP_OPTIONS_ADD_SEGMENT_SIZE] = esm->private_segment_size;
a->options[APP_OPTIONS_RX_FIFO_SIZE] = esm->fifo_size;
a->options[APP_OPTIONS_TX_FIFO_SIZE] = esm->fifo_size;
a->options[APP_OPTIONS_PRIVATE_SEGMENT_COUNT] = esm->private_segment_count;
@@ -352,7 +529,7 @@ echo_server_attach (u8 * appns_id, u64 appns_flags, u64 appns_secret)
if (vnet_application_attach (a))
{
- clib_warning ("failed to attach server");
+ es_err ("failed to attach server");
return -1;
}
esm->app_index = a->app_index;
@@ -392,19 +569,35 @@ echo_client_transport_needs_crypto (transport_proto_t proto)
}
static int
+echo_server_listen_ctrl ()
+{
+ echo_server_main_t *esm = &echo_server_main;
+ vnet_listen_args_t _args = {}, *args = &_args;
+ session_error_t rv;
+
+ if ((rv = parse_uri (esm->server_uri, &args->sep_ext)))
+ return -1;
+ args->sep_ext.transport_proto = TRANSPORT_PROTO_TCP;
+ args->app_index = esm->app_index;
+
+ rv = vnet_listen (args);
+ esm->ctrl_listener_handle = args->handle;
+ return rv;
+}
+
+static int
echo_server_listen ()
{
i32 rv;
echo_server_main_t *esm = &echo_server_main;
- vnet_listen_args_t _args = { 0 }, *args = &_args;
-
- args->sep_ext.app_wrk_index = 0;
+ vnet_listen_args_t _args = {}, *args = &_args;
if ((rv = parse_uri (esm->server_uri, &args->sep_ext)))
{
return -1;
}
args->app_index = esm->app_index;
+ args->sep_ext.port = hs_make_data_port (args->sep_ext.port);
if (echo_client_transport_needs_crypto (args->sep_ext.transport_proto))
{
session_endpoint_alloc_ext_cfg (&args->sep_ext,
@@ -430,30 +623,36 @@ echo_server_create (vlib_main_t * vm, u8 * appns_id, u64 appns_flags,
{
echo_server_main_t *esm = &echo_server_main;
vlib_thread_main_t *vtm = vlib_get_thread_main ();
- u32 num_threads;
- int i;
+ es_worker_t *wrk;
- num_threads = 1 /* main thread */ + vtm->n_threads;
- vec_validate (echo_server_main.vpp_queue, num_threads - 1);
- vec_validate (esm->rx_buf, num_threads - 1);
- vec_validate (esm->rx_retries, num_threads - 1);
- for (i = 0; i < vec_len (esm->rx_retries); i++)
- vec_validate (esm->rx_retries[i],
- pool_elts (session_main.wrk[i].sessions));
esm->rcv_buffer_size = clib_max (esm->rcv_buffer_size, esm->fifo_size);
- for (i = 0; i < num_threads; i++)
- vec_validate (esm->rx_buf[i], esm->rcv_buffer_size);
+ vec_validate (esm->wrk, vtm->n_threads);
+
+ vec_foreach (wrk, esm->wrk)
+ {
+ wrk->thread_index = wrk - esm->wrk;
+ vec_validate (wrk->rx_buf, esm->rcv_buffer_size);
+ wrk->vpp_event_queue =
+ session_main_get_vpp_event_queue (wrk->thread_index);
+ }
if (echo_server_attach (appns_id, appns_flags, appns_secret))
{
- clib_warning ("failed to attach server");
+ es_err ("failed to attach server");
+ return -1;
+ }
+ if (echo_server_listen_ctrl ())
+ {
+ es_err ("failed to start listening on ctrl session");
+ if (echo_server_detach ())
+ es_err ("failed to detach");
return -1;
}
if (echo_server_listen ())
{
- clib_warning ("failed to start listening");
+ es_err ("failed to start listening");
if (echo_server_detach ())
- clib_warning ("failed to detach");
+ es_err ("failed to detach");
return -1;
}
return 0;
@@ -466,27 +665,16 @@ echo_server_create_command_fn (vlib_main_t * vm, unformat_input_t * input,
session_endpoint_cfg_t sep = SESSION_ENDPOINT_CFG_NULL;
echo_server_main_t *esm = &echo_server_main;
u8 server_uri_set = 0, *appns_id = 0;
- u64 tmp, appns_flags = 0, appns_secret = 0;
+ u64 appns_flags = 0, appns_secret = 0;
char *default_uri = "tcp://0.0.0.0/1234";
- int rv, is_stop = 0, barrier_acq_needed = 0;
+ int rv, is_stop = 0;
clib_error_t *error = 0;
- /* The request came over the binary api and the inband cli handler
- * is not mp_safe. Drop the barrier to make sure the workers are not
- * blocked.
- */
- if (vlib_num_workers () && vlib_thread_is_main_w_barrier ())
- {
- barrier_acq_needed = 1;
- vlib_worker_thread_barrier_release (vm);
- }
-
- esm->no_echo = 0;
esm->fifo_size = 64 << 10;
esm->rcv_buffer_size = 128 << 10;
esm->prealloc_fifos = 0;
esm->private_segment_count = 0;
- esm->private_segment_size = 0;
+ esm->private_segment_size = 512 << 20;
esm->tls_engine = CRYPTO_ENGINE_OPENSSL;
vec_free (esm->server_uri);
@@ -494,10 +682,9 @@ echo_server_create_command_fn (vlib_main_t * vm, unformat_input_t * input,
{
if (unformat (input, "uri %s", &esm->server_uri))
server_uri_set = 1;
- else if (unformat (input, "no-echo"))
- esm->no_echo = 1;
- else if (unformat (input, "fifo-size %d", &esm->fifo_size))
- esm->fifo_size <<= 10;
+ else if (unformat (input, "fifo-size %U", unformat_memory_size,
+ &esm->fifo_size))
+ ;
else if (unformat (input, "rcv-buf-size %d", &esm->rcv_buffer_size))
;
else if (unformat (input, "prealloc-fifos %d", &esm->prealloc_fifos))
@@ -506,16 +693,8 @@ echo_server_create_command_fn (vlib_main_t * vm, unformat_input_t * input,
&esm->private_segment_count))
;
else if (unformat (input, "private-segment-size %U",
- unformat_memory_size, &tmp))
- {
- if (tmp >= 0x100000000ULL)
- {
- error = clib_error_return (
- 0, "private segment size %lld (%llu) too large", tmp, tmp);
- goto cleanup;
- }
- esm->private_segment_size = tmp;
- }
+ unformat_memory_size, &esm->private_segment_size))
+ ;
else if (unformat (input, "appns %_%v%_", &appns_id))
;
else if (unformat (input, "all-scope"))
@@ -543,14 +722,14 @@ echo_server_create_command_fn (vlib_main_t * vm, unformat_input_t * input,
{
if (esm->app_index == (u32) ~ 0)
{
- clib_warning ("server not running");
+ es_cli ("server not running");
error = clib_error_return (0, "failed: server not running");
goto cleanup;
}
rv = echo_server_detach ();
if (rv)
{
- clib_warning ("failed: detach");
+ es_cli ("failed: detach");
error = clib_error_return (0, "failed: server detach %d", rv);
goto cleanup;
}
@@ -561,7 +740,7 @@ echo_server_create_command_fn (vlib_main_t * vm, unformat_input_t * input,
if (!server_uri_set)
{
- clib_warning ("No uri provided! Using default: %s", default_uri);
+ es_cli ("No uri provided! Using default: %s", default_uri);
esm->server_uri = (char *) format (0, "%s%c", default_uri, 0);
}
@@ -571,7 +750,6 @@ echo_server_create_command_fn (vlib_main_t * vm, unformat_input_t * input,
goto cleanup;
}
esm->transport_proto = sep.transport_proto;
- esm->is_dgram = (sep.transport_proto == TRANSPORT_PROTO_UDP);
rv = echo_server_create (vm, appns_id, appns_flags, appns_secret);
if (rv)
@@ -584,29 +762,22 @@ echo_server_create_command_fn (vlib_main_t * vm, unformat_input_t * input,
cleanup:
vec_free (appns_id);
- if (barrier_acq_needed)
- vlib_worker_thread_barrier_sync (vm);
-
return error;
}
-/* *INDENT-OFF* */
-VLIB_CLI_COMMAND (echo_server_create_command, static) =
-{
+VLIB_CLI_COMMAND (echo_server_create_command, static) = {
.path = "test echo server",
- .short_help = "test echo server proto <proto> [no echo][fifo-size <mbytes>]"
- "[rcv-buf-size <bytes>][prealloc-fifos <count>]"
- "[private-segment-count <count>][private-segment-size <bytes[m|g]>]"
- "[uri <tcp://ip/port>]",
+ .short_help =
+ "test echo server proto <proto> [fifo-size <mbytes>]"
+ "[rcv-buf-size <bytes>][prealloc-fifos <count>]"
+ "[private-segment-count <count>][private-segment-size <bytes[m|g]>]"
+ "[uri <tcp://ip/port>]",
.function = echo_server_create_command_fn,
};
-/* *INDENT-ON* */
clib_error_t *
echo_server_main_init (vlib_main_t * vm)
{
- echo_server_main_t *esm = &echo_server_main;
- esm->my_client_index = ~0;
return 0;
}
diff --git a/src/plugins/hs_apps/hs_apps.c b/src/plugins/hs_apps/hs_apps.c
index 5067919cc28..8e991954c7e 100644
--- a/src/plugins/hs_apps/hs_apps.c
+++ b/src/plugins/hs_apps/hs_apps.c
@@ -17,13 +17,11 @@
#include <vnet/plugin/plugin.h>
#include <vpp/app/version.h>
-/* *INDENT-OFF* */
VLIB_PLUGIN_REGISTER () =
{
.version = VPP_BUILD_VER,
.description = "Host Stack Applications",
};
-/* *INDENT-ON* */
/*
* fd.io coding-style-patch-verification: ON
diff --git a/src/plugins/hs_apps/hs_test.h b/src/plugins/hs_apps/hs_test.h
new file mode 100644
index 00000000000..167c7957229
--- /dev/null
+++ b/src/plugins/hs_apps/hs_test.h
@@ -0,0 +1,212 @@
+/*
+ * hs_test.h
+ *
+ * Copyright (c) 2023 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef __included_hs_test_t__
+#define __included_hs_test_t__
+
+#include <vnet/session/application_interface.h>
+#include <vnet/session/session.h>
+
+#define HS_TEST_CFG_CTRL_MAGIC 0xfeedface
+#define HS_TEST_CFG_TXBUF_SIZE_DEF 8192
+#define HS_TEST_CFG_RXBUF_SIZE_DEF (64 * HS_TEST_CFG_TXBUF_SIZE_DEF)
+#define HS_TEST_CFG_NUM_WRITES_DEF 1000000
+
+#define VCL_TEST_TOKEN_HELP "#H"
+#define VCL_TEST_TOKEN_EXIT "#X"
+#define VCL_TEST_TOKEN_VERBOSE "#V"
+#define VCL_TEST_TOKEN_TXBUF_SIZE "#T:"
+#define VCL_TEST_TOKEN_NUM_TEST_SESS "#I:"
+#define VCL_TEST_TOKEN_NUM_WRITES "#N:"
+#define VCL_TEST_TOKEN_RXBUF_SIZE "#R:"
+#define VCL_TEST_TOKEN_SHOW_CFG "#C"
+#define HS_TEST_TOKEN_RUN_UNI "#U"
+#define HS_TEST_TOKEN_RUN_BI "#B"
+
+#define HS_TEST_SEPARATOR_STRING " -----------------------------\n"
+
+#define HS_CTRL_HANDLE (~0)
+
+typedef enum
+{
+ HS_TEST_CMD_SYNC,
+ HS_TEST_CMD_START,
+ HS_TEST_CMD_STOP,
+} hs_test_cmd_t;
+
+typedef enum
+{
+ HS_TEST_TYPE_NONE,
+ HS_TEST_TYPE_ECHO,
+ HS_TEST_TYPE_UNI,
+ HS_TEST_TYPE_BI,
+ HS_TEST_TYPE_EXIT,
+ HS_TEST_TYPE_EXIT_CLIENT,
+} hs_test_t;
+
+typedef struct __attribute__ ((packed))
+{
+ uint32_t magic;
+ uint32_t seq_num;
+ uint32_t test;
+ uint32_t cmd;
+ uint32_t ctrl_handle;
+ uint32_t num_test_sessions;
+ uint32_t num_test_sessions_perq;
+ uint32_t num_test_qsessions;
+ uint32_t verbose;
+ uint32_t address_ip6;
+ uint32_t transport_udp;
+ uint64_t rxbuf_size;
+ uint64_t txbuf_size;
+ uint64_t num_writes;
+ uint64_t total_bytes;
+ uint32_t test_bytes;
+} hs_test_cfg_t;
+
+static inline char *
+hs_test_type_str (hs_test_t t)
+{
+ switch (t)
+ {
+ case HS_TEST_TYPE_NONE:
+ return "NONE";
+
+ case HS_TEST_TYPE_ECHO:
+ return "ECHO";
+
+ case HS_TEST_TYPE_UNI:
+ return "UNI";
+
+ case HS_TEST_TYPE_BI:
+ return "BI";
+
+ case HS_TEST_TYPE_EXIT:
+ return "EXIT";
+
+ default:
+ return "Unknown";
+ }
+}
+
+static inline int
+hs_test_cfg_verify (hs_test_cfg_t *cfg, hs_test_cfg_t *valid_cfg)
+{
+ /* Note: txbuf & rxbuf on server are the same buffer,
+ * so txbuf_size is not included in this check.
+ */
+ return ((cfg->magic == valid_cfg->magic) && (cfg->test == valid_cfg->test) &&
+ (cfg->verbose == valid_cfg->verbose) &&
+ (cfg->rxbuf_size == valid_cfg->rxbuf_size) &&
+ (cfg->num_writes == valid_cfg->num_writes) &&
+ (cfg->total_bytes == valid_cfg->total_bytes));
+}
+
+static inline void
+hs_test_cfg_init (hs_test_cfg_t *cfg)
+{
+ cfg->magic = HS_TEST_CFG_CTRL_MAGIC;
+ cfg->test = HS_TEST_TYPE_UNI;
+ cfg->ctrl_handle = ~0;
+ cfg->num_test_sessions = 1;
+ cfg->num_test_sessions_perq = 1;
+ cfg->verbose = 0;
+ cfg->rxbuf_size = HS_TEST_CFG_RXBUF_SIZE_DEF;
+ cfg->num_writes = HS_TEST_CFG_NUM_WRITES_DEF;
+ cfg->txbuf_size = HS_TEST_CFG_TXBUF_SIZE_DEF;
+ cfg->total_bytes = cfg->num_writes * cfg->txbuf_size;
+ cfg->test_bytes = 0;
+}
+
+static inline char *
+hs_test_cmd_to_str (int cmd)
+{
+ switch (cmd)
+ {
+ case HS_TEST_CMD_SYNC:
+ return "SYNC";
+ case HS_TEST_CMD_START:
+ return "START";
+ case HS_TEST_CMD_STOP:
+ return "STOP";
+ }
+ return "";
+}
+
+static inline void
+hs_test_cfg_dump (hs_test_cfg_t *cfg, uint8_t is_client)
+{
+ char *spc = " ";
+
+ printf (" test config (%p):\n" HS_TEST_SEPARATOR_STRING
+ " command: %s\n"
+ " magic: 0x%08x\n"
+ " seq_num: 0x%08x\n"
+ " test bytes: %s\n"
+ "%-5s test: %s (%d)\n"
+ " ctrl handle: %d (0x%x)\n"
+ "%-5s num test sockets: %u (0x%08x)\n"
+ "%-5s verbose: %s (%d)\n"
+ "%-5s rxbuf size: %lu (0x%08lx)\n"
+ "%-5s txbuf size: %lu (0x%08lx)\n"
+ "%-5s num writes: %lu (0x%08lx)\n"
+ " client tx bytes: %lu (0x%08lx)\n" HS_TEST_SEPARATOR_STRING,
+ (void *) cfg, hs_test_cmd_to_str (cfg->cmd), cfg->magic,
+ cfg->seq_num, cfg->test_bytes ? "yes" : "no",
+ is_client && (cfg->test == HS_TEST_TYPE_UNI) ?
+ "'" HS_TEST_TOKEN_RUN_UNI "'" :
+ is_client && (cfg->test == HS_TEST_TYPE_BI) ?
+ "'" HS_TEST_TOKEN_RUN_BI "'" :
+ spc,
+ hs_test_type_str (cfg->test), cfg->test, cfg->ctrl_handle,
+ cfg->ctrl_handle,
+ is_client ? "'" VCL_TEST_TOKEN_NUM_TEST_SESS "'" : spc,
+ cfg->num_test_sessions, cfg->num_test_sessions,
+ is_client ? "'" VCL_TEST_TOKEN_VERBOSE "'" : spc,
+ cfg->verbose ? "on" : "off", cfg->verbose,
+ is_client ? "'" VCL_TEST_TOKEN_RXBUF_SIZE "'" : spc, cfg->rxbuf_size,
+ cfg->rxbuf_size, is_client ? "'" VCL_TEST_TOKEN_TXBUF_SIZE "'" : spc,
+ cfg->txbuf_size, cfg->txbuf_size,
+ is_client ? "'" VCL_TEST_TOKEN_NUM_WRITES "'" : spc, cfg->num_writes,
+ cfg->num_writes, cfg->total_bytes, cfg->total_bytes);
+}
+
+static inline u16
+hs_make_data_port (u16 p)
+{
+ p = clib_net_to_host_u16 (p);
+ return clib_host_to_net_u16 (p + 1);
+}
+
+static inline void
+hs_test_app_session_init_ (app_session_t *as, session_t *s)
+{
+ as->rx_fifo = s->rx_fifo;
+ as->tx_fifo = s->tx_fifo;
+ as->vpp_evt_q = session_main_get_vpp_event_queue (s->thread_index);
+ if (session_get_transport_proto (s) == TRANSPORT_PROTO_UDP)
+ {
+ transport_connection_t *tc;
+ tc = session_get_transport (s);
+ clib_memcpy_fast (&as->transport, tc, sizeof (as->transport));
+ as->is_dgram = 1;
+ }
+}
+
+#define hs_test_app_session_init(_as, _s) \
+ hs_test_app_session_init_ ((app_session_t *) (_as), (_s))
+
+#endif /* __included_hs_test_t__ */
diff --git a/src/plugins/hs_apps/http_cli.c b/src/plugins/hs_apps/http_cli.c
new file mode 100644
index 00000000000..f42f65342c3
--- /dev/null
+++ b/src/plugins/hs_apps/http_cli.c
@@ -0,0 +1,683 @@
+/*
+* Copyright (c) 2017-2019 Cisco and/or its affiliates.
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at:
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+#include <vnet/session/application.h>
+#include <vnet/session/application_interface.h>
+#include <vnet/session/session.h>
+#include <http/http.h>
+
+typedef struct
+{
+ u32 hs_index;
+ u32 thread_index;
+ u64 node_index;
+ u8 *buf;
+} hcs_cli_args_t;
+
+typedef struct
+{
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+ u32 session_index;
+ u32 thread_index;
+ u8 *tx_buf;
+ u32 tx_offset;
+ u32 vpp_session_index;
+} hcs_session_t;
+
+typedef struct
+{
+ hcs_session_t **sessions;
+ u32 *free_http_cli_process_node_indices;
+ u32 app_index;
+
+ /* Cert key pair for tls */
+ u32 ckpair_index;
+
+ u32 prealloc_fifos;
+ u32 private_segment_size;
+ u32 fifo_size;
+ u8 *uri;
+ vlib_main_t *vlib_main;
+} hcs_main_t;
+
+static hcs_main_t hcs_main;
+
+static hcs_session_t *
+hcs_session_alloc (u32 thread_index)
+{
+ hcs_main_t *hcm = &hcs_main;
+ hcs_session_t *hs;
+ pool_get (hcm->sessions[thread_index], hs);
+ memset (hs, 0, sizeof (*hs));
+ hs->session_index = hs - hcm->sessions[thread_index];
+ hs->thread_index = thread_index;
+ return hs;
+}
+
+static hcs_session_t *
+hcs_session_get (u32 thread_index, u32 hs_index)
+{
+ hcs_main_t *hcm = &hcs_main;
+ if (pool_is_free_index (hcm->sessions[thread_index], hs_index))
+ return 0;
+ return pool_elt_at_index (hcm->sessions[thread_index], hs_index);
+}
+
+static void
+hcs_session_free (hcs_session_t *hs)
+{
+ hcs_main_t *hcm = &hcs_main;
+ u32 thread = hs->thread_index;
+ if (CLIB_DEBUG)
+ memset (hs, 0xfa, sizeof (*hs));
+ pool_put (hcm->sessions[thread], hs);
+}
+
+static void
+hcs_cli_process_free (hcs_cli_args_t *args)
+{
+ vlib_main_t *vm = vlib_get_first_main ();
+ hcs_main_t *hcm = &hcs_main;
+ hcs_cli_args_t **save_args;
+ vlib_node_runtime_t *rt;
+ vlib_node_t *n;
+ u32 node_index;
+
+ node_index = args->node_index;
+ ASSERT (node_index != 0);
+
+ n = vlib_get_node (vm, node_index);
+ rt = vlib_node_get_runtime (vm, n->index);
+ save_args = vlib_node_get_runtime_data (vm, n->index);
+
+ /* Reset process session pointer */
+ clib_mem_free (*save_args);
+ *save_args = 0;
+
+ /* Turn off the process node */
+ vlib_node_set_state (vm, rt->node_index, VLIB_NODE_STATE_DISABLED);
+
+ /* add node index to the freelist */
+ vec_add1 (hcm->free_http_cli_process_node_indices, node_index);
+}
+
+/* Header, including incantation to suppress favicon.ico requests */
+static const char *html_header_template =
+ "<html><head><title>%v</title></head>"
+ "<link rel=\"icon\" href=\"data:,\">"
+ "<body><pre>";
+
+static const char *html_footer =
+ "</pre></body></html>\r\n";
+
+static void
+hcs_cli_output (uword arg, u8 *buffer, uword buffer_bytes)
+{
+ u8 **output_vecp = (u8 **) arg;
+ u8 *output_vec;
+ u32 offset;
+
+ output_vec = *output_vecp;
+
+ offset = vec_len (output_vec);
+ vec_validate (output_vec, offset + buffer_bytes - 1);
+ clib_memcpy_fast (output_vec + offset, buffer, buffer_bytes);
+
+ *output_vecp = output_vec;
+}
+
+static void
+start_send_data (hcs_session_t *hs, http_status_code_t status)
+{
+ http_msg_t msg;
+ session_t *ts;
+ int rv;
+
+ msg.type = HTTP_MSG_REPLY;
+ msg.code = status;
+ msg.content_type = HTTP_CONTENT_TEXT_HTML;
+ msg.data.type = HTTP_MSG_DATA_INLINE;
+ msg.data.len = vec_len (hs->tx_buf);
+
+ ts = session_get (hs->vpp_session_index, hs->thread_index);
+ rv = svm_fifo_enqueue (ts->tx_fifo, sizeof (msg), (u8 *) &msg);
+ ASSERT (rv == sizeof (msg));
+
+ if (!msg.data.len)
+ goto done;
+
+ rv = svm_fifo_enqueue (ts->tx_fifo, vec_len (hs->tx_buf), hs->tx_buf);
+
+ if (rv != vec_len (hs->tx_buf))
+ {
+ hs->tx_offset = rv;
+ svm_fifo_add_want_deq_ntf (ts->tx_fifo, SVM_FIFO_WANT_DEQ_NOTIF);
+ }
+ else
+ {
+ vec_free (hs->tx_buf);
+ }
+
+done:
+
+ if (svm_fifo_set_event (ts->tx_fifo))
+ session_send_io_evt_to_thread (ts->tx_fifo, SESSION_IO_EVT_TX);
+}
+
+static void
+send_data_to_http (void *rpc_args)
+{
+ hcs_cli_args_t *args = (hcs_cli_args_t *) rpc_args;
+ hcs_session_t *hs;
+
+ hs = hcs_session_get (args->thread_index, args->hs_index);
+ if (!hs)
+ {
+ vec_free (args->buf);
+ goto cleanup;
+ }
+
+ hs->tx_buf = args->buf;
+ start_send_data (hs, HTTP_STATUS_OK);
+
+cleanup:
+
+ clib_mem_free (rpc_args);
+}
+
+static uword
+hcs_cli_process (vlib_main_t *vm, vlib_node_runtime_t *rt, vlib_frame_t *f)
+{
+ u8 *request = 0, *reply = 0, *html = 0;
+ hcs_cli_args_t *args, *rpc_args;
+ hcs_main_t *hcm = &hcs_main;
+ hcs_cli_args_t **save_args;
+ unformat_input_t input;
+ int i;
+
+ save_args = vlib_node_get_runtime_data (hcm->vlib_main, rt->node_index);
+ args = *save_args;
+
+ request = args->buf;
+
+ /* Replace slashes with spaces, stop at the end of the path */
+ i = 0;
+ while (i < vec_len (request))
+ {
+ if (request[i] == '/')
+ request[i] = ' ';
+ else if (request[i] == ' ')
+ {
+ /* vlib_cli_input is vector-based, no need for a NULL */
+ vec_set_len (request, i);
+ break;
+ }
+ i++;
+ }
+
+ /* Generate the html header */
+ html = format (0, html_header_template, request /* title */ );
+
+ /* Run the command */
+ unformat_init_vector (&input, vec_dup (request));
+ vlib_cli_input (vm, &input, hcs_cli_output, (uword) &reply);
+ unformat_free (&input);
+ request = 0;
+
+ /* Generate the html page */
+ html = format (html, "%v", reply);
+ html = format (html, html_footer);
+
+ /* Send it */
+ rpc_args = clib_mem_alloc (sizeof (*args));
+ clib_memcpy_fast (rpc_args, args, sizeof (*args));
+ rpc_args->buf = html;
+
+ session_send_rpc_evt_to_thread_force (args->thread_index, send_data_to_http,
+ rpc_args);
+
+ vec_free (reply);
+ vec_free (args->buf);
+ hcs_cli_process_free (args);
+
+ return (0);
+}
+
+/*
+ * Attach a (new or recycled) vlib process node to run one CLI request.
+ * Must run on the main thread. A private heap copy of the args is stored
+ * in the node's runtime data for hcs_cli_process to pick up.
+ */
+static void
+alloc_cli_process (hcs_cli_args_t *args)
+{
+  hcs_main_t *hcm = &hcs_main;
+  vlib_main_t *vm = hcm->vlib_main;
+  hcs_cli_args_t **save_args;
+  vlib_node_t *n;
+  uword l;
+
+  /* Recycle a parked process node if one is available */
+  l = vec_len (hcm->free_http_cli_process_node_indices);
+  if (l > 0)
+    {
+      n = vlib_get_node (vm, hcm->free_http_cli_process_node_indices[l - 1]);
+      vlib_node_set_state (vm, n->index, VLIB_NODE_STATE_POLLING);
+      vec_set_len (hcm->free_http_cli_process_node_indices, l - 1);
+    }
+  else
+    {
+      static vlib_node_registration_t r = {
+	.function = hcs_cli_process,
+	.type = VLIB_NODE_TYPE_PROCESS,
+	.process_log2_n_stack_bytes = 16,
+	.runtime_data_bytes = sizeof (void *),
+      };
+
+      vlib_register_node (vm, &r, "http-cli-%d", l);
+
+      n = vlib_get_node (vm, r.index);
+    }
+
+  /* Save the node index in the args. It won't be zero. */
+  args->node_index = n->index;
+
+  /* Save the args (pointer) in the node runtime */
+  save_args = vlib_node_get_runtime_data (vm, n->index);
+  *save_args = clib_mem_alloc (sizeof (*args));
+  clib_memcpy_fast (*save_args, args, sizeof (*args));
+
+  vlib_start_process (vm, n->runtime_index);
+}
+
+/* RPC trampoline: alloc_cli_process must run on the main thread */
+static void
+alloc_cli_process_callback (void *cb_args)
+{
+  alloc_cli_process ((hcs_cli_args_t *) cb_args);
+}
+
+/*
+ * Session rx: parse the http message header, reject anything that is not
+ * a non-empty GET, then forward the url/command to a vlib process on the
+ * main thread for execution.
+ */
+static int
+hcs_ts_rx_callback (session_t *ts)
+{
+  hcs_cli_args_t args = {};
+  hcs_session_t *hs;
+  http_msg_t msg;
+  int rv;
+
+  hs = hcs_session_get (ts->thread_index, ts->opaque);
+
+  /* Read the http message header */
+  rv = svm_fifo_dequeue (ts->rx_fifo, sizeof (msg), (u8 *) &msg);
+  ASSERT (rv == sizeof (msg));
+
+  if (msg.type != HTTP_MSG_REQUEST || msg.method_type != HTTP_REQ_GET)
+    {
+      hs->tx_buf = 0;
+      start_send_data (hs, HTTP_STATUS_METHOD_NOT_ALLOWED);
+      return 0;
+    }
+
+  /* Empty target: nothing to run */
+  if (msg.data.len == 0)
+    {
+      hs->tx_buf = 0;
+      start_send_data (hs, HTTP_STATUS_BAD_REQUEST);
+      return 0;
+    }
+
+  /* send the command to a new/recycled vlib process */
+  vec_validate (args.buf, msg.data.len - 1);
+  rv = svm_fifo_dequeue (ts->rx_fifo, msg.data.len, args.buf);
+  ASSERT (rv == msg.data.len);
+  vec_set_len (args.buf, rv);
+
+  args.hs_index = hs->session_index;
+  args.thread_index = ts->thread_index;
+
+  /* Send RPC request to main thread; args is copied by value, the buf
+     vector pointer transfers to the process node */
+  if (vlib_get_thread_index () != 0)
+    vlib_rpc_call_main_thread (alloc_cli_process_callback, (u8 *) &args,
+			       sizeof (args));
+  else
+    alloc_cli_process (&args);
+  return 0;
+}
+
+/*
+ * Session tx: drain as much of the pending reply body as fits in the
+ * tx fifo. On partial writes, remember the offset and ask for a dequeue
+ * notification so we get called again when space frees up.
+ */
+static int
+hcs_ts_tx_callback (session_t *ts)
+{
+  hcs_session_t *hs;
+  u32 to_send;
+  int rv;
+
+  hs = hcs_session_get (ts->thread_index, ts->opaque);
+  if (!hs || !hs->tx_buf)
+    return 0;
+
+  to_send = vec_len (hs->tx_buf) - hs->tx_offset;
+  rv = svm_fifo_enqueue (ts->tx_fifo, to_send, hs->tx_buf + hs->tx_offset);
+
+  if (rv <= 0)
+    {
+      /* fifo full: retry on dequeue notification */
+      svm_fifo_add_want_deq_ntf (ts->tx_fifo, SVM_FIFO_WANT_DEQ_NOTIF);
+      return 0;
+    }
+
+  if (rv < to_send)
+    {
+      hs->tx_offset += rv;
+      svm_fifo_add_want_deq_ntf (ts->tx_fifo, SVM_FIFO_WANT_DEQ_NOTIF);
+    }
+  else
+    {
+      /* all queued; vec_free also nulls hs->tx_buf */
+      vec_free (hs->tx_buf);
+    }
+
+  if (svm_fifo_set_event (ts->tx_fifo))
+    session_send_io_evt_to_thread (ts->tx_fifo, SESSION_IO_EVT_TX);
+
+  return 0;
+}
+
+/* New http session accepted: allocate per-session state and link it to
+ * the transport session via the opaque field */
+static int
+hcs_ts_accept_callback (session_t *ts)
+{
+  hcs_session_t *hs;
+
+  hs = hcs_session_alloc (ts->thread_index);
+  hs->vpp_session_index = ts->session_index;
+
+  ts->opaque = hs->session_index;
+  ts->session_state = SESSION_STATE_READY;
+
+  return 0;
+}
+
+/* Server never initiates connections; this callback should not fire */
+static int
+hcs_ts_connected_callback (u32 app_index, u32 api_context, session_t *s,
+			   session_error_t err)
+{
+  clib_warning ("called...");
+  return -1;
+}
+
+/* Peer closed: confirm the disconnect back to the session layer */
+static void
+hcs_ts_disconnect_callback (session_t *s)
+{
+  hcs_main_t *hcm = &hcs_main;
+  vnet_disconnect_args_t _a = { 0 }, *a = &_a;
+
+  a->handle = session_handle (s);
+  a->app_index = hcm->app_index;
+  vnet_disconnect_session (a);
+}
+
+/* Transport reset: same handling as a regular disconnect */
+static void
+hcs_ts_reset_callback (session_t *s)
+{
+  hcs_main_t *hcm = &hcs_main;
+  vnet_disconnect_args_t _a = { 0 }, *a = &_a;
+
+  a->handle = session_handle (s);
+  a->app_index = hcm->app_index;
+  vnet_disconnect_session (a);
+}
+
+/* Final session cleanup: free app state once the session (not just the
+ * transport) is being destroyed */
+static void
+hcs_ts_cleanup_callback (session_t *s, session_cleanup_ntf_t ntf)
+{
+  hcs_session_t *hs;
+
+  if (ntf == SESSION_CLEANUP_TRANSPORT)
+    return;
+
+  hs = hcs_session_get (s->thread_index, s->opaque);
+  if (!hs)
+    return;
+
+  vec_free (hs->tx_buf);
+  hcs_session_free (hs);
+}
+
+/* Segment add/del notifications: builtin app, nothing to map/unmap */
+static int
+hcs_add_segment_callback (u32 client_index, u64 segment_handle)
+{
+  return 0;
+}
+
+static int
+hcs_del_segment_callback (u32 client_index, u64 segment_handle)
+{
+  return 0;
+}
+
+/* Session-layer callback table for the http cli server app */
+static session_cb_vft_t hcs_session_cb_vft = {
+  .session_accept_callback = hcs_ts_accept_callback,
+  .session_disconnect_callback = hcs_ts_disconnect_callback,
+  .session_connected_callback = hcs_ts_connected_callback,
+  .add_segment_callback = hcs_add_segment_callback,
+  .del_segment_callback = hcs_del_segment_callback,
+  .builtin_app_rx_callback = hcs_ts_rx_callback,
+  .builtin_app_tx_callback = hcs_ts_tx_callback,
+  .session_reset_callback = hcs_ts_reset_callback,
+  .session_cleanup_callback = hcs_ts_cleanup_callback,
+};
+
+/*
+ * Attach the server as a builtin application and register the test
+ * cert/key pair used when listening over a crypto transport.
+ * Returns 0 on success, -1 on attach failure.
+ */
+static int
+hcs_attach ()
+{
+  vnet_app_add_cert_key_pair_args_t _ck_pair, *ck_pair = &_ck_pair;
+  hcs_main_t *hcm = &hcs_main;
+  u64 options[APP_OPTIONS_N_OPTIONS];
+  vnet_app_attach_args_t _a, *a = &_a;
+  u32 segment_size = 128 << 20;
+
+  clib_memset (a, 0, sizeof (*a));
+  clib_memset (options, 0, sizeof (options));
+
+  if (hcm->private_segment_size)
+    segment_size = hcm->private_segment_size;
+
+  a->api_client_index = ~0;
+  a->name = format (0, "http_cli_server");
+  a->session_cb_vft = &hcs_session_cb_vft;
+  a->options = options;
+  a->options[APP_OPTIONS_SEGMENT_SIZE] = segment_size;
+  a->options[APP_OPTIONS_ADD_SEGMENT_SIZE] = segment_size;
+  a->options[APP_OPTIONS_RX_FIFO_SIZE] =
+    hcm->fifo_size ? hcm->fifo_size : 8 << 10;
+  a->options[APP_OPTIONS_TX_FIFO_SIZE] =
+    hcm->fifo_size ? hcm->fifo_size : 32 << 10;
+  a->options[APP_OPTIONS_FLAGS] = APP_OPTIONS_FLAGS_IS_BUILTIN;
+  a->options[APP_OPTIONS_PREALLOC_FIFO_PAIRS] = hcm->prealloc_fifos;
+
+  if (vnet_application_attach (a))
+    {
+      vec_free (a->name);
+      clib_warning ("failed to attach server");
+      return -1;
+    }
+  vec_free (a->name);
+  hcm->app_index = a->app_index;
+
+  /* Register the built-in test certificate for tls/dtls/quic listeners */
+  clib_memset (ck_pair, 0, sizeof (*ck_pair));
+  ck_pair->cert = (u8 *) test_srv_crt_rsa;
+  ck_pair->key = (u8 *) test_srv_key_rsa;
+  ck_pair->cert_len = test_srv_crt_rsa_len;
+  ck_pair->key_len = test_srv_key_rsa_len;
+  vnet_app_add_cert_key_pair (ck_pair);
+  hcm->ckpair_index = ck_pair->index;
+
+  return 0;
+}
+
+/* True for transports that need a cert/key pair to listen */
+static int
+hcs_transport_needs_crypto (transport_proto_t proto)
+{
+  return proto == TRANSPORT_PROTO_TLS || proto == TRANSPORT_PROTO_DTLS ||
+	 proto == TRANSPORT_PROTO_QUIC;
+}
+
+/*
+ * Start listening. The configured (or default tcp port 80) uri selects
+ * the underlying transport; the listen itself always goes through the
+ * http transport, with a crypto extended config attached when the
+ * underlying transport needs it.
+ */
+static int
+hcs_listen ()
+{
+  session_endpoint_cfg_t sep = SESSION_ENDPOINT_CFG_NULL;
+  hcs_main_t *hcm = &hcs_main;
+  vnet_listen_args_t _a, *a = &_a;
+  char *uri = "tcp://0.0.0.0/80";
+  u8 need_crypto;
+  int rv;
+
+  clib_memset (a, 0, sizeof (*a));
+  a->app_index = hcm->app_index;
+
+  if (hcm->uri)
+    uri = (char *) hcm->uri;
+
+  if (parse_uri (uri, &sep))
+    return -1;
+
+  need_crypto = hcs_transport_needs_crypto (sep.transport_proto);
+
+  sep.transport_proto = TRANSPORT_PROTO_HTTP;
+  clib_memcpy (&a->sep_ext, &sep, sizeof (sep));
+
+  if (need_crypto)
+    {
+      session_endpoint_alloc_ext_cfg (&a->sep_ext,
+				      TRANSPORT_ENDPT_EXT_CFG_CRYPTO);
+      a->sep_ext.ext_cfg->crypto.ckpair_index = hcm->ckpair_index;
+    }
+
+  rv = vnet_listen (a);
+
+  if (need_crypto)
+    clib_mem_free (a->sep_ext.ext_cfg);
+
+  return rv;
+}
+
+/* Detach the app from the session layer and mark it as not running */
+static void
+hcs_detach ()
+{
+  vnet_app_detach_args_t _a, *a = &_a;
+  hcs_main_t *hcm = &hcs_main;
+  a->app_index = hcm->app_index;
+  a->api_client_index = APP_INVALID_INDEX;
+  hcm->app_index = ~0;
+  vnet_application_detach (a);
+}
+
+/*
+ * Allocate per-thread session pools, attach the app and start listening.
+ * Returns 0 on success, -1 on failure (detaching again if listen failed).
+ */
+static int
+hcs_create (vlib_main_t *vm)
+{
+  vlib_thread_main_t *vtm = vlib_get_thread_main ();
+  hcs_main_t *hcm = &hcs_main;
+  u32 num_threads;
+
+  num_threads = 1 /* main thread */ + vtm->n_threads;
+  vec_validate (hcm->sessions, num_threads - 1);
+
+  if (hcs_attach ())
+    {
+      clib_warning ("failed to attach server");
+      return -1;
+    }
+  if (hcs_listen ())
+    {
+      hcs_detach ();
+      clib_warning ("failed to start listening");
+      return -1;
+    }
+
+  return 0;
+}
+
+/*
+ * CLI handler for "http cli server": parse optional fifo/segment/uri
+ * settings, enable the session layer and start the server.
+ */
+static clib_error_t *
+hcs_create_command_fn (vlib_main_t *vm, unformat_input_t *input,
+		       vlib_cli_command_t *cmd)
+{
+  unformat_input_t _line_input, *line_input = &_line_input;
+  hcs_main_t *hcm = &hcs_main;
+  u64 seg_size;
+  int rv;
+
+  hcm->prealloc_fifos = 0;
+  hcm->private_segment_size = 0;
+  hcm->fifo_size = 0;
+
+  /* Get a line of input. No arguments: start with defaults */
+  if (!unformat_user (input, unformat_line_input, line_input))
+    goto start_server;
+
+  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+    {
+      if (unformat (line_input, "prealloc-fifos %d", &hcm->prealloc_fifos))
+	;
+      else if (unformat (line_input, "private-segment-size %U",
+			 unformat_memory_size, &seg_size))
+	hcm->private_segment_size = seg_size;
+      else if (unformat (line_input, "fifo-size %d", &hcm->fifo_size))
+	hcm->fifo_size <<= 10;
+      else if (unformat (line_input, "uri %s", &hcm->uri))
+	;
+      else
+	{
+	  unformat_free (line_input);
+	  return clib_error_return (0, "unknown input `%U'",
+				    format_unformat_error, line_input);
+	}
+    }
+
+  unformat_free (line_input);
+
+start_server:
+
+  if (hcm->app_index != (u32) ~0)
+    return clib_error_return (0, "test http server is already running");
+
+  vnet_session_enable_disable (vm, 1 /* turn on TCP, etc. */ );
+
+  rv = hcs_create (vm);
+  switch (rv)
+    {
+    case 0:
+      break;
+    default:
+      return clib_error_return (0, "server_create returned %d", rv);
+    }
+
+  return 0;
+}
+
+/* CLI: start the http cli server */
+VLIB_CLI_COMMAND (hcs_create_command, static) = {
+  .path = "http cli server",
+  .short_help = "http cli server [uri <uri>] [fifo-size <nbytes>] "
+		"[private-segment-size <nMG>] [prealloc-fifos <n>]",
+  .function = hcs_create_command_fn,
+};
+
+/* Init: mark the app as not attached and cache the vlib main pointer */
+static clib_error_t *
+hcs_main_init (vlib_main_t *vm)
+{
+  hcs_main_t *hcs = &hcs_main;
+
+  hcs->app_index = ~0;
+  hcs->vlib_main = vm;
+  return 0;
+}
+
+VLIB_INIT_FUNCTION (hcs_main_init);
+
+/*
+* fd.io coding-style-patch-verification: ON
+*
+* Local Variables:
+* eval: (c-set-style "gnu")
+* End:
+*/
diff --git a/src/plugins/hs_apps/http_cli.h b/src/plugins/hs_apps/http_cli.h
new file mode 100644
index 00000000000..f774552d60f
--- /dev/null
+++ b/src/plugins/hs_apps/http_cli.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2022 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/* Header, including incantation to suppress favicon.ico requests.
+ * These are defined in a header file, so give them internal linkage:
+ * a non-static definition would produce duplicate-symbol link errors
+ * if more than one translation unit included this header. */
+static const char *html_header_template = "<html><head><title>%v</title></head>"
+					  "<link rel=\"icon\" href=\"data:,\">"
+					  "<body><pre>";
+
+static const char *html_footer = "</pre></body></html>\r\n";
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/hs_apps/http_client_cli.c b/src/plugins/hs_apps/http_client_cli.c
new file mode 100644
index 00000000000..a99169bafea
--- /dev/null
+++ b/src/plugins/hs_apps/http_client_cli.c
@@ -0,0 +1,575 @@
+/*
+ * Copyright (c) 2022 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/session/application_interface.h>
+#include <vnet/session/session.h>
+#include <http/http.h>
+
+#define HCC_DEBUG 0
+
+#if HCC_DEBUG
+#define HCC_DBG(_fmt, _args...) clib_warning (_fmt, ##_args)
+#else
+#define HCC_DBG(_fmt, _args...)
+#endif
+
+/* Per-connection client state */
+typedef struct
+{
+  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+  u32 session_index;
+  u32 thread_index;
+  u32 rx_offset;
+  u32 vpp_session_index;
+  /* bytes of reply body still expected */
+  u32 to_recv;
+  u8 is_closed;
+} hcc_session_t;
+
+/* Per-thread worker: pool of client sessions */
+typedef struct
+{
+  hcc_session_t *sessions;
+  u8 *rx_buf;
+  u32 thread_index;
+} hcc_worker_t;
+
+/* Global client state and CLI-configured parameters */
+typedef struct
+{
+  hcc_worker_t *wrk;
+  u32 app_index;
+
+  u32 prealloc_fifos;
+  u32 private_segment_size;
+  u32 fifo_size;
+  u8 *uri;
+  u8 *http_query;
+  session_endpoint_cfg_t connect_sep;
+
+  u8 test_client_attached;
+  vlib_main_t *vlib_main;
+  u32 cli_node_index;
+  u8 *http_response;
+  u8 *appns_id;
+  u64 appns_secret;
+} hcc_main_t;
+
+/* Events signalled back to the waiting CLI process */
+typedef enum
+{
+  HCC_REPLY_RECEIVED = 100,
+  HCC_TRANSPORT_CLOSED,
+  HCC_CONNECT_FAILED,
+} hcc_cli_signal_t;
+
+static hcc_main_t hcc_main;
+
+/* Worker lookup by thread index */
+static hcc_worker_t *
+hcc_worker_get (u32 thread_index)
+{
+  return vec_elt_at_index (hcc_main.wrk, thread_index);
+}
+
+/* Allocate a zeroed session from the worker's pool */
+static hcc_session_t *
+hcc_session_alloc (hcc_worker_t *wrk)
+{
+  hcc_session_t *hs;
+  pool_get_zero (wrk->sessions, hs);
+  hs->session_index = hs - wrk->sessions;
+  hs->thread_index = wrk->thread_index;
+  return hs;
+}
+
+/* NB: argument order is (session index, thread index) */
+static hcc_session_t *
+hcc_session_get (u32 hs_index, u32 thread_index)
+{
+  hcc_worker_t *wrk = hcc_worker_get (thread_index);
+  return pool_elt_at_index (wrk->sessions, hs_index);
+}
+
+/* Return a session to its worker's pool */
+static void
+hcc_session_free (u32 thread_index, hcc_session_t *hs)
+{
+  hcc_worker_t *wrk = hcc_worker_get (thread_index);
+  pool_put (wrk->sessions, hs);
+}
+
+/* Client app never listens; accept should not be called */
+static int
+hcc_ts_accept_callback (session_t *ts)
+{
+  clib_warning ("bug");
+  return -1;
+}
+
+/* Peer closed: confirm the disconnect back to the session layer */
+static void
+hcc_ts_disconnect_callback (session_t *s)
+{
+  hcc_main_t *hcm = &hcc_main;
+  vnet_disconnect_args_t _a = { 0 }, *a = &_a;
+
+  a->handle = session_handle (s);
+  a->app_index = hcm->app_index;
+  vnet_disconnect_session (a);
+}
+
+/*
+ * Connect completed: move the half-open state tracked on the main thread
+ * (index hc_index) onto the session's owning worker, link the new app
+ * session to the transport session, and enqueue the http GET request.
+ */
+static int
+hcc_ts_connected_callback (u32 app_index, u32 hc_index, session_t *as,
+			   session_error_t err)
+{
+  hcc_main_t *hcm = &hcc_main;
+  hcc_session_t *hs, *new_hs;
+  hcc_worker_t *wrk;
+  http_msg_t msg;
+  u32 new_hs_index;
+  int rv;
+
+  HCC_DBG ("hc_index: %d", hc_index);
+
+  if (err)
+    {
+      clib_warning ("connected error: hc_index(%d): %U", hc_index,
+		    format_session_error, err);
+      vlib_process_signal_event_mt (hcm->vlib_main, hcm->cli_node_index,
+				    HCC_CONNECT_FAILED, 0);
+      return -1;
+    }
+
+  /* TODO delete half open session once the support is added in http layer */
+  hs = hcc_session_get (hc_index, 0);
+  wrk = hcc_worker_get (as->thread_index);
+  new_hs = hcc_session_alloc (wrk);
+  /* Copy the half-open state, but preserve the indices assigned by the
+   * allocator, which the wholesale struct copy would otherwise clobber */
+  new_hs_index = new_hs->session_index;
+  clib_memcpy_fast (new_hs, hs, sizeof (*hs));
+  new_hs->session_index = new_hs_index;
+  new_hs->thread_index = as->thread_index;
+  new_hs->vpp_session_index = as->session_index;
+
+  /* Let the rx/reset callbacks find the app session */
+  as->opaque = new_hs->session_index;
+
+  msg.type = HTTP_MSG_REQUEST;
+  msg.method_type = HTTP_REQ_GET;
+  msg.content_type = HTTP_CONTENT_TEXT_HTML;
+  msg.data.type = HTTP_MSG_DATA_INLINE;
+  msg.data.len = vec_len (hcm->http_query);
+
+  svm_fifo_seg_t segs[2] = { { (u8 *) &msg, sizeof (msg) },
+			     { hcm->http_query, vec_len (hcm->http_query) } };
+
+  rv = svm_fifo_enqueue_segments (as->tx_fifo, segs, 2, 0 /* allow partial */);
+  if (rv < 0 || rv != sizeof (msg) + vec_len (hcm->http_query))
+    {
+      clib_warning ("failed app enqueue");
+      return -1;
+    }
+
+  if (svm_fifo_set_event (as->tx_fifo))
+    session_send_io_evt_to_thread (as->tx_fifo, SESSION_IO_EVT_TX);
+
+  return 0;
+}
+
+/* Transport reset: mark the app session closed and disconnect */
+static void
+hcc_ts_reset_callback (session_t *s)
+{
+  hcc_main_t *hcm = &hcc_main;
+  hcc_session_t *hs;
+  vnet_disconnect_args_t _a = { 0 }, *a = &_a;
+
+  hs = hcc_session_get (s->opaque, s->thread_index);
+  hs->is_closed = 1;
+
+  a->handle = session_handle (s);
+  a->app_index = hcm->app_index;
+  vnet_disconnect_session (a);
+}
+
+/* Request fits in the fifo at connect time; no deferred tx expected */
+static int
+hcc_ts_tx_callback (session_t *ts)
+{
+  clib_warning ("bug");
+  return -1;
+}
+
+/* App-initiated disconnect of a transport session */
+static void
+hcc_session_disconnect (session_t *s)
+{
+  hcc_main_t *hcm = &hcc_main;
+  vnet_disconnect_args_t _a = { 0 }, *a = &_a;
+  a->handle = session_handle (s);
+  a->app_index = hcm->app_index;
+  vnet_disconnect_session (a);
+}
+
+/*
+ * Session rx: on first call read the http reply header and size the
+ * response buffer; then accumulate body bytes until the full reply has
+ * been received, at which point the waiting CLI process is signalled.
+ */
+static int
+hcc_ts_rx_callback (session_t *ts)
+{
+  hcc_main_t *hcm = &hcc_main;
+  hcc_session_t *hs;
+  http_msg_t msg;
+  int rv;
+
+  hs = hcc_session_get (ts->opaque, ts->thread_index);
+
+  if (hs->is_closed)
+    {
+      clib_warning ("session is closed");
+      return 0;
+    }
+
+  if (hs->to_recv == 0)
+    {
+      rv = svm_fifo_dequeue (ts->rx_fifo, sizeof (msg), (u8 *) &msg);
+      ASSERT (rv == sizeof (msg));
+
+      if (msg.type != HTTP_MSG_REPLY || msg.code != HTTP_STATUS_OK)
+	{
+	  clib_warning ("unexpected msg type %d", msg.type);
+	  return 0;
+	}
+
+      /* Zero-length body: done. Also avoids the u32 underflow of
+       * vec_validate (len - 1) below. */
+      if (msg.data.len == 0)
+	{
+	  hcc_session_disconnect (ts);
+	  vlib_process_signal_event_mt (hcm->vlib_main, hcm->cli_node_index,
+					HCC_REPLY_RECEIVED, 0);
+	  return 0;
+	}
+
+      vec_validate (hcm->http_response, msg.data.len - 1);
+      vec_reset_length (hcm->http_response);
+      hs->to_recv = msg.data.len;
+    }
+
+  u32 max_deq = svm_fifo_max_dequeue (ts->rx_fifo);
+
+  u32 n_deq = clib_min (hs->to_recv, max_deq);
+  u32 curr = vec_len (hcm->http_response);
+  rv = svm_fifo_dequeue (ts->rx_fifo, n_deq, hcm->http_response + curr);
+  if (rv < 0)
+    {
+      clib_warning ("app dequeue(n=%d) failed; rv = %d", n_deq, rv);
+      return -1;
+    }
+
+  if (rv != n_deq)
+    return -1;
+
+  vec_set_len (hcm->http_response, curr + n_deq);
+  ASSERT (hs->to_recv >= rv);
+  hs->to_recv -= rv;
+  HCC_DBG ("app rcvd %d, remains %d", rv, hs->to_recv);
+
+  if (hs->to_recv == 0)
+    {
+      hcc_session_disconnect (ts);
+      vlib_process_signal_event_mt (hcm->vlib_main, hcm->cli_node_index,
+				    HCC_REPLY_RECEIVED, 0);
+    }
+
+  return 0;
+}
+
+/*
+ * Final session cleanup: free the app session state.
+ * hcc_session_get takes (hs_index, thread_index) -- the transport
+ * session's opaque holds the app session index, so pass it first
+ * (the previous call had the two arguments swapped).
+ */
+static void
+hcc_ts_cleanup_callback (session_t *s, session_cleanup_ntf_t ntf)
+{
+  hcc_session_t *hs;
+
+  hs = hcc_session_get (s->opaque, s->thread_index);
+  if (!hs)
+    return;
+
+  hcc_session_free (s->thread_index, hs);
+}
+
+/* Transport closed before the reply completed: wake the CLI process */
+static void
+hcc_ts_transport_closed (session_t *s)
+{
+  hcc_main_t *hcm = &hcc_main;
+
+  HCC_DBG ("transport closed");
+
+  vlib_process_signal_event_mt (hcm->vlib_main, hcm->cli_node_index,
+				HCC_TRANSPORT_CLOSED, 0);
+}
+
+/* Session-layer callback table for the http cli client app */
+static session_cb_vft_t hcc_session_cb_vft = {
+  .session_accept_callback = hcc_ts_accept_callback,
+  .session_disconnect_callback = hcc_ts_disconnect_callback,
+  .session_connected_callback = hcc_ts_connected_callback,
+  .builtin_app_rx_callback = hcc_ts_rx_callback,
+  .builtin_app_tx_callback = hcc_ts_tx_callback,
+  .session_reset_callback = hcc_ts_reset_callback,
+  .session_cleanup_callback = hcc_ts_cleanup_callback,
+  .session_transport_closed_callback = hcc_ts_transport_closed,
+};
+
+/*
+ * Attach the client as a builtin application, optionally inside an app
+ * namespace. Returns 0 on success, a clib error otherwise.
+ */
+static clib_error_t *
+hcc_attach ()
+{
+  hcc_main_t *hcm = &hcc_main;
+  vnet_app_attach_args_t _a, *a = &_a;
+  /* size by the enum, not a magic number, so the array cannot be
+   * overrun if new options are added */
+  u64 options[APP_OPTIONS_N_OPTIONS];
+  u32 segment_size = 128 << 20;
+  int rv;
+
+  if (hcm->private_segment_size)
+    segment_size = hcm->private_segment_size;
+
+  clib_memset (a, 0, sizeof (*a));
+  clib_memset (options, 0, sizeof (options));
+
+  a->api_client_index = ~0;
+  a->name = format (0, "http_cli_client");
+  a->session_cb_vft = &hcc_session_cb_vft;
+  a->options = options;
+  a->options[APP_OPTIONS_SEGMENT_SIZE] = segment_size;
+  a->options[APP_OPTIONS_ADD_SEGMENT_SIZE] = segment_size;
+  a->options[APP_OPTIONS_RX_FIFO_SIZE] =
+    hcm->fifo_size ? hcm->fifo_size : 8 << 10;
+  a->options[APP_OPTIONS_TX_FIFO_SIZE] =
+    hcm->fifo_size ? hcm->fifo_size : 32 << 10;
+  a->options[APP_OPTIONS_FLAGS] = APP_OPTIONS_FLAGS_IS_BUILTIN;
+  a->options[APP_OPTIONS_PREALLOC_FIFO_PAIRS] = hcm->prealloc_fifos;
+  if (hcm->appns_id)
+    {
+      a->namespace_id = hcm->appns_id;
+      a->options[APP_OPTIONS_NAMESPACE_SECRET] = hcm->appns_secret;
+    }
+
+  if ((rv = vnet_application_attach (a)))
+    return clib_error_return (0, "attach returned %d", rv);
+
+  hcm->app_index = a->app_index;
+  vec_free (a->name);
+  hcm->test_client_attached = 1;
+  return 0;
+}
+
+/*
+ * Issue the connect on the session control thread; frees the args
+ * vector allocated by hcc_connect.
+ */
+static int
+hcc_connect_rpc (void *rpc_args)
+{
+  vnet_connect_args_t *a = rpc_args;
+  int rv;
+
+  rv = vnet_connect (a);
+  if (rv)
+    /* clib_warning's first argument is the format string; passing 0
+     * first (as before) would garble/crash the log call */
+    clib_warning ("connect returned: %U", format_session_error, rv);
+
+  vec_free (a);
+  return rv;
+}
+
+/* Defer the connect to the connects (first transport) thread */
+static void
+hcc_program_connect (vnet_connect_args_t *a)
+{
+  session_send_rpc_evt_to_thread_force (transport_cl_thread (),
+					hcc_connect_rpc, a);
+}
+
+/*
+ * Allocate a half-open tracking session on the main thread's worker and
+ * program the connect. The args are vector-allocated so the rpc handler
+ * can free them with vec_free.
+ */
+static clib_error_t *
+hcc_connect ()
+{
+  vnet_connect_args_t *a = 0;
+  hcc_main_t *hcm = &hcc_main;
+  hcc_worker_t *wrk;
+  hcc_session_t *hs;
+
+  vec_validate (a, 0);
+  clib_memset (a, 0, sizeof (a[0]));
+
+  clib_memcpy (&a->sep_ext, &hcm->connect_sep, sizeof (hcm->connect_sep));
+  a->app_index = hcm->app_index;
+
+  /* allocate http session on main thread */
+  wrk = hcc_worker_get (0);
+  hs = hcc_session_alloc (wrk);
+  a->api_context = hs->session_index;
+
+  hcc_program_connect (a);
+  return 0;
+}
+
+/*
+ * Attach, connect and block the calling CLI process (up to 10s) for one
+ * of the hcc_cli_signal_t events; print the response if requested.
+ */
+static clib_error_t *
+hcc_run (vlib_main_t *vm, int print_output)
+{
+  vlib_thread_main_t *vtm = vlib_get_thread_main ();
+  hcc_main_t *hcm = &hcc_main;
+  uword event_type, *event_data = 0;
+  u32 num_threads;
+  clib_error_t *err = 0;
+  hcc_worker_t *wrk;
+
+  /* one worker per thread; vec_validate takes the highest index, so
+   * num_threads - 1 (the previous value allocated one element extra) */
+  num_threads = 1 /* main thread */ + vtm->n_threads;
+  vec_validate (hcm->wrk, num_threads - 1);
+  vec_foreach (wrk, hcm->wrk)
+    {
+      wrk->thread_index = wrk - hcm->wrk;
+    }
+
+  if ((err = hcc_attach ()))
+    {
+      return clib_error_return (0, "http client attach: %U", format_clib_error,
+				err);
+    }
+
+  if ((err = hcc_connect ()))
+    {
+      return clib_error_return (0, "http client connect: %U",
+				format_clib_error, err);
+    }
+
+  /* Wait for the reply (or failure) signalled by the session callbacks */
+  vlib_process_wait_for_event_or_clock (vm, 10);
+  event_type = vlib_process_get_events (vm, &event_data);
+  switch (event_type)
+    {
+    case ~0:
+      err = clib_error_return (0, "timeout");
+      goto cleanup;
+
+    case HCC_REPLY_RECEIVED:
+      if (print_output)
+	vlib_cli_output (vm, "%v", hcm->http_response);
+      vec_free (hcm->http_response);
+      break;
+    case HCC_TRANSPORT_CLOSED:
+      err = clib_error_return (0, "error, transport closed");
+      break;
+    case HCC_CONNECT_FAILED:
+      err = clib_error_return (0, "failed to connect");
+      break;
+    default:
+      err = clib_error_return (0, "unexpected event %d", event_type);
+      break;
+    }
+
+cleanup:
+  vec_free (event_data);
+  return err;
+}
+
+/* Detach the client app, if attached. Returns the detach rv (0 if
+ * nothing to do). */
+static int
+hcc_detach ()
+{
+  hcc_main_t *hcm = &hcc_main;
+  vnet_app_detach_args_t _da, *da = &_da;
+  int rv;
+
+  if (!hcm->test_client_attached)
+    return 0;
+
+  da->app_index = hcm->app_index;
+  da->api_client_index = ~0;
+  rv = vnet_application_detach (da);
+  hcm->test_client_attached = 0;
+  hcm->app_index = ~0;
+
+  return rv;
+}
+
+/*
+ * CLI handler for "http cli client": parse target uri/query and optional
+ * fifo/segment/namespace settings, then run one request to completion.
+ */
+static clib_error_t *
+hcc_command_fn (vlib_main_t *vm, unformat_input_t *input,
+		vlib_cli_command_t *cmd)
+{
+  unformat_input_t _line_input, *line_input = &_line_input;
+  hcc_main_t *hcm = &hcc_main;
+  u64 seg_size;
+  u8 *appns_id = 0;
+  clib_error_t *err = 0;
+  int rv, print_output = 1;
+
+  hcm->prealloc_fifos = 0;
+  hcm->private_segment_size = 0;
+  hcm->fifo_size = 0;
+
+  if (hcm->test_client_attached)
+    return clib_error_return (0, "failed: already running!");
+
+  /* Get a line of input. */
+  if (!unformat_user (input, unformat_line_input, line_input))
+    return clib_error_return (0, "expected URI");
+
+  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+    {
+      if (unformat (line_input, "prealloc-fifos %d", &hcm->prealloc_fifos))
+	;
+      else if (unformat (line_input, "private-segment-size %U",
+			 unformat_memory_size, &seg_size))
+	hcm->private_segment_size = seg_size;
+      else if (unformat (line_input, "fifo-size %d", &hcm->fifo_size))
+	hcm->fifo_size <<= 10;
+      else if (unformat (line_input, "uri %s", &hcm->uri))
+	;
+      else if (unformat (line_input, "no-output"))
+	print_output = 0;
+      else if (unformat (line_input, "appns %_%v%_", &appns_id))
+	;
+      else if (unformat (line_input, "secret %lu", &hcm->appns_secret))
+	;
+      else if (unformat (line_input, "query %s", &hcm->http_query))
+	;
+      else
+	{
+	  /* appns_id not yet handed to hcm: free it here to avoid a leak */
+	  vec_free (appns_id);
+	  err = clib_error_return (0, "unknown input `%U'",
+				   format_unformat_error, line_input);
+	  goto done;
+	}
+    }
+
+  /* hcm takes ownership of appns_id */
+  vec_free (hcm->appns_id);
+  hcm->appns_id = appns_id;
+  hcm->cli_node_index = vlib_get_current_process (vm)->node_runtime.node_index;
+
+  if (!hcm->uri)
+    {
+      err = clib_error_return (0, "URI not defined");
+      goto done;
+    }
+
+  if ((rv = parse_uri ((char *) hcm->uri, &hcm->connect_sep)))
+    {
+      err = clib_error_return (0, "Uri parse error: %d", rv);
+      goto done;
+    }
+
+  vlib_worker_thread_barrier_sync (vm);
+  vnet_session_enable_disable (vm, 1 /* turn on TCP, etc. */);
+  vlib_worker_thread_barrier_release (vm);
+
+  err = hcc_run (vm, print_output);
+
+  if (hcc_detach ())
+    {
+      /* don't override last error */
+      if (!err)
+	err = clib_error_return (0, "failed: app detach");
+      clib_warning ("WARNING: app detach failed...");
+    }
+
+done:
+  vec_free (hcm->uri);
+  vec_free (hcm->http_query);
+  unformat_free (line_input);
+  return err;
+}
+
+/* CLI: run one http cli client request */
+VLIB_CLI_COMMAND (hcc_command, static) = {
+  .path = "http cli client",
+  .short_help = "[appns <app-ns> secret <appns-secret>] uri http://<ip-addr> "
+		"query <query-string> [no-output]",
+  .function = hcc_command_fn,
+  .is_mp_safe = 1,
+};
+
+/* Init: mark the app as not attached and cache the vlib main pointer */
+static clib_error_t *
+hcc_main_init (vlib_main_t *vm)
+{
+  hcc_main_t *hcm = &hcc_main;
+
+  hcm->app_index = ~0;
+  hcm->vlib_main = vm;
+  return 0;
+}
+
+VLIB_INIT_FUNCTION (hcc_main_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/hs_apps/http_server.c b/src/plugins/hs_apps/http_server.c
deleted file mode 100644
index a46e0a4ae13..00000000000
--- a/src/plugins/hs_apps/http_server.c
+++ /dev/null
@@ -1,1004 +0,0 @@
-/*
-* Copyright (c) 2017-2019 Cisco and/or its affiliates.
-* Licensed under the Apache License, Version 2.0 (the "License");
-* you may not use this file except in compliance with the License.
-* You may obtain a copy of the License at:
-*
-* http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
-
-#include <vnet/vnet.h>
-#include <vnet/session/application.h>
-#include <vnet/session/application_interface.h>
-#include <vnet/session/session.h>
-#include <vppinfra/tw_timer_2t_1w_2048sl.h>
-
-typedef enum
-{
- EVENT_WAKEUP = 1,
-} http_process_event_t;
-
-typedef struct
-{
- u32 hs_index;
- u32 thread_index;
- u64 node_index;
-} http_server_args;
-
-typedef enum
-{
- HTTP_STATE_CLOSED,
- HTTP_STATE_ESTABLISHED,
- HTTP_STATE_OK_SENT,
-} http_session_state_t;
-typedef struct
-{
- CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
-#define _(type, name) type name;
- foreach_app_session_field
-#undef _
- u32 thread_index;
- u8 *rx_buf;
- u32 vpp_session_index;
- u64 vpp_session_handle;
- u32 timer_handle;
-} http_session_t;
-
-typedef struct
-{
- http_session_t **sessions;
- clib_rwlock_t sessions_lock;
- u32 **session_to_http_session;
-
- svm_msg_q_t **vpp_queue;
-
- uword *handler_by_get_request;
-
- u32 *free_http_cli_process_node_indices;
-
- /* Sever's event queue */
- svm_queue_t *vl_input_queue;
-
- /* API client handle */
- u32 my_client_index;
-
- u32 app_index;
-
- /* process node index for evnt scheduling */
- u32 node_index;
-
- /* Cert key pair for tls */
- u32 ckpair_index;
-
- tw_timer_wheel_2t_1w_2048sl_t tw;
- clib_spinlock_t tw_lock;
-
- u32 prealloc_fifos;
- u32 private_segment_size;
- u32 fifo_size;
- u8 *uri;
- u32 is_static;
- vlib_main_t *vlib_main;
-} http_server_main_t;
-
-http_server_main_t http_server_main;
-
-static void
-http_server_sessions_reader_lock (void)
-{
- clib_rwlock_reader_lock (&http_server_main.sessions_lock);
-}
-
-static void
-http_server_sessions_reader_unlock (void)
-{
- clib_rwlock_reader_unlock (&http_server_main.sessions_lock);
-}
-
-static void
-http_server_sessions_writer_lock (void)
-{
- clib_rwlock_writer_lock (&http_server_main.sessions_lock);
-}
-
-static void
-http_server_sessions_writer_unlock (void)
-{
- clib_rwlock_writer_unlock (&http_server_main.sessions_lock);
-}
-
-static http_session_t *
-http_server_session_alloc (u32 thread_index)
-{
- http_server_main_t *hsm = &http_server_main;
- http_session_t *hs;
- pool_get (hsm->sessions[thread_index], hs);
- memset (hs, 0, sizeof (*hs));
- hs->session_index = hs - hsm->sessions[thread_index];
- hs->thread_index = thread_index;
- hs->timer_handle = ~0;
- return hs;
-}
-
-static http_session_t *
-http_server_session_get (u32 thread_index, u32 hs_index)
-{
- http_server_main_t *hsm = &http_server_main;
- if (pool_is_free_index (hsm->sessions[thread_index], hs_index))
- return 0;
- return pool_elt_at_index (hsm->sessions[thread_index], hs_index);
-}
-
-static void
-http_server_session_free (http_session_t * hs)
-{
- http_server_main_t *hsm = &http_server_main;
- u32 thread = hs->thread_index;
- if (CLIB_DEBUG)
- memset (hs, 0xfa, sizeof (*hs));
- pool_put (hsm->sessions[thread], hs);
-}
-
-static void
-http_server_session_lookup_add (u32 thread_index, u32 s_index, u32 hs_index)
-{
- http_server_main_t *hsm = &http_server_main;
- vec_validate (hsm->session_to_http_session[thread_index], s_index);
- hsm->session_to_http_session[thread_index][s_index] = hs_index;
-}
-
-static void
-http_server_session_lookup_del (u32 thread_index, u32 s_index)
-{
- http_server_main_t *hsm = &http_server_main;
- hsm->session_to_http_session[thread_index][s_index] = ~0;
-}
-
-static http_session_t *
-http_server_session_lookup (u32 thread_index, u32 s_index)
-{
- http_server_main_t *hsm = &http_server_main;
- u32 hs_index;
-
- if (s_index < vec_len (hsm->session_to_http_session[thread_index]))
- {
- hs_index = hsm->session_to_http_session[thread_index][s_index];
- return http_server_session_get (thread_index, hs_index);
- }
- return 0;
-}
-
-
-static void
-http_server_session_timer_start (http_session_t * hs)
-{
- u32 hs_handle;
- hs_handle = hs->thread_index << 24 | hs->session_index;
- clib_spinlock_lock (&http_server_main.tw_lock);
- hs->timer_handle = tw_timer_start_2t_1w_2048sl (&http_server_main.tw,
- hs_handle, 0, 60);
- clib_spinlock_unlock (&http_server_main.tw_lock);
-}
-
-static void
-http_server_session_timer_stop (http_session_t * hs)
-{
- if (hs->timer_handle == ~0)
- return;
- clib_spinlock_lock (&http_server_main.tw_lock);
- tw_timer_stop_2t_1w_2048sl (&http_server_main.tw, hs->timer_handle);
- clib_spinlock_unlock (&http_server_main.tw_lock);
-}
-
-static void
-http_server_session_disconnect (http_session_t * hs)
-{
- vnet_disconnect_args_t _a = { 0 }, *a = &_a;
- a->handle = hs->vpp_session_handle;
- a->app_index = http_server_main.app_index;
- vnet_disconnect_session (a);
-}
-
-static void
-http_process_free (http_server_args * args)
-{
- vlib_node_runtime_t *rt;
- vlib_main_t *vm = vlib_get_first_main ();
- http_server_main_t *hsm = &http_server_main;
- vlib_node_t *n;
- u32 node_index;
- http_server_args **save_args;
-
- node_index = args->node_index;
- ASSERT (node_index != 0);
-
- n = vlib_get_node (vm, node_index);
- rt = vlib_node_get_runtime (vm, n->index);
- save_args = vlib_node_get_runtime_data (vm, n->index);
-
- /* Reset process session pointer */
- clib_mem_free (*save_args);
- *save_args = 0;
-
- /* Turn off the process node */
- vlib_node_set_state (vm, rt->node_index, VLIB_NODE_STATE_DISABLED);
-
- /* add node index to the freelist */
- vec_add1 (hsm->free_http_cli_process_node_indices, node_index);
-}
-
-/* *INDENT-OFF* */
-static const char *http_ok =
- "HTTP/1.1 200 OK\r\n";
-
-static const char *http_response =
- "Content-Type: text/html\r\n"
- "Expires: Mon, 11 Jan 1970 10:10:10 GMT\r\n"
- "Connection: close \r\n"
- "Pragma: no-cache\r\n"
- "Content-Length: %d\r\n\r\n%v";
-
-static const char *http_error_template =
- "HTTP/1.1 %s\r\n"
- "Content-Type: text/html\r\n"
- "Expires: Mon, 11 Jan 1970 10:10:10 GMT\r\n"
- "Connection: close\r\n"
- "Pragma: no-cache\r\n"
- "Content-Length: 0\r\n\r\n";
-
-/* Header, including incantation to suppress favicon.ico requests */
-static const char *html_header_template =
- "<html><head><title>%v</title></head>"
- "<link rel=\"icon\" href=\"data:,\">"
- "<body><pre>";
-
-static const char *html_footer =
- "</pre></body></html>\r\n";
-
-static const char *html_header_static =
- "<html><head><title>static reply</title></head>"
- "<link rel=\"icon\" href=\"data:,\">"
- "<body><pre>hello</pre></body></html>\r\n";
-/* *INDENT-ON* */
-
-static u8 *static_http;
-static u8 *static_ok;
-
-static void
-http_cli_output (uword arg, u8 * buffer, uword buffer_bytes)
-{
- u8 **output_vecp = (u8 **) arg;
- u8 *output_vec;
- u32 offset;
-
- output_vec = *output_vecp;
-
- offset = vec_len (output_vec);
- vec_validate (output_vec, offset + buffer_bytes - 1);
- clib_memcpy_fast (output_vec + offset, buffer, buffer_bytes);
-
- *output_vecp = output_vec;
-}
-
-void
-send_data (http_session_t * hs, u8 * data)
-{
- http_server_main_t *hsm = &http_server_main;
- vnet_disconnect_args_t _a = { 0 }, *a = &_a;
- vlib_main_t *vm = vlib_get_main ();
- f64 last_sent_timer = vlib_time_now (vm);
- u32 offset, bytes_to_send;
- f64 delay = 10e-3;
-
- bytes_to_send = vec_len (data);
- offset = 0;
-
- while (bytes_to_send > 0)
- {
- int actual_transfer;
-
- actual_transfer = svm_fifo_enqueue
- (hs->tx_fifo, bytes_to_send, data + offset);
-
- /* Made any progress? */
- if (actual_transfer <= 0)
- {
- http_server_sessions_reader_unlock ();
- vlib_process_suspend (vm, delay);
- http_server_sessions_reader_lock ();
-
- /* 10s deadman timer */
- if (vlib_time_now (vm) > last_sent_timer + 10.0)
- {
- a->handle = hs->vpp_session_handle;
- a->app_index = hsm->app_index;
- vnet_disconnect_session (a);
- break;
- }
- /* Exponential backoff, within reason */
- if (delay < 1.0)
- delay = delay * 2.0;
- }
- else
- {
- last_sent_timer = vlib_time_now (vm);
- offset += actual_transfer;
- bytes_to_send -= actual_transfer;
-
- if (svm_fifo_set_event (hs->tx_fifo))
- session_send_io_evt_to_thread (hs->tx_fifo,
- SESSION_IO_EVT_TX_FLUSH);
- delay = 10e-3;
- }
- }
-}
-
-static void
-send_error (http_session_t * hs, char *str)
-{
- u8 *data;
-
- data = format (0, http_error_template, str);
- send_data (hs, data);
- vec_free (data);
-}
-
-static uword
-http_cli_process (vlib_main_t * vm, vlib_node_runtime_t * rt,
- vlib_frame_t * f)
-{
- u8 *request = 0, *reply = 0, *http = 0, *html = 0;
- http_server_main_t *hsm = &http_server_main;
- http_server_args **save_args;
- http_server_args *args;
- unformat_input_t input;
- http_session_t *hs;
- int i;
-
- save_args = vlib_node_get_runtime_data (hsm->vlib_main, rt->node_index);
- args = *save_args;
-
- http_server_sessions_reader_lock ();
-
- hs = http_server_session_get (args->thread_index, args->hs_index);
- ASSERT (hs);
-
- request = hs->rx_buf;
- if (vec_len (request) < 7)
- {
- send_error (hs, "400 Bad Request");
- goto out;
- }
-
- for (i = 0; i < vec_len (request) - 4; i++)
- {
- if (request[i] == 'G' &&
- request[i + 1] == 'E' &&
- request[i + 2] == 'T' && request[i + 3] == ' ')
- goto found;
- }
-bad_request:
- send_error (hs, "400 Bad Request");
- goto out;
-
-found:
- /* Lose "GET " */
- vec_delete (request, i + 5, 0);
-
- /* Replace slashes with spaces, stop at the end of the path */
- i = 0;
- while (1)
- {
- if (request[i] == '/')
- request[i] = ' ';
- else if (request[i] == ' ')
- {
- /* vlib_cli_input is vector-based, no need for a NULL */
- _vec_len (request) = i;
- break;
- }
- i++;
- /* Should never happen */
- if (i == vec_len (request))
- goto bad_request;
- }
-
- /* Generate the html header */
- html = format (0, html_header_template, request /* title */ );
-
- /* Run the command */
- unformat_init_vector (&input, vec_dup (request));
- vlib_cli_input (vm, &input, http_cli_output, (uword) & reply);
- unformat_free (&input);
- request = 0;
-
- /* Generate the html page */
- html = format (html, "%v", reply);
- html = format (html, html_footer);
- /* And the http reply */
- http = format (0, http_ok);
- http = format (http, http_response, vec_len (html), html);
-
- /* Send it */
- send_data (hs, http);
-
-out:
- /* Cleanup */
- http_server_sessions_reader_unlock ();
- vec_free (reply);
- vec_free (html);
- vec_free (http);
-
- http_process_free (args);
- return (0);
-}
-
-static void
-alloc_http_process (http_server_args * args)
-{
- char *name;
- vlib_node_t *n;
- http_server_main_t *hsm = &http_server_main;
- vlib_main_t *vm = hsm->vlib_main;
- uword l = vec_len (hsm->free_http_cli_process_node_indices);
- http_server_args **save_args;
-
- if (vec_len (hsm->free_http_cli_process_node_indices) > 0)
- {
- n = vlib_get_node (vm, hsm->free_http_cli_process_node_indices[l - 1]);
- vlib_node_set_state (vm, n->index, VLIB_NODE_STATE_POLLING);
- _vec_len (hsm->free_http_cli_process_node_indices) = l - 1;
- }
- else
- {
- static vlib_node_registration_t r = {
- .function = http_cli_process,
- .type = VLIB_NODE_TYPE_PROCESS,
- .process_log2_n_stack_bytes = 16,
- .runtime_data_bytes = sizeof (void *),
- };
-
- name = (char *) format (0, "http-cli-%d", l);
- r.name = name;
- vlib_register_node (vm, &r);
- vec_free (name);
-
- n = vlib_get_node (vm, r.index);
- }
-
- /* Save the node index in the args. It won't be zero. */
- args->node_index = n->index;
-
- /* Save the args (pointer) in the node runtime */
- save_args = vlib_node_get_runtime_data (vm, n->index);
- *save_args = clib_mem_alloc (sizeof (*args));
- clib_memcpy_fast (*save_args, args, sizeof (*args));
-
- vlib_start_process (vm, n->runtime_index);
-}
-
-static void
-alloc_http_process_callback (void *cb_args)
-{
- alloc_http_process ((http_server_args *) cb_args);
-}
-
-static int
-session_rx_request (http_session_t * hs)
-{
- u32 max_dequeue, cursize;
- int n_read;
-
- cursize = vec_len (hs->rx_buf);
- max_dequeue = svm_fifo_max_dequeue_cons (hs->rx_fifo);
- if (PREDICT_FALSE (max_dequeue == 0))
- return -1;
-
- vec_validate (hs->rx_buf, cursize + max_dequeue - 1);
- n_read = app_recv_stream_raw (hs->rx_fifo, hs->rx_buf + cursize,
- max_dequeue, 0, 0 /* peek */ );
- ASSERT (n_read == max_dequeue);
- if (svm_fifo_is_empty_cons (hs->rx_fifo))
- svm_fifo_unset_event (hs->rx_fifo);
-
- _vec_len (hs->rx_buf) = cursize + n_read;
- return 0;
-}
-
-static int
-http_server_rx_callback (session_t * s)
-{
- http_server_args args;
- http_session_t *hs;
- int rv;
-
- http_server_sessions_reader_lock ();
-
- hs = http_server_session_lookup (s->thread_index, s->session_index);
- if (!hs || hs->session_state != HTTP_STATE_ESTABLISHED)
- return -1;
-
- rv = session_rx_request (hs);
- if (rv)
- return rv;
-
- /* send the command to a new/recycled vlib process */
- args.hs_index = hs->session_index;
- args.thread_index = hs->thread_index;
-
- http_server_sessions_reader_unlock ();
-
- /* Send RPC request to main thread */
- if (vlib_get_thread_index () != 0)
- vlib_rpc_call_main_thread (alloc_http_process_callback, (u8 *) & args,
- sizeof (args));
- else
- alloc_http_process (&args);
- return 0;
-}
-
-static int
-http_server_rx_callback_static (session_t * s)
-{
- http_session_t *hs;
- u32 request_len;
- u8 *request = 0;
- int i, rv;
-
- hs = http_server_session_lookup (s->thread_index, s->session_index);
- if (!hs || hs->session_state == HTTP_STATE_CLOSED)
- return 0;
-
- /* ok 200 was sent */
- if (hs->session_state == HTTP_STATE_OK_SENT)
- goto send_data;
-
- rv = session_rx_request (hs);
- if (rv)
- goto wait_for_data;
-
- request = hs->rx_buf;
- request_len = vec_len (request);
- if (vec_len (request) < 7)
- {
- send_error (hs, "400 Bad Request");
- goto close_session;
- }
-
- for (i = 0; i < request_len - 4; i++)
- {
- if (request[i] == 'G' &&
- request[i + 1] == 'E' &&
- request[i + 2] == 'T' && request[i + 3] == ' ')
- goto find_end;
- }
- send_error (hs, "400 Bad Request");
- goto close_session;
-
-find_end:
-
- /* check for the end sequence: /r/n/r/n */
- if (request[request_len - 1] != 0xa || request[request_len - 3] != 0xa
- || request[request_len - 2] != 0xd || request[request_len - 4] != 0xd)
- goto wait_for_data;
-
- /* send 200 OK first */
- send_data (hs, static_ok);
- hs->session_state = HTTP_STATE_OK_SENT;
- goto postpone;
-
-send_data:
- send_data (hs, static_http);
-
-close_session:
- http_server_session_disconnect (hs);
- return 0;
-
-postpone:
- (void) svm_fifo_set_event (hs->rx_fifo);
- session_send_io_evt_to_thread (hs->rx_fifo, SESSION_IO_EVT_BUILTIN_RX);
- return 0;
-
-wait_for_data:
- return 0;
-}
-
-static int
-http_server_session_accept_callback (session_t * s)
-{
- http_server_main_t *hsm = &http_server_main;
- http_session_t *hs;
-
- hsm->vpp_queue[s->thread_index] =
- session_main_get_vpp_event_queue (s->thread_index);
-
- if (!hsm->is_static)
- http_server_sessions_writer_lock ();
-
- hs = http_server_session_alloc (s->thread_index);
- http_server_session_lookup_add (s->thread_index, s->session_index,
- hs->session_index);
- hs->rx_fifo = s->rx_fifo;
- hs->tx_fifo = s->tx_fifo;
- hs->vpp_session_index = s->session_index;
- hs->vpp_session_handle = session_handle (s);
- hs->session_state = HTTP_STATE_ESTABLISHED;
- http_server_session_timer_start (hs);
-
- if (!hsm->is_static)
- http_server_sessions_writer_unlock ();
-
- s->session_state = SESSION_STATE_READY;
- return 0;
-}
-
-static void
-http_server_session_disconnect_callback (session_t * s)
-{
- http_server_main_t *hsm = &http_server_main;
- vnet_disconnect_args_t _a = { 0 }, *a = &_a;
-
- a->handle = session_handle (s);
- a->app_index = hsm->app_index;
- vnet_disconnect_session (a);
-}
-
-static void
-http_server_session_reset_callback (session_t * s)
-{
- http_server_main_t *hsm = &http_server_main;
- vnet_disconnect_args_t _a = { 0 }, *a = &_a;
-
- a->handle = session_handle (s);
- a->app_index = hsm->app_index;
- vnet_disconnect_session (a);
-}
-
-static int
-http_server_session_connected_callback (u32 app_index, u32 api_context,
- session_t * s, session_error_t err)
-{
- clib_warning ("called...");
- return -1;
-}
-
-static int
-http_server_add_segment_callback (u32 client_index, u64 segment_handle)
-{
- return 0;
-}
-
-static void
-http_server_cleanup_callback (session_t * s, session_cleanup_ntf_t ntf)
-{
- http_server_main_t *hsm = &http_server_main;
- http_session_t *hs;
-
- if (ntf == SESSION_CLEANUP_TRANSPORT)
- return;
-
- if (!hsm->is_static)
- http_server_sessions_writer_lock ();
-
- hs = http_server_session_lookup (s->thread_index, s->session_index);
- if (!hs)
- goto done;
-
- http_server_session_lookup_del (hs->thread_index, hs->vpp_session_index);
- vec_free (hs->rx_buf);
- http_server_session_timer_stop (hs);
- http_server_session_free (hs);
-
-done:
-
- if (!hsm->is_static)
- http_server_sessions_writer_unlock ();
-}
-
-static session_cb_vft_t http_server_session_cb_vft = {
- .session_accept_callback = http_server_session_accept_callback,
- .session_disconnect_callback = http_server_session_disconnect_callback,
- .session_connected_callback = http_server_session_connected_callback,
- .add_segment_callback = http_server_add_segment_callback,
- .builtin_app_rx_callback = http_server_rx_callback,
- .session_reset_callback = http_server_session_reset_callback,
- .session_cleanup_callback = http_server_cleanup_callback,
-};
-
-static int
-http_server_attach ()
-{
- vnet_app_add_cert_key_pair_args_t _ck_pair, *ck_pair = &_ck_pair;
- http_server_main_t *hsm = &http_server_main;
- u64 options[APP_OPTIONS_N_OPTIONS];
- vnet_app_attach_args_t _a, *a = &_a;
- u32 segment_size = 128 << 20;
-
- clib_memset (a, 0, sizeof (*a));
- clib_memset (options, 0, sizeof (options));
-
- if (hsm->private_segment_size)
- segment_size = hsm->private_segment_size;
-
- a->api_client_index = ~0;
- a->name = format (0, "test_http_server");
- a->session_cb_vft = &http_server_session_cb_vft;
- a->options = options;
- a->options[APP_OPTIONS_SEGMENT_SIZE] = segment_size;
- a->options[APP_OPTIONS_ADD_SEGMENT_SIZE] = segment_size;
- a->options[APP_OPTIONS_RX_FIFO_SIZE] =
- hsm->fifo_size ? hsm->fifo_size : 8 << 10;
- a->options[APP_OPTIONS_TX_FIFO_SIZE] =
- hsm->fifo_size ? hsm->fifo_size : 32 << 10;
- a->options[APP_OPTIONS_FLAGS] = APP_OPTIONS_FLAGS_IS_BUILTIN;
- a->options[APP_OPTIONS_PREALLOC_FIFO_PAIRS] = hsm->prealloc_fifos;
-
- if (vnet_application_attach (a))
- {
- vec_free (a->name);
- clib_warning ("failed to attach server");
- return -1;
- }
- vec_free (a->name);
- hsm->app_index = a->app_index;
-
- clib_memset (ck_pair, 0, sizeof (*ck_pair));
- ck_pair->cert = (u8 *) test_srv_crt_rsa;
- ck_pair->key = (u8 *) test_srv_key_rsa;
- ck_pair->cert_len = test_srv_crt_rsa_len;
- ck_pair->key_len = test_srv_key_rsa_len;
- vnet_app_add_cert_key_pair (ck_pair);
- hsm->ckpair_index = ck_pair->index;
-
- return 0;
-}
-
-static int
-http_transport_needs_crypto (transport_proto_t proto)
-{
- return proto == TRANSPORT_PROTO_TLS || proto == TRANSPORT_PROTO_DTLS ||
- proto == TRANSPORT_PROTO_QUIC;
-}
-
-static int
-http_server_listen ()
-{
- session_endpoint_cfg_t sep = SESSION_ENDPOINT_CFG_NULL;
- http_server_main_t *hsm = &http_server_main;
- vnet_listen_args_t _a, *a = &_a;
- char *uri = "tcp://0.0.0.0/80";
- int rv;
-
- clib_memset (a, 0, sizeof (*a));
- a->app_index = hsm->app_index;
-
- if (hsm->uri)
- uri = (char *) hsm->uri;
-
- if (parse_uri (uri, &sep))
- return -1;
-
- clib_memcpy (&a->sep_ext, &sep, sizeof (sep));
- if (http_transport_needs_crypto (a->sep_ext.transport_proto))
- {
- session_endpoint_alloc_ext_cfg (&a->sep_ext,
- TRANSPORT_ENDPT_EXT_CFG_CRYPTO);
- a->sep_ext.ext_cfg->crypto.ckpair_index = hsm->ckpair_index;
- }
-
- rv = vnet_listen (a);
- if (a->sep_ext.ext_cfg)
- clib_mem_free (a->sep_ext.ext_cfg);
- return rv;
-}
-
-static void
-http_server_session_close_cb (void *hs_handlep)
-{
- http_session_t *hs;
- uword hs_handle;
- hs_handle = pointer_to_uword (hs_handlep);
- hs = http_server_session_get (hs_handle >> 24, hs_handle & 0x00FFFFFF);
- if (!hs)
- return;
- hs->timer_handle = ~0;
- http_server_session_disconnect (hs);
-}
-
-static void
-http_expired_timers_dispatch (u32 * expired_timers)
-{
- u32 hs_handle;
- int i;
-
- for (i = 0; i < vec_len (expired_timers); i++)
- {
- /* Get session handle. The first bit is the timer id */
- hs_handle = expired_timers[i] & 0x7FFFFFFF;
- session_send_rpc_evt_to_thread (hs_handle >> 24,
- http_server_session_close_cb,
- uword_to_pointer (hs_handle, void *));
- }
-}
-
-static uword
-http_server_process (vlib_main_t * vm, vlib_node_runtime_t * rt,
- vlib_frame_t * f)
-{
- http_server_main_t *hsm = &http_server_main;
- f64 now, timeout = 1.0;
- uword *event_data = 0;
- uword __clib_unused event_type;
-
- while (1)
- {
- vlib_process_wait_for_event_or_clock (vm, timeout);
- now = vlib_time_now (vm);
- event_type = vlib_process_get_events (vm, (uword **) & event_data);
-
- /* expire timers */
- clib_spinlock_lock (&http_server_main.tw_lock);
- tw_timer_expire_timers_2t_1w_2048sl (&hsm->tw, now);
- clib_spinlock_unlock (&http_server_main.tw_lock);
-
- vec_reset_length (event_data);
- }
- return 0;
-}
-
-/* *INDENT-OFF* */
-VLIB_REGISTER_NODE (http_server_process_node) =
-{
- .function = http_server_process,
- .type = VLIB_NODE_TYPE_PROCESS,
- .name = "http-server-process",
- .state = VLIB_NODE_STATE_DISABLED,
-};
-/* *INDENT-ON* */
-
-static int
-http_server_create (vlib_main_t * vm)
-{
- vlib_thread_main_t *vtm = vlib_get_thread_main ();
- http_server_main_t *hsm = &http_server_main;
- u32 num_threads;
- vlib_node_t *n;
-
- num_threads = 1 /* main thread */ + vtm->n_threads;
- vec_validate (hsm->vpp_queue, num_threads - 1);
- vec_validate (hsm->sessions, num_threads - 1);
- vec_validate (hsm->session_to_http_session, num_threads - 1);
-
- clib_rwlock_init (&hsm->sessions_lock);
- clib_spinlock_init (&hsm->tw_lock);
-
- if (http_server_attach ())
- {
- clib_warning ("failed to attach server");
- return -1;
- }
- if (http_server_listen ())
- {
- clib_warning ("failed to start listening");
- return -1;
- }
-
- /* Init timer wheel and process */
- tw_timer_wheel_init_2t_1w_2048sl (&hsm->tw, http_expired_timers_dispatch,
- 1 /* timer interval */ , ~0);
- vlib_node_set_state (vm, http_server_process_node.index,
- VLIB_NODE_STATE_POLLING);
- n = vlib_get_node (vm, http_server_process_node.index);
- vlib_start_process (vm, n->runtime_index);
-
- return 0;
-}
-
-static clib_error_t *
-http_server_create_command_fn (vlib_main_t * vm,
- unformat_input_t * input,
- vlib_cli_command_t * cmd)
-{
- http_server_main_t *hsm = &http_server_main;
- unformat_input_t _line_input, *line_input = &_line_input;
- u64 seg_size;
- u8 *html;
- int rv;
-
- hsm->prealloc_fifos = 0;
- hsm->private_segment_size = 0;
- hsm->fifo_size = 0;
- hsm->is_static = 0;
-
- /* Get a line of input. */
- if (!unformat_user (input, unformat_line_input, line_input))
- goto start_server;
-
- while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
- {
- if (unformat (line_input, "static"))
- hsm->is_static = 1;
- else
- if (unformat (line_input, "prealloc-fifos %d", &hsm->prealloc_fifos))
- ;
- else if (unformat (line_input, "private-segment-size %U",
- unformat_memory_size, &seg_size))
- {
- if (seg_size >= 0x100000000ULL)
- {
- vlib_cli_output (vm, "private segment size %llu, too large",
- seg_size);
- return 0;
- }
- hsm->private_segment_size = seg_size;
- }
- else if (unformat (line_input, "fifo-size %d", &hsm->fifo_size))
- hsm->fifo_size <<= 10;
- else if (unformat (line_input, "uri %s", &hsm->uri))
- ;
- else
- return clib_error_return (0, "unknown input `%U'",
- format_unformat_error, line_input);
- }
- unformat_free (line_input);
-
-start_server:
-
- if (hsm->my_client_index != (u32) ~ 0)
- return clib_error_return (0, "test http server is already running");
-
- vnet_session_enable_disable (vm, 1 /* turn on TCP, etc. */ );
-
- if (hsm->is_static)
- {
- http_server_session_cb_vft.builtin_app_rx_callback =
- http_server_rx_callback_static;
- html = format (0, html_header_static);
- static_http = format (0, http_response, vec_len (html), html);
- static_ok = format (0, http_ok);
- }
- rv = http_server_create (vm);
- switch (rv)
- {
- case 0:
- break;
- default:
- return clib_error_return (0, "server_create returned %d", rv);
- }
- return 0;
-}
-
-/* *INDENT-OFF* */
-VLIB_CLI_COMMAND (http_server_create_command, static) =
-{
- .path = "test http server",
- .short_help = "test http server",
- .function = http_server_create_command_fn,
-};
-/* *INDENT-ON* */
-
-static clib_error_t *
-http_server_main_init (vlib_main_t * vm)
-{
- http_server_main_t *hsm = &http_server_main;
-
- hsm->my_client_index = ~0;
- hsm->vlib_main = vm;
- return 0;
-}
-
-VLIB_INIT_FUNCTION (http_server_main_init);
-
-/*
-* fd.io coding-style-patch-verification: ON
-*
-* Local Variables:
-* eval: (c-set-style "gnu")
-* End:
-*/
diff --git a/src/plugins/hs_apps/http_tps.c b/src/plugins/hs_apps/http_tps.c
new file mode 100644
index 00000000000..920f7ea731f
--- /dev/null
+++ b/src/plugins/hs_apps/http_tps.c
@@ -0,0 +1,839 @@
+/*
+ * Copyright (c) 2022 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/session/application.h>
+#include <vnet/session/application_interface.h>
+#include <vnet/session/session.h>
+#include <http/http.h>
+
/* Per-session state; used both for accepted sessions and (with some
 * fields overloaded) for listeners */
typedef struct
{
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  u32 session_index;	 /* index in per-thread session pool */
  u32 thread_index;	 /* owning vpp worker thread */
  u64 data_len;		 /* total bytes to send for current request */
  u64 data_offset;	 /* bytes sent so far; listeners reuse this as an
			  * accepted-session counter */
  u32 vpp_session_index; /* index of underlying vpp session */
  union
  {
    /** threshold after which connection is closed */
    f64 close_threshold;
    /** rate at which accepted sessions are marked for random close */
    u32 close_rate;
  };
  u8 *uri; /* listeners only: uri key this listener was registered under */
} hts_session_t;
+
/* Parsed arguments for one listen/unlisten CLI request */
typedef struct hts_listen_cfg_
{
  u8 *uri;	 /* listen uri; 0 means use the default */
  u32 vrf;	 /* fib table to listen in */
  f64 rnd_close; /* fraction of accepted sessions marked for random close */
  u8 is_del;	 /* 1 to stop listening instead of starting */
} hts_listen_cfg_t;
+
/* Global state for the http throughput test server */
typedef struct hs_main_
{
  hts_session_t **sessions; /* per-thread session pools */
  u32 app_index;	    /* app index assigned at attach */

  u32 ckpair_index; /* test certificate for crypto transports */
  u8 *test_data;    /* pattern buffer used by non-zero-copy tx */

  /** Hash table mapping listener uri keys to hts session indices */
  uword *uri_to_handle;

  /*
   * Configs
   */
  u8 *uri;
  u32 fifo_size;
  u64 segment_size;
  u8 debug_level;
  u8 no_zc;	   /* disable zero-copy tx if set */
  u8 *default_uri; /* used when no uri given on the cli */
  u32 seed;	   /* prng state for random close thresholds */
} hts_main_t;
+
+static hts_main_t hts_main;
+
+static hts_session_t *
+hts_session_alloc (u32 thread_index)
+{
+ hts_main_t *htm = &hts_main;
+ hts_session_t *hs;
+
+ pool_get_zero (htm->sessions[thread_index], hs);
+ hs->session_index = hs - htm->sessions[thread_index];
+ hs->thread_index = thread_index;
+
+ return hs;
+}
+
+static hts_session_t *
+hts_session_get (u32 thread_index, u32 hts_index)
+{
+ hts_main_t *htm = &hts_main;
+
+ if (pool_is_free_index (htm->sessions[thread_index], hts_index))
+ return 0;
+
+ return pool_elt_at_index (htm->sessions[thread_index], hts_index);
+}
+
+static void
+hts_session_free (hts_session_t *hs)
+{
+ hts_main_t *htm = &hts_main;
+ u32 thread = hs->thread_index;
+
+ if (htm->debug_level > 0)
+ clib_warning ("Freeing session %u", hs->session_index);
+
+ if (CLIB_DEBUG)
+ clib_memset (hs, 0xfa, sizeof (*hs));
+
+ pool_put (htm->sessions[thread], hs);
+}
+
+static void
+hts_disconnect_transport (hts_session_t *hs)
+{
+ vnet_disconnect_args_t _a = { 0 }, *a = &_a;
+ hts_main_t *htm = &hts_main;
+ session_t *ts;
+
+ if (htm->debug_level > 0)
+ clib_warning ("Actively closing session %u", hs->session_index);
+
+ ts = session_get (hs->vpp_session_index, hs->thread_index);
+ a->handle = session_handle (ts);
+ a->app_index = htm->app_index;
+ vnet_disconnect_session (a);
+}
+
/* Zero-copy tx: commit bytes into the tx fifo without copying a payload
 * buffer. Progress is tracked in hs->data_offset; if the fifo fills, a
 * dequeue notification is requested so tx resumes from the tx callback. */
static void
hts_session_tx_zc (hts_session_t *hs, session_t *ts)
{
  u32 to_send, space;
  u64 max_send;
  int rv;

  rv = svm_fifo_fill_chunk_list (ts->tx_fifo);
  if (rv < 0)
    {
      /* Could not grow the chunk list; retry once space is dequeued */
      svm_fifo_add_want_deq_ntf (ts->tx_fifo, SVM_FIFO_WANT_DEQ_NOTIF);
      return;
    }

  max_send = hs->data_len - hs->data_offset;
  space = svm_fifo_max_enqueue (ts->tx_fifo);
  ASSERT (space != 0);
  to_send = clib_min (space, max_send);

  /* Commit to_send bytes without copying any payload */
  svm_fifo_enqueue_nocopy (ts->tx_fifo, to_send);

  hs->data_offset += to_send;

  /* More to send; ask to be notified when the fifo drains */
  if (to_send < max_send)
    svm_fifo_add_want_deq_ntf (ts->tx_fifo, SVM_FIFO_WANT_DEQ_NOTIF);

  if (svm_fifo_set_event (ts->tx_fifo))
    session_send_io_evt_to_thread (ts->tx_fifo, SESSION_IO_EVT_TX);
}
+
/* Copy-mode tx: stream bytes from the static test_data pattern buffer,
 * wrapping around it as needed, at most 32KB per dispatch */
static void
hts_session_tx_no_zc (hts_session_t *hs, session_t *ts)
{
  u32 n_segs, buf_offset, buf_left;
  u64 max_send = 32 << 10, left;
  hts_main_t *htm = &hts_main;
  svm_fifo_seg_t seg[2];
  int sent;

  left = hs->data_len - hs->data_offset;
  max_send = clib_min (left, max_send);
  /* Position inside the pattern buffer follows the absolute offset */
  buf_offset = hs->data_offset % vec_len (htm->test_data);
  buf_left = vec_len (htm->test_data) - buf_offset;

  if (buf_left < max_send)
    {
      /* Wraps: send tail of the pattern buffer, then its head */
      seg[0].data = htm->test_data + buf_offset;
      seg[0].len = buf_left;
      seg[1].data = htm->test_data;
      seg[1].len = max_send - buf_left;
      n_segs = 2;
    }
  else
    {
      seg[0].data = htm->test_data + buf_offset;
      seg[0].len = max_send;
      n_segs = 1;
    }

  sent = svm_fifo_enqueue_segments (ts->tx_fifo, seg, n_segs,
				    1 /* allow partial */);

  if (sent <= 0)
    {
      /* Fifo full; retry once space is dequeued */
      svm_fifo_add_want_deq_ntf (ts->tx_fifo, SVM_FIFO_WANT_DEQ_NOTIF);
      return;
    }

  hs->data_offset += sent;

  /* More to send; ask to be notified when the fifo drains */
  if (sent < left)
    svm_fifo_add_want_deq_ntf (ts->tx_fifo, SVM_FIFO_WANT_DEQ_NOTIF);

  if (svm_fifo_set_event (ts->tx_fifo))
    session_send_io_evt_to_thread (ts->tx_fifo, SESSION_IO_EVT_TX);
}
+
/* Send more data on @hs and, for sessions marked for random close,
 * disconnect once the sent fraction exceeds the close threshold */
static inline void
hts_session_tx (hts_session_t *hs, session_t *ts)
{
  hts_main_t *htm = &hts_main;

  if (!htm->no_zc)
    hts_session_tx_zc (hs, ts);
  else
    hts_session_tx_no_zc (hs, ts);

  /* close_threshold is only non-zero on sessions picked for random close
   * in the accept callback */
  if (hs->close_threshold > 0)
    {
      if ((f64) hs->data_offset / hs->data_len > hs->close_threshold)
	hts_disconnect_transport (hs);
    }
}
+
/* Enqueue the http reply header with @status and start streaming
 * hs->data_len bytes of body (if any) */
static void
hts_start_send_data (hts_session_t *hs, http_status_code_t status)
{
  http_msg_t msg;
  session_t *ts;
  int rv;

  msg.type = HTTP_MSG_REPLY;
  msg.code = status;
  msg.content_type = HTTP_CONTENT_APP_OCTET_STREAM;
  msg.data.type = HTTP_MSG_DATA_INLINE;
  msg.data.len = hs->data_len;

  ts = session_get (hs->vpp_session_index, hs->thread_index);
  rv = svm_fifo_enqueue (ts->tx_fifo, sizeof (msg), (u8 *) &msg);
  ASSERT (rv == sizeof (msg));

  /* No body: just flush the reply header */
  if (!msg.data.len)
    {
      if (svm_fifo_set_event (ts->tx_fifo))
	session_send_io_evt_to_thread (ts->tx_fifo, SESSION_IO_EVT_TX);
      return;
    }

  hts_session_tx (hs, ts);
}
+
/* Parse a request target of the form "test_file_<size>" and, if valid,
 * start sending <size> bytes back. Returns 0 on success, -1 if the
 * request does not match the expected format. */
static int
try_test_file (hts_session_t *hs, u8 *request)
{
  char *test_str = "test_file";
  hts_main_t *htm = &hts_main;
  unformat_input_t input;
  uword file_size;
  int rc = 0;

  /* NOTE(review): memcmp reads 9 bytes from request; assumes the request
   * vector is at least that long — confirm behavior for shorter requests */
  if (memcmp (request, test_str, clib_strnlen (test_str, 9)))
    return -1;

  unformat_init_vector (&input, vec_dup (request));
  if (!unformat (&input, "test_file_%U", unformat_memory_size, &file_size))
    {
      rc = -1;
      goto done;
    }

  /* Reject trailing garbage after the size */
  if (unformat_check_input (&input) != UNFORMAT_END_OF_INPUT)
    {
      rc = -1;
      goto done;
    }

  if (htm->debug_level)
    clib_warning ("Requested file size %U", format_memory_size, file_size);

  hs->data_len = file_size;
  hs->data_offset = 0;

  if (hs->close_threshold > 0)
    {
      /* Disconnect if the header is already enough to fill the quota */
      if ((f64) 30 / hs->data_len > hs->close_threshold)
	{
	  hts_disconnect_transport (hs);
	  goto done;
	}
    }

  hts_start_send_data (hs, HTTP_STATUS_OK);

done:
  unformat_free (&input);

  return rc;
}
+
+static int
+hts_ts_rx_callback (session_t *ts)
+{
+ hts_session_t *hs;
+ u8 *request = 0;
+ http_msg_t msg;
+ int rv;
+
+ hs = hts_session_get (ts->thread_index, ts->opaque);
+
+ /* Read the http message header */
+ rv = svm_fifo_dequeue (ts->rx_fifo, sizeof (msg), (u8 *) &msg);
+ ASSERT (rv == sizeof (msg));
+
+ if (msg.type != HTTP_MSG_REQUEST || msg.method_type != HTTP_REQ_GET)
+ {
+ hts_start_send_data (hs, HTTP_STATUS_METHOD_NOT_ALLOWED);
+ goto done;
+ }
+
+ if (!msg.data.len)
+ {
+ hts_start_send_data (hs, HTTP_STATUS_BAD_REQUEST);
+ goto done;
+ }
+
+ vec_validate (request, msg.data.len - 1);
+ rv = svm_fifo_dequeue (ts->rx_fifo, msg.data.len, request);
+
+ if (try_test_file (hs, request))
+ hts_start_send_data (hs, HTTP_STATUS_NOT_FOUND);
+
+done:
+
+ return 0;
+}
+
+static int
+hs_ts_tx_callback (session_t *ts)
+{
+ hts_session_t *hs;
+
+ hs = hts_session_get (ts->thread_index, ts->opaque);
+ if (!hs)
+ return 0;
+
+ hts_session_tx (hs, ts);
+
+ return 0;
+}
+
/* Accept callback: allocate app-level session state, link it to the
 * transport session and optionally mark it for random close based on the
 * owning listener's configuration */
static int
hts_ts_accept_callback (session_t *ts)
{
  hts_main_t *htm = &hts_main;
  hts_session_t *hs, *lhs;
  session_t *ls;

  hs = hts_session_alloc (ts->thread_index);
  hs->vpp_session_index = ts->session_index;

  /* Cross-link transport and app sessions via opaque */
  ts->opaque = hs->session_index;
  ts->session_state = SESSION_STATE_READY;

  /* Check if listener configured for random closes */
  ls = listen_session_get_from_handle (ts->listener_handle);
  lhs = hts_session_get (0, ls->opaque);

  if (lhs->close_rate)
    {
      /* overload listener's data_offset as session counter; atomic since
       * accepts may run on multiple workers concurrently */
      u32 cnt = __atomic_add_fetch (&lhs->data_offset, 1, __ATOMIC_RELEASE);
      if ((cnt % lhs->close_rate) == 0)
	hs->close_threshold = random_f64 (&htm->seed);
    }

  if (htm->debug_level > 0)
    clib_warning ("Accepted session %u close threshold %.2f", ts->opaque,
		  hs->close_threshold);

  return 0;
}
+
/* Server-only app; an active connect completing would be a logic error */
static int
hts_ts_connected_callback (u32 app_index, u32 api_context, session_t *s,
			   session_error_t err)
{
  clib_warning ("called...");
  return -1;
}
+
+static void
+hts_ts_disconnect_callback (session_t *ts)
+{
+ hts_main_t *htm = &hts_main;
+ vnet_disconnect_args_t _a = { 0 }, *a = &_a;
+
+ if (htm->debug_level > 0)
+ clib_warning ("Transport closing session %u", ts->opaque);
+
+ a->handle = session_handle (ts);
+ a->app_index = htm->app_index;
+ vnet_disconnect_session (a);
+}
+
+static void
+hts_ts_reset_callback (session_t *ts)
+{
+ hts_main_t *htm = &hts_main;
+ vnet_disconnect_args_t _a = { 0 }, *a = &_a;
+
+ if (htm->debug_level > 0)
+ clib_warning ("Transport reset session %u", ts->opaque);
+
+ a->handle = session_handle (ts);
+ a->app_index = htm->app_index;
+ vnet_disconnect_session (a);
+}
+
+static void
+hts_ts_cleanup_callback (session_t *s, session_cleanup_ntf_t ntf)
+{
+ hts_session_t *hs;
+
+ if (ntf == SESSION_CLEANUP_TRANSPORT)
+ return;
+
+ hs = hts_session_get (s->thread_index, s->opaque);
+ if (!hs)
+ return;
+
+ hts_session_free (hs);
+}
+
/* Segment management is handled by the session layer; nothing to do */
static int
hts_add_segment_callback (u32 client_index, u64 segment_handle)
{
  return 0;
}
+
/* Segment management is handled by the session layer; nothing to do */
static int
hts_del_segment_callback (u32 client_index, u64 segment_handle)
{
  return 0;
}
+
/* Session layer callbacks registered by the http_tps builtin app */
static session_cb_vft_t hs_session_cb_vft = {
  .session_accept_callback = hts_ts_accept_callback,
  .session_disconnect_callback = hts_ts_disconnect_callback,
  .session_connected_callback = hts_ts_connected_callback,
  .add_segment_callback = hts_add_segment_callback,
  .del_segment_callback = hts_del_segment_callback,
  .builtin_app_rx_callback = hts_ts_rx_callback,
  .builtin_app_tx_callback = hs_ts_tx_callback,
  .session_reset_callback = hts_ts_reset_callback,
  .session_cleanup_callback = hts_ts_cleanup_callback,
};
+
/* Attach the http_tps app to the session layer and register the builtin
 * test certificate used by crypto transports.
 * Returns 0 on success, -1 on attach failure. */
static int
hts_attach (hts_main_t *hm)
{
  vnet_app_add_cert_key_pair_args_t _ck_pair, *ck_pair = &_ck_pair;
  u64 options[APP_OPTIONS_N_OPTIONS];
  vnet_app_attach_args_t _a, *a = &_a;

  clib_memset (a, 0, sizeof (*a));
  clib_memset (options, 0, sizeof (options));

  a->api_client_index = ~0;
  a->name = format (0, "http_tps");
  a->session_cb_vft = &hs_session_cb_vft;
  a->options = options;
  a->options[APP_OPTIONS_SEGMENT_SIZE] = hm->segment_size;
  a->options[APP_OPTIONS_ADD_SEGMENT_SIZE] = hm->segment_size;
  a->options[APP_OPTIONS_RX_FIFO_SIZE] = hm->fifo_size;
  a->options[APP_OPTIONS_TX_FIFO_SIZE] = hm->fifo_size;
  a->options[APP_OPTIONS_FLAGS] = APP_OPTIONS_FLAGS_IS_BUILTIN;

  if (vnet_application_attach (a))
    {
      vec_free (a->name);
      clib_warning ("failed to attach server");
      return -1;
    }
  vec_free (a->name);
  hm->app_index = a->app_index;

  /* Register test cert/key pair for TLS/DTLS/QUIC listeners */
  clib_memset (ck_pair, 0, sizeof (*ck_pair));
  ck_pair->cert = (u8 *) test_srv_crt_rsa;
  ck_pair->key = (u8 *) test_srv_key_rsa;
  ck_pair->cert_len = test_srv_crt_rsa_len;
  ck_pair->key_len = test_srv_key_rsa_len;
  vnet_app_add_cert_key_pair (ck_pair);
  hm->ckpair_index = ck_pair->index;

  return 0;
}
+
+static int
+hts_transport_needs_crypto (transport_proto_t proto)
+{
+ return proto == TRANSPORT_PROTO_TLS || proto == TRANSPORT_PROTO_DTLS ||
+ proto == TRANSPORT_PROTO_QUIC;
+}
+
+static int
+hts_start_listen (hts_main_t *htm, session_endpoint_cfg_t *sep, u8 *uri,
+ f64 rnd_close)
+{
+ vnet_listen_args_t _a, *a = &_a;
+ u8 need_crypto;
+ hts_session_t *hls;
+ session_t *ls;
+ u32 thread_index = 0;
+ int rv;
+
+ clib_memset (a, 0, sizeof (*a));
+ a->app_index = htm->app_index;
+
+ need_crypto = hts_transport_needs_crypto (sep->transport_proto);
+
+ sep->transport_proto = TRANSPORT_PROTO_HTTP;
+ clib_memcpy (&a->sep_ext, sep, sizeof (*sep));
+
+ if (need_crypto)
+ {
+ session_endpoint_alloc_ext_cfg (&a->sep_ext,
+ TRANSPORT_ENDPT_EXT_CFG_CRYPTO);
+ a->sep_ext.ext_cfg->crypto.ckpair_index = htm->ckpair_index;
+ }
+
+ rv = vnet_listen (a);
+
+ if (need_crypto)
+ clib_mem_free (a->sep_ext.ext_cfg);
+
+ if (rv)
+ return rv;
+
+ hls = hts_session_alloc (thread_index);
+ hls->uri = vec_dup (uri);
+ hls->close_rate = (f64) 1 / rnd_close;
+ ls = listen_session_get_from_handle (a->handle);
+ hls->vpp_session_index = ls->session_index;
+ hash_set_mem (htm->uri_to_handle, hls->uri, hls->session_index);
+
+ /* opaque holds index of hls, which is used in `hts_ts_accept_callback`
+ * to get back the pointer to hls */
+ ls->opaque = hls - htm->sessions[thread_index];
+
+ return 0;
+}
+
/* Stop the listener tracked by hts session @hls_index and free its
 * bookkeeping state. Returns 0 on success, -1 on unlisten failure. */
static int
hts_stop_listen (hts_main_t *htm, u32 hls_index)
{
  hts_session_t *hls;
  session_t *ls;

  hls = hts_session_get (0, hls_index);
  ls = listen_session_get (hls->vpp_session_index);

  vnet_unlisten_args_t ua = {
    .handle = listen_session_get_handle (ls),
    .app_index = htm->app_index,
    .wrk_map_index = 0 /* default wrk */
  };

  hash_unset_mem (htm->uri_to_handle, hls->uri);

  if (vnet_unlisten (&ua))
    return -1;

  vec_free (hls->uri);
  hts_session_free (hls);

  return 0;
}
+
/* Add or remove a listener per cli config. Listener uris are keyed by
 * vrf so the same uri may be listened on in different fib tables. */
static clib_error_t *
hts_listen (hts_main_t *htm, hts_listen_cfg_t *lcfg)
{
  session_endpoint_cfg_t sep = SESSION_ENDPOINT_CFG_NULL;
  clib_error_t *error = 0;
  u8 *uri, *uri_key;
  uword *p;
  int rv;

  uri = lcfg->uri ? lcfg->uri : htm->default_uri;
  uri_key = format (0, "vrf%u-%s", lcfg->vrf, uri);
  p = hash_get_mem (htm->uri_to_handle, uri_key);

  if (lcfg->is_del)
    {
      if (!p)
	error = clib_error_return (0, "not listening on %v", uri);
      else if (hts_stop_listen (htm, p[0]))
	error = clib_error_return (0, "failed to unlisten");
      goto done;
    }

  if (p)
    {
      error = clib_error_return (0, "already listening %v", uri);
      goto done;
    }

  if (parse_uri ((char *) uri, &sep))
    {
      error = clib_error_return (0, "failed to parse uri %v", uri);
      goto done;
    }

  if (lcfg->vrf)
    {
      fib_protocol_t fp;
      u32 fib_index;

      /* Resolve the vrf id to a fib index for the endpoint */
      fp = sep.is_ip4 ? FIB_PROTOCOL_IP4 : FIB_PROTOCOL_IP6;
      fib_index = fib_table_find (fp, lcfg->vrf);
      if (fib_index == ~0)
	{
	  error = clib_error_return (0, "no such vrf %u", lcfg->vrf);
	  goto done;
	}
      sep.fib_index = fib_index;
    }

  if ((rv = hts_start_listen (htm, &sep, uri_key, lcfg->rnd_close)))
    {
      error = clib_error_return (0, "failed to listen on %v: %U", uri,
				 format_session_error, rv);
    }

done:

  vec_free (uri_key);
  return error;
}
+
+/* One-time setup of the http tps app: per-thread session pools, optional
+ * static test data for the no-zero-copy path, app attach, default uri and
+ * the uri -> listener-session hash.
+ *
+ * @return 0 on success, -1 if the app attach fails
+ */
+static int
+hts_create (vlib_main_t *vm)
+{
+  vlib_thread_main_t *vtm = vlib_get_thread_main ();
+  hts_main_t *htm = &hts_main;
+  u32 num_threads;
+
+  num_threads = 1 /* main thread */ + vtm->n_threads;
+  vec_validate (htm->sessions, num_threads - 1);
+
+  /* Non-zero-copy mode sends from this 64kB static buffer */
+  if (htm->no_zc)
+    vec_validate (htm->test_data, (64 << 10) - 1);
+
+  if (hts_attach (htm))
+    {
+      clib_warning ("failed to attach server");
+      return -1;
+    }
+
+  /* NUL-terminated so it can be used as a C string by parse_uri */
+  htm->default_uri = format (0, "tcp://0.0.0.0/80%c", 0);
+  htm->uri_to_handle = hash_create_vec (0, sizeof (u8), sizeof (uword));
+
+  return 0;
+}
+
+/* CLI handler for "http tps": parses global options (segment/fifo sizes,
+ * no-zc, debug) and per-listener options (uri, vrf, rnd-close, del),
+ * lazily enables the session layer and creates the app on first use, then
+ * adds or deletes the requested listener.
+ */
+static clib_error_t *
+hts_create_command_fn (vlib_main_t *vm, unformat_input_t *input,
+		       vlib_cli_command_t *cmd)
+{
+  unformat_input_t _line_input, *line_input = &_line_input;
+  hts_main_t *htm = &hts_main;
+  hts_listen_cfg_t lcfg = {};
+  clib_error_t *error = 0;
+  u64 mem_size;
+
+  /* Get a line of input. */
+  if (!unformat_user (input, unformat_line_input, line_input))
+    goto start_server;
+
+  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+    {
+      if (unformat (line_input, "private-segment-size %U",
+		    unformat_memory_size, &mem_size))
+	htm->segment_size = mem_size;
+      else if (unformat (line_input, "fifo-size %U", unformat_memory_size,
+			 &mem_size))
+	htm->fifo_size = mem_size;
+      else if (unformat (line_input, "no-zc"))
+	htm->no_zc = 1;
+      else if (unformat (line_input, "debug"))
+	htm->debug_level = 1;
+      else if (unformat (line_input, "vrf %u", &lcfg.vrf))
+	;
+      else if (unformat (line_input, "uri %s", &lcfg.uri))
+	;
+      else if (unformat (line_input, "rnd-close %f", &lcfg.rnd_close))
+	{
+	  /* rnd-close is a probability-style fraction, must be <= 1 */
+	  if (lcfg.rnd_close > 1.0)
+	    {
+	      error = clib_error_return (0, "invalid rnd close value %f",
+					 lcfg.rnd_close);
+	      break;
+	    }
+	}
+      else if (unformat (line_input, "del"))
+	lcfg.is_del = 1;
+      else
+	{
+	  error = clib_error_return (0, "unknown input `%U'",
+				     format_unformat_error, line_input);
+	  break;
+	}
+    }
+
+  unformat_free (line_input);
+
+  if (error)
+    goto done;
+
+start_server:
+
+  /* First invocation: enable the session layer and create the app */
+  if (htm->app_index == (u32) ~0)
+    {
+      vnet_session_enable_disable (vm, 1 /* is_enable */);
+
+      if (hts_create (vm))
+	{
+	  error = clib_error_return (0, "http tps create failed");
+	  goto done;
+	}
+    }
+
+  error = hts_listen (htm, &lcfg);
+
+done:
+
+  /* hts_listen duplicates the uri it keeps, so this is always safe */
+  vec_free (lcfg.uri);
+  return error;
+}
+
+/* Help text fixed to match the options the handler actually parses:
+ * "segment-size"/"prealloc-fifos" were advertised but not accepted, while
+ * "private-segment-size", "vrf" and "rnd-close" were missing. */
+VLIB_CLI_COMMAND (http_tps_command, static) = {
+  .path = "http tps",
+  .short_help = "http tps [uri <uri>] [fifo-size <nbytes>] "
+		"[private-segment-size <nMG>] [vrf <n>] [rnd-close <f>] "
+		"[debug] [no-zc] [del]",
+  .function = hts_create_command_fn,
+};
+
+/* CLI handler for "show http tps": with "listeners" dumps the uri ->
+ * listener-index table, otherwise prints app index, listener count and
+ * data-session count.
+ *
+ * Fixes: free line_input after parsing (was leaked), "sesions" typo in the
+ * output, and use the same (u32) ~0 app_index sentinel as the create path.
+ */
+static clib_error_t *
+hts_show_command_fn (vlib_main_t *vm, unformat_input_t *input,
+		     vlib_cli_command_t *cmd)
+{
+  unformat_input_t _line_input, *line_input = &_line_input;
+  hts_main_t *htm = &hts_main;
+  clib_error_t *error = 0;
+  u8 do_listeners = 0;
+  hts_session_t **sessions;
+  u32 n_listeners = 0, n_sessions = 0;
+
+  if (!unformat_user (input, unformat_line_input, line_input))
+    goto no_input;
+
+  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+    {
+      if (unformat (line_input, "listeners"))
+	do_listeners = 1;
+      else
+	{
+	  error = clib_error_return (0, "unknown input `%U'",
+				     format_unformat_error, line_input);
+	  break;
+	}
+    }
+
+  /* Done with the line buffer whether or not parsing succeeded */
+  unformat_free (line_input);
+
+  if (error)
+    return error;
+
+no_input:
+
+  if (htm->app_index == (u32) ~0)
+    {
+      vlib_cli_output (vm, "http tps not enabled");
+      goto done;
+    }
+
+  if (do_listeners)
+    {
+      uword handle;
+      u8 *s = 0, *uri;
+
+      /* clang-format off */
+      hash_foreach (uri, handle, htm->uri_to_handle, ({
+	s = format (s, "%-30v%lx\n", uri, handle);
+      }));
+      /* clang-format on */
+
+      if (s)
+	{
+	  vlib_cli_output (vm, "%-29s%s", "URI", "Index");
+	  vlib_cli_output (vm, "%v", s);
+	  vec_free (s);
+	}
+      goto done;
+    }
+
+  /* Each listener is itself a pool session, so subtract them to report
+   * only data sessions */
+  n_listeners = hash_elts (htm->uri_to_handle);
+  vec_foreach (sessions, htm->sessions)
+    n_sessions += pool_elts (*sessions);
+
+  vlib_cli_output (vm, " app index: %u\n listeners: %u\n sessions: %u",
+		   htm->app_index, n_listeners, n_sessions - n_listeners);
+
+done:
+  return 0;
+}
+
+/* short_help fixed to match .path: this is "show http tps", not "http tps" */
+VLIB_CLI_COMMAND (show_http_tps_command, static) = {
+  .path = "show http tps",
+  .short_help = "show http tps [listeners]",
+  .function = hts_show_command_fn,
+};
+
+/* Init-time defaults for the http tps app. app_index == ~0 marks the app
+ * as not yet created; the CLI handler creates it on first use. */
+static clib_error_t *
+hs_main_init (vlib_main_t *vm)
+{
+  hts_main_t *htm = &hts_main;
+
+  htm->app_index = ~0;
+  htm->segment_size = 128 << 20; /* 128 MB */
+  htm->fifo_size = 64 << 10;	 /* 64 kB */
+
+  return 0;
+}
+
+VLIB_INIT_FUNCTION (hs_main_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/hs_apps/proxy.c b/src/plugins/hs_apps/proxy.c
index eb0d70277da..e8fedf921a5 100644
--- a/src/plugins/hs_apps/proxy.c
+++ b/src/plugins/hs_apps/proxy.c
@@ -66,27 +66,69 @@ proxy_call_main_thread (vnet_connect_args_t * a)
}
static proxy_session_t *
-proxy_get_active_open (proxy_main_t * pm, session_handle_t handle)
+proxy_session_alloc (void)
{
- proxy_session_t *ps = 0;
- uword *p;
+ proxy_main_t *pm = &proxy_main;
+ proxy_session_t *ps;
+
+ pool_get_zero (pm->sessions, ps);
+ ps->ps_index = ps - pm->sessions;
- p = hash_get (pm->proxy_session_by_active_open_handle, handle);
- if (p)
- ps = pool_elt_at_index (pm->sessions, p[0]);
return ps;
}
-static proxy_session_t *
-proxy_get_passive_open (proxy_main_t * pm, session_handle_t handle)
+/* Fetch proxy session by pool index; the index must be valid (asserts in
+ * debug images via pool_elt_at_index). */
+static inline proxy_session_t *
+proxy_session_get (u32 ps_index)
+{
+  proxy_main_t *pm = &proxy_main;
+
+  return pool_elt_at_index (pm->sessions, ps_index);
+}
+
+/* Like proxy_session_get but returns 0 instead of asserting when the pool
+ * slot has already been freed. */
+static inline proxy_session_t *
+proxy_session_get_if_valid (u32 ps_index)
+{
+  proxy_main_t *pm = &proxy_main;
+
+  if (pool_is_free_index (pm->sessions, ps_index))
+    return 0;
+  return pool_elt_at_index (pm->sessions, ps_index);
+}
+
+static void
+proxy_session_free (proxy_session_t *ps)
{
+ proxy_main_t *pm = &proxy_main;
+
+ if (CLIB_DEBUG > 0)
+ clib_memset (ps, 0xFE, sizeof (*ps));
+ pool_put (pm->sessions, ps);
+}
+
+static int
+proxy_session_postponed_free_rpc (void *arg)
+{
+ uword ps_index = pointer_to_uword (arg);
+ proxy_main_t *pm = &proxy_main;
proxy_session_t *ps = 0;
- uword *p;
- p = hash_get (pm->proxy_session_by_server_handle, handle);
- if (p)
- ps = pool_elt_at_index (pm->sessions, p[0]);
- return ps;
+ clib_spinlock_lock_if_init (&pm->sessions_lock);
+
+ ps = proxy_session_get (ps_index);
+ segment_manager_dealloc_fifos (ps->server_rx_fifo, ps->server_tx_fifo);
+ proxy_session_free (ps);
+
+ clib_spinlock_unlock_if_init (&pm->sessions_lock);
+
+ return 0;
+}
+
+/* Defer freeing of a proxy session to the passive-open side's thread via
+ * an RPC, so the fifos are deallocated on the thread that owns them. */
+static void
+proxy_session_postponed_free (proxy_session_t *ps)
+{
+  session_send_rpc_evt_to_thread (ps->po_thread_index,
+				  proxy_session_postponed_free_rpc,
+				  uword_to_pointer (ps->ps_index, void *));
}
static void
@@ -95,17 +137,13 @@ proxy_try_close_session (session_t * s, int is_active_open)
proxy_main_t *pm = &proxy_main;
proxy_session_t *ps = 0;
vnet_disconnect_args_t _a, *a = &_a;
- session_handle_t handle;
-
- handle = session_handle (s);
clib_spinlock_lock_if_init (&pm->sessions_lock);
+ ps = proxy_session_get (s->opaque);
+
if (is_active_open)
{
- ps = proxy_get_active_open (pm, handle);
- ASSERT (ps != 0);
-
a->handle = ps->vpp_active_open_handle;
a->app_index = pm->active_open_app_index;
vnet_disconnect_session (a);
@@ -122,9 +160,6 @@ proxy_try_close_session (session_t * s, int is_active_open)
}
else
{
- ps = proxy_get_passive_open (pm, handle);
- ASSERT (ps != 0);
-
a->handle = ps->vpp_server_handle;
a->app_index = pm->server_app_index;
vnet_disconnect_session (a);
@@ -146,43 +181,42 @@ proxy_try_close_session (session_t * s, int is_active_open)
}
static void
-proxy_session_free (proxy_session_t * ps)
-{
- proxy_main_t *pm = &proxy_main;
- if (CLIB_DEBUG > 0)
- clib_memset (ps, 0xFE, sizeof (*ps));
- pool_put (pm->sessions, ps);
-}
-
-static void
proxy_try_delete_session (session_t * s, u8 is_active_open)
{
proxy_main_t *pm = &proxy_main;
proxy_session_t *ps = 0;
- session_handle_t handle;
-
- handle = session_handle (s);
clib_spinlock_lock_if_init (&pm->sessions_lock);
+ ps = proxy_session_get (s->opaque);
+
if (is_active_open)
{
- ps = proxy_get_active_open (pm, handle);
- ASSERT (ps != 0);
-
ps->vpp_active_open_handle = SESSION_INVALID_HANDLE;
- hash_unset (pm->proxy_session_by_active_open_handle, handle);
+ /* Revert master thread index change on connect notification */
+ ps->server_rx_fifo->master_thread_index = ps->po_thread_index;
+
+ /* Passive open already cleaned up */
if (ps->vpp_server_handle == SESSION_INVALID_HANDLE)
- proxy_session_free (ps);
+ {
+ ASSERT (s->rx_fifo->refcnt == 1);
+
+ /* The two sides of the proxy on different threads */
+ if (ps->po_thread_index != s->thread_index)
+ {
+ /* This is not the right thread to delete the fifos */
+ s->rx_fifo = 0;
+ s->tx_fifo = 0;
+ proxy_session_postponed_free (ps);
+ }
+ else
+ proxy_session_free (ps);
+ }
}
else
{
- ps = proxy_get_passive_open (pm, handle);
- ASSERT (ps != 0);
-
ps->vpp_server_handle = SESSION_INVALID_HANDLE;
- hash_unset (pm->proxy_session_by_server_handle, handle);
if (ps->vpp_active_open_handle == SESSION_INVALID_HANDLE)
{
@@ -245,12 +279,12 @@ proxy_accept_callback (session_t * s)
clib_spinlock_lock_if_init (&pm->sessions_lock);
- pool_get_zero (pm->sessions, ps);
+ ps = proxy_session_alloc ();
ps->vpp_server_handle = session_handle (s);
ps->vpp_active_open_handle = SESSION_INVALID_HANDLE;
+ ps->po_thread_index = s->thread_index;
- hash_set (pm->proxy_session_by_server_handle, ps->vpp_server_handle,
- ps - pm->sessions);
+ s->opaque = ps->ps_index;
clib_spinlock_unlock_if_init (&pm->sessions_lock);
@@ -303,8 +337,7 @@ proxy_rx_callback (session_t * s)
clib_spinlock_lock_if_init (&pm->sessions_lock);
- ps = proxy_get_passive_open (pm, session_handle (s));
- ASSERT (ps != 0);
+ ps = proxy_session_get (s->opaque);
if (PREDICT_TRUE (ps->vpp_active_open_handle != SESSION_INVALID_HANDLE))
{
@@ -332,7 +365,7 @@ proxy_rx_callback (session_t * s)
{
vnet_connect_args_t _a, *a = &_a;
svm_fifo_t *tx_fifo, *rx_fifo;
- u32 max_dequeue, proxy_index;
+ u32 max_dequeue, ps_index;
int actual_transfer __attribute__ ((unused));
rx_fifo = s->rx_fifo;
@@ -344,7 +377,10 @@ proxy_rx_callback (session_t * s)
max_dequeue = svm_fifo_max_dequeue_cons (s->rx_fifo);
if (PREDICT_FALSE (max_dequeue == 0))
- return 0;
+ {
+ clib_spinlock_unlock_if_init (&pm->sessions_lock);
+ return 0;
+ }
max_dequeue = clib_min (pm->rcv_buffer_size, max_dequeue);
actual_transfer = svm_fifo_peek (rx_fifo, 0 /* relative_offset */ ,
@@ -357,12 +393,12 @@ proxy_rx_callback (session_t * s)
ps->server_rx_fifo = rx_fifo;
ps->server_tx_fifo = tx_fifo;
ps->active_open_establishing = 1;
- proxy_index = ps - pm->sessions;
+ ps_index = ps->ps_index;
clib_spinlock_unlock_if_init (&pm->sessions_lock);
clib_memcpy (&a->sep_ext, &pm->client_sep, sizeof (pm->client_sep));
- a->api_context = proxy_index;
+ a->api_context = ps_index;
a->app_index = pm->active_open_app_index;
if (proxy_transport_needs_crypto (a->sep.transport_proto))
@@ -407,11 +443,10 @@ proxy_tx_callback (session_t * proxy_s)
clib_spinlock_lock_if_init (&pm->sessions_lock);
- ps = proxy_get_passive_open (pm, session_handle (proxy_s));
- ASSERT (ps != 0);
+ ps = proxy_session_get (proxy_s->opaque);
if (ps->vpp_active_open_handle == SESSION_INVALID_HANDLE)
- return 0;
+ goto unlock;
/* Force ack on active open side to update rcv wnd. Make sure it's done on
* the right thread */
@@ -419,6 +454,7 @@ proxy_tx_callback (session_t * proxy_s)
session_send_rpc_evt_to_thread (ps->server_rx_fifo->master_thread_index,
proxy_force_ack, arg);
+unlock:
clib_spinlock_unlock_if_init (&pm->sessions_lock);
return 0;
@@ -442,10 +478,47 @@ static session_cb_vft_t proxy_session_cb_vft = {
.builtin_app_tx_callback = proxy_tx_callback,
.session_reset_callback = proxy_reset_callback,
.session_cleanup_callback = proxy_cleanup_callback,
- .fifo_tuning_callback = common_fifo_tuning_callback
+ .fifo_tuning_callback = common_fifo_tuning_callback,
};
+/* proxy_alloc_session_fifos callback for the active-open side: instead of
+ * allocating fresh fifos, cross-connect the active-open session to the
+ * passive-open session's fifos (server rx becomes our tx and vice versa)
+ * and bump their refcounts so they outlive either session alone. */
static int
+active_open_alloc_session_fifos (session_t *s)
+{
+  proxy_main_t *pm = &proxy_main;
+  svm_fifo_t *rxf, *txf;
+  proxy_session_t *ps;
+
+  clib_spinlock_lock_if_init (&pm->sessions_lock);
+
+  ps = proxy_session_get (s->opaque);
+
+  txf = ps->server_rx_fifo;
+  rxf = ps->server_tx_fifo;
+
+  /*
+   * Reset the active-open tx-fifo master indices so the active-open session
+   * will receive data, etc.
+   */
+  txf->shr->master_session_index = s->session_index;
+  txf->master_thread_index = s->thread_index;
+
+  /*
+   * Account for the active-open session's use of the fifos
+   * so they won't disappear until the last session which uses
+   * them disappears
+   */
+  rxf->refcnt++;
+  txf->refcnt++;
+
+  clib_spinlock_unlock_if_init (&pm->sessions_lock);
+
+  s->rx_fifo = rxf;
+  s->tx_fifo = txf;
+
+  return 0;
+}
+
+static int
active_open_connected_callback (u32 app_index, u32 opaque,
session_t * s, session_error_t err)
{
@@ -458,7 +531,7 @@ active_open_connected_callback (u32 app_index, u32 opaque,
*/
clib_spinlock_lock_if_init (&pm->sessions_lock);
- ps = pool_elt_at_index (pm->sessions, opaque);
+ ps = proxy_session_get (opaque);
/* Connection failed */
if (err)
@@ -480,33 +553,12 @@ active_open_connected_callback (u32 app_index, u32 opaque,
if (ps->po_disconnected)
{
/* Setup everything for the cleanup notification */
- hash_set (pm->proxy_session_by_active_open_handle,
- ps->vpp_active_open_handle, opaque);
ps->ao_disconnected = 1;
clib_spinlock_unlock_if_init (&pm->sessions_lock);
return -1;
}
- s->tx_fifo = ps->server_rx_fifo;
- s->rx_fifo = ps->server_tx_fifo;
-
- /*
- * Reset the active-open tx-fifo master indices so the active-open session
- * will receive data, etc.
- */
- s->tx_fifo->shr->master_session_index = s->session_index;
- s->tx_fifo->master_thread_index = s->thread_index;
-
- /*
- * Account for the active-open session's use of the fifos
- * so they won't disappear until the last session which uses
- * them disappears
- */
- s->tx_fifo->refcnt++;
- s->rx_fifo->refcnt++;
-
- hash_set (pm->proxy_session_by_active_open_handle,
- ps->vpp_active_open_handle, opaque);
+ s->opaque = opaque;
clib_spinlock_unlock_if_init (&pm->sessions_lock);
@@ -568,11 +620,9 @@ active_open_tx_callback (session_t * ao_s)
{
proxy_main_t *pm = &proxy_main;
transport_connection_t *tc;
- session_handle_t handle;
proxy_session_t *ps;
session_t *proxy_s;
u32 min_free;
- uword *p;
min_free = clib_min (svm_fifo_size (ao_s->tx_fifo) >> 3, 128 << 10);
if (svm_fifo_max_enqueue (ao_s->tx_fifo) < min_free)
@@ -583,17 +633,12 @@ active_open_tx_callback (session_t * ao_s)
clib_spinlock_lock_if_init (&pm->sessions_lock);
- handle = session_handle (ao_s);
- p = hash_get (pm->proxy_session_by_active_open_handle, handle);
- if (!p)
- return 0;
-
- if (pool_is_free_index (pm->sessions, p[0]))
- return 0;
+ ps = proxy_session_get_if_valid (ao_s->opaque);
+ if (!ps)
+ goto unlock;
- ps = pool_elt_at_index (pm->sessions, p[0]);
if (ps->vpp_server_handle == ~0)
- return 0;
+ goto unlock;
proxy_s = session_get_from_handle (ps->vpp_server_handle);
@@ -601,6 +646,7 @@ active_open_tx_callback (session_t * ao_s)
tc = session_get_transport (proxy_s);
tcp_send_ack ((tcp_connection_t *) tc);
+unlock:
clib_spinlock_unlock_if_init (&pm->sessions_lock);
return 0;
@@ -615,7 +661,6 @@ active_open_cleanup_callback (session_t * s, session_cleanup_ntf_t ntf)
proxy_try_delete_session (s, 1 /* is_active_open */ );
}
-/* *INDENT-OFF* */
static session_cb_vft_t active_open_clients = {
.session_reset_callback = active_open_reset_callback,
.session_connected_callback = active_open_connected_callback,
@@ -624,9 +669,9 @@ static session_cb_vft_t active_open_clients = {
.session_cleanup_callback = active_open_cleanup_callback,
.builtin_app_rx_callback = active_open_rx_callback,
.builtin_app_tx_callback = active_open_tx_callback,
- .fifo_tuning_callback = common_fifo_tuning_callback
+ .fifo_tuning_callback = common_fifo_tuning_callback,
+ .proxy_alloc_session_fifos = active_open_alloc_session_fifos,
};
-/* *INDENT-ON* */
static int
proxy_server_attach ()
@@ -634,19 +679,16 @@ proxy_server_attach ()
proxy_main_t *pm = &proxy_main;
u64 options[APP_OPTIONS_N_OPTIONS];
vnet_app_attach_args_t _a, *a = &_a;
- u32 segment_size = 512 << 20;
clib_memset (a, 0, sizeof (*a));
clib_memset (options, 0, sizeof (options));
- if (pm->private_segment_size)
- segment_size = pm->private_segment_size;
a->name = format (0, "proxy-server");
a->api_client_index = pm->server_client_index;
a->session_cb_vft = &proxy_session_cb_vft;
a->options = options;
- a->options[APP_OPTIONS_SEGMENT_SIZE] = segment_size;
- a->options[APP_OPTIONS_ADD_SEGMENT_SIZE] = segment_size;
+ a->options[APP_OPTIONS_SEGMENT_SIZE] = pm->segment_size;
+ a->options[APP_OPTIONS_ADD_SEGMENT_SIZE] = pm->segment_size;
a->options[APP_OPTIONS_RX_FIFO_SIZE] = pm->fifo_size;
a->options[APP_OPTIONS_TX_FIFO_SIZE] = pm->fifo_size;
a->options[APP_OPTIONS_MAX_FIFO_SIZE] = pm->max_fifo_size;
@@ -753,14 +795,12 @@ proxy_server_add_ckpair (void)
static int
proxy_server_create (vlib_main_t * vm)
{
- proxy_main_t *pm = &proxy_main;
vlib_thread_main_t *vtm = vlib_get_thread_main ();
+ proxy_main_t *pm = &proxy_main;
u32 num_threads;
int i;
num_threads = 1 /* main thread */ + vtm->n_threads;
- vec_validate (proxy_main.server_event_queue, num_threads - 1);
- vec_validate (proxy_main.active_open_event_queue, num_threads - 1);
vec_validate (pm->rx_buf, num_threads - 1);
for (i = 0; i < num_threads; i++)
@@ -784,15 +824,6 @@ proxy_server_create (vlib_main_t * vm)
return -1;
}
- for (i = 0; i < num_threads; i++)
- {
- pm->active_open_event_queue[i] = session_main_get_vpp_event_queue (i);
-
- ASSERT (pm->active_open_event_queue[i]);
-
- pm->server_event_queue[i] = session_main_get_vpp_event_queue (i);
- }
-
return 0;
}
@@ -816,7 +847,7 @@ proxy_server_create_command_fn (vlib_main_t * vm, unformat_input_t * input,
pm->rcv_buffer_size = 1024;
pm->prealloc_fifos = 0;
pm->private_segment_count = 0;
- pm->private_segment_size = 0;
+ pm->segment_size = 512 << 20;
if (vlib_num_workers ())
clib_spinlock_init (&pm->sessions_lock);
@@ -846,13 +877,7 @@ proxy_server_create_command_fn (vlib_main_t * vm, unformat_input_t * input,
else if (unformat (line_input, "private-segment-size %U",
unformat_memory_size, &tmp64))
{
- if (tmp64 >= 0x100000000ULL)
- {
- error = clib_error_return (
- 0, "private segment size %lld (%llu) too large", tmp64, tmp64);
- goto done;
- }
- pm->private_segment_size = tmp64;
+ pm->segment_size = tmp64;
}
else if (unformat (line_input, "server-uri %s", &server_uri))
vec_add1 (server_uri, 0);
@@ -908,7 +933,6 @@ done:
return error;
}
-/* *INDENT-OFF* */
VLIB_CLI_COMMAND (proxy_create_command, static) =
{
.path = "test proxy server",
@@ -919,7 +943,6 @@ VLIB_CLI_COMMAND (proxy_create_command, static) =
"[private-segment-size <mem>][private-segment-count <nn>]",
.function = proxy_server_create_command_fn,
};
-/* *INDENT-ON* */
clib_error_t *
proxy_main_init (vlib_main_t * vm)
@@ -927,8 +950,6 @@ proxy_main_init (vlib_main_t * vm)
proxy_main_t *pm = &proxy_main;
pm->server_client_index = ~0;
pm->active_open_client_index = ~0;
- pm->proxy_session_by_active_open_handle = hash_create (0, sizeof (uword));
- pm->proxy_session_by_server_handle = hash_create (0, sizeof (uword));
return 0;
}
diff --git a/src/plugins/hs_apps/proxy.h b/src/plugins/hs_apps/proxy.h
index aef23e1e556..26f4de2f729 100644
--- a/src/plugins/hs_apps/proxy.h
+++ b/src/plugins/hs_apps/proxy.h
@@ -36,54 +36,41 @@ typedef struct
volatile int active_open_establishing;
volatile int po_disconnected;
volatile int ao_disconnected;
+
+ u32 ps_index;
+ u32 po_thread_index;
} proxy_session_t;
typedef struct
{
- svm_queue_t *vl_input_queue; /**< vpe input queue */
- /** per-thread vectors */
- svm_msg_q_t **server_event_queue;
- svm_msg_q_t **active_open_event_queue;
+ proxy_session_t *sessions; /**< session pool, shared */
+ clib_spinlock_t sessions_lock; /**< lock for session pool */
u8 **rx_buf; /**< intermediate rx buffers */
- u32 cli_node_index; /**< cli process node index */
u32 server_client_index; /**< server API client handle */
u32 server_app_index; /**< server app index */
u32 active_open_client_index; /**< active open API client handle */
u32 active_open_app_index; /**< active open index after attach */
-
- uword *proxy_session_by_server_handle;
- uword *proxy_session_by_active_open_handle;
+ u32 ckpair_index; /**< certkey pair index for tls */
/*
* Configuration params
*/
- u8 *connect_uri; /**< URI for slave's connect */
- u32 configured_segment_size;
u32 fifo_size; /**< initial fifo size */
u32 max_fifo_size; /**< max fifo size */
u8 high_watermark; /**< high watermark (%) */
u8 low_watermark; /**< low watermark (%) */
u32 private_segment_count; /**< Number of private fifo segs */
- u32 private_segment_size; /**< size of private fifo segs */
+ u64 segment_size; /**< size of fifo segs */
+ u8 prealloc_fifos; /**< Request fifo preallocation */
int rcv_buffer_size;
session_endpoint_cfg_t server_sep;
session_endpoint_cfg_t client_sep;
- u32 ckpair_index;
- /*
- * Test state variables
- */
- proxy_session_t *sessions; /**< Session pool, shared */
- clib_spinlock_t sessions_lock;
- u32 **connection_index_by_thread;
- pthread_t client_thread_handle;
-
/*
* Flags
*/
u8 is_init;
- u8 prealloc_fifos; /**< Request fifo preallocation */
} proxy_main_t;
extern proxy_main_t proxy_main;
diff --git a/src/plugins/hs_apps/sapi/vpp_echo.c b/src/plugins/hs_apps/sapi/vpp_echo.c
index 80d274db5b0..08fd4e175e9 100644
--- a/src/plugins/hs_apps/sapi/vpp_echo.c
+++ b/src/plugins/hs_apps/sapi/vpp_echo.c
@@ -84,16 +84,19 @@ stop_signal (int signum)
em->time_to_stop = 1;
}
-int
-connect_to_vpp (char *name)
+static int
+connect_to_vpp (echo_main_t *em)
{
- echo_main_t *em = &echo_main;
api_main_t *am = vlibapi_get_main ();
+ if (em->use_app_socket_api)
+ return echo_api_connect_app_socket (em);
+
if (em->use_sock_api)
{
- if (vl_socket_client_connect ((char *) em->socket_name, name,
- 0 /* default rx, tx buffer */ ))
+ if (vl_socket_client_connect ((char *) em->socket_name,
+ (char *) em->app_name,
+ 0 /* default rx, tx buffer */))
{
ECHO_FAIL (ECHO_FAIL_SOCKET_CONNECT, "socket connect failed");
return -1;
@@ -107,7 +110,8 @@ connect_to_vpp (char *name)
}
else
{
- if (vl_client_connect_to_vlib ("/vpe-api", name, 32) < 0)
+ if (vl_client_connect_to_vlib ("/vpe-api", (char *) em->app_name, 32) <
+ 0)
{
ECHO_FAIL (ECHO_FAIL_SHMEM_CONNECT, "shmem connect failed");
return -1;
@@ -286,13 +290,11 @@ echo_free_sessions (echo_main_t * em)
echo_session_t *s;
u32 *session_indexes = 0, *session_index;
- /* *INDENT-OFF* */
pool_foreach (s, em->sessions)
{
if (s->session_state == ECHO_SESSION_STATE_CLOSED)
vec_add1 (session_indexes, s->session_index);
}
- /* *INDENT-ON* */
vec_foreach (session_index, session_indexes)
{
/* Free session */
@@ -725,9 +727,18 @@ session_reset_handler (session_reset_msg_t * mp)
app_send_ctrl_evt_to_vpp (s->vpp_evt_q, app_evt);
}
+/* Receive @n_fds file descriptors over whichever control channel is in
+ * use: the app socket API (sapi) or the binary API socket client. */
+static int
+echo_recv_fd (echo_main_t *em, int *fds, int n_fds)
+{
+  if (em->use_app_socket_api)
+    return echo_sapi_recv_fd (em, fds, n_fds);
+  return echo_bapi_recv_fd (em, fds, n_fds);
+}
+
static void
add_segment_handler (session_app_add_segment_msg_t * mp)
{
+ echo_main_t *em = &echo_main;
fifo_segment_main_t *sm = &echo_main.segment_main;
fifo_segment_create_args_t _a, *a = &_a;
int *fds = 0, i;
@@ -737,10 +748,10 @@ add_segment_handler (session_app_add_segment_msg_t * mp)
if (mp->fd_flags & SESSION_FD_F_MEMFD_SEGMENT)
{
vec_validate (fds, 1);
- if (vl_socket_client_recv_fd_msg (fds, 1, 5))
+ if (echo_recv_fd (em, fds, 1))
{
- ECHO_FAIL (ECHO_FAIL_VL_API_RECV_FD_MSG,
- "vl_socket_client_recv_fd_msg failed");
+ ECHO_LOG (0, "echo_recv_fd failed");
+ em->time_to_stop = 1;
goto failed;
}
@@ -1112,6 +1123,8 @@ echo_process_opts (int argc, char **argv)
em->test_return_packets = RETURN_PACKETS_LOG_WRONG;
else if (unformat (a, "socket-name %s", &em->socket_name))
;
+ else if (unformat (a, "use-app-socket-api"))
+ em->use_app_socket_api = 1;
else if (unformat (a, "use-svm-api"))
em->use_sock_api = 0;
else if (unformat (a, "fifo-size %U", unformat_memory_size, &tmp))
@@ -1228,6 +1241,15 @@ echo_process_opts (int argc, char **argv)
}
}
+/* Return 1 if the configured uri's transport (QUIC or TLS) requires a
+ * cert/key pair to be installed before connecting, 0 otherwise. */
+static int
+echo_needs_crypto (echo_main_t *em)
+{
+  u8 tr = em->uri_elts.transport_proto;
+  if (tr == TRANSPORT_PROTO_QUIC || tr == TRANSPORT_PROTO_TLS)
+    return 1;
+  return 0;
+}
+
void
echo_process_uri (echo_main_t * em)
{
@@ -1260,13 +1282,91 @@ vpp_echo_init ()
clib_memset (em, 0, sizeof (*em));
}
+/* Detach the app from vpp. The sapi path is synchronous; the binary API
+ * path sends the detach and waits (up to TIMEOUT) for the state machine
+ * to reach STATE_DETACHED. Returns 0 on success, -1 on timeout. */
+static int
+echo_detach (echo_main_t *em)
+{
+  if (em->use_app_socket_api)
+    return echo_sapi_detach (em);
+
+  echo_send_detach (em);
+  if (wait_for_state_change (em, STATE_DETACHED, TIMEOUT))
+    {
+      ECHO_FAIL (ECHO_FAIL_DETACH, "Couldn't detach from vpp");
+      return -1;
+    }
+  return 0;
+}
+
+/* Install the cert/key pair in vpp before TLS/QUIC use. On the binary API
+ * path a timeout waiting for STATE_ATTACHED exits the process.
+ * NOTE(review): the sapi path ignores echo_sapi_add_cert_key's return
+ * value, unlike the bapi path which exits on failure — confirm intended. */
+static void
+echo_add_cert_key (echo_main_t *em)
+{
+  if (em->use_app_socket_api)
+    echo_sapi_add_cert_key (em);
+  else
+    {
+      echo_send_add_cert_key (em);
+      if (wait_for_state_change (em, STATE_ATTACHED, TIMEOUT))
+	{
+	  ECHO_FAIL (ECHO_FAIL_APP_ATTACH,
+		     "Couldn't add crypto context to vpp\n");
+	  exit (1);
+	}
+    }
+}
+
+/* Remove the previously installed cert/key pair. Binary API path waits
+ * (up to TIMEOUT) for STATE_CLEANED_CERT_KEY. Returns 0 on success, -1 on
+ * timeout. */
+static int
+echo_del_cert_key (echo_main_t *em)
+{
+  if (em->use_app_socket_api)
+    return echo_sapi_del_cert_key (em);
+
+  echo_send_del_cert_key (em);
+  if (wait_for_state_change (em, STATE_CLEANED_CERT_KEY, TIMEOUT))
+    {
+      ECHO_FAIL (ECHO_FAIL_DEL_CERT_KEY, "Couldn't cleanup cert and key");
+      return -1;
+    }
+  return 0;
+}
+
+/* Tear down the control-plane connection to vpp. The app socket API has
+ * no explicit disconnect step here, so it is a no-op in that mode. */
+static void
+echo_disconnect (echo_main_t *em)
+{
+  if (em->use_app_socket_api)
+    return;
+
+  if (em->use_sock_api)
+    vl_socket_client_disconnect ();
+  else
+    vl_client_disconnect_from_vlib ();
+}
+
+/* Attach the echo app to vpp's session layer. The sapi path is
+ * synchronous; the binary API path hooks up the API handlers, sends the
+ * attach and waits (up to TIMEOUT) for STATE_ATTACHED_NO_CERT. Returns 0
+ * on success, -1 on timeout. */
+static int
+echo_attach (echo_main_t *em)
+{
+  if (em->use_app_socket_api)
+    return echo_sapi_attach (em);
+  else
+    {
+      echo_api_hookup (em);
+      echo_send_attach (em);
+      if (wait_for_state_change (em, STATE_ATTACHED_NO_CERT, TIMEOUT))
+	{
+	  ECHO_FAIL (ECHO_FAIL_ATTACH_TO_VPP,
+		     "Couldn't attach to vpp, did you run <session enable> ?");
+	  return -1;
+	}
+    }
+  return 0;
+}
+
int
main (int argc, char **argv)
{
echo_main_t *em = &echo_main;
fifo_segment_main_t *sm = &em->segment_main;
- char *app_name;
u64 i;
+ int *rv;
svm_msg_q_cfg_t _cfg, *cfg = &_cfg;
u32 rpc_queue_size = 256 << 10;
@@ -1329,11 +1429,9 @@ main (int argc, char **argv)
for (i = 0; i < em->tx_buf_size; i++)
em->connect_test_data[i] = i & 0xff;
- /* *INDENT-OFF* */
svm_msg_q_ring_cfg_t rc[1] = {
{rpc_queue_size, sizeof (echo_rpc_msg_t), 0},
};
- /* *INDENT-ON* */
cfg->consumer_pid = getpid ();
cfg->n_rings = 1;
cfg->q_nitems = rpc_queue_size;
@@ -1344,8 +1442,10 @@ main (int argc, char **argv)
signal (SIGQUIT, stop_signal);
signal (SIGTERM, stop_signal);
- app_name = em->i_am_master ? "echo_server" : "echo_client";
- if (connect_to_vpp (app_name))
+ em->app_name =
+ format (0, "%s%c", em->i_am_master ? "echo_server" : "echo_client", 0);
+
+ if (connect_to_vpp (em))
{
svm_region_exit ();
ECHO_FAIL (ECHO_FAIL_CONNECT_TO_VPP, "Couldn't connect to vpp");
@@ -1355,34 +1455,22 @@ main (int argc, char **argv)
echo_session_prealloc (em);
echo_notify_event (em, ECHO_EVT_START);
- echo_api_hookup (em);
+ if (echo_attach (em))
+ goto exit_on_error;
- echo_send_attach (em);
- if (wait_for_state_change (em, STATE_ATTACHED_NO_CERT, TIMEOUT))
+ if (echo_needs_crypto (em))
{
- ECHO_FAIL (ECHO_FAIL_ATTACH_TO_VPP,
- "Couldn't attach to vpp, did you run <session enable> ?");
- goto exit_on_error;
+ ECHO_LOG (2, "Adding crypto context %U", echo_format_crypto_engine,
+ em->crypto_engine);
+ echo_add_cert_key (em);
}
-
- if (em->uri_elts.transport_proto != TRANSPORT_PROTO_QUIC
- && em->uri_elts.transport_proto != TRANSPORT_PROTO_TLS)
- em->state = STATE_ATTACHED;
else
{
- ECHO_LOG (2, "Adding crypto context %U", echo_format_crypto_engine,
- em->crypto_engine);
- echo_send_add_cert_key (em);
- if (wait_for_state_change (em, STATE_ATTACHED, TIMEOUT))
- {
- ECHO_FAIL (ECHO_FAIL_APP_ATTACH,
- "Couldn't add crypto context to vpp\n");
- exit (1);
- }
+ em->state = STATE_ATTACHED;
}
- if (pthread_create (&em->mq_thread_handle,
- NULL /*attr */ , echo_mq_thread_fn, 0))
+ if (pthread_create (&em->mq_thread_handle, NULL /*attr */, echo_mq_thread_fn,
+ 0))
{
ECHO_FAIL (ECHO_FAIL_PTHREAD_CREATE, "pthread create errored");
goto exit_on_error;
@@ -1402,30 +1490,22 @@ main (int argc, char **argv)
clients_run (em);
echo_notify_event (em, ECHO_EVT_EXIT);
echo_free_sessions (em);
- echo_send_del_cert_key (em);
- if (wait_for_state_change (em, STATE_CLEANED_CERT_KEY, TIMEOUT))
+ if (echo_needs_crypto (em))
{
- ECHO_FAIL (ECHO_FAIL_DEL_CERT_KEY, "Couldn't cleanup cert and key");
- goto exit_on_error;
+ if (echo_del_cert_key (em))
+ goto exit_on_error;
}
- echo_send_detach (em);
- if (wait_for_state_change (em, STATE_DETACHED, TIMEOUT))
- {
- ECHO_FAIL (ECHO_FAIL_DETACH, "Couldn't detach from vpp");
- goto exit_on_error;
- }
- int *rv;
+ if (echo_detach (em))
+ goto exit_on_error;
+
pthread_join (em->mq_thread_handle, (void **) &rv);
if (rv)
{
ECHO_FAIL (ECHO_FAIL_MQ_PTHREAD, "mq pthread errored %d", rv);
goto exit_on_error;
}
- if (em->use_sock_api)
- vl_socket_client_disconnect ();
- else
- vl_client_disconnect_from_vlib ();
+ echo_disconnect (em);
echo_assert_test_suceeded (em);
exit_on_error:
ECHO_LOG (1, "Test complete !\n");
diff --git a/src/plugins/hs_apps/sapi/vpp_echo_bapi.c b/src/plugins/hs_apps/sapi/vpp_echo_bapi.c
index 38fb522351c..868cc3a0591 100644
--- a/src/plugins/hs_apps/sapi/vpp_echo_bapi.c
+++ b/src/plugins/hs_apps/sapi/vpp_echo_bapi.c
@@ -103,6 +103,19 @@ echo_send_del_cert_key (echo_main_t * em)
vl_msg_api_send_shmem (em->vl_input_queue, (u8 *) & bmp);
}
+/* Binary-API variant of fd reception: reads @n_fds descriptors from the
+ * API socket with a timeout of 5 (seconds, per the vl_socket_client API —
+ * confirm). Reports and swallows the clib error, returning -1 instead. */
+int
+echo_bapi_recv_fd (echo_main_t *em, int *fds, int n_fds)
+{
+  clib_error_t *err;
+  err = vl_socket_client_recv_fd_msg (fds, n_fds, 5);
+  if (err)
+    {
+      clib_error_report (err);
+      return -1;
+    }
+  return 0;
+}
+
static u8
echo_transport_needs_crypto (transport_proto_t proto)
{
@@ -265,11 +278,11 @@ echo_segment_lookup (u64 segment_handle)
clib_spinlock_lock (&em->segment_handles_lock);
segment_idxp = hash_get (em->shared_segment_handles, segment_handle);
clib_spinlock_unlock (&em->segment_handles_lock);
- if (!segment_idxp)
- return ~0;
+ if (segment_idxp)
+ return ((u32) *segment_idxp);
ECHO_LOG (2, "Segment not mapped (0x%lx)", segment_handle);
- return ((u32) *segment_idxp);
+ return ~0;
}
void
@@ -543,11 +556,14 @@ _(APPLICATION_DETACH_REPLY, application_detach_reply) \
_(APP_ADD_CERT_KEY_PAIR_REPLY, app_add_cert_key_pair_reply) \
_(APP_DEL_CERT_KEY_PAIR_REPLY, app_del_cert_key_pair_reply)
-#define vl_print(handle, ...) fformat (handle, __VA_ARGS__)
#define vl_endianfun
#include <vnet/session/session.api.h>
#undef vl_endianfun
+#define vl_calcsizefun
+#include <vnet/session/session.api.h>
+#undef vl_calcsizefun
+
#define vl_printfun
#include <vnet/session/session.api.h>
#undef vl_printfun
@@ -569,10 +585,18 @@ echo_api_hookup (echo_main_t * em)
return;
#define _(N, n) \
- vl_msg_api_set_handlers (REPLY_MSG_ID_BASE + VL_API_##N, #n, \
- vl_api_##n##_t_handler, vl_noop_handler, \
- vl_api_##n##_t_endian, vl_api_##n##_t_print, \
- sizeof (vl_api_##n##_t), 1);
+ vl_msg_api_config (&(vl_msg_api_msg_config_t){ \
+ .id = REPLY_MSG_ID_BASE + VL_API_##N, \
+ .name = #n, \
+ .handler = vl_api_##n##_t_handler, \
+ .endian = vl_api_##n##_t_endian, \
+ .format_fn = vl_api_##n##_t_format, \
+ .size = sizeof (vl_api_##n##_t), \
+ .traced = 1, \
+ .tojson = vl_api_##n##_t_tojson, \
+ .fromjson = vl_api_##n##_t_fromjson, \
+ .calc_size = vl_api_##n##_t_calc_size, \
+ });
foreach_quic_echo_msg;
#undef _
}
diff --git a/src/plugins/hs_apps/sapi/vpp_echo_common.h b/src/plugins/hs_apps/sapi/vpp_echo_common.h
index 80c539ccb0f..9b2a2c677b5 100644
--- a/src/plugins/hs_apps/sapi/vpp_echo_common.h
+++ b/src/plugins/hs_apps/sapi/vpp_echo_common.h
@@ -26,6 +26,9 @@
#define LOG_EVERY_N_IDLE_CYCLES (1e8)
#define ECHO_MQ_SEG_HANDLE ((u64) ~0 - 1)
+#define ECHO_INVALID_SEGMENT_INDEX ((u32) ~0)
+#define ECHO_INVALID_SEGMENT_HANDLE ((u64) ~0)
+
#define foreach_echo_fail_code \
_(ECHO_FAIL_NONE, "ECHO_FAIL_NONE") \
_(ECHO_FAIL_USAGE, "ECHO_FAIL_USAGE") \
@@ -269,6 +272,7 @@ typedef struct
svm_queue_t *vl_input_queue; /* vpe input queue */
u32 my_client_index; /* API client handle */
u8 *uri; /* The URI we're playing with */
+ u8 *app_name;
u32 n_uris; /* Cycle through adjacent ips */
ip46_address_t lcl_ip; /* Local ip for client */
u8 lcl_ip_set;
@@ -277,6 +281,8 @@ typedef struct
svm_msg_q_t *ctrl_mq; /* Our control queue (towards vpp) */
clib_time_t clib_time; /* For deadman timers */
u8 *socket_name;
+ u8 use_app_socket_api;
+ clib_socket_t app_api_sock;
int i_am_master;
u32 *listen_session_indexes; /* vec of vpp listener sessions */
volatile u32 listen_session_cnt;
@@ -449,6 +455,15 @@ void echo_send_disconnect_session (echo_main_t * em, void *args);
void echo_api_hookup (echo_main_t * em);
void echo_send_add_cert_key (echo_main_t * em);
void echo_send_del_cert_key (echo_main_t * em);
+int echo_bapi_recv_fd (echo_main_t *em, int *fds, int n_fds);
+
+/* Session socket API */
+int echo_sapi_attach (echo_main_t *em);
+int echo_sapi_add_cert_key (echo_main_t *em);
+int echo_sapi_del_cert_key (echo_main_t *em);
+int echo_api_connect_app_socket (echo_main_t *em);
+int echo_sapi_detach (echo_main_t *em);
+int echo_sapi_recv_fd (echo_main_t *em, int *fds, int n_fds);
#endif /* __included_vpp_echo_common_h__ */
diff --git a/src/plugins/hs_apps/sapi/vpp_echo_proto_quic.c b/src/plugins/hs_apps/sapi/vpp_echo_proto_quic.c
index c67b35fd8e6..1b0dbf33e29 100644
--- a/src/plugins/hs_apps/sapi/vpp_echo_proto_quic.c
+++ b/src/plugins/hs_apps/sapi/vpp_echo_proto_quic.c
@@ -239,7 +239,6 @@ quic_echo_initiate_qsession_close_no_stream (echo_main_t * em)
/* Close Quic session without streams */
echo_session_t *s;
- /* *INDENT-OFF* */
pool_foreach (s, em->sessions)
{
if (s->session_type == ECHO_SESSION_TYPE_QUIC)
@@ -261,7 +260,6 @@ quic_echo_initiate_qsession_close_no_stream (echo_main_t * em)
ECHO_LOG (2,"%U: PASSIVE close", echo_format_session, s);
}
}
- /* *INDENT-ON* */
}
static void
diff --git a/src/plugins/hs_apps/sapi/vpp_echo_sapi.c b/src/plugins/hs_apps/sapi/vpp_echo_sapi.c
new file mode 100644
index 00000000000..a21fbea6183
--- /dev/null
+++ b/src/plugins/hs_apps/sapi/vpp_echo_sapi.c
@@ -0,0 +1,330 @@
+/*
+ * Copyright (c) 2022 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <hs_apps/sapi/vpp_echo_common.h>
+
+int
+echo_api_connect_app_socket (echo_main_t *em)
+{
+ clib_socket_t *cs = &em->app_api_sock;
+ clib_error_t *err;
+ int rv = 0;
+
+ cs->config = (char *) em->socket_name;
+ cs->flags =
+ CLIB_SOCKET_F_IS_CLIENT | CLIB_SOCKET_F_SEQPACKET | CLIB_SOCKET_F_BLOCKING;
+
+ if ((err = clib_socket_init (cs)))
+ {
+ clib_error_report (err);
+ rv = -1;
+ }
+
+ return rv;
+}
+
+static inline u64
+echo_vpp_worker_segment_handle (u32 wrk_index)
+{
+ return (ECHO_INVALID_SEGMENT_HANDLE - wrk_index - 1);
+}
+
+static int
+echo_segment_discover_mqs (uword segment_handle, int *fds, u32 n_fds)
+{
+ echo_main_t *em = &echo_main;
+ fifo_segment_t *fs;
+ u32 fs_index;
+
+ fs_index = echo_segment_lookup (segment_handle);
+ if (fs_index == ECHO_INVALID_SEGMENT_INDEX)
+ {
+ ECHO_LOG (0, "ERROR: mq segment %lx for is not attached!",
+ segment_handle);
+ return -1;
+ }
+
+ clib_spinlock_lock (&em->segment_handles_lock);
+
+ fs = fifo_segment_get_segment (&em->segment_main, fs_index);
+ fifo_segment_msg_qs_discover (fs, fds, n_fds);
+
+ clib_spinlock_unlock (&em->segment_handles_lock);
+
+ return 0;
+}
+
+static int
+echo_api_attach_reply_handler (app_sapi_attach_reply_msg_t *mp, int *fds)
+{
+ echo_main_t *em = &echo_main;
+ int i, rv, n_fds_used = 0;
+ u64 segment_handle;
+ u8 *segment_name;
+
+ if (mp->retval)
+ {
+ ECHO_LOG (0, "attach failed: %U", format_session_error, mp->retval);
+ goto failed;
+ }
+
+ em->my_client_index = mp->api_client_handle;
+ segment_handle = mp->segment_handle;
+ if (segment_handle == ECHO_INVALID_SEGMENT_HANDLE)
+ {
+ ECHO_LOG (0, "invalid segment handle");
+ goto failed;
+ }
+
+ if (!mp->n_fds)
+ goto failed;
+
+ if (mp->fd_flags & SESSION_FD_F_VPP_MQ_SEGMENT)
+ if (echo_segment_attach (echo_vpp_worker_segment_handle (0), "vpp-mq-seg",
+ SSVM_SEGMENT_MEMFD, fds[n_fds_used++]))
+ goto failed;
+
+ if (mp->fd_flags & SESSION_FD_F_MEMFD_SEGMENT)
+ {
+ segment_name = format (0, "memfd-%ld%c", segment_handle, 0);
+ rv = echo_segment_attach (segment_handle, (char *) segment_name,
+ SSVM_SEGMENT_MEMFD, fds[n_fds_used++]);
+ vec_free (segment_name);
+ if (rv != 0)
+ goto failed;
+ }
+
+ echo_segment_attach_mq (segment_handle, mp->app_mq, 0, &em->app_mq);
+
+ if (mp->fd_flags & SESSION_FD_F_MQ_EVENTFD)
+ {
+ ECHO_LOG (0, "SESSION_FD_F_MQ_EVENTFD unsupported!");
+ goto failed;
+ }
+
+ echo_segment_discover_mqs (echo_vpp_worker_segment_handle (0),
+ fds + n_fds_used, mp->n_fds - n_fds_used);
+ echo_segment_attach_mq (echo_vpp_worker_segment_handle (0), mp->vpp_ctrl_mq,
+ mp->vpp_ctrl_mq_thread, &em->ctrl_mq);
+
+ em->state = STATE_ATTACHED_NO_CERT;
+ return 0;
+
+failed:
+
+ for (i = clib_max (n_fds_used - 1, 0); i < mp->n_fds; i++)
+ close (fds[i]);
+
+ return -1;
+}
+
+static int
+echo_api_send_attach (clib_socket_t *cs)
+{
+ echo_main_t *em = &echo_main;
+ app_sapi_msg_t msg = { 0 };
+ app_sapi_attach_msg_t *mp = &msg.attach;
+ clib_error_t *err;
+
+ clib_memcpy (&mp->name, em->app_name, vec_len (em->app_name));
+ mp->options[APP_OPTIONS_FLAGS] =
+ APP_OPTIONS_FLAGS_ACCEPT_REDIRECT | APP_OPTIONS_FLAGS_ADD_SEGMENT;
+ mp->options[APP_OPTIONS_SEGMENT_SIZE] = 256 << 20;
+ mp->options[APP_OPTIONS_ADD_SEGMENT_SIZE] = 128 << 20;
+ mp->options[APP_OPTIONS_RX_FIFO_SIZE] = em->fifo_size;
+ mp->options[APP_OPTIONS_TX_FIFO_SIZE] = em->fifo_size;
+ mp->options[APP_OPTIONS_PREALLOC_FIFO_PAIRS] = em->prealloc_fifo_pairs;
+ mp->options[APP_OPTIONS_EVT_QUEUE_SIZE] = em->evt_q_size;
+
+ msg.type = APP_SAPI_MSG_TYPE_ATTACH;
+ err = clib_socket_sendmsg (cs, &msg, sizeof (msg), 0, 0);
+ if (err)
+ {
+ clib_error_report (err);
+ return -1;
+ }
+
+ return 0;
+}
+
+int
+echo_sapi_attach (echo_main_t *em)
+{
+ app_sapi_msg_t _rmp, *rmp = &_rmp;
+ clib_error_t *err;
+ clib_socket_t *cs;
+ int fds[32];
+
+ cs = &em->app_api_sock;
+ if (echo_api_send_attach (cs))
+ return -1;
+
+ /*
+ * Wait for attach reply
+ */
+ err = clib_socket_recvmsg (cs, rmp, sizeof (*rmp), fds, ARRAY_LEN (fds));
+ if (err)
+ {
+ clib_error_report (err);
+ return -1;
+ }
+
+ if (rmp->type != APP_SAPI_MSG_TYPE_ATTACH_REPLY)
+ return -1;
+
+ return echo_api_attach_reply_handler (&rmp->attach_reply, fds);
+}
+
+int
+echo_sapi_add_cert_key (echo_main_t *em)
+{
+ u32 cert_len = test_srv_crt_rsa_len;
+ u32 key_len = test_srv_key_rsa_len;
+ u32 certkey_len = cert_len + key_len;
+ app_sapi_msg_t _msg = { 0 }, *msg = &_msg;
+ app_sapi_cert_key_add_del_msg_t *mp;
+ app_sapi_msg_t _rmp, *rmp = &_rmp;
+ clib_error_t *err;
+ clib_socket_t *cs;
+ u8 *certkey = 0;
+ int rv = -1;
+
+ msg->type = APP_SAPI_MSG_TYPE_ADD_DEL_CERT_KEY;
+ mp = &msg->cert_key_add_del;
+ mp->context = ntohl (0xfeedface);
+ mp->cert_len = cert_len;
+ mp->certkey_len = certkey_len;
+ mp->is_add = 1;
+
+ vec_validate (certkey, certkey_len - 1);
+ clib_memcpy_fast (certkey, test_srv_crt_rsa, cert_len);
+ clib_memcpy_fast (certkey + cert_len, test_srv_key_rsa, key_len);
+
+ cs = &em->app_api_sock;
+ err = clib_socket_sendmsg (cs, msg, sizeof (*msg), 0, 0);
+ if (err)
+ {
+ clib_error_report (err);
+ goto done;
+ }
+
+ err = clib_socket_sendmsg (cs, certkey, certkey_len, 0, 0);
+ if (err)
+ {
+ clib_error_report (err);
+ goto done;
+ }
+
+ /*
+ * Wait for reply and process it
+ */
+ err = clib_socket_recvmsg (cs, rmp, sizeof (*rmp), 0, 0);
+ if (err)
+ {
+ clib_error_report (err);
+ goto done;
+ }
+
+ if (rmp->type != APP_SAPI_MSG_TYPE_ADD_DEL_CERT_KEY_REPLY)
+ {
+ ECHO_LOG (0, "unexpected reply type %u", rmp->type);
+ goto done;
+ }
+
+ if (!rmp->cert_key_add_del_reply.retval)
+ rv = rmp->cert_key_add_del_reply.index;
+
+ em->state = STATE_ATTACHED;
+ em->ckpair_index = rv;
+
+done:
+
+ return rv;
+}
+
+int
+echo_sapi_recv_fd (echo_main_t *em, int *fds, int n_fds)
+{
+ app_sapi_msg_t _msg, *msg = &_msg;
+ clib_error_t *err =
+ clib_socket_recvmsg (&em->app_api_sock, msg, sizeof (*msg), fds, n_fds);
+ if (err)
+ {
+ clib_error_report (err);
+ return -1;
+ }
+ return 0;
+}
+
+int
+echo_sapi_detach (echo_main_t *em)
+{
+ clib_socket_t *cs = &em->app_api_sock;
+ clib_socket_close (cs);
+ em->state = STATE_DETACHED;
+ return 0;
+}
+
+int
+echo_sapi_del_cert_key (echo_main_t *em)
+{
+ app_sapi_msg_t _msg = { 0 }, *msg = &_msg;
+ app_sapi_cert_key_add_del_msg_t *mp;
+ app_sapi_msg_t _rmp, *rmp = &_rmp;
+ clib_error_t *err;
+ clib_socket_t *cs;
+
+ msg->type = APP_SAPI_MSG_TYPE_ADD_DEL_CERT_KEY;
+ mp = &msg->cert_key_add_del;
+ mp->index = em->ckpair_index;
+
+ cs = &em->app_api_sock;
+ err = clib_socket_sendmsg (cs, msg, sizeof (*msg), 0, 0);
+ if (err)
+ {
+ clib_error_report (err);
+ return -1;
+ }
+
+ /*
+ * Wait for reply and process it
+ */
+ err = clib_socket_recvmsg (cs, rmp, sizeof (*rmp), 0, 0);
+ if (err)
+ {
+ clib_error_report (err);
+ return -1;
+ }
+
+ if (rmp->type != APP_SAPI_MSG_TYPE_ADD_DEL_CERT_KEY_REPLY)
+ {
+ ECHO_LOG (0, "unexpected reply type %u", rmp->type);
+ return -1;
+ }
+
+ if (rmp->cert_key_add_del_reply.retval)
+ return -1;
+
+ em->state = STATE_CLEANED_CERT_KEY;
+ return 0;
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/hs_apps/vcl/sock_test_client.c b/src/plugins/hs_apps/vcl/sock_test_client.c
index 35252da21bc..c8815692184 100644
--- a/src/plugins/hs_apps/vcl/sock_test_client.c
+++ b/src/plugins/hs_apps/vcl/sock_test_client.c
@@ -46,17 +46,17 @@ sock_test_cfg_sync (vcl_test_session_t * socket)
{
sock_client_main_t *scm = &sock_client_main;
vcl_test_session_t *ctrl = &scm->ctrl_socket;
- vcl_test_cfg_t *rl_cfg = (vcl_test_cfg_t *) socket->rxbuf;
+ hs_test_cfg_t *rl_cfg = (hs_test_cfg_t *) socket->rxbuf;
int rx_bytes, tx_bytes;
if (socket->cfg.verbose)
- vcl_test_cfg_dump (&socket->cfg, 1 /* is_client */ );
+ hs_test_cfg_dump (&socket->cfg, 1 /* is_client */);
ctrl->cfg.seq_num = ++scm->cfg_seq_num;
if (socket->cfg.verbose)
{
stinf ("(fd %d): Sending config sent to server.\n", socket->fd);
- vcl_test_cfg_dump (&ctrl->cfg, 1 /* is_client */ );
+ hs_test_cfg_dump (&ctrl->cfg, 1 /* is_client */);
}
tx_bytes = sock_test_write (socket->fd, (uint8_t *) & ctrl->cfg,
sizeof (ctrl->cfg), NULL, ctrl->cfg.verbose);
@@ -64,21 +64,21 @@ sock_test_cfg_sync (vcl_test_session_t * socket)
stabrt ("(fd %d): write test cfg failed (%d)!", socket->fd, tx_bytes);
rx_bytes = sock_test_read (socket->fd, (uint8_t *) socket->rxbuf,
- sizeof (vcl_test_cfg_t), NULL);
+ sizeof (hs_test_cfg_t), NULL);
if (rx_bytes < 0)
return rx_bytes;
- if (rl_cfg->magic != VCL_TEST_CFG_CTRL_MAGIC)
+ if (rl_cfg->magic != HS_TEST_CFG_CTRL_MAGIC)
stabrt ("(fd %d): Bad server reply cfg -- aborting!\n", socket->fd);
- if ((rx_bytes != sizeof (vcl_test_cfg_t))
- || !vcl_test_cfg_verify (rl_cfg, &ctrl->cfg))
+ if ((rx_bytes != sizeof (hs_test_cfg_t)) ||
+ !hs_test_cfg_verify (rl_cfg, &ctrl->cfg))
stabrt ("(fd %d): Invalid config received from server!\n", socket->fd);
if (socket->cfg.verbose)
{
stinf ("(fd %d): Got config back from server.", socket->fd);
- vcl_test_cfg_dump (rl_cfg, 1 /* is_client */ );
+ hs_test_cfg_dump (rl_cfg, 1 /* is_client */);
}
ctrl->cfg.ctrl_handle = ((ctrl->cfg.ctrl_handle == ~0) ?
rl_cfg->ctrl_handle : ctrl->cfg.ctrl_handle);
@@ -263,27 +263,25 @@ echo_test_client (void)
vcl_test_stats_dump ("CLIENT RESULTS", &ctrl->stats,
1 /* show_rx */ , 1 /* show tx */ ,
ctrl->cfg.verbose);
- vcl_test_cfg_dump (&ctrl->cfg, 1 /* is_client */ );
+ hs_test_cfg_dump (&ctrl->cfg, 1 /* is_client */);
if (ctrl->cfg.verbose > 1)
{
- stinf (" ctrl socket info\n"
- VCL_TEST_SEPARATOR_STRING
+ stinf (" ctrl socket info\n" HS_TEST_SEPARATOR_STRING
" fd: %d (0x%08x)\n"
" rxbuf: %p\n"
" rxbuf size: %u (0x%08x)\n"
" txbuf: %p\n"
- " txbuf size: %u (0x%08x)\n"
- VCL_TEST_SEPARATOR_STRING,
- ctrl->fd, (uint32_t) ctrl->fd,
- ctrl->rxbuf, ctrl->rxbuf_size, ctrl->rxbuf_size,
- ctrl->txbuf, ctrl->txbuf_size, ctrl->txbuf_size);
+ " txbuf size: %u (0x%08x)\n" HS_TEST_SEPARATOR_STRING,
+ ctrl->fd, (uint32_t) ctrl->fd, ctrl->rxbuf, ctrl->rxbuf_size,
+ ctrl->rxbuf_size, ctrl->txbuf, ctrl->txbuf_size,
+ ctrl->txbuf_size);
}
}
}
static void
-stream_test_client (vcl_test_t test)
+stream_test_client (hs_test_t test)
{
sock_client_main_t *scm = &sock_client_main;
vcl_test_session_t *ctrl = &scm->ctrl_socket;
@@ -292,7 +290,7 @@ stream_test_client (vcl_test_t test)
uint32_t i, n;
fd_set wr_fdset, rd_fdset;
fd_set _wfdset, *wfdset = &_wfdset;
- fd_set _rfdset, *rfdset = (test == VCL_TEST_TYPE_BI) ? &_rfdset : 0;
+ fd_set _rfdset, *rfdset = (test == HS_TEST_TYPE_BI) ? &_rfdset : 0;
ctrl->cfg.total_bytes = ctrl->cfg.num_writes * ctrl->cfg.txbuf_size;
ctrl->cfg.ctrl_handle = ~0;
@@ -300,7 +298,7 @@ stream_test_client (vcl_test_t test)
stinf ("\n" SOCK_TEST_BANNER_STRING
"CLIENT (fd %d): %s-directional Stream Test!\n\n"
"CLIENT (fd %d): Sending config to server on ctrl socket...\n",
- ctrl->fd, test == VCL_TEST_TYPE_BI ? "Bi" : "Uni", ctrl->fd);
+ ctrl->fd, test == HS_TEST_TYPE_BI ? "Bi" : "Uni", ctrl->fd);
if (sock_test_cfg_sync (ctrl))
stabrt ("test cfg sync failed -- aborting!");
@@ -352,8 +350,7 @@ stream_test_client (vcl_test_t test)
(tsock->stats.stop.tv_nsec == 0)))
continue;
- if ((test == VCL_TEST_TYPE_BI) &&
- FD_ISSET (tsock->fd, rfdset) &&
+ if ((test == HS_TEST_TYPE_BI) && FD_ISSET (tsock->fd, rfdset) &&
(tsock->stats.rx_bytes < ctrl->cfg.total_bytes))
{
(void) sock_test_read (tsock->fd,
@@ -372,9 +369,9 @@ stream_test_client (vcl_test_t test)
tsock->fd);
}
- if (((test == VCL_TEST_TYPE_UNI) &&
+ if (((test == HS_TEST_TYPE_UNI) &&
(tsock->stats.tx_bytes >= ctrl->cfg.total_bytes)) ||
- ((test == VCL_TEST_TYPE_BI) &&
+ ((test == HS_TEST_TYPE_BI) &&
(tsock->stats.rx_bytes >= ctrl->cfg.total_bytes)))
{
clock_gettime (CLOCK_REALTIME, &tsock->stats.stop);
@@ -399,40 +396,39 @@ stream_test_client (vcl_test_t test)
snprintf (buf, sizeof (buf), "CLIENT (fd %d) RESULTS", tsock->fd);
vcl_test_stats_dump (buf, &tsock->stats,
- test == VCL_TEST_TYPE_BI /* show_rx */ ,
- 1 /* show tx */ , ctrl->cfg.verbose);
+ test == HS_TEST_TYPE_BI /* show_rx */,
+ 1 /* show tx */, ctrl->cfg.verbose);
}
vcl_test_stats_accumulate (&ctrl->stats, &tsock->stats);
}
vcl_test_stats_dump ("CLIENT RESULTS", &ctrl->stats,
- test == VCL_TEST_TYPE_BI /* show_rx */ ,
- 1 /* show tx */ , ctrl->cfg.verbose);
- vcl_test_cfg_dump (&ctrl->cfg, 1 /* is_client */ );
+ test == HS_TEST_TYPE_BI /* show_rx */, 1 /* show tx */,
+ ctrl->cfg.verbose);
+ hs_test_cfg_dump (&ctrl->cfg, 1 /* is_client */);
if (ctrl->cfg.verbose)
{
- stinf (" ctrl socket info\n"
- VCL_TEST_SEPARATOR_STRING
+ stinf (" ctrl socket info\n" HS_TEST_SEPARATOR_STRING
" fd: %d (0x%08x)\n"
" rxbuf: %p\n"
" rxbuf size: %u (0x%08x)\n"
" txbuf: %p\n"
- " txbuf size: %u (0x%08x)\n"
- VCL_TEST_SEPARATOR_STRING,
- ctrl->fd, (uint32_t) ctrl->fd,
- ctrl->rxbuf, ctrl->rxbuf_size, ctrl->rxbuf_size,
- ctrl->txbuf, ctrl->txbuf_size, ctrl->txbuf_size);
+ " txbuf size: %u (0x%08x)\n" HS_TEST_SEPARATOR_STRING,
+ ctrl->fd, (uint32_t) ctrl->fd, ctrl->rxbuf, ctrl->rxbuf_size,
+ ctrl->rxbuf_size, ctrl->txbuf, ctrl->txbuf_size,
+ ctrl->txbuf_size);
}
- ctrl->cfg.test = VCL_TEST_TYPE_ECHO;
+ ctrl->cfg.test = HS_TEST_TYPE_ECHO;
if (sock_test_cfg_sync (ctrl))
stabrt ("post-test cfg sync failed!");
- stinf ("(fd %d): %s-directional Stream Test Complete!\n"
- SOCK_TEST_BANNER_STRING "\n", ctrl->fd,
- test == VCL_TEST_TYPE_BI ? "Bi" : "Uni");
+ stinf (
+ "(fd %d): %s-directional Stream Test Complete!\n" SOCK_TEST_BANNER_STRING
+ "\n",
+ ctrl->fd, test == HS_TEST_TYPE_BI ? "Bi" : "Uni");
}
static void
@@ -448,24 +444,24 @@ exit_client (void)
for (i = 0; i < ctrl->cfg.num_test_sessions; i++)
{
tsock = &scm->test_socket[i];
- tsock->cfg.test = VCL_TEST_TYPE_EXIT;
+ tsock->cfg.test = HS_TEST_TYPE_EXIT;
/* coverity[COPY_PASTE_ERROR] */
if (ctrl->cfg.verbose)
{
stinf ("\(fd %d): Sending exit cfg to server...\n", tsock->fd);
- vcl_test_cfg_dump (&tsock->cfg, 1 /* is_client */ );
+ hs_test_cfg_dump (&tsock->cfg, 1 /* is_client */);
}
(void) sock_test_write (tsock->fd, (uint8_t *) & tsock->cfg,
sizeof (tsock->cfg), &tsock->stats,
ctrl->cfg.verbose);
}
- ctrl->cfg.test = VCL_TEST_TYPE_EXIT;
+ ctrl->cfg.test = HS_TEST_TYPE_EXIT;
if (ctrl->cfg.verbose)
{
stinf ("\n(fd %d): Sending exit cfg to server...\n", ctrl->fd);
- vcl_test_cfg_dump (&ctrl->cfg, 1 /* is_client */ );
+ hs_test_cfg_dump (&ctrl->cfg, 1 /* is_client */);
}
(void) sock_test_write (ctrl->fd, (uint8_t *) & ctrl->cfg,
sizeof (ctrl->cfg), &ctrl->stats,
@@ -557,7 +553,7 @@ cfg_txbuf_size_set (void)
ctrl->cfg.total_bytes = ctrl->cfg.num_writes * ctrl->cfg.txbuf_size;
vcl_test_buf_alloc (&ctrl->cfg, 0 /* is_rxbuf */ ,
(uint8_t **) & ctrl->txbuf, &ctrl->txbuf_size);
- vcl_test_cfg_dump (&ctrl->cfg, 1 /* is_client */ );
+ hs_test_cfg_dump (&ctrl->cfg, 1 /* is_client */);
}
else
stabrt ("Invalid txbuf size (%lu) < minimum buf size (%u)!",
@@ -576,7 +572,7 @@ cfg_num_writes_set (void)
{
ctrl->cfg.num_writes = num_writes;
ctrl->cfg.total_bytes = ctrl->cfg.num_writes * ctrl->cfg.txbuf_size;
- vcl_test_cfg_dump (&ctrl->cfg, 1 /* is_client */ );
+ hs_test_cfg_dump (&ctrl->cfg, 1 /* is_client */);
}
else
stabrt ("Invalid num writes: %u", num_writes);
@@ -596,7 +592,7 @@ cfg_num_test_sockets_set (void)
ctrl->cfg.num_test_sessions = num_test_sockets;
sock_test_connect_test_sockets (num_test_sockets);
- vcl_test_cfg_dump (&ctrl->cfg, 1 /* is_client */ );
+ hs_test_cfg_dump (&ctrl->cfg, 1 /* is_client */);
}
else
stabrt ("Invalid num test sockets: %u, (%d max)\n", num_test_sockets,
@@ -616,7 +612,7 @@ cfg_rxbuf_size_set (void)
ctrl->cfg.rxbuf_size = rxbuf_size;
vcl_test_buf_alloc (&ctrl->cfg, 1 /* is_rxbuf */ ,
(uint8_t **) & ctrl->rxbuf, &ctrl->rxbuf_size);
- vcl_test_cfg_dump (&ctrl->cfg, 1 /* is_client */ );
+ hs_test_cfg_dump (&ctrl->cfg, 1 /* is_client */);
}
else
stabrt ("Invalid rxbuf size (%lu) < minimum buf size (%u)!",
@@ -630,19 +626,19 @@ cfg_verbose_toggle (void)
vcl_test_session_t *ctrl = &scm->ctrl_socket;
ctrl->cfg.verbose = ctrl->cfg.verbose ? 0 : 1;
- vcl_test_cfg_dump (&ctrl->cfg, 1 /* is_client */ );
+ hs_test_cfg_dump (&ctrl->cfg, 1 /* is_client */);
}
-static vcl_test_t
+static hs_test_t
parse_input ()
{
sock_client_main_t *scm = &sock_client_main;
vcl_test_session_t *ctrl = &scm->ctrl_socket;
- vcl_test_t rv = VCL_TEST_TYPE_NONE;
+ hs_test_t rv = HS_TEST_TYPE_NONE;
if (!strncmp (VCL_TEST_TOKEN_EXIT, ctrl->txbuf,
strlen (VCL_TEST_TOKEN_EXIT)))
- rv = VCL_TEST_TYPE_EXIT;
+ rv = HS_TEST_TYPE_EXIT;
else if (!strncmp (VCL_TEST_TOKEN_HELP, ctrl->txbuf,
strlen (VCL_TEST_TOKEN_HELP)))
@@ -672,16 +668,16 @@ parse_input ()
strlen (VCL_TEST_TOKEN_RXBUF_SIZE)))
cfg_rxbuf_size_set ();
- else if (!strncmp (VCL_TEST_TOKEN_RUN_UNI, ctrl->txbuf,
- strlen (VCL_TEST_TOKEN_RUN_UNI)))
- rv = ctrl->cfg.test = VCL_TEST_TYPE_UNI;
+ else if (!strncmp (HS_TEST_TOKEN_RUN_UNI, ctrl->txbuf,
+ strlen (HS_TEST_TOKEN_RUN_UNI)))
+ rv = ctrl->cfg.test = HS_TEST_TYPE_UNI;
- else if (!strncmp (VCL_TEST_TOKEN_RUN_BI, ctrl->txbuf,
- strlen (VCL_TEST_TOKEN_RUN_BI)))
- rv = ctrl->cfg.test = VCL_TEST_TYPE_BI;
+ else if (!strncmp (HS_TEST_TOKEN_RUN_BI, ctrl->txbuf,
+ strlen (HS_TEST_TOKEN_RUN_BI)))
+ rv = ctrl->cfg.test = HS_TEST_TYPE_BI;
else
- rv = VCL_TEST_TYPE_ECHO;
+ rv = HS_TEST_TYPE_ECHO;
return rv;
}
@@ -713,9 +709,9 @@ main (int argc, char **argv)
sock_client_main_t *scm = &sock_client_main;
vcl_test_session_t *ctrl = &scm->ctrl_socket;
int c, rv;
- vcl_test_t post_test = VCL_TEST_TYPE_NONE;
+ hs_test_t post_test = HS_TEST_TYPE_NONE;
- vcl_test_cfg_init (&ctrl->cfg);
+ hs_test_cfg_init (&ctrl->cfg);
vcl_test_session_buf_alloc (ctrl);
opterr = 0;
@@ -749,7 +745,7 @@ main (int argc, char **argv)
break;
case 'X':
- post_test = VCL_TEST_TYPE_EXIT;
+ post_test = HS_TEST_TYPE_EXIT;
break;
case 'E':
@@ -760,7 +756,7 @@ main (int argc, char **argv)
print_usage_and_exit ();
}
strncpy (ctrl->txbuf, optarg, ctrl->txbuf_size);
- ctrl->cfg.test = VCL_TEST_TYPE_ECHO;
+ ctrl->cfg.test = HS_TEST_TYPE_ECHO;
break;
case 'I':
@@ -836,11 +832,11 @@ main (int argc, char **argv)
break;
case 'U':
- ctrl->cfg.test = VCL_TEST_TYPE_UNI;
+ ctrl->cfg.test = HS_TEST_TYPE_UNI;
break;
case 'B':
- ctrl->cfg.test = VCL_TEST_TYPE_BI;
+ ctrl->cfg.test = HS_TEST_TYPE_BI;
break;
case 'V':
@@ -928,54 +924,54 @@ main (int argc, char **argv)
sock_test_connect_test_sockets (ctrl->cfg.num_test_sessions);
- while (ctrl->cfg.test != VCL_TEST_TYPE_EXIT)
+ while (ctrl->cfg.test != HS_TEST_TYPE_EXIT)
{
if (scm->dump_cfg)
{
- vcl_test_cfg_dump (&ctrl->cfg, 1 /* is_client */ );
+ hs_test_cfg_dump (&ctrl->cfg, 1 /* is_client */);
scm->dump_cfg = 0;
}
switch (ctrl->cfg.test)
{
- case VCL_TEST_TYPE_ECHO:
+ case HS_TEST_TYPE_ECHO:
echo_test_client ();
break;
- case VCL_TEST_TYPE_UNI:
- case VCL_TEST_TYPE_BI:
+ case HS_TEST_TYPE_UNI:
+ case HS_TEST_TYPE_BI:
stream_test_client (ctrl->cfg.test);
break;
- case VCL_TEST_TYPE_EXIT:
+ case HS_TEST_TYPE_EXIT:
continue;
- case VCL_TEST_TYPE_NONE:
+ case HS_TEST_TYPE_NONE:
default:
break;
}
switch (post_test)
{
- case VCL_TEST_TYPE_EXIT:
+ case HS_TEST_TYPE_EXIT:
switch (ctrl->cfg.test)
{
- case VCL_TEST_TYPE_EXIT:
- case VCL_TEST_TYPE_UNI:
- case VCL_TEST_TYPE_BI:
- case VCL_TEST_TYPE_ECHO:
- ctrl->cfg.test = VCL_TEST_TYPE_EXIT;
+ case HS_TEST_TYPE_EXIT:
+ case HS_TEST_TYPE_UNI:
+ case HS_TEST_TYPE_BI:
+ case HS_TEST_TYPE_ECHO:
+ ctrl->cfg.test = HS_TEST_TYPE_EXIT;
continue;
- case VCL_TEST_TYPE_NONE:
+ case HS_TEST_TYPE_NONE:
default:
break;
}
break;
- case VCL_TEST_TYPE_NONE:
- case VCL_TEST_TYPE_ECHO:
- case VCL_TEST_TYPE_UNI:
- case VCL_TEST_TYPE_BI:
+ case HS_TEST_TYPE_NONE:
+ case HS_TEST_TYPE_ECHO:
+ case HS_TEST_TYPE_UNI:
+ case HS_TEST_TYPE_BI:
default:
break;
}
diff --git a/src/plugins/hs_apps/vcl/sock_test_server.c b/src/plugins/hs_apps/vcl/sock_test_server.c
index d516c1722db..2356a4eadca 100644
--- a/src/plugins/hs_apps/vcl/sock_test_server.c
+++ b/src/plugins/hs_apps/vcl/sock_test_server.c
@@ -37,7 +37,7 @@ typedef struct
int fd;
uint8_t *buf;
uint32_t buf_size;
- vcl_test_cfg_t cfg;
+ hs_test_cfg_t cfg;
vcl_test_stats_t stats;
} sock_server_conn_t;
@@ -87,7 +87,7 @@ conn_pool_expand (size_t expand_size)
{
sock_server_conn_t *conn = &conn_pool[i];
memset (conn, 0, sizeof (*conn));
- vcl_test_cfg_init (&conn->cfg);
+ hs_test_cfg_init (&conn->cfg);
vcl_test_buf_alloc (&conn->cfg, 1 /* is_rxbuf */ , &conn->buf,
&conn->buf_size);
conn->cfg.txbuf_size = conn->cfg.rxbuf_size;
@@ -123,7 +123,7 @@ conn_pool_free (sock_server_conn_t * conn)
}
static inline void
-sync_config_and_reply (sock_server_conn_t * conn, vcl_test_cfg_t * rx_cfg)
+sync_config_and_reply (sock_server_conn_t *conn, hs_test_cfg_t *rx_cfg)
{
conn->cfg = *rx_cfg;
vcl_test_buf_alloc (&conn->cfg, 1 /* is_rxbuf */ ,
@@ -133,19 +133,18 @@ sync_config_and_reply (sock_server_conn_t * conn, vcl_test_cfg_t * rx_cfg)
if (conn->cfg.verbose)
{
stinf ("(fd %d): Replying to cfg message!\n", conn->fd);
- vcl_test_cfg_dump (&conn->cfg, 0 /* is_client */ );
+ hs_test_cfg_dump (&conn->cfg, 0 /* is_client */);
}
(void) sock_test_write (conn->fd, (uint8_t *) & conn->cfg,
sizeof (conn->cfg), NULL, conn->cfg.verbose);
}
static void
-stream_test_server_start_stop (sock_server_conn_t * conn,
- vcl_test_cfg_t * rx_cfg)
+stream_test_server_start_stop (sock_server_conn_t *conn, hs_test_cfg_t *rx_cfg)
{
sock_server_main_t *ssm = &sock_server_main;
int client_fd = conn->fd;
- vcl_test_t test = rx_cfg->test;
+ hs_test_t test = rx_cfg->test;
if (rx_cfg->ctrl_handle == conn->fd)
{
@@ -166,39 +165,37 @@ stream_test_server_start_stop (sock_server_conn_t * conn,
snprintf (buf, sizeof (buf), "SERVER (fd %d) RESULTS",
tc->fd);
- vcl_test_stats_dump (buf, &tc->stats, 1 /* show_rx */ ,
- test == VCL_TEST_TYPE_BI
- /* show tx */ ,
+ vcl_test_stats_dump (buf, &tc->stats, 1 /* show_rx */,
+ test == HS_TEST_TYPE_BI
+ /* show tx */,
conn->cfg.verbose);
}
}
}
- vcl_test_stats_dump ("SERVER RESULTS", &conn->stats, 1 /* show_rx */ ,
- (test == VCL_TEST_TYPE_BI) /* show_tx */ ,
+ vcl_test_stats_dump ("SERVER RESULTS", &conn->stats, 1 /* show_rx */,
+ (test == HS_TEST_TYPE_BI) /* show_tx */,
conn->cfg.verbose);
- vcl_test_cfg_dump (&conn->cfg, 0 /* is_client */ );
+ hs_test_cfg_dump (&conn->cfg, 0 /* is_client */);
if (conn->cfg.verbose)
{
- stinf (" sock server main\n"
- VCL_TEST_SEPARATOR_STRING
+ stinf (" sock server main\n" HS_TEST_SEPARATOR_STRING
" buf: %p\n"
- " buf size: %u (0x%08x)\n"
- VCL_TEST_SEPARATOR_STRING,
+ " buf size: %u (0x%08x)\n" HS_TEST_SEPARATOR_STRING,
conn->buf, conn->buf_size, conn->buf_size);
}
sync_config_and_reply (conn, rx_cfg);
- stinf ("SERVER (fd %d): %s-directional Stream Test Complete!\n"
- SOCK_TEST_BANNER_STRING "\n", conn->fd,
- test == VCL_TEST_TYPE_BI ? "Bi" : "Uni");
+ stinf ("SERVER (fd %d): %s-directional Stream Test "
+ "Complete!\n" SOCK_TEST_BANNER_STRING "\n",
+ conn->fd, test == HS_TEST_TYPE_BI ? "Bi" : "Uni");
}
else
{
stinf (SOCK_TEST_BANNER_STRING
"SERVER (fd %d): %s-directional Stream Test!\n"
" Sending client the test cfg to start streaming data...\n",
- client_fd, test == VCL_TEST_TYPE_BI ? "Bi" : "Uni");
+ client_fd, test == HS_TEST_TYPE_BI ? "Bi" : "Uni");
rx_cfg->ctrl_handle = (rx_cfg->ctrl_handle == ~0) ? conn->fd :
rx_cfg->ctrl_handle;
@@ -216,9 +213,9 @@ static inline void
stream_test_server (sock_server_conn_t * conn, int rx_bytes)
{
int client_fd = conn->fd;
- vcl_test_t test = conn->cfg.test;
+ hs_test_t test = conn->cfg.test;
- if (test == VCL_TEST_TYPE_BI)
+ if (test == HS_TEST_TYPE_BI)
(void) sock_test_write (client_fd, conn->buf, rx_bytes, &conn->stats,
conn->cfg.verbose);
@@ -373,15 +370,14 @@ sts_server_echo (sock_server_conn_t * conn, int rx_bytes)
}
static int
-sts_handle_cfg (vcl_test_cfg_t * rx_cfg, sock_server_conn_t * conn,
- int rx_bytes)
+sts_handle_cfg (hs_test_cfg_t *rx_cfg, sock_server_conn_t *conn, int rx_bytes)
{
sock_server_main_t *ssm = &sock_server_main;
if (rx_cfg->verbose)
{
stinf ("(fd %d): Received a cfg message!\n", conn->fd);
- vcl_test_cfg_dump (rx_cfg, 0 /* is_client */ );
+ hs_test_cfg_dump (rx_cfg, 0 /* is_client */);
}
if (rx_bytes != sizeof (*rx_cfg))
@@ -393,7 +389,7 @@ sts_handle_cfg (vcl_test_cfg_t * rx_cfg, sock_server_conn_t * conn,
if (conn->cfg.verbose)
{
stinf ("(fd %d): Replying to cfg message!\n", conn->fd);
- vcl_test_cfg_dump (rx_cfg, 0 /* is_client */ );
+ hs_test_cfg_dump (rx_cfg, 0 /* is_client */);
}
sock_test_write (conn->fd, (uint8_t *) & conn->cfg, sizeof (conn->cfg),
NULL, conn->cfg.verbose);
@@ -402,23 +398,23 @@ sts_handle_cfg (vcl_test_cfg_t * rx_cfg, sock_server_conn_t * conn,
switch (rx_cfg->test)
{
- case VCL_TEST_TYPE_NONE:
+ case HS_TEST_TYPE_NONE:
sync_config_and_reply (conn, rx_cfg);
break;
- case VCL_TEST_TYPE_ECHO:
+ case HS_TEST_TYPE_ECHO:
if (socket_server_echo_af_unix_init (ssm))
goto done;
sync_config_and_reply (conn, rx_cfg);
break;
- case VCL_TEST_TYPE_BI:
- case VCL_TEST_TYPE_UNI:
+ case HS_TEST_TYPE_BI:
+ case HS_TEST_TYPE_UNI:
stream_test_server_start_stop (conn, rx_cfg);
break;
- case VCL_TEST_TYPE_EXIT:
+ case HS_TEST_TYPE_EXIT:
stinf ("Have a great day connection %d!", conn->fd);
close (conn->fd);
conn_pool_free (conn);
@@ -428,7 +424,7 @@ sts_handle_cfg (vcl_test_cfg_t * rx_cfg, sock_server_conn_t * conn,
default:
stinf ("ERROR: Unknown test type!\n");
- vcl_test_cfg_dump (rx_cfg, 0 /* is_client */ );
+ hs_test_cfg_dump (rx_cfg, 0 /* is_client */);
break;
}
@@ -439,7 +435,7 @@ done:
static int
sts_conn_expect_config (sock_server_conn_t * conn)
{
- if (conn->cfg.test == VCL_TEST_TYPE_ECHO)
+ if (conn->cfg.test == HS_TEST_TYPE_ECHO)
return 1;
return (conn->stats.rx_bytes < 128
@@ -452,7 +448,7 @@ main (int argc, char **argv)
int client_fd, rv, main_rv = 0, rx_bytes, c, v, i;
sock_server_main_t *ssm = &sock_server_main;
sock_server_conn_t *conn;
- vcl_test_cfg_t *rx_cfg;
+ hs_test_cfg_t *rx_cfg;
struct sockaddr_storage servaddr;
uint16_t port = VCL_TEST_SERVER_PORT;
uint32_t servaddr_size;
@@ -605,8 +601,8 @@ main (int argc, char **argv)
if (sts_conn_expect_config (conn))
{
- rx_cfg = (vcl_test_cfg_t *) conn->buf;
- if (rx_cfg->magic == VCL_TEST_CFG_CTRL_MAGIC)
+ rx_cfg = (hs_test_cfg_t *) conn->buf;
+ if (rx_cfg->magic == HS_TEST_CFG_CTRL_MAGIC)
{
sts_handle_cfg (rx_cfg, conn, rx_bytes);
if (!ssm->nfds)
@@ -619,8 +615,8 @@ main (int argc, char **argv)
}
}
- if ((conn->cfg.test == VCL_TEST_TYPE_UNI)
- || (conn->cfg.test == VCL_TEST_TYPE_BI))
+ if ((conn->cfg.test == HS_TEST_TYPE_UNI) ||
+ (conn->cfg.test == HS_TEST_TYPE_BI))
{
stream_test_server (conn, rx_bytes);
if (ioctl (conn->fd, FIONREAD))
diff --git a/src/plugins/hs_apps/vcl/vcl_test.h b/src/plugins/hs_apps/vcl/vcl_test.h
index 4f67e03f72b..0ce27ef84e2 100644
--- a/src/plugins/hs_apps/vcl/vcl_test.h
+++ b/src/plugins/hs_apps/vcl/vcl_test.h
@@ -16,6 +16,7 @@
#ifndef __vcl_test_h__
#define __vcl_test_h__
+#include <hs_apps/hs_test.h>
#include <netdb.h>
#include <errno.h>
#include <stdlib.h>
@@ -46,67 +47,18 @@
#define vt_atomic_add(_ptr, _val) \
__atomic_fetch_add (_ptr, _val, __ATOMIC_RELEASE)
-#define VCL_TEST_TOKEN_HELP "#H"
-#define VCL_TEST_TOKEN_EXIT "#X"
-#define VCL_TEST_TOKEN_VERBOSE "#V"
-#define VCL_TEST_TOKEN_TXBUF_SIZE "#T:"
-#define VCL_TEST_TOKEN_NUM_TEST_SESS "#I:"
-#define VCL_TEST_TOKEN_NUM_WRITES "#N:"
-#define VCL_TEST_TOKEN_RXBUF_SIZE "#R:"
-#define VCL_TEST_TOKEN_SHOW_CFG "#C"
-#define VCL_TEST_TOKEN_RUN_UNI "#U"
-#define VCL_TEST_TOKEN_RUN_BI "#B"
-
#define VCL_TEST_SERVER_PORT 22000
#define VCL_TEST_LOCALHOST_IPADDR "127.0.0.1"
-#define VCL_TEST_CFG_CTRL_MAGIC 0xfeedface
-#define VCL_TEST_CFG_NUM_WRITES_DEF 1000000
-#define VCL_TEST_CFG_TXBUF_SIZE_DEF 8192
-#define VCL_TEST_CFG_RXBUF_SIZE_DEF (64*VCL_TEST_CFG_TXBUF_SIZE_DEF)
#define VCL_TEST_CFG_BUF_SIZE_MIN 128
-#define VCL_TEST_CFG_MAX_TEST_SESS 512
+#define VCL_TEST_CFG_MAX_TEST_SESS ((uint32_t) 1e6)
+#define VCL_TEST_CFG_MAX_SELECT_SESS 512
+#define VCL_TEST_CFG_INIT_TEST_SESS 512
#define VCL_TEST_CFG_MAX_EPOLL_EVENTS 16
#define VCL_TEST_CTRL_LISTENER (~0 - 1)
#define VCL_TEST_DATA_LISTENER (~0)
#define VCL_TEST_DELAY_DISCONNECT 1
-#define VCL_TEST_SEPARATOR_STRING \
- " -----------------------------\n"
-typedef enum
-{
- VCL_TEST_TYPE_NONE,
- VCL_TEST_TYPE_ECHO,
- VCL_TEST_TYPE_UNI,
- VCL_TEST_TYPE_BI,
- VCL_TEST_TYPE_EXIT,
-} vcl_test_t;
-
-typedef enum
-{
- VCL_TEST_CMD_SYNC,
- VCL_TEST_CMD_START,
- VCL_TEST_CMD_STOP,
-} vcl_test_cmd_t;
-
-typedef struct __attribute__ ((packed))
-{
- uint32_t magic;
- uint32_t seq_num;
- uint32_t test;
- uint32_t cmd;
- uint32_t ctrl_handle;
- uint32_t num_test_sessions;
- uint32_t num_test_sessions_perq;
- uint32_t num_test_qsessions;
- uint32_t verbose;
- uint32_t address_ip6;
- uint32_t transport_udp;
- uint64_t rxbuf_size;
- uint64_t txbuf_size;
- uint64_t num_writes;
- uint64_t total_bytes;
-} vcl_test_cfg_t;
typedef struct
{
@@ -124,9 +76,10 @@ typedef struct
typedef struct vcl_test_session
{
- uint8_t is_alloc;
- uint8_t is_open;
uint8_t is_done;
+ uint8_t is_alloc : 1;
+ uint8_t is_open : 1;
+ uint8_t noblk_connect : 1;
int fd;
int (*read) (struct vcl_test_session *ts, void *buf, uint32_t buflen);
int (*write) (struct vcl_test_session *ts, void *buf, uint32_t buflen);
@@ -134,10 +87,11 @@ typedef struct vcl_test_session
uint32_t rxbuf_size;
char *txbuf;
char *rxbuf;
- vcl_test_cfg_t cfg;
+ hs_test_cfg_t cfg;
vcl_test_stats_t stats;
vcl_test_stats_t old_stats;
int session_index;
+ struct vcl_test_session *next;
vppcom_endpt_t endpt;
uint8_t ip[16];
vppcom_data_segment_t ds[2];
@@ -154,7 +108,7 @@ vcl_test_worker_index (void)
typedef struct
{
- int (*init) (vcl_test_cfg_t *cfg);
+ int (*init) (hs_test_cfg_t *cfg);
int (*open) (vcl_test_session_t *ts, vppcom_endpt_t *endpt);
int (*listen) (vcl_test_session_t *ts, vppcom_endpt_t *endpt);
int (*accept) (int listen_fd, vcl_test_session_t *ts);
@@ -172,7 +126,7 @@ typedef struct
{
const vcl_test_proto_vft_t *protos[VPPCOM_PROTO_SRTP + 1];
uint32_t ckpair_index;
- vcl_test_cfg_t cfg;
+ hs_test_cfg_t cfg;
vcl_test_wrk_t *wrk;
} vcl_test_main_t;
@@ -198,37 +152,8 @@ vcl_test_stats_accumulate (vcl_test_stats_t * accum, vcl_test_stats_t * incr)
}
static inline void
-vcl_test_cfg_init (vcl_test_cfg_t * cfg)
-{
- cfg->magic = VCL_TEST_CFG_CTRL_MAGIC;
- cfg->test = VCL_TEST_TYPE_NONE;
- cfg->ctrl_handle = ~0;
- cfg->num_test_sessions = 1;
- cfg->num_test_sessions_perq = 1;
- cfg->verbose = 0;
- cfg->rxbuf_size = VCL_TEST_CFG_RXBUF_SIZE_DEF;
- cfg->num_writes = VCL_TEST_CFG_NUM_WRITES_DEF;
- cfg->txbuf_size = VCL_TEST_CFG_TXBUF_SIZE_DEF;
- cfg->total_bytes = cfg->num_writes * cfg->txbuf_size;
-}
-
-static inline int
-vcl_test_cfg_verify (vcl_test_cfg_t * cfg, vcl_test_cfg_t * valid_cfg)
-{
- /* Note: txbuf & rxbuf on server are the same buffer,
- * so txbuf_size is not included in this check.
- */
- return ((cfg->magic == valid_cfg->magic)
- && (cfg->test == valid_cfg->test)
- && (cfg->verbose == valid_cfg->verbose)
- && (cfg->rxbuf_size == valid_cfg->rxbuf_size)
- && (cfg->num_writes == valid_cfg->num_writes)
- && (cfg->total_bytes == valid_cfg->total_bytes));
-}
-
-static inline void
-vcl_test_buf_alloc (vcl_test_cfg_t * cfg, uint8_t is_rxbuf, uint8_t ** buf,
- uint32_t * bufsize)
+vcl_test_buf_alloc (hs_test_cfg_t *cfg, uint8_t is_rxbuf, uint8_t **buf,
+ uint32_t *bufsize)
{
uint32_t alloc_size = is_rxbuf ? cfg->rxbuf_size : cfg->txbuf_size;
uint8_t *lb = realloc (*buf, (size_t) alloc_size);
@@ -269,69 +194,6 @@ vcl_test_session_buf_free (vcl_test_session_t *ts)
ts->txbuf = 0;
}
-static inline char *
-vcl_test_type_str (vcl_test_t t)
-{
- switch (t)
- {
- case VCL_TEST_TYPE_NONE:
- return "NONE";
-
- case VCL_TEST_TYPE_ECHO:
- return "ECHO";
-
- case VCL_TEST_TYPE_UNI:
- return "UNI";
-
- case VCL_TEST_TYPE_BI:
- return "BI";
-
- case VCL_TEST_TYPE_EXIT:
- return "EXIT";
-
- default:
- return "Unknown";
- }
-}
-
-static inline void
-vcl_test_cfg_dump (vcl_test_cfg_t * cfg, uint8_t is_client)
-{
- char *spc = " ";
-
- printf (" test config (%p):\n"
- VCL_TEST_SEPARATOR_STRING
- " magic: 0x%08x\n"
- " seq_num: 0x%08x\n"
- "%-5s test: %s (%d)\n"
- " ctrl handle: %d (0x%x)\n"
- "%-5s num test sockets: %u (0x%08x)\n"
- "%-5s verbose: %s (%d)\n"
- "%-5s rxbuf size: %lu (0x%08lx)\n"
- "%-5s txbuf size: %lu (0x%08lx)\n"
- "%-5s num writes: %lu (0x%08lx)\n"
- " client tx bytes: %lu (0x%08lx)\n"
- VCL_TEST_SEPARATOR_STRING,
- (void *) cfg, cfg->magic, cfg->seq_num,
- is_client && (cfg->test == VCL_TEST_TYPE_UNI) ?
- "'" VCL_TEST_TOKEN_RUN_UNI "'" :
- is_client && (cfg->test == VCL_TEST_TYPE_BI) ?
- "'" VCL_TEST_TOKEN_RUN_BI "'" : spc,
- vcl_test_type_str (cfg->test), cfg->test,
- cfg->ctrl_handle, cfg->ctrl_handle,
- is_client ? "'" VCL_TEST_TOKEN_NUM_TEST_SESS "'" : spc,
- cfg->num_test_sessions, cfg->num_test_sessions,
- is_client ? "'" VCL_TEST_TOKEN_VERBOSE "'" : spc,
- cfg->verbose ? "on" : "off", cfg->verbose,
- is_client ? "'" VCL_TEST_TOKEN_RXBUF_SIZE "'" : spc,
- cfg->rxbuf_size, cfg->rxbuf_size,
- is_client ? "'" VCL_TEST_TOKEN_TXBUF_SIZE "'" : spc,
- cfg->txbuf_size, cfg->txbuf_size,
- is_client ? "'" VCL_TEST_TOKEN_NUM_WRITES "'" : spc,
- cfg->num_writes, cfg->num_writes,
- cfg->total_bytes, cfg->total_bytes);
-}
-
static inline void
vcl_test_stats_dump (char *header, vcl_test_stats_t * stats,
uint8_t show_rx, uint8_t show_tx, uint8_t verbose)
@@ -361,31 +223,27 @@ vcl_test_stats_dump (char *header, vcl_test_stats_t * stats,
if (show_tx)
{
- printf (VCL_TEST_SEPARATOR_STRING
- " tx stats (0x%p):\n"
- VCL_TEST_SEPARATOR_STRING
+ printf (HS_TEST_SEPARATOR_STRING
+ " tx stats (0x%p):\n" HS_TEST_SEPARATOR_STRING
" writes: %lu (0x%08lx)\n"
" tx bytes: %lu (0x%08lx)\n"
" tx eagain: %u (0x%08x)\n"
" tx incomplete: %u (0x%08x)\n",
(void *) stats, stats->tx_xacts, stats->tx_xacts,
- stats->tx_bytes, stats->tx_bytes,
- stats->tx_eagain, stats->tx_eagain,
- stats->tx_incomp, stats->tx_incomp);
+ stats->tx_bytes, stats->tx_bytes, stats->tx_eagain,
+ stats->tx_eagain, stats->tx_incomp, stats->tx_incomp);
}
if (show_rx)
{
- printf (VCL_TEST_SEPARATOR_STRING
- " rx stats (0x%p):\n"
- VCL_TEST_SEPARATOR_STRING
+ printf (HS_TEST_SEPARATOR_STRING
+ " rx stats (0x%p):\n" HS_TEST_SEPARATOR_STRING
" reads: %lu (0x%08lx)\n"
" rx bytes: %lu (0x%08lx)\n"
" rx eagain: %u (0x%08x)\n"
" rx incomplete: %u (0x%08x)\n",
(void *) stats, stats->rx_xacts, stats->rx_xacts,
- stats->rx_bytes, stats->rx_bytes,
- stats->rx_eagain, stats->rx_eagain,
- stats->rx_incomp, stats->rx_incomp);
+ stats->rx_bytes, stats->rx_bytes, stats->rx_eagain,
+ stats->rx_eagain, stats->rx_incomp, stats->rx_incomp);
}
if (verbose)
printf (" start.tv_sec: %ld\n"
@@ -395,7 +253,7 @@ vcl_test_stats_dump (char *header, vcl_test_stats_t * stats,
stats->start.tv_sec, stats->start.tv_nsec,
stats->stop.tv_sec, stats->stop.tv_nsec);
- printf (VCL_TEST_SEPARATOR_STRING);
+ printf (HS_TEST_SEPARATOR_STRING);
}
static inline double
@@ -567,25 +425,18 @@ dump_help (void)
{
#define INDENT "\n "
- printf ("CLIENT: Test configuration commands:"
- INDENT VCL_TEST_TOKEN_HELP
- "\t\t\tDisplay help."
- INDENT VCL_TEST_TOKEN_EXIT
- "\t\t\tExit test client & server."
- INDENT VCL_TEST_TOKEN_SHOW_CFG
- "\t\t\tShow the current test cfg."
- INDENT VCL_TEST_TOKEN_RUN_UNI
- "\t\t\tRun the Uni-directional test."
- INDENT VCL_TEST_TOKEN_RUN_BI
- "\t\t\tRun the Bi-directional test."
- INDENT VCL_TEST_TOKEN_VERBOSE
- "\t\t\tToggle verbose setting."
- INDENT VCL_TEST_TOKEN_RXBUF_SIZE
- "<rxbuf size>\tRx buffer size (bytes)."
- INDENT VCL_TEST_TOKEN_TXBUF_SIZE
- "<txbuf size>\tTx buffer size (bytes)."
- INDENT VCL_TEST_TOKEN_NUM_WRITES
- "<# of writes>\tNumber of txbuf writes to server." "\n");
+ printf (
+ "CLIENT: Test configuration commands:" INDENT VCL_TEST_TOKEN_HELP
+ "\t\t\tDisplay help." INDENT VCL_TEST_TOKEN_EXIT
+ "\t\t\tExit test client & server." INDENT VCL_TEST_TOKEN_SHOW_CFG
+ "\t\t\tShow the current test cfg." INDENT HS_TEST_TOKEN_RUN_UNI
+ "\t\t\tRun the Uni-directional test." INDENT HS_TEST_TOKEN_RUN_BI
+ "\t\t\tRun the Bi-directional test." INDENT VCL_TEST_TOKEN_VERBOSE
+ "\t\t\tToggle verbose setting." INDENT VCL_TEST_TOKEN_RXBUF_SIZE
+ "<rxbuf size>\tRx buffer size (bytes)." INDENT VCL_TEST_TOKEN_TXBUF_SIZE
+ "<txbuf size>\tTx buffer size (bytes)." INDENT VCL_TEST_TOKEN_NUM_WRITES
+ "<# of writes>\tNumber of txbuf writes to server."
+ "\n");
}
#endif /* __vcl_test_h__ */
diff --git a/src/plugins/hs_apps/vcl/vcl_test_client.c b/src/plugins/hs_apps/vcl/vcl_test_client.c
index 4a9fb46e5b8..a4a10b562ff 100644
--- a/src/plugins/hs_apps/vcl/vcl_test_client.c
+++ b/src/plugins/hs_apps/vcl/vcl_test_client.c
@@ -26,18 +26,34 @@
#include <pthread.h>
#include <signal.h>
-typedef struct
+typedef struct vtc_worker_ vcl_test_client_worker_t;
+typedef int (vtc_worker_run_fn) (vcl_test_client_worker_t *wrk);
+
+struct vtc_worker_
{
vcl_test_session_t *sessions;
vcl_test_session_t *qsessions;
uint32_t n_sessions;
uint32_t wrk_index;
- fd_set wr_fdset;
- fd_set rd_fdset;
- int max_fd_index;
+ union
+ {
+ struct
+ {
+ fd_set wr_fdset;
+ fd_set rd_fdset;
+ int max_fd_index;
+ };
+ struct
+ {
+ uint32_t epoll_sh;
+ struct epoll_event ep_evts[VCL_TEST_CFG_MAX_EPOLL_EVENTS];
+ vcl_test_session_t *next_to_send;
+ };
+ };
pthread_t thread_handle;
- vcl_test_cfg_t cfg;
-} vcl_test_client_worker_t;
+ vtc_worker_run_fn *wrk_run_fn;
+ hs_test_cfg_t cfg;
+};
typedef struct
{
@@ -46,13 +62,17 @@ typedef struct
vppcom_endpt_t server_endpt;
uint32_t cfg_seq_num;
uint8_t dump_cfg;
- vcl_test_t post_test;
+ hs_test_t post_test;
uint8_t proto;
uint8_t incremental_stats;
uint32_t n_workers;
volatile int active_workers;
volatile int test_running;
- struct sockaddr_storage server_addr;
+ union
+ {
+ struct in_addr v4;
+ struct in6_addr v6;
+ } server_addr;
} vcl_test_client_main_t;
vcl_test_client_main_t vcl_client_main;
@@ -65,14 +85,14 @@ vcl_test_main_t vcl_test_main;
static int
vtc_cfg_sync (vcl_test_session_t * ts)
{
- vcl_test_cfg_t *rx_cfg = (vcl_test_cfg_t *) ts->rxbuf;
+ hs_test_cfg_t *rx_cfg = (hs_test_cfg_t *) ts->rxbuf;
int rx_bytes, tx_bytes;
vt_atomic_add (&ts->cfg.seq_num, 1);
if (ts->cfg.verbose)
{
vtinf ("(fd %d): Sending config to server.", ts->fd);
- vcl_test_cfg_dump (&ts->cfg, 1 /* is_client */ );
+ hs_test_cfg_dump (&ts->cfg, 1 /* is_client */);
}
tx_bytes = ts->write (ts, &ts->cfg, sizeof (ts->cfg));
if (tx_bytes < 0)
@@ -81,50 +101,48 @@ vtc_cfg_sync (vcl_test_session_t * ts)
return tx_bytes;
}
- rx_bytes = ts->read (ts, ts->rxbuf, sizeof (vcl_test_cfg_t));
+ rx_bytes = ts->read (ts, ts->rxbuf, sizeof (hs_test_cfg_t));
if (rx_bytes < 0)
return rx_bytes;
- if (rx_cfg->magic != VCL_TEST_CFG_CTRL_MAGIC)
+ if (rx_cfg->magic != HS_TEST_CFG_CTRL_MAGIC)
{
vtwrn ("(fd %d): Bad server reply cfg -- aborting!", ts->fd);
return -1;
}
- if ((rx_bytes != sizeof (vcl_test_cfg_t))
- || !vcl_test_cfg_verify (rx_cfg, &ts->cfg))
+ if ((rx_bytes != sizeof (hs_test_cfg_t)) ||
+ !hs_test_cfg_verify (rx_cfg, &ts->cfg))
{
vtwrn ("(fd %d): Invalid config received from server!", ts->fd);
- if (rx_bytes != sizeof (vcl_test_cfg_t))
+ if (rx_bytes != sizeof (hs_test_cfg_t))
{
vtinf ("\tRx bytes %d != cfg size %lu", rx_bytes,
- sizeof (vcl_test_cfg_t));
+ sizeof (hs_test_cfg_t));
}
else
{
- vcl_test_cfg_dump (rx_cfg, 1 /* is_client */ );
+ hs_test_cfg_dump (rx_cfg, 1 /* is_client */);
vtinf ("(fd %d): Valid config sent to server.", ts->fd);
- vcl_test_cfg_dump (&ts->cfg, 1 /* is_client */ );
+ hs_test_cfg_dump (&ts->cfg, 1 /* is_client */);
}
return -1;
}
if (ts->cfg.verbose)
{
vtinf ("(fd %d): Got config back from server.", ts->fd);
- vcl_test_cfg_dump (rx_cfg, 1 /* is_client */ );
+ hs_test_cfg_dump (rx_cfg, 1 /* is_client */);
}
return 0;
}
static int
-vtc_connect_test_sessions (vcl_test_client_worker_t * wrk)
+vtc_worker_alloc_sessions (vcl_test_client_worker_t *wrk)
{
- vcl_test_client_main_t *vcm = &vcl_client_main;
- vcl_test_main_t *vt = &vcl_test_main;
- const vcl_test_proto_vft_t *tp;
vcl_test_session_t *ts;
uint32_t n_test_sessions;
- int i, rv;
+ struct timespec now;
+ int i, j;
n_test_sessions = wrk->cfg.num_test_sessions;
if (n_test_sessions < 1)
@@ -148,62 +166,33 @@ vtc_connect_test_sessions (vcl_test_client_worker_t * wrk)
return errno;
}
- tp = vt->protos[vcm->proto];
+ clock_gettime (CLOCK_REALTIME, &now);
for (i = 0; i < n_test_sessions; i++)
{
ts = &wrk->sessions[i];
memset (ts, 0, sizeof (*ts));
ts->session_index = i;
+ ts->old_stats.stop = now;
ts->cfg = wrk->cfg;
vcl_test_session_buf_alloc (ts);
- rv = tp->open (&wrk->sessions[i], &vcm->server_endpt);
- if (rv < 0)
- return rv;
- }
- wrk->n_sessions = n_test_sessions;
-
-done:
- vtinf ("All test sessions (%d) connected!", n_test_sessions);
- return 0;
-}
-
-static int
-vtc_worker_test_setup (vcl_test_client_worker_t * wrk)
-{
- vcl_test_cfg_t *cfg = &wrk->cfg;
- vcl_test_session_t *ts;
- struct timespec now;
- uint32_t sidx;
- int i, j;
-
- FD_ZERO (&wrk->wr_fdset);
- FD_ZERO (&wrk->rd_fdset);
-
- clock_gettime (CLOCK_REALTIME, &now);
-
- for (i = 0; i < cfg->num_test_sessions; i++)
- {
- ts = &wrk->sessions[i];
- ts->old_stats.stop = now;
- switch (cfg->test)
+ switch (ts->cfg.test)
{
- case VCL_TEST_TYPE_UNI:
- case VCL_TEST_TYPE_BI:
+ case HS_TEST_TYPE_UNI:
+ case HS_TEST_TYPE_BI:
for (j = 0; j < ts->txbuf_size; j++)
ts->txbuf[j] = j & 0xff;
break;
default:
break;
}
-
- FD_SET (vppcom_session_index (ts->fd), &wrk->wr_fdset);
- FD_SET (vppcom_session_index (ts->fd), &wrk->rd_fdset);
- sidx = vppcom_session_index (ts->fd);
- wrk->max_fd_index = vtc_max (sidx, wrk->max_fd_index);
}
- wrk->max_fd_index += 1;
+ wrk->n_sessions = n_test_sessions;
+
+done:
+
+ vtinf ("All test sessions (%d) initialized!", n_test_sessions);
return 0;
}
@@ -227,16 +216,13 @@ vtc_worker_init (vcl_test_client_worker_t * wrk)
}
vt_atomic_add (&vcm->active_workers, 1);
}
- rv = vtc_connect_test_sessions (wrk);
+ rv = vtc_worker_alloc_sessions (wrk);
if (rv)
{
- vterr ("vtc_connect_test_sessions ()", rv);
+ vterr ("vtc_worker_alloc_sessions ()", rv);
return rv;
}
- if (vtc_worker_test_setup (wrk))
- return -1;
-
return 0;
}
@@ -253,8 +239,7 @@ vtc_accumulate_stats (vcl_test_client_worker_t * wrk,
while (__sync_lock_test_and_set (&stats_lock, 1))
;
- if (ctrl->cfg.test == VCL_TEST_TYPE_BI
- || ctrl->cfg.test == VCL_TEST_TYPE_ECHO)
+ if (ctrl->cfg.test == HS_TEST_TYPE_BI || ctrl->cfg.test == HS_TEST_TYPE_ECHO)
show_rx = 1;
for (i = 0; i < wrk->cfg.num_test_sessions; i++)
@@ -308,32 +293,90 @@ vtc_inc_stats_check (vcl_test_session_t *ts)
}
}
-static void *
-vtc_worker_loop (void *arg)
+static void
+vtc_worker_start_transfer (vcl_test_client_worker_t *wrk)
+{
+ vtinf ("Worker %u starting transfer ...", wrk->wrk_index);
+
+ if (wrk->wrk_index == 0)
+ {
+ vcl_test_client_main_t *vcm = &vcl_client_main;
+ vcl_test_session_t *ctrl = &vcm->ctrl_session;
+
+ clock_gettime (CLOCK_REALTIME, &ctrl->stats.start);
+ }
+}
+
+static int
+vtc_session_check_is_done (vcl_test_session_t *ts, uint8_t check_rx)
+{
+ if ((!check_rx && ts->stats.tx_bytes >= ts->cfg.total_bytes) ||
+ (check_rx && ts->stats.rx_bytes >= ts->cfg.total_bytes))
+ {
+ clock_gettime (CLOCK_REALTIME, &ts->stats.stop);
+ ts->is_done = 1;
+ return 1;
+ }
+ return 0;
+}
+
+static int
+vtc_worker_connect_sessions_select (vcl_test_client_worker_t *wrk)
+{
+ vcl_test_client_main_t *vcm = &vcl_client_main;
+ vcl_test_main_t *vt = &vcl_test_main;
+ const vcl_test_proto_vft_t *tp;
+ vcl_test_session_t *ts;
+ uint32_t sidx;
+ int i, rv;
+
+ tp = vt->protos[vcm->proto];
+
+ FD_ZERO (&wrk->wr_fdset);
+ FD_ZERO (&wrk->rd_fdset);
+
+ for (i = 0; i < wrk->cfg.num_test_sessions; i++)
+ {
+ ts = &wrk->sessions[i];
+
+ rv = tp->open (&wrk->sessions[i], &vcm->server_endpt);
+ if (rv < 0)
+ return rv;
+
+ FD_SET (vppcom_session_index (ts->fd), &wrk->wr_fdset);
+ FD_SET (vppcom_session_index (ts->fd), &wrk->rd_fdset);
+ sidx = vppcom_session_index (ts->fd);
+ wrk->max_fd_index = vtc_max (sidx, wrk->max_fd_index);
+ }
+ wrk->max_fd_index += 1;
+
+ vtinf ("All test sessions (%d) connected!", wrk->cfg.num_test_sessions);
+
+ return 0;
+}
+
+static int
+vtc_worker_run_select (vcl_test_client_worker_t *wrk)
{
vcl_test_client_main_t *vcm = &vcl_client_main;
- vcl_test_session_t *ctrl = &vcm->ctrl_session;
- vcl_test_client_worker_t *wrk = arg;
- uint32_t n_active_sessions;
fd_set _wfdset, *wfdset = &_wfdset;
fd_set _rfdset, *rfdset = &_rfdset;
+ uint32_t n_active_sessions;
vcl_test_session_t *ts;
int i, rv, check_rx = 0;
- rv = vtc_worker_init (wrk);
+ rv = vtc_worker_connect_sessions_select (wrk);
if (rv)
{
- vterr ("vtc_worker_init()", rv);
- return 0;
+ vterr ("vtc_worker_connect_sessions()", rv);
+ return rv;
}
- vtinf ("Starting test ...");
+ check_rx = wrk->cfg.test != HS_TEST_TYPE_UNI;
+ n_active_sessions = wrk->cfg.num_test_sessions;
- if (wrk->wrk_index == 0)
- clock_gettime (CLOCK_REALTIME, &ctrl->stats.start);
+ vtc_worker_start_transfer (wrk);
- check_rx = wrk->cfg.test != VCL_TEST_TYPE_UNI;
- n_active_sessions = wrk->cfg.num_test_sessions;
while (n_active_sessions && vcm->test_running)
{
_wfdset = wrk->wr_fdset;
@@ -344,7 +387,7 @@ vtc_worker_loop (void *arg)
if (rv < 0)
{
vterr ("vppcom_select()", rv);
- goto exit;
+ break;
}
else if (rv == 0)
continue;
@@ -355,29 +398,29 @@ vtc_worker_loop (void *arg)
if (ts->is_done)
continue;
- if (FD_ISSET (vppcom_session_index (ts->fd), rfdset)
- && ts->stats.rx_bytes < ts->cfg.total_bytes)
+ if (FD_ISSET (vppcom_session_index (ts->fd), rfdset) &&
+ ts->stats.rx_bytes < ts->cfg.total_bytes)
{
rv = ts->read (ts, ts->rxbuf, ts->rxbuf_size);
if (rv < 0)
- goto exit;
+ break;
}
- if (FD_ISSET (vppcom_session_index (ts->fd), wfdset)
- && ts->stats.tx_bytes < ts->cfg.total_bytes)
+ if (FD_ISSET (vppcom_session_index (ts->fd), wfdset) &&
+ ts->stats.tx_bytes < ts->cfg.total_bytes)
{
rv = ts->write (ts, ts->txbuf, ts->cfg.txbuf_size);
if (rv < 0)
{
vtwrn ("vppcom_test_write (%d) failed -- aborting test",
ts->fd);
- goto exit;
+ break;
}
if (vcm->incremental_stats)
vtc_inc_stats_check (ts);
}
- if ((!check_rx && ts->stats.tx_bytes >= ts->cfg.total_bytes)
- || (check_rx && ts->stats.rx_bytes >= ts->cfg.total_bytes))
+ if ((!check_rx && ts->stats.tx_bytes >= ts->cfg.total_bytes) ||
+ (check_rx && ts->stats.rx_bytes >= ts->cfg.total_bytes))
{
clock_gettime (CLOCK_REALTIME, &ts->stats.stop);
ts->is_done = 1;
@@ -385,59 +428,343 @@ vtc_worker_loop (void *arg)
}
}
}
-exit:
+
+ return 0;
+}
+
+static void
+vtc_worker_epoll_send_add (vcl_test_client_worker_t *wrk,
+ vcl_test_session_t *ts)
+{
+ if (!wrk->next_to_send)
+ {
+ wrk->next_to_send = ts;
+ }
+ else
+ {
+ ts->next = wrk->next_to_send;
+ wrk->next_to_send = ts->next;
+ }
+}
+
+static void
+vtc_worker_epoll_send_del (vcl_test_client_worker_t *wrk,
+ vcl_test_session_t *ts, vcl_test_session_t *prev)
+{
+ if (!prev)
+ {
+ wrk->next_to_send = ts->next;
+ }
+ else
+ {
+ prev->next = ts->next;
+ }
+}
+
+static int
+vtc_worker_connect_sessions_epoll (vcl_test_client_worker_t *wrk)
+{
+ vcl_test_client_main_t *vcm = &vcl_client_main;
+ vcl_test_main_t *vt = &vcl_test_main;
+ const vcl_test_proto_vft_t *tp;
+ struct timespec start, end;
+ uint32_t n_connected = 0;
+ vcl_test_session_t *ts;
+ struct epoll_event ev;
+ int i, ci = 0, rv, n_ev;
+ double diff;
+
+ tp = vt->protos[vcm->proto];
+ wrk->epoll_sh = vppcom_epoll_create ();
+
+ ev.events = EPOLLET | EPOLLOUT;
+
+ clock_gettime (CLOCK_REALTIME, &start);
+
+ while (n_connected < wrk->cfg.num_test_sessions)
+ {
+ /*
+ * Try to connect more sessions if under pending threshold
+ */
+ while ((ci - n_connected) < 16 && ci < wrk->cfg.num_test_sessions)
+ {
+ ts = &wrk->sessions[ci];
+ ts->noblk_connect = 1;
+ rv = tp->open (&wrk->sessions[ci], &vcm->server_endpt);
+ if (rv < 0)
+ {
+ vtwrn ("open: %d", rv);
+ return rv;
+ }
+
+ ev.data.u64 = ci;
+ rv = vppcom_epoll_ctl (wrk->epoll_sh, EPOLL_CTL_ADD, ts->fd, &ev);
+ if (rv < 0)
+ {
+ vtwrn ("vppcom_epoll_ctl: %d", rv);
+ return rv;
+ }
+ ci += 1;
+ }
+
+ /*
+ * Handle connected events
+ */
+ n_ev =
+ vppcom_epoll_wait (wrk->epoll_sh, wrk->ep_evts,
+ VCL_TEST_CFG_MAX_EPOLL_EVENTS, 0 /* timeout */);
+ if (n_ev < 0)
+ {
+ vterr ("vppcom_epoll_wait() returned", n_ev);
+ return -1;
+ }
+ else if (n_ev == 0)
+ {
+ continue;
+ }
+
+ for (i = 0; i < n_ev; i++)
+ {
+ ts = &wrk->sessions[wrk->ep_evts[i].data.u32];
+ if (!(wrk->ep_evts[i].events & EPOLLOUT))
+ {
+ vtwrn ("connect failed");
+ return -1;
+ }
+ if (ts->is_open)
+ {
+ vtwrn ("connection already open?");
+ return -1;
+ }
+ ts->is_open = 1;
+ n_connected += 1;
+ }
+ }
+
+ clock_gettime (CLOCK_REALTIME, &end);
+
+ diff = vcl_test_time_diff (&start, &end);
+ vtinf ("Connected (%u) connected in %.2f seconds (%u CPS)!",
+ wrk->cfg.num_test_sessions, diff,
+ (uint32_t) ((double) wrk->cfg.num_test_sessions / diff));
+
+ ev.events = EPOLLET | EPOLLIN | EPOLLOUT;
+
+ for (i = 0; i < wrk->cfg.num_test_sessions; i++)
+ {
+ ts = &wrk->sessions[i];
+
+ /* No data to be sent */
+ if (ts->cfg.total_bytes == 0)
+ {
+ n_connected -= 1;
+ clock_gettime (CLOCK_REALTIME, &ts->stats.stop);
+ ts->is_done = 1;
+ continue;
+ }
+
+ ev.data.u64 = i;
+ rv = vppcom_epoll_ctl (wrk->epoll_sh, EPOLL_CTL_MOD, ts->fd, &ev);
+ if (rv < 0)
+ {
+ vtwrn ("vppcom_epoll_ctl: %d", rv);
+ return rv;
+ }
+ vtc_worker_epoll_send_add (wrk, ts);
+ }
+
+ return n_connected;
+}
+
+static int
+vtc_worker_run_epoll (vcl_test_client_worker_t *wrk)
+{
+ vcl_test_client_main_t *vcm = &vcl_client_main;
+ uint32_t n_active_sessions, max_writes = 16, n_writes = 0;
+ vcl_test_session_t *ts, *prev = 0;
+ int i, rv, check_rx = 0, n_ev;
+
+ rv = vtc_worker_connect_sessions_epoll (wrk);
+ if (rv < 0)
+ {
+ vterr ("vtc_worker_connect_sessions()", rv);
+ return rv;
+ }
+
+ n_active_sessions = rv;
+ check_rx = wrk->cfg.test != HS_TEST_TYPE_UNI;
+
+ vtc_worker_start_transfer (wrk);
+ ts = wrk->next_to_send;
+
+ while (n_active_sessions && vcm->test_running)
+ {
+ /*
+ * Try to write
+ */
+ if (!ts)
+ {
+ ts = wrk->next_to_send;
+ if (!ts)
+ goto get_epoll_evts;
+ }
+
+ rv = ts->write (ts, ts->txbuf, ts->cfg.txbuf_size);
+ if (rv > 0)
+ {
+ if (vcm->incremental_stats)
+ vtc_inc_stats_check (ts);
+ if (vtc_session_check_is_done (ts, check_rx))
+ n_active_sessions -= 1;
+ }
+ else if (rv == 0)
+ {
+ vtc_worker_epoll_send_del (wrk, ts, prev);
+ }
+ else
+ {
+ vtwrn ("vppcom_test_write (%d) failed -- aborting test", ts->fd);
+ return -1;
+ }
+ prev = ts;
+ ts = ts->next;
+ n_writes += 1;
+
+ if (rv > 0 && n_writes < max_writes)
+ continue;
+
+ get_epoll_evts:
+
+ /*
+ * Grab new events
+ */
+
+ n_ev =
+ vppcom_epoll_wait (wrk->epoll_sh, wrk->ep_evts,
+ VCL_TEST_CFG_MAX_EPOLL_EVENTS, 0 /* timeout */);
+ if (n_ev < 0)
+ {
+ vterr ("vppcom_epoll_wait()", n_ev);
+ break;
+ }
+ else if (n_ev == 0)
+ {
+ continue;
+ }
+
+ for (i = 0; i < n_ev; i++)
+ {
+ ts = &wrk->sessions[wrk->ep_evts[i].data.u32];
+
+ if (ts->is_done)
+ continue;
+
+ if (wrk->ep_evts[i].events & (EPOLLERR | EPOLLHUP | EPOLLRDHUP))
+ {
+ vtinf ("%u finished before reading all data?", ts->fd);
+ break;
+ }
+ if ((wrk->ep_evts[i].events & EPOLLIN) &&
+ ts->stats.rx_bytes < ts->cfg.total_bytes)
+ {
+ rv = ts->read (ts, ts->rxbuf, ts->rxbuf_size);
+ if (rv < 0)
+ break;
+ if (vtc_session_check_is_done (ts, check_rx))
+ n_active_sessions -= 1;
+ }
+ if ((wrk->ep_evts[i].events & EPOLLOUT) &&
+ ts->stats.tx_bytes < ts->cfg.total_bytes)
+ {
+ vtc_worker_epoll_send_add (wrk, ts);
+ }
+ }
+
+ n_writes = 0;
+ }
+
+ return 0;
+}
+
+static inline int
+vtc_worker_run (vcl_test_client_worker_t *wrk)
+{
+ int rv;
+
+ vtinf ("Worker %u starting test ...", wrk->wrk_index);
+
+ rv = wrk->wrk_run_fn (wrk);
+
vtinf ("Worker %d done ...", wrk->wrk_index);
+
+ return rv;
+}
+
+static void *
+vtc_worker_loop (void *arg)
+{
+ vcl_test_client_main_t *vcm = &vcl_client_main;
+ vcl_test_session_t *ctrl = &vcm->ctrl_session;
+ vcl_test_client_worker_t *wrk = arg;
+
+ if (vtc_worker_init (wrk))
+ goto done;
+
+ if (vtc_worker_run (wrk))
+ goto done;
+
vtc_accumulate_stats (wrk, ctrl);
sleep (VCL_TEST_DELAY_DISCONNECT);
vtc_worker_sessions_exit (wrk);
+
+done:
+
if (wrk->wrk_index)
vt_atomic_add (&vcm->active_workers, -1);
+
return 0;
}
static void
vtc_print_stats (vcl_test_session_t * ctrl)
{
- int is_echo = ctrl->cfg.test == VCL_TEST_TYPE_ECHO;
+ int is_echo = ctrl->cfg.test == HS_TEST_TYPE_ECHO;
int show_rx = 0;
char buf[64];
- if (ctrl->cfg.test == VCL_TEST_TYPE_BI
- || ctrl->cfg.test == VCL_TEST_TYPE_ECHO)
+ if (ctrl->cfg.test == HS_TEST_TYPE_BI || ctrl->cfg.test == HS_TEST_TYPE_ECHO)
show_rx = 1;
vcl_test_stats_dump ("CLIENT RESULTS", &ctrl->stats,
show_rx, 1 /* show tx */ ,
ctrl->cfg.verbose);
- vcl_test_cfg_dump (&ctrl->cfg, 1 /* is_client */ );
+ hs_test_cfg_dump (&ctrl->cfg, 1 /* is_client */);
if (ctrl->cfg.verbose)
{
- vtinf (" ctrl session info\n"
- VCL_TEST_SEPARATOR_STRING
+ vtinf (" ctrl session info\n" HS_TEST_SEPARATOR_STRING
" fd: %d (0x%08x)\n"
" rxbuf: %p\n"
" rxbuf size: %u (0x%08x)\n"
" txbuf: %p\n"
- " txbuf size: %u (0x%08x)\n"
- VCL_TEST_SEPARATOR_STRING,
- ctrl->fd, (uint32_t) ctrl->fd,
- ctrl->rxbuf, ctrl->rxbuf_size, ctrl->rxbuf_size,
- ctrl->txbuf, ctrl->txbuf_size, ctrl->txbuf_size);
+ " txbuf size: %u (0x%08x)\n" HS_TEST_SEPARATOR_STRING,
+ ctrl->fd, (uint32_t) ctrl->fd, ctrl->rxbuf, ctrl->rxbuf_size,
+ ctrl->rxbuf_size, ctrl->txbuf, ctrl->txbuf_size,
+ ctrl->txbuf_size);
}
if (is_echo)
snprintf (buf, sizeof (buf), "Echo");
else
snprintf (buf, sizeof (buf), "%s-directional Stream",
- ctrl->cfg.test == VCL_TEST_TYPE_BI ? "Bi" : "Uni");
+ ctrl->cfg.test == HS_TEST_TYPE_BI ? "Bi" : "Uni");
}
static void
vtc_echo_client (vcl_test_client_main_t * vcm)
{
vcl_test_session_t *ctrl = &vcm->ctrl_session;
- vcl_test_cfg_t *cfg = &ctrl->cfg;
+ hs_test_cfg_t *cfg = &ctrl->cfg;
int rv;
cfg->total_bytes = strlen (ctrl->txbuf) + 1;
@@ -457,12 +784,12 @@ static void
vtc_stream_client (vcl_test_client_main_t * vcm)
{
vcl_test_session_t *ctrl = &vcm->ctrl_session;
- vcl_test_cfg_t *cfg = &ctrl->cfg;
+ hs_test_cfg_t *cfg = &ctrl->cfg;
vcl_test_client_worker_t *wrk;
uint32_t i, n_conn, n_conn_per_wrk;
vtinf ("%s-directional Stream Test Starting!",
- ctrl->cfg.test == VCL_TEST_TYPE_BI ? "Bi" : "Uni");
+ ctrl->cfg.test == HS_TEST_TYPE_BI ? "Bi" : "Uni");
memset (&ctrl->stats, 0, sizeof (vcl_test_stats_t));
cfg->total_bytes = cfg->num_writes * cfg->txbuf_size;
@@ -480,7 +807,7 @@ vtc_stream_client (vcl_test_client_main_t * vcm)
}
vcm->test_running = 1;
- ctrl->cfg.cmd = VCL_TEST_CMD_START;
+ ctrl->cfg.cmd = HS_TEST_CMD_START;
if (vtc_cfg_sync (ctrl))
{
vtwrn ("test cfg sync failed -- aborting!");
@@ -490,8 +817,12 @@ vtc_stream_client (vcl_test_client_main_t * vcm)
for (i = 1; i < vcm->n_workers; i++)
{
wrk = &vcm->workers[i];
- pthread_create (&wrk->thread_handle, NULL, vtc_worker_loop,
- (void *) wrk);
+ if (pthread_create (&wrk->thread_handle, NULL, vtc_worker_loop,
+ (void *) wrk))
+ {
+ vtwrn ("pthread_create failed -- aborting!");
+ return;
+ }
}
vtc_worker_loop (&vcm->workers[0]);
@@ -499,7 +830,7 @@ vtc_stream_client (vcl_test_client_main_t * vcm)
;
vtinf ("Sending config on ctrl session (fd %d) for stats...", ctrl->fd);
- ctrl->cfg.cmd = VCL_TEST_CMD_STOP;
+ ctrl->cfg.cmd = HS_TEST_CMD_STOP;
if (vtc_cfg_sync (ctrl))
{
vtwrn ("test cfg sync failed -- aborting!");
@@ -508,8 +839,8 @@ vtc_stream_client (vcl_test_client_main_t * vcm)
vtc_print_stats (ctrl);
- ctrl->cfg.cmd = VCL_TEST_CMD_SYNC;
- ctrl->cfg.test = VCL_TEST_TYPE_ECHO;
+ ctrl->cfg.cmd = HS_TEST_CMD_SYNC;
+ ctrl->cfg.test = HS_TEST_TYPE_ECHO;
ctrl->cfg.total_bytes = 0;
if (vtc_cfg_sync (ctrl))
vtwrn ("post-test cfg sync failed!");
@@ -529,7 +860,7 @@ cfg_txbuf_size_set (void)
ctrl->cfg.total_bytes = ctrl->cfg.num_writes * ctrl->cfg.txbuf_size;
vcl_test_buf_alloc (&ctrl->cfg, 0 /* is_rxbuf */ ,
(uint8_t **) & ctrl->txbuf, &ctrl->txbuf_size);
- vcl_test_cfg_dump (&ctrl->cfg, 1 /* is_client */ );
+ hs_test_cfg_dump (&ctrl->cfg, 1 /* is_client */);
}
else
vtwrn ("Invalid txbuf size (%lu) < minimum buf size (%u)!",
@@ -548,7 +879,7 @@ cfg_num_writes_set (void)
{
ctrl->cfg.num_writes = num_writes;
ctrl->cfg.total_bytes = ctrl->cfg.num_writes * ctrl->cfg.txbuf_size;
- vcl_test_cfg_dump (&ctrl->cfg, 1 /* is_client */ );
+ hs_test_cfg_dump (&ctrl->cfg, 1 /* is_client */);
}
else
{
@@ -568,7 +899,7 @@ cfg_num_test_sessions_set (void)
(num_test_sessions <= VCL_TEST_CFG_MAX_TEST_SESS))
{
ctrl->cfg.num_test_sessions = num_test_sessions;
- vcl_test_cfg_dump (&ctrl->cfg, 1 /* is_client */ );
+ hs_test_cfg_dump (&ctrl->cfg, 1 /* is_client */);
}
else
{
@@ -590,7 +921,7 @@ cfg_rxbuf_size_set (void)
ctrl->cfg.rxbuf_size = rxbuf_size;
vcl_test_buf_alloc (&ctrl->cfg, 1 /* is_rxbuf */ ,
(uint8_t **) & ctrl->rxbuf, &ctrl->rxbuf_size);
- vcl_test_cfg_dump (&ctrl->cfg, 1 /* is_client */ );
+ hs_test_cfg_dump (&ctrl->cfg, 1 /* is_client */);
}
else
vtwrn ("Invalid rxbuf size (%lu) < minimum buf size (%u)!",
@@ -604,20 +935,19 @@ cfg_verbose_toggle (void)
vcl_test_session_t *ctrl = &vcm->ctrl_session;
ctrl->cfg.verbose = ctrl->cfg.verbose ? 0 : 1;
- vcl_test_cfg_dump (&ctrl->cfg, 1 /* is_client */ );
-
+ hs_test_cfg_dump (&ctrl->cfg, 1 /* is_client */);
}
-static vcl_test_t
+static hs_test_t
parse_input ()
{
vcl_test_client_main_t *vcm = &vcl_client_main;
vcl_test_session_t *ctrl = &vcm->ctrl_session;
- vcl_test_t rv = VCL_TEST_TYPE_NONE;
+ hs_test_t rv = HS_TEST_TYPE_NONE;
if (!strncmp (VCL_TEST_TOKEN_EXIT, ctrl->txbuf,
strlen (VCL_TEST_TOKEN_EXIT)))
- rv = VCL_TEST_TYPE_EXIT;
+ rv = HS_TEST_TYPE_EXIT;
else if (!strncmp (VCL_TEST_TOKEN_HELP, ctrl->txbuf,
strlen (VCL_TEST_TOKEN_HELP)))
@@ -647,16 +977,16 @@ parse_input ()
strlen (VCL_TEST_TOKEN_RXBUF_SIZE)))
cfg_rxbuf_size_set ();
- else if (!strncmp (VCL_TEST_TOKEN_RUN_UNI, ctrl->txbuf,
- strlen (VCL_TEST_TOKEN_RUN_UNI)))
- rv = ctrl->cfg.test = VCL_TEST_TYPE_UNI;
+ else if (!strncmp (HS_TEST_TOKEN_RUN_UNI, ctrl->txbuf,
+ strlen (HS_TEST_TOKEN_RUN_UNI)))
+ rv = ctrl->cfg.test = HS_TEST_TYPE_UNI;
- else if (!strncmp (VCL_TEST_TOKEN_RUN_BI, ctrl->txbuf,
- strlen (VCL_TEST_TOKEN_RUN_BI)))
- rv = ctrl->cfg.test = VCL_TEST_TYPE_BI;
+ else if (!strncmp (HS_TEST_TOKEN_RUN_BI, ctrl->txbuf,
+ strlen (HS_TEST_TOKEN_RUN_BI)))
+ rv = ctrl->cfg.test = HS_TEST_TYPE_BI;
else
- rv = VCL_TEST_TYPE_ECHO;
+ rv = HS_TEST_TYPE_ECHO;
return rv;
}
@@ -682,6 +1012,7 @@ print_usage_and_exit (void)
" -T <txbuf-size> Test Cfg: tx buffer size.\n"
" -U Run Uni-directional test.\n"
" -B Run Bi-directional test.\n"
+ " -b <bytes> Total number of bytes transferred\n"
" -V Verbose mode.\n"
" -I <N> Use N sessions.\n"
" -s <N> Use N sessions.\n"
@@ -697,7 +1028,7 @@ vtc_process_opts (vcl_test_client_main_t * vcm, int argc, char **argv)
int c, v;
opterr = 0;
- while ((c = getopt (argc, argv, "chnp:w:XE:I:N:R:T:UBV6DLs:q:S")) != -1)
+ while ((c = getopt (argc, argv, "chnp:w:xXE:I:N:R:T:b:UBV6DLs:q:S")) != -1)
switch (c)
{
case 'c':
@@ -752,7 +1083,11 @@ vtc_process_opts (vcl_test_client_main_t * vcm, int argc, char **argv)
break;
case 'X':
- vcm->post_test = VCL_TEST_TYPE_EXIT;
+ vcm->post_test = HS_TEST_TYPE_EXIT;
+ break;
+
+ case 'x':
+ vcm->post_test = HS_TEST_TYPE_NONE;
break;
case 'E':
@@ -763,7 +1098,7 @@ vtc_process_opts (vcl_test_client_main_t * vcm, int argc, char **argv)
print_usage_and_exit ();
}
strncpy (ctrl->txbuf, optarg, ctrl->txbuf_size);
- ctrl->cfg.test = VCL_TEST_TYPE_ECHO;
+ ctrl->cfg.test = HS_TEST_TYPE_ECHO;
break;
case 'N':
@@ -822,13 +1157,28 @@ vtc_process_opts (vcl_test_client_main_t * vcm, int argc, char **argv)
print_usage_and_exit ();
}
break;
+ case 'b':
+ if (sscanf (optarg, "0x%lu", &ctrl->cfg.total_bytes) != 1)
+ if (sscanf (optarg, "%ld", &ctrl->cfg.total_bytes) != 1)
+ {
+ vtwrn ("Invalid value for option -%c!", c);
+ print_usage_and_exit ();
+ }
+ if (ctrl->cfg.total_bytes % ctrl->cfg.txbuf_size)
+ {
+ vtwrn ("total bytes must be mutliple of txbuf size(0x%lu)!",
+ ctrl->cfg.txbuf_size);
+ print_usage_and_exit ();
+ }
+ ctrl->cfg.num_writes = ctrl->cfg.total_bytes / ctrl->cfg.txbuf_size;
+ break;
case 'U':
- ctrl->cfg.test = VCL_TEST_TYPE_UNI;
+ ctrl->cfg.test = HS_TEST_TYPE_UNI;
break;
case 'B':
- ctrl->cfg.test = VCL_TEST_TYPE_BI;
+ ctrl->cfg.test = HS_TEST_TYPE_BI;
break;
case 'V':
@@ -882,9 +1232,9 @@ vtc_process_opts (vcl_test_client_main_t * vcm, int argc, char **argv)
print_usage_and_exit ();
}
- if (argc < (optind + 2))
+ if (argc > (optind + 2))
{
- vtwrn ("Insufficient number of arguments!");
+ vtwrn ("Invalid number of arguments!");
print_usage_and_exit ();
}
@@ -895,26 +1245,25 @@ vtc_process_opts (vcl_test_client_main_t * vcm, int argc, char **argv)
memset (&vcm->server_addr, 0, sizeof (vcm->server_addr));
if (ctrl->cfg.address_ip6)
{
- struct sockaddr_in6 *sddr6 = (struct sockaddr_in6 *) &vcm->server_addr;
- sddr6->sin6_family = AF_INET6;
- inet_pton (AF_INET6, argv[optind++], &(sddr6->sin6_addr));
- sddr6->sin6_port = htons (atoi (argv[optind]));
+ struct in6_addr *in6 = &vcm->server_addr.v6;
+ inet_pton (AF_INET6, argv[optind++], in6);
vcm->server_endpt.is_ip4 = 0;
- vcm->server_endpt.ip = (uint8_t *) & sddr6->sin6_addr;
- vcm->server_endpt.port = (uint16_t) sddr6->sin6_port;
+ vcm->server_endpt.ip = (uint8_t *) in6;
}
else
{
- struct sockaddr_in *saddr4 = (struct sockaddr_in *) &vcm->server_addr;
- saddr4->sin_family = AF_INET;
- inet_pton (AF_INET, argv[optind++], &(saddr4->sin_addr));
- saddr4->sin_port = htons (atoi (argv[optind]));
+ struct in_addr *in4 = &vcm->server_addr.v4;
+ inet_pton (AF_INET, argv[optind++], in4);
vcm->server_endpt.is_ip4 = 1;
- vcm->server_endpt.ip = (uint8_t *) & saddr4->sin_addr;
- vcm->server_endpt.port = (uint16_t) saddr4->sin_port;
+ vcm->server_endpt.ip = (uint8_t *) in4;
}
+
+ if (argc == optind + 1)
+ vcm->server_endpt.port = htons (atoi (argv[optind]));
+ else
+ vcm->server_endpt.port = htons (VCL_TEST_SERVER_PORT);
}
static void
@@ -944,10 +1293,14 @@ vtc_ctrl_session_exit (void)
vcl_test_session_t *ctrl = &vcm->ctrl_session;
int verbose = ctrl->cfg.verbose;
- ctrl->cfg.test = VCL_TEST_TYPE_EXIT;
+  /* Only the client exits; the server can keep accepting new connections */
+ if (vcm->post_test == HS_TEST_TYPE_EXIT_CLIENT)
+ return;
+
+ ctrl->cfg.test = HS_TEST_TYPE_EXIT;
vtinf ("(fd %d): Sending exit cfg to server...", ctrl->fd);
if (verbose)
- vcl_test_cfg_dump (&ctrl->cfg, 1 /* is_client */);
+ hs_test_cfg_dump (&ctrl->cfg, 1 /* is_client */);
(void) vcl_test_write (ctrl, (uint8_t *) &ctrl->cfg, sizeof (ctrl->cfg));
sleep (1);
}
@@ -976,7 +1329,7 @@ vtc_ctrl_session_init (vcl_test_client_main_t *vcm, vcl_test_session_t *ctrl)
ctrl->read = vcl_test_read;
ctrl->write = vcl_test_write;
- ctrl->cfg.cmd = VCL_TEST_CMD_SYNC;
+ ctrl->cfg.cmd = HS_TEST_CMD_SYNC;
rv = vtc_cfg_sync (ctrl);
if (rv)
{
@@ -984,7 +1337,7 @@ vtc_ctrl_session_init (vcl_test_client_main_t *vcm, vcl_test_session_t *ctrl)
return rv;
}
- ctrl->cfg.ctrl_handle = ((vcl_test_cfg_t *) ctrl->rxbuf)->ctrl_handle;
+ ctrl->cfg.ctrl_handle = ((hs_test_cfg_t *) ctrl->rxbuf)->ctrl_handle;
memset (&ctrl->stats, 0, sizeof (ctrl->stats));
return 0;
@@ -1015,6 +1368,24 @@ vt_incercept_sigs (void)
}
}
+static void
+vtc_alloc_workers (vcl_test_client_main_t *vcm)
+{
+ vcl_test_main_t *vt = &vcl_test_main;
+ vtc_worker_run_fn *run_fn;
+
+ vcm->workers = calloc (vcm->n_workers, sizeof (vcl_test_client_worker_t));
+ vt->wrk = calloc (vcm->n_workers, sizeof (vcl_test_wrk_t));
+
+ if (vcm->ctrl_session.cfg.num_test_sessions > VCL_TEST_CFG_MAX_SELECT_SESS)
+ run_fn = vtc_worker_run_epoll;
+ else
+ run_fn = vtc_worker_run_select;
+
+ for (int i = 0; i < vcm->n_workers; i++)
+ vcm->workers[i].wrk_run_fn = run_fn;
+}
+
int
main (int argc, char **argv)
{
@@ -1024,13 +1395,14 @@ main (int argc, char **argv)
int rv;
vcm->n_workers = 1;
- vcl_test_cfg_init (&ctrl->cfg);
+ vcm->post_test = HS_TEST_TYPE_EXIT_CLIENT;
+
+ hs_test_cfg_init (&ctrl->cfg);
+ vt_incercept_sigs ();
vcl_test_session_buf_alloc (ctrl);
vtc_process_opts (vcm, argc, argv);
- vt_incercept_sigs ();
- vcm->workers = calloc (vcm->n_workers, sizeof (vcl_test_client_worker_t));
- vt->wrk = calloc (vcm->n_workers, sizeof (vcl_test_wrk_t));
+ vtc_alloc_workers (vcm);
rv = vppcom_app_create ("vcl_test_client");
if (rv < 0)
@@ -1038,62 +1410,67 @@ main (int argc, char **argv)
/* Protos like tls/dtls/quic need init */
if (vt->protos[vcm->proto]->init)
- vt->protos[vcm->proto]->init (&ctrl->cfg);
+ {
+ rv = vt->protos[vcm->proto]->init (&ctrl->cfg);
+ if (rv)
+ vtfail ("client init failed", rv);
+ }
if ((rv = vtc_ctrl_session_init (vcm, ctrl)))
vtfail ("vppcom_session_create() ctrl session", rv);
/* Update ctrl port to data port */
- vcm->server_endpt.port += 1;
+ vcm->server_endpt.port = hs_make_data_port (vcm->server_endpt.port);
- while (ctrl->cfg.test != VCL_TEST_TYPE_EXIT)
+ while (ctrl->cfg.test != HS_TEST_TYPE_EXIT)
{
if (vcm->dump_cfg)
{
- vcl_test_cfg_dump (&ctrl->cfg, 1 /* is_client */ );
+ hs_test_cfg_dump (&ctrl->cfg, 1 /* is_client */);
vcm->dump_cfg = 0;
}
switch (ctrl->cfg.test)
{
- case VCL_TEST_TYPE_ECHO:
+ case HS_TEST_TYPE_ECHO:
vtc_echo_client (vcm);
break;
- case VCL_TEST_TYPE_UNI:
- case VCL_TEST_TYPE_BI:
+ case HS_TEST_TYPE_UNI:
+ case HS_TEST_TYPE_BI:
vtc_stream_client (vcm);
break;
- case VCL_TEST_TYPE_EXIT:
+ case HS_TEST_TYPE_EXIT:
continue;
- case VCL_TEST_TYPE_NONE:
+ case HS_TEST_TYPE_NONE:
default:
break;
}
switch (vcm->post_test)
{
- case VCL_TEST_TYPE_EXIT:
+ case HS_TEST_TYPE_EXIT:
+ case HS_TEST_TYPE_EXIT_CLIENT:
switch (ctrl->cfg.test)
{
- case VCL_TEST_TYPE_EXIT:
- case VCL_TEST_TYPE_UNI:
- case VCL_TEST_TYPE_BI:
- case VCL_TEST_TYPE_ECHO:
- ctrl->cfg.test = VCL_TEST_TYPE_EXIT;
+ case HS_TEST_TYPE_EXIT:
+ case HS_TEST_TYPE_UNI:
+ case HS_TEST_TYPE_BI:
+ case HS_TEST_TYPE_ECHO:
+ ctrl->cfg.test = HS_TEST_TYPE_EXIT;
continue;
- case VCL_TEST_TYPE_NONE:
+ case HS_TEST_TYPE_NONE:
default:
break;
}
break;
- case VCL_TEST_TYPE_NONE:
- case VCL_TEST_TYPE_ECHO:
- case VCL_TEST_TYPE_UNI:
- case VCL_TEST_TYPE_BI:
+ case HS_TEST_TYPE_NONE:
+ case HS_TEST_TYPE_ECHO:
+ case HS_TEST_TYPE_UNI:
+ case HS_TEST_TYPE_BI:
default:
break;
}
diff --git a/src/plugins/hs_apps/vcl/vcl_test_protos.c b/src/plugins/hs_apps/vcl/vcl_test_protos.c
index 60ee09265c9..cd1ac2b24f4 100644
--- a/src/plugins/hs_apps/vcl/vcl_test_protos.c
+++ b/src/plugins/hs_apps/vcl/vcl_test_protos.c
@@ -21,16 +21,15 @@ vt_tcp_connect (vcl_test_session_t *ts, vppcom_endpt_t *endpt)
uint32_t flags, flen;
int rv;
- ts->fd = vppcom_session_create (VPPCOM_PROTO_TCP, 0 /* is_nonblocking */);
+ ts->fd = vppcom_session_create (VPPCOM_PROTO_TCP, ts->noblk_connect);
if (ts->fd < 0)
{
vterr ("vppcom_session_create()", ts->fd);
return ts->fd;
}
- /* Connect is blocking */
rv = vppcom_session_connect (ts->fd, endpt);
- if (rv < 0)
+ if (rv < 0 && rv != VPPCOM_EINPROGRESS)
{
vterr ("vppcom_session_connect()", rv);
return rv;
@@ -38,10 +37,14 @@ vt_tcp_connect (vcl_test_session_t *ts, vppcom_endpt_t *endpt)
ts->read = vcl_test_read;
ts->write = vcl_test_write;
- flags = O_NONBLOCK;
- flen = sizeof (flags);
- vppcom_session_attr (ts->fd, VPPCOM_ATTR_SET_FLAGS, &flags, &flen);
- vtinf ("Test session %d (fd %d) connected.", ts->session_index, ts->fd);
+
+ if (!ts->noblk_connect)
+ {
+ flags = O_NONBLOCK;
+ flen = sizeof (flags);
+ vppcom_session_attr (ts->fd, VPPCOM_ATTR_SET_FLAGS, &flags, &flen);
+ vtinf ("Test session %d (fd %d) connected.", ts->session_index, ts->fd);
+ }
return 0;
}
@@ -108,16 +111,15 @@ vt_udp_connect (vcl_test_session_t *ts, vppcom_endpt_t *endpt)
uint32_t flags, flen;
int rv;
- ts->fd = vppcom_session_create (VPPCOM_PROTO_UDP, 0 /* is_nonblocking */);
+ ts->fd = vppcom_session_create (VPPCOM_PROTO_UDP, ts->noblk_connect);
if (ts->fd < 0)
{
vterr ("vppcom_session_create()", ts->fd);
return ts->fd;
}
- /* Connect is blocking */
rv = vppcom_session_connect (ts->fd, endpt);
- if (rv < 0)
+ if (rv < 0 && rv != VPPCOM_EINPROGRESS)
{
vterr ("vppcom_session_connect()", rv);
return rv;
@@ -125,10 +127,14 @@ vt_udp_connect (vcl_test_session_t *ts, vppcom_endpt_t *endpt)
ts->read = vcl_test_read;
ts->write = vcl_test_write;
- flags = O_NONBLOCK;
- flen = sizeof (flags);
- vppcom_session_attr (ts->fd, VPPCOM_ATTR_SET_FLAGS, &flags, &flen);
- vtinf ("Test session %d (fd %d) connected.", ts->session_index, ts->fd);
+
+ if (!ts->noblk_connect)
+ {
+ flags = O_NONBLOCK;
+ flen = sizeof (flags);
+ vppcom_session_attr (ts->fd, VPPCOM_ATTR_SET_FLAGS, &flags, &flen);
+ vtinf ("Test session %d (fd %d) connected.", ts->session_index, ts->fd);
+ }
return 0;
}
@@ -270,7 +276,7 @@ vt_add_cert_key_pair ()
}
static int
-vt_tls_init (vcl_test_cfg_t *cfg)
+vt_tls_init (hs_test_cfg_t *cfg)
{
return vt_add_cert_key_pair ();
}
@@ -282,7 +288,7 @@ vt_tls_connect (vcl_test_session_t *ts, vppcom_endpt_t *endpt)
uint32_t flags, flen, ckp_len;
int rv;
- ts->fd = vppcom_session_create (VPPCOM_PROTO_TLS, 0 /* is_nonblocking */);
+ ts->fd = vppcom_session_create (VPPCOM_PROTO_TLS, ts->noblk_connect);
if (ts->fd < 0)
{
vterr ("vppcom_session_create()", ts->fd);
@@ -293,9 +299,8 @@ vt_tls_connect (vcl_test_session_t *ts, vppcom_endpt_t *endpt)
vppcom_session_attr (ts->fd, VPPCOM_ATTR_SET_CKPAIR, &vt->ckpair_index,
&ckp_len);
- /* Connect is blocking */
rv = vppcom_session_connect (ts->fd, endpt);
- if (rv < 0)
+ if (rv < 0 && rv != VPPCOM_EINPROGRESS)
{
vterr ("vppcom_session_connect()", rv);
return rv;
@@ -303,10 +308,14 @@ vt_tls_connect (vcl_test_session_t *ts, vppcom_endpt_t *endpt)
ts->read = vcl_test_read;
ts->write = vcl_test_write;
- flags = O_NONBLOCK;
- flen = sizeof (flags);
- vppcom_session_attr (ts->fd, VPPCOM_ATTR_SET_FLAGS, &flags, &flen);
- vtinf ("Test session %d (fd %d) connected.", ts->session_index, ts->fd);
+
+ if (!ts->noblk_connect)
+ {
+ flags = O_NONBLOCK;
+ flen = sizeof (flags);
+ vppcom_session_attr (ts->fd, VPPCOM_ATTR_SET_FLAGS, &flags, &flen);
+ vtinf ("Test session %d (fd %d) connected.", ts->session_index, ts->fd);
+ }
return 0;
}
@@ -375,7 +384,7 @@ static const vcl_test_proto_vft_t vcl_test_tls = {
VCL_TEST_REGISTER_PROTO (VPPCOM_PROTO_TLS, vcl_test_tls);
static int
-vt_dtls_init (vcl_test_cfg_t *cfg)
+vt_dtls_init (hs_test_cfg_t *cfg)
{
return vt_add_cert_key_pair ();
}
@@ -387,7 +396,7 @@ vt_dtls_connect (vcl_test_session_t *ts, vppcom_endpt_t *endpt)
uint32_t flags, flen, ckp_len;
int rv;
- ts->fd = vppcom_session_create (VPPCOM_PROTO_DTLS, 0 /* is_nonblocking */);
+ ts->fd = vppcom_session_create (VPPCOM_PROTO_DTLS, ts->noblk_connect);
if (ts->fd < 0)
{
vterr ("vppcom_session_create()", ts->fd);
@@ -398,9 +407,8 @@ vt_dtls_connect (vcl_test_session_t *ts, vppcom_endpt_t *endpt)
vppcom_session_attr (ts->fd, VPPCOM_ATTR_SET_CKPAIR, &vt->ckpair_index,
&ckp_len);
- /* Connect is blocking */
rv = vppcom_session_connect (ts->fd, endpt);
- if (rv < 0)
+ if (rv < 0 && rv != VPPCOM_EINPROGRESS)
{
vterr ("vppcom_session_connect()", rv);
return rv;
@@ -408,10 +416,14 @@ vt_dtls_connect (vcl_test_session_t *ts, vppcom_endpt_t *endpt)
ts->read = vcl_test_read;
ts->write = vcl_test_write;
- flags = O_NONBLOCK;
- flen = sizeof (flags);
- vppcom_session_attr (ts->fd, VPPCOM_ATTR_SET_FLAGS, &flags, &flen);
- vtinf ("Test session %d (fd %d) connected.", ts->session_index, ts->fd);
+
+ if (!ts->noblk_connect)
+ {
+ flags = O_NONBLOCK;
+ flen = sizeof (flags);
+ vppcom_session_attr (ts->fd, VPPCOM_ATTR_SET_FLAGS, &flags, &flen);
+ vtinf ("Test session %d (fd %d) connected.", ts->session_index, ts->fd);
+ }
return 0;
}
@@ -480,7 +492,7 @@ static const vcl_test_proto_vft_t vcl_test_dtls = {
VCL_TEST_REGISTER_PROTO (VPPCOM_PROTO_DTLS, vcl_test_dtls);
static int
-vt_quic_init (vcl_test_cfg_t *cfg)
+vt_quic_init (hs_test_cfg_t *cfg)
{
vcl_test_main_t *vt = &vcl_test_main;
@@ -568,7 +580,7 @@ vt_quic_connect (vcl_test_session_t *ts, vppcom_endpt_t *endpt)
/* Make sure qsessions are initialized */
vt_quic_maybe_init_wrk (vt, wrk, endpt);
- ts->fd = vppcom_session_create (VPPCOM_PROTO_QUIC, 0 /* is_nonblocking */);
+ ts->fd = vppcom_session_create (VPPCOM_PROTO_QUIC, ts->noblk_connect);
if (ts->fd < 0)
{
vterr ("vppcom_session_create()", ts->fd);
@@ -579,21 +591,23 @@ vt_quic_connect (vcl_test_session_t *ts, vppcom_endpt_t *endpt)
tq = &wrk->qsessions[ts->session_index / vt->cfg.num_test_sessions_perq];
rv = vppcom_session_stream_connect (ts->fd, tq->fd);
- if (rv < 0)
+ if (rv < 0 && rv != VPPCOM_EINPROGRESS)
{
vterr ("vppcom_session_stream_connect()", rv);
return rv;
}
- flags = O_NONBLOCK;
- flen = sizeof (flags);
- vppcom_session_attr (ts->fd, VPPCOM_ATTR_SET_FLAGS, &flags, &flen);
-
ts->read = vcl_test_read;
ts->write = vcl_test_write;
- vtinf ("Test (quic stream) session %d (fd %d) connected.", ts->session_index,
- ts->fd);
+ if (!ts->noblk_connect)
+ {
+ flags = O_NONBLOCK;
+ flen = sizeof (flags);
+ vppcom_session_attr (ts->fd, VPPCOM_ATTR_SET_FLAGS, &flags, &flen);
+ vtinf ("Test (quic stream) session %d (fd %d) connected.",
+ ts->session_index, ts->fd);
+ }
return 0;
}
@@ -864,7 +878,7 @@ vt_srtp_connect (vcl_test_session_t *ts, vppcom_endpt_t *endpt)
uint32_t flags, flen;
int rv;
- ts->fd = vppcom_session_create (VPPCOM_PROTO_SRTP, 0 /* is_nonblocking */);
+ ts->fd = vppcom_session_create (VPPCOM_PROTO_SRTP, ts->noblk_connect);
if (ts->fd < 0)
{
vterr ("vppcom_session_create()", ts->fd);
@@ -873,9 +887,8 @@ vt_srtp_connect (vcl_test_session_t *ts, vppcom_endpt_t *endpt)
vt_session_add_srtp_policy (ts, 1 /* is connect */);
- /* Connect is blocking */
rv = vppcom_session_connect (ts->fd, endpt);
- if (rv < 0)
+ if (rv < 0 && rv != VPPCOM_EINPROGRESS)
{
vterr ("vppcom_session_connect()", rv);
return rv;
@@ -883,10 +896,14 @@ vt_srtp_connect (vcl_test_session_t *ts, vppcom_endpt_t *endpt)
ts->read = vt_srtp_read;
ts->write = vt_srtp_write;
- flags = O_NONBLOCK;
- flen = sizeof (flags);
- vppcom_session_attr (ts->fd, VPPCOM_ATTR_SET_FLAGS, &flags, &flen);
- vtinf ("Test session %d (fd %d) connected.", ts->session_index, ts->fd);
+
+ if (!ts->noblk_connect)
+ {
+ flags = O_NONBLOCK;
+ flen = sizeof (flags);
+ vppcom_session_attr (ts->fd, VPPCOM_ATTR_SET_FLAGS, &flags, &flen);
+ vtinf ("Test session %d (fd %d) connected.", ts->session_index, ts->fd);
+ }
vt_srtp_session_init (ts, 1 /* is connect */);
diff --git a/src/plugins/hs_apps/vcl/vcl_test_server.c b/src/plugins/hs_apps/vcl/vcl_test_server.c
index 93c244484c8..d17a2089ba7 100644
--- a/src/plugins/hs_apps/vcl/vcl_test_server.c
+++ b/src/plugins/hs_apps/vcl/vcl_test_server.c
@@ -28,6 +28,17 @@
#include <vppinfra/mem.h>
#include <pthread.h>
+/*
+ * XXX: Unfortunately libepoll-shim requires some hacks to work, one of these
+ * defines 'close' as a macro. This collides with vcl test callback 'close'.
+ * Undef the 'close' macro on FreeBSD if it exists.
+ */
+#ifdef __FreeBSD__
+#ifdef close
+#undef close
+#endif
+#endif /* __FreeBSD__ */
+
typedef struct
{
uint16_t port;
@@ -106,7 +117,7 @@ again:
conn->endpt.ip = wrk->conn_pool[i].ip;
conn->is_alloc = 1;
conn->session_index = i;
- vcl_test_cfg_init (&conn->cfg);
+ hs_test_cfg_init (&conn->cfg);
return (&wrk->conn_pool[i]);
}
}
@@ -130,7 +141,7 @@ conn_pool_free (vcl_test_session_t *ts)
}
static inline void
-sync_config_and_reply (vcl_test_session_t *conn, vcl_test_cfg_t *rx_cfg)
+sync_config_and_reply (vcl_test_session_t *conn, hs_test_cfg_t *rx_cfg)
{
conn->cfg = *rx_cfg;
vcl_test_buf_alloc (&conn->cfg, 1 /* is_rxbuf */, (uint8_t **) &conn->rxbuf,
@@ -140,7 +151,7 @@ sync_config_and_reply (vcl_test_session_t *conn, vcl_test_cfg_t *rx_cfg)
if (conn->cfg.verbose)
{
vtinf ("(fd %d): Replying to cfg message!\n", conn->fd);
- vcl_test_cfg_dump (&conn->cfg, 0 /* is_client */ );
+ hs_test_cfg_dump (&conn->cfg, 0 /* is_client */);
}
(void) vcl_test_write (conn, &conn->cfg, sizeof (conn->cfg));
}
@@ -185,14 +196,14 @@ vts_wrk_cleanup_all (vcl_test_server_worker_t *wrk)
static void
vts_test_cmd (vcl_test_server_worker_t *wrk, vcl_test_session_t *conn,
- vcl_test_cfg_t *rx_cfg)
+ hs_test_cfg_t *rx_cfg)
{
- u8 is_bi = rx_cfg->test == VCL_TEST_TYPE_BI;
+ u8 is_bi = rx_cfg->test == HS_TEST_TYPE_BI;
vcl_test_session_t *tc;
char buf[64];
int i;
- if (rx_cfg->cmd == VCL_TEST_CMD_STOP)
+ if (rx_cfg->cmd == HS_TEST_CMD_STOP)
{
struct timespec stop;
clock_gettime (CLOCK_REALTIME, &stop);
@@ -232,25 +243,25 @@ vts_test_cmd (vcl_test_server_worker_t *wrk, vcl_test_session_t *conn,
vcl_test_stats_dump ("SERVER RESULTS", &conn->stats, 1 /* show_rx */ ,
is_bi /* show_tx */ , conn->cfg.verbose);
- vcl_test_cfg_dump (&conn->cfg, 0 /* is_client */ );
+ hs_test_cfg_dump (&conn->cfg, 0 /* is_client */);
if (conn->cfg.verbose)
{
- vtinf (" vcl server main\n" VCL_TEST_SEPARATOR_STRING
+ vtinf (" vcl server main\n" HS_TEST_SEPARATOR_STRING
" buf: %p\n"
- " buf size: %u (0x%08x)\n" VCL_TEST_SEPARATOR_STRING,
+ " buf size: %u (0x%08x)\n" HS_TEST_SEPARATOR_STRING,
conn->rxbuf, conn->rxbuf_size, conn->rxbuf_size);
}
sync_config_and_reply (conn, rx_cfg);
memset (&conn->stats, 0, sizeof (conn->stats));
}
- else if (rx_cfg->cmd == VCL_TEST_CMD_SYNC)
+ else if (rx_cfg->cmd == HS_TEST_CMD_SYNC)
{
rx_cfg->ctrl_handle = conn->fd;
vtinf ("Set control fd %d for test!", conn->fd);
sync_config_and_reply (conn, rx_cfg);
}
- else if (rx_cfg->cmd == VCL_TEST_CMD_START)
+ else if (rx_cfg->cmd == HS_TEST_CMD_START)
{
vtinf ("Starting %s-directional Stream Test (fd %d)!",
is_bi ? "Bi" : "Uni", conn->fd);
@@ -268,7 +279,7 @@ vts_server_process_rx (vcl_test_session_t *conn, int rx_bytes)
{
vcl_test_server_main_t *vsm = &vcl_server_main;
- if (conn->cfg.test == VCL_TEST_TYPE_BI)
+ if (conn->cfg.test == HS_TEST_TYPE_BI)
{
if (vsm->use_ds)
{
@@ -373,8 +384,9 @@ vts_accept_client (vcl_test_server_worker_t *wrk, int listen_fd)
if (tp->accept (listen_fd, conn))
return 0;
- vtinf ("Got a connection -- fd = %d (0x%08x) on listener fd = %d (0x%08x)",
- conn->fd, conn->fd, listen_fd, listen_fd);
+ if (conn->cfg.num_test_sessions < VCL_TEST_CFG_MAX_SELECT_SESS)
+ vtinf ("Got a connection -- fd = %d (0x%08x) on listener fd = %d (0x%08x)",
+ conn->fd, conn->fd, listen_fd, listen_fd);
ev.events = EPOLLET | EPOLLIN;
ev.data.u64 = conn - wrk->conn_pool;
@@ -502,31 +514,33 @@ vcl_test_server_process_opts (vcl_test_server_main_t * vsm, int argc,
print_usage_and_exit ();
}
- if (argc < (optind + 1))
+ if (argc > (optind + 1))
{
- fprintf (stderr, "SERVER: ERROR: Insufficient number of arguments!\n");
+ fprintf (stderr, "Incorrect number of arguments!\n");
print_usage_and_exit ();
}
-
- if (sscanf (argv[optind], "%d", &v) == 1)
- vsm->server_cfg.port = (uint16_t) v;
- else
+ else if (argc > 1 && argc == (optind + 1))
{
- fprintf (stderr, "SERVER: ERROR: Invalid port (%s)!\n", argv[optind]);
- print_usage_and_exit ();
+ if (sscanf (argv[optind], "%d", &v) == 1)
+ vsm->server_cfg.port = (uint16_t) v;
+ else
+ {
+ fprintf (stderr, "Invalid port (%s)!\n", argv[optind]);
+ print_usage_and_exit ();
+ }
}
vcl_test_init_endpoint_addr (vsm);
}
int
-vts_handle_ctrl_cfg (vcl_test_server_worker_t *wrk, vcl_test_cfg_t *rx_cfg,
+vts_handle_ctrl_cfg (vcl_test_server_worker_t *wrk, hs_test_cfg_t *rx_cfg,
vcl_test_session_t *conn, int rx_bytes)
{
if (rx_cfg->verbose)
{
vtinf ("(fd %d): Received a cfg msg!", conn->fd);
- vcl_test_cfg_dump (rx_cfg, 0 /* is_client */ );
+ hs_test_cfg_dump (rx_cfg, 0 /* is_client */);
}
if (rx_bytes != sizeof (*rx_cfg))
@@ -538,7 +552,7 @@ vts_handle_ctrl_cfg (vcl_test_server_worker_t *wrk, vcl_test_cfg_t *rx_cfg,
if (conn->cfg.verbose)
{
vtinf ("(fd %d): Replying to cfg msg", conn->fd);
- vcl_test_cfg_dump (rx_cfg, 0 /* is_client */ );
+ hs_test_cfg_dump (rx_cfg, 0 /* is_client */);
}
conn->write (conn, &conn->cfg, sizeof (conn->cfg));
return -1;
@@ -546,27 +560,28 @@ vts_handle_ctrl_cfg (vcl_test_server_worker_t *wrk, vcl_test_cfg_t *rx_cfg,
switch (rx_cfg->test)
{
- case VCL_TEST_TYPE_NONE:
- case VCL_TEST_TYPE_ECHO:
+ case HS_TEST_TYPE_NONE:
+ case HS_TEST_TYPE_ECHO:
sync_config_and_reply (conn, rx_cfg);
break;
- case VCL_TEST_TYPE_BI:
- case VCL_TEST_TYPE_UNI:
+ case HS_TEST_TYPE_BI:
+ case HS_TEST_TYPE_UNI:
vts_test_cmd (wrk, conn, rx_cfg);
break;
- case VCL_TEST_TYPE_EXIT:
+ case HS_TEST_TYPE_EXIT:
vtinf ("Ctrl session fd %d closing!", conn->fd);
vts_session_cleanup (conn);
wrk->nfds--;
if (wrk->nfds)
vts_wrk_cleanup_all (wrk);
+ vcl_server_main.ctrl = 0;
break;
default:
vtwrn ("Unknown test type %d", rx_cfg->test);
- vcl_test_cfg_dump (rx_cfg, 0 /* is_client */ );
+ hs_test_cfg_dump (rx_cfg, 0 /* is_client */);
break;
}
@@ -586,7 +601,7 @@ vts_worker_init (vcl_test_server_worker_t * wrk)
vtinf ("Initializing worker ...");
- conn_pool_expand (wrk, VCL_TEST_CFG_MAX_TEST_SESS + 1);
+ conn_pool_expand (wrk, VCL_TEST_CFG_INIT_TEST_SESS + 1);
if (wrk->wrk_index)
if (vppcom_worker_register ())
vtfail ("vppcom_worker_register()", 1);
@@ -648,7 +663,7 @@ vts_worker_loop (void *arg)
vcl_test_server_worker_t *wrk = arg;
vcl_test_session_t *conn;
int i, rx_bytes, num_ev;
- vcl_test_cfg_t *rx_cfg;
+ hs_test_cfg_t *rx_cfg;
if (wrk->wrk_index)
vts_worker_init (wrk);
@@ -675,13 +690,13 @@ vts_worker_loop (void *arg)
*/
if (ep_evts[i].events & (EPOLLHUP | EPOLLRDHUP))
{
- vts_session_cleanup (conn);
- wrk->nfds--;
- if (!wrk->nfds)
+ if (conn == vsm->ctrl)
{
- vtinf ("All client connections closed\n");
- goto done;
+ vtinf ("ctrl session went away");
+ vsm->ctrl = 0;
}
+ vts_session_cleanup (conn);
+ wrk->nfds--;
continue;
}
@@ -699,6 +714,10 @@ vts_worker_loop (void *arg)
vsm->ctrl = vts_accept_ctrl (wrk, vsm->ctrl_listen_fd);
continue;
}
+
+ /* at this point ctrl session must be valid */
+ ASSERT (vsm->ctrl);
+
if (ep_evts[i].data.u32 == VCL_TEST_DATA_LISTENER)
{
conn = vts_accept_client (wrk, wrk->listener.fd);
@@ -718,8 +737,8 @@ vts_worker_loop (void *arg)
if (!wrk->wrk_index && conn->fd == vsm->ctrl->fd)
{
rx_bytes = conn->read (conn, conn->rxbuf, conn->rxbuf_size);
- rx_cfg = (vcl_test_cfg_t *) conn->rxbuf;
- if (rx_cfg->magic == VCL_TEST_CFG_CTRL_MAGIC)
+ rx_cfg = (hs_test_cfg_t *) conn->rxbuf;
+ if (rx_cfg->magic == HS_TEST_CFG_CTRL_MAGIC)
{
vts_handle_ctrl_cfg (wrk, rx_cfg, conn, rx_bytes);
if (!wrk->nfds)
@@ -847,13 +866,15 @@ main (int argc, char **argv)
vts_ctrl_session_init (&vsm->workers[0]);
/* Update ctrl port to data port */
- vsm->server_cfg.endpt.port += 1;
+ vsm->server_cfg.endpt.port = hs_make_data_port (vsm->server_cfg.endpt.port);
vts_worker_init (&vsm->workers[0]);
for (i = 1; i < vsm->server_cfg.workers; i++)
{
vsm->workers[i].wrk_index = i;
rv = pthread_create (&vsm->workers[i].thread_handle, NULL,
vts_worker_loop, (void *) &vsm->workers[i]);
+ if (rv)
+ vtfail ("pthread_create()", rv);
}
vts_worker_loop (&vsm->workers[0]);