Diffstat (limited to 'src/plugins/hs_apps/sapi/vpp_echo.c')
-rw-r--r--  src/plugins/hs_apps/sapi/vpp_echo.c | 1049
1 file changed, 1049 insertions(+), 0 deletions(-)
diff --git a/src/plugins/hs_apps/sapi/vpp_echo.c b/src/plugins/hs_apps/sapi/vpp_echo.c
new file mode 100644
index 00000000000..5c9690f299f
--- /dev/null
+++ b/src/plugins/hs_apps/sapi/vpp_echo.c
@@ -0,0 +1,1049 @@
+/*
+ * Copyright (c) 2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdio.h>
+#include <signal.h>
+
+#include <vlibmemory/api.h>
+#include <svm/fifo_segment.h>
+
+#include <hs_apps/sapi/vpp_echo_common.h>
+
+echo_main_t echo_main;
+
+static void
+echo_session_prealloc (echo_main_t * em)
+{
+ /* We need to prealloc to avoid vec resize in threads */
+ echo_session_t *session;
+ int i;
+ for (i = 0; i < em->n_sessions; i++)
+ {
+ pool_get (em->sessions, session);
+ clib_memset (session, 0, sizeof (*session));
+ session->session_index = session - em->sessions;
+ session->listener_index = SESSION_INVALID_INDEX;
+ session->session_state = ECHO_SESSION_STATE_INITIAL;
+ }
+}
+
+static void
+echo_assert_test_succeeded (echo_main_t * em)
+{
+ CHECK (em->n_clients * em->bytes_to_receive,
+ em->stats.rx_total, "Not enough data received");
+ CHECK (em->n_clients * em->bytes_to_send,
+ em->stats.tx_total, "Not enough data sent");
+ clib_spinlock_lock (&em->sid_vpp_handles_lock);
+ CHECK (0, hash_elts (em->session_index_by_vpp_handles),
+ "Some sessions are still open");
+ clib_spinlock_unlock (&em->sid_vpp_handles_lock);
+}
+
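+/* Notify vpp that the rx fifo was drained so it can resume enqueuing */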
+always_inline void
+echo_session_dequeue_notify (echo_session_t * s)
+{
+ int rv;
+ if (!svm_fifo_set_event (s->rx_fifo))
+ return;
+ if ((rv =
+ app_send_io_evt_to_vpp (s->vpp_evt_q, s->rx_fifo->master_session_index,
+ SESSION_IO_EVT_RX, SVM_Q_WAIT)))
+ ECHO_FAIL ("app_send_io_evt_to_vpp errored %d", rv);
+ svm_fifo_clear_deq_ntf (s->rx_fifo);
+}
+
+static void
+stop_signal (int signum)
+{
+ echo_main_t *em = &echo_main;
+ em->time_to_stop = 1;
+}
+
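+/* Connect to vpp's binary API, over the api socket or shared memory */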
+int
+connect_to_vpp (char *name)
+{
+ echo_main_t *em = &echo_main;
+ api_main_t *am = &api_main;
+
+ if (em->use_sock_api)
+ {
+ if (vl_socket_client_connect ((char *) em->socket_name, name,
+ 0 /* default rx, tx buffer */ ))
+ {
+ ECHO_FAIL ("socket connect failed");
+ return -1;
+ }
+
+ if (vl_socket_client_init_shm (0, 1 /* want_pthread */ ))
+ {
+ ECHO_FAIL ("init shm api failed");
+ return -1;
+ }
+ }
+ else
+ {
+ if (vl_client_connect_to_vlib ("/vpe-api", name, 32) < 0)
+ {
+ ECHO_FAIL ("shmem connect failed");
+ return -1;
+ }
+ }
+ em->vl_input_queue = am->shmem_hdr->vl_input_queue;
+ em->my_client_index = am->my_client_index;
+ return 0;
+}
+
+static void
+echo_event_didnt_happen (u8 e)
+{
+ echo_main_t *em = &echo_main;
+ u8 *s = format (0, "%U", echo_format_timing_event, e);
+ ECHO_LOG (0, "Expected event %s to happend, which did not", s);
+ em->has_failed = 1;
+}
+
+static void
+print_global_json_stats (echo_main_t * em)
+{
+ if (!(em->timing.events_sent & em->timing.start_event))
+ return echo_event_didnt_happen (em->timing.start_event);
+ if (!(em->timing.events_sent & em->timing.end_event))
+ return echo_event_didnt_happen (em->timing.end_event);
+ f64 deltat = em->timing.end_time - em->timing.start_time;
+ u8 *start_evt =
+ format (0, "%U", echo_format_timing_event, em->timing.start_event);
+ u8 *end_evt =
+ format (0, "%U", echo_format_timing_event, em->timing.end_event);
+ fformat (stdout, "{\n");
+ fformat (stdout, "\"time\": \"%.9f\",\n", deltat);
+ fformat (stdout, "\"start_evt\": \"%s\",\n", start_evt);
+ fformat (stdout, "\"end_evt\": \"%s\",\n", end_evt);
+ fformat (stdout, "\"rx_data\": %lld,\n", em->stats.rx_total);
+ fformat (stdout, "\"tx_rx\": %lld,\n", em->stats.tx_total);
+ fformat (stdout, "\"closing\": {\n");
+ fformat (stdout, " \"reset\": { \"q\": %d, \"s\": %d },\n",
+ em->stats.reset_count.q, em->stats.reset_count.s);
+ fformat (stdout, " \"close\": { \"q\": %d, \"s\": %d },\n",
+ em->stats.close_count.q, em->stats.close_count.s);
+ fformat (stdout, " \"active\": { \"q\": %d, \"s\": %d },\n",
+ em->stats.active_count.q, em->stats.active_count.s);
+ fformat (stdout, " \"clean\": { \"q\": %d, \"s\": %d }\n",
+ em->stats.clean_count.q, em->stats.clean_count.s);
+ fformat (stdout, "}\n");
+ fformat (stdout, "}\n");
+}
+
+static void
+print_global_stats (echo_main_t * em)
+{
+ u8 *s;
+ if (!(em->timing.events_sent & em->timing.start_event))
+ return echo_event_didnt_happen (em->timing.start_event);
+ if (!(em->timing.events_sent & em->timing.end_event))
+ return echo_event_didnt_happen (em->timing.end_event);
+ f64 deltat = em->timing.end_time - em->timing.start_time;
+ s = format (0, "%U:%U",
+ echo_format_timing_event, em->timing.start_event,
+ echo_format_timing_event, em->timing.end_event);
+ fformat (stdout, "Timing %s\n", s);
+ fformat (stdout, "-------- TX --------\n");
+ fformat (stdout, "%lld bytes (%lld mbytes, %lld gbytes) in %.6f seconds\n",
+ em->stats.tx_total, em->stats.tx_total / (1ULL << 20),
+ em->stats.tx_total / (1ULL << 30), deltat);
+ fformat (stdout, "%.4f Gbit/second\n",
+ (em->stats.tx_total * 8.0) / deltat / 1e9);
+ fformat (stdout, "-------- RX --------\n");
+ fformat (stdout, "%lld bytes (%lld mbytes, %lld gbytes) in %.6f seconds\n",
+ em->stats.rx_total, em->stats.rx_total / (1ULL << 20),
+ em->stats.rx_total / (1ULL << 30), deltat);
+ fformat (stdout, "%.4f Gbit/second\n",
+ (em->stats.rx_total * 8.0) / deltat / 1e9);
+ fformat (stdout, "--------------------\n");
+ fformat (stdout, "Received close on %d streams (and %d Quic conn)\n",
+ em->stats.close_count.s, em->stats.close_count.q);
+ fformat (stdout, "Received reset on %d streams (and %d Quic conn)\n",
+ em->stats.reset_count.s, em->stats.reset_count.q);
+ fformat (stdout, "Sent close on %d streams (and %d Quic conn)\n",
+ em->stats.active_count.s, em->stats.active_count.q);
+ fformat (stdout, "Discarded %d streams (and %d Quic conn)\n",
+ em->stats.clean_count.s, em->stats.clean_count.q);
+}
+
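+/*
+ * Fold a closing session's byte counters into the global stats and fire
+ * the LAST_BYTE timing event once all expected data has been received.
+ */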
+void
+echo_update_count_on_session_close (echo_main_t * em, echo_session_t * s)
+{
+
+ ECHO_LOG (1, "[%lu/%lu] -> S(%x) -> [%lu/%lu]",
+ s->bytes_received, s->bytes_received + s->bytes_to_receive,
+ s->session_index, s->bytes_sent,
+ s->bytes_sent + s->bytes_to_send);
+ clib_atomic_fetch_add (&em->stats.tx_total, s->bytes_sent);
+ clib_atomic_fetch_add (&em->stats.rx_total, s->bytes_received);
+
+ if (PREDICT_FALSE (em->stats.rx_total ==
+ em->n_clients * em->bytes_to_receive))
+ echo_notify_event (em, ECHO_EVT_LAST_BYTE);
+}
+
+static void
+echo_free_sessions (echo_main_t * em)
+{
+ /* Free marked sessions */
+ echo_session_t *s;
+ u32 *session_indexes = 0, *session_index;
+
+ /* *INDENT-OFF* */
+  pool_foreach (s, em->sessions,
+  ({
+    if (s->session_state == ECHO_SESSION_STATE_CLOSED)
+      vec_add1 (session_indexes, s->session_index);
+  }));
+ /* *INDENT-ON* */
+ vec_foreach (session_index, session_indexes)
+ {
+ /* Free session */
+ s = pool_elt_at_index (em->sessions, *session_index);
+ echo_session_handle_add_del (em, s->vpp_session_handle,
+ SESSION_INVALID_INDEX);
+      clib_memset (s, 0xfe, sizeof (*s));
+      pool_put (em->sessions, s);
+ }
+}
+
+static void
+test_recv_bytes (echo_main_t * em, echo_session_t * s, u8 * rx_buf,
+ u32 n_read)
+{
+ u32 i;
+ u8 expected;
+ for (i = 0; i < n_read; i++)
+ {
+ expected = (s->bytes_received + i) & 0xff;
+      if (rx_buf[i] == expected)
+        continue;
+      if (em->max_test_msg > 0)
+        {
+          ECHO_LOG (0, "Session 0x%lx byte %lld was 0x%x expected 0x%x",
+                    s->vpp_session_handle, s->bytes_received + i, rx_buf[i],
+                    expected);
+          em->max_test_msg--;
+          if (em->max_test_msg == 0)
+            ECHO_LOG (0, "Too many errors, hiding next ones");
+        }
+      if (em->test_return_packets == RETURN_PACKETS_ASSERT)
+        ECHO_FAIL ("test-bytes errored");
+ }
+}
+
+static int
+recv_data_chunk (echo_main_t * em, echo_session_t * s, u8 * rx_buf)
+{
+ int n_read;
+ n_read = app_recv ((app_session_t *) s, rx_buf, vec_len (rx_buf));
+ if (n_read <= 0)
+ return 0;
+ if (svm_fifo_needs_deq_ntf (s->rx_fifo, n_read))
+ echo_session_dequeue_notify (s);
+
+ if (em->test_return_packets)
+ test_recv_bytes (em, s, rx_buf, n_read);
+
+ s->bytes_received += n_read;
+ s->bytes_to_receive -= n_read;
+ return n_read;
+}
+
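+/*
+ * Send at most (len - offset) bytes of tx_buf starting at offset, capped
+ * by the session's remaining bytes_to_send; returns the number sent.
+ */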
+static int
+send_data_chunk (echo_session_t * s, u8 * tx_buf, int offset, int len)
+{
+ int n_sent;
+ int bytes_this_chunk = clib_min (s->bytes_to_send, len - offset);
+ if (!bytes_this_chunk)
+ return 0;
+ n_sent = app_send ((app_session_t *) s, tx_buf + offset,
+ bytes_this_chunk, SVM_Q_WAIT);
+ if (n_sent < 0)
+ return 0;
+ s->bytes_to_send -= n_sent;
+ s->bytes_sent += n_sent;
+ return n_sent;
+}
+
+static int
+mirror_data_chunk (echo_main_t * em, echo_session_t * s, u8 * tx_buf, u64 len)
+{
+ u64 n_sent = 0;
+ while (n_sent < len && !em->time_to_stop)
+ n_sent += send_data_chunk (s, tx_buf, n_sent, len);
+ return n_sent;
+}
+
+static inline void
+echo_check_closed_listener (echo_main_t * em, echo_session_t * s)
+{
+ echo_session_t *ls;
+ /* if parent has died, terminate gracefully */
+ if (s->listener_index == SESSION_INVALID_INDEX)
+ return;
+ ls = pool_elt_at_index (em->sessions, s->listener_index);
+ if (ls->session_state < ECHO_SESSION_STATE_CLOSING)
+ return;
+  ECHO_LOG (2, "Session 0x%lx died, close child 0x%lx", ls->vpp_session_handle,
+ s->vpp_session_handle);
+ echo_update_count_on_session_close (em, s);
+ em->proto_cb_vft->cleanup_cb (s, 1 /* parent_died */ );
+}
+
+/*
+ * Rx/Tx handling for one session, polled from the data threads
+ */
+static void
+echo_handle_data (echo_main_t * em, echo_session_t * s, u8 * rx_buf)
+{
+ int n_read, n_sent = 0;
+
+ n_read = recv_data_chunk (em, s, rx_buf);
+ if (em->data_source == ECHO_TEST_DATA_SOURCE)
+ n_sent = send_data_chunk (s, em->connect_test_data,
+ s->bytes_sent % em->tx_buf_size,
+ em->tx_buf_size);
+ else if (em->data_source == ECHO_RX_DATA_SOURCE)
+ n_sent = mirror_data_chunk (em, s, rx_buf, n_read);
+ if (!s->bytes_to_send && !s->bytes_to_receive)
+ {
+ /* Session is done, need to close */
+ if (s->session_state == ECHO_SESSION_STATE_AWAIT_DATA)
+ s->session_state = ECHO_SESSION_STATE_CLOSING;
+ else
+ {
+ s->session_state = ECHO_SESSION_STATE_AWAIT_CLOSING;
+ if (em->send_stream_disconnects == ECHO_CLOSE_F_ACTIVE)
+ {
+ echo_send_rpc (em, echo_send_disconnect_session,
+ (void *) s->vpp_session_handle, 0);
+ clib_atomic_fetch_add (&em->stats.active_count.s, 1);
+ }
+ else if (em->send_stream_disconnects == ECHO_CLOSE_F_NONE)
+ {
+ s->session_state = ECHO_SESSION_STATE_CLOSING;
+ clib_atomic_fetch_add (&em->stats.clean_count.s, 1);
+ }
+ }
+ return;
+ }
+
+ /* Check for idle clients */
+ if (em->log_lvl > 1)
+ {
+ if (n_sent || n_read)
+ s->idle_cycles = 0;
+ else if (s->idle_cycles++ == 1e7)
+ {
+ s->idle_cycles = 0;
+ ECHO_LOG (1, "Idle client TX:%dB RX:%dB", s->bytes_to_send,
+ s->bytes_to_receive);
+ ECHO_LOG (1, "Idle FIFOs TX:%dB RX:%dB",
+ svm_fifo_max_dequeue (s->tx_fifo),
+ svm_fifo_max_dequeue (s->rx_fifo));
+ ECHO_LOG (1, "Session 0x%lx state %u", s->vpp_session_handle,
+ s->session_state);
+ }
+ }
+}
+
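+/*
+ * Data thread entry point: sessions are split evenly across the
+ * em->n_rx_threads workers, thread idx busy-polling the sessions in
+ * data_thread_args[n * idx .. n * idx + thread_n_sessions - 1] until
+ * they have all closed or time_to_stop is set.
+ */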
+static void *
+echo_data_thread_fn (void *arg)
+{
+ clib_mem_set_thread_index ();
+ echo_main_t *em = &echo_main;
+ u32 N = em->n_clients;
+ u32 n = (N + em->n_rx_threads - 1) / em->n_rx_threads;
+ u32 idx = (u64) arg;
+ if (n * idx >= N)
+ {
+ ECHO_LOG (1, "Thread %u exiting, no sessions to care for", idx);
+ pthread_exit (0);
+ }
+ u32 thread_n_sessions = clib_min (n, N - n * idx);
+
+ u32 i = 0;
+ u32 n_closed_sessions = 0;
+ u32 session_index;
+ u8 *rx_buf = 0;
+ echo_session_t *s;
+ vec_validate (rx_buf, em->rx_buf_size);
+
+ for (i = 0; !em->time_to_stop; i = (i + 1) % thread_n_sessions)
+ {
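+      /* Restart the closed-session count on each new pass over the sessions */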
+ n_closed_sessions = i == 0 ? 0 : n_closed_sessions;
+ session_index = em->data_thread_args[n * idx + i];
+ if (session_index == SESSION_INVALID_INDEX)
+ continue;
+ s = pool_elt_at_index (em->sessions, session_index);
+ switch (s->session_state)
+ {
+ case ECHO_SESSION_STATE_READY:
+ case ECHO_SESSION_STATE_AWAIT_DATA:
+ echo_handle_data (em, s, rx_buf);
+ echo_check_closed_listener (em, s);
+ break;
+ case ECHO_SESSION_STATE_AWAIT_CLOSING:
+ echo_check_closed_listener (em, s);
+ break;
+ case ECHO_SESSION_STATE_CLOSING:
+ echo_update_count_on_session_close (em, s);
+ em->proto_cb_vft->cleanup_cb (s, 0 /* parent_died */ );
+ break;
+ case ECHO_SESSION_STATE_CLOSED:
+ n_closed_sessions++;
+ break;
+ }
+ if (n_closed_sessions == thread_n_sessions)
+ break;
+ }
+ pthread_exit (0);
+}
+
+static void
+session_bound_handler (session_bound_msg_t * mp)
+{
+ echo_main_t *em = &echo_main;
+ echo_session_t *listen_session;
+ if (mp->retval)
+ {
+ ECHO_FAIL ("bind failed: %U", format_api_error,
+ clib_net_to_host_u32 (mp->retval));
+ return;
+ }
+ ECHO_LOG (0, "listening on %U:%u", format_ip46_address, mp->lcl_ip,
+ mp->lcl_is_ip4 ? IP46_TYPE_IP4 : IP46_TYPE_IP6,
+ clib_net_to_host_u16 (mp->lcl_port));
+
+ /* Allocate local session and set it up */
+ listen_session = echo_session_new (em);
+ listen_session->session_type = ECHO_SESSION_TYPE_LISTEN;
+ echo_session_handle_add_del (em, mp->handle, listen_session->session_index);
+ em->state = STATE_LISTEN;
+ em->listen_session_index = listen_session->session_index;
+ if (em->proto_cb_vft->bound_uri_cb)
+ em->proto_cb_vft->bound_uri_cb (mp, listen_session);
+}
+
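+/*
+ * Server side accept: map the fifos vpp allocated for this session, record
+ * the transport addresses needed by app_send_dgram and acknowledge the
+ * accept with an ACCEPTED_REPLY control event.
+ */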
+static void
+session_accepted_handler (session_accepted_msg_t * mp)
+{
+ app_session_evt_t _app_evt, *app_evt = &_app_evt;
+ session_accepted_reply_msg_t *rmp;
+ svm_fifo_t *rx_fifo, *tx_fifo;
+ echo_main_t *em = &echo_main;
+ echo_session_t *session, *ls;
+ /* Allocate local session and set it up */
+ session = echo_session_new (em);
+
+ if (wait_for_segment_allocation (mp->segment_handle))
+ {
+ ECHO_FAIL ("wait_for_segment_allocation errored");
+ return;
+ }
+
+ rx_fifo = uword_to_pointer (mp->server_rx_fifo, svm_fifo_t *);
+ rx_fifo->client_session_index = session->session_index;
+ tx_fifo = uword_to_pointer (mp->server_tx_fifo, svm_fifo_t *);
+ tx_fifo->client_session_index = session->session_index;
+
+ session->rx_fifo = rx_fifo;
+ session->tx_fifo = tx_fifo;
+
+ /* session->transport needed by app_send_dgram */
+ clib_memcpy_fast (&session->transport.rmt_ip, &mp->rmt.ip,
+ sizeof (ip46_address_t));
+ session->transport.is_ip4 = mp->rmt.is_ip4;
+ session->transport.rmt_port = mp->rmt.port;
+ clib_memcpy_fast (&session->transport.lcl_ip, &em->uri_elts.ip,
+ sizeof (ip46_address_t));
+ session->transport.lcl_port = em->uri_elts.port;
+
+ session->vpp_session_handle = mp->handle;
+ session->start = clib_time_now (&em->clib_time);
+ session->vpp_evt_q = uword_to_pointer (mp->vpp_event_queue_address,
+ svm_msg_q_t *);
+ if (!(ls = echo_get_session_from_handle (em, mp->listener_handle)))
+ return;
+ session->listener_index = ls->session_index;
+
+ /* Add it to lookup table */
+ ECHO_LOG (1, "Accepted session 0x%lx -> 0x%lx", mp->handle,
+ mp->listener_handle);
+ echo_session_handle_add_del (em, mp->handle, session->session_index);
+
+ app_alloc_ctrl_evt_to_vpp (session->vpp_evt_q, app_evt,
+ SESSION_CTRL_EVT_ACCEPTED_REPLY);
+ rmp = (session_accepted_reply_msg_t *) app_evt->evt->data;
+ rmp->handle = mp->handle;
+ rmp->context = mp->context;
+ app_send_ctrl_evt_to_vpp (session->vpp_evt_q, app_evt);
+ em->proto_cb_vft->accepted_cb (mp, session);
+}
+
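+/* Client side: handle the result of a connect request sent to vpp */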
+static void
+session_connected_handler (session_connected_msg_t * mp)
+{
+ echo_main_t *em = &echo_main;
+ echo_session_t *session;
+ u32 listener_index = htonl (mp->context);
+ svm_fifo_t *rx_fifo, *tx_fifo;
+
+ if (mp->retval)
+ {
+ ECHO_FAIL ("connection failed with code: %U", format_api_error,
+ clib_net_to_host_u32 (mp->retval));
+ return;
+ }
+
+ session = echo_session_new (em);
+ if (wait_for_segment_allocation (mp->segment_handle))
+ {
+ ECHO_FAIL ("wait_for_segment_allocation errored");
+ return;
+ }
+
+ rx_fifo = uword_to_pointer (mp->server_rx_fifo, svm_fifo_t *);
+ rx_fifo->client_session_index = session->session_index;
+ tx_fifo = uword_to_pointer (mp->server_tx_fifo, svm_fifo_t *);
+ tx_fifo->client_session_index = session->session_index;
+
+ session->rx_fifo = rx_fifo;
+ session->tx_fifo = tx_fifo;
+ session->vpp_session_handle = mp->handle;
+ session->start = clib_time_now (&em->clib_time);
+ session->vpp_evt_q = uword_to_pointer (mp->vpp_event_queue_address,
+ svm_msg_q_t *);
+ session->listener_index = listener_index;
+ /* session->transport needed by app_send_dgram */
+ clib_memcpy_fast (&session->transport.lcl_ip, &mp->lcl.ip,
+ sizeof (ip46_address_t));
+ session->transport.is_ip4 = mp->lcl.is_ip4;
+ session->transport.lcl_port = mp->lcl.port;
+ clib_memcpy_fast (&session->transport.rmt_ip, &em->uri_elts.ip,
+ sizeof (ip46_address_t));
+ session->transport.rmt_port = em->uri_elts.port;
+
+ echo_session_handle_add_del (em, mp->handle, session->session_index);
+ em->proto_cb_vft->connected_cb ((session_connected_bundled_msg_t *) mp,
+ session->session_index, 0 /* is_failed */ );
+}
+
+/*
+ *
+ * End of ECHO callback definitions
+ *
+ */
+
+static void
+session_disconnected_handler (session_disconnected_msg_t * mp)
+{
+ app_session_evt_t _app_evt, *app_evt = &_app_evt;
+ session_disconnected_reply_msg_t *rmp;
+ echo_main_t *em = &echo_main;
+ echo_session_t *s;
+ ECHO_LOG (1, "passive close session 0x%lx", mp->handle);
+ if (!(s = echo_get_session_from_handle (em, mp->handle)))
+ return;
+ em->proto_cb_vft->disconnected_cb (mp, s);
+
+ app_alloc_ctrl_evt_to_vpp (s->vpp_evt_q, app_evt,
+ SESSION_CTRL_EVT_DISCONNECTED_REPLY);
+ rmp = (session_disconnected_reply_msg_t *) app_evt->evt->data;
+ rmp->retval = 0;
+ rmp->handle = mp->handle;
+ rmp->context = mp->context;
+ app_send_ctrl_evt_to_vpp (s->vpp_evt_q, app_evt);
+}
+
+static void
+session_reset_handler (session_reset_msg_t * mp)
+{
+ app_session_evt_t _app_evt, *app_evt = &_app_evt;
+ echo_main_t *em = &echo_main;
+ session_reset_reply_msg_t *rmp;
+ echo_session_t *s = 0;
+ ECHO_LOG (1, "Reset session 0x%lx", mp->handle);
+ if (!(s = echo_get_session_from_handle (em, mp->handle)))
+ return;
+ em->proto_cb_vft->reset_cb (mp, s);
+
+ app_alloc_ctrl_evt_to_vpp (s->vpp_evt_q, app_evt,
+ SESSION_CTRL_EVT_RESET_REPLY);
+ rmp = (session_reset_reply_msg_t *) app_evt->evt->data;
+ rmp->retval = 0;
+ rmp->handle = mp->handle;
+ app_send_ctrl_evt_to_vpp (s->vpp_evt_q, app_evt);
+}
+
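+/* Dispatch session control events received on the app's message queue */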
+static void
+handle_mq_event (session_event_t * e)
+{
+ switch (e->event_type)
+ {
+ case SESSION_CTRL_EVT_BOUND:
+ session_bound_handler ((session_bound_msg_t *) e->data);
+ break;
+ case SESSION_CTRL_EVT_ACCEPTED:
+ session_accepted_handler ((session_accepted_msg_t *) e->data);
+ break;
+ case SESSION_CTRL_EVT_CONNECTED:
+ session_connected_handler ((session_connected_msg_t *) e->data);
+ break;
+ case SESSION_CTRL_EVT_DISCONNECTED:
+ session_disconnected_handler ((session_disconnected_msg_t *) e->data);
+ break;
+ case SESSION_CTRL_EVT_RESET:
+ session_reset_handler ((session_reset_msg_t *) e->data);
+ break;
+ case SESSION_IO_EVT_RX:
+ break;
+ default:
+ ECHO_LOG (0, "unhandled event %u", e->event_type);
+ }
+}
+
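+/*
+ * Main thread loop: drain the RPC queue and execute requests (such as
+ * echo_send_disconnect_session posted by the data threads) so they are
+ * all issued from a single thread.
+ */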
+static void
+echo_process_rpcs (echo_main_t * em)
+{
+ echo_rpc_msg_t *rpc;
+ svm_msg_q_msg_t msg;
+ while (em->state < STATE_DATA_DONE && !em->time_to_stop)
+ {
+ if (svm_msg_q_sub (em->rpc_msq_queue, &msg, SVM_Q_TIMEDWAIT, 1))
+ continue;
+ rpc = svm_msg_q_msg_data (em->rpc_msq_queue, &msg);
+ ((echo_rpc_t) rpc->fp) (rpc->arg, rpc->opaque);
+ svm_msg_q_free_msg (em->rpc_msq_queue, &msg);
+ }
+}
+
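+/*
+ * Message queue thread: polls the event queue shared with vpp and
+ * dispatches session control events until the test stops or detaches.
+ */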
+static void *
+echo_mq_thread_fn (void *arg)
+{
+ clib_mem_set_thread_index ();
+ echo_main_t *em = &echo_main;
+ session_event_t *e;
+ svm_msg_q_msg_t msg;
+ int rv;
+ wait_for_state_change (em, STATE_ATTACHED, 0);
+ if (em->state < STATE_ATTACHED || !em->our_event_queue)
+ {
+ ECHO_FAIL ("Application failed to attach");
+ pthread_exit (0);
+ }
+
+ while (1)
+ {
+ if (!(rv = svm_msg_q_sub (em->our_event_queue,
+ &msg, SVM_Q_TIMEDWAIT, 1)))
+ {
+ e = svm_msg_q_msg_data (em->our_event_queue, &msg);
+ handle_mq_event (e);
+ svm_msg_q_free_msg (em->our_event_queue, &msg);
+ }
+ if (rv == ETIMEDOUT
+ && (em->time_to_stop || em->state == STATE_DETACHED))
+ break;
+ }
+ pthread_exit (0);
+}
+
+static void
+clients_run (echo_main_t * em)
+{
+ u64 i;
+ echo_notify_event (em, ECHO_EVT_FIRST_QCONNECT);
+ for (i = 0; i < em->n_connects; i++)
+ echo_send_connect (em->uri, SESSION_INVALID_INDEX);
+ wait_for_state_change (em, STATE_READY, 0);
+ ECHO_LOG (1, "App is ready");
+ echo_process_rpcs (em);
+}
+
+static void
+server_run (echo_main_t * em)
+{
+ echo_send_listen (em);
+ wait_for_state_change (em, STATE_READY, 0);
+ ECHO_LOG (1, "App is ready");
+ echo_process_rpcs (em);
+ /* Cleanup */
+ echo_send_unbind (em);
+ if (wait_for_state_change (em, STATE_DISCONNECTED, TIMEOUT))
+ {
+ ECHO_FAIL ("Timeout waiting for state disconnected");
+ return;
+ }
+}
+
+static void
+print_usage_and_exit (void)
+{
+ echo_main_t *em = &echo_main;
+ int i;
+ fprintf (stderr,
+ "Usage: vpp_echo [socket-name SOCKET] [client|server] [uri URI] [OPTIONS]\n"
+	   "Generates traffic and asserts correct teardown of the QUIC host stack\n"
+ "\n"
+ " socket-name PATH Specify the binary socket path to connect to VPP\n"
+ " use-svm-api Use SVM API to connect to VPP\n"
+ " test-bytes[:assert] Check data correctness when receiving (assert fails on first error)\n"
+ " fifo-size N Use N Kb fifos\n"
+ " rx-buf N Use N Kb RX buffer\n"
+ " tx-buf N Use N Kb TX test buffer\n"
+ " appns NAMESPACE Use the namespace NAMESPACE\n"
+	   "  all-scope           Use both local and global scopes\n"
+	   "  local-scope         Use local scope only\n"
+	   "  global-scope        Use global scope only\n"
+ " secret SECRET set namespace secret\n"
+ " chroot prefix PATH Use PATH as memory root path\n"
+	   "  sclose=[Y|N|W]      When a stream is done: actively send close [Y], wait for peer close [W], or discard [N]\n"
+ "\n"
+ " time START:END Time between evts START & END, events being :\n"
+ " start - Start of the app\n"
+ " qconnect - first Connection connect sent\n"
+ " qconnected - last Connection connected\n"
+ " sconnect - first Stream connect sent\n"
+ " sconnected - last Stream got connected\n"
+ " lastbyte - Last expected byte received\n"
+ " exit - Exiting of the app\n"
+ " json Output global stats in json\n"
+ " log=N Set the log level to [0: no output, 1:errors, 2:log]\n"
+ "\n"
+ " nclients N Open N clients sending data\n"
+ " nthreads N Use N busy loop threads for data [in addition to main & msg queue]\n"
+	   "  TX=1337[Kb|Mb|Gb]   Send 1337 [K|M|G]bytes, use TX=RX to reflect the data\n"
+	   "  RX=1337[Kb|Mb|Gb]   Expect 1337 [K|M|G]bytes\n" "\n");
+ for (i = 0; i < TRANSPORT_N_PROTO; i++)
+ {
+ echo_proto_cb_vft_t *vft = em->available_proto_cb_vft[i];
+ if (vft && vft->print_usage_cb)
+ vft->print_usage_cb ();
+ }
+  fprintf (stderr, "\nDefault configuration is:\n"
+ " server nclients 1/1 RX=64Kb TX=RX\n"
+ " client nclients 1/1 RX=64Kb TX=64Kb\n");
+ exit (1);
+}
+
+static int
+echo_process_each_proto_opts (unformat_input_t * a)
+{
+ echo_main_t *em = &echo_main;
+ int i, rv;
+ for (i = 0; i < TRANSPORT_N_PROTO; i++)
+ {
+ echo_proto_cb_vft_t *vft = em->available_proto_cb_vft[i];
+ if (vft && vft->process_opts_cb)
+ if ((rv = vft->process_opts_cb (a)))
+ return rv;
+ }
+ return 0;
+}
+
+static void
+echo_set_each_proto_defaults_before_opts (echo_main_t * em)
+{
+ int i;
+ for (i = 0; i < TRANSPORT_N_PROTO; i++)
+ {
+ echo_proto_cb_vft_t *vft = em->available_proto_cb_vft[i];
+ if (vft && vft->set_defaults_before_opts_cb)
+ vft->set_defaults_before_opts_cb ();
+ }
+}
+
+void
+echo_process_opts (int argc, char **argv)
+{
+ echo_main_t *em = &echo_main;
+ unformat_input_t _argv, *a = &_argv;
+ u32 tmp;
+ u8 *chroot_prefix;
+ u8 *uri = 0;
+ u8 default_f_active;
+
+ unformat_init_command_line (a, argv);
+ while (unformat_check_input (a) != UNFORMAT_END_OF_INPUT)
+ {
+ if (echo_process_each_proto_opts (a))
+ ;
+ else if (unformat (a, "chroot prefix %s", &chroot_prefix))
+ vl_set_memory_root_path ((char *) chroot_prefix);
+ else if (unformat (a, "uri %s", &uri))
+ em->uri = format (0, "%s%c", uri, 0);
+ else if (unformat (a, "server"))
+ em->i_am_master = 1;
+ else if (unformat (a, "client"))
+ em->i_am_master = 0;
+ else if (unformat (a, "test-bytes:assert"))
+ em->test_return_packets = RETURN_PACKETS_ASSERT;
+ else if (unformat (a, "test-bytes"))
+ em->test_return_packets = RETURN_PACKETS_LOG_WRONG;
+ else if (unformat (a, "socket-name %s", &em->socket_name))
+ ;
+ else if (unformat (a, "use-svm-api"))
+ em->use_sock_api = 0;
+ else if (unformat (a, "fifo-size %d", &tmp))
+ em->fifo_size = tmp << 10;
+ else if (unformat (a, "rx-buf %d", &tmp))
+ em->rx_buf_size = tmp << 10;
+ else if (unformat (a, "tx-buf %d", &tmp))
+ em->rx_buf_size = tmp << 10;
+ else if (unformat (a, "nclients %d", &em->n_clients))
+ {
+ em->n_sessions = em->n_clients + 1;
+ em->n_connects = em->n_clients;
+ }
+ else if (unformat (a, "nthreads %d", &em->n_rx_threads))
+ ;
+ else if (unformat (a, "appns %_%v%_", &em->appns_id))
+ ;
+ else if (unformat (a, "all-scope"))
+ em->appns_flags |= (APP_OPTIONS_FLAGS_USE_GLOBAL_SCOPE
+ | APP_OPTIONS_FLAGS_USE_LOCAL_SCOPE);
+ else if (unformat (a, "local-scope"))
+ em->appns_flags = APP_OPTIONS_FLAGS_USE_LOCAL_SCOPE;
+ else if (unformat (a, "global-scope"))
+ em->appns_flags = APP_OPTIONS_FLAGS_USE_GLOBAL_SCOPE;
+ else if (unformat (a, "secret %lu", &em->appns_secret))
+ ;
+ else if (unformat (a, "TX=RX"))
+ em->data_source = ECHO_RX_DATA_SOURCE;
+ else if (unformat (a, "TX=%U", unformat_data, &em->bytes_to_send))
+ ;
+ else if (unformat (a, "RX=%U", unformat_data, &em->bytes_to_receive))
+ ;
+ else if (unformat (a, "json"))
+ em->output_json = 1;
+ else if (unformat (a, "log=%d", &em->log_lvl))
+ ;
+ else if (unformat (a, "sclose=%U",
+ echo_unformat_close, &em->send_stream_disconnects))
+ ;
+ else if (unformat (a, "time %U:%U",
+ echo_unformat_timing_event, &em->timing.start_event,
+ echo_unformat_timing_event, &em->timing.end_event))
+ ;
+ else
+ print_usage_and_exit ();
+ }
+
+ /* setting default for unset values
+ *
+ * bytes_to_send / bytes_to_receive & data_source */
+ if (em->bytes_to_receive == (u64) ~ 0)
+ em->bytes_to_receive = 64 << 10; /* default */
+ if (em->bytes_to_send == (u64) ~ 0)
+ em->bytes_to_send = 64 << 10; /* default */
+ else if (em->bytes_to_send == 0)
+ em->data_source = ECHO_NO_DATA_SOURCE;
+ else
+ em->data_source = ECHO_TEST_DATA_SOURCE;
+
+ if (em->data_source == ECHO_INVALID_DATA_SOURCE)
+ em->data_source =
+ em->i_am_master ? ECHO_RX_DATA_SOURCE : ECHO_TEST_DATA_SOURCE;
+ if (em->data_source == ECHO_RX_DATA_SOURCE)
+ em->bytes_to_send = em->bytes_to_receive;
+
+ /* disconnect flags */
+ if (em->i_am_master)
+ default_f_active =
+ em->bytes_to_send == 0 ? ECHO_CLOSE_F_ACTIVE : ECHO_CLOSE_F_PASSIVE;
+ else
+ default_f_active =
+ em->bytes_to_receive == 0 ? ECHO_CLOSE_F_PASSIVE : ECHO_CLOSE_F_ACTIVE;
+ if (em->send_stream_disconnects == ECHO_CLOSE_F_INVALID)
+ em->send_stream_disconnects = default_f_active;
+}
+
+void
+echo_process_uri (echo_main_t * em)
+{
+ unformat_input_t _input, *input = &_input;
+ u32 port;
+ unformat_init_string (input, (char *) em->uri, strlen ((char *) em->uri));
+ if (unformat
+ (input, "%U://%U/%d", unformat_transport_proto,
+ &em->uri_elts.transport_proto, unformat_ip4_address,
+ &em->uri_elts.ip.ip4, &port))
+ em->uri_elts.is_ip4 = 1;
+ else
+ if (unformat
+ (input, "%U://%U/%d", unformat_transport_proto,
+ &em->uri_elts.transport_proto, unformat_ip6_address,
+ &em->uri_elts.ip.ip6, &port))
+ em->uri_elts.is_ip4 = 0;
+ else
+ ECHO_FAIL ("Unable to process uri");
+ em->uri_elts.port = clib_host_to_net_u16 (port);
+ unformat_free (input);
+}
+
+static void __clib_constructor
+vpp_echo_init ()
+{
+  /* Initialize memory before protocols register themselves */
+ echo_main_t *em = &echo_main;
+ clib_mem_init_thread_safe (0, 256 << 20);
+ clib_memset (em, 0, sizeof (*em));
+}
+
+int
+main (int argc, char **argv)
+{
+ echo_main_t *em = &echo_main;
+ fifo_segment_main_t *sm = &em->segment_main;
+ char *app_name;
+ u64 i;
+ svm_msg_q_cfg_t _cfg, *cfg = &_cfg;
+ u32 rpc_queue_size = 64 << 10;
+
+ em->session_index_by_vpp_handles = hash_create (0, sizeof (uword));
+ clib_spinlock_init (&em->sid_vpp_handles_lock);
+ em->shared_segment_handles = hash_create (0, sizeof (uword));
+ clib_spinlock_init (&em->segment_handles_lock);
+ em->socket_name = format (0, "%s%c", API_SOCKET_FILE, 0);
+ em->use_sock_api = 1;
+ em->fifo_size = 64 << 10;
+ em->n_clients = 1;
+ em->n_connects = 1;
+ em->n_sessions = 2;
+ em->max_test_msg = 50;
+ em->time_to_stop = 0;
+ em->i_am_master = 1;
+ em->n_rx_threads = 4;
+ em->test_return_packets = RETURN_PACKETS_NOTEST;
+ em->timing.start_event = ECHO_EVT_FIRST_QCONNECT;
+ em->timing.end_event = ECHO_EVT_LAST_BYTE;
+ em->bytes_to_receive = ~0; /* defaulted when we know if server/client */
+ em->bytes_to_send = ~0; /* defaulted when we know if server/client */
+ em->rx_buf_size = 1 << 20;
+ em->tx_buf_size = 1 << 20;
+ em->data_source = ECHO_INVALID_DATA_SOURCE;
+ em->uri = format (0, "%s%c", "tcp://0.0.0.0/1234", 0);
+ echo_set_each_proto_defaults_before_opts (em);
+ echo_process_opts (argc, argv);
+ echo_process_uri (em);
+ em->proto_cb_vft = em->available_proto_cb_vft[em->uri_elts.transport_proto];
+ if (!em->proto_cb_vft)
+ {
+ ECHO_FAIL ("Protocol %U is not supported",
+ format_transport_proto, em->uri_elts.transport_proto);
+ exit (1);
+ }
+ if (em->proto_cb_vft->set_defaults_after_opts_cb)
+ em->proto_cb_vft->set_defaults_after_opts_cb ();
+
+ vec_validate (em->data_thread_handles, em->n_rx_threads);
+ vec_validate (em->data_thread_args, em->n_clients);
+ for (i = 0; i < em->n_clients; i++)
+ em->data_thread_args[i] = SESSION_INVALID_INDEX;
+ clib_time_init (&em->clib_time);
+ init_error_string_table ();
+ fifo_segment_main_init (sm, HIGH_SEGMENT_BASEVA, 20);
+ vec_validate (em->connect_test_data, em->tx_buf_size);
+ for (i = 0; i < em->tx_buf_size; i++)
+ em->connect_test_data[i] = i & 0xff;
+
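+  /* Allocate the message queue used to post RPCs to the main thread */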
+ /* *INDENT-OFF* */
+ svm_msg_q_ring_cfg_t rc[1] = {
+ {rpc_queue_size, sizeof (echo_rpc_msg_t), 0},
+ };
+ /* *INDENT-ON* */
+ cfg->consumer_pid = getpid ();
+ cfg->n_rings = 1;
+ cfg->q_nitems = rpc_queue_size;
+ cfg->ring_cfgs = rc;
+ em->rpc_msq_queue = svm_msg_q_alloc (cfg);
+
+ signal (SIGINT, stop_signal);
+ signal (SIGQUIT, stop_signal);
+ signal (SIGTERM, stop_signal);
+ echo_api_hookup (em);
+
+ app_name = em->i_am_master ? "echo_server" : "echo_client";
+ if (connect_to_vpp (app_name))
+ {
+ svm_region_exit ();
+ ECHO_FAIL ("Couldn't connect to vpe, exiting...\n");
+ exit (1);
+ }
+
+ echo_session_prealloc (em);
+ echo_notify_event (em, ECHO_EVT_START);
+
+ echo_send_attach (em);
+ if (wait_for_state_change (em, STATE_ATTACHED, TIMEOUT))
+ {
+ ECHO_FAIL ("Couldn't attach to vpp, did you run <session enable> ?\n");
+ exit (1);
+ }
+ if (pthread_create (&em->mq_thread_handle,
+ NULL /*attr */ , echo_mq_thread_fn, 0))
+ {
+ ECHO_FAIL ("pthread create errored\n");
+ exit (1);
+ }
+ for (i = 0; i < em->n_rx_threads; i++)
+ if (pthread_create (&em->data_thread_handles[i],
+ NULL /*attr */ , echo_data_thread_fn, (void *) i))
+ {
+ ECHO_FAIL ("pthread create errored\n");
+ exit (1);
+ }
+ if (em->i_am_master)
+ server_run (em);
+ else
+ clients_run (em);
+ echo_notify_event (em, ECHO_EVT_EXIT);
+ if (em->output_json)
+ print_global_json_stats (em);
+ else
+ print_global_stats (em);
+ echo_free_sessions (em);
+  echo_assert_test_succeeded (em);
+ echo_send_detach (em);
+ if (wait_for_state_change (em, STATE_DETACHED, TIMEOUT))
+ {
+ ECHO_FAIL ("ECHO-ERROR: Couldn't detach from vpp, exiting...\n");
+ exit (1);
+ }
+ int *rv;
+ pthread_join (em->mq_thread_handle, (void **) &rv);
+ if (rv)
+ {
+ ECHO_FAIL ("mq pthread errored %d", rv);
+ exit (1);
+ }
+ if (em->use_sock_api)
+ vl_socket_client_disconnect ();
+ else
+ vl_client_disconnect_from_vlib ();
+ ECHO_LOG (0, "Test complete !\n");
+ exit (em->has_failed);
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */