author     Ping Yu <ping.yu@intel.com>            2018-07-19 10:51:09 -0400
committer  Florin Coras <florin.coras@gmail.com>  2018-07-19 18:32:54 +0000
commit     970a0b87bb7d7d9c16acbe9ea207a7d4c81bfaee (patch)
tree       f35fa49873cf9f3687efdc7ae6b00c55cf9035a7 /src
parent     4d56e059f78b991cb19ec4e5cf4a07a5607a0642 (diff)
Add a new communication channel between VPP and openssl engine
When the engine buffer fills up during a burst in performance testing, this code lets VPP handle the retry mechanism.

Change-Id: I0f9fc05d3dba8a54d34dca4c6137700d6c80f714
Signed-off-by: Ping Yu <ping.yu@intel.com>
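In outline, the retry path added here works as follows; this is an illustrative sketch condensed from the hunks below (SSL_get_async_estatus () and ENGINE_STATUS_RETRY come from the async-capable OpenSSL/engine build guarded by HAVE_OPENSSL_ASYNC):

#ifdef HAVE_OPENSSL_ASYNC
  /* After a handshake attempt: if the engine was too busy to accept the
   * request, re-queue a run event so the async polling node invokes this
   * handler again instead of dropping the operation. */
  int estatus;
  if (SSL_get_async_estatus (oc->ssl, &estatus)
      && estatus == ENGINE_STATUS_RETRY)
    vpp_ssl_async_retry_func (ctx, openssl_ctx_handshake_rx);

  /* SSL_ERROR_WANT_ASYNC still follows the original pending-event path */
  if (err == SSL_ERROR_WANT_ASYNC)
    vpp_ssl_async_process_event (ctx, openssl_ctx_handshake_rx);
#endif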
Diffstat (limited to 'src')
-rw-r--r--  src/plugins/tlsopenssl/tls_async.c   | 49
-rw-r--r--  src/plugins/tlsopenssl/tls_openssl.c | 27
-rw-r--r--  src/plugins/tlsopenssl/tls_openssl.h |  2
-rw-r--r--  src/vnet/tls/tls.h                   |  2
4 files changed, 60 insertions, 20 deletions
diff --git a/src/plugins/tlsopenssl/tls_async.c b/src/plugins/tlsopenssl/tls_async.c
index aec1d7dcee2..3b469b9d3c4 100644
--- a/src/plugins/tlsopenssl/tls_async.c
+++ b/src/plugins/tlsopenssl/tls_async.c
@@ -99,7 +99,7 @@ evt_pool_init (vlib_main_t * vm)
num_threads = 1 /* main thread */ + vtm->n_threads;
- TLS_DBG ("Totally there is %d thread\n", num_threads);
+ TLS_DBG (2, "Totally there is %d thread\n", num_threads);
vec_validate (om->evt_pool, num_threads - 1);
vec_validate (om->status, num_threads - 1);
@@ -115,7 +115,6 @@ evt_pool_init (vlib_main_t * vm)
}
om->polling = NULL;
- TLS_DBG ("Node disabled\n");
openssl_async_node_enable_disable (0);
return;
@@ -253,7 +252,7 @@ openssl_async_run (void *evt)
int *evt_run_tail = &om->status[thread_index].evt_run_tail;
int *evt_run_head = &om->status[thread_index].evt_run_head;
- TLS_DBG ("Set event %d to run\n", event_index);
+ TLS_DBG (2, "Set event %d to run\n", event_index);
event = openssl_evt_get_w_thread (event_index, thread_index);
@@ -271,7 +270,9 @@ openssl_async_run (void *evt)
}
*evt_run_tail = event_index;
if (*evt_run_head < 0)
- *evt_run_head = event_index;
+ {
+ *evt_run_head = event_index;
+ }
return 1;
}
@@ -303,10 +304,33 @@ vpp_add_async_pending_event (tls_ctx_t * ctx,
event->next = *evt_pending_head;
*evt_pending_head = eidx;
-
return &event->engine_callback;
}
+int
+vpp_add_async_run_event (tls_ctx_t * ctx, openssl_resume_handler * handler)
+{
+ u32 eidx;
+ openssl_evt_t *event;
+ openssl_ctx_t *oc = (openssl_ctx_t *) ctx;
+ u32 thread_id = ctx->c_thread_index;
+
+ eidx = openssl_evt_alloc ();
+ event = openssl_evt_get (eidx);
+
+ event->ctx_index = oc->openssl_ctx_index;
+ event->status = SSL_ASYNC_PENDING;
+ event->handler = handler;
+ event->cb_args.event_index = eidx;
+ event->cb_args.thread_index = thread_id;
+ event->engine_callback.callback = openssl_async_run;
+ event->engine_callback.arg = &event->cb_args;
+
+  /* This is a retry event; put it back on the ring so it runs again */
+ return openssl_async_run (&event->cb_args);
+
+}
+
void
event_handler (void *tls_async)
{
@@ -325,7 +349,6 @@ event_handler (void *tls_async)
if (handler)
{
- TLS_DBG ("relaunch...\n");
(*handler) (ctx, tls_session);
}
@@ -349,7 +372,7 @@ dasync_polling ()
evt_pending = &om->status[thread_index].evt_pending_head;
while (*evt_pending >= 0)
{
- TLS_DBG ("polling... current head = %d\n", *evt_pending);
+ TLS_DBG (2, "polling... current head = %d\n", *evt_pending);
event = openssl_evt_get_w_thread (*evt_pending, thread_index);
*evt_pending = event->next;
if (event->status == SSL_ASYNC_PENDING)
@@ -385,7 +408,7 @@ qat_polling_config ()
NULL, NULL, 0);
*config = 1;
- TLS_DBG ("set thread %d and instance %d mapping\n", thread_index,
+ TLS_DBG (2, "set thread %d and instance %d mapping\n", thread_index,
thread_index);
}
@@ -394,14 +417,12 @@ void
qat_polling ()
{
openssl_async_t *om = &openssl_async_main;
- int ret;
+ int poll_status = 0;
if (om->start_polling)
{
qat_polling_config ();
-#define QAT_CMD_POLL (ENGINE_CMD_BASE + 1)
- ENGINE_ctrl (om->engine, QAT_CMD_POLL, 0, &ret, NULL);
- ;
+ ENGINE_ctrl_cmd (om->engine, "POLL", 0, &poll_status, NULL, 0);
}
}
@@ -462,7 +483,7 @@ tls_resume_from_crypto (int thread_index)
if (*evt_run_head >= 0)
{
event = openssl_evt_get_w_thread (*evt_run_head, thread_index);
- TLS_DBG ("event run = %d\n", *evt_run_head);
+ TLS_DBG (2, "event run = %d\n", *evt_run_head);
tls_async_do_job (*evt_run_head, thread_index);
*evt_run_head = event->next;
@@ -481,8 +502,6 @@ tls_resume_from_crypto (int thread_index)
static clib_error_t *
tls_async_init (vlib_main_t * vm)
{
-
- TLS_DBG ("Start to call tls_async_init\n");
evt_pool_init (vm);
return 0;
diff --git a/src/plugins/tlsopenssl/tls_openssl.c b/src/plugins/tlsopenssl/tls_openssl.c
index 91c9de758b4..b675d795cd8 100644
--- a/src/plugins/tlsopenssl/tls_openssl.c
+++ b/src/plugins/tlsopenssl/tls_openssl.c
@@ -167,13 +167,26 @@ vpp_ssl_async_process_event (tls_ctx_t * ctx,
{
SSL_set_async_callback (oc->ssl, (void *) engine_cb->callback,
(void *) engine_cb->arg);
- TLS_DBG ("set callback to engine %p\n", engine_cb->callback);
+ TLS_DBG (2, "set callback to engine %p\n", engine_cb->callback);
}
- /* associated fd with context for return */
- TLS_DBG ("completed assoicated fd with tls session\n");
return 0;
}
+
+/* Due to engine busy state, VPP needs to retry later */
+static int
+vpp_ssl_async_retry_func (tls_ctx_t * ctx, openssl_resume_handler * handler)
+{
+ openssl_ctx_t *oc = (openssl_ctx_t *) ctx;
+
+ if (vpp_add_async_run_event (ctx, handler))
+ {
+ SSL_set_async_estatus (oc->ssl, 0);
+ }
+ return 0;
+
+}
+
#endif
int
@@ -182,6 +195,7 @@ openssl_ctx_handshake_rx (tls_ctx_t * ctx, stream_session_t * tls_session)
openssl_ctx_t *oc = (openssl_ctx_t *) ctx;
int rv = 0, err;
#ifdef HAVE_OPENSSL_ASYNC
+ int estatus;
openssl_resume_handler *myself;
#endif
@@ -200,9 +214,14 @@ openssl_ctx_handshake_rx (tls_ctx_t * ctx, stream_session_t * tls_session)
err = SSL_get_error (oc->ssl, rv);
openssl_try_handshake_write (oc, tls_session);
#ifdef HAVE_OPENSSL_ASYNC
+ myself = openssl_ctx_handshake_rx;
+ if (SSL_get_async_estatus (oc->ssl, &estatus)
+ && (estatus == ENGINE_STATUS_RETRY))
+ {
+ vpp_ssl_async_retry_func (ctx, myself);
+ }
if (err == SSL_ERROR_WANT_ASYNC)
{
- myself = openssl_ctx_handshake_rx;
vpp_ssl_async_process_event (ctx, myself);
}
#endif
diff --git a/src/plugins/tlsopenssl/tls_openssl.h b/src/plugins/tlsopenssl/tls_openssl.h
index b01814939af..c1a21e57317 100644
--- a/src/plugins/tlsopenssl/tls_openssl.h
+++ b/src/plugins/tlsopenssl/tls_openssl.h
@@ -55,6 +55,8 @@ tls_ctx_t *openssl_ctx_get_w_thread (u32 ctx_index, u8 thread_index);
openssl_tls_callback_t *vpp_add_async_pending_event (tls_ctx_t * ctx,
openssl_resume_handler *
handler);
+int vpp_add_async_run_event (tls_ctx_t * ctx, openssl_resume_handler *
+ handler);
void openssl_polling_start (ENGINE * engine);
int openssl_engine_register (char *engine, char *alg);
void openssl_async_node_enable_disable (u8 is_en);
diff --git a/src/vnet/tls/tls.h b/src/vnet/tls/tls.h
index b311578f678..f67f307550b 100644
--- a/src/vnet/tls/tls.h
+++ b/src/vnet/tls/tls.h
@@ -32,7 +32,7 @@
if (_lvl <= TLS_DEBUG) \
clib_warning (_fmt, ##_args)
#else
-#define TLS_DBG(_fmt, _args...)
+#define TLS_DBG(_lvl, _fmt, _args...)
#endif
/* *INDENT-OFF* */
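With the tls.h change above, the disabled variant of TLS_DBG takes the same arguments as the enabled one, so call sites can pass a verbosity level unconditionally. A minimal sketch of the resulting macro pair, assuming the usual compile-time TLS_DEBUG threshold guard shown in the enabled branch:

#if TLS_DEBUG
#define TLS_DBG(_lvl, _fmt, _args...) \
  if (_lvl <= TLS_DEBUG)              \
    clib_warning (_fmt, ##_args)
#else
#define TLS_DBG(_lvl, _fmt, _args...)
#endif

/* Call sites now pass the level first, e.g. */
TLS_DBG (2, "Set event %d to run\n", event_index);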
ref='#n541'>541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754
/*
 * Copyright (c) 2019 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/session/application.h>
#include <vnet/session/application_interface.h>
#include <vnet/session/session.h>

/**
 * Pool of workers associated with apps
 */
static app_worker_t *app_workers;

app_worker_t *
app_worker_alloc (application_t * app)
{
  app_worker_t *app_wrk;
  pool_get (app_workers, app_wrk);
  clib_memset (app_wrk, 0, sizeof (*app_wrk));
  app_wrk->wrk_index = app_wrk - app_workers;
  app_wrk->app_index = app->app_index;
  app_wrk->wrk_map_index = ~0;
  app_wrk->connects_seg_manager = APP_INVALID_SEGMENT_MANAGER_INDEX;
  app_wrk->first_segment_manager = APP_INVALID_SEGMENT_MANAGER_INDEX;
  APP_DBG ("New app %v worker %u", app->name, app_wrk->wrk_index);
  return app_wrk;
}

app_worker_t *
app_worker_get (u32 wrk_index)
{
  return pool_elt_at_index (app_workers, wrk_index);
}

app_worker_t *
app_worker_get_if_valid (u32 wrk_index)
{
  if (pool_is_free_index (app_workers, wrk_index))
    return 0;
  return pool_elt_at_index (app_workers, wrk_index);
}

void
app_worker_free (app_worker_t * app_wrk)
{
  application_t *app = application_get (app_wrk->app_index);
  vnet_unlisten_args_t _a, *a = &_a;
  u64 handle, *handles = 0;
  segment_manager_t *sm;
  session_t *ls;
  u32 sm_index;
  int i;

  /*
   *  Listener cleanup
   */

  /* *INDENT-OFF* */
  hash_foreach (handle, sm_index, app_wrk->listeners_table, ({
    ls = listen_session_get_from_handle (handle);
    vec_add1 (handles, app_listen_session_handle (ls));
    sm = segment_manager_get (sm_index);
    sm->app_wrk_index = SEGMENT_MANAGER_INVALID_APP_INDEX;
  }));
  /* *INDENT-ON* */

  for (i = 0; i < vec_len (handles); i++)
    {
      a->app_index = app->app_index;
      a->wrk_map_index = app_wrk->wrk_map_index;
      a->handle = handles[i];
      /* seg manager is removed when unbind completes */
      (void) vnet_unlisten (a);
    }

  /*
   * Connects segment manager cleanup
   */

  if (app_wrk->connects_seg_manager != APP_INVALID_SEGMENT_MANAGER_INDEX)
    {
      sm = segment_manager_get (app_wrk->connects_seg_manager);
      sm->app_wrk_index = SEGMENT_MANAGER_INVALID_APP_INDEX;
      sm->first_is_protected = 0;
      segment_manager_init_free (sm);
    }

  /* If first segment manager is used by a listener */
  if (app_wrk->first_segment_manager != APP_INVALID_SEGMENT_MANAGER_INDEX
      && app_wrk->first_segment_manager != app_wrk->connects_seg_manager)
    {
      sm = segment_manager_get (app_wrk->first_segment_manager);
      sm->first_is_protected = 0;
      sm->app_wrk_index = SEGMENT_MANAGER_INVALID_APP_INDEX;
      /* .. and has no fifos, e.g. it might be used for redirected sessions,
       * remove it */
      if (!segment_manager_has_fifos (sm))
	segment_manager_free (sm);
    }

  if (CLIB_DEBUG)
    clib_memset (app_wrk, 0xfe, sizeof (*app_wrk));
  pool_put (app_workers, app_wrk);
}

application_t *
app_worker_get_app (u32 wrk_index)
{
  app_worker_t *app_wrk;
  app_wrk = app_worker_get_if_valid (wrk_index);
  if (!app_wrk)
    return 0;
  return application_get_if_valid (app_wrk->app_index);
}

static segment_manager_t *
app_worker_alloc_segment_manager (app_worker_t * app_wrk)
{
  segment_manager_t *sm = 0;

  /* If the first segment manager is not in use, don't allocate a new one */
  if (app_wrk->first_segment_manager != APP_INVALID_SEGMENT_MANAGER_INDEX
      && app_wrk->first_segment_manager_in_use == 0)
    {
      sm = segment_manager_get (app_wrk->first_segment_manager);
      app_wrk->first_segment_manager_in_use = 1;
      return sm;
    }

  sm = segment_manager_alloc ();
  sm->app_wrk_index = app_wrk->wrk_index;

  return sm;
}

static int
app_worker_alloc_session_fifos (segment_manager_t * sm, session_t * s)
{
  svm_fifo_t *rx_fifo = 0, *tx_fifo = 0;
  int rv;

  if ((rv = segment_manager_alloc_session_fifos (sm, s->thread_index,
						 &rx_fifo, &tx_fifo)))
    return rv;

  rx_fifo->master_session_index = s->session_index;
  rx_fifo->master_thread_index = s->thread_index;

  tx_fifo->master_session_index = s->session_index;
  tx_fifo->master_thread_index = s->thread_index;

  s->rx_fifo = rx_fifo;
  s->tx_fifo = tx_fifo;
  return 0;
}

int
app_worker_init_listener (app_worker_t * app_wrk, session_t * ls)
{
  segment_manager_t *sm;

  /* Allocate segment manager. All sessions derived out of a listen session
   * have fifos allocated by the same segment manager. */
  if (!(sm = app_worker_alloc_segment_manager (app_wrk)))
    return -1;

  /* Keep track of the segment manager for the listener or this worker */
  hash_set (app_wrk->listeners_table, listen_session_get_handle (ls),
	    segment_manager_index (sm));

  if (transport_connection_is_cless (session_get_transport (ls)))
    {
      if (ls->rx_fifo)
	{
	  clib_warning ("sharing of connectionless listeners not supported");
	  return -1;
	}
      if (app_worker_alloc_session_fifos (sm, ls))
	return -1;
    }
  return 0;
}

int
app_worker_start_listen (app_worker_t * app_wrk,
			 app_listener_t * app_listener)
{
  session_t *ls;

  if (clib_bitmap_get (app_listener->workers, app_wrk->wrk_map_index))
    return VNET_API_ERROR_ADDRESS_IN_USE;

  app_listener->workers = clib_bitmap_set (app_listener->workers,
					   app_wrk->wrk_map_index, 1);

  if (app_listener->session_index != SESSION_INVALID_INDEX)
    {
      ls = session_get (app_listener->session_index, 0);
      if (app_worker_init_listener (app_wrk, ls))
	return -1;
    }

  if (app_listener->local_index != SESSION_INVALID_INDEX)
    {
      ls = session_get (app_listener->local_index, 0);
      if (app_worker_init_listener (app_wrk, ls))
	return -1;
    }

  return 0;
}

static void
app_worker_stop_listen_session (app_worker_t * app_wrk, session_t * ls)
{
  session_handle_t handle;
  segment_manager_t *sm;
  uword *sm_indexp;

  handle = listen_session_get_handle (ls);
  sm_indexp = hash_get (app_wrk->listeners_table, handle);
  if (PREDICT_FALSE (!sm_indexp))
    return;

  /* Dealloc fifos first, if any, to avoid a cleanup attempt further down */
  if (ls->rx_fifo)
    {
      segment_manager_dealloc_fifos (ls->rx_fifo, ls->tx_fifo);
      ls->tx_fifo = ls->rx_fifo = 0;
    }

  sm = segment_manager_get (*sm_indexp);
  if (app_wrk->first_segment_manager == *sm_indexp)
    {
      /* Delete sessions but don't remove segment manager */
      app_wrk->first_segment_manager_in_use = 0;
      segment_manager_del_sessions (sm);
    }
  else
    {
      segment_manager_init_free (sm);
    }
  hash_unset (app_wrk->listeners_table, handle);
}

int
app_worker_stop_listen (app_worker_t * app_wrk, app_listener_t * al)
{
  session_t *ls;

  if (!clib_bitmap_get (al->workers, app_wrk->wrk_map_index))
    return 0;

  if (al->session_index != SESSION_INVALID_INDEX)
    {
      ls = listen_session_get (al->session_index);
      app_worker_stop_listen_session (app_wrk, ls);
    }

  if (al->local_index != SESSION_INVALID_INDEX)
    {
      ls = listen_session_get (al->local_index);
      app_worker_stop_listen_session (app_wrk, ls);
    }

  clib_bitmap_set_no_check (al->workers, app_wrk->wrk_map_index, 0);
  if (clib_bitmap_is_zero (al->workers))
    app_listener_cleanup (al);

  return 0;
}

int
app_worker_init_accepted (session_t * s)
{
  app_worker_t *app_wrk;
  segment_manager_t *sm;
  session_t *listener;
  application_t *app;

  listener = listen_session_get_from_handle (s->listener_handle);
  app_wrk = application_listener_select_worker (listener);
  s->app_wrk_index = app_wrk->wrk_index;

  app = application_get (app_wrk->app_index);
  if (app->cb_fns.fifo_tuning_callback)
    s->flags |= SESSION_F_CUSTOM_FIFO_TUNING;

  sm = app_worker_get_listen_segment_manager (app_wrk, listener);
  if (app_worker_alloc_session_fifos (sm, s))
    return -1;

  return 0;
}

int
app_worker_accept_notify (app_worker_t * app_wrk, session_t * s)
{
  application_t *app = application_get (app_wrk->app_index);
  return app->cb_fns.session_accept_callback (s);
}

int
app_worker_init_connected (app_worker_t * app_wrk, session_t * s)
{
  application_t *app = application_get (app_wrk->app_index);
  segment_manager_t *sm;

  /* Allocate fifos for session, unless the app is a builtin proxy */
  if (!application_is_builtin_proxy (app))
    {
      sm = app_worker_get_connect_segment_manager (app_wrk);
      if (app_worker_alloc_session_fifos (sm, s))
	return -1;
    }

  if (app->cb_fns.fifo_tuning_callback)
    s->flags |= SESSION_F_CUSTOM_FIFO_TUNING;

  return 0;
}

int
app_worker_connect_notify (app_worker_t * app_wrk, session_t * s, u32 opaque)
{
  application_t *app = application_get (app_wrk->app_index);
  return app->cb_fns.session_connected_callback (app_wrk->wrk_index, opaque,
						 s, s == 0 /* is_fail */ );
}

int
app_worker_close_notify (app_worker_t * app_wrk, session_t * s)
{
  application_t *app = application_get (app_wrk->app_index);
  app->cb_fns.session_disconnect_callback (s);
  return 0;
}

int
app_worker_transport_closed_notify (app_worker_t * app_wrk, session_t * s)
{
  application_t *app = application_get (app_wrk->app_index);
  if (app->cb_fns.session_transport_closed_callback)
    app->cb_fns.session_transport_closed_callback (s);
  return 0;
}

int
app_worker_reset_notify (app_worker_t * app_wrk, session_t * s)
{
  application_t *app = application_get (app_wrk->app_index);
  app->cb_fns.session_reset_callback (s);
  return 0;
}

int
app_worker_cleanup_notify (app_worker_t * app_wrk, session_t * s,
			   session_cleanup_ntf_t ntf)
{
  application_t *app = application_get (app_wrk->app_index);
  if (app->cb_fns.session_cleanup_callback)
    app->cb_fns.session_cleanup_callback (s, ntf);
  return 0;
}

int
app_worker_builtin_rx (app_worker_t * app_wrk, session_t * s)
{
  application_t *app = application_get (app_wrk->app_index);
  app->cb_fns.builtin_app_rx_callback (s);
  return 0;
}

int
app_worker_builtin_tx (app_worker_t * app_wrk, session_t * s)
{
  application_t *app = application_get (app_wrk->app_index);

  if (!app->cb_fns.builtin_app_tx_callback)
    return 0;

  app->cb_fns.builtin_app_tx_callback (s);
  return 0;
}

int
app_worker_migrate_notify (app_worker_t * app_wrk, session_t * s,
			   session_handle_t new_sh)
{
  application_t *app = application_get (app_wrk->app_index);
  app->cb_fns.session_migrate_callback (s, new_sh);
  return 0;
}

int
app_worker_own_session (app_worker_t * app_wrk, session_t * s)
{
  segment_manager_t *sm;
  svm_fifo_t *rxf, *txf;

  if (s->session_state == SESSION_STATE_LISTENING)
    return application_change_listener_owner (s, app_wrk);

  s->app_wrk_index = app_wrk->wrk_index;

  rxf = s->rx_fifo;
  txf = s->tx_fifo;

  if (!rxf || !txf)
    return 0;

  s->rx_fifo = 0;
  s->tx_fifo = 0;

  sm = app_worker_get_or_alloc_connect_segment_manager (app_wrk);
  if (app_worker_alloc_session_fifos (sm, s))
    return -1;

  if (!svm_fifo_is_empty_cons (rxf))
    svm_fifo_clone (s->rx_fifo, rxf);

  if (!svm_fifo_is_empty_cons (txf))
    svm_fifo_clone (s->tx_fifo, txf);

  segment_manager_dealloc_fifos (rxf, txf);

  return 0;
}

int
app_worker_connect_session (app_worker_t * app, session_endpoint_t * sep,
			    u32 api_context)
{
  int rv;

  /* Make sure we have a segment manager for connects */
  app_worker_alloc_connects_segment_manager (app);

  if ((rv = session_open (app->wrk_index, sep, api_context)))
    return rv;

  return 0;
}

int
app_worker_session_fifo_tuning (app_worker_t * app_wrk, session_t * s,
				svm_fifo_t * f,
				session_ft_action_t act, u32 len)
{
  application_t *app = application_get (app_wrk->app_index);
  return app->cb_fns.fifo_tuning_callback (s, f, act, len);
}

int
app_worker_alloc_connects_segment_manager (app_worker_t * app_wrk)
{
  segment_manager_t *sm;

  if (app_wrk->connects_seg_manager == APP_INVALID_SEGMENT_MANAGER_INDEX)
    {
      sm = app_worker_alloc_segment_manager (app_wrk);
      if (sm == 0)
	return -1;
      app_wrk->connects_seg_manager = segment_manager_index (sm);
    }
  return 0;
}

segment_manager_t *
app_worker_get_connect_segment_manager (app_worker_t * app)
{
  ASSERT (app->connects_seg_manager != (u32) ~ 0);
  return segment_manager_get (app->connects_seg_manager);
}

segment_manager_t *
app_worker_get_or_alloc_connect_segment_manager (app_worker_t * app_wrk)
{
  if (app_wrk->connects_seg_manager == (u32) ~ 0)
    app_worker_alloc_connects_segment_manager (app_wrk);
  return segment_manager_get (app_wrk->connects_seg_manager);
}

segment_manager_t *
app_worker_get_listen_segment_manager (app_worker_t * app,
				       session_t * listener)
{
  uword *smp;
  smp = hash_get (app->listeners_table, listen_session_get_handle (listener));
  ALWAYS_ASSERT (smp != 0);
  return segment_manager_get (*smp);
}

session_t *
app_worker_first_listener (app_worker_t * app_wrk, u8 fib_proto,
			   u8 transport_proto)
{
  session_t *listener;
  u64 handle;
  u32 sm_index;
  u8 sst;

  sst = session_type_from_proto_and_ip (transport_proto,
					fib_proto == FIB_PROTOCOL_IP4);

  /* *INDENT-OFF* */
   hash_foreach (handle, sm_index, app_wrk->listeners_table, ({
     listener = listen_session_get_from_handle (handle);
     if (listener->session_type == sst
	 && !(listener->flags & SESSION_F_PROXY))
       return listener;
   }));
  /* *INDENT-ON* */

  return 0;
}

session_t *
app_worker_proxy_listener (app_worker_t * app_wrk, u8 fib_proto,
			   u8 transport_proto)
{
  session_t *listener;
  u64 handle;
  u32 sm_index;
  u8 sst;

  sst = session_type_from_proto_and_ip (transport_proto,
					fib_proto == FIB_PROTOCOL_IP4);

  /* *INDENT-OFF* */
   hash_foreach (handle, sm_index, app_wrk->listeners_table, ({
     listener = listen_session_get_from_handle (handle);
     if (listener->session_type == sst && (listener->flags & SESSION_F_PROXY))
       return listener;
   }));
  /* *INDENT-ON* */

  return 0;
}

/**
 * Send an API message to the external app, to map new segment
 */
int
app_worker_add_segment_notify (app_worker_t * app_wrk, u64 segment_handle)
{
  application_t *app = application_get (app_wrk->app_index);

  return app->cb_fns.add_segment_callback (app_wrk->wrk_index,
					   segment_handle);
}

int
app_worker_del_segment_notify (app_worker_t * app_wrk, u64 segment_handle)
{
  application_t *app = application_get (app_wrk->app_index);
  return app->cb_fns.del_segment_callback (app_wrk->wrk_index,
					   segment_handle);
}

static inline u8
app_worker_application_is_builtin (app_worker_t * app_wrk)
{
  return app_wrk->app_is_builtin;
}

static inline int
app_send_io_evt_rx (app_worker_t * app_wrk, session_t * s)
{
  session_event_t *evt;
  svm_msg_q_msg_t msg;
  svm_msg_q_t *mq;

  if (app_worker_application_is_builtin (app_wrk))
    return app_worker_builtin_rx (app_wrk, s);

  if (svm_fifo_has_event (s->rx_fifo))
    return 0;

  mq = app_wrk->event_queue;
  svm_msg_q_lock (mq);

  if (PREDICT_FALSE (svm_msg_q_is_full (mq)))
    {
      clib_warning ("evt q full");
      svm_msg_q_unlock (mq);
      return -1;
    }

  if (PREDICT_FALSE (svm_msg_q_ring_is_full (mq, SESSION_MQ_IO_EVT_RING)))
    {
      clib_warning ("evt q rings full");
      svm_msg_q_unlock (mq);
      return -1;
    }

  msg = svm_msg_q_alloc_msg_w_ring (mq, SESSION_MQ_IO_EVT_RING);
  evt = (session_event_t *) svm_msg_q_msg_data (mq, &msg);
  evt->session_index = s->rx_fifo->client_session_index;
  evt->event_type = SESSION_IO_EVT_RX;

  (void) svm_fifo_set_event (s->rx_fifo);
  svm_msg_q_add_and_unlock (mq, &msg);

  return 0;
}

static inline int
app_send_io_evt_tx (app_worker_t * app_wrk, session_t * s)
{
  svm_msg_q_t *mq;
  session_event_t *evt;
  svm_msg_q_msg_t msg;

  if (app_worker_application_is_builtin (app_wrk))
    return app_worker_builtin_tx (app_wrk, s);

  mq = app_wrk->event_queue;
  svm_msg_q_lock (mq);

  if (PREDICT_FALSE (svm_msg_q_is_full (mq)))
    {
      clib_warning ("evt q full");
      svm_msg_q_unlock (mq);
      return -1;
    }

  if (PREDICT_FALSE (svm_msg_q_ring_is_full (mq, SESSION_MQ_IO_EVT_RING)))
    {
      clib_warning ("evt q rings full");
      svm_msg_q_unlock (mq);
      return -1;
    }

  msg = svm_msg_q_alloc_msg_w_ring (mq, SESSION_MQ_IO_EVT_RING);
  evt = (session_event_t *) svm_msg_q_msg_data (mq, &msg);
  evt->event_type = SESSION_IO_EVT_TX;
  evt->session_index = s->tx_fifo->client_session_index;

  svm_msg_q_add_and_unlock (mq, &msg);
  return 0;
}

/* *INDENT-OFF* */
typedef int (app_send_evt_handler_fn) (app_worker_t *app,
				       session_t *s);
static app_send_evt_handler_fn * const app_send_evt_handler_fns[2] = {
    app_send_io_evt_rx,
    app_send_io_evt_tx,
};
/* *INDENT-ON* */

/**
 * Send event to application
 *
 * From the queue's perspective the logic is blocking. However, if the queue
 * is full, we return an error instead of waiting.
 */
int
app_worker_lock_and_send_event (app_worker_t * app, session_t * s,
				u8 evt_type)
{
  return app_send_evt_handler_fns[evt_type] (app, s);
}
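
/*
 * Usage sketch (illustrative; 'wrk' and 's' stand for a valid app worker
 * and session): notify the app that data is available on s's rx fifo.
 *
 *   app_worker_lock_and_send_event (wrk, s, SESSION_IO_EVT_RX);
 *
 * evt_type indexes app_send_evt_handler_fns directly, so only
 * SESSION_IO_EVT_RX and SESSION_IO_EVT_TX are valid here.
 */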

u8 *
format_app_worker_listener (u8 * s, va_list * args)
{
  app_worker_t *app_wrk = va_arg (*args, app_worker_t *);
  u64 handle = va_arg (*args, u64);
  u32 sm_index = va_arg (*args, u32);
  int verbose = va_arg (*args, int);
  session_t *listener;
  const u8 *app_name;
  u8 *str;

  if (!app_wrk)
    {
      if (verbose)
	s = format (s, "%-40s%-25s%=10s%-15s%-15s%-10s", "Connection", "App",
		    "Wrk", "API Client", "ListenerID", "SegManager");
      else
	s = format (s, "%-40s%-25s%=10s", "Connection", "App", "Wrk");

      return s;
    }

  app_name = application_name_from_index (app_wrk->app_index);
  listener = listen_session_get_from_handle (handle);
  str = format (0, "%U", format_session, listener, verbose);

  if (verbose)
    {
      char buf[32];
      sprintf (buf, "%u(%u)", app_wrk->wrk_map_index, app_wrk->wrk_index);
      s = format (s, "%-40s%-25s%=10s%-15u%-15u%-10u", str, app_name,
		  buf, app_wrk->api_client_index, handle, sm_index);
    }
  else
    s = format (s, "%-40s%-25s%=10u", str, app_name, app_wrk->wrk_map_index);

  return s;
}

u8 *
format_app_worker (u8 * s, va_list * args)
{
  app_worker_t *app_wrk = va_arg (*args, app_worker_t *);
  u32 indent = 1;

  s = format (s, "%U wrk-index %u app-index %u map-index %u "
	      "api-client-index %d\n", format_white_space, indent,
	      app_wrk->wrk_index, app_wrk->app_index, app_wrk->wrk_map_index,
	      app_wrk->api_client_index);
  return s;
}

void
app_worker_format_connects (app_worker_t * app_wrk, int verbose)
{
  segment_manager_t *sm;

  /* Header */
  if (!app_wrk)
    {
      segment_manager_format_sessions (0, verbose);
      return;
    }

  if (app_wrk->connects_seg_manager == (u32) ~ 0)
    return;

  sm = segment_manager_get (app_wrk->connects_seg_manager);
  segment_manager_format_sessions (sm, verbose);
}

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */