/*
 * Copyright (c) 2018-2019 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/session/application_interface.h>
#include <vppinfra/lock.h>
#include <vnet/tls/tls.h>

static tls_main_t tls_main;
static tls_engine_vft_t *tls_vfts;

/* A TLS ctx handle packs the engine type in the upper bits and the
 * per-engine ctx index in the lower 24 bits */
#define TLS_INVALID_HANDLE	~0
#define TLS_IDX_MASK		0x00FFFFFF
#define TLS_ENGINE_TYPE_SHIFT	29

void tls_disconnect (u32 ctx_handle, u32 thread_index);

void
tls_disconnect_transport (tls_ctx_t * ctx)
{
  vnet_disconnect_args_t a = {
    .handle = ctx->tls_session_handle,
    .app_index = tls_main.app_index,
  };

  if (vnet_disconnect_session (&a))
    clib_warning ("disconnect returned");
}

tls_engine_type_t
tls_get_available_engine (void)
{
  int i;
  for (i = 0; i < vec_len (tls_vfts); i++)
    {
      if (tls_vfts[i].ctx_alloc)
	return i;
    }
  return TLS_ENGINE_NONE;
}

int
tls_add_vpp_q_rx_evt (session_t * s)
{
  if (svm_fifo_set_event (s->rx_fifo))
    session_send_io_evt_to_thread (s->rx_fifo, SESSION_IO_EVT_RX);
  return 0;
}

int
tls_add_vpp_q_builtin_rx_evt (session_t * s)
{
  if (svm_fifo_set_event (s->rx_fifo))
    session_send_io_evt_to_thread (s->rx_fifo, SESSION_IO_EVT_BUILTIN_RX);
  return 0;
}

int
tls_add_vpp_q_tx_evt (session_t * s)
{
  if (svm_fifo_set_event (s->tx_fifo))
    session_send_io_evt_to_thread (s->tx_fifo, SESSION_IO_EVT_TX);
  return 0;
}

int
tls_add_vpp_q_builtin_tx_evt (session_t * s)
{
  if (svm_fifo_set_event (s->tx_fifo))
    session_send_io_evt_to_thread_custom (s, s->thread_index,
					  SESSION_IO_EVT_BUILTIN_TX);
  return 0;
}

static inline int
tls_add_app_q_evt (app_worker_t * app, session_t * app_session)
{
  return app_worker_lock_and_send_event (app, app_session,
					 SESSION_IO_EVT_RX);
}

u32
tls_listener_ctx_alloc (void)
{
  tls_main_t *tm = &tls_main;
  tls_ctx_t *ctx;

  pool_get (tm->listener_ctx_pool, ctx);
  clib_memset (ctx, 0, sizeof (*ctx));
  return ctx - tm->listener_ctx_pool;
}

void
tls_listener_ctx_free (tls_ctx_t * ctx)
{
  if (CLIB_DEBUG)
    memset (ctx, 0xfb, sizeof (*ctx));
  pool_put (tls_main.listener_ctx_pool, ctx);
}

tls_ctx_t *
tls_listener_ctx_get (u32 ctx_index)
{
  return pool_elt_at_index (tls_main.listener_ctx_pool, ctx_index);
}

u32
tls_listener_ctx_index (tls_ctx_t * ctx)
{
  return (ctx - tls_main.listener_ctx_pool);
}

u32
tls_ctx_half_open_alloc (void)
{
  tls_main_t *tm = &tls_main;
  u8 will_expand = 0;
  tls_ctx_t *ctx;
  u32 ctx_index;

  pool_get_aligned_will_expand (tm->half_open_ctx_pool, will_expand, 0);
  if (PREDICT_FALSE (will_expand && vlib_num_workers ()))
    {
      clib_rwlock_writer_lock (&tm->half_open_rwlock);
      pool_get (tm->half_open_ctx_pool, ctx);
      ctx_index = ctx - tm->half_open_ctx_pool;
      clib_rwlock_writer_unlock (&tm->half_open_rwlock);
    }
  else
    {
      /* reader lock assumption: only main thread will call pool_get */
      clib_rwlock_reader_lock (&tm->half_open_rwlock);
      pool_get (tm->half_open_ctx_pool, ctx);
      ctx_index = ctx - tm->half_open_ctx_pool;
      clib_rwlock_reader_unlock (&tm->half_open_rwlock);
    }
  clib_memset (ctx, 0, sizeof (*ctx));
  return ctx_index;
}

void
tls_ctx_half_open_free (u32 ho_index)
{
  tls_main_t *tm = &tls_main;
  clib_rwlock_writer_lock (&tm->half_open_rwlock);
  pool_put_index (tls_main.half_open_ctx_pool, ho_index);
  clib_rwlock_writer_unlock (&tm->half_open_rwlock);
}

tls_ctx_t *
tls_ctx_half_open_get (u32 ctx_index)
{
  tls_main_t *tm = &tls_main;
  clib_rwlock_reader_lock (&tm->half_open_rwlock);
  return pool_elt_at_index (tm->half_open_ctx_pool, ctx_index);
}

void
tls_ctx_half_open_reader_unlock ()
{
  clib_rwlock_reader_unlock (&tls_main.half_open_rwlock);
}

u32
tls_ctx_half_open_index (tls_ctx_t * ctx)
{
  return (ctx - tls_main.half_open_ctx_pool);
}

void
tls_notify_app_enqueue (tls_ctx_t * ctx, session_t * app_session)
{
  app_worker_t *app_wrk;
  app_wrk = app_worker_get_if_valid (app_session->app_wrk_index);
  if (PREDICT_TRUE (app_wrk != 0))
    tls_add_app_q_evt (app_wrk, app_session);
}

int
tls_notify_app_accept (tls_ctx_t * ctx)
{
  session_t *app_listener, *app_session;
  app_worker_t *app_wrk;
  tls_ctx_t *lctx;
  int rv;

  lctx = tls_listener_ctx_get (ctx->listener_ctx_index);
  app_listener = listen_session_get_from_handle (lctx->app_session_handle);

  app_session = session_get (ctx->c_s_index, ctx->c_thread_index);
  app_session->app_wrk_index = ctx->parent_app_wrk_index;
  app_session->connection_index = ctx->tls_ctx_handle;
  app_session->session_type = app_listener->session_type;
  app_session->listener_index = app_listener->session_index;
  app_session->session_state = SESSION_STATE_ACCEPTING;

  if ((rv = app_worker_init_accepted (app_session)))
    {
      TLS_DBG (1, "failed to allocate fifos");
      session_free (app_session);
      return rv;
    }
  ctx->app_session_handle = session_handle (app_session);
  session_lookup_add_connection (&ctx->connection,
				 session_handle (app_session));
  ctx->parent_app_wrk_index = app_session->app_wrk_index;
  app_wrk = app_worker_get (app_session->app_wrk_index);
  return app_worker_accept_notify (app_wrk, app_session);
}

int
tls_notify_app_connected (tls_ctx_t * ctx, u8 is_failed)
{
  session_t *app_session;
  app_worker_t *app_wrk;

  app_wrk = app_worker_get_if_valid (ctx->parent_app_wrk_index);
  if (!app_wrk)
    {
      tls_disconnect_transport (ctx);
      return -1;
    }

  if (is_failed)
    goto failed;

  app_session = session_get (ctx->c_s_index, ctx->c_thread_index);
  app_session->app_wrk_index = ctx->parent_app_wrk_index;
  app_session->connection_index = ctx->tls_ctx_handle;
  app_session->session_type =
    session_type_from_proto_and_ip (TRANSPORT_PROTO_TLS, ctx->tcp_is_ip4);

  if (app_worker_init_connected (app_wrk, app_session))
    goto failed;

  app_session->session_state = SESSION_STATE_CONNECTING;
  if (app_worker_connect_notify (app_wrk, app_session,
				 ctx->parent_app_api_context))
    {
      TLS_DBG (1, "failed to notify app");
      tls_disconnect (ctx->tls_ctx_handle, vlib_get_thread_index ());
      return -1;
    }

  ctx->app_session_handle = session_handle (app_session);
  app_session->session_state = SESSION_STATE_READY;
  session_lookup_add_connection (&ctx->connection,
				 session_handle (app_session));

  return 0;

failed:
  tls_disconnect (ctx->tls_ctx_handle, vlib_get_thread_index ());
  return app_worker_connect_notify (app_wrk, 0, ctx->parent_app_api_context);
}

static inline void
tls_ctx_parse_handle (u32 ctx_handle, u32 * ctx_index, u32 * engine_type)
{
  *ctx_index = ctx_handle & TLS_IDX_MASK;
  *engine_type = ctx_handle >> TLS_ENGINE_TYPE_SHIFT;
}

static inline tls_engine_type_t
tls_get_engine_type (tls_engine_type_t preferred)
{
  if (!tls_vfts[preferred].ctx_alloc)
    return tls_get_available_engine ();
  return preferred;
}

static inline u32
tls_ctx_alloc (tls_engine_type_t engine_type)
{
  u32 ctx_index;
  ctx_index = tls_vfts[engine_type].ctx_alloc ();
  return (((u32) engine_type << TLS_ENGINE_TYPE_SHIFT) | ctx_index);
}

static inline tls_ctx_t *
tls_ctx_get (u32 ctx_handle)
{
  u32 ctx_index, engine_type;
  tls_ctx_parse_handle (ctx_handle, &ctx_index, &engine_type);
  return tls_vfts[engine_type].ctx_get (ctx_index);
}

static inline tls_ctx_t *
tls_ctx_get_w_thread (u32 ctx_handle, u8 thread_index)
{
  u32 ctx_index, engine_type;
  tls_ctx_parse_handle (ctx_handle, &ctx_index, &engine_type);
  return tls_vfts[engine_type].ctx_get_w_thread (ctx_index, thread_index);
}

static inline int
tls_ctx_init_server (tls_ctx_t * ctx)
{
  return tls_vfts[ctx->tls_ctx_engine].ctx_init_server (ctx);
}

static inline int
tls_ctx_init_client (tls_ctx_t * ctx)
{
  return tls_vfts[ctx->tls_ctx_engine].ctx_init_client (ctx);
}

static inline int
tls_ctx_write (tls_ctx_t * ctx, session_t * app_session)
{
  return tls_vfts[ctx->tls_ctx_engine].ctx_write (ctx, app_session);
}

static inline int
tls_ctx_read (tls_ctx_t * ctx, session_t * tls_session)
{
  return tls_vfts[ctx->tls_ctx_engine].ctx_read (ctx, tls_session);
}

static inline int
tls_ctx_transport_close (tls_ctx_t * ctx)
{
  return tls_vfts[ctx->tls_ctx_engine].ctx_transport_close (ctx);
}

static inline int
tls_ctx_app_close (tls_ctx_t * ctx)
{
  return tls_vfts[ctx->tls_ctx_engine].ctx_app_close (ctx);
}

void
tls_ctx_free (tls_ctx_t * ctx)
{
  vec_free (ctx->srv_hostname);
  tls_vfts[ctx->tls_ctx_engine].ctx_free (ctx);
}

u8
tls_ctx_handshake_is_over (tls_ctx_t * ctx)
{
  return tls_vfts[ctx->tls_ctx_engine].ctx_handshake_is_over (ctx);
}

void
tls_session_reset_callback (session_t * s)
{
  clib_warning ("called...");
}

int
tls_add_segment_callback (u32 client_index, u64 segment_handle)
{
  /* No-op for builtin */
  return 0;
}

int
tls_del_segment_callback (u32 client_index, u64 segment_handle)
{
  return 0;
}

void
tls_session_disconnect_callback (session_t * tls_session)
{
  tls_ctx_t *ctx;

  TLS_DBG (1, "TCP disconnecting handle %x session %u", tls_session->opaque,
	   tls_session->session_index);

  ctx = tls_ctx_get (tls_session->opaque);
  ctx->is_passive_close = 1;
  tls_ctx_transport_close (ctx);
}

int
tls_session_accept_callback (session_t * tls_session)
{
  session_t *tls_listener, *app_session;
  tls_ctx_t *lctx, *ctx;
  u32 ctx_handle;

  tls_listener = listen_session_get (tls_session->listener_index);
  lctx = tls_listener_ctx_get (tls_listener->opaque);

  ctx_handle = tls_ctx_alloc (lctx->tls_ctx_engine);
  ctx = tls_ctx_get (ctx_handle);
  memcpy (ctx, lctx, sizeof (*lctx));
  ctx->c_thread_index = vlib_get_thread_index ();
  ctx->tls_ctx_handle = ctx_handle;
  tls_session->session_state = SESSION_STATE_READY;
  tls_session->opaque = ctx_handle;
  ctx->tls_session_handle = session_handle (tls_session);
  ctx->listener_ctx_index = tls_listener->opaque;

  /* Preallocate app session. Avoids allocating a sessi
/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/**
 * @brief
 * The data-path object representing L3 proxying of the packet, i.e. the
 * packet is for-us
 */
#include <vlib/vlib.h>
#include <vnet/ip/ip.h>
#include <vnet/dpo/l3_proxy_dpo.h>

/**
 * @brief pool of all l3_proxy DPOs
 */
l3_proxy_dpo_t *l3_proxy_dpo_pool;

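/**
 * @brief Allocate a new l3_proxy DPO from the pool and zero it
 */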
static l3_proxy_dpo_t *
l3_proxy_dpo_alloc (void)
{
    l3_proxy_dpo_t *l3p;

    pool_get_aligned(l3_proxy_dpo_pool, l3p, CLIB_CACHE_LINE_BYTES);
    clib_memset(l3p, 0, sizeof(*l3p));

    return (l3p);
}

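/**
 * @brief Recover the l3_proxy DPO object from a generic dpo_id_t,
 *        asserting that the DPO is indeed of type DPO_L3_PROXY
 */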
static l3_proxy_dpo_t *
l3_proxy_dpo_get_from_dpo (const dpo_id_t *dpo)
{
    ASSERT(DPO_L3_PROXY == dpo->dpoi_type);

    return (l3_proxy_dpo_get(dpo->dpoi_index));
}


/*
 * l3_proxy_dpo_add_or_lock
 *
 * The next_hop address here is used for source address selection in the DP.
 * The local adj is added to an interface's l3_proxy prefix; the next-hop
 * passed here is the local prefix on the same interface.
 */
void
l3_proxy_dpo_add_or_lock (dpo_proto_t proto,
                          u32 sw_if_index,
                          dpo_id_t *dpo)
{
    l3_proxy_dpo_t *