/*
 * Copyright (c) 2018-2019 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/session/application_interface.h>
#include <vppinfra/lock.h>
#include <vnet/tls/tls.h>

static tls_main_t tls_main;
static tls_engine_vft_t *tls_vfts;

/* A ctx handle packs the engine type in the top bits (TLS_ENGINE_TYPE_SHIFT)
 * and the per-engine ctx index in the low 24 bits (TLS_IDX_MASK) */
#define TLS_INVALID_HANDLE	~0
#define TLS_IDX_MASK		0x00FFFFFF
#define TLS_ENGINE_TYPE_SHIFT	29

void tls_disconnect (u32 ctx_handle, u32 thread_index);

void
tls_disconnect_transport (tls_ctx_t * ctx)
{
  vnet_disconnect_args_t a = {
    .handle = ctx->tls_session_handle,
    .app_index = tls_main.app_index,
  };

  if (vnet_disconnect_session (&a))
    clib_warning ("disconnect returned");
}

crypto_engine_type_t
tls_get_available_engine (void)
{
  int i;
  for (i = 0; i < vec_len (tls_vfts); i++)
    {
      if (tls_vfts[i].ctx_alloc)
	return i;
    }
  return CRYPTO_ENGINE_NONE;
}

int
tls_add_vpp_q_rx_evt (session_t * s)
{
  if (svm_fifo_set_event (s->rx_fifo))
    session_send_io_evt_to_thread (s->rx_fifo, SESSION_IO_EVT_RX);
  return 0;
}

int
tls_add_vpp_q_builtin_rx_evt (session_t * s)
{
  if (svm_fifo_set_event (s->rx_fifo))
    session_send_io_evt_to_thread (s->rx_fifo, SESSION_IO_EVT_BUILTIN_RX);
  return 0;
}

int
tls_add_vpp_q_tx_evt (session_t * s)
{
  if (svm_fifo_set_event (s->tx_fifo))
    session_send_io_evt_to_thread (s->tx_fifo, SESSION_IO_EVT_TX);
  return 0;
}

static inline int
tls_add_app_q_evt (app_worker_t * app, session_t * app_session)
{
  return app_worker_lock_and_send_event (app, app_session,
					 SESSION_IO_EVT_RX);
}

u32
tls_listener_ctx_alloc (void)
{
  tls_main_t *tm = &tls_main;
  tls_ctx_t *ctx;

  pool_get (tm->listener_ctx_pool, ctx);
  clib_memset (ctx, 0, sizeof (*ctx));
  return ctx - tm->listener_ctx_pool;
}

void
tls_listener_ctx_free (tls_ctx_t * ctx)
{
  if (CLIB_DEBUG)
    memset (ctx, 0xfb, sizeof (*ctx));
  pool_put (tls_main.listener_ctx_pool, ctx);
}

tls_ctx_t *
tls_listener_ctx_get (u32 ctx_index)
{
  return pool_elt_at_index (tls_main.listener_ctx_pool, ctx_index);
}

u32
tls_listener_ctx_index (tls_ctx_t * ctx)
{
  return (ctx - tls_main.listener_ctx_pool);
}

u32
tls_ctx_half_open_alloc (void)
{
  tls_main_t *tm = &tls_main;
  u8 will_expand = 0;
  tls_ctx_t *ctx;
  u32 ctx_index;

  pool_get_aligned_will_expand (tm->half_open_ctx_pool, will_expand, 0);
  if (PREDICT_FALSE (will_expand && vlib_num_workers ()))
    {
      clib_rwlock_writer_lock (&tm->half_open_rwlock);
      pool_get (tm->half_open_ctx_pool, ctx);
      ctx_index = ctx - tm->half_open_ctx_pool;
      clib_rwlock_writer_unlock (&tm->half_open_rwlock);
    }
  else
    {
      /* reader lock assumption: only main thread will call pool_get */
      clib_rwlock_reader_lock (&tm->half_open_rwlock);
      pool_get (tm->half_open_ctx_pool, ctx);
      ctx_index = ctx - tm->half_open_ctx_pool;
      clib_rwlock_reader_unlock (&tm->half_open_rwlock);
    }
  clib_memset (ctx, 0, sizeof (*ctx));
  return ctx_index;
}

void
tls_ctx_half_open_free (u32 ho_index)
{
  tls_main_t *tm = &tls_main;
  clib_rwlock_writer_lock (&tm->half_open_rwlock);
  pool_put_index (tls_main.half_open_ctx_pool, ho_index);
  clib_rwlock_writer_unlock (&tm->half_open_rwlock);
}

/* Returns with the reader lock held; caller must release it via
 * tls_ctx_half_open_reader_unlock */
tls_ctx_t *
tls_ctx_half_open_get (u32 ctx_index)
{
  tls_main_t *tm = &tls_main;
  clib_rwlock_reader_lock (&tm->half_open_rwlock);
  return pool_elt_at_index (tm->half_open_ctx_pool, ctx_index);
}

void
tls_ctx_half_open_reader_unlock ()
{
  clib_rwlock_reader_unlock (&tls_main.half_open_rwlock);
}

u32
tls_ctx_half_open_index (tls_ctx_t * ctx)
{
  return (ctx - tls_main.half_open_ctx_pool);
}

void
tls_notify_app_enqueue (tls_ctx_t * ctx, session_t * app_session)
{
  app_worker_t *app_wrk;
  app_wrk = app_worker_get_if_valid (app_session->app_wrk_index);
  if (PREDICT_TRUE (app_wrk != 0))
    tls_add_app_q_evt (app_wrk, app_session);
}

int
tls_notify_app_accept (tls_ctx_t * ctx)
{
  session_t *app_listener, *app_session;
  app_worker_t *app_wrk;
  tls_ctx_t *lctx;
  int rv;

  lctx = tls_listener_ctx_get (ctx->listener_ctx_index);
  app_listener = listen_session_get_from_handle (lctx->app_session_handle);

  app_session = session_get (ctx->c_s_index, ctx->c_thread_index);
  app_session->app_wrk_index = ctx->parent_app_wrk_index;
  app_session->connection_index = ctx->tls_ctx_handle;
  app_session->session_type = app_listener->session_type;
  app_session->listener_handle = listen_session_get_handle (app_listener);
  app_session->session_state = SESSION_STATE_ACCEPTING;

  if ((rv = app_worker_init_accepted (app_session)))
    {
      TLS_DBG (1, "failed to allocate fifos");
      session_free (app_session);
      return rv;
    }
  ctx->app_session_handle = session_handle (app_session);
  ctx->parent_app_wrk_index = app_session->app_wrk_index;
  app_wrk = app_worker_get (app_session->app_wrk_index);
  return app_worker_accept_notify (app_wrk, app_session);
}

int
tls_notify_app_connected (tls_ctx_t * ctx, session_error_t err)
{
  session_t *app_session;
  app_worker_t *app_wrk;

  app_wrk = app_worker_get_if_valid (ctx->parent_app_wrk_index);
  if (!app_wrk)
    {
      tls_disconnect_transport (ctx);
      return -1;
    }

  if (err)
    goto failed;

  app_session = session_get (ctx->c_s_index, ctx->c_thread_index);
  app_session->app_wrk_index = ctx->parent_app_wrk_index;
  app_session->connection_index = ctx->tls_ctx_handle;
  app_session->session_type =
    session_type_from_proto_and_ip (TRANSPORT_PROTO_TLS, ctx->tcp_is_ip4);

  if ((err = app_worker_init_connected (app_wrk, app_session)))
    goto failed;

  app_session->session_state = SESSION_STATE_CONNECTING;
  if (app_worker_connect_notify (app_wrk, app_session, SESSION_E_NONE,
				 ctx->parent_app_api_context))
    {
      TLS_DBG (1, "failed to notify app");
      tls_disconnect (ctx->tls_ctx_handle, vlib_get_thread_index ());
      return -1;
    }

  ctx->app_session_handle = session_handle (app_session);
  app_session->session_state = SESSION_STATE_READY;

  return 0;

failed:
  /* Free app session pre-allocated when transport was established */
  session_free (session_get (ctx->c_s_index, ctx->c_thread_index));
  ctx->no_app_session = 1;
  tls_disconnect (ctx->tls_ctx_handle, vlib_get_thread_index ());
  return app_worker_connect_notify (app_wrk, 0, err,
				    ctx->parent_app_api_context);
}

static inline void
tls_ctx_parse_handle (u32 ctx_handle, u32 * ctx_index, u32 * engine_type)
{
  *ctx_index = ctx_handle & TLS_IDX_MASK;
  *engine_type = ctx_handle >> TLS_ENGINE_TYPE_SHIFT;
}

static inline crypto_engine_type_t
tls_get_engine_type (crypto_engine_type_t preferred)
{
  if (!tls_vfts[preferred].ctx_alloc)
    return tls_get_available_engine ();
  return preferred;
}

static inline u32
tls_ctx_alloc (crypto_engine_type_t engine_type)
{
  u32 ctx_index;
  ctx_index = tls_vfts[engine_type].ctx_alloc ();
  return (((u32) engine_type << TLS_ENGINE_TYPE_SHIFT) | ctx_index);
}

static inline tls_ctx_t *
tls_ctx_get (u32 ctx_handle)
{
  u32 ctx_index, engine_type;
  tls_ctx_parse_handle (ctx_handle, &ctx_index, &engine_type);
  return tls_vfts[engine_type].ctx_get (ctx_index);
}

static inline tls_ctx_t *
tls_ctx_get_w_thread (u32 ctx_handle, u8 thread_index)
{
  u32 ctx_index, engine_type;
  tls_ctx_parse_handle (ctx_handle, &ctx_index, &engine_type);
  return tls_vfts[engine_type].ctx_get_w_thread (ctx_index, thread_index);
}

static inline int
tls_ctx_init_server (tls_ctx_t * ctx)
{
  return tls_vfts[ctx->tls_ctx_engine].ctx_init_server (ctx);
}

static inline int
tls_ctx_init_client (tls_ctx_t * ctx)
{
  return tls_vfts[ctx->tls_ctx_engine].ctx_init_client (ctx);
}

static inline int
tls_ctx_write (tls_ctx_t * ctx, session_t * app_session,
	       transport_send_params_t * sp)
{
  u32 n_wrote;

  sp->max_burst_size = sp->max_burst_size * TRANSPORT_PACER_MIN_MSS;
  n_wrote = tls_vfts[ctx->tls_ctx_engine].ctx_write (ctx, app_session, sp);
  return n_wrote > 0 ? clib_max (n_wrote / TRANSPORT_PACER_MIN_MSS, 1) : 0;
}

static inline int
tls_ctx_read (tls_ctx_t * ctx, session_t * tls_session)
{
  return tls_vfts[ctx->tls_ctx_engine].ctx_read (ctx, tls_session);
}

static inline int
tls_ctx_transport_close (tls_ctx_t * ctx)
{
  return tls_vfts[ctx->tls_ctx_engine].ctx_transport_close (ctx);
}

static inline int
tls_ctx_app_close (tls_ctx_t * ctx)
{
  return tls_vfts[ctx->tls_ctx_engine].ctx_app_close (ctx);
}

void
tls_ctx_free (tls_ctx_t * ctx)
{
  tls_vfts[ctx->tls_ctx_engine].ctx_free (ctx);
}

u8
tls_ctx_handshake_is_over (tls_ctx_t * ctx)
{
  return tls_vfts[ctx->tls_ctx_engine].ctx_handshake_is_over (ctx);
}

void
tls_session_reset_callback (session_t * s)
{
  tls_ctx_t *ctx;
  transport_connection_t *tc;
  session_t *app_session;

  ctx = tls_ctx_get (s->opaque);
  ctx->is_passive_close = 1;
  tc = &ctx->connection;
  if (tls_ctx_handshake_is_over (ctx))
    {
      session_transport_reset_notify (tc);
      session_transport_closed_notify (tc);
      tls_disconnect_transport (ctx);
    }
  else if ((app_session = session_get_if_valid (ctx->c_s_index,
						ctx->c_thread_index)))
    {
      session_free (app_session);
      ctx->c_s_index = SESSION_INVALID_INDEX;
      tls_disconnect_transport (ctx);
    }
}

int
tls_add_segment_callback (u32 client_index, u64 segment_handle)
{
  /* No-op for builtin */
  return 0;
}

int
tls_del_segment_callback (u32 client_index, u64 segment_handle)
{
  return 0;
}

void
tls_session_disconnect_callback (session_t * tls_session)
{
  tls_ctx_t *ctx;

  TLS_DBG (1, "TCP disconnecting handle %x session %u", tls_session->opaque,
	   tls_session->session_index);

  ASSERT (tls_session->thread_index == vlib_get_thread_index ()
	  || vlib_thread_is_main_w_barrier ());

  ctx = tls_ctx_get_w_thread (tls_session->opaque,
			      tls_session->thread_index);
  ctx->is_passive_close = 1;
  tls_ctx_transport_close (ctx);
}

int tls_session_accept_callback (se
/*
 *------------------------------------------------------------------
 * Copyright (c) 2020 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#ifndef _RDMA_MLX5DV_H_
#define _RDMA_MLX5DV_H_

/* infiniband/mlx5dv.h uses always_inline as a plain identifier, which
   collides with the vppinfra macro of the same name, so drop the macro
   around the include and restore it afterwards */
#undef always_inline
#include <infiniband/mlx5dv.h>
#define always_inline static_always_inline
#include <vppinfra/types.h>
#include <vppinfra/error.h>
/* CQE flags - bits 16-31 of dword at offset 0x1c */
#define CQE_FLAG_L4_OK			10
#define CQE_FLAG_L3_OK			9
#define CQE_FLAG_L2_OK			8
#define CQE_FLAG_IP_FRAG		7
#define CQE_FLAG_L4_HDR_TYPE(f)		(((f) >> 4) & 7)
#define CQE_FLAG_L3_HDR_TYPE_SHIFT	(2)
#define CQE_FLAG_L3_HDR_TYPE_MASK	(3 << CQE_FLAG_L3_HDR_TYPE_SHIFT)
#define CQE_FLAG_L3_HDR_TYPE(f)		(((f) & CQE_FLAG_L3_HDR_TYPE_MASK)  >> CQE_FLAG_L3_HDR_TYPE_SHIFT)
#define CQE_FLAG_L3_HDR_TYPE_IP4	1
#define CQE_FLAG_L3_HDR_TYPE_IP6	2
#define CQE_FLAG_IP_EXT_OPTS		1

/* CQE byte count (Striding RQ) */
#define CQE_BC_FILLER_MASK (1 << 31)
#define CQE_BC_CONSUMED_STRIDES_SHIFT (16)
#define CQE_BC_CONSUMED_STRIDES_MASK (0x3fff << CQE_BC_CONSUMED_STRIDES_SHIFT)
#define CQE_BC_BYTE_COUNT_MASK (0xffff)
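
/* Illustrative sketch (assumption, not part of the original header):
   splitting the striding-RQ byte-count dword, once byte-swapped to host
   order, with the CQE_BC_* masks above. A filler CQE consumes strides
   without carrying a packet. Helper names are hypothetical. */
static inline int
mlx5dv_cqe_bc_is_filler (u32 bc)
{
  return (bc & CQE_BC_FILLER_MASK) != 0;
}

static inline u32
mlx5dv_cqe_bc_strides (u32 bc)
{
  return (bc & CQE_BC_CONSUMED_STRIDES_MASK) >> CQE_BC_CONSUMED_STRIDES_SHIFT;
}

static inline u32
mlx5dv_cqe_bc_bytes (u32 bc)
{
  return bc & CQE_BC_BYTE_COUNT_MASK;
}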
typedef struct
{
  struct
  {
    u8 pad1[28];
    u16 flags;
    u8 pad2[14];
    union
    {
      u32 byte_cnt;
      u32 mini_cqe_num;
    };
    u8 pad3[12];
    u16 wqe_counter;
    u8 signature;
    u8 opcode_cqefmt_se_owner;
  };
} mlx5dv_cqe_t;

STATIC_ASSERT_SIZEOF (mlx5dv_cqe_t, 64);
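
/* A minimal sketch (assumption) of reading the packed last byte of the
   CQE: per the foreach_cqe_rx_field table below, the opcode sits in bits
   7-4 and the owner (phase) bit, which polling code checks against the
   expected phase, in bit 0. Helper names are hypothetical. */
static inline u8
mlx5dv_cqe_owner (mlx5dv_cqe_t * cqe)
{
  return cqe->opcode_cqefmt_se_owner & 1;
}

static inline u8
mlx5dv_cqe_opcode (mlx5dv_cqe_t * cqe)
{
  return cqe->opcode_cqefmt_se_owner >> 4;
}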

typedef struct
{
  union
  {
    u32 checksum;
    u32 rx_hash_result;
  };
  u32 byte_count;
} mlx5dv_mini_cqe_t;
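
/* Context (assumption based on mlx5 CQE compression, not stated in this
   header): with compression enabled, a compressed session starts with a
   "title" CQE whose mini_cqe_num reports how many 8-byte mini CQEs
   follow, packed eight per 64-byte CQE slot; each mini CQE carries only
   the per-packet byte count and the checksum or RX hash result. */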

typedef struct
{
  u64 dsz_and_lkey;
  u64 addr;
} mlx5dv_wqe_ds_t;		/* a WQE data segment */
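
/* A minimal sketch (assumption) of filling a receive WQE data segment:
   the device expects big-endian fields, with the byte count in the high
   dword of dsz_and_lkey and the memory-region lkey in the low dword,
   followed by the buffer DMA address. The helper name is hypothetical. */
static inline void
mlx5dv_wqe_ds_fill (mlx5dv_wqe_ds_t * ds, u32 byte_count, u32 lkey, u64 va)
{
  ds->dsz_and_lkey = clib_host_to_net_u64 (((u64) byte_count << 32) | lkey);
  ds->addr = clib_host_to_net_u64 (va);
}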

typedef struct
{
  u8 rsvd0[2];
  u16 next_wqe_index;
  u8 signature;
  u8 rsvd1[11];
} mlx5dv_wqe_srq_next_t;

#define foreach_cqe_rx_field \
  _(0x1c, 26, 26, l4_ok)	\
  _(0x1c, 25, 25, l3_ok)	\
  _(0x1c, 24, 24, l2_ok)	\
  _(0x1c, 23, 23, ip_frag)	\
  _(0x1c, 22, 20, l4_hdr_type)	\
  _(0x1c, 19, 18, l3_hdr_type)	\
  _(0x1c, 17, 17, ip_ext_opts)	\
  _(0x1c, 16, 16, cv)	\
  _(0x2c, 31,  0, byte_cnt)	\
  _(0x30, 63,  0, timestamp)	\
  _(0x38, 31, 24, rx_drop_counter)	\
  _(0x38, 23,  0, flow_tag)	\
  _(0x3c, 31, 16, wqe_counter)	\
  _(0x3c, 15,  8, signature)	\
  _(0x3c,  7,  4, opcode)	\
  _(0x3c,  3,  2, cqe_format)	\
  _(0x3c,  1,  1, sc)	\
  _(0x3c,  0,  0, owner)
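
/* A sketch (assumption, the real users live in the .c files) of how a
   consumer could expand foreach_cqe_rx_field into per-field accessors
   with the helpers below; fields up to 32 bits wide go through
   mlx5_get_u32/mlx5_set_bits, while the 64-bit timestamp needs
   mlx5_get_u64:

     #define _(offset, first, last, name) ...per-field accessor...
       foreach_cqe_rx_field
     #undef _
*/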


/* inline functions */

static inline u32
mlx5_get_u32 (void *start, int offset)
{
  return clib_net_to_host_u32 (*(u32 *) (((u8 *) start) + offset));
}

static inline u64
mlx5_get_u64 (void *start, int offset)
{
  return clib_net_to_host_u64 (*(u64 *) (((u8 *) start) + offset));
}

static inline void
mlx5_set_u32 (void *start, int offset, u32 value)
{
  (*(u32 *) (((u8 *) start) + offset)) = clib_host_to_net_u32 (value);
}

static inline void
mlx5_set_u64 (void *start, int offset, u64 value)
{
  (*(u64 *) (((u8 *) start) + offset)) = clib_host_to_net_u64 (value);
}
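
/* Usage sketch (assumption): per the foreach_cqe_rx_field table, the
   64-bit completion timestamp lives at CQE offset 0x30, so it can be
   fetched with mlx5_get_u64. The helper name is hypothetical. */
static inline u64
mlx5dv_cqe_timestamp (mlx5dv_cqe_t * cqe)
{
  return mlx5_get_u64 (cqe, 0x30);
}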

static inline void
mlx5_set_bits (void *start, int offset, int first, int last, u32 value)
{
  u32 mask = (1 << (first - last + 1)) - 1;
  u32 old = mlx5_get_u32 (start, offset);
  if ((last == 0) && (first == 31))