/* SPDX-License-Identifier: Apache-2.0
* Copyright(c) 2024 Cisco Systems, Inc.
*/
#ifndef SRC_VNET_TLS_TLS_INLINES_H_
#define SRC_VNET_TLS_TLS_INLINES_H_
#include <vnet/tls/tls.h>
static inline void
tls_ctx_parse_handle (u32 ctx_handle, u32 *ctx_index, u32 *engine_type)
{
  /* A context handle packs the crypto engine type in the high bits
   * (above TLS_ENGINE_TYPE_SHIFT) and the per-engine context pool
   * index in the low bits (TLS_IDX_MASK). Split it back out. */
  *engine_type = ctx_handle >> TLS_ENGINE_TYPE_SHIFT;
  *ctx_index = ctx_handle & TLS_IDX_MASK;
}
static inline u32
tls_ctx_alloc (crypto_engine_type_t engine_type)
{
  /* Allocate a context from the selected engine backend and return a
   * handle that encodes the engine type in the high bits and the
   * engine-local context index in the low bits. */
  u32 idx = tls_vfts[engine_type].ctx_alloc ();
  return ((u32) engine_type << TLS_ENGINE_TYPE_SHIFT) | idx;
}
static inline u32
tls_ctx_alloc_w_thread (crypto_engine_type_t engine_type, u32 thread_index)
{
  /* Thread-aware variant of tls_ctx_alloc: the engine allocates the
   * context on the given worker thread. The returned handle encodes
   * the engine type (high bits) and context index (low bits). */
  u32 idx = tls_vfts[engine_type].ctx_alloc_w_thread (thread_index);
  return ((u32) engine_type << TLS_ENGINE_TYPE_SHIFT) | idx;
}
static inline tls_ctx_t *
tls_ctx_get (u32 ctx_handle)
{
  /* Decode the handle and look the context up through the owning
   * engine's vft. */
  u32 idx, et;
  tls_ctx_parse_handle (ctx_handle, &idx, &et);
  return tls_vfts[et].ctx_get (idx);
}
static inline tls_ctx_t *
tls_ctx_get_w_thread (u32 ctx_handle, u8 thread_index)
{
  /* Decode the handle and fetch the context from the owning engine's
   * per-thread context pool. */
  u32 idx, et;
  tls_ctx_parse_handle (ctx_handle, &idx, &et);
  return tls_vfts[et].ctx_get_w_thread (idx, thread_index);
}
static inline void
tls_ctx_free (tls_ctx_t *ctx)
{
  /* Return the context to the engine backend that allocated it. */
  crypto_engine_type_t et = ctx->tls_ctx_engine;
  tls_vfts[et].ctx_free (ctx);
}
static inline int
tls_ctx_init_server (tls_ctx_t *ctx)
{
  /* Delegate server-side context initialization to the context's
   * engine backend; returns the backend's status code. */
  crypto_engine_type_t et = ctx->tls_ctx_engine;
  return tls_vfts[et].ctx_init_server (ctx);
}
static inline int
tls_ctx_init_client (tls_ctx_t *ctx)
{
  /* Delegate client-side context initialization to the context's
   * engine backend; returns the backend's status code. */
  crypto_engine_type_t et = ctx->tls_ctx_engine;
  return tls_vfts[et].ctx_init_client (ctx);
}
static inline u32
tls_ctx_attach (crypto_engine_type_t engine_type, u32 thread_index, void *ctx)
{
  /* Hand a detached context back to an engine on the given thread and
   * build a fresh handle: engine type in the high bits, the index the
   * engine assigned in the low bits. */
  u32 idx = tls_vfts[engine_type].ctx_attach (thread_index, ctx);
  return ((u32) engine_type << TLS_ENGINE_TYPE_SHIFT) | idx;
}
static inline void *
tls_ctx_detach (tls_ctx_t *ctx)
{
  /* Detach the context from its engine; the backend returns an opaque
   * pointer suitable for a later tls_ctx_attach. */
  crypto_engine_type_t et = ctx->tls_ctx_engine;
  return tls_vfts[et].ctx_detach (ctx);
}
static inline int
tls_ctx_write (tls_ctx_t *ctx, session_t *app_session,
	       transport_send_params_t *sp)
{
  /* NOTE(review): n_wrote is u32 while the function returns int; this
   * assumes the engine's ctx_write never reports a negative value —
   * confirm against the vft declaration. */
  u32 n_wrote;
  /* The session layer paces in segments; convert the budget to bytes
   * for the engine (assumes max_burst_size arrives in units of
   * TRANSPORT_PACER_MIN_MSS — TODO confirm with caller). */
  sp->max_burst_size = sp->max_burst_size * TRANSPORT_PACER_MIN_MSS;
  n_wrote = tls_vfts[ctx->tls_ctx_engine].ctx_write (ctx, app_session, sp);
  /* Report bytes consumed from the app session back to the caller. */
  sp->bytes_dequeued = n_wrote;
  /* Convert bytes written back to segments, rounding a partial
   * segment up to 1 so progress is never reported as zero. */
  return n_wrote > 0 ? clib_max (n_wrote / TRANSPORT_PACER_MIN_MSS, 1) : 0;
}
static inline int
tls_ctx_read (tls_ctx_t *ctx, session_t *tls_session)
{
  /* Let the engine backend drain and decrypt data from the underlying
   * tls session; returns the backend's status code. */
  crypto_engine_type_t et = ctx->tls_ctx_engine;
  return tls_vfts[et].ctx_read (ctx, tls_session);
}
static inline int
tls_ctx_transport_close (tls_ctx_t *ctx)
{
  /* Notify the engine backend that the underlying transport closed. */
  crypto_engine_type_t et = ctx->tls_ctx_engine;
  return tls_vfts[et].ctx_transport_close (ctx);
}
static inline int
tls_ctx_transport_reset (tls_ctx_t *ctx)
{
  /* Notify the engine backend that the underlying transport was
   * reset. */
  crypto_engine_type_t et = ctx->tls_ctx_engine;
  return tls_vfts[et].ctx_transport_reset (ctx);
}
static inline int
tls_ctx_app_close (tls_ctx_t *ctx)
{
  /* Forward an application-initiated close to the engine backend. */
  crypto_engine_type_t et = ctx->tls_ctx_engine;
  return tls_vfts[et].ctx_app_close (ctx);
}
static inline u8
tls_ctx_handshake_is_over (tls_ctx_t *ctx)
{
  /* Ask the engine backend whether the TLS handshake has completed. */
  crypto_engine_type_t et = ctx->tls_ctx_engine;
  return tls_vfts[et].ctx_handshake_is_over (ctx);
}
static inline int
tls_reinit_ca_chain (crypto_engine_type_t tls_engine_id)
{
  /* Ask the given engine backend to reload its CA certificate
   * chain; returns the backend's status code. */
  return tls_vfts[tls_engine_id].ctx_reinit_cachain ();
}
#endif /* SRC_VNET_TLS_TLS_INLINES_H_ */