/*
*------------------------------------------------------------------
* Copyright (c) 2019 - 2021 Intel and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*------------------------------------------------------------------
*/
#ifndef included_cryptodev_h
#define included_cryptodev_h
#include <vnet/crypto/crypto.h>
#undef always_inline
#include <rte_cryptodev.h>
/* Sizing constants for the cryptodev async engine. */
#define CRYPTODEV_NB_CRYPTO_OPS 1024
#define CRYPTODEV_CACHE_QUEUE_SIZE VNET_CRYPTO_FRAME_POOL_SIZE
/* The (size - 1) masks below only work if the corresponding sizes are
 * powers of two — TODO confirm VNET_CRYPTO_FRAME_POOL_SIZE is a power of 2. */
#define CRYPTODEV_CACHE_QUEUE_MASK (VNET_CRYPTO_FRAME_POOL_SIZE - 1)
#define CRYPTODEV_MAX_INFLIGHT (CRYPTODEV_NB_CRYPTO_OPS - 1)
#define CRYPTODEV_AAD_MASK (CRYPTODEV_NB_CRYPTO_OPS - 1)
/* Max ops moved to/from the device in one burst.
 * NOTE(review): "CRYPTODE_" (missing trailing V) looks like a typo, but these
 * names are used outside this header, so they are kept as-is. */
#define CRYPTODE_ENQ_MAX 64
#define CRYPTODE_DEQ_MAX 64
#define CRYPTODEV_DEQ_CACHE_SZ 32
#define CRYPTODEV_NB_SESSION 4096
#define CRYPTODEV_MAX_IV_SIZE 16
#define CRYPTODEV_MAX_AAD_SIZE 16
#define CRYPTODEV_MAX_N_SGL 8 /**< maximum number of segments */
/* Byte offsets of the per-op IV/AAD scratch areas inside cryptodev_op_t
 * (defined below) — presumably passed to DPDK so ops can locate IV/AAD
 * relative to the rte_crypto_op; confirm at the use sites. */
#define CRYPTODEV_IV_OFFSET (offsetof (cryptodev_op_t, iv))
#define CRYPTODEV_AAD_OFFSET (offsetof (cryptodev_op_t, aad))
/* X-macro table mapping vnet AEAD algorithms to their DPDK equivalents.
 * Each GCM/CHACHA-POLY entry is repeated per supported AAD length.
 * VNET_CRYPTO_ALGO, TYPE, DPDK_CRYPTO_ALGO, IV_LEN, TAG_LEN, AAD_LEN, KEY_LEN
 */
#define foreach_vnet_aead_crypto_conversion                                   \
  _ (AES_128_GCM, AEAD, AES_GCM, 12, 16, 8, 16)                               \
  _ (AES_128_GCM, AEAD, AES_GCM, 12, 16, 12, 16)                              \
  _ (AES_192_GCM, AEAD, AES_GCM, 12, 16, 8, 24)                               \
  _ (AES_192_GCM, AEAD, AES_GCM, 12, 16, 12, 24)                              \
  _ (AES_256_GCM, AEAD, AES_GCM, 12, 16, 8, 32)                               \
  _ (AES_256_GCM, AEAD, AES_GCM, 12, 16, 12, 32)                              \
  _ (CHACHA20_POLY1305, AEAD, CHACHA20_POLY1305, 12, 16, 0, 32)               \
  _ (CHACHA20_POLY1305, AEAD, CHACHA20_POLY1305, 12, 16, 8, 32)               \
  _ (CHACHA20_POLY1305, AEAD, CHACHA20_POLY1305, 12, 16, 12, 32)
/**
 * X-macro table of supported cipher+auth "linked" algorithm combinations:
 * crypto (alg, cryptodev_alg, key_size), hash (alg, digest-size)
 **/
#define foreach_cryptodev_link_async_alg                                      \
  _ (AES_128_CBC, AES_CBC, 16, MD5, 12)                                       \
  _ (AES_192_CBC, AES_CBC, 24, MD5, 12)                                       \
  _ (AES_256_CBC, AES_CBC, 32, MD5, 12)                                       \
  _ (AES_128_CBC, AES_CBC, 16, SHA1, 12)                                      \
  _ (AES_192_CBC, AES_CBC, 24, SHA1, 12)                                      \
  _ (AES_256_CBC, AES_CBC, 32, SHA1, 12)                                      \
  _ (AES_128_CBC, AES_CBC, 16, SHA224, 14)                                    \
  _ (AES_192_CBC, AES_CBC, 24, SHA224, 14)                                    \
  _ (AES_256_CBC, AES_CBC, 32, SHA224, 14)                                    \
  _ (AES_128_CBC, AES_CBC, 16, SHA256, 16)                                    \
  _ (AES_192_CBC, AES_CBC, 24, SHA256, 16)                                    \
  _ (AES_256_CBC, AES_CBC, 32, SHA256, 16)                                    \
  _ (AES_128_CBC, AES_CBC, 16, SHA384, 24)                                    \
  _ (AES_192_CBC, AES_CBC, 24, SHA384, 24)                                    \
  _ (AES_256_CBC, AES_CBC, 32, SHA384, 24)                                    \
  _ (AES_128_CBC, AES_CBC, 16, SHA512, 32)                                    \
  _ (AES_192_CBC, AES_CBC, 24, SHA512, 32)                                    \
  _ (AES_256_CBC, AES_CBC, 32, SHA512, 32)                                    \
  _ (AES_128_CTR, AES_CTR, 16, SHA1, 12)                                      \
  _ (AES_192_CTR, AES_CTR, 24, SHA1, 12)                                      \
  _ (AES_256_CTR, AES_CTR, 32, SHA1, 12)
/* Direction of a crypto operation. */
typedef enum
{
  CRYPTODEV_OP_TYPE_ENCRYPT = 0,
  CRYPTODEV_OP_TYPE_DECRYPT,
  CRYPTODEV_N_OP_TYPES, /* count of op types, not itself a valid op type */
} cryptodev_op_type_t;
/* DPDK 22.11 made rte_cryptodev_sym_session an opaque handle; use void for
 * newer releases and the struct tag for older ones so the rest of the code
 * can use a single session pointer type. */
#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
typedef void cryptodev_session_t;
#else
typedef struct rte_cryptodev_sym_session cryptodev_session_t;
#endif
/* Cryptodev session data, one data per direction per numa */
typedef struct
{
  /* Three-level session lookup table. Per the comment above, two of the
   * dimensions are direction (encrypt/decrypt) and numa node; the third is
   * presumably keyed by AAD length (cryptodev_session_create takes aad_len)
   * — confirm against the .c implementation. */
  cryptodev_session_t ***keys;
} cryptodev_key_t;
/* Replicate DPDK rte_cryptodev_sym_capability structure with key size ranges
 * in favor of vpp vector */
typedef struct
{
  /* discriminator for the anonymous union below (auth/cipher/aead) */
  enum rte_crypto_sym_xform_type xform_type;
  union
  {
    struct
    {
      enum rte_crypto_auth_algorithm algo; /*auth algo */
      u32 *digest_sizes;		   /* vector of auth digest sizes */
    } auth;
    struct
    {
      enum rte_crypto_cipher_algorithm algo; /* cipher algo */
      u32 *key_sizes;			     /* vector of cipher key sizes */
    } cipher;
    struct
    {
      enum rte_crypto_aead_algorithm algo; /* aead algo */
      u32 *key_sizes;			   /*vector of aead key sizes */
      u32 *aad_sizes;			   /*vector of aad sizes */
      u32 *digest_sizes;		   /* vector of aead digest sizes */
    } aead;
  };
} cryptodev_capability_t;
/* Cryptodev instance data: one (device, queue pair) combination. */
typedef struct
{
  u32 dev_id; /* DPDK cryptodev device id */
  u32 q_id;   /* queue pair id on that device */
  char *desc; /* human-readable description of the instance */
} cryptodev_inst_t;
/* DPDK session mempool(s); pre-22.11 DPDK additionally requires a separate
 * pool for session private data. */
typedef struct
{
  struct rte_mempool *sess_pool;
#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
  struct rte_mempool *sess_priv_pool;
#endif
} cryptodev_session_pool_t;
/* Per-NUMA-node data: vector of session pools for that node. */
typedef struct
{
  cryptodev_session_pool_t *sess_pools;
} cryptodev_numa_data_t;
/* Engine-side wrapper around a DPDK crypto op. The iv/aad scratch arrays
 * are located via CRYPTODEV_IV_OFFSET / CRYPTODEV_AAD_OFFSET, so their
 * position relative to `op` is part of the layout contract. */
typedef struct
{
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  struct rte_crypto_op op;	/* generic DPDK crypto op */
  struct rte_crypto_sym_op sop; /* symmetric-crypto op parameters */
  u8 iv[CRYPTODEV_MAX_IV_SIZE];	  /* per-op IV scratch area */
  u8 aad[CRYPTODEV_MAX_AAD_SIZE]; /* per-op AAD scratch area */
  vnet_crypto_async_frame_t *frame; /* owning vnet async frame */
  u32 n_elts; /* presumably elements of the frame this op covers — confirm */
} cryptodev_op_t;
/* Per-frame bookkeeping element of the software frame ring below.
 * NOTE(review): the exact semantics of the u8 counters (counts vs flags)
 * are not visible from this header — confirm against the .c files. */
typedef struct
{
  vnet_crypto_async_frame_t *f; /* the frame tracked by this slot */
  u8 enqueued;			/* presumably elements enqueued so far */
  u8 dequeued;			/* presumably elements dequeued so far */
  u8 deq_state;			/* dequeue-side state */
  u8 frame_inflight;		/* nonzero while the frame is at the device */
  u8 op_type;			/* cryptodev_op_type_t of this frame */
  u8 aad_len;			/* AAD length used for this frame */
  u8 n_elts;			/* number of elements in the frame */
  u8 reserved;			/* padding/reserved */
} cryptodev_async_ring_elt;
/* Fixed-size software ring of frames in flight on one thread. Indexing is
 * presumably masked with CRYPTODEV_CACHE_QUEUE_MASK — confirm at use sites. */
typedef struct
{
  cryptodev_async_ring_elt frames[VNET_CRYPTO_FRAME_POOL_SIZE];
  uint16_t head;
  uint16_t tail;
  uint16_t enq; /*record the frame currently being enqueued */
  uint16_t deq; /*record the frame currently being dequeued */
} cryptodev_async_frame_sw_ring;
/* Per-worker-thread engine state. The anonymous union holds the state of
 * exactly one of the two data paths (see cryptodev_main_t.is_raw_api). */
typedef struct
{
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  vlib_buffer_t *b[VNET_CRYPTO_FRAME_SIZE]; /* buffer scratch for one frame */
  union
  {
    /* state for the rte_crypto_op based data path */
    struct
    {
      cryptodev_op_t **cops;	   /* vector of op pointers */
      struct rte_mempool *cop_pool; /* pool the ops are allocated from */
      struct rte_ring *ring;
    };
    /* state for the raw data-path API */
    struct
    {
      struct rte_crypto_raw_dp_ctx *ctx; /* DPDK raw data-path context */
      struct rte_ring *cached_frame;
      u16 aad_index;
      u8 *aad_buf;
      u64 aad_phy_addr; /* physical address of aad_buf — presumably needed
			 * for PA IOVA mode; confirm */
      cryptodev_session_t *reset_sess;
    };
  };
  cryptodev_async_frame_sw_ring frame_ring; /* frames owned by this thread */
  u16 cryptodev_id; /* device this thread enqueues to */
  u16 cryptodev_q;  /* queue pair used on that device */
  u16 frames_on_ring;
  /* NOTE(review): the two counters below are misspelled
   * (enqueued_not_dequeued / dequeued_not_returned) but are referenced from
   * the .c files, so renaming here would break the build. */
  u16 enqueued_not_dequeueq;
  u16 deqeued_not_returned;
  u16 pending_to_qat;
  u16 inflight;
} cryptodev_engine_thread_t;
/* Global engine state; single instance `cryptodev_main` declared below. */
typedef struct
{
  cryptodev_numa_data_t *per_numa_data;	      /* vector, one per numa node */
  cryptodev_key_t *keys;		      /* sessions per vnet crypto key */
  cryptodev_engine_thread_t *per_thread_data; /* vector, one per worker */
  enum rte_iova_mode iova_mode;		      /* DPDK IOVA mode (PA/VA) */
  cryptodev_inst_t *cryptodev_inst; /* vector of (device, queue) instances */
  clib_bitmap_t *active_cdev_inst_mask; /* instances currently assigned */
  clib_spinlock_t tlock; /* presumably protects instance assignment — confirm */
  cryptodev_capability_t *supported_caps;
  u32 sess_sz;	   /* session object size */
  u32 drivers_cnt; /* number of crypto drivers */
  u8 is_raw_api;   /* nonzero when the raw data-path API is in use */
#if RTE_VERSION >= RTE_VERSION_NUM(22, 11, 0, 0)
  u8 driver_id;
#endif
} cryptodev_main_t;
/* Engine singleton, defined in the plugin's .c file. */
extern cryptodev_main_t cryptodev_main;
/**
 * Mark every element of an async crypto frame with the given status and
 * reset the frame state so the infra treats it as not processed.
 *
 * @param f  frame whose element statuses are overwritten
 * @param s  status value written to each element
 */
static_always_inline void
cryptodev_mark_frame_err_status (vnet_crypto_async_frame_t *f,
				 vnet_crypto_op_status_t s)
{
  const u32 total = f->n_elts;
  u32 idx = 0;

  while (idx < total)
    f->elts[idx++].status = s;

  f->state = VNET_CRYPTO_FRAME_STATE_NOT_PROCESSED;
}
/* Create a DPDK session for vnet crypto key @idx (aad_len selects the
 * AAD-length-specific session variant). */
int cryptodev_session_create (vlib_main_t *vm, vnet_crypto_key_index_t idx,
			      u32 aad_len);
/* Key add/del/modify handler — presumably registered with the vnet crypto
 * infra; confirm in the .c file. */
void cryptodev_sess_handler (vlib_main_t *vm, vnet_crypto_key_op_t kop,
			     vnet_crypto_key_index_t idx, u32 aad_len);
/* Check device capability support for the given algorithm index and sizes. */
int cryptodev_check_cap_support (struct rte_cryptodev_sym_capability_idx *idx,
				 u32 key_size, u32 digest_size, u32 aad_size);
/* Register the rte_crypto_op based enqueue/dequeue handlers for engine
 * index @eidx. */
clib_error_t *cryptodev_register_cop_hdl (vlib_main_t *vm, u32 eidx);
/* Register the raw data-path API handlers; weak symbol, may be absent. */
clib_error_t *__clib_weak cryptodev_register_raw_hdl (vlib_main_t *vm,
						      u32 eidx);
/* Plugin-level cryptodev initialization; weak symbol, may be absent. */
clib_error_t *__clib_weak dpdk_cryptodev_init (vlib_main_t *vm);
#endif