/* SPDX-License-Identifier: Apache-2.0
* Copyright(c) 2024 Cisco Systems, Inc.
 */

#include <vlib/vlib.h>
#include <vnet/plugin/plugin.h>
#include <vnet/crypto/crypto.h>
#include <crypto_native/crypto_native.h>
#include <vppinfra/crypto/aes_ctr.h>

#if __GNUC__ > 4 && !__clang__ && CLIB_DEBUG == 0
#pragma GCC optimize ("O3")
#endif
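
/* Generic CTR op handler: walk the ops array, look up the expanded key data
 * for each op, initialize a counter-mode context from the per-op IV, then
 * transform either a single flat buffer or the list of chained chunks and
 * mark the op completed. */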
static_always_inline u32
aes_ops_aes_ctr (vlib_main_t *vm, vnet_crypto_op_t *ops[], u32 n_ops,
                 vnet_crypto_op_chunk_t *chunks, aes_key_size_t ks,
                 int maybe_chained)
{
  crypto_native_main_t *cm = &crypto_native_main;
  vnet_crypto_op_t *op = ops[0];
  aes_ctr_key_data_t *kd;
  aes_ctr_ctx_t ctx;
  u32 n_left = n_ops;

next:
  kd = (aes_ctr_key_data_t *) cm->key_data[op->key_index];

  clib_aes_ctr_init (&ctx, kd, op->iv, ks);
  if (op->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS)
    {
      vnet_crypto_op_chunk_t *chp = chunks + op->chunk_index;
      for (int j = 0; j < op->n_chunks; j++, chp++)
        clib_aes_ctr_transform (&ctx, chp->src, chp->dst, chp->len, ks);
    }
  else
    clib_aes_ctr_transform (&ctx, op->src, op->dst, op->len, ks);
  op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;

  if (--n_left)
    {
      op += 1;
      goto next;
    }

  return n_ops;
}
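
/* Expand the AES key schedule once per key into a cache-line aligned
 * structure; the pointer returned here is what the op handler above later
 * finds in cm->key_data[op->key_index]. */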
static_always_inline void *
aes_ctr_key_exp (vnet_crypto_key_t *key, aes_key_size_t ks)
{
  aes_ctr_key_data_t *kd;

  kd = clib_mem_alloc_aligned (sizeof (*kd), CLIB_CACHE_LINE_BYTES);

  clib_aes_ctr_key_expand (kd, key->data, ks);

  return kd;
}
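
/* Instantiate single-buffer, chained-buffer and key-expansion handlers for
 * 128, 192 and 256 bit keys. */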
#define foreach_aes_ctr_handler_type _ (128) _ (192) _ (256)
#define _(x)                                                                  \
  static u32 aes_ops_aes_ctr_##x (vlib_main_t *vm, vnet_crypto_op_t *ops[],  \
                                  u32 n_ops)                                  \
  {                                                                           \
    return aes_ops_aes_ctr (vm, ops, n_ops, 0, AES_KEY_##x, 0);               \
  }                                                                           \
  static u32 aes_ops_aes_ctr_##x##_chained (                                  \
    vlib_main_t *vm, vnet_crypto_op_t *ops[], vnet_crypto_op_chunk_t *chunks, \
    u32 n_ops)                                                                \
  {                                                                           \
    return aes_ops_aes_ctr (vm, ops, n_ops, chunks, AES_KEY_##x, 1);          \
  }                                                                           \
  static void *aes_ctr_key_exp_##x (vnet_crypto_key_t *key)                   \
  {                                                                           \
    return aes_ctr_key_exp (key, AES_KEY_##x);                                \
  }

foreach_aes_ctr_handler_type;
#undef _
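
/* Report the priority of this implementation variant; the value depends on
 * the instruction set extensions this file was compiled with and on what the
 * CPU supports at runtime, and -1 means the variant cannot be used. */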
static int
probe ()
{
#if defined(__VAES__) && defined(__AVX512F__)
  if (clib_cpu_supports_vaes () && clib_cpu_supports_avx512f ())
    return 50;
#elif defined(__VAES__)
  if (clib_cpu_supports_vaes ())
    return 40;
#elif defined(__AVX512F__)
  if (clib_cpu_supports_avx512f ())
    return 30;
#elif defined(__AVX2__)
  if (clib_cpu_supports_avx2 ())
    return 20;
#elif __AES__
  if (clib_cpu_supports_aes ())
    return 10;
#elif __aarch64__
  if (clib_cpu_supports_aarch64_aes ())
    return 10;
#endif
  return -1;
}
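
/* Register encrypt/decrypt op handlers (plus their chained-buffer variants)
 * and the key handler for each AES-CTR key size. */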
#define _(b)                                                                  \
  CRYPTO_NATIVE_OP_HANDLER (aes_##b##_ctr_enc) = {                            \
    .op_id = VNET_CRYPTO_OP_AES_##b##_CTR_ENC,                                \
    .fn = aes_ops_aes_ctr_##b,                                                \
    .cfn = aes_ops_aes_ctr_##b##_chained,                                     \
    .probe = probe,                                                           \
  };                                                                          \
                                                                              \
  CRYPTO_NATIVE_OP_HANDLER (aes_##b##_ctr_dec) = {                            \
    .op_id = VNET_CRYPTO_OP_AES_##b##_CTR_DEC,                                \
    .fn = aes_ops_aes_ctr_##b,                                                \
    .cfn = aes_ops_aes_ctr_##b##_chained,                                     \
    .probe = probe,                                                           \
  };                                                                          \
  CRYPTO_NATIVE_KEY_HANDLER (aes_##b##_ctr) = {                               \
    .alg_id = VNET_CRYPTO_ALG_AES_##b##_CTR,                                  \
    .key_fn = aes_ctr_key_exp_##b,                                            \
    .probe = probe,                                                           \
  };

_ (128) _ (192) _ (256)
#undef _