Diffstat (limited to 'src/vppinfra/crypto')
 -rw-r--r--  src/vppinfra/crypto/aes.h     |  86
 -rw-r--r--  src/vppinfra/crypto/aes_cbc.h |  28
 -rw-r--r--  src/vppinfra/crypto/aes_ctr.h | 190
 -rw-r--r--  src/vppinfra/crypto/aes_gcm.h | 274
 4 files changed, 385 insertions(+), 193 deletions(-)
diff --git a/src/vppinfra/crypto/aes.h b/src/vppinfra/crypto/aes.h index a5e286e4c6e..0aa154187a8 100644 --- a/src/vppinfra/crypto/aes.h +++ b/src/vppinfra/crypto/aes.h @@ -15,8 +15,8 @@ *------------------------------------------------------------------ */ -#ifndef __aesni_h__ -#define __aesni_h__ +#ifndef __aes_h__ +#define __aes_h__ typedef enum { @@ -35,7 +35,7 @@ aes_block_load (u8 * p) } static_always_inline u8x16 -aes_enc_round (u8x16 a, u8x16 k) +aes_enc_round_x1 (u8x16 a, u8x16 k) { #if defined (__AES__) return (u8x16) _mm_aesenc_si128 ((__m128i) a, (__m128i) k); @@ -97,7 +97,7 @@ aes_dec_last_round_x2 (u8x32 a, u8x32 k) #endif static_always_inline u8x16 -aes_enc_last_round (u8x16 a, u8x16 k) +aes_enc_last_round_x1 (u8x16 a, u8x16 k) { #if defined (__AES__) return (u8x16) _mm_aesenclast_si128 ((__m128i) a, (__m128i) k); @@ -109,13 +109,13 @@ aes_enc_last_round (u8x16 a, u8x16 k) #ifdef __x86_64__ static_always_inline u8x16 -aes_dec_round (u8x16 a, u8x16 k) +aes_dec_round_x1 (u8x16 a, u8x16 k) { return (u8x16) _mm_aesdec_si128 ((__m128i) a, (__m128i) k); } static_always_inline u8x16 -aes_dec_last_round (u8x16 a, u8x16 k) +aes_dec_last_round_x1 (u8x16 a, u8x16 k) { return (u8x16) _mm_aesdeclast_si128 ((__m128i) a, (__m128i) k); } @@ -133,8 +133,8 @@ aes_encrypt_block (u8x16 block, const u8x16 * round_keys, aes_key_size_t ks) int rounds = AES_KEY_ROUNDS (ks); block ^= round_keys[0]; for (int i = 1; i < rounds; i += 1) - block = aes_enc_round (block, round_keys[i]); - return aes_enc_last_round (block, round_keys[rounds]); + block = aes_enc_round_x1 (block, round_keys[i]); + return aes_enc_last_round_x1 (block, round_keys[rounds]); } static_always_inline u8x16 @@ -427,13 +427,67 @@ aes_key_enc_to_dec (u8x16 * ke, u8x16 * kd, aes_key_size_t ks) kd[rounds / 2] = aes_inv_mix_column (ke[rounds / 2]); } +#if defined(__VAES__) && defined(__AVX512F__) +#define N_AES_LANES 4 +#define aes_load_partial(p, n) u8x64_load_partial ((u8 *) (p), n) +#define aes_store_partial(v, p, n) u8x64_store_partial (v, (u8 *) (p), n) +#define aes_reflect(r) u8x64_reflect_u8x16 (r) +typedef u8x64 aes_data_t; +typedef u8x64u aes_mem_t; +typedef u32x16 aes_counter_t; +#elif defined(__VAES__) +#define N_AES_LANES 2 +#define aes_load_partial(p, n) u8x32_load_partial ((u8 *) (p), n) +#define aes_store_partial(v, p, n) u8x32_store_partial (v, (u8 *) (p), n) +#define aes_reflect(r) u8x32_reflect_u8x16 (r) +typedef u8x32 aes_data_t; +typedef u8x32u aes_mem_t; +typedef u32x8 aes_counter_t; +#else +#define N_AES_LANES 1 +#define aes_load_partial(p, n) u8x16_load_partial ((u8 *) (p), n) +#define aes_store_partial(v, p, n) u8x16_store_partial (v, (u8 *) (p), n) +#define aes_reflect(r) u8x16_reflect (r) +typedef u8x16 aes_data_t; +typedef u8x16u aes_mem_t; +typedef u32x4 aes_counter_t; +#endif -#endif /* __aesni_h__ */ +#define N_AES_BYTES (N_AES_LANES * 16) -/* - * fd.io coding-style-patch-verification: ON - * - * Local Variables: - * eval: (c-set-style "gnu") - * End: - */ +typedef union +{ + u8x16 x1; + u8x32 x2; + u8x64 x4; + u8x16 lanes[4]; +} aes_expaned_key_t; + +static_always_inline void +aes_enc_round (aes_data_t *r, const aes_expaned_key_t *ek, uword n_blocks) +{ + for (int i = 0; i < n_blocks; i++) +#if N_AES_LANES == 4 + r[i] = aes_enc_round_x4 (r[i], ek->x4); +#elif N_AES_LANES == 2 + r[i] = aes_enc_round_x2 (r[i], ek->x2); +#else + r[i] = aes_enc_round_x1 (r[i], ek->x1); +#endif +} + +static_always_inline void +aes_enc_last_round (aes_data_t *r, aes_data_t *d, const aes_expaned_key_t *ek, + uword n_blocks) +{ + 
for (int i = 0; i < n_blocks; i++) +#if N_AES_LANES == 4 + d[i] ^= r[i] = aes_enc_last_round_x4 (r[i], ek->x4); +#elif N_AES_LANES == 2 + d[i] ^= r[i] = aes_enc_last_round_x2 (r[i], ek->x2); +#else + d[i] ^= r[i] = aes_enc_last_round_x1 (r[i], ek->x1); +#endif +} + +#endif /* __aes_h__ */ diff --git a/src/vppinfra/crypto/aes_cbc.h b/src/vppinfra/crypto/aes_cbc.h index 3f0f1f1f797..64bc071e910 100644 --- a/src/vppinfra/crypto/aes_cbc.h +++ b/src/vppinfra/crypto/aes_cbc.h @@ -30,8 +30,8 @@ clib_aes_cbc_encrypt (const aes_cbc_key_data_t *kd, const u8 *src, uword len, #if __x86_64__ r = u8x16_xor3 (r, *(u8x16u *) (src + i), k[0]); for (j = 1; j < rounds; j++) - r = aes_enc_round (r, k[j]); - r = aes_enc_last_round (r, k[rounds]); + r = aes_enc_round_x1 (r, k[j]); + r = aes_enc_last_round_x1 (r, k[rounds]); #else r ^= *(u8x16u *) (src + i); for (j = 1; j < rounds - 1; j++) @@ -85,16 +85,16 @@ aes_cbc_dec (const u8x16 *k, u8x16u *src, u8x16u *dst, u8x16u *iv, int count, for (int i = 1; i < rounds; i++) { - r[0] = aes_dec_round (r[0], k[i]); - r[1] = aes_dec_round (r[1], k[i]); - r[2] = aes_dec_round (r[2], k[i]); - r[3] = aes_dec_round (r[3], k[i]); + r[0] = aes_dec_round_x1 (r[0], k[i]); + r[1] = aes_dec_round_x1 (r[1], k[i]); + r[2] = aes_dec_round_x1 (r[2], k[i]); + r[3] = aes_dec_round_x1 (r[3], k[i]); } - r[0] = aes_dec_last_round (r[0], k[rounds]); - r[1] = aes_dec_last_round (r[1], k[rounds]); - r[2] = aes_dec_last_round (r[2], k[rounds]); - r[3] = aes_dec_last_round (r[3], k[rounds]); + r[0] = aes_dec_last_round_x1 (r[0], k[rounds]); + r[1] = aes_dec_last_round_x1 (r[1], k[rounds]); + r[2] = aes_dec_last_round_x1 (r[2], k[rounds]); + r[3] = aes_dec_last_round_x1 (r[3], k[rounds]); #else for (int i = 0; i < rounds - 1; i++) { @@ -125,8 +125,8 @@ aes_cbc_dec (const u8x16 *k, u8x16u *src, u8x16u *dst, u8x16u *iv, int count, #if __x86_64__ r[0] ^= k[0]; for (int i = 1; i < rounds; i++) - r[0] = aes_dec_round (r[0], k[i]); - r[0] = aes_dec_last_round (r[0], k[rounds]); + r[0] = aes_dec_round_x1 (r[0], k[i]); + r[0] = aes_dec_last_round_x1 (r[0], k[rounds]); #else c[0] = r[0] = src[0]; for (int i = 0; i < rounds - 1; i++) @@ -469,8 +469,8 @@ aes2_cbc_dec (const u8x16 *k, u8x32u *src, u8x32u *dst, u8x16u *iv, int count, { u8x16 rl = *(u8x16u *) src ^ k[0]; for (i = 1; i < rounds; i++) - rl = aes_dec_round (rl, k[i]); - rl = aes_dec_last_round (rl, k[i]); + rl = aes_dec_round_x1 (rl, k[i]); + rl = aes_dec_last_round_x1 (rl, k[i]); *(u8x16u *) dst = rl ^ u8x32_extract_hi (f); } } diff --git a/src/vppinfra/crypto/aes_ctr.h b/src/vppinfra/crypto/aes_ctr.h new file mode 100644 index 00000000000..74a9f96d90d --- /dev/null +++ b/src/vppinfra/crypto/aes_ctr.h @@ -0,0 +1,190 @@ +/* SPDX-License-Identifier: Apache-2.0 + * Copyright(c) 2024 Cisco Systems, Inc. 
+ */ + +#ifndef __crypto_aes_ctr_h__ +#define __crypto_aes_ctr_h__ + +#include <vppinfra/clib.h> +#include <vppinfra/vector.h> +#include <vppinfra/cache.h> +#include <vppinfra/string.h> +#include <vppinfra/crypto/aes.h> + +typedef struct +{ + const aes_expaned_key_t exp_key[AES_KEY_ROUNDS (AES_KEY_256) + 1]; +} aes_ctr_key_data_t; + +typedef struct +{ + const aes_expaned_key_t exp_key[AES_KEY_ROUNDS (AES_KEY_256) + 1]; + aes_counter_t ctr; /* counter (reflected) */ + u8 keystream_bytes[N_AES_BYTES]; /* keystream leftovers */ + u32 n_keystream_bytes; /* number of keystream leftovers */ +} aes_ctr_ctx_t; + +static_always_inline aes_counter_t +aes_ctr_one_block (aes_ctr_ctx_t *ctx, aes_counter_t ctr, const u8 *src, + u8 *dst, u32 n_parallel, u32 n_bytes, int rounds, int last) +{ + u32 __clib_aligned (N_AES_BYTES) + inc[] = { N_AES_LANES, 0, 0, 0, N_AES_LANES, 0, 0, 0, + N_AES_LANES, 0, 0, 0, N_AES_LANES, 0, 0, 0 }; + const aes_expaned_key_t *k = ctx->exp_key; + const aes_mem_t *sv = (aes_mem_t *) src; + aes_mem_t *dv = (aes_mem_t *) dst; + aes_data_t d[4], t[4]; + u32 r; + + n_bytes -= (n_parallel - 1) * N_AES_BYTES; + + /* AES First Round */ + for (int i = 0; i < n_parallel; i++) + { +#if N_AES_LANES == 4 + t[i] = k[0].x4 ^ (u8x64) aes_reflect ((u8x64) ctr); +#elif N_AES_LANES == 2 + t[i] = k[0].x2 ^ (u8x32) aes_reflect ((u8x32) ctr); +#else + t[i] = k[0].x1 ^ (u8x16) aes_reflect ((u8x16) ctr); +#endif + ctr += *(aes_counter_t *) inc; + } + + /* Load Data */ + for (int i = 0; i < n_parallel - last; i++) + d[i] = sv[i]; + + if (last) + d[n_parallel - 1] = + aes_load_partial ((u8 *) (sv + n_parallel - 1), n_bytes); + + /* AES Intermediate Rounds */ + for (r = 1; r < rounds; r++) + aes_enc_round (t, k + r, n_parallel); + + /* AES Last Round */ + aes_enc_last_round (t, d, k + r, n_parallel); + + /* Store Data */ + for (int i = 0; i < n_parallel - last; i++) + dv[i] = d[i]; + + if (last) + { + aes_store_partial (d[n_parallel - 1], dv + n_parallel - 1, n_bytes); + *(aes_data_t *) ctx->keystream_bytes = t[n_parallel - 1]; + ctx->n_keystream_bytes = N_AES_BYTES - n_bytes; + } + + return ctr; +} + +static_always_inline void +clib_aes_ctr_init (aes_ctr_ctx_t *ctx, const aes_ctr_key_data_t *kd, + const u8 *iv, aes_key_size_t ks) +{ + u32x4 ctr = (u32x4) u8x16_reflect (*(u8x16u *) iv); +#if N_AES_LANES == 4 + ctx->ctr = (aes_counter_t) u32x16_splat_u32x4 (ctr) + + (u32x16){ 0, 0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0 }; +#elif N_AES_LANES == 2 + ctx->ctr = (aes_counter_t) u32x8_splat_u32x4 (ctr) + + (u32x8){ 0, 0, 0, 0, 1, 0, 0, 0 }; +#else + ctx->ctr = ctr; +#endif + for (int i = 0; i < AES_KEY_ROUNDS (ks) + 1; i++) + ((aes_expaned_key_t *) ctx->exp_key)[i] = kd->exp_key[i]; + ctx->n_keystream_bytes = 0; +} + +static_always_inline void +clib_aes_ctr_transform (aes_ctr_ctx_t *ctx, const u8 *src, u8 *dst, + u32 n_bytes, aes_key_size_t ks) +{ + int r = AES_KEY_ROUNDS (ks); + aes_counter_t ctr = ctx->ctr; + + if (ctx->n_keystream_bytes) + { + u8 *ks = ctx->keystream_bytes + N_AES_BYTES - ctx->n_keystream_bytes; + + if (ctx->n_keystream_bytes >= n_bytes) + { + for (int i = 0; i < n_bytes; i++) + dst[i] = src[i] ^ ks[i]; + ctx->n_keystream_bytes -= n_bytes; + return; + } + + for (int i = 0; i < ctx->n_keystream_bytes; i++) + dst++[0] = src++[0] ^ ks[i]; + + n_bytes -= ctx->n_keystream_bytes; + ctx->n_keystream_bytes = 0; + } + + /* main loop */ + for (int n = 4 * N_AES_BYTES; n_bytes >= n; n_bytes -= n, dst += n, src += n) + ctr = aes_ctr_one_block (ctx, ctr, src, dst, 4, n, r, 0); + + if (n_bytes) + { + 
if (n_bytes > 3 * N_AES_BYTES) + ctr = aes_ctr_one_block (ctx, ctr, src, dst, 4, n_bytes, r, 1); + else if (n_bytes > 2 * N_AES_BYTES) + ctr = aes_ctr_one_block (ctx, ctr, src, dst, 3, n_bytes, r, 1); + else if (n_bytes > N_AES_BYTES) + ctr = aes_ctr_one_block (ctx, ctr, src, dst, 2, n_bytes, r, 1); + else + ctr = aes_ctr_one_block (ctx, ctr, src, dst, 1, n_bytes, r, 1); + } + else + ctx->n_keystream_bytes = 0; + + ctx->ctr = ctr; +} + +static_always_inline void +clib_aes_ctr_key_expand (aes_ctr_key_data_t *kd, const u8 *key, + aes_key_size_t ks) +{ + u8x16 ek[AES_KEY_ROUNDS (AES_KEY_256) + 1]; + aes_expaned_key_t *k = (aes_expaned_key_t *) kd->exp_key; + + /* expand AES key */ + aes_key_expand (ek, key, ks); + for (int i = 0; i < AES_KEY_ROUNDS (ks) + 1; i++) + k[i].lanes[0] = k[i].lanes[1] = k[i].lanes[2] = k[i].lanes[3] = ek[i]; +} + +static_always_inline void +clib_aes128_ctr (const aes_ctr_key_data_t *kd, const u8 *src, u32 n_bytes, + const u8 *iv, u8 *dst) +{ + aes_ctr_ctx_t ctx; + clib_aes_ctr_init (&ctx, kd, iv, AES_KEY_128); + clib_aes_ctr_transform (&ctx, src, dst, n_bytes, AES_KEY_128); +} + +static_always_inline void +clib_aes192_ctr (const aes_ctr_key_data_t *kd, const u8 *src, u32 n_bytes, + const u8 *iv, u8 *dst) +{ + aes_ctr_ctx_t ctx; + clib_aes_ctr_init (&ctx, kd, iv, AES_KEY_192); + clib_aes_ctr_transform (&ctx, src, dst, n_bytes, AES_KEY_192); +} + +static_always_inline void +clib_aes256_ctr (const aes_ctr_key_data_t *kd, const u8 *src, u32 n_bytes, + const u8 *iv, u8 *dst) +{ + aes_ctr_ctx_t ctx; + clib_aes_ctr_init (&ctx, kd, iv, AES_KEY_256); + clib_aes_ctr_transform (&ctx, src, dst, n_bytes, AES_KEY_256); +} + +#endif /* __crypto_aes_ctr_h__ */ diff --git a/src/vppinfra/crypto/aes_gcm.h b/src/vppinfra/crypto/aes_gcm.h index 3d1b220f7b8..5b628c87745 100644 --- a/src/vppinfra/crypto/aes_gcm.h +++ b/src/vppinfra/crypto/aes_gcm.h @@ -13,47 +13,25 @@ #include <vppinfra/crypto/ghash.h> #define NUM_HI 36 -#if defined(__VAES__) && defined(__AVX512F__) -typedef u8x64 aes_data_t; +#if N_AES_LANES == 4 typedef u8x64u aes_ghash_t; -typedef u8x64u aes_mem_t; -typedef u32x16 aes_gcm_counter_t; -#define N 64 -#define aes_gcm_load_partial(p, n) u8x64_load_partial ((u8 *) (p), n) -#define aes_gcm_store_partial(v, p, n) u8x64_store_partial (v, (u8 *) (p), n) #define aes_gcm_splat(v) u8x64_splat (v) -#define aes_gcm_reflect(r) u8x64_reflect_u8x16 (r) #define aes_gcm_ghash_reduce(c) ghash4_reduce (&(c)->gd) #define aes_gcm_ghash_reduce2(c) ghash4_reduce2 (&(c)->gd) #define aes_gcm_ghash_final(c) (c)->T = ghash4_final (&(c)->gd) -#elif defined(__VAES__) -typedef u8x32 aes_data_t; +#elif N_AES_LANES == 2 typedef u8x32u aes_ghash_t; -typedef u8x32u aes_mem_t; -typedef u32x8 aes_gcm_counter_t; -#define N 32 -#define aes_gcm_load_partial(p, n) u8x32_load_partial ((u8 *) (p), n) -#define aes_gcm_store_partial(v, p, n) u8x32_store_partial (v, (u8 *) (p), n) #define aes_gcm_splat(v) u8x32_splat (v) -#define aes_gcm_reflect(r) u8x32_reflect_u8x16 (r) #define aes_gcm_ghash_reduce(c) ghash2_reduce (&(c)->gd) #define aes_gcm_ghash_reduce2(c) ghash2_reduce2 (&(c)->gd) #define aes_gcm_ghash_final(c) (c)->T = ghash2_final (&(c)->gd) #else -typedef u8x16 aes_data_t; typedef u8x16 aes_ghash_t; -typedef u8x16u aes_mem_t; -typedef u32x4 aes_gcm_counter_t; -#define N 16 -#define aes_gcm_load_partial(p, n) u8x16_load_partial ((u8 *) (p), n) -#define aes_gcm_store_partial(v, p, n) u8x16_store_partial (v, (u8 *) (p), n) #define aes_gcm_splat(v) u8x16_splat (v) -#define aes_gcm_reflect(r) u8x16_reflect (r) 
#define aes_gcm_ghash_reduce(c) ghash_reduce (&(c)->gd) #define aes_gcm_ghash_reduce2(c) ghash_reduce2 (&(c)->gd) #define aes_gcm_ghash_final(c) (c)->T = ghash_final (&(c)->gd) #endif -#define N_LANES (N / 16) typedef enum { @@ -63,21 +41,12 @@ typedef enum AES_GCM_OP_GMAC } aes_gcm_op_t; -typedef union -{ - u8x16 x1; - u8x32 x2; - u8x64 x4; - u8x16 lanes[4]; -} __clib_aligned (64) -aes_gcm_expaned_key_t; - typedef struct { /* pre-calculated hash key values */ const u8x16 Hi[NUM_HI]; /* extracted AES key */ - const aes_gcm_expaned_key_t Ke[AES_KEY_ROUNDS (AES_KEY_256) + 1]; + const aes_expaned_key_t Ke[AES_KEY_ROUNDS (AES_KEY_256) + 1]; } aes_gcm_key_data_t; typedef struct @@ -95,12 +64,12 @@ typedef struct const aes_ghash_t *next_Hi; /* expaded keys */ - const aes_gcm_expaned_key_t *Ke; + const aes_expaned_key_t *Ke; /* counter */ u32 counter; u8x16 EY0; - aes_gcm_counter_t Y; + aes_counter_t Y; /* ghash */ ghash_ctx_t gd; @@ -117,39 +86,39 @@ aes_gcm_ghash_mul_first (aes_gcm_ctx_t *ctx, aes_data_t data, u32 n_lanes) { uword hash_offset = NUM_HI - n_lanes; ctx->next_Hi = (aes_ghash_t *) (ctx->Hi + hash_offset); -#if N_LANES == 4 +#if N_AES_LANES == 4 u8x64 tag4 = {}; tag4 = u8x64_insert_u8x16 (tag4, ctx->T, 0); - ghash4_mul_first (&ctx->gd, aes_gcm_reflect (data) ^ tag4, *ctx->next_Hi++); -#elif N_LANES == 2 + ghash4_mul_first (&ctx->gd, aes_reflect (data) ^ tag4, *ctx->next_Hi++); +#elif N_AES_LANES == 2 u8x32 tag2 = {}; tag2 = u8x32_insert_lo (tag2, ctx->T); - ghash2_mul_first (&ctx->gd, aes_gcm_reflect (data) ^ tag2, *ctx->next_Hi++); + ghash2_mul_first (&ctx->gd, aes_reflect (data) ^ tag2, *ctx->next_Hi++); #else - ghash_mul_first (&ctx->gd, aes_gcm_reflect (data) ^ ctx->T, *ctx->next_Hi++); + ghash_mul_first (&ctx->gd, aes_reflect (data) ^ ctx->T, *ctx->next_Hi++); #endif } static_always_inline void aes_gcm_ghash_mul_next (aes_gcm_ctx_t *ctx, aes_data_t data) { -#if N_LANES == 4 - ghash4_mul_next (&ctx->gd, aes_gcm_reflect (data), *ctx->next_Hi++); -#elif N_LANES == 2 - ghash2_mul_next (&ctx->gd, aes_gcm_reflect (data), *ctx->next_Hi++); +#if N_AES_LANES == 4 + ghash4_mul_next (&ctx->gd, aes_reflect (data), *ctx->next_Hi++); +#elif N_AES_LANES == 2 + ghash2_mul_next (&ctx->gd, aes_reflect (data), *ctx->next_Hi++); #else - ghash_mul_next (&ctx->gd, aes_gcm_reflect (data), *ctx->next_Hi++); + ghash_mul_next (&ctx->gd, aes_reflect (data), *ctx->next_Hi++); #endif } static_always_inline void aes_gcm_ghash_mul_final_block (aes_gcm_ctx_t *ctx) { -#if N_LANES == 4 +#if N_AES_LANES == 4 u8x64 h = u8x64_insert_u8x16 (u8x64_zero (), ctx->Hi[NUM_HI - 1], 0); u8x64 r4 = u8x64_insert_u8x16 (u8x64_zero (), aes_gcm_final_block (ctx), 0); ghash4_mul_next (&ctx->gd, r4, h); -#elif N_LANES == 2 +#elif N_AES_LANES == 2 u8x32 h = u8x32_insert_lo (u8x32_zero (), ctx->Hi[NUM_HI - 1]); u8x32 r2 = u8x32_insert_lo (u8x32_zero (), aes_gcm_final_block (ctx)); ghash2_mul_next (&ctx->gd, r2, h); @@ -164,9 +133,9 @@ aes_gcm_enc_ctr0_round (aes_gcm_ctx_t *ctx, int aes_round) if (aes_round == 0) ctx->EY0 ^= ctx->Ke[0].x1; else if (aes_round == ctx->rounds) - ctx->EY0 = aes_enc_last_round (ctx->EY0, ctx->Ke[aes_round].x1); + ctx->EY0 = aes_enc_last_round_x1 (ctx->EY0, ctx->Ke[aes_round].x1); else - ctx->EY0 = aes_enc_round (ctx->EY0, ctx->Ke[aes_round].x1); + ctx->EY0 = aes_enc_round_x1 (ctx->EY0, ctx->Ke[aes_round].x1); } static_always_inline void @@ -176,11 +145,11 @@ aes_gcm_ghash (aes_gcm_ctx_t *ctx, u8 *data, u32 n_left) aes_data_t r = {}; const aes_mem_t *d = (aes_mem_t *) data; - for (; n_left >= 8 * N; 
n_left -= 8 * N, d += 8) + for (int n = 8 * N_AES_BYTES; n_left >= n; n_left -= n, d += 8) { - if (ctx->operation == AES_GCM_OP_GMAC && n_left == N * 8) + if (ctx->operation == AES_GCM_OP_GMAC && n_left == n) { - aes_gcm_ghash_mul_first (ctx, d[0], 8 * N_LANES + 1); + aes_gcm_ghash_mul_first (ctx, d[0], 8 * N_AES_LANES + 1); for (i = 1; i < 8; i++) aes_gcm_ghash_mul_next (ctx, d[i]); aes_gcm_ghash_mul_final_block (ctx); @@ -190,7 +159,7 @@ aes_gcm_ghash (aes_gcm_ctx_t *ctx, u8 *data, u32 n_left) goto done; } - aes_gcm_ghash_mul_first (ctx, d[0], 8 * N_LANES); + aes_gcm_ghash_mul_first (ctx, d[0], 8 * N_AES_LANES); for (i = 1; i < 8; i++) aes_gcm_ghash_mul_next (ctx, d[i]); aes_gcm_ghash_reduce (ctx); @@ -205,7 +174,7 @@ aes_gcm_ghash (aes_gcm_ctx_t *ctx, u8 *data, u32 n_left) if (ctx->operation == AES_GCM_OP_GMAC) n_lanes++; - if (n_left < N) + if (n_left < N_AES_BYTES) { clib_memcpy_fast (&r, d, n_left); aes_gcm_ghash_mul_first (ctx, r, n_lanes); @@ -213,30 +182,30 @@ aes_gcm_ghash (aes_gcm_ctx_t *ctx, u8 *data, u32 n_left) else { aes_gcm_ghash_mul_first (ctx, d[0], n_lanes); - n_left -= N; + n_left -= N_AES_BYTES; i = 1; - if (n_left >= 4 * N) + if (n_left >= 4 * N_AES_BYTES) { aes_gcm_ghash_mul_next (ctx, d[i]); aes_gcm_ghash_mul_next (ctx, d[i + 1]); aes_gcm_ghash_mul_next (ctx, d[i + 2]); aes_gcm_ghash_mul_next (ctx, d[i + 3]); - n_left -= 4 * N; + n_left -= 4 * N_AES_BYTES; i += 4; } - if (n_left >= 2 * N) + if (n_left >= 2 * N_AES_BYTES) { aes_gcm_ghash_mul_next (ctx, d[i]); aes_gcm_ghash_mul_next (ctx, d[i + 1]); - n_left -= 2 * N; + n_left -= 2 * N_AES_BYTES; i += 2; } - if (n_left >= N) + if (n_left >= N_AES_BYTES) { aes_gcm_ghash_mul_next (ctx, d[i]); - n_left -= N; + n_left -= N_AES_BYTES; i += 1; } @@ -267,7 +236,7 @@ done: static_always_inline void aes_gcm_enc_first_round (aes_gcm_ctx_t *ctx, aes_data_t *r, uword n_blocks) { - const aes_gcm_expaned_key_t Ke0 = ctx->Ke[0]; + const aes_expaned_key_t Ke0 = ctx->Ke[0]; uword i = 0; /* As counter is stored in network byte order for performance reasons we @@ -275,7 +244,7 @@ aes_gcm_enc_first_round (aes_gcm_ctx_t *ctx, aes_data_t *r, uword n_blocks) overlow. 
As we are processing four 128, 256 or 512-blocks in parallel except the last round, overflow can happen only when n_blocks == 4 */ -#if N_LANES == 4 +#if N_AES_LANES == 4 const u32x16 ctr_inv_4444 = { 0, 0, 0, 4 << 24, 0, 0, 0, 4 << 24, 0, 0, 0, 4 << 24, 0, 0, 0, 4 << 24 }; @@ -292,13 +261,13 @@ aes_gcm_enc_first_round (aes_gcm_ctx_t *ctx, aes_data_t *r, uword n_blocks) if (n_blocks == 4 && PREDICT_FALSE ((u8) ctx->counter == 242)) { - u32x16 Yr = (u32x16) aes_gcm_reflect ((u8x64) ctx->Y); + u32x16 Yr = (u32x16) aes_reflect ((u8x64) ctx->Y); for (; i < n_blocks; i++) { r[i] = Ke0.x4 ^ (u8x64) ctx->Y; /* Initial AES round */ Yr += ctr_4444; - ctx->Y = (u32x16) aes_gcm_reflect ((u8x64) Yr); + ctx->Y = (u32x16) aes_reflect ((u8x64) Yr); } } else @@ -310,7 +279,7 @@ aes_gcm_enc_first_round (aes_gcm_ctx_t *ctx, aes_data_t *r, uword n_blocks) } } ctx->counter += n_blocks * 4; -#elif N_LANES == 2 +#elif N_AES_LANES == 2 const u32x8 ctr_inv_22 = { 0, 0, 0, 2 << 24, 0, 0, 0, 2 << 24 }; const u32x8 ctr_22 = { 2, 0, 0, 0, 2, 0, 0, 0 }; @@ -323,13 +292,13 @@ aes_gcm_enc_first_round (aes_gcm_ctx_t *ctx, aes_data_t *r, uword n_blocks) if (n_blocks == 4 && PREDICT_FALSE ((u8) ctx->counter == 250)) { - u32x8 Yr = (u32x8) aes_gcm_reflect ((u8x32) ctx->Y); + u32x8 Yr = (u32x8) aes_reflect ((u8x32) ctx->Y); for (; i < n_blocks; i++) { r[i] = Ke0.x2 ^ (u8x32) ctx->Y; /* Initial AES round */ Yr += ctr_22; - ctx->Y = (u32x8) aes_gcm_reflect ((u8x32) Yr); + ctx->Y = (u32x8) aes_reflect ((u8x32) Yr); } } else @@ -370,42 +339,21 @@ aes_gcm_enc_first_round (aes_gcm_ctx_t *ctx, aes_data_t *r, uword n_blocks) } static_always_inline void -aes_gcm_enc_round (aes_data_t *r, const aes_gcm_expaned_key_t *Ke, - uword n_blocks) -{ - for (int i = 0; i < n_blocks; i++) -#if N_LANES == 4 - r[i] = aes_enc_round_x4 (r[i], Ke->x4); -#elif N_LANES == 2 - r[i] = aes_enc_round_x2 (r[i], Ke->x2); -#else - r[i] = aes_enc_round (r[i], Ke->x1); -#endif -} - -static_always_inline void aes_gcm_enc_last_round (aes_gcm_ctx_t *ctx, aes_data_t *r, aes_data_t *d, - const aes_gcm_expaned_key_t *Ke, uword n_blocks) + const aes_expaned_key_t *Ke, uword n_blocks) { /* additional ronuds for AES-192 and AES-256 */ for (int i = 10; i < ctx->rounds; i++) - aes_gcm_enc_round (r, Ke + i, n_blocks); + aes_enc_round (r, Ke + i, n_blocks); - for (int i = 0; i < n_blocks; i++) -#if N_LANES == 4 - d[i] ^= aes_enc_last_round_x4 (r[i], Ke[ctx->rounds].x4); -#elif N_LANES == 2 - d[i] ^= aes_enc_last_round_x2 (r[i], Ke[ctx->rounds].x2); -#else - d[i] ^= aes_enc_last_round (r[i], Ke[ctx->rounds].x1); -#endif + aes_enc_last_round (r, d, Ke + ctx->rounds, n_blocks); } static_always_inline void aes_gcm_calc (aes_gcm_ctx_t *ctx, aes_data_t *d, const u8 *src, u8 *dst, u32 n, u32 n_bytes, int with_ghash) { - const aes_gcm_expaned_key_t *k = ctx->Ke; + const aes_expaned_key_t *k = ctx->Ke; const aes_mem_t *sv = (aes_mem_t *) src; aes_mem_t *dv = (aes_mem_t *) dst; uword ghash_blocks, gc = 1; @@ -415,23 +363,23 @@ aes_gcm_calc (aes_gcm_ctx_t *ctx, aes_data_t *d, const u8 *src, u8 *dst, u32 n, if (ctx->operation == AES_GCM_OP_ENCRYPT) { ghash_blocks = 4; - n_lanes = N_LANES * 4; + n_lanes = N_AES_LANES * 4; } else { ghash_blocks = n; - n_lanes = n * N_LANES; -#if N_LANES != 1 + n_lanes = n * N_AES_LANES; +#if N_AES_LANES != 1 if (ctx->last) n_lanes = (n_bytes + 15) / 16; #endif } - n_bytes -= (n - 1) * N; + n_bytes -= (n - 1) * N_AES_BYTES; /* AES rounds 0 and 1 */ aes_gcm_enc_first_round (ctx, r, n); - aes_gcm_enc_round (r, k + 1, n); + aes_enc_round (r, k + 1, n); /* 
load data - decrypt round */ if (ctx->operation == AES_GCM_OP_DECRYPT) @@ -440,7 +388,7 @@ aes_gcm_calc (aes_gcm_ctx_t *ctx, aes_data_t *d, const u8 *src, u8 *dst, u32 n, d[i] = sv[i]; if (ctx->last) - d[n - 1] = aes_gcm_load_partial ((u8 *) (sv + n - 1), n_bytes); + d[n - 1] = aes_load_partial ((u8 *) (sv + n - 1), n_bytes); } /* GHASH multiply block 0 */ @@ -448,24 +396,24 @@ aes_gcm_calc (aes_gcm_ctx_t *ctx, aes_data_t *d, const u8 *src, u8 *dst, u32 n, aes_gcm_ghash_mul_first (ctx, d[0], n_lanes); /* AES rounds 2 and 3 */ - aes_gcm_enc_round (r, k + 2, n); - aes_gcm_enc_round (r, k + 3, n); + aes_enc_round (r, k + 2, n); + aes_enc_round (r, k + 3, n); /* GHASH multiply block 1 */ if (with_ghash && gc++ < ghash_blocks) aes_gcm_ghash_mul_next (ctx, (d[1])); /* AES rounds 4 and 5 */ - aes_gcm_enc_round (r, k + 4, n); - aes_gcm_enc_round (r, k + 5, n); + aes_enc_round (r, k + 4, n); + aes_enc_round (r, k + 5, n); /* GHASH multiply block 2 */ if (with_ghash && gc++ < ghash_blocks) aes_gcm_ghash_mul_next (ctx, (d[2])); /* AES rounds 6 and 7 */ - aes_gcm_enc_round (r, k + 6, n); - aes_gcm_enc_round (r, k + 7, n); + aes_enc_round (r, k + 6, n); + aes_enc_round (r, k + 7, n); /* GHASH multiply block 3 */ if (with_ghash && gc++ < ghash_blocks) @@ -478,12 +426,12 @@ aes_gcm_calc (aes_gcm_ctx_t *ctx, aes_data_t *d, const u8 *src, u8 *dst, u32 n, d[i] = sv[i]; if (ctx->last) - d[n - 1] = aes_gcm_load_partial (sv + n - 1, n_bytes); + d[n - 1] = aes_load_partial (sv + n - 1, n_bytes); } /* AES rounds 8 and 9 */ - aes_gcm_enc_round (r, k + 8, n); - aes_gcm_enc_round (r, k + 9, n); + aes_enc_round (r, k + 8, n); + aes_enc_round (r, k + 9, n); /* AES last round(s) */ aes_gcm_enc_last_round (ctx, r, d, k, n); @@ -493,7 +441,7 @@ aes_gcm_calc (aes_gcm_ctx_t *ctx, aes_data_t *d, const u8 *src, u8 *dst, u32 n, dv[i] = d[i]; if (ctx->last) - aes_gcm_store_partial (d[n - 1], dv + n - 1, n_bytes); + aes_store_partial (d[n - 1], dv + n - 1, n_bytes); /* GHASH reduce 1st step */ aes_gcm_ghash_reduce (ctx); @@ -510,14 +458,14 @@ aes_gcm_calc (aes_gcm_ctx_t *ctx, aes_data_t *d, const u8 *src, u8 *dst, u32 n, static_always_inline void aes_gcm_calc_double (aes_gcm_ctx_t *ctx, aes_data_t *d, const u8 *src, u8 *dst) { - const aes_gcm_expaned_key_t *k = ctx->Ke; + const aes_expaned_key_t *k = ctx->Ke; const aes_mem_t *sv = (aes_mem_t *) src; aes_mem_t *dv = (aes_mem_t *) dst; aes_data_t r[4]; /* AES rounds 0 and 1 */ aes_gcm_enc_first_round (ctx, r, 4); - aes_gcm_enc_round (r, k + 1, 4); + aes_enc_round (r, k + 1, 4); /* load 4 blocks of data - decrypt round */ if (ctx->operation == AES_GCM_OP_DECRYPT) @@ -525,32 +473,32 @@ aes_gcm_calc_double (aes_gcm_ctx_t *ctx, aes_data_t *d, const u8 *src, u8 *dst) d[i] = sv[i]; /* GHASH multiply block 0 */ - aes_gcm_ghash_mul_first (ctx, d[0], N_LANES * 8); + aes_gcm_ghash_mul_first (ctx, d[0], N_AES_LANES * 8); /* AES rounds 2 and 3 */ - aes_gcm_enc_round (r, k + 2, 4); - aes_gcm_enc_round (r, k + 3, 4); + aes_enc_round (r, k + 2, 4); + aes_enc_round (r, k + 3, 4); /* GHASH multiply block 1 */ aes_gcm_ghash_mul_next (ctx, (d[1])); /* AES rounds 4 and 5 */ - aes_gcm_enc_round (r, k + 4, 4); - aes_gcm_enc_round (r, k + 5, 4); + aes_enc_round (r, k + 4, 4); + aes_enc_round (r, k + 5, 4); /* GHASH multiply block 2 */ aes_gcm_ghash_mul_next (ctx, (d[2])); /* AES rounds 6 and 7 */ - aes_gcm_enc_round (r, k + 6, 4); - aes_gcm_enc_round (r, k + 7, 4); + aes_enc_round (r, k + 6, 4); + aes_enc_round (r, k + 7, 4); /* GHASH multiply block 3 */ aes_gcm_ghash_mul_next (ctx, (d[3])); /* AES 
rounds 8 and 9 */ - aes_gcm_enc_round (r, k + 8, 4); - aes_gcm_enc_round (r, k + 9, 4); + aes_enc_round (r, k + 8, 4); + aes_enc_round (r, k + 9, 4); /* load 4 blocks of data - encrypt round */ if (ctx->operation == AES_GCM_OP_ENCRYPT) @@ -574,35 +522,35 @@ aes_gcm_calc_double (aes_gcm_ctx_t *ctx, aes_data_t *d, const u8 *src, u8 *dst) /* AES rounds 0 and 1 */ aes_gcm_enc_first_round (ctx, r, 4); - aes_gcm_enc_round (r, k + 1, 4); + aes_enc_round (r, k + 1, 4); /* GHASH multiply block 5 */ aes_gcm_ghash_mul_next (ctx, (d[1])); /* AES rounds 2 and 3 */ - aes_gcm_enc_round (r, k + 2, 4); - aes_gcm_enc_round (r, k + 3, 4); + aes_enc_round (r, k + 2, 4); + aes_enc_round (r, k + 3, 4); /* GHASH multiply block 6 */ aes_gcm_ghash_mul_next (ctx, (d[2])); /* AES rounds 4 and 5 */ - aes_gcm_enc_round (r, k + 4, 4); - aes_gcm_enc_round (r, k + 5, 4); + aes_enc_round (r, k + 4, 4); + aes_enc_round (r, k + 5, 4); /* GHASH multiply block 7 */ aes_gcm_ghash_mul_next (ctx, (d[3])); /* AES rounds 6 and 7 */ - aes_gcm_enc_round (r, k + 6, 4); - aes_gcm_enc_round (r, k + 7, 4); + aes_enc_round (r, k + 6, 4); + aes_enc_round (r, k + 7, 4); /* GHASH reduce 1st step */ aes_gcm_ghash_reduce (ctx); /* AES rounds 8 and 9 */ - aes_gcm_enc_round (r, k + 8, 4); - aes_gcm_enc_round (r, k + 9, 4); + aes_enc_round (r, k + 8, 4); + aes_enc_round (r, k + 9, 4); /* GHASH reduce 2nd step */ aes_gcm_ghash_reduce2 (ctx); @@ -644,14 +592,14 @@ static_always_inline void aes_gcm_calc_last (aes_gcm_ctx_t *ctx, aes_data_t *d, int n_blocks, u32 n_bytes) { - int n_lanes = (N_LANES == 1 ? n_blocks : (n_bytes + 15) / 16) + 1; - n_bytes -= (n_blocks - 1) * N; + int n_lanes = (N_AES_LANES == 1 ? n_blocks : (n_bytes + 15) / 16) + 1; + n_bytes -= (n_blocks - 1) * N_AES_BYTES; int i; aes_gcm_enc_ctr0_round (ctx, 0); aes_gcm_enc_ctr0_round (ctx, 1); - if (n_bytes != N) + if (n_bytes != N_AES_BYTES) aes_gcm_mask_bytes (d + n_blocks - 1, n_bytes); aes_gcm_ghash_mul_first (ctx, d[0], n_lanes); @@ -703,20 +651,20 @@ aes_gcm_enc (aes_gcm_ctx_t *ctx, const u8 *src, u8 *dst, u32 n_left) return; } - if (n_left < 4 * N) + if (n_left < 4 * N_AES_BYTES) { ctx->last = 1; - if (n_left > 3 * N) + if (n_left > 3 * N_AES_BYTES) { aes_gcm_calc (ctx, d, src, dst, 4, n_left, /* with_ghash */ 0); aes_gcm_calc_last (ctx, d, 4, n_left); } - else if (n_left > 2 * N) + else if (n_left > 2 * N_AES_BYTES) { aes_gcm_calc (ctx, d, src, dst, 3, n_left, /* with_ghash */ 0); aes_gcm_calc_last (ctx, d, 3, n_left); } - else if (n_left > N) + else if (n_left > N_AES_BYTES) { aes_gcm_calc (ctx, d, src, dst, 2, n_left, /* with_ghash */ 0); aes_gcm_calc_last (ctx, d, 2, n_left); @@ -729,45 +677,45 @@ aes_gcm_enc (aes_gcm_ctx_t *ctx, const u8 *src, u8 *dst, u32 n_left) return; } - aes_gcm_calc (ctx, d, src, dst, 4, 4 * N, /* with_ghash */ 0); + aes_gcm_calc (ctx, d, src, dst, 4, 4 * N_AES_BYTES, /* with_ghash */ 0); /* next */ - n_left -= 4 * N; - dst += 4 * N; - src += 4 * N; + n_left -= 4 * N_AES_BYTES; + dst += 4 * N_AES_BYTES; + src += 4 * N_AES_BYTES; - for (; n_left >= 8 * N; n_left -= 8 * N, src += 8 * N, dst += 8 * N) + for (int n = 8 * N_AES_BYTES; n_left >= n; n_left -= n, src += n, dst += n) aes_gcm_calc_double (ctx, d, src, dst); - if (n_left >= 4 * N) + if (n_left >= 4 * N_AES_BYTES) { - aes_gcm_calc (ctx, d, src, dst, 4, 4 * N, /* with_ghash */ 1); + aes_gcm_calc (ctx, d, src, dst, 4, 4 * N_AES_BYTES, /* with_ghash */ 1); /* next */ - n_left -= 4 * N; - dst += 4 * N; - src += 4 * N; + n_left -= 4 * N_AES_BYTES; + dst += 4 * N_AES_BYTES; + src += 4 * N_AES_BYTES; } 
if (n_left == 0) { - aes_gcm_calc_last (ctx, d, 4, 4 * N); + aes_gcm_calc_last (ctx, d, 4, 4 * N_AES_BYTES); return; } ctx->last = 1; - if (n_left > 3 * N) + if (n_left > 3 * N_AES_BYTES) { aes_gcm_calc (ctx, d, src, dst, 4, n_left, /* with_ghash */ 1); aes_gcm_calc_last (ctx, d, 4, n_left); } - else if (n_left > 2 * N) + else if (n_left > 2 * N_AES_BYTES) { aes_gcm_calc (ctx, d, src, dst, 3, n_left, /* with_ghash */ 1); aes_gcm_calc_last (ctx, d, 3, n_left); } - else if (n_left > N) + else if (n_left > N_AES_BYTES) { aes_gcm_calc (ctx, d, src, dst, 2, n_left, /* with_ghash */ 1); aes_gcm_calc_last (ctx, d, 2, n_left); @@ -786,28 +734,28 @@ aes_gcm_dec (aes_gcm_ctx_t *ctx, const u8 *src, u8 *dst, uword n_left) ghash_ctx_t gd; /* main encryption loop */ - for (; n_left >= 8 * N; n_left -= 8 * N, dst += 8 * N, src += 8 * N) + for (int n = 8 * N_AES_BYTES; n_left >= n; n_left -= n, dst += n, src += n) aes_gcm_calc_double (ctx, d, src, dst); - if (n_left >= 4 * N) + if (n_left >= 4 * N_AES_BYTES) { - aes_gcm_calc (ctx, d, src, dst, 4, 4 * N, /* with_ghash */ 1); + aes_gcm_calc (ctx, d, src, dst, 4, 4 * N_AES_BYTES, /* with_ghash */ 1); /* next */ - n_left -= 4 * N; - dst += N * 4; - src += N * 4; + n_left -= 4 * N_AES_BYTES; + dst += N_AES_BYTES * 4; + src += N_AES_BYTES * 4; } if (n_left) { ctx->last = 1; - if (n_left > 3 * N) + if (n_left > 3 * N_AES_BYTES) aes_gcm_calc (ctx, d, src, dst, 4, n_left, /* with_ghash */ 1); - else if (n_left > 2 * N) + else if (n_left > 2 * N_AES_BYTES) aes_gcm_calc (ctx, d, src, dst, 3, n_left, /* with_ghash */ 1); - else if (n_left > N) + else if (n_left > N_AES_BYTES) aes_gcm_calc (ctx, d, src, dst, 2, n_left, /* with_ghash */ 1); else aes_gcm_calc (ctx, d, src, dst, 1, n_left, /* with_ghash */ 1); @@ -867,11 +815,11 @@ aes_gcm (const u8 *src, u8 *dst, const u8 *aad, u8 *ivp, u8 *tag, Y0[3] = 1 << 24; ctx->EY0 = (u8x16) Y0; -#if N_LANES == 4 +#if N_AES_LANES == 4 ctx->Y = u32x16_splat_u32x4 (Y0) + (u32x16){ 0, 0, 0, 1 << 24, 0, 0, 0, 2 << 24, 0, 0, 0, 3 << 24, 0, 0, 0, 4 << 24, }; -#elif N_LANES == 2 +#elif N_AES_LANES == 2 ctx->Y = u32x8_splat_u32x4 (Y0) + (u32x8){ 0, 0, 0, 1 << 24, 0, 0, 0, 2 << 24 }; #else @@ -926,7 +874,7 @@ clib_aes_gcm_key_expand (aes_gcm_key_data_t *kd, const u8 *key, { u8x16 H; u8x16 ek[AES_KEY_ROUNDS (AES_KEY_256) + 1]; - aes_gcm_expaned_key_t *Ke = (aes_gcm_expaned_key_t *) kd->Ke; + aes_expaned_key_t *Ke = (aes_expaned_key_t *) kd->Ke; /* expand AES key */ aes_key_expand (ek, key, ks); |
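
Usage sketch (not part of the patch): the new aes_ctr.h above exposes a one-shot API (clib_aes_ctr_key_expand plus clib_aes128_ctr/clib_aes192_ctr/clib_aes256_ctr) and a streaming API (clib_aes_ctr_init/clib_aes_ctr_transform). The example below only re-uses the functions and types exactly as declared in the patch; the buffer names, lengths and the chunk split are illustrative assumptions.

/*
 * Hedged usage sketch for the AES-CTR API added by this change.
 * Everything except the function/type names and argument order is
 * made up for illustration.
 */
#include <vppinfra/crypto/aes_ctr.h>

static void
aes_ctr_usage_example (const u8 key[16], const u8 iv[16], const u8 *src,
		       u8 *dst, u32 len)
{
  aes_ctr_key_data_t kd;
  aes_ctr_ctx_t ctx;

  /* one-time key expansion; the expanded key is replicated into all
     SIMD lanes by clib_aes_ctr_key_expand () */
  clib_aes_ctr_key_expand (&kd, key, AES_KEY_128);

  /* one-shot: encrypt (or decrypt, CTR is symmetric) the whole buffer */
  clib_aes128_ctr (&kd, src, len, iv, dst);

  /* streaming: the context carries the counter and any keystream
     leftovers, so the same message may arrive in arbitrary chunks */
  clib_aes_ctr_init (&ctx, &kd, iv, AES_KEY_128);
  clib_aes_ctr_transform (&ctx, src, dst, len / 2, AES_KEY_128);
  clib_aes_ctr_transform (&ctx, src + len / 2, dst + len / 2, len - len / 2,
			  AES_KEY_128);
}

Under these assumptions the streaming path produces the same output as the one-shot call, because clib_aes_ctr_transform stores partial-block keystream bytes in the context and consumes them at the start of the next call.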