author	Damjan Marion <damarion@cisco.com>	2024-01-08 19:05:40 +0000
committer	Mohammed HAWARI <momohawari@gmail.com>	2024-01-12 17:37:28 +0000
commit	9caef2a3518ee9a453adbb748a9f4799a5fbb56b (patch)
tree	30bf322c274da88611071ccabdd4cf8cf36aad31 /src/vppinfra/crypto/aes_gcm.h
parent	bf40da413f8dc1d2d3a2ca355541d6b3648ba532 (diff)
vppinfra: native AES-CTR implementation
Type: feature
Change-Id: I7ef3277edaeb266fbd3c8c9355d4443002ed2311
Signed-off-by: Damjan Marion <damarion@cisco.com>
Diffstat (limited to 'src/vppinfra/crypto/aes_gcm.h')
-rw-r--r--	src/vppinfra/crypto/aes_gcm.h	274
1 file changed, 111 insertions(+), 163 deletions(-)
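
Most of the hunks that follow are mechanical renames: the GCM-private width machinery (the aes_data_t/aes_mem_t typedefs, N and N_LANES, aes_gcm_load_partial/aes_gcm_store_partial, aes_gcm_reflect, aes_gcm_expaned_key_t and the per-width aes_gcm_enc_round/aes_gcm_enc_last_round helpers) is dropped in favour of shared aes_* equivalents (N_AES_BYTES, N_AES_LANES, aes_load_partial, aes_store_partial, aes_reflect, aes_expaned_key_t, aes_enc_round, aes_enc_last_round), so the new native AES-CTR code can reuse the same building blocks. As a rough sketch only, reconstructed from the aes_gcm_enc_round body removed further down rather than copied from the new shared header (which may differ), the width-generic round helper this file now calls presumably looks like:

/* Hypothetical reconstruction of the shared multi-register AES round
 * helper; the real definition lives in the common vppinfra AES header
 * introduced alongside this change. */
static_always_inline void
aes_enc_round (aes_data_t *r, const aes_expaned_key_t *Ke, uword n_blocks)
{
  for (int i = 0; i < n_blocks; i++)
#if N_AES_LANES == 4
    r[i] = aes_enc_round_x4 (r[i], Ke->x4); /* VAES + AVX512: 4 blocks/reg */
#elif N_AES_LANES == 2
    r[i] = aes_enc_round_x2 (r[i], Ke->x2); /* VAES: 2 blocks/reg */
#else
    r[i] = aes_enc_round_x1 (r[i], Ke->x1); /* one 128-bit block per reg */
#endif
}
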
diff --git a/src/vppinfra/crypto/aes_gcm.h b/src/vppinfra/crypto/aes_gcm.h
index 3d1b220f7b8..5b628c87745 100644
--- a/src/vppinfra/crypto/aes_gcm.h
+++ b/src/vppinfra/crypto/aes_gcm.h
@@ -13,47 +13,25 @@
#include <vppinfra/crypto/ghash.h>
#define NUM_HI 36
-#if defined(__VAES__) && defined(__AVX512F__)
-typedef u8x64 aes_data_t;
+#if N_AES_LANES == 4
typedef u8x64u aes_ghash_t;
-typedef u8x64u aes_mem_t;
-typedef u32x16 aes_gcm_counter_t;
-#define N 64
-#define aes_gcm_load_partial(p, n) u8x64_load_partial ((u8 *) (p), n)
-#define aes_gcm_store_partial(v, p, n) u8x64_store_partial (v, (u8 *) (p), n)
#define aes_gcm_splat(v) u8x64_splat (v)
-#define aes_gcm_reflect(r) u8x64_reflect_u8x16 (r)
#define aes_gcm_ghash_reduce(c) ghash4_reduce (&(c)->gd)
#define aes_gcm_ghash_reduce2(c) ghash4_reduce2 (&(c)->gd)
#define aes_gcm_ghash_final(c) (c)->T = ghash4_final (&(c)->gd)
-#elif defined(__VAES__)
-typedef u8x32 aes_data_t;
+#elif N_AES_LANES == 2
typedef u8x32u aes_ghash_t;
-typedef u8x32u aes_mem_t;
-typedef u32x8 aes_gcm_counter_t;
-#define N 32
-#define aes_gcm_load_partial(p, n) u8x32_load_partial ((u8 *) (p), n)
-#define aes_gcm_store_partial(v, p, n) u8x32_store_partial (v, (u8 *) (p), n)
#define aes_gcm_splat(v) u8x32_splat (v)
-#define aes_gcm_reflect(r) u8x32_reflect_u8x16 (r)
#define aes_gcm_ghash_reduce(c) ghash2_reduce (&(c)->gd)
#define aes_gcm_ghash_reduce2(c) ghash2_reduce2 (&(c)->gd)
#define aes_gcm_ghash_final(c) (c)->T = ghash2_final (&(c)->gd)
#else
-typedef u8x16 aes_data_t;
typedef u8x16 aes_ghash_t;
-typedef u8x16u aes_mem_t;
-typedef u32x4 aes_gcm_counter_t;
-#define N 16
-#define aes_gcm_load_partial(p, n) u8x16_load_partial ((u8 *) (p), n)
-#define aes_gcm_store_partial(v, p, n) u8x16_store_partial (v, (u8 *) (p), n)
#define aes_gcm_splat(v) u8x16_splat (v)
-#define aes_gcm_reflect(r) u8x16_reflect (r)
#define aes_gcm_ghash_reduce(c) ghash_reduce (&(c)->gd)
#define aes_gcm_ghash_reduce2(c) ghash_reduce2 (&(c)->gd)
#define aes_gcm_ghash_final(c) (c)->T = ghash_final (&(c)->gd)
#endif
-#define N_LANES (N / 16)
typedef enum
{
@@ -63,21 +41,12 @@ typedef enum
AES_GCM_OP_GMAC
} aes_gcm_op_t;
-typedef union
-{
- u8x16 x1;
- u8x32 x2;
- u8x64 x4;
- u8x16 lanes[4];
-} __clib_aligned (64)
-aes_gcm_expaned_key_t;
-
typedef struct
{
/* pre-calculated hash key values */
const u8x16 Hi[NUM_HI];
/* extracted AES key */
- const aes_gcm_expaned_key_t Ke[AES_KEY_ROUNDS (AES_KEY_256) + 1];
+ const aes_expaned_key_t Ke[AES_KEY_ROUNDS (AES_KEY_256) + 1];
} aes_gcm_key_data_t;
typedef struct
@@ -95,12 +64,12 @@ typedef struct
const aes_ghash_t *next_Hi;
/* expanded keys */
- const aes_gcm_expaned_key_t *Ke;
+ const aes_expaned_key_t *Ke;
/* counter */
u32 counter;
u8x16 EY0;
- aes_gcm_counter_t Y;
+ aes_counter_t Y;
/* ghash */
ghash_ctx_t gd;
@@ -117,39 +86,39 @@ aes_gcm_ghash_mul_first (aes_gcm_ctx_t *ctx, aes_data_t data, u32 n_lanes)
{
uword hash_offset = NUM_HI - n_lanes;
ctx->next_Hi = (aes_ghash_t *) (ctx->Hi + hash_offset);
-#if N_LANES == 4
+#if N_AES_LANES == 4
u8x64 tag4 = {};
tag4 = u8x64_insert_u8x16 (tag4, ctx->T, 0);
- ghash4_mul_first (&ctx->gd, aes_gcm_reflect (data) ^ tag4, *ctx->next_Hi++);
-#elif N_LANES == 2
+ ghash4_mul_first (&ctx->gd, aes_reflect (data) ^ tag4, *ctx->next_Hi++);
+#elif N_AES_LANES == 2
u8x32 tag2 = {};
tag2 = u8x32_insert_lo (tag2, ctx->T);
- ghash2_mul_first (&ctx->gd, aes_gcm_reflect (data) ^ tag2, *ctx->next_Hi++);
+ ghash2_mul_first (&ctx->gd, aes_reflect (data) ^ tag2, *ctx->next_Hi++);
#else
- ghash_mul_first (&ctx->gd, aes_gcm_reflect (data) ^ ctx->T, *ctx->next_Hi++);
+ ghash_mul_first (&ctx->gd, aes_reflect (data) ^ ctx->T, *ctx->next_Hi++);
#endif
}
static_always_inline void
aes_gcm_ghash_mul_next (aes_gcm_ctx_t *ctx, aes_data_t data)
{
-#if N_LANES == 4
- ghash4_mul_next (&ctx->gd, aes_gcm_reflect (data), *ctx->next_Hi++);
-#elif N_LANES == 2
- ghash2_mul_next (&ctx->gd, aes_gcm_reflect (data), *ctx->next_Hi++);
+#if N_AES_LANES == 4
+ ghash4_mul_next (&ctx->gd, aes_reflect (data), *ctx->next_Hi++);
+#elif N_AES_LANES == 2
+ ghash2_mul_next (&ctx->gd, aes_reflect (data), *ctx->next_Hi++);
#else
- ghash_mul_next (&ctx->gd, aes_gcm_reflect (data), *ctx->next_Hi++);
+ ghash_mul_next (&ctx->gd, aes_reflect (data), *ctx->next_Hi++);
#endif
}
static_always_inline void
aes_gcm_ghash_mul_final_block (aes_gcm_ctx_t *ctx)
{
-#if N_LANES == 4
+#if N_AES_LANES == 4
u8x64 h = u8x64_insert_u8x16 (u8x64_zero (), ctx->Hi[NUM_HI - 1], 0);
u8x64 r4 = u8x64_insert_u8x16 (u8x64_zero (), aes_gcm_final_block (ctx), 0);
ghash4_mul_next (&ctx->gd, r4, h);
-#elif N_LANES == 2
+#elif N_AES_LANES == 2
u8x32 h = u8x32_insert_lo (u8x32_zero (), ctx->Hi[NUM_HI - 1]);
u8x32 r2 = u8x32_insert_lo (u8x32_zero (), aes_gcm_final_block (ctx));
ghash2_mul_next (&ctx->gd, r2, h);
@@ -164,9 +133,9 @@ aes_gcm_enc_ctr0_round (aes_gcm_ctx_t *ctx, int aes_round)
if (aes_round == 0)
ctx->EY0 ^= ctx->Ke[0].x1;
else if (aes_round == ctx->rounds)
- ctx->EY0 = aes_enc_last_round (ctx->EY0, ctx->Ke[aes_round].x1);
+ ctx->EY0 = aes_enc_last_round_x1 (ctx->EY0, ctx->Ke[aes_round].x1);
else
- ctx->EY0 = aes_enc_round (ctx->EY0, ctx->Ke[aes_round].x1);
+ ctx->EY0 = aes_enc_round_x1 (ctx->EY0, ctx->Ke[aes_round].x1);
}
static_always_inline void
@@ -176,11 +145,11 @@ aes_gcm_ghash (aes_gcm_ctx_t *ctx, u8 *data, u32 n_left)
aes_data_t r = {};
const aes_mem_t *d = (aes_mem_t *) data;
- for (; n_left >= 8 * N; n_left -= 8 * N, d += 8)
+ for (int n = 8 * N_AES_BYTES; n_left >= n; n_left -= n, d += 8)
{
- if (ctx->operation == AES_GCM_OP_GMAC && n_left == N * 8)
+ if (ctx->operation == AES_GCM_OP_GMAC && n_left == n)
{
- aes_gcm_ghash_mul_first (ctx, d[0], 8 * N_LANES + 1);
+ aes_gcm_ghash_mul_first (ctx, d[0], 8 * N_AES_LANES + 1);
for (i = 1; i < 8; i++)
aes_gcm_ghash_mul_next (ctx, d[i]);
aes_gcm_ghash_mul_final_block (ctx);
@@ -190,7 +159,7 @@ aes_gcm_ghash (aes_gcm_ctx_t *ctx, u8 *data, u32 n_left)
goto done;
}
- aes_gcm_ghash_mul_first (ctx, d[0], 8 * N_LANES);
+ aes_gcm_ghash_mul_first (ctx, d[0], 8 * N_AES_LANES);
for (i = 1; i < 8; i++)
aes_gcm_ghash_mul_next (ctx, d[i]);
aes_gcm_ghash_reduce (ctx);
@@ -205,7 +174,7 @@ aes_gcm_ghash (aes_gcm_ctx_t *ctx, u8 *data, u32 n_left)
if (ctx->operation == AES_GCM_OP_GMAC)
n_lanes++;
- if (n_left < N)
+ if (n_left < N_AES_BYTES)
{
clib_memcpy_fast (&r, d, n_left);
aes_gcm_ghash_mul_first (ctx, r, n_lanes);
@@ -213,30 +182,30 @@ aes_gcm_ghash (aes_gcm_ctx_t *ctx, u8 *data, u32 n_left)
else
{
aes_gcm_ghash_mul_first (ctx, d[0], n_lanes);
- n_left -= N;
+ n_left -= N_AES_BYTES;
i = 1;
- if (n_left >= 4 * N)
+ if (n_left >= 4 * N_AES_BYTES)
{
aes_gcm_ghash_mul_next (ctx, d[i]);
aes_gcm_ghash_mul_next (ctx, d[i + 1]);
aes_gcm_ghash_mul_next (ctx, d[i + 2]);
aes_gcm_ghash_mul_next (ctx, d[i + 3]);
- n_left -= 4 * N;
+ n_left -= 4 * N_AES_BYTES;
i += 4;
}
- if (n_left >= 2 * N)
+ if (n_left >= 2 * N_AES_BYTES)
{
aes_gcm_ghash_mul_next (ctx, d[i]);
aes_gcm_ghash_mul_next (ctx, d[i + 1]);
- n_left -= 2 * N;
+ n_left -= 2 * N_AES_BYTES;
i += 2;
}
- if (n_left >= N)
+ if (n_left >= N_AES_BYTES)
{
aes_gcm_ghash_mul_next (ctx, d[i]);
- n_left -= N;
+ n_left -= N_AES_BYTES;
i += 1;
}
@@ -267,7 +236,7 @@ done:
static_always_inline void
aes_gcm_enc_first_round (aes_gcm_ctx_t *ctx, aes_data_t *r, uword n_blocks)
{
- const aes_gcm_expaned_key_t Ke0 = ctx->Ke[0];
+ const aes_expaned_key_t Ke0 = ctx->Ke[0];
uword i = 0;
/* As counter is stored in network byte order for performance reasons we
@@ -275,7 +244,7 @@ aes_gcm_enc_first_round (aes_gcm_ctx_t *ctx, aes_data_t *r, uword n_blocks)
overflow. As we are processing four 128-, 256- or 512-bit blocks in parallel
except the last round, overflow can happen only when n_blocks == 4 */
-#if N_LANES == 4
+#if N_AES_LANES == 4
const u32x16 ctr_inv_4444 = { 0, 0, 0, 4 << 24, 0, 0, 0, 4 << 24,
0, 0, 0, 4 << 24, 0, 0, 0, 4 << 24 };
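
The comment a few lines above notes that the counter is kept in network byte order and that the cheap in-register increment can overflow only when n_blocks == 4: with four registers of N_AES_LANES counters each, a full 4-lane batch advances the counter by 16 (see ctx->counter += n_blocks * 4 below), and 242 + 16 > 255, so the low byte of the big-endian counter would wrap and the carry would be lost. A minimal standalone demonstration of that wrap, assuming a little-endian host; this is my own illustration, not code from the patch, with __builtin_bswap32 standing in for the aes_reflect slow path:

#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  /* counter value 254, stored big-endian as in the last u32 of each lane */
  uint32_t y = __builtin_bswap32 (254);

  /* fast path: add (4 << 24), i.e. bump only the low-order byte of the
     big-endian counter -- a carry out of that byte is lost */
  uint32_t fast = y + (4u << 24);

  /* slow path: swap to host order, add, swap back -- carry propagates */
  uint32_t slow = __builtin_bswap32 (__builtin_bswap32 (y) + 4);

  printf ("fast: %u\n", __builtin_bswap32 (fast)); /* 2   (wrong)   */
  printf ("slow: %u\n", __builtin_bswap32 (slow)); /* 258 (correct) */
  return 0;
}
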
@@ -292,13 +261,13 @@ aes_gcm_enc_first_round (aes_gcm_ctx_t *ctx, aes_data_t *r, uword n_blocks)
if (n_blocks == 4 && PREDICT_FALSE ((u8) ctx->counter == 242))
{
- u32x16 Yr = (u32x16) aes_gcm_reflect ((u8x64) ctx->Y);
+ u32x16 Yr = (u32x16) aes_reflect ((u8x64) ctx->Y);
for (; i < n_blocks; i++)
{
r[i] = Ke0.x4 ^ (u8x64) ctx->Y; /* Initial AES round */
Yr += ctr_4444;
- ctx->Y = (u32x16) aes_gcm_reflect ((u8x64) Yr);
+ ctx->Y = (u32x16) aes_reflect ((u8x64) Yr);
}
}
else
@@ -310,7 +279,7 @@ aes_gcm_enc_first_round (aes_gcm_ctx_t *ctx, aes_data_t *r, uword n_blocks)
}
}
ctx->counter += n_blocks * 4;
-#elif N_LANES == 2
+#elif N_AES_LANES == 2
const u32x8 ctr_inv_22 = { 0, 0, 0, 2 << 24, 0, 0, 0, 2 << 24 };
const u32x8 ctr_22 = { 2, 0, 0, 0, 2, 0, 0, 0 };
@@ -323,13 +292,13 @@ aes_gcm_enc_first_round (aes_gcm_ctx_t *ctx, aes_data_t *r, uword n_blocks)
if (n_blocks == 4 && PREDICT_FALSE ((u8) ctx->counter == 250))
{
- u32x8 Yr = (u32x8) aes_gcm_reflect ((u8x32) ctx->Y);
+ u32x8 Yr = (u32x8) aes_reflect ((u8x32) ctx->Y);
for (; i < n_blocks; i++)
{
r[i] = Ke0.x2 ^ (u8x32) ctx->Y; /* Initial AES round */
Yr += ctr_22;
- ctx->Y = (u32x8) aes_gcm_reflect ((u8x32) Yr);
+ ctx->Y = (u32x8) aes_reflect ((u8x32) Yr);
}
}
else
@@ -370,42 +339,21 @@ aes_gcm_enc_first_round (aes_gcm_ctx_t *ctx, aes_data_t *r, uword n_blocks)
}
static_always_inline void
-aes_gcm_enc_round (aes_data_t *r, const aes_gcm_expaned_key_t *Ke,
- uword n_blocks)
-{
- for (int i = 0; i < n_blocks; i++)
-#if N_LANES == 4
- r[i] = aes_enc_round_x4 (r[i], Ke->x4);
-#elif N_LANES == 2
- r[i] = aes_enc_round_x2 (r[i], Ke->x2);
-#else
- r[i] = aes_enc_round (r[i], Ke->x1);
-#endif
-}
-
-static_always_inline void
aes_gcm_enc_last_round (aes_gcm_ctx_t *ctx, aes_data_t *r, aes_data_t *d,
- const aes_gcm_expaned_key_t *Ke, uword n_blocks)
+ const aes_expaned_key_t *Ke, uword n_blocks)
{
/* additional rounds for AES-192 and AES-256 */
for (int i = 10; i < ctx->rounds; i++)
- aes_gcm_enc_round (r, Ke + i, n_blocks);
+ aes_enc_round (r, Ke + i, n_blocks);
- for (int i = 0; i < n_blocks; i++)
-#if N_LANES == 4
- d[i] ^= aes_enc_last_round_x4 (r[i], Ke[ctx->rounds].x4);
-#elif N_LANES == 2
- d[i] ^= aes_enc_last_round_x2 (r[i], Ke[ctx->rounds].x2);
-#else
- d[i] ^= aes_enc_last_round (r[i], Ke[ctx->rounds].x1);
-#endif
+ aes_enc_last_round (r, d, Ke + ctx->rounds, n_blocks);
}
static_always_inline void
aes_gcm_calc (aes_gcm_ctx_t *ctx, aes_data_t *d, const u8 *src, u8 *dst, u32 n,
u32 n_bytes, int with_ghash)
{
- const aes_gcm_expaned_key_t *k = ctx->Ke;
+ const aes_expaned_key_t *k = ctx->Ke;
const aes_mem_t *sv = (aes_mem_t *) src;
aes_mem_t *dv = (aes_mem_t *) dst;
uword ghash_blocks, gc = 1;
@@ -415,23 +363,23 @@ aes_gcm_calc (aes_gcm_ctx_t *ctx, aes_data_t *d, const u8 *src, u8 *dst, u32 n,
if (ctx->operation == AES_GCM_OP_ENCRYPT)
{
ghash_blocks = 4;
- n_lanes = N_LANES * 4;
+ n_lanes = N_AES_LANES * 4;
}
else
{
ghash_blocks = n;
- n_lanes = n * N_LANES;
-#if N_LANES != 1
+ n_lanes = n * N_AES_LANES;
+#if N_AES_LANES != 1
if (ctx->last)
n_lanes = (n_bytes + 15) / 16;
#endif
}
- n_bytes -= (n - 1) * N;
+ n_bytes -= (n - 1) * N_AES_BYTES;
/* AES rounds 0 and 1 */
aes_gcm_enc_first_round (ctx, r, n);
- aes_gcm_enc_round (r, k + 1, n);
+ aes_enc_round (r, k + 1, n);
/* load data - decrypt round */
if (ctx->operation == AES_GCM_OP_DECRYPT)
@@ -440,7 +388,7 @@ aes_gcm_calc (aes_gcm_ctx_t *ctx, aes_data_t *d, const u8 *src, u8 *dst, u32 n,
d[i] = sv[i];
if (ctx->last)
- d[n - 1] = aes_gcm_load_partial ((u8 *) (sv + n - 1), n_bytes);
+ d[n - 1] = aes_load_partial ((u8 *) (sv + n - 1), n_bytes);
}
/* GHASH multiply block 0 */
@@ -448,24 +396,24 @@ aes_gcm_calc (aes_gcm_ctx_t *ctx, aes_data_t *d, const u8 *src, u8 *dst, u32 n,
aes_gcm_ghash_mul_first (ctx, d[0], n_lanes);
/* AES rounds 2 and 3 */
- aes_gcm_enc_round (r, k + 2, n);
- aes_gcm_enc_round (r, k + 3, n);
+ aes_enc_round (r, k + 2, n);
+ aes_enc_round (r, k + 3, n);
/* GHASH multiply block 1 */
if (with_ghash && gc++ < ghash_blocks)
aes_gcm_ghash_mul_next (ctx, (d[1]));
/* AES rounds 4 and 5 */
- aes_gcm_enc_round (r, k + 4, n);
- aes_gcm_enc_round (r, k + 5, n);
+ aes_enc_round (r, k + 4, n);
+ aes_enc_round (r, k + 5, n);
/* GHASH multiply block 2 */
if (with_ghash && gc++ < ghash_blocks)
aes_gcm_ghash_mul_next (ctx, (d[2]));
/* AES rounds 6 and 7 */
- aes_gcm_enc_round (r, k + 6, n);
- aes_gcm_enc_round (r, k + 7, n);
+ aes_enc_round (r, k + 6, n);
+ aes_enc_round (r, k + 7, n);
/* GHASH multiply block 3 */
if (with_ghash && gc++ < ghash_blocks)
@@ -478,12 +426,12 @@ aes_gcm_calc (aes_gcm_ctx_t *ctx, aes_data_t *d, const u8 *src, u8 *dst, u32 n,
d[i] = sv[i];
if (ctx->last)
- d[n - 1] = aes_gcm_load_partial (sv + n - 1, n_bytes);
+ d[n - 1] = aes_load_partial (sv + n - 1, n_bytes);
}
/* AES rounds 8 and 9 */
- aes_gcm_enc_round (r, k + 8, n);
- aes_gcm_enc_round (r, k + 9, n);
+ aes_enc_round (r, k + 8, n);
+ aes_enc_round (r, k + 9, n);
/* AES last round(s) */
aes_gcm_enc_last_round (ctx, r, d, k, n);
@@ -493,7 +441,7 @@ aes_gcm_calc (aes_gcm_ctx_t *ctx, aes_data_t *d, const u8 *src, u8 *dst, u32 n,
dv[i] = d[i];
if (ctx->last)
- aes_gcm_store_partial (d[n - 1], dv + n - 1, n_bytes);
+ aes_store_partial (d[n - 1], dv + n - 1, n_bytes);
/* GHASH reduce 1st step */
aes_gcm_ghash_reduce (ctx);
@@ -510,14 +458,14 @@ aes_gcm_calc (aes_gcm_ctx_t *ctx, aes_data_t *d, const u8 *src, u8 *dst, u32 n,
static_always_inline void
aes_gcm_calc_double (aes_gcm_ctx_t *ctx, aes_data_t *d, const u8 *src, u8 *dst)
{
- const aes_gcm_expaned_key_t *k = ctx->Ke;
+ const aes_expaned_key_t *k = ctx->Ke;
const aes_mem_t *sv = (aes_mem_t *) src;
aes_mem_t *dv = (aes_mem_t *) dst;
aes_data_t r[4];
/* AES rounds 0 and 1 */
aes_gcm_enc_first_round (ctx, r, 4);
- aes_gcm_enc_round (r, k + 1, 4);
+ aes_enc_round (r, k + 1, 4);
/* load 4 blocks of data - decrypt round */
if (ctx->operation == AES_GCM_OP_DECRYPT)
@@ -525,32 +473,32 @@ aes_gcm_calc_double (aes_gcm_ctx_t *ctx, aes_data_t *d, const u8 *src, u8 *dst)
d[i] = sv[i];
/* GHASH multiply block 0 */
- aes_gcm_ghash_mul_first (ctx, d[0], N_LANES * 8);
+ aes_gcm_ghash_mul_first (ctx, d[0], N_AES_LANES * 8);
/* AES rounds 2 and 3 */
- aes_gcm_enc_round (r, k + 2, 4);
- aes_gcm_enc_round (r, k + 3, 4);
+ aes_enc_round (r, k + 2, 4);
+ aes_enc_round (r, k + 3, 4);
/* GHASH multiply block 1 */
aes_gcm_ghash_mul_next (ctx, (d[1]));
/* AES rounds 4 and 5 */
- aes_gcm_enc_round (r, k + 4, 4);
- aes_gcm_enc_round (r, k + 5, 4);
+ aes_enc_round (r, k + 4, 4);
+ aes_enc_round (r, k + 5, 4);
/* GHASH multiply block 2 */
aes_gcm_ghash_mul_next (ctx, (d[2]));
/* AES rounds 6 and 7 */
- aes_gcm_enc_round (r, k + 6, 4);
- aes_gcm_enc_round (r, k + 7, 4);
+ aes_enc_round (r, k + 6, 4);
+ aes_enc_round (r, k + 7, 4);
/* GHASH multiply block 3 */
aes_gcm_ghash_mul_next (ctx, (d[3]));
/* AES rounds 8 and 9 */
- aes_gcm_enc_round (r, k + 8, 4);
- aes_gcm_enc_round (r, k + 9, 4);
+ aes_enc_round (r, k + 8, 4);
+ aes_enc_round (r, k + 9, 4);
/* load 4 blocks of data - encrypt round */
if (ctx->operation == AES_GCM_OP_ENCRYPT)
@@ -574,35 +522,35 @@ aes_gcm_calc_double (aes_gcm_ctx_t *ctx, aes_data_t *d, const u8 *src, u8 *dst)
/* AES rounds 0 and 1 */
aes_gcm_enc_first_round (ctx, r, 4);
- aes_gcm_enc_round (r, k + 1, 4);
+ aes_enc_round (r, k + 1, 4);
/* GHASH multiply block 5 */
aes_gcm_ghash_mul_next (ctx, (d[1]));
/* AES rounds 2 and 3 */
- aes_gcm_enc_round (r, k + 2, 4);
- aes_gcm_enc_round (r, k + 3, 4);
+ aes_enc_round (r, k + 2, 4);
+ aes_enc_round (r, k + 3, 4);
/* GHASH multiply block 6 */
aes_gcm_ghash_mul_next (ctx, (d[2]));
/* AES rounds 4 and 5 */
- aes_gcm_enc_round (r, k + 4, 4);
- aes_gcm_enc_round (r, k + 5, 4);
+ aes_enc_round (r, k + 4, 4);
+ aes_enc_round (r, k + 5, 4);
/* GHASH multiply block 7 */
aes_gcm_ghash_mul_next (ctx, (d[3]));
/* AES rounds 6 and 7 */
- aes_gcm_enc_round (r, k + 6, 4);
- aes_gcm_enc_round (r, k + 7, 4);
+ aes_enc_round (r, k + 6, 4);
+ aes_enc_round (r, k + 7, 4);
/* GHASH reduce 1st step */
aes_gcm_ghash_reduce (ctx);
/* AES rounds 8 and 9 */
- aes_gcm_enc_round (r, k + 8, 4);
- aes_gcm_enc_round (r, k + 9, 4);
+ aes_enc_round (r, k + 8, 4);
+ aes_enc_round (r, k + 9, 4);
/* GHASH reduce 2nd step */
aes_gcm_ghash_reduce2 (ctx);
@@ -644,14 +592,14 @@ static_always_inline void
aes_gcm_calc_last (aes_gcm_ctx_t *ctx, aes_data_t *d, int n_blocks,
u32 n_bytes)
{
- int n_lanes = (N_LANES == 1 ? n_blocks : (n_bytes + 15) / 16) + 1;
- n_bytes -= (n_blocks - 1) * N;
+ int n_lanes = (N_AES_LANES == 1 ? n_blocks : (n_bytes + 15) / 16) + 1;
+ n_bytes -= (n_blocks - 1) * N_AES_BYTES;
int i;
aes_gcm_enc_ctr0_round (ctx, 0);
aes_gcm_enc_ctr0_round (ctx, 1);
- if (n_bytes != N)
+ if (n_bytes != N_AES_BYTES)
aes_gcm_mask_bytes (d + n_blocks - 1, n_bytes);
aes_gcm_ghash_mul_first (ctx, d[0], n_lanes);
@@ -703,20 +651,20 @@ aes_gcm_enc (aes_gcm_ctx_t *ctx, const u8 *src, u8 *dst, u32 n_left)
return;
}
- if (n_left < 4 * N)
+ if (n_left < 4 * N_AES_BYTES)
{
ctx->last = 1;
- if (n_left > 3 * N)
+ if (n_left > 3 * N_AES_BYTES)
{
aes_gcm_calc (ctx, d, src, dst, 4, n_left, /* with_ghash */ 0);
aes_gcm_calc_last (ctx, d, 4, n_left);
}
- else if (n_left > 2 * N)
+ else if (n_left > 2 * N_AES_BYTES)
{
aes_gcm_calc (ctx, d, src, dst, 3, n_left, /* with_ghash */ 0);
aes_gcm_calc_last (ctx, d, 3, n_left);
}
- else if (n_left > N)
+ else if (n_left > N_AES_BYTES)
{
aes_gcm_calc (ctx, d, src, dst, 2, n_left, /* with_ghash */ 0);
aes_gcm_calc_last (ctx, d, 2, n_left);
@@ -729,45 +677,45 @@ aes_gcm_enc (aes_gcm_ctx_t *ctx, const u8 *src, u8 *dst, u32 n_left)
return;
}
- aes_gcm_calc (ctx, d, src, dst, 4, 4 * N, /* with_ghash */ 0);
+ aes_gcm_calc (ctx, d, src, dst, 4, 4 * N_AES_BYTES, /* with_ghash */ 0);
/* next */
- n_left -= 4 * N;
- dst += 4 * N;
- src += 4 * N;
+ n_left -= 4 * N_AES_BYTES;
+ dst += 4 * N_AES_BYTES;
+ src += 4 * N_AES_BYTES;
- for (; n_left >= 8 * N; n_left -= 8 * N, src += 8 * N, dst += 8 * N)
+ for (int n = 8 * N_AES_BYTES; n_left >= n; n_left -= n, src += n, dst += n)
aes_gcm_calc_double (ctx, d, src, dst);
- if (n_left >= 4 * N)
+ if (n_left >= 4 * N_AES_BYTES)
{
- aes_gcm_calc (ctx, d, src, dst, 4, 4 * N, /* with_ghash */ 1);
+ aes_gcm_calc (ctx, d, src, dst, 4, 4 * N_AES_BYTES, /* with_ghash */ 1);
/* next */
- n_left -= 4 * N;
- dst += 4 * N;
- src += 4 * N;
+ n_left -= 4 * N_AES_BYTES;
+ dst += 4 * N_AES_BYTES;
+ src += 4 * N_AES_BYTES;
}
if (n_left == 0)
{
- aes_gcm_calc_last (ctx, d, 4, 4 * N);
+ aes_gcm_calc_last (ctx, d, 4, 4 * N_AES_BYTES);
return;
}
ctx->last = 1;
- if (n_left > 3 * N)
+ if (n_left > 3 * N_AES_BYTES)
{
aes_gcm_calc (ctx, d, src, dst, 4, n_left, /* with_ghash */ 1);
aes_gcm_calc_last (ctx, d, 4, n_left);
}
- else if (n_left > 2 * N)
+ else if (n_left > 2 * N_AES_BYTES)
{
aes_gcm_calc (ctx, d, src, dst, 3, n_left, /* with_ghash */ 1);
aes_gcm_calc_last (ctx, d, 3, n_left);
}
- else if (n_left > N)
+ else if (n_left > N_AES_BYTES)
{
aes_gcm_calc (ctx, d, src, dst, 2, n_left, /* with_ghash */ 1);
aes_gcm_calc_last (ctx, d, 2, n_left);
@@ -786,28 +734,28 @@ aes_gcm_dec (aes_gcm_ctx_t *ctx, const u8 *src, u8 *dst, uword n_left)
ghash_ctx_t gd;
/* main encryption loop */
- for (; n_left >= 8 * N; n_left -= 8 * N, dst += 8 * N, src += 8 * N)
+ for (int n = 8 * N_AES_BYTES; n_left >= n; n_left -= n, dst += n, src += n)
aes_gcm_calc_double (ctx, d, src, dst);
- if (n_left >= 4 * N)
+ if (n_left >= 4 * N_AES_BYTES)
{
- aes_gcm_calc (ctx, d, src, dst, 4, 4 * N, /* with_ghash */ 1);
+ aes_gcm_calc (ctx, d, src, dst, 4, 4 * N_AES_BYTES, /* with_ghash */ 1);
/* next */
- n_left -= 4 * N;
- dst += N * 4;
- src += N * 4;
+ n_left -= 4 * N_AES_BYTES;
+ dst += N_AES_BYTES * 4;
+ src += N_AES_BYTES * 4;
}
if (n_left)
{
ctx->last = 1;
- if (n_left > 3 * N)
+ if (n_left > 3 * N_AES_BYTES)
aes_gcm_calc (ctx, d, src, dst, 4, n_left, /* with_ghash */ 1);
- else if (n_left > 2 * N)
+ else if (n_left > 2 * N_AES_BYTES)
aes_gcm_calc (ctx, d, src, dst, 3, n_left, /* with_ghash */ 1);
- else if (n_left > N)
+ else if (n_left > N_AES_BYTES)
aes_gcm_calc (ctx, d, src, dst, 2, n_left, /* with_ghash */ 1);
else
aes_gcm_calc (ctx, d, src, dst, 1, n_left, /* with_ghash */ 1);
@@ -867,11 +815,11 @@ aes_gcm (const u8 *src, u8 *dst, const u8 *aad, u8 *ivp, u8 *tag,
Y0[3] = 1 << 24;
ctx->EY0 = (u8x16) Y0;
-#if N_LANES == 4
+#if N_AES_LANES == 4
ctx->Y = u32x16_splat_u32x4 (Y0) + (u32x16){
0, 0, 0, 1 << 24, 0, 0, 0, 2 << 24, 0, 0, 0, 3 << 24, 0, 0, 0, 4 << 24,
};
-#elif N_LANES == 2
+#elif N_AES_LANES == 2
ctx->Y =
u32x8_splat_u32x4 (Y0) + (u32x8){ 0, 0, 0, 1 << 24, 0, 0, 0, 2 << 24 };
#else
@@ -926,7 +874,7 @@ clib_aes_gcm_key_expand (aes_gcm_key_data_t *kd, const u8 *key,
{
u8x16 H;
u8x16 ek[AES_KEY_ROUNDS (AES_KEY_256) + 1];
- aes_gcm_expaned_key_t *Ke = (aes_gcm_expaned_key_t *) kd->Ke;
+ aes_expaned_key_t *Ke = (aes_expaned_key_t *) kd->Ke;
/* expand AES key */
aes_key_expand (ek, key, ks);