Diffstat (limited to 'src/plugins/crypto_native')
-rw-r--r--  src/plugins/crypto_native/aes.h           | 244
-rw-r--r--  src/plugins/crypto_native/aes_cbc.c       |  87
-rw-r--r--  src/plugins/crypto_native/aes_gcm.c       |   2
-rw-r--r--  src/plugins/crypto_native/crypto_native.h |   2
4 files changed, 169 insertions(+), 166 deletions(-)
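
The theme of the change: the AES-NI intrinsics and their __m128i operands are wrapped once, in aes.h, behind always-inline helpers that operate on the portable u8x16 vector type, so the CBC and GCM code no longer touches __m128i directly. A minimal sketch of the pattern, assuming u8x16 is the 16-byte GCC/clang vector of unsigned bytes that vppinfra provides (the exact typedef lives in vppinfra/vector.h):

#include <immintrin.h>

/* assumption: vppinfra defines u8x16 along these lines */
typedef unsigned char u8x16 __attribute__ ((vector_size (16)));

/* casts between u8x16 and __m128i are free register-level casts,
   so the wrapper costs nothing after inlining */
static inline u8x16
enc_round_sketch (u8x16 a, u8x16 round_key)
{
  return (u8x16) _mm_aesenc_si128 ((__m128i) a, (__m128i) round_key);
}
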
diff --git a/src/plugins/crypto_native/aes.h b/src/plugins/crypto_native/aes.h
index d1b3104d981..721ad5da038 100644
--- a/src/plugins/crypto_native/aes.h
+++ b/src/plugins/crypto_native/aes.h
@@ -28,175 +28,189 @@ typedef enum
#define AES_KEY_ROUNDS(x) (10 + x * 2)
#define AES_KEY_BYTES(x) (16 + x * 8)
+static_always_inline u8x16
+aes_block_load (u8 * p)
+{
+ return (u8x16) _mm_loadu_si128 ((__m128i *) p);
+}
+
+static_always_inline u8x16
+aes_enc_round (u8x16 a, u8x16 k)
+{
+ return (u8x16) _mm_aesenc_si128 ((__m128i) a, (__m128i) k);
+}
+
+static_always_inline u8x16
+aes_enc_last_round (u8x16 a, u8x16 k)
+{
+ return (u8x16) _mm_aesenclast_si128 ((__m128i) a, (__m128i) k);
+}
+
+static_always_inline u8x16
+aes_dec_round (u8x16 a, u8x16 k)
+{
+ return (u8x16) _mm_aesdec_si128 ((__m128i) a, (__m128i) k);
+}
+
+static_always_inline u8x16
+aes_dec_last_round (u8x16 a, u8x16 k)
+{
+ return (u8x16) _mm_aesdeclast_si128 ((__m128i) a, (__m128i) k);
+}
+
+static_always_inline void
+aes_block_store (u8 * p, u8x16 r)
+{
+ _mm_storeu_si128 ((__m128i *) p, (__m128i) r);
+}
+
+static_always_inline u8x16
+aes_inv_mix_column (u8x16 a)
+{
+ return (u8x16) _mm_aesimc_si128 ((__m128i) a);
+}
/* AES-NI based AES key expansion based on code samples from
Intel(r) Advanced Encryption Standard (AES) New Instructions White Paper
(323641-001) */
-static_always_inline __m128i
-aes128_key_assist (__m128i r1, __m128i r2)
+static_always_inline void
+aes128_key_assist (__m128i * k, __m128i r)
{
- r1 ^= _mm_slli_si128 (r1, 4);
- r1 ^= _mm_slli_si128 (r1, 4);
- r1 ^= _mm_slli_si128 (r1, 4);
- return r1 ^ _mm_shuffle_epi32 (r2, 0xff);
+ __m128i t = k[-1];
+ t ^= _mm_slli_si128 (t, 4);
+ t ^= _mm_slli_si128 (t, 4);
+ t ^= _mm_slli_si128 (t, 4);
+ k[0] = t ^ _mm_shuffle_epi32 (r, 0xff);
}
static_always_inline void
-aes128_key_expand (__m128i * k, u8 * key)
+aes128_key_expand (u8x16 * key_schedule, u8 * key)
{
+ __m128i *k = (__m128i *) key_schedule;
k[0] = _mm_loadu_si128 ((const __m128i *) key);
- k[1] = aes128_key_assist (k[0], _mm_aeskeygenassist_si128 (k[0], 0x01));
- k[2] = aes128_key_assist (k[1], _mm_aeskeygenassist_si128 (k[1], 0x02));
- k[3] = aes128_key_assist (k[2], _mm_aeskeygenassist_si128 (k[2], 0x04));
- k[4] = aes128_key_assist (k[3], _mm_aeskeygenassist_si128 (k[3], 0x08));
- k[5] = aes128_key_assist (k[4], _mm_aeskeygenassist_si128 (k[4], 0x10));
- k[6] = aes128_key_assist (k[5], _mm_aeskeygenassist_si128 (k[5], 0x20));
- k[7] = aes128_key_assist (k[6], _mm_aeskeygenassist_si128 (k[6], 0x40));
- k[8] = aes128_key_assist (k[7], _mm_aeskeygenassist_si128 (k[7], 0x80));
- k[9] = aes128_key_assist (k[8], _mm_aeskeygenassist_si128 (k[8], 0x1b));
- k[10] = aes128_key_assist (k[9], _mm_aeskeygenassist_si128 (k[9], 0x36));
+ aes128_key_assist (k + 1, _mm_aeskeygenassist_si128 (k[0], 0x01));
+ aes128_key_assist (k + 2, _mm_aeskeygenassist_si128 (k[1], 0x02));
+ aes128_key_assist (k + 3, _mm_aeskeygenassist_si128 (k[2], 0x04));
+ aes128_key_assist (k + 4, _mm_aeskeygenassist_si128 (k[3], 0x08));
+ aes128_key_assist (k + 5, _mm_aeskeygenassist_si128 (k[4], 0x10));
+ aes128_key_assist (k + 6, _mm_aeskeygenassist_si128 (k[5], 0x20));
+ aes128_key_assist (k + 7, _mm_aeskeygenassist_si128 (k[6], 0x40));
+ aes128_key_assist (k + 8, _mm_aeskeygenassist_si128 (k[7], 0x80));
+ aes128_key_assist (k + 9, _mm_aeskeygenassist_si128 (k[8], 0x1b));
+ aes128_key_assist (k + 10, _mm_aeskeygenassist_si128 (k[9], 0x36));
}
static_always_inline void
-aes192_key_assist (__m128i * r1, __m128i * r2, __m128i * r3)
-{
- __m128i r;
- *r1 ^= r = _mm_slli_si128 (*r1, 0x4);
- *r1 ^= r = _mm_slli_si128 (r, 0x4);
- *r1 ^= _mm_slli_si128 (r, 0x4);
- *r1 ^= _mm_shuffle_epi32 (*r2, 0x55);
- *r3 ^= _mm_slli_si128 (*r3, 0x4);
- *r3 ^= *r2 = _mm_shuffle_epi32 (*r1, 0xff);
+aes192_key_assist (__m128i * r1, __m128i * r2, __m128i key_assist)
+{
+ __m128i t;
+ *r1 ^= t = _mm_slli_si128 (*r1, 0x4);
+ *r1 ^= t = _mm_slli_si128 (t, 0x4);
+ *r1 ^= _mm_slli_si128 (t, 0x4);
+ *r1 ^= _mm_shuffle_epi32 (key_assist, 0x55);
+ *r2 ^= _mm_slli_si128 (*r2, 0x4);
+ *r2 ^= _mm_shuffle_epi32 (*r1, 0xff);
}
static_always_inline void
-aes192_key_expand (__m128i * k, u8 * key)
+aes192_key_expand (u8x16 * key_schedule, u8 * key)
{
- __m128i r1, r2, r3;
+ __m128i r1, r2, *k = (__m128i *) key_schedule;
k[0] = r1 = _mm_loadu_si128 ((__m128i *) key);
/* load the 24-bytes key as 2 * 16-bytes (and ignore last 8-bytes) */
- r3 = CLIB_MEM_OVERFLOW_LOAD (_mm_loadu_si128, (__m128i *) (key + 16));
- k[1] = r3;
- r2 = _mm_aeskeygenassist_si128 (r3, 0x1);
- aes192_key_assist (&r1, &r2, &r3);
+ k[1] = r2 = CLIB_MEM_OVERFLOW_LOAD (_mm_loadu_si128, (__m128i *) key + 1);
+
+ aes192_key_assist (&r1, &r2, _mm_aeskeygenassist_si128 (r2, 0x1));
k[1] = (__m128i) _mm_shuffle_pd ((__m128d) k[1], (__m128d) r1, 0);
- k[2] = (__m128i) _mm_shuffle_pd ((__m128d) r1, (__m128d) r3, 1);
- r2 = _mm_aeskeygenassist_si128 (r3, 0x2);
- aes192_key_assist (&r1, &r2, &r3);
+ k[2] = (__m128i) _mm_shuffle_pd ((__m128d) r1, (__m128d) r2, 1);
+
+ aes192_key_assist (&r1, &r2, _mm_aeskeygenassist_si128 (r2, 0x2));
k[3] = r1;
- k[4] = r3;
- r2 = _mm_aeskeygenassist_si128 (r3, 0x4);
- aes192_key_assist (&r1, &r2, &r3);
+ k[4] = r2;
+
+ aes192_key_assist (&r1, &r2, _mm_aeskeygenassist_si128 (r2, 0x4));
k[4] = (__m128i) _mm_shuffle_pd ((__m128d) k[4], (__m128d) r1, 0);
- k[5] = (__m128i) _mm_shuffle_pd ((__m128d) r1, (__m128d) r3, 1);
- r2 = _mm_aeskeygenassist_si128 (r3, 0x8);
- aes192_key_assist (&r1, &r2, &r3);
+ k[5] = (__m128i) _mm_shuffle_pd ((__m128d) r1, (__m128d) r2, 1);
+
+ aes192_key_assist (&r1, &r2, _mm_aeskeygenassist_si128 (r2, 0x8));
k[6] = r1;
- k[7] = r3;
- r2 = _mm_aeskeygenassist_si128 (r3, 0x10);
- aes192_key_assist (&r1, &r2, &r3);
+ k[7] = r2;
+
+ aes192_key_assist (&r1, &r2, _mm_aeskeygenassist_si128 (r2, 0x10));
k[7] = (__m128i) _mm_shuffle_pd ((__m128d) k[7], (__m128d) r1, 0);
- k[8] = (__m128i) _mm_shuffle_pd ((__m128d) r1, (__m128d) r3, 1);
- r2 = _mm_aeskeygenassist_si128 (r3, 0x20);
- aes192_key_assist (&r1, &r2, &r3);
+ k[8] = (__m128i) _mm_shuffle_pd ((__m128d) r1, (__m128d) r2, 1);
+
+ aes192_key_assist (&r1, &r2, _mm_aeskeygenassist_si128 (r2, 0x20));
k[9] = r1;
- k[10] = r3;
- r2 = _mm_aeskeygenassist_si128 (r3, 0x40);
- aes192_key_assist (&r1, &r2, &r3);
+ k[10] = r2;
+
+ aes192_key_assist (&r1, &r2, _mm_aeskeygenassist_si128 (r2, 0x40));
k[10] = (__m128i) _mm_shuffle_pd ((__m128d) k[10], (__m128d) r1, 0);
- k[11] = (__m128i) _mm_shuffle_pd ((__m128d) r1, (__m128d) r3, 1);
- r2 = _mm_aeskeygenassist_si128 (r3, 0x80);
- aes192_key_assist (&r1, &r2, &r3);
- k[12] = r1;
-}
+ k[11] = (__m128i) _mm_shuffle_pd ((__m128d) r1, (__m128d) r2, 1);
-static_always_inline void
-aes256_key_assist1 (__m128i * r1, __m128i * r2)
-{
- __m128i r;
- *r1 ^= r = _mm_slli_si128 (*r1, 0x4);
- *r1 ^= r = _mm_slli_si128 (r, 0x4);
- *r1 ^= _mm_slli_si128 (r, 0x4);
- *r1 ^= *r2 = _mm_shuffle_epi32 (*r2, 0xff);
+ aes192_key_assist (&r1, &r2, _mm_aeskeygenassist_si128 (r2, 0x80));
+ k[12] = r1;
}
static_always_inline void
-aes256_key_assist2 (__m128i r1, __m128i * r3)
+aes256_key_assist (__m128i * k, int i, __m128i key_assist)
{
- __m128i r;
- *r3 ^= r = _mm_slli_si128 (*r3, 0x4);
- *r3 ^= r = _mm_slli_si128 (r, 0x4);
- *r3 ^= _mm_slli_si128 (r, 0x4);
- *r3 ^= _mm_shuffle_epi32 (_mm_aeskeygenassist_si128 (r1, 0x0), 0xaa);
+ __m128i r, t;
+ k += i;
+ r = k[-2];
+ r ^= t = _mm_slli_si128 (r, 0x4);
+ r ^= t = _mm_slli_si128 (t, 0x4);
+ r ^= _mm_slli_si128 (t, 0x4);
+ r ^= _mm_shuffle_epi32 (key_assist, 0xff);
+ k[0] = r;
+
+ if (i >= 14)
+ return;
+
+ r = k[-1];
+ r ^= t = _mm_slli_si128 (r, 0x4);
+ r ^= t = _mm_slli_si128 (t, 0x4);
+ r ^= _mm_slli_si128 (t, 0x4);
+ r ^= _mm_shuffle_epi32 (_mm_aeskeygenassist_si128 (k[0], 0x0), 0xaa);
+ k[1] = r;
}
static_always_inline void
-aes256_key_expand (__m128i * k, u8 * key)
+aes256_key_expand (u8x16 * key_schedule, u8 * key)
{
- __m128i r1, r2, r3;
- k[0] = r1 = _mm_loadu_si128 ((__m128i *) key);
- k[1] = r3 = _mm_loadu_si128 ((__m128i *) (key + 16));
- r2 = _mm_aeskeygenassist_si128 (k[1], 0x01);
- aes256_key_assist1 (&r1, &r2);
- k[2] = r1;
- aes256_key_assist2 (r1, &r3);
- k[3] = r3;
- r2 = _mm_aeskeygenassist_si128 (r3, 0x02);
- aes256_key_assist1 (&r1, &r2);
- k[4] = r1;
- aes256_key_assist2 (r1, &r3);
- k[5] = r3;
- r2 = _mm_aeskeygenassist_si128 (r3, 0x04);
- aes256_key_assist1 (&r1, &r2);
- k[6] = r1;
- aes256_key_assist2 (r1, &r3);
- k[7] = r3;
- r2 = _mm_aeskeygenassist_si128 (r3, 0x08);
- aes256_key_assist1 (&r1, &r2);
- k[8] = r1;
- aes256_key_assist2 (r1, &r3);
- k[9] = r3;
- r2 = _mm_aeskeygenassist_si128 (r3, 0x10);
- aes256_key_assist1 (&r1, &r2);
- k[10] = r1;
- aes256_key_assist2 (r1, &r3);
- k[11] = r3;
- r2 = _mm_aeskeygenassist_si128 (r3, 0x20);
- aes256_key_assist1 (&r1, &r2);
- k[12] = r1;
- aes256_key_assist2 (r1, &r3);
- k[13] = r3;
- r2 = _mm_aeskeygenassist_si128 (r3, 0x40);
- aes256_key_assist1 (&r1, &r2);
- k[14] = r1;
+ __m128i *k = (__m128i *) key_schedule;
+ k[0] = _mm_loadu_si128 ((__m128i *) key);
+ k[1] = _mm_loadu_si128 ((__m128i *) (key + 16));
+ aes256_key_assist (k, 2, _mm_aeskeygenassist_si128 (k[1], 0x01));
+ aes256_key_assist (k, 4, _mm_aeskeygenassist_si128 (k[3], 0x02));
+ aes256_key_assist (k, 6, _mm_aeskeygenassist_si128 (k[5], 0x04));
+ aes256_key_assist (k, 8, _mm_aeskeygenassist_si128 (k[7], 0x08));
+ aes256_key_assist (k, 10, _mm_aeskeygenassist_si128 (k[9], 0x10));
+ aes256_key_assist (k, 12, _mm_aeskeygenassist_si128 (k[11], 0x20));
+ aes256_key_assist (k, 14, _mm_aeskeygenassist_si128 (k[13], 0x40));
}
static_always_inline void
-aes_key_expand (__m128i * k, u8 * key, aes_key_size_t ks)
+aes_key_expand (u8x16 * key_schedule, u8 * key, aes_key_size_t ks)
{
switch (ks)
{
case AES_KEY_128:
- aes128_key_expand (k, key);
+ aes128_key_expand (key_schedule, key);
break;
case AES_KEY_192:
- aes192_key_expand (k, key);
+ aes192_key_expand (key_schedule, key);
break;
case AES_KEY_256:
- aes256_key_expand (k, key);
+ aes256_key_expand (key_schedule, key);
break;
}
}
-
static_always_inline void
-aes_key_enc_to_dec (__m128i * ke, __m128i * kd, aes_key_size_t ks)
+aes_key_enc_to_dec (u8x16 * ke, u8x16 * kd, aes_key_size_t ks)
{
int rounds = AES_KEY_ROUNDS (ks);
@@ -205,11 +219,11 @@ aes_key_enc_to_dec (__m128i * ke, __m128i * kd, aes_key_size_t ks)
for (int i = 1; i < (rounds / 2); i++)
{
- kd[rounds - i] = _mm_aesimc_si128 (ke[i]);
- kd[i] = _mm_aesimc_si128 (ke[rounds - i]);
+ kd[rounds - i] = aes_inv_mix_column (ke[i]);
+ kd[i] = aes_inv_mix_column (ke[rounds - i]);
}
- kd[rounds / 2] = _mm_aesimc_si128 (ke[rounds / 2]);
+ kd[rounds / 2] = aes_inv_mix_column (ke[rounds / 2]);
}
#endif /* __aesni_h__ */
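
With the helpers above, callers can expand a key and run the cipher without naming __m128i at all; aes_key_enc_to_dec then builds the decrypt schedule with the standard equivalent-inverse-cipher trick, reversing the encrypt schedule and passing all but the outermost round keys through aes_inv_mix_column. A usage sketch for AES-128, assuming AES-NI support and the helpers from this header; aes128_encrypt_block is an illustrative name, not a function from this patch:

/* AES-128 = 10 rounds, so 11 round keys (indices 0..10):
   initial AddRoundKey, 9 full rounds, 1 last round */
static_always_inline u8x16
aes128_encrypt_block (u8x16 * k, u8x16 block)
{
  block ^= k[0];
  for (int i = 1; i < 10; i++)
    block = aes_enc_round (block, k[i]);
  return aes_enc_last_round (block, k[10]);
}

/* u8x16 ks[11];
   aes_key_expand (ks, key_bytes, AES_KEY_128);
   ciphertext = aes128_encrypt_block (ks, plaintext); */
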
diff --git a/src/plugins/crypto_native/aes_cbc.c b/src/plugins/crypto_native/aes_cbc.c
index e60f53db92b..97278a01498 100644
--- a/src/plugins/crypto_native/aes_cbc.c
+++ b/src/plugins/crypto_native/aes_cbc.c
@@ -28,31 +28,20 @@
typedef struct
{
- __m128i encrypt_key[15];
+ u8x16 encrypt_key[15];
#if __VAES__
__m512i decrypt_key[15];
#else
- __m128i decrypt_key[15];
+ u8x16 decrypt_key[15];
#endif
} aes_cbc_key_data_t;
-static_always_inline __m128i
-aes_block_load (u8 * p)
-{
- return _mm_loadu_si128 ((__m128i *) p);
-}
-
-static_always_inline void
-aes_block_store (u8 * p, __m128i r)
-{
- _mm_storeu_si128 ((__m128i *) p, r);
-}
-
-static_always_inline __m128i __clib_unused
-xor3 (__m128i a, __m128i b, __m128i c)
+static_always_inline u8x16 __clib_unused
+xor3 (u8x16 a, u8x16 b, u8x16 c)
{
#if __AVX512F__
- return _mm_ternarylogic_epi32 (a, b, c, 0x96);
+ return (u8x16) _mm_ternarylogic_epi32 ((__m128i) a, (__m128i) b,
+ (__m128i) c, 0x96);
#endif
return a ^ b ^ c;
}
@@ -68,28 +57,28 @@ static_always_inline __m512i
aes_block_load_x4 (u8 * src[], int i)
{
__m512i r = { };
- r = _mm512_inserti64x2 (r, aes_block_load (src[0] + i), 0);
- r = _mm512_inserti64x2 (r, aes_block_load (src[1] + i), 1);
- r = _mm512_inserti64x2 (r, aes_block_load (src[2] + i), 2);
- r = _mm512_inserti64x2 (r, aes_block_load (src[3] + i), 3);
+ r = _mm512_inserti64x2 (r, (__m128i) aes_block_load (src[0] + i), 0);
+ r = _mm512_inserti64x2 (r, (__m128i) aes_block_load (src[1] + i), 1);
+ r = _mm512_inserti64x2 (r, (__m128i) aes_block_load (src[2] + i), 2);
+ r = _mm512_inserti64x2 (r, (__m128i) aes_block_load (src[3] + i), 3);
return r;
}
static_always_inline void
aes_block_store_x4 (u8 * dst[], int i, __m512i r)
{
- aes_block_store (dst[0] + i, _mm512_extracti64x2_epi64 (r, 0));
- aes_block_store (dst[1] + i, _mm512_extracti64x2_epi64 (r, 1));
- aes_block_store (dst[2] + i, _mm512_extracti64x2_epi64 (r, 2));
- aes_block_store (dst[3] + i, _mm512_extracti64x2_epi64 (r, 3));
+ aes_block_store (dst[0] + i, (u8x16) _mm512_extracti64x2_epi64 (r, 0));
+ aes_block_store (dst[1] + i, (u8x16) _mm512_extracti64x2_epi64 (r, 1));
+ aes_block_store (dst[2] + i, (u8x16) _mm512_extracti64x2_epi64 (r, 2));
+ aes_block_store (dst[3] + i, (u8x16) _mm512_extracti64x2_epi64 (r, 3));
}
#endif
static_always_inline void __clib_unused
-aes_cbc_dec (__m128i * k, u8 * src, u8 * dst, u8 * iv, int count,
+aes_cbc_dec (u8x16 * k, u8 * src, u8 * dst, u8 * iv, int count,
aes_key_size_t rounds)
{
- __m128i r0, r1, r2, r3, c0, c1, c2, c3, f;
+ u8x16 r0, r1, r2, r3, c0, c1, c2, c3, f;
int i;
f = aes_block_load (iv);
@@ -111,16 +100,16 @@ aes_cbc_dec (__m128i * k, u8 * src, u8 * dst, u8 * iv, int count,
for (i = 1; i < rounds; i++)
{
- r0 = _mm_aesdec_si128 (r0, k[i]);
- r1 = _mm_aesdec_si128 (r1, k[i]);
- r2 = _mm_aesdec_si128 (r2, k[i]);
- r3 = _mm_aesdec_si128 (r3, k[i]);
+ r0 = aes_dec_round (r0, k[i]);
+ r1 = aes_dec_round (r1, k[i]);
+ r2 = aes_dec_round (r2, k[i]);
+ r3 = aes_dec_round (r3, k[i]);
}
- r0 = _mm_aesdeclast_si128 (r0, k[i]);
- r1 = _mm_aesdeclast_si128 (r1, k[i]);
- r2 = _mm_aesdeclast_si128 (r2, k[i]);
- r3 = _mm_aesdeclast_si128 (r3, k[i]);
+ r0 = aes_dec_last_round (r0, k[i]);
+ r1 = aes_dec_last_round (r1, k[i]);
+ r2 = aes_dec_last_round (r2, k[i]);
+ r3 = aes_dec_last_round (r3, k[i]);
aes_block_store (dst, r0 ^ f);
aes_block_store (dst + 16, r1 ^ c0);
@@ -139,8 +128,8 @@ aes_cbc_dec (__m128i * k, u8 * src, u8 * dst, u8 * iv, int count,
c0 = aes_block_load (src);
r0 = c0 ^ k[0];
for (i = 1; i < rounds; i++)
- r0 = _mm_aesdec_si128 (r0, k[i]);
- r0 = _mm_aesdeclast_si128 (r0, k[i]);
+ r0 = aes_dec_round (r0, k[i]);
+ r0 = aes_dec_last_round (r0, k[i]);
aes_block_store (dst, r0 ^ f);
f = c0;
count -= 16;
@@ -252,7 +241,7 @@ aesni_ops_enc_aes_cbc (vlib_main_t * vm, vnet_crypto_op_t * ops[],
/* *INDENT-OFF* */
union
{
- __m128i x1[N];
+ u8x16 x1[N];
__m512i x4[N / 4];
} r = { }, k[15] = { };
/* *INDENT-ON* */
@@ -277,7 +266,7 @@ more:
{
r.x1[i] = ptd->cbc_iv[i];
aes_block_store (ops[0]->iv, r.x1[i]);
- ptd->cbc_iv[i] = _mm_aesenc_si128 (r.x1[i], r.x1[i]);
+ ptd->cbc_iv[i] = aes_enc_round (r.x1[i], r.x1[i]);
}
else
r.x1[i] = aes_block_load (ops[0]->iv);
@@ -336,16 +325,16 @@ more:
for (j = 1; j < rounds; j++)
{
- r.x1[0] = _mm_aesenc_si128 (r.x1[0], k[j].x1[0]);
- r.x1[1] = _mm_aesenc_si128 (r.x1[1], k[j].x1[1]);
- r.x1[2] = _mm_aesenc_si128 (r.x1[2], k[j].x1[2]);
- r.x1[3] = _mm_aesenc_si128 (r.x1[3], k[j].x1[3]);
+ r.x1[0] = aes_enc_round (r.x1[0], k[j].x1[0]);
+ r.x1[1] = aes_enc_round (r.x1[1], k[j].x1[1]);
+ r.x1[2] = aes_enc_round (r.x1[2], k[j].x1[2]);
+ r.x1[3] = aes_enc_round (r.x1[3], k[j].x1[3]);
}
- r.x1[0] = _mm_aesenclast_si128 (r.x1[0], k[j].x1[0]);
- r.x1[1] = _mm_aesenclast_si128 (r.x1[1], k[j].x1[1]);
- r.x1[2] = _mm_aesenclast_si128 (r.x1[2], k[j].x1[2]);
- r.x1[3] = _mm_aesenclast_si128 (r.x1[3], k[j].x1[3]);
+ r.x1[0] = aes_enc_last_round (r.x1[0], k[j].x1[0]);
+ r.x1[1] = aes_enc_last_round (r.x1[1], k[j].x1[1]);
+ r.x1[2] = aes_enc_last_round (r.x1[2], k[j].x1[2]);
+ r.x1[3] = aes_enc_last_round (r.x1[3], k[j].x1[3]);
aes_block_store (dst[0] + i, r.x1[0]);
aes_block_store (dst[1] + i, r.x1[1]);
@@ -403,7 +392,7 @@ decrypt:
static_always_inline void *
aesni_cbc_key_exp (vnet_crypto_key_t * key, aes_key_size_t ks)
{
- __m128i e[15], d[15];
+ u8x16 e[15], d[15];
aes_cbc_key_data_t *kd;
kd = clib_mem_alloc_aligned (sizeof (*kd), CLIB_CACHE_LINE_BYTES);
aes_key_expand (e, key->data, ks);
@@ -411,7 +400,7 @@ aesni_cbc_key_exp (vnet_crypto_key_t * key, aes_key_size_t ks)
for (int i = 0; i < AES_KEY_ROUNDS (ks) + 1; i++)
{
#if __VAES__
- kd->decrypt_key[i] = _mm512_broadcast_i64x2 (d[i]);
+ kd->decrypt_key[i] = _mm512_broadcast_i64x2 ((__m128i) d[i]);
#else
kd->decrypt_key[i] = d[i];
#endif
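
Two details in this file are worth a note. First, aes_cbc_dec can pipeline four blocks at a time because each plaintext depends only on two ciphertext blocks (P[i] = D(C[i]) ^ C[i-1]), while CBC encryption stays inherently serial. Second, the 0x96 immediate in xor3's _mm_ternarylogic_epi32 call is the 3-input truth table for a ^ b ^ c. A small standalone self-check of that immediate, purely illustrative:

#include <stdio.h>

/* the ternary-logic immediate is an 8-bit truth table indexed by
   the bit triple (a<<2)|(b<<1)|c; for 3-way XOR it is 0x96 */
int
main (void)
{
  unsigned char imm = 0;
  for (int a = 0; a < 2; a++)
    for (int b = 0; b < 2; b++)
      for (int c = 0; c < 2; c++)
	if (a ^ b ^ c)
	  imm |= 1 << ((a << 2) | (b << 1) | c);
  printf ("0x%02x\n", imm);	/* prints 0x96 */
  return 0;
}
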
diff --git a/src/plugins/crypto_native/aes_gcm.c b/src/plugins/crypto_native/aes_gcm.c
index 3eb7ae84e19..554fb2b2699 100644
--- a/src/plugins/crypto_native/aes_gcm.c
+++ b/src/plugins/crypto_native/aes_gcm.c
@@ -718,7 +718,7 @@ aesni_gcm_key_exp (vnet_crypto_key_t * key, aes_key_size_t ks)
kd = clib_mem_alloc_aligned (sizeof (*kd), CLIB_CACHE_LINE_BYTES);
/* expand AES key */
- aes_key_expand ((__m128i *) kd->Ke, key->data, ks);
+ aes_key_expand ((u8x16 *) kd->Ke, key->data, ks);
/* pre-calculate H */
H = kd->Ke[0];
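
The GCM side only needs the prototype cast: kd->Ke still holds the expanded round keys, and the hash key H is, per NIST SP 800-38D, the all-zero block encrypted under the AES key. A sketch of that derivation using the aes.h wrappers; ghash_key_sketch is an illustrative helper, not the patch's code:

static_always_inline u8x16
ghash_key_sketch (u8x16 * ke, int rounds)
{
  u8x16 h = { };		/* 128-bit zero block */
  h ^= ke[0];			/* initial AddRoundKey */
  for (int i = 1; i < rounds; i++)
    h = aes_enc_round (h, ke[i]);
  return aes_enc_last_round (h, ke[rounds]);
}
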
diff --git a/src/plugins/crypto_native/crypto_native.h b/src/plugins/crypto_native/crypto_native.h
index 83362102f77..d496cc6f63d 100644
--- a/src/plugins/crypto_native/crypto_native.h
+++ b/src/plugins/crypto_native/crypto_native.h
@@ -22,7 +22,7 @@ typedef void *(crypto_native_key_fn_t) (vnet_crypto_key_t * key);
typedef struct
{
- __m128i cbc_iv[4];
+ u8x16 cbc_iv[4];
} crypto_native_per_thread_data_t;
typedef struct