Diffstat (limited to 'src')
-rw-r--r--  src/plugins/crypto_ia32/CMakeLists.txt |  24
-rw-r--r--  src/plugins/crypto_ia32/aes_cbc.c      | 270
-rw-r--r--  src/plugins/crypto_ia32/aesni.h        | 224
-rw-r--r--  src/plugins/crypto_ia32/crypto_ia32.h  |  44
-rw-r--r--  src/plugins/crypto_ia32/main.c         |  70
5 files changed, 632 insertions(+), 0 deletions(-)
diff --git a/src/plugins/crypto_ia32/CMakeLists.txt b/src/plugins/crypto_ia32/CMakeLists.txt
new file mode 100644
index 00000000000..a100cdbb681
--- /dev/null
+++ b/src/plugins/crypto_ia32/CMakeLists.txt
@@ -0,0 +1,24 @@
+# Copyright (c) 2018 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if(NOT CMAKE_SYSTEM_PROCESSOR MATCHES "amd64.*|x86_64.*|AMD64.*")
+ return()
+endif()
+
+add_vpp_plugin(crypto_ia32
+ SOURCES
+ aes_cbc.c
+ main.c
+)
+
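+# Note: -march=silvermont lets the compiler emit AES-NI and SSE4
+# instructions for this plugin; actual use is still gated at runtime
+# by the CPU-capability check in main.c.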
+target_compile_options(crypto_ia32_plugin PRIVATE "-march=silvermont")
diff --git a/src/plugins/crypto_ia32/aes_cbc.c b/src/plugins/crypto_ia32/aes_cbc.c
new file mode 100644
index 00000000000..281cc83705a
--- /dev/null
+++ b/src/plugins/crypto_ia32/aes_cbc.c
@@ -0,0 +1,270 @@
+/*
+ *------------------------------------------------------------------
+ * Copyright (c) 2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/plugin/plugin.h>
+#include <vnet/crypto/crypto.h>
+#include <x86intrin.h>
+#include <crypto_ia32/crypto_ia32.h>
+#include <crypto_ia32/aesni.h>
+
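+/* CBC decryption is parallelizable: every plaintext block depends only on
+   the current and previous ciphertext blocks, so four AESDEC pipelines are
+   kept in flight and the remainder is handled one block at a time */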
+static_always_inline void
+aes_cbc_dec (__m128i * k, u8 * src, u8 * dst, u8 * iv, int count,
+	     int rounds)
+{
+ __m128i r0, r1, r2, r3, c0, c1, c2, c3, f;
+ int i;
+
+ f = _mm_loadu_si128 ((__m128i *) iv);
+
+ while (count >= 64)
+ {
+ _mm_prefetch (src + 128, _MM_HINT_T0);
+ _mm_prefetch (dst + 128, _MM_HINT_T0);
+
+ c0 = _mm_loadu_si128 (((__m128i *) src + 0));
+ c1 = _mm_loadu_si128 (((__m128i *) src + 1));
+ c2 = _mm_loadu_si128 (((__m128i *) src + 2));
+ c3 = _mm_loadu_si128 (((__m128i *) src + 3));
+
+ r0 = c0 ^ k[0];
+ r1 = c1 ^ k[0];
+ r2 = c2 ^ k[0];
+ r3 = c3 ^ k[0];
+
+ for (i = 1; i < rounds; i++)
+ {
+ r0 = _mm_aesdec_si128 (r0, k[i]);
+ r1 = _mm_aesdec_si128 (r1, k[i]);
+ r2 = _mm_aesdec_si128 (r2, k[i]);
+ r3 = _mm_aesdec_si128 (r3, k[i]);
+ }
+
+ r0 = _mm_aesdeclast_si128 (r0, k[i]);
+ r1 = _mm_aesdeclast_si128 (r1, k[i]);
+ r2 = _mm_aesdeclast_si128 (r2, k[i]);
+ r3 = _mm_aesdeclast_si128 (r3, k[i]);
+
+ _mm_storeu_si128 ((__m128i *) dst + 0, r0 ^ f);
+ _mm_storeu_si128 ((__m128i *) dst + 1, r1 ^ c0);
+ _mm_storeu_si128 ((__m128i *) dst + 2, r2 ^ c1);
+ _mm_storeu_si128 ((__m128i *) dst + 3, r3 ^ c2);
+
+ f = c3;
+
+ count -= 64;
+ src += 64;
+ dst += 64;
+ }
+
+ while (count > 0)
+ {
+ c0 = _mm_loadu_si128 (((__m128i *) src));
+ r0 = c0 ^ k[0];
+ for (i = 1; i < rounds; i++)
+ r0 = _mm_aesdec_si128 (r0, k[i]);
+ r0 = _mm_aesdeclast_si128 (r0, k[i]);
+ _mm_storeu_si128 ((__m128i *) dst, r0 ^ f);
+ f = c0;
+ count -= 16;
+ src += 16;
+ dst += 16;
+ }
+}
+
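+/* CBC encryption is serial within a single chain (each ciphertext block
+   feeds the next), so throughput comes from interleaving four independent
+   operations; lanes with no work left encrypt a scratch "dummy" buffer in
+   place so the remaining lanes can proceed */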
+static_always_inline u32
+aesni_ops_enc_aes_cbc (vlib_main_t * vm, vnet_crypto_op_t * ops[],
+ u32 n_ops, aesni_key_size_t ks)
+{
+ crypto_ia32_main_t *cm = &crypto_ia32_main;
+ crypto_ia32_per_thread_data_t *ptd = vec_elt_at_index (cm->per_thread_data,
+ vm->thread_index);
+ int rounds = AESNI_KEY_ROUNDS (ks);
+ u8 dummy[8192];
+  u8 *src[4], *dst[4], *key[4] = { };
+ u32x4 dummy_mask, len = { };
+ u32 i, j, count, n_left = n_ops;
+ __m128i r[4], k[4][rounds + 1];
+
+more:
+ for (i = 0; i < 4; i++)
+ if (len[i] == 0)
+ {
+ if (n_left == 0)
+ {
+	      /* no more ops left to enqueue, so pad this lane with the
+	         dummy buffer */
+ src[i] = dst[i] = dummy;
+ len[i] = sizeof (dummy);
+ dummy_mask[i] = 0;
+ }
+ else
+ {
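+	      /* fresh IV requested: hand out the current per-thread IV and
+	         step the generator with one AESENC round */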
+ if (ops[0]->flags & VNET_CRYPTO_OP_FLAG_INIT_IV)
+ {
+ r[i] = ptd->cbc_iv[i];
+ _mm_storeu_si128 ((__m128i *) ops[0]->iv, r[i]);
+ ptd->cbc_iv[i] = _mm_aesenc_si128 (r[i], r[i]);
+ }
+ else
+ r[i] = _mm_loadu_si128 ((__m128i *) ops[0]->iv);
+ src[i] = ops[0]->src;
+ dst[i] = ops[0]->dst;
+ len[i] = ops[0]->len;
+ dummy_mask[i] = ~0;
+ if (key[i] != ops[0]->key)
+ {
+ aes_key_expand (k[i], ops[0]->key, ks);
+ key[i] = ops[0]->key;
+ }
+ ops[0]->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
+ n_left--;
+ ops++;
+ }
+ }
+
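+  /* advance all four lanes by the shortest remaining length */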
+ count = u32x4_min_scalar (len);
+
+ ASSERT (count % 16 == 0);
+
+ for (i = 0; i < count; i += 16)
+ {
+ r[0] ^= _mm_loadu_si128 ((__m128i *) (src[0] + i)) ^ k[0][0];
+ r[1] ^= _mm_loadu_si128 ((__m128i *) (src[1] + i)) ^ k[1][0];
+ r[2] ^= _mm_loadu_si128 ((__m128i *) (src[2] + i)) ^ k[2][0];
+ r[3] ^= _mm_loadu_si128 ((__m128i *) (src[3] + i)) ^ k[3][0];
+
+ for (j = 1; j < rounds; j++)
+ {
+ r[0] = _mm_aesenc_si128 (r[0], k[0][j]);
+ r[1] = _mm_aesenc_si128 (r[1], k[1][j]);
+ r[2] = _mm_aesenc_si128 (r[2], k[2][j]);
+ r[3] = _mm_aesenc_si128 (r[3], k[3][j]);
+ }
+
+ r[0] = _mm_aesenclast_si128 (r[0], k[0][j]);
+ r[1] = _mm_aesenclast_si128 (r[1], k[1][j]);
+ r[2] = _mm_aesenclast_si128 (r[2], k[2][j]);
+ r[3] = _mm_aesenclast_si128 (r[3], k[3][j]);
+
+ _mm_storeu_si128 ((__m128i *) (dst[0] + i), r[0]);
+ _mm_storeu_si128 ((__m128i *) (dst[1] + i), r[1]);
+ _mm_storeu_si128 ((__m128i *) (dst[2] + i), r[2]);
+ _mm_storeu_si128 ((__m128i *) (dst[3] + i), r[3]);
+ }
+
+ for (i = 0; i < 4; i++)
+ {
+ src[i] += count;
+ dst[i] += count;
+ len[i] -= count;
+ }
+
+ if (n_left > 0)
+ goto more;
+
+ if (!u32x4_is_all_zero (len & dummy_mask))
+ goto more;
+
+ return n_ops;
+}
+
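+/* decrypt ops are handled one at a time; the key schedule is re-expanded
+   and inverted only when the key differs from the previous op's */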
+static_always_inline u32
+aesni_ops_dec_aes_cbc (vlib_main_t * vm, vnet_crypto_op_t * ops[],
+ u32 n_ops, aesni_key_size_t ks)
+{
+ int rounds = AESNI_KEY_ROUNDS (ks);
+ u8 *last_key = 0;
+ u32 i;
+ __m128i k[rounds + 1];
+
+ for (i = 0; i < n_ops; i++)
+ {
+ vnet_crypto_op_t *op = ops[i];
+ if (last_key != op->key)
+ {
+ aes_key_expand (k, op->key, ks);
+ last_key = op->key;
+ aes_key_enc_to_dec (k, rounds);
+ }
+ aes_cbc_dec (k, op->src, op->dst, op->iv, op->len, rounds);
+ op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
+ }
+ return n_ops;
+}
+
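+/* instantiate per-key-size entry points from the generic inlines above */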
+#define foreach_aesni_cbc_handler_type _(128) _(192) _(256)
+
+#define _(x) \
+static u32 aesni_ops_dec_aes_cbc_##x \
+(vlib_main_t * vm, vnet_crypto_op_t * ops[], u32 n_ops) \
+{ return aesni_ops_dec_aes_cbc (vm, ops, n_ops, AESNI_KEY_##x); } \
+static u32 aesni_ops_enc_aes_cbc_##x \
+(vlib_main_t * vm, vnet_crypto_op_t * ops[], u32 n_ops) \
+{ return aesni_ops_enc_aes_cbc (vm, ops, n_ops, AESNI_KEY_##x); } \
+
+foreach_aesni_cbc_handler_type;
+#undef _
+
+#include <fcntl.h>
+#include <unistd.h>
+
+clib_error_t *
+crypto_ia32_aesni_cbc_init (vlib_main_t * vm)
+{
+ crypto_ia32_main_t *cm = &crypto_ia32_main;
+ crypto_ia32_per_thread_data_t *ptd;
+ clib_error_t *err = 0;
+ int fd;
+
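+  /* seed the per-thread CBC IV generators from /dev/urandom; the encrypt
+     path steps them with AESENC each time an IV is handed out */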
+ if ((fd = open ("/dev/urandom", O_RDONLY)) < 0)
+ return clib_error_return_unix (0, "failed to open '/dev/urandom'");
+
+ /* *INDENT-OFF* */
+ vec_foreach (ptd, cm->per_thread_data)
+ {
+      if (read (fd, ptd->cbc_iv, sizeof (ptd->cbc_iv)) !=
+          sizeof (ptd->cbc_iv))
+        {
+          err = clib_error_return_unix (0, "'/dev/urandom' read failure");
+          goto error;
+        }
+ }
+ /* *INDENT-ON* */
+
+#define _(x) \
+ vnet_crypto_register_ops_handler (vm, cm->crypto_engine_index, \
+ VNET_CRYPTO_OP_AES_##x##_CBC_ENC, \
+ aesni_ops_enc_aes_cbc_##x); \
+ vnet_crypto_register_ops_handler (vm, cm->crypto_engine_index, \
+ VNET_CRYPTO_OP_AES_##x##_CBC_DEC, \
+ aesni_ops_dec_aes_cbc_##x);
+ foreach_aesni_cbc_handler_type;
+#undef _
+
+error:
+ close (fd);
+ return err;
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/crypto_ia32/aesni.h b/src/plugins/crypto_ia32/aesni.h
new file mode 100644
index 00000000000..077889ae903
--- /dev/null
+++ b/src/plugins/crypto_ia32/aesni.h
@@ -0,0 +1,224 @@
+/*
+ *------------------------------------------------------------------
+ * Copyright (c) 2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#ifndef __aesni_h__
+#define __aesni_h__
+
+typedef enum
+{
+ AESNI_KEY_128 = 0,
+ AESNI_KEY_192 = 1,
+ AESNI_KEY_256 = 2,
+} aesni_key_size_t;
+
+#define AESNI_KEY_ROUNDS(x) (10 + x * 2)
+#define AESNI_KEY_BYTES(x) (16 + x * 8)
+
+
+/* AES-NI based AES key expansion based on code samples from
+ Intel(r) Advanced Encryption Standard (AES) New Instructions White Paper
+ (323641-001) */
+
+static_always_inline __m128i
+aes128_key_assist (__m128i r1, __m128i r2)
+{
+ r1 ^= _mm_slli_si128 (r1, 4);
+ r1 ^= _mm_slli_si128 (r1, 4);
+ r1 ^= _mm_slli_si128 (r1, 4);
+ return r1 ^ _mm_shuffle_epi32 (r2, 0xff);
+}
+
+static_always_inline void
+aes128_key_expand (__m128i * k, u8 * key)
+{
+ k[0] = _mm_loadu_si128 ((const __m128i *) key);
+ k[1] = aes128_key_assist (k[0], _mm_aeskeygenassist_si128 (k[0], 0x01));
+ k[2] = aes128_key_assist (k[1], _mm_aeskeygenassist_si128 (k[1], 0x02));
+ k[3] = aes128_key_assist (k[2], _mm_aeskeygenassist_si128 (k[2], 0x04));
+ k[4] = aes128_key_assist (k[3], _mm_aeskeygenassist_si128 (k[3], 0x08));
+ k[5] = aes128_key_assist (k[4], _mm_aeskeygenassist_si128 (k[4], 0x10));
+ k[6] = aes128_key_assist (k[5], _mm_aeskeygenassist_si128 (k[5], 0x20));
+ k[7] = aes128_key_assist (k[6], _mm_aeskeygenassist_si128 (k[6], 0x40));
+ k[8] = aes128_key_assist (k[7], _mm_aeskeygenassist_si128 (k[7], 0x80));
+ k[9] = aes128_key_assist (k[8], _mm_aeskeygenassist_si128 (k[8], 0x1b));
+ k[10] = aes128_key_assist (k[9], _mm_aeskeygenassist_si128 (k[9], 0x36));
+}
+
+static_always_inline void
+aes192_key_assist (__m128i * r1, __m128i * r2, __m128i * r3)
+{
+ __m128i r;
+ *r1 ^= r = _mm_slli_si128 (*r1, 0x4);
+ *r1 ^= r = _mm_slli_si128 (r, 0x4);
+ *r1 ^= _mm_slli_si128 (r, 0x4);
+ *r1 ^= _mm_shuffle_epi32 (*r2, 0x55);
+ *r3 ^= _mm_slli_si128 (*r3, 0x4);
+ *r3 ^= *r2 = _mm_shuffle_epi32 (*r1, 0xff);
+}
+
+static_always_inline void
+aes192_key_expand (__m128i * k, u8 * key)
+{
+ __m128i r1, r2, r3;
+
+ k[0] = r1 = _mm_loadu_si128 ((__m128i *) key);
+ r3 = _mm_loadu_si128 ((__m128i *) (key + 16));
+
+ k[1] = r3;
+ r2 = _mm_aeskeygenassist_si128 (r3, 0x1);
+ aes192_key_assist (&r1, &r2, &r3);
+ k[1] = (__m128i) _mm_shuffle_pd ((__m128d) k[1], (__m128d) r1, 0);
+ k[2] = (__m128i) _mm_shuffle_pd ((__m128d) r1, (__m128d) r3, 1);
+ r2 = _mm_aeskeygenassist_si128 (r3, 0x2);
+ aes192_key_assist (&r1, &r2, &r3);
+ k[3] = r1;
+
+ k[4] = r3;
+ r2 = _mm_aeskeygenassist_si128 (r3, 0x4);
+ aes192_key_assist (&r1, &r2, &r3);
+ k[4] = (__m128i) _mm_shuffle_pd ((__m128d) k[4], (__m128d) r1, 0);
+ k[5] = (__m128i) _mm_shuffle_pd ((__m128d) r1, (__m128d) r3, 1);
+ r2 = _mm_aeskeygenassist_si128 (r3, 0x8);
+ aes192_key_assist (&r1, &r2, &r3);
+ k[6] = r1;
+
+ k[7] = r3;
+ r2 = _mm_aeskeygenassist_si128 (r3, 0x10);
+ aes192_key_assist (&r1, &r2, &r3);
+ k[7] = (__m128i) _mm_shuffle_pd ((__m128d) k[7], (__m128d) r1, 0);
+ k[8] = (__m128i) _mm_shuffle_pd ((__m128d) r1, (__m128d) r3, 1);
+ r2 = _mm_aeskeygenassist_si128 (r3, 0x20);
+ aes192_key_assist (&r1, &r2, &r3);
+ k[9] = r1;
+
+ k[10] = r3;
+ r2 = _mm_aeskeygenassist_si128 (r3, 0x40);
+ aes192_key_assist (&r1, &r2, &r3);
+ k[10] = (__m128i) _mm_shuffle_pd ((__m128d) k[10], (__m128d) r1, 0);
+ k[11] = (__m128i) _mm_shuffle_pd ((__m128d) r1, (__m128d) r3, 1);
+ r2 = _mm_aeskeygenassist_si128 (r3, 0x80);
+ aes192_key_assist (&r1, &r2, &r3);
+ k[12] = r1;
+}
+
+static_always_inline void
+aes256_key_assist1 (__m128i * r1, __m128i * r2)
+{
+ __m128i r;
+ *r1 ^= r = _mm_slli_si128 (*r1, 0x4);
+ *r1 ^= r = _mm_slli_si128 (r, 0x4);
+ *r1 ^= _mm_slli_si128 (r, 0x4);
+ *r1 ^= *r2 = _mm_shuffle_epi32 (*r2, 0xff);
+}
+
+static_always_inline void
+aes256_key_assist2 (__m128i r1, __m128i * r3)
+{
+ __m128i r;
+ *r3 ^= r = _mm_slli_si128 (*r3, 0x4);
+ *r3 ^= r = _mm_slli_si128 (r, 0x4);
+ *r3 ^= _mm_slli_si128 (r, 0x4);
+ *r3 ^= _mm_shuffle_epi32 (_mm_aeskeygenassist_si128 (r1, 0x0), 0xaa);
+}
+
+static_always_inline void
+aes256_key_expand (__m128i * k, u8 * key)
+{
+ __m128i r1, r2, r3;
+ k[0] = r1 = _mm_loadu_si128 ((__m128i *) key);
+ k[1] = r3 = _mm_loadu_si128 ((__m128i *) (key + 16));
+ r2 = _mm_aeskeygenassist_si128 (k[1], 0x01);
+ aes256_key_assist1 (&r1, &r2);
+ k[2] = r1;
+ aes256_key_assist2 (r1, &r3);
+ k[3] = r3;
+ r2 = _mm_aeskeygenassist_si128 (r3, 0x02);
+ aes256_key_assist1 (&r1, &r2);
+ k[4] = r1;
+ aes256_key_assist2 (r1, &r3);
+ k[5] = r3;
+ r2 = _mm_aeskeygenassist_si128 (r3, 0x04);
+ aes256_key_assist1 (&r1, &r2);
+ k[6] = r1;
+ aes256_key_assist2 (r1, &r3);
+ k[7] = r3;
+ r2 = _mm_aeskeygenassist_si128 (r3, 0x08);
+ aes256_key_assist1 (&r1, &r2);
+ k[8] = r1;
+ aes256_key_assist2 (r1, &r3);
+ k[9] = r3;
+ r2 = _mm_aeskeygenassist_si128 (r3, 0x10);
+ aes256_key_assist1 (&r1, &r2);
+ k[10] = r1;
+ aes256_key_assist2 (r1, &r3);
+ k[11] = r3;
+ r2 = _mm_aeskeygenassist_si128 (r3, 0x20);
+ aes256_key_assist1 (&r1, &r2);
+ k[12] = r1;
+ aes256_key_assist2 (r1, &r3);
+ k[13] = r3;
+ r2 = _mm_aeskeygenassist_si128 (r3, 0x40);
+ aes256_key_assist1 (&r1, &r2);
+ k[14] = r1;
+}
+
+static_always_inline void
+aes_key_expand (__m128i * k, u8 * key, aesni_key_size_t ks)
+{
+ switch (ks)
+ {
+ case AESNI_KEY_128:
+ aes128_key_expand (k, key);
+ break;
+ case AESNI_KEY_192:
+ aes192_key_expand (k, key);
+ break;
+ case AESNI_KEY_256:
+ aes256_key_expand (k, key);
+ break;
+ }
+}
+
+
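+/* convert an expanded encryption key schedule for use with AESDEC
+   (equivalent inverse cipher): reverse the round-key order and apply
+   InvMixColumns (AESIMC) to all but the first and last round keys */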
+static_always_inline void
+aes_key_enc_to_dec (__m128i * k, int rounds)
+{
+ __m128i r;
+
+ r = k[rounds];
+ k[rounds] = k[0];
+ k[0] = r;
+
+ for (int i = 1; i < (rounds / 2); i++)
+ {
+ r = k[rounds - i];
+ k[rounds - i] = _mm_aesimc_si128 (k[i]);
+ k[i] = _mm_aesimc_si128 (r);
+ }
+
+ k[rounds / 2] = _mm_aesimc_si128 (k[rounds / 2]);
+}
+
+#endif /* __aesni_h__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/crypto_ia32/crypto_ia32.h b/src/plugins/crypto_ia32/crypto_ia32.h
new file mode 100644
index 00000000000..ccb26ab1a4d
--- /dev/null
+++ b/src/plugins/crypto_ia32/crypto_ia32.h
@@ -0,0 +1,44 @@
+/*
+ *------------------------------------------------------------------
+ * Copyright (c) 2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#ifndef __crypto_ia32_h__
+#define __crypto_ia32_h__
+
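+/* four evolving CBC IV generators per thread, one per interleaved
+   encryption lane */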
+typedef struct
+{
+ __m128i cbc_iv[4];
+} crypto_ia32_per_thread_data_t;
+
+typedef struct
+{
+ u32 crypto_engine_index;
+ crypto_ia32_per_thread_data_t *per_thread_data;
+} crypto_ia32_main_t;
+
+extern crypto_ia32_main_t crypto_ia32_main;
+
+clib_error_t *crypto_ia32_aesni_cbc_init (vlib_main_t * vm);
+
+#endif /* __crypto_ia32_h__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/crypto_ia32/main.c b/src/plugins/crypto_ia32/main.c
new file mode 100644
index 00000000000..9b83f8913db
--- /dev/null
+++ b/src/plugins/crypto_ia32/main.c
@@ -0,0 +1,70 @@
+/*
+ *------------------------------------------------------------------
+ * Copyright (c) 2019 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/plugin/plugin.h>
+#include <vnet/crypto/crypto.h>
+#include <crypto_ia32/crypto_ia32.h>
+
+crypto_ia32_main_t crypto_ia32_main;
+
+clib_error_t *
+crypto_ia32_init (vlib_main_t * vm)
+{
+ crypto_ia32_main_t *cm = &crypto_ia32_main;
+ vlib_thread_main_t *tm = vlib_get_thread_main ();
+ clib_error_t *error;
+
+ if ((error = vlib_call_init_function (vm, vnet_crypto_init)))
+ return error;
+
+ vec_validate_aligned (cm->per_thread_data, tm->n_vlib_mains - 1,
+ CLIB_CACHE_LINE_BYTES);
+
+ cm->crypto_engine_index =
+ vnet_crypto_register_engine (vm, "ia32", 100,
+ "Intel IA32 ISA Optimized Crypto");
+
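+  /* register the AES-NI CBC handlers only if the CPU supports AES */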
+ if (clib_cpu_supports_x86_aes () &&
+ (error = crypto_ia32_aesni_cbc_init (vm)))
+ goto error;
+
+error:
+ if (error)
+ vec_free (cm->per_thread_data);
+
+ return error;
+}
+
+VLIB_INIT_FUNCTION (crypto_ia32_init);
+
+#include <vpp/app/version.h>
+
+/* *INDENT-OFF* */
+VLIB_PLUGIN_REGISTER () = {
+ .version = VPP_BUILD_VER,
+ .description = "Intel AESNI Software Crypto Backend Plugin",
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */