author		MathiasRaoul <mathias.raoul@gmail.com>		2020-01-09 14:50:53 +0000
committer	Dave Wallace <dwallacelf@gmail.com>		2020-01-31 20:22:28 +0000
commit		92de6b65be144c8108149c1a56327832edcd8ba6 (patch)
tree		b0122f19055098243240ce5683aab6d1a9c94f61 /src/plugins/quic/quic_crypto.c
parent		776644efe78f427a75fc5e122014b44b39d470c3 (diff)
quic: quicly crypto offloading
- Implement our own QUIC packet allocator that allocates extra memory at the
  end of each packet to store crypto-offloading-related data
- Offload 1RTT packet encryption/decryption to vnet crypto
- Add a CLI to change the max number of packets per key

Type: feature
Change-Id: I7557fd457d7ba492329d5d8ed192509cbd727f9c
Signed-off-by: MathiasRaoul <mathias.raoul@gmail.com>
Diffstat (limited to 'src/plugins/quic/quic_crypto.c')
-rw-r--r--	src/plugins/quic/quic_crypto.c | 480
1 file changed, 429 insertions(+), 51 deletions(-)
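The central trick, used throughout the diff below, is that each outgoing packet is over-allocated so that a per-packet crypto context sits directly behind the quicly_datagram_t; 1RTT AEAD work is then queued into per-thread batches instead of running inline. A minimal sketch of that layout, with the struct shape inferred from the accessors in this diff (the real definitions and the allocator live elsewhere in the plugin; the array bound and function name here are assumptions):

#include <stddef.h>
#include <stdlib.h>

/* Sketch only: field names follow their uses in this diff. */
typedef struct
{
  struct
  {
    void *hp;			/* ptls_cipher_context_t * in the real code */
    size_t first_byte_at;	/* offset of the packet's first header byte */
    size_t payload_from;	/* offset where the AEAD payload starts */
  } snd_ctx[4];			/* one slot per coalesced packet; bound assumed */
  size_t snd_ctx_count;
} quic_encrypt_cb_ctx;

/* Hypothetical allocator: reserve the context right behind the datagram, so
   the send path can recover it with (uint8_t *) packet + sizeof (*packet),
   exactly as quic_crypto_finalize_send_packet () does below. */
static void *
quic_packet_alloc_sketch (size_t datagram_bytes, size_t payload_bytes)
{
  return malloc (datagram_bytes + sizeof (quic_encrypt_cb_ctx) + payload_bytes);
}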
diff --git a/src/plugins/quic/quic_crypto.c b/src/plugins/quic/quic_crypto.c
index ca2eaced19e..b644bed3e75 100644
--- a/src/plugins/quic/quic_crypto.c
+++ b/src/plugins/quic/quic_crypto.c
@@ -12,14 +12,19 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+#include <vnet/crypto/crypto.h>
+#include <vppinfra/lock.h>
-#include <quic/quic_crypto.h>
#include <quic/quic.h>
+#include <quic/quic_crypto.h>
-#include <vnet/crypto/crypto.h>
-
-#include <picotls/openssl.h>
#include <quicly.h>
+#include <picotls/openssl.h>
+
+#define QUICLY_EPOCH_1RTT 3
+
+extern quic_main_t quic_main;
+extern quic_ctx_t *quic_get_conn_ctx (quicly_conn_t * conn);
typedef void (*quicly_do_transform_fn) (ptls_cipher_context_t *, void *,
const void *, size_t);
@@ -38,13 +43,302 @@ struct aead_crypto_context_t
u32 key_index;
};
+static size_t
+quic_crypto_offload_aead_decrypt (quic_ctx_t * qctx,
+ ptls_aead_context_t * _ctx, void *_output,
+ const void *input, size_t inlen,
+ uint64_t decrypted_pn, const void *aad,
+ size_t aadlen);
+
vnet_crypto_main_t *cm = &crypto_main;
+void
+quic_crypto_batch_tx_packets (quic_crypto_batch_ctx_t * batch_ctx)
+{
+ vlib_main_t *vm = vlib_get_main ();
+
+ if (batch_ctx->nb_tx_packets <= 0)
+ return;
+
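+  /* keys are registered under the writer lock in quic_crypto_aead_setup_crypto;
+     take the reader lock so the batch runs against a stable key table */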
+ clib_rwlock_reader_lock (&quic_main.crypto_keys_quic_rw_lock);
+ vnet_crypto_process_ops (vm, batch_ctx->aead_crypto_tx_packets_ops,
+ batch_ctx->nb_tx_packets);
+ clib_rwlock_reader_unlock (&quic_main.crypto_keys_quic_rw_lock);
+
+ for (int i = 0; i < batch_ctx->nb_tx_packets; i++)
+ clib_mem_free (batch_ctx->aead_crypto_tx_packets_ops[i].iv);
+
+ batch_ctx->nb_tx_packets = 0;
+}
+
+void
+quic_crypto_batch_rx_packets (quic_crypto_batch_ctx_t * batch_ctx)
+{
+ vlib_main_t *vm = vlib_get_main ();
+
+ if (batch_ctx->nb_rx_packets <= 0)
+ return;
+
+ clib_rwlock_reader_lock (&quic_main.crypto_keys_quic_rw_lock);
+ vnet_crypto_process_ops (vm, batch_ctx->aead_crypto_rx_packets_ops,
+ batch_ctx->nb_rx_packets);
+ clib_rwlock_reader_unlock (&quic_main.crypto_keys_quic_rw_lock);
+
+ for (int i = 0; i < batch_ctx->nb_rx_packets; i++)
+ clib_mem_free (batch_ctx->aead_crypto_rx_packets_ops[i].iv);
+
+ batch_ctx->nb_rx_packets = 0;
+}
+
+void
+build_iv (ptls_aead_context_t * ctx, uint8_t * iv, uint64_t seq)
+{
+ size_t iv_size = ctx->algo->iv_size, i;
+ const uint8_t *s = ctx->static_iv;
+ uint8_t *d = iv;
+  /* QUIC AEAD nonce: copy the static IV, XORing its last 8 bytes with the
+     big-endian packet number */
+ for (i = iv_size - 8; i != 0; --i)
+ *d++ = *s++;
+ i = 64;
+ do
+ {
+ i -= 8;
+ *d++ = *s++ ^ (uint8_t) (seq >> i);
+ }
+ while (i != 0);
+}
+
+static void
+do_finalize_send_packet (ptls_cipher_context_t * hp,
+ quicly_datagram_t * packet,
+ size_t first_byte_at, size_t payload_from)
+{
+  uint8_t hpmask[1 + QUICLY_SEND_PN_SIZE] = { 0 };
+ size_t i;
+
+ ptls_cipher_init (hp,
+ packet->data.base + payload_from - QUICLY_SEND_PN_SIZE +
+ QUICLY_MAX_PN_SIZE);
+ ptls_cipher_encrypt (hp, hpmask, hpmask, sizeof (hpmask));
+
+ packet->data.base[first_byte_at] ^=
+ hpmask[0] &
+ (QUICLY_PACKET_IS_LONG_HEADER (packet->data.base[first_byte_at]) ? 0xf :
+ 0x1f);
+
+ for (i = 0; i != QUICLY_SEND_PN_SIZE; ++i)
+ packet->data.base[payload_from + i - QUICLY_SEND_PN_SIZE] ^=
+ hpmask[i + 1];
+}
+
+void
+quic_crypto_finalize_send_packet (quicly_datagram_t * packet)
+{
+ quic_encrypt_cb_ctx *encrypt_cb_ctx =
+ (quic_encrypt_cb_ctx *) ((uint8_t *) packet + sizeof (*packet));
+
+ for (int i = 0; i < encrypt_cb_ctx->snd_ctx_count; i++)
+ {
+ do_finalize_send_packet (encrypt_cb_ctx->snd_ctx[i].hp,
+ packet,
+ encrypt_cb_ctx->snd_ctx[i].first_byte_at,
+ encrypt_cb_ctx->snd_ctx[i].payload_from);
+ }
+ encrypt_cb_ctx->snd_ctx_count = 0;
+}
+
+static int
+quic_crypto_setup_cipher (quicly_crypto_engine_t * engine,
+ quicly_conn_t * conn, size_t epoch, int is_enc,
+ ptls_cipher_context_t ** hp_ctx,
+ ptls_aead_context_t ** aead_ctx,
+ ptls_aead_algorithm_t * aead,
+ ptls_hash_algorithm_t * hash, const void *secret)
+{
+ uint8_t hpkey[PTLS_MAX_SECRET_SIZE];
+ int ret;
+
+ if (hp_ctx != NULL)
+ *hp_ctx = NULL;
+ *aead_ctx = NULL;
+
+ /* generate new header protection key */
+ if (hp_ctx != NULL)
+ {
+ if ((ret =
+ ptls_hkdf_expand_label (hash, hpkey, aead->ctr_cipher->key_size,
+ ptls_iovec_init (secret,
+ hash->digest_size),
+ "quic hp", ptls_iovec_init (NULL, 0),
+ NULL)) != 0)
+ goto Exit;
+ if ((*hp_ctx =
+ ptls_cipher_new (aead->ctr_cipher, is_enc, hpkey)) == NULL)
+ {
+ ret = PTLS_ERROR_NO_MEMORY;
+ goto Exit;
+ }
+ }
+
+ /* generate new AEAD context */
+ if ((*aead_ctx =
+ ptls_aead_new (aead, hash, is_enc, secret,
+ QUICLY_AEAD_BASE_LABEL)) == NULL)
+ {
+ ret = PTLS_ERROR_NO_MEMORY;
+ goto Exit;
+ }
+
+ if (epoch == QUICLY_EPOCH_1RTT && !is_enc)
+ {
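+      /* stash the 1RTT ingress keys so quic_crypto_decrypt_packet () can use
+         the offload path; a non-NULL aead_ctx means quicly installed fresh
+         keys, i.e. a key update */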
+ quic_ctx_t *qctx = quic_get_conn_ctx (conn);
+ if (qctx->ingress_keys.aead_ctx != NULL)
+ {
+ qctx->key_phase_ingress++;
+ }
+
+ qctx->ingress_keys.aead_ctx = *aead_ctx;
+ if (hp_ctx != NULL)
+ qctx->ingress_keys.hp_ctx = *hp_ctx;
+ }
+
+ ret = 0;
+
+Exit:
+ if (ret != 0)
+ {
+ if (*aead_ctx != NULL)
+ {
+ ptls_aead_free (*aead_ctx);
+ *aead_ctx = NULL;
+ }
+ if (*hp_ctx != NULL)
+ {
+ ptls_cipher_free (*hp_ctx);
+ *hp_ctx = NULL;
+ }
+ }
+ ptls_clear_memory (hpkey, sizeof (hpkey));
+ return ret;
+}
+
+void
+quic_crypto_finalize_send_packet_cb (struct st_quicly_crypto_engine_t
+ *engine, quicly_conn_t * conn,
+ ptls_cipher_context_t * hp,
+ ptls_aead_context_t * aead,
+ quicly_datagram_t * packet,
+ size_t first_byte_at,
+ size_t payload_from, int coalesced)
+{
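+  /* defer header protection: only record which bytes to mask here;
+     quic_crypto_finalize_send_packet () applies the mask once the batched
+     encryption has produced the ciphertext to sample */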
+ quic_encrypt_cb_ctx *encrypt_cb_ctx =
+ (quic_encrypt_cb_ctx *) ((uint8_t *) packet + sizeof (*packet));
+
+ encrypt_cb_ctx->snd_ctx[encrypt_cb_ctx->snd_ctx_count].hp = hp;
+ encrypt_cb_ctx->snd_ctx[encrypt_cb_ctx->snd_ctx_count].first_byte_at =
+ first_byte_at;
+ encrypt_cb_ctx->snd_ctx[encrypt_cb_ctx->snd_ctx_count].payload_from =
+ payload_from;
+ encrypt_cb_ctx->snd_ctx_count++;
+}
+
+void
+quic_crypto_decrypt_packet (quic_ctx_t * qctx, quic_rx_packet_ctx_t * pctx)
+{
+ ptls_cipher_context_t *header_protection = NULL;
+ ptls_aead_context_t *aead = NULL;
+  uint64_t pn;
+
+ /* Long Header packets are not decrypted by vpp */
+ if (QUICLY_PACKET_IS_LONG_HEADER (pctx->packet.octets.base[0]))
+ return;
+
+ uint64_t next_expected_packet_number =
+ quicly_get_next_expected_packet_number (qctx->conn);
+ if (next_expected_packet_number == UINT64_MAX)
+ return;
+
+ aead = qctx->ingress_keys.aead_ctx;
+ header_protection = qctx->ingress_keys.hp_ctx;
+
+ if (!aead || !header_protection)
+ return;
+
+ size_t encrypted_len = pctx->packet.octets.len - pctx->packet.encrypted_off;
+ uint8_t hpmask[5] = { 0 };
+ uint32_t pnbits = 0;
+ size_t pnlen, ptlen, i;
+
+ /* decipher the header protection, as well as obtaining pnbits, pnlen */
+ if (encrypted_len < header_protection->algo->iv_size + QUICLY_MAX_PN_SIZE)
+ return;
+ ptls_cipher_init (header_protection,
+ pctx->packet.octets.base + pctx->packet.encrypted_off +
+ QUICLY_MAX_PN_SIZE);
+ ptls_cipher_encrypt (header_protection, hpmask, hpmask, sizeof (hpmask));
+ pctx->packet.octets.base[0] ^=
+ hpmask[0] & (QUICLY_PACKET_IS_LONG_HEADER (pctx->packet.octets.base[0]) ?
+ 0xf : 0x1f);
+ pnlen = (pctx->packet.octets.base[0] & 0x3) + 1;
+ for (i = 0; i != pnlen; ++i)
+ {
+ pctx->packet.octets.base[pctx->packet.encrypted_off + i] ^=
+ hpmask[i + 1];
+ pnbits =
+ (pnbits << 8) | pctx->packet.octets.base[pctx->packet.encrypted_off +
+ i];
+ }
+
+ size_t aead_off = pctx->packet.encrypted_off + pnlen;
+
+ pn =
+ quicly_determine_packet_number (pnbits, pnlen * 8,
+ next_expected_packet_number);
+
+ int key_phase_bit =
+ (pctx->packet.octets.base[0] & QUICLY_KEY_PHASE_BIT) != 0;
+
+ if (key_phase_bit != (qctx->key_phase_ingress & 1))
+ {
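+      /* key phase mismatch: restore the header bytes we just unmasked and
+         bail out, letting quicly's software path handle the key update */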
+ pctx->packet.octets.base[0] ^=
+ hpmask[0] &
+ (QUICLY_PACKET_IS_LONG_HEADER (pctx->packet.octets.base[0]) ? 0xf :
+ 0x1f);
+ for (i = 0; i != pnlen; ++i)
+ {
+ pctx->packet.octets.base[pctx->packet.encrypted_off + i] ^=
+ hpmask[i + 1];
+ }
+ return;
+ }
+
+ if ((ptlen =
+ quic_crypto_offload_aead_decrypt (qctx, aead,
+ pctx->packet.octets.base + aead_off,
+ pctx->packet.octets.base + aead_off,
+ pctx->packet.octets.len - aead_off,
+ pn, pctx->packet.octets.base,
+ aead_off)) == SIZE_MAX)
+ {
+      fprintf (stderr, "%s: aead decryption failure (pn: %llu)\n",
+	       __FUNCTION__, (unsigned long long) pn);
+ return;
+ }
+
+ pctx->packet.encrypted_off = aead_off;
+ pctx->packet.octets.len = ptlen + aead_off;
+
+ pctx->packet.decrypted.pn = pn;
+ pctx->packet.decrypted.key_phase = qctx->key_phase_ingress;
+}
+
+#ifdef QUIC_HP_CRYPTO
static void
quic_crypto_cipher_do_init (ptls_cipher_context_t * _ctx, const void *iv)
{
struct cipher_context_t *ctx = (struct cipher_context_t *) _ctx;
-
vnet_crypto_op_id_t id;
if (!strcmp (ctx->super.algo->name, "AES128-CTR"))
{
@@ -60,7 +354,6 @@ quic_crypto_cipher_do_init (ptls_cipher_context_t * _ctx, const void *iv)
_ctx->algo->name);
assert (0);
}
-
vnet_crypto_op_init (&ctx->op, id);
ctx->op.iv = (u8 *) iv;
ctx->op.key_index = ctx->key_index;
@@ -121,25 +414,30 @@ quic_crypto_cipher_setup_crypto (ptls_cipher_context_t * _ctx, int is_enc,
}
static int
-aes128ctr_setup_crypto (ptls_cipher_context_t * ctx, int is_enc,
- const void *key)
+quic_crypto_aes128ctr_setup_crypto (ptls_cipher_context_t * ctx, int is_enc,
+ const void *key)
{
return quic_crypto_cipher_setup_crypto (ctx, 1, key, EVP_aes_128_ctr (),
quic_crypto_cipher_encrypt);
}
static int
-aes256ctr_setup_crypto (ptls_cipher_context_t * ctx, int is_enc,
- const void *key)
+quic_crypto_aes256ctr_setup_crypto (ptls_cipher_context_t * ctx, int is_enc,
+ const void *key)
{
return quic_crypto_cipher_setup_crypto (ctx, 1, key, EVP_aes_256_ctr (),
quic_crypto_cipher_encrypt);
}
+#endif // QUIC_HP_CRYPTO
+
void
quic_crypto_aead_encrypt_init (ptls_aead_context_t * _ctx, const void *iv,
const void *aad, size_t aadlen)
{
+ quic_main_t *qm = &quic_main;
+ u32 thread_index = vlib_get_thread_index ();
+
struct aead_crypto_context_t *ctx = (struct aead_crypto_context_t *) _ctx;
vnet_crypto_op_id_t id;
@@ -156,11 +454,18 @@ quic_crypto_aead_encrypt_init (ptls_aead_context_t * _ctx, const void *iv,
assert (0);
}
- vnet_crypto_op_init (&ctx->op, id);
- ctx->op.aad = (u8 *) aad;
- ctx->op.aad_len = aadlen;
- ctx->op.iv = (u8 *) iv;
- ctx->op.key_index = ctx->key_index;
+ quic_crypto_batch_ctx_t *quic_crypto_batch_ctx =
+ &qm->wrk_ctx[thread_index].crypto_context_batch;
+
+ vnet_crypto_op_t *vnet_op =
+ &quic_crypto_batch_ctx->aead_crypto_tx_packets_ops
+ [quic_crypto_batch_ctx->nb_tx_packets];
+ vnet_crypto_op_init (vnet_op, id);
+ vnet_op->aad = (u8 *) aad;
+ vnet_op->aad_len = aadlen;
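+  /* the op is only queued here; copy the IV, since the caller's buffer will
+     not outlive the deferred batch */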
+ vnet_op->iv = clib_mem_alloc (PTLS_MAX_IV_SIZE);
+ clib_memcpy (vnet_op->iv, iv, PTLS_MAX_IV_SIZE);
+ vnet_op->key_index = ctx->key_index;
}
size_t
@@ -169,11 +474,20 @@ quic_crypto_aead_encrypt_update (ptls_aead_context_t * _ctx, void *output,
{
struct aead_crypto_context_t *ctx = (struct aead_crypto_context_t *) _ctx;
- ctx->op.src = (u8 *) input;
- ctx->op.dst = output;
- ctx->op.len = inlen;
- ctx->op.tag_len = ctx->super.algo->tag_size;
- ctx->op.tag = ctx->op.src + inlen;
+ quic_main_t *qm = &quic_main;
+ u32 thread_index = vlib_get_thread_index ();
+ quic_crypto_batch_ctx_t *quic_crypto_batch_ctx =
+ &qm->wrk_ctx[thread_index].crypto_context_batch;
+
+ vnet_crypto_op_t *vnet_op =
+ &quic_crypto_batch_ctx->aead_crypto_tx_packets_ops
+ [quic_crypto_batch_ctx->nb_tx_packets];
+ vnet_op->src = (u8 *) input;
+ vnet_op->dst = output;
+ vnet_op->len = inlen;
+ vnet_op->tag_len = ctx->super.algo->tag_size;
+
+ vnet_op->tag = vnet_op->src + inlen;
return 0;
}
@@ -181,12 +495,16 @@ quic_crypto_aead_encrypt_update (ptls_aead_context_t * _ctx, void *output,
size_t
quic_crypto_aead_encrypt_final (ptls_aead_context_t * _ctx, void *output)
{
- vlib_main_t *vm = vlib_get_main ();
- struct aead_crypto_context_t *ctx = (struct aead_crypto_context_t *) _ctx;
-
- vnet_crypto_process_ops (vm, &ctx->op, 1);
-
- return ctx->op.len + ctx->op.tag_len;
+ quic_main_t *qm = &quic_main;
+ u32 thread_index = vlib_get_thread_index ();
+ quic_crypto_batch_ctx_t *quic_crypto_batch_ctx =
+ &qm->wrk_ctx[thread_index].crypto_context_batch;
+
+ vnet_crypto_op_t *vnet_op =
+ &quic_crypto_batch_ctx->
+ aead_crypto_tx_packets_ops[quic_crypto_batch_ctx->nb_tx_packets];
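+  /* no crypto runs here; committing the op to the batch is enough,
+     quic_crypto_batch_tx_packets () processes it later */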
+ quic_crypto_batch_ctx->nb_tx_packets++;
+ return vnet_op->len + vnet_op->tag_len;
}
size_t
@@ -226,9 +544,57 @@ quic_crypto_aead_decrypt (ptls_aead_context_t * _ctx, void *_output,
vnet_crypto_process_ops (vm, &ctx->op, 1);
+ if (ctx->op.status != VNET_CRYPTO_OP_STATUS_COMPLETED)
+ return SIZE_MAX;
+
return ctx->op.len;
}
+static size_t
+quic_crypto_offload_aead_decrypt (quic_ctx_t * qctx,
+ ptls_aead_context_t * _ctx, void *_output,
+ const void *input, size_t inlen,
+ uint64_t decrypted_pn, const void *aad,
+ size_t aadlen)
+{
+ struct aead_crypto_context_t *ctx = (struct aead_crypto_context_t *) _ctx;
+ vnet_crypto_op_id_t id;
+ if (!strcmp (ctx->super.algo->name, "AES128-GCM"))
+ {
+ id = VNET_CRYPTO_OP_AES_128_GCM_DEC;
+ }
+ else if (!strcmp (ctx->super.algo->name, "AES256-GCM"))
+ {
+ id = VNET_CRYPTO_OP_AES_256_GCM_DEC;
+ }
+ else
+ {
+ return SIZE_MAX;
+ }
+
+ quic_main_t *qm = &quic_main;
+ quic_crypto_batch_ctx_t *quic_crypto_batch_ctx =
+ &qm->wrk_ctx[qctx->c_thread_index].crypto_context_batch;
+
+ vnet_crypto_op_t *vnet_op =
+ &quic_crypto_batch_ctx->aead_crypto_rx_packets_ops
+ [quic_crypto_batch_ctx->nb_rx_packets];
+
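+  /* enqueue only: the op executes in quic_crypto_batch_rx_packets (), so the
+     plaintext length is returned before the op has actually run */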
+ vnet_crypto_op_init (vnet_op, id);
+ vnet_op->aad = (u8 *) aad;
+ vnet_op->aad_len = aadlen;
+ vnet_op->iv = clib_mem_alloc (PTLS_MAX_IV_SIZE);
+ build_iv (_ctx, vnet_op->iv, decrypted_pn);
+ vnet_op->src = (u8 *) input;
+ vnet_op->dst = _output;
+ vnet_op->key_index = ctx->key_index;
+ vnet_op->len = inlen - ctx->super.algo->tag_size;
+ vnet_op->tag_len = ctx->super.algo->tag_size;
+ vnet_op->tag = vnet_op->src + vnet_op->len;
+ quic_crypto_batch_ctx->nb_rx_packets++;
+ return vnet_op->len;
+}
+
static void
quic_crypto_aead_dispose_crypto (ptls_aead_context_t * _ctx)
{
@@ -259,13 +625,16 @@ quic_crypto_aead_setup_crypto (ptls_aead_context_t * _ctx, int is_enc,
}
ctx->super.do_decrypt = quic_crypto_aead_decrypt;
+
ctx->super.do_encrypt_init = quic_crypto_aead_encrypt_init;
ctx->super.do_encrypt_update = quic_crypto_aead_encrypt_update;
ctx->super.do_encrypt_final = quic_crypto_aead_encrypt_final;
ctx->super.dispose_crypto = quic_crypto_aead_dispose_crypto;
+ clib_rwlock_writer_lock (&quic_main.crypto_keys_quic_rw_lock);
ctx->key_index = vnet_crypto_key_add (vm, algo,
(u8 *) key, _ctx->algo->key_size);
+ clib_rwlock_writer_unlock (&quic_main.crypto_keys_quic_rw_lock);
return 0;
}
@@ -284,24 +653,28 @@ quic_crypto_aead_aes256gcm_setup_crypto (ptls_aead_context_t * ctx,
return quic_crypto_aead_setup_crypto (ctx, is_enc, key, EVP_aes_256_gcm ());
}
-ptls_cipher_algorithm_t quic_crypto_aes128ctr = { "AES128-CTR",
+#ifdef QUIC_HP_CRYPTO
+ptls_cipher_algorithm_t quic_crypto_aes128ctr = {
+ "AES128-CTR",
PTLS_AES128_KEY_SIZE,
1, PTLS_AES_IV_SIZE,
- sizeof (struct cipher_context_t),
- aes128ctr_setup_crypto
+  sizeof (struct cipher_context_t), quic_crypto_aes128ctr_setup_crypto
};
-ptls_cipher_algorithm_t quic_crypto_aes256ctr = { "AES256-CTR",
- PTLS_AES256_KEY_SIZE,
- 1 /* block size */ ,
- PTLS_AES_IV_SIZE,
- sizeof (struct cipher_context_t),
- aes256ctr_setup_crypto
+ptls_cipher_algorithm_t quic_crypto_aes256ctr = {
+ "AES256-CTR", PTLS_AES256_KEY_SIZE, 1 /* block size */ ,
+  PTLS_AES_IV_SIZE, sizeof (struct cipher_context_t),
+  quic_crypto_aes256ctr_setup_crypto
};
+#endif
-ptls_aead_algorithm_t quic_crypto_aes128gcm = { "AES128-GCM",
+ptls_aead_algorithm_t quic_crypto_aes128gcm = {
+ "AES128-GCM",
+#ifdef QUIC_HP_CRYPTO
&quic_crypto_aes128ctr,
- NULL,
+#else
+ &ptls_openssl_aes128ctr,
+#endif
+ &ptls_openssl_aes128ecb,
PTLS_AES128_KEY_SIZE,
PTLS_AESGCM_IV_SIZE,
PTLS_AESGCM_TAG_SIZE,
@@ -309,9 +682,14 @@ ptls_aead_algorithm_t quic_crypto_aes128gcm = { "AES128-GCM",
quic_crypto_aead_aes128gcm_setup_crypto
};
-ptls_aead_algorithm_t quic_crypto_aes256gcm = { "AES256-GCM",
+ptls_aead_algorithm_t quic_crypto_aes256gcm = {
+ "AES256-GCM",
+#ifdef QUIC_HP_CRYPTO
&quic_crypto_aes256ctr,
- NULL,
+#else
+ &ptls_openssl_aes256ctr,
+#endif
+ &ptls_openssl_aes256ecb,
PTLS_AES256_KEY_SIZE,
PTLS_AESGCM_IV_SIZE,
PTLS_AESGCM_TAG_SIZE,
@@ -319,22 +697,22 @@ ptls_aead_algorithm_t quic_crypto_aes256gcm = { "AES256-GCM",
quic_crypto_aead_aes256gcm_setup_crypto
};
-ptls_cipher_suite_t quic_crypto_aes128gcmsha256 =
- { PTLS_CIPHER_SUITE_AES_128_GCM_SHA256,
- &quic_crypto_aes128gcm,
- &ptls_openssl_sha256
+ptls_cipher_suite_t quic_crypto_aes128gcmsha256 = {
+ PTLS_CIPHER_SUITE_AES_128_GCM_SHA256,
+ &quic_crypto_aes128gcm, &ptls_openssl_sha256
+};
+
+ptls_cipher_suite_t quic_crypto_aes256gcmsha384 = {
+ PTLS_CIPHER_SUITE_AES_256_GCM_SHA384,
+ &quic_crypto_aes256gcm, &ptls_openssl_sha384
};
-ptls_cipher_suite_t quic_crypto_aes256gcmsha384 =
- { PTLS_CIPHER_SUITE_AES_256_GCM_SHA384,
- &quic_crypto_aes256gcm,
- &ptls_openssl_sha384
+ptls_cipher_suite_t *quic_crypto_cipher_suites[] = {
+ &quic_crypto_aes256gcmsha384, &quic_crypto_aes128gcmsha256, NULL
};
-ptls_cipher_suite_t *quic_crypto_cipher_suites[] =
- { &quic_crypto_aes256gcmsha384,
- &quic_crypto_aes128gcmsha256,
- NULL
+quicly_crypto_engine_t quic_crypto_engine = {
+ quic_crypto_setup_cipher, quic_crypto_finalize_send_packet_cb
};
int