Diffstat (limited to 'src/vnet')
 src/vnet/crypto/crypto.h     |  82
 src/vnet/ipsec/esp_decrypt.c | 118
 src/vnet/ipsec/esp_encrypt.c | 123
 src/vnet/ipsec/ipsec.h       |   1
4 files changed, 146 insertions, 178 deletions
diff --git a/src/vnet/crypto/crypto.h b/src/vnet/crypto/crypto.h
index 1df6e7f6651..127731866db 100644
--- a/src/vnet/crypto/crypto.h
+++ b/src/vnet/crypto/crypto.h
@@ -348,7 +348,6 @@ typedef struct
 typedef struct
 {
   CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
-  vnet_crypto_async_frame_t *frames[VNET_CRYPTO_ASYNC_OP_N_IDS];
   vnet_crypto_async_frame_t *frame_pool;
   u32 *buffer_indices;
   u16 *nexts;
@@ -543,18 +542,15 @@ vnet_crypto_async_get_frame (vlib_main_t * vm, vnet_crypto_async_op_id_t opt)
 {
   vnet_crypto_main_t *cm = &crypto_main;
   vnet_crypto_thread_t *ct = cm->threads + vm->thread_index;
-  vnet_crypto_async_frame_t *f = ct->frames[opt];
+  vnet_crypto_async_frame_t *f = NULL;
+
+  pool_get_aligned (ct->frame_pool, f, CLIB_CACHE_LINE_BYTES);
+  if (CLIB_DEBUG > 0)
+    clib_memset (f, 0xfe, sizeof (*f));
+  f->state = VNET_CRYPTO_FRAME_STATE_NOT_PROCESSED;
+  f->op = opt;
+  f->n_elts = 0;
 
-  if (!f)
-    {
-      pool_get_aligned (ct->frame_pool, f, CLIB_CACHE_LINE_BYTES);
-      if (CLIB_DEBUG > 0)
-        clib_memset (f, 0xfe, sizeof (*f));
-      f->state = VNET_CRYPTO_FRAME_STATE_NOT_PROCESSED;
-      f->op = opt;
-      f->n_elts = 0;
-      ct->frames[opt] = f;
-    }
   return f;
 }
@@ -573,7 +569,6 @@ vnet_crypto_async_submit_open_frame (vlib_main_t * vm,
 {
   vnet_crypto_main_t *cm = &crypto_main;
   vlib_thread_main_t *tm = vlib_get_thread_main ();
-  vnet_crypto_thread_t *ct = cm->threads + vm->thread_index;
   vnet_crypto_async_op_id_t opt = frame->op;
   u32 i = vlib_num_workers () > 0;
 
@@ -585,56 +580,33 @@ vnet_crypto_async_submit_open_frame (vlib_main_t * vm,
   clib_bitmap_set_no_check (cm->async_active_ids, opt, 1);
   if (PREDICT_TRUE (ret == 0))
     {
-      vnet_crypto_async_frame_t *nf = 0;
-      pool_get_aligned (ct->frame_pool, nf, CLIB_CACHE_LINE_BYTES);
-      if (CLIB_DEBUG > 0)
-        clib_memset (nf, 0xfe, sizeof (*nf));
-      nf->state = VNET_CRYPTO_FRAME_STATE_NOT_PROCESSED;
-      nf->op = opt;
-      nf->n_elts = 0;
-      ct->frames[opt] = nf;
+      if (cm->dispatch_mode == VNET_CRYPTO_ASYNC_DISPATCH_INTERRUPT)
+        {
+          for (; i < tm->n_vlib_mains; i++)
+            vlib_node_set_interrupt_pending (vlib_mains[i],
+                                             cm->crypto_node_index);
+        }
     }
   else
     {
       frame->state = VNET_CRYPTO_FRAME_STATE_ELT_ERROR;
     }
 
-  if (cm->dispatch_mode == VNET_CRYPTO_ASYNC_DISPATCH_INTERRUPT)
-    {
-      for (; i < tm->n_vlib_mains; i++)
-        {
-          vlib_node_set_interrupt_pending (vlib_mains[i],
-                                           cm->crypto_node_index);
-        }
-    }
-
   return ret;
 }
 
-static_always_inline int
-vnet_crypto_async_add_to_frame (vlib_main_t * vm,
-                                vnet_crypto_async_frame_t ** frame,
-                                u32 key_index,
-                                u32 crypto_len, i16 integ_len_adj,
-                                i16 crypto_start_offset,
-                                u16 integ_start_offset,
-                                u32 buffer_index,
-                                u16 next_node,
-                                u8 * iv, u8 * tag, u8 * aad, u8 flags)
-{
-  vnet_crypto_async_frame_t *f = *frame;
+static_always_inline void
+vnet_crypto_async_add_to_frame (vlib_main_t *vm, vnet_crypto_async_frame_t *f,
+                                u32 key_index, u32 crypto_len,
+                                i16 integ_len_adj, i16 crypto_start_offset,
+                                u16 integ_start_offset, u32 buffer_index,
+                                u16 next_node, u8 *iv, u8 *tag, u8 *aad,
+                                u8 flags)
+{
   vnet_crypto_async_frame_elt_t *fe;
   u16 index;
 
-  if (PREDICT_FALSE (f->n_elts == VNET_CRYPTO_FRAME_SIZE))
-    {
-      vnet_crypto_async_op_id_t opt = f->op;
-      int ret;
-      ret = vnet_crypto_async_submit_open_frame (vm, f);
-      if (PREDICT_FALSE (ret < 0))
-        return -1;
-      f = vnet_crypto_async_get_frame (vm, opt);
-      *frame = f;
-    }
+  ASSERT (f->n_elts < VNET_CRYPTO_FRAME_SIZE);
 
   index = f->n_elts;
   fe = &f->elts[index];
@@ -650,8 +622,6 @@ vnet_crypto_async_add_to_frame (vlib_main_t * vm,
   fe->flags = flags;
   f->buffer_indices[index] = buffer_index;
   f->next_node_index[index] = next_node;
-
-  return 0;
 }
 
 static_always_inline void
@@ -669,6 +639,12 @@ vnet_crypto_async_reset_frame (vnet_crypto_async_frame_t * f)
   f->n_elts = 0;
 }
 
+static_always_inline u8
+vnet_crypto_async_frame_is_full (const vnet_crypto_async_frame_t *f)
+{
+  return (f->n_elts == VNET_CRYPTO_FRAME_SIZE);
+}
+
 #endif /* included_vnet_crypto_crypto_h */
 
 /*
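With this change, vnet_crypto_async_add_to_frame () no longer submits a full frame and swaps in a replacement behind the caller's back; it returns void and simply asserts there is room. Callers must therefore pair vnet_crypto_async_frame_is_full () with vnet_crypto_async_get_frame () themselves. A minimal sketch of the caller-side pattern this implies; the helper name example_frame_with_room is hypothetical, only the vnet_crypto_* calls come from the patch:

/* Sketch: guarantee the frame has room before the next add. The helper
 * name is illustrative, not part of the patch. */
static_always_inline vnet_crypto_async_frame_t *
example_frame_with_room (vlib_main_t *vm, vnet_crypto_async_frame_t *f,
                         vnet_crypto_async_op_id_t op)
{
  /* open a fresh frame the first time around, or once the current one
   * fills up; the returned frame always satisfies the ASSERT in
   * vnet_crypto_async_add_to_frame () */
  if (NULL == f || vnet_crypto_async_frame_is_full (f))
    f = vnet_crypto_async_get_frame (vm, op);
  return f;
}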
diff --git a/src/vnet/ipsec/esp_decrypt.c b/src/vnet/ipsec/esp_decrypt.c
index 80ce08bf853..141b1b987d4 100644
--- a/src/vnet/ipsec/esp_decrypt.c
+++ b/src/vnet/ipsec/esp_decrypt.c
@@ -607,17 +607,14 @@ esp_decrypt_prepare_sync_op (vlib_main_t * vm, vlib_node_runtime_t * node,
     }
 }
 
-static_always_inline int
-esp_decrypt_prepare_async_frame (vlib_main_t * vm,
-                                 vlib_node_runtime_t * node,
-                                 ipsec_per_thread_data_t * ptd,
-                                 vnet_crypto_async_frame_t ** f,
-                                 ipsec_sa_t * sa0, u8 * payload, u16 len,
-                                 u8 icv_sz, u8 iv_sz,
-                                 esp_decrypt_packet_data_t * pd,
-                                 esp_decrypt_packet_data2_t * pd2, u32 bi,
-                                 vlib_buffer_t * b, u16 * next,
-                                 u16 async_next)
+static_always_inline esp_decrypt_error_t
+esp_decrypt_prepare_async_frame (vlib_main_t *vm, vlib_node_runtime_t *node,
+                                 ipsec_per_thread_data_t *ptd,
+                                 vnet_crypto_async_frame_t *f, ipsec_sa_t *sa0,
+                                 u8 *payload, u16 len, u8 icv_sz, u8 iv_sz,
+                                 esp_decrypt_packet_data_t *pd,
+                                 esp_decrypt_packet_data2_t *pd2, u32 bi,
+                                 vlib_buffer_t *b, u16 *next, u16 async_next)
 {
   const u8 esp_sz = sizeof (esp_header_t);
   u32 current_protect_index = vnet_buffer (b)->ipsec.protect_index;
@@ -677,9 +674,7 @@ esp_decrypt_prepare_async_frame (vlib_main_t * vm,
                                0, &integ_len) < 0)
             {
               /* allocate buffer failed, will not add to frame and drop */
-              b->error = node->errors[ESP_DECRYPT_ERROR_NO_BUFFERS];
-              next[0] = ESP_DECRYPT_NEXT_DROP;
-              return -1;
+              return (ESP_DECRYPT_ERROR_NO_BUFFERS);
             }
         }
       else
@@ -737,11 +732,11 @@ out:
   /* for AEAD integ_len - crypto_len will be negative, it is ok since it
    * is ignored by the engine.
    */
-  return vnet_crypto_async_add_to_frame (vm, f, key_index, crypto_len,
-                                         integ_len - crypto_len,
-                                         crypto_start_offset,
-                                         integ_start_offset,
-                                         bi, async_next, iv, tag, aad, flags);
+  vnet_crypto_async_add_to_frame (
+    vm, f, key_index, crypto_len, integ_len - crypto_len, crypto_start_offset,
+    integ_start_offset, bi, async_next, iv, tag, aad, flags);
+
+  return (ESP_DECRYPT_ERROR_RX_PKTS);
 }
 
 static_always_inline void
@@ -1035,10 +1030,10 @@ esp_decrypt_inline (vlib_main_t * vm,
   vnet_crypto_op_t _op, *op = &_op;
   vnet_crypto_op_t **crypto_ops = &ptd->crypto_ops;
   vnet_crypto_op_t **integ_ops = &ptd->integ_ops;
-  vnet_crypto_async_frame_t *async_frame = 0;
   int is_async = im->async_mode;
-  vnet_crypto_async_op_id_t last_async_op = ~0;
+  vnet_crypto_async_op_id_t async_op = ~0;
   u16 n_async_drop = 0;
+  vnet_crypto_async_frame_t *async_frames[VNET_CRYPTO_ASYNC_OP_N_IDS];
 
   vlib_get_buffers (vm, from, b, n_left);
   if (!is_async)
@@ -1048,8 +1043,10 @@ esp_decrypt_inline (vlib_main_t * vm,
       vec_reset_length (ptd->chained_crypto_ops);
       vec_reset_length (ptd->chained_integ_ops);
     }
+  vec_reset_length (ptd->async_frames);
   vec_reset_length (ptd->chunks);
   clib_memset_u16 (nexts, -1, n_left);
+  clib_memset (async_frames, 0, sizeof (async_frames));
 
   while (n_left > 0)
     {
@@ -1093,21 +1090,28 @@ esp_decrypt_inline (vlib_main_t * vm,
           cpd.iv_sz = sa0->crypto_iv_size;
           cpd.flags = sa0->flags;
           cpd.sa_index = current_sa_index;
+        }
 
-          /* submit frame when op_id is different then the old one */
-          if (is_async && last_async_op != sa0->crypto_async_dec_op_id)
+      if (is_async)
+        {
+          async_op = sa0->crypto_async_dec_op_id;
+
+          if (PREDICT_FALSE (async_op == 0))
             {
-              if (async_frame && async_frame->n_elts)
-                {
-                  if (vnet_crypto_async_submit_open_frame (vm, async_frame))
-                    esp_async_recycle_failed_submit (async_frame, b, from,
-                                                     nexts, &n_async_drop,
-                                                     ESP_DECRYPT_NEXT_DROP,
-                                                     ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR);
-                }
-              async_frame =
-                vnet_crypto_async_get_frame (vm, sa0->crypto_async_dec_op_id);
-              last_async_op = sa0->crypto_async_dec_op_id;
+              esp_set_next_index (is_async, from, nexts, from[b - bufs],
+                                  &n_async_drop, ESP_DECRYPT_NEXT_DROP, next);
+              goto next;
+            }
+
+          /* get a frame for this op if we don't yet have one or it's full
+           */
+          if (NULL == async_frames[async_op] ||
+              vnet_crypto_async_frame_is_full (async_frames[async_op]))
+            {
+              async_frames[async_op] =
+                vnet_crypto_async_get_frame (vm, async_op);
+              /* Save the frame to the list we'll submit at the end */
+              vec_add1 (ptd->async_frames, async_frames[async_op]);
             }
         }
@@ -1176,29 +1180,17 @@ esp_decrypt_inline (vlib_main_t * vm,
 
       if (is_async)
         {
-          int ret = esp_decrypt_prepare_async_frame (vm, node, ptd,
-                                                     &async_frame,
-                                                     sa0, payload, len,
-                                                     cpd.icv_sz,
-                                                     cpd.iv_sz,
-                                                     pd, pd2,
-                                                     from[b - bufs],
-                                                     b[0], next, async_next);
-          if (PREDICT_FALSE (ret < 0))
+          esp_decrypt_error_t err;
+
+          err = esp_decrypt_prepare_async_frame (
+            vm, node, ptd, async_frames[async_op], sa0, payload, len,
+            cpd.icv_sz, cpd.iv_sz, pd, pd2, from[b - bufs], b[0], next,
+            async_next);
+          if (ESP_DECRYPT_ERROR_RX_PKTS != err)
             {
-              b[0]->error = ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR;
+              b[0]->error = err;
               esp_set_next_index (1, from, nexts, from[b - bufs],
                                   &n_async_drop, ESP_DECRYPT_NEXT_DROP, next);
-              /* when next[0] is ESP_DECRYPT_NEXT_DROP we only have to drop
-               * the current packet. Otherwise it is frame submission error
-               * thus we have to drop the whole frame.
-               */
-              if (next[0] != ESP_DECRYPT_NEXT_DROP && async_frame->n_elts)
-                esp_async_recycle_failed_submit (async_frame, b, from,
-                                                 nexts, &n_async_drop,
-                                                 ESP_DECRYPT_NEXT_DROP,
-                                                 ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR);
-              goto next;
             }
         }
       else
@@ -1222,13 +1214,19 @@ esp_decrypt_inline (vlib_main_t * vm,
 
   if (is_async)
     {
-      if (async_frame && async_frame->n_elts)
+      /* submit all of the open frames */
+      vnet_crypto_async_frame_t **async_frame;
+
+      vec_foreach (async_frame, ptd->async_frames)
         {
-          if (vnet_crypto_async_submit_open_frame (vm, async_frame) < 0)
-            esp_async_recycle_failed_submit (async_frame, b, from, nexts,
-                                             &n_async_drop,
-                                             ESP_DECRYPT_NEXT_DROP,
-                                             ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR);
+          if (vnet_crypto_async_submit_open_frame (vm, *async_frame) < 0)
+            {
+              esp_async_recycle_failed_submit (
+                *async_frame, b, from, nexts, &n_async_drop,
+                ESP_DECRYPT_NEXT_DROP, ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR);
+              vnet_crypto_async_reset_frame (*async_frame);
+              vnet_crypto_async_free_frame (vm, *async_frame);
+            }
         }
 
       /* no post process in async */
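Note the changed contract in the decrypt path: esp_decrypt_prepare_async_frame () now reports failure through esp_decrypt_error_t, with ESP_DECRYPT_ERROR_RX_PKTS doing double duty as the success value, so a failure only drops the offending packet rather than recycling the whole frame. A condensed caller-side view of the esp_decrypt_inline () hunk above (surrounding buffer bookkeeping elided, not a compilable excerpt):

esp_decrypt_error_t err;

err = esp_decrypt_prepare_async_frame (vm, node, ptd, async_frames[async_op],
                                       sa0, payload, len, cpd.icv_sz,
                                       cpd.iv_sz, pd, pd2, from[b - bufs],
                                       b[0], next, async_next);
if (ESP_DECRYPT_ERROR_RX_PKTS != err)
  {
    /* drop just this packet; the frame stays open for later packets */
    b[0]->error = err;
    esp_set_next_index (1, from, nexts, from[b - bufs], &n_async_drop,
                        ESP_DECRYPT_NEXT_DROP, next);
  }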
diff --git a/src/vnet/ipsec/esp_encrypt.c b/src/vnet/ipsec/esp_encrypt.c
index 16bca1da536..1fc53a53317 100644
--- a/src/vnet/ipsec/esp_encrypt.c
+++ b/src/vnet/ipsec/esp_encrypt.c
@@ -471,13 +471,13 @@ esp_prepare_sync_op (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
     }
 }
 
-static_always_inline int
-esp_prepare_async_frame (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
-                         vnet_crypto_async_frame_t ** async_frame,
-                         ipsec_sa_t * sa, vlib_buffer_t * b,
-                         esp_header_t * esp, u8 * payload, u32 payload_len,
-                         u8 iv_sz, u8 icv_sz, u32 bi, u16 next, u32 hdr_len,
-                         u16 async_next, vlib_buffer_t * lb)
+static_always_inline void
+esp_prepare_async_frame (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
+                         vnet_crypto_async_frame_t *async_frame,
+                         ipsec_sa_t *sa, vlib_buffer_t *b, esp_header_t *esp,
+                         u8 *payload, u32 payload_len, u8 iv_sz, u8 icv_sz,
+                         u32 bi, u16 next, u32 hdr_len, u16 async_next,
+                         vlib_buffer_t *lb)
 {
   esp_post_data_t *post = esp_post_data (b);
   u8 *tag, *iv, *aad = 0;
@@ -554,12 +554,11 @@ esp_prepare_async_frame (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
         }
     }
 
-  return vnet_crypto_async_add_to_frame (vm, async_frame, key_index,
-                                         crypto_total_len,
-                                         integ_total_len - crypto_total_len,
-                                         crypto_start_offset,
-                                         integ_start_offset, bi, async_next,
-                                         iv, tag, aad, flag);
+  /* this always succeeds because we know the frame is not full */
+  vnet_crypto_async_add_to_frame (vm, async_frame, key_index, crypto_total_len,
+                                  integ_total_len - crypto_total_len,
+                                  crypto_start_offset, integ_start_offset, bi,
+                                  async_next, iv, tag, aad, flag);
 }
 
 always_inline uword
@@ -582,9 +581,9 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
   vlib_buffer_t *lb;
   vnet_crypto_op_t **crypto_ops = &ptd->crypto_ops;
   vnet_crypto_op_t **integ_ops = &ptd->integ_ops;
-  vnet_crypto_async_frame_t *async_frame = 0;
+  vnet_crypto_async_frame_t *async_frames[VNET_CRYPTO_ASYNC_OP_N_IDS];
   int is_async = im->async_mode;
-  vnet_crypto_async_op_id_t last_async_op = ~0;
+  vnet_crypto_async_op_id_t async_op = ~0;
   u16 drop_next = (lt == VNET_LINK_IP6 ? ESP_ENCRYPT_NEXT_DROP6 :
                    (lt == VNET_LINK_IP4 ? ESP_ENCRYPT_NEXT_DROP4 :
@@ -603,7 +602,9 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
       vec_reset_length (ptd->chained_crypto_ops);
       vec_reset_length (ptd->chained_integ_ops);
     }
+  vec_reset_length (ptd->async_frames);
   vec_reset_length (ptd->chunks);
+  clib_memset (async_frames, 0, sizeof (async_frames));
 
   while (n_left > 0)
     {
@@ -656,21 +657,28 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
           esp_align = sa0->esp_block_align;
           icv_sz = sa0->integ_icv_size;
           iv_sz = sa0->crypto_iv_size;
+        }
+
+      if (is_async)
+        {
+          async_op = sa0->crypto_async_enc_op_id;
 
-          /* submit frame when op_id is different then the old one */
-          if (is_async && sa0->crypto_async_enc_op_id != last_async_op)
+          if (PREDICT_FALSE (async_op == 0))
             {
-              if (async_frame && async_frame->n_elts)
-                {
-                  if (vnet_crypto_async_submit_open_frame (vm, async_frame))
-                    esp_async_recycle_failed_submit (async_frame, b, from,
-                                                     nexts, &n_async_drop,
-                                                     drop_next,
-                                                     ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR);
-                }
-              async_frame =
-                vnet_crypto_async_get_frame (vm, sa0->crypto_async_enc_op_id);
-              last_async_op = sa0->crypto_async_enc_op_id;
+              esp_set_next_index (is_async, from, nexts, from[b - bufs],
+                                  &n_async_drop, drop_next, next);
+              goto trace;
+            }
+
+          /* get a frame for this op if we don't yet have one or it's full
+           */
+          if (NULL == async_frames[async_op] ||
+              vnet_crypto_async_frame_is_full (async_frames[async_op]))
+            {
+              async_frames[async_op] =
+                vnet_crypto_async_get_frame (vm, async_op);
+              /* Save the frame to the list we'll submit at the end */
+              vec_add1 (ptd->async_frames, async_frames[async_op]);
             }
         }
@@ -948,36 +956,14 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
       esp->seq = clib_net_to_host_u32 (sa0->seq);
 
       if (is_async)
-        {
-          if (PREDICT_FALSE (sa0->crypto_async_enc_op_id == 0))
-            {
-              esp_set_next_index (is_async, from, nexts, from[b - bufs],
-                                  &n_async_drop, drop_next, next);
-              goto trace;
-            }
-
-          if (esp_prepare_async_frame (vm, ptd, &async_frame, sa0, b[0], esp,
-                                       payload, payload_len, iv_sz,
-                                       icv_sz, from[b - bufs], next[0],
-                                       hdr_len, async_next, lb))
-            {
-              /* The fail only caused by submission, free the whole frame.
-               */
-              if (async_frame->n_elts)
-                esp_async_recycle_failed_submit (async_frame, b, from, nexts,
                                                  &n_async_drop, drop_next,
-                                                 ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR);
-              b[0]->error = ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR;
-              esp_set_next_index (1, from, nexts, from[b - bufs],
-                                  &n_async_drop, drop_next, next);
-              goto trace;
-            }
-        }
+        esp_prepare_async_frame (vm, ptd, async_frames[async_op], sa0, b[0],
+                                 esp, payload, payload_len, iv_sz, icv_sz,
+                                 from[b - bufs], next[0], hdr_len, async_next,
+                                 lb);
       else
-        {
         esp_prepare_sync_op (vm, ptd, crypto_ops, integ_ops, sa0, payload,
                              payload_len, iv_sz, icv_sz, bufs, b, lb, hdr_len,
                              esp);
-        }
 
       vlib_buffer_advance (b[0], 0LL - hdr_len);
@@ -1015,29 +1001,36 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
       esp_process_ops (vm, node, ptd->integ_ops, bufs, nexts, drop_next);
       esp_process_chained_ops (vm, node, ptd->chained_integ_ops, bufs, nexts,
                                ptd->chunks, drop_next);
+
+      vlib_node_increment_counter (
+        vm, node->node_index, ESP_ENCRYPT_ERROR_RX_PKTS, frame->n_vectors);
+
+      vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
     }
   else
     {
-      if (async_frame && async_frame->n_elts)
+      /* submit all of the open frames */
+      vnet_crypto_async_frame_t **async_frame;
+
+      vec_foreach (async_frame, ptd->async_frames)
        {
-          if (vnet_crypto_async_submit_open_frame (vm, async_frame) < 0)
-            esp_async_recycle_failed_submit (async_frame, b, from, nexts,
-                                             &n_async_drop, drop_next,
-                                             ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR);
+          if (vnet_crypto_async_submit_open_frame (vm, *async_frame) < 0)
+            {
+              esp_async_recycle_failed_submit (
+                *async_frame, b, from, nexts, &n_async_drop, drop_next,
+                ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR);
+              vnet_crypto_async_reset_frame (*async_frame);
+              vnet_crypto_async_free_frame (vm, *async_frame);
+            }
        }
+
       vlib_node_increment_counter (vm, node->node_index,
                                    ESP_ENCRYPT_ERROR_RX_PKTS,
                                    frame->n_vectors);
       if (n_async_drop)
        vlib_buffer_enqueue_to_next (vm, node, from, nexts, n_async_drop);
-
-      return frame->n_vectors;
     }
 
-  vlib_node_increment_counter (vm, node->node_index,
-                               ESP_ENCRYPT_ERROR_RX_PKTS, frame->n_vectors);
-
-  vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
   return frame->n_vectors;
 }
diff --git a/src/vnet/ipsec/ipsec.h b/src/vnet/ipsec/ipsec.h
index 7ca15866a14..15de80764a8 100644
--- a/src/vnet/ipsec/ipsec.h
+++ b/src/vnet/ipsec/ipsec.h
@@ -102,6 +102,7 @@ typedef struct
   vnet_crypto_op_t *chained_crypto_ops;
   vnet_crypto_op_t *chained_integ_ops;
   vnet_crypto_op_chunk_t *chunks;
+  vnet_crypto_async_frame_t **async_frames;
 } ipsec_per_thread_data_t;
 
 typedef struct
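Taken together with the new ipsec_per_thread_data_t member, both ESP nodes now follow the same shape: one open frame per async op id, every opened frame recorded once in ptd->async_frames, and a single submit pass after the packet loop. A condensed sketch assembled from the hunks above, illustrative rather than a compilable excerpt (per-packet work and error paths elided):

/* per-node state: one open frame per async op id */
vnet_crypto_async_frame_t *async_frames[VNET_CRYPTO_ASYNC_OP_N_IDS];

vec_reset_length (ptd->async_frames);
clib_memset (async_frames, 0, sizeof (async_frames));

while (n_left > 0)
  {
    /* ... per-packet work elided ... */
    if (NULL == async_frames[async_op] ||
        vnet_crypto_async_frame_is_full (async_frames[async_op]))
      {
        async_frames[async_op] = vnet_crypto_async_get_frame (vm, async_op);
        /* remember the frame so it is submitted exactly once at the end */
        vec_add1 (ptd->async_frames, async_frames[async_op]);
      }
    /* ... add the packet to async_frames[async_op] ... */
  }

/* submit all of the open frames */
vnet_crypto_async_frame_t **async_frame;
vec_foreach (async_frame, ptd->async_frames)
  if (vnet_crypto_async_submit_open_frame (vm, *async_frame) < 0)
    {
      /* on failed submit: recycle the buffers, then return the frame,
       * which is no longer cached per thread, to the pool */
      vnet_crypto_async_reset_frame (*async_frame);
      vnet_crypto_async_free_frame (vm, *async_frame);
    }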