author     Matthew Smith <mgsmith@netgate.com>    2021-06-04 09:18:37 -0500
committer  Matthew Smith <mgsmith@netgate.com>    2021-06-08 16:05:37 +0000
commit     51d56bab707965399d524c350eaaa33d20b55244 (patch)
tree       06faf8d070cae252b9af810d3ae23371d6e501e4 /src/vnet
parent     4de5f9be88857197ddf17e3bff66318f78f4b6bb (diff)
ipsec: fix async crypto frame leak
Type: fix
If an async crypto frame is allocated during ESP encrypt/decrypt but no
buffer/op is subsequently added to it, the frame leaks: it is never
submitted, because the count of async ops stays at zero, and it is never
returned to the frame pool. This happens frequently when >= 2 worker
threads are configured and all of the buffers in a vector have to be
handed off to other threads.
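
For illustration, the pre-patch shape of the node looked roughly like the
sketch below (condensed from the hunks removed in this patch; the rest of
the loop body and all error handling are elided):

    /* per-packet loop: a frame is taken from the crypto frame pool as
       soon as the SA is known to be async, before it is known whether
       this thread will keep the buffer at all */
    if (is_async)
      {
        if (NULL == async_frames[async_op] ||
            vnet_crypto_async_frame_is_full (async_frames[async_op]))
          {
            async_frames[async_op] =
              vnet_crypto_async_get_frame (vm, async_op);
            vec_add1 (ptd->async_frames, async_frames[async_op]);
          }
      }

    /* after the loop: if every buffer was handed off to another thread,
       no op was added, n_async is still 0, and the frames collected
       above are neither submitted nor freed -- they leak */
    if (n_async)
      {
        vec_foreach (async_frame, ptd->async_frames)
          vnet_crypto_async_submit_open_frame (vm, *async_frame);
      }
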
Wait until it is almost certain that a buffer will be added to the
frame before allocating the frame, making it less likely that an
allocated frame ends up with no operations added to it.
For encrypt this is sufficient to resolve the leak. For decrypt there is
still a chance that a buffer will fail to be added to the frame, so also
remove the async op counter and ensure that every allocated frame is
either submitted or freed at the end of the node.
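
The decrypt-side cleanup then takes roughly the shape sketched below
(condensed from the esp_decrypt.c hunk in this patch):

    /* submit or free all of the open frames */
    vnet_crypto_async_frame_t **async_frame;

    vec_foreach (async_frame, ptd->async_frames)
      {
        /* free the frame and move on if no ops were successfully added */
        if (PREDICT_FALSE (!(*async_frame)->n_elts))
          {
            vnet_crypto_async_free_frame (vm, *async_frame);
            continue;
          }
        /* otherwise submit; on failure, recycle the buffers to the drop
           path, then reset and free the frame */
        if (vnet_crypto_async_submit_open_frame (vm, *async_frame) < 0)
          {
            n_noop += esp_async_recycle_failed_submit (
              vm, *async_frame, node, ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR,
              n_sync, noop_bi, noop_nexts, ESP_DECRYPT_NEXT_DROP);
            vnet_crypto_async_reset_frame (*async_frame);
            vnet_crypto_async_free_frame (vm, *async_frame);
          }
      }
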
Change-Id: I4778c3265359b192d8a88ab9f8c53519d46285a2
Signed-off-by: Matthew Smith <mgsmith@netgate.com>
Diffstat (limited to 'src/vnet')
-rw-r--r--  src/vnet/ipsec/esp_decrypt.c  65
-rw-r--r--  src/vnet/ipsec/esp_encrypt.c  39
2 files changed, 50 insertions(+), 54 deletions(-)
diff --git a/src/vnet/ipsec/esp_decrypt.c b/src/vnet/ipsec/esp_decrypt.c
index c13657d0819..ec6d9811806 100644
--- a/src/vnet/ipsec/esp_decrypt.c
+++ b/src/vnet/ipsec/esp_decrypt.c
@@ -1024,7 +1024,7 @@ esp_decrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
   vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
   vlib_buffer_t *sync_bufs[VLIB_FRAME_SIZE];
   u16 sync_nexts[VLIB_FRAME_SIZE], *sync_next = sync_nexts, n_sync = 0;
-  u16 async_nexts[VLIB_FRAME_SIZE], *async_next = async_nexts, n_async = 0;
+  u16 async_nexts[VLIB_FRAME_SIZE], *async_next = async_nexts;
   u16 noop_nexts[VLIB_FRAME_SIZE], *noop_next = noop_nexts, n_noop = 0;
   u32 sync_bi[VLIB_FRAME_SIZE];
   u32 noop_bi[VLIB_FRAME_SIZE];
@@ -1100,22 +1100,6 @@ esp_decrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
           is_async = im->async_mode | ipsec_sa_is_set_IS_ASYNC (sa0);
         }
-      if (is_async)
-        {
-          async_op = sa0->crypto_async_dec_op_id;
-
-          /* get a frame for this op if we don't yet have one or it's full
-           */
-          if (NULL == async_frames[async_op] ||
-              vnet_crypto_async_frame_is_full (async_frames[async_op]))
-            {
-              async_frames[async_op] =
-                vnet_crypto_async_get_frame (vm, async_op);
-              /* Save the frame to the list we'll submit at the end */
-              vec_add1 (ptd->async_frames, async_frames[async_op]);
-            }
-        }
-
       if (PREDICT_FALSE (~0 == sa0->thread_index))
         {
           /* this is the first packet to use this SA, claim the SA
@@ -1186,6 +1170,18 @@ esp_decrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
       if (is_async)
         {
+          async_op = sa0->crypto_async_dec_op_id;
+
+          /* get a frame for this op if we don't yet have one or it's full
+           */
+          if (NULL == async_frames[async_op] ||
+              vnet_crypto_async_frame_is_full (async_frames[async_op]))
+            {
+              async_frames[async_op] =
+                vnet_crypto_async_get_frame (vm, async_op);
+              /* Save the frame to the list we'll submit at the end */
+              vec_add1 (ptd->async_frames, async_frames[async_op]);
+            }
           err = esp_decrypt_prepare_async_frame (
             vm, node, ptd, async_frames[async_op], sa0, payload, len,
@@ -1219,10 +1215,8 @@ esp_decrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
               pd2 += 1;
             }
           else
-            {
-              n_async++;
-              async_next++;
-            }
+            async_next++;
+
       n_left -= 1;
       b += 1;
     }
@@ -1232,21 +1226,24 @@ esp_decrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
                                    current_sa_index, current_sa_pkts,
                                    current_sa_bytes);
-  if (n_async)
-    {
-      /* submit all of the open frames */
-      vnet_crypto_async_frame_t **async_frame;
+  /* submit or free all of the open frames */
+  vnet_crypto_async_frame_t **async_frame;
-      vec_foreach (async_frame, ptd->async_frames)
+  vec_foreach (async_frame, ptd->async_frames)
+    {
+      /* free frame and move on if no ops were successfully added */
+      if (PREDICT_FALSE (!(*async_frame)->n_elts))
         {
-          if (vnet_crypto_async_submit_open_frame (vm, *async_frame) < 0)
-            {
-              n_noop += esp_async_recycle_failed_submit (
-                vm, *async_frame, node, ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR,
-                n_sync, noop_bi, noop_nexts, ESP_DECRYPT_NEXT_DROP);
-              vnet_crypto_async_reset_frame (*async_frame);
-              vnet_crypto_async_free_frame (vm, *async_frame);
-            }
+          vnet_crypto_async_free_frame (vm, *async_frame);
+          continue;
+        }
+      if (vnet_crypto_async_submit_open_frame (vm, *async_frame) < 0)
+        {
+          n_noop += esp_async_recycle_failed_submit (
+            vm, *async_frame, node, ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR,
+            n_sync, noop_bi, noop_nexts, ESP_DECRYPT_NEXT_DROP);
+          vnet_crypto_async_reset_frame (*async_frame);
+          vnet_crypto_async_free_frame (vm, *async_frame);
         }
     }
diff --git a/src/vnet/ipsec/esp_encrypt.c b/src/vnet/ipsec/esp_encrypt.c
index 214cf674c75..30c2bf9a8ac 100644
--- a/src/vnet/ipsec/esp_encrypt.c
+++ b/src/vnet/ipsec/esp_encrypt.c
@@ -665,22 +665,6 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
           is_async = im->async_mode | ipsec_sa_is_set_IS_ASYNC (sa0);
         }
-      if (is_async)
-        {
-          async_op = sa0->crypto_async_enc_op_id;
-
-          /* get a frame for this op if we don't yet have one or it's full
-           */
-          if (NULL == async_frames[async_op] ||
-              vnet_crypto_async_frame_is_full (async_frames[async_op]))
-            {
-              async_frames[async_op] =
-                vnet_crypto_async_get_frame (vm, async_op);
-              /* Save the frame to the list we'll submit at the end */
-              vec_add1 (ptd->async_frames, async_frames[async_op]);
-            }
-        }
-
       if (PREDICT_FALSE (~0 == sa0->thread_index))
         {
           /* this is the first packet to use this SA, claim the SA
@@ -951,10 +935,25 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
           esp->seq = clib_net_to_host_u32 (sa0->seq);
           if (is_async)
-            esp_prepare_async_frame (vm, ptd, async_frames[async_op], sa0, b[0],
-                                     esp, payload, payload_len, iv_sz, icv_sz,
-                                     from[b - bufs], sync_next[0], hdr_len,
-                                     async_next_node, lb);
+            {
+              async_op = sa0->crypto_async_enc_op_id;
+
+              /* get a frame for this op if we don't yet have one or it's full
+               */
+              if (NULL == async_frames[async_op] ||
+                  vnet_crypto_async_frame_is_full (async_frames[async_op]))
+                {
+                  async_frames[async_op] =
+                    vnet_crypto_async_get_frame (vm, async_op);
+                  /* Save the frame to the list we'll submit at the end */
+                  vec_add1 (ptd->async_frames, async_frames[async_op]);
+                }
+
+              esp_prepare_async_frame (vm, ptd, async_frames[async_op], sa0, b[0],
+                                       esp, payload, payload_len, iv_sz, icv_sz,
+                                       from[b - bufs], sync_next[0], hdr_len,
+                                       async_next_node, lb);
+            }
           else
             esp_prepare_sync_op (vm, ptd, crypto_ops, integ_ops, sa0, payload,
                                  payload_len, iv_sz, icv_sz, n_sync, b, lb,