| author | Matthew Smith <mgsmith@netgate.com> | 2016-05-01 14:52:08 -0500 |
|---|---|---|
| committer | Matthew Smith <mgsmith@netgate.com> | 2016-05-04 11:41:52 -0500 |
| commit | 29d8510d22d05ebc73d423cc1904a4fef7123889 (patch) | |
| tree | 1b057d5fed90196d9f3c6aa3073f934ef6f931fc /vnet | |
| parent | 0aaf92ffbb8dd19f903c0784ea4ea6584ad6d0ee (diff) | |
VPP-42: VPP crashes in IPsec code when running multithreaded
Change-Id: Ib231642cfead5f5e8e45508361a11c87aad83b51
Signed-off-by: Matthew Smith <mgsmith@netgate.com>
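
The crash traced to a single, globally shared set of OpenSSL contexts (`encrypt_ctx`, `decrypt_ctx`, `hmac_ctx`) and one shared empty-buffer vector being used concurrently by all worker threads. The patch gives every VLIB thread its own copy of that state, indexed by `os_get_cpu_number()`. The standalone sketch below illustrates the per-thread-context idea outside of VPP; it uses the OpenSSL 1.0-era API that this code relies on (the structs are opaque in 1.1+), and `N_THREADS`, `per_thread_data_t`, and `get_encrypt_ctx()` are illustrative stand-ins, not names from the patch.

```c
/* Standalone sketch (not VPP code) of the core fix: one set of OpenSSL
 * cipher/HMAC contexts per worker thread instead of a single shared set.
 * Targets the OpenSSL 1.0.x API used by the diff below; build with:
 *   cc sketch.c -lcrypto */
#include <openssl/evp.h>
#include <openssl/hmac.h>
#include <stdio.h>
#include <stdlib.h>

#define N_THREADS 4                 /* stands in for tm->n_vlib_mains */

typedef struct {
  EVP_CIPHER_CTX encrypt_ctx;       /* per-thread cipher state */
  EVP_CIPHER_CTX decrypt_ctx;
  HMAC_CTX hmac_ctx;                /* per-thread HMAC state */
} per_thread_data_t;

static per_thread_data_t *per_thread_data;

/* Mirrors the loop the patch adds to esp_init(): initialize one context set
 * per thread up front, so the data path never shares or locks. */
static void
init_per_thread_data (void)
{
  int i;
  per_thread_data = calloc (N_THREADS, sizeof (per_thread_data_t));
  for (i = 0; i < N_THREADS; i++)
    {
      EVP_CIPHER_CTX_init (&per_thread_data[i].encrypt_ctx);
      EVP_CIPHER_CTX_init (&per_thread_data[i].decrypt_ctx);
      HMAC_CTX_init (&per_thread_data[i].hmac_ctx);
    }
}

/* In the data path each worker indexes its own slot by thread id, the way
 * the patched hmac_calc() and esp_*_aes_cbc() index by os_get_cpu_number(). */
static EVP_CIPHER_CTX *
get_encrypt_ctx (unsigned thread_index)
{
  return &per_thread_data[thread_index].encrypt_ctx;
}

int
main (void)
{
  init_per_thread_data ();
  printf ("encrypt ctx for thread 2 lives at %p\n",
          (void *) get_encrypt_ctx (2));
  return 0;
}
```

Padding each per-thread slot onto its own cache lines (the `CLIB_CACHE_LINE_ALIGN_MARK` markers in the diff) additionally avoids false sharing between workers.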
Diffstat (limited to 'vnet')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | vnet/vnet/ipsec/esp.h | 36 |
| -rw-r--r-- | vnet/vnet/ipsec/esp_decrypt.c | 19 |
| -rw-r--r-- | vnet/vnet/ipsec/esp_encrypt.c | 19 |
| -rw-r--r-- | vnet/vnet/ipsec/ipsec.c | 3 |
| -rw-r--r-- | vnet/vnet/ipsec/ipsec.h | 14 |
5 files changed, 58 insertions, 33 deletions
```diff
diff --git a/vnet/vnet/ipsec/esp.h b/vnet/vnet/ipsec/esp.h
index b44c26e1fa4..2334dc48bc5 100644
--- a/vnet/vnet/ipsec/esp.h
+++ b/vnet/vnet/ipsec/esp.h
@@ -50,16 +50,22 @@ typedef struct {
   u8 trunc_size;
 } esp_integ_alg_t;
-
 typedef struct {
-  esp_crypto_alg_t * esp_crypto_algs;
-  esp_integ_alg_t * esp_integ_algs;
+  CLIB_CACHE_LINE_ALIGN_MARK(cacheline0);
   EVP_CIPHER_CTX encrypt_ctx;
+  CLIB_CACHE_LINE_ALIGN_MARK(cacheline1);
   EVP_CIPHER_CTX decrypt_ctx;
+  CLIB_CACHE_LINE_ALIGN_MARK(cacheline2);
   HMAC_CTX hmac_ctx;
-  ipsec_crypto_alg_t last_encrytp_alg;
-  ipsec_crypto_alg_t last_decrytp_alg;
+  ipsec_crypto_alg_t last_encrypt_alg;
+  ipsec_crypto_alg_t last_decrypt_alg;
   ipsec_integ_alg_t last_integ_alg;
+} esp_main_per_thread_data_t;
+
+typedef struct {
+  esp_crypto_alg_t * esp_crypto_algs;
+  esp_integ_alg_t * esp_integ_algs;
+  esp_main_per_thread_data_t * per_thread_data;
 } esp_main_t;
 
 esp_main_t esp_main;
 
@@ -68,6 +74,7 @@ always_inline void
 esp_init()
 {
   esp_main_t * em = &esp_main;
+  vlib_thread_main_t * tm = vlib_get_thread_main();
 
   memset (em, 0, sizeof (em[0]));
 
@@ -99,9 +106,15 @@ esp_init()
   i->md = EVP_sha512();
   i->trunc_size = 32;
 
-  EVP_CIPHER_CTX_init(&(em->encrypt_ctx));
-  EVP_CIPHER_CTX_init(&(em->decrypt_ctx));
-  HMAC_CTX_init(&(em->hmac_ctx));
+  vec_validate_aligned(em->per_thread_data, tm->n_vlib_mains-1, CLIB_CACHE_LINE_BYTES);
+  int thread_id;
+
+  for (thread_id = 0; thread_id < tm->n_vlib_mains - 1; thread_id++)
+    {
+      EVP_CIPHER_CTX_init(&(em->per_thread_data[thread_id].encrypt_ctx));
+      EVP_CIPHER_CTX_init(&(em->per_thread_data[thread_id].decrypt_ctx));
+      HMAC_CTX_init(&(em->per_thread_data[thread_id].hmac_ctx));
+    }
 }
 
 always_inline unsigned int
@@ -115,7 +128,8 @@ hmac_calc(ipsec_integ_alg_t alg,
           u32 seq_hi)
 {
   esp_main_t * em = &esp_main;
-  HMAC_CTX * ctx = &(em->hmac_ctx);
+  u32 cpu_index = os_get_cpu_number();
+  HMAC_CTX * ctx = &(em->per_thread_data[cpu_index].hmac_ctx);
   const EVP_MD * md = NULL;
   unsigned int len;
 
@@ -124,9 +138,9 @@ hmac_calc(ipsec_integ_alg_t alg,
   if (PREDICT_FALSE(em->esp_integ_algs[alg].md == 0))
     return 0;
 
-  if (PREDICT_FALSE(alg != em->last_integ_alg)) {
+  if (PREDICT_FALSE(alg != em->per_thread_data[cpu_index].last_integ_alg)) {
     md = em->esp_integ_algs[alg].md;
-    em->last_integ_alg = alg;
+    em->per_thread_data[cpu_index].last_integ_alg = alg;
   }
 
   HMAC_Init(ctx, key, key_len, md);
diff --git a/vnet/vnet/ipsec/esp_decrypt.c b/vnet/vnet/ipsec/esp_decrypt.c
index ad511b0fba3..958a4d6738a 100644
--- a/vnet/vnet/ipsec/esp_decrypt.c
+++ b/vnet/vnet/ipsec/esp_decrypt.c
@@ -85,7 +85,8 @@ esp_decrypt_aes_cbc(ipsec_crypto_alg_t alg,
                     u8 * iv)
 {
   esp_main_t * em = &esp_main;
-  EVP_CIPHER_CTX * ctx = &(em->decrypt_ctx);
+  u32 cpu_index = os_get_cpu_number();
+  EVP_CIPHER_CTX * ctx = &(em->per_thread_data[cpu_index].decrypt_ctx);
   const EVP_CIPHER * cipher = NULL;
   int out_len;
 
@@ -94,9 +95,9 @@ esp_decrypt_aes_cbc(ipsec_crypto_alg_t alg,
   if (PREDICT_FALSE(em->esp_crypto_algs[alg].type == 0))
     return;
 
-  if (PREDICT_FALSE(alg != em->last_decrytp_alg)) {
+  if (PREDICT_FALSE(alg != em->per_thread_data[cpu_index].last_decrypt_alg)) {
     cipher = em->esp_crypto_algs[alg].type;
-    em->last_decrytp_alg = alg;
+    em->per_thread_data[cpu_index].last_decrypt_alg = alg;
   }
 
   EVP_DecryptInit_ex(ctx, cipher, NULL, key, iv);
@@ -235,10 +236,12 @@ esp_decrypt_node_fn (vlib_main_t * vm,
   u32 * recycle = 0;
   from = vlib_frame_vector_args (from_frame);
   n_left_from = from_frame->n_vectors;
+  u32 cpu_index = os_get_cpu_number();
+  u32 * empty_buffers = im->empty_buffers[cpu_index];
 
   ipsec_alloc_empty_buffers(vm, im);
 
-  if (PREDICT_FALSE(vec_len (im->empty_buffers) < n_left_from)){
+  if (PREDICT_FALSE(vec_len (empty_buffers) < n_left_from)){
     vlib_node_increment_counter (vm, esp_decrypt_node.index,
                                  ESP_DECRYPT_ERROR_NO_BUFFER, n_left_from);
     goto free_buffers_and_exit;
@@ -327,11 +330,11 @@ esp_decrypt_node_fn (vlib_main_t * vm,
             }
 
           /* grab free buffer */
-          uword last_empty_buffer = vec_len (im->empty_buffers) - 1;
-          o_bi0 = im->empty_buffers[last_empty_buffer];
+          uword last_empty_buffer = vec_len (empty_buffers) - 1;
+          o_bi0 = empty_buffers[last_empty_buffer];
           o_b0 = vlib_get_buffer (vm, o_bi0);
-          vlib_prefetch_buffer_with_index (vm, im->empty_buffers[last_empty_buffer-1], STORE);
-          _vec_len (im->empty_buffers) = last_empty_buffer;
+          vlib_prefetch_buffer_with_index (vm, empty_buffers[last_empty_buffer-1], STORE);
+          _vec_len (empty_buffers) = last_empty_buffer;
 
           /* add old buffer to the recycle list */
           vec_add1(recycle, i_bi0);
diff --git a/vnet/vnet/ipsec/esp_encrypt.c b/vnet/vnet/ipsec/esp_encrypt.c
index 7194399f803..39bbf2e47ff 100644
--- a/vnet/vnet/ipsec/esp_encrypt.c
+++ b/vnet/vnet/ipsec/esp_encrypt.c
@@ -89,7 +89,8 @@ esp_encrypt_aes_cbc(ipsec_crypto_alg_t alg,
                     u8 * iv)
 {
   esp_main_t * em = &esp_main;
-  EVP_CIPHER_CTX * ctx = &(em->encrypt_ctx);
+  u32 cpu_index = os_get_cpu_number();
+  EVP_CIPHER_CTX * ctx = &(em->per_thread_data[cpu_index].encrypt_ctx);
   const EVP_CIPHER * cipher = NULL;
   int out_len;
 
@@ -98,9 +99,9 @@ esp_encrypt_aes_cbc(ipsec_crypto_alg_t alg,
   if (PREDICT_FALSE(em->esp_crypto_algs[alg].type == IPSEC_CRYPTO_ALG_NONE))
     return;
 
-  if (PREDICT_FALSE(alg != em->last_encrytp_alg)) {
+  if (PREDICT_FALSE(alg != em->per_thread_data[cpu_index].last_encrypt_alg)) {
     cipher = em->esp_crypto_algs[alg].type;
-    em->last_encrytp_alg = alg;
+    em->per_thread_data[cpu_index].last_encrypt_alg = alg;
   }
 
   EVP_EncryptInit_ex(ctx, cipher, NULL, key, iv);
@@ -142,10 +143,12 @@ esp_encrypt_node_fn (vlib_main_t * vm,
   n_left_from = from_frame->n_vectors;
   ipsec_main_t *im = &ipsec_main;
   u32 * recycle = 0;
+  u32 cpu_index = os_get_cpu_number();
+  u32 * empty_buffers = im->empty_buffers[cpu_index];
 
   ipsec_alloc_empty_buffers(vm, im);
 
-  if (PREDICT_FALSE(vec_len (im->empty_buffers) < n_left_from)){
+  if (PREDICT_FALSE(vec_len (empty_buffers) < n_left_from)){
     vlib_node_increment_counter (vm, esp_encrypt_node.index,
                                  ESP_ENCRYPT_ERROR_NO_BUFFER, n_left_from);
     clib_warning("no enough empty buffers. discarding frame");
@@ -197,13 +200,13 @@ esp_encrypt_node_fn (vlib_main_t * vm,
             }
 
           /* grab free buffer */
-          last_empty_buffer = vec_len (im->empty_buffers) - 1;
-          o_bi0 = im->empty_buffers[last_empty_buffer];
+          last_empty_buffer = vec_len (empty_buffers) - 1;
+          o_bi0 = empty_buffers[last_empty_buffer];
           o_b0 = vlib_get_buffer (vm, o_bi0);
           o_b0->current_data = sizeof(ethernet_header_t);
           ih0 = vlib_buffer_get_current (i_b0);
-          vlib_prefetch_buffer_with_index (vm, im->empty_buffers[last_empty_buffer-1], STORE);
-          _vec_len (im->empty_buffers) = last_empty_buffer;
+          vlib_prefetch_buffer_with_index (vm, empty_buffers[last_empty_buffer-1], STORE);
+          _vec_len (empty_buffers) = last_empty_buffer;
 
           to_next[0] = o_bi0;
           to_next += 1;
diff --git a/vnet/vnet/ipsec/ipsec.c b/vnet/vnet/ipsec/ipsec.c
index 47beafc32b5..ea077d0a127 100644
--- a/vnet/vnet/ipsec/ipsec.c
+++ b/vnet/vnet/ipsec/ipsec.c
@@ -492,6 +492,7 @@ ipsec_init (vlib_main_t * vm)
 {
   clib_error_t * error;
   ipsec_main_t * im = &ipsec_main;
+  vlib_thread_main_t * tm = vlib_get_thread_main();
   vlib_node_t * node;
 
   ipsec_rand_seed();
@@ -505,6 +506,8 @@ ipsec_init (vlib_main_t * vm)
   im->sa_index_by_sa_id = hash_create (0, sizeof (uword));
   im->spd_index_by_sw_if_index = hash_create (0, sizeof (uword));
 
+  vec_validate_aligned(im->empty_buffers, tm->n_vlib_mains-1, CLIB_CACHE_LINE_BYTES);
+
   node = vlib_get_node_by_name (vm, (u8 *) "error-drop");
   ASSERT(node);
   im->error_drop_node_index = node->index;
diff --git a/vnet/vnet/ipsec/ipsec.h b/vnet/vnet/ipsec/ipsec.h
index 386c0adecd6..8cd2b41c92f 100644
--- a/vnet/vnet/ipsec/ipsec.h
+++ b/vnet/vnet/ipsec/ipsec.h
@@ -175,7 +175,7 @@ typedef struct {
   ipsec_tunnel_if_t * tunnel_interfaces;
   u32 * free_tunnel_if_indices;
 
-  u32 * empty_buffers;
+  u32 ** empty_buffers;
 
   uword * tunnel_index_by_key;
 
@@ -242,20 +242,22 @@ ipsec_alloc_empty_buffers(vlib_main_t * vm, ipsec_main_t *im)
 #else
   u32 free_list_index = VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX;
 #endif
-  uword l = vec_len (im->empty_buffers);
+  u32 cpu_index = os_get_cpu_number();
+  uword l = vec_len (im->empty_buffers[cpu_index]);
   uword n_alloc = 0;
 
   if (PREDICT_FALSE(l < VLIB_FRAME_SIZE)) {
-    if (!im->empty_buffers) {
-      vec_alloc (im->empty_buffers, 2 * VLIB_FRAME_SIZE );
+    if (!im->empty_buffers[cpu_index]) {
+      vec_alloc (im->empty_buffers[cpu_index], 2 * VLIB_FRAME_SIZE );
     }
 
-    n_alloc = vlib_buffer_alloc_from_free_list (vm, im->empty_buffers + l,
+    n_alloc = vlib_buffer_alloc_from_free_list (vm,
+                                                im->empty_buffers[cpu_index] + l,
                                                 2 * VLIB_FRAME_SIZE - l,
                                                 free_list_index);
-    _vec_len (im->empty_buffers) = l + n_alloc;
+    _vec_len (im->empty_buffers[cpu_index]) = l + n_alloc;
   }
 }
```