 src/vnet/ipsec/esp_decrypt.c | 80
 src/vnet/ipsec/ipsec.c       | 49
 src/vnet/ipsec/ipsec.h       |  7
 3 files changed, 100 insertions(+), 36 deletions(-)
diff --git a/src/vnet/ipsec/esp_decrypt.c b/src/vnet/ipsec/esp_decrypt.c
index 905b13aa81c..21159fba84b 100644
--- a/src/vnet/ipsec/esp_decrypt.c
+++ b/src/vnet/ipsec/esp_decrypt.c
@@ -14,7 +14,6 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 #include <vnet/vnet.h>
 #include <vnet/api_errno.h>
 #include <vnet/ip/ip.h>
@@ -749,6 +748,7 @@ out:
 
 static_always_inline void
 esp_decrypt_post_crypto (vlib_main_t *vm, const vlib_node_runtime_t *node,
+                         const u16 *next_by_next_header,
                          const esp_decrypt_packet_data_t *pd,
                          const esp_decrypt_packet_data2_t *pd2,
                          vlib_buffer_t *b, u16 *next, int is_ip6, int is_tun,
@@ -920,44 +920,49 @@ esp_decrypt_post_crypto (vlib_main_t *vm, const vlib_node_runtime_t *node,
       b->current_length = pd->current_length - adv;
       esp_remove_tail (vm, b, lb, tail);
     }
-  else
+  else if (is_tun && next_header == IP_PROTOCOL_GRE)
     {
-      if (is_tun && next_header == IP_PROTOCOL_GRE)
-        {
-          gre_header_t *gre;
+      gre_header_t *gre;
 
-          b->current_data = pd->current_data + adv;
-          b->current_length = pd->current_length - adv - tail;
+      b->current_data = pd->current_data + adv;
+      b->current_length = pd->current_length - adv - tail;
 
-          gre = vlib_buffer_get_current (b);
+      gre = vlib_buffer_get_current (b);
 
-          vlib_buffer_advance (b, sizeof (*gre));
+      vlib_buffer_advance (b, sizeof (*gre));
 
-          switch (clib_net_to_host_u16 (gre->protocol))
-            {
-            case GRE_PROTOCOL_teb:
-              vnet_update_l2_len (b);
-              next[0] = ESP_DECRYPT_NEXT_L2_INPUT;
-              break;
-            case GRE_PROTOCOL_ip4:
-              next[0] = ESP_DECRYPT_NEXT_IP4_INPUT;
-              break;
-            case GRE_PROTOCOL_ip6:
-              next[0] = ESP_DECRYPT_NEXT_IP6_INPUT;
-              break;
-            default:
-              b->error = node->errors[ESP_DECRYPT_ERROR_UNSUP_PAYLOAD];
-              next[0] = ESP_DECRYPT_NEXT_DROP;
-              break;
-            }
-        }
-      else
+      switch (clib_net_to_host_u16 (gre->protocol))
         {
-          next[0] = ESP_DECRYPT_NEXT_DROP;
+        case GRE_PROTOCOL_teb:
+          vnet_update_l2_len (b);
+          next[0] = ESP_DECRYPT_NEXT_L2_INPUT;
+          break;
+        case GRE_PROTOCOL_ip4:
+          next[0] = ESP_DECRYPT_NEXT_IP4_INPUT;
+          break;
+        case GRE_PROTOCOL_ip6:
+          next[0] = ESP_DECRYPT_NEXT_IP6_INPUT;
+          break;
+        default:
           b->error = node->errors[ESP_DECRYPT_ERROR_UNSUP_PAYLOAD];
-          return;
+          next[0] = ESP_DECRYPT_NEXT_DROP;
+          break;
         }
     }
+  else if ((next[0] = vec_elt (next_by_next_header, next_header)) !=
+           (u16) ~0)
+    {
+      b->current_data = pd->current_data + adv;
+      b->current_length = pd->current_length - adv;
+      esp_remove_tail (vm, b, lb, tail);
+    }
+  else
+    {
+      next[0] = ESP_DECRYPT_NEXT_DROP;
+      b->error = node->errors[ESP_DECRYPT_ERROR_UNSUP_PAYLOAD];
+      return;
+    }
+
   if (is_tun)
     {
       if (ipsec_sa_is_set_IS_PROTECT (sa0))
@@ -1028,6 +1033,7 @@ esp_decrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
                     u16 async_next_node)
 {
   ipsec_main_t *im = &ipsec_main;
+  const u16 *next_by_next_header = im->next_header_registrations;
   u32 thread_index = vm->thread_index;
   u16 len;
   ipsec_per_thread_data_t *ptd = vec_elt_at_index (im->ptd, thread_index);
@@ -1307,8 +1313,8 @@ esp_decrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
       current_sa_index = vnet_buffer (b[0])->ipsec.sad_index;
 
       if (sync_next[0] >= ESP_DECRYPT_N_NEXT)
-        esp_decrypt_post_crypto (vm, node, pd, pd2, b[0], sync_next, is_ip6,
-                                 is_tun, 0);
+        esp_decrypt_post_crypto (vm, node, next_by_next_header, pd, pd2, b[0],
+                                 sync_next, is_ip6, is_tun, 0);
 
       /* trace: */
       if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
@@ -1349,6 +1355,8 @@ esp_decrypt_post_inline (vlib_main_t * vm,
                          vlib_node_runtime_t * node,
                          vlib_frame_t * from_frame, int is_ip6, int is_tun)
 {
+  const ipsec_main_t *im = &ipsec_main;
+  const u16 *next_by_next_header = im->next_header_registrations;
   u32 *from = vlib_frame_vector_args (from_frame);
   u32 n_left = from_frame->n_vectors;
   vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
@@ -1366,13 +1374,13 @@ esp_decrypt_post_inline (vlib_main_t * vm,
         }
 
       if (!pd->is_chain)
-        esp_decrypt_post_crypto (vm, node, pd, 0, b[0], next, is_ip6, is_tun,
-                                 1);
+        esp_decrypt_post_crypto (vm, node, next_by_next_header, pd, 0, b[0],
+                                 next, is_ip6, is_tun, 1);
       else
         {
           esp_decrypt_packet_data2_t *pd2 = esp_post_data2 (b[0]);
-          esp_decrypt_post_crypto (vm, node, pd, pd2, b[0], next, is_ip6,
-                                   is_tun, 1);
+          esp_decrypt_post_crypto (vm, node, next_by_next_header, pd, pd2,
+                                   b[0], next, is_ip6, is_tun, 1);
         }
 
       /*trace: */
diff --git a/src/vnet/ipsec/ipsec.c b/src/vnet/ipsec/ipsec.c
index 2749b04587b..62ab23976a8 100644
--- a/src/vnet/ipsec/ipsec.c
+++ b/src/vnet/ipsec/ipsec.c
@@ -39,6 +39,52 @@ ipsec_main_t ipsec_main;
 esp_async_post_next_t esp_encrypt_async_next;
 esp_async_post_next_t esp_decrypt_async_next;
 
+clib_error_t *
+ipsec_register_next_header (vlib_main_t *vm, u8 next_header,
+                            const char *next_node)
+{
+  ipsec_main_t *im = &ipsec_main;
+  const vlib_node_t *node = vlib_get_node_by_name (vm, (u8 *) next_node);
+  /* -post nodes (eg. esp4-decrypt-post) are siblings of non-post nodes (eg.
+   * esp4-decrypt) and will therefore have the same next index */
+  const vlib_node_t *esp_decrypt_nodes[] = {
+    vlib_get_node (vm, im->esp4_decrypt_node_index),
+    vlib_get_node (vm, im->esp6_decrypt_node_index),
+    vlib_get_node (vm, im->esp4_decrypt_tun_node_index),
+    vlib_get_node (vm, im->esp6_decrypt_tun_node_index),
+  };
+  uword slot, max;
+  int i;
+
+  /* looks for a next_index value that we can use for all esp decrypt nodes to
+   * avoid maintaining different next index arrays...
+   */
+  slot = vlib_node_get_next (vm, esp_decrypt_nodes[0]->index, node->index);
+  max = vec_len (esp_decrypt_nodes[0]->next_nodes);
+  for (i = 1; i < ARRAY_LEN (esp_decrypt_nodes); i++)
+    {
+      /* if next node already exists, check it shares the same next_index */
+      if (slot !=
+          vlib_node_get_next (vm, esp_decrypt_nodes[i]->index, node->index))
+        return clib_error_return (
+          0, "next node already exists with different next index");
+      /* compute a suitable slot from the max of all nodes next index */
+      max = clib_max (max, vec_len (esp_decrypt_nodes[i]->next_nodes));
+    }
+
+  if (~0 == slot)
+    {
+      /* next node not there yet, add it using the computed max */
+      slot = max;
+      for (i = 0; i < ARRAY_LEN (esp_decrypt_nodes); i++)
+        vlib_node_add_next_with_slot (vm, esp_decrypt_nodes[i]->index,
+                                      node->index, slot);
+    }
+
+  im->next_header_registrations[next_header] = slot;
+  return 0;
+}
+
 static clib_error_t *
 ipsec_check_ah_support (ipsec_sa_t * sa)
 {
@@ -570,6 +616,9 @@ ipsec_init (vlib_main_t * vm)
   im->input_epoch_count = 0;
 
   im->ipsec4_in_spd_hash_num_buckets = IPSEC4_SPD_DEFAULT_HASH_NUM_BUCKETS;
 
+  vec_validate_init_empty_aligned (im->next_header_registrations, 255, ~0,
+                                   CLIB_CACHE_LINE_BYTES);
+
   return 0;
 }
diff --git a/src/vnet/ipsec/ipsec.h b/src/vnet/ipsec/ipsec.h
index 05d8484a28c..58b0ffc93f9 100644
--- a/src/vnet/ipsec/ipsec.h
+++ b/src/vnet/ipsec/ipsec.h
@@ -149,6 +149,9 @@ typedef struct
 
   uword *tunnel_index_by_key;
 
+  /* next_header protocol registration */
+  u16 *next_header_registrations;
+
   /* convenience */
   vlib_main_t *vlib_main;
   vnet_main_t *vnet_main;
@@ -358,6 +361,10 @@ void ipsec_set_async_mode (u32 is_enabled);
 extern void ipsec_register_udp_port (u16 udp_port);
 extern void ipsec_unregister_udp_port (u16 udp_port);
 
+extern clib_error_t *ipsec_register_next_header (vlib_main_t *vm,
+                                                 u8 next_header,
+                                                 const char *next_node);
+
 #endif /* __IPSEC_H__ */
 
 /*
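
Usage note (not part of the patch): the sketch below shows how a feature's init
function might call the new ipsec_register_next_header() API so that decrypted
inner packets carrying a given next-header protocol are steered to its own graph
node instead of being dropped as unsupported payload. The protocol value, the
node name "my-feature-input" and the init function are illustrative placeholders;
only ipsec_register_next_header() and its signature come from this change.

#include <vlib/vlib.h>
#include <vnet/ipsec/ipsec.h>

static clib_error_t *
my_feature_ipsec_init (vlib_main_t *vm)
{
  /* Illustrative: register IP protocol 137 (MPLS-in-IP) so that the
   * esp4/esp6-decrypt and -decrypt-tun nodes (and, as siblings, their
   * -post variants) forward matching inner packets to a hypothetical
   * "my-feature-input" node. The node must already be registered. */
  return ipsec_register_next_header (vm, 137 /* MPLS-in-IP */,
                                     "my-feature-input");
}

/* Run after ipsec_init, which allocates im->next_header_registrations
 * and creates the esp decrypt nodes. */
VLIB_INIT_FUNCTION (my_feature_ipsec_init) = {
  .runs_after = VLIB_INITS ("ipsec_init"),
};

Next headers that are never registered keep the default (u16) ~0 entry in
next_header_registrations, so esp_decrypt_post_crypto() still drops them with
ESP_DECRYPT_ERROR_UNSUP_PAYLOAD, matching the behaviour before this change.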