| author | Klement Sekera <ksekera@cisco.com> | 2018-10-09 16:05:48 +0200 |
|---|---|---|
| committer | Damjan Marion <dmarion@me.com> | 2018-10-22 08:22:52 +0000 |
| commit | be5a5dd904d4d25857c53a4b5dee7951f724e3e2 (patch) | |
| tree | f1623d508a65daf534b8307d29eb6d5492d7cb06 | |
| parent | b9ffffd61967ba9bc9453c93348be1ea5412c638 (diff) | |
ipsec: split ipsec nodes into ip4/ip6 nodes
Change-Id: Ic6b27659f1fe9e8df39e80a0441305e4e952195a
Signed-off-by: Klement Sekera <ksekera@cisco.com>
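The pattern applied throughout this patch is the same in every file: the former dual-stack node function becomes an `always_inline` worker that takes an `is_ip6` flag, and two thin wrappers register as separate ip4/ip6 graph nodes, so errors and counters are attributed to the correct per-address-family node and the per-packet IP-version test is resolved at compile time. The sketch below illustrates only that shape; the node, function, and error names are hypothetical and not taken from the patch.

```c
#include <vlib/vlib.h>
#include <vnet/vnet.h>

/* Hypothetical per-AF node pair illustrating the refactoring pattern. */
vlib_node_registration_t example4_node;
vlib_node_registration_t example6_node;

#define foreach_example_error _ (HANDLED, "packets handled")

typedef enum
{
#define _(sym, str) EXAMPLE_ERROR_##sym,
  foreach_example_error
#undef _
    EXAMPLE_N_ERROR,
} example_error_t;

static char *example_error_strings[] = {
#define _(sym, str) str,
  foreach_example_error
#undef _
};

always_inline uword
example_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
		vlib_frame_t * frame, int is_ip6)
{
  /* is_ip6 is a compile-time constant in each wrapper, so this branch
     disappears and counters land on the right per-AF node. */
  u32 node_index = is_ip6 ? example6_node.index : example4_node.index;

  /* ... per-packet work would go here ... */

  vlib_node_increment_counter (vm, node_index, EXAMPLE_ERROR_HANDLED,
			       frame->n_vectors);
  return frame->n_vectors;
}

static uword
example4_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
		  vlib_frame_t * frame)
{
  return example_inline (vm, node, frame, 0 /* is_ip6 */ );
}

static uword
example6_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
		  vlib_frame_t * frame)
{
  return example_inline (vm, node, frame, 1 /* is_ip6 */ );
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (example4_node) = {
  .function = example4_node_fn,
  .name = "example4-node",
  .vector_size = sizeof (u32),
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = EXAMPLE_N_ERROR,
  .error_strings = example_error_strings,
};

VLIB_REGISTER_NODE (example6_node) = {
  .function = example6_node_fn,
  .name = "example6-node",
  .vector_size = sizeof (u32),
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = EXAMPLE_N_ERROR,
  .error_strings = example_error_strings,
};
/* *INDENT-ON* */
```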
| Mode | Path | Lines changed |
|---|---|---|
| -rw-r--r-- | src/plugins/dpdk/ipsec/esp_decrypt.c | 148 |
| -rw-r--r-- | src/plugins/dpdk/ipsec/esp_encrypt.c | 145 |
| -rw-r--r-- | src/plugins/dpdk/ipsec/ipsec.c | 32 |
| -rw-r--r-- | src/vnet/ip/ip4_forward.c | 8 |
| -rw-r--r-- | src/vnet/ip/ip6_forward.c | 8 |
| -rw-r--r-- | src/vnet/ipsec/ah_decrypt.c | 189 |
| -rw-r--r-- | src/vnet/ipsec/ah_encrypt.c | 119 |
| -rw-r--r-- | src/vnet/ipsec/esp_decrypt.c | 148 |
| -rw-r--r-- | src/vnet/ipsec/esp_encrypt.c | 95 |
| -rw-r--r-- | src/vnet/ipsec/ipsec.c | 53 |
| -rw-r--r-- | src/vnet/ipsec/ipsec.h | 58 |
| -rw-r--r-- | src/vnet/ipsec/ipsec_if.c | 8 |
| -rw-r--r-- | src/vnet/ipsec/ipsec_if_in.c | 4 |
| -rw-r--r-- | src/vnet/ipsec/ipsec_input.c | 71 |
| -rw-r--r-- | src/vnet/ipsec/ipsec_output.c | 68 |

15 files changed, 764 insertions(+), 390 deletions(-)
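Downstream of the renames, consumers such as ipsec_output.c now select between the per-address-family node indices that ipsec.h adds to ipsec_main_t (esp4/esp6 and ah4/ah6 variants). Below is a minimal sketch of that selection, written as a hypothetical helper rather than the patch's inline if/else chain; the field and enum names follow the ipsec.h changes in this diff, and <vnet/ipsec/ipsec.h> is assumed to be available.

```c
#include <vnet/ipsec/ipsec.h>

/* Hypothetical helper: choose the per-AF crypto node index for an SA. */
static u32
example_select_encrypt_node (ipsec_main_t * im, ipsec_sa_t * sa, int is_ipv6)
{
  if (sa->protocol == IPSEC_PROTOCOL_ESP)
    return is_ipv6 ? im->esp6_encrypt_node_index : im->esp4_encrypt_node_index;

  return is_ipv6 ? im->ah6_encrypt_node_index : im->ah4_encrypt_node_index;
}
```

The patch itself performs the equivalent selection inline in ipsec_output_inline(), as shown in the ipsec_output.c hunks below.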
diff --git a/src/plugins/dpdk/ipsec/esp_decrypt.c b/src/plugins/dpdk/ipsec/esp_decrypt.c index 8327e743d47..bc40b4ba2f8 100644 --- a/src/plugins/dpdk/ipsec/esp_decrypt.c +++ b/src/plugins/dpdk/ipsec/esp_decrypt.c @@ -62,7 +62,8 @@ static char * esp_decrypt_error_strings[] = { #undef _ }; -vlib_node_registration_t dpdk_esp_decrypt_node; +vlib_node_registration_t dpdk_esp4_decrypt_node; +vlib_node_registration_t dpdk_esp6_decrypt_node; typedef struct { ipsec_crypto_alg_t crypto_alg; @@ -87,10 +88,11 @@ static u8 * format_esp_decrypt_trace (u8 * s, va_list * args) return s; } -static uword -dpdk_esp_decrypt_node_fn (vlib_main_t * vm, +always_inline uword +dpdk_esp_decrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node, - vlib_frame_t * from_frame) + vlib_frame_t * from_frame, + int is_ip6) { u32 n_left_from, *from, *to_next, next_index; ipsec_main_t *im = &ipsec_main; @@ -113,7 +115,11 @@ dpdk_esp_decrypt_node_fn (vlib_main_t * vm, ret = crypto_alloc_ops (numa, ops, n_left_from); if (ret) { - vlib_node_increment_counter (vm, dpdk_esp_decrypt_node.index, + if(is_ip6) + vlib_node_increment_counter (vm, dpdk_esp6_decrypt_node.index, + ESP_DECRYPT_ERROR_DISCARD, 1); + else + vlib_node_increment_counter (vm, dpdk_esp4_decrypt_node.index, ESP_DECRYPT_ERROR_DISCARD, 1); /* Discard whole frame */ return n_left_from; @@ -179,7 +185,11 @@ dpdk_esp_decrypt_node_fn (vlib_main_t * vm, if (PREDICT_FALSE (res_idx == (u16) ~0)) { clib_warning ("unsupported SA by thread index %u", thread_idx); - vlib_node_increment_counter (vm, dpdk_esp_decrypt_node.index, + if(is_ip6) + vlib_node_increment_counter (vm, dpdk_esp6_decrypt_node.index, + ESP_DECRYPT_ERROR_NOSUP, 1); + else + vlib_node_increment_counter (vm, dpdk_esp4_decrypt_node.index, ESP_DECRYPT_ERROR_NOSUP, 1); to_next[0] = bi0; to_next += 1; @@ -192,7 +202,11 @@ dpdk_esp_decrypt_node_fn (vlib_main_t * vm, if (PREDICT_FALSE (error || !session)) { clib_warning ("failed to get crypto session"); - vlib_node_increment_counter (vm, dpdk_esp_decrypt_node.index, + if(is_ip6) + vlib_node_increment_counter (vm, dpdk_esp6_decrypt_node.index, + ESP_DECRYPT_ERROR_SESSION, 1); + else + vlib_node_increment_counter (vm, dpdk_esp4_decrypt_node.index, ESP_DECRYPT_ERROR_SESSION, 1); to_next[0] = bi0; to_next += 1; @@ -218,7 +232,11 @@ dpdk_esp_decrypt_node_fn (vlib_main_t * vm, if (PREDICT_FALSE (rv)) { clib_warning ("failed anti-replay check"); - vlib_node_increment_counter (vm, dpdk_esp_decrypt_node.index, + if(is_ip6) + vlib_node_increment_counter (vm, dpdk_esp6_decrypt_node.index, + ESP_DECRYPT_ERROR_REPLAY, 1); + else + vlib_node_increment_counter (vm, dpdk_esp4_decrypt_node.index, ESP_DECRYPT_ERROR_REPLAY, 1); to_next[0] = bi0; to_next += 1; @@ -254,7 +272,11 @@ dpdk_esp_decrypt_node_fn (vlib_main_t * vm, { clib_warning ("payload %u not multiple of %d\n", payload_len, cipher_alg->boundary); - vlib_node_increment_counter (vm, dpdk_esp_decrypt_node.index, + if(is_ip6) + vlib_node_increment_counter (vm, dpdk_esp6_decrypt_node.index, + ESP_DECRYPT_ERROR_BAD_LEN, 1); + else + vlib_node_increment_counter (vm, dpdk_esp4_decrypt_node.index, ESP_DECRYPT_ERROR_BAD_LEN, 1); res->n_ops -= 1; to_next[0] = bi0; @@ -331,22 +353,41 @@ trace: vlib_put_next_frame (vm, node, next_index, n_left_to_next); } - vlib_node_increment_counter (vm, dpdk_esp_decrypt_node.index, - ESP_DECRYPT_ERROR_RX_PKTS, - from_frame->n_vectors); + if(is_ip6){ + vlib_node_increment_counter (vm, dpdk_esp6_decrypt_node.index, + ESP_DECRYPT_ERROR_RX_PKTS, + from_frame->n_vectors); + + crypto_enqueue_ops (vm, 
cwm, 0, dpdk_esp6_decrypt_node.index, + ESP_DECRYPT_ERROR_ENQ_FAIL, numa); + } + else + { + vlib_node_increment_counter (vm, dpdk_esp4_decrypt_node.index, + ESP_DECRYPT_ERROR_RX_PKTS, + from_frame->n_vectors); - crypto_enqueue_ops (vm, cwm, 0, dpdk_esp_decrypt_node.index, + crypto_enqueue_ops (vm, cwm, 0, dpdk_esp4_decrypt_node.index, ESP_DECRYPT_ERROR_ENQ_FAIL, numa); + } crypto_free_ops (numa, ops, cwm->ops + from_frame->n_vectors - ops); return from_frame->n_vectors; } +static uword +dpdk_esp4_decrypt_node_fn (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * from_frame) +{ + return dpdk_esp_decrypt_inline(vm, node, from_frame, 0 /*is_ip6*/); +} + /* *INDENT-OFF* */ -VLIB_REGISTER_NODE (dpdk_esp_decrypt_node) = { - .function = dpdk_esp_decrypt_node_fn, - .name = "dpdk-esp-decrypt", +VLIB_REGISTER_NODE (dpdk_esp4_decrypt_node) = { + .function = dpdk_esp4_decrypt_node_fn, + .name = "dpdk4-esp-decrypt", .vector_size = sizeof (u32), .format_trace = format_esp_decrypt_trace, .type = VLIB_NODE_TYPE_INTERNAL, @@ -363,7 +404,7 @@ VLIB_REGISTER_NODE (dpdk_esp_decrypt_node) = { }; /* *INDENT-ON* */ -VLIB_NODE_FUNCTION_MULTIARCH (dpdk_esp_decrypt_node, dpdk_esp_decrypt_node_fn) +VLIB_NODE_FUNCTION_MULTIARCH (dpdk_esp4_decrypt_node, dpdk_esp4_decrypt_node_fn); /* * Decrypt Post Node @@ -385,7 +426,8 @@ static char * esp_decrypt_post_error_strings[] = { #undef _ }; -vlib_node_registration_t dpdk_esp_decrypt_post_node; +vlib_node_registration_t dpdk_esp4_decrypt_post_node; +vlib_node_registration_t dpdk_esp6_decrypt_post_node; static u8 * format_esp_decrypt_post_trace (u8 * s, va_list * args) { @@ -407,10 +449,11 @@ static u8 * format_esp_decrypt_post_trace (u8 * s, va_list * args) return s; } -static uword -dpdk_esp_decrypt_post_node_fn (vlib_main_t * vm, +always_inline uword +dpdk_esp_decrypt_post_inline (vlib_main_t * vm, vlib_node_runtime_t * node, - vlib_frame_t * from_frame) + vlib_frame_t * from_frame, + int is_ip6) { u32 n_left_from, *from, *to_next = 0, next_index; ipsec_sa_t * sa0; @@ -516,7 +559,12 @@ dpdk_esp_decrypt_post_node_fn (vlib_main_t * vm, else { clib_warning("next header: 0x%x", f0->next_header); - vlib_node_increment_counter (vm, dpdk_esp_decrypt_node.index, + if(is_ip6) + vlib_node_increment_counter (vm, dpdk_esp6_decrypt_node.index, + ESP_DECRYPT_ERROR_DECRYPTION_FAILED, + 1); + else + vlib_node_increment_counter (vm, dpdk_esp4_decrypt_node.index, ESP_DECRYPT_ERROR_DECRYPTION_FAILED, 1); goto trace; @@ -553,7 +601,12 @@ dpdk_esp_decrypt_post_node_fn (vlib_main_t * vm, else { clib_warning("next header: 0x%x", f0->next_header); - vlib_node_increment_counter (vm, dpdk_esp_decrypt_node.index, + if(is_ip6) + vlib_node_increment_counter (vm, dpdk_esp6_decrypt_node.index, + ESP_DECRYPT_ERROR_DECRYPTION_FAILED, + 1); + else + vlib_node_increment_counter (vm, dpdk_esp4_decrypt_node.index, ESP_DECRYPT_ERROR_DECRYPTION_FAILED, 1); goto trace; @@ -578,17 +631,56 @@ dpdk_esp_decrypt_post_node_fn (vlib_main_t * vm, vlib_put_next_frame (vm, node, next_index, n_left_to_next); } - vlib_node_increment_counter (vm, dpdk_esp_decrypt_post_node.index, + if(is_ip6) + vlib_node_increment_counter (vm, dpdk_esp6_decrypt_post_node.index, + ESP_DECRYPT_POST_ERROR_PKTS, + from_frame->n_vectors); + else + vlib_node_increment_counter (vm, dpdk_esp4_decrypt_post_node.index, ESP_DECRYPT_POST_ERROR_PKTS, from_frame->n_vectors); return from_frame->n_vectors; } +static uword +dpdk_esp4_decrypt_post_node_fn (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * from_frame) +{return 
dpdk_esp_decrypt_post_inline(vm, node, from_frame, 0/*is_ip6*/);} + +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE (dpdk_esp4_decrypt_post_node) = { + .function = dpdk_esp4_decrypt_post_node_fn, + .name = "dpdk4-esp-decrypt-post", + .vector_size = sizeof (u32), + .format_trace = format_esp_decrypt_post_trace, + .type = VLIB_NODE_TYPE_INTERNAL, + + .n_errors = ARRAY_LEN(esp_decrypt_post_error_strings), + .error_strings = esp_decrypt_post_error_strings, + + .n_next_nodes = ESP_DECRYPT_N_NEXT, + .next_nodes = { +#define _(s,n) [ESP_DECRYPT_NEXT_##s] = n, + foreach_esp_decrypt_next +#undef _ + }, +}; +/* *INDENT-ON* */ + +VLIB_NODE_FUNCTION_MULTIARCH (dpdk_esp4_decrypt_post_node, dpdk_esp4_decrypt_post_node_fn); + +static uword +dpdk_esp6_decrypt_post_node_fn (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * from_frame) +{return dpdk_esp_decrypt_post_inline(vm, node, from_frame, 0/*is_ip6*/);} + /* *INDENT-OFF* */ -VLIB_REGISTER_NODE (dpdk_esp_decrypt_post_node) = { - .function = dpdk_esp_decrypt_post_node_fn, - .name = "dpdk-esp-decrypt-post", +VLIB_REGISTER_NODE (dpdk_esp6_decrypt_post_node) = { + .function = dpdk_esp6_decrypt_post_node_fn, + .name = "dpdk6-esp-decrypt-post", .vector_size = sizeof (u32), .format_trace = format_esp_decrypt_post_trace, .type = VLIB_NODE_TYPE_INTERNAL, @@ -605,4 +697,4 @@ VLIB_REGISTER_NODE (dpdk_esp_decrypt_post_node) = { }; /* *INDENT-ON* */ -VLIB_NODE_FUNCTION_MULTIARCH (dpdk_esp_decrypt_post_node, dpdk_esp_decrypt_post_node_fn) +VLIB_NODE_FUNCTION_MULTIARCH (dpdk_esp6_decrypt_post_node, dpdk_esp6_decrypt_post_node_fn); diff --git a/src/plugins/dpdk/ipsec/esp_encrypt.c b/src/plugins/dpdk/ipsec/esp_encrypt.c index 7947a8e9d02..6be8e97b7c5 100644 --- a/src/plugins/dpdk/ipsec/esp_encrypt.c +++ b/src/plugins/dpdk/ipsec/esp_encrypt.c @@ -63,7 +63,8 @@ static char *esp_encrypt_error_strings[] = { #undef _ }; -vlib_node_registration_t dpdk_esp_encrypt_node; +vlib_node_registration_t dpdk_esp4_encrypt_node; +vlib_node_registration_t dpdk_esp6_encrypt_node; typedef struct { @@ -105,10 +106,10 @@ format_esp_encrypt_trace (u8 * s, va_list * args) return s; } -static uword -dpdk_esp_encrypt_node_fn (vlib_main_t * vm, - vlib_node_runtime_t * node, - vlib_frame_t * from_frame) +always_inline uword +dpdk_esp_encrypt_inline (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * from_frame, int is_ip6) { u32 n_left_from, *from, *to_next, next_index; ipsec_main_t *im = &ipsec_main; @@ -131,8 +132,12 @@ dpdk_esp_encrypt_node_fn (vlib_main_t * vm, ret = crypto_alloc_ops (numa, ops, n_left_from); if (ret) { - vlib_node_increment_counter (vm, dpdk_esp_encrypt_node.index, - ESP_ENCRYPT_ERROR_DISCARD, 1); + if (is_ip6) + vlib_node_increment_counter (vm, dpdk_esp6_encrypt_node.index, + ESP_ENCRYPT_ERROR_DISCARD, 1); + else + vlib_node_increment_counter (vm, dpdk_esp4_encrypt_node.index, + ESP_ENCRYPT_ERROR_DISCARD, 1); /* Discard whole frame */ return n_left_from; } @@ -156,7 +161,7 @@ dpdk_esp_encrypt_node_fn (vlib_main_t * vm, ip4_and_udp_and_esp_header_t *ouh0 = 0; esp_header_t *esp0; esp_footer_t *f0; - u8 is_ipv6, next_hdr_type; + u8 next_hdr_type; u32 iv_size; u16 orig_sz; u8 trunc_size; @@ -212,9 +217,14 @@ dpdk_esp_encrypt_node_fn (vlib_main_t * vm, { clib_warning ("unsupported SA by thread index %u", thread_idx); - vlib_node_increment_counter (vm, - dpdk_esp_encrypt_node.index, - ESP_ENCRYPT_ERROR_NOSUP, 1); + if (is_ip6) + vlib_node_increment_counter (vm, + dpdk_esp6_encrypt_node.index, + ESP_ENCRYPT_ERROR_NOSUP, 1); + else + vlib_node_increment_counter 
(vm, + dpdk_esp4_encrypt_node.index, + ESP_ENCRYPT_ERROR_NOSUP, 1); to_next[0] = bi0; to_next += 1; n_left_to_next -= 1; @@ -226,9 +236,16 @@ dpdk_esp_encrypt_node_fn (vlib_main_t * vm, if (PREDICT_FALSE (error || !session)) { clib_warning ("failed to get crypto session"); - vlib_node_increment_counter (vm, - dpdk_esp_encrypt_node.index, - ESP_ENCRYPT_ERROR_SESSION, 1); + if (is_ip6) + vlib_node_increment_counter (vm, + dpdk_esp6_encrypt_node.index, + ESP_ENCRYPT_ERROR_SESSION, + 1); + else + vlib_node_increment_counter (vm, + dpdk_esp4_encrypt_node.index, + ESP_ENCRYPT_ERROR_SESSION, + 1); to_next[0] = bi0; to_next += 1; n_left_to_next -= 1; @@ -242,8 +259,14 @@ dpdk_esp_encrypt_node_fn (vlib_main_t * vm, { clib_warning ("sequence number counter has cycled SPI %u", sa0->spi); - vlib_node_increment_counter (vm, dpdk_esp_encrypt_node.index, - ESP_ENCRYPT_ERROR_SEQ_CYCLED, 1); + if (is_ip6) + vlib_node_increment_counter (vm, + dpdk_esp6_encrypt_node.index, + ESP_ENCRYPT_ERROR_SEQ_CYCLED, 1); + else + vlib_node_increment_counter (vm, + dpdk_esp4_encrypt_node.index, + ESP_ENCRYPT_ERROR_SEQ_CYCLED, 1); //TODO: rekey SA to_next[0] = bi0; to_next += 1; @@ -264,19 +287,17 @@ dpdk_esp_encrypt_node_fn (vlib_main_t * vm, crypto_set_icb (icb, sa0->salt, sa0->seq, sa0->seq_hi); - is_ipv6 = (ih0->ip4.ip_version_and_header_length & 0xF0) == 0x60; - iv_size = cipher_alg->iv_len; trunc_size = auth_alg->trunc_size; /* if UDP encapsulation is used adjust the address of the IP header */ - if (sa0->udp_encap && !is_ipv6) + if (sa0->udp_encap && !is_ip6) udp_encap_adv = sizeof (udp_header_t); if (sa0->is_tunnel) { rewrite_len = 0; - if (!is_ipv6 && !sa0->is_tunnel_ip6) /* ip4inip4 */ + if (!is_ip6 && !sa0->is_tunnel_ip6) /* ip4inip4 */ { /* in tunnel mode send it back to FIB */ priv->next = DPDK_CRYPTO_INPUT_NEXT_IP4_LOOKUP; @@ -316,7 +337,7 @@ dpdk_esp_encrypt_node_fn (vlib_main_t * vm, esp0->spi = clib_host_to_net_u32 (sa0->spi); esp0->seq = clib_host_to_net_u32 (sa0->seq); } - else if (is_ipv6 && sa0->is_tunnel_ip6) /* ip6inip6 */ + else if (is_ip6 && sa0->is_tunnel_ip6) /* ip6inip6 */ { /* in tunnel mode send it back to FIB */ priv->next = DPDK_CRYPTO_INPUT_NEXT_IP6_LOOKUP; @@ -348,9 +369,14 @@ dpdk_esp_encrypt_node_fn (vlib_main_t * vm, } else /* unsupported ip4inip6, ip6inip4 */ { - vlib_node_increment_counter (vm, - dpdk_esp_encrypt_node.index, - ESP_ENCRYPT_ERROR_NOSUP, 1); + if (is_ip6) + vlib_node_increment_counter (vm, + dpdk_esp6_encrypt_node.index, + ESP_ENCRYPT_ERROR_NOSUP, 1); + else + vlib_node_increment_counter (vm, + dpdk_esp4_encrypt_node.index, + ESP_ENCRYPT_ERROR_NOSUP, 1); to_next[0] = bi0; to_next += 1; n_left_to_next -= 1; @@ -368,7 +394,7 @@ dpdk_esp_encrypt_node_fn (vlib_main_t * vm, u8 *dst = vlib_buffer_get_current (b0); oh0 = vlib_buffer_get_current (b0) + rewrite_len; - if (is_ipv6) + if (is_ip6) { orig_sz -= sizeof (ip6_header_t); ih6_0 = (ip6_and_esp_header_t *) ih0; @@ -424,7 +450,7 @@ dpdk_esp_encrypt_node_fn (vlib_main_t * vm, f0->pad_length = pad_bytes; f0->next_header = next_hdr_type; - if (is_ipv6) + if (is_ip6) { u16 len = b0->current_length - sizeof (ip6_header_t); oh6_0->ip6.payload_length = @@ -517,22 +543,70 @@ dpdk_esp_encrypt_node_fn (vlib_main_t * vm, } vlib_put_next_frame (vm, node, next_index, n_left_to_next); } - vlib_node_increment_counter (vm, dpdk_esp_encrypt_node.index, - ESP_ENCRYPT_ERROR_RX_PKTS, - from_frame->n_vectors); + if (is_ip6) + { + vlib_node_increment_counter (vm, dpdk_esp6_encrypt_node.index, + ESP_ENCRYPT_ERROR_RX_PKTS, + from_frame->n_vectors); 
+ + crypto_enqueue_ops (vm, cwm, 1, dpdk_esp6_encrypt_node.index, + ESP_ENCRYPT_ERROR_ENQ_FAIL, numa); + } + else + { + vlib_node_increment_counter (vm, dpdk_esp4_encrypt_node.index, + ESP_ENCRYPT_ERROR_RX_PKTS, + from_frame->n_vectors); - crypto_enqueue_ops (vm, cwm, 1, dpdk_esp_encrypt_node.index, - ESP_ENCRYPT_ERROR_ENQ_FAIL, numa); + crypto_enqueue_ops (vm, cwm, 1, dpdk_esp4_encrypt_node.index, + ESP_ENCRYPT_ERROR_ENQ_FAIL, numa); + } crypto_free_ops (numa, ops, cwm->ops + from_frame->n_vectors - ops); return from_frame->n_vectors; } +static uword +dpdk_esp4_encrypt_node_fn (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * from_frame) +{ + return dpdk_esp_encrypt_inline (vm, node, from_frame, 0 /*is_ip6 */ ); +} + +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE (dpdk_esp4_encrypt_node) = { + .function = dpdk_esp4_encrypt_node_fn, + .name = "dpdk4-esp-encrypt", + .flags = VLIB_NODE_FLAG_IS_OUTPUT, + .vector_size = sizeof (u32), + .format_trace = format_esp_encrypt_trace, + .n_errors = ARRAY_LEN (esp_encrypt_error_strings), + .error_strings = esp_encrypt_error_strings, + .n_next_nodes = 1, + .next_nodes = + { + [ESP_ENCRYPT_NEXT_DROP] = "error-drop", + } +}; +/* *INDENT-ON* */ + +VLIB_NODE_FUNCTION_MULTIARCH (dpdk_esp4_encrypt_node, + dpdk_esp4_encrypt_node_fn); + +static uword +dpdk_esp6_encrypt_node_fn (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * from_frame) +{ + return dpdk_esp_encrypt_inline (vm, node, from_frame, 1 /*is_ip6 */ ); +} + /* *INDENT-OFF* */ -VLIB_REGISTER_NODE (dpdk_esp_encrypt_node) = { - .function = dpdk_esp_encrypt_node_fn, - .name = "dpdk-esp-encrypt", +VLIB_REGISTER_NODE (dpdk_esp6_encrypt_node) = { + .function = dpdk_esp6_encrypt_node_fn, + .name = "dpdk6-esp-encrypt", .flags = VLIB_NODE_FLAG_IS_OUTPUT, .vector_size = sizeof (u32), .format_trace = format_esp_encrypt_trace, @@ -546,7 +620,8 @@ VLIB_REGISTER_NODE (dpdk_esp_encrypt_node) = { }; /* *INDENT-ON* */ -VLIB_NODE_FUNCTION_MULTIARCH (dpdk_esp_encrypt_node, dpdk_esp_encrypt_node_fn) +VLIB_NODE_FUNCTION_MULTIARCH (dpdk_esp6_encrypt_node, + dpdk_esp6_encrypt_node_fn); /* * fd.io coding-style-patch-verification: ON * diff --git a/src/plugins/dpdk/ipsec/ipsec.c b/src/plugins/dpdk/ipsec/ipsec.c index a7f501f9d9d..1bd46d55b8e 100644 --- a/src/plugins/dpdk/ipsec/ipsec.c +++ b/src/plugins/dpdk/ipsec/ipsec.c @@ -1051,20 +1051,36 @@ dpdk_ipsec_process (vlib_main_t * vm, vlib_node_runtime_t * rt, /* Add new next node and set it as default */ vlib_node_t *node, *next_node; - next_node = vlib_get_node_by_name (vm, (u8 *) "dpdk-esp-encrypt"); + next_node = vlib_get_node_by_name (vm, (u8 *) "dpdk-esp4-encrypt"); ASSERT (next_node); - node = vlib_get_node_by_name (vm, (u8 *) "ipsec-output-ip4"); + node = vlib_get_node_by_name (vm, (u8 *) "ipsec4-output"); ASSERT (node); - im->esp_encrypt_node_index = next_node->index; - im->esp_encrypt_next_index = + im->esp4_encrypt_node_index = next_node->index; + im->esp4_encrypt_next_index = vlib_node_add_next (vm, node->index, next_node->index); - next_node = vlib_get_node_by_name (vm, (u8 *) "dpdk-esp-decrypt"); + next_node = vlib_get_node_by_name (vm, (u8 *) "dpdk-esp4-decrypt"); ASSERT (next_node); - node = vlib_get_node_by_name (vm, (u8 *) "ipsec-input-ip4"); + node = vlib_get_node_by_name (vm, (u8 *) "ipsec4-input"); ASSERT (node); - im->esp_decrypt_node_index = next_node->index; - im->esp_decrypt_next_index = + im->esp4_decrypt_node_index = next_node->index; + im->esp4_decrypt_next_index = + vlib_node_add_next (vm, node->index, next_node->index); + + 
next_node = vlib_get_node_by_name (vm, (u8 *) "dpdk-esp6-encrypt"); + ASSERT (next_node); + node = vlib_get_node_by_name (vm, (u8 *) "ipsec6-output"); + ASSERT (node); + im->esp6_encrypt_node_index = next_node->index; + im->esp6_encrypt_next_index = + vlib_node_add_next (vm, node->index, next_node->index); + + next_node = vlib_get_node_by_name (vm, (u8 *) "dpdk-esp6-decrypt"); + ASSERT (next_node); + node = vlib_get_node_by_name (vm, (u8 *) "ipsec6-input"); + ASSERT (node); + im->esp6_decrypt_node_index = next_node->index; + im->esp6_decrypt_next_index = vlib_node_add_next (vm, node->index, next_node->index); im->cb.check_support_cb = dpdk_ipsec_check_support; diff --git a/src/vnet/ip/ip4_forward.c b/src/vnet/ip/ip4_forward.c index 69a8dbad805..23e90df8b8a 100644 --- a/src/vnet/ip/ip4_forward.c +++ b/src/vnet/ip/ip4_forward.c @@ -753,13 +753,13 @@ VNET_FEATURE_INIT (ip4_policer_classify, static) = { .arc_name = "ip4-unicast", .node_name = "ip4-policer-classify", - .runs_before = VNET_FEATURES ("ipsec-input-ip4"), + .runs_before = VNET_FEATURES ("ipsec4-input"), }; VNET_FEATURE_INIT (ip4_ipsec, static) = { .arc_name = "ip4-unicast", - .node_name = "ipsec-input-ip4", + .node_name = "ipsec4-input", .runs_before = VNET_FEATURES ("vpath-input-ip4"), }; @@ -839,13 +839,13 @@ VNET_FEATURE_INIT (ip4_outacl, static) = { .arc_name = "ip4-output", .node_name = "ip4-outacl", - .runs_before = VNET_FEATURES ("ipsec-output-ip4"), + .runs_before = VNET_FEATURES ("ipsec4-output"), }; VNET_FEATURE_INIT (ip4_ipsec_output, static) = { .arc_name = "ip4-output", - .node_name = "ipsec-output-ip4", + .node_name = "ipsec4-output", .runs_before = VNET_FEATURES ("interface-output"), }; diff --git a/src/vnet/ip/ip6_forward.c b/src/vnet/ip/ip6_forward.c index 00af2822ac4..9a9a64b54b8 100644 --- a/src/vnet/ip/ip6_forward.c +++ b/src/vnet/ip/ip6_forward.c @@ -361,13 +361,13 @@ VNET_FEATURE_INIT (ip6_policer_classify, static) = { .arc_name = "ip6-unicast", .node_name = "ip6-policer-classify", - .runs_before = VNET_FEATURES ("ipsec-input-ip6"), + .runs_before = VNET_FEATURES ("ipsec6-input"), }; VNET_FEATURE_INIT (ip6_ipsec, static) = { .arc_name = "ip6-unicast", - .node_name = "ipsec-input-ip6", + .node_name = "ipsec6-input", .runs_before = VNET_FEATURES ("l2tp-decap"), }; @@ -443,12 +443,12 @@ VNET_FEATURE_ARC_INIT (ip6_output, static) = VNET_FEATURE_INIT (ip6_outacl, static) = { .arc_name = "ip6-output", .node_name = "ip6-outacl", - .runs_before = VNET_FEATURES ("ipsec-output-ip6"), + .runs_before = VNET_FEATURES ("ipsec6-output"), }; VNET_FEATURE_INIT (ip6_ipsec_output, static) = { .arc_name = "ip6-output", - .node_name = "ipsec-output-ip6", + .node_name = "ipsec6-output", .runs_before = VNET_FEATURES ("interface-output"), }; diff --git a/src/vnet/ipsec/ah_decrypt.c b/src/vnet/ipsec/ah_decrypt.c index abe2e6f5f80..e3e0071a40f 100644 --- a/src/vnet/ipsec/ah_decrypt.c +++ b/src/vnet/ipsec/ah_decrypt.c @@ -23,11 +23,11 @@ #include <vnet/ipsec/esp.h> #include <vnet/ipsec/ah.h> -#define foreach_ah_decrypt_next \ -_(DROP, "error-drop") \ -_(IP4_INPUT, "ip4-input") \ -_(IP6_INPUT, "ip6-input") \ -_(IPSEC_GRE_INPUT, "ipsec-gre-input") +#define foreach_ah_decrypt_next \ + _ (DROP, "error-drop") \ + _ (IP4_INPUT, "ip4-input") \ + _ (IP6_INPUT, "ip6-input") \ + _ (IPSEC_GRE_INPUT, "ipsec-gre-input") #define _(v, s) AH_DECRYPT_NEXT_##v, typedef enum @@ -37,14 +37,11 @@ typedef enum AH_DECRYPT_N_NEXT, } ah_decrypt_next_t; - -#define foreach_ah_decrypt_error \ - _(RX_PKTS, "AH pkts received") \ - _(DECRYPTION_FAILED, "AH decryption 
failed") \ - _(INTEG_ERROR, "Integrity check failed") \ - _(REPLAY, "SA replayed packet") \ - _(NOT_IP, "Not IP packet (dropped)") - +#define foreach_ah_decrypt_error \ + _ (RX_PKTS, "AH pkts received") \ + _ (DECRYPTION_FAILED, "AH decryption failed") \ + _ (INTEG_ERROR, "Integrity check failed") \ + _ (REPLAY, "SA replayed packet") typedef enum { @@ -77,9 +74,10 @@ format_ah_decrypt_trace (u8 * s, va_list * args) return s; } -static uword -ah_decrypt_node_fn (vlib_main_t * vm, - vlib_node_runtime_t * node, vlib_frame_t * from_frame) +always_inline uword +ah_decrypt_inline (vlib_main_t * vm, + vlib_node_runtime_t * node, vlib_frame_t * from_frame, + int is_ip6) { u32 n_left_from, *from, next_index, *to_next; ipsec_main_t *im = &ipsec_main; @@ -107,8 +105,6 @@ ah_decrypt_node_fn (vlib_main_t * vm, u32 seq; ip4_header_t *ih4 = 0, *oh4 = 0; ip6_header_t *ih6 = 0, *oh6 = 0; - u8 tunnel_mode = 1; - u8 transport_ip6 = 0; u8 ip_hdr_size = 0; u8 tos = 0; u8 ttl = 0; @@ -133,12 +129,7 @@ ah_decrypt_node_fn (vlib_main_t * vm, sa_index0 = vnet_buffer (i_b0)->ipsec.sad_index; sa0 = pool_elt_at_index (im->sad, sa_index0); - if ((ih4->ip_version_and_header_length & 0xF0) == 0x40) - { - ip_hdr_size = ip4_header_bytes (ih4); - ah0 = (ah_header_t *) ((u8 *) ih4 + ip_hdr_size); - } - else if ((ih4->ip_version_and_header_length & 0xF0) == 0x60) + if (is_ip6) { ip6_ext_header_t *prev = NULL; ip6_ext_header_find_t (ih6, prev, ah0, IP_PROTOCOL_IPSEC_AH); @@ -147,9 +138,8 @@ ah_decrypt_node_fn (vlib_main_t * vm, } else { - vlib_node_increment_counter (vm, ah_decrypt_node.index, - AH_DECRYPT_ERROR_NOT_IP, 1); - goto trace; + ip_hdr_size = ip4_header_bytes (ih4); + ah0 = (ah_header_t *) ((u8 *) ih4 + ip_hdr_size); } seq = clib_host_to_net_u32 (ah0->seq_no); @@ -167,8 +157,14 @@ ah_decrypt_node_fn (vlib_main_t * vm, if (PREDICT_FALSE (rv)) { clib_warning ("anti-replay SPI %u seq %u", sa0->spi, seq); - vlib_node_increment_counter (vm, ah_decrypt_node.index, - AH_DECRYPT_ERROR_REPLAY, 1); + if (is_ip6) + vlib_node_increment_counter (vm, + ah6_decrypt_node.index, + AH_DECRYPT_ERROR_REPLAY, 1); + else + vlib_node_increment_counter (vm, + ah6_decrypt_node.index, + AH_DECRYPT_ERROR_REPLAY, 1); to_next[0] = i_bi0; to_next += 1; goto trace; @@ -189,18 +185,7 @@ ah_decrypt_node_fn (vlib_main_t * vm, memcpy (digest, icv, icv_size); memset (icv, 0, icv_size); - if ((ih4->ip_version_and_header_length & 0xF0) == 0x40) - { - tos = ih4->tos; - ttl = ih4->ttl; - ih4->tos = 0; - ih4->ttl = 0; - ih4->checksum = 0; - ih4->flags_and_fragment_offset = 0; - icv_padding_len = - ah_calc_icv_padding_len (icv_size, 0 /* is_ipv6 */ ); - } - else + if (is_ip6) { ip_version_traffic_class_and_flow_label = ih6->ip_version_traffic_class_and_flow_label; @@ -211,15 +196,33 @@ ah_decrypt_node_fn (vlib_main_t * vm, icv_padding_len = ah_calc_icv_padding_len (icv_size, 1 /* is_ipv6 */ ); } + else + { + tos = ih4->tos; + ttl = ih4->ttl; + ih4->tos = 0; + ih4->ttl = 0; + ih4->checksum = 0; + ih4->flags_and_fragment_offset = 0; + icv_padding_len = + ah_calc_icv_padding_len (icv_size, 0 /* is_ipv6 */ ); + } hmac_calc (sa0->integ_alg, sa0->integ_key, sa0->integ_key_len, (u8 *) ih4, i_b0->current_length, sig, sa0->use_esn, sa0->seq_hi); if (PREDICT_FALSE (memcmp (digest, sig, icv_size))) { - vlib_node_increment_counter (vm, ah_decrypt_node.index, - AH_DECRYPT_ERROR_INTEG_ERROR, - 1); + if (is_ip6) + vlib_node_increment_counter (vm, + ah6_decrypt_node.index, + AH_DECRYPT_ERROR_INTEG_ERROR, + 1); + else + vlib_node_increment_counter (vm, + 
ah4_decrypt_node.index, + AH_DECRYPT_ERROR_INTEG_ERROR, + 1); to_next[0] = i_bi0; to_next += 1; goto trace; @@ -241,30 +244,8 @@ ah_decrypt_node_fn (vlib_main_t * vm, icv_padding_len); i_b0->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID; - /* transport mode */ - if (PREDICT_FALSE (!sa0->is_tunnel && !sa0->is_tunnel_ip6)) - { - tunnel_mode = 0; - - if (PREDICT_TRUE - ((ih4->ip_version_and_header_length & 0xF0) != 0x40)) - { - if (PREDICT_TRUE - ((ih4->ip_version_and_header_length & 0xF0) == 0x60)) - transport_ip6 = 1; - else - { - clib_warning ("next header: 0x%x", ah0->nexthdr); - vlib_node_increment_counter (vm, ah_decrypt_node.index, - AH_DECRYPT_ERROR_NOT_IP, - 1); - goto trace; - } - } - } - - if (PREDICT_TRUE (tunnel_mode)) - { + if (PREDICT_TRUE (sa0->is_tunnel)) + { /* tunnel mode */ if (PREDICT_TRUE (ah0->nexthdr == IP_PROTOCOL_IP_IN_IP)) next0 = AH_DECRYPT_NEXT_IP4_INPUT; else if (ah0->nexthdr == IP_PROTOCOL_IPV6) @@ -272,16 +253,22 @@ ah_decrypt_node_fn (vlib_main_t * vm, else { clib_warning ("next header: 0x%x", ah0->nexthdr); - vlib_node_increment_counter (vm, ah_decrypt_node.index, - AH_DECRYPT_ERROR_DECRYPTION_FAILED, - 1); + if (is_ip6) + vlib_node_increment_counter (vm, + ah6_decrypt_node.index, + AH_DECRYPT_ERROR_DECRYPTION_FAILED, + 1); + else + vlib_node_increment_counter (vm, + ah4_decrypt_node.index, + AH_DECRYPT_ERROR_DECRYPTION_FAILED, + 1); goto trace; } } - /* transport mode */ else - { - if (PREDICT_FALSE (transport_ip6)) + { /* transport mode */ + if (is_ip6) { vlib_buffer_advance (i_b0, -sizeof (ip6_header_t)); oh6 = vlib_buffer_get_current (i_b0); @@ -337,18 +324,58 @@ ah_decrypt_node_fn (vlib_main_t * vm, } vlib_put_next_frame (vm, node, next_index, n_left_to_next); } - vlib_node_increment_counter (vm, ah_decrypt_node.index, - AH_DECRYPT_ERROR_RX_PKTS, - from_frame->n_vectors); + if (is_ip6) + vlib_node_increment_counter (vm, ah6_decrypt_node.index, + AH_DECRYPT_ERROR_RX_PKTS, + from_frame->n_vectors); + else + vlib_node_increment_counter (vm, ah4_decrypt_node.index, + AH_DECRYPT_ERROR_RX_PKTS, + from_frame->n_vectors); return from_frame->n_vectors; } +static uword +ah4_decrypt_node_fn (vlib_main_t * vm, + vlib_node_runtime_t * node, vlib_frame_t * from_frame) +{ + return ah_decrypt_inline (vm, node, from_frame, 0 /* is_ip6 */ ); +} + +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE (ah4_decrypt_node) = { + .function = ah4_decrypt_node_fn, + .name = "ah4-decrypt", + .vector_size = sizeof (u32), + .format_trace = format_ah_decrypt_trace, + .type = VLIB_NODE_TYPE_INTERNAL, + + .n_errors = ARRAY_LEN(ah_decrypt_error_strings), + .error_strings = ah_decrypt_error_strings, + + .n_next_nodes = AH_DECRYPT_N_NEXT, + .next_nodes = { +#define _(s,n) [AH_DECRYPT_NEXT_##s] = n, + foreach_ah_decrypt_next +#undef _ + }, +}; +/* *INDENT-ON* */ + +VLIB_NODE_FUNCTION_MULTIARCH (ah4_decrypt_node, ah4_decrypt_node_fn); + +static uword +ah6_decrypt_node_fn (vlib_main_t * vm, + vlib_node_runtime_t * node, vlib_frame_t * from_frame) +{ + return ah_decrypt_inline (vm, node, from_frame, 1 /* is_ip6 */ ); +} /* *INDENT-OFF* */ -VLIB_REGISTER_NODE (ah_decrypt_node) = { - .function = ah_decrypt_node_fn, - .name = "ah-decrypt", +VLIB_REGISTER_NODE (ah6_decrypt_node) = { + .function = ah6_decrypt_node_fn, + .name = "ah6-decrypt", .vector_size = sizeof (u32), .format_trace = format_ah_decrypt_trace, .type = VLIB_NODE_TYPE_INTERNAL, @@ -365,7 +392,7 @@ VLIB_REGISTER_NODE (ah_decrypt_node) = { }; /* *INDENT-ON* */ -VLIB_NODE_FUNCTION_MULTIARCH (ah_decrypt_node, ah_decrypt_node_fn) 
+VLIB_NODE_FUNCTION_MULTIARCH (ah6_decrypt_node, ah6_decrypt_node_fn); /* * fd.io coding-style-patch-verification: ON * diff --git a/src/vnet/ipsec/ah_encrypt.c b/src/vnet/ipsec/ah_encrypt.c index 911dd333882..856eab62c91 100644 --- a/src/vnet/ipsec/ah_encrypt.c +++ b/src/vnet/ipsec/ah_encrypt.c @@ -23,11 +23,12 @@ #include <vnet/ipsec/esp.h> #include <vnet/ipsec/ah.h> -#define foreach_ah_encrypt_next \ -_(DROP, "error-drop") \ -_(IP4_LOOKUP, "ip4-lookup") \ -_(IP6_LOOKUP, "ip6-lookup") \ -_(INTERFACE_OUTPUT, "interface-output") +#define foreach_ah_encrypt_next \ + _ (DROP, "error-drop") \ + _ (IP4_LOOKUP, "ip4-lookup") \ + _ (IP6_LOOKUP, "ip6-lookup") \ + _ (INTERFACE_OUTPUT, "interface-output") + #define _(v, s) AH_ENCRYPT_NEXT_##v, typedef enum @@ -56,8 +57,6 @@ static char *ah_encrypt_error_strings[] = { #undef _ }; -vlib_node_registration_t ah_encrypt_node; - typedef struct { u32 spi; @@ -78,9 +77,10 @@ format_ah_encrypt_trace (u8 * s, va_list * args) return s; } -static uword -ah_encrypt_node_fn (vlib_main_t * vm, - vlib_node_runtime_t * node, vlib_frame_t * from_frame) +always_inline uword +ah_encrypt_inline (vlib_main_t * vm, + vlib_node_runtime_t * node, vlib_frame_t * from_frame, + int is_ip6) { u32 n_left_from, *from, *to_next = 0, next_index; int icv_size = 0; @@ -104,10 +104,8 @@ ah_encrypt_node_fn (vlib_main_t * vm, ipsec_sa_t *sa0; ip4_and_ah_header_t *ih0, *oh0 = 0; ip6_and_ah_header_t *ih6_0, *oh6_0 = 0; - u8 is_ipv6; u8 ip_hdr_size; u8 next_hdr_type; - u8 transport_mode = 0; u8 tos = 0; u8 ttl = 0; u8 hop_limit = 0; @@ -129,8 +127,12 @@ ah_encrypt_node_fn (vlib_main_t * vm, { clib_warning ("sequence number counter has cycled SPI %u", sa0->spi); - vlib_node_increment_counter (vm, ah_encrypt_node.index, - AH_ENCRYPT_ERROR_SEQ_CYCLED, 1); + if (is_ip6) + vlib_node_increment_counter (vm, ah6_encrypt_node.index, + AH_ENCRYPT_ERROR_SEQ_CYCLED, 1); + else + vlib_node_increment_counter (vm, ah4_encrypt_node.index, + AH_ENCRYPT_ERROR_SEQ_CYCLED, 1); //TODO need to confirm if below is needed to_next[0] = i_bi0; to_next += 1; @@ -145,14 +147,12 @@ ah_encrypt_node_fn (vlib_main_t * vm, ttl = ih0->ip4.ttl; tos = ih0->ip4.tos; - is_ipv6 = (ih0->ip4.ip_version_and_header_length & 0xF0) == 0x60; - /* is ipv6 */ if (PREDICT_TRUE (sa0->is_tunnel)) { - if (PREDICT_TRUE (!is_ipv6)) - adv = -sizeof (ip4_and_ah_header_t); - else + if (is_ip6) adv = -sizeof (ip6_and_ah_header_t); + else + adv = -sizeof (ip4_and_ah_header_t); } else { @@ -161,9 +161,9 @@ ah_encrypt_node_fn (vlib_main_t * vm, icv_size = em->ipsec_proto_main_integ_algs[sa0->integ_alg].trunc_size; - const u8 padding_len = ah_calc_icv_padding_len (icv_size, is_ipv6); + const u8 padding_len = ah_calc_icv_padding_len (icv_size, is_ip6); adv -= padding_len; - /*transport mode save the eth header before it is overwritten */ + /* transport mode save the eth header before it is overwritten */ if (PREDICT_FALSE (!sa0->is_tunnel)) { ethernet_header_t *ieh0 = (ethernet_header_t *) @@ -176,8 +176,8 @@ ah_encrypt_node_fn (vlib_main_t * vm, vlib_buffer_advance (i_b0, adv - icv_size); - if (PREDICT_FALSE (is_ipv6)) - { /* is ipv6 */ + if (is_ip6) + { ih6_0 = (ip6_and_ah_header_t *) ih0; ip_hdr_size = sizeof (ip6_header_t); oh6_0 = vlib_buffer_get_current (i_b0); @@ -241,8 +241,7 @@ ah_encrypt_node_fn (vlib_main_t * vm, } - if (PREDICT_TRUE - (!is_ipv6 && sa0->is_tunnel && !sa0->is_tunnel_ip6)) + if (PREDICT_TRUE (!is_ip6 && sa0->is_tunnel && !sa0->is_tunnel_ip6)) { oh0->ip4.src_address.as_u32 = sa0->tunnel_src_addr.ip4.as_u32; 
oh0->ip4.dst_address.as_u32 = sa0->tunnel_dst_addr.ip4.as_u32; @@ -250,7 +249,7 @@ ah_encrypt_node_fn (vlib_main_t * vm, next0 = AH_ENCRYPT_NEXT_IP4_LOOKUP; vnet_buffer (i_b0)->sw_if_index[VLIB_TX] = (u32) ~ 0; } - else if (is_ipv6 && sa0->is_tunnel && sa0->is_tunnel_ip6) + else if (is_ip6 && sa0->is_tunnel && sa0->is_tunnel_ip6) { oh6_0->ip6.src_address.as_u64[0] = sa0->tunnel_src_addr.ip6.as_u64[0]; @@ -264,11 +263,6 @@ ah_encrypt_node_fn (vlib_main_t * vm, next0 = AH_ENCRYPT_NEXT_IP6_LOOKUP; vnet_buffer (i_b0)->sw_if_index[VLIB_TX] = (u32) ~ 0; } - else - { - transport_mode = 1; - next0 = AH_ENCRYPT_NEXT_INTERFACE_OUTPUT; - } u8 sig[64]; memset (sig, 0, sizeof (sig)); @@ -284,7 +278,7 @@ ah_encrypt_node_fn (vlib_main_t * vm, sa0->seq_hi); memcpy (digest, sig, size); - if (PREDICT_FALSE (is_ipv6)) + if (is_ip6) { oh6_0->ip6.hop_limit = hop_limit; oh6_0->ip6.ip_version_traffic_class_and_flow_label = @@ -297,8 +291,11 @@ ah_encrypt_node_fn (vlib_main_t * vm, oh0->ip4.checksum = ip4_header_checksum (&oh0->ip4); } - if (transport_mode) - vlib_buffer_advance (i_b0, -sizeof (ethernet_header_t)); + if (!sa0->is_tunnel) + { + next0 = AH_ENCRYPT_NEXT_INTERFACE_OUTPUT; + vlib_buffer_advance (i_b0, -sizeof (ethernet_header_t)); + } trace: if (PREDICT_FALSE (i_b0->flags & VLIB_BUFFER_IS_TRACED)) @@ -317,18 +314,58 @@ ah_encrypt_node_fn (vlib_main_t * vm, } vlib_put_next_frame (vm, node, next_index, n_left_to_next); } - vlib_node_increment_counter (vm, ah_encrypt_node.index, - AH_ENCRYPT_ERROR_RX_PKTS, - from_frame->n_vectors); + if (is_ip6) + vlib_node_increment_counter (vm, ah6_encrypt_node.index, + AH_ENCRYPT_ERROR_RX_PKTS, + from_frame->n_vectors); + else + vlib_node_increment_counter (vm, ah4_encrypt_node.index, + AH_ENCRYPT_ERROR_RX_PKTS, + from_frame->n_vectors); return from_frame->n_vectors; } +static uword +ah4_encrypt_node_fn (vlib_main_t * vm, + vlib_node_runtime_t * node, vlib_frame_t * from_frame) +{ + return ah_encrypt_inline (vm, node, from_frame, 0 /* is_ip6 */ ); +} + +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE (ah4_encrypt_node) = { + .function = ah4_encrypt_node_fn, + .name = "ah4-encrypt", + .vector_size = sizeof (u32), + .format_trace = format_ah_encrypt_trace, + .type = VLIB_NODE_TYPE_INTERNAL, + + .n_errors = ARRAY_LEN(ah_encrypt_error_strings), + .error_strings = ah_encrypt_error_strings, + + .n_next_nodes = AH_ENCRYPT_N_NEXT, + .next_nodes = { +#define _(s,n) [AH_ENCRYPT_NEXT_##s] = n, + foreach_ah_encrypt_next +#undef _ + }, +}; +/* *INDENT-ON* */ + +VLIB_NODE_FUNCTION_MULTIARCH (ah4_encrypt_node, ah4_encrypt_node_fn); + +static uword +ah6_encrypt_node_fn (vlib_main_t * vm, + vlib_node_runtime_t * node, vlib_frame_t * from_frame) +{ + return ah_encrypt_inline (vm, node, from_frame, 1 /* is_ip6 */ ); +} /* *INDENT-OFF* */ -VLIB_REGISTER_NODE (ah_encrypt_node) = { - .function = ah_encrypt_node_fn, - .name = "ah-encrypt", +VLIB_REGISTER_NODE (ah6_encrypt_node) = { + .function = ah6_encrypt_node_fn, + .name = "ah6-encrypt", .vector_size = sizeof (u32), .format_trace = format_ah_encrypt_trace, .type = VLIB_NODE_TYPE_INTERNAL, @@ -345,7 +382,7 @@ VLIB_REGISTER_NODE (ah_encrypt_node) = { }; /* *INDENT-ON* */ -VLIB_NODE_FUNCTION_MULTIARCH (ah_encrypt_node, ah_encrypt_node_fn) +VLIB_NODE_FUNCTION_MULTIARCH (ah6_encrypt_node, ah6_encrypt_node_fn); /* * fd.io coding-style-patch-verification: ON * diff --git a/src/vnet/ipsec/esp_decrypt.c b/src/vnet/ipsec/esp_decrypt.c index a0eeed464da..bfddb9ece8d 100644 --- a/src/vnet/ipsec/esp_decrypt.c +++ b/src/vnet/ipsec/esp_decrypt.c @@ -112,9 
+112,10 @@ esp_decrypt_cbc (ipsec_crypto_alg_t alg, EVP_DecryptFinal_ex (ctx, out + out_len, &out_len); } -static uword -esp_decrypt_node_fn (vlib_main_t * vm, - vlib_node_runtime_t * node, vlib_frame_t * from_frame) +always_inline uword +esp_decrypt_inline (vlib_main_t * vm, + vlib_node_runtime_t * node, vlib_frame_t * from_frame, + int is_ip6) { u32 n_left_from, *from, next_index, *to_next; ipsec_main_t *im = &ipsec_main; @@ -130,8 +131,14 @@ esp_decrypt_node_fn (vlib_main_t * vm, if (PREDICT_FALSE (vec_len (empty_buffers) < n_left_from)) { - vlib_node_increment_counter (vm, esp_decrypt_node.index, - ESP_DECRYPT_ERROR_NO_BUFFER, n_left_from); + if (is_ip6) + vlib_node_increment_counter (vm, esp6_decrypt_node.index, + ESP_DECRYPT_ERROR_NO_BUFFER, + n_left_from); + else + vlib_node_increment_counter (vm, esp4_decrypt_node.index, + ESP_DECRYPT_ERROR_NO_BUFFER, + n_left_from); goto free_buffers_and_exit; } @@ -155,8 +162,6 @@ esp_decrypt_node_fn (vlib_main_t * vm, ip4_header_t *ih4 = 0, *oh4 = 0; ip6_header_t *ih6 = 0, *oh6 = 0; u8 tunnel_mode = 1; - u8 transport_ip6 = 0; - i_bi0 = from[0]; from += 1; @@ -186,8 +191,14 @@ esp_decrypt_node_fn (vlib_main_t * vm, if (PREDICT_FALSE (rv)) { clib_warning ("anti-replay SPI %u seq %u", sa0->spi, seq); - vlib_node_increment_counter (vm, esp_decrypt_node.index, - ESP_DECRYPT_ERROR_REPLAY, 1); + if (is_ip6) + vlib_node_increment_counter (vm, + esp6_decrypt_node.index, + ESP_DECRYPT_ERROR_REPLAY, 1); + else + vlib_node_increment_counter (vm, + esp4_decrypt_node.index, + ESP_DECRYPT_ERROR_REPLAY, 1); o_bi0 = i_bi0; to_next[0] = o_bi0; to_next += 1; @@ -214,9 +225,16 @@ esp_decrypt_node_fn (vlib_main_t * vm, if (PREDICT_FALSE (memcmp (icv, sig, icv_size))) { - vlib_node_increment_counter (vm, esp_decrypt_node.index, - ESP_DECRYPT_ERROR_INTEG_ERROR, - 1); + if (is_ip6) + vlib_node_increment_counter (vm, + esp6_decrypt_node.index, + ESP_DECRYPT_ERROR_INTEG_ERROR, + 1); + else + vlib_node_increment_counter (vm, + esp4_decrypt_node.index, + ESP_DECRYPT_ERROR_INTEG_ERROR, + 1); o_bi0 = i_bi0; to_next[0] = o_bi0; to_next += 1; @@ -269,37 +287,19 @@ esp_decrypt_node_fn (vlib_main_t * vm, { tunnel_mode = 0; - if (i_b0->flags & VNET_BUFFER_F_IS_IP4) - ih4 = - (ip4_header_t *) ((u8 *) esp0 - sizeof (ip4_header_t)); - else - ih4 = - (ip4_header_t *) ((u8 *) esp0 - sizeof (ip6_header_t)); - - if (PREDICT_TRUE - ((ih4->ip_version_and_header_length & 0xF0) != 0x40)) + if (is_ip6) { - if (PREDICT_TRUE - ((ih4->ip_version_and_header_length & 0xF0) == - 0x60)) - { - transport_ip6 = 1; - ip_hdr_size = sizeof (ip6_header_t); - ih6 = (ip6_header_t *) ih4; - oh6 = vlib_buffer_get_current (o_b0); - } - else - { - vlib_node_increment_counter (vm, - esp_decrypt_node.index, - ESP_DECRYPT_ERROR_NOT_IP, - 1); - o_b0 = 0; - goto trace; - } + ih6 = + (ip6_header_t *) ((u8 *) esp0 - + sizeof (ip6_header_t)); + ip_hdr_size = sizeof (ip6_header_t); + oh6 = vlib_buffer_get_current (o_b0); } else { + ih4 = + (ip4_header_t *) ((u8 *) esp0 - + sizeof (ip4_header_t)); oh4 = vlib_buffer_get_current (o_b0); ip_hdr_size = sizeof (ip4_header_t); } @@ -331,9 +331,16 @@ esp_decrypt_node_fn (vlib_main_t * vm, else { clib_warning ("next header: 0x%x", f0->next_header); - vlib_node_increment_counter (vm, esp_decrypt_node.index, - ESP_DECRYPT_ERROR_DECRYPTION_FAILED, - 1); + if (is_ip6) + vlib_node_increment_counter (vm, + esp6_decrypt_node.index, + ESP_DECRYPT_ERROR_DECRYPTION_FAILED, + 1); + else + vlib_node_increment_counter (vm, + esp4_decrypt_node.index, + ESP_DECRYPT_ERROR_DECRYPTION_FAILED, + 
1); o_b0 = 0; goto trace; } @@ -341,7 +348,7 @@ esp_decrypt_node_fn (vlib_main_t * vm, /* transport mode */ else { - if (PREDICT_FALSE (transport_ip6)) + if (is_ip6) { next0 = ESP_DECRYPT_NEXT_IP6_INPUT; oh6->ip_version_traffic_class_and_flow_label = @@ -405,9 +412,15 @@ esp_decrypt_node_fn (vlib_main_t * vm, } vlib_put_next_frame (vm, node, next_index, n_left_to_next); } - vlib_node_increment_counter (vm, esp_decrypt_node.index, - ESP_DECRYPT_ERROR_RX_PKTS, - from_frame->n_vectors); + if (is_ip6) + vlib_node_increment_counter (vm, esp6_decrypt_node.index, + ESP_DECRYPT_ERROR_RX_PKTS, + from_frame->n_vectors); + else + vlib_node_increment_counter (vm, esp4_decrypt_node.index, + ESP_DECRYPT_ERROR_RX_PKTS, + from_frame->n_vectors); + free_buffers_and_exit: if (recycle) @@ -416,11 +429,46 @@ free_buffers_and_exit: return from_frame->n_vectors; } +static uword +esp4_decrypt_node_fn (vlib_main_t * vm, + vlib_node_runtime_t * node, vlib_frame_t * from_frame) +{ + return esp_decrypt_inline (vm, node, from_frame, 0 /* is_ip6 */ ); +} + +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE (esp4_decrypt_node) = { + .function = esp4_decrypt_node_fn, + .name = "esp4-decrypt", + .vector_size = sizeof (u32), + .format_trace = format_esp_decrypt_trace, + .type = VLIB_NODE_TYPE_INTERNAL, + + .n_errors = ARRAY_LEN(esp_decrypt_error_strings), + .error_strings = esp_decrypt_error_strings, + + .n_next_nodes = ESP_DECRYPT_N_NEXT, + .next_nodes = { +#define _(s,n) [ESP_DECRYPT_NEXT_##s] = n, + foreach_esp_decrypt_next +#undef _ + }, +}; +/* *INDENT-ON* */ + +VLIB_NODE_FUNCTION_MULTIARCH (esp4_decrypt_node, esp4_decrypt_node_fn); + +static uword +esp6_decrypt_node_fn (vlib_main_t * vm, + vlib_node_runtime_t * node, vlib_frame_t * from_frame) +{ + return esp_decrypt_inline (vm, node, from_frame, 1 /* is_ip6 */ ); +} /* *INDENT-OFF* */ -VLIB_REGISTER_NODE (esp_decrypt_node) = { - .function = esp_decrypt_node_fn, - .name = "esp-decrypt", +VLIB_REGISTER_NODE (esp6_decrypt_node) = { + .function = esp6_decrypt_node_fn, + .name = "esp6-decrypt", .vector_size = sizeof (u32), .format_trace = format_esp_decrypt_trace, .type = VLIB_NODE_TYPE_INTERNAL, @@ -437,7 +485,7 @@ VLIB_REGISTER_NODE (esp_decrypt_node) = { }; /* *INDENT-ON* */ -VLIB_NODE_FUNCTION_MULTIARCH (esp_decrypt_node, esp_decrypt_node_fn) +VLIB_NODE_FUNCTION_MULTIARCH (esp6_decrypt_node, esp6_decrypt_node_fn); /* * fd.io coding-style-patch-verification: ON * diff --git a/src/vnet/ipsec/esp_encrypt.c b/src/vnet/ipsec/esp_encrypt.c index 4291e946b36..9c775ef1e47 100644 --- a/src/vnet/ipsec/esp_encrypt.c +++ b/src/vnet/ipsec/esp_encrypt.c @@ -60,8 +60,6 @@ static char *esp_encrypt_error_strings[] = { #undef _ }; -vlib_node_registration_t esp_encrypt_node; - typedef struct { u32 spi; @@ -120,9 +118,10 @@ esp_encrypt_cbc (vlib_main_t * vm, ipsec_crypto_alg_t alg, EVP_EncryptFinal_ex (ctx, out + out_len, &out_len); } -static uword -esp_encrypt_node_fn (vlib_main_t * vm, - vlib_node_runtime_t * node, vlib_frame_t * from_frame) +always_inline uword +esp_encrypt_inline (vlib_main_t * vm, + vlib_node_runtime_t * node, vlib_frame_t * from_frame, + int is_ip6) { u32 n_left_from, *from, *to_next = 0, next_index; from = vlib_frame_vector_args (from_frame); @@ -138,8 +137,14 @@ esp_encrypt_node_fn (vlib_main_t * vm, if (PREDICT_FALSE (vec_len (empty_buffers) < n_left_from)) { - vlib_node_increment_counter (vm, esp_encrypt_node.index, - ESP_ENCRYPT_ERROR_NO_BUFFER, n_left_from); + if (is_ip6) + vlib_node_increment_counter (vm, esp6_encrypt_node.index, + ESP_ENCRYPT_ERROR_NO_BUFFER, + 
n_left_from); + else + vlib_node_increment_counter (vm, esp4_encrypt_node.index, + ESP_ENCRYPT_ERROR_NO_BUFFER, + n_left_from); clib_warning ("no enough empty buffers. discarding frame"); goto free_buffers_and_exit; } @@ -164,7 +169,6 @@ esp_encrypt_node_fn (vlib_main_t * vm, uword last_empty_buffer; esp_header_t *o_esp0; esp_footer_t *f0; - u8 is_ipv6; u8 ip_udp_hdr_size; u8 next_hdr_type; u32 ip_proto = 0; @@ -185,8 +189,12 @@ esp_encrypt_node_fn (vlib_main_t * vm, { clib_warning ("sequence number counter has cycled SPI %u", sa0->spi); - vlib_node_increment_counter (vm, esp_encrypt_node.index, - ESP_ENCRYPT_ERROR_SEQ_CYCLED, 1); + if (is_ip6) + vlib_node_increment_counter (vm, esp6_encrypt_node.index, + ESP_ENCRYPT_ERROR_SEQ_CYCLED, 1); + else + vlib_node_increment_counter (vm, esp4_encrypt_node.index, + ESP_ENCRYPT_ERROR_SEQ_CYCLED, 1); //TODO: rekey SA o_bi0 = i_bi0; to_next[0] = o_bi0; @@ -213,11 +221,8 @@ esp_encrypt_node_fn (vlib_main_t * vm, /* add old buffer to the recycle list */ vec_add1 (recycle, i_bi0); - /* is ipv6 */ - if (PREDICT_FALSE - ((iuh0->ip4.ip_version_and_header_length & 0xF0) == 0x60)) + if (is_ip6) { - is_ipv6 = 1; ih6_0 = vlib_buffer_get_current (i_b0); next_hdr_type = IP_PROTOCOL_IPV6; oh6_0 = vlib_buffer_get_current (o_b0); @@ -244,7 +249,6 @@ esp_encrypt_node_fn (vlib_main_t * vm, } else { - is_ipv6 = 0; next_hdr_type = IP_PROTOCOL_IP_IN_IP; oh0 = vlib_buffer_get_current (o_b0); ouh0 = vlib_buffer_get_current (o_b0); @@ -280,15 +284,14 @@ esp_encrypt_node_fn (vlib_main_t * vm, next0 = ESP_ENCRYPT_NEXT_IP4_LOOKUP; } - if (PREDICT_TRUE - (!is_ipv6 && sa0->is_tunnel && !sa0->is_tunnel_ip6)) + if (PREDICT_TRUE (!is_ip6 && sa0->is_tunnel && !sa0->is_tunnel_ip6)) { oh0->ip4.src_address.as_u32 = sa0->tunnel_src_addr.ip4.as_u32; oh0->ip4.dst_address.as_u32 = sa0->tunnel_dst_addr.ip4.as_u32; vnet_buffer (o_b0)->sw_if_index[VLIB_TX] = (u32) ~ 0; } - else if (is_ipv6 && sa0->is_tunnel && sa0->is_tunnel_ip6) + else if (is_ip6 && sa0->is_tunnel && sa0->is_tunnel_ip6) { oh6_0->ip6.src_address.as_u64[0] = sa0->tunnel_src_addr.ip6.as_u64[0]; @@ -379,7 +382,7 @@ esp_encrypt_node_fn (vlib_main_t * vm, sa0->use_esn, sa0->seq_hi); - if (PREDICT_FALSE (is_ipv6)) + if (is_ip6) { oh6_0->ip6.payload_length = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, o_b0) - @@ -425,9 +428,14 @@ esp_encrypt_node_fn (vlib_main_t * vm, } vlib_put_next_frame (vm, node, next_index, n_left_to_next); } - vlib_node_increment_counter (vm, esp_encrypt_node.index, - ESP_ENCRYPT_ERROR_RX_PKTS, - from_frame->n_vectors); + if (is_ip6) + vlib_node_increment_counter (vm, esp6_encrypt_node.index, + ESP_ENCRYPT_ERROR_RX_PKTS, + from_frame->n_vectors); + else + vlib_node_increment_counter (vm, esp4_encrypt_node.index, + ESP_ENCRYPT_ERROR_RX_PKTS, + from_frame->n_vectors); free_buffers_and_exit: if (recycle) @@ -436,11 +444,46 @@ free_buffers_and_exit: return from_frame->n_vectors; } +static uword +esp4_encrypt_node_fn (vlib_main_t * vm, + vlib_node_runtime_t * node, vlib_frame_t * from_frame) +{ + return esp_encrypt_inline (vm, node, from_frame, 0 /* is_ip6 */ ); +} + +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE (esp4_encrypt_node) = { + .function = esp4_encrypt_node_fn, + .name = "esp4-encrypt", + .vector_size = sizeof (u32), + .format_trace = format_esp_encrypt_trace, + .type = VLIB_NODE_TYPE_INTERNAL, + + .n_errors = ARRAY_LEN(esp_encrypt_error_strings), + .error_strings = esp_encrypt_error_strings, + + .n_next_nodes = ESP_ENCRYPT_N_NEXT, + .next_nodes = { +#define _(s,n) [ESP_ENCRYPT_NEXT_##s] = n, + 
foreach_esp_encrypt_next +#undef _ + }, +}; +/* *INDENT-ON* */ + +VLIB_NODE_FUNCTION_MULTIARCH (esp4_encrypt_node, esp4_encrypt_node_fn); + +static uword +esp6_encrypt_node_fn (vlib_main_t * vm, + vlib_node_runtime_t * node, vlib_frame_t * from_frame) +{ + return esp_encrypt_inline (vm, node, from_frame, 1 /* is_ip6 */ ); +} /* *INDENT-OFF* */ -VLIB_REGISTER_NODE (esp_encrypt_node) = { - .function = esp_encrypt_node_fn, - .name = "esp-encrypt", +VLIB_REGISTER_NODE (esp6_encrypt_node) = { + .function = esp6_encrypt_node_fn, + .name = "esp6-encrypt", .vector_size = sizeof (u32), .format_trace = format_esp_encrypt_trace, .type = VLIB_NODE_TYPE_INTERNAL, @@ -457,7 +500,7 @@ VLIB_REGISTER_NODE (esp_encrypt_node) = { }; /* *INDENT-ON* */ -VLIB_NODE_FUNCTION_MULTIARCH (esp_encrypt_node, esp_encrypt_node_fn) +VLIB_NODE_FUNCTION_MULTIARCH (esp6_encrypt_node, esp6_encrypt_node_fn); /* * fd.io coding-style-patch-verification: ON * diff --git a/src/vnet/ipsec/ipsec.c b/src/vnet/ipsec/ipsec.c index c882a628602..d1b82b51326 100644 --- a/src/vnet/ipsec/ipsec.c +++ b/src/vnet/ipsec/ipsec.c @@ -73,17 +73,17 @@ ipsec_set_interface_spd (vlib_main_t * vm, u32 sw_if_index, u32 spd_id, sw_if_index, spd_id, spd_index); /* enable IPsec on TX */ - vnet_feature_enable_disable ("ip4-output", "ipsec-output-ip4", sw_if_index, + vnet_feature_enable_disable ("ip4-output", "ipsec4-output", sw_if_index, is_add, 0, 0); - vnet_feature_enable_disable ("ip6-output", "ipsec-output-ip6", sw_if_index, + vnet_feature_enable_disable ("ip6-output", "ipsec6-output", sw_if_index, is_add, 0, 0); config.spd_index = spd_index; /* enable IPsec on RX */ - vnet_feature_enable_disable ("ip4-unicast", "ipsec-input-ip4", sw_if_index, + vnet_feature_enable_disable ("ip4-unicast", "ipsec4-input", sw_if_index, is_add, &config, sizeof (config)); - vnet_feature_enable_disable ("ip6-unicast", "ipsec-input-ip6", sw_if_index, + vnet_feature_enable_disable ("ip6-unicast", "ipsec6-input", sw_if_index, is_add, &config, sizeof (config)); return 0; @@ -562,26 +562,47 @@ ipsec_init (vlib_main_t * vm) ASSERT (node); im->error_drop_node_index = node->index; - node = vlib_get_node_by_name (vm, (u8 *) "esp-encrypt"); + node = vlib_get_node_by_name (vm, (u8 *) "esp4-encrypt"); ASSERT (node); - im->esp_encrypt_node_index = node->index; + im->esp4_encrypt_node_index = node->index; - node = vlib_get_node_by_name (vm, (u8 *) "esp-decrypt"); + node = vlib_get_node_by_name (vm, (u8 *) "esp4-decrypt"); ASSERT (node); - im->esp_decrypt_node_index = node->index; + im->esp4_decrypt_node_index = node->index; - node = vlib_get_node_by_name (vm, (u8 *) "ah-encrypt"); + node = vlib_get_node_by_name (vm, (u8 *) "ah4-encrypt"); ASSERT (node); - im->ah_encrypt_node_index = node->index; + im->ah4_encrypt_node_index = node->index; - node = vlib_get_node_by_name (vm, (u8 *) "ah-decrypt"); + node = vlib_get_node_by_name (vm, (u8 *) "ah4-decrypt"); ASSERT (node); - im->ah_decrypt_node_index = node->index; + im->ah4_decrypt_node_index = node->index; - im->esp_encrypt_next_index = IPSEC_OUTPUT_NEXT_ESP_ENCRYPT; - im->esp_decrypt_next_index = IPSEC_INPUT_NEXT_ESP_DECRYPT; - im->ah_encrypt_next_index = IPSEC_OUTPUT_NEXT_AH_ENCRYPT; - im->ah_decrypt_next_index = IPSEC_INPUT_NEXT_AH_DECRYPT; + im->esp4_encrypt_next_index = IPSEC_OUTPUT_NEXT_ESP4_ENCRYPT; + im->esp4_decrypt_next_index = IPSEC_INPUT_NEXT_ESP4_DECRYPT; + im->ah4_encrypt_next_index = IPSEC_OUTPUT_NEXT_AH4_ENCRYPT; + im->ah4_decrypt_next_index = IPSEC_INPUT_NEXT_AH4_DECRYPT; + + node = vlib_get_node_by_name (vm, (u8 *) 
"esp6-encrypt"); + ASSERT (node); + im->esp6_encrypt_node_index = node->index; + + node = vlib_get_node_by_name (vm, (u8 *) "esp6-decrypt"); + ASSERT (node); + im->esp6_decrypt_node_index = node->index; + + node = vlib_get_node_by_name (vm, (u8 *) "ah6-encrypt"); + ASSERT (node); + im->ah6_encrypt_node_index = node->index; + + node = vlib_get_node_by_name (vm, (u8 *) "ah6-decrypt"); + ASSERT (node); + im->ah6_decrypt_node_index = node->index; + + im->esp6_encrypt_next_index = IPSEC_OUTPUT_NEXT_ESP6_ENCRYPT; + im->esp6_decrypt_next_index = IPSEC_INPUT_NEXT_ESP6_DECRYPT; + im->ah6_encrypt_next_index = IPSEC_OUTPUT_NEXT_AH6_ENCRYPT; + im->ah6_decrypt_next_index = IPSEC_INPUT_NEXT_AH6_DECRYPT; im->cb.check_support_cb = ipsec_check_support; diff --git a/src/vnet/ipsec/ipsec.h b/src/vnet/ipsec/ipsec.h index f9ceae55a05..6a9c5b1c824 100644 --- a/src/vnet/ipsec/ipsec.h +++ b/src/vnet/ipsec/ipsec.h @@ -20,11 +20,12 @@ #define IPSEC_FLAG_IPSEC_GRE_TUNNEL (1 << 0) - -#define foreach_ipsec_output_next \ -_(DROP, "error-drop") \ -_(ESP_ENCRYPT, "esp-encrypt") \ -_(AH_ENCRYPT, "ah-encrypt") +#define foreach_ipsec_output_next \ + _ (DROP, "error-drop") \ + _ (ESP4_ENCRYPT, "esp4-encrypt") \ + _ (AH4_ENCRYPT, "ah4-encrypt") \ + _ (ESP6_ENCRYPT, "esp6-encrypt") \ + _ (AH6_ENCRYPT, "ah6-encrypt") #define _(v, s) IPSEC_OUTPUT_NEXT_##v, typedef enum @@ -34,11 +35,12 @@ typedef enum IPSEC_OUTPUT_N_NEXT, } ipsec_output_next_t; - -#define foreach_ipsec_input_next \ -_(DROP, "error-drop") \ -_(ESP_DECRYPT, "esp-decrypt") \ -_(AH_DECRYPT, "ah-decrypt") +#define foreach_ipsec_input_next \ + _ (DROP, "error-drop") \ + _ (ESP4_DECRYPT, "esp4-decrypt") \ + _ (AH4_DECRYPT, "ah4-decrypt") \ + _ (ESP6_DECRYPT, "esp6-decrypt") \ + _ (AH6_DECRYPT, "ah6-decrypt") #define _(v, s) IPSEC_INPUT_NEXT_##v, typedef enum @@ -288,15 +290,23 @@ typedef struct /* node indices */ u32 error_drop_node_index; - u32 esp_encrypt_node_index; - u32 esp_decrypt_node_index; - u32 ah_encrypt_node_index; - u32 ah_decrypt_node_index; + u32 esp4_encrypt_node_index; + u32 esp4_decrypt_node_index; + u32 ah4_encrypt_node_index; + u32 ah4_decrypt_node_index; + u32 esp6_encrypt_node_index; + u32 esp6_decrypt_node_index; + u32 ah6_encrypt_node_index; + u32 ah6_decrypt_node_index; /* next node indices */ - u32 esp_encrypt_next_index; - u32 esp_decrypt_next_index; - u32 ah_encrypt_next_index; - u32 ah_decrypt_next_index; + u32 esp4_encrypt_next_index; + u32 esp4_decrypt_next_index; + u32 ah4_encrypt_next_index; + u32 ah4_decrypt_next_index; + u32 esp6_encrypt_next_index; + u32 esp6_decrypt_next_index; + u32 ah6_encrypt_next_index; + u32 ah6_decrypt_next_index; /* callbacks */ ipsec_main_callbacks_t cb; @@ -307,10 +317,14 @@ typedef struct extern ipsec_main_t ipsec_main; -extern vlib_node_registration_t esp_encrypt_node; -extern vlib_node_registration_t esp_decrypt_node; -extern vlib_node_registration_t ah_encrypt_node; -extern vlib_node_registration_t ah_decrypt_node; +extern vlib_node_registration_t esp4_encrypt_node; +extern vlib_node_registration_t esp4_decrypt_node; +extern vlib_node_registration_t ah4_encrypt_node; +extern vlib_node_registration_t ah4_decrypt_node; +extern vlib_node_registration_t esp6_encrypt_node; +extern vlib_node_registration_t esp6_decrypt_node; +extern vlib_node_registration_t ah6_encrypt_node; +extern vlib_node_registration_t ah6_decrypt_node; extern vlib_node_registration_t ipsec_if_input_node; diff --git a/src/vnet/ipsec/ipsec_if.c b/src/vnet/ipsec/ipsec_if.c index cb7e89a68e6..2640f25c011 100644 --- 
a/src/vnet/ipsec/ipsec_if.c +++ b/src/vnet/ipsec/ipsec_if.c @@ -108,7 +108,7 @@ ipsec_if_tx_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node, hi0 = vnet_get_sup_hw_interface (vnm, sw_if_index0); t0 = pool_elt_at_index (im->tunnel_interfaces, hi0->dev_instance); vnet_buffer (b0)->ipsec.sad_index = t0->output_sa_index; - next0 = IPSEC_OUTPUT_NEXT_ESP_ENCRYPT; + next0 = IPSEC_OUTPUT_NEXT_ESP4_ENCRYPT; len0 = vlib_buffer_length_in_chain (vm, b0); @@ -369,10 +369,10 @@ ipsec_add_del_tunnel_if_internal (vnet_main_t * vnm, hi = vnet_get_hw_interface (vnm, hw_if_index); slot = vlib_node_add_next_with_slot - (vnm->vlib_main, hi->tx_node_index, im->esp_encrypt_node_index, - IPSEC_OUTPUT_NEXT_ESP_ENCRYPT); + (vnm->vlib_main, hi->tx_node_index, im->esp4_encrypt_node_index, + IPSEC_OUTPUT_NEXT_ESP4_ENCRYPT); - ASSERT (slot == IPSEC_OUTPUT_NEXT_ESP_ENCRYPT); + ASSERT (slot == IPSEC_OUTPUT_NEXT_ESP4_ENCRYPT); t->hw_if_index = hw_if_index; diff --git a/src/vnet/ipsec/ipsec_if_in.c b/src/vnet/ipsec/ipsec_if_in.c index bc10f7f5f6c..a486bd98dbb 100644 --- a/src/vnet/ipsec/ipsec_if_in.c +++ b/src/vnet/ipsec/ipsec_if_in.c @@ -179,7 +179,7 @@ ipsec_if_input_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node, } vlib_buffer_advance (b0, ip4_header_bytes (ip0)); - next0 = im->esp_decrypt_next_index; + next0 = im->esp4_decrypt_next_index; } trace: @@ -231,7 +231,7 @@ VLIB_REGISTER_NODE (ipsec_if_input_node) = { .n_errors = ARRAY_LEN(ipsec_if_input_error_strings), .error_strings = ipsec_if_input_error_strings, - .sibling_of = "ipsec-input-ip4", + .sibling_of = "ipsec4-input", }; /* *INDENT-ON* */ diff --git a/src/vnet/ipsec/ipsec_input.c b/src/vnet/ipsec/ipsec_input.c index ebfb909cbcf..6d5b2dcce69 100644 --- a/src/vnet/ipsec/ipsec_input.c +++ b/src/vnet/ipsec/ipsec_input.c @@ -129,9 +129,9 @@ ip6_addr_match_range (ip6_address_t * a, ip6_address_t * la, } always_inline ipsec_policy_t * -ipsec_input_ip6_protect_policy_match (ipsec_spd_t * spd, - ip6_address_t * sa, - ip6_address_t * da, u32 spi) +ipsec6_input_protect_policy_match (ipsec_spd_t * spd, + ip6_address_t * sa, + ip6_address_t * da, u32 spi) { ipsec_main_t *im = &ipsec_main; ipsec_policy_t *p; @@ -168,12 +168,11 @@ ipsec_input_ip6_protect_policy_match (ipsec_spd_t * spd, return 0; } -static vlib_node_registration_t ipsec_input_ip4_node; +static vlib_node_registration_t ipsec4_input_node; static uword -ipsec_input_ip4_node_fn (vlib_main_t * vm, - vlib_node_runtime_t * node, - vlib_frame_t * from_frame) +ipsec4_input_node_fn (vlib_main_t * vm, + vlib_node_runtime_t * node, vlib_frame_t * from_frame) { u32 n_left_from, *from, next_index, *to_next; ipsec_main_t *im = &ipsec_main; @@ -252,7 +251,7 @@ ipsec_input_ip4_node_fn (vlib_main_t * vm, p0->counter.bytes += clib_net_to_host_u16 (ip0->length); vnet_buffer (b0)->ipsec.sad_index = p0->sa_index; vnet_buffer (b0)->ipsec.flags = 0; - next0 = im->esp_decrypt_next_index; + next0 = im->esp4_decrypt_next_index; vlib_buffer_advance (b0, ((u8 *) esp0 - (u8 *) ip0)); goto trace0; } @@ -295,7 +294,7 @@ ipsec_input_ip4_node_fn (vlib_main_t * vm, p0->counter.bytes += clib_net_to_host_u16 (ip0->length); vnet_buffer (b0)->ipsec.sad_index = p0->sa_index; vnet_buffer (b0)->ipsec.flags = 0; - next0 = im->ah_decrypt_next_index; + next0 = im->ah4_decrypt_next_index; goto trace1; } /* FIXME bypass and discard */ @@ -320,7 +319,7 @@ ipsec_input_ip4_node_fn (vlib_main_t * vm, } vlib_put_next_frame (vm, node, next_index, n_left_to_next); } - vlib_node_increment_counter (vm, ipsec_input_ip4_node.index, + 
diff --git a/src/vnet/ipsec/ipsec_input.c b/src/vnet/ipsec/ipsec_input.c
index ebfb909cbcf..6d5b2dcce69 100644
--- a/src/vnet/ipsec/ipsec_input.c
+++ b/src/vnet/ipsec/ipsec_input.c
@@ -129,9 +129,9 @@ ip6_addr_match_range (ip6_address_t * a, ip6_address_t * la,
 }

 always_inline ipsec_policy_t *
-ipsec_input_ip6_protect_policy_match (ipsec_spd_t * spd,
-				      ip6_address_t * sa,
-				      ip6_address_t * da, u32 spi)
+ipsec6_input_protect_policy_match (ipsec_spd_t * spd,
+				   ip6_address_t * sa,
+				   ip6_address_t * da, u32 spi)
 {
   ipsec_main_t *im = &ipsec_main;
   ipsec_policy_t *p;
@@ -168,12 +168,11 @@ ipsec_input_ip6_protect_policy_match (ipsec_spd_t * spd,
   return 0;
 }

-static vlib_node_registration_t ipsec_input_ip4_node;
+static vlib_node_registration_t ipsec4_input_node;

 static uword
-ipsec_input_ip4_node_fn (vlib_main_t * vm,
-			 vlib_node_runtime_t * node,
-			 vlib_frame_t * from_frame)
+ipsec4_input_node_fn (vlib_main_t * vm,
+		      vlib_node_runtime_t * node, vlib_frame_t * from_frame)
 {
   u32 n_left_from, *from, next_index, *to_next;
   ipsec_main_t *im = &ipsec_main;
@@ -252,7 +251,7 @@ ipsec_input_ip4_node_fn (vlib_main_t * vm,
 		  p0->counter.bytes += clib_net_to_host_u16 (ip0->length);
 		  vnet_buffer (b0)->ipsec.sad_index = p0->sa_index;
 		  vnet_buffer (b0)->ipsec.flags = 0;
-		  next0 = im->esp_decrypt_next_index;
+		  next0 = im->esp4_decrypt_next_index;
 		  vlib_buffer_advance (b0, ((u8 *) esp0 - (u8 *) ip0));
 		  goto trace0;
 		}
@@ -295,7 +294,7 @@ ipsec_input_ip4_node_fn (vlib_main_t * vm,
 		  p0->counter.bytes += clib_net_to_host_u16 (ip0->length);
 		  vnet_buffer (b0)->ipsec.sad_index = p0->sa_index;
 		  vnet_buffer (b0)->ipsec.flags = 0;
-		  next0 = im->ah_decrypt_next_index;
+		  next0 = im->ah4_decrypt_next_index;
 		  goto trace1;
 		}
 	      /* FIXME bypass and discard */
@@ -320,7 +319,7 @@ ipsec_input_ip4_node_fn (vlib_main_t * vm,
 	}
       vlib_put_next_frame (vm, node, next_index, n_left_to_next);
     }
-  vlib_node_increment_counter (vm, ipsec_input_ip4_node.index,
+  vlib_node_increment_counter (vm, ipsec4_input_node.index,
 			       IPSEC_INPUT_ERROR_RX_PKTS,
 			       from_frame->n_vectors);
@@ -329,9 +328,9 @@ ipsec_input_ip4_node_fn (vlib_main_t * vm,

 /* *INDENT-OFF* */
-VLIB_REGISTER_NODE (ipsec_input_ip4_node,static) = {
-  .function = ipsec_input_ip4_node_fn,
-  .name = "ipsec-input-ip4",
+VLIB_REGISTER_NODE (ipsec4_input_node,static) = {
+  .function = ipsec4_input_node_fn,
+  .name = "ipsec4-input",
   .vector_size = sizeof (u32),
   .format_trace = format_ipsec_input_trace,
   .type = VLIB_NODE_TYPE_INTERNAL,
@@ -348,13 +347,13 @@ VLIB_REGISTER_NODE (ipsec_input_ip4_node,static) = {
 };
 /* *INDENT-ON* */

-VLIB_NODE_FUNCTION_MULTIARCH (ipsec_input_ip4_node, ipsec_input_ip4_node_fn);
-static vlib_node_registration_t ipsec_input_ip6_node;
+VLIB_NODE_FUNCTION_MULTIARCH (ipsec4_input_node, ipsec4_input_node_fn);
+
+static vlib_node_registration_t ipsec6_input_node;

 static uword
-ipsec_input_ip6_node_fn (vlib_main_t * vm,
-			 vlib_node_runtime_t * node,
-			 vlib_frame_t * from_frame)
+ipsec6_input_node_fn (vlib_main_t * vm,
+		      vlib_node_runtime_t * node, vlib_frame_t * from_frame)
 {
   u32 n_left_from, *from, next_index, *to_next;
   ipsec_main_t *im = &ipsec_main;
@@ -409,11 +408,11 @@ ipsec_input_ip6_node_fn (vlib_main_t * vm,
 			 clib_net_to_host_u16 (ip0->payload_length) + header_size, spd0->id);
 #endif
-	      p0 = ipsec_input_ip6_protect_policy_match (spd0,
-							 &ip0->src_address,
-							 &ip0->dst_address,
-							 clib_net_to_host_u32
-							 (esp0->spi));
+	      p0 = ipsec6_input_protect_policy_match (spd0,
+						      &ip0->src_address,
+						      &ip0->dst_address,
+						      clib_net_to_host_u32
+						      (esp0->spi));

 	      if (PREDICT_TRUE (p0 != 0))
 		{
@@ -423,18 +422,18 @@ ipsec_input_ip6_node_fn (vlib_main_t * vm,
 		  p0->counter.bytes += header_size;
 		  vnet_buffer (b0)->ipsec.sad_index = p0->sa_index;
 		  vnet_buffer (b0)->ipsec.flags = 0;
-		  next0 = im->esp_decrypt_next_index;
+		  next0 = im->esp6_decrypt_next_index;
 		  vlib_buffer_advance (b0, header_size);
 		  goto trace0;
 		}
 	    }
 	  else if (ip0->protocol == IP_PROTOCOL_IPSEC_AH)
 	    {
-	      p0 = ipsec_input_ip6_protect_policy_match (spd0,
-							 &ip0->src_address,
-							 &ip0->dst_address,
-							 clib_net_to_host_u32
-							 (ah0->spi));
+	      p0 = ipsec6_input_protect_policy_match (spd0,
+						      &ip0->src_address,
+						      &ip0->dst_address,
+						      clib_net_to_host_u32
+						      (ah0->spi));

 	      if (PREDICT_TRUE (p0 != 0))
 		{
@@ -444,7 +443,7 @@ ipsec_input_ip6_node_fn (vlib_main_t * vm,
 		  p0->counter.bytes += header_size;
 		  vnet_buffer (b0)->ipsec.sad_index = p0->sa_index;
 		  vnet_buffer (b0)->ipsec.flags = 0;
-		  next0 = im->ah_decrypt_next_index;
+		  next0 = im->ah6_decrypt_next_index;
 		  goto trace0;
 		}
 	    }
@@ -468,7 +467,7 @@ ipsec_input_ip6_node_fn (vlib_main_t * vm,
 	}
       vlib_put_next_frame (vm, node, next_index, n_left_to_next);
     }
-  vlib_node_increment_counter (vm, ipsec_input_ip6_node.index,
+  vlib_node_increment_counter (vm, ipsec6_input_node.index,
 			       IPSEC_INPUT_ERROR_RX_PKTS,
 			       from_frame->n_vectors);
@@ -477,9 +476,9 @@ ipsec_input_ip6_node_fn (vlib_main_t * vm,

 /* *INDENT-OFF* */
-VLIB_REGISTER_NODE (ipsec_input_ip6_node,static) = {
-  .function = ipsec_input_ip6_node_fn,
-  .name = "ipsec-input-ip6",
+VLIB_REGISTER_NODE (ipsec6_input_node,static) = {
+  .function = ipsec6_input_node_fn,
+  .name = "ipsec6-input",
   .vector_size = sizeof (u32),
   .format_trace = format_ipsec_input_trace,
   .type = VLIB_NODE_TYPE_INTERNAL,
@@ -487,11 +486,11 @@ VLIB_REGISTER_NODE (ipsec_input_ip6_node,static) = {
   .n_errors = ARRAY_LEN(ipsec_input_error_strings),
   .error_strings = ipsec_input_error_strings,
-  .sibling_of = "ipsec-input-ip4",
+  .sibling_of = "ipsec4-input",
 };
 /* *INDENT-ON* */

-VLIB_NODE_FUNCTION_MULTIARCH (ipsec_input_ip6_node, ipsec_input_ip6_node_fn)
+VLIB_NODE_FUNCTION_MULTIARCH (ipsec6_input_node, ipsec6_input_node_fn);

 /*
  * fd.io coding-style-patch-verification: ON
  *
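For context, the renamed ipsec6_input_protect_policy_match walks the SPD and accepts a PROTECT policy only when the SPI matches and the packet's source and destination addresses fall inside the policy's configured ranges. A simplified, self-contained sketch of that kind of check, using stand-in types rather than the VPP structures (big-endian 16-byte addresses compared with memcmp instead of the u64 compares VPP uses):

#include <stdint.h>
#include <string.h>

typedef struct { uint8_t as_u8[16]; } ip6_addr_t;

typedef struct
{
  uint32_t spi;
  ip6_addr_t raddr_start, raddr_stop;	/* remote (source) range */
  ip6_addr_t laddr_start, laddr_stop;	/* local (destination) range */
} example_policy_t;

/* Big-endian addresses compare correctly byte-by-byte. */
static int
ip6_in_range (const ip6_addr_t * a, const ip6_addr_t * lo,
	      const ip6_addr_t * hi)
{
  return memcmp (a, lo, 16) >= 0 && memcmp (a, hi, 16) <= 0;
}

static const example_policy_t *
example_protect_policy_match (const example_policy_t * policies,
			      int n_policies, const ip6_addr_t * sa,
			      const ip6_addr_t * da, uint32_t spi)
{
  int i;
  for (i = 0; i < n_policies; i++)
    {
      const example_policy_t *p = policies + i;
      if (p->spi != spi)
	continue;
      if (!ip6_in_range (sa, &p->raddr_start, &p->raddr_stop))
	continue;
      if (!ip6_in_range (da, &p->laddr_start, &p->laddr_stop))
	continue;
      return p;
    }
  return 0;
}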
diff --git a/src/vnet/ipsec/ipsec_output.c b/src/vnet/ipsec/ipsec_output.c
index a62c0a53458..3a20d512f60 100644
--- a/src/vnet/ipsec/ipsec_output.c
+++ b/src/vnet/ipsec/ipsec_output.c
@@ -45,9 +45,6 @@ static char *ipsec_output_error_strings[] = {
 #undef _
 };

-static vlib_node_registration_t ipsec_output_ip4_node;
-static vlib_node_registration_t ipsec_output_ip6_node;
-
 typedef struct
 {
   u32 spd_id;
@@ -133,9 +130,9 @@ ip6_addr_match_range (ip6_address_t * a, ip6_address_t * la,
 }

 always_inline ipsec_policy_t *
-ipsec_output_ip6_policy_match (ipsec_spd_t * spd,
-			       ip6_address_t * la,
-			       ip6_address_t * ra, u16 lp, u16 rp, u8 pr)
+ipsec6_output_policy_match (ipsec_spd_t * spd,
+			    ip6_address_t * la,
+			    ip6_address_t * ra, u16 lp, u16 rp, u8 pr)
 {
   ipsec_policy_t *p;
   u32 *i;
@@ -239,14 +236,13 @@ ipsec_output_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
 			 spd0->id);
 #endif
-	  p0 = ipsec_output_ip6_policy_match (spd0,
-					      &ip6_0->src_address,
-					      &ip6_0->dst_address,
-					      clib_net_to_host_u16
-					      (udp0->src_port),
-					      clib_net_to_host_u16
-					      (udp0->dst_port),
-					      ip6_0->protocol);
+	  p0 = ipsec6_output_policy_match (spd0,
+					   &ip6_0->src_address,
+					   &ip6_0->dst_address,
+					   clib_net_to_host_u16
+					   (udp0->src_port),
+					   clib_net_to_host_u16
+					   (udp0->dst_port), ip6_0->protocol);
 	}
       else
 	{
@@ -281,9 +277,14 @@ ipsec_output_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
 	  nc_protect++;
 	  sa = pool_elt_at_index (im->sad, p0->sa_index);
 	  if (sa->protocol == IPSEC_PROTOCOL_ESP)
-	    next_node_index = im->esp_encrypt_node_index;
+	    if (is_ipv6)
+	      next_node_index = im->esp6_encrypt_node_index;
+	    else
+	      next_node_index = im->esp4_encrypt_node_index;
+	  else if (is_ipv6)
+	    next_node_index = im->ah6_encrypt_node_index;
 	  else
-	    next_node_index = im->ah_encrypt_node_index;
+	    next_node_index = im->ah4_encrypt_node_index;
 	  vnet_buffer (b0)->ipsec.sad_index = p0->sa_index;
 	  p0->counter.packets++;
 	  if (is_ipv6)
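The hunk above picks the encrypt graph node from a 2x2 choice: SA protocol (ESP vs. AH) crossed with address family. The nested if/else in the patch is equivalent to the small helper sketched here; the struct and field names are illustrative stand-ins for the ipsec_main_t members used in the patch, not the actual VPP layout:

#include <stdint.h>

typedef struct
{
  uint32_t esp4_encrypt_node_index, esp6_encrypt_node_index;
  uint32_t ah4_encrypt_node_index, ah6_encrypt_node_index;
} example_main_t;

/* Return the encrypt node index for (protocol, address family). */
static uint32_t
example_encrypt_node_index (const example_main_t * im, int is_esp,
			    int is_ipv6)
{
  if (is_esp)
    return is_ipv6 ? im->esp6_encrypt_node_index
		   : im->esp4_encrypt_node_index;
  return is_ipv6 ? im->ah6_encrypt_node_index : im->ah4_encrypt_node_index;
}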
"ipsec6-output", .vector_size = sizeof (u32), .format_trace = format_ipsec_output_trace, .type = VLIB_NODE_TYPE_INTERNAL, @@ -468,7 +470,7 @@ VLIB_REGISTER_NODE (ipsec_output_ip6_node,static) = { }; /* *INDENT-ON* */ -VLIB_NODE_FUNCTION_MULTIARCH (ipsec_output_ip6_node, ipsec_output_ip6_node_fn) +VLIB_NODE_FUNCTION_MULTIARCH (ipsec6_output_node, ipsec6_output_node_fn); #else /* IPSEC > 1 */ /* Dummy ipsec output node, in case when IPSec is disabled */ @@ -482,16 +484,16 @@ ipsec_output_node_fn (vlib_main_t * vm, } /* *INDENT-OFF* */ -VLIB_REGISTER_NODE (ipsec_output_node) = { +VLIB_REGISTER_NODE (ipsec4_output_node) = { .vector_size = sizeof (u32), .function = ipsec_output_node_fn, - .name = "ipsec-output-ip4", + .name = "ipsec4-output", }; -VLIB_REGISTER_NODE (ipsec_output_node) = { +VLIB_REGISTER_NODE (ipsec6_output_node) = { .vector_size = sizeof (u32), .function = ipsec_output_node_fn, - .name = "ipsec-output-ip6", + .name = "ipsec6-output", }; /* *INDENT-ON* */ #endif |