Diffstat (limited to 'src/vnet/ipsec')
-rw-r--r--  src/vnet/ipsec/ah_decrypt.c  |   8
-rw-r--r--  src/vnet/ipsec/ah_encrypt.c  |   8
-rw-r--r--  src/vnet/ipsec/esp_decrypt.c |  16
-rw-r--r--  src/vnet/ipsec/esp_encrypt.c |  53
-rw-r--r--  src/vnet/ipsec/ipsec.c       |   6
-rw-r--r--  src/vnet/ipsec/ipsec.h       |   2
-rw-r--r--  src/vnet/ipsec/ipsec_input.c | 310
7 files changed, 231 insertions(+), 172 deletions(-)
diff --git a/src/vnet/ipsec/ah_decrypt.c b/src/vnet/ipsec/ah_decrypt.c
index 918ebf03f67..ec4db0fed57 100644
--- a/src/vnet/ipsec/ah_decrypt.c
+++ b/src/vnet/ipsec/ah_decrypt.c
@@ -500,10 +500,10 @@ ah_decrypt_init (vlib_main_t *vm)
{
ipsec_main_t *im = &ipsec_main;
- im->ah4_dec_fq_index =
- vlib_frame_queue_main_init (ah4_decrypt_node.index, 0);
- im->ah6_dec_fq_index =
- vlib_frame_queue_main_init (ah6_decrypt_node.index, 0);
+ im->ah4_dec_fq_index = vlib_frame_queue_main_init (ah4_decrypt_node.index,
+ im->handoff_queue_size);
+ im->ah6_dec_fq_index = vlib_frame_queue_main_init (ah6_decrypt_node.index,
+ im->handoff_queue_size);
return 0;
}
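
This hunk, like the matching init hunks in ah_encrypt.c, esp_decrypt.c and esp_encrypt.c below, swaps the hard-coded 0 for the new im->handoff_queue_size field. Since ipsec_main is zero-initialized, an unconfigured field still passes 0, which appears to let vlib fall back to its built-in frame queue depth, so default behavior is unchanged. A minimal standalone sketch of that zero-means-default convention, with DEFAULT_NELTS as a hypothetical stand-in for the real default, which lives inside vlib:

    /* illustrative only: resolve a configured handoff queue depth,
     * where 0 (unset) keeps the default, matching the old call sites */
    #define DEFAULT_NELTS 64 /* hypothetical stand-in for vlib's default */

    static unsigned int
    resolve_queue_depth (unsigned int configured)
    {
      return configured ? configured : DEFAULT_NELTS;
    }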
diff --git a/src/vnet/ipsec/ah_encrypt.c b/src/vnet/ipsec/ah_encrypt.c
index 960327f071d..86694660878 100644
--- a/src/vnet/ipsec/ah_encrypt.c
+++ b/src/vnet/ipsec/ah_encrypt.c
@@ -490,10 +490,10 @@ ah_encrypt_init (vlib_main_t *vm)
{
ipsec_main_t *im = &ipsec_main;
- im->ah4_enc_fq_index =
- vlib_frame_queue_main_init (ah4_encrypt_node.index, 0);
- im->ah6_enc_fq_index =
- vlib_frame_queue_main_init (ah6_encrypt_node.index, 0);
+ im->ah4_enc_fq_index = vlib_frame_queue_main_init (ah4_encrypt_node.index,
+ im->handoff_queue_size);
+ im->ah6_enc_fq_index = vlib_frame_queue_main_init (ah6_encrypt_node.index,
+ im->handoff_queue_size);
return 0;
}
diff --git a/src/vnet/ipsec/esp_decrypt.c b/src/vnet/ipsec/esp_decrypt.c
index 26d8ca1deee..01b2d2971b0 100644
--- a/src/vnet/ipsec/esp_decrypt.c
+++ b/src/vnet/ipsec/esp_decrypt.c
@@ -1675,14 +1675,14 @@ esp_decrypt_init (vlib_main_t *vm)
{
ipsec_main_t *im = &ipsec_main;
- im->esp4_dec_fq_index =
- vlib_frame_queue_main_init (esp4_decrypt_node.index, 0);
- im->esp6_dec_fq_index =
- vlib_frame_queue_main_init (esp6_decrypt_node.index, 0);
- im->esp4_dec_tun_fq_index =
- vlib_frame_queue_main_init (esp4_decrypt_tun_node.index, 0);
- im->esp6_dec_tun_fq_index =
- vlib_frame_queue_main_init (esp6_decrypt_tun_node.index, 0);
+ im->esp4_dec_fq_index = vlib_frame_queue_main_init (esp4_decrypt_node.index,
+ im->handoff_queue_size);
+ im->esp6_dec_fq_index = vlib_frame_queue_main_init (esp6_decrypt_node.index,
+ im->handoff_queue_size);
+ im->esp4_dec_tun_fq_index = vlib_frame_queue_main_init (
+ esp4_decrypt_tun_node.index, im->handoff_queue_size);
+ im->esp6_dec_tun_fq_index = vlib_frame_queue_main_init (
+ esp6_decrypt_tun_node.index, im->handoff_queue_size);
return 0;
}
diff --git a/src/vnet/ipsec/esp_encrypt.c b/src/vnet/ipsec/esp_encrypt.c
index dd47053874c..f6d1ecaed24 100644
--- a/src/vnet/ipsec/esp_encrypt.c
+++ b/src/vnet/ipsec/esp_encrypt.c
@@ -589,6 +589,22 @@ esp_prepare_async_frame (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
async_next, iv, tag, aad, flag);
}
+/* Per RFC 6935, section 5, the UDP checksum must be computed when
+ * originating an IPv6 UDP packet. RFC 6936 allows that requirement to be
+ * relaxed under certain conditions; this implementation does not satisfy
+ * them all, so the checksum is always computed.
+ */
+static_always_inline void
+set_ip6_udp_cksum_offload (vlib_buffer_t *b, i16 l3_hdr_offset,
+ i16 l4_hdr_offset)
+{
+ vnet_buffer (b)->l3_hdr_offset = l3_hdr_offset;
+ vnet_buffer (b)->l4_hdr_offset = l4_hdr_offset;
+ vnet_buffer_offload_flags_set (b, VNET_BUFFER_OFFLOAD_F_UDP_CKSUM);
+ b->flags |= (VNET_BUFFER_F_IS_IP6 | VNET_BUFFER_F_L3_HDR_OFFSET_VALID |
+ VNET_BUFFER_F_L4_HDR_OFFSET_VALID);
+}
+
always_inline uword
esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
vlib_frame_t *frame, vnet_link_t lt, int is_tun,
@@ -869,6 +885,15 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
esp_update_ip4_hdr (ip4, len, /* is_transport */ 0, 0);
}
+ if (ipsec_sa_is_set_UDP_ENCAP (sa0) &&
+ ipsec_sa_is_set_IS_TUNNEL_V6 (sa0))
+ {
+ i16 l3_off = b[0]->current_data - hdr_len;
+ i16 l4_off = l3_off + sizeof (ip6_header_t);
+
+ set_ip6_udp_cksum_offload (b[0], l3_off, l4_off);
+ }
+
dpo = &sa0->dpo;
if (!is_tun)
{
@@ -988,6 +1013,14 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
esp_fill_udp_hdr (sa0, udp, udp_len);
}
+ if (udp && (VNET_LINK_IP6 == lt))
+ {
+ i16 l3_off = b[0]->current_data - hdr_len + l2_len;
+ i16 l4_off = l3_off + ip_len;
+
+ set_ip6_udp_cksum_offload (b[0], l3_off, l4_off);
+ }
+
sync_next[0] = ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT;
}
@@ -1445,16 +1478,16 @@ esp_encrypt_init (vlib_main_t *vm)
{
ipsec_main_t *im = &ipsec_main;
- im->esp4_enc_fq_index =
- vlib_frame_queue_main_init (esp4_encrypt_node.index, 0);
- im->esp6_enc_fq_index =
- vlib_frame_queue_main_init (esp6_encrypt_node.index, 0);
- im->esp4_enc_tun_fq_index =
- vlib_frame_queue_main_init (esp4_encrypt_tun_node.index, 0);
- im->esp6_enc_tun_fq_index =
- vlib_frame_queue_main_init (esp6_encrypt_tun_node.index, 0);
- im->esp_mpls_enc_tun_fq_index =
- vlib_frame_queue_main_init (esp_mpls_encrypt_tun_node.index, 0);
+ im->esp4_enc_fq_index = vlib_frame_queue_main_init (esp4_encrypt_node.index,
+ im->handoff_queue_size);
+ im->esp6_enc_fq_index = vlib_frame_queue_main_init (esp6_encrypt_node.index,
+ im->handoff_queue_size);
+ im->esp4_enc_tun_fq_index = vlib_frame_queue_main_init (
+ esp4_encrypt_tun_node.index, im->handoff_queue_size);
+ im->esp6_enc_tun_fq_index = vlib_frame_queue_main_init (
+ esp6_encrypt_tun_node.index, im->handoff_queue_size);
+ im->esp_mpls_enc_tun_fq_index = vlib_frame_queue_main_init (
+ esp_mpls_encrypt_tun_node.index, im->handoff_queue_size);
return 0;
}
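
Note that set_ip6_udp_cksum_offload only requests the checksum through offload flags; the sum itself is produced later, by hardware or by the software checksum fallback. For reference, a standalone sketch (not VPP code) of the computation the request resolves to, per RFC 768 and RFC 2460: the one's-complement sum over the IPv6 pseudo-header plus the UDP header and payload. The caller is assumed to have zeroed the checksum field first.

    #include <stddef.h>
    #include <stdint.h>

    static uint16_t
    ip6_udp_cksum (const uint8_t src[16], const uint8_t dst[16],
                   const uint8_t *udp, size_t udp_len)
    {
      uint64_t sum = 0;
      size_t i;

      for (i = 0; i < 16; i += 2) /* pseudo-header: src + dst addresses */
        {
          sum += ((uint32_t) src[i] << 8) | src[i + 1];
          sum += ((uint32_t) dst[i] << 8) | dst[i + 1];
        }
      sum += udp_len; /* pseudo-header: upper-layer packet length */
      sum += 17;      /* pseudo-header: next header = UDP (17) */

      for (i = 0; i + 1 < udp_len; i += 2) /* UDP header + payload */
        sum += ((uint32_t) udp[i] << 8) | udp[i + 1];
      if (udp_len & 1) /* odd trailing byte is padded with zero */
        sum += (uint32_t) udp[udp_len - 1] << 8;

      while (sum >> 16) /* fold carries back into 16 bits */
        sum = (sum & 0xffff) + (sum >> 16);

      /* a computed 0 is transmitted as 0xffff (RFC 768) */
      return (uint16_t) ~sum ? (uint16_t) ~sum : 0xffff;
    }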
diff --git a/src/vnet/ipsec/ipsec.c b/src/vnet/ipsec/ipsec.c
index f8c39c327ed..8b43dd23cc8 100644
--- a/src/vnet/ipsec/ipsec.c
+++ b/src/vnet/ipsec/ipsec.c
@@ -663,6 +663,7 @@ ipsec_config (vlib_main_t *vm, unformat_input_t *input)
u32 ipsec_spd_fp_num_buckets;
bool fp_spd_ip4_enabled = false;
bool fp_spd_ip6_enabled = false;
+ u32 handoff_queue_size;
while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
{
@@ -758,6 +759,11 @@ ipsec_config (vlib_main_t *vm, unformat_input_t *input)
ipsec_tun_table_init (AF_IP6, table_size, n_buckets);
}
+ else if (unformat (input, "async-handoff-queue-size %d",
+ &handoff_queue_size))
+ {
+ im->handoff_queue_size = handoff_queue_size;
+ }
else
return clib_error_return (0, "unknown input `%U'",
format_unformat_error, input);
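
The new clause makes the handoff queue depth tunable from startup.conf. Assuming ipsec_config is registered under the usual ipsec stanza, configuration would look like:

    ipsec {
      async-handoff-queue-size 128
    }

A deeper queue absorbs larger bursts when handing packets off between worker threads, at the cost of memory and buffering latency; leaving it unset keeps the vlib default.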
diff --git a/src/vnet/ipsec/ipsec.h b/src/vnet/ipsec/ipsec.h
index 4aa09d7560e..9ab054cf2a9 100644
--- a/src/vnet/ipsec/ipsec.h
+++ b/src/vnet/ipsec/ipsec.h
@@ -248,6 +248,8 @@ typedef struct
u32 esp4_dec_tun_fq_index;
u32 esp6_dec_tun_fq_index;
+ u32 handoff_queue_size;
+
/* Number of buckets for flow cache */
u32 ipsec4_out_spd_hash_num_buckets;
u32 ipsec4_out_spd_flow_cache_entries;
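
The remainder of the patch, below, lifts the AH policy lookup out of ipsec4_input_node into the new ipsec_ah_packet_process, trading the ah:/trace1: goto pairs for early returns. The control flow it preserves: try PROTECT, then BYPASS, then DISCARD; if the flow cache was searched and everything missed, retry once with a linear SPD walk; a miss there too means drop by default. A standalone sketch of that order, with hypothetical cache_find/linear_find callbacks:

    enum kind { PROTECT, BYPASS, DISCARD, N_KINDS };
    typedef struct policy policy_t;

    static policy_t *
    inbound_lookup (int cache_enabled,
                    policy_t *(*cache_find) (enum kind),
                    policy_t *(*linear_find) (enum kind))
    {
      int use_cache = cache_enabled;

      for (;;)
        {
          for (int k = PROTECT; k < N_KINDS; k++)
            {
              policy_t *p = use_cache ? cache_find (k) : linear_find (k);
              if (p)
                return p; /* PROTECT -> decrypt, BYPASS -> pass,
                             DISCARD -> drop */
            }
          if (!use_cache)
            return 0;  /* no match at all: caller drops by default */
          use_cache = 0; /* cache missed; retry with linear search */
        }
    }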
diff --git a/src/vnet/ipsec/ipsec_input.c b/src/vnet/ipsec/ipsec_input.c
index 6ccc0be2622..48f7deadda3 100644
--- a/src/vnet/ipsec/ipsec_input.c
+++ b/src/vnet/ipsec/ipsec_input.c
@@ -274,6 +274,159 @@ ip6_addr_match_range (ip6_address_t * a, ip6_address_t * la,
}
always_inline void
+ipsec_collect_ah_trace (vlib_buffer_t **b, vlib_node_runtime_t *node,
+ vlib_main_t *vm, ip4_header_t *ip0, ah_header_t *ah0,
+ u8 has_space0, ipsec_spd_t *spd0, ipsec_policy_t *p0,
+ u32 pi0)
+{
+ if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE) &&
+ PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ ipsec_input_trace_t *tr = vlib_add_trace (vm, node, b[0], sizeof (*tr));
+
+ tr->proto = ip0->protocol;
+ tr->sa_id = p0 ? p0->sa_id : ~0;
+ tr->spi = has_space0 ? clib_net_to_host_u32 (ah0->spi) : ~0;
+ tr->seq = has_space0 ? clib_net_to_host_u32 (ah0->seq_no) : ~0;
+ tr->spd = spd0->id;
+ tr->policy_index = pi0;
+ }
+}
+
+always_inline void
+ipsec_ah_packet_process (vlib_main_t *vm, ipsec_main_t *im, ip4_header_t *ip0,
+ ah_header_t *ah0, u32 thread_index, ipsec_spd_t *spd0,
+ vlib_buffer_t **b, vlib_node_runtime_t *node,
+ u64 *ipsec_bypassed, u64 *ipsec_dropped,
+ u64 *ipsec_matched, u64 *ipsec_unprocessed, u16 *next)
+{
+ ipsec_policy_t *p0 = NULL;
+ u32 pi0 = ~0;
+ u8 has_space0;
+ /* if the flow cache is enabled, search it first for a policy match
+ * and fall back to linear search on failure */
+ bool search_flow_cache = im->input_flow_cache_flag;
+
+ while (1)
+ {
+ if (search_flow_cache)
+ {
+ p0 = ipsec4_input_spd_find_flow_cache_entry (
+ im, ip0->src_address.as_u32, ip0->dst_address.as_u32,
+ IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT);
+ }
+ else
+ {
+ p0 = ipsec_input_protect_policy_match (
+ spd0, clib_net_to_host_u32 (ip0->src_address.as_u32),
+ clib_net_to_host_u32 (ip0->dst_address.as_u32),
+ clib_net_to_host_u32 (ah0->spi));
+ }
+
+ has_space0 = vlib_buffer_has_space (b[0], (clib_address_t) (ah0 + 1) -
+ (clib_address_t) ip0);
+
+ if (PREDICT_TRUE ((p0 != NULL) & (has_space0)))
+ {
+ *ipsec_matched += 1;
+ pi0 = p0 - im->policies;
+ vlib_increment_combined_counter (&ipsec_spd_policy_counters,
+ thread_index, pi0, 1,
+ clib_net_to_host_u16 (ip0->length));
+
+ vnet_buffer (b[0])->ipsec.sad_index = p0->sa_index;
+ next[0] = im->ah4_decrypt_next_index;
+ ipsec_collect_ah_trace (b, node, vm, ip0, ah0, has_space0, spd0, p0,
+ pi0);
+ return;
+ }
+ else
+ {
+ p0 = 0;
+ pi0 = ~0;
+ }
+ if (search_flow_cache)
+ {
+ p0 = ipsec4_input_spd_find_flow_cache_entry (
+ im, ip0->src_address.as_u32, ip0->dst_address.as_u32,
+ IPSEC_SPD_POLICY_IP4_INBOUND_BYPASS);
+ }
+
+ else
+ {
+ p0 = ipsec_input_policy_match (
+ spd0, clib_net_to_host_u32 (ip0->src_address.as_u32),
+ clib_net_to_host_u32 (ip0->dst_address.as_u32),
+ IPSEC_SPD_POLICY_IP4_INBOUND_BYPASS);
+ }
+
+ if (PREDICT_TRUE ((p0 != NULL)))
+ {
+ *ipsec_bypassed += 1;
+ pi0 = p0 - im->policies;
+ vlib_increment_combined_counter (&ipsec_spd_policy_counters,
+ thread_index, pi0, 1,
+ clib_net_to_host_u16 (ip0->length));
+ ipsec_collect_ah_trace (b, node, vm, ip0, ah0, has_space0, spd0, p0,
+ pi0);
+ return;
+ }
+ else
+ {
+ p0 = 0;
+ pi0 = ~0;
+ }
+
+ if (search_flow_cache)
+ {
+ p0 = ipsec4_input_spd_find_flow_cache_entry (
+ im, ip0->src_address.as_u32, ip0->dst_address.as_u32,
+ IPSEC_SPD_POLICY_IP4_INBOUND_DISCARD);
+ }
+
+ else
+ {
+ p0 = ipsec_input_policy_match (
+ spd0, clib_net_to_host_u32 (ip0->src_address.as_u32),
+ clib_net_to_host_u32 (ip0->dst_address.as_u32),
+ IPSEC_SPD_POLICY_IP4_INBOUND_DISCARD);
+ }
+
+ if (PREDICT_TRUE ((p0 != NULL)))
+ {
+ *ipsec_dropped += 1;
+ pi0 = p0 - im->policies;
+ vlib_increment_combined_counter (&ipsec_spd_policy_counters,
+ thread_index, pi0, 1,
+ clib_net_to_host_u16 (ip0->length));
+
+ next[0] = IPSEC_INPUT_NEXT_DROP;
+ ipsec_collect_ah_trace (b, node, vm, ip0, ah0, has_space0, spd0, p0,
+ pi0);
+ return;
+ }
+ else
+ {
+ p0 = 0;
+ pi0 = ~0;
+ }
+ /* flow cache search failed, retry with linear search */
+ if (search_flow_cache && p0 == NULL)
+ {
+ search_flow_cache = false;
+ }
+ else if (search_flow_cache == false && p0 == NULL)
+ {
+ /* Drop by default if no match on PROTECT, BYPASS or DISCARD */
+ *ipsec_unprocessed += 1;
+ next[0] = IPSEC_INPUT_NEXT_DROP;
+ return;
+ }
+ }
+}
+
+always_inline void
ipsec_esp_packet_process (vlib_main_t *vm, ipsec_main_t *im, ip4_header_t *ip0,
esp_header_t *esp0, u32 thread_index,
ipsec_spd_t *spd0, vlib_buffer_t **b,
@@ -299,10 +452,11 @@ ipsec_esp_packet_process (vlib_main_t *vm, ipsec_main_t *im, ip4_header_t *ip0,
search_flow_cache = im->input_flow_cache_flag;
udp_or_esp:
- /* SPI ID field in the ESP header MUST NOT be a zero value */
if (esp0->spi == 0)
{
- /* Drop the packet if SPI ID is zero */
+ /* RFC 4303, section 2.1: The SPI value of zero (0) is reserved for
+ * local, implementation-specific use and MUST NOT be sent on the wire.
+ */
*ipsec_unprocessed += 1;
next[0] = IPSEC_INPUT_NEXT_DROP;
return;
@@ -523,15 +677,12 @@ VLIB_NODE_FN (ipsec4_input_node) (vlib_main_t * vm,
while (n_left_from > 0)
{
- u32 next32, pi0;
+ u32 next32;
ip4_header_t *ip0;
esp_header_t *esp0 = NULL;
ah_header_t *ah0;
ip4_ipsec_config_t *c0;
ipsec_spd_t *spd0;
- ipsec_policy_t *p0 = NULL;
- u8 has_space0;
- bool search_flow_cache = false;
if (n_left_from > 2)
{
@@ -552,12 +703,10 @@ VLIB_NODE_FN (ipsec4_input_node) (vlib_main_t * vm,
udp_header_t *udp0 = NULL;
udp0 = (udp_header_t *) ((u8 *) ip0 + ip4_header_bytes (ip0));
- /* As per rfc3948 in UDP Encapsulated Header, UDP checksum must be
- * Zero, and receivers must not depen upon UPD checksum.
- * inside ESP header , SPI ID value MUST NOT be a zero value
- * */
-
- if (udp0->checksum == 0)
+ /* RFC5996 Section 2.23 "Port 4500 is reserved for
+ * UDP-encapsulated ESP and IKE."
+ */
+ if (clib_host_to_net_u16 (4500) == udp0->dst_port)
{
esp0 = (esp_header_t *) ((u8 *) udp0 + sizeof (udp_header_t));
@@ -582,140 +731,9 @@ VLIB_NODE_FN (ipsec4_input_node) (vlib_main_t * vm,
{
ah0 = (ah_header_t *) ((u8 *) ip0 + ip4_header_bytes (ip0));
- // if flow cache is enabled, first search through flow cache for a
- // policy match and revert back to linear search on failure
- search_flow_cache = im->input_flow_cache_flag;
-
- ah:
- if (search_flow_cache)
- {
- p0 = ipsec4_input_spd_find_flow_cache_entry (
- im, ip0->src_address.as_u32, ip0->dst_address.as_u32,
- IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT);
- }
-
- else
- {
- p0 = ipsec_input_protect_policy_match (
- spd0, clib_net_to_host_u32 (ip0->src_address.as_u32),
- clib_net_to_host_u32 (ip0->dst_address.as_u32),
- clib_net_to_host_u32 (ah0->spi));
- }
-
- has_space0 =
- vlib_buffer_has_space (b[0],
- (clib_address_t) (ah0 + 1) -
- (clib_address_t) ip0);
-
- if (PREDICT_TRUE ((p0 != NULL) & (has_space0)))
- {
- ipsec_matched += 1;
-
- pi0 = p0 - im->policies;
- vlib_increment_combined_counter
- (&ipsec_spd_policy_counters,
- thread_index, pi0, 1, clib_net_to_host_u16 (ip0->length));
-
- vnet_buffer (b[0])->ipsec.sad_index = p0->sa_index;
- next[0] = im->ah4_decrypt_next_index;
- goto trace1;
- }
- else
- {
- p0 = 0;
- pi0 = ~0;
- }
-
- if (search_flow_cache)
- {
- p0 = ipsec4_input_spd_find_flow_cache_entry (
- im, ip0->src_address.as_u32, ip0->dst_address.as_u32,
- IPSEC_SPD_POLICY_IP4_INBOUND_BYPASS);
- }
-
- else
- {
- p0 = ipsec_input_policy_match (
- spd0, clib_net_to_host_u32 (ip0->src_address.as_u32),
- clib_net_to_host_u32 (ip0->dst_address.as_u32),
- IPSEC_SPD_POLICY_IP4_INBOUND_BYPASS);
- }
-
- if (PREDICT_TRUE ((p0 != NULL)))
- {
- ipsec_bypassed += 1;
-
- pi0 = p0 - im->policies;
- vlib_increment_combined_counter (
- &ipsec_spd_policy_counters, thread_index, pi0, 1,
- clib_net_to_host_u16 (ip0->length));
-
- goto trace1;
- }
- else
- {
- p0 = 0;
- pi0 = ~0;
- };
-
- if (search_flow_cache)
- {
- p0 = ipsec4_input_spd_find_flow_cache_entry (
- im, ip0->src_address.as_u32, ip0->dst_address.as_u32,
- IPSEC_SPD_POLICY_IP4_INBOUND_DISCARD);
- }
-
- else
- {
- p0 = ipsec_input_policy_match (
- spd0, clib_net_to_host_u32 (ip0->src_address.as_u32),
- clib_net_to_host_u32 (ip0->dst_address.as_u32),
- IPSEC_SPD_POLICY_IP4_INBOUND_DISCARD);
- }
-
- if (PREDICT_TRUE ((p0 != NULL)))
- {
- ipsec_dropped += 1;
-
- pi0 = p0 - im->policies;
- vlib_increment_combined_counter (
- &ipsec_spd_policy_counters, thread_index, pi0, 1,
- clib_net_to_host_u16 (ip0->length));
-
- next[0] = IPSEC_INPUT_NEXT_DROP;
- goto trace1;
- }
- else
- {
- p0 = 0;
- pi0 = ~0;
- };
-
- // flow cache search failed, retry with linear search
- if (search_flow_cache && p0 == NULL)
- {
- search_flow_cache = false;
- goto ah;
- }
-
- /* Drop by default if no match on PROTECT, BYPASS or DISCARD */
- ipsec_unprocessed += 1;
- next[0] = IPSEC_INPUT_NEXT_DROP;
-
- trace1:
- if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE) &&
- PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
- {
- ipsec_input_trace_t *tr =
- vlib_add_trace (vm, node, b[0], sizeof (*tr));
-
- tr->proto = ip0->protocol;
- tr->sa_id = p0 ? p0->sa_id : ~0;
- tr->spi = has_space0 ? clib_net_to_host_u32 (ah0->spi) : ~0;
- tr->seq = has_space0 ? clib_net_to_host_u32 (ah0->seq_no) : ~0;
- tr->spd = spd0->id;
- tr->policy_index = pi0;
- }
+ ipsec_ah_packet_process (vm, im, ip0, ah0, thread_index, spd0, b,
+ node, &ipsec_bypassed, &ipsec_dropped,
+ &ipsec_matched, &ipsec_unprocessed, next);
}
else
{