Diffstat (limited to 'src/vnet/ipsec/esp_encrypt.c')
 src/vnet/ipsec/esp_encrypt.c | 480
 1 file changed, 218 insertions(+), 262 deletions(-)
diff --git a/src/vnet/ipsec/esp_encrypt.c b/src/vnet/ipsec/esp_encrypt.c
index d102bd68f74..dd47053874c 100644
--- a/src/vnet/ipsec/esp_encrypt.c
+++ b/src/vnet/ipsec/esp_encrypt.c
@@ -18,11 +18,13 @@
#include <vnet/vnet.h>
#include <vnet/api_errno.h>
#include <vnet/ip/ip.h>
+#include <vnet/interface_output.h>
#include <vnet/crypto/crypto.h>
#include <vnet/ipsec/ipsec.h>
#include <vnet/ipsec/ipsec_tun.h>
+#include <vnet/ipsec/ipsec.api_enum.h>
#include <vnet/ipsec/esp.h>
#include <vnet/tunnel/tunnel_dp.h>
@@ -43,29 +45,6 @@ typedef enum
ESP_ENCRYPT_N_NEXT,
} esp_encrypt_next_t;
-#define foreach_esp_encrypt_error \
- _ (RX_PKTS, "ESP pkts received") \
- _ (POST_RX_PKTS, "ESP-post pkts received") \
- _ (HANDOFF, "Hand-off") \
- _ (SEQ_CYCLED, "sequence number cycled (packet dropped)") \
- _ (CRYPTO_ENGINE_ERROR, "crypto engine error (packet dropped)") \
- _ (CRYPTO_QUEUE_FULL, "crypto queue full (packet dropped)") \
- _ (NO_BUFFERS, "no buffers (packet dropped)")
-
-typedef enum
-{
-#define _(sym,str) ESP_ENCRYPT_ERROR_##sym,
- foreach_esp_encrypt_error
-#undef _
- ESP_ENCRYPT_N_ERROR,
-} esp_encrypt_error_t;
-
-static char *esp_encrypt_error_strings[] = {
-#define _(sym,string) string,
- foreach_esp_encrypt_error
-#undef _
-};
-
typedef struct
{
u32 sa_index;
@@ -82,6 +61,8 @@ typedef struct
u32 next_index;
} esp_encrypt_post_trace_t;
+typedef vl_counter_esp_encrypt_enum_t esp_encrypt_error_t;
+
/* packet trace format function */
static u8 *
format_esp_encrypt_trace (u8 * s, va_list * args)
@@ -114,8 +95,7 @@ format_esp_post_encrypt_trace (u8 * s, va_list * args)
/* pad packet in input buffer */
static_always_inline u8 *
esp_add_footer_and_icv (vlib_main_t *vm, vlib_buffer_t **last, u8 esp_align,
- u8 icv_sz, vlib_node_runtime_t *node,
- u16 buffer_data_size, uword total_len)
+ u8 icv_sz, u16 buffer_data_size, uword total_len)
{
static const u8 pad_data[ESP_MAX_BLOCK_SIZE] = {
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
@@ -169,11 +149,9 @@ esp_update_ip4_hdr (ip4_header_t * ip4, u16 len, int is_transport, int is_udp)
if (is_transport)
{
u8 prot = is_udp ? IP_PROTOCOL_UDP : IP_PROTOCOL_IPSEC_ESP;
-
- sum = ip_csum_update (ip4->checksum, ip4->protocol,
- prot, ip4_header_t, protocol);
+ sum = ip_csum_update (ip4->checksum, ip4->protocol, prot, ip4_header_t,
+ protocol);
ip4->protocol = prot;
-
sum = ip_csum_update (sum, old_len, len, ip4_header_t, length);
}
else
@@ -202,9 +180,9 @@ ext_hdr_is_pre_esp (u8 nexthdr)
return !u8x16_is_all_zero (ext_hdr_types == u8x16_splat (nexthdr));
#else
- return ((nexthdr ^ IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS) |
- (nexthdr ^ IP_PROTOCOL_IPV6_ROUTE) |
- (nexthdr ^ IP_PROTOCOL_IPV6_FRAGMENTATION) != 0);
+ return (!(nexthdr ^ IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS) ||
+ !(nexthdr ^ IP_PROTOCOL_IPV6_ROUTE) ||
+ !(nexthdr ^ IP_PROTOCOL_IPV6_FRAGMENTATION));
#endif
}
@@ -223,9 +201,8 @@ esp_get_ip6_hdr_len (ip6_header_t * ip6, ip6_ext_header_t ** ext_hdr)
return len;
}
- p = (void *) (ip6 + 1);
+ p = ip6_next_header (ip6);
len += ip6_ext_header_len (p);
-
while (ext_hdr_is_pre_esp (p->next_hdr))
{
len += ip6_ext_header_len (p);
@@ -236,6 +213,25 @@ esp_get_ip6_hdr_len (ip6_header_t * ip6, ip6_ext_header_t ** ext_hdr)
return len;
}
+/* IPsec IV generation: IV requirements differ depending on the
+ * encryption mode: IVs must be unpredictable for AES-CBC, whereas they
+ * can be predictable but must never be reused with the same key material
+ * for CTR and GCM.
+ * To avoid reusing the same IVs between multiple VPP instances and across
+ * restarts, we use a properly chosen PRNG to generate IVs. To ensure the IV
+ * is unpredictable for CBC, it is then encrypted using the same key as the
+ * message. Refer to NIST SP 800-38A and NIST SP 800-38D for more
+ * details. */
+static_always_inline void *
+esp_generate_iv (ipsec_sa_t *sa, void *payload, int iv_sz)
+{
+ ASSERT (iv_sz >= sizeof (u64));
+ u64 *iv = (u64 *) (payload - iv_sz);
+ clib_memset_u8 (iv, 0, iv_sz);
+ *iv = clib_pcg64i_random_r (&sa->iv_prng);
+ return iv;
+}
+
static_always_inline void
esp_process_chained_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
vnet_crypto_op_t * ops, vlib_buffer_t * b[],
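
The new esp_generate_iv() helper above fills the iv_sz bytes immediately in
front of the payload: the whole field is zeroed and its first 8 bytes are
taken from the SA's 64-bit PRNG state. A minimal standalone sketch of that
layout follows; splitmix64 is only an illustrative stand-in for the PCG
generator the code actually uses (clib_pcg64i_random_r), and none of these
names are part of the VPP API.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* stand-in 64-bit PRNG; the real code keeps a per-SA PCG64 state */
static uint64_t
splitmix64 (uint64_t *state)
{
  uint64_t z = (*state += 0x9e3779b97f4a7c15ULL);
  z = (z ^ (z >> 30)) * 0xbf58476d1ce4e5b9ULL;
  z = (z ^ (z >> 27)) * 0x94d049bb133111ebULL;
  return z ^ (z >> 31);
}

/* mimic esp_generate_iv(): zero the IV field in front of the payload and
 * fill its first 8 bytes from the PRNG */
static void *
generate_iv (uint64_t *prng, uint8_t *payload, int iv_sz)
{
  uint8_t *iv = payload - iv_sz;
  uint64_t r = splitmix64 (prng);

  memset (iv, 0, iv_sz);
  memcpy (iv, &r, sizeof (r));
  return iv;
}

int
main (void)
{
  uint8_t buf[64] = { 0 };
  uint64_t prng = 42;                    /* illustrative seed */
  uint8_t *payload = buf + 16;           /* pretend 16 bytes of headroom */
  uint8_t *iv = generate_iv (&prng, payload, 16); /* e.g. AES-CBC IV */

  for (int i = 0; i < 16; i++)
    printf ("%02x", iv[i]);
  printf ("\n");
  return 0;
}
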
@@ -257,8 +253,10 @@ esp_process_chained_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
{
u32 bi = op->user_data;
- b[bi]->error = node->errors[ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR];
- nexts[bi] = drop_next;
+ esp_encrypt_set_next_index (b[bi], node, vm->thread_index,
+ ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR,
+ bi, nexts, drop_next,
+ vnet_buffer (b[bi])->ipsec.sad_index);
n_fail--;
}
op++;
@@ -285,8 +283,10 @@ esp_process_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
{
u32 bi = op->user_data;
- b[bi]->error = node->errors[ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR];
- nexts[bi] = drop_next;
+ esp_encrypt_set_next_index (b[bi], node, vm->thread_index,
+ ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR,
+ bi, nexts, drop_next,
+ vnet_buffer (b[bi])->ipsec.sad_index);
n_fail--;
}
op++;
@@ -389,28 +389,36 @@ esp_prepare_sync_op (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
vnet_crypto_op_t *op;
vec_add2_aligned (crypto_ops[0], op, 1, CLIB_CACHE_LINE_BYTES);
vnet_crypto_op_init (op, sa0->crypto_enc_op_id);
+ u8 *crypto_start = payload;
+ /* esp_add_footer_and_icv() in esp_encrypt_inline() makes sure we always
+ * have enough space for the ESP header and footer, which includes the ICV */
+ ASSERT (payload_len > icv_sz);
+ u16 crypto_len = payload_len - icv_sz;
+
+ /* generate the IV in front of the payload */
+ void *pkt_iv = esp_generate_iv (sa0, payload, iv_sz);
- op->src = op->dst = payload;
op->key_index = sa0->crypto_key_index;
- op->len = payload_len - icv_sz;
op->user_data = bi;
if (ipsec_sa_is_set_IS_CTR (sa0))
{
- ASSERT (sizeof (u64) == iv_sz);
/* construct nonce in a scratch space in front of the IP header */
esp_ctr_nonce_t *nonce =
- (esp_ctr_nonce_t *) (payload - sizeof (u64) - hdr_len -
- sizeof (*nonce));
- u64 *pkt_iv = (u64 *) (payload - sizeof (u64));
-
+ (esp_ctr_nonce_t *) (pkt_iv - hdr_len - sizeof (*nonce));
if (ipsec_sa_is_set_IS_AEAD (sa0))
{
/* construct aad in a scratch space in front of the nonce */
op->aad = (u8 *) nonce - sizeof (esp_aead_t);
op->aad_len = esp_aad_fill (op->aad, esp, sa0, seq_hi);
- op->tag = payload + op->len;
+ op->tag = payload + crypto_len;
op->tag_len = 16;
+ if (PREDICT_FALSE (ipsec_sa_is_set_IS_NULL_GMAC (sa0)))
+ {
+ /* RFC-4543 ENCR_NULL_AUTH_AES_GMAC: IV is part of AAD */
+ crypto_start -= iv_sz;
+ crypto_len += iv_sz;
+ }
}
else
{
@@ -418,23 +426,34 @@ esp_prepare_sync_op (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
}
nonce->salt = sa0->salt;
- nonce->iv = *pkt_iv = clib_host_to_net_u64 (sa0->ctr_iv_counter++);
+ nonce->iv = *(u64 *) pkt_iv;
op->iv = (u8 *) nonce;
}
else
{
- op->iv = payload - iv_sz;
- op->flags = VNET_CRYPTO_OP_FLAG_INIT_IV;
+ /* construct zero iv in front of the IP header */
+ op->iv = pkt_iv - hdr_len - iv_sz;
+ clib_memset_u8 (op->iv, 0, iv_sz);
+ /* include iv field in crypto */
+ crypto_start -= iv_sz;
+ crypto_len += iv_sz;
}
- if (lb != b[0])
+ if (PREDICT_FALSE (lb != b[0]))
{
/* is chained */
op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
op->chunk_index = vec_len (ptd->chunks);
op->tag = vlib_buffer_get_tail (lb) - icv_sz;
- esp_encrypt_chain_crypto (vm, ptd, sa0, b[0], lb, icv_sz, payload,
- payload_len, &op->n_chunks);
+ esp_encrypt_chain_crypto (vm, ptd, sa0, b[0], lb, icv_sz,
+ crypto_start, crypto_len + icv_sz,
+ &op->n_chunks);
+ }
+ else
+ {
+ /* not chained */
+ op->src = op->dst = crypto_start;
+ op->len = crypto_len;
}
}
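
With the rewrite above, the range handed to the crypto engine is no longer a
fixed (payload, payload_len - icv_sz): CBC-style ciphers also encrypt the
zeroed IV field placed in front of the payload (so the on-wire IV becomes
unpredictable), and RFC 4543 ENCR_NULL_AUTH_AES_GMAC pulls the IV into the
authenticated range. A hedged sketch of that offset arithmetic, using
illustrative names rather than the real VPP types:

#include <stdbool.h>
#include <stdint.h>

typedef struct { uint8_t *start; uint16_t len; } crypto_range_t;

static crypto_range_t
esp_crypto_range (uint8_t *payload, uint16_t payload_len, uint8_t iv_sz,
                  uint8_t icv_sz, bool is_ctr, bool is_aead,
                  bool is_null_gmac)
{
  crypto_range_t r = { payload, (uint16_t) (payload_len - icv_sz) };

  if (is_aead && is_null_gmac)
    {
      /* RFC 4543: the IV is authenticated as part of the AAD */
      r.start -= iv_sz;
      r.len += iv_sz;
    }
  else if (!is_ctr)
    {
      /* CBC-style: encrypt the zero IV block in front of the payload */
      r.start -= iv_sz;
      r.len += iv_sz;
    }
  /* plain CTR and GCM carry the IV via the nonce and leave the range alone */
  return r;
}
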
@@ -483,33 +502,36 @@ esp_prepare_async_frame (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
esp_post_data_t *post = esp_post_data (b);
u8 *tag, *iv, *aad = 0;
u8 flag = 0;
- u32 key_index;
- i16 crypto_start_offset, integ_start_offset = 0;
+ const u32 key_index = sa->crypto_key_index;
+ i16 crypto_start_offset, integ_start_offset;
u16 crypto_total_len, integ_total_len;
post->next_index = next;
/* crypto */
- crypto_start_offset = payload - b->data;
+ crypto_start_offset = integ_start_offset = payload - b->data;
crypto_total_len = integ_total_len = payload_len - icv_sz;
tag = payload + crypto_total_len;
- key_index = sa->linked_key_index;
+ /* generate the IV in front of the payload */
+ void *pkt_iv = esp_generate_iv (sa, payload, iv_sz);
if (ipsec_sa_is_set_IS_CTR (sa))
{
- ASSERT (sizeof (u64) == iv_sz);
/* construct nonce in a scratch space in front of the IP header */
- esp_ctr_nonce_t *nonce = (esp_ctr_nonce_t *) (payload - sizeof (u64) -
- hdr_len - sizeof (*nonce));
- u64 *pkt_iv = (u64 *) (payload - sizeof (u64));
-
+ esp_ctr_nonce_t *nonce =
+ (esp_ctr_nonce_t *) (pkt_iv - hdr_len - sizeof (*nonce));
if (ipsec_sa_is_set_IS_AEAD (sa))
{
/* construct aad in a scratch space in front of the nonce */
aad = (u8 *) nonce - sizeof (esp_aead_t);
esp_aad_fill (aad, esp, sa, sa->seq_hi);
- key_index = sa->crypto_key_index;
+ if (PREDICT_FALSE (ipsec_sa_is_set_IS_NULL_GMAC (sa)))
+ {
+ /* RFC-4543 ENCR_NULL_AUTH_AES_GMAC: IV is part of AAD */
+ crypto_start_offset -= iv_sz;
+ crypto_total_len += iv_sz;
+ }
}
else
{
@@ -517,13 +539,17 @@ esp_prepare_async_frame (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
}
nonce->salt = sa->salt;
- nonce->iv = *pkt_iv = clib_host_to_net_u64 (sa->ctr_iv_counter++);
+ nonce->iv = *(u64 *) pkt_iv;
iv = (u8 *) nonce;
}
else
{
- iv = payload - iv_sz;
- flag |= VNET_CRYPTO_OP_FLAG_INIT_IV;
+ /* construct zero iv in front of the IP header */
+ iv = pkt_iv - hdr_len - iv_sz;
+ clib_memset_u8 (iv, 0, iv_sz);
+ /* include iv field in crypto */
+ crypto_start_offset -= iv_sz;
+ crypto_total_len += iv_sz;
}
if (lb != b)
@@ -531,13 +557,14 @@ esp_prepare_async_frame (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
/* chain */
flag |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
tag = vlib_buffer_get_tail (lb) - icv_sz;
- crypto_total_len = esp_encrypt_chain_crypto (vm, ptd, sa, b, lb, icv_sz,
- payload, payload_len, 0);
+ crypto_total_len = esp_encrypt_chain_crypto (
+ vm, ptd, sa, b, lb, icv_sz, b->data + crypto_start_offset,
+ crypto_total_len + icv_sz, 0);
}
if (sa->integ_op_id)
{
- integ_start_offset = crypto_start_offset - iv_sz - sizeof (esp_header_t);
+ integ_start_offset -= iv_sz + sizeof (esp_header_t);
integ_total_len += iv_sz + sizeof (esp_header_t);
if (b != lb)
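
For suites with a separate integrity algorithm, the async path now derives
integ_start_offset from the same payload offset and then widens it so the
ICV covers the ESP header and the IV in addition to the encrypted payload.
A small sketch restating that arithmetic with illustrative names (not VPP
code):

#include <stdint.h>

enum { ESP_HDR_SZ = 8 };        /* SPI (4 bytes) + sequence number (4 bytes) */

typedef struct { int16_t start; uint16_t len; } integ_span_t;

static integ_span_t
esp_integ_span (int16_t payload_offset, uint16_t payload_len,
                uint8_t iv_sz, uint8_t icv_sz)
{
  integ_span_t s = { payload_offset, (uint16_t) (payload_len - icv_sz) };

  /* pull the ESP header and the IV into the authenticated range */
  s.start -= iv_sz + ESP_HDR_SZ;
  s.len += iv_sz + ESP_HDR_SZ;
  return s;
}
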
@@ -578,6 +605,7 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
u32 current_sa_bytes = 0, spi = 0;
u8 esp_align = 4, iv_sz = 0, icv_sz = 0;
ipsec_sa_t *sa0 = 0;
+ u8 sa_drop_no_crypto = 0;
vlib_buffer_t *lb;
vnet_crypto_op_t **crypto_ops = &ptd->crypto_ops;
vnet_crypto_op_t **integ_ops = &ptd->integ_ops;
@@ -594,8 +622,8 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
ESP_ENCRYPT_NEXT_HANDOFF_MPLS));
vlib_buffer_t *sync_bufs[VLIB_FRAME_SIZE];
u16 sync_nexts[VLIB_FRAME_SIZE], *sync_next = sync_nexts, n_sync = 0;
- u16 async_nexts[VLIB_FRAME_SIZE], *async_next = async_nexts, n_async = 0;
- u16 noop_nexts[VLIB_FRAME_SIZE], *noop_next = noop_nexts, n_noop = 0;
+ u16 n_async = 0;
+ u16 noop_nexts[VLIB_FRAME_SIZE], n_noop = 0;
u32 sync_bi[VLIB_FRAME_SIZE];
u32 noop_bi[VLIB_FRAME_SIZE];
esp_encrypt_error_t err;
@@ -634,12 +662,24 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
CLIB_CACHE_LINE_BYTES, LOAD);
}
+ vnet_calc_checksums_inline (vm, b[0], b[0]->flags & VNET_BUFFER_F_IS_IP4,
+ b[0]->flags & VNET_BUFFER_F_IS_IP6);
+ vnet_calc_outer_checksums_inline (vm, b[0]);
+
if (is_tun)
{
/* we are on a ipsec tunnel's feature arc */
vnet_buffer (b[0])->ipsec.sad_index =
sa_index0 = ipsec_tun_protect_get_sa_out
(vnet_buffer (b[0])->ip.adj_index[VLIB_TX]);
+
+ if (PREDICT_FALSE (INDEX_INVALID == sa_index0))
+ {
+ err = ESP_ENCRYPT_ERROR_NO_PROTECTION;
+ noop_nexts[n_noop] = drop_next;
+ b[0]->error = node->errors[err];
+ goto trace;
+ }
}
else
sa_index0 = vnet_buffer (b[0])->ipsec.sad_index;
@@ -647,18 +687,24 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
if (sa_index0 != current_sa_index)
{
if (current_sa_packets)
- vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
- current_sa_index,
- current_sa_packets,
- current_sa_bytes);
+ vlib_increment_combined_counter (
+ &ipsec_sa_counters, thread_index, current_sa_index,
+ current_sa_packets, current_sa_bytes);
current_sa_packets = current_sa_bytes = 0;
sa0 = ipsec_sa_get (sa_index0);
+ current_sa_index = sa_index0;
+
+ sa_drop_no_crypto = ((sa0->crypto_alg == IPSEC_CRYPTO_ALG_NONE &&
+ sa0->integ_alg == IPSEC_INTEG_ALG_NONE) &&
+ !ipsec_sa_is_set_NO_ALGO_NO_DROP (sa0));
+
+ vlib_prefetch_combined_counter (&ipsec_sa_counters, thread_index,
+ current_sa_index);
/* fetch the second cacheline ASAP */
clib_prefetch_load (sa0->cacheline1);
- current_sa_index = sa_index0;
spi = clib_net_to_host_u32 (sa0->spi);
esp_align = sa0->esp_block_align;
icv_sz = sa0->integ_icv_size;
@@ -666,7 +712,15 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
is_async = im->async_mode | ipsec_sa_is_set_IS_ASYNC (sa0);
}
- if (PREDICT_FALSE (~0 == sa0->thread_index))
+ if (PREDICT_FALSE (sa_drop_no_crypto != 0))
+ {
+ err = ESP_ENCRYPT_ERROR_NO_ENCRYPTION;
+ esp_encrypt_set_next_index (b[0], node, thread_index, err, n_noop,
+ noop_nexts, drop_next, sa_index0);
+ goto trace;
+ }
+
+ if (PREDICT_FALSE ((u16) ~0 == sa0->thread_index))
{
/* this is the first packet to use this SA, claim the SA
* for this thread. this could happen simultaneously on
@@ -679,8 +733,9 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
{
vnet_buffer (b[0])->ipsec.thread_index = sa0->thread_index;
err = ESP_ENCRYPT_ERROR_HANDOFF;
- esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
- handoff_next);
+ esp_encrypt_set_next_index (b[0], node, thread_index, err, n_noop,
+ noop_nexts, handoff_next,
+ current_sa_index);
goto trace;
}
@@ -689,7 +744,8 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
if (n_bufs == 0)
{
err = ESP_ENCRYPT_ERROR_NO_BUFFERS;
- esp_set_next_index (b[0], node, err, n_noop, noop_nexts, drop_next);
+ esp_encrypt_set_next_index (b[0], node, thread_index, err, n_noop,
+ noop_nexts, drop_next, current_sa_index);
goto trace;
}
@@ -703,7 +759,8 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
if (PREDICT_FALSE (esp_seq_advance (sa0)))
{
err = ESP_ENCRYPT_ERROR_SEQ_CYCLED;
- esp_set_next_index (b[0], node, err, n_noop, noop_nexts, drop_next);
+ esp_encrypt_set_next_index (b[0], node, thread_index, err, n_noop,
+ noop_nexts, drop_next, current_sa_index);
goto trace;
}
@@ -714,13 +771,14 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
{
payload = vlib_buffer_get_current (b[0]);
next_hdr_ptr = esp_add_footer_and_icv (
- vm, &lb, esp_align, icv_sz, node, buffer_data_size,
+ vm, &lb, esp_align, icv_sz, buffer_data_size,
vlib_buffer_length_in_chain (vm, b[0]));
if (!next_hdr_ptr)
{
err = ESP_ENCRYPT_ERROR_NO_BUFFERS;
- esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
- drop_next);
+ esp_encrypt_set_next_index (b[0], node, thread_index, err,
+ n_noop, noop_nexts, drop_next,
+ current_sa_index);
goto trace;
}
b[0]->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
@@ -823,27 +881,41 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
}
else /* transport mode */
{
- u8 *l2_hdr, l2_len, *ip_hdr, ip_len;
+ u8 *l2_hdr, l2_len, *ip_hdr;
+ u16 ip_len;
ip6_ext_header_t *ext_hdr;
udp_header_t *udp = 0;
u16 udp_len = 0;
u8 *old_ip_hdr = vlib_buffer_get_current (b[0]);
+ /*
+ * Get extension header chain length. It might be longer than the
+ * buffer's pre_data area.
+ */
ip_len =
(VNET_LINK_IP6 == lt ?
esp_get_ip6_hdr_len ((ip6_header_t *) old_ip_hdr, &ext_hdr) :
ip4_header_bytes ((ip4_header_t *) old_ip_hdr));
+ if ((old_ip_hdr - ip_len) < &b[0]->pre_data[0])
+ {
+ err = ESP_ENCRYPT_ERROR_NO_BUFFERS;
+ esp_encrypt_set_next_index (b[0], node, thread_index, err,
+ n_noop, noop_nexts, drop_next,
+ current_sa_index);
+ goto trace;
+ }
vlib_buffer_advance (b[0], ip_len);
payload = vlib_buffer_get_current (b[0]);
next_hdr_ptr = esp_add_footer_and_icv (
- vm, &lb, esp_align, icv_sz, node, buffer_data_size,
+ vm, &lb, esp_align, icv_sz, buffer_data_size,
vlib_buffer_length_in_chain (vm, b[0]));
if (!next_hdr_ptr)
{
err = ESP_ENCRYPT_ERROR_NO_BUFFERS;
- esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
- drop_next);
+ esp_encrypt_set_next_index (b[0], node, thread_index, err,
+ n_noop, noop_nexts, drop_next,
+ current_sa_index);
goto trace;
}
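
Transport mode now also rejects packets whose IP header (including any IPv6
extension-header chain) cannot be shifted into the buffer's pre_data
headroom; such packets are dropped with ESP_ENCRYPT_ERROR_NO_BUFFERS rather
than writing before the start of the buffer. A minimal sketch of the bounds
check, with hypothetical names:

#include <stdbool.h>
#include <stdint.h>

/* mirrors the new check: (old_ip_hdr - ip_len) < &b[0]->pre_data[0] => drop */
static bool
ip_hdr_fits_in_headroom (const uint8_t *old_ip_hdr, uint16_t ip_hdr_len,
                         const uint8_t *pre_data_start)
{
  return (old_ip_hdr - ip_hdr_len) >= pre_data_start;
}
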
@@ -879,42 +951,40 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
else
l2_len = 0;
+ u16 len;
+ len = payload_len_total + hdr_len - l2_len;
+
if (VNET_LINK_IP6 == lt)
{
ip6_header_t *ip6 = (ip6_header_t *) (old_ip_hdr);
if (PREDICT_TRUE (NULL == ext_hdr))
{
*next_hdr_ptr = ip6->protocol;
- ip6->protocol = IP_PROTOCOL_IPSEC_ESP;
+ ip6->protocol =
+ (udp) ? IP_PROTOCOL_UDP : IP_PROTOCOL_IPSEC_ESP;
}
else
{
*next_hdr_ptr = ext_hdr->next_hdr;
- ext_hdr->next_hdr = IP_PROTOCOL_IPSEC_ESP;
+ ext_hdr->next_hdr =
+ (udp) ? IP_PROTOCOL_UDP : IP_PROTOCOL_IPSEC_ESP;
}
ip6->payload_length =
- clib_host_to_net_u16 (payload_len_total + hdr_len - l2_len -
- sizeof (ip6_header_t));
+ clib_host_to_net_u16 (len - sizeof (ip6_header_t));
}
else if (VNET_LINK_IP4 == lt)
{
- u16 len;
ip4_header_t *ip4 = (ip4_header_t *) (old_ip_hdr);
*next_hdr_ptr = ip4->protocol;
- len = payload_len_total + hdr_len - l2_len;
- if (udp)
- {
- esp_update_ip4_hdr (ip4, len, /* is_transport */ 1, 1);
- udp_len = len - ip_len;
- }
- else
- esp_update_ip4_hdr (ip4, len, /* is_transport */ 1, 0);
+ esp_update_ip4_hdr (ip4, len, /* is_transport */ 1,
+ (udp != NULL));
}
clib_memcpy_le64 (ip_hdr, old_ip_hdr, ip_len);
if (udp)
{
+ udp_len = len - ip_len;
esp_fill_udp_hdr (sa0, udp, udp_len);
}
@@ -946,6 +1016,16 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
{
async_frames[async_op] =
vnet_crypto_async_get_frame (vm, async_op);
+
+ if (PREDICT_FALSE (!async_frames[async_op]))
+ {
+ err = ESP_ENCRYPT_ERROR_NO_AVAIL_FRAME;
+ esp_encrypt_set_next_index (b[0], node, thread_index, err,
+ n_noop, noop_nexts, drop_next,
+ current_sa_index);
+ goto trace;
+ }
+
/* Save the frame to the list we'll submit at the end */
vec_add1 (ptd->async_frames, async_frames[async_op]);
}
@@ -970,13 +1050,18 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
{
esp_encrypt_trace_t *tr = vlib_add_trace (vm, node, b[0],
sizeof (*tr));
- tr->sa_index = sa_index0;
- tr->spi = sa0->spi;
- tr->seq = sa0->seq;
- tr->sa_seq_hi = sa0->seq_hi;
- tr->udp_encap = ipsec_sa_is_set_UDP_ENCAP (sa0);
- tr->crypto_alg = sa0->crypto_alg;
- tr->integ_alg = sa0->integ_alg;
+ if (INDEX_INVALID == sa_index0)
+ clib_memset_u8 (tr, 0xff, sizeof (*tr));
+ else
+ {
+ tr->sa_index = sa_index0;
+ tr->spi = sa0->spi;
+ tr->seq = sa0->seq;
+ tr->sa_seq_hi = sa0->seq_hi;
+ tr->udp_encap = ipsec_sa_is_set_UDP_ENCAP (sa0);
+ tr->crypto_alg = sa0->crypto_alg;
+ tr->integ_alg = sa0->integ_alg;
+ }
}
/* next */
@@ -984,7 +1069,6 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
{
noop_bi[n_noop] = from[b - bufs];
n_noop++;
- noop_next++;
}
else if (!is_async)
{
@@ -996,15 +1080,15 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
else
{
n_async++;
- async_next++;
}
n_left -= 1;
b += 1;
}
- vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
- current_sa_index, current_sa_packets,
- current_sa_bytes);
+ if (INDEX_INVALID != current_sa_index)
+ vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
+ current_sa_index, current_sa_packets,
+ current_sa_bytes);
if (n_sync)
{
esp_process_ops (vm, node, ptd->crypto_ops, sync_bufs, sync_nexts,
@@ -1030,7 +1114,8 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
{
n_noop += esp_async_recycle_failed_submit (
vm, *async_frame, node, ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR,
- n_sync, noop_bi, noop_nexts, drop_next);
+ IPSEC_SA_ERROR_CRYPTO_ENGINE_ERROR, n_noop, noop_bi,
+ noop_nexts, drop_next, true);
vnet_crypto_async_reset_frame (*async_frame);
vnet_crypto_async_free_frame (vm, *async_frame);
}
@@ -1139,15 +1224,14 @@ VLIB_NODE_FN (esp4_encrypt_node) (vlib_main_t * vm,
esp_encrypt_async_next.esp4_post_next);
}
-/* *INDENT-OFF* */
VLIB_REGISTER_NODE (esp4_encrypt_node) = {
.name = "esp4-encrypt",
.vector_size = sizeof (u32),
.format_trace = format_esp_encrypt_trace,
.type = VLIB_NODE_TYPE_INTERNAL,
- .n_errors = ARRAY_LEN (esp_encrypt_error_strings),
- .error_strings = esp_encrypt_error_strings,
+ .n_errors = ESP_ENCRYPT_N_ERROR,
+ .error_counters = esp_encrypt_error_counters,
.n_next_nodes = ESP_ENCRYPT_N_NEXT,
.next_nodes = { [ESP_ENCRYPT_NEXT_DROP4] = "ip4-drop",
@@ -1158,7 +1242,6 @@ VLIB_REGISTER_NODE (esp4_encrypt_node) = {
[ESP_ENCRYPT_NEXT_HANDOFF_MPLS] = "error-drop",
[ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "interface-output" },
};
-/* *INDENT-ON* */
VLIB_NODE_FN (esp4_encrypt_post_node) (vlib_main_t * vm,
vlib_node_runtime_t * node,
@@ -1167,7 +1250,6 @@ VLIB_NODE_FN (esp4_encrypt_post_node) (vlib_main_t * vm,
return esp_encrypt_post_inline (vm, node, from_frame);
}
-/* *INDENT-OFF* */
VLIB_REGISTER_NODE (esp4_encrypt_post_node) = {
.name = "esp4-encrypt-post",
.vector_size = sizeof (u32),
@@ -1175,10 +1257,9 @@ VLIB_REGISTER_NODE (esp4_encrypt_post_node) = {
.type = VLIB_NODE_TYPE_INTERNAL,
.sibling_of = "esp4-encrypt",
- .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
- .error_strings = esp_encrypt_error_strings,
+ .n_errors = ESP_ENCRYPT_N_ERROR,
+ .error_counters = esp_encrypt_error_counters,
};
-/* *INDENT-ON* */
VLIB_NODE_FN (esp6_encrypt_node) (vlib_main_t * vm,
vlib_node_runtime_t * node,
@@ -1188,7 +1269,6 @@ VLIB_NODE_FN (esp6_encrypt_node) (vlib_main_t * vm,
esp_encrypt_async_next.esp6_post_next);
}
-/* *INDENT-OFF* */
VLIB_REGISTER_NODE (esp6_encrypt_node) = {
.name = "esp6-encrypt",
.vector_size = sizeof (u32),
@@ -1196,10 +1276,9 @@ VLIB_REGISTER_NODE (esp6_encrypt_node) = {
.type = VLIB_NODE_TYPE_INTERNAL,
.sibling_of = "esp4-encrypt",
- .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
- .error_strings = esp_encrypt_error_strings,
+ .n_errors = ESP_ENCRYPT_N_ERROR,
+ .error_counters = esp_encrypt_error_counters,
};
-/* *INDENT-ON* */
VLIB_NODE_FN (esp6_encrypt_post_node) (vlib_main_t * vm,
vlib_node_runtime_t * node,
@@ -1208,7 +1287,6 @@ VLIB_NODE_FN (esp6_encrypt_post_node) (vlib_main_t * vm,
return esp_encrypt_post_inline (vm, node, from_frame);
}
-/* *INDENT-OFF* */
VLIB_REGISTER_NODE (esp6_encrypt_post_node) = {
.name = "esp6-encrypt-post",
.vector_size = sizeof (u32),
@@ -1216,10 +1294,9 @@ VLIB_REGISTER_NODE (esp6_encrypt_post_node) = {
.type = VLIB_NODE_TYPE_INTERNAL,
.sibling_of = "esp4-encrypt",
- .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
- .error_strings = esp_encrypt_error_strings,
+ .n_errors = ESP_ENCRYPT_N_ERROR,
+ .error_counters = esp_encrypt_error_counters,
};
-/* *INDENT-ON* */
VLIB_NODE_FN (esp4_encrypt_tun_node) (vlib_main_t * vm,
vlib_node_runtime_t * node,
@@ -1229,15 +1306,14 @@ VLIB_NODE_FN (esp4_encrypt_tun_node) (vlib_main_t * vm,
esp_encrypt_async_next.esp4_tun_post_next);
}
-/* *INDENT-OFF* */
VLIB_REGISTER_NODE (esp4_encrypt_tun_node) = {
.name = "esp4-encrypt-tun",
.vector_size = sizeof (u32),
.format_trace = format_esp_encrypt_trace,
.type = VLIB_NODE_TYPE_INTERNAL,
- .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
- .error_strings = esp_encrypt_error_strings,
+ .n_errors = ESP_ENCRYPT_N_ERROR,
+ .error_counters = esp_encrypt_error_counters,
.n_next_nodes = ESP_ENCRYPT_N_NEXT,
.next_nodes = {
@@ -1258,7 +1334,6 @@ VLIB_NODE_FN (esp4_encrypt_tun_post_node) (vlib_main_t * vm,
return esp_encrypt_post_inline (vm, node, from_frame);
}
-/* *INDENT-OFF* */
VLIB_REGISTER_NODE (esp4_encrypt_tun_post_node) = {
.name = "esp4-encrypt-tun-post",
.vector_size = sizeof (u32),
@@ -1266,10 +1341,9 @@ VLIB_REGISTER_NODE (esp4_encrypt_tun_post_node) = {
.type = VLIB_NODE_TYPE_INTERNAL,
.sibling_of = "esp4-encrypt-tun",
- .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
- .error_strings = esp_encrypt_error_strings,
+ .n_errors = ESP_ENCRYPT_N_ERROR,
+ .error_counters = esp_encrypt_error_counters,
};
-/* *INDENT-ON* */
VLIB_NODE_FN (esp6_encrypt_tun_node) (vlib_main_t * vm,
vlib_node_runtime_t * node,
@@ -1279,15 +1353,14 @@ VLIB_NODE_FN (esp6_encrypt_tun_node) (vlib_main_t * vm,
esp_encrypt_async_next.esp6_tun_post_next);
}
-/* *INDENT-OFF* */
VLIB_REGISTER_NODE (esp6_encrypt_tun_node) = {
.name = "esp6-encrypt-tun",
.vector_size = sizeof (u32),
.format_trace = format_esp_encrypt_trace,
.type = VLIB_NODE_TYPE_INTERNAL,
- .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
- .error_strings = esp_encrypt_error_strings,
+ .n_errors = ESP_ENCRYPT_N_ERROR,
+ .error_counters = esp_encrypt_error_counters,
.n_next_nodes = ESP_ENCRYPT_N_NEXT,
.next_nodes = {
@@ -1301,7 +1374,6 @@ VLIB_REGISTER_NODE (esp6_encrypt_tun_node) = {
},
};
-/* *INDENT-ON* */
VLIB_NODE_FN (esp6_encrypt_tun_post_node) (vlib_main_t * vm,
vlib_node_runtime_t * node,
@@ -1310,7 +1382,6 @@ VLIB_NODE_FN (esp6_encrypt_tun_post_node) (vlib_main_t * vm,
return esp_encrypt_post_inline (vm, node, from_frame);
}
-/* *INDENT-OFF* */
VLIB_REGISTER_NODE (esp6_encrypt_tun_post_node) = {
.name = "esp6-encrypt-tun-post",
.vector_size = sizeof (u32),
@@ -1318,10 +1389,9 @@ VLIB_REGISTER_NODE (esp6_encrypt_tun_post_node) = {
.type = VLIB_NODE_TYPE_INTERNAL,
.sibling_of = "esp-mpls-encrypt-tun",
- .n_errors = ARRAY_LEN (esp_encrypt_error_strings),
- .error_strings = esp_encrypt_error_strings,
+ .n_errors = ESP_ENCRYPT_N_ERROR,
+ .error_counters = esp_encrypt_error_counters,
};
-/* *INDENT-ON* */
VLIB_NODE_FN (esp_mpls_encrypt_tun_node)
(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *from_frame)
@@ -1336,8 +1406,8 @@ VLIB_REGISTER_NODE (esp_mpls_encrypt_tun_node) = {
.format_trace = format_esp_encrypt_trace,
.type = VLIB_NODE_TYPE_INTERNAL,
- .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
- .error_strings = esp_encrypt_error_strings,
+ .n_errors = ESP_ENCRYPT_N_ERROR,
+ .error_counters = esp_encrypt_error_counters,
.n_next_nodes = ESP_ENCRYPT_N_NEXT,
.next_nodes = {
@@ -1364,123 +1434,9 @@ VLIB_REGISTER_NODE (esp_mpls_encrypt_tun_post_node) = {
.type = VLIB_NODE_TYPE_INTERNAL,
.sibling_of = "esp-mpls-encrypt-tun",
- .n_errors = ARRAY_LEN (esp_encrypt_error_strings),
- .error_strings = esp_encrypt_error_strings,
-};
-
-typedef struct
-{
- u32 sa_index;
-} esp_no_crypto_trace_t;
-
-static u8 *
-format_esp_no_crypto_trace (u8 * s, va_list * args)
-{
- CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
- CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
- esp_no_crypto_trace_t *t = va_arg (*args, esp_no_crypto_trace_t *);
-
- s = format (s, "esp-no-crypto: sa-index %u", t->sa_index);
-
- return s;
-}
-
-enum
-{
- ESP_NO_CRYPTO_NEXT_DROP,
- ESP_NO_CRYPTO_N_NEXT,
-};
-
-enum
-{
- ESP_NO_CRYPTO_ERROR_RX_PKTS,
-};
-
-static char *esp_no_crypto_error_strings[] = {
- "Outbound ESP packets received",
-};
-
-always_inline uword
-esp_no_crypto_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
- vlib_frame_t * frame)
-{
- vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
- u32 *from = vlib_frame_vector_args (frame);
- u32 n_left = frame->n_vectors;
-
- vlib_get_buffers (vm, from, b, n_left);
-
- while (n_left > 0)
- {
- u32 sa_index0;
-
- /* packets are always going to be dropped, but get the sa_index */
- sa_index0 = ipsec_tun_protect_get_sa_out
- (vnet_buffer (b[0])->ip.adj_index[VLIB_TX]);
-
- if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
- {
- esp_no_crypto_trace_t *tr = vlib_add_trace (vm, node, b[0],
- sizeof (*tr));
- tr->sa_index = sa_index0;
- }
-
- n_left -= 1;
- b += 1;
- }
-
- vlib_node_increment_counter (vm, node->node_index,
- ESP_NO_CRYPTO_ERROR_RX_PKTS, frame->n_vectors);
-
- vlib_buffer_enqueue_to_single_next (vm, node, from,
- ESP_NO_CRYPTO_NEXT_DROP,
- frame->n_vectors);
-
- return frame->n_vectors;
-}
-
-VLIB_NODE_FN (esp4_no_crypto_tun_node) (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * from_frame)
-{
- return esp_no_crypto_inline (vm, node, from_frame);
-}
-
-/* *INDENT-OFF* */
-VLIB_REGISTER_NODE (esp4_no_crypto_tun_node) =
-{
- .name = "esp4-no-crypto",
- .vector_size = sizeof (u32),
- .format_trace = format_esp_no_crypto_trace,
- .n_errors = ARRAY_LEN(esp_no_crypto_error_strings),
- .error_strings = esp_no_crypto_error_strings,
- .n_next_nodes = ESP_NO_CRYPTO_N_NEXT,
- .next_nodes = {
- [ESP_NO_CRYPTO_NEXT_DROP] = "ip4-drop",
- },
-};
-
-VLIB_NODE_FN (esp6_no_crypto_tun_node) (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * from_frame)
-{
- return esp_no_crypto_inline (vm, node, from_frame);
-}
-
-/* *INDENT-OFF* */
-VLIB_REGISTER_NODE (esp6_no_crypto_tun_node) =
-{
- .name = "esp6-no-crypto",
- .vector_size = sizeof (u32),
- .format_trace = format_esp_no_crypto_trace,
- .n_errors = ARRAY_LEN(esp_no_crypto_error_strings),
- .error_strings = esp_no_crypto_error_strings,
- .n_next_nodes = ESP_NO_CRYPTO_N_NEXT,
- .next_nodes = {
- [ESP_NO_CRYPTO_NEXT_DROP] = "ip6-drop",
- },
+ .n_errors = ESP_ENCRYPT_N_ERROR,
+ .error_counters = esp_encrypt_error_counters,
};
-/* *INDENT-ON* */
#ifndef CLIB_MARCH_VARIANT