Diffstat (limited to 'src/vnet/ipsec')
-rw-r--r--  src/vnet/ipsec/esp.h              35
-rw-r--r--  src/vnet/ipsec/esp_decrypt.c     179
-rw-r--r--  src/vnet/ipsec/esp_encrypt.c     181
-rw-r--r--  src/vnet/ipsec/ipsec.c            11
-rw-r--r--  src/vnet/ipsec/ipsec_api.c         1
-rw-r--r--  src/vnet/ipsec/ipsec_cli.c         7
-rw-r--r--  src/vnet/ipsec/ipsec_sa.c         12
-rw-r--r--  src/vnet/ipsec/ipsec_sa.h          3
-rw-r--r--  src/vnet/ipsec/ipsec_types.api     2
-rw-r--r--  src/vnet/ipsec/ipsec_types_api.c   4
10 files changed, 231 insertions, 204 deletions
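
In brief: the ESP encrypt/decrypt nodes are reworked to file packets into separate sync, async and no-op (drop/hand-off) queues instead of one shared nexts array; async crypto can now be requested per SA through a new IS_ASYNC flag (CLI keyword "async", API flag IPSEC_API_SAD_FLAG_ASYNC); and ipsec_set_async_mode () takes over the vnet_crypto_request_async_mode () call from its API/CLI callers.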
diff --git a/src/vnet/ipsec/esp.h b/src/vnet/ipsec/esp.h
index 51386e68844..a0643c3b939 100644
--- a/src/vnet/ipsec/esp.h
+++ b/src/vnet/ipsec/esp.h
@@ -146,38 +146,33 @@ esp_aad_fill (u8 * data, const esp_header_t * esp, const ipsec_sa_t * sa)
* to next nodes.
*/
always_inline void
-esp_set_next_index (int is_async, u32 * from, u16 * nexts, u32 bi,
- u16 * drop_index, u16 drop_next, u16 * next)
+esp_set_next_index (vlib_buffer_t *b, vlib_node_runtime_t *node, u32 err,
+ u16 index, u16 *nexts, u16 drop_next)
{
- if (is_async)
- {
- from[*drop_index] = bi;
- nexts[*drop_index] = drop_next;
- *drop_index += 1;
- }
- else
- next[0] = drop_next;
+ nexts[index] = drop_next;
+ b->error = node->errors[err];
}
/* when submitting a frame fails, drop all buffers in the frame */
-always_inline void
-esp_async_recycle_failed_submit (vnet_crypto_async_frame_t * f,
- vlib_buffer_t ** b, u32 * from, u16 * nexts,
- u16 * n_dropped, u16 drop_next_index,
- vlib_error_t err)
+always_inline u32
+esp_async_recycle_failed_submit (vlib_main_t *vm, vnet_crypto_async_frame_t *f,
+ vlib_node_runtime_t *node, u32 err, u16 index,
+ u32 *from, u16 *nexts, u16 drop_next_index)
{
  u32 n_drop = f->n_elts;
+  u32 n_total = n_drop;
  u32 *bi = f->buffer_indices;
- b -= n_drop;
+
while (n_drop--)
{
- b[0]->error = err;
- esp_set_next_index (1, from, nexts, bi[0], n_dropped, drop_next_index,
- NULL);
+ from[index] = bi[0];
+ esp_set_next_index (vlib_get_buffer (vm, bi[0]), node, err, index, nexts,
+ drop_next_index);
bi++;
- b++;
+ index++;
}
vnet_crypto_async_reset_frame (f);
+
+  /* vnet_crypto_async_reset_frame () above clears f->n_elts, so return
+   * the element count saved before the drain loop */
+  return (n_total);
}
/**
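
A hedged usage sketch of the two reworked helpers, as the encrypt/decrypt loops below employ them; n_noop, noop_bi and noop_nexts stand for the per-frame queues those nodes declare, and the error/next values are illustrative:

  /* sketch: steer one packet to the no-op (drop) queue, stamping its
     error counter on the buffer */
  esp_set_next_index (b[0], node, ESP_DECRYPT_ERROR_REPLAY, n_noop,
                      noop_nexts, ESP_DECRYPT_NEXT_DROP);
  noop_bi[n_noop++] = from[b - bufs];

  /* sketch: on a failed submit, recycle every element of the async frame
     into the same queue; the returned count advances n_noop */
  n_noop += esp_async_recycle_failed_submit (
    vm, frame, node, ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR, n_noop, noop_bi,
    noop_nexts, ESP_DECRYPT_NEXT_DROP);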
diff --git a/src/vnet/ipsec/esp_decrypt.c b/src/vnet/ipsec/esp_decrypt.c
index 141b1b987d4..ea5a99c6fa1 100644
--- a/src/vnet/ipsec/esp_decrypt.c
+++ b/src/vnet/ipsec/esp_decrypt.c
@@ -58,20 +58,20 @@ typedef enum
ESP_DECRYPT_POST_N_NEXT,
} esp_decrypt_post_next_t;
-#define foreach_esp_decrypt_error \
- _(RX_PKTS, "ESP pkts received") \
- _(RX_POST_PKTS, "ESP-POST pkts received") \
- _(DECRYPTION_FAILED, "ESP decryption failed") \
- _(INTEG_ERROR, "Integrity check failed") \
- _(CRYPTO_ENGINE_ERROR, "crypto engine error (packet dropped)") \
- _(REPLAY, "SA replayed packet") \
- _(RUNT, "undersized packet") \
- _(NO_BUFFERS, "no buffers (packet dropped)") \
- _(OVERSIZED_HEADER, "buffer with oversized header (dropped)") \
- _(NO_TAIL_SPACE, "no enough buffer tail space (dropped)") \
- _(TUN_NO_PROTO, "no tunnel protocol") \
- _(UNSUP_PAYLOAD, "unsupported payload") \
-
+#define foreach_esp_decrypt_error \
+ _ (RX_PKTS, "ESP pkts received") \
+ _ (RX_POST_PKTS, "ESP-POST pkts received") \
+ _ (HANDOFF, "hand-off") \
+ _ (DECRYPTION_FAILED, "ESP decryption failed") \
+ _ (INTEG_ERROR, "Integrity check failed") \
+ _ (CRYPTO_ENGINE_ERROR, "crypto engine error (packet dropped)") \
+ _ (REPLAY, "SA replayed packet") \
+ _ (RUNT, "undersized packet") \
+ _ (NO_BUFFERS, "no buffers (packet dropped)") \
+ _ (OVERSIZED_HEADER, "buffer with oversized header (dropped)") \
+ _ (NO_TAIL_SPACE, "not enough buffer tail space (dropped)") \
+ _ (TUN_NO_PROTO, "no tunnel protocol") \
+ _ (UNSUP_PAYLOAD, "unsupported payload")
typedef enum
{
@@ -154,7 +154,7 @@ esp_process_chained_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
vnet_crypto_op_t *op = ops;
u32 n_fail, n_ops = vec_len (ops);
- if (n_ops == 0)
+ if (PREDICT_TRUE (n_ops == 0))
return;
n_fail = n_ops - vnet_crypto_process_chained_ops (vm, op, chunks, n_ops);
@@ -1009,9 +1009,9 @@ esp_decrypt_post_crypto (vlib_main_t * vm, vlib_node_runtime_t * node,
}
always_inline uword
-esp_decrypt_inline (vlib_main_t * vm,
- vlib_node_runtime_t * node, vlib_frame_t * from_frame,
- int is_ip6, int is_tun, u16 async_next)
+esp_decrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
+ vlib_frame_t *from_frame, int is_ip6, int is_tun,
+ u16 async_next_node)
{
ipsec_main_t *im = &ipsec_main;
u32 thread_index = vm->thread_index;
@@ -1020,7 +1020,12 @@ esp_decrypt_inline (vlib_main_t * vm,
u32 *from = vlib_frame_vector_args (from_frame);
u32 n_left = from_frame->n_vectors;
vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
- u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
+ vlib_buffer_t *sync_bufs[VLIB_FRAME_SIZE];
+ u16 sync_nexts[VLIB_FRAME_SIZE], *sync_next = sync_nexts, n_sync = 0;
+ u16 async_nexts[VLIB_FRAME_SIZE], *async_next = async_nexts, n_async = 0;
+ u16 noop_nexts[VLIB_FRAME_SIZE], *noop_next = noop_nexts, n_noop = 0;
+ u32 sync_bi[VLIB_FRAME_SIZE];
+ u32 noop_bi[VLIB_FRAME_SIZE];
esp_decrypt_packet_data_t pkt_data[VLIB_FRAME_SIZE], *pd = pkt_data;
esp_decrypt_packet_data2_t pkt_data2[VLIB_FRAME_SIZE], *pd2 = pkt_data2;
esp_decrypt_packet_data_t cpd = { };
@@ -1032,8 +1037,8 @@ esp_decrypt_inline (vlib_main_t * vm,
vnet_crypto_op_t **integ_ops = &ptd->integ_ops;
int is_async = im->async_mode;
vnet_crypto_async_op_id_t async_op = ~0;
- u16 n_async_drop = 0;
vnet_crypto_async_frame_t *async_frames[VNET_CRYPTO_ASYNC_OP_N_IDS];
+ esp_decrypt_error_t err;
vlib_get_buffers (vm, from, b, n_left);
if (!is_async)
@@ -1045,13 +1050,14 @@ esp_decrypt_inline (vlib_main_t * vm,
}
vec_reset_length (ptd->async_frames);
vec_reset_length (ptd->chunks);
- clib_memset_u16 (nexts, -1, n_left);
+ clib_memset (sync_nexts, -1, sizeof (sync_nexts));
clib_memset (async_frames, 0, sizeof (async_frames));
while (n_left > 0)
{
u8 *payload;
+ err = ESP_DECRYPT_ERROR_RX_PKTS;
if (n_left > 2)
{
u8 *p;
@@ -1065,10 +1071,9 @@ esp_decrypt_inline (vlib_main_t * vm,
u32 n_bufs = vlib_buffer_chain_linearize (vm, b[0]);
if (n_bufs == 0)
{
- b[0]->error = node->errors[ESP_DECRYPT_ERROR_NO_BUFFERS];
- esp_set_next_index (is_async, from, nexts, from[b - bufs],
- &n_async_drop, ESP_DECRYPT_NEXT_DROP, next);
- next[0] = ESP_DECRYPT_NEXT_DROP;
+ err = ESP_DECRYPT_ERROR_NO_BUFFERS;
+ esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
+ ESP_DECRYPT_NEXT_DROP);
goto next;
}
@@ -1090,19 +1095,13 @@ esp_decrypt_inline (vlib_main_t * vm,
cpd.iv_sz = sa0->crypto_iv_size;
cpd.flags = sa0->flags;
cpd.sa_index = current_sa_index;
+ is_async = im->async_mode | ipsec_sa_is_set_IS_ASYNC (sa0);
}
if (is_async)
{
async_op = sa0->crypto_async_dec_op_id;
- if (PREDICT_FALSE (async_op == 0))
- {
- esp_set_next_index (is_async, from, nexts, from[b - bufs],
- &n_async_drop, ESP_DECRYPT_NEXT_DROP, next);
- goto next;
- }
-
/* get a frame for this op if we don't yet have one or it's full
*/
if (NULL == async_frames[async_op] ||
@@ -1127,9 +1126,9 @@ esp_decrypt_inline (vlib_main_t * vm,
if (PREDICT_FALSE (thread_index != sa0->thread_index))
{
vnet_buffer (b[0])->ipsec.thread_index = sa0->thread_index;
- esp_set_next_index (is_async, from, nexts, from[b - bufs],
- &n_async_drop, ESP_DECRYPT_NEXT_HANDOFF, next);
- next[0] = ESP_DECRYPT_NEXT_HANDOFF;
+ err = ESP_DECRYPT_ERROR_HANDOFF;
+ esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
+ ESP_DECRYPT_NEXT_HANDOFF);
goto next;
}
@@ -1160,17 +1159,17 @@ esp_decrypt_inline (vlib_main_t * vm,
/* anti-replay check */
if (ipsec_sa_anti_replay_check (sa0, pd->seq))
{
- b[0]->error = node->errors[ESP_DECRYPT_ERROR_REPLAY];
- esp_set_next_index (is_async, from, nexts, from[b - bufs],
- &n_async_drop, ESP_DECRYPT_NEXT_DROP, next);
+ err = ESP_DECRYPT_ERROR_REPLAY;
+ esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
+ ESP_DECRYPT_NEXT_DROP);
goto next;
}
if (pd->current_length < cpd.icv_sz + esp_sz + cpd.iv_sz)
{
- b[0]->error = node->errors[ESP_DECRYPT_ERROR_RUNT];
- esp_set_next_index (is_async, from, nexts, from[b - bufs],
- &n_async_drop, ESP_DECRYPT_NEXT_DROP, next);
+ err = ESP_DECRYPT_ERROR_RUNT;
+ esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
+ ESP_DECRYPT_NEXT_DROP);
goto next;
}
@@ -1180,30 +1179,44 @@ esp_decrypt_inline (vlib_main_t * vm,
if (is_async)
{
- esp_decrypt_error_t err;
err = esp_decrypt_prepare_async_frame (
vm, node, ptd, async_frames[async_op], sa0, payload, len,
- cpd.icv_sz, cpd.iv_sz, pd, pd2, from[b - bufs], b[0], next,
- async_next);
+ cpd.icv_sz, cpd.iv_sz, pd, pd2, from[b - bufs], b[0], async_next,
+ async_next_node);
if (ESP_DECRYPT_ERROR_RX_PKTS != err)
{
- b[0]->error = err;
- esp_set_next_index (1, from, nexts, from[b - bufs],
- &n_async_drop, ESP_DECRYPT_NEXT_DROP, next);
+ esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
+ ESP_DECRYPT_NEXT_DROP);
}
}
else
- esp_decrypt_prepare_sync_op (vm, node, ptd, &crypto_ops, &integ_ops,
- op, sa0, payload, len, cpd.icv_sz,
- cpd.iv_sz, pd, pd2, b[0], next,
- b - bufs);
+ esp_decrypt_prepare_sync_op (
+ vm, node, ptd, &crypto_ops, &integ_ops, op, sa0, payload, len,
+ cpd.icv_sz, cpd.iv_sz, pd, pd2, b[0], sync_next, b - bufs);
/* next */
next:
+ if (ESP_DECRYPT_ERROR_RX_PKTS != err)
+ {
+ noop_bi[n_noop] = from[b - bufs];
+ n_noop++;
+ noop_next++;
+ }
+ else if (!is_async)
+ {
+ sync_bi[n_sync] = from[b - bufs];
+ sync_bufs[n_sync] = b[0];
+ n_sync++;
+ sync_next++;
+ pd += 1;
+ pd2 += 1;
+ }
+ else
+ {
+ n_async++;
+ async_next++;
+ }
n_left -= 1;
- next += 1;
- pd += 1;
- pd2 += 1;
b += 1;
}
@@ -1212,7 +1225,7 @@ esp_decrypt_inline (vlib_main_t * vm,
current_sa_index, current_sa_pkts,
current_sa_bytes);
- if (is_async)
+ if (n_async)
{
/* submit all of the open frames */
vnet_crypto_async_frame_t **async_frame;
@@ -1221,45 +1234,38 @@ esp_decrypt_inline (vlib_main_t * vm,
{
if (vnet_crypto_async_submit_open_frame (vm, *async_frame) < 0)
{
- esp_async_recycle_failed_submit (
- *async_frame, b, from, nexts, &n_async_drop,
- ESP_DECRYPT_NEXT_DROP, ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR);
+ n_noop += esp_async_recycle_failed_submit (
+ vm, *async_frame, node, ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR,
+ n_noop, noop_bi, noop_nexts, ESP_DECRYPT_NEXT_DROP);
vnet_crypto_async_reset_frame (*async_frame);
vnet_crypto_async_free_frame (vm, *async_frame);
}
}
-
- /* no post process in async */
- vlib_node_increment_counter (vm, node->node_index,
- ESP_DECRYPT_ERROR_RX_PKTS,
- from_frame->n_vectors);
- if (n_async_drop)
- vlib_buffer_enqueue_to_next (vm, node, from, nexts, n_async_drop);
-
- return n_left;
}
- else
+
+ if (n_sync)
{
- esp_process_ops (vm, node, ptd->integ_ops, bufs, nexts,
+ esp_process_ops (vm, node, ptd->integ_ops, sync_bufs, sync_nexts,
ESP_DECRYPT_ERROR_INTEG_ERROR);
- esp_process_chained_ops (vm, node, ptd->chained_integ_ops, bufs, nexts,
- ptd->chunks, ESP_DECRYPT_ERROR_INTEG_ERROR);
+ esp_process_chained_ops (vm, node, ptd->chained_integ_ops, sync_bufs,
+ sync_nexts, ptd->chunks,
+ ESP_DECRYPT_ERROR_INTEG_ERROR);
- esp_process_ops (vm, node, ptd->crypto_ops, bufs, nexts,
+ esp_process_ops (vm, node, ptd->crypto_ops, sync_bufs, sync_nexts,
ESP_DECRYPT_ERROR_DECRYPTION_FAILED);
- esp_process_chained_ops (vm, node, ptd->chained_crypto_ops, bufs, nexts,
- ptd->chunks,
+ esp_process_chained_ops (vm, node, ptd->chained_crypto_ops, sync_bufs,
+ sync_nexts, ptd->chunks,
ESP_DECRYPT_ERROR_DECRYPTION_FAILED);
}
/* Post decryption round - adjust packet data start and length and next
node */
- n_left = from_frame->n_vectors;
- next = nexts;
+ n_left = n_sync;
+ sync_next = sync_nexts;
pd = pkt_data;
pd2 = pkt_data2;
- b = bufs;
+ b = sync_bufs;
while (n_left)
{
@@ -1283,8 +1289,8 @@ esp_decrypt_inline (vlib_main_t * vm,
if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
current_sa_index = vnet_buffer (b[0])->ipsec.sad_index;
- if (next[0] >= ESP_DECRYPT_N_NEXT)
- esp_decrypt_post_crypto (vm, node, pd, pd2, b[0], next, is_ip6,
+ if (sync_next[0] >= ESP_DECRYPT_N_NEXT)
+ esp_decrypt_post_crypto (vm, node, pd, pd2, b[0], sync_next, is_ip6,
is_tun, 0);
/* trace: */
@@ -1302,19 +1308,22 @@ esp_decrypt_inline (vlib_main_t * vm,
/* next */
n_left -= 1;
- next += 1;
+ sync_next += 1;
pd += 1;
pd2 += 1;
b += 1;
}
- n_left = from_frame->n_vectors;
- vlib_node_increment_counter (vm, node->node_index,
- ESP_DECRYPT_ERROR_RX_PKTS, n_left);
+ vlib_node_increment_counter (vm, node->node_index, ESP_DECRYPT_ERROR_RX_PKTS,
+ from_frame->n_vectors);
- vlib_buffer_enqueue_to_next (vm, node, from, nexts, n_left);
+ if (n_sync)
+ vlib_buffer_enqueue_to_next (vm, node, sync_bi, sync_nexts, n_sync);
- return n_left;
+ if (n_noop)
+ vlib_buffer_enqueue_to_next (vm, node, noop_bi, noop_nexts, n_noop);
+
+ return (from_frame->n_vectors);
}
always_inline uword
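
One way to read the reworked loop above: each input buffer is filed into exactly one of the three queues before the enqueues at the end. A sketch of that invariant (no such assert is in the patch):

  /* after the classification loop (sketch) */
  ASSERT (n_sync + n_async + n_noop == from_frame->n_vectors);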
diff --git a/src/vnet/ipsec/esp_encrypt.c b/src/vnet/ipsec/esp_encrypt.c
index 1fc53a53317..214cf674c75 100644
--- a/src/vnet/ipsec/esp_encrypt.c
+++ b/src/vnet/ipsec/esp_encrypt.c
@@ -43,13 +43,14 @@ typedef enum
ESP_ENCRYPT_N_NEXT,
} esp_encrypt_next_t;
-#define foreach_esp_encrypt_error \
- _(RX_PKTS, "ESP pkts received") \
- _(POST_RX_PKTS, "ESP-post pkts received") \
- _(SEQ_CYCLED, "sequence number cycled (packet dropped)") \
- _(CRYPTO_ENGINE_ERROR, "crypto engine error (packet dropped)") \
- _(CRYPTO_QUEUE_FULL, "crypto queue full (packet dropped)") \
- _(NO_BUFFERS, "no buffers (packet dropped)") \
+#define foreach_esp_encrypt_error \
+ _ (RX_PKTS, "ESP pkts received") \
+ _ (POST_RX_PKTS, "ESP-post pkts received") \
+ _ (HANDOFF, "Hand-off") \
+ _ (SEQ_CYCLED, "sequence number cycled (packet dropped)") \
+ _ (CRYPTO_ENGINE_ERROR, "crypto engine error (packet dropped)") \
+ _ (CRYPTO_QUEUE_FULL, "crypto queue full (packet dropped)") \
+ _ (NO_BUFFERS, "no buffers (packet dropped)")
typedef enum
{
@@ -112,9 +113,8 @@ format_esp_post_encrypt_trace (u8 * s, va_list * args)
/* pad packet in input buffer */
static_always_inline u8 *
-esp_add_footer_and_icv (vlib_main_t * vm, vlib_buffer_t ** last,
- u8 esp_align, u8 icv_sz,
- u16 * next, vlib_node_runtime_t * node,
+esp_add_footer_and_icv (vlib_main_t *vm, vlib_buffer_t **last, u8 esp_align,
+ u8 icv_sz, vlib_node_runtime_t *node,
u16 buffer_data_size, uword total_len)
{
static const u8 pad_data[ESP_MAX_BLOCK_SIZE] = {
@@ -379,9 +379,9 @@ always_inline void
esp_prepare_sync_op (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
vnet_crypto_op_t **crypto_ops,
vnet_crypto_op_t **integ_ops, ipsec_sa_t *sa0,
- u8 *payload, u16 payload_len, u8 iv_sz, u8 icv_sz,
- vlib_buffer_t **bufs, vlib_buffer_t **b,
- vlib_buffer_t *lb, u32 hdr_len, esp_header_t *esp)
+ u8 *payload, u16 payload_len, u8 iv_sz, u8 icv_sz, u32 bi,
+ vlib_buffer_t **b, vlib_buffer_t *lb, u32 hdr_len,
+ esp_header_t *esp)
{
if (sa0->crypto_enc_op_id)
{
@@ -392,7 +392,7 @@ esp_prepare_sync_op (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
op->src = op->dst = payload;
op->key_index = sa0->crypto_key_index;
op->len = payload_len - icv_sz;
- op->user_data = b - bufs;
+ op->user_data = bi;
if (ipsec_sa_is_set_IS_CTR (sa0))
{
@@ -447,7 +447,7 @@ esp_prepare_sync_op (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
op->key_index = sa0->integ_key_index;
op->digest_len = icv_sz;
op->len = payload_len - icv_sz + iv_sz + sizeof (esp_header_t);
- op->user_data = b - bufs;
+ op->user_data = bi;
if (lb != b[0])
{
@@ -564,14 +564,13 @@ esp_prepare_async_frame (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
always_inline uword
esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
vlib_frame_t *frame, vnet_link_t lt, int is_tun,
- u16 async_next)
+ u16 async_next_node)
{
ipsec_main_t *im = &ipsec_main;
ipsec_per_thread_data_t *ptd = vec_elt_at_index (im->ptd, vm->thread_index);
u32 *from = vlib_frame_vector_args (frame);
u32 n_left = frame->n_vectors;
vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
- u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
u32 thread_index = vm->thread_index;
u16 buffer_data_size = vlib_buffer_get_default_data_size (vm);
u32 current_sa_index = ~0, current_sa_packets = 0;
@@ -592,16 +591,20 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
ESP_ENCRYPT_NEXT_HANDOFF6 :
(lt == VNET_LINK_IP4 ? ESP_ENCRYPT_NEXT_HANDOFF4 :
ESP_ENCRYPT_NEXT_HANDOFF_MPLS));
- u16 n_async_drop = 0;
+ vlib_buffer_t *sync_bufs[VLIB_FRAME_SIZE];
+ u16 sync_nexts[VLIB_FRAME_SIZE], *sync_next = sync_nexts, n_sync = 0;
+ u16 async_nexts[VLIB_FRAME_SIZE], *async_next = async_nexts, n_async = 0;
+ u16 noop_nexts[VLIB_FRAME_SIZE], *noop_next = noop_nexts, n_noop = 0;
+ u32 sync_bi[VLIB_FRAME_SIZE];
+ u32 noop_bi[VLIB_FRAME_SIZE];
+ esp_encrypt_error_t err;
vlib_get_buffers (vm, from, b, n_left);
- if (!is_async)
- {
- vec_reset_length (ptd->crypto_ops);
- vec_reset_length (ptd->integ_ops);
- vec_reset_length (ptd->chained_crypto_ops);
- vec_reset_length (ptd->chained_integ_ops);
- }
+
+ vec_reset_length (ptd->crypto_ops);
+ vec_reset_length (ptd->integ_ops);
+ vec_reset_length (ptd->chained_crypto_ops);
+ vec_reset_length (ptd->chained_integ_ops);
vec_reset_length (ptd->async_frames);
vec_reset_length (ptd->chunks);
clib_memset (async_frames, 0, sizeof (async_frames));
@@ -615,6 +618,8 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
u16 payload_len, payload_len_total, n_bufs;
u32 hdr_len;
+ err = ESP_ENCRYPT_ERROR_RX_PKTS;
+
if (n_left > 2)
{
u8 *p;
@@ -657,19 +662,13 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
esp_align = sa0->esp_block_align;
icv_sz = sa0->integ_icv_size;
iv_sz = sa0->crypto_iv_size;
+ is_async = im->async_mode | ipsec_sa_is_set_IS_ASYNC (sa0);
}
if (is_async)
{
async_op = sa0->crypto_async_enc_op_id;
- if (PREDICT_FALSE (async_op == 0))
- {
- esp_set_next_index (is_async, from, nexts, from[b - bufs],
- &n_async_drop, drop_next, next);
- goto trace;
- }
-
/* get a frame for this op if we don't yet have one or it's full
*/
if (NULL == async_frames[async_op] ||
@@ -694,8 +693,9 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
if (PREDICT_FALSE (thread_index != sa0->thread_index))
{
vnet_buffer (b[0])->ipsec.thread_index = sa0->thread_index;
- esp_set_next_index (is_async, from, nexts, from[b - bufs],
- &n_async_drop, handoff_next, next);
+ err = ESP_ENCRYPT_ERROR_HANDOFF;
+ esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
+ handoff_next);
goto trace;
}
@@ -703,9 +703,8 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
n_bufs = vlib_buffer_chain_linearize (vm, b[0]);
if (n_bufs == 0)
{
- b[0]->error = node->errors[ESP_ENCRYPT_ERROR_NO_BUFFERS];
- esp_set_next_index (is_async, from, nexts, from[b - bufs],
- &n_async_drop, drop_next, next);
+ err = ESP_ENCRYPT_ERROR_NO_BUFFERS;
+ esp_set_next_index (b[0], node, err, n_noop, noop_nexts, drop_next);
goto trace;
}
@@ -718,9 +717,8 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
if (PREDICT_FALSE (esp_seq_advance (sa0)))
{
- b[0]->error = node->errors[ESP_ENCRYPT_ERROR_SEQ_CYCLED];
- esp_set_next_index (is_async, from, nexts, from[b - bufs],
- &n_async_drop, drop_next, next);
+ err = ESP_ENCRYPT_ERROR_SEQ_CYCLED;
+ esp_set_next_index (b[0], node, err, n_noop, noop_nexts, drop_next);
goto trace;
}
@@ -730,16 +728,14 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
if (ipsec_sa_is_set_IS_TUNNEL (sa0))
{
payload = vlib_buffer_get_current (b[0]);
- next_hdr_ptr = esp_add_footer_and_icv (vm, &lb, esp_align, icv_sz,
- next, node,
- buffer_data_size,
- vlib_buffer_length_in_chain
- (vm, b[0]));
+ next_hdr_ptr = esp_add_footer_and_icv (
+ vm, &lb, esp_align, icv_sz, node, buffer_data_size,
+ vlib_buffer_length_in_chain (vm, b[0]));
if (!next_hdr_ptr)
{
- b[0]->error = node->errors[ESP_ENCRYPT_ERROR_NO_BUFFERS];
- esp_set_next_index (is_async, from, nexts, from[b - bufs],
- &n_async_drop, drop_next, next);
+ err = ESP_ENCRYPT_ERROR_NO_BUFFERS;
+ esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
+ drop_next);
goto trace;
}
b[0]->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
@@ -833,11 +829,11 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
dpo = &sa0->dpo;
if (!is_tun)
{
- next[0] = dpo->dpoi_next_node;
+ sync_next[0] = dpo->dpoi_next_node;
vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = dpo->dpoi_index;
}
else
- next[0] = ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT;
+ sync_next[0] = ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT;
b[0]->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;
}
else /* transport mode */
@@ -855,15 +851,14 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
vlib_buffer_advance (b[0], ip_len);
payload = vlib_buffer_get_current (b[0]);
- next_hdr_ptr = esp_add_footer_and_icv (vm, &lb, esp_align, icv_sz,
- next, node,
- buffer_data_size,
- vlib_buffer_length_in_chain
- (vm, b[0]));
+ next_hdr_ptr = esp_add_footer_and_icv (
+ vm, &lb, esp_align, icv_sz, node, buffer_data_size,
+ vlib_buffer_length_in_chain (vm, b[0]));
if (!next_hdr_ptr)
{
- esp_set_next_index (is_async, from, nexts, from[b - bufs],
- &n_async_drop, drop_next, next);
+ err = ESP_ENCRYPT_ERROR_NO_BUFFERS;
+ esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
+ drop_next);
goto trace;
}
@@ -938,7 +933,7 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
esp_fill_udp_hdr (sa0, udp, udp_len);
}
- next[0] = ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT;
+ sync_next[0] = ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT;
}
if (lb != b[0])
@@ -958,12 +953,12 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
if (is_async)
esp_prepare_async_frame (vm, ptd, async_frames[async_op], sa0, b[0],
esp, payload, payload_len, iv_sz, icv_sz,
- from[b - bufs], next[0], hdr_len, async_next,
- lb);
+ from[b - bufs], sync_next[0], hdr_len,
+ async_next_node, lb);
else
- esp_prepare_sync_op (vm, ptd, crypto_ops, integ_ops, sa0, payload,
- payload_len, iv_sz, icv_sz, bufs, b, lb,
- hdr_len, esp);
+ esp_prepare_sync_op (vm, ptd, crypto_ops, integ_ops, sa0, payload,
+ payload_len, iv_sz, icv_sz, n_sync, b, lb,
+ hdr_len, esp);
vlib_buffer_advance (b[0], 0LL - hdr_len);
@@ -983,31 +978,48 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
tr->crypto_alg = sa0->crypto_alg;
tr->integ_alg = sa0->integ_alg;
}
+
/* next */
+ if (ESP_ENCRYPT_ERROR_RX_PKTS != err)
+ {
+ noop_bi[n_noop] = from[b - bufs];
+ n_noop++;
+ noop_next++;
+ }
+ else if (!is_async)
+ {
+ sync_bi[n_sync] = from[b - bufs];
+ sync_bufs[n_sync] = b[0];
+ n_sync++;
+ sync_next++;
+ }
+ else
+ {
+ n_async++;
+ async_next++;
+ }
n_left -= 1;
- next += 1;
b += 1;
}
vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
current_sa_index, current_sa_packets,
current_sa_bytes);
- if (!is_async)
+ if (n_sync)
{
- esp_process_ops (vm, node, ptd->crypto_ops, bufs, nexts, drop_next);
- esp_process_chained_ops (vm, node, ptd->chained_crypto_ops, bufs, nexts,
- ptd->chunks, drop_next);
+ esp_process_ops (vm, node, ptd->crypto_ops, sync_bufs, sync_nexts,
+ drop_next);
+ esp_process_chained_ops (vm, node, ptd->chained_crypto_ops, sync_bufs,
+ sync_nexts, ptd->chunks, drop_next);
- esp_process_ops (vm, node, ptd->integ_ops, bufs, nexts, drop_next);
- esp_process_chained_ops (vm, node, ptd->chained_integ_ops, bufs, nexts,
- ptd->chunks, drop_next);
+ esp_process_ops (vm, node, ptd->integ_ops, sync_bufs, sync_nexts,
+ drop_next);
+ esp_process_chained_ops (vm, node, ptd->chained_integ_ops, sync_bufs,
+ sync_nexts, ptd->chunks, drop_next);
- vlib_node_increment_counter (
- vm, node->node_index, ESP_ENCRYPT_ERROR_RX_PKTS, frame->n_vectors);
-
- vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
+ vlib_buffer_enqueue_to_next (vm, node, sync_bi, sync_nexts, n_sync);
}
- else
+ if (n_async)
{
/* submit all of the open frames */
vnet_crypto_async_frame_t **async_frame;
@@ -1016,20 +1028,19 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
{
if (vnet_crypto_async_submit_open_frame (vm, *async_frame) < 0)
{
- esp_async_recycle_failed_submit (
- *async_frame, b, from, nexts, &n_async_drop, drop_next,
- ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR);
+ n_noop += esp_async_recycle_failed_submit (
+ vm, *async_frame, node, ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR,
+ n_noop, noop_bi, noop_nexts, drop_next);
vnet_crypto_async_reset_frame (*async_frame);
vnet_crypto_async_free_frame (vm, *async_frame);
}
}
-
- vlib_node_increment_counter (vm, node->node_index,
- ESP_ENCRYPT_ERROR_RX_PKTS,
- frame->n_vectors);
- if (n_async_drop)
- vlib_buffer_enqueue_to_next (vm, node, from, nexts, n_async_drop);
}
+ if (n_noop)
+ vlib_buffer_enqueue_to_next (vm, node, noop_bi, noop_nexts, n_noop);
+
+ vlib_node_increment_counter (vm, node->node_index, ESP_ENCRYPT_ERROR_RX_PKTS,
+ frame->n_vectors);
return frame->n_vectors;
}
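
The per-SA async decision introduced above is a plain OR of the global mode and the new SA flag; a minimal sketch with a hypothetical helper name (the patch open-codes the expression in both nodes):

  /* hypothetical helper, not in the patch */
  static_always_inline int
  esp_sa_is_async (const ipsec_main_t *im, const ipsec_sa_t *sa)
  {
    /* async path if the node-wide mode is on or the SA requested it */
    return (im->async_mode | ipsec_sa_is_set_IS_ASYNC (sa));
  }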
diff --git a/src/vnet/ipsec/ipsec.c b/src/vnet/ipsec/ipsec.c
index 45ae5acd4b0..74713458b14 100644
--- a/src/vnet/ipsec/ipsec.c
+++ b/src/vnet/ipsec/ipsec.c
@@ -329,20 +329,15 @@ ipsec_set_async_mode (u32 is_enabled)
ipsec_main_t *im = &ipsec_main;
ipsec_sa_t *sa;
- /* lock all SAs before change im->async_mode */
- pool_foreach (sa, ipsec_sa_pool)
- {
- fib_node_lock (&sa->node);
- }
+ vnet_crypto_request_async_mode (is_enabled);
im->async_mode = is_enabled;
- /* change SA crypto op data before unlock them */
+ /* change SA crypto op data */
pool_foreach (sa, ipsec_sa_pool)
{
sa->crypto_op_data =
- is_enabled ? sa->async_op_data.data : sa->sync_op_data.data;
- fib_node_unlock (&sa->node);
+ (is_enabled ? sa->async_op_data.data : sa->sync_op_data.data);
}
}
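
Since ipsec_set_async_mode () now issues the crypto-layer request itself, its callers shrink from two calls to one (see the ipsec_api.c and ipsec_cli.c hunks below, which drop the explicit request):

  /* before this change, callers paired the calls themselves: */
  vnet_crypto_request_async_mode (is_enabled);
  ipsec_set_async_mode (is_enabled);

  /* after it, one call suffices: */
  ipsec_set_async_mode (is_enabled);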
diff --git a/src/vnet/ipsec/ipsec_api.c b/src/vnet/ipsec/ipsec_api.c
index 45e4e6f0703..5ce64d9a7d6 100644
--- a/src/vnet/ipsec/ipsec_api.c
+++ b/src/vnet/ipsec/ipsec_api.c
@@ -1154,7 +1154,6 @@ vl_api_ipsec_set_async_mode_t_handler (vl_api_ipsec_set_async_mode_t * mp)
vl_api_ipsec_set_async_mode_reply_t *rmp;
int rv = 0;
- vnet_crypto_request_async_mode (mp->async_enable);
ipsec_set_async_mode (mp->async_enable);
REPLY_MACRO (VL_API_IPSEC_SET_ASYNC_MODE_REPLY);
diff --git a/src/vnet/ipsec/ipsec_cli.c b/src/vnet/ipsec/ipsec_cli.c
index bb80b328d56..2c7a923adc3 100644
--- a/src/vnet/ipsec/ipsec_cli.c
+++ b/src/vnet/ipsec/ipsec_cli.c
@@ -98,7 +98,7 @@ ipsec_sa_add_del_command_fn (vlib_main_t * vm,
u16 udp_src, udp_dst;
int is_add, rv;
u32 m_args = 0;
- tunnel_t tun;
+ tunnel_t tun = {};
salt = 0;
error = NULL;
@@ -161,6 +161,8 @@ ipsec_sa_add_del_command_fn (vlib_main_t * vm,
flags |= IPSEC_SA_FLAG_USE_ESN;
else if (unformat (line_input, "udp-encap"))
flags |= IPSEC_SA_FLAG_UDP_ENCAP;
+ else if (unformat (line_input, "async"))
+ flags |= IPSEC_SA_FLAG_IS_ASYNC;
else
{
error = clib_error_return (0, "parse error: '%U'",
@@ -198,7 +200,7 @@ ipsec_sa_add_del_command_fn (vlib_main_t * vm,
}
if (rv)
- error = clib_error_return (0, "failed");
+ error = clib_error_return (0, "failed: %d", rv);
done:
unformat_free (line_input);
@@ -940,7 +942,6 @@ set_async_mode_command_fn (vlib_main_t * vm, unformat_input_t * input,
format_unformat_error, line_input));
}
- vnet_crypto_request_async_mode (async_enable);
ipsec_set_async_mode (async_enable);
unformat_free (line_input);
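
An illustrative use of the new keyword (SA id, SPI, algorithm and key are placeholders; the accepted arguments are whatever ipsec_sa_add_del_command_fn parses in a given build):

  vpp# ipsec sa add 10 spi 1001 esp crypto-alg aes-gcm-128 crypto-key 4a506a794f574265564551694d653768 async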
diff --git a/src/vnet/ipsec/ipsec_sa.c b/src/vnet/ipsec/ipsec_sa.c
index 7e2dc20ef90..b1e337470ab 100644
--- a/src/vnet/ipsec/ipsec_sa.c
+++ b/src/vnet/ipsec/ipsec_sa.c
@@ -245,7 +245,15 @@ ipsec_sa_add_and_lock (u32 id, u32 spi, ipsec_protocol_t proto,
if (im->async_mode)
sa->crypto_op_data = sa->async_op_data.data;
else
- sa->crypto_op_data = sa->sync_op_data.data;
+ {
+ if (ipsec_sa_is_set_IS_ASYNC (sa))
+ {
+ vnet_crypto_request_async_mode (1);
+ sa->crypto_op_data = sa->async_op_data.data;
+ }
+ else
+ sa->crypto_op_data = sa->sync_op_data.data;
+ }
err = ipsec_check_support_cb (im, sa);
if (err)
@@ -332,6 +340,8 @@ ipsec_sa_del (ipsec_sa_t * sa)
/* no recovery possible when deleting an SA */
(void) ipsec_call_add_del_callbacks (im, sa, sa_index, 0);
+ if (ipsec_sa_is_set_IS_ASYNC (sa))
+ vnet_crypto_request_async_mode (0);
if (ipsec_sa_is_set_UDP_ENCAP (sa) && ipsec_sa_is_set_IS_INBOUND (sa))
ipsec_unregister_udp_port (clib_net_to_host_u16 (sa->udp_hdr.dst_port));
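
The add/del paths pair vnet_crypto_request_async_mode (1) and (0) for SAs carrying IS_ASYNC. A hypothetical refactor (the helper name is not in the patch) that captures the selection rule added above:

  static void
  ipsec_sa_set_op_data (ipsec_main_t *im, ipsec_sa_t *sa)
  {
    if (im->async_mode)
      sa->crypto_op_data = sa->async_op_data.data;
    else if (ipsec_sa_is_set_IS_ASYNC (sa))
      {
        /* per-SA async: ask the crypto layer for async engines; the
           matching request (0) happens in ipsec_sa_del () */
        vnet_crypto_request_async_mode (1);
        sa->crypto_op_data = sa->async_op_data.data;
      }
    else
      sa->crypto_op_data = sa->sync_op_data.data;
  }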
diff --git a/src/vnet/ipsec/ipsec_sa.h b/src/vnet/ipsec/ipsec_sa.h
index 705034e8b47..7827ef18084 100644
--- a/src/vnet/ipsec/ipsec_sa.h
+++ b/src/vnet/ipsec/ipsec_sa.h
@@ -101,7 +101,8 @@ typedef struct ipsec_key_t_
_ (32, IS_PROTECT, "Protect") \
_ (64, IS_INBOUND, "inbound") \
_ (128, IS_AEAD, "aead") \
- _ (256, IS_CTR, "ctr")
+ _ (256, IS_CTR, "ctr") \
+ _ (512, IS_ASYNC, "async")
typedef enum ipsec_sad_flags_t_
{
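
For reference, the new foreach entry expands to an enum value and a generated predicate (sketched from the usual VPP flag-macro pattern; the exact expansion lives further down in ipsec_sa.h):

  /* enum member from _ (512, IS_ASYNC, "async") */
  IPSEC_SA_FLAG_IS_ASYNC = 512,

  /* generated test helper, roughly: */
  always_inline int
  ipsec_sa_is_set_IS_ASYNC (const ipsec_sa_t *sa)
  {
    return (sa->flags & IPSEC_SA_FLAG_IS_ASYNC) == IPSEC_SA_FLAG_IS_ASYNC;
  }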
diff --git a/src/vnet/ipsec/ipsec_types.api b/src/vnet/ipsec/ipsec_types.api
index b47355908e7..9fa7e058cbf 100644
--- a/src/vnet/ipsec/ipsec_types.api
+++ b/src/vnet/ipsec/ipsec_types.api
@@ -74,6 +74,8 @@ enum ipsec_sad_flags
IPSEC_API_SAD_FLAG_UDP_ENCAP = 0x10,
/* IPsec SA is for inbound traffic */
IPSEC_API_SAD_FLAG_IS_INBOUND = 0x40,
+ /* IPsec SA uses an Async driver */
+ IPSEC_API_SAD_FLAG_ASYNC = 0x80 [backwards_compatible],
};
enum ipsec_proto
diff --git a/src/vnet/ipsec/ipsec_types_api.c b/src/vnet/ipsec/ipsec_types_api.c
index 44b129b3b66..7044f1eb046 100644
--- a/src/vnet/ipsec/ipsec_types_api.c
+++ b/src/vnet/ipsec/ipsec_types_api.c
@@ -147,6 +147,8 @@ ipsec_sa_flags_decode (vl_api_ipsec_sad_flags_t in)
flags |= IPSEC_SA_FLAG_UDP_ENCAP;
if (in & IPSEC_API_SAD_FLAG_IS_INBOUND)
flags |= IPSEC_SA_FLAG_IS_INBOUND;
+ if (in & IPSEC_API_SAD_FLAG_ASYNC)
+ flags |= IPSEC_SA_FLAG_IS_ASYNC;
return (flags);
}
@@ -168,6 +170,8 @@ ipsec_sad_flags_encode (const ipsec_sa_t * sa)
flags |= IPSEC_API_SAD_FLAG_UDP_ENCAP;
if (ipsec_sa_is_set_IS_INBOUND (sa))
flags |= IPSEC_API_SAD_FLAG_IS_INBOUND;
+ if (ipsec_sa_is_set_IS_ASYNC (sa))
+ flags |= IPSEC_API_SAD_FLAG_ASYNC;
return clib_host_to_net_u32 (flags);
}
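
A round-trip sketch for the new flag through these converters (decode takes host-order flags; note ipsec_sad_flags_encode () returns its result already swapped via clib_host_to_net_u32):

  ipsec_sa_flags_t f = ipsec_sa_flags_decode (IPSEC_API_SAD_FLAG_ASYNC);
  ASSERT (f & IPSEC_SA_FLAG_IS_ASYNC);  /* API 0x80 maps to SA flag 512 */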