summary | refs | log | tree | commit | diff | stats
path: root/src/vnet/ipsec/esp_decrypt.c
diff options
context:
space:
mode:
Diffstat (limited to 'src/vnet/ipsec/esp_decrypt.c')
-rw-r--r--  src/vnet/ipsec/esp_decrypt.c  179
1 files changed, 94 insertions, 85 deletions
diff --git a/src/vnet/ipsec/esp_decrypt.c b/src/vnet/ipsec/esp_decrypt.c
index 141b1b987d4..ea5a99c6fa1 100644
--- a/src/vnet/ipsec/esp_decrypt.c
+++ b/src/vnet/ipsec/esp_decrypt.c
@@ -58,20 +58,20 @@ typedef enum
ESP_DECRYPT_POST_N_NEXT,
} esp_decrypt_post_next_t;
-#define foreach_esp_decrypt_error \
- _(RX_PKTS, "ESP pkts received") \
- _(RX_POST_PKTS, "ESP-POST pkts received") \
- _(DECRYPTION_FAILED, "ESP decryption failed") \
- _(INTEG_ERROR, "Integrity check failed") \
- _(CRYPTO_ENGINE_ERROR, "crypto engine error (packet dropped)") \
- _(REPLAY, "SA replayed packet") \
- _(RUNT, "undersized packet") \
- _(NO_BUFFERS, "no buffers (packet dropped)") \
- _(OVERSIZED_HEADER, "buffer with oversized header (dropped)") \
- _(NO_TAIL_SPACE, "no enough buffer tail space (dropped)") \
- _(TUN_NO_PROTO, "no tunnel protocol") \
- _(UNSUP_PAYLOAD, "unsupported payload") \
-
+#define foreach_esp_decrypt_error \
+ _ (RX_PKTS, "ESP pkts received") \
+ _ (RX_POST_PKTS, "ESP-POST pkts received") \
+ _ (HANDOFF, "hand-off") \
+ _ (DECRYPTION_FAILED, "ESP decryption failed") \
+ _ (INTEG_ERROR, "Integrity check failed") \
+ _ (CRYPTO_ENGINE_ERROR, "crypto engine error (packet dropped)") \
+ _ (REPLAY, "SA replayed packet") \
+ _ (RUNT, "undersized packet") \
+ _ (NO_BUFFERS, "no buffers (packet dropped)") \
+ _ (OVERSIZED_HEADER, "buffer with oversized header (dropped)") \
+ _ (NO_TAIL_SPACE, "no enough buffer tail space (dropped)") \
+ _ (TUN_NO_PROTO, "no tunnel protocol") \
+ _ (UNSUP_PAYLOAD, "unsupported payload")
typedef enum
{
@@ -154,7 +154,7 @@ esp_process_chained_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
vnet_crypto_op_t *op = ops;
u32 n_fail, n_ops = vec_len (ops);
- if (n_ops == 0)
+ if (PREDICT_TRUE (n_ops == 0))
return;
n_fail = n_ops - vnet_crypto_process_chained_ops (vm, op, chunks, n_ops);
@@ -1009,9 +1009,9 @@ esp_decrypt_post_crypto (vlib_main_t * vm, vlib_node_runtime_t * node,
}
always_inline uword
-esp_decrypt_inline (vlib_main_t * vm,
- vlib_node_runtime_t * node, vlib_frame_t * from_frame,
- int is_ip6, int is_tun, u16 async_next)
+esp_decrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
+ vlib_frame_t *from_frame, int is_ip6, int is_tun,
+ u16 async_next_node)
{
ipsec_main_t *im = &ipsec_main;
u32 thread_index = vm->thread_index;
@@ -1020,7 +1020,12 @@ esp_decrypt_inline (vlib_main_t * vm,
u32 *from = vlib_frame_vector_args (from_frame);
u32 n_left = from_frame->n_vectors;
vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
- u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
+ vlib_buffer_t *sync_bufs[VLIB_FRAME_SIZE];
+ u16 sync_nexts[VLIB_FRAME_SIZE], *sync_next = sync_nexts, n_sync = 0;
+ u16 async_nexts[VLIB_FRAME_SIZE], *async_next = async_nexts, n_async = 0;
+ u16 noop_nexts[VLIB_FRAME_SIZE], *noop_next = noop_nexts, n_noop = 0;
+ u32 sync_bi[VLIB_FRAME_SIZE];
+ u32 noop_bi[VLIB_FRAME_SIZE];
esp_decrypt_packet_data_t pkt_data[VLIB_FRAME_SIZE], *pd = pkt_data;
esp_decrypt_packet_data2_t pkt_data2[VLIB_FRAME_SIZE], *pd2 = pkt_data2;
esp_decrypt_packet_data_t cpd = { };
@@ -1032,8 +1037,8 @@ esp_decrypt_inline (vlib_main_t * vm,
vnet_crypto_op_t **integ_ops = &ptd->integ_ops;
int is_async = im->async_mode;
vnet_crypto_async_op_id_t async_op = ~0;
- u16 n_async_drop = 0;
vnet_crypto_async_frame_t *async_frames[VNET_CRYPTO_ASYNC_OP_N_IDS];
+ esp_decrypt_error_t err;
vlib_get_buffers (vm, from, b, n_left);
if (!is_async)
@@ -1045,13 +1050,14 @@ esp_decrypt_inline (vlib_main_t * vm,
}
vec_reset_length (ptd->async_frames);
vec_reset_length (ptd->chunks);
- clib_memset_u16 (nexts, -1, n_left);
+ clib_memset (sync_nexts, -1, sizeof (sync_nexts));
clib_memset (async_frames, 0, sizeof (async_frames));
while (n_left > 0)
{
u8 *payload;
+ err = ESP_DECRYPT_ERROR_RX_PKTS;
if (n_left > 2)
{
u8 *p;
@@ -1065,10 +1071,9 @@ esp_decrypt_inline (vlib_main_t * vm,
u32 n_bufs = vlib_buffer_chain_linearize (vm, b[0]);
if (n_bufs == 0)
{
- b[0]->error = node->errors[ESP_DECRYPT_ERROR_NO_BUFFERS];
- esp_set_next_index (is_async, from, nexts, from[b - bufs],
- &n_async_drop, ESP_DECRYPT_NEXT_DROP, next);
- next[0] = ESP_DECRYPT_NEXT_DROP;
+ err = ESP_DECRYPT_ERROR_NO_BUFFERS;
+ esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
+ ESP_DECRYPT_NEXT_DROP);
goto next;
}
@@ -1090,19 +1095,13 @@ esp_decrypt_inline (vlib_main_t * vm,
cpd.iv_sz = sa0->crypto_iv_size;
cpd.flags = sa0->flags;
cpd.sa_index = current_sa_index;
+ is_async = im->async_mode | ipsec_sa_is_set_IS_ASYNC (sa0);
}
if (is_async)
{
async_op = sa0->crypto_async_dec_op_id;
- if (PREDICT_FALSE (async_op == 0))
- {
- esp_set_next_index (is_async, from, nexts, from[b - bufs],
- &n_async_drop, ESP_DECRYPT_NEXT_DROP, next);
- goto next;
- }
-
/* get a frame for this op if we don't yet have one or it's full
*/
if (NULL == async_frames[async_op] ||
@@ -1127,9 +1126,9 @@ esp_decrypt_inline (vlib_main_t * vm,
if (PREDICT_FALSE (thread_index != sa0->thread_index))
{
vnet_buffer (b[0])->ipsec.thread_index = sa0->thread_index;
- esp_set_next_index (is_async, from, nexts, from[b - bufs],
- &n_async_drop, ESP_DECRYPT_NEXT_HANDOFF, next);
- next[0] = ESP_DECRYPT_NEXT_HANDOFF;
+ err = ESP_DECRYPT_ERROR_HANDOFF;
+ esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
+ ESP_DECRYPT_NEXT_HANDOFF);
goto next;
}
@@ -1160,17 +1159,17 @@ esp_decrypt_inline (vlib_main_t * vm,
/* anti-reply check */
if (ipsec_sa_anti_replay_check (sa0, pd->seq))
{
- b[0]->error = node->errors[ESP_DECRYPT_ERROR_REPLAY];
- esp_set_next_index (is_async, from, nexts, from[b - bufs],
- &n_async_drop, ESP_DECRYPT_NEXT_DROP, next);
+ err = ESP_DECRYPT_ERROR_REPLAY;
+ esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
+ ESP_DECRYPT_NEXT_DROP);
goto next;
}
if (pd->current_length < cpd.icv_sz + esp_sz + cpd.iv_sz)
{
- b[0]->error = node->errors[ESP_DECRYPT_ERROR_RUNT];
- esp_set_next_index (is_async, from, nexts, from[b - bufs],
- &n_async_drop, ESP_DECRYPT_NEXT_DROP, next);
+ err = ESP_DECRYPT_ERROR_RUNT;
+ esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
+ ESP_DECRYPT_NEXT_DROP);
goto next;
}
@@ -1180,30 +1179,44 @@ esp_decrypt_inline (vlib_main_t * vm,
if (is_async)
{
- esp_decrypt_error_t err;
err = esp_decrypt_prepare_async_frame (
vm, node, ptd, async_frames[async_op], sa0, payload, len,
- cpd.icv_sz, cpd.iv_sz, pd, pd2, from[b - bufs], b[0], next,
- async_next);
+ cpd.icv_sz, cpd.iv_sz, pd, pd2, from[b - bufs], b[0], async_next,
+ async_next_node);
if (ESP_DECRYPT_ERROR_RX_PKTS != err)
{
- b[0]->error = err;
- esp_set_next_index (1, from, nexts, from[b - bufs],
- &n_async_drop, ESP_DECRYPT_NEXT_DROP, next);
+ esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
+ ESP_DECRYPT_NEXT_DROP);
}
}
else
- esp_decrypt_prepare_sync_op (vm, node, ptd, &crypto_ops, &integ_ops,
- op, sa0, payload, len, cpd.icv_sz,
- cpd.iv_sz, pd, pd2, b[0], next,
- b - bufs);
+ esp_decrypt_prepare_sync_op (
+ vm, node, ptd, &crypto_ops, &integ_ops, op, sa0, payload, len,
+ cpd.icv_sz, cpd.iv_sz, pd, pd2, b[0], sync_next, b - bufs);
/* next */
next:
+ if (ESP_DECRYPT_ERROR_RX_PKTS != err)
+ {
+ noop_bi[n_noop] = from[b - bufs];
+ n_noop++;
+ noop_next++;
+ }
+ else if (!is_async)
+ {
+ sync_bi[n_sync] = from[b - bufs];
+ sync_bufs[n_sync] = b[0];
+ n_sync++;
+ sync_next++;
+ pd += 1;
+ pd2 += 1;
+ }
+ else
+ {
+ n_async++;
+ async_next++;
+ }
n_left -= 1;
- next += 1;
- pd += 1;
- pd2 += 1;
b += 1;
}
@@ -1212,7 +1225,7 @@ esp_decrypt_inline (vlib_main_t * vm,
current_sa_index, current_sa_pkts,
current_sa_bytes);
- if (is_async)
+ if (n_async)
{
/* submit all of the open frames */
vnet_crypto_async_frame_t **async_frame;
@@ -1221,45 +1234,38 @@ esp_decrypt_inline (vlib_main_t * vm,
{
if (vnet_crypto_async_submit_open_frame (vm, *async_frame) < 0)
{
- esp_async_recycle_failed_submit (
- *async_frame, b, from, nexts, &n_async_drop,
- ESP_DECRYPT_NEXT_DROP, ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR);
+ n_noop += esp_async_recycle_failed_submit (
+ vm, *async_frame, node, ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR,
+ n_sync, noop_bi, noop_nexts, ESP_DECRYPT_NEXT_DROP);
vnet_crypto_async_reset_frame (*async_frame);
vnet_crypto_async_free_frame (vm, *async_frame);
}
}
-
- /* no post process in async */
- vlib_node_increment_counter (vm, node->node_index,
- ESP_DECRYPT_ERROR_RX_PKTS,
- from_frame->n_vectors);
- if (n_async_drop)
- vlib_buffer_enqueue_to_next (vm, node, from, nexts, n_async_drop);
-
- return n_left;
}
- else
+
+ if (n_sync)
{
- esp_process_ops (vm, node, ptd->integ_ops, bufs, nexts,
+ esp_process_ops (vm, node, ptd->integ_ops, sync_bufs, sync_nexts,
ESP_DECRYPT_ERROR_INTEG_ERROR);
- esp_process_chained_ops (vm, node, ptd->chained_integ_ops, bufs, nexts,
- ptd->chunks, ESP_DECRYPT_ERROR_INTEG_ERROR);
+ esp_process_chained_ops (vm, node, ptd->chained_integ_ops, sync_bufs,
+ sync_nexts, ptd->chunks,
+ ESP_DECRYPT_ERROR_INTEG_ERROR);
- esp_process_ops (vm, node, ptd->crypto_ops, bufs, nexts,
+ esp_process_ops (vm, node, ptd->crypto_ops, sync_bufs, sync_nexts,
ESP_DECRYPT_ERROR_DECRYPTION_FAILED);
- esp_process_chained_ops (vm, node, ptd->chained_crypto_ops, bufs, nexts,
- ptd->chunks,
+ esp_process_chained_ops (vm, node, ptd->chained_crypto_ops, sync_bufs,
+ sync_nexts, ptd->chunks,
ESP_DECRYPT_ERROR_DECRYPTION_FAILED);
}
/* Post decryption ronud - adjust packet data start and length and next
node */
- n_left = from_frame->n_vectors;
- next = nexts;
+ n_left = n_sync;
+ sync_next = sync_nexts;
pd = pkt_data;
pd2 = pkt_data2;
- b = bufs;
+ b = sync_bufs;
while (n_left)
{
@@ -1283,8 +1289,8 @@ esp_decrypt_inline (vlib_main_t * vm,
if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
current_sa_index = vnet_buffer (b[0])->ipsec.sad_index;
- if (next[0] >= ESP_DECRYPT_N_NEXT)
- esp_decrypt_post_crypto (vm, node, pd, pd2, b[0], next, is_ip6,
+ if (sync_next[0] >= ESP_DECRYPT_N_NEXT)
+ esp_decrypt_post_crypto (vm, node, pd, pd2, b[0], sync_next, is_ip6,
is_tun, 0);
/* trace: */
@@ -1302,19 +1308,22 @@ esp_decrypt_inline (vlib_main_t * vm,
/* next */
n_left -= 1;
- next += 1;
+ sync_next += 1;
pd += 1;
pd2 += 1;
b += 1;
}
- n_left = from_frame->n_vectors;
- vlib_node_increment_counter (vm, node->node_index,
- ESP_DECRYPT_ERROR_RX_PKTS, n_left);
+ vlib_node_increment_counter (vm, node->node_index, ESP_DECRYPT_ERROR_RX_PKTS,
+ from_frame->n_vectors);
- vlib_buffer_enqueue_to_next (vm, node, from, nexts, n_left);
+ if (n_sync)
+ vlib_buffer_enqueue_to_next (vm, node, sync_bi, sync_nexts, n_sync);
- return n_left;
+ if (n_noop)
+ vlib_buffer_enqueue_to_next (vm, node, noop_bi, noop_nexts, n_noop);
+
+ return (from_frame->n_vectors);
}
always_inline uword