author    Neale Ranns <neale@graphiant.com>    2021-02-26 10:35:33 +0000
committer Damjan Marion <dmarion@me.com>       2021-03-05 10:34:55 +0000
commit    fc81134a26458a8358483b0d2908a6b83afb7f11 (patch)
tree      225398a6c3193f37999909e91b0d95513d0430ec
parent    3a9bd7608f74594ab6ebc2fb20786bceaca72dea (diff)
ipsec: Submit fuller async frames
Type: improvement

In the current scheme an async frame is submitted each time the crypto op changes. This happens each time a different SA is used, and thus potentially many times per node, which can lead to the submission of many partially filled frames. Change the scheme so that the node constructs as many full frames as possible and submits them all at the end. Frame ownership is passed to the user, so there can be more than one open frame per op at any given time.

Signed-off-by: Neale Ranns <neale@graphiant.com>
Change-Id: Ic2305581d7b5aa26133f52115e0cd28ba956ed55
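The new flow can be illustrated with a small standalone sketch. The type, sizes and helper names below are simplified stand-ins for vnet_crypto_async_frame_t, VNET_CRYPTO_ASYNC_OP_N_IDS, VNET_CRYPTO_FRAME_SIZE and the vnet_crypto_async_* helpers, not the real VPP API: the node keeps one open frame per async op id, opens a fresh frame only when the current one is missing or full, records every frame it opened, and submits them all once the packet loop finishes.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins; the real definitions live in src/vnet/crypto/crypto.h. */
#define N_OPS      4	/* stands in for VNET_CRYPTO_ASYNC_OP_N_IDS */
#define FRAME_SIZE 8	/* stands in for VNET_CRYPTO_FRAME_SIZE */

typedef struct
{
  int op;
  int n_elts;
  uint32_t elts[FRAME_SIZE];
} frame_t;

static frame_t *
get_frame (int op)		/* models vnet_crypto_async_get_frame () */
{
  /* VPP allocates from a per-thread pool; a heap allocation is enough here. */
  frame_t *f = calloc (1, sizeof (*f));
  f->op = op;
  return f;
}

static int
frame_is_full (const frame_t *f)   /* models vnet_crypto_async_frame_is_full () */
{
  return f->n_elts == FRAME_SIZE;
}

static void
add_to_frame (frame_t *f, uint32_t buffer_index)
{
  /* New contract of vnet_crypto_async_add_to_frame (): the caller has already
   * checked that there is room, so adding an element can no longer fail. */
  assert (!frame_is_full (f));
  f->elts[f->n_elts++] = buffer_index;
}

int
main (void)
{
  frame_t *open[N_OPS] = { 0 };	/* one open frame per async op id */
  frame_t *all[64];		/* every frame opened by this "node" */
  int n_frames = 0;

  /* 20 packets alternating between two SAs that map to different op ids. */
  for (uint32_t bi = 0; bi < 20; bi++)
    {
      int op = bi & 1;

      if (open[op] == NULL || frame_is_full (open[op]))
	{
	  open[op] = get_frame (op);
	  all[n_frames++] = open[op];
	}
      add_to_frame (open[op], bi);
    }

  /* Submit every open frame once, at the end of the node. */
  for (int i = 0; i < n_frames; i++)
    {
      printf ("submit: op %d, %d elements\n", all[i]->op, all[i]->n_elts);
      free (all[i]);
    }
  return 0;
}

With two alternating SAs, as in the new test below, this produces two interleaved streams of full frames instead of a fresh partially filled frame on every SA change.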
-rw-r--r--  src/vnet/crypto/crypto.h      |  82
-rw-r--r--  src/vnet/ipsec/esp_decrypt.c  | 118
-rw-r--r--  src/vnet/ipsec/esp_encrypt.c  | 123
-rw-r--r--  src/vnet/ipsec/ipsec.h        |   1
-rw-r--r--  test/test_ipsec_esp.py        |  84
-rw-r--r--  test/vpp_ipsec.py             |   1
6 files changed, 231 insertions, 178 deletions
diff --git a/src/vnet/crypto/crypto.h b/src/vnet/crypto/crypto.h
index 1df6e7f6651..127731866db 100644
--- a/src/vnet/crypto/crypto.h
+++ b/src/vnet/crypto/crypto.h
@@ -348,7 +348,6 @@ typedef struct
typedef struct
{
CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
- vnet_crypto_async_frame_t *frames[VNET_CRYPTO_ASYNC_OP_N_IDS];
vnet_crypto_async_frame_t *frame_pool;
u32 *buffer_indices;
u16 *nexts;
@@ -543,18 +542,15 @@ vnet_crypto_async_get_frame (vlib_main_t * vm, vnet_crypto_async_op_id_t opt)
{
vnet_crypto_main_t *cm = &crypto_main;
vnet_crypto_thread_t *ct = cm->threads + vm->thread_index;
- vnet_crypto_async_frame_t *f = ct->frames[opt];
+ vnet_crypto_async_frame_t *f = NULL;
+
+ pool_get_aligned (ct->frame_pool, f, CLIB_CACHE_LINE_BYTES);
+ if (CLIB_DEBUG > 0)
+ clib_memset (f, 0xfe, sizeof (*f));
+ f->state = VNET_CRYPTO_FRAME_STATE_NOT_PROCESSED;
+ f->op = opt;
+ f->n_elts = 0;
- if (!f)
- {
- pool_get_aligned (ct->frame_pool, f, CLIB_CACHE_LINE_BYTES);
- if (CLIB_DEBUG > 0)
- clib_memset (f, 0xfe, sizeof (*f));
- f->state = VNET_CRYPTO_FRAME_STATE_NOT_PROCESSED;
- f->op = opt;
- f->n_elts = 0;
- ct->frames[opt] = f;
- }
return f;
}
@@ -573,7 +569,6 @@ vnet_crypto_async_submit_open_frame (vlib_main_t * vm,
{
vnet_crypto_main_t *cm = &crypto_main;
vlib_thread_main_t *tm = vlib_get_thread_main ();
- vnet_crypto_thread_t *ct = cm->threads + vm->thread_index;
vnet_crypto_async_op_id_t opt = frame->op;
u32 i = vlib_num_workers () > 0;
@@ -585,56 +580,33 @@ vnet_crypto_async_submit_open_frame (vlib_main_t * vm,
clib_bitmap_set_no_check (cm->async_active_ids, opt, 1);
if (PREDICT_TRUE (ret == 0))
{
- vnet_crypto_async_frame_t *nf = 0;
- pool_get_aligned (ct->frame_pool, nf, CLIB_CACHE_LINE_BYTES);
- if (CLIB_DEBUG > 0)
- clib_memset (nf, 0xfe, sizeof (*nf));
- nf->state = VNET_CRYPTO_FRAME_STATE_NOT_PROCESSED;
- nf->op = opt;
- nf->n_elts = 0;
- ct->frames[opt] = nf;
+ if (cm->dispatch_mode == VNET_CRYPTO_ASYNC_DISPATCH_INTERRUPT)
+ {
+ for (; i < tm->n_vlib_mains; i++)
+ vlib_node_set_interrupt_pending (vlib_mains[i],
+ cm->crypto_node_index);
+ }
}
else
{
frame->state = VNET_CRYPTO_FRAME_STATE_ELT_ERROR;
}
- if (cm->dispatch_mode == VNET_CRYPTO_ASYNC_DISPATCH_INTERRUPT)
- {
- for (; i < tm->n_vlib_mains; i++)
- {
- vlib_node_set_interrupt_pending (vlib_mains[i],
- cm->crypto_node_index);
- }
- }
return ret;
}
-static_always_inline int
-vnet_crypto_async_add_to_frame (vlib_main_t * vm,
- vnet_crypto_async_frame_t ** frame,
- u32 key_index,
- u32 crypto_len, i16 integ_len_adj,
- i16 crypto_start_offset,
- u16 integ_start_offset,
- u32 buffer_index,
- u16 next_node,
- u8 * iv, u8 * tag, u8 * aad, u8 flags)
-{
- vnet_crypto_async_frame_t *f = *frame;
+static_always_inline void
+vnet_crypto_async_add_to_frame (vlib_main_t *vm, vnet_crypto_async_frame_t *f,
+ u32 key_index, u32 crypto_len,
+ i16 integ_len_adj, i16 crypto_start_offset,
+ u16 integ_start_offset, u32 buffer_index,
+ u16 next_node, u8 *iv, u8 *tag, u8 *aad,
+ u8 flags)
+{
vnet_crypto_async_frame_elt_t *fe;
u16 index;
- if (PREDICT_FALSE (f->n_elts == VNET_CRYPTO_FRAME_SIZE))
- {
- vnet_crypto_async_op_id_t opt = f->op;
- int ret;
- ret = vnet_crypto_async_submit_open_frame (vm, f);
- if (PREDICT_FALSE (ret < 0))
- return -1;
- f = vnet_crypto_async_get_frame (vm, opt);
- *frame = f;
- }
+ ASSERT (f->n_elts < VNET_CRYPTO_FRAME_SIZE);
index = f->n_elts;
fe = &f->elts[index];
@@ -650,8 +622,6 @@ vnet_crypto_async_add_to_frame (vlib_main_t * vm,
fe->flags = flags;
f->buffer_indices[index] = buffer_index;
f->next_node_index[index] = next_node;
-
- return 0;
}
static_always_inline void
@@ -669,6 +639,12 @@ vnet_crypto_async_reset_frame (vnet_crypto_async_frame_t * f)
f->n_elts = 0;
}
+static_always_inline u8
+vnet_crypto_async_frame_is_full (const vnet_crypto_async_frame_t *f)
+{
+ return (f->n_elts == VNET_CRYPTO_FRAME_SIZE);
+}
+
#endif /* included_vnet_crypto_crypto_h */
/*
diff --git a/src/vnet/ipsec/esp_decrypt.c b/src/vnet/ipsec/esp_decrypt.c
index 80ce08bf853..141b1b987d4 100644
--- a/src/vnet/ipsec/esp_decrypt.c
+++ b/src/vnet/ipsec/esp_decrypt.c
@@ -607,17 +607,14 @@ esp_decrypt_prepare_sync_op (vlib_main_t * vm, vlib_node_runtime_t * node,
}
}
-static_always_inline int
-esp_decrypt_prepare_async_frame (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- ipsec_per_thread_data_t * ptd,
- vnet_crypto_async_frame_t ** f,
- ipsec_sa_t * sa0, u8 * payload, u16 len,
- u8 icv_sz, u8 iv_sz,
- esp_decrypt_packet_data_t * pd,
- esp_decrypt_packet_data2_t * pd2, u32 bi,
- vlib_buffer_t * b, u16 * next,
- u16 async_next)
+static_always_inline esp_decrypt_error_t
+esp_decrypt_prepare_async_frame (vlib_main_t *vm, vlib_node_runtime_t *node,
+ ipsec_per_thread_data_t *ptd,
+ vnet_crypto_async_frame_t *f, ipsec_sa_t *sa0,
+ u8 *payload, u16 len, u8 icv_sz, u8 iv_sz,
+ esp_decrypt_packet_data_t *pd,
+ esp_decrypt_packet_data2_t *pd2, u32 bi,
+ vlib_buffer_t *b, u16 *next, u16 async_next)
{
const u8 esp_sz = sizeof (esp_header_t);
u32 current_protect_index = vnet_buffer (b)->ipsec.protect_index;
@@ -677,9 +674,7 @@ esp_decrypt_prepare_async_frame (vlib_main_t * vm,
0, &integ_len) < 0)
{
/* allocate buffer failed, will not add to frame and drop */
- b->error = node->errors[ESP_DECRYPT_ERROR_NO_BUFFERS];
- next[0] = ESP_DECRYPT_NEXT_DROP;
- return -1;
+ return (ESP_DECRYPT_ERROR_NO_BUFFERS);
}
}
else
@@ -737,11 +732,11 @@ out:
/* for AEAD integ_len - crypto_len will be negative, it is ok since it
* is ignored by the engine. */
- return vnet_crypto_async_add_to_frame (vm, f, key_index, crypto_len,
- integ_len - crypto_len,
- crypto_start_offset,
- integ_start_offset,
- bi, async_next, iv, tag, aad, flags);
+ vnet_crypto_async_add_to_frame (
+ vm, f, key_index, crypto_len, integ_len - crypto_len, crypto_start_offset,
+ integ_start_offset, bi, async_next, iv, tag, aad, flags);
+
+ return (ESP_DECRYPT_ERROR_RX_PKTS);
}
static_always_inline void
@@ -1035,10 +1030,10 @@ esp_decrypt_inline (vlib_main_t * vm,
vnet_crypto_op_t _op, *op = &_op;
vnet_crypto_op_t **crypto_ops = &ptd->crypto_ops;
vnet_crypto_op_t **integ_ops = &ptd->integ_ops;
- vnet_crypto_async_frame_t *async_frame = 0;
int is_async = im->async_mode;
- vnet_crypto_async_op_id_t last_async_op = ~0;
+ vnet_crypto_async_op_id_t async_op = ~0;
u16 n_async_drop = 0;
+ vnet_crypto_async_frame_t *async_frames[VNET_CRYPTO_ASYNC_OP_N_IDS];
vlib_get_buffers (vm, from, b, n_left);
if (!is_async)
@@ -1048,8 +1043,10 @@ esp_decrypt_inline (vlib_main_t * vm,
vec_reset_length (ptd->chained_crypto_ops);
vec_reset_length (ptd->chained_integ_ops);
}
+ vec_reset_length (ptd->async_frames);
vec_reset_length (ptd->chunks);
clib_memset_u16 (nexts, -1, n_left);
+ clib_memset (async_frames, 0, sizeof (async_frames));
while (n_left > 0)
{
@@ -1093,21 +1090,28 @@ esp_decrypt_inline (vlib_main_t * vm,
cpd.iv_sz = sa0->crypto_iv_size;
cpd.flags = sa0->flags;
cpd.sa_index = current_sa_index;
+ }
- /* submit frame when op_id is different then the old one */
- if (is_async && last_async_op != sa0->crypto_async_dec_op_id)
+ if (is_async)
+ {
+ async_op = sa0->crypto_async_dec_op_id;
+
+ if (PREDICT_FALSE (async_op == 0))
{
- if (async_frame && async_frame->n_elts)
- {
- if (vnet_crypto_async_submit_open_frame (vm, async_frame))
- esp_async_recycle_failed_submit (async_frame, b, from,
- nexts, &n_async_drop,
- ESP_DECRYPT_NEXT_DROP,
- ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR);
- }
- async_frame =
- vnet_crypto_async_get_frame (vm, sa0->crypto_async_dec_op_id);
- last_async_op = sa0->crypto_async_dec_op_id;
+ esp_set_next_index (is_async, from, nexts, from[b - bufs],
+ &n_async_drop, ESP_DECRYPT_NEXT_DROP, next);
+ goto next;
+ }
+
+ /* get a frame for this op if we don't yet have one or it's full
+ */
+ if (NULL == async_frames[async_op] ||
+ vnet_crypto_async_frame_is_full (async_frames[async_op]))
+ {
+ async_frames[async_op] =
+ vnet_crypto_async_get_frame (vm, async_op);
+ /* Save the frame to the list we'll submit at the end */
+ vec_add1 (ptd->async_frames, async_frames[async_op]);
}
}
@@ -1176,29 +1180,17 @@ esp_decrypt_inline (vlib_main_t * vm,
if (is_async)
{
- int ret = esp_decrypt_prepare_async_frame (vm, node, ptd,
- &async_frame,
- sa0, payload, len,
- cpd.icv_sz,
- cpd.iv_sz,
- pd, pd2,
- from[b - bufs],
- b[0], next, async_next);
- if (PREDICT_FALSE (ret < 0))
+ esp_decrypt_error_t err;
+
+ err = esp_decrypt_prepare_async_frame (
+ vm, node, ptd, async_frames[async_op], sa0, payload, len,
+ cpd.icv_sz, cpd.iv_sz, pd, pd2, from[b - bufs], b[0], next,
+ async_next);
+ if (ESP_DECRYPT_ERROR_RX_PKTS != err)
{
- b[0]->error = ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR;
+ b[0]->error = err;
esp_set_next_index (1, from, nexts, from[b - bufs],
&n_async_drop, ESP_DECRYPT_NEXT_DROP, next);
- /* when next[0] is ESP_DECRYPT_NEXT_DROP we only have to drop
- * the current packet. Otherwise it is frame submission error
- * thus we have to drop the whole frame.
- */
- if (next[0] != ESP_DECRYPT_NEXT_DROP && async_frame->n_elts)
- esp_async_recycle_failed_submit (async_frame, b, from,
- nexts, &n_async_drop,
- ESP_DECRYPT_NEXT_DROP,
- ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR);
- goto next;
}
}
else
@@ -1222,13 +1214,19 @@ esp_decrypt_inline (vlib_main_t * vm,
if (is_async)
{
- if (async_frame && async_frame->n_elts)
+ /* submit all of the open frames */
+ vnet_crypto_async_frame_t **async_frame;
+
+ vec_foreach (async_frame, ptd->async_frames)
{
- if (vnet_crypto_async_submit_open_frame (vm, async_frame) < 0)
- esp_async_recycle_failed_submit (async_frame, b, from, nexts,
- &n_async_drop,
- ESP_DECRYPT_NEXT_DROP,
- ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR);
+ if (vnet_crypto_async_submit_open_frame (vm, *async_frame) < 0)
+ {
+ esp_async_recycle_failed_submit (
+ *async_frame, b, from, nexts, &n_async_drop,
+ ESP_DECRYPT_NEXT_DROP, ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR);
+ vnet_crypto_async_reset_frame (*async_frame);
+ vnet_crypto_async_free_frame (vm, *async_frame);
+ }
}
/* no post process in async */
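On the submit side the error handling also changes shape: because the node now owns every open frame, a failed vnet_crypto_async_submit_open_frame() means the node itself recycles the frame's buffers to the drop path and then resets and frees the frame, as in the hunk above. Below is a minimal standalone sketch of that path; submit_open_frame, recycle_to_drop and the frame layout are hypothetical stand-ins (the fake engine here simply rejects every second frame), not the real VPP helpers.

#include <stdio.h>
#include <stdlib.h>

#define FRAME_SIZE 8

typedef struct
{
  int n_elts;
  unsigned elts[FRAME_SIZE];
} frame_t;

static int
submit_open_frame (frame_t *f)	/* fake engine: rejects every second frame */
{
  static int n;
  (void) f;
  return (n++ & 1) ? -1 : 0;
}

static void
recycle_to_drop (unsigned buffer_index)	/* stands in for the drop/recycle path */
{
  printf ("drop buffer %u\n", buffer_index);
}

int
main (void)
{
  frame_t *frames[2];

  for (int i = 0; i < 2; i++)
    {
      frames[i] = calloc (1, sizeof (frame_t));
      for (int e = 0; e < 3; e++)
	frames[i]->elts[frames[i]->n_elts++] = (unsigned) (i * 3 + e);
    }

  /* Submit all open frames; on failure the node keeps ownership, so it
   * sends the buffers to the drop path, resets the frame and frees it.
   * (On success the engine owns the frame and releases it on completion,
   * which this sketch does not model.) */
  for (int i = 0; i < 2; i++)
    {
      if (submit_open_frame (frames[i]) < 0)
	{
	  for (int e = 0; e < frames[i]->n_elts; e++)
	    recycle_to_drop (frames[i]->elts[e]);
	  frames[i]->n_elts = 0;	/* reset */
	  free (frames[i]);		/* return the frame */
	}
    }
  return 0;
}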
diff --git a/src/vnet/ipsec/esp_encrypt.c b/src/vnet/ipsec/esp_encrypt.c
index 16bca1da536..1fc53a53317 100644
--- a/src/vnet/ipsec/esp_encrypt.c
+++ b/src/vnet/ipsec/esp_encrypt.c
@@ -471,13 +471,13 @@ esp_prepare_sync_op (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
}
}
-static_always_inline int
-esp_prepare_async_frame (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
- vnet_crypto_async_frame_t ** async_frame,
- ipsec_sa_t * sa, vlib_buffer_t * b,
- esp_header_t * esp, u8 * payload, u32 payload_len,
- u8 iv_sz, u8 icv_sz, u32 bi, u16 next, u32 hdr_len,
- u16 async_next, vlib_buffer_t * lb)
+static_always_inline void
+esp_prepare_async_frame (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
+ vnet_crypto_async_frame_t *async_frame,
+ ipsec_sa_t *sa, vlib_buffer_t *b, esp_header_t *esp,
+ u8 *payload, u32 payload_len, u8 iv_sz, u8 icv_sz,
+ u32 bi, u16 next, u32 hdr_len, u16 async_next,
+ vlib_buffer_t *lb)
{
esp_post_data_t *post = esp_post_data (b);
u8 *tag, *iv, *aad = 0;
@@ -554,12 +554,11 @@ esp_prepare_async_frame (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
}
}
- return vnet_crypto_async_add_to_frame (vm, async_frame, key_index,
- crypto_total_len,
- integ_total_len - crypto_total_len,
- crypto_start_offset,
- integ_start_offset, bi, async_next,
- iv, tag, aad, flag);
+ /* this always succeeds because we know the frame is not full */
+ vnet_crypto_async_add_to_frame (vm, async_frame, key_index, crypto_total_len,
+ integ_total_len - crypto_total_len,
+ crypto_start_offset, integ_start_offset, bi,
+ async_next, iv, tag, aad, flag);
}
always_inline uword
@@ -582,9 +581,9 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
vlib_buffer_t *lb;
vnet_crypto_op_t **crypto_ops = &ptd->crypto_ops;
vnet_crypto_op_t **integ_ops = &ptd->integ_ops;
- vnet_crypto_async_frame_t *async_frame = 0;
+ vnet_crypto_async_frame_t *async_frames[VNET_CRYPTO_ASYNC_OP_N_IDS];
int is_async = im->async_mode;
- vnet_crypto_async_op_id_t last_async_op = ~0;
+ vnet_crypto_async_op_id_t async_op = ~0;
u16 drop_next =
(lt == VNET_LINK_IP6 ? ESP_ENCRYPT_NEXT_DROP6 :
(lt == VNET_LINK_IP4 ? ESP_ENCRYPT_NEXT_DROP4 :
@@ -603,7 +602,9 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
vec_reset_length (ptd->chained_crypto_ops);
vec_reset_length (ptd->chained_integ_ops);
}
+ vec_reset_length (ptd->async_frames);
vec_reset_length (ptd->chunks);
+ clib_memset (async_frames, 0, sizeof (async_frames));
while (n_left > 0)
{
@@ -656,21 +657,28 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
esp_align = sa0->esp_block_align;
icv_sz = sa0->integ_icv_size;
iv_sz = sa0->crypto_iv_size;
+ }
+
+ if (is_async)
+ {
+ async_op = sa0->crypto_async_enc_op_id;
- /* submit frame when op_id is different then the old one */
- if (is_async && sa0->crypto_async_enc_op_id != last_async_op)
+ if (PREDICT_FALSE (async_op == 0))
{
- if (async_frame && async_frame->n_elts)
- {
- if (vnet_crypto_async_submit_open_frame (vm, async_frame))
- esp_async_recycle_failed_submit (async_frame, b, from,
- nexts, &n_async_drop,
- drop_next,
- ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR);
- }
- async_frame =
- vnet_crypto_async_get_frame (vm, sa0->crypto_async_enc_op_id);
- last_async_op = sa0->crypto_async_enc_op_id;
+ esp_set_next_index (is_async, from, nexts, from[b - bufs],
+ &n_async_drop, drop_next, next);
+ goto trace;
+ }
+
+ /* get a frame for this op if we don't yet have one or it's full
+ */
+ if (NULL == async_frames[async_op] ||
+ vnet_crypto_async_frame_is_full (async_frames[async_op]))
+ {
+ async_frames[async_op] =
+ vnet_crypto_async_get_frame (vm, async_op);
+ /* Save the frame to the list we'll submit at the end */
+ vec_add1 (ptd->async_frames, async_frames[async_op]);
}
}
@@ -948,36 +956,14 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
esp->seq = clib_net_to_host_u32 (sa0->seq);
if (is_async)
- {
- if (PREDICT_FALSE (sa0->crypto_async_enc_op_id == 0))
- {
- esp_set_next_index (is_async, from, nexts, from[b - bufs],
- &n_async_drop, drop_next, next);
- goto trace;
- }
-
- if (esp_prepare_async_frame (vm, ptd, &async_frame, sa0, b[0], esp,
- payload, payload_len, iv_sz,
- icv_sz, from[b - bufs], next[0],
- hdr_len, async_next, lb))
- {
- /* The fail only caused by submission, free the whole frame. */
- if (async_frame->n_elts)
- esp_async_recycle_failed_submit (async_frame, b, from, nexts,
- &n_async_drop, drop_next,
- ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR);
- b[0]->error = ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR;
- esp_set_next_index (1, from, nexts, from[b - bufs],
- &n_async_drop, drop_next, next);
- goto trace;
- }
- }
+ esp_prepare_async_frame (vm, ptd, async_frames[async_op], sa0, b[0],
+ esp, payload, payload_len, iv_sz, icv_sz,
+ from[b - bufs], next[0], hdr_len, async_next,
+ lb);
else
- {
esp_prepare_sync_op (vm, ptd, crypto_ops, integ_ops, sa0, payload,
payload_len, iv_sz, icv_sz, bufs, b, lb,
hdr_len, esp);
- }
vlib_buffer_advance (b[0], 0LL - hdr_len);
@@ -1015,29 +1001,36 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
esp_process_ops (vm, node, ptd->integ_ops, bufs, nexts, drop_next);
esp_process_chained_ops (vm, node, ptd->chained_integ_ops, bufs, nexts,
ptd->chunks, drop_next);
+
+ vlib_node_increment_counter (
+ vm, node->node_index, ESP_ENCRYPT_ERROR_RX_PKTS, frame->n_vectors);
+
+ vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
}
else
{
- if (async_frame && async_frame->n_elts)
+ /* submit all of the open frames */
+ vnet_crypto_async_frame_t **async_frame;
+
+ vec_foreach (async_frame, ptd->async_frames)
{
- if (vnet_crypto_async_submit_open_frame (vm, async_frame) < 0)
- esp_async_recycle_failed_submit (async_frame, b, from, nexts,
- &n_async_drop, drop_next,
- ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR);
+ if (vnet_crypto_async_submit_open_frame (vm, *async_frame) < 0)
+ {
+ esp_async_recycle_failed_submit (
+ *async_frame, b, from, nexts, &n_async_drop, drop_next,
+ ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR);
+ vnet_crypto_async_reset_frame (*async_frame);
+ vnet_crypto_async_free_frame (vm, *async_frame);
+ }
}
+
vlib_node_increment_counter (vm, node->node_index,
ESP_ENCRYPT_ERROR_RX_PKTS,
frame->n_vectors);
if (n_async_drop)
vlib_buffer_enqueue_to_next (vm, node, from, nexts, n_async_drop);
-
- return frame->n_vectors;
}
- vlib_node_increment_counter (vm, node->node_index,
- ESP_ENCRYPT_ERROR_RX_PKTS, frame->n_vectors);
-
- vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
return frame->n_vectors;
}
diff --git a/src/vnet/ipsec/ipsec.h b/src/vnet/ipsec/ipsec.h
index 7ca15866a14..15de80764a8 100644
--- a/src/vnet/ipsec/ipsec.h
+++ b/src/vnet/ipsec/ipsec.h
@@ -102,6 +102,7 @@ typedef struct
vnet_crypto_op_t *chained_crypto_ops;
vnet_crypto_op_t *chained_integ_ops;
vnet_crypto_op_chunk_t *chunks;
+ vnet_crypto_async_frame_t **async_frames;
} ipsec_per_thread_data_t;
typedef struct
diff --git a/test/test_ipsec_esp.py b/test/test_ipsec_esp.py
index 209298a30a4..50c6f5c8db5 100644
--- a/test/test_ipsec_esp.py
+++ b/test/test_ipsec_esp.py
@@ -466,6 +466,90 @@ class TestIpsecEsp2(TemplateIpsecEsp, IpsecTcpTests):
pass
+class TestIpsecEspAsync(TemplateIpsecEsp):
+ """ Ipsec ESP - Aysnc tests """
+
+ worker_config = "workers 2"
+
+ def setUp(self):
+ super(TestIpsecEspAsync, self).setUp()
+
+ self.vapi.ipsec_set_async_mode(async_enable=True)
+ self.p4 = IPsecIPv4Params()
+
+ self.p4.crypt_algo_vpp_id = (VppEnum.vl_api_ipsec_crypto_alg_t.
+ IPSEC_API_CRYPTO_ALG_AES_CBC_256)
+ self.p4.crypt_algo = 'AES-CBC' # scapy name
+ self.p4.crypt_key = b'JPjyOWBeVEQiMe7hJPjyOWBeVEQiMe7h'
+
+ self.p4.scapy_tun_sa_id += 0xf0000
+ self.p4.scapy_tun_spi += 0xf0000
+ self.p4.vpp_tun_sa_id += 0xf0000
+ self.p4.vpp_tun_spi += 0xf0000
+ self.p4.remote_tun_if_host = "2.2.2.2"
+ e = VppEnum.vl_api_ipsec_spd_action_t
+
+ self.p4.sa = VppIpsecSA(
+ self,
+ self.p4.vpp_tun_sa_id,
+ self.p4.vpp_tun_spi,
+ self.p4.auth_algo_vpp_id,
+ self.p4.auth_key,
+ self.p4.crypt_algo_vpp_id,
+ self.p4.crypt_key,
+ self.vpp_esp_protocol,
+ self.tun_if.local_addr[self.p4.addr_type],
+ self.tun_if.remote_addr[self.p4.addr_type]).add_vpp_config()
+ self.p4.spd = VppIpsecSpdEntry(
+ self,
+ self.tun_spd,
+ self.p4.vpp_tun_sa_id,
+ self.pg1.remote_addr[self.p4.addr_type],
+ self.pg1.remote_addr[self.p4.addr_type],
+ self.p4.remote_tun_if_host,
+ self.p4.remote_tun_if_host,
+ 0,
+ priority=1,
+ policy=e.IPSEC_API_SPD_ACTION_PROTECT,
+ is_outbound=1).add_vpp_config()
+ VppIpRoute(self, self.p4.remote_tun_if_host, self.p4.addr_len,
+ [VppRoutePath(self.tun_if.remote_addr[self.p4.addr_type],
+ 0xffffffff)]).add_vpp_config()
+ config_tun_params(self.p4, self.encryption_type, self.tun_if)
+
+ def test_dual_stream(self):
+ """ Alternating SAs """
+ p = self.params[self.p4.addr_type]
+
+ pkts = [(Ether(src=self.pg1.remote_mac, dst=self.pg1.local_mac) /
+ IP(src=self.pg1.remote_ip4,
+ dst=self.p4.remote_tun_if_host) /
+ UDP(sport=4444, dport=4444) /
+ Raw(b'0x0' * 200)),
+ (Ether(src=self.pg1.remote_mac, dst=self.pg1.local_mac) /
+ IP(src=self.pg1.remote_ip4,
+ dst=p.remote_tun_if_host) /
+ UDP(sport=4444, dport=4444) /
+ Raw(b'0x0' * 200))]
+ pkts *= 1023
+
+ rxs = self.send_and_expect(self.pg1, pkts, self.pg0)
+
+ self.assertEqual(len(rxs), len(pkts))
+
+ for rx in rxs:
+ if rx[ESP].spi == p.scapy_tun_spi:
+ decrypted = p.vpp_tun_sa.decrypt(rx[IP])
+ elif rx[ESP].spi == self.p4.vpp_tun_spi:
+ decrypted = self.p4.scapy_tun_sa.decrypt(rx[IP])
+ else:
+ rx.show()
+ self.assertTrue(False)
+
+ self.p4.spd.remove_vpp_config()
+ self.p4.sa.remove_vpp_config()
+
+
class TestIpsecEspHandoff(TemplateIpsecEsp,
IpsecTun6HandoffTests,
IpsecTun4HandoffTests):
diff --git a/test/vpp_ipsec.py b/test/vpp_ipsec.py
index d0ceeae2e4d..aa2a05d1068 100644
--- a/test/vpp_ipsec.py
+++ b/test/vpp_ipsec.py
@@ -128,6 +128,7 @@ class VppIpsecSpdEntry(VppObject):
remote_port_stop=self.remote_port_stop)
self.stat_index = rv.stat_index
self.test.registry.register(self, self.test.logger)
+ return self
def remove_vpp_config(self):
self.test.vapi.ipsec_spd_entry_add_del(