author     Neale Ranns <neale@graphiant.com>        2021-02-10 08:42:49 +0000
committer  Matthew Smith <mgsmith@netgate.com>      2021-02-12 20:09:28 +0000
commit     aa7d7662f63bfe6643b193f74288aea67f883744 (patch)
tree       bc69b3a3be3aed942548247a19735471a0b95db9 /src/vnet/ipsec/ipsec_handoff.c
parent     088bf1dcc32fcd26dac312a5561e0f263fa401d9 (diff)
ipsec: Store thread-index in buffer meta-data during SA handoff
Type: improvement

Storing the SA's thread index in the buffer meta-data negates the need to
load the SA in the handoff node. Don't prefetch the packet data; it's not
needed.

Signed-off-by: Neale Ranns <neale@graphiant.com>
Change-Id: I340472dc437f050cc1c3c11dfeb47ab09c609624
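The hunks below show only the consumer side of the change: the handoff node now copies a per-buffer thread index instead of dereferencing the SA pool. For orientation, here is a minimal sketch of the producer side the patch relies on, i.e. the point where the SA is resolved before handoff recording the SA's owning thread in the buffer meta-data. The helper name ipsec_sa_handoff_prepare is hypothetical; the real writes live in other files touched by this patch, not in ipsec_handoff.c.

/* Hypothetical illustration, not part of this diff: stash the owning
 * worker's index in the buffer meta-data at SA-resolution time so the
 * handoff node can read it without loading the SA. */
#include <vnet/ipsec/ipsec.h>

static_always_inline void
ipsec_sa_handoff_prepare (vlib_buffer_t *b, u32 sa_index)
{
  ipsec_main_t *im = &ipsec_main;
  ipsec_sa_t *sa = pool_elt_at_index (im->sad, sa_index);

  /* Same meta-data fields the handoff node consumes below. */
  vnet_buffer (b)->ipsec.sad_index = sa_index;
  vnet_buffer (b)->ipsec.thread_index = sa->thread_index;
}

With the thread index already in the buffer, the handoff loops reduce to plain meta-data copies, which is why the SA pool lookups and the packet-data prefetches can be deleted.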
Diffstat (limited to 'src/vnet/ipsec/ipsec_handoff.c')
-rw-r--r--  src/vnet/ipsec/ipsec_handoff.c  39
1 file changed, 7 insertions(+), 32 deletions(-)
diff --git a/src/vnet/ipsec/ipsec_handoff.c b/src/vnet/ipsec/ipsec_handoff.c
index 8bd6d22a588..9092f817857 100644
--- a/src/vnet/ipsec/ipsec_handoff.c
+++ b/src/vnet/ipsec/ipsec_handoff.c
@@ -60,9 +60,7 @@ ipsec_handoff (vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame,
vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
u16 thread_indices[VLIB_FRAME_SIZE], *ti;
u32 n_enq, n_left_from, *from;
- ipsec_main_t *im;
- im = &ipsec_main;
from = vlib_frame_vector_args (frame);
n_left_from = frame->n_vectors;
vlib_get_buffers (vm, from, bufs, n_left_from);
@@ -72,9 +70,6 @@ ipsec_handoff (vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame,
while (n_left_from >= 4)
{
- ipsec_sa_t *sa0, *sa1, *sa2, *sa3;
- u32 sai0, sai1, sai2, sai3;
-
/* Prefetch next iteration. */
if (n_left_from >= 12)
{
@@ -82,28 +77,14 @@ ipsec_handoff (vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame,
vlib_prefetch_buffer_header (b[8], LOAD);
vlib_prefetch_buffer_header (b[9], LOAD);
vlib_prefetch_buffer_header (b[10], LOAD);
vlib_prefetch_buffer_header (b[11], LOAD);
-
- vlib_prefetch_buffer_data (b[4], LOAD);
- vlib_prefetch_buffer_data (b[5], LOAD);
- vlib_prefetch_buffer_data (b[6], LOAD);
- vlib_prefetch_buffer_data (b[7], LOAD);
}
- sai0 = vnet_buffer (b[0])->ipsec.sad_index;
- sai1 = vnet_buffer (b[1])->ipsec.sad_index;
- sai2 = vnet_buffer (b[2])->ipsec.sad_index;
- sai3 = vnet_buffer (b[3])->ipsec.sad_index;
- sa0 = pool_elt_at_index (im->sad, sai0);
- sa1 = pool_elt_at_index (im->sad, sai1);
- sa2 = pool_elt_at_index (im->sad, sai2);
- sa3 = pool_elt_at_index (im->sad, sai3);
-
- ti[0] = sa0->thread_index;
- ti[1] = sa1->thread_index;
- ti[2] = sa2->thread_index;
- ti[3] = sa3->thread_index;
-
- if (node->flags & VLIB_NODE_FLAG_TRACE)
+ ti[0] = vnet_buffer (b[0])->ipsec.thread_index;
+ ti[1] = vnet_buffer (b[1])->ipsec.thread_index;
+ ti[2] = vnet_buffer (b[2])->ipsec.thread_index;
+ ti[3] = vnet_buffer (b[3])->ipsec.thread_index;
+
+ if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
{
if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
{
@@ -137,13 +118,7 @@ ipsec_handoff (vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame,
}
while (n_left_from > 0)
{
- ipsec_sa_t *sa0;
- u32 sai0;
-
- sai0 = vnet_buffer (b[0])->ipsec.sad_index;
- sa0 = pool_elt_at_index (im->sad, sai0);
-
- ti[0] = sa0->thread_index;
+ ti[0] = vnet_buffer (b[0])->ipsec.thread_index;
if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
{