path: root/src/vnet/ipsec
author    Neale Ranns <neale@graphiant.com>    2021-02-04 11:33:32 +0000
committer Neale Ranns <neale@graphiant.com>    2021-02-04 11:33:32 +0000
commit    1a52d37fc50acd479274c29c2e92b05cf32c3a6d (patch)
tree      65552badb1eff7b5557e2e1a47f39553d42efada /src/vnet/ipsec
parent    38fae310843b7431136f40bfa8cf7c6bec59450f (diff)
ipsec: one thread index per-SA
Type: improvement

An SA is uni-directional, therefore it can be used only for encrypt or
decrypt, not both, so it needs only one thread index. This frees up some
space on the first cacheline.

Signed-off-by: Neale Ranns <neale@graphiant.com>
Change-Id: I21cb7cff70a763cbe2bffead860b574bc80b3136
Diffstat (limited to 'src/vnet/ipsec')
-rw-r--r--src/vnet/ipsec/ah_decrypt.c6
-rw-r--r--src/vnet/ipsec/ah_encrypt.c6
-rw-r--r--src/vnet/ipsec/esp_decrypt.c6
-rw-r--r--src/vnet/ipsec/esp_encrypt.c6
-rw-r--r--src/vnet/ipsec/ipsec_format.c3
-rw-r--r--src/vnet/ipsec/ipsec_handoff.c59
-rw-r--r--src/vnet/ipsec/ipsec_sa.c3
-rw-r--r--src/vnet/ipsec/ipsec_sa.h4
8 files changed, 36 insertions, 57 deletions
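
The diff below keeps the existing "first packet claims the SA for its thread" logic and only collapses the two per-direction fields into a single thread_index. The following standalone sketch (not the VPP code itself) illustrates that claim-or-handoff pattern under a few assumptions: C11 atomics stand in for clib_atomic_cmp_and_swap(), and the sa_t type and sa_claim_or_handoff() helper are hypothetical names used only for illustration.

/*
 * Minimal sketch of the claim-or-handoff pattern used by the patch:
 * the first packet to touch an SA atomically claims the SA for the
 * current worker; any other worker that later sees the SA hands the
 * packet off to the owning thread.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define SA_THREAD_UNASSIGNED UINT32_MAX /* the ~0 sentinel from the patch */

typedef struct
{
  _Atomic uint32_t thread_index; /* single owner: an SA is uni-directional */
} sa_t;

/* Returns 1 if this thread may process the packet locally,
 * 0 if the packet must be handed off to the owning thread. */
static int
sa_claim_or_handoff (sa_t *sa, uint32_t this_thread)
{
  uint32_t expected = SA_THREAD_UNASSIGNED;

  /* First packet on this SA: try to claim it. If another worker wins
   * the race, the CAS fails and that worker's index stays in place. */
  atomic_compare_exchange_strong (&sa->thread_index, &expected, this_thread);

  return (atomic_load (&sa->thread_index) == this_thread);
}

int
main (void)
{
  sa_t sa = { .thread_index = SA_THREAD_UNASSIGNED };

  /* worker 2 claims the SA; worker 3 must hand off */
  printf ("worker 2 local? %d\n", sa_claim_or_handoff (&sa, 2));
  printf ("worker 3 local? %d\n", sa_claim_or_handoff (&sa, 3));
  return 0;
}

Because the SA carries traffic in one direction only, a single owner suffices for both the encrypt and decrypt nodes, which is why the handoff node in the diff no longer needs the is_enc flag to pick between two fields.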
diff --git a/src/vnet/ipsec/ah_decrypt.c b/src/vnet/ipsec/ah_decrypt.c
index 682f6cc91f0..03a9dc859c8 100644
--- a/src/vnet/ipsec/ah_decrypt.c
+++ b/src/vnet/ipsec/ah_decrypt.c
@@ -176,16 +176,16 @@ ah_decrypt_inline (vlib_main_t * vm,
thread_index, current_sa_index);
}
- if (PREDICT_FALSE (~0 == sa0->decrypt_thread_index))
+ if (PREDICT_FALSE (~0 == sa0->thread_index))
{
/* this is the first packet to use this SA, claim the SA
* for this thread. this could happen simultaneously on
* another thread */
- clib_atomic_cmp_and_swap (&sa0->decrypt_thread_index, ~0,
+ clib_atomic_cmp_and_swap (&sa0->thread_index, ~0,
ipsec_sa_assign_thread (thread_index));
}
- if (PREDICT_TRUE (thread_index != sa0->decrypt_thread_index))
+ if (PREDICT_TRUE (thread_index != sa0->thread_index))
{
next[0] = AH_DECRYPT_NEXT_HANDOFF;
goto next;
diff --git a/src/vnet/ipsec/ah_encrypt.c b/src/vnet/ipsec/ah_encrypt.c
index d89cb093f26..a4c34917550 100644
--- a/src/vnet/ipsec/ah_encrypt.c
+++ b/src/vnet/ipsec/ah_encrypt.c
@@ -184,16 +184,16 @@ ah_encrypt_inline (vlib_main_t * vm,
pd->sa_index = current_sa_index;
next[0] = AH_ENCRYPT_NEXT_DROP;
- if (PREDICT_FALSE (~0 == sa0->encrypt_thread_index))
+ if (PREDICT_FALSE (~0 == sa0->thread_index))
{
/* this is the first packet to use this SA, claim the SA
* for this thread. this could happen simultaneously on
* another thread */
- clib_atomic_cmp_and_swap (&sa0->encrypt_thread_index, ~0,
+ clib_atomic_cmp_and_swap (&sa0->thread_index, ~0,
ipsec_sa_assign_thread (thread_index));
}
- if (PREDICT_TRUE (thread_index != sa0->encrypt_thread_index))
+ if (PREDICT_TRUE (thread_index != sa0->thread_index))
{
next[0] = AH_ENCRYPT_NEXT_HANDOFF;
goto next;
diff --git a/src/vnet/ipsec/esp_decrypt.c b/src/vnet/ipsec/esp_decrypt.c
index f5b6232dbd8..a0ae612a683 100644
--- a/src/vnet/ipsec/esp_decrypt.c
+++ b/src/vnet/ipsec/esp_decrypt.c
@@ -1122,16 +1122,16 @@ esp_decrypt_inline (vlib_main_t * vm,
}
}
- if (PREDICT_FALSE (~0 == sa0->decrypt_thread_index))
+ if (PREDICT_FALSE (~0 == sa0->thread_index))
{
/* this is the first packet to use this SA, claim the SA
* for this thread. this could happen simultaneously on
* another thread */
- clib_atomic_cmp_and_swap (&sa0->decrypt_thread_index, ~0,
+ clib_atomic_cmp_and_swap (&sa0->thread_index, ~0,
ipsec_sa_assign_thread (thread_index));
}
- if (PREDICT_FALSE (thread_index != sa0->decrypt_thread_index))
+ if (PREDICT_FALSE (thread_index != sa0->thread_index))
{
esp_set_next_index (is_async, from, nexts, from[b - bufs],
&n_async_drop, ESP_DECRYPT_NEXT_HANDOFF, next);
diff --git a/src/vnet/ipsec/esp_encrypt.c b/src/vnet/ipsec/esp_encrypt.c
index 4f6976baaee..e5cf1581a69 100644
--- a/src/vnet/ipsec/esp_encrypt.c
+++ b/src/vnet/ipsec/esp_encrypt.c
@@ -669,16 +669,16 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
}
}
- if (PREDICT_FALSE (~0 == sa0->encrypt_thread_index))
+ if (PREDICT_FALSE (~0 == sa0->thread_index))
{
/* this is the first packet to use this SA, claim the SA
* for this thread. this could happen simultaneously on
* another thread */
- clib_atomic_cmp_and_swap (&sa0->encrypt_thread_index, ~0,
+ clib_atomic_cmp_and_swap (&sa0->thread_index, ~0,
ipsec_sa_assign_thread (thread_index));
}
- if (PREDICT_FALSE (thread_index != sa0->encrypt_thread_index))
+ if (PREDICT_FALSE (thread_index != sa0->thread_index))
{
esp_set_next_index (is_async, from, nexts, from[b - bufs],
&n_async_drop, handoff_next, next);
diff --git a/src/vnet/ipsec/ipsec_format.c b/src/vnet/ipsec/ipsec_format.c
index 909254a1dc9..19321e7e347 100644
--- a/src/vnet/ipsec/ipsec_format.c
+++ b/src/vnet/ipsec/ipsec_format.c
@@ -293,8 +293,7 @@ format_ipsec_sa (u8 * s, va_list * args)
s = format (s, "\n locks %d", sa->node.fn_locks);
s = format (s, "\n salt 0x%x", clib_net_to_host_u32 (sa->salt));
- s = format (s, "\n thread-indices [encrypt:%d decrypt:%d]",
- sa->encrypt_thread_index, sa->decrypt_thread_index);
+ s = format (s, "\n thread-index:%d", sa->thread_index);
s = format (s, "\n seq %u seq-hi %u", sa->seq, sa->seq_hi);
s = format (s, "\n last-seq %u last-seq-hi %u window %U",
sa->last_seq, sa->last_seq_hi,
diff --git a/src/vnet/ipsec/ipsec_handoff.c b/src/vnet/ipsec/ipsec_handoff.c
index 87eb801f76f..8bd6d22a588 100644
--- a/src/vnet/ipsec/ipsec_handoff.c
+++ b/src/vnet/ipsec/ipsec_handoff.c
@@ -54,9 +54,8 @@ format_ipsec_handoff_trace (u8 * s, va_list * args)
/* do worker handoff based on thread_index in NAT HA protcol header */
static_always_inline uword
-ipsec_handoff (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * frame, u32 fq_index, bool is_enc)
+ipsec_handoff (vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame,
+ u32 fq_index)
{
vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
u16 thread_indices[VLIB_FRAME_SIZE], *ti;
@@ -99,20 +98,10 @@ ipsec_handoff (vlib_main_t * vm,
sa2 = pool_elt_at_index (im->sad, sai2);
sa3 = pool_elt_at_index (im->sad, sai3);
- if (is_enc)
- {
- ti[0] = sa0->encrypt_thread_index;
- ti[1] = sa1->encrypt_thread_index;
- ti[2] = sa2->encrypt_thread_index;
- ti[3] = sa3->encrypt_thread_index;
- }
- else
- {
- ti[0] = sa0->decrypt_thread_index;
- ti[1] = sa1->decrypt_thread_index;
- ti[2] = sa2->decrypt_thread_index;
- ti[3] = sa3->decrypt_thread_index;
- }
+ ti[0] = sa0->thread_index;
+ ti[1] = sa1->thread_index;
+ ti[2] = sa2->thread_index;
+ ti[3] = sa3->thread_index;
if (node->flags & VLIB_NODE_FLAG_TRACE)
{
@@ -154,10 +143,7 @@ ipsec_handoff (vlib_main_t * vm,
sai0 = vnet_buffer (b[0])->ipsec.sad_index;
sa0 = pool_elt_at_index (im->sad, sai0);
- if (is_enc)
- ti[0] = sa0->encrypt_thread_index;
- else
- ti[0] = sa0->decrypt_thread_index;
+ ti[0] = sa0->thread_index;
if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
{
@@ -188,7 +174,7 @@ VLIB_NODE_FN (esp4_encrypt_handoff) (vlib_main_t * vm,
{
ipsec_main_t *im = &ipsec_main;
- return ipsec_handoff (vm, node, from_frame, im->esp4_enc_fq_index, true);
+ return ipsec_handoff (vm, node, from_frame, im->esp4_enc_fq_index);
}
VLIB_NODE_FN (esp6_encrypt_handoff) (vlib_main_t * vm,
@@ -197,7 +183,7 @@ VLIB_NODE_FN (esp6_encrypt_handoff) (vlib_main_t * vm,
{
ipsec_main_t *im = &ipsec_main;
- return ipsec_handoff (vm, node, from_frame, im->esp6_enc_fq_index, true);
+ return ipsec_handoff (vm, node, from_frame, im->esp6_enc_fq_index);
}
VLIB_NODE_FN (esp4_encrypt_tun_handoff) (vlib_main_t * vm,
@@ -206,8 +192,7 @@ VLIB_NODE_FN (esp4_encrypt_tun_handoff) (vlib_main_t * vm,
{
ipsec_main_t *im = &ipsec_main;
- return ipsec_handoff (vm, node, from_frame, im->esp4_enc_tun_fq_index,
- true);
+ return ipsec_handoff (vm, node, from_frame, im->esp4_enc_tun_fq_index);
}
VLIB_NODE_FN (esp6_encrypt_tun_handoff) (vlib_main_t * vm,
@@ -216,8 +201,7 @@ VLIB_NODE_FN (esp6_encrypt_tun_handoff) (vlib_main_t * vm,
{
ipsec_main_t *im = &ipsec_main;
- return ipsec_handoff (vm, node, from_frame, im->esp6_enc_tun_fq_index,
- true);
+ return ipsec_handoff (vm, node, from_frame, im->esp6_enc_tun_fq_index);
}
VLIB_NODE_FN (esp_mpls_encrypt_tun_handoff)
@@ -225,8 +209,7 @@ VLIB_NODE_FN (esp_mpls_encrypt_tun_handoff)
{
ipsec_main_t *im = &ipsec_main;
- return ipsec_handoff (vm, node, from_frame, im->esp_mpls_enc_tun_fq_index,
- true);
+ return ipsec_handoff (vm, node, from_frame, im->esp_mpls_enc_tun_fq_index);
}
VLIB_NODE_FN (esp4_decrypt_handoff) (vlib_main_t * vm,
@@ -235,7 +218,7 @@ VLIB_NODE_FN (esp4_decrypt_handoff) (vlib_main_t * vm,
{
ipsec_main_t *im = &ipsec_main;
- return ipsec_handoff (vm, node, from_frame, im->esp4_dec_fq_index, false);
+ return ipsec_handoff (vm, node, from_frame, im->esp4_dec_fq_index);
}
VLIB_NODE_FN (esp6_decrypt_handoff) (vlib_main_t * vm,
@@ -244,7 +227,7 @@ VLIB_NODE_FN (esp6_decrypt_handoff) (vlib_main_t * vm,
{
ipsec_main_t *im = &ipsec_main;
- return ipsec_handoff (vm, node, from_frame, im->esp6_dec_fq_index, false);
+ return ipsec_handoff (vm, node, from_frame, im->esp6_dec_fq_index);
}
VLIB_NODE_FN (esp4_decrypt_tun_handoff) (vlib_main_t * vm,
@@ -253,8 +236,7 @@ VLIB_NODE_FN (esp4_decrypt_tun_handoff) (vlib_main_t * vm,
{
ipsec_main_t *im = &ipsec_main;
- return ipsec_handoff (vm, node, from_frame, im->esp4_dec_tun_fq_index,
- false);
+ return ipsec_handoff (vm, node, from_frame, im->esp4_dec_tun_fq_index);
}
VLIB_NODE_FN (esp6_decrypt_tun_handoff) (vlib_main_t * vm,
@@ -263,8 +245,7 @@ VLIB_NODE_FN (esp6_decrypt_tun_handoff) (vlib_main_t * vm,
{
ipsec_main_t *im = &ipsec_main;
- return ipsec_handoff (vm, node, from_frame, im->esp6_dec_tun_fq_index,
- false);
+ return ipsec_handoff (vm, node, from_frame, im->esp6_dec_tun_fq_index);
}
VLIB_NODE_FN (ah4_encrypt_handoff) (vlib_main_t * vm,
@@ -273,7 +254,7 @@ VLIB_NODE_FN (ah4_encrypt_handoff) (vlib_main_t * vm,
{
ipsec_main_t *im = &ipsec_main;
- return ipsec_handoff (vm, node, from_frame, im->ah4_enc_fq_index, true);
+ return ipsec_handoff (vm, node, from_frame, im->ah4_enc_fq_index);
}
VLIB_NODE_FN (ah6_encrypt_handoff) (vlib_main_t * vm,
@@ -282,7 +263,7 @@ VLIB_NODE_FN (ah6_encrypt_handoff) (vlib_main_t * vm,
{
ipsec_main_t *im = &ipsec_main;
- return ipsec_handoff (vm, node, from_frame, im->ah6_enc_fq_index, true);
+ return ipsec_handoff (vm, node, from_frame, im->ah6_enc_fq_index);
}
VLIB_NODE_FN (ah4_decrypt_handoff) (vlib_main_t * vm,
@@ -291,7 +272,7 @@ VLIB_NODE_FN (ah4_decrypt_handoff) (vlib_main_t * vm,
{
ipsec_main_t *im = &ipsec_main;
- return ipsec_handoff (vm, node, from_frame, im->ah4_dec_fq_index, false);
+ return ipsec_handoff (vm, node, from_frame, im->ah4_dec_fq_index);
}
VLIB_NODE_FN (ah6_decrypt_handoff) (vlib_main_t * vm,
@@ -300,7 +281,7 @@ VLIB_NODE_FN (ah6_decrypt_handoff) (vlib_main_t * vm,
{
ipsec_main_t *im = &ipsec_main;
- return ipsec_handoff (vm, node, from_frame, im->ah6_dec_fq_index, false);
+ return ipsec_handoff (vm, node, from_frame, im->ah6_dec_fq_index);
}
/* *INDENT-OFF* */
diff --git a/src/vnet/ipsec/ipsec_sa.c b/src/vnet/ipsec/ipsec_sa.c
index d5a971bb46d..d950af64df3 100644
--- a/src/vnet/ipsec/ipsec_sa.c
+++ b/src/vnet/ipsec/ipsec_sa.c
@@ -211,8 +211,7 @@ ipsec_sa_add_and_lock (u32 id,
sa->tunnel_flags = tunnel_flags;
sa->dscp = dscp;
sa->salt = salt;
- sa->encrypt_thread_index = (vlib_num_workers ())? ~0 : 0;
- sa->decrypt_thread_index = (vlib_num_workers ())? ~0 : 0;
+ sa->thread_index = (vlib_num_workers ()) ? ~0 : 0;
if (integ_alg != IPSEC_INTEG_ALG_NONE)
{
ipsec_sa_set_integ_alg (sa, integ_alg);
diff --git a/src/vnet/ipsec/ipsec_sa.h b/src/vnet/ipsec/ipsec_sa.h
index 6ed71325f69..28ac93185ec 100644
--- a/src/vnet/ipsec/ipsec_sa.h
+++ b/src/vnet/ipsec/ipsec_sa.h
@@ -116,8 +116,8 @@ typedef struct
u8 crypto_iv_size;
u8 esp_block_align;
u8 integ_icv_size;
- u32 encrypt_thread_index;
- u32 decrypt_thread_index;
+ u32 thread_index;
+ u32 __pad_u32;
u32 spi;
u32 seq;
u32 seq_hi;