Diffstat (limited to 'src/vnet/ipsec/esp_decrypt.c')
-rw-r--r--  src/vnet/ipsec/esp_decrypt.c  437
1 file changed, 261 insertions(+), 176 deletions(-)
diff --git a/src/vnet/ipsec/esp_decrypt.c b/src/vnet/ipsec/esp_decrypt.c
index e30fc9effcb..26d8ca1deee 100644
--- a/src/vnet/ipsec/esp_decrypt.c
+++ b/src/vnet/ipsec/esp_decrypt.c
@@ -14,7 +14,6 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-
#include <vnet/vnet.h>
#include <vnet/api_errno.h>
#include <vnet/ip/ip.h>
@@ -58,35 +57,6 @@ typedef enum
ESP_DECRYPT_POST_N_NEXT,
} esp_decrypt_post_next_t;
-#define foreach_esp_decrypt_error \
- _ (RX_PKTS, "ESP pkts received") \
- _ (RX_POST_PKTS, "ESP-POST pkts received") \
- _ (HANDOFF, "hand-off") \
- _ (DECRYPTION_FAILED, "ESP decryption failed") \
- _ (INTEG_ERROR, "Integrity check failed") \
- _ (CRYPTO_ENGINE_ERROR, "crypto engine error (packet dropped)") \
- _ (REPLAY, "SA replayed packet") \
- _ (RUNT, "undersized packet") \
- _ (NO_BUFFERS, "no buffers (packet dropped)") \
- _ (OVERSIZED_HEADER, "buffer with oversized header (dropped)") \
- _ (NO_TAIL_SPACE, "no enough buffer tail space (dropped)") \
- _ (TUN_NO_PROTO, "no tunnel protocol") \
- _ (UNSUP_PAYLOAD, "unsupported payload")
-
-typedef enum
-{
-#define _(sym,str) ESP_DECRYPT_ERROR_##sym,
- foreach_esp_decrypt_error
-#undef _
- ESP_DECRYPT_N_ERROR,
-} esp_decrypt_error_t;
-
-static char *esp_decrypt_error_strings[] = {
-#define _(sym,string) string,
- foreach_esp_decrypt_error
-#undef _
-};
-
typedef struct
{
u32 seq;
@@ -97,6 +67,8 @@ typedef struct
ipsec_integ_alg_t integ_alg;
} esp_decrypt_trace_t;
+typedef vl_counter_esp_decrypt_enum_t esp_decrypt_error_t;
+
/* The number of bytes in the high (ESN) sequence number */
#define N_HI_ESN_BYTES 4
@@ -141,8 +113,9 @@ esp_process_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
err = e;
else
err = ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR;
- b[bi]->error = node->errors[err];
- nexts[bi] = ESP_DECRYPT_NEXT_DROP;
+ esp_decrypt_set_next_index (b[bi], node, vm->thread_index, err, bi,
+ nexts, ESP_DECRYPT_NEXT_DROP,
+ vnet_buffer (b[bi])->ipsec.sad_index);
n_fail--;
}
op++;
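The new esp_decrypt_set_next_index() helper is defined outside this file and is not part of the diff. A minimal sketch of what it plausibly does, assuming it only adds a per-SA error counter update on top of the two statements it replaces (the IPSEC_SA_ERROR_* index below is illustrative, not taken from the source):

/* hypothetical sketch -- the real helper lives elsewhere, not in this diff */
static_always_inline void
esp_decrypt_set_next_index_sketch (vlib_buffer_t *b, vlib_node_runtime_t *node,
                                   u32 thread_index, u32 err, u16 index,
                                   u16 *nexts, u16 drop_next, u32 sa_index)
{
  /* what the open-coded version used to do */
  b->error = node->errors[err];
  nexts[index] = drop_next;
  /* plus: charge the drop against the SA's own error counters
   * (assumed mapping; the real helper may translate err differently) */
  vlib_increment_simple_counter (
    &ipsec_sa_err_counters[IPSEC_SA_ERROR_DECRYPTION_FAILED], thread_index,
    sa_index, 1);
}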
@@ -173,8 +146,9 @@ esp_process_chained_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
err = e;
else
err = ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR;
- b[bi]->error = node->errors[err];
- nexts[bi] = ESP_DECRYPT_NEXT_DROP;
+ esp_decrypt_set_next_index (b[bi], node, vm->thread_index, err, bi,
+ nexts, ESP_DECRYPT_NEXT_DROP,
+ vnet_buffer (b[bi])->ipsec.sad_index);
n_fail--;
}
op++;
@@ -187,6 +161,9 @@ esp_remove_tail (vlib_main_t * vm, vlib_buffer_t * b, vlib_buffer_t * last,
{
vlib_buffer_t *before_last = b;
+ if (b != last)
+ b->total_length_not_including_first_buffer -= tail;
+
if (last->current_length > tail)
{
last->current_length -= tail;
@@ -204,6 +181,37 @@ esp_remove_tail (vlib_main_t * vm, vlib_buffer_t * b, vlib_buffer_t * last,
before_last->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
}
+always_inline void
+esp_remove_tail_and_tfc_padding (vlib_main_t *vm, vlib_node_runtime_t *node,
+ const esp_decrypt_packet_data_t *pd,
+ vlib_buffer_t *b, vlib_buffer_t *last,
+ u16 *next, u16 tail, int is_ip6)
+{
+ const u16 total_buffer_length = vlib_buffer_length_in_chain (vm, b);
+ u16 ip_packet_length;
+ if (is_ip6)
+ {
+ const ip6_header_t *ip6 = vlib_buffer_get_current (b);
+ ip_packet_length =
+ clib_net_to_host_u16 (ip6->payload_length) + sizeof (ip6_header_t);
+ }
+ else
+ {
+ const ip4_header_t *ip4 = vlib_buffer_get_current (b);
+ ip_packet_length = clib_net_to_host_u16 (ip4->length);
+ }
+ /* In case of TFC padding, the size of the buffer data needs to be adjusted
+ * to the IP packet length */
+ if (PREDICT_FALSE (total_buffer_length < ip_packet_length + tail))
+ {
+ esp_decrypt_set_next_index (b, node, vm->thread_index,
+ ESP_DECRYPT_ERROR_NO_TAIL_SPACE, 0, next,
+ ESP_DECRYPT_NEXT_DROP, pd->sa_index);
+ return;
+ }
+ esp_remove_tail (vm, b, last, total_buffer_length - ip_packet_length);
+}
+
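A worked illustration of the TFC-padding trim above, with made-up numbers:

/* illustrative numbers only:
 *   total_buffer_length = 1400  decrypted bytes in the chain
 *   ip_packet_length    = 1300  taken from the inner IP header
 *   tail                =   18  sizeof (esp_footer_t) + pad_length + icv_sz
 * 1400 >= 1300 + 18, so the packet is kept and 1400 - 1300 = 100 bytes are
 * trimmed: the 18-byte ESP tail plus 82 bytes of TFC padding (RFC 4303 2.7).
 * Had total_buffer_length been below 1318, the packet would be dropped with
 * ESP_DECRYPT_ERROR_NO_TAIL_SPACE. */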
/* ICV is split across the last two buffers so move it to the last buffer and
   return a pointer to it */
static_always_inline u8 *
@@ -229,9 +237,12 @@ esp_move_icv (vlib_main_t * vm, vlib_buffer_t * first,
before_last->current_length -= first_sz;
if (before_last == first)
pd->current_length -= first_sz;
+ else
+ first->total_length_not_including_first_buffer -= first_sz;
clib_memset (vlib_buffer_get_tail (before_last), 0, first_sz);
if (dif)
dif[0] = first_sz;
+ first->total_length_not_including_first_buffer -= last_sz;
pd2->lb = before_last;
pd2->icv_removed = 1;
pd2->free_buffer_index = before_last->next_buffer;
@@ -483,18 +494,16 @@ esp_decrypt_chain_crypto (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
return total_len;
}
-static_always_inline void
-esp_decrypt_prepare_sync_op (vlib_main_t * vm, vlib_node_runtime_t * node,
- ipsec_per_thread_data_t * ptd,
- vnet_crypto_op_t *** crypto_ops,
- vnet_crypto_op_t *** integ_ops,
- vnet_crypto_op_t * op,
- ipsec_sa_t * sa0, u8 * payload,
- u16 len, u8 icv_sz, u8 iv_sz,
- esp_decrypt_packet_data_t * pd,
- esp_decrypt_packet_data2_t * pd2,
- vlib_buffer_t * b, u16 * next, u32 index)
+static_always_inline esp_decrypt_error_t
+esp_decrypt_prepare_sync_op (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
+ ipsec_sa_t *sa0, u8 *payload, u16 len, u8 icv_sz,
+ u8 iv_sz, esp_decrypt_packet_data_t *pd,
+ esp_decrypt_packet_data2_t *pd2, vlib_buffer_t *b,
+ u32 index)
{
+ vnet_crypto_op_t **crypto_ops;
+ vnet_crypto_op_t **integ_ops;
+ vnet_crypto_op_t _op, *op = &_op;
const u8 esp_sz = sizeof (esp_header_t);
if (PREDICT_TRUE (sa0->integ_op_id != VNET_CRYPTO_OP_NONE))
@@ -511,6 +520,8 @@ esp_decrypt_prepare_sync_op (vlib_main_t * vm, vlib_node_runtime_t * node,
if (pd->is_chain)
{
/* buffer is chained */
+ integ_ops = &ptd->chained_integ_ops;
+
op->len = pd->current_length;
/* special case when ICV is split and needs to be reassembled
@@ -536,8 +547,7 @@ esp_decrypt_prepare_sync_op (vlib_main_t * vm, vlib_node_runtime_t * node,
{
/* we now have a single buffer of crypto data, adjust
* the length (second buffer contains only ICV) */
- *integ_ops = &ptd->integ_ops;
- *crypto_ops = &ptd->crypto_ops;
+ integ_ops = &ptd->integ_ops;
len = b->current_length;
goto out;
}
@@ -551,17 +561,16 @@ esp_decrypt_prepare_sync_op (vlib_main_t * vm, vlib_node_runtime_t * node,
if (esp_decrypt_chain_integ (vm, ptd, pd, pd2, sa0, b, icv_sz,
payload, pd->current_length,
&op->digest, &op->n_chunks, 0) < 0)
- {
- b->error = node->errors[ESP_DECRYPT_ERROR_NO_BUFFERS];
- next[0] = ESP_DECRYPT_NEXT_DROP;
- return;
- }
+ return ESP_DECRYPT_ERROR_NO_BUFFERS;
}
else
- esp_insert_esn (vm, sa0, pd, pd2, &op->len, &op->digest, &len, b,
- payload);
+ {
+ integ_ops = &ptd->integ_ops;
+ esp_insert_esn (vm, sa0, pd, pd2, &op->len, &op->digest, &len, b,
+ payload);
+ }
out:
- vec_add_aligned (*(integ_ops[0]), op, 1, CLIB_CACHE_LINE_BYTES);
+ vec_add_aligned (*integ_ops, op, 1, CLIB_CACHE_LINE_BYTES);
}
payload += esp_sz;
@@ -587,6 +596,12 @@ esp_decrypt_prepare_sync_op (vlib_main_t * vm, vlib_node_runtime_t * node,
op->aad_len = esp_aad_fill (op->aad, esp0, sa0, pd->seq_hi);
op->tag = payload + len;
op->tag_len = 16;
+ if (PREDICT_FALSE (ipsec_sa_is_set_IS_NULL_GMAC (sa0)))
+ {
+ /* RFC-4543 ENCR_NULL_AUTH_AES_GMAC: IV is part of AAD */
+ payload -= iv_sz;
+ len += iv_sz;
+ }
}
else
{
@@ -609,26 +624,32 @@ esp_decrypt_prepare_sync_op (vlib_main_t * vm, vlib_node_runtime_t * node,
esp_decrypt_chain_crypto (vm, ptd, pd, pd2, sa0, b, icv_sz,
payload, len - pd->iv_sz + pd->icv_sz,
&op->tag, &op->n_chunks);
+ crypto_ops = &ptd->chained_crypto_ops;
+ }
+ else
+ {
+ crypto_ops = &ptd->crypto_ops;
}
- vec_add_aligned (*(crypto_ops[0]), op, 1, CLIB_CACHE_LINE_BYTES);
+ vec_add_aligned (*crypto_ops, op, 1, CLIB_CACHE_LINE_BYTES);
}
+
+ return ESP_DECRYPT_ERROR_RX_PKTS;
}
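For the RFC-4543 (ENCR_NULL_AUTH_AES_GMAC) branch above, the whole adjustment is pointer arithmetic; a hedged illustration, assuming an 8-byte IV:

/* before the adjustment (NULL encryption, so nothing is actually decrypted):
 *
 *   | ESP hdr (SPI, SEQ) | IV (iv_sz) | payload ......... | ICV |
 *                                     ^ payload, length = len
 *
 * after payload -= iv_sz; len += iv_sz:
 *
 *   | ESP hdr (SPI, SEQ) | IV (iv_sz) | payload ......... | ICV |
 *                        ^ payload, length = len + iv_sz
 *
 * i.e. the op now starts at the IV, so the GMAC tag covers the IV too --
 * RFC 4543 folds the IV into the additional authenticated data, as the
 * in-line comment above notes. */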
static_always_inline esp_decrypt_error_t
-esp_decrypt_prepare_async_frame (vlib_main_t *vm, vlib_node_runtime_t *node,
- ipsec_per_thread_data_t *ptd,
+esp_decrypt_prepare_async_frame (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
vnet_crypto_async_frame_t *f, ipsec_sa_t *sa0,
u8 *payload, u16 len, u8 icv_sz, u8 iv_sz,
esp_decrypt_packet_data_t *pd,
esp_decrypt_packet_data2_t *pd2, u32 bi,
- vlib_buffer_t *b, u16 *next, u16 async_next)
+ vlib_buffer_t *b, u16 async_next)
{
const u8 esp_sz = sizeof (esp_header_t);
esp_decrypt_packet_data_t *async_pd = &(esp_post_data (b))->decrypt_data;
esp_decrypt_packet_data2_t *async_pd2 = esp_post_data2 (b);
u8 *tag = payload + len, *iv = payload + esp_sz, *aad = 0;
- u32 key_index;
+ const u32 key_index = sa0->crypto_key_index;
u32 crypto_len, integ_len = 0;
i16 crypto_start_offset, integ_start_offset = 0;
u8 flags = 0;
@@ -636,7 +657,6 @@ esp_decrypt_prepare_async_frame (vlib_main_t *vm, vlib_node_runtime_t *node,
if (!ipsec_sa_is_set_IS_AEAD (sa0))
{
/* linked algs */
- key_index = sa0->linked_key_index;
integ_start_offset = payload - b->data;
integ_len = len;
if (PREDICT_TRUE (sa0->integ_op_id != VNET_CRYPTO_OP_NONE))
@@ -689,8 +709,6 @@ esp_decrypt_prepare_async_frame (vlib_main_t *vm, vlib_node_runtime_t *node,
else
esp_insert_esn (vm, sa0, pd, pd2, &integ_len, &tag, &len, b, payload);
}
- else
- key_index = sa0->crypto_key_index;
out:
/* crypto */
@@ -710,6 +728,12 @@ out:
aad = (u8 *) nonce - sizeof (esp_aead_t);
esp_aad_fill (aad, esp0, sa0, pd->seq_hi);
tag = payload + len;
+ if (PREDICT_FALSE (ipsec_sa_is_set_IS_NULL_GMAC (sa0)))
+ {
+ /* RFC-4543 ENCR_NULL_AUTH_AES_GMAC: IV is part of AAD */
+ payload -= iv_sz;
+ len += iv_sz;
+ }
}
else
{
@@ -748,10 +772,12 @@ out:
}
static_always_inline void
-esp_decrypt_post_crypto (vlib_main_t * vm, vlib_node_runtime_t * node,
- esp_decrypt_packet_data_t * pd,
- esp_decrypt_packet_data2_t * pd2, vlib_buffer_t * b,
- u16 * next, int is_ip6, int is_tun, int is_async)
+esp_decrypt_post_crypto (vlib_main_t *vm, vlib_node_runtime_t *node,
+ const u16 *next_by_next_header,
+ const esp_decrypt_packet_data_t *pd,
+ const esp_decrypt_packet_data2_t *pd2,
+ vlib_buffer_t *b, u16 *next, int is_ip6, int is_tun,
+ int is_async)
{
ipsec_sa_t *sa0 = ipsec_sa_get (pd->sa_index);
vlib_buffer_t *lb = b;
@@ -759,6 +785,7 @@ esp_decrypt_post_crypto (vlib_main_t * vm, vlib_node_runtime_t * node,
const u8 tun_flags = IPSEC_SA_FLAG_IS_TUNNEL | IPSEC_SA_FLAG_IS_TUNNEL_V6;
u8 pad_length = 0, next_header = 0;
u16 icv_sz;
+ u64 n_lost;
/*
* redo the anti-replay check
@@ -767,30 +794,50 @@ esp_decrypt_post_crypto (vlib_main_t * vm, vlib_node_runtime_t * node,
* check above we did so against the state of the window (W),
* after packet s-1. So each of the packets in the sequence will be
* accepted.
- * This time s will be cheked against Ws-1, s+1 chceked against Ws
- * (i.e. the window state is updated/advnaced)
- * so this time the successive s+! packet will be dropped.
+ * This time s will be checked against Ws-1, s+1 checked against Ws
+ * (i.e. the window state is updated/advanced)
+ * so this time the successive s+1 packet will be dropped.
* This is a consequence of batching the decrypts. If the
- * check-dcrypt-advance process was done for each packet it would
+ * check-decrypt-advance process was done for each packet it would
* be fine. But we batch the decrypts because it's much more efficient
* to do so in SW and if we offload to HW and the process is async.
*
* You're probably thinking, but this means an attacker can send the
- * above sequence and cause VPP to perform decrpyts that will fail,
+ * above sequence and cause VPP to perform decrypts that will fail,
* and that's true. But if the attacker can determine s (a valid
* sequence number in the window) which is non-trivial, it can generate
* a sequence s, s+1, s+2, s+3, ... s+n and nothing will prevent any
* implementation, sequential or batching, from decrypting these.
*/
- if (ipsec_sa_anti_replay_and_sn_advance (sa0, pd->seq, pd->seq_hi, true,
- NULL))
+ if (PREDICT_FALSE (ipsec_sa_is_set_ANTI_REPLAY_HUGE (sa0)))
{
- b->error = node->errors[ESP_DECRYPT_ERROR_REPLAY];
- next[0] = ESP_DECRYPT_NEXT_DROP;
- return;
+ if (ipsec_sa_anti_replay_and_sn_advance (sa0, pd->seq, pd->seq_hi, true,
+ NULL, true))
+ {
+ esp_decrypt_set_next_index (b, node, vm->thread_index,
+ ESP_DECRYPT_ERROR_REPLAY, 0, next,
+ ESP_DECRYPT_NEXT_DROP, pd->sa_index);
+ return;
+ }
+ n_lost = ipsec_sa_anti_replay_advance (sa0, vm->thread_index, pd->seq,
+ pd->seq_hi, true);
+ }
+ else
+ {
+ if (ipsec_sa_anti_replay_and_sn_advance (sa0, pd->seq, pd->seq_hi, true,
+ NULL, false))
+ {
+ esp_decrypt_set_next_index (b, node, vm->thread_index,
+ ESP_DECRYPT_ERROR_REPLAY, 0, next,
+ ESP_DECRYPT_NEXT_DROP, pd->sa_index);
+ return;
+ }
+ n_lost = ipsec_sa_anti_replay_advance (sa0, vm->thread_index, pd->seq,
+ pd->seq_hi, false);
}
- ipsec_sa_anti_replay_advance (sa0, pd->seq, pd->seq_hi);
+ vlib_prefetch_simple_counter (&ipsec_sa_err_counters[IPSEC_SA_ERROR_LOST],
+ vm->thread_index, pd->sa_index);
if (pd->is_chain)
{
@@ -849,7 +896,8 @@ esp_decrypt_post_crypto (vlib_main_t * vm, vlib_node_runtime_t * node,
u16 adv = pd->iv_sz + esp_sz;
u16 tail = sizeof (esp_footer_t) + pad_length + icv_sz;
u16 tail_orig = sizeof (esp_footer_t) + pad_length + pd->icv_sz;
- b->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
+ b->flags &=
+ ~(VNET_BUFFER_F_L4_CHECKSUM_COMPUTED | VNET_BUFFER_F_L4_CHECKSUM_CORRECT);
if ((pd->flags & tun_flags) == 0 && !is_tun) /* transport mode */
{
@@ -899,14 +947,16 @@ esp_decrypt_post_crypto (vlib_main_t * vm, vlib_node_runtime_t * node,
next[0] = ESP_DECRYPT_NEXT_IP4_INPUT;
b->current_data = pd->current_data + adv;
b->current_length = pd->current_length - adv;
- esp_remove_tail (vm, b, lb, tail);
+ esp_remove_tail_and_tfc_padding (vm, node, pd, b, lb, next, tail,
+ false);
}
else if (next_header == IP_PROTOCOL_IPV6)
{
next[0] = ESP_DECRYPT_NEXT_IP6_INPUT;
b->current_data = pd->current_data + adv;
b->current_length = pd->current_length - adv;
- esp_remove_tail (vm, b, lb, tail);
+ esp_remove_tail_and_tfc_padding (vm, node, pd, b, lb, next, tail,
+ true);
}
else if (next_header == IP_PROTOCOL_MPLS_IN_IP)
{
@@ -915,44 +965,51 @@ esp_decrypt_post_crypto (vlib_main_t * vm, vlib_node_runtime_t * node,
b->current_length = pd->current_length - adv;
esp_remove_tail (vm, b, lb, tail);
}
- else
+ else if (is_tun && next_header == IP_PROTOCOL_GRE)
{
- if (is_tun && next_header == IP_PROTOCOL_GRE)
- {
- gre_header_t *gre;
+ gre_header_t *gre;
- b->current_data = pd->current_data + adv;
- b->current_length = pd->current_length - adv - tail;
+ b->current_data = pd->current_data + adv;
+ b->current_length = pd->current_length - adv - tail;
- gre = vlib_buffer_get_current (b);
+ gre = vlib_buffer_get_current (b);
- vlib_buffer_advance (b, sizeof (*gre));
+ vlib_buffer_advance (b, sizeof (*gre));
- switch (clib_net_to_host_u16 (gre->protocol))
- {
- case GRE_PROTOCOL_teb:
- vnet_update_l2_len (b);
- next[0] = ESP_DECRYPT_NEXT_L2_INPUT;
- break;
- case GRE_PROTOCOL_ip4:
- next[0] = ESP_DECRYPT_NEXT_IP4_INPUT;
- break;
- case GRE_PROTOCOL_ip6:
- next[0] = ESP_DECRYPT_NEXT_IP6_INPUT;
- break;
- default:
- b->error = node->errors[ESP_DECRYPT_ERROR_UNSUP_PAYLOAD];
- next[0] = ESP_DECRYPT_NEXT_DROP;
- break;
- }
- }
- else
+ switch (clib_net_to_host_u16 (gre->protocol))
{
- next[0] = ESP_DECRYPT_NEXT_DROP;
- b->error = node->errors[ESP_DECRYPT_ERROR_UNSUP_PAYLOAD];
- return;
+ case GRE_PROTOCOL_teb:
+ vnet_update_l2_len (b);
+ next[0] = ESP_DECRYPT_NEXT_L2_INPUT;
+ break;
+ case GRE_PROTOCOL_ip4:
+ next[0] = ESP_DECRYPT_NEXT_IP4_INPUT;
+ break;
+ case GRE_PROTOCOL_ip6:
+ next[0] = ESP_DECRYPT_NEXT_IP6_INPUT;
+ break;
+ default:
+ esp_decrypt_set_next_index (
+ b, node, vm->thread_index, ESP_DECRYPT_ERROR_UNSUP_PAYLOAD, 0,
+ next, ESP_DECRYPT_NEXT_DROP, pd->sa_index);
+ break;
}
}
+ else if ((next[0] = vec_elt (next_by_next_header, next_header)) !=
+ (u16) ~0)
+ {
+ b->current_data = pd->current_data + adv;
+ b->current_length = pd->current_length - adv;
+ esp_remove_tail (vm, b, lb, tail);
+ }
+ else
+ {
+ esp_decrypt_set_next_index (b, node, vm->thread_index,
+ ESP_DECRYPT_ERROR_UNSUP_PAYLOAD, 0, next,
+ ESP_DECRYPT_NEXT_DROP, pd->sa_index);
+ return;
+ }
+
if (is_tun)
{
if (ipsec_sa_is_set_IS_PROTECT (sa0))
@@ -989,8 +1046,10 @@ esp_decrypt_post_crypto (vlib_main_t * vm, vlib_node_runtime_t * node,
!ip46_address_is_equal_v4 (&itp->itp_tun.dst,
&ip4->src_address))
{
- next[0] = ESP_DECRYPT_NEXT_DROP;
- b->error = node->errors[ESP_DECRYPT_ERROR_TUN_NO_PROTO];
+ esp_decrypt_set_next_index (
+ b, node, vm->thread_index,
+ ESP_DECRYPT_ERROR_TUN_NO_PROTO, 0, next,
+ ESP_DECRYPT_NEXT_DROP, pd->sa_index);
}
}
else if (next_header == IP_PROTOCOL_IPV6)
@@ -1004,13 +1063,19 @@ esp_decrypt_post_crypto (vlib_main_t * vm, vlib_node_runtime_t * node,
!ip46_address_is_equal_v6 (&itp->itp_tun.dst,
&ip6->src_address))
{
- next[0] = ESP_DECRYPT_NEXT_DROP;
- b->error = node->errors[ESP_DECRYPT_ERROR_TUN_NO_PROTO];
+ esp_decrypt_set_next_index (
+ b, node, vm->thread_index,
+ ESP_DECRYPT_ERROR_TUN_NO_PROTO, 0, next,
+ ESP_DECRYPT_NEXT_DROP, pd->sa_index);
}
}
}
}
}
+
+ if (PREDICT_FALSE (n_lost))
+ vlib_increment_simple_counter (&ipsec_sa_err_counters[IPSEC_SA_ERROR_LOST],
+ vm->thread_index, pd->sa_index, n_lost);
}
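Putting the two anti-replay passes together, the flow around this function condenses to roughly the following (hedged pseudo-C: drop() is a placeholder, hugewin is a local introduced here for brevity, and the real code branches on ANTI_REPLAY_HUGE under PREDICT_FALSE so each path is specialized):

/* phase 1 -- esp_decrypt_inline, before decryption: check only, do not advance */
hugewin = ipsec_sa_is_set_ANTI_REPLAY_HUGE (sa0);
if (ipsec_sa_anti_replay_and_sn_advance (sa0, pd->seq, ~0, false, &pd->seq_hi,
                                         hugewin))
  drop (ESP_DECRYPT_ERROR_REPLAY);

/* ... batched / async decryption of the whole vector happens here ... */

/* phase 2 -- esp_decrypt_post_crypto, after a successful decrypt:
 * re-check against the window as it stands now, then advance it; the number
 * of sequence numbers the window skipped over is charged to the SA as
 * IPSEC_SA_ERROR_LOST */
if (ipsec_sa_anti_replay_and_sn_advance (sa0, pd->seq, pd->seq_hi, true, NULL,
                                         hugewin))
  drop (ESP_DECRYPT_ERROR_REPLAY);
n_lost = ipsec_sa_anti_replay_advance (sa0, vm->thread_index, pd->seq,
                                       pd->seq_hi, hugewin);
if (n_lost)
  vlib_increment_simple_counter (&ipsec_sa_err_counters[IPSEC_SA_ERROR_LOST],
                                 vm->thread_index, pd->sa_index, n_lost);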
always_inline uword
@@ -1019,6 +1084,7 @@ esp_decrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
u16 async_next_node)
{
ipsec_main_t *im = &ipsec_main;
+ const u16 *next_by_next_header = im->next_header_registrations;
u32 thread_index = vm->thread_index;
u16 len;
ipsec_per_thread_data_t *ptd = vec_elt_at_index (im->ptd, thread_index);
@@ -1027,8 +1093,7 @@ esp_decrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
vlib_buffer_t *sync_bufs[VLIB_FRAME_SIZE];
u16 sync_nexts[VLIB_FRAME_SIZE], *sync_next = sync_nexts, n_sync = 0;
- u16 async_nexts[VLIB_FRAME_SIZE], *async_next = async_nexts;
- u16 noop_nexts[VLIB_FRAME_SIZE], *noop_next = noop_nexts, n_noop = 0;
+ u16 noop_nexts[VLIB_FRAME_SIZE], n_noop = 0;
u32 sync_bi[VLIB_FRAME_SIZE];
u32 noop_bi[VLIB_FRAME_SIZE];
esp_decrypt_packet_data_t pkt_data[VLIB_FRAME_SIZE], *pd = pkt_data;
@@ -1037,9 +1102,7 @@ esp_decrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
u32 current_sa_index = ~0, current_sa_bytes = 0, current_sa_pkts = 0;
const u8 esp_sz = sizeof (esp_header_t);
ipsec_sa_t *sa0 = 0;
- vnet_crypto_op_t _op, *op = &_op;
- vnet_crypto_op_t **crypto_ops;
- vnet_crypto_op_t **integ_ops;
+ bool anti_replay_result;
int is_async = im->async_mode;
vnet_crypto_async_op_id_t async_op = ~0;
vnet_crypto_async_frame_t *async_frames[VNET_CRYPTO_ASYNC_OP_N_IDS];
@@ -1077,8 +1140,9 @@ esp_decrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
if (n_bufs == 0)
{
err = ESP_DECRYPT_ERROR_NO_BUFFERS;
- esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
- ESP_DECRYPT_NEXT_DROP);
+ esp_decrypt_set_next_index (b[0], node, thread_index, err, n_noop,
+ noop_nexts, ESP_DECRYPT_NEXT_DROP,
+ vnet_buffer (b[0])->ipsec.sad_index);
goto next;
}
@@ -1086,12 +1150,13 @@ esp_decrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
{
if (current_sa_pkts)
vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
- current_sa_index,
- current_sa_pkts,
+ current_sa_index, current_sa_pkts,
current_sa_bytes);
current_sa_bytes = current_sa_pkts = 0;
current_sa_index = vnet_buffer (b[0])->ipsec.sad_index;
+ vlib_prefetch_combined_counter (&ipsec_sa_counters, thread_index,
+ current_sa_index);
sa0 = ipsec_sa_get (current_sa_index);
/* fetch the second cacheline ASAP */
@@ -1103,7 +1168,7 @@ esp_decrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
is_async = im->async_mode | ipsec_sa_is_set_IS_ASYNC (sa0);
}
- if (PREDICT_FALSE (~0 == sa0->thread_index))
+ if (PREDICT_FALSE ((u16) ~0 == sa0->thread_index))
{
/* this is the first packet to use this SA, claim the SA
* for this thread. this could happen simultaneously on
@@ -1116,8 +1181,9 @@ esp_decrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
{
vnet_buffer (b[0])->ipsec.thread_index = sa0->thread_index;
err = ESP_DECRYPT_ERROR_HANDOFF;
- esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
- ESP_DECRYPT_NEXT_HANDOFF);
+ esp_decrypt_set_next_index (b[0], node, thread_index, err, n_noop,
+ noop_nexts, ESP_DECRYPT_NEXT_HANDOFF,
+ current_sa_index);
goto next;
}
@@ -1138,33 +1204,37 @@ esp_decrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
/* find last buffer in the chain */
while (pd2->lb->flags & VLIB_BUFFER_NEXT_PRESENT)
pd2->lb = vlib_get_buffer (vm, pd2->lb->next_buffer);
+ }
- crypto_ops = &ptd->chained_crypto_ops;
- integ_ops = &ptd->chained_integ_ops;
+ pd->current_length = b[0]->current_length;
+
+ /* anti-replay check */
+ if (PREDICT_FALSE (ipsec_sa_is_set_ANTI_REPLAY_HUGE (sa0)))
+ {
+ anti_replay_result = ipsec_sa_anti_replay_and_sn_advance (
+ sa0, pd->seq, ~0, false, &pd->seq_hi, true);
}
else
{
- crypto_ops = &ptd->crypto_ops;
- integ_ops = &ptd->integ_ops;
+ anti_replay_result = ipsec_sa_anti_replay_and_sn_advance (
+ sa0, pd->seq, ~0, false, &pd->seq_hi, false);
}
- pd->current_length = b[0]->current_length;
-
- /* anti-reply check */
- if (ipsec_sa_anti_replay_and_sn_advance (sa0, pd->seq, ~0, false,
- &pd->seq_hi))
+ if (anti_replay_result)
{
err = ESP_DECRYPT_ERROR_REPLAY;
- esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
- ESP_DECRYPT_NEXT_DROP);
+ esp_decrypt_set_next_index (b[0], node, thread_index, err, n_noop,
+ noop_nexts, ESP_DECRYPT_NEXT_DROP,
+ current_sa_index);
goto next;
}
if (pd->current_length < cpd.icv_sz + esp_sz + cpd.iv_sz)
{
err = ESP_DECRYPT_ERROR_RUNT;
- esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
- ESP_DECRYPT_NEXT_DROP);
+ esp_decrypt_set_next_index (b[0], node, thread_index, err, n_noop,
+ noop_nexts, ESP_DECRYPT_NEXT_DROP,
+ current_sa_index);
goto next;
}
@@ -1183,31 +1253,47 @@ esp_decrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
{
async_frames[async_op] =
vnet_crypto_async_get_frame (vm, async_op);
+ if (PREDICT_FALSE (!async_frames[async_op]))
+ {
+ err = ESP_DECRYPT_ERROR_NO_AVAIL_FRAME;
+ esp_decrypt_set_next_index (
+ b[0], node, thread_index, err, n_noop, noop_nexts,
+ ESP_DECRYPT_NEXT_DROP, current_sa_index);
+ goto next;
+ }
+
/* Save the frame to the list we'll submit at the end */
vec_add1 (ptd->async_frames, async_frames[async_op]);
}
err = esp_decrypt_prepare_async_frame (
- vm, node, ptd, async_frames[async_op], sa0, payload, len,
- cpd.icv_sz, cpd.iv_sz, pd, pd2, from[b - bufs], b[0], async_next,
- async_next_node);
+ vm, ptd, async_frames[async_op], sa0, payload, len, cpd.icv_sz,
+ cpd.iv_sz, pd, pd2, from[b - bufs], b[0], async_next_node);
if (ESP_DECRYPT_ERROR_RX_PKTS != err)
{
- esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
- ESP_DECRYPT_NEXT_DROP);
+ esp_decrypt_set_next_index (
+ b[0], node, thread_index, err, n_noop, noop_nexts,
+ ESP_DECRYPT_NEXT_DROP, current_sa_index);
}
}
else
- esp_decrypt_prepare_sync_op (
- vm, node, ptd, &crypto_ops, &integ_ops, op, sa0, payload, len,
- cpd.icv_sz, cpd.iv_sz, pd, pd2, b[0], sync_next, b - bufs);
+ {
+ err = esp_decrypt_prepare_sync_op (vm, ptd, sa0, payload, len,
+ cpd.icv_sz, cpd.iv_sz, pd, pd2,
+ b[0], n_sync);
+ if (err != ESP_DECRYPT_ERROR_RX_PKTS)
+ {
+ esp_decrypt_set_next_index (b[0], node, thread_index, err, 0,
+ sync_next, ESP_DECRYPT_NEXT_DROP,
+ current_sa_index);
+ }
+ }
/* next */
next:
if (ESP_DECRYPT_ERROR_RX_PKTS != err)
{
noop_bi[n_noop] = from[b - bufs];
n_noop++;
- noop_next++;
}
else if (!is_async)
{
@@ -1218,8 +1304,6 @@ esp_decrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
pd += 1;
pd2 += 1;
}
- else
- async_next++;
n_left -= 1;
b += 1;
@@ -1245,7 +1329,8 @@ esp_decrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
{
n_noop += esp_async_recycle_failed_submit (
vm, *async_frame, node, ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR,
- n_sync, noop_bi, noop_nexts, ESP_DECRYPT_NEXT_DROP);
+ IPSEC_SA_ERROR_CRYPTO_ENGINE_ERROR, n_noop, noop_bi, noop_nexts,
+ ESP_DECRYPT_NEXT_DROP, false);
vnet_crypto_async_reset_frame (*async_frame);
vnet_crypto_async_free_frame (vm, *async_frame);
}
@@ -1298,8 +1383,8 @@ esp_decrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
current_sa_index = vnet_buffer (b[0])->ipsec.sad_index;
if (sync_next[0] >= ESP_DECRYPT_N_NEXT)
- esp_decrypt_post_crypto (vm, node, pd, pd2, b[0], sync_next, is_ip6,
- is_tun, 0);
+ esp_decrypt_post_crypto (vm, node, next_by_next_header, pd, pd2, b[0],
+ sync_next, is_ip6, is_tun, 0);
/* trace: */
if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
@@ -1340,6 +1425,8 @@ esp_decrypt_post_inline (vlib_main_t * vm,
vlib_node_runtime_t * node,
vlib_frame_t * from_frame, int is_ip6, int is_tun)
{
+ const ipsec_main_t *im = &ipsec_main;
+ const u16 *next_by_next_header = im->next_header_registrations;
u32 *from = vlib_frame_vector_args (from_frame);
u32 n_left = from_frame->n_vectors;
vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
@@ -1357,13 +1444,13 @@ esp_decrypt_post_inline (vlib_main_t * vm,
}
if (!pd->is_chain)
- esp_decrypt_post_crypto (vm, node, pd, 0, b[0], next, is_ip6, is_tun,
- 1);
+ esp_decrypt_post_crypto (vm, node, next_by_next_header, pd, 0, b[0],
+ next, is_ip6, is_tun, 1);
else
{
esp_decrypt_packet_data2_t *pd2 = esp_post_data2 (b[0]);
- esp_decrypt_post_crypto (vm, node, pd, pd2, b[0], next, is_ip6,
- is_tun, 1);
+ esp_decrypt_post_crypto (vm, node, next_by_next_header, pd, pd2,
+ b[0], next, is_ip6, is_tun, 1);
}
/*trace: */
@@ -1457,15 +1544,14 @@ VLIB_NODE_FN (esp6_decrypt_tun_post_node) (vlib_main_t * vm,
return esp_decrypt_post_inline (vm, node, from_frame, 1, 1);
}
-/* *INDENT-OFF* */
VLIB_REGISTER_NODE (esp4_decrypt_node) = {
.name = "esp4-decrypt",
.vector_size = sizeof (u32),
.format_trace = format_esp_decrypt_trace,
.type = VLIB_NODE_TYPE_INTERNAL,
- .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
- .error_strings = esp_decrypt_error_strings,
+ .n_errors = ESP_DECRYPT_N_ERROR,
+ .error_counters = esp_decrypt_error_counters,
.n_next_nodes = ESP_DECRYPT_N_NEXT,
.next_nodes = {
@@ -1484,8 +1570,8 @@ VLIB_REGISTER_NODE (esp4_decrypt_post_node) = {
.format_trace = format_esp_decrypt_trace,
.type = VLIB_NODE_TYPE_INTERNAL,
- .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
- .error_strings = esp_decrypt_error_strings,
+ .n_errors = ESP_DECRYPT_N_ERROR,
+ .error_counters = esp_decrypt_error_counters,
.sibling_of = "esp4-decrypt",
};
@@ -1496,8 +1582,8 @@ VLIB_REGISTER_NODE (esp6_decrypt_node) = {
.format_trace = format_esp_decrypt_trace,
.type = VLIB_NODE_TYPE_INTERNAL,
- .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
- .error_strings = esp_decrypt_error_strings,
+ .n_errors = ESP_DECRYPT_N_ERROR,
+ .error_counters = esp_decrypt_error_counters,
.n_next_nodes = ESP_DECRYPT_N_NEXT,
.next_nodes = {
@@ -1516,8 +1602,8 @@ VLIB_REGISTER_NODE (esp6_decrypt_post_node) = {
.format_trace = format_esp_decrypt_trace,
.type = VLIB_NODE_TYPE_INTERNAL,
- .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
- .error_strings = esp_decrypt_error_strings,
+ .n_errors = ESP_DECRYPT_N_ERROR,
+ .error_counters = esp_decrypt_error_counters,
.sibling_of = "esp6-decrypt",
};
@@ -1527,8 +1613,8 @@ VLIB_REGISTER_NODE (esp4_decrypt_tun_node) = {
.vector_size = sizeof (u32),
.format_trace = format_esp_decrypt_trace,
.type = VLIB_NODE_TYPE_INTERNAL,
- .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
- .error_strings = esp_decrypt_error_strings,
+ .n_errors = ESP_DECRYPT_N_ERROR,
+ .error_counters = esp_decrypt_error_counters,
.n_next_nodes = ESP_DECRYPT_N_NEXT,
.next_nodes = {
[ESP_DECRYPT_NEXT_DROP] = "ip4-drop",
@@ -1546,8 +1632,8 @@ VLIB_REGISTER_NODE (esp4_decrypt_tun_post_node) = {
.format_trace = format_esp_decrypt_trace,
.type = VLIB_NODE_TYPE_INTERNAL,
- .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
- .error_strings = esp_decrypt_error_strings,
+ .n_errors = ESP_DECRYPT_N_ERROR,
+ .error_counters = esp_decrypt_error_counters,
.sibling_of = "esp4-decrypt-tun",
};
@@ -1557,8 +1643,8 @@ VLIB_REGISTER_NODE (esp6_decrypt_tun_node) = {
.vector_size = sizeof (u32),
.format_trace = format_esp_decrypt_trace,
.type = VLIB_NODE_TYPE_INTERNAL,
- .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
- .error_strings = esp_decrypt_error_strings,
+ .n_errors = ESP_DECRYPT_N_ERROR,
+ .error_counters = esp_decrypt_error_counters,
.n_next_nodes = ESP_DECRYPT_N_NEXT,
.next_nodes = {
[ESP_DECRYPT_NEXT_DROP] = "ip6-drop",
@@ -1576,12 +1662,11 @@ VLIB_REGISTER_NODE (esp6_decrypt_tun_post_node) = {
.format_trace = format_esp_decrypt_trace,
.type = VLIB_NODE_TYPE_INTERNAL,
- .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
- .error_strings = esp_decrypt_error_strings,
+ .n_errors = ESP_DECRYPT_N_ERROR,
+ .error_counters = esp_decrypt_error_counters,
.sibling_of = "esp6-decrypt-tun",
};
-/* *INDENT-ON* */
#ifndef CLIB_MARCH_VARIANT