Diffstat (limited to 'src/vnet/ipsec/ah_decrypt.c')
-rw-r--r--   src/vnet/ipsec/ah_decrypt.c   55
1 file changed, 28 insertions, 27 deletions
diff --git a/src/vnet/ipsec/ah_decrypt.c b/src/vnet/ipsec/ah_decrypt.c
index ec4db0fed57..6b62ff7f05c 100644
--- a/src/vnet/ipsec/ah_decrypt.c
+++ b/src/vnet/ipsec/ah_decrypt.c
@@ -127,7 +127,7 @@ ah_decrypt_inline (vlib_main_t * vm,
   ipsec_per_thread_data_t *ptd = vec_elt_at_index (im->ptd, thread_index);
   from = vlib_frame_vector_args (from_frame);
   n_left = from_frame->n_vectors;
-  ipsec_sa_t *sa0 = 0;
+  ipsec_sa_inb_rt_t *irt = 0;
   bool anti_replay_result;
   u32 current_sa_index = ~0, current_sa_bytes = 0, current_sa_pkts = 0;
@@ -149,25 +149,25 @@ ah_decrypt_inline (vlib_main_t * vm,
             current_sa_index, current_sa_pkts,
             current_sa_bytes);
           current_sa_index = vnet_buffer (b[0])->ipsec.sad_index;
-          sa0 = ipsec_sa_get (current_sa_index);
+          irt = ipsec_sa_get_inb_rt_by_index (current_sa_index);
           current_sa_bytes = current_sa_pkts = 0;
           vlib_prefetch_combined_counter (&ipsec_sa_counters, thread_index,
             current_sa_index);
         }
 
-      if (PREDICT_FALSE ((u16) ~0 == sa0->thread_index))
+      if (PREDICT_FALSE ((u16) ~0 == irt->thread_index))
         {
           /* this is the first packet to use this SA, claim the SA
            * for this thread. this could happen simultaneously on
            * another thread */
-          clib_atomic_cmp_and_swap (&sa0->thread_index, ~0,
+          clib_atomic_cmp_and_swap (&irt->thread_index, ~0,
             ipsec_sa_assign_thread (thread_index));
         }
 
-      if (PREDICT_TRUE (thread_index != sa0->thread_index))
+      if (PREDICT_TRUE (thread_index != irt->thread_index))
         {
-          vnet_buffer (b[0])->ipsec.thread_index = sa0->thread_index;
+          vnet_buffer (b[0])->ipsec.thread_index = irt->thread_index;
           next[0] = AH_DECRYPT_NEXT_HANDOFF;
           goto next;
         }
 
@@ -202,15 +202,15 @@ ah_decrypt_inline (vlib_main_t * vm,
       pd->seq = clib_host_to_net_u32 (ah0->seq_no);
 
       /* anti-replay check */
-      if (PREDICT_FALSE (ipsec_sa_is_set_ANTI_REPLAY_HUGE (sa0)))
+      if (PREDICT_FALSE (irt->anti_reply_huge))
         {
           anti_replay_result = ipsec_sa_anti_replay_and_sn_advance (
-            sa0, pd->seq, ~0, false, &pd->seq_hi, true);
+            irt, pd->seq, ~0, false, &pd->seq_hi, true);
         }
       else
         {
           anti_replay_result = ipsec_sa_anti_replay_and_sn_advance (
-            sa0, pd->seq, ~0, false, &pd->seq_hi, false);
+            irt, pd->seq, ~0, false, &pd->seq_hi, false);
         }
       if (anti_replay_result)
         {
@@ -223,13 +223,14 @@ ah_decrypt_inline (vlib_main_t * vm,
       current_sa_bytes += b[0]->current_length;
       current_sa_pkts += 1;
 
-      pd->icv_size = sa0->integ_icv_size;
+      pd->icv_size = irt->integ_icv_size;
       pd->nexthdr_cached = ah0->nexthdr;
-      if (PREDICT_TRUE (sa0->integ_alg != IPSEC_INTEG_ALG_NONE))
+      if (PREDICT_TRUE (irt->integ_icv_size))
         {
-          if (PREDICT_FALSE (ipsec_sa_is_set_USE_ESN (sa0) &&
-                             pd->current_data + b[0]->current_length
-                               + sizeof (u32) > buffer_data_size))
+          if (PREDICT_FALSE (irt->use_esn && pd->current_data +
+                                               b[0]->current_length +
+                                               sizeof (u32) >
+                                               buffer_data_size))
             {
               ah_decrypt_set_next_index (
                 b[0], node, vm->thread_index, AH_DECRYPT_ERROR_NO_TAIL_SPACE,
@@ -239,16 +240,16 @@ ah_decrypt_inline (vlib_main_t * vm,
           vnet_crypto_op_t *op;
           vec_add2_aligned (ptd->integ_ops, op, 1, CLIB_CACHE_LINE_BYTES);
-          vnet_crypto_op_init (op, sa0->integ_op_id);
+          vnet_crypto_op_init (op, irt->integ_op_id);
 
           op->src = (u8 *) ih4;
           op->len = b[0]->current_length;
           op->digest = (u8 *) ih4 - pd->icv_size;
           op->flags = VNET_CRYPTO_OP_FLAG_HMAC_CHECK;
           op->digest_len = pd->icv_size;
-          op->key_index = sa0->integ_key_index;
+          op->key_index = irt->integ_key_index;
           op->user_data = b - bufs;
 
-          if (ipsec_sa_is_set_USE_ESN (sa0))
+          if (irt->use_esn)
             {
               u32 seq_hi = clib_host_to_net_u32 (pd->seq_hi);
@@ -311,15 +312,15 @@ ah_decrypt_inline (vlib_main_t * vm,
       if (next[0] < AH_DECRYPT_N_NEXT)
         goto trace;
 
-      sa0 = ipsec_sa_get (pd->sa_index);
+      irt = ipsec_sa_get_inb_rt_by_index (pd->sa_index);
 
-      if (PREDICT_TRUE (sa0->integ_alg != IPSEC_INTEG_ALG_NONE))
+      if (PREDICT_TRUE (irt->integ_icv_size))
         {
           /* redo the anti-reply check. see esp_decrypt for details */
-          if (PREDICT_FALSE (ipsec_sa_is_set_ANTI_REPLAY_HUGE (sa0)))
+          if (PREDICT_FALSE (irt->anti_reply_huge))
             {
               if (ipsec_sa_anti_replay_and_sn_advance (
-                    sa0, pd->seq, pd->seq_hi, true, NULL, true))
+                    irt, pd->seq, pd->seq_hi, true, NULL, true))
                 {
                   ah_decrypt_set_next_index (
                     b[0], node, vm->thread_index, AH_DECRYPT_ERROR_REPLAY, 0,
@@ -327,12 +328,12 @@ ah_decrypt_inline (vlib_main_t * vm,
                   goto trace;
                 }
               n_lost = ipsec_sa_anti_replay_advance (
-                sa0, thread_index, pd->seq, pd->seq_hi, true);
+                irt, thread_index, pd->seq, pd->seq_hi, true);
             }
           else
             {
               if (ipsec_sa_anti_replay_and_sn_advance (
-                    sa0, pd->seq, pd->seq_hi, true, NULL, false))
+                    irt, pd->seq, pd->seq_hi, true, NULL, false))
                 {
                   ah_decrypt_set_next_index (
                     b[0], node, vm->thread_index, AH_DECRYPT_ERROR_REPLAY, 0,
@@ -340,7 +341,7 @@ ah_decrypt_inline (vlib_main_t * vm,
                   goto trace;
                 }
               n_lost = ipsec_sa_anti_replay_advance (
-                sa0, thread_index, pd->seq, pd->seq_hi, false);
+                irt, thread_index, pd->seq, pd->seq_hi, false);
             }
           vlib_prefetch_simple_counter (
             &ipsec_sa_err_counters[IPSEC_SA_ERROR_LOST], thread_index,
@@ -354,7 +355,7 @@ ah_decrypt_inline (vlib_main_t * vm,
       b[0]->flags &= ~(VNET_BUFFER_F_L4_CHECKSUM_COMPUTED |
                        VNET_BUFFER_F_L4_CHECKSUM_CORRECT);
 
-      if (PREDICT_TRUE (ipsec_sa_is_set_IS_TUNNEL (sa0)))
+      if (PREDICT_TRUE (irt->is_tunnel))
         { /* tunnel mode */
           if (PREDICT_TRUE (pd->nexthdr_cached == IP_PROTOCOL_IP_IN_IP))
             next[0] = AH_DECRYPT_NEXT_IP4_INPUT;
@@ -424,10 +425,10 @@ ah_decrypt_inline (vlib_main_t * vm,
     trace:
       if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
         {
-          sa0 = ipsec_sa_get (vnet_buffer (b[0])->ipsec.sad_index);
+          ipsec_sa_t *sa = ipsec_sa_get (vnet_buffer (b[0])->ipsec.sad_index);
           ah_decrypt_trace_t *tr = vlib_add_trace (vm, node, b[0], sizeof (*tr));
-          tr->integ_alg = sa0->integ_alg;
+          tr->integ_alg = sa->integ_alg;
           tr->seq_num = pd->seq;
         }