Diffstat (limited to 'src')
-rw-r--r--  src/vlib/counter.h             |  14
-rw-r--r--  src/vnet/ipsec/ah_decrypt.c    |  12
-rw-r--r--  src/vnet/ipsec/esp_decrypt.c   |  19
-rw-r--r--  src/vnet/ipsec/ipsec_cli.c     |   1
-rw-r--r--  src/vnet/ipsec/ipsec_format.c  |   5
-rw-r--r--  src/vnet/ipsec/ipsec_sa.c      |   7
-rw-r--r--  src/vnet/ipsec/ipsec_sa.h      |  66
7 files changed, 102 insertions, 22 deletions
diff --git a/src/vlib/counter.h b/src/vlib/counter.h
index 9f5654292b9..56701e8b391 100644
--- a/src/vlib/counter.h
+++ b/src/vlib/counter.h
@@ -64,6 +64,20 @@ typedef struct
 /** The number of counters (not the number of per-thread counters) */
 u32 vlib_simple_counter_n_counters (const vlib_simple_counter_main_t * cm);
 
+/** Pre-fetch a per-thread simple counter for the given object index */
+always_inline void
+vlib_prefetch_simple_counter (const vlib_simple_counter_main_t *cm,
+                              u32 thread_index, u32 index)
+{
+  counter_t *my_counters;
+
+  /*
+   * This CPU's index is assumed to already be in cache
+   */
+  my_counters = cm->counters[thread_index];
+  clib_prefetch_store (my_counters + index);
+}
+
 /** Increment a simple counter
     @param cm - (vlib_simple_counter_main_t *) simple counter main pointer
     @param thread_index - (u32) the current cpu index
diff --git a/src/vnet/ipsec/ah_decrypt.c b/src/vnet/ipsec/ah_decrypt.c
index 182ed3d231c..1ad372a7de0 100644
--- a/src/vnet/ipsec/ah_decrypt.c
+++ b/src/vnet/ipsec/ah_decrypt.c
@@ -315,6 +315,7 @@ ah_decrypt_inline (vlib_main_t * vm,
         {
           ip4_header_t *oh4;
           ip6_header_t *oh6;
+          u64 n_lost = 0;
 
           if (next[0] < AH_DECRYPT_N_NEXT)
             goto trace;
@@ -323,7 +324,7 @@ ah_decrypt_inline (vlib_main_t * vm,
 
           if (PREDICT_TRUE (sa0->integ_alg != IPSEC_INTEG_ALG_NONE))
             {
-              /* redo the anit-reply check. see esp_decrypt for details */
+              /* redo the anti-reply check. see esp_decrypt for details */
               if (ipsec_sa_anti_replay_and_sn_advance (sa0, pd->seq, pd->seq_hi,
                                                        true, NULL))
                 {
@@ -331,7 +332,10 @@ ah_decrypt_inline (vlib_main_t * vm,
                   next[0] = AH_DECRYPT_NEXT_DROP;
                   goto trace;
                 }
-              ipsec_sa_anti_replay_advance (sa0, pd->seq, pd->seq_hi);
+              n_lost = ipsec_sa_anti_replay_advance (sa0, thread_index, pd->seq,
+                                                     pd->seq_hi);
+              vlib_prefetch_simple_counter (&ipsec_sa_lost_counters, thread_index,
+                                            pd->sa_index);
             }
 
           u16 ah_hdr_len = sizeof (ah_header_t) + pd->icv_size
@@ -398,6 +402,10 @@ ah_decrypt_inline (vlib_main_t * vm,
                 }
             }
 
+          if (PREDICT_FALSE (n_lost))
+            vlib_increment_simple_counter (&ipsec_sa_lost_counters, thread_index,
+                                           pd->sa_index, n_lost);
+
           vnet_buffer (b[0])->sw_if_index[VLIB_TX] = (u32) ~ 0;
         trace:
           if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
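The ah_decrypt.c hunks above (and the esp_decrypt.c hunks that follow) use a prefetch-then-increment pattern: the per-thread "SA lost" counter is prefetched as soon as the anti-replay window has been advanced, and the increment is only paid at the end of packet processing, and only when n_lost is non-zero. Below is a minimal standalone sketch of that pattern; it uses a plain 2-D array and __builtin_prefetch in place of VPP's vlib_simple_counter_main_t / vlib_prefetch_simple_counter() / vlib_increment_simple_counter(), so the types and helper names in it are illustrative, not part of the patch.

#include <stdint.h>
#include <stdio.h>

#define N_THREADS 4
#define N_SAS     256

/* toy per-thread counter storage: one row of counters per thread */
static uint64_t lost_counters[N_THREADS][N_SAS];

/* hint the CPU to pull this thread's counter for 'index' into cache,
 * in the spirit of the vlib_prefetch_simple_counter() added above */
static inline void
prefetch_counter (uint32_t thread_index, uint32_t index)
{
  __builtin_prefetch (&lost_counters[thread_index][index], 1 /* write */);
}

static inline void
increment_counter (uint32_t thread_index, uint32_t index, uint64_t n)
{
  lost_counters[thread_index][index] += n;
}

int
main (void)
{
  uint32_t thread_index = 0, sa_index = 42;
  uint64_t n_lost = 3;  /* pretend the anti-replay advance reported 3 losses */

  /* issue the prefetch early, right where the loss count becomes known ... */
  prefetch_counter (thread_index, sa_index);

  /* ... other per-packet work would happen here ... */

  /* ... and pay for the increment only when something was actually lost */
  if (n_lost)
    increment_counter (thread_index, sa_index, n_lost);

  printf ("lost[%u][%u] = %llu\n", thread_index, sa_index,
          (unsigned long long) lost_counters[thread_index][sa_index]);
  return 0;
}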
diff --git a/src/vnet/ipsec/esp_decrypt.c b/src/vnet/ipsec/esp_decrypt.c
index e30fc9effcb..f1e8065b8ff 100644
--- a/src/vnet/ipsec/esp_decrypt.c
+++ b/src/vnet/ipsec/esp_decrypt.c
@@ -748,10 +748,11 @@ out:
 }
 
 static_always_inline void
-esp_decrypt_post_crypto (vlib_main_t * vm, vlib_node_runtime_t * node,
-                         esp_decrypt_packet_data_t * pd,
-                         esp_decrypt_packet_data2_t * pd2, vlib_buffer_t * b,
-                         u16 * next, int is_ip6, int is_tun, int is_async)
+esp_decrypt_post_crypto (vlib_main_t *vm, const vlib_node_runtime_t *node,
+                         const esp_decrypt_packet_data_t *pd,
+                         const esp_decrypt_packet_data2_t *pd2,
+                         vlib_buffer_t *b, u16 *next, int is_ip6, int is_tun,
+                         int is_async)
 {
   ipsec_sa_t *sa0 = ipsec_sa_get (pd->sa_index);
   vlib_buffer_t *lb = b;
@@ -790,7 +791,11 @@ esp_decrypt_post_crypto (vlib_main_t * vm, vlib_node_runtime_t * node,
       return;
     }
 
-  ipsec_sa_anti_replay_advance (sa0, pd->seq, pd->seq_hi);
+  u64 n_lost =
+    ipsec_sa_anti_replay_advance (sa0, vm->thread_index, pd->seq, pd->seq_hi);
+
+  vlib_prefetch_simple_counter (&ipsec_sa_lost_counters, vm->thread_index,
+                                pd->sa_index);
 
   if (pd->is_chain)
     {
@@ -1011,6 +1016,10 @@ esp_decrypt_post_crypto (vlib_main_t * vm, vlib_node_runtime_t * node,
             }
         }
     }
+
+  if (PREDICT_FALSE (n_lost))
+    vlib_increment_simple_counter (&ipsec_sa_lost_counters, vm->thread_index,
+                                   pd->sa_index, n_lost);
 }
 
 always_inline uword
diff --git a/src/vnet/ipsec/ipsec_cli.c b/src/vnet/ipsec/ipsec_cli.c
index 3a3e53b663e..bdb9c7bf698 100644
--- a/src/vnet/ipsec/ipsec_cli.c
+++ b/src/vnet/ipsec/ipsec_cli.c
@@ -759,6 +759,7 @@ clear_ipsec_counters_command_fn (vlib_main_t * vm,
 {
   vlib_clear_combined_counters (&ipsec_spd_policy_counters);
   vlib_clear_combined_counters (&ipsec_sa_counters);
+  vlib_clear_simple_counters (&ipsec_sa_lost_counters);
 
   return (NULL);
 }
diff --git a/src/vnet/ipsec/ipsec_format.c b/src/vnet/ipsec/ipsec_format.c
index 5f7caab44e4..ec644a7dca6 100644
--- a/src/vnet/ipsec/ipsec_format.c
+++ b/src/vnet/ipsec/ipsec_format.c
@@ -272,6 +272,7 @@ format_ipsec_sa (u8 * s, va_list * args)
   u32 sai = va_arg (*args, u32);
   ipsec_format_flags_t flags = va_arg (*args, ipsec_format_flags_t);
   vlib_counter_t counts;
+  counter_t lost;
   ipsec_sa_t *sa;
 
   if (pool_is_free_index (ipsec_sa_pool, sai))
@@ -312,7 +313,9 @@ format_ipsec_sa (u8 * s, va_list * args)
                 clib_host_to_net_u16 (sa->udp_hdr.dst_port));
 
   vlib_get_combined_counter (&ipsec_sa_counters, sai, &counts);
-  s = format (s, "\n packets %u bytes %u", counts.packets, counts.bytes);
+  lost = vlib_get_simple_counter (&ipsec_sa_lost_counters, sai);
+  s = format (s, "\n tx/rx:[packets:%Ld bytes:%Ld], lost:[packets:%Ld]",
+              counts.packets, counts.bytes, lost);
 
   if (ipsec_sa_is_set_IS_TUNNEL (sa))
     s = format (s, "\n%U", format_tunnel, &sa->tunnel, 3);
diff --git a/src/vnet/ipsec/ipsec_sa.c b/src/vnet/ipsec/ipsec_sa.c
index b5d58d0c053..387d8a747a3 100644
--- a/src/vnet/ipsec/ipsec_sa.c
+++ b/src/vnet/ipsec/ipsec_sa.c
@@ -28,6 +28,10 @@ vlib_combined_counter_main_t ipsec_sa_counters = {
   .name = "SA",
   .stat_segment_name = "/net/ipsec/sa",
 };
+vlib_simple_counter_main_t ipsec_sa_lost_counters = {
+  .name = "SA-lost",
+  .stat_segment_name = "/net/ipsec/sa/lost",
+};
 
 ipsec_sa_t *ipsec_sa_pool;
 
@@ -193,6 +197,8 @@ ipsec_sa_add_and_lock (u32 id, u32 spi, ipsec_protocol_t proto,
 
   vlib_validate_combined_counter (&ipsec_sa_counters, sa_index);
   vlib_zero_combined_counter (&ipsec_sa_counters, sa_index);
+  vlib_validate_simple_counter (&ipsec_sa_lost_counters, sa_index);
+  vlib_zero_simple_counter (&ipsec_sa_lost_counters, sa_index);
 
   tunnel_copy (tun, &sa->tunnel);
   sa->id = id;
@@ -422,6 +428,7 @@ void
 ipsec_sa_clear (index_t sai)
 {
   vlib_zero_combined_counter (&ipsec_sa_counters, sai);
+  vlib_zero_simple_counter (&ipsec_sa_lost_counters, sai);
 }
 
 void
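On the management side, the hunks above register the new counter vector under the stats path /net/ipsec/sa/lost, size and zero it whenever an SA is created, clear it from the "clear ipsec counters" CLI, and print it from format_ipsec_sa(). The sketch below is a rough standalone model of that validate/zero/clear lifecycle, with a growable array standing in for vlib's per-thread simple counter vectors; the toy_* names are invented for illustration and none of this is the vlib implementation.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* toy stand-in for a simple counter vector that grows on demand */
typedef struct
{
  uint64_t *counters;
  uint32_t n_counters;
} toy_counter_main_t;

/* make sure 'index' exists, growing and zero-filling as needed
 * (loosely what vlib_validate_simple_counter() is used for above) */
static void
toy_validate_counter (toy_counter_main_t *cm, uint32_t index)
{
  if (index < cm->n_counters)
    return;
  uint64_t *p = realloc (cm->counters, (index + 1) * sizeof (uint64_t));
  if (!p)
    abort ();
  memset (p + cm->n_counters, 0,
          (index + 1 - cm->n_counters) * sizeof (uint64_t));
  cm->counters = p;
  cm->n_counters = index + 1;
}

static void
toy_zero_counter (toy_counter_main_t *cm, uint32_t index)
{
  cm->counters[index] = 0;
}

int
main (void)
{
  toy_counter_main_t lost = { 0 };
  uint32_t sa_index = 7;

  /* on SA creation: make room for the new index and start it at zero */
  toy_validate_counter (&lost, sa_index);
  toy_zero_counter (&lost, sa_index);

  lost.counters[sa_index] += 5;       /* packets lost on that SA */
  toy_zero_counter (&lost, sa_index); /* e.g. "clear ipsec counters" */

  printf ("lost[%u] = %llu\n", sa_index,
          (unsigned long long) lost.counters[sa_index]);
  free (lost.counters);
  return 0;
}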
diff --git a/src/vnet/ipsec/ipsec_sa.h b/src/vnet/ipsec/ipsec_sa.h
index 14461ad2cdd..2cc64e19546 100644
--- a/src/vnet/ipsec/ipsec_sa.h
+++ b/src/vnet/ipsec/ipsec_sa.h
@@ -261,6 +261,7 @@ foreach_ipsec_sa_flags
  * SA packet & bytes counters
  */
 extern vlib_combined_counter_main_t ipsec_sa_counters;
+extern vlib_simple_counter_main_t ipsec_sa_lost_counters;
 
 extern void ipsec_mk_key (ipsec_key_t * key, const u8 * data, u8 len);
 
@@ -522,6 +523,48 @@ ipsec_sa_anti_replay_and_sn_advance (const ipsec_sa_t *sa, u32 seq,
   return 0;
 }
 
+always_inline u32
+ipsec_sa_anti_replay_window_shift (ipsec_sa_t *sa, u32 inc)
+{
+  u32 n_lost = 0;
+
+  if (inc < IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE)
+    {
+      if (sa->seq > IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE)
+        {
+          /*
+           * count how many holes there are in the portion
+           * of the window that we will right shift of the end
+           * as a result of this increments
+           */
+          u64 mask = (((u64) 1 << inc) - 1) << (BITS (u64) - inc);
+          u64 old = sa->replay_window & mask;
+          /* the number of packets we saw in this section of the window */
+          u64 seen = count_set_bits (old);
+
+          /*
+           * the number we missed is the size of the window section
+           * minus the number we saw.
+           */
+          n_lost = inc - seen;
+        }
+      sa->replay_window = ((sa->replay_window) << inc) | 1;
+    }
+  else
+    {
+      /* holes in the replay window are lost packets */
+      n_lost = BITS (u64) - count_set_bits (sa->replay_window);
+
+      /* any sequence numbers that now fall outside the window
+       * are forever lost */
+      n_lost += inc - IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE;
+
+      sa->replay_window = 1;
+    }
+
+  return (n_lost);
+}
+
 /*
  * Anti replay window advance
  * inputs need to be in host byte order.
@@ -531,9 +574,11 @@ ipsec_sa_anti_replay_and_sn_advance (const ipsec_sa_t *sa, u32 seq,
  * However, updating the window is trivial, so we do it anyway to save
  * the branch cost.
  */
-always_inline void
-ipsec_sa_anti_replay_advance (ipsec_sa_t *sa, u32 seq, u32 hi_seq)
+always_inline u64
+ipsec_sa_anti_replay_advance (ipsec_sa_t *sa, u32 thread_index, u32 seq,
+                              u32 hi_seq)
 {
+  u64 n_lost = 0;
   u32 pos;
 
   if (ipsec_sa_is_set_USE_ESN (sa))
@@ -543,19 +588,13 @@ ipsec_sa_anti_replay_advance (ipsec_sa_t *sa, u32 seq, u32 hi_seq)
       if (wrap == 0 && seq > sa->seq)
         {
           pos = seq - sa->seq;
-          if (pos < IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE)
-            sa->replay_window = ((sa->replay_window) << pos) | 1;
-          else
-            sa->replay_window = 1;
+          n_lost = ipsec_sa_anti_replay_window_shift (sa, pos);
           sa->seq = seq;
         }
       else if (wrap > 0)
         {
           pos = ~seq + sa->seq + 1;
-          if (pos < IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE)
-            sa->replay_window = ((sa->replay_window) << pos) | 1;
-          else
-            sa->replay_window = 1;
+          n_lost = ipsec_sa_anti_replay_window_shift (sa, pos);
           sa->seq = seq;
           sa->seq_hi = hi_seq;
         }
@@ -575,10 +614,7 @@ ipsec_sa_anti_replay_advance (ipsec_sa_t *sa, u32 seq, u32 hi_seq)
       if (seq > sa->seq)
         {
           pos = seq - sa->seq;
-          if (pos < IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE)
-            sa->replay_window = ((sa->replay_window) << pos) | 1;
-          else
-            sa->replay_window = 1;
+          n_lost = ipsec_sa_anti_replay_window_shift (sa, pos);
           sa->seq = seq;
         }
       else
@@ -587,6 +623,8 @@ ipsec_sa_anti_replay_advance (ipsec_sa_t *sa, u32 seq, u32 hi_seq)
           pos = sa->seq - seq;
           sa->replay_window |= (1ULL << pos);
         }
     }
+
+  return n_lost;
 }
 
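The core of the patch is ipsec_sa_anti_replay_window_shift() above: when the replay window slides forward by inc, every clear bit that falls off the top of the 64-bit bitmap is a sequence number that was never received, and a jump beyond the whole window additionally loses every sequence number in the skipped range. The standalone sketch below reproduces that accounting outside VPP; WINDOW_SIZE, window_shift_count_lost() and __builtin_popcountll() are stand-ins for IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE, the SA state and count_set_bits(), so treat it as an illustration of the arithmetic rather than the patch itself.

#include <stdint.h>
#include <stdio.h>

#define WINDOW_SIZE 64u  /* stand-in for IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE */

/* 'window' is the 64-bit replay bitmap (bit 0 = newest sequence seen),
 * 'inc' is how far the window must slide; returns how many sequence
 * numbers are now known to be lost */
static uint64_t
window_shift_count_lost (uint64_t *window, uint64_t highest_seq, uint32_t inc)
{
  uint64_t n_lost = 0;

  if (inc < WINDOW_SIZE)
    {
      /* skip the accounting while the window is still warming up,
       * i.e. fewer than WINDOW_SIZE packets have been seen */
      if (highest_seq > WINDOW_SIZE)
        {
          /* the 'inc' top bits are about to fall off the end of the
           * window; any zero bit among them is a packet never seen */
          uint64_t mask = (((uint64_t) 1 << inc) - 1) << (64 - inc);
          uint64_t seen = __builtin_popcountll (*window & mask);
          n_lost = inc - seen;
        }
      *window = (*window << inc) | 1;
    }
  else
    {
      /* the whole window slides past: every hole in it is lost, plus
       * everything between the old window and the new sequence number */
      n_lost = 64 - __builtin_popcountll (*window);
      n_lost += inc - WINDOW_SIZE;
      *window = 1;
    }
  return n_lost;
}

int
main (void)
{
  /* top 4 bits of the window are 0101: two packets in that range were
   * never seen */
  uint64_t w = (0x5ull << 60) | 1;
  printf ("lost = %llu\n",
          (unsigned long long) window_shift_count_lost (&w, 1000, 4));

  /* a jump far beyond a fully-populated window: only the skipped-over
   * range 100 - 64 = 36 is lost */
  w = ~0ull;
  printf ("lost = %llu\n",
          (unsigned long long) window_shift_count_lost (&w, 2000, 100));
  return 0;
}

Compiled with any C99 compiler, the first call prints lost = 2 (the two holes pushed out of the window) and the second prints lost = 36.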