-rw-r--r--  src/vlib/counter.h            |  14
-rw-r--r--  src/vnet/ipsec/ah_decrypt.c   |  12
-rw-r--r--  src/vnet/ipsec/esp_decrypt.c  |  19
-rw-r--r--  src/vnet/ipsec/ipsec_cli.c    |   1
-rw-r--r--  src/vnet/ipsec/ipsec_format.c |   5
-rw-r--r--  src/vnet/ipsec/ipsec_sa.c     |   7
-rw-r--r--  src/vnet/ipsec/ipsec_sa.h     |  66
-rw-r--r--  test/template_ipsec.py        |  87
-rw-r--r--  test/test_ipsec_esp.py        |   3
-rw-r--r--  test/vpp_ipsec.py             |  11
10 files changed, 203 insertions(+), 22 deletions(-)
diff --git a/src/vlib/counter.h b/src/vlib/counter.h
index 9f5654292b9..56701e8b391 100644
--- a/src/vlib/counter.h
+++ b/src/vlib/counter.h
@@ -64,6 +64,20 @@ typedef struct
/** The number of counters (not the number of per-thread counters) */
u32 vlib_simple_counter_n_counters (const vlib_simple_counter_main_t * cm);
+/** Pre-fetch a per-thread simple counter for the given object index */
+always_inline void
+vlib_prefetch_simple_counter (const vlib_simple_counter_main_t *cm,
+ u32 thread_index, u32 index)
+{
+ counter_t *my_counters;
+
+ /*
+ * This CPU's index is assumed to already be in cache
+ */
+ my_counters = cm->counters[thread_index];
+ clib_prefetch_store (my_counters + index);
+}
+
/** Increment a simple counter
@param cm - (vlib_simple_counter_main_t *) simple counter main pointer
@param thread_index - (u32) the current cpu index
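
The helper above only issues a store prefetch for the per-thread counter slot; the point, as used later in this patch by the decrypt nodes, is to warm the cache line early and pay for the increment later. Below is a minimal usage sketch, assuming the usual vlib includes and the existing vlib_increment_simple_counter() API; the function name is made up for illustration and is not part of the patch:

#include <vlib/vlib.h>

static_always_inline void
count_lost_example (vlib_simple_counter_main_t *cm, u32 thread_index,
                    u32 counter_index, u64 n_lost)
{
  /* warm the per-thread counter cache line as early as possible */
  vlib_prefetch_simple_counter (cm, thread_index, counter_index);

  /* ... unrelated per-packet work goes here, hiding the memory latency ... */

  /* only touch the counter when there is something to add */
  if (PREDICT_FALSE (n_lost))
    vlib_increment_simple_counter (cm, thread_index, counter_index, n_lost);
}
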
diff --git a/src/vnet/ipsec/ah_decrypt.c b/src/vnet/ipsec/ah_decrypt.c
index 182ed3d231c..1ad372a7de0 100644
--- a/src/vnet/ipsec/ah_decrypt.c
+++ b/src/vnet/ipsec/ah_decrypt.c
@@ -315,6 +315,7 @@ ah_decrypt_inline (vlib_main_t * vm,
{
ip4_header_t *oh4;
ip6_header_t *oh6;
+ u64 n_lost = 0;
if (next[0] < AH_DECRYPT_N_NEXT)
goto trace;
@@ -323,7 +324,7 @@ ah_decrypt_inline (vlib_main_t * vm,
if (PREDICT_TRUE (sa0->integ_alg != IPSEC_INTEG_ALG_NONE))
{
- /* redo the anit-reply check. see esp_decrypt for details */
+ /* redo the anti-replay check. see esp_decrypt for details */
if (ipsec_sa_anti_replay_and_sn_advance (sa0, pd->seq, pd->seq_hi,
true, NULL))
{
@@ -331,7 +332,10 @@ ah_decrypt_inline (vlib_main_t * vm,
next[0] = AH_DECRYPT_NEXT_DROP;
goto trace;
}
- ipsec_sa_anti_replay_advance (sa0, pd->seq, pd->seq_hi);
+ n_lost = ipsec_sa_anti_replay_advance (sa0, thread_index, pd->seq,
+ pd->seq_hi);
+ vlib_prefetch_simple_counter (&ipsec_sa_lost_counters, thread_index,
+ pd->sa_index);
}
u16 ah_hdr_len = sizeof (ah_header_t) + pd->icv_size
@@ -398,6 +402,10 @@ ah_decrypt_inline (vlib_main_t * vm,
}
}
+ if (PREDICT_FALSE (n_lost))
+ vlib_increment_simple_counter (&ipsec_sa_lost_counters, thread_index,
+ pd->sa_index, n_lost);
+
vnet_buffer (b[0])->sw_if_index[VLIB_TX] = (u32) ~ 0;
trace:
if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
diff --git a/src/vnet/ipsec/esp_decrypt.c b/src/vnet/ipsec/esp_decrypt.c
index e30fc9effcb..f1e8065b8ff 100644
--- a/src/vnet/ipsec/esp_decrypt.c
+++ b/src/vnet/ipsec/esp_decrypt.c
@@ -748,10 +748,11 @@ out:
}
static_always_inline void
-esp_decrypt_post_crypto (vlib_main_t * vm, vlib_node_runtime_t * node,
- esp_decrypt_packet_data_t * pd,
- esp_decrypt_packet_data2_t * pd2, vlib_buffer_t * b,
- u16 * next, int is_ip6, int is_tun, int is_async)
+esp_decrypt_post_crypto (vlib_main_t *vm, const vlib_node_runtime_t *node,
+ const esp_decrypt_packet_data_t *pd,
+ const esp_decrypt_packet_data2_t *pd2,
+ vlib_buffer_t *b, u16 *next, int is_ip6, int is_tun,
+ int is_async)
{
ipsec_sa_t *sa0 = ipsec_sa_get (pd->sa_index);
vlib_buffer_t *lb = b;
@@ -790,7 +791,11 @@ esp_decrypt_post_crypto (vlib_main_t * vm, vlib_node_runtime_t * node,
return;
}
- ipsec_sa_anti_replay_advance (sa0, pd->seq, pd->seq_hi);
+ u64 n_lost =
+ ipsec_sa_anti_replay_advance (sa0, vm->thread_index, pd->seq, pd->seq_hi);
+
+ vlib_prefetch_simple_counter (&ipsec_sa_lost_counters, vm->thread_index,
+ pd->sa_index);
if (pd->is_chain)
{
@@ -1011,6 +1016,10 @@ esp_decrypt_post_crypto (vlib_main_t * vm, vlib_node_runtime_t * node,
}
}
}
+
+ if (PREDICT_FALSE (n_lost))
+ vlib_increment_simple_counter (&ipsec_sa_lost_counters, vm->thread_index,
+ pd->sa_index, n_lost);
}
always_inline uword
diff --git a/src/vnet/ipsec/ipsec_cli.c b/src/vnet/ipsec/ipsec_cli.c
index 3a3e53b663e..bdb9c7bf698 100644
--- a/src/vnet/ipsec/ipsec_cli.c
+++ b/src/vnet/ipsec/ipsec_cli.c
@@ -759,6 +759,7 @@ clear_ipsec_counters_command_fn (vlib_main_t * vm,
{
vlib_clear_combined_counters (&ipsec_spd_policy_counters);
vlib_clear_combined_counters (&ipsec_sa_counters);
+ vlib_clear_simple_counters (&ipsec_sa_lost_counters);
return (NULL);
}
diff --git a/src/vnet/ipsec/ipsec_format.c b/src/vnet/ipsec/ipsec_format.c
index 5f7caab44e4..ec644a7dca6 100644
--- a/src/vnet/ipsec/ipsec_format.c
+++ b/src/vnet/ipsec/ipsec_format.c
@@ -272,6 +272,7 @@ format_ipsec_sa (u8 * s, va_list * args)
u32 sai = va_arg (*args, u32);
ipsec_format_flags_t flags = va_arg (*args, ipsec_format_flags_t);
vlib_counter_t counts;
+ counter_t lost;
ipsec_sa_t *sa;
if (pool_is_free_index (ipsec_sa_pool, sai))
@@ -312,7 +313,9 @@ format_ipsec_sa (u8 * s, va_list * args)
clib_host_to_net_u16 (sa->udp_hdr.dst_port));
vlib_get_combined_counter (&ipsec_sa_counters, sai, &counts);
- s = format (s, "\n packets %u bytes %u", counts.packets, counts.bytes);
+ lost = vlib_get_simple_counter (&ipsec_sa_lost_counters, sai);
+ s = format (s, "\n tx/rx:[packets:%Ld bytes:%Ld], lost:[packets:%Ld]",
+ counts.packets, counts.bytes, lost);
if (ipsec_sa_is_set_IS_TUNNEL (sa))
s = format (s, "\n%U", format_tunnel, &sa->tunnel, 3);
diff --git a/src/vnet/ipsec/ipsec_sa.c b/src/vnet/ipsec/ipsec_sa.c
index b5d58d0c053..387d8a747a3 100644
--- a/src/vnet/ipsec/ipsec_sa.c
+++ b/src/vnet/ipsec/ipsec_sa.c
@@ -28,6 +28,10 @@ vlib_combined_counter_main_t ipsec_sa_counters = {
.name = "SA",
.stat_segment_name = "/net/ipsec/sa",
};
+vlib_simple_counter_main_t ipsec_sa_lost_counters = {
+ .name = "SA-lost",
+ .stat_segment_name = "/net/ipsec/sa/lost",
+};
ipsec_sa_t *ipsec_sa_pool;
@@ -193,6 +197,8 @@ ipsec_sa_add_and_lock (u32 id, u32 spi, ipsec_protocol_t proto,
vlib_validate_combined_counter (&ipsec_sa_counters, sa_index);
vlib_zero_combined_counter (&ipsec_sa_counters, sa_index);
+ vlib_validate_simple_counter (&ipsec_sa_lost_counters, sa_index);
+ vlib_zero_simple_counter (&ipsec_sa_lost_counters, sa_index);
tunnel_copy (tun, &sa->tunnel);
sa->id = id;
@@ -422,6 +428,7 @@ void
ipsec_sa_clear (index_t sai)
{
vlib_zero_combined_counter (&ipsec_sa_counters, sai);
+ vlib_zero_simple_counter (&ipsec_sa_lost_counters, sai);
}
void
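
For reference, the lifecycle of a vlib simple counter as exercised by this patch: declare a counter main with a stats-segment path, validate and zero the per-object index when the object is created, and read it back summed across threads. A hedged sketch with made-up names ("example-lost", /net/example/lost), using only calls that appear in this change:

#include <vlib/vlib.h>

vlib_simple_counter_main_t example_lost_counters = {
  .name = "example-lost",
  .stat_segment_name = "/net/example/lost",
};

static void
example_object_added (u32 index)
{
  /* size the per-thread vectors to cover this index, then reset it */
  vlib_validate_simple_counter (&example_lost_counters, index);
  vlib_zero_simple_counter (&example_lost_counters, index);
}

static counter_t
example_object_read (u32 index)
{
  /* returns the value summed across all threads */
  return vlib_get_simple_counter (&example_lost_counters, index);
}
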
diff --git a/src/vnet/ipsec/ipsec_sa.h b/src/vnet/ipsec/ipsec_sa.h
index 14461ad2cdd..2cc64e19546 100644
--- a/src/vnet/ipsec/ipsec_sa.h
+++ b/src/vnet/ipsec/ipsec_sa.h
@@ -261,6 +261,7 @@ foreach_ipsec_sa_flags
* SA packet & bytes counters
*/
extern vlib_combined_counter_main_t ipsec_sa_counters;
+extern vlib_simple_counter_main_t ipsec_sa_lost_counters;
extern void ipsec_mk_key (ipsec_key_t * key, const u8 * data, u8 len);
@@ -522,6 +523,48 @@ ipsec_sa_anti_replay_and_sn_advance (const ipsec_sa_t *sa, u32 seq,
return 0;
}
+always_inline u32
+ipsec_sa_anti_replay_window_shift (ipsec_sa_t *sa, u32 inc)
+{
+ u32 n_lost = 0;
+
+ if (inc < IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE)
+ {
+ if (sa->seq > IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE)
+ {
+ /*
+ * count how many holes there are in the portion
+ * of the window that we will right shift off the end
+ * as a result of this increment
+ */
+ u64 mask = (((u64) 1 << inc) - 1) << (BITS (u64) - inc);
+ u64 old = sa->replay_window & mask;
+ /* the number of packets we saw in this section of the window */
+ u64 seen = count_set_bits (old);
+
+ /*
+ * the number we missed is the size of the window section
+ * minus the number we saw.
+ */
+ n_lost = inc - seen;
+ }
+ sa->replay_window = ((sa->replay_window) << inc) | 1;
+ }
+ else
+ {
+ /* holes in the replay window are lost packets */
+ n_lost = BITS (u64) - count_set_bits (sa->replay_window);
+
+ /* any sequence numbers that now fall outside the window
+ * are forever lost */
+ n_lost += inc - IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE;
+
+ sa->replay_window = 1;
+ }
+
+ return (n_lost);
+}
+
/*
* Anti replay window advance
* inputs need to be in host byte order.
@@ -531,9 +574,11 @@ ipsec_sa_anti_replay_and_sn_advance (const ipsec_sa_t *sa, u32 seq,
* However, updating the window is trivial, so we do it anyway to save
* the branch cost.
*/
-always_inline void
-ipsec_sa_anti_replay_advance (ipsec_sa_t *sa, u32 seq, u32 hi_seq)
+always_inline u64
+ipsec_sa_anti_replay_advance (ipsec_sa_t *sa, u32 thread_index, u32 seq,
+ u32 hi_seq)
{
+ u64 n_lost = 0;
u32 pos;
if (ipsec_sa_is_set_USE_ESN (sa))
@@ -543,19 +588,13 @@ ipsec_sa_anti_replay_advance (ipsec_sa_t *sa, u32 seq, u32 hi_seq)
if (wrap == 0 && seq > sa->seq)
{
pos = seq - sa->seq;
- if (pos < IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE)
- sa->replay_window = ((sa->replay_window) << pos) | 1;
- else
- sa->replay_window = 1;
+ n_lost = ipsec_sa_anti_replay_window_shift (sa, pos);
sa->seq = seq;
}
else if (wrap > 0)
{
pos = ~seq + sa->seq + 1;
- if (pos < IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE)
- sa->replay_window = ((sa->replay_window) << pos) | 1;
- else
- sa->replay_window = 1;
+ n_lost = ipsec_sa_anti_replay_window_shift (sa, pos);
sa->seq = seq;
sa->seq_hi = hi_seq;
}
@@ -575,10 +614,7 @@ ipsec_sa_anti_replay_advance (ipsec_sa_t *sa, u32 seq, u32 hi_seq)
if (seq > sa->seq)
{
pos = seq - sa->seq;
- if (pos < IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE)
- sa->replay_window = ((sa->replay_window) << pos) | 1;
- else
- sa->replay_window = 1;
+ n_lost = ipsec_sa_anti_replay_window_shift (sa, pos);
sa->seq = seq;
}
else
@@ -587,6 +623,8 @@ ipsec_sa_anti_replay_advance (ipsec_sa_t *sa, u32 seq, u32 hi_seq)
sa->replay_window |= (1ULL << pos);
}
}
+
+ return n_lost;
}
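
To see how ipsec_sa_anti_replay_window_shift() accounts for losses, here is a standalone model (not VPP code) of the same arithmetic over a 64-bit window: advancing by inc shifts inc bits off the top, every shifted-off bit that was never set counts as a lost packet, and an advance larger than the window additionally loses every sequence number that never got a slot. It replays the first phase of the new test (receive 1..99 with 3 missing) and prints lost = 1; it assumes the GCC/Clang popcount builtin.

#include <stdio.h>
#include <stdint.h>

#define WINDOW_SIZE 64

static uint64_t
window_shift (uint64_t *window, uint64_t old_seq, uint32_t inc)
{
  uint64_t n_lost = 0;

  if (inc < WINDOW_SIZE)
    {
      if (old_seq > WINDOW_SIZE)
        {
          /* bits about to be shifted off the top of the window */
          uint64_t mask = (((uint64_t) 1 << inc) - 1) << (64 - inc);
          n_lost = inc - __builtin_popcountll (*window & mask);
        }
      *window = (*window << inc) | 1;
    }
  else
    {
      /* every hole in the old window is lost, plus all sequence
         numbers that never got a slot in it */
      n_lost = 64 - __builtin_popcountll (*window);
      n_lost += inc - WINDOW_SIZE;
      *window = 1;
    }
  return n_lost;
}

int
main (void)
{
  uint64_t window = 0, seq = 0, lost = 0;

  /* receive sequence numbers 1..99 but skip 3; the hole is only
     counted once it ages out of the 64-entry window */
  for (uint64_t s = 1; s < 100; s++)
    {
      if (s == 3)
        continue;
      lost += window_shift (&window, seq, (uint32_t) (s - seq));
      seq = s;
    }
  printf ("lost = %llu\n", (unsigned long long) lost); /* prints: lost = 1 */
  return 0;
}
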
diff --git a/test/template_ipsec.py b/test/template_ipsec.py
index d5f13322a59..e4797353ecd 100644
--- a/test/template_ipsec.py
+++ b/test/template_ipsec.py
@@ -792,6 +792,87 @@ class IpsecTra4(object):
p.scapy_tra_sa.seq_num = 351
p.vpp_tra_sa.seq_num = 351
+ def verify_tra_lost(self):
+ p = self.params[socket.AF_INET]
+ esn_en = p.vpp_tra_sa.esn_en
+
+ #
+ # send packets with seq numbers 1->34
+ # this means the window size is still in Case B (see RFC4303
+ # Appendix A)
+ #
+ # for reasons I haven't investigated, Scapy won't create a packet with
+ # seq_num=0
+ #
+ pkts = [(Ether(src=self.tra_if.remote_mac,
+ dst=self.tra_if.local_mac) /
+ p.scapy_tra_sa.encrypt(IP(src=self.tra_if.remote_ip4,
+ dst=self.tra_if.local_ip4) /
+ ICMP(),
+ seq_num=seq))
+ for seq in range(1, 3)]
+ self.send_and_expect(self.tra_if, pkts, self.tra_if)
+
+ self.assertEqual(p.tra_sa_out.get_lost(), 0)
+
+ # skip a sequence number
+ pkts = [(Ether(src=self.tra_if.remote_mac,
+ dst=self.tra_if.local_mac) /
+ p.scapy_tra_sa.encrypt(IP(src=self.tra_if.remote_ip4,
+ dst=self.tra_if.local_ip4) /
+ ICMP(),
+ seq_num=seq))
+ for seq in range(4, 6)]
+ self.send_and_expect(self.tra_if, pkts, self.tra_if)
+
+ self.assertEqual(p.tra_sa_out.get_lost(), 0)
+
+ # the lost packets are counted until we get past the first
+ # sizeof(replay_window) packets
+ pkts = [(Ether(src=self.tra_if.remote_mac,
+ dst=self.tra_if.local_mac) /
+ p.scapy_tra_sa.encrypt(IP(src=self.tra_if.remote_ip4,
+ dst=self.tra_if.local_ip4) /
+ ICMP(),
+ seq_num=seq))
+ for seq in range(6, 100)]
+ self.send_and_expect(self.tra_if, pkts, self.tra_if)
+
+ self.assertEqual(p.tra_sa_out.get_lost(), 1)
+
+ # lots of holes in the sequence
+ pkts = [(Ether(src=self.tra_if.remote_mac,
+ dst=self.tra_if.local_mac) /
+ p.scapy_tra_sa.encrypt(IP(src=self.tra_if.remote_ip4,
+ dst=self.tra_if.local_ip4) /
+ ICMP(),
+ seq_num=seq))
+ for seq in range(100, 200, 2)]
+ self.send_and_expect(self.tra_if, pkts, self.tra_if, n_rx=50)
+
+ pkts = [(Ether(src=self.tra_if.remote_mac,
+ dst=self.tra_if.local_mac) /
+ p.scapy_tra_sa.encrypt(IP(src=self.tra_if.remote_ip4,
+ dst=self.tra_if.local_ip4) /
+ ICMP(),
+ seq_num=seq))
+ for seq in range(200, 300)]
+ self.send_and_expect(self.tra_if, pkts, self.tra_if)
+
+ self.assertEqual(p.tra_sa_out.get_lost(), 51)
+
+ # a big hole in the seq number space
+ pkts = [(Ether(src=self.tra_if.remote_mac,
+ dst=self.tra_if.local_mac) /
+ p.scapy_tra_sa.encrypt(IP(src=self.tra_if.remote_ip4,
+ dst=self.tra_if.local_ip4) /
+ ICMP(),
+ seq_num=seq))
+ for seq in range(400, 500)]
+ self.send_and_expect(self.tra_if, pkts, self.tra_if)
+
+ self.assertEqual(p.tra_sa_out.get_lost(), 151)
+
def verify_tra_basic4(self, count=1, payload_size=54):
""" ipsec v4 transport basic test """
self.vapi.cli("clear errors")
@@ -826,6 +907,8 @@ class IpsecTra4(object):
self.assertEqual(pkts, count,
"incorrect SA out counts: expected %d != %d" %
(count, pkts))
+ self.assertEqual(p.tra_sa_out.get_lost(), 0)
+ self.assertEqual(p.tra_sa_in.get_lost(), 0)
self.assert_packet_counter_equal(self.tra4_encrypt_node_name, count)
self.assert_packet_counter_equal(self.tra4_decrypt_node_name[0], count)
@@ -837,6 +920,10 @@ class IpsecTra4Tests(IpsecTra4):
""" ipsec v4 transport anti-replay test """
self.verify_tra_anti_replay()
+ def test_tra_lost(self):
+ """ ipsec v4 transport lost packet test """
+ self.verify_tra_lost()
+
def test_tra_basic(self, count=1):
""" ipsec v4 transport basic test """
self.verify_tra_basic4(count=1)
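
A reviewer's note on how the asserted values in verify_tra_lost() follow from the 64-entry replay window (not part of the patch):

  * only seq 3 is missing from 1..99; its slot falls off the window once
    the window has advanced past it, so lost == 1
  * range(100, 200, 2) leaves 50 holes (101, 103, ..., 197 plus the
    never-sent 199); the solid run 200..299 pushes them all out of the
    window, so lost == 1 + 50 == 51
  * jumping from 299 to 400 skips 100 sequence numbers (300..399), all of
    which eventually age out of the window, so lost == 51 + 100 == 151
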
diff --git a/test/test_ipsec_esp.py b/test/test_ipsec_esp.py
index 14112d6d71a..0e0aaee425c 100644
--- a/test/test_ipsec_esp.py
+++ b/test/test_ipsec_esp.py
@@ -974,6 +974,9 @@ class RunTestIpsecEspAll(ConfigIpsecESP,
self.unconfig_network()
self.config_network(self.params.values())
self.verify_hi_seq_num()
+ self.unconfig_network()
+ self.config_network(self.params.values())
+ self.verify_tra_lost()
#
# swap the handlers while SAs are up
diff --git a/test/vpp_ipsec.py b/test/vpp_ipsec.py
index f9b7bc43752..76080e05c3a 100644
--- a/test/vpp_ipsec.py
+++ b/test/vpp_ipsec.py
@@ -313,6 +313,17 @@ class VppIpsecSA(VppObject):
# +1 to skip main thread
return c[worker+1][self.stat_index]
+ def get_lost(self, worker=None):
+ c = self.test.statistics.get_counter("/net/ipsec/sa/lost")
+ if worker is None:
+ total = 0
+ for t in c:
+ total += t[self.stat_index]
+ return total
+ else:
+ # +1 to skip main thread
+ return c[worker+1][self.stat_index]
+
class VppIpsecTunProtect(VppObject):
"""