author    Matus Fabian <matfabia@cisco.com>  2018-01-04 04:03:14 -0800
committer Matus Fabian <matfabia@cisco.com>  2018-01-08 01:51:24 -0800
commit    a431ad1c486ad0fd9ca35e14c527fe7611965fc2 (patch)
tree      38326f15ef87cba103db422bf7744a33fd8413b1
parent    1049139a6d323e07bfb87710c9d2f1d467e980a9 (diff)

NAT64: IPFix (VPP-1106)

Change-Id: Ib90dc5613c9fdac0344b3bd7f163e2f7163c64d8
Signed-off-by: Matus Fabian <matfabia@cisco.com>
-rwxr-xr-x  src/plugins/nat/in2out.c             |   22
-rw-r--r--  src/plugins/nat/nat64_db.c           |   63
-rw-r--r--  src/plugins/nat/nat64_db.h           |    7
-rw-r--r--  src/plugins/nat/nat_det.h            |    3
-rw-r--r--  src/plugins/nat/nat_ipfix_logging.c  | 1197
-rw-r--r--  src/plugins/nat/nat_ipfix_logging.h  |   51
-rw-r--r--  src/plugins/nat/nat_reass.c          |   17
-rw-r--r--  src/vnet/flow/ipfix_info_elements.h  |    6
-rw-r--r--  test/ipfix.py                        |    7
-rw-r--r--  test/test_nat.py                     |  502
10 files changed, 1825 insertions, 50 deletions
diff --git a/src/plugins/nat/in2out.c b/src/plugins/nat/in2out.c
index b0dbbc8fa11..5f2f43f7f81 100755
--- a/src/plugins/nat/in2out.c
+++ b/src/plugins/nat/in2out.c
@@ -162,7 +162,7 @@ typedef enum {
/**
* @brief Check if packet should be translated
*
- * Packets aimed at outside interface and external addresss with active session
+ * Packets aimed at outside interface and external address with active session
* should be translated.
*
* @param sm NAT main
@@ -273,6 +273,7 @@ static u32 slow_path (snat_main_t *sm, vlib_buffer_t *b0,
if (PREDICT_FALSE (maximum_sessions_exceeded(sm, thread_index)))
{
b0->error = node->errors[SNAT_IN2OUT_ERROR_MAX_SESSIONS_EXCEEDED];
+ nat_ipfix_logging_max_sessions(sm->max_translations);
return SNAT_IN2OUT_NEXT_DROP;
}
@@ -294,13 +295,6 @@ static u32 slow_path (snat_main_t *sm, vlib_buffer_t *b0,
return SNAT_IN2OUT_NEXT_DROP;
}
- s = nat_session_alloc_or_recycle (sm, u, thread_index);
- if (!s)
- {
- clib_warning ("create NAT session failed");
- return SNAT_IN2OUT_NEXT_DROP;
- }
-
/* First try to match static mapping by local address and port */
if (snat_static_mapping_match (sm, *key0, &key1, 0, 0, 0))
{
@@ -319,9 +313,17 @@ static u32 slow_path (snat_main_t *sm, vlib_buffer_t *b0,
else
{
u->nstaticsessions++;
- s->flags |= SNAT_SESSION_FLAG_STATIC_MAPPING;
}
+ s = nat_session_alloc_or_recycle (sm, u, thread_index);
+ if (!s)
+ {
+ clib_warning ("create NAT session failed");
+ return SNAT_IN2OUT_NEXT_DROP;
+ }
+
+ if (address_index == ~0)
+ s->flags |= SNAT_SESSION_FLAG_STATIC_MAPPING;
s->outside_address_index = address_index;
s->in2out = *key0;
s->out2in = key1;
@@ -1002,6 +1004,7 @@ snat_in2out_unknown_proto (snat_main_t *sm,
if (PREDICT_FALSE (maximum_sessions_exceeded(sm, thread_index)))
{
b->error = node->errors[SNAT_IN2OUT_ERROR_MAX_SESSIONS_EXCEEDED];
+ nat_ipfix_logging_max_sessions(sm->max_translations);
return 0;
}
@@ -1189,6 +1192,7 @@ snat_in2out_lb (snat_main_t *sm,
if (PREDICT_FALSE (maximum_sessions_exceeded (sm, thread_index)))
{
b->error = node->errors[SNAT_IN2OUT_ERROR_MAX_SESSIONS_EXCEEDED];
+ nat_ipfix_logging_max_sessions(sm->max_translations);
return 0;
}
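
The in2out.c changes do two things: each translation entry point (slow path, unknown-protocol path and load-balancing path) now emits a quota-exceeded IPFIX event when the session table is full, and the slow path allocates a session only after the static-mapping lookup and per-user accounting have succeeded, so a failed outside-address allocation no longer leaves a freshly taken session slot behind. A condensed sketch of the resulting slow-path order (assumed simplification of the hunks above, not the literal code):

  /* Condensed sketch, not the literal in2out.c code. */
  if (PREDICT_FALSE (maximum_sessions_exceeded (sm, thread_index)))
    {
      b0->error = node->errors[SNAT_IN2OUT_ERROR_MAX_SESSIONS_EXCEEDED];
      nat_ipfix_logging_max_sessions (sm->max_translations);  /* new event */
      return SNAT_IN2OUT_NEXT_DROP;
    }

  /* 1. resolve the outside address/port first ... */
  if (snat_static_mapping_match (sm, *key0, &key1, 0, 0, 0))
    ;  /* no static mapping: allocate a dynamic address/port, address_index is set */
  else
    u->nstaticsessions++;  /* static mapping found: address_index stays ~0 */

  /* 2. ... and only then take a session from the pool */
  s = nat_session_alloc_or_recycle (sm, u, thread_index);
  if (!s)
    return SNAT_IN2OUT_NEXT_DROP;
  if (address_index == ~0)
    s->flags |= SNAT_SESSION_FLAG_STATIC_MAPPING;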
diff --git a/src/plugins/nat/nat64_db.c b/src/plugins/nat/nat64_db.c
index 7ce28bc6c7f..3edc1a43006 100644
--- a/src/plugins/nat/nat64_db.c
+++ b/src/plugins/nat/nat64_db.c
@@ -17,6 +17,8 @@
* @brief NAT64 DB
*/
#include <nat/nat64_db.h>
+#include <nat/nat_ipfix_logging.h>
+#include <vnet/fib/fib_table.h>
int
nat64_db_init (nat64_db_t * db, u32 bib_buckets, u32 bib_memory_size,
@@ -36,6 +38,12 @@ nat64_db_init (nat64_db_t * db, u32 bib_buckets, u32 bib_memory_size,
st_memory_size);
db->free_addr_port_cb = free_addr_port_cb;
+ db->bib.limit = 10 * bib_buckets;
+ db->bib.bib_entries_num = 0;
+ db->st.limit = 10 * st_buckets;
+ db->st.st_entries_num = 0;
+ db->addr_free = 0;
+
return 0;
}
@@ -48,6 +56,14 @@ nat64_db_bib_entry_create (nat64_db_t * db, ip6_address_t * in_addr,
nat64_db_bib_entry_t *bibe;
nat64_db_bib_entry_key_t bibe_key;
clib_bihash_kv_24_8_t kv;
+ fib_table_t *fib;
+
+ if (db->bib.bib_entries_num >= db->bib.limit)
+ {
+ db->free_addr_port_cb (db, out_addr, out_port, proto);
+ nat_ipfix_logging_max_bibs (db->bib.limit);
+ return 0;
+ }
/* create pool entry */
switch (ip_proto_to_snat_proto (proto))
@@ -66,6 +82,9 @@ nat64_db_bib_entry_create (nat64_db_t * db, ip6_address_t * in_addr,
kv.value = bibe - db->bib._unk_proto_bib;
break;
}
+
+ db->bib.bib_entries_num++;
+
memset (bibe, 0, sizeof (*bibe));
bibe->in_addr.as_u64[0] = in_addr->as_u64[0];
bibe->in_addr.as_u64[1] = in_addr->as_u64[1];
@@ -97,6 +116,9 @@ nat64_db_bib_entry_create (nat64_db_t * db, ip6_address_t * in_addr,
kv.key[2] = bibe_key.as_u64[2];
clib_bihash_add_del_24_8 (&db->bib.out2in, &kv, 1);
+ fib = fib_table_get (bibe->fib_index, FIB_PROTOCOL_IP6);
+ nat_ipfix_logging_nat64_bib (in_addr, out_addr, proto, in_port, out_port,
+ fib->ft_table_id, 1);
return bibe;
}
@@ -108,6 +130,7 @@ nat64_db_bib_entry_free (nat64_db_t * db, nat64_db_bib_entry_t * bibe)
nat64_db_bib_entry_t *bib;
u32 *ste_to_be_free = 0, *ste_index, bibe_index;
nat64_db_st_entry_t *st, *ste;
+ fib_table_t *fib;
switch (ip_proto_to_snat_proto (bibe->proto))
{
@@ -126,6 +149,8 @@ nat64_db_bib_entry_free (nat64_db_t * db, nat64_db_bib_entry_t * bibe)
break;
}
+ db->bib.bib_entries_num--;
+
bibe_index = bibe - bib;
/* delete ST entries for static BIB entry */
@@ -162,7 +187,14 @@ nat64_db_bib_entry_free (nat64_db_t * db, nat64_db_bib_entry_t * bibe)
kv.key[2] = bibe_key.as_u64[2];
clib_bihash_add_del_24_8 (&db->bib.out2in, &kv, 0);
- db->free_addr_port_cb (db, &bibe->out_addr, bibe->out_port, bibe->proto);
+ if (!db->addr_free)
+ db->free_addr_port_cb (db, &bibe->out_addr, bibe->out_port, bibe->proto);
+
+ fib = fib_table_get (bibe->fib_index, FIB_PROTOCOL_IP6);
+ nat_ipfix_logging_nat64_bib (&bibe->in_addr, &bibe->out_addr, bibe->proto,
+ bibe->in_port, bibe->out_port,
+ fib->ft_table_id, 0);
+
/* delete from pool */
pool_put (bib, bibe);
@@ -344,6 +376,13 @@ nat64_db_st_entry_create (nat64_db_t * db, nat64_db_bib_entry_t * bibe,
nat64_db_bib_entry_t *bib;
nat64_db_st_entry_key_t ste_key;
clib_bihash_kv_48_8_t kv;
+ fib_table_t *fib;
+
+ if (db->st.st_entries_num >= db->st.limit)
+ {
+ nat_ipfix_logging_max_sessions (db->st.limit);
+ return 0;
+ }
/* create pool entry */
switch (ip_proto_to_snat_proto (bibe->proto))
@@ -364,6 +403,9 @@ nat64_db_st_entry_create (nat64_db_t * db, nat64_db_bib_entry_t * bibe,
bib = db->bib._unk_proto_bib;
break;
}
+
+ db->st.st_entries_num++;
+
memset (ste, 0, sizeof (*ste));
ste->in_r_addr.as_u64[0] = in_r_addr->as_u64[0];
ste->in_r_addr.as_u64[1] = in_r_addr->as_u64[1];
@@ -407,6 +449,13 @@ nat64_db_st_entry_create (nat64_db_t * db, nat64_db_bib_entry_t * bibe,
kv.key[5] = ste_key.as_u64[5];
clib_bihash_add_del_48_8 (&db->st.out2in, &kv, 1);
+ fib = fib_table_get (bibe->fib_index, FIB_PROTOCOL_IP6);
+ nat_ipfix_logging_nat64_session (&bibe->in_addr, &bibe->out_addr,
+ bibe->proto, bibe->in_port, bibe->out_port,
+ &ste->in_r_addr, &ste->out_r_addr,
+ ste->r_port, ste->r_port, fib->ft_table_id,
+ 1);
+
return ste;
}
@@ -417,6 +466,7 @@ nat64_db_st_entry_free (nat64_db_t * db, nat64_db_st_entry_t * ste)
nat64_db_bib_entry_t *bib, *bibe;
nat64_db_st_entry_key_t ste_key;
clib_bihash_kv_48_8_t kv;
+ fib_table_t *fib;
switch (ip_proto_to_snat_proto (ste->proto))
{
@@ -437,6 +487,8 @@ nat64_db_st_entry_free (nat64_db_t * db, nat64_db_st_entry_t * ste)
bibe = pool_elt_at_index (bib, ste->bibe_index);
+ db->st.st_entries_num--;
+
/* delete hash lookup */
memset (&ste_key, 0, sizeof (ste_key));
ste_key.l_addr.as_u64[0] = bibe->in_addr.as_u64[0];
@@ -469,6 +521,13 @@ nat64_db_st_entry_free (nat64_db_t * db, nat64_db_st_entry_t * ste)
kv.key[5] = ste_key.as_u64[5];
clib_bihash_add_del_48_8 (&db->st.out2in, &kv, 0);
+ fib = fib_table_get (bibe->fib_index, FIB_PROTOCOL_IP6);
+ nat_ipfix_logging_nat64_session (&bibe->in_addr, &bibe->out_addr,
+ bibe->proto, bibe->in_port, bibe->out_port,
+ &ste->in_r_addr, &ste->out_r_addr,
+ ste->r_port, ste->r_port, fib->ft_table_id,
+ 0);
+
/* delete from pool */
pool_put (st, ste);
@@ -613,6 +672,7 @@ nat64_db_free_out_addr (nat64_db_t * db, ip4_address_t * out_addr)
nat64_db_st_entry_t *st, *ste;
nat64_db_bib_entry_t *bibe;
+ db->addr_free = 1;
/* *INDENT-OFF* */
#define _(N, i, n, s) \
st = db->st._##n##_st; \
@@ -636,6 +696,7 @@ nat64_db_free_out_addr (nat64_db_t * db, ip4_address_t * out_addr)
vec_foreach (ste_index, ste_to_be_free)
nat64_db_st_entry_free (db, pool_elt_at_index(st, ste_index[0]));
vec_free (ste_to_be_free);
+ db->addr_free = 0;
/* *INDENT-ON* */
}
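
The nat64_db.c changes add two pieces of bookkeeping: the BIB and session-table pools are capped at ten entries per hash bucket (limit = 10 * buckets, set in nat64_db_init()), and the new addr_free flag suppresses the per-entry free_addr_port_cb while nat64_db_free_out_addr() tears down everything bound to one outside address. Every create and free now also emits a NAT64 BIB or session IPFIX record, with the VRF id taken from the entry's FIB index via fib_table_get(). A minimal sketch of the limit guard both create paths now share, condensed from the hunks above rather than copied verbatim:

  /* Sketch only: entry-limit guard added to nat64_db_bib_entry_create()
     and nat64_db_st_entry_create(). */
  if (db->bib.bib_entries_num >= db->bib.limit)   /* limit = 10 * bib_buckets */
    {
      /* hand the reserved address/port back and report the quota event */
      db->free_addr_port_cb (db, out_addr, out_port, proto);
      nat_ipfix_logging_max_bibs (db->bib.limit);
      return 0;
    }
  db->bib.bib_entries_num++;   /* decremented again in nat64_db_bib_entry_free() */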
diff --git a/src/plugins/nat/nat64_db.h b/src/plugins/nat/nat64_db.h
index 4345fd30974..f1b93cf8928 100644
--- a/src/plugins/nat/nat64_db.h
+++ b/src/plugins/nat/nat64_db.h
@@ -68,6 +68,9 @@ typedef struct
/* BIB lookup */
clib_bihash_24_8_t in2out;
clib_bihash_24_8_t out2in;
+
+ u32 limit;
+ u32 bib_entries_num;
} nat64_db_bib_t;
typedef struct
@@ -115,6 +118,9 @@ typedef struct
/* session lookup */
clib_bihash_48_8_t in2out;
clib_bihash_48_8_t out2in;
+
+ u32 limit;
+ u32 st_entries_num;
} nat64_db_st_t;
struct nat64_db_s;
@@ -132,6 +138,7 @@ typedef struct nat64_db_s
nat64_db_bib_t bib;
nat64_db_st_t st;
nat64_db_free_addr_port_function_t free_addr_port_cb;
+ u8 addr_free;
} nat64_db_t;
/**
diff --git a/src/plugins/nat/nat_det.h b/src/plugins/nat/nat_det.h
index 2ab7f27eece..ce876bca9fb 100644
--- a/src/plugins/nat/nat_det.h
+++ b/src/plugins/nat/nat_det.h
@@ -171,7 +171,8 @@ snat_det_ses_create (snat_det_map_t * dm, ip4_address_t * in_addr,
}
}
- snat_ipfix_logging_max_entries_per_user (in_addr->as_u32);
+ snat_ipfix_logging_max_entries_per_user (SNAT_DET_SES_PER_USER,
+ in_addr->as_u32);
return 0;
}
diff --git a/src/plugins/nat/nat_ipfix_logging.c b/src/plugins/nat/nat_ipfix_logging.c
index 18430f5a7b8..ef1ab859d8d 100644
--- a/src/plugins/nat/nat_ipfix_logging.c
+++ b/src/plugins/nat/nat_ipfix_logging.c
@@ -17,18 +17,28 @@
#include <vnet/flow/flow_report.h>
#include <vlibmemory/api.h>
-#include <nat/nat.h>
#include <nat/nat_ipfix_logging.h>
snat_ipfix_logging_main_t snat_ipfix_logging_main;
#define NAT44_SESSION_CREATE_LEN 26
#define NAT_ADDRESSES_EXHAUTED_LEN 13
-#define MAX_ENTRIES_PER_USER_LEN 17
+#define MAX_ENTRIES_PER_USER_LEN 21
+#define MAX_SESSIONS_LEN 17
+#define MAX_BIBS_LEN 17
+#define MAX_FRAGMENTS_IP4_LEN 21
+#define MAX_FRAGMENTS_IP6_LEN 33
+#define NAT64_BIB_LEN 38
+#define NAT64_SES_LEN 62
#define NAT44_SESSION_CREATE_FIELD_COUNT 8
#define NAT_ADDRESSES_EXHAUTED_FIELD_COUNT 3
-#define MAX_ENTRIES_PER_USER_FIELD_COUNT 4
+#define MAX_ENTRIES_PER_USER_FIELD_COUNT 5
+#define MAX_SESSIONS_FIELD_COUNT 4
+#define MAX_BIBS_FIELD_COUNT 4
+#define MAX_FRAGMENTS_FIELD_COUNT 5
+#define NAT64_BIB_FIELD_COUNT 8
+#define NAT64_SES_FIELD_COUNT 12
typedef struct
{
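
Each new *_LEN constant is the payload size of one data record and has to equal the sum of the field sizes declared in the matching template further down in this file. A compile-time check makes the arithmetic explicit; this is a sketch, not part of the patch, and assumes a C11 _Static_assert is available:

  /* Sketch only: record length = sum of template field sizes in bytes
     (observationTimeMilliseconds 8, natEvent 1, natQuotaExceededEvent 4, ...). */
  _Static_assert (MAX_SESSIONS_LEN == 8 + 1 + 4 + 4, "time + event + quota + limit");
  _Static_assert (MAX_BIBS_LEN == 8 + 1 + 4 + 4, "time + event + quota + limit");
  _Static_assert (MAX_ENTRIES_PER_USER_LEN == 8 + 1 + 4 + 4 + 4, "plus sourceIPv4Address");
  _Static_assert (MAX_FRAGMENTS_IP4_LEN == 8 + 1 + 4 + 4 + 4, "plus sourceIPv4Address");
  _Static_assert (MAX_FRAGMENTS_IP6_LEN == 8 + 1 + 4 + 4 + 16, "plus sourceIPv6Address");
  _Static_assert (NAT64_BIB_LEN == 8 + 1 + 16 + 4 + 1 + 2 + 2 + 4, "NAT64 BIB record");
  _Static_assert (NAT64_SES_LEN == NAT64_BIB_LEN + 16 + 4 + 2 + 2, "NAT64 session record");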
@@ -48,9 +58,58 @@ typedef struct
typedef struct
{
+ u32 limit;
u32 src_ip;
} snat_ipfix_logging_max_entries_per_user_args_t;
+typedef struct
+{
+ u32 limit;
+} nat_ipfix_logging_max_sessions_args_t;
+
+typedef struct
+{
+ u32 limit;
+} nat_ipfix_logging_max_bibs_args_t;
+
+typedef struct
+{
+ u32 limit;
+ u32 src;
+} nat_ipfix_logging_max_frags_ip4_args_t;
+
+typedef struct
+{
+ u32 limit;
+ u64 src[2];
+} nat_ipfix_logging_max_frags_ip6_args_t;
+
+typedef struct
+{
+ u8 nat_event;
+ u64 src_ip[2];
+ u32 nat_src_ip;
+ u8 proto;
+ u16 src_port;
+ u16 nat_src_port;
+ u64 dst_ip[2];
+ u32 nat_dst_ip;
+ u32 vrf_id;
+ u16 dst_port;
+ u16 nat_dst_port;
+} nat_ipfix_logging_nat64_ses_args_t;
+
+typedef struct
+{
+ u8 nat_event;
+ u64 src_ip[2];
+ u32 nat_src_ip;
+ u8 proto;
+ u16 src_port;
+ u16 nat_src_port;
+ u32 vrf_id;
+} nat_ipfix_logging_nat64_bib_args_t;
+
#define skip_if_disabled() \
do { \
snat_ipfix_logging_main_t *silm = &snat_ipfix_logging_main; \
@@ -105,6 +164,16 @@ snat_template_rewrite (flow_report_main_t * frm,
field_count = NAT44_SESSION_CREATE_FIELD_COUNT;
silm->nat44_session_template_id = fr->template_id;
}
+ else if (event == NAT64_BIB_CREATE)
+ {
+ field_count = NAT64_BIB_FIELD_COUNT;
+ silm->nat64_bib_template_id = fr->template_id;
+ }
+ else if (event == NAT64_SESSION_CREATE)
+ {
+ field_count = NAT64_SES_FIELD_COUNT;
+ silm->nat64_ses_template_id = fr->template_id;
+ }
else if (event == QUOTA_EXCEEDED)
{
if (quota_event == MAX_ENTRIES_PER_USER)
@@ -112,6 +181,26 @@ snat_template_rewrite (flow_report_main_t * frm,
field_count = MAX_ENTRIES_PER_USER_FIELD_COUNT;
silm->max_entries_per_user_template_id = fr->template_id;
}
+ else if (quota_event == MAX_SESSION_ENTRIES)
+ {
+ field_count = MAX_SESSIONS_FIELD_COUNT;
+ silm->max_sessions_template_id = fr->template_id;
+ }
+ else if (quota_event == MAX_BIB_ENTRIES)
+ {
+ field_count = MAX_BIBS_FIELD_COUNT;
+ silm->max_bibs_template_id = fr->template_id;
+ }
+ else if (quota_event == MAX_FRAGMENTS_PENDING_REASSEMBLY)
+ {
+ field_count = MAX_FRAGMENTS_FIELD_COUNT;
+ silm->max_frags_ip4_template_id = fr->template_id;
+ }
+ else if (quota_event == MAX_FRAGMENTS_PENDING_REASSEMBLY_IP6)
+ {
+ field_count = MAX_FRAGMENTS_FIELD_COUNT;
+ silm->max_frags_ip6_template_id = fr->template_id;
+ }
}
/* allocate rewrite space */
@@ -169,6 +258,53 @@ snat_template_rewrite (flow_report_main_t * frm,
f->e_id_length = ipfix_e_id_length (0, ingressVRFID, 4);
f++;
}
+ else if (event == NAT64_BIB_CREATE)
+ {
+ f->e_id_length = ipfix_e_id_length (0, observationTimeMilliseconds, 8);
+ f++;
+ f->e_id_length = ipfix_e_id_length (0, natEvent, 1);
+ f++;
+ f->e_id_length = ipfix_e_id_length (0, sourceIPv6Address, 16);
+ f++;
+ f->e_id_length = ipfix_e_id_length (0, postNATSourceIPv4Address, 4);
+ f++;
+ f->e_id_length = ipfix_e_id_length (0, protocolIdentifier, 1);
+ f++;
+ f->e_id_length = ipfix_e_id_length (0, sourceTransportPort, 2);
+ f++;
+ f->e_id_length = ipfix_e_id_length (0, postNAPTSourceTransportPort, 2);
+ f++;
+ f->e_id_length = ipfix_e_id_length (0, ingressVRFID, 4);
+ f++;
+ }
+ else if (event == NAT64_SESSION_CREATE)
+ {
+ f->e_id_length = ipfix_e_id_length (0, observationTimeMilliseconds, 8);
+ f++;
+ f->e_id_length = ipfix_e_id_length (0, natEvent, 1);
+ f++;
+ f->e_id_length = ipfix_e_id_length (0, sourceIPv6Address, 16);
+ f++;
+ f->e_id_length = ipfix_e_id_length (0, postNATSourceIPv4Address, 4);
+ f++;
+ f->e_id_length = ipfix_e_id_length (0, protocolIdentifier, 1);
+ f++;
+ f->e_id_length = ipfix_e_id_length (0, sourceTransportPort, 2);
+ f++;
+ f->e_id_length = ipfix_e_id_length (0, postNAPTSourceTransportPort, 2);
+ f++;
+ f->e_id_length = ipfix_e_id_length (0, destinationIPv6Address, 16);
+ f++;
+ f->e_id_length = ipfix_e_id_length (0, postNATDestinationIPv4Address, 4);
+ f++;
+ f->e_id_length = ipfix_e_id_length (0, destinationTransportPort, 2);
+ f++;
+ f->e_id_length = ipfix_e_id_length (0, postNAPTDestinationTransportPort,
+ 2);
+ f++;
+ f->e_id_length = ipfix_e_id_length (0, ingressVRFID, 4);
+ f++;
+ }
else if (event == QUOTA_EXCEEDED)
{
if (quota_event == MAX_ENTRIES_PER_USER)
@@ -180,9 +316,65 @@ snat_template_rewrite (flow_report_main_t * frm,
f++;
f->e_id_length = ipfix_e_id_length (0, natQuotaExceededEvent, 4);
f++;
+ f->e_id_length = ipfix_e_id_length (0, maxEntriesPerUser, 4);
+ f++;
f->e_id_length = ipfix_e_id_length (0, sourceIPv4Address, 4);
f++;
}
+ else if (quota_event == MAX_SESSION_ENTRIES)
+ {
+ f->e_id_length = ipfix_e_id_length (0, observationTimeMilliseconds,
+ 8);
+ f++;
+ f->e_id_length = ipfix_e_id_length (0, natEvent, 1);
+ f++;
+ f->e_id_length = ipfix_e_id_length (0, natQuotaExceededEvent, 4);
+ f++;
+ f->e_id_length = ipfix_e_id_length (0, maxSessionEntries, 4);
+ f++;
+ }
+ else if (quota_event == MAX_BIB_ENTRIES)
+ {
+ f->e_id_length = ipfix_e_id_length (0, observationTimeMilliseconds,
+ 8);
+ f++;
+ f->e_id_length = ipfix_e_id_length (0, natEvent, 1);
+ f++;
+ f->e_id_length = ipfix_e_id_length (0, natQuotaExceededEvent, 4);
+ f++;
+ f->e_id_length = ipfix_e_id_length (0, maxBIBEntries, 4);
+ f++;
+ }
+ else if (quota_event == MAX_FRAGMENTS_PENDING_REASSEMBLY)
+ {
+ f->e_id_length = ipfix_e_id_length (0, observationTimeMilliseconds,
+ 8);
+ f++;
+ f->e_id_length = ipfix_e_id_length (0, natEvent, 1);
+ f++;
+ f->e_id_length = ipfix_e_id_length (0, natQuotaExceededEvent, 4);
+ f++;
+ f->e_id_length = ipfix_e_id_length (0, maxFragmentsPendingReassembly,
+ 4);
+ f++;
+ f->e_id_length = ipfix_e_id_length (0, sourceIPv4Address, 4);
+ f++;
+ }
+ else if (quota_event == MAX_FRAGMENTS_PENDING_REASSEMBLY_IP6)
+ {
+ f->e_id_length = ipfix_e_id_length (0, observationTimeMilliseconds,
+ 8);
+ f++;
+ f->e_id_length = ipfix_e_id_length (0, natEvent, 1);
+ f++;
+ f->e_id_length = ipfix_e_id_length (0, natQuotaExceededEvent, 4);
+ f++;
+ f->e_id_length = ipfix_e_id_length (0, maxFragmentsPendingReassembly,
+ 4);
+ f++;
+ f->e_id_length = ipfix_e_id_length (0, sourceIPv6Address, 16);
+ f++;
+ }
}
/* Back to the template packet... */
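
Both NAT64 templates describe fixed-size records, so the session record written by nat_ipfix_logging_nat64_ses() below is always NAT64_SES_LEN (62) bytes, with every field in network byte order. The following packed struct illustrates that layout only; the patch itself copies the fields one by one with clib_memcpy() and defines no such type:

  /* Illustrative layout sketch, not part of the patch. */
  typedef struct __attribute__ ((packed))
  {
    u64 observation_time_ms;   /* observationTimeMilliseconds      */
    u8  nat_event;             /* NAT64_SESSION_CREATE / _DELETE   */
    u8  src_ip6[16];           /* sourceIPv6Address                */
    u32 post_nat_src_ip4;      /* postNATSourceIPv4Address         */
    u8  proto;                 /* protocolIdentifier               */
    u16 src_port;              /* sourceTransportPort              */
    u16 post_napt_src_port;    /* postNAPTSourceTransportPort      */
    u8  dst_ip6[16];           /* destinationIPv6Address           */
    u32 post_nat_dst_ip4;      /* postNATDestinationIPv4Address    */
    u16 dst_port;              /* destinationTransportPort         */
    u16 post_napt_dst_port;    /* postNAPTDestinationTransportPort */
    u32 ingress_vrf_id;        /* ingressVRFID                     */
  } nat64_ses_record_t;        /* sizeof () == NAT64_SES_LEN == 62 */

The BIB record is the same layout minus the destination fields, i.e. 38 bytes.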
@@ -240,6 +432,76 @@ snat_template_rewrite_max_entries_per_usr (flow_report_main_t * frm,
MAX_ENTRIES_PER_USER);
}
+u8 *
+nat_template_rewrite_max_sessions (flow_report_main_t * frm,
+ flow_report_t * fr,
+ ip4_address_t * collector_address,
+ ip4_address_t * src_address,
+ u16 collector_port)
+{
+ return snat_template_rewrite (frm, fr, collector_address, src_address,
+ collector_port, QUOTA_EXCEEDED,
+ MAX_SESSION_ENTRIES);
+}
+
+u8 *
+nat_template_rewrite_max_bibs (flow_report_main_t * frm,
+ flow_report_t * fr,
+ ip4_address_t * collector_address,
+ ip4_address_t * src_address,
+ u16 collector_port)
+{
+ return snat_template_rewrite (frm, fr, collector_address, src_address,
+ collector_port, QUOTA_EXCEEDED,
+ MAX_BIB_ENTRIES);
+}
+
+u8 *
+nat_template_rewrite_max_frags_ip4 (flow_report_main_t * frm,
+ flow_report_t * fr,
+ ip4_address_t * collector_address,
+ ip4_address_t * src_address,
+ u16 collector_port)
+{
+ return snat_template_rewrite (frm, fr, collector_address, src_address,
+ collector_port, QUOTA_EXCEEDED,
+ MAX_FRAGMENTS_PENDING_REASSEMBLY);
+}
+
+u8 *
+nat_template_rewrite_max_frags_ip6 (flow_report_main_t * frm,
+ flow_report_t * fr,
+ ip4_address_t * collector_address,
+ ip4_address_t * src_address,
+ u16 collector_port)
+{
+ return snat_template_rewrite (frm, fr, collector_address, src_address,
+ collector_port, QUOTA_EXCEEDED,
+ MAX_FRAGMENTS_PENDING_REASSEMBLY_IP6);
+}
+
+u8 *
+nat_template_rewrite_nat64_bib (flow_report_main_t * frm,
+ flow_report_t * fr,
+ ip4_address_t * collector_address,
+ ip4_address_t * src_address,
+ u16 collector_port)
+{
+ return snat_template_rewrite (frm, fr, collector_address, src_address,
+ collector_port, NAT64_BIB_CREATE, 0);
+}
+
+u8 *
+nat_template_rewrite_nat64_session (flow_report_main_t * frm,
+ flow_report_t * fr,
+ ip4_address_t * collector_address,
+ ip4_address_t * src_address,
+ u16 collector_port)
+{
+ return snat_template_rewrite (frm, fr, collector_address, src_address,
+ collector_port, NAT64_SESSION_CREATE, 0);
+}
+
static inline void
snat_ipfix_header_create (flow_report_main_t * frm,
vlib_buffer_t * b0, u32 * offset)
@@ -518,7 +780,7 @@ snat_ipfix_logging_addr_exhausted (u32 pool_id, int do_flush)
}
static void
-snat_ipfix_logging_max_entries_per_usr (u32 src_ip, int do_flush)
+snat_ipfix_logging_max_entries_per_usr (u32 limit, u32 src_ip, int do_flush)
{
snat_ipfix_logging_main_t *silm = &snat_ipfix_logging_main;
flow_report_main_t *frm = &flow_report_main;
@@ -590,6 +852,9 @@ snat_ipfix_logging_max_entries_per_usr (u32 src_ip, int do_flush)
clib_memcpy (b0->data + offset, &quota_event, sizeof (quota_event));
offset += sizeof (quota_event);
+ clib_memcpy (b0->data + offset, &limit, sizeof (limit));
+ offset += sizeof (limit);
+
clib_memcpy (b0->data + offset, &src_ip, sizeof (src_ip));
offset += sizeof (src_ip);
@@ -608,6 +873,590 @@ snat_ipfix_logging_max_entries_per_usr (u32 src_ip, int do_flush)
}
static void
+nat_ipfix_logging_max_ses (u32 limit, int do_flush)
+{
+ snat_ipfix_logging_main_t *silm = &snat_ipfix_logging_main;
+ flow_report_main_t *frm = &flow_report_main;
+ vlib_frame_t *f;
+ vlib_buffer_t *b0 = 0;
+ u32 bi0 = ~0;
+ u32 offset;
+ vlib_main_t *vm = frm->vlib_main;
+ u64 now;
+ vlib_buffer_free_list_t *fl;
+ u8 nat_event = QUOTA_EXCEEDED;
+ u32 quota_event = MAX_SESSION_ENTRIES;
+
+ if (!silm->enabled)
+ return;
+
+ now = (u64) ((vlib_time_now (vm) - silm->vlib_time_0) * 1e3);
+ now += silm->milisecond_time_0;
+
+ b0 = silm->max_sessions_buffer;
+
+ if (PREDICT_FALSE (b0 == 0))
+ {
+ if (do_flush)
+ return;
+
+ if (vlib_buffer_alloc (vm, &bi0, 1) != 1)
+ {
+ clib_warning ("can't allocate buffer for NAT IPFIX event");
+ return;
+ }
+
+ b0 = silm->max_sessions_buffer = vlib_get_buffer (vm, bi0);
+ fl =
+ vlib_buffer_get_free_list (vm, VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
+ vlib_buffer_init_for_free_list (b0, fl);
+ VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);
+ offset = 0;
+ }
+ else
+ {
+ bi0 = vlib_get_buffer_index (vm, b0);
+ offset = silm->max_sessions_next_record_offset;
+ }
+
+ f = silm->max_sessions_frame;
+ if (PREDICT_FALSE (f == 0))
+ {
+ u32 *to_next;
+ f = vlib_get_frame_to_node (vm, ip4_lookup_node.index);
+ silm->max_sessions_frame = f;
+ to_next = vlib_frame_vector_args (f);
+ to_next[0] = bi0;
+ f->n_vectors = 1;
+ }
+
+ if (PREDICT_FALSE (offset == 0))
+ snat_ipfix_header_create (frm, b0, &offset);
+
+ if (PREDICT_TRUE (do_flush == 0))
+ {
+ u64 time_stamp = clib_host_to_net_u64 (now);
+ clib_memcpy (b0->data + offset, &time_stamp, sizeof (time_stamp));
+ offset += sizeof (time_stamp);
+
+ clib_memcpy (b0->data + offset, &nat_event, sizeof (nat_event));
+ offset += sizeof (nat_event);
+
+ clib_memcpy (b0->data + offset, &quota_event, sizeof (quota_event));
+ offset += sizeof (quota_event);
+
+ clib_memcpy (b0->data + offset, &limit, sizeof (limit));
+ offset += sizeof (limit);
+
+ b0->current_length += MAX_SESSIONS_LEN;
+ }
+
+ if (PREDICT_FALSE
+ (do_flush || (offset + MAX_SESSIONS_LEN) > frm->path_mtu))
+ {
+ snat_ipfix_send (frm, f, b0, silm->max_sessions_template_id);
+ silm->max_sessions_frame = 0;
+ silm->max_sessions_buffer = 0;
+ offset = 0;
+ }
+ silm->max_sessions_next_record_offset = offset;
+}
+
+static void
+nat_ipfix_logging_max_bib (u32 limit, int do_flush)
+{
+ snat_ipfix_logging_main_t *silm = &snat_ipfix_logging_main;
+ flow_report_main_t *frm = &flow_report_main;
+ vlib_frame_t *f;
+ vlib_buffer_t *b0 = 0;
+ u32 bi0 = ~0;
+ u32 offset;
+ vlib_main_t *vm = frm->vlib_main;
+ u64 now;
+ vlib_buffer_free_list_t *fl;
+ u8 nat_event = QUOTA_EXCEEDED;
+ u32 quota_event = MAX_BIB_ENTRIES;
+
+ if (!silm->enabled)
+ return;
+
+ now = (u64) ((vlib_time_now (vm) - silm->vlib_time_0) * 1e3);
+ now += silm->milisecond_time_0;
+
+ b0 = silm->max_bibs_buffer;
+
+ if (PREDICT_FALSE (b0 == 0))
+ {
+ if (do_flush)
+ return;
+
+ if (vlib_buffer_alloc (vm, &bi0, 1) != 1)
+ {
+ clib_warning ("can't allocate buffer for NAT IPFIX event");
+ return;
+ }
+
+ b0 = silm->max_bibs_buffer = vlib_get_buffer (vm, bi0);
+ fl =
+ vlib_buffer_get_free_list (vm, VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
+ vlib_buffer_init_for_free_list (b0, fl);
+ VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);
+ offset = 0;
+ }
+ else
+ {
+ bi0 = vlib_get_buffer_index (vm, b0);
+ offset = silm->max_bibs_next_record_offset;
+ }
+
+ f = silm->max_bibs_frame;
+ if (PREDICT_FALSE (f == 0))
+ {
+ u32 *to_next;
+ f = vlib_get_frame_to_node (vm, ip4_lookup_node.index);
+ silm->max_bibs_frame = f;
+ to_next = vlib_frame_vector_args (f);
+ to_next[0] = bi0;
+ f->n_vectors = 1;
+ }
+
+ if (PREDICT_FALSE (offset == 0))
+ snat_ipfix_header_create (frm, b0, &offset);
+
+ if (PREDICT_TRUE (do_flush == 0))
+ {
+ u64 time_stamp = clib_host_to_net_u64 (now);
+ clib_memcpy (b0->data + offset, &time_stamp, sizeof (time_stamp));
+ offset += sizeof (time_stamp);
+
+ clib_memcpy (b0->data + offset, &nat_event, sizeof (nat_event));
+ offset += sizeof (nat_event);
+
+ clib_memcpy (b0->data + offset, &quota_event, sizeof (quota_event));
+ offset += sizeof (quota_event);
+
+ clib_memcpy (b0->data + offset, &limit, sizeof (limit));
+ offset += sizeof (limit);
+
+ b0->current_length += MAX_BIBS_LEN;
+ }
+
+ if (PREDICT_FALSE
+ (do_flush || (offset + MAX_BIBS_LEN) > frm->path_mtu))
+ {
+ snat_ipfix_send (frm, f, b0, silm->max_bibs_template_id);
+ silm->max_bibs_frame = 0;
+ silm->max_bibs_buffer = 0;
+ offset = 0;
+ }
+ silm->max_bibs_next_record_offset = offset;
+}
+
+static void
+nat_ipfix_logging_max_frag_ip4 (u32 limit, u32 src, int do_flush)
+{
+ snat_ipfix_logging_main_t *silm = &snat_ipfix_logging_main;
+ flow_report_main_t *frm = &flow_report_main;
+ vlib_frame_t *f;
+ vlib_buffer_t *b0 = 0;
+ u32 bi0 = ~0;
+ u32 offset;
+ vlib_main_t *vm = frm->vlib_main;
+ u64 now;
+ vlib_buffer_free_list_t *fl;
+ u8 nat_event = QUOTA_EXCEEDED;
+ u32 quota_event = MAX_FRAGMENTS_PENDING_REASSEMBLY;
+
+ if (!silm->enabled)
+ return;
+
+ now = (u64) ((vlib_time_now (vm) - silm->vlib_time_0) * 1e3);
+ now += silm->milisecond_time_0;
+
+ b0 = silm->max_frags_ip4_buffer;
+
+ if (PREDICT_FALSE (b0 == 0))
+ {
+ if (do_flush)
+ return;
+
+ if (vlib_buffer_alloc (vm, &bi0, 1) != 1)
+ {
+ clib_warning ("can't allocate buffer for NAT IPFIX event");
+ return;
+ }
+
+ b0 = silm->max_frags_ip4_buffer = vlib_get_buffer (vm, bi0);
+ fl =
+ vlib_buffer_get_free_list (vm, VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
+ vlib_buffer_init_for_free_list (b0, fl);
+ VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);
+ offset = 0;
+ }
+ else
+ {
+ bi0 = vlib_get_buffer_index (vm, b0);
+ offset = silm->max_frags_ip4_next_record_offset;
+ }
+
+ f = silm->max_frags_ip4_frame;
+ if (PREDICT_FALSE (f == 0))
+ {
+ u32 *to_next;
+ f = vlib_get_frame_to_node (vm, ip4_lookup_node.index);
+ silm->max_frags_ip4_frame = f;
+ to_next = vlib_frame_vector_args (f);
+ to_next[0] = bi0;
+ f->n_vectors = 1;
+ }
+
+ if (PREDICT_FALSE (offset == 0))
+ snat_ipfix_header_create (frm, b0, &offset);
+
+ if (PREDICT_TRUE (do_flush == 0))
+ {
+ u64 time_stamp = clib_host_to_net_u64 (now);
+ clib_memcpy (b0->data + offset, &time_stamp, sizeof (time_stamp));
+ offset += sizeof (time_stamp);
+
+ clib_memcpy (b0->data + offset, &nat_event, sizeof (nat_event));
+ offset += sizeof (nat_event);
+
+ clib_memcpy (b0->data + offset, &quota_event, sizeof (quota_event));
+ offset += sizeof (quota_event);
+
+ clib_memcpy (b0->data + offset, &limit, sizeof (limit));
+ offset += sizeof (limit);
+
+ clib_memcpy (b0->data + offset, &src, sizeof (src));
+ offset += sizeof (src);
+
+ b0->current_length += MAX_FRAGMENTS_IP4_LEN;
+ }
+
+ if (PREDICT_FALSE
+ (do_flush || (offset + MAX_FRAGMENTS_IP4_LEN) > frm->path_mtu))
+ {
+ snat_ipfix_send (frm, f, b0, silm->max_frags_ip4_template_id);
+ silm->max_frags_ip4_frame = 0;
+ silm->max_frags_ip4_buffer = 0;
+ offset = 0;
+ }
+ silm->max_frags_ip4_next_record_offset = offset;
+}
+
+static void
+nat_ipfix_logging_max_frag_ip6 (u32 limit, ip6_address_t * src, int do_flush)
+{
+ snat_ipfix_logging_main_t *silm = &snat_ipfix_logging_main;
+ flow_report_main_t *frm = &flow_report_main;
+ vlib_frame_t *f;
+ vlib_buffer_t *b0 = 0;
+ u32 bi0 = ~0;
+ u32 offset;
+ vlib_main_t *vm = frm->vlib_main;
+ u64 now;
+ vlib_buffer_free_list_t *fl;
+ u8 nat_event = QUOTA_EXCEEDED;
+ u32 quota_event = MAX_FRAGMENTS_PENDING_REASSEMBLY;
+
+ if (!silm->enabled)
+ return;
+
+ now = (u64) ((vlib_time_now (vm) - silm->vlib_time_0) * 1e3);
+ now += silm->milisecond_time_0;
+
+ b0 = silm->max_frags_ip6_buffer;
+
+ if (PREDICT_FALSE (b0 == 0))
+ {
+ if (do_flush)
+ return;
+
+ if (vlib_buffer_alloc (vm, &bi0, 1) != 1)
+ {
+ clib_warning ("can't allocate buffer for NAT IPFIX event");
+ return;
+ }
+
+ b0 = silm->max_frags_ip6_buffer = vlib_get_buffer (vm, bi0);
+ fl =
+ vlib_buffer_get_free_list (vm, VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
+ vlib_buffer_init_for_free_list (b0, fl);
+ VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);
+ offset = 0;
+ }
+ else
+ {
+ bi0 = vlib_get_buffer_index (vm, b0);
+ offset = silm->max_frags_ip6_next_record_offset;
+ }
+
+ f = silm->max_frags_ip6_frame;
+ if (PREDICT_FALSE (f == 0))
+ {
+ u32 *to_next;
+ f = vlib_get_frame_to_node (vm, ip4_lookup_node.index);
+ silm->max_frags_ip6_frame = f;
+ to_next = vlib_frame_vector_args (f);
+ to_next[0] = bi0;
+ f->n_vectors = 1;
+ }
+
+ if (PREDICT_FALSE (offset == 0))
+ snat_ipfix_header_create (frm, b0, &offset);
+
+ if (PREDICT_TRUE (do_flush == 0))
+ {
+ u64 time_stamp = clib_host_to_net_u64 (now);
+ clib_memcpy (b0->data + offset, &time_stamp, sizeof (time_stamp));
+ offset += sizeof (time_stamp);
+
+ clib_memcpy (b0->data + offset, &nat_event, sizeof (nat_event));
+ offset += sizeof (nat_event);
+
+ clib_memcpy (b0->data + offset, &quota_event, sizeof (quota_event));
+ offset += sizeof (quota_event);
+
+ clib_memcpy (b0->data + offset, &limit, sizeof (limit));
+ offset += sizeof (limit);
+
+ clib_memcpy (b0->data + offset, src, sizeof (ip6_address_t));
+ offset += sizeof (ip6_address_t);
+
+ b0->current_length += MAX_FRAGMENTS_IP6_LEN;
+ }
+
+ if (PREDICT_FALSE
+ (do_flush || (offset + MAX_FRAGMENTS_IP6_LEN) > frm->path_mtu))
+ {
+ snat_ipfix_send (frm, f, b0, silm->max_frags_ip6_template_id);
+ silm->max_frags_ip6_frame = 0;
+ silm->max_frags_ip6_buffer = 0;
+ offset = 0;
+ }
+ silm->max_frags_ip6_next_record_offset = offset;
+}
+
+static void
+nat_ipfix_logging_nat64_bibe (u8 nat_event, ip6_address_t * src_ip,
+ u32 nat_src_ip, u8 proto, u16 src_port,
+ u16 nat_src_port, u32 vrf_id, int do_flush)
+{
+ snat_ipfix_logging_main_t *silm = &snat_ipfix_logging_main;
+ flow_report_main_t *frm = &flow_report_main;
+ vlib_frame_t *f;
+ vlib_buffer_t *b0 = 0;
+ u32 bi0 = ~0;
+ u32 offset;
+ vlib_main_t *vm = frm->vlib_main;
+ u64 now;
+ vlib_buffer_free_list_t *fl;
+
+ if (!silm->enabled)
+ return;
+
+ now = (u64) ((vlib_time_now (vm) - silm->vlib_time_0) * 1e3);
+ now += silm->milisecond_time_0;
+
+ b0 = silm->nat64_bib_buffer;
+
+ if (PREDICT_FALSE (b0 == 0))
+ {
+ if (do_flush)
+ return;
+
+ if (vlib_buffer_alloc (vm, &bi0, 1) != 1)
+ {
+ clib_warning ("can't allocate buffer for NAT IPFIX event");
+ return;
+ }
+
+ b0 = silm->nat64_bib_buffer = vlib_get_buffer (vm, bi0);
+ fl =
+ vlib_buffer_get_free_list (vm, VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
+ vlib_buffer_init_for_free_list (b0, fl);
+ VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);
+ offset = 0;
+ }
+ else
+ {
+ bi0 = vlib_get_buffer_index (vm, b0);
+ offset = silm->nat64_bib_next_record_offset;
+ }
+
+ f = silm->nat64_bib_frame;
+ if (PREDICT_FALSE (f == 0))
+ {
+ u32 *to_next;
+ f = vlib_get_frame_to_node (vm, ip4_lookup_node.index);
+ silm->nat64_bib_frame = f;
+ to_next = vlib_frame_vector_args (f);
+ to_next[0] = bi0;
+ f->n_vectors = 1;
+ }
+
+ if (PREDICT_FALSE (offset == 0))
+ snat_ipfix_header_create (frm, b0, &offset);
+
+ if (PREDICT_TRUE (do_flush == 0))
+ {
+ u64 time_stamp = clib_host_to_net_u64 (now);
+ clib_memcpy (b0->data + offset, &time_stamp, sizeof (time_stamp));
+ offset += sizeof (time_stamp);
+
+ clib_memcpy (b0->data + offset, &nat_event, sizeof (nat_event));
+ offset += sizeof (nat_event);
+
+ clib_memcpy (b0->data + offset, src_ip, sizeof (ip6_address_t));
+ offset += sizeof (ip6_address_t);
+
+ clib_memcpy (b0->data + offset, &nat_src_ip, sizeof (nat_src_ip));
+ offset += sizeof (nat_src_ip);
+
+ clib_memcpy (b0->data + offset, &proto, sizeof (proto));
+ offset += sizeof (proto);
+
+ clib_memcpy (b0->data + offset, &src_port, sizeof (src_port));
+ offset += sizeof (src_port);
+
+ clib_memcpy (b0->data + offset, &nat_src_port, sizeof (nat_src_port));
+ offset += sizeof (nat_src_port);
+
+ clib_memcpy (b0->data + offset, &vrf_id, sizeof (vrf_id));
+ offset += sizeof (vrf_id);
+
+ b0->current_length += NAT64_BIB_LEN;
+ }
+
+ if (PREDICT_FALSE
+ (do_flush || (offset + NAT64_BIB_LEN) > frm->path_mtu))
+ {
+ snat_ipfix_send (frm, f, b0, silm->nat64_bib_template_id);
+ silm->nat64_bib_frame = 0;
+ silm->nat64_bib_buffer = 0;
+ offset = 0;
+ }
+ silm->nat64_bib_next_record_offset = offset;
+}
+
+static void
+nat_ipfix_logging_nat64_ses (u8 nat_event, ip6_address_t * src_ip,
+ u32 nat_src_ip, u8 proto, u16 src_port,
+ u16 nat_src_port, ip6_address_t * dst_ip,
+ u32 nat_dst_ip, u16 dst_port, u16 nat_dst_port,
+ u32 vrf_id, int do_flush)
+{
+ snat_ipfix_logging_main_t *silm = &snat_ipfix_logging_main;
+ flow_report_main_t *frm = &flow_report_main;
+ vlib_frame_t *f;
+ vlib_buffer_t *b0 = 0;
+ u32 bi0 = ~0;
+ u32 offset;
+ vlib_main_t *vm = frm->vlib_main;
+ u64 now;
+ vlib_buffer_free_list_t *fl;
+
+ if (!silm->enabled)
+ return;
+
+ now = (u64) ((vlib_time_now (vm) - silm->vlib_time_0) * 1e3);
+ now += silm->milisecond_time_0;
+
+ b0 = silm->nat64_ses_buffer;
+
+ if (PREDICT_FALSE (b0 == 0))
+ {
+ if (do_flush)
+ return;
+
+ if (vlib_buffer_alloc (vm, &bi0, 1) != 1)
+ {
+ clib_warning ("can't allocate buffer for NAT IPFIX event");
+ return;
+ }
+
+ b0 = silm->nat64_ses_buffer = vlib_get_buffer (vm, bi0);
+ fl =
+ vlib_buffer_get_free_list (vm, VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
+ vlib_buffer_init_for_free_list (b0, fl);
+ VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);
+ offset = 0;
+ }
+ else
+ {
+ bi0 = vlib_get_buffer_index (vm, b0);
+ offset = silm->nat64_ses_next_record_offset;
+ }
+
+ f = silm->nat64_ses_frame;
+ if (PREDICT_FALSE (f == 0))
+ {
+ u32 *to_next;
+ f = vlib_get_frame_to_node (vm, ip4_lookup_node.index);
+ silm->nat64_ses_frame = f;
+ to_next = vlib_frame_vector_args (f);
+ to_next[0] = bi0;
+ f->n_vectors = 1;
+ }
+
+ if (PREDICT_FALSE (offset == 0))
+ snat_ipfix_header_create (frm, b0, &offset);
+
+ if (PREDICT_TRUE (do_flush == 0))
+ {
+ u64 time_stamp = clib_host_to_net_u64 (now);
+ clib_memcpy (b0->data + offset, &time_stamp, sizeof (time_stamp));
+ offset += sizeof (time_stamp);
+
+ clib_memcpy (b0->data + offset, &nat_event, sizeof (nat_event));
+ offset += sizeof (nat_event);
+
+ clib_memcpy (b0->data + offset, src_ip, sizeof (ip6_address_t));
+ offset += sizeof (ip6_address_t);
+
+ clib_memcpy (b0->data + offset, &nat_src_ip, sizeof (nat_src_ip));
+ offset += sizeof (nat_src_ip);
+
+ clib_memcpy (b0->data + offset, &proto, sizeof (proto));
+ offset += sizeof (proto);
+
+ clib_memcpy (b0->data + offset, &src_port, sizeof (src_port));
+ offset += sizeof (src_port);
+
+ clib_memcpy (b0->data + offset, &nat_src_port, sizeof (nat_src_port));
+ offset += sizeof (nat_src_port);
+
+ clib_memcpy (b0->data + offset, dst_ip, sizeof (ip6_address_t));
+ offset += sizeof (ip6_address_t);
+
+ clib_memcpy (b0->data + offset, &nat_dst_ip, sizeof (nat_dst_ip));
+ offset += sizeof (nat_dst_ip);
+
+ clib_memcpy (b0->data + offset, &dst_port, sizeof (dst_port));
+ offset += sizeof (dst_port);
+
+ clib_memcpy (b0->data + offset, &nat_dst_port, sizeof (nat_dst_port));
+ offset += sizeof (nat_dst_port);
+
+ clib_memcpy (b0->data + offset, &vrf_id, sizeof (vrf_id));
+ offset += sizeof (vrf_id);
+
+ b0->current_length += NAT64_SES_LEN;
+ }
+
+ if (PREDICT_FALSE
+ (do_flush || (offset + NAT64_SES_LEN) > frm->path_mtu))
+ {
+ snat_ipfix_send (frm, f, b0, silm->nat64_ses_template_id);
+ silm->nat64_ses_frame = 0;
+ silm->nat64_ses_buffer = 0;
+ offset = 0;
+ }
+ silm->nat64_ses_next_record_offset = offset;
+}
+
+static void
snat_ipfix_logging_nat44_ses_rpc_cb (snat_ipfix_logging_nat44_ses_args_t * a)
{
snat_ipfix_logging_nat44_ses (a->nat_event, a->src_ip, a->nat_src_ip,
@@ -731,22 +1580,24 @@ static void
snat_ipfix_logging_max_entries_per_usr_rpc_cb
(snat_ipfix_logging_max_entries_per_user_args_t * a)
{
- snat_ipfix_logging_max_entries_per_usr (a->src_ip, 0);
+ snat_ipfix_logging_max_entries_per_usr (a->limit, a->src_ip, 0);
}
/**
* @brief Generate maximum entries per user exceeded event
*
+ * @param limit maximum NAT entries that can be created per user
* @param src_ip source IPv4 address
*/
void
-snat_ipfix_logging_max_entries_per_user (u32 src_ip)
+snat_ipfix_logging_max_entries_per_user (u32 limit, u32 src_ip)
{
//TODO: This event SHOULD be rate limited
snat_ipfix_logging_max_entries_per_user_args_t a;
skip_if_disabled ();
+ a.limit = limit;
a.src_ip = src_ip;
vl_api_rpc_call_main_thread (snat_ipfix_logging_max_entries_per_usr_rpc_cb,
@@ -759,7 +1610,279 @@ snat_data_callback_max_entries_per_usr (flow_report_main_t * frm,
vlib_frame_t * f,
u32 * to_next, u32 node_index)
{
- snat_ipfix_logging_max_entries_per_usr (0, 1);
+ snat_ipfix_logging_max_entries_per_usr (0, 0, 1);
+ return f;
+}
+
+static void
+nat_ipfix_logging_max_ses_rpc_cb (nat_ipfix_logging_max_sessions_args_t * a)
+{
+ nat_ipfix_logging_max_ses (a->limit, 0);
+}
+
+/**
+ * @brief Generate maximum session entries exceeded event
+ *
+ * @param limit configured limit
+ */
+void
+nat_ipfix_logging_max_sessions (u32 limit)
+{
+ //TODO: This event SHOULD be rate limited
+ nat_ipfix_logging_max_sessions_args_t a;
+
+ skip_if_disabled ();
+
+ a.limit = limit;
+
+ vl_api_rpc_call_main_thread (nat_ipfix_logging_max_ses_rpc_cb,
+ (u8 *) & a, sizeof (a));
+}
+
+vlib_frame_t *
+nat_data_callback_max_sessions (flow_report_main_t * frm,
+ flow_report_t * fr,
+ vlib_frame_t * f,
+ u32 * to_next, u32 node_index)
+{
+ nat_ipfix_logging_max_ses (0, 1);
+ return f;
+}
+
+static void
+nat_ipfix_logging_max_bib_rpc_cb (nat_ipfix_logging_max_bibs_args_t * a)
+{
+ nat_ipfix_logging_max_bib (a->limit, 0);
+}
+
+/**
+ * @brief Generate maximum BIB entries exceeded event
+ *
+ * @param limit configured limit
+ */
+void
+nat_ipfix_logging_max_bibs (u32 limit)
+{
+ //TODO: This event SHOULD be rate limited
+ nat_ipfix_logging_max_bibs_args_t a;
+
+ skip_if_disabled ();
+
+ a.limit = limit;
+
+ vl_api_rpc_call_main_thread (nat_ipfix_logging_max_bib_rpc_cb,
+ (u8 *) & a, sizeof (a));
+}
+
+vlib_frame_t *
+nat_data_callback_max_bibs (flow_report_main_t * frm,
+ flow_report_t * fr,
+ vlib_frame_t * f,
+ u32 * to_next, u32 node_index)
+{
+ nat_ipfix_logging_max_bib (0, 1);
+ return f;
+}
+
+static void
+nat_ipfix_logging_max_frag_ip4_rpc_cb (nat_ipfix_logging_max_frags_ip4_args_t * a)
+{
+ nat_ipfix_logging_max_frag_ip4 (a->limit, a->src, 0);
+}
+
+/**
+ * @brief Generate maximum IPv4 fragments pending reassembly exceeded event
+ *
+ * @param limit configured limit
+ * @param src source IPv4 address
+ */
+void
+nat_ipfix_logging_max_fragments_ip4 (u32 limit, ip4_address_t * src)
+{
+ //TODO: This event SHOULD be rate limited
+ nat_ipfix_logging_max_frags_ip4_args_t a;
+
+ skip_if_disabled ();
+
+ a.limit = limit;
+ a.src = src->as_u32;
+
+ vl_api_rpc_call_main_thread (nat_ipfix_logging_max_frag_ip4_rpc_cb,
+ (u8 *) & a, sizeof (a));
+}
+
+vlib_frame_t *
+nat_data_callback_max_frags_ip4 (flow_report_main_t * frm,
+ flow_report_t * fr,
+ vlib_frame_t * f,
+ u32 * to_next, u32 node_index)
+{
+ nat_ipfix_logging_max_frag_ip4 (0, 0, 1);
+ return f;
+}
+
+static void
+nat_ipfix_logging_max_frag_ip6_rpc_cb (nat_ipfix_logging_max_frags_ip6_args_t * a)
+{
+ ip6_address_t src;
+ src.as_u64[0] = a->src[0];
+ src.as_u64[1] = a->src[1];
+ nat_ipfix_logging_max_frag_ip6 (a->limit, &src, 0);
+}
+
+/**
+ * @brief Generate maximum IPv6 fragments pending reassembly exceeded event
+ *
+ * @param limit configured limit
+ * @param src source IPv6 address
+ */
+void
+nat_ipfix_logging_max_fragments_ip6 (u32 limit, ip6_address_t * src)
+{
+ //TODO: This event SHOULD be rate limited
+ nat_ipfix_logging_max_frags_ip6_args_t a;
+
+ skip_if_disabled ();
+
+ a.limit = limit;
+ a.src[0] = src->as_u64[0];
+ a.src[1] = src->as_u64[1];
+
+ vl_api_rpc_call_main_thread (nat_ipfix_logging_max_frag_ip6_rpc_cb,
+ (u8 *) & a, sizeof (a));
+}
+
+vlib_frame_t *
+nat_data_callback_max_frags_ip6 (flow_report_main_t * frm,
+ flow_report_t * fr,
+ vlib_frame_t * f,
+ u32 * to_next, u32 node_index)
+{
+ nat_ipfix_logging_max_frag_ip6 (0, 0, 1);
+ return f;
+}
+
+static void
+nat_ipfix_logging_nat64_bib_rpc_cb (nat_ipfix_logging_nat64_bib_args_t * a)
+{
+ ip6_address_t src_ip;
+ src_ip.as_u64[0] = a->src_ip[0];
+ src_ip.as_u64[1] = a->src_ip[1];
+ nat_ipfix_logging_nat64_bibe (a->nat_event, &src_ip, a->nat_src_ip,
+ a->proto, a->src_port, a->nat_src_port,
+ a->vrf_id, 0);
+}
+
+/**
+ * @brief Generate NAT64 BIB create and delete events
+ *
+ * @param src_ip source IPv6 address
+ * @param nat_src_ip translated source IPv4 address
+ * @param proto L4 protocol
+ * @param src_port source port
+ * @param nat_src_port translated source port
+ * @param vrf_id VRF ID
+ * @param is_create non-zero value for create event, zero for delete event
+ */
+void
+nat_ipfix_logging_nat64_bib (ip6_address_t * src_ip,
+ ip4_address_t * nat_src_ip, u8 proto,
+ u16 src_port, u16 nat_src_port, u32 vrf_id,
+ u8 is_create)
+{
+ nat_ipfix_logging_nat64_bib_args_t a;
+
+ skip_if_disabled ();
+
+ a.src_ip[0] = src_ip->as_u64[0];
+ a.src_ip[1] = src_ip->as_u64[1];
+ a.nat_src_ip = nat_src_ip->as_u32;
+ a.proto = proto;
+ a.src_port = src_port;
+ a.nat_src_port = nat_src_port;
+ a.vrf_id = vrf_id;
+ a.nat_event = is_create ? NAT64_BIB_CREATE : NAT64_BIB_DELETE;
+
+ vl_api_rpc_call_main_thread (nat_ipfix_logging_nat64_bib_rpc_cb,
+ (u8 *) & a, sizeof (a));
+}
+
+vlib_frame_t *
+nat_data_callback_nat64_bib (flow_report_main_t * frm,
+ flow_report_t * fr,
+ vlib_frame_t * f,
+ u32 * to_next, u32 node_index)
+{
+ nat_ipfix_logging_nat64_bibe (0, 0, 0, 0, 0, 0, 0, 1);
+ return f;
+}
+
+static void
+nat_ipfix_logging_nat64_ses_rpc_cb (nat_ipfix_logging_nat64_ses_args_t * a)
+{
+ ip6_address_t src_ip, dst_ip;
+ src_ip.as_u64[0] = a->src_ip[0];
+ src_ip.as_u64[1] = a->src_ip[1];
+ dst_ip.as_u64[0] = a->dst_ip[0];
+ dst_ip.as_u64[1] = a->dst_ip[1];
+ nat_ipfix_logging_nat64_ses (a->nat_event, &src_ip, a->nat_src_ip,
+ a->proto, a->src_port, a->nat_src_port,
+ &dst_ip, a->nat_dst_ip, a->dst_port,
+ a->nat_dst_port, a->vrf_id, 0);
+}
+
+/**
+ * @brief Generate NAT64 session create and delete events
+ *
+ * @param src_ip source IPv6 address
+ * @param nat_src_ip translated source IPv4 address
+ * @param proto L4 protocol
+ * @param src_port source port
+ * @param nat_src_port translated source port
+ * @param dst_ip destination IPv6 address
+ * @param nat_dst_ip translated destination IPv4 address
+ * @param dst_port destination port
+ * @param nat_dst_port translated destination port
+ * @param vrf_id VRF ID
+ * @param is_create non-zero value for create event, zero for delete event
+ */
+void
+nat_ipfix_logging_nat64_session (ip6_address_t * src_ip,
+ ip4_address_t * nat_src_ip, u8 proto,
+ u16 src_port, u16 nat_src_port,
+ ip6_address_t * dst_ip,
+ ip4_address_t * nat_dst_ip, u16 dst_port,
+ u16 nat_dst_port, u32 vrf_id, u8 is_create)
+{
+ nat_ipfix_logging_nat64_ses_args_t a;
+
+ skip_if_disabled ();
+
+ a.src_ip[0] = src_ip->as_u64[0];
+ a.src_ip[1] = src_ip->as_u64[1];
+ a.nat_src_ip = nat_src_ip->as_u32;
+ a.proto = proto;
+ a.src_port = src_port;
+ a.nat_src_port = nat_src_port;
+ a.dst_ip[0] = dst_ip->as_u64[0];
+ a.dst_ip[1] = dst_ip->as_u64[1];
+ a.nat_dst_ip = nat_dst_ip->as_u32;
+ a.dst_port = dst_port;
+ a.nat_dst_port = nat_dst_port;
+ a.vrf_id = vrf_id;
+ a.nat_event = is_create ? NAT64_SESSION_CREATE : NAT64_SESSION_DELETE;
+
+ vl_api_rpc_call_main_thread (nat_ipfix_logging_nat64_ses_rpc_cb,
+ (u8 *) & a, sizeof (a));
+}
+
+vlib_frame_t *
+nat_data_callback_nat64_session (flow_report_main_t * frm,
+ flow_report_t * fr,
+ vlib_frame_t * f,
+ u32 * to_next, u32 node_index)
+{
+ nat_ipfix_logging_nat64_ses (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1);
return f;
}
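
All six new event writers above share one structure: a public entry point that may run on any worker thread packs its arguments into a small struct and defers the actual record write to the main thread with vl_api_rpc_call_main_thread(), while a nat_data_callback_* function registered with the flow-report infrastructure calls the same writer with do_flush = 1 and dummy arguments, so partially filled buffers are periodically handed to ip4-lookup for export. A condensed sketch of the pattern, using the max-BIBs event as the example (not new behaviour, just the shape of the code above):

  /* Pattern sketch, condensed from the code above. */
  void
  nat_ipfix_logging_max_bibs (u32 limit)        /* callable from any thread */
  {
    nat_ipfix_logging_max_bibs_args_t a = { .limit = limit };
    skip_if_disabled ();                        /* no-op unless logging is enabled */
    vl_api_rpc_call_main_thread (nat_ipfix_logging_max_bib_rpc_cb,
                                 (u8 *) &a, sizeof (a));
  }

  vlib_frame_t *
  nat_data_callback_max_bibs (flow_report_main_t *frm, flow_report_t *fr,
                              vlib_frame_t *f, u32 *to_next, u32 node_index)
  {
    nat_ipfix_logging_max_bib (0 /* ignored */, 1 /* do_flush */);  /* main thread */
    return f;
  }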
@@ -825,6 +1948,66 @@ snat_ipfix_logging_enable_disable (int enable, u32 domain_id, u16 src_port)
clib_warning ("vnet_flow_report_add_del returned %d", rv);
return -1;
}
+
+ a.rewrite_callback = nat_template_rewrite_max_sessions;
+ a.flow_data_callback = nat_data_callback_max_sessions;
+
+ rv = vnet_flow_report_add_del (frm, &a, NULL);
+ if (rv)
+ {
+ clib_warning ("vnet_flow_report_add_del returned %d", rv);
+ return -1;
+ }
+
+ a.rewrite_callback = nat_template_rewrite_max_bibs;
+ a.flow_data_callback = nat_data_callback_max_bibs;
+
+ rv = vnet_flow_report_add_del (frm, &a, NULL);
+ if (rv)
+ {
+ clib_warning ("vnet_flow_report_add_del returned %d", rv);
+ return -1;
+ }
+
+ a.rewrite_callback = nat_template_rewrite_max_frags_ip4;
+ a.flow_data_callback = nat_data_callback_max_frags_ip4;
+
+ rv = vnet_flow_report_add_del (frm, &a, NULL);
+ if (rv)
+ {
+ clib_warning ("vnet_flow_report_add_del returned %d", rv);
+ return -1;
+ }
+
+ a.rewrite_callback = nat_template_rewrite_max_frags_ip6;
+ a.flow_data_callback = nat_data_callback_max_frags_ip6;
+
+ rv = vnet_flow_report_add_del (frm, &a, NULL);
+ if (rv)
+ {
+ clib_warning ("vnet_flow_report_add_del returned %d", rv);
+ return -1;
+ }
+
+ a.rewrite_callback = nat_template_rewrite_nat64_bib;
+ a.flow_data_callback = nat_data_callback_nat64_bib;
+
+ rv = vnet_flow_report_add_del (frm, &a, NULL);
+ if (rv)
+ {
+ clib_warning ("vnet_flow_report_add_del returned %d", rv);
+ return -1;
+ }
+
+ a.rewrite_callback = nat_template_rewrite_nat64_session;
+ a.flow_data_callback = nat_data_callback_nat64_session;
+
+ rv = vnet_flow_report_add_del (frm, &a, NULL);
+ if (rv)
+ {
+ clib_warning ("vnet_flow_report_add_del returned %d", rv);
+ return -1;
+ }
}
return 0;
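
The new templates are only advertised once an IPFIX exporter is configured and NAT IPFIX logging is enabled; the tests below do this through the binary API (set_ipfix_exporter and nat_ipfix). The equivalent CLI configuration looks roughly like the following sketch, where the collector and source addresses are placeholders and the exact option names may differ between VPP releases:

  set ipfix exporter collector 203.0.113.1 src 203.0.113.2 path-mtu 512 template-interval 10
  nat ipfix logging domain 1 src-port 4739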
diff --git a/src/plugins/nat/nat_ipfix_logging.h b/src/plugins/nat/nat_ipfix_logging.h
index 6dbf6627fba..0750149d7a5 100644
--- a/src/plugins/nat/nat_ipfix_logging.h
+++ b/src/plugins/nat/nat_ipfix_logging.h
@@ -17,16 +17,26 @@
#ifndef __included_nat_ipfix_logging_h__
#define __included_nat_ipfix_logging_h__
+#include <nat/nat.h>
+
typedef enum {
NAT_ADDRESSES_EXHAUTED = 3,
NAT44_SESSION_CREATE = 4,
NAT44_SESSION_DELETE = 5,
+ NAT64_SESSION_CREATE = 6,
+ NAT64_SESSION_DELETE = 7,
+ NAT64_BIB_CREATE = 10,
+ NAT64_BIB_DELETE = 11,
NAT_PORTS_EXHAUSTED = 12,
QUOTA_EXCEEDED = 13,
} nat_event_t;
typedef enum {
+ MAX_SESSION_ENTRIES = 1,
+ MAX_BIB_ENTRIES = 2,
MAX_ENTRIES_PER_USER = 3,
+ MAX_FRAGMENTS_PENDING_REASSEMBLY = 5,
+ MAX_FRAGMENTS_PENDING_REASSEMBLY_IP6,
} quota_exceed_event_t;
typedef struct {
@@ -37,16 +47,34 @@ typedef struct {
vlib_buffer_t *nat44_session_buffer;
vlib_buffer_t *addr_exhausted_buffer;
vlib_buffer_t *max_entries_per_user_buffer;
+ vlib_buffer_t *max_sessions_buffer;
+ vlib_buffer_t *max_bibs_buffer;
+ vlib_buffer_t *max_frags_ip4_buffer;
+ vlib_buffer_t *max_frags_ip6_buffer;
+ vlib_buffer_t *nat64_bib_buffer;
+ vlib_buffer_t *nat64_ses_buffer;
/** frames containing ipfix buffers */
vlib_frame_t *nat44_session_frame;
vlib_frame_t *addr_exhausted_frame;
vlib_frame_t *max_entries_per_user_frame;
+ vlib_frame_t *max_sessions_frame;
+ vlib_frame_t *max_bibs_frame;
+ vlib_frame_t *max_frags_ip4_frame;
+ vlib_frame_t *max_frags_ip6_frame;
+ vlib_frame_t *nat64_bib_frame;
+ vlib_frame_t *nat64_ses_frame;
/** next record offset */
u32 nat44_session_next_record_offset;
u32 addr_exhausted_next_record_offset;
u32 max_entries_per_user_next_record_offset;
+ u32 max_sessions_next_record_offset;
+ u32 max_bibs_next_record_offset;
+ u32 max_frags_ip4_next_record_offset;
+ u32 max_frags_ip6_next_record_offset;
+ u32 nat64_bib_next_record_offset;
+ u32 nat64_ses_next_record_offset;
/** Time reference pair */
u64 milisecond_time_0;
@@ -56,6 +84,12 @@ typedef struct {
u16 nat44_session_template_id;
u16 addr_exhausted_template_id;
u16 max_entries_per_user_template_id;
+ u16 max_sessions_template_id;
+ u16 max_bibs_template_id;
+ u16 max_frags_ip4_template_id;
+ u16 max_frags_ip6_template_id;
+ u16 nat64_bib_template_id;
+ u16 nat64_ses_template_id;
/** stream index */
u32 stream_index;
@@ -74,6 +108,21 @@ void snat_ipfix_logging_nat44_ses_delete (u32 src_ip, u32 nat_src_ip,
u16 src_port, u16 nat_src_port,
u32 vrf_id);
void snat_ipfix_logging_addresses_exhausted(u32 pool_id);
-void snat_ipfix_logging_max_entries_per_user(u32 src_ip);
+void snat_ipfix_logging_max_entries_per_user(u32 limit, u32 src_ip);
+void nat_ipfix_logging_max_sessions(u32 limit);
+void nat_ipfix_logging_max_bibs(u32 limit);
+void nat_ipfix_logging_max_fragments_ip4(u32 limit, ip4_address_t * src);
+void nat_ipfix_logging_max_fragments_ip6(u32 limit, ip6_address_t * src);
+void nat_ipfix_logging_nat64_session(ip6_address_t * src_ip,
+ ip4_address_t * nat_src_ip, u8 proto,
+ u16 src_port, u16 nat_src_port,
+ ip6_address_t * dst_ip,
+ ip4_address_t * nat_dst_ip,
+ u16 dst_port, u16 nat_dst_port,
+ u32 vrf_id, u8 is_create);
+void nat_ipfix_logging_nat64_bib(ip6_address_t * src_ip,
+ ip4_address_t * nat_src_ip, u8 proto,
+ u16 src_port, u16 nat_src_port,
+ u32 vrf_id, u8 is_create);
#endif /* __included_nat_ipfix_logging_h__ */
diff --git a/src/plugins/nat/nat_reass.c b/src/plugins/nat/nat_reass.c
index a97d8f017e9..0a3660d9200 100644
--- a/src/plugins/nat/nat_reass.c
+++ b/src/plugins/nat/nat_reass.c
@@ -19,6 +19,7 @@
#include <vnet/vnet.h>
#include <nat/nat_reass.h>
+#include <nat/nat_ipfix_logging.h>
nat_reass_main_t nat_reass_main;
@@ -325,7 +326,11 @@ nat_ip4_reass_add_fragment (nat_reass_ip4_t * reass, u32 bi)
u32 elt_index;
if (reass->frag_n >= srm->ip4_max_frag)
- return -1;
+ {
+ nat_ipfix_logging_max_fragments_ip4 (srm->ip4_max_frag,
+ &reass->key.src);
+ return -1;
+ }
clib_spinlock_lock_if_init (&srm->ip4_reass_lock);
@@ -517,7 +522,11 @@ nat_ip6_reass_add_fragment (nat_reass_ip6_t * reass, u32 bi)
u32 elt_index;
if (reass->frag_n >= srm->ip6_max_frag)
- return -1;
+ {
+ nat_ipfix_logging_max_fragments_ip6 (srm->ip6_max_frag,
+ &reass->key.src);
+ return -1;
+ }
clib_spinlock_lock_if_init (&srm->ip6_reass_lock);
@@ -718,7 +727,7 @@ show_nat_reass_command_fn (vlib_main_t * vm, unformat_input_t * input,
{
vlib_cli_output (vm, "NAT IPv4 virtual fragmentation reassembly is %s",
nat_reass_is_drop_frag (0) ? "DISABLED" : "ENABLED");
- vlib_cli_output (vm, " max-reasssemblies %u", nat_reass_get_max_reass (0));
+ vlib_cli_output (vm, " max-reassemblies %u", nat_reass_get_max_reass (0));
vlib_cli_output (vm, " max-fragments %u", nat_reass_get_max_frag (0));
vlib_cli_output (vm, " timeout %usec", nat_reass_get_timeout (0));
vlib_cli_output (vm, " reassemblies:");
@@ -726,7 +735,7 @@ show_nat_reass_command_fn (vlib_main_t * vm, unformat_input_t * input,
vlib_cli_output (vm, "NAT IPv6 virtual fragmentation reassembly is %s",
nat_reass_is_drop_frag (1) ? "DISABLED" : "ENABLED");
- vlib_cli_output (vm, " max-reasssemblies %u", nat_reass_get_max_reass (1));
+ vlib_cli_output (vm, " max-reassemblies %u", nat_reass_get_max_reass (1));
vlib_cli_output (vm, " max-fragments %u", nat_reass_get_max_frag (1));
vlib_cli_output (vm, " timeout %usec", nat_reass_get_timeout (1));
vlib_cli_output (vm, " reassemblies:");
diff --git a/src/vnet/flow/ipfix_info_elements.h b/src/vnet/flow/ipfix_info_elements.h
index ee6f53804bf..171cdfe702e 100644
--- a/src/vnet/flow/ipfix_info_elements.h
+++ b/src/vnet/flow/ipfix_info_elements.h
@@ -419,7 +419,11 @@ _(layer2FrameDeltaCount, 430, u64) \
_(layer2FrameTotalCount, 431, u64) \
_(pseudoWireDestinationIPv4Address, 432, ip4_address_t) \
_(ignoredLayer2FrameTotalCount, 433, u64) \
-_(natQuotaExceededEvent, 466, u32)
+_(natQuotaExceededEvent, 466, u32) \
+_(maxSessionEntries, 471, u32) \
+_(maxBIBEntries, 472, u32) \
+_(maxEntriesPerUser, 473, u32) \
+_(maxFragmentsPendingReassembly, 475, u32)
typedef enum
{
diff --git a/test/ipfix.py b/test/ipfix.py
index deaff67ba42..97b559e083b 100644
--- a/test/ipfix.py
+++ b/test/ipfix.py
@@ -436,7 +436,12 @@ information_elements = {
459: "httpRequestMethod",
460: "httpRequestHost",
461: "httpRequestTarget",
- 462: "httpMessageVersion"
+ 462: "httpMessageVersion",
+ 466: "natQuotaExceededEvent",
+ 471: "maxSessionEntries",
+ 472: "maxBIBEntries",
+ 473: "maxEntriesPerUser",
+ 475: "maxFragmentsPendingReassembly"
}
diff --git a/test/test_nat.py b/test/test_nat.py
index 4ced0af46e0..7194704046d 100644
--- a/test/test_nat.py
+++ b/test/test_nat.py
@@ -749,6 +749,148 @@ class MethodHolder(VppTestCase):
# natPoolID
self.assertEqual(struct.pack("!I", 0), record[283])
+ def verify_ipfix_max_sessions(self, data, limit):
+ """
+ Verify IPFIX maximum session entries exceeded event
+
+ :param data: Decoded IPFIX data records
+ :param limit: Number of maximum session entries that can be created.
+ """
+ self.assertEqual(1, len(data))
+ record = data[0]
+ # natEvent
+ self.assertEqual(ord(record[230]), 13)
+ # natQuotaExceededEvent
+ self.assertEqual(struct.pack("I", 1), record[466])
+ # maxSessionEntries
+ self.assertEqual(struct.pack("I", limit), record[471])
+
+ def verify_ipfix_max_bibs(self, data, limit):
+ """
+ Verify IPFIX maximum BIB entries exceeded event
+
+ :param data: Decoded IPFIX data records
+ :param limit: Number of maximum BIB entries that can be created.
+ """
+ self.assertEqual(1, len(data))
+ record = data[0]
+ # natEvent
+ self.assertEqual(ord(record[230]), 13)
+ # natQuotaExceededEvent
+ self.assertEqual(struct.pack("I", 2), record[466])
+ # maxBIBEntries
+ self.assertEqual(struct.pack("I", limit), record[472])
+
+ def verify_ipfix_max_fragments_ip6(self, data, limit, src_addr):
+ """
+ Verify IPFIX maximum IPv6 fragments pending reassembly exceeded event
+
+ :param data: Decoded IPFIX data records
+ :param limit: Number of maximum fragments pending reassembly
+ :param src_addr: IPv6 source address
+ """
+ self.assertEqual(1, len(data))
+ record = data[0]
+ # natEvent
+ self.assertEqual(ord(record[230]), 13)
+ # natQuotaExceededEvent
+ self.assertEqual(struct.pack("I", 5), record[466])
+ # maxFragmentsPendingReassembly
+ self.assertEqual(struct.pack("I", limit), record[475])
+ # sourceIPv6Address
+ self.assertEqual(src_addr, record[27])
+
+ def verify_ipfix_max_fragments_ip4(self, data, limit, src_addr):
+ """
+ Verify IPFIX maximum IPv4 fragments pending reassembly exceeded event
+
+ :param data: Decoded IPFIX data records
+ :param limit: Number of maximum fragments pending reassembly
+ :param src_addr: IPv4 source address
+ """
+ self.assertEqual(1, len(data))
+ record = data[0]
+ # natEvent
+ self.assertEqual(ord(record[230]), 13)
+ # natQuotaExceededEvent
+ self.assertEqual(struct.pack("I", 5), record[466])
+ # maxFragmentsPendingReassembly
+ self.assertEqual(struct.pack("I", limit), record[475])
+ # sourceIPv4Address
+ self.assertEqual(src_addr, record[8])
+
+ def verify_ipfix_bib(self, data, is_create, src_addr):
+ """
+ Verify IPFIX NAT64 BIB create and delete events
+
+ :param data: Decoded IPFIX data records
+ :param is_create: Create event if nonzero value otherwise delete event
+ :param src_addr: IPv6 source address
+ """
+ self.assertEqual(1, len(data))
+ record = data[0]
+ # natEvent
+ if is_create:
+ self.assertEqual(ord(record[230]), 10)
+ else:
+ self.assertEqual(ord(record[230]), 11)
+ # sourceIPv6Address
+ self.assertEqual(src_addr, record[27])
+ # postNATSourceIPv4Address
+ self.assertEqual(self.nat_addr_n, record[225])
+ # protocolIdentifier
+ self.assertEqual(IP_PROTOS.tcp, ord(record[4]))
+ # ingressVRFID
+ self.assertEqual(struct.pack("!I", 0), record[234])
+ # sourceTransportPort
+ self.assertEqual(struct.pack("!H", self.tcp_port_in), record[7])
+ # postNAPTSourceTransportPort
+ self.assertEqual(struct.pack("!H", self.tcp_port_out), record[227])
+
+ def verify_ipfix_nat64_ses(self, data, is_create, src_addr, dst_addr,
+ dst_port):
+ """
+ Verify IPFIX NAT64 session create and delete events
+
+ :param data: Decoded IPFIX data records
+ :param is_create: Create event if nonzero value otherwise delete event
+ :param src_addr: IPv6 source address
+ :param dst_addr: IPv4 destination address
+ :param dst_port: destination TCP port
+ """
+ self.assertEqual(1, len(data))
+ record = data[0]
+ # natEvent
+ if is_create:
+ self.assertEqual(ord(record[230]), 6)
+ else:
+ self.assertEqual(ord(record[230]), 7)
+ # sourceIPv6Address
+ self.assertEqual(src_addr, record[27])
+ # destinationIPv6Address
+ self.assertEqual(socket.inet_pton(socket.AF_INET6,
+ self.compose_ip6(dst_addr,
+ '64:ff9b::',
+ 96)),
+ record[28])
+ # postNATSourceIPv4Address
+ self.assertEqual(self.nat_addr_n, record[225])
+ # postNATDestinationIPv4Address
+ self.assertEqual(socket.inet_pton(socket.AF_INET, dst_addr),
+ record[226])
+ # protocolIdentifier
+ self.assertEqual(IP_PROTOS.tcp, ord(record[4]))
+ # ingressVRFID
+ self.assertEqual(struct.pack("!I", 0), record[234])
+ # sourceTransportPort
+ self.assertEqual(struct.pack("!H", self.tcp_port_in), record[7])
+ # postNAPTSourceTransportPort
+ self.assertEqual(struct.pack("!H", self.tcp_port_out), record[227])
+ # destinationTransportPort
+ self.assertEqual(struct.pack("!H", dst_port), record[11])
+ # postNAPTDestinationTransportPort
+ self.assertEqual(struct.pack("!H", dst_port), record[228])
+
class TestNAT44(MethodHolder):
""" NAT44 Test Cases """
@@ -2064,7 +2206,7 @@ class TestNAT44(MethodHolder):
self.verify_capture_out(capture)
self.nat44_add_address(self.nat_addr, is_add=0)
self.vapi.cli("ipfix flush") # FIXME this should be an API call
- capture = self.pg3.get_capture(3)
+ capture = self.pg3.get_capture(9)
ipfix = IPFIXDecoder()
# first load template
for p in capture:
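
The expected capture on pg3 grows from 3 to 9 packets because a flush now emits one template packet per registered template: the 3 that existed before (NAT44 session, addresses exhausted, max entries per user) plus the 6 added by this patch (max sessions, max BIBs, max IPv4 fragments, max IPv6 fragments, NAT64 BIB, NAT64 session), i.e. 3 + 6 = 9.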
@@ -2103,7 +2245,7 @@ class TestNAT44(MethodHolder):
self.pg_start()
capture = self.pg1.get_capture(0)
self.vapi.cli("ipfix flush") # FIXME this should be an API call
- capture = self.pg3.get_capture(3)
+ capture = self.pg3.get_capture(9)
ipfix = IPFIXDecoder()
# first load template
for p in capture:
@@ -2122,6 +2264,63 @@ class TestNAT44(MethodHolder):
data = ipfix.decode_data_set(p.getlayer(Set))
self.verify_ipfix_addr_exhausted(data)
+ @unittest.skipUnless(running_extended_tests(), "part of extended tests")
+ def test_ipfix_max_sessions(self):
+ """ IPFIX logging maximum session entries exceeded """
+ self.nat44_add_address(self.nat_addr)
+ self.vapi.nat44_interface_add_del_feature(self.pg0.sw_if_index)
+ self.vapi.nat44_interface_add_del_feature(self.pg1.sw_if_index,
+ is_inside=0)
+
+ nat44_config = self.vapi.nat_show_config()
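+ # session limit is 10 * translation_buckets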
+ max_sessions = 10 * nat44_config.translation_buckets
+
+ pkts = []
+ for i in range(0, max_sessions):
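+ # unique source address per packet so each one creates a new session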
+ src = "10.10.%u.%u" % ((i & 0xFF00) >> 8, i & 0xFF)
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src=src, dst=self.pg1.remote_ip4) /
+ TCP(sport=1025))
+ pkts.append(p)
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+
+ self.pg1.get_capture(max_sessions)
+ self.vapi.set_ipfix_exporter(collector_address=self.pg3.remote_ip4n,
+ src_address=self.pg3.local_ip4n,
+ path_mtu=512,
+ template_interval=10)
+ self.vapi.nat_ipfix(domain_id=self.ipfix_domain_id,
+ src_port=self.ipfix_src_port)
+
+ p = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) /
+ IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4) /
+ TCP(sport=1025))
+ self.pg0.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ self.pg1.get_capture(0)
+ self.vapi.cli("ipfix flush") # FIXME this should be an API call
+ capture = self.pg3.get_capture(9)
+ ipfix = IPFIXDecoder()
+ # first load template
+ for p in capture:
+ self.assertTrue(p.haslayer(IPFIX))
+ self.assertEqual(p[IP].src, self.pg3.local_ip4)
+ self.assertEqual(p[IP].dst, self.pg3.remote_ip4)
+ self.assertEqual(p[UDP].sport, self.ipfix_src_port)
+ self.assertEqual(p[UDP].dport, 4739)
+ self.assertEqual(p[IPFIX].observationDomainID,
+ self.ipfix_domain_id)
+ if p.haslayer(Template):
+ ipfix.add_template(p.getlayer(Template))
+ # verify events in data set
+ for p in capture:
+ if p.haslayer(Data):
+ data = ipfix.decode_data_set(p.getlayer(Set))
+ self.verify_ipfix_max_sessions(data, max_sessions)
+
def test_pool_addr_fib(self):
""" NAT44 add pool addresses to FIB """
static_addr = '10.0.0.10'
@@ -3196,6 +3395,52 @@ class TestNAT44(MethodHolder):
addresses = self.vapi.nat44_address_dump()
self.assertEqual(0, len(addresses))
+ def test_ipfix_max_frags(self):
+ """ IPFIX logging maximum fragments pending reassembly exceeded """
+ self.nat44_add_address(self.nat_addr)
+ self.vapi.nat44_interface_add_del_feature(self.pg0.sw_if_index)
+ self.vapi.nat44_interface_add_del_feature(self.pg1.sw_if_index,
+ is_inside=0)
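+ # with a zero fragment limit any fragmented packet exceeds the reassembly quota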
+ self.vapi.nat_set_reass(max_frag=0)
+ self.vapi.set_ipfix_exporter(collector_address=self.pg3.remote_ip4n,
+ src_address=self.pg3.local_ip4n,
+ path_mtu=512,
+ template_interval=10)
+ self.vapi.nat_ipfix(domain_id=self.ipfix_domain_id,
+ src_port=self.ipfix_src_port)
+
+ data = "A" * 4 + "B" * 16 + "C" * 3
+ self.tcp_port_in = random.randint(1025, 65535)
+ pkts = self.create_stream_frag(self.pg0,
+ self.pg1.remote_ip4,
+ self.tcp_port_in,
+ 20,
+ data)
+ self.pg0.add_stream(pkts[-1])
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ frags = self.pg1.get_capture(0)
+ self.vapi.cli("ipfix flush") # FIXME this should be an API call
+ capture = self.pg3.get_capture(9)
+ ipfix = IPFIXDecoder()
+ # first load template
+ for p in capture:
+ self.assertTrue(p.haslayer(IPFIX))
+ self.assertEqual(p[IP].src, self.pg3.local_ip4)
+ self.assertEqual(p[IP].dst, self.pg3.remote_ip4)
+ self.assertEqual(p[UDP].sport, self.ipfix_src_port)
+ self.assertEqual(p[UDP].dport, 4739)
+ self.assertEqual(p[IPFIX].observationDomainID,
+ self.ipfix_domain_id)
+ if p.haslayer(Template):
+ ipfix.add_template(p.getlayer(Template))
+ # verify events in data set
+ for p in capture:
+ if p.haslayer(Data):
+ data = ipfix.decode_data_set(p.getlayer(Set))
+ self.verify_ipfix_max_fragments_ip4(data, 0,
+ self.pg0.remote_ip4n)
+
def tearDown(self):
super(TestNAT44, self).tearDown()
if not self.vpp_dead:
@@ -3499,6 +3744,8 @@ class TestDeterministicNAT(MethodHolder):
self.assertEqual(ord(record[230]), 13)
# natQuotaExceededEvent
self.assertEqual('\x03\x00\x00\x00', record[466])
+ # maxEntriesPerUser
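+ # 1000 (0x3e8) sessions per user, encoded little-endian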
+ self.assertEqual('\xe8\x03\x00\x00', record[473])
# sourceIPv4Address
self.assertEqual(self.pg0.remote_ip4n, record[8])
@@ -3969,6 +4216,12 @@ class TestNAT64(MethodHolder):
""" NAT64 Test Cases """
@classmethod
+ def setUpConstants(cls):
+ super(TestNAT64, cls).setUpConstants()
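+ # shrink the NAT64 hash tables so the BIB/session limits can be reached in tests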
+ cls.vpp_cmdline.extend(["nat", "{", "nat64 bib hash buckets 128",
+ "nat64 st hash buckets 256", "}"])
+
+ @classmethod
def setUpClass(cls):
super(TestNAT64, cls).setUpClass()
@@ -3985,6 +4238,8 @@ class TestNAT64(MethodHolder):
cls.vrf1_nat_addr = '10.0.10.3'
cls.vrf1_nat_addr_n = socket.inet_pton(socket.AF_INET,
cls.vrf1_nat_addr)
+ cls.ipfix_src_port = 4739
+ cls.ipfix_domain_id = 1
cls.create_pg_interfaces(range(5))
cls.ip6_interfaces = list(cls.pg_interfaces[0:1])
@@ -4914,6 +5169,220 @@ class TestNAT64(MethodHolder):
addresses = self.vapi.nat64_pool_addr_dump()
self.assertEqual(0, len(addresses))
+ @unittest.skipUnless(running_extended_tests(), "part of extended tests")
+ def test_ipfix_max_bibs_sessions(self):
+ """ IPFIX logging maximum session and BIB entries exceeded """
+ max_bibs = 1280
+ max_sessions = 2560
+ remote_host_ip6 = self.compose_ip6(self.pg1.remote_ip4,
+ '64:ff9b::',
+ 96)
+
+ self.vapi.nat64_add_del_pool_addr_range(self.nat_addr_n,
+ self.nat_addr_n)
+ self.vapi.nat64_add_del_interface(self.pg0.sw_if_index)
+ self.vapi.nat64_add_del_interface(self.pg1.sw_if_index, is_inside=0)
+
+ pkts = []
+ src = ""
+ for i in range(0, max_bibs):
+ src = "fd01:aa::%x" % (i)
+ p = (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
+ IPv6(src=src, dst=remote_host_ip6) /
+ TCP(sport=12345, dport=80))
+ pkts.append(p)
+ p = (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
+ IPv6(src=src, dst=remote_host_ip6) /
+ TCP(sport=12345, dport=22))
+ pkts.append(p)
+ self.pg0.add_stream(pkts)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ self.pg1.get_capture(max_sessions)
+
+ self.vapi.set_ipfix_exporter(collector_address=self.pg3.remote_ip4n,
+ src_address=self.pg3.local_ip4n,
+ path_mtu=512,
+ template_interval=10)
+ self.vapi.nat_ipfix(domain_id=self.ipfix_domain_id,
+ src_port=self.ipfix_src_port)
+
+ p = (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
+ IPv6(src=src, dst=remote_host_ip6) /
+ TCP(sport=12345, dport=25))
+ self.pg0.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ self.pg1.get_capture(0)
+ self.vapi.cli("ipfix flush") # FIXME this should be an API call
+ capture = self.pg3.get_capture(9)
+ ipfix = IPFIXDecoder()
+ # first load template
+ for p in capture:
+ self.assertTrue(p.haslayer(IPFIX))
+ self.assertEqual(p[IP].src, self.pg3.local_ip4)
+ self.assertEqual(p[IP].dst, self.pg3.remote_ip4)
+ self.assertEqual(p[UDP].sport, self.ipfix_src_port)
+ self.assertEqual(p[UDP].dport, 4739)
+ self.assertEqual(p[IPFIX].observationDomainID,
+ self.ipfix_domain_id)
+ if p.haslayer(Template):
+ ipfix.add_template(p.getlayer(Template))
+ # verify events in data set
+ for p in capture:
+ if p.haslayer(Data):
+ data = ipfix.decode_data_set(p.getlayer(Set))
+ self.verify_ipfix_max_sessions(data, max_sessions)
+
+ p = (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
+ IPv6(src=self.pg0.remote_ip6, dst=remote_host_ip6) /
+ TCP(sport=12345, dport=80))
+ self.pg0.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ self.pg1.get_capture(0)
+ self.vapi.cli("ipfix flush") # FIXME this should be an API call
+ capture = self.pg3.get_capture(1)
+ # verify events in data set
+ for p in capture:
+ self.assertTrue(p.haslayer(IPFIX))
+ self.assertEqual(p[IP].src, self.pg3.local_ip4)
+ self.assertEqual(p[IP].dst, self.pg3.remote_ip4)
+ self.assertEqual(p[UDP].sport, self.ipfix_src_port)
+ self.assertEqual(p[UDP].dport, 4739)
+ self.assertEqual(p[IPFIX].observationDomainID,
+ self.ipfix_domain_id)
+ if p.haslayer(Data):
+ data = ipfix.decode_data_set(p.getlayer(Set))
+ self.verify_ipfix_max_bibs(data, max_bibs)
+
+ def test_ipfix_max_frags(self):
+ """ IPFIX logging maximum fragments pending reassembly exceeded """
+ self.vapi.nat64_add_del_pool_addr_range(self.nat_addr_n,
+ self.nat_addr_n)
+ self.vapi.nat64_add_del_interface(self.pg0.sw_if_index)
+ self.vapi.nat64_add_del_interface(self.pg1.sw_if_index, is_inside=0)
+ self.vapi.nat_set_reass(max_frag=0, is_ip6=1)
+ self.vapi.set_ipfix_exporter(collector_address=self.pg3.remote_ip4n,
+ src_address=self.pg3.local_ip4n,
+ path_mtu=512,
+ template_interval=10)
+ self.vapi.nat_ipfix(domain_id=self.ipfix_domain_id,
+ src_port=self.ipfix_src_port)
+
+ data = 'a' * 200
+ pkts = self.create_stream_frag_ip6(self.pg0, self.pg1.remote_ip4,
+ self.tcp_port_in, 20, data)
+ self.pg0.add_stream(pkts[-1])
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ self.pg1.get_capture(0)
+ self.vapi.cli("ipfix flush") # FIXME this should be an API call
+ capture = self.pg3.get_capture(9)
+ ipfix = IPFIXDecoder()
+ # first load template
+ for p in capture:
+ self.assertTrue(p.haslayer(IPFIX))
+ self.assertEqual(p[IP].src, self.pg3.local_ip4)
+ self.assertEqual(p[IP].dst, self.pg3.remote_ip4)
+ self.assertEqual(p[UDP].sport, self.ipfix_src_port)
+ self.assertEqual(p[UDP].dport, 4739)
+ self.assertEqual(p[IPFIX].observationDomainID,
+ self.ipfix_domain_id)
+ if p.haslayer(Template):
+ ipfix.add_template(p.getlayer(Template))
+ # verify events in data set
+ for p in capture:
+ if p.haslayer(Data):
+ data = ipfix.decode_data_set(p.getlayer(Set))
+ self.verify_ipfix_max_fragments_ip6(data, 0,
+ self.pg0.remote_ip6n)
+
+ def test_ipfix_bib_ses(self):
+ """ IPFIX logging NAT64 BIB/session create and delete events """
+ self.tcp_port_in = random.randint(1025, 65535)
+ remote_host_ip6 = self.compose_ip6(self.pg1.remote_ip4,
+ '64:ff9b::',
+ 96)
+
+ self.vapi.nat64_add_del_pool_addr_range(self.nat_addr_n,
+ self.nat_addr_n)
+ self.vapi.nat64_add_del_interface(self.pg0.sw_if_index)
+ self.vapi.nat64_add_del_interface(self.pg1.sw_if_index, is_inside=0)
+ self.vapi.set_ipfix_exporter(collector_address=self.pg3.remote_ip4n,
+ src_address=self.pg3.local_ip4n,
+ path_mtu=512,
+ template_interval=10)
+ self.vapi.nat_ipfix(domain_id=self.ipfix_domain_id,
+ src_port=self.ipfix_src_port)
+
+ # Create
+ p = (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
+ IPv6(src=self.pg0.remote_ip6, dst=remote_host_ip6) /
+ TCP(sport=self.tcp_port_in, dport=25))
+ self.pg0.add_stream(p)
+ self.pg_enable_capture(self.pg_interfaces)
+ self.pg_start()
+ p = self.pg1.get_capture(1)
+ self.tcp_port_out = p[0][TCP].sport
+ self.vapi.cli("ipfix flush") # FIXME this should be an API call
+ capture = self.pg3.get_capture(10)
+ ipfix = IPFIXDecoder()
+ # first load template
+ for p in capture:
+ self.assertTrue(p.haslayer(IPFIX))
+ self.assertEqual(p[IP].src, self.pg3.local_ip4)
+ self.assertEqual(p[IP].dst, self.pg3.remote_ip4)
+ self.assertEqual(p[UDP].sport, self.ipfix_src_port)
+ self.assertEqual(p[UDP].dport, 4739)
+ self.assertEqual(p[IPFIX].observationDomainID,
+ self.ipfix_domain_id)
+ if p.haslayer(Template):
+ ipfix.add_template(p.getlayer(Template))
+ # verify events in data set
+ for p in capture:
+ if p.haslayer(Data):
+ data = ipfix.decode_data_set(p.getlayer(Set))
+ if ord(data[0][230]) == 10:
+ self.verify_ipfix_bib(data, 1, self.pg0.remote_ip6n)
+ elif ord(data[0][230]) == 6:
+ self.verify_ipfix_nat64_ses(data,
+ 1,
+ self.pg0.remote_ip6n,
+ self.pg1.remote_ip4,
+ 25)
+ else:
+ self.logger.error(ppp("Unexpected or invalid packet: ", p))
+
+ # Delete
+ self.pg_enable_capture(self.pg_interfaces)
+ self.vapi.nat64_add_del_pool_addr_range(self.nat_addr_n,
+ self.nat_addr_n,
+ is_add=0)
+ self.vapi.cli("ipfix flush") # FIXME this should be an API call
+ capture = self.pg3.get_capture(2)
+ # verify events in data set
+ for p in capture:
+ self.assertTrue(p.haslayer(IPFIX))
+ self.assertEqual(p[IP].src, self.pg3.local_ip4)
+ self.assertEqual(p[IP].dst, self.pg3.remote_ip4)
+ self.assertEqual(p[UDP].sport, self.ipfix_src_port)
+ self.assertEqual(p[UDP].dport, 4739)
+ self.assertEqual(p[IPFIX].observationDomainID,
+ self.ipfix_domain_id)
+ if p.haslayer(Data):
+ data = ipfix.decode_data_set(p.getlayer(Set))
+ if ord(data[0][230]) == 11:
+ self.verify_ipfix_bib(data, 0, self.pg0.remote_ip6n)
+ elif ord(data[0][230]) == 7:
+ self.verify_ipfix_nat64_ses(data,
+ 0,
+ self.pg0.remote_ip6n,
+ self.pg1.remote_ip4,
+ 25)
+ else:
+ self.logger.error(ppp("Unexpected or invalid packet: ", p))
+
def nat64_get_ses_num(self):
"""
Return number of active NAT64 sessions.
@@ -4925,6 +5394,11 @@ class TestNAT64(MethodHolder):
"""
Clear NAT64 configuration.
"""
+ self.vapi.nat_ipfix(enable=0, src_port=self.ipfix_src_port,
+ domain_id=self.ipfix_domain_id)
+ self.ipfix_src_port = 4739
+ self.ipfix_domain_id = 1
+
self.vapi.nat64_set_timeouts()
interfaces = self.vapi.nat64_interface_dump()
@@ -4937,29 +5411,7 @@ class TestNAT64(MethodHolder):
intf.is_inside,
is_add=0)
- bib = self.vapi.nat64_bib_dump(IP_PROTOS.tcp)
- for bibe in bib:
- if bibe.is_static:
- self.vapi.nat64_add_del_static_bib(bibe.i_addr,
- bibe.o_addr,
- bibe.i_port,
- bibe.o_port,
- bibe.proto,
- bibe.vrf_id,
- is_add=0)
-
- bib = self.vapi.nat64_bib_dump(IP_PROTOS.udp)
- for bibe in bib:
- if bibe.is_static:
- self.vapi.nat64_add_del_static_bib(bibe.i_addr,
- bibe.o_addr,
- bibe.i_port,
- bibe.o_port,
- bibe.proto,
- bibe.vrf_id,
- is_add=0)
-
- bib = self.vapi.nat64_bib_dump(IP_PROTOS.icmp)
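+ # proto value 255 dumps BIB entries for all protocols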
+ bib = self.vapi.nat64_bib_dump(255)
for bibe in bib:
if bibe.is_static:
self.vapi.nat64_add_del_static_bib(bibe.i_addr,