author     Juraj Sloboda <jsloboda@cisco.com>       2018-07-23 12:22:54 +0200
committer  Ole Trøan <otroan@employees.org>         2018-09-20 07:15:03 +0000
commit     fe0aa7648703f63159be8362836b39d4727fbb0c (patch)
tree       623ae6470eba01b87ed41a94da14c8f7dbb31930 /src
parent     6e06d66795a3948bb75cf5fd1cfc5745fec29505 (diff)
NAT44 virtual fragmentation reassembly for endpoint-dependent mode (VPP-1325)
Change-Id: I36ece2ef2eaef9fa559d69ec7f7f07e7c16a7a9d
Signed-off-by: Juraj Sloboda <jsloboda@cisco.com>
Diffstat (limited to 'src')
-rwxr-xr-x  src/plugins/nat/in2out.c        711
-rwxr-xr-x  src/plugins/nat/nat.c           265
-rw-r--r--  src/plugins/nat/nat_inlines.h     3
-rwxr-xr-x  src/plugins/nat/nat_reass.c      49
-rw-r--r--  src/plugins/nat/nat_reass.h      10
-rwxr-xr-x  src/plugins/nat/out2in.c        405
6 files changed, 1296 insertions, 147 deletions
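
For orientation, the sketch below (plain C with hypothetical frag_t/reass_t types, not the VPP data structures) models the dispatch that the new nat44-ed-in2out-reass and nat44-ed-out2in-reass nodes in this patch perform: the first fragment carries the L4 header, so it creates or looks up the NAT session and records its index in the reassembly context; non-first fragments that arrive before it are cached and looped back for reprocessing once the first fragment has been handled.

/* Minimal standalone sketch of first-fragment session lookup plus
 * caching and replay of out-of-order fragments. Types and the fixed
 * session index are illustrative only. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SESS_NONE ((uint32_t) ~0)

typedef struct { bool is_first; uint32_t id; } frag_t;

typedef struct {
  uint32_t sess_index;   /* filled in by the first fragment */
  frag_t cached[8];      /* fragments waiting for the first one */
  int n_cached;
} reass_t;

static void
process_fragment (reass_t *r, frag_t f)
{
  if (f.is_first)
    {
      r->sess_index = 42;       /* pretend the slow path gave us a session */
      printf ("frag %u: first fragment, session %u, replaying %d cached\n",
              f.id, r->sess_index, r->n_cached);
      for (int i = 0; i < r->n_cached; i++)
        printf ("  replay frag %u using session %u\n",
                r->cached[i].id, r->sess_index);
      r->n_cached = 0;
      return;
    }
  if (r->sess_index == SESS_NONE)
    {
      /* first fragment not seen yet: cache this one for later replay */
      if (r->n_cached < 8)
        r->cached[r->n_cached++] = f;
      printf ("frag %u: cached until the first fragment arrives\n", f.id);
      return;
    }
  printf ("frag %u: translated using session %u\n", f.id, r->sess_index);
}

int
main (void)
{
  reass_t r = { .sess_index = SESS_NONE };
  frag_t f2 = { false, 2 }, f1 = { true, 1 }, f3 = { false, 3 };

  process_fragment (&r, f2);   /* out of order: cached */
  process_fragment (&r, f1);   /* first fragment: session found, frag 2 replayed */
  process_fragment (&r, f3);   /* translated directly */
  return 0;
}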
diff --git a/src/plugins/nat/in2out.c b/src/plugins/nat/in2out.c
index 661d8c1bef9..335bd85c298 100755
--- a/src/plugins/nat/in2out.c
+++ b/src/plugins/nat/in2out.c
@@ -123,6 +123,7 @@ vlib_node_registration_t nat44_ed_in2out_output_slowpath_node;
vlib_node_registration_t nat44_ed_hairpin_dst_node;
vlib_node_registration_t nat44_ed_hairpin_src_node;
vlib_node_registration_t nat44_ed_hairpinning_node;
+vlib_node_registration_t nat44_ed_in2out_reass_node;
#define foreach_snat_in2out_error \
_(UNSUPPORTED_PROTOCOL, "Unsupported protocol") \
@@ -711,6 +712,13 @@ out2:
return next0;
}
+static inline u32
+snat_icmp_hairpinning (snat_main_t *sm,
+ vlib_buffer_t * b0,
+ ip4_header_t * ip0,
+ icmp46_header_t * icmp0,
+ int is_ed);
+
static inline u32 icmp_in2out (snat_main_t *sm,
vlib_buffer_t * b0,
ip4_header_t * ip0,
@@ -732,6 +740,7 @@ static inline u32 icmp_in2out (snat_main_t *sm,
u8 dont_translate;
u32 new_addr0, old_addr0;
u16 old_id0, new_id0;
+ u16 old_checksum0, new_checksum0;
ip_csum_t sum0;
u16 checksum0;
u32 next0_tmp;
@@ -745,19 +754,20 @@ static inline u32 icmp_in2out (snat_main_t *sm,
if (next0 == SNAT_IN2OUT_NEXT_DROP || dont_translate)
goto out;
- sum0 = ip_incremental_checksum (0, icmp0,
- ntohs(ip0->length) - ip4_header_bytes (ip0));
- checksum0 = ~ip_csum_fold (sum0);
- if (PREDICT_FALSE(checksum0 != 0 && checksum0 != 0xffff))
+ if (PREDICT_TRUE (!ip4_is_fragment (ip0)))
{
- next0 = SNAT_IN2OUT_NEXT_DROP;
- goto out;
+ sum0 = ip_incremental_checksum (0, icmp0,
+ ntohs(ip0->length) - ip4_header_bytes (ip0));
+ checksum0 = ~ip_csum_fold (sum0);
+ if (PREDICT_FALSE(checksum0 != 0 && checksum0 != 0xffff))
+ {
+ next0 = SNAT_IN2OUT_NEXT_DROP;
+ goto out;
+ }
}
old_addr0 = ip0->src_address.as_u32;
new_addr0 = ip0->src_address.as_u32 = sm0.addr.as_u32;
- if (vnet_buffer(b0)->sw_if_index[VLIB_TX] == ~0)
- vnet_buffer(b0)->sw_if_index[VLIB_TX] = sm0.fib_index;
sum0 = ip0->checksum;
sum0 = ip_csum_update (sum0, old_addr0, new_addr0, ip4_header_t,
@@ -793,15 +803,27 @@ static inline u32 icmp_in2out (snat_main_t *sm,
goto out;
}
+ /* update inner destination IP address */
old_addr0 = inner_ip0->dst_address.as_u32;
inner_ip0->dst_address = sm0.addr;
new_addr0 = inner_ip0->dst_address.as_u32;
-
sum0 = icmp0->checksum;
sum0 = ip_csum_update (sum0, old_addr0, new_addr0, ip4_header_t,
dst_address /* changed member */);
icmp0->checksum = ip_csum_fold (sum0);
+ /* update inner IP header checksum */
+ old_checksum0 = inner_ip0->checksum;
+ sum0 = inner_ip0->checksum;
+ sum0 = ip_csum_update (sum0, old_addr0, new_addr0, ip4_header_t,
+ dst_address /* changed member */);
+ inner_ip0->checksum = ip_csum_fold (sum0);
+ new_checksum0 = inner_ip0->checksum;
+ sum0 = icmp0->checksum;
+ sum0 = ip_csum_update (sum0, old_checksum0, new_checksum0, ip4_header_t,
+ checksum);
+ icmp0->checksum = ip_csum_fold (sum0);
+
switch (protocol)
{
case SNAT_PROTOCOL_ICMP:
@@ -833,6 +855,14 @@ static inline u32 icmp_in2out (snat_main_t *sm,
}
}
+ if (vnet_buffer(b0)->sw_if_index[VLIB_TX] == ~0)
+ {
+ if (sm->deterministic ||
+ 0 != snat_icmp_hairpinning(sm, b0, ip0, icmp0,
+ sm->endpoint_dependent))
+ vnet_buffer(b0)->sw_if_index[VLIB_TX] = sm0.fib_index;
+ }
+
out:
return next0;
}
@@ -958,84 +988,153 @@ snat_hairpinning (snat_main_t *sm,
return 0;
}
-static inline void
+static inline u32
snat_icmp_hairpinning (snat_main_t *sm,
vlib_buffer_t * b0,
ip4_header_t * ip0,
icmp46_header_t * icmp0,
int is_ed)
{
- snat_session_key_t key0, sm0;
+ snat_session_key_t key0;
clib_bihash_kv_8_8_t kv0, value0;
- u32 new_dst_addr0 = 0, old_dst_addr0, si, ti = 0;
+ u32 old_dst_addr0, new_dst_addr0;
+ u32 old_addr0, new_addr0;
+ u16 old_port0, new_port0;
+ u16 old_checksum0, new_checksum0;
+ u32 si, ti = 0;
ip_csum_t sum0;
snat_session_t *s0;
- int rv;
+ snat_static_mapping_t *m0;
- if (!icmp_is_error_message (icmp0))
+ if (icmp_is_error_message (icmp0))
{
- icmp_echo_header_t *echo0 = (icmp_echo_header_t *)(icmp0+1);
- u16 icmp_id0 = echo0->identifier;
- key0.addr = ip0->dst_address;
- key0.port = icmp_id0;
- key0.protocol = SNAT_PROTOCOL_ICMP;
- key0.fib_index = sm->outside_fib_index;
- kv0.key = key0.as_u64;
+ ip4_header_t *inner_ip0 = 0;
+ tcp_udp_header_t *l4_header = 0;
- if (sm->num_workers > 1)
- ti = (clib_net_to_host_u16 (icmp_id0) - 1024) / sm->port_per_thread;
- else
- ti = sm->num_workers;
+ inner_ip0 = (ip4_header_t *)((icmp_echo_header_t *)(icmp0+1)+1);
+ l4_header = ip4_next_header (inner_ip0);
+ u32 protocol = ip_proto_to_snat_proto (inner_ip0->protocol);
+
+ if (protocol != SNAT_PROTOCOL_TCP && protocol != SNAT_PROTOCOL_UDP)
+ return 1;
- /* Check if destination is in active sessions */
if (is_ed)
{
clib_bihash_kv_16_8_t ed_kv, ed_value;
make_ed_kv (&ed_kv, &ip0->dst_address, &ip0->src_address,
- IP_PROTOCOL_ICMP, sm->outside_fib_index, icmp_id0, 0);
- rv = clib_bihash_search_16_8 (&sm->per_thread_data[ti].out2in_ed,
- &ed_kv, &ed_value);
+ inner_ip0->protocol, sm->outside_fib_index,
+ l4_header->src_port, l4_header->dst_port);
+ if (clib_bihash_search_16_8 (&sm->per_thread_data[ti].out2in_ed,
+ &ed_kv, &ed_value))
+ return 1;
si = ed_value.value;
}
else
{
- rv = clib_bihash_search_8_8 (&sm->per_thread_data[ti].out2in, &kv0,
- &value0);
+ key0.addr = ip0->dst_address;
+ key0.port = l4_header->src_port;
+ key0.protocol = protocol;
+ key0.fib_index = sm->outside_fib_index;
+ kv0.key = key0.as_u64;
+ if (clib_bihash_search_8_8 (&sm->per_thread_data[ti].out2in, &kv0,
+ &value0))
+ return 1;
si = value0.value;
}
- if (rv)
+ s0 = pool_elt_at_index (sm->per_thread_data[ti].sessions, si);
+ new_dst_addr0 = s0->in2out.addr.as_u32;
+ vnet_buffer(b0)->sw_if_index[VLIB_TX] = s0->in2out.fib_index;
+
+ /* update inner source IP address */
+ old_addr0 = inner_ip0->src_address.as_u32;
+ inner_ip0->src_address.as_u32 = new_dst_addr0;
+ new_addr0 = inner_ip0->src_address.as_u32;
+ sum0 = icmp0->checksum;
+ sum0 = ip_csum_update (sum0, old_addr0, new_addr0, ip4_header_t,
+ src_address);
+ icmp0->checksum = ip_csum_fold (sum0);
+
+ /* update inner IP header checksum */
+ old_checksum0 = inner_ip0->checksum;
+ sum0 = inner_ip0->checksum;
+ sum0 = ip_csum_update (sum0, old_addr0, new_addr0, ip4_header_t,
+ src_address);
+ inner_ip0->checksum = ip_csum_fold (sum0);
+ new_checksum0 = inner_ip0->checksum;
+ sum0 = icmp0->checksum;
+ sum0 = ip_csum_update (sum0, old_checksum0, new_checksum0, ip4_header_t,
+ checksum);
+ icmp0->checksum = ip_csum_fold (sum0);
+
+ /* update inner source port */
+ old_port0 = l4_header->src_port;
+ l4_header->src_port = s0->in2out.port;
+ new_port0 = l4_header->src_port;
+ sum0 = icmp0->checksum;
+ sum0 = ip_csum_update (sum0, old_port0, new_port0, tcp_udp_header_t,
+ src_port);
+ icmp0->checksum = ip_csum_fold (sum0);
+ }
+ else
+ {
+ if (!is_ed)
{
- /* or static mappings */
- if (!snat_static_mapping_match(sm, key0, &sm0, 1, 0, 0, 0, 0))
+ icmp_echo_header_t *echo0 = (icmp_echo_header_t *)(icmp0+1);
+ u16 icmp_id0 = echo0->identifier;
+ key0.addr = ip0->dst_address;
+ key0.port = icmp_id0;
+ key0.protocol = SNAT_PROTOCOL_ICMP;
+ key0.fib_index = sm->outside_fib_index;
+ kv0.key = key0.as_u64;
+ if (sm->num_workers > 1)
+ ti = (clib_net_to_host_u16 (icmp_id0) - 1024) / sm->port_per_thread;
+ else
+ ti = sm->num_workers;
+ int rv = clib_bihash_search_8_8 (&sm->per_thread_data[ti].out2in, &kv0,
+ &value0);
+ if (!rv)
{
- new_dst_addr0 = sm0.addr.as_u32;
- vnet_buffer(b0)->sw_if_index[VLIB_TX] = sm0.fib_index;
+ si = value0.value;
+ s0 = pool_elt_at_index (sm->per_thread_data[ti].sessions, si);
+ new_dst_addr0 = s0->in2out.addr.as_u32;
+ vnet_buffer(b0)->sw_if_index[VLIB_TX] = s0->in2out.fib_index;
+ echo0->identifier = s0->in2out.port;
+ sum0 = icmp0->checksum;
+ sum0 = ip_csum_update (sum0, icmp_id0, s0->in2out.port,
+ icmp_echo_header_t, identifier);
+ icmp0->checksum = ip_csum_fold (sum0);
+ goto change_addr;
}
- }
- else
- {
- s0 = pool_elt_at_index (sm->per_thread_data[ti].sessions, si);
- new_dst_addr0 = s0->in2out.addr.as_u32;
- vnet_buffer(b0)->sw_if_index[VLIB_TX] = s0->in2out.fib_index;
- echo0->identifier = s0->in2out.port;
- sum0 = icmp0->checksum;
- sum0 = ip_csum_update (sum0, icmp_id0, s0->in2out.port,
- icmp_echo_header_t, identifier);
- icmp0->checksum = ip_csum_fold (sum0);
+ ti = 0;
}
- /* Destination is behind the same NAT, use internal address and port */
- if (new_dst_addr0)
- {
- old_dst_addr0 = ip0->dst_address.as_u32;
- ip0->dst_address.as_u32 = new_dst_addr0;
- sum0 = ip0->checksum;
- sum0 = ip_csum_update (sum0, old_dst_addr0, new_dst_addr0,
- ip4_header_t, dst_address);
- ip0->checksum = ip_csum_fold (sum0);
- }
- }
+ key0.addr = ip0->dst_address;
+ key0.port = 0;
+ key0.protocol = 0;
+ key0.fib_index = sm->outside_fib_index;
+ kv0.key = key0.as_u64;
+
+ if (clib_bihash_search_8_8 (&sm->static_mapping_by_external, &kv0, &value0))
+ return 1;
+
+ m0 = pool_elt_at_index (sm->static_mappings, value0.value);
+ new_dst_addr0 = m0->local_addr.as_u32;
+ if (vnet_buffer(b0)->sw_if_index[VLIB_TX] == ~0)
+ vnet_buffer(b0)->sw_if_index[VLIB_TX] = m0->fib_index;
+ }
+change_addr:
+ /* Destination is behind the same NAT, use internal address and port */
+ if (new_dst_addr0)
+ {
+ old_dst_addr0 = ip0->dst_address.as_u32;
+ ip0->dst_address.as_u32 = new_dst_addr0;
+ sum0 = ip0->checksum;
+ sum0 = ip_csum_update (sum0, old_dst_addr0, new_dst_addr0,
+ ip4_header_t, dst_address);
+ ip0->checksum = ip_csum_fold (sum0);
+ }
+ return 0;
}
static inline u32 icmp_in2out_slow_path (snat_main_t *sm,
@@ -1055,9 +1154,6 @@ static inline u32 icmp_in2out_slow_path (snat_main_t *sm,
snat_session_t * s0 = *p_s0;
if (PREDICT_TRUE(next0 != SNAT_IN2OUT_NEXT_DROP && s0))
{
- /* Hairpinning */
- if (vnet_buffer(b0)->sw_if_index[VLIB_TX] == 0)
- snat_icmp_hairpinning(sm, b0, ip0, icmp0, sm->endpoint_dependent);
/* Accounting */
nat44_session_update_counters (s0, now,
vlib_buffer_length_in_chain (sm->vlib_main, b0));
@@ -2014,7 +2110,8 @@ nat44_reass_hairpinning (snat_main_t *sm,
ip4_header_t * ip0,
u16 sport,
u16 dport,
- u32 proto0)
+ u32 proto0,
+ int is_ed)
{
snat_session_key_t key0, sm0;
snat_session_t * s0;
@@ -2024,6 +2121,7 @@ nat44_reass_hairpinning (snat_main_t *sm,
u16 new_dst_port0, old_dst_port0;
udp_header_t * udp0;
tcp_header_t * tcp0;
+ int rv;
key0.addr = ip0->dst_address;
key0.port = dport;
@@ -2048,9 +2146,24 @@ nat44_reass_hairpinning (snat_main_t *sm,
else
ti = sm->num_workers;
- if (!clib_bihash_search_8_8 (&sm->per_thread_data[ti].out2in, &kv0, &value0))
+ if (is_ed)
{
+ clib_bihash_kv_16_8_t ed_kv, ed_value;
+ make_ed_kv (&ed_kv, &ip0->dst_address, &ip0->src_address,
+ ip0->protocol, sm->outside_fib_index, udp0->dst_port,
+ udp0->src_port);
+ rv = clib_bihash_search_16_8 (&sm->per_thread_data[ti].out2in_ed,
+ &ed_kv, &ed_value);
+ si = ed_value.value;
+ }
+ else
+ {
+ rv = clib_bihash_search_8_8 (&sm->per_thread_data[ti].out2in, &kv0,
+ &value0);
si = value0.value;
+ }
+ if (!rv)
+ {
s0 = pool_elt_at_index (sm->per_thread_data[ti].sessions, si);
new_dst_addr0 = s0->in2out.addr.as_u32;
new_dst_port0 = s0->in2out.port;
@@ -2273,7 +2386,7 @@ nat44_in2out_reass_node_fn (vlib_main_t * vm,
/* Hairpinning */
nat44_reass_hairpinning (sm, b0, ip0, s0->out2in.port,
- s0->ext_host_port, proto0);
+ s0->ext_host_port, proto0, 0);
/* Accounting */
nat44_session_update_counters (s0, now,
@@ -3259,6 +3372,18 @@ nat44_ed_in2out_node_fn_inline (vlib_main_t * vm,
}
else
{
+ if (PREDICT_FALSE (proto0 == ~0))
+ {
+ next0 = SNAT_IN2OUT_NEXT_SLOW_PATH;
+ goto trace00;
+ }
+
+ if (ip4_is_fragment (ip0))
+ {
+ next0 = SNAT_IN2OUT_NEXT_REASS;
+ goto trace00;
+ }
+
if (is_output_feature)
{
if (PREDICT_FALSE(nat_not_translate_output_feature_fwd(
@@ -3266,18 +3391,11 @@ nat44_ed_in2out_node_fn_inline (vlib_main_t * vm,
goto trace00;
}
- if (PREDICT_FALSE (proto0 == ~0 || proto0 == SNAT_PROTOCOL_ICMP))
- {
- next0 = SNAT_IN2OUT_NEXT_SLOW_PATH;
- goto trace00;
- }
-
- if (ip4_is_fragment (ip0))
- {
- b0->error = node->errors[SNAT_IN2OUT_ERROR_DROP_FRAGMENT];
- next0 = SNAT_IN2OUT_NEXT_DROP;
- goto trace00;
- }
+ if (PREDICT_FALSE (proto0 == SNAT_PROTOCOL_ICMP))
+ {
+ next0 = SNAT_IN2OUT_NEXT_SLOW_PATH;
+ goto trace00;
+ }
}
make_ed_kv (&kv0, &ip0->src_address, &ip0->dst_address, ip0->protocol,
@@ -3444,6 +3562,18 @@ nat44_ed_in2out_node_fn_inline (vlib_main_t * vm,
}
else
{
+ if (PREDICT_FALSE (proto1 == ~0))
+ {
+ next1 = SNAT_IN2OUT_NEXT_SLOW_PATH;
+ goto trace01;
+ }
+
+ if (ip4_is_fragment (ip1))
+ {
+ next1 = SNAT_IN2OUT_NEXT_REASS;
+ goto trace01;
+ }
+
if (is_output_feature)
{
if (PREDICT_FALSE(nat_not_translate_output_feature_fwd(
@@ -3451,18 +3581,11 @@ nat44_ed_in2out_node_fn_inline (vlib_main_t * vm,
goto trace01;
}
- if (PREDICT_FALSE (proto1 == ~0 || proto1 == SNAT_PROTOCOL_ICMP))
- {
- next1 = SNAT_IN2OUT_NEXT_SLOW_PATH;
- goto trace01;
- }
-
- if (ip4_is_fragment (ip1))
- {
- b1->error = node->errors[SNAT_IN2OUT_ERROR_DROP_FRAGMENT];
- next1 = SNAT_IN2OUT_NEXT_DROP;
- goto trace01;
- }
+ if (PREDICT_FALSE (proto1 == SNAT_PROTOCOL_ICMP))
+ {
+ next1 = SNAT_IN2OUT_NEXT_SLOW_PATH;
+ goto trace01;
+ }
}
make_ed_kv (&kv1, &ip1->src_address, &ip1->dst_address, ip1->protocol,
@@ -3658,23 +3781,28 @@ nat44_ed_in2out_node_fn_inline (vlib_main_t * vm,
}
else
{
- if (is_output_feature)
+ if (PREDICT_FALSE (proto0 == ~0))
{
- if (PREDICT_FALSE(nat_not_translate_output_feature_fwd(
- sm, ip0, thread_index, now, vm, b0)))
- goto trace0;
+ next0 = SNAT_IN2OUT_NEXT_SLOW_PATH;
+ goto trace0;
}
- if (PREDICT_FALSE (proto0 == ~0 || proto0 == SNAT_PROTOCOL_ICMP))
+ if (ip4_is_fragment (ip0))
{
- next0 = SNAT_IN2OUT_NEXT_SLOW_PATH;
+ next0 = SNAT_IN2OUT_NEXT_REASS;
goto trace0;
}
- if (ip4_is_fragment (ip0))
+ if (is_output_feature)
+ {
+ if (PREDICT_FALSE(nat_not_translate_output_feature_fwd(
+ sm, ip0, thread_index, now, vm, b0)))
+ goto trace0;
+ }
+
+ if (PREDICT_FALSE (proto0 == SNAT_PROTOCOL_ICMP))
{
- b0->error = node->errors[SNAT_IN2OUT_ERROR_DROP_FRAGMENT];
- next0 = SNAT_IN2OUT_NEXT_DROP;
+ next0 = SNAT_IN2OUT_NEXT_SLOW_PATH;
goto trace0;
}
}
@@ -3835,7 +3963,7 @@ VLIB_REGISTER_NODE (nat44_ed_in2out_node) = {
[SNAT_IN2OUT_NEXT_LOOKUP] = "ip4-lookup",
[SNAT_IN2OUT_NEXT_SLOW_PATH] = "nat44-ed-in2out-slowpath",
[SNAT_IN2OUT_NEXT_ICMP_ERROR] = "ip4-icmp-error",
- [SNAT_IN2OUT_NEXT_REASS] = "nat44-in2out-reass",
+ [SNAT_IN2OUT_NEXT_REASS] = "nat44-ed-in2out-reass",
},
};
@@ -3869,7 +3997,7 @@ VLIB_REGISTER_NODE (nat44_ed_in2out_output_node) = {
[SNAT_IN2OUT_NEXT_LOOKUP] = "interface-output",
[SNAT_IN2OUT_NEXT_SLOW_PATH] = "nat44-ed-in2out-output-slowpath",
[SNAT_IN2OUT_NEXT_ICMP_ERROR] = "ip4-icmp-error",
- [SNAT_IN2OUT_NEXT_REASS] = "nat44-in2out-reass",
+ [SNAT_IN2OUT_NEXT_REASS] = "nat44-ed-in2out-reass-output",
},
};
@@ -3904,7 +4032,7 @@ VLIB_REGISTER_NODE (nat44_ed_in2out_slowpath_node) = {
[SNAT_IN2OUT_NEXT_LOOKUP] = "ip4-lookup",
[SNAT_IN2OUT_NEXT_SLOW_PATH] = "nat44-ed-in2out-slowpath",
[SNAT_IN2OUT_NEXT_ICMP_ERROR] = "ip4-icmp-error",
- [SNAT_IN2OUT_NEXT_REASS] = "nat44-in2out-reass",
+ [SNAT_IN2OUT_NEXT_REASS] = "nat44-ed-in2out-reass",
},
};
@@ -3939,13 +4067,394 @@ VLIB_REGISTER_NODE (nat44_ed_in2out_output_slowpath_node) = {
[SNAT_IN2OUT_NEXT_LOOKUP] = "interface-output",
[SNAT_IN2OUT_NEXT_SLOW_PATH] = "nat44-ed-in2out-output-slowpath",
[SNAT_IN2OUT_NEXT_ICMP_ERROR] = "ip4-icmp-error",
- [SNAT_IN2OUT_NEXT_REASS] = "nat44-in2out-reass",
+ [SNAT_IN2OUT_NEXT_REASS] = "nat44-ed-in2out-reass",
},
};
VLIB_NODE_FUNCTION_MULTIARCH (nat44_ed_in2out_output_slowpath_node,
nat44_ed_in2out_output_slow_path_fn);
+static inline uword
+nat44_ed_in2out_reass_node_fn_inline (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame,
+ int is_output_feature)
+{
+ u32 n_left_from, *from, *to_next;
+ snat_in2out_next_t next_index;
+ u32 pkts_processed = 0;
+ snat_main_t *sm = &snat_main;
+ f64 now = vlib_time_now (vm);
+ u32 thread_index = vm->thread_index;
+ snat_main_per_thread_data_t *per_thread_data =
+ &sm->per_thread_data[thread_index];
+ u32 *fragments_to_drop = 0;
+ u32 *fragments_to_loopback = 0;
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0, sw_if_index0, proto0, rx_fib_index0, new_addr0, old_addr0;
+ u32 iph_offset0 = 0;
+ vlib_buffer_t *b0;
+ u32 next0;
+ u8 cached0 = 0;
+ ip4_header_t *ip0 = 0;
+ nat_reass_ip4_t *reass0;
+ udp_header_t * udp0;
+ tcp_header_t * tcp0;
+ icmp46_header_t * icmp0;
+ clib_bihash_kv_16_8_t kv0, value0;
+ snat_session_t * s0 = 0;
+ u16 old_port0, new_port0;
+ ip_csum_t sum0;
+
+ /* speculatively enqueue b0 to the current next frame */
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ next0 = SNAT_IN2OUT_NEXT_LOOKUP;
+
+ sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_RX];
+ rx_fib_index0 = fib_table_get_index_for_sw_if_index (FIB_PROTOCOL_IP4,
+ sw_if_index0);
+
+ if (PREDICT_FALSE (nat_reass_is_drop_frag(0)))
+ {
+ next0 = SNAT_IN2OUT_NEXT_DROP;
+ b0->error = node->errors[SNAT_IN2OUT_ERROR_DROP_FRAGMENT];
+ goto trace0;
+ }
+
+ if (is_output_feature)
+ iph_offset0 = vnet_buffer (b0)->ip.save_rewrite_length;
+
+ ip0 = (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b0) +
+ iph_offset0);
+
+ udp0 = ip4_next_header (ip0);
+ tcp0 = (tcp_header_t *) udp0;
+ icmp0 = (icmp46_header_t *) udp0;
+ proto0 = ip_proto_to_snat_proto (ip0->protocol);
+
+ reass0 = nat_ip4_reass_find_or_create (ip0->src_address,
+ ip0->dst_address,
+ ip0->fragment_id,
+ ip0->protocol,
+ 1,
+ &fragments_to_drop);
+
+ if (PREDICT_FALSE (!reass0))
+ {
+ next0 = SNAT_IN2OUT_NEXT_DROP;
+ b0->error = node->errors[SNAT_IN2OUT_ERROR_MAX_REASS];
+ nat_log_notice ("maximum reassemblies exceeded");
+ goto trace0;
+ }
+
+ if (PREDICT_FALSE (ip4_is_first_fragment (ip0)))
+ {
+ if (PREDICT_FALSE (proto0 == SNAT_PROTOCOL_ICMP))
+ {
+ if (is_output_feature)
+ {
+ if (PREDICT_FALSE(nat_not_translate_output_feature_fwd(
+ sm, ip0, thread_index, now, vm, b0)))
+ reass0->flags |= NAT_REASS_FLAG_ED_DONT_TRANSLATE;
+ goto trace0;
+ }
+
+ next0 = icmp_in2out_slow_path
+ (sm, b0, ip0, icmp0, sw_if_index0, rx_fib_index0, node,
+ next0, now, thread_index, &s0);
+
+ if (PREDICT_TRUE(next0 != SNAT_IN2OUT_NEXT_DROP))
+ {
+ if (s0)
+ reass0->sess_index = s0 - per_thread_data->sessions;
+ else
+ reass0->flags |= NAT_REASS_FLAG_ED_DONT_TRANSLATE;
+ nat_ip4_reass_get_frags (reass0, &fragments_to_loopback);
+ }
+
+ goto trace0;
+ }
+
+ make_ed_kv (&kv0, &ip0->src_address, &ip0->dst_address, ip0->protocol,
+ rx_fib_index0, udp0->src_port, udp0->dst_port);
+
+ if (clib_bihash_search_16_8 (&per_thread_data->in2out_ed, &kv0, &value0))
+ {
+ if (is_output_feature)
+ {
+ if (PREDICT_FALSE(nat44_ed_not_translate_output_feature(
+ sm, ip0, ip0->protocol, udp0->src_port,
+ udp0->dst_port, thread_index, sw_if_index0,
+ vnet_buffer(b0)->sw_if_index[VLIB_TX])))
+ {
+ reass0->flags |= NAT_REASS_FLAG_ED_DONT_TRANSLATE;
+ nat_ip4_reass_get_frags (reass0, &fragments_to_loopback);
+ goto trace0;
+ }
+ }
+ else
+ {
+ if (PREDICT_FALSE(nat44_ed_not_translate(sm, node,
+ sw_if_index0, ip0, proto0, rx_fib_index0,
+ thread_index)))
+ {
+ reass0->flags |= NAT_REASS_FLAG_ED_DONT_TRANSLATE;
+ nat_ip4_reass_get_frags (reass0, &fragments_to_loopback);
+ goto trace0;
+ }
+ }
+
+ next0 = slow_path_ed (sm, b0, rx_fib_index0, &kv0,
+ &s0, node, next0, thread_index, now);
+
+ if (PREDICT_FALSE (next0 == SNAT_IN2OUT_NEXT_DROP))
+ goto trace0;
+
+ reass0->sess_index = s0 - per_thread_data->sessions;
+ }
+ else
+ {
+ s0 = pool_elt_at_index (per_thread_data->sessions,
+ value0.value);
+ reass0->sess_index = value0.value;
+ }
+ nat_ip4_reass_get_frags (reass0, &fragments_to_loopback);
+ }
+ else
+ {
+ if (reass0->flags & NAT_REASS_FLAG_ED_DONT_TRANSLATE)
+ goto trace0;
+ if (PREDICT_FALSE (reass0->sess_index == (u32) ~0))
+ {
+ if (nat_ip4_reass_add_fragment (reass0, bi0, &fragments_to_drop))
+ {
+ b0->error = node->errors[SNAT_IN2OUT_ERROR_MAX_FRAG];
+ nat_log_notice ("maximum fragments per reassembly exceeded");
+ next0 = SNAT_IN2OUT_NEXT_DROP;
+ goto trace0;
+ }
+ cached0 = 1;
+ goto trace0;
+ }
+ s0 = pool_elt_at_index (per_thread_data->sessions,
+ reass0->sess_index);
+ }
+
+ old_addr0 = ip0->src_address.as_u32;
+ ip0->src_address = s0->out2in.addr;
+ new_addr0 = ip0->src_address.as_u32;
+ if (!is_output_feature)
+ vnet_buffer(b0)->sw_if_index[VLIB_TX] = s0->out2in.fib_index;
+
+ sum0 = ip0->checksum;
+ sum0 = ip_csum_update (sum0, old_addr0, new_addr0,
+ ip4_header_t,
+ src_address /* changed member */);
+ if (PREDICT_FALSE (is_twice_nat_session (s0)))
+ sum0 = ip_csum_update (sum0, ip0->dst_address.as_u32,
+ s0->ext_host_addr.as_u32, ip4_header_t,
+ dst_address);
+ ip0->checksum = ip_csum_fold (sum0);
+
+ if (PREDICT_FALSE (ip4_is_first_fragment (ip0)))
+ {
+ if (PREDICT_TRUE(proto0 == SNAT_PROTOCOL_TCP))
+ {
+ old_port0 = tcp0->src_port;
+ tcp0->src_port = s0->out2in.port;
+ new_port0 = tcp0->src_port;
+
+ sum0 = tcp0->checksum;
+ sum0 = ip_csum_update (sum0, old_addr0, new_addr0,
+ ip4_header_t,
+ dst_address /* changed member */);
+ sum0 = ip_csum_update (sum0, old_port0, new_port0,
+ ip4_header_t /* cheat */,
+ length /* changed member */);
+ if (PREDICT_FALSE (is_twice_nat_session (s0)))
+ {
+ sum0 = ip_csum_update (sum0, ip0->dst_address.as_u32,
+ s0->ext_host_addr.as_u32,
+ ip4_header_t, dst_address);
+ sum0 = ip_csum_update (sum0, tcp0->dst_port,
+ s0->ext_host_port, ip4_header_t,
+ length);
+ tcp0->dst_port = s0->ext_host_port;
+ ip0->dst_address.as_u32 = s0->ext_host_addr.as_u32;
+ }
+ tcp0->checksum = ip_csum_fold(sum0);
+ }
+ else
+ {
+ old_port0 = udp0->src_port;
+ udp0->src_port = s0->out2in.port;
+ udp0->checksum = 0;
+ if (PREDICT_FALSE (is_twice_nat_session (s0)))
+ {
+ udp0->dst_port = s0->ext_host_port;
+ ip0->dst_address.as_u32 = s0->ext_host_addr.as_u32;
+ }
+ }
+ }
+
+ /* Hairpinning */
+ if (PREDICT_TRUE(proto0 != SNAT_PROTOCOL_ICMP))
+ nat44_reass_hairpinning (sm, b0, ip0, s0->out2in.port,
+ s0->ext_host_port, proto0, 1);
+ else
+ snat_icmp_hairpinning(sm, b0, ip0, icmp0, 1);
+
+ /* Accounting */
+ nat44_session_update_counters (s0, now,
+ vlib_buffer_length_in_chain (vm, b0));
+ /* Per-user LRU list maintenance */
+ nat44_session_update_lru (sm, s0, thread_index);
+
+ trace0:
+ if (PREDICT_FALSE((node->flags & VLIB_NODE_FLAG_TRACE)
+ && (b0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ nat44_in2out_reass_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->cached = cached0;
+ t->sw_if_index = sw_if_index0;
+ t->next_index = next0;
+ }
+
+ if (cached0)
+ {
+ n_left_to_next++;
+ to_next--;
+ }
+ else
+ {
+ pkts_processed += next0 != SNAT_IN2OUT_NEXT_DROP;
+
+ /* verify speculative enqueue, maybe switch current next frame */
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ if (n_left_from == 0 && vec_len (fragments_to_loopback))
+ {
+ from = vlib_frame_vector_args (frame);
+ u32 len = vec_len (fragments_to_loopback);
+ if (len <= VLIB_FRAME_SIZE)
+ {
+ clib_memcpy (from, fragments_to_loopback, sizeof (u32) * len);
+ n_left_from = len;
+ vec_reset_length (fragments_to_loopback);
+ }
+ else
+ {
+ clib_memcpy (from,
+ fragments_to_loopback + (len - VLIB_FRAME_SIZE),
+ sizeof (u32) * VLIB_FRAME_SIZE);
+ n_left_from = VLIB_FRAME_SIZE;
+ _vec_len (fragments_to_loopback) = len - VLIB_FRAME_SIZE;
+ }
+ }
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ vlib_node_increment_counter (vm, nat44_in2out_reass_node.index,
+ SNAT_IN2OUT_ERROR_IN2OUT_PACKETS,
+ pkts_processed);
+
+ nat_send_all_to_node (vm, fragments_to_drop, node,
+ &node->errors[SNAT_IN2OUT_ERROR_DROP_FRAGMENT],
+ SNAT_IN2OUT_NEXT_DROP);
+
+ vec_free (fragments_to_drop);
+ vec_free (fragments_to_loopback);
+ return frame->n_vectors;
+}
+
+static uword
+nat44_ed_in2out_reass_node_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return nat44_ed_in2out_reass_node_fn_inline (vm, node, frame, 0);
+}
+
+VLIB_REGISTER_NODE (nat44_ed_in2out_reass_node) = {
+ .function = nat44_ed_in2out_reass_node_fn,
+ .name = "nat44-ed-in2out-reass",
+ .vector_size = sizeof (u32),
+ .format_trace = format_nat44_in2out_reass_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(snat_in2out_error_strings),
+ .error_strings = snat_in2out_error_strings,
+
+ .n_next_nodes = SNAT_IN2OUT_N_NEXT,
+ .next_nodes = {
+ [SNAT_IN2OUT_NEXT_DROP] = "error-drop",
+ [SNAT_IN2OUT_NEXT_LOOKUP] = "ip4-lookup",
+ [SNAT_IN2OUT_NEXT_SLOW_PATH] = "nat44-in2out-slowpath",
+ [SNAT_IN2OUT_NEXT_ICMP_ERROR] = "ip4-icmp-error",
+ [SNAT_IN2OUT_NEXT_REASS] = "nat44-ed-in2out-reass",
+ },
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH (nat44_ed_in2out_reass_node,
+ nat44_ed_in2out_reass_node_fn);
+
+static uword
+nat44_ed_in2out_reass_output_node_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return nat44_ed_in2out_reass_node_fn_inline (vm, node, frame, 1);
+}
+
+VLIB_REGISTER_NODE (nat44_ed_in2out_reass_output_node) = {
+ .function = nat44_ed_in2out_reass_output_node_fn,
+ .name = "nat44-ed-in2out-reass-output",
+ .vector_size = sizeof (u32),
+ .format_trace = format_nat44_in2out_reass_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(snat_in2out_error_strings),
+ .error_strings = snat_in2out_error_strings,
+
+ .n_next_nodes = SNAT_IN2OUT_N_NEXT,
+ .next_nodes = {
+ [SNAT_IN2OUT_NEXT_DROP] = "error-drop",
+ [SNAT_IN2OUT_NEXT_LOOKUP] = "interface-output",
+ [SNAT_IN2OUT_NEXT_SLOW_PATH] = "nat44-in2out-slowpath",
+ [SNAT_IN2OUT_NEXT_ICMP_ERROR] = "ip4-icmp-error",
+ [SNAT_IN2OUT_NEXT_REASS] = "nat44-ed-in2out-reass",
+ },
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH (nat44_ed_in2out_reass_output_node,
+ nat44_ed_in2out_reass_output_node_fn);
+
/**************************/
/*** deterministic mode ***/
/**************************/
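
The icmp_in2out() and snat_icmp_hairpinning() hunks above rewrite the address and port inside the inner IP header embedded in an ICMP error payload, fix up the inner IP checksum, and then fold the change of that checksum field itself into the outer ICMP checksum. The standalone helper below (plain C, not VPP's ip_csum_* API) illustrates the RFC 1624 incremental update rule those ip_csum_update() calls are built on, for a single 16-bit word changing from old_w to new_w.

/* Incremental one's-complement checksum update per RFC 1624:
 * HC' = ~(~HC + ~m + m'), where m -> m' is the 16-bit word that changed. */
#include <stdint.h>
#include <stdio.h>

static uint16_t
csum_update16 (uint16_t checksum, uint16_t old_w, uint16_t new_w)
{
  uint32_t sum = (uint16_t) ~checksum;
  sum += (uint16_t) ~old_w;
  sum += new_w;
  while (sum >> 16)                       /* fold carries back into 16 bits */
    sum = (sum & 0xffff) + (sum >> 16);
  return (uint16_t) ~sum;
}

int
main (void)
{
  /* Toy example: one header word changes from 0x1234 to 0xabcd;
   * a 32-bit address change is just two such updates. */
  uint16_t hc = 0xbeef;
  uint16_t hc_new = csum_update16 (hc, 0x1234, 0xabcd);
  printf ("checksum 0x%04x -> 0x%04x\n", hc, hc_new);
  return 0;
}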
diff --git a/src/plugins/nat/nat.c b/src/plugins/nat/nat.c
index c3583cabc21..d4c266a2b16 100755
--- a/src/plugins/nat/nat.c
+++ b/src/plugins/nat/nat.c
@@ -178,9 +178,27 @@ vlib_node_registration_t nat44_ed_classify_node;
vlib_node_registration_t nat44_det_classify_node;
vlib_node_registration_t nat44_handoff_classify_node;
+#define foreach_nat44_classify_error \
+_(MAX_REASS, "Maximum reassemblies exceeded") \
+_(MAX_FRAG, "Maximum fragments per reassembly exceeded")
+
+typedef enum {
+#define _(sym,str) NAT44_CLASSIFY_ERROR_##sym,
+ foreach_nat44_classify_error
+#undef _
+ NAT44_CLASSIFY_N_ERROR,
+} nat44_classify_error_t;
+
+static char * nat44_classify_error_strings[] = {
+#define _(sym,string) string,
+ foreach_nat44_classify_error
+#undef _
+};
+
typedef enum {
NAT44_CLASSIFY_NEXT_IN2OUT,
NAT44_CLASSIFY_NEXT_OUT2IN,
+ NAT44_CLASSIFY_NEXT_DROP,
NAT44_CLASSIFY_N_NEXT,
} nat44_classify_next_t;
@@ -438,6 +456,7 @@ nat_ed_session_alloc (snat_main_t *sm, snat_user_t *u, u32 thread_index)
typedef struct {
u8 next_in2out;
+ u8 cached;
} nat44_classify_trace_t;
static u8 * format_nat44_classify_trace (u8 * s, va_list * args)
@@ -447,9 +466,13 @@ static u8 * format_nat44_classify_trace (u8 * s, va_list * args)
nat44_classify_trace_t *t = va_arg (*args, nat44_classify_trace_t *);
char *next;
- next = t->next_in2out ? "nat44-in2out" : "nat44-out2in";
-
- s = format (s, "nat44-classify: next %s", next);
+ if (t->cached)
+ s = format (s, "nat44-classify: fragment cached");
+ else
+ {
+ next = t->next_in2out ? "nat44-in2out" : "nat44-out2in";
+ s = format (s, "nat44-classify: next %s", next);
+ }
return s;
}
@@ -466,6 +489,8 @@ nat44_classify_node_fn_inline (vlib_main_t * vm,
snat_static_mapping_t *m;
u32 thread_index = vm->thread_index;
snat_main_per_thread_data_t *tsm = &sm->per_thread_data[thread_index];
+ u32 *fragments_to_drop = 0;
+ u32 *fragments_to_loopback = 0;
from = vlib_frame_vector_args (frame);
n_left_from = frame->n_vectors;
@@ -489,6 +514,8 @@ nat44_classify_node_fn_inline (vlib_main_t * vm,
clib_bihash_kv_8_8_t kv0, value0;
clib_bihash_kv_16_8_t ed_kv0, ed_value0;
udp_header_t *udp0;
+ nat_reass_ip4_t *reass0;
+ u8 cached0 = 0;
/* speculatively enqueue b0 to the current next frame */
bi0 = from[0];
@@ -502,17 +529,102 @@ nat44_classify_node_fn_inline (vlib_main_t * vm,
ip0 = vlib_buffer_get_current (b0);
udp0 = ip4_next_header (ip0);
- if (is_ed)
+ if (is_ed && ip0->protocol != IP_PROTOCOL_ICMP)
{
- sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_RX];
- rx_fib_index0 =
- fib_table_get_index_for_sw_if_index (FIB_PROTOCOL_IP4,
- sw_if_index0);
- make_ed_kv (&ed_kv0, &ip0->src_address, &ip0->dst_address,
- ip0->protocol, rx_fib_index0, udp0->src_port,
- udp0->dst_port);
- if (!clib_bihash_search_16_8 (&tsm->in2out_ed, &ed_kv0, &ed_value0))
- goto enqueue0;
+ if (!ip4_is_fragment (ip0) || ip4_is_first_fragment (ip0))
+ {
+ /* process leading fragment/whole packet (with L4 header) */
+ sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_RX];
+ rx_fib_index0 =
+ fib_table_get_index_for_sw_if_index (FIB_PROTOCOL_IP4,
+ sw_if_index0);
+ make_ed_kv (&ed_kv0, &ip0->src_address, &ip0->dst_address,
+ ip0->protocol, rx_fib_index0, udp0->src_port,
+ udp0->dst_port);
+ if (ip4_is_fragment (ip0))
+ {
+ reass0 = nat_ip4_reass_find_or_create (ip0->src_address,
+ ip0->dst_address,
+ ip0->fragment_id,
+ ip0->protocol,
+ 1,
+ &fragments_to_drop);
+ if (PREDICT_FALSE (!reass0))
+ {
+ next0 = NAT44_CLASSIFY_NEXT_DROP;
+ b0->error = node->errors[NAT44_CLASSIFY_ERROR_MAX_REASS];
+ nat_log_notice ("maximum reassemblies exceeded");
+ goto enqueue0;
+ }
+ if (!clib_bihash_search_16_8 (&tsm->in2out_ed, &ed_kv0,
+ &ed_value0))
+ {
+ /* session exists so classify as IN2OUT,
+ * save this information for future fragments and set
+ * past fragments to be looped over and reprocessed */
+ reass0->sess_index = ed_value0.value;
+ reass0->classify_next = NAT_REASS_IP4_CLASSIFY_NEXT_IN2OUT;
+ nat_ip4_reass_get_frags (reass0,
+ &fragments_to_loopback);
+ goto enqueue0;
+ }
+ else
+ {
+ /* session doesn't exist so continue in the code,
+ * save this information for future fragments and set
+ * past fragments to be looped over and reprocessed */
+ reass0->flags |= NAT_REASS_FLAG_CLASSIFY_ED_CONTINUE;
+ nat_ip4_reass_get_frags (reass0,
+ &fragments_to_loopback);
+ }
+ }
+ else
+ {
+ /* process whole packet */
+ if (!clib_bihash_search_16_8 (&tsm->in2out_ed, &ed_kv0,
+ &ed_value0))
+ goto enqueue0;
+ /* session doesn't exist so continue in code */
+ }
+ }
+ else
+ {
+ /* process non-first fragment */
+ reass0 = nat_ip4_reass_find_or_create (ip0->src_address,
+ ip0->dst_address,
+ ip0->fragment_id,
+ ip0->protocol,
+ 1,
+ &fragments_to_drop);
+ if (PREDICT_FALSE (!reass0))
+ {
+ next0 = NAT44_CLASSIFY_NEXT_DROP;
+ b0->error = node->errors[NAT44_CLASSIFY_ERROR_MAX_REASS];
+ nat_log_notice ("maximum reassemblies exceeded");
+ goto enqueue0;
+ }
+ /* check if first fragment has arrived */
+ if (reass0->classify_next == NAT_REASS_IP4_CLASSIFY_NONE &&
+ !(reass0->flags & NAT_REASS_FLAG_CLASSIFY_ED_CONTINUE))
+ {
+ /* first fragment still hasn't arrived, cache this fragment */
+ if (nat_ip4_reass_add_fragment (reass0, bi0,
+ &fragments_to_drop))
+ {
+ b0->error = node->errors[NAT44_CLASSIFY_ERROR_MAX_FRAG];
+ nat_log_notice ("maximum fragments per reassembly exceeded");
+ next0 = NAT44_CLASSIFY_NEXT_DROP;
+ goto enqueue0;
+ }
+ cached0 = 1;
+ goto enqueue0;
+ }
+ if (reass0->classify_next == NAT_REASS_IP4_CLASSIFY_NEXT_IN2OUT)
+ goto enqueue0;
+ /* flag NAT_REASS_FLAG_CLASSIFY_ED_CONTINUE is set
+ * so keep the default next0 and continue in code to
+ * potentially find other classification for this packet */
+ }
}
vec_foreach (ap, sm->addresses)
@@ -531,21 +643,86 @@ nat44_classify_node_fn_inline (vlib_main_t * vm,
m_key0.protocol = 0;
m_key0.fib_index = 0;
kv0.key = m_key0.as_u64;
- if (!clib_bihash_search_8_8 (&sm->static_mapping_by_external, &kv0, &value0))
+ /* try to classify the fragment based on IP header alone */
+ if (!clib_bihash_search_8_8 (&sm->static_mapping_by_external,
+ &kv0, &value0))
{
m = pool_elt_at_index (sm->static_mappings, value0.value);
if (m->local_addr.as_u32 != m->external_addr.as_u32)
next0 = NAT44_CLASSIFY_NEXT_OUT2IN;
goto enqueue0;
}
- m_key0.port = clib_net_to_host_u16 (udp0->dst_port);
- m_key0.protocol = ip_proto_to_snat_proto (ip0->protocol);
- kv0.key = m_key0.as_u64;
- if (!clib_bihash_search_8_8 (&sm->static_mapping_by_external, &kv0, &value0))
+ if (!ip4_is_fragment (ip0) || ip4_is_first_fragment (ip0))
{
- m = pool_elt_at_index (sm->static_mappings, value0.value);
- if (m->local_addr.as_u32 != m->external_addr.as_u32)
+ /* process leading fragment/whole packet (with L4 header) */
+ m_key0.port = clib_net_to_host_u16 (udp0->dst_port);
+ m_key0.protocol = ip_proto_to_snat_proto (ip0->protocol);
+ kv0.key = m_key0.as_u64;
+ if (!clib_bihash_search_8_8 (&sm->static_mapping_by_external,
+ &kv0, &value0))
+ {
+ m = pool_elt_at_index (sm->static_mappings, value0.value);
+ if (m->local_addr.as_u32 != m->external_addr.as_u32)
+ next0 = NAT44_CLASSIFY_NEXT_OUT2IN;
+ }
+ if (ip4_is_fragment (ip0))
+ {
+ reass0 = nat_ip4_reass_find_or_create (ip0->src_address,
+ ip0->dst_address,
+ ip0->fragment_id,
+ ip0->protocol,
+ 1,
+ &fragments_to_drop);
+ if (PREDICT_FALSE (!reass0))
+ {
+ next0 = NAT44_CLASSIFY_NEXT_DROP;
+ b0->error = node->errors[NAT44_CLASSIFY_ERROR_MAX_REASS];
+ nat_log_notice ("maximum reassemblies exceeded");
+ goto enqueue0;
+ }
+ /* save classification for future fragments and set past
+ * fragments to be looped over and reprocessed */
+ if (next0 == NAT44_CLASSIFY_NEXT_OUT2IN)
+ reass0->classify_next = NAT_REASS_IP4_CLASSIFY_NEXT_OUT2IN;
+ else
+ reass0->classify_next = NAT_REASS_IP4_CLASSIFY_NEXT_IN2OUT;
+ nat_ip4_reass_get_frags (reass0, &fragments_to_loopback);
+ }
+ }
+ else
+ {
+ /* process non-first fragment */
+ reass0 = nat_ip4_reass_find_or_create (ip0->src_address,
+ ip0->dst_address,
+ ip0->fragment_id,
+ ip0->protocol,
+ 1,
+ &fragments_to_drop);
+ if (PREDICT_FALSE (!reass0))
+ {
+ next0 = NAT44_CLASSIFY_NEXT_DROP;
+ b0->error = node->errors[NAT44_CLASSIFY_ERROR_MAX_REASS];
+ nat_log_notice ("maximum reassemblies exceeded");
+ goto enqueue0;
+ }
+ if (reass0->classify_next == NAT_REASS_IP4_CLASSIFY_NONE)
+ /* first fragment still hasn't arrived */
+ {
+ if (nat_ip4_reass_add_fragment (reass0, bi0,
+ &fragments_to_drop))
+ {
+ b0->error = node->errors[NAT44_CLASSIFY_ERROR_MAX_FRAG];
+ nat_log_notice ("maximum fragments per reassembly exceeded");
+ next0 = NAT44_CLASSIFY_NEXT_DROP;
+ goto enqueue0;
+ }
+ cached0 = 1;
+ goto enqueue0;
+ }
+ else if (reass0->classify_next == NAT_REASS_IP4_CLASSIFY_NEXT_OUT2IN)
next0 = NAT44_CLASSIFY_NEXT_OUT2IN;
+ else if (reass0->classify_next == NAT_REASS_IP4_CLASSIFY_NEXT_IN2OUT)
+ next0 = NAT44_CLASSIFY_NEXT_IN2OUT;
}
}
@@ -555,18 +732,50 @@ nat44_classify_node_fn_inline (vlib_main_t * vm,
{
nat44_classify_trace_t *t =
vlib_add_trace (vm, node, b0, sizeof (*t));
- t->next_in2out = next0 == NAT44_CLASSIFY_NEXT_IN2OUT ? 1 : 0;
+ t->cached = cached0;
+ if (!cached0)
+ t->next_in2out = next0 == NAT44_CLASSIFY_NEXT_IN2OUT ? 1 : 0;
}
- /* verify speculative enqueue, maybe switch current next frame */
- vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
- to_next, n_left_to_next,
- bi0, next0);
+ if (cached0)
+ {
+ n_left_to_next++;
+ to_next--;
+ }
+ else
+ /* verify speculative enqueue, maybe switch current next frame */
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+
+ if (n_left_from == 0 && vec_len (fragments_to_loopback))
+ {
+ from = vlib_frame_vector_args (frame);
+ u32 len = vec_len (fragments_to_loopback);
+ if (len <= VLIB_FRAME_SIZE)
+ {
+ clib_memcpy (from, fragments_to_loopback, sizeof (u32) * len);
+ n_left_from = len;
+ vec_reset_length (fragments_to_loopback);
+ }
+ else
+ {
+ clib_memcpy (from,
+ fragments_to_loopback + (len - VLIB_FRAME_SIZE),
+ sizeof (u32) * VLIB_FRAME_SIZE);
+ n_left_from = VLIB_FRAME_SIZE;
+ _vec_len (fragments_to_loopback) = len - VLIB_FRAME_SIZE;
+ }
+ }
}
vlib_put_next_frame (vm, node, next_index, n_left_to_next);
}
+ nat_send_all_to_node (vm, fragments_to_drop, node, 0, NAT44_CLASSIFY_NEXT_DROP);
+
+ vec_free (fragments_to_drop);
+
return frame->n_vectors;
}
@@ -584,10 +793,13 @@ VLIB_REGISTER_NODE (nat44_classify_node) = {
.vector_size = sizeof (u32),
.format_trace = format_nat44_classify_trace,
.type = VLIB_NODE_TYPE_INTERNAL,
+ .n_errors = ARRAY_LEN(nat44_classify_error_strings),
+ .error_strings = nat44_classify_error_strings,
.n_next_nodes = NAT44_CLASSIFY_N_NEXT,
.next_nodes = {
[NAT44_CLASSIFY_NEXT_IN2OUT] = "nat44-in2out",
[NAT44_CLASSIFY_NEXT_OUT2IN] = "nat44-out2in",
+ [NAT44_CLASSIFY_NEXT_DROP] = "error-drop",
},
};
@@ -611,6 +823,7 @@ VLIB_REGISTER_NODE (nat44_ed_classify_node) = {
.next_nodes = {
[NAT44_CLASSIFY_NEXT_IN2OUT] = "nat44-ed-in2out",
[NAT44_CLASSIFY_NEXT_OUT2IN] = "nat44-ed-out2in",
+ [NAT44_CLASSIFY_NEXT_DROP] = "error-drop",
},
};
@@ -635,6 +848,7 @@ VLIB_REGISTER_NODE (nat44_det_classify_node) = {
.next_nodes = {
[NAT44_CLASSIFY_NEXT_IN2OUT] = "nat44-det-in2out",
[NAT44_CLASSIFY_NEXT_OUT2IN] = "nat44-det-out2in",
+ [NAT44_CLASSIFY_NEXT_DROP] = "error-drop",
},
};
@@ -659,6 +873,7 @@ VLIB_REGISTER_NODE (nat44_handoff_classify_node) = {
.next_nodes = {
[NAT44_CLASSIFY_NEXT_IN2OUT] = "nat44-in2out-worker-handoff",
[NAT44_CLASSIFY_NEXT_OUT2IN] = "nat44-out2in-worker-handoff",
+ [NAT44_CLASSIFY_NEXT_DROP] = "error-drop",
},
};
diff --git a/src/plugins/nat/nat_inlines.h b/src/plugins/nat/nat_inlines.h
index 8922c05c393..4c79fd4124c 100644
--- a/src/plugins/nat/nat_inlines.h
+++ b/src/plugins/nat/nat_inlines.h
@@ -120,7 +120,8 @@ nat_send_all_to_node (vlib_main_t * vm, u32 * bi_vector,
to_next += 1;
n_left_to_next -= 1;
vlib_buffer_t *p0 = vlib_get_buffer (vm, bi0);
- p0->error = *error;
+ if (error)
+ p0->error = *error;
vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
n_left_to_next, bi0, next);
}
diff --git a/src/plugins/nat/nat_reass.c b/src/plugins/nat/nat_reass.c
index 8fd370de2fc..1a5aeebdf7c 100755
--- a/src/plugins/nat/nat_reass.c
+++ b/src/plugins/nat/nat_reass.c
@@ -250,7 +250,7 @@ nat_ip4_reass_find_or_create (ip4_address_t src, ip4_address_t dst,
reass->lru_list_index);
}
- if (reass->flags && NAT_REASS_FLAG_MAX_FRAG_DROP)
+ if (reass->flags & NAT_REASS_FLAG_MAX_FRAG_DROP)
{
reass = 0;
goto unlock;
@@ -320,6 +320,8 @@ nat_ip4_reass_find_or_create (ip4_address_t src, ip4_address_t dst,
reass->thread_index = (u32) ~ 0;
reass->last_heard = now;
reass->frag_n = 0;
+ reass->flags = 0;
+ reass->classify_next = NAT_REASS_IP4_CLASSIFY_NONE;
if (clib_bihash_add_del_16_8 (&srm->ip4_reass_hash, &kv, 1))
{
@@ -457,7 +459,7 @@ nat_ip6_reass_find_or_create (ip6_address_t src, ip6_address_t dst,
reass->lru_list_index);
}
- if (reass->flags && NAT_REASS_FLAG_MAX_FRAG_DROP)
+ if (reass->flags & NAT_REASS_FLAG_MAX_FRAG_DROP)
{
reass = 0;
goto unlock;
@@ -724,12 +726,51 @@ static int
nat_ip4_reass_walk_cli (nat_reass_ip4_t * reass, void *ctx)
{
vlib_main_t *vm = ctx;
+ u8 *flags_str = 0;
+ const char *classify_next_str;
- vlib_cli_output (vm, " src %U dst %U proto %u id 0x%04x cached %u",
+ if (reass->flags & NAT_REASS_FLAG_MAX_FRAG_DROP)
+ flags_str = format (flags_str, "MAX_FRAG_DROP");
+ if (reass->flags & NAT_REASS_FLAG_CLASSIFY_ED_CONTINUE)
+ {
+ if (flags_str)
+ flags_str = format (flags_str, " | ");
+ flags_str = format (flags_str, "CLASSIFY_ED_CONTINUE");
+ }
+ if (reass->flags & NAT_REASS_FLAG_ED_DONT_TRANSLATE)
+ {
+ if (flags_str)
+ flags_str = format (flags_str, " | ");
+ flags_str = format (flags_str, "CLASSIFY_ED_DONT_TRANSLATE");
+ }
+ if (!flags_str)
+ flags_str = format (flags_str, "0");
+ flags_str = format (flags_str, "%c", 0);
+
+ switch (reass->classify_next)
+ {
+ case NAT_REASS_IP4_CLASSIFY_NONE:
+ classify_next_str = "NONE";
+ break;
+ case NAT_REASS_IP4_CLASSIFY_NEXT_IN2OUT:
+ classify_next_str = "IN2OUT";
+ break;
+ case NAT_REASS_IP4_CLASSIFY_NEXT_OUT2IN:
+ classify_next_str = "OUT2IN";
+ break;
+ default:
+ classify_next_str = "invalid value";
+ }
+
+ vlib_cli_output (vm, " src %U dst %U proto %u id 0x%04x cached %u "
+ "flags %s classify_next %s",
format_ip4_address, &reass->key.src,
format_ip4_address, &reass->key.dst,
reass->key.proto,
- clib_net_to_host_u16 (reass->key.frag_id), reass->frag_n);
+ clib_net_to_host_u16 (reass->key.frag_id), reass->frag_n,
+ flags_str, classify_next_str);
+
+ vec_free (flags_str);
return 0;
}
diff --git a/src/plugins/nat/nat_reass.h b/src/plugins/nat/nat_reass.h
index 5b18d1bd7b9..579961d72c9 100644
--- a/src/plugins/nat/nat_reass.h
+++ b/src/plugins/nat/nat_reass.h
@@ -31,6 +31,8 @@
#define NAT_REASS_HT_LOAD_FACTOR (0.75)
#define NAT_REASS_FLAG_MAX_FRAG_DROP 1
+#define NAT_REASS_FLAG_CLASSIFY_ED_CONTINUE 2
+#define NAT_REASS_FLAG_ED_DONT_TRANSLATE 4
typedef struct
{
@@ -49,6 +51,13 @@ typedef struct
};
} nat_reass_ip4_key_t;
+enum
+{
+ NAT_REASS_IP4_CLASSIFY_NONE,
+ NAT_REASS_IP4_CLASSIFY_NEXT_IN2OUT,
+ NAT_REASS_IP4_CLASSIFY_NEXT_OUT2IN
+};
+
/* *INDENT-OFF* */
typedef CLIB_PACKED(struct
{
@@ -60,6 +69,7 @@ typedef CLIB_PACKED(struct
u32 frags_per_reass_list_head_index;
u8 frag_n;
u8 flags;
+ u8 classify_next;
}) nat_reass_ip4_t;
/* *INDENT-ON* */
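
Besides adding NAT_REASS_FLAG_CLASSIFY_ED_CONTINUE, NAT_REASS_FLAG_ED_DONT_TRANSLATE and the classify_next field, the nat_reass.c hunks above fix a latent bug: NAT_REASS_FLAG_MAX_FRAG_DROP was tested with a logical AND (&&), which is true whenever any flag is set, instead of a bitwise AND (&). A quick standalone check of the difference (plain C, flag values copied from nat_reass.h):

#include <assert.h>
#include <stdio.h>

#define NAT_REASS_FLAG_MAX_FRAG_DROP        1
#define NAT_REASS_FLAG_CLASSIFY_ED_CONTINUE 2
#define NAT_REASS_FLAG_ED_DONT_TRANSLATE    4

int
main (void)
{
  unsigned char flags = NAT_REASS_FLAG_CLASSIFY_ED_CONTINUE;

  /* Old test: true because flags is non-zero, regardless of which bit. */
  int buggy = (flags && NAT_REASS_FLAG_MAX_FRAG_DROP);
  /* Fixed test: true only when the MAX_FRAG_DROP bit itself is set. */
  int fixed = (flags & NAT_REASS_FLAG_MAX_FRAG_DROP) != 0;

  printf ("buggy=%d fixed=%d\n", buggy, fixed);   /* prints buggy=1 fixed=0 */
  assert (buggy == 1 && fixed == 0);
  return 0;
}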
diff --git a/src/plugins/nat/out2in.c b/src/plugins/nat/out2in.c
index 3d4e9228e67..ba343ad72a5 100755
--- a/src/plugins/nat/out2in.c
+++ b/src/plugins/nat/out2in.c
@@ -106,6 +106,7 @@ vlib_node_registration_t snat_det_out2in_node;
vlib_node_registration_t nat44_out2in_reass_node;
vlib_node_registration_t nat44_ed_out2in_node;
vlib_node_registration_t nat44_ed_out2in_slowpath_node;
+vlib_node_registration_t nat44_ed_out2in_reass_node;
#define foreach_snat_out2in_error \
_(UNSUPPORTED_PROTOCOL, "Unsupported protocol") \
@@ -539,13 +540,16 @@ static inline u32 icmp_out2in (snat_main_t *sm,
if (next0 == SNAT_OUT2IN_NEXT_DROP || dont_translate)
goto out;
- sum0 = ip_incremental_checksum (0, icmp0,
- ntohs(ip0->length) - ip4_header_bytes (ip0));
- checksum0 = ~ip_csum_fold (sum0);
- if (checksum0 != 0 && checksum0 != 0xffff)
+ if (PREDICT_TRUE (!ip4_is_fragment (ip0)))
{
- next0 = SNAT_OUT2IN_NEXT_DROP;
- goto out;
+ sum0 = ip_incremental_checksum (0, icmp0,
+ ntohs(ip0->length) - ip4_header_bytes (ip0));
+ checksum0 = ~ip_csum_fold (sum0);
+ if (checksum0 != 0 && checksum0 != 0xffff)
+ {
+ next0 = SNAT_OUT2IN_NEXT_DROP;
+ goto out;
+ }
}
old_addr0 = ip0->dst_address.as_u32;
@@ -1582,6 +1586,7 @@ typedef enum {
NAT44_ED_OUT2IN_NEXT_ICMP_ERROR,
NAT44_ED_OUT2IN_NEXT_IN2OUT,
NAT44_ED_OUT2IN_NEXT_SLOW_PATH,
+ NAT44_ED_OUT2IN_NEXT_REASS,
NAT44_ED_OUT2IN_N_NEXT,
} nat44_ed_out2in_next_t;
@@ -2298,7 +2303,7 @@ nat44_ed_out2in_node_fn_inline (vlib_main_t * vm,
}
else
{
- if (PREDICT_FALSE (proto0 == ~0 || proto0 == SNAT_PROTOCOL_ICMP))
+ if (PREDICT_FALSE (proto0 == ~0))
{
next0 = NAT44_ED_OUT2IN_NEXT_SLOW_PATH;
goto trace00;
@@ -2306,8 +2311,13 @@ nat44_ed_out2in_node_fn_inline (vlib_main_t * vm,
if (ip4_is_fragment (ip0))
{
- b0->error = node->errors[SNAT_OUT2IN_ERROR_DROP_FRAGMENT];
- next0 = NAT44_ED_OUT2IN_NEXT_DROP;
+ next0 = NAT44_ED_OUT2IN_NEXT_REASS;
+ goto trace00;
+ }
+
+ if (PREDICT_FALSE (proto0 == SNAT_PROTOCOL_ICMP))
+ {
+ next0 = NAT44_ED_OUT2IN_NEXT_SLOW_PATH;
goto trace00;
}
}
@@ -2501,7 +2511,7 @@ nat44_ed_out2in_node_fn_inline (vlib_main_t * vm,
}
else
{
- if (PREDICT_FALSE (proto1 == ~0 || proto1 == SNAT_PROTOCOL_ICMP))
+ if (PREDICT_FALSE (proto1 == ~0))
{
next1 = NAT44_ED_OUT2IN_NEXT_SLOW_PATH;
goto trace01;
@@ -2509,8 +2519,13 @@ nat44_ed_out2in_node_fn_inline (vlib_main_t * vm,
if (ip4_is_fragment (ip1))
{
- b1->error = node->errors[SNAT_OUT2IN_ERROR_DROP_FRAGMENT];
- next1 = NAT44_ED_OUT2IN_NEXT_DROP;
+ next1 = NAT44_ED_OUT2IN_NEXT_REASS;
+ goto trace01;
+ }
+
+ if (PREDICT_FALSE (proto1 == SNAT_PROTOCOL_ICMP))
+ {
+ next1 = NAT44_ED_OUT2IN_NEXT_SLOW_PATH;
goto trace01;
}
}
@@ -2736,7 +2751,7 @@ nat44_ed_out2in_node_fn_inline (vlib_main_t * vm,
}
else
{
- if (PREDICT_FALSE (proto0 == ~0 || proto0 == SNAT_PROTOCOL_ICMP))
+ if (PREDICT_FALSE (proto0 == ~0))
{
next0 = NAT44_ED_OUT2IN_NEXT_SLOW_PATH;
goto trace0;
@@ -2744,8 +2759,13 @@ nat44_ed_out2in_node_fn_inline (vlib_main_t * vm,
if (ip4_is_fragment (ip0))
{
- b0->error = node->errors[SNAT_OUT2IN_ERROR_DROP_FRAGMENT];
- next0 = NAT44_ED_OUT2IN_NEXT_DROP;
+ next0 = NAT44_ED_OUT2IN_NEXT_REASS;
+ goto trace0;
+ }
+
+ if (PREDICT_FALSE (proto0 == SNAT_PROTOCOL_ICMP))
+ {
+ next0 = NAT44_ED_OUT2IN_NEXT_SLOW_PATH;
goto trace0;
}
}
@@ -2891,7 +2911,6 @@ nat44_ed_out2in_node_fn_inline (vlib_main_t * vm,
}
pkts_processed += next0 != NAT44_ED_OUT2IN_NEXT_DROP;
-
/* verify speculative enqueue, maybe switch current next frame */
vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
to_next, n_left_to_next,
@@ -2936,6 +2955,7 @@ VLIB_REGISTER_NODE (nat44_ed_out2in_node) = {
[NAT44_ED_OUT2IN_NEXT_SLOW_PATH] = "nat44-ed-out2in-slowpath",
[NAT44_ED_OUT2IN_NEXT_ICMP_ERROR] = "ip4-icmp-error",
[NAT44_ED_OUT2IN_NEXT_IN2OUT] = "nat44-ed-in2out",
+ [NAT44_ED_OUT2IN_NEXT_REASS] = "nat44-ed-out2in-reass",
},
};
@@ -2970,12 +2990,365 @@ VLIB_REGISTER_NODE (nat44_ed_out2in_slowpath_node) = {
[NAT44_ED_OUT2IN_NEXT_SLOW_PATH] = "nat44-ed-out2in-slowpath",
[NAT44_ED_OUT2IN_NEXT_ICMP_ERROR] = "ip4-icmp-error",
[NAT44_ED_OUT2IN_NEXT_IN2OUT] = "nat44-ed-in2out",
+ [NAT44_ED_OUT2IN_NEXT_REASS] = "nat44-ed-out2in-reass",
},
};
VLIB_NODE_FUNCTION_MULTIARCH (nat44_ed_out2in_slowpath_node,
nat44_ed_out2in_slow_path_fn);
+static uword
+nat44_ed_out2in_reass_node_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ u32 n_left_from, *from, *to_next;
+ snat_out2in_next_t next_index;
+ u32 pkts_processed = 0;
+ snat_main_t *sm = &snat_main;
+ f64 now = vlib_time_now (vm);
+ u32 thread_index = vm->thread_index;
+ snat_main_per_thread_data_t *per_thread_data =
+ &sm->per_thread_data[thread_index];
+ u32 *fragments_to_drop = 0;
+ u32 *fragments_to_loopback = 0;
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0, sw_if_index0, proto0, rx_fib_index0, new_addr0, old_addr0;
+ vlib_buffer_t *b0;
+ u32 next0;
+ u8 cached0 = 0;
+ ip4_header_t *ip0;
+ nat_reass_ip4_t *reass0;
+ udp_header_t * udp0;
+ tcp_header_t * tcp0;
+ icmp46_header_t * icmp0;
+ clib_bihash_kv_16_8_t kv0, value0;
+ snat_session_t * s0 = 0;
+ u16 old_port0, new_port0;
+ ip_csum_t sum0;
+ snat_session_key_t e_key0, l_key0;
+ lb_nat_type_t lb0;
+ twice_nat_type_t twice_nat0;
+
+ /* speculatively enqueue b0 to the current next frame */
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ next0 = NAT44_ED_OUT2IN_NEXT_LOOKUP;
+
+ sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_RX];
+ rx_fib_index0 = fib_table_get_index_for_sw_if_index (FIB_PROTOCOL_IP4,
+ sw_if_index0);
+
+ if (PREDICT_FALSE (nat_reass_is_drop_frag(0)))
+ {
+ next0 = NAT44_ED_OUT2IN_NEXT_DROP;
+ b0->error = node->errors[SNAT_OUT2IN_ERROR_DROP_FRAGMENT];
+ goto trace0;
+ }
+
+ ip0 = (ip4_header_t *) vlib_buffer_get_current (b0);
+ udp0 = ip4_next_header (ip0);
+ tcp0 = (tcp_header_t *) udp0;
+ icmp0 = (icmp46_header_t *) udp0;
+ proto0 = ip_proto_to_snat_proto (ip0->protocol);
+
+ reass0 = nat_ip4_reass_find_or_create (ip0->src_address,
+ ip0->dst_address,
+ ip0->fragment_id,
+ ip0->protocol,
+ 1,
+ &fragments_to_drop);
+
+ if (PREDICT_FALSE (!reass0))
+ {
+ next0 = NAT44_ED_OUT2IN_NEXT_DROP;
+ b0->error = node->errors[SNAT_OUT2IN_ERROR_MAX_REASS];
+ nat_log_notice ("maximum reassemblies exceeded");
+ goto trace0;
+ }
+
+ if (PREDICT_FALSE (ip4_is_first_fragment (ip0)))
+ {
+ if (PREDICT_FALSE (proto0 == SNAT_PROTOCOL_ICMP))
+ {
+ next0 = icmp_out2in_slow_path
+ (sm, b0, ip0, icmp0, sw_if_index0, rx_fib_index0, node,
+ next0, now, thread_index, &s0);
+
+ if (PREDICT_TRUE(next0 != NAT44_ED_OUT2IN_NEXT_DROP))
+ {
+ if (s0)
+ reass0->sess_index = s0 - per_thread_data->sessions;
+ else
+ reass0->flags |= NAT_REASS_FLAG_ED_DONT_TRANSLATE;
+ reass0->thread_index = thread_index;
+ nat_ip4_reass_get_frags (reass0, &fragments_to_loopback);
+ }
+
+ goto trace0;
+ }
+
+ make_ed_kv (&kv0, &ip0->dst_address, &ip0->src_address, ip0->protocol,
+ rx_fib_index0, udp0->dst_port, udp0->src_port);
+
+ if (clib_bihash_search_16_8 (&per_thread_data->out2in_ed, &kv0, &value0))
+ {
+ /* Try to match static mapping by external address and port,
+ destination address and port in packet */
+ e_key0.addr = ip0->dst_address;
+ e_key0.port = udp0->dst_port;
+ e_key0.protocol = proto0;
+ e_key0.fib_index = rx_fib_index0;
+ if (snat_static_mapping_match(sm, e_key0, &l_key0, 1, 0,
+ &twice_nat0, &lb0, 0))
+ {
+ /*
+ * Send DHCP packets to the ipv4 stack, or we won't
+ * be able to use dhcp client on the outside interface
+ */
+ if (PREDICT_FALSE (proto0 == SNAT_PROTOCOL_UDP
+ && (udp0->dst_port
+ == clib_host_to_net_u16(UDP_DST_PORT_dhcp_to_client))))
+ {
+ vnet_feature_next(&next0, b0);
+ goto trace0;
+ }
+
+ if (!sm->forwarding_enabled)
+ {
+ b0->error = node->errors[SNAT_OUT2IN_ERROR_NO_TRANSLATION];
+ next0 = NAT44_ED_OUT2IN_NEXT_DROP;
+ }
+ else
+ {
+ if (next_src_nat(sm, ip0, ip0->protocol,
+ udp0->src_port, udp0->dst_port,
+ thread_index, rx_fib_index0))
+ {
+ next0 = NAT44_ED_OUT2IN_NEXT_IN2OUT;
+ goto trace0;
+ }
+ create_bypass_for_fwd(sm, ip0, rx_fib_index0,
+ thread_index);
+ reass0->flags |= NAT_REASS_FLAG_ED_DONT_TRANSLATE;
+ nat_ip4_reass_get_frags (reass0, &fragments_to_loopback);
+ }
+ goto trace0;
+ }
+
+ /* Create session initiated by host from external network */
+ s0 = create_session_for_static_mapping_ed(sm, b0, l_key0,
+ e_key0, node,
+ thread_index,
+ twice_nat0, lb0,
+ now);
+ if (!s0)
+ {
+ b0->error = node->errors[SNAT_OUT2IN_ERROR_NO_TRANSLATION];
+ next0 = NAT44_ED_OUT2IN_NEXT_DROP;
+ goto trace0;
+ }
+ reass0->sess_index = s0 - per_thread_data->sessions;
+ reass0->thread_index = thread_index;
+ }
+ else
+ {
+ s0 = pool_elt_at_index (per_thread_data->sessions,
+ value0.value);
+ reass0->sess_index = value0.value;
+ }
+ nat_ip4_reass_get_frags (reass0, &fragments_to_loopback);
+ }
+ else
+ {
+ if (reass0->flags & NAT_REASS_FLAG_ED_DONT_TRANSLATE)
+ goto trace0;
+ if (PREDICT_FALSE (reass0->sess_index == (u32) ~0))
+ {
+ if (nat_ip4_reass_add_fragment (reass0, bi0, &fragments_to_drop))
+ {
+ b0->error = node->errors[SNAT_OUT2IN_ERROR_MAX_FRAG];
+ nat_log_notice ("maximum fragments per reassembly exceeded");
+ next0 = NAT44_ED_OUT2IN_NEXT_DROP;
+ goto trace0;
+ }
+ cached0 = 1;
+ goto trace0;
+ }
+ s0 = pool_elt_at_index (per_thread_data->sessions,
+ reass0->sess_index);
+ }
+
+ old_addr0 = ip0->dst_address.as_u32;
+ ip0->dst_address = s0->in2out.addr;
+ new_addr0 = ip0->dst_address.as_u32;
+ vnet_buffer(b0)->sw_if_index[VLIB_TX] = s0->in2out.fib_index;
+
+ sum0 = ip0->checksum;
+ sum0 = ip_csum_update (sum0, old_addr0, new_addr0,
+ ip4_header_t,
+ dst_address /* changed member */);
+ if (PREDICT_FALSE (is_twice_nat_session (s0)))
+ sum0 = ip_csum_update (sum0, ip0->src_address.as_u32,
+ s0->ext_host_nat_addr.as_u32, ip4_header_t,
+ src_address);
+ ip0->checksum = ip_csum_fold (sum0);
+
+ if (PREDICT_FALSE (ip4_is_first_fragment (ip0)))
+ {
+ if (PREDICT_TRUE(proto0 == SNAT_PROTOCOL_TCP))
+ {
+ old_port0 = tcp0->dst_port;
+ tcp0->dst_port = s0->in2out.port;
+ new_port0 = tcp0->dst_port;
+
+ sum0 = tcp0->checksum;
+ sum0 = ip_csum_update (sum0, old_addr0, new_addr0,
+ ip4_header_t,
+ dst_address /* changed member */);
+
+ sum0 = ip_csum_update (sum0, old_port0, new_port0,
+ ip4_header_t /* cheat */,
+ length /* changed member */);
+ if (is_twice_nat_session (s0))
+ {
+ sum0 = ip_csum_update (sum0, ip0->src_address.as_u32,
+ s0->ext_host_nat_addr.as_u32,
+ ip4_header_t, dst_address);
+ sum0 = ip_csum_update (sum0, tcp0->src_port,
+ s0->ext_host_nat_port, ip4_header_t,
+ length);
+ tcp0->src_port = s0->ext_host_nat_port;
+ ip0->src_address.as_u32 = s0->ext_host_nat_addr.as_u32;
+ }
+ tcp0->checksum = ip_csum_fold(sum0);
+ }
+ else
+ {
+ old_port0 = udp0->dst_port;
+ udp0->dst_port = s0->in2out.port;
+ if (is_twice_nat_session (s0))
+ {
+ udp0->src_port = s0->ext_host_nat_port;
+ ip0->src_address.as_u32 = s0->ext_host_nat_addr.as_u32;
+ }
+ udp0->checksum = 0;
+ }
+ }
+
+ /* Accounting */
+ nat44_session_update_counters (s0, now,
+ vlib_buffer_length_in_chain (vm, b0));
+ /* Per-user LRU list maintenance */
+ nat44_session_update_lru (sm, s0, thread_index);
+
+ trace0:
+ if (PREDICT_FALSE((node->flags & VLIB_NODE_FLAG_TRACE)
+ && (b0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ nat44_out2in_reass_trace_t *t =
+ vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->cached = cached0;
+ t->sw_if_index = sw_if_index0;
+ t->next_index = next0;
+ }
+
+ if (cached0)
+ {
+ n_left_to_next++;
+ to_next--;
+ }
+ else
+ {
+ pkts_processed += next0 != NAT44_ED_OUT2IN_NEXT_DROP;
+
+ /* verify speculative enqueue, maybe switch current next frame */
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ }
+
+ if (n_left_from == 0 && vec_len (fragments_to_loopback))
+ {
+ from = vlib_frame_vector_args (frame);
+ u32 len = vec_len (fragments_to_loopback);
+ if (len <= VLIB_FRAME_SIZE)
+ {
+ clib_memcpy (from, fragments_to_loopback, sizeof (u32) * len);
+ n_left_from = len;
+ vec_reset_length (fragments_to_loopback);
+ }
+ else
+ {
+ clib_memcpy (from,
+ fragments_to_loopback + (len - VLIB_FRAME_SIZE),
+ sizeof (u32) * VLIB_FRAME_SIZE);
+ n_left_from = VLIB_FRAME_SIZE;
+ _vec_len (fragments_to_loopback) = len - VLIB_FRAME_SIZE;
+ }
+ }
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ vlib_node_increment_counter (vm, nat44_out2in_reass_node.index,
+ SNAT_OUT2IN_ERROR_OUT2IN_PACKETS,
+ pkts_processed);
+
+ nat_send_all_to_node (vm, fragments_to_drop, node,
+ &node->errors[SNAT_OUT2IN_ERROR_DROP_FRAGMENT],
+ SNAT_OUT2IN_NEXT_DROP);
+
+ vec_free (fragments_to_drop);
+ vec_free (fragments_to_loopback);
+ return frame->n_vectors;
+}
+
+VLIB_REGISTER_NODE (nat44_ed_out2in_reass_node) = {
+ .function = nat44_ed_out2in_reass_node_fn,
+ .name = "nat44-ed-out2in-reass",
+ .vector_size = sizeof (u32),
+ .format_trace = format_nat44_out2in_reass_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(snat_out2in_error_strings),
+ .error_strings = snat_out2in_error_strings,
+
+ .n_next_nodes = NAT44_ED_OUT2IN_N_NEXT,
+
+ /* edit / add dispositions here */
+ .next_nodes = {
+ [NAT44_ED_OUT2IN_NEXT_DROP] = "error-drop",
+ [NAT44_ED_OUT2IN_NEXT_LOOKUP] = "ip4-lookup",
+ [NAT44_ED_OUT2IN_NEXT_SLOW_PATH] = "nat44-ed-out2in-slowpath",
+ [NAT44_ED_OUT2IN_NEXT_ICMP_ERROR] = "ip4-icmp-error",
+ [NAT44_ED_OUT2IN_NEXT_IN2OUT] = "nat44-ed-in2out",
+ [NAT44_ED_OUT2IN_NEXT_REASS] = "nat44-ed-out2in-reass",
+ },
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH (nat44_ed_out2in_reass_node,
+ nat44_ed_out2in_reass_node_fn);
+
/**************************/
/*** deterministic mode ***/
/**************************/