summaryrefslogtreecommitdiffstats
path: root/src/plugins/map/ip4_map_t.c
diff options
context:
space:
mode:
authorKlement Sekera <ksekera@cisco.com>2019-09-23 09:00:30 +0000
committerOle Trøan <otroan@employees.org>2019-10-01 09:56:28 +0000
commit640edcd9016f2381ea1efeaab78e834068c0e862 (patch)
treee37599e1031fa8f4eb96f19b050a4f217ebbeed1 /src/plugins/map/ip4_map_t.c
parent839a7bce3872f992f4807ee6d6a76f9b361151dc (diff)
map: use SVR for MAP-T
This change is part of an effort to unify reassembly code. By removing shallow virtual reassembly functionality in MAP and using the common vnet provided shallow virtual reassembly, code size and complexity is reduced. Type: refactor Change-Id: Iec8edd039f7b967b53e17bb9bca228a8b452ac0c Signed-off-by: Klement Sekera <ksekera@cisco.com>
Diffstat (limited to 'src/plugins/map/ip4_map_t.c')
-rw-r--r--src/plugins/map/ip4_map_t.c79
1 file changed, 21 insertions, 58 deletions
diff --git a/src/plugins/map/ip4_map_t.c b/src/plugins/map/ip4_map_t.c
index 21d17d77910..2ab1af95922 100644
--- a/src/plugins/map/ip4_map_t.c
+++ b/src/plugins/map/ip4_map_t.c
@@ -62,41 +62,6 @@ typedef CLIB_PACKED (struct {
}) ip4_mapt_pseudo_header_t;
/* *INDENT-ON* */
-
-static_always_inline int
-ip4_map_fragment_cache (ip4_header_t * ip4, u16 port)
-{
- u32 *ignore = NULL;
- map_ip4_reass_lock ();
- map_ip4_reass_t *r =
- map_ip4_reass_get (ip4->src_address.as_u32, ip4->dst_address.as_u32,
- ip4->fragment_id,
- (ip4->protocol ==
- IP_PROTOCOL_ICMP) ? IP_PROTOCOL_ICMP6 : ip4->protocol,
- &ignore);
- if (r)
- r->port = port;
-
- map_ip4_reass_unlock ();
- return !r;
-}
-
-static_always_inline i32
-ip4_map_fragment_get_port (ip4_header_t * ip4)
-{
- u32 *ignore = NULL;
- map_ip4_reass_lock ();
- map_ip4_reass_t *r =
- map_ip4_reass_get (ip4->src_address.as_u32, ip4->dst_address.as_u32,
- ip4->fragment_id,
- (ip4->protocol ==
- IP_PROTOCOL_ICMP) ? IP_PROTOCOL_ICMP6 : ip4->protocol,
- &ignore);
- i32 ret = r ? r->port : -1;
- map_ip4_reass_unlock ();
- return ret;
-}
-
typedef struct
{
map_domain_t *d;
@@ -505,7 +470,7 @@ ip4_map_t_tcp_udp (vlib_main_t * vm,
static_always_inline void
ip4_map_t_classify (vlib_buffer_t * p0, map_domain_t * d0,
ip4_header_t * ip40, u16 ip4_len0, i32 * dst_port0,
- u8 * error0, ip4_mapt_next_t * next0)
+ u8 * error0, ip4_mapt_next_t * next0, u16 l4_dst_port)
{
if (PREDICT_FALSE (ip4_get_fragment_offset (ip40)))
{
@@ -516,7 +481,7 @@ ip4_map_t_classify (vlib_buffer_t * p0, map_domain_t * d0,
}
else
{
- *dst_port0 = ip4_map_fragment_get_port (ip40);
+ *dst_port0 = l4_dst_port;
*error0 = (*dst_port0 == -1) ? MAP_ERROR_FRAGMENT_MEMORY : *error0;
}
}
@@ -525,14 +490,14 @@ ip4_map_t_classify (vlib_buffer_t * p0, map_domain_t * d0,
vnet_buffer (p0)->map_t.checksum_offset = 36;
*next0 = IP4_MAPT_NEXT_MAPT_TCP_UDP;
*error0 = ip4_len0 < 40 ? MAP_ERROR_MALFORMED : *error0;
- *dst_port0 = (i32) * ((u16 *) u8_ptr_add (ip40, sizeof (*ip40) + 2));
+ *dst_port0 = l4_dst_port;
}
else if (PREDICT_TRUE (ip40->protocol == IP_PROTOCOL_UDP))
{
vnet_buffer (p0)->map_t.checksum_offset = 26;
*next0 = IP4_MAPT_NEXT_MAPT_TCP_UDP;
*error0 = ip4_len0 < 28 ? MAP_ERROR_MALFORMED : *error0;
- *dst_port0 = (i32) * ((u16 *) u8_ptr_add (ip40, sizeof (*ip40) + 2));
+ *dst_port0 = l4_dst_port;
}
else if (ip40->protocol == IP_PROTOCOL_ICMP)
{
@@ -544,7 +509,7 @@ ip4_map_t_classify (vlib_buffer_t * p0, map_domain_t * d0,
|| ((icmp46_header_t *)
u8_ptr_add (ip40,
sizeof (*ip40)))->code == ICMP4_echo_request)
- *dst_port0 = (i32) * ((u16 *) u8_ptr_add (ip40, sizeof (*ip40) + 6));
+ *dst_port0 = l4_dst_port;
}
else
{
@@ -588,6 +553,9 @@ ip4_map_t (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
error0 = MAP_ERROR_NONE;
p0 = vlib_get_buffer (vm, pi0);
+
+ u16 l4_dst_port = vnet_buffer (p0)->ip.reass.l4_dst_port;
+
ip40 = vlib_buffer_get_current (p0);
ip4_len0 = clib_host_to_net_u16 (ip40->length);
if (PREDICT_FALSE (p0->current_length < ip4_len0 ||
@@ -610,7 +578,7 @@ ip4_map_t (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
dst_port0 = -1;
ip4_map_t_classify (p0, d0, ip40, ip4_len0, &dst_port0, &error0,
- &next0);
+ &next0, l4_dst_port);
/* Verify that port is not among the well-known ports */
if ((d0->psid_length > 0 && d0->psid_offset > 0)
@@ -632,18 +600,6 @@ ip4_map_t (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
pheader0->daddr.as_u64[1] =
map_get_sfx_net (d0, ip40->dst_address.as_u32, (u16) dst_port0);
- // It is important to cache at this stage because the result
- // might be necessary for packets within the same vector.
- // Actually, this approach even provides some limited
- // out-of-order fragments support
- if (PREDICT_FALSE
- (ip4_is_first_fragment (ip40) && (dst_port0 != -1)
- && (d0->ea_bits_len != 0 || !d0->rules)
- && ip4_map_fragment_cache (ip40, dst_port0)))
- {
- error0 = MAP_ERROR_UNKNOWN;
- }
-
if (PREDICT_TRUE
(error0 == MAP_ERROR_NONE && next0 != IP4_MAPT_NEXT_MAPT_ICMP))
{
@@ -657,6 +613,11 @@ ip4_map_t (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
next0 = (error0 != MAP_ERROR_NONE) ? IP4_MAPT_NEXT_DROP : next0;
p0->error = error_node->errors[error0];
+
+ if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ map_add_trace (vm, node, p0, d0 - map_main.domains, dst_port0);
+ }
exit:
vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
to_next, n_left_to_next, pi0,
@@ -673,12 +634,14 @@ static char *map_t_error_strings[] = {
#undef _
};
-VNET_FEATURE_INIT (ip4_map_t_feature, static) =
-{
-.arc_name = "ip4-unicast",.node_name = "ip4-map-t",.runs_before =
- VNET_FEATURES ("ip4-flow-classify"),};
-
/* *INDENT-OFF* */
+VNET_FEATURE_INIT (ip4_map_t_feature, static) = {
+ .arc_name = "ip4-unicast",
+ .node_name = "ip4-map-t",
+ .runs_before = VNET_FEATURES ("ip4-flow-classify"),
+ .runs_after = VNET_FEATURES ("ip4-sv-reassembly-feature"),
+};
+
VLIB_REGISTER_NODE(ip4_map_t_fragmented_node) = {
.function = ip4_map_t_fragmented,
.name = "ip4-map-t-fragmented",