path: root/src/vnet/ip/reass
author    Klement Sekera <ksekera@cisco.com>  2021-08-02 16:14:15 +0200
committer Ole Trøan <otroan@employees.org>    2021-08-17 16:15:08 +0000
commit    42cec0e451219c13002a28aa0a93c96853fb8f b7 (patch)
tree      da953b5c9400c40a1abd4fc17f47e6bec3d4bd05 /src/vnet/ip/reass
parent    6da37696511c764d1ebaa4660ade2c0cac4ce713 (diff)
ip: reassembly cleanup
Remove unused parameters and fix warnings.

Type: fix

Signed-off-by: Klement Sekera <ksekera@cisco.com>
Change-Id: I2d0e7b84b56817999283ecb6be606159dcb26a28
Diffstat (limited to 'src/vnet/ip/reass')
-rw-r--r--  src/vnet/ip/reass/ip4_full_reass.c  126
-rw-r--r--  src/vnet/ip/reass/ip4_sv_reass.c     71
-rw-r--r--  src/vnet/ip/reass/ip6_full_reass.c   98
-rw-r--r--  src/vnet/ip/reass/ip6_sv_reass.c     90
4 files changed, 139 insertions, 246 deletions
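
Most of the change below follows one mechanical pattern: inline helpers such as
ip4_full_reass_drop_all and ip4_full_reass_add_trace stop taking the per-main
and per-thread pointers they never read, and every call site is updated to
match. A minimal standalone sketch of that pattern (hypothetical names, not
the VPP types):

/* Illustrative sketch only: a helper that used to accept a "main" pointer it
 * never touched now takes only what it reads, and the caller drops the extra
 * argument, which silences -Wunused-parameter. */
#include <stdio.h>

typedef struct { int first_bi; } reass_t;

static void
drop_all (reass_t *reass)
{
  printf ("dropping chain starting at buffer %d\n", reass->first_bi);
}

int
main (void)
{
  reass_t r = { .first_bi = 42 };
  drop_all (&r); /* call site updated to the shorter signature */
  return 0;
}
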
diff --git a/src/vnet/ip/reass/ip4_full_reass.c b/src/vnet/ip/reass/ip4_full_reass.c
index fda73a43a1b..d2069c0876c 100644
--- a/src/vnet/ip/reass/ip4_full_reass.c
+++ b/src/vnet/ip/reass/ip4_full_reass.c
@@ -335,7 +335,6 @@ format_ip4_full_reass_trace (u8 * s, va_list * args)
static void
ip4_full_reass_add_trace (vlib_main_t * vm, vlib_node_runtime_t * node,
- ip4_full_reass_main_t * rm,
ip4_full_reass_t * reass, u32 bi,
ip4_full_reass_trace_operation_e action,
u32 size_diff, u32 thread_id_to)
@@ -413,8 +412,8 @@ ip4_full_reass_free (ip4_full_reass_main_t * rm,
}
always_inline void
-ip4_full_reass_drop_all (vlib_main_t * vm, vlib_node_runtime_t * node,
- ip4_full_reass_main_t * rm, ip4_full_reass_t * reass)
+ip4_full_reass_drop_all (vlib_main_t *vm, vlib_node_runtime_t *node,
+ ip4_full_reass_t *reass)
{
u32 range_bi = reass->first_bi;
vlib_buffer_t *range_b;
@@ -511,7 +510,7 @@ again:
if (now > reass->last_heard + rm->timeout)
{
- ip4_full_reass_drop_all (vm, node, rm, reass);
+ ip4_full_reass_drop_all (vm, node, reass);
ip4_full_reass_free (rm, rt, reass);
reass = NULL;
}
@@ -717,8 +716,8 @@ ip4_full_reass_finalize (vlib_main_t * vm, vlib_node_runtime_t * node,
first_b->flags &= ~VLIB_BUFFER_EXT_HDR_VALID;
if (PREDICT_FALSE (first_b->flags & VLIB_BUFFER_IS_TRACED))
{
- ip4_full_reass_add_trace (vm, node, rm, reass, reass->first_bi,
- FINALIZE, 0, ~0);
+ ip4_full_reass_add_trace (vm, node, reass, reass->first_bi, FINALIZE, 0,
+ ~0);
#if 0
// following code does a hexdump of packet fragments to stdout ...
do
@@ -764,8 +763,6 @@ ip4_full_reass_finalize (vlib_main_t * vm, vlib_node_runtime_t * node,
always_inline ip4_full_reass_rc_t
ip4_full_reass_insert_range_in_chain (vlib_main_t * vm,
- ip4_full_reass_main_t * rm,
- ip4_full_reass_per_thread_t * rt,
ip4_full_reass_t * reass,
u32 prev_range_bi, u32 new_next_bi)
{
@@ -799,7 +796,6 @@ ip4_full_reass_insert_range_in_chain (vlib_main_t * vm,
always_inline ip4_full_reass_rc_t
ip4_full_reass_remove_range_from_chain (vlib_main_t * vm,
vlib_node_runtime_t * node,
- ip4_full_reass_main_t * rm,
ip4_full_reass_t * reass,
u32 prev_range_bi, u32 discard_bi)
{
@@ -831,8 +827,8 @@ ip4_full_reass_remove_range_from_chain (vlib_main_t * vm,
u32 to_be_freed_bi = discard_bi;
if (PREDICT_FALSE (discard_b->flags & VLIB_BUFFER_IS_TRACED))
{
- ip4_full_reass_add_trace (vm, node, rm, reass, discard_bi,
- RANGE_DISCARD, 0, ~0);
+ ip4_full_reass_add_trace (vm, node, reass, discard_bi, RANGE_DISCARD,
+ 0, ~0);
}
if (discard_b->flags & VLIB_BUFFER_NEXT_PRESENT)
{
@@ -890,16 +886,14 @@ ip4_full_reass_update (vlib_main_t * vm, vlib_node_runtime_t * node,
{
// starting a new reassembly
rc =
- ip4_full_reass_insert_range_in_chain (vm, rm, rt, reass,
- prev_range_bi, *bi0);
+ ip4_full_reass_insert_range_in_chain (vm, reass, prev_range_bi, *bi0);
if (IP4_REASS_RC_OK != rc)
{
return rc;
}
if (PREDICT_FALSE (fb->flags & VLIB_BUFFER_IS_TRACED))
{
- ip4_full_reass_add_trace (vm, node, rm, reass, *bi0, RANGE_NEW, 0,
- ~0);
+ ip4_full_reass_add_trace (vm, node, reass, *bi0, RANGE_NEW, 0, ~0);
}
*bi0 = ~0;
reass->min_fragment_length = clib_net_to_host_u16 (fip->length);
@@ -922,9 +916,8 @@ ip4_full_reass_update (vlib_main_t * vm, vlib_node_runtime_t * node,
~0 == candidate_range_bi)
{
// special case - this fragment falls beyond all known ranges
- rc =
- ip4_full_reass_insert_range_in_chain (vm, rm, rt, reass,
- prev_range_bi, *bi0);
+ rc = ip4_full_reass_insert_range_in_chain (vm, reass,
+ prev_range_bi, *bi0);
if (IP4_REASS_RC_OK != rc)
{
return rc;
@@ -937,9 +930,8 @@ ip4_full_reass_update (vlib_main_t * vm, vlib_node_runtime_t * node,
if (fragment_last < candidate_vnb->ip.reass.range_first)
{
// this fragment ends before candidate range without any overlap
- rc =
- ip4_full_reass_insert_range_in_chain (vm, rm, rt, reass,
- prev_range_bi, *bi0);
+ rc = ip4_full_reass_insert_range_in_chain (vm, reass, prev_range_bi,
+ *bi0);
if (IP4_REASS_RC_OK != rc)
{
return rc;
@@ -954,7 +946,7 @@ ip4_full_reass_update (vlib_main_t * vm, vlib_node_runtime_t * node,
// this fragment is a (sub)part of existing range, ignore it
if (PREDICT_FALSE (fb->flags & VLIB_BUFFER_IS_TRACED))
{
- ip4_full_reass_add_trace (vm, node, rm, reass, *bi0,
+ ip4_full_reass_add_trace (vm, node, reass, *bi0,
RANGE_OVERLAP, 0, ~0);
}
break;
@@ -974,14 +966,12 @@ ip4_full_reass_update (vlib_main_t * vm, vlib_node_runtime_t * node,
reass->data_len -= overlap;
if (PREDICT_FALSE (fb->flags & VLIB_BUFFER_IS_TRACED))
{
- ip4_full_reass_add_trace (vm, node, rm, reass,
+ ip4_full_reass_add_trace (vm, node, reass,
candidate_range_bi,
RANGE_SHRINK, 0, ~0);
}
- rc =
- ip4_full_reass_insert_range_in_chain (vm, rm, rt, reass,
- prev_range_bi,
- *bi0);
+ rc = ip4_full_reass_insert_range_in_chain (
+ vm, reass, prev_range_bi, *bi0);
if (IP4_REASS_RC_OK != rc)
{
return rc;
@@ -1010,11 +1000,8 @@ ip4_full_reass_update (vlib_main_t * vm, vlib_node_runtime_t * node,
else
{
// special case - last range discarded
- rc =
- ip4_full_reass_insert_range_in_chain (vm, rm, rt,
- reass,
- candidate_range_bi,
- *bi0);
+ rc = ip4_full_reass_insert_range_in_chain (
+ vm, reass, candidate_range_bi, *bi0);
if (IP4_REASS_RC_OK != rc)
{
return rc;
@@ -1035,10 +1022,8 @@ ip4_full_reass_update (vlib_main_t * vm, vlib_node_runtime_t * node,
{
u32 next_range_bi = candidate_vnb->ip.reass.next_range_bi;
// discard candidate range, probe next range
- rc =
- ip4_full_reass_remove_range_from_chain (vm, node, rm, reass,
- prev_range_bi,
- candidate_range_bi);
+ rc = ip4_full_reass_remove_range_from_chain (
+ vm, node, reass, prev_range_bi, candidate_range_bi);
if (IP4_REASS_RC_OK != rc)
{
return rc;
@@ -1051,10 +1036,8 @@ ip4_full_reass_update (vlib_main_t * vm, vlib_node_runtime_t * node,
else
{
// special case - last range discarded
- rc =
- ip4_full_reass_insert_range_in_chain (vm, rm, rt, reass,
- prev_range_bi,
- *bi0);
+ rc = ip4_full_reass_insert_range_in_chain (
+ vm, reass, prev_range_bi, *bi0);
if (IP4_REASS_RC_OK != rc)
{
return rc;
@@ -1070,8 +1053,7 @@ ip4_full_reass_update (vlib_main_t * vm, vlib_node_runtime_t * node,
{
if (PREDICT_FALSE (fb->flags & VLIB_BUFFER_IS_TRACED))
{
- ip4_full_reass_add_trace (vm, node, rm, reass, *bi0, RANGE_NEW, 0,
- ~0);
+ ip4_full_reass_add_trace (vm, node, reass, *bi0, RANGE_NEW, 0, ~0);
}
}
if (~0 != reass->last_packet_octet &&
@@ -1207,14 +1189,14 @@ ip4_full_reass_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
vlib_node_increment_counter (vm, node->node_index,
IP4_ERROR_REASS_FRAGMENT_CHAIN_TOO_LONG,
1);
- ip4_full_reass_drop_all (vm, node, rm, reass);
+ ip4_full_reass_drop_all (vm, node, reass);
ip4_full_reass_free (rm, rt, reass);
goto next_packet;
break;
case IP4_REASS_RC_NO_BUF:
vlib_node_increment_counter (vm, node->node_index,
IP4_ERROR_REASS_NO_BUF, 1);
- ip4_full_reass_drop_all (vm, node, rm, reass);
+ ip4_full_reass_drop_all (vm, node, reass);
ip4_full_reass_free (rm, rt, reass);
goto next_packet;
break;
@@ -1223,7 +1205,7 @@ ip4_full_reass_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
vlib_node_increment_counter (vm, node->node_index,
IP4_ERROR_REASS_INTERNAL_ERROR,
1);
- ip4_full_reass_drop_all (vm, node, rm, reass);
+ ip4_full_reass_drop_all (vm, node, reass);
ip4_full_reass_free (rm, rt, reass);
goto next_packet;
break;
@@ -1255,10 +1237,9 @@ ip4_full_reass_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
{
if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
{
- ip4_full_reass_add_trace (vm, node, rm, NULL, bi0,
- HANDOFF, 0,
- vnet_buffer (b0)->ip.
- reass.owner_thread_index);
+ ip4_full_reass_add_trace (
+ vm, node, NULL, bi0, HANDOFF, 0,
+ vnet_buffer (b0)->ip.reass.owner_thread_index);
}
}
else if (FEATURE == type && IP4_ERROR_NONE == error0)
@@ -1296,7 +1277,6 @@ VLIB_NODE_FN (ip4_full_reass_node) (vlib_main_t * vm,
return ip4_full_reass_inline (vm, node, frame, NORMAL);
}
-/* *INDENT-OFF* */
VLIB_REGISTER_NODE (ip4_full_reass_node) = {
.name = "ip4-full-reassembly",
.vector_size = sizeof (u32),
@@ -1312,7 +1292,6 @@ VLIB_REGISTER_NODE (ip4_full_reass_node) = {
},
};
-/* *INDENT-ON* */
VLIB_NODE_FN (ip4_full_reass_node_feature) (vlib_main_t * vm,
vlib_node_runtime_t * node,
@@ -1321,7 +1300,6 @@ VLIB_NODE_FN (ip4_full_reass_node_feature) (vlib_main_t * vm,
return ip4_full_reass_inline (vm, node, frame, FEATURE);
}
-/* *INDENT-OFF* */
VLIB_REGISTER_NODE (ip4_full_reass_node_feature) = {
.name = "ip4-full-reassembly-feature",
.vector_size = sizeof (u32),
@@ -1336,9 +1314,7 @@ VLIB_REGISTER_NODE (ip4_full_reass_node_feature) = {
[IP4_FULL_REASS_NEXT_HANDOFF] = "ip4-full-reass-feature-hoff",
},
};
-/* *INDENT-ON* */
-/* *INDENT-OFF* */
VNET_FEATURE_INIT (ip4_full_reass_feature, static) = {
.arc_name = "ip4-unicast",
.node_name = "ip4-full-reassembly-feature",
@@ -1346,7 +1322,6 @@ VNET_FEATURE_INIT (ip4_full_reass_feature, static) = {
"ipsec4-input-feature"),
.runs_after = 0,
};
-/* *INDENT-ON* */
VLIB_NODE_FN (ip4_full_reass_node_custom) (vlib_main_t * vm,
vlib_node_runtime_t * node,
@@ -1355,7 +1330,6 @@ VLIB_NODE_FN (ip4_full_reass_node_custom) (vlib_main_t * vm,
return ip4_full_reass_inline (vm, node, frame, CUSTOM);
}
-/* *INDENT-OFF* */
VLIB_REGISTER_NODE (ip4_full_reass_node_custom) = {
.name = "ip4-full-reassembly-custom",
.vector_size = sizeof (u32),
@@ -1370,9 +1344,7 @@ VLIB_REGISTER_NODE (ip4_full_reass_node_custom) = {
[IP4_FULL_REASS_NEXT_HANDOFF] = "ip4-full-reass-custom-hoff",
},
};
-/* *INDENT-ON* */
-/* *INDENT-OFF* */
VNET_FEATURE_INIT (ip4_full_reass_custom, static) = {
.arc_name = "ip4-unicast",
.node_name = "ip4-full-reassembly-feature",
@@ -1381,7 +1353,6 @@ VNET_FEATURE_INIT (ip4_full_reass_custom, static) = {
.runs_after = 0,
};
-/* *INDENT-ON* */
#ifndef CLIB_MARCH_VARIANT
uword
@@ -1542,8 +1513,8 @@ VLIB_INIT_FUNCTION (ip4_full_reass_init_function);
#endif /* CLIB_MARCH_VARIANT */
static uword
-ip4_full_reass_walk_expired (vlib_main_t * vm,
- vlib_node_runtime_t * node, vlib_frame_t * f)
+ip4_full_reass_walk_expired (vlib_main_t *vm, vlib_node_runtime_t *node,
+ CLIB_UNUSED (vlib_frame_t *f))
{
ip4_full_reass_main_t *rm = &ip4_full_reass_main;
uword event_type, *event_data = 0;
@@ -1558,10 +1529,11 @@ ip4_full_reass_walk_expired (vlib_main_t * vm,
switch (event_type)
{
- case ~0: /* no events => timeout */
- /* nothing to do here */
- break;
+ case ~0:
+ /* no events => timeout */
+ /* fallthrough */
case IP4_EVENT_CONFIG_CHANGED:
+ /* nothing to do here */
break;
default:
clib_warning ("BUG: event type 0x%wx", event_type);
@@ -1582,7 +1554,6 @@ ip4_full_reass_walk_expired (vlib_main_t * vm,
clib_spinlock_lock (&rt->lock);
vec_reset_length (pool_indexes_to_free);
- /* *INDENT-OFF* */
pool_foreach_index (index, rt->pool) {
reass = pool_elt_at_index (rt->pool, index);
if (now > reass->last_heard + rm->timeout)
@@ -1590,16 +1561,13 @@ ip4_full_reass_walk_expired (vlib_main_t * vm,
vec_add1 (pool_indexes_to_free, index);
}
}
- /* *INDENT-ON* */
int *i;
- /* *INDENT-OFF* */
vec_foreach (i, pool_indexes_to_free)
{
ip4_full_reass_t *reass = pool_elt_at_index (rt->pool, i[0]);
- ip4_full_reass_drop_all (vm, node, rm, reass);
- ip4_full_reass_free (rm, rt, reass);
- }
- /* *INDENT-ON* */
+ ip4_full_reass_drop_all (vm, node, reass);
+ ip4_full_reass_free (rm, rt, reass);
+ }
clib_spinlock_unlock (&rt->lock);
}
@@ -1614,7 +1582,6 @@ ip4_full_reass_walk_expired (vlib_main_t * vm,
return 0;
}
-/* *INDENT-OFF* */
VLIB_REGISTER_NODE (ip4_full_reass_expire_node) = {
.function = ip4_full_reass_walk_expired,
.type = VLIB_NODE_TYPE_PROCESS,
@@ -1624,7 +1591,6 @@ VLIB_REGISTER_NODE (ip4_full_reass_expire_node) = {
.error_strings = ip4_full_reass_error_strings,
};
-/* *INDENT-ON* */
static u8 *
format_ip4_full_reass_key (u8 * s, va_list * args)
@@ -1702,11 +1668,9 @@ show_ip4_reass (vlib_main_t * vm,
clib_spinlock_lock (&rt->lock);
if (details)
{
- /* *INDENT-OFF* */
pool_foreach (reass, rt->pool) {
vlib_cli_output (vm, "%U", format_ip4_reass, vm, reass);
}
- /* *INDENT-ON* */
}
sum_reass_n += rt->reass_n;
clib_spinlock_unlock (&rt->lock);
@@ -1730,13 +1694,11 @@ show_ip4_reass (vlib_main_t * vm,
return 0;
}
-/* *INDENT-OFF* */
VLIB_CLI_COMMAND (show_ip4_full_reass_cmd, static) = {
.path = "show ip4-full-reassembly",
.short_help = "show ip4-full-reassembly [details]",
.function = show_ip4_reass,
};
-/* *INDENT-ON* */
#ifndef CLIB_MARCH_VARIANT
vnet_api_error_t
@@ -1858,7 +1820,6 @@ VLIB_NODE_FN (ip4_full_reass_handoff_node) (vlib_main_t * vm,
}
-/* *INDENT-OFF* */
VLIB_REGISTER_NODE (ip4_full_reass_handoff_node) = {
.name = "ip4-full-reassembly-handoff",
.vector_size = sizeof (u32),
@@ -1872,10 +1833,8 @@ VLIB_REGISTER_NODE (ip4_full_reass_handoff_node) = {
[0] = "error-drop",
},
};
-/* *INDENT-ON* */
-/* *INDENT-OFF* */
VLIB_NODE_FN (ip4_full_reass_feature_handoff_node) (vlib_main_t * vm,
vlib_node_runtime_t *
node,
@@ -1883,10 +1842,8 @@ VLIB_NODE_FN (ip4_full_reass_feature_handoff_node) (vlib_main_t * vm,
{
return ip4_full_reass_handoff_node_inline (vm, node, frame, FEATURE);
}
-/* *INDENT-ON* */
-/* *INDENT-OFF* */
VLIB_REGISTER_NODE (ip4_full_reass_feature_handoff_node) = {
.name = "ip4-full-reass-feature-hoff",
.vector_size = sizeof (u32),
@@ -1900,9 +1857,7 @@ VLIB_REGISTER_NODE (ip4_full_reass_feature_handoff_node) = {
[0] = "error-drop",
},
};
-/* *INDENT-ON* */
-/* *INDENT-OFF* */
VLIB_NODE_FN (ip4_full_reass_custom_handoff_node) (vlib_main_t * vm,
vlib_node_runtime_t *
node,
@@ -1910,10 +1865,8 @@ VLIB_NODE_FN (ip4_full_reass_custom_handoff_node) (vlib_main_t * vm,
{
return ip4_full_reass_handoff_node_inline (vm, node, frame, CUSTOM);
}
-/* *INDENT-ON* */
-/* *INDENT-OFF* */
VLIB_REGISTER_NODE (ip4_full_reass_custom_handoff_node) = {
.name = "ip4-full-reass-custom-hoff",
.vector_size = sizeof (u32),
@@ -1927,7 +1880,6 @@ VLIB_REGISTER_NODE (ip4_full_reass_custom_handoff_node) = {
[0] = "error-drop",
},
};
-/* *INDENT-ON* */
#ifndef CLIB_MARCH_VARIANT
int
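
The expiry-walk process nodes above keep the vlib_frame_t argument (and, in
the shallow-virtual variants, the node argument) for signature compatibility
but wrap it in CLIB_UNUSED instead of leaving it to warn. A rough standalone
sketch of that annotation style, using a local stand-in macro assumed to
mirror the usual __attribute__ ((unused)) trick rather than VPP's own
definition:

/* Stand-in for a CLIB_UNUSED-style annotation: keeps a parameter that the
 * callback signature requires without tripping -Wunused-parameter. */
#include <stdio.h>

#define MY_UNUSED(x) x __attribute__ ((unused))

static unsigned long
walk_expired (void *vm, MY_UNUSED (void *node), MY_UNUSED (void *frame))
{
  printf ("walking expired reassemblies, vm=%p\n", vm);
  return 0;
}

int
main (void)
{
  printf ("%lu\n", walk_expired ((void *) 0, (void *) 0, (void *) 0));
  return 0;
}
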
diff --git a/src/vnet/ip/reass/ip4_sv_reass.c b/src/vnet/ip/reass/ip4_sv_reass.c
index 9b3f1b98558..cd5e19b65d3 100644
--- a/src/vnet/ip/reass/ip4_sv_reass.c
+++ b/src/vnet/ip/reass/ip4_sv_reass.c
@@ -229,10 +229,10 @@ format_ip4_sv_reass_trace (u8 * s, va_list * args)
}
static void
-ip4_sv_reass_add_trace (vlib_main_t * vm, vlib_node_runtime_t * node,
- ip4_sv_reass_main_t * rm, ip4_sv_reass_t * reass,
- u32 bi, ip4_sv_reass_trace_operation_e action,
- u32 ip_proto, u16 l4_src_port, u16 l4_dst_port)
+ip4_sv_reass_add_trace (vlib_main_t *vm, vlib_node_runtime_t *node,
+ ip4_sv_reass_t *reass, u32 bi,
+ ip4_sv_reass_trace_operation_e action, u32 ip_proto,
+ u16 l4_src_port, u16 l4_dst_port)
{
vlib_buffer_t *b = vlib_get_buffer (vm, bi);
if (pool_is_free_index
@@ -378,9 +378,9 @@ ip4_sv_reass_find_or_create (vlib_main_t * vm, ip4_sv_reass_main_t * rm,
}
always_inline ip4_sv_reass_rc_t
-ip4_sv_reass_update (vlib_main_t * vm, vlib_node_runtime_t * node,
- ip4_sv_reass_main_t * rm, ip4_sv_reass_per_thread_t * rt,
- ip4_header_t * ip0, ip4_sv_reass_t * reass, u32 bi0)
+ip4_sv_reass_update (vlib_main_t *vm, vlib_node_runtime_t *node,
+ ip4_sv_reass_main_t *rm, ip4_header_t *ip0,
+ ip4_sv_reass_t *reass, u32 bi0)
{
vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
ip4_sv_reass_rc_t rc = IP4_SV_REASS_RC_OK;
@@ -407,7 +407,7 @@ ip4_sv_reass_update (vlib_main_t * vm, vlib_node_runtime_t * node,
vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
{
- ip4_sv_reass_add_trace (vm, node, rm, reass, bi0, REASS_FINISH,
+ ip4_sv_reass_add_trace (vm, node, reass, bi0, REASS_FINISH,
reass->ip_proto, reass->l4_src_port,
reass->l4_dst_port);
}
@@ -417,8 +417,8 @@ ip4_sv_reass_update (vlib_main_t * vm, vlib_node_runtime_t * node,
{
if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
{
- ip4_sv_reass_add_trace (vm, node, rm, reass, bi0,
- REASS_FRAGMENT_CACHE, ~0, ~0, ~0);
+ ip4_sv_reass_add_trace (vm, node, reass, bi0, REASS_FRAGMENT_CACHE,
+ ~0, ~0, ~0);
}
if (vec_len (reass->cached_buffers) > rm->max_reass_len)
{
@@ -524,7 +524,7 @@ ip4_sv_reass_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
vnet_buffer (b0)->ip.reass.l4_dst_port = ip4_get_port (ip0, 0);
if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
{
- ip4_sv_reass_add_trace (vm, node, rm, NULL, from[(b - 2) - bufs],
+ ip4_sv_reass_add_trace (vm, node, NULL, from[(b - 2) - bufs],
REASS_PASSTHROUGH,
vnet_buffer (b0)->ip.reass.ip_proto,
vnet_buffer (b0)->ip.reass.l4_src_port,
@@ -559,7 +559,7 @@ ip4_sv_reass_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
vnet_buffer (b1)->ip.reass.l4_dst_port = ip4_get_port (ip1, 0);
if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
{
- ip4_sv_reass_add_trace (vm, node, rm, NULL, from[(b - 1) - bufs],
+ ip4_sv_reass_add_trace (vm, node, NULL, from[(b - 1) - bufs],
REASS_PASSTHROUGH,
vnet_buffer (b1)->ip.reass.ip_proto,
vnet_buffer (b1)->ip.reass.l4_src_port,
@@ -626,7 +626,7 @@ ip4_sv_reass_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
vnet_buffer (b0)->ip.reass.l4_dst_port = ip4_get_port (ip0, 0);
if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
{
- ip4_sv_reass_add_trace (vm, node, rm, NULL, from[(b - 1) - bufs],
+ ip4_sv_reass_add_trace (vm, node, NULL, from[(b - 1) - bufs],
REASS_PASSTHROUGH,
vnet_buffer (b0)->ip.reass.ip_proto,
vnet_buffer (b0)->ip.reass.l4_src_port,
@@ -697,13 +697,11 @@ slow_path:
vnet_buffer (b0)->ip.reass.l4_dst_port = ip4_get_port (ip0, 0);
if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
{
- ip4_sv_reass_add_trace (vm, node, rm, NULL, bi0,
- REASS_PASSTHROUGH,
- vnet_buffer (b0)->ip.reass.ip_proto,
- vnet_buffer (b0)->ip.
- reass.l4_src_port,
- vnet_buffer (b0)->ip.
- reass.l4_dst_port);
+ ip4_sv_reass_add_trace (
+ vm, node, NULL, bi0, REASS_PASSTHROUGH,
+ vnet_buffer (b0)->ip.reass.ip_proto,
+ vnet_buffer (b0)->ip.reass.l4_src_port,
+ vnet_buffer (b0)->ip.reass.l4_dst_port);
}
goto packet_enqueue;
}
@@ -771,17 +769,15 @@ slow_path:
vnet_buffer (b0)->ip.reass.l4_dst_port = reass->l4_dst_port;
if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
{
- ip4_sv_reass_add_trace (vm, node, rm, reass, bi0,
- REASS_FRAGMENT_FORWARD,
- reass->ip_proto,
- reass->l4_src_port,
- reass->l4_dst_port);
+ ip4_sv_reass_add_trace (
+ vm, node, reass, bi0, REASS_FRAGMENT_FORWARD,
+ reass->ip_proto, reass->l4_src_port, reass->l4_dst_port);
}
goto packet_enqueue;
}
ip4_sv_reass_rc_t rc =
- ip4_sv_reass_update (vm, node, rm, rt, ip0, reass, bi0);
+ ip4_sv_reass_update (vm, node, rm, ip0, reass, bi0);
switch (rc)
{
case IP4_SV_REASS_RC_OK:
@@ -796,8 +792,7 @@ slow_path:
break;
case IP4_SV_REASS_RC_UNSUPP_IP_PROTO:
vlib_node_increment_counter (vm, node->node_index,
- IP4_ERROR_REASS_FRAGMENT_CHAIN_TOO_LONG,
- 1);
+ IP4_ERROR_REASS_UNSUPP_IP_PROT, 1);
ip4_sv_reass_free (vm, rm, rt, reass);
goto next_packet;
break;
@@ -846,11 +841,9 @@ slow_path:
vnet_buffer (b0)->ip.reass.l4_dst_port = reass->l4_dst_port;
if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
{
- ip4_sv_reass_add_trace (vm, node, rm, reass, bi0,
- REASS_FRAGMENT_FORWARD,
- reass->ip_proto,
- reass->l4_src_port,
- reass->l4_dst_port);
+ ip4_sv_reass_add_trace (
+ vm, node, reass, bi0, REASS_FRAGMENT_FORWARD,
+ reass->ip_proto, reass->l4_src_port, reass->l4_dst_port);
}
vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
to_next, n_left_to_next, bi0,
@@ -1170,8 +1163,9 @@ VLIB_INIT_FUNCTION (ip4_sv_reass_init_function);
#endif /* CLIB_MARCH_VARIANT */
static uword
-ip4_sv_reass_walk_expired (vlib_main_t * vm,
- vlib_node_runtime_t * node, vlib_frame_t * f)
+ip4_sv_reass_walk_expired (vlib_main_t *vm,
+ CLIB_UNUSED (vlib_node_runtime_t *node),
+ CLIB_UNUSED (vlib_frame_t *f))
{
ip4_sv_reass_main_t *rm = &ip4_sv_reass_main;
uword event_type, *event_data = 0;
@@ -1186,10 +1180,11 @@ ip4_sv_reass_walk_expired (vlib_main_t * vm,
switch (event_type)
{
- case ~0: /* no events => timeout */
- /* nothing to do here */
- break;
+ case ~0:
+ /* no events => timeout */
+ /* fallthrough */
case IP4_EVENT_CONFIG_CHANGED:
+ /* nothing to do here */
break;
default:
clib_warning ("BUG: event type 0x%wx", event_type);
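
Each *_walk_expired loop also restructures its event switch: the
"no events => timeout" case now falls through to the config-changed case
instead of duplicating an empty branch. A small self-contained sketch of that
control flow (the event code used here is made up):

/* Sketch of the fallthrough-style event switch. */
#include <stdio.h>

#define EVENT_CONFIG_CHANGED 1UL

static void
handle_event (unsigned long event_type)
{
  switch (event_type)
    {
    case ~0UL:
      /* no events => timeout */
      /* fallthrough */
    case EVENT_CONFIG_CHANGED:
      /* nothing to do here */
      break;
    default:
      printf ("BUG: event type 0x%lx\n", event_type);
      break;
    }
}

int
main (void)
{
  handle_event (~0UL);                 /* timeout path */
  handle_event (EVENT_CONFIG_CHANGED); /* config change, same no-op */
  handle_event (7);                    /* unexpected event hits the BUG branch */
  return 0;
}
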
diff --git a/src/vnet/ip/reass/ip6_full_reass.c b/src/vnet/ip/reass/ip6_full_reass.c
index 67505689bca..9ec40cd347c 100644
--- a/src/vnet/ip/reass/ip6_full_reass.c
+++ b/src/vnet/ip/reass/ip6_full_reass.c
@@ -310,7 +310,6 @@ format_ip6_full_reass_trace (u8 * s, va_list * args)
static void
ip6_full_reass_add_trace (vlib_main_t * vm, vlib_node_runtime_t * node,
- ip6_full_reass_main_t * rm,
ip6_full_reass_t * reass, u32 bi,
ip6_frag_hdr_t * ip6_frag_header,
ip6_full_reass_trace_operation_e action,
@@ -398,8 +397,8 @@ ip6_full_reass_free (ip6_full_reass_main_t * rm,
}
always_inline void
-ip6_full_reass_drop_all (vlib_main_t * vm, vlib_node_runtime_t * node,
- ip6_full_reass_main_t * rm, ip6_full_reass_t * reass)
+ip6_full_reass_drop_all (vlib_main_t *vm, vlib_node_runtime_t *node,
+ ip6_full_reass_t *reass)
{
u32 range_bi = reass->first_bi;
vlib_buffer_t *range_b;
@@ -461,7 +460,6 @@ ip6_full_reass_drop_all (vlib_main_t * vm, vlib_node_runtime_t * node,
always_inline void
ip6_full_reass_on_timeout (vlib_main_t * vm, vlib_node_runtime_t * node,
- ip6_full_reass_main_t * rm,
ip6_full_reass_t * reass, u32 * icmp_bi)
{
if (~0 == reass->first_bi)
@@ -476,8 +474,8 @@ ip6_full_reass_on_timeout (vlib_main_t * vm, vlib_node_runtime_t * node,
*icmp_bi = reass->first_bi;
if (PREDICT_FALSE (b->flags & VLIB_BUFFER_IS_TRACED))
{
- ip6_full_reass_add_trace (vm, node, rm, reass, reass->first_bi,
- NULL, ICMP_ERROR_RT_EXCEEDED, ~0);
+ ip6_full_reass_add_trace (vm, node, reass, reass->first_bi, NULL,
+ ICMP_ERROR_RT_EXCEEDED, ~0);
}
// fragment with offset zero received - send icmp message back
if (b->flags & VLIB_BUFFER_NEXT_PRESENT)
@@ -495,7 +493,7 @@ ip6_full_reass_on_timeout (vlib_main_t * vm, vlib_node_runtime_t * node,
0);
}
}
- ip6_full_reass_drop_all (vm, node, rm, reass);
+ ip6_full_reass_drop_all (vm, node, reass);
}
always_inline ip6_full_reass_t *
@@ -528,7 +526,7 @@ again:
if (now > reass->last_heard + rm->timeout)
{
- ip6_full_reass_on_timeout (vm, node, rm, reass, icmp_bi);
+ ip6_full_reass_on_timeout (vm, node, reass, icmp_bi);
ip6_full_reass_free (rm, rt, reass);
reass = NULL;
}
@@ -763,7 +761,7 @@ ip6_full_reass_finalize (vlib_main_t * vm, vlib_node_runtime_t * node,
first_b->flags &= ~VLIB_BUFFER_EXT_HDR_VALID;
if (PREDICT_FALSE (first_b->flags & VLIB_BUFFER_IS_TRACED))
{
- ip6_full_reass_add_trace (vm, node, rm, reass, reass->first_bi, NULL,
+ ip6_full_reass_add_trace (vm, node, reass, reass->first_bi, NULL,
FINALIZE, ~0);
#if 0
// following code does a hexdump of packet fragments to stdout ...
@@ -811,8 +809,6 @@ free_buffers_and_return:
always_inline void
ip6_full_reass_insert_range_in_chain (vlib_main_t * vm,
- ip6_full_reass_main_t * rm,
- ip6_full_reass_per_thread_t * rt,
ip6_full_reass_t * reass,
u32 prev_range_bi, u32 new_next_bi)
{
@@ -884,8 +880,7 @@ ip6_full_reass_update (vlib_main_t * vm, vlib_node_runtime_t * node,
if (~0 == reass->first_bi)
{
// starting a new reassembly
- ip6_full_reass_insert_range_in_chain (vm, rm, rt, reass, prev_range_bi,
- *bi0);
+ ip6_full_reass_insert_range_in_chain (vm, reass, prev_range_bi, *bi0);
reass->min_fragment_length = clib_net_to_host_u16 (fip->payload_length);
consumed = 1;
reass->fragments_n = 1;
@@ -907,8 +902,8 @@ ip6_full_reass_update (vlib_main_t * vm, vlib_node_runtime_t * node,
~0 == candidate_range_bi)
{
// special case - this fragment falls beyond all known ranges
- ip6_full_reass_insert_range_in_chain (vm, rm, rt, reass,
- prev_range_bi, *bi0);
+ ip6_full_reass_insert_range_in_chain (vm, reass, prev_range_bi,
+ *bi0);
consumed = 1;
break;
}
@@ -917,8 +912,8 @@ ip6_full_reass_update (vlib_main_t * vm, vlib_node_runtime_t * node,
if (fragment_last < candidate_vnb->ip.reass.range_first)
{
// this fragment ends before candidate range without any overlap
- ip6_full_reass_insert_range_in_chain (vm, rm, rt, reass,
- prev_range_bi, *bi0);
+ ip6_full_reass_insert_range_in_chain (vm, reass, prev_range_bi,
+ *bi0);
consumed = 1;
}
else if (fragment_first == candidate_vnb->ip.reass.range_first &&
@@ -931,10 +926,10 @@ ip6_full_reass_update (vlib_main_t * vm, vlib_node_runtime_t * node,
// overlapping fragment - not allowed by RFC 8200
if (PREDICT_FALSE (fb->flags & VLIB_BUFFER_IS_TRACED))
{
- ip6_full_reass_add_trace (vm, node, rm, reass, *bi0, frag_hdr,
+ ip6_full_reass_add_trace (vm, node, reass, *bi0, frag_hdr,
RANGE_OVERLAP, ~0);
}
- ip6_full_reass_drop_all (vm, node, rm, reass);
+ ip6_full_reass_drop_all (vm, node, reass);
ip6_full_reass_free (rm, rt, reass);
*next0 = IP6_FULL_REASSEMBLY_NEXT_DROP;
*error0 = IP6_ERROR_REASS_OVERLAPPING_FRAGMENT;
@@ -948,8 +943,8 @@ check_if_done_maybe:
{
if (PREDICT_FALSE (fb->flags & VLIB_BUFFER_IS_TRACED))
{
- ip6_full_reass_add_trace (vm, node, rm, reass, *bi0, frag_hdr,
- RANGE_NEW, ~0);
+ ip6_full_reass_add_trace (vm, node, reass, *bi0, frag_hdr, RANGE_NEW,
+ ~0);
}
}
if (~0 != reass->last_packet_octet &&
@@ -1010,7 +1005,6 @@ ip6_full_reass_verify_upper_layer_present (vlib_node_runtime_t * node,
always_inline bool
ip6_full_reass_verify_fragment_multiple_8 (vlib_main_t * vm,
- vlib_node_runtime_t * node,
vlib_buffer_t * b,
ip6_frag_hdr_t * frag_hdr)
{
@@ -1032,7 +1026,6 @@ ip6_full_reass_verify_fragment_multiple_8 (vlib_main_t * vm,
always_inline bool
ip6_full_reass_verify_packet_size_lt_64k (vlib_main_t * vm,
- vlib_node_runtime_t * node,
vlib_buffer_t * b,
ip6_frag_hdr_t * frag_hdr)
{
@@ -1111,10 +1104,8 @@ ip6_full_reassembly_inline (vlib_main_t * vm,
goto skip_reass;
}
}
- if (!ip6_full_reass_verify_fragment_multiple_8
- (vm, node, b0, frag_hdr)
- || !ip6_full_reass_verify_packet_size_lt_64k (vm, node, b0,
- frag_hdr))
+ if (!ip6_full_reass_verify_fragment_multiple_8 (vm, b0, frag_hdr) ||
+ !ip6_full_reass_verify_packet_size_lt_64k (vm, b0, frag_hdr))
{
next0 = IP6_FULL_REASSEMBLY_NEXT_ICMP_ERROR;
goto skip_reass;
@@ -1170,14 +1161,14 @@ ip6_full_reassembly_inline (vlib_main_t * vm,
vlib_node_increment_counter (vm, node->node_index,
IP6_ERROR_REASS_FRAGMENT_CHAIN_TOO_LONG,
1);
- ip6_full_reass_drop_all (vm, node, rm, reass);
+ ip6_full_reass_drop_all (vm, node, reass);
ip6_full_reass_free (rm, rt, reass);
goto next_packet;
break;
case IP6_FULL_REASS_RC_NO_BUF:
vlib_node_increment_counter (vm, node->node_index,
IP6_ERROR_REASS_NO_BUF, 1);
- ip6_full_reass_drop_all (vm, node, rm, reass);
+ ip6_full_reass_drop_all (vm, node, reass);
ip6_full_reass_free (rm, rt, reass);
goto next_packet;
break;
@@ -1185,7 +1176,7 @@ ip6_full_reassembly_inline (vlib_main_t * vm,
vlib_node_increment_counter (vm, node->node_index,
IP6_ERROR_REASS_INTERNAL_ERROR,
1);
- ip6_full_reass_drop_all (vm, node, rm, reass);
+ ip6_full_reass_drop_all (vm, node, reass);
ip6_full_reass_free (rm, rt, reass);
goto next_packet;
break;
@@ -1223,10 +1214,9 @@ ip6_full_reassembly_inline (vlib_main_t * vm,
{
if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
{
- ip6_full_reass_add_trace (vm, node, rm, NULL, bi0,
- frag_hdr, HANDOFF,
- vnet_buffer (b0)->ip.
- reass.owner_thread_index);
+ ip6_full_reass_add_trace (
+ vm, node, NULL, bi0, frag_hdr, HANDOFF,
+ vnet_buffer (b0)->ip.reass.owner_thread_index);
}
}
else if (is_feature && IP6_ERROR_NONE == error0)
@@ -1273,7 +1263,6 @@ VLIB_NODE_FN (ip6_full_reass_node) (vlib_main_t * vm,
false /* is_custom_app */ );
}
-/* *INDENT-OFF* */
VLIB_REGISTER_NODE (ip6_full_reass_node) = {
.name = "ip6-full-reassembly",
.vector_size = sizeof (u32),
@@ -1289,7 +1278,6 @@ VLIB_REGISTER_NODE (ip6_full_reass_node) = {
[IP6_FULL_REASSEMBLY_NEXT_HANDOFF] = "ip6-full-reassembly-handoff",
},
};
-/* *INDENT-ON* */
VLIB_NODE_FN (ip6_full_reass_node_feature) (vlib_main_t * vm,
vlib_node_runtime_t * node,
@@ -1299,7 +1287,6 @@ VLIB_NODE_FN (ip6_full_reass_node_feature) (vlib_main_t * vm,
false /* is_custom_app */ );
}
-/* *INDENT-OFF* */
VLIB_REGISTER_NODE (ip6_full_reass_node_feature) = {
.name = "ip6-full-reassembly-feature",
.vector_size = sizeof (u32),
@@ -1315,9 +1302,7 @@ VLIB_REGISTER_NODE (ip6_full_reass_node_feature) = {
[IP6_FULL_REASSEMBLY_NEXT_HANDOFF] = "ip6-full-reass-feature-hoff",
},
};
-/* *INDENT-ON* */
-/* *INDENT-OFF* */
VNET_FEATURE_INIT (ip6_full_reassembly_feature, static) = {
.arc_name = "ip6-unicast",
.node_name = "ip6-full-reassembly-feature",
@@ -1325,7 +1310,6 @@ VNET_FEATURE_INIT (ip6_full_reassembly_feature, static) = {
"ipsec6-input-feature"),
.runs_after = 0,
};
-/* *INDENT-ON* */
#ifndef CLIB_MARCH_VARIANT
static u32
@@ -1486,8 +1470,8 @@ VLIB_INIT_FUNCTION (ip6_full_reass_init_function);
#endif /* CLIB_MARCH_VARIANT */
static uword
-ip6_full_reass_walk_expired (vlib_main_t * vm,
- vlib_node_runtime_t * node, vlib_frame_t * f)
+ip6_full_reass_walk_expired (vlib_main_t *vm, vlib_node_runtime_t *node,
+ CLIB_UNUSED (vlib_frame_t *f))
{
ip6_full_reass_main_t *rm = &ip6_full_reass_main;
uword event_type, *event_data = 0;
@@ -1501,10 +1485,11 @@ ip6_full_reass_walk_expired (vlib_main_t * vm,
switch (event_type)
{
- case ~0: /* no events => timeout */
- /* nothing to do here */
- break;
+ case ~0:
+ /* no events => timeout */
+ /* fallthrough */
case IP6_EVENT_CONFIG_CHANGED:
+ /* nothing to do here */
break;
default:
clib_warning ("BUG: event type 0x%wx", event_type);
@@ -1526,7 +1511,6 @@ ip6_full_reass_walk_expired (vlib_main_t * vm,
clib_spinlock_lock (&rt->lock);
vec_reset_length (pool_indexes_to_free);
- /* *INDENT-OFF* */
pool_foreach_index (index, rt->pool) {
reass = pool_elt_at_index (rt->pool, index);
if (now > reass->last_heard + rm->timeout)
@@ -1534,20 +1518,17 @@ ip6_full_reass_walk_expired (vlib_main_t * vm,
vec_add1 (pool_indexes_to_free, index);
}
}
- /* *INDENT-ON* */
int *i;
- /* *INDENT-OFF* */
vec_foreach (i, pool_indexes_to_free)
{
ip6_full_reass_t *reass = pool_elt_at_index (rt->pool, i[0]);
u32 icmp_bi = ~0;
- ip6_full_reass_on_timeout (vm, node, rm, reass, &icmp_bi);
- if (~0 != icmp_bi)
- vec_add1 (vec_icmp_bi, icmp_bi);
+ ip6_full_reass_on_timeout (vm, node, reass, &icmp_bi);
+ if (~0 != icmp_bi)
+ vec_add1 (vec_icmp_bi, icmp_bi);
- ip6_full_reass_free (rm, rt, reass);
- }
- /* *INDENT-ON* */
+ ip6_full_reass_free (rm, rt, reass);
+ }
clib_spinlock_unlock (&rt->lock);
}
@@ -1586,7 +1567,6 @@ ip6_full_reass_walk_expired (vlib_main_t * vm,
return 0;
}
-/* *INDENT-OFF* */
VLIB_REGISTER_NODE (ip6_full_reass_expire_node) = {
.function = ip6_full_reass_walk_expired,
.format_trace = format_ip6_full_reass_trace,
@@ -1597,7 +1577,6 @@ VLIB_REGISTER_NODE (ip6_full_reass_expire_node) = {
.error_strings = ip6_full_reassembly_error_strings,
};
-/* *INDENT-ON* */
static u8 *
format_ip6_full_reass_key (u8 * s, va_list * args)
@@ -1671,11 +1650,9 @@ show_ip6_full_reass (vlib_main_t * vm, unformat_input_t * input,
clib_spinlock_lock (&rt->lock);
if (details)
{
- /* *INDENT-OFF* */
pool_foreach (reass, rt->pool) {
vlib_cli_output (vm, "%U", format_ip6_full_reass, vm, reass);
}
- /* *INDENT-ON* */
}
sum_reass_n += rt->reass_n;
clib_spinlock_unlock (&rt->lock);
@@ -1701,13 +1678,11 @@ show_ip6_full_reass (vlib_main_t * vm, unformat_input_t * input,
return 0;
}
-/* *INDENT-OFF* */
VLIB_CLI_COMMAND (show_ip6_full_reassembly_cmd, static) = {
.path = "show ip6-full-reassembly",
.short_help = "show ip6-full-reassembly [details]",
.function = show_ip6_full_reass,
};
-/* *INDENT-ON* */
#ifndef CLIB_MARCH_VARIANT
vnet_api_error_t
@@ -1813,7 +1788,6 @@ VLIB_NODE_FN (ip6_full_reassembly_handoff_node) (vlib_main_t * vm,
false /* is_feature */ );
}
-/* *INDENT-OFF* */
VLIB_REGISTER_NODE (ip6_full_reassembly_handoff_node) = {
.name = "ip6-full-reassembly-handoff",
.vector_size = sizeof (u32),
@@ -1836,7 +1810,6 @@ VLIB_NODE_FN (ip6_full_reassembly_feature_handoff_node) (vlib_main_t * vm,
}
-/* *INDENT-OFF* */
VLIB_REGISTER_NODE (ip6_full_reassembly_feature_handoff_node) = {
.name = "ip6-full-reass-feature-hoff",
.vector_size = sizeof (u32),
@@ -1850,7 +1823,6 @@ VLIB_REGISTER_NODE (ip6_full_reassembly_feature_handoff_node) = {
[0] = "error-drop",
},
};
-/* *INDENT-ON* */
#ifndef CLIB_MARCH_VARIANT
int
diff --git a/src/vnet/ip/reass/ip6_sv_reass.c b/src/vnet/ip/reass/ip6_sv_reass.c
index d5218a4fb1d..28941311f50 100644
--- a/src/vnet/ip/reass/ip6_sv_reass.c
+++ b/src/vnet/ip/reass/ip6_sv_reass.c
@@ -222,7 +222,6 @@ format_ip6_sv_reass_trace (u8 * s, va_list * args)
static void
ip6_sv_reass_add_trace (vlib_main_t * vm, vlib_node_runtime_t * node,
- ip6_sv_reass_main_t * rm,
ip6_sv_reass_t * reass, u32 bi,
ip6_sv_reass_trace_operation_e action,
u32 ip_proto, u16 l4_src_port, u16 l4_dst_port)
@@ -303,11 +302,9 @@ ip6_sv_reass_init (ip6_sv_reass_t * reass)
}
always_inline ip6_sv_reass_t *
-ip6_sv_reass_find_or_create (vlib_main_t * vm, vlib_node_runtime_t * node,
- ip6_sv_reass_main_t * rm,
- ip6_sv_reass_per_thread_t * rt,
- ip6_sv_reass_kv_t * kv, u32 * icmp_bi,
- u8 * do_handoff)
+ip6_sv_reass_find_or_create (vlib_main_t *vm, ip6_sv_reass_main_t *rm,
+ ip6_sv_reass_per_thread_t *rt,
+ ip6_sv_reass_kv_t *kv, u8 *do_handoff)
{
ip6_sv_reass_t *reass = NULL;
f64 now = vlib_time_now (vm);
@@ -381,10 +378,9 @@ ip6_sv_reass_find_or_create (vlib_main_t * vm, vlib_node_runtime_t * node,
}
always_inline ip6_sv_reass_rc_t
-ip6_sv_reass_update (vlib_main_t * vm, vlib_node_runtime_t * node,
- ip6_sv_reass_main_t * rm, ip6_sv_reass_per_thread_t * rt,
- ip6_sv_reass_t * reass, u32 bi0,
- ip6_frag_hdr_t * frag_hdr)
+ip6_sv_reass_update (vlib_main_t *vm, vlib_node_runtime_t *node,
+ ip6_sv_reass_main_t *rm, ip6_sv_reass_t *reass, u32 bi0,
+ ip6_frag_hdr_t *frag_hdr)
{
vlib_buffer_t *fb = vlib_get_buffer (vm, bi0);
vnet_buffer_opaque_t *fvnb = vnet_buffer (fb);
@@ -421,7 +417,7 @@ ip6_sv_reass_update (vlib_main_t * vm, vlib_node_runtime_t * node,
vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
{
- ip6_sv_reass_add_trace (vm, node, rm, reass, bi0, REASS_FINISH,
+ ip6_sv_reass_add_trace (vm, node, reass, bi0, REASS_FINISH,
reass->ip_proto, reass->l4_src_port,
reass->l4_dst_port);
}
@@ -431,9 +427,9 @@ ip6_sv_reass_update (vlib_main_t * vm, vlib_node_runtime_t * node,
{
if (PREDICT_FALSE (fb->flags & VLIB_BUFFER_IS_TRACED))
{
- ip6_sv_reass_add_trace (vm, node, rm, reass, bi0,
- REASS_FRAGMENT_CACHE, reass->ip_proto,
- reass->l4_src_port, reass->l4_dst_port);
+ ip6_sv_reass_add_trace (vm, node, reass, bi0, REASS_FRAGMENT_CACHE,
+ reass->ip_proto, reass->l4_src_port,
+ reass->l4_dst_port);
}
if (vec_len (reass->cached_buffers) > rm->max_reass_len)
{
@@ -467,7 +463,6 @@ ip6_sv_reass_verify_upper_layer_present (vlib_node_runtime_t * node,
always_inline bool
ip6_sv_reass_verify_fragment_multiple_8 (vlib_main_t * vm,
- vlib_node_runtime_t * node,
vlib_buffer_t * b,
ip6_frag_hdr_t * frag_hdr)
{
@@ -489,7 +484,6 @@ ip6_sv_reass_verify_fragment_multiple_8 (vlib_main_t * vm,
always_inline bool
ip6_sv_reass_verify_packet_size_lt_64k (vlib_main_t * vm,
- vlib_node_runtime_t * node,
vlib_buffer_t * b,
ip6_frag_hdr_t * frag_hdr)
{
@@ -534,7 +528,6 @@ ip6_sv_reassembly_inline (vlib_main_t * vm,
vlib_buffer_t *b0;
u32 next0 = IP6_SV_REASSEMBLY_NEXT_DROP;
u32 error0 = IP6_ERROR_NONE;
- u32 icmp_bi = ~0;
bi0 = from[0];
b0 = vlib_get_buffer (vm, bi0);
@@ -570,13 +563,11 @@ ip6_sv_reassembly_inline (vlib_main_t * vm,
next0 = IP6_SV_REASSEMBLY_NEXT_INPUT;
if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
{
- ip6_sv_reass_add_trace (vm, node, rm, NULL, bi0,
- REASS_PASSTHROUGH,
- vnet_buffer (b0)->ip.reass.ip_proto,
- vnet_buffer (b0)->ip.
- reass.l4_src_port,
- vnet_buffer (b0)->ip.
- reass.l4_dst_port);
+ ip6_sv_reass_add_trace (
+ vm, node, NULL, bi0, REASS_PASSTHROUGH,
+ vnet_buffer (b0)->ip.reass.ip_proto,
+ vnet_buffer (b0)->ip.reass.l4_src_port,
+ vnet_buffer (b0)->ip.reass.l4_dst_port);
}
goto packet_enqueue;
}
@@ -592,10 +583,8 @@ ip6_sv_reassembly_inline (vlib_main_t * vm,
goto packet_enqueue;
}
}
- if (!ip6_sv_reass_verify_fragment_multiple_8
- (vm, node, b0, frag_hdr)
- || !ip6_sv_reass_verify_packet_size_lt_64k (vm, node, b0,
- frag_hdr))
+ if (!ip6_sv_reass_verify_fragment_multiple_8 (vm, b0, frag_hdr) ||
+ !ip6_sv_reass_verify_packet_size_lt_64k (vm, b0, frag_hdr))
{
next0 = IP6_SV_REASSEMBLY_NEXT_ICMP_ERROR;
goto packet_enqueue;
@@ -615,8 +604,7 @@ ip6_sv_reassembly_inline (vlib_main_t * vm,
kv.k.as_u64[5] = ip0->protocol;
ip6_sv_reass_t *reass =
- ip6_sv_reass_find_or_create (vm, node, rm, rt, &kv, &icmp_bi,
- &do_handoff);
+ ip6_sv_reass_find_or_create (vm, rm, rt, &kv, &do_handoff);
if (PREDICT_FALSE (do_handoff))
{
@@ -650,17 +638,14 @@ ip6_sv_reassembly_inline (vlib_main_t * vm,
next0 = IP6_SV_REASSEMBLY_NEXT_INPUT;
if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
{
- ip6_sv_reass_add_trace (vm, node, rm, reass, bi0,
- REASS_FRAGMENT_FORWARD,
- reass->ip_proto,
- reass->l4_src_port,
- reass->l4_dst_port);
+ ip6_sv_reass_add_trace (
+ vm, node, reass, bi0, REASS_FRAGMENT_FORWARD,
+ reass->ip_proto, reass->l4_src_port, reass->l4_dst_port);
}
goto packet_enqueue;
}
- switch (ip6_sv_reass_update
- (vm, node, rm, rt, reass, bi0, frag_hdr))
+ switch (ip6_sv_reass_update (vm, node, rm, reass, bi0, frag_hdr))
{
case IP6_SV_REASS_RC_OK:
/* nothing to do here */
@@ -724,11 +709,9 @@ ip6_sv_reassembly_inline (vlib_main_t * vm,
vnet_buffer (b0)->ip.reass.l4_dst_port = reass->l4_dst_port;
if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
{
- ip6_sv_reass_add_trace (vm, node, rm, reass, bi0,
- REASS_FRAGMENT_FORWARD,
- reass->ip_proto,
- reass->l4_src_port,
- reass->l4_dst_port);
+ ip6_sv_reass_add_trace (
+ vm, node, reass, bi0, REASS_FRAGMENT_FORWARD,
+ reass->ip_proto, reass->l4_src_port, reass->l4_dst_port);
}
vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
to_next, n_left_to_next, bi0,
@@ -750,17 +733,6 @@ ip6_sv_reassembly_inline (vlib_main_t * vm,
vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
n_left_to_next, bi0, next0);
- if (~0 != icmp_bi)
- {
- next0 = IP6_SV_REASSEMBLY_NEXT_ICMP_ERROR;
- to_next[0] = icmp_bi;
- to_next += 1;
- n_left_to_next -= 1;
- vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
- n_left_to_next, icmp_bi,
- next0);
- }
-
next_packet:
from += 1;
n_left_from -= 1;
@@ -997,8 +969,9 @@ VLIB_INIT_FUNCTION (ip6_sv_reass_init_function);
#endif /* CLIB_MARCH_VARIANT */
static uword
-ip6_sv_reass_walk_expired (vlib_main_t * vm,
- vlib_node_runtime_t * node, vlib_frame_t * f)
+ip6_sv_reass_walk_expired (vlib_main_t *vm,
+ CLIB_UNUSED (vlib_node_runtime_t *node),
+ CLIB_UNUSED (vlib_frame_t *f))
{
ip6_sv_reass_main_t *rm = &ip6_sv_reass_main;
uword event_type, *event_data = 0;
@@ -1012,10 +985,11 @@ ip6_sv_reass_walk_expired (vlib_main_t * vm,
switch (event_type)
{
- case ~0: /* no events => timeout */
- /* nothing to do here */
- break;
+ case ~0:
+ /* no events => timeout */
+ /* fallthrough */
case IP6_EVENT_CONFIG_CHANGED:
+ /* nothing to do here */
break;
default:
clib_warning ("BUG: event type 0x%wx", event_type);