/*
* Copyright (c) 2016 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <vnet/adj/adj_nbr.h>
#include <vnet/adj/adj_internal.h>
#include <vnet/ethernet/arp_packet.h>
#include <vnet/fib/fib_walk.h>
/*
 * Per-protocol vectors of hash tables of neighbour (traditional)
 * adjacencies, indexed by interface (the vector index).
 * Key: address (and its proto) and link-type/ether-type.
*/
static BVT(clib_bihash) **adj_nbr_tables[FIB_PROTOCOL_MAX];
// FIXME SIZE APPROPRIATELY. ASK DAVEB.
#define ADJ_NBR_DEFAULT_HASH_NUM_BUCKETS (64 * 64)
#define ADJ_NBR_DEFAULT_HASH_MEMORY_SIZE (32<<20)
#define ADJ_NBR_SET_KEY(_key, _lt, _nh) \
{ \
_key.key[0] = (_nh)->as_u64[0]; \
_key.key[1] = (_nh)->as_u64[1]; \
_key.key[2] = (_lt); \
}
#define ADJ_NBR_ITF_OK(_proto, _itf) \
(((_itf) < vec_len(adj_nbr_tables[_proto])) && \
(NULL != adj_nbr_tables[_proto][(_itf)]))
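/**
 * @brief Add an adjacency to its interface's neighbour table, creating
 * and initialising that table on first use.
 */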
static void
adj_nbr_insert (fib_protocol_t nh_proto,
vnet_link_t link_type,
const ip46_address_t *nh_addr,
u32 sw_if_index,
adj_index_t adj_index)
{
BVT(clib_bihash_kv) kv;
if (sw_if_index >= vec_len(adj_nbr_tables[nh_proto]))
{
vec_validate(adj_nbr_tables[nh_proto], sw_if_index);
}
if (NULL == adj_nbr_tables[nh_proto][sw_if_index])
{
adj_nbr_tables[nh_proto][sw_if_index] =
clib_mem_alloc_aligned(sizeof(BVT(clib_bihash)),
CLIB_CACHE_LINE_BYTES);
memset(adj_nbr_tables[nh_proto][sw_if_index],
0,
sizeof(BVT(clib_bihash)));
BV(clib_bihash_init) (adj_nbr_tables[nh_proto][sw_if_index],
"Adjacency Neighbour table",
ADJ_NBR_DEFAULT_HASH_NUM_BUCKETS,
ADJ_NBR_DEFAULT_HASH_MEMORY_SIZE);
}
ADJ_NBR_SET_KEY(kv, link_type, nh_addr);
kv.value = adj_index;
BV(clib_bihash_add_del) (adj_nbr_tables[nh_proto][sw_if_index], &kv, 1);
}
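/**
 * @brief Remove an adjacency from its interface's neighbour table.
 */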
void
adj_nbr_remove (adj_index_t ai,
fib_protocol_t nh_proto,
vnet_link_t link_type,
const ip46_address_t *nh_addr,
u32 sw_if_index)
{
BVT(clib_bihash_kv) kv;
if (!ADJ_NBR_ITF_OK(nh_proto, sw_if_index))
return;
ADJ_NBR_SET_KEY(kv, link_type, nh_addr);
kv.value = ai;
BV(clib_bihash_add_del) (adj_nbr_tables[nh_proto][sw_if_index], &kv, 0);
}
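/**
 * @brief Look up a neighbour adjacency by next-hop address, link-type
 * and interface. Returns ADJ_INDEX_INVALID if no such adjacency exists.
 */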
static adj_index_t
adj_nbr_find (fib_protocol_t nh_proto,
vnet_link_t link_type,
const ip46_address_t *nh_addr,
u32 sw_if_index)
{
BVT(clib_bihash_kv) kv;
ADJ_NBR_SET_KEY(kv, link_type, nh_addr);
if (!ADJ_NBR_ITF_OK(nh_proto, sw_if_index))
return (ADJ_INDEX_INVALID);
if (BV(clib_bihash_search)(adj_nbr_tables[nh_proto][sw_if_index],
&kv, &kv) < 0)
{
return (ADJ_INDEX_INVALID);
}
else
{
return (kv.value);
}
}
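/**
 * @brief Return the graph node that resolves the next-hop's link-layer
 * address for the given protocol: ARP for IPv4, neighbour discovery for IPv6.
 */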
static inline u32
adj_get_nd_node (fib_protocol_t proto)
{
switch (proto) {
case FIB_PROTOCOL_IP4:
return (ip4_arp_node.index);
case FIB_PROTOCOL_IP6:
return (ip6_discover_neighbor_node.index);
case FIB_PROTOCOL_MPLS:
break;
}
ASSERT(0);
return (ip4_arp_node.index);
}
/**
* @brief Check and set feature flags if o/p interface has any o/p features.
*/
static void
adj_nbr_evaluate_feature (adj_index_t ai)
{
ip_adjacency_t *adj;
vnet_feature_main_t *fm = &feature_main;
i16 feature_count;
u8 arc_index;
u32 sw_if_index;
adj = adj_get(ai);
switch (adj->ia_link)
{
case VNET_LINK_IP4:
arc_index = ip4_main.lookup_main.output_feature_arc_index;
break;
case VNET_LINK_IP6:
	arc_index = ip6_main.lookup_main.output_feature_arc_index;
	break;
    default:
	/* other link types: no output feature arc to check */
	return;
    }

    sw_if_index = adj->rewrite_header.sw_if_index;
    if (vec_len(fm->feature_count_by_sw_if_index[arc_index]) > sw_if_index)
    {
	feature_count = fm->feature_count_by_sw_if_index[arc_index][sw_if_index];
	if (feature_count > 0)
	    /* mark the adj so the data-plane walks the feature arc */
	    adj->rewrite_header.flags |= VNET_REWRITE_HAS_FEATURES;
    }
}
/*
* Copyright (c) 2017 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file
* @brief IPv6 Reassembly.
*
* This file contains the source code for IPv6 reassembly.
*/
#include <vppinfra/vec.h>
#include <vnet/vnet.h>
#include <vnet/ip/ip.h>
#include <vppinfra/bihash_48_8.h>
#include <vnet/ip/ip6_reassembly.h>
#define MSEC_PER_SEC 1000
#define IP6_REASS_TIMEOUT_DEFAULT_MS 100
#define IP6_REASS_EXPIRE_WALK_INTERVAL_DEFAULT_MS 10000 // 10 seconds default
#define IP6_REASS_MAX_REASSEMBLIES_DEFAULT 1024
#define IP6_REASS_HT_LOAD_FACTOR (0.75)
static vlib_node_registration_t ip6_reass_node;
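// 48-byte reassembly key, laid out to match the clib_bihash_48_8_t key:
// the addresses, fragment id and protocol identify the datagram being reassembled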
typedef struct
{
union
{
struct
{
ip6_address_t src;
ip6_address_t dst;
u32 xx_id;
u32 frag_id;
u8 unused[7];
u8 proto;
};
u64 as_u64[6];
};
} ip6_reass_key_t;
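// offset into the fragment's data at which this buffer's range begins;
// the _no_check variants skip the sanity ASSERTs of the checked variants below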
always_inline u32
ip6_reass_buffer_get_data_offset_no_check (vlib_buffer_t * b)
{
vnet_buffer_opaque_t *vnb = vnet_buffer (b);
return vnb->ip.reass.range_first - vnb->ip.reass.fragment_first;
}
always_inline u32
ip6_reass_buffer_get_data_offset (vlib_buffer_t * b)
{
vnet_buffer_opaque_t *vnb = vnet_buffer (b);
ASSERT (vnb->ip.reass.range_first >= vnb->ip.reass.fragment_first);
return ip6_reass_buffer_get_data_offset_no_check (b);
}
always_inline u16
ip6_reass_buffer_get_data_len_no_check (vlib_buffer_t * b)
{
vnet_buffer_opaque_t *vnb = vnet_buffer (b);
return clib_min (vnb->ip.reass.range_last, vnb->ip.reass.fragment_last) -
(vnb->ip.reass.fragment_first + ip6_reass_buffer_get_data_offset (b)) + 1;
}
always_inline u16
ip6_reass_buffer_get_data_len (vlib_buffer_t * b)
{
vnet_buffer_opaque_t *vnb = vnet_buffer (b);
ASSERT (vnb->ip.reass.range_last > vnb->ip.reass.fragment_first);
return ip6_reass_buffer_get_data_len_no_check (b);
}
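// per-reassembly context, kept while the datagram's fragments are collected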
typedef struct
{
// hash table key
ip6_reass_key_t key;
// time when last packet was received
f64 last_heard;
// internal id of this reassembly
u64 id;
// buffer index of first buffer in this reassembly context
u32 first_bi;
// last octet of packet, ~0 until fragment without more_fragments arrives
u32 last_packet_octet;
// length of data collected so far
u32 data_len;
// trace operation counter
u32 trace_op_counter;
// next index - used by non-feature node
u8 next_index;
// minimum fragment length for this reassembly - used to estimate MTU
u16 min_fragment_length;
} ip6_reass_t;
typedef struct
{
ip6_reass_t *pool;
u32 reass_n;
u32 buffers_n;
u32 id_counter;
clib_spinlock_t lock;
} ip6_reass_per_thread_t;
typedef struct
{
// IPv6 config
u32 timeout_ms;
f64 timeout;
u32 expire_walk_interval_ms;
u32 max_reass_n;
// IPv6 runtime
clib_bihash_48_8_t hash;
// per-thread data
ip6_reass_per_thread_t *per_thread_data;
// convenience
vlib_main_t *vlib_main;
vnet_main_t *vnet_main;
// node index of ip6-drop node
u32 ip6_drop_idx;
u32 ip6_icmp_error_idx;
u32 ip6_reass_expire_node_idx;
} ip6_reass_main_t;
ip6_reass_main_t ip6_reass_main;
typedef enum
{
IP6_REASSEMBLY_NEXT_INPUT,
IP6_REASSEMBLY_NEXT_DROP,
IP6_REASSEMBLY_NEXT_ICMP_ERROR,
IP6_REASSEMBLY_N_NEXT,
} ip6_reass_next_t;
typedef enum
{
RANGE_NEW,
RANGE_OVERLAP,
ICMP_ERROR_RT_EXCEEDED,
ICMP_ERROR_FL_TOO_BIG,
ICMP_ERROR_FL_NOT_MULT_8,
FINALIZE,
} ip6_reass_trace_operation_e;
typedef struct
{
u16 range_first;
u16 range_last;
u32 range_bi;
i32 data_offset;
u32 data_len;
u32 first_bi;
} ip6_reass_range_trace_t;
typedef struct
{
ip6_reass_trace_operation_e action;
u32 reass_id;
ip6_reass_range_trace_t trace_range;
u32 size_diff;
u32 op_id;
u32 fragment_first;
u32 fragment_last;
u32 total_data_len;
} ip6_reass_trace_t;
static void
ip6_reass_trace_details (vlib_main_t * vm, u32 bi,
ip6_reass_range_trace_t * trace)
{
vlib_buffer_t *b = vlib_get_buffer (vm, bi);
vnet_buffer_opaque_t *vnb = vnet_buffer (b);
trace->range_first = vnb->ip.reass.range_first;
trace->range_last = vnb->ip.reass.range_last;
trace->data_offset = ip6_reass_buffer_get_data_offset_no_check (b);
trace->data_len = ip6_reass_buffer_get_data_len_no_check (b);
trace->range_bi = bi;
}
static u8 *
format_ip6_reass_range_trace (u8 * s, va_list * args)
{
ip6_reass_range_trace_t *trace = va_arg (*args, ip6_reass_range_trace_t *);
s = format (s, "range: [%u, %u], off %d, len %u, bi %u", trace->range_first,
trace->range_last, trace->data_offset, trace->data_len,
trace->range_bi);
return s;
}
static u8 *
format_ip6_reass_trace (u8 * s, va_list * args)
{
CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
ip6_reass_trace_t *t = va_arg (*args, ip6_reass_trace_t *);
s = format (s, "reass id: %u, op id: %u ", t->reass_id, t->op_id);
u32 indent = format_get_indent (s);
s = format (s, "first bi: %u, data len: %u, ip/fragment[%u, %u]",
t->trace_range.first_bi, t->total_data_len, t->fragment_first,
t->fragment_last);
switch (t->action)
{
case RANGE_NEW:
s = format (s, "\n%Unew %U", format_white_space, indent,
format_ip6_reass_range_trace, &t->trace_range);
break;
case RANGE_OVERLAP:
s = format (s, "\n%Uoverlap %U", format_white_space, indent,
format_ip6_reass_range_trace, &t->trace_range);
break;
case ICMP_ERROR_FL_TOO_BIG:
s = format (s, "\n%Uicmp-error - frag_len > 65535 %U",
format_white_space, indent, format_ip6_reass_range_trace,
&t->trace_range);
break;
case ICMP_ERROR_FL_NOT_MULT_8:
s = format (s, "\n%Uicmp-error - frag_len mod 8 != 0 %U",
format_white_space, indent, format_ip6_reass_range_trace,
&t->trace_range);
break;
case ICMP_ERROR_RT_EXCEEDED:
s = format (s, "\n%Uicmp-error - reassembly time exceeded",
format_white_space, indent);
break;
case FINALIZE:
s = format (s, "\n%Ufinalize reassembly", format_white_space, indent);
break;
}
return s;
}
static void
ip6_reass_add_trace (vlib_main_t * vm, vlib_node_runtime_t * node,
ip6_reass_main_t * rm, ip6_reass_t * reass,
u32 bi, ip6_reass_trace_operation_e action,
u32 size_diff)
{
vlib_buffer_t *b = vlib_get_buffer (vm, bi);
vnet_buffer_opaque_t *vnb = vnet_buffer (b);
if (pool_is_free_index (vm->trace_main.trace_buffer_pool, b->trace_index))
{
// this buffer's trace is gone
b->flags &= ~VLIB_BUFFER_IS_TRACED;
return;
}
ip6_reass_trace_t *t = vlib_add_trace (vm, node, b, sizeof (t[0]));
t->reass_id = reass->id;
t->action = action;
ip6_reass_trace_details (vm, bi, &t->trace_range);
t->size_diff = size_diff;
t->op_id = reass->trace_op_counter;
++reass->trace_op_counter;
t->fragment_first = vnb->ip.reass.fragment_first;
t->fragment_last = vnb->ip.reass.fragment_last;
t->trace_range.first_bi = reass->first_bi;
t->total_data_len = reass->data_len;
#if 0
static u8 *s = NULL;
s = format (s, "%U", format_ip6_reass_trace, NULL, NULL, t);
printf ("%.*s\n", vec_len (s), s);
fflush (stdout);
vec_reset_length (s);
#endif
}
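// remove the reassembly from the lookup hash and return it to the per-thread pool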
always_inline void
ip6_reass_free (ip6_reass_main_t * rm, ip6_reass_per_thread_t * rt,
ip6_reass_t * reass)
{
clib_bihash_kv_48_8_t kv;
kv.key[0] = reass->key.as_u64[0];
kv.key[1] = reass->key.as_u64[1];
kv.key[2] = reass->key.as_u64[2];
kv.key[3] = reass->key.as_u64[3];
kv.key[4] = reass->key.as_u64[4];
kv.key[5] = reass->key.as_u64[5];
clib_bihash_add_del_48_8 (&rm->hash, &kv, 0);
pool_put (rt->pool, reass);
--rt->reass_n;
}
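// collect every buffer held by the reassembly into vec_drop_bi for dropping,
// walking the range list and each range's buffer chain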
always_inline void
ip6_reass_drop_all (vlib_main_t * vm, ip6_reass_main_t * rm,
ip6_reass_t * reass, u32 ** vec_drop_bi)
{
u32 range_bi = reass->first_bi;
vlib_buffer_t *range_b;
vnet_buffer_opaque_t *range_vnb;
while (~0 != range_bi)
{
range_b = vlib_get_buffer (vm, range_bi);
range_vnb = vnet_buffer (range_b);
u32 bi = range_bi;
while (~0 != bi)
{
vec_add1 (*vec_drop_bi, bi);
vlib_buffer_t *b = vlib_get_buffer (vm, bi);
if (b->flags & VLIB_BUFFER_NEXT_PRESENT)
{
bi = b->next_buffer;
b->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
}
else
{
bi = ~0;
}
}
range_bi = range_vnb->ip.reass.next_range_bi;
}
}
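// expire a reassembly: if the zero-offset fragment was received, queue it in
// icmp_bi for a time-exceeded error; all remaining buffers are dropped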
always_inline void
ip6_reass_on_timeout (vlib_main_t * vm, vlib_node_runtime_t * node,
ip6_reass_main_t * rm, ip6_reass_t * reass,
u32 * icmp_bi, u32 ** vec_timeout)
{
if (~0 == reass->first_bi)
{
return;
}
vlib_buffer_t *b = vlib_get_buffer (vm, reass->first_bi);
if (0 == vnet_buffer (b)->ip.reass.fragment_first)
{
*icmp_bi = reass->first_bi;
if (PREDICT_FALSE (b->flags & VLIB_BUFFER_IS_TRACED))
{
ip6_reass_add_trace (vm, node, rm, reass, reass->first_bi,
ICMP_ERROR_RT_EXCEEDED, 0);
}
// fragment with offset zero received - send icmp message back
if (b->flags & VLIB_BUFFER_NEXT_PRESENT)
{
// separate first buffer from chain and steer it towards icmp node
b->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
reass->first_bi = b->next_buffer;
}
else
{
reass->first_bi = vnet_buffer (b)->ip.reass.next_range_bi;
}
icmp6_error_set_vnet_buffer (b, ICMP6_time_exceeded,
ICMP6_time_exceeded_fragment_reassembly_time_exceeded,
0);
}
ip6_reass_drop_all (vm, rm, reass, vec_timeout);
}
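// look up the reassembly for a key, expiring it first if it has timed out;
// when none exists, create one unless the per-thread limit has been reached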
always_inline ip6_reass_t *
ip6_reass_find_or_create (vlib_main_t * vm, vlib_node_runtime_t * node,
ip6_reass_main_t * rm, ip6_reass_per_thread_t * rt,
ip6_reass_key_t * k, u32 * icmp_bi,
u32 ** vec_timeout)
{
ip6_reass_t *reass = NULL;
f64 now = vlib_time_now (rm->vlib_main);
clib_bihash_kv_48_8_t kv, value;
kv.key[0] = k->as_u64[0];
kv.key[1] = k->as_u64[1];
kv.key[2] = k->as_u64[2];
kv.key[3] = k->as_u64[3];
kv.key[4] = k->as_u64[4];
kv.key[5] = k->as_u64[5];
if (!clib_bihash_search_48_8 (&rm->hash, &kv, &value))
{
reass = pool_elt_at_index (rt->pool, value.value);
if (now > reass->last_heard + rm->timeout)
{
ip6_reass_on_timeout (vm, node, rm, reass, icmp_bi, vec_timeout);
ip6_reass_free (rm, rt, reass);
reass = NULL;
}
}
if (reass)
{
reass->last_heard = now;
return reass;
}
if (rt->reass_n >= rm->max_reass_n)
{
reass = NULL;
return reass;
}
else
{
pool_get (rt->pool, reass);
clib_memset (reass, 0, sizeof (*reass));
reass->id =
((u64) os_get_thread_index () * 1000000000) + rt->id_counter;
++rt->id_counter;
reass->first_bi = ~0;
reass->last_packet_octet = ~0;
reass->data_len = 0;
++rt->reass_n;
}
reass->key.as_u64[0] = kv.key[0] = k->as_u64[0];
reass->key.as_u64[1] = kv.key[1] = k->as_u64[1];
reass->key.as_u64[2] = kv.key[2] = k->as_u64[2];
reass->key.as_u64[3] = kv.key[3] = k->as_u64[3];
reass->key.as_u64[4] = kv.key[4] = k->as_u64[4];
reass->key.as_u64[5] = kv.key[5] = k->as_u64[5];
kv.value = reass - rt->pool;
reass->last_heard = now;
if (clib_bihash_add_del_48_8 (&rm->hash, &kv, 1))
{
ip6_reass_free (rm, rt, reass);
reass = NULL;
}
return reass;
}
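// all data is present: trim overlaps, splice the ranges into one buffer chain,
// strip the fragment header and restore the ip6 payload length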
always_inline void
ip6_reass_finalize (vlib_main_t * vm, vlib_node_runtime_t * node,
ip6_reass_main_t * rm, ip6_reass_per_thread_t * rt,
ip6_reass_t * reass, u32 * bi0, u32 * next0,
u32 * error0, u32 ** vec_drop_compress, bool is_feature)
{
ASSERT (~0 != reass->first_bi);
*bi0 = reass->first_bi;
*error0 = IP6_ERROR_NONE;
ip6_frag_hdr_t *frag_hdr;
vlib_buffer_t *last_b = NULL;
u32 sub_chain_bi = reass->first_bi;
u32 total_length = 0;
u32 buf_cnt = 0;
u32 dropped_cnt = 0;
do
{
u32 tmp_bi = sub_chain_bi;
vlib_buffer_t *tmp = vlib_get_buffer (vm, tmp_bi);
u32 data_len = ip6_reass_buffer_get_data_len (tmp);
u32 trim_front = vnet_buffer (tmp)->ip.reass.ip6_frag_hdr_offset +
sizeof (*frag_hdr) + ip6_reass_buffer_get_data_offset (tmp);
u32 trim_end =
vlib_buffer_length_in_chain (vm, tmp) - trim_front - data_len;
if (tmp_bi == reass->first_bi)
{
/* first buffer - keep ip6 header */
ASSERT (0 == ip6_reass_buffer_get_data_offset (tmp));
trim_front = 0;
trim_end = vlib_buffer_length_in_chain (vm, tmp) - data_len -
(vnet_buffer (tmp)->ip.reass.ip6_frag_hdr_offset +
sizeof (*frag_hdr));
ASSERT (vlib_buffer_length_in_chain (vm, tmp) - trim_end > 0);
}
u32 keep_data =
vlib_buffer_length_in_chain (vm, tmp) - trim_front - trim_end;
while (1)
{
++buf_cnt;
if (trim_front)
{
if (trim_front > tmp->current_length)
{
/* drop whole buffer */
vec_add1 (*vec_drop_compress, tmp_bi);
++dropped_cnt;
trim_front -= tmp->current_length;
ASSERT (tmp->flags & VLIB_BUFFER_NEXT_PRESENT);
tmp->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
tmp_bi = tmp->next_buffer;
tmp = vlib_get_buffer (vm, tmp_bi);
continue;
}
else
{
vlib_buffer_advance (tmp, trim_front);
trim_front = 0;
}
}
if (keep_data)
{
if (last_b)
{
last_b->flags |= VLIB_BUFFER_NEXT_PRESENT;
last_b->next_buffer = tmp_bi;
}
last_b = tmp;
if (keep_data <= tmp->current_length)
{
tmp->current_length = keep_data;
keep_data = 0;
}
else
{
keep_data -= tmp->current_length;
ASSERT (tmp->flags & VLIB_BUFFER_NEXT_PRESENT);
}
total_length += tmp->current_length;
}
else
{
vec_add1 (*vec_drop_compress, tmp_bi);
ASSERT (reass->first_bi != tmp_bi);
++dropped_cnt;
}
if (tmp->flags & VLIB_BUFFER_NEXT_PRESENT)
{
tmp_bi = tmp->next_buffer;
tmp = vlib_get_buffer (vm, tmp->next_buffer);
}
else
{
break;
}
}
      sub_chain_bi =
	vnet_buffer (vlib_get_buffer (vm, sub_chain_bi))->ip.reass.next_range_bi;
}
while (~0 != sub_chain_bi);
ASSERT (last_b != NULL);
last_b->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
vlib_buffer_t *first_b = vlib_get_buffer (vm, reass->first_bi);
ASSERT (total_length >= first_b->current_length);
total_length -= first_b->current_length;
first_b->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
first_b->total_length_not_including_first_buffer = total_length;
// drop fragment header
vnet_buffer_opaque_t *first_b_vnb = vnet_buffer (first_b);
ip6_header_t *ip = vlib_buffer_get_current (first_b);
u16 ip6_frag_hdr_offset = first_b_vnb->ip.reass.ip6_frag_hdr_offset;
ip6_ext_header_t *prev_hdr;
ip6_ext_header_find_t (ip, prev_hdr, frag_hdr,
IP_PROTOCOL_IPV6_FRAGMENTATION);
if (prev_hdr)
{
prev_hdr->next_hdr = frag_hdr->next_hdr;
}
else
{
ip->protocol = frag_hdr->next_hdr;
}
ASSERT ((u8 *) frag_hdr - (u8 *) ip == ip6_frag_hdr_offset);
memmove (frag_hdr, (u8 *) frag_hdr + sizeof (*frag_hdr),
first_b->current_length - ip6_frag_hdr_offset -
sizeof (ip6_frag_hdr_t));
first_b->current_length -= sizeof (*frag_hdr);
ip->payload_length =
clib_host_to_net_u16 (total_length + first_b->current_length -
sizeof (*ip));
vlib_buffer_chain_compress (vm, first_b, vec_drop_compress);
if (PREDICT_FALSE (first_b->flags & VLIB_BUFFER_IS_TRACED))
{
ip6_reass_add_trace (vm, node, rm, reass, reass->first_bi, FINALIZE, 0);
#if 0
// following code does a hexdump of packet fragments to stdout ...
do
{
u32 bi = reass->first_bi;
u8 *s = NULL;
while (~0 != bi)
{
vlib_buffer_t *b = vlib_get_buffer (vm, bi);
s = format (s, "%u: %U\n", bi, format_hexdump,
vlib_buffer_get_current (b), b->current_length);
if (b->flags & VLIB_BUFFER_NEXT_PRESENT)
{
bi = b->next_buffer;
}
else
{
break;
}
}
printf ("%.*s\n", vec_len (s), s);
fflush (stdout);
vec_free (s);
}
while (0);
#endif
}
if (is_feature)
{
*next0 = IP6_REASSEMBLY_NEXT_INPUT;
}
else
{
*next0 = reass->next_index;
}
vnet_buffer (first_b)->ip.reass.estimated_mtu = reass->min_fragment_length;
ip6_reass_free (rm, rt, reass);
reass = NULL;
}
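// count the buffers in a buffer chain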
always_inline u32
ip6_reass_get_buffer_chain_length (vlib_main_t * vm, vlib_buffer_t * b)
{
u32 len = 0;
while (b)
{
++len;
if (PREDICT_FALSE (b->flags & VLIB_BUFFER_NEXT_PRESENT))
{
b = vlib_get_buffer (vm, b->next_buffer);
}
else
{
break;
}
}
return len;
}
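// link the buffer chain starting at new_next_bi into the reassembly's range
// list after prev_range_bi, or at the head when prev_range_bi is ~0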
always_inline void
ip6_reass_insert_range_in_chain (vlib_main_t * vm, ip6_reass_main_t * rm,
ip6_reass_per_thread_t * rt,
ip6_reass_t * reass, u32 prev_range_bi,
u32 new_next_bi)
{
vlib_buffer_t *new_next_b = vlib_get_buffer (vm, new_next_bi);
vnet_buffer_opaque_t *new_next_vnb = vnet_buffer (new_next_b);
if (~0 != prev_range_bi)
{
vlib_buffer_t *prev_b = vlib_get_buffer (vm, prev_range_bi);
vnet_buffer_opaque_t *prev_vnb = vnet_buffer (prev_b);
new_next_vnb->ip.reass.next_range_bi = prev_vnb->ip.reass.next_range_bi;
prev_vnb->ip.reass.next_range_bi = new_next_bi;
}
else
{
if (~0 != reass->first_bi)
{
new_next_vnb->ip.reass.next_range_bi = reass->first_bi;
}
reass->first_bi = new_next_bi;
}
reass->data_len += ip6_reass_buffer_get_data_len (new_next_b);
rt->buffers_n += ip6_reass_get_buffer_chain_length (vm, new_next_b);
}
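// process one fragment: insert its range into the reassembly and, once all
// octets up to the final fragment's last octet have arrived, finalize the datagram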
always_inline void
ip6_reass_update (vlib_main_t * vm, vlib_node_runtime_t * node,
ip6_reass_main_t * rm, ip6_reass_per_thread_t * rt,
ip6_reass_t * reass, u32 * bi0, u32 * next0,
u32 * error0, ip6_frag_hdr_t * frag_hdr,
u32 ** vec_drop_overlap, u32 ** vec_drop_compress,
bool is_feature)
{
int consumed = 0;
vlib_buffer_t *fb = vlib_get_buffer (vm, *bi0);
  vnet_buffer_opaque_t *fvnb = vnet_buffer (fb);