author     Klement Sekera <ksekera@cisco.com>       2019-06-17 12:23:15 +0000
committer  Ole Trøan <otroan@employees.org>         2019-06-18 08:45:30 +0000
commit     e849865fb8819a3980658b251a8e24595170d436 (patch)
tree       8217f2fc1eb7c3acb99bca4190d278c1cbb832c1 /src
parent     3860a77e9ff1d945259301045db339667c2c2c6d (diff)
ip: reassembly - separate feature and custom code
This change fixes a crash caused by the is_feature flag semantics turning
out to be different from the "custom app code" semantics. Introduce a
separate flag which custom plugins/apps can use instead of tying that code
to the is_feature flag.
Change-Id: Ief5898711e68529f9306cfac54c4dc9b3650f9e3
Ticket: N/A
Type: fix
Fixes: 21aa8f1022590b8b5caf819b4bbd485de0f1dfe5
Signed-off-by: Klement Sekera <ksekera@cisco.com>
Signed-off-by: Ole Troan <ot@cisco.com>
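
Editor's note: the core of the change is that the reassembly code no longer infers
"custom app" behaviour from is_feature. Instead, each reassembly context starts with
next_index and error_next_index set to ~0, and those fields are only honoured when a
custom app has filled them in via the buffer metadata. Below is a minimal standalone
sketch of that decision logic; reass_model_t, pick_next, has_custom_error_path and
NEXT_INPUT are illustrative names, not the VPP API.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative model only - the real logic lives in ip4_reassembly.c /
 * ip6_reassembly.c. A reassembly context starts with both indices at ~0;
 * a custom app overwrites them through the buffer metadata. */
typedef struct
{
  uint32_t next_index;          /* where a reassembled packet should go */
  uint32_t error_next_index;    /* where dropped fragments should go */
} reass_model_t;

#define INDEX_UNSET (~0u)
enum { NEXT_INPUT = 0 };        /* stand-in for IP4/6_REASSEMBLY_NEXT_INPUT */

static uint32_t
pick_next (const reass_model_t * r, bool is_custom_app)
{
  /* feature and non-feature nodes share the fixed next node; only a
   * custom app redirects the reassembled packet */
  return is_custom_app ? r->next_index : NEXT_INPUT;
}

static bool
has_custom_error_path (const reass_model_t * r)
{
  /* replaces the old "if (!(is_feature))" test in *_reass_drop_all () */
  return r->error_next_index != INDEX_UNSET;
}

int
main (void)
{
  reass_model_t plain = { INDEX_UNSET, INDEX_UNSET };
  reass_model_t custom = { 42, 43 };
  printf ("plain:  next=%u custom-error-path=%d\n",
          pick_next (&plain, false), has_custom_error_path (&plain));
  printf ("custom: next=%u custom-error-path=%d\n",
          pick_next (&custom, true), has_custom_error_path (&custom));
  return 0;
}

With this split, is_feature keeps selecting the feature-arc output while the ~0
sentinels answer the separate question of whether a custom app owns the reassembly,
which is exactly the distinction the diff below introduces.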
Diffstat (limited to 'src')
-rw-r--r--  src/vnet/buffer.h             |  4
-rw-r--r--  src/vnet/ip/ip4_reassembly.c  | 61
-rw-r--r--  src/vnet/ip/ip6_reassembly.c  | 66
3 files changed, 67 insertions, 64 deletions
diff --git a/src/vnet/buffer.h b/src/vnet/buffer.h
index 7065bd745d0..6738f3cdca8 100644
--- a/src/vnet/buffer.h
+++ b/src/vnet/buffer.h
@@ -188,8 +188,8 @@ typedef struct
       /* in/out variables */
       struct
       {
-	u32 next_index;		/* index of next node - ignored if "feature" node */
-	u32 error_next_index;	/* index of next node if error - ignored if 'feature' node */
+	u32 next_index;		/* index of next node - used by custom apps */
+	u32 error_next_index;	/* index of next node if error - used by custom apps */
 	u16 estimated_mtu;	/* estimated MTU calculated during reassembly */
 	u16 owner_thread_index;
       };
diff --git a/src/vnet/ip/ip4_reassembly.c b/src/vnet/ip/ip4_reassembly.c
index 73a83a9a632..85861025b2d 100644
--- a/src/vnet/ip/ip4_reassembly.c
+++ b/src/vnet/ip/ip4_reassembly.c
@@ -133,10 +133,8 @@ typedef struct
   u32 trace_op_counter;
   // next index - used by non-feature node
   u32 next_index;
-  // error next index - used by non-feature node
+  // error next index - used by custom apps (~0 if not used)
   u32 error_next_index;
-  // is_feature flag stored for non-inline code use
-  bool is_feature;
   // minimum fragment length for this reassembly - used to estimate MTU
   u16 min_fragment_length;
   // number of fragments in this reassembly
@@ -337,8 +335,7 @@ ip4_reass_free (ip4_reass_main_t * rm, ip4_reass_per_thread_t * rt,

 always_inline void
 ip4_reass_drop_all (vlib_main_t * vm, vlib_node_runtime_t * node,
-		    ip4_reass_main_t * rm, ip4_reass_t * reass,
-		    bool is_feature)
+		    ip4_reass_main_t * rm, ip4_reass_t * reass)
 {
   u32 range_bi = reass->first_bi;
   vlib_buffer_t *range_b;
@@ -366,7 +363,7 @@ ip4_reass_drop_all (vlib_main_t * vm, vlib_node_runtime_t * node,
       range_bi = range_vnb->ip.reass.next_range_bi;
     }
   /* send to next_error_index */
-  if (!(is_feature))
+  if (~0 != reass->error_next_index)
     {
       u32 n_left_to_next, *to_next, next_index;

@@ -403,8 +400,7 @@ ip4_reass_drop_all (vlib_main_t * vm, vlib_node_runtime_t * node,
 static ip4_reass_t *
 ip4_reass_find_or_create (vlib_main_t * vm, vlib_node_runtime_t * node,
 			  ip4_reass_main_t * rm, ip4_reass_per_thread_t * rt,
-			  ip4_reass_kv_t * kv, u8 * do_handoff,
-			  bool is_feature)
+			  ip4_reass_kv_t * kv, u8 * do_handoff)
 {
   ip4_reass_t *reass = NULL;
   f64 now = vlib_time_now (rm->vlib_main);
@@ -421,7 +417,7 @@ ip4_reass_find_or_create (vlib_main_t * vm, vlib_node_runtime_t * node,

       if (now > reass->last_heard + rm->timeout)
 	{
-	  ip4_reass_drop_all (vm, node, rm, reass, is_feature);
+	  ip4_reass_drop_all (vm, node, rm, reass);
 	  ip4_reass_free (rm, rt, reass);
 	  reass = NULL;
 	}
@@ -447,7 +443,8 @@ ip4_reass_find_or_create (vlib_main_t * vm, vlib_node_runtime_t * node,
       reass->first_bi = ~0;
       reass->last_packet_octet = ~0;
       reass->data_len = 0;
-      reass->is_feature = is_feature;
+      reass->next_index = ~0;
+      reass->error_next_index = ~0;
       ++rt->reass_n;
     }

@@ -470,7 +467,7 @@ always_inline ip4_reass_rc_t
 ip4_reass_finalize (vlib_main_t * vm, vlib_node_runtime_t * node,
 		    ip4_reass_main_t * rm, ip4_reass_per_thread_t * rt,
 		    ip4_reass_t * reass, u32 * bi0, u32 * next0, u32 * error0,
-		    bool is_feature)
+		    bool is_custom_app)
 {
   vlib_buffer_t *first_b = vlib_get_buffer (vm, reass->first_bi);
   vlib_buffer_t *last_b = NULL;
@@ -653,7 +650,7 @@ ip4_reass_finalize (vlib_main_t * vm, vlib_node_runtime_t * node,
 #endif
     }
   *bi0 = reass->first_bi;
-  if (is_feature)
+  if (!is_custom_app)
     {
       *next0 = IP4_REASSEMBLY_NEXT_INPUT;
     }
@@ -762,14 +759,19 @@ always_inline ip4_reass_rc_t
 ip4_reass_update (vlib_main_t * vm, vlib_node_runtime_t * node,
 		  ip4_reass_main_t * rm, ip4_reass_per_thread_t * rt,
 		  ip4_reass_t * reass, u32 * bi0, u32 * next0, u32 * error0,
-		  bool is_feature)
+		  bool is_custom_app)
 {
   ip4_reass_rc_t rc = IP4_REASS_RC_OK;
   int consumed = 0;
   vlib_buffer_t *fb = vlib_get_buffer (vm, *bi0);
   ip4_header_t *fip = vlib_buffer_get_current (fb);
   vnet_buffer_opaque_t *fvnb = vnet_buffer (fb);
-  reass->next_index = fvnb->ip.reass.next_index;	// store next_index before it's overwritten
+  if (is_custom_app)
+    {
+      // store (error_)next_index before it's overwritten
+      reass->next_index = fvnb->ip.reass.next_index;
+      reass->error_next_index = fvnb->ip.reass.error_next_index;
+    }
   const u32 fragment_first = ip4_get_fragment_offset_bytes (fip);
   const u32 fragment_length =
     clib_net_to_host_u16 (fip->length) - ip4_header_bytes (fip);
@@ -972,7 +974,7 @@ ip4_reass_update (vlib_main_t * vm, vlib_node_runtime_t * node,
       reass->data_len == reass->last_packet_octet + 1)
     {
       return ip4_reass_finalize (vm, node, rm, rt, reass, bi0, next0, error0,
-				 is_feature);
+				 is_custom_app);
     }
   else
     {
@@ -994,9 +996,9 @@ ip4_reass_update (vlib_main_t * vm, vlib_node_runtime_t * node,
 }

 always_inline uword
-ip4_reassembly_inline (vlib_main_t * vm,
-		       vlib_node_runtime_t * node,
-		       vlib_frame_t * frame, bool is_feature)
+ip4_reassembly_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
+		       vlib_frame_t * frame, bool is_feature,
+		       bool is_custom_app)
 {
   u32 *from = vlib_frame_vector_args (frame);
   u32 n_left_from, n_left_to_next, *to_next, next_index;
@@ -1024,7 +1026,7 @@ ip4_reassembly_inline (vlib_main_t * vm,
	  if (!ip4_get_fragment_more (ip0) && !ip4_get_fragment_offset (ip0))
	    {
	      // this is a whole packet - no fragmentation
-	      if (is_feature)
+	      if (!is_custom_app)
		{
		  next0 = IP4_REASSEMBLY_NEXT_INPUT;
		}
@@ -1059,7 +1061,7 @@ ip4_reassembly_inline (vlib_main_t * vm,

	      ip4_reass_t *reass =
		ip4_reass_find_or_create (vm, node, rm, rt, &kv,
-					  &do_handoff, is_feature);
+					  &do_handoff);

	      if (PREDICT_FALSE (do_handoff))
		{
@@ -1076,7 +1078,7 @@ ip4_reassembly_inline (vlib_main_t * vm,
		{
		  switch (ip4_reass_update
			  (vm, node, rm, rt, reass, &bi0, &next0,
-			   &error0, is_feature))
+			   &error0, is_custom_app))
		    {
		    case IP4_REASS_RC_OK:
		      /* nothing to do here */
@@ -1085,8 +1087,7 @@ ip4_reassembly_inline (vlib_main_t * vm,
		      vlib_node_increment_counter (vm, node->node_index,
						   IP4_ERROR_REASS_FRAGMENT_CHAIN_TOO_LONG,
						   1);
-		      ip4_reass_drop_all (vm, node, rm, reass,
-					  is_feature);
+		      ip4_reass_drop_all (vm, node, rm, reass);
		      ip4_reass_free (rm, rt, reass);
		      goto next_packet;
		      break;
@@ -1094,8 +1095,7 @@ ip4_reassembly_inline (vlib_main_t * vm,
		      vlib_node_increment_counter (vm, node->node_index,
						   IP4_ERROR_REASS_NO_BUF,
						   1);
-		      ip4_reass_drop_all (vm, node, rm, reass,
-					  is_feature);
+		      ip4_reass_drop_all (vm, node, rm, reass);
		      ip4_reass_free (rm, rt, reass);
		      goto next_packet;
		      break;
@@ -1104,8 +1104,7 @@ ip4_reassembly_inline (vlib_main_t * vm,
		      vlib_node_increment_counter (vm, node->node_index,
						   IP4_ERROR_REASS_INTERNAL_ERROR,
						   1);
-		      ip4_reass_drop_all (vm, node, rm, reass,
-					  is_feature);
+		      ip4_reass_drop_all (vm, node, rm, reass);
		      ip4_reass_free (rm, rt, reass);
		      goto next_packet;
		      break;
@@ -1158,7 +1157,8 @@ static char *ip4_reassembly_error_strings[] = {
 VLIB_NODE_FN (ip4_reass_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
			       vlib_frame_t * frame)
 {
-  return ip4_reassembly_inline (vm, node, frame, false /* is_feature */ );
+  return ip4_reassembly_inline (vm, node, frame, false /* is_feature */ ,
+				false /* is_custom_app */ );
 }

 /* *INDENT-OFF* */
@@ -1183,7 +1183,8 @@ VLIB_NODE_FN (ip4_reass_node_feature) (vlib_main_t * vm,
					vlib_node_runtime_t * node,
					vlib_frame_t * frame)
 {
-  return ip4_reassembly_inline (vm, node, frame, true /* is_feature */ );
+  return ip4_reassembly_inline (vm, node, frame, true /* is_feature */ ,
+				false /* is_custom_app */ );
 }

 /* *INDENT-OFF* */
@@ -1412,7 +1413,7 @@ ip4_reass_walk_expired (vlib_main_t * vm,
          vec_foreach (i, pool_indexes_to_free)
          {
            ip4_reass_t *reass = pool_elt_at_index (rt->pool, i[0]);
-           ip4_reass_drop_all (vm, node, rm, reass, reass->is_feature);
+           ip4_reass_drop_all (vm, node, rm, reass);
            ip4_reass_free (rm, rt, reass);
          }
	  /* *INDENT-ON* */
diff --git a/src/vnet/ip/ip6_reassembly.c b/src/vnet/ip/ip6_reassembly.c
index 01f76aa1f0e..10fc6f1f559 100644
--- a/src/vnet/ip/ip6_reassembly.c
+++ b/src/vnet/ip/ip6_reassembly.c
@@ -110,12 +110,10 @@ typedef struct
   u32 data_len;
   // trace operation counter
   u32 trace_op_counter;
-  // next index - used by non-feature node
+  // next index - used by custom apps (~0 if not set)
   u32 next_index;
-  // error next index - used by non-feature node
+  // error next index - used by custom apps (~0 if not set)
   u32 error_next_index;
-  // is_feature flag stored for non-inline code use
-  bool is_feature;
   // minimum fragment length for this reassembly - used to estimate MTU
   u16 min_fragment_length;
   // number of fragments for this reassembly
@@ -326,8 +324,7 @@ ip6_reass_free (ip6_reass_main_t * rm, ip6_reass_per_thread_t * rt,

 always_inline void
 ip6_reass_drop_all (vlib_main_t * vm, vlib_node_runtime_t * node,
-		    ip6_reass_main_t * rm, ip6_reass_t * reass,
-		    bool is_feature)
+		    ip6_reass_main_t * rm, ip6_reass_t * reass)
 {
   u32 range_bi = reass->first_bi;
   vlib_buffer_t *range_b;
@@ -355,7 +352,7 @@ ip6_reass_drop_all (vlib_main_t * vm, vlib_node_runtime_t * node,
       range_bi = range_vnb->ip.reass.next_range_bi;
     }
   /* send to next_error_index */
-  if (!(is_feature))
+  if (~0 != reass->error_next_index)
     {
       u32 n_left_to_next, *to_next, next_index;

@@ -393,13 +390,13 @@ ip6_reass_drop_all (vlib_main_t * vm, vlib_node_runtime_t * node,
 always_inline void
 ip6_reass_on_timeout (vlib_main_t * vm, vlib_node_runtime_t * node,
		      ip6_reass_main_t * rm, ip6_reass_t * reass,
-		      u32 * icmp_bi, bool is_feature)
+		      u32 * icmp_bi)
 {
   if (~0 == reass->first_bi)
     {
       return;
     }
-  if (is_feature)
+  if (~0 == reass->next_index)	// custom apps don't want icmp
     {
       vlib_buffer_t *b = vlib_get_buffer (vm, reass->first_bi);
       if (0 == vnet_buffer (b)->ip.reass.fragment_first)
@@ -426,14 +423,13 @@ ip6_reass_on_timeout (vlib_main_t * vm, vlib_node_runtime_t * node,
						   0);
	}
     }
-  ip6_reass_drop_all (vm, node, rm, reass, is_feature);
+  ip6_reass_drop_all (vm, node, rm, reass);
 }

 always_inline ip6_reass_t *
 ip6_reass_find_or_create (vlib_main_t * vm, vlib_node_runtime_t * node,
			  ip6_reass_main_t * rm, ip6_reass_per_thread_t * rt,
-			  ip6_reass_kv_t * kv, u32 * icmp_bi, u8 * do_handoff,
-			  bool is_feature)
+			  ip6_reass_kv_t * kv, u32 * icmp_bi, u8 * do_handoff)
 {
   ip6_reass_t *reass = NULL;
   f64 now = vlib_time_now (rm->vlib_main);
@@ -450,7 +446,7 @@ ip6_reass_find_or_create (vlib_main_t * vm, vlib_node_runtime_t * node,

       if (now > reass->last_heard + rm->timeout)
	{
-	  ip6_reass_on_timeout (vm, node, rm, reass, icmp_bi, is_feature);
+	  ip6_reass_on_timeout (vm, node, rm, reass, icmp_bi);
	  ip6_reass_free (rm, rt, reass);
	  reass = NULL;
	}
@@ -476,7 +472,8 @@ ip6_reass_find_or_create (vlib_main_t * vm, vlib_node_runtime_t * node,
       reass->first_bi = ~0;
       reass->last_packet_octet = ~0;
       reass->data_len = 0;
-      reass->is_feature = is_feature;
+      reass->next_index = ~0;
+      reass->error_next_index = ~0;
       ++rt->reass_n;
     }

@@ -503,7 +500,7 @@ always_inline ip6_reass_rc_t
 ip6_reass_finalize (vlib_main_t * vm, vlib_node_runtime_t * node,
		    ip6_reass_main_t * rm, ip6_reass_per_thread_t * rt,
		    ip6_reass_t * reass, u32 * bi0, u32 * next0, u32 * error0,
-		    bool is_feature)
+		    bool is_custom_app)
 {
   *bi0 = reass->first_bi;
   *error0 = IP6_ERROR_NONE;
@@ -706,7 +703,7 @@ ip6_reass_finalize (vlib_main_t * vm, vlib_node_runtime_t * node,
       while (0);
 #endif
     }
-  if (is_feature)
+  if (!is_custom_app)
     {
       *next0 = IP6_REASSEMBLY_NEXT_INPUT;
     }
@@ -754,13 +751,16 @@ always_inline ip6_reass_rc_t
 ip6_reass_update (vlib_main_t * vm, vlib_node_runtime_t * node,
		  ip6_reass_main_t * rm, ip6_reass_per_thread_t * rt,
		  ip6_reass_t * reass, u32 * bi0, u32 * next0, u32 * error0,
-		  ip6_frag_hdr_t * frag_hdr, bool is_feature)
+		  ip6_frag_hdr_t * frag_hdr, bool is_custom_app)
 {
   int consumed = 0;
   vlib_buffer_t *fb = vlib_get_buffer (vm, *bi0);
   vnet_buffer_opaque_t *fvnb = vnet_buffer (fb);
-  reass->next_index = fvnb->ip.reass.next_index;	// store next_index before it's overwritten
-  reass->error_next_index = fvnb->ip.reass.error_next_index;	// store error_next_index before it is overwritten
+  if (is_custom_app)
+    {
+      reass->next_index = fvnb->ip.reass.next_index;	// store next_index before it's overwritten
+      reass->error_next_index = fvnb->ip.reass.error_next_index;	// store error_next_index before it is overwritten
+    }
   fvnb->ip.reass.ip6_frag_hdr_offset =
     (u8 *) frag_hdr - (u8 *) vlib_buffer_get_current (fb);
@@ -837,7 +837,7 @@ ip6_reass_update (vlib_main_t * vm, vlib_node_runtime_t * node,
	  else
	    {
	      // overlapping fragment - not allowed by RFC 8200
-	      ip6_reass_drop_all (vm, node, rm, reass, is_feature);
+	      ip6_reass_drop_all (vm, node, rm, reass);
	      ip6_reass_free (rm, rt, reass);
	      if (PREDICT_FALSE (fb->flags & VLIB_BUFFER_IS_TRACED))
		{
@@ -863,7 +863,7 @@ check_if_done_maybe:
       reass->data_len == reass->last_packet_octet + 1)
     {
       return ip6_reass_finalize (vm, node, rm, rt, reass, bi0, next0, error0,
-				 is_feature);
+				 is_custom_app);
     }
   else
     {
@@ -952,9 +952,9 @@ ip6_reass_verify_packet_size_lt_64k (vlib_main_t * vm,
 }

 always_inline uword
-ip6_reassembly_inline (vlib_main_t * vm,
-		       vlib_node_runtime_t * node,
-		       vlib_frame_t * frame, bool is_feature)
+ip6_reassembly_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
+		       vlib_frame_t * frame, bool is_feature,
+		       bool is_custom_app)
 {
   u32 *from = vlib_frame_vector_args (frame);
   u32 n_left_from, n_left_to_next, *to_next, next_index;
@@ -1027,7 +1027,7 @@ ip6_reassembly_inline (vlib_main_t * vm,

	  ip6_reass_t *reass =
	    ip6_reass_find_or_create (vm, node, rm, rt, &kv, &icmp_bi,
-				      &do_handoff, is_feature);
+				      &do_handoff);

	  if (PREDICT_FALSE (do_handoff))
	    {
@@ -1042,7 +1042,7 @@ ip6_reassembly_inline (vlib_main_t * vm,
	  else if (reass)
	    {
	      switch (ip6_reass_update (vm, node, rm, rt, reass, &bi0, &next0,
-					&error0, frag_hdr, is_feature))
+					&error0, frag_hdr, is_custom_app))
		{
		case IP6_REASS_RC_OK:
		  /* nothing to do here */
@@ -1051,14 +1051,14 @@ ip6_reassembly_inline (vlib_main_t * vm,
		  vlib_node_increment_counter (vm, node->node_index,
					       IP6_ERROR_REASS_FRAGMENT_CHAIN_TOO_LONG,
					       1);
-		  ip6_reass_drop_all (vm, node, rm, reass, is_feature);
+		  ip6_reass_drop_all (vm, node, rm, reass);
		  ip6_reass_free (rm, rt, reass);
		  goto next_packet;
		  break;
		case IP6_REASS_RC_NO_BUF:
		  vlib_node_increment_counter (vm, node->node_index,
					       IP6_ERROR_REASS_NO_BUF, 1);
-		  ip6_reass_drop_all (vm, node, rm, reass, is_feature);
+		  ip6_reass_drop_all (vm, node, rm, reass);
		  ip6_reass_free (rm, rt, reass);
		  goto next_packet;
		  break;
@@ -1067,7 +1067,7 @@ ip6_reassembly_inline (vlib_main_t * vm,
		  vlib_node_increment_counter (vm, node->node_index,
					       IP6_ERROR_REASS_INTERNAL_ERROR,
					       1);
-		  ip6_reass_drop_all (vm, node, rm, reass, is_feature);
+		  ip6_reass_drop_all (vm, node, rm, reass);
		  ip6_reass_free (rm, rt, reass);
		  goto next_packet;
		  break;
@@ -1135,7 +1135,8 @@ static char *ip6_reassembly_error_strings[] = {
 VLIB_NODE_FN (ip6_reass_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
			       vlib_frame_t * frame)
 {
-  return ip6_reassembly_inline (vm, node, frame, false /* is_feature */ );
+  return ip6_reassembly_inline (vm, node, frame, false /* is_feature */ ,
+				false /* is_custom_app */ );
 }

 /* *INDENT-OFF* */
@@ -1160,7 +1161,8 @@ VLIB_NODE_FN (ip6_reass_node_feature) (vlib_main_t * vm,
					vlib_node_runtime_t * node,
					vlib_frame_t * frame)
 {
-  return ip6_reassembly_inline (vm, node, frame, true /* is_feature */ );
+  return ip6_reassembly_inline (vm, node, frame, true /* is_feature */ ,
+				false /* is_custom_app */ );
 }

 /* *INDENT-OFF* */
@@ -1407,7 +1409,7 @@ ip6_reass_walk_expired (vlib_main_t * vm,
		    b->flags &= ~VLIB_BUFFER_IS_TRACED;
		  }
	      }
-	    ip6_reass_on_timeout (vm, node, rm, reass, &icmp_bi, reass->is_feature);
+	    ip6_reass_on_timeout (vm, node, rm, reass, &icmp_bi);
	    if (~0 != icmp_bi)
	      {
		vec_add1 (vec_icmp_bi, icmp_bi);
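
Editor's note: on the IPv6 timeout path the same ~0 sentinel also decides who owns the
reassembly. After this patch, ip6_reass_on_timeout generates an ICMP time-exceeded only
when next_index is still ~0, i.e. when no custom app has claimed the fragments. A rough
standalone sketch of that branch follows; reass_model_t, on_timeout, drop_all and
send_icmp_time_exceeded are illustrative names, not the VPP types or functions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define INDEX_UNSET (~0u)

/* Illustrative stand-ins for the reassembly context and the two actions
 * taken by ip6_reass_on_timeout () in this patch. */
typedef struct
{
  uint32_t first_bi;          /* ~0 when no fragments were buffered */
  uint32_t next_index;        /* ~0 unless a custom app claimed the reassembly */
  uint32_t error_next_index;  /* ~0 unless a custom app supplied an error path */
} reass_model_t;

static void
send_icmp_time_exceeded (uint32_t first_bi)
{
  printf ("icmp time-exceeded built from buffer %u\n", first_bi);
}

static void
drop_all (const reass_model_t * r)
{
  /* mirrors the new "~0 != reass->error_next_index" test in drop_all */
  if (r->error_next_index != INDEX_UNSET)
    printf ("fragments forwarded to error next %u\n", r->error_next_index);
  else
    printf ("fragments dropped\n");
}

/* Mirrors the shape of ip6_reass_on_timeout () after the patch: ICMP is
 * generated only for reassemblies not owned by a custom app. */
static void
on_timeout (const reass_model_t * r)
{
  if (r->first_bi == INDEX_UNSET)
    return;                             /* nothing buffered, nothing to report */
  if (r->next_index == INDEX_UNSET)     /* custom apps don't want icmp */
    send_icmp_time_exceeded (r->first_bi);
  drop_all (r);
}

int
main (void)
{
  reass_model_t feature_owned = { 7, INDEX_UNSET, INDEX_UNSET };
  reass_model_t app_owned = { 9, 42, 43 };
  on_timeout (&feature_owned);          /* icmp + drop */
  on_timeout (&app_owned);              /* no icmp, forward to error next */
  return 0;
}

Because both decisions key off fields initialized to ~0 in ip6_reass_find_or_create,
expired reassemblies no longer need the stored is_feature flag that the crash was
traced back to.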