author     Filip Varga <filipvarga89@gmail.com>    2019-07-31 14:36:39 +0200
committer  Ole Trøan <otroan@employees.org>        2019-08-01 12:53:06 +0000
commit     67eb4bb47739431bcb34bad8f1d6e3743a5e17ff (patch)
tree       6a68b2ddc0b9f916ea63093395d0bb7ce5d8ca30 /src/plugins/nat/nat44_handoff.c
parent     fb1ccc7c369d879a20567df609d04cfdcec5f8c1 (diff)
nat: handoff rewrite & fixes for multi-worker
Type: fix
Change-Id: Ib9164d8f6c681e8900e645306f3a2dc0ac0e40a8
Signed-off-by: Filip Varga <filipvarga89@gmail.com>
Diffstat (limited to 'src/plugins/nat/nat44_handoff.c')
-rw-r--r--  src/plugins/nat/nat44_handoff.c  165
1 file changed, 127 insertions(+), 38 deletions(-)
diff --git a/src/plugins/nat/nat44_handoff.c b/src/plugins/nat/nat44_handoff.c
index e4cb483a458..04590e409ac 100644
--- a/src/plugins/nat/nat44_handoff.c
+++ b/src/plugins/nat/nat44_handoff.c
@@ -68,35 +68,29 @@ format_nat44_handoff_trace (u8 * s, va_list * args)
 }
 
 static inline uword
-nat44_worker_handoff_fn_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
+nat44_worker_handoff_fn_inline (vlib_main_t * vm,
+                                vlib_node_runtime_t * node,
                                 vlib_frame_t * frame, u8 is_output,
                                 u8 is_in2out)
 {
+  u32 n_enq, n_left_from, *from, do_handoff = 0, same_worker = 0;
+
+  u16 thread_indices[VLIB_FRAME_SIZE], *ti = thread_indices;
+  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
   snat_main_t *sm = &snat_main;
-  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
-  u32 n_enq, n_left_from, *from;
-  u16 thread_indices[VLIB_FRAME_SIZE], *ti;
-  u32 fq_index;
+
   snat_get_worker_function_t *get_worker;
-  u32 thread_index = vm->thread_index;
-  u32 do_handoff = 0, same_worker = 0;
+  u32 fq_index, thread_index = vm->thread_index;
 
   from = vlib_frame_vector_args (frame);
   n_left_from = frame->n_vectors;
-  vlib_get_buffers (vm, from, bufs, n_left_from);
-  b = bufs;
-  ti = thread_indices;
-
-  ASSERT (vec_len (sm->workers));
+  vlib_get_buffers (vm, from, b, n_left_from);
 
   if (is_in2out)
     {
+      fq_index = is_output ? sm->fq_in2out_output_index : sm->fq_in2out_index;
       get_worker = sm->worker_in2out_cb;
-      if (is_output)
-        fq_index = sm->fq_in2out_output_index;
-      else
-        fq_index = sm->fq_in2out_index;
     }
   else
     {
@@ -104,45 +98,138 @@ nat44_worker_handoff_fn_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
       get_worker = sm->worker_out2in_cb;
     }
 
-  while (n_left_from > 0)
+  while (n_left_from >= 4)
     {
-      u32 sw_if_index0;
-      u32 rx_fib_index0;
-      ip4_header_t *ip0;
+      u32 sw_if_index0, sw_if_index1, sw_if_index2, sw_if_index3;
+      u32 rx_fib_index0 = 0, rx_fib_index1 = 0,
+        rx_fib_index2 = 0, rx_fib_index3 = 0;
+      ip4_header_t *ip0, *ip1, *ip2, *ip3;
+
+      if (PREDICT_TRUE (n_left_from >= 8))
+        {
+          vlib_prefetch_buffer_header (b[4], STORE);
+          vlib_prefetch_buffer_header (b[5], STORE);
+          vlib_prefetch_buffer_header (b[6], STORE);
+          vlib_prefetch_buffer_header (b[7], STORE);
+          CLIB_PREFETCH (&b[4]->data, CLIB_CACHE_LINE_BYTES, STORE);
+          CLIB_PREFETCH (&b[5]->data, CLIB_CACHE_LINE_BYTES, STORE);
+          CLIB_PREFETCH (&b[6]->data, CLIB_CACHE_LINE_BYTES, STORE);
+          CLIB_PREFETCH (&b[7]->data, CLIB_CACHE_LINE_BYTES, STORE);
+        }
 
-      sw_if_index0 = vnet_buffer (b[0])->sw_if_index[VLIB_RX];
-      rx_fib_index0 = ip4_fib_table_get_index_for_sw_if_index (sw_if_index0);
       ip0 = vlib_buffer_get_current (b[0]);
+      ip1 = vlib_buffer_get_current (b[1]);
+      ip2 = vlib_buffer_get_current (b[2]);
+      ip3 = vlib_buffer_get_current (b[3]);
+
+      if (PREDICT_FALSE (is_in2out))
+        {
+          sw_if_index0 = vnet_buffer (b[0])->sw_if_index[VLIB_RX];
+          sw_if_index1 = vnet_buffer (b[1])->sw_if_index[VLIB_RX];
+          sw_if_index2 = vnet_buffer (b[2])->sw_if_index[VLIB_RX];
+          sw_if_index3 = vnet_buffer (b[3])->sw_if_index[VLIB_RX];
+
+          rx_fib_index0 =
+            ip4_fib_table_get_index_for_sw_if_index (sw_if_index0);
+          rx_fib_index1 =
+            ip4_fib_table_get_index_for_sw_if_index (sw_if_index1);
+          rx_fib_index2 =
+            ip4_fib_table_get_index_for_sw_if_index (sw_if_index2);
+          rx_fib_index3 =
+            ip4_fib_table_get_index_for_sw_if_index (sw_if_index3);
+        }
+
       ti[0] = get_worker (ip0, rx_fib_index0);
+      ti[1] = get_worker (ip1, rx_fib_index1);
+      ti[2] = get_worker (ip2, rx_fib_index2);
+      ti[3] = get_worker (ip3, rx_fib_index3);
 
-      if (ti[0] != thread_index)
+      if (ti[0] == thread_index)
+        same_worker++;
+      else
         do_handoff++;
+
+      if (ti[1] == thread_index)
+        same_worker++;
+      else
+        do_handoff++;
+
+      if (ti[2] == thread_index)
+        same_worker++;
       else
+        do_handoff++;
+
+      if (ti[3] == thread_index)
         same_worker++;
+      else
+        do_handoff++;
+
+      b += 4;
+      ti += 4;
+      n_left_from -= 4;
+    }
 
-      if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
-                         && (b[0]->flags & VLIB_BUFFER_IS_TRACED)))
+  while (n_left_from > 0)
+    {
+      u32 sw_if_index0;
+      u32 rx_fib_index0 = 0;
+      ip4_header_t *ip0;
+
+      ip0 = vlib_buffer_get_current (b[0]);
+
+      if (PREDICT_FALSE (is_in2out))
         {
-          nat44_handoff_trace_t *t =
-            vlib_add_trace (vm, node, b[0], sizeof (*t));
-          t->next_worker_index = ti[0];
-          t->trace_index = vlib_buffer_get_trace_index (b[0]);
-          t->in2out = is_in2out;
+          sw_if_index0 = vnet_buffer (b[0])->sw_if_index[VLIB_RX];
+          rx_fib_index0 =
+            ip4_fib_table_get_index_for_sw_if_index (sw_if_index0);
         }
 
-      n_left_from -= 1;
-      ti += 1;
+      ti[0] = get_worker (ip0, rx_fib_index0);
+
+      if (ti[0] == thread_index)
+        same_worker++;
+      else
+        do_handoff++;
+
       b += 1;
+      ti += 1;
+      n_left_from -= 1;
+    }
+
+  if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
+    {
+      u32 i;
+      b = bufs;
+      ti = thread_indices;
+
+      for (i = 0; i < frame->n_vectors; i++)
+        {
+          if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
+            {
+              nat44_handoff_trace_t *t =
+                vlib_add_trace (vm, node, b[0], sizeof (*t));
+              t->next_worker_index = ti[0];
+              t->trace_index = vlib_buffer_get_trace_index (b[0]);
+              t->in2out = is_in2out;
+
+              b++;
+              ti++;
+            }
+          else
+            break;
        }
     }
 
-  n_enq =
-    vlib_buffer_enqueue_to_thread (vm, fq_index, from, thread_indices,
-                                   frame->n_vectors, 1);
+  n_enq = vlib_buffer_enqueue_to_thread (vm, fq_index, from, thread_indices,
+                                         frame->n_vectors, 1);
 
   if (n_enq < frame->n_vectors)
-    vlib_node_increment_counter (vm, node->node_index,
-                                 NAT44_HANDOFF_ERROR_CONGESTION_DROP,
-                                 frame->n_vectors - n_enq);
+    {
+      vlib_node_increment_counter (vm, node->node_index,
+                                   NAT44_HANDOFF_ERROR_CONGESTION_DROP,
+                                   frame->n_vectors - n_enq);
+    }
+
   vlib_node_increment_counter (vm, node->node_index,
                                NAT44_HANDOFF_ERROR_SAME_WORKER, same_worker);
   vlib_node_increment_counter (vm, node->node_index,
@@ -150,6 +237,8 @@ nat44_worker_handoff_fn_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
   return frame->n_vectors;
 }
 
+
+
 VLIB_NODE_FN (snat_in2out_worker_handoff_node) (vlib_main_t * vm,
                                                 vlib_node_runtime_t * node,
                                                 vlib_frame_t * frame)
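
The core of the rewrite is a classic VPP quad-loop: classify four packets per iteration (prefetching the next four buffer headers), record the chosen worker per packet in thread_indices, count packets that already belong to the current thread separately from those that need a handoff, then pass the whole index array to vlib_buffer_enqueue_to_thread. The stand-alone C sketch below mirrors only that control flow outside of VPP; packet_t and pick_worker are hypothetical stand-ins for vlib_buffer_t and the plugin's get_worker callback, and no VPP APIs are used.

/* Minimal sketch of the quad-loop handoff pattern from the diff above.
 * packet_t and pick_worker() are hypothetical stand-ins for VPP's
 * vlib_buffer_t and the snat get_worker callback; the counting logic
 * mirrors the same_worker / do_handoff bookkeeping in
 * nat44_worker_handoff_fn_inline. */
#include <stdint.h>
#include <stdio.h>

#define FRAME_SIZE 256
#define N_WORKERS  4

typedef struct { uint32_t src_ip; } packet_t;

/* Stand-in worker selection: hash the source address so a given flow
 * always maps to the same worker thread. */
static uint16_t
pick_worker (const packet_t *p)
{
  return (uint16_t) (p->src_ip % N_WORKERS);
}

static void
assign_workers (packet_t *pkts, uint32_t n, uint16_t *thread_indices,
                uint16_t this_thread, uint32_t *same_worker,
                uint32_t *do_handoff)
{
  packet_t *b = pkts;
  uint16_t *ti = thread_indices;
  uint32_t n_left = n;

  /* Quad loop: four packets per iteration so the CPU has more
   * independent work in flight (the real node also prefetches
   * buffers b[4..7] here). */
  while (n_left >= 4)
    {
      ti[0] = pick_worker (&b[0]);
      ti[1] = pick_worker (&b[1]);
      ti[2] = pick_worker (&b[2]);
      ti[3] = pick_worker (&b[3]);

      for (int i = 0; i < 4; i++)
        {
          if (ti[i] == this_thread)
            (*same_worker)++;
          else
            (*do_handoff)++;
        }

      b += 4;
      ti += 4;
      n_left -= 4;
    }

  /* Single loop: finish the remainder one packet at a time. */
  while (n_left > 0)
    {
      ti[0] = pick_worker (&b[0]);
      if (ti[0] == this_thread)
        (*same_worker)++;
      else
        (*do_handoff)++;
      b += 1;
      ti += 1;
      n_left -= 1;
    }
  /* The real node now hands thread_indices to
   * vlib_buffer_enqueue_to_thread() and counts congestion drops. */
}

int
main (void)
{
  packet_t pkts[FRAME_SIZE];
  uint16_t thread_indices[FRAME_SIZE];
  uint32_t same_worker = 0, do_handoff = 0;

  for (uint32_t i = 0; i < FRAME_SIZE; i++)
    pkts[i].src_ip = 0x0a000000 + i;

  assign_workers (pkts, FRAME_SIZE, thread_indices, /* this_thread */ 1,
                  &same_worker, &do_handoff);
  printf ("same_worker=%u do_handoff=%u\n", (unsigned) same_worker,
          (unsigned) do_handoff);
  return 0;
}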