path: root/src/plugins/nat/nat44-ed/nat44_ed_handoff.c
author     Filip Varga <fivarga@cisco.com>    2021-02-17 14:34:54 +0100
committer  Ole Trøan <otroan@employees.org>   2021-02-24 19:25:25 +0000
commit     0eaf4e6784efb2d058fe2f031578251b6bcc0aa8 (patch)
tree       cc0c2b485bfee51068fa4970acc9f95ec595e8e5  /src/plugins/nat/nat44-ed/nat44_ed_handoff.c
parent     5db2f4a4312112ab57043ce88f10edc4acc141ec (diff)
nat: Final NAT44 EI/ED split patch
This patch achieves complete separation of the endpoint-dependent and endpoint-independent IPv4 NAT features. Some common code is also moved to the NAT library.

Type: refactor
Change-Id: I52468b7e2b5ac28958a2baf8e2ea01787322e801
Signed-off-by: Filip Varga <fivarga@cisco.com>
Diffstat (limited to 'src/plugins/nat/nat44-ed/nat44_ed_handoff.c')
-rw-r--r--  src/plugins/nat/nat44-ed/nat44_ed_handoff.c  344
1 file changed, 344 insertions, 0 deletions
diff --git a/src/plugins/nat/nat44-ed/nat44_ed_handoff.c b/src/plugins/nat/nat44-ed/nat44_ed_handoff.c
new file mode 100644
index 00000000000..6715ce2f2c4
--- /dev/null
+++ b/src/plugins/nat/nat44-ed/nat44_ed_handoff.c
@@ -0,0 +1,344 @@
+/*
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * @file
+ * @brief NAT44 worker handoff
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vnet/handoff.h>
+#include <vnet/fib/ip4_fib.h>
+#include <vppinfra/error.h>
+
+#include <nat/nat44-ed/nat44_ed.h>
+#include <nat/nat44-ed/nat44_ed_inlines.h>
+
+typedef struct
+{
+ u32 next_worker_index;
+ u32 trace_index;
+ u8 in2out;
+ u8 output;
+} nat44_handoff_trace_t;
+
+#define foreach_nat44_handoff_error \
+ _ (CONGESTION_DROP, "congestion drop") \
+ _ (SAME_WORKER, "same worker") \
+ _ (DO_HANDOFF, "do handoff")
+
+typedef enum
+{
+#define _(sym, str) NAT44_HANDOFF_ERROR_##sym,
+ foreach_nat44_handoff_error
+#undef _
+ NAT44_HANDOFF_N_ERROR,
+} nat44_handoff_error_t;
+
+static char *nat44_handoff_error_strings[] = {
+#define _(sym,string) string,
+ foreach_nat44_handoff_error
+#undef _
+};
+
+static u8 *
+format_nat44_handoff_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ nat44_handoff_trace_t *t = va_arg (*args, nat44_handoff_trace_t *);
+ char *tag, *output;
+
+ tag = t->in2out ? "IN2OUT" : "OUT2IN";
+ output = t->output ? "OUTPUT-FEATURE" : "";
+ s =
+ format (s, "NAT44_%s_WORKER_HANDOFF %s: next-worker %d trace index %d",
+ tag, output, t->next_worker_index, t->trace_index);
+
+ return s;
+}
+
+static inline uword
+nat44_worker_handoff_fn_inline (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame, u8 is_output,
+ u8 is_in2out)
+{
+ u32 n_enq, n_left_from, *from, do_handoff = 0, same_worker = 0;
+
+ u16 thread_indices[VLIB_FRAME_SIZE], *ti = thread_indices;
+ vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
+ snat_main_t *sm = &snat_main;
+
+ u32 fq_index, thread_index = vm->thread_index;
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+
+ vlib_get_buffers (vm, from, b, n_left_from);
+
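+  /* Select the frame queue for this traffic direction: in2out (plain or
+   * output feature) vs. out2in. */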
+ if (is_in2out)
+ {
+ fq_index = is_output ? sm->fq_in2out_output_index : sm->fq_in2out_index;
+ }
+ else
+ {
+ fq_index = sm->fq_out2in_index;
+ }
+
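+  /* Process four buffers per iteration, prefetching the next four buffer
+   * headers and packet data while at least eight packets remain. */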
+ while (n_left_from >= 4)
+ {
+ u32 arc_next0, arc_next1, arc_next2, arc_next3;
+ u32 sw_if_index0, sw_if_index1, sw_if_index2, sw_if_index3;
+ u32 rx_fib_index0, rx_fib_index1, rx_fib_index2, rx_fib_index3;
+ u32 iph_offset0 = 0, iph_offset1 = 0, iph_offset2 = 0, iph_offset3 = 0;
+ ip4_header_t *ip0, *ip1, *ip2, *ip3;
+
+ if (PREDICT_TRUE (n_left_from >= 8))
+ {
+ vlib_prefetch_buffer_header (b[4], LOAD);
+ vlib_prefetch_buffer_header (b[5], LOAD);
+ vlib_prefetch_buffer_header (b[6], LOAD);
+ vlib_prefetch_buffer_header (b[7], LOAD);
+ CLIB_PREFETCH (&b[4]->data, CLIB_CACHE_LINE_BYTES, LOAD);
+ CLIB_PREFETCH (&b[5]->data, CLIB_CACHE_LINE_BYTES, LOAD);
+ CLIB_PREFETCH (&b[6]->data, CLIB_CACHE_LINE_BYTES, LOAD);
+ CLIB_PREFETCH (&b[7]->data, CLIB_CACHE_LINE_BYTES, LOAD);
+ }
+
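+      /* On the output feature arc the buffer currently points at the L2
+       * rewrite, so the IPv4 header sits save_rewrite_length bytes in. */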
+ if (is_output)
+ {
+ iph_offset0 = vnet_buffer (b[0])->ip.save_rewrite_length;
+ iph_offset1 = vnet_buffer (b[1])->ip.save_rewrite_length;
+ iph_offset2 = vnet_buffer (b[2])->ip.save_rewrite_length;
+ iph_offset3 = vnet_buffer (b[3])->ip.save_rewrite_length;
+ }
+
+ ip0 = (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b[0]) +
+ iph_offset0);
+ ip1 = (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b[1]) +
+ iph_offset1);
+ ip2 = (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b[2]) +
+ iph_offset2);
+ ip3 = (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b[3]) +
+ iph_offset3);
+
+ vnet_feature_next (&arc_next0, b[0]);
+ vnet_feature_next (&arc_next1, b[1]);
+ vnet_feature_next (&arc_next2, b[2]);
+ vnet_feature_next (&arc_next3, b[3]);
+
+ vnet_buffer2 (b[0])->nat.arc_next = arc_next0;
+ vnet_buffer2 (b[1])->nat.arc_next = arc_next1;
+ vnet_buffer2 (b[2])->nat.arc_next = arc_next2;
+ vnet_buffer2 (b[3])->nat.arc_next = arc_next3;
+
+ sw_if_index0 = vnet_buffer (b[0])->sw_if_index[VLIB_RX];
+ sw_if_index1 = vnet_buffer (b[1])->sw_if_index[VLIB_RX];
+ sw_if_index2 = vnet_buffer (b[2])->sw_if_index[VLIB_RX];
+ sw_if_index3 = vnet_buffer (b[3])->sw_if_index[VLIB_RX];
+
+ rx_fib_index0 = ip4_fib_table_get_index_for_sw_if_index (sw_if_index0);
+ rx_fib_index1 = ip4_fib_table_get_index_for_sw_if_index (sw_if_index1);
+ rx_fib_index2 = ip4_fib_table_get_index_for_sw_if_index (sw_if_index2);
+ rx_fib_index3 = ip4_fib_table_get_index_for_sw_if_index (sw_if_index3);
+
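+      /* Ask the per-direction worker selection callback which thread
+       * owns each packet. */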
+ if (is_in2out)
+ {
+ ti[0] = sm->worker_in2out_cb (ip0, rx_fib_index0, is_output);
+ ti[1] = sm->worker_in2out_cb (ip1, rx_fib_index1, is_output);
+ ti[2] = sm->worker_in2out_cb (ip2, rx_fib_index2, is_output);
+ ti[3] = sm->worker_in2out_cb (ip3, rx_fib_index3, is_output);
+ }
+ else
+ {
+ ti[0] = sm->worker_out2in_cb (b[0], ip0, rx_fib_index0, is_output);
+ ti[1] = sm->worker_out2in_cb (b[1], ip1, rx_fib_index1, is_output);
+ ti[2] = sm->worker_out2in_cb (b[2], ip2, rx_fib_index2, is_output);
+ ti[3] = sm->worker_out2in_cb (b[3], ip3, rx_fib_index3, is_output);
+ }
+
+ if (ti[0] == thread_index)
+ same_worker++;
+ else
+ do_handoff++;
+
+ if (ti[1] == thread_index)
+ same_worker++;
+ else
+ do_handoff++;
+
+ if (ti[2] == thread_index)
+ same_worker++;
+ else
+ do_handoff++;
+
+ if (ti[3] == thread_index)
+ same_worker++;
+ else
+ do_handoff++;
+
+ b += 4;
+ ti += 4;
+ n_left_from -= 4;
+ }
+
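+  /* Handle any remaining packets one at a time. */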
+ while (n_left_from > 0)
+ {
+ u32 arc_next0;
+ u32 sw_if_index0;
+ u32 rx_fib_index0;
+ u32 iph_offset0 = 0;
+ ip4_header_t *ip0;
+
+ if (is_output)
+ iph_offset0 = vnet_buffer (b[0])->ip.save_rewrite_length;
+
+ ip0 = (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b[0]) +
+ iph_offset0);
+
+ vnet_feature_next (&arc_next0, b[0]);
+ vnet_buffer2 (b[0])->nat.arc_next = arc_next0;
+
+ sw_if_index0 = vnet_buffer (b[0])->sw_if_index[VLIB_RX];
+ rx_fib_index0 = ip4_fib_table_get_index_for_sw_if_index (sw_if_index0);
+
+ if (is_in2out)
+ {
+ ti[0] = sm->worker_in2out_cb (ip0, rx_fib_index0, is_output);
+ }
+ else
+ {
+ ti[0] = sm->worker_out2in_cb (b[0], ip0, rx_fib_index0, is_output);
+ }
+
+ if (ti[0] == thread_index)
+ same_worker++;
+ else
+ do_handoff++;
+
+ b += 1;
+ ti += 1;
+ n_left_from -= 1;
+ }
+
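+  /* If tracing is enabled, record the chosen worker for each traced buffer;
+   * traced buffers sit at the head of the frame, so stop at the first
+   * untraced one. */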
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
+ {
+ u32 i;
+ b = bufs;
+ ti = thread_indices;
+
+ for (i = 0; i < frame->n_vectors; i++)
+ {
+ if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ nat44_handoff_trace_t *t =
+ vlib_add_trace (vm, node, b[0], sizeof (*t));
+ t->next_worker_index = ti[0];
+ t->trace_index = vlib_buffer_get_trace_index (b[0]);
+ t->in2out = is_in2out;
+ t->output = is_output;
+
+ b += 1;
+ ti += 1;
+ }
+ else
+ break;
+ }
+ }
+
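+  /* Hand the buffers off to their assigned worker threads; anything that
+   * could not be enqueued is counted as a congestion drop. */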
+ n_enq = vlib_buffer_enqueue_to_thread (vm, fq_index, from, thread_indices,
+ frame->n_vectors, 1);
+
+ if (n_enq < frame->n_vectors)
+ {
+ vlib_node_increment_counter (vm, node->node_index,
+ NAT44_HANDOFF_ERROR_CONGESTION_DROP,
+ frame->n_vectors - n_enq);
+ }
+
+ vlib_node_increment_counter (vm, node->node_index,
+ NAT44_HANDOFF_ERROR_SAME_WORKER, same_worker);
+ vlib_node_increment_counter (vm, node->node_index,
+ NAT44_HANDOFF_ERROR_DO_HANDOFF, do_handoff);
+ return frame->n_vectors;
+}
+
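+/* Node wrappers: the in2out, in2out output-feature and out2in handoff nodes
+ * all share the inline above, differing only in is_output / is_in2out. */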
+VLIB_NODE_FN (snat_in2out_worker_handoff_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return nat44_worker_handoff_fn_inline (vm, node, frame, 0, 1);
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (snat_in2out_worker_handoff_node) = {
+ .name = "nat44-in2out-worker-handoff",
+ .vector_size = sizeof (u32),
+ .sibling_of = "nat-default",
+ .format_trace = format_nat44_handoff_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+ .n_errors = ARRAY_LEN(nat44_handoff_error_strings),
+ .error_strings = nat44_handoff_error_strings,
+};
+/* *INDENT-ON* */
+
+VLIB_NODE_FN (snat_in2out_output_worker_handoff_node) (vlib_main_t * vm,
+ vlib_node_runtime_t *
+ node,
+ vlib_frame_t * frame)
+{
+ return nat44_worker_handoff_fn_inline (vm, node, frame, 1, 1);
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (snat_in2out_output_worker_handoff_node) = {
+ .name = "nat44-in2out-output-worker-handoff",
+ .vector_size = sizeof (u32),
+ .sibling_of = "nat-default",
+ .format_trace = format_nat44_handoff_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+ .n_errors = ARRAY_LEN(nat44_handoff_error_strings),
+ .error_strings = nat44_handoff_error_strings,
+};
+/* *INDENT-ON* */
+
+VLIB_NODE_FN (snat_out2in_worker_handoff_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return nat44_worker_handoff_fn_inline (vm, node, frame, 0, 0);
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (snat_out2in_worker_handoff_node) = {
+ .name = "nat44-out2in-worker-handoff",
+ .vector_size = sizeof (u32),
+ .sibling_of = "nat-default",
+ .format_trace = format_nat44_handoff_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+ .n_errors = ARRAY_LEN(nat44_handoff_error_strings),
+ .error_strings = nat44_handoff_error_strings,
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */