author    Pablo Camarillo <pcamaril@cisco.com>    2019-12-18 17:13:13 +0000
committer Neale Ranns <nranns@cisco.com>          2020-03-12 08:31:42 +0000
commit    79bfd272546dd436a4d12f0ac98571194965dab0 (patch)
tree      2610496dd4a94cf3c0d4a3da1b1e7cc5855d7c71 /src/plugins
parent    f2833e42c1e3721ce06b0f510447d8a6dd3e5eb4 (diff)
sr: SRv6 uN behavior

Implements the uN behavior

Type: feature
Change-Id: Icb5efc560e09052cd183b468772cf7f799f81194
Signed-off-by: pcamaril <pcamaril@cisco.com>
Diffstat (limited to 'src/plugins')
-rw-r--r--  src/plugins/srv6-mobile/node.c        6
-rw-r--r--  src/plugins/srv6-un/CMakeLists.txt   21
-rw-r--r--  src/plugins/srv6-un/node.c          304
-rw-r--r--  src/plugins/srv6-un/un.c            185
-rw-r--r--  src/plugins/srv6-un/un.h             65
5 files changed, 578 insertions, 3 deletions
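
The uN behavior is the uSID shift-and-forward operation: the node consumes the active 16-bit uSID by moving the lower six 16-bit words of the IPv6 destination address up one position and zero-filling the tail, then re-submits the packet to ip6-lookup. A minimal standalone sketch of that rewrite, mirroring the per-packet logic in srv6-un/node.c below (the helper name and sample address are hypothetical):

    /* Consume one 16-bit uSID from an IPv6 destination address.
     * Words 0-1 (the 32-bit uSID block) stay in place; words 2-6 move up
     * one position and word 7 is zero-filled as the end-of-carrier marker. */
    #include <stdint.h>

    typedef union
    {
      uint8_t as_u8[16];
      uint16_t as_u16[8];
    } ip6_da_sketch_t;

    static void
    usid_shift_16b (ip6_da_sketch_t * da)
    {
      int i;
      for (i = 2; i < 7; i++)
        da->as_u16[i] = da->as_u16[i + 1];
      da->as_u16[7] = 0;
    }

For example, a carrier such as fc00:0:100:200:300:: becomes fc00:0:200:300:: after one uN shift (sample uSID values chosen arbitrarily).
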
diff --git a/src/plugins/srv6-mobile/node.c b/src/plugins/srv6-mobile/node.c
index efc92215cac..6eafefd3f5b 100644
--- a/src/plugins/srv6-mobile/node.c
+++ b/src/plugins/srv6-mobile/node.c
@@ -372,8 +372,8 @@ VLIB_NODE_FN (srv6_end_m_gtp4_e) (vlib_main_t * vm,
len0 = vlib_buffer_length_in_chain (vm, b0);
- offset = ls0->localsid_len / 8;
- shift = ls0->localsid_len % 8;
+ offset = ls0->localsid_prefix_len / 8;
+ shift = ls0->localsid_prefix_len % 8;
gtpu_type = gtpu_type_get (tag);
@@ -1169,7 +1169,7 @@ VLIB_NODE_FN (srv6_end_m_gtp6_e) (vlib_main_t * vm,
u16 offset, shift;
u32 hdrlen = 0;
- index = ls0->localsid_len;
+ index = ls0->localsid_prefix_len;
index += 8;
offset = index / 8;
shift = index % 8;
diff --git a/src/plugins/srv6-un/CMakeLists.txt b/src/plugins/srv6-un/CMakeLists.txt
new file mode 100644
index 00000000000..9ecf0b3c8d0
--- /dev/null
+++ b/src/plugins/srv6-un/CMakeLists.txt
@@ -0,0 +1,21 @@
+# Copyright (c) 2018 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+add_vpp_plugin(SRv6uN
+ SOURCES
+ un.c
+ node.c
+
+ INSTALL_HEADERS
+ un.h
+)
diff --git a/src/plugins/srv6-un/node.c b/src/plugins/srv6-un/node.c
new file mode 100644
index 00000000000..caca18c56f2
--- /dev/null
+++ b/src/plugins/srv6-un/node.c
@@ -0,0 +1,304 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <vlib/vlib.h>
+#include <vnet/vnet.h>
+#include <vppinfra/error.h>
+#include <srv6-un/un.h>
+
+
+/******************************* Packet tracing *******************************/
+typedef struct
+{
+ u32 localsid_index;
+} srv6_un_localsid_trace_t;
+
+
+static u8 *
+format_srv6_un_localsid_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ srv6_un_localsid_trace_t *t = va_arg (*args, srv6_un_localsid_trace_t *);
+
+ return format (s, "SRv6-uN-localsid: localsid_index %d", t->localsid_index);
+}
+
+/********************************* Next nodes *********************************/
+typedef enum
+{
+ SRV6_UN_LOCALSID_NEXT_ERROR,
+ SRV6_UN_LOCALSID_NEXT_IP6LOOKUP,
+ SRV6_UN_LOCALSID_N_NEXT,
+} srv6_un_localsid_next_t;
+
+/******************************* Local SID node *******************************/
+
+/**
+ * @brief Graph node for applying SRv6 uN.
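+ * For each packet the IPv6 destination address is advanced by one 16-bit
+ * uSID (words 2-7 of the DA shift up one position and the tail is zeroed)
+ * and the packet is handed to ip6-lookup; per-localsid counters are updated.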
+ */
+static uword
+srv6_un_localsid_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ ip6_sr_main_t *sm = &sr_main;
+ u32 n_left_from, next_index, *from, *to_next;
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+ next_index = node->cached_next_index;
+
+ u32 thread_index = vm->thread_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ /* Quad - Loop */
+ while (n_left_from >= 8 && n_left_to_next >= 4)
+ {
+ u32 bi0, bi1, bi2, bi3;
+ vlib_buffer_t *b0, *b1, *b2, *b3;
+ ip6_header_t *ip0, *ip1, *ip2, *ip3;
+ u32 next0, next1, next2, next3;
+ next0 = next1 = next2 = next3 = SRV6_UN_LOCALSID_NEXT_IP6LOOKUP;
+ ip6_sr_localsid_t *ls0, *ls1, *ls2, *ls3;
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t *p4, *p5, *p6, *p7;
+
+ p4 = vlib_get_buffer (vm, from[4]);
+ p5 = vlib_get_buffer (vm, from[5]);
+ p6 = vlib_get_buffer (vm, from[6]);
+ p7 = vlib_get_buffer (vm, from[7]);
+
+ /* Prefetch the buffer header and packet for the N+4 loop iteration */
+ vlib_prefetch_buffer_header (p4, LOAD);
+ vlib_prefetch_buffer_header (p5, LOAD);
+ vlib_prefetch_buffer_header (p6, LOAD);
+ vlib_prefetch_buffer_header (p7, LOAD);
+
+ CLIB_PREFETCH (p4->data, CLIB_CACHE_LINE_BYTES, STORE);
+ CLIB_PREFETCH (p5->data, CLIB_CACHE_LINE_BYTES, STORE);
+ CLIB_PREFETCH (p6->data, CLIB_CACHE_LINE_BYTES, STORE);
+ CLIB_PREFETCH (p7->data, CLIB_CACHE_LINE_BYTES, STORE);
+ }
+
+ to_next[0] = bi0 = from[0];
+ to_next[1] = bi1 = from[1];
+ to_next[2] = bi2 = from[2];
+ to_next[3] = bi3 = from[3];
+ from += 4;
+ to_next += 4;
+ n_left_from -= 4;
+ n_left_to_next -= 4;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+ b2 = vlib_get_buffer (vm, bi2);
+ b3 = vlib_get_buffer (vm, bi3);
+
+ ls0 =
+ pool_elt_at_index (sm->localsids,
+ vnet_buffer (b0)->ip.adj_index[VLIB_TX]);
+ ls1 =
+ pool_elt_at_index (sm->localsids,
+ vnet_buffer (b1)->ip.adj_index[VLIB_TX]);
+ ls2 =
+ pool_elt_at_index (sm->localsids,
+ vnet_buffer (b2)->ip.adj_index[VLIB_TX]);
+ ls3 =
+ pool_elt_at_index (sm->localsids,
+ vnet_buffer (b3)->ip.adj_index[VLIB_TX]);
+
+ ip0 = vlib_buffer_get_current (b0);
+ ip1 = vlib_buffer_get_current (b1);
+ ip2 = vlib_buffer_get_current (b2);
+ ip3 = vlib_buffer_get_current (b3);
+
+ /* uN shift-and-forward: consume the active 16-bit uSID in the DA and zero-fill the tail */
+ ip0->dst_address.as_u16[2] = ip0->dst_address.as_u16[3];
+ ip0->dst_address.as_u16[3] = ip0->dst_address.as_u16[4];
+ ip0->dst_address.as_u16[4] = ip0->dst_address.as_u16[5];
+ ip0->dst_address.as_u16[5] = ip0->dst_address.as_u16[6];
+ ip0->dst_address.as_u16[6] = ip0->dst_address.as_u16[7];
+ ip0->dst_address.as_u16[7] = 0x0000;
+
+ ip1->dst_address.as_u16[2] = ip1->dst_address.as_u16[3];
+ ip1->dst_address.as_u16[3] = ip1->dst_address.as_u16[4];
+ ip1->dst_address.as_u16[4] = ip1->dst_address.as_u16[5];
+ ip1->dst_address.as_u16[5] = ip1->dst_address.as_u16[6];
+ ip1->dst_address.as_u16[6] = ip1->dst_address.as_u16[7];
+ ip1->dst_address.as_u16[7] = 0x0000;
+
+ ip2->dst_address.as_u16[2] = ip2->dst_address.as_u16[3];
+ ip2->dst_address.as_u16[3] = ip2->dst_address.as_u16[4];
+ ip2->dst_address.as_u16[4] = ip2->dst_address.as_u16[5];
+ ip2->dst_address.as_u16[5] = ip2->dst_address.as_u16[6];
+ ip2->dst_address.as_u16[6] = ip2->dst_address.as_u16[7];
+ ip2->dst_address.as_u16[7] = 0x0000;
+
+ ip3->dst_address.as_u16[2] = ip3->dst_address.as_u16[3];
+ ip3->dst_address.as_u16[3] = ip3->dst_address.as_u16[4];
+ ip3->dst_address.as_u16[4] = ip3->dst_address.as_u16[5];
+ ip3->dst_address.as_u16[5] = ip3->dst_address.as_u16[6];
+ ip3->dst_address.as_u16[6] = ip3->dst_address.as_u16[7];
+ ip3->dst_address.as_u16[7] = 0x0000;
+
+ if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ srv6_un_localsid_trace_t *tr =
+ vlib_add_trace (vm, node, b0, sizeof (*tr));
+ tr->localsid_index = ls0 - sm->localsids;
+ }
+
+ if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ srv6_un_localsid_trace_t *tr =
+ vlib_add_trace (vm, node, b1, sizeof (*tr));
+ tr->localsid_index = ls1 - sm->localsids;
+ }
+
+ if (PREDICT_FALSE (b2->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ srv6_un_localsid_trace_t *tr =
+ vlib_add_trace (vm, node, b2, sizeof (*tr));
+ tr->localsid_index = ls2 - sm->localsids;
+ }
+
+ if (PREDICT_FALSE (b3->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ srv6_un_localsid_trace_t *tr =
+ vlib_add_trace (vm, node, b3, sizeof (*tr));
+ tr->localsid_index = ls3 - sm->localsids;
+ }
+
+ vlib_increment_combined_counter
+ (((next0 ==
+ SRV6_UN_LOCALSID_NEXT_ERROR) ?
+ &(sm->sr_ls_invalid_counters) :
+ &(sm->sr_ls_valid_counters)),
+ thread_index, ls0 - sm->localsids,
+ 1, vlib_buffer_length_in_chain (vm, b0));
+
+ vlib_increment_combined_counter
+ (((next1 ==
+ SRV6_UN_LOCALSID_NEXT_ERROR) ?
+ &(sm->sr_ls_invalid_counters) :
+ &(sm->sr_ls_valid_counters)),
+ thread_index, ls1 - sm->localsids,
+ 1, vlib_buffer_length_in_chain (vm, b1));
+
+ vlib_increment_combined_counter
+ (((next2 ==
+ SRV6_UN_LOCALSID_NEXT_ERROR) ?
+ &(sm->sr_ls_invalid_counters) :
+ &(sm->sr_ls_valid_counters)),
+ thread_index, ls2 - sm->localsids,
+ 1, vlib_buffer_length_in_chain (vm, b2));
+
+ vlib_increment_combined_counter
+ (((next3 ==
+ SRV6_UN_LOCALSID_NEXT_ERROR) ?
+ &(sm->sr_ls_invalid_counters) :
+ &(sm->sr_ls_valid_counters)),
+ thread_index, ls3 - sm->localsids,
+ 1, vlib_buffer_length_in_chain (vm, b3));
+
+ vlib_validate_buffer_enqueue_x4 (vm, node, next_index, to_next,
+ n_left_to_next, bi0, bi1, bi2, bi3,
+ next0, next1, next2, next3);
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0;
+ vlib_buffer_t *b0;
+ ip6_header_t *ip0 = 0;
+ u32 next0 = SRV6_UN_LOCALSID_NEXT_IP6LOOKUP;
+ ip6_sr_localsid_t *ls0;
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ ip0 = vlib_buffer_get_current (b0);
+
+ /* Lookup the SR End behavior based on IP DA (adj) */
+ ls0 =
+ pool_elt_at_index (sm->localsids,
+ vnet_buffer (b0)->ip.adj_index[VLIB_TX]);
+
+ /* uN shift-and-forward: consume the active 16-bit uSID in the DA and zero-fill the tail */
+ ip0->dst_address.as_u16[2] = ip0->dst_address.as_u16[3];
+ ip0->dst_address.as_u16[3] = ip0->dst_address.as_u16[4];
+ ip0->dst_address.as_u16[4] = ip0->dst_address.as_u16[5];
+ ip0->dst_address.as_u16[5] = ip0->dst_address.as_u16[6];
+ ip0->dst_address.as_u16[6] = ip0->dst_address.as_u16[7];
+ ip0->dst_address.as_u16[7] = 0x0000;
+
+ if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ srv6_un_localsid_trace_t *tr =
+ vlib_add_trace (vm, node, b0, sizeof (*tr));
+ tr->localsid_index = ls0 - sm->localsids;
+ }
+
+ /* This increments the SRv6 per LocalSID counters. */
+ vlib_increment_combined_counter (((next0 ==
+ SRV6_UN_LOCALSID_NEXT_ERROR) ?
+ &(sm->sr_ls_invalid_counters) :
+ &(sm->sr_ls_valid_counters)),
+ thread_index, ls0 - sm->localsids,
+ 1, vlib_buffer_length_in_chain (vm,
+ b0));
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+ n_left_to_next, bi0, next0);
+
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ return frame->n_vectors;
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (srv6_un_localsid_node) = {
+ .function = srv6_un_localsid_fn,
+ .name = "srv6-un-localsid",
+ .vector_size = sizeof (u32),
+ .format_trace = format_srv6_un_localsid_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+ .n_next_nodes = SRV6_UN_LOCALSID_N_NEXT,
+ .next_nodes = {
+ [SRV6_UN_LOCALSID_NEXT_IP6LOOKUP] = "ip6-lookup",
+ [SRV6_UN_LOCALSID_NEXT_ERROR] = "error-drop",
+ },
+};
+/* *INDENT-ON* */
+
+/*
+* fd.io coding-style-patch-verification: ON
+*
+* Local Variables:
+* eval: (c-set-style "gnu")
+* End:
+*/
diff --git a/src/plugins/srv6-un/un.c b/src/plugins/srv6-un/un.c
new file mode 100644
index 00000000000..7fd02e2c934
--- /dev/null
+++ b/src/plugins/srv6-un/un.c
@@ -0,0 +1,185 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ *------------------------------------------------------------------
+ * un.c - SRv6 uN behavior (uSID shift-and-forward)
+ *------------------------------------------------------------------
+ */
+
+#include <vnet/vnet.h>
+#include <vnet/adj/adj.h>
+#include <vnet/plugin/plugin.h>
+#include <vpp/app/version.h>
+#include <srv6-un/un.h>
+
+unsigned char function_name[] = "SRv6-uN";
+unsigned char keyword_str[] = "uN(32b+16b)";
+unsigned char def_str[] = "SRv6 uSID uN";
+unsigned char params_str[] = "";
+u8 prefix_length = 48;
+
+srv6_un_main_t srv6_un_main;
+
+/*****************************************/
+/* SRv6 LocalSID instantiation and removal functions */
+static int
+srv6_un_localsid_creation_fn (ip6_sr_localsid_t * localsid)
+{
+ srv6_un_localsid_t *ls_mem = localsid->plugin_mem;
+ /* Record the 16-bit uSID shift amount in the per-localsid memory */
+ ls_mem->shift = 16;
+ return 0;
+}
+
+static int
+srv6_un_localsid_removal_fn (ip6_sr_localsid_t * localsid)
+{
+ //Nothing to do here.
+
+ /* Clean up local SID memory */
+ clib_mem_free (localsid->plugin_mem);
+
+ return 0;
+}
+
+/**********************************/
+/* SRv6 LocalSID format functions */
+/*
+ * Prints nicely the parameters of a localsid
+ * Example: print "Table 5"
+ */
+u8 *
+format_srv6_un_localsid (u8 * s, va_list * args)
+{
+ srv6_un_localsid_t *ls_mem = va_arg (*args, void *);
+
+ return (format (s, "Shift:\t\t%u", ls_mem->shift));
+}
+
+/*
+ * Process the parameters of a localsid
+ * Example: process from:
+ * sr localsid address cafe::1 behavior new_srv6_localsid 5
+ * everything from behavior on... so in this case 'new_srv6_localsid 5'
+ * Notice that it MUST match the keyword_str and params_str defined above.
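+ * For this behavior the keyword is "uN(32b+16b)" and there are no extra
+ * parameters, so a configuration line presumably looks like (hypothetical
+ * SID value):
+ *   sr localsid address fc00:0:100:: behavior uN(32b+16b)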
+ */
+uword
+unformat_srv6_un_localsid (unformat_input_t * input, va_list * args)
+{
+ void **plugin_mem_p = va_arg (*args, void **);
+ srv6_un_localsid_t *ls_mem;
+
+ if (unformat (input, "uN(32b+16b)"))
+ {
+ /* Allocate a portion of memory */
+ ls_mem = clib_mem_alloc_aligned_at_offset (sizeof *ls_mem, 0, 0, 1);
+
+ /* Set to zero the memory */
+ clib_memset (ls_mem, 0, sizeof *ls_mem);
+
+ /* Dont forget to add it to the localsid */
+ *plugin_mem_p = ls_mem;
+ return 1;
+ }
+ return 0;
+}
+
+/*************************/
+/* SRv6 LocalSID FIB DPO */
+static u8 *
+format_srv6_un_dpo (u8 * s, va_list * args)
+{
+ index_t index = va_arg (*args, index_t);
+ CLIB_UNUSED (u32 indent) = va_arg (*args, u32);
+
+ return (format (s, "SR: shift-and-forward 16b:[%u]", index));
+}
+
+void
+srv6_un_dpo_lock (dpo_id_t * dpo)
+{
+}
+
+void
+srv6_un_dpo_unlock (dpo_id_t * dpo)
+{
+}
+
+const static dpo_vft_t srv6_un_vft = {
+ .dv_lock = srv6_un_dpo_lock,
+ .dv_unlock = srv6_un_dpo_unlock,
+ .dv_format = format_srv6_un_dpo,
+};
+
+const static char *const srv6_un_ip6_nodes[] = {
+ "srv6-un-localsid",
+ NULL,
+};
+
+const static char *const *const srv6_un_nodes[DPO_PROTO_NUM] = {
+ [DPO_PROTO_IP6] = srv6_un_ip6_nodes,
+};
+
+/**********************/
+static clib_error_t *
+srv6_un_init (vlib_main_t * vm)
+{
+ srv6_un_main_t *sm = &srv6_un_main;
+ int rv = 0;
+
+ sm->vlib_main = vm;
+ sm->vnet_main = vnet_get_main ();
+
+ /* Create DPO */
+ sm->srv6_un16_dpo_type =
+ dpo_register_new_type (&srv6_un_vft, srv6_un_nodes);
+
+ /* Register SRv6 LocalSID */
+ rv = sr_localsid_register_function (vm,
+ function_name,
+ keyword_str,
+ def_str,
+ params_str,
+ prefix_length,
+ &sm->srv6_un16_dpo_type,
+ format_srv6_un_localsid,
+ unformat_srv6_un_localsid,
+ srv6_un_localsid_creation_fn,
+ srv6_un_localsid_removal_fn);
+ if (rv < 0)
+ return clib_error_return (0, "SRv6 LocalSID function could not be registered.");
+ else
+ sm->srv6_localsid_behavior_id = rv;
+
+ return 0;
+}
+
+
+/* *INDENT-OFF* */
+VLIB_INIT_FUNCTION (srv6_un_init);
+
+VLIB_PLUGIN_REGISTER () = {
+ .version = VPP_BUILD_VER,
+ .description = "Segment Routing Shift And Forward uN 16b",
+};
+/* *INDENT-ON* */
+
+/*
+* fd.io coding-style-patch-verification: ON
+*
+* Local Variables:
+* eval: (c-set-style "gnu")
+* End:
+*/
diff --git a/src/plugins/srv6-un/un.h b/src/plugins/srv6-un/un.h
new file mode 100644
index 00000000000..750e2350d74
--- /dev/null
+++ b/src/plugins/srv6-un/un.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef __included_srv6_un_h__
+#define __included_srv6_un_h__
+
+#include <vnet/vnet.h>
+#include <vnet/ip/ip.h>
+#include <vnet/srv6/sr.h>
+#include <vnet/srv6/sr_packet.h>
+
+#include <vppinfra/error.h>
+#include <vppinfra/elog.h>
+
+typedef struct
+{
+ u16 msg_id_base; /**< API message ID base */
+
+ vlib_main_t *vlib_main; /**< [convenience] vlib main */
+ vnet_main_t *vnet_main; /**< [convenience] vnet main */
+
+ dpo_type_t srv6_un16_dpo_type; /**< DPO type */
+
+ u32 srv6_localsid_behavior_id; /**< SRv6 LocalSID behavior number */
+} srv6_un_main_t;
+
+/*
+ * This is the memory that will be stored for each localsid
+ * the user instantiates
+ */
+typedef struct
+{
+ u16 shift;
+} srv6_un_localsid_t;
+
+extern srv6_un_main_t srv6_un_main;
+
+format_function_t format_srv6_un_localsid;
+unformat_function_t unformat_srv6_un_localsid;
+
+void srv6_un_dpo_lock (dpo_id_t * dpo);
+void srv6_un_dpo_unlock (dpo_id_t * dpo);
+
+extern vlib_node_registration_t srv6_un_localsid_node;
+
+#endif /* __included_srv6_un_h__ */
+
+/*
+* fd.io coding-style-patch-verification: ON
+*
+* Local Variables:
+* eval: (c-set-style "gnu")
+* End:
+*/