Diffstat (limited to 'src/vnet/dpo')
-rw-r--r--   src/vnet/dpo/dpo.c                 10
-rw-r--r--   src/vnet/dpo/dpo.h                  8
-rw-r--r--   src/vnet/dpo/interface_dpo.c      416
-rw-r--r--   src/vnet/dpo/interface_dpo.h       67
-rw-r--r--   src/vnet/dpo/lookup_dpo.c         211
-rw-r--r--   src/vnet/dpo/lookup_dpo.h          20
-rw-r--r--   src/vnet/dpo/mpls_disposition.c   364
-rw-r--r--   src/vnet/dpo/mpls_disposition.h    85
-rw-r--r--   src/vnet/dpo/mpls_label_dpo.c       6
-rw-r--r--   src/vnet/dpo/replicate_dpo.c       48
-rw-r--r--   src/vnet/dpo/replicate_dpo.h        2
11 files changed, 1213 insertions(+), 24 deletions(-)
diff --git a/src/vnet/dpo/dpo.c b/src/vnet/dpo/dpo.c
index d8e075a7f2f..dfc2bd923ce 100644
--- a/src/vnet/dpo/dpo.c
+++ b/src/vnet/dpo/dpo.c
@@ -37,6 +37,8 @@
#include <vnet/dpo/classify_dpo.h>
#include <vnet/dpo/ip_null_dpo.h>
#include <vnet/dpo/replicate_dpo.h>
+#include <vnet/dpo/interface_dpo.h>
+#include <vnet/dpo/mpls_disposition.h>
/**
* Array of char* names for the DPO types and protos
@@ -182,6 +184,12 @@ dpo_set (dpo_id_t *dpo,
case IP_LOOKUP_NEXT_MIDCHAIN:
dpo->dpoi_type = DPO_ADJACENCY_MIDCHAIN;
break;
+ case IP_LOOKUP_NEXT_MCAST_MIDCHAIN:
+ dpo->dpoi_type = DPO_ADJACENCY_MCAST_MIDCHAIN;
+ break;
+ case IP_LOOKUP_NEXT_MCAST:
+ dpo->dpoi_type = DPO_ADJACENCY_MCAST;
+ break;
default:
break;
}
@@ -453,6 +461,8 @@ dpo_module_init (vlib_main_t * vm)
lookup_dpo_module_init();
ip_null_dpo_module_init();
replicate_module_init();
+ interface_dpo_module_init();
+ mpls_disp_dpo_module_init();
return (NULL);
}
diff --git a/src/vnet/dpo/dpo.h b/src/vnet/dpo/dpo.h
index 48b92d3d3d6..5aa4e2d2165 100644
--- a/src/vnet/dpo/dpo.h
+++ b/src/vnet/dpo/dpo.h
@@ -108,12 +108,15 @@ typedef enum dpo_type_t_ {
DPO_ADJACENCY_MIDCHAIN,
DPO_ADJACENCY_GLEAN,
DPO_ADJACENCY_MCAST,
+ DPO_ADJACENCY_MCAST_MIDCHAIN,
DPO_RECEIVE,
DPO_LOOKUP,
DPO_LISP_CP,
DPO_CLASSIFY,
DPO_MPLS_LABEL,
+ DPO_MPLS_DISPOSITION,
DPO_MFIB_ENTRY,
+ DPO_INTERFACE,
DPO_LAST,
} __attribute__((packed)) dpo_type_t;
@@ -129,6 +132,7 @@ typedef enum dpo_type_t_ {
[DPO_ADJACENCY_MIDCHAIN] = "dpo-adjacency-midcahin", \
[DPO_ADJACENCY_GLEAN] = "dpo-glean", \
[DPO_ADJACENCY_MCAST] = "dpo-adj-mcast", \
+ [DPO_ADJACENCY_MCAST_MIDCHAIN] = "dpo-adj-mcast-midchain", \
[DPO_RECEIVE] = "dpo-receive", \
[DPO_LOOKUP] = "dpo-lookup", \
[DPO_LOAD_BALANCE] = "dpo-load-balance", \
@@ -136,7 +140,9 @@ typedef enum dpo_type_t_ {
[DPO_LISP_CP] = "dpo-lisp-cp", \
[DPO_CLASSIFY] = "dpo-classify", \
[DPO_MPLS_LABEL] = "dpo-mpls-label", \
- [DPO_MFIB_ENTRY] = "dpo-mfib_entry" \
+ [DPO_MPLS_DISPOSITION] = "dpo-mpls-disposition", \
+ [DPO_MFIB_ENTRY] = "dpo-mfib_entry", \
+ [DPO_INTERFACE] = "dpo-interface" \
}
/**
diff --git a/src/vnet/dpo/interface_dpo.c b/src/vnet/dpo/interface_dpo.c
new file mode 100644
index 00000000000..50ca756f9da
--- /dev/null
+++ b/src/vnet/dpo/interface_dpo.c
@@ -0,0 +1,416 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/dpo/interface_dpo.h>
+#include <vnet/fib/fib_node.h>
+
+/*
+ * The 'DB' of interface DPOs.
+ * There is only one interface DPO per interface per protocol, so this is a
+ * per-protocol vector indexed by sw_if_index
+ */
+static index_t *interface_dpo_db[DPO_PROTO_NUM];
+
+static interface_dpo_t *
+interface_dpo_alloc (void)
+{
+ interface_dpo_t *ido;
+
+ pool_get(interface_dpo_pool, ido);
+
+ return (ido);
+}
+
+static inline interface_dpo_t *
+interface_dpo_get_from_dpo (const dpo_id_t *dpo)
+{
+ ASSERT(DPO_INTERFACE == dpo->dpoi_type);
+
+ return (interface_dpo_get(dpo->dpoi_index));
+}
+
+static inline index_t
+interface_dpo_get_index (interface_dpo_t *ido)
+{
+ return (ido - interface_dpo_pool);
+}
+
+static void
+interface_dpo_lock (dpo_id_t *dpo)
+{
+ interface_dpo_t *ido;
+
+ ido = interface_dpo_get_from_dpo(dpo);
+ ido->ido_locks++;
+}
+
+static void
+interface_dpo_unlock (dpo_id_t *dpo)
+{
+ interface_dpo_t *ido;
+
+ ido = interface_dpo_get_from_dpo(dpo);
+ ido->ido_locks--;
+
+ if (0 == ido->ido_locks)
+ {
+ interface_dpo_db[ido->ido_proto][ido->ido_sw_if_index] =
+ INDEX_INVALID;
+ pool_put(interface_dpo_pool, ido);
+ }
+}
+
+/*
+ * interface_dpo_add_or_lock
+ *
+ * Create and lock a new interface DPO, or lock the existing one, for the
+ * given interface and protocol
+ */
+void
+interface_dpo_add_or_lock (dpo_proto_t proto,
+ u32 sw_if_index,
+ dpo_id_t *dpo)
+{
+ interface_dpo_t *ido;
+
+ vec_validate_init_empty(interface_dpo_db[proto],
+ sw_if_index,
+ INDEX_INVALID);
+
+ if (INDEX_INVALID == interface_dpo_db[proto][sw_if_index])
+ {
+ ido = interface_dpo_alloc();
+
+ ido->ido_sw_if_index = sw_if_index;
+ ido->ido_proto = proto;
+
+ interface_dpo_db[proto][sw_if_index] =
+ interface_dpo_get_index(ido);
+ }
+ else
+ {
+ ido = interface_dpo_get(interface_dpo_db[proto][sw_if_index]);
+ }
+
+ dpo_set(dpo, DPO_INTERFACE, proto, interface_dpo_get_index(ido));
+}
+
+
+static clib_error_t *
+interface_dpo_interface_state_change (vnet_main_t * vnm,
+ u32 sw_if_index,
+ u32 flags)
+{
+ /*
+ */
+ return (NULL);
+}
+
+VNET_SW_INTERFACE_ADMIN_UP_DOWN_FUNCTION(
+ interface_dpo_interface_state_change);
+
+/**
+ * @brief Registered callback for HW interface state changes
+ */
+static clib_error_t *
+interface_dpo_hw_interface_state_change (vnet_main_t * vnm,
+ u32 hw_if_index,
+ u32 flags)
+{
+ return (NULL);
+}
+
+VNET_HW_INTERFACE_LINK_UP_DOWN_FUNCTION(
+ interface_dpo_hw_interface_state_change);
+
+static clib_error_t *
+interface_dpo_interface_delete (vnet_main_t * vnm,
+ u32 sw_if_index,
+ u32 is_add)
+{
+ return (NULL);
+}
+
+VNET_SW_INTERFACE_ADD_DEL_FUNCTION(
+ interface_dpo_interface_delete);
+
+u8*
+format_interface_dpo (u8* s, va_list *ap)
+{
+ index_t index = va_arg(*ap, index_t);
+ CLIB_UNUSED(u32 indent) = va_arg(*ap, u32);
+ vnet_main_t * vnm = vnet_get_main();
+ interface_dpo_t *ido = interface_dpo_get(index);
+
+ return (format(s, "%U-dpo: %U",
+ format_vnet_sw_interface_name,
+ vnm,
+ vnet_get_sw_interface(vnm, ido->ido_sw_if_index),
+ format_dpo_proto, ido->ido_proto));
+}
+
+static void
+interface_dpo_mem_show (void)
+{
+ fib_show_memory_usage("Interface",
+ pool_elts(interface_dpo_pool),
+ pool_len(interface_dpo_pool),
+ sizeof(interface_dpo_t));
+}
+
+
+const static dpo_vft_t interface_dpo_vft = {
+ .dv_lock = interface_dpo_lock,
+ .dv_unlock = interface_dpo_unlock,
+ .dv_format = format_interface_dpo,
+ .dv_mem_show = interface_dpo_mem_show,
+};
+
+/**
+ * @brief The per-protocol VLIB graph nodes that are assigned to an interface
+ * DPO object.
+ *
+ * This means that these graph nodes are ones from which an interface DPO is
+ * the parent object in the DPO-graph.
+ */
+const static char* const interface_dpo_ip4_nodes[] =
+{
+ "interface-dpo-ip4",
+ NULL,
+};
+const static char* const interface_dpo_ip6_nodes[] =
+{
+ "interface-dpo-ip6",
+ NULL,
+};
+
+const static char* const * const interface_dpo_nodes[DPO_PROTO_NUM] =
+{
+ [DPO_PROTO_IP4] = interface_dpo_ip4_nodes,
+ [DPO_PROTO_IP6] = interface_dpo_ip6_nodes,
+ [DPO_PROTO_MPLS] = NULL,
+};
+
+void
+interface_dpo_module_init (void)
+{
+ dpo_register(DPO_INTERFACE,
+ &interface_dpo_vft,
+ interface_dpo_nodes);
+}
+
+/**
+ * @brief Interface DPO trace data
+ */
+typedef struct interface_dpo_trace_t_
+{
+ u32 sw_if_index;
+} interface_dpo_trace_t;
+
+typedef enum interface_dpo_next_t_
+{
+ INTERFACE_DPO_DROP = 0,
+ INTERFACE_DPO_INPUT = 1,
+} interface_dpo_next_t;
+
+always_inline uword
+interface_dpo_inline (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ u32 n_left_from, next_index, * from, * to_next;
+ u32 cpu_index = os_get_cpu_number();
+ vnet_interface_main_t *im;
+
+ im = &vnet_get_main ()->interface_main;
+ from = vlib_frame_vector_args (from_frame);
+ n_left_from = from_frame->n_vectors;
+
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from >= 4 && n_left_to_next > 2)
+ {
+ const interface_dpo_t *ido0, *ido1;
+ u32 bi0, idoi0, bi1, idoi1;
+ vlib_buffer_t *b0, *b1;
+
+ bi0 = from[0];
+ to_next[0] = bi0;
+ bi1 = from[1];
+ to_next[1] = bi1;
+ from += 2;
+ to_next += 2;
+ n_left_from -= 2;
+ n_left_to_next -= 2;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+
+ idoi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
+ idoi1 = vnet_buffer(b1)->ip.adj_index[VLIB_TX];
+ ido0 = interface_dpo_get(idoi0);
+ ido1 = interface_dpo_get(idoi1);
+
+ vnet_buffer(b0)->sw_if_index[VLIB_RX] = ido0->ido_sw_if_index;
+ vnet_buffer(b1)->sw_if_index[VLIB_RX] = ido1->ido_sw_if_index;
+
+ vlib_increment_combined_counter (im->combined_sw_if_counters
+ + VNET_INTERFACE_COUNTER_RX,
+ cpu_index,
+ ido0->ido_sw_if_index,
+ 1,
+ vlib_buffer_length_in_chain (vm, b0));
+ vlib_increment_combined_counter (im->combined_sw_if_counters
+ + VNET_INTERFACE_COUNTER_RX,
+ cpu_index,
+ ido1->ido_sw_if_index,
+ 1,
+ vlib_buffer_length_in_chain (vm, b1));
+
+ if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ interface_dpo_trace_t *tr0;
+
+ tr0 = vlib_add_trace (vm, node, b0, sizeof (*tr0));
+ tr0->sw_if_index = ido0->ido_sw_if_index;
+ }
+ if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ interface_dpo_trace_t *tr1;
+
+ tr1 = vlib_add_trace (vm, node, b1, sizeof (*tr1));
+ tr1->sw_if_index = ido1->ido_sw_if_index;
+ }
+
+ vlib_validate_buffer_enqueue_x2(vm, node, next_index, to_next,
+ n_left_to_next, bi0, bi1,
+ INTERFACE_DPO_INPUT,
+ INTERFACE_DPO_INPUT);
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ const interface_dpo_t * ido0;
+ vlib_buffer_t * b0;
+ u32 bi0, idoi0;
+
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ idoi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
+ ido0 = interface_dpo_get(idoi0);
+
+ /* Swap the RX interface of the packet to the one the
+ * interface DPO represents */
+ vnet_buffer(b0)->sw_if_index[VLIB_RX] = ido0->ido_sw_if_index;
+
+ /* Bump the interface's RX counters */
+ vlib_increment_combined_counter (im->combined_sw_if_counters
+ + VNET_INTERFACE_COUNTER_RX,
+ cpu_index,
+ ido0->ido_sw_if_index,
+ 1,
+ vlib_buffer_length_in_chain (vm, b0));
+
+ if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ interface_dpo_trace_t *tr;
+
+ tr = vlib_add_trace (vm, node, b0, sizeof (*tr));
+ tr->sw_if_index = ido0->ido_sw_if_index;
+ }
+
+ vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next,
+ n_left_to_next, bi0,
+ INTERFACE_DPO_INPUT);
+ }
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+ return from_frame->n_vectors;
+}
+
+static u8 *
+format_interface_dpo_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ interface_dpo_trace_t * t = va_arg (*args, interface_dpo_trace_t *);
+ uword indent = format_get_indent (s);
+ s = format (s, "%U sw_if_index:%d",
+ format_white_space, indent,
+ t->sw_if_index);
+ return s;
+}
+
+static uword
+interface_dpo_ip4 (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ return (interface_dpo_inline(vm, node, from_frame));
+}
+
+static uword
+interface_dpo_ip6 (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ return (interface_dpo_inline(vm, node, from_frame));
+}
+
+VLIB_REGISTER_NODE (interface_dpo_ip4_node) = {
+ .function = interface_dpo_ip4,
+ .name = "interface-dpo-ip4",
+ .vector_size = sizeof (u32),
+ .format_trace = format_interface_dpo_trace,
+
+ .n_next_nodes = 2,
+ .next_nodes = {
+ [INTERFACE_DPO_DROP] = "ip4-drop",
+ [INTERFACE_DPO_INPUT] = "ip4-input",
+ },
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH (interface_dpo_ip4_node,
+ interface_dpo_ip4)
+
+VLIB_REGISTER_NODE (interface_dpo_ip6_node) = {
+ .function = interface_dpo_ip6,
+ .name = "interface-dpo-ip6",
+ .vector_size = sizeof (u32),
+ .format_trace = format_interface_dpo_trace,
+
+ .n_next_nodes = 2,
+ .next_nodes = {
+ [INTERFACE_DPO_DROP] = "ip6-drop",
+ [INTERFACE_DPO_INPUT] = "ip6-input",
+ },
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH (interface_dpo_ip6_node,
+ interface_dpo_ip6)
+
diff --git a/src/vnet/dpo/interface_dpo.h b/src/vnet/dpo/interface_dpo.h
new file mode 100644
index 00000000000..1538dfbbf06
--- /dev/null
+++ b/src/vnet/dpo/interface_dpo.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * @brief
+ * The data-path object representing a change of the packet's RX interface,
+ * i.e. the packet is then treated as if received, 'for-us', on that interface
+ */
+
+#ifndef __INTERFACE_DPO_H__
+#define __INTERFACE_DPO_H__
+
+#include <vnet/dpo/dpo.h>
+
+typedef struct interface_dpo_t_
+{
+ /**
+ * The Software interface index that the packets will be given
+ * as the ingress/rx interface
+ */
+ u32 ido_sw_if_index;
+
+ /**
+ * next VLIB node. A '<proto>-input' node.
+ */
+ u32 ido_next_node;
+
+ /**
+ * DPO protocol that the packets will have as they 'ingress'
+ * on this interface
+ */
+ dpo_proto_t ido_proto;
+
+ /**
+ * number of locks.
+ */
+ u16 ido_locks;
+} interface_dpo_t;
+
+extern void interface_dpo_add_or_lock (dpo_proto_t proto,
+ u32 sw_if_index,
+ dpo_id_t *dpo);
+
+extern void interface_dpo_module_init(void);
+
+/**
+ * @brief pool of all interface DPOs
+ */
+interface_dpo_t *interface_dpo_pool;
+
+static inline interface_dpo_t *
+interface_dpo_get (index_t index)
+{
+ return (pool_elt_at_index(interface_dpo_pool, index));
+}
+
+#endif
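
For orientation, a minimal sketch (not part of this patch) of how a caller might use the API above to stack an interface DPO. The wrapper function below is hypothetical; only interface_dpo_add_or_lock(), dpo_reset() and the DPO_INVALID initialiser from dpo.h are assumed to exist.

    #include <vnet/dpo/interface_dpo.h>

    /* Hypothetical example: make packets resolved through 'dpo' appear to be
     * received on 'sw_if_index' as IPv4. interface_dpo_add_or_lock() takes a
     * lock on the shared interface DPO; dpo_reset() releases it again. */
    static void
    example_use_interface_dpo (u32 sw_if_index)
    {
        dpo_id_t dpo = DPO_INVALID;

        interface_dpo_add_or_lock (DPO_PROTO_IP4, sw_if_index, &dpo);

        /* ... stack 'dpo' under a parent object, e.g. via dpo_stack() ... */

        dpo_reset (&dpo);   /* drop the reference when done */
    }
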
diff --git a/src/vnet/dpo/lookup_dpo.c b/src/vnet/dpo/lookup_dpo.c
index 97ad0a44cce..e5b00a79e52 100644
--- a/src/vnet/dpo/lookup_dpo.c
+++ b/src/vnet/dpo/lookup_dpo.c
@@ -21,8 +21,12 @@
#include <vnet/fib/ip4_fib.h>
#include <vnet/fib/ip6_fib.h>
#include <vnet/fib/mpls_fib.h>
+#include <vnet/mfib/mfib_table.h>
+#include <vnet/mfib/ip4_mfib.h>
+#include <vnet/mfib/ip6_mfib.h>
static const char *const lookup_input_names[] = LOOKUP_INPUTS;
+static const char *const lookup_cast_names[] = LOOKUP_CASTS;
/**
* @brief Enumeration of the lookup subtypes
@@ -31,6 +35,7 @@ typedef enum lookup_sub_type_t_
{
LOOKUP_SUB_TYPE_SRC,
LOOKUP_SUB_TYPE_DST,
+ LOOKUP_SUB_TYPE_DST_MCAST,
LOOKUP_SUB_TYPE_DST_TABLE_FROM_INTERFACE,
} lookup_sub_type_t;
#define LOOKUP_SUB_TYPE_NUM (LOOKUP_SUB_TYPE_DST_TABLE_FROM_INTERFACE+1)
@@ -67,6 +72,7 @@ lookup_dpo_get_index (lookup_dpo_t *lkd)
static void
lookup_dpo_add_or_lock_i (fib_node_index_t fib_index,
dpo_proto_t proto,
+ lookup_cast_t cast,
lookup_input_t input,
lookup_table_t table_config,
dpo_id_t *dpo)
@@ -79,6 +85,7 @@ lookup_dpo_add_or_lock_i (fib_node_index_t fib_index,
lkd->lkd_proto = proto;
lkd->lkd_input = input;
lkd->lkd_table = table_config;
+ lkd->lkd_cast = cast;
/*
* use the input type to select the lookup sub-type
@@ -100,6 +107,10 @@ lookup_dpo_add_or_lock_i (fib_node_index_t fib_index,
type = lookup_dpo_sub_types[LOOKUP_SUB_TYPE_DST];
break;
}
+ if (LOOKUP_MULTICAST == cast)
+ {
+ type = lookup_dpo_sub_types[LOOKUP_SUB_TYPE_DST_MCAST];
+ }
}
if (0 == type)
@@ -115,20 +126,29 @@ lookup_dpo_add_or_lock_i (fib_node_index_t fib_index,
void
lookup_dpo_add_or_lock_w_fib_index (fib_node_index_t fib_index,
dpo_proto_t proto,
+ lookup_cast_t cast,
lookup_input_t input,
lookup_table_t table_config,
dpo_id_t *dpo)
{
if (LOOKUP_TABLE_FROM_CONFIG == table_config)
{
- fib_table_lock(fib_index, dpo_proto_to_fib(proto));
+ if (LOOKUP_UNICAST == cast)
+ {
+ fib_table_lock(fib_index, dpo_proto_to_fib(proto));
+ }
+ else
+ {
+ mfib_table_lock(fib_index, dpo_proto_to_fib(proto));
+ }
}
- lookup_dpo_add_or_lock_i(fib_index, proto, input, table_config, dpo);
+ lookup_dpo_add_or_lock_i(fib_index, proto, cast, input, table_config, dpo);
}
void
lookup_dpo_add_or_lock_w_table_id (u32 table_id,
dpo_proto_t proto,
+ lookup_cast_t cast,
lookup_input_t input,
lookup_table_t table_config,
dpo_id_t *dpo)
@@ -137,13 +157,22 @@ lookup_dpo_add_or_lock_w_table_id (u32 table_id,
if (LOOKUP_TABLE_FROM_CONFIG == table_config)
{
- fib_index =
- fib_table_find_or_create_and_lock(dpo_proto_to_fib(proto),
- table_id);
+ if (LOOKUP_UNICAST == cast)
+ {
+ fib_index =
+ fib_table_find_or_create_and_lock(dpo_proto_to_fib(proto),
+ table_id);
+ }
+ else
+ {
+ fib_index =
+ mfib_table_find_or_create_and_lock(dpo_proto_to_fib(proto),
+ table_id);
+ }
}
ASSERT(FIB_NODE_INDEX_INVALID != fib_index);
- lookup_dpo_add_or_lock_i(fib_index, proto, input, table_config, dpo);
+ lookup_dpo_add_or_lock_i(fib_index, proto, cast, input, table_config, dpo);
}
u8*
@@ -156,16 +185,29 @@ format_lookup_dpo (u8 *s, va_list *args)
if (LOOKUP_TABLE_FROM_INPUT_INTERFACE == lkd->lkd_table)
{
- s = format(s, "%s lookup in interface's %U table",
+ s = format(s, "%s,%s lookup in interface's %U table",
lookup_input_names[lkd->lkd_input],
+ lookup_cast_names[lkd->lkd_cast],
format_dpo_proto, lkd->lkd_proto);
}
else
{
- s = format(s, "%s lookup in %U",
- lookup_input_names[lkd->lkd_input],
- format_fib_table_name, lkd->lkd_fib_index,
- dpo_proto_to_fib(lkd->lkd_proto));
+ if (LOOKUP_UNICAST == lkd->lkd_cast)
+ {
+ s = format(s, "%s,%s lookup in %U",
+ lookup_input_names[lkd->lkd_input],
+ lookup_cast_names[lkd->lkd_cast],
+ format_fib_table_name, lkd->lkd_fib_index,
+ dpo_proto_to_fib(lkd->lkd_proto));
+ }
+ else
+ {
+ s = format(s, "%s,%s lookup in %U",
+ lookup_input_names[lkd->lkd_input],
+ lookup_cast_names[lkd->lkd_cast],
+ format_mfib_table_name, lkd->lkd_fib_index,
+ dpo_proto_to_fib(lkd->lkd_proto));
+ }
}
return (s);
}
@@ -193,8 +235,16 @@ lookup_dpo_unlock (dpo_id_t *dpo)
{
if (LOOKUP_TABLE_FROM_CONFIG == lkd->lkd_table)
{
- fib_table_unlock(lkd->lkd_fib_index,
- dpo_proto_to_fib(lkd->lkd_proto));
+ if (LOOKUP_UNICAST == lkd->lkd_cast)
+ {
+ fib_table_unlock(lkd->lkd_fib_index,
+ dpo_proto_to_fib(lkd->lkd_proto));
+ }
+ else
+ {
+ mfib_table_unlock(lkd->lkd_fib_index,
+ dpo_proto_to_fib(lkd->lkd_proto));
+ }
}
pool_put(lookup_dpo_pool, lkd);
}
@@ -1069,6 +1119,123 @@ VLIB_REGISTER_NODE (lookup_mpls_dst_itf_node) = {
};
VLIB_NODE_FUNCTION_MULTIARCH (lookup_mpls_dst_itf_node, lookup_mpls_dst_itf)
+typedef enum lookup_ip_dst_mcast_next_t_ {
+ LOOKUP_IP_DST_MCAST_NEXT_RPF,
+ LOOKUP_IP_DST_MCAST_N_NEXT,
+} mfib_forward_lookup_next_t;
+
+always_inline uword
+lookup_dpo_ip_dst_mcast_inline (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame,
+ int is_v4)
+{
+ u32 n_left_from, next_index, * from, * to_next;
+
+ from = vlib_frame_vector_args (from_frame);
+ n_left_from = from_frame->n_vectors;
+
+ next_index = LOOKUP_IP_DST_MCAST_NEXT_RPF;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
+
+ /* while (n_left_from >= 4 && n_left_to_next >= 2) */
+ /* } */
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 bi0, lkdi0, fib_index0, next0;
+ const lookup_dpo_t * lkd0;
+ fib_node_index_t mfei0;
+ vlib_buffer_t * b0;
+
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ /* dst lookup was done by mpls lookup */
+ lkdi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
+ lkd0 = lookup_dpo_get(lkdi0);
+ fib_index0 = lkd0->lkd_fib_index;
+ next0 = LOOKUP_IP_DST_MCAST_NEXT_RPF;
+
+ if (is_v4)
+ {
+ ip4_header_t * ip0;
+
+ ip0 = vlib_buffer_get_current (b0);
+ mfei0 = ip4_mfib_table_lookup(ip4_mfib_get(fib_index0),
+ &ip0->src_address,
+ &ip0->dst_address,
+ 64);
+ if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ lookup_trace_t *tr = vlib_add_trace (vm, node,
+ b0, sizeof (*tr));
+ tr->fib_index = fib_index0;
+ tr->lbi = mfei0;
+ tr->addr.ip4 = ip0->dst_address;
+ }
+ }
+ else
+ {
+ ip6_header_t * ip0;
+
+ ip0 = vlib_buffer_get_current (b0);
+ mfei0 = ip6_mfib_table_lookup2(ip6_mfib_get(fib_index0),
+ &ip0->src_address,
+ &ip0->dst_address);
+ if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ lookup_trace_t *tr = vlib_add_trace (vm, node,
+ b0, sizeof (*tr));
+ tr->fib_index = fib_index0;
+ tr->lbi = mfei0;
+ tr->addr.ip6 = ip0->dst_address;
+ }
+ }
+
+ vnet_buffer (b0)->ip.adj_index[VLIB_TX] = mfei0;
+
+ vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next,
+ n_left_to_next, bi0, next0);
+ }
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+ return from_frame->n_vectors;
+}
+
+always_inline uword
+lookup_ip4_dst_mcast (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ return (lookup_dpo_ip_dst_mcast_inline(vm, node, from_frame, 1));
+}
+
+VLIB_REGISTER_NODE (lookup_ip4_dst_mcast_node) = {
+ .function = lookup_ip4_dst_mcast,
+ .name = "lookup-ip4-dst-mcast",
+ .vector_size = sizeof (u32),
+
+ .format_trace = format_lookup_trace,
+ .n_next_nodes = LOOKUP_IP_DST_MCAST_N_NEXT,
+ .next_nodes = {
+ [LOOKUP_IP_DST_MCAST_NEXT_RPF] = "ip4-mfib-forward-rpf",
+ },
+};
+VLIB_NODE_FUNCTION_MULTIARCH (lookup_ip4_dst_mcast_node,
+ lookup_ip4_dst_mcast)
+
static void
lookup_dpo_mem_show (void)
{
@@ -1129,6 +1296,22 @@ const static char* const * const lookup_dst_nodes[DPO_PROTO_NUM] =
[DPO_PROTO_MPLS] = lookup_dst_mpls_nodes,
};
+const static char* const lookup_dst_mcast_ip4_nodes[] =
+{
+ "lookup-ip4-dst-mcast",
+ NULL,
+};
+const static char* const lookup_dst_mcast_ip6_nodes[] =
+{
+ "lookup-ip6-dst-mcast",
+ NULL,
+};
+const static char* const * const lookup_dst_mcast_nodes[DPO_PROTO_NUM] =
+{
+ [DPO_PROTO_IP4] = lookup_dst_mcast_ip4_nodes,
+ [DPO_PROTO_IP6] = lookup_dst_mcast_ip6_nodes,
+};
+
const static char* const lookup_dst_from_interface_ip4_nodes[] =
{
"lookup-ip4-dst-itf",
@@ -1168,6 +1351,8 @@ lookup_dpo_module_init (void)
dpo_register_new_type(&lkd_vft, lookup_src_nodes);
lookup_dpo_sub_types[LOOKUP_SUB_TYPE_DST] =
dpo_register_new_type(&lkd_vft, lookup_dst_nodes);
+ lookup_dpo_sub_types[LOOKUP_SUB_TYPE_DST_MCAST] =
+ dpo_register_new_type(&lkd_vft, lookup_dst_mcast_nodes);
lookup_dpo_sub_types[LOOKUP_SUB_TYPE_DST_TABLE_FROM_INTERFACE] =
dpo_register_new_type(&lkd_vft, lookup_dst_from_interface_nodes);
}
diff --git a/src/vnet/dpo/lookup_dpo.h b/src/vnet/dpo/lookup_dpo.h
index ff283388868..7dfd0385a9d 100644
--- a/src/vnet/dpo/lookup_dpo.h
+++ b/src/vnet/dpo/lookup_dpo.h
@@ -47,6 +47,19 @@ typedef enum lookup_table_t_ {
}
/**
+ * Switch to use the unicast or the multicast FIB for the lookup
+ */
+typedef enum lookup_cast_t_ {
+ LOOKUP_UNICAST,
+ LOOKUP_MULTICAST,
+} __attribute__ ((packed)) lookup_cast_t;
+
+#define LOOKUP_CASTS { \
+ [LOOKUP_UNICAST] = "unicast", \
+ [LOOKUP_MULTICAST] = "multicast", \
+}
+
+/**
* A representation of an MPLS label for imposition in the data-path
*/
typedef struct lookup_dpo_t
@@ -74,6 +87,11 @@ typedef struct lookup_dpo_t
lookup_table_t lkd_table;
/**
+ * Unicast or multicast FIB lookup
+ */
+ lookup_cast_t lkd_cast;
+
+ /**
* Number of locks
*/
u16 lkd_locks;
@@ -81,11 +99,13 @@ typedef struct lookup_dpo_t
extern void lookup_dpo_add_or_lock_w_fib_index(fib_node_index_t fib_index,
dpo_proto_t proto,
+ lookup_cast_t cast,
lookup_input_t input,
lookup_table_t table,
dpo_id_t *dpo);
extern void lookup_dpo_add_or_lock_w_table_id(u32 table_id,
dpo_proto_t proto,
+ lookup_cast_t cast,
lookup_input_t input,
lookup_table_t table,
dpo_id_t *dpo);
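
A hedged sketch of the widened signature: the lookup_cast_t is the only new argument. The wrapper below is hypothetical, and LOOKUP_INPUT_DST_ADDR is assumed to be one of the pre-existing LOOKUP_INPUTS values; with LOOKUP_UNICAST in place of LOOKUP_MULTICAST the call behaves exactly as it did before this change.

    #include <vnet/dpo/lookup_dpo.h>

    /* Request a destination-address lookup in IPv4 multicast FIB table 0. */
    static void
    example_mcast_lookup (dpo_id_t *dpo)
    {
        lookup_dpo_add_or_lock_w_table_id (0 /* table-id */,
                                           DPO_PROTO_IP4,
                                           LOOKUP_MULTICAST,
                                           LOOKUP_INPUT_DST_ADDR,
                                           LOOKUP_TABLE_FROM_CONFIG,
                                           dpo);
    }
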
diff --git a/src/vnet/dpo/mpls_disposition.c b/src/vnet/dpo/mpls_disposition.c
new file mode 100644
index 00000000000..5dc33fcfdbd
--- /dev/null
+++ b/src/vnet/dpo/mpls_disposition.c
@@ -0,0 +1,364 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/ip/ip.h>
+#include <vnet/dpo/mpls_disposition.h>
+#include <vnet/mpls/mpls.h>
+
+/*
+ * pool of all MPLS disposition DPOs
+ */
+mpls_disp_dpo_t *mpls_disp_dpo_pool;
+
+static mpls_disp_dpo_t *
+mpls_disp_dpo_alloc (void)
+{
+ mpls_disp_dpo_t *mdd;
+
+ pool_get_aligned(mpls_disp_dpo_pool, mdd, CLIB_CACHE_LINE_BYTES);
+ memset(mdd, 0, sizeof(*mdd));
+
+ dpo_reset(&mdd->mdd_dpo);
+
+ return (mdd);
+}
+
+static index_t
+mpls_disp_dpo_get_index (mpls_disp_dpo_t *mdd)
+{
+ return (mdd - mpls_disp_dpo_pool);
+}
+
+index_t
+mpls_disp_dpo_create (dpo_proto_t payload_proto,
+ fib_rpf_id_t rpf_id,
+ const dpo_id_t *dpo)
+{
+ mpls_disp_dpo_t *mdd;
+
+ mdd = mpls_disp_dpo_alloc();
+
+ mdd->mdd_payload_proto = payload_proto;
+ mdd->mdd_rpf_id = rpf_id;
+
+ dpo_stack(DPO_MPLS_DISPOSITION,
+ mdd->mdd_payload_proto,
+ &mdd->mdd_dpo,
+ dpo);
+
+ return (mpls_disp_dpo_get_index(mdd));
+}
+
+u8*
+format_mpls_disp_dpo (u8 *s, va_list *args)
+{
+ index_t index = va_arg (*args, index_t);
+ u32 indent = va_arg (*args, u32);
+ mpls_disp_dpo_t *mdd;
+
+ mdd = mpls_disp_dpo_get(index);
+
+ s = format(s, "mpls-disposition:[%d]:[%U]",
+ index,
+ format_dpo_proto, mdd->mdd_payload_proto);
+
+ s = format(s, "\n%U", format_white_space, indent);
+ s = format(s, "%U", format_dpo_id, &mdd->mdd_dpo, indent+2);
+
+ return (s);
+}
+
+static void
+mpls_disp_dpo_lock (dpo_id_t *dpo)
+{
+ mpls_disp_dpo_t *mdd;
+
+ mdd = mpls_disp_dpo_get(dpo->dpoi_index);
+
+ mdd->mdd_locks++;
+}
+
+static void
+mpls_disp_dpo_unlock (dpo_id_t *dpo)
+{
+ mpls_disp_dpo_t *mdd;
+
+ mdd = mpls_disp_dpo_get(dpo->dpoi_index);
+
+ mdd->mdd_locks--;
+
+ if (0 == mdd->mdd_locks)
+ {
+ dpo_reset(&mdd->mdd_dpo);
+ pool_put(mpls_disp_dpo_pool, mdd);
+ }
+}
+
+/**
+ * @brief A struct to hold tracing information for the MPLS label disposition
+ * node.
+ */
+typedef struct mpls_label_disposition_trace_t_
+{
+ index_t mdd;
+} mpls_label_disposition_trace_t;
+
+always_inline uword
+mpls_label_disposition_inline (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame,
+ u8 payload_is_ip4,
+ u8 payload_is_ip6)
+{
+ u32 n_left_from, next_index, * from, * to_next;
+
+ from = vlib_frame_vector_args (from_frame);
+ n_left_from = from_frame->n_vectors;
+
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from >= 4 && n_left_to_next >= 2)
+ {
+ mpls_disp_dpo_t *mdd0, *mdd1;
+ u32 bi0, mddi0, bi1, mddi1;
+ vlib_buffer_t * b0, *b1;
+ u32 next0, next1;
+
+ bi0 = to_next[0] = from[0];
+ bi1 = to_next[1] = from[1];
+
+ /* Prefetch next iteration. */
+ {
+ vlib_buffer_t * p2, * p3;
+
+ p2 = vlib_get_buffer (vm, from[2]);
+ p3 = vlib_get_buffer (vm, from[3]);
+
+ vlib_prefetch_buffer_header (p2, STORE);
+ vlib_prefetch_buffer_header (p3, STORE);
+
+ CLIB_PREFETCH (p2->data, sizeof (ip6_header_t), STORE);
+ CLIB_PREFETCH (p3->data, sizeof (ip6_header_t), STORE);
+ }
+
+ from += 2;
+ to_next += 2;
+ n_left_from -= 2;
+ n_left_to_next -= 2;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+
+ /* the disposition DPO index was set by the preceding MPLS lookup */
+ mddi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
+ mddi1 = vnet_buffer(b1)->ip.adj_index[VLIB_TX];
+ mdd0 = mpls_disp_dpo_get(mddi0);
+ mdd1 = mpls_disp_dpo_get(mddi1);
+
+ if (payload_is_ip4)
+ {
+ /*
+ * decrement the TTL on ingress to the LSP
+ */
+ }
+ else if (payload_is_ip6)
+ {
+ /*
+ * decrement the TTL on ingress to the LSP
+ */
+ }
+
+ next0 = mdd0->mdd_dpo.dpoi_next_node;
+ next1 = mdd1->mdd_dpo.dpoi_next_node;
+ vnet_buffer(b0)->ip.adj_index[VLIB_TX] = mdd0->mdd_dpo.dpoi_index;
+ vnet_buffer(b1)->ip.adj_index[VLIB_TX] = mdd1->mdd_dpo.dpoi_index;
+ vnet_buffer(b0)->ip.rpf_id = mdd0->mdd_rpf_id;
+ vnet_buffer(b1)->ip.rpf_id = mdd1->mdd_rpf_id;
+
+ if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ mpls_label_disposition_trace_t *tr =
+ vlib_add_trace (vm, node, b0, sizeof (*tr));
+
+ tr->mdd = mddi0;
+ }
+ if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ mpls_label_disposition_trace_t *tr =
+ vlib_add_trace (vm, node, b1, sizeof (*tr));
+ tr->mdd = mddi1;
+ }
+
+ vlib_validate_buffer_enqueue_x2(vm, node, next_index, to_next,
+ n_left_to_next,
+ bi0, bi1, next0, next1);
+ }
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ mpls_disp_dpo_t *mdd0;
+ vlib_buffer_t * b0;
+ u32 bi0, mddi0;
+ u32 next0;
+
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+
+ /* the disposition DPO index was set by the preceding MPLS lookup */
+ mddi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX];
+ mdd0 = mpls_disp_dpo_get(mddi0);
+
+ if (payload_is_ip4)
+ {
+ /*
+ * decrement the TTL on ingress to the LSP
+ */
+ }
+ else if (payload_is_ip6)
+ {
+ /*
+ * decrement the TTL on ingress to the LSP
+ */
+ }
+ else
+ {
+ }
+
+ next0 = mdd0->mdd_dpo.dpoi_next_node;
+ vnet_buffer(b0)->ip.adj_index[VLIB_TX] = mdd0->mdd_dpo.dpoi_index;
+ vnet_buffer(b0)->ip.rpf_id = mdd0->mdd_rpf_id;
+
+ if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ mpls_label_disposition_trace_t *tr =
+ vlib_add_trace (vm, node, b0, sizeof (*tr));
+ tr->mdd = mddi0;
+ }
+
+ vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next,
+ n_left_to_next, bi0, next0);
+ }
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+ return from_frame->n_vectors;
+}
+
+static u8 *
+format_mpls_label_disposition_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ mpls_label_disposition_trace_t * t;
+
+ t = va_arg (*args, mpls_label_disposition_trace_t *);
+
+ s = format(s, "disp:%d", t->mdd);
+ return (s);
+}
+
+static uword
+ip4_mpls_label_disposition (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return (mpls_label_disposition_inline(vm, node, frame, 1, 0));
+}
+
+VLIB_REGISTER_NODE (ip4_mpls_label_disposition_node) = {
+ .function = ip4_mpls_label_disposition,
+ .name = "ip4-mpls-label-disposition",
+ .vector_size = sizeof (u32),
+
+ .format_trace = format_mpls_label_disposition_trace,
+ .n_next_nodes = 1,
+ .next_nodes = {
+ [0] = "ip4-drop",
+ }
+};
+VLIB_NODE_FUNCTION_MULTIARCH (ip4_mpls_label_disposition_node,
+ ip4_mpls_label_disposition)
+
+static uword
+ip6_mpls_label_disposition (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return (mpls_label_disposition_inline(vm, node, frame, 0, 1));
+}
+
+VLIB_REGISTER_NODE (ip6_mpls_label_disposition_node) = {
+ .function = ip6_mpls_label_disposition,
+ .name = "ip6-mpls-label-disposition",
+ .vector_size = sizeof (u32),
+
+ .format_trace = format_mpls_label_disposition_trace,
+ .n_next_nodes = 1,
+ .next_nodes = {
+ [0] = "ip6-drop",
+ }
+};
+VLIB_NODE_FUNCTION_MULTIARCH (ip6_mpls_label_disposition_node,
+ ip6_mpls_label_disposition)
+
+static void
+mpls_disp_dpo_mem_show (void)
+{
+ fib_show_memory_usage("MPLS disposition",
+ pool_elts(mpls_disp_dpo_pool),
+ pool_len(mpls_disp_dpo_pool),
+ sizeof(mpls_disp_dpo_t));
+}
+
+const static dpo_vft_t mdd_vft = {
+ .dv_lock = mpls_disp_dpo_lock,
+ .dv_unlock = mpls_disp_dpo_unlock,
+ .dv_format = format_mpls_disp_dpo,
+ .dv_mem_show = mpls_disp_dpo_mem_show,
+};
+
+const static char* const mpls_label_disp_ip4_nodes[] =
+{
+ "ip4-mpls-label-disposition",
+ NULL,
+};
+const static char* const mpls_label_disp_ip6_nodes[] =
+{
+ "ip6-mpls-label-disposition",
+ NULL,
+};
+const static char* const * const mpls_label_disp_nodes[DPO_PROTO_NUM] =
+{
+ [DPO_PROTO_IP4] = mpls_label_disp_ip4_nodes,
+ [DPO_PROTO_IP6] = mpls_label_disp_ip6_nodes,
+};
+
+
+void
+mpls_disp_dpo_module_init (void)
+{
+ dpo_register(DPO_MPLS_DISPOSITION, &mdd_vft, mpls_label_disp_nodes);
+}
diff --git a/src/vnet/dpo/mpls_disposition.h b/src/vnet/dpo/mpls_disposition.h
new file mode 100644
index 00000000000..9c0150830d2
--- /dev/null
+++ b/src/vnet/dpo/mpls_disposition.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __MPLS_DISP_DPO_H__
+#define __MPLS_DISP_DPO_H__
+
+#include <vnet/vnet.h>
+#include <vnet/mpls/packet.h>
+#include <vnet/dpo/dpo.h>
+#include <vnet/mfib/mfib_types.h>
+
+/**
+ * A representation of an MPLS label disposition (pop) in the data-path
+ */
+typedef struct mpls_disp_dpo_t
+{
+ /**
+ * Next DPO in the graph
+ */
+ dpo_id_t mdd_dpo;
+
+ /**
+ * The protocol of the payload/packets that are exposed by the disposition
+ */
+ dpo_proto_t mdd_payload_proto;
+
+ /**
+ * RPF-ID (if this is an mcast disposition)
+ */
+ fib_rpf_id_t mdd_rpf_id;
+
+ /**
+ * Number of locks/users of the label
+ */
+ u16 mdd_locks;
+} mpls_disp_dpo_t;
+
+/**
+ * @brief Assert that the MPLS disposition object is no larger than a cache
+ * line, so that it stays cheap to access in the data-path.
+ */
+_Static_assert((sizeof(mpls_disp_dpo_t) <= CLIB_CACHE_LINE_BYTES),
+ "MPLS Disposition DPO is larger than one cache line.");
+
+/**
+ * @brief Create an MPLS disposition object
+ *
+ * @param payload_proto The protocol of the payload packets that will be
+ * exposed once the label is disposed of.
+ * @param rpf_id The RPF-ID given to packets (used for multicast entries).
+ * @param dpo The parent of the created MPLS disposition object
+ */
+extern index_t mpls_disp_dpo_create(dpo_proto_t payload_proto,
+ fib_rpf_id_t rpf_id,
+ const dpo_id_t *dpo);
+
+extern u8* format_mpls_disp_dpo(u8 *s, va_list *args);
+
+
+/*
+ * Encapsulation violation for fast data-path access
+ */
+extern mpls_disp_dpo_t *mpls_disp_dpo_pool;
+
+static inline mpls_disp_dpo_t *
+mpls_disp_dpo_get (index_t index)
+{
+ return (pool_elt_at_index(mpls_disp_dpo_pool, index));
+}
+
+extern void mpls_disp_dpo_module_init(void);
+
+#endif
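
As a sketch under stated assumptions: a disposition DPO could be created and bound into a dpo_id_t as below. MFIB_RPF_ID_NONE is assumed from mfib_types.h to mean "no RPF-ID" (the unicast case), 'parent' stands for whatever object should receive the exposed payload, and only mpls_disp_dpo_create() and dpo_set() from this patch are relied on.

    #include <vnet/dpo/mpls_disposition.h>

    static void
    example_mpls_disposition (const dpo_id_t *parent, dpo_id_t *dpo)
    {
        index_t mddi;

        /* expose an IPv4 payload; no RPF-ID since this is a unicast path */
        mddi = mpls_disp_dpo_create (DPO_PROTO_IP4, MFIB_RPF_ID_NONE, parent);

        dpo_set (dpo, DPO_MPLS_DISPOSITION, DPO_PROTO_IP4, mddi);
    }
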
diff --git a/src/vnet/dpo/mpls_label_dpo.c b/src/vnet/dpo/mpls_label_dpo.c
index be9b28507ba..4d84b9001ce 100644
--- a/src/vnet/dpo/mpls_label_dpo.c
+++ b/src/vnet/dpo/mpls_label_dpo.c
@@ -562,7 +562,7 @@ VLIB_REGISTER_NODE (mpls_label_imposition_node) = {
.format_trace = format_mpls_label_imposition_trace,
.n_next_nodes = 1,
.next_nodes = {
- [0] = "error-drop",
+ [0] = "mpls-drop",
}
};
VLIB_NODE_FUNCTION_MULTIARCH (mpls_label_imposition_node,
@@ -584,7 +584,7 @@ VLIB_REGISTER_NODE (ip4_mpls_label_imposition_node) = {
.format_trace = format_mpls_label_imposition_trace,
.n_next_nodes = 1,
.next_nodes = {
- [0] = "error-drop",
+ [0] = "ip4-drop",
}
};
VLIB_NODE_FUNCTION_MULTIARCH (ip4_mpls_label_imposition_node,
@@ -606,7 +606,7 @@ VLIB_REGISTER_NODE (ip6_mpls_label_imposition_node) = {
.format_trace = format_mpls_label_imposition_trace,
.n_next_nodes = 1,
.next_nodes = {
- [0] = "error-drop",
+ [0] = "ip6-drop",
}
};
VLIB_NODE_FUNCTION_MULTIARCH (ip6_mpls_label_imposition_node,
diff --git a/src/vnet/dpo/replicate_dpo.c b/src/vnet/dpo/replicate_dpo.c
index e25ceae91fe..9fdb9a05071 100644
--- a/src/vnet/dpo/replicate_dpo.c
+++ b/src/vnet/dpo/replicate_dpo.c
@@ -17,6 +17,7 @@
#include <vnet/dpo/replicate_dpo.h>
#include <vnet/dpo/drop_dpo.h>
#include <vnet/adj/adj.h>
+#include <vnet/mpls/mpls_types.h>
#undef REP_DEBUG
@@ -106,6 +107,7 @@ replicate_format (index_t repi,
dpo_id_t *buckets;
u32 i;
+ repi &= ~MPLS_IS_REPLICATE;
rep = replicate_get(repi);
vlib_get_combined_counter(&(replicate_main.repm_counters), repi, &to);
buckets = replicate_get_buckets(rep);
@@ -187,6 +189,7 @@ replicate_set_bucket (index_t repi,
replicate_t *rep;
dpo_id_t *buckets;
+ repi &= ~MPLS_IS_REPLICATE;
rep = replicate_get(repi);
buckets = replicate_get_buckets(rep);
@@ -199,11 +202,13 @@ int
replicate_is_drop (const dpo_id_t *dpo)
{
replicate_t *rep;
+ index_t repi;
if (DPO_REPLICATE != dpo->dpoi_type)
return (0);
- rep = replicate_get(dpo->dpoi_index);
+ repi = dpo->dpoi_index & ~MPLS_IS_REPLICATE;
+ rep = replicate_get(repi);
if (1 == rep->rep_n_buckets)
{
@@ -218,6 +223,7 @@ replicate_get_bucket (index_t repi,
{
replicate_t *rep;
+ repi &= ~MPLS_IS_REPLICATE;
rep = replicate_get(repi);
return (replicate_get_bucket_i(rep, bucket));
@@ -288,9 +294,11 @@ replicate_multipath_update (const dpo_id_t *dpo,
dpo_id_t *tmp_dpo;
u32 ii, n_buckets;
replicate_t *rep;
+ index_t repi;
ASSERT(DPO_REPLICATE == dpo->dpoi_type);
- rep = replicate_get(dpo->dpoi_index);
+ repi = dpo->dpoi_index & ~MPLS_IS_REPLICATE;
+ rep = replicate_get(repi);
nhs = replicate_multipath_next_hop_fixup(next_hops,
rep->rep_proto);
n_buckets = vec_len(nhs);
@@ -718,7 +726,7 @@ format_replicate_trace (u8 * s, va_list * args)
s = format (s, "replicate: %d via %U",
t->rep_index,
- format_dpo_id, &t->dpo);
+ format_dpo_id, &t->dpo, 0);
return s;
}
@@ -731,7 +739,7 @@ ip4_replicate (vlib_main_t * vm,
}
/**
- * @brief
+ * @brief IP4 replication node
*/
VLIB_REGISTER_NODE (ip4_replicate_node) = {
.function = ip4_replicate,
@@ -744,7 +752,7 @@ VLIB_REGISTER_NODE (ip4_replicate_node) = {
.format_trace = format_replicate_trace,
.n_next_nodes = 1,
.next_nodes = {
- [0] = "error-drop",
+ [0] = "ip4-drop",
},
};
@@ -757,7 +765,7 @@ ip6_replicate (vlib_main_t * vm,
}
/**
- * @brief
+ * @brief IPv6 replication node
*/
VLIB_REGISTER_NODE (ip6_replicate_node) = {
.function = ip6_replicate,
@@ -770,7 +778,33 @@ VLIB_REGISTER_NODE (ip6_replicate_node) = {
.format_trace = format_replicate_trace,
.n_next_nodes = 1,
.next_nodes = {
- [0] = "error-drop",
+ [0] = "ip6-drop",
+ },
+};
+
+static uword
+mpls_replicate (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return (replicate_inline (vm, node, frame));
+}
+
+/**
+ * @brief MPLS replication node
+ */
+VLIB_REGISTER_NODE (mpls_replicate_node) = {
+ .function = mpls_replicate,
+ .name = "mpls-replicate",
+ .vector_size = sizeof (u32),
+
+ .n_errors = ARRAY_LEN(replicate_dpo_error_strings),
+ .error_strings = replicate_dpo_error_strings,
+
+ .format_trace = format_replicate_trace,
+ .n_next_nodes = 1,
+ .next_nodes = {
+ [0] = "mpls-drop",
},
};
diff --git a/src/vnet/dpo/replicate_dpo.h b/src/vnet/dpo/replicate_dpo.h
index 77273015c9e..7383184a2ea 100644
--- a/src/vnet/dpo/replicate_dpo.h
+++ b/src/vnet/dpo/replicate_dpo.h
@@ -25,6 +25,7 @@
#include <vnet/dpo/dpo.h>
#include <vnet/dpo/load_balance.h>
#include <vnet/fib/fib_types.h>
+#include <vnet/mpls/mpls_types.h>
/**
* replicate main
@@ -119,6 +120,7 @@ extern replicate_t *replicate_pool;
static inline replicate_t*
replicate_get (index_t repi)
{
+ repi &= ~MPLS_IS_REPLICATE;
return (pool_elt_at_index(replicate_pool, repi));
}
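
The masking above follows the convention, assumed from MPLS_IS_REPLICATE in mpls_types.h, that an MPLS result index names either a load-balance or a replicate object in the same word, with a flag bit marking the replicate case. A small illustration of that convention:

    #include <vnet/mpls/mpls_types.h>

    /* A dpo_id_t index with MPLS_IS_REPLICATE set names a replicate object;
     * replicate_get() and the other accessors mask the flag off before
     * indexing the pool, as shown in the hunk above. */
    static inline int
    example_index_is_replicate (index_t dpoi_index)
    {
        return ((dpoi_index & MPLS_IS_REPLICATE) != 0);
    }
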