Diffstat (limited to 'vnet')
-rw-r--r--  vnet/Makefile.am                                |   2
-rw-r--r--  vnet/vnet/dpo/dpo.c                             |   4
-rw-r--r--  vnet/vnet/dpo/dpo.h                             |   6
-rw-r--r--  vnet/vnet/dpo/drop_dpo.h                        |  10
-rw-r--r--  vnet/vnet/dpo/ip_null_dpo.c                     | 408
-rw-r--r--  vnet/vnet/dpo/ip_null_dpo.h                     |  56
-rw-r--r--  vnet/vnet/fib/fib_entry.c                       |  78
-rw-r--r--  vnet/vnet/fib/fib_entry.h                       |   4
-rw-r--r--  vnet/vnet/fib/fib_entry_src.c                   |  50
-rw-r--r--  vnet/vnet/fib/fib_entry_src.h                   |   4
-rw-r--r--  vnet/vnet/fib/fib_entry_src_mpls.c              |   2
-rw-r--r--  vnet/vnet/fib/fib_path.c                        |   2
-rw-r--r--  vnet/vnet/fib/fib_path_ext.c                    |   2
-rw-r--r--  vnet/vnet/fib/fib_table.c                       |  60
-rw-r--r--  vnet/vnet/fib/fib_table.h                       |  14
-rw-r--r--  vnet/vnet/fib/fib_test.c                        |  36
-rw-r--r--  vnet/vnet/fib/mpls_fib.c                        |   2
-rw-r--r--  vnet/vnet/ip/ip4_forward.c                      |  12
-rw-r--r--  vnet/vnet/ip/ip4_source_and_port_range_check.c  |  14
-rw-r--r--  vnet/vnet/ip/ip6_forward.c                      |   4
-rw-r--r--  vnet/vnet/ip/lookup.c                           |   9
-rw-r--r--  vnet/vnet/lisp-gpe/lisp_gpe_adjacency.c         |   2
-rw-r--r--  vnet/vnet/lisp-gpe/lisp_gpe_fwd_entry.c         |   6
-rw-r--r--  vnet/vnet/map/map.c                             |   6
-rw-r--r--  vnet/vnet/mpls/interface.c                      |   2
-rw-r--r--  vnet/vnet/sr/sr.c                               |   4
26 files changed, 700 insertions, 99 deletions
diff --git a/vnet/Makefile.am b/vnet/Makefile.am
index bff8418fb6e..56baf40715b 100644
--- a/vnet/Makefile.am
+++ b/vnet/Makefile.am
@@ -805,6 +805,7 @@ nobase_include_HEADERS += \
libvnet_la_SOURCES += \
vnet/dpo/dpo.c \
vnet/dpo/drop_dpo.c \
+ vnet/dpo/ip_null_dpo.c \
vnet/dpo/punt_dpo.c \
vnet/dpo/receive_dpo.c \
vnet/dpo/load_balance.c \
@@ -820,6 +821,7 @@ nobase_include_HEADERS += \
vnet/dpo/punt_dpo.h \
vnet/dpo/classify_dpo.h \
vnet/dpo/receive_dpo.h \
+ vnet/dpo/ip_null_dpo.h \
vnet/dpo/dpo.h
########################################
diff --git a/vnet/vnet/dpo/dpo.c b/vnet/vnet/dpo/dpo.c
index 9f09dff81d1..efee6d6b4eb 100644
--- a/vnet/vnet/dpo/dpo.c
+++ b/vnet/vnet/dpo/dpo.c
@@ -35,6 +35,7 @@
#include <vnet/dpo/receive_dpo.h>
#include <vnet/dpo/punt_dpo.h>
#include <vnet/dpo/classify_dpo.h>
+#include <vnet/dpo/ip_null_dpo.h>
/**
* Array of char* names for the DPO types and protos
@@ -342,7 +343,7 @@ dpo_stack_i (u32 edge,
* in order to get an atomic update of the parent we create a temporary,
* from a copy of the child, and add the next_node. then we copy to the parent
*/
- dpo_id_t tmp = DPO_NULL;
+ dpo_id_t tmp = DPO_INVALID;
dpo_copy(&tmp, parent);
/*
@@ -417,6 +418,7 @@ dpo_module_init (vlib_main_t * vm)
mpls_label_dpo_module_init();
classify_dpo_module_init();
lookup_dpo_module_init();
+ ip_null_dpo_module_init();
return (NULL);
}
diff --git a/vnet/vnet/dpo/dpo.h b/vnet/vnet/dpo/dpo.h
index 7ba47569d8d..2ab936e5f00 100644
--- a/vnet/vnet/dpo/dpo.h
+++ b/vnet/vnet/dpo/dpo.h
@@ -94,6 +94,7 @@ typedef enum dpo_type_t_ {
*/
DPO_FIRST,
DPO_DROP,
+ DPO_IP_NULL,
DPO_PUNT,
/**
* @brief load-balancing over a choice of [un]equal cost paths
@@ -116,6 +117,7 @@ typedef enum dpo_type_t_ {
#define DPO_TYPES { \
[DPO_FIRST] = "dpo-invalid", \
[DPO_DROP] = "dpo-drop", \
+ [DPO_IP_NULL] = "dpo-ip-null", \
[DPO_PUNT] = "dpo-punt", \
[DPO_ADJACENCY] = "dpo-adjacency", \
[DPO_ADJACENCY_INCOMPLETE] = "dpo-adjacency-incomplete", \
@@ -126,7 +128,7 @@ typedef enum dpo_type_t_ {
[DPO_LOAD_BALANCE] = "dpo-load-balance", \
[DPO_LISP_CP] = "dpo-lisp-cp", \
[DPO_CLASSIFY] = "dpo-classify", \
- [DPO_MPLS_LABEL] = "dpo-mpls-label", \
+ [DPO_MPLS_LABEL] = "dpo-mpls-label" \
}
/**
@@ -159,7 +161,7 @@ _Static_assert(sizeof(dpo_id_t) <= sizeof(u64),
/**
* @brief An initialiser for DPos declared on the stack.
*/
-#define DPO_NULL {0}
+#define DPO_INVALID {0}
/**
* @brief Return true if the DPO object is valid, i.e. has been initialised.
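The rename above is purely cosmetic: DPO_INVALID is still the all-zeros initialiser for dpo_id_t variables declared on the stack, and such an object only becomes meaningful once dpo_set()/dpo_copy() bind it to a type. A minimal sketch of the pattern used throughout this patch (the wrapper function is made up; the dpo_set() index shown is the ip4/no-action IP_NULL instance defined later in this diff):

    #include <vnet/dpo/dpo.h>

    static void
    example_stack_dpo (dpo_id_t *parent)
    {
        /* always start a stack-declared DPO from the invalid state */
        dpo_id_t tmp = DPO_INVALID;

        /* bind it to a concrete type/protocol/instance; index 0 is the
         * shared (ip4, IP_NULL_ACTION_NONE) instance from ip_null_dpo.c */
        dpo_set (&tmp, DPO_IP_NULL, DPO_PROTO_IP4, 0);

        /* then copy it into the parent in a single assignment */
        dpo_copy (parent, &tmp);
    }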
diff --git a/vnet/vnet/dpo/drop_dpo.h b/vnet/vnet/dpo/drop_dpo.h
index e7bd8f5156e..436df36c84e 100644
--- a/vnet/vnet/dpo/drop_dpo.h
+++ b/vnet/vnet/dpo/drop_dpo.h
@@ -13,14 +13,8 @@
* limitations under the License.
*/
/**
- * @brief
- * A Data-Path Object is an object that represents actions that are
- * applied to packets are they are switched through VPP.
- *
- * The DPO is a base class that is specialised by other objects to provide
- * concreate actions
- *
- * The VLIB graph nodes are graph of types, the DPO graph is a graph of instances.
+ * @brief The Drop DPO will drop all packets, no questions asked. It is valid
+ * for any packet protocol.
*/
#ifndef __DROP_DPO_H__
diff --git a/vnet/vnet/dpo/ip_null_dpo.c b/vnet/vnet/dpo/ip_null_dpo.c
new file mode 100644
index 00000000000..22682e4eee4
--- /dev/null
+++ b/vnet/vnet/dpo/ip_null_dpo.c
@@ -0,0 +1,408 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * @brief
+ * The data-path object representing the IP NULL action: an IP blackhole
+ * that optionally generates an ICMP reply to the sender.
+ */
+
+#include <vnet/dpo/ip_null_dpo.h>
+#include <vnet/ip/ip.h>
+
+/**
+ * @brief A representation of the IP_NULL DPO
+ */
+typedef struct ip_null_dpo_t_
+{
+ /**
+ * @brief The action to take on a packet
+ */
+ ip_null_dpo_action_t ind_action;
+ /**
+ * @brief The next VLIB node
+ */
+ u32 ind_next_index;
+ /**
+ * rate limits
+ */
+} ip_null_dpo_t;
+
+/**
+ * @brief The IP_NULL DPOs are shared by all routes, hence they are global.
+ * As the name implies this is IP only, hence 2 protocols (IPv4 and IPv6).
+ */
+static ip_null_dpo_t ip_null_dpos[2 * IP_NULL_DPO_ACTION_NUM] = {
+ [0] = {
+ /* proto ip4, no action */
+ .ind_action = IP_NULL_ACTION_NONE,
+ },
+ [1] = {
+ /* proto ip4, action send unreach */
+ .ind_action = IP_NULL_ACTION_SEND_ICMP_UNREACH,
+ },
+ [2] = {
+ /* proto ip4, action send prohibit */
+ .ind_action = IP_NULL_ACTION_SEND_ICMP_PROHIBIT,
+ },
+ [3] = {
+ /* proto ip6, no action */
+ .ind_action = IP_NULL_ACTION_NONE,
+ },
+ [4] = {
+ /* proto ip6, action send unreach */
+ .ind_action = IP_NULL_ACTION_SEND_ICMP_UNREACH,
+ },
+ [5] = {
+ /* proto ip6, action send prohibit */
+ .ind_action = IP_NULL_ACTION_SEND_ICMP_PROHIBIT,
+ },
+};
+
+/**
+ * @brief Action strings
+ */
+const char *ip_null_action_strings[] = IP_NULL_ACTIONS;
+
+void
+ip_null_dpo_add_and_lock (dpo_proto_t proto,
+ ip_null_dpo_action_t action,
+ dpo_id_t *dpo)
+{
+ int i;
+
+ ASSERT((proto == DPO_PROTO_IP4) ||
+ (proto == DPO_PROTO_IP6));
+ ASSERT(action < IP_NULL_DPO_ACTION_NUM);
+
+ i = (proto == DPO_PROTO_IP4 ? 0 : 1);
+
+ dpo_set(dpo, DPO_IP_NULL, proto, (i*IP_NULL_DPO_ACTION_NUM) + action);
+}
+
+always_inline const ip_null_dpo_t*
+ip_null_dpo_get (index_t indi)
+{
+ return (&ip_null_dpos[indi]);
+}
+
+static void
+ip_null_dpo_lock (dpo_id_t *dpo)
+{
+ /*
+ * not maintaining a lock count on the ip_null, they are const global and
+ * never die.
+ */
+}
+static void
+ip_null_dpo_unlock (dpo_id_t *dpo)
+{
+}
+
+static u8*
+format_ip_null_dpo (u8 *s, va_list *ap)
+{
+ index_t index = va_arg(*ap, index_t);
+ CLIB_UNUSED(u32 indent) = va_arg(*ap, u32);
+ const ip_null_dpo_t *ind;
+ dpo_proto_t proto;
+
+ ind = ip_null_dpo_get(index);
+ proto = (index < IP_NULL_DPO_ACTION_NUM ? DPO_PROTO_IP4 : DPO_PROTO_IP6);
+
+ return (format(s, "%U-null action:%s",
+ format_dpo_proto, proto,
+ ip_null_action_strings[ind->ind_action]));
+}
+
+const static dpo_vft_t ip_null_vft = {
+ .dv_lock = ip_null_dpo_lock,
+ .dv_unlock = ip_null_dpo_unlock,
+ .dv_format = format_ip_null_dpo,
+};
+
+/**
+ * @brief The per-protocol VLIB graph nodes that are assigned to an ip_null
+ * object.
+ *
+ * These are the graph nodes from which an ip_null is the parent object in
+ * the DPO-graph.
+ */
+const static char* const ip4_null_nodes[] =
+{
+ "ip4-null",
+ NULL,
+};
+const static char* const ip6_null_nodes[] =
+{
+ "ip6-null",
+ NULL,
+};
+
+const static char* const * const ip_null_nodes[DPO_PROTO_NUM] =
+{
+ [DPO_PROTO_IP4] = ip4_null_nodes,
+ [DPO_PROTO_IP6] = ip6_null_nodes,
+};
+
+typedef struct ip_null_dpo_trace_t_
+{
+ index_t ind_index;
+} ip_null_dpo_trace_t;
+
+/**
+ * @brief Exit nodes from an IP_NULL
+ */
+typedef enum ip_null_next_t_
+{
+ IP_NULL_NEXT_DROP,
+ IP_NULL_NEXT_ICMP,
+ IP_NULL_NEXT_NUM,
+} ip_null_next_t;
+
+always_inline uword
+ip_null_dpo_switch (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame,
+ u8 is_ip4)
+{
+ u32 n_left_from, next_index, *from, *to_next;
+ static f64 time_last_seed_change = -1e100;
+ static u32 hash_seeds[3];
+ static uword hash_bitmap[256 / BITS (uword)];
+ f64 time_now;
+
+ from = vlib_frame_vector_args (frame);
+ n_left_from = frame->n_vectors;
+
+ time_now = vlib_time_now (vm);
+ if (time_now - time_last_seed_change > 1e-1)
+ {
+ uword i;
+ u32 * r = clib_random_buffer_get_data (&vm->random_buffer,
+ sizeof (hash_seeds));
+ for (i = 0; i < ARRAY_LEN (hash_seeds); i++)
+ hash_seeds[i] = r[i];
+
+ /* Mark all hash keys as not seen before. */
+ for (i = 0; i < ARRAY_LEN (hash_bitmap); i++)
+ hash_bitmap[i] = 0;
+
+ time_last_seed_change = time_now;
+ }
+
+ next_index = node->cached_next_index;
+
+ while (n_left_from > 0)
+ {
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left_from > 0 && n_left_to_next > 0)
+ {
+ u32 a0, b0, c0, m0, drop0;
+ vlib_buffer_t *p0;
+ u32 bi0, indi0, next0;
+ const ip_null_dpo_t *ind0;
+ uword bm0;
+
+ bi0 = from[0];
+ to_next[0] = bi0;
+ from += 1;
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ p0 = vlib_get_buffer (vm, bi0);
+
+ /* fetch the IP_NULL DPO this packet resolved to */
+ indi0 = vnet_buffer (p0)->ip.adj_index[VLIB_TX];
+ ind0 = ip_null_dpo_get(indi0);
+ next0 = IP_NULL_NEXT_DROP;
+
+ /*
+ * rate limit - don't DoS the sender.
+ */
+ a0 = hash_seeds[0];
+ b0 = hash_seeds[1];
+ c0 = hash_seeds[2];
+
+ if (is_ip4)
+ {
+ ip4_header_t *ip0 = vlib_buffer_get_current (p0);
+
+ a0 ^= ip0->dst_address.data_u32;
+ b0 ^= ip0->src_address.data_u32;
+
+ hash_v3_finalize32 (a0, b0, c0);
+ }
+ else
+ {
+ ip6_header_t *ip0 = vlib_buffer_get_current (p0);
+
+ a0 ^= ip0->dst_address.as_u32[0];
+ b0 ^= ip0->src_address.as_u32[0];
+ c0 ^= ip0->src_address.as_u32[1];
+
+ hash_v3_mix32 (a0, b0, c0);
+
+ a0 ^= ip0->dst_address.as_u32[1];
+ b0 ^= ip0->src_address.as_u32[2];
+ c0 ^= ip0->src_address.as_u32[3];
+
+ hash_v3_finalize32 (a0, b0, c0);
+ }
+
+ c0 &= BITS (hash_bitmap) - 1;
+ c0 = c0 / BITS (uword);
+ m0 = (uword) 1 << (c0 % BITS (uword));
+
+ bm0 = hash_bitmap[c0];
+ drop0 = (bm0 & m0) != 0;
+
+ /* Mark it as seen. */
+ hash_bitmap[c0] = bm0 | m0;
+
+ if (PREDICT_FALSE(!drop0))
+ {
+ if (is_ip4)
+ {
+ /*
+ * There's a trade-off here: this conditional statement
+ * versus a graph node per condition. Given that the
+ * expected number of packets to reach a null route is 0,
+ * we favour the run-time cost over the graph complexity.
+ */
+ if (IP_NULL_ACTION_SEND_ICMP_UNREACH == ind0->ind_action)
+ {
+ next0 = IP_NULL_NEXT_ICMP;
+ icmp4_error_set_vnet_buffer(
+ p0,
+ ICMP4_destination_unreachable,
+ ICMP4_destination_unreachable_destination_unreachable_host,
+ 0);
+ }
+ else if (IP_NULL_ACTION_SEND_ICMP_PROHIBIT == ind0->ind_action)
+ {
+ next0 = IP_NULL_NEXT_ICMP;
+ icmp4_error_set_vnet_buffer(
+ p0,
+ ICMP4_destination_unreachable,
+ ICMP4_destination_unreachable_host_administratively_prohibited,
+ 0);
+ }
+ }
+ else
+ {
+ if (IP_NULL_ACTION_SEND_ICMP_UNREACH == ind0->ind_action)
+ {
+ next0 = IP_NULL_NEXT_ICMP;
+ icmp6_error_set_vnet_buffer(
+ p0,
+ ICMP6_destination_unreachable,
+ ICMP6_destination_unreachable_no_route_to_destination,
+ 0);
+ }
+ else if (IP_NULL_ACTION_SEND_ICMP_PROHIBIT == ind0->ind_action)
+ {
+ next0 = IP_NULL_NEXT_ICMP;
+ icmp6_error_set_vnet_buffer(
+ p0,
+ ICMP6_destination_unreachable,
+ ICMP6_destination_unreachable_destination_administratively_prohibited,
+ 0);
+ }
+ }
+ }
+
+ if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ ip_null_dpo_trace_t *tr = vlib_add_trace (vm, node, p0,
+ sizeof (*tr));
+ tr->ind_index = indi0;
+ }
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+ n_left_to_next, bi0, next0);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ return frame->n_vectors;
+}
+
+static u8 *
+format_ip_null_dpo_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ ip_null_dpo_trace_t *t = va_arg (*args, ip_null_dpo_trace_t *);
+
+ s = format (s, "%U", format_ip_null_dpo, t->ind_index, 0);
+ return s;
+}
+
+static uword
+ip4_null_dpo_switch (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return (ip_null_dpo_switch(vm, node, frame, 1));
+}
+
+/**
+ * @brief VLIB graph node for the IPv4 IP_NULL DPO
+ */
+VLIB_REGISTER_NODE (ip4_null_dpo_node) = {
+ .function = ip4_null_dpo_switch,
+ .name = "ip4-null",
+ .vector_size = sizeof (u32),
+
+ .format_trace = format_ip_null_dpo_trace,
+ .n_next_nodes = IP_NULL_NEXT_NUM,
+ .next_nodes = {
+ [IP_NULL_NEXT_DROP] = "ip4-drop",
+ [IP_NULL_NEXT_ICMP] = "ip4-icmp-error",
+ },
+};
+
+static uword
+ip6_null_dpo_switch (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return (ip_null_dpo_switch(vm, node, frame, 0));
+}
+
+/**
+ * @brief VLIB graph node for the IPv6 IP_NULL DPO
+ */
+VLIB_REGISTER_NODE (ip6_null_dpo_node) = {
+ .function = ip6_null_dpo_switch,
+ .name = "ip6-null",
+ .vector_size = sizeof (u32),
+
+ .format_trace = format_ip_null_dpo_trace,
+ .n_next_nodes = IP_NULL_NEXT_NUM,
+ .next_nodes = {
+ [IP_NULL_NEXT_DROP] = "ip6-drop",
+ [IP_NULL_NEXT_ICMP] = "ip6-icmp-error",
+ },
+};
+
+void
+ip_null_dpo_module_init (void)
+{
+ dpo_register(DPO_IP_NULL, &ip_null_vft, ip_null_nodes);
+}
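The ICMP rate limit in ip_null_dpo_switch() above follows the same pattern as the ip4-arp throttle touched later in this patch: hash the flow into a small bitmap that is cleared (and re-seeded) every 100 ms, and only generate a reply the first time a given hash bucket is hit within that window. A self-contained sketch of the idea in plain C; the names and the hash function are illustrative, not the VPP helpers:

    #include <stdint.h>
    #include <string.h>

    #define THROTTLE_BITS   256
    #define THROTTLE_WORDS  (THROTTLE_BITS / 64)

    static uint64_t throttle_bitmap[THROTTLE_WORDS];
    static uint32_t throttle_seed;

    /* call once per window (e.g. every 100 ms): forget all flows, re-seed */
    static void
    throttle_reset (uint32_t new_seed)
    {
        memset (throttle_bitmap, 0, sizeof (throttle_bitmap));
        throttle_seed = new_seed;
    }

    /* returns 1 if this flow was already seen in the current window
     * (suppress the ICMP reply), 0 if it is the first hit (reply allowed) */
    static int
    throttle_check (uint32_t src, uint32_t dst)
    {
        uint32_t hash = (src ^ dst ^ throttle_seed) * 2654435761u;
        uint32_t bit  = hash & (THROTTLE_BITS - 1);
        uint64_t mask = 1ULL << (bit & 63);
        uint64_t word = throttle_bitmap[bit >> 6];

        throttle_bitmap[bit >> 6] = word | mask;   /* mark as seen */
        return (word & mask) != 0;
    }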
diff --git a/vnet/vnet/dpo/ip_null_dpo.h b/vnet/vnet/dpo/ip_null_dpo.h
new file mode 100644
index 00000000000..002a2a7016d
--- /dev/null
+++ b/vnet/vnet/dpo/ip_null_dpo.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * @brief
+ * The IP NULL DPO represents the rubbish bin for IP traffic. Without an
+ * action specified (i.e. send ICMP type X to the sender) it is equivalent
+ * to using a drop DPO. However, in contrast to the drop DPO, any route that
+ * resolves via an IP NULL is considered 'resolved' by FIB, i.e. an IP NULL
+ * is used when the control plane explicitly expresses the desire to drop
+ * packets. Drop DPOs are used internally by FIB when resolution is not
+ * possible.
+ *
+ * Any replies to sender are rate limited.
+ */
+
+#ifndef __IP_NULL_DPO_H__
+#define __IP_NULL_DPO_H__
+
+#include <vnet/dpo/dpo.h>
+
+/**
+ * @brief Actions to take when a packet encounters the NULL DPO
+ */
+typedef enum ip_null_dpo_action_t_
+{
+ IP_NULL_ACTION_NONE,
+ IP_NULL_ACTION_SEND_ICMP_UNREACH,
+ IP_NULL_ACTION_SEND_ICMP_PROHIBIT,
+} ip_null_dpo_action_t;
+
+#define IP_NULL_ACTIONS { \
+ [IP_NULL_ACTION_NONE] = "discard", \
+ [IP_NULL_ACTION_SEND_ICMP_UNREACH] = "send-unreachable", \
+ [IP_NULL_ACTION_SEND_ICMP_PROHIBIT] = "send-prohibited", \
+}
+
+#define IP_NULL_DPO_ACTION_NUM (IP_NULL_ACTION_SEND_ICMP_PROHIBIT+1)
+
+extern void ip_null_dpo_add_and_lock (dpo_proto_t proto,
+ ip_null_dpo_action_t action,
+ dpo_id_t *dpo);
+
+extern void ip_null_dpo_module_init(void);
+
+#endif
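Putting the new header to use: a client that wants a blackhole route with ICMP-unreachable replies first obtains the shared IP_NULL DPO and then sources a FIB entry with it, much as the fib_test.c change later in this patch does. A rough sketch under the assumption that the caller already knows its fib_index and prefix (the wrapper function is made up):

    #include <vnet/dpo/ip_null_dpo.h>
    #include <vnet/fib/fib_table.h>

    static void
    example_add_null_route (u32 fib_index, const fib_prefix_t *pfx)
    {
        dpo_id_t dpo = DPO_INVALID;

        /* all IP_NULL DPOs are shared/global; this just fills in 'dpo' */
        ip_null_dpo_add_and_lock (DPO_PROTO_IP4,
                                  IP_NULL_ACTION_SEND_ICMP_UNREACH,
                                  &dpo);

        /* source the prefix with the NULL DPO; EXCLUSIVE means "use
         * exactly this DPO", as in the fib_test.c case below */
        fib_table_entry_special_dpo_add (fib_index, pfx,
                                         FIB_SOURCE_SPECIAL,
                                         FIB_ENTRY_FLAG_EXCLUSIVE,
                                         &dpo);
    }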
diff --git a/vnet/vnet/fib/fib_entry.c b/vnet/vnet/fib/fib_entry.c
index 404f0f40da7..1047c50e1d2 100644
--- a/vnet/vnet/fib/fib_entry.c
+++ b/vnet/vnet/fib/fib_entry.c
@@ -753,39 +753,26 @@ fib_entry_post_update_actions (fib_entry_t *fib_entry,
fib_entry_post_install_actions(fib_entry, source, old_flags);
}
-void
-fib_entry_special_add (fib_node_index_t fib_entry_index,
- fib_source_t source,
- fib_entry_flag_t flags,
- const dpo_id_t *dpo)
+static void
+fib_entry_source_change (fib_entry_t *fib_entry,
+ fib_source_t best_source,
+ fib_source_t new_source,
+ fib_entry_flag_t old_flags)
{
- fib_source_t best_source;
- fib_entry_flag_t bflags;
- fib_entry_t *fib_entry;
- fib_entry_src_t *bsrc;
-
- fib_entry = fib_entry_get(fib_entry_index);
-
- bsrc = fib_entry_get_best_src_i(fib_entry);
- best_source = fib_entry_src_get_source(bsrc);
- bflags = fib_entry_src_get_flags(bsrc);
-
- fib_entry = fib_entry_src_action_add(fib_entry, source, flags, dpo);
-
/*
* if the path list for the source passed is invalid,
* then we need to create a new one. else we are updating
* an existing.
*/
- if (source < best_source)
+ if (new_source < best_source)
{
/*
* we have a new winning source.
*/
fib_entry_src_action_deactivate(fib_entry, best_source);
- fib_entry_src_action_activate(fib_entry, source);
+ fib_entry_src_action_activate(fib_entry, new_source);
}
- else if (source > best_source)
+ else if (new_source > best_source)
{
/*
* the new source loses. nothing to do here.
@@ -800,13 +787,56 @@ fib_entry_special_add (fib_node_index_t fib_entry_index,
* But the path-list was updated, which will contribute new forwarding,
* so install it.
*/
- fib_entry_src_action_deactivate(fib_entry, source);
- fib_entry_src_action_activate(fib_entry, source);
+ fib_entry_src_action_deactivate(fib_entry, new_source);
+ fib_entry_src_action_activate(fib_entry, new_source);
}
- fib_entry_post_update_actions(fib_entry, source, bflags);
+ fib_entry_post_update_actions(fib_entry, new_source, old_flags);
+}
+
+void
+fib_entry_special_add (fib_node_index_t fib_entry_index,
+ fib_source_t source,
+ fib_entry_flag_t flags,
+ const dpo_id_t *dpo)
+{
+ fib_source_t best_source;
+ fib_entry_flag_t bflags;
+ fib_entry_t *fib_entry;
+ fib_entry_src_t *bsrc;
+
+ fib_entry = fib_entry_get(fib_entry_index);
+
+ bsrc = fib_entry_get_best_src_i(fib_entry);
+ best_source = fib_entry_src_get_source(bsrc);
+ bflags = fib_entry_src_get_flags(bsrc);
+
+ fib_entry = fib_entry_src_action_add(fib_entry, source, flags, dpo);
+ fib_entry_source_change(fib_entry, best_source, source, bflags);
+}
+
+void
+fib_entry_special_update (fib_node_index_t fib_entry_index,
+ fib_source_t source,
+ fib_entry_flag_t flags,
+ const dpo_id_t *dpo)
+{
+ fib_source_t best_source;
+ fib_entry_flag_t bflags;
+ fib_entry_t *fib_entry;
+ fib_entry_src_t *bsrc;
+
+ fib_entry = fib_entry_get(fib_entry_index);
+
+ bsrc = fib_entry_get_best_src_i(fib_entry);
+ best_source = fib_entry_src_get_source(bsrc);
+ bflags = fib_entry_src_get_flags(bsrc);
+
+ fib_entry = fib_entry_src_action_update(fib_entry, source, flags, dpo);
+ fib_entry_source_change(fib_entry, best_source, source, bflags);
}
+
void
fib_entry_path_add (fib_node_index_t fib_entry_index,
fib_source_t source,
diff --git a/vnet/vnet/fib/fib_entry.h b/vnet/vnet/fib/fib_entry.h
index bfebe5dd6c8..2b7ea636acb 100644
--- a/vnet/vnet/fib/fib_entry.h
+++ b/vnet/vnet/fib/fib_entry.h
@@ -446,6 +446,10 @@ extern void fib_entry_special_add(fib_node_index_t fib_entry_index,
fib_source_t source,
fib_entry_flag_t flags,
const dpo_id_t *dpo);
+extern void fib_entry_special_update(fib_node_index_t fib_entry_index,
+ fib_source_t source,
+ fib_entry_flag_t flags,
+ const dpo_id_t *dpo);
extern fib_entry_src_flag_t fib_entry_special_remove(fib_node_index_t fib_entry_index,
fib_source_t source);
diff --git a/vnet/vnet/fib/fib_entry_src.c b/vnet/vnet/fib/fib_entry_src.c
index 66bb3df2843..2cbdf187c05 100644
--- a/vnet/vnet/fib/fib_entry_src.c
+++ b/vnet/vnet/fib/fib_entry_src.c
@@ -745,6 +745,56 @@ fib_entry_src_action_add (fib_entry_t *fib_entry,
return (fib_entry);
}
+/*
+ * fib_entry_src_action_update
+ *
+ * Adding a source can result in a new fib_entry being created, which
+ * can in turn mean the pool is realloc'd and thus the entry passed as
+ * an argument is also realloc'd.
+ * @return the original entry
+ */
+fib_entry_t *
+fib_entry_src_action_update (fib_entry_t *fib_entry,
+ fib_source_t source,
+ fib_entry_flag_t flags,
+ const dpo_id_t *dpo)
+{
+ fib_node_index_t fib_entry_index, old_path_list_index;
+ fib_entry_src_t *esrc;
+
+ esrc = fib_entry_src_find_or_create(fib_entry, source, NULL);
+
+ if (NULL == esrc)
+ return (fib_entry_src_action_add(fib_entry, source, flags, dpo));
+
+ old_path_list_index = esrc->fes_pl;
+ esrc->fes_entry_flags = flags;
+
+ /*
+ * save variable so we can recover from a fib_entry realloc.
+ */
+ fib_entry_index = fib_entry_get_index(fib_entry);
+
+ if (NULL != fib_entry_src_vft[source].fesv_add)
+ {
+ fib_entry_src_vft[source].fesv_add(esrc,
+ fib_entry,
+ flags,
+ fib_entry_get_proto(fib_entry),
+ dpo);
+ }
+
+ fib_entry = fib_entry_get(fib_entry_index);
+
+ esrc->fes_flags |= FIB_ENTRY_SRC_FLAG_ADDED;
+
+ fib_path_list_lock(esrc->fes_pl);
+ fib_path_list_unlock(old_path_list_index);
+
+ return (fib_entry);
+}
+
+
fib_entry_src_flag_t
fib_entry_src_action_remove (fib_entry_t *fib_entry,
fib_source_t source)
diff --git a/vnet/vnet/fib/fib_entry_src.h b/vnet/vnet/fib/fib_entry_src.h
index d70aabc9c00..0b98c1c35c0 100644
--- a/vnet/vnet/fib/fib_entry_src.h
+++ b/vnet/vnet/fib/fib_entry_src.h
@@ -233,6 +233,10 @@ extern fib_entry_t* fib_entry_src_action_add(fib_entry_t *fib_entry,
fib_source_t source,
fib_entry_flag_t flags,
const dpo_id_t *dpo);
+extern fib_entry_t* fib_entry_src_action_update(fib_entry_t *fib_entry,
+ fib_source_t source,
+ fib_entry_flag_t flags,
+ const dpo_id_t *dpo);
extern fib_entry_src_flag_t fib_entry_src_action_remove(fib_entry_t *fib_entry,
fib_source_t source);
diff --git a/vnet/vnet/fib/fib_entry_src_mpls.c b/vnet/vnet/fib/fib_entry_src_mpls.c
index 4079d8fc8cd..4c316f79a84 100644
--- a/vnet/vnet/fib/fib_entry_src_mpls.c
+++ b/vnet/vnet/fib/fib_entry_src_mpls.c
@@ -104,7 +104,7 @@ fib_entry_src_mpls_set_data (fib_entry_src_t *src,
.fp_label = label,
};
fib_node_index_t fib_index;
- dpo_id_t dpo = DPO_NULL;
+ dpo_id_t dpo = DPO_INVALID;
/*
* adding a new local label. make sure the MPLS fib exists.
diff --git a/vnet/vnet/fib/fib_path.c b/vnet/vnet/fib/fib_path.c
index b67fedffe72..e92e2333272 100644
--- a/vnet/vnet/fib/fib_path.c
+++ b/vnet/vnet/fib/fib_path.c
@@ -563,7 +563,7 @@ fib_path_recursive_adj_update (fib_path_t *path,
fib_forward_chain_type_t fct,
dpo_id_t *dpo)
{
- dpo_id_t via_dpo = DPO_NULL;
+ dpo_id_t via_dpo = DPO_INVALID;
/*
* get the DPO to resolve through from the via-entry
diff --git a/vnet/vnet/fib/fib_path_ext.c b/vnet/vnet/fib/fib_path_ext.c
index 05e62d96a54..6603b64f02f 100644
--- a/vnet/vnet/fib/fib_path_ext.c
+++ b/vnet/vnet/fib/fib_path_ext.c
@@ -135,7 +135,7 @@ fib_path_ext_stack (fib_path_ext_t *path_ext,
break;
}
- dpo_id_t via_dpo = DPO_NULL;
+ dpo_id_t via_dpo = DPO_INVALID;
/*
* The next object in the graph after the imposition of the label
diff --git a/vnet/vnet/fib/fib_table.c b/vnet/vnet/fib/fib_table.c
index d293d8152af..54bc8081993 100644
--- a/vnet/vnet/fib/fib_table.c
+++ b/vnet/vnet/fib/fib_table.c
@@ -324,6 +324,48 @@ fib_table_entry_special_dpo_add (u32 fib_index,
}
fib_node_index_t
+fib_table_entry_special_dpo_update (u32 fib_index,
+ const fib_prefix_t *prefix,
+ fib_source_t source,
+ fib_entry_flag_t flags,
+ const dpo_id_t *dpo)
+{
+ fib_node_index_t fib_entry_index;
+ fib_table_t *fib_table;
+
+ fib_table = fib_table_get(fib_index, prefix->fp_proto);
+ fib_entry_index = fib_table_lookup_exact_match_i(fib_table, prefix);
+
+ if (FIB_NODE_INDEX_INVALID == fib_entry_index)
+ {
+ fib_entry_index = fib_entry_create_special(fib_index, prefix,
+ source, flags,
+ dpo);
+
+ fib_table_entry_insert(fib_table, prefix, fib_entry_index);
+ fib_table->ft_src_route_counts[source]++;
+ }
+ else
+ {
+ int was_sourced;
+
+ was_sourced = fib_entry_is_sourced(fib_entry_index, source);
+
+ if (was_sourced)
+ fib_entry_special_update(fib_entry_index, source, flags, dpo);
+ else
+ fib_entry_special_add(fib_entry_index, source, flags, dpo);
+
+ if (was_sourced != fib_entry_is_sourced(fib_entry_index, source))
+ {
+ fib_table->ft_src_route_counts[source]++;
+ }
+ }
+
+ return (fib_entry_index);
+}
+
+fib_node_index_t
fib_table_entry_special_add (u32 fib_index,
const fib_prefix_t *prefix,
fib_source_t source,
@@ -331,7 +373,7 @@ fib_table_entry_special_add (u32 fib_index,
adj_index_t adj_index)
{
fib_node_index_t fib_entry_index;
- dpo_id_t tmp_dpo = DPO_NULL;
+ dpo_id_t tmp_dpo = DPO_INVALID;
if (ADJ_INDEX_INVALID != adj_index)
{
@@ -354,22 +396,6 @@ fib_table_entry_special_add (u32 fib_index,
}
void
-fib_table_entry_special_dpo_update (fib_node_index_t fib_entry_index,
- fib_source_t source,
- fib_entry_flag_t flags,
- const dpo_id_t *dpo)
-{
- fib_prefix_t prefix;
- u32 fib_index;
-
- fib_entry_get_prefix(fib_entry_index, &prefix);
- fib_index = fib_entry_get_fib_index(fib_entry_index);
-
- fib_table_entry_special_dpo_add(fib_index, &prefix, source, flags, dpo);
- fib_table_entry_special_remove(fib_index, &prefix, source);
-}
-
-void
fib_table_entry_special_remove (u32 fib_index,
const fib_prefix_t *prefix,
fib_source_t source)
diff --git a/vnet/vnet/fib/fib_table.h b/vnet/vnet/fib/fib_table.h
index d7c604f9de9..ef7599a7bf5 100644
--- a/vnet/vnet/fib/fib_table.h
+++ b/vnet/vnet/fib/fib_table.h
@@ -210,9 +210,8 @@ extern fib_node_index_t fib_table_entry_special_dpo_add(u32 fib_index,
* Instead the client/source provides the DPO to link to.
* Special entries are add/remove reference counted per-source. So n
* 'removes' are required for n 'adds', if the entry is no longer required.
- * An 'update' can only be used after an 'add' and is therefore assumed to act
- * on the reference instance of that add (an update is implemented as add/remove
- * pair).
+ * An 'update' behaves as an 'add' if the source has not already sourced the
+ * entry; otherwise it acts on (i.e. replaces the DPO of) the existing
+ * reference instance of that add.
*
* @param fib_entry_index
* The index of the FIB entry to update
@@ -229,10 +228,11 @@ extern fib_node_index_t fib_table_entry_special_dpo_add(u32 fib_index,
* @return
* the index of the fib_entry_t that is created (or existed already).
*/
-extern void fib_table_entry_special_dpo_update (fib_node_index_t fib_entry_index,
- fib_source_t source,
- fib_entry_flag_t stype,
- const dpo_id_t *dpo);
+extern fib_node_index_t fib_table_entry_special_dpo_update (u32 fib_index,
+ const fib_prefix_t *prefix,
+ fib_source_t source,
+ fib_entry_flag_t stype,
+ const dpo_id_t *dpo);
/**
* @brief
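Because 'update' now takes the same (fib_index, prefix) pair as 'add', a client can swap the DPO behind a special entry without tracking the entry index, as exercised by the fib_test.c change below. A small sketch of the calling pattern (the wrapper function and its arguments are illustrative; the caller supplies real DPOs):

    #include <vnet/fib/fib_table.h>

    static void
    example_swap_special_dpo (u32 fib_index, const fib_prefix_t *pfx,
                              const dpo_id_t *dpo_a, const dpo_id_t *dpo_b)
    {
        /* first call: the source is not yet present, so this acts as an add */
        fib_table_entry_special_dpo_update (fib_index, pfx, FIB_SOURCE_SPECIAL,
                                            FIB_ENTRY_FLAG_EXCLUSIVE, dpo_a);

        /* second call: same source, different DPO - updated in place */
        fib_table_entry_special_dpo_update (fib_index, pfx, FIB_SOURCE_SPECIAL,
                                            FIB_ENTRY_FLAG_EXCLUSIVE, dpo_b);

        /* withdraw the source when it is no longer required */
        fib_table_entry_special_remove (fib_index, pfx, FIB_SOURCE_SPECIAL);
    }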
diff --git a/vnet/vnet/fib/fib_test.c b/vnet/vnet/fib/fib_test.c
index 9b4179569a9..800f4e6a667 100644
--- a/vnet/vnet/fib/fib_test.c
+++ b/vnet/vnet/fib/fib_test.c
@@ -23,6 +23,7 @@
#include <vnet/dpo/lookup_dpo.h>
#include <vnet/dpo/drop_dpo.h>
#include <vnet/dpo/receive_dpo.h>
+#include <vnet/dpo/ip_null_dpo.h>
#include <vnet/mpls/mpls.h>
@@ -181,7 +182,7 @@ fib_test_urpf_is_equal (fib_node_index_t fei,
fib_forward_chain_type_t fct,
u32 num, ...)
{
- dpo_id_t dpo = DPO_NULL;
+ dpo_id_t dpo = DPO_INVALID;
fib_urpf_list_t *urpf;
index_t ui;
va_list ap;
@@ -940,7 +941,7 @@ fib_test_v4 (void)
* An EXCLUSIVE route; one where the user (me) provides the exclusive
* adjacency through which the route will resolve
*/
- dpo_id_t ex_dpo = DPO_NULL;
+ dpo_id_t ex_dpo = DPO_INVALID;
lookup_dpo_add_or_lock_w_fib_index(fib_index,
DPO_PROTO_IP4,
@@ -958,6 +959,21 @@ fib_test_v4 (void)
FIB_TEST(!dpo_cmp(&ex_dpo, load_balance_get_bucket(dpo->dpoi_index, 0)),
"exclusive remote uses lookup DPO");
+ /*
+ * update the exclusive to use a different DPO
+ */
+ ip_null_dpo_add_and_lock(FIB_PROTOCOL_IP4,
+ IP_NULL_ACTION_SEND_ICMP_UNREACH,
+ &ex_dpo);
+ fib_table_entry_special_dpo_update(fib_index,
+ &ex_pfx,
+ FIB_SOURCE_SPECIAL,
+ FIB_ENTRY_FLAG_EXCLUSIVE,
+ &ex_dpo);
+ dpo = fib_entry_contribute_ip_forwarding(fei);
+ FIB_TEST(!dpo_cmp(&ex_dpo, load_balance_get_bucket(dpo->dpoi_index, 0)),
+ "exclusive remote uses now uses NULL DPO");
+
fib_table_entry_special_remove(fib_index,
&ex_pfx,
FIB_SOURCE_SPECIAL);
@@ -1181,7 +1197,7 @@ fib_test_v4 (void)
/*
* test the uRPF check functions
*/
- dpo_id_t dpo_44 = DPO_NULL;
+ dpo_id_t dpo_44 = DPO_INVALID;
index_t urpfi;
fib_entry_contribute_forwarding(fei, FIB_FORW_CHAIN_TYPE_UNICAST_IP4, &dpo_44);
@@ -4822,7 +4838,7 @@ fib_test_validate_entry (fib_node_index_t fei,
...)
{
const load_balance_t *lb;
- dpo_id_t dpo = DPO_NULL;
+ dpo_id_t dpo = DPO_INVALID;
fib_prefix_t pfx;
index_t fw_lbi;
u32 fib_index;
@@ -5214,7 +5230,7 @@ fib_test_label (void)
/*
* get and lock a reference to the non-eos of the via entry 1.1.1.1/32
*/
- dpo_id_t non_eos_1_1_1_1 = DPO_NULL;
+ dpo_id_t non_eos_1_1_1_1 = DPO_INVALID;
fib_entry_contribute_forwarding(fei,
FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS,
&non_eos_1_1_1_1);
@@ -5273,7 +5289,7 @@ fib_test_label (void)
"2.2.2.2.2/32 LB 1 buckets via: "
"label 1600 over 1.1.1.1");
- dpo_id_t dpo_44 = DPO_NULL;
+ dpo_id_t dpo_44 = DPO_INVALID;
index_t urpfi;
fib_entry_contribute_forwarding(fei, FIB_FORW_CHAIN_TYPE_UNICAST_IP4, &dpo_44);
@@ -5339,7 +5355,7 @@ fib_test_label (void)
* test that the pre-failover load-balance has been in-place
* modified
*/
- dpo_id_t current = DPO_NULL;
+ dpo_id_t current = DPO_INVALID;
fib_entry_contribute_forwarding(fei,
FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS,
&current);
@@ -5663,7 +5679,7 @@ fib_test_label (void)
"1.1.1.2/32 LB 1 buckets via: "
"label 101 over 10.10.10.1");
- dpo_id_t non_eos_1_1_1_2 = DPO_NULL;
+ dpo_id_t non_eos_1_1_1_2 = DPO_INVALID;
fib_entry_contribute_forwarding(fib_table_lookup(fib_index,
&pfx_1_1_1_1_s_32),
FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS,
@@ -5781,7 +5797,7 @@ fib_test_label (void)
.ip4.as_u32 = clib_host_to_net_u32(0x02020203),
},
};
- dpo_id_t ip_1_1_1_1 = DPO_NULL;
+ dpo_id_t ip_1_1_1_1 = DPO_INVALID;
fib_table_entry_update_one_path(fib_index,
&pfx_2_2_2_3_s_32,
@@ -6396,7 +6412,7 @@ lfib_test_deagg (void)
const mpls_label_t deag_label = 50;
const u32 lfib_index = 0;
const u32 fib_index = 0;
- dpo_id_t dpo = DPO_NULL;
+ dpo_id_t dpo = DPO_INVALID;
const dpo_id_t *dpo1;
fib_node_index_t lfe;
lookup_dpo_t *lkd;
diff --git a/vnet/vnet/fib/mpls_fib.c b/vnet/vnet/fib/mpls_fib.c
index 8f1ccef9061..6a9b1ac2989 100644
--- a/vnet/vnet/fib/mpls_fib.c
+++ b/vnet/vnet/fib/mpls_fib.c
@@ -89,7 +89,7 @@ mpls_fib_index_from_table_id (u32 table_id)
static u32
mpls_fib_create_with_table_id (u32 table_id)
{
- dpo_id_t dpo = DPO_NULL;
+ dpo_id_t dpo = DPO_INVALID;
fib_table_t *fib_table;
mpls_eos_bit_t eos;
mpls_fib_t *mf;
diff --git a/vnet/vnet/ip/ip4_forward.c b/vnet/vnet/ip/ip4_forward.c
index 9e712f5493a..bfa9f0a54f4 100644
--- a/vnet/vnet/ip/ip4_forward.c
+++ b/vnet/vnet/ip/ip4_forward.c
@@ -649,7 +649,7 @@ ip4_add_interface_routes (u32 sw_if_index,
lm->classify_table_index_by_sw_if_index [sw_if_index];
if (classify_table_index != (u32) ~0)
{
- dpo_id_t dpo = DPO_NULL;
+ dpo_id_t dpo = DPO_INVALID;
dpo_set(&dpo,
DPO_CLASSIFY,
@@ -1899,10 +1899,6 @@ ip4_arp_inline (vlib_main_t * vm,
adj0 = ip_get_adjacency (lm, adj_index0);
ip0 = vlib_buffer_get_current (p0);
- /*
- * this is the Glean case, so we are ARPing for the
- * packet's destination
- */
a0 = hash_seeds[0];
b0 = hash_seeds[1];
c0 = hash_seeds[2];
@@ -1912,6 +1908,10 @@ ip4_arp_inline (vlib_main_t * vm,
if (is_glean)
{
+ /*
+ * this is the Glean case, so we are ARPing for the
+ * packet's destination
+ */
a0 ^= ip0->dst_address.data_u32;
}
else
@@ -3296,7 +3296,7 @@ int vnet_set_ip4_classify_intfc (vlib_main_t * vm, u32 sw_if_index,
if (table_index != (u32) ~0)
{
- dpo_id_t dpo = DPO_NULL;
+ dpo_id_t dpo = DPO_INVALID;
dpo_set(&dpo,
DPO_CLASSIFY,
diff --git a/vnet/vnet/ip/ip4_source_and_port_range_check.c b/vnet/vnet/ip/ip4_source_and_port_range_check.c
index 677f610fd45..6ee648cea51 100644
--- a/vnet/vnet/ip/ip4_source_and_port_range_check.c
+++ b/vnet/vnet/ip/ip4_source_and_port_range_check.c
@@ -955,7 +955,7 @@ add_port_range_adjacency (u32 fib_index,
u32 length, u16 * low_ports, u16 * high_ports)
{
protocol_port_range_dpo_t *ppr_dpo;
- dpo_id_t dpop = DPO_NULL;
+ dpo_id_t dpop = DPO_INVALID;
int i, j, k;
fib_node_index_t fei;
@@ -985,7 +985,7 @@ add_port_range_adjacency (u32 fib_index,
* the prefix is already there.
* check it was sourced by us, and if so get the range DPO from it.
*/
- dpo_id_t dpo = DPO_NULL;
+ dpo_id_t dpo = DPO_INVALID;
const dpo_id_t *bucket;
if (fib_entry_get_dpo_for_source (fei, FIB_SOURCE_SPECIAL, &dpo))
@@ -1045,9 +1045,9 @@ add_port_range_adjacency (u32 fib_index,
}
else
{
- fib_table_entry_special_dpo_update (fei,
- FIB_SOURCE_SPECIAL,
- FIB_ENTRY_FLAG_NONE, &dpop);
+ fib_entry_special_update (fei,
+ FIB_SOURCE_SPECIAL,
+ FIB_ENTRY_FLAG_NONE, &dpop);
}
return 0;
@@ -1088,7 +1088,7 @@ remove_port_range_adjacency (u32 fib_index,
* the prefix is already there.
* check it was sourced by us
*/
- dpo_id_t dpo = DPO_NULL;
+ dpo_id_t dpo = DPO_INVALID;
const dpo_id_t *bucket;
if (fib_entry_get_dpo_for_source (fei, FIB_SOURCE_SPECIAL, &dpo))
@@ -1361,7 +1361,7 @@ show_source_and_port_range_check_fn (vlib_main_t * vm,
* find the longest prefix match on the address requested,
* check it was sourced by us
*/
- dpo_id_t dpo = DPO_NULL;
+ dpo_id_t dpo = DPO_INVALID;
const dpo_id_t *bucket;
if (!fib_entry_get_dpo_for_source (fib_table_lookup (fib_index, &pfx),
diff --git a/vnet/vnet/ip/ip6_forward.c b/vnet/vnet/ip/ip6_forward.c
index 766b26c48e3..c285af9544f 100644
--- a/vnet/vnet/ip/ip6_forward.c
+++ b/vnet/vnet/ip/ip6_forward.c
@@ -350,7 +350,7 @@ ip6_add_interface_routes (vnet_main_t * vnm, u32 sw_if_index,
lm->classify_table_index_by_sw_if_index [sw_if_index];
if (classify_table_index != (u32) ~0)
{
- dpo_id_t dpo = DPO_NULL;
+ dpo_id_t dpo = DPO_INVALID;
dpo_set(&dpo,
DPO_CLASSIFY,
@@ -3079,7 +3079,7 @@ int vnet_set_ip6_classify_intfc (vlib_main_t * vm, u32 sw_if_index,
if (table_index != (u32) ~0)
{
- dpo_id_t dpo = DPO_NULL;
+ dpo_id_t dpo = DPO_INVALID;
dpo_set(&dpo,
DPO_CLASSIFY,
diff --git a/vnet/vnet/ip/lookup.c b/vnet/vnet/ip/lookup.c
index 1f8735b54e2..78152f01c04 100644
--- a/vnet/vnet/ip/lookup.c
+++ b/vnet/vnet/ip/lookup.c
@@ -47,6 +47,7 @@
#include <vnet/dpo/classify_dpo.h>
#include <vnet/dpo/punt_dpo.h>
#include <vnet/dpo/receive_dpo.h>
+#include <vnet/dpo/ip_null_dpo.h>
/**
* @file
@@ -278,6 +279,12 @@ static uword unformat_dpo (unformat_input_t * input, va_list * args)
dpo_copy(dpo, punt_dpo_get(proto));
else if (unformat (input, "local"))
receive_dpo_add_or_lock(proto, ~0, NULL, dpo);
+ else if (unformat (input, "null-send-unreach"))
+ ip_null_dpo_add_and_lock(proto, IP_NULL_ACTION_SEND_ICMP_UNREACH, dpo);
+ else if (unformat (input, "null-send-prohibit"))
+ ip_null_dpo_add_and_lock(proto, IP_NULL_ACTION_SEND_ICMP_PROHIBIT, dpo);
+ else if (unformat (input, "null"))
+ ip_null_dpo_add_and_lock(proto, IP_NULL_ACTION_NONE, dpo);
else if (unformat (input, "classify"))
{
u32 classify_table_index;
@@ -337,7 +344,7 @@ vnet_ip_route_cmd (vlib_main_t * vm,
{
unformat_input_t _line_input, * line_input = &_line_input;
fib_route_path_t *rpaths = NULL, rpath;
- dpo_id_t dpo = DPO_NULL, *dpos = NULL;
+ dpo_id_t dpo = DPO_INVALID, *dpos = NULL;
fib_prefix_t *prefixs = NULL, pfx;
clib_error_t * error = NULL;
mpls_label_t out_label;
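The three new unformat_dpo branches expose the IP_NULL actions to the debug CLI. The enclosing command is outside this hunk, but assuming the usual "ip route ... via" form that drives unformat_dpo, null routes would be entered along the lines of:

    ip route add 10.0.0.0/8 via null
    ip route add 10.0.0.0/8 via null-send-unreach
    ip route add 10.0.0.0/8 via null-send-prohibit

The prefix here is only illustrative; "null" discards silently, while the other two reply with the corresponding ICMP error, rate-limited as shown in ip_null_dpo.c.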
diff --git a/vnet/vnet/lisp-gpe/lisp_gpe_adjacency.c b/vnet/vnet/lisp-gpe/lisp_gpe_adjacency.c
index dadd09db96b..8c96a25cc5d 100644
--- a/vnet/vnet/lisp-gpe/lisp_gpe_adjacency.c
+++ b/vnet/vnet/lisp-gpe/lisp_gpe_adjacency.c
@@ -128,7 +128,7 @@ static void
lisp_gpe_adj_stack_one (lisp_gpe_adjacency_t * ladj, adj_index_t ai)
{
const lisp_gpe_tunnel_t *lgt;
- dpo_id_t tmp = DPO_NULL;
+ dpo_id_t tmp = DPO_INVALID;
lgt = lisp_gpe_tunnel_get (ladj->tunnel_index);
fib_entry_contribute_forwarding (lgt->fib_entry_index,
diff --git a/vnet/vnet/lisp-gpe/lisp_gpe_fwd_entry.c b/vnet/vnet/lisp-gpe/lisp_gpe_fwd_entry.c
index 018fad4ba00..54ee86d83c5 100644
--- a/vnet/vnet/lisp-gpe/lisp_gpe_fwd_entry.c
+++ b/vnet/vnet/lisp-gpe/lisp_gpe_fwd_entry.c
@@ -61,7 +61,7 @@ ip_dst_fib_add_route (u32 dst_fib_index, const ip_prefix_t * dst_prefix)
if (dst_fei == FIB_NODE_INDEX_INVALID ||
NULL == fib_entry_get_source_data (dst_fei, FIB_SOURCE_LISP))
{
- dpo_id_t src_lkup_dpo = DPO_NULL;
+ dpo_id_t src_lkup_dpo = DPO_INVALID;
/* create a new src FIB. */
src_fib_index =
@@ -274,7 +274,7 @@ create_fib_entries (lisp_gpe_fwd_entry_t * lfe)
if (LISP_GPE_FWD_ENTRY_TYPE_NEGATIVE == lfe->type)
{
- dpo_id_t dpo = DPO_NULL;
+ dpo_id_t dpo = DPO_INVALID;
switch (lfe->action)
{
@@ -659,7 +659,7 @@ static void
lisp_gpe_l2_update_fwding (lisp_gpe_fwd_entry_t * lfe)
{
lisp_gpe_main_t *lgm = &lisp_gpe_main;
- dpo_id_t dpo = DPO_NULL;
+ dpo_id_t dpo = DPO_INVALID;
if (LISP_GPE_FWD_ENTRY_TYPE_NEGATIVE != lfe->type)
{
diff --git a/vnet/vnet/map/map.c b/vnet/vnet/map/map.c
index 74a99057c90..beeaf1121a6 100644
--- a/vnet/vnet/map/map.c
+++ b/vnet/vnet/map/map.c
@@ -166,8 +166,8 @@ map_create_domain (ip4_address_t * ip4_prefix,
{
u8 suffix_len, suffix_shift;
map_main_t *mm = &map_main;
- dpo_id_t dpo_v4 = DPO_NULL;
- dpo_id_t dpo_v6 = DPO_NULL;
+ dpo_id_t dpo_v4 = DPO_INVALID;
+ dpo_id_t dpo_v6 = DPO_INVALID;
fib_node_index_t fei;
map_domain_t *d;
@@ -275,7 +275,7 @@ map_create_domain (ip4_address_t * ip4_prefix,
if (FIB_NODE_INDEX_INVALID != fei)
{
- dpo_id_t dpo = DPO_NULL;
+ dpo_id_t dpo = DPO_INVALID;
if (fib_entry_get_dpo_for_source (fei, FIB_SOURCE_MAP, &dpo))
{
diff --git a/vnet/vnet/mpls/interface.c b/vnet/vnet/mpls/interface.c
index fc297cd10ae..726e6720bf2 100644
--- a/vnet/vnet/mpls/interface.c
+++ b/vnet/vnet/mpls/interface.c
@@ -934,7 +934,7 @@ int vnet_mpls_ethernet_add_del_policy_tunnel (u8 *dst,
.fp_len = tp->mask_width,
.fp_proto = FIB_PROTOCOL_IP4,
};
- dpo_id_t dpo = DPO_NULL;
+ dpo_id_t dpo = DPO_INVALID;
if (is_add)
{
diff --git a/vnet/vnet/sr/sr.c b/vnet/vnet/sr/sr.c
index 6a7856592b8..287d52116bd 100644
--- a/vnet/vnet/sr/sr.c
+++ b/vnet/vnet/sr/sr.c
@@ -834,7 +834,7 @@ ip6_sr_add_del_tunnel (ip6_sr_add_del_tunnel_args_t * a)
u8 hmac_key_index = 0;
ip6_sr_policy_t *pt;
int i;
- dpo_id_t dpo = DPO_NULL;
+ dpo_id_t dpo = DPO_INVALID;
/* Make sure that the rx FIB exists */
p = hash_get (im->fib_index_by_table_id, a->rx_table_id);
@@ -1728,7 +1728,7 @@ ip6_sr_add_del_multicastmap (ip6_sr_add_del_multicastmap_args_t * a)
* We don't handle ugly RFC-related cases yet, but I'm sure PL will complain
* at some point...
*/
- dpo_id_t dpo = DPO_NULL;
+ dpo_id_t dpo = DPO_INVALID;
dpo_set (&dpo, sr_dpo_type, DPO_PROTO_IP6, t - sm->tunnels);