Diffstat (limited to 'vnet')
-rw-r--r--  vnet/Makefile.am                  |    1
-rw-r--r--  vnet/etc/scripts/urpf             |   86
-rw-r--r--  vnet/vnet/dpo/load_balance.c      |   37
-rw-r--r--  vnet/vnet/dpo/load_balance.h      |    8
-rw-r--r--  vnet/vnet/dpo/load_balance_map.c  |    9
-rw-r--r--  vnet/vnet/dpo/load_balance_map.h  |    1
-rw-r--r--  vnet/vnet/fib/fib_entry.c         |   15
-rw-r--r--  vnet/vnet/fib/fib_entry.h         |   12
-rw-r--r--  vnet/vnet/fib/fib_entry_src.c     |   40
-rw-r--r--  vnet/vnet/fib/fib_entry_src_rr.c  |    1
-rw-r--r--  vnet/vnet/fib/fib_path.c          |   69
-rw-r--r--  vnet/vnet/fib/fib_path.h          |    2
-rw-r--r--  vnet/vnet/fib/fib_path_list.c     |   72
-rw-r--r--  vnet/vnet/fib/fib_path_list.h     |    9
-rw-r--r--  vnet/vnet/fib/fib_test.c          |  310
-rw-r--r--  vnet/vnet/fib/fib_types.h         |   15
-rw-r--r--  vnet/vnet/fib/fib_urpf_list.c     |  260
-rw-r--r--  vnet/vnet/fib/fib_urpf_list.h     |  146
-rw-r--r--  vnet/vnet/gre/gre.c               |    5
-rw-r--r--  vnet/vnet/ip/ip4_forward.c        |   58
-rw-r--r--  vnet/vnet/ip/ip4_source_check.c   |  260
-rw-r--r--  vnet/vnet/ip/lookup.c             |   14
22 files changed, 1223 insertions, 207 deletions
diff --git a/vnet/Makefile.am b/vnet/Makefile.am
index 88d55640..c0ae70dd 100644
--- a/vnet/Makefile.am
+++ b/vnet/Makefile.am
@@ -763,6 +763,7 @@ libvnet_la_SOURCES += \
vnet/fib/fib_path_list.c \
vnet/fib/fib_path.c \
vnet/fib/fib_path_ext.c \
+ vnet/fib/fib_urpf_list.c \
vnet/fib/fib_attached_export.c
nobase_include_HEADERS += \
diff --git a/vnet/etc/scripts/urpf b/vnet/etc/scripts/urpf
new file mode 100644
index 00000000..a4d87527
--- /dev/null
+++ b/vnet/etc/scripts/urpf
@@ -0,0 +1,86 @@
+
+create loop int
+
+set int state loop0 up
+set int ip addr loop0 10.10.10.10/24
+
+packet-generator new {
+ name transit-deny
+ limit 1
+ node ip4-input
+ size 64-64
+ no-recycle
+ data {
+ UDP: 1.2.3.4 -> 2.2.2.2
+ UDP: 3000 -> 3001
+ length 128 checksum 0 incrementing 1
+ }
+}
+
+packet-generator new {
+ name transit-allow
+ limit 1
+ node ip4-input
+ size 64-64
+ no-recycle
+ data {
+ UDP: 1.1.1.1 -> 2.2.2.2
+ UDP: 3000 -> 3001
+ length 128 checksum 0 incrementing 1
+ }
+}
+
+packet-generator new {
+ name transit-allow-from-exemption
+ limit 1
+ node ip4-input
+ size 64-64
+ no-recycle
+ data {
+ UDP: 11.11.12.13 -> 2.2.2.2
+ UDP: 6000 -> 6001
+ length 128 checksum 0 incrementing 1
+ }
+}
+
+packet-generator new {
+ name for-us-allow-from-exemption
+ limit 1
+ node ip4-input
+ size 64-64
+ no-recycle
+ data {
+ UDP: 11.11.12.13 -> 10.10.10.10
+ UDP: 6000 -> 6001
+ length 128 checksum 0 incrementing 1
+ }
+}
+
+packet-generator new {
+ name for-us-allow
+ limit 1
+ node ip4-input
+ size 64-64
+ no-recycle
+ data {
+ UDP: 1.1.1.1 -> 10.10.10.10
+ UDP: 3000 -> 3001
+ length 128 checksum 0 incrementing 1
+ }
+}
+
+tr add pg-input 100
+
+set int ip addr pg0 10.10.11.10/24
+
+set interface ip source-check pg0 strict
+
+ip route add 1.1.1.1/32 via 10.10.11.11 pg0
+ip route add 2.2.2.2/32 via 10.10.10.11 loop0
+
+ip urpf-accept 11.11.0.0/16
+
+#set interface ip source-check pg0 strict del
+#set interface ip source-check pg0 loose
+
+#ip urpf-accept del 11.11.0.0/16
diff --git a/vnet/vnet/dpo/load_balance.c b/vnet/vnet/dpo/load_balance.c
index 093661d8..fc788508 100644
--- a/vnet/vnet/dpo/load_balance.c
+++ b/vnet/vnet/dpo/load_balance.c
@@ -20,6 +20,7 @@
#include <vppinfra/math.h> /* for fabs */
#include <vnet/adj/adj.h>
#include <vnet/adj/adj_internal.h>
+#include <vnet/fib/fib_urpf_list.h>
/*
* distribution error tolerance for load-balancing
@@ -87,6 +88,7 @@ load_balance_alloc_i (void)
memset(lb, 0, sizeof(*lb));
lb->lb_map = INDEX_INVALID;
+ lb->lb_urpf = INDEX_INVALID;
vlib_validate_combined_counter(&(load_balance_main.lbm_to_counters),
load_balance_get_index(lb));
vlib_validate_combined_counter(&(load_balance_main.lbm_via_counters),
@@ -117,7 +119,7 @@ load_balance_format (index_t lbi,
s = format(s, "%U: ", format_dpo_type, DPO_LOAD_BALANCE);
s = format(s, "[index:%d buckets:%d ", lbi, lb->lb_n_buckets);
- s = format(s, "locks:%d ", lb->lb_locks);
+ s = format(s, "uRPF:%d ", lb->lb_urpf);
s = format(s, "to:[%Ld:%Ld]", to.packets, to.bytes);
if (0 != via.packets)
{
@@ -236,6 +238,35 @@ load_balance_is_drop (const dpo_id_t *dpo)
return (0);
}
+void
+load_balance_set_urpf (index_t lbi,
+ index_t urpf)
+{
+ load_balance_t *lb;
+ index_t old;
+
+ lb = load_balance_get(lbi);
+
+ /*
+ * packets in flight will see this change, but the index write is atomic
+ */
+ old = lb->lb_urpf;
+ lb->lb_urpf = urpf;
+
+ fib_urpf_list_unlock(old);
+ fib_urpf_list_lock(urpf);
+}
+
+index_t
+load_balance_get_urpf (index_t lbi)
+{
+ load_balance_t *lb;
+
+ lb = load_balance_get(lbi);
+
+ return (lb->lb_urpf);
+}
+
const dpo_id_t *
load_balance_get_bucket (index_t lbi,
u32 bucket)
@@ -652,6 +683,9 @@ load_balance_destroy (load_balance_t *lb)
vec_free(lb->lb_buckets);
}
+ fib_urpf_list_unlock(lb->lb_urpf);
+ load_balance_map_unlock(lb->lb_map);
+
pool_put(load_balance_pool, lb);
}
@@ -677,6 +711,7 @@ load_balance_mem_show (void)
pool_elts(load_balance_pool),
pool_len(load_balance_pool),
sizeof(load_balance_t));
+ load_balance_map_show_mem();
}
const static dpo_vft_t lb_vft = {
diff --git a/vnet/vnet/dpo/load_balance.h b/vnet/vnet/dpo/load_balance.h
index d630a2c2..4bf3ecff 100644
--- a/vnet/vnet/dpo/load_balance.h
+++ b/vnet/vnet/dpo/load_balance.h
@@ -112,6 +112,11 @@ typedef struct load_balance_t_ {
index_t lb_map;
/**
+ * This is the index of the uRPF list for this LB
+ */
+ index_t lb_urpf;
+
+ /**
* the hash config to use when selecting a bucket. this is a u16
*/
flow_hash_config_t lb_hash_config;
@@ -160,6 +165,9 @@ extern void load_balance_multipath_update(
extern void load_balance_set_bucket(index_t lbi,
u32 bucket,
const dpo_id_t *next);
+extern void load_balance_set_urpf(index_t lbi,
+ index_t urpf);
+extern index_t load_balance_get_urpf(index_t lbi);
extern u8* format_load_balance(u8 * s, va_list * args);
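
A minimal control-plane sketch of the hand-off these declarations imply (the variables lb_index and itf_sw_if_index are illustrative, not part of the patch): load_balance_set_urpf() takes its own lock on the new list and releases the one it previously held, so a caller that built a private list can drop its reference immediately. This is the same sequence fib_entry_src_mk_lb() uses further down for the uRPF-exempt case.

    index_t ui = fib_urpf_list_alloc_and_lock ();

    fib_urpf_list_append (ui, itf_sw_if_index); /* collect the accepting interface(s) */
    fib_urpf_list_bake (ui);                    /* sort|uniq before publishing */

    load_balance_set_urpf (lb_index, ui);       /* the LB locks ui and unlocks its old list */
    fib_urpf_list_unlock (ui);                  /* drop the builder's reference */
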
diff --git a/vnet/vnet/dpo/load_balance_map.c b/vnet/vnet/dpo/load_balance_map.c
index f08801f1..70ce1bf7 100644
--- a/vnet/vnet/dpo/load_balance_map.c
+++ b/vnet/vnet/dpo/load_balance_map.c
@@ -526,6 +526,15 @@ load_balance_map_module_init (void)
lb_maps_by_path_index = hash_create(0, sizeof(fib_node_list_t));
}
+void
+load_balance_map_show_mem (void)
+{
+ fib_show_memory_usage("Load-Balance Map",
+ pool_elts(load_balance_map_pool),
+ pool_len(load_balance_map_pool),
+ sizeof(load_balance_map_t));
+}
+
static clib_error_t *
load_balance_map_show (vlib_main_t * vm,
unformat_input_t * input,
diff --git a/vnet/vnet/dpo/load_balance_map.h b/vnet/vnet/dpo/load_balance_map.h
index f080e97c..454bf4b3 100644
--- a/vnet/vnet/dpo/load_balance_map.h
+++ b/vnet/vnet/dpo/load_balance_map.h
@@ -60,6 +60,7 @@ extern void load_balance_map_unlock(index_t lbmi);
extern void load_balance_map_path_state_change(fib_node_index_t path_index);
extern u8* format_load_balance_map(u8 *s, va_list ap);
+extern void load_balance_map_show_mem(void);
/**
* The encapsulation breakages are for fast DP access
diff --git a/vnet/vnet/fib/fib_entry.c b/vnet/vnet/fib/fib_entry.c
index 1821319d..06922004 100644
--- a/vnet/vnet/fib/fib_entry.c
+++ b/vnet/vnet/fib/fib_entry.c
@@ -473,6 +473,21 @@ static const fib_node_vft_t fib_entry_vft = {
.fnv_mem_show = fib_entry_show_memory,
};
+/**
+ * @brief Contribute the set of Adjacencies that this entry forwards with
+ * to build the uRPF list of its children
+ */
+void
+fib_entry_contribute_urpf (fib_node_index_t entry_index,
+ index_t urpf)
+{
+ fib_entry_t *fib_entry;
+
+ fib_entry = fib_entry_get(entry_index);
+
+ return (fib_path_list_contribute_urpf(fib_entry->fe_parent, urpf));
+}
+
/*
* fib_entry_contribute_forwarding
*
diff --git a/vnet/vnet/fib/fib_entry.h b/vnet/vnet/fib/fib_entry.h
index 1016bb21..bfebe5dd 100644
--- a/vnet/vnet/fib/fib_entry.h
+++ b/vnet/vnet/fib/fib_entry.h
@@ -99,10 +99,15 @@ typedef enum fib_source_t_ {
FIB_SOURCE_AE,
/**
* Recursive resolution source.
- * Used to install an entry that is thre resolution traget of another.
+ * Used to install an entry that is the resolution target of another.
*/
FIB_SOURCE_RR,
/**
+ * uRPF bypass/exemption.
+ * Used to install an entry that is exempt from the loose uRPF check
+ */
+ FIB_SOURCE_URPF_EXEMPT,
+ /**
* The default route source.
* The default route is always added to the FIB table (like the
* special sources) but we need to be able to over-ride it with
@@ -138,6 +143,7 @@ _Static_assert (sizeof(fib_source_t) == 1,
[FIB_SOURCE_RR] = "recursive-resolution", \
[FIB_SOURCE_AE] = "attached_export", \
[FIB_SOURCE_MPLS] = "mpls", \
+ [FIB_SOURCE_URPF_EXEMPT] = "urpf-exempt", \
[FIB_SOURCE_DEFAULT_ROUTE] = "default-route", \
}
@@ -376,7 +382,7 @@ typedef struct fib_entry_t_ {
* paint the header straight on without the need to check the packet
* type to derive the EOS bit value.
*/
- dpo_id_t fe_lb[FIB_FORW_CHAIN_NUM];
+ dpo_id_t fe_lb[FIB_FORW_CHAIN_MPLS_NUM];
/**
* Vector of source infos.
* Most entries will only have 1 source. So we optimise for memory usage,
@@ -449,6 +455,8 @@ extern fib_entry_src_flag_t fib_entry_path_remove(fib_node_index_t fib_entry_ind
extern fib_entry_src_flag_t fib_entry_delete(fib_node_index_t fib_entry_index,
fib_source_t source);
+extern void fib_entry_contribute_urpf(fib_node_index_t path_index,
+ index_t urpf);
extern void fib_entry_contribute_forwarding(
fib_node_index_t fib_entry_index,
fib_forward_chain_type_t type,
diff --git a/vnet/vnet/fib/fib_entry_src.c b/vnet/vnet/fib/fib_entry_src.c
index 0bca17dd..6107e3e1 100644
--- a/vnet/vnet/fib/fib_entry_src.c
+++ b/vnet/vnet/fib/fib_entry_src.c
@@ -18,9 +18,10 @@
#include <vnet/dpo/mpls_label_dpo.h>
#include <vnet/dpo/drop_dpo.h>
-#include "fib_entry_src.h"
-#include "fib_table.h"
-#include "fib_path_ext.h"
+#include <vnet/fib/fib_entry_src.h>
+#include <vnet/fib/fib_table.h>
+#include <vnet/fib/fib_path_ext.h>
+#include <vnet/fib/fib_urpf_list.h>
/*
* per-source type vft
@@ -368,6 +369,33 @@ fib_entry_src_mk_lb (fib_entry_t *fib_entry,
load_balance_multipath_update(dpo_lb,
ctx.next_hops,
fib_entry_calc_lb_flags(&ctx));
+
+ /*
+ * if this entry is sourced by the uRPF-exempt source then we
+ * append the always present local0 interface (index 0) to the
+ * uRPF list so it is not empty. that way packets pass the loose check.
+ */
+ index_t ui = fib_path_list_get_urpf(esrc->fes_pl);
+
+ if (fib_entry_is_sourced(fib_entry_get_index(fib_entry),
+ FIB_SOURCE_URPF_EXEMPT) &&
+ (0 == fib_urpf_check_size(ui)))
+ {
+ /*
+ * The uRPF list we get from the path-list is shared by all
+ * other users of the list, but the uRPF exemption applies
+ * only to this prefix. So we need our own list.
+ */
+ ui = fib_urpf_list_alloc_and_lock();
+ fib_urpf_list_append(ui, 0);
+ fib_urpf_list_bake(ui);
+ load_balance_set_urpf(dpo_lb->dpoi_index, ui);
+ fib_urpf_list_unlock(ui);
+ }
+ else
+ {
+ load_balance_set_urpf(dpo_lb->dpoi_index, ui);
+ }
}
void
@@ -425,17 +453,13 @@ fib_entry_src_action_uninstall (fib_entry_t *fib_entry)
fct = fib_entry_get_default_chain_type(fib_entry);
/*
- * uninstall the forwarding chain for the given source from the
- * forwarding tables
+ * uninstall the forwarding chain from the forwarding tables
*/
FIB_ENTRY_DBG(fib_entry, "uninstall: %d",
fib_entry->fe_adj_index);
if (dpo_id_is_valid(&fib_entry->fe_lb[fct]))
{
- /* fib_forward_chain_type_t fct; */
- /* fib_path_ext_t *path_ext; */
-
fib_table_fwding_dpo_remove(
fib_entry->fe_fib_index,
&fib_entry->fe_prefix,
diff --git a/vnet/vnet/fib/fib_entry_src_rr.c b/vnet/vnet/fib/fib_entry_src_rr.c
index f6b89603..9219d58e 100644
--- a/vnet/vnet/fib/fib_entry_src_rr.c
+++ b/vnet/vnet/fib/fib_entry_src_rr.c
@@ -244,4 +244,5 @@ void
fib_entry_src_rr_register (void)
{
fib_entry_src_register(FIB_SOURCE_RR, &rr_src_vft);
+ fib_entry_src_register(FIB_SOURCE_URPF_EXEMPT, &rr_src_vft);
}
diff --git a/vnet/vnet/fib/fib_path.c b/vnet/vnet/fib/fib_path.c
index 03cc7be5..1b82f464 100644
--- a/vnet/vnet/fib/fib_path.c
+++ b/vnet/vnet/fib/fib_path.c
@@ -24,12 +24,13 @@
#include <vnet/adj/adj.h>
-#include "fib_path.h"
-#include "fib_node.h"
-#include "fib_table.h"
-#include "fib_entry.h"
-#include "fib_path_list.h"
-#include "fib_internal.h"
+#include <vnet/fib/fib_path.h>
+#include <vnet/fib/fib_node.h>
+#include <vnet/fib/fib_table.h>
+#include <vnet/fib/fib_entry.h>
+#include <vnet/fib/fib_path_list.h>
+#include <vnet/fib/fib_internal.h>
+#include <vnet/fib/fib_urpf_list.h>
/**
* Enurmeration of path types
@@ -1551,6 +1552,62 @@ fib_path_get_weight (fib_node_index_t path_index)
return (path->fp_weight);
}
+/**
+ * @brief Contribute the path's adjacency to the list passed.
+ * By calling this function over all paths, recursively, a child
+ * can construct its full set of forwarding adjacencies, and hence its
+ * uRPF list.
+ */
+void
+fib_path_contribute_urpf (fib_node_index_t path_index,
+ index_t urpf)
+{
+ fib_path_t *path;
+
+ if (!fib_path_is_resolved(path_index))
+ return;
+
+ path = fib_path_get(path_index);
+
+ switch (path->fp_type)
+ {
+ case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
+ fib_urpf_list_append(urpf, path->attached_next_hop.fp_interface);
+ break;
+
+ case FIB_PATH_TYPE_ATTACHED:
+ fib_urpf_list_append(urpf, path->attached.fp_interface);
+ break;
+
+ case FIB_PATH_TYPE_RECURSIVE:
+ fib_entry_contribute_urpf(path->fp_via_fib, urpf);
+ break;
+
+ case FIB_PATH_TYPE_EXCLUSIVE:
+ case FIB_PATH_TYPE_SPECIAL:
+ /*
+ * these path types may link to an adj, if that's what
+ * the client gave
+ */
+ if (dpo_is_adj(&path->fp_dpo))
+ {
+ ip_adjacency_t *adj;
+
+ adj = adj_get(path->fp_dpo.dpoi_index);
+
+ fib_urpf_list_append(urpf, adj->rewrite_header.sw_if_index);
+ }
+ break;
+
+ case FIB_PATH_TYPE_DEAG:
+ case FIB_PATH_TYPE_RECEIVE:
+ /*
+ * these path types don't link to an adj
+ */
+ break;
+ }
+}
+
void
fib_path_contribute_forwarding (fib_node_index_t path_index,
fib_forward_chain_type_t fct,
diff --git a/vnet/vnet/fib/fib_path.h b/vnet/vnet/fib/fib_path.h
index 16ca358c..2c141a41 100644
--- a/vnet/vnet/fib/fib_path.h
+++ b/vnet/vnet/fib/fib_path.h
@@ -141,6 +141,8 @@ extern load_balance_path_t * fib_path_append_nh_for_multipath_hash(
extern void fib_path_contribute_forwarding(fib_node_index_t path_index,
fib_forward_chain_type_t type,
dpo_id_t *dpo);
+extern void fib_path_contribute_urpf(fib_node_index_t path_index,
+ index_t urpf);
extern adj_index_t fib_path_get_adj(fib_node_index_t path_index);
extern int fib_path_recursive_loop_detect(fib_node_index_t path_index,
fib_node_index_t **entry_indicies);
diff --git a/vnet/vnet/fib/fib_path_list.c b/vnet/vnet/fib/fib_path_list.c
index 611fe9fb..a5827490 100644
--- a/vnet/vnet/fib/fib_path_list.c
+++ b/vnet/vnet/fib/fib_path_list.c
@@ -23,6 +23,7 @@
#include <vnet/fib/fib_internal.h>
#include <vnet/fib/fib_node_list.h>
#include <vnet/fib/fib_walk.h>
+#include <vnet/fib/fib_urpf_list.h>
/**
* FIB path-list
@@ -47,10 +48,15 @@ typedef struct fib_path_list_t_ {
fib_protocol_t fpl_nh_proto;
/**
- * Vector of paths indecies for all configured paths.
+ * Vector of path indices for all configured paths.
* For shareable path-lists this list MUST not change.
*/
fib_node_index_t *fpl_paths;
+
+ /**
+ * the RPF list calculated for this path list
+ */
+ fib_node_index_t fpl_urpf;
} fib_path_list_t;
/*
@@ -138,6 +144,8 @@ format_fib_path_list (u8 * s, va_list * args)
}
}
}
+ s = format (s, " %U\n", format_fib_urpf_list, path_list->fpl_urpf);
+
vec_foreach (path_index, path_list->fpl_paths)
{
s = fib_path_format(*path_index, s);
@@ -321,9 +329,10 @@ fib_path_list_destroy (fib_path_list_t *path_list)
vec_foreach (path_index, path_list->fpl_paths)
{
fib_path_destroy(*path_index);
- }
+ }
vec_free(path_list->fpl_paths);
+ fib_urpf_list_unlock(path_list->fpl_urpf);
fib_node_deinit(&path_list->fpl_node);
pool_put(fib_path_list_pool, path_list);
@@ -396,6 +405,60 @@ fib_path_list_mk_lb (fib_path_list_t *path_list,
vec_free(hash_key);
}
+/**
+ * @brief [re]build the path list's uRPF list
+ */
+static void
+fib_path_list_mk_urpf (fib_path_list_t *path_list)
+{
+ fib_node_index_t *path_index;
+
+ /*
+ * ditch the old one. by iterating through all paths we are going
+ * to re-find all the adjs that were in the old one anyway. If we
+ * keep the old one, then the |sort|uniq requires more work.
+ * All users of the RPF list have their own lock, so we can release
+ * immediately.
+ */
+ fib_urpf_list_unlock(path_list->fpl_urpf);
+ path_list->fpl_urpf = fib_urpf_list_alloc_and_lock();
+
+ vec_foreach (path_index, path_list->fpl_paths)
+ {
+ fib_path_contribute_urpf(*path_index, path_list->fpl_urpf);
+ }
+
+ fib_urpf_list_bake(path_list->fpl_urpf);
+}
+
+/**
+ * @brief Contribute (add) this path list's uRPF list. This allows the child
+ * to construct an aggregate list.
+ */
+void
+fib_path_list_contribute_urpf (fib_node_index_t path_list_index,
+ index_t urpf)
+{
+ fib_path_list_t *path_list;
+
+ path_list = fib_path_list_get(path_list_index);
+
+ fib_urpf_list_combine(urpf, path_list->fpl_urpf);
+}
+
+/**
+ * @brief Return to the child the RPF list pre-built for this path list
+ */
+index_t
+fib_path_list_get_urpf (fib_node_index_t path_list_index)
+{
+ fib_path_list_t *path_list;
+
+ path_list = fib_path_list_get(path_list_index);
+
+ return (path_list->fpl_urpf);
+}
+
/*
* fib_path_list_back_walk
*
@@ -410,6 +473,8 @@ fib_path_list_back_walk (fib_node_index_t path_list_index,
path_list = fib_path_list_get(path_list_index);
+ fib_path_list_mk_urpf(path_list);
+
/*
* propagate the backwalk further
*/
@@ -461,6 +526,7 @@ fib_path_list_memory_show (void)
pool_elts(fib_path_list_pool),
pool_len(fib_path_list_pool),
sizeof(fib_path_list_t));
+ fib_urpf_list_show_mem();
}
/*
@@ -483,6 +549,7 @@ fib_path_list_alloc (fib_node_index_t *path_list_index)
fib_node_init(&path_list->fpl_node,
FIB_NODE_TYPE_PATH_LIST);
+ path_list->fpl_urpf = INDEX_INVALID;
if (NULL != path_list_index)
{
@@ -519,6 +586,7 @@ fib_path_list_resolve (fib_path_list_t *path_list)
path_list = fib_path_list_get(path_list_index);
FIB_PATH_LIST_DBG(path_list, "resovled");
+ fib_path_list_mk_urpf(path_list);
return (path_list);
}
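
A hedged sketch of the consumer side of fib_path_list_contribute_urpf(): a child that resolves via more than one parent path-list builds its aggregate list by combining the parents' pre-built lists and baking the result. The two path-list index variables are placeholders for illustration.

    index_t agg = fib_urpf_list_alloc_and_lock ();

    /* each call appends the parent's interfaces via fib_urpf_list_combine() */
    fib_path_list_contribute_urpf (parent_pl_index_1, agg);
    fib_path_list_contribute_urpf (parent_pl_index_2, agg);

    fib_urpf_list_bake (agg);   /* collapse to a sorted, unique interface set */
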
diff --git a/vnet/vnet/fib/fib_path_list.h b/vnet/vnet/fib/fib_path_list.h
index 42e07abd..852f07d4 100644
--- a/vnet/vnet/fib/fib_path_list.h
+++ b/vnet/vnet/fib/fib_path_list.h
@@ -104,9 +104,12 @@ extern fib_node_index_t fib_path_list_copy_and_path_remove(
fib_node_index_t pl_index,
fib_path_list_flags_t flags,
const fib_route_path_t *path);
-extern void fib_path_list_contribute_forwarding (fib_node_index_t path_list_index,
- fib_forward_chain_type_t type,
- dpo_id_t *dpo);
+extern void fib_path_list_contribute_forwarding(fib_node_index_t path_list_index,
+ fib_forward_chain_type_t type,
+ dpo_id_t *dpo);
+extern void fib_path_list_contribute_urpf(fib_node_index_t path_index,
+ index_t urpf);
+extern index_t fib_path_list_get_urpf(fib_node_index_t path_list_index);
extern index_t fib_path_list_get_adj(fib_node_index_t path_list_index,
fib_forward_chain_type_t type);
diff --git a/vnet/vnet/fib/fib_test.c b/vnet/vnet/fib/fib_test.c
index 898005e5..3dc9c1d7 100644
--- a/vnet/vnet/fib/fib_test.c
+++ b/vnet/vnet/fib/fib_test.c
@@ -29,6 +29,7 @@
#include <vnet/fib/fib_path_list.h>
#include <vnet/fib/fib_walk.h>
#include <vnet/fib/fib_node_list.h>
+#include <vnet/fib/fib_urpf_list.h>
#define FIB_TEST_I(_cond, _comment, _args...) \
({ \
@@ -139,14 +140,15 @@ fib_test_mk_intf (u32 ninterfaces)
}
}
-#define FIB_TEST_REC_FORW(_rec_prefix, _via_prefix) \
+#define FIB_TEST_REC_FORW(_rec_prefix, _via_prefix, _bucket) \
{ \
const dpo_id_t *_rec_dpo = fib_entry_contribute_ip_forwarding( \
fib_table_lookup_exact_match(fib_index, (_rec_prefix))); \
const dpo_id_t *_via_dpo = fib_entry_contribute_ip_forwarding( \
fib_table_lookup(fib_index, (_via_prefix))); \
FIB_TEST(!dpo_cmp(_via_dpo, \
- load_balance_get_bucket(_rec_dpo->dpoi_index, 0)), \
+ load_balance_get_bucket(_rec_dpo->dpoi_index, \
+ _bucket)), \
"%U is recursive via %U", \
format_fib_prefix, (_rec_prefix), \
format_fib_prefix, _via_prefix); \
@@ -167,6 +169,57 @@ fib_test_mk_intf (u32 ninterfaces)
format_dpo_id, _dpo1, 0); \
}
+#define FIB_TEST_RPF(_cond, _comment, _args...) \
+{ \
+ if (!FIB_TEST_I(_cond, _comment, ##_args)) { \
+ return (0); \
+ } \
+}
+
+static int
+fib_test_urpf_is_equal (fib_node_index_t fei,
+ fib_forward_chain_type_t fct,
+ u32 num, ...)
+{
+ dpo_id_t dpo = DPO_NULL;
+ fib_urpf_list_t *urpf;
+ index_t ui;
+ va_list ap;
+ int ii;
+
+ va_start(ap, num);
+
+ fib_entry_contribute_forwarding(fei, fct, &dpo);
+ ui = load_balance_get_urpf(dpo.dpoi_index);
+
+ urpf = fib_urpf_list_get(ui);
+
+ FIB_TEST_RPF(num == vec_len(urpf->furpf_itfs),
+ "RPF:%U len %d == %d",
+ format_fib_urpf_list, ui,
+ num, vec_len(urpf->furpf_itfs));
+ FIB_TEST_RPF(num == fib_urpf_check_size(ui),
+ "RPF:%U check-size %d == %d",
+ format_fib_urpf_list, ui,
+ num, vec_len(urpf->furpf_itfs));
+
+ for (ii = 0; ii < num; ii++)
+ {
+ adj_index_t ai = va_arg(ap, adj_index_t);
+
+ FIB_TEST_RPF(ai == urpf->furpf_itfs[ii],
+ "RPF:%d item:%d - %d == %d",
+ ui, ii, ai, urpf->furpf_itfs[ii]);
+ FIB_TEST_RPF(fib_urpf_check(ui, ai),
+ "RPF:%d %d found",
+ ui, ai);
+ }
+
+ dpo_reset(&dpo);
+
+ return (1);
+}
+
static void
fib_test_v4 (void)
{
@@ -329,6 +382,8 @@ fib_test_v4 (void)
FIB_TEST((FIB_NODE_INDEX_INVALID != fei), "local interface route present");
dpo = fib_entry_contribute_ip_forwarding(fei);
+ FIB_TEST(fib_test_urpf_is_equal(fei, FIB_FORW_CHAIN_TYPE_UNICAST_IP4, 0),
+ "RPF list for local length 0");
dpo = load_balance_get_bucket(dpo->dpoi_index, 0);
FIB_TEST((DPO_RECEIVE == dpo->dpoi_type),
"local interface adj is local");
@@ -466,6 +521,10 @@ fib_test_v4 (void)
u8 eth_addr[] = {
0xde, 0xde, 0xde, 0xba, 0xba, 0xba,
};
+ ip46_address_t nh_12_12_12_12 = {
+ .ip4.as_u32 = clib_host_to_net_u32(0x0c0c0c0c),
+ };
+ adj_index_t ai_12_12_12_12;
/*
* Add a route via an incomplete ADJ. then complete the ADJ
@@ -513,6 +572,24 @@ fib_test_v4 (void)
dpo1 = load_balance_get_bucket(dpo->dpoi_index, 0);
FIB_TEST(DPO_ADJACENCY == dpo1->dpoi_type,
"11.11.11.11/32 via complete adj");
+ FIB_TEST(fib_test_urpf_is_equal(fei, FIB_FORW_CHAIN_TYPE_UNICAST_IP4, 1,
+ tm->hw[0]->sw_if_index),
+ "RPF list for adj-fib contains adj");
+
+ ai_12_12_12_12 = adj_nbr_add_or_lock(FIB_PROTOCOL_IP4,
+ FIB_LINK_IP4,
+ &nh_12_12_12_12,
+ tm->hw[1]->sw_if_index);
+ FIB_TEST((FIB_NODE_INDEX_INVALID != ai_12_12_12_12), "adj created");
+ adj = adj_get(ai_12_12_12_12);
+ FIB_TEST((IP_LOOKUP_NEXT_ARP == adj->lookup_next_index),
+ "adj is incomplete");
+ FIB_TEST((0 == ip46_address_cmp(&nh_12_12_12_12,
+ &adj->sub_type.nbr.next_hop)),
+ "adj nbr next-hop ok");
+ adj_nbr_update_rewrite(ai_12_12_12_12, eth_addr);
+ FIB_TEST((IP_LOOKUP_NEXT_REWRITE == adj->lookup_next_index),
+ "adj is complete");
/*
* add the adj fib
@@ -591,7 +668,7 @@ fib_test_v4 (void)
fib_entry_pool_size());
/*
- * Add a 2 routes via the first ADJ. ensure path-list sharing
+ * Add 2 routes via the first ADJ. ensure path-list sharing
*/
fib_prefix_t pfx_1_1_1_1_s_32 = {
.fp_len = 32,
@@ -675,6 +752,9 @@ fib_test_v4 (void)
FIB_ROUTE_PATH_FLAG_NONE);
fei = fib_table_lookup(fib_index, &pfx_1_1_2_0_s_24);
dpo = fib_entry_contribute_ip_forwarding(fei);
+ FIB_TEST(fib_test_urpf_is_equal(fei, FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
+ 1, tm->hw[0]->sw_if_index),
+ "RPF list for 1.1.2.0/24 contains both adjs");
dpo1 = load_balance_get_bucket(dpo->dpoi_index, 0);
FIB_TEST(DPO_ADJACENCY == dpo1->dpoi_type, "type is %d", dpo1->dpoi_type);
@@ -709,6 +789,11 @@ fib_test_v4 (void)
1,
FIB_ROUTE_PATH_FLAG_NONE);
fei = fib_table_lookup(fib_index, &pfx_1_1_2_0_s_24);
+ dpo = fib_entry_contribute_ip_forwarding(fei);
+ FIB_TEST(fib_test_urpf_is_equal(fei, FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
+ 1, tm->hw[0]->sw_if_index),
+ "RPF list for 1.1.2.0/24 contains one adj");
+
ai = fib_entry_get_adj(fei);
FIB_TEST((ai_01 == ai), "1.1.2.0/24 resolves via 10.10.10.1");
@@ -740,19 +825,22 @@ fib_test_v4 (void)
.ip4.as_u32 = clib_host_to_net_u32(0x01010101),
};
- fib_table_entry_path_add(fib_index,
- &bgp_100_pfx,
- FIB_SOURCE_API,
- FIB_ENTRY_FLAG_NONE,
- FIB_PROTOCOL_IP4,
- &nh_1_1_1_1,
- ~0, // no index provided.
- fib_index, // nexthop in same fib as route
- 1,
- MPLS_LABEL_INVALID,
- FIB_ROUTE_PATH_FLAG_NONE);
+ fei = fib_table_entry_path_add(fib_index,
+ &bgp_100_pfx,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP4,
+ &nh_1_1_1_1,
+ ~0, // no index provided.
+ fib_index, // nexthop in same fib as route
+ 1,
+ MPLS_LABEL_INVALID,
+ FIB_ROUTE_PATH_FLAG_NONE);
- FIB_TEST_REC_FORW(&bgp_100_pfx, &pfx_1_1_1_1_s_32);
+ FIB_TEST_REC_FORW(&bgp_100_pfx, &pfx_1_1_1_1_s_32, 0);
+ FIB_TEST(fib_test_urpf_is_equal(fei, FIB_FORW_CHAIN_TYPE_UNICAST_IP4, 1,
+ tm->hw[0]->sw_if_index),
+ "RPF list for adj-fib contains adj");
/*
* +1 entry and +1 shared-path-list
@@ -785,7 +873,10 @@ fib_test_v4 (void)
MPLS_LABEL_INVALID,
FIB_ROUTE_PATH_FLAG_NONE);
- FIB_TEST_REC_FORW(&bgp_101_pfx, &pfx_1_1_1_1_s_32);
+ FIB_TEST_REC_FORW(&bgp_101_pfx, &pfx_1_1_1_1_s_32, 0);
+ FIB_TEST(fib_test_urpf_is_equal(fei, FIB_FORW_CHAIN_TYPE_UNICAST_IP4, 1,
+ tm->hw[0]->sw_if_index),
+ "RPF list for adj-fib contains adj");
/*
* +1 entry, but the recursive path-list is shared.
@@ -889,7 +980,7 @@ fib_test_v4 (void)
MPLS_LABEL_INVALID,
FIB_ROUTE_PATH_FLAG_NONE);
- FIB_TEST_REC_FORW(&bgp_200_pfx, &pfx_1_1_1_2_s_32);
+ FIB_TEST_REC_FORW(&bgp_200_pfx, &pfx_1_1_1_2_s_32, 0);
/*
* the adj should be recursive via drop, since the route resolves via
@@ -898,6 +989,8 @@ fib_test_v4 (void)
fei = fib_table_lookup(fib_index, &pfx_1_1_1_2_s_32);
dpo1 = fib_entry_contribute_ip_forwarding(fei);
FIB_TEST(load_balance_is_drop(dpo1), "1.1.1.2/32 is drop");
+ FIB_TEST(fib_test_urpf_is_equal(fei, FIB_FORW_CHAIN_TYPE_UNICAST_IP4, 0),
+ "RPF list for 1.1.1.2/32 contains 0 adjs");
/*
* +2 entry and +1 shared-path-list
@@ -911,6 +1004,8 @@ fib_test_v4 (void)
/*
* Unequal Cost load-balance. 3:1 ratio. fits in a 4 bucket LB
+ * The paths are sorted by NH first. In this case the path with greater
+ * weight is first in the set. This ordering is to test the RPF sort|uniq logic
*/
fib_prefix_t pfx_1_2_3_4_s_32 = {
.fp_len = 32,
@@ -924,7 +1019,7 @@ fib_test_v4 (void)
FIB_SOURCE_API,
FIB_ENTRY_FLAG_NONE,
FIB_PROTOCOL_IP4,
- &nh_10_10_10_2,
+ &nh_10_10_10_1,
tm->hw[0]->sw_if_index,
~0,
1,
@@ -935,8 +1030,8 @@ fib_test_v4 (void)
FIB_SOURCE_API,
FIB_ENTRY_FLAG_NONE,
FIB_PROTOCOL_IP4,
- &nh_10_10_10_1,
- tm->hw[0]->sw_if_index,
+ &nh_12_12_12_12,
+ tm->hw[1]->sw_if_index,
~0,
3,
MPLS_LABEL_INVALID,
@@ -949,14 +1044,16 @@ fib_test_v4 (void)
"1.2.3.4/32 LB has %d bucket",
lb->lb_n_buckets);
- FIB_TEST_LB_BUCKET_VIA_ADJ(&pfx_1_2_3_4_s_32, 0, ai_01);
- FIB_TEST_LB_BUCKET_VIA_ADJ(&pfx_1_2_3_4_s_32, 1, ai_01);
- FIB_TEST_LB_BUCKET_VIA_ADJ(&pfx_1_2_3_4_s_32, 2, ai_01);
- FIB_TEST_LB_BUCKET_VIA_ADJ(&pfx_1_2_3_4_s_32, 3, ai_02);
+ FIB_TEST_LB_BUCKET_VIA_ADJ(&pfx_1_2_3_4_s_32, 0, ai_12_12_12_12);
+ FIB_TEST_LB_BUCKET_VIA_ADJ(&pfx_1_2_3_4_s_32, 1, ai_12_12_12_12);
+ FIB_TEST_LB_BUCKET_VIA_ADJ(&pfx_1_2_3_4_s_32, 2, ai_12_12_12_12);
+ FIB_TEST_LB_BUCKET_VIA_ADJ(&pfx_1_2_3_4_s_32, 3, ai_01);
+
+ FIB_TEST(fib_test_urpf_is_equal(fei, FIB_FORW_CHAIN_TYPE_UNICAST_IP4, 2,
+ tm->hw[0]->sw_if_index,
+ tm->hw[1]->sw_if_index),
+ "RPF list for 1.2.3.4/32 contains both adjs");
- fib_table_entry_delete(fib_index,
- &pfx_1_2_3_4_s_32,
- FIB_SOURCE_API);
/*
* Unequal Cost load-balance. 4:1 ratio.
@@ -974,8 +1071,8 @@ fib_test_v4 (void)
FIB_SOURCE_API,
FIB_ENTRY_FLAG_NONE,
FIB_PROTOCOL_IP4,
- &nh_10_10_10_2,
- tm->hw[0]->sw_if_index,
+ &nh_12_12_12_12,
+ tm->hw[1]->sw_if_index,
~0,
1,
MPLS_LABEL_INVALID,
@@ -1012,13 +1109,85 @@ fib_test_v4 (void)
FIB_TEST_LB_BUCKET_VIA_ADJ(&pfx_1_2_3_5_s_32, 10, ai_01);
FIB_TEST_LB_BUCKET_VIA_ADJ(&pfx_1_2_3_5_s_32, 11, ai_01);
FIB_TEST_LB_BUCKET_VIA_ADJ(&pfx_1_2_3_5_s_32, 12, ai_01);
- FIB_TEST_LB_BUCKET_VIA_ADJ(&pfx_1_2_3_5_s_32, 13, ai_02);
- FIB_TEST_LB_BUCKET_VIA_ADJ(&pfx_1_2_3_5_s_32, 14, ai_02);
- FIB_TEST_LB_BUCKET_VIA_ADJ(&pfx_1_2_3_5_s_32, 15, ai_02);
+ FIB_TEST_LB_BUCKET_VIA_ADJ(&pfx_1_2_3_5_s_32, 13, ai_12_12_12_12);
+ FIB_TEST_LB_BUCKET_VIA_ADJ(&pfx_1_2_3_5_s_32, 14, ai_12_12_12_12);
+ FIB_TEST_LB_BUCKET_VIA_ADJ(&pfx_1_2_3_5_s_32, 15, ai_12_12_12_12);
+ FIB_TEST(fib_test_urpf_is_equal(fei, FIB_FORW_CHAIN_TYPE_UNICAST_IP4, 2,
+ tm->hw[0]->sw_if_index,
+ tm->hw[1]->sw_if_index),
+ "RPF list for 1.2.3.4/32 contains both adjs");
+
+ /*
+ * A recursive via the two unequal cost entries
+ */
+ fib_prefix_t bgp_44_s_32 = {
+ .fp_len = 32,
+ .fp_proto = FIB_PROTOCOL_IP4,
+ .fp_addr = {
+ /* 68.68.68.68/32 */
+ .ip4.as_u32 = clib_host_to_net_u32(0x44444444),
+ },
+ };
+ fei = fib_table_entry_path_add(fib_index,
+ &bgp_44_s_32,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP4,
+ &pfx_1_2_3_4_s_32.fp_addr,
+ ~0,
+ fib_index,
+ 1,
+ MPLS_LABEL_INVALID,
+ FIB_ROUTE_PATH_FLAG_NONE);
+ fei = fib_table_entry_path_add(fib_index,
+ &bgp_44_s_32,
+ FIB_SOURCE_API,
+ FIB_ENTRY_FLAG_NONE,
+ FIB_PROTOCOL_IP4,
+ &pfx_1_2_3_5_s_32.fp_addr,
+ ~0,
+ fib_index,
+ 1,
+ MPLS_LABEL_INVALID,
+ FIB_ROUTE_PATH_FLAG_NONE);
+
+ FIB_TEST_REC_FORW(&bgp_44_s_32, &pfx_1_2_3_4_s_32, 0);
+ FIB_TEST_REC_FORW(&bgp_44_s_32, &pfx_1_2_3_5_s_32, 1);
+ FIB_TEST(fib_test_urpf_is_equal(fei, FIB_FORW_CHAIN_TYPE_UNICAST_IP4, 2,
+ tm->hw[0]->sw_if_index,
+ tm->hw[1]->sw_if_index),
+ "RPF list for 1.2.3.4/32 contains both adjs");
+
+ /*
+ * test the uRPF check functions
+ */
+ dpo_id_t dpo_44 = DPO_NULL;
+ index_t urpfi;
+
+ fib_entry_contribute_forwarding(fei, FIB_FORW_CHAIN_TYPE_UNICAST_IP4, &dpo_44);
+ urpfi = load_balance_get_urpf(dpo_44.dpoi_index);
+
+ FIB_TEST(fib_urpf_check(urpfi, tm->hw[0]->sw_if_index),
+ "uRPF check for 68.68.68.68/32 on %d OK",
+ tm->hw[0]->sw_if_index);
+ FIB_TEST(fib_urpf_check(urpfi, tm->hw[1]->sw_if_index),
+ "uRPF check for 68.68.68.68/32 on %d OK",
+ tm->hw[1]->sw_if_index);
+ FIB_TEST(!fib_urpf_check(urpfi, 99),
+ "uRPF check for 68.68.68.68/32 on 99 not-OK",
+ 99);
+ dpo_reset(&dpo_44);
+
+ fib_table_entry_delete(fib_index,
+ &bgp_44_s_32,
+ FIB_SOURCE_API);
fib_table_entry_delete(fib_index,
&pfx_1_2_3_5_s_32,
FIB_SOURCE_API);
+ fib_table_entry_delete(fib_index,
+ &pfx_1_2_3_4_s_32,
+ FIB_SOURCE_API);
/*
* Add a recursive route:
@@ -1053,11 +1222,13 @@ fib_test_v4 (void)
MPLS_LABEL_INVALID,
FIB_ROUTE_PATH_FLAG_NONE);
- FIB_TEST_REC_FORW(&bgp_201_pfx, &pfx_1_1_1_200_s_32);
+ FIB_TEST_REC_FORW(&bgp_201_pfx, &pfx_1_1_1_200_s_32, 0);
fei = fib_table_lookup_exact_match(fib_index, &pfx_1_1_1_200_s_32);
FIB_TEST((FIB_ENTRY_FLAG_NONE == fib_entry_get_flags(fei)),
"Flags set on RR via non-attached");
+ FIB_TEST(fib_test_urpf_is_equal(fei, FIB_FORW_CHAIN_TYPE_UNICAST_IP4, 0),
+ "RPF list for BGP route empty");
/*
* +2 entry (BGP & RR) and +1 shared-path-list
@@ -1119,8 +1290,12 @@ fib_test_v4 (void)
/*
* the recursive adj for 200.200.200.200 should be updated.
*/
- FIB_TEST_REC_FORW(&bgp_201_pfx, &pfx_1_1_1_200_s_32);
- FIB_TEST_REC_FORW(&bgp_200_pfx, &pfx_1_1_1_2_s_32);
+ FIB_TEST_REC_FORW(&bgp_201_pfx, &pfx_1_1_1_200_s_32, 0);
+ FIB_TEST_REC_FORW(&bgp_200_pfx, &pfx_1_1_1_2_s_32, 0);
+ fei = fib_table_lookup(fib_index, &bgp_200_pfx);
+ FIB_TEST(fib_test_urpf_is_equal(fei, FIB_FORW_CHAIN_TYPE_UNICAST_IP4, 1,
+ tm->hw[0]->sw_if_index),
+ "RPF list for BGP route has itf index 0");
/*
* insert a more specific route than 1.1.1.0/24 that also covers the
@@ -1166,8 +1341,8 @@ fib_test_v4 (void)
* the recursive adj for 200.200.200.200 should be updated.
* 200.200.200.201 remains unchanged.
*/
- FIB_TEST_REC_FORW(&bgp_201_pfx, &pfx_1_1_1_200_s_32);
- FIB_TEST_REC_FORW(&bgp_200_pfx, &pfx_1_1_1_2_s_32);
+ FIB_TEST_REC_FORW(&bgp_201_pfx, &pfx_1_1_1_200_s_32, 0);
+ FIB_TEST_REC_FORW(&bgp_200_pfx, &pfx_1_1_1_2_s_32, 0);
/*
* remove this /28. 200.200.200.200/32 should revert back to via 1.1.1.0/24
@@ -1187,8 +1362,8 @@ fib_test_v4 (void)
FIB_TEST((fib_table_lookup(fib_index, &pfx_1_1_1_0_s_28) ==
fib_table_lookup(fib_index, &pfx_1_1_1_0_s_24)),
"1.1.1.0/28 lookup via /24");
- FIB_TEST_REC_FORW(&bgp_201_pfx, &pfx_1_1_1_200_s_32);
- FIB_TEST_REC_FORW(&bgp_200_pfx, &pfx_1_1_1_2_s_32);
+ FIB_TEST_REC_FORW(&bgp_201_pfx, &pfx_1_1_1_200_s_32, 0);
+ FIB_TEST_REC_FORW(&bgp_200_pfx, &pfx_1_1_1_2_s_32, 0);
/*
* -1 entry. -1 shared path-list
@@ -1223,8 +1398,8 @@ fib_test_v4 (void)
FIB_TEST(load_balance_is_drop(fib_entry_contribute_ip_forwarding(fei)),
"1.1.1.200/32 route is DROP");
- FIB_TEST_REC_FORW(&bgp_201_pfx, &pfx_1_1_1_200_s_32);
- FIB_TEST_REC_FORW(&bgp_200_pfx, &pfx_1_1_1_2_s_32);
+ FIB_TEST_REC_FORW(&bgp_201_pfx, &pfx_1_1_1_200_s_32, 0);
+ FIB_TEST_REC_FORW(&bgp_200_pfx, &pfx_1_1_1_2_s_32, 0);
/*
* -1 entry
@@ -1254,8 +1429,8 @@ fib_test_v4 (void)
ai = fib_entry_get_adj(fei);
FIB_TEST((ai = ai_01), "1.1.1.2/32 resolves via 10.10.10.1");
- FIB_TEST_REC_FORW(&bgp_201_pfx, &pfx_1_1_1_200_s_32);
- FIB_TEST_REC_FORW(&bgp_200_pfx, &pfx_1_1_1_2_s_32);
+ FIB_TEST_REC_FORW(&bgp_201_pfx, &pfx_1_1_1_200_s_32, 0);
+ FIB_TEST_REC_FORW(&bgp_200_pfx, &pfx_1_1_1_2_s_32, 0);
/*
* no change. 1.1.1.2/32 was already there RR sourced.
@@ -2530,6 +2705,28 @@ fib_test_v4 (void)
"2001::/64 ADJ-adj is NH proto v4");
fib_table_entry_delete(0, &pfx_2001_s_64, FIB_SOURCE_API);
+ /*
+ * add a uRPF exempt prefix:
+ * test:
+ * - its forwarding is drop
+ * - its uRPF list is not empty
+ * - the uRPF list for the default route (its cover) is empty
+ */
+ fei = fib_table_entry_special_add(fib_index,
+ &pfx_4_1_1_1_s_32,
+ FIB_SOURCE_URPF_EXEMPT,
+ FIB_ENTRY_FLAG_DROP,
+ ADJ_INDEX_INVALID);
+ dpo = fib_entry_contribute_ip_forwarding(fei);
+ FIB_TEST(load_balance_is_drop(dpo),
+ "uRPF exempt 4.1.1.1/32 DROP");
+ FIB_TEST(fib_test_urpf_is_equal(fei, FIB_FORW_CHAIN_TYPE_UNICAST_IP4, 1, 0),
+ "uRPF list for exempt prefix has itf index 0");
+ fei = fib_table_lookup_exact_match(fib_index, &pfx_0_0_0_0_s_0);
+ FIB_TEST(fib_test_urpf_is_equal(fei, FIB_FORW_CHAIN_TYPE_UNICAST_IP4, 0),
+ "uRPF list for 0.0.0.0/0 empty");
+
+ fib_table_entry_delete(fib_index, &pfx_4_1_1_1_s_32, FIB_SOURCE_URPF_EXEMPT);
/*
* CLEANUP
@@ -2559,11 +2756,12 @@ fib_test_v4 (void)
fib_entry_pool_size());
/*
- * unlock the 2 adjacencies for which this test provided a rewrite.
+ * unlock the adjacencies for which this test provided a rewrite.
* These are the last locks on these adjs. they should thus go away.
*/
adj_unlock(ai_02);
adj_unlock(ai_01);
+ adj_unlock(ai_12_12_12_12);
FIB_TEST((0 == adj_nbr_db_size()), "ADJ DB size is %d",
adj_nbr_db_size());
@@ -2623,6 +2821,8 @@ fib_test_v4 (void)
fib_path_list_pool_size());
FIB_TEST((NBR-5 == fib_entry_pool_size()), "entry pool size is %d",
fib_entry_pool_size());
+ FIB_TEST((NBR-5 == pool_elts(fib_urpf_list_pool)), "uRPF pool size is %d",
+ pool_elts(fib_urpf_list_pool));
return;
}
@@ -5014,6 +5214,28 @@ fib_test_label (void)
"2.2.2.2.2/32 LB 1 buckets via: "
"label 1600 over 1.1.1.1");
+ dpo_id_t dpo_44 = DPO_NULL;
+ index_t urpfi;
+
+ fib_entry_contribute_forwarding(fei, FIB_FORW_CHAIN_TYPE_UNICAST_IP4, &dpo_44);
+ urpfi = load_balance_get_urpf(dpo_44.dpoi_index);
+
+ FIB_TEST(fib_urpf_check(urpfi, tm->hw[0]->sw_if_index),
+ "uRPF check for 2.2.2.2/32 on %d OK",
+ tm->hw[0]->sw_if_index);
+ FIB_TEST(fib_urpf_check(urpfi, tm->hw[1]->sw_if_index),
+ "uRPF check for 2.2.2.2/32 on %d OK",
+ tm->hw[1]->sw_if_index);
+ FIB_TEST(!fib_urpf_check(urpfi, 99),
+ "uRPF check for 2.2.2.2/32 on 99 not-OK",
+ 99);
+
+ fib_entry_contribute_forwarding(fei, FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS, &dpo_44);
+ FIB_TEST(urpfi == load_balance_get_urpf(dpo_44.dpoi_index),
+ "Shared uRPF on IP and non-EOS chain");
+
+ dpo_reset(&dpo_44);
+
/*
* we are holding a lock on the non-eos LB of the via-entry.
* do a PIC-core failover by shutting the link of the via-entry.
diff --git a/vnet/vnet/fib/fib_types.h b/vnet/vnet/fib/fib_types.h
index 232a2301..250aad76 100644
--- a/vnet/vnet/fib/fib_types.h
+++ b/vnet/vnet/fib/fib_types.h
@@ -118,10 +118,6 @@ fib_link_t fib_proto_to_link (fib_protocol_t proto);
*/
typedef enum fib_forward_chain_type_t_ {
/**
- * Contribute an object that is to be used to forward Ethernet packets
- */
- FIB_FORW_CHAIN_TYPE_ETHERNET,
- /**
* Contribute an object that is to be used to forward IP4 packets
*/
FIB_FORW_CHAIN_TYPE_UNICAST_IP4,
@@ -141,17 +137,24 @@ typedef enum fib_forward_chain_type_t_ {
* option is converted into one of the other three internally.
*/
FIB_FORW_CHAIN_TYPE_MPLS_EOS,
+ /**
+ * Contribute an object that is to be used to forward Ethernet packets.
+ * This is last in the list since it is not valid for many FIB objects,
+ * and thus their array of per-chain-type DPOs can be sized smaller.
+ */
+ FIB_FORW_CHAIN_TYPE_ETHERNET,
} __attribute__ ((packed)) fib_forward_chain_type_t;
#define FIB_FORW_CHAINS { \
- [FIB_FORW_CHAIN_TYPE_ETHERNET] = "ehternet", \
+ [FIB_FORW_CHAIN_TYPE_ETHERNET] = "ethernet", \
[FIB_FORW_CHAIN_TYPE_UNICAST_IP4] = "unicast-ip4", \
[FIB_FORW_CHAIN_TYPE_UNICAST_IP6] = "unicast-ip6", \
[FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS] = "mpls-neos", \
[FIB_FORW_CHAIN_TYPE_MPLS_EOS] = "mpls-eos", \
}
-#define FIB_FORW_CHAIN_NUM (FIB_FORW_CHAIN_TYPE_MPLS_EOS+1)
+#define FIB_FORW_CHAIN_NUM (FIB_FORW_CHAIN_TYPE_ETHERNET+1)
+#define FIB_FORW_CHAIN_MPLS_NUM (FIB_FORW_CHAIN_TYPE_MPLS_EOS+1)
#define FOR_EACH_FIB_FORW_CHAIN(_item) \
for (_item = FIB_FORW_CHAIN_TYPE_ETHERNET; \
diff --git a/vnet/vnet/fib/fib_urpf_list.c b/vnet/vnet/fib/fib_urpf_list.c
new file mode 100644
index 00000000..263812ad
--- /dev/null
+++ b/vnet/vnet/fib/fib_urpf_list.c
@@ -0,0 +1,260 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/fib/fib_urpf_list.h>
+#include <vnet/adj/adj.h>
+
+/**
+ * @brief pool of all fib_urpf_list
+ */
+fib_urpf_list_t *fib_urpf_list_pool;
+
+u8 *
+format_fib_urpf_list (u8 *s, va_list args)
+{
+ fib_urpf_list_t *urpf;
+ index_t ui;
+ u32 *swi;
+
+ ui = va_arg(args, index_t);
+ urpf = fib_urpf_list_get(ui);
+
+ s = format(s, "uPRF-list:%d len:%d itfs:[",
+ ui, vec_len(urpf->furpf_itfs));
+
+ vec_foreach(swi, urpf->furpf_itfs)
+ {
+ s = format(s, "%d, ", *swi);
+ }
+ s = format(s, "]");
+
+ return (s);
+}
+
+index_t
+fib_urpf_list_alloc_and_lock (void)
+{
+ fib_urpf_list_t *urpf;
+
+ pool_get(fib_urpf_list_pool, urpf);
+ memset(urpf, 0, sizeof(*urpf));
+
+ urpf->furpf_locks++;
+
+ return (urpf - fib_urpf_list_pool);
+}
+
+void
+fib_urpf_list_unlock (index_t ui)
+{
+ fib_urpf_list_t *urpf;
+
+ if (INDEX_INVALID == ui)
+ return;
+
+ urpf = fib_urpf_list_get(ui);
+
+ urpf->furpf_locks--;
+
+ if (0 == urpf->furpf_locks)
+ {
+ vec_free(urpf->furpf_itfs);
+ pool_put(fib_urpf_list_pool, urpf);
+ }
+}
+
+void
+fib_urpf_list_lock (index_t ui)
+{
+ fib_urpf_list_t *urpf;
+
+ urpf = fib_urpf_list_get(ui);
+
+ urpf->furpf_locks++;
+}
+
+/**
+ * @brief Append another interface to the list.
+ */
+void
+fib_urpf_list_append (index_t ui,
+ u32 sw_if_index)
+{
+ fib_urpf_list_t *urpf;
+
+ urpf = fib_urpf_list_get(ui);
+
+ vec_add1(urpf->furpf_itfs, sw_if_index);
+}
+
+/**
+ * @brief Combine two interface lists
+ */
+void
+fib_urpf_list_combine (index_t ui1,
+ index_t ui2)
+{
+ fib_urpf_list_t *urpf1, *urpf2;
+
+ urpf1 = fib_urpf_list_get(ui1);
+ urpf2 = fib_urpf_list_get(ui2);
+
+ vec_append(urpf1->furpf_itfs, urpf2->furpf_itfs);
+}
+
+/**
+ * @brief Sort the interface indices.
+ * The sort is the first step in obtaining a unique list, so the order,
+ * w.r.t. next-hop, interface, etc. is not important. So a sort based on the
+ * index is all we need.
+ */
+static int
+fib_urpf_itf_cmp_for_sort (void * v1,
+ void * v2)
+{
+ fib_node_index_t *i1 = v1, *i2 = v2;
+
+ return (*i2 < *i1);
+}
+
+/**
+ * @brief Convert the uRPF list from the itf set obtained during the walk
+ * to a unique list.
+ */
+void
+fib_urpf_list_bake (index_t ui)
+{
+ fib_urpf_list_t *urpf;
+
+ urpf = fib_urpf_list_get(ui);
+
+ ASSERT(!(urpf->furpf_flags & FIB_URPF_LIST_BAKED));
+
+ if (vec_len(urpf->furpf_itfs) > 1)
+ {
+ u32 i,j;
+
+ /*
+ * cat list | sort | uniq > rpf_list
+ */
+ vec_sort_with_function(urpf->furpf_itfs, fib_urpf_itf_cmp_for_sort);
+
+ i = 0, j = 1;
+ while (j < vec_len(urpf->furpf_itfs))
+ {
+ if (urpf->furpf_itfs[i] == urpf->furpf_itfs[j])
+ {
+ /*
+ * the adjacent entries are the same.
+ * search forward for a unique one
+ */
+ while (urpf->furpf_itfs[i] == urpf->furpf_itfs[j] &&
+ j < vec_len(urpf->furpf_itfs))
+ {
+ j++;
+ }
+ if (j == vec_len(urpf->furpf_itfs))
+ {
+ /*
+ * ran off the end without finding a unique index.
+ * we are done.
+ */
+ break;
+ }
+ else
+ {
+ urpf->furpf_itfs[i+1] = urpf->furpf_itfs[j];
+ }
+ }
+ i++, j++;
+ }
+
+ /*
+ * set the length of the vector to the number of unique itfs
+ */
+ _vec_len(urpf->furpf_itfs) = i+1;
+ }
+
+ urpf->furpf_flags |= FIB_URPF_LIST_BAKED;
+}
+
+void
+fib_urpf_list_show_mem (void)
+{
+ fib_show_memory_usage("uRPF-list",
+ pool_elts(fib_urpf_list_pool),
+ pool_len(fib_urpf_list_pool),
+ sizeof(fib_urpf_list_t));
+}
+
+static clib_error_t *
+show_fib_urpf_list_command (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ index_t ui;
+
+ if (unformat (input, "%d", &ui))
+ {
+ /*
+ * show one in detail
+ */
+ if (!pool_is_free_index(fib_urpf_list_pool, ui))
+ {
+ vlib_cli_output (vm, "%d@%U",
+ ui,
+ format_fib_urpf_list, ui);
+ }
+ else
+ {
+ vlib_cli_output (vm, "uRPF %d invalid", ui);
+ }
+ }
+ else
+ {
+ /*
+ * show all
+ */
+ vlib_cli_output (vm, "FIB uRPF Entries:");
+ pool_foreach_index(ui, fib_urpf_list_pool,
+ ({
+ vlib_cli_output (vm, "%d@%U",
+ ui,
+ format_fib_urpf_list, ui);
+ }));
+ }
+
+ return (NULL);
+}
+
+/* *INDENT-OFF* */
+/*?
+ * The '<em>sh fib uRPF [index] </em>' command displays the uRPF lists
+ *
+ * @cliexpar
+ * @cliexstart{show fib uRPF}
+ * FIB uRPF Entries:
+ * 0@uRPF-list:0 len:0 itfs:[]
+ * 1@uRPF-list:1 len:2 itfs:[1, 2, ]
+ * 2@uRPF-list:2 len:1 itfs:[3, ]
+ * 3@uRPF-list:3 len:1 itfs:[9, ]
+ * @cliexend
+?*/
+VLIB_CLI_COMMAND (show_fib_urpf_list, static) = {
+ .path = "show fib uRPF",
+ .function = show_fib_urpf_list_command,
+ .short_help = "show fib uRPF",
+};
+/* *INDENT-ON* */
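
A unit-test-style sketch of the alloc/append/bake/unlock cycle above, assuming one append per path so duplicates occur whenever paths share an interface; the literal interface indices are made up for illustration.

    index_t ui = fib_urpf_list_alloc_and_lock ();

    /* two paths out of sw_if_index 1 and two out of sw_if_index 2 */
    fib_urpf_list_append (ui, 1);
    fib_urpf_list_append (ui, 2);
    fib_urpf_list_append (ui, 1);
    fib_urpf_list_append (ui, 2);

    fib_urpf_list_bake (ui);                   /* sort|uniq => [1, 2] */

    ASSERT (2 == fib_urpf_check_size (ui));
    ASSERT (fib_urpf_check (ui, 1) && !fib_urpf_check (ui, 3));

    fib_urpf_list_unlock (ui);                 /* the last unlock frees the list */
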
diff --git a/vnet/vnet/fib/fib_urpf_list.h b/vnet/vnet/fib/fib_urpf_list.h
new file mode 100644
index 00000000..09f47574
--- /dev/null
+++ b/vnet/vnet/fib/fib_urpf_list.h
@@ -0,0 +1,146 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * @brief A unicast RPF list.
+ * The uRPF list is the set of interfaces that a prefix can be reached through.
+ * There are 3 levels of RPF check:
+ * - do we have any route to the source (i.e. it's not drop)
+ * - did the packet arrive on an interface that the source is reachable through
+ * - did the packet arrive from a peer that the source is reachable through
+ * we don't support the last. But it could be done by storing adjs in the uRPF
+ * list rather than interface indices.
+ *
+ * these conditions are checked against the list by:
+ * - the list is not empty
+ * - there is an interface in the list that is on the input interface.
+ * - there is an adj in the list whose MAC address matches the packet's
+ * source MAC and input interface.
+ *
+ * To speed the last two checks the interface list only needs to have the unique
+ * interfaces present. If the uRPF check was instead implemented by forward
+ * walking the DPO chain, then that walk would encounter a great deal of
+ * non-adjacency objects (i.e. load-balances, mpls-labels, etc) and potentially
+ * the same adjacency many times (esp. when UCMP is used).
+ * To that end the uRPF list is a collapsed, unique interface only list.
+ */
+
+#ifndef __FIB_URPF_LIST_H__
+#define __FIB_URPF_LIST_H__
+
+#include <vnet/fib/fib_types.h>
+#include <vnet/adj/adj.h>
+
+/**
+ * @brief flags
+ */
+typedef enum fib_urpf_list_flag_t_
+{
+ /**
+ * @brief Set to indicate that the uRPF list has already been baked.
+ * This is protection against it being baked more than once. These
+ * are not chunky fries - once is enough.
+ */
+ FIB_URPF_LIST_BAKED = (1 << 0),
+} fib_urpf_list_flag_t;
+
+typedef struct fib_urpf_list_t_
+{
+ /**
+ * The list of interfaces that comprise the allowed accepting interfaces
+ */
+ adj_index_t *furpf_itfs;
+
+ /**
+ * flags
+ */
+ fib_urpf_list_flag_t furpf_flags;
+
+ /**
+ * uRPF lists are shared amongst many entries so we require a locking
+ * mechanism.
+ */
+ u32 furpf_locks;
+} fib_urpf_list_t;
+
+extern index_t fib_urpf_list_alloc_and_lock(void);
+extern void fib_urpf_list_unlock(index_t urpf);
+extern void fib_urpf_list_lock(index_t urpf);
+
+extern void fib_urpf_list_append(index_t urpf, adj_index_t adj);
+extern void fib_urpf_list_combine(index_t urpf1, index_t urpf2);
+
+extern void fib_urpf_list_bake(index_t urpf);
+
+extern u8 *format_fib_urpf_list(u8 *s, va_list ap);
+
+extern void fib_urpf_list_show_mem(void);
+
+/**
+ * @brief pool of all fib_urpf_list
+ */
+extern fib_urpf_list_t *fib_urpf_list_pool;
+
+static inline fib_urpf_list_t *
+fib_urpf_list_get (index_t index)
+{
+ return (pool_elt_at_index(fib_urpf_list_pool, index));
+}
+
+/**
+ * @brief Data-Plane function to check an input interface against an uRPF list
+ *
+ * @param ui The uRPF list index to check against. Get this from the load-balance
+ * object that is the result of the FIB lookup
+ * @param sw_if_index The SW interface index to validate
+ *
+ * @return 1 if the interface is found, 0 otherwise
+ */
+always_inline int
+fib_urpf_check (index_t ui, u32 sw_if_index)
+{
+ fib_urpf_list_t *urpf;
+ u32 *swi;
+
+ urpf = fib_urpf_list_get(ui);
+
+ vec_foreach(swi, urpf->furpf_itfs)
+ {
+ if (*swi == sw_if_index)
+ return (1);
+ }
+
+ return (0);
+}
+
+/**
+ * @brief Data-Plane function to check the size of an uRPF list, (i.e. the number
+ * of interfaces in the list).
+ *
+ * @param ui The uRPF list index to check against. Get this from the load-balance
+ * object that is the result of the FIB lookup
+ *
+ * @return the number of interfaces in the list
+ */
+always_inline int
+fib_urpf_check_size (index_t ui)
+{
+ fib_urpf_list_t *urpf;
+
+ urpf = fib_urpf_list_get(ui);
+
+ return (vec_len(urpf->furpf_itfs));
+}
+
+#endif
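
The two check flavours this header enables can be summarised in a few lines; the wrapper name below is illustrative only, since the data-plane nodes changed further down (ip4_source_check.c and ip4_local) inline the calls directly.

    /* strict: the source must be reachable through the arrival interface;
     * loose:  any route to the source, i.e. a non-empty uRPF list */
    static inline int
    example_urpf_pass (index_t ui, u32 rx_sw_if_index, int strict)
    {
        if (strict)
            return (fib_urpf_check (ui, rx_sw_if_index));

        return (0 != fib_urpf_check_size (ui));
    }
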
diff --git a/vnet/vnet/gre/gre.c b/vnet/vnet/gre/gre.c
index 0028118d..aa6fca0f 100644
--- a/vnet/vnet/gre/gre.c
+++ b/vnet/vnet/gre/gre.c
@@ -365,6 +365,11 @@ gre_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
u32 ti;
hi = vnet_get_hw_interface (vnm, hw_if_index);
+
+ if (NULL == gm->tunnel_index_by_sw_if_index ||
+ hi->sw_if_index >= vec_len(gm->tunnel_index_by_sw_if_index))
+ return (NULL);
+
ti = gm->tunnel_index_by_sw_if_index[hi->sw_if_index];
if (~0 == ti)
diff --git a/vnet/vnet/ip/ip4_forward.c b/vnet/vnet/ip/ip4_forward.c
index b8d326f1..72c9fb0e 100644
--- a/vnet/vnet/ip/ip4_forward.c
+++ b/vnet/vnet/ip/ip4_forward.c
@@ -46,6 +46,7 @@
#include <vnet/api_errno.h> /* for API error numbers */
#include <vnet/fib/fib_table.h> /* for FIB table and entry creation */
#include <vnet/fib/fib_entry.h> /* for FIB table and entry creation */
+#include <vnet/fib/fib_urpf_list.h> /* for FIB uRPF check */
#include <vnet/fib/ip4_fib.h>
#include <vnet/dpo/load_balance.h>
#include <vnet/dpo/classify_dpo.h>
@@ -1501,31 +1502,31 @@ ip4_local (vlib_main_t * vm,
/*
* Must have a route to source otherwise we drop the packet.
* ip4 broadcasts are accepted, e.g. to make dhcp client work
+ *
+ * The checks are:
+ * - the source is a receive => it's from us => bogus, do this
+ * first since it sets a different error code.
+ * - uRPF check for any route to source - accept if passes.
+ * - allow packets destined to the broadcast address from unknown sources
*/
- error0 = (error0 == IP4_ERROR_UNKNOWN_PROTOCOL
- && dpo0->dpoi_type != DPO_ADJACENCY
- && dpo0->dpoi_type != DPO_ADJACENCY_INCOMPLETE
- && dpo0->dpoi_type != DPO_RECEIVE
- && dpo0->dpoi_type != DPO_DROP
- && dpo0->dpoi_type != DPO_ADJACENCY_GLEAN
- && ip0->dst_address.as_u32 != 0xFFFFFFFF
- ? IP4_ERROR_SRC_LOOKUP_MISS
- : error0);
- error0 = (dpo0->dpoi_type == DPO_RECEIVE ?
+ error0 = ((error0 == IP4_ERROR_UNKNOWN_PROTOCOL &&
+ dpo0->dpoi_type == DPO_RECEIVE) ?
IP4_ERROR_SPOOFED_LOCAL_PACKETS :
error0);
- error1 = (error1 == IP4_ERROR_UNKNOWN_PROTOCOL
- && dpo1->dpoi_type != DPO_ADJACENCY
- && dpo1->dpoi_type != DPO_ADJACENCY_INCOMPLETE
- && dpo1->dpoi_type != DPO_RECEIVE
- && dpo1->dpoi_type != DPO_DROP
- && dpo1->dpoi_type != DPO_ADJACENCY_GLEAN
- && ip1->dst_address.as_u32 != 0xFFFFFFFF
+ error0 = ((error0 == IP4_ERROR_UNKNOWN_PROTOCOL &&
+ !fib_urpf_check_size(lb0->lb_urpf) &&
+ ip0->dst_address.as_u32 != 0xFFFFFFFF)
? IP4_ERROR_SRC_LOOKUP_MISS
- : error1);
- error1 = (dpo0->dpoi_type == DPO_RECEIVE ?
+ : error0);
+ error1 = ((error1 == IP4_ERROR_UNKNOWN_PROTOCOL &&
+ dpo1->dpoi_type == DPO_RECEIVE) ?
IP4_ERROR_SPOOFED_LOCAL_PACKETS :
error1);
+ error1 = ((error1 == IP4_ERROR_UNKNOWN_PROTOCOL &&
+ !fib_urpf_check_size(lb1->lb_urpf) &&
+ ip1->dst_address.as_u32 != 0xFFFFFFFF)
+ ? IP4_ERROR_SRC_LOOKUP_MISS
+ : error1);
next0 = lm->local_next_by_ip_protocol[proto0];
next1 = lm->local_next_by_ip_protocol[proto1];
@@ -1670,20 +1671,15 @@ ip4_local (vlib_main_t * vm,
vnet_buffer (p0)->ip.adj_index[VLIB_RX] =
dpo0->dpoi_index;
- /* Must have a route to source otherwise we drop the packet. */
- error0 = (error0 == IP4_ERROR_UNKNOWN_PROTOCOL
- && dpo0->dpoi_type != DPO_ADJACENCY
- && dpo0->dpoi_type != DPO_ADJACENCY_INCOMPLETE
- && dpo0->dpoi_type != DPO_RECEIVE
- && dpo0->dpoi_type != DPO_DROP
- && dpo0->dpoi_type != DPO_ADJACENCY_GLEAN
- && ip0->dst_address.as_u32 != 0xFFFFFFFF
- ? IP4_ERROR_SRC_LOOKUP_MISS
- : error0);
- /* Packet originated from a local address => spoofing */
- error0 = (dpo0->dpoi_type == DPO_RECEIVE ?
+ error0 = ((error0 == IP4_ERROR_UNKNOWN_PROTOCOL &&
+ dpo0->dpoi_type == DPO_RECEIVE) ?
IP4_ERROR_SPOOFED_LOCAL_PACKETS :
error0);
+ error0 = ((error0 == IP4_ERROR_UNKNOWN_PROTOCOL &&
+ !fib_urpf_check_size(lb0->lb_urpf) &&
+ ip0->dst_address.as_u32 != 0xFFFFFFFF)
+ ? IP4_ERROR_SRC_LOOKUP_MISS
+ : error0);
next0 = lm->local_next_by_ip_protocol[proto0];
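
A hedged restatement of the error selection above as a standalone helper (the function name and its free-standing form are illustrative; the node keeps the dual- and single-packet variants inline):

    static inline u8
    example_local_src_error (u8 error, const dpo_id_t * src_dpo,
                             const load_balance_t * src_lb, u32 dst_address)
    {
        /* the source resolves to one of our own addresses => spoofed */
        if (error == IP4_ERROR_UNKNOWN_PROTOCOL &&
            src_dpo->dpoi_type == DPO_RECEIVE)
            return (IP4_ERROR_SPOOFED_LOCAL_PACKETS);

        /* loose uRPF: an empty list means no route back to the source;
         * IP broadcasts are exempt so DHCP clients keep working */
        if (error == IP4_ERROR_UNKNOWN_PROTOCOL &&
            0 == fib_urpf_check_size (src_lb->lb_urpf) &&
            dst_address != 0xFFFFFFFF)
            return (IP4_ERROR_SRC_LOOKUP_MISS);

        return (error);
    }
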
diff --git a/vnet/vnet/ip/ip4_source_check.c b/vnet/vnet/ip/ip4_source_check.c
index 2323ac29..97d47031 100644
--- a/vnet/vnet/ip/ip4_source_check.c
+++ b/vnet/vnet/ip/ip4_source_check.c
@@ -39,10 +39,12 @@
#include <vnet/ip/ip.h>
#include <vnet/fib/ip4_fib.h>
+#include <vnet/fib/fib_urpf_list.h>
#include <vnet/dpo/load_balance.h>
typedef struct {
u8 packet_data[64];
+ index_t urpf;
} ip4_source_check_trace_t;
static u8 * format_ip4_source_check_trace (u8 * s, va_list * va)
@@ -69,11 +71,7 @@ typedef enum {
} ip4_source_check_type_t;
typedef union {
- struct {
- u32 no_default_route : 1;
- u32 fib_index : 31;
- };
- u32 as_u32[1];
+ u32 fib_index;
} ip4_source_check_config_t;
always_inline uword
@@ -115,9 +113,6 @@ ip4_source_check_inline (vlib_main_t * vm,
const load_balance_t * lb0, * lb1;
u32 pi0, next0, pass0, lb_index0;
u32 pi1, next1, pass1, lb_index1;
- const ip_adjacency_t *adj0, *adj1;
- const dpo_id_t *dpo0, *dpo1;
- u32 ii0, ii1;
/* Prefetch next iteration. */
{
@@ -182,61 +177,18 @@ ip4_source_check_inline (vlib_main_t * vm,
pass0 = ip4_address_is_multicast (&ip0->src_address) || ip0->src_address.as_u32 == clib_host_to_net_u32(0xFFFFFFFF);
pass1 = ip4_address_is_multicast (&ip1->src_address) || ip1->src_address.as_u32 == clib_host_to_net_u32(0xFFFFFFFF);
- if (PREDICT_TRUE(1 == lb0->lb_n_buckets))
- {
- dpo0 = load_balance_get_bucket_i(lb0, 0);
- if (PREDICT_TRUE(dpo0->dpoi_type == DPO_ADJACENCY))
- {
- pass0 |= (source_check_type ==
- IP4_SOURCE_CHECK_REACHABLE_VIA_ANY);
- adj0 = adj_get(dpo0->dpoi_index);
- pass0 |= (vnet_buffer (p0)->sw_if_index[VLIB_RX] ==
- adj0->rewrite_header.sw_if_index);
- }
- }
- else
- {
- for (ii0 = 0; ii0 < lb0->lb_n_buckets && !pass0; ii0++)
- {
- dpo0 = load_balance_get_bucket_i(lb0, ii0);
- if (PREDICT_TRUE(dpo0->dpoi_type == DPO_ADJACENCY))
- {
- pass0 |= (source_check_type ==
- IP4_SOURCE_CHECK_REACHABLE_VIA_ANY);
- adj0 = adj_get(dpo0->dpoi_index);
- pass0 |= (vnet_buffer (p0)->sw_if_index[VLIB_RX] ==
- adj0->rewrite_header.sw_if_index);
- }
- }
- }
- if (PREDICT_TRUE(1 == lb1->lb_n_buckets))
- {
- dpo1 = load_balance_get_bucket_i(lb1, 0);
- if (PREDICT_TRUE(dpo1->dpoi_type == DPO_ADJACENCY))
- {
- pass1 |= (source_check_type ==
- IP4_SOURCE_CHECK_REACHABLE_VIA_ANY);
- adj1 = adj_get(dpo1->dpoi_index);
- pass1 |= (vnet_buffer (p1)->sw_if_index[VLIB_RX] ==
- adj1->rewrite_header.sw_if_index);
- }
- }
- else
- {
- for (ii1 = 0; ii1 < lb1->lb_n_buckets && !pass1; ii1++)
- {
- dpo1 = load_balance_get_bucket_i(lb1, ii1);
- if (PREDICT_TRUE(dpo1->dpoi_type == DPO_ADJACENCY))
- {
- pass1 |= (source_check_type ==
- IP4_SOURCE_CHECK_REACHABLE_VIA_ANY);
- adj1 = adj_get(dpo1->dpoi_index);
- pass1 |= (vnet_buffer (p1)->sw_if_index[VLIB_RX] ==
- adj1->rewrite_header.sw_if_index);
- }
- }
- }
-
+ if (IP4_SOURCE_CHECK_REACHABLE_VIA_RX == source_check_type)
+ {
+ pass0 |= fib_urpf_check(lb0->lb_urpf,
+ vnet_buffer (p0)->sw_if_index[VLIB_RX]);
+ pass1 |= fib_urpf_check(lb1->lb_urpf,
+ vnet_buffer (p1)->sw_if_index[VLIB_RX]);
+ }
+ else
+ {
+ pass0 |= fib_urpf_check_size(lb0->lb_urpf);
+ pass1 |= fib_urpf_check_size(lb1->lb_urpf);
+ }
next0 = (pass0 ? next0 : IP4_SOURCE_CHECK_NEXT_DROP);
next1 = (pass1 ? next1 : IP4_SOURCE_CHECK_NEXT_DROP);
@@ -255,11 +207,8 @@ ip4_source_check_inline (vlib_main_t * vm,
ip4_fib_mtrie_t * mtrie0;
ip4_fib_mtrie_leaf_t leaf0;
ip4_source_check_config_t * c0;
- ip_adjacency_t * adj0;
u32 pi0, next0, pass0, lb_index0;
const load_balance_t * lb0;
- const dpo_id_t *dpo0;
- u32 ii0;
pi0 = from[0];
to_next[0] = pi0;
@@ -295,33 +244,15 @@ ip4_source_check_inline (vlib_main_t * vm,
/* Pass multicast. */
pass0 = ip4_address_is_multicast (&ip0->src_address) || ip0->src_address.as_u32 == clib_host_to_net_u32(0xFFFFFFFF);
- if (PREDICT_TRUE(1 == lb0->lb_n_buckets))
- {
- dpo0 = load_balance_get_bucket_i(lb0, 0);
- if (PREDICT_TRUE(dpo0->dpoi_type == DPO_ADJACENCY))
- {
- pass0 |= (source_check_type ==
- IP4_SOURCE_CHECK_REACHABLE_VIA_ANY);
- adj0 = adj_get(dpo0->dpoi_index);
- pass0 |= (vnet_buffer (p0)->sw_if_index[VLIB_RX] ==
- adj0->rewrite_header.sw_if_index);
- }
- }
- else
- {
- for (ii0 = 0; ii0 < lb0->lb_n_buckets && !pass0; ii0++)
- {
- dpo0 = load_balance_get_bucket_i(lb0, ii0);
- if (PREDICT_TRUE(dpo0->dpoi_type == DPO_ADJACENCY))
- {
- pass0 |= (source_check_type ==
- IP4_SOURCE_CHECK_REACHABLE_VIA_ANY);
- adj0 = adj_get(dpo0->dpoi_index);
- pass0 |= (vnet_buffer (p0)->sw_if_index[VLIB_RX] ==
- adj0->rewrite_header.sw_if_index);
- }
- }
- }
+ if (IP4_SOURCE_CHECK_REACHABLE_VIA_RX == source_check_type)
+ {
+ pass0 |= fib_urpf_check(lb0->lb_urpf,
+ vnet_buffer (p0)->sw_if_index[VLIB_RX]);
+ }
+ else
+ {
+ pass0 |= fib_urpf_check_size(lb0->lb_urpf);
+ }
next0 = (pass0 ? next0 : IP4_SOURCE_CHECK_NEXT_DROP);
p0->error = error_node->errors[IP4_ERROR_UNICAST_SOURCE_CHECK_FAILS];
@@ -392,6 +323,7 @@ set_ip_source_check (vlib_main_t * vm,
unformat_input_t * input,
vlib_cli_command_t * cmd)
{
+ unformat_input_t _line_input, * line_input = &_line_input;
vnet_main_t * vnm = vnet_get_main();
ip4_main_t * im = &ip4_main;
ip_lookup_main_t * lm = &im->lookup_main;
@@ -402,21 +334,37 @@ set_ip_source_check (vlib_main_t * vm,
u32 feature_index;
sw_if_index = ~0;
+ is_del = 0;
+ feature_index = im->ip4_unicast_rx_feature_source_reachable_via_rx;
- if (! unformat_user (input, unformat_vnet_sw_interface, vnm, &sw_if_index))
+ if (! unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat_user (line_input, unformat_vnet_sw_interface, vnm, &sw_if_index))
+ ;
+ else if (unformat (line_input, "del"))
+ is_del = 1;
+ else if (unformat (line_input, "strict"))
+ feature_index = im->ip4_unicast_rx_feature_source_reachable_via_rx;
+ else if (unformat (line_input, "loose"))
+ feature_index = im->ip4_unicast_rx_feature_source_reachable_via_any;
+ else
+ {
+ error = unformat_parse_error (line_input);
+ goto done;
+ }
+ }
+
+ if (~0 == sw_if_index)
{
error = clib_error_return (0, "unknown interface `%U'",
- format_unformat_error, input);
+ format_unformat_error, line_input);
goto done;
}
- is_del = 0;
- config.no_default_route = 0;
config.fib_index = im->fib_index_by_sw_if_index[sw_if_index];
- feature_index = im->ip4_unicast_rx_feature_source_reachable_via_rx;
- if (unformat (input, "del"))
- is_del = 1;
-
ci = rx_cm->config_index_by_sw_if_index[sw_if_index];
ci = (is_del
? vnet_config_del_feature
@@ -432,11 +380,121 @@ set_ip_source_check (vlib_main_t * vm,
return error;
}
+/* *INDENT-OFF* */
+/*?
+ * Add the unicast RPF check feature to an input interface
+ *
+ * @cliexpar
+ * @cliexstart{set interface ip source-check}
+ * Two flavours are supported:
+ * loose: accept an ingress packet if there is a route to reach the source
+ * strict: accept an ingress packet if it arrived on an interface which
+ * the route to the source uses, i.e. an interface that the source
+ * is reachable via.
+ * The default is strict.
+ *
+ * @cliexend
+?*/
VLIB_CLI_COMMAND (set_interface_ip_source_check_command, static) = {
.path = "set interface ip source-check",
.function = set_ip_source_check,
.short_help = "Set IP4/IP6 interface unicast source check",
};
+/* *INDENT-ON* */
+
+static clib_error_t *
+ip_source_check_accept (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, * line_input = &_line_input;
+ fib_prefix_t pfx = {
+ .fp_proto = FIB_PROTOCOL_IP4,
+ };
+ clib_error_t * error = NULL;
+ u32 table_id, is_add, fib_index;
+
+ is_add = 1;
+ table_id = ~0;
+
+ /* Get a line of input. */
+ if (! unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "table %d", &table_id))
+ ;
+ else if (unformat (line_input, "del"))
+ is_add = 0;
+ else if (unformat (line_input, "add"))
+ is_add = 1;
+ else if (unformat (line_input, "%U/%d",
+ unformat_ip4_address,
+ &pfx.fp_addr.ip4,
+ &pfx.fp_len))
+ pfx.fp_proto = FIB_PROTOCOL_IP4;
+ else
+ {
+ error = unformat_parse_error (line_input);
+ goto done;
+ }
+ }
+
+ if (~0 != table_id)
+ {
+ fib_index = fib_table_id_find_fib_index(pfx.fp_proto, table_id);
+ if (~0 == fib_index)
+ {
+ error = clib_error_return (0,
+ "Nonexistent table id %d",
+ table_id);
+ goto done;
+ }
+ }
+ else
+ {
+ fib_index = 0;
+ }
+
+ if (is_add)
+ {
+ fib_table_entry_special_add(fib_index,
+ &pfx,
+ FIB_SOURCE_URPF_EXEMPT,
+ FIB_ENTRY_FLAG_DROP,
+ ADJ_INDEX_INVALID);
+ }
+ else
+ {
+ fib_table_entry_special_remove(fib_index,
+ &pfx,
+ FIB_SOURCE_URPF_EXEMPT);
+ }
+
+done:
+ return (error);
+}
+
+/* *INDENT-OFF* */
+/*?
+ * Add an exemption for a prefix to pass the uRPF loose check. Testing purposes only.
+ *
+ * @cliexpar
+ * @cliexstart{ip rpf-accept}
+ *
+ * Add an exception for a prefix to pass the loose RPF tests. This is useful
+ * for testing purposes.
+ * VPP always performs a loose uRPF check for for-us traffic.
+ * @cliexend
+?*/
+VLIB_CLI_COMMAND (ip_source_check_accept_command, static) = {
+ .path = "ip urpf-accept",
+ .function = ip_source_check_accept,
+ .short_help = "Add a loose uRPF check exemption",
+};
+/* *INDENT-ON* */
+
/* Dummy init function to get us linked in. */
clib_error_t * ip4_source_check_init (vlib_main_t * vm)
diff --git a/vnet/vnet/ip/lookup.c b/vnet/vnet/ip/lookup.c
index 5cfdc23e..384e389d 100644
--- a/vnet/vnet/ip/lookup.c
+++ b/vnet/vnet/ip/lookup.c
@@ -598,12 +598,20 @@ vnet_ip_route_cmd (vlib_main_t * vm,
{
for (j = 0; j < vec_len (rpaths); j++)
{
+ u32 fi;
/*
* the CLI parsing stored table Ids, swap to FIB indicies
*/
- rpaths[i].frp_fib_index =
- fib_table_id_find_fib_index(prefixs[i].fp_proto,
- rpaths[i].frp_fib_index);
+ fi = fib_table_id_find_fib_index(prefixs[i].fp_proto,
+ rpaths[i].frp_fib_index);
+
+ if (~0 == fi)
+ {
+ error = clib_error_return(0 , "Via table %d does not exist",
+ rpaths[i].frp_fib_index);
+ goto done;
+ }
+ rpaths[i].frp_fib_index = fi;
fib_prefix_t rpfx = {
.fp_len = prefixs[i].fp_len,