author     Neale Ranns <nranns@cisco.com>           2016-10-03 13:05:48 +0100
committer  Damjan Marion <dmarion.lists@gmail.com>  2016-10-07 21:32:24 +0000
commit     3ee44040c66cbe47ff292ac7fb0badccbe2afe6d
tree       a52a4dd0750467845f237ee5e4e88aa95ea11bab
parent     4a7e58bf481adb843707eec4a81213776a6d5212
unicast RPF for FIB2.0
In a hierarchical FIB, performing a unicast RPF check would require traversing the data-plane graph to seek out all the adjacency objects and then reading those to find their interfaces. This is not efficient. Instead, for each path-list we construct a list of unique input interfaces and link this uRPF-list against the entry in the prefix table. In the data-plane the uRPF-list can be retrieved from the load-balance lookup result, and the RPF check is then a simple and efficient walk across the minimal interface list. The uRPF-list is maintained as the routing hierarchy changes, in a similar way to the data-plane object graph. We also provide a knob to allow an arbitrary prefix to pass the loose check.

Change-Id: Ie7c0ae3c4483ef467cfd5b136ee0315ff98ec15b
Signed-off-by: Neale Ranns <nranns@cisco.com>
Diffstat (limited to 'vnet/vnet/dpo/load_balance.c')
-rw-r--r--  vnet/vnet/dpo/load_balance.c  37
1 file changed, 36 insertions(+), 1 deletion(-)
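To illustrate the data-plane walk the commit message describes, here is a minimal, self-contained sketch. It is not VPP's actual API: the urpf_list_t type, its field names, and the check helpers are hypothetical, chosen only to show how the RPF decision reduces to scanning a small interface vector attached to the lookup result.

```c
#include <stdbool.h>
#include <stdint.h>

typedef uint32_t sw_if_index_t;

/* Hypothetical uRPF list: the set of unique input interfaces
 * collected from the entry's path-list. Kept deliberately small. */
typedef struct urpf_list_t_ {
    sw_if_index_t *itfs;   /* vector of acceptable input interfaces */
    uint32_t n_itfs;       /* number of entries in the vector */
} urpf_list_t;

/* Strict check: the packet's receive interface must appear in the
 * uRPF list attached to the load-balance returned by the FIB lookup. */
static bool
urpf_check_strict (const urpf_list_t *urpf, sw_if_index_t rx_sw_if_index)
{
    uint32_t i;

    for (i = 0; i < urpf->n_itfs; i++)
    {
        if (urpf->itfs[i] == rx_sw_if_index)
            return true;
    }
    return false;
}

/* Loose check: it is enough that the prefix is reachable at all,
 * i.e. the uRPF list is non-empty, or the prefix was explicitly
 * exempted via the configuration knob mentioned above. */
static bool
urpf_check_loose (const urpf_list_t *urpf, bool prefix_exempt)
{
    return (prefix_exempt || urpf->n_itfs > 0);
}
```

In the patch below, the load-balance object carries the index of such a list (lb->lb_urpf), so the data-plane pays one extra indexed load per lookup instead of a walk of the adjacency graph.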
diff --git a/vnet/vnet/dpo/load_balance.c b/vnet/vnet/dpo/load_balance.c
index 093661d8578..fc788508bdb 100644
--- a/vnet/vnet/dpo/load_balance.c
+++ b/vnet/vnet/dpo/load_balance.c
@@ -20,6 +20,7 @@
#include <vppinfra/math.h> /* for fabs */
#include <vnet/adj/adj.h>
#include <vnet/adj/adj_internal.h>
+#include <vnet/fib/fib_urpf_list.h>
/*
* distribution error tolerance for load-balancing
@@ -87,6 +88,7 @@ load_balance_alloc_i (void)
memset(lb, 0, sizeof(*lb));
lb->lb_map = INDEX_INVALID;
+ lb->lb_urpf = INDEX_INVALID;
vlib_validate_combined_counter(&(load_balance_main.lbm_to_counters),
load_balance_get_index(lb));
vlib_validate_combined_counter(&(load_balance_main.lbm_via_counters),
@@ -117,7 +119,7 @@ load_balance_format (index_t lbi,
s = format(s, "%U: ", format_dpo_type, DPO_LOAD_BALANCE);
s = format(s, "[index:%d buckets:%d ", lbi, lb->lb_n_buckets);
- s = format(s, "locks:%d ", lb->lb_locks);
+ s = format(s, "uRPF:%d ", lb->lb_urpf);
s = format(s, "to:[%Ld:%Ld]", to.packets, to.bytes);
if (0 != via.packets)
{
@@ -236,6 +238,35 @@ load_balance_is_drop (const dpo_id_t *dpo)
return (0);
}
+void
+load_balance_set_urpf (index_t lbi,
+ index_t urpf)
+{
+ load_balance_t *lb;
+ index_t old;
+
+ lb = load_balance_get(lbi);
+
+ /*
+ * packets in flight we see this change. but it's atomic, so :P
+ */
+ old = lb->lb_urpf;
+ lb->lb_urpf = urpf;
+
+ fib_urpf_list_unlock(old);
+ fib_urpf_list_lock(urpf);
+}
+
+index_t
+load_balance_get_urpf (index_t lbi)
+{
+ load_balance_t *lb;
+
+ lb = load_balance_get(lbi);
+
+ return (lb->lb_urpf);
+}
+
const dpo_id_t *
load_balance_get_bucket (index_t lbi,
u32 bucket)
@@ -652,6 +683,9 @@ load_balance_destroy (load_balance_t *lb)
vec_free(lb->lb_buckets);
}
+ fib_urpf_list_unlock(lb->lb_urpf);
+ load_balance_map_unlock(lb->lb_map);
+
pool_put(load_balance_pool, lb);
}
@@ -677,6 +711,7 @@ load_balance_mem_show (void)
pool_elts(load_balance_pool),
pool_len(load_balance_pool),
sizeof(load_balance_t));
+ load_balance_map_show_mem();
}
const static dpo_vft_t lb_vft = {