path: root/src/plugins
author    Maxime Peim <mpeim@cisco.com>    2024-04-15 10:13:58 +0200
committer Benoît Ganne <bganne@cisco.com>  2024-05-28 11:56:15 +0000
commit    7624083c27125d1d0abd11738f7dceb2990969ed (patch)
tree      f378c2253d9cd34a3111ff84fdc59121ab152510 /src/plugins
parent    fc42280434f669095c63d43b3af8d73e3adce366 (diff)
urpf: node refactor
Type: refactor

Change-Id: Icb3c6cbe1425331c7a8a4b0dd583389f2257befa
Signed-off-by: Maxime Peim <mpeim@cisco.com>
Diffstat (limited to 'src/plugins')
-rw-r--r--  src/plugins/urpf/urpf_dp.h | 324
1 file changed, 179 insertions(+), 145 deletions(-)
diff --git a/src/plugins/urpf/urpf_dp.h b/src/plugins/urpf/urpf_dp.h
index 816d8b70b90..bca41863d41 100644
--- a/src/plugins/urpf/urpf_dp.h
+++ b/src/plugins/urpf/urpf_dp.h
@@ -56,6 +56,7 @@
typedef struct
{
index_t urpf;
+ u32 fib_index;
} urpf_trace_t;
static u8 *
@@ -65,9 +66,7 @@ format_urpf_trace (u8 * s, va_list * va)
CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
urpf_trace_t *t = va_arg (*va, urpf_trace_t *);
- s = format (s, "uRPF:%d", t->urpf);
-
- return s;
+ return format (s, "uRPF:%d fib:%d", t->urpf, t->fib_index);
}
#define foreach_urpf_error \
@@ -87,10 +86,157 @@ typedef enum
URPF_N_NEXT,
} urpf_next_t;
+static_always_inline u32
+urpf_get_fib_index (vlib_buffer_t *b, ip_address_family_t af, vlib_dir_t dir)
+{
+ u32 sw_if_index = vnet_buffer (b)->sw_if_index[dir];
+ return vec_elt (urpf_cfgs[af][dir], sw_if_index).fib_index;
+}
+
+static_always_inline void
+urpf_perform_check_x1 (ip_address_family_t af, vlib_dir_t dir,
+ urpf_mode_t mode, vlib_buffer_t *b, const u8 *h,
+ u32 fib_index, load_balance_t **lb, u32 *pass)
+{
+ load_balance_t *llb;
+ u32 lpass;
+ u32 lb_index;
+
+ ASSERT (fib_index != ~0);
+
+ if (AF_IP4 == af)
+ {
+ const ip4_header_t *ip;
+
+ ip = (ip4_header_t *) h;
+
+ lb_index = ip4_fib_forwarding_lookup (fib_index, &ip->src_address);
+
+ /* Pass multicast. */
+ lpass = (ip4_address_is_multicast (&ip->src_address) ||
+ ip4_address_is_global_broadcast (&ip->src_address));
+ }
+ else
+ {
+ const ip6_header_t *ip;
+
+ ip = (ip6_header_t *) h;
+
+ lb_index = ip6_fib_table_fwding_lookup (fib_index, &ip->src_address);
+ lpass = ip6_address_is_multicast (&ip->src_address);
+ }
+
+ llb = load_balance_get (lb_index);
+
+ if (URPF_MODE_STRICT == mode)
+ {
+ int res;
+
+ res = fib_urpf_check (llb->lb_urpf, vnet_buffer (b)->sw_if_index[dir]);
+ if (VLIB_RX == dir)
+ lpass |= res;
+ else
+ {
+ lpass |= !res && fib_urpf_check_size (llb->lb_urpf);
+ lpass |= b->flags & VNET_BUFFER_F_LOCALLY_ORIGINATED;
+ }
+ }
+ else
+ lpass |= fib_urpf_check_size (llb->lb_urpf);
+
+ *lb = llb;
+ *pass = lpass;
+}
+
+static_always_inline void
+urpf_perform_check_x2 (ip_address_family_t af, vlib_dir_t dir,
+ urpf_mode_t mode, vlib_buffer_t *b0, vlib_buffer_t *b1,
+ const u8 *h0, const u8 *h1, u32 fib_index0,
+ u32 fib_index1, load_balance_t **lb0,
+ load_balance_t **lb1, u32 *pass0, u32 *pass1)
+{
+ load_balance_t *llb0, *llb1;
+ u32 lpass0, lpass1;
+ u32 lb_index0, lb_index1;
+
+ ASSERT (fib_index0 != ~0);
+ ASSERT (fib_index1 != ~0);
+
+ if (AF_IP4 == af)
+ {
+ const ip4_header_t *ip0, *ip1;
+
+ ip0 = (ip4_header_t *) h0;
+ ip1 = (ip4_header_t *) h1;
+
+ ip4_fib_forwarding_lookup_x2 (fib_index0, fib_index1, &ip0->src_address,
+ &ip1->src_address, &lb_index0, &lb_index1);
+ /* Pass multicast. */
+ lpass0 = (ip4_address_is_multicast (&ip0->src_address) ||
+ ip4_address_is_global_broadcast (&ip0->src_address));
+ lpass1 = (ip4_address_is_multicast (&ip1->src_address) ||
+ ip4_address_is_global_broadcast (&ip1->src_address));
+ }
+ else
+ {
+ const ip6_header_t *ip0, *ip1;
+
+ ip0 = (ip6_header_t *) h0;
+ ip1 = (ip6_header_t *) h1;
+
+ lb_index0 = ip6_fib_table_fwding_lookup (fib_index0, &ip0->src_address);
+ lb_index1 = ip6_fib_table_fwding_lookup (fib_index1, &ip1->src_address);
+ lpass0 = ip6_address_is_multicast (&ip0->src_address);
+ lpass1 = ip6_address_is_multicast (&ip1->src_address);
+ }
+
+ llb0 = load_balance_get (lb_index0);
+ llb1 = load_balance_get (lb_index1);
+
+ if (URPF_MODE_STRICT == mode)
+ {
+      /* for RX the check is: would this source address be
+       * forwarded out of the interface on which it was received,
+       * if yes allow. For TX it is: would this source address be
+       * forwarded out of the interface through which it is being
+       * sent, if yes drop.
+       */
+ int res0, res1;
+
+ res0 =
+ fib_urpf_check (llb0->lb_urpf, vnet_buffer (b0)->sw_if_index[dir]);
+ res1 =
+ fib_urpf_check (llb1->lb_urpf, vnet_buffer (b1)->sw_if_index[dir]);
+
+ if (VLIB_RX == dir)
+ {
+ lpass0 |= res0;
+ lpass1 |= res1;
+ }
+ else
+ {
+ lpass0 |= !res0 && fib_urpf_check_size (llb0->lb_urpf);
+ lpass1 |= !res1 && fib_urpf_check_size (llb1->lb_urpf);
+
+ /* allow locally generated */
+ lpass0 |= b0->flags & VNET_BUFFER_F_LOCALLY_ORIGINATED;
+ lpass1 |= b1->flags & VNET_BUFFER_F_LOCALLY_ORIGINATED;
+ }
+ }
+ else
+ {
+ lpass0 |= fib_urpf_check_size (llb0->lb_urpf);
+ lpass1 |= fib_urpf_check_size (llb1->lb_urpf);
+ }
+
+ *lb0 = llb0;
+ *lb1 = llb1;
+ *pass0 = lpass0;
+ *pass1 = lpass1;
+}
+
static_always_inline uword
-urpf_inline (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * frame,
+urpf_inline (vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame,
ip_address_family_t af, vlib_dir_t dir, urpf_mode_t mode)
{
vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
@@ -106,8 +252,8 @@ urpf_inline (vlib_main_t * vm,
while (n_left >= 4)
{
- u32 pass0, lb_index0, pass1, lb_index1;
- const load_balance_t *lb0, *lb1;
+ u32 pass0, pass1;
+ load_balance_t *lb0 = 0, *lb1 = 0;
u32 fib_index0, fib_index1;
const u8 *h0, *h1;
@@ -121,87 +267,32 @@ urpf_inline (vlib_main_t * vm,
h0 = (u8 *) vlib_buffer_get_current (b[0]);
h1 = (u8 *) vlib_buffer_get_current (b[1]);
-
if (VLIB_TX == dir)
{
h0 += vnet_buffer (b[0])->ip.save_rewrite_length;
h1 += vnet_buffer (b[1])->ip.save_rewrite_length;
}
- fib_index0 =
- urpf_cfgs[af][dir][vnet_buffer (b[0])->sw_if_index[dir]].fib_index;
- fib_index1 =
- urpf_cfgs[af][dir][vnet_buffer (b[1])->sw_if_index[dir]].fib_index;
+ fib_index0 = urpf_get_fib_index (b[0], af, dir);
+ fib_index1 = urpf_get_fib_index (b[1], af, dir);
+ urpf_perform_check_x2 (af, dir, mode, b[0], b[1], h0, h1, fib_index0,
+ fib_index1, &lb0, &lb1, &pass0, &pass1);
- if (AF_IP4 == af)
- {
- const ip4_header_t *ip0, *ip1;
-
- ip0 = (ip4_header_t *) h0;
- ip1 = (ip4_header_t *) h1;
-
- ip4_fib_forwarding_lookup_x2 (fib_index0,
- fib_index1,
- &ip0->src_address,
- &ip1->src_address,
- &lb_index0, &lb_index1);
- /* Pass multicast. */
- pass0 = (ip4_address_is_multicast (&ip0->src_address) ||
- ip4_address_is_global_broadcast (&ip0->src_address));
- pass1 = (ip4_address_is_multicast (&ip1->src_address) ||
- ip4_address_is_global_broadcast (&ip1->src_address));
- }
- else
+ if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
{
- const ip6_header_t *ip0, *ip1;
-
- ip0 = (ip6_header_t *) h0;
- ip1 = (ip6_header_t *) h1;
-
- lb_index0 = ip6_fib_table_fwding_lookup (fib_index0,
- &ip0->src_address);
- lb_index1 = ip6_fib_table_fwding_lookup (fib_index1,
- &ip1->src_address);
- pass0 = ip6_address_is_multicast (&ip0->src_address);
- pass1 = ip6_address_is_multicast (&ip1->src_address);
- }
-
- lb0 = load_balance_get (lb_index0);
- lb1 = load_balance_get (lb_index1);
+ urpf_trace_t *t;
- if (URPF_MODE_STRICT == mode)
- {
- /* for RX the check is: would this source adddress be forwarded
- * out of the interface on which it was recieved, if yes allow.
- * For TX it's; would this source address be forwarded out of the
- * interface through which it is being sent, if yes drop.
- */
- int res0, res1;
-
- res0 = fib_urpf_check (lb0->lb_urpf,
- vnet_buffer (b[0])->sw_if_index[dir]);
- res1 = fib_urpf_check (lb1->lb_urpf,
- vnet_buffer (b[1])->sw_if_index[dir]);
-
- if (VLIB_RX == dir)
- {
- pass0 |= res0;
- pass1 |= res1;
- }
- else
- {
- pass0 |= !res0 && fib_urpf_check_size (lb0->lb_urpf);
- pass1 |= !res1 && fib_urpf_check_size (lb1->lb_urpf);
-
- /* allow locally generated */
- pass0 |= b[0]->flags & VNET_BUFFER_F_LOCALLY_ORIGINATED;
- pass1 |= b[1]->flags & VNET_BUFFER_F_LOCALLY_ORIGINATED;
- }
+ t = vlib_add_trace (vm, node, b[0], sizeof (*t));
+ t->urpf = lb0 ? lb0->lb_urpf : ~0;
+ t->fib_index = fib_index0;
}
- else
+ if (b[1]->flags & VLIB_BUFFER_IS_TRACED)
{
- pass0 |= fib_urpf_check_size (lb0->lb_urpf);
- pass1 |= fib_urpf_check_size (lb1->lb_urpf);
+ urpf_trace_t *t;
+
+ t = vlib_add_trace (vm, node, b[1], sizeof (*t));
+ t->urpf = lb1 ? lb1->lb_urpf : ~0;
+ t->fib_index = fib_index1;
}
if (PREDICT_TRUE (pass0))
@@ -218,22 +309,6 @@ urpf_inline (vlib_main_t * vm,
next[1] = URPF_NEXT_DROP;
b[1]->error = node->errors[URPF_ERROR_DROP];
}
-
- if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
- {
- urpf_trace_t *t;
-
- t = vlib_add_trace (vm, node, b[0], sizeof (*t));
- t->urpf = lb0->lb_urpf;
- }
- if (b[1]->flags & VLIB_BUFFER_IS_TRACED)
- {
- urpf_trace_t *t;
-
- t = vlib_add_trace (vm, node, b[1], sizeof (*t));
- t->urpf = lb1->lb_urpf;
- }
-
b += 2;
next += 2;
n_left -= 2;
@@ -241,8 +316,8 @@ urpf_inline (vlib_main_t * vm,
while (n_left)
{
- u32 pass0, lb_index0, fib_index0;
- const load_balance_t *lb0;
+ u32 pass0, fib_index0;
+ load_balance_t *lb0 = 0;
const u8 *h0;
h0 = (u8 *) vlib_buffer_get_current (b[0]);
@@ -250,51 +325,18 @@ urpf_inline (vlib_main_t * vm,
if (VLIB_TX == dir)
h0 += vnet_buffer (b[0])->ip.save_rewrite_length;
- fib_index0 =
- urpf_cfgs[af][dir][vnet_buffer (b[0])->sw_if_index[dir]].fib_index;
-
- if (AF_IP4 == af)
- {
- const ip4_header_t *ip0;
-
- ip0 = (ip4_header_t *) h0;
+ fib_index0 = urpf_get_fib_index (b[0], af, dir);
+ urpf_perform_check_x1 (af, dir, mode, b[0], h0, fib_index0, &lb0,
+ &pass0);
- lb_index0 = ip4_fib_forwarding_lookup (fib_index0,
- &ip0->src_address);
-
- /* Pass multicast. */
- pass0 = (ip4_address_is_multicast (&ip0->src_address) ||
- ip4_address_is_global_broadcast (&ip0->src_address));
- }
- else
+ if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
{
- const ip6_header_t *ip0;
-
- ip0 = (ip6_header_t *) h0;
-
- lb_index0 = ip6_fib_table_fwding_lookup (fib_index0,
- &ip0->src_address);
- pass0 = ip6_address_is_multicast (&ip0->src_address);
- }
-
- lb0 = load_balance_get (lb_index0);
+ urpf_trace_t *t;
- if (URPF_MODE_STRICT == mode)
- {
- int res0;
-
- res0 = fib_urpf_check (lb0->lb_urpf,
- vnet_buffer (b[0])->sw_if_index[dir]);
- if (VLIB_RX == dir)
- pass0 |= res0;
- else
- {
- pass0 |= !res0 && fib_urpf_check_size (lb0->lb_urpf);
- pass0 |= b[0]->flags & VNET_BUFFER_F_LOCALLY_ORIGINATED;
- }
+ t = vlib_add_trace (vm, node, b[0], sizeof (*t));
+ t->urpf = lb0 ? lb0->lb_urpf : ~0;
+ t->fib_index = fib_index0;
}
- else
- pass0 |= fib_urpf_check_size (lb0->lb_urpf);
if (PREDICT_TRUE (pass0))
vnet_feature_next_u16 (&next[0], b[0]);
@@ -303,14 +345,6 @@ urpf_inline (vlib_main_t * vm,
next[0] = URPF_NEXT_DROP;
b[0]->error = node->errors[URPF_ERROR_DROP];
}
-
- if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
- {
- urpf_trace_t *t;
-
- t = vlib_add_trace (vm, node, b[0], sizeof (*t));
- t->urpf = lb0->lb_urpf;
- }
b++;
next++;
n_left--;