| author | Filip Tehlar <ftehlar@cisco.com> | 2019-02-28 02:51:47 -0800 |
|---|---|---|
| committer | Neale Ranns <nranns@cisco.com> | 2019-03-04 07:49:25 +0000 |
| commit | 6fa5dac5fe9699c5eeed383c5bcb3f08507e1548 | |
| tree | e09cec5c32013f5d790a8d1cc036ed526a329498 | |
| parent | 6442401c21f880dbfd25d36859f79ddf54b49178 | |
gbp: migrate old MULTIARCH macros to VLIB_NODE_FN
Change-Id: I3b10caa447b796172f787df8fcbb92f2b4dd2803
Signed-off-by: Filip Tehlar <ftehlar@cisco.com>
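
For readers unfamiliar with the convention being adopted: under the old scheme each graph node had a separately named dispatch function wired in through the registration's `.function` member plus a `VLIB_NODE_FUNCTION_MULTIARCH` macro, whereas `VLIB_NODE_FN` ties the function body to the node registration and lets the per-architecture variants be generated from the sources listed under `MULTIARCH_SOURCES` in CMakeLists.txt, as this patch does. A minimal sketch of the resulting form, using a hypothetical node (`example_node` / "example-node") rather than any of the GBP nodes touched below:

```c
#include <vlib/vlib.h>

/* New-style node function: VLIB_NODE_FN names the dispatch function after
 * the registration below, so the build can emit one variant per CPU arch. */
VLIB_NODE_FN (example_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
                             vlib_frame_t * frame)
{
  /* pass-through sketch: a real node would inspect and enqueue its buffers */
  return frame->n_vectors;
}

/* The registration no longer carries a .function pointer, and no trailing
 * VLIB_NODE_FUNCTION_MULTIARCH macro is needed. */
VLIB_REGISTER_NODE (example_node) = {
  .name = "example-node",
  .vector_size = sizeof (u32),
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_next_nodes = 0,
};
```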
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | src/plugins/gbp/CMakeLists.txt | 12 |
| -rw-r--r-- | src/plugins/gbp/gbp.h | 13 |
| -rw-r--r-- | src/plugins/gbp/gbp_classify.c | 75 |
| -rw-r--r-- | src/plugins/gbp/gbp_fwd.c | 164 |
| -rw-r--r-- | src/plugins/gbp/gbp_fwd_dpo.c | 20 |
| -rw-r--r-- | src/plugins/gbp/gbp_fwd_node.c | 162 |
| -rw-r--r-- | src/plugins/gbp/gbp_learn.c | 63 |
| -rw-r--r-- | src/plugins/gbp/gbp_policy.c | 39 |
| -rw-r--r-- | src/plugins/gbp/gbp_policy_dpo.c | 19 |
| -rw-r--r-- | src/plugins/gbp/gbp_sclass.c | 58 |
| -rw-r--r-- | src/plugins/gbp/gbp_vxlan.c | 193 |
| -rw-r--r-- | src/plugins/gbp/gbp_vxlan.h | 1 |
| -rw-r--r-- | src/plugins/gbp/gbp_vxlan_node.c | 219 |
13 files changed, 535 insertions, 503 deletions
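
A second recurring change in the hunks below: the `*_init` functions stop dereferencing the node registration variable (for example `gbp_fwd_node.index`) and instead resolve the node index by its registered name with `vlib_get_node_by_name()`. A stand-alone sketch of that lookup follows; the "gbp-fwd" name is taken from the diff, but the init function itself is illustrative only and not part of the patch:

```c
#include <vlib/vlib.h>

static clib_error_t *
example_lookup_init (vlib_main_t * vm)
{
  /* Resolve the node by its registered name rather than through the
   * VLIB_REGISTER_NODE variable; node->index can then be handed to
   * feat_bitmap_init_next_nodes() the way the patch does. */
  vlib_node_t *node = vlib_get_node_by_name (vm, (u8 *) "gbp-fwd");

  if (NULL == node)
    return clib_error_return (0, "gbp-fwd node not registered");

  return 0;
}

VLIB_INIT_FUNCTION (example_lookup_init);
```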
diff --git a/src/plugins/gbp/CMakeLists.txt b/src/plugins/gbp/CMakeLists.txt index ef254024d43..140a456bfd0 100644 --- a/src/plugins/gbp/CMakeLists.txt +++ b/src/plugins/gbp/CMakeLists.txt @@ -22,6 +22,7 @@ add_vpp_plugin(gbp gbp_ext_itf.c gbp_fwd.c gbp_fwd_dpo.c + gbp_fwd_node.c gbp_itf.c gbp_learn.c gbp_policy.c @@ -32,6 +33,17 @@ add_vpp_plugin(gbp gbp_sclass.c gbp_subnet.c gbp_vxlan.c + gbp_vxlan_node.c + + MULTIARCH_SOURCES + gbp_fwd_node.c + gbp_policy.c + gbp_learn.c + gbp_policy_dpo.c + gbp_fwd_dpo.c + gbp_vxlan_node.c + gbp_sclass.c + gbp_classify.c API_FILES gbp.api diff --git a/src/plugins/gbp/gbp.h b/src/plugins/gbp/gbp.h index 06612cd948f..35e02d26ca7 100644 --- a/src/plugins/gbp/gbp.h +++ b/src/plugins/gbp/gbp.h @@ -48,6 +48,19 @@ typedef struct extern gbp_main_t gbp_main; +/** + * Grouping of global data for the GBP source EPG classification feature + */ +typedef struct gbp_policy_main_t_ +{ + /** + * Next nodes for L2 output features + */ + u32 l2_output_feat_next[2][32]; +} gbp_policy_main_t; + +extern gbp_policy_main_t gbp_policy_main; + #endif /* diff --git a/src/plugins/gbp/gbp_classify.c b/src/plugins/gbp/gbp_classify.c index ba6fe9bee5c..2eaa7bb44b6 100644 --- a/src/plugins/gbp/gbp_classify.c +++ b/src/plugins/gbp/gbp_classify.c @@ -185,33 +185,33 @@ gbp_classify_inline (vlib_main_t * vm, return frame->n_vectors; } -static uword -gbp_src_classify (vlib_main_t * vm, - vlib_node_runtime_t * node, vlib_frame_t * frame) +VLIB_NODE_FN (gbp_src_classify_node) (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * frame) { return (gbp_classify_inline (vm, node, frame, GBP_SRC_CLASSIFY_PORT, DPO_PROTO_ETHERNET)); } -static uword -gbp_null_classify (vlib_main_t * vm, - vlib_node_runtime_t * node, vlib_frame_t * frame) +VLIB_NODE_FN (gbp_null_classify_node) (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * frame) { return (gbp_classify_inline (vm, node, frame, GBP_SRC_CLASSIFY_NULL, DPO_PROTO_ETHERNET)); } -static uword -gbp_ip4_src_classify (vlib_main_t * vm, - vlib_node_runtime_t * node, vlib_frame_t * frame) +VLIB_NODE_FN (gbp_ip4_src_classify_node) (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * frame) { return (gbp_classify_inline (vm, node, frame, GBP_SRC_CLASSIFY_PORT, DPO_PROTO_IP4)); } -static uword -gbp_ip6_src_classify (vlib_main_t * vm, - vlib_node_runtime_t * node, vlib_frame_t * frame) +VLIB_NODE_FN (gbp_ip6_src_classify_node) (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * frame) { return (gbp_classify_inline (vm, node, frame, GBP_SRC_CLASSIFY_PORT, DPO_PROTO_IP6)); @@ -233,7 +233,6 @@ format_gbp_classify_trace (u8 * s, va_list * args) /* *INDENT-OFF* */ VLIB_REGISTER_NODE (gbp_null_classify_node) = { - .function = gbp_null_classify, .name = "gbp-null-classify", .vector_size = sizeof (u32), .format_trace = format_gbp_classify_trace, @@ -243,10 +242,7 @@ VLIB_REGISTER_NODE (gbp_null_classify_node) = { .n_next_nodes = 0, }; -VLIB_NODE_FUNCTION_MULTIARCH (gbp_null_classify_node, gbp_null_classify); - VLIB_REGISTER_NODE (gbp_src_classify_node) = { - .function = gbp_src_classify, .name = "gbp-src-classify", .vector_size = sizeof (u32), .format_trace = format_gbp_classify_trace, @@ -256,10 +252,7 @@ VLIB_REGISTER_NODE (gbp_src_classify_node) = { .n_next_nodes = 0, }; -VLIB_NODE_FUNCTION_MULTIARCH (gbp_src_classify_node, gbp_src_classify); - VLIB_REGISTER_NODE (gbp_ip4_src_classify_node) = { - .function = gbp_ip4_src_classify, .name = "ip4-gbp-src-classify", .vector_size = sizeof (u32), .format_trace = 
format_gbp_classify_trace, @@ -272,10 +265,7 @@ VLIB_REGISTER_NODE (gbp_ip4_src_classify_node) = { }, }; -VLIB_NODE_FUNCTION_MULTIARCH (gbp_ip4_src_classify_node, gbp_ip4_src_classify); - VLIB_REGISTER_NODE (gbp_ip6_src_classify_node) = { - .function = gbp_ip6_src_classify, .name = "ip6-gbp-src-classify", .vector_size = sizeof (u32), .format_trace = format_gbp_classify_trace, @@ -288,8 +278,6 @@ VLIB_REGISTER_NODE (gbp_ip6_src_classify_node) = { }, }; -VLIB_NODE_FUNCTION_MULTIARCH (gbp_ip6_src_classify_node, gbp_ip6_src_classify); - VNET_FEATURE_INIT (gbp_ip4_src_classify_feat_node, static) = { .arc_name = "ip4-unicast", @@ -490,30 +478,29 @@ gbp_lpm_classify_inline (vlib_main_t * vm, return frame->n_vectors; } -static uword -gbp_ip4_lpm_classify (vlib_main_t * vm, - vlib_node_runtime_t * node, vlib_frame_t * frame) +VLIB_NODE_FN (gbp_ip4_lpm_classify_node) (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * frame) { return (gbp_lpm_classify_inline (vm, node, frame, DPO_PROTO_IP4, 1)); } -static uword -gbp_ip6_lpm_classify (vlib_main_t * vm, - vlib_node_runtime_t * node, vlib_frame_t * frame) +VLIB_NODE_FN (gbp_ip6_lpm_classify_node) (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * frame) { return (gbp_lpm_classify_inline (vm, node, frame, DPO_PROTO_IP6, 1)); } -static uword -gbp_l2_lpm_classify (vlib_main_t * vm, - vlib_node_runtime_t * node, vlib_frame_t * frame) +VLIB_NODE_FN (gbp_l2_lpm_classify_node) (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * frame) { return (gbp_lpm_classify_inline (vm, node, frame, DPO_PROTO_ETHERNET, 0)); } /* *INDENT-OFF* */ VLIB_REGISTER_NODE (gbp_ip4_lpm_classify_node) = { - .function = gbp_ip4_lpm_classify, .name = "ip4-gbp-lpm-classify", .vector_size = sizeof (u32), .format_trace = format_gbp_classify_trace, @@ -526,10 +513,7 @@ VLIB_REGISTER_NODE (gbp_ip4_lpm_classify_node) = { }, }; -VLIB_NODE_FUNCTION_MULTIARCH (gbp_ip4_lpm_classify_node, gbp_ip4_lpm_classify); - VLIB_REGISTER_NODE (gbp_ip6_lpm_classify_node) = { - .function = gbp_ip6_lpm_classify, .name = "ip6-gbp-lpm-classify", .vector_size = sizeof (u32), .format_trace = format_gbp_classify_trace, @@ -542,10 +526,7 @@ VLIB_REGISTER_NODE (gbp_ip6_lpm_classify_node) = { }, }; -VLIB_NODE_FUNCTION_MULTIARCH (gbp_ip6_lpm_classify_node, gbp_ip6_lpm_classify); - VLIB_REGISTER_NODE (gbp_l2_lpm_classify_node) = { - .function = gbp_l2_lpm_classify, .name = "l2-gbp-lpm-classify", .vector_size = sizeof (u32), .format_trace = format_gbp_classify_trace, @@ -558,8 +539,6 @@ VLIB_REGISTER_NODE (gbp_l2_lpm_classify_node) = { }, }; -VLIB_NODE_FUNCTION_MULTIARCH (gbp_l2_lpm_classify_node, gbp_l2_lpm_classify); - VNET_FEATURE_INIT (gbp_ip4_lpm_classify_feat_node, static) = { .arc_name = "ip4-unicast", @@ -580,19 +559,25 @@ gbp_src_classify_init (vlib_main_t * vm) { gbp_src_classify_main_t *em = &gbp_src_classify_main; + vlib_node_t *node = vlib_get_node_by_name (vm, (u8 *) "gbp-src-classify"); + /* Initialize the feature next-node indexes */ feat_bitmap_init_next_nodes (vm, - gbp_src_classify_node.index, + node->index, L2INPUT_N_FEAT, l2input_get_feat_names (), em->l2_input_feat_next[GBP_SRC_CLASSIFY_NULL]); + + node = vlib_get_node_by_name (vm, (u8 *) "gbp-null-classify"); feat_bitmap_init_next_nodes (vm, - gbp_null_classify_node.index, + node->index, L2INPUT_N_FEAT, l2input_get_feat_names (), em->l2_input_feat_next[GBP_SRC_CLASSIFY_PORT]); + + node = vlib_get_node_by_name (vm, (u8 *) "l2-gbp-lpm-classify"); feat_bitmap_init_next_nodes (vm, - 
gbp_l2_lpm_classify_node.index, + node->index, L2INPUT_N_FEAT, l2input_get_feat_names (), em->l2_input_feat_next[GBP_SRC_CLASSIFY_LPM]); diff --git a/src/plugins/gbp/gbp_fwd.c b/src/plugins/gbp/gbp_fwd.c index e4814337558..8d98f1cd365 100644 --- a/src/plugins/gbp/gbp_fwd.c +++ b/src/plugins/gbp/gbp_fwd.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018 Cisco and/or its affiliates. + * Copyright (c) 2019 Cisco and/or its affiliates. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at: @@ -12,9 +12,9 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - #include <plugins/gbp/gbp.h> #include <vnet/l2/l2_input.h> +#include <plugins/gbp/gbp_learn.h> /** * Grouping of global data for the GBP source EPG classification feature @@ -29,155 +29,49 @@ typedef struct gbp_fwd_main_t_ static gbp_fwd_main_t gbp_fwd_main; -#define foreach_gbp_fwd \ - _(DROP, "drop") \ - _(OUTPUT, "output") - -typedef enum -{ -#define _(sym,str) GBP_FWD_ERROR_##sym, - foreach_gbp_fwd -#undef _ - GBP_FWD_N_ERROR, -} gbp_fwd_error_t; - -static char *gbp_fwd_error_strings[] = { -#define _(sym,string) string, - foreach_gbp_fwd -#undef _ -}; - -typedef enum -{ -#define _(sym,str) GBP_FWD_NEXT_##sym, - foreach_gbp_fwd -#undef _ - GBP_FWD_N_NEXT, -} gbp_fwd_next_t; - -/** - * per-packet trace data - */ -typedef struct gbp_fwd_trace_t_ -{ - /* per-pkt trace data */ - epg_id_t src_epg; - u32 sw_if_index; -} gbp_fwd_trace_t; +gbp_policy_main_t gbp_policy_main; -static uword -gbp_fwd (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) +void +gbp_learn_enable (u32 sw_if_index, gbb_learn_mode_t mode) { - u32 n_left_from, *from, *to_next; - u32 next_index; - - next_index = 0; - n_left_from = frame->n_vectors; - from = vlib_frame_vector_args (frame); - - while (n_left_from > 0) + if (GBP_LEARN_MODE_L2 == mode) { - u32 n_left_to_next; - - vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); - - while (n_left_from > 0 && n_left_to_next > 0) - { - u32 bi0, sw_if_index0, src_epg; - gbp_fwd_next_t next0; - vlib_buffer_t *b0; - - next0 = GBP_FWD_NEXT_DROP; - bi0 = from[0]; - to_next[0] = bi0; - from += 1; - to_next += 1; - n_left_from -= 1; - n_left_to_next -= 1; - - b0 = vlib_get_buffer (vm, bi0); - - /* - * lookup the uplink based on src EPG - */ - src_epg = vnet_buffer2 (b0)->gbp.src_epg; - - sw_if_index0 = gbp_epg_itf_lookup (src_epg); - - if (~0 != sw_if_index0) - { - vnet_buffer (b0)->sw_if_index[VLIB_TX] = sw_if_index0; - - next0 = GBP_FWD_NEXT_OUTPUT; - } - /* - * else - * don't know the uplink interface for this EPG => drop - */ - - if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_IS_TRACED))) - { - gbp_fwd_trace_t *t = vlib_add_trace (vm, node, b0, sizeof (*t)); - t->src_epg = src_epg; - t->sw_if_index = sw_if_index0; - } - - /* verify speculative enqueue, maybe switch current next frame */ - vlib_validate_buffer_enqueue_x1 (vm, node, next_index, - to_next, n_left_to_next, - bi0, next0); - } - - vlib_put_next_frame (vm, node, next_index, n_left_to_next); + l2input_intf_bitmap_enable (sw_if_index, L2INPUT_FEAT_GBP_LEARN, 1); + } + else + { + vnet_feature_enable_disable ("ip4-unicast", + "gbp-learn-ip4", sw_if_index, 1, 0, 0); + vnet_feature_enable_disable ("ip6-unicast", + "gbp-learn-ip6", sw_if_index, 1, 0, 0); } - - return frame->n_vectors; } -/* packet trace format function */ -static u8 * -format_gbp_fwd_trace (u8 * s, va_list * args) 
+void +gbp_learn_disable (u32 sw_if_index, gbb_learn_mode_t mode) { - CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *); - CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *); - gbp_fwd_trace_t *t = va_arg (*args, gbp_fwd_trace_t *); - - s = format (s, "src-epg:%d", t->src_epg); - - return s; + if (GBP_LEARN_MODE_L2 == mode) + { + l2input_intf_bitmap_enable (sw_if_index, L2INPUT_FEAT_GBP_LEARN, 0); + } + else + { + vnet_feature_enable_disable ("ip4-unicast", + "gbp-learn-ip4", sw_if_index, 0, 0, 0); + vnet_feature_enable_disable ("ip6-unicast", + "gbp-learn-ip6", sw_if_index, 0, 0, 0); + } } -/* *INDENT-OFF* */ -VLIB_REGISTER_NODE (gbp_fwd_node) = { - .function = gbp_fwd, - .name = "gbp-fwd", - .vector_size = sizeof (u32), - .format_trace = format_gbp_fwd_trace, - .type = VLIB_NODE_TYPE_INTERNAL, - - .n_errors = ARRAY_LEN(gbp_fwd_error_strings), - .error_strings = gbp_fwd_error_strings, - - .n_next_nodes = GBP_FWD_N_NEXT, - - .next_nodes = { - [GBP_FWD_NEXT_DROP] = "error-drop", - [GBP_FWD_NEXT_OUTPUT] = "l2-output", - }, -}; - -VLIB_NODE_FUNCTION_MULTIARCH (gbp_fwd_node, gbp_fwd); - -/* *INDENT-ON* */ - static clib_error_t * gbp_fwd_init (vlib_main_t * vm) { gbp_fwd_main_t *gpm = &gbp_fwd_main; + vlib_node_t *node = vlib_get_node_by_name (vm, (u8 *) "gbp-fwd"); /* Initialize the feature next-node indices */ feat_bitmap_init_next_nodes (vm, - gbp_fwd_node.index, + node->index, L2INPUT_N_FEAT, l2input_get_feat_names (), gpm->l2_input_feat_next); diff --git a/src/plugins/gbp/gbp_fwd_dpo.c b/src/plugins/gbp/gbp_fwd_dpo.c index e3dba5fa527..c0b19a9e398 100644 --- a/src/plugins/gbp/gbp_fwd_dpo.c +++ b/src/plugins/gbp/gbp_fwd_dpo.c @@ -18,6 +18,8 @@ #include <vnet/ethernet/ethernet.h> + +#ifndef CLIB_MARCH_VARIANT /** * The 'DB' of GBP FWD DPOs. 
* There is one per-proto @@ -164,6 +166,7 @@ gbp_fwd_dpo_module_init (vlib_main_t * vm) } VLIB_INIT_FUNCTION (gbp_fwd_dpo_module_init); +#endif /* CLIB_MARCH_VARIANT */ typedef struct gbp_fwd_dpo_trace_t_ { @@ -255,23 +258,22 @@ format_gbp_fwd_dpo_trace (u8 * s, va_list * args) return s; } -static uword -ip4_gbp_fwd_dpo (vlib_main_t * vm, - vlib_node_runtime_t * node, vlib_frame_t * from_frame) +VLIB_NODE_FN (ip4_gbp_fwd_dpo_node) (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * from_frame) { return (gbp_fwd_dpo_inline (vm, node, from_frame, FIB_PROTOCOL_IP4)); } -static uword -ip6_gbp_fwd_dpo (vlib_main_t * vm, - vlib_node_runtime_t * node, vlib_frame_t * from_frame) +VLIB_NODE_FN (ip6_gbp_fwd_dpo_node) (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * from_frame) { return (gbp_fwd_dpo_inline (vm, node, from_frame, FIB_PROTOCOL_IP6)); } /* *INDENT-OFF* */ VLIB_REGISTER_NODE (ip4_gbp_fwd_dpo_node) = { - .function = ip4_gbp_fwd_dpo, .name = "ip4-gbp-fwd-dpo", .vector_size = sizeof (u32), .format_trace = format_gbp_fwd_dpo_trace, @@ -283,7 +285,6 @@ VLIB_REGISTER_NODE (ip4_gbp_fwd_dpo_node) = { } }; VLIB_REGISTER_NODE (ip6_gbp_fwd_dpo_node) = { - .function = ip6_gbp_fwd_dpo, .name = "ip6-gbp-fwd-dpo", .vector_size = sizeof (u32), .format_trace = format_gbp_fwd_dpo_trace, @@ -294,9 +295,6 @@ VLIB_REGISTER_NODE (ip6_gbp_fwd_dpo_node) = { [GBP_FWD_FWD] = "ip6-dvr-dpo", } }; - -VLIB_NODE_FUNCTION_MULTIARCH (ip4_gbp_fwd_dpo_node, ip4_gbp_fwd_dpo) -VLIB_NODE_FUNCTION_MULTIARCH (ip6_gbp_fwd_dpo_node, ip6_gbp_fwd_dpo) /* *INDENT-ON* */ /* diff --git a/src/plugins/gbp/gbp_fwd_node.c b/src/plugins/gbp/gbp_fwd_node.c new file mode 100644 index 00000000000..eff4aeb1880 --- /dev/null +++ b/src/plugins/gbp/gbp_fwd_node.c @@ -0,0 +1,162 @@ +/* + * Copyright (c) 2018 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include <plugins/gbp/gbp.h> +#include <vnet/l2/l2_input.h> + +#define foreach_gbp_fwd \ + _(DROP, "drop") \ + _(OUTPUT, "output") + +typedef enum +{ +#define _(sym,str) GBP_FWD_ERROR_##sym, + foreach_gbp_fwd +#undef _ + GBP_FWD_N_ERROR, +} gbp_fwd_error_t; + +static char *gbp_fwd_error_strings[] = { +#define _(sym,string) string, + foreach_gbp_fwd +#undef _ +}; + +typedef enum +{ +#define _(sym,str) GBP_FWD_NEXT_##sym, + foreach_gbp_fwd +#undef _ + GBP_FWD_N_NEXT, +} gbp_fwd_next_t; + +/** + * per-packet trace data + */ +typedef struct gbp_fwd_trace_t_ +{ + /* per-pkt trace data */ + epg_id_t src_epg; + u32 sw_if_index; +} gbp_fwd_trace_t; + +VLIB_NODE_FN (gbp_fwd_node) (vlib_main_t * vm, vlib_node_runtime_t * node, + vlib_frame_t * frame) +{ + u32 n_left_from, *from, *to_next; + u32 next_index; + + next_index = 0; + n_left_from = frame->n_vectors; + from = vlib_frame_vector_args (frame); + + while (n_left_from > 0) + { + u32 n_left_to_next; + + vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); + + while (n_left_from > 0 && n_left_to_next > 0) + { + u32 bi0, sw_if_index0, src_epg; + gbp_fwd_next_t next0; + vlib_buffer_t *b0; + + next0 = GBP_FWD_NEXT_DROP; + bi0 = from[0]; + to_next[0] = bi0; + from += 1; + to_next += 1; + n_left_from -= 1; + n_left_to_next -= 1; + + b0 = vlib_get_buffer (vm, bi0); + + /* + * lookup the uplink based on src EPG + */ + src_epg = vnet_buffer2 (b0)->gbp.src_epg; + + sw_if_index0 = gbp_epg_itf_lookup (src_epg); + + if (~0 != sw_if_index0) + { + vnet_buffer (b0)->sw_if_index[VLIB_TX] = sw_if_index0; + + next0 = GBP_FWD_NEXT_OUTPUT; + } + /* + * else + * don't know the uplink interface for this EPG => drop + */ + + if (PREDICT_FALSE ((b0->flags & VLIB_BUFFER_IS_TRACED))) + { + gbp_fwd_trace_t *t = vlib_add_trace (vm, node, b0, sizeof (*t)); + t->src_epg = src_epg; + t->sw_if_index = sw_if_index0; + } + + /* verify speculative enqueue, maybe switch current next frame */ + vlib_validate_buffer_enqueue_x1 (vm, node, next_index, + to_next, n_left_to_next, + bi0, next0); + } + + vlib_put_next_frame (vm, node, next_index, n_left_to_next); + } + + return frame->n_vectors; +} + +/* packet trace format function */ +static u8 * +format_gbp_fwd_trace (u8 * s, va_list * args) +{ + CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *); + CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *); + gbp_fwd_trace_t *t = va_arg (*args, gbp_fwd_trace_t *); + + s = format (s, "src-epg:%d", t->src_epg); + + return s; +} + +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE (gbp_fwd_node) = { + .name = "gbp-fwd", + .vector_size = sizeof (u32), + .format_trace = format_gbp_fwd_trace, + .type = VLIB_NODE_TYPE_INTERNAL, + + .n_errors = ARRAY_LEN(gbp_fwd_error_strings), + .error_strings = gbp_fwd_error_strings, + + .n_next_nodes = GBP_FWD_N_NEXT, + + .next_nodes = { + [GBP_FWD_NEXT_DROP] = "error-drop", + [GBP_FWD_NEXT_OUTPUT] = "l2-output", + }, +}; +/* *INDENT-ON* */ + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/plugins/gbp/gbp_learn.c b/src/plugins/gbp/gbp_learn.c index 514aca26ef9..156ea358044 100644 --- a/src/plugins/gbp/gbp_learn.c +++ b/src/plugins/gbp/gbp_learn.c @@ -211,9 +211,9 @@ gbp_learn_get_outer (const ethernet_header_t * eh0, *outer_dst = ip0->dst_address; } -static uword -gbp_learn_l2 (vlib_main_t * vm, - vlib_node_runtime_t * node, vlib_frame_t * frame) +VLIB_NODE_FN (gbp_learn_l2_node) (vlib_main_t * vm, + vlib_node_runtime_t * node, + 
vlib_frame_t * frame) { u32 n_left_from, *from, *to_next, next_index, thread_index, seed; gbp_learn_main_t *glm; @@ -379,7 +379,6 @@ format_gbp_learn_l2_trace (u8 * s, va_list * args) /* *INDENT-OFF* */ VLIB_REGISTER_NODE (gbp_learn_l2_node) = { - .function = gbp_learn_l2, .name = "gbp-learn-l2", .vector_size = sizeof (u32), .format_trace = format_gbp_learn_l2_trace, @@ -394,8 +393,6 @@ VLIB_REGISTER_NODE (gbp_learn_l2_node) = { [GBP_LEARN_NEXT_DROP] = "error-drop", }, }; - -VLIB_NODE_FUNCTION_MULTIARCH (gbp_learn_l2_node, gbp_learn_l2); /* *INDENT-ON* */ typedef struct gbp_learn_l3_t_ @@ -653,31 +650,28 @@ format_gbp_learn_l3_trace (u8 * s, va_list * args) return s; } -static uword -gbp_learn_ip4 (vlib_main_t * vm, - vlib_node_runtime_t * node, vlib_frame_t * frame) +VLIB_NODE_FN (gbp_learn_ip4_node) (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * frame) { return (gbp_learn_l3 (vm, node, frame, FIB_PROTOCOL_IP4)); } -static uword -gbp_learn_ip6 (vlib_main_t * vm, - vlib_node_runtime_t * node, vlib_frame_t * frame) +VLIB_NODE_FN (gbp_learn_ip6_node) (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * frame) { return (gbp_learn_l3 (vm, node, frame, FIB_PROTOCOL_IP6)); } /* *INDENT-OFF* */ VLIB_REGISTER_NODE (gbp_learn_ip4_node) = { - .function = gbp_learn_ip4, .name = "gbp-learn-ip4", .vector_size = sizeof (u32), .format_trace = format_gbp_learn_l3_trace, .type = VLIB_NODE_TYPE_INTERNAL, }; -VLIB_NODE_FUNCTION_MULTIARCH (gbp_learn_ip4_node, gbp_learn_ip4); - VNET_FEATURE_INIT (gbp_learn_ip4, static) = { .arc_name = "ip4-unicast", @@ -685,15 +679,12 @@ VNET_FEATURE_INIT (gbp_learn_ip4, static) = }; VLIB_REGISTER_NODE (gbp_learn_ip6_node) = { - .function = gbp_learn_ip6, .name = "gbp-learn-ip6", .vector_size = sizeof (u32), .format_trace = format_gbp_learn_l3_trace, .type = VLIB_NODE_TYPE_INTERNAL, }; -VLIB_NODE_FUNCTION_MULTIARCH (gbp_learn_ip6_node, gbp_learn_ip6); - VNET_FEATURE_INIT (gbp_learn_ip6, static) = { .arc_name = "ip6-unicast", @@ -702,47 +693,17 @@ VNET_FEATURE_INIT (gbp_learn_ip6, static) = /* *INDENT-ON* */ -void -gbp_learn_enable (u32 sw_if_index, gbb_learn_mode_t mode) -{ - if (GBP_LEARN_MODE_L2 == mode) - { - l2input_intf_bitmap_enable (sw_if_index, L2INPUT_FEAT_GBP_LEARN, 1); - } - else - { - vnet_feature_enable_disable ("ip4-unicast", - "gbp-learn-ip4", sw_if_index, 1, 0, 0); - vnet_feature_enable_disable ("ip6-unicast", - "gbp-learn-ip6", sw_if_index, 1, 0, 0); - } -} - -void -gbp_learn_disable (u32 sw_if_index, gbb_learn_mode_t mode) -{ - if (GBP_LEARN_MODE_L2 == mode) - { - l2input_intf_bitmap_enable (sw_if_index, L2INPUT_FEAT_GBP_LEARN, 0); - } - else - { - vnet_feature_enable_disable ("ip4-unicast", - "gbp-learn-ip4", sw_if_index, 0, 0, 0); - vnet_feature_enable_disable ("ip6-unicast", - "gbp-learn-ip6", sw_if_index, 0, 0, 0); - } -} - static clib_error_t * gbp_learn_init (vlib_main_t * vm) { gbp_learn_main_t *glm = &gbp_learn_main; vlib_thread_main_t *tm = &vlib_thread_main; + vlib_node_t *node = vlib_get_node_by_name (vm, (u8 *) "gbp-learn-l2"); + /* Initialize the feature next-node indices */ feat_bitmap_init_next_nodes (vm, - gbp_learn_l2_node.index, + node->index, L2INPUT_N_FEAT, l2input_get_feat_names (), glm->gl_l2_input_feat_next); diff --git a/src/plugins/gbp/gbp_policy.c b/src/plugins/gbp/gbp_policy.c index 7b7bd5ca08e..5d5dcd0befc 100644 --- a/src/plugins/gbp/gbp_policy.c +++ b/src/plugins/gbp/gbp_policy.c @@ -18,19 +18,6 @@ #include <vnet/vxlan-gbp/vxlan_gbp_packet.h> -/** - * Grouping of global data for the GBP source EPG 
classification feature - */ -typedef struct gbp_policy_main_t_ -{ - /** - * Next nodes for L2 output features - */ - u32 l2_output_feat_next[2][32]; -} gbp_policy_main_t; - -static gbp_policy_main_t gbp_policy_main; - #define foreach_gbp_policy \ _(DENY, "deny") @@ -336,16 +323,16 @@ gbp_policy_inline (vlib_main_t * vm, return frame->n_vectors; } -static uword -gbp_policy_port (vlib_main_t * vm, - vlib_node_runtime_t * node, vlib_frame_t * frame) +VLIB_NODE_FN (gbp_policy_port_node) (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * frame) { return (gbp_policy_inline (vm, node, frame, 1)); } -static uword -gbp_policy_mac (vlib_main_t * vm, - vlib_node_runtime_t * node, vlib_frame_t * frame) +VLIB_NODE_FN (gbp_policy_mac_node) (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * frame) { return (gbp_policy_inline (vm, node, frame, 0)); } @@ -367,7 +354,6 @@ format_gbp_policy_trace (u8 * s, va_list * args) /* *INDENT-OFF* */ VLIB_REGISTER_NODE (gbp_policy_port_node) = { - .function = gbp_policy_port, .name = "gbp-policy-port", .vector_size = sizeof (u32), .format_trace = format_gbp_policy_trace, @@ -383,10 +369,7 @@ VLIB_REGISTER_NODE (gbp_policy_port_node) = { }, }; -VLIB_NODE_FUNCTION_MULTIARCH (gbp_policy_port_node, gbp_policy_port); - VLIB_REGISTER_NODE (gbp_policy_mac_node) = { - .function = gbp_policy_mac, .name = "gbp-policy-mac", .vector_size = sizeof (u32), .format_trace = format_gbp_policy_trace, @@ -394,8 +377,6 @@ VLIB_REGISTER_NODE (gbp_policy_mac_node) = { .sibling_of = "gbp-policy-port", }; -VLIB_NODE_FUNCTION_MULTIARCH (gbp_policy_mac_node, gbp_policy_mac); - /* *INDENT-ON* */ static clib_error_t * @@ -404,14 +385,18 @@ gbp_policy_init (vlib_main_t * vm) gbp_policy_main_t *gpm = &gbp_policy_main; clib_error_t *error = 0; + vlib_node_t *node = vlib_get_node_by_name (vm, (u8 *) "gbp-policy-port"); + /* Initialize the feature next-node indexes */ feat_bitmap_init_next_nodes (vm, - gbp_policy_port_node.index, + node->index, L2OUTPUT_N_FEAT, l2output_get_feat_names (), gpm->l2_output_feat_next[1]); + + node = vlib_get_node_by_name (vm, (u8 *) "gbp-policy-mac"); feat_bitmap_init_next_nodes (vm, - gbp_policy_mac_node.index, + node->index, L2OUTPUT_N_FEAT, l2output_get_feat_names (), gpm->l2_output_feat_next[0]); diff --git a/src/plugins/gbp/gbp_policy_dpo.c b/src/plugins/gbp/gbp_policy_dpo.c index 213475fbc65..ce628e5c975 100644 --- a/src/plugins/gbp/gbp_policy_dpo.c +++ b/src/plugins/gbp/gbp_policy_dpo.c @@ -21,6 +21,7 @@ #include <plugins/gbp/gbp_policy_dpo.h> #include <plugins/gbp/gbp_recirc.h> +#ifndef CLIB_MARCH_VARIANT /** * DPO pool */ @@ -206,6 +207,7 @@ gbp_policy_dpo_module_init (vlib_main_t * vm) } VLIB_INIT_FUNCTION (gbp_policy_dpo_module_init); +#endif /* CLIB_MARCH_VARIANT */ typedef struct gbp_policy_dpo_trace_t_ { @@ -394,23 +396,22 @@ format_gbp_policy_dpo_trace (u8 * s, va_list * args) return s; } -static uword -ip4_gbp_policy_dpo (vlib_main_t * vm, - vlib_node_runtime_t * node, vlib_frame_t * from_frame) +VLIB_NODE_FN (ip4_gbp_policy_dpo_node) (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * from_frame) { return (gbp_policy_dpo_inline (vm, node, from_frame, 0)); } -static uword -ip6_gbp_policy_dpo (vlib_main_t * vm, - vlib_node_runtime_t * node, vlib_frame_t * from_frame) +VLIB_NODE_FN (ip6_gbp_policy_dpo_node) (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * from_frame) { return (gbp_policy_dpo_inline (vm, node, from_frame, 1)); } /* *INDENT-OFF* */ VLIB_REGISTER_NODE (ip4_gbp_policy_dpo_node) = { - 
.function = ip4_gbp_policy_dpo, .name = "ip4-gbp-policy-dpo", .vector_size = sizeof (u32), .format_trace = format_gbp_policy_dpo_trace, @@ -421,7 +422,6 @@ VLIB_REGISTER_NODE (ip4_gbp_policy_dpo_node) = { } }; VLIB_REGISTER_NODE (ip6_gbp_policy_dpo_node) = { - .function = ip6_gbp_policy_dpo, .name = "ip6-gbp-policy-dpo", .vector_size = sizeof (u32), .format_trace = format_gbp_policy_dpo_trace, @@ -431,9 +431,6 @@ VLIB_REGISTER_NODE (ip6_gbp_policy_dpo_node) = { [GBP_POLICY_DROP] = "ip6-drop", } }; - -VLIB_NODE_FUNCTION_MULTIARCH (ip4_gbp_policy_dpo_node, ip4_gbp_policy_dpo) -VLIB_NODE_FUNCTION_MULTIARCH (ip6_gbp_policy_dpo_node, ip6_gbp_policy_dpo) /* *INDENT-ON* */ /* diff --git a/src/plugins/gbp/gbp_sclass.c b/src/plugins/gbp/gbp_sclass.c index e25ea38de01..24581e28904 100644 --- a/src/plugins/gbp/gbp_sclass.c +++ b/src/plugins/gbp/gbp_sclass.c @@ -146,44 +146,44 @@ gbp_sclass_inline (vlib_main_t * vm, return frame->n_vectors; } -uword -l2_gbp_id_2_sclass (vlib_main_t * vm, - vlib_node_runtime_t * node, vlib_frame_t * frame) +VLIB_NODE_FN (l2_gbp_id_2_sclass_node) (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * frame) { return (gbp_sclass_inline (vm, node, frame, 1, 1)); } -uword -l2_gbp_sclass_2_id (vlib_main_t * vm, - vlib_node_runtime_t * node, vlib_frame_t * frame) +VLIB_NODE_FN (l2_gbp_sclass_2_id_node) (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * frame) { return (gbp_sclass_inline (vm, node, frame, 0, 1)); } -uword -ip4_gbp_id_2_sclass (vlib_main_t * vm, - vlib_node_runtime_t * node, vlib_frame_t * frame) +VLIB_NODE_FN (ip4_gbp_id_2_sclass_node) (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * frame) { return (gbp_sclass_inline (vm, node, frame, 1, 0)); } -uword -ip4_gbp_sclass_2_id (vlib_main_t * vm, - vlib_node_runtime_t * node, vlib_frame_t * frame) +VLIB_NODE_FN (ip4_gbp_sclass_2_id_node) (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * frame) { return (gbp_sclass_inline (vm, node, frame, 0, 0)); } -uword -ip6_gbp_id_2_sclass (vlib_main_t * vm, - vlib_node_runtime_t * node, vlib_frame_t * frame) +VLIB_NODE_FN (ip6_gbp_id_2_sclass_node) (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * frame) { return (gbp_sclass_inline (vm, node, frame, 1, 0)); } -uword -ip6_gbp_sclass_2_id (vlib_main_t * vm, - vlib_node_runtime_t * node, vlib_frame_t * frame) +VLIB_NODE_FN (ip6_gbp_sclass_2_id_node) (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * frame) { return (gbp_sclass_inline (vm, node, frame, 0, 0)); } @@ -203,7 +203,6 @@ format_gbp_sclass_trace (u8 * s, va_list * args) /* *INDENT-OFF* */ VLIB_REGISTER_NODE (l2_gbp_id_2_sclass_node) = { - .function = l2_gbp_id_2_sclass, .name = "l2-gbp-id-2-sclass", .vector_size = sizeof (u32), .format_trace = format_gbp_sclass_trace, @@ -216,7 +215,6 @@ VLIB_REGISTER_NODE (l2_gbp_id_2_sclass_node) = { }, }; VLIB_REGISTER_NODE (l2_gbp_sclass_2_id_node) = { - .function = l2_gbp_sclass_2_id, .name = "l2-gbp-sclass-2-id", .vector_size = sizeof (u32), .format_trace = format_gbp_sclass_trace, @@ -230,7 +228,6 @@ VLIB_REGISTER_NODE (l2_gbp_sclass_2_id_node) = { }; VLIB_REGISTER_NODE (ip4_gbp_id_2_sclass_node) = { - .function = ip4_gbp_id_2_sclass, .name = "ip4-gbp-id-2-sclass", .vector_size = sizeof (u32), .format_trace = format_gbp_sclass_trace, @@ -243,7 +240,6 @@ VLIB_REGISTER_NODE (ip4_gbp_id_2_sclass_node) = { }, }; VLIB_REGISTER_NODE (ip4_gbp_sclass_2_id_node) = { - .function = ip4_gbp_sclass_2_id, .name = "ip4-gbp-sclass-2-id", .vector_size = 
sizeof (u32), .format_trace = format_gbp_sclass_trace, @@ -257,7 +253,6 @@ VLIB_REGISTER_NODE (ip4_gbp_sclass_2_id_node) = { }; VLIB_REGISTER_NODE (ip6_gbp_id_2_sclass_node) = { - .function = ip6_gbp_id_2_sclass, .name = "ip6-gbp-id-2-sclass", .vector_size = sizeof (u32), .format_trace = format_gbp_sclass_trace, @@ -270,7 +265,6 @@ VLIB_REGISTER_NODE (ip6_gbp_id_2_sclass_node) = { }, }; VLIB_REGISTER_NODE (ip6_gbp_sclass_2_id_node) = { - .function = ip6_gbp_sclass_2_id, .name = "ip6-gbp-sclass-2-id", .vector_size = sizeof (u32), .format_trace = format_gbp_sclass_trace, @@ -283,13 +277,6 @@ VLIB_REGISTER_NODE (ip6_gbp_sclass_2_id_node) = { }, }; -VLIB_NODE_FUNCTION_MULTIARCH (l2_gbp_id_2_sclass_node, l2_gbp_id_2_sclass); -VLIB_NODE_FUNCTION_MULTIARCH (l2_gbp_sclass_2_id_node, l2_gbp_sclass_2_id); - -VLIB_NODE_FUNCTION_MULTIARCH (ip4_gbp_id_2_sclass_node, ip4_gbp_id_2_sclass); -VLIB_NODE_FUNCTION_MULTIARCH (ip4_gbp_sclass_2_id_node, ip4_gbp_sclass_2_id); -VLIB_NODE_FUNCTION_MULTIARCH (ip6_gbp_id_2_sclass_node, ip6_gbp_id_2_sclass); -VLIB_NODE_FUNCTION_MULTIARCH (ip6_gbp_sclass_2_id_node, ip6_gbp_sclass_2_id); VNET_FEATURE_INIT (ip4_gbp_sclass_2_id_feat, static) = { @@ -315,6 +302,7 @@ VNET_FEATURE_INIT (ip6_gbp_id_2_sclass_feat, static) = }; /* *INDENT-ON* */ +#ifndef CLIB_MARCH_VARIANT void gbp_sclass_enable_l2 (u32 sw_if_index) { @@ -354,20 +342,24 @@ gbp_sclass_disable_ip (u32 sw_if_index) vnet_feature_enable_disable ("ip6-output", "ip6-gbp-id-2-sclass", sw_if_index, 0, 0, 0); } +#endif /* CLIB_MARCH_VARIANT */ static clib_error_t * gbp_sclass_init (vlib_main_t * vm) { gbp_sclass_main_t *glm = &gbp_sclass_main; + vlib_node_t *node = vlib_get_node_by_name (vm, (u8 *) "l2-gbp-sclass-2-id"); /* Initialize the feature next-node indices */ feat_bitmap_init_next_nodes (vm, - l2_gbp_sclass_2_id_node.index, + node->index, L2INPUT_N_FEAT, l2input_get_feat_names (), glm->gel_l2_input_feat_next); + + node = vlib_get_node_by_name (vm, (u8 *) "l2-gbp-id-2-sclass"); feat_bitmap_init_next_nodes (vm, - l2_gbp_id_2_sclass_node.index, + node->index, L2OUTPUT_N_FEAT, l2output_get_feat_names (), glm->gel_l2_output_feat_next); diff --git a/src/plugins/gbp/gbp_vxlan.c b/src/plugins/gbp/gbp_vxlan.c index c7be90f0e36..846ea0fa6ff 100644 --- a/src/plugins/gbp/gbp_vxlan.c +++ b/src/plugins/gbp/gbp_vxlan.c @@ -79,7 +79,7 @@ static char *gbp_vxlan_tunnel_layer_strings[] = { -always_inline gbp_vxlan_tunnel_t * +gbp_vxlan_tunnel_t * gbp_vxlan_tunnel_get (index_t gti) { return (pool_elt_at_index (gbp_vxlan_tunnel_pool, gti)); @@ -325,194 +325,6 @@ vxlan_gbp_tunnel_lock (u32 sw_if_index) vxr->vxr_locks++; } -#define foreach_gbp_vxlan_input_next \ - _(DROP, "error-drop") \ - _(L2_INPUT, "l2-input") \ - _(IP4_INPUT, "ip4-input") \ - _(IP6_INPUT, "ip6-input") - -typedef enum -{ -#define _(s,n) GBP_VXLAN_INPUT_NEXT_##s, - foreach_gbp_vxlan_input_next -#undef _ - GBP_VXLAN_INPUT_N_NEXT, -} gbp_vxlan_input_next_t; - -#define foreach_gbp_vxlan_error \ - _(DECAPPED, "decapped") \ - _(LEARNED, "learned") - -typedef enum -{ -#define _(s,n) GBP_VXLAN_ERROR_##s, - foreach_gbp_vxlan_error -#undef _ - GBP_VXLAN_N_ERROR, -} gbp_vxlan_input_error_t; - -static char *gbp_vxlan_error_strings[] = { -#define _(n,s) s, - foreach_gbp_vxlan_error -#undef _ -}; - -typedef struct gbp_vxlan_trace_t_ -{ - u8 dropped; - u32 vni; - u32 sw_if_index; - u16 sclass; - u8 flags; -} gbp_vxlan_trace_t; - - -static uword -gbp_vxlan_decap (vlib_main_t * vm, - vlib_node_runtime_t * node, - vlib_frame_t * from_frame, u8 is_ip4) -{ - u32 n_left_to_next, 
n_left_from, next_index, *to_next, *from; - - next_index = 0; - from = vlib_frame_vector_args (from_frame); - n_left_from = from_frame->n_vectors; - - while (n_left_from > 0) - { - - vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); - - while (n_left_from > 0 && n_left_to_next > 0) - { - vxlan_gbp_header_t *vxlan_gbp0; - gbp_vxlan_input_next_t next0; - gbp_vxlan_tunnel_t *gt0; - vlib_buffer_t *b0; - u32 bi0, vni0; - uword *p; - - bi0 = to_next[0] = from[0]; - from += 1; - to_next += 1; - n_left_from -= 1; - n_left_to_next -= 1; - next0 = GBP_VXLAN_INPUT_NEXT_DROP; - - b0 = vlib_get_buffer (vm, bi0); - vxlan_gbp0 = - vlib_buffer_get_current (b0) - sizeof (vxlan_gbp_header_t); - - vni0 = vxlan_gbp_get_vni (vxlan_gbp0); - p = hash_get (gv_db, vni0); - - if (PREDICT_FALSE (NULL == p)) - { - gt0 = NULL; - next0 = GBP_VXLAN_INPUT_NEXT_DROP; - } - else - { - gt0 = gbp_vxlan_tunnel_get (p[0]); - - vnet_buffer (b0)->sw_if_index[VLIB_RX] = gt0->gt_sw_if_index; - - if (GBP_VXLAN_TUN_L2 == gt0->gt_layer) - /* - * An L2 layer tunnel goes into the BD - */ - next0 = GBP_VXLAN_INPUT_NEXT_L2_INPUT; - else - { - /* - * An L3 layer tunnel needs to strip the L2 header - * an inject into the RD - */ - ethernet_header_t *e0; - u16 type0; - - e0 = vlib_buffer_get_current (b0); - type0 = clib_net_to_host_u16 (e0->type); - switch (type0) - { - case ETHERNET_TYPE_IP4: - next0 = GBP_VXLAN_INPUT_NEXT_IP4_INPUT; - break; - case ETHERNET_TYPE_IP6: - next0 = GBP_VXLAN_INPUT_NEXT_IP6_INPUT; - break; - default: - goto trace; - } - vlib_buffer_advance (b0, sizeof (*e0)); - } - } - - trace: - if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED)) - { - gbp_vxlan_trace_t *tr; - - tr = vlib_add_trace (vm, node, b0, sizeof (*tr)); - tr->dropped = (next0 == GBP_VXLAN_INPUT_NEXT_DROP); - tr->vni = vni0; - tr->sw_if_index = (gt0 ? 
gt0->gt_sw_if_index : ~0); - tr->flags = vxlan_gbp_get_gpflags (vxlan_gbp0); - tr->sclass = vxlan_gbp_get_sclass (vxlan_gbp0); - } - - vlib_validate_buffer_enqueue_x1 (vm, node, next_index, - to_next, n_left_to_next, - bi0, next0); - } - - vlib_put_next_frame (vm, node, next_index, n_left_to_next); - } - - return from_frame->n_vectors; -} - -static u8 * -format_gbp_vxlan_rx_trace (u8 * s, va_list * args) -{ - CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *); - CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *); - gbp_vxlan_trace_t *t = va_arg (*args, gbp_vxlan_trace_t *); - - s = format (s, "vni:%d dropped:%d rx:%d sclass:%d flags:%U", - t->vni, t->dropped, t->sw_if_index, - t->sclass, format_vxlan_gbp_header_gpflags, t->flags); - - return (s); -} - -static uword -gbp_vxlan4_decap (vlib_main_t * vm, - vlib_node_runtime_t * node, vlib_frame_t * from_frame) -{ - return gbp_vxlan_decap (vm, node, from_frame, 1); -} - -/* *INDENT-OFF* */ -VLIB_REGISTER_NODE (gbp_vxlan4_input_node) = -{ - .function = gbp_vxlan4_decap, - .name = "gbp-vxlan4", - .vector_size = sizeof (u32), - .n_errors = GBP_VXLAN_N_ERROR, - .error_strings = gbp_vxlan_error_strings, - .n_next_nodes = GBP_VXLAN_INPUT_N_NEXT, - .format_trace = format_gbp_vxlan_rx_trace, - .next_nodes = { -#define _(s,n) [GBP_VXLAN_INPUT_NEXT_##s] = n, - foreach_gbp_vxlan_input_next -#undef _ - }, -}; -VLIB_NODE_FUNCTION_MULTIARCH (gbp_vxlan4_input_node, gbp_vxlan4_decap) - -/* *INDENT-ON* */ - void gbp_vxlan_walk (gbp_vxlan_cb_t cb, void *ctx) { @@ -866,6 +678,7 @@ static clib_error_t * gbp_vxlan_init (vlib_main_t * vm) { u32 slot4; + vlib_node_t *node = vlib_get_node_by_name (vm, (u8 *) "gbp-vxlan4"); /* * insert ourselves into the VXLAN-GBP arc to collect the no-tunnel @@ -873,7 +686,7 @@ gbp_vxlan_init (vlib_main_t * vm) */ slot4 = vlib_node_add_next_with_slot (vm, vxlan4_gbp_input_node.index, - gbp_vxlan4_input_node.index, + node->index, VXLAN_GBP_INPUT_NEXT_NO_TUNNEL); ASSERT (slot4 == VXLAN_GBP_INPUT_NEXT_NO_TUNNEL); diff --git a/src/plugins/gbp/gbp_vxlan.h b/src/plugins/gbp/gbp_vxlan.h index 7aa22e351a2..acf5f4b1c7c 100644 --- a/src/plugins/gbp/gbp_vxlan.h +++ b/src/plugins/gbp/gbp_vxlan.h @@ -122,6 +122,7 @@ extern void gbp_vxlan_walk (gbp_vxlan_cb_t cb, void *ctx); extern u8 *format_gbp_vxlan_tunnel (u8 * s, va_list * args); extern u8 *format_gbp_vxlan_tunnel_layer (u8 * s, va_list * args); +extern gbp_vxlan_tunnel_t *gbp_vxlan_tunnel_get (index_t gti); #endif /* diff --git a/src/plugins/gbp/gbp_vxlan_node.c b/src/plugins/gbp/gbp_vxlan_node.c new file mode 100644 index 00000000000..c49f74af879 --- /dev/null +++ b/src/plugins/gbp/gbp_vxlan_node.c @@ -0,0 +1,219 @@ +/* + * Copyright (c) 2019 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include <plugins/gbp/gbp_vxlan.h> +#include <plugins/gbp/gbp_itf.h> +#include <plugins/gbp/gbp_learn.h> +#include <plugins/gbp/gbp_bridge_domain.h> +#include <plugins/gbp/gbp_route_domain.h> +#include <plugins/gbp/gbp_sclass.h> + +#include <vnet/vxlan-gbp/vxlan_gbp.h> +#include <vlibmemory/api.h> +#include <vnet/fib/fib_table.h> + +extern uword *gv_db; + +typedef struct gbp_vxlan_trace_t_ +{ + u8 dropped; + u32 vni; + u32 sw_if_index; + u16 sclass; + u8 flags; +} gbp_vxlan_trace_t; + +#define foreach_gbp_vxlan_input_next \ + _(DROP, "error-drop") \ + _(L2_INPUT, "l2-input") \ + _(IP4_INPUT, "ip4-input") \ + _(IP6_INPUT, "ip6-input") + +typedef enum +{ +#define _(s,n) GBP_VXLAN_INPUT_NEXT_##s, + foreach_gbp_vxlan_input_next +#undef _ + GBP_VXLAN_INPUT_N_NEXT, +} gbp_vxlan_input_next_t; + + +#define foreach_gbp_vxlan_error \ + _(DECAPPED, "decapped") \ + _(LEARNED, "learned") + +typedef enum +{ +#define _(s,n) GBP_VXLAN_ERROR_##s, + foreach_gbp_vxlan_error +#undef _ + GBP_VXLAN_N_ERROR, +} gbp_vxlan_input_error_t; + +static char *gbp_vxlan_error_strings[] = { +#define _(n,s) s, + foreach_gbp_vxlan_error +#undef _ +}; + +static uword +gbp_vxlan_decap (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * from_frame, u8 is_ip4) +{ + u32 n_left_to_next, n_left_from, next_index, *to_next, *from; + + next_index = 0; + from = vlib_frame_vector_args (from_frame); + n_left_from = from_frame->n_vectors; + + while (n_left_from > 0) + { + + vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); + + while (n_left_from > 0 && n_left_to_next > 0) + { + vxlan_gbp_header_t *vxlan_gbp0; + gbp_vxlan_input_next_t next0; + gbp_vxlan_tunnel_t *gt0; + vlib_buffer_t *b0; + u32 bi0, vni0; + uword *p; + + bi0 = to_next[0] = from[0]; + from += 1; + to_next += 1; + n_left_from -= 1; + n_left_to_next -= 1; + next0 = GBP_VXLAN_INPUT_NEXT_DROP; + + b0 = vlib_get_buffer (vm, bi0); + vxlan_gbp0 = + vlib_buffer_get_current (b0) - sizeof (vxlan_gbp_header_t); + + vni0 = vxlan_gbp_get_vni (vxlan_gbp0); + p = hash_get (gv_db, vni0); + + if (PREDICT_FALSE (NULL == p)) + { + gt0 = NULL; + next0 = GBP_VXLAN_INPUT_NEXT_DROP; + } + else + { + gt0 = gbp_vxlan_tunnel_get (p[0]); + + vnet_buffer (b0)->sw_if_index[VLIB_RX] = gt0->gt_sw_if_index; + + if (GBP_VXLAN_TUN_L2 == gt0->gt_layer) + /* + * An L2 layer tunnel goes into the BD + */ + next0 = GBP_VXLAN_INPUT_NEXT_L2_INPUT; + else + { + /* + * An L3 layer tunnel needs to strip the L2 header + * an inject into the RD + */ + ethernet_header_t *e0; + u16 type0; + + e0 = vlib_buffer_get_current (b0); + type0 = clib_net_to_host_u16 (e0->type); + switch (type0) + { + case ETHERNET_TYPE_IP4: + next0 = GBP_VXLAN_INPUT_NEXT_IP4_INPUT; + break; + case ETHERNET_TYPE_IP6: + next0 = GBP_VXLAN_INPUT_NEXT_IP6_INPUT; + break; + default: + goto trace; + } + vlib_buffer_advance (b0, sizeof (*e0)); + } + } + + trace: + if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED)) + { + gbp_vxlan_trace_t *tr; + + tr = vlib_add_trace (vm, node, b0, sizeof (*tr)); + tr->dropped = (next0 == GBP_VXLAN_INPUT_NEXT_DROP); + tr->vni = vni0; + tr->sw_if_index = (gt0 ? 
gt0->gt_sw_if_index : ~0); + tr->flags = vxlan_gbp_get_gpflags (vxlan_gbp0); + tr->sclass = vxlan_gbp_get_sclass (vxlan_gbp0); + } + + vlib_validate_buffer_enqueue_x1 (vm, node, next_index, + to_next, n_left_to_next, + bi0, next0); + } + + vlib_put_next_frame (vm, node, next_index, n_left_to_next); + } + + return from_frame->n_vectors; +} + +VLIB_NODE_FN (gbp_vxlan4_input_node) (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * from_frame) +{ + return gbp_vxlan_decap (vm, node, from_frame, 1); +} + +static u8 * +format_gbp_vxlan_rx_trace (u8 * s, va_list * args) +{ + CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *); + CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *); + gbp_vxlan_trace_t *t = va_arg (*args, gbp_vxlan_trace_t *); + + s = format (s, "vni:%d dropped:%d rx:%d sclass:%d flags:%U", + t->vni, t->dropped, t->sw_if_index, + t->sclass, format_vxlan_gbp_header_gpflags, t->flags); + + return (s); +} + +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE (gbp_vxlan4_input_node) = +{ + .name = "gbp-vxlan4", + .vector_size = sizeof (u32), + .n_errors = GBP_VXLAN_N_ERROR, + .error_strings = gbp_vxlan_error_strings, + .n_next_nodes = GBP_VXLAN_INPUT_N_NEXT, + .format_trace = format_gbp_vxlan_rx_trace, + .next_nodes = { +#define _(s,n) [GBP_VXLAN_INPUT_NEXT_##s] = n, + foreach_gbp_vxlan_input_next +#undef _ + }, +}; +/* *INDENT-ON* */ + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ |
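
One more pattern worth noting from the DPO and sclass files above: code that must exist exactly once (pools, hash tables, enable/disable helpers, init functions) is wrapped in `#ifndef CLIB_MARCH_VARIANT`, since sources listed under `MULTIARCH_SOURCES` are compiled once per CPU variant and only the node bodies should be duplicated. A minimal sketch under that assumption, with a hypothetical global and helper that are not part of the patch:

```c
#include <vlib/vlib.h>

#ifndef CLIB_MARCH_VARIANT
/* Compiled only in the default variant: a single copy of the global state
 * and its control-plane helper, avoiding duplicate symbols across variants. */
u32 example_enable_count;

void
example_enable (void)
{
  example_enable_count++;
}
#endif /* CLIB_MARCH_VARIANT */

/* Compiled for every march variant selected at build time. */
VLIB_NODE_FN (example_guarded_node) (vlib_main_t * vm,
                                     vlib_node_runtime_t * node,
                                     vlib_frame_t * frame)
{
  return frame->n_vectors;
}

VLIB_REGISTER_NODE (example_guarded_node) = {
  .name = "example-guarded-node",
  .vector_size = sizeof (u32),
  .type = VLIB_NODE_TYPE_INTERNAL,
};
```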