author    | Neale Ranns <nranns@cisco.com>  | 2020-09-29 15:38:51 +0000
committer | Damjan Marion <dmarion@me.com>  | 2020-10-08 08:51:59 +0000
commit    | 47a3d9975fa3af7a7537b565d6511dadc0df61fb (patch)
tree      | fa33e3360af84239615f48b164b239ee3b660ee6 /src/vnet/l2/l2_input.c
parent    | 83143710e80c8df703fe1ebc0e513aa37971d295 (diff)
l2: input performance
Type: improvement
- cache the values from the BD on the input config, to avoid loading the
  bridge-domain struct per packet (sketched after the commit message)
- avoid the short-write/long-read on the sequence number (see the sketch
  just before the diff)
- use vlib_buffer_enqueue_to_next instead of per-packet speculative
  enqueue (see the note after the diff)
Signed-off-by: Neale Ranns <nranns@cisco.com>
Change-Id: I33442b9104b457e4c638d26e9ad3bc965687a0bc
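
The first bullet corresponds to the l2input_recache() walk added by this patch: when a bridge domain changes, its mac_age, seq_num and feature bitmap are copied onto each member interface's input config, so the per-packet path only loads the interface config. Below is a minimal standalone sketch of that idea, assuming made-up types and names (bd_cfg_t, if_cfg_t, recache_bd_on_interfaces) that are not part of VPP:

```c
/* Illustrative model only -- not VPP code. */
#include <stdint.h>
#include <stdio.h>

typedef struct {
  uint32_t feature_bitmap;
  uint8_t  mac_age;
  uint8_t  seq_num;
} bd_cfg_t;                 /* per-bridge-domain state */

typedef struct {
  uint32_t feature_bitmap;  /* interface's own features */
  /* copies of the BD fields, refreshed only when the BD changes */
  uint32_t bd_feature_bitmap;
  uint8_t  bd_mac_age;
  uint8_t  bd_seq_num;
} if_cfg_t;                 /* per-interface input config */

/* Refresh the cached BD values on every member interface;
 * the per-packet path then touches only if_cfg_t. */
static void
recache_bd_on_interfaces (const bd_cfg_t *bd, if_cfg_t *ifs, int n_ifs)
{
  for (int i = 0; i < n_ifs; i++)
    {
      ifs[i].bd_feature_bitmap = bd->feature_bitmap;
      ifs[i].bd_mac_age = bd->mac_age;
      ifs[i].bd_seq_num = bd->seq_num;
    }
}

int
main (void)
{
  bd_cfg_t bd = { .feature_bitmap = 0x7, .mac_age = 5, .seq_num = 1 };
  if_cfg_t ifs[2] = { { .feature_bitmap = 0xf }, { .feature_bitmap = 0x3 } };

  recache_bd_on_interfaces (&bd, ifs, 2);

  /* per-packet: one load of the interface config, no BD load */
  uint32_t fb = ifs[0].feature_bitmap & ifs[0].bd_feature_bitmap;
  printf ("effective feature bitmap: 0x%x\n", fb);
  return 0;
}
```

This mirrors the recache pattern in the diff (bd_input_walk() calling l2input_recache()); the real VPP structures carry more state than shown here.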
Diffstat (limited to 'src/vnet/l2/l2_input.c')
-rw-r--r-- | src/vnet/l2/l2_input.c | 501
1 file changed, 99 insertions, 402 deletions
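
The second bullet refers to the per-packet construction of l2fib_seq_num_t visible in the removed classify_and_dispatch() below: two u8 union fields are written and the combined u16 is read straight back, a pattern that can defeat store-to-load forwarding. A standalone illustration of composing the value from already-cached bytes instead (the byte order and the function name are illustrative assumptions, not VPP's definitions):

```c
/* Illustrative model only -- the real types live in VPP's l2_fib.h. */
#include <stdint.h>
#include <stdio.h>

/* Build the combined sequence number with shifts/OR from bytes that are
 * already cached, instead of writing two narrow fields and immediately
 * reloading the wider value through a union. */
static inline uint16_t
seq_num_pack (uint8_t if_sn, uint8_t bd_sn)
{
  return (uint16_t) (((uint16_t) bd_sn << 8) | if_sn);
}

int
main (void)
{
  uint8_t if_seq = 3;   /* per-interface sequence number (cached) */
  uint8_t bd_seq = 7;   /* per-bridge-domain sequence number (cached) */

  printf ("packed seq-num: 0x%04x\n", seq_num_pack (if_seq, bd_seq));
  return 0;
}
```
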
diff --git a/src/vnet/l2/l2_input.c b/src/vnet/l2/l2_input.c
index 5e73faa28eb..9dc452e1558 100644
--- a/src/vnet/l2/l2_input.c
+++ b/src/vnet/l2/l2_input.c
@@ -48,8 +48,6 @@
  * For interfaces in Layer 3 mode, the packets will be routed.
  */
 
-#ifndef CLIB_MARCH_VARIANT
-
 /* Feature graph node names */
 static char *l2input_feat_names[] = {
 #define _(sym,name) name,
@@ -64,7 +62,7 @@ l2input_get_feat_names (void)
 }
 
 u8 *
-format_l2_input_features (u8 * s, va_list * args)
+format_l2_input_feature_bitmap (u8 * s, va_list * args)
 {
   static char *display_names[] = {
 #define _(sym,name) #sym,
@@ -95,394 +93,51 @@ format_l2_input_features (u8 * s, va_list * args)
     }
   return s;
 }
-#endif /* CLIB_MARCH_VARIANT */
-
-typedef struct
-{
-  /* per-pkt trace data */
-  u8 dst_and_src[12];
-  u32 next_index;
-  u32 sw_if_index;
-} l2input_trace_t;
-
-/* packet trace format function */
-static u8 *
-format_l2input_trace (u8 * s, va_list * args)
-{
-  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
-  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
-  l2input_trace_t *t = va_arg (*args, l2input_trace_t *);
-
-  s = format (s, "l2-input: sw_if_index %d dst %U src %U",
-              t->sw_if_index,
-              format_ethernet_address, t->dst_and_src,
-              format_ethernet_address, t->dst_and_src + 6);
-  return s;
-}
-
-extern l2input_main_t l2input_main;
-
-#ifndef CLIB_MARCH_VARIANT
-l2input_main_t l2input_main;
-#endif /* CLIB_MARCH_VARIANT */
-
-#define foreach_l2input_error                  \
-_(L2INPUT,     "L2 input packets")             \
-_(DROP,        "L2 input drops")
-
-typedef enum
-{
-#define _(sym,str) L2INPUT_ERROR_##sym,
-  foreach_l2input_error
-#undef _
-  L2INPUT_N_ERROR,
-} l2input_error_t;
-static char *l2input_error_strings[] = {
-#define _(sym,string) string,
-  foreach_l2input_error
-#undef _
-};
-
-typedef enum
-{                               /*  */
-  L2INPUT_NEXT_LEARN,
-  L2INPUT_NEXT_FWD,
-  L2INPUT_NEXT_DROP,
-  L2INPUT_N_NEXT,
-} l2input_next_t;
-
-
-static_always_inline void
-classify_and_dispatch (l2input_main_t * msm, vlib_buffer_t * b0, u32 * next0)
+u8 *
+format_l2_input_features (u8 * s, va_list * args)
 {
-  /*
-   * Load L2 input feature struct
-   * Load bridge domain struct
-   * Parse ethernet header to determine unicast/mcast/broadcast
-   * take L2 input stat
-   * classify packet as IP/UDP/TCP, control, other
-   * mask feature bitmap
-   * go to first node in bitmap
-   * Later: optimize VTM
-   *
-   * For L2XC,
-   *   set tx sw-if-handle
-   */
-
-  u32 feat_mask = ~0;
-  u32 sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
-  ethernet_header_t *h0 = vlib_buffer_get_current (b0);
-
-  /* Get config for the input interface */
-  l2_input_config_t *config = vec_elt_at_index (msm->configs, sw_if_index0);
-
-  /* Save split horizon group */
-  vnet_buffer (b0)->l2.shg = config->shg;
-
-  /* determine layer2 kind for stat and mask */
-  if (PREDICT_FALSE (ethernet_address_cast (h0->dst_address)))
-    {
-      u8 *l3h0 = (u8 *) h0 + vnet_buffer (b0)->l2.l2_len;
-
-#define get_u16(addr) ( *((u16 *)(addr)) )
-      u16 ethertype = clib_net_to_host_u16 (get_u16 (l3h0 - 2));
-      u8 protocol = ((ip6_header_t *) l3h0)->protocol;
-
-      /* Disable bridge forwarding (flooding will execute instead if not xconnect) */
-      feat_mask &= ~(L2INPUT_FEAT_FWD |
-                     L2INPUT_FEAT_UU_FLOOD |
-                     L2INPUT_FEAT_UU_FWD | L2INPUT_FEAT_GBP_FWD);
-
-      if (ethertype != ETHERNET_TYPE_ARP)
-        feat_mask &= ~(L2INPUT_FEAT_ARP_UFWD);
-
-      /* Disable ARP-term for non-ARP and non-ICMP6 packet */
-      if (ethertype != ETHERNET_TYPE_ARP &&
-          (ethertype != ETHERNET_TYPE_IP6 || protocol != IP_PROTOCOL_ICMP6))
-        feat_mask &= ~(L2INPUT_FEAT_ARP_TERM);
-      /*
-       * For packet from BVI - set SHG of ARP request or ICMPv6 neighbor
-       * solicitation packet from BVI to 0 so it can also flood to VXLAN
-       * tunnels or other ports with the same SHG as that of the BVI.
-       */
-      else if (PREDICT_FALSE (vnet_buffer (b0)->sw_if_index[VLIB_TX] ==
-                              L2INPUT_BVI))
-        {
-          if (ethertype == ETHERNET_TYPE_ARP)
-            {
-              ethernet_arp_header_t *arp0 = (ethernet_arp_header_t *) l3h0;
-              if (arp0->opcode ==
-                  clib_host_to_net_u16 (ETHERNET_ARP_OPCODE_request))
-                vnet_buffer (b0)->l2.shg = 0;
-            }
-          else                  /* must be ICMPv6 */
-            {
-              ip6_header_t *iph0 = (ip6_header_t *) l3h0;
-              icmp6_neighbor_solicitation_or_advertisement_header_t *ndh0;
-              ndh0 = ip6_next_header (iph0);
-              if (ndh0->icmp.type == ICMP6_neighbor_solicitation)
-                vnet_buffer (b0)->l2.shg = 0;
-            }
-        }
-    }
-  else
-    {
-      /*
-       * For packet from BVI - set SHG of unicast packet from BVI to 0 so it
-       * is not dropped on output to VXLAN tunnels or other ports with the
-       * same SHG as that of the BVI.
-       */
-      if (PREDICT_FALSE (vnet_buffer (b0)->sw_if_index[VLIB_TX] ==
-                         L2INPUT_BVI))
-        vnet_buffer (b0)->l2.shg = 0;
-    }
-
-
-  if (config->bridge)
-    {
-      /* Do bridge-domain processing */
-      u16 bd_index0 = config->bd_index;
-      /* save BD ID for next feature graph nodes */
-      vnet_buffer (b0)->l2.bd_index = bd_index0;
-
-      /* Get config for the bridge domain interface */
-      l2_bridge_domain_t *bd_config =
-        vec_elt_at_index (msm->bd_configs, bd_index0);
-
-      /* Save bridge domain and interface seq_num */
-      /* *INDENT-OFF* */
-      l2fib_seq_num_t sn = {
-        .swif = *l2fib_swif_seq_num(sw_if_index0),
-        .bd = bd_config->seq_num,
-      };
-      /* *INDENT-ON* */
-      vnet_buffer (b0)->l2.l2fib_sn = sn.as_u16;;
-      vnet_buffer (b0)->l2.bd_age = bd_config->mac_age;
+  u32 sw_if_index = va_arg (*args, u32);
+  u32 verbose = va_arg (*args, u32);
 
-      /*
-       * Process bridge domain feature enables.
-       * To perform learning/flooding/forwarding, the corresponding bit
-       * must be enabled in both the input interface config and in the
-       * bridge domain config. In the bd_bitmap, bits for features other
-       * than learning/flooding/forwarding should always be set.
-       */
-      feat_mask = feat_mask & bd_config->feature_bitmap;
-    }
-  else if (config->xconnect)
-    {
-      /* Set the output interface */
-      vnet_buffer (b0)->sw_if_index[VLIB_TX] = config->output_sw_if_index;
-    }
-  else
-    feat_mask = L2INPUT_FEAT_DROP;
+  l2_input_config_t *l2_input = l2input_intf_config (sw_if_index);
+  u32 fb = l2_input->feature_bitmap;
 
-  /* mask out features from bitmap using packet type and bd config */
-  u32 feature_bitmap = config->feature_bitmap & feat_mask;
+  /* intf input features are masked by bridge domain */
+  if (l2_input_is_bridge (l2_input))
+    fb &= l2_input->bd_feature_bitmap;
 
-  /* save for next feature graph nodes */
-  vnet_buffer (b0)->l2.feature_bitmap = feature_bitmap;
+  s =
+    format (s, "\nl2-input:\n%U", format_l2_input_feature_bitmap, fb,
+            verbose);
 
-  /* Determine the next node */
-  *next0 = feat_bitmap_get_next_node_index (msm->feat_next_node_index,
-                                            feature_bitmap);
+  return (s);
 }
 
-static_always_inline uword
-l2input_node_inline (vlib_main_t * vm,
-                     vlib_node_runtime_t * node, vlib_frame_t * frame,
-                     int do_trace)
+u8 *
+format_l2_input (u8 * s, va_list * args)
 {
-  u32 n_left_from, *from, *to_next;
-  l2input_next_t next_index;
-  l2input_main_t *msm = &l2input_main;
-  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
-
-  from = vlib_frame_vector_args (frame);
-  n_left_from = frame->n_vectors;       /* number of packets to process */
-  next_index = node->cached_next_index;
-  vlib_get_buffers (vm, from, bufs, n_left_from);
+  u32 sw_if_index = va_arg (*args, u32);
+  l2_input_config_t *l2_input = l2input_intf_config (sw_if_index);
 
-  while (n_left_from > 0)
+  /* intf input features are masked by bridge domain */
+  if (l2_input_is_bridge (l2_input))
     {
-      u32 n_left_to_next;
-
-      /* get space to enqueue frame to graph node "next_index" */
-      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+      bd_main_t *bdm = &bd_main;
+      u32 bd_id = l2input_main.bd_configs[l2_input->bd_index].bd_id;
 
-      while (n_left_from >= 8 && n_left_to_next >= 4)
-        {
-          u32 next0, next1, next2, next3;
-          u32 sw_if_index0, sw_if_index1, sw_if_index2, sw_if_index3;
-
-          /* Prefetch next iteration. */
-          {
-
-            /* Prefetch the buffer header and packet for the N+2 loop iteration */
-            vlib_prefetch_buffer_header (b[4], LOAD);
-            vlib_prefetch_buffer_header (b[5], LOAD);
-            vlib_prefetch_buffer_header (b[6], LOAD);
-            vlib_prefetch_buffer_header (b[7], LOAD);
-
-            CLIB_PREFETCH (b[4]->data, CLIB_CACHE_LINE_BYTES, STORE);
-            CLIB_PREFETCH (b[5]->data, CLIB_CACHE_LINE_BYTES, STORE);
-            CLIB_PREFETCH (b[6]->data, CLIB_CACHE_LINE_BYTES, STORE);
-            CLIB_PREFETCH (b[7]->data, CLIB_CACHE_LINE_BYTES, STORE);
-
-            /*
-             * Don't bother prefetching the bridge-domain config (which
-             * depends on the input config above). Only a small number of
-             * bridge domains are expected. Plus the structure is small
-             * and several fit in a cache line.
-             */
-          }
-
-          /* speculatively enqueue b0 and b1 to the current next frame */
-          /* bi is "buffer index", b is pointer to the buffer */
-
-          if (do_trace)
-            {
-              /* RX interface handles */
-              sw_if_index0 = vnet_buffer (b[0])->sw_if_index[VLIB_RX];
-              sw_if_index1 = vnet_buffer (b[1])->sw_if_index[VLIB_RX];
-              sw_if_index2 = vnet_buffer (b[2])->sw_if_index[VLIB_RX];
-              sw_if_index3 = vnet_buffer (b[3])->sw_if_index[VLIB_RX];
-
-              if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
-                {
-                  ethernet_header_t *h0 = vlib_buffer_get_current (b[0]);
-                  l2input_trace_t *t =
-                    vlib_add_trace (vm, node, b[0], sizeof (*t));
-                  t->sw_if_index = sw_if_index0;
-                  clib_memcpy_fast (t->dst_and_src, h0->dst_address,
-                                    sizeof (h0->dst_address) +
-                                    sizeof (h0->src_address));
-                }
-              if (b[1]->flags & VLIB_BUFFER_IS_TRACED)
-                {
-                  ethernet_header_t *h1 = vlib_buffer_get_current (b[1]);
-                  l2input_trace_t *t =
-                    vlib_add_trace (vm, node, b[1], sizeof (*t));
-                  t->sw_if_index = sw_if_index1;
-                  clib_memcpy_fast (t->dst_and_src, h1->dst_address,
-                                    sizeof (h1->dst_address) +
-                                    sizeof (h1->src_address));
-                }
-              if (b[2]->flags & VLIB_BUFFER_IS_TRACED)
-                {
-                  ethernet_header_t *h2 = vlib_buffer_get_current (b[2]);
-                  l2input_trace_t *t =
-                    vlib_add_trace (vm, node, b[2], sizeof (*t));
-                  t->sw_if_index = sw_if_index2;
-                  clib_memcpy_fast (t->dst_and_src, h2->dst_address,
-                                    sizeof (h2->dst_address) +
-                                    sizeof (h2->src_address));
-                }
-              if (b[3]->flags & VLIB_BUFFER_IS_TRACED)
-                {
-                  ethernet_header_t *h3 = vlib_buffer_get_current (b[3]);
-                  l2input_trace_t *t =
-                    vlib_add_trace (vm, node, b[3], sizeof (*t));
-                  t->sw_if_index = sw_if_index3;
-                  clib_memcpy_fast (t->dst_and_src, h3->dst_address,
-                                    sizeof (h3->dst_address) +
-                                    sizeof (h3->src_address));
-                }
-            }
-
-          classify_and_dispatch (msm, b[0], &next0);
-          classify_and_dispatch (msm, b[1], &next1);
-          //show the better performance when clib_memcpy_fast is put here.
-          clib_memcpy_fast (to_next, from, sizeof (from[0]) * 4);
-          to_next += 4;
-          classify_and_dispatch (msm, b[2], &next2);
-          classify_and_dispatch (msm, b[3], &next3);
-          b += 4;
-          n_left_from -= 4;
-          n_left_to_next -= 4;
-
-          /* verify speculative enqueues, maybe switch current next frame */
-          /* if next0==next1==next_index then nothing special needs to be done */
-          vlib_validate_buffer_enqueue_x4 (vm, node, next_index,
-                                           to_next, n_left_to_next,
-                                           from[0], from[1], from[2], from[3],
-                                           next0, next1, next2, next3);
-          from += 4;
-        }
-
-      while (n_left_from > 0 && n_left_to_next > 0)
-        {
-          u32 next0;
-          u32 sw_if_index0;
-
-          /* speculatively enqueue b0 to the current next frame */
-
-          if (do_trace && PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
-            {
-              ethernet_header_t *h0 = vlib_buffer_get_current (b[0]);
-              l2input_trace_t *t =
-                vlib_add_trace (vm, node, b[0], sizeof (*t));
-              sw_if_index0 = vnet_buffer (b[0])->sw_if_index[VLIB_RX];
-              t->sw_if_index = sw_if_index0;
-              clib_memcpy_fast (t->dst_and_src, h0->dst_address,
-                                sizeof (h0->dst_address) +
-                                sizeof (h0->src_address));
-            }
-
-          classify_and_dispatch (msm, b[0], &next0);
-          b += 1;
-          to_next[0] = from[0];
-          to_next += 1;
-          n_left_from -= 1;
-          n_left_to_next -= 1;
-
-          /* verify speculative enqueue, maybe switch current next frame */
-          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
-                                           to_next, n_left_to_next,
-                                           from[0], next0);
-          from += 1;
-        }
-
-      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+      s = format (s, " L2 bridge bd-id %d idx %d shg %d %s",
+                  bd_id, bd_find_index (bdm, bd_id), l2_input->shg,
+                  l2_input_is_bvi (l2_input) ? "bvi" : " ");
"bvi" : " "); } + else if (l2_input_is_xconnect (l2_input)) + s = format (s, " L2 xconnect %U", + format_vnet_sw_if_index_name, vnet_get_main (), + l2_input->output_sw_if_index); - vlib_node_increment_counter (vm, l2input_node.index, - L2INPUT_ERROR_L2INPUT, frame->n_vectors); - - return frame->n_vectors; + return (s); } -VLIB_NODE_FN (l2input_node) (vlib_main_t * vm, - vlib_node_runtime_t * node, vlib_frame_t * frame) -{ - if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE))) - return l2input_node_inline (vm, node, frame, 1 /* do_trace */ ); - return l2input_node_inline (vm, node, frame, 0 /* do_trace */ ); -} - -/* *INDENT-OFF* */ -VLIB_REGISTER_NODE (l2input_node) = { - .name = "l2-input", - .vector_size = sizeof (u32), - .format_trace = format_l2input_trace, - .format_buffer = format_ethernet_header_with_length, - .type = VLIB_NODE_TYPE_INTERNAL, - - .n_errors = ARRAY_LEN(l2input_error_strings), - .error_strings = l2input_error_strings, - - .n_next_nodes = L2INPUT_N_NEXT, - - /* edit / add dispositions here */ - .next_nodes = { - [L2INPUT_NEXT_LEARN] = "l2-learn", - [L2INPUT_NEXT_FWD] = "l2-fwd", - [L2INPUT_NEXT_DROP] = "error-drop", - }, -}; -/* *INDENT-ON* */ - -#ifndef CLIB_MARCH_VARIANT clib_error_t * l2input_init (vlib_main_t * vm) { @@ -494,10 +149,6 @@ l2input_init (vlib_main_t * vm) /* Get packets RX'd from L2 interfaces */ ethernet_register_l2_input (vm, l2input_node.index); - /* Create the config vector */ - vec_validate (mp->configs, 100); - /* create 100 sw interface entries and zero them */ - /* Initialize the feature next-node indexes */ feat_bitmap_init_next_nodes (vm, l2input_node.index, @@ -555,7 +206,7 @@ l2input_interface_mac_change (u32 sw_if_index, intf_config = l2input_intf_config (sw_if_index); - if (intf_config->bridge && intf_config->bvi) + if (l2_input_is_bridge (intf_config) && l2_input_is_bvi (intf_config)) { /* delete and re-add l2fib entry for the bvi interface */ l2fib_del_entry (old_address, intf_config->bd_index, sw_if_index); @@ -567,6 +218,32 @@ l2input_interface_mac_change (u32 sw_if_index, } } +walk_rc_t +l2input_recache (u32 bd_index, u32 sw_if_index) +{ + l2_input_config_t *input; + l2_bridge_domain_t *bd; + + bd = bd_get (bd_index); + input = l2input_intf_config (sw_if_index); + + input->bd_mac_age = bd->mac_age; + input->bd_seq_num = bd->seq_num; + input->bd_feature_bitmap = bd->feature_bitmap; + + return (WALK_CONTINUE); +} + +void +l2_input_seq_num_inc (u32 sw_if_index) +{ + l2_input_config_t *input; + + input = vec_elt_at_index (l2input_main.configs, sw_if_index); + + input->seq_num++; +} + /** * Set the subinterface to run in l2 or l3 mode. * For L3 mode, just the sw_if_index is specified. @@ -584,7 +261,6 @@ set_int_l2_mode (vlib_main_t * vm, vnet_main_t * vnet_main, /* */ u32 shg, /* the bridged interface split horizon group */ u32 xc_sw_if_index) /* peer interface for xconnect */ { - l2input_main_t *mp = &l2input_main; l2output_main_t *l2om = &l2output_main; vnet_main_t *vnm = vnet_get_main (); vnet_hw_interface_t *hi; @@ -600,10 +276,10 @@ set_int_l2_mode (vlib_main_t * vm, vnet_main_t * vnet_main, /* */ if (l2fib_main.mac_table_initialized == 0) l2fib_table_init (); - if (config->bridge) + if (l2_input_is_bridge (config)) { /* Interface is already in bridge mode. Undo the existing config. 
-      bd_config = vec_elt_at_index (mp->bd_configs, config->bd_index);
+      bd_config = bd_get (config->bd_index);
 
       /* remove interface from flood vector */
       bd_remove_member (bd_config, sw_if_index);
@@ -614,7 +290,7 @@ set_int_l2_mode (vlib_main_t * vm, vnet_main_t * vnet_main, /*  */
           vnet_sw_interface_t *si;
 
           bd_config->bvi_sw_if_index = ~0;
-          config->bvi = 0;
+          config->flags &= ~L2_INPUT_FLAG_BVI;
 
           /* delete the l2fib entry for the bvi interface */
           l2fib_del_entry (hi->hw_address, config->bd_index, sw_if_index);
@@ -634,9 +310,10 @@ set_int_l2_mode (vlib_main_t * vm, vnet_main_t * vnet_main, /*  */
           (bd_config->feature_bitmap & L2INPUT_FEAT_LEARN))
         l2fib_flush_int_mac (vm, sw_if_index);
 
+      bd_input_walk (config->bd_index, l2input_recache, NULL);
       l2_if_adjust--;
     }
-  else if (config->xconnect)
+  else if (l2_input_is_xconnect (config))
     {
       l2_if_adjust--;
     }
@@ -650,8 +327,7 @@ set_int_l2_mode (vlib_main_t * vm, vnet_main_t * vnet_main, /*  */
     {
       /* Set L2 config to BD index 0 so that if any packet accidentally
        * came in on L2 path, it will be dropped in BD 0 */
-      config->xconnect = 0;
-      config->bridge = 0;
+      config->flags = L2_INPUT_FLAG_NONE;
       config->shg = 0;
       config->bd_index = 0;
       config->feature_bitmap = L2INPUT_FEAT_DROP;
@@ -684,10 +360,9 @@ set_int_l2_mode (vlib_main_t * vm, vnet_main_t * vnet_main, /*  */
       if (!hi)
        return MODE_ERROR_ETH;   /* non-ethernet */
 
-      config->xconnect = 0;
-      config->bridge = 1;
+      config->flags = L2_INPUT_FLAG_BRIDGE;
       config->bd_index = bd_index;
-      *l2fib_valid_swif_seq_num (sw_if_index) += 1;
+      l2_input_seq_num_inc (sw_if_index);
 
       /*
        * Enable forwarding, flooding, learning and ARP termination by default
@@ -724,7 +399,7 @@ set_int_l2_mode (vlib_main_t * vm, vnet_main_t * vnet_main, /*  */
               return MODE_ERROR_BVI_DEF;        /* bd already has a bvi interface */
             }
           bd_config->bvi_sw_if_index = sw_if_index;
-          config->bvi = 1;
+          config->flags |= L2_INPUT_FLAG_BVI;
 
           /* create the l2fib entry for the bvi interface */
           l2fib_add_entry (hi->hw_address, bd_index, sw_if_index,
@@ -763,8 +438,7 @@ set_int_l2_mode (vlib_main_t * vm, vnet_main_t * vnet_main, /*  */
     }
   else if (mode == MODE_L2_XC)
     {
-      config->xconnect = 1;
-      config->bridge = 0;
+      config->flags = L2_INPUT_FLAG_XCONNECT;
       config->output_sw_if_index = xc_sw_if_index;
 
       /* Make sure last-chance drop is configured */
@@ -779,8 +453,7 @@ set_int_l2_mode (vlib_main_t * vm, vnet_main_t * vnet_main, /*  */
     }
   else if (mode == MODE_L2_CLASSIFY)
     {
-      config->xconnect = 1;
-      config->bridge = 0;
+      config->flags = L2_INPUT_FLAG_XCONNECT;
       config->output_sw_if_index = xc_sw_if_index;
 
       /* Make sure last-chance drop is configured */
@@ -806,6 +479,8 @@ set_int_l2_mode (vlib_main_t * vm, vnet_main_t * vnet_main, /*  */
        */
       l2_if_adjust++;
+
+      bd_input_walk (bd_index, l2input_recache, NULL);
     }
 
   /* Adjust count of L2 interfaces */
@@ -841,7 +516,31 @@ set_int_l2_mode (vlib_main_t * vm, vnet_main_t * vnet_main, /*  */
 
   return 0;
 }
-#endif /* CLIB_MARCH_VARIANT */
+
+static clib_error_t *
+l2_input_interface_add_del (vnet_main_t * vnm, u32 sw_if_index, u32 is_add)
+{
+  if (!is_add)
+    {
+      vlib_main_t *vm = vlib_get_main ();
+      l2_input_config_t *config;
+
+      if (sw_if_index < vec_len (l2input_main.configs))
+        {
+          config = vec_elt_at_index (l2input_main.configs, sw_if_index);
+          if (l2_input_is_xconnect (config))
+            set_int_l2_mode (vm, vnm, MODE_L3, config->output_sw_if_index, 0,
+                             L2_BD_PORT_TYPE_NORMAL, 0, 0);
+          if (l2_input_is_xconnect (config) || l2_input_is_bridge (config))
+            set_int_l2_mode (vm, vnm, MODE_L3, sw_if_index, 0,
+                             L2_BD_PORT_TYPE_NORMAL, 0, 0);
+        }
+    }
+
+  return (NULL);
+}
+
+VNET_SW_INTERFACE_ADD_DEL_FUNCTION (l2_input_interface_add_del);
 
 /**
  * Set subinterface in bridging mode with a bridge-domain ID.
@@ -1119,16 +818,16 @@ show_int_mode (vlib_main_t * vm,
   vec_foreach (si, sis)
   {
     l2_input_config_t *config = l2input_intf_config (si->sw_if_index);
-    if (config->bridge)
+    if (l2_input_is_bridge (config))
       {
         u32 bd_id;
         mode = "l2 bridge";
         bd_id = l2input_main.bd_configs[config->bd_index].bd_id;
 
-        args = format (0, "bd_id %d%s%d", bd_id,
-                       config->bvi ? " bvi shg " : " shg ", config->shg);
+        args = format (0, "bd_id %d%s shg %d", bd_id,
+                       l2_input_is_bvi (config) ? " bvi" : "", config->shg);
       }
-    else if (config->xconnect)
+    else if (l2_input_is_xconnect (config))
       {
         mode = "l2 xconnect";
         args = format (0, "%U",
@@ -1204,7 +903,6 @@ _(l2output_init)                         \
 _(l2_patch_init)                         \
 _(l2_xcrw_init)
 
-#ifndef CLIB_MARCH_VARIANT
 clib_error_t *
 l2_init (vlib_main_t * vm)
 {
@@ -1219,7 +917,6 @@ while (0);
 }
 
 VLIB_INIT_FUNCTION (l2_init);
-#endif /* CLIB_MARCH_VARIANT */
 
 /*
  * fd.io coding-style-patch-verification: ON
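
For the third bullet, the removed node loop above enqueued packets speculatively with vlib_get_next_frame()/vlib_validate_buffer_enqueue_x4(); the replacement loop, which this file-limited diff does not show, instead fills a parallel array of next-node indices and hands the whole vector to vlib_buffer_enqueue_to_next() in one call. Below is a toy, non-VPP model of that two-pass shape; the frame size, dispatch() stand-in and the counting loop are invented for illustration only:

```c
/* Conceptual model only -- not VPP code. */
#include <stdint.h>
#include <stdio.h>

#define FRAME_SIZE 8

enum { NEXT_LEARN = 0, NEXT_FWD = 1, NEXT_DROP = 2, N_NEXT = 3 };

/* stand-in for the per-packet classification step */
static uint16_t
dispatch (uint32_t buffer_index)
{
  return (buffer_index & 1) ? NEXT_FWD : NEXT_LEARN;
}

int
main (void)
{
  uint32_t buffers[FRAME_SIZE] = { 10, 11, 12, 13, 14, 15, 16, 17 };
  uint16_t nexts[FRAME_SIZE];

  /* pass 1: classification only, no per-packet enqueue bookkeeping */
  for (int i = 0; i < FRAME_SIZE; i++)
    nexts[i] = dispatch (buffers[i]);

  /* pass 2: a single batched enqueue would consume buffers[]/nexts[];
   * here we just tally how the batch splits across next nodes */
  int per_next[N_NEXT] = { 0 };
  for (int i = 0; i < FRAME_SIZE; i++)
    per_next[nexts[i]]++;

  for (int n = 0; n < N_NEXT; n++)
    printf ("next %d: %d packets\n", n, per_next[n]);
  return 0;
}
```

The design point is that the classification loop stays simple and branch-light, while the frame-splitting bookkeeping is done once per vector rather than once per packet.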