author     John Lo <loj@cisco.com>                      2017-09-23 08:59:58 -0400
committer  Dave Barach <openvpp@barachs.net>            2017-09-25 16:07:37 +0000
commit     7e9743aef924093c9c25bdf445637434c190d31a (patch)
tree       74367e3929a946465d9644ede6a21bd32ee197e5
parent     905c14af2b1464840cea201daed005cb30513683 (diff)
Fix sending GARP/NA on Bonded Interface Active/Backup Link Up/Down
For a bonded interface in Active/Backup mode (mode 1), we need to
send a GARP/NA packet, if an IP address is present, on a slave link
state change to up or down, to help with route convergence. The
callback from DPDK happens in a separate thread, so we need to make
sure an RPC call is used to signal the send_garp_na process in the
main thread. Also fix DPDK polling so that slave links of a bonded
interface are not polled.
Change-Id: If5fd8ea2d28c54dd28726ac403ad366386ce9651
Signed-off-by: John Lo <loj@cisco.com>
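As background for the hand-off described above, the following is a minimal, hypothetical sketch of the pattern this patch uses: a thread that is not a registered VPP worker (such as the DPDK link-state callback thread) must not signal a vlib process node directly, so it queues an RPC to the main thread with vl_api_force_rpc_call_main_thread (introduced by this patch), and the RPC callback, running in the main thread, signals the process node. The example_* names are placeholders for illustration only; the real implementation is in the diff below.

#include <vlib/vlib.h>
#include <vnet/vnet.h>

/* Added to memory_vlib.c by this patch: always queue the RPC through
 * shared memory, even if the caller is not a registered worker thread. */
void vl_api_force_rpc_call_main_thread (void *fp, u8 * data, u32 data_length);

enum
{
  EXAMPLE_EVENT = 1,
};

static vlib_node_registration_t example_proc_node;

/* Process node: runs in the main thread and performs the actual work
 * (in the patch, sending GARP/NA on the bonded interface). */
static uword
example_process (vlib_main_t * vm, vlib_node_runtime_t * rt, vlib_frame_t * f)
{
  uword event_type, *event_data = 0;

  while (1)
    {
      vlib_process_wait_for_event (vm);
      event_type = vlib_process_get_events (vm, &event_data);
      ASSERT (event_type == EXAMPLE_EVENT);
      /* ... act on each event_data[i] here ... */
      vec_reset_length (event_data);
    }
  return 0;
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (example_proc_node, static) = {
  .function = example_process,
  .type = VLIB_NODE_TYPE_PROCESS,
  .name = "example-process",
};
/* *INDENT-ON* */

/* RPC target: executed in the main thread, where signaling a process
 * node is safe. The argument arrives as a copy made by the RPC layer. */
static void
example_rpc_callback (uword * arg)
{
  vlib_main_t *vm = vlib_get_main ();
  ASSERT (vlib_get_thread_index () == 0);
  vlib_process_signal_event (vm, example_proc_node.index, EXAMPLE_EVENT, *arg);
}

/* Entry point for a foreign thread (e.g. the DPDK link-state callback):
 * do not touch vlib state here, just hand the argument to the main
 * thread via a forced RPC. */
static void
example_notify_from_foreign_thread (uword arg)
{
  vl_api_force_rpc_call_main_thread (example_rpc_callback,
                                     (u8 *) & arg, sizeof (arg));
}

The patch's own version of this pattern (send_garp_na_process and garp_na_proc_callback in common.c) additionally suspends briefly per event so the link can settle before the GARP/NA is sent.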
-rw-r--r--  src/plugins/dpdk/device/common.c  |  94
-rw-r--r--  src/plugins/dpdk/device/node.c    |   2
-rw-r--r--  src/vlibmemory/memory_vlib.c      |  30
-rw-r--r--  src/vnet/ethernet/arp.c           |  38
-rw-r--r--  src/vnet/ethernet/arp_packet.h    |   9
5 files changed, 101 insertions, 72 deletions
diff --git a/src/plugins/dpdk/device/common.c b/src/plugins/dpdk/device/common.c
index 2707b4d889c..aedc3f5227d 100644
--- a/src/plugins/dpdk/device/common.c
+++ b/src/plugins/dpdk/device/common.c
@@ -181,12 +181,69 @@ dpdk_device_stop (dpdk_device_t * xd)
     }
 }
 
+/* Even type for send_garp_na_process */
+enum
+{
+  SEND_GARP_NA = 1,
+} dpdk_send_garp_na_process_event_t;
+
+static vlib_node_registration_t send_garp_na_proc_node;
+
+static uword
+send_garp_na_process (vlib_main_t * vm,
+                      vlib_node_runtime_t * rt, vlib_frame_t * f)
+{
+  vnet_main_t *vnm = vnet_get_main ();
+  uword event_type, *event_data = 0;
+
+  while (1)
+    {
+      u32 i;
+      uword dpdk_port;
+      vlib_process_wait_for_event (vm);
+      event_type = vlib_process_get_events (vm, &event_data);
+      ASSERT (event_type == SEND_GARP_NA);
+      for (i = 0; i < vec_len (event_data); i++)
+        {
+          dpdk_port = event_data[i];
+          if (i < 5)            /* wait 0.2 sec for link to settle, max total 1 sec */
+            vlib_process_suspend (vm, 0.2);
+          dpdk_device_t *xd = &dpdk_main.devices[dpdk_port];
+          u32 hw_if_index = xd->hw_if_index;
+          vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, hw_if_index);
+          dpdk_update_link_state (xd, vlib_time_now (vm));
+          send_ip4_garp (vm, hi);
+          send_ip6_na (vm, hi);
+        }
+      vec_reset_length (event_data);
+    }
+  return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (send_garp_na_proc_node, static) = {
+    .function = send_garp_na_process,
+    .type = VLIB_NODE_TYPE_PROCESS,
+    .name = "send-garp-na-process",
+};
+/* *INDENT-ON* */
+
+void vl_api_force_rpc_call_main_thread (void *fp, u8 * data, u32 data_length);
+
+static void
+garp_na_proc_callback (uword * dpdk_port)
+{
+  vlib_main_t *vm = vlib_get_main ();
+  ASSERT (vlib_get_thread_index () == 0);
+  vlib_process_signal_event
+    (vm, send_garp_na_proc_node.index, SEND_GARP_NA, *dpdk_port);
+}
+
 always_inline int
 dpdk_port_state_callback_inline (uint8_t port_id,
                                  enum rte_eth_event_type type, void *param)
 {
   struct rte_eth_link link;
-  vlib_main_t *vm = vlib_get_main ();
   dpdk_device_t *xd = &dpdk_main.devices[port_id];
 
   RTE_SET_USED (param);
@@ -201,32 +258,21 @@ dpdk_port_state_callback_inline (uint8_t port_id,
 
   if (xd->flags & DPDK_DEVICE_FLAG_BOND_SLAVE)
     {
-      u8 bd_port = xd->bond_port;
+      uword bd_port = xd->bond_port;
       int bd_mode = rte_eth_bond_mode_get (bd_port);
-
-      if ((link_up && !(xd->flags & DPDK_DEVICE_FLAG_BOND_SLAVE_UP)) ||
-          (!link_up && (xd->flags & DPDK_DEVICE_FLAG_BOND_SLAVE_UP)))
+#if 0
+      clib_warning ("Port %d state to %s, "
+                    "slave of port %d BondEthernet%d in mode %d",
+                    port_id, (link_up) ? "UP" : "DOWN",
+                    bd_port, xd->port_id, bd_mode);
+#endif
+      if (bd_mode == BONDING_MODE_ACTIVE_BACKUP)
         {
-          clib_warning ("Port %d state to %s, "
-                        "slave of port %d BondEthernet%d in mode %d",
-                        port_id, (link_up) ? "UP" : "DOWN",
-                        bd_port, xd->port_id, bd_mode);
-          if (bd_mode == BONDING_MODE_ACTIVE_BACKUP)
-            {
-              rte_eth_link_get_nowait (bd_port, &link);
-              if (link.link_status)     /* bonded interface up */
-                {
-                  u32 hw_if_index = dpdk_main.devices[bd_port].hw_if_index;
-                  vlib_process_signal_event
-                    (vm, send_garp_na_process_node_index, SEND_GARP_NA,
-                     hw_if_index);
-                }
-            }
+          vl_api_force_rpc_call_main_thread
+            (garp_na_proc_callback, (u8 *) & bd_port, sizeof (uword));
         }
-      if (link_up)              /* Update slave link status */
-        xd->flags |= DPDK_DEVICE_FLAG_BOND_SLAVE_UP;
-      else
-        xd->flags &= ~DPDK_DEVICE_FLAG_BOND_SLAVE_UP;
+      xd->flags |= link_up ?
+        DPDK_DEVICE_FLAG_BOND_SLAVE_UP : ~DPDK_DEVICE_FLAG_BOND_SLAVE_UP;
     }
   else                          /* Should not happen as callback not setup for "normal" links */
     {
diff --git a/src/plugins/dpdk/device/node.c b/src/plugins/dpdk/device/node.c
index 74fb8da1f2e..cf8b9699206 100644
--- a/src/plugins/dpdk/device/node.c
+++ b/src/plugins/dpdk/device/node.c
@@ -661,6 +661,8 @@ dpdk_input (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * f)
   foreach_device_and_queue (dq, rt->devices_and_queues)
     {
       xd = vec_elt_at_index(dm->devices, dq->dev_instance);
+      if (PREDICT_FALSE (xd->flags & DPDK_DEVICE_FLAG_BOND_SLAVE))
+        continue;       /* Do not poll slave to a bonded interface */
       if (xd->flags & DPDK_DEVICE_FLAG_MAYBE_MULTISEG)
         n_rx_packets += dpdk_device_input (dm, xd, node, thread_index,
                                            dq->queue_id, /* maybe_multiseg */ 1);
       else
diff --git a/src/vlibmemory/memory_vlib.c b/src/vlibmemory/memory_vlib.c
index b6b87529375..77959e6d366 100644
--- a/src/vlibmemory/memory_vlib.c
+++ b/src/vlibmemory/memory_vlib.c
@@ -1452,8 +1452,9 @@ vl_api_rpc_call_reply_t_handler (vl_api_rpc_call_reply_t * mp)
   clib_warning ("unimplemented");
 }
 
-void
-vl_api_rpc_call_main_thread (void *fp, u8 * data, u32 data_length)
+always_inline void
+vl_api_rpc_call_main_thread_inline (void *fp, u8 * data, u32 data_length,
+                                    u8 force_rpc)
 {
   vl_api_rpc_call_t *mp;
   api_main_t *am = &api_main;
@@ -1461,7 +1462,7 @@ vl_api_rpc_call_main_thread (void *fp, u8 * data, u32 data_length)
   unix_shared_memory_queue_t *q;
 
   /* Main thread: call the function directly */
-  if (vlib_get_thread_index () == 0)
+  if ((force_rpc == 0) && (vlib_get_thread_index () == 0))
     {
       vlib_main_t *vm = vlib_get_main ();
       void (*call_fp) (void *);
@@ -1507,6 +1508,29 @@ vl_api_rpc_call_main_thread (void *fp, u8 * data, u32 data_length)
   pthread_mutex_unlock (&q->mutex);
 }
 
+/*
+ * Check if called from worker threads.
+ * If so, make rpc call of fp through shmem.
+ * Otherwise, call fp directly
+ */
+void
+vl_api_rpc_call_main_thread (void *fp, u8 * data, u32 data_length)
+{
+  vl_api_rpc_call_main_thread_inline (fp, data, data_length, /*force_rpc */
+                                      0);
+}
+
+/*
+ * Always make rpc call of fp through shmem, useful for calling from threads
+ * not setup as worker threads, such as DPDK callback thread
+ */
+void
+vl_api_force_rpc_call_main_thread (void *fp, u8 * data, u32 data_length)
+{
+  vl_api_rpc_call_main_thread_inline (fp, data, data_length, /*force_rpc */
+                                      1);
+}
+
 static void
 vl_api_trace_plugin_msg_ids_t_handler (vl_api_trace_plugin_msg_ids_t * mp)
 {
diff --git a/src/vnet/ethernet/arp.c b/src/vnet/ethernet/arp.c
index e974d2551e5..120a276cc0f 100644
--- a/src/vnet/ethernet/arp.c
+++ b/src/vnet/ethernet/arp.c
@@ -2482,7 +2482,7 @@ ethernet_arp_change_mac (u32 sw_if_index)
   /* *INDENT-ON* */
 }
 
-void static
+void
 send_ip4_garp (vlib_main_t * vm, vnet_hw_interface_t * hi)
 {
   ip4_main_t *i4m = &ip4_main;
@@ -2526,42 +2526,6 @@ send_ip4_garp (vlib_main_t * vm, vnet_hw_interface_t * hi)
     }
 }
 
-static vlib_node_registration_t send_garp_na_proc_node;
-
-static uword
-send_garp_na_process (vlib_main_t * vm,
-                      vlib_node_runtime_t * rt, vlib_frame_t * f)
-{
-  vnet_main_t *vnm = vnet_get_main ();
-  uword event_type, *event_data = 0;
-
-  send_garp_na_process_node_index = send_garp_na_proc_node.index;
-
-  while (1)
-    {
-      vlib_process_wait_for_event (vm);
-      event_type = vlib_process_get_events (vm, &event_data);
-      if ((event_type == SEND_GARP_NA) && (vec_len (event_data) >= 1))
-        {
-          u32 hw_if_index = event_data[0];
-          vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, hw_if_index);
-          send_ip4_garp (vm, hi);
-          send_ip6_na (vm, hi);
-        }
-      vec_reset_length (event_data);
-    }
-  return 0;
-}
-
-
-/* *INDENT-OFF* */
-VLIB_REGISTER_NODE (send_garp_na_proc_node, static) = {
-    .function = send_garp_na_process,
-    .type = VLIB_NODE_TYPE_PROCESS,
-    .name = "send-garp-na-process",
-};
-/* *INDENT-ON* */
-
 /*
  * fd.io coding-style-patch-verification: ON
  *
diff --git a/src/vnet/ethernet/arp_packet.h b/src/vnet/ethernet/arp_packet.h
index d740b844e12..661f33f93af 100644
--- a/src/vnet/ethernet/arp_packet.h
+++ b/src/vnet/ethernet/arp_packet.h
@@ -167,14 +167,7 @@ typedef struct
 ethernet_arp_ip4_entry_t *ip4_neighbor_entries (u32 sw_if_index);
 u8 *format_ethernet_arp_ip4_entry (u8 * s, va_list * va);
 
-/* Node index for send_garp_na_process */
-extern u32 send_garp_na_process_node_index;
-
-/* Even type for send_garp_na_process */
-enum
-{
-  SEND_GARP_NA = 1,
-} dpdk_send_garp_na_process_event_t;
+void send_ip4_garp (vlib_main_t * vm, vnet_hw_interface_t * hi);
 
 #endif /* included_ethernet_arp_packet_h */