From f12a83f54ff2239d70494d577af3e1bb253692e1 Mon Sep 17 00:00:00 2001
From: Neale Ranns
Date: Tue, 18 Apr 2017 09:09:40 -0700
Subject: Improve Load-Balance MAPs

- only build them for popular path-lists (where popular means more than 64
  children); the reason to have a map is to improve convergence speed for
  recursive prefixes - if there are only a few, this technique is not needed
- only build them when there is at least one path that has recursive
  constraints, i.e. a path that can 'fail' in a PIC scenario
- use the MAPs in the switch path
- PIC test cases for functionality (not convergence performance)

Change-Id: I70705444c8469d22b07ae34be82cfb6a01358e10
Signed-off-by: Neale Ranns
---
 src/vnet/ip/ip4_forward.c  | 137 ++++++++++++++++++++++++++-------------------
 src/vnet/ip/ip6_forward.c  | 104 ++++++++++++++++++++++++----------
 src/vnet/ip/ip6_neighbor.c |   2 +-
 3 files changed, 153 insertions(+), 90 deletions(-)

(limited to 'src/vnet/ip')

diff --git a/src/vnet/ip/ip4_forward.c b/src/vnet/ip/ip4_forward.c
index 0f562037..697d2169 100644
--- a/src/vnet/ip/ip4_forward.c
+++ b/src/vnet/ip/ip4_forward.c
@@ -49,6 +49,7 @@
 #include   /* for FIB uRPF check */
 #include
 #include
+#include
 #include
 #include   /* for mFIB table and entry creation */
 
@@ -89,7 +90,6 @@ ip4_lookup_inline (vlib_main_t * vm,
        {
          vlib_buffer_t *p0, *p1, *p2, *p3;
          ip4_header_t *ip0, *ip1, *ip2, *ip3;
-         __attribute__ ((unused)) tcp_header_t *tcp0, *tcp1, *tcp2, *tcp3;
          ip_lookup_next_t next0, next1, next2, next3;
          const load_balance_t *lb0, *lb1, *lb2, *lb3;
          ip4_fib_mtrie_t *mtrie0, *mtrie1, *mtrie2, *mtrie3;
@@ -188,11 +188,6 @@ ip4_lookup_inline (vlib_main_t * vm,
              leaf3 = ip4_fib_mtrie_lookup_step_one (mtrie3, dst_addr3);
            }
 
-         tcp0 = (void *) (ip0 + 1);
-         tcp1 = (void *) (ip1 + 1);
-         tcp2 = (void *) (ip2 + 1);
-         tcp3 = (void *) (ip3 + 1);
-
          if (!lookup_for_responses_to_locally_received_packets)
            {
              leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, dst_addr0, 2);
@@ -230,6 +225,15 @@ ip4_lookup_inline (vlib_main_t * vm,
          lb2 = load_balance_get (lb_index2);
          lb3 = load_balance_get (lb_index3);
 
+         ASSERT (lb0->lb_n_buckets > 0);
+         ASSERT (is_pow2 (lb0->lb_n_buckets));
+         ASSERT (lb1->lb_n_buckets > 0);
+         ASSERT (is_pow2 (lb1->lb_n_buckets));
+         ASSERT (lb2->lb_n_buckets > 0);
+         ASSERT (is_pow2 (lb2->lb_n_buckets));
+         ASSERT (lb3->lb_n_buckets > 0);
+         ASSERT (is_pow2 (lb3->lb_n_buckets));
+
          /* Use flow hash to compute multipath adjacency. */
          hash_c0 = vnet_buffer (p0)->ip.flow_hash = 0;
          hash_c1 = vnet_buffer (p1)->ip.flow_hash = 0;
@@ -240,47 +244,57 @@ ip4_lookup_inline (vlib_main_t * vm,
              flow_hash_config0 = lb0->lb_hash_config;
              hash_c0 = vnet_buffer (p0)->ip.flow_hash =
                ip4_compute_flow_hash (ip0, flow_hash_config0);
+             dpo0 =
+               load_balance_get_fwd_bucket (lb0,
+                                            (hash_c0 &
+                                             (lb0->lb_n_buckets_minus_1)));
+           }
+         else
+           {
+             dpo0 = load_balance_get_bucket_i (lb0, 0);
            }
          if (PREDICT_FALSE (lb1->lb_n_buckets > 1))
            {
              flow_hash_config1 = lb1->lb_hash_config;
              hash_c1 = vnet_buffer (p1)->ip.flow_hash =
                ip4_compute_flow_hash (ip1, flow_hash_config1);
+             dpo1 =
+               load_balance_get_fwd_bucket (lb1,
+                                            (hash_c1 &
+                                             (lb1->lb_n_buckets_minus_1)));
+           }
+         else
+           {
+             dpo1 = load_balance_get_bucket_i (lb1, 0);
            }
          if (PREDICT_FALSE (lb2->lb_n_buckets > 1))
            {
              flow_hash_config2 = lb2->lb_hash_config;
              hash_c2 = vnet_buffer (p2)->ip.flow_hash =
                ip4_compute_flow_hash (ip2, flow_hash_config2);
+             dpo2 =
+               load_balance_get_fwd_bucket (lb2,
+                                            (hash_c2 &
+                                             (lb2->lb_n_buckets_minus_1)));
+           }
+         else
+           {
+             dpo2 = load_balance_get_bucket_i (lb2, 0);
            }
          if (PREDICT_FALSE (lb3->lb_n_buckets > 1))
            {
              flow_hash_config3 = lb3->lb_hash_config;
              hash_c3 = vnet_buffer (p3)->ip.flow_hash =
                ip4_compute_flow_hash (ip3, flow_hash_config3);
+             dpo3 =
+               load_balance_get_fwd_bucket (lb3,
+                                            (hash_c3 &
+                                             (lb3->lb_n_buckets_minus_1)));
+           }
+         else
+           {
+             dpo3 = load_balance_get_bucket_i (lb3, 0);
            }
-
-         ASSERT (lb0->lb_n_buckets > 0);
-         ASSERT (is_pow2 (lb0->lb_n_buckets));
-         ASSERT (lb1->lb_n_buckets > 0);
-         ASSERT (is_pow2 (lb1->lb_n_buckets));
-         ASSERT (lb2->lb_n_buckets > 0);
-         ASSERT (is_pow2 (lb2->lb_n_buckets));
-         ASSERT (lb3->lb_n_buckets > 0);
-         ASSERT (is_pow2 (lb3->lb_n_buckets));
-
-         dpo0 = load_balance_get_bucket_i (lb0,
-                                           (hash_c0 &
-                                            (lb0->lb_n_buckets_minus_1)));
-         dpo1 = load_balance_get_bucket_i (lb1,
-                                           (hash_c1 &
-                                            (lb1->lb_n_buckets_minus_1)));
-         dpo2 = load_balance_get_bucket_i (lb2,
-                                           (hash_c2 &
-                                            (lb2->lb_n_buckets_minus_1)));
-         dpo3 = load_balance_get_bucket_i (lb3,
-                                           (hash_c3 &
-                                            (lb3->lb_n_buckets_minus_1)));
 
          next0 = dpo0->dpoi_next_node;
          vnet_buffer (p0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
@@ -293,20 +307,16 @@ ip4_lookup_inline (vlib_main_t * vm,
 
          vlib_increment_combined_counter
            (cm, thread_index, lb_index0, 1,
-            vlib_buffer_length_in_chain (vm, p0)
-            + sizeof (ethernet_header_t));
+            vlib_buffer_length_in_chain (vm, p0));
          vlib_increment_combined_counter
            (cm, thread_index, lb_index1, 1,
-            vlib_buffer_length_in_chain (vm, p1)
-            + sizeof (ethernet_header_t));
+            vlib_buffer_length_in_chain (vm, p1));
          vlib_increment_combined_counter
            (cm, thread_index, lb_index2, 1,
-            vlib_buffer_length_in_chain (vm, p2)
-            + sizeof (ethernet_header_t));
+            vlib_buffer_length_in_chain (vm, p2));
          vlib_increment_combined_counter
            (cm, thread_index, lb_index3, 1,
-            vlib_buffer_length_in_chain (vm, p3)
-            + sizeof (ethernet_header_t));
+            vlib_buffer_length_in_chain (vm, p3));
 
          vlib_validate_buffer_enqueue_x4 (vm, node, next, to_next,
                                           n_left_to_next,
@@ -318,7 +328,6 @@ ip4_lookup_inline (vlib_main_t * vm,
        {
          vlib_buffer_t *p0;
          ip4_header_t *ip0;
-         __attribute__ ((unused)) tcp_header_t *tcp0;
          ip_lookup_next_t next0;
          const load_balance_t *lb0;
          ip4_fib_mtrie_t *mtrie0;
@@ -352,8 +361,6 @@ ip4_lookup_inline (vlib_main_t * vm,
              leaf0 = ip4_fib_mtrie_lookup_step_one (mtrie0, dst_addr0);
            }
 
-         tcp0 = (void *) (ip0 + 1);
-
          if (!lookup_for_responses_to_locally_received_packets)
            leaf0 = ip4_fib_mtrie_lookup_step (mtrie0, leaf0, dst_addr0, 2);
 
@@ -371,6 +378,9 @@ ip4_lookup_inline (vlib_main_t * vm,
          ASSERT (lbi0);
          lb0 = load_balance_get (lbi0);
 
+         ASSERT (lb0->lb_n_buckets > 0);
+         ASSERT (is_pow2 (lb0->lb_n_buckets));
+
          /* Use flow hash to compute multipath adjacency. */
          hash_c0 = vnet_buffer (p0)->ip.flow_hash = 0;
          if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
@@ -379,20 +389,22 @@ ip4_lookup_inline (vlib_main_t * vm,
              hash_c0 = vnet_buffer (p0)->ip.flow_hash =
                ip4_compute_flow_hash (ip0, flow_hash_config0);
+             dpo0 =
+               load_balance_get_fwd_bucket (lb0,
+                                            (hash_c0 &
+                                             (lb0->lb_n_buckets_minus_1)));
+           }
+         else
+           {
+             dpo0 = load_balance_get_bucket_i (lb0, 0);
            }
-
-         ASSERT (lb0->lb_n_buckets > 0);
-         ASSERT (is_pow2 (lb0->lb_n_buckets));
-
-         dpo0 = load_balance_get_bucket_i (lb0,
-                                           (hash_c0 &
-                                            (lb0->lb_n_buckets_minus_1)));
 
          next0 = dpo0->dpoi_next_node;
          vnet_buffer (p0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
 
-         vlib_increment_combined_counter
-           (cm, thread_index, lbi0, 1, vlib_buffer_length_in_chain (vm, p0));
+         vlib_increment_combined_counter (cm, thread_index, lbi0, 1,
+                                          vlib_buffer_length_in_chain (vm,
+                                                                       p0));
 
          from += 1;
          to_next += 1;
@@ -555,6 +567,12 @@ ip4_load_balance (vlib_main_t * vm,
                  hc0 = vnet_buffer (p0)->ip.flow_hash =
                    ip4_compute_flow_hash (ip0, lb0->lb_hash_config);
                }
+             dpo0 = load_balance_get_fwd_bucket
+               (lb0, (hc0 & (lb0->lb_n_buckets_minus_1)));
+           }
+         else
+           {
+             dpo0 = load_balance_get_bucket_i (lb0, 0);
            }
          if (PREDICT_FALSE (lb1->lb_n_buckets > 1))
            {
@@ -568,14 +586,13 @@ ip4_load_balance (vlib_main_t * vm,
                  hc1 = vnet_buffer (p1)->ip.flow_hash =
                    ip4_compute_flow_hash (ip1, lb1->lb_hash_config);
                }
+             dpo1 = load_balance_get_fwd_bucket
+               (lb1, (hc1 & (lb1->lb_n_buckets_minus_1)));
+           }
+         else
+           {
+             dpo1 = load_balance_get_bucket_i (lb1, 0);
            }
-
-         dpo0 =
-           load_balance_get_bucket_i (lb0,
-                                      hc0 & (lb0->lb_n_buckets_minus_1));
-         dpo1 =
-           load_balance_get_bucket_i (lb1,
-                                      hc1 & (lb1->lb_n_buckets_minus_1));
 
          next0 = dpo0->dpoi_next_node;
          next1 = dpo1->dpoi_next_node;
@@ -629,11 +646,13 @@ ip4_load_balance (vlib_main_t * vm,
                  hc0 = vnet_buffer (p0)->ip.flow_hash =
                    ip4_compute_flow_hash (ip0, lb0->lb_hash_config);
                }
+             dpo0 = load_balance_get_fwd_bucket
+               (lb0, (hc0 & (lb0->lb_n_buckets_minus_1)));
+           }
+         else
+           {
+             dpo0 = load_balance_get_bucket_i (lb0, 0);
            }
-
-         dpo0 =
-           load_balance_get_bucket_i (lb0,
-                                      hc0 & (lb0->lb_n_buckets_minus_1));
 
          next0 = dpo0->dpoi_next_node;
          vnet_buffer (p0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
diff --git a/src/vnet/ip/ip6_forward.c b/src/vnet/ip/ip6_forward.c
index 98bfd4d1..3bc07d0e 100644
--- a/src/vnet/ip/ip6_forward.c
+++ b/src/vnet/ip/ip6_forward.c
@@ -45,7 +45,7 @@
 #include   /* for FIB uRPF check */
 #include
 #include
-#include
+#include
 #include
 #include
 
@@ -138,6 +138,10 @@ ip6_lookup_inline (vlib_main_t * vm,
          lb0 = load_balance_get (lbi0);
          lb1 = load_balance_get (lbi1);
 
+         ASSERT (lb0->lb_n_buckets > 0);
+         ASSERT (lb1->lb_n_buckets > 0);
+         ASSERT (is_pow2 (lb0->lb_n_buckets));
+         ASSERT (is_pow2 (lb1->lb_n_buckets));
 
          vnet_buffer (p0)->ip.flow_hash = vnet_buffer (p1)->ip.flow_hash = 0;
 
@@ -146,25 +150,29 @@ ip6_lookup_inline (vlib_main_t * vm,
              flow_hash_config0 = lb0->lb_hash_config;
              vnet_buffer (p0)->ip.flow_hash =
                ip6_compute_flow_hash (ip0, flow_hash_config0);
+             dpo0 =
+               load_balance_get_fwd_bucket (lb0,
+                                            (vnet_buffer (p0)->ip.flow_hash &
+                                             (lb0->lb_n_buckets_minus_1)));
+           }
+         else
+           {
+             dpo0 = load_balance_get_bucket_i (lb0, 0);
            }
          if (PREDICT_FALSE (lb1->lb_n_buckets > 1))
            {
              flow_hash_config1 = lb1->lb_hash_config;
              vnet_buffer (p1)->ip.flow_hash =
                ip6_compute_flow_hash (ip1, flow_hash_config1);
+             dpo1 =
+               load_balance_get_fwd_bucket (lb1,
+                                            (vnet_buffer (p1)->ip.flow_hash &
+                                             (lb1->lb_n_buckets_minus_1)));
+           }
+         else
+           {
+             dpo1 = load_balance_get_bucket_i (lb1, 0);
            }
-
-         ASSERT (lb0->lb_n_buckets > 0);
-         ASSERT (lb1->lb_n_buckets > 0);
-         ASSERT (is_pow2 (lb0->lb_n_buckets));
-         ASSERT (is_pow2 (lb1->lb_n_buckets));
-         dpo0 = load_balance_get_bucket_i (lb0,
-                                           (vnet_buffer (p0)->ip.flow_hash &
-                                            lb0->lb_n_buckets_minus_1));
-         dpo1 = load_balance_get_bucket_i (lb1,
-                                           (vnet_buffer (p1)->ip.flow_hash &
-                                            lb1->lb_n_buckets_minus_1));
-
          next0 = dpo0->dpoi_next_node;
          next1 = dpo1->dpoi_next_node;
 
@@ -266,16 +274,24 @@ ip6_lookup_inline (vlib_main_t * vm,
          lb0 = load_balance_get (lbi0);
 
          vnet_buffer (p0)->ip.flow_hash = 0;
+         ASSERT (lb0->lb_n_buckets > 0);
+         ASSERT (is_pow2 (lb0->lb_n_buckets));
 
          if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
            {
              flow_hash_config0 = lb0->lb_hash_config;
              vnet_buffer (p0)->ip.flow_hash =
                ip6_compute_flow_hash (ip0, flow_hash_config0);
+             dpo0 =
+               load_balance_get_fwd_bucket (lb0,
+                                            (vnet_buffer (p0)->ip.flow_hash &
+                                             (lb0->lb_n_buckets_minus_1)));
+           }
+         else
+           {
+             dpo0 = load_balance_get_bucket_i (lb0, 0);
            }
-         ASSERT (lb0->lb_n_buckets > 0);
-         ASSERT (is_pow2 (lb0->lb_n_buckets));
          dpo0 = load_balance_get_bucket_i (lb0,
                                            (vnet_buffer (p0)->ip.flow_hash &
                                             lb0->lb_n_buckets_minus_1));
 
@@ -337,10 +353,18 @@ ip6_add_interface_routes (vnet_main_t * vnm, u32 sw_if_index,
     {
       fib_node_index_t fei;
 
-      fei = fib_table_entry_update_one_path (fib_index, &pfx, FIB_SOURCE_INTERFACE, (FIB_ENTRY_FLAG_CONNECTED | FIB_ENTRY_FLAG_ATTACHED), FIB_PROTOCOL_IP6, NULL, /* No next-hop address */
-                                             sw_if_index, ~0, // invalid FIB index
-                                             1, NULL, // no label stack
-                                             FIB_ROUTE_PATH_FLAG_NONE);
+      fei = fib_table_entry_update_one_path (fib_index,
+                                             &pfx,
+                                             FIB_SOURCE_INTERFACE,
+                                             (FIB_ENTRY_FLAG_CONNECTED |
+                                              FIB_ENTRY_FLAG_ATTACHED),
+                                             FIB_PROTOCOL_IP6,
+                                             /* No next-hop address */
+                                             NULL, sw_if_index,
+                                             /* invalid FIB index */
+                                             ~0, 1,
+                                             /* no label stack */
+                                             NULL, FIB_ROUTE_PATH_FLAG_NONE);
       a->neighbor_probe_adj_index = fib_entry_get_adj (fei);
     }
 
@@ -366,7 +390,13 @@ ip6_add_interface_routes (vnet_main_t * vnm, u32 sw_if_index,
        }
     }
 
-  fib_table_entry_update_one_path (fib_index, &pfx, FIB_SOURCE_INTERFACE, (FIB_ENTRY_FLAG_CONNECTED | FIB_ENTRY_FLAG_LOCAL), FIB_PROTOCOL_IP6, &pfx.fp_addr, sw_if_index, ~0, // invalid FIB index
+  fib_table_entry_update_one_path (fib_index, &pfx,
+                                   FIB_SOURCE_INTERFACE,
+                                   (FIB_ENTRY_FLAG_CONNECTED |
+                                    FIB_ENTRY_FLAG_LOCAL),
+                                   FIB_PROTOCOL_IP6,
+                                   &pfx.fp_addr,
+                                   sw_if_index, ~0,
                                    1, NULL, FIB_ROUTE_PATH_FLAG_NONE);
 }
 
@@ -780,6 +810,14 @@ ip6_load_balance (vlib_main_t * vm,
                  hc0 = vnet_buffer (p0)->ip.flow_hash =
                    ip6_compute_flow_hash (ip0, lb0->lb_hash_config);
                }
+             dpo0 =
+               load_balance_get_fwd_bucket (lb0,
+                                            (hc0 &
+                                             lb0->lb_n_buckets_minus_1));
+           }
+         else
+           {
+             dpo0 = load_balance_get_bucket_i (lb0, 0);
            }
          if (PREDICT_FALSE (lb1->lb_n_buckets > 1))
            {
@@ -793,14 +831,15 @@ ip6_load_balance (vlib_main_t * vm,
                  hc1 = vnet_buffer (p1)->ip.flow_hash =
                    ip6_compute_flow_hash (ip1, lb1->lb_hash_config);
                }
+             dpo1 =
+               load_balance_get_fwd_bucket (lb1,
+                                            (hc1 &
+                                             lb1->lb_n_buckets_minus_1));
+           }
+         else
+           {
+             dpo1 = load_balance_get_bucket_i (lb1, 0);
            }
-
-         dpo0 =
-           load_balance_get_bucket_i (lb0,
-                                      hc0 & (lb0->lb_n_buckets_minus_1));
-         dpo1 =
-           load_balance_get_bucket_i (lb1,
-                                      hc1 & (lb1->lb_n_buckets_minus_1));
 
          next0 = dpo0->dpoi_next_node;
          next1 = dpo1->dpoi_next_node;
@@ -869,10 +908,15 @@ ip6_load_balance (vlib_main_t * vm,
                  hc0 = vnet_buffer (p0)->ip.flow_hash =
                    ip6_compute_flow_hash (ip0, lb0->lb_hash_config);
                }
+             dpo0 =
+               load_balance_get_fwd_bucket (lb0,
+                                            (hc0 &
+                                             lb0->lb_n_buckets_minus_1));
+           }
+         else
+           {
+             dpo0 = load_balance_get_bucket_i (lb0, 0);
            }
-         dpo0 =
-           load_balance_get_bucket_i (lb0,
-                                      hc0 & (lb0->lb_n_buckets_minus_1));
 
          next0 = dpo0->dpoi_next_node;
          vnet_buffer (p0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
diff --git a/src/vnet/ip/ip6_neighbor.c b/src/vnet/ip/ip6_neighbor.c
index 31182770..ee80ee3d 100644
--- a/src/vnet/ip/ip6_neighbor.c
+++ b/src/vnet/ip/ip6_neighbor.c
@@ -630,7 +630,7 @@ vnet_set_ip6_ethernet_neighbor (vlib_main_t * vm,
       n->fib_entry_index =
        fib_table_entry_update_one_path (fib_index, &pfx,
                                         FIB_SOURCE_ADJ,
-                                        FIB_ENTRY_FLAG_NONE,
+                                        FIB_ENTRY_FLAG_ATTACHED,
                                         FIB_PROTOCOL_IP6, &pfx.fp_addr,
                                         n->key.sw_if_index, ~0, 1, NULL,
                                         FIB_ROUTE_PATH_FLAG_NONE);
--
cgit 1.2.3-korg
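Taken together, the first two bullets of the commit message describe a simple build-time predicate: a path-list earns a load-balance map only when it is popular (more than 64 child load-balances) and at least one of its paths is recursive, i.e. can fail and be repaired in a PIC event. The sketch below models only that predicate, for illustration; the demo_* type, field and threshold names are hypothetical stand-ins, not VPP's actual path-list API.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical, cut-down model of a path-list; not VPP's fib_path_list_t. */
typedef struct
{
  uint32_t pl_n_children;          /* number of child load-balances */
  uint32_t pl_n_paths;             /* number of paths in the list */
  bool pl_path_is_recursive[8];    /* per-path recursive-constraint flag */
} demo_path_list_t;

/* "Popular" per the commit message: more than 64 children. */
#define DEMO_PATH_LIST_POPULAR_THRESHOLD 64

/* A load-balance map is only worth building when it can pay for itself:
 * enough children that re-walking them all during convergence is slow,
 * and at least one path that can actually fail in a PIC scenario.      */
static bool
demo_path_list_wants_lb_map (const demo_path_list_t * pl)
{
  uint32_t i;

  if (pl->pl_n_children <= DEMO_PATH_LIST_POPULAR_THRESHOLD)
    return false;

  for (i = 0; i < pl->pl_n_paths; i++)
    if (pl->pl_path_is_recursive[i])
      return true;

  return false;
}

int
main (void)
{
  demo_path_list_t few = { .pl_n_children = 3, .pl_n_paths = 2,
                           .pl_path_is_recursive = { true, false } };
  demo_path_list_t popular = { .pl_n_children = 1000, .pl_n_paths = 2,
                               .pl_path_is_recursive = { true, false } };

  printf ("few children       -> build map: %d\n",
          demo_path_list_wants_lb_map (&few));
  printf ("popular, recursive -> build map: %d\n",
          demo_path_list_wants_lb_map (&popular));
  return 0;
}

With this gating, small ECMP fan-outs skip the map entirely: when there are only a few children, a PIC repair can simply rewrite their buckets directly, which is the "this technique is not needed" case from the commit message.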
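The change repeated throughout the diff - in ip4_lookup_inline, ip4_load_balance, ip6_lookup_inline and ip6_load_balance - is one pattern: compute the flow hash only when the load-balance has more than one bucket, fetch that bucket through the forwarding path (load_balance_get_fwd_bucket) so a map built for the path-list can steer around failed buckets, and otherwise take bucket 0 directly. The standalone model below mirrors that per-packet decision under stated assumptions: the demo_* structures and the plain-array representation of the map are illustrative, not the real load_balance_t or load-balance-map layouts.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical, cut-down model of a load-balance object. */
typedef struct
{
  uint16_t lb_n_buckets;          /* power of two                        */
  uint16_t lb_n_buckets_minus_1;  /* mask applied to the flow hash       */
  const uint16_t *lb_map;         /* optional bucket remap; NULL if none */
  int lb_buckets[8];              /* demo "DPOs": just next-hop ids      */
} demo_load_balance_t;

/* Mirrors load_balance_get_bucket_i(): direct bucket fetch. */
static int
demo_get_bucket_i (const demo_load_balance_t * lb, uint16_t i)
{
  return lb->lb_buckets[i];
}

/* Mirrors the intent of load_balance_get_fwd_bucket(): run the bucket
 * index through the map (when one was built) so buckets whose path has
 * failed are redirected to a surviving bucket without rewriting the
 * load-balance itself. */
static int
demo_get_fwd_bucket (const demo_load_balance_t * lb, uint16_t i)
{
  if (lb->lb_map)
    i = lb->lb_map[i];
  return lb->lb_buckets[i];
}

/* The per-packet pattern added by the patch. */
static int
demo_pick_next_hop (const demo_load_balance_t * lb, uint32_t flow_hash)
{
  if (lb->lb_n_buckets > 1)
    return demo_get_fwd_bucket (lb, flow_hash & lb->lb_n_buckets_minus_1);
  /* single bucket: no hash and no map lookup needed */
  return demo_get_bucket_i (lb, 0);
}

int
main (void)
{
  /* two ECMP buckets; the map says bucket 1's path failed, use bucket 0 */
  const uint16_t map[2] = { 0, 0 };
  demo_load_balance_t lb = {
    .lb_n_buckets = 2,
    .lb_n_buckets_minus_1 = 1,
    .lb_map = map,
    .lb_buckets = { 100, 200 },
  };

  printf ("hash 0x1234 -> next-hop %d\n", demo_pick_next_hop (&lb, 0x1234));
  return 0;
}

Hashing only inside the PREDICT_FALSE (lb_n_buckets > 1) branch keeps the common single-bucket case on the cheapest path, which is why the patch moves the bucket fetch into that branch and leaves the else branch reading bucket 0 directly.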