-rw-r--r--  vnet/vnet/adj/adj_midchain.c |  91
-rw-r--r--  vnet/vnet/dpo/lookup_dpo.c   | 588
2 files changed, 534 insertions(+), 145 deletions(-)
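Both hunks below make the same change: the single-buffer while loop in adj_midchain_tx_inline and in lookup_dpo_ip4/ip6_inline gains a dual loop in front of it, which prefetches the buffer headers and first cache line of the next two packets, processes the current pair, and leaves the original single-buffer loop to drain the remainder. The following is a minimal standalone sketch of that structure only, assuming a plain array of hypothetical work_item_t records; __builtin_prefetch stands in for vlib_prefetch_buffer_header/CLIB_PREFETCH, and nothing here is VPP code.

/*
 * Sketch of the dual-loop-plus-remainder pattern used in the diff.
 * Processes two items per iteration while prefetching two items ahead,
 * then falls back to a single loop for the last 0..3 items.
 * All names are hypothetical; __builtin_prefetch is a GCC/Clang builtin.
 */
#include <stdio.h>

typedef struct { int value; } work_item_t;

static void process_one (work_item_t *w) { w->value *= 2; }

static void
process_vector (work_item_t *items, int n_left)
{
    work_item_t *from = items;

    /* Dual loop: needs at least 4 items so from[2] and from[3] exist. */
    while (n_left >= 4)
    {
        /* Prefetch the next iteration's items. */
        __builtin_prefetch (&from[2]);
        __builtin_prefetch (&from[3]);

        /* Process the current pair. */
        process_one (&from[0]);
        process_one (&from[1]);

        from += 2;
        n_left -= 2;
    }

    /* Single-loop tail, mirroring the pre-existing loop in the diff. */
    while (n_left > 0)
    {
        process_one (&from[0]);
        from += 1;
        n_left -= 1;
    }
}

int
main (void)
{
    work_item_t items[7] = { {1}, {2}, {3}, {4}, {5}, {6}, {7} };
    process_vector (items, 7);

    for (int i = 0; i < 7; i++)
        printf ("%d ", items[i].value);
    printf ("\n");
    return 0;
}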
diff --git a/vnet/vnet/adj/adj_midchain.c b/vnet/vnet/adj/adj_midchain.c index 136f69bdd81..c420155320d 100644 --- a/vnet/vnet/adj/adj_midchain.c +++ b/vnet/vnet/adj/adj_midchain.c @@ -64,10 +64,95 @@ adj_mdichain_tx_inline (vlib_main_t * vm, /* set up to enqueue to our disposition with index = next_index */ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); - /* - * FIXME DUAL LOOP - */ + while (n_left_from >= 4 && n_left_to_next > 2) + { + u32 bi0, adj_index0, next0; + const ip_adjacency_t * adj0; + const dpo_id_t *dpo0; + vlib_buffer_t * b0; + u32 bi1, adj_index1, next1; + const ip_adjacency_t * adj1; + const dpo_id_t *dpo1; + vlib_buffer_t * b1; + + /* Prefetch next iteration. */ + { + vlib_buffer_t * p2, * p3; + + p2 = vlib_get_buffer (vm, from[2]); + p3 = vlib_get_buffer (vm, from[3]); + + vlib_prefetch_buffer_header (p2, LOAD); + vlib_prefetch_buffer_header (p3, LOAD); + + CLIB_PREFETCH (p2->data, CLIB_CACHE_LINE_BYTES, STORE); + CLIB_PREFETCH (p3->data, CLIB_CACHE_LINE_BYTES, STORE); + } + + bi0 = from[0]; + to_next[0] = bi0; + bi1 = from[1]; + to_next[1] = bi1; + + from += 2; + to_next += 2; + n_left_from -= 2; + n_left_to_next -= 2; + + b0 = vlib_get_buffer(vm, bi0); + b1 = vlib_get_buffer(vm, bi1); + + /* Follow the DPO on which the midchain is stacked */ + adj_index0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX]; + adj_index1 = vnet_buffer(b1)->ip.adj_index[VLIB_TX]; + + adj0 = adj_get(adj_index0); + adj1 = adj_get(adj_index1); + + dpo0 = &adj0->sub_type.midchain.next_dpo; + dpo1 = &adj1->sub_type.midchain.next_dpo; + + next0 = dpo0->dpoi_next_node; + next1 = dpo1->dpoi_next_node; + + vnet_buffer(b1)->ip.adj_index[VLIB_TX] = dpo1->dpoi_index; + vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index; + + if (interface_count) + { + vlib_increment_combined_counter (im->combined_sw_if_counters + + VNET_INTERFACE_COUNTER_TX, + cpu_index, + adj0->rewrite_header.sw_if_index, + 1, + vlib_buffer_length_in_chain (vm, b0)); + vlib_increment_combined_counter (im->combined_sw_if_counters + + VNET_INTERFACE_COUNTER_TX, + cpu_index, + adj1->rewrite_header.sw_if_index, + 1, + vlib_buffer_length_in_chain (vm, b1)); + } + + if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED)) + { + adj_midchain_tx_trace_t *tr = vlib_add_trace (vm, node, + b0, sizeof (*tr)); + tr->ai = adj_index0; + } + if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED)) + { + adj_midchain_tx_trace_t *tr = vlib_add_trace (vm, node, + b1, sizeof (*tr)); + tr->ai = adj_index1; + } + + vlib_validate_buffer_enqueue_x2 (vm, node, next_index, + to_next, n_left_to_next, + bi0, bi1, + next0, next1); + } while (n_left_from > 0 && n_left_to_next > 0) { u32 bi0, adj_index0, next0; diff --git a/vnet/vnet/dpo/lookup_dpo.c b/vnet/vnet/dpo/lookup_dpo.c index ab3a7b905e4..c9e0fca7ed7 100644 --- a/vnet/vnet/dpo/lookup_dpo.c +++ b/vnet/vnet/dpo/lookup_dpo.c @@ -292,95 +292,246 @@ lookup_dpo_ip4_inline (vlib_main_t * vm, vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next); - while (n_left_from > 0 && n_left_to_next > 0) - { + while (n_left_from >= 4 && n_left_to_next > 2) + { u32 bi0, lkdi0, lbi0, fib_index0, next0, hash_c0; flow_hash_config_t flow_hash_config0; - const ip4_address_t *input_addr; - const load_balance_t *lb0; - const lookup_dpo_t * lkd0; - const ip4_header_t * ip0; - const dpo_id_t *dpo0; - vlib_buffer_t * b0; + const ip4_address_t *input_addr0; + const load_balance_t *lb0; + const lookup_dpo_t * lkd0; + const ip4_header_t * ip0; + const dpo_id_t *dpo0; + vlib_buffer_t * b0; + u32 bi1, lkdi1, lbi1, 
fib_index1, next1, hash_c1; + flow_hash_config_t flow_hash_config1; + const ip4_address_t *input_addr1; + const load_balance_t *lb1; + const lookup_dpo_t * lkd1; + const ip4_header_t * ip1; + const dpo_id_t *dpo1; + vlib_buffer_t * b1; + + /* Prefetch next iteration. */ + { + vlib_buffer_t * p2, * p3; + + p2 = vlib_get_buffer (vm, from[2]); + p3 = vlib_get_buffer (vm, from[3]); + + vlib_prefetch_buffer_header (p2, LOAD); + vlib_prefetch_buffer_header (p3, LOAD); + + CLIB_PREFETCH (p2->data, CLIB_CACHE_LINE_BYTES, STORE); + CLIB_PREFETCH (p3->data, CLIB_CACHE_LINE_BYTES, STORE); + } + + bi0 = from[0]; + to_next[0] = bi0; + bi1 = from[1]; + to_next[1] = bi1; + from += 2; + to_next += 2; + n_left_from -= 2; + n_left_to_next -= 2; + + b0 = vlib_get_buffer (vm, bi0); + ip0 = vlib_buffer_get_current (b0); + b1 = vlib_get_buffer (vm, bi1); + ip1 = vlib_buffer_get_current (b1); + + /* dst lookup was done by ip4 lookup */ + lkdi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX]; + lkdi1 = vnet_buffer(b1)->ip.adj_index[VLIB_TX]; + lkd0 = lookup_dpo_get(lkdi0); + lkd1 = lookup_dpo_get(lkdi1); + + /* + * choose between a lookup using the fib index in the DPO + * or getting the FIB index from the interface. + */ + if (table_from_interface) + { + fib_index0 = + ip4_fib_table_get_index_for_sw_if_index( + vnet_buffer(b0)->sw_if_index[VLIB_RX]); + fib_index1 = + ip4_fib_table_get_index_for_sw_if_index( + vnet_buffer(b1)->sw_if_index[VLIB_RX]); + } + else + { + fib_index0 = lkd0->lkd_fib_index; + fib_index1 = lkd1->lkd_fib_index; + } + + /* + * choose between a source or destination address lookup in the table + */ + if (input_src_addr) + { + input_addr0 = &ip0->src_address; + input_addr1 = &ip1->src_address; + } + else + { + input_addr0 = &ip0->dst_address; + input_addr1 = &ip1->dst_address; + } + + /* do lookup */ + ip4_src_fib_lookup_one (fib_index0, input_addr0, &lbi0); + ip4_src_fib_lookup_one (fib_index1, input_addr1, &lbi1); + lb0 = load_balance_get(lbi0); + lb1 = load_balance_get(lbi1); - bi0 = from[0]; - to_next[0] = bi0; - from += 1; - to_next += 1; - n_left_from -= 1; - n_left_to_next -= 1; - - b0 = vlib_get_buffer (vm, bi0); - ip0 = vlib_buffer_get_current (b0); - - /* dst lookup was done by ip4 lookup */ - lkdi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX]; - lkd0 = lookup_dpo_get(lkdi0); + /* Use flow hash to compute multipath adjacency. */ + hash_c0 = vnet_buffer (b0)->ip.flow_hash = 0; + hash_c1 = vnet_buffer (b1)->ip.flow_hash = 0; - /* - * choose between a lookup using the fib index in the DPO - * or getting the FIB index from the interface. 
- */ - if (table_from_interface) - { - fib_index0 = - ip4_fib_table_get_index_for_sw_if_index( - vnet_buffer(b0)->sw_if_index[VLIB_RX]); - } - else - { - fib_index0 = lkd0->lkd_fib_index; - } + if (PREDICT_FALSE (lb0->lb_n_buckets > 1)) + { + flow_hash_config0 = lb0->lb_hash_config; + hash_c0 = vnet_buffer (b0)->ip.flow_hash = + ip4_compute_flow_hash (ip0, flow_hash_config0); + } - /* - * choose between a source or destination address lookup in the table - */ - if (input_src_addr) - { - input_addr = &ip0->src_address; - } - else - { - input_addr = &ip0->dst_address; - } + if (PREDICT_FALSE (lb1->lb_n_buckets > 1)) + { + flow_hash_config1 = lb1->lb_hash_config; + hash_c1 = vnet_buffer (b1)->ip.flow_hash = + ip4_compute_flow_hash (ip1, flow_hash_config1); + } - /* do lookup */ - ip4_src_fib_lookup_one (fib_index0, input_addr, &lbi0); - lb0 = load_balance_get(lbi0); + dpo0 = load_balance_get_bucket_i(lb0, + (hash_c0 & + (lb0->lb_n_buckets_minus_1))); + dpo1 = load_balance_get_bucket_i(lb1, + (hash_c1 & + (lb1->lb_n_buckets_minus_1))); + + next0 = dpo0->dpoi_next_node; + next1 = dpo1->dpoi_next_node; + vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index; + vnet_buffer(b1)->ip.adj_index[VLIB_TX] = dpo1->dpoi_index; + + vlib_increment_combined_counter + (cm, cpu_index, lbi0, 1, + vlib_buffer_length_in_chain (vm, b0)); + vlib_increment_combined_counter + (cm, cpu_index, lbi1, 1, + vlib_buffer_length_in_chain (vm, b1)); + + if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED)) + { + lookup_trace_t *tr = vlib_add_trace (vm, node, + b0, sizeof (*tr)); + tr->fib_index = fib_index0; + tr->lbi = lbi0; + tr->addr.ip4 = *input_addr0; + } + if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED)) + { + lookup_trace_t *tr = vlib_add_trace (vm, node, + b1, sizeof (*tr)); + tr->fib_index = fib_index1; + tr->lbi = lbi1; + tr->addr.ip4 = *input_addr1; + } + + vlib_validate_buffer_enqueue_x2 (vm, node, next_index, + to_next, n_left_to_next, + bi0, bi1, next0, next1); + } + + while (n_left_from > 0 && n_left_to_next > 0) + { + u32 bi0, lkdi0, lbi0, fib_index0, next0, hash_c0; + flow_hash_config_t flow_hash_config0; + const ip4_address_t *input_addr; + const load_balance_t *lb0; + const lookup_dpo_t * lkd0; + const ip4_header_t * ip0; + const dpo_id_t *dpo0; + vlib_buffer_t * b0; + + bi0 = from[0]; + to_next[0] = bi0; + from += 1; + to_next += 1; + n_left_from -= 1; + n_left_to_next -= 1; + + b0 = vlib_get_buffer (vm, bi0); + ip0 = vlib_buffer_get_current (b0); + + /* dst lookup was done by ip4 lookup */ + lkdi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX]; + lkd0 = lookup_dpo_get(lkdi0); + + /* + * choose between a lookup using the fib index in the DPO + * or getting the FIB index from the interface. + */ + if (table_from_interface) + { + fib_index0 = + ip4_fib_table_get_index_for_sw_if_index( + vnet_buffer(b0)->sw_if_index[VLIB_RX]); + } + else + { + fib_index0 = lkd0->lkd_fib_index; + } + + /* + * choose between a source or destination address lookup in the table + */ + if (input_src_addr) + { + input_addr = &ip0->src_address; + } + else + { + input_addr = &ip0->dst_address; + } + + /* do lookup */ + ip4_src_fib_lookup_one (fib_index0, input_addr, &lbi0); + lb0 = load_balance_get(lbi0); /* Use flow hash to compute multipath adjacency. 
*/ hash_c0 = vnet_buffer (b0)->ip.flow_hash = 0; if (PREDICT_FALSE (lb0->lb_n_buckets > 1)) - { + { flow_hash_config0 = lb0->lb_hash_config; hash_c0 = vnet_buffer (b0)->ip.flow_hash = ip4_compute_flow_hash (ip0, flow_hash_config0); - } + } dpo0 = load_balance_get_bucket_i(lb0, (hash_c0 & (lb0->lb_n_buckets_minus_1))); - next0 = dpo0->dpoi_next_node; - vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index; - - vlib_increment_combined_counter - (cm, cpu_index, lbi0, 1, - vlib_buffer_length_in_chain (vm, b0)); - - if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED)) - { - lookup_trace_t *tr = vlib_add_trace (vm, node, - b0, sizeof (*tr)); - tr->fib_index = fib_index0; - tr->lbi = lbi0; - tr->addr.ip4 = *input_addr; - } - - vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, - n_left_to_next, bi0, next0); - } + next0 = dpo0->dpoi_next_node; + vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index; + + vlib_increment_combined_counter + (cm, cpu_index, lbi0, 1, + vlib_buffer_length_in_chain (vm, b0)); + + if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED)) + { + lookup_trace_t *tr = vlib_add_trace (vm, node, + b0, sizeof (*tr)); + tr->fib_index = fib_index0; + tr->lbi = lbi0; + tr->addr.ip4 = *input_addr; + } + + vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, + n_left_to_next, bi0, next0); + } vlib_put_next_frame (vm, node, next_index, n_left_to_next); } return from_frame->n_vectors; @@ -474,96 +625,249 @@ lookup_dpo_ip6_inline (vlib_main_t * vm, vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next); - while (n_left_from > 0 && n_left_to_next > 0) - { - u32 bi0, lkdi0, lbi0, fib_index0, next0, hash_c0; + while (n_left_from >= 4 && n_left_to_next > 2) + { + u32 bi0, lkdi0, lbi0, fib_index0, next0, hash_c0; flow_hash_config_t flow_hash_config0; - const ip6_address_t *input_addr0; - const load_balance_t *lb0; - const lookup_dpo_t * lkd0; - const ip6_header_t * ip0; - const dpo_id_t *dpo0; - vlib_buffer_t * b0; - - bi0 = from[0]; - to_next[0] = bi0; - from += 1; - to_next += 1; - n_left_from -= 1; - n_left_to_next -= 1; - - b0 = vlib_get_buffer (vm, bi0); - ip0 = vlib_buffer_get_current (b0); + const ip6_address_t *input_addr0; + const load_balance_t *lb0; + const lookup_dpo_t * lkd0; + const ip6_header_t * ip0; + const dpo_id_t *dpo0; + vlib_buffer_t * b0; + u32 bi1, lkdi1, lbi1, fib_index1, next1, hash_c1; + flow_hash_config_t flow_hash_config1; + const ip6_address_t *input_addr1; + const load_balance_t *lb1; + const lookup_dpo_t * lkd1; + const ip6_header_t * ip1; + const dpo_id_t *dpo1; + vlib_buffer_t * b1; + + /* Prefetch next iteration. 
*/ + { + vlib_buffer_t * p2, * p3; + + p2 = vlib_get_buffer (vm, from[2]); + p3 = vlib_get_buffer (vm, from[3]); + + vlib_prefetch_buffer_header (p2, LOAD); + vlib_prefetch_buffer_header (p3, LOAD); + + CLIB_PREFETCH (p2->data, CLIB_CACHE_LINE_BYTES, STORE); + CLIB_PREFETCH (p3->data, CLIB_CACHE_LINE_BYTES, STORE); + } + + bi0 = from[0]; + to_next[0] = bi0; + bi1 = from[1]; + to_next[1] = bi1; + from += 2; + to_next += 2; + n_left_from -= 2; + n_left_to_next -= 2; + + b0 = vlib_get_buffer (vm, bi0); + ip0 = vlib_buffer_get_current (b0); + b1 = vlib_get_buffer (vm, bi1); + ip1 = vlib_buffer_get_current (b1); + + /* dst lookup was done by ip6 lookup */ + lkdi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX]; + lkdi1 = vnet_buffer(b1)->ip.adj_index[VLIB_TX]; + lkd0 = lookup_dpo_get(lkdi0); + lkd1 = lookup_dpo_get(lkdi1); + + /* + * choose between a lookup using the fib index in the DPO + * or getting the FIB index from the interface. + */ + if (table_from_interface) + { + fib_index0 = + ip4_fib_table_get_index_for_sw_if_index( + vnet_buffer(b0)->sw_if_index[VLIB_RX]); + fib_index1 = + ip4_fib_table_get_index_for_sw_if_index( + vnet_buffer(b1)->sw_if_index[VLIB_RX]); + } + else + { + fib_index0 = lkd0->lkd_fib_index; + fib_index1 = lkd1->lkd_fib_index; + } + + /* + * choose between a source or destination address lookup in the table + */ + if (input_src_addr) + { + input_addr0 = &ip0->src_address; + input_addr1 = &ip1->src_address; + } + else + { + input_addr0 = &ip0->dst_address; + input_addr1 = &ip1->dst_address; + } + + /* do src lookup */ + lbi0 = ip6_fib_table_fwding_lookup(&ip6_main, + fib_index0, + input_addr0); + lbi1 = ip6_fib_table_fwding_lookup(&ip6_main, + fib_index1, + input_addr1); + lb0 = load_balance_get(lbi0); + lb1 = load_balance_get(lbi1); - /* dst lookup was done by ip6 lookup */ - lkdi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX]; - lkd0 = lookup_dpo_get(lkdi0); + /* Use flow hash to compute multipath adjacency. */ + hash_c0 = vnet_buffer (b0)->ip.flow_hash = 0; + hash_c1 = vnet_buffer (b1)->ip.flow_hash = 0; - /* - * choose between a lookup using the fib index in the DPO - * or getting the FIB index from the interface. 
- */ - if (table_from_interface) - { - fib_index0 = - ip4_fib_table_get_index_for_sw_if_index( - vnet_buffer(b0)->sw_if_index[VLIB_RX]); - } - else - { - fib_index0 = lkd0->lkd_fib_index; - } + if (PREDICT_FALSE (lb0->lb_n_buckets > 1)) + { + flow_hash_config0 = lb0->lb_hash_config; + hash_c0 = vnet_buffer (b0)->ip.flow_hash = + ip6_compute_flow_hash (ip0, flow_hash_config0); + } - /* - * choose between a source or destination address lookup in the table - */ - if (input_src_addr) - { - input_addr0 = &ip0->src_address; - } - else - { - input_addr0 = &ip0->dst_address; - } + if (PREDICT_FALSE (lb1->lb_n_buckets > 1)) + { + flow_hash_config1 = lb1->lb_hash_config; + hash_c1 = vnet_buffer (b1)->ip.flow_hash = + ip6_compute_flow_hash (ip1, flow_hash_config1); + } - /* do src lookup */ - lbi0 = ip6_fib_table_fwding_lookup(&ip6_main, - fib_index0, - input_addr0); - lb0 = load_balance_get(lbi0); + dpo0 = load_balance_get_bucket_i(lb0, + (hash_c0 & + (lb0->lb_n_buckets_minus_1))); + dpo1 = load_balance_get_bucket_i(lb1, + (hash_c1 & + (lb1->lb_n_buckets_minus_1))); + + next0 = dpo0->dpoi_next_node; + next1 = dpo1->dpoi_next_node; + vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index; + vnet_buffer(b1)->ip.adj_index[VLIB_TX] = dpo1->dpoi_index; + + vlib_increment_combined_counter + (cm, cpu_index, lbi0, 1, + vlib_buffer_length_in_chain (vm, b0)); + vlib_increment_combined_counter + (cm, cpu_index, lbi1, 1, + vlib_buffer_length_in_chain (vm, b1)); + + if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED)) + { + lookup_trace_t *tr = vlib_add_trace (vm, node, + b0, sizeof (*tr)); + tr->fib_index = fib_index0; + tr->lbi = lbi0; + tr->addr.ip6 = *input_addr0; + } + if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED)) + { + lookup_trace_t *tr = vlib_add_trace (vm, node, + b1, sizeof (*tr)); + tr->fib_index = fib_index1; + tr->lbi = lbi1; + tr->addr.ip6 = *input_addr1; + } + vlib_validate_buffer_enqueue_x2(vm, node, next_index, to_next, + n_left_to_next, bi0, bi1, + next0, next1); + } + while (n_left_from > 0 && n_left_to_next > 0) + { + u32 bi0, lkdi0, lbi0, fib_index0, next0, hash_c0; + flow_hash_config_t flow_hash_config0; + const ip6_address_t *input_addr0; + const load_balance_t *lb0; + const lookup_dpo_t * lkd0; + const ip6_header_t * ip0; + const dpo_id_t *dpo0; + vlib_buffer_t * b0; + + bi0 = from[0]; + to_next[0] = bi0; + from += 1; + to_next += 1; + n_left_from -= 1; + n_left_to_next -= 1; + + b0 = vlib_get_buffer (vm, bi0); + ip0 = vlib_buffer_get_current (b0); + + /* dst lookup was done by ip6 lookup */ + lkdi0 = vnet_buffer(b0)->ip.adj_index[VLIB_TX]; + lkd0 = lookup_dpo_get(lkdi0); + + /* + * choose between a lookup using the fib index in the DPO + * or getting the FIB index from the interface. + */ + if (table_from_interface) + { + fib_index0 = + ip4_fib_table_get_index_for_sw_if_index( + vnet_buffer(b0)->sw_if_index[VLIB_RX]); + } + else + { + fib_index0 = lkd0->lkd_fib_index; + } + + /* + * choose between a source or destination address lookup in the table + */ + if (input_src_addr) + { + input_addr0 = &ip0->src_address; + } + else + { + input_addr0 = &ip0->dst_address; + } + + /* do src lookup */ + lbi0 = ip6_fib_table_fwding_lookup(&ip6_main, + fib_index0, + input_addr0); + lb0 = load_balance_get(lbi0); /* Use flow hash to compute multipath adjacency. 
*/ hash_c0 = vnet_buffer (b0)->ip.flow_hash = 0; if (PREDICT_FALSE (lb0->lb_n_buckets > 1)) - { + { flow_hash_config0 = lb0->lb_hash_config; hash_c0 = vnet_buffer (b0)->ip.flow_hash = ip6_compute_flow_hash (ip0, flow_hash_config0); - } + } dpo0 = load_balance_get_bucket_i(lb0, (hash_c0 & (lb0->lb_n_buckets_minus_1))); - next0 = dpo0->dpoi_next_node; - vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index; - - vlib_increment_combined_counter - (cm, cpu_index, lbi0, 1, - vlib_buffer_length_in_chain (vm, b0)); - - if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED)) - { - lookup_trace_t *tr = vlib_add_trace (vm, node, - b0, sizeof (*tr)); - tr->fib_index = fib_index0; - tr->lbi = lbi0; - tr->addr.ip6 = *input_addr0; - } - vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, - n_left_to_next, bi0, next0); - } + next0 = dpo0->dpoi_next_node; + vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index; + + vlib_increment_combined_counter + (cm, cpu_index, lbi0, 1, + vlib_buffer_length_in_chain (vm, b0)); + + if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED)) + { + lookup_trace_t *tr = vlib_add_trace (vm, node, + b0, sizeof (*tr)); + tr->fib_index = fib_index0; + tr->lbi = lbi0; + tr->addr.ip6 = *input_addr0; + } + vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, + n_left_to_next, bi0, next0); + } vlib_put_next_frame (vm, node, next_index, n_left_to_next); } return from_frame->n_vectors; |
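For multipath routes, the lookup hunks above pick a bucket from the load-balance object with hash_c & lb_n_buckets_minus_1, and only compute the flow hash when lb_n_buckets > 1. Below is a small sketch of that selection with a toy stand-in for ip4_compute_flow_hash()/ip6_compute_flow_hash(); it assumes, as the mask trick requires, that the bucket count is a power of two, and it is illustrative only.

/*
 * Sketch of the load-balance bucket selection done in the lookup path.
 * compute_flow_hash() is a toy placeholder; the real hash mixes
 * addresses, protocol and ports according to the configured
 * flow_hash_config.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t
compute_flow_hash (uint32_t src, uint32_t dst)
{
    /* Toy hash, only for illustration. */
    uint32_t h = src ^ dst;
    h ^= h >> 16;
    h *= 0x85ebca6bu;
    return h;
}

static uint32_t
select_bucket (uint32_t n_buckets, uint32_t src, uint32_t dst)
{
    uint32_t hash = 0;

    /* Only pay for the hash when there is more than one path. */
    if (n_buckets > 1)
        hash = compute_flow_hash (src, dst);

    /* n_buckets is a power of two, so masking picks a valid bucket. */
    return hash & (n_buckets - 1);
}

int
main (void)
{
    printf ("bucket = %u\n", select_bucket (4, 0x0a000001u, 0x0a000002u));
    return 0;
}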