aboutsummaryrefslogtreecommitdiffstats
path: root/tests/vpp/perf/crypto/10ge2p1x710-ethip4ipsec1tnlhw-ip4base-policy-aes128cbc-hmac256sha-ndrpdr.robot
diff options
context:
space:
mode:
Diffstat (limited to 'tests/vpp/perf/crypto/10ge2p1x710-ethip4ipsec1tnlhw-ip4base-policy-aes128cbc-hmac256sha-ndrpdr.robot')
-rw-r--r--tests/vpp/perf/crypto/10ge2p1x710-ethip4ipsec1tnlhw-ip4base-policy-aes128cbc-hmac256sha-ndrpdr.robot151
1 files changed, 151 insertions, 0 deletions
diff --git a/tests/vpp/perf/crypto/10ge2p1x710-ethip4ipsec1tnlhw-ip4base-policy-aes128cbc-hmac256sha-ndrpdr.robot b/tests/vpp/perf/crypto/10ge2p1x710-ethip4ipsec1tnlhw-ip4base-policy-aes128cbc-hmac256sha-ndrpdr.robot
new file mode 100644
index 0000000000..6d52cfd697
--- /dev/null
+++ b/tests/vpp/perf/crypto/10ge2p1x710-ethip4ipsec1tnlhw-ip4base-policy-aes128cbc-hmac256sha-ndrpdr.robot
@@ -0,0 +1,151 @@
+# Copyright (c) 2019 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+*** Settings ***
+| Resource | resources/libraries/robot/performance/performance_setup.robot
+| Resource | resources/libraries/robot/crypto/ipsec.robot
+| ...
+| Force Tags | 3_NODE_SINGLE_LINK_TOPO | PERFTEST | HW_ENV | NDRPDR
+| ... | IP4FWD | IPSEC | IPSECHW | IPSECTUN | NIC_Intel-X710 | BASE |
+| ... | AES_128_CBC | HMAC_SHA_256 | HMAC | AES
+| ...
+| Suite Setup | Set up IPSec performance test suite | L3 | ${nic_name}
+| ... | HW_DH895xcc
+| Suite Teardown | Tear down 3-node performance topology
+| ...
+| Test Setup | Set up performance test
+| Test Teardown | Tear down performance test
+| ...
+| Test Template | Local Template
+| ...
+| Documentation | *IPv4 IPsec tunnel mode performance test suite.*
+| ...
+| ... | *[Top] Network Topologies:* TG-DUT1-DUT2-TG 3-node circular topology
+| ... | with single links between nodes.
+| ... | *[Enc] Packet Encapsulations:* Eth-IPv4 on TG-DUTn,
+| ... | Eth-IPv4-IPSec on DUT1-DUT2
+| ... | *[Cfg] DUT configuration:* DUT1 and DUT2 are configured with multiple
+| ... | IPsec tunnels between them. DUTs get IPv4 traffic from TG, encrypt it
+| ... | and send to another DUT, where packets are decrypted and sent back to TG
+| ... | *[Ver] TG verification:* TG finds and reports throughput NDR (Non Drop\
+| ... | Rate) with zero packet loss tolerance and throughput PDR (Partial Drop\
+| ... | Rate) with non-zero packet loss tolerance (LT) expressed in percentage\
+| ... | of packets transmitted. NDR and PDR are discovered for different\
+| ... | Ethernet L2 frame sizes using MLRsearch library.\
+| ... | TG traffic profile contains two L3 flow-groups
+| ... | (flow-group per direction, number of flows per flow-group equals to
+| ... | number of IPSec tunnels) with all packets
+| ... | containing Ethernet header, IPv4 header with IP protocol=61 and
+| ... | static payload. MAC addresses are matching MAC addresses of the TG
+| ... | node interfaces. Incrementing of IP.dst (IPv4 destination address) field
+| ... | is applied to both streams.
+| ... | *[Ref] Applicable standard specifications:* RFC4303 and RFC2544.
+
+*** Variables ***
+| ${nic_name}= | Intel-X710
+| ${overhead}= | ${58}
+| ${tg_if1_ip4}= | 192.168.10.2
+| ${dut1_if1_ip4}= | 192.168.10.1
+| ${dut1_if2_ip4}= | 100.0.0.1
+| ${dut2_if1_ip4}= | 100.0.0.2
+| ${dut2_if2_ip4}= | 192.168.20.1
+| ${tg_if2_ip4}= | 192.168.20.2
+| ${raddr_ip4}= | 20.0.0.0
+| ${laddr_ip4}= | 10.0.0.0
+| ${addr_range}= | ${24}
+| ${n_tunnels}= | ${1}
+# Traffic profile:
+| ${traffic_profile}= | trex-sl-3n-ethip4-ip4dst${n_tunnels}
+
+*** Keywords ***
+| Local Template
+| | [Documentation]
+| | ... | [Cfg] DUTs run 1 IPsec tunnel AES_CBC_128 / HMAC_256_SHA in each
+| | ... | direction.\
+| | ... | Each DUT uses ${phy_cores} physical core(s) for worker threads.
+| | ... | [Ver] Measure NDR and PDR values using MLRsearch algorithm.\
+| | ...
+| | ... | *Arguments:*
+| | ... | - frame_size - Framesize in Bytes in integer or string (IMIX_v4_1).
+| | ... | Type: integer, string
+| | ... | - phy_cores - Number of physical cores. Type: integer
+| | ... | - rxq - Number of RX queues, default value: ${None}. Type: integer
+| | ...
+| | [Arguments] | ${frame_size} | ${phy_cores} | ${rxq}=${None}
+| | ...
+| | Set Test Variable | \${frame_size}
+| | ...
+| | # These are enums (not strings) so they cannot be in Variables table.
+| | ${encr_alg}= | Crypto Alg AES CBC 128
+| | ${auth_alg}= | Integ Alg SHA 256 128
+| | ...
+| | Given Add worker threads and rxqueues to all DUTs | ${phy_cores} | ${rxq}
+| | And Add PCI devices to all DUTs
+| | Set Max Rate And Jumbo And Handle Multi Seg
+| | And Add cryptodev to all DUTs | ${phy_cores}
+| | And Apply startup configuration on all VPP DUTs
+| | And Initialize IPSec in 3-node circular topology
+| | And VPP IPsec Add Multiple Tunnels
+| | ... | ${nodes} | ${dut1_if2} | ${dut2_if1} | ${n_tunnels}
+| | ... | ${encr_alg} | ${auth_alg} | ${dut1_if2_ip4} | ${dut2_if1_ip4}
+| | ... | ${laddr_ip4} | ${raddr_ip4} | ${addr_range}
+| | Then Find NDR and PDR intervals using optimized search
+
+*** Test Cases ***
+| tc01-64B-1c-ethip4ipsec1tnlhw-ip4base-policy-aes128cbc-hmac256sha-ndrpdr
+| | [Tags] | 64B | 1C
+| | frame_size=${64} | phy_cores=${1}
+
+| tc02-64B-2c-ethip4ipsec1tnlhw-ip4base-policy-aes128cbc-hmac256sha-ndrpdr
+| | [Tags] | 64B | 2C
+| | frame_size=${64} | phy_cores=${2}
+
+| tc03-64B-4c-ethip4ipsec1tnlhw-ip4base-policy-aes128cbc-hmac256sha-ndrpdr
+| | [Tags] | 64B | 4C
+| | frame_size=${64} | phy_cores=${4}
+
+| tc04-1518B-1c-ethip4ipsec1tnlhw-ip4base-policy-aes128cbc-hmac256sha-ndrpdr
+| | [Tags] | 1518B | 1C
+| | frame_size=${1518} | phy_cores=${1}
+
+| tc05-1518B-2c-ethip4ipsec1tnlhw-ip4base-policy-aes128cbc-hmac256sha-ndrpdr
+| | [Tags] | 1518B | 2C
+| | frame_size=${1518} | phy_cores=${2}
+
+| tc06-1518B-4c-ethip4ipsec1tnlhw-ip4base-policy-aes128cbc-hmac256sha-ndrpdr
+| | [Tags] | 1518B | 4C
+| | frame_size=${1518} | phy_cores=${4}
+
+| tc07-9000B-1c-ethip4ipsec1tnlhw-ip4base-policy-aes128cbc-hmac256sha-ndrpdr
+| | [Tags] | 9000B | 1C
+| | frame_size=${9000} | phy_cores=${1}
+
+| tc08-9000B-2c-ethip4ipsec1tnlhw-ip4base-policy-aes128cbc-hmac256sha-ndrpdr
+| | [Tags] | 9000B | 2C
+| | frame_size=${9000} | phy_cores=${2}
+
+| tc09-9000B-4c-ethip4ipsec1tnlhw-ip4base-policy-aes128cbc-hmac256sha-ndrpdr
+| | [Tags] | 9000B | 4C
+| | frame_size=${9000} | phy_cores=${4}
+
+| tc10-IMIX-1c-ethip4ipsec1tnlhw-ip4base-policy-aes128cbc-hmac256sha-ndrpdr
+| | [Tags] | IMIX | 1C
+| | frame_size=IMIX_v4_1 | phy_cores=${1}
+
+| tc11-IMIX-2c-ethip4ipsec1tnlhw-ip4base-policy-aes128cbc-hmac256sha-ndrpdr
+| | [Tags] | IMIX | 2C
+| | frame_size=IMIX_v4_1 | phy_cores=${2}
+
+| tc12-IMIX-4c-ethip4ipsec1tnlhw-ip4base-policy-aes128cbc-hmac256sha-ndrpdr
+| | [Tags] | IMIX | 4C
+| | frame_size=IMIX_v4_1 | phy_cores=${4}
ight .s1 { color: #dd2200; background-color: #fff0f0 } /* Literal.String.Single */ .highlight .ss { color: #aa6600; background-color: #fff0f0 } /* Literal.String.Symbol */ .highlight .bp { color: #003388 } /* Name.Builtin.Pseudo */ .highlight .fm { color: #0066bb; font-weight: bold } /* Name.Function.Magic */ .highlight .vc { color: #336699 } /* Name.Variable.Class */ .highlight .vg { color: #dd7700 } /* Name.Variable.Global */ .highlight .vi { color: #3333bb } /* Name.Variable.Instance */ .highlight .vm { color: #336699 } /* Name.Variable.Magic */ .highlight .il { color: #0000DD; font-weight: bold } /* Literal.Number.Integer.Long */ }
/*
 * Copyright (c) 2015-2019 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * ip/ip4_forward.h: IP v4 forwarding
 *
 * Copyright (c) 2008 Eliot Dresselhaus
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 *  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 *  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 *  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 *  NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
 *  LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 *  OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 *  WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef __included_ip4_forward_h__
#define __included_ip4_forward_h__

#include <vppinfra/cache.h>
#include <vnet/fib/ip4_fib.h>
#include <vnet/dpo/load_balance_map.h>
#include <vnet/ip/ip4_inlines.h>

/**
 * @file
 * @brief IPv4 Forwarding.
 *
 * This file contains the source code for IPv4 forwarding.
 */

/**
 * @brief Shared IPv4 FIB lookup worker inlined into the ip4-lookup node(s).
 *
 * For every buffer in @a frame: resolve the packet's FIB index from its RX
 * interface, look up the IPv4 destination address in that FIB to obtain a
 * load-balance (LB) object, pick a bucket (via the flow hash when the LB has
 * more than one bucket), and steer the buffer to the chosen DPO's next node,
 * stashing the DPO index in the buffer's VLIB_TX adjacency slot for the
 * downstream node. Per-LB combined (packets/bytes) counters are updated for
 * each packet.
 *
 * Three loop variants are selected at compile time by CLIB_N_PREFETCHES:
 * a 4-packets-per-iteration variant (>= 8 prefetch slots), a
 * 2-packets-per-iteration variant (>= 4), and a scalar tail loop that also
 * serves as the full loop when neither unrolled variant is compiled in.
 * All variants are behaviorally equivalent per packet.
 *
 * @param vm    vlib main for this thread
 * @param node  runtime of the calling graph node (used for trace flag)
 * @param frame frame of buffer indices to process
 * @return number of vectors (packets) processed, i.e. frame->n_vectors
 */
always_inline uword
ip4_lookup_inline (vlib_main_t * vm,
		   vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  ip4_main_t *im = &ip4_main;
  /* Combined packet/byte counters indexed by load-balance index. */
  vlib_combined_counter_main_t *cm = &load_balance_main.lbm_to_counters;
  u32 n_left, *from;
  u32 thread_index = vm->thread_index;
  /* Buffer pointers and per-packet next-node indices for the whole frame;
     b/next walk these arrays in lock-step with the loops below. */
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE];
  vlib_buffer_t **b = bufs;
  u16 nexts[VLIB_FRAME_SIZE], *next;

  from = vlib_frame_vector_args (frame);
  n_left = frame->n_vectors;
  next = nexts;
  vlib_get_buffers (vm, from, bufs, n_left);

#if (CLIB_N_PREFETCHES >= 8)
  /* Quad-packet loop: process packets 0..3 while prefetching 4..7. */
  while (n_left >= 4)
    {
      ip4_header_t *ip0, *ip1, *ip2, *ip3;
      const load_balance_t *lb0, *lb1, *lb2, *lb3;
      ip4_address_t *dst_addr0, *dst_addr1, *dst_addr2, *dst_addr3;
      u32 lb_index0, lb_index1, lb_index2, lb_index3;
      flow_hash_config_t flow_hash_config0, flow_hash_config1;
      flow_hash_config_t flow_hash_config2, flow_hash_config3;
      u32 hash_c0, hash_c1, hash_c2, hash_c3;
      const dpo_id_t *dpo0, *dpo1, *dpo2, *dpo3;

      /* Prefetch next iteration. */
      if (n_left >= 8)
	{
	  vlib_prefetch_buffer_header (b[4], LOAD);
	  vlib_prefetch_buffer_header (b[5], LOAD);
	  vlib_prefetch_buffer_header (b[6], LOAD);
	  vlib_prefetch_buffer_header (b[7], LOAD);

	  /* Prefetch just the IPv4 header of each upcoming packet. */
	  CLIB_PREFETCH (b[4]->data, sizeof (ip0[0]), LOAD);
	  CLIB_PREFETCH (b[5]->data, sizeof (ip0[0]), LOAD);
	  CLIB_PREFETCH (b[6]->data, sizeof (ip0[0]), LOAD);
	  CLIB_PREFETCH (b[7]->data, sizeof (ip0[0]), LOAD);
	}

      ip0 = vlib_buffer_get_current (b[0]);
      ip1 = vlib_buffer_get_current (b[1]);
      ip2 = vlib_buffer_get_current (b[2]);
      ip3 = vlib_buffer_get_current (b[3]);

      dst_addr0 = &ip0->dst_address;
      dst_addr1 = &ip1->dst_address;
      dst_addr2 = &ip2->dst_address;
      dst_addr3 = &ip3->dst_address;

      /* Set vnet_buffer()->ip.fib_index from the packet's RX interface. */
      ip_lookup_set_buffer_fib_index (im->fib_index_by_sw_if_index, b[0]);
      ip_lookup_set_buffer_fib_index (im->fib_index_by_sw_if_index, b[1]);
      ip_lookup_set_buffer_fib_index (im->fib_index_by_sw_if_index, b[2]);
      ip_lookup_set_buffer_fib_index (im->fib_index_by_sw_if_index, b[3]);

      /* Batched 4-wide destination-address lookup, one LB index each. */
      ip4_fib_forwarding_lookup_x4 (
	vnet_buffer (b[0])->ip.fib_index, vnet_buffer (b[1])->ip.fib_index,
	vnet_buffer (b[2])->ip.fib_index, vnet_buffer (b[3])->ip.fib_index,
	dst_addr0, dst_addr1, dst_addr2, dst_addr3, &lb_index0, &lb_index1,
	&lb_index2, &lb_index3);

      /* NOTE(review): lookups are expected to always yield a non-zero LB
         index — presumably index 0 is reserved/invalid; confirm in fib. */
      ASSERT (lb_index0 && lb_index1 && lb_index2 && lb_index3);
      lb0 = load_balance_get (lb_index0);
      lb1 = load_balance_get (lb_index1);
      lb2 = load_balance_get (lb_index2);
      lb3 = load_balance_get (lb_index3);

      /* Bucket counts must be non-zero powers of two so that
         (hash & n_buckets_minus_1) is a valid bucket selector. */
      ASSERT (lb0->lb_n_buckets > 0);
      ASSERT (is_pow2 (lb0->lb_n_buckets));
      ASSERT (lb1->lb_n_buckets > 0);
      ASSERT (is_pow2 (lb1->lb_n_buckets));
      ASSERT (lb2->lb_n_buckets > 0);
      ASSERT (is_pow2 (lb2->lb_n_buckets));
      ASSERT (lb3->lb_n_buckets > 0);
      ASSERT (is_pow2 (lb3->lb_n_buckets));

      /* Use flow hash to compute multipath adjacency. */
      hash_c0 = vnet_buffer (b[0])->ip.flow_hash = 0;
      hash_c1 = vnet_buffer (b[1])->ip.flow_hash = 0;
      hash_c2 = vnet_buffer (b[2])->ip.flow_hash = 0;
      hash_c3 = vnet_buffer (b[3])->ip.flow_hash = 0;
      /* Single-bucket (non-ECMP) LBs skip the hash; multipath is assumed
         to be the uncommon case (PREDICT_FALSE). */
      if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
	{
	  flow_hash_config0 = lb0->lb_hash_config;
	  hash_c0 = vnet_buffer (b[0])->ip.flow_hash =
	    ip4_compute_flow_hash (ip0, flow_hash_config0);
	  dpo0 =
	    load_balance_get_fwd_bucket (lb0,
					 (hash_c0 &
					  (lb0->lb_n_buckets_minus_1)));
	}
      else
	{
	  dpo0 = load_balance_get_bucket_i (lb0, 0);
	}
      if (PREDICT_FALSE (lb1->lb_n_buckets > 1))
	{
	  flow_hash_config1 = lb1->lb_hash_config;
	  hash_c1 = vnet_buffer (b[1])->ip.flow_hash =
	    ip4_compute_flow_hash (ip1, flow_hash_config1);
	  dpo1 =
	    load_balance_get_fwd_bucket (lb1,
					 (hash_c1 &
					  (lb1->lb_n_buckets_minus_1)));
	}
      else
	{
	  dpo1 = load_balance_get_bucket_i (lb1, 0);
	}
      if (PREDICT_FALSE (lb2->lb_n_buckets > 1))
	{
	  flow_hash_config2 = lb2->lb_hash_config;
	  hash_c2 = vnet_buffer (b[2])->ip.flow_hash =
	    ip4_compute_flow_hash (ip2, flow_hash_config2);
	  dpo2 =
	    load_balance_get_fwd_bucket (lb2,
					 (hash_c2 &
					  (lb2->lb_n_buckets_minus_1)));
	}
      else
	{
	  dpo2 = load_balance_get_bucket_i (lb2, 0);
	}
      if (PREDICT_FALSE (lb3->lb_n_buckets > 1))
	{
	  flow_hash_config3 = lb3->lb_hash_config;
	  hash_c3 = vnet_buffer (b[3])->ip.flow_hash =
	    ip4_compute_flow_hash (ip3, flow_hash_config3);
	  dpo3 =
	    load_balance_get_fwd_bucket (lb3,
					 (hash_c3 &
					  (lb3->lb_n_buckets_minus_1)));
	}
      else
	{
	  dpo3 = load_balance_get_bucket_i (lb3, 0);
	}

      /* Steer each packet to its DPO's next node and record the DPO index
         in the TX adjacency slot for the next node to consume. */
      next[0] = dpo0->dpoi_next_node;
      vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
      next[1] = dpo1->dpoi_next_node;
      vnet_buffer (b[1])->ip.adj_index[VLIB_TX] = dpo1->dpoi_index;
      next[2] = dpo2->dpoi_next_node;
      vnet_buffer (b[2])->ip.adj_index[VLIB_TX] = dpo2->dpoi_index;
      next[3] = dpo3->dpoi_next_node;
      vnet_buffer (b[3])->ip.adj_index[VLIB_TX] = dpo3->dpoi_index;

      /* Account one packet + its full chain length against each LB. */
      vlib_increment_combined_counter
	(cm, thread_index, lb_index0, 1,
	 vlib_buffer_length_in_chain (vm, b[0]));
      vlib_increment_combined_counter
	(cm, thread_index, lb_index1, 1,
	 vlib_buffer_length_in_chain (vm, b[1]));
      vlib_increment_combined_counter
	(cm, thread_index, lb_index2, 1,
	 vlib_buffer_length_in_chain (vm, b[2]));
      vlib_increment_combined_counter
	(cm, thread_index, lb_index3, 1,
	 vlib_buffer_length_in_chain (vm, b[3]));

      b += 4;
      next += 4;
      n_left -= 4;
    }
#elif (CLIB_N_PREFETCHES >= 4)
  /* Dual-packet loop: process packets 0..1 while prefetching 2..3.
     Same per-packet logic as the quad loop above, two-wide. */
  while (n_left >= 4)
    {
      ip4_header_t *ip0, *ip1;
      const load_balance_t *lb0, *lb1;
      ip4_address_t *dst_addr0, *dst_addr1;
      u32 lb_index0, lb_index1;
      flow_hash_config_t flow_hash_config0, flow_hash_config1;
      u32 hash_c0, hash_c1;
      const dpo_id_t *dpo0, *dpo1;

      /* Prefetch next iteration. */
      {
	vlib_prefetch_buffer_header (b[2], LOAD);
	vlib_prefetch_buffer_header (b[3], LOAD);

	CLIB_PREFETCH (b[2]->data, sizeof (ip0[0]), LOAD);
	CLIB_PREFETCH (b[3]->data, sizeof (ip0[0]), LOAD);
      }

      ip0 = vlib_buffer_get_current (b[0]);
      ip1 = vlib_buffer_get_current (b[1]);

      dst_addr0 = &ip0->dst_address;
      dst_addr1 = &ip1->dst_address;

      ip_lookup_set_buffer_fib_index (im->fib_index_by_sw_if_index, b[0]);
      ip_lookup_set_buffer_fib_index (im->fib_index_by_sw_if_index, b[1]);

      /* Batched 2-wide destination-address lookup. */
      ip4_fib_forwarding_lookup_x2 (
	vnet_buffer (b[0])->ip.fib_index, vnet_buffer (b[1])->ip.fib_index,
	dst_addr0, dst_addr1, &lb_index0, &lb_index1);

      ASSERT (lb_index0 && lb_index1);
      lb0 = load_balance_get (lb_index0);
      lb1 = load_balance_get (lb_index1);

      ASSERT (lb0->lb_n_buckets > 0);
      ASSERT (is_pow2 (lb0->lb_n_buckets));
      ASSERT (lb1->lb_n_buckets > 0);
      ASSERT (is_pow2 (lb1->lb_n_buckets));

      /* Use flow hash to compute multipath adjacency. */
      hash_c0 = vnet_buffer (b[0])->ip.flow_hash = 0;
      hash_c1 = vnet_buffer (b[1])->ip.flow_hash = 0;
      if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
	{
	  flow_hash_config0 = lb0->lb_hash_config;
	  hash_c0 = vnet_buffer (b[0])->ip.flow_hash =
	    ip4_compute_flow_hash (ip0, flow_hash_config0);
	  dpo0 =
	    load_balance_get_fwd_bucket (lb0,
					 (hash_c0 &
					  (lb0->lb_n_buckets_minus_1)));
	}
      else
	{
	  dpo0 = load_balance_get_bucket_i (lb0, 0);
	}
      if (PREDICT_FALSE (lb1->lb_n_buckets > 1))
	{
	  flow_hash_config1 = lb1->lb_hash_config;
	  hash_c1 = vnet_buffer (b[1])->ip.flow_hash =
	    ip4_compute_flow_hash (ip1, flow_hash_config1);
	  dpo1 =
	    load_balance_get_fwd_bucket (lb1,
					 (hash_c1 &
					  (lb1->lb_n_buckets_minus_1)));
	}
      else
	{
	  dpo1 = load_balance_get_bucket_i (lb1, 0);
	}

      next[0] = dpo0->dpoi_next_node;
      vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
      next[1] = dpo1->dpoi_next_node;
      vnet_buffer (b[1])->ip.adj_index[VLIB_TX] = dpo1->dpoi_index;

      vlib_increment_combined_counter
	(cm, thread_index, lb_index0, 1,
	 vlib_buffer_length_in_chain (vm, b[0]));
      vlib_increment_combined_counter
	(cm, thread_index, lb_index1, 1,
	 vlib_buffer_length_in_chain (vm, b[1]));

      b += 2;
      next += 2;
      n_left -= 2;
    }
#endif
  /* Scalar loop: handles the tail left over by an unrolled variant, or the
     entire frame when neither unrolled loop is compiled in. */
  while (n_left > 0)
    {
      ip4_header_t *ip0;
      const load_balance_t *lb0;
      ip4_address_t *dst_addr0;
      u32 lbi0;
      flow_hash_config_t flow_hash_config0;
      const dpo_id_t *dpo0;
      u32 hash_c0;

      ip0 = vlib_buffer_get_current (b[0]);
      dst_addr0 = &ip0->dst_address;
      ip_lookup_set_buffer_fib_index (im->fib_index_by_sw_if_index, b[0]);

      lbi0 = ip4_fib_forwarding_lookup (vnet_buffer (b[0])->ip.fib_index,
					dst_addr0);

      ASSERT (lbi0);
      lb0 = load_balance_get (lbi0);

      ASSERT (lb0->lb_n_buckets > 0);
      ASSERT (is_pow2 (lb0->lb_n_buckets));

      /* Use flow hash to compute multipath adjacency. */
      hash_c0 = vnet_buffer (b[0])->ip.flow_hash = 0;
      if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
	{
	  flow_hash_config0 = lb0->lb_hash_config;

	  hash_c0 = vnet_buffer (b[0])->ip.flow_hash =
	    ip4_compute_flow_hash (ip0, flow_hash_config0);
	  dpo0 =
	    load_balance_get_fwd_bucket (lb0,
					 (hash_c0 &
					  (lb0->lb_n_buckets_minus_1)));
	}
      else
	{
	  dpo0 = load_balance_get_bucket_i (lb0, 0);
	}

      next[0] = dpo0->dpoi_next_node;
      vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;

      vlib_increment_combined_counter (cm, thread_index, lbi0, 1,
				       vlib_buffer_length_in_chain (vm,
								    b[0]));

      b += 1;
      next += 1;
      n_left -= 1;
    }

  /* Hand every buffer to its chosen next node in one batched enqueue. */
  vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);

  /* Add per-packet trace records when tracing is enabled on this node. */
  if (node->flags & VLIB_NODE_FLAG_TRACE)
    ip4_forward_next_trace (vm, node, frame, VLIB_TX);

  return frame->n_vectors;
}

#endif /* __included_ip4_forward_h__ */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */