/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * main.c: main vector processing loop
 *
 * Copyright (c) 2008 Eliot Dresselhaus
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

/* NOTE: the include targets were lost in extraction; this list is
   reconstructed from upstream vlib/main.c and may differ in your tree. */
#include <math.h>
#include <vppinfra/format.h>
#include <vlib/vlib.h>
#include <vlib/threads.h>
#include <vppinfra/tw_timer_1t_3w_1024sl_ov.h>

#include <vlib/unix/unix.h>
#include <vlib/unix/cj.h>

CJ_GLOBAL_LOG_PROTOTYPE;

/* Actually allocate a few extra slots of vector data to support
   speculative vector enqueues which overflow vector data in next frame. */
#define VLIB_FRAME_SIZE_ALLOC (VLIB_FRAME_SIZE + 4)

u32 wraps;

always_inline u32
vlib_frame_bytes (u32 n_scalar_bytes, u32 n_vector_bytes)
{
  u32 n_bytes;

  /* Make room for vlib_frame_t plus scalar arguments. */
  n_bytes = vlib_frame_vector_byte_offset (n_scalar_bytes);

  /* Make room for vector arguments.
     Allocate a few extra slots of vector data to support
     speculative vector enqueues which overflow vector data in next frame. */
#define VLIB_FRAME_SIZE_EXTRA 4
  n_bytes += (VLIB_FRAME_SIZE + VLIB_FRAME_SIZE_EXTRA) * n_vector_bytes;

  /* Magic number is first 32bit number after vector data.
     Used to make sure that vector data is never overrun. */
#define VLIB_FRAME_MAGIC (0xabadc0ed)
  n_bytes += sizeof (u32);

  /* Pad to cache line. */
  n_bytes = round_pow2 (n_bytes, CLIB_CACHE_LINE_BYTES);

  return n_bytes;
}

always_inline u32 *
vlib_frame_find_magic (vlib_frame_t * f, vlib_node_t * node)
{
  void *p = f;

  p += vlib_frame_vector_byte_offset (node->scalar_size);

  p += (VLIB_FRAME_SIZE + VLIB_FRAME_SIZE_EXTRA) * node->vector_size;

  return p;
}

static inline vlib_frame_size_t *
get_frame_size_info (vlib_node_main_t * nm,
		     u32 n_scalar_bytes, u32 n_vector_bytes)
{
#ifdef VLIB_SUPPORTS_ARBITRARY_SCALAR_SIZES
  uword key = (n_scalar_bytes << 16) | n_vector_bytes;
  uword *p, i;

  p = hash_get (nm->frame_size_hash, key);
  if (p)
    i = p[0];
  else
    {
      i = vec_len (nm->frame_sizes);
      vec_validate (nm->frame_sizes, i);
      hash_set (nm->frame_size_hash, key, i);
    }

  return vec_elt_at_index (nm->frame_sizes, i);
#else
  ASSERT (vlib_frame_bytes (n_scalar_bytes, n_vector_bytes)
	  == (vlib_frame_bytes (0, 4)));
  return vec_elt_at_index (nm->frame_sizes, 0);
#endif
}

static vlib_frame_t *
vlib_frame_alloc_to_node (vlib_main_t * vm, u32 to_node_index,
			  u32 frame_flags)
{
  vlib_node_main_t *nm = &vm->node_main;
  vlib_frame_size_t *fs;
  vlib_node_t *to_node;
  vlib_frame_t *f;
  u32 l, n, scalar_size, vector_size;

  to_node = vlib_get_node (vm, to_node_index);

  scalar_size = to_node->scalar_size;
  vector_size = to_node->vector_size;

  fs = get_frame_size_info (nm, scalar_size, vector_size);
  n = vlib_frame_bytes (scalar_size, vector_size);
  if ((l = vec_len (fs->free_frames)) > 0)
    {
      /* Allocate from end of free list. */
      f = fs->free_frames[l - 1];
      _vec_len (fs->free_frames) = l - 1;
    }
  else
    {
      f = clib_mem_alloc_aligned_no_fail (n, VLIB_FRAME_ALIGN);
    }

  /* Poison frame when debugging. */
  if (CLIB_DEBUG > 0)
    clib_memset (f, 0xfe, n);

  /* Insert magic number. */
  {
    u32 *magic;

    magic = vlib_frame_find_magic (f, to_node);
    *magic = VLIB_FRAME_MAGIC;
  }

  f->frame_flags = VLIB_FRAME_IS_ALLOCATED | frame_flags;
  f->n_vectors = 0;
  f->scalar_size = scalar_size;
  f->vector_size = vector_size;
  f->flags = 0;

  fs->n_alloc_frames += 1;

  return f;
}

/* Allocate a frame from FROM_NODE to TO_NODE via TO_NEXT_INDEX.
   Returns a pointer to the new frame. */
static vlib_frame_t *
vlib_frame_alloc (vlib_main_t * vm, vlib_node_runtime_t * from_node_runtime,
		  u32 to_next_index)
{
  vlib_node_t *from_node;

  from_node = vlib_get_node (vm, from_node_runtime->node_index);
  ASSERT (to_next_index < vec_len (from_node->next_nodes));

  return vlib_frame_alloc_to_node (vm, from_node->next_nodes[to_next_index],
				   /* frame_flags */ 0);
}

vlib_frame_t *
vlib_get_frame_to_node (vlib_main_t * vm, u32 to_node_index)
{
  vlib_frame_t *f = vlib_frame_alloc_to_node (vm, to_node_index,
					      /* frame_flags */
					      VLIB_FRAME_FREE_AFTER_DISPATCH);
  return vlib_get_frame (vm, f);
}

void
vlib_put_frame_to_node (vlib_main_t * vm, u32 to_node_index,
			vlib_frame_t * f)
{
  vlib_pending_frame_t *p;
  vlib_node_t *to_node;

  if (f->n_vectors == 0)
    return;

  to_node = vlib_get_node (vm, to_node_index);

  vec_add2 (vm->node_main.pending_frames, p, 1);

  f->frame_flags |= VLIB_FRAME_PENDING;
  p->frame = vlib_get_frame (vm, f);
  p->node_runtime_index = to_node->runtime_index;
  p->next_frame_index = VLIB_PENDING_FRAME_NO_NEXT_FRAME;
}

/* Free given frame. */
void
vlib_frame_free (vlib_main_t * vm, vlib_node_runtime_t * r, vlib_frame_t * f)
{
  vlib_node_main_t *nm = &vm->node_main;
  vlib_node_t *node;
  vlib_frame_size_t *fs;

  ASSERT (f->frame_flags & VLIB_FRAME_IS_ALLOCATED);

  node = vlib_get_node (vm, r->node_index);
  fs = get_frame_size_info (nm, node->scalar_size, node->vector_size);

  /* No next frames may point to freed frame. */
  if (CLIB_DEBUG > 0)
    {
      vlib_next_frame_t *nf;
      vec_foreach (nf, vm->node_main.next_frames) ASSERT (nf->frame != f);
    }

  f->frame_flags &= ~(VLIB_FRAME_IS_ALLOCATED | VLIB_FRAME_NO_APPEND);

  vec_add1 (fs->free_frames, f);
  ASSERT (fs->n_alloc_frames > 0);
  fs->n_alloc_frames -= 1;
}

static clib_error_t *
show_frame_stats (vlib_main_t * vm,
		  unformat_input_t * input, vlib_cli_command_t * cmd)
{
  vlib_node_main_t *nm = &vm->node_main;
  vlib_frame_size_t *fs;

  vlib_cli_output (vm, "%=6s%=12s%=12s", "Size", "# Alloc", "# Free");
  vec_foreach (fs, nm->frame_sizes)
  {
    u32 n_alloc = fs->n_alloc_frames;
    u32 n_free = vec_len (fs->free_frames);

    if (n_alloc + n_free > 0)
      vlib_cli_output (vm, "%=6d%=12d%=12d",
		       fs - nm->frame_sizes, n_alloc, n_free);
  }

  return 0;
}

/* *INDENT-OFF* */
VLIB_CLI_COMMAND (show_frame_stats_cli, static) = {
  .path = "show vlib frame-allocation",
  .short_help = "Show node dispatch frame statistics",
  .function = show_frame_stats,
};
/* *INDENT-ON* */

/* Change ownership of enqueue rights to given next node. */
static void
vlib_next_frame_change_ownership (vlib_main_t * vm,
				  vlib_node_runtime_t * node_runtime,
				  u32 next_index)
{
  vlib_node_main_t *nm = &vm->node_main;
  vlib_next_frame_t *next_frame;
  vlib_node_t *node, *next_node;

  node = vec_elt (nm->nodes, node_runtime->node_index);

  /* Only internal & input nodes are allowed to call other nodes. */
  ASSERT (node->type == VLIB_NODE_TYPE_INTERNAL
	  || node->type == VLIB_NODE_TYPE_INPUT
	  || node->type == VLIB_NODE_TYPE_PROCESS);

  ASSERT (vec_len (node->next_nodes) == node_runtime->n_next_nodes);

  next_frame =
    vlib_node_runtime_get_next_frame (vm, node_runtime, next_index);
  next_node = vec_elt (nm->nodes, node->next_nodes[next_index]);

  if (next_node->owner_node_index != VLIB_INVALID_NODE_INDEX)
    {
      /* Get frame from previous owner. */
      vlib_next_frame_t *owner_next_frame;
      vlib_next_frame_t tmp;

      owner_next_frame =
	vlib_node_get_next_frame (vm,
				  next_node->owner_node_index,
				  next_node->owner_next_index);

      /* Swap target next frame with owner's. */
      tmp = owner_next_frame[0];
      owner_next_frame[0] = next_frame[0];
      next_frame[0] = tmp;

      /*
       * If next_frame is already pending, we have to track down
       * all pending frames and fix their next_frame_index fields.
       */
      if (next_frame->flags & VLIB_FRAME_PENDING)
	{
	  vlib_pending_frame_t *p;
	  if (next_frame->frame != NULL)
	    {
	      vec_foreach (p, nm->pending_frames)
	      {
		if (p->frame == next_frame->frame)
		  {
		    p->next_frame_index =
		      next_frame - vm->node_main.next_frames;
		  }
	      }
	    }
	}
    }
  else
    {
      /* No previous owner. Take ownership. */
      next_frame->flags |= VLIB_FRAME_OWNER;
    }

  /* Record new owner. */
  next_node->owner_node_index = node->index;
  next_node->owner_next_index = next_index;

  /* Now we should be owner. */
  ASSERT (next_frame->flags & VLIB_FRAME_OWNER);
}

/* Make sure that magic number is still there.
   Otherwise, it is likely that caller has overrun frame arguments. */
always_inline void
validate_frame_magic (vlib_main_t * vm,
		      vlib_frame_t * f, vlib_node_t * n, uword next_index)
{
  vlib_node_t *next_node = vlib_get_node (vm, n->next_nodes[next_index]);
  u32 *magic = vlib_frame_find_magic (f, next_node);
  ASSERT (VLIB_FRAME_MAGIC == magic[0]);
}

vlib_frame_t *
vlib_get_next_frame_internal (vlib_main_t * vm,
			      vlib_node_runtime_t * node,
			      u32 next_index, u32 allocate_new_next_frame)
{
  vlib_frame_t *f;
  vlib_next_frame_t *nf;
  u32 n_used;

  nf = vlib_node_runtime_get_next_frame (vm, node, next_index);

  /* Make sure this next frame owns right to enqueue to destination frame. */
  if (PREDICT_FALSE (!(nf->flags & VLIB_FRAME_OWNER)))
    vlib_next_frame_change_ownership (vm, node, next_index);

  /* ??? Don't need valid flag: can use frame_index == ~0 */
  if (PREDICT_FALSE (!(nf->flags & VLIB_FRAME_IS_ALLOCATED)))
    {
      nf->frame = vlib_frame_alloc (vm, node, next_index);
      nf->flags |= VLIB_FRAME_IS_ALLOCATED;
    }

  f = nf->frame;

  /* Has frame been removed from pending vector (e.g. finished dispatching)?
     If so we can reuse frame. */
  if ((nf->flags & VLIB_FRAME_PENDING)
      && !(f->frame_flags & VLIB_FRAME_PENDING))
    {
      nf->flags &= ~VLIB_FRAME_PENDING;
      f->n_vectors = 0;
      f->flags = 0;
    }

  /* Allocate new frame if current one is marked as no-append or
     it is already full. */
  n_used = f->n_vectors;
  if (n_used >= VLIB_FRAME_SIZE || (allocate_new_next_frame && n_used > 0) ||
      (f->frame_flags & VLIB_FRAME_NO_APPEND))
    {
      /* Old frame may need to be freed after dispatch, since we'll have
         two redundant frames from node -> next node. */
      if (!(nf->flags & VLIB_FRAME_NO_FREE_AFTER_DISPATCH))
	{
	  vlib_frame_t *f_old = vlib_get_frame (vm, nf->frame);
	  f_old->frame_flags |= VLIB_FRAME_FREE_AFTER_DISPATCH;
	}

      /* Allocate new frame to replace full one. */
      f = nf->frame = vlib_frame_alloc (vm, node, next_index);
      n_used = f->n_vectors;
    }

  /* Should have free vectors in frame now. */
  ASSERT (n_used < VLIB_FRAME_SIZE);

  if (CLIB_DEBUG > 0)
    {
      validate_frame_magic (vm, f,
			    vlib_get_node (vm, node->node_index),
			    next_index);
    }

  return f;
}

static void
vlib_put_next_frame_validate (vlib_main_t * vm,
			      vlib_node_runtime_t * rt,
			      u32 next_index, u32 n_vectors_left)
{
  vlib_node_main_t *nm = &vm->node_main;
  vlib_next_frame_t *nf;
  vlib_frame_t *f;
  vlib_node_runtime_t *next_rt;
  vlib_node_t *next_node;
  u32 n_before, n_after;

  nf = vlib_node_runtime_get_next_frame (vm, rt, next_index);
  f = vlib_get_frame (vm, nf->frame);

  ASSERT (n_vectors_left <= VLIB_FRAME_SIZE);
  n_after = VLIB_FRAME_SIZE - n_vectors_left;
  n_before = f->n_vectors;

  ASSERT (n_after >= n_before);

  next_rt = vec_elt_at_index (nm->nodes_by_type[VLIB_NODE_TYPE_INTERNAL],
			      nf->node_runtime_index);
  next_node = vlib_get_node (vm, next_rt->node_index);
  if (n_after > 0 && next_node->validate_frame)
    {
      u8 *msg = next_node->validate_frame (vm, rt, f);
      if (msg)
	{
	  clib_warning ("%v", msg);
	  ASSERT (0);
	}
      vec_free (msg);
    }
}

void
vlib_put_next_frame (vlib_main_t * vm,
		     vlib_node_runtime_t * r,
		     u32 next_index, u32 n_vectors_left)
{
  vlib_node_main_t *nm = &vm->node_main;
  vlib_next_frame_t *nf;
  vlib_frame_t *f;
  u32 n_vectors_in_frame;

  if (CLIB_DEBUG > 0)
    vlib_put_next_frame_validate (vm, r, next_index, n_vectors_left);

  nf = vlib_node_runtime_get_next_frame (vm, r, next_index);
  f = vlib_get_frame (vm, nf->frame);

  /* Make sure that magic number is still there.  Otherwise, caller
     has overrun frame meta data. */
  if (CLIB_DEBUG > 0)
    {
      vlib_node_t *node = vlib_get_node (vm, r->node_index);
      validate_frame_magic (vm, f, node, next_index);
    }

  /* Convert # of vectors left -> number of vectors there. */
  ASSERT (n_vectors_left <= VLIB_FRAME_SIZE);
  n_vectors_in_frame = VLIB_FRAME_SIZE - n_vectors_left;

  f->n_vectors = n_vectors_in_frame;

  /* If vectors were added to frame, add to pending vector. */
  if (PREDICT_TRUE (n_vectors_in_frame > 0))
    {
      vlib_pending_frame_t *p;
      u32 v0, v1;

      r->cached_next_index = next_index;

      if (!(f->frame_flags & VLIB_FRAME_PENDING))
	{
	  __attribute__ ((unused)) vlib_node_t *node;
	  vlib_node_t *next_node;
	  vlib_node_runtime_t *next_runtime;

	  node = vlib_get_node (vm, r->node_index);
	  next_node = vlib_get_next_node (vm, r->node_index, next_index);
	  next_runtime = vlib_node_get_runtime (vm, next_node->index);

	  vec_add2 (nm->pending_frames, p, 1);

	  p->frame = nf->frame;
	  p->node_runtime_index = nf->node_runtime_index;
	  p->next_frame_index = nf - nm->next_frames;
	  nf->flags |= VLIB_FRAME_PENDING;
	  f->frame_flags |= VLIB_FRAME_PENDING;

	  /*
	   * If we're going to dispatch this frame on another thread,
	   * force allocation of a new frame. Otherwise, we create
	   * a dangling frame reference. Each thread has its own copy of
	   * the next_frames vector.
	   */
	  if (0 && r->thread_index != next_runtime->thread_index)
	    {
	      /* The source text breaks off at "nf->frame"; the body of
	         this if is restored from upstream vlib/main.c. */
	      nf->frame = NULL;
	      nf->flags &= ~(VLIB_FRAME_PENDING | VLIB_FRAME_IS_ALLOCATED);
	    }
	}
    }
}
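
/*
 * Usage sketch (illustrative only; "example_node_fn" is hypothetical):
 * the canonical enqueue pattern that node dispatch functions build on
 * top of vlib_get_next_frame_internal / vlib_put_next_frame above.
 * Claim space in the next frame, copy buffer indices, then return the
 * unused slots so the frame lands on the pending vector.
 */
static uword
example_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
		 vlib_frame_t * frame)
{
  u32 *from = vlib_frame_vector_args (frame);
  u32 n_left_from = frame->n_vectors;
  u32 next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 *to_next, n_left_to_next;

      /* Claim enqueue space in the frame going to next_index. */
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
	{
	  to_next[0] = from[0];	/* pass buffer index through unchanged */
	  from += 1;
	  to_next += 1;
	  n_left_from -= 1;
	  n_left_to_next -= 1;
	}

      /* Return unused slots; this puts the frame on the pending vector. */
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return frame->n_vectors;
}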
/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * ip/ip6_forward.h: IP v6 forwarding
 *
 * Copyright (c) 2008 Eliot Dresselhaus
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 *  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 *  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 *  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 *  NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
 *  LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 *  OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 *  WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef __included_ip6_forward_h__
#define __included_ip6_forward_h__

#include <vnet/fib/ip6_fib.h>
#include <vnet/dpo/load_balance_map.h>

/**
 * @file
 * @brief IPv6 Forwarding.
 *
 * This file contains the source code for IPv6 forwarding.
 */


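/*
 * Look up the FIB entry for each packet's destination address, pick a
 * load-balance bucket (flow-hashing only when there is more than one),
 * and enqueue the packet to the chosen DPO's next node.  Processes two
 * packets per iteration with two-ahead prefetch, then finishes the
 * remainder one packet at a time.
 */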
always_inline uword
ip6_lookup_inline (vlib_main_t * vm,
		   vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  ip6_main_t *im = &ip6_main;
  vlib_combined_counter_main_t *cm = &load_balance_main.lbm_to_counters;
  u32 n_left_from, n_left_to_next, *from, *to_next;
  ip_lookup_next_t next;
  u32 thread_index = vm->thread_index;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next = node->cached_next_index;

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next, to_next, n_left_to_next);

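      /* Dual loop: two packets per iteration, prefetching two ahead. */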
      while (n_left_from >= 4 && n_left_to_next >= 2)
	{
	  vlib_buffer_t *p0, *p1;
	  u32 pi0, pi1, lbi0, lbi1, wrong_next;
	  ip_lookup_next_t next0, next1;
	  ip6_header_t *ip0, *ip1;
	  ip6_address_t *dst_addr0, *dst_addr1;
	  u32 flow_hash_config0, flow_hash_config1;
	  const dpo_id_t *dpo0, *dpo1;
	  const load_balance_t *lb0, *lb1;

	  /* Prefetch next iteration. */
	  {
	    vlib_buffer_t *p2, *p3;

	    p2 = vlib_get_buffer (vm, from[2]);
	    p3 = vlib_get_buffer (vm, from[3]);

	    vlib_prefetch_buffer_header (p2, LOAD);
	    vlib_prefetch_buffer_header (p3, LOAD);
	    CLIB_PREFETCH (p2->data, sizeof (ip0[0]), LOAD);
	    CLIB_PREFETCH (p3->data, sizeof (ip0[0]), LOAD);
	  }

	  pi0 = to_next[0] = from[0];
	  pi1 = to_next[1] = from[1];

	  p0 = vlib_get_buffer (vm, pi0);
	  p1 = vlib_get_buffer (vm, pi1);

	  ip0 = vlib_buffer_get_current (p0);
	  ip1 = vlib_buffer_get_current (p1);

	  dst_addr0 = &ip0->dst_address;
	  dst_addr1 = &ip1->dst_address;

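	  /* Pick the FIB for the lookup from the buffer's rx interface,
	     unless a preceding node set a per-buffer override. */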
	  ip_lookup_set_buffer_fib_index (im->fib_index_by_sw_if_index, p0);
	  ip_lookup_set_buffer_fib_index (im->fib_index_by_sw_if_index, p1);

	  lbi0 = ip6_fib_table_fwding_lookup (vnet_buffer (p0)->ip.fib_index,
					      dst_addr0);
	  lbi1 = ip6_fib_table_fwding_lookup (vnet_buffer (p1)->ip.fib_index,
					      dst_addr1);

	  lb0 = load_balance_get (lbi0);
	  lb1 = load_balance_get (lbi1);
	  ASSERT (lb0->lb_n_buckets > 0);
	  ASSERT (lb1->lb_n_buckets > 0);
	  ASSERT (is_pow2 (lb0->lb_n_buckets));
	  ASSERT (is_pow2 (lb1->lb_n_buckets));

	  vnet_buffer (p0)->ip.flow_hash = vnet_buffer (p1)->ip.flow_hash = 0;

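	  /* Multipath: compute a flow hash only when the load balance
	     has more than one bucket; n_buckets is a power of two, so
	     masking the hash with n_buckets - 1 selects the bucket. */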
	  if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
	    {
	      flow_hash_config0 = lb0->lb_hash_config;
	      vnet_buffer (p0)->ip.flow_hash =
		ip6_compute_flow_hash (ip0, flow_hash_config0);
	      dpo0 =
		load_balance_get_fwd_bucket (lb0,
					     (vnet_buffer (p0)->ip.flow_hash &
					      (lb0->lb_n_buckets_minus_1)));
	    }
	  else
	    {
	      dpo0 = load_balance_get_bucket_i (lb0, 0);
	    }
	  if (PREDICT_FALSE (lb1->lb_n_buckets > 1))
	    {
	      flow_hash_config1 = lb1->lb_hash_config;
	      vnet_buffer (p1)->ip.flow_hash =
		ip6_compute_flow_hash (ip1, flow_hash_config1);
	      dpo1 =
		load_balance_get_fwd_bucket (lb1,
					     (vnet_buffer (p1)->ip.flow_hash &
					      (lb1->lb_n_buckets_minus_1)));
	    }
	  else
	    {
	      dpo1 = load_balance_get_bucket_i (lb1, 0);
	    }
	  next0 = dpo0->dpoi_next_node;
	  next1 = dpo1->dpoi_next_node;

	  /* Only process the HBH Option Header if explicitly configured to do so */
	  if (PREDICT_FALSE
	      (ip0->protocol == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS))
	    {
	      next0 = (dpo_is_adj (dpo0) && im->hbh_enabled) ?
		(ip_lookup_next_t) IP6_LOOKUP_NEXT_HOP_BY_HOP : next0;
	    }
	  if (PREDICT_FALSE
	      (ip1->protocol == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS))
	    {
	      next1 = (dpo_is_adj (dpo1) && im->hbh_enabled) ?
		(ip_lookup_next_t) IP6_LOOKUP_NEXT_HOP_BY_HOP : next1;
	    }
	  vnet_buffer (p0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
	  vnet_buffer (p1)->ip.adj_index[VLIB_TX] = dpo1->dpoi_index;

	  vlib_increment_combined_counter
	    (cm, thread_index, lbi0, 1, vlib_buffer_length_in_chain (vm, p0));
	  vlib_increment_combined_counter
	    (cm, thread_index, lbi1, 1, vlib_buffer_length_in_chain (vm, p1));

	  from += 2;
	  to_next += 2;
	  n_left_to_next -= 2;
	  n_left_from -= 2;

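	  /* Repair speculative enqueues that guessed the wrong next node:
	     bit 0 of wrong_next means packet 0 missed, bit 1 means packet
	     1 missed.  Rewind to_next and re-enqueue to the proper frame;
	     if both missed to the same next, switch the current frame. */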
	  wrong_next = (next0 != next) + 2 * (next1 != next);
	  if (PREDICT_FALSE (wrong_next != 0))
	    {
	      switch (wrong_next)
		{
		case 1:
		  /* A B A */
		  to_next[-2] = pi1;
		  to_next -= 1;
		  n_left_to_next += 1;
		  vlib_set_next_frame_buffer (vm, node, next0, pi0);
		  break;

		case 2:
		  /* A A B */
		  to_next -= 1;
		  n_left_to_next += 1;
		  vlib_set_next_frame_buffer (vm, node, next1, pi1);
		  break;

		case 3:
		  /* A B C */
		  to_next -= 2;
		  n_left_to_next += 2;
		  vlib_set_next_frame_buffer (vm, node, next0, pi0);
		  vlib_set_next_frame_buffer (vm, node, next1, pi1);
		  if (next0 == next1)
		    {
		      /* A B B */
		      vlib_put_next_frame (vm, node, next, n_left_to_next);
		      next = next1;
		      vlib_get_next_frame (vm, node, next, to_next,
					   n_left_to_next);
		    }
		}
	    }
	}

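      /* Single loop: process remaining packets one at a time. */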
      while (n_left_from > 0 && n_left_to_next > 0)
	{
	  vlib_buffer_t *p0;
	  ip6_header_t *ip0;
	  u32 pi0, lbi0;
	  ip_lookup_next_t next0;
	  load_balance_t *lb0;
	  ip6_address_t *dst_addr0;
	  u32 flow_hash_config0;
	  const dpo_id_t *dpo0;

	  pi0 = from[0];
	  to_next[0] = pi0;

	  p0 = vlib_get_buffer (vm, pi0);
	  ip0 = vlib_buffer_get_current (p0);
	  dst_addr0 = &ip0->dst_address;
	  ip_lookup_set_buffer_fib_index (im->fib_index_by_sw_if_index, p0);
	  lbi0 = ip6_fib_table_fwding_lookup (vnet_buffer (p0)->ip.fib_index,
					      dst_addr0);

	  lb0 = load_balance_get (lbi0);
	  flow_hash_config0 = lb0->lb_hash_config;

	  vnet_buffer (p0)->ip.flow_hash = 0;
	  ASSERT (lb0->lb_n_buckets > 0);
	  ASSERT (is_pow2 (lb0->lb_n_buckets));

	  if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
	    {
	      flow_hash_config0 = lb0->lb_hash_config;
	      vnet_buffer (p0)->ip.flow_hash =
		ip6_compute_flow_hash (ip0, flow_hash_config0);
	      dpo0 =
		load_balance_get_fwd_bucket (lb0,
					     (vnet_buffer (p0)->ip.flow_hash &
					      (lb0->lb_n_buckets_minus_1)));
	    }
	  else
	    {
	      dpo0 = load_balance_get_bucket_i (lb0, 0);
	    }

	  next0 = dpo0->dpoi_next_node;

	  /* Only process the HBH Option Header if explicitly configured to do so */
	  if (PREDICT_FALSE
	      (ip0->protocol == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS))
	    {
	      next0 = (dpo_is_adj (dpo0) && im->hbh_enabled) ?
		(ip_lookup_next_t) IP6_LOOKUP_NEXT_HOP_BY_HOP : next0;
	    }
	  vnet_buffer (p0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;

	  vlib_increment_combined_counter
	    (cm, thread_index, lbi0, 1, vlib_buffer_length_in_chain (vm, p0));

	  from += 1;
	  to_next += 1;
	  n_left_to_next -= 1;
	  n_left_from -= 1;

	  if (PREDICT_FALSE (next0 != next))
	    {
	      n_left_to_next += 1;
	      vlib_put_next_frame (vm, node, next, n_left_to_next);
	      next = next0;
	      vlib_get_next_frame (vm, node, next, to_next, n_left_to_next);
	      to_next[0] = pi0;
	      to_next += 1;
	      n_left_to_next -= 1;
	    }
	}

      vlib_put_next_frame (vm, node, next, n_left_to_next);
    }

  if (node->flags & VLIB_NODE_FLAG_TRACE)
    ip6_forward_next_trace (vm, node, frame, VLIB_TX);

  return frame->n_vectors;
}
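
/*
 * Usage sketch (the function name here is illustrative): the ip6-lookup
 * node in ip6_forward.c wraps this inline in essentially this way.
 */
static uword
ip6_lookup_example (vlib_main_t * vm, vlib_node_runtime_t * node,
		    vlib_frame_t * frame)
{
  return ip6_lookup_inline (vm, node, frame);
}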

#endif /*__included_ip6_forward_h__ */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */
	  pm->packet_type = PCAP_PACKET_TYPE_vpp;
	  if (pm->lock == 0)
	    clib_spinlock_init (&(pm->lock));
	  vm->dispatch_pcap_enable = 1;
	  vlib_cli_output (vm, "pcap dispatch capture on...");
	}
    }

  return error;
}

static clib_error_t *
pcap_dispatch_trace_command_fn (vlib_main_t * vm,
				unformat_input_t * input,
				vlib_cli_command_t * cmd)
{
  return pcap_dispatch_trace_command_internal (vm, input, cmd, VLIB_RX);
}

/*?
 * This command is used to start or stop pcap dispatch trace capture, or show
 * the capture status.
 *
 * This command has the following optional parameters:
 *
 * - on|off - Used to start or stop capture.
 *
 * - max <nn> - Depth of local buffer. Once 'nn' number
 *   of packets have been received, buffer is flushed to file. Once another
 *   'nn' number of packets have been received, buffer is flushed
 *   to file, overwriting previous write. If not entered, value defaults
 *   to 100. Can only be updated if packet capture is off.
 *
 * - file <name> - Used to specify the output filename. The file will
 *   be placed in the '/tmp' directory, so only the filename is
 *   supported. Directory should not be entered. If file already exists, file
 *   will be overwritten. If no filename is provided, '/tmp/vpe.pcap'
 *   will be used. Can only be updated if packet capture is off.
 *
 * - status - Displays the current status and configured attributes
 *   associated with a packet capture. If packet capture is in progress,
 *   'status' also will return the number of packets currently in
 *   the local buffer. All additional attributes entered on command line
 *   with 'status' will be ignored and not applied.
 *
 * @cliexpar
 * Example of how to display the status of capture when off:
 * @cliexstart{pcap dispatch trace status}
 * max is 100, for any interface to file /tmp/vpe.pcap
 * pcap dispatch capture is off...
 * @cliexend
 * Example of how to start a dispatch trace capture:
 * @cliexstart{pcap dispatch trace on max 35 file dispatchTrace.pcap}
 * pcap dispatch capture on...
 * @cliexend
 * Example of how to start a dispatch trace capture with buffer tracing:
 * @cliexstart{pcap dispatch trace on max 10000 file dispatchTrace.pcap buffer-trace dpdk-input 1000}
 * pcap dispatch capture on...
 * @cliexend
 * Example of how to display the status of a tx packet capture in progress:
 * @cliexstart{pcap tx trace status}
 * max is 35, dispatch trace to file /tmp/vppTest.pcap
 * pcap tx capture is on: 20 of 35 pkts...
 * @cliexend
 * Example of how to stop a tx packet capture:
 * @cliexstart{vppctl pcap dispatch trace off}
 * captured 21 pkts...
 * saved to /tmp/dispatchTrace.pcap...
 * @cliexend
?*/
/* *INDENT-OFF* */
VLIB_CLI_COMMAND (pcap_dispatch_trace_command, static) = {
  .path = "pcap dispatch trace",
  .short_help =
  "pcap dispatch trace [on|off] [max <nn>] [file <name>] [status]\n"
  "    [buffer-trace <input-node-name> <nn>]",
  .function = pcap_dispatch_trace_command_fn,
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */