author    Damjan Marion <damarion@cisco.com>    2019-01-31 18:50:04 +0100
committer Florin Coras <florin.coras@gmail.com> 2019-01-31 22:25:44 +0000
commit    f646d74392490cf162a615badb92f62b573c694d
tree      cace20119d60ae83ef289dd15bada88bafe8bdd6 /src
parent    0ede47aa452faceed85965ddb727c305059b856a
buffers: validate that buffer is allocated during buffer pool put
Change-Id: I8044b34a37fe1994a8dfa1ca89929f3642c72e8d
Signed-off-by: Damjan Marion <damarion@cisco.com>
Diffstat (limited to 'src')
-rw-r--r--  src/plugins/dpdk/buffer.c |  2
-rw-r--r--  src/vlib/buffer_funcs.h   | 13
2 files changed, 6 insertions(+), 9 deletions(-)
diff --git a/src/plugins/dpdk/buffer.c b/src/plugins/dpdk/buffer.c
index 7f6c118a4a5..1894df42805 100644
--- a/src/plugins/dpdk/buffer.c
+++ b/src/plugins/dpdk/buffer.c
@@ -222,7 +222,7 @@ CLIB_MULTIARCH_FN (dpdk_ops_vpp_enqueue) (struct rte_mempool * mp,
{
vlib_get_buffer_indices_with_offset (vm, (void **) obj_table, bufs,
n, sizeof (struct rte_mbuf));
- vlib_buffer_pool_put (vm, buffer_pool_index, bufs, batch_size);
+ vlib_buffer_pool_put (vm, buffer_pool_index, bufs, n);
}
return 0;
diff --git a/src/vlib/buffer_funcs.h b/src/vlib/buffer_funcs.h
index 37ddcd4a206..09ffd06428f 100644
--- a/src/vlib/buffer_funcs.h
+++ b/src/vlib/buffer_funcs.h
@@ -635,7 +635,7 @@ vlib_buffer_alloc_to_ring_from_pool (vlib_main_t * vm, u32 * ring, u32 start,
return n_alloc;
}
-static void
+static_always_inline void
vlib_buffer_pool_put (vlib_main_t * vm, u8 buffer_pool_index,
u32 * buffers, u32 n_buffers)
{
@@ -643,6 +643,10 @@ vlib_buffer_pool_put (vlib_main_t * vm, u8 buffer_pool_index,
vlib_buffer_pool_thread_t *bpt =
vec_elt_at_index (bp->threads, vm->thread_index);
+ if (CLIB_DEBUG > 0)
+ vlib_buffer_validate_alloc_free (vm, buffers, n_buffers,
+ VLIB_BUFFER_KNOWN_ALLOCATED);
+
vec_add_aligned (bpt->cached_buffers, buffers, n_buffers,
CLIB_CACHE_LINE_BYTES);
@@ -733,10 +737,6 @@ vlib_buffer_free_inline (vlib_main_t * vm, u32 * buffers, u32 n_buffers,
vlib_buffer_copy_template (b[3], &bt);
n_queue += 4;
- if (CLIB_DEBUG > 0)
- vlib_buffer_validate_alloc_free (vm, buffers, 4,
- VLIB_BUFFER_KNOWN_ALLOCATED);
-
vlib_buffer_validate (vm, b[0]);
vlib_buffer_validate (vm, b[1]);
vlib_buffer_validate (vm, b[2]);
@@ -786,9 +786,6 @@ vlib_buffer_free_inline (vlib_main_t * vm, u32 * buffers, u32 n_buffers,
if (clib_atomic_sub_fetch (&b[0]->ref_count, 1) == 0)
{
- if (CLIB_DEBUG > 0)
- vlib_buffer_validate_alloc_free (vm, &bi, 1,
- VLIB_BUFFER_KNOWN_ALLOCATED);
vlib_buffer_copy_template (b[0], &bt);
queue[n_queue++] = bi;
}
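
With this change, the debug-build check (CLIB_DEBUG > 0) happens once, at the single choke point where buffers re-enter a pool, instead of piecemeal in the free path's 4-wide and scalar loops; the DPDK enqueue path (fixed above to pass n rather than batch_size) is covered by the same check. A minimal sketch of the resulting put path, with the cache-trimming logic elided and the pool-lookup helper assumed from context:

    static_always_inline void
    vlib_buffer_pool_put (vlib_main_t *vm, u8 buffer_pool_index,
                          u32 *buffers, u32 n_buffers)
    {
      /* pool lookup as in the original function (not shown in the hunk) */
      vlib_buffer_pool_t *bp = vlib_get_buffer_pool (vm, buffer_pool_index);
      vlib_buffer_pool_thread_t *bpt =
        vec_elt_at_index (bp->threads, vm->thread_index);

      /* debug builds only: every buffer handed back must currently be
         KNOWN_ALLOCATED; catches double-frees and frees of indices that
         were never allocated */
      if (CLIB_DEBUG > 0)
        vlib_buffer_validate_alloc_free (vm, buffers, n_buffers,
                                         VLIB_BUFFER_KNOWN_ALLOCATED);

      vec_add_aligned (bpt->cached_buffers, buffers, n_buffers,
                       CLIB_CACHE_LINE_BYTES);
      /* ... return of excess cached buffers to the global pool elided ... */
    }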
/*
 * Copyright (c) 2017-2019 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/udp/udp_encap.h>
#include <vnet/udp/udp.h>
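
/*
 * Per-packet trace records: the rewritten IP/UDP headers plus the flow
 * hash and fixup flags, one variant per encap address family.
 */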

typedef struct udp4_encap_trace_t_
{
  udp_header_t udp;
  ip4_header_t ip;
  u32 flow_hash;
  udp_encap_fixup_flags_t flags;
} udp4_encap_trace_t;

typedef struct udp6_encap_trace_t_
{
  udp_header_t udp;
  ip6_header_t ip;
  u32 flow_hash;
  udp_encap_fixup_flags_t flags;
} udp6_encap_trace_t;

extern vlib_combined_counter_main_t udp_encap_counters;

static u8 *
format_udp4_encap_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  u32 indent = format_get_indent (s);
  udp4_encap_trace_t *t;

  t = va_arg (*args, udp4_encap_trace_t *);

  s = format (s, "flags: %U, flow hash: 0x%08x\n%U%U\n%U%U",
	      format_udp_encap_fixup_flags, t->flags, t->flow_hash,
	      format_white_space, indent, format_ip4_header, &t->ip,
	      sizeof (t->ip), format_white_space, indent, format_udp_header,
	      &t->udp, sizeof (t->udp));
  return (s);
}

static u8 *
format_udp6_encap_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  u32 indent = format_get_indent (s);
  udp6_encap_trace_t *t;

  t = va_arg (*args, udp6_encap_trace_t *);

  s = format (s, "flags: %U, flow hash: 0x%08x\n%U%U\n%U%U",
	      format_udp_encap_fixup_flags, t->flags, t->flow_hash,
	      format_white_space, indent, format_ip6_header, &t->ip,
	      sizeof (t->ip), format_white_space, indent, format_udp_header,
	      &t->udp, sizeof (t->udp));
  return (s);
}
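
/*
 * Shared worker for all six encap nodes below. encap_family and
 * payload_family are compile-time constants at every call site, so with
 * always_inline the per-family branches fold away in each specialized
 * node function.
 */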

always_inline uword
udp_encap_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
		  vlib_frame_t *frame, ip_address_family_t encap_family,
		  ip_address_family_t payload_family)
{
  vlib_combined_counter_main_t *cm = &udp_encap_counters;
  u32 *from = vlib_frame_vector_args (frame);
  u32 n_left_from, n_left_to_next, *to_next, next_index;
  u32 thread_index = vm->thread_index;

  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
	{
	  vlib_buffer_t *b0, *b1;
	  udp_encap_t *ue0, *ue1;
	  u32 bi0, next0, uei0;
	  u32 bi1, next1, uei1;

	  /* Prefetch next iteration's buffer headers (STORE intent:
	     the headers are about to be rewritten). */
	  {
	    vlib_buffer_t *p2, *p3;

	    p2 = vlib_get_buffer (vm, from[2]);
	    p3 = vlib_get_buffer (vm, from[3]);

	    vlib_prefetch_buffer_header (p2, STORE);
	    vlib_prefetch_buffer_header (p3, STORE);
	  }

	  bi0 = to_next[0] = from[0];
	  bi1 = to_next[1] = from[1];

	  from += 2;
	  n_left_from -= 2;
	  to_next += 2;
	  n_left_to_next -= 2;

	  b0 = vlib_get_buffer (vm, bi0);
	  b1 = vlib_get_buffer (vm, bi1);

	  uei0 = vnet_buffer (b0)->ip.adj_index[VLIB_TX];
	  uei1 = vnet_buffer (b1)->ip.adj_index[VLIB_TX];

	  vlib_increment_combined_counter (cm, thread_index, uei0, 1,
					   vlib_buffer_length_in_chain (vm,
									b0));
	  vlib_increment_combined_counter (cm, thread_index, uei1, 1,
					   vlib_buffer_length_in_chain (vm,
									b1));

	  /* Rewrite the packet headers and update lengths. */
	  ue0 = udp_encap_get (uei0);
	  ue1 = udp_encap_get (uei1);

	  /* Paint: write the encap headers onto the packets */
	  if (encap_family == AF_IP6)
	    {
	      const u8 n_bytes =
		sizeof (udp_header_t) + sizeof (ip6_header_t);
	      ip_udp_encap_two (vm, b0, b1, (u8 *) &ue0->ue_hdrs,
				(u8 *) &ue1->ue_hdrs, n_bytes, encap_family,
				payload_family, ue0->ue_flags, ue1->ue_flags);

	      if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
		{
		  udp6_encap_trace_t *tr =
		    vlib_add_trace (vm, node, b0, sizeof (*tr));
		  tr->udp = ue0->ue_hdrs.ip6.ue_udp;
		  tr->ip = ue0->ue_hdrs.ip6.ue_ip6;
		  tr->flags = ue0->ue_flags;
		  tr->flow_hash = vnet_buffer (b0)->ip.flow_hash;
		}
	      if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
		{
		  udp6_encap_trace_t *tr =
		    vlib_add_trace (vm, node, b1, sizeof (*tr));
		  tr->udp = ue1->ue_hdrs.ip6.ue_udp;
		  tr->ip = ue1->ue_hdrs.ip6.ue_ip6;
		  tr->flags = ue1->ue_flags;
		  tr->flow_hash = vnet_buffer (b1)->ip.flow_hash;
		}
	    }
	  else
	    {
	      const u8 n_bytes =
		sizeof (udp_header_t) + sizeof (ip4_header_t);

	      ip_udp_encap_two (vm, b0, b1, (u8 *) &ue0->ue_hdrs,
				(u8 *) &ue1->ue_hdrs, n_bytes, encap_family,
				payload_family, ue0->ue_flags, ue1->ue_flags);

	      if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
		{
		  udp4_encap_trace_t *tr =
		    vlib_add_trace (vm, node, b0, sizeof (*tr));
		  tr->udp = ue0->ue_hdrs.ip4.ue_udp;
		  tr->ip = ue0->ue_hdrs.ip4.ue_ip4;
		  tr->flags = ue0->ue_flags;
		  tr->flow_hash = vnet_buffer (b0)->ip.flow_hash;
		}
	      if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
		{
		  udp4_encap_trace_t *tr =
		    vlib_add_trace (vm, node, b1, sizeof (*tr));
		  tr->udp = ue1->ue_hdrs.ip4.ue_udp;
		  tr->ip = ue1->ue_hdrs.ip4.ue_ip4;
		  tr->flags = ue1->ue_flags;
		  tr->flow_hash = vnet_buffer (b1)->ip.flow_hash;
		}
	    }

	  next0 = ue0->ue_dpo.dpoi_next_node;
	  next1 = ue1->ue_dpo.dpoi_next_node;
	  vnet_buffer (b0)->ip.adj_index[VLIB_TX] = ue0->ue_dpo.dpoi_index;
	  vnet_buffer (b1)->ip.adj_index[VLIB_TX] = ue1->ue_dpo.dpoi_index;

	  vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
					   to_next, n_left_to_next,
					   bi0, bi1, next0, next1);
	}

      while (n_left_from > 0 && n_left_to_next > 0)
	{
	  u32 bi0, next0, uei0;
	  vlib_buffer_t *b0;
	  udp_encap_t *ue0;

	  bi0 = to_next[0] = from[0];

	  from += 1;
	  n_left_from -= 1;
	  to_next += 1;
	  n_left_to_next -= 1;

	  b0 = vlib_get_buffer (vm, bi0);

	  uei0 = vnet_buffer (b0)->ip.adj_index[VLIB_TX];

	  /* Rewrite the packet header and update lengths. */
	  ue0 = udp_encap_get (uei0);

	  vlib_increment_combined_counter (cm, thread_index, uei0, 1,
					   vlib_buffer_length_in_chain (vm,
									b0));

	  /* Paint: write the encap header onto the packet */
	  if (encap_family == AF_IP6)
	    {
	      const u8 n_bytes =
		sizeof (udp_header_t) + sizeof (ip6_header_t);
	      ip_udp_encap_one (vm, b0, (u8 *) &ue0->ue_hdrs.ip6, n_bytes,
				encap_family, payload_family, ue0->ue_flags);

	      if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
		{
		  udp6_encap_trace_t *tr =
		    vlib_add_trace (vm, node, b0, sizeof (*tr));
		  tr->udp = ue0->ue_hdrs.ip6.ue_udp;
		  tr->ip = ue0->ue_hdrs.ip6.ue_ip6;
		  tr->flags = ue0->ue_flags;
		  tr->flow_hash = vnet_buffer (b0)->ip.flow_hash;
		}
	    }
	  else
	    {
	      const u8 n_bytes =
		sizeof (udp_header_t) + sizeof (ip4_header_t);

	      ip_udp_encap_one (vm, b0, (u8 *) &ue0->ue_hdrs.ip4, n_bytes,
				encap_family, payload_family, ue0->ue_flags);

	      if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
		{
		  udp4_encap_trace_t *tr =
		    vlib_add_trace (vm, node, b0, sizeof (*tr));
		  tr->udp = ue0->ue_hdrs.ip4.ue_udp;
		  tr->ip = ue0->ue_hdrs.ip4.ue_ip4;
		  tr->flags = ue0->ue_flags;
		  tr->flow_hash = vnet_buffer (b0)->ip.flow_hash;
		}
	    }

	  next0 = ue0->ue_dpo.dpoi_next_node;
	  vnet_buffer (b0)->ip.adj_index[VLIB_TX] = ue0->ue_dpo.dpoi_index;

	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
					   to_next, n_left_to_next,
					   bi0, next0);
	}

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return frame->n_vectors;
}
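
/*
 * Six specializations: encap header in IP4 or IP6, payload IP4, IP6 or
 * not fixed at compile time (N_AF); e.g. udp6o4-encap pushes an IP4+UDP
 * header onto an IP6 payload.
 */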

VLIB_NODE_FN (udp4o4_encap_node)
(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
{
  return udp_encap_inline (vm, node, frame, AF_IP4, AF_IP4);
}

VLIB_NODE_FN (udp6o4_encap_node)
(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
{
  return udp_encap_inline (vm, node, frame, AF_IP4, AF_IP6);
}

VLIB_NODE_FN (udp4_encap_node)
(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
{
  return udp_encap_inline (vm, node, frame, AF_IP4, N_AF);
}

VLIB_NODE_FN (udp6o6_encap_node)
(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
{
  return udp_encap_inline (vm, node, frame, AF_IP6, AF_IP6);
}

VLIB_NODE_FN (udp4o6_encap_node)
(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
{
  return udp_encap_inline (vm, node, frame, AF_IP6, AF_IP4);
}

VLIB_NODE_FN (udp6_encap_node)
(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
{
  return udp_encap_inline (vm, node, frame, AF_IP6, N_AF);
}

/* *INDENT-OFF* */
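/*
 * The later registrations use sibling_of, so they share next-node arcs
 * with udp4o4-encap / udp6o6-encap: an edge added to one node is
 * propagated to all of its siblings.
 */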
VLIB_REGISTER_NODE (udp4o4_encap_node) = {
  .name = "udp4o4-encap",
  .vector_size = sizeof (u32),
  .format_trace = format_udp4_encap_trace,
  .n_next_nodes = 0,
};

VLIB_REGISTER_NODE (udp6o4_encap_node) = {
  .name = "udp6o4-encap",
  .vector_size = sizeof (u32),
  .format_trace = format_udp4_encap_trace,
  .n_next_nodes = 0,
  .sibling_of = "udp4o4-encap",
};

VLIB_REGISTER_NODE (udp4_encap_node) = {
  .name = "udp4-encap",
  .vector_size = sizeof (u32),
  .format_trace = format_udp4_encap_trace,
  .n_next_nodes = 0,
  .sibling_of = "udp4o4-encap",
};

VLIB_REGISTER_NODE (udp6o6_encap_node) = {
  .name = "udp6o6-encap",
  .vector_size = sizeof (u32),
  .format_trace = format_udp6_encap_trace,
  .n_next_nodes = 0,
};

VLIB_REGISTER_NODE (udp4o6_encap_node) = {
  .name = "udp4o6-encap",
  .vector_size = sizeof (u32),
  .format_trace = format_udp6_encap_trace,
  .n_next_nodes = 0,
  .sibling_of = "udp6o6-encap",
};

VLIB_REGISTER_NODE (udp6_encap_node) = {
  .name = "udp6-encap",
  .vector_size = sizeof (u32),
  .format_trace = format_udp6_encap_trace,
  .n_next_nodes = 0,
  .sibling_of = "udp6o6-encap",
};
/* *INDENT-ON* */


/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */