author    Juraj Sloboda <jsloboda@cisco.com>  2017-11-23 13:20:48 +0100
committer Ole Trøan <otroan@employees.org>  2017-12-20 11:12:24 +0000
commit    7b929793feba7d966c34b1ddb31dc818174f3a57 (patch)
tree      34eba85b00e775127732dddb56e679d14fc9cc75 /src/plugins/nat/nat.api
parent    5ff506a194f4951372500608a233c5c4cc6990a9 (diff)
Translate matching packets using NAT (VPP-1069)
Add an API function which enables forwarding of packets that do not match an existing translation or static mapping, instead of dropping them. When forwarding is enabled, matching packets are translated while non-matching packets are forwarded without translation.

Change-Id: Ic13040cbad16d3a1ecdc3e02a497171bef6aa413
Signed-off-by: Juraj Sloboda <jsloboda@cisco.com>
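For illustration, here is a minimal standalone C sketch of the request a client fills in. The struct below merely mirrors the fields of the nat44_forwarding_enable_disable message added by this patch; it is not the generated vl_api_* binding and omits the binary-API transport entirely.

/* Sketch only: a plain struct mirroring the .api message fields, used to
 * show the semantics of the 'enable' flag. */
#include <stdint.h>
#include <stdio.h>

typedef struct
{
  uint32_t client_index; /* opaque cookie identifying the sender */
  uint32_t context;      /* echoed back in the autoreply */
  uint8_t enable;        /* 1 = forward non-matching packets instead of dropping them */
} nat44_forwarding_enable_disable_sketch_t;

int
main (void)
{
  nat44_forwarding_enable_disable_sketch_t req = { .client_index = 0,
						    .context = 42,
						    .enable = 1 };
  printf ("requesting NAT44 forwarding %s\n",
	  req.enable ? "enable" : "disable");
  return 0;
}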
Diffstat (limited to 'src/plugins/nat/nat.api')
-rw-r--r--  src/plugins/nat/nat.api | 31
1 file changed, 31 insertions(+), 0 deletions(-)
diff --git a/src/plugins/nat/nat.api b/src/plugins/nat/nat.api
index 2a8eeb9489f..d6a912b72d0 100644
--- a/src/plugins/nat/nat.api
+++ b/src/plugins/nat/nat.api
@@ -606,6 +606,37 @@ autoreply define nat44_del_session {
u32 vrf_id;
};
+/** \brief Enable/disable forwarding for NAT44
+ Forward packets which don't match existing translation
+ or static mapping instead of dropping them.
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param enable - 1 for enable, 0 for disable
+*/
+autoreply define nat44_forwarding_enable_disable {
+ u32 client_index;
+ u32 context;
+ u8 enable;
+};
+
+/** \brief Check if forwarding is enabled or disabled
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+*/
+define nat44_forwarding_is_enabled {
+ u32 client_index;
+ u32 context;
+};
+
+/** \brief Response to check if forwarding is enabled or disabled
+ @param context - sender context, to match reply w/ request
+ @param enabled - 1 if enabled, 0 if disabled
+*/
+define nat44_forwarding_is_enabled_reply {
+ u32 context;
+ u8 enabled;
+};
+
/*
* Deterministic NAT (CGN) APIs
ef='#n231'>231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381
/* SPDX-License-Identifier: Apache-2.0
 * Copyright(c) 2021 Cisco Systems, Inc.
 */

#include <vppinfra/clib.h>
#include <vlib/vlib.h>
#include <vppinfra/vector_funcs.h>

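/* Per-call scratch state: a bitmap marking buffer slots already copied into
 * some next-node frame, plus the offset of the first bitmap word known to be
 * only partially consumed, so later scans can skip fully used words. */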
typedef struct
{
  uword used_elts[VLIB_FRAME_SIZE / 64];
  u32 uword_offset;
} extract_data_t;

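/* Scan up to 64 (elts, indices) pairs: copy every buffer index whose next
 * index equals 'index' into 'dst' and mark its position in the 'bmp' bitmap
 * word. Uses SIMD compare and compress stores where available, a scalar
 * loop otherwise. Returns the advanced 'dst' pointer. */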
static_always_inline u32 *
extract_unused_elts_x64 (u32 *elts, u16 *indices, u16 index, int n_left,
			 u64 *bmp, u32 *dst)
{
  u64 mask = 0;
#if defined(CLIB_HAVE_VEC128)
  mask = clib_compare_u16_x64 (index, indices);
  if (n_left == 64)
    {
      if (mask == ~0ULL)
	{
	  clib_memcpy_u32 (dst, elts, 64);
	  *bmp = ~0ULL;
	  return dst + 64;
	}
    }
  else
    mask &= pow2_mask (n_left);

  *bmp |= mask;

#if defined(CLIB_HAVE_VEC512_COMPRESS)
  u32x16u *ev = (u32x16u *) elts;
  for (int i = 0; i < 4; i++)
    {
      int cnt = _popcnt32 ((u16) mask);
      u32x16_compress_store (ev[i], mask, dst);
      dst += cnt;
      mask >>= 16;
    }

#elif defined(CLIB_HAVE_VEC256_COMPRESS)
  u32x8u *ev = (u32x8u *) elts;
  for (int i = 0; i < 8; i++)
    {
      int cnt = _popcnt32 ((u8) mask);
      u32x8_compress_store (ev[i], mask, dst);
      dst += cnt;
      mask >>= 8;
    }
#elif defined(CLIB_HAVE_VEC256)
  while (mask)
    {
      u16 bit = count_trailing_zeros (mask);
      mask = clear_lowest_set_bit (mask);
      dst++[0] = elts[bit];
    }
#else
  while (mask)
    {
      u16 bit = count_trailing_zeros (mask);
      mask ^= 1ULL << bit;
      dst++[0] = elts[bit];
    }
#endif
#else
  for (int i = 0; i < n_left; i++)
    {
      if (indices[i] == index)
	{
	  dst++[0] = elts[i];
	  mask |= 1ULL << i;
	}
    }
  *bmp |= mask;
#endif
  return dst;
}

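/* Walk the whole vector in 64-element blocks, extracting all buffers whose
 * next index equals 'index' into 'dst' and recording them in d->used_elts.
 * Returns the number of buffer indices extracted. */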
static_always_inline u32
extract_unused_elts_by_index (extract_data_t *d, u32 *elts, u16 *indices,
			      u16 index, int n_left, u32 *dst)
{
  u32 *dst0 = dst;
  u64 *bmp = d->used_elts;
  while (n_left >= 64)
    {
      dst = extract_unused_elts_x64 (elts, indices, index, 64, bmp, dst);

      /* next */
      indices += 64;
      elts += 64;
      bmp++;
      n_left -= 64;
    }

  if (n_left)
    dst = extract_unused_elts_x64 (elts, indices, index, n_left, bmp, dst);

  return dst - dst0;
}

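/* Return the position of the first buffer not yet extracted, skipping (and
 * remembering in d->uword_offset) bitmap words that are completely used. */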
static_always_inline u32
find_first_unused_elt (extract_data_t *d)
{
  u64 *ue = d->used_elts + d->uword_offset;

  while (PREDICT_FALSE (ue[0] == ~0))
    {
      ue++;
      d->uword_offset++;
    }

  return d->uword_offset * 64 + count_trailing_zeros (~ue[0]);
}

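/* Extract all not-yet-enqueued buffers whose next index is 'next_index' and
 * append them to that node's frame. If the current frame cannot hold the
 * worst case, extract into 'tmp' first and spill any overflow into a second
 * frame. Returns the number of buffers still left to distribute. */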
static_always_inline u32
enqueue_one (vlib_main_t *vm, vlib_node_runtime_t *node, extract_data_t *d,
	     u16 next_index, u32 *buffers, u16 *nexts, u32 n_buffers,
	     u32 n_left, u32 *tmp)
{
  vlib_frame_t *f;
  u32 n_extracted, n_free;
  u32 *to;

  f = vlib_get_next_frame_internal (vm, node, next_index, 0);

  n_free = VLIB_FRAME_SIZE - f->n_vectors;

  /* if the frame contains enough space for the worst-case scenario, we can
   * avoid using tmp */
  if (n_free >= n_left)
    to = (u32 *) vlib_frame_vector_args (f) + f->n_vectors;
  else
    to = tmp;

  n_extracted = extract_unused_elts_by_index (d, buffers, nexts, next_index,
					      n_buffers, to);

  if (to != tmp)
    {
      /* indices already written to frame, just close it */
      vlib_put_next_frame (vm, node, next_index, n_free - n_extracted);
    }
  else if (n_free >= n_extracted)
    {
      /* enough space in the existing frame */
      to = (u32 *) vlib_frame_vector_args (f) + f->n_vectors;
      vlib_buffer_copy_indices (to, tmp, n_extracted);
      vlib_put_next_frame (vm, node, next_index, n_free - n_extracted);
    }
  else
    {
      /* full frame */
      to = (u32 *) vlib_frame_vector_args (f) + f->n_vectors;
      vlib_buffer_copy_indices (to, tmp, n_free);
      vlib_put_next_frame (vm, node, next_index, 0);

      /* second frame */
      u32 n_2nd_frame = n_extracted - n_free;
      f = vlib_get_next_frame_internal (vm, node, next_index, 1);
      to = vlib_frame_vector_args (f);
      vlib_buffer_copy_indices (to, tmp + n_free, n_2nd_frame);
      vlib_put_next_frame (vm, node, next_index,
			   VLIB_FRAME_SIZE - n_2nd_frame);
    }

  return n_left - n_extracted;
}

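/* Enqueue a vector of buffer indices to per-buffer next nodes. Buffers are
 * processed in VLIB_FRAME_SIZE chunks; within a chunk, the next index of the
 * first unprocessed buffer is picked and all buffers sharing it are moved in
 * one pass, repeating until the chunk is fully distributed. */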
void __clib_section (".vlib_buffer_enqueue_to_next_fn")
CLIB_MULTIARCH_FN (vlib_buffer_enqueue_to_next_fn)
(vlib_main_t *vm, vlib_node_runtime_t *node, u32 *buffers, u16 *nexts,
 uword count)
{
  u32 tmp[VLIB_FRAME_SIZE];
  u32 n_left;
  u16 next_index;

  while (count >= VLIB_FRAME_SIZE)
    {
      extract_data_t d = {};
      n_left = VLIB_FRAME_SIZE;

      next_index = nexts[0];
      n_left = enqueue_one (vm, node, &d, next_index, buffers, nexts,
			    VLIB_FRAME_SIZE, n_left, tmp);

      while (n_left)
	{
	  next_index = nexts[find_first_unused_elt (&d)];
	  n_left = enqueue_one (vm, node, &d, next_index, buffers, nexts,
				VLIB_FRAME_SIZE, n_left, tmp);
	}

      buffers += VLIB_FRAME_SIZE;
      nexts += VLIB_FRAME_SIZE;
      count -= VLIB_FRAME_SIZE;
    }

  if (count)
    {
      extract_data_t d = {};
      next_index = nexts[0];
      n_left = count;

      n_left = enqueue_one (vm, node, &d, next_index, buffers, nexts, count,
			    n_left, tmp);

      while (n_left)
	{
	  next_index = nexts[find_first_unused_elt (&d)];
	  n_left = enqueue_one (vm, node, &d, next_index, buffers, nexts,
				count, n_left, tmp);
	}
    }
}

CLIB_MARCH_FN_REGISTRATION (vlib_buffer_enqueue_to_next_fn);

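/* Enqueue all buffer indices to a single next node, filling the current
 * frame first and allocating further frames as needed. */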
void __clib_section (".vlib_buffer_enqueue_to_single_next_fn")
CLIB_MULTIARCH_FN (vlib_buffer_enqueue_to_single_next_fn)
(vlib_main_t *vm, vlib_node_runtime_t *node, u32 *buffers, u16 next_index,
 u32 count)
{
  u32 *to_next, n_left_to_next, n_enq;

  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

  if (PREDICT_TRUE (n_left_to_next >= count))
    {
      vlib_buffer_copy_indices (to_next, buffers, count);
      n_left_to_next -= count;
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
      return;
    }

  n_enq = n_left_to_next;
next:
  vlib_buffer_copy_indices (to_next, buffers, n_enq);
  n_left_to_next -= n_enq;

  if (PREDICT_FALSE (count > n_enq))
    {
      count -= n_enq;
      buffers += n_enq;

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
      n_enq = clib_min (n_left_to_next, count);
      goto next;
    }
  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
}
CLIB_MARCH_FN_REGISTRATION (vlib_buffer_enqueue_to_single_next_fn);

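/* Hand off buffers to other threads via per-thread frame queues. Buffers
 * destined to the same thread are batched into handoff queue elements; when
 * drop_on_congestion is set, buffers headed for congested queues are freed
 * instead. Returns the number of buffers actually handed off. */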
u32 __clib_section (".vlib_buffer_enqueue_to_thread_fn")
CLIB_MULTIARCH_FN (vlib_buffer_enqueue_to_thread_fn)
(vlib_main_t *vm, u32 frame_queue_index, u32 *buffer_indices,
 u16 *thread_indices, u32 n_packets, int drop_on_congestion)
{
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  vlib_frame_queue_main_t *fqm;
  vlib_frame_queue_per_thread_data_t *ptd;
  u32 n_left = n_packets;
  u32 drop_list[VLIB_FRAME_SIZE], *dbi = drop_list, n_drop = 0;
  vlib_frame_queue_elt_t *hf = 0;
  u32 n_left_to_next_thread = 0, *to_next_thread = 0;
  u32 next_thread_index, current_thread_index = ~0;
  int i;

  fqm = vec_elt_at_index (tm->frame_queue_mains, frame_queue_index);
  ptd = vec_elt_at_index (fqm->per_thread_data, vm->thread_index);

  while (n_left)
    {
      next_thread_index = thread_indices[0];

      if (next_thread_index != current_thread_index)
	{
	  if (drop_on_congestion &&
	      is_vlib_frame_queue_congested (
		frame_queue_index, next_thread_index, fqm->queue_hi_thresh,
		ptd->congested_handoff_queue_by_thread_index))
	    {
	      dbi[0] = buffer_indices[0];
	      dbi++;
	      n_drop++;
	      goto next;
	    }

	  if (hf)
	    hf->n_vectors = VLIB_FRAME_SIZE - n_left_to_next_thread;

	  hf = vlib_get_worker_handoff_queue_elt (
	    frame_queue_index, next_thread_index,
	    ptd->handoff_queue_elt_by_thread_index);

	  n_left_to_next_thread = VLIB_FRAME_SIZE - hf->n_vectors;
	  to_next_thread = &hf->buffer_index[hf->n_vectors];
	  current_thread_index = next_thread_index;
	}

      to_next_thread[0] = buffer_indices[0];
      to_next_thread++;
      n_left_to_next_thread--;

      if (n_left_to_next_thread == 0)
	{
	  hf->n_vectors = VLIB_FRAME_SIZE;
	  vlib_put_frame_queue_elt (hf);
	  vlib_get_main_by_index (current_thread_index)->check_frame_queues =
	    1;
	  current_thread_index = ~0;
	  ptd->handoff_queue_elt_by_thread_index[next_thread_index] = 0;
	  hf = 0;
	}

      /* next */
    next:
      thread_indices += 1;
      buffer_indices += 1;
      n_left -= 1;
    }

  if (hf)
    hf->n_vectors = VLIB_FRAME_SIZE - n_left_to_next_thread;

  /* Ship frames to the thread nodes */
  for (i = 0; i < vec_len (ptd->handoff_queue_elt_by_thread_index); i++)
    {
      if (ptd->handoff_queue_elt_by_thread_index[i])
	{
	  hf = ptd->handoff_queue_elt_by_thread_index[i];
	  /*
	   * It works better to let the handoff node
	   * rate-adapt, always ship the handoff queue element.
	   */
	  if (1 || hf->n_vectors == hf->last_n_vectors)
	    {
	      vlib_put_frame_queue_elt (hf);
	      vlib_get_main_by_index (i)->check_frame_queues = 1;
	      ptd->handoff_queue_elt_by_thread_index[i] = 0;
	    }
	  else
	    hf->last_n_vectors = hf->n_vectors;
	}
      ptd->congested_handoff_queue_by_thread_index[i] =
	(vlib_frame_queue_t *) (~0);
    }

  if (drop_on_congestion && n_drop)
    vlib_buffer_free (vm, drop_list, n_drop);

  return n_packets - n_drop;
}

CLIB_MARCH_FN_REGISTRATION (vlib_buffer_enqueue_to_thread_fn);

#ifndef CLIB_MARCH_VARIANT
vlib_buffer_func_main_t vlib_buffer_func_main;

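/* Resolve the best march variant of each enqueue function once at init time
 * and publish the pointers through vlib_buffer_func_main for node code. */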
static clib_error_t *
vlib_buffer_funcs_init (vlib_main_t *vm)
{
  vlib_buffer_func_main_t *bfm = &vlib_buffer_func_main;
  bfm->buffer_enqueue_to_next_fn =
    CLIB_MARCH_FN_POINTER (vlib_buffer_enqueue_to_next_fn);
  bfm->buffer_enqueue_to_single_next_fn =
    CLIB_MARCH_FN_POINTER (vlib_buffer_enqueue_to_single_next_fn);
  bfm->buffer_enqueue_to_thread_fn =
    CLIB_MARCH_FN_POINTER (vlib_buffer_enqueue_to_thread_fn);
  return 0;
}

VLIB_INIT_FUNCTION (vlib_buffer_funcs_init);
#endif