path: root/src/vnet/vxlan-gbp/encap.c
author    Dave Barach <dave@barachs.net>    2018-11-13 16:34:13 -0500
committer Florin Coras <florin.coras@gmail.com>    2018-11-14 15:54:01 +0000
commit 178cf493d009995b28fdf220f04c98860ff79a9b (patch)
tree 097c1be82b8f6fa9bc04b9b1e193158e2e4997eb /src/vnet/vxlan-gbp/encap.c
parent 6917b94f2146aa51195a6a2a1ccd8416a1d74bf3 (diff)
Remove c-11 memcpy checks from perf-critical code
Change-Id: Id4f37f5d4a03160572954a416efa1ef9b3d79ad1
Signed-off-by: Dave Barach <dave@barachs.net>
Diffstat (limited to 'src/vnet/vxlan-gbp/encap.c')
-rw-r--r--  src/vnet/vxlan-gbp/encap.c  15
1 file changed, 9 insertions(+), 6 deletions(-)
diff --git a/src/vnet/vxlan-gbp/encap.c b/src/vnet/vxlan-gbp/encap.c
index 07142c8a521..d260ccc0deb 100644
--- a/src/vnet/vxlan-gbp/encap.c
+++ b/src/vnet/vxlan-gbp/encap.c
@@ -186,10 +186,12 @@ vxlan_gbp_encap_inline (vlib_main_t * vm,
/* vnet_rewrite_two_header writes only in (uword) 8 bytes chunks
* and discards the first 4 bytes of the (36 bytes ip4 underlay) rewrite
* use memcpy as a workaround */
- clib_memcpy (underlay0, t0->rewrite_header.data + rw_hdr_offset,
- underlay_hdr_len);
- clib_memcpy (underlay1, t1->rewrite_header.data + rw_hdr_offset,
- underlay_hdr_len);
+ clib_memcpy_fast (underlay0,
+ t0->rewrite_header.data + rw_hdr_offset,
+ underlay_hdr_len);
+ clib_memcpy_fast (underlay1,
+ t1->rewrite_header.data + rw_hdr_offset,
+ underlay_hdr_len);
ip4_header_t *ip4_0, *ip4_1;
qos_bits_t ip4_0_tos = 0, ip4_1_tos = 0;
@@ -368,8 +370,9 @@ vxlan_gbp_encap_inline (vlib_main_t * vm,
/* vnet_rewrite_one_header writes only in (uword) 8 bytes chunks
* and discards the first 4 bytes of the (36 bytes ip4 underlay) rewrite
* use memcpy as a workaround */
- clib_memcpy (underlay0, t0->rewrite_header.data + rw_hdr_offset,
- underlay_hdr_len);
+ clib_memcpy_fast (underlay0,
+ t0->rewrite_header.data + rw_hdr_offset,
+ underlay_hdr_len);
u32 len0 = vlib_buffer_length_in_chain (vm, b0);
u16 payload_l0 = clib_host_to_net_u16 (len0 - l3_len);
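The hunks above swap clib_memcpy for clib_memcpy_fast in the underlay-rewrite copies, which run for every packet (twice per iteration in the dual-packet loop). Per the commit message, clib_memcpy carries C11-style argument checking, and those per-call branches are wasted work on a forwarding hot path where the pointers and the fixed underlay_hdr_len are already known to be valid. Below is a minimal sketch of the trade-off; checked_memcpy and fast_memcpy are hypothetical names for illustration, not VPP's clib APIs.

#include <errno.h>
#include <stddef.h>
#include <string.h>

/* Checked flavor, in the spirit of C11 Annex K memcpy_s: validates
 * arguments before copying, which costs branches on every call. */
static inline int
checked_memcpy (void *dst, size_t dst_size, const void *src, size_t n)
{
  if (dst == NULL || src == NULL || n > dst_size)
    return EINVAL;
  memcpy (dst, src, n);
  return 0;
}

/* Fast flavor: no checks; safe only where the caller already
 * guarantees valid pointers and length, as in the encap loop above. */
static inline void *
fast_memcpy (void *dst, const void *src, size_t n)
{
  return memcpy (dst, src, n);
}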
/*
 * nsh_pop.c - NSH pop-only processing
 *
 * Copyright (c) 2017 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/vnet.h>
#include <vnet/plugin/plugin.h>
#include <nsh/nsh.h>
#include <vnet/gre/gre.h>
#include <vnet/vxlan/vxlan.h>
#include <vnet/vxlan-gpe/vxlan_gpe.h>
#include <vnet/l2/l2_classify.h>

#include <vlibapi/api.h>
#include <vlibmemory/api.h>

extern nsh_option_map_t * nsh_md2_lookup_option (u16 class, u8 type);

extern u8 * format_nsh_header (u8 * s, va_list * args);
extern u8 * format_nsh_node_map_trace (u8 * s, va_list * args);
extern u8 * format_nsh_pop_header (u8 * s, va_list * args);
extern u8 * format_nsh_pop_node_map_trace (u8 * s, va_list * args);

static uword
nsh_pop_inline (vlib_main_t * vm,
               vlib_node_runtime_t * node,
               vlib_frame_t * from_frame)
{
  u32 n_left_from, next_index, *from, *to_next;
  nsh_main_t * nm = &nsh_main;

  from = vlib_frame_vector_args(from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
	{
	  u32 bi0, bi1;
	  vlib_buffer_t * b0, *b1;
	  u32 next0 = NSH_NODE_NEXT_DROP, next1 = NSH_NODE_NEXT_DROP;
	  uword * entry0, *entry1;
	  nsh_base_header_t * hdr0 = 0, *hdr1 = 0;
	  u32 header_len0 = 0, header_len1 = 0;
	  u32 nsp_nsi0, nsp_nsi1;
	  u32 error0, error1;
	  nsh_map_t * map0 = 0, *map1 = 0;

	  /* Prefetch next iteration. */
	  {
	    vlib_buffer_t * p2, *p3;

	    p2 = vlib_get_buffer(vm, from[2]);
	    p3 = vlib_get_buffer(vm, from[3]);

	    vlib_prefetch_buffer_header(p2, LOAD);
	    vlib_prefetch_buffer_header(p3, LOAD);

	    CLIB_PREFETCH(p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
	    CLIB_PREFETCH(p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
	  }

	  bi0 = from[0];
	  bi1 = from[1];
	  to_next[0] = bi0;
	  to_next[1] = bi1;
	  from += 2;
	  to_next += 2;
	  n_left_from -= 2;
	  n_left_to_next -= 2;

	  error0 = 0;
	  error1 = 0;

	  b0 = vlib_get_buffer(vm, bi0);
	  b1 = vlib_get_buffer(vm, bi1);
	  hdr0 = vlib_buffer_get_current (b0);
	  nsp_nsi0 = hdr0->nsp_nsi;
	  header_len0 = hdr0->length * 4;

	  hdr1 = vlib_buffer_get_current (b1);
	  nsp_nsi1 = hdr1->nsp_nsi;
	  header_len1 = hdr1->length * 4;

	  /* Process packet 0 */
	  entry0 = hash_get_mem(nm->nsh_mapping_by_key, &nsp_nsi0);
	  if (PREDICT_FALSE(entry0 == 0))
	    {
	      error0 = NSH_NODE_ERROR_NO_MAPPING;
	      goto trace0;
	    }

	  /* Entry should point to a mapping ...*/
	  map0 = pool_elt_at_index(nm->nsh_mappings, entry0[0]);
	  if (PREDICT_FALSE(map0 == 0))
	    {
	      error0 = NSH_NODE_ERROR_NO_MAPPING;
	      goto trace0;
	    }

	  /* Set up for the next node: which node handles this packet and where it goes */
	  next0 = map0->next_node;
	  //vnet_buffer(b0)->sw_if_index[VLIB_TX] = map0->sw_if_index;

	  if (PREDICT_FALSE (map0->nsh_action == NSH_ACTION_POP))
	    {
	      /* Manipulate MD2 */
	      if (PREDICT_FALSE (hdr0->md_type == 2))
		{
		  if (PREDICT_FALSE (next0 == NSH_NODE_NEXT_DROP))
		    {
		      error0 = NSH_NODE_ERROR_INVALID_OPTIONS;
		      goto trace0;
		    }
		  //vnet_buffer (b0)->sw_if_index[VLIB_RX] = map0->sw_if_index;
		}

	      /* Pop NSH header */
	      vlib_buffer_advance (b0, (word) header_len0);
	      goto trace0;
	    }

	  entry0 = hash_get_mem(nm->nsh_entry_by_key, &map0->mapped_nsp_nsi);
	  if (PREDICT_FALSE(entry0 == 0))
	    {
	      error0 = NSH_NODE_ERROR_NO_ENTRY;
	      goto trace0;
	    }

	trace0:
	  b0->error = error0 ? node->errors[error0] : 0;

	  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
	    {
	      nsh_input_trace_t *tr = vlib_add_trace (vm, node, b0, sizeof (*tr));
	      clib_memcpy_fast (&(tr->trace_data), hdr0, hdr0->length * 4);
	    }

	  /* Process packet 1 */
	  entry1 = hash_get_mem(nm->nsh_mapping_by_key, &nsp_nsi1);
	  if (PREDICT_FALSE(entry1 == 0))
	    {
	      error1 = NSH_NODE_ERROR_NO_MAPPING;
	      goto trace1;
	    }

	  /* Entry should point to a mapping ...*/
	  map1 = pool_elt_at_index(nm->nsh_mappings, entry1[0]);
	  if (PREDICT_FALSE(map1 == 0))
	    {
	      error1 = NSH_NODE_ERROR_NO_MAPPING;
	      goto trace1;
	    }

	  /* Set up for the next node: which node handles this packet and where it goes */
	  next1 = map1->next_node;
	  //vnet_buffer(b1)->sw_if_index[VLIB_TX] = map1->sw_if_index;

	  if (PREDICT_FALSE (map1->nsh_action == NSH_ACTION_POP))
	    {
	      /* Manipulate MD2 */
	      if (PREDICT_FALSE (hdr1->md_type == 2))
		{
		  if (PREDICT_FALSE (next1 == NSH_NODE_NEXT_DROP))
		    {
		      error1 = NSH_NODE_ERROR_INVALID_OPTIONS;
		      goto trace1;
		    }
		  //vnet_buffer (b1)->sw_if_index[VLIB_RX] = map1->sw_if_index;
		}

	      /* Pop NSH header */
	      vlib_buffer_advance (b1, (word) header_len1);
	      goto trace1;
	    }

	  entry1 = hash_get_mem(nm->nsh_entry_by_key, &map1->mapped_nsp_nsi);
	  if (PREDICT_FALSE(entry1 == 0))
	    {
	      error1 = NSH_NODE_ERROR_NO_ENTRY;
	      goto trace1;
	    }

	trace1:
	  b1->error = error1 ? node->errors[error1] : 0;

	  if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
	    {
	      nsh_input_trace_t *tr = vlib_add_trace (vm, node, b1, sizeof (*tr));
	      clib_memcpy_fast (&(tr->trace_data), hdr1, hdr1->length * 4);
	    }

	  vlib_validate_buffer_enqueue_x2(vm, node, next_index, to_next,
					  n_left_to_next, bi0, bi1, next0, next1);

	}

      while (n_left_from > 0 && n_left_to_next > 0)
	{
	  u32 bi0 = 0;
	  vlib_buffer_t * b0 = NULL;
	  u32 next0 = NSH_NODE_NEXT_DROP;
	  uword * entry0;
	  nsh_base_header_t * hdr0 = 0;
	  u32 header_len0 = 0;
	  u32 nsp_nsi0;
	  u32 error0;
	  nsh_map_t * map0 = 0;

	  bi0 = from[0];
	  to_next[0] = bi0;
	  from += 1;
	  to_next += 1;
	  n_left_from -= 1;
	  n_left_to_next -= 1;
	  error0 = 0;

	  b0 = vlib_get_buffer(vm, bi0);
	  hdr0 = vlib_buffer_get_current(b0);

	  nsp_nsi0 = hdr0->nsp_nsi;
	  header_len0 = hdr0->length * 4;

	  entry0 = hash_get_mem(nm->nsh_mapping_by_key, &nsp_nsi0);

	  if (PREDICT_FALSE(entry0 == 0))
	    {
	      error0 = NSH_NODE_ERROR_NO_MAPPING;
	      goto trace00;
	    }

	  /* Entry should point to a mapping ...*/
	  map0 = pool_elt_at_index(nm->nsh_mappings, entry0[0]);

	  if (PREDICT_FALSE(map0 == 0))
	    {
	      error0 = NSH_NODE_ERROR_NO_MAPPING;
	      goto trace00;
	    }

	  /* Set up for the next node: which node handles this packet and where it goes */
	  next0 = map0->next_node;
	  //vnet_buffer(b0)->sw_if_index[VLIB_TX] = map0->sw_if_index;

	  if (PREDICT_FALSE (map0->nsh_action == NSH_ACTION_POP))
	    {
	      /* Manipulate MD2 */
	      if (PREDICT_FALSE (hdr0->md_type == 2))
		{
		  if (PREDICT_FALSE (next0 == NSH_NODE_NEXT_DROP))
		    {
		      error0 = NSH_NODE_ERROR_INVALID_OPTIONS;
		      goto trace00;
		    }
		  //vnet_buffer (b0)->sw_if_index[VLIB_RX] = map0->sw_if_index;
		}

	      /* Pop NSH header */
	      vlib_buffer_advance (b0, (word) header_len0);
	      goto trace00;
	    }

	  entry0 = hash_get_mem(nm->nsh_entry_by_key, &map0->mapped_nsp_nsi);
	  if (PREDICT_FALSE(entry0 == 0))
	    {
	      error0 = NSH_NODE_ERROR_NO_ENTRY;
	      goto trace00;
	    }

	trace00:
	  b0->error = error0 ? node->errors[error0] : 0;

	  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
	    {
	      nsh_input_trace_t *tr = vlib_add_trace (vm, node, b0, sizeof (*tr));
	      clib_memcpy_fast (&(tr->trace_data[0]), hdr0, hdr0->length * 4);
	    }

	  vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next,
					  n_left_to_next, bi0, next0);
	}

      vlib_put_next_frame(vm, node, next_index, n_left_to_next);

    }

  return from_frame->n_vectors;
}
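A note on the header_len = hdr->length * 4 arithmetic used throughout the loops above: the NSH base header's Length field counts the total header (base header, service path header, and metadata) in 4-byte words, per RFC 8300, so multiplying by 4 gives the byte count that vlib_buffer_advance() skips when popping. A minimal sketch under that assumption follows; the struct layout is illustrative only and is not the plugin's nsh_base_header_t.

#include <stdint.h>

/* Hypothetical, simplified layout: real NSH packs version, flags, and
 * TTL bits around a 6-bit length field in the first two bytes. */
typedef struct
{
  uint8_t flags;      /* version / O bit / TTL bits (simplified) */
  uint8_t length;     /* total NSH header length, in 4-byte words */
  uint8_t md_type;    /* 1 = fixed-length metadata, 2 = variable TLVs */
  uint8_t next_proto; /* protocol carried after the NSH header */
  uint32_t nsp_nsi;   /* 24-bit service path id + 8-bit service index */
} nsh_base_sketch_t;

/* Bytes to advance past the header when popping: words -> bytes. */
static inline uint32_t
nsh_pop_bytes (const nsh_base_sketch_t *h)
{
  return (uint32_t) h->length * 4;
}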

/**
 * @brief Graph processing dispatch function for NSH Pop
 *
 * @node nsh-pop
 * @param *vm
 * @param *node
 * @param *from_frame
 *
 * @return from_frame->n_vectors
 *
 */
VLIB_NODE_FN (nsh_pop_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
                  vlib_frame_t * from_frame)
{
  return nsh_pop_inline (vm, node, from_frame);
}

static char * nsh_pop_node_error_strings[] = {
#define _(sym,string) string,
  foreach_nsh_node_error
#undef _
};

/* register nsh-pop node */
VLIB_REGISTER_NODE (nsh_pop_node) = {
  .name = "nsh-pop",
  .vector_size = sizeof (u32),
  .format_trace = format_nsh_pop_node_map_trace,
  .format_buffer = format_nsh_pop_header,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(nsh_pop_node_error_strings),
  .error_strings = nsh_pop_node_error_strings,

  .n_next_nodes = NSH_NODE_N_NEXT,

  .next_nodes = {
#define _(s,n) [NSH_NODE_NEXT_##s] = n,
    foreach_nsh_node_next
#undef _
  },
};
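The error-string table and next-node map above follow the usual VLIB X-macro convention: a single list macro is expanded with different definitions of _() to produce a matching enum and string table. A self-contained sketch of the same pattern, using a hypothetical foreach_demo_error list:

#include <stdio.h>

#define foreach_demo_error              \
  _(NONE, "no error")                   \
  _(NO_MAPPING, "no mapping for key")   \
  _(NO_ENTRY, "no entry for key")

/* Expand once to build the enum... */
typedef enum
{
#define _(sym, str) DEMO_ERROR_##sym,
  foreach_demo_error
#undef _
  DEMO_N_ERROR,
} demo_error_t;

/* ...and again to build the parallel string table. */
static char *demo_error_strings[] = {
#define _(sym, str) str,
  foreach_demo_error
#undef _
};

int
main (void)
{
  /* Each enum value indexes its matching string. */
  for (int i = 0; i < DEMO_N_ERROR; i++)
    printf ("%d: %s\n", i, demo_error_strings[i]);
  return 0;
}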