/*
 *------------------------------------------------------------------
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
#include <vlib/pci/pci.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/devices/devices.h>

#include <avf/avf.h>

#define AVF_TXQ_DESC_CMD(x)             (1 << (x + 4))
#define AVF_TXQ_DESC_CMD_EOP		AVF_TXQ_DESC_CMD(0)
#define AVF_TXQ_DESC_CMD_RS		AVF_TXQ_DESC_CMD(1)
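/* the command field occupies bits [13:4] of the TX descriptor's second
   quadword; EOP marks the last descriptor of a packet, RS requests that the
   device report completion by writing back the descriptor */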

static_always_inline u8
avf_tx_desc_get_dtyp (avf_tx_desc_t * d)
{
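  /* DTYP is the low 4 bits of qword[1]; the device writes back 0xF
     ("descriptor done") once it has consumed the descriptor */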
  return d->qword[1] & 0x0f;
}

uword
CLIB_MULTIARCH_FN (avf_interface_tx) (vlib_main_t * vm,
				      vlib_node_runtime_t * node,
				      vlib_frame_t * frame)
{
  avf_main_t *am = &avf_main;
  vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
  avf_device_t *ad = pool_elt_at_index (am->devices, rd->dev_instance);
  u32 thread_index = vlib_get_thread_index ();
  u8 qid = thread_index;
  avf_txq_t *txq = vec_elt_at_index (ad->txqs, qid % ad->num_queue_pairs);
  avf_tx_desc_t *d0, *d1, *d2, *d3;
  u32 *buffers = vlib_frame_args (frame);
  u32 bi0, bi1, bi2, bi3;
  u16 n_left = frame->n_vectors;
  vlib_buffer_t *b0, *b1, *b2, *b3;
  u16 mask = txq->size - 1;
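  /* one tx queue per thread; if threads outnumber queue pairs a queue may
     be shared, in which case the per-queue spinlock (when initialized)
     serializes access */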

  clib_spinlock_lock_if_init (&txq->lock);

  /* release consumed buffers: walk the ring from the oldest outstanding
     descriptor and free buffers the device has written back as done */
  if (txq->n_bufs)
    {
      u16 first, slot, n_free = 0;
      first = slot = (txq->next - txq->n_bufs) & mask;
      d0 = txq->descs + slot;
      while (n_free < txq->n_bufs && avf_tx_desc_get_dtyp (d0) == 0x0F)
	{
	  n_free++;
	  slot = (slot + 1) & mask;
	  d0 = txq->descs + slot;
	}

      if (n_free)
	{
	  txq->n_bufs -= n_free;
	  vlib_buffer_free_from_ring (vm, txq->bufs, first, txq->size,
				      n_free);
	}
    }

  while (n_left >= 7)
    {
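      /* enqueue 4 packets per iteration, prefetching the next 4 buffers to
         hide the buffer header load latency */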
      u16 slot0, slot1, slot2, slot3;

      vlib_prefetch_buffer_with_index (vm, buffers[4], LOAD);
      vlib_prefetch_buffer_with_index (vm, buffers[5], LOAD);
      vlib_prefetch_buffer_with_index (vm, buffers[6], LOAD);
      vlib_prefetch_buffer_with_index (vm, buffers[7], LOAD);

      slot0 = txq->next;
      slot1 = (txq->next + 1) & mask;
      slot2 = (txq->next + 2) & mask;
      slot3 = (txq->next + 3) & mask;

      d0 = txq->descs + slot0;
      d1 = txq->descs + slot1;
      d2 = txq->descs + slot2;
      d3 = txq->descs + slot3;

      bi0 = buffers[0];
      bi1 = buffers[1];
      bi2 = buffers[2];
      bi3 = buffers[3];

      txq->bufs[slot0] = bi0;
      txq->bufs[slot1] = bi1;
      txq->bufs[slot2] = bi2;
      txq->bufs[slot3] = bi3;
      b0 = vlib_get_buffer (vm, bi0);
      b1 = vlib_get_buffer (vm, bi1);
      b2 = vlib_get_buffer (vm, bi2);
      b3 = vlib_get_buffer (vm, bi3);

#if 0
      /* disabled alternative: use the buffer's physical (DMA) address
         instead of its virtual address */
      d0->qword[0] = vlib_get_buffer_data_physical_address (vm, bi0) +
	b0->current_data;
#else
      d0->qword[0] = pointer_to_uword (b0->data);
      d1->qword[0] = pointer_to_uword (b1->data);
      d2->qword[0] = pointer_to_uword (b2->data);
      d3->qword[0] = pointer_to_uword (b3->data);

#endif
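      /* qword[1] layout: DTYP in bits [3:0], CMD in [13:4], buffer length
         in [47:34]; hence the shift by 34 below */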
      u64 bits = AVF_TXQ_DESC_CMD_EOP | AVF_TXQ_DESC_CMD_RS;
      d0->qword[1] = ((u64) b0->current_length) << 34 | bits;
      d1->qword[1] = ((u64) b1->current_length) << 34 | bits;
      d2->qword[1] = ((u64) b2->current_length) << 34 | bits;
      d3->qword[1] = ((u64) b3->current_length) << 34 | bits;

      txq->next = (txq->next + 4) & mask;
      txq->n_bufs += 4;
      buffers += 4;
      n_left -= 4;
    }

  while (n_left)
    {
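      /* enqueue any remaining packets one descriptor at a time */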
      d0 = txq->descs + txq->next;
      bi0 = buffers[0];
      txq->bufs[txq->next] = bi0;
      b0 = vlib_get_buffer (vm, bi0);

#if 0
      /* disabled alternative: use the buffer's physical (DMA) address
         instead of its virtual address */
      d0->qword[0] = vlib_get_buffer_data_physical_address (vm, bi0) +
	b0->current_data;
#else
      d0->qword[0] = pointer_to_uword (b0->data);

#endif
      d0->qword[1] = ((u64) b0->current_length) << 34;
      d0->qword[1] |= AVF_TXQ_DESC_CMD_EOP | AVF_TXQ_DESC_CMD_RS;

      txq->next = (txq->next + 1) & mask;
      txq->n_bufs++;
      buffers++;
      n_left--;
    }
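  /* ensure all descriptor writes are globally visible before the tail
     pointer update publishes them to the device */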
  CLIB_MEMORY_BARRIER ();
  *(txq->qtx_tail) = txq->next;

  clib_spinlock_unlock_if_init (&txq->lock);

  return frame->n_vectors - n_left;
}

#ifndef CLIB_MULTIARCH_VARIANT
#if __x86_64__
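/* weak references to optional AVX2/AVX512 builds of the tx function; the
   constructor below selects the best variant the running CPU supports */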
vlib_node_function_t __clib_weak avf_interface_tx_avx512;
vlib_node_function_t __clib_weak avf_interface_tx_avx2;
static void __clib_constructor
avf_interface_tx_multiarch_select (void)
{
  if (avf_interface_tx_avx512 && clib_cpu_supports_avx512f ())
    avf_device_class.tx_function = avf_interface_tx_avx512;
  else if (avf_interface_tx_avx2 && clib_cpu_supports_avx2 ())
    avf_device_class.tx_function = avf_interface_tx_avx2;
}
#endif
#endif

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */