/*
 * Copyright (c) 2017-2019 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <unistd.h>
#include <errno.h>

#include <rte_config.h>
#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_vfio.h>
#include <rte_version.h>

#include <vlib/vlib.h>
#include <dpdk/buffer.h>

STATIC_ASSERT (VLIB_BUFFER_PRE_DATA_SIZE == RTE_PKTMBUF_HEADROOM,
               "VLIB_BUFFER_PRE_DATA_SIZE must be equal to RTE_PKTMBUF_HEADROOM");

extern struct rte_mbuf *dpdk_mbuf_template_by_pool_index;

#ifndef CLIB_MARCH_VARIANT
struct rte_mempool **dpdk_mempool_by_buffer_pool_index = 0;
struct rte_mempool **dpdk_no_cache_mempool_by_buffer_pool_index = 0;
struct rte_mbuf *dpdk_mbuf_template_by_pool_index = 0;

clib_error_t *
dpdk_buffer_pool_init (vlib_main_t * vm, vlib_buffer_pool_t * bp)
{
  uword buffer_mem_start = vm->buffer_main->buffer_mem_start;
  struct rte_mempool *mp, *nmp;
  struct rte_pktmbuf_pool_private priv;
  enum rte_iova_mode iova_mode;
  u32 i;
  u8 *name = 0;

  /* each pool element carries the rte_mbuf header, the vlib_buffer_t
     metadata and the buffer data itself */
  u32 elt_size =
    sizeof (struct rte_mbuf) + sizeof (vlib_buffer_t) + bp->data_size;

  /* create empty mempools */
  vec_validate_aligned (dpdk_mempool_by_buffer_pool_index, bp->index,
                        CLIB_CACHE_LINE_BYTES);
  vec_validate_aligned (dpdk_no_cache_mempool_by_buffer_pool_index, bp->index,
                        CLIB_CACHE_LINE_BYTES);

  /* normal mempool */
  name = format (name, "vpp pool %u%c", bp->index, 0);
  mp = rte_mempool_create_empty ((char *) name, bp->n_buffers, elt_size, 512,
                                 sizeof (priv), bp->numa_node, 0);
  if (!mp)
    {
      vec_free (name);
      return clib_error_return (0,
                                "failed to create normal mempool for numa node %u",
                                bp->index);
    }
  vec_reset_length (name);

  /* non-cached mempool */
  name = format (name, "vpp pool %u (no cache)%c", bp->index, 0);
  nmp = rte_mempool_create_empty ((char *) name, bp->n_buffers, elt_size, 0,
                                  sizeof (priv), bp->numa_node, 0);
  if (!nmp)
    {
      rte_mempool_free (mp);
      vec_free (name);
      return clib_error_return (0,
                                "failed to create non-cache mempool for numa node %u",
                                bp->index);
    }
  vec_free (name);

  dpdk_mempool_by_buffer_pool_index[bp->index] = mp;
  dpdk_no_cache_mempool_by_buffer_pool_index[bp->index] = nmp;

  mp->pool_id = nmp->pool_id = bp->index;

  rte_mempool_set_ops_byname (mp, "vpp", NULL);
  rte_mempool_set_ops_byname (nmp, "vpp-no-cache", NULL);

  /* Call the mempool priv initializer */
  memset (&priv, 0, sizeof (priv));
  priv.mbuf_data_room_size = VLIB_BUFFER_PRE_DATA_SIZE +
    vlib_buffer_get_default_data_size (vm);
  priv.mbuf_priv_size = VLIB_BUFFER_HDR_SIZE;
  rte_pktmbuf_pool_init (mp, &priv);
  rte_pktmbuf_pool_init (nmp, &priv);

  iova_mode = rte_eal_iova_mode ();

  /* populate mempool object buffer header */
  for (i = 0; i < bp->n_buffers; i++)
    {
      struct rte_mempool_objhdr *hdr;
      vlib_buffer_t *b = vlib_get_buffer (vm, bp->buffers[i]);
      struct rte_mbuf *mb = rte_mbuf_from_vlib_buffer (b);
      hdr = (struct rte_mempool_objhdr *) RTE_PTR_SUB (mb, sizeof (*hdr));
      hdr->mp = mp;
      hdr->iova = (iova_mode == RTE_IOVA_VA) ?
        pointer_to_uword (mb) : vlib_physmem_get_pa (vm, mb);
      /* the same elements back both pools: insert each object header into
         the cached and the non-cached element lists */
      STAILQ_INSERT_TAIL (&mp->elt_list, hdr, next);
      STAILQ_INSERT_TAIL (&nmp->elt_list, hdr, next);
      mp->populated_size++;
      nmp->populated_size++;
    }

  /* call the object initializers */
  rte_mempool_obj_iter (mp, rte_pktmbuf_init, 0);

  /* create mbuf header template from the first buffer in the pool */
  vec_validate_aligned (dpdk_mbuf_template_by_pool_index, bp->index,
                        CLIB_CACHE_LINE_BYTES);
  clib_memcpy (vec_elt_at_index (dpdk_mbuf_template_by_pool_index, bp->index),
               rte_mbuf_from_vlib_buffer (vlib_buffer_ptr_from_index
                                          (buffer_mem_start, *bp->buffers,
                                           0)), sizeof (struct rte_mbuf));

  for (i = 0; i < bp->n_buffers; i++)
    {
      vlib_buffer_t *b;
      b = vlib_buffer_ptr_from_index (buffer_mem_start, bp->buffers[i], 0);
      vlib_buffer_copy_template (b, &bp->buffer_template);
    }

  /* map DMA pages if at least one physical device exists */
  if (rte_eth_dev_count_avail ())
    {
      uword i;
      size_t page_sz;
      vlib_physmem_map_t *pm;
      int do_vfio_map = 1;

      pm = vlib_physmem_get_map (vm, bp->physmem_map_index);
      page_sz = 1ULL << pm->log2_page_size;

      for (i = 0; i < pm->n_pages; i++)
        {
          char *va = ((char *) pm->base) + i * page_sz;
          uword pa = (iova_mode == RTE_IOVA_VA) ?
            pointer_to_uword (va) : pm->page_table[i];

          /* stop trying to map further pages after the first failure */
          if (do_vfio_map &&
#if RTE_VERSION < RTE_VERSION_NUM(19, 11, 0, 0)
              rte_vfio_dma_map (pointer_to_uword (va), pa, page_sz))
#else
              rte_vfio_container_dma_map (RTE_VFIO_DEFAULT_CONTAINER_FD,
                                          pointer_to_uword (va), pa,
                                          page_sz))
#endif
            do_vfio_map = 0;

          struct rte_mempool_memhdr *memhdr;
          memhdr = clib_mem_alloc (sizeof (*memhdr));
          memhdr->mp = mp;
          memhdr->addr = va;
          memhdr->iova = pa;
          memhdr->len = page_sz;
          memhdr->free_cb = 0;
          memhdr->opaque = 0;

          STAILQ_INSERT_TAIL (&mp->mem_list, memhdr, next);
          mp->nb_mem_chunks++;
        }
    }

  return 0;
}

static int
dpdk_ops_vpp_alloc (struct rte_mempool *mp)
{
  clib_warning ("");
  return 0;
}

static void
dpdk_ops_vpp_free (struct rte_mempool *mp)
{
  clib_warning ("");
}
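/*
 * The alloc/free ops above are near-empty by design: pool memory is owned
 * and pre-populated by VPP in dpdk_buffer_pool_init (), so DPDK is never
 * expected to allocate or release it itself. The bare clib_warning ()
 * calls simply leave a trace should these ops ever be invoked.
 */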
# Copyright (c) 2018 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

add_vpp_plugin(cdp
  SOURCES
  cdp.c
  cdp_input.c
  cdp_node.c
  cdp_periodic.c

  API_FILES
  cdp.api

  API_TEST_SOURCES
  cdp_test.c

  INSTALL_HEADERS
  cdp.h
  cdp_protocol.h
)
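/*
 * The code below registers two custom mempool ops tables with DPDK:
 * "vpp" (created with a per-lcore object cache) and "vpp-no-cache"
 * (cache size 0), matching the two mempools created per buffer pool in
 * dpdk_buffer_pool_init (). CLIB_MARCH_FN_POINTER resolves each handler
 * to the enqueue/dequeue variant built for the CPU microarchitecture
 * selected at runtime.
 */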
clib_error_t *
dpdk_buffer_pools_create (vlib_main_t * vm)
{
  clib_error_t *err;
  vlib_buffer_pool_t *bp;

  struct rte_mempool_ops ops = { };

  strncpy (ops.name, "vpp", 4);
  ops.alloc = dpdk_ops_vpp_alloc;
  ops.free = dpdk_ops_vpp_free;
  ops.get_count = dpdk_ops_vpp_get_count;
  ops.enqueue = CLIB_MARCH_FN_POINTER (dpdk_ops_vpp_enqueue);
  ops.dequeue = CLIB_MARCH_FN_POINTER (dpdk_ops_vpp_dequeue);
  rte_mempool_register_ops (&ops);

  strncpy (ops.name, "vpp-no-cache", 13);
  ops.get_count = dpdk_ops_vpp_get_count_no_cache;
  ops.enqueue = CLIB_MARCH_FN_POINTER (dpdk_ops_vpp_enqueue_no_cache);
  ops.dequeue = dpdk_ops_vpp_dequeue_no_cache;
  rte_mempool_register_ops (&ops);

  /* *INDENT-OFF* */
  vec_foreach (bp, vm->buffer_main->buffer_pools)
    if (bp->start && (err = dpdk_buffer_pool_init (vm, bp)))
      return err;
  /* *INDENT-ON* */

  return 0;
}

VLIB_BUFFER_SET_EXT_HDR_SIZE (sizeof (struct rte_mempool_objhdr) +
                              sizeof (struct rte_mbuf));

#endif

/** @endcond */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */