diff options
author | Damjan Marion <damarion@cisco.com> | 2017-07-20 18:10:35 +0200 |
---|---|---|
committer | Neale Ranns <nranns@cisco.com> | 2017-10-04 10:46:51 +0000 |
commit | 206243c1b79b70d9370e1bdc47a200289d48ddf4 (patch) | |
tree | f7dc56d76cc02feb5685605d01d1abf63eb40cc9 /src/plugins/dpdk/buffer.c | |
parent | 7b7ba572ab486d57b59c12af521175a6bcd7a52b (diff) |
dpdk: use vpp physmem allocator for dpdk buffers
This allows us to have a single contiguous allocation for DPDK buffers
with a single mmap FD, so buffer memory can be easily shared with a
different process.
As a consequence dpdk socket-mem is no longer in charge of allocating
buffer memory, but we still need some space allocated for dpdk
structures, so default socket-mem is reduced from 256 to 64 MB.
For a default of 16K buffers per numa node, physmem allocation is now
40 MB, so basically this change reduces footprint from 256 MB per socket
to 104 MB (64 + 40).
Change-Id: Ic8cfe83930a18411545b37a12b14aac89affd04f
Signed-off-by: Damjan Marion <damarion@cisco.com>
Signed-off-by: Sergio Gonzalez Monroy <sergio.gonzalez.monroy@intel.com>
Signed-off-by: Damjan Marion <damarion@cisco.com>
Diffstat (limited to 'src/plugins/dpdk/buffer.c')
-rw-r--r-- | src/plugins/dpdk/buffer.c | 85 |
1 files changed, 66 insertions, 19 deletions
diff --git a/src/plugins/dpdk/buffer.c b/src/plugins/dpdk/buffer.c index e09d80194ed..c23a42070b1 100644 --- a/src/plugins/dpdk/buffer.c +++ b/src/plugins/dpdk/buffer.c @@ -409,13 +409,26 @@ dpdk_packet_template_init (vlib_main_t * vm, vlib_worker_thread_barrier_release (vm); } +typedef struct +{ + /* must be first */ + struct rte_pktmbuf_pool_private mbp_priv; + vlib_physmem_region_index_t region_index; +} dpdk_mempool_private_t; + clib_error_t * dpdk_buffer_pool_create (vlib_main_t * vm, unsigned num_mbufs, unsigned socket_id) { dpdk_main_t *dm = &dpdk_main; struct rte_mempool *rmp; - int i; + dpdk_mempool_private_t priv; + vlib_physmem_region_t *pr; + vlib_physmem_region_index_t pri; + u8 *pool_name; + unsigned elt_size; + u32 size; + i32 i, ret; vec_validate_aligned (dm->pktmbuf_pools, socket_id, CLIB_CACHE_LINE_BYTES); @@ -423,29 +436,64 @@ dpdk_buffer_pool_create (vlib_main_t * vm, unsigned num_mbufs, if (dm->pktmbuf_pools[socket_id]) return 0; - u8 *pool_name = format (0, "mbuf_pool_socket%u%c", socket_id, 0); + pool_name = format (0, "dpdk_mbuf_pool_socket%u%c", socket_id, 0); + + elt_size = sizeof (struct rte_mbuf) + + VLIB_BUFFER_HDR_SIZE /* priv size */ + + VLIB_BUFFER_PRE_DATA_SIZE + VLIB_BUFFER_DATA_SIZE; /*data room size */ - rmp = rte_pktmbuf_pool_create ((char *) pool_name, /* pool name */ - num_mbufs, /* number of mbufs */ - 512, /* cache size */ - VLIB_BUFFER_HDR_SIZE, /* priv size */ - VLIB_BUFFER_PRE_DATA_SIZE + VLIB_BUFFER_DATA_SIZE, /* dataroom size */ - socket_id); /* cpu socket */ + size = rte_mempool_xmem_size (num_mbufs, elt_size, 21); + clib_error_t *error = 0; + error = + vlib_physmem_region_alloc (vm, (char *) pool_name, size, socket_id, + VLIB_PHYSMEM_F_HAVE_BUFFERS, &pri); + if (error) + clib_error_report (error); + + pr = vlib_physmem_get_region (vm, pri); + + priv.mbp_priv.mbuf_data_room_size = VLIB_BUFFER_PRE_DATA_SIZE + + VLIB_BUFFER_DATA_SIZE; + priv.mbp_priv.mbuf_priv_size = VLIB_BUFFER_HDR_SIZE; + +#if 0 + /* Check that 
pg_shift parameter is valid. */ + if (pg_shift > MEMPOOL_PG_SHIFT_MAX) + { + rte_errno = EINVAL; + return NULL; + } +#endif + rmp = rte_mempool_create_empty ((char *) pool_name, /* pool name */ + num_mbufs, /* number of mbufs */ + elt_size, 512, /* cache size */ + sizeof (dpdk_mempool_private_t), /* private data size */ + socket_id, 0); /* flags */ if (rmp) { - { - struct rte_mempool_memhdr *memhdr; + rte_mempool_set_ops_byname (rmp, RTE_MBUF_DEFAULT_MEMPOOL_OPS, NULL); - STAILQ_FOREACH (memhdr, &rmp->mem_list, next) - vlib_buffer_add_mem_range (vm, (uword) memhdr->addr, memhdr->len); - } - if (rmp) + /* call the mempool priv initializer */ + rte_pktmbuf_pool_init (rmp, &priv); + + ret = rte_mempool_populate_phys_tab (rmp, pr->mem, pr->page_table, + pr->n_pages, pr->log2_page_size, + NULL, NULL); + if (ret == (i32) rmp->size) { + /* call the object initializers */ + rte_mempool_obj_iter (rmp, rte_pktmbuf_init, 0); + + dpdk_mempool_private_t *privp = rte_mempool_get_priv (rmp); + privp->region_index = pri; + dm->pktmbuf_pools[socket_id] = rmp; - vec_free (pool_name); + return 0; } + + rte_mempool_free (rmp); } vec_free (pool_name); @@ -455,10 +503,9 @@ dpdk_buffer_pool_create (vlib_main_t * vm, unsigned num_mbufs, { if (dm->pktmbuf_pools[i]) { - clib_warning - ("WARNING: Failed to allocate mempool for CPU socket %u. " - "Threads running on socket %u will use socket %u mempool.", - socket_id, socket_id, i); + clib_warning ("WARNING: Failed to allocate mempool for CPU socket " + "%u. Threads running on socket %u will use socket %u " + "mempool.", socket_id, socket_id, i); dm->pktmbuf_pools[socket_id] = dm->pktmbuf_pools[i]; return 0; } |