author Damjan Marion <damarion@cisco.com> 2019-01-21 11:48:34 +0100
committer Dave Barach <openvpp@barachs.net> 2019-01-30 16:19:22 +0000
commit 910d3694e8b22c9d14e5f2913d14ae149e184620 (patch)
tree e4993e93e4d7dba51a5898e82bb6149a3e4bd7ba /src/plugins/dpdk/buffer.c
parent 4fd5a9d3e6abdf61f266da8400a299fe5b0eb0ed (diff)
buffers: major cleanup and improvements
This patch introduces following changes:
- deprecated free lists which are not used and not compatible with external buffer managers (i.e. DPDK)
- introduces native support for per-numa buffer pools
- significantly improves performance of buffer alloc and free

Change-Id: I4a8e723ae47056717afd6cac0efe87cb731b5be7
Signed-off-by: Damjan Marion <damarion@cisco.com>
Diffstat (limited to 'src/plugins/dpdk/buffer.c')
-rw-r--r--  src/plugins/dpdk/buffer.c | 750
1 file changed, 271 insertions(+), 479 deletions(-)
diff --git a/src/plugins/dpdk/buffer.c b/src/plugins/dpdk/buffer.c
index aed194074cc..73310eff95a 100644
--- a/src/plugins/dpdk/buffer.c
+++ b/src/plugins/dpdk/buffer.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Copyright (c) 2017-2019 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -12,586 +12,378 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-/*
- * buffer.c: allocate/free network buffers.
- *
- * Copyright (c) 2008 Eliot Dresselhaus
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-
-/**
- * @file
- *
- * Allocate/free network buffers.
- */
#include <unistd.h>
+#include <errno.h>
#include <rte_config.h>
-
-#include <rte_common.h>
-#include <rte_log.h>
-#include <rte_memory.h>
-#include <rte_memzone.h>
-#include <rte_tailq.h>
-#include <rte_eal.h>
-#include <rte_per_lcore.h>
-#include <rte_launch.h>
-#include <rte_atomic.h>
-#include <rte_cycles.h>
-#include <rte_prefetch.h>
-#include <rte_lcore.h>
-#include <rte_per_lcore.h>
-#include <rte_branch_prediction.h>
-#include <rte_interrupts.h>
-#include <rte_vfio.h>
-#include <rte_random.h>
-#include <rte_debug.h>
-#include <rte_ether.h>
-#include <rte_ethdev.h>
-#include <rte_ring.h>
-#include <rte_mempool.h>
#include <rte_mbuf.h>
-#include <rte_version.h>
+#include <rte_ethdev.h>
+#include <rte_vfio.h>
#include <vlib/vlib.h>
-#include <vlib/unix/unix.h>
-#include <vnet/vnet.h>
-#include <dpdk/device/dpdk.h>
-#include <dpdk/device/dpdk_priv.h>
#include <dpdk/buffer.h>
STATIC_ASSERT (VLIB_BUFFER_PRE_DATA_SIZE == RTE_PKTMBUF_HEADROOM,
"VLIB_BUFFER_PRE_DATA_SIZE must be equal to RTE_PKTMBUF_HEADROOM");
-typedef struct
-{
- CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
- struct rte_mbuf **mbuf_alloc_list;
-} dpdk_buffer_per_thread_data;
typedef struct
{
- int vfio_container_fd;
- dpdk_buffer_per_thread_data *ptd;
-} dpdk_buffer_main_t;
-
-dpdk_buffer_main_t dpdk_buffer_main;
-
-static_always_inline void
-dpdk_rte_pktmbuf_free (vlib_main_t * vm, u32 thread_index, vlib_buffer_t * b,
- int maybe_next)
-{
- struct rte_mbuf *mb;
- u32 next, flags;
+ /* must be first */
+ struct rte_pktmbuf_pool_private mbp_priv;
+ u8 buffer_pool_index;
+} dpdk_mempool_private_t;
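+
+/* rte_pktmbuf_pool_init () writes a struct rte_pktmbuf_pool_private at
+   offset 0 of the pool private area, hence mbp_priv must stay first */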
-next:
- flags = b->flags;
- next = b->next_buffer;
- mb = rte_mbuf_from_vlib_buffer (b);
-
- if (PREDICT_FALSE (b->n_add_refs))
- {
- rte_mbuf_refcnt_update (mb, b->n_add_refs);
- b->n_add_refs = 0;
- }
-
- if ((mb = rte_pktmbuf_prefree_seg (mb)))
- rte_mempool_put (mb->pool, mb);
-
- if (maybe_next && (flags & VLIB_BUFFER_NEXT_PRESENT))
- {
- b = vlib_get_buffer (vm, next);
- goto next;
- }
-}
+#ifndef CLIB_MARCH_VARIANT
+struct rte_mempool **dpdk_mempool_by_buffer_pool_index = 0;
+struct rte_mempool **dpdk_no_cache_mempool_by_buffer_pool_index = 0;
-/* Make sure free list has at least given number of free buffers. */
-uword
-CLIB_MULTIARCH_FN (dpdk_buffer_fill_free_list) (vlib_main_t * vm,
- vlib_buffer_free_list_t * fl,
- uword min_free_buffers)
+clib_error_t *
+dpdk_buffer_pool_init (vlib_main_t * vm, vlib_buffer_pool_t * bp)
{
- dpdk_main_t *dm = &dpdk_main;
- dpdk_buffer_main_t *dbm = &dpdk_buffer_main;
- struct rte_mbuf **mb;
- uword n_left, first;
- word n_alloc;
- unsigned socket_id = rte_socket_id ();
- u32 thread_index = vlib_get_thread_index ();
- dpdk_buffer_per_thread_data *d = vec_elt_at_index (dbm->ptd, thread_index);
- struct rte_mempool *rmp = dm->pktmbuf_pools[socket_id];
- dpdk_mempool_private_t *privp = rte_mempool_get_priv (rmp);
- vlib_buffer_t bt;
+ struct rte_mempool *mp, *nmp;
+ dpdk_mempool_private_t priv;
+ enum rte_iova_mode iova_mode;
u32 *bi;
+ u8 *name = 0;
- /* Too early? */
- if (PREDICT_FALSE (rmp == 0))
- return 0;
-
- /* Already have enough free buffers on free list? */
- n_alloc = min_free_buffers - vec_len (fl->buffers);
- if (n_alloc <= 0)
- return min_free_buffers;
-
- /* Always allocate round number of buffers. */
- n_alloc = round_pow2 (n_alloc, CLIB_CACHE_LINE_BYTES / sizeof (u32));
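+ /* each mempool element: struct rte_mbuf, then the vlib_buffer_t header
+    (carried as mbuf private data), then the packet data area */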
+ u32 elt_size =
+ sizeof (struct rte_mbuf) + sizeof (vlib_buffer_t) + bp->data_size;
- /* Always allocate new buffers in reasonably large sized chunks. */
- n_alloc = clib_max (n_alloc, fl->min_n_buffers_each_alloc);
-
- vec_validate_aligned (d->mbuf_alloc_list, n_alloc - 1,
+ /* create empty mempools */
+ vec_validate_aligned (dpdk_mempool_by_buffer_pool_index, bp->index,
+ CLIB_CACHE_LINE_BYTES);
+ vec_validate_aligned (dpdk_no_cache_mempool_by_buffer_pool_index, bp->index,
CLIB_CACHE_LINE_BYTES);
- if (rte_mempool_get_bulk (rmp, (void *) d->mbuf_alloc_list, n_alloc) < 0)
- return 0;
+ /* normal mempool */
+ name = format (name, "vpp pool %u%c", bp->index, 0);
+ mp = rte_mempool_create_empty ((char *) name, vec_len (bp->buffers),
+ elt_size, 512, sizeof (priv),
+ bp->numa_node, 0);
+ vec_reset_length (name);
- clib_memset (&bt, 0, sizeof (vlib_buffer_t));
- bt.buffer_pool_index = privp->buffer_pool_index;
+ /* non-cached mempool */
+ name = format (name, "vpp pool %u (no cache)%c", bp->index, 0);
+ nmp = rte_mempool_create_empty ((char *) name, vec_len (bp->buffers),
+ elt_size, 0, sizeof (priv),
+ bp->numa_node, 0);
+ vec_free (name);
- _vec_len (d->mbuf_alloc_list) = n_alloc;
+ dpdk_mempool_by_buffer_pool_index[bp->index] = mp;
+ dpdk_no_cache_mempool_by_buffer_pool_index[bp->index] = nmp;
- first = vec_len (fl->buffers);
- vec_resize_aligned (fl->buffers, n_alloc, CLIB_CACHE_LINE_BYTES);
+ rte_mempool_set_ops_byname (mp, "vpp", NULL);
+ rte_mempool_set_ops_byname (nmp, "vpp-no-cache", NULL);
- n_left = n_alloc;
- mb = d->mbuf_alloc_list;
- bi = fl->buffers + first;
+ /* Call the mempool priv initializer */
+ priv.mbp_priv.mbuf_data_room_size = VLIB_BUFFER_PRE_DATA_SIZE +
+ VLIB_BUFFER_DATA_SIZE;
+ priv.mbp_priv.mbuf_priv_size = VLIB_BUFFER_HDR_SIZE;
+ priv.buffer_pool_index = bp->index;
+ rte_pktmbuf_pool_init (mp, &priv);
+ rte_pktmbuf_pool_init (nmp, &priv);
- ASSERT (n_left % 8 == 0);
+ iova_mode = rte_eal_iova_mode ();
- while (n_left >= 8)
+ /* populate mempool object buffer header */
+ vec_foreach (bi, bp->buffers)
+ {
+ struct rte_mempool_objhdr *hdr;
+ vlib_buffer_t *b = vlib_get_buffer (vm, *bi);
+ struct rte_mbuf *mb = rte_mbuf_from_vlib_buffer (b);
+ hdr = (struct rte_mempool_objhdr *) RTE_PTR_SUB (mb, sizeof (*hdr));
+ hdr->mp = mp;
+ hdr->iova = (iova_mode == RTE_IOVA_VA) ?
+ pointer_to_uword (mb) : vlib_physmem_get_pa (vm, mb);
+ STAILQ_INSERT_TAIL (&mp->elt_list, hdr, next);
+ STAILQ_INSERT_TAIL (&nmp->elt_list, hdr, next);
+ mp->populated_size++;
+ nmp->populated_size++;
+ }
+
+ /* call the object initializers */
+ rte_mempool_obj_iter (mp, rte_pktmbuf_init, 0);
+
+ /* map DMA pages if at least one physical device exists */
+ if (rte_eth_dev_count_avail ())
{
- if (PREDICT_FALSE (n_left < 24))
- goto no_prefetch;
-
- vlib_prefetch_buffer_header (vlib_buffer_from_rte_mbuf (mb[16]), STORE);
- vlib_prefetch_buffer_header (vlib_buffer_from_rte_mbuf (mb[17]), STORE);
- vlib_prefetch_buffer_header (vlib_buffer_from_rte_mbuf (mb[18]), STORE);
- vlib_prefetch_buffer_header (vlib_buffer_from_rte_mbuf (mb[19]), STORE);
- vlib_prefetch_buffer_header (vlib_buffer_from_rte_mbuf (mb[20]), STORE);
- vlib_prefetch_buffer_header (vlib_buffer_from_rte_mbuf (mb[21]), STORE);
- vlib_prefetch_buffer_header (vlib_buffer_from_rte_mbuf (mb[22]), STORE);
- vlib_prefetch_buffer_header (vlib_buffer_from_rte_mbuf (mb[23]), STORE);
-
- no_prefetch:
- vlib_get_buffer_indices_with_offset (vm, (void **) mb, bi, 8,
- sizeof (struct rte_mbuf));
+ uword i;
+ size_t page_sz;
+ vlib_physmem_map_t *pm;
- vlib_buffer_copy_template (vlib_buffer_from_rte_mbuf (mb[0]), &bt);
- vlib_buffer_copy_template (vlib_buffer_from_rte_mbuf (mb[1]), &bt);
- vlib_buffer_copy_template (vlib_buffer_from_rte_mbuf (mb[2]), &bt);
- vlib_buffer_copy_template (vlib_buffer_from_rte_mbuf (mb[3]), &bt);
- vlib_buffer_copy_template (vlib_buffer_from_rte_mbuf (mb[4]), &bt);
- vlib_buffer_copy_template (vlib_buffer_from_rte_mbuf (mb[5]), &bt);
- vlib_buffer_copy_template (vlib_buffer_from_rte_mbuf (mb[6]), &bt);
- vlib_buffer_copy_template (vlib_buffer_from_rte_mbuf (mb[7]), &bt);
-
- n_left -= 8;
- mb += 8;
- bi += 8;
- }
+ pm = vlib_physmem_get_map (vm, bp->physmem_map_index);
+ page_sz = 1ULL << pm->log2_page_size;
- if (fl->buffer_init_function)
- fl->buffer_init_function (vm, fl, fl->buffers + first, n_alloc);
+ for (i = 0; i < pm->n_pages; i++)
+ {
+ char *va = ((char *) pm->base) + i * page_sz;
+ uword pa = (iova_mode == RTE_IOVA_VA) ?
+ pointer_to_uword (va) : pm->page_table[i];
- fl->n_alloc += n_alloc;
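+ /* a failure here likely means there is no PCI device assigned to the
+    vfio container or noiommu mode is used, so we stop trying */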
+ if (rte_vfio_dma_map (pointer_to_uword (va), pa, page_sz))
+ break;
+ }
+ }
- return n_alloc;
+ return 0;
}
-static_always_inline void
-dpdk_prefetch_buffer (vlib_buffer_t * b)
+static int
+dpdk_ops_vpp_alloc (struct rte_mempool *mp)
{
- struct rte_mbuf *mb;
- mb = rte_mbuf_from_vlib_buffer (b);
- CLIB_PREFETCH (mb, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
- CLIB_PREFETCH (b, CLIB_CACHE_LINE_BYTES, LOAD);
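+ /* nothing to allocate: VPP populates the pool from its own physmem, so
+    this op is a no-op */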
+ clib_warning ("");
+ return 0;
}
-static_always_inline void
-recycle_or_free (vlib_main_t * vm, vlib_buffer_main_t * bm, u32 bi,
- vlib_buffer_t * b)
+static void
+dpdk_ops_vpp_free (struct rte_mempool *mp)
{
- u32 thread_index = vlib_get_thread_index ();
-
- dpdk_rte_pktmbuf_free (vm, thread_index, b, 1);
+ clib_warning ("");
}
+#endif
+
static_always_inline void
-vlib_buffer_free_inline (vlib_main_t * vm,
- u32 * buffers, u32 n_buffers, u32 follow_buffer_next)
+dpdk_ops_vpp_enqueue_one (vlib_buffer_t * bt, void *obj)
{
- vlib_buffer_main_t *bm = vm->buffer_main;
- vlib_buffer_t *bufp[n_buffers], **b = bufp;
- u32 thread_index = vm->thread_index;
- int i = 0;
- u32 simple_mask = VLIB_BUFFER_NEXT_PRESENT;
- u32 n_left, *bi;
- u32 (*cb) (vlib_main_t * vm, u32 * buffers, u32 n_buffers,
- u32 follow_buffer_next);
-
- cb = bm->buffer_free_callback;
+ /* Only non-replicated packets (b->ref_count == 1) expected */
- if (PREDICT_FALSE (cb != 0))
- n_buffers = (*cb) (vm, buffers, n_buffers, follow_buffer_next);
+ struct rte_mbuf *mb = obj;
+ vlib_buffer_t *b = vlib_buffer_from_rte_mbuf (mb);
+ ASSERT (b->ref_count == 1);
+ ASSERT (b->buffer_pool_index == bt->buffer_pool_index);
+ vlib_buffer_copy_template (b, bt);
+}
- if (!n_buffers)
- return;
+int
+CLIB_MULTIARCH_FN (dpdk_ops_vpp_enqueue) (struct rte_mempool * mp,
+ void *const *obj_table, unsigned n)
+{
+ const int batch_size = 32;
+ vlib_main_t *vm = vlib_get_main ();
+ vlib_buffer_t bt;
+ dpdk_mempool_private_t *privp = rte_mempool_get_priv (mp);
+ u8 buffer_pool_index = privp->buffer_pool_index;
+ vlib_buffer_pool_t *bp = vlib_get_buffer_pool (vm, buffer_pool_index);
+ u32 bufs[batch_size];
+ u32 n_left = n;
+ void *const *obj = obj_table;
- n_left = n_buffers;
- bi = buffers;
- b = bufp;
- vlib_get_buffers (vm, bi, b, n_buffers);
+ vlib_buffer_copy_template (&bt, &bp->buffer_template);
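+ /* first pass: stamp the pool's buffer template back onto each returned
+    buffer */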
while (n_left >= 4)
{
- u32 or_flags;
- vlib_buffer_t **p;
-
- if (n_left < 16)
- goto no_prefetch;
-
- p = b + 12;
- dpdk_prefetch_buffer (p[0]);
- dpdk_prefetch_buffer (p[1]);
- dpdk_prefetch_buffer (p[2]);
- dpdk_prefetch_buffer (p[3]);
- no_prefetch:
-
- for (i = 0; i < 4; i++)
- VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[i]);
-
- or_flags = b[0]->flags | b[1]->flags | b[2]->flags | b[3]->flags;
-
- if (or_flags & simple_mask)
- {
- recycle_or_free (vm, bm, bi[0], b[0]);
- recycle_or_free (vm, bm, bi[1], b[1]);
- recycle_or_free (vm, bm, bi[2], b[2]);
- recycle_or_free (vm, bm, bi[3], b[3]);
- }
- else
- {
- dpdk_rte_pktmbuf_free (vm, thread_index, b[0], 0);
- dpdk_rte_pktmbuf_free (vm, thread_index, b[1], 0);
- dpdk_rte_pktmbuf_free (vm, thread_index, b[2], 0);
- dpdk_rte_pktmbuf_free (vm, thread_index, b[3], 0);
- }
- bi += 4;
- b += 4;
+ dpdk_ops_vpp_enqueue_one (&bt, obj[0]);
+ dpdk_ops_vpp_enqueue_one (&bt, obj[1]);
+ dpdk_ops_vpp_enqueue_one (&bt, obj[2]);
+ dpdk_ops_vpp_enqueue_one (&bt, obj[3]);
+ obj += 4;
n_left -= 4;
}
+
while (n_left)
{
- VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[0]);
- recycle_or_free (vm, bm, bi[0], b[0]);
- bi += 1;
- b += 1;
+ dpdk_ops_vpp_enqueue_one (&bt, obj[0]);
+ obj += 1;
n_left -= 1;
}
-}
-
-void
-CLIB_MULTIARCH_FN (dpdk_buffer_free) (vlib_main_t * vm, u32 * buffers,
- u32 n_buffers)
-{
- vlib_buffer_free_inline (vm, buffers, n_buffers, /* follow_buffer_next */
- 1);
-}
-
-void
-CLIB_MULTIARCH_FN (dpdk_buffer_free_no_next) (vlib_main_t * vm, u32 * buffers,
- u32 n_buffers)
-{
- vlib_buffer_free_inline (vm, buffers, n_buffers, /* follow_buffer_next */
- 0);
-}
-#ifndef CLIB_MARCH_VARIANT
-clib_error_t *
-dpdk_pool_create (vlib_main_t * vm, u8 * pool_name, u32 elt_size,
- u32 num_elts, u32 pool_priv_size, u16 cache_size, u8 numa,
- struct rte_mempool **_mp, u32 * map_index)
-{
- struct rte_mempool *mp;
- enum rte_iova_mode iova_mode;
- dpdk_mempool_private_t priv;
- vlib_physmem_map_t *pm;
- clib_error_t *error = 0;
- size_t min_chunk_size, align;
- int map_dma = 1;
- u32 size;
- i32 ret;
- uword i;
-
- mp = rte_mempool_create_empty ((char *) pool_name, num_elts, elt_size,
- 512, pool_priv_size, numa, 0);
- if (!mp)
- return clib_error_return (0, "failed to create %s", pool_name);
-
- rte_mempool_set_ops_byname (mp, RTE_MBUF_DEFAULT_MEMPOOL_OPS, NULL);
-
- size = rte_mempool_op_calc_mem_size_default (mp, num_elts, 21,
- &min_chunk_size, &align);
-
- if ((error = vlib_physmem_shared_map_create (vm, (char *) pool_name, size,
- 0, numa, map_index)))
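+ /* second pass: translate mbuf pointers to buffer indices and bulk-return
+    them to the vlib buffer pool */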
+ while (n >= batch_size)
{
- rte_mempool_free (mp);
- return error;
+ vlib_get_buffer_indices_with_offset (vm, (void **) obj_table, bufs,
+ batch_size,
+ sizeof (struct rte_mbuf));
+ vlib_buffer_pool_put (vm, buffer_pool_index, bufs, batch_size);
+ n -= batch_size;
+ obj_table += batch_size;
}
- pm = vlib_physmem_get_map (vm, *map_index);
-
- /* Call the mempool priv initializer */
- priv.mbp_priv.mbuf_data_room_size = VLIB_BUFFER_PRE_DATA_SIZE +
- VLIB_BUFFER_DATA_SIZE;
- priv.mbp_priv.mbuf_priv_size = VLIB_BUFFER_HDR_SIZE;
- rte_pktmbuf_pool_init (mp, &priv);
- if (rte_eth_dev_count_avail () == 0)
- map_dma = 0;
-
- iova_mode = rte_eal_iova_mode ();
- for (i = 0; i < pm->n_pages; i++)
+ if (n)
{
- size_t page_sz = 1ULL << pm->log2_page_size;
- char *va = ((char *) pm->base) + i * page_sz;
- uword pa = iova_mode == RTE_IOVA_VA ?
- pointer_to_uword (va) : pm->page_table[i];
- ret = rte_mempool_populate_iova (mp, va, pa, page_sz, 0, 0);
- if (ret < 0)
- {
- rte_mempool_free (mp);
- return clib_error_return (0, "failed to populate %s", pool_name);
- }
- /* -1 likely means there is no PCI devices assigned to vfio
- container or noiommu mode is used so we stop trying */
- if (map_dma && rte_vfio_dma_map (pointer_to_uword (va), pa, page_sz))
- map_dma = 0;
+ vlib_get_buffer_indices_with_offset (vm, (void **) obj_table, bufs,
+ n, sizeof (struct rte_mbuf));
+ vlib_buffer_pool_put (vm, buffer_pool_index, bufs, n);
}
- _mp[0] = mp;
-
return 0;
}
-clib_error_t *
-dpdk_buffer_pool_create (vlib_main_t * vm, unsigned num_mbufs,
- unsigned socket_id)
-{
- dpdk_main_t *dm = &dpdk_main;
- struct rte_mempool *rmp;
- clib_error_t *error = 0;
- u8 *pool_name;
- u32 elt_size, i;
- u32 map_index;
+CLIB_MARCH_FN_REGISTRATION (dpdk_ops_vpp_enqueue);
- vec_validate_aligned (dm->pktmbuf_pools, socket_id, CLIB_CACHE_LINE_BYTES);
-
- /* pool already exists, nothing to do */
- if (dm->pktmbuf_pools[socket_id])
- return 0;
-
- pool_name = format (0, "dpdk_mbuf_pool_socket%u%c", socket_id, 0);
-
- elt_size = sizeof (struct rte_mbuf) +
- VLIB_BUFFER_HDR_SIZE /* priv size */ +
- VLIB_BUFFER_PRE_DATA_SIZE + VLIB_BUFFER_DATA_SIZE; /*data room size */
-
- error = dpdk_pool_create (vm, pool_name, elt_size, num_mbufs,
- sizeof (dpdk_mempool_private_t), 512, socket_id,
- &rmp, &map_index);
-
- vec_free (pool_name);
+static_always_inline void
+dpdk_ops_vpp_enqueue_no_cache_one (vlib_main_t * vm, struct rte_mempool *old,
+ struct rte_mempool *new, void *obj,
+ vlib_buffer_t * bt)
+{
+ struct rte_mbuf *mb = obj;
+ vlib_buffer_t *b = vlib_buffer_from_rte_mbuf (mb);
- if (!error)
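+ /* drop one reference; only the last owner re-points the mbuf at the
+    cached pool, resets the metadata and returns the buffer to vlib */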
+ if (clib_atomic_sub_fetch (&b->ref_count, 1) == 0)
{
- /* call the object initializers */
- rte_mempool_obj_iter (rmp, rte_pktmbuf_init, 0);
-
- dpdk_mempool_private_t *privp = rte_mempool_get_priv (rmp);
- privp->buffer_pool_index =
- vlib_buffer_register_physmem_map (vm, map_index);
+ u32 bi = vlib_get_buffer_index (vm, b);
+ mb->pool = new;
+ vlib_buffer_copy_template (b, bt);
+ vlib_buffer_pool_put (vm, bt->buffer_pool_index, &bi, 1);
+ return;
+ }
+}
- dm->pktmbuf_pools[socket_id] = rmp;
+int
+CLIB_MULTIARCH_FN (dpdk_ops_vpp_enqueue_no_cache) (struct rte_mempool * cmp,
+ void *const *obj_table,
+ unsigned n)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ vlib_buffer_t bt;
+ dpdk_mempool_private_t *privp = rte_mempool_get_priv (cmp);
+ struct rte_mempool *mp;
+ mp = dpdk_mempool_by_buffer_pool_index[privp->buffer_pool_index];
+ u8 buffer_pool_index = privp->buffer_pool_index;
+ vlib_buffer_pool_t *bp = vlib_get_buffer_pool (vm, buffer_pool_index);
+ vlib_buffer_copy_template (&bt, &bp->buffer_template);
- return 0;
+ while (n >= 4)
+ {
+ dpdk_ops_vpp_enqueue_no_cache_one (vm, cmp, mp, obj_table[0], &bt);
+ dpdk_ops_vpp_enqueue_no_cache_one (vm, cmp, mp, obj_table[1], &bt);
+ dpdk_ops_vpp_enqueue_no_cache_one (vm, cmp, mp, obj_table[2], &bt);
+ dpdk_ops_vpp_enqueue_no_cache_one (vm, cmp, mp, obj_table[3], &bt);
+ obj_table += 4;
+ n -= 4;
}
- clib_error_report (error);
-
- /* no usable pool for this socket, try to use pool from another one */
- for (i = 0; i < vec_len (dm->pktmbuf_pools); i++)
+ while (n)
{
- if (dm->pktmbuf_pools[i])
- {
- clib_warning ("WARNING: Failed to allocate mempool for CPU socket "
- "%u. Threads running on socket %u will use socket %u "
- "mempool.", socket_id, socket_id, i);
- dm->pktmbuf_pools[socket_id] = dm->pktmbuf_pools[i];
- return 0;
- }
+ dpdk_ops_vpp_enqueue_no_cache_one (vm, cmp, mp, obj_table[0], &bt);
+ obj_table += 1;
+ n -= 1;
}
- return clib_error_return (0, "failed to allocate mempool on socket %u",
- socket_id);
+ return 0;
}
-#if CLIB_DEBUG > 0
+CLIB_MARCH_FN_REGISTRATION (dpdk_ops_vpp_enqueue_no_cache);
-u32 *vlib_buffer_state_validation_lock;
-uword *vlib_buffer_state_validation_hash;
-void *vlib_buffer_state_heap;
-
-static clib_error_t *
-buffer_state_validation_init (vlib_main_t * vm)
+int
+CLIB_MULTIARCH_FN (dpdk_ops_vpp_dequeue) (struct rte_mempool * mp,
+ void **obj_table, unsigned n)
{
- void *oldheap;
-
- vlib_buffer_state_heap =
- mheap_alloc_with_lock (0, 10 << 20, 0 /* locked */ );
- oldheap = clib_mem_set_heap (vlib_buffer_state_heap);
+ const int batch_size = 32;
+ vlib_main_t *vm = vlib_get_main ();
+ u32 bufs[batch_size], total = 0, n_alloc = 0;
+ dpdk_mempool_private_t *privp = rte_mempool_get_priv (mp);
+ u8 buffer_pool_index = privp->buffer_pool_index;
+ void **obj = obj_table;
+
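+ /* the rte_mbuf lives immediately in front of the vlib_buffer_t, hence
+    the negative offset when converting buffer indices to mbuf pointers */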
+ while (n >= batch_size)
+ {
+ n_alloc = vlib_buffer_alloc_from_pool (vm, bufs, batch_size,
+ buffer_pool_index);
+ if (n_alloc != batch_size)
+ goto alloc_fail;
+
+ vlib_get_buffers_with_offset (vm, bufs, obj, batch_size,
+ -(i32) sizeof (struct rte_mbuf));
+ total += batch_size;
+ obj += batch_size;
+ n -= batch_size;
+ }
- vlib_buffer_state_validation_hash = hash_create (0, sizeof (uword));
- vec_validate_aligned (vlib_buffer_state_validation_lock, 0,
- CLIB_CACHE_LINE_BYTES);
- clib_mem_set_heap (oldheap);
- return 0;
-}
+ if (n)
+ {
+ n_alloc = vlib_buffer_alloc_from_pool (vm, bufs, n, buffer_pool_index);
-VLIB_INIT_FUNCTION (buffer_state_validation_init);
-#endif
+ if (n_alloc != n)
+ goto alloc_fail;
-#if CLI_DEBUG
-struct dpdk_validate_buf_result
-{
- u32 invalid;
- u32 uninitialized;
-};
+ vlib_get_buffers_with_offset (vm, bufs, obj, n,
+ -(i32) sizeof (struct rte_mbuf));
+ }
-#define DPDK_TRAJECTORY_POISON 31
+ return 0;
-static void
-dpdk_buffer_validate_trajectory (struct rte_mempool *mp, void *opaque,
- void *obj, unsigned obj_idx)
-{
- vlib_buffer_t *b;
- struct dpdk_validate_buf_result *counter = opaque;
- b = vlib_buffer_from_rte_mbuf ((struct rte_mbuf *) obj);
- if (b->pre_data[0] != 0)
+alloc_fail:
+ /* dpdk doesn't support partial alloc, so we need to return what we
+ already got */
+ if (n_alloc)
+ vlib_buffer_pool_put (vm, buffer_pool_index, bufs, n_alloc);
+ obj = obj_table;
+ while (total)
{
- if (b->pre_data[0] == DPDK_TRAJECTORY_POISON)
- counter->uninitialized++;
- else
- counter->invalid++;
+ vlib_get_buffer_indices_with_offset (vm, obj, bufs, batch_size,
+ sizeof (struct rte_mbuf));
+ vlib_buffer_pool_put (vm, buffer_pool_index, bufs, batch_size);
+
+ obj += batch_size;
+ total -= batch_size;
}
+ return -ENOENT;
}
-int
-dpdk_buffer_validate_trajectory_all (u32 * uninitialized)
+CLIB_MARCH_FN_REGISTRATION (dpdk_ops_vpp_dequeue);
+
+#ifndef CLIB_MARCH_VARIANT
+
+static int
+dpdk_ops_vpp_dequeue_no_cache (struct rte_mempool *mp, void **obj_table,
+ unsigned n)
{
- dpdk_main_t *dm = &dpdk_main;
- struct dpdk_validate_buf_result counter = { 0 };
- int i;
-
- for (i = 0; i < vec_len (dm->pktmbuf_pools); i++)
- rte_mempool_obj_iter (dm->pktmbuf_pools[i],
- dpdk_buffer_validate_trajectory, &counter);
- if (uninitialized)
- *uninitialized = counter.uninitialized;
- return counter.invalid;
+ clib_error ("bug");
+ return 0;
}
-static void
-dpdk_buffer_poison_trajectory (struct rte_mempool *mp, void *opaque,
- void *obj, unsigned obj_idx)
+static unsigned
+dpdk_ops_vpp_get_count (const struct rte_mempool *mp)
{
- vlib_buffer_t *b;
- b = vlib_buffer_from_rte_mbuf ((struct rte_mbuf *) obj);
- b->pre_data[0] = DPDK_TRAJECTORY_POISON;
+ clib_warning ("");
+ return 0;
}
-void
-dpdk_buffer_poison_trajectory_all (void)
+static unsigned
+dpdk_ops_vpp_get_count_no_cache (const struct rte_mempool *mp)
{
- dpdk_main_t *dm = &dpdk_main;
- int i;
-
- for (i = 0; i < vec_len (dm->pktmbuf_pools); i++)
- rte_mempool_obj_iter (dm->pktmbuf_pools[i], dpdk_buffer_poison_trajectory,
- 0);
+ dpdk_mempool_private_t *privp;
+ struct rte_mempool *cmp;
+ privp = rte_mempool_get_priv ((struct rte_mempool *) mp);
+ cmp = dpdk_no_cache_mempool_by_buffer_pool_index[privp->buffer_pool_index];
+ return dpdk_ops_vpp_get_count (cmp);
}
-#endif
-static clib_error_t *
-dpdk_buffer_init (vlib_main_t * vm)
+clib_error_t *
+dpdk_buffer_pools_create (vlib_main_t * vm)
{
- dpdk_buffer_main_t *dbm = &dpdk_buffer_main;
- vlib_thread_main_t *tm = vlib_get_thread_main ();
-
- vec_validate_aligned (dbm->ptd, tm->n_vlib_mains - 1,
- CLIB_CACHE_LINE_BYTES);
-
- dbm->vfio_container_fd = -1;
-
+ clib_error_t *err;
+ vlib_buffer_pool_t *bp;
+
+ struct rte_mempool_ops ops = { };
+
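+ /* plug VPP's buffer pools into DPDK by registering custom mempool ops;
+    enqueue/dequeue dispatch to the best CLIB_MARCH variant */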
+ strncpy (ops.name, "vpp", 4);
+ ops.alloc = dpdk_ops_vpp_alloc;
+ ops.free = dpdk_ops_vpp_free;
+ ops.get_count = dpdk_ops_vpp_get_count;
+ ops.enqueue = CLIB_MARCH_FN_POINTER (dpdk_ops_vpp_enqueue);
+ ops.dequeue = CLIB_MARCH_FN_POINTER (dpdk_ops_vpp_dequeue);
+ rte_mempool_register_ops (&ops);
+
+ strncpy (ops.name, "vpp-no-cache", 13);
+ ops.get_count = dpdk_ops_vpp_get_count_no_cache;
+ ops.enqueue = CLIB_MARCH_FN_POINTER (dpdk_ops_vpp_enqueue_no_cache);
+ ops.dequeue = dpdk_ops_vpp_dequeue_no_cache;
+ rte_mempool_register_ops (&ops);
+
+ /* *INDENT-OFF* */
+ vec_foreach (bp, vm->buffer_main->buffer_pools)
+ if (bp->start && (err = dpdk_buffer_pool_init (vm, bp)))
+ return err;
+ /* *INDENT-ON* */
return 0;
}
-VLIB_INIT_FUNCTION (dpdk_buffer_init);
-
-/* *INDENT-OFF* */
-VLIB_BUFFER_REGISTER_CALLBACKS (dpdk, static) = {
- .vlib_buffer_fill_free_list_cb = &dpdk_buffer_fill_free_list,
- .vlib_buffer_free_cb = &dpdk_buffer_free,
- .vlib_buffer_free_no_next_cb = &dpdk_buffer_free_no_next,
-};
-/* *INDENT-ON* */
-
-#if __x86_64__
-vlib_buffer_fill_free_list_cb_t __clib_weak dpdk_buffer_fill_free_list_avx512;
-vlib_buffer_fill_free_list_cb_t __clib_weak dpdk_buffer_fill_free_list_avx2;
-vlib_buffer_free_cb_t __clib_weak dpdk_buffer_free_avx512;
-vlib_buffer_free_cb_t __clib_weak dpdk_buffer_free_avx2;
-vlib_buffer_free_no_next_cb_t __clib_weak dpdk_buffer_free_no_next_avx512;
-vlib_buffer_free_no_next_cb_t __clib_weak dpdk_buffer_free_no_next_avx2;
-
-static void __clib_constructor
-dpdk_input_multiarch_select (void)
-{
- vlib_buffer_callbacks_t *cb = &__dpdk_buffer_callbacks;
- if (dpdk_buffer_fill_free_list_avx512 && clib_cpu_supports_avx512f ())
- {
- cb->vlib_buffer_fill_free_list_cb = dpdk_buffer_fill_free_list_avx512;
- cb->vlib_buffer_free_cb = dpdk_buffer_free_avx512;
- cb->vlib_buffer_free_no_next_cb = dpdk_buffer_free_no_next_avx512;
- }
- else if (dpdk_buffer_fill_free_list_avx2 && clib_cpu_supports_avx2 ())
- {
- cb->vlib_buffer_fill_free_list_cb = dpdk_buffer_fill_free_list_avx2;
- cb->vlib_buffer_free_cb = dpdk_buffer_free_avx2;
- cb->vlib_buffer_free_no_next_cb = dpdk_buffer_free_no_next_avx2;
- }
-}
-#endif
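+/* reserve space in front of each vlib_buffer_t for the DPDK mempool
+   object header and the rte_mbuf */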
+VLIB_BUFFER_SET_EXT_HDR_SIZE (sizeof (struct rte_mempool_objhdr) +
+ sizeof (struct rte_mbuf));
+
#endif
/** @endcond */