author		Marvin Liu <yong.liu@intel.com>	2023-03-15 01:01:38 +0800
committer	Damjan Marion <dmarion@0xa5.net>	2023-04-25 15:18:27 +0000
commit		cada0c5075ebf4c59db3192f190b35bf588fac34 (patch)
tree		f5852b11087ab0c2f2aa13a6e36ca96d6b568787 /src/plugins/memif/memif.c
parent		efad24a84d35458e2c672b94027e54923a42fd25 (diff)
memif: support dma option
Introduce an async model into memif by utilizing the new DMA API. The
original process is broken down into a submission stage and a completion
stage. As multiple submissions may be in flight simultaneously, per-thread
data is no longer safe; thread data is replaced with a per-DMA data
structure. Since the slave side already supports zero-copy mode, the DMA
option is only added on the master side.

Type: feature

Signed-off-by: Marvin Liu <yong.liu@intel.com>
Change-Id: I084f253866f5127cdc73b9a08c8ce73b091488f3
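For orientation, a minimal sketch (illustrative only, not code from this
patch) of the bookkeeping implied by the new dma_info_head/dma_info_tail
fields: mq->dma_info is treated as a fixed-size ring of per-batch
memif_dma_info_t entries, where submission claims the slot at the head and
the completion callback later retires the slot at the tail.

    /* illustrative: claim a dma info slot at submission time */
    memif_dma_info_t *dma_info = mq->dma_info + mq->dma_info_head;
    /* ... record descriptors and buffers in dma_info->data,
       then build and submit a vlib DMA batch ... */
    mq->dma_info_head++;
    if (mq->dma_info_head == mq->dma_info_size)
      mq->dma_info_head = 0;	/* the vector is used as a ring */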
Diffstat (limited to 'src/plugins/memif/memif.c')
-rw-r--r--	src/plugins/memif/memif.c	89
1 file changed, 89 insertions, 0 deletions
diff --git a/src/plugins/memif/memif.c b/src/plugins/memif/memif.c
index c9d2f008cca..37028d8223e 100644
--- a/src/plugins/memif/memif.c
+++ b/src/plugins/memif/memif.c
@@ -301,6 +301,37 @@ memif_connect (memif_if_t * mif)
mq->queue_index =
vnet_hw_if_register_tx_queue (vnm, mif->hw_if_index, i);
clib_spinlock_init (&mq->lockp);
+
+ if (mif->flags & MEMIF_IF_FLAG_USE_DMA)
+ {
+ memif_dma_info_t *dma_info;
+ mq->dma_head = 0;
+ mq->dma_tail = 0;
+ mq->dma_info_head = 0;
+ mq->dma_info_tail = 0;
+ mq->dma_info_size = MEMIF_DMA_INFO_SIZE;
+ vec_validate_aligned (mq->dma_info, MEMIF_DMA_INFO_SIZE,
+ CLIB_CACHE_LINE_BYTES);
+
+ vec_foreach (dma_info, mq->dma_info)
+ {
+ vec_validate_aligned (dma_info->data.desc_data,
+ pow2_mask (max_log2_ring_sz),
+ CLIB_CACHE_LINE_BYTES);
+ vec_validate_aligned (dma_info->data.desc_len,
+ pow2_mask (max_log2_ring_sz),
+ CLIB_CACHE_LINE_BYTES);
+ vec_validate_aligned (dma_info->data.desc_status,
+ pow2_mask (max_log2_ring_sz),
+ CLIB_CACHE_LINE_BYTES);
+ vec_validate_aligned (dma_info->data.copy_ops, 0,
+ CLIB_CACHE_LINE_BYTES);
+ vec_reset_length (dma_info->data.copy_ops);
+ vec_validate_aligned (dma_info->data.buffers, 0,
+ CLIB_CACHE_LINE_BYTES);
+ vec_reset_length (dma_info->data.buffers);
+ }
+ }
}
if (vec_len (mif->tx_queues) > 0)
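A note on the sizing above: vec_validate_aligned takes a last index, not a
length, so validating index pow2_mask (max_log2_ring_sz), which equals
(1 << max_log2_ring_sz) - 1, leaves each per-descriptor vector with exactly
one slot per ring entry. (By the same rule, validating mq->dma_info at
index MEMIF_DMA_INFO_SIZE yields MEMIF_DMA_INFO_SIZE + 1 entries.)
Illustratively:

    /* illustrative: vec_validate_aligned (v, n, a) grows v so that
       index n is valid, i.e. vec_len (v) == n + 1 afterwards */
    vec_validate_aligned (dma_info->data.desc_data,
			  pow2_mask (max_log2_ring_sz), /* ring size - 1 */
			  CLIB_CACHE_LINE_BYTES);
    ASSERT (vec_len (dma_info->data.desc_data) == 1 << max_log2_ring_sz);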
@@ -331,6 +362,37 @@ memif_connect (memif_if_t * mif)
qi = vnet_hw_if_register_rx_queue (vnm, mif->hw_if_index, i,
VNET_HW_IF_RXQ_THREAD_ANY);
mq->queue_index = qi;
+
+ if (mif->flags & MEMIF_IF_FLAG_USE_DMA)
+ {
+ memif_dma_info_t *dma_info;
+ mq->dma_head = 0;
+ mq->dma_tail = 0;
+ mq->dma_info_head = 0;
+ mq->dma_info_tail = 0;
+ mq->dma_info_size = MEMIF_DMA_INFO_SIZE;
+ vec_validate_aligned (mq->dma_info, MEMIF_DMA_INFO_SIZE,
+ CLIB_CACHE_LINE_BYTES);
+ vec_foreach (dma_info, mq->dma_info)
+ {
+ vec_validate_aligned (dma_info->data.desc_data,
+ pow2_mask (max_log2_ring_sz),
+ CLIB_CACHE_LINE_BYTES);
+ vec_validate_aligned (dma_info->data.desc_len,
+ pow2_mask (max_log2_ring_sz),
+ CLIB_CACHE_LINE_BYTES);
+ vec_validate_aligned (dma_info->data.desc_status,
+ pow2_mask (max_log2_ring_sz),
+ CLIB_CACHE_LINE_BYTES);
+ vec_validate_aligned (dma_info->data.copy_ops, 0,
+ CLIB_CACHE_LINE_BYTES);
+ vec_reset_length (dma_info->data.copy_ops);
+ vec_validate_aligned (dma_info->data.buffers, 0,
+ CLIB_CACHE_LINE_BYTES);
+ vec_reset_length (dma_info->data.buffers);
+ }
+ }
+
if (mq->int_fd > -1)
{
template.file_descriptor = mq->int_fd;
@@ -902,6 +964,16 @@ VNET_HW_INTERFACE_CLASS (memif_ip_hw_if_class, static) = {
};
/* *INDENT-ON* */
+static void
+memif_prepare_dma_args (vlib_dma_config_t *args)
+{
+ args->max_batches = 256;
+ args->max_transfer_size = VLIB_BUFFER_DEFAULT_DATA_SIZE;
+ args->barrier_before_last = 1;
+ args->sw_fallback = 1;
+ args->callback_fn = NULL;
+}
+
clib_error_t *
memif_create_if (vlib_main_t *vm, memif_create_if_args_t *args)
{
@@ -989,6 +1061,20 @@ memif_create_if (vlib_main_t *vm, memif_create_if_args_t *args)
if (args->secret)
mif->secret = vec_dup (args->secret);
+ /* register dma config if enabled */
+ if (args->use_dma)
+ {
+ vlib_dma_config_t dma_args;
+ bzero (&dma_args, sizeof (dma_args));
+ memif_prepare_dma_args (&dma_args);
+
+ dma_args.max_transfers = 1 << args->log2_ring_size;
+ dma_args.callback_fn = memif_dma_completion_cb;
+ mif->dma_input_config = vlib_dma_config_add (vm, &dma_args);
+ dma_args.callback_fn = memif_tx_dma_completion_cb;
+ mif->dma_tx_config = vlib_dma_config_add (vm, &dma_args);
+ }
+
if (mif->mode == MEMIF_INTERFACE_MODE_ETHERNET)
{
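The two config indices stored above (mif->dma_input_config and
mif->dma_tx_config) are consumed by the input and tx node paths, which are
outside this file. As a hedged sketch of how such a config drives the vlib
DMA batch API (batch functions as declared in vlib/dma/dma.h; dst, src,
n_bytes and dma_info_index are hypothetical placeholders):

    /* sketch, not part of this diff: submit one copy on the tx config */
    vlib_dma_batch_t *b = vlib_dma_batch_new (vm, mif->dma_tx_config);
    if (b == 0)
      return;	/* no batch available; caller would fall back to CPU copy */
    vlib_dma_batch_add (vm, b, dst, src, n_bytes);     /* queue a copy op */
    vlib_dma_batch_set_cookie (vm, b, dma_info_index); /* for the callback */
    vlib_dma_batch_submit (vm, b);  /* callback_fn runs on completion */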
@@ -1077,6 +1163,9 @@ memif_create_if (vlib_main_t *vm, memif_create_if_args_t *args)
mif->flags |= MEMIF_IF_FLAG_ZERO_COPY;
}
+ if (args->use_dma)
+ mif->flags |= MEMIF_IF_FLAG_USE_DMA;
+
vnet_hw_if_set_caps (vnm, mif->hw_if_index, VNET_HW_IF_CAP_INT_MODE);
vnet_hw_if_set_input_node (vnm, mif->hw_if_index, memif_input_node.index);
mhash_set (&msf->dev_instance_by_id, &mif->id, mif->dev_instance, 0);
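memif_dma_completion_cb and memif_tx_dma_completion_cb themselves live in
the memif node/device code rather than in memif.c. The contract, per the
callback_fn field set above, is a function receiving the vlib_main_t and
the completed vlib_dma_batch_t. Below is a hedged sketch of the retire step
implied by the head/tail fields added in this patch (memif_queue_from_cookie
is a hypothetical lookup helper, not a real function):

    /* sketch, not part of this diff: retire one completed batch */
    static void
    memif_tx_dma_completion_cb (vlib_main_t *vm, vlib_dma_batch_t *b)
    {
      /* the cookie was set at submission time to locate the queue/slot */
      u64 cookie = vlib_dma_batch_get_cookie (vm, b);
      memif_queue_t *mq = memif_queue_from_cookie (cookie);

      /* free the per-batch dma info slot in submission order */
      mq->dma_info_tail++;
      if (mq->dma_info_tail == mq->dma_info_size)
	mq->dma_info_tail = 0;

      /* ... advance ring pointers and free or enqueue buffers ... */
    }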