author      Marvin Liu <yong.liu@intel.com>     2022-08-17 09:38:40 +0800
committer   Damjan Marion <dmarion@0xa5.net>    2022-08-25 19:05:40 +0000
commit      abd5669422c5805da5135496d5e5a394fa5aa602 (patch)
tree        a464eb14b5e04b19042e92bb83ca7b8567731f19 /src/plugins/dma_intel/dsa_intel.h
parent      9a6ad01c0d443f002eafa709813d021bf0c98eac (diff)
vlib: introduce DMA infrastructure
This patch introduces a DMA infrastructure into vlib. It is well known that large memory movements drain core resources, and more and more hardware accelerators are being designed to free cores from this burden. At the same time, some restrictions remain when utilizing hardware accelerators: for example, cross-NUMA throughput drops significantly compared to same-node transfers. Normally the number of hardware accelerator instances is smaller than the number of cores, let alone the number of applications, which may exceed the core count. Some hardware can share virtual addresses with cores, while other hardware cannot. The DMA infrastructure introduced here fulfills the requirements of VPP applications such as session and memif while dealing with these hardware limitations.

Some design background:

A backend is the abstraction of a resource allocated from a DMA device; it supports basic operations such as configuration, DMA copy and result query.

A config is the abstraction of an application's DMA requirements. An application requests a unique config index from the DMA infrastructure; this config index is associated with backend resources. Two options can be specified in a config: "cpu fallback" and "barrier before last". If cpu fallback is enabled, the DMA transfer is performed by the CPU when the backend is busy. If barrier before last is enabled, DMA transfer callbacks are delivered in order.

Everything a DMA transfer request needs is assembled into a DMA batch. It contains the pattern of the DMA descriptors and the function pointers for submission and callback. One DMA transfer request requires multiple batch updates and a single batch submission.

DMA backends are assigned equally across a config's worker threads. A lock is used for thread safety when the same backend is assigned to multiple threads. The backend node checks all pending requests in the worker thread and, once a transfer has completed, invokes the callback with the pointer to the DMA batch. The application can use the cookie in the DMA batch for its own purposes.

DMA architecture:

+----------+  +----------+     +----------+  +----------+
| Config1  |  | Config2  |     | Config1  |  | Config2  |
+----------+  +----------+     +----------+  +----------+
     ||            ||               ||            ||
+-------------------------+    +-------------------------+
|  DMA polling thread A   |    |  DMA polling thread B   |
+-------------------------+    +-------------------------+
            ||                             ||
       +----------+                   +----------+
       | Backend1 |                   | Backend2 |
       +----------+                   +----------+

Type: feature

Signed-off-by: Marvin Liu <yong.liu@intel.com>
Change-Id: I1725e0c26687985aac29618c9abe4f5e0de08ebf
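For orientation, the sketch below shows how an application such as memif might use the workflow the commit message describes: request a config index, build a batch with several transfers, submit it once, and receive a completion callback. This is a minimal sketch assuming the vlib_dma_config_add / vlib_dma_batch_new / vlib_dma_batch_add / vlib_dma_batch_submit entry points and the cookie helpers from vlib/dma/dma.h; exact member names and signatures should be checked against that header.

#include <vlib/vlib.h>
#include <vlib/dma/dma.h>

/* Completion callback: invoked from the DMA polling node once all
 * transfers in the batch have finished (assumed callback signature). */
static void
my_dma_completed (vlib_main_t *vm, vlib_dma_batch_t *b)
{
  u64 cookie = vlib_dma_batch_get_cookie (vm, b); /* application context */
  (void) cookie; /* ... resume work associated with this batch ... */
}

/* Request a config index; the fields mirror the options named in the
 * commit message (cpu fallback, barrier before last) and the limits
 * visible in intel_dsa_channel_t.  Exact member names are assumptions. */
static int
my_dma_config (vlib_main_t *vm)
{
  vlib_dma_config_t cfg = {
    .max_transfers = 256,
    .max_transfer_size = 4096,
    .barrier_before_last = 1, /* keep completion callbacks ordered */
    .sw_fallback = 1,	      /* CPU copies when the backend is busy */
    .callback_fn = my_dma_completed,
  };
  return vlib_dma_config_add (vm, &cfg);
}

/* Build and submit one batch: multiple batch updates, one submission. */
static void
my_dma_copy (vlib_main_t *vm, int config_index, void *dst, void *src, u32 size)
{
  vlib_dma_batch_t *b = vlib_dma_batch_new (vm, config_index);
  if (b == 0)
    return; /* no backend resource available right now */
  vlib_dma_batch_add (vm, b, dst, src, size); /* repeat per transfer */
  vlib_dma_batch_set_cookie (vm, b, 0 /* user context id */);
  vlib_dma_batch_submit (vm, b);
}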
Diffstat (limited to 'src/plugins/dma_intel/dsa_intel.h')
-rw-r--r--  src/plugins/dma_intel/dsa_intel.h | 160
1 file changed, 160 insertions(+), 0 deletions(-)
diff --git a/src/plugins/dma_intel/dsa_intel.h b/src/plugins/dma_intel/dsa_intel.h
new file mode 100644
index 00000000000..a52d4bff323
--- /dev/null
+++ b/src/plugins/dma_intel/dsa_intel.h
@@ -0,0 +1,160 @@
+/* SPDX-License-Identifier: Apache-2.0
+ * Copyright (c) 2022 Intel and/or its affiliates.
+ */
+
+#ifndef __dma_intel_dsa_intel_h__
+#define __dma_intel_dsa_intel_h__
+
+#include <vlib/vlib.h>
+#include <vlib/dma/dma.h>
+#include <vlib/pci/pci.h>
+#include <vppinfra/format.h>
+typedef struct
+{
+ u32 pasid;
+ u32 op_flags;
+ u64 completion;
+ union
+ {
+ void *src;
+ void *desc_addr;
+ };
+ void *dst;
+ u32 size;
+ u16 intr_handle;
+ /* remaining 26 bytes are reserved */
+ u16 __reserved[13];
+} intel_dsa_desc_t;
+
+STATIC_ASSERT_SIZEOF (intel_dsa_desc_t, 64);
+
+#define DSA_DEV_PATH "/dev/dsa"
+#define SYS_DSA_PATH "/sys/bus/dsa/devices"
+
+typedef enum
+{
+ INTEL_DSA_DEVICE_TYPE_UNKNOWN,
+ INTEL_DSA_DEVICE_TYPE_KERNEL,
+ INTEL_DSA_DEVICE_TYPE_USER,
+ INTEL_DSA_DEVICE_TYPE_MDEV,
+} intel_dsa_wq_type_t;
+
+enum dsa_ops
+{
+ INTEL_DSA_OP_NOP = 0,
+ INTEL_DSA_OP_BATCH,
+ INTEL_DSA_OP_DRAIN,
+ INTEL_DSA_OP_MEMMOVE,
+ INTEL_DSA_OP_FILL
+};
+#define INTEL_DSA_OP_SHIFT 24
+#define INTEL_DSA_FLAG_FENCE (1 << 0)
+#define INTEL_DSA_FLAG_BLOCK_ON_FAULT (1 << 1)
+#define INTEL_DSA_FLAG_COMPLETION_ADDR_VALID (1 << 2)
+#define INTEL_DSA_FLAG_REQUEST_COMPLETION (1 << 3)
+#define INTEL_DSA_FLAG_CACHE_CONTROL (1 << 8)
+
+typedef struct
+{
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+ volatile void *portal; /* portal exposed by dedicated work queue */
+ u64 submitted;
+ u64 completed;
+ u64 sw_fallback;
+ u32 max_transfer_size; /* maximum size of each transfer */
+ u16 max_transfers; /* maximum number of transfers referenced in a batch */
+ u16 n_threads; /* number of threads using this channel */
+ u16 n_enq; /* number of batches currently enqueued */
+ union
+ {
+ u16 wq_control;
+ struct
+ {
+ u16 type : 2;
+ u16 state : 1;
+ u16 ats_disable : 1;
+ u16 block_on_fault : 1;
+ u16 mode : 1;
+ };
+ };
+ u8 lock; /* spinlock, only used if n_threads > 1 */
+ u8 numa; /* numa node */
+ u8 size; /* size of work queue */
+ u8 did; /* dsa device id */
+ u8 qid; /* work queue id */
+} intel_dsa_channel_t;
+
+typedef struct intel_dsa_batch
+{
+ CLIB_CACHE_LINE_ALIGN_MARK (start);
+ vlib_dma_batch_t batch; /* must be first */
+ intel_dsa_channel_t *ch;
+ u32 config_heap_index;
+ u32 max_transfers;
+ u32 config_index;
+ union
+ {
+ struct
+ {
+ u32 barrier_before_last : 1;
+ u32 sw_fallback : 1;
+ };
+ u32 features;
+ };
+ CLIB_CACHE_LINE_ALIGN_MARK (completion_cl);
+#define INTEL_DSA_STATUS_IDLE 0x0
+#define INTEL_DSA_STATUS_SUCCESS 0x1
+#define INTEL_DSA_STATUS_BUSY 0xa
+#define INTEL_DSA_STATUS_CPU_SUCCESS 0xb
+ u8 status;
+ /* to avoid a read-modify-write, completion is written as a 64-byte
+  * DMA FILL operation */
+ CLIB_CACHE_LINE_ALIGN_MARK (descriptors);
+ intel_dsa_desc_t descs[0];
+} intel_dsa_batch_t;
+
+STATIC_ASSERT_OFFSET_OF (intel_dsa_batch_t, batch, 0);
+
+typedef struct
+{
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+ intel_dsa_batch_t batch_template;
+ u32 alloc_size;
+ u32 max_transfers;
+ intel_dsa_batch_t **freelist;
+} intel_dsa_config_t;
+
+typedef struct
+{
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+ intel_dsa_channel_t *ch; /* channel used by this thread */
+ intel_dsa_batch_t **pending_batches;
+} intel_dsa_thread_t;
+
+typedef struct
+{
+ intel_dsa_channel_t ***channels;
+ intel_dsa_thread_t *dsa_threads;
+ intel_dsa_config_t *dsa_config_heap;
+ uword *dsa_config_heap_handle_by_config_index;
+ /* spinlock protecting pmem */
+ clib_spinlock_t lock;
+} intel_dsa_main_t;
+
+extern intel_dsa_main_t intel_dsa_main;
+extern vlib_dma_backend_t intel_dsa_backend;
+format_function_t format_intel_dsa_addr;
+
+#define dsa_log_debug(f, ...) \
+ vlib_log (VLIB_LOG_LEVEL_DEBUG, intel_dsa_log.class, "%s: " f, __func__, \
+ ##__VA_ARGS__)
+
+#define dsa_log_info(f, ...) \
+ vlib_log (VLIB_LOG_LEVEL_INFO, intel_dsa_log.class, "%s: " f, __func__, \
+ ##__VA_ARGS__)
+
+#define dsa_log_error(f, ...) \
+ vlib_log (VLIB_LOG_LEVEL_ERR, intel_dsa_log.class, "%s: " f, __func__, \
+ ##__VA_ARGS__)
+
+#endif
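To illustrate how the op and flag constants above fit together, here is a minimal sketch of filling one MEMMOVE descriptor. The helper name, the include path and the completion-address handling are assumptions for illustration only; the plugin's real descriptor build and portal submission code (writing descriptors through ch->portal) lives in dsa_intel.c and is not reproduced here.

#include <dma_intel/dsa_intel.h>

/* Compose op_flags and addresses for one memory-move descriptor,
 * using only the types and constants declared in this header. */
static inline void
example_fill_memmove_desc (intel_dsa_desc_t *desc, void *dst, void *src,
			   u32 size, u64 completion_addr)
{
  desc->op_flags = (INTEL_DSA_OP_MEMMOVE << INTEL_DSA_OP_SHIFT) |
		   INTEL_DSA_FLAG_COMPLETION_ADDR_VALID |
		   INTEL_DSA_FLAG_REQUEST_COMPLETION |
		   INTEL_DSA_FLAG_CACHE_CONTROL;
  desc->completion = completion_addr; /* where DSA writes the completion record */
  desc->src = src;
  desc->dst = dst;
  desc->size = size;
}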