Diffstat (limited to 'build/external/patches/dpdk_18.08/0001-net-mlx5-support-externally-allocated-mempool.patch')
-rw-r--r-- | build/external/patches/dpdk_18.08/0001-net-mlx5-support-externally-allocated-mempool.patch | 270 |
1 file changed, 0 insertions, 270 deletions
diff --git a/build/external/patches/dpdk_18.08/0001-net-mlx5-support-externally-allocated-mempool.patch b/build/external/patches/dpdk_18.08/0001-net-mlx5-support-externally-allocated-mempool.patch
deleted file mode 100644
index 87c9cf92469..00000000000
--- a/build/external/patches/dpdk_18.08/0001-net-mlx5-support-externally-allocated-mempool.patch
+++ /dev/null
@@ -1,270 +0,0 @@
-From bd42c77c457146bede32333558b4e0414b30683e Mon Sep 17 00:00:00 2001
-From: Yongseok Koh <yskoh@mellanox.com>
-Date: Fri, 24 Aug 2018 16:46:49 -0700
-Subject: [PATCH] net/mlx5: support externally allocated mempool
-
-When MLX PMD registers memory for DMA, it accesses the global memseg list
-of DPDK to maximize the range of registration so that LKey search can be
-more efficient. Granularity of MR registration is per page.
-
-Externally allocated memory shouldn't be used for DMA because it can't be
-searched in the memseg list and free event can't be tracked by DPDK.
-However, if the external memory is static (allocated on startup and never
-freed), such memory can also be registered by little tweak in the code.
-
-Signed-off-by: Yongseok Koh <yskoh@mellanox.com>
----
- drivers/net/mlx5/mlx5_mr.c   | 155 +++++++++++++++++++++++++++++++++++++++++++
- drivers/net/mlx5/mlx5_rxtx.h |  35 +++++++++-
- 2 files changed, 189 insertions(+), 1 deletion(-)
-
-diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c
-index 08105a443..876622e91 100644
---- a/drivers/net/mlx5/mlx5_mr.c
-+++ b/drivers/net/mlx5/mlx5_mr.c
-@@ -277,6 +277,23 @@ mr_find_next_chunk(struct mlx5_mr *mr, struct mlx5_mr_cache *entry,
- 	uintptr_t end = 0;
- 	uint32_t idx = 0;
- 
-+	/* MR for external memory doesn't have memseg list. */
-+	if (mr->msl == NULL) {
-+		struct ibv_mr *ibv_mr = mr->ibv_mr;
-+
-+		assert(mr->ms_bmp_n == 1);
-+		assert(mr->ms_n == 1);
-+		assert(base_idx == 0);
-+		/*
-+		 * Can't search it from memseg list but get it directly from
-+		 * verbs MR as there's only one chunk.
-+		 */
-+		entry->start = (uintptr_t)ibv_mr->addr;
-+		entry->end = (uintptr_t)ibv_mr->addr + mr->ibv_mr->length;
-+		entry->lkey = rte_cpu_to_be_32(mr->ibv_mr->lkey);
-+		/* Returning 1 ends iteration. */
-+		return 1;
-+	}
- 	for (idx = base_idx; idx < mr->ms_bmp_n; ++idx) {
- 		if (rte_bitmap_get(mr->ms_bmp, idx)) {
- 			const struct rte_memseg_list *msl;
-@@ -818,6 +835,7 @@ mlx5_mr_mem_event_free_cb(struct rte_eth_dev *dev, const void *addr, size_t len)
- 		mr = mr_lookup_dev_list(dev, &entry, start);
- 		if (mr == NULL)
- 			continue;
-+		assert(mr->msl); /* Can't be external memory. */
- 		ms = rte_mem_virt2memseg((void *)start, msl);
- 		assert(ms != NULL);
- 		assert(msl->page_sz == ms->hugepage_sz);
-@@ -1070,6 +1088,139 @@ mlx5_mr_flush_local_cache(struct mlx5_mr_ctrl *mr_ctrl)
- 		(void *)mr_ctrl, mr_ctrl->cur_gen);
- }
- 
-+/**
-+ * Called during rte_mempool_mem_iter() by mlx5_mr_update_ext_mp().
-+ *
-+ * Externally allocated chunk is registered and a MR is created for the chunk.
-+ * The MR object is added to the global list. If memseg list of a MR object
-+ * (mr->msl) is null, the MR object can be regarded as externally allocated
-+ * memory.
-+ *
-+ * Once external memory is registered, it should be static. If the memory is
-+ * freed and the virtual address range has different physical memory mapped
-+ * again, it may cause crash on device due to the wrong translation entry. PMD
-+ * can't track the free event of the external memory for now.
-+ */
-+static void
-+mlx5_mr_update_ext_mp_cb(struct rte_mempool *mp, void *opaque,
-+			 struct rte_mempool_memhdr *memhdr,
-+			 unsigned mem_idx __rte_unused)
-+{
-+	struct mr_update_mp_data *data = opaque;
-+	struct rte_eth_dev *dev = data->dev;
-+	struct priv *priv = dev->data->dev_private;
-+	struct mlx5_mr_ctrl *mr_ctrl = data->mr_ctrl;
-+	struct mlx5_mr *mr = NULL;
-+	uintptr_t addr = (uintptr_t)memhdr->addr;
-+	size_t len = memhdr->len;
-+	struct mlx5_mr_cache entry;
-+	uint32_t lkey;
-+
-+	/* If already registered, it should return. */
-+	rte_rwlock_read_lock(&priv->mr.rwlock);
-+	lkey = mr_lookup_dev(dev, &entry, addr);
-+	rte_rwlock_read_unlock(&priv->mr.rwlock);
-+	if (lkey != UINT32_MAX)
-+		return;
-+	mr = rte_zmalloc_socket(NULL,
-+				RTE_ALIGN_CEIL(sizeof(*mr),
-+					       RTE_CACHE_LINE_SIZE),
-+				RTE_CACHE_LINE_SIZE, mp->socket_id);
-+	if (mr == NULL) {
-+		DRV_LOG(WARNING,
-+			"port %u unable to allocate memory for a new MR of"
-+			" mempool (%s).",
-+			dev->data->port_id, mp->name);
-+		data->ret = -1;
-+		return;
-+	}
-+	DRV_LOG(DEBUG, "port %u register MR for chunk #%d of mempool (%s)",
-+		dev->data->port_id, mem_idx, mp->name);
-+	mr->ibv_mr = mlx5_glue->reg_mr(priv->pd, (void *)addr, len,
-+				       IBV_ACCESS_LOCAL_WRITE);
-+	if (mr->ibv_mr == NULL) {
-+		DRV_LOG(WARNING,
-+			"port %u fail to create a verbs MR for address (%p)",
-+			dev->data->port_id, (void *)addr);
-+		rte_free(mr);
-+		data->ret = -1;
-+		return;
-+	}
-+	mr->msl = NULL; /* Mark it is external memory. */
-+	mr->ms_bmp = NULL;
-+	mr->ms_n = 1;
-+	mr->ms_bmp_n = 1;
-+	rte_rwlock_write_lock(&priv->mr.rwlock);
-+	LIST_INSERT_HEAD(&priv->mr.mr_list, mr, mr);
-+	DRV_LOG(DEBUG,
-+		"port %u MR CREATED (%p) for external memory %p:\n"
-+		"  [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
-+		" lkey=0x%x base_idx=%u ms_n=%u, ms_bmp_n=%u",
-+		dev->data->port_id, (void *)mr, (void *)addr,
-+		addr, addr + len, rte_cpu_to_be_32(mr->ibv_mr->lkey),
-+		mr->ms_base_idx, mr->ms_n, mr->ms_bmp_n);
-+	/* Insert to the global cache table. */
-+	mr_insert_dev_cache(dev, mr);
-+	rte_rwlock_write_unlock(&priv->mr.rwlock);
-+	/* Insert to the local cache table */
-+	mlx5_mr_addr2mr_bh(dev, mr_ctrl, addr);
-+}
-+
-+/**
-+ * Register MR for entire memory chunks in a Mempool having externally allocated
-+ * memory and fill in local cache.
-+ *
-+ * @param dev
-+ *   Pointer to Ethernet device.
-+ * @param mr_ctrl
-+ *   Pointer to per-queue MR control structure.
-+ * @param mp
-+ *   Pointer to registering Mempool.
-+ *
-+ * @return
-+ *   0 on success, -1 on failure.
-+ */
-+static uint32_t
-+mlx5_mr_update_ext_mp(struct rte_eth_dev *dev, struct mlx5_mr_ctrl *mr_ctrl,
-+		      struct rte_mempool *mp)
-+{
-+	struct mr_update_mp_data data = {
-+		.dev = dev,
-+		.mr_ctrl = mr_ctrl,
-+		.ret = 0,
-+	};
-+
-+	rte_mempool_mem_iter(mp, mlx5_mr_update_ext_mp_cb, &data);
-+	return data.ret;
-+}
-+
-+/**
-+ * Register MR entire memory chunks in a Mempool having externally allocated
-+ * memory and search LKey of the address to return.
-+ *
-+ * @param dev
-+ *   Pointer to Ethernet device.
-+ * @param addr
-+ *   Search key.
-+ * @param mp
-+ *   Pointer to registering Mempool where addr belongs.
-+ *
-+ * @return
-+ *   LKey for address on success, UINT32_MAX on failure.
-+ */
-+uint32_t
-+mlx5_tx_update_ext_mp(struct mlx5_txq_data *txq, uintptr_t addr,
-+		      struct rte_mempool *mp)
-+{
-+	struct mlx5_txq_ctrl *txq_ctrl =
-+		container_of(txq, struct mlx5_txq_ctrl, txq);
-+	struct mlx5_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
-+	struct priv *priv = txq_ctrl->priv;
-+
-+	mlx5_mr_update_ext_mp(ETH_DEV(priv), mr_ctrl, mp);
-+	return mlx5_tx_addr2mr_bh(txq, addr);
-+}
-+
- /* Called during rte_mempool_mem_iter() by mlx5_mr_update_mp(). */
- static void
- mlx5_mr_update_mp_cb(struct rte_mempool *mp __rte_unused, void *opaque,
-@@ -1113,6 +1264,10 @@ mlx5_mr_update_mp(struct rte_eth_dev *dev, struct mlx5_mr_ctrl *mr_ctrl,
- 	};
- 
- 	rte_mempool_mem_iter(mp, mlx5_mr_update_mp_cb, &data);
-+	if (data.ret < 0 && rte_errno == ENXIO) {
-+		/* Mempool may have externally allocated memory. */
-+		return mlx5_mr_update_ext_mp(dev, mr_ctrl, mp);
-+	}
- 	return data.ret;
- }
- 
-diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
-index f53bb43c3..b61c23b33 100644
---- a/drivers/net/mlx5/mlx5_rxtx.h
-+++ b/drivers/net/mlx5/mlx5_rxtx.h
-@@ -347,6 +347,8 @@ uint16_t mlx5_rx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts,
- void mlx5_mr_flush_local_cache(struct mlx5_mr_ctrl *mr_ctrl);
- uint32_t mlx5_rx_addr2mr_bh(struct mlx5_rxq_data *rxq, uintptr_t addr);
- uint32_t mlx5_tx_addr2mr_bh(struct mlx5_txq_data *txq, uintptr_t addr);
-+uint32_t mlx5_tx_update_ext_mp(struct mlx5_txq_data *txq, uintptr_t addr,
-+			       struct rte_mempool *mp);
- 
- #ifndef NDEBUG
- /**
-@@ -534,6 +536,24 @@ mlx5_tx_complete(struct mlx5_txq_data *txq)
- }
- 
- /**
-+ * Get Memory Pool (MP) from mbuf. If mbuf is indirect, the pool from which the
-+ * cloned mbuf is allocated is returned instead.
-+ *
-+ * @param buf
-+ *   Pointer to mbuf.
-+ *
-+ * @return
-+ *   Memory pool where data is located for given mbuf.
-+ */
-+static struct rte_mempool *
-+mlx5_mb2mp(struct rte_mbuf *buf)
-+{
-+	if (unlikely(RTE_MBUF_INDIRECT(buf)))
-+		return rte_mbuf_from_indirect(buf)->pool;
-+	return buf->pool;
-+}
-+
-+/**
-  * Query LKey from a packet buffer for Rx. No need to flush local caches for Rx
-  * as mempool is pre-configured and static.
-  *
-@@ -591,7 +611,20 @@ mlx5_tx_addr2mr(struct mlx5_txq_data *txq, uintptr_t addr)
- 	return mlx5_tx_addr2mr_bh(txq, addr);
- }
- 
--#define mlx5_tx_mb2mr(rxq, mb) mlx5_tx_addr2mr(rxq, (uintptr_t)((mb)->buf_addr))
-+static __rte_always_inline uint32_t
-+mlx5_tx_mb2mr(struct mlx5_txq_data *txq, struct rte_mbuf *mb)
-+{
-+	uintptr_t addr = (uintptr_t)mb->buf_addr;
-+	uint32_t lkey = mlx5_tx_addr2mr(txq, addr);
-+
-+	if (likely(lkey != UINT32_MAX))
-+		return lkey;
-+	if (rte_errno == ENXIO) {
-+		/* Mempool may have externally allocated memory. */
-+		lkey = mlx5_tx_update_ext_mp(txq, addr, mlx5_mb2mp(mb));
-+	}
-+	return lkey;
-+}
- 
- /**
-  * Ring TX queue doorbell and flush the update if requested.
--- 
-2.11.0
-
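
For context, the deleted patch only covers the PMD side of the mechanism; the application still has to place its mbuf pool on the externally allocated memory. The sketch below is illustrative only and is not part of the patch: it shows one plausible way to build such a pool with DPDK 18.08 mempool APIs (rte_mempool_create_empty(), rte_mempool_set_ops_byname(), rte_pktmbuf_pool_init(), rte_mempool_populate_iova(), rte_pktmbuf_init()). All names and sizes are hypothetical, IOVA-as-VA mode is assumed so the virtual address can stand in for the IOVA, and the mapping is assumed to stay in place for the lifetime of the process, exactly as the patch requires of external memory.

/*
 * Illustrative sketch, not part of the deleted patch: build an mbuf pool on
 * externally allocated, static memory (DPDK 18.08 APIs).  Hypothetical names
 * and sizes; assumes IOVA-as-VA and that the mapping is never unmapped.
 */
#define _GNU_SOURCE
#include <sys/mman.h>

#include <rte_mbuf.h>
#include <rte_mempool.h>

#define EXT_NB_MBUFS  8192u
#define EXT_ELT_SIZE  (sizeof(struct rte_mbuf) + RTE_MBUF_DEFAULT_BUF_SIZE)
/* Over-allocate to leave room for per-object mempool headers and padding,
 * and round up to a 2 MB boundary for MAP_HUGETLB. */
#define EXT_MEM_LEN   RTE_ALIGN_CEIL(EXT_NB_MBUFS * (EXT_ELT_SIZE + 128), \
				     1u << 21)

static struct rte_mempool *
ext_pktmbuf_pool_create(const char *name, int socket_id)
{
	struct rte_mempool *mp;
	void *base;

	/* Externally allocated, hugepage-backed, never freed. */
	base = mmap(NULL, EXT_MEM_LEN, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (base == MAP_FAILED)
		return NULL;
	mp = rte_mempool_create_empty(name, EXT_NB_MBUFS, EXT_ELT_SIZE, 0,
				      sizeof(struct rte_pktmbuf_pool_private),
				      socket_id, 0);
	if (mp == NULL)
		return NULL;
	rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL);
	rte_pktmbuf_pool_init(mp, NULL);
	/* IOVA == VA assumed here; a real application must supply the IOVA
	 * that is valid for its platform and IOMMU setup. */
	if (rte_mempool_populate_iova(mp, base, (rte_iova_t)(uintptr_t)base,
				      EXT_MEM_LEN, NULL, NULL) < 0) {
		rte_mempool_free(mp);
		return NULL;
	}
	rte_mempool_obj_iter(mp, rte_pktmbuf_init, NULL);
	return mp;
}

With a pool like this, the first transmit of one of its mbufs misses the per-queue MR cache and the bottom-half lookup fails with rte_errno set to ENXIO, since the buffer address is not backed by the DPDK memseg list; the mlx5_tx_mb2mr() fallback added by the patch then calls mlx5_tx_update_ext_mp(), which walks the mempool with rte_mempool_mem_iter() and registers each chunk through the verbs reg_mr path (mlx5_glue->reg_mr()).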