author     Damjan Marion <damarion@cisco.com>       2018-09-18 18:41:38 +0200
committer  Damjan Marion <damarion@cisco.com>       2018-09-20 14:30:54 +0200
commit     4a6cb83d334e391f85332ea38a2e467ac7743e4b (patch)
tree       7d667a9d07c33e1a948f73892169f61da595ca4f /build/external/dpdk-18.08_patches
parent     9c0a3c423ee0b9326f600a00c1bd46fef45d4975 (diff)
rename vpp-dpdk-dev to vpp-ext-deps
We need to have new tenants in the development package.
This is the first of a series of patches which will allow us to have multiple
external libs and tools packaged for the developer's convenience.
Change-Id: I884bd75fba96005bbf8cea92774682b2228e0e22
Signed-off-by: Damjan Marion <damarion@cisco.com>
Diffstat (limited to 'build/external/dpdk-18.08_patches')
 build/external/dpdk-18.08_patches/0001-net-mlx5-support-externally-allocated-mempool.patch    | 270
 build/external/dpdk-18.08_patches/0002-mlx4-support-externally-allocated-mempool.patch        | 250
 build/external/dpdk-18.08_patches/0003-ixgbe-wait-longer-for-link-after-fiber-MAC-setup.patch |  39
3 files changed, 559 insertions, 0 deletions
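Background for the two Mellanox patches below: VPP populates its packet-buffer mempools from memory it manages itself rather than from DPDK's own hugepage memsegs, so the mlx4/mlx5 PMDs cannot find that memory in the global memseg list and LKey lookup fails with ENXIO; the patches teach the PMDs to register such chunks directly through verbs. As a rough illustration of the scenario (not VPP's actual buffer code), here is a minimal sketch of building a mempool on an externally allocated chunk with stock DPDK 18.08 APIs; the helper name make_ext_pool and all sizes and counts are illustrative assumptions.

#include <rte_mbuf.h>
#include <rte_mempool.h>

/* Sketch: build a pktmbuf pool on a caller-managed chunk instead of letting
 * DPDK allocate from its own memseg lists. ext_va/ext_iova/ext_len describe
 * memory the application obtained elsewhere (e.g. its own hugepage mapping). */
static struct rte_mempool *
make_ext_pool(void *ext_va, rte_iova_t ext_iova, size_t ext_len)
{
	unsigned int n = 8192;				/* example mbuf count */
	unsigned int elt_size = sizeof(struct rte_mbuf) +
				RTE_PKTMBUF_HEADROOM + 2048;
	struct rte_mempool *mp;

	mp = rte_mempool_create_empty("ext-pool", n, elt_size, 256,
				      sizeof(struct rte_pktmbuf_pool_private),
				      SOCKET_ID_ANY, 0);
	if (mp == NULL)
		return NULL;
	rte_pktmbuf_pool_init(mp, NULL);
	/* Hand the external chunk to the mempool. No free callback is given
	 * because the memory stays mapped for the life of the process, which
	 * is exactly the "static" requirement the mlx4/mlx5 patches state. */
	if (rte_mempool_populate_iova(mp, ext_va, ext_iova, ext_len,
				      NULL, NULL) < 0) {
		rte_mempool_free(mp);
		return NULL;
	}
	rte_mempool_obj_iter(mp, rte_pktmbuf_init, NULL);
	return mp;
}

With the mlx drivers as shipped in DPDK 18.08, TX from such a pool fails LKey lookup; with patches 0001/0002 applied, the first miss triggers registration of every chunk in the pool.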
diff --git a/build/external/dpdk-18.08_patches/0001-net-mlx5-support-externally-allocated-mempool.patch b/build/external/dpdk-18.08_patches/0001-net-mlx5-support-externally-allocated-mempool.patch
new file mode 100644
index 00000000000..87c9cf92469
--- /dev/null
+++ b/build/external/dpdk-18.08_patches/0001-net-mlx5-support-externally-allocated-mempool.patch
@@ -0,0 +1,270 @@
+From bd42c77c457146bede32333558b4e0414b30683e Mon Sep 17 00:00:00 2001
+From: Yongseok Koh <yskoh@mellanox.com>
+Date: Fri, 24 Aug 2018 16:46:49 -0700
+Subject: [PATCH] net/mlx5: support externally allocated mempool
+
+When MLX PMD registers memory for DMA, it accesses the global memseg list
+of DPDK to maximize the range of registration so that LKey search can be
+more efficient. Granularity of MR registration is per page.
+
+Externally allocated memory shouldn't be used for DMA because it can't be
+searched in the memseg list and free event can't be tracked by DPDK.
+However, if the external memory is static (allocated on startup and never
+freed), such memory can also be registered by little tweak in the code.
+
+Signed-off-by: Yongseok Koh <yskoh@mellanox.com>
+---
+ drivers/net/mlx5/mlx5_mr.c   | 155 +++++++++++++++++++++++++++++++++++++++++++
+ drivers/net/mlx5/mlx5_rxtx.h |  35 +++++++++-
+ 2 files changed, 189 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c
+index 08105a443..876622e91 100644
+--- a/drivers/net/mlx5/mlx5_mr.c
++++ b/drivers/net/mlx5/mlx5_mr.c
+@@ -277,6 +277,23 @@ mr_find_next_chunk(struct mlx5_mr *mr, struct mlx5_mr_cache *entry,
+ 	uintptr_t end = 0;
+ 	uint32_t idx = 0;
+ 
++	/* MR for external memory doesn't have memseg list. */
++	if (mr->msl == NULL) {
++		struct ibv_mr *ibv_mr = mr->ibv_mr;
++
++		assert(mr->ms_bmp_n == 1);
++		assert(mr->ms_n == 1);
++		assert(base_idx == 0);
++		/*
++		 * Can't search it from memseg list but get it directly from
++		 * verbs MR as there's only one chunk.
++		 */
++		entry->start = (uintptr_t)ibv_mr->addr;
++		entry->end = (uintptr_t)ibv_mr->addr + mr->ibv_mr->length;
++		entry->lkey = rte_cpu_to_be_32(mr->ibv_mr->lkey);
++		/* Returning 1 ends iteration. */
++		return 1;
++	}
+ 	for (idx = base_idx; idx < mr->ms_bmp_n; ++idx) {
+ 		if (rte_bitmap_get(mr->ms_bmp, idx)) {
+ 			const struct rte_memseg_list *msl;
+@@ -818,6 +835,7 @@ mlx5_mr_mem_event_free_cb(struct rte_eth_dev *dev, const void *addr, size_t len)
+ 		mr = mr_lookup_dev_list(dev, &entry, start);
+ 		if (mr == NULL)
+ 			continue;
++		assert(mr->msl); /* Can't be external memory. */
+ 		ms = rte_mem_virt2memseg((void *)start, msl);
+ 		assert(ms != NULL);
+ 		assert(msl->page_sz == ms->hugepage_sz);
+@@ -1070,6 +1088,139 @@ mlx5_mr_flush_local_cache(struct mlx5_mr_ctrl *mr_ctrl)
+ 		(void *)mr_ctrl, mr_ctrl->cur_gen);
+ }
+ 
++/**
++ * Called during rte_mempool_mem_iter() by mlx5_mr_update_ext_mp().
++ *
++ * Externally allocated chunk is registered and a MR is created for the chunk.
++ * The MR object is added to the global list. If memseg list of a MR object
++ * (mr->msl) is null, the MR object can be regarded as externally allocated
++ * memory.
++ *
++ * Once external memory is registered, it should be static. If the memory is
++ * freed and the virtual address range has different physical memory mapped
++ * again, it may cause crash on device due to the wrong translation entry. PMD
++ * can't track the free event of the external memory for now.
++ */
++static void
++mlx5_mr_update_ext_mp_cb(struct rte_mempool *mp, void *opaque,
++			 struct rte_mempool_memhdr *memhdr,
++			 unsigned mem_idx __rte_unused)
++{
++	struct mr_update_mp_data *data = opaque;
++	struct rte_eth_dev *dev = data->dev;
++	struct priv *priv = dev->data->dev_private;
++	struct mlx5_mr_ctrl *mr_ctrl = data->mr_ctrl;
++	struct mlx5_mr *mr = NULL;
++	uintptr_t addr = (uintptr_t)memhdr->addr;
++	size_t len = memhdr->len;
++	struct mlx5_mr_cache entry;
++	uint32_t lkey;
++
++	/* If already registered, it should return. */
++	rte_rwlock_read_lock(&priv->mr.rwlock);
++	lkey = mr_lookup_dev(dev, &entry, addr);
++	rte_rwlock_read_unlock(&priv->mr.rwlock);
++	if (lkey != UINT32_MAX)
++		return;
++	mr = rte_zmalloc_socket(NULL,
++				RTE_ALIGN_CEIL(sizeof(*mr),
++					       RTE_CACHE_LINE_SIZE),
++				RTE_CACHE_LINE_SIZE, mp->socket_id);
++	if (mr == NULL) {
++		DRV_LOG(WARNING,
++			"port %u unable to allocate memory for a new MR of"
++			" mempool (%s).",
++			dev->data->port_id, mp->name);
++		data->ret = -1;
++		return;
++	}
++	DRV_LOG(DEBUG, "port %u register MR for chunk #%d of mempool (%s)",
++		dev->data->port_id, mem_idx, mp->name);
++	mr->ibv_mr = mlx5_glue->reg_mr(priv->pd, (void *)addr, len,
++				       IBV_ACCESS_LOCAL_WRITE);
++	if (mr->ibv_mr == NULL) {
++		DRV_LOG(WARNING,
++			"port %u fail to create a verbs MR for address (%p)",
++			dev->data->port_id, (void *)addr);
++		rte_free(mr);
++		data->ret = -1;
++		return;
++	}
++	mr->msl = NULL; /* Mark it is external memory. */
++	mr->ms_bmp = NULL;
++	mr->ms_n = 1;
++	mr->ms_bmp_n = 1;
++	rte_rwlock_write_lock(&priv->mr.rwlock);
++	LIST_INSERT_HEAD(&priv->mr.mr_list, mr, mr);
++	DRV_LOG(DEBUG,
++		"port %u MR CREATED (%p) for external memory %p:\n"
++		"  [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
++		" lkey=0x%x base_idx=%u ms_n=%u, ms_bmp_n=%u",
++		dev->data->port_id, (void *)mr, (void *)addr,
++		addr, addr + len, rte_cpu_to_be_32(mr->ibv_mr->lkey),
++		mr->ms_base_idx, mr->ms_n, mr->ms_bmp_n);
++	/* Insert to the global cache table. */
++	mr_insert_dev_cache(dev, mr);
++	rte_rwlock_write_unlock(&priv->mr.rwlock);
++	/* Insert to the local cache table */
++	mlx5_mr_addr2mr_bh(dev, mr_ctrl, addr);
++}
++
++/**
++ * Register MR for entire memory chunks in a Mempool having externally allocated
++ * memory and fill in local cache.
++ *
++ * @param dev
++ *   Pointer to Ethernet device.
++ * @param mr_ctrl
++ *   Pointer to per-queue MR control structure.
++ * @param mp
++ *   Pointer to registering Mempool.
++ *
++ * @return
++ *   0 on success, -1 on failure.
++ */
++static uint32_t
++mlx5_mr_update_ext_mp(struct rte_eth_dev *dev, struct mlx5_mr_ctrl *mr_ctrl,
++		      struct rte_mempool *mp)
++{
++	struct mr_update_mp_data data = {
++		.dev = dev,
++		.mr_ctrl = mr_ctrl,
++		.ret = 0,
++	};
++
++	rte_mempool_mem_iter(mp, mlx5_mr_update_ext_mp_cb, &data);
++	return data.ret;
++}
++
++/**
++ * Register MR entire memory chunks in a Mempool having externally allocated
++ * memory and search LKey of the address to return.
++ *
++ * @param dev
++ *   Pointer to Ethernet device.
++ * @param addr
++ *   Search key.
++ * @param mp
++ *   Pointer to registering Mempool where addr belongs.
++ *
++ * @return
++ *   LKey for address on success, UINT32_MAX on failure.
++ */
++uint32_t
++mlx5_tx_update_ext_mp(struct mlx5_txq_data *txq, uintptr_t addr,
++		      struct rte_mempool *mp)
++{
++	struct mlx5_txq_ctrl *txq_ctrl =
++		container_of(txq, struct mlx5_txq_ctrl, txq);
++	struct mlx5_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
++	struct priv *priv = txq_ctrl->priv;
++
++	mlx5_mr_update_ext_mp(ETH_DEV(priv), mr_ctrl, mp);
++	return mlx5_tx_addr2mr_bh(txq, addr);
++}
++
+ /* Called during rte_mempool_mem_iter() by mlx5_mr_update_mp(). */
+ static void
+ mlx5_mr_update_mp_cb(struct rte_mempool *mp __rte_unused, void *opaque,
+@@ -1113,6 +1264,10 @@ mlx5_mr_update_mp(struct rte_eth_dev *dev, struct mlx5_mr_ctrl *mr_ctrl,
+ 	};
+ 
+ 	rte_mempool_mem_iter(mp, mlx5_mr_update_mp_cb, &data);
++	if (data.ret < 0 && rte_errno == ENXIO) {
++		/* Mempool may have externally allocated memory. */
++		return mlx5_mr_update_ext_mp(dev, mr_ctrl, mp);
++	}
+ 	return data.ret;
+ }
+ 
+diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
+index f53bb43c3..b61c23b33 100644
+--- a/drivers/net/mlx5/mlx5_rxtx.h
++++ b/drivers/net/mlx5/mlx5_rxtx.h
+@@ -347,6 +347,8 @@ uint16_t mlx5_rx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts,
+ void mlx5_mr_flush_local_cache(struct mlx5_mr_ctrl *mr_ctrl);
+ uint32_t mlx5_rx_addr2mr_bh(struct mlx5_rxq_data *rxq, uintptr_t addr);
+ uint32_t mlx5_tx_addr2mr_bh(struct mlx5_txq_data *txq, uintptr_t addr);
++uint32_t mlx5_tx_update_ext_mp(struct mlx5_txq_data *txq, uintptr_t addr,
++			       struct rte_mempool *mp);
+ 
+ #ifndef NDEBUG
+ /**
+@@ -534,6 +536,24 @@ mlx5_tx_complete(struct mlx5_txq_data *txq)
+ }
+ 
+ /**
++ * Get Memory Pool (MP) from mbuf. If mbuf is indirect, the pool from which the
++ * cloned mbuf is allocated is returned instead.
++ *
++ * @param buf
++ *   Pointer to mbuf.
++ *
++ * @return
++ *   Memory pool where data is located for given mbuf.
++ */
++static struct rte_mempool *
++mlx5_mb2mp(struct rte_mbuf *buf)
++{
++	if (unlikely(RTE_MBUF_INDIRECT(buf)))
++		return rte_mbuf_from_indirect(buf)->pool;
++	return buf->pool;
++}
++
++/**
+  * Query LKey from a packet buffer for Rx. No need to flush local caches for Rx
+  * as mempool is pre-configured and static.
+  *
+@@ -591,7 +611,20 @@ mlx5_tx_addr2mr(struct mlx5_txq_data *txq, uintptr_t addr)
+ 	return mlx5_tx_addr2mr_bh(txq, addr);
+ }
+ 
+-#define mlx5_tx_mb2mr(rxq, mb) mlx5_tx_addr2mr(rxq, (uintptr_t)((mb)->buf_addr))
++static __rte_always_inline uint32_t
++mlx5_tx_mb2mr(struct mlx5_txq_data *txq, struct rte_mbuf *mb)
++{
++	uintptr_t addr = (uintptr_t)mb->buf_addr;
++	uint32_t lkey = mlx5_tx_addr2mr(txq, addr);
++
++	if (likely(lkey != UINT32_MAX))
++		return lkey;
++	if (rte_errno == ENXIO) {
++		/* Mempool may have externally allocated memory. */
++		lkey = mlx5_tx_update_ext_mp(txq, addr, mlx5_mb2mp(mb));
++	}
++	return lkey;
++}
+ 
+ /**
+  * Ring TX queue doorbell and flush the update if requested.
+--
+2.11.0
+
diff --git a/build/external/dpdk-18.08_patches/0002-mlx4-support-externally-allocated-mempool.patch b/build/external/dpdk-18.08_patches/0002-mlx4-support-externally-allocated-mempool.patch
new file mode 100644
index 00000000000..b32862335e5
--- /dev/null
+++ b/build/external/dpdk-18.08_patches/0002-mlx4-support-externally-allocated-mempool.patch
@@ -0,0 +1,250 @@
+From c947fd2ec67e9bbacb8b106f320f6e6bae5a9731 Mon Sep 17 00:00:00 2001
+From: Matthew Smith <mgsmith@netgate.com>
+Date: Tue, 28 Aug 2018 13:21:04 -0500
+Subject: [PATCH] mlx4: support externally allocated mempool
+
+Port Mellanox mlx5 PMD patch to work for mlx4 PMD.
+
+Signed-off-by: Matthew Smith <mgsmith@netgate.com>
+---
+ drivers/net/mlx4/mlx4_mr.c   | 150 +++++++++++++++++++++++++++++++++++++++++++
+ drivers/net/mlx4/mlx4_rxtx.h |  35 +++++++++-
+ 2 files changed, 184 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/mlx4/mlx4_mr.c b/drivers/net/mlx4/mlx4_mr.c
+index d23d3c613..55e5555ce 100644
+--- a/drivers/net/mlx4/mlx4_mr.c
++++ b/drivers/net/mlx4/mlx4_mr.c
+@@ -289,6 +289,23 @@ mr_find_next_chunk(struct mlx4_mr *mr, struct mlx4_mr_cache *entry,
+ 	uintptr_t end = 0;
+ 	uint32_t idx = 0;
+ 
++	/* MR for external memory doesn't have memseg list. */
++	if (mr->msl == NULL) {
++		struct ibv_mr *ibv_mr = mr->ibv_mr;
++
++		assert(mr->ms_bmp_n == 1);
++		assert(mr->ms_n == 1);
++		assert(base_idx == 0);
++		/*
++		 * Can't search it from memseg list but get it directly from
++		 * verbs MR as there's only one chunk.
++		 */
++		entry->start = (uintptr_t)ibv_mr->addr;
++		entry->end = (uintptr_t)ibv_mr->addr + mr->ibv_mr->length;
++		entry->lkey = rte_cpu_to_be_32(mr->ibv_mr->lkey);
++		/* Returning 1 ends iteration. */
++		return 1;
++	}
+ 	for (idx = base_idx; idx < mr->ms_bmp_n; ++idx) {
+ 		if (rte_bitmap_get(mr->ms_bmp, idx)) {
+ 			const struct rte_memseg_list *msl;
+@@ -809,6 +826,7 @@ mlx4_mr_mem_event_free_cb(struct rte_eth_dev *dev, const void *addr, size_t len)
+ 		mr = mr_lookup_dev_list(dev, &entry, start);
+ 		if (mr == NULL)
+ 			continue;
++		assert(mr->msl); /* Can't be external memory. */
+ 		ms = rte_mem_virt2memseg((void *)start, msl);
+ 		assert(ms != NULL);
+ 		assert(msl->page_sz == ms->hugepage_sz);
+@@ -1055,6 +1073,134 @@ mlx4_mr_flush_local_cache(struct mlx4_mr_ctrl *mr_ctrl)
+ 		(void *)mr_ctrl, mr_ctrl->cur_gen);
+ }
+ 
++/**
++ * Called during rte_mempool_mem_iter() by mlx4_mr_update_ext_mp().
++ *
++ * Externally allocated chunk is registered and a MR is created for the chunk.
++ * The MR object is added to the global list. If memseg list of a MR object
++ * (mr->msl) is null, the MR object can be regarded as externally allocated
++ * memory.
++ *
++ * Once external memory is registered, it should be static. If the memory is
++ * freed and the virtual address range has different physical memory mapped
++ * again, it may cause crash on device due to the wrong translation entry. PMD
++ * can't track the free event of the external memory for now.
++ */
++static void
++mlx4_mr_update_ext_mp_cb(struct rte_mempool *mp, void *opaque,
++			 struct rte_mempool_memhdr *memhdr,
++			 unsigned mem_idx __rte_unused)
++{
++	struct mr_update_mp_data *data = opaque;
++	struct rte_eth_dev *dev = data->dev;
++	struct priv *priv = dev->data->dev_private;
++	struct mlx4_mr_ctrl *mr_ctrl = data->mr_ctrl;
++	struct mlx4_mr *mr = NULL;
++	uintptr_t addr = (uintptr_t)memhdr->addr;
++	size_t len = memhdr->len;
++	struct mlx4_mr_cache entry;
++	uint32_t lkey;
++
++	/* If already registered, it should return. */
++	rte_rwlock_read_lock(&priv->mr.rwlock);
++	lkey = mr_lookup_dev(dev, &entry, addr);
++	rte_rwlock_read_unlock(&priv->mr.rwlock);
++	if (lkey != UINT32_MAX)
++		return;
++	mr = rte_zmalloc_socket(NULL,
++				RTE_ALIGN_CEIL(sizeof(*mr),
++					       RTE_CACHE_LINE_SIZE),
++				RTE_CACHE_LINE_SIZE, mp->socket_id);
++	if (mr == NULL) {
++		WARN("port %u unable to allocate memory for a new MR of"
++		     " mempool (%s).",
++		     dev->data->port_id, mp->name);
++		data->ret = -1;
++		return;
++	}
++	DEBUG("port %u register MR for chunk #%d of mempool (%s)",
++	      dev->data->port_id, mem_idx, mp->name);
++	mr->ibv_mr = mlx4_glue->reg_mr(priv->pd, (void *)addr, len,
++				       IBV_ACCESS_LOCAL_WRITE);
++	if (mr->ibv_mr == NULL) {
++		WARN("port %u fail to create a verbs MR for address (%p)",
++		     dev->data->port_id, (void *)addr);
++		rte_free(mr);
++		data->ret = -1;
++		return;
++	}
++	mr->msl = NULL; /* Mark it is external memory. */
++	mr->ms_bmp = NULL;
++	mr->ms_n = 1;
++	mr->ms_bmp_n = 1;
++	rte_rwlock_write_lock(&priv->mr.rwlock);
++	LIST_INSERT_HEAD(&priv->mr.mr_list, mr, mr);
++	DEBUG("port %u MR CREATED (%p) for external memory %p:\n"
++	      "  [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
++	      " lkey=0x%x base_idx=%u ms_n=%u, ms_bmp_n=%u",
++	      dev->data->port_id, (void *)mr, (void *)addr,
++	      addr, addr + len, rte_cpu_to_be_32(mr->ibv_mr->lkey),
++	      mr->ms_base_idx, mr->ms_n, mr->ms_bmp_n);
++	/* Insert to the global cache table. */
++	mr_insert_dev_cache(dev, mr);
++	rte_rwlock_write_unlock(&priv->mr.rwlock);
++	/* Insert to the local cache table */
++	mlx4_mr_addr2mr_bh(dev, mr_ctrl, addr);
++}
++
++/**
++ * Register MR for entire memory chunks in a Mempool having externally allocated
++ * memory and fill in local cache.
++ *
++ * @param dev
++ *   Pointer to Ethernet device.
++ * @param mr_ctrl
++ *   Pointer to per-queue MR control structure.
++ * @param mp
++ *   Pointer to registering Mempool.
++ *
++ * @return
++ *   0 on success, -1 on failure.
++ */
++static uint32_t
++mlx4_mr_update_ext_mp(struct rte_eth_dev *dev, struct mlx4_mr_ctrl *mr_ctrl,
++		      struct rte_mempool *mp)
++{
++	struct mr_update_mp_data data = {
++		.dev = dev,
++		.mr_ctrl = mr_ctrl,
++		.ret = 0,
++	};
++
++	rte_mempool_mem_iter(mp, mlx4_mr_update_ext_mp_cb, &data);
++	return data.ret;
++}
++
++/**
++ * Register MR entire memory chunks in a Mempool having externally allocated
++ * memory and search LKey of the address to return.
++ *
++ * @param dev
++ *   Pointer to Ethernet device.
++ * @param addr
++ *   Search key.
++ * @param mp
++ *   Pointer to registering Mempool where addr belongs.
++ *
++ * @return
++ *   LKey for address on success, UINT32_MAX on failure.
++ */
++uint32_t
++mlx4_tx_update_ext_mp(struct txq *txq, uintptr_t addr,
++		      struct rte_mempool *mp)
++{
++	struct mlx4_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
++	struct priv *priv = txq->priv;
++
++	mlx4_mr_update_ext_mp(priv->dev, mr_ctrl, mp);
++	return mlx4_tx_addr2mr_bh(txq, addr);
++}
++
+ /* Called during rte_mempool_mem_iter() by mlx4_mr_update_mp(). */
+ static void
+ mlx4_mr_update_mp_cb(struct rte_mempool *mp __rte_unused, void *opaque,
+@@ -1098,6 +1244,10 @@ mlx4_mr_update_mp(struct rte_eth_dev *dev, struct mlx4_mr_ctrl *mr_ctrl,
+ 	};
+ 
+ 	rte_mempool_mem_iter(mp, mlx4_mr_update_mp_cb, &data);
++	if (data.ret < 0 && rte_errno == ENXIO) {
++		/* Mempool may have externally allocated memory. */
++		return mlx4_mr_update_ext_mp(dev, mr_ctrl, mp);
++	}
+ 	return data.ret;
+ }
+ 
+diff --git a/drivers/net/mlx4/mlx4_rxtx.h b/drivers/net/mlx4/mlx4_rxtx.h
+index ffa8abfca..1be060cda 100644
+--- a/drivers/net/mlx4/mlx4_rxtx.h
++++ b/drivers/net/mlx4/mlx4_rxtx.h
+@@ -163,6 +163,26 @@ void mlx4_tx_queue_release(void *dpdk_txq);
+ void mlx4_mr_flush_local_cache(struct mlx4_mr_ctrl *mr_ctrl);
+ uint32_t mlx4_rx_addr2mr_bh(struct rxq *rxq, uintptr_t addr);
+ uint32_t mlx4_tx_addr2mr_bh(struct txq *txq, uintptr_t addr);
++uint32_t mlx4_tx_update_ext_mp(struct txq *txq, uintptr_t addr,
++			       struct rte_mempool *mp);
++
++/**
++ * Get Memory Pool (MP) from mbuf. If mbuf is indirect, the pool from which the
++ * cloned mbuf is allocated is returned instead.
++ *
++ * @param buf
++ *   Pointer to mbuf.
++ *
++ * @return
++ *   Memory pool where data is located for given mbuf.
++ */
++static struct rte_mempool *
++mlx4_mb2mp(struct rte_mbuf *buf)
++{
++	if (unlikely(RTE_MBUF_INDIRECT(buf)))
++		return rte_mbuf_from_indirect(buf)->pool;
++	return buf->pool;
++}
+ 
+ /**
+  * Query LKey from a packet buffer for Rx. No need to flush local caches for Rx
+@@ -222,6 +242,19 @@ mlx4_tx_addr2mr(struct txq *txq, uintptr_t addr)
+ 	return mlx4_tx_addr2mr_bh(txq, addr);
+ }
+ 
+-#define mlx4_tx_mb2mr(rxq, mb) mlx4_tx_addr2mr(rxq, (uintptr_t)((mb)->buf_addr))
++static __rte_always_inline uint32_t
++mlx4_tx_mb2mr(struct txq *txq, struct rte_mbuf *mb)
++{
++	uintptr_t addr = (uintptr_t)mb->buf_addr;
++	uint32_t lkey = mlx4_tx_addr2mr(txq, addr);
++
++	if (likely(lkey != UINT32_MAX))
++		return lkey;
++	if (rte_errno == ENXIO) {
++		/* Mempool may have externally allocated memory. */
++		lkey = mlx4_tx_update_ext_mp(txq, addr, mlx4_mb2mp(mb));
++	}
++	return lkey;
++}
+ 
+ #endif /* MLX4_RXTX_H_ */
+--
+2.15.2 (Apple Git-101.1)
+
diff --git a/build/external/dpdk-18.08_patches/0003-ixgbe-wait-longer-for-link-after-fiber-MAC-setup.patch b/build/external/dpdk-18.08_patches/0003-ixgbe-wait-longer-for-link-after-fiber-MAC-setup.patch
new file mode 100644
index 00000000000..93d1601c677
--- /dev/null
+++ b/build/external/dpdk-18.08_patches/0003-ixgbe-wait-longer-for-link-after-fiber-MAC-setup.patch
@@ -0,0 +1,39 @@
+From ba9b381c532fe57c726752b7db0ab45ab7726c90 Mon Sep 17 00:00:00 2001
+From: Matthew Smith <mgsmith@netgate.com>
+Date: Fri, 13 Jul 2018 16:35:57 -0500
+Subject: [PATCH] ixgbe: wait longer for link after fiber MAC setup
+
+After setting up the link on a fiber port, the maximum wait time for
+the link to come up is 500 ms in ixgbe_setup_mac_link_multispeed_fiber().
+On an x550 SFP+ port, this is often not sufficiently long for the link
+to come up. This can result in never being able to retrieve accurate
+link status for the port using rte_eth_link_get_nowait().
+
+Increase the maximum wait time in ixgbe_setup_mac_link_multispeed_fiber()
+to 1 s.
+
+Bugzilla ID: 69
+Fixes: f3430431abaf ("ixgbe/base: add SFP+ dual-speed support")
+Cc: stable@dpdk.org
+
+Signed-off-by: Matthew Smith <mgsmith@netgate.com>
+---
+ drivers/net/ixgbe/base/ixgbe_common.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ixgbe/base/ixgbe_common.c b/drivers/net/ixgbe/base/ixgbe_common.c
+index e7e9256e5..2fb0a072c 100644
+--- a/drivers/net/ixgbe/base/ixgbe_common.c
++++ b/drivers/net/ixgbe/base/ixgbe_common.c
+@@ -5296,7 +5296,7 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
+ 		 * Section 73.10.2, we may have to wait up to 500ms if KR is
+ 		 * attempted. 82599 uses the same timing for 10g SFI.
+ 		 */
+-		for (i = 0; i < 5; i++) {
++		for (i = 0; i < 10; i++) {
+ 			/* Wait for the link partner to also set speed */
+ 			msec_delay(100);
+ 
+--
+2.15.2 (Apple Git-101.1)
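The ixgbe change simply widens the link-wait budget from 5 x 100 ms = 500 ms to 10 x 100 ms = 1 s. Its practical effect is on callers that poll link state without blocking, as the commit message notes. A hedged sketch of such a poll loop follows; the helper name wait_for_link and the retry budget are illustrative assumptions, and note that rte_eth_link_get_nowait() returns void in DPDK 18.08.

#include <string.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>

/* Sketch: poll link status without blocking. If the MAC setup code gives up
 * after 500 ms while an x550 SFP+ link needs longer, a loop like this can
 * keep reporting link-down; the patch stretches the driver's internal wait
 * to 1 s so the status reported here has settled. */
static int
wait_for_link(uint16_t port_id, int max_tries)
{
	struct rte_eth_link link;
	int i;

	for (i = 0; i < max_tries; i++) {
		memset(&link, 0, sizeof(link));
		rte_eth_link_get_nowait(port_id, &link);
		if (link.link_status == ETH_LINK_UP)
			return 0;	/* link.link_speed is now valid */
		rte_delay_ms(100);
	}
	return -1;	/* still down after max_tries * 100 ms */
}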