author	Luca Boccassi <luca.boccassi@gmail.com>	2018-11-19 12:59:01 +0000
committer	Luca Boccassi <luca.boccassi@gmail.com>	2018-11-19 12:59:24 +0000
commit	29058550643267a554e0368806dece63b047c5cb (patch)
tree	a6f573fe3fbc14585529b83cfcc65da2dceefbf5 /drivers/net/mlx4
parent	8a853e3f0275efc8b05cb195085d45946942744a (diff)
New upstream version 18.11-rc4 (upstream/18.11-rc4)
Change-Id: I861e1a2f7df210f57f44f1ad56b9ef789a4675e3
Signed-off-by: Luca Boccassi <luca.boccassi@gmail.com>
Diffstat (limited to 'drivers/net/mlx4')
-rw-r--r--	drivers/net/mlx4/mlx4_mr.c	42
-rw-r--r--	drivers/net/mlx4/mlx4_rxtx.h	26
2 files changed, 42 insertions, 26 deletions
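
Most of the mlx4_mr.c hunks below replace the `{ 0, }` aggregate initializer on `struct mlx4_mr_cache` locals with a declaration followed by an explicit memset(); both forms zero every member. A minimal standalone sketch of the two styles, using a hypothetical `struct cache_entry` in place of the driver's type:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for struct mlx4_mr_cache. */
struct cache_entry {
	uintptr_t start;
	uintptr_t end;
	uint32_t lkey;
};

int main(void)
{
	/* Old style: aggregate initializer zeroes every member. */
	struct cache_entry a = { 0, };

	/* New style used by this patch: declare, then memset() to zero. */
	struct cache_entry b;
	memset(&b, 0, sizeof(b));

	printf("a.end=%ju b.end=%ju\n", (uintmax_t)a.end, (uintmax_t)b.end);
	return 0;
}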
diff --git a/drivers/net/mlx4/mlx4_mr.c b/drivers/net/mlx4/mlx4_mr.c
index bee85864..a0094483 100644
--- a/drivers/net/mlx4/mlx4_mr.c
+++ b/drivers/net/mlx4/mlx4_mr.c
@@ -354,8 +354,9 @@ mr_insert_dev_cache(struct rte_eth_dev *dev, struct mlx4_mr *mr)
 	DEBUG("port %u inserting MR(%p) to global cache",
 	      dev->data->port_id, (void *)mr);
 	for (n = 0; n < mr->ms_bmp_n; ) {
-		struct mlx4_mr_cache entry = { 0, };
+		struct mlx4_mr_cache entry;
 
+		memset(&entry, 0, sizeof(entry));
 		/* Find a contiguous chunk and advance the index. */
 		n = mr_find_next_chunk(mr, &entry, n);
 		if (!entry.end)
@@ -398,8 +399,9 @@ mr_lookup_dev_list(struct rte_eth_dev *dev, struct mlx4_mr_cache *entry,
 		if (mr->ms_n == 0)
 			continue;
 		for (n = 0; n < mr->ms_bmp_n; ) {
-			struct mlx4_mr_cache ret = { 0, };
+			struct mlx4_mr_cache ret;
 
+			memset(&ret, 0, sizeof(ret));
 			n = mr_find_next_chunk(mr, &ret, n);
 			if (addr >= ret.start && addr < ret.end) {
 				/* Found. */
@@ -571,7 +573,7 @@ mlx4_mr_create(struct rte_eth_dev *dev, struct mlx4_mr_cache *entry,
 	 * Find out a contiguous virtual address chunk in use, to which the
 	 * given address belongs, in order to register maximum range. In the
 	 * best case where mempools are not dynamically recreated and
-	 * '--socket-mem' is speicified as an EAL option, it is very likely to
+	 * '--socket-mem' is specified as an EAL option, it is very likely to
 	 * have only one MR(LKey) per a socket and per a hugepage-size even
 	 * though the system memory is highly fragmented.
 	 */
@@ -688,8 +690,9 @@ alloc_resources:
 	 */
 	for (n = 0; n < ms_n; ++n) {
 		uintptr_t start;
-		struct mlx4_mr_cache ret = { 0, };
+		struct mlx4_mr_cache ret;
 
+		memset(&ret, 0, sizeof(ret));
 		start = data_re.start + n * msl->page_sz;
 		/* Exclude memsegs already registered by other MRs. */
 		if (mr_lookup_dev(dev, &ret, start) == UINT32_MAX) {
@@ -1039,7 +1042,7 @@ mlx4_rx_addr2mr_bh(struct rxq *rxq, uintptr_t addr)
  * @return
  *   Searched LKey on success, UINT32_MAX on no match.
  */
-uint32_t
+static uint32_t
 mlx4_tx_addr2mr_bh(struct txq *txq, uintptr_t addr)
 {
 	struct mlx4_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
@@ -1051,6 +1054,32 @@ mlx4_tx_addr2mr_bh(struct txq *txq, uintptr_t addr)
 }
 
 /**
+ * Bottom-half of LKey search on Tx. If it can't be searched in the memseg
+ * list, register the mempool of the mbuf as externally allocated memory.
+ *
+ * @param txq
+ *   Pointer to Tx queue structure.
+ * @param mb
+ *   Pointer to mbuf.
+ *
+ * @return
+ *   Searched LKey on success, UINT32_MAX on no match.
+ */
+uint32_t
+mlx4_tx_mb2mr_bh(struct txq *txq, struct rte_mbuf *mb)
+{
+	uintptr_t addr = (uintptr_t)mb->buf_addr;
+	uint32_t lkey;
+
+	lkey = mlx4_tx_addr2mr_bh(txq, addr);
+	if (lkey == UINT32_MAX && rte_errno == ENXIO) {
+		/* Mempool may have externally allocated memory. */
+		return mlx4_tx_update_ext_mp(txq, addr, mlx4_mb2mp(mb));
+	}
+	return lkey;
+}
+
+/**
  * Flush all of the local cache entries.
  *
  * @param mr_ctrl
@@ -1277,8 +1306,9 @@ mlx4_mr_dump_dev(struct rte_eth_dev *dev)
 		if (mr->ms_n == 0)
 			continue;
 		for (n = 0; n < mr->ms_bmp_n; ) {
-			struct mlx4_mr_cache ret = { 0, };
+			struct mlx4_mr_cache ret;
 
+			memset(&ret, 0, sizeof(ret));
 			n = mr_find_next_chunk(mr, &ret, n);
 			if (!ret.end)
 				break;
diff --git a/drivers/net/mlx4/mlx4_rxtx.h b/drivers/net/mlx4/mlx4_rxtx.h
index 1be060cd..d7ec4e0c 100644
--- a/drivers/net/mlx4/mlx4_rxtx.h
+++ b/drivers/net/mlx4/mlx4_rxtx.h
@@ -162,7 +162,7 @@ void mlx4_tx_queue_release(void *dpdk_txq);
 
 void mlx4_mr_flush_local_cache(struct mlx4_mr_ctrl *mr_ctrl);
 uint32_t mlx4_rx_addr2mr_bh(struct rxq *rxq, uintptr_t addr);
-uint32_t mlx4_tx_addr2mr_bh(struct txq *txq, uintptr_t addr);
+uint32_t mlx4_tx_mb2mr_bh(struct txq *txq, struct rte_mbuf *mb);
 uint32_t mlx4_tx_update_ext_mp(struct txq *txq, uintptr_t addr,
 			       struct rte_mempool *mp);
 
@@ -176,7 +176,7 @@ uint32_t mlx4_tx_update_ext_mp(struct txq *txq, uintptr_t addr,
  * @return
  *   Memory pool where data is located for given mbuf.
  */
-static struct rte_mempool *
+static inline struct rte_mempool *
 mlx4_mb2mp(struct rte_mbuf *buf)
 {
 	if (unlikely(RTE_MBUF_INDIRECT(buf)))
@@ -225,9 +225,10 @@ mlx4_rx_addr2mr(struct rxq *rxq, uintptr_t addr)
  *   Searched LKey on success, UINT32_MAX on no match.
  */
 static __rte_always_inline uint32_t
-mlx4_tx_addr2mr(struct txq *txq, uintptr_t addr)
+mlx4_tx_mb2mr(struct txq *txq, struct rte_mbuf *mb)
 {
 	struct mlx4_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
+	uintptr_t addr = (uintptr_t)mb->buf_addr;
 	uint32_t lkey;
 
 	/* Check generation bit to see if there's any change on existing MRs. */
@@ -238,23 +239,8 @@
 				    MLX4_MR_CACHE_N, addr);
 	if (likely(lkey != UINT32_MAX))
 		return lkey;
-	/* Take slower bottom-half (binary search) on miss. */
-	return mlx4_tx_addr2mr_bh(txq, addr);
-}
-
-static __rte_always_inline uint32_t
-mlx4_tx_mb2mr(struct txq *txq, struct rte_mbuf *mb)
-{
-	uintptr_t addr = (uintptr_t)mb->buf_addr;
-	uint32_t lkey = mlx4_tx_addr2mr(txq, addr);
-
-	if (likely(lkey != UINT32_MAX))
-		return lkey;
-	if (rte_errno == ENXIO) {
-		/* Mempool may have externally allocated memory. */
-		lkey = mlx4_tx_update_ext_mp(txq, addr, mlx4_mb2mp(mb));
-	}
-	return lkey;
+	/* Take slower bottom-half on miss. */
+	return mlx4_tx_mb2mr_bh(txq, mb);
 }
 
 #endif /* MLX4_RXTX_H_ */
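
The net effect of the mlx4_rxtx.h and mlx4_mr.c changes is that the inline per-packet helper mlx4_tx_mb2mr() now does only the linear cache lookup, while the slower work (address-based bottom-half search, and registration of externally allocated mempool memory when that search reports ENXIO) sits behind the non-inline mlx4_tx_mb2mr_bh(). Below is a simplified, self-contained sketch of that control flow; the types, the single-entry cache, and the helper names (tx_mb2mr, tx_mb2mr_bh, tx_update_ext_mp) are stand-ins for illustration, not the driver's or DPDK's API.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define LKEY_INVALID UINT32_MAX

/* Stub stand-ins for the driver's mbuf and Tx queue types. */
struct mbuf { void *buf_addr; };
struct txq { uintptr_t cached_start, cached_end; uint32_t cached_lkey; };

static int search_errno;

/* Fast path: linear lookup in the per-queue cache (here a single entry). */
static inline uint32_t tx_cache_lookup(struct txq *txq, uintptr_t addr)
{
	if (addr >= txq->cached_start && addr < txq->cached_end)
		return txq->cached_lkey;
	return LKEY_INVALID;
}

/* Slow-path stand-in for mlx4_tx_addr2mr_bh(): pretend the address is not
 * found in any memseg list, as happens for externally allocated memory. */
static uint32_t tx_addr2mr_bh(struct txq *txq, uintptr_t addr)
{
	(void)txq; (void)addr;
	search_errno = ENXIO;
	return LKEY_INVALID;
}

/* Stand-in for mlx4_tx_update_ext_mp(): register the external memory and
 * populate the per-queue cache so later lookups hit the fast path. */
static uint32_t tx_update_ext_mp(struct txq *txq, uintptr_t addr)
{
	txq->cached_start = addr;
	txq->cached_end = addr + 4096;
	txq->cached_lkey = 0x1234;
	return txq->cached_lkey;
}

/* Bottom half, mirroring mlx4_tx_mb2mr_bh(): address search first, then
 * external-mempool registration if the search reported ENXIO. */
static uint32_t tx_mb2mr_bh(struct txq *txq, struct mbuf *mb)
{
	uintptr_t addr = (uintptr_t)mb->buf_addr;
	uint32_t lkey = tx_addr2mr_bh(txq, addr);

	if (lkey == LKEY_INVALID && search_errno == ENXIO)
		return tx_update_ext_mp(txq, addr);
	return lkey;
}

/* Inline fast path, mirroring the reshaped mlx4_tx_mb2mr(): a cache hit
 * returns immediately, everything else falls through to the bottom half. */
static inline uint32_t tx_mb2mr(struct txq *txq, struct mbuf *mb)
{
	uint32_t lkey = tx_cache_lookup(txq, (uintptr_t)mb->buf_addr);

	if (lkey != LKEY_INVALID)
		return lkey;
	return tx_mb2mr_bh(txq, mb);
}

int main(void)
{
	static char payload[64];
	struct txq txq = { 0 };
	struct mbuf mb = { .buf_addr = payload };

	/* First lookup misses the cache and takes the bottom half. */
	printf("first lkey:  0x%x\n", tx_mb2mr(&txq, &mb));
	/* Second lookup hits the cache the bottom half populated. */
	printf("second lkey: 0x%x\n", tx_mb2mr(&txq, &mb));
	return 0;
}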