path: root/build/external/patches/dpdk_18.08/0001-net-mlx5-support-externally-allocated-mempool.patch
From bd42c77c457146bede32333558b4e0414b30683e Mon Sep 17 00:00:00 2001
From: Yongseok Koh <yskoh@mellanox.com>
Date: Fri, 24 Aug 2018 16:46:49 -0700
Subject: [PATCH] net/mlx5: support externally allocated mempool

When the MLX PMD registers memory for DMA, it walks DPDK's global memseg
list to maximize the range of each registration so that LKey search can be
more efficient. The granularity of MR registration is one page.

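For illustration, here is a minimal sketch of such a range-based LKey
lookup (hypothetical types and names, not the driver's actual code; the
driver itself caches struct mlx5_mr_cache entries). Entries cover
disjoint [start, end) ranges sorted by start address:

    #include <stdint.h>

    struct lkey_entry { uintptr_t start, end; uint32_t lkey; };

    /* Binary-search the cache; a miss falls back to the slow path. */
    static uint32_t
    lkey_lookup(const struct lkey_entry *tbl, int n, uintptr_t addr)
    {
            int lo = 0, hi = n;

            while (lo < hi) {
                    int mid = (lo + hi) / 2;

                    if (addr < tbl[mid].start)
                            hi = mid;
                    else if (addr >= tbl[mid].end)
                            lo = mid + 1;
                    else
                            return tbl[mid].lkey; /* hit */
            }
            return UINT32_MAX; /* miss */
    }
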
Externally allocated memory normally shouldn't be used for DMA because it
can't be found in the memseg list and DPDK can't track its free events.
However, if the external memory is static (allocated at startup and never
freed), such memory can be registered as well with a small tweak in the code.

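As a usage sketch of the scenario this patch enables (hypothetical helper
name and sizes, IOVA-as-VA mode assumed, error handling mostly omitted), an
application could hand externally mapped memory to a mempool like this; the
chunks of such a pool are what the PMD registers via the new fallback path:

    #include <sys/mman.h>
    #include <rte_mbuf.h>
    #include <rte_mempool.h>

    /* Hypothetical helper: build a pktmbuf pool on top of anonymous
     * hugepage memory mapped outside of DPDK. Per the note above, the
     * mapping must stay in place for the lifetime of the pool. */
    static struct rte_mempool *
    ext_pktmbuf_pool(unsigned int n, int socket)
    {
            size_t elt_sz = sizeof(struct rte_mbuf) + RTE_MBUF_DEFAULT_BUF_SIZE;
            /* Slack for per-object mempool header/trailer and alignment. */
            size_t len = RTE_ALIGN_CEIL((size_t)n * (elt_sz + 128), 1U << 21);
            void *va = mmap(NULL, len, PROT_READ | PROT_WRITE,
                            MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
            struct rte_mempool *mp;

            if (va == MAP_FAILED)
                    return NULL;
            mp = rte_mempool_create_empty("ext_mp", n, elt_sz, 0,
                                          sizeof(struct rte_pktmbuf_pool_private),
                                          socket, 0);
            if (mp == NULL)
                    return NULL;
            rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL);
            rte_pktmbuf_pool_init(mp, NULL);
            /* Assuming IOVA-as-VA mode, so iova == va for this chunk. */
            rte_mempool_populate_iova(mp, va, (rte_iova_t)(uintptr_t)va,
                                      len, NULL, NULL);
            rte_mempool_obj_iter(mp, rte_pktmbuf_init, NULL);
            return mp;
    }

With this patch, the first lookup miss on such a pool takes the ENXIO
fallback in mlx5_mr_update_mp() or mlx5_tx_mb2mr() and registers the
chunk once.
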
Signed-off-by: Yongseok Koh <yskoh@mellanox.com>
---
 drivers/net/mlx5/mlx5_mr.c   | 155 +++++++++++++++++++++++++++++++++++++++++++
 drivers/net/mlx5/mlx5_rxtx.h |  35 +++++++++-
 2 files changed, 189 insertions(+), 1 deletion(-)

diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c
index 08105a443..876622e91 100644
--- a/drivers/net/mlx5/mlx5_mr.c
+++ b/drivers/net/mlx5/mlx5_mr.c
@@ -277,6 +277,23 @@ mr_find_next_chunk(struct mlx5_mr *mr, struct mlx5_mr_cache *entry,
 	uintptr_t end = 0;
 	uint32_t idx = 0;
 
+	/* MR for external memory doesn't have a memseg list. */
+	if (mr->msl == NULL) {
+		struct ibv_mr *ibv_mr = mr->ibv_mr;
+
+		assert(mr->ms_bmp_n == 1);
+		assert(mr->ms_n == 1);
+		assert(base_idx == 0);
+		/*
+	 * Can't search it in the memseg list; take it directly from the
+	 * verbs MR, as there's only one chunk.
+		 */
+		entry->start = (uintptr_t)ibv_mr->addr;
+		entry->end = (uintptr_t)ibv_mr->addr + mr->ibv_mr->length;
+		entry->lkey = rte_cpu_to_be_32(mr->ibv_mr->lkey);
+		/* Returning 1 ends iteration. */
+		return 1;
+	}
 	for (idx = base_idx; idx < mr->ms_bmp_n; ++idx) {
 		if (rte_bitmap_get(mr->ms_bmp, idx)) {
 			const struct rte_memseg_list *msl;
@@ -818,6 +835,7 @@ mlx5_mr_mem_event_free_cb(struct rte_eth_dev *dev, const void *addr, size_t len)
 		mr = mr_lookup_dev_list(dev, &entry, start);
 		if (mr == NULL)
 			continue;
+		assert(mr->msl); /* Can't be external memory. */
 		ms = rte_mem_virt2memseg((void *)start, msl);
 		assert(ms != NULL);
 		assert(msl->page_sz == ms->hugepage_sz);
@@ -1070,6 +1088,139 @@ mlx5_mr_flush_local_cache(struct mlx5_mr_ctrl *mr_ctrl)
 		(void *)mr_ctrl, mr_ctrl->cur_gen);
 }
 
+/**
+ * Called during rte_mempool_mem_iter() by mlx5_mr_update_ext_mp().
+ *
+ * The externally allocated chunk is registered and an MR is created for it.
+ * The MR object is added to the global list. If the memseg list of an MR
+ * object (mr->msl) is NULL, the MR object is regarded as covering externally
+ * allocated memory.
+ *
+ * Once external memory is registered, it must stay static. If the memory is
+ * freed and the virtual address range gets different physical memory mapped
+ * again, it may crash the device due to a stale translation entry, because
+ * the PMD can't track free events for external memory for now.
+ */
+static void
+mlx5_mr_update_ext_mp_cb(struct rte_mempool *mp, void *opaque,
+			 struct rte_mempool_memhdr *memhdr,
+			 unsigned mem_idx)
+{
+	struct mr_update_mp_data *data = opaque;
+	struct rte_eth_dev *dev = data->dev;
+	struct priv *priv = dev->data->dev_private;
+	struct mlx5_mr_ctrl *mr_ctrl = data->mr_ctrl;
+	struct mlx5_mr *mr = NULL;
+	uintptr_t addr = (uintptr_t)memhdr->addr;
+	size_t len = memhdr->len;
+	struct mlx5_mr_cache entry;
+	uint32_t lkey;
+
+	/* If the address is already registered, just return. */
+	rte_rwlock_read_lock(&priv->mr.rwlock);
+	lkey = mr_lookup_dev(dev, &entry, addr);
+	rte_rwlock_read_unlock(&priv->mr.rwlock);
+	if (lkey != UINT32_MAX)
+		return;
+	mr = rte_zmalloc_socket(NULL,
+				RTE_ALIGN_CEIL(sizeof(*mr),
+					       RTE_CACHE_LINE_SIZE),
+				RTE_CACHE_LINE_SIZE, mp->socket_id);
+	if (mr == NULL) {
+		DRV_LOG(WARNING,
+			"port %u unable to allocate memory for a new MR of"
+			" mempool (%s).",
+			dev->data->port_id, mp->name);
+		data->ret = -1;
+		return;
+	}
+	DRV_LOG(DEBUG, "port %u register MR for chunk #%u of mempool (%s)",
+		dev->data->port_id, mem_idx, mp->name);
+	mr->ibv_mr = mlx5_glue->reg_mr(priv->pd, (void *)addr, len,
+				       IBV_ACCESS_LOCAL_WRITE);
+	if (mr->ibv_mr == NULL) {
+		DRV_LOG(WARNING,
+			"port %u failed to create a verbs MR for address (%p)",
+			dev->data->port_id, (void *)addr);
+		rte_free(mr);
+		data->ret = -1;
+		return;
+	}
+	mr->msl = NULL; /* Mark it as external memory. */
+	mr->ms_bmp = NULL;
+	mr->ms_n = 1;
+	mr->ms_bmp_n = 1;
+	rte_rwlock_write_lock(&priv->mr.rwlock);
+	LIST_INSERT_HEAD(&priv->mr.mr_list, mr, mr);
+	DRV_LOG(DEBUG,
+		"port %u MR CREATED (%p) for external memory %p:\n"
+		"  [0x%" PRIxPTR ", 0x%" PRIxPTR "),"
+		" lkey=0x%x base_idx=%u ms_n=%u, ms_bmp_n=%u",
+		dev->data->port_id, (void *)mr, (void *)addr,
+		addr, addr + len, rte_cpu_to_be_32(mr->ibv_mr->lkey),
+		mr->ms_base_idx, mr->ms_n, mr->ms_bmp_n);
+	/* Insert into the global cache table. */
+	mr_insert_dev_cache(dev, mr);
+	rte_rwlock_write_unlock(&priv->mr.rwlock);
+	/* Insert into the local cache table. */
+	mlx5_mr_addr2mr_bh(dev, mr_ctrl, addr);
+}
+
+/**
+ * Register MRs for all memory chunks of a mempool that has externally
+ * allocated memory and fill the per-queue local MR cache.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param mr_ctrl
+ *   Pointer to per-queue MR control structure.
+ * @param mp
+ *   Pointer to registering Mempool.
+ *
+ * @return
+ *   0 on success, -1 on failure.
+ */
+static int
+mlx5_mr_update_ext_mp(struct rte_eth_dev *dev, struct mlx5_mr_ctrl *mr_ctrl,
+		      struct rte_mempool *mp)
+{
+	struct mr_update_mp_data data = {
+		.dev = dev,
+		.mr_ctrl = mr_ctrl,
+		.ret = 0,
+	};
+
+	rte_mempool_mem_iter(mp, mlx5_mr_update_ext_mp_cb, &data);
+	return data.ret;
+}
+
+/**
+ * Register MRs for all memory chunks of a mempool that has externally
+ * allocated memory, then look up the LKey of the given address and return it.
+ *
+ * @param txq
+ *   Pointer to Tx queue structure.
+ * @param addr
+ *   Search key.
+ * @param mp
+ *   Pointer to the mempool to register, to which addr belongs.
+ *
+ * @return
+ *   LKey for address on success, UINT32_MAX on failure.
+ */
+uint32_t
+mlx5_tx_update_ext_mp(struct mlx5_txq_data *txq, uintptr_t addr,
+		      struct rte_mempool *mp)
+{
+	struct mlx5_txq_ctrl *txq_ctrl =
+		container_of(txq, struct mlx5_txq_ctrl, txq);
+	struct mlx5_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
+	struct priv *priv = txq_ctrl->priv;
+
+	mlx5_mr_update_ext_mp(ETH_DEV(priv), mr_ctrl, mp);
+	return mlx5_tx_addr2mr_bh(txq, addr);
+}
+
 /* Called during rte_mempool_mem_iter() by mlx5_mr_update_mp(). */
 static void
 mlx5_mr_update_mp_cb(struct rte_mempool *mp __rte_unused, void *opaque,
@@ -1113,6 +1264,10 @@ mlx5_mr_update_mp(struct rte_eth_dev *dev, struct mlx5_mr_ctrl *mr_ctrl,
 	};
 
 	rte_mempool_mem_iter(mp, mlx5_mr_update_mp_cb, &data);
+	if (data.ret < 0 && rte_errno == ENXIO) {
+		/* Mempool may have externally allocated memory. */
+		return mlx5_mr_update_ext_mp(dev, mr_ctrl, mp);
+	}
 	return data.ret;
 }
 
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index f53bb43c3..b61c23b33 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -347,6 +347,8 @@ uint16_t mlx5_rx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts,
 void mlx5_mr_flush_local_cache(struct mlx5_mr_ctrl *mr_ctrl);
 uint32_t mlx5_rx_addr2mr_bh(struct mlx5_rxq_data *rxq, uintptr_t addr);
 uint32_t mlx5_tx_addr2mr_bh(struct mlx5_txq_data *txq, uintptr_t addr);
+uint32_t mlx5_tx_update_ext_mp(struct mlx5_txq_data *txq, uintptr_t addr,
+			       struct rte_mempool *mp);
 
 #ifndef NDEBUG
 /**
@@ -534,6 +536,24 @@ mlx5_tx_complete(struct mlx5_txq_data *txq)
 }
 
 /**
+ * Get the memory pool (MP) of an mbuf. If the mbuf is indirect, the pool of
+ * the direct mbuf it was cloned from is returned instead.
+ *
+ * @param buf
+ *   Pointer to mbuf.
+ *
+ * @return
+ *   Memory pool where data is located for given mbuf.
+ */
+static struct rte_mempool *
+mlx5_mb2mp(struct rte_mbuf *buf)
+{
+	if (unlikely(RTE_MBUF_INDIRECT(buf)))
+		return rte_mbuf_from_indirect(buf)->pool;
+	return buf->pool;
+}
+
+/**
  * Query LKey from a packet buffer for Rx. No need to flush local caches for Rx
  * as mempool is pre-configured and static.
  *
@@ -591,7 +611,20 @@ mlx5_tx_addr2mr(struct mlx5_txq_data *txq, uintptr_t addr)
 	return mlx5_tx_addr2mr_bh(txq, addr);
 }
 
-#define mlx5_tx_mb2mr(rxq, mb) mlx5_tx_addr2mr(rxq, (uintptr_t)((mb)->buf_addr))
+static __rte_always_inline uint32_t
+mlx5_tx_mb2mr(struct mlx5_txq_data *txq, struct rte_mbuf *mb)
+{
+	uintptr_t addr = (uintptr_t)mb->buf_addr;
+	uint32_t lkey = mlx5_tx_addr2mr(txq, addr);
+
+	if (likely(lkey != UINT32_MAX))
+		return lkey;
+	if (rte_errno == ENXIO) {
+		/* Mempool may have externally allocated memory. */
+		lkey = mlx5_tx_update_ext_mp(txq, addr, mlx5_mb2mp(mb));
+	}
+	return lkey;
+}
 
 /**
  * Ring TX queue doorbell and flush the update if requested.
-- 
2.11.0