From e25bad4a287924d26627ffe307f8a12824b87054 Mon Sep 17 00:00:00 2001
From: Shahaf Shuler <shahafs@mellanox.com>
Date: Thu, 2 Mar 2017 11:01:31 +0200
Subject: [PATCH] net/mlx5: add hardware TSO support

Implement support for hardware TSO (TCP Segmentation Offload), letting the
NIC split a large TCP payload into MTU-sized segments on transmit.

Signed-off-by: Shahaf Shuler <shahafs@mellanox.com>
Acked-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
---
 doc/guides/nics/features/mlx5.ini |   1 +
 doc/guides/nics/mlx5.rst          |  12 ++++
 drivers/net/mlx5/mlx5.c           |  18 ++++++
 drivers/net/mlx5/mlx5.h           |   2 +
 drivers/net/mlx5/mlx5_defs.h      |   3 +
 drivers/net/mlx5/mlx5_ethdev.c    |   2 +
 drivers/net/mlx5/mlx5_rxtx.c      | 123 +++++++++++++++++++++++++++++++++-----
 drivers/net/mlx5/mlx5_rxtx.h      |   2 +
 drivers/net/mlx5/mlx5_txq.c       |  13 ++++
 9 files changed, 160 insertions(+), 16 deletions(-)

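For context on how the feature is consumed: an application requests TSO per
packet through the standard mbuf offload fields, which the datapath changes
below act on. A minimal sketch (not part of the patch; assumes an IPv4/TCP
packet whose headers all sit in the first segment), using the DPDK 17.02
mbuf API:

    #include <rte_ether.h>
    #include <rte_ip.h>
    #include <rte_tcp.h>
    #include <rte_mbuf.h>

    /* Mark a packet for hardware segmentation before rte_eth_tx_burst(). */
    static void
    request_tso(struct rte_mbuf *m, uint16_t mss)
    {
            m->l2_len = sizeof(struct ether_hdr);
            m->l3_len = sizeof(struct ipv4_hdr);
            m->l4_len = sizeof(struct tcp_hdr); /* add TCP options if present */
            m->tso_segsz = mss;                 /* payload bytes per segment */
            m->ol_flags |= PKT_TX_TCP_SEG | PKT_TX_IPV4 | PKT_TX_IP_CKSUM;
    }
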
diff --git a/doc/guides/nics/features/mlx5.ini b/doc/guides/nics/features/mlx5.ini
index f20d214..8df25ce 100644
--- a/doc/guides/nics/features/mlx5.ini
+++ b/doc/guides/nics/features/mlx5.ini
@@ -11,6 +11,7 @@ Queue start/stop     = Y
 MTU update           = Y
 Jumbo frame          = Y
 Scattered Rx         = Y
+TSO                  = Y
 Promiscuous mode     = Y
 Allmulticast mode    = Y
 Unicast MAC filter   = Y
diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst
index 5f6e594..9b0ba29 100644
--- a/doc/guides/nics/mlx5.rst
+++ b/doc/guides/nics/mlx5.rst
@@ -90,6 +90,7 @@ Features
 - Secondary process TX is supported.
 - KVM and VMware ESX SR-IOV modes are supported.
 - RSS hash result is supported.
+- Hardware TSO.
 
 Limitations
 -----------
@@ -186,9 +187,20 @@ Run-time configuration
   save PCI bandwidth and improve performance at the cost of a slightly
   higher CPU usage.
 
+  This option cannot be used in conjunction with ``tso`` below. When ``tso``
+  is set, ``txq_mpw_en`` is disabled.
+
   It is currently only supported on the ConnectX-4 Lx and ConnectX-5
   families of adapters. Enabled by default.
 
+- ``tso`` parameter [int]
+
+  A nonzero value enables hardware TSO.
+  When hardware TSO is enabled, packets marked with TCP segmentation
+  offload will be divided into segments by the hardware.
+
+  Disabled by default.
+
 Prerequisites
 -------------
 
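For example, assuming a port at the hypothetical PCI address 0000:05:00.0,
the new parameter is passed as a device argument on the EAL command line;
any DPDK application accepts the same ``-w BDF,tso=1`` form:

    testpmd -w 0000:05:00.0,tso=1 -- -i
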
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index d4bd469..03ed3b3 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -84,6 +84,9 @@
 /* Device parameter to enable multi-packet send WQEs. */
 #define MLX5_TXQ_MPW_EN "txq_mpw_en"
 
+/* Device parameter to enable hardware TSO offload. */
+#define MLX5_TSO "tso"
+
 /**
  * Retrieve integer value from environment variable.
  *
@@ -290,6 +293,8 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
 		priv->txqs_inline = tmp;
 	} else if (strcmp(MLX5_TXQ_MPW_EN, key) == 0) {
 		priv->mps &= !!tmp; /* Enable MPW only if HW supports */
+	} else if (strcmp(MLX5_TSO, key) == 0) {
+		priv->tso = !!tmp;
 	} else {
 		WARN("%s: unknown parameter", key);
 		return -EINVAL;
@@ -316,6 +321,7 @@ mlx5_args(struct priv *priv, struct rte_devargs *devargs)
 		MLX5_TXQ_INLINE,
 		MLX5_TXQS_MIN_INLINE,
 		MLX5_TXQ_MPW_EN,
+		MLX5_TSO,
 		NULL,
 	};
 	struct rte_kvargs *kvlist;
@@ -479,6 +485,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 			IBV_EXP_DEVICE_ATTR_RX_HASH |
 			IBV_EXP_DEVICE_ATTR_VLAN_OFFLOADS |
 			IBV_EXP_DEVICE_ATTR_RX_PAD_END_ALIGN |
+			IBV_EXP_DEVICE_ATTR_TSO_CAPS |
 			0;
 
 		DEBUG("using port %u (%08" PRIx32 ")", port, test);
@@ -580,11 +587,22 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 
 		priv_get_num_vfs(priv, &num_vfs);
 		priv->sriov = (num_vfs || sriov);
+		priv->tso = ((priv->tso) &&
+			    (exp_device_attr.tso_caps.max_tso > 0) &&
+			    (exp_device_attr.tso_caps.supported_qpts &
+			    (1 << IBV_QPT_RAW_ETH)));
+		if (priv->tso)
+			priv->max_tso_payload_sz =
+				exp_device_attr.tso_caps.max_tso;
 		if (priv->mps && !mps) {
 			ERROR("multi-packet send not supported on this device"
 			      " (" MLX5_TXQ_MPW_EN ")");
 			err = ENOTSUP;
 			goto port_error;
+		} else if (priv->mps && priv->tso) {
+			WARN("multi-packet send not supported in conjunction "
+			      "with TSO. MPS disabled");
+			priv->mps = 0;
 		}
 		/* Allocate and register default RSS hash keys. */
 		priv->rss_conf = rte_calloc(__func__, hash_rxq_init_n,
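
Note the gating order in the probe path above: the ``tso`` devarg only
requests the feature. It is actually enabled only when the verbs layer
reports a nonzero ``max_tso`` and TSO support on raw Ethernet QPs, and
enabling it forcibly turns off multi-packet send, matching the
documentation change earlier in this patch.
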
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 4c4b9d4..93f129b 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -126,6 +126,8 @@ struct priv {
 	unsigned int mps:1; /* Whether multi-packet send is supported. */
 	unsigned int cqe_comp:1; /* Whether CQE compression is enabled. */
 	unsigned int pending_alarm:1; /* An alarm is pending. */
+	unsigned int tso:1; /* Whether TSO is supported. */
+	unsigned int max_tso_payload_sz; /* Maximum TCP payload for TSO. */
 	unsigned int txq_inline; /* Maximum packet size for inlining. */
 	unsigned int txqs_inline; /* Queue number threshold for inlining. */
 	/* RX/TX queues. */
diff --git a/drivers/net/mlx5/mlx5_defs.h b/drivers/net/mlx5/mlx5_defs.h
index e91d245..eecb908 100644
--- a/drivers/net/mlx5/mlx5_defs.h
+++ b/drivers/net/mlx5/mlx5_defs.h
@@ -79,4 +79,7 @@
 /* Maximum number of extended statistics counters. */
 #define MLX5_MAX_XSTATS 32
 
+/* Maximum Packet headers size (L2+L3+L4) for TSO. */
+#define MLX5_MAX_TSO_HEADER 128
+
 #endif /* RTE_PMD_MLX5_DEFS_H_ */
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index 5677f03..5542193 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -693,6 +693,8 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
 			(DEV_TX_OFFLOAD_IPV4_CKSUM |
 			 DEV_TX_OFFLOAD_UDP_CKSUM |
 			 DEV_TX_OFFLOAD_TCP_CKSUM);
+	if (priv->tso)
+		info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
 	if (priv_get_ifname(priv, &ifname) == 0)
 		info->if_index = if_nametoindex(ifname);
 	/* FIXME: RETA update/query API expects the callee to know the size of
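
With this change the offload is advertised through the generic ethdev
capability mask, so applications need not know about the mlx5 devarg at
all. A minimal sketch of the portable check (DPDK 17.02 API, where port
ids are still uint8_t):

    #include <rte_ethdev.h>

    /* Return nonzero when the port advertises TCP segmentation offload. */
    static int
    port_supports_tso(uint8_t port_id)
    {
            struct rte_eth_dev_info info;

            rte_eth_dev_info_get(port_id, &info);
            return (info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) != 0;
    }
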
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index 4d5455b..98889f6 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -365,6 +365,7 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 	const unsigned int elts_n = 1 << txq->elts_n;
 	unsigned int i = 0;
 	unsigned int j = 0;
+	unsigned int k = 0;
 	unsigned int max;
 	uint16_t max_wqe;
 	unsigned int comp;
@@ -392,8 +393,10 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 		uintptr_t addr;
 		uint64_t naddr;
 		uint16_t pkt_inline_sz = MLX5_WQE_DWORD_SIZE + 2;
+		uint16_t tso_header_sz = 0;
 		uint16_t ehdr;
 		uint8_t cs_flags = 0;
+		uint64_t tso = 0;
 #ifdef MLX5_PMD_SOFT_COUNTERS
 		uint32_t total_length = 0;
 #endif
@@ -465,14 +468,74 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 			length -= pkt_inline_sz;
 			addr += pkt_inline_sz;
 		}
+		if (txq->tso_en) {
+			tso = buf->ol_flags & PKT_TX_TCP_SEG;
+			if (tso) {
+				uintptr_t end = (uintptr_t)
+						(((uintptr_t)txq->wqes) +
+						(1 << txq->wqe_n) *
+						MLX5_WQE_SIZE);
+				unsigned int copy_b;
+				uint8_t vlan_sz = (buf->ol_flags &
+						  PKT_TX_VLAN_PKT) ? 4 : 0;
+
+				tso_header_sz = buf->l2_len + vlan_sz +
+						buf->l3_len + buf->l4_len;
+
+				if (unlikely(tso_header_sz >
+					     MLX5_MAX_TSO_HEADER))
+					break;
+				copy_b = tso_header_sz - pkt_inline_sz;
+				/* First seg must contain all headers. */
+				assert(copy_b <= length);
+				raw += MLX5_WQE_DWORD_SIZE;
+				if (copy_b &&
+				   ((end - (uintptr_t)raw) > copy_b)) {
+					uint16_t n = (MLX5_WQE_DS(copy_b) -
+						      1 + 3) / 4;
+
+					if (unlikely(max_wqe < n))
+						break;
+					max_wqe -= n;
+					rte_memcpy((void *)raw,
+						   (void *)addr, copy_b);
+					addr += copy_b;
+					length -= copy_b;
+					pkt_inline_sz += copy_b;
+					/*
+					 * Another DWORD will be added
+					 * in the inline part.
+					 */
+					raw += MLX5_WQE_DS(copy_b) *
+					       MLX5_WQE_DWORD_SIZE -
+					       MLX5_WQE_DWORD_SIZE;
+				} else {
+					/* NOP WQE. */
+					wqe->ctrl = (rte_v128u32_t){
+						     htonl(txq->wqe_ci << 8),
+						     htonl(txq->qp_num_8s | 1),
+						     0,
+						     0,
+					};
+					ds = 1;
+					total_length = 0;
+					pkts--;
+					pkts_n++;
+					elts_head = (elts_head - 1) &
+						    (elts_n - 1);
+					k++;
+					goto next_wqe;
+				}
+			}
+		}
 		/* Inline if enough room. */
-		if (txq->max_inline) {
+		if (txq->inline_en || tso) {
 			uintptr_t end = (uintptr_t)
 				(((uintptr_t)txq->wqes) +
 				 (1 << txq->wqe_n) * MLX5_WQE_SIZE);
 			unsigned int max_inline = txq->max_inline *
 						  RTE_CACHE_LINE_SIZE -
-						  MLX5_WQE_DWORD_SIZE;
+						  (pkt_inline_sz - 2);
 			uintptr_t addr_end = (addr + max_inline) &
 					     ~(RTE_CACHE_LINE_SIZE - 1);
 			unsigned int copy_b = (addr_end > addr) ?
@@ -491,6 +554,18 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 				if (unlikely(max_wqe < n))
 					break;
 				max_wqe -= n;
+				if (tso) {
+					uint32_t inl =
+						htonl(copy_b | MLX5_INLINE_SEG);
+
+					pkt_inline_sz =
+						MLX5_WQE_DS(tso_header_sz) *
+						MLX5_WQE_DWORD_SIZE;
+					rte_memcpy((void *)raw,
+						   (void *)&inl, sizeof(inl));
+					raw += sizeof(inl);
+					pkt_inline_sz += sizeof(inl);
+				}
 				rte_memcpy((void *)raw, (void *)addr, copy_b);
 				addr += copy_b;
 				length -= copy_b;
@@ -591,18 +666,34 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 next_pkt:
 		++i;
 		/* Initialize known and common part of the WQE structure. */
-		wqe->ctrl = (rte_v128u32_t){
-			htonl((txq->wqe_ci << 8) | MLX5_OPCODE_SEND),
-			htonl(txq->qp_num_8s | ds),
-			0,
-			0,
-		};
-		wqe->eseg = (rte_v128u32_t){
-			0,
-			cs_flags,
-			0,
-			(ehdr << 16) | htons(pkt_inline_sz),
-		};
+		if (tso) {
+			wqe->ctrl = (rte_v128u32_t){
+				htonl((txq->wqe_ci << 8) | MLX5_OPCODE_TSO),
+				htonl(txq->qp_num_8s | ds),
+				0,
+				0,
+			};
+			wqe->eseg = (rte_v128u32_t){
+				0,
+				cs_flags | (htons(buf->tso_segsz) << 16),
+				0,
+				(ehdr << 16) | htons(tso_header_sz),
+			};
+		} else {
+			wqe->ctrl = (rte_v128u32_t){
+				htonl((txq->wqe_ci << 8) | MLX5_OPCODE_SEND),
+				htonl(txq->qp_num_8s | ds),
+				0,
+				0,
+			};
+			wqe->eseg = (rte_v128u32_t){
+				0,
+				cs_flags,
+				0,
+				(ehdr << 16) | htons(pkt_inline_sz),
+			};
+		}
+next_wqe:
 		txq->wqe_ci += (ds + 3) / 4;
 #ifdef MLX5_PMD_SOFT_COUNTERS
 		/* Increment sent bytes counter. */
@@ -610,10 +701,10 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 #endif
 	} while (pkts_n);
 	/* Take a shortcut if nothing must be sent. */
-	if (unlikely(i == 0))
+	if (unlikely((i + k) == 0))
 		return 0;
 	/* Check whether completion threshold has been reached. */
-	comp = txq->elts_comp + i + j;
+	comp = txq->elts_comp + i + j + k;
 	if (comp >= MLX5_TX_COMP_THRESH) {
 		volatile struct mlx5_wqe_ctrl *w =
 			(volatile struct mlx5_wqe_ctrl *)wqe;
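
The datapath above inlines the entire L2+L3+L4 header into the WQE, so a
TSO packet must carry all of its headers, at most MLX5_MAX_TSO_HEADER (128)
bytes, in its first segment; a packet exceeding that limit terminates the
burst early. A sketch of the equivalent pre-check an application could run
before queueing (it mirrors, rather than calls, the PMD logic):

    #include <rte_mbuf.h>

    /* Pre-validate a TSO mbuf against the limits in mlx5_tx_burst(). */
    static int
    mlx5_tso_headers_ok(const struct rte_mbuf *m)
    {
            uint16_t vlan_sz = (m->ol_flags & PKT_TX_VLAN_PKT) ? 4 : 0;
            uint16_t hdr_sz = m->l2_len + vlan_sz + m->l3_len + m->l4_len;

            /* Headers must fit the cap and live in the first segment. */
            return hdr_sz <= 128 && hdr_sz <= rte_pktmbuf_data_len(m);
    }
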
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 41a34d7..6b328cf 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -254,6 +254,8 @@ struct txq {
 	uint16_t cqe_n:4; /* Number of CQ elements (in log2). */
 	uint16_t wqe_n:4; /* Number of of WQ elements (in log2). */
 	uint16_t max_inline; /* Multiple of RTE_CACHE_LINE_SIZE to inline. */
+	uint16_t inline_en:1; /* When set inline is enabled. */
+	uint16_t tso_en:1; /* When set hardware TSO is enabled. */
 	uint32_t qp_num_8s; /* QP number shifted by 8. */
 	volatile struct mlx5_cqe (*cqes)[]; /* Completion queue. */
 	volatile void *wqes; /* Work queue (use volatile to write into). */
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 949035b..995b763 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -342,6 +342,19 @@ txq_ctrl_setup(struct rte_eth_dev *dev, struct txq_ctrl *txq_ctrl,
 			 RTE_CACHE_LINE_SIZE);
 		attr.init.cap.max_inline_data =
 			tmpl.txq.max_inline * RTE_CACHE_LINE_SIZE;
+		tmpl.txq.inline_en = 1;
+	}
+	if (priv->tso) {
+		uint16_t max_tso_inline = ((MLX5_MAX_TSO_HEADER +
+					   (RTE_CACHE_LINE_SIZE - 1)) /
+					    RTE_CACHE_LINE_SIZE);
+
+		attr.init.max_tso_header =
+			max_tso_inline * RTE_CACHE_LINE_SIZE;
+		attr.init.comp_mask |= IBV_EXP_QP_INIT_ATTR_MAX_TSO_HEADER;
+		tmpl.txq.max_inline = RTE_MAX(tmpl.txq.max_inline,
+					      max_tso_inline);
+		tmpl.txq.tso_en = 1;
 	}
 	tmpl.qp = ibv_exp_create_qp(priv->ctx, &attr.init);
 	if (tmpl.qp == NULL) {
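
As a worked check of the rounding above, with 64-byte cache lines:
max_tso_inline = (128 + 63) / 64 = 2, so the QP is created with
max_tso_header = 2 * 64 = 128 bytes and txq.max_inline is raised to at
least two cache lines, guaranteeing the inline path can always absorb a
maximal TSO header.
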
-- 
2.7.4