/*
 * Copyright (c) 2016  Intel Corporation.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef _MISC_H_
#define _MISC_H_

#ifdef __cplusplus
extern "C" {
#endif

/*
 * Compare 256 bits of data (da) against a pattern (sa) under a mask (sm).
 * Returns zero when ((da & sm) == sa) holds for all four 64-bit lanes,
 * non-zero otherwise.
 */
static inline int
ymm_mask_cmp(const _ymm_t *da, const _ymm_t *sa, const _ymm_t *sm)
{
	uint64_t ret;

	ret = ((da->u64[0] & sm->u64[0]) ^ sa->u64[0]) |
		((da->u64[1] & sm->u64[1]) ^ sa->u64[1]) |
		((da->u64[2] & sm->u64[2]) ^ sa->u64[2]) |
		((da->u64[3] & sm->u64[3]) ^ sa->u64[3]);

	return (ret != 0);
}

/*
 * Set up the mbuf tx_offload field as a raw 64-bit value.
 * Consider moving it into DPDK librte_mbuf.
 */
static inline uint64_t
_mbuf_tx_offload(uint64_t il2, uint64_t il3, uint64_t il4, uint64_t tso,
	uint64_t ol3, uint64_t ol2)
{
	return il2 | il3 << 7 | il4 << 16 | tso << 24 | ol3 << 40 | ol2 << 49;
}
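
/*
 * Usage sketch (hypothetical helper, not part of the API above): fill
 * tx_offload for a plain Ethernet/IPv4/UDP packet, no TSO, no outer
 * headers. The struct ether_hdr/udp_hdr type names follow the DPDK
 * naming used elsewhere in this file.
 */
static inline void
_mbuf_tx_offload_udp4_example(struct rte_mbuf *mb)
{
	mb->tx_offload = _mbuf_tx_offload(sizeof(struct ether_hdr),
		sizeof(struct ipv4_hdr), sizeof(struct udp_hdr),
		0 /*tso*/, 0 /*ol3*/, 0 /*ol2*/);
}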

/*
 * Given the value of an mbuf's tx_offload, calculate the L4 payload offset.
 */
static inline uint32_t
_tx_offload_l4_offset(uint64_t ofl)
{
	uint32_t l2, l3, l4;

	l2 = ofl & 0x7f;
	l3 = ofl >> 7 & 0x1ff;
	l4 = ofl >> 16 & UINT8_MAX;

	return l2 + l3 + l4;
}
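
/*
 * For example (illustrative values): with tx_offload built as
 * _mbuf_tx_offload(14, 20, 8, 0, 0, 0) for an Ethernet/IPv4/UDP packet,
 * this returns 14 + 20 + 8 = 42, the offset of the UDP payload.
 */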

/*
 * Routines to calculate L3/L4 checksums in SW.
 * Quite similar to the ones from DPDK librte_net/rte_ip.h,
 * but they provide better performance (at least for the tested
 * configurations) and extended functionality.
 * Consider moving them into DPDK librte_net/rte_ip.h.
 */

/* make the compiler generate: add %r1, %r2; adc $0, %r1. */
#define CKSUM_ADD_CARRY(s, v)	do {       \
	(s) += (v);                        \
	(s) = ((s) < (v)) ? (s) + 1 : (s); \
} while (0)
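
/*
 * E.g. (illustrative): with s = UINT64_MAX and v = 2 the addition wraps
 * to 1, the wrap is detected by (s) < (v), and the carry is folded back
 * in, giving 2: exactly one's complement (end-around carry) addition.
 */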

/**
 * Process the non-complemented checksum of a buffer.
 * Similar to rte_raw_cksum(), but provides better performance
 * (at least on IA platforms).
 * @param buf
 *   Pointer to the buffer.
 * @param size
 *   Length of the buffer.
 * @return
 *   The non-complemented checksum.
 */
static inline uint16_t
__raw_cksum(const uint8_t *buf, uint32_t size)
{
	uint64_t s, sum;
	uint32_t i, n;
	uint32_t dw1, dw2;
	uint16_t w1, w2;
	const uint64_t *b;

	b = (const uint64_t *)buf;
	n = size / sizeof(*b);
	sum = 0;

	/* main loop, consume 8 bytes per iteration. */
	for (i = 0; i != n; i++) {
		s = b[i];
		CKSUM_ADD_CARRY(sum, s);
	}

	/* consume the remainder. */
	n = size % sizeof(*b);
	if (n != 0) {
		/*
		 * position of the last 8 bytes of data. Note that this
		 * re-reads part of the last full word, so buffers shorter
		 * than 8 bytes are not supported.
		 */
		b = (const uint64_t *)((uintptr_t)(b + i) + n - sizeof(*b));
		/* calculate shift amount (little-endian byte order assumed). */
		n = (sizeof(*b) - n) * CHAR_BIT;
		s = b[0] >> n;
		CKSUM_ADD_CARRY(sum, s);
	}

	/* reduce to 16 bits */
	dw1 = sum;
	dw2 = sum >> 32;
	CKSUM_ADD_CARRY(dw1, dw2);
	w1 = dw1;
	w2 = dw1 >> 16;
	CKSUM_ADD_CARRY(w1, w2);
	return w1;
}


/**
 * Process the UDP or TCP checksum over a possibly multi-segmented packet.
 * @param mb
 *   The pointer to the mbuf with the packet.
 * @param l4_ofs
 *   Offset to the beginning of the L4 header (should be in the first segment).
 * @param cksum
 *   Pre-calculated pseudo-header checksum value.
 * @return
 *   The complemented checksum.
 */
static inline uint32_t
__udptcp_mbuf_cksum(const struct rte_mbuf *mb, uint16_t l4_ofs,
	uint32_t cksum)
{
	uint32_t dlen, i, plen;
	const struct rte_mbuf *ms;
	const void *data;

	plen = rte_pktmbuf_pkt_len(mb);
	ms = mb;

	for (i = l4_ofs; i < plen && ms != NULL; i += dlen) {
		data = rte_pktmbuf_mtod_offset(ms, const void *, l4_ofs);
		dlen = rte_pktmbuf_data_len(ms) - l4_ofs;
		cksum += __raw_cksum(data, dlen);
		ms = ms->next;
		l4_ofs = 0;
	}

	cksum = ((cksum & 0xffff0000) >> 16) + (cksum & 0xffff);
	cksum = (~cksum) & 0xffff;
	if (cksum == 0)
		cksum = 0xffff;

	return cksum;
}

/**
 * Process the pseudo-header checksum of an IPv4 header.
 *
 * Depending on the ol_flags, the pseudo-header checksum expected by the
 * drivers is not the same. For instance, when TSO is enabled, the IP
 * payload length must not be included in the pseudo-header checksum.
 *
 * When ol_flags is 0, it computes the standard pseudo-header checksum.
 *
 * @param ipv4_hdr
 *   The pointer to the contiguous IPv4 header.
 * @param ipv4h_len
 *   Length of the IPv4 header.
 * @param ol_flags
 *   The ol_flags of the associated mbuf.
 * @return
 *   The non-complemented checksum to set in the L4 header.
 */
static inline uint16_t
_ipv4x_phdr_cksum(const struct ipv4_hdr *ipv4_hdr, size_t ipv4h_len,
	uint64_t ol_flags)
{
	uint32_t s0, s1;

	s0 = ipv4_hdr->src_addr;
	s1 = ipv4_hdr->dst_addr;
	CKSUM_ADD_CARRY(s0, s1);

	if (ol_flags & PKT_TX_TCP_SEG)
		s1 = 0;
	else
		s1 = rte_cpu_to_be_16(
			(uint16_t)(rte_be_to_cpu_16(ipv4_hdr->total_length) -
			ipv4h_len));

	s1 += rte_cpu_to_be_16(ipv4_hdr->next_proto_id);
	CKSUM_ADD_CARRY(s0, s1);

	return __rte_raw_cksum_reduce(s0);
}

/**
 * Process the IPv4 UDP or TCP checksum.
 *
 * @param mb
 *   The pointer to the IPv4 packet.
 * @param l4_ofs
 *   Offset to the beginning of the L4 header (should be in first segment).
 * @param ipv4_hdr
 *   The pointer to the contiguous IPv4 header.
 * @return
 *   The complemented checksum to set in the IP packet.
 */
static inline int
_ipv4_udptcp_mbuf_cksum(const struct rte_mbuf *mb, uint16_t l4_ofs,
	const struct ipv4_hdr *ipv4_hdr)
{
	uint32_t cksum;

	cksum = _ipv4x_phdr_cksum(ipv4_hdr, mb->l3_len, 0);
	cksum = __udptcp_mbuf_cksum(mb, l4_ofs, cksum);

	return cksum;
}
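
/*
 * Usage sketch (hypothetical helper): compute and store the UDP
 * checksum of an IPv4 packet in SW, assuming mb->l2_len and mb->l3_len
 * are already filled in and both headers reside in the first segment.
 */
static inline void
_ipv4_udp_fill_cksum_example(struct rte_mbuf *mb)
{
	struct ipv4_hdr *iph;
	struct udp_hdr *uh;
	uint16_t l4_ofs;

	l4_ofs = mb->l2_len + mb->l3_len;
	iph = rte_pktmbuf_mtod_offset(mb, struct ipv4_hdr *, mb->l2_len);
	uh = rte_pktmbuf_mtod_offset(mb, struct udp_hdr *, l4_ofs);

	/* the checksum field has to be zero while the sum is computed. */
	uh->dgram_cksum = 0;
	uh->dgram_cksum = _ipv4_udptcp_mbuf_cksum(mb, l4_ofs, iph);
}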

/**
 * Process the IPv6 UDP or TCP checksum.
 *
 * @param mb
 *   The pointer to the IPv6 packet.
 * @param l4_ofs
 *   Offset to the beginning of the L4 header (should be in first segment).
 * @param ipv6_hdr
 *   The pointer to the contiguous IPv6 header.
 * @return
 *   The complemented checksum to set in the IP packet.
 */
static inline int
_ipv6_udptcp_mbuf_cksum(const struct rte_mbuf *mb, uint16_t l4_ofs,
	const struct ipv6_hdr *ipv6_hdr)
{
	uint32_t cksum;

	cksum = rte_ipv6_phdr_cksum(ipv6_hdr, 0);
	cksum = __udptcp_mbuf_cksum(mb, l4_ofs, cksum);

	return cksum;
}

static inline uint16_t
_ipv4x_cksum(const void *iph, size_t len)
{
	uint16_t cksum;

	cksum = __raw_cksum(iph, len);
	return (cksum == 0xffff) ? cksum : ~cksum;
}
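
/*
 * Usage sketch (illustrative): to fill the IPv4 header checksum in SW,
 * with iph pointing to a contiguous IPv4 header of mb->l3_len bytes:
 *
 * iph->hdr_checksum = 0;
 * iph->hdr_checksum = _ipv4x_cksum(iph, mb->l3_len);
 */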


/*
 * An analog of read-write locks, heavily biased towards the read side.
 * Assumes that there are no more than INT32_MAX concurrent readers.
 * Consider moving it into DPDK librte_eal.
 */

static inline int
rwl_try_acquire(rte_atomic32_t *p)
{
	return rte_atomic32_add_return(p, 1);
}

static inline void
rwl_release(rte_atomic32_t *p)
{
	rte_atomic32_sub(p, 1);
}

static inline int
rwl_acquire(rte_atomic32_t *p)
{
	int32_t rc;

	rc = rwl_try_acquire(p);
	if (rc < 0)
		rwl_release(p);
	return rc;
}

/*
 * Writer-side lock: wait until there are no active readers, then set
 * the counter to INT32_MIN so that new readers fail to acquire.
 */
static inline void
rwl_down(rte_atomic32_t *p)
{
	while (rte_atomic32_cmpset((volatile uint32_t *)p, 0, INT32_MIN) == 0)
		rte_pause();
}

static inline void
rwl_up(rte_atomic32_t *p)
{
	rte_atomic32_sub(p, INT32_MIN);
}
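
/*
 * Usage sketch (hypothetical names): reader and writer critical
 * sections built on top of the primitives above.
 */
static inline int
_rwl_reader_example(rte_atomic32_t *lock)
{
	/* fails while a writer holds the lock (counter is negative). */
	if (rwl_acquire(lock) < 0)
		return -1;

	/* ... read-side critical section ... */

	rwl_release(lock);
	return 0;
}

static inline void
_rwl_writer_example(rte_atomic32_t *lock)
{
	/* wait for readers to drain and lock out new ones. */
	rwl_down(lock);

	/* ... write-side critical section ... */

	rwl_up(lock);
}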

#ifdef __cplusplus
}
#endif

#endif /* _MISC_H_ */