/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
 *
 * Copyright 2008-2016 Freescale Semiconductor Inc.
 * Copyright 2017 NXP
 *
 */

#include <rte_branch_prediction.h>

#include <fsl_usd.h>
#include <process.h>
#include "bman_priv.h"
#include <sys/ioctl.h>

/*
 * Global variables for the max portal/pool number supported by this BMan
 * version.
 */
static u16 bman_ip_rev;
u16 bman_pool_max;
static void *bman_ccsr_map;

/*****************/
/* Portal driver */
/*****************/
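/*
 * Per-thread portal state: each thread that calls bman_thread_init() maps and
 * binds its own BMan software portal, so the fd, portal configuration and
 * ioctl mapping below are kept thread-local.
 */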

static __thread int bmfd = -1;
static __thread struct bm_portal_config pcfg;
static __thread struct dpaa_ioctl_portal_map map = {
	.type = dpaa_portal_bman
};

static int fsl_bman_portal_init(uint32_t idx, int is_shared)
{
	cpu_set_t cpuset;
	struct bman_portal *portal;
	int loop, ret;
	struct dpaa_ioctl_irq_map irq_map;

	/* Verify the thread's cpu-affinity */
	ret = pthread_getaffinity_np(pthread_self(), sizeof(cpu_set_t),
				     &cpuset);
	if (ret) {
		error(0, ret, "pthread_getaffinity_np()");
		return ret;
	}
	pcfg.cpu = -1;
	for (loop = 0; loop < CPU_SETSIZE; loop++)
		if (CPU_ISSET(loop, &cpuset)) {
			if (pcfg.cpu != -1) {
				pr_err("Thread is not affine to 1 cpu");
				return -EINVAL;
			}
			pcfg.cpu = loop;
		}
	if (pcfg.cpu == -1) {
		pr_err("Bug in getaffinity handling!");
		return -EINVAL;
	}
	/* Allocate and map a bman portal */
	map.index = idx;
	ret = process_portal_map(&map);
	if (ret) {
		error(0, ret, "process_portal_map()");
		return ret;
	}
	/* Make the portal's cache-[enabled|inhibited] regions */
	pcfg.addr_virt[DPAA_PORTAL_CE] = map.addr.cena;
	pcfg.addr_virt[DPAA_PORTAL_CI] = map.addr.cinh;
	pcfg.is_shared = is_shared;
	pcfg.index = map.index;
	bman_depletion_fill(&pcfg.mask);

	bmfd = open(BMAN_PORTAL_IRQ_PATH, O_RDONLY);
	if (bmfd == -1) {
		pr_err("BMan irq init failed");
		process_portal_unmap(&map.addr);
		return -EBUSY;
	}
	/* Use the IRQ FD as a unique IRQ number */
	pcfg.irq = bmfd;

	portal = bman_create_affine_portal(&pcfg);
	if (!portal) {
		pr_err("Bman portal initialisation failed (%d)",
		       pcfg.cpu);
		close(bmfd);
		bmfd = -1;
		process_portal_unmap(&map.addr);
		return -EBUSY;
	}

	/* Set the IRQ number */
	irq_map.type = dpaa_portal_bman;
	irq_map.portal_cinh = map.addr.cinh;
	process_portal_irq_map(bmfd, &irq_map);
	return 0;
}

static int fsl_bman_portal_finish(void)
{
	__maybe_unused const struct bm_portal_config *cfg;
	int ret;

	process_portal_irq_unmap(bmfd);

	cfg = bman_destroy_affine_portal();
	DPAA_BUG_ON(cfg != &pcfg);
	ret = process_portal_unmap(&map.addr);
	if (ret)
		error(0, ret, "process_portal_unmap()");
	return ret;
}

int bman_thread_fd(void)
{
	return bmfd;
}
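
/*
 * Illustrative per-thread call sequence (a sketch only; the real call sites
 * live in the DPAA bus/portal setup code, not in this file):
 *
 *	if (bman_thread_init())
 *		return -1;		// bind an affine portal to this thread
 *	int fd = bman_thread_fd();	// poll this fd for portal interrupts
 *	// ... when the fd signals, acknowledge and re-enable interrupts:
 *	bman_thread_irq();
 *	// ... at teardown:
 *	bman_thread_finish();
 */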

int bman_thread_init(void)
{
	/* Convert from contiguous/virtual cpu numbering to real cpu when
	 * calling into the code that is dependent on the device naming.
	 */
	return fsl_bman_portal_init(QBMAN_ANY_PORTAL_IDX, 0);
}

int bman_thread_finish(void)
{
	return fsl_bman_portal_finish();
}

void bman_thread_irq(void)
{
	qbman_invoke_irq(pcfg.irq);
	/* Now we need to uninhibit interrupts. This is the only code outside
	 * the regular portal driver that manipulates any portal register, so
	 * rather than breaking that encapsulation I am simply hard-coding the
	 * offset to the inhibit register here.
	 */
	out_be32(pcfg.addr_virt[DPAA_PORTAL_CI] + 0xe0c, 0);
}
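
/*
 * bman_init_ccsr() maps the BMan CCSR register block described by the
 * device-tree node so that the pool helpers further down in this file
 * (bm_pool_free_buffers(), bm_pool_set(), bm_pool_set_hw_threshold()) can
 * access the buffer-count and depletion-threshold registers directly.
 */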

int bman_init_ccsr(const struct device_node *node)
{
	static int ccsr_map_fd;
	uint64_t phys_addr;
	const uint32_t *bman_addr;
	uint64_t regs_size;

	bman_addr = of_get_address(node, 0, &regs_size, NULL);
	if (!bman_addr) {
		pr_err("of_get_address cannot return BMan address");
		return -EINVAL;
	}
	phys_addr = of_translate_address(node, bman_addr);
	if (!phys_addr) {
		pr_err("of_translate_address failed");
		return -EINVAL;
	}

	ccsr_map_fd = open(BMAN_CCSR_MAP, O_RDWR);
	if (unlikely(ccsr_map_fd < 0)) {
		pr_err("Can not open /dev/mem for BMan CCSR map");
		return ccsr_map_fd;
	}

	bman_ccsr_map = mmap(NULL, regs_size, PROT_READ |
			     PROT_WRITE, MAP_SHARED, ccsr_map_fd, phys_addr);
	if (bman_ccsr_map == MAP_FAILED) {
		pr_err("Cannot map BMan CCSR base: "
		       "0x%x Phys: 0x%" PRIx64 " size 0x%" PRIx64,
		       *bman_addr, phys_addr, regs_size);
		close(ccsr_map_fd);
		return -EINVAL;
	}

	return 0;
}

int bman_global_init(void)
{
	const struct device_node *dt_node;
	static int done;

	if (done)
		return -EBUSY;
	/* Use the device-tree to determine IP revision until something better
	 * is devised.
	 */
	dt_node = of_find_compatible_node(NULL, NULL, "fsl,bman-portal");
	if (!dt_node) {
		pr_err("No bman portals available for any CPU\n");
		return -ENODEV;
	}
	if (of_device_is_compatible(dt_node, "fsl,bman-portal-1.0") ||
	    of_device_is_compatible(dt_node, "fsl,bman-portal-1.0.0")) {
		bman_ip_rev = BMAN_REV10;
		bman_pool_max = 64;
	} else if (of_device_is_compatible(dt_node, "fsl,bman-portal-2.0") ||
		of_device_is_compatible(dt_node, "fsl,bman-portal-2.0.8")) {
		bman_ip_rev = BMAN_REV20;
		bman_pool_max = 8;
	} else if (of_device_is_compatible(dt_node, "fsl,bman-portal-2.1.0") ||
		of_device_is_compatible(dt_node, "fsl,bman-portal-2.1.1") ||
		of_device_is_compatible(dt_node, "fsl,bman-portal-2.1.2") ||
		of_device_is_compatible(dt_node, "fsl,bman-portal-2.1.3")) {
		bman_ip_rev = BMAN_REV21;
		bman_pool_max = 64;
	} else {
		pr_warn("unknown BMan version in portal node,default "
			"to rev1.0");
		bman_ip_rev = BMAN_REV10;
		bman_pool_max = 64;
	}

	if (!bman_ip_rev) {
		pr_err("Unknown bman portal version\n");
		return -ENODEV;
	}
	{
		const struct device_node *dn = of_find_compatible_node(NULL,
							NULL, "fsl,bman");
		if (!dn)
			pr_err("No bman device node available");
		else if (bman_init_ccsr(dn))
			pr_err("BMan CCSR map failed.");
	}

	done = 1;
	return 0;
}

#define BMAN_POOL_CONTENT(n) (0x0600 + ((n) * 0x04))
u32 bm_pool_free_buffers(u32 bpid)
{
	return in_be32(bman_ccsr_map + BMAN_POOL_CONTENT(bpid));
}
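
/*
 * The depletion-threshold registers take what amounts to an 8-bit mantissa
 * plus a 4-bit exponent (as the shift loop below implies), i.e.
 * threshold = mantissa * 2^exponent, stored as (mantissa | exponent << 8).
 * __generate_thresh() derives that encoding from a plain buffer count, e.g.:
 *
 *	__generate_thresh(0x40, 0)  == 0x040	(64  = 0x40 * 2^0)
 *	__generate_thresh(0x300, 0) == 0x2c0	(768 = 0xc0 * 2^2)
 *
 * With roundup set, any low-order bit shifted out rounds the mantissa up, so
 * the encoded threshold never falls below the requested value.
 */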

static u32 __generate_thresh(u32 val, int roundup)
{
	u32 e = 0;	/* exponent ('val' carries the coefficient/mantissa) */
	int oddbit = 0;

	while (val > 0xff) {
		oddbit = val & 1;
		val >>= 1;
		e++;
		if (roundup && oddbit)
			val++;
	}
	DPAA_ASSERT(e < 0x10);
	return (val | (e << 8));
}

#define POOL_SWDET(n)       (0x0000 + ((n) * 0x04))
#define POOL_HWDET(n)       (0x0100 + ((n) * 0x04))
#define POOL_SWDXT(n)       (0x0200 + ((n) * 0x04))
#define POOL_HWDXT(n)       (0x0300 + ((n) * 0x04))
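/*
 * bm_pool_set() programs all four depletion thresholds of a pool from a
 * 4-entry array ordered {SWDET, SWDXT, HWDET, HWDXT}: software depletion
 * entry/exit followed by hardware depletion entry/exit. Illustrative call
 * (the values are arbitrary):
 *
 *	u32 thresh[4] = {0x40, 0x80, 0x20, 0x40};
 *	ret = bm_pool_set(bpid, thresh);
 */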
int bm_pool_set(u32 bpid, const u32 *thresholds)
{
	if (!bman_ccsr_map)
		return -ENODEV;
	if (bpid >= bman_pool_max)
		return -EINVAL;
	out_be32(bman_ccsr_map + POOL_SWDET(bpid),
		 __generate_thresh(thresholds[0], 0));
	out_be32(bman_ccsr_map + POOL_SWDXT(bpid),
		 __generate_thresh(thresholds[1], 1));
	out_be32(bman_ccsr_map + POOL_HWDET(bpid),
		 __generate_thresh(thresholds[2], 0));
	out_be32(bman_ccsr_map + POOL_HWDXT(bpid),
		 __generate_thresh(thresholds[3], 1));
	return 0;
}

#define BMAN_LOW_DEFAULT_THRESH		0x40
#define BMAN_HIGH_DEFAULT_THRESH		0x80
int bm_pool_set_hw_threshold(u32 bpid, const u32 low_thresh,
			     const u32 high_thresh)
{
	if (!bman_ccsr_map)
		return -ENODEV;
	if (bpid >= bman_pool_max)
		return -EINVAL;
	if (low_thresh && high_thresh) {
		out_be32(bman_ccsr_map + POOL_HWDET(bpid),
			 __generate_thresh(low_thresh, 0));
		out_be32(bman_ccsr_map + POOL_HWDXT(bpid),
			 __generate_thresh(high_thresh, 1));
	} else {
		out_be32(bman_ccsr_map + POOL_HWDET(bpid),
			 __generate_thresh(BMAN_LOW_DEFAULT_THRESH, 0));
		out_be32(bman_ccsr_map + POOL_HWDXT(bpid),
			 __generate_thresh(BMAN_HIGH_DEFAULT_THRESH, 1));
	}
	return 0;
}