/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#ifndef _OPDL_EVDEV_H_
#define _OPDL_EVDEV_H_

#include <rte_eventdev.h>
#include <rte_eventdev_pmd_vdev.h>
#include <rte_atomic.h>
#include "opdl_ring.h"

/* number of flow IDs tracked per queue ID */
#define OPDL_QID_NUM_FIDS 1024
/* internal queues (IQs) per queue */
#define OPDL_IQS_MAX 1
/* only a single priority level is supported */
#define OPDL_Q_PRIORITY_MAX 1
/* maximum number of ports per device */
#define OPDL_PORTS_MAX 64
/* maximum consumer-port claim/dequeue depth */
#define MAX_OPDL_CONS_Q_DEPTH 128
/* OPDL size */
#define OPDL_INFLIGHT_EVENTS_TOTAL 4096
/* allow for lots of over-provisioning */
#define OPDL_FRAGMENTS_MAX 1

/* report dequeue burst sizes in buckets */
#define OPDL_DEQ_STAT_BUCKET_SHIFT 2
/* how many packets pulled from port by sched */
#define SCHED_DEQUEUE_BURST_SIZE 32

/* size of our history list
 * NOTE(review): MAX_OPDL_PROD_Q_DEPTH is not defined in this header;
 * presumably it comes from opdl_ring.h - confirm
 */
#define OPDL_PORT_HIST_LIST (MAX_OPDL_PROD_Q_DEPTH)

/* how many data points use for average stats */
#define NUM_SAMPLES 64

/* vdev name of this PMD and its string form */
#define EVENTDEV_NAME_OPDL_PMD event_opdl
#define OPDL_PMD_NAME RTE_STR(event_opdl)
#define OPDL_PMD_NAME_MAX 64

/* sentinel queue id; also sizes the external->internal qid map below */
#define OPDL_INVALID_QID 255

/* extra scheduling type, beyond the RTE_SCHED_TYPE_* values */
#define OPDL_SCHED_TYPE_DIRECT (RTE_SCHED_TYPE_PARALLEL + 1)

/* number of dequeue-burst-size histogram buckets */
#define OPDL_NUM_POLL_BUCKETS  \
	(MAX_OPDL_CONS_Q_DEPTH >> OPDL_DEQ_STAT_BUCKET_SHIFT)

/*
 * Bit positions for the per-event QE_FLAG_* masks defined below.
 * _QE_FLAG_COUNT is not a flag itself; it is the number of shifts defined.
 */
enum {
	QE_FLAG_VALID_SHIFT = 0,
	QE_FLAG_COMPLETE_SHIFT,
	QE_FLAG_NOT_EOP_SHIFT,
	_QE_FLAG_COUNT
};

/*
 * Role of a port; per the struct opdl_port comment below, the type is
 * determined by the port's position in the queue pipeline.
 */
enum port_type {
	OPDL_INVALID_PORT = 0,
	OPDL_REGULAR_PORT = 1,
	OPDL_PURE_RX_PORT,	/* enqueue-only (producer) port */
	OPDL_PURE_TX_PORT,	/* dequeue-only (consumer) port */
	OPDL_ASYNC_PORT
};

/* Scheduling type of a queue (subset of eventdev types plus single-link). */
enum queue_type {
	OPDL_Q_TYPE_INVALID = 0,
	OPDL_Q_TYPE_SINGLE_LINK = 1,
	OPDL_Q_TYPE_ATOMIC,
	OPDL_Q_TYPE_ORDERED
};

/* Position of a queue within its pipeline (start, middle or end stage). */
enum queue_pos {
	OPDL_Q_POS_START = 0,
	OPDL_Q_POS_MIDDLE,
	OPDL_Q_POS_END
};

#define QE_FLAG_VALID    (1 << QE_FLAG_VALID_SHIFT)    /* for NEW FWD, FRAG */
#define QE_FLAG_COMPLETE (1 << QE_FLAG_COMPLETE_SHIFT) /* set for FWD, DROP  */
#define QE_FLAG_NOT_EOP  (1 << QE_FLAG_NOT_EOP_SHIFT)  /* set for FRAG only  */

/*
 * Map from an event op to its QE flag combination; presumably indexed by
 * the RTE_EVENT_OP_* value of the event (NEW/FORWARD/RELEASE) - confirm
 * against the enqueue path in opdl_evdev.c.
 */
static const uint8_t opdl_qe_flag_map[] = {
	QE_FLAG_VALID /* NEW Event */,
	QE_FLAG_VALID | QE_FLAG_COMPLETE /* FWD Event */,
	QE_FLAG_COMPLETE /* RELEASE Event */,

	/* Values which can be used for future support for partial
	 * events, i.e. where one event comes back to the scheduler
	 * as multiple which need to be tracked together
	 */
	QE_FLAG_VALID | QE_FLAG_COMPLETE | QE_FLAG_NOT_EOP,
};


/*
 * Per-port extended statistics; the enum values index the port_stat[]
 * array in struct opdl_port, and max_num_port_xstat is the array size.
 */
enum port_xstat_name {
	claim_pkts_requested = 0,
	claim_pkts_granted,
	claim_non_empty,
	claim_empty,
	total_cycles,
	max_num_port_xstat	/* count sentinel, not a statistic */
};

/* total xstat slots for the device: one set per possible port */
#define OPDL_MAX_PORT_XSTAT_NUM (OPDL_PORTS_MAX * max_num_port_xstat)

struct opdl_port;

/* Burst-enqueue handler installed on a port; returns events accepted. */
typedef uint16_t (*opdl_enq_operation)(struct opdl_port *port,
		const struct rte_event ev[],
		uint16_t num);

/* Burst-dequeue handler installed on a port; returns events retrieved. */
typedef uint16_t (*opdl_deq_operation)(struct opdl_port *port,
		struct rte_event ev[],
		uint16_t num);

struct opdl_evdev;

/* Per-stage bookkeeping kept by the device (one entry per port). */
struct opdl_stage_meta_data {
	uint32_t num_claimed;	/* number of entries claimed by this stage */
	uint32_t burst_sz;	/* Port claim burst size */
};

/*
 * Per-port state. A port is the application's enqueue/dequeue endpoint;
 * its enq/deq handlers and p_type are determined by its position in the
 * queue pipeline.
 */
struct opdl_port {

	/* back pointer to the owning device */
	struct opdl_evdev *opdl;

	/* enq handler & stage instance */
	opdl_enq_operation enq;
	struct opdl_stage *enq_stage_inst;

	/* deq handler & stage instance */
	opdl_deq_operation deq;
	struct opdl_stage *deq_stage_inst;

	/* port id has correctly been set */
	uint8_t configured;

	/* set when the port is initialized */
	uint8_t initialized;

	/* A numeric ID for the port */
	uint8_t id;

	/* Space for claimed entries (bounded by the max consumer depth) */
	struct rte_event *entries[MAX_OPDL_CONS_Q_DEPTH];

	/* RX/REGULAR/TX/ASYNC - determined on position in queue */
	enum port_type p_type;

	/* if the claim is static atomic type  */
	bool atomic_claim;

	/* Queue linked to this port - internal queue id*/
	uint8_t queue_id;

	/* Queue linked to this port - external queue id*/
	uint8_t external_qid;

	/* Next queue linked to this port - external queue id*/
	uint8_t next_external_qid;

	/* number of instances of this stage */
	uint32_t num_instance;

	/* instance ID of this stage*/
	uint32_t instance_id;

	/* track packets in and out of this port; indexed by
	 * enum port_xstat_name
	 */
	uint64_t port_stat[max_num_port_xstat];
	uint64_t start_cycles;
};

/* Queue configuration captured at setup time (one entry per queue). */
struct opdl_queue_meta_data {
	uint8_t         ext_id;	/* external queue id */
	enum queue_type type;
	int8_t          setup;	/* non-zero once the queue has been set up */
};

/* One extended-statistic slot: its name, id and a pointer to the counter. */
struct opdl_xstats_entry {
	struct rte_event_dev_xstats_name stat;
	unsigned int id;
	uint64_t *value;	/* points at the live counter, not a copy */
};

/* Internal representation of an eventdev queue and its linked ports. */
struct opdl_queue {

	/* Opdl ring this queue is associated with */
	uint32_t opdl_id;

	/* type and position have correctly been set */
	uint8_t configured;

	/* port number and associated ports have been associated */
	uint8_t initialized;

	/* type of this queue (Atomic, Ordered, Parallel, Direct)*/
	enum queue_type q_type;

	/* position of queue (START, MIDDLE, END) */
	enum queue_pos q_pos;

	/* external queue id. It is mapped to the queue position */
	uint8_t external_qid;

	/* ports linked to this queue */
	struct opdl_port *ports[OPDL_PORTS_MAX];
	uint32_t nb_ports;

	/* priority, reserved for future */
	uint8_t priority;
};


/* maximum number of opdl rings per device */
#define OPDL_TUR_PER_DEV 12

/* PMD needs an extra queue per Opdl  */
#define OPDL_MAX_QUEUES (RTE_EVENT_MAX_QUEUES_PER_DEV - OPDL_TUR_PER_DEV)


/* Device-private state for an opdl event device. */
struct opdl_evdev {
	/* back pointer to the generic eventdev data */
	struct rte_eventdev_data *data;

	/* non-zero once the device has been started */
	uint8_t started;

	/* Max number of ports and queues*/
	uint32_t max_port_nb;
	uint32_t max_queue_nb;

	/* slots in the opdl ring */
	uint32_t nb_events_limit;

	/*
	 * Array holding all opdl for this device
	 */
	struct opdl_ring *opdl[OPDL_TUR_PER_DEV];
	uint32_t nb_opdls;

	/* per-queue setup metadata */
	struct opdl_queue_meta_data q_md[OPDL_MAX_QUEUES];
	uint32_t nb_q_md;

	/* Internal queues - one per logical queue */
	struct opdl_queue
		queue[RTE_EVENT_MAX_QUEUES_PER_DEV] __rte_cache_aligned;

	uint32_t nb_queues;

	/* per-stage claim metadata, indexed like ports[] */
	struct opdl_stage_meta_data s_md[OPDL_PORTS_MAX];

	/* Contains all ports - load balanced and directed */
	struct opdl_port ports[OPDL_PORTS_MAX] __rte_cache_aligned;
	uint32_t nb_ports;

	/* external -> internal queue id map, indexed by external qid
	 * (OPDL_INVALID_QID entries, so the invalid id is never a valid index)
	 */
	uint8_t q_map_ex_to_in[OPDL_INVALID_QID];

	/* Stats */
	struct opdl_xstats_entry port_xstat[OPDL_MAX_PORT_XSTAT_NUM];

	char service_name[OPDL_PMD_NAME_MAX];
	int socket;		/* NUMA socket the device is allocated on */
	int do_validation;	/* enable run-time validation/stats path */
	int do_test;		/* run self-test on startup */
};


/* Fetch the PMD-private opdl_evdev instance from a generic eventdev. */
static inline struct opdl_evdev *
opdl_pmd_priv(const struct rte_eventdev *eventdev)
{
	void *priv = eventdev->data->dev_private;

	return priv;
}

static inline uint8_t
opdl_pmd_dev_id(const struct opdl_evdev *opdl)
{
	return opdl->data->dev_id;
}

/* Const variant of opdl_pmd_priv() for read-only callers. */
static inline const struct opdl_evdev *
opdl_pmd_priv_const(const struct rte_eventdev *eventdev)
{
	const void *priv = eventdev->data->dev_private;

	return priv;
}

/* Fast-path enqueue/dequeue entry points - presumably installed as the
 * eventdev enqueue/dequeue handlers; confirm in opdl_evdev.c.
 */
uint16_t opdl_event_enqueue(void *port, const struct rte_event *ev);
uint16_t opdl_event_enqueue_burst(void *port, const struct rte_event ev[],
		uint16_t num);

uint16_t opdl_event_dequeue(void *port, struct rte_event *ev, uint64_t wait);
uint16_t opdl_event_dequeue_burst(void *port, struct rte_event *ev,
		uint16_t num, uint64_t wait);
void opdl_event_schedule(struct rte_eventdev *dev);

/* Extended-statistics support (see struct opdl_xstats_entry). */
void opdl_xstats_init(struct rte_eventdev *dev);
int opdl_xstats_uninit(struct rte_eventdev *dev);
int opdl_xstats_get_names(const struct rte_eventdev *dev,
		enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
		struct rte_event_dev_xstats_name *xstats_names,
		unsigned int *ids, unsigned int size);
int opdl_xstats_get(const struct rte_eventdev *dev,
		enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
		const unsigned int ids[], uint64_t values[], unsigned int n);
uint64_t opdl_xstats_get_by_name(const struct rte_eventdev *dev,
		const char *name, unsigned int *id);
int opdl_xstats_reset(struct rte_eventdev *dev,
		enum rte_event_dev_xstats_mode mode,
		int16_t queue_port_id,
		const uint32_t ids[],
		uint32_t nb_ids);

/* Device setup/teardown helpers used when configuring the pipeline. */
int opdl_add_event_handlers(struct rte_eventdev *dev);
int build_all_dependencies(struct rte_eventdev *dev);
int check_queues_linked(struct rte_eventdev *dev);
int create_queues_and_rings(struct rte_eventdev *dev);
int initialise_all_other_ports(struct rte_eventdev *dev);
int initialise_queue_zero_ports(struct rte_eventdev *dev);
int assign_internal_queue_ids(struct rte_eventdev *dev);
void destroy_queues_and_rings(struct rte_eventdev *dev);
int opdl_selftest(void);

#endif /* _OPDL_EVDEV_H_ */