path: root/drivers/net/softnic/rte_eth_softnic_internals.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#ifndef __INCLUDE_RTE_ETH_SOFTNIC_INTERNALS_H__
#define __INCLUDE_RTE_ETH_SOFTNIC_INTERNALS_H__

#include <stdint.h>

#include <rte_mbuf.h>
#include <rte_sched.h>
#include <rte_ethdev_driver.h>
#include <rte_tm_driver.h>

#include "rte_eth_softnic.h"

/**
 * PMD Parameters
 */

enum pmd_feature {
	PMD_FEATURE_TM = 1, /**< Traffic Management (TM) */
};

#ifndef INTRUSIVE
#define INTRUSIVE					0
#endif

struct pmd_params {
	/** Parameters for the soft device (to be created) */
	struct {
		const char *name; /**< Name */
		uint32_t flags; /**< Flags */

		/** 0 = Access the hard device through its API only
		 *      (potentially slower, but safer);
		 *  1 = Direct access to the hard device's private data
		 *      structures is allowed (potentially faster).
		 */
		int intrusive;

		/** Traffic Management (TM) */
		struct {
			uint32_t rate; /**< Rate (bytes/second) */
			uint32_t nb_queues; /**< Number of queues */
			uint16_t qsize[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
			/**< Queue size per traffic class */
			uint32_t enq_bsz; /**< Enqueue burst size */
			uint32_t deq_bsz; /**< Dequeue burst size */
		} tm;
	} soft;

	/** Parameters for the hard device (existing) */
	struct {
		char *name; /**< Name */
		uint16_t tx_queue_id; /**< TX queue ID */
	} hard;
};
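
/* Illustrative example (not part of the original header): a minimal
 * pmd_params setup for a soft device with TM enabled, layered on top of an
 * existing hard device. The device names and numeric values below are
 * hypothetical placeholders, not defaults fixed by this header.
 *
 *	struct pmd_params params = {
 *		.soft = {
 *			.name = "soft0",
 *			.flags = PMD_FEATURE_TM,
 *			.intrusive = INTRUSIVE,
 *			.tm = {
 *				.rate = 1250000000, // 10 Gbps in bytes/second
 *				.nb_queues = 65536,
 *				.qsize = {64, 64, 64, 64},
 *				.enq_bsz = 32,
 *				.deq_bsz = 32,
 *			},
 *		},
 *		.hard = {
 *			.name = "0000:01:00.0",
 *			.tx_queue_id = 0,
 *		},
 *	};
 */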

/**
 * Default Internals
 */

#ifndef DEFAULT_BURST_SIZE
#define DEFAULT_BURST_SIZE				32
#endif

#ifndef FLUSH_COUNT_THRESHOLD
#define FLUSH_COUNT_THRESHOLD			(1 << 17)
#endif

struct default_internals {
	struct rte_mbuf **pkts;
	uint32_t pkts_len;
	uint32_t txq_pos;
	uint32_t flush_count;
};
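
/* Sketch of the flush policy suggested by the fields above (an assumption
 * drawn from the field names, not a definitive description of the driver):
 * packets are buffered in pkts[] and pushed to the hard device TX queue once
 * a full burst accumulates, or once flush_count crosses
 * FLUSH_COUNT_THRESHOLD, so that a partially filled burst cannot be held
 * back indefinitely.
 *
 *	if (def->pkts_len >= DEFAULT_BURST_SIZE ||
 *	    def->flush_count >= FLUSH_COUNT_THRESHOLD)
 *		default_flush(def); // hypothetical helper
 */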

/**
 * Traffic Management (TM) Internals
 */

#ifndef TM_MAX_SUBPORTS
#define TM_MAX_SUBPORTS					8
#endif

#ifndef TM_MAX_PIPES_PER_SUBPORT
#define TM_MAX_PIPES_PER_SUBPORT			4096
#endif

struct tm_params {
	struct rte_sched_port_params port_params;

	struct rte_sched_subport_params subport_params[TM_MAX_SUBPORTS];

	struct rte_sched_pipe_params
		pipe_profiles[RTE_SCHED_PIPE_PROFILES_PER_PORT];
	uint32_t n_pipe_profiles;
	uint32_t pipe_to_profile[TM_MAX_SUBPORTS * TM_MAX_PIPES_PER_SUBPORT];
};
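
/* Convenience accessor (an illustrative sketch; the helper name is
 * hypothetical and not part of the original API). pipe_to_profile[] flattens
 * the (subport, pipe) pair into a single index, so the profile assigned to a
 * given pipe is recovered as shown below.
 */
static inline uint32_t
tm_pipe_profile_lookup(struct tm_params *t, uint32_t subport_id,
	uint32_t pipe_id)
{
	return t->pipe_to_profile[subport_id * TM_MAX_PIPES_PER_SUBPORT +
		pipe_id];
}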

/* TM Levels */
enum tm_node_level {
	TM_NODE_LEVEL_PORT = 0,
	TM_NODE_LEVEL_SUBPORT,
	TM_NODE_LEVEL_PIPE,
	TM_NODE_LEVEL_TC,
	TM_NODE_LEVEL_QUEUE,
	TM_NODE_LEVEL_MAX,
};
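
/* The five levels mirror the rte_sched hierarchy, one TM node per scheduling
 * entity (fan-out limits taken from this file and rte_sched.h):
 *
 *	port (1 per device)
 *	  +-- subport (up to TM_MAX_SUBPORTS)
 *	        +-- pipe (up to TM_MAX_PIPES_PER_SUBPORT)
 *	              +-- traffic class (RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
 *	                    +-- queue (RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS)
 */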

/* TM Shaper Profile */
struct tm_shaper_profile {
	TAILQ_ENTRY(tm_shaper_profile) node;
	uint32_t shaper_profile_id;
	uint32_t n_users;
	struct rte_tm_shaper_params params;
};

TAILQ_HEAD(tm_shaper_profile_list, tm_shaper_profile);
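
/* List lookup sketch (illustrative; the helper name is hypothetical): shaper
 * profiles are kept on a TAILQ keyed by shaper_profile_id, so a lookup is a
 * linear scan. The shared shaper and WRED profile lists below follow the
 * same pattern.
 */
static inline struct tm_shaper_profile *
tm_shaper_profile_lookup(struct tm_shaper_profile_list *spl,
	uint32_t shaper_profile_id)
{
	struct tm_shaper_profile *sp;

	TAILQ_FOREACH(sp, spl, node)
		if (sp->shaper_profile_id == shaper_profile_id)
			return sp;

	return NULL;
}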

/* TM Shared Shaper */
struct tm_shared_shaper {
	TAILQ_ENTRY(tm_shared_shaper) node;
	uint32_t shared_shaper_id;
	uint32_t n_users;
	uint32_t shaper_profile_id;
};

TAILQ_HEAD(tm_shared_shaper_list, tm_shared_shaper);

/* TM WRED Profile */
struct tm_wred_profile {
	TAILQ_ENTRY(tm_wred_profile) node;
	uint32_t wred_profile_id;
	uint32_t n_users;
	struct rte_tm_wred_params params;
};

TAILQ_HEAD(tm_wred_profile_list, tm_wred_profile);

/* TM Node */
struct tm_node {
	TAILQ_ENTRY(tm_node) node;
	uint32_t node_id;
	uint32_t parent_node_id;
	uint32_t priority;
	uint32_t weight;
	uint32_t level;
	struct tm_node *parent_node;
	struct tm_shaper_profile *shaper_profile;
	struct tm_wred_profile *wred_profile;
	struct rte_tm_node_params params;
	struct rte_tm_node_stats stats;
	uint32_t n_children;
};

TAILQ_HEAD(tm_node_list, tm_node);

/* TM Hierarchy Specification */
struct tm_hierarchy {
	struct tm_shaper_profile_list shaper_profiles;
	struct tm_shared_shaper_list shared_shapers;
	struct tm_wred_profile_list wred_profiles;
	struct tm_node_list nodes;

	uint32_t n_shaper_profiles;
	uint32_t n_shared_shapers;
	uint32_t n_wred_profiles;
	uint32_t n_nodes;

	uint32_t n_tm_nodes[TM_NODE_LEVEL_MAX];
};

struct tm_internals {
	/** Hierarchy specification
	 *
	 *     - Hierarchy is unfrozen at init and when the port is stopped.
	 *     - Hierarchy is frozen on successful hierarchy commit.
	 *     - Run-time hierarchy changes are not allowed; therefore, the
	 *       hierarchy stays frozen once the port is started.
	 */
	struct tm_hierarchy h;
	int hierarchy_frozen;

	/** Blueprints (rte_sched configuration derived from the committed
	 *  hierarchy)
	 */
	struct tm_params params;

	/** Run-time */
	struct rte_sched_port *sched;
	struct rte_mbuf **pkts_enq;
	struct rte_mbuf **pkts_deq;
	uint32_t pkts_enq_len;
	uint32_t txq_pos;
	uint32_t flush_count;
};
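
/* Run-time sketch (an assumption about how the fields above are used, given
 * a struct pmd_internals *p as defined below): each iteration of the polling
 * loop drains up to deq_bsz packets from the scheduler and bursts them to
 * the hard device TX queue.
 *
 *	uint32_t n = rte_sched_port_dequeue(p->soft.tm.sched,
 *		p->soft.tm.pkts_deq, p->params.soft.tm.deq_bsz);
 *	if (n)
 *		rte_eth_tx_burst(p->hard.port_id, p->params.hard.tx_queue_id,
 *			p->soft.tm.pkts_deq, (uint16_t)n);
 */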

/**
 * PMD Internals
 */
struct pmd_internals {
	/** Params */
	struct pmd_params params;

	/** Soft device */
	struct {
		struct default_internals def; /**< Default */
		struct tm_internals tm; /**< Traffic Management */
	} soft;

	/** Hard device */
	struct {
		uint16_t port_id;
	} hard;
};

struct pmd_rx_queue {
	/** Hard device */
	struct {
		uint16_t port_id;
		uint16_t rx_queue_id;
	} hard;
};

/**
 * Traffic Management (TM) Operation
 */
extern const struct rte_tm_ops pmd_tm_ops;

int
tm_params_check(struct pmd_params *params, uint32_t hard_rate);

int
tm_init(struct pmd_internals *p, struct pmd_params *params, int numa_node);

void
tm_free(struct pmd_internals *p);

int
tm_start(struct pmd_internals *p);

void
tm_stop(struct pmd_internals *p);

static inline int
tm_enabled(struct rte_eth_dev *dev)
{
	struct pmd_internals *p = dev->data->dev_private;

	return (p->params.soft.flags & PMD_FEATURE_TM);
}

static inline int
tm_used(struct rte_eth_dev *dev)
{
	struct pmd_internals *p = dev->data->dev_private;

	return (p->params.soft.flags & PMD_FEATURE_TM) &&
		p->soft.tm.h.n_tm_nodes[TM_NODE_LEVEL_PORT];
}
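
/* Note: tm_enabled() only checks that the TM feature flag was requested at
 * device create time, while tm_used() additionally requires a committed
 * hierarchy (at least one port-level node), making it the right guard for
 * the TM datapath. Illustrative caller (hypothetical):
 *
 *	if (tm_used(dev))
 *		; // run packets through the rte_sched-based TM path
 *	else
 *		; // forward directly to the hard device
 */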

#endif /* __INCLUDE_RTE_ETH_SOFTNIC_INTERNALS_H__ */