Diffstat (limited to 'drivers/net/softnic/rte_eth_softnic_tm.c')
 drivers/net/softnic/rte_eth_softnic_tm.c | 345
 1 file changed, 167 insertions(+), 178 deletions(-)
diff --git a/drivers/net/softnic/rte_eth_softnic_tm.c b/drivers/net/softnic/rte_eth_softnic_tm.c
index 79f1c6a8..baaafbe2 100644
--- a/drivers/net/softnic/rte_eth_softnic_tm.c
+++ b/drivers/net/softnic/rte_eth_softnic_tm.c
@@ -7,62 +7,148 @@
#include <string.h>
#include <rte_malloc.h>
+#include <rte_string_fns.h>
#include "rte_eth_softnic_internals.h"
#include "rte_eth_softnic.h"
-#define BYTES_IN_MBPS (1000 * 1000 / 8)
#define SUBPORT_TC_PERIOD 10
#define PIPE_TC_PERIOD 40
int
-tm_params_check(struct pmd_params *params, uint32_t hard_rate)
+softnic_tmgr_init(struct pmd_internals *p)
{
- uint64_t hard_rate_bytes_per_sec = (uint64_t)hard_rate * BYTES_IN_MBPS;
- uint32_t i;
+ TAILQ_INIT(&p->tmgr_port_list);
- /* rate */
- if (params->soft.tm.rate) {
- if (params->soft.tm.rate > hard_rate_bytes_per_sec)
- return -EINVAL;
- } else {
- params->soft.tm.rate =
- (hard_rate_bytes_per_sec > UINT32_MAX) ?
- UINT32_MAX : hard_rate_bytes_per_sec;
+ return 0;
+}
+
+void
+softnic_tmgr_free(struct pmd_internals *p)
+{
+ for ( ; ; ) {
+ struct softnic_tmgr_port *tmgr_port;
+
+ tmgr_port = TAILQ_FIRST(&p->tmgr_port_list);
+ if (tmgr_port == NULL)
+ break;
+
+ TAILQ_REMOVE(&p->tmgr_port_list, tmgr_port, node);
+ rte_sched_port_free(tmgr_port->s);
+ free(tmgr_port);
}
+}
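The drain loop above re-reads TAILQ_FIRST() on every iteration because the current element is removed and freed inside the loop body; a plain TAILQ_FOREACH() would step through freed memory. Where the platform's <sys/queue.h> provides TAILQ_FOREACH_SAFE (a BSD extension, not guaranteed by glibc), an equivalent sketch would be:

	struct softnic_tmgr_port *tmgr_port, *tmp;

	/* Hypothetical alternative; assumes TAILQ_FOREACH_SAFE exists */
	TAILQ_FOREACH_SAFE(tmgr_port, &p->tmgr_port_list, node, tmp) {
		TAILQ_REMOVE(&p->tmgr_port_list, tmgr_port, node);
		rte_sched_port_free(tmgr_port->s);
		free(tmgr_port);
	}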
+
+struct softnic_tmgr_port *
+softnic_tmgr_port_find(struct pmd_internals *p,
+ const char *name)
+{
+ struct softnic_tmgr_port *tmgr_port;
- /* nb_queues */
- if (params->soft.tm.nb_queues == 0)
- return -EINVAL;
+ if (name == NULL)
+ return NULL;
- if (params->soft.tm.nb_queues < RTE_SCHED_QUEUES_PER_PIPE)
- params->soft.tm.nb_queues = RTE_SCHED_QUEUES_PER_PIPE;
+ TAILQ_FOREACH(tmgr_port, &p->tmgr_port_list, node)
+ if (strcmp(tmgr_port->name, name) == 0)
+ return tmgr_port;
- params->soft.tm.nb_queues =
- rte_align32pow2(params->soft.tm.nb_queues);
+ return NULL;
+}
- /* qsize */
- for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
- if (params->soft.tm.qsize[i] == 0)
- return -EINVAL;
+struct softnic_tmgr_port *
+softnic_tmgr_port_create(struct pmd_internals *p,
+ const char *name)
+{
+ struct softnic_tmgr_port *tmgr_port;
+ struct tm_params *t = &p->soft.tm.params;
+ struct rte_sched_port *sched;
+ uint32_t n_subports, subport_id;
+
+ /* Check input params */
+ if (name == NULL ||
+ softnic_tmgr_port_find(p, name))
+ return NULL;
+
+ /*
+ * Resource create
+ */
+
+ /* Is hierarchy frozen? */
+ if (p->soft.tm.hierarchy_frozen == 0)
+ return NULL;
+
+ /* Port */
+ sched = rte_sched_port_config(&t->port_params);
+ if (sched == NULL)
+ return NULL;
+
+ /* Subport */
+ n_subports = t->port_params.n_subports_per_port;
+ for (subport_id = 0; subport_id < n_subports; subport_id++) {
+ uint32_t n_pipes_per_subport = t->port_params.n_pipes_per_subport;
+ uint32_t pipe_id;
+ int status;
- params->soft.tm.qsize[i] =
- rte_align32pow2(params->soft.tm.qsize[i]);
+ status = rte_sched_subport_config(sched,
+ subport_id,
+ &t->subport_params[subport_id]);
+ if (status) {
+ rte_sched_port_free(sched);
+ return NULL;
+ }
+
+ /* Pipe */
+ for (pipe_id = 0; pipe_id < n_pipes_per_subport; pipe_id++) {
+ int pos = subport_id * TM_MAX_PIPES_PER_SUBPORT + pipe_id;
+ int profile_id = t->pipe_to_profile[pos];
+
+ if (profile_id < 0)
+ continue;
+
+ status = rte_sched_pipe_config(sched,
+ subport_id,
+ pipe_id,
+ profile_id);
+ if (status) {
+ rte_sched_port_free(sched);
+ return NULL;
+ }
+ }
}
- /* enq_bsz, deq_bsz */
- if (params->soft.tm.enq_bsz == 0 ||
- params->soft.tm.deq_bsz == 0 ||
- params->soft.tm.deq_bsz >= params->soft.tm.enq_bsz)
- return -EINVAL;
+ /* Node allocation */
+ tmgr_port = calloc(1, sizeof(struct softnic_tmgr_port));
+ if (tmgr_port == NULL) {
+ rte_sched_port_free(sched);
+ return NULL;
+ }
- return 0;
+ /* Node fill in */
+ strlcpy(tmgr_port->name, name, sizeof(tmgr_port->name));
+ tmgr_port->s = sched;
+
+ /* Node add to list */
+ TAILQ_INSERT_TAIL(&p->tmgr_port_list, tmgr_port, node);
+
+ return tmgr_port;
}
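A minimal caller sketch for the new create path, assuming the TM hierarchy has already been frozen by rte_tm_hierarchy_commit() and with error reporting elided:

	/* Hypothetical usage; "TMGR" is the name SCHED() looks up below */
	struct softnic_tmgr_port *tmgr_port;

	tmgr_port = softnic_tmgr_port_create(p, "TMGR");
	if (tmgr_port == NULL)
		return -EINVAL; /* duplicate name, unfrozen hierarchy, or
				 * rte_sched port/subport/pipe config failure */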
-static void
+static struct rte_sched_port *
+SCHED(struct pmd_internals *p)
+{
+ struct softnic_tmgr_port *tmgr_port;
+
+ tmgr_port = softnic_tmgr_port_find(p, "TMGR");
+ if (tmgr_port == NULL)
+ return NULL;
+
+ return tmgr_port->s;
+}
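SCHED() replaces the removed p->soft.tm.sched cache: every run-time update and stats path below now resolves the scheduler through the tmgr port list, keyed by the fixed name "TMGR", on each call. The trade-off is an extra list walk per call in exchange for a single owner of the rte_sched_port lifetime. Note it returns NULL until that port has been created, so a defensive call site (hypothetical) first materializes the pointer:

	struct rte_sched_port *sched = SCHED(p);

	if (sched == NULL)
		return -1; /* "TMGR" port not created yet */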
+
+void
tm_hierarchy_init(struct pmd_internals *p)
{
- memset(&p->soft.tm.h, 0, sizeof(p->soft.tm.h));
+ memset(&p->soft.tm, 0, sizeof(p->soft.tm));
/* Initialize shaper profile list */
TAILQ_INIT(&p->soft.tm.h.shaper_profiles);
@@ -77,8 +163,8 @@ tm_hierarchy_init(struct pmd_internals *p)
TAILQ_INIT(&p->soft.tm.h.nodes);
}
-static void
-tm_hierarchy_uninit(struct pmd_internals *p)
+void
+tm_hierarchy_free(struct pmd_internals *p)
{
/* Remove all nodes */
for ( ; ; ) {
@@ -129,111 +215,7 @@ tm_hierarchy_uninit(struct pmd_internals *p)
free(shaper_profile);
}
- memset(&p->soft.tm.h, 0, sizeof(p->soft.tm.h));
-}
-
-int
-tm_init(struct pmd_internals *p,
- struct pmd_params *params,
- int numa_node)
-{
- uint32_t enq_bsz = params->soft.tm.enq_bsz;
- uint32_t deq_bsz = params->soft.tm.deq_bsz;
-
- p->soft.tm.pkts_enq = rte_zmalloc_socket(params->soft.name,
- 2 * enq_bsz * sizeof(struct rte_mbuf *),
- 0,
- numa_node);
-
- if (p->soft.tm.pkts_enq == NULL)
- return -ENOMEM;
-
- p->soft.tm.pkts_deq = rte_zmalloc_socket(params->soft.name,
- deq_bsz * sizeof(struct rte_mbuf *),
- 0,
- numa_node);
-
- if (p->soft.tm.pkts_deq == NULL) {
- rte_free(p->soft.tm.pkts_enq);
- return -ENOMEM;
- }
-
tm_hierarchy_init(p);
-
- return 0;
-}
-
-void
-tm_free(struct pmd_internals *p)
-{
- tm_hierarchy_uninit(p);
- rte_free(p->soft.tm.pkts_enq);
- rte_free(p->soft.tm.pkts_deq);
-}
-
-int
-tm_start(struct pmd_internals *p)
-{
- struct tm_params *t = &p->soft.tm.params;
- uint32_t n_subports, subport_id;
- int status;
-
- /* Is hierarchy frozen? */
- if (p->soft.tm.hierarchy_frozen == 0)
- return -1;
-
- /* Port */
- p->soft.tm.sched = rte_sched_port_config(&t->port_params);
- if (p->soft.tm.sched == NULL)
- return -1;
-
- /* Subport */
- n_subports = t->port_params.n_subports_per_port;
- for (subport_id = 0; subport_id < n_subports; subport_id++) {
- uint32_t n_pipes_per_subport =
- t->port_params.n_pipes_per_subport;
- uint32_t pipe_id;
-
- status = rte_sched_subport_config(p->soft.tm.sched,
- subport_id,
- &t->subport_params[subport_id]);
- if (status) {
- rte_sched_port_free(p->soft.tm.sched);
- return -1;
- }
-
- /* Pipe */
- n_pipes_per_subport = t->port_params.n_pipes_per_subport;
- for (pipe_id = 0; pipe_id < n_pipes_per_subport; pipe_id++) {
- int pos = subport_id * TM_MAX_PIPES_PER_SUBPORT +
- pipe_id;
- int profile_id = t->pipe_to_profile[pos];
-
- if (profile_id < 0)
- continue;
-
- status = rte_sched_pipe_config(p->soft.tm.sched,
- subport_id,
- pipe_id,
- profile_id);
- if (status) {
- rte_sched_port_free(p->soft.tm.sched);
- return -1;
- }
- }
- }
-
- return 0;
-}
-
-void
-tm_stop(struct pmd_internals *p)
-{
- if (p->soft.tm.sched)
- rte_sched_port_free(p->soft.tm.sched);
-
- /* Unfreeze hierarchy */
- p->soft.tm.hierarchy_frozen = 0;
}
static struct tm_shaper_profile *
@@ -384,7 +366,7 @@ static uint32_t
tm_level_get_max_nodes(struct rte_eth_dev *dev, enum tm_node_level level)
{
struct pmd_internals *p = dev->data->dev_private;
- uint32_t n_queues_max = p->params.soft.tm.nb_queues;
+ uint32_t n_queues_max = p->params.tm.n_queues;
uint32_t n_tc_max = n_queues_max / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS;
uint32_t n_pipes_max = n_tc_max / RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE;
uint32_t n_subports_max = n_pipes_max;
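Worked numbers for the derivation above, assuming the classic librte_sched layout of this era (RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS = 4, RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE = 4): with p->params.tm.n_queues = 4096, n_tc_max = 4096 / 4 = 1024, n_pipes_max = 1024 / 4 = 256, and n_subports_max = 256.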
@@ -429,7 +411,7 @@ pmd_tm_node_type_get(struct rte_eth_dev *dev,
NULL,
rte_strerror(EINVAL));
- *is_leaf = node_id < p->params.soft.tm.nb_queues;
+ *is_leaf = node_id < p->params.tm.n_queues;
return 0;
}
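Leaf detection is thus purely ID-based: queue nodes occupy IDs [0, n_queues) and everything above is non-leaf. A hedged sketch through the generic TM API (port_id assumed valid):

	int is_leaf = 0;
	struct rte_tm_error err;

	/* Hypothetical: with tm.n_queues = 4096, any node_id < 4096
	 * reports as a leaf */
	rte_tm_node_type_get(port_id, 100, &is_leaf, &err);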
@@ -479,6 +461,8 @@ static const struct rte_tm_capabilities tm_cap = {
.sched_wfq_n_groups_max = 1,
.sched_wfq_weight_max = UINT32_MAX,
+ .cman_wred_packet_mode_supported = WRED_SUPPORTED,
+ .cman_wred_byte_mode_supported = 0,
.cman_head_drop_supported = 0,
.cman_wred_context_n_max = 0,
.cman_wred_context_private_n_max = 0,
@@ -667,6 +651,8 @@ static const struct rte_tm_level_capabilities tm_level_cap[] = {
.shaper_shared_n_max = 0,
.cman_head_drop_supported = 0,
+ .cman_wred_packet_mode_supported = WRED_SUPPORTED,
+ .cman_wred_byte_mode_supported = 0,
.cman_wred_context_private_supported = WRED_SUPPORTED,
.cman_wred_context_shared_n_max = 0,
@@ -828,6 +814,8 @@ static const struct rte_tm_node_capabilities tm_node_cap[] = {
{.leaf = {
.cman_head_drop_supported = 0,
+ .cman_wred_packet_mode_supported = WRED_SUPPORTED,
+ .cman_wred_byte_mode_supported = 0,
.cman_wred_context_private_supported = WRED_SUPPORTED,
.cman_wred_context_shared_n_max = 0,
} },
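The three capability tables (port, level, node) now advertise WRED in packet mode only, with byte mode left at 0. An application can discover this through the standard capability query; a sketch, with port_id assumed valid:

	struct rte_tm_capabilities cap;
	struct rte_tm_error err;

	if (rte_tm_capabilities_get(port_id, &cap, &err) == 0 &&
	    cap.cman_wred_packet_mode_supported)
		/* build WRED profiles with packet_mode = 1 */;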
@@ -1077,7 +1065,7 @@ update_subport_tc_rate(struct rte_eth_dev *dev,
subport_params.tc_rate[tc_id] = sp_new->params.peak.rate;
/* Update the subport configuration. */
- if (rte_sched_subport_config(p->soft.tm.sched,
+ if (rte_sched_subport_config(SCHED(p),
subport_id, &subport_params))
return -1;
@@ -1243,12 +1231,23 @@ wred_profile_check(struct rte_eth_dev *dev,
NULL,
rte_strerror(EINVAL));
+ /* WRED profile should be in packet mode */
+ if (profile->packet_mode == 0)
+ return -rte_tm_error_set(error,
+ ENOTSUP,
+ RTE_TM_ERROR_TYPE_WRED_PROFILE,
+ NULL,
+ rte_strerror(ENOTSUP));
+
/* min_th <= max_th, max_th > 0 */
for (color = RTE_TM_GREEN; color < RTE_TM_COLORS; color++) {
- uint16_t min_th = profile->red_params[color].min_th;
- uint16_t max_th = profile->red_params[color].max_th;
+ uint32_t min_th = profile->red_params[color].min_th;
+ uint32_t max_th = profile->red_params[color].max_th;
- if (min_th > max_th || max_th == 0)
+ if (min_th > max_th ||
+ max_th == 0 ||
+ min_th > UINT16_MAX ||
+ max_th > UINT16_MAX)
return -rte_tm_error_set(error,
EINVAL,
RTE_TM_ERROR_TYPE_WRED_PROFILE,
NULL,
rte_strerror(EINVAL));
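Widening the locals from uint16_t to uint32_t is what makes the new UINT16_MAX guards meaningful: previously a threshold such as max_th = 70000 was silently truncated to 70000 mod 65536 = 4464 before validation and passed; now the full value survives the copy and the max_th > UINT16_MAX check rejects it. A hypothetical profile the old code mis-accepted:

	struct rte_tm_wred_params wp = {0};

	wp.packet_mode = 1;
	wp.red_params[RTE_TM_GREEN].min_th = 1000;
	wp.red_params[RTE_TM_GREEN].max_th = 70000; /* > UINT16_MAX */
	/* wred_profile_check() now fails with EINVAL instead of
	 * programming a 4464-packet threshold */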
@@ -1345,7 +1344,7 @@ node_add_check_port(struct rte_eth_dev *dev,
params->shaper_profile_id);
/* node type: non-leaf */
- if (node_id < p->params.soft.tm.nb_queues)
+ if (node_id < p->params.tm.n_queues)
return -rte_tm_error_set(error,
EINVAL,
RTE_TM_ERROR_TYPE_NODE_ID,
@@ -1368,12 +1367,9 @@ node_add_check_port(struct rte_eth_dev *dev,
NULL,
rte_strerror(EINVAL));
- /* Shaper must be valid.
- * Shaper profile peak rate must fit the configured port rate.
- */
+ /* Shaper must be valid */
if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
- sp == NULL ||
- sp->params.peak.rate > p->params.soft.tm.rate)
+ sp == NULL)
return -rte_tm_error_set(error,
EINVAL,
RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
@@ -1420,7 +1416,7 @@ node_add_check_subport(struct rte_eth_dev *dev,
struct pmd_internals *p = dev->data->dev_private;
/* node type: non-leaf */
- if (node_id < p->params.soft.tm.nb_queues)
+ if (node_id < p->params.tm.n_queues)
return -rte_tm_error_set(error,
EINVAL,
RTE_TM_ERROR_TYPE_NODE_ID,
@@ -1492,7 +1488,7 @@ node_add_check_pipe(struct rte_eth_dev *dev,
struct pmd_internals *p = dev->data->dev_private;
/* node type: non-leaf */
- if (node_id < p->params.soft.tm.nb_queues)
+ if (node_id < p->params.tm.n_queues)
return -rte_tm_error_set(error,
EINVAL,
RTE_TM_ERROR_TYPE_NODE_ID,
@@ -1569,7 +1565,7 @@ node_add_check_tc(struct rte_eth_dev *dev,
struct pmd_internals *p = dev->data->dev_private;
/* node type: non-leaf */
- if (node_id < p->params.soft.tm.nb_queues)
+ if (node_id < p->params.tm.n_queues)
return -rte_tm_error_set(error,
EINVAL,
RTE_TM_ERROR_TYPE_NODE_ID,
@@ -1642,7 +1638,7 @@ node_add_check_queue(struct rte_eth_dev *dev,
struct pmd_internals *p = dev->data->dev_private;
/* node type: leaf */
- if (node_id >= p->params.soft.tm.nb_queues)
+ if (node_id >= p->params.tm.n_queues)
return -rte_tm_error_set(error,
EINVAL,
RTE_TM_ERROR_TYPE_NODE_ID,
@@ -2531,10 +2527,10 @@ hierarchy_blueprints_create(struct rte_eth_dev *dev)
.n_subports_per_port = root->n_children,
.n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT],
- .qsize = {p->params.soft.tm.qsize[0],
- p->params.soft.tm.qsize[1],
- p->params.soft.tm.qsize[2],
- p->params.soft.tm.qsize[3],
+ .qsize = {p->params.tm.qsize[0],
+ p->params.tm.qsize[1],
+ p->params.tm.qsize[2],
+ p->params.tm.qsize[3],
},
.pipe_profiles = t->pipe_profiles,
.n_pipe_profiles = t->n_pipe_profiles,
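For context, the renamed fields feeding this blueprint live in the flattened p->params.tm namespace. A hedged sketch of the relevant slice of pmd_params, using only the fields visible in this patch:

	/* Hypothetical initializer; field names as used above */
	struct pmd_params params = {
		.tm = {
			.n_queues = 4096,          /* leaf queues */
			.qsize = {64, 64, 64, 64}, /* per-TC queue size */
		},
	};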
@@ -2597,10 +2593,8 @@ pmd_tm_hierarchy_commit(struct rte_eth_dev *dev,
status = hierarchy_commit_check(dev, error);
if (status) {
- if (clear_on_fail) {
- tm_hierarchy_uninit(p);
- tm_hierarchy_init(p);
- }
+ if (clear_on_fail)
+ tm_hierarchy_free(p);
return status;
}
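The two-call clear (tm_hierarchy_uninit() followed by tm_hierarchy_init()) collapses to one because tm_hierarchy_free() now ends by re-initializing the lists itself (the retained tm_hierarchy_init(p) call in the earlier hunk), leaving the failure path as simply:

	if (clear_on_fail)
		tm_hierarchy_free(p); /* frees nodes/profiles, then re-inits lists */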
@@ -2642,7 +2636,7 @@ update_pipe_weight(struct rte_eth_dev *dev, struct tm_node *np, uint32_t weight)
return -1;
/* Update the pipe profile used by the current pipe. */
- if (rte_sched_pipe_config(p->soft.tm.sched, subport_id, pipe_id,
+ if (rte_sched_pipe_config(SCHED(p), subport_id, pipe_id,
(int32_t)pipe_profile_id))
return -1;
@@ -2691,7 +2685,7 @@ update_queue_weight(struct rte_eth_dev *dev,
return -1;
/* Update the pipe profile used by the current pipe. */
- if (rte_sched_pipe_config(p->soft.tm.sched, subport_id, pipe_id,
+ if (rte_sched_pipe_config(SCHED(p), subport_id, pipe_id,
(int32_t)pipe_profile_id))
return -1;
@@ -2824,7 +2818,7 @@ update_subport_rate(struct rte_eth_dev *dev,
subport_params.tb_size = sp->params.peak.size;
/* Update the subport configuration. */
- if (rte_sched_subport_config(p->soft.tm.sched, subport_id,
+ if (rte_sched_subport_config(SCHED(p), subport_id,
&subport_params))
return -1;
@@ -2871,7 +2865,7 @@ update_pipe_rate(struct rte_eth_dev *dev,
return -1;
/* Update the pipe profile used by the current pipe. */
- if (rte_sched_pipe_config(p->soft.tm.sched, subport_id, pipe_id,
+ if (rte_sched_pipe_config(SCHED(p), subport_id, pipe_id,
(int32_t)pipe_profile_id))
return -1;
@@ -2916,7 +2910,7 @@ update_tc_rate(struct rte_eth_dev *dev,
return -1;
/* Update the pipe profile used by the current pipe. */
- if (rte_sched_pipe_config(p->soft.tm.sched, subport_id, pipe_id,
+ if (rte_sched_pipe_config(SCHED(p), subport_id, pipe_id,
(int32_t)pipe_profile_id))
return -1;
@@ -3051,8 +3045,7 @@ read_port_stats(struct rte_eth_dev *dev,
uint32_t tc_ov, id;
/* Stats read */
- int status = rte_sched_subport_read_stats(
- p->soft.tm.sched,
+ int status = rte_sched_subport_read_stats(SCHED(p),
subport_id,
&s,
&tc_ov);
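This and the following stats hunks are mechanical: the cached p->soft.tm.sched pointer gives way to SCHED(p). A standalone poll outside these read paths (hypothetical, error handling elided) would guard against the NULL case before reading:

	struct rte_sched_port *sched = SCHED(p);
	struct rte_sched_subport_stats stats;
	uint32_t tc_ov;

	if (sched == NULL)
		return -1; /* tmgr port not created yet */
	if (rte_sched_subport_read_stats(sched, subport_id, &stats, &tc_ov))
		return -1;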
@@ -3099,8 +3092,7 @@ read_subport_stats(struct rte_eth_dev *dev,
uint32_t tc_ov, tc_id;
/* Stats read */
- int status = rte_sched_subport_read_stats(
- p->soft.tm.sched,
+ int status = rte_sched_subport_read_stats(SCHED(p),
subport_id,
&s,
&tc_ov);
@@ -3160,8 +3152,7 @@ read_pipe_stats(struct rte_eth_dev *dev,
i / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS,
i % RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS);
- int status = rte_sched_queue_read_stats(
- p->soft.tm.sched,
+ int status = rte_sched_queue_read_stats(SCHED(p),
qid,
&s,
&qlen);
@@ -3221,8 +3212,7 @@ read_tc_stats(struct rte_eth_dev *dev,
tc_id,
i);
- int status = rte_sched_queue_read_stats(
- p->soft.tm.sched,
+ int status = rte_sched_queue_read_stats(SCHED(p),
qid,
&s,
&qlen);
@@ -3281,8 +3271,7 @@ read_queue_stats(struct rte_eth_dev *dev,
tc_id,
queue_id);
- int status = rte_sched_queue_read_stats(
- p->soft.tm.sched,
+ int status = rte_sched_queue_read_stats(SCHED(p),
qid,
&s,
&qlen);