author    Luca Boccassi <luca.boccassi@gmail.com>    2017-11-08 14:15:11 +0000
committer Luca Boccassi <luca.boccassi@gmail.com>    2017-11-08 14:45:54 +0000
commit    055c52583a2794da8ba1e85a48cce3832372b12f (patch)
tree      8ceb1cb78fbb46a0f341f8ee24feb3c6b5540013 /drivers/net/mlx5/mlx5_flow.c
parent    f239aed5e674965691846e8ce3f187dd47523689 (diff)
New upstream version 17.11-rc3
Change-Id: I6a5baa40612fe0c20f30b5fa773a6cbbac63a685
Signed-off-by: Luca Boccassi <luca.boccassi@gmail.com>
Diffstat (limited to 'drivers/net/mlx5/mlx5_flow.c')
-rw-r--r--    drivers/net/mlx5/mlx5_flow.c    2811
1 file changed, 2138 insertions(+), 673 deletions(-)
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 86be9291..cd99cb07 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -52,13 +52,36 @@
#include "mlx5.h"
#include "mlx5_prm.h"
-/* Number of Work Queue necessary for the DROP queue. */
-#ifndef HAVE_VERBS_IBV_EXP_FLOW_SPEC_ACTION_DROP
-#define MLX5_DROP_WQ_N 4
-#else
-#define MLX5_DROP_WQ_N 1
+/* Define minimal priority for control plane flows. */
+#define MLX5_CTRL_FLOW_PRIORITY 4
+
+/* Internet Protocol versions. */
+#define MLX5_IPV4 4
+#define MLX5_IPV6 6
+
+#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
+struct ibv_counter_set_init_attr {
+ int dummy;
+};
+struct ibv_flow_spec_counter_action {
+ int dummy;
+};
+struct ibv_counter_set {
+ int dummy;
+};
+
+static inline int
+ibv_destroy_counter_set(struct ibv_counter_set *cs)
+{
+ (void)cs;
+ return -ENOTSUP;
+}
#endif
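
[Illustration, not part of this patch.] The stubs above exist so the rest of the file compiles against an rdma-core without counter-set support. A minimal sketch of how such a shim keeps a call site free of #ifdefs (flow_counters_cleanup() is a hypothetical helper, not in this driver):

#include <errno.h>

static void
flow_counters_cleanup(struct ibv_counter_set *cs)
{
	int ret;

	if (!cs)
		return;
	ret = ibv_destroy_counter_set(cs);
	/* -ENOTSUP here means the fallback stub above was compiled in. */
	(void)ret;
}
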
+/* Dev ops structure defined in mlx5.c */
+extern const struct eth_dev_ops mlx5_dev_ops;
+extern const struct eth_dev_ops mlx5_dev_ops_isolate;
+
static int
mlx5_flow_create_eth(const struct rte_flow_item *item,
const void *default_mask,
@@ -94,19 +117,144 @@ mlx5_flow_create_vxlan(const struct rte_flow_item *item,
const void *default_mask,
void *data);
-struct rte_flow {
- TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
- struct ibv_exp_flow_attr *ibv_attr; /**< Pointer to Verbs attributes. */
- struct ibv_exp_rwq_ind_table *ind_table; /**< Indirection table. */
+struct mlx5_flow_parse;
+
+static void
+mlx5_flow_create_copy(struct mlx5_flow_parse *parser, void *src,
+ unsigned int size);
+
+static int
+mlx5_flow_create_flag_mark(struct mlx5_flow_parse *parser, uint32_t mark_id);
+
+static int
+mlx5_flow_create_count(struct priv *priv, struct mlx5_flow_parse *parser);
+
+/* Hash RX queue types. */
+enum hash_rxq_type {
+ HASH_RXQ_TCPV4,
+ HASH_RXQ_UDPV4,
+ HASH_RXQ_IPV4,
+ HASH_RXQ_TCPV6,
+ HASH_RXQ_UDPV6,
+ HASH_RXQ_IPV6,
+ HASH_RXQ_ETH,
+};
+
+/* Initialization data for hash RX queue. */
+struct hash_rxq_init {
+ uint64_t hash_fields; /* Fields that participate in the hash. */
+ uint64_t dpdk_rss_hf; /* Matching DPDK RSS hash fields. */
+ unsigned int flow_priority; /* Flow priority to use. */
+ unsigned int ip_version; /* Internet protocol. */
+};
+
+/* Initialization data for hash RX queues. */
+const struct hash_rxq_init hash_rxq_init[] = {
+ [HASH_RXQ_TCPV4] = {
+ .hash_fields = (IBV_RX_HASH_SRC_IPV4 |
+ IBV_RX_HASH_DST_IPV4 |
+ IBV_RX_HASH_SRC_PORT_TCP |
+ IBV_RX_HASH_DST_PORT_TCP),
+ .dpdk_rss_hf = ETH_RSS_NONFRAG_IPV4_TCP,
+ .flow_priority = 0,
+ .ip_version = MLX5_IPV4,
+ },
+ [HASH_RXQ_UDPV4] = {
+ .hash_fields = (IBV_RX_HASH_SRC_IPV4 |
+ IBV_RX_HASH_DST_IPV4 |
+ IBV_RX_HASH_SRC_PORT_UDP |
+ IBV_RX_HASH_DST_PORT_UDP),
+ .dpdk_rss_hf = ETH_RSS_NONFRAG_IPV4_UDP,
+ .flow_priority = 0,
+ .ip_version = MLX5_IPV4,
+ },
+ [HASH_RXQ_IPV4] = {
+ .hash_fields = (IBV_RX_HASH_SRC_IPV4 |
+ IBV_RX_HASH_DST_IPV4),
+ .dpdk_rss_hf = (ETH_RSS_IPV4 |
+ ETH_RSS_FRAG_IPV4),
+ .flow_priority = 1,
+ .ip_version = MLX5_IPV4,
+ },
+ [HASH_RXQ_TCPV6] = {
+ .hash_fields = (IBV_RX_HASH_SRC_IPV6 |
+ IBV_RX_HASH_DST_IPV6 |
+ IBV_RX_HASH_SRC_PORT_TCP |
+ IBV_RX_HASH_DST_PORT_TCP),
+ .dpdk_rss_hf = ETH_RSS_NONFRAG_IPV6_TCP,
+ .flow_priority = 0,
+ .ip_version = MLX5_IPV6,
+ },
+ [HASH_RXQ_UDPV6] = {
+ .hash_fields = (IBV_RX_HASH_SRC_IPV6 |
+ IBV_RX_HASH_DST_IPV6 |
+ IBV_RX_HASH_SRC_PORT_UDP |
+ IBV_RX_HASH_DST_PORT_UDP),
+ .dpdk_rss_hf = ETH_RSS_NONFRAG_IPV6_UDP,
+ .flow_priority = 0,
+ .ip_version = MLX5_IPV6,
+ },
+ [HASH_RXQ_IPV6] = {
+ .hash_fields = (IBV_RX_HASH_SRC_IPV6 |
+ IBV_RX_HASH_DST_IPV6),
+ .dpdk_rss_hf = (ETH_RSS_IPV6 |
+ ETH_RSS_FRAG_IPV6),
+ .flow_priority = 1,
+ .ip_version = MLX5_IPV6,
+ },
+ [HASH_RXQ_ETH] = {
+ .hash_fields = 0,
+ .dpdk_rss_hf = 0,
+ .flow_priority = 2,
+ },
+};
+
+/* Number of entries in hash_rxq_init[]. */
+const unsigned int hash_rxq_init_n = RTE_DIM(hash_rxq_init);
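
[Illustration, not part of this patch.] The table above is what later decides which hash Rx queue types a flow needs: every type whose dpdk_rss_hf intersects the user's rss_hf is kept. A sketch of that filtering, assuming the enum, hash_rxq_init[] and hash_rxq_init_n above are in scope (it mirrors the selection done in priv_flow_convert() further down):

#include <stdint.h>
#include <stdio.h>

/* List the hash Rx queue types a DPDK rss_hf value would enable. */
static void
dump_enabled_hash_rxqs(uint64_t rss_hf)
{
	unsigned int i;

	for (i = 0; i != hash_rxq_init_n; ++i) {
		/* HASH_RXQ_ETH is kept unconditionally as the fallback. */
		if (i != HASH_RXQ_ETH &&
		    !(hash_rxq_init[i].dpdk_rss_hf & rss_hf))
			continue;
		printf("type %u enabled, flow priority %u\n",
		       i, hash_rxq_init[i].flow_priority);
	}
}
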
+
+/** Structure for holding counter stats. */
+struct mlx5_flow_counter_stats {
+ uint64_t hits; /**< Number of packets matched by the rule. */
+ uint64_t bytes; /**< Number of bytes matched by the rule. */
+};
+
+/** Structure for Drop queue. */
+struct mlx5_hrxq_drop {
+ struct ibv_rwq_ind_table *ind_table; /**< Indirection table. */
struct ibv_qp *qp; /**< Verbs queue pair. */
- struct ibv_exp_flow *ibv_flow; /**< Verbs flow. */
- struct ibv_exp_wq *wq; /**< Verbs work queue. */
+ struct ibv_wq *wq; /**< Verbs work queue. */
struct ibv_cq *cq; /**< Verbs completion queue. */
- uint16_t rxqs_n; /**< Number of queues in this flow, 0 if drop queue. */
+};
+
+/* Flows structures. */
+struct mlx5_flow {
+ uint64_t hash_fields; /**< Fields that participate in the hash. */
+ struct ibv_flow_attr *ibv_attr; /**< Pointer to Verbs attributes. */
+ struct ibv_flow *ibv_flow; /**< Verbs flow. */
+ struct mlx5_hrxq *hrxq; /**< Hash Rx queues. */
+};
+
+/* Drop flows structures. */
+struct mlx5_flow_drop {
+ struct ibv_flow_attr *ibv_attr; /**< Pointer to Verbs attributes. */
+ struct ibv_flow *ibv_flow; /**< Verbs flow. */
+};
+
+struct rte_flow {
+ TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
uint32_t mark:1; /**< Set if the flow is marked. */
uint32_t drop:1; /**< Drop queue. */
- uint64_t hash_fields; /**< Fields that participate in the hash. */
- struct rxq *rxqs[]; /**< Pointer to the queues array. */
+ uint16_t queues_n; /**< Number of entries in queue[]. */
+ uint16_t (*queues)[]; /**< Queues indexes to use. */
+ struct rte_eth_rss_conf rss_conf; /**< RSS configuration */
+ uint8_t rss_key[40]; /**< copy of the RSS key. */
+ struct ibv_counter_set *cs; /**< Holds the counters for the rule. */
+	struct mlx5_flow_counter_stats counter_stats;/**< The counter stats. */
+ union {
+ struct mlx5_flow frxq[RTE_DIM(hash_rxq_init)];
+ /**< Flow with Rx queue. */
+ struct mlx5_flow_drop drxq; /**< Flow with drop Rx queue. */
+ };
};
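
[Illustration, not part of this patch.] The `uint16_t (*queues)[]` member points at storage allocated right behind the structure itself, as done by the `flow->queues = (uint16_t (*)[])(flow + 1);` assignment near the end of this diff. A standalone sketch of the idiom, using a hypothetical struct demo:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct demo {
	uint16_t queues_n;    /* Number of entries in queues[]. */
	uint16_t (*queues)[]; /* Points at the trailing storage. */
};

int main(void)
{
	const uint16_t n = 4;
	struct demo *d = calloc(1, sizeof(*d) + n * sizeof(uint16_t));
	uint16_t i;

	if (!d)
		return 1;
	d->queues_n = n;
	d->queues = (uint16_t (*)[])(d + 1); /* Same trick as rte_flow. */
	for (i = 0; i < n; ++i)
		(*d->queues)[i] = i;
	printf("queues[2] = %d\n", (*d->queues)[2]);
	free(d);
	return 0;
}
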
/** Static initializer for items. */
@@ -157,6 +305,9 @@ static const enum rte_flow_action_type valid_actions[] = {
RTE_FLOW_ACTION_TYPE_QUEUE,
RTE_FLOW_ACTION_TYPE_MARK,
RTE_FLOW_ACTION_TYPE_FLAG,
+#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
+ RTE_FLOW_ACTION_TYPE_COUNT,
+#endif
RTE_FLOW_ACTION_TYPE_END,
};
@@ -179,7 +330,7 @@ static const struct mlx5_flow_items mlx5_flow_items[] = {
.default_mask = &rte_flow_item_eth_mask,
.mask_sz = sizeof(struct rte_flow_item_eth),
.convert = mlx5_flow_create_eth,
- .dst_sz = sizeof(struct ibv_exp_flow_spec_eth),
+ .dst_sz = sizeof(struct ibv_flow_spec_eth),
},
[RTE_FLOW_ITEM_TYPE_VLAN] = {
.items = ITEMS(RTE_FLOW_ITEM_TYPE_IPV4,
@@ -208,7 +359,7 @@ static const struct mlx5_flow_items mlx5_flow_items[] = {
.default_mask = &rte_flow_item_ipv4_mask,
.mask_sz = sizeof(struct rte_flow_item_ipv4),
.convert = mlx5_flow_create_ipv4,
- .dst_sz = sizeof(struct ibv_exp_flow_spec_ipv4_ext),
+ .dst_sz = sizeof(struct ibv_flow_spec_ipv4_ext),
},
[RTE_FLOW_ITEM_TYPE_IPV6] = {
.items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP,
@@ -236,7 +387,7 @@ static const struct mlx5_flow_items mlx5_flow_items[] = {
.default_mask = &rte_flow_item_ipv6_mask,
.mask_sz = sizeof(struct rte_flow_item_ipv6),
.convert = mlx5_flow_create_ipv6,
- .dst_sz = sizeof(struct ibv_exp_flow_spec_ipv6_ext),
+ .dst_sz = sizeof(struct ibv_flow_spec_ipv6),
},
[RTE_FLOW_ITEM_TYPE_UDP] = {
.items = ITEMS(RTE_FLOW_ITEM_TYPE_VXLAN),
@@ -250,7 +401,7 @@ static const struct mlx5_flow_items mlx5_flow_items[] = {
.default_mask = &rte_flow_item_udp_mask,
.mask_sz = sizeof(struct rte_flow_item_udp),
.convert = mlx5_flow_create_udp,
- .dst_sz = sizeof(struct ibv_exp_flow_spec_tcp_udp),
+ .dst_sz = sizeof(struct ibv_flow_spec_tcp_udp),
},
[RTE_FLOW_ITEM_TYPE_TCP] = {
.actions = valid_actions,
@@ -263,7 +414,7 @@ static const struct mlx5_flow_items mlx5_flow_items[] = {
.default_mask = &rte_flow_item_tcp_mask,
.mask_sz = sizeof(struct rte_flow_item_tcp),
.convert = mlx5_flow_create_tcp,
- .dst_sz = sizeof(struct ibv_exp_flow_spec_tcp_udp),
+ .dst_sz = sizeof(struct ibv_flow_spec_tcp_udp),
},
[RTE_FLOW_ITEM_TYPE_VXLAN] = {
.items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH),
@@ -274,33 +425,76 @@ static const struct mlx5_flow_items mlx5_flow_items[] = {
.default_mask = &rte_flow_item_vxlan_mask,
.mask_sz = sizeof(struct rte_flow_item_vxlan),
.convert = mlx5_flow_create_vxlan,
- .dst_sz = sizeof(struct ibv_exp_flow_spec_tunnel),
+ .dst_sz = sizeof(struct ibv_flow_spec_tunnel),
},
};
/** Structure to pass to the conversion function. */
-struct mlx5_flow {
- struct ibv_exp_flow_attr *ibv_attr; /**< Verbs attribute. */
- unsigned int offset; /**< Offset in bytes in the ibv_attr buffer. */
+struct mlx5_flow_parse {
uint32_t inner; /**< Set once VXLAN is encountered. */
- uint64_t hash_fields; /**< Fields that participate in the hash. */
-};
-
-/** Structure for Drop queue. */
-struct rte_flow_drop {
- struct ibv_exp_rwq_ind_table *ind_table; /**< Indirection table. */
- struct ibv_qp *qp; /**< Verbs queue pair. */
- struct ibv_exp_wq *wqs[MLX5_DROP_WQ_N]; /**< Verbs work queue. */
- struct ibv_cq *cq; /**< Verbs completion queue. */
-};
-
-struct mlx5_flow_action {
- uint32_t queue:1; /**< Target is a receive queue. */
+ uint32_t create:1;
+ /**< Whether resources should remain after a validate. */
uint32_t drop:1; /**< Target is a drop queue. */
uint32_t mark:1; /**< Mark is present in the flow. */
+ uint32_t count:1; /**< Count is present in the flow. */
uint32_t mark_id; /**< Mark identifier. */
uint16_t queues[RTE_MAX_QUEUES_PER_PORT]; /**< Queues indexes to use. */
uint16_t queues_n; /**< Number of entries in queue[]. */
+ struct rte_eth_rss_conf rss_conf; /**< RSS configuration */
+ uint8_t rss_key[40]; /**< copy of the RSS key. */
+ enum hash_rxq_type layer; /**< Last pattern layer detected. */
+ struct ibv_counter_set *cs; /**< Holds the counter set for the rule */
+ union {
+ struct {
+ struct ibv_flow_attr *ibv_attr;
+ /**< Pointer to Verbs attributes. */
+ unsigned int offset;
+ /**< Current position or total size of the attribute. */
+ } queue[RTE_DIM(hash_rxq_init)];
+ struct {
+ struct ibv_flow_attr *ibv_attr;
+ /**< Pointer to Verbs attributes. */
+ unsigned int offset;
+ /**< Current position or total size of the attribute. */
+ } drop_q;
+ };
+};
+
+static const struct rte_flow_ops mlx5_flow_ops = {
+ .validate = mlx5_flow_validate,
+ .create = mlx5_flow_create,
+ .destroy = mlx5_flow_destroy,
+ .flush = mlx5_flow_flush,
+#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
+ .query = mlx5_flow_query,
+#else
+ .query = NULL,
+#endif
+ .isolate = mlx5_flow_isolate,
+};
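
[Illustration, not part of this patch.] These callbacks are reached through the public rte_flow API. A minimal application-side sketch against the DPDK 17.11 headers (the port and queue indexes are arbitrary examples):

#include <rte_flow.h>

/* Steer all IPv4/UDP traffic on a port to Rx queue 1. */
static struct rte_flow *
setup_udp_rule(uint16_t port_id, struct rte_flow_error *error)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_action_queue queue = { .index = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	if (rte_flow_validate(port_id, &attr, pattern, actions, error))
		return NULL;
	return rte_flow_create(port_id, &attr, pattern, actions, error);
}
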
+
+/* Convert FDIR request to Generic flow. */
+struct mlx5_fdir {
+ struct rte_flow_attr attr;
+ struct rte_flow_action actions[2];
+ struct rte_flow_item items[4];
+ struct rte_flow_item_eth l2;
+ struct rte_flow_item_eth l2_mask;
+ union {
+ struct rte_flow_item_ipv4 ipv4;
+ struct rte_flow_item_ipv6 ipv6;
+ } l3;
+ union {
+ struct rte_flow_item_udp udp;
+ struct rte_flow_item_tcp tcp;
+ } l4;
+ struct rte_flow_action_queue queue;
+};
+
+/* Verbs specification header. */
+struct ibv_spec_header {
+ enum ibv_flow_spec_type type;
+ uint16_t size;
};
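
[Illustration, not part of this patch.] Specifications are packed back to back behind the ibv_flow_attr, and this header lets generic code walk them without knowing each concrete type. A sketch of such a walk, assuming the struct above and <infiniband/verbs.h> are in scope:

#include <stdint.h>
#include <stdio.h>

static void
dump_specs(const struct ibv_flow_attr *attr)
{
	const uint8_t *ptr = (const uint8_t *)(attr + 1);
	unsigned int i;

	for (i = 0; i != attr->num_of_specs; ++i) {
		const struct ibv_spec_header *hdr =
			(const struct ibv_spec_header *)ptr;

		printf("spec %u: type %d size %d\n",
		       i, hdr->type, hdr->size);
		ptr += hdr->size; /* 'size' covers the whole spec. */
	}
}
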
/**
@@ -367,38 +561,58 @@ mlx5_flow_item_validate(const struct rte_flow_item *item,
}
/**
- * Validate a flow supported by the NIC.
+ * Copy the RSS configuration from the user-provided one.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param parser
+ * Internal parser structure.
+ * @param rss_conf
+ * User RSS configuration to save.
+ *
+ * @return
+ * 0 on success, errno value on failure.
+ */
+static int
+priv_flow_convert_rss_conf(struct priv *priv,
+ struct mlx5_flow_parse *parser,
+ const struct rte_eth_rss_conf *rss_conf)
+{
+ const struct rte_eth_rss_conf *rss =
+ rss_conf ? rss_conf : &priv->rss_conf;
+
+ if (rss->rss_key_len > 40)
+ return EINVAL;
+ parser->rss_conf.rss_key_len = rss->rss_key_len;
+ parser->rss_conf.rss_hf = rss->rss_hf;
+ memcpy(parser->rss_key, rss->rss_key, rss->rss_key_len);
+ parser->rss_conf.rss_key = parser->rss_key;
+ return 0;
+}
+
+/**
+ * Extract attribute to the parser.
*
* @param priv
* Pointer to private structure.
* @param[in] attr
* Flow rule attributes.
- * @param[in] pattern
- * Pattern specification (list terminated by the END pattern item).
- * @param[in] actions
- * Associated actions (list terminated by the END action).
* @param[out] error
* Perform verbose error reporting if not NULL.
- * @param[in, out] flow
- * Flow structure to update.
- * @param[in, out] action
- * Action structure to update.
+ * @param[in, out] parser
+ * Internal parser structure.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-priv_flow_validate(struct priv *priv,
- const struct rte_flow_attr *attr,
- const struct rte_flow_item items[],
- const struct rte_flow_action actions[],
- struct rte_flow_error *error,
- struct mlx5_flow *flow,
- struct mlx5_flow_action *action)
+priv_flow_convert_attributes(struct priv *priv,
+ const struct rte_flow_attr *attr,
+ struct rte_flow_error *error,
+ struct mlx5_flow_parse *parser)
{
- const struct mlx5_flow_items *cur_item = mlx5_flow_items;
-
(void)priv;
+ (void)parser;
if (attr->group) {
rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
@@ -406,7 +620,7 @@ priv_flow_validate(struct priv *priv,
"groups are not supported");
return -rte_errno;
}
- if (attr->priority) {
+ if (attr->priority && attr->priority != MLX5_CTRL_FLOW_PRIORITY) {
rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
NULL,
@@ -427,56 +641,42 @@ priv_flow_validate(struct priv *priv,
"only ingress is supported");
return -rte_errno;
}
- for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {
- const struct mlx5_flow_items *token = NULL;
- unsigned int i;
- int err;
+ return 0;
+}
- if (items->type == RTE_FLOW_ITEM_TYPE_VOID)
- continue;
- for (i = 0;
- cur_item->items &&
- cur_item->items[i] != RTE_FLOW_ITEM_TYPE_END;
- ++i) {
- if (cur_item->items[i] == items->type) {
- token = &mlx5_flow_items[items->type];
- break;
- }
- }
- if (!token)
- goto exit_item_not_supported;
- cur_item = token;
- err = mlx5_flow_item_validate(items,
- (const uint8_t *)cur_item->mask,
- cur_item->mask_sz);
- if (err)
- goto exit_item_not_supported;
- if (flow->ibv_attr && cur_item->convert) {
- err = cur_item->convert(items,
- (cur_item->default_mask ?
- cur_item->default_mask :
- cur_item->mask),
- flow);
- if (err)
- goto exit_item_not_supported;
- } else if (items->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
- if (flow->inner) {
- rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- items,
- "cannot recognize multiple"
- " VXLAN encapsulations");
- return -rte_errno;
- }
- flow->inner = 1;
- }
- flow->offset += cur_item->dst_sz;
- }
+/**
+ * Extract actions request to the parser.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param[in] actions
+ * Associated actions (list terminated by the END action).
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ * @param[in, out] parser
+ * Internal parser structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+priv_flow_convert_actions(struct priv *priv,
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ struct mlx5_flow_parse *parser)
+{
+ /*
+	 * Add the default RSS configuration, which Verbs needs to create a
+	 * QP even when no RSS is requested.
+ */
+ priv_flow_convert_rss_conf(priv, parser,
+ (const struct rte_eth_rss_conf *)
+ &priv->rss_conf);
for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) {
continue;
} else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) {
- action->drop = 1;
+ parser->drop = 1;
} else if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
const struct rte_flow_action_queue *queue =
(const struct rte_flow_action_queue *)
@@ -486,13 +686,13 @@ priv_flow_validate(struct priv *priv,
if (!queue || (queue->index > (priv->rxqs_n - 1)))
goto exit_action_not_supported;
- for (n = 0; n < action->queues_n; ++n) {
- if (action->queues[n] == queue->index) {
+ for (n = 0; n < parser->queues_n; ++n) {
+ if (parser->queues[n] == queue->index) {
found = 1;
break;
}
}
- if (action->queues_n > 1 && !found) {
+ if (parser->queues_n > 1 && !found) {
rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION,
actions,
@@ -500,9 +700,8 @@ priv_flow_validate(struct priv *priv,
return -rte_errno;
}
if (!found) {
- action->queue = 1;
- action->queues_n = 1;
- action->queues[0] = queue->index;
+ parser->queues_n = 1;
+ parser->queues[0] = queue->index;
}
} else if (actions->type == RTE_FLOW_ACTION_TYPE_RSS) {
const struct rte_flow_action_rss *rss =
@@ -517,12 +716,12 @@ priv_flow_validate(struct priv *priv,
"no valid queues");
return -rte_errno;
}
- if (action->queues_n == 1) {
+ if (parser->queues_n == 1) {
uint16_t found = 0;
- assert(action->queues_n);
+ assert(parser->queues_n);
for (n = 0; n < rss->num; ++n) {
- if (action->queues[0] ==
+ if (parser->queues[0] ==
rss->queue[n]) {
found = 1;
break;
@@ -547,10 +746,17 @@ priv_flow_validate(struct priv *priv,
return -rte_errno;
}
}
- action->queue = 1;
for (n = 0; n < rss->num; ++n)
- action->queues[n] = rss->queue[n];
- action->queues_n = rss->num;
+ parser->queues[n] = rss->queue[n];
+ parser->queues_n = rss->num;
+ if (priv_flow_convert_rss_conf(priv, parser,
+ rss->rss_conf)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "wrong RSS configuration");
+ return -rte_errno;
+ }
} else if (actions->type == RTE_FLOW_ACTION_TYPE_MARK) {
const struct rte_flow_action_mark *mark =
(const struct rte_flow_action_mark *)
@@ -570,30 +776,25 @@ priv_flow_validate(struct priv *priv,
" and 16777199");
return -rte_errno;
}
- action->mark = 1;
- action->mark_id = mark->id;
+ parser->mark = 1;
+ parser->mark_id = mark->id;
} else if (actions->type == RTE_FLOW_ACTION_TYPE_FLAG) {
- action->mark = 1;
+ parser->mark = 1;
+ } else if (actions->type == RTE_FLOW_ACTION_TYPE_COUNT &&
+ priv->counter_set_supported) {
+ parser->count = 1;
} else {
goto exit_action_not_supported;
}
}
- if (action->mark && !flow->ibv_attr && !action->drop)
- flow->offset += sizeof(struct ibv_exp_flow_spec_action_tag);
-#ifdef HAVE_VERBS_IBV_EXP_FLOW_SPEC_ACTION_DROP
- if (!flow->ibv_attr && action->drop)
- flow->offset += sizeof(struct ibv_exp_flow_spec_action_drop);
-#endif
- if (!action->queue && !action->drop) {
+ if (parser->drop && parser->mark)
+ parser->mark = 0;
+ if (!parser->queues_n && !parser->drop) {
rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
NULL, "no valid action");
return -rte_errno;
}
return 0;
-exit_item_not_supported:
- rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
- items, "item not supported");
- return -rte_errno;
exit_action_not_supported:
rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
actions, "action not supported");
@@ -601,34 +802,467 @@ exit_action_not_supported:
}
/**
- * Validate a flow supported by the NIC.
+ * Validate items.
*
- * @see rte_flow_validate()
- * @see rte_flow_ops
+ * @param priv
+ * Pointer to private structure.
+ * @param[in] items
+ * Pattern specification (list terminated by the END pattern item).
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ * @param[in, out] parser
+ * Internal parser structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-int
-mlx5_flow_validate(struct rte_eth_dev *dev,
- const struct rte_flow_attr *attr,
- const struct rte_flow_item items[],
- const struct rte_flow_action actions[],
- struct rte_flow_error *error)
+static int
+priv_flow_convert_items_validate(struct priv *priv,
+ const struct rte_flow_item items[],
+ struct rte_flow_error *error,
+ struct mlx5_flow_parse *parser)
{
- struct priv *priv = dev->data->dev_private;
+ const struct mlx5_flow_items *cur_item = mlx5_flow_items;
+ unsigned int i;
+
+ (void)priv;
+ /* Initialise the offsets to start after verbs attribute. */
+ if (parser->drop) {
+ parser->drop_q.offset = sizeof(struct ibv_flow_attr);
+ } else {
+ for (i = 0; i != hash_rxq_init_n; ++i)
+ parser->queue[i].offset = sizeof(struct ibv_flow_attr);
+ }
+ for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {
+ const struct mlx5_flow_items *token = NULL;
+ unsigned int n;
+ int err;
+
+ if (items->type == RTE_FLOW_ITEM_TYPE_VOID)
+ continue;
+ for (i = 0;
+ cur_item->items &&
+ cur_item->items[i] != RTE_FLOW_ITEM_TYPE_END;
+ ++i) {
+ if (cur_item->items[i] == items->type) {
+ token = &mlx5_flow_items[items->type];
+ break;
+ }
+ }
+ if (!token)
+ goto exit_item_not_supported;
+ cur_item = token;
+ err = mlx5_flow_item_validate(items,
+ (const uint8_t *)cur_item->mask,
+ cur_item->mask_sz);
+ if (err)
+ goto exit_item_not_supported;
+ if (items->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
+ if (parser->inner) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ items,
+ "cannot recognize multiple"
+ " VXLAN encapsulations");
+ return -rte_errno;
+ }
+ parser->inner = IBV_FLOW_SPEC_INNER;
+ }
+ if (parser->drop) {
+ parser->drop_q.offset += cur_item->dst_sz;
+ } else if (parser->queues_n == 1) {
+ parser->queue[HASH_RXQ_ETH].offset += cur_item->dst_sz;
+ } else {
+ for (n = 0; n != hash_rxq_init_n; ++n)
+ parser->queue[n].offset += cur_item->dst_sz;
+ }
+ }
+ if (parser->mark) {
+ for (i = 0; i != hash_rxq_init_n; ++i)
+ parser->queue[i].offset +=
+ sizeof(struct ibv_flow_spec_action_tag);
+ }
+ if (parser->count) {
+ unsigned int size = sizeof(struct ibv_flow_spec_counter_action);
+
+ if (parser->drop) {
+ parser->drop_q.offset += size;
+ } else {
+ for (i = 0; i != hash_rxq_init_n; ++i)
+ parser->queue[i].offset += size;
+ }
+ }
+ return 0;
+exit_item_not_supported:
+ rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+ items, "item not supported");
+ return -rte_errno;
+}
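
[Worked example, not part of this patch; sizes come from the Verbs structures at build time.] For an eth / ipv4 / udp pattern with a mark action, the offset accounting above sizes each per-queue attribute buffer as:

#include <stddef.h>
#include <infiniband/verbs.h>

size_t total = sizeof(struct ibv_flow_attr) +          /* base attribute */
	       sizeof(struct ibv_flow_spec_eth) +        /* eth item */
	       sizeof(struct ibv_flow_spec_ipv4_ext) +   /* ipv4 item */
	       sizeof(struct ibv_flow_spec_tcp_udp) +    /* udp item */
	       sizeof(struct ibv_flow_spec_action_tag);  /* mark action */
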
+
+/**
+ * Allocate memory space to store verbs flow attributes.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param[in] priority
+ * Flow priority.
+ * @param[in] size
+ *   Number of bytes to allocate.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ *
+ * @return
+ * A verbs flow attribute on success, NULL otherwise.
+ */
+static struct ibv_flow_attr*
+priv_flow_convert_allocate(struct priv *priv,
+ unsigned int priority,
+ unsigned int size,
+ struct rte_flow_error *error)
+{
+ struct ibv_flow_attr *ibv_attr;
+
+ (void)priv;
+ ibv_attr = rte_calloc(__func__, 1, size, 0);
+ if (!ibv_attr) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "cannot allocate verbs spec attributes.");
+ return NULL;
+ }
+ ibv_attr->priority = priority;
+ return ibv_attr;
+}
+
+/**
+ * Finalise verbs flow attributes.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param[in, out] parser
+ * Internal parser structure.
+ */
+static void
+priv_flow_convert_finalise(struct priv *priv, struct mlx5_flow_parse *parser)
+{
+ const unsigned int ipv4 =
+ hash_rxq_init[parser->layer].ip_version == MLX5_IPV4;
+ const enum hash_rxq_type hmin = ipv4 ? HASH_RXQ_TCPV4 : HASH_RXQ_TCPV6;
+ const enum hash_rxq_type hmax = ipv4 ? HASH_RXQ_IPV4 : HASH_RXQ_IPV6;
+ const enum hash_rxq_type ohmin = ipv4 ? HASH_RXQ_TCPV6 : HASH_RXQ_TCPV4;
+ const enum hash_rxq_type ohmax = ipv4 ? HASH_RXQ_IPV6 : HASH_RXQ_IPV4;
+ const enum hash_rxq_type ip = ipv4 ? HASH_RXQ_IPV4 : HASH_RXQ_IPV6;
+ unsigned int i;
+
+ (void)priv;
+ if (parser->layer == HASH_RXQ_ETH) {
+ goto fill;
+ } else {
+ /*
+		 * This layer becomes useless as the pattern defines
+		 * deeper layers.
+ */
+ rte_free(parser->queue[HASH_RXQ_ETH].ibv_attr);
+ parser->queue[HASH_RXQ_ETH].ibv_attr = NULL;
+ }
+ /* Remove opposite kind of layer e.g. IPv6 if the pattern is IPv4. */
+ for (i = ohmin; i != (ohmax + 1); ++i) {
+ if (!parser->queue[i].ibv_attr)
+ continue;
+ rte_free(parser->queue[i].ibv_attr);
+ parser->queue[i].ibv_attr = NULL;
+ }
+ /* Remove impossible flow according to the RSS configuration. */
+ if (hash_rxq_init[parser->layer].dpdk_rss_hf &
+ parser->rss_conf.rss_hf) {
+ /* Remove any other flow. */
+ for (i = hmin; i != (hmax + 1); ++i) {
+ if ((i == parser->layer) ||
+ (!parser->queue[i].ibv_attr))
+ continue;
+ rte_free(parser->queue[i].ibv_attr);
+ parser->queue[i].ibv_attr = NULL;
+ }
+ } else if (!parser->queue[ip].ibv_attr) {
+		/* No RSS possible with the current configuration. */
+ parser->queues_n = 1;
+ return;
+ }
+fill:
+ /*
+ * Fill missing layers in verbs specifications, or compute the correct
+ * offset to allocate the memory space for the attributes and
+ * specifications.
+ */
+ for (i = 0; i != hash_rxq_init_n - 1; ++i) {
+ union {
+ struct ibv_flow_spec_ipv4_ext ipv4;
+ struct ibv_flow_spec_ipv6 ipv6;
+ struct ibv_flow_spec_tcp_udp udp_tcp;
+ } specs;
+ void *dst;
+ uint16_t size;
+
+ if (i == parser->layer)
+ continue;
+ if (parser->layer == HASH_RXQ_ETH) {
+ if (hash_rxq_init[i].ip_version == MLX5_IPV4) {
+ size = sizeof(struct ibv_flow_spec_ipv4_ext);
+ specs.ipv4 = (struct ibv_flow_spec_ipv4_ext){
+ .type = IBV_FLOW_SPEC_IPV4_EXT,
+ .size = size,
+ };
+ } else {
+ size = sizeof(struct ibv_flow_spec_ipv6);
+ specs.ipv6 = (struct ibv_flow_spec_ipv6){
+ .type = IBV_FLOW_SPEC_IPV6,
+ .size = size,
+ };
+ }
+ if (parser->queue[i].ibv_attr) {
+ dst = (void *)((uintptr_t)
+ parser->queue[i].ibv_attr +
+ parser->queue[i].offset);
+ memcpy(dst, &specs, size);
+ ++parser->queue[i].ibv_attr->num_of_specs;
+ }
+ parser->queue[i].offset += size;
+ }
+ if ((i == HASH_RXQ_UDPV4) || (i == HASH_RXQ_TCPV4) ||
+ (i == HASH_RXQ_UDPV6) || (i == HASH_RXQ_TCPV6)) {
+ size = sizeof(struct ibv_flow_spec_tcp_udp);
+ specs.udp_tcp = (struct ibv_flow_spec_tcp_udp) {
+ .type = ((i == HASH_RXQ_UDPV4 ||
+ i == HASH_RXQ_UDPV6) ?
+ IBV_FLOW_SPEC_UDP :
+ IBV_FLOW_SPEC_TCP),
+ .size = size,
+ };
+ if (parser->queue[i].ibv_attr) {
+ dst = (void *)((uintptr_t)
+ parser->queue[i].ibv_attr +
+ parser->queue[i].offset);
+ memcpy(dst, &specs, size);
+ ++parser->queue[i].ibv_attr->num_of_specs;
+ }
+ parser->queue[i].offset += size;
+ }
+ }
+}
+
+/**
+ * Validate and convert a flow supported by the NIC.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param[in] attr
+ * Flow rule attributes.
+ * @param[in] pattern
+ * Pattern specification (list terminated by the END pattern item).
+ * @param[in] actions
+ * Associated actions (list terminated by the END action).
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ * @param[in, out] parser
+ * Internal parser structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+priv_flow_convert(struct priv *priv,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ struct mlx5_flow_parse *parser)
+{
+ const struct mlx5_flow_items *cur_item = mlx5_flow_items;
+ unsigned int i;
int ret;
- struct mlx5_flow flow = { .offset = sizeof(struct ibv_exp_flow_attr) };
- struct mlx5_flow_action action = {
- .queue = 0,
- .drop = 0,
- .mark = 0,
+
+ /* First step. Validate the attributes, items and actions. */
+ *parser = (struct mlx5_flow_parse){
+ .create = parser->create,
+ .layer = HASH_RXQ_ETH,
.mark_id = MLX5_FLOW_MARK_DEFAULT,
- .queues_n = 0,
};
-
- priv_lock(priv);
- ret = priv_flow_validate(priv, attr, items, actions, error, &flow,
- &action);
- priv_unlock(priv);
+ ret = priv_flow_convert_attributes(priv, attr, error, parser);
+ if (ret)
+ return ret;
+ ret = priv_flow_convert_actions(priv, actions, error, parser);
+ if (ret)
+ return ret;
+ ret = priv_flow_convert_items_validate(priv, items, error, parser);
+ if (ret)
+ return ret;
+ priv_flow_convert_finalise(priv, parser);
+ /*
+ * Second step.
+ * Allocate the memory space to store verbs specifications.
+ */
+ if (parser->drop) {
+ parser->drop_q.ibv_attr =
+ priv_flow_convert_allocate(priv, attr->priority,
+ parser->drop_q.offset,
+ error);
+ if (!parser->drop_q.ibv_attr)
+ return ENOMEM;
+ parser->drop_q.offset = sizeof(struct ibv_flow_attr);
+ } else if (parser->queues_n == 1) {
+ unsigned int priority =
+ attr->priority +
+ hash_rxq_init[HASH_RXQ_ETH].flow_priority;
+ unsigned int offset = parser->queue[HASH_RXQ_ETH].offset;
+
+ parser->queue[HASH_RXQ_ETH].ibv_attr =
+ priv_flow_convert_allocate(priv, priority,
+ offset, error);
+ if (!parser->queue[HASH_RXQ_ETH].ibv_attr)
+ return ENOMEM;
+ parser->queue[HASH_RXQ_ETH].offset =
+ sizeof(struct ibv_flow_attr);
+ } else {
+ for (i = 0; i != hash_rxq_init_n; ++i) {
+ unsigned int priority =
+ attr->priority +
+ hash_rxq_init[i].flow_priority;
+ unsigned int offset;
+
+ if (!(parser->rss_conf.rss_hf &
+ hash_rxq_init[i].dpdk_rss_hf) &&
+ (i != HASH_RXQ_ETH))
+ continue;
+ offset = parser->queue[i].offset;
+ parser->queue[i].ibv_attr =
+ priv_flow_convert_allocate(priv, priority,
+ offset, error);
+ if (!parser->queue[i].ibv_attr)
+ goto exit_enomem;
+ parser->queue[i].offset = sizeof(struct ibv_flow_attr);
+ }
+ }
+ /* Third step. Conversion parse, fill the specifications. */
+ parser->inner = 0;
+ for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {
+ if (items->type == RTE_FLOW_ITEM_TYPE_VOID)
+ continue;
+ cur_item = &mlx5_flow_items[items->type];
+ ret = cur_item->convert(items,
+ (cur_item->default_mask ?
+ cur_item->default_mask :
+ cur_item->mask),
+ parser);
+ if (ret) {
+ rte_flow_error_set(error, ret,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ items, "item not supported");
+ goto exit_free;
+ }
+ }
+ if (parser->mark)
+ mlx5_flow_create_flag_mark(parser, parser->mark_id);
+ if (parser->count && parser->create) {
+ mlx5_flow_create_count(priv, parser);
+ if (!parser->cs)
+ goto exit_count_error;
+ }
+ /*
+ * Last step. Complete missing specification to reach the RSS
+ * configuration.
+ */
+ if (parser->drop) {
+ /*
+		 * The drop queue priority needs to be adjusted to
+		 * the most specific layer priority.
+ */
+ parser->drop_q.ibv_attr->priority =
+ attr->priority +
+ hash_rxq_init[parser->layer].flow_priority;
+ } else if (parser->queues_n > 1) {
+ priv_flow_convert_finalise(priv, parser);
+ } else {
+ /*
+		 * Action queues have their priority overridden with the
+		 * Ethernet priority; it needs to be adjusted to the most
+		 * specific layer priority.
+ */
+ parser->queue[HASH_RXQ_ETH].ibv_attr->priority =
+ attr->priority +
+ hash_rxq_init[parser->layer].flow_priority;
+ }
+exit_free:
+	/* Only verification is expected; all resources should be released. */
+ if (!parser->create) {
+ if (parser->drop) {
+ rte_free(parser->drop_q.ibv_attr);
+ parser->drop_q.ibv_attr = NULL;
+ }
+ for (i = 0; i != hash_rxq_init_n; ++i) {
+ if (parser->queue[i].ibv_attr) {
+ rte_free(parser->queue[i].ibv_attr);
+ parser->queue[i].ibv_attr = NULL;
+ }
+ }
+ }
return ret;
+exit_enomem:
+ for (i = 0; i != hash_rxq_init_n; ++i) {
+ if (parser->queue[i].ibv_attr) {
+ rte_free(parser->queue[i].ibv_attr);
+ parser->queue[i].ibv_attr = NULL;
+ }
+ }
+ rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "cannot allocate verbs spec attributes.");
+ return ret;
+exit_count_error:
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "cannot create counter.");
+ return rte_errno;
+}
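
[Illustration, not part of this patch.] Validation-only callers run this same conversion with parser.create left at 0, so every attribute allocated above is released again under exit_free. A sketch of that call pattern, mirroring what mlx5_flow_validate() does:

/* Validate a rule without keeping any resource around. */
struct mlx5_flow_parse parser = { .create = 0 };
int ret = priv_flow_convert(priv, attr, items, actions, error, &parser);
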
+
+/**
+ * Copy the specification created into the flow.
+ *
+ * @param parser
+ * Internal parser structure.
+ * @param src
+ *   Created specification to copy.
+ * @param size
+ * Size in bytes of the specification to copy.
+ */
+static void
+mlx5_flow_create_copy(struct mlx5_flow_parse *parser, void *src,
+ unsigned int size)
+{
+ unsigned int i;
+ void *dst;
+
+ if (parser->drop) {
+ dst = (void *)((uintptr_t)parser->drop_q.ibv_attr +
+ parser->drop_q.offset);
+ memcpy(dst, src, size);
+ ++parser->drop_q.ibv_attr->num_of_specs;
+ parser->drop_q.offset += size;
+ return;
+ }
+ for (i = 0; i != hash_rxq_init_n; ++i) {
+ if (!parser->queue[i].ibv_attr)
+ continue;
+ /* Specification must be the same l3 type or none. */
+ if (parser->layer == HASH_RXQ_ETH ||
+ (hash_rxq_init[parser->layer].ip_version ==
+ hash_rxq_init[i].ip_version) ||
+ (hash_rxq_init[i].ip_version == 0)) {
+ dst = (void *)((uintptr_t)parser->queue[i].ibv_attr +
+ parser->queue[i].offset);
+ memcpy(dst, src, size);
+ ++parser->queue[i].ibv_attr->num_of_specs;
+ parser->queue[i].offset += size;
+ }
+ }
}
/**
@@ -648,35 +1282,35 @@ mlx5_flow_create_eth(const struct rte_flow_item *item,
{
const struct rte_flow_item_eth *spec = item->spec;
const struct rte_flow_item_eth *mask = item->mask;
- struct mlx5_flow *flow = (struct mlx5_flow *)data;
- struct ibv_exp_flow_spec_eth *eth;
- const unsigned int eth_size = sizeof(struct ibv_exp_flow_spec_eth);
- unsigned int i;
-
- ++flow->ibv_attr->num_of_specs;
- flow->ibv_attr->priority = 2;
- flow->hash_fields = 0;
- eth = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
- *eth = (struct ibv_exp_flow_spec_eth) {
- .type = flow->inner | IBV_EXP_FLOW_SPEC_ETH,
+ struct mlx5_flow_parse *parser = (struct mlx5_flow_parse *)data;
+ const unsigned int eth_size = sizeof(struct ibv_flow_spec_eth);
+ struct ibv_flow_spec_eth eth = {
+ .type = parser->inner | IBV_FLOW_SPEC_ETH,
.size = eth_size,
};
- if (!spec)
- return 0;
- if (!mask)
- mask = default_mask;
- memcpy(eth->val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);
- memcpy(eth->val.src_mac, spec->src.addr_bytes, ETHER_ADDR_LEN);
- eth->val.ether_type = spec->type;
- memcpy(eth->mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN);
- memcpy(eth->mask.src_mac, mask->src.addr_bytes, ETHER_ADDR_LEN);
- eth->mask.ether_type = mask->type;
- /* Remove unwanted bits from values. */
- for (i = 0; i < ETHER_ADDR_LEN; ++i) {
- eth->val.dst_mac[i] &= eth->mask.dst_mac[i];
- eth->val.src_mac[i] &= eth->mask.src_mac[i];
- }
- eth->val.ether_type &= eth->mask.ether_type;
+
+ /* Don't update layer for the inner pattern. */
+ if (!parser->inner)
+ parser->layer = HASH_RXQ_ETH;
+ if (spec) {
+ unsigned int i;
+
+ if (!mask)
+ mask = default_mask;
+ memcpy(&eth.val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);
+ memcpy(&eth.val.src_mac, spec->src.addr_bytes, ETHER_ADDR_LEN);
+ eth.val.ether_type = spec->type;
+ memcpy(&eth.mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN);
+ memcpy(&eth.mask.src_mac, mask->src.addr_bytes, ETHER_ADDR_LEN);
+ eth.mask.ether_type = mask->type;
+ /* Remove unwanted bits from values. */
+ for (i = 0; i < ETHER_ADDR_LEN; ++i) {
+ eth.val.dst_mac[i] &= eth.mask.dst_mac[i];
+ eth.val.src_mac[i] &= eth.mask.src_mac[i];
+ }
+ eth.val.ether_type &= eth.mask.ether_type;
+ }
+ mlx5_flow_create_copy(parser, &eth, eth_size);
return 0;
}
@@ -697,18 +1331,34 @@ mlx5_flow_create_vlan(const struct rte_flow_item *item,
{
const struct rte_flow_item_vlan *spec = item->spec;
const struct rte_flow_item_vlan *mask = item->mask;
- struct mlx5_flow *flow = (struct mlx5_flow *)data;
- struct ibv_exp_flow_spec_eth *eth;
- const unsigned int eth_size = sizeof(struct ibv_exp_flow_spec_eth);
+ struct mlx5_flow_parse *parser = (struct mlx5_flow_parse *)data;
+ struct ibv_flow_spec_eth *eth;
+ const unsigned int eth_size = sizeof(struct ibv_flow_spec_eth);
- eth = (void *)((uintptr_t)flow->ibv_attr + flow->offset - eth_size);
- if (!spec)
- return 0;
- if (!mask)
- mask = default_mask;
- eth->val.vlan_tag = spec->tci;
- eth->mask.vlan_tag = mask->tci;
- eth->val.vlan_tag &= eth->mask.vlan_tag;
+ if (spec) {
+ unsigned int i;
+ if (!mask)
+ mask = default_mask;
+
+ if (parser->drop) {
+ eth = (void *)((uintptr_t)parser->drop_q.ibv_attr +
+ parser->drop_q.offset - eth_size);
+ eth->val.vlan_tag = spec->tci;
+ eth->mask.vlan_tag = mask->tci;
+ eth->val.vlan_tag &= eth->mask.vlan_tag;
+ return 0;
+ }
+ for (i = 0; i != hash_rxq_init_n; ++i) {
+ if (!parser->queue[i].ibv_attr)
+ continue;
+
+ eth = (void *)((uintptr_t)parser->queue[i].ibv_attr +
+ parser->queue[i].offset - eth_size);
+ eth->val.vlan_tag = spec->tci;
+ eth->mask.vlan_tag = mask->tci;
+ eth->val.vlan_tag &= eth->mask.vlan_tag;
+ }
+ }
return 0;
}
@@ -729,40 +1379,38 @@ mlx5_flow_create_ipv4(const struct rte_flow_item *item,
{
const struct rte_flow_item_ipv4 *spec = item->spec;
const struct rte_flow_item_ipv4 *mask = item->mask;
- struct mlx5_flow *flow = (struct mlx5_flow *)data;
- struct ibv_exp_flow_spec_ipv4_ext *ipv4;
- unsigned int ipv4_size = sizeof(struct ibv_exp_flow_spec_ipv4_ext);
-
- ++flow->ibv_attr->num_of_specs;
- flow->ibv_attr->priority = 1;
- flow->hash_fields = (IBV_EXP_RX_HASH_SRC_IPV4 |
- IBV_EXP_RX_HASH_DST_IPV4);
- ipv4 = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
- *ipv4 = (struct ibv_exp_flow_spec_ipv4_ext) {
- .type = flow->inner | IBV_EXP_FLOW_SPEC_IPV4_EXT,
+ struct mlx5_flow_parse *parser = (struct mlx5_flow_parse *)data;
+ unsigned int ipv4_size = sizeof(struct ibv_flow_spec_ipv4_ext);
+ struct ibv_flow_spec_ipv4_ext ipv4 = {
+ .type = parser->inner | IBV_FLOW_SPEC_IPV4_EXT,
.size = ipv4_size,
};
- if (!spec)
- return 0;
- if (!mask)
- mask = default_mask;
- ipv4->val = (struct ibv_exp_flow_ipv4_ext_filter){
- .src_ip = spec->hdr.src_addr,
- .dst_ip = spec->hdr.dst_addr,
- .proto = spec->hdr.next_proto_id,
- .tos = spec->hdr.type_of_service,
- };
- ipv4->mask = (struct ibv_exp_flow_ipv4_ext_filter){
- .src_ip = mask->hdr.src_addr,
- .dst_ip = mask->hdr.dst_addr,
- .proto = mask->hdr.next_proto_id,
- .tos = mask->hdr.type_of_service,
- };
- /* Remove unwanted bits from values. */
- ipv4->val.src_ip &= ipv4->mask.src_ip;
- ipv4->val.dst_ip &= ipv4->mask.dst_ip;
- ipv4->val.proto &= ipv4->mask.proto;
- ipv4->val.tos &= ipv4->mask.tos;
+
+ /* Don't update layer for the inner pattern. */
+ if (!parser->inner)
+ parser->layer = HASH_RXQ_IPV4;
+ if (spec) {
+ if (!mask)
+ mask = default_mask;
+ ipv4.val = (struct ibv_flow_ipv4_ext_filter){
+ .src_ip = spec->hdr.src_addr,
+ .dst_ip = spec->hdr.dst_addr,
+ .proto = spec->hdr.next_proto_id,
+ .tos = spec->hdr.type_of_service,
+ };
+ ipv4.mask = (struct ibv_flow_ipv4_ext_filter){
+ .src_ip = mask->hdr.src_addr,
+ .dst_ip = mask->hdr.dst_addr,
+ .proto = mask->hdr.next_proto_id,
+ .tos = mask->hdr.type_of_service,
+ };
+ /* Remove unwanted bits from values. */
+ ipv4.val.src_ip &= ipv4.mask.src_ip;
+ ipv4.val.dst_ip &= ipv4.mask.dst_ip;
+ ipv4.val.proto &= ipv4.mask.proto;
+ ipv4.val.tos &= ipv4.mask.tos;
+ }
+ mlx5_flow_create_copy(parser, &ipv4, ipv4_size);
return 0;
}
@@ -783,43 +1431,42 @@ mlx5_flow_create_ipv6(const struct rte_flow_item *item,
{
const struct rte_flow_item_ipv6 *spec = item->spec;
const struct rte_flow_item_ipv6 *mask = item->mask;
- struct mlx5_flow *flow = (struct mlx5_flow *)data;
- struct ibv_exp_flow_spec_ipv6_ext *ipv6;
- unsigned int ipv6_size = sizeof(struct ibv_exp_flow_spec_ipv6_ext);
- unsigned int i;
-
- ++flow->ibv_attr->num_of_specs;
- flow->ibv_attr->priority = 1;
- flow->hash_fields = (IBV_EXP_RX_HASH_SRC_IPV6 |
- IBV_EXP_RX_HASH_DST_IPV6);
- ipv6 = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
- *ipv6 = (struct ibv_exp_flow_spec_ipv6_ext) {
- .type = flow->inner | IBV_EXP_FLOW_SPEC_IPV6_EXT,
+ struct mlx5_flow_parse *parser = (struct mlx5_flow_parse *)data;
+ unsigned int ipv6_size = sizeof(struct ibv_flow_spec_ipv6);
+ struct ibv_flow_spec_ipv6 ipv6 = {
+ .type = parser->inner | IBV_FLOW_SPEC_IPV6,
.size = ipv6_size,
};
- if (!spec)
- return 0;
- if (!mask)
- mask = default_mask;
- memcpy(ipv6->val.src_ip, spec->hdr.src_addr,
- RTE_DIM(ipv6->val.src_ip));
- memcpy(ipv6->val.dst_ip, spec->hdr.dst_addr,
- RTE_DIM(ipv6->val.dst_ip));
- memcpy(ipv6->mask.src_ip, mask->hdr.src_addr,
- RTE_DIM(ipv6->mask.src_ip));
- memcpy(ipv6->mask.dst_ip, mask->hdr.dst_addr,
- RTE_DIM(ipv6->mask.dst_ip));
- ipv6->mask.flow_label = mask->hdr.vtc_flow;
- ipv6->mask.next_hdr = mask->hdr.proto;
- ipv6->mask.hop_limit = mask->hdr.hop_limits;
- /* Remove unwanted bits from values. */
- for (i = 0; i < RTE_DIM(ipv6->val.src_ip); ++i) {
- ipv6->val.src_ip[i] &= ipv6->mask.src_ip[i];
- ipv6->val.dst_ip[i] &= ipv6->mask.dst_ip[i];
- }
- ipv6->val.flow_label &= ipv6->mask.flow_label;
- ipv6->val.next_hdr &= ipv6->mask.next_hdr;
- ipv6->val.hop_limit &= ipv6->mask.hop_limit;
+
+ /* Don't update layer for the inner pattern. */
+ if (!parser->inner)
+ parser->layer = HASH_RXQ_IPV6;
+ if (spec) {
+ unsigned int i;
+
+ if (!mask)
+ mask = default_mask;
+ memcpy(&ipv6.val.src_ip, spec->hdr.src_addr,
+ RTE_DIM(ipv6.val.src_ip));
+ memcpy(&ipv6.val.dst_ip, spec->hdr.dst_addr,
+ RTE_DIM(ipv6.val.dst_ip));
+ memcpy(&ipv6.mask.src_ip, mask->hdr.src_addr,
+ RTE_DIM(ipv6.mask.src_ip));
+ memcpy(&ipv6.mask.dst_ip, mask->hdr.dst_addr,
+ RTE_DIM(ipv6.mask.dst_ip));
+ ipv6.mask.flow_label = mask->hdr.vtc_flow;
+ ipv6.mask.next_hdr = mask->hdr.proto;
+ ipv6.mask.hop_limit = mask->hdr.hop_limits;
+ /* Remove unwanted bits from values. */
+ for (i = 0; i < RTE_DIM(ipv6.val.src_ip); ++i) {
+ ipv6.val.src_ip[i] &= ipv6.mask.src_ip[i];
+ ipv6.val.dst_ip[i] &= ipv6.mask.dst_ip[i];
+ }
+ ipv6.val.flow_label &= ipv6.mask.flow_label;
+ ipv6.val.next_hdr &= ipv6.mask.next_hdr;
+ ipv6.val.hop_limit &= ipv6.mask.hop_limit;
+ }
+ mlx5_flow_create_copy(parser, &ipv6, ipv6_size);
return 0;
}
@@ -840,30 +1487,32 @@ mlx5_flow_create_udp(const struct rte_flow_item *item,
{
const struct rte_flow_item_udp *spec = item->spec;
const struct rte_flow_item_udp *mask = item->mask;
- struct mlx5_flow *flow = (struct mlx5_flow *)data;
- struct ibv_exp_flow_spec_tcp_udp *udp;
- unsigned int udp_size = sizeof(struct ibv_exp_flow_spec_tcp_udp);
-
- ++flow->ibv_attr->num_of_specs;
- flow->ibv_attr->priority = 0;
- flow->hash_fields |= (IBV_EXP_RX_HASH_SRC_PORT_UDP |
- IBV_EXP_RX_HASH_DST_PORT_UDP);
- udp = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
- *udp = (struct ibv_exp_flow_spec_tcp_udp) {
- .type = flow->inner | IBV_EXP_FLOW_SPEC_UDP,
+ struct mlx5_flow_parse *parser = (struct mlx5_flow_parse *)data;
+ unsigned int udp_size = sizeof(struct ibv_flow_spec_tcp_udp);
+ struct ibv_flow_spec_tcp_udp udp = {
+ .type = parser->inner | IBV_FLOW_SPEC_UDP,
.size = udp_size,
};
- if (!spec)
- return 0;
- if (!mask)
- mask = default_mask;
- udp->val.dst_port = spec->hdr.dst_port;
- udp->val.src_port = spec->hdr.src_port;
- udp->mask.dst_port = mask->hdr.dst_port;
- udp->mask.src_port = mask->hdr.src_port;
- /* Remove unwanted bits from values. */
- udp->val.src_port &= udp->mask.src_port;
- udp->val.dst_port &= udp->mask.dst_port;
+
+ /* Don't update layer for the inner pattern. */
+ if (!parser->inner) {
+ if (parser->layer == HASH_RXQ_IPV4)
+ parser->layer = HASH_RXQ_UDPV4;
+ else
+ parser->layer = HASH_RXQ_UDPV6;
+ }
+ if (spec) {
+ if (!mask)
+ mask = default_mask;
+ udp.val.dst_port = spec->hdr.dst_port;
+ udp.val.src_port = spec->hdr.src_port;
+ udp.mask.dst_port = mask->hdr.dst_port;
+ udp.mask.src_port = mask->hdr.src_port;
+ /* Remove unwanted bits from values. */
+ udp.val.src_port &= udp.mask.src_port;
+ udp.val.dst_port &= udp.mask.dst_port;
+ }
+ mlx5_flow_create_copy(parser, &udp, udp_size);
return 0;
}
@@ -884,30 +1533,32 @@ mlx5_flow_create_tcp(const struct rte_flow_item *item,
{
const struct rte_flow_item_tcp *spec = item->spec;
const struct rte_flow_item_tcp *mask = item->mask;
- struct mlx5_flow *flow = (struct mlx5_flow *)data;
- struct ibv_exp_flow_spec_tcp_udp *tcp;
- unsigned int tcp_size = sizeof(struct ibv_exp_flow_spec_tcp_udp);
-
- ++flow->ibv_attr->num_of_specs;
- flow->ibv_attr->priority = 0;
- flow->hash_fields |= (IBV_EXP_RX_HASH_SRC_PORT_TCP |
- IBV_EXP_RX_HASH_DST_PORT_TCP);
- tcp = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
- *tcp = (struct ibv_exp_flow_spec_tcp_udp) {
- .type = flow->inner | IBV_EXP_FLOW_SPEC_TCP,
+ struct mlx5_flow_parse *parser = (struct mlx5_flow_parse *)data;
+ unsigned int tcp_size = sizeof(struct ibv_flow_spec_tcp_udp);
+ struct ibv_flow_spec_tcp_udp tcp = {
+ .type = parser->inner | IBV_FLOW_SPEC_TCP,
.size = tcp_size,
};
- if (!spec)
- return 0;
- if (!mask)
- mask = default_mask;
- tcp->val.dst_port = spec->hdr.dst_port;
- tcp->val.src_port = spec->hdr.src_port;
- tcp->mask.dst_port = mask->hdr.dst_port;
- tcp->mask.src_port = mask->hdr.src_port;
- /* Remove unwanted bits from values. */
- tcp->val.src_port &= tcp->mask.src_port;
- tcp->val.dst_port &= tcp->mask.dst_port;
+
+ /* Don't update layer for the inner pattern. */
+ if (!parser->inner) {
+ if (parser->layer == HASH_RXQ_IPV4)
+ parser->layer = HASH_RXQ_TCPV4;
+ else
+ parser->layer = HASH_RXQ_TCPV6;
+ }
+ if (spec) {
+ if (!mask)
+ mask = default_mask;
+ tcp.val.dst_port = spec->hdr.dst_port;
+ tcp.val.src_port = spec->hdr.src_port;
+ tcp.mask.dst_port = mask->hdr.dst_port;
+ tcp.mask.src_port = mask->hdr.src_port;
+ /* Remove unwanted bits from values. */
+ tcp.val.src_port &= tcp.mask.src_port;
+ tcp.val.dst_port &= tcp.mask.dst_port;
+ }
+ mlx5_flow_create_copy(parser, &tcp, tcp_size);
return 0;
}
@@ -928,57 +1579,97 @@ mlx5_flow_create_vxlan(const struct rte_flow_item *item,
{
const struct rte_flow_item_vxlan *spec = item->spec;
const struct rte_flow_item_vxlan *mask = item->mask;
- struct mlx5_flow *flow = (struct mlx5_flow *)data;
- struct ibv_exp_flow_spec_tunnel *vxlan;
- unsigned int size = sizeof(struct ibv_exp_flow_spec_tunnel);
+ struct mlx5_flow_parse *parser = (struct mlx5_flow_parse *)data;
+ unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
+ struct ibv_flow_spec_tunnel vxlan = {
+ .type = parser->inner | IBV_FLOW_SPEC_VXLAN_TUNNEL,
+ .size = size,
+ };
union vni {
uint32_t vlan_id;
uint8_t vni[4];
} id;
- ++flow->ibv_attr->num_of_specs;
- flow->ibv_attr->priority = 0;
id.vni[0] = 0;
- vxlan = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
- *vxlan = (struct ibv_exp_flow_spec_tunnel) {
- .type = flow->inner | IBV_EXP_FLOW_SPEC_VXLAN_TUNNEL,
- .size = size,
- };
- flow->inner = IBV_EXP_FLOW_SPEC_INNER;
- if (!spec)
- return 0;
- if (!mask)
- mask = default_mask;
- memcpy(&id.vni[1], spec->vni, 3);
- vxlan->val.tunnel_id = id.vlan_id;
- memcpy(&id.vni[1], mask->vni, 3);
- vxlan->mask.tunnel_id = id.vlan_id;
- /* Remove unwanted bits from values. */
- vxlan->val.tunnel_id &= vxlan->mask.tunnel_id;
+ parser->inner = IBV_FLOW_SPEC_INNER;
+ if (spec) {
+ if (!mask)
+ mask = default_mask;
+ memcpy(&id.vni[1], spec->vni, 3);
+ vxlan.val.tunnel_id = id.vlan_id;
+ memcpy(&id.vni[1], mask->vni, 3);
+ vxlan.mask.tunnel_id = id.vlan_id;
+ /* Remove unwanted bits from values. */
+ vxlan.val.tunnel_id &= vxlan.mask.tunnel_id;
+ }
+ /*
+	 * Tunnel id 0 is equivalent to not adding a VXLAN layer: if only
+	 * this layer is defined in the Verbs specification, it is
+	 * interpreted as a wildcard and all packets will match this rule.
+	 * If it follows a full stack layer (e.g. eth / ipv4 / udp), all
+	 * packets matching the preceding layers will also match this rule.
+	 * To avoid such a situation, VNI 0 is currently refused.
+ */
+ if (!vxlan.val.tunnel_id)
+ return EINVAL;
+ mlx5_flow_create_copy(parser, &vxlan, size);
return 0;
}
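
[Illustration, not part of this patch.] The union above places the 24-bit VNI in the upper three bytes of the 32-bit tunnel id. A standalone demonstration (the VNI value is arbitrary; the printed layout is the host's byte order):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	union {
		uint32_t vlan_id; /* Tunnel id as passed to Verbs. */
		uint8_t vni[4];   /* 24-bit VNI occupies vni[1..3]. */
	} id = { .vlan_id = 0 };
	const uint8_t vni[3] = { 0x12, 0x34, 0x56 };

	memcpy(&id.vni[1], vni, 3);
	printf("tunnel_id = 0x%08" PRIx32 "\n", id.vlan_id);
	return 0;
}
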
/**
* Convert mark/flag action to Verbs specification.
*
- * @param flow
- * Pointer to MLX5 flow structure.
+ * @param parser
+ * Internal parser structure.
* @param mark_id
* Mark identifier.
*/
static int
-mlx5_flow_create_flag_mark(struct mlx5_flow *flow, uint32_t mark_id)
+mlx5_flow_create_flag_mark(struct mlx5_flow_parse *parser, uint32_t mark_id)
{
- struct ibv_exp_flow_spec_action_tag *tag;
- unsigned int size = sizeof(struct ibv_exp_flow_spec_action_tag);
-
- tag = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
- *tag = (struct ibv_exp_flow_spec_action_tag){
- .type = IBV_EXP_FLOW_SPEC_ACTION_TAG,
+ unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
+ struct ibv_flow_spec_action_tag tag = {
+ .type = IBV_FLOW_SPEC_ACTION_TAG,
.size = size,
.tag_id = mlx5_flow_mark_set(mark_id),
};
- ++flow->ibv_attr->num_of_specs;
+
+ assert(parser->mark);
+ mlx5_flow_create_copy(parser, &tag, size);
+ return 0;
+}
+
+/**
+ * Convert count action to Verbs specification.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param parser
+ * Pointer to MLX5 flow parser structure.
+ *
+ * @return
+ * 0 on success, errno value on failure.
+ */
+static int
+mlx5_flow_create_count(struct priv *priv __rte_unused,
+ struct mlx5_flow_parse *parser __rte_unused)
+{
+#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
+ unsigned int size = sizeof(struct ibv_flow_spec_counter_action);
+ struct ibv_counter_set_init_attr init_attr = {0};
+ struct ibv_flow_spec_counter_action counter = {
+ .type = IBV_FLOW_SPEC_ACTION_COUNT,
+ .size = size,
+ .counter_set_handle = 0,
+ };
+
+ init_attr.counter_set_id = 0;
+ parser->cs = ibv_create_counter_set(priv->ctx, &init_attr);
+ if (!parser->cs)
+ return EINVAL;
+ counter.counter_set_handle = parser->cs->handle;
+ mlx5_flow_create_copy(parser, &counter, size);
+#endif
return 0;
}
@@ -987,59 +1678,127 @@ mlx5_flow_create_flag_mark(struct mlx5_flow *flow, uint32_t mark_id)
*
* @param priv
* Pointer to private structure.
+ * @param parser
+ * Internal parser structure.
* @param flow
- * MLX5 flow attributes (filled by mlx5_flow_validate()).
+ * Pointer to the rte_flow.
* @param[out] error
* Perform verbose error reporting if not NULL.
*
* @return
- * A flow if the rule could be created.
+ * 0 on success, errno value on failure.
*/
-static struct rte_flow *
+static int
priv_flow_create_action_queue_drop(struct priv *priv,
- struct mlx5_flow *flow,
+ struct mlx5_flow_parse *parser,
+ struct rte_flow *flow,
struct rte_flow_error *error)
{
- struct rte_flow *rte_flow;
-#ifdef HAVE_VERBS_IBV_EXP_FLOW_SPEC_ACTION_DROP
- struct ibv_exp_flow_spec_action_drop *drop;
- unsigned int size = sizeof(struct ibv_exp_flow_spec_action_drop);
-#endif
+ struct ibv_flow_spec_action_drop *drop;
+ unsigned int size = sizeof(struct ibv_flow_spec_action_drop);
+ int err = 0;
assert(priv->pd);
assert(priv->ctx);
- rte_flow = rte_calloc(__func__, 1, sizeof(*rte_flow), 0);
- if (!rte_flow) {
- rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
- NULL, "cannot allocate flow memory");
- return NULL;
- }
- rte_flow->drop = 1;
-#ifdef HAVE_VERBS_IBV_EXP_FLOW_SPEC_ACTION_DROP
- drop = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
- *drop = (struct ibv_exp_flow_spec_action_drop){
- .type = IBV_EXP_FLOW_SPEC_ACTION_DROP,
+ flow->drop = 1;
+ drop = (void *)((uintptr_t)parser->drop_q.ibv_attr +
+ parser->drop_q.offset);
+ *drop = (struct ibv_flow_spec_action_drop){
+ .type = IBV_FLOW_SPEC_ACTION_DROP,
.size = size,
};
- ++flow->ibv_attr->num_of_specs;
- flow->offset += sizeof(struct ibv_exp_flow_spec_action_drop);
-#endif
- rte_flow->ibv_attr = flow->ibv_attr;
- if (!priv->started)
- return rte_flow;
- rte_flow->qp = priv->flow_drop_queue->qp;
- rte_flow->ibv_flow = ibv_exp_create_flow(rte_flow->qp,
- rte_flow->ibv_attr);
- if (!rte_flow->ibv_flow) {
+ ++parser->drop_q.ibv_attr->num_of_specs;
+ parser->drop_q.offset += size;
+ flow->drxq.ibv_attr = parser->drop_q.ibv_attr;
+ if (parser->count)
+ flow->cs = parser->cs;
+ if (!priv->dev->data->dev_started)
+ return 0;
+ parser->drop_q.ibv_attr = NULL;
+ flow->drxq.ibv_flow = ibv_create_flow(priv->flow_drop_queue->qp,
+ flow->drxq.ibv_attr);
+ if (!flow->drxq.ibv_flow) {
rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
NULL, "flow rule creation failure");
+ err = ENOMEM;
goto error;
}
- return rte_flow;
+ return 0;
error:
- assert(rte_flow);
- rte_free(rte_flow);
- return NULL;
+ assert(flow);
+ if (flow->drxq.ibv_flow) {
+ claim_zero(ibv_destroy_flow(flow->drxq.ibv_flow));
+ flow->drxq.ibv_flow = NULL;
+ }
+ if (flow->drxq.ibv_attr) {
+ rte_free(flow->drxq.ibv_attr);
+ flow->drxq.ibv_attr = NULL;
+ }
+ if (flow->cs) {
+ claim_zero(ibv_destroy_counter_set(flow->cs));
+ flow->cs = NULL;
+ parser->cs = NULL;
+ }
+ return err;
+}
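
[Note, not part of this patch.] claim_zero() in the error paths is an mlx5 helper from mlx5_utils.h; to a close approximation it asserts the expression evaluates to zero in debug builds and still performs the call (discarding the result) in release builds:

#include <assert.h>

/* Approximate definition, following drivers/net/mlx5/mlx5_utils.h. */
#ifndef NDEBUG
#define claim_zero(...) assert((__VA_ARGS__) == 0)
#else
#define claim_zero(...) (__VA_ARGS__)
#endif
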
+
+/**
+ * Create hash Rx queues when RSS is enabled.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param parser
+ * Internal parser structure.
+ * @param flow
+ * Pointer to the rte_flow.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ *
+ * @return
+ *   0 on success, an errno value otherwise and rte_errno is set.
+ */
+static int
+priv_flow_create_action_queue_rss(struct priv *priv,
+ struct mlx5_flow_parse *parser,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ unsigned int i;
+
+ for (i = 0; i != hash_rxq_init_n; ++i) {
+ uint64_t hash_fields;
+
+ if (!parser->queue[i].ibv_attr)
+ continue;
+ flow->frxq[i].ibv_attr = parser->queue[i].ibv_attr;
+ parser->queue[i].ibv_attr = NULL;
+ hash_fields = hash_rxq_init[i].hash_fields;
+ if (!priv->dev->data->dev_started)
+ continue;
+ flow->frxq[i].hrxq =
+ mlx5_priv_hrxq_get(priv,
+ parser->rss_conf.rss_key,
+ parser->rss_conf.rss_key_len,
+ hash_fields,
+ parser->queues,
+ parser->queues_n);
+ if (flow->frxq[i].hrxq)
+ continue;
+ flow->frxq[i].hrxq =
+ mlx5_priv_hrxq_new(priv,
+ parser->rss_conf.rss_key,
+ parser->rss_conf.rss_key_len,
+ hash_fields,
+ parser->queues,
+ parser->queues_n);
+ if (!flow->frxq[i].hrxq) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "cannot create hash rxq");
+ return ENOMEM;
+ }
+ }
+ return 0;
}
/**
@@ -1047,112 +1806,79 @@ error:
*
* @param priv
* Pointer to private structure.
+ * @param parser
+ * Internal parser structure.
* @param flow
- * MLX5 flow attributes (filled by mlx5_flow_validate()).
- * @param action
- * Target action structure.
+ * Pointer to the rte_flow.
* @param[out] error
* Perform verbose error reporting if not NULL.
*
* @return
- * A flow if the rule could be created.
+ *   0 on success, an errno value otherwise and rte_errno is set.
*/
-static struct rte_flow *
+static int
priv_flow_create_action_queue(struct priv *priv,
- struct mlx5_flow *flow,
- struct mlx5_flow_action *action,
+ struct mlx5_flow_parse *parser,
+ struct rte_flow *flow,
struct rte_flow_error *error)
{
- struct rte_flow *rte_flow;
+ int err = 0;
unsigned int i;
- unsigned int j;
- const unsigned int wqs_n = 1 << log2above(action->queues_n);
- struct ibv_exp_wq *wqs[wqs_n];
assert(priv->pd);
assert(priv->ctx);
- assert(!action->drop);
- rte_flow = rte_calloc(__func__, 1, sizeof(*rte_flow) +
- sizeof(*rte_flow->rxqs) * action->queues_n, 0);
- if (!rte_flow) {
- rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
- NULL, "cannot allocate flow memory");
- return NULL;
- }
- for (i = 0; i < action->queues_n; ++i) {
- struct rxq_ctrl *rxq;
-
- rxq = container_of((*priv->rxqs)[action->queues[i]],
- struct rxq_ctrl, rxq);
- wqs[i] = rxq->wq;
- rte_flow->rxqs[i] = &rxq->rxq;
- ++rte_flow->rxqs_n;
- rxq->rxq.mark |= action->mark;
- }
- /* finalise indirection table. */
- for (j = 0; i < wqs_n; ++i, ++j) {
- wqs[i] = wqs[j];
- if (j == action->queues_n)
- j = 0;
- }
- rte_flow->mark = action->mark;
- rte_flow->ibv_attr = flow->ibv_attr;
- rte_flow->hash_fields = flow->hash_fields;
- rte_flow->ind_table = ibv_exp_create_rwq_ind_table(
- priv->ctx,
- &(struct ibv_exp_rwq_ind_table_init_attr){
- .pd = priv->pd,
- .log_ind_tbl_size = log2above(action->queues_n),
- .ind_tbl = wqs,
- .comp_mask = 0,
- });
- if (!rte_flow->ind_table) {
- rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
- NULL, "cannot allocate indirection table");
- goto error;
- }
- rte_flow->qp = ibv_exp_create_qp(
- priv->ctx,
- &(struct ibv_exp_qp_init_attr){
- .qp_type = IBV_QPT_RAW_PACKET,
- .comp_mask =
- IBV_EXP_QP_INIT_ATTR_PD |
- IBV_EXP_QP_INIT_ATTR_PORT |
- IBV_EXP_QP_INIT_ATTR_RX_HASH,
- .pd = priv->pd,
- .rx_hash_conf = &(struct ibv_exp_rx_hash_conf){
- .rx_hash_function =
- IBV_EXP_RX_HASH_FUNC_TOEPLITZ,
- .rx_hash_key_len = rss_hash_default_key_len,
- .rx_hash_key = rss_hash_default_key,
- .rx_hash_fields_mask = rte_flow->hash_fields,
- .rwq_ind_tbl = rte_flow->ind_table,
- },
- .port_num = priv->port,
- });
- if (!rte_flow->qp) {
- rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
- NULL, "cannot allocate QP");
+ assert(!parser->drop);
+ err = priv_flow_create_action_queue_rss(priv, parser, flow, error);
+ if (err)
goto error;
+ if (parser->count)
+ flow->cs = parser->cs;
+ if (!priv->dev->data->dev_started)
+ return 0;
+ for (i = 0; i != hash_rxq_init_n; ++i) {
+ if (!flow->frxq[i].hrxq)
+ continue;
+ flow->frxq[i].ibv_flow =
+ ibv_create_flow(flow->frxq[i].hrxq->qp,
+ flow->frxq[i].ibv_attr);
+ if (!flow->frxq[i].ibv_flow) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "flow rule creation failure");
+ err = ENOMEM;
+ goto error;
+ }
+ DEBUG("%p type %d QP %p ibv_flow %p",
+ (void *)flow, i,
+ (void *)flow->frxq[i].hrxq,
+ (void *)flow->frxq[i].ibv_flow);
}
- if (!priv->started)
- return rte_flow;
- rte_flow->ibv_flow = ibv_exp_create_flow(rte_flow->qp,
- rte_flow->ibv_attr);
- if (!rte_flow->ibv_flow) {
- rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
- NULL, "flow rule creation failure");
- goto error;
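+	/* Reflect the MARK/FLAG action on every Rx queue used by the flow. */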
+ for (i = 0; i != parser->queues_n; ++i) {
+ struct mlx5_rxq_data *q =
+ (*priv->rxqs)[parser->queues[i]];
+
+ q->mark |= parser->mark;
}
- return rte_flow;
+ return 0;
error:
- assert(rte_flow);
- if (rte_flow->qp)
- ibv_destroy_qp(rte_flow->qp);
- if (rte_flow->ind_table)
- ibv_exp_destroy_rwq_ind_table(rte_flow->ind_table);
- rte_free(rte_flow);
- return NULL;
+ assert(flow);
+ for (i = 0; i != hash_rxq_init_n; ++i) {
+ if (flow->frxq[i].ibv_flow) {
+ struct ibv_flow *ibv_flow = flow->frxq[i].ibv_flow;
+
+ claim_zero(ibv_destroy_flow(ibv_flow));
+ }
+ if (flow->frxq[i].hrxq)
+ mlx5_priv_hrxq_release(priv, flow->frxq[i].hrxq);
+ if (flow->frxq[i].ibv_attr)
+ rte_free(flow->frxq[i].ibv_attr);
+ }
+ if (flow->cs) {
+ claim_zero(ibv_destroy_counter_set(flow->cs));
+ flow->cs = NULL;
+ parser->cs = NULL;
+ }
+ return err;
}
/**
@@ -1160,6 +1886,8 @@ error:
*
* @param priv
* Pointer to private structure.
+ * @param list
+ * Pointer to a TAILQ flow list.
* @param[in] attr
* Flow rule attributes.
* @param[in] pattern
@@ -1174,65 +1902,86 @@ error:
*/
static struct rte_flow *
priv_flow_create(struct priv *priv,
+ struct mlx5_flows *list,
const struct rte_flow_attr *attr,
const struct rte_flow_item items[],
const struct rte_flow_action actions[],
struct rte_flow_error *error)
{
- struct rte_flow *rte_flow;
- struct mlx5_flow flow = { .offset = sizeof(struct ibv_exp_flow_attr), };
- struct mlx5_flow_action action = {
- .queue = 0,
- .drop = 0,
- .mark = 0,
- .mark_id = MLX5_FLOW_MARK_DEFAULT,
- .queues_n = 0,
- };
+ struct mlx5_flow_parse parser = { .create = 1, };
+ struct rte_flow *flow = NULL;
+ unsigned int i;
int err;
- err = priv_flow_validate(priv, attr, items, actions, error, &flow,
- &action);
+ err = priv_flow_convert(priv, attr, items, actions, error, &parser);
if (err)
goto exit;
- flow.ibv_attr = rte_malloc(__func__, flow.offset, 0);
- flow.offset = sizeof(struct ibv_exp_flow_attr);
- if (!flow.ibv_attr) {
- rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
- NULL, "cannot allocate ibv_attr memory");
- goto exit;
+ flow = rte_calloc(__func__, 1,
+ sizeof(*flow) + parser.queues_n * sizeof(uint16_t),
+ 0);
+ if (!flow) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "cannot allocate flow memory");
+		goto exit;
}
- *flow.ibv_attr = (struct ibv_exp_flow_attr){
- .type = IBV_EXP_FLOW_ATTR_NORMAL,
- .size = sizeof(struct ibv_exp_flow_attr),
- .priority = attr->priority,
- .num_of_specs = 0,
- .port = 0,
- .flags = 0,
- .reserved = 0,
- };
- flow.inner = 0;
- flow.hash_fields = 0;
- claim_zero(priv_flow_validate(priv, attr, items, actions,
- error, &flow, &action));
- if (action.mark && !action.drop) {
- mlx5_flow_create_flag_mark(&flow, action.mark_id);
- flow.offset += sizeof(struct ibv_exp_flow_spec_action_tag);
- }
- if (action.drop)
- rte_flow =
- priv_flow_create_action_queue_drop(priv, &flow, error);
- else
- rte_flow = priv_flow_create_action_queue(priv, &flow, &action,
+ /* Copy queues configuration. */
+ flow->queues = (uint16_t (*)[])(flow + 1);
+ memcpy(flow->queues, parser.queues, parser.queues_n * sizeof(uint16_t));
+ flow->queues_n = parser.queues_n;
+ /* Copy RSS configuration. */
+ flow->rss_conf = parser.rss_conf;
+ flow->rss_conf.rss_key = flow->rss_key;
+ memcpy(flow->rss_key, parser.rss_key, parser.rss_conf.rss_key_len);
+	/* Finalise the flow. */
+ if (parser.drop)
+ err = priv_flow_create_action_queue_drop(priv, &parser, flow,
error);
- if (!rte_flow)
+ else
+ err = priv_flow_create_action_queue(priv, &parser, flow, error);
+ if (err)
goto exit;
- return rte_flow;
+ TAILQ_INSERT_TAIL(list, flow, next);
+ DEBUG("Flow created %p", (void *)flow);
+ return flow;
exit:
- rte_free(flow.ibv_attr);
+ if (parser.drop) {
+ rte_free(parser.drop_q.ibv_attr);
+ } else {
+ for (i = 0; i != hash_rxq_init_n; ++i) {
+ if (parser.queue[i].ibv_attr)
+ rte_free(parser.queue[i].ibv_attr);
+ }
+ }
+ rte_free(flow);
return NULL;
}
/**
+ * Validate a flow supported by the NIC.
+ *
+ * @see rte_flow_validate()
+ * @see rte_flow_ops
+ */
+int
+mlx5_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct priv *priv = dev->data->dev_private;
+ int ret;
+ struct mlx5_flow_parse parser = { .create = 0, };
+
+ priv_lock(priv);
+ ret = priv_flow_convert(priv, attr, items, actions, error, &parser);
+ priv_unlock(priv);
+ return ret;
+}
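+
+/*
+ * Illustrative only, not part of this patch: applications reach this entry
+ * point through the generic API, e.g.:
+ *
+ *   struct rte_flow_error err;
+ *
+ *   if (rte_flow_validate(port_id, &attr, pattern, actions, &err))
+ *       printf("flow not supported: %s\n", err.message);
+ *
+ * With parser.create set to 0 only the conversion step runs; nothing is
+ * programmed into the device.
+ */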
+
+/**
* Create a flow.
*
* @see rte_flow_create()
@@ -1249,11 +1998,8 @@ mlx5_flow_create(struct rte_eth_dev *dev,
struct rte_flow *flow;
priv_lock(priv);
- flow = priv_flow_create(priv, attr, items, actions, error);
- if (flow) {
- TAILQ_INSERT_TAIL(&priv->flows, flow, next);
- DEBUG("Flow created %p", (void *)flow);
- }
+ flow = priv_flow_create(priv, &priv->flows, attr, items, actions,
+ error);
priv_unlock(priv);
return flow;
}
@@ -1263,121 +2009,95 @@ mlx5_flow_create(struct rte_eth_dev *dev,
*
* @param priv
* Pointer to private structure.
+ * @param list
+ * Pointer to a TAILQ flow list.
* @param[in] flow
* Flow to destroy.
*/
static void
priv_flow_destroy(struct priv *priv,
+ struct mlx5_flows *list,
struct rte_flow *flow)
{
- TAILQ_REMOVE(&priv->flows, flow, next);
- if (flow->ibv_flow)
- claim_zero(ibv_exp_destroy_flow(flow->ibv_flow));
- if (flow->drop)
+ unsigned int i;
+
+ if (flow->drop || !flow->mark)
goto free;
- if (flow->qp)
- claim_zero(ibv_destroy_qp(flow->qp));
- if (flow->ind_table)
- claim_zero(ibv_exp_destroy_rwq_ind_table(flow->ind_table));
- if (flow->drop && flow->wq)
- claim_zero(ibv_exp_destroy_wq(flow->wq));
- if (flow->drop && flow->cq)
- claim_zero(ibv_destroy_cq(flow->cq));
- if (flow->mark) {
+ for (i = 0; i != flow->queues_n; ++i) {
struct rte_flow *tmp;
- struct rxq *rxq;
- uint32_t mark_n = 0;
- uint32_t queue_n;
+ int mark = 0;
/*
* To remove the mark from the queue, the queue must not be
* present in any other marked flow (RSS or not).
*/
- for (queue_n = 0; queue_n < flow->rxqs_n; ++queue_n) {
- rxq = flow->rxqs[queue_n];
- for (tmp = TAILQ_FIRST(&priv->flows);
- tmp;
- tmp = TAILQ_NEXT(tmp, next)) {
- uint32_t tqueue_n;
-
- if (tmp->drop)
+ TAILQ_FOREACH(tmp, list, next) {
+ unsigned int j;
+ uint16_t *tqs = NULL;
+ uint16_t tq_n = 0;
+
+ if (!tmp->mark)
+ continue;
+ for (j = 0; j != hash_rxq_init_n; ++j) {
+ if (!tmp->frxq[j].hrxq)
continue;
- for (tqueue_n = 0;
- tqueue_n < tmp->rxqs_n;
- ++tqueue_n) {
- struct rxq *trxq;
-
- trxq = tmp->rxqs[tqueue_n];
- if (rxq == trxq)
- ++mark_n;
- }
+ tqs = tmp->frxq[j].hrxq->ind_table->queues;
+ tq_n = tmp->frxq[j].hrxq->ind_table->queues_n;
}
- rxq->mark = !!mark_n;
+ if (!tq_n)
+ continue;
+ for (j = 0; (j != tq_n) && !mark; j++)
+ if (tqs[j] == (*flow->queues)[i])
+ mark = 1;
}
+ (*priv->rxqs)[(*flow->queues)[i]]->mark = mark;
}
free:
- rte_free(flow->ibv_attr);
+ if (flow->drop) {
+ if (flow->drxq.ibv_flow)
+ claim_zero(ibv_destroy_flow(flow->drxq.ibv_flow));
+ rte_free(flow->drxq.ibv_attr);
+ } else {
+ for (i = 0; i != hash_rxq_init_n; ++i) {
+ struct mlx5_flow *frxq = &flow->frxq[i];
+
+ if (frxq->ibv_flow)
+ claim_zero(ibv_destroy_flow(frxq->ibv_flow));
+ if (frxq->hrxq)
+ mlx5_priv_hrxq_release(priv, frxq->hrxq);
+ if (frxq->ibv_attr)
+ rte_free(frxq->ibv_attr);
+ }
+ }
+ if (flow->cs) {
+ claim_zero(ibv_destroy_counter_set(flow->cs));
+ flow->cs = NULL;
+ }
+ TAILQ_REMOVE(list, flow, next);
DEBUG("Flow destroyed %p", (void *)flow);
rte_free(flow);
}
/**
- * Destroy a flow.
- *
- * @see rte_flow_destroy()
- * @see rte_flow_ops
- */
-int
-mlx5_flow_destroy(struct rte_eth_dev *dev,
- struct rte_flow *flow,
- struct rte_flow_error *error)
-{
- struct priv *priv = dev->data->dev_private;
-
- (void)error;
- priv_lock(priv);
- priv_flow_destroy(priv, flow);
- priv_unlock(priv);
- return 0;
-}
-
-/**
* Destroy all flows.
*
* @param priv
* Pointer to private structure.
+ * @param list
+ * Pointer to a TAILQ flow list.
*/
-static void
-priv_flow_flush(struct priv *priv)
+void
+priv_flow_flush(struct priv *priv, struct mlx5_flows *list)
{
- while (!TAILQ_EMPTY(&priv->flows)) {
+ while (!TAILQ_EMPTY(list)) {
struct rte_flow *flow;
- flow = TAILQ_FIRST(&priv->flows);
- priv_flow_destroy(priv, flow);
+ flow = TAILQ_FIRST(list);
+ priv_flow_destroy(priv, list, flow);
}
}
/**
- * Destroy all flows.
- *
- * @see rte_flow_flush()
- * @see rte_flow_ops
- */
-int
-mlx5_flow_flush(struct rte_eth_dev *dev,
- struct rte_flow_error *error)
-{
- struct priv *priv = dev->data->dev_private;
-
- (void)error;
- priv_lock(priv);
- priv_flow_flush(priv);
- priv_unlock(priv);
- return 0;
-}
-
-/**
* Create drop queue.
*
* @param priv
@@ -1386,11 +2106,10 @@ mlx5_flow_flush(struct rte_eth_dev *dev,
* @return
* 0 on success.
*/
-static int
+int
priv_flow_create_drop_queue(struct priv *priv)
{
- struct rte_flow_drop *fdq = NULL;
- unsigned int i;
+ struct mlx5_hrxq_drop *fdq = NULL;
assert(priv->pd);
assert(priv->ctx);
@@ -1399,57 +2118,50 @@ priv_flow_create_drop_queue(struct priv *priv)
WARN("cannot allocate memory for drop queue");
goto error;
}
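+	/*
+	 * The drop queue is a regular hash QP whose single WQ is never
+	 * polled by the PMD, so matched packets are simply discarded.
+	 */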
- fdq->cq = ibv_exp_create_cq(priv->ctx, 1, NULL, NULL, 0,
- &(struct ibv_exp_cq_init_attr){
- .comp_mask = 0,
- });
+ fdq->cq = ibv_create_cq(priv->ctx, 1, NULL, NULL, 0);
if (!fdq->cq) {
WARN("cannot allocate CQ for drop queue");
goto error;
}
- for (i = 0; i != MLX5_DROP_WQ_N; ++i) {
- fdq->wqs[i] = ibv_exp_create_wq(priv->ctx,
- &(struct ibv_exp_wq_init_attr){
- .wq_type = IBV_EXP_WQT_RQ,
- .max_recv_wr = 1,
- .max_recv_sge = 1,
- .pd = priv->pd,
- .cq = fdq->cq,
- });
- if (!fdq->wqs[i]) {
- WARN("cannot allocate WQ for drop queue");
- goto error;
- }
- }
- fdq->ind_table = ibv_exp_create_rwq_ind_table(priv->ctx,
- &(struct ibv_exp_rwq_ind_table_init_attr){
+ fdq->wq = ibv_create_wq(priv->ctx,
+ &(struct ibv_wq_init_attr){
+ .wq_type = IBV_WQT_RQ,
+ .max_wr = 1,
+ .max_sge = 1,
.pd = priv->pd,
+ .cq = fdq->cq,
+ });
+ if (!fdq->wq) {
+ WARN("cannot allocate WQ for drop queue");
+ goto error;
+ }
+ fdq->ind_table = ibv_create_rwq_ind_table(priv->ctx,
+ &(struct ibv_rwq_ind_table_init_attr){
.log_ind_tbl_size = 0,
- .ind_tbl = fdq->wqs,
+ .ind_tbl = &fdq->wq,
.comp_mask = 0,
});
if (!fdq->ind_table) {
WARN("cannot allocate indirection table for drop queue");
goto error;
}
- fdq->qp = ibv_exp_create_qp(priv->ctx,
- &(struct ibv_exp_qp_init_attr){
+ fdq->qp = ibv_create_qp_ex(priv->ctx,
+ &(struct ibv_qp_init_attr_ex){
.qp_type = IBV_QPT_RAW_PACKET,
.comp_mask =
- IBV_EXP_QP_INIT_ATTR_PD |
- IBV_EXP_QP_INIT_ATTR_PORT |
- IBV_EXP_QP_INIT_ATTR_RX_HASH,
- .pd = priv->pd,
- .rx_hash_conf = &(struct ibv_exp_rx_hash_conf){
+ IBV_QP_INIT_ATTR_PD |
+ IBV_QP_INIT_ATTR_IND_TABLE |
+ IBV_QP_INIT_ATTR_RX_HASH,
+ .rx_hash_conf = (struct ibv_rx_hash_conf){
.rx_hash_function =
- IBV_EXP_RX_HASH_FUNC_TOEPLITZ,
+ IBV_RX_HASH_FUNC_TOEPLITZ,
.rx_hash_key_len = rss_hash_default_key_len,
.rx_hash_key = rss_hash_default_key,
.rx_hash_fields_mask = 0,
- .rwq_ind_tbl = fdq->ind_table,
},
- .port_num = priv->port,
- });
+ .rwq_ind_tbl = fdq->ind_table,
+ .pd = priv->pd
+ });
if (!fdq->qp) {
WARN("cannot allocate QP for drop queue");
goto error;
@@ -1460,11 +2172,9 @@ error:
if (fdq->qp)
claim_zero(ibv_destroy_qp(fdq->qp));
if (fdq->ind_table)
- claim_zero(ibv_exp_destroy_rwq_ind_table(fdq->ind_table));
- for (i = 0; i != MLX5_DROP_WQ_N; ++i) {
- if (fdq->wqs[i])
- claim_zero(ibv_exp_destroy_wq(fdq->wqs[i]));
- }
+ claim_zero(ibv_destroy_rwq_ind_table(fdq->ind_table));
+ if (fdq->wq)
+ claim_zero(ibv_destroy_wq(fdq->wq));
if (fdq->cq)
claim_zero(ibv_destroy_cq(fdq->cq));
if (fdq)
@@ -1479,22 +2189,19 @@ error:
* @param priv
* Pointer to private structure.
*/
-static void
+void
priv_flow_delete_drop_queue(struct priv *priv)
{
- struct rte_flow_drop *fdq = priv->flow_drop_queue;
- unsigned int i;
+ struct mlx5_hrxq_drop *fdq = priv->flow_drop_queue;
if (!fdq)
return;
if (fdq->qp)
claim_zero(ibv_destroy_qp(fdq->qp));
if (fdq->ind_table)
- claim_zero(ibv_exp_destroy_rwq_ind_table(fdq->ind_table));
- for (i = 0; i != MLX5_DROP_WQ_N; ++i) {
- if (fdq->wqs[i])
- claim_zero(ibv_exp_destroy_wq(fdq->wqs[i]));
- }
+ claim_zero(ibv_destroy_rwq_ind_table(fdq->ind_table));
+ if (fdq->wq)
+ claim_zero(ibv_destroy_wq(fdq->wq));
if (fdq->cq)
claim_zero(ibv_destroy_cq(fdq->cq));
rte_free(fdq);
@@ -1504,28 +2211,49 @@ priv_flow_delete_drop_queue(struct priv *priv)
/**
* Remove all flows.
*
- * Called by dev_stop() to remove all flows.
- *
* @param priv
* Pointer to private structure.
+ * @param list
+ * Pointer to a TAILQ flow list.
*/
void
-priv_flow_stop(struct priv *priv)
+priv_flow_stop(struct priv *priv, struct mlx5_flows *list)
{
struct rte_flow *flow;
- TAILQ_FOREACH_REVERSE(flow, &priv->flows, mlx5_flows, next) {
- claim_zero(ibv_exp_destroy_flow(flow->ibv_flow));
- flow->ibv_flow = NULL;
+ TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next) {
+ unsigned int i;
+
+ if (flow->drop) {
+ if (!flow->drxq.ibv_flow)
+ continue;
+ claim_zero(ibv_destroy_flow(flow->drxq.ibv_flow));
+ flow->drxq.ibv_flow = NULL;
+ /* Next flow. */
+ continue;
+ }
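+		/*
+		 * Every hash Rx queue of a flow targets the same set of
+		 * queues, so any of their indirection tables can be used
+		 * to clear the MARK flag.
+		 */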
if (flow->mark) {
- unsigned int n;
+ struct mlx5_ind_table_ibv *ind_tbl = NULL;
- for (n = 0; n < flow->rxqs_n; ++n)
- flow->rxqs[n]->mark = 0;
+ for (i = 0; i != hash_rxq_init_n; ++i) {
+ if (!flow->frxq[i].hrxq)
+ continue;
+ ind_tbl = flow->frxq[i].hrxq->ind_table;
+ }
+ assert(ind_tbl);
+ for (i = 0; i != ind_tbl->queues_n; ++i)
+ (*priv->rxqs)[ind_tbl->queues[i]]->mark = 0;
+ }
+ for (i = 0; i != hash_rxq_init_n; ++i) {
+ if (!flow->frxq[i].ibv_flow)
+ continue;
+ claim_zero(ibv_destroy_flow(flow->frxq[i].ibv_flow));
+ flow->frxq[i].ibv_flow = NULL;
+ mlx5_priv_hrxq_release(priv, flow->frxq[i].hrxq);
+ flow->frxq[i].hrxq = NULL;
}
DEBUG("Flow %p removed", (void *)flow);
}
- priv_flow_delete_drop_queue(priv);
}
/**
@@ -1533,75 +2261,321 @@ priv_flow_stop(struct priv *priv)
*
* @param priv
* Pointer to private structure.
+ * @param list
+ * Pointer to a TAILQ flow list.
*
* @return
 *   0 on success, an errno value otherwise and rte_errno is set.
*/
int
-priv_flow_start(struct priv *priv)
+priv_flow_start(struct priv *priv, struct mlx5_flows *list)
{
- int ret;
struct rte_flow *flow;
- ret = priv_flow_create_drop_queue(priv);
- if (ret)
- return -1;
- TAILQ_FOREACH(flow, &priv->flows, next) {
- struct ibv_qp *qp;
+ TAILQ_FOREACH(flow, list, next) {
+ unsigned int i;
- if (flow->drop)
- qp = priv->flow_drop_queue->qp;
- else
- qp = flow->qp;
- flow->ibv_flow = ibv_exp_create_flow(qp, flow->ibv_attr);
- if (!flow->ibv_flow) {
- DEBUG("Flow %p cannot be applied", (void *)flow);
- rte_errno = EINVAL;
- return rte_errno;
+ if (flow->drop) {
+ flow->drxq.ibv_flow =
+ ibv_create_flow(priv->flow_drop_queue->qp,
+ flow->drxq.ibv_attr);
+ if (!flow->drxq.ibv_flow) {
+ DEBUG("Flow %p cannot be applied",
+ (void *)flow);
+ rte_errno = EINVAL;
+ return rte_errno;
+ }
+ DEBUG("Flow %p applied", (void *)flow);
+ /* Next flow. */
+ continue;
}
- DEBUG("Flow %p applied", (void *)flow);
- if (flow->mark) {
- unsigned int n;
-
- for (n = 0; n < flow->rxqs_n; ++n)
- flow->rxqs[n]->mark = 1;
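+		/*
+		 * Reuse a matching hash Rx queue when one exists,
+		 * otherwise create it before attaching the Verbs flow.
+		 */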
+ for (i = 0; i != hash_rxq_init_n; ++i) {
+ if (!flow->frxq[i].ibv_attr)
+ continue;
+ flow->frxq[i].hrxq =
+ mlx5_priv_hrxq_get(priv, flow->rss_conf.rss_key,
+ flow->rss_conf.rss_key_len,
+ hash_rxq_init[i].hash_fields,
+ (*flow->queues),
+ flow->queues_n);
+ if (flow->frxq[i].hrxq)
+ goto flow_create;
+ flow->frxq[i].hrxq =
+ mlx5_priv_hrxq_new(priv, flow->rss_conf.rss_key,
+ flow->rss_conf.rss_key_len,
+ hash_rxq_init[i].hash_fields,
+ (*flow->queues),
+ flow->queues_n);
+ if (!flow->frxq[i].hrxq) {
+ DEBUG("Flow %p cannot be applied",
+ (void *)flow);
+ rte_errno = EINVAL;
+ return rte_errno;
+ }
+flow_create:
+ flow->frxq[i].ibv_flow =
+ ibv_create_flow(flow->frxq[i].hrxq->qp,
+ flow->frxq[i].ibv_attr);
+ if (!flow->frxq[i].ibv_flow) {
+ DEBUG("Flow %p cannot be applied",
+ (void *)flow);
+ rte_errno = EINVAL;
+ return rte_errno;
+ }
+ DEBUG("Flow %p applied", (void *)flow);
}
+ if (!flow->mark)
+ continue;
+ for (i = 0; i != flow->queues_n; ++i)
+ (*priv->rxqs)[(*flow->queues)[i]]->mark = 1;
}
return 0;
}
/**
- * Verify if the Rx queue is used in a flow.
+ * Verify the flow list is empty.
*
* @param priv
- * Pointer to private structure.
- * @param rxq
- * Pointer to the queue to search.
+ * Pointer to private structure.
+ *
+ * @return
+ *   The number of flows not released.
+ */
+int
+priv_flow_verify(struct priv *priv)
+{
+ struct rte_flow *flow;
+ int ret = 0;
+
+ TAILQ_FOREACH(flow, &priv->flows, next) {
+ DEBUG("%p: flow %p still referenced", (void *)priv,
+ (void *)flow);
+ ++ret;
+ }
+ return ret;
+}
+
+/**
+ * Enable a control flow configured from the control plane.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param eth_spec
+ * An Ethernet flow spec to apply.
+ * @param eth_mask
+ * An Ethernet flow mask to apply.
+ * @param vlan_spec
+ * A VLAN flow spec to apply.
+ * @param vlan_mask
+ * A VLAN flow mask to apply.
*
* @return
- * Nonzero if the queue is used by a flow.
+ *   0 on success, an errno value otherwise.
*/
int
-priv_flow_rxq_in_use(struct priv *priv, struct rxq *rxq)
+mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
+ struct rte_flow_item_eth *eth_spec,
+ struct rte_flow_item_eth *eth_mask,
+ struct rte_flow_item_vlan *vlan_spec,
+ struct rte_flow_item_vlan *vlan_mask)
{
+ struct priv *priv = dev->data->dev_private;
+ const struct rte_flow_attr attr = {
+ .ingress = 1,
+ .priority = MLX5_CTRL_FLOW_PRIORITY,
+ };
+ struct rte_flow_item items[] = {
+ {
+ .type = RTE_FLOW_ITEM_TYPE_ETH,
+ .spec = eth_spec,
+ .last = NULL,
+ .mask = eth_mask,
+ },
+ {
+ .type = (vlan_spec) ? RTE_FLOW_ITEM_TYPE_VLAN :
+ RTE_FLOW_ITEM_TYPE_END,
+ .spec = vlan_spec,
+ .last = NULL,
+ .mask = vlan_mask,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_END,
+ },
+ };
+ struct rte_flow_action actions[] = {
+ {
+ .type = RTE_FLOW_ACTION_TYPE_RSS,
+ },
+ {
+ .type = RTE_FLOW_ACTION_TYPE_END,
+ },
+ };
struct rte_flow *flow;
+ struct rte_flow_error error;
+ unsigned int i;
+ union {
+ struct rte_flow_action_rss rss;
+ struct {
+ const struct rte_eth_rss_conf *rss_conf;
+ uint16_t num;
+ uint16_t queue[RTE_MAX_QUEUES_PER_PORT];
+ } local;
+ } action_rss;
+
+ if (!priv->reta_idx_n)
+ return EINVAL;
+ for (i = 0; i != priv->reta_idx_n; ++i)
+ action_rss.local.queue[i] = (*priv->reta_idx)[i];
+ action_rss.local.rss_conf = &priv->rss_conf;
+ action_rss.local.num = priv->reta_idx_n;
+ actions[0].conf = (const void *)&action_rss.rss;
+ flow = priv_flow_create(priv, &priv->ctrl_flows, &attr, items, actions,
+ &error);
+ if (!flow)
+ return rte_errno;
+ return 0;
+}
- for (flow = TAILQ_FIRST(&priv->flows);
- flow;
- flow = TAILQ_NEXT(flow, next)) {
- unsigned int n;
+/**
+ * Enable a control flow configured from the control plane.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param eth_spec
+ * An Ethernet flow spec to apply.
+ * @param eth_mask
+ * An Ethernet flow mask to apply.
+ *
+ * @return
+ *   0 on success, an errno value otherwise.
+ */
+int
+mlx5_ctrl_flow(struct rte_eth_dev *dev,
+ struct rte_flow_item_eth *eth_spec,
+ struct rte_flow_item_eth *eth_mask)
+{
+ return mlx5_ctrl_flow_vlan(dev, eth_spec, eth_mask, NULL, NULL);
+}
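+
+/*
+ * Illustrative only, not part of this patch: mlx5_traffic_enable()
+ * (mlx5_trigger.c) installs control flows this way, e.g. to accept
+ * broadcast traffic:
+ *
+ *   struct rte_flow_item_eth bcast = {
+ *       .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ *   };
+ *
+ *   if (mlx5_ctrl_flow(dev, &bcast, &bcast))
+ *       ERROR("cannot enable broadcast traffic");
+ */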
- if (flow->drop)
- continue;
- for (n = 0; n < flow->rxqs_n; ++n) {
- if (flow->rxqs[n] == rxq)
- return 1;
- }
+/**
+ * Destroy a flow.
+ *
+ * @see rte_flow_destroy()
+ * @see rte_flow_ops
+ */
+int
+mlx5_flow_destroy(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ struct priv *priv = dev->data->dev_private;
+
+ (void)error;
+ priv_lock(priv);
+ priv_flow_destroy(priv, &priv->flows, flow);
+ priv_unlock(priv);
+ return 0;
+}
+
+/**
+ * Destroy all flows.
+ *
+ * @see rte_flow_flush()
+ * @see rte_flow_ops
+ */
+int
+mlx5_flow_flush(struct rte_eth_dev *dev,
+ struct rte_flow_error *error)
+{
+ struct priv *priv = dev->data->dev_private;
+
+ (void)error;
+ priv_lock(priv);
+ priv_flow_flush(priv, &priv->flows);
+ priv_unlock(priv);
+ return 0;
+}
+
+#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
+/**
+ * Query flow counter.
+ *
+ * @param cs
+ *   The counter set to query.
+ * @param counter_stats
+ *   Stored counter statistics used to compute deltas, updated on reset.
+ * @param[out] query_count
+ *   Returned counter data.
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL.
+ *
+ * @return
+ *   0 on success, an errno value otherwise and rte_errno is set.
+ */
+static int
+priv_flow_query_count(struct ibv_counter_set *cs,
+ struct mlx5_flow_counter_stats *counter_stats,
+ struct rte_flow_query_count *query_count,
+ struct rte_flow_error *error)
+{
+ uint64_t counters[2];
+ struct ibv_query_counter_set_attr query_cs_attr = {
+ .cs = cs,
+ .query_flags = IBV_COUNTER_SET_FORCE_UPDATE,
+ };
+ struct ibv_counter_set_data query_out = {
+ .out = counters,
+ .outlen = 2 * sizeof(uint64_t),
+ };
+ int res = ibv_query_counter_set(&query_cs_attr, &query_out);
+
+ if (res) {
+ rte_flow_error_set(error, -res,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "cannot read counter");
+ return -res;
+ }
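+	/* Hardware counters are cumulative; report deltas since last reset. */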
+ query_count->hits_set = 1;
+ query_count->bytes_set = 1;
+ query_count->hits = counters[0] - counter_stats->hits;
+ query_count->bytes = counters[1] - counter_stats->bytes;
+ if (query_count->reset) {
+ counter_stats->hits = counters[0];
+ counter_stats->bytes = counters[1];
}
return 0;
}
/**
+ * Query a flow.
+ *
+ * @see rte_flow_query()
+ * @see rte_flow_ops
+ */
+int
+mlx5_flow_query(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ enum rte_flow_action_type action __rte_unused,
+ void *data,
+ struct rte_flow_error *error)
+{
+ struct priv *priv = dev->data->dev_private;
+ int res = EINVAL;
+
+ priv_lock(priv);
+ if (flow->cs) {
+ res = priv_flow_query_count(flow->cs,
+ &flow->counter_stats,
+ (struct rte_flow_query_count *)data,
+ error);
+ } else {
+ rte_flow_error_set(error, res,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "no counter found for flow");
+ }
+ priv_unlock(priv);
+ return -res;
+}
+#endif
+
+/**
* Isolated mode.
*
* @see rte_flow_isolate()
@@ -1615,7 +2589,7 @@ mlx5_flow_isolate(struct rte_eth_dev *dev,
struct priv *priv = dev->data->dev_private;
priv_lock(priv);
- if (priv->started) {
+ if (dev->data->dev_started) {
rte_flow_error_set(error, EBUSY,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL,
@@ -1624,6 +2598,497 @@ mlx5_flow_isolate(struct rte_eth_dev *dev,
return -rte_errno;
}
priv->isolated = !!enable;
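+	/* Select the matching eth_dev_ops table, both defined in mlx5.c. */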
+ if (enable)
+ priv->dev->dev_ops = &mlx5_dev_ops_isolate;
+ else
+ priv->dev->dev_ops = &mlx5_dev_ops;
priv_unlock(priv);
return 0;
}
+
+/**
+ * Convert a flow director filter to a generic flow.
+ *
+ * @param priv
+ * Private structure.
+ * @param fdir_filter
+ * Flow director filter to add.
+ * @param attributes
+ * Generic flow parameters structure.
+ *
+ * @return
+ * 0 on success, errno value on error.
+ */
+static int
+priv_fdir_filter_convert(struct priv *priv,
+ const struct rte_eth_fdir_filter *fdir_filter,
+ struct mlx5_fdir *attributes)
+{
+ const struct rte_eth_fdir_input *input = &fdir_filter->input;
+
+ /* Validate queue number. */
+ if (fdir_filter->action.rx_queue >= priv->rxqs_n) {
+ ERROR("invalid queue number %d", fdir_filter->action.rx_queue);
+ return EINVAL;
+ }
+ attributes->attr.ingress = 1;
+ attributes->items[0] = (struct rte_flow_item) {
+ .type = RTE_FLOW_ITEM_TYPE_ETH,
+ .spec = &attributes->l2,
+ .mask = &attributes->l2_mask,
+ };
+ switch (fdir_filter->action.behavior) {
+ case RTE_ETH_FDIR_ACCEPT:
+ attributes->actions[0] = (struct rte_flow_action){
+ .type = RTE_FLOW_ACTION_TYPE_QUEUE,
+ .conf = &attributes->queue,
+ };
+ break;
+ case RTE_ETH_FDIR_REJECT:
+ attributes->actions[0] = (struct rte_flow_action){
+ .type = RTE_FLOW_ACTION_TYPE_DROP,
+ };
+ break;
+ default:
+ ERROR("invalid behavior %d", fdir_filter->action.behavior);
+ return ENOTSUP;
+ }
+ attributes->queue.index = fdir_filter->action.rx_queue;
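+	/* Translate the L3/L4 part of the fdir input into flow items. */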
+ switch (fdir_filter->input.flow_type) {
+ case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
+ attributes->l3.ipv4.hdr = (struct ipv4_hdr){
+ .src_addr = input->flow.udp4_flow.ip.src_ip,
+ .dst_addr = input->flow.udp4_flow.ip.dst_ip,
+ .time_to_live = input->flow.udp4_flow.ip.ttl,
+ .type_of_service = input->flow.udp4_flow.ip.tos,
+ .next_proto_id = input->flow.udp4_flow.ip.proto,
+ };
+ attributes->l4.udp.hdr = (struct udp_hdr){
+ .src_port = input->flow.udp4_flow.src_port,
+ .dst_port = input->flow.udp4_flow.dst_port,
+ };
+ attributes->items[1] = (struct rte_flow_item){
+ .type = RTE_FLOW_ITEM_TYPE_IPV4,
+ .spec = &attributes->l3,
+ };
+ attributes->items[2] = (struct rte_flow_item){
+ .type = RTE_FLOW_ITEM_TYPE_UDP,
+ .spec = &attributes->l4,
+ };
+ break;
+ case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
+ attributes->l3.ipv4.hdr = (struct ipv4_hdr){
+ .src_addr = input->flow.tcp4_flow.ip.src_ip,
+ .dst_addr = input->flow.tcp4_flow.ip.dst_ip,
+ .time_to_live = input->flow.tcp4_flow.ip.ttl,
+ .type_of_service = input->flow.tcp4_flow.ip.tos,
+ .next_proto_id = input->flow.tcp4_flow.ip.proto,
+ };
+ attributes->l4.tcp.hdr = (struct tcp_hdr){
+ .src_port = input->flow.tcp4_flow.src_port,
+ .dst_port = input->flow.tcp4_flow.dst_port,
+ };
+ attributes->items[1] = (struct rte_flow_item){
+ .type = RTE_FLOW_ITEM_TYPE_IPV4,
+ .spec = &attributes->l3,
+ };
+ attributes->items[2] = (struct rte_flow_item){
+ .type = RTE_FLOW_ITEM_TYPE_TCP,
+ .spec = &attributes->l4,
+ };
+ break;
+ case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
+ attributes->l3.ipv4.hdr = (struct ipv4_hdr){
+ .src_addr = input->flow.ip4_flow.src_ip,
+ .dst_addr = input->flow.ip4_flow.dst_ip,
+ .time_to_live = input->flow.ip4_flow.ttl,
+ .type_of_service = input->flow.ip4_flow.tos,
+ .next_proto_id = input->flow.ip4_flow.proto,
+ };
+ attributes->items[1] = (struct rte_flow_item){
+ .type = RTE_FLOW_ITEM_TYPE_IPV4,
+ .spec = &attributes->l3,
+ };
+ break;
+ case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
+ attributes->l3.ipv6.hdr = (struct ipv6_hdr){
+ .hop_limits = input->flow.udp6_flow.ip.hop_limits,
+ .proto = input->flow.udp6_flow.ip.proto,
+ };
+ memcpy(attributes->l3.ipv6.hdr.src_addr,
+ input->flow.udp6_flow.ip.src_ip,
+ RTE_DIM(attributes->l3.ipv6.hdr.src_addr));
+ memcpy(attributes->l3.ipv6.hdr.dst_addr,
+ input->flow.udp6_flow.ip.dst_ip,
+	       RTE_DIM(attributes->l3.ipv6.hdr.dst_addr));
+ attributes->l4.udp.hdr = (struct udp_hdr){
+ .src_port = input->flow.udp6_flow.src_port,
+ .dst_port = input->flow.udp6_flow.dst_port,
+ };
+ attributes->items[1] = (struct rte_flow_item){
+ .type = RTE_FLOW_ITEM_TYPE_IPV6,
+ .spec = &attributes->l3,
+ };
+ attributes->items[2] = (struct rte_flow_item){
+ .type = RTE_FLOW_ITEM_TYPE_UDP,
+ .spec = &attributes->l4,
+ };
+ break;
+ case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
+ attributes->l3.ipv6.hdr = (struct ipv6_hdr){
+ .hop_limits = input->flow.tcp6_flow.ip.hop_limits,
+ .proto = input->flow.tcp6_flow.ip.proto,
+ };
+ memcpy(attributes->l3.ipv6.hdr.src_addr,
+ input->flow.tcp6_flow.ip.src_ip,
+ RTE_DIM(attributes->l3.ipv6.hdr.src_addr));
+ memcpy(attributes->l3.ipv6.hdr.dst_addr,
+ input->flow.tcp6_flow.ip.dst_ip,
+	       RTE_DIM(attributes->l3.ipv6.hdr.dst_addr));
+ attributes->l4.tcp.hdr = (struct tcp_hdr){
+ .src_port = input->flow.tcp6_flow.src_port,
+ .dst_port = input->flow.tcp6_flow.dst_port,
+ };
+ attributes->items[1] = (struct rte_flow_item){
+ .type = RTE_FLOW_ITEM_TYPE_IPV6,
+ .spec = &attributes->l3,
+ };
+ attributes->items[2] = (struct rte_flow_item){
+ .type = RTE_FLOW_ITEM_TYPE_TCP,
+ .spec = &attributes->l4,
+ };
+ break;
+ case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
+ attributes->l3.ipv6.hdr = (struct ipv6_hdr){
+ .hop_limits = input->flow.ipv6_flow.hop_limits,
+ .proto = input->flow.ipv6_flow.proto,
+ };
+ memcpy(attributes->l3.ipv6.hdr.src_addr,
+ input->flow.ipv6_flow.src_ip,
+ RTE_DIM(attributes->l3.ipv6.hdr.src_addr));
+ memcpy(attributes->l3.ipv6.hdr.dst_addr,
+ input->flow.ipv6_flow.dst_ip,
+	       RTE_DIM(attributes->l3.ipv6.hdr.dst_addr));
+ attributes->items[1] = (struct rte_flow_item){
+ .type = RTE_FLOW_ITEM_TYPE_IPV6,
+ .spec = &attributes->l3,
+ };
+ break;
+ default:
+ ERROR("invalid flow type%d",
+ fdir_filter->input.flow_type);
+ return ENOTSUP;
+ }
+ return 0;
+}
+
+/**
+ * Add new flow director filter and store it in list.
+ *
+ * @param priv
+ * Private structure.
+ * @param fdir_filter
+ * Flow director filter to add.
+ *
+ * @return
+ * 0 on success, errno value on failure.
+ */
+static int
+priv_fdir_filter_add(struct priv *priv,
+ const struct rte_eth_fdir_filter *fdir_filter)
+{
+ struct mlx5_fdir attributes = {
+ .attr.group = 0,
+ .l2_mask = {
+ .dst.addr_bytes = "\x00\x00\x00\x00\x00\x00",
+ .src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
+ .type = 0,
+ },
+ };
+ struct mlx5_flow_parse parser = {
+ .layer = HASH_RXQ_ETH,
+ };
+ struct rte_flow_error error;
+ struct rte_flow *flow;
+ int ret;
+
+ ret = priv_fdir_filter_convert(priv, fdir_filter, &attributes);
+ if (ret)
+ return -ret;
+ ret = priv_flow_convert(priv, &attributes.attr, attributes.items,
+ attributes.actions, &error, &parser);
+ if (ret)
+ return -ret;
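+	/*
+	 * The conversion above only validates the filter;
+	 * priv_flow_create() runs priv_flow_convert() again internally.
+	 */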
+ flow = priv_flow_create(priv,
+ &priv->flows,
+ &attributes.attr,
+ attributes.items,
+ attributes.actions,
+ &error);
+ if (flow) {
+ DEBUG("FDIR created %p", (void *)flow);
+ return 0;
+ }
+ return ENOTSUP;
+}
+
+/**
+ * Delete specific filter.
+ *
+ * @param priv
+ * Private structure.
+ * @param fdir_filter
+ * Filter to be deleted.
+ *
+ * @return
+ * 0 on success, errno value on failure.
+ */
+static int
+priv_fdir_filter_delete(struct priv *priv,
+ const struct rte_eth_fdir_filter *fdir_filter)
+{
+ struct mlx5_fdir attributes = {
+ .attr.group = 0,
+ };
+ struct mlx5_flow_parse parser = {
+ .create = 1,
+ .layer = HASH_RXQ_ETH,
+ };
+ struct rte_flow_error error;
+ struct rte_flow *flow;
+ unsigned int i;
+ int ret;
+
+ ret = priv_fdir_filter_convert(priv, fdir_filter, &attributes);
+ if (ret)
+ return -ret;
+ ret = priv_flow_convert(priv, &attributes.attr, attributes.items,
+ attributes.actions, &error, &parser);
+ if (ret)
+ goto exit;
+	/*
+	 * Special case for the drop action, which is only added to the
+	 * specifications when the flow is actually created. At this point
+	 * the drop specification is still missing and must be appended
+	 * before comparing against existing flows.
+	 */
+ if (parser.drop) {
+ struct ibv_flow_spec_action_drop *drop;
+
+ drop = (void *)((uintptr_t)parser.drop_q.ibv_attr +
+ parser.drop_q.offset);
+ *drop = (struct ibv_flow_spec_action_drop){
+ .type = IBV_FLOW_SPEC_ACTION_DROP,
+ .size = sizeof(struct ibv_flow_spec_action_drop),
+ };
+ parser.drop_q.ibv_attr->num_of_specs++;
+ }
+ TAILQ_FOREACH(flow, &priv->flows, next) {
+ struct ibv_flow_attr *attr;
+ struct ibv_spec_header *attr_h;
+ void *spec;
+ struct ibv_flow_attr *flow_attr;
+ struct ibv_spec_header *flow_h;
+ void *flow_spec;
+ unsigned int specs_n;
+
+ if (parser.drop)
+ attr = parser.drop_q.ibv_attr;
+ else
+ attr = parser.queue[HASH_RXQ_ETH].ibv_attr;
+ if (flow->drop)
+ flow_attr = flow->drxq.ibv_attr;
+ else
+ flow_attr = flow->frxq[HASH_RXQ_ETH].ibv_attr;
+ /* Compare first the attributes. */
+ if (memcmp(attr, flow_attr, sizeof(struct ibv_flow_attr)))
+ continue;
+ if (attr->num_of_specs == 0)
+ continue;
+ spec = (void *)((uintptr_t)attr +
+ sizeof(struct ibv_flow_attr));
+ flow_spec = (void *)((uintptr_t)flow_attr +
+ sizeof(struct ibv_flow_attr));
+ specs_n = RTE_MIN(attr->num_of_specs, flow_attr->num_of_specs);
+		for (i = 0; i != specs_n; ++i) {
+			attr_h = spec;
+			flow_h = flow_spec;
+			if (memcmp(spec, flow_spec,
+				   RTE_MIN(attr_h->size, flow_h->size)))
+				goto wrong_flow;
+			/* Advance to the next specification of each flow. */
+			spec = (void *)((uintptr_t)spec + attr_h->size);
+			flow_spec = (void *)((uintptr_t)flow_spec +
+					     flow_h->size);
+		}
+		/* At this point, the flows match. */
+		break;
+wrong_flow:
+		/* The flow does not match. */
+		continue;
+	}
+ if (flow)
+ priv_flow_destroy(priv, &priv->flows, flow);
+exit:
+ if (parser.drop) {
+ rte_free(parser.drop_q.ibv_attr);
+ } else {
+ for (i = 0; i != hash_rxq_init_n; ++i) {
+ if (parser.queue[i].ibv_attr)
+ rte_free(parser.queue[i].ibv_attr);
+ }
+ }
+ return -ret;
+}
+
+/**
+ * Update queue for specific filter.
+ *
+ * @param priv
+ * Private structure.
+ * @param fdir_filter
+ * Filter to be updated.
+ *
+ * @return
+ * 0 on success, errno value on failure.
+ */
+static int
+priv_fdir_filter_update(struct priv *priv,
+ const struct rte_eth_fdir_filter *fdir_filter)
+{
+ int ret;
+
+ ret = priv_fdir_filter_delete(priv, fdir_filter);
+ if (ret)
+ return ret;
+ ret = priv_fdir_filter_add(priv, fdir_filter);
+ return ret;
+}
+
+/**
+ * Flush all filters.
+ *
+ * @param priv
+ * Private structure.
+ */
+static void
+priv_fdir_filter_flush(struct priv *priv)
+{
+ priv_flow_flush(priv, &priv->flows);
+}
+
+/**
+ * Get flow director information.
+ *
+ * @param priv
+ * Private structure.
+ * @param[out] fdir_info
+ * Resulting flow director information.
+ */
+static void
+priv_fdir_info_get(struct priv *priv, struct rte_eth_fdir_info *fdir_info)
+{
+ struct rte_eth_fdir_masks *mask =
+ &priv->dev->data->dev_conf.fdir_conf.mask;
+
+ fdir_info->mode = priv->dev->data->dev_conf.fdir_conf.mode;
+ fdir_info->guarant_spc = 0;
+ rte_memcpy(&fdir_info->mask, mask, sizeof(fdir_info->mask));
+ fdir_info->max_flexpayload = 0;
+ fdir_info->flow_types_mask[0] = 0;
+ fdir_info->flex_payload_unit = 0;
+ fdir_info->max_flex_payload_segment_num = 0;
+ fdir_info->flex_payload_limit = 0;
+ memset(&fdir_info->flex_conf, 0, sizeof(fdir_info->flex_conf));
+}
+
+/**
+ * Deal with flow director operations.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param filter_op
+ * Operation to perform.
+ * @param arg
+ * Pointer to operation-specific structure.
+ *
+ * @return
+ * 0 on success, errno value on failure.
+ */
+static int
+priv_fdir_ctrl_func(struct priv *priv, enum rte_filter_op filter_op, void *arg)
+{
+ enum rte_fdir_mode fdir_mode =
+ priv->dev->data->dev_conf.fdir_conf.mode;
+ int ret = 0;
+
+ if (filter_op == RTE_ETH_FILTER_NOP)
+ return 0;
+ if (fdir_mode != RTE_FDIR_MODE_PERFECT &&
+ fdir_mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
+ ERROR("%p: flow director mode %d not supported",
+ (void *)priv, fdir_mode);
+ return EINVAL;
+ }
+ switch (filter_op) {
+ case RTE_ETH_FILTER_ADD:
+ ret = priv_fdir_filter_add(priv, arg);
+ break;
+ case RTE_ETH_FILTER_UPDATE:
+ ret = priv_fdir_filter_update(priv, arg);
+ break;
+ case RTE_ETH_FILTER_DELETE:
+ ret = priv_fdir_filter_delete(priv, arg);
+ break;
+ case RTE_ETH_FILTER_FLUSH:
+ priv_fdir_filter_flush(priv);
+ break;
+ case RTE_ETH_FILTER_INFO:
+ priv_fdir_info_get(priv, arg);
+ break;
+ default:
+ DEBUG("%p: unknown operation %u", (void *)priv,
+ filter_op);
+ ret = EINVAL;
+ break;
+ }
+ return ret;
+}
+
+/**
+ * Manage filter operations.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param filter_type
+ * Filter type.
+ * @param filter_op
+ * Operation to perform.
+ * @param arg
+ * Pointer to operation-specific structure.
+ *
+ * @return
+ * 0 on success, negative errno value on failure.
+ */
+int
+mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ int ret = EINVAL;
+ struct priv *priv = dev->data->dev_private;
+
+ switch (filter_type) {
+ case RTE_ETH_FILTER_GENERIC:
+ if (filter_op != RTE_ETH_FILTER_GET)
+ return -EINVAL;
+ *(const void **)arg = &mlx5_flow_ops;
+ return 0;
+ case RTE_ETH_FILTER_FDIR:
+ priv_lock(priv);
+ ret = priv_fdir_ctrl_func(priv, filter_op, arg);
+ priv_unlock(priv);
+ break;
+ default:
+ ERROR("%p: filter type (%d) not supported",
+ (void *)dev, filter_type);
+ break;
+ }
+ return -ret;
+}