Diffstat (limited to 'drivers/net/mlx5')
 drivers/net/mlx5/Makefile          |   4
 drivers/net/mlx5/mlx5.c            |   7
 drivers/net/mlx5/mlx5_flow.c       |  19
 drivers/net/mlx5/mlx5_flow_dv.c    | 270
 drivers/net/mlx5/mlx5_flow_tcf.c   | 100
 drivers/net/mlx5/mlx5_flow_verbs.c |  25
 drivers/net/mlx5/mlx5_utils.h      |  10
 7 files changed, 249 insertions, 186 deletions
diff --git a/drivers/net/mlx5/Makefile b/drivers/net/mlx5/Makefile
index 7a50bccd..895cdfee 100644
--- a/drivers/net/mlx5/Makefile
+++ b/drivers/net/mlx5/Makefile
@@ -51,7 +51,7 @@ CFLAGS += -D_DEFAULT_SOURCE
CFLAGS += -D_XOPEN_SOURCE=600
CFLAGS += $(WERROR_FLAGS)
CFLAGS += -Wno-strict-prototypes
-CFLAGS += $(shell pkg-config --cflags libmnl)
+CFLAGS += $(shell command -v pkg-config > /dev/null 2>&1 && pkg-config --cflags libmnl)
ifeq ($(CONFIG_RTE_LIBRTE_MLX5_DLOPEN_DEPS),y)
CFLAGS += -DMLX5_GLUE='"$(LIB_GLUE)"'
CFLAGS += -DMLX5_GLUE_VERSION='"$(LIB_GLUE_VERSION)"'
@@ -60,7 +60,7 @@ LDLIBS += -ldl
else
LDLIBS += -libverbs -lmlx5
endif
-LDLIBS += $(shell pkg-config --libs libmnl)
+LDLIBS += $(shell command -v pkg-config > /dev/null 2>&1 && pkg-config --libs libmnl || echo "-lmnl")
LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
LDLIBS += -lrte_bus_pci
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index ed1fcfc7..9e5cab16 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -347,11 +347,6 @@ mlx5_dev_close(struct rte_eth_dev *dev)
memset(priv, 0, sizeof(*priv));
priv->domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
/*
- * flag to rte_eth_dev_close() that it should release the port resources
- * (calling rte_eth_dev_release_port()) in addition to closing it.
- */
- dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
- /*
* Reset mac_addrs to NULL such that it is not freed as part of
* rte_eth_dev_release_port(). mac_addrs is part of dev_private so
* it is freed when dev_private is freed.
@@ -1114,6 +1109,8 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
err = ENOMEM;
goto error;
}
+ /* Flag to call rte_eth_dev_release_port() in rte_eth_dev_close(). */
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
if (priv->representor) {
eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
eth_dev->data->representor_id = priv->representor_id;
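
For context, a minimal application-side sketch (illustration only, not part of the patch; the helper name and teardown order are mine): with RTE_ETH_DEV_CLOSE_REMOVE now set at spawn time, a plain rte_eth_dev_close() also releases the port data.

    #include <rte_ethdev.h>

    /* Illustrative teardown helper, assuming the mlx5 port was configured
     * and started elsewhere. */
    static void
    app_close_port(uint16_t port_id)
    {
        rte_eth_dev_stop(port_id);
        /* RTE_ETH_DEV_CLOSE_REMOVE is set by the PMD, so close also
         * releases the port via rte_eth_dev_release_port(). */
        rte_eth_dev_close(port_id);
    }
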
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 3c2ac4b3..5ad3a11a 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -1178,6 +1178,12 @@ mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
"L3 cannot follow an L4 layer.");
if (!mask)
mask = &rte_flow_item_ipv4_mask;
+ else if (mask->hdr.next_proto_id != 0 &&
+ mask->hdr.next_proto_id != 0xff)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
+ "partial mask is not supported"
+ " for protocol");
ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
(const uint8_t *)&nic_mask,
sizeof(struct rte_flow_item_ipv4),
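
To illustrate the new check (a hedged sketch, not from the patch; IPPROTO_UDP and the variable names are mine): only an all-zeros or all-ones next_proto_id mask passes validation now.

    #include <netinet/in.h>
    #include <rte_flow.h>

    /* Pattern item matching UDP-over-IPv4 with an exact protocol mask.
     * A partial mask such as 0x0f would now be rejected with
     * "partial mask is not supported for protocol". */
    static const struct rte_flow_item_ipv4 ipv4_spec = {
        .hdr = { .next_proto_id = IPPROTO_UDP },
    };
    static const struct rte_flow_item_ipv4 ipv4_mask = {
        .hdr = { .next_proto_id = 0xff }, /* 0x00 (ignore) or 0xff only. */
    };
    static const struct rte_flow_item ipv4_item = {
        .type = RTE_FLOW_ITEM_TYPE_IPV4,
        .spec = &ipv4_spec,
        .mask = &ipv4_mask,
    };
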
@@ -1234,17 +1240,6 @@ mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM, item,
"L3 cannot follow an L4 layer.");
- /*
- * IPv6 is not recognised by the NIC inside a GRE tunnel.
- * Such support has to be disabled as the rule will be
- * accepted. Issue reproduced with Mellanox OFED 4.3-3.0.2.1 and
- * Mellanox OFED 4.4-1.0.0.0.
- */
- if (tunnel && item_flags & MLX5_FLOW_LAYER_GRE)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM, item,
- "IPv6 inside a GRE tunnel is"
- " not recognised.");
if (!mask)
mask = &rte_flow_item_ipv6_mask;
ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
@@ -2657,7 +2652,7 @@ flow_fdir_cmp(const struct mlx5_fdir *f1, const struct mlx5_fdir *f2)
FLOW_FDIR_CMP(f1, f2, l3_mask) ||
FLOW_FDIR_CMP(f1, f2, l4) ||
FLOW_FDIR_CMP(f1, f2, l4_mask) ||
- FLOW_FDIR_CMP(f1, f2, actions[0]))
+ FLOW_FDIR_CMP(f1, f2, actions[0].type))
return 1;
if (f1->actions[0].type == RTE_FLOW_ACTION_TYPE_QUEUE &&
FLOW_FDIR_CMP(f1, f2, queue))
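
A brief aside on why actions[0].type is compared instead of the whole element (assuming FLOW_FDIR_CMP byte-compares the named field; the helpers below are illustrations only): struct rte_flow_action also carries a conf pointer, so a raw byte compare can report two logically identical filters as different.

    #include <string.h>
    #include <rte_flow.h>

    /* Byte-comparing the whole action also compares the .conf pointer
     * (and any padding), which may differ between equal filters. */
    static int
    fdir_action_differs_bytewise(const struct rte_flow_action *a,
                                 const struct rte_flow_action *b)
    {
        return memcmp(a, b, sizeof(*a)) != 0;
    }

    /* Comparing only the action kind, as the fixed code does. */
    static int
    fdir_action_differs(const struct rte_flow_action *a,
                        const struct rte_flow_action *b)
    {
        return a->type != b->type;
    }
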
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 79096153..a2edd168 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -814,10 +814,17 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
MLX5_FLOW_LAYER_OUTER_L3_IPV4;
if (items->mask != NULL &&
((const struct rte_flow_item_ipv4 *)
- items->mask)->hdr.next_proto_id)
+ items->mask)->hdr.next_proto_id) {
next_protocol =
((const struct rte_flow_item_ipv4 *)
(items->spec))->hdr.next_proto_id;
+ next_protocol &=
+ ((const struct rte_flow_item_ipv4 *)
+ (items->mask))->hdr.next_proto_id;
+ } else {
+ /* Reset for inner layer. */
+ next_protocol = 0xff;
+ }
break;
case RTE_FLOW_ITEM_TYPE_IPV6:
ret = mlx5_flow_validate_item_ipv6(items, item_flags,
@@ -828,10 +835,17 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
MLX5_FLOW_LAYER_OUTER_L3_IPV6;
if (items->mask != NULL &&
((const struct rte_flow_item_ipv6 *)
- items->mask)->hdr.proto)
+ items->mask)->hdr.proto) {
next_protocol =
((const struct rte_flow_item_ipv6 *)
items->spec)->hdr.proto;
+ next_protocol &=
+ ((const struct rte_flow_item_ipv6 *)
+ items->mask)->hdr.proto;
+ } else {
+ /* Reset for inner layer. */
+ next_protocol = 0xff;
+ }
break;
case RTE_FLOW_ITEM_TYPE_TCP:
ret = mlx5_flow_validate_item_tcp
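
The effective protocol carried into subsequent layer checks is now spec AND mask, falling back to 0xff (match any) when no usable mask is given. A minimal standalone sketch of that logic (function and parameter names are illustrative):

    #include <stddef.h>
    #include <stdint.h>

    /* Returns the protocol value that later items are validated against. */
    static uint8_t
    effective_next_protocol(const uint8_t *spec_proto, const uint8_t *mask_proto)
    {
        if (spec_proto != NULL && mask_proto != NULL && *mask_proto != 0)
            return (uint8_t)(*spec_proto & *mask_proto);
        /* Reset for inner layer. */
        return 0xff;
    }
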
@@ -1041,6 +1055,39 @@ flow_dv_prepare(const struct rte_flow_attr *attr __rte_unused,
return flow;
}
+#ifndef NDEBUG
+/**
+ * Sanity check for match mask and value. Similar to check_valid_spec() in
+ * the kernel driver. If an unmasked bit is present in the value, it returns
+ * failure.
+ *
+ * @param match_mask
+ *   Pointer to the match mask buffer.
+ * @param match_value
+ *   Pointer to the match value buffer.
+ *
+ * @return
+ * 0 if valid, -EINVAL otherwise.
+ */
+static int
+flow_dv_check_valid_spec(void *match_mask, void *match_value)
+{
+ uint8_t *m = match_mask;
+ uint8_t *v = match_value;
+ unsigned int i;
+
+ for (i = 0; i < MLX5_ST_SZ_DB(fte_match_param); ++i) {
+ if (v[i] & ~m[i]) {
+ DRV_LOG(ERR,
+ "match_value differs from match_criteria"
+ " %p[%u] != %p[%u]",
+ match_value, i, match_mask, i);
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+#endif
+
/**
* Add Ethernet item to matcher and to the value.
*
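
To make the intent of flow_dv_check_valid_spec() above concrete, here is a generic standalone sketch of the same unmasked-bit scan (the MLX5_ST_SZ_DB(fte_match_param) size and DRV_LOG() reporting are replaced with plain C; names are illustrative):

    #include <stddef.h>
    #include <stdint.h>

    /* Returns 0 when every set bit of value[] is also set in mask[]. */
    static int
    check_valid_spec(const uint8_t *mask, const uint8_t *value, size_t size)
    {
        size_t i;

        for (i = 0; i < size; ++i)
            if (value[i] & ~mask[i])
                return -1; /* Unmasked bit present in the value. */
        return 0;
    }
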
@@ -1750,114 +1797,6 @@ flow_dv_translate(struct rte_eth_dev *dev,
if (priority == MLX5_FLOW_PRIO_RSVD)
priority = priv->config.flow_prio - 1;
- for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
- int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
- void *match_mask = matcher.mask.buf;
- void *match_value = dev_flow->dv.value.buf;
-
- switch (items->type) {
- case RTE_FLOW_ITEM_TYPE_ETH:
- flow_dv_translate_item_eth(match_mask, match_value,
- items, tunnel);
- matcher.priority = MLX5_PRIORITY_MAP_L2;
- item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
- MLX5_FLOW_LAYER_OUTER_L2;
- break;
- case RTE_FLOW_ITEM_TYPE_VLAN:
- flow_dv_translate_item_vlan(match_mask, match_value,
- items, tunnel);
- matcher.priority = MLX5_PRIORITY_MAP_L2;
- item_flags |= tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
- MLX5_FLOW_LAYER_INNER_VLAN) :
- (MLX5_FLOW_LAYER_OUTER_L2 |
- MLX5_FLOW_LAYER_OUTER_VLAN);
- break;
- case RTE_FLOW_ITEM_TYPE_IPV4:
- flow_dv_translate_item_ipv4(match_mask, match_value,
- items, tunnel);
- matcher.priority = MLX5_PRIORITY_MAP_L3;
- dev_flow->dv.hash_fields |=
- mlx5_flow_hashfields_adjust
- (dev_flow, tunnel,
- MLX5_IPV4_LAYER_TYPES,
- MLX5_IPV4_IBV_RX_HASH);
- item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
- MLX5_FLOW_LAYER_OUTER_L3_IPV4;
- break;
- case RTE_FLOW_ITEM_TYPE_IPV6:
- flow_dv_translate_item_ipv6(match_mask, match_value,
- items, tunnel);
- matcher.priority = MLX5_PRIORITY_MAP_L3;
- dev_flow->dv.hash_fields |=
- mlx5_flow_hashfields_adjust
- (dev_flow, tunnel,
- MLX5_IPV6_LAYER_TYPES,
- MLX5_IPV6_IBV_RX_HASH);
- item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
- MLX5_FLOW_LAYER_OUTER_L3_IPV6;
- break;
- case RTE_FLOW_ITEM_TYPE_TCP:
- flow_dv_translate_item_tcp(match_mask, match_value,
- items, tunnel);
- matcher.priority = MLX5_PRIORITY_MAP_L4;
- dev_flow->dv.hash_fields |=
- mlx5_flow_hashfields_adjust
- (dev_flow, tunnel, ETH_RSS_TCP,
- IBV_RX_HASH_SRC_PORT_TCP |
- IBV_RX_HASH_DST_PORT_TCP);
- item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
- MLX5_FLOW_LAYER_OUTER_L4_TCP;
- break;
- case RTE_FLOW_ITEM_TYPE_UDP:
- flow_dv_translate_item_udp(match_mask, match_value,
- items, tunnel);
- matcher.priority = MLX5_PRIORITY_MAP_L4;
- dev_flow->verbs.hash_fields |=
- mlx5_flow_hashfields_adjust
- (dev_flow, tunnel, ETH_RSS_UDP,
- IBV_RX_HASH_SRC_PORT_UDP |
- IBV_RX_HASH_DST_PORT_UDP);
- item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
- MLX5_FLOW_LAYER_OUTER_L4_UDP;
- break;
- case RTE_FLOW_ITEM_TYPE_GRE:
- flow_dv_translate_item_gre(match_mask, match_value,
- items, tunnel);
- item_flags |= MLX5_FLOW_LAYER_GRE;
- break;
- case RTE_FLOW_ITEM_TYPE_NVGRE:
- flow_dv_translate_item_nvgre(match_mask, match_value,
- items, tunnel);
- item_flags |= MLX5_FLOW_LAYER_GRE;
- break;
- case RTE_FLOW_ITEM_TYPE_VXLAN:
- flow_dv_translate_item_vxlan(match_mask, match_value,
- items, tunnel);
- item_flags |= MLX5_FLOW_LAYER_VXLAN;
- break;
- case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
- flow_dv_translate_item_vxlan(match_mask, match_value,
- items, tunnel);
- item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE;
- break;
- case RTE_FLOW_ITEM_TYPE_META:
- flow_dv_translate_item_meta(match_mask, match_value,
- items);
- item_flags |= MLX5_FLOW_ITEM_METADATA;
- break;
- default:
- break;
- }
- }
- dev_flow->layers = item_flags;
- /* Register matcher. */
- matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
- matcher.mask.size);
- matcher.priority = mlx5_flow_adjust_priority(dev, priority,
- matcher.priority);
- matcher.egress = attr->egress;
- if (flow_dv_matcher_register(dev, &matcher, dev_flow, error))
- return -rte_errno;
for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
const struct rte_flow_action_queue *queue;
const struct rte_flow_action_rss *rss;
@@ -1991,6 +1930,116 @@ flow_dv_translate(struct rte_eth_dev *dev,
}
dev_flow->dv.actions_n = actions_n;
flow->actions = action_flags;
+ for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
+ int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
+ void *match_mask = matcher.mask.buf;
+ void *match_value = dev_flow->dv.value.buf;
+
+ switch (items->type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ flow_dv_translate_item_eth(match_mask, match_value,
+ items, tunnel);
+ matcher.priority = MLX5_PRIORITY_MAP_L2;
+ item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
+ MLX5_FLOW_LAYER_OUTER_L2;
+ break;
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ flow_dv_translate_item_vlan(match_mask, match_value,
+ items, tunnel);
+ matcher.priority = MLX5_PRIORITY_MAP_L2;
+ item_flags |= tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
+ MLX5_FLOW_LAYER_INNER_VLAN) :
+ (MLX5_FLOW_LAYER_OUTER_L2 |
+ MLX5_FLOW_LAYER_OUTER_VLAN);
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ flow_dv_translate_item_ipv4(match_mask, match_value,
+ items, tunnel);
+ matcher.priority = MLX5_PRIORITY_MAP_L3;
+ dev_flow->dv.hash_fields |=
+ mlx5_flow_hashfields_adjust
+ (dev_flow, tunnel,
+ MLX5_IPV4_LAYER_TYPES,
+ MLX5_IPV4_IBV_RX_HASH);
+ item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
+ MLX5_FLOW_LAYER_OUTER_L3_IPV4;
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ flow_dv_translate_item_ipv6(match_mask, match_value,
+ items, tunnel);
+ matcher.priority = MLX5_PRIORITY_MAP_L3;
+ dev_flow->dv.hash_fields |=
+ mlx5_flow_hashfields_adjust
+ (dev_flow, tunnel,
+ MLX5_IPV6_LAYER_TYPES,
+ MLX5_IPV6_IBV_RX_HASH);
+ item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
+ MLX5_FLOW_LAYER_OUTER_L3_IPV6;
+ break;
+ case RTE_FLOW_ITEM_TYPE_TCP:
+ flow_dv_translate_item_tcp(match_mask, match_value,
+ items, tunnel);
+ matcher.priority = MLX5_PRIORITY_MAP_L4;
+ dev_flow->dv.hash_fields |=
+ mlx5_flow_hashfields_adjust
+ (dev_flow, tunnel, ETH_RSS_TCP,
+ IBV_RX_HASH_SRC_PORT_TCP |
+ IBV_RX_HASH_DST_PORT_TCP);
+ item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
+ MLX5_FLOW_LAYER_OUTER_L4_TCP;
+ break;
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ flow_dv_translate_item_udp(match_mask, match_value,
+ items, tunnel);
+ matcher.priority = MLX5_PRIORITY_MAP_L4;
+ dev_flow->dv.hash_fields |=
+ mlx5_flow_hashfields_adjust
+ (dev_flow, tunnel, ETH_RSS_UDP,
+ IBV_RX_HASH_SRC_PORT_UDP |
+ IBV_RX_HASH_DST_PORT_UDP);
+ item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
+ MLX5_FLOW_LAYER_OUTER_L4_UDP;
+ break;
+ case RTE_FLOW_ITEM_TYPE_GRE:
+ flow_dv_translate_item_gre(match_mask, match_value,
+ items, tunnel);
+ item_flags |= MLX5_FLOW_LAYER_GRE;
+ break;
+ case RTE_FLOW_ITEM_TYPE_NVGRE:
+ flow_dv_translate_item_nvgre(match_mask, match_value,
+ items, tunnel);
+ item_flags |= MLX5_FLOW_LAYER_GRE;
+ break;
+ case RTE_FLOW_ITEM_TYPE_VXLAN:
+ flow_dv_translate_item_vxlan(match_mask, match_value,
+ items, tunnel);
+ item_flags |= MLX5_FLOW_LAYER_VXLAN;
+ break;
+ case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
+ flow_dv_translate_item_vxlan(match_mask, match_value,
+ items, tunnel);
+ item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE;
+ break;
+ case RTE_FLOW_ITEM_TYPE_META:
+ flow_dv_translate_item_meta(match_mask, match_value,
+ items);
+ item_flags |= MLX5_FLOW_ITEM_METADATA;
+ break;
+ default:
+ break;
+ }
+ }
+ assert(!flow_dv_check_valid_spec(matcher.mask.buf,
+ dev_flow->dv.value.buf));
+ dev_flow->layers = item_flags;
+ /* Register matcher. */
+ matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
+ matcher.mask.size);
+ matcher.priority = mlx5_flow_adjust_priority(dev, priority,
+ matcher.priority);
+ matcher.egress = attr->egress;
+ if (flow_dv_matcher_register(dev, &matcher, dev_flow, error))
+ return -rte_errno;
return 0;
}
@@ -2034,6 +2083,7 @@ flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
} else if (flow->actions &
(MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
struct mlx5_hrxq *hrxq;
+
hrxq = mlx5_hrxq_get(dev, flow->key,
MLX5_RSS_HASH_KEY_LEN,
dv->hash_fields,
diff --git a/drivers/net/mlx5/mlx5_flow_tcf.c b/drivers/net/mlx5/mlx5_flow_tcf.c
index fb817b23..97d2a54c 100644
--- a/drivers/net/mlx5/mlx5_flow_tcf.c
+++ b/drivers/net/mlx5/mlx5_flow_tcf.c
@@ -3847,30 +3847,6 @@ flow_tcf_alloc_nlcmd(struct tcf_nlcb_context *ctx, uint32_t size)
}
/**
- * Set NLM_F_ACK flags in the last netlink command in buffer.
- * Only last command in the buffer will be acked by system.
- *
- * @param[in, out] buf
- * Pointer to buffer with netlink commands.
- */
-static void
-flow_tcf_setack_nlcmd(struct tcf_nlcb_buf *buf)
-{
- struct nlmsghdr *nlh;
- uint32_t size = 0;
-
- assert(buf->size);
- do {
- nlh = (struct nlmsghdr *)&buf->msg[size];
- size += NLMSG_ALIGN(nlh->nlmsg_len);
- if (size >= buf->size) {
- nlh->nlmsg_flags |= NLM_F_ACK;
- break;
- }
- } while (true);
-}
-
-/**
* Send the buffers with prepared netlink commands. Scans the list and
* sends all found buffers. Buffers are sent and freed anyway in order
* to prevent memory leakage even if an error occurs for some message in the packet.
@@ -3888,21 +3864,35 @@ static int
flow_tcf_send_nlcmd(struct mlx5_flow_tcf_context *tcf,
struct tcf_nlcb_context *ctx)
{
- struct tcf_nlcb_buf *bc, *bn;
- struct nlmsghdr *nlh;
+ struct tcf_nlcb_buf *bc = LIST_FIRST(&ctx->nlbuf);
int ret = 0;
- bc = LIST_FIRST(&ctx->nlbuf);
while (bc) {
+ struct tcf_nlcb_buf *bn = LIST_NEXT(bc, next);
+ struct nlmsghdr *nlh;
+ uint32_t msg = 0;
int rc;
- bn = LIST_NEXT(bc, next);
- if (bc->size) {
- flow_tcf_setack_nlcmd(bc);
- nlh = (struct nlmsghdr *)&bc->msg;
- rc = flow_tcf_nl_ack(tcf, nlh, bc->size, NULL, NULL);
- if (rc && !ret)
- ret = rc;
+ while (msg < bc->size) {
+ /*
+ * Send the Netlink commands from the buffer one by one.
+ * If multiple rule deletion commands were packed into a
+ * single Netlink message and an error occurred, it could
+ * produce multiple ACK error messages and break the
+ * sequence numbers of the Netlink communication, because
+ * only a single ACK reply is expected.
+ */
+ assert((bc->size - msg) >= sizeof(struct nlmsghdr));
+ nlh = (struct nlmsghdr *)&bc->msg[msg];
+ assert((bc->size - msg) >= nlh->nlmsg_len);
+ msg += nlh->nlmsg_len;
+ rc = flow_tcf_nl_ack(tcf, nlh, 0, NULL, NULL);
+ if (rc) {
+ DRV_LOG(WARNING,
+ "netlink: cleanup error %d", rc);
+ if (!ret)
+ ret = rc;
+ }
}
rte_free(bc);
bc = bn;
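
The new loop walks the buffer one nlmsghdr at a time so each deletion command gets its own ACK. A simplified standalone sketch of that iteration (send_one stands in for the driver's flow_tcf_nl_ack helper):

    #include <assert.h>
    #include <stdint.h>
    #include <linux/netlink.h>

    /* Walk a buffer of concatenated netlink messages and submit them
     * individually, so one failing command cannot desynchronize the
     * sequence numbers expected for the single ACK reply. */
    static void
    for_each_nlmsg(uint8_t *buf, uint32_t size,
                   void (*send_one)(struct nlmsghdr *nlh))
    {
        uint32_t off = 0;

        while (off < size) {
            struct nlmsghdr *nlh = (struct nlmsghdr *)(void *)&buf[off];

            assert(size - off >= sizeof(*nlh));
            assert(size - off >= nlh->nlmsg_len);
            off += nlh->nlmsg_len;
            send_one(nlh);
        }
    }
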
@@ -3935,6 +3925,7 @@ flow_tcf_collect_local_cb(const struct nlmsghdr *nlh, void *arg)
struct nlattr *na_local = NULL;
struct nlattr *na_peer = NULL;
unsigned char family;
+ uint32_t size;
if (nlh->nlmsg_type != RTM_NEWADDR) {
rte_errno = EINVAL;
@@ -3962,11 +3953,11 @@ flow_tcf_collect_local_cb(const struct nlmsghdr *nlh, void *arg)
if (!na_local || !na_peer)
return 1;
/* Local rule found with scope link, permanent and assigned peer. */
- cmd = flow_tcf_alloc_nlcmd(ctx, MNL_ALIGN(sizeof(struct nlmsghdr)) +
- MNL_ALIGN(sizeof(struct ifaddrmsg)) +
- (family == AF_INET6
- ? 2 * SZ_NLATTR_DATA_OF(IPV6_ADDR_LEN)
- : 2 * SZ_NLATTR_TYPE_OF(uint32_t)));
+ size = MNL_ALIGN(sizeof(struct nlmsghdr)) +
+ MNL_ALIGN(sizeof(struct ifaddrmsg)) +
+ (family == AF_INET6 ? 2 * SZ_NLATTR_DATA_OF(IPV6_ADDR_LEN)
+ : 2 * SZ_NLATTR_TYPE_OF(uint32_t));
+ cmd = flow_tcf_alloc_nlcmd(ctx, size);
if (!cmd) {
rte_errno = ENOMEM;
return -rte_errno;
@@ -3991,6 +3982,7 @@ flow_tcf_collect_local_cb(const struct nlmsghdr *nlh, void *arg)
mnl_attr_put(cmd, IFA_ADDRESS, IPV6_ADDR_LEN,
mnl_attr_get_payload(na_peer));
}
+ assert(size == cmd->nlmsg_len);
return 1;
}
@@ -4059,6 +4051,7 @@ flow_tcf_collect_neigh_cb(const struct nlmsghdr *nlh, void *arg)
struct nlattr *na_ip = NULL;
struct nlattr *na_mac = NULL;
unsigned char family;
+ uint32_t size;
if (nlh->nlmsg_type != RTM_NEWNEIGH) {
rte_errno = EINVAL;
@@ -4085,12 +4078,12 @@ flow_tcf_collect_neigh_cb(const struct nlmsghdr *nlh, void *arg)
if (!na_mac || !na_ip)
return 1;
/* Neigh rule with permanent attribute found. */
- cmd = flow_tcf_alloc_nlcmd(ctx, MNL_ALIGN(sizeof(struct nlmsghdr)) +
- MNL_ALIGN(sizeof(struct ndmsg)) +
- SZ_NLATTR_DATA_OF(ETHER_ADDR_LEN) +
- (family == AF_INET6
- ? SZ_NLATTR_DATA_OF(IPV6_ADDR_LEN)
- : SZ_NLATTR_TYPE_OF(uint32_t)));
+ size = MNL_ALIGN(sizeof(struct nlmsghdr)) +
+ MNL_ALIGN(sizeof(struct ndmsg)) +
+ SZ_NLATTR_DATA_OF(ETHER_ADDR_LEN) +
+ (family == AF_INET6 ? SZ_NLATTR_DATA_OF(IPV6_ADDR_LEN)
+ : SZ_NLATTR_TYPE_OF(uint32_t));
+ cmd = flow_tcf_alloc_nlcmd(ctx, size);
if (!cmd) {
rte_errno = ENOMEM;
return -rte_errno;
@@ -4113,6 +4106,7 @@ flow_tcf_collect_neigh_cb(const struct nlmsghdr *nlh, void *arg)
}
mnl_attr_put(cmd, NDA_LLADDR, ETHER_ADDR_LEN,
mnl_attr_get_payload(na_mac));
+ assert(size == cmd->nlmsg_len);
return 1;
}
@@ -4179,6 +4173,7 @@ flow_tcf_collect_vxlan_cb(const struct nlmsghdr *nlh, void *arg)
struct nlattr *na_vxlan = NULL;
bool found = false;
unsigned int vxindex;
+ uint32_t size;
if (nlh->nlmsg_type != RTM_NEWLINK) {
rte_errno = EINVAL;
@@ -4224,9 +4219,10 @@ flow_tcf_collect_vxlan_cb(const struct nlmsghdr *nlh, void *arg)
return 1;
/* Attached VXLAN device found, store the command to delete. */
vxindex = ifm->ifi_index;
- cmd = flow_tcf_alloc_nlcmd(ctx, MNL_ALIGN(sizeof(struct nlmsghdr)) +
- MNL_ALIGN(sizeof(struct ifinfomsg)));
- if (!nlh) {
+ size = MNL_ALIGN(sizeof(struct nlmsghdr)) +
+ MNL_ALIGN(sizeof(struct ifinfomsg));
+ cmd = flow_tcf_alloc_nlcmd(ctx, size);
+ if (!cmd) {
rte_errno = ENOMEM;
return -rte_errno;
}
@@ -4236,6 +4232,7 @@ flow_tcf_collect_vxlan_cb(const struct nlmsghdr *nlh, void *arg)
ifm = mnl_nlmsg_put_extra_header(cmd, sizeof(*ifm));
ifm->ifi_family = AF_UNSPEC;
ifm->ifi_index = vxindex;
+ assert(size == cmd->nlmsg_len);
return 1;
}
@@ -5127,6 +5124,13 @@ flow_tcf_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
dev_flow->tcf.applied = 1;
return 0;
}
+ if (dev_flow->tcf.tunnel) {
+ /* Rollback the VTEP configuration if rule apply failed. */
+ assert(dev_flow->tcf.tunnel->vtep);
+ flow_tcf_vtep_release(ctx, dev_flow->tcf.tunnel->vtep,
+ dev_flow);
+ dev_flow->tcf.tunnel->vtep = NULL;
+ }
return rte_flow_error_set(error, rte_errno,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"netlink: failed to create TC flow rule");
diff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c
index 699cc88c..d6d95db5 100644
--- a/drivers/net/mlx5/mlx5_flow_verbs.c
+++ b/drivers/net/mlx5/mlx5_flow_verbs.c
@@ -1058,10 +1058,17 @@ flow_verbs_validate(struct rte_eth_dev *dev,
MLX5_FLOW_LAYER_OUTER_L3_IPV4;
if (items->mask != NULL &&
((const struct rte_flow_item_ipv4 *)
- items->mask)->hdr.next_proto_id)
+ items->mask)->hdr.next_proto_id) {
next_protocol =
((const struct rte_flow_item_ipv4 *)
(items->spec))->hdr.next_proto_id;
+ next_protocol &=
+ ((const struct rte_flow_item_ipv4 *)
+ (items->mask))->hdr.next_proto_id;
+ } else {
+ /* Reset for inner layer. */
+ next_protocol = 0xff;
+ }
break;
case RTE_FLOW_ITEM_TYPE_IPV6:
ret = mlx5_flow_validate_item_ipv6(items, item_flags,
@@ -1072,10 +1079,17 @@ flow_verbs_validate(struct rte_eth_dev *dev,
MLX5_FLOW_LAYER_OUTER_L3_IPV6;
if (items->mask != NULL &&
((const struct rte_flow_item_ipv6 *)
- items->mask)->hdr.proto)
+ items->mask)->hdr.proto) {
next_protocol =
((const struct rte_flow_item_ipv6 *)
items->spec)->hdr.proto;
+ next_protocol &=
+ ((const struct rte_flow_item_ipv6 *)
+ items->mask)->hdr.proto;
+ } else {
+ /* Reset for inner layer. */
+ next_protocol = 0xff;
+ }
break;
case RTE_FLOW_ITEM_TYPE_UDP:
ret = mlx5_flow_validate_item_udp(items, item_flags,
@@ -1125,13 +1139,6 @@ flow_verbs_validate(struct rte_eth_dev *dev,
error);
if (ret < 0)
return ret;
- if (next_protocol != 0xff &&
- next_protocol != IPPROTO_MPLS)
- return rte_flow_error_set
- (error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM, items,
- "protocol filtering not compatible"
- " with MPLS layer");
item_flags |= MLX5_FLOW_LAYER_MPLS;
break;
default:
diff --git a/drivers/net/mlx5/mlx5_utils.h b/drivers/net/mlx5/mlx5_utils.h
index 886f60e6..97092c74 100644
--- a/drivers/net/mlx5/mlx5_utils.h
+++ b/drivers/net/mlx5/mlx5_utils.h
@@ -15,6 +15,16 @@
#include "mlx5_defs.h"
+/*
+ * Compilation workaround for PPC64 when AltiVec is fully enabled, e.g. std=c11.
+ * Otherwise there would be a type conflict between stdbool and altivec.
+ */
+#if defined(__PPC64__) && !defined(__APPLE_ALTIVEC__)
+#undef bool
+/* redefine as in stdbool.h */
+#define bool _Bool
+#endif
+
/* Bit-field manipulation. */
#define BITFIELD_DECLARE(bf, type, size) \
type bf[(((size_t)(size) / (sizeof(type) * CHAR_BIT)) + \
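
Regarding the PPC64/AltiVec workaround added above, a minimal sketch of a translation unit after the redefinition (assumptions: <altivec.h> is pulled in indirectly by platform headers; the variable is illustrative):

    #include <stdbool.h>

    /* On PPC64 with AltiVec fully enabled, <altivec.h> overrides "bool";
     * restoring the stdbool.h meaning keeps scalar declarations valid. */
    #if defined(__PPC64__) && !defined(__APPLE_ALTIVEC__)
    #undef bool
    #define bool _Bool /* redefine as in stdbool.h */
    #endif

    static bool link_is_up; /* plain scalar bool, unaffected by AltiVec */
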