Diffstat (limited to 'drivers/net/i40e/i40e_flow.c')
-rw-r--r--  drivers/net/i40e/i40e_flow.c  559
1 file changed, 461 insertions(+), 98 deletions(-)
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 7e4936e3..16c47cf7 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -1,33 +1,5 @@
-/*-
- * BSD LICENSE
- *
- * Copyright (c) 2016-2017 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2017 Intel Corporation
*/
#include <sys/queue.h>
@@ -39,7 +11,7 @@
#include <stdarg.h>
#include <rte_ether.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_eth_ctrl.h>
@@ -138,6 +110,8 @@ static int i40e_flow_flush_fdir_filter(struct i40e_pf *pf);
static int i40e_flow_flush_ethertype_filter(struct i40e_pf *pf);
static int i40e_flow_flush_tunnel_filter(struct i40e_pf *pf);
static int
+i40e_flow_flush_rss_filter(struct rte_eth_dev *dev);
+static int
i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
@@ -2015,8 +1989,8 @@ i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
item_type = item->type;
switch (item_type) {
case RTE_FLOW_ITEM_TYPE_ETH:
- eth_spec = (const struct rte_flow_item_eth *)item->spec;
- eth_mask = (const struct rte_flow_item_eth *)item->mask;
+ eth_spec = item->spec;
+ eth_mask = item->mask;
/* Get the MAC info. */
if (!eth_spec || !eth_mask) {
rte_flow_error_set(error, EINVAL,
@@ -2101,7 +2075,7 @@ i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
}
if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
- act_q = (const struct rte_flow_action_queue *)act->conf;
+ act_q = act->conf;
filter->queue = act_q->index;
if (filter->queue >= pf->dev_data->nb_rx_queues) {
rte_flow_error_set(error, EINVAL,
@@ -2276,11 +2250,19 @@ i40e_flow_set_fdir_flex_pit(struct i40e_pf *pf,
uint8_t raw_id)
{
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
- uint32_t flx_pit;
+ uint32_t flx_pit, flx_ort;
uint8_t field_idx;
uint16_t min_next_off = 0; /* in words */
uint8_t i;
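+ /* With flexible payload in use, program the GLQF_ORT override
+ * table (entries 33-35, one per flex payload layer) with the
+ * flex field count and the layer's first field index.
+ */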
+ if (raw_id) {
+ flx_ort = (1 << I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT) |
+ (raw_id << I40E_GLQF_ORT_FIELD_CNT_SHIFT) |
+ (layer_idx * I40E_MAX_FLXPLD_FIED);
+ I40E_WRITE_REG(hw, I40E_GLQF_ORT(33 + layer_idx), flx_ort);
+ i40e_global_cfg_warning(I40E_WARNING_ENA_FLX_PLD);
+ }
+
/* Set flex pit */
for (i = 0; i < raw_id; i++) {
field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
@@ -2496,8 +2478,8 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
item_type = item->type;
switch (item_type) {
case RTE_FLOW_ITEM_TYPE_ETH:
- eth_spec = (const struct rte_flow_item_eth *)item->spec;
- eth_mask = (const struct rte_flow_item_eth *)item->mask;
+ eth_spec = item->spec;
+ eth_mask = item->mask;
if (eth_spec && eth_mask) {
if (!is_zero_ether_addr(&eth_mask->src) ||
@@ -2534,10 +2516,8 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
break;
case RTE_FLOW_ITEM_TYPE_VLAN:
- vlan_spec =
- (const struct rte_flow_item_vlan *)item->spec;
- vlan_mask =
- (const struct rte_flow_item_vlan *)item->mask;
+ vlan_spec = item->spec;
+ vlan_mask = item->mask;
if (vlan_spec && vlan_mask) {
if (vlan_mask->tci ==
rte_cpu_to_be_16(I40E_TCI_MASK)) {
@@ -2553,10 +2533,8 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
break;
case RTE_FLOW_ITEM_TYPE_IPV4:
l3 = RTE_FLOW_ITEM_TYPE_IPV4;
- ipv4_spec =
- (const struct rte_flow_item_ipv4 *)item->spec;
- ipv4_mask =
- (const struct rte_flow_item_ipv4 *)item->mask;
+ ipv4_spec = item->spec;
+ ipv4_mask = item->mask;
pctype = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
layer_idx = I40E_FLXPLD_L3_IDX;
@@ -2621,10 +2599,8 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
break;
case RTE_FLOW_ITEM_TYPE_IPV6:
l3 = RTE_FLOW_ITEM_TYPE_IPV6;
- ipv6_spec =
- (const struct rte_flow_item_ipv6 *)item->spec;
- ipv6_mask =
- (const struct rte_flow_item_ipv6 *)item->mask;
+ ipv6_spec = item->spec;
+ ipv6_mask = item->mask;
pctype = I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
layer_idx = I40E_FLXPLD_L3_IDX;
@@ -2692,8 +2668,8 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
outer_ip = false;
break;
case RTE_FLOW_ITEM_TYPE_TCP:
- tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
- tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+ tcp_spec = item->spec;
+ tcp_mask = item->mask;
if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
pctype =
@@ -2740,8 +2716,8 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
break;
case RTE_FLOW_ITEM_TYPE_UDP:
- udp_spec = (const struct rte_flow_item_udp *)item->spec;
- udp_mask = (const struct rte_flow_item_udp *)item->mask;
+ udp_spec = item->spec;
+ udp_mask = item->mask;
if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
pctype =
@@ -2793,8 +2769,8 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
return -rte_errno;
}
- gtp_spec = (const struct rte_flow_item_gtp *)item->spec;
- gtp_mask = (const struct rte_flow_item_gtp *)item->mask;
+ gtp_spec = item->spec;
+ gtp_mask = item->mask;
if (gtp_spec && gtp_mask) {
if (gtp_mask->v_pt_rsv_flags ||
@@ -2815,10 +2791,8 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
}
break;
case RTE_FLOW_ITEM_TYPE_SCTP:
- sctp_spec =
- (const struct rte_flow_item_sctp *)item->spec;
- sctp_mask =
- (const struct rte_flow_item_sctp *)item->mask;
+ sctp_spec = item->spec;
+ sctp_mask = item->mask;
if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
pctype =
@@ -2866,8 +2840,8 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
break;
case RTE_FLOW_ITEM_TYPE_RAW:
- raw_spec = (const struct rte_flow_item_raw *)item->spec;
- raw_mask = (const struct rte_flow_item_raw *)item->mask;
+ raw_spec = item->spec;
+ raw_mask = item->mask;
if (!raw_spec || !raw_mask) {
rte_flow_error_set(error, EINVAL,
@@ -2877,6 +2851,14 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
return -rte_errno;
}
+ if (pf->support_multi_driver) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Unsupported flexible payload.");
+ return -rte_errno;
+ }
+
ret = i40e_flow_check_raw_item(item, raw_spec, error);
if (ret < 0)
return ret;
@@ -2935,7 +2917,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
raw_id++;
break;
case RTE_FLOW_ITEM_TYPE_VF:
- vf_spec = (const struct rte_flow_item_vf *)item->spec;
+ vf_spec = item->spec;
filter->input.flow_ext.is_vf = 1;
filter->input.flow_ext.dst_id = vf_spec->id;
if (filter->input.flow_ext.is_vf &&
@@ -3027,7 +3009,7 @@ i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
NEXT_ITEM_OF_ACTION(act, actions, index);
switch (act->type) {
case RTE_FLOW_ACTION_TYPE_QUEUE:
- act_q = (const struct rte_flow_action_queue *)act->conf;
+ act_q = act->conf;
filter->action.rx_queue = act_q->index;
if ((!filter->input.flow_ext.is_vf &&
filter->action.rx_queue >= pf->dev_data->nb_rx_queues) ||
@@ -3058,7 +3040,7 @@ i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
NEXT_ITEM_OF_ACTION(act, actions, index);
switch (act->type) {
case RTE_FLOW_ACTION_TYPE_MARK:
- mark_spec = (const struct rte_flow_action_mark *)act->conf;
+ mark_spec = act->conf;
filter->action.report_status = I40E_FDIR_REPORT_ID;
filter->soft_id = mark_spec->id;
break;
@@ -3149,7 +3131,7 @@ i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
}
if (act->type == RTE_FLOW_ACTION_TYPE_VF) {
- act_vf = (const struct rte_flow_action_vf *)act->conf;
+ act_vf = act->conf;
filter->vf_id = act_vf->id;
filter->is_to_vf = 1;
if (filter->vf_id >= pf->vf_num) {
@@ -3164,7 +3146,7 @@ i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
index++;
NEXT_ITEM_OF_ACTION(act, actions, index);
if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
- act_q = (const struct rte_flow_action_queue *)act->conf;
+ act_q = act->conf;
filter->queue_id = act_q->index;
if ((!filter->is_to_vf) &&
(filter->queue_id >= pf->dev_data->nb_rx_queues)) {
@@ -3256,8 +3238,8 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
item_type = item->type;
switch (item_type) {
case RTE_FLOW_ITEM_TYPE_ETH:
- eth_spec = (const struct rte_flow_item_eth *)item->spec;
- eth_mask = (const struct rte_flow_item_eth *)item->mask;
+ eth_spec = item->spec;
+ eth_mask = item->mask;
/* Check if ETH item is used for place holder.
* If yes, both spec and mask should be NULL.
@@ -3300,10 +3282,8 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
}
break;
case RTE_FLOW_ITEM_TYPE_VLAN:
- vlan_spec =
- (const struct rte_flow_item_vlan *)item->spec;
- vlan_mask =
- (const struct rte_flow_item_vlan *)item->mask;
+ vlan_spec = item->spec;
+ vlan_mask = item->mask;
if (!(vlan_spec && vlan_mask)) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
@@ -3360,10 +3340,8 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
}
break;
case RTE_FLOW_ITEM_TYPE_VXLAN:
- vxlan_spec =
- (const struct rte_flow_item_vxlan *)item->spec;
- vxlan_mask =
- (const struct rte_flow_item_vxlan *)item->mask;
+ vxlan_spec = item->spec;
+ vxlan_mask = item->mask;
/* Check if VXLAN item is used to describe protocol.
* If yes, both spec and mask should be NULL.
* If no, both spec and mask shouldn't be NULL.
@@ -3489,8 +3467,8 @@ i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev,
item_type = item->type;
switch (item_type) {
case RTE_FLOW_ITEM_TYPE_ETH:
- eth_spec = (const struct rte_flow_item_eth *)item->spec;
- eth_mask = (const struct rte_flow_item_eth *)item->mask;
+ eth_spec = item->spec;
+ eth_mask = item->mask;
/* Check if ETH item is used for place holder.
* If yes, both spec and mask should be NULL.
@@ -3534,10 +3512,8 @@ i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev,
break;
case RTE_FLOW_ITEM_TYPE_VLAN:
- vlan_spec =
- (const struct rte_flow_item_vlan *)item->spec;
- vlan_mask =
- (const struct rte_flow_item_vlan *)item->mask;
+ vlan_spec = item->spec;
+ vlan_mask = item->mask;
if (!(vlan_spec && vlan_mask)) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
@@ -3582,10 +3558,8 @@ i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev,
}
break;
case RTE_FLOW_ITEM_TYPE_NVGRE:
- nvgre_spec =
- (const struct rte_flow_item_nvgre *)item->spec;
- nvgre_mask =
- (const struct rte_flow_item_nvgre *)item->mask;
+ nvgre_spec = item->spec;
+ nvgre_mask = item->mask;
/* Check if NVGRE item is used to describe protocol.
* If yes, both spec and mask should be NULL.
* If no, both spec and mask shouldn't be NULL.
@@ -3610,6 +3584,41 @@ i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev,
"Invalid TNI mask");
return -rte_errno;
}
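+ /* Beyond the TNI, only the key-present GRE flag layout
+ * (c_k_s_rsvd0_ver == 0x2000, i.e. the K bit set) is accepted,
+ * and the protocol, when masked, must be 0x6558 (Transparent
+ * Ethernet Bridging), the ethertype NVGRE encapsulates.
+ */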
+ if (nvgre_mask->protocol &&
+ nvgre_mask->protocol != 0xFFFF) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid NVGRE item");
+ return -rte_errno;
+ }
+ if (nvgre_mask->c_k_s_rsvd0_ver &&
+ nvgre_mask->c_k_s_rsvd0_ver !=
+ rte_cpu_to_be_16(0xFFFF)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid NVGRE item");
+ return -rte_errno;
+ }
+ if (nvgre_spec->c_k_s_rsvd0_ver !=
+ rte_cpu_to_be_16(0x2000) &&
+ nvgre_mask->c_k_s_rsvd0_ver) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid NVGRE item");
+ return -rte_errno;
+ }
+ if (nvgre_mask->protocol &&
+ nvgre_spec->protocol !=
+ rte_cpu_to_be_16(0x6558)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid NVGRE item");
+ return -rte_errno;
+ }
rte_memcpy(((uint8_t *)&tenant_id_be + 1),
nvgre_spec->tni, 3);
filter->tenant_id =
@@ -3761,10 +3770,8 @@ i40e_flow_parse_mpls_pattern(__rte_unused struct rte_eth_dev *dev,
}
break;
case RTE_FLOW_ITEM_TYPE_MPLS:
- mpls_spec =
- (const struct rte_flow_item_mpls *)item->spec;
- mpls_mask =
- (const struct rte_flow_item_mpls *)item->mask;
+ mpls_spec = item->spec;
+ mpls_mask = item->mask;
if (!mpls_spec || !mpls_mask) {
rte_flow_error_set(error, EINVAL,
@@ -3900,10 +3907,8 @@ i40e_flow_parse_gtp_pattern(struct rte_eth_dev *dev,
break;
case RTE_FLOW_ITEM_TYPE_GTPC:
case RTE_FLOW_ITEM_TYPE_GTPU:
- gtp_spec =
- (const struct rte_flow_item_gtp *)item->spec;
- gtp_mask =
- (const struct rte_flow_item_gtp *)item->mask;
+ gtp_spec = item->spec;
+ gtp_mask = item->mask;
if (!gtp_spec || !gtp_mask) {
rte_flow_error_set(error, EINVAL,
@@ -4014,10 +4019,8 @@ i40e_flow_parse_qinq_pattern(__rte_unused struct rte_eth_dev *dev,
}
break;
case RTE_FLOW_ITEM_TYPE_VLAN:
- vlan_spec =
- (const struct rte_flow_item_vlan *)item->spec;
- vlan_mask =
- (const struct rte_flow_item_vlan *)item->mask;
+ vlan_spec = item->spec;
+ vlan_mask = item->mask;
if (!(vlan_spec && vlan_mask)) {
rte_flow_error_set(error, EINVAL,
@@ -4094,6 +4097,317 @@ i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
return ret;
}
+/**
+ * This function configures existing RSS on i40e with rte_flow.
+ * It also enables queue region configuration through the flow API.
+ * The pattern indicates which parameters the flow carries,
+ * such as user_priority or flowtype for queue region, or the hash
+ * function for RSS.
+ * The action carries parameters such as the queue index and hash
+ * function for RSS, or the flowtype for queue region configuration.
+ * For example:
+ * pattern:
+ * Case 1: only ETH, the flowtype for queue region will be parsed.
+ * Case 2: only VLAN, the user_priority for queue region will be parsed.
+ * Case 3: none, RSS related parameters will be parsed in the action.
+ * Any pattern other than ETH or VLAN, except END, is treated as invalid.
+ * So the choice of pattern depends on the purpose of configuring
+ * that flow.
+ * action:
+ * The RSS action is used to carry the valid parameters, via
+ * struct rte_flow_action_rss, for all three cases.
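+ *
+ * A minimal sketch of the Case 3 action an application could pass
+ * (illustrative only; it assumes this release's
+ * struct rte_flow_action_rss layout with a flexible queue[] array):
+ *   struct rte_eth_rss_conf conf = {
+ *           .rss_hf = ETH_RSS_NONFRAG_IPV4_TCP,
+ *   };
+ *   struct rte_flow_action_rss *rss =
+ *           malloc(sizeof(*rss) + 4 * sizeof(uint16_t));
+ *   int i;
+ *
+ *   rss->rss_conf = &conf;
+ *   rss->num = 4;
+ *   for (i = 0; i < 4; i++)
+ *           rss->queue[i] = i;
+ * together with an END-only pattern and actions RSS / END.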
+ */
+static int
+i40e_flow_parse_rss_pattern(__rte_unused struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ uint8_t *action_flag,
+ struct i40e_queue_regions *info)
+{
+ const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
+ const struct rte_flow_item *item = pattern;
+ enum rte_flow_item_type item_type;
+
+ if (item->type == RTE_FLOW_ITEM_TYPE_END)
+ return 0;
+
+ for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Not support range");
+ return -rte_errno;
+ }
+ item_type = item->type;
+ switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ *action_flag = 1;
+ break;
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ vlan_spec = item->spec;
+ vlan_mask = item->mask;
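+ /* The 3 user-priority (PCP) bits sit at the top of the
+ * big-endian TCI; a fully-masked TCI selects a queue region
+ * by user priority.
+ */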
+ if (vlan_spec && vlan_mask) {
+ if (vlan_mask->tci ==
+ rte_cpu_to_be_16(I40E_TCI_MASK)) {
+ info->region[0].user_priority[0] =
+ (rte_be_to_cpu_16(vlan_spec->tci) >> 13) & 0x7;
+ info->region[0].user_priority_num = 1;
+ info->queue_region_number = 1;
+ *action_flag = 0;
+ }
+ }
+ break;
+ default:
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Unsupported pattern item");
+ return -rte_errno;
+ }
+ }
+
+ return 0;
+}
+
+static int
+i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error,
+ uint8_t action_flag,
+ struct i40e_queue_regions *conf_info,
+ union i40e_filter_t *filter)
+{
+ const struct rte_flow_action *act;
+ const struct rte_flow_action_rss *rss;
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_queue_regions *info = &pf->queue_region;
+ struct i40e_rte_flow_rss_conf *rss_config =
+ &filter->rss_conf;
+ struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
+ uint16_t i, j, n, tmp;
+ uint32_t index = 0;
+ uint64_t hf_bit = 1;
+
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ rss = act->conf;
+
+ /**
+ * RSS only supports forwarding;
+ * check that the first non-void action is RSS.
+ */
+ if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
+ memset(rss_config, 0, sizeof(struct i40e_rte_flow_rss_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ if (action_flag) {
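+ /* An ETH-only pattern selects a queue region by flow type:
+ * the first bit set in rss_hf is taken as the hardware
+ * flow type (PCTYPE) for region 0.
+ */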
+ for (n = 0; n < 64; n++) {
+ if (rss->rss_conf->rss_hf & (hf_bit << n)) {
+ conf_info->region[0].hw_flowtype[0] = n;
+ conf_info->region[0].flowtype_num = 1;
+ conf_info->queue_region_number = 1;
+ break;
+ }
+ }
+ }
+
+ for (n = 0; n < conf_info->queue_region_number; n++) {
+ if (conf_info->region[n].user_priority_num ||
+ conf_info->region[n].flowtype_num) {
+ if (!((rte_is_power_of_2(rss->num)) &&
+ rss->num <= 64)) {
+ PMD_DRV_LOG(ERR, "The region size should be any of the following values: 1, 2, 4, 8, 16, 32, 64 as long as the "
+ "total number of queues does not exceed the VSI allocation");
+ return -rte_errno;
+ }
+
+ if (conf_info->region[n].user_priority[n] >=
+ I40E_MAX_USER_PRIORITY) {
+ PMD_DRV_LOG(ERR, "the user priority max index is 7");
+ return -rte_errno;
+ }
+
+ if (conf_info->region[n].hw_flowtype[n] >=
+ I40E_FILTER_PCTYPE_MAX) {
+ PMD_DRV_LOG(ERR, "the hw_flowtype or PCTYPE max index is 63");
+ return -rte_errno;
+ }
+
+ if (rss_info->num < rss->num ||
+ rss_info->queue[0] < rss->queue[0] ||
+ (rss->queue[0] + rss->num >
+ rss_info->num + rss_info->queue[0])) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "no valid queues");
+ return -rte_errno;
+ }
+
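+ /* Reuse an existing region that already covers this queue
+ * set; otherwise create a new region entry for it.
+ */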
+ for (i = 0; i < info->queue_region_number; i++) {
+ if (info->region[i].queue_num == rss->num &&
+ info->region[i].queue_start_index ==
+ rss->queue[0])
+ break;
+ }
+
+ if (i == info->queue_region_number) {
+ if (i > I40E_REGION_MAX_INDEX) {
+ PMD_DRV_LOG(ERR, "the queue region max index is 7");
+ return -rte_errno;
+ }
+
+ info->region[i].queue_num =
+ rss->num;
+ info->region[i].queue_start_index =
+ rss->queue[0];
+ info->region[i].region_id =
+ info->queue_region_number;
+
+ j = info->region[i].user_priority_num;
+ tmp = conf_info->region[n].user_priority[0];
+ if (conf_info->region[n].user_priority_num) {
+ info->region[i].user_priority[j] = tmp;
+ info->region[i].user_priority_num++;
+ }
+
+ j = info->region[i].flowtype_num;
+ tmp = conf_info->region[n].hw_flowtype[0];
+ if (conf_info->region[n].flowtype_num) {
+ info->region[i].hw_flowtype[j] = tmp;
+ info->region[i].flowtype_num++;
+ }
+ info->queue_region_number++;
+ } else {
+ j = info->region[i].user_priority_num;
+ tmp = conf_info->region[n].user_priority[0];
+ if (conf_info->region[n].user_priority_num) {
+ info->region[i].user_priority[j] = tmp;
+ info->region[i].user_priority_num++;
+ }
+
+ j = info->region[i].flowtype_num;
+ tmp = conf_info->region[n].hw_flowtype[0];
+ if (conf_info->region[n].flowtype_num) {
+ info->region[i].hw_flowtype[j] = tmp;
+ info->region[i].flowtype_num++;
+ }
+ }
+ }
+
+ rss_config->queue_region_conf = TRUE;
+ }
+
+ if (rss_config->queue_region_conf)
+ return 0;
+
+ if (!rss || !rss->num) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "no valid queues");
+ return -rte_errno;
+ }
+
+ for (n = 0; n < rss->num; n++) {
+ if (rss->queue[n] >= dev->data->nb_rx_queues) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "queue id > max number of queues");
+ return -rte_errno;
+ }
+ }
+ if (rss->rss_conf)
+ rss_config->rss_conf = *rss->rss_conf;
+ else
+ rss_config->rss_conf.rss_hf =
+ pf->adapter->flow_types_mask;
+
+ for (n = 0; n < rss->num; ++n)
+ rss_config->queue[n] = rss->queue[n];
+ rss_config->num = rss->num;
+ index++;
+
+ /* check if the next not void action is END */
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ memset(rss_config, 0, sizeof(struct i40e_rte_flow_rss_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+ rss_config->queue_region_conf = FALSE;
+
+ return 0;
+}
+
+static int
+i40e_parse_rss_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ union i40e_filter_t *filter,
+ struct rte_flow_error *error)
+{
+ int ret;
+ struct i40e_queue_regions info;
+ uint8_t action_flag = 0;
+
+ memset(&info, 0, sizeof(struct i40e_queue_regions));
+
+ ret = i40e_flow_parse_rss_pattern(dev, pattern,
+ error, &action_flag, &info);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_rss_action(dev, actions, error,
+ action_flag, &info, filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_attr(attr, error);
+ if (ret)
+ return ret;
+
+ cons_filter_type = RTE_ETH_FILTER_HASH;
+
+ return 0;
+}
+
+static int
+i40e_config_rss_filter_set(struct rte_eth_dev *dev,
+ struct i40e_rte_flow_rss_conf *conf)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (conf->queue_region_conf) {
+ i40e_flush_queue_region_all_conf(dev, hw, pf, 1);
+ conf->queue_region_conf = 0;
+ } else {
+ i40e_config_rss_filter(pf, conf, 1);
+ }
+ return 0;
+}
+
+static int
+i40e_config_rss_filter_del(struct rte_eth_dev *dev,
+ struct i40e_rte_flow_rss_conf *conf)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
+
+ i40e_config_rss_filter(pf, conf, 0);
+ return 0;
+}
+
static int
i40e_flow_validate(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
@@ -4130,6 +4444,17 @@ i40e_flow_validate(struct rte_eth_dev *dev,
memset(&cons_filter, 0, sizeof(cons_filter));
+ /* Get the first non-void action */
+ while ((actions + i)->type == RTE_FLOW_ACTION_TYPE_VOID)
+ i++;
+
+ if ((actions + i)->type == RTE_FLOW_ACTION_TYPE_RSS) {
+ ret = i40e_parse_rss_filter(dev, attr, pattern,
+ actions, &cons_filter, error);
+ return ret;
+ }
+
+ i = 0;
/* Get the non-void item number of pattern */
while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
@@ -4217,6 +4542,11 @@ i40e_flow_create(struct rte_eth_dev *dev,
flow->rule = TAILQ_LAST(&pf->tunnel.tunnel_list,
i40e_tunnel_filter_list);
break;
+ case RTE_ETH_FILTER_HASH:
+ ret = i40e_config_rss_filter_set(dev,
+ &cons_filter.rss_conf);
+ flow->rule = &pf->rss_info;
+ break;
default:
goto free_flow;
}
@@ -4255,6 +4585,10 @@ i40e_flow_destroy(struct rte_eth_dev *dev,
ret = i40e_flow_add_del_fdir_filter(dev,
&((struct i40e_fdir_filter *)flow->rule)->fdir, 0);
break;
+ case RTE_ETH_FILTER_HASH:
+ ret = i40e_config_rss_filter_del(dev,
+ (struct i40e_rte_flow_rss_conf *)flow->rule);
+ break;
default:
PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
filter_type);
@@ -4397,6 +4731,14 @@ i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
return -rte_errno;
}
+ ret = i40e_flow_flush_rss_filter(dev);
+ if (ret) {
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to flush rss flows.");
+ return -rte_errno;
+ }
+
return ret;
}
@@ -4406,6 +4748,7 @@ i40e_flow_flush_fdir_filter(struct i40e_pf *pf)
struct rte_eth_dev *dev = pf->adapter->eth_dev;
struct i40e_fdir_info *fdir_info = &pf->fdir;
struct i40e_fdir_filter *fdir_filter;
+ enum i40e_filter_pctype pctype;
struct rte_flow *flow;
void *temp;
int ret;
@@ -4427,6 +4770,10 @@ i40e_flow_flush_fdir_filter(struct i40e_pf *pf)
rte_free(flow);
}
}
+
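+ /* All flow director rules are gone; clear the per-PCTYPE input
+ * set flags so a later rule can program a fresh input set.
+ */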
+ for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
+ pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++)
+ pf->fdir.inset_flag[pctype] = 0;
}
return ret;
@@ -4487,3 +4834,19 @@ i40e_flow_flush_tunnel_filter(struct i40e_pf *pf)
return ret;
}
+
+/* Remove the RSS filter */
+static int
+i40e_flow_flush_rss_filter(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int32_t ret = -EINVAL;
+
+ ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
+
+ if (rss_info->num)
+ ret = i40e_config_rss_filter(pf, rss_info, FALSE);
+ return ret;
+}