Diffstat (limited to 'drivers/net/i40e')
36 files changed, 3672 insertions, 1295 deletions
diff --git a/drivers/net/i40e/Makefile b/drivers/net/i40e/Makefile index 9ab8c84d..5663f5b1 100644 --- a/drivers/net/i40e/Makefile +++ b/drivers/net/i40e/Makefile @@ -1,33 +1,5 @@ -# BSD LICENSE -# -# Copyright(c) 2010-2017 Intel Corporation. All rights reserved. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in -# the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Intel Corporation nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2010-2017 Intel Corporation include $(RTE_SDK)/mk/rte.vars.mk @@ -114,6 +86,25 @@ SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_flow.c SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += rte_pmd_i40e.c SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_tm.c +ifeq ($(findstring RTE_MACHINE_CPUFLAG_AVX2,$(CFLAGS)),RTE_MACHINE_CPUFLAG_AVX2) + CC_AVX2_SUPPORT=1 +else + CC_AVX2_SUPPORT=\ + $(shell $(CC) -march=core-avx2 -dM -E - </dev/null 2>&1 | \ + grep -q AVX2 && echo 1) + ifeq ($(CC_AVX2_SUPPORT), 1) + ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y) + CFLAGS_i40e_rxtx_vec_avx2.o += -march=core-avx2 + else + CFLAGS_i40e_rxtx_vec_avx2.o += -mavx2 + endif + endif +endif + +ifeq ($(CC_AVX2_SUPPORT), 1) + SRCS-$(CONFIG_RTE_LIBRTE_I40E_INC_VECTOR) += i40e_rxtx_vec_avx2.c +endif + # install this header file SYMLINK-$(CONFIG_RTE_LIBRTE_I40E_PMD)-include := rte_pmd_i40e.h diff --git a/drivers/net/i40e/base/README b/drivers/net/i40e/base/README index 59e76c21..247ba11d 100644 --- a/drivers/net/i40e/base/README +++ b/drivers/net/i40e/base/README @@ -34,7 +34,7 @@ IntelĀ® I40E driver ================== This directory contains source code of FreeBSD i40e driver of version -cid-i40e.2017.06.23.tar.gz released by the team which develops +cid-i40e.2018.01.02.tar.gz released by the team which develops basic drivers for any i40e NIC. The directory of base/ contains the original source package. 
This driver is valid for the product(s) listed below diff --git a/drivers/net/i40e/base/i40e_adminq.c b/drivers/net/i40e/base/i40e_adminq.c index 8cc8c5ec..612be883 100644 --- a/drivers/net/i40e/base/i40e_adminq.c +++ b/drivers/net/i40e/base/i40e_adminq.c @@ -688,12 +688,18 @@ enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw) (hw->aq.api_min_ver >= 7))) hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE; - if (hw->mac.type == I40E_MAC_XL710 && + if (hw->mac.type == I40E_MAC_XL710 && hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR && hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) { hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE; } + /* Newer versions of firmware require lock when reading the NVM */ + if ((hw->aq.api_maj_ver > 1) || + ((hw->aq.api_maj_ver == 1) && + (hw->aq.api_min_ver >= 5))) + hw->flags |= I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK; + if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) { ret_code = I40E_ERR_FIRMWARE_API_VERSION; goto init_adminq_free_arq; @@ -998,10 +1004,19 @@ enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw, /* update the error if time out occurred */ if ((!cmd_completed) && (!details->async && !details->postpone)) { - i40e_debug(hw, - I40E_DEBUG_AQ_MESSAGE, - "AQTX: Writeback timeout.\n"); - status = I40E_ERR_ADMIN_QUEUE_TIMEOUT; +#ifdef PF_DRIVER + if (rd32(hw, hw->aq.asq.len) & I40E_GL_ATQLEN_ATQCRIT_MASK) { +#else + if (rd32(hw, hw->aq.asq.len) & I40E_VF_ATQLEN1_ATQCRIT_MASK) { +#endif + i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, + "AQTX: AQ Critical error.\n"); + status = I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR; + } else { + i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, + "AQTX: Writeback timeout.\n"); + status = I40E_ERR_ADMIN_QUEUE_TIMEOUT; + } } asq_send_command_error: @@ -1063,22 +1078,19 @@ enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw, } /* set next_to_use to head */ -#ifdef PF_DRIVER #ifdef INTEGRATED_VF if (!i40e_is_vf(hw)) - ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK); + ntu = rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK; + else + ntu = rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK; #else - ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK); -#endif /* INTEGRATED_VF */ +#ifdef PF_DRIVER + ntu = rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK; #endif /* PF_DRIVER */ #ifdef VF_DRIVER -#ifdef INTEGRATED_VF - if (i40e_is_vf(hw)) - ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK); -#else - ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK); -#endif /* INTEGRATED_VF */ + ntu = rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK; #endif /* VF_DRIVER */ +#endif /* INTEGRATED_VF */ if (ntu == ntc) { /* nothing to do - shouldn't need to update ring's values */ ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK; @@ -1137,7 +1149,7 @@ enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw, hw->aq.arq.next_to_use = ntu; #ifdef PF_DRIVER - i40e_nvmupd_check_wait_event(hw, LE16_TO_CPU(e->desc.opcode)); + i40e_nvmupd_check_wait_event(hw, LE16_TO_CPU(e->desc.opcode), &e->desc); #endif /* PF_DRIVER */ clean_arq_element_out: /* Set pending if needed, unlock and return */ diff --git a/drivers/net/i40e/base/i40e_adminq.h b/drivers/net/i40e/base/i40e_adminq.h index 182e40b9..de4ab3f3 100644 --- a/drivers/net/i40e/base/i40e_adminq.h +++ b/drivers/net/i40e/base/i40e_adminq.h @@ -159,9 +159,6 @@ STATIC INLINE int i40e_aq_rc_to_posix(int aq_ret, int aq_rc) /* general information */ #define I40E_AQ_LARGE_BUF 512 #define I40E_ASQ_CMD_TIMEOUT 250000 /* usecs */ -#ifdef I40E_ESS_SUPPORT -#define 
I40E_ASQ_CMD_TIMEOUT_ESS 50000000 /* usecs */ -#endif void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc, u16 opcode); diff --git a/drivers/net/i40e/base/i40e_adminq_cmd.h b/drivers/net/i40e/base/i40e_adminq_cmd.h index c36da2a3..801c0ff1 100644 --- a/drivers/net/i40e/base/i40e_adminq_cmd.h +++ b/drivers/net/i40e/base/i40e_adminq_cmd.h @@ -214,6 +214,7 @@ enum i40e_admin_queue_opc { /* DCB commands */ i40e_aqc_opc_dcb_ignore_pfc = 0x0301, i40e_aqc_opc_dcb_updated = 0x0302, + i40e_aqc_opc_set_dcb_parameters = 0x0303, /* TX scheduler */ i40e_aqc_opc_configure_vsi_bw_limit = 0x0400, @@ -262,6 +263,7 @@ enum i40e_admin_queue_opc { i40e_aqc_opc_nvm_update = 0x0703, i40e_aqc_opc_nvm_config_read = 0x0704, i40e_aqc_opc_nvm_config_write = 0x0705, + i40e_aqc_opc_nvm_progress = 0x0706, i40e_aqc_opc_oem_post_update = 0x0720, i40e_aqc_opc_thermal_sensor = 0x0721, @@ -1877,6 +1879,7 @@ enum i40e_aq_phy_type { I40E_PHY_TYPE_25GBASE_AOC = 0x23, I40E_PHY_TYPE_25GBASE_ACC = 0x24, I40E_PHY_TYPE_MAX, + I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP = 0xFD, I40E_PHY_TYPE_EMPTY = 0xFE, I40E_PHY_TYPE_DEFAULT = 0xFF, }; @@ -2182,8 +2185,8 @@ struct i40e_aqc_phy_register_access { #define I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE 2 u8 dev_addres; u8 reserved1[2]; - u32 reg_address; - u32 reg_value; + __le32 reg_address; + __le32 reg_value; u8 reserved2[4]; }; @@ -2195,8 +2198,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_phy_register_access); */ struct i40e_aqc_nvm_update { u8 command_flags; -#define I40E_AQ_NVM_LAST_CMD 0x01 -#define I40E_AQ_NVM_FLASH_ONLY 0x80 +#define I40E_AQ_NVM_LAST_CMD 0x01 +#define I40E_AQ_NVM_FLASH_ONLY 0x80 +#define I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT 1 +#define I40E_AQ_NVM_PRESERVATION_FLAGS_MASK 0x03 +#define I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED 0x03 +#define I40E_AQ_NVM_PRESERVATION_FLAGS_ALL 0x01 u8 module_pointer; __le16 length; __le32 offset; @@ -2456,6 +2463,17 @@ struct i40e_aqc_lldp_start { I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start); +/* Set DCB (direct 0x0303) */ +struct i40e_aqc_set_dcb_parameters { + u8 command; +#define I40E_AQ_DCB_SET_AGENT 0x1 +#define I40E_DCB_VALID 0x1 + u8 valid_flags; + u8 reserved[14]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_set_dcb_parameters); + /* Get CEE DCBX Oper Config (0x0A07) * uses the generic descriptor struct * returns below as indirect response diff --git a/drivers/net/i40e/base/i40e_common.c b/drivers/net/i40e/base/i40e_common.c index 900d379c..e0a5be14 100644 --- a/drivers/net/i40e/base/i40e_common.c +++ b/drivers/net/i40e/base/i40e_common.c @@ -310,6 +310,8 @@ const char *i40e_stat_str(struct i40e_hw *hw, enum i40e_status_code stat_err) return "I40E_NOT_SUPPORTED"; case I40E_ERR_FIRMWARE_API_VERSION: return "I40E_ERR_FIRMWARE_API_VERSION"; + case I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR: + return "I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR"; } snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err); @@ -1037,7 +1039,8 @@ enum i40e_status_code i40e_init_shared_code(struct i40e_hw *hw) hw->pf_id = (u8)(func_rid & 0x7); if (hw->mac.type == I40E_MAC_X722) - hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE; + hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE | + I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK; status = i40e_init_nvm(hw); return status; @@ -1393,7 +1396,7 @@ enum i40e_status_code i40e_pf_reset(struct i40e_hw *hw) break; reg2 = rd32(hw, I40E_GLGEN_RSTAT); if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) { - DEBUGOUT("Core reset upcoming.\n"); + DEBUGOUT("Core reset upcoming. 
Skipping PF reset request.\n"); DEBUGOUT1("I40E_GLGEN_RSTAT = 0x%x\n", reg2); return I40E_ERR_NOT_READY; } @@ -1585,6 +1588,7 @@ u32 i40e_led_get(struct i40e_hw *hw) case I40E_COMBINED_ACTIVITY: case I40E_FILTER_ACTIVITY: case I40E_MAC_ACTIVITY: + case I40E_LINK_ACTIVITY: continue; default: break; @@ -1633,6 +1637,7 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink) case I40E_COMBINED_ACTIVITY: case I40E_FILTER_ACTIVITY: case I40E_MAC_ACTIVITY: + case I40E_LINK_ACTIVITY: continue; default: break; @@ -1643,9 +1648,6 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink) gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK); - if (mode == I40E_LINK_ACTIVITY) - blink = false; - if (blink) gpio_val |= BIT(I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT); else @@ -1675,31 +1677,47 @@ enum i40e_status_code i40e_aq_get_phy_capabilities(struct i40e_hw *hw, { struct i40e_aq_desc desc; enum i40e_status_code status; + u16 max_delay = I40E_MAX_PHY_TIMEOUT, total_delay = 0; u16 abilities_size = sizeof(struct i40e_aq_get_phy_abilities_resp); if (!abilities) return I40E_ERR_PARAM; - i40e_fill_default_direct_cmd_desc(&desc, - i40e_aqc_opc_get_phy_abilities); + do { + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_get_phy_abilities); - desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); - if (abilities_size > I40E_AQ_LARGE_BUF) - desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF); + if (abilities_size > I40E_AQ_LARGE_BUF) + desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB); - if (qualified_modules) - desc.params.external.param0 |= + if (qualified_modules) + desc.params.external.param0 |= CPU_TO_LE32(I40E_AQ_PHY_REPORT_QUALIFIED_MODULES); - if (report_init) - desc.params.external.param0 |= + if (report_init) + desc.params.external.param0 |= CPU_TO_LE32(I40E_AQ_PHY_REPORT_INITIAL_VALUES); - status = i40e_asq_send_command(hw, &desc, abilities, abilities_size, - cmd_details); + status = i40e_asq_send_command(hw, &desc, abilities, + abilities_size, cmd_details); - if (hw->aq.asq_last_status == I40E_AQ_RC_EIO) - status = I40E_ERR_UNKNOWN_PHY; + if (status != I40E_SUCCESS) + break; + + if (hw->aq.asq_last_status == I40E_AQ_RC_EIO) { + status = I40E_ERR_UNKNOWN_PHY; + break; + } else if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN) { + i40e_msec_delay(1); + total_delay++; + status = I40E_ERR_TIMEOUT; + } + } while ((hw->aq.asq_last_status != I40E_AQ_RC_OK) && + (total_delay < max_delay)); + + if (status != I40E_SUCCESS) + return status; if (report_init) { if (hw->mac.type == I40E_MAC_XL710 && @@ -1753,6 +1771,8 @@ enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw, /** * i40e_set_fc * @hw: pointer to the hw struct + * @aq_failures: buffer to return AdminQ failure information + * @atomic_restart: whether to enable atomic link restart * * Set the requested flow control mode using set_phy_config. 
**/ @@ -2005,7 +2025,11 @@ enum i40e_status_code i40e_aq_get_link_info(struct i40e_hw *hw, if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR && hw->aq.api_min_ver >= 7) { - hw->phy.phy_types = LE32_TO_CPU(*(__le32 *)resp->link_type); + __le32 tmp; + + i40e_memcpy(&tmp, resp->link_type, sizeof(tmp), + I40E_NONDMA_TO_NONDMA); + hw->phy.phy_types = LE32_TO_CPU(tmp); hw->phy.phy_types |= ((u64)resp->link_type_ext << 32); } @@ -3107,8 +3131,8 @@ enum i40e_status_code i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid, * @mr_list: list of mirrored VSI SEIDs or VLAN IDs * @cmd_details: pointer to command details structure or NULL * @rule_id: Rule ID returned from FW - * @rule_used: Number of rules used in internal switch - * @rule_free: Number of rules free in internal switch + * @rules_used: Number of rules used in internal switch + * @rules_free: Number of rules free in internal switch * * Add/Delete a mirror rule to a specific switch. Mirror rules are supported for * VEBs/VEPA elements only @@ -3168,8 +3192,8 @@ static enum i40e_status_code i40e_mirrorrule_op(struct i40e_hw *hw, * @mr_list: list of mirrored VSI SEIDs or VLAN IDs * @cmd_details: pointer to command details structure or NULL * @rule_id: Rule ID returned from FW - * @rule_used: Number of rules used in internal switch - * @rule_free: Number of rules free in internal switch + * @rules_used: Number of rules used in internal switch + * @rules_free: Number of rules free in internal switch * * Add mirror rule. Mirror rules are supported for VEBs or VEPA elements only **/ @@ -3199,8 +3223,8 @@ enum i40e_status_code i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid, * add_mirrorrule. * @mr_list: list of mirrored VLAN IDs to be removed * @cmd_details: pointer to command details structure or NULL - * @rule_used: Number of rules used in internal switch - * @rule_free: Number of rules free in internal switch + * @rules_used: Number of rules used in internal switch + * @rules_free: Number of rules free in internal switch * * Delete a mirror rule. Mirror rules are supported for VEBs/VEPA elements only **/ @@ -3603,6 +3627,8 @@ enum i40e_status_code i40e_aq_write_nvm_config(struct i40e_hw *hw, /** * i40e_aq_oem_post_update - triggers an OEM specific flow after update * @hw: pointer to the hw struct + * @buff: buffer for result + * @buff_size: buffer size * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_oem_post_update(struct i40e_hw *hw, @@ -4053,13 +4079,14 @@ exit: * @length: length of the section to be written (in bytes from the offset) * @data: command buffer (size [bytes] = length) * @last_command: tells if this is the last command in a series + * @preservation_flags: Preservation mode flags * @cmd_details: pointer to command details structure or NULL * * Update the NVM using the admin queue commands **/ enum i40e_status_code i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer, u32 offset, u16 length, void *data, - bool last_command, + bool last_command, u8 preservation_flags, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; @@ -4080,6 +4107,16 @@ enum i40e_status_code i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer, /* If this is the last command in a series, set the proper flag. 
*/ if (last_command) cmd->command_flags |= I40E_AQ_NVM_LAST_CMD; + if (hw->mac.type == I40E_MAC_X722) { + if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_SELECTED) + cmd->command_flags |= + (I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED << + I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT); + else if (preservation_flags == I40E_NVM_PRESERVATION_FLAGS_ALL) + cmd->command_flags |= + (I40E_AQ_NVM_PRESERVATION_FLAGS_ALL << + I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT); + } cmd->module_pointer = module_pointer; cmd->offset = CPU_TO_LE32(offset); cmd->length = CPU_TO_LE16(length); @@ -4095,6 +4132,28 @@ i40e_aq_update_nvm_exit: } /** + * i40e_aq_nvm_progress + * @hw: pointer to the hw struct + * @progress: pointer to progress returned from AQ + * @cmd_details: pointer to command details structure or NULL + * + * Gets progress of flash rearrangement process + **/ +enum i40e_status_code i40e_aq_nvm_progress(struct i40e_hw *hw, u8 *progress, + struct i40e_asq_cmd_details *cmd_details) +{ + enum i40e_status_code status; + struct i40e_aq_desc desc; + + DEBUGFUNC("i40e_aq_nvm_progress"); + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_progress); + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + *progress = desc.params.raw[0]; + return status; +} + +/** * i40e_aq_get_lldp_mib * @hw: pointer to the hw struct * @bridge_type: type of bridge requested @@ -4408,7 +4467,34 @@ enum i40e_status_code i40e_aq_start_lldp(struct i40e_hw *hw, i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start); cmd->command = I40E_AQ_LLDP_AGENT_START; + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + return status; +} + +/** + * i40e_aq_set_dcb_parameters + * @hw: pointer to the hw struct + * @cmd_details: pointer to command details structure or NULL + * @dcb_enable: True if DCB configuration needs to be applied + * + **/ +enum i40e_status_code +i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_set_dcb_parameters *cmd = + (struct i40e_aqc_set_dcb_parameters *)&desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_dcb_parameters); + + if (dcb_enable) { + cmd->valid_flags = I40E_DCB_VALID; + cmd->command = I40E_AQ_DCB_SET_AGENT; + } status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; @@ -4476,7 +4562,6 @@ enum i40e_status_code i40e_aq_start_stop_dcbx(struct i40e_hw *hw, * i40e_aq_add_udp_tunnel * @hw: pointer to the hw struct * @udp_port: the UDP port to add in Host byte order - * @header_len: length of the tunneling header length in DWords * @protocol_index: protocol index type * @filter_index: pointer to filter index * @cmd_details: pointer to command details structure or NULL @@ -5219,6 +5304,7 @@ enum i40e_status_code i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, * @hw: pointer to the hw struct * @seid: seid of the switching component connected to Physical Port * @ets_data: Buffer holding ETS parameters + * @opcode: Tx scheduler AQ command opcode * @cmd_details: pointer to command details structure or NULL **/ enum i40e_status_code i40e_aq_config_switch_comp_ets(struct i40e_hw *hw, @@ -5581,10 +5667,10 @@ enum i40e_status_code i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, * @hw: pointer to the hw struct * @seid: VSI seid to add ethertype filter from **/ -#define I40E_FLOW_CONTROL_ETHTYPE 0x8808 void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw, u16 seid) { 
+#define I40E_FLOW_CONTROL_ETHTYPE 0x8808 u16 flag = I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC | I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP | I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX; @@ -6165,6 +6251,7 @@ void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status) * @ret_buff_size: actual buffer size returned * @ret_next_table: next block to read * @ret_next_index: next index to read + * @cmd_details: pointer to command details structure or NULL * * Dump internal FW/HW data for debug purposes. * @@ -6287,7 +6374,7 @@ enum i40e_status_code i40e_aq_configure_partition_bw(struct i40e_hw *hw, * i40e_read_phy_register_clause22 * @hw: pointer to the HW structure * @reg: register address in the page - * @phy_adr: PHY address on MDIO interface + * @phy_addr: PHY address on MDIO interface * @value: PHY register value * * Reads specified PHY register value @@ -6332,7 +6419,7 @@ enum i40e_status_code i40e_read_phy_register_clause22(struct i40e_hw *hw, * i40e_write_phy_register_clause22 * @hw: pointer to the HW structure * @reg: register address in the page - * @phy_adr: PHY address on MDIO interface + * @phy_addr: PHY address on MDIO interface * @value: PHY register value * * Writes specified PHY register value @@ -6373,7 +6460,7 @@ enum i40e_status_code i40e_write_phy_register_clause22(struct i40e_hw *hw, * @hw: pointer to the HW structure * @page: registers page number * @reg: register address in the page - * @phy_adr: PHY address on MDIO interface + * @phy_addr: PHY address on MDIO interface * @value: PHY register value * * Reads specified PHY register value @@ -6447,7 +6534,7 @@ phy_read_end: * @hw: pointer to the HW structure * @page: registers page number * @reg: register address in the page - * @phy_adr: PHY address on MDIO interface + * @phy_addr: PHY address on MDIO interface * @value: PHY register value * * Writes value to specified PHY register @@ -6514,7 +6601,7 @@ phy_write_end: * @hw: pointer to the HW structure * @page: registers page number * @reg: register address in the page - * @phy_adr: PHY address on MDIO interface + * @phy_addr: PHY address on MDIO interface * @value: PHY register value * * Writes value to specified PHY register @@ -6550,7 +6637,7 @@ enum i40e_status_code i40e_write_phy_register(struct i40e_hw *hw, * @hw: pointer to the HW structure * @page: registers page number * @reg: register address in the page - * @phy_adr: PHY address on MDIO interface + * @phy_addr: PHY address on MDIO interface * @value: PHY register value * * Reads specified PHY register value @@ -6585,7 +6672,6 @@ enum i40e_status_code i40e_read_phy_register(struct i40e_hw *hw, * i40e_get_phy_address * @hw: pointer to the HW structure * @dev_num: PHY port num that address we want - * @phy_addr: Returned PHY address * * Gets PHY address for current port **/ @@ -6672,6 +6758,64 @@ phy_blinking_end: } /** + * i40e_led_get_reg - read LED register + * @hw: pointer to the HW structure + * @led_addr: LED register address + * @reg_val: read register value + **/ +static enum i40e_status_code i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr, + u32 *reg_val) +{ + enum i40e_status_code status; + u8 phy_addr = 0; + + *reg_val = 0; + if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) { + status = i40e_aq_get_phy_register(hw, + I40E_AQ_PHY_REG_ACCESS_EXTERNAL, + I40E_PHY_COM_REG_PAGE, + I40E_PHY_LED_PROV_REG_1, + reg_val, NULL); + } else { + phy_addr = i40e_get_phy_address(hw, hw->port); + status = i40e_read_phy_register_clause45(hw, + I40E_PHY_COM_REG_PAGE, + led_addr, phy_addr, + (u16 *)reg_val); + } + return 
status; +} + +/** + * i40e_led_set_reg - write LED register + * @hw: pointer to the HW structure + * @led_addr: LED register address + * @reg_val: register value to write + **/ +static enum i40e_status_code i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr, + u32 reg_val) +{ + enum i40e_status_code status; + u8 phy_addr = 0; + + if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) { + status = i40e_aq_set_phy_register(hw, + I40E_AQ_PHY_REG_ACCESS_EXTERNAL, + I40E_PHY_COM_REG_PAGE, + I40E_PHY_LED_PROV_REG_1, + reg_val, NULL); + } else { + phy_addr = i40e_get_phy_address(hw, hw->port); + status = i40e_write_phy_register_clause45(hw, + I40E_PHY_COM_REG_PAGE, + led_addr, phy_addr, + (u16)reg_val); + } + + return status; +} + +/** * i40e_led_get_phy - return current on/off mode * @hw: pointer to the hw struct * @led_addr: address of led register to use @@ -6683,43 +6827,35 @@ enum i40e_status_code i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr, { enum i40e_status_code status = I40E_SUCCESS; u16 gpio_led_port; + u32 reg_val_aq; + u16 temp_addr; u8 phy_addr = 0; u16 reg_val; - u16 temp_addr; - u8 port_num; - u32 i; - u32 reg_val_aq; if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) { - status = - i40e_aq_get_phy_register(hw, - I40E_AQ_PHY_REG_ACCESS_EXTERNAL, - I40E_PHY_COM_REG_PAGE, - I40E_PHY_LED_PROV_REG_1, - ®_val_aq, NULL); - if (status) - return status; - *val = (u16)reg_val_aq; - } else { - temp_addr = I40E_PHY_LED_PROV_REG_1; - i = rd32(hw, I40E_PFGEN_PORTNUM); - port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK); - phy_addr = i40e_get_phy_address(hw, port_num); - - for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++, - temp_addr++) { - status = - i40e_read_phy_register_clause45(hw, + status = i40e_aq_get_phy_register(hw, + I40E_AQ_PHY_REG_ACCESS_EXTERNAL, + I40E_PHY_COM_REG_PAGE, + I40E_PHY_LED_PROV_REG_1, + ®_val_aq, NULL); + if (status == I40E_SUCCESS) + *val = (u16)reg_val_aq; + return status; + } + temp_addr = I40E_PHY_LED_PROV_REG_1; + phy_addr = i40e_get_phy_address(hw, hw->port); + for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++, + temp_addr++) { + status = i40e_read_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE, temp_addr, phy_addr, ®_val); - if (status) - return status; - *val = reg_val; - if (reg_val & I40E_PHY_LED_LINK_MODE_MASK) { - *led_addr = temp_addr; - break; - } + if (status) + return status; + *val = reg_val; + if (reg_val & I40E_PHY_LED_LINK_MODE_MASK) { + *led_addr = temp_addr; + break; } } return status; @@ -6729,7 +6865,9 @@ enum i40e_status_code i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr, * i40e_led_set_phy * @hw: pointer to the HW structure * @on: true or false + * @led_addr: address of led register to use * @mode: original val plus bit for set or ignore + * * Set led's on or off when controlled by the PHY * **/ @@ -6739,113 +6877,35 @@ enum i40e_status_code i40e_led_set_phy(struct i40e_hw *hw, bool on, enum i40e_status_code status = I40E_SUCCESS; u32 led_ctl = 0; u32 led_reg = 0; - u8 phy_addr = 0; - u8 port_num; - u32 i; - if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) { - status = - i40e_aq_get_phy_register(hw, - I40E_AQ_PHY_REG_ACCESS_EXTERNAL, - I40E_PHY_COM_REG_PAGE, - I40E_PHY_LED_PROV_REG_1, - &led_reg, NULL); - } else { - i = rd32(hw, I40E_PFGEN_PORTNUM); - port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK); - phy_addr = i40e_get_phy_address(hw, port_num); - status = i40e_read_phy_register_clause45(hw, - I40E_PHY_COM_REG_PAGE, - led_addr, phy_addr, - (u16 *)&led_reg); - } + status = i40e_led_get_reg(hw, 
led_addr, &led_reg); if (status) return status; led_ctl = led_reg; if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) { led_reg = 0; - if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR && - hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) { - status = i40e_aq_set_phy_register(hw, - I40E_AQ_PHY_REG_ACCESS_EXTERNAL, - I40E_PHY_COM_REG_PAGE, - I40E_PHY_LED_PROV_REG_1, - led_reg, NULL); - } else { - status = i40e_write_phy_register_clause45(hw, - I40E_PHY_COM_REG_PAGE, - led_addr, phy_addr, - (u16)led_reg); - } + status = i40e_led_set_reg(hw, led_addr, led_reg); if (status) return status; } - if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) { - status = - i40e_aq_get_phy_register(hw, - I40E_AQ_PHY_REG_ACCESS_EXTERNAL, - I40E_PHY_COM_REG_PAGE, - I40E_PHY_LED_PROV_REG_1, - &led_reg, NULL); - } else { - status = i40e_read_phy_register_clause45(hw, - I40E_PHY_COM_REG_PAGE, - led_addr, phy_addr, - (u16 *)&led_reg); - } + status = i40e_led_get_reg(hw, led_addr, &led_reg); if (status) goto restore_config; if (on) led_reg = I40E_PHY_LED_MANUAL_ON; else led_reg = 0; - - if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) { - status = - i40e_aq_set_phy_register(hw, - I40E_AQ_PHY_REG_ACCESS_EXTERNAL, - I40E_PHY_COM_REG_PAGE, - I40E_PHY_LED_PROV_REG_1, - led_reg, NULL); - } else { - status = - i40e_write_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE, - led_addr, phy_addr, - (u16)led_reg); - } + status = i40e_led_set_reg(hw, led_addr, led_reg); if (status) goto restore_config; if (mode & I40E_PHY_LED_MODE_ORIG) { led_ctl = (mode & I40E_PHY_LED_MODE_MASK); - if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) { - status = i40e_aq_set_phy_register(hw, - I40E_AQ_PHY_REG_ACCESS_EXTERNAL, - I40E_PHY_COM_REG_PAGE, - I40E_PHY_LED_PROV_REG_1, - led_ctl, NULL); - } else { - status = i40e_write_phy_register_clause45(hw, - I40E_PHY_COM_REG_PAGE, - led_addr, phy_addr, - (u16)led_ctl); - } + status = i40e_led_set_reg(hw, led_addr, led_ctl); } return status; + restore_config: - if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) { - status = - i40e_aq_set_phy_register(hw, - I40E_AQ_PHY_REG_ACCESS_EXTERNAL, - I40E_PHY_COM_REG_PAGE, - I40E_PHY_LED_PROV_REG_1, - led_ctl, NULL); - } else { - status = - i40e_write_phy_register_clause45(hw, - I40E_PHY_COM_REG_PAGE, - led_addr, phy_addr, - (u16)led_ctl); - } + status = i40e_led_set_reg(hw, led_addr, led_ctl); return status; } #endif /* PF_DRIVER */ @@ -7002,8 +7062,8 @@ enum i40e_status_code i40e_aq_set_phy_register(struct i40e_hw *hw, cmd->phy_interface = phy_select; cmd->dev_addres = dev_addr; - cmd->reg_address = reg_addr; - cmd->reg_value = reg_val; + cmd->reg_address = CPU_TO_LE32(reg_addr); + cmd->reg_value = CPU_TO_LE32(reg_val); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); @@ -7036,11 +7096,11 @@ enum i40e_status_code i40e_aq_get_phy_register(struct i40e_hw *hw, cmd->phy_interface = phy_select; cmd->dev_addres = dev_addr; - cmd->reg_address = reg_addr; + cmd->reg_address = CPU_TO_LE32(reg_addr); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); if (!status) - *reg_val = cmd->reg_value; + *reg_val = LE32_TO_CPU(cmd->reg_value); return status; } @@ -7111,9 +7171,9 @@ void i40e_vf_parse_hw_config(struct i40e_hw *hw, hw->dev_caps.num_rx_qp = msg->num_queue_pairs; hw->dev_caps.num_tx_qp = msg->num_queue_pairs; hw->dev_caps.num_msix_vectors_vf = msg->max_vectors; - hw->dev_caps.dcb = msg->vf_offload_flags & + hw->dev_caps.dcb = msg->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_L2; - hw->dev_caps.iwarp = (msg->vf_offload_flags & + 
hw->dev_caps.iwarp = (msg->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_IWARP) ? 1 : 0; for (i = 0; i < msg->num_vsis; i++) { if (vsi_res->vsi_type == VIRTCHNL_VSI_SRIOV) { @@ -7147,7 +7207,7 @@ enum i40e_status_code i40e_vf_reset(struct i40e_hw *hw) /** * i40e_aq_set_arp_proxy_config * @hw: pointer to the HW structure - * @proxy_config - pointer to proxy config command table struct + * @proxy_config: pointer to proxy config command table struct * @cmd_details: pointer to command details * * Set ARP offload parameters from pre-populated @@ -7333,7 +7393,6 @@ enum i40e_status_code i40e_aq_clear_all_wol_filters(struct i40e_hw *hw, return status; } - /** * i40e_aq_write_ddp - Write dynamic device personalization (ddp) * @hw: pointer to the hw struct @@ -7385,6 +7444,7 @@ i40e_status_code i40e_aq_write_ddp(struct i40e_hw *hw, void *buff, * @hw: pointer to the hw struct * @buff: command buffer (size in bytes = buff_size) * @buff_size: buffer size in bytes + * @flags: AdminQ command flags * @cmd_details: pointer to command details structure or NULL **/ enum diff --git a/drivers/net/i40e/base/i40e_dcb.c b/drivers/net/i40e/base/i40e_dcb.c index 9b5405db..7600c922 100644 --- a/drivers/net/i40e/base/i40e_dcb.c +++ b/drivers/net/i40e/base/i40e_dcb.c @@ -1277,6 +1277,67 @@ enum i40e_status_code i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen, /** + * _i40e_read_lldp_cfg - generic read of LLDP Configuration data from NVM + * @hw: pointer to the HW structure + * @lldp_cfg: pointer to hold lldp configuration variables + * @module: address of the module pointer + * @word_offset: offset of LLDP configuration + * + * Reads the LLDP configuration data from NVM using passed addresses + **/ +static enum i40e_status_code _i40e_read_lldp_cfg(struct i40e_hw *hw, + struct i40e_lldp_variables *lldp_cfg, + u8 module, u32 word_offset) +{ + u32 address, offset = (2 * word_offset); + enum i40e_status_code ret; + u16 mem; + + ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); + if (ret != I40E_SUCCESS) + return ret; + + ret = i40e_aq_read_nvm(hw, 0x0, module * 2, sizeof(mem), &mem, true, + NULL); + i40e_release_nvm(hw); + if (ret != I40E_SUCCESS) + return ret; + + /* Check if this pointer needs to be read in word size or 4K sector + * units. 
+ */ + if (mem & I40E_PTR_TYPE) + address = (0x7FFF & mem) * 4096; + else + address = (0x7FFF & mem) * 2; + + ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); + if (ret != I40E_SUCCESS) + goto err_lldp_cfg; + + ret = i40e_aq_read_nvm(hw, module, offset, sizeof(mem), &mem, true, + NULL); + i40e_release_nvm(hw); + if (ret != I40E_SUCCESS) + return ret; + + offset = mem + word_offset; + offset *= 2; + + ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); + if (ret != I40E_SUCCESS) + goto err_lldp_cfg; + + ret = i40e_aq_read_nvm(hw, 0, address + offset, + sizeof(struct i40e_lldp_variables), lldp_cfg, + true, NULL); + i40e_release_nvm(hw); + +err_lldp_cfg: + return ret; +} + +/** * i40e_read_lldp_cfg - read LLDP Configuration data from NVM * @hw: pointer to the HW structure * @lldp_cfg: pointer to hold lldp configuration variables @@ -1287,21 +1348,34 @@ enum i40e_status_code i40e_read_lldp_cfg(struct i40e_hw *hw, struct i40e_lldp_variables *lldp_cfg) { enum i40e_status_code ret = I40E_SUCCESS; - u32 offset = (2 * I40E_NVM_LLDP_CFG_PTR); + u32 mem; if (!lldp_cfg) return I40E_ERR_PARAM; ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); if (ret != I40E_SUCCESS) - goto err_lldp_cfg; + return ret; - ret = i40e_aq_read_nvm(hw, I40E_SR_EMP_MODULE_PTR, offset, - sizeof(struct i40e_lldp_variables), - (u8 *)lldp_cfg, - true, NULL); + ret = i40e_aq_read_nvm(hw, I40E_SR_NVM_CONTROL_WORD, 0, sizeof(mem), + &mem, true, NULL); i40e_release_nvm(hw); + if (ret != I40E_SUCCESS) + return ret; + + /* Read a bit that holds information whether we are running flat or + * structured NVM image. Flat image has LLDP configuration in shadow + * ram, so there is a need to pass different addresses for both cases. + */ + if (mem & I40E_SR_NVM_MAP_STRUCTURE_TYPE) { + /* Flat NVM case */ + ret = _i40e_read_lldp_cfg(hw, lldp_cfg, I40E_SR_EMP_MODULE_PTR, + I40E_SR_LLDP_CFG_PTR); + } else { + /* Good old structured NVM image */ + ret = _i40e_read_lldp_cfg(hw, lldp_cfg, I40E_EMP_MODULE_PTR, + I40E_NVM_LLDP_CFG_PTR); + } -err_lldp_cfg: return ret; } diff --git a/drivers/net/i40e/base/i40e_devids.h b/drivers/net/i40e/base/i40e_devids.h index f4a87842..66ff1ccf 100644 --- a/drivers/net/i40e/base/i40e_devids.h +++ b/drivers/net/i40e/base/i40e_devids.h @@ -76,4 +76,7 @@ POSSIBILITY OF SUCH DAMAGE. (d) == I40E_DEV_ID_QSFP_B || \ (d) == I40E_DEV_ID_QSFP_C) +#define i40e_is_25G_device(d) ((d) == I40E_DEV_ID_25G_B || \ + (d) == I40E_DEV_ID_25G_SFP28) + #endif /* _I40E_DEVIDS_H_ */ diff --git a/drivers/net/i40e/base/i40e_hmc.c b/drivers/net/i40e/base/i40e_hmc.c index 75d38412..502407bd 100644 --- a/drivers/net/i40e/base/i40e_hmc.c +++ b/drivers/net/i40e/base/i40e_hmc.c @@ -210,7 +210,6 @@ exit: * @hw: pointer to our HW structure * @hmc_info: pointer to the HMC configuration information structure * @idx: the page index - * @is_pf: distinguishes a VF from a PF * * This function: * 1. Marks the entry in pd tabe (for paged address mode) or in sd table diff --git a/drivers/net/i40e/base/i40e_nvm.c b/drivers/net/i40e/base/i40e_nvm.c index a1e78300..c77dac02 100644 --- a/drivers/net/i40e/base/i40e_nvm.c +++ b/drivers/net/i40e/base/i40e_nvm.c @@ -33,18 +33,6 @@ POSSIBILITY OF SUCH DAMAGE. 
#include "i40e_prototype.h" -enum i40e_status_code i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset, - u16 *data); -enum i40e_status_code i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset, - u16 *data); -enum i40e_status_code i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset, - u16 *words, u16 *data); -enum i40e_status_code i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset, - u16 *words, u16 *data); -enum i40e_status_code i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer, - u32 offset, u16 words, void *data, - bool last_command); - /** * i40e_init_nvm_ops - Initialize NVM function pointers * @hw: pointer to the HW structure @@ -207,52 +195,6 @@ static enum i40e_status_code i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw) } /** - * i40e_read_nvm_word - Reads nvm word and acquire lock if necessary - * @hw: pointer to the HW structure - * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) - * @data: word read from the Shadow RAM - * - * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register. - **/ -enum i40e_status_code i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, - u16 *data) -{ - enum i40e_status_code ret_code = I40E_SUCCESS; - - ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); - if (!ret_code) { - if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) { - ret_code = i40e_read_nvm_word_aq(hw, offset, data); - } else { - ret_code = i40e_read_nvm_word_srctl(hw, offset, data); - } - i40e_release_nvm(hw); - } - return ret_code; -} - -/** - * __i40e_read_nvm_word - Reads nvm word, assumes caller does the locking - * @hw: pointer to the HW structure - * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) - * @data: word read from the Shadow RAM - * - * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register. - **/ -enum i40e_status_code __i40e_read_nvm_word(struct i40e_hw *hw, - u16 offset, - u16 *data) -{ - enum i40e_status_code ret_code = I40E_SUCCESS; - - if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) - ret_code = i40e_read_nvm_word_aq(hw, offset, data); - else - ret_code = i40e_read_nvm_word_srctl(hw, offset, data); - return ret_code; -} - -/** * i40e_read_nvm_word_srctl - Reads Shadow RAM via SRCTL register * @hw: pointer to the HW structure * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) @@ -260,8 +202,9 @@ enum i40e_status_code __i40e_read_nvm_word(struct i40e_hw *hw, * * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register. **/ -enum i40e_status_code i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset, - u16 *data) +STATIC enum i40e_status_code i40e_read_nvm_word_srctl(struct i40e_hw *hw, + u16 offset, + u16 *data) { enum i40e_status_code ret_code = I40E_ERR_TIMEOUT; u32 sr_reg; @@ -303,15 +246,68 @@ read_nvm_exit: } /** + * i40e_read_nvm_aq - Read Shadow RAM. + * @hw: pointer to the HW structure. + * @module_pointer: module pointer location in words from the NVM beginning + * @offset: offset in words from module start + * @words: number of words to write + * @data: buffer with words to write to the Shadow RAM + * @last_command: tells the AdminQ that this is the last command + * + * Writes a 16 bit words buffer to the Shadow RAM using the admin command. 
+ **/ +STATIC enum i40e_status_code i40e_read_nvm_aq(struct i40e_hw *hw, + u8 module_pointer, u32 offset, + u16 words, void *data, + bool last_command) +{ + enum i40e_status_code ret_code = I40E_ERR_NVM; + struct i40e_asq_cmd_details cmd_details; + + DEBUGFUNC("i40e_read_nvm_aq"); + + memset(&cmd_details, 0, sizeof(cmd_details)); + cmd_details.wb_desc = &hw->nvm_wb_desc; + + /* Here we are checking the SR limit only for the flat memory model. + * We cannot do it for the module-based model, as we did not acquire + * the NVM resource yet (we cannot get the module pointer value). + * Firmware will check the module-based model. + */ + if ((offset + words) > hw->nvm.sr_size) + i40e_debug(hw, I40E_DEBUG_NVM, + "NVM write error: offset %d beyond Shadow RAM limit %d\n", + (offset + words), hw->nvm.sr_size); + else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS) + /* We can write only up to 4KB (one sector), in one AQ write */ + i40e_debug(hw, I40E_DEBUG_NVM, + "NVM write fail error: tried to write %d words, limit is %d.\n", + words, I40E_SR_SECTOR_SIZE_IN_WORDS); + else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS) + != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS)) + /* A single write cannot spread over two sectors */ + i40e_debug(hw, I40E_DEBUG_NVM, + "NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n", + offset, words); + else + ret_code = i40e_aq_read_nvm(hw, module_pointer, + 2 * offset, /*bytes*/ + 2 * words, /*bytes*/ + data, last_command, &cmd_details); + + return ret_code; +} + +/** * i40e_read_nvm_word_aq - Reads Shadow RAM via AQ * @hw: pointer to the HW structure * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) * @data: word read from the Shadow RAM * - * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register. + * Reads one 16 bit word from the Shadow RAM using the AdminQ **/ -enum i40e_status_code i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset, - u16 *data) +STATIC enum i40e_status_code i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset, + u16 *data) { enum i40e_status_code ret_code = I40E_ERR_TIMEOUT; @@ -324,55 +320,49 @@ enum i40e_status_code i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset, } /** - * __i40e_read_nvm_buffer - Reads nvm buffer, caller must acquire lock + * __i40e_read_nvm_word - Reads NVM word, assumes caller does the locking * @hw: pointer to the HW structure - * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF). - * @words: (in) number of words to read; (out) number of words actually read - * @data: words read from the Shadow RAM + * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) + * @data: word read from the Shadow RAM * - * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd() - * method. The buffer read is preceded by the NVM ownership take - * and followed by the release. + * Reads one 16 bit word from the Shadow RAM. + * + * Do not use this function except in cases where the nvm lock is already + * taken via i40e_acquire_nvm(). 
**/ -enum i40e_status_code __i40e_read_nvm_buffer(struct i40e_hw *hw, - u16 offset, - u16 *words, u16 *data) +enum i40e_status_code __i40e_read_nvm_word(struct i40e_hw *hw, + u16 offset, + u16 *data) { - enum i40e_status_code ret_code = I40E_SUCCESS; if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) - ret_code = i40e_read_nvm_buffer_aq(hw, offset, words, data); - else - ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data); - return ret_code; + return i40e_read_nvm_word_aq(hw, offset, data); + + return i40e_read_nvm_word_srctl(hw, offset, data); } /** - * i40e_read_nvm_buffer - Reads Shadow RAM buffer and acuire lock if necessary + * i40e_read_nvm_word - Reads NVM word, acquires lock if necessary * @hw: pointer to the HW structure - * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF). - * @words: (in) number of words to read; (out) number of words actually read - * @data: words read from the Shadow RAM + * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) + * @data: word read from the Shadow RAM * - * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd() - * method. The buffer read is preceded by the NVM ownership take - * and followed by the release. + * Reads one 16 bit word from the Shadow RAM. **/ -enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, - u16 *words, u16 *data) +enum i40e_status_code i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, + u16 *data) { enum i40e_status_code ret_code = I40E_SUCCESS; - if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) { + if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK) ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); - if (!ret_code) { - ret_code = i40e_read_nvm_buffer_aq(hw, offset, words, - data); - i40e_release_nvm(hw); - } - } else { - ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data); - } + + if (ret_code) + return ret_code; + ret_code = __i40e_read_nvm_word(hw, offset, data); + + if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK) + i40e_release_nvm(hw); return ret_code; } @@ -387,8 +377,8 @@ enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, * method. The buffer read is preceded by the NVM ownership take * and followed by the release. **/ -enum i40e_status_code i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset, - u16 *words, u16 *data) +STATIC enum i40e_status_code i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset, + u16 *words, u16 *data) { enum i40e_status_code ret_code = I40E_SUCCESS; u16 index, word; @@ -420,8 +410,8 @@ enum i40e_status_code i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset, * method. The buffer read is preceded by the NVM ownership take * and followed by the release. **/ -enum i40e_status_code i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset, - u16 *words, u16 *data) +STATIC enum i40e_status_code i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset, + u16 *words, u16 *data) { enum i40e_status_code ret_code; u16 read_size = *words; @@ -469,53 +459,51 @@ read_nvm_buffer_aq_exit: } /** - * i40e_read_nvm_aq - Read Shadow RAM. - * @hw: pointer to the HW structure. 
- * @module_pointer: module pointer location in words from the NVM beginning - * @offset: offset in words from module start - * @words: number of words to write - * @data: buffer with words to write to the Shadow RAM - * @last_command: tells the AdminQ that this is the last command + * __i40e_read_nvm_buffer - Reads NVM buffer, caller must acquire lock + * @hw: pointer to the HW structure + * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF). + * @words: (in) number of words to read; (out) number of words actually read + * @data: words read from the Shadow RAM * - * Writes a 16 bit words buffer to the Shadow RAM using the admin command. + * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd() + * method. **/ -enum i40e_status_code i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer, - u32 offset, u16 words, void *data, - bool last_command) +enum i40e_status_code __i40e_read_nvm_buffer(struct i40e_hw *hw, + u16 offset, + u16 *words, u16 *data) { - enum i40e_status_code ret_code = I40E_ERR_NVM; - struct i40e_asq_cmd_details cmd_details; + if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) + return i40e_read_nvm_buffer_aq(hw, offset, words, data); - DEBUGFUNC("i40e_read_nvm_aq"); + return i40e_read_nvm_buffer_srctl(hw, offset, words, data); +} - memset(&cmd_details, 0, sizeof(cmd_details)); - cmd_details.wb_desc = &hw->nvm_wb_desc; +/** + * i40e_read_nvm_buffer - Reads Shadow RAM buffer and acquire lock if necessary + * @hw: pointer to the HW structure + * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF). + * @words: (in) number of words to read; (out) number of words actually read + * @data: words read from the Shadow RAM + * + * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd() + * method. The buffer read is preceded by the NVM ownership take + * and followed by the release. + **/ +enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, + u16 *words, u16 *data) +{ + enum i40e_status_code ret_code = I40E_SUCCESS; - /* Here we are checking the SR limit only for the flat memory model. - * We cannot do it for the module-based model, as we did not acquire - * the NVM resource yet (we cannot get the module pointer value). - * Firmware will check the module-based model. 
- */ - if ((offset + words) > hw->nvm.sr_size) - i40e_debug(hw, I40E_DEBUG_NVM, - "NVM write error: offset %d beyond Shadow RAM limit %d\n", - (offset + words), hw->nvm.sr_size); - else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS) - /* We can write only up to 4KB (one sector), in one AQ write */ - i40e_debug(hw, I40E_DEBUG_NVM, - "NVM write fail error: tried to write %d words, limit is %d.\n", - words, I40E_SR_SECTOR_SIZE_IN_WORDS); - else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS) - != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS)) - /* A single write cannot spread over two sectors */ - i40e_debug(hw, I40E_DEBUG_NVM, - "NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n", - offset, words); - else - ret_code = i40e_aq_read_nvm(hw, module_pointer, - 2 * offset, /*bytes*/ - 2 * words, /*bytes*/ - data, last_command, &cmd_details); + if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) { + ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); + if (!ret_code) { + ret_code = i40e_read_nvm_buffer_aq(hw, offset, words, + data); + i40e_release_nvm(hw); + } + } else { + ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data); + } return ret_code; } @@ -561,7 +549,8 @@ enum i40e_status_code i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer, ret_code = i40e_aq_update_nvm(hw, module_pointer, 2 * offset, /*bytes*/ 2 * words, /*bytes*/ - data, last_command, &cmd_details); + data, last_command, 0, + &cmd_details); return ret_code; } @@ -650,16 +639,14 @@ enum i40e_status_code i40e_calc_nvm_checksum(struct i40e_hw *hw, u16 *checksum) data = (u16 *)vmem.va; /* read pointer to VPD area */ - ret_code = __i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, - &vpd_module); + ret_code = __i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module); if (ret_code != I40E_SUCCESS) { ret_code = I40E_ERR_NVM_CHECKSUM; goto i40e_calc_nvm_checksum_exit; } /* read pointer to PCIe Alt Auto-load module */ - ret_code = __i40e_read_nvm_word(hw, - I40E_SR_PCIE_ALT_AUTO_LOAD_PTR, + ret_code = __i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR, &pcie_alt_module); if (ret_code != I40E_SUCCESS) { ret_code = I40E_ERR_NVM_CHECKSUM; @@ -749,25 +736,19 @@ enum i40e_status_code i40e_validate_nvm_checksum(struct i40e_hw *hw, DEBUGFUNC("i40e_validate_nvm_checksum"); - /* acquire_nvm provides exclusive NVM lock to synchronize access across - * PFs. X710 uses i40e_read_nvm_word_srctl which polls for done bit - * twice (first time to be able to write address to I40E_GLNVM_SRCTL - * register, second to read data from I40E_GLNVM_SRDATA. One PF can see - * done bit and try to write address, while another one will interpret - * it as a good time to read data. It will cause invalid data to be - * read. + /* We must acquire the NVM lock in order to correctly synchronize the + * NVM accesses across multiple PFs. Without doing so it is possible + * for one of the PFs to read invalid data potentially indicating that + * the checksum is invalid. 
*/ ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); - if (!ret_code) { - ret_code = i40e_calc_nvm_checksum(hw, &checksum_local); + if (ret_code) + return ret_code; + ret_code = i40e_calc_nvm_checksum(hw, &checksum_local); + __i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr); i40e_release_nvm(hw); - if (ret_code != I40E_SUCCESS) - goto i40e_validate_nvm_checksum_exit; - } else { - goto i40e_validate_nvm_checksum_exit; - } - - i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr); + if (ret_code) + return ret_code; /* Verify read checksum from EEPROM is the same as * calculated checksum @@ -779,7 +760,6 @@ enum i40e_status_code i40e_validate_nvm_checksum(struct i40e_hw *hw, if (checksum) *checksum = checksum_local; -i40e_validate_nvm_checksum_exit: return ret_code; } @@ -810,6 +790,9 @@ STATIC enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw, STATIC enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw, struct i40e_nvm_access *cmd, u8 *bytes, int *perrno); +STATIC enum i40e_status_code i40e_nvmupd_get_aq_event(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno); STATIC INLINE u8 i40e_nvmupd_get_module(u32 val) { return (u8)(val & I40E_NVM_MOD_PNT_MASK); @@ -819,6 +802,12 @@ STATIC INLINE u8 i40e_nvmupd_get_transaction(u32 val) return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT); } +STATIC INLINE u8 i40e_nvmupd_get_preservation_flags(u32 val) +{ + return (u8)((val & I40E_NVM_PRESERVATION_FLAGS_MASK) >> + I40E_NVM_PRESERVATION_FLAGS_SHIFT); +} + STATIC const char *i40e_nvm_update_state_str[] = { "I40E_NVMUPD_INVALID", "I40E_NVMUPD_READ_CON", @@ -836,6 +825,7 @@ STATIC const char *i40e_nvm_update_state_str[] = { "I40E_NVMUPD_STATUS", "I40E_NVMUPD_EXEC_AQ", "I40E_NVMUPD_GET_AQ_RESULT", + "I40E_NVMUPD_GET_AQ_EVENT", }; /** @@ -907,7 +897,11 @@ enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw, /* Acquire lock to prevent race condition where adminq_task * can execute after i40e_nvmupd_nvm_read/write but before state - * variables (nvm_wait_opcode, nvm_release_on_done) are updated + * variables (nvm_wait_opcode, nvm_release_on_done) are updated. + * + * During NVMUpdate, it is observed that lock could be held for + * ~5ms for most commands. However lock is held for ~60ms for + * NVMUPD_CSUM_LCB command. 
*/ i40e_acquire_spinlock(&hw->aq.arq_spinlock); switch (hw->nvmupd_state) { @@ -929,8 +923,9 @@ enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw, * the wait info and return before doing anything else */ if (cmd->offset == 0xffff) { - i40e_nvmupd_check_wait_event(hw, hw->nvm_wait_opcode); - return I40E_SUCCESS; + i40e_nvmupd_clear_wait_state(hw); + status = I40E_SUCCESS; + break; } status = I40E_ERR_NOT_READY; @@ -945,6 +940,7 @@ enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw, *perrno = -ESRCH; break; } + i40e_release_spinlock(&hw->aq.arq_spinlock); return status; } @@ -1075,6 +1071,10 @@ STATIC enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw, status = i40e_nvmupd_get_aq_result(hw, cmd, bytes, perrno); break; + case I40E_NVMUPD_GET_AQ_EVENT: + status = i40e_nvmupd_get_aq_event(hw, cmd, bytes, perrno); + break; + default: i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: bad cmd %s in init state\n", @@ -1253,39 +1253,55 @@ retry: } /** - * i40e_nvmupd_check_wait_event - handle NVM update operation events + * i40e_nvmupd_clear_wait_state - clear wait state on hw * @hw: pointer to the hardware structure - * @opcode: the event that just happened **/ -void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode) +void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw) { - if (opcode == hw->nvm_wait_opcode) { + i40e_debug(hw, I40E_DEBUG_NVM, + "NVMUPD: clearing wait on opcode 0x%04x\n", + hw->nvm_wait_opcode); - i40e_debug(hw, I40E_DEBUG_NVM, - "NVMUPD: clearing wait on opcode 0x%04x\n", opcode); - if (hw->nvm_release_on_done) { - i40e_release_nvm(hw); - hw->nvm_release_on_done = false; - } - hw->nvm_wait_opcode = 0; + if (hw->nvm_release_on_done) { + i40e_release_nvm(hw); + hw->nvm_release_on_done = false; + } + hw->nvm_wait_opcode = 0; - if (hw->aq.arq_last_status) { - hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR; - return; - } + if (hw->aq.arq_last_status) { + hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR; + return; + } - switch (hw->nvmupd_state) { - case I40E_NVMUPD_STATE_INIT_WAIT: - hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; - break; + switch (hw->nvmupd_state) { + case I40E_NVMUPD_STATE_INIT_WAIT: + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; + break; - case I40E_NVMUPD_STATE_WRITE_WAIT: - hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING; - break; + case I40E_NVMUPD_STATE_WRITE_WAIT: + hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING; + break; - default: - break; - } + default: + break; + } +} + +/** + * i40e_nvmupd_check_wait_event - handle NVM update operation events + * @hw: pointer to the hardware structure + * @opcode: the event that just happened + * @desc: AdminQ descriptor + **/ +void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode, + struct i40e_aq_desc *desc) +{ + u32 aq_desc_len = sizeof(struct i40e_aq_desc); + + if (opcode == hw->nvm_wait_opcode) { + i40e_memcpy(&hw->nvm_aq_event_desc, desc, + aq_desc_len, I40E_NONDMA_TO_NONDMA); + i40e_nvmupd_clear_wait_state(hw); } } @@ -1343,6 +1359,9 @@ STATIC enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw, else if (module == 0) upd_cmd = I40E_NVMUPD_GET_AQ_RESULT; break; + case I40E_NVM_AQE: + upd_cmd = I40E_NVMUPD_GET_AQ_EVENT; + break; } break; @@ -1405,6 +1424,9 @@ STATIC enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw, u32 aq_data_len; i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__); + if (cmd->offset == 0xffff) + return I40E_SUCCESS; + memset(&cmd_details, 0, sizeof(cmd_details)); cmd_details.wb_desc = &hw->nvm_wb_desc; @@ -1441,6 +1463,9 @@ STATIC enum 
i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw, } } + if (cmd->offset) + memset(&hw->nvm_aq_event_desc, 0, aq_desc_len); + /* and away we go! */ status = i40e_asq_send_command(hw, aq_desc, buff, buff_size, &cmd_details); @@ -1450,6 +1475,7 @@ STATIC enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw, i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); + return status; } /* should we wait for a followup event? */ @@ -1531,6 +1557,41 @@ STATIC enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw, } /** + * i40e_nvmupd_get_aq_event - Get the Admin Queue event from previous exec_aq + * @hw: pointer to hardware structure + * @cmd: pointer to nvm update command buffer + * @bytes: pointer to the data buffer + * @perrno: pointer to return error code + * + * cmd structure contains identifiers and data buffer + **/ +STATIC enum i40e_status_code i40e_nvmupd_get_aq_event(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno) +{ + u32 aq_total_len; + u32 aq_desc_len; + + i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__); + + aq_desc_len = sizeof(struct i40e_aq_desc); + aq_total_len = aq_desc_len + LE16_TO_CPU(hw->nvm_aq_event_desc.datalen); + + /* check copylength range */ + if (cmd->data_size > aq_total_len) { + i40e_debug(hw, I40E_DEBUG_NVM, + "%s: copy length %d too big, trimming to %d\n", + __func__, cmd->data_size, aq_total_len); + cmd->data_size = aq_total_len; + } + + i40e_memcpy(bytes, &hw->nvm_aq_event_desc, cmd->data_size, + I40E_NONDMA_TO_NONDMA); + + return I40E_SUCCESS; +} + +/** * i40e_nvmupd_nvm_read - Read NVM * @hw: pointer to hardware structure * @cmd: pointer to nvm update command buffer @@ -1625,18 +1686,20 @@ STATIC enum i40e_status_code i40e_nvmupd_nvm_write(struct i40e_hw *hw, enum i40e_status_code status = I40E_SUCCESS; struct i40e_asq_cmd_details cmd_details; u8 module, transaction; + u8 preservation_flags; bool last; transaction = i40e_nvmupd_get_transaction(cmd->config); module = i40e_nvmupd_get_module(cmd->config); last = (transaction & I40E_NVM_LCB); + preservation_flags = i40e_nvmupd_get_preservation_flags(cmd->config); memset(&cmd_details, 0, sizeof(cmd_details)); cmd_details.wb_desc = &hw->nvm_wb_desc; status = i40e_aq_update_nvm(hw, module, cmd->offset, (u16)cmd->data_size, bytes, last, - &cmd_details); + preservation_flags, &cmd_details); if (status) { i40e_debug(hw, I40E_DEBUG_NVM, "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n", diff --git a/drivers/net/i40e/base/i40e_prototype.h b/drivers/net/i40e/base/i40e_prototype.h index acb2023f..c6ec2d76 100644 --- a/drivers/net/i40e/base/i40e_prototype.h +++ b/drivers/net/i40e/base/i40e_prototype.h @@ -263,7 +263,9 @@ enum i40e_status_code i40e_aq_discover_capabilities(struct i40e_hw *hw, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer, u32 offset, u16 length, void *data, - bool last_command, + bool last_command, u8 preservation_flags, + struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_nvm_progress(struct i40e_hw *hw, u8 *progress, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type, u8 mib_type, void *buff, u16 buff_size, @@ -290,6 +292,10 @@ enum i40e_status_code i40e_aq_delete_lldp_tlv(struct i40e_hw *hw, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_stop_lldp(struct i40e_hw *hw, bool 
shutdown_agent, struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_set_dcb_parameters(struct i40e_hw *hw, + bool dcb_enable, + struct i40e_asq_cmd_details + *cmd_details); enum i40e_status_code i40e_aq_start_lldp(struct i40e_hw *hw, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_get_cee_dcb_config(struct i40e_hw *hw, @@ -481,7 +487,9 @@ enum i40e_status_code i40e_validate_nvm_checksum(struct i40e_hw *hw, enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw, struct i40e_nvm_access *cmd, u8 *bytes, int *); -void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode); +void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode, + struct i40e_aq_desc *desc); +void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw); void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status); #endif /* PF_DRIVER */ @@ -496,6 +504,38 @@ STATIC INLINE struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype) return i40e_ptype_lookup[ptype]; } +#ifdef PF_DRIVER +/** + * i40e_virtchnl_link_speed - Convert AdminQ link_speed to virtchnl definition + * @link_speed: the speed to convert + * + * Returns the link_speed in terms of the virtchnl interface, for use in + * converting link_speed as reported by the AdminQ into the format used for + * talking to virtchnl devices. If we can't represent the link speed properly, + * report LINK_SPEED_UNKNOWN. + **/ +STATIC INLINE enum virtchnl_link_speed +i40e_virtchnl_link_speed(enum i40e_aq_link_speed link_speed) +{ + switch (link_speed) { + case I40E_LINK_SPEED_100MB: + return VIRTCHNL_LINK_SPEED_100MB; + case I40E_LINK_SPEED_1GB: + return VIRTCHNL_LINK_SPEED_1GB; + case I40E_LINK_SPEED_10GB: + return VIRTCHNL_LINK_SPEED_10GB; + case I40E_LINK_SPEED_40GB: + return VIRTCHNL_LINK_SPEED_40GB; + case I40E_LINK_SPEED_20GB: + return VIRTCHNL_LINK_SPEED_20GB; + case I40E_LINK_SPEED_25GB: + return VIRTCHNL_LINK_SPEED_25GB; + case I40E_LINK_SPEED_UNKNOWN: + default: + return VIRTCHNL_LINK_SPEED_UNKNOWN; + } +} +#endif /* PF_DRIVER */ /* prototype for functions used for SW spinlocks */ void i40e_init_spinlock(struct i40e_spinlock *sp); void i40e_acquire_spinlock(struct i40e_spinlock *sp); diff --git a/drivers/net/i40e/base/i40e_status.h b/drivers/net/i40e/base/i40e_status.h index 5632ff2b..49af2d9f 100644 --- a/drivers/net/i40e/base/i40e_status.h +++ b/drivers/net/i40e/base/i40e_status.h @@ -102,6 +102,7 @@ enum i40e_status_code { I40E_ERR_NOT_READY = -63, I40E_NOT_SUPPORTED = -64, I40E_ERR_FIRMWARE_API_VERSION = -65, + I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR = -66, }; #endif /* _I40E_STATUS_H_ */ diff --git a/drivers/net/i40e/base/i40e_type.h b/drivers/net/i40e/base/i40e_type.h index dca725af..006a11a8 100644 --- a/drivers/net/i40e/base/i40e_type.h +++ b/drivers/net/i40e/base/i40e_type.h @@ -77,6 +77,9 @@ POSSIBILITY OF SUCH DAMAGE. /* Max default timeout in ms, */ #define I40E_MAX_NVM_TIMEOUT 18000 +/* Max timeout in ms for the phy to respond */ +#define I40E_MAX_PHY_TIMEOUT 500 + /* Check whether address is multicast. 
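The preservation_flags value threaded through i40e_nvmupd_nvm_write() and the updated i40e_aq_update_nvm() prototype above is taken from the caller-supplied cmd->config word. The accessor itself is not part of the hunks shown here; a minimal sketch of what it presumably does, based on the I40E_NVM_PRESERVATION_FLAGS_* definitions added to i40e_type.h further below:

	STATIC u8 i40e_nvmupd_get_preservation_flags(u32 val)
	{
		/* bits 12-13 of the NVM access config word select the
		 * preservation policy (SELECTED = 0x01, ALL = 0x02) */
		return (u8)((val & I40E_NVM_PRESERVATION_FLAGS_MASK) >>
			    I40E_NVM_PRESERVATION_FLAGS_SHIFT);
	}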
*/ #define I40E_IS_MULTICAST(address) (bool)(((u8 *)(address))[0] & ((u8)0x01)) @@ -351,6 +354,10 @@ struct i40e_phy_info { I40E_PHY_TYPE_OFFSET) #define I40E_CAP_PHY_TYPE_25GBASE_LR BIT_ULL(I40E_PHY_TYPE_25GBASE_LR + \ I40E_PHY_TYPE_OFFSET) +#define I40E_CAP_PHY_TYPE_25GBASE_AOC BIT_ULL(I40E_PHY_TYPE_25GBASE_AOC + \ + I40E_PHY_TYPE_OFFSET) +#define I40E_CAP_PHY_TYPE_25GBASE_ACC BIT_ULL(I40E_PHY_TYPE_25GBASE_ACC + \ + I40E_PHY_TYPE_OFFSET) #define I40E_HW_CAP_MAX_GPIO 30 #define I40E_HW_CAP_MDIO_PORT_MODE_MDIO 0 #define I40E_HW_CAP_MDIO_PORT_MODE_I2C 1 @@ -483,6 +490,7 @@ enum i40e_nvmupd_cmd { I40E_NVMUPD_STATUS, I40E_NVMUPD_EXEC_AQ, I40E_NVMUPD_GET_AQ_RESULT, + I40E_NVMUPD_GET_AQ_EVENT, }; enum i40e_nvmupd_state { @@ -502,15 +510,21 @@ enum i40e_nvmupd_state { #define I40E_NVM_MOD_PNT_MASK 0xFF -#define I40E_NVM_TRANS_SHIFT 8 -#define I40E_NVM_TRANS_MASK (0xf << I40E_NVM_TRANS_SHIFT) -#define I40E_NVM_CON 0x0 -#define I40E_NVM_SNT 0x1 -#define I40E_NVM_LCB 0x2 -#define I40E_NVM_SA (I40E_NVM_SNT | I40E_NVM_LCB) -#define I40E_NVM_ERA 0x4 -#define I40E_NVM_CSUM 0x8 -#define I40E_NVM_EXEC 0xf +#define I40E_NVM_TRANS_SHIFT 8 +#define I40E_NVM_TRANS_MASK (0xf << I40E_NVM_TRANS_SHIFT) +#define I40E_NVM_PRESERVATION_FLAGS_SHIFT 12 +#define I40E_NVM_PRESERVATION_FLAGS_MASK \ + (0x3 << I40E_NVM_PRESERVATION_FLAGS_SHIFT) +#define I40E_NVM_PRESERVATION_FLAGS_SELECTED 0x01 +#define I40E_NVM_PRESERVATION_FLAGS_ALL 0x02 +#define I40E_NVM_CON 0x0 +#define I40E_NVM_SNT 0x1 +#define I40E_NVM_LCB 0x2 +#define I40E_NVM_SA (I40E_NVM_SNT | I40E_NVM_LCB) +#define I40E_NVM_ERA 0x4 +#define I40E_NVM_CSUM 0x8 +#define I40E_NVM_AQE 0xe +#define I40E_NVM_EXEC 0xf #define I40E_NVM_ADAPT_SHIFT 16 #define I40E_NVM_ADAPT_MASK (0xffffULL << I40E_NVM_ADAPT_SHIFT) @@ -526,6 +540,19 @@ struct i40e_nvm_access { u8 data[1]; }; +/* (Q)SFP module access definitions */ +#define I40E_I2C_EEPROM_DEV_ADDR 0xA0 +#define I40E_I2C_EEPROM_DEV_ADDR2 0xA2 +#define I40E_MODULE_TYPE_ADDR 0x00 +#define I40E_MODULE_REVISION_ADDR 0x01 +#define I40E_MODULE_SFF_8472_COMP 0x5E +#define I40E_MODULE_SFF_8472_SWAP 0x5C +#define I40E_MODULE_SFF_ADDR_MODE 0x04 +#define I40E_MODULE_SFF_DIAG_CAPAB 0x40 +#define I40E_MODULE_TYPE_QSFP_PLUS 0x0D +#define I40E_MODULE_TYPE_QSFP28 0x11 +#define I40E_MODULE_QSFP_MAX_LEN 640 + /* PCI bus types */ enum i40e_bus_type { i40e_bus_type_unknown = 0, @@ -680,6 +707,7 @@ struct i40e_hw { /* state of nvm update process */ enum i40e_nvmupd_state nvmupd_state; struct i40e_aq_desc nvm_wb_desc; + struct i40e_aq_desc nvm_aq_event_desc; struct i40e_virt_mem nvm_buff; bool nvm_release_on_done; u16 nvm_wait_opcode; @@ -702,6 +730,7 @@ struct i40e_hw { #define I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE BIT_ULL(0) #define I40E_HW_FLAG_802_1AD_CAPABLE BIT_ULL(1) #define I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE BIT_ULL(2) +#define I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK BIT_ULL(3) u64 flags; /* Used in set switch config AQ command */ @@ -1468,7 +1497,8 @@ struct i40e_hw_port_stats { #define I40E_SR_PE_IMAGE_PTR 0x0C #define I40E_SR_CSR_PROTECTED_LIST_PTR 0x0D #define I40E_SR_MNG_CONFIG_PTR 0x0E -#define I40E_SR_EMP_MODULE_PTR 0x0F +#define I40E_EMP_MODULE_PTR 0x0F +#define I40E_SR_EMP_MODULE_PTR 0x48 #define I40E_SR_PBA_FLAGS 0x15 #define I40E_SR_PBA_BLOCK_PTR 0x16 #define I40E_SR_BOOT_CONFIG_PTR 0x17 @@ -1509,6 +1539,9 @@ struct i40e_hw_port_stats { #define I40E_SR_PCIE_ALT_MODULE_MAX_SIZE 1024 #define I40E_SR_CONTROL_WORD_1_SHIFT 0x06 #define I40E_SR_CONTROL_WORD_1_MASK (0x03 << I40E_SR_CONTROL_WORD_1_SHIFT) +#define 
I40E_SR_CONTROL_WORD_1_NVM_BANK_VALID BIT(5) +#define I40E_SR_NVM_MAP_STRUCTURE_TYPE BIT(12) +#define I40E_PTR_TYPE BIT(15) /* Shadow RAM related */ #define I40E_SR_SECTOR_SIZE_IN_WORDS 0x800 @@ -1826,7 +1859,8 @@ enum i40e_reset_type { }; /* IEEE 802.1AB LLDP Agent Variables from NVM */ -#define I40E_NVM_LLDP_CFG_PTR 0xD +#define I40E_NVM_LLDP_CFG_PTR 0x06 +#define I40E_SR_LLDP_CFG_PTR 0x31 struct i40e_lldp_variables { u16 length; u16 adminstatus; diff --git a/drivers/net/i40e/base/meson.build b/drivers/net/i40e/base/meson.build new file mode 100644 index 00000000..401a1477 --- /dev/null +++ b/drivers/net/i40e/base/meson.build @@ -0,0 +1,28 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2017 Intel Corporation + +sources = [ + 'i40e_adminq.c', + 'i40e_common.c', + 'i40e_dcb.c', + 'i40e_diag.c', + 'i40e_hmc.c', + 'i40e_lan_hmc.c', + 'i40e_nvm.c' +] + +error_cflags = ['-Wno-sign-compare', '-Wno-unused-value', + '-Wno-format', '-Wno-unused-but-set-variable', + '-Wno-strict-aliasing' +] +c_args = cflags +foreach flag: error_cflags + if cc.has_argument(flag) + c_args += flag + endif +endforeach + +base_lib = static_library('i40e_base', sources, + dependencies: static_rte_eal, + c_args: c_args) +base_objs = base_lib.extract_all_objects() diff --git a/drivers/net/i40e/base/virtchnl.h b/drivers/net/i40e/base/virtchnl.h index f00dd360..b2d5fe73 100644 --- a/drivers/net/i40e/base/virtchnl.h +++ b/drivers/net/i40e/base/virtchnl.h @@ -240,7 +240,7 @@ struct virtchnl_vsi_resource { VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource); -/* VF offload flags +/* VF capability flags * VIRTCHNL_VF_OFFLOAD_L2 flag is inclusive of base mode L2 offloads including * TX/RX Checksum offloading and TSO for non-tunnelled packets. */ @@ -269,7 +269,7 @@ struct virtchnl_vf_resource { u16 max_vectors; u16 max_mtu; - u32 vf_offload_flags; + u32 vf_cap_flags; u32 rss_key_size; u32 rss_lut_size; @@ -349,8 +349,8 @@ VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_vsi_queue_config_info); * additional queues must be negotiated. This is a best effort request as it * is possible the PF does not have enough queues left to support the request. * If the PF cannot support the number requested it will respond with the - * maximum number it is able to support; otherwise it will respond with the - * number requested. + * maximum number it is able to support. If the request is successful, PF will + * then reset the VF to institute required changes. */ /* VF resource request */ @@ -509,7 +509,7 @@ VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_key); struct virtchnl_rss_lut { u16 vsi_id; u16 lut_entries; - u8 lut[1]; /* RSS lookup table*/ + u8 lut[1]; /* RSS lookup table */ }; VIRTCHNL_CHECK_STRUCT_LEN(6, virtchnl_rss_lut); @@ -764,7 +764,7 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode, return VIRTCHNL_ERR_PARAM; } /* few more checks */ - if ((valid_len != msglen) || (err_msg_format)) + if (err_msg_format || valid_len != msglen) return VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH; return 0; diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c index 811cc9ff..508b4171 100644 --- a/drivers/net/i40e/i40e_ethdev.c +++ b/drivers/net/i40e/i40e_ethdev.c @@ -1,34 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2010-2017 Intel Corporation. All rights reserved. - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2017 Intel Corporation */ #include <stdio.h> @@ -45,7 +16,7 @@ #include <rte_pci.h> #include <rte_bus_pci.h> #include <rte_ether.h> -#include <rte_ethdev.h> +#include <rte_ethdev_driver.h> #include <rte_ethdev_pci.h> #include <rte_memzone.h> #include <rte_malloc.h> @@ -684,6 +655,15 @@ rte_i40e_dev_atomic_write_link_status(struct rte_eth_dev *dev, return 0; } +static inline void +i40e_write_global_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val) +{ + i40e_write_rx_ctl(hw, reg_addr, reg_val); + PMD_DRV_LOG(DEBUG, "Global register 0x%08x is modified " + "with value 0x%08x", + reg_addr, reg_val); +} + RTE_PMD_REGISTER_PCI(net_i40e, rte_i40e_pmd); RTE_PMD_REGISTER_PCI_TABLE(net_i40e, pci_id_i40e_map); RTE_PMD_REGISTER_KMOD_DEP(net_i40e, "* igb_uio | uio_pci_generic | vfio-pci"); @@ -701,24 +681,31 @@ RTE_PMD_REGISTER_KMOD_DEP(net_i40e, "* igb_uio | uio_pci_generic | vfio-pci"); static inline void i40e_GLQF_reg_init(struct i40e_hw *hw) { /* - * Force global configuration for flexible payload - * to the first 16 bytes of the corresponding L2/L3/L4 paylod. - * This should be removed from code once proper - * configuration API is added to avoid configuration conflicts - * between ports of the same device. - */ - I40E_WRITE_REG(hw, I40E_GLQF_ORT(33), 0x000000E0); - I40E_WRITE_REG(hw, I40E_GLQF_ORT(34), 0x000000E3); - I40E_WRITE_REG(hw, I40E_GLQF_ORT(35), 0x000000E6); - - /* * Initialize registers for parsing packet type of QinQ * This should be removed from code once proper * configuration API is added to avoid configuration conflicts * between ports of the same device. 
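The i40e_write_global_rx_ctl() helper added above is part of a pattern applied throughout this patch: every write to a device-global register (state shared by all ports, and by any other driver bound to the same adapter) goes through i40e_write_global_rx_ctl() or I40E_WRITE_GLB_REG(), is logged at DEBUG level, flagged with i40e_global_cfg_warning(), and skipped altogether when the support-multi-driver devarg is set. A representative instance, as it appears later in this patch:

	if (!pf->support_multi_driver) {
		i40e_write_global_rx_ctl(hw, I40E_GLQF_CTL, reg);
		i40e_global_cfg_warning(I40E_WARNING_QF_CTL);
	}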
*/ - I40E_WRITE_REG(hw, I40E_GLQF_ORT(40), 0x00000029); - I40E_WRITE_REG(hw, I40E_GLQF_PIT(9), 0x00009420); + I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(40), 0x00000029); + I40E_WRITE_GLB_REG(hw, I40E_GLQF_PIT(9), 0x00009420); + i40e_global_cfg_warning(I40E_WARNING_QINQ_PARSER); +} + +static inline void i40e_config_automask(struct i40e_pf *pf) +{ + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + uint32_t val; + + /* INTENA flag is not auto-cleared for interrupt */ + val = I40E_READ_REG(hw, I40E_GLINT_CTL); + val |= I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK | + I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK; + + /* If support multi-driver, PF will use INT0. */ + if (!pf->support_multi_driver) + val |= I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK; + + I40E_WRITE_REG(hw, I40E_GLINT_CTL, val); } #define I40E_FLOW_CONTROL_ETHERTYPE 0x8808 @@ -1006,7 +993,7 @@ i40e_init_fdir_filter_list(struct rte_eth_dev *dev) struct rte_hash_parameters fdir_hash_params = { .name = fdir_hash_name, .entries = I40E_MAX_FDIR_FILTER_NUM, - .key_len = sizeof(struct rte_eth_fdir_input), + .key_len = sizeof(struct i40e_fdir_input), .hash_func = rte_hash_crc, .hash_func_init_val = 0, .socket_id = rte_socket_id(), @@ -1068,6 +1055,68 @@ i40e_init_queue_region_conf(struct rte_eth_dev *dev) memset(info, 0, sizeof(struct i40e_queue_regions)); } +#define ETH_I40E_SUPPORT_MULTI_DRIVER "support-multi-driver" + +static int +i40e_parse_multi_drv_handler(__rte_unused const char *key, + const char *value, + void *opaque) +{ + struct i40e_pf *pf; + unsigned long support_multi_driver; + char *end; + + pf = (struct i40e_pf *)opaque; + + errno = 0; + support_multi_driver = strtoul(value, &end, 10); + if (errno != 0 || end == value || *end != 0) { + PMD_DRV_LOG(WARNING, "Wrong global configuration"); + return -(EINVAL); + } + + if (support_multi_driver == 1 || support_multi_driver == 0) + pf->support_multi_driver = (bool)support_multi_driver; + else + PMD_DRV_LOG(WARNING, "%s must be 1 or 0,", + "enable global configuration by default." 
+ ETH_I40E_SUPPORT_MULTI_DRIVER); + return 0; +} + +static int +i40e_support_multi_driver(struct rte_eth_dev *dev) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + static const char *const valid_keys[] = { + ETH_I40E_SUPPORT_MULTI_DRIVER, NULL}; + struct rte_kvargs *kvlist; + + /* Enable global configuration by default */ + pf->support_multi_driver = false; + + if (!dev->device->devargs) + return 0; + + kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys); + if (!kvlist) + return -EINVAL; + + if (rte_kvargs_count(kvlist, ETH_I40E_SUPPORT_MULTI_DRIVER) > 1) + PMD_DRV_LOG(WARNING, "More than one argument \"%s\" and only " + "the first invalid or last valid one is used !", + ETH_I40E_SUPPORT_MULTI_DRIVER); + + if (rte_kvargs_process(kvlist, ETH_I40E_SUPPORT_MULTI_DRIVER, + i40e_parse_multi_drv_handler, pf) < 0) { + rte_kvargs_free(kvlist); + return -EINVAL; + } + + rte_kvargs_free(kvlist); + return 0; +} + static int eth_i40e_dev_init(struct rte_eth_dev *dev) { @@ -1096,7 +1145,6 @@ eth_i40e_dev_init(struct rte_eth_dev *dev) return 0; } i40e_set_default_ptype_table(dev); - i40e_set_default_pctype_table(dev); pci_dev = RTE_ETH_DEV_TO_PCI(dev); intr_handle = &pci_dev->intr_handle; @@ -1122,6 +1170,9 @@ eth_i40e_dev_init(struct rte_eth_dev *dev) hw->bus.func = pci_dev->addr.function; hw->adapter_stopped = 0; + /* Check if need to support multi-driver */ + i40e_support_multi_driver(dev); + /* Make sure all is clean before doing PF reset */ i40e_clear_hw(hw); @@ -1142,13 +1193,17 @@ eth_i40e_dev_init(struct rte_eth_dev *dev) return ret; } + i40e_config_automask(pf); + + i40e_set_default_pctype_table(dev); + /* * To work around the NVM issue, initialize registers - * for flexible payload and packet type of QinQ by - * software. It should be removed once issues are fixed - * in NVM. + * for packet type of QinQ by software. + * It should be removed once issues are fixed in NVM. 
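With the kvargs handling above, multi-driver support is opt-in per port through a device argument, for example (PCI address purely illustrative):

	testpmd -w 0000:81:00.0,support-multi-driver=1 -- -i

Values other than 0 or 1 are ignored with a warning by i40e_parse_multi_drv_handler() and the default (this PMD owns the global configuration) is kept.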
*/ - i40e_GLQF_reg_init(hw); + if (!pf->support_multi_driver) + i40e_GLQF_reg_init(hw); /* Initialize the input set for filters (hash and fd) to default value */ i40e_filter_input_set_init(pf); @@ -1168,10 +1223,17 @@ eth_i40e_dev_init(struct rte_eth_dev *dev) (hw->nvm.version & 0xf), hw->nvm.eetrack); /* initialise the L3_MAP register */ - ret = i40e_aq_debug_write_register(hw, I40E_GLQF_L3_MAP(40), - 0x00000028, NULL); - if (ret) - PMD_INIT_LOG(ERR, "Failed to write L3 MAP register %d", ret); + if (!pf->support_multi_driver) { + ret = i40e_aq_debug_write_register(hw, I40E_GLQF_L3_MAP(40), + 0x00000028, NULL); + if (ret) + PMD_INIT_LOG(ERR, "Failed to write L3 MAP register %d", + ret); + PMD_INIT_LOG(DEBUG, + "Global register 0x%08x is changed with 0x28", + I40E_GLQF_L3_MAP(40)); + i40e_global_cfg_warning(I40E_WARNING_QINQ_CLOUD_FILTER); + } /* Need the special FW version to support floating VEB */ config_floating_veb(dev); @@ -1247,11 +1309,15 @@ eth_i40e_dev_init(struct rte_eth_dev *dev) i40e_set_fc(hw, &aq_fail, TRUE); /* Set the global registers with default ether type value */ - ret = i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER, ETHER_TYPE_VLAN); - if (ret != I40E_SUCCESS) { - PMD_INIT_LOG(ERR, - "Failed to set the default outer VLAN ether type"); - goto err_setup_pf_switch; + if (!pf->support_multi_driver) { + ret = i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER, + ETHER_TYPE_VLAN); + if (ret != I40E_SUCCESS) { + PMD_INIT_LOG(ERR, + "Failed to set the default outer " + "VLAN ether type"); + goto err_setup_pf_switch; + } } /* PF setup, which includes VSI setup */ @@ -1315,6 +1381,11 @@ eth_i40e_dev_init(struct rte_eth_dev *dev) /* enable uio intr after callback register */ rte_intr_enable(intr_handle); + + /* By default disable flexible payload in global configuration */ + if (!pf->support_multi_driver) + i40e_flex_payload_reg_set_default(hw); + /* * Add an ethertype filter to drop all flow control frames transmitted * from VSIs. By doing so, we stop VF from sending out PAUSE or PFC @@ -1349,6 +1420,10 @@ eth_i40e_dev_init(struct rte_eth_dev *dev) /* initialize queue region configuration */ i40e_init_queue_region_conf(dev); + /* initialize rss configuration from rte_flow */ + memset(&pf->rss_info, 0, + sizeof(struct i40e_rte_flow_rss_conf)); + return 0; err_init_fdir_filter_list: @@ -1435,6 +1510,18 @@ i40e_rm_fdir_filter_list(struct i40e_pf *pf) } } +void i40e_flex_payload_reg_set_default(struct i40e_hw *hw) +{ + /* + * Disable by default flexible payload + * for corresponding L2/L3/L4 layers. 
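Taken together, the eth_i40e_dev_init() changes above make every probe-time touch of global state conditional: with support-multi-driver set, the PMD no longer rewrites the GLQF parsing registers, the I40E_GLQF_L3_MAP register, the default outer VLAN TPID or the flexible-payload defaults, leaving whatever another driver (e.g. the kernel PF driver on the same adapter) has configured. The guard is the same everywhere, as in:

	if (!pf->support_multi_driver)
		i40e_GLQF_reg_init(hw);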
+ */ + I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(33), 0x00000000); + I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(34), 0x00000000); + I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(35), 0x00000000); + i40e_global_cfg_warning(I40E_WARNING_DIS_FLX_PLD); +} + static int eth_i40e_dev_uninit(struct rte_eth_dev *dev) { @@ -1640,6 +1727,7 @@ __vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect, int i; uint32_t val; struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); + struct i40e_pf *pf = I40E_VSI_TO_PF(vsi); /* Bind all RX queues to allocated MSIX interrupt */ for (i = 0; i < nb_queue; i++) { @@ -1658,7 +1746,8 @@ __vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect, /* Write first RX queue to Link list register as the head element */ if (vsi->type != I40E_VSI_SRIOV) { uint16_t interval = - i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL); + i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL, 1, + pf->support_multi_driver); if (msix_vect == I40E_MISC_VEC_ID) { I40E_WRITE_REG(hw, I40E_PFINT_LNKLST0, @@ -1717,7 +1806,6 @@ i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t itr_idx) uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd); uint16_t queue_idx = 0; int record = 0; - uint32_t val; int i; for (i = 0; i < vsi->nb_qps; i++) { @@ -1725,13 +1813,6 @@ i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t itr_idx) I40E_WRITE_REG(hw, I40E_QINT_RQCTL(vsi->base_queue + i), 0); } - /* INTENA flag is not auto-cleared for interrupt */ - val = I40E_READ_REG(hw, I40E_GLINT_CTL); - val |= I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK | - I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK | - I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK; - I40E_WRITE_REG(hw, I40E_GLINT_CTL, val); - /* VF bind interrupt */ if (vsi->type == I40E_VSI_SRIOV) { __vsi_queues_bind_intr(vsi, msix_vect, @@ -1788,27 +1869,22 @@ i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi) struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); - uint16_t interval = i40e_calc_itr_interval(\ - RTE_LIBRTE_I40E_ITR_INTERVAL); + struct i40e_pf *pf = I40E_VSI_TO_PF(vsi); uint16_t msix_intr, i; - if (rte_intr_allow_others(intr_handle)) + if (rte_intr_allow_others(intr_handle) && !pf->support_multi_driver) for (i = 0; i < vsi->nb_msix; i++) { msix_intr = vsi->msix_intr + i; I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1), I40E_PFINT_DYN_CTLN_INTENA_MASK | I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | - (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) | - (interval << - I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT)); + I40E_PFINT_DYN_CTLN_ITR_INDX_MASK); } else I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_INTENA_MASK | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK | - (0 << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT) | - (interval << - I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT)); + I40E_PFINT_DYN_CTL0_ITR_INDX_MASK); I40E_WRITE_FLUSH(hw); } @@ -1820,16 +1896,18 @@ i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi) struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); + struct i40e_pf *pf = I40E_VSI_TO_PF(vsi); uint16_t msix_intr, i; - if (rte_intr_allow_others(intr_handle)) + if (rte_intr_allow_others(intr_handle) && !pf->support_multi_driver) for (i = 0; i < vsi->nb_msix; i++) { msix_intr = vsi->msix_intr + i; I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - 1), - 0); + I40E_PFINT_DYN_CTLN_ITR_INDX_MASK); } else - I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0); + I40E_WRITE_REG(hw, 
I40E_PFINT_DYN_CTL0, + I40E_PFINT_DYN_CTL0_ITR_INDX_MASK); I40E_WRITE_FLUSH(hw); } @@ -2048,6 +2126,16 @@ i40e_dev_start(struct rte_eth_dev *dev) } } + /* Enable mac loopback mode */ + if (dev->data->dev_conf.lpbk_mode == I40E_AQ_LB_MODE_NONE || + dev->data->dev_conf.lpbk_mode == I40E_AQ_LB_PHY_LOCAL) { + ret = i40e_aq_set_lb_modes(hw, dev->data->dev_conf.lpbk_mode, NULL); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "fail to set loopback link"); + goto err_up; + } + } + /* Apply link configure */ if (dev->data->dev_conf.link_speeds & ~(ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G | @@ -2154,9 +2242,6 @@ i40e_dev_stop(struct rte_eth_dev *dev) /* reset hierarchy commit */ pf->tm_conf.committed = false; - /* Remove all the queue region configuration */ - i40e_flush_queue_region_all_conf(dev, hw, pf, 0); - hw->adapter_stopped = 1; } @@ -2222,6 +2307,10 @@ i40e_dev_close(struct rte_eth_dev *dev) i40e_res_pool_destroy(&pf->qp_pool); i40e_res_pool_destroy(&pf->msix_pool); + /* Disable flexible payload in global configuration */ + if (!pf->support_multi_driver) + i40e_flex_payload_reg_set_default(hw); + /* force a PF reset to clean anything leftover */ reg = I40E_READ_REG(hw, I40E_PFGEN_CTRL); I40E_WRITE_REG(hw, I40E_PFGEN_CTRL, @@ -2531,6 +2620,22 @@ i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw) pf->offset_loaded, &pf->internal_stats_offset.rx_broadcast, &pf->internal_stats.rx_broadcast); + /* Get total internal tx packet count */ + i40e_stat_update_48(hw, I40E_GLV_UPTCH(hw->port), + I40E_GLV_UPTCL(hw->port), + pf->offset_loaded, + &pf->internal_stats_offset.tx_unicast, + &pf->internal_stats.tx_unicast); + i40e_stat_update_48(hw, I40E_GLV_MPTCH(hw->port), + I40E_GLV_MPTCL(hw->port), + pf->offset_loaded, + &pf->internal_stats_offset.tx_multicast, + &pf->internal_stats.tx_multicast); + i40e_stat_update_48(hw, I40E_GLV_BPTCH(hw->port), + I40E_GLV_BPTCL(hw->port), + pf->offset_loaded, + &pf->internal_stats_offset.tx_broadcast, + &pf->internal_stats.tx_broadcast); /* exclude CRC size */ pf->internal_stats.rx_bytes -= (pf->internal_stats.rx_unicast + @@ -2560,16 +2665,32 @@ i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw) ns->eth.rx_bytes -= (ns->eth.rx_unicast + ns->eth.rx_multicast + ns->eth.rx_broadcast) * ETHER_CRC_LEN; - /* Workaround: it is possible I40E_GLV_GORCH[H/L] is updated before - * I40E_GLPRT_GORCH[H/L], so there is a small window that cause negtive + /* exclude internal rx bytes + * Workaround: it is possible I40E_GLV_GORCH[H/L] is updated before + * I40E_GLPRT_GORCH[H/L], so there is a small window that cause negative * value. + * same to I40E_GLV_UPRC[H/L], I40E_GLV_MPRC[H/L], I40E_GLV_BPRC[H/L]. 
*/ if (ns->eth.rx_bytes < pf->internal_stats.rx_bytes) ns->eth.rx_bytes = 0; - /* exlude internal rx bytes */ else ns->eth.rx_bytes -= pf->internal_stats.rx_bytes; + if (ns->eth.rx_unicast < pf->internal_stats.rx_unicast) + ns->eth.rx_unicast = 0; + else + ns->eth.rx_unicast -= pf->internal_stats.rx_unicast; + + if (ns->eth.rx_multicast < pf->internal_stats.rx_multicast) + ns->eth.rx_multicast = 0; + else + ns->eth.rx_multicast -= pf->internal_stats.rx_multicast; + + if (ns->eth.rx_broadcast < pf->internal_stats.rx_broadcast) + ns->eth.rx_broadcast = 0; + else + ns->eth.rx_broadcast -= pf->internal_stats.rx_broadcast; + i40e_stat_update_32(hw, I40E_GLPRT_RDPC(hw->port), pf->offset_loaded, &os->eth.rx_discards, &ns->eth.rx_discards); @@ -2598,12 +2719,32 @@ i40e_read_stats_registers(struct i40e_pf *pf, struct i40e_hw *hw) ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast + ns->eth.tx_broadcast) * ETHER_CRC_LEN; - /* exclude internal tx bytes */ + /* exclude internal tx bytes + * Workaround: it is possible I40E_GLV_GOTCH[H/L] is updated before + * I40E_GLPRT_GOTCH[H/L], so there is a small window that cause negative + * value. + * same to I40E_GLV_UPTC[H/L], I40E_GLV_MPTC[H/L], I40E_GLV_BPTC[H/L]. + */ if (ns->eth.tx_bytes < pf->internal_stats.tx_bytes) ns->eth.tx_bytes = 0; else ns->eth.tx_bytes -= pf->internal_stats.tx_bytes; + if (ns->eth.tx_unicast < pf->internal_stats.tx_unicast) + ns->eth.tx_unicast = 0; + else + ns->eth.tx_unicast -= pf->internal_stats.tx_unicast; + + if (ns->eth.tx_multicast < pf->internal_stats.tx_multicast) + ns->eth.tx_multicast = 0; + else + ns->eth.tx_multicast -= pf->internal_stats.tx_multicast; + + if (ns->eth.tx_broadcast < pf->internal_stats.tx_broadcast) + ns->eth.tx_broadcast = 0; + else + ns->eth.tx_broadcast -= pf->internal_stats.tx_broadcast; + /* GLPRT_TEPC not supported */ /* additional port specific stats */ @@ -3040,7 +3181,9 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) DEV_RX_OFFLOAD_QINQ_STRIP | DEV_RX_OFFLOAD_IPV4_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM | - DEV_RX_OFFLOAD_TCP_CKSUM; + DEV_RX_OFFLOAD_TCP_CKSUM | + DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | + DEV_RX_OFFLOAD_CRC_STRIP; dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT | DEV_TX_OFFLOAD_QINQ_INSERT | @@ -3173,8 +3316,8 @@ i40e_vlan_tpid_set_by_registers(struct rte_eth_dev *dev, return -EIO; } PMD_DRV_LOG(DEBUG, - "Debug write 0x%08"PRIx64" to I40E_GL_SWT_L2TAGCTRL[%d]", - reg_w, reg_id); + "Global register 0x%08x is changed with value 0x%08x", + I40E_GL_SWT_L2TAGCTRL(reg_id), (uint32_t)reg_w); return 0; } @@ -3185,6 +3328,7 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid) { struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend; int ret = 0; @@ -3195,6 +3339,12 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev, "Unsupported vlan type."); return -EINVAL; } + + if (pf->support_multi_driver) { + PMD_DRV_LOG(ERR, "Setting TPID is not supported."); + return -ENOTSUP; + } + /* 802.1ad frames ability is added in NVM API 1.7*/ if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) { if (qinq) { @@ -3217,6 +3367,7 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev, /* If NVM API < 1.7, keep the register setting */ ret = i40e_vlan_tpid_set_by_registers(dev, vlan_type, tpid, qinq); + i40e_global_cfg_warning(I40E_WARNING_TPID); return ret; } @@ -3446,19 +3597,25 @@ i40e_flow_ctrl_set(struct rte_eth_dev *dev, struct 
rte_eth_fc_conf *fc_conf) I40E_WRITE_REG(hw, I40E_PRTDCB_MFLCN, mflcn_reg); } - /* config the water marker both based on the packets and bytes */ - I40E_WRITE_REG(hw, I40E_GLRPB_PHW, - (pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] - << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE); - I40E_WRITE_REG(hw, I40E_GLRPB_PLW, - (pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] - << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE); - I40E_WRITE_REG(hw, I40E_GLRPB_GHW, - pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] - << I40E_KILOSHIFT); - I40E_WRITE_REG(hw, I40E_GLRPB_GLW, - pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] - << I40E_KILOSHIFT); + if (!pf->support_multi_driver) { + /* config water marker both based on the packets and bytes */ + I40E_WRITE_GLB_REG(hw, I40E_GLRPB_PHW, + (pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] + << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE); + I40E_WRITE_GLB_REG(hw, I40E_GLRPB_PLW, + (pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] + << I40E_KILOSHIFT) / I40E_PACKET_AVERAGE_SIZE); + I40E_WRITE_GLB_REG(hw, I40E_GLRPB_GHW, + pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] + << I40E_KILOSHIFT); + I40E_WRITE_GLB_REG(hw, I40E_GLRPB_GLW, + pf->fc_conf.low_water[I40E_MAX_TRAFFIC_CLASS] + << I40E_KILOSHIFT); + i40e_global_cfg_warning(I40E_WARNING_FLOW_CTL); + } else { + PMD_DRV_LOG(ERR, + "Water marker configuration is not supported."); + } I40E_WRITE_FLUSH(hw); @@ -3678,6 +3835,7 @@ i40e_get_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size) { struct i40e_pf *pf = I40E_VSI_TO_PF(vsi); struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); + uint32_t reg; int ret; if (!lut) @@ -3694,14 +3852,22 @@ i40e_get_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size) uint32_t *lut_dw = (uint32_t *)lut; uint16_t i, lut_size_dw = lut_size / 4; - for (i = 0; i < lut_size_dw; i++) - lut_dw[i] = I40E_READ_REG(hw, I40E_PFQF_HLUT(i)); + if (vsi->type == I40E_VSI_SRIOV) { + for (i = 0; i <= lut_size_dw; i++) { + reg = I40E_VFQF_HLUT1(i, vsi->user_param); + lut_dw[i] = i40e_read_rx_ctl(hw, reg); + } + } else { + for (i = 0; i < lut_size_dw; i++) + lut_dw[i] = I40E_READ_REG(hw, + I40E_PFQF_HLUT(i)); + } } return 0; } -static int +int i40e_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size) { struct i40e_pf *pf; @@ -3725,8 +3891,17 @@ i40e_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size) uint32_t *lut_dw = (uint32_t *)lut; uint16_t i, lut_size_dw = lut_size / 4; - for (i = 0; i < lut_size_dw; i++) - I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i), lut_dw[i]); + if (vsi->type == I40E_VSI_SRIOV) { + for (i = 0; i < lut_size_dw; i++) + I40E_WRITE_REG( + hw, + I40E_VFQF_HLUT1(i, vsi->user_param), + lut_dw[i]); + } else { + for (i = 0; i < lut_size_dw; i++) + I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i), + lut_dw[i]); + } I40E_WRITE_FLUSH(hw); } @@ -3971,6 +4146,68 @@ i40e_get_cap(struct i40e_hw *hw) return ret; } +#define RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF 4 +#define QUEUE_NUM_PER_VF_ARG "queue-num-per-vf" + +static int i40e_pf_parse_vf_queue_number_handler(const char *key, + const char *value, + void *opaque) +{ + struct i40e_pf *pf; + unsigned long num; + char *end; + + pf = (struct i40e_pf *)opaque; + RTE_SET_USED(key); + + errno = 0; + num = strtoul(value, &end, 0); + if (errno != 0 || end == value || *end != 0) { + PMD_DRV_LOG(WARNING, "Wrong VF queue number = %s, Now it is " + "kept the value = %hu", value, pf->vf_nb_qp_max); + return -(EINVAL); + } + + if (num <= I40E_MAX_QP_NUM_PER_VF && rte_is_power_of_2(num)) + pf->vf_nb_qp_max = (uint16_t)num; + else + /* here return 0 to 
make next valid same argument work */ + PMD_DRV_LOG(WARNING, "Wrong VF queue number = %lu, it must be " + "power of 2 and equal or less than 16 !, Now it is " + "kept the value = %hu", num, pf->vf_nb_qp_max); + + return 0; +} + +static int i40e_pf_config_vf_rxq_number(struct rte_eth_dev *dev) +{ + static const char * const valid_keys[] = {QUEUE_NUM_PER_VF_ARG, NULL}; + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct rte_kvargs *kvlist; + + /* set default queue number per VF as 4 */ + pf->vf_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF; + + if (dev->device->devargs == NULL) + return 0; + + kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys); + if (kvlist == NULL) + return -(EINVAL); + + if (rte_kvargs_count(kvlist, QUEUE_NUM_PER_VF_ARG) > 1) + PMD_DRV_LOG(WARNING, "More than one argument \"%s\" and only " + "the first invalid or last valid one is used !", + QUEUE_NUM_PER_VF_ARG); + + rte_kvargs_process(kvlist, QUEUE_NUM_PER_VF_ARG, + i40e_pf_parse_vf_queue_number_handler, pf); + + rte_kvargs_free(kvlist); + + return 0; +} + static int i40e_pf_parameter_init(struct rte_eth_dev *dev) { @@ -3983,6 +4220,9 @@ i40e_pf_parameter_init(struct rte_eth_dev *dev) PMD_INIT_LOG(ERR, "HW configuration doesn't support SRIOV"); return -EINVAL; } + + i40e_pf_config_vf_rxq_number(dev); + /* Add the parameter init for LFC */ pf->fc_conf.pause_time = I40E_DEFAULT_PAUSE_TIME; pf->fc_conf.high_water[I40E_MAX_TRAFFIC_CLASS] = I40E_DEFAULT_HIGH_WATER; @@ -3992,7 +4232,6 @@ i40e_pf_parameter_init(struct rte_eth_dev *dev) pf->max_num_vsi = hw->func_caps.num_vsis; pf->lan_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF; pf->vmdq_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM; - pf->vf_nb_qp_max = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF; /* FDir queue/VSI allocation */ pf->fdir_qp_offset = 0; @@ -4022,7 +4261,7 @@ i40e_pf_parameter_init(struct rte_eth_dev *dev) pf->vf_qp_offset = pf->lan_qp_offset + pf->lan_nb_qps; if (hw->func_caps.sr_iov_1_1 && pci_dev->max_vfs) { pf->flags |= I40E_FLAG_SRIOV; - pf->vf_nb_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF; + pf->vf_nb_qps = pf->vf_nb_qp_max; pf->vf_num = pci_dev->max_vfs; PMD_DRV_LOG(DEBUG, "%u VF VSIs, %u queues per VF VSI, in total %u queues", @@ -4950,16 +5189,28 @@ i40e_vsi_setup(struct i40e_pf *pf, /* VF has MSIX interrupt in VF range, don't allocate here */ if (type == I40E_VSI_MAIN) { - ret = i40e_res_pool_alloc(&pf->msix_pool, - RTE_MIN(vsi->nb_qps, - RTE_MAX_RXTX_INTR_VEC_ID)); - if (ret < 0) { - PMD_DRV_LOG(ERR, "VSI MAIN %d get heap failed %d", - vsi->seid, ret); - goto fail_queue_alloc; + if (pf->support_multi_driver) { + /* If support multi-driver, need to use INT0 instead of + * allocating from msix pool. The Msix pool is init from + * INT1, so it's OK just set msix_intr to 0 and nb_msix + * to 1 without calling i40e_res_pool_alloc. 
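The queue-num-per-vf handler above accepts only powers of two no larger than I40E_MAX_QP_NUM_PER_VF (16, per the warning text); anything else keeps the 4-queue default. Usage example (PCI address purely illustrative):

	testpmd -w 0000:81:00.0,queue-num-per-vf=8 -- -i

The parsed value replaces the former compile-time RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF setting when VF queues are allocated (pf->vf_nb_qps = pf->vf_nb_qp_max).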
+ */ + vsi->msix_intr = 0; + vsi->nb_msix = 1; + } else { + ret = i40e_res_pool_alloc(&pf->msix_pool, + RTE_MIN(vsi->nb_qps, + RTE_MAX_RXTX_INTR_VEC_ID)); + if (ret < 0) { + PMD_DRV_LOG(ERR, + "VSI MAIN %d get heap failed %d", + vsi->seid, ret); + goto fail_queue_alloc; + } + vsi->msix_intr = ret; + vsi->nb_msix = RTE_MIN(vsi->nb_qps, + RTE_MAX_RXTX_INTR_VEC_ID); } - vsi->msix_intr = ret; - vsi->nb_msix = RTE_MIN(vsi->nb_qps, RTE_MAX_RXTX_INTR_VEC_ID); } else if (type != I40E_VSI_SRIOV) { ret = i40e_res_pool_alloc(&pf->msix_pool, 1); if (ret < 0) { @@ -5315,15 +5566,15 @@ i40e_dev_init_vlan(struct rte_eth_dev *dev) int mask = 0; /* Apply vlan offload setting */ - mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK; + mask = ETH_VLAN_STRIP_MASK | + ETH_VLAN_FILTER_MASK | + ETH_VLAN_EXTEND_MASK; ret = i40e_vlan_offload_set(dev, mask); if (ret) { PMD_DRV_LOG(INFO, "Failed to update vlan offload"); return ret; } - /* Apply double-vlan setting, not implemented yet */ - /* Apply pvid setting */ ret = i40e_vlan_pvid_set(dev, data->dev_conf.txmode.pvid, data->dev_conf.txmode.hw_vlan_insert_pvid); @@ -5876,7 +6127,8 @@ void i40e_pf_disable_irq0(struct i40e_hw *hw) { /* Disable all interrupt types */ - I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0); + I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, + I40E_PFINT_DYN_CTL0_ITR_INDX_MASK); I40E_WRITE_FLUSH(hw); } @@ -5996,7 +6248,7 @@ i40e_dev_handle_aq_msg(struct rte_eth_dev *dev) ret = i40e_dev_link_update(dev, 0); if (!ret) _rte_eth_dev_callback_process(dev, - RTE_ETH_EVENT_INTR_LSC, NULL, NULL); + RTE_ETH_EVENT_INTR_LSC, NULL); break; default: PMD_DRV_LOG(DEBUG, "Request %u is not supported yet", @@ -6698,17 +6950,20 @@ i40e_pf_disable_rss(struct i40e_pf *pf) I40E_WRITE_FLUSH(hw); } -static int +int i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len) { struct i40e_pf *pf = I40E_VSI_TO_PF(vsi); struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); + uint16_t key_idx = (vsi->type == I40E_VSI_SRIOV) ? 
+ I40E_VFQF_HKEY_MAX_INDEX : + I40E_PFQF_HKEY_MAX_INDEX; int ret = 0; if (!key || key_len == 0) { PMD_DRV_LOG(DEBUG, "No key to be configured"); return 0; - } else if (key_len != (I40E_PFQF_HKEY_MAX_INDEX + 1) * + } else if (key_len != (key_idx + 1) * sizeof(uint32_t)) { PMD_DRV_LOG(ERR, "Invalid key length %u", key_len); return -EINVAL; @@ -6725,8 +6980,18 @@ i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len) uint32_t *hash_key = (uint32_t *)key; uint16_t i; - for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) - i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), hash_key[i]); + if (vsi->type == I40E_VSI_SRIOV) { + for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++) + I40E_WRITE_REG( + hw, + I40E_VFQF_HKEY1(i, vsi->user_param), + hash_key[i]); + + } else { + for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) + I40E_WRITE_REG(hw, I40E_PFQF_HKEY(i), + hash_key[i]); + } I40E_WRITE_FLUSH(hw); } @@ -6738,6 +7003,7 @@ i40e_get_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t *key_len) { struct i40e_pf *pf = I40E_VSI_TO_PF(vsi); struct i40e_hw *hw = I40E_VSI_TO_HW(vsi); + uint32_t reg; int ret; if (!key || !key_len) @@ -6754,11 +7020,22 @@ i40e_get_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t *key_len) uint32_t *key_dw = (uint32_t *)key; uint16_t i; - for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) - key_dw[i] = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i)); + if (vsi->type == I40E_VSI_SRIOV) { + for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++) { + reg = I40E_VFQF_HKEY1(i, vsi->user_param); + key_dw[i] = i40e_read_rx_ctl(hw, reg); + } + *key_len = (I40E_VFQF_HKEY_MAX_INDEX + 1) * + sizeof(uint32_t); + } else { + for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) { + reg = I40E_PFQF_HKEY(i); + key_dw[i] = i40e_read_rx_ctl(hw, reg); + } + *key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) * + sizeof(uint32_t); + } } - *key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t); - return 0; } @@ -6951,7 +7228,7 @@ i40e_dev_tunnel_filter_set(struct i40e_pf *pf, uint8_t add) { uint16_t ip_type; - uint32_t ipv4_addr; + uint32_t ipv4_addr, ipv4_addr_le; uint8_t i, tun_type = 0; /* internal varialbe to convert ipv6 byte order */ uint32_t convert_ipv6[4]; @@ -6984,8 +7261,9 @@ i40e_dev_tunnel_filter_set(struct i40e_pf *pf, if (tunnel_filter->ip_type == RTE_TUNNEL_IPTYPE_IPV4) { ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4; ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr); + ipv4_addr_le = rte_cpu_to_le_32(ipv4_addr); rte_memcpy(&pfilter->element.ipaddr.v4.data, - &rte_cpu_to_le_32(ipv4_addr), + &ipv4_addr_le, sizeof(pfilter->element.ipaddr.v4.data)); } else { ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6; @@ -7036,11 +7314,13 @@ i40e_dev_tunnel_filter_set(struct i40e_pf *pf, node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input); if (add && node) { PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!"); + rte_free(cld_filter); return -EINVAL; } if (!add && !node) { PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!"); + rte_free(cld_filter); return -EINVAL; } @@ -7049,16 +7329,26 @@ i40e_dev_tunnel_filter_set(struct i40e_pf *pf, vsi->seid, &cld_filter->element, 1); if (ret < 0) { PMD_DRV_LOG(ERR, "Failed to add a tunnel filter."); + rte_free(cld_filter); return -ENOTSUP; } tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0); + if (tunnel == NULL) { + PMD_DRV_LOG(ERR, "Failed to alloc memory."); + rte_free(cld_filter); + return -ENOMEM; + } + rte_memcpy(tunnel, &check_filter, sizeof(check_filter)); ret = i40e_sw_tunnel_filter_insert(pf, tunnel); + if (ret < 0) + rte_free(tunnel); } 
else { ret = i40e_aq_remove_cloud_filters(hw, vsi->seid, &cld_filter->element, 1); if (ret < 0) { PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter."); + rte_free(cld_filter); return -ENOTSUP; } ret = i40e_sw_tunnel_filter_del(pf, &node->input); @@ -7084,6 +7374,11 @@ i40e_status_code i40e_replace_mpls_l1_filter(struct i40e_pf *pf) struct i40e_hw *hw = I40E_PF_TO_HW(pf); enum i40e_status_code status = I40E_SUCCESS; + if (pf->support_multi_driver) { + PMD_DRV_LOG(ERR, "Replace l1 filter is not supported."); + return I40E_NOT_SUPPORTED; + } + memset(&filter_replace, 0, sizeof(struct i40e_aqc_replace_cloud_filters_cmd)); memset(&filter_replace_buf, 0, @@ -7120,6 +7415,13 @@ i40e_status_code i40e_replace_mpls_l1_filter(struct i40e_pf *pf) status = i40e_aq_replace_cloud_filters(hw, &filter_replace, &filter_replace_buf); + if (!status) { + i40e_global_cfg_warning(I40E_WARNING_RPL_CLD_FILTER); + PMD_DRV_LOG(DEBUG, "Global configuration modification: " + "cloud l1 type is changed from 0x%x to 0x%x", + filter_replace.old_filter_type, + filter_replace.new_filter_type); + } return status; } @@ -7131,6 +7433,11 @@ i40e_status_code i40e_replace_mpls_cloud_filter(struct i40e_pf *pf) struct i40e_hw *hw = I40E_PF_TO_HW(pf); enum i40e_status_code status = I40E_SUCCESS; + if (pf->support_multi_driver) { + PMD_DRV_LOG(ERR, "Replace cloud filter is not supported."); + return I40E_NOT_SUPPORTED; + } + /* For MPLSoUDP */ memset(&filter_replace, 0, sizeof(struct i40e_aqc_replace_cloud_filters_cmd)); @@ -7152,6 +7459,10 @@ i40e_status_code i40e_replace_mpls_cloud_filter(struct i40e_pf *pf) &filter_replace_buf); if (status < 0) return status; + PMD_DRV_LOG(DEBUG, "Global configuration modification: " + "cloud filter type is changed from 0x%x to 0x%x", + filter_replace.old_filter_type, + filter_replace.new_filter_type); /* For MPLSoGRE */ memset(&filter_replace, 0, @@ -7174,6 +7485,13 @@ i40e_status_code i40e_replace_mpls_cloud_filter(struct i40e_pf *pf) status = i40e_aq_replace_cloud_filters(hw, &filter_replace, &filter_replace_buf); + if (!status) { + i40e_global_cfg_warning(I40E_WARNING_RPL_CLD_FILTER); + PMD_DRV_LOG(DEBUG, "Global configuration modification: " + "cloud filter type is changed from 0x%x to 0x%x", + filter_replace.old_filter_type, + filter_replace.new_filter_type); + } return status; } @@ -7185,6 +7503,11 @@ i40e_replace_gtp_l1_filter(struct i40e_pf *pf) struct i40e_hw *hw = I40E_PF_TO_HW(pf); enum i40e_status_code status = I40E_SUCCESS; + if (pf->support_multi_driver) { + PMD_DRV_LOG(ERR, "Replace l1 filter is not supported."); + return I40E_NOT_SUPPORTED; + } + /* For GTP-C */ memset(&filter_replace, 0, sizeof(struct i40e_aqc_replace_cloud_filters_cmd)); @@ -7213,6 +7536,10 @@ i40e_replace_gtp_l1_filter(struct i40e_pf *pf) &filter_replace_buf); if (status < 0) return status; + PMD_DRV_LOG(DEBUG, "Global configuration modification: " + "cloud l1 type is changed from 0x%x to 0x%x", + filter_replace.old_filter_type, + filter_replace.new_filter_type); /* for GTP-U */ memset(&filter_replace, 0, @@ -7241,6 +7568,13 @@ i40e_replace_gtp_l1_filter(struct i40e_pf *pf) status = i40e_aq_replace_cloud_filters(hw, &filter_replace, &filter_replace_buf); + if (!status) { + i40e_global_cfg_warning(I40E_WARNING_RPL_CLD_FILTER); + PMD_DRV_LOG(DEBUG, "Global configuration modification: " + "cloud l1 type is changed from 0x%x to 0x%x", + filter_replace.old_filter_type, + filter_replace.new_filter_type); + } return status; } @@ -7252,6 +7586,11 @@ i40e_status_code i40e_replace_gtp_cloud_filter(struct i40e_pf *pf) 
struct i40e_hw *hw = I40E_PF_TO_HW(pf); enum i40e_status_code status = I40E_SUCCESS; + if (pf->support_multi_driver) { + PMD_DRV_LOG(ERR, "Replace cloud filter is not supported."); + return I40E_NOT_SUPPORTED; + } + /* for GTP-C */ memset(&filter_replace, 0, sizeof(struct i40e_aqc_replace_cloud_filters_cmd)); @@ -7272,6 +7611,10 @@ i40e_status_code i40e_replace_gtp_cloud_filter(struct i40e_pf *pf) &filter_replace_buf); if (status < 0) return status; + PMD_DRV_LOG(DEBUG, "Global configuration modification: " + "cloud filter type is changed from 0x%x to 0x%x", + filter_replace.old_filter_type, + filter_replace.new_filter_type); /* for GTP-U */ memset(&filter_replace, 0, @@ -7293,6 +7636,13 @@ i40e_status_code i40e_replace_gtp_cloud_filter(struct i40e_pf *pf) status = i40e_aq_replace_cloud_filters(hw, &filter_replace, &filter_replace_buf); + if (!status) { + i40e_global_cfg_warning(I40E_WARNING_RPL_CLD_FILTER); + PMD_DRV_LOG(DEBUG, "Global configuration modification: " + "cloud filter type is changed from 0x%x to 0x%x", + filter_replace.old_filter_type, + filter_replace.new_filter_type); + } return status; } @@ -7302,7 +7652,7 @@ i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf, uint8_t add) { uint16_t ip_type; - uint32_t ipv4_addr; + uint32_t ipv4_addr, ipv4_addr_le; uint8_t i, tun_type = 0; /* internal variable to convert ipv6 byte order */ uint32_t convert_ipv6[4]; @@ -7338,8 +7688,9 @@ i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf, if (tunnel_filter->ip_type == I40E_TUNNEL_IPTYPE_IPV4) { ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4; ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr); + ipv4_addr_le = rte_cpu_to_le_32(ipv4_addr); rte_memcpy(&pfilter->element.ipaddr.v4.data, - &rte_cpu_to_le_32(ipv4_addr), + &ipv4_addr_le, sizeof(pfilter->element.ipaddr.v4.data)); } else { ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6; @@ -7486,6 +7837,7 @@ i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf, else { if (tunnel_filter->vf_id >= pf->vf_num) { PMD_DRV_LOG(ERR, "Invalid argument."); + rte_free(cld_filter); return -EINVAL; } vf = &pf->vfs[tunnel_filter->vf_id]; @@ -7500,11 +7852,13 @@ i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf, node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input); if (add && node) { PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!"); + rte_free(cld_filter); return -EINVAL; } if (!add && !node) { PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!"); + rte_free(cld_filter); return -EINVAL; } @@ -7517,11 +7871,20 @@ i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf, vsi->seid, &cld_filter->element, 1); if (ret < 0) { PMD_DRV_LOG(ERR, "Failed to add a tunnel filter."); + rte_free(cld_filter); return -ENOTSUP; } tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0); + if (tunnel == NULL) { + PMD_DRV_LOG(ERR, "Failed to alloc memory."); + rte_free(cld_filter); + return -ENOMEM; + } + rte_memcpy(tunnel, &check_filter, sizeof(check_filter)); ret = i40e_sw_tunnel_filter_insert(pf, tunnel); + if (ret < 0) + rte_free(tunnel); } else { if (big_buffer) ret = i40e_aq_remove_cloud_filters_big_buffer( @@ -7531,6 +7894,7 @@ i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf, &cld_filter->element, 1); if (ret < 0) { PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter."); + rte_free(cld_filter); return -ENOTSUP; } ret = i40e_sw_tunnel_filter_del(pf, &node->input); @@ -7808,9 +8172,15 @@ i40e_tunnel_filter_param_check(struct i40e_pf *pf, static int i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len) { + 
struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf; uint32_t val, reg; int ret = -EINVAL; + if (pf->support_multi_driver) { + PMD_DRV_LOG(ERR, "GRE key length configuration is unsupported"); + return -ENOTSUP; + } + val = I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2)); PMD_DRV_LOG(DEBUG, "Read original GL_PRS_FVBM with 0x%08x", val); @@ -7828,6 +8198,10 @@ i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len) reg, NULL); if (ret != 0) return ret; + PMD_DRV_LOG(DEBUG, "Global register 0x%08x is changed " + "with value 0x%08x", + I40E_GL_PRS_FVBM(2), reg); + i40e_global_cfg_warning(I40E_WARNING_GRE_KEY_LEN); } else { ret = 0; } @@ -7984,14 +8358,17 @@ i40e_get_hash_filter_global_config(struct i40e_hw *hw, (reg & I40E_GLQF_CTL_HTOEP_MASK) ? "Toeplitz" : "Simple XOR"); /* - * We work only with lowest 32 bits which is not correct, but to work - * properly the valid_bit_mask size should be increased up to 64 bits - * and this will brake ABI. This modification will be done in next - * release + * As i40e supports less than 64 flow types, only first 64 bits need to + * be checked. */ - g_cfg->valid_bit_mask[0] = (uint32_t)adapter->flow_types_mask; + for (i = 1; i < RTE_SYM_HASH_MASK_ARRAY_SIZE; i++) { + g_cfg->valid_bit_mask[i] = 0ULL; + g_cfg->sym_hash_enable_mask[i] = 0ULL; + } + + g_cfg->valid_bit_mask[0] = adapter->flow_types_mask; - for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < UINT32_BIT; i++) { + for (i = RTE_ETH_FLOW_UNKNOWN + 1; i < UINT64_BIT; i++) { if (!adapter->pctypes_tbl[i]) continue; for (j = I40E_FILTER_PCTYPE_INVALID + 1; @@ -8000,7 +8377,7 @@ i40e_get_hash_filter_global_config(struct i40e_hw *hw, reg = i40e_read_rx_ctl(hw, I40E_GLQF_HSYM(j)); if (reg & I40E_GLQF_HSYM_SYMH_ENA_MASK) { g_cfg->sym_hash_enable_mask[0] |= - (1UL << i); + (1ULL << i); } } } @@ -8014,7 +8391,7 @@ i40e_hash_global_config_check(const struct i40e_adapter *adapter, const struct rte_eth_hash_global_conf *g_cfg) { uint32_t i; - uint32_t mask0, i40e_mask = adapter->flow_types_mask; + uint64_t mask0, i40e_mask = adapter->flow_types_mask; if (g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_TOEPLITZ && g_cfg->hash_func != RTE_ETH_HASH_FUNCTION_SIMPLE_XOR && @@ -8025,7 +8402,7 @@ i40e_hash_global_config_check(const struct i40e_adapter *adapter, } /* - * As i40e supports less than 32 flow types, only first 32 bits need to + * As i40e supports less than 64 flow types, only first 64 bits need to * be checked. */ mask0 = g_cfg->valid_bit_mask[0]; @@ -8058,35 +8435,39 @@ i40e_set_hash_filter_global_config(struct i40e_hw *hw, struct rte_eth_hash_global_conf *g_cfg) { struct i40e_adapter *adapter = (struct i40e_adapter *)hw->back; + struct i40e_pf *pf = &((struct i40e_adapter *)hw->back)->pf; int ret; uint16_t i, j; uint32_t reg; - /* - * We work only with lowest 32 bits which is not correct, but to work - * properly the valid_bit_mask size should be increased up to 64 bits - * and this will brake ABI. This modification will be done in next - * release - */ - uint32_t mask0 = g_cfg->valid_bit_mask[0] & - (uint32_t)adapter->flow_types_mask; + uint64_t mask0 = g_cfg->valid_bit_mask[0] & adapter->flow_types_mask; + + if (pf->support_multi_driver) { + PMD_DRV_LOG(ERR, "Hash global configuration is not supported."); + return -ENOTSUP; + } /* Check the input parameters */ ret = i40e_hash_global_config_check(adapter, g_cfg); if (ret < 0) return ret; - for (i = RTE_ETH_FLOW_UNKNOWN + 1; mask0 && i < UINT32_BIT; i++) { + /* + * As i40e supports less than 64 flow types, only first 64 bits need to + * be configured. 
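Since valid_bit_mask[] and sym_hash_enable_mask[] are now handled as full 64-bit values (one bit per RTE_ETH_FLOW_* type, instead of the former 32-bit truncation), callers must build them with 64-bit shifts. A sketch of the legacy hash-filter API with the widened masks; the port id and flow type are arbitrary, and the rte_eth_ctrl.h field names are assumed to match this DPDK release:

	struct rte_eth_hash_filter_info info = {
		.info_type = RTE_ETH_HASH_FILTER_GLOBAL_CONFIG,
	};

	info.info.global_conf.hash_func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
	info.info.global_conf.valid_bit_mask[0] =
		1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
	info.info.global_conf.sym_hash_enable_mask[0] =
		1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
	rte_eth_dev_filter_ctrl(0, RTE_ETH_FILTER_HASH,
				RTE_ETH_FILTER_SET, &info);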
+ */ + for (i = RTE_ETH_FLOW_UNKNOWN + 1; mask0 && i < UINT64_BIT; i++) { if (mask0 & (1UL << i)) { - reg = (g_cfg->sym_hash_enable_mask[0] & (1UL << i)) ? + reg = (g_cfg->sym_hash_enable_mask[0] & (1ULL << i)) ? I40E_GLQF_HSYM_SYMH_ENA_MASK : 0; for (j = I40E_FILTER_PCTYPE_INVALID + 1; j < I40E_FILTER_PCTYPE_MAX; j++) { if (adapter->pctypes_tbl[i] & (1ULL << j)) - i40e_write_rx_ctl(hw, + i40e_write_global_rx_ctl(hw, I40E_GLQF_HSYM(j), reg); } + i40e_global_cfg_warning(I40E_WARNING_HSYM); } } @@ -8111,7 +8492,8 @@ i40e_set_hash_filter_global_config(struct i40e_hw *hw, /* Use the default, and keep it as it is */ goto out; - i40e_write_rx_ctl(hw, I40E_GLQF_CTL, reg); + i40e_write_global_rx_ctl(hw, I40E_GLQF_CTL, reg); + i40e_global_cfg_warning(I40E_WARNING_QF_CTL); out: I40E_WRITE_FLUSH(hw); @@ -8700,6 +9082,18 @@ i40e_check_write_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val) (uint32_t)i40e_read_rx_ctl(hw, addr)); } +void +i40e_check_write_global_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val) +{ + uint32_t reg = i40e_read_rx_ctl(hw, addr); + + PMD_DRV_LOG(DEBUG, "[0x%08x] original: 0x%08x", addr, reg); + if (reg != val) + i40e_write_global_rx_ctl(hw, addr, val); + PMD_DRV_LOG(DEBUG, "[0x%08x] after: 0x%08x", addr, + (uint32_t)i40e_read_rx_ctl(hw, addr)); +} + static void i40e_filter_input_set_init(struct i40e_pf *pf) { @@ -8723,6 +9117,10 @@ i40e_filter_input_set_init(struct i40e_pf *pf) I40E_INSET_MASK_NUM_REG); if (num < 0) return; + if (pf->support_multi_driver && num > 0) { + PMD_DRV_LOG(ERR, "Input set setting is not supported."); + return; + } inset_reg = i40e_translate_input_set_reg(hw->mac.type, input_set); @@ -8731,31 +9129,48 @@ i40e_filter_input_set_init(struct i40e_pf *pf) i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1), (uint32_t)((inset_reg >> I40E_32_BIT_WIDTH) & UINT32_MAX)); - i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(0, pctype), - (uint32_t)(inset_reg & UINT32_MAX)); - i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(1, pctype), - (uint32_t)((inset_reg >> - I40E_32_BIT_WIDTH) & UINT32_MAX)); - - for (i = 0; i < num; i++) { - i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype), - mask_reg[i]); - i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype), - mask_reg[i]); - } - /*clear unused mask registers of the pctype */ - for (i = num; i < I40E_INSET_MASK_NUM_REG; i++) { - i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype), - 0); - i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype), - 0); + if (!pf->support_multi_driver) { + i40e_check_write_global_reg(hw, + I40E_GLQF_HASH_INSET(0, pctype), + (uint32_t)(inset_reg & UINT32_MAX)); + i40e_check_write_global_reg(hw, + I40E_GLQF_HASH_INSET(1, pctype), + (uint32_t)((inset_reg >> + I40E_32_BIT_WIDTH) & UINT32_MAX)); + + for (i = 0; i < num; i++) { + i40e_check_write_global_reg(hw, + I40E_GLQF_FD_MSK(i, pctype), + mask_reg[i]); + i40e_check_write_global_reg(hw, + I40E_GLQF_HASH_MSK(i, pctype), + mask_reg[i]); + } + /*clear unused mask registers of the pctype */ + for (i = num; i < I40E_INSET_MASK_NUM_REG; i++) { + i40e_check_write_global_reg(hw, + I40E_GLQF_FD_MSK(i, pctype), + 0); + i40e_check_write_global_reg(hw, + I40E_GLQF_HASH_MSK(i, pctype), + 0); + } + } else { + PMD_DRV_LOG(ERR, "Input set setting is not supported."); } I40E_WRITE_FLUSH(hw); /* store the default input set */ - pf->hash_input_set[pctype] = input_set; + if (!pf->support_multi_driver) + pf->hash_input_set[pctype] = input_set; pf->fdir.input_set[pctype] = input_set; } + + if (!pf->support_multi_driver) { + 
i40e_global_cfg_warning(I40E_WARNING_HASH_INSET); + i40e_global_cfg_warning(I40E_WARNING_FD_MSK); + i40e_global_cfg_warning(I40E_WARNING_HASH_MSK); + } } int @@ -8778,6 +9193,11 @@ i40e_hash_filter_inset_select(struct i40e_hw *hw, return -EINVAL; } + if (pf->support_multi_driver) { + PMD_DRV_LOG(ERR, "Hash input set setting is not supported."); + return -ENOTSUP; + } + pctype = i40e_flowtype_to_pctype(pf->adapter, conf->flow_type); if (pctype == I40E_FILTER_PCTYPE_INVALID) { PMD_DRV_LOG(ERR, "invalid flow_type input."); @@ -8811,19 +9231,21 @@ i40e_hash_filter_inset_select(struct i40e_hw *hw, inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set); - i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(0, pctype), - (uint32_t)(inset_reg & UINT32_MAX)); - i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(1, pctype), - (uint32_t)((inset_reg >> - I40E_32_BIT_WIDTH) & UINT32_MAX)); + i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(0, pctype), + (uint32_t)(inset_reg & UINT32_MAX)); + i40e_check_write_global_reg(hw, I40E_GLQF_HASH_INSET(1, pctype), + (uint32_t)((inset_reg >> + I40E_32_BIT_WIDTH) & UINT32_MAX)); + i40e_global_cfg_warning(I40E_WARNING_HASH_INSET); for (i = 0; i < num; i++) - i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype), - mask_reg[i]); + i40e_check_write_global_reg(hw, I40E_GLQF_HASH_MSK(i, pctype), + mask_reg[i]); /*clear unused mask registers of the pctype */ for (i = num; i < I40E_INSET_MASK_NUM_REG; i++) - i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype), - 0); + i40e_check_write_global_reg(hw, I40E_GLQF_HASH_MSK(i, pctype), + 0); + i40e_global_cfg_warning(I40E_WARNING_HASH_MSK); I40E_WRITE_FLUSH(hw); pf->hash_input_set[pctype] = input_set; @@ -8881,6 +9303,10 @@ i40e_fdir_filter_inset_select(struct i40e_pf *pf, I40E_INSET_MASK_NUM_REG); if (num < 0) return -EINVAL; + if (pf->support_multi_driver && num > 0) { + PMD_DRV_LOG(ERR, "FDIR bit mask is not supported."); + return -ENOTSUP; + } inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set); @@ -8890,13 +9316,20 @@ i40e_fdir_filter_inset_select(struct i40e_pf *pf, (uint32_t)((inset_reg >> I40E_32_BIT_WIDTH) & UINT32_MAX)); - for (i = 0; i < num; i++) - i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype), - mask_reg[i]); - /*clear unused mask registers of the pctype */ - for (i = num; i < I40E_INSET_MASK_NUM_REG; i++) - i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype), - 0); + if (!pf->support_multi_driver) { + for (i = 0; i < num; i++) + i40e_check_write_global_reg(hw, + I40E_GLQF_FD_MSK(i, pctype), + mask_reg[i]); + /*clear unused mask registers of the pctype */ + for (i = num; i < I40E_INSET_MASK_NUM_REG; i++) + i40e_check_write_global_reg(hw, + I40E_GLQF_FD_MSK(i, pctype), + 0); + i40e_global_cfg_warning(I40E_WARNING_FD_MSK); + } else { + PMD_DRV_LOG(ERR, "FDIR bit mask is not supported."); + } I40E_WRITE_FLUSH(hw); pf->fdir.input_set[pctype] = input_set; @@ -9142,9 +9575,16 @@ i40e_ethertype_filter_set(struct i40e_pf *pf, if (add) { ethertype_filter = rte_zmalloc("ethertype_filter", sizeof(*ethertype_filter), 0); + if (ethertype_filter == NULL) { + PMD_DRV_LOG(ERR, "Failed to alloc memory."); + return -ENOMEM; + } + rte_memcpy(ethertype_filter, &check_filter, sizeof(check_filter)); ret = i40e_sw_ethertype_filter_insert(pf, ethertype_filter); + if (ret < 0) + rte_free(ethertype_filter); } else { ret = i40e_sw_ethertype_filter_del(pf, &node->input); } @@ -10679,27 +11119,21 @@ i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) struct rte_pci_device *pci_dev = 
RTE_ETH_DEV_TO_PCI(dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); - uint16_t interval = - i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL); uint16_t msix_intr; msix_intr = intr_handle->intr_vec[queue_id]; if (msix_intr == I40E_MISC_VEC_ID) I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, - I40E_PFINT_DYN_CTLN_INTENA_MASK | - I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | - (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) | - (interval << - I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT)); + I40E_PFINT_DYN_CTL0_INTENA_MASK | + I40E_PFINT_DYN_CTL0_CLEARPBA_MASK | + I40E_PFINT_DYN_CTL0_ITR_INDX_MASK); else I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - I40E_RX_VEC_START), I40E_PFINT_DYN_CTLN_INTENA_MASK | I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | - (0 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) | - (interval << - I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT)); + I40E_PFINT_DYN_CTLN_ITR_INDX_MASK); I40E_WRITE_FLUSH(hw); rte_intr_enable(&pci_dev->intr_handle); @@ -10717,12 +11151,13 @@ i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) msix_intr = intr_handle->intr_vec[queue_id]; if (msix_intr == I40E_MISC_VEC_ID) - I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, 0); + I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTL0, + I40E_PFINT_DYN_CTL0_ITR_INDX_MASK); else I40E_WRITE_REG(hw, I40E_PFINT_DYN_CTLN(msix_intr - I40E_RX_VEC_START), - 0); + I40E_PFINT_DYN_CTLN_ITR_INDX_MASK); I40E_WRITE_FLUSH(hw); return 0; @@ -10818,14 +11253,43 @@ static void i40e_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr) { struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_vsi *vsi = pf->main_vsi; + struct i40e_mac_filter_info mac_filter; + struct i40e_mac_filter *f; + int ret; if (!is_valid_assigned_ether_addr(mac_addr)) { PMD_DRV_LOG(ERR, "Tried to set invalid MAC address."); return; } - /* Flags: 0x3 updates port address */ - i40e_aq_mac_address_write(hw, 0x3, mac_addr->addr_bytes, NULL); + TAILQ_FOREACH(f, &vsi->mac_list, next) { + if (is_same_ether_addr(&pf->dev_addr, &f->mac_info.mac_addr)) + break; + } + + if (f == NULL) { + PMD_DRV_LOG(ERR, "Failed to find filter for default mac"); + return; + } + + mac_filter = f->mac_info; + ret = i40e_vsi_delete_mac(vsi, &mac_filter.mac_addr); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to delete mac filter"); + return; + } + memcpy(&mac_filter.mac_addr, mac_addr, ETH_ADDR_LEN); + ret = i40e_vsi_add_mac(vsi, &mac_filter); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to add mac filter"); + return; + } + memcpy(&pf->dev_addr, mac_addr, ETH_ADDR_LEN); + + i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL, + mac_addr->addr_bytes, NULL); } static int @@ -10943,12 +11407,23 @@ i40e_tunnel_filter_restore(struct i40e_pf *pf) } } +/* Restore rss filter */ +static inline void +i40e_rss_filter_restore(struct i40e_pf *pf) +{ + struct i40e_rte_flow_rss_conf *conf = + &pf->rss_info; + if (conf->num) + i40e_config_rss_filter(pf, conf, TRUE); +} + static void i40e_filter_restore(struct i40e_pf *pf) { i40e_ethertype_filter_restore(pf); i40e_tunnel_filter_restore(pf); i40e_fdir_filter_restore(pf); + i40e_rss_filter_restore(pf); } static bool @@ -11078,7 +11553,7 @@ i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg, uint8_t proto_id; char name[RTE_PMD_I40E_DDP_NAME_SIZE]; uint32_t i, j, n; - bool inner_ip; + bool in_tunnel; int ret; /* get information about new ptype num */ @@ 
-11123,7 +11598,7 @@ i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg, for (i = 0; i < ptype_num; i++) { ptype_mapping[i].hw_ptype = ptype[i].ptype_id; ptype_mapping[i].sw_ptype = 0; - inner_ip = false; + in_tunnel = false; for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) { proto_id = ptype[i].protocols[j]; if (proto_id == RTE_PMD_I40E_PROTO_UNUSED) @@ -11133,54 +11608,108 @@ i40e_update_customized_ptype(struct rte_eth_dev *dev, uint8_t *pkg, continue; memset(name, 0, sizeof(name)); strcpy(name, proto[n].name); - if (!strncmp(name, "IPV4", 4) && !inner_ip) { + if (!strncasecmp(name, "PPPOE", 5)) + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_L2_ETHER_PPPOE; + else if (!strncasecmp(name, "IPV4FRAG", 8) && + !in_tunnel) { ptype_mapping[i].sw_ptype |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN; - inner_ip = true; - } else if (!strncmp(name, "IPV4FRAG", 8) && - inner_ip) { + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_L4_FRAG; + } else if (!strncasecmp(name, "IPV4FRAG", 8) && + in_tunnel) { ptype_mapping[i].sw_ptype |= RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN; ptype_mapping[i].sw_ptype |= RTE_PTYPE_INNER_L4_FRAG; - } else if (!strncmp(name, "IPV4", 4) && - inner_ip) + } else if (!strncasecmp(name, "OIPV4", 5)) { + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN; + in_tunnel = true; + } else if (!strncasecmp(name, "IPV4", 4) && + !in_tunnel) + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN; + else if (!strncasecmp(name, "IPV4", 4) && + in_tunnel) ptype_mapping[i].sw_ptype |= RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN; - else if (!strncmp(name, "IPV6", 4) && - !inner_ip) { + else if (!strncasecmp(name, "IPV6FRAG", 8) && + !in_tunnel) { ptype_mapping[i].sw_ptype |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN; - inner_ip = true; - } else if (!strncmp(name, "IPV6FRAG", 8) && - inner_ip) { + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_L4_FRAG; + } else if (!strncasecmp(name, "IPV6FRAG", 8) && + in_tunnel) { ptype_mapping[i].sw_ptype |= RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN; ptype_mapping[i].sw_ptype |= RTE_PTYPE_INNER_L4_FRAG; - } else if (!strncmp(name, "IPV6", 4) && - inner_ip) + } else if (!strncasecmp(name, "OIPV6", 5)) { ptype_mapping[i].sw_ptype |= - RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN; - else if (!strncmp(name, "GTPC", 4)) + RTE_PTYPE_L3_IPV6_EXT_UNKNOWN; + in_tunnel = true; + } else if (!strncasecmp(name, "IPV6", 4) && + !in_tunnel) ptype_mapping[i].sw_ptype |= - RTE_PTYPE_TUNNEL_GTPC; - else if (!strncmp(name, "GTPU", 4)) + RTE_PTYPE_L3_IPV6_EXT_UNKNOWN; + else if (!strncasecmp(name, "IPV6", 4) && + in_tunnel) ptype_mapping[i].sw_ptype |= - RTE_PTYPE_TUNNEL_GTPU; - else if (!strncmp(name, "UDP", 3)) + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN; + else if (!strncasecmp(name, "UDP", 3) && + !in_tunnel) + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_L4_UDP; + else if (!strncasecmp(name, "UDP", 3) && + in_tunnel) ptype_mapping[i].sw_ptype |= RTE_PTYPE_INNER_L4_UDP; - else if (!strncmp(name, "TCP", 3)) + else if (!strncasecmp(name, "TCP", 3) && + !in_tunnel) + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_L4_TCP; + else if (!strncasecmp(name, "TCP", 3) && + in_tunnel) ptype_mapping[i].sw_ptype |= RTE_PTYPE_INNER_L4_TCP; - else if (!strncmp(name, "SCTP", 4)) + else if (!strncasecmp(name, "SCTP", 4) && + !in_tunnel) + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_L4_SCTP; + else if (!strncasecmp(name, "SCTP", 4) && + in_tunnel) ptype_mapping[i].sw_ptype |= RTE_PTYPE_INNER_L4_SCTP; - else if (!strncmp(name, "ICMP", 4) || - !strncmp(name, "ICMPV6", 6)) + else if ((!strncasecmp(name, "ICMP", 4) || + !strncasecmp(name, "ICMPV6", 
6)) && + !in_tunnel) + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_L4_ICMP; + else if ((!strncasecmp(name, "ICMP", 4) || + !strncasecmp(name, "ICMPV6", 6)) && + in_tunnel) ptype_mapping[i].sw_ptype |= RTE_PTYPE_INNER_L4_ICMP; + else if (!strncasecmp(name, "GTPC", 4)) { + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_TUNNEL_GTPC; + in_tunnel = true; + } else if (!strncasecmp(name, "GTPU", 4)) { + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_TUNNEL_GTPU; + in_tunnel = true; + } else if (!strncasecmp(name, "GRENAT", 6)) { + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_TUNNEL_GRENAT; + in_tunnel = true; + } else if (!strncasecmp(name, "L2TPV2CTL", 9)) { + ptype_mapping[i].sw_ptype |= + RTE_PTYPE_TUNNEL_L2TP; + in_tunnel = true; + } break; } @@ -11312,6 +11841,11 @@ i40e_cloud_filter_qinq_create(struct i40e_pf *pf) struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf; struct i40e_hw *hw = I40E_PF_TO_HW(pf); + if (pf->support_multi_driver) { + PMD_DRV_LOG(ERR, "Replace cloud filter is not supported."); + return ret; + } + /* Init */ memset(&filter_replace, 0, sizeof(struct i40e_aqc_replace_cloud_filters_cmd)); @@ -11342,6 +11876,10 @@ i40e_cloud_filter_qinq_create(struct i40e_pf *pf) &filter_replace_buf); if (ret != I40E_SUCCESS) return ret; + PMD_DRV_LOG(DEBUG, "Global configuration modification: " + "cloud l1 type is changed from 0x%x to 0x%x", + filter_replace.old_filter_type, + filter_replace.new_filter_type); /* Apply the second L2 cloud filter */ memset(&filter_replace, 0, @@ -11363,17 +11901,104 @@ i40e_cloud_filter_qinq_create(struct i40e_pf *pf) I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED; ret = i40e_aq_replace_cloud_filters(hw, &filter_replace, &filter_replace_buf); + if (!ret) { + i40e_global_cfg_warning(I40E_WARNING_RPL_CLD_FILTER); + PMD_DRV_LOG(DEBUG, "Global configuration modification: " + "cloud filter type is changed from 0x%x to 0x%x", + filter_replace.old_filter_type, + filter_replace.new_filter_type); + } return ret; } +int +i40e_config_rss_filter(struct i40e_pf *pf, + struct i40e_rte_flow_rss_conf *conf, bool add) +{ + struct i40e_hw *hw = I40E_PF_TO_HW(pf); + uint32_t i, lut = 0; + uint16_t j, num; + struct rte_eth_rss_conf rss_conf = conf->rss_conf; + struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info; + + if (!add) { + if (memcmp(conf, rss_info, + sizeof(struct i40e_rte_flow_rss_conf)) == 0) { + i40e_pf_disable_rss(pf); + memset(rss_info, 0, + sizeof(struct i40e_rte_flow_rss_conf)); + return 0; + } + return -EINVAL; + } + + if (rss_info->num) + return -EINVAL; + + /* If both VMDQ and RSS enabled, not all of PF queues are configured. + * It's necessary to calculate the actual PF queues that are configured. 
+ */ + if (pf->dev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) + num = i40e_pf_calc_configured_queues_num(pf); + else + num = pf->dev_data->nb_rx_queues; + + num = RTE_MIN(num, conf->num); + PMD_DRV_LOG(INFO, "Max of contiguous %u PF queues are configured", + num); + + if (num == 0) { + PMD_DRV_LOG(ERR, "No PF queues are configured to enable RSS"); + return -ENOTSUP; + } + + /* Fill in redirection table */ + for (i = 0, j = 0; i < hw->func_caps.rss_table_size; i++, j++) { + if (j == num) + j = 0; + lut = (lut << 8) | (conf->queue[j] & ((0x1 << + hw->func_caps.rss_table_entry_width) - 1)); + if ((i & 3) == 3) + I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i >> 2), lut); + } + + if ((rss_conf.rss_hf & pf->adapter->flow_types_mask) == 0) { + i40e_pf_disable_rss(pf); + return 0; + } + if (rss_conf.rss_key == NULL || rss_conf.rss_key_len < + (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) { + /* Random default keys */ + static uint32_t rss_key_default[] = {0x6b793944, + 0x23504cb5, 0x5bea75b6, 0x309f4f12, 0x3dc0a2b8, + 0x024ddcdf, 0x339b8ca0, 0x4c4af64a, 0x34fac605, + 0x55d85839, 0x3a58997d, 0x2ec938e1, 0x66031581}; + + rss_conf.rss_key = (uint8_t *)rss_key_default; + rss_conf.rss_key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) * + sizeof(uint32_t); + } + + i40e_hw_rss_hash_set(pf, &rss_conf); + + rte_memcpy(rss_info, + conf, sizeof(struct i40e_rte_flow_rss_conf)); + + return 0; +} + RTE_INIT(i40e_init_log); static void i40e_init_log(void) { - i40e_logtype_init = rte_log_register("pmd.i40e.init"); + i40e_logtype_init = rte_log_register("pmd.net.i40e.init"); if (i40e_logtype_init >= 0) rte_log_set_level(i40e_logtype_init, RTE_LOG_NOTICE); - i40e_logtype_driver = rte_log_register("pmd.i40e.driver"); + i40e_logtype_driver = rte_log_register("pmd.net.i40e.driver"); if (i40e_logtype_driver >= 0) rte_log_set_level(i40e_logtype_driver, RTE_LOG_NOTICE); } + +RTE_PMD_REGISTER_PARAM_STRING(net_i40e, + QUEUE_NUM_PER_VF_ARG "=1|2|4|8|16" + ETH_I40E_SUPPORT_MULTI_DRIVER "=1"); diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h index cd67453d..99efb670 100644 --- a/drivers/net/i40e/i40e_ethdev.h +++ b/drivers/net/i40e/i40e_ethdev.h @@ -1,34 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2010-2017 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2017 Intel Corporation */ #ifndef _I40E_ETHDEV_H_ @@ -61,7 +32,8 @@ #define I40E_NUM_MACADDR_MAX 64 /* Maximum number of VFs */ #define I40E_MAX_VF 128 - +/*flag of no loopback*/ +#define I40E_AQ_LB_MODE_NONE 0x0 /* * vlan_id is a 12 bit number. * The VFTA array is actually a 4096 bit array, 128 of 32bit elements. @@ -106,6 +78,15 @@ (((vf)->version_major == VIRTCHNL_VERSION_MAJOR) && \ ((vf)->version_minor == 1)) +#define I40E_WRITE_GLB_REG(hw, reg, value) \ + do { \ + I40E_PCI_REG_WRITE(I40E_PCI_REG_ADDR((hw), \ + (reg)), (value)); \ + PMD_DRV_LOG(DEBUG, "Global register 0x%08x is modified " \ + "with value 0x%08x", \ + (reg), (value)); \ + } while (0) + /* index flex payload per layer */ enum i40e_flxpld_layer_idx { I40E_FLXPLD_L2_IDX = 0, @@ -189,6 +170,7 @@ enum i40e_flxpld_layer_idx { #define I40E_ITR_INDEX_NONE 3 #define I40E_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */ #define I40E_QUEUE_ITR_INTERVAL_MAX 8160 /* 8160 us */ +#define I40E_VF_QUEUE_ITR_INTERVAL_DEFAULT 8160 /* 8160 us */ /* Special FW support this floating VEB feature */ #define FLOATING_VEB_SUPPORTED_FW_MAJ 5 #define FLOATING_VEB_SUPPORTED_FW_MIN 0 @@ -353,7 +335,7 @@ struct i40e_vsi { * needs to add, HW needs to know the layout that VSIs are organized. * Besides that, VSI isan element and can't switch packets, which needs * to add new component VEB to perform switching. So, a new VSI needs - * to specify the the uplink VSI (Parent VSI) before created. The + * to specify the uplink VSI (Parent VSI) before created. The * uplink VSI will check whether it had a VEB to switch packets. If no, * it will try to create one. Then, uplink VSI will move the new VSI * into its' sib_vsi_list to manage all the downlink VSI. @@ -426,6 +408,9 @@ struct i40e_pf_vf { uint16_t lan_nb_qps; /* Actual queues allocated */ uint16_t reset_cnt; /* Total vf reset times */ struct ether_addr mac_addr; /* Default MAC address */ + /* version of the virtchnl from VF */ + struct virtchnl_version_info version; + uint32_t request_caps; /* offload caps requested from VF */ }; /* @@ -891,6 +876,13 @@ struct i40e_customized_pctype { bool valid; /* Check if it's valid */ }; +struct i40e_rte_flow_rss_conf { + struct rte_eth_rss_conf rss_conf; /**< RSS parameters. */ + uint16_t queue_region_conf; /**< Queue region config flag */ + uint16_t num; /**< Number of entries in queue[]. */ + uint16_t queue[I40E_MAX_Q_PER_TC]; /**< Queues indices to use. */ +}; + /* * Structure to store private data specific for PF instance. 
*/ @@ -945,6 +937,7 @@ struct i40e_pf { struct i40e_fdir_info fdir; /* flow director info */ struct i40e_ethertype_rule ethertype; /* Ethertype filter rule */ struct i40e_tunnel_rule tunnel; /* Tunnel filter rule */ + struct i40e_rte_flow_rss_conf rss_info; /* rss info */ struct i40e_queue_regions queue_region; /* queue region info */ struct i40e_fc_conf fc_conf; /* Flow control conf */ struct i40e_mirror_rule_list mirror_list; @@ -957,6 +950,7 @@ struct i40e_pf { bool gtp_replace_flag; /* 1 - GTP-C/U filter replace is done */ bool qinq_replace_flag; /* QINQ filter replace is done */ struct i40e_tm_conf tm_conf; + bool support_multi_driver; /* 1 - support multiple driver */ /* Dynamic Device Personalization */ bool gtp_support; /* 1 - support GTP-C and GTP-U */ @@ -1071,6 +1065,7 @@ union i40e_filter_t { struct i40e_fdir_filter_conf fdir_filter; struct rte_eth_tunnel_filter_conf tunnel_filter; struct i40e_tunnel_filter_conf consistent_tunnel_filter; + struct i40e_rte_flow_rss_conf rss_conf; }; typedef int (*parse_filter_t)(struct rte_eth_dev *dev, @@ -1084,6 +1079,22 @@ struct i40e_valid_pattern { parse_filter_t parse_filter; }; +enum I40E_WARNING_IDX { + I40E_WARNING_DIS_FLX_PLD, + I40E_WARNING_ENA_FLX_PLD, + I40E_WARNING_QINQ_PARSER, + I40E_WARNING_QINQ_CLOUD_FILTER, + I40E_WARNING_TPID, + I40E_WARNING_FLOW_CTL, + I40E_WARNING_GRE_KEY_LEN, + I40E_WARNING_QF_CTL, + I40E_WARNING_HASH_INSET, + I40E_WARNING_HSYM, + I40E_WARNING_HASH_MSK, + I40E_WARNING_FD_MSK, + I40E_WARNING_RPL_CLD_FILTER, +}; + int i40e_dev_switch_queues(struct i40e_pf *pf, bool on); int i40e_vsi_release(struct i40e_vsi *vsi); struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, @@ -1186,6 +1197,8 @@ int i40e_generate_inset_mask_reg(uint64_t inset, uint32_t *mask, uint8_t nb_elem); uint64_t i40e_translate_input_set_reg(enum i40e_mac_type type, uint64_t input); void i40e_check_write_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val); +void i40e_check_write_global_reg(struct i40e_hw *hw, + uint32_t addr, uint32_t val); int i40e_tm_ops_get(struct rte_eth_dev *dev, void *ops); void i40e_tm_conf_init(struct rte_eth_dev *dev); @@ -1198,6 +1211,11 @@ int i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb); int i40e_flush_queue_region_all_conf(struct rte_eth_dev *dev, struct i40e_hw *hw, struct i40e_pf *pf, uint16_t on); void i40e_init_queue_region_conf(struct rte_eth_dev *dev); +void i40e_flex_payload_reg_set_default(struct i40e_hw *hw); +int i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len); +int i40e_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size); +int i40e_config_rss_filter(struct i40e_pf *pf, + struct i40e_rte_flow_rss_conf *conf, bool add); #define I40E_DEV_TO_PCI(eth_dev) \ RTE_DEV_TO_PCI((eth_dev)->device) @@ -1274,15 +1292,50 @@ i40e_align_floor(int n) } static inline uint16_t -i40e_calc_itr_interval(int16_t interval) +i40e_calc_itr_interval(int16_t interval, bool is_pf, bool is_multi_drv) { - if (interval < 0 || interval > I40E_QUEUE_ITR_INTERVAL_MAX) - interval = I40E_QUEUE_ITR_INTERVAL_DEFAULT; + if (interval < 0 || interval > I40E_QUEUE_ITR_INTERVAL_MAX) { + if (is_multi_drv) { + interval = I40E_QUEUE_ITR_INTERVAL_MAX; + } else { + if (is_pf) + interval = I40E_QUEUE_ITR_INTERVAL_DEFAULT; + else + interval = I40E_VF_QUEUE_ITR_INTERVAL_DEFAULT; + } + } /* Convert to hardware count, as writing each 1 represents 2 us */ return interval / 2; } +static inline void +i40e_global_cfg_warning(enum I40E_WARNING_IDX idx) +{ + const char *warning; + static const char *const 
warning_list[] = { + [I40E_WARNING_DIS_FLX_PLD] = "disable FDIR flexible payload", + [I40E_WARNING_ENA_FLX_PLD] = "enable FDIR flexible payload", + [I40E_WARNING_QINQ_PARSER] = "support QinQ parser", + [I40E_WARNING_QINQ_CLOUD_FILTER] = "support QinQ cloud filter", + [I40E_WARNING_TPID] = "support TPID configuration", + [I40E_WARNING_FLOW_CTL] = "configure water marker", + [I40E_WARNING_GRE_KEY_LEN] = "support GRE key length setting", + [I40E_WARNING_QF_CTL] = "support hash function setting", + [I40E_WARNING_HASH_INSET] = "configure hash input set", + [I40E_WARNING_HSYM] = "set symmetric hash", + [I40E_WARNING_HASH_MSK] = "configure hash mask", + [I40E_WARNING_FD_MSK] = "configure fdir mask", + [I40E_WARNING_RPL_CLD_FILTER] = "replace cloud filter", + }; + + warning = warning_list[idx]; + + RTE_LOG(WARNING, PMD, + "Global register is changed during %s\n", + warning); +} + #define I40E_VALID_FLOW(flow_type) \ ((flow_type) == RTE_ETH_FLOW_FRAG_IPV4 || \ (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV4_TCP || \ diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c index 91b5bb03..fd003fe0 100644 --- a/drivers/net/i40e/i40e_ethdev_vf.c +++ b/drivers/net/i40e/i40e_ethdev_vf.c @@ -1,34 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2010-2016 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
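/*
 * Illustrative sketch, not part of this patch: how the reworked
 * i40e_calc_itr_interval() helper shown above resolves its arguments.
 * The hardware counts ITR in 2 us units, so an in-range request is simply
 * halved; an out-of-range request first falls back to the PF, VF or
 * multi-driver default. Variable and function names below are hypothetical;
 * the helper itself is driver-internal (i40e_ethdev.h).
 */
#include <stdint.h>
#include "i40e_ethdev.h"

static void
itr_interval_example(void)
{
	/* 32 us is in range, so it is only converted: 32 / 2 = 16 ticks */
	uint16_t pf_ticks = i40e_calc_itr_interval(32, 1, 0);
	/* out of range on a VF: falls back to 8160 us -> 4080 ticks */
	uint16_t vf_ticks = i40e_calc_itr_interval(-1, 0, 0);
	/* out of range with multiple drivers: capped at 8160 us -> 4080 ticks */
	uint16_t mdrv_ticks = i40e_calc_itr_interval(-1, 1, 1);

	(void)pf_ticks; (void)vf_ticks; (void)mdrv_ticks;
}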
+/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2016 Intel Corporation */ #include <sys/queue.h> @@ -54,7 +25,7 @@ #include <rte_eal.h> #include <rte_alarm.h> #include <rte_ether.h> -#include <rte_ethdev.h> +#include <rte_ethdev_driver.h> #include <rte_ethdev_pci.h> #include <rte_malloc.h> #include <rte_dev.h> @@ -945,14 +916,16 @@ i40evf_update_stats(struct i40e_vsi *vsi, static void i40evf_dev_xstats_reset(struct rte_eth_dev *dev) { + int ret; struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); struct i40e_eth_stats *pstats = NULL; /* read stat values to clear hardware registers */ - i40evf_query_stats(dev, &pstats); + ret = i40evf_query_stats(dev, &pstats); /* set stats offset base on current values */ - vf->vsi.eth_stats_offset = *pstats; + if (ret == 0) + vf->vsi.eth_stats_offset = *pstats; } static int i40evf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev, @@ -1165,7 +1138,7 @@ i40evf_init_vf(struct rte_eth_dev *dev) struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); uint16_t interval = - i40e_calc_itr_interval(I40E_QUEUE_ITR_INTERVAL_MAX); + i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL, 0, 0); vf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); vf->dev_data = dev->data; @@ -1308,7 +1281,7 @@ i40evf_handle_pf_event(struct rte_eth_dev *dev, uint8_t *msg, case VIRTCHNL_EVENT_RESET_IMPENDING: PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_RESET_IMPENDING event"); _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, - NULL, NULL); + NULL); break; case VIRTCHNL_EVENT_LINK_CHANGE: PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_LINK_CHANGE event"); @@ -1585,13 +1558,19 @@ static int i40evf_init_vlan(struct rte_eth_dev *dev) { /* Apply vlan offload setting */ - return i40evf_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK); + i40evf_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK); + + return 0; } static int i40evf_vlan_offload_set(struct rte_eth_dev *dev, int mask) { struct rte_eth_conf *dev_conf = &dev->data->dev_conf; + struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + + if (!(vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)) + return -ENOTSUP; /* Vlan stripping setting */ if (mask & ETH_VLAN_STRIP_MASK) { @@ -1862,7 +1841,7 @@ i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); uint16_t interval = - i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL); + i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL, 0, 0); uint16_t msix_intr; msix_intr = intr_handle->intr_vec[queue_id]; @@ -1997,7 +1976,8 @@ i40evf_dev_start(struct rte_eth_dev *dev) dev->data->nb_tx_queues); /* check and configure queue intr-vector mapping */ - if (dev->data->dev_conf.intr_conf.rxq != 0) { + if (rte_intr_cap_multiple(intr_handle) && + dev->data->dev_conf.intr_conf.rxq) { intr_vector = dev->data->nb_rx_queues; if (rte_intr_efd_enable(intr_handle, intr_vector)) return -1; @@ -2214,14 +2194,22 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) DEV_RX_OFFLOAD_QINQ_STRIP | DEV_RX_OFFLOAD_IPV4_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM | - DEV_RX_OFFLOAD_TCP_CKSUM; + DEV_RX_OFFLOAD_TCP_CKSUM | + DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | + DEV_RX_OFFLOAD_CRC_STRIP; dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT | DEV_TX_OFFLOAD_QINQ_INSERT | DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM | 
DEV_TX_OFFLOAD_TCP_CKSUM | - DEV_TX_OFFLOAD_SCTP_CKSUM; + DEV_TX_OFFLOAD_SCTP_CKSUM | + DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | + DEV_TX_OFFLOAD_TCP_TSO | + DEV_TX_OFFLOAD_VXLAN_TNL_TSO | + DEV_TX_OFFLOAD_GRE_TNL_TSO | + DEV_TX_OFFLOAD_IPIP_TNL_TSO | + DEV_TX_OFFLOAD_GENEVE_TNL_TSO; dev_info->default_rxconf = (struct rte_eth_rxconf) { .rx_thresh = { @@ -2675,19 +2663,19 @@ i40evf_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr) { struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); if (!is_valid_assigned_ether_addr(mac_addr)) { PMD_DRV_LOG(ERR, "Tried to set invalid MAC address."); return; } - if (is_same_ether_addr(mac_addr, dev->data->mac_addrs)) - return; - if (vf->flags & I40E_FLAG_VF_MAC_BY_PF) return; - i40evf_del_mac_addr_by_addr(dev, dev->data->mac_addrs); + i40evf_del_mac_addr_by_addr(dev, (struct ether_addr *)hw->mac.addr); i40evf_add_mac_addr(dev, mac_addr, 0, 0); + + ether_addr_copy(mac_addr, (struct ether_addr *)hw->mac.addr); } diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c index 3d7170d5..b83a0cff 100644 --- a/drivers/net/i40e/i40e_fdir.c +++ b/drivers/net/i40e/i40e_fdir.c @@ -1,34 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2010-2015 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
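/*
 * Illustrative sketch, not part of this patch: how an application could probe
 * for the tunnel TSO capabilities newly advertised by the VF above. The port
 * id 0 and the function name are hypothetical.
 */
#include <stdio.h>
#include <rte_ethdev.h>

static void
vf_tso_capa_example(void)
{
	struct rte_eth_dev_info dev_info;

	rte_eth_dev_info_get(0, &dev_info);
	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VXLAN_TNL_TSO)
		printf("port 0: VXLAN tunnel TSO offload available\n");
}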
+/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2015 Intel Corporation */ #include <sys/queue.h> @@ -40,7 +11,7 @@ #include <stdarg.h> #include <rte_ether.h> -#include <rte_ethdev.h> +#include <rte_ethdev_driver.h> #include <rte_log.h> #include <rte_memzone.h> #include <rte_malloc.h> @@ -95,17 +66,17 @@ #define I40E_COUNTER_INDEX_FDIR(pf_id) (0 + (pf_id) * I40E_COUNTER_PF) #define I40E_FDIR_FLOWS ( \ - (1 << RTE_ETH_FLOW_FRAG_IPV4) | \ - (1 << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \ - (1 << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \ - (1 << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \ - (1 << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \ - (1 << RTE_ETH_FLOW_FRAG_IPV6) | \ - (1 << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \ - (1 << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \ - (1 << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \ - (1 << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \ - (1 << RTE_ETH_FLOW_L2_PAYLOAD)) + (1ULL << RTE_ETH_FLOW_FRAG_IPV4) | \ + (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \ + (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \ + (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \ + (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \ + (1ULL << RTE_ETH_FLOW_FRAG_IPV6) | \ + (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \ + (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \ + (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \ + (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \ + (1ULL << RTE_ETH_FLOW_L2_PAYLOAD)) static int i40e_fdir_filter_programming(struct i40e_pf *pf, enum i40e_filter_pctype pctype, @@ -168,7 +139,6 @@ i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq) rte_wmb(); /* Init the RX tail regieter. */ - I40E_PCI_REG_WRITE(rxq->qrx_tail, 0); I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1); return err; @@ -534,7 +504,7 @@ i40e_set_flx_pld_cfg(struct i40e_pf *pf, { struct i40e_hw *hw = I40E_PF_TO_HW(pf); struct i40e_fdir_flex_pit flex_pit[I40E_MAX_FLXPLD_FIED]; - uint32_t flx_pit; + uint32_t flx_pit, flx_ort; uint16_t num, min_next_off; /* in words */ uint8_t field_idx = 0; uint8_t layer_idx = 0; @@ -548,9 +518,18 @@ i40e_set_flx_pld_cfg(struct i40e_pf *pf, layer_idx = I40E_FLXPLD_L4_IDX; memset(flex_pit, 0, sizeof(flex_pit)); - num = i40e_srcoff_to_flx_pit(cfg->src_offset, flex_pit); + num = RTE_MIN(i40e_srcoff_to_flx_pit(cfg->src_offset, flex_pit), + RTE_DIM(flex_pit)); - for (i = 0; i < RTE_MIN(num, RTE_DIM(flex_pit)); i++) { + if (num) { + flx_ort = (1 << I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT) | + (num << I40E_GLQF_ORT_FIELD_CNT_SHIFT) | + (layer_idx * I40E_MAX_FLXPLD_FIED); + I40E_WRITE_REG(hw, I40E_GLQF_ORT(33 + layer_idx), flx_ort); + i40e_global_cfg_warning(I40E_WARNING_ENA_FLX_PLD); + } + + for (i = 0; i < num; i++) { field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i; /* record the info in fdir structure */ pf->fdir.flex_set[field_idx].src_offset = @@ -670,22 +649,31 @@ i40e_fdir_configure(struct rte_eth_dev *dev) PMD_DRV_LOG(ERR, " invalid configuration arguments."); return -EINVAL; } - /* configure flex payload */ - for (i = 0; i < conf->nb_payloads; i++) - i40e_set_flx_pld_cfg(pf, &conf->flex_set[i]); - /* configure flex mask*/ - for (i = 0; i < conf->nb_flexmasks; i++) { - if (hw->mac.type == I40E_MAC_X722) { - /* get translated pctype value in fd pctype register */ - pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl( - hw, I40E_GLQF_FD_PCTYPES( - (int)i40e_flowtype_to_pctype(pf->adapter, - conf->flex_mask[i].flow_type))); - } else - pctype = i40e_flowtype_to_pctype(pf->adapter, - conf->flex_mask[i].flow_type); - i40e_set_flex_mask_on_pctype(pf, pctype, &conf->flex_mask[i]); + if (!pf->support_multi_driver) { + /* configure flex 
payload */ + for (i = 0; i < conf->nb_payloads; i++) + i40e_set_flx_pld_cfg(pf, &conf->flex_set[i]); + /* configure flex mask*/ + for (i = 0; i < conf->nb_flexmasks; i++) { + if (hw->mac.type == I40E_MAC_X722) { + /* get pctype value in fd pctype register */ + pctype = (enum i40e_filter_pctype) + i40e_read_rx_ctl(hw, + I40E_GLQF_FD_PCTYPES( + (int)i40e_flowtype_to_pctype( + pf->adapter, + conf->flex_mask[i].flow_type))); + } else { + pctype = i40e_flowtype_to_pctype(pf->adapter, + conf->flex_mask[i].flow_type); + } + + i40e_set_flex_mask_on_pctype(pf, pctype, + &conf->flex_mask[i]); + } + } else { + PMD_DRV_LOG(ERR, "Not support flexible payload."); } return ret; @@ -1363,13 +1351,18 @@ i40e_check_fdir_programming_status(struct i40e_rx_queue *rxq) PMD_DRV_LOG(ERR, "invalid programming status" " reported, error = %u.", error); } else - PMD_DRV_LOG(ERR, "unknown programming status" + PMD_DRV_LOG(INFO, "unknown programming status" " reported, len = %d, id = %u.", len, id); rxdp->wb.qword1.status_error_len = 0; rxq->rx_tail++; if (unlikely(rxq->rx_tail == rxq->nb_rx_desc)) rxq->rx_tail = 0; + if (rxq->rx_tail == 0) + I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1); + else + I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_tail - 1); } + return ret; } @@ -1612,8 +1605,15 @@ i40e_flow_add_del_fdir_filter(struct rte_eth_dev *dev, if (add) { fdir_filter = rte_zmalloc("fdir_filter", sizeof(*fdir_filter), 0); + if (fdir_filter == NULL) { + PMD_DRV_LOG(ERR, "Failed to alloc memory."); + return -ENOMEM; + } + rte_memcpy(fdir_filter, &check_filter, sizeof(check_filter)); ret = i40e_sw_fdir_filter_insert(pf, fdir_filter); + if (ret < 0) + rte_free(fdir_filter); } else { ret = i40e_sw_fdir_filter_del(pf, &node->fdir.input); } @@ -2020,6 +2020,7 @@ i40e_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir) struct i40e_hw *hw = I40E_PF_TO_HW(pf); uint16_t num_flex_set = 0; uint16_t num_flex_mask = 0; + uint16_t i; if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT) fdir->mode = RTE_FDIR_MODE_PERFECT; @@ -2032,6 +2033,8 @@ i40e_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir) (uint32_t)hw->func_caps.fd_filters_best_effort; fdir->max_flexpayload = I40E_FDIR_MAX_FLEX_LEN; fdir->flow_types_mask[0] = I40E_FDIR_FLOWS; + for (i = 1; i < RTE_FLOW_MASK_ARRAY_SIZE; i++) + fdir->flow_types_mask[i] = 0ULL; fdir->flex_payload_unit = sizeof(uint16_t); fdir->flex_bitmask_unit = sizeof(uint16_t); fdir->max_flex_payload_segment_num = I40E_MAX_FLXPLD_FIED; diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c index 7e4936e3..16c47cf7 100644 --- a/drivers/net/i40e/i40e_flow.c +++ b/drivers/net/i40e/i40e_flow.c @@ -1,33 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright (c) 2016-2017 Intel Corporation. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2016-2017 Intel Corporation */ #include <sys/queue.h> @@ -39,7 +11,7 @@ #include <stdarg.h> #include <rte_ether.h> -#include <rte_ethdev.h> +#include <rte_ethdev_driver.h> #include <rte_log.h> #include <rte_malloc.h> #include <rte_eth_ctrl.h> @@ -138,6 +110,8 @@ static int i40e_flow_flush_fdir_filter(struct i40e_pf *pf); static int i40e_flow_flush_ethertype_filter(struct i40e_pf *pf); static int i40e_flow_flush_tunnel_filter(struct i40e_pf *pf); static int +i40e_flow_flush_rss_filter(struct rte_eth_dev *dev); +static int i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, const struct rte_flow_item pattern[], @@ -2015,8 +1989,8 @@ i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev, item_type = item->type; switch (item_type) { case RTE_FLOW_ITEM_TYPE_ETH: - eth_spec = (const struct rte_flow_item_eth *)item->spec; - eth_mask = (const struct rte_flow_item_eth *)item->mask; + eth_spec = item->spec; + eth_mask = item->mask; /* Get the MAC info. 
*/ if (!eth_spec || !eth_mask) { rte_flow_error_set(error, EINVAL, @@ -2101,7 +2075,7 @@ i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev, } if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) { - act_q = (const struct rte_flow_action_queue *)act->conf; + act_q = act->conf; filter->queue = act_q->index; if (filter->queue >= pf->dev_data->nb_rx_queues) { rte_flow_error_set(error, EINVAL, @@ -2276,11 +2250,19 @@ i40e_flow_set_fdir_flex_pit(struct i40e_pf *pf, uint8_t raw_id) { struct i40e_hw *hw = I40E_PF_TO_HW(pf); - uint32_t flx_pit; + uint32_t flx_pit, flx_ort; uint8_t field_idx; uint16_t min_next_off = 0; /* in words */ uint8_t i; + if (raw_id) { + flx_ort = (1 << I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT) | + (raw_id << I40E_GLQF_ORT_FIELD_CNT_SHIFT) | + (layer_idx * I40E_MAX_FLXPLD_FIED); + I40E_WRITE_REG(hw, I40E_GLQF_ORT(33 + layer_idx), flx_ort); + i40e_global_cfg_warning(I40E_WARNING_ENA_FLX_PLD); + } + /* Set flex pit */ for (i = 0; i < raw_id; i++) { field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i; @@ -2496,8 +2478,8 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, item_type = item->type; switch (item_type) { case RTE_FLOW_ITEM_TYPE_ETH: - eth_spec = (const struct rte_flow_item_eth *)item->spec; - eth_mask = (const struct rte_flow_item_eth *)item->mask; + eth_spec = item->spec; + eth_mask = item->mask; if (eth_spec && eth_mask) { if (!is_zero_ether_addr(ð_mask->src) || @@ -2534,10 +2516,8 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, break; case RTE_FLOW_ITEM_TYPE_VLAN: - vlan_spec = - (const struct rte_flow_item_vlan *)item->spec; - vlan_mask = - (const struct rte_flow_item_vlan *)item->mask; + vlan_spec = item->spec; + vlan_mask = item->mask; if (vlan_spec && vlan_mask) { if (vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK)) { @@ -2553,10 +2533,8 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, break; case RTE_FLOW_ITEM_TYPE_IPV4: l3 = RTE_FLOW_ITEM_TYPE_IPV4; - ipv4_spec = - (const struct rte_flow_item_ipv4 *)item->spec; - ipv4_mask = - (const struct rte_flow_item_ipv4 *)item->mask; + ipv4_spec = item->spec; + ipv4_mask = item->mask; pctype = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER; layer_idx = I40E_FLXPLD_L3_IDX; @@ -2621,10 +2599,8 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, break; case RTE_FLOW_ITEM_TYPE_IPV6: l3 = RTE_FLOW_ITEM_TYPE_IPV6; - ipv6_spec = - (const struct rte_flow_item_ipv6 *)item->spec; - ipv6_mask = - (const struct rte_flow_item_ipv6 *)item->mask; + ipv6_spec = item->spec; + ipv6_mask = item->mask; pctype = I40E_FILTER_PCTYPE_NONF_IPV6_OTHER; layer_idx = I40E_FLXPLD_L3_IDX; @@ -2692,8 +2668,8 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, outer_ip = false; break; case RTE_FLOW_ITEM_TYPE_TCP: - tcp_spec = (const struct rte_flow_item_tcp *)item->spec; - tcp_mask = (const struct rte_flow_item_tcp *)item->mask; + tcp_spec = item->spec; + tcp_mask = item->mask; if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) pctype = @@ -2740,8 +2716,8 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, break; case RTE_FLOW_ITEM_TYPE_UDP: - udp_spec = (const struct rte_flow_item_udp *)item->spec; - udp_mask = (const struct rte_flow_item_udp *)item->mask; + udp_spec = item->spec; + udp_mask = item->mask; if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) pctype = @@ -2793,8 +2769,8 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, return -rte_errno; } - gtp_spec = (const struct rte_flow_item_gtp *)item->spec; - gtp_mask = (const struct rte_flow_item_gtp *)item->mask; + gtp_spec = item->spec; + gtp_mask = item->mask; if (gtp_spec && gtp_mask) { if 
(gtp_mask->v_pt_rsv_flags || @@ -2815,10 +2791,8 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, } break; case RTE_FLOW_ITEM_TYPE_SCTP: - sctp_spec = - (const struct rte_flow_item_sctp *)item->spec; - sctp_mask = - (const struct rte_flow_item_sctp *)item->mask; + sctp_spec = item->spec; + sctp_mask = item->mask; if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) pctype = @@ -2866,8 +2840,8 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, break; case RTE_FLOW_ITEM_TYPE_RAW: - raw_spec = (const struct rte_flow_item_raw *)item->spec; - raw_mask = (const struct rte_flow_item_raw *)item->mask; + raw_spec = item->spec; + raw_mask = item->mask; if (!raw_spec || !raw_mask) { rte_flow_error_set(error, EINVAL, @@ -2877,6 +2851,14 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, return -rte_errno; } + if (pf->support_multi_driver) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Unsupported flexible payload."); + return -rte_errno; + } + ret = i40e_flow_check_raw_item(item, raw_spec, error); if (ret < 0) return ret; @@ -2935,7 +2917,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev, raw_id++; break; case RTE_FLOW_ITEM_TYPE_VF: - vf_spec = (const struct rte_flow_item_vf *)item->spec; + vf_spec = item->spec; filter->input.flow_ext.is_vf = 1; filter->input.flow_ext.dst_id = vf_spec->id; if (filter->input.flow_ext.is_vf && @@ -3027,7 +3009,7 @@ i40e_flow_parse_fdir_action(struct rte_eth_dev *dev, NEXT_ITEM_OF_ACTION(act, actions, index); switch (act->type) { case RTE_FLOW_ACTION_TYPE_QUEUE: - act_q = (const struct rte_flow_action_queue *)act->conf; + act_q = act->conf; filter->action.rx_queue = act_q->index; if ((!filter->input.flow_ext.is_vf && filter->action.rx_queue >= pf->dev_data->nb_rx_queues) || @@ -3058,7 +3040,7 @@ i40e_flow_parse_fdir_action(struct rte_eth_dev *dev, NEXT_ITEM_OF_ACTION(act, actions, index); switch (act->type) { case RTE_FLOW_ACTION_TYPE_MARK: - mark_spec = (const struct rte_flow_action_mark *)act->conf; + mark_spec = act->conf; filter->action.report_status = I40E_FDIR_REPORT_ID; filter->soft_id = mark_spec->id; break; @@ -3149,7 +3131,7 @@ i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev, } if (act->type == RTE_FLOW_ACTION_TYPE_VF) { - act_vf = (const struct rte_flow_action_vf *)act->conf; + act_vf = act->conf; filter->vf_id = act_vf->id; filter->is_to_vf = 1; if (filter->vf_id >= pf->vf_num) { @@ -3164,7 +3146,7 @@ i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev, index++; NEXT_ITEM_OF_ACTION(act, actions, index); if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) { - act_q = (const struct rte_flow_action_queue *)act->conf; + act_q = act->conf; filter->queue_id = act_q->index; if ((!filter->is_to_vf) && (filter->queue_id >= pf->dev_data->nb_rx_queues)) { @@ -3256,8 +3238,8 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev, item_type = item->type; switch (item_type) { case RTE_FLOW_ITEM_TYPE_ETH: - eth_spec = (const struct rte_flow_item_eth *)item->spec; - eth_mask = (const struct rte_flow_item_eth *)item->mask; + eth_spec = item->spec; + eth_mask = item->mask; /* Check if ETH item is used for place holder. * If yes, both spec and mask should be NULL. 
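/*
 * Illustrative sketch, not part of this patch: the "place holder" convention
 * checked above, as an application might express it through rte_flow. The
 * outer ETH/IPV4/UDP items are passed with NULL spec and mask (no outer
 * match) and only the VXLAN VNI is matched. The VNI value is hypothetical,
 * and the exact item sequence accepted still depends on the driver's
 * registered pattern table.
 */
#include <rte_flow.h>

static const struct rte_flow_item_vxlan vxlan_spec = { .vni = { 0x00, 0x00, 0x2a } };
static const struct rte_flow_item_vxlan vxlan_mask = { .vni = { 0xff, 0xff, 0xff } };
static const struct rte_flow_item vxlan_pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },	/* place holder: spec and mask NULL */
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },	/* place holder */
	{ .type = RTE_FLOW_ITEM_TYPE_UDP },	/* place holder */
	{ .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &vxlan_spec, .mask = &vxlan_mask },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};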
@@ -3300,10 +3282,8 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev, } break; case RTE_FLOW_ITEM_TYPE_VLAN: - vlan_spec = - (const struct rte_flow_item_vlan *)item->spec; - vlan_mask = - (const struct rte_flow_item_vlan *)item->mask; + vlan_spec = item->spec; + vlan_mask = item->mask; if (!(vlan_spec && vlan_mask)) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, @@ -3360,10 +3340,8 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev, } break; case RTE_FLOW_ITEM_TYPE_VXLAN: - vxlan_spec = - (const struct rte_flow_item_vxlan *)item->spec; - vxlan_mask = - (const struct rte_flow_item_vxlan *)item->mask; + vxlan_spec = item->spec; + vxlan_mask = item->mask; /* Check if VXLAN item is used to describe protocol. * If yes, both spec and mask should be NULL. * If no, both spec and mask shouldn't be NULL. @@ -3489,8 +3467,8 @@ i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev, item_type = item->type; switch (item_type) { case RTE_FLOW_ITEM_TYPE_ETH: - eth_spec = (const struct rte_flow_item_eth *)item->spec; - eth_mask = (const struct rte_flow_item_eth *)item->mask; + eth_spec = item->spec; + eth_mask = item->mask; /* Check if ETH item is used for place holder. * If yes, both spec and mask should be NULL. @@ -3534,10 +3512,8 @@ i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev, break; case RTE_FLOW_ITEM_TYPE_VLAN: - vlan_spec = - (const struct rte_flow_item_vlan *)item->spec; - vlan_mask = - (const struct rte_flow_item_vlan *)item->mask; + vlan_spec = item->spec; + vlan_mask = item->mask; if (!(vlan_spec && vlan_mask)) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, @@ -3582,10 +3558,8 @@ i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev, } break; case RTE_FLOW_ITEM_TYPE_NVGRE: - nvgre_spec = - (const struct rte_flow_item_nvgre *)item->spec; - nvgre_mask = - (const struct rte_flow_item_nvgre *)item->mask; + nvgre_spec = item->spec; + nvgre_mask = item->mask; /* Check if NVGRE item is used to describe protocol. * If yes, both spec and mask should be NULL. * If no, both spec and mask shouldn't be NULL. 
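/*
 * Illustrative sketch, not part of this patch: an NVGRE item that satisfies
 * the stricter checks added in the hunk below. 0x2000 is the GRE key-present
 * bit required for NVGRE and 0x6558 is the Transparent Ethernet Bridging
 * protocol type; only these exact values are accepted when the corresponding
 * mask bits are set. The TNI value and the function name are hypothetical.
 */
#include <string.h>
#include <rte_byteorder.h>
#include <rte_flow.h>

static void
nvgre_item_example(struct rte_flow_item_nvgre *spec,
		   struct rte_flow_item_nvgre *mask)
{
	memset(spec, 0, sizeof(*spec));
	memset(mask, 0, sizeof(*mask));
	spec->c_k_s_rsvd0_ver = rte_cpu_to_be_16(0x2000); /* key-present bit only */
	spec->protocol = rte_cpu_to_be_16(0x6558);	  /* Transparent Ethernet Bridging */
	spec->tni[0] = 0x00; spec->tni[1] = 0x00; spec->tni[2] = 0x2a; /* hypothetical TNI */
	mask->c_k_s_rsvd0_ver = rte_cpu_to_be_16(0xffff);
	mask->protocol = rte_cpu_to_be_16(0xffff);
	mask->tni[0] = 0xff; mask->tni[1] = 0xff; mask->tni[2] = 0xff;
}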
@@ -3610,6 +3584,41 @@ i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev, "Invalid TNI mask"); return -rte_errno; } + if (nvgre_mask->protocol && + nvgre_mask->protocol != 0xFFFF) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid NVGRE item"); + return -rte_errno; + } + if (nvgre_mask->c_k_s_rsvd0_ver && + nvgre_mask->c_k_s_rsvd0_ver != + rte_cpu_to_be_16(0xFFFF)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid NVGRE item"); + return -rte_errno; + } + if (nvgre_spec->c_k_s_rsvd0_ver != + rte_cpu_to_be_16(0x2000) && + nvgre_mask->c_k_s_rsvd0_ver) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid NVGRE item"); + return -rte_errno; + } + if (nvgre_mask->protocol && + nvgre_spec->protocol != + rte_cpu_to_be_16(0x6558)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid NVGRE item"); + return -rte_errno; + } rte_memcpy(((uint8_t *)&tenant_id_be + 1), nvgre_spec->tni, 3); filter->tenant_id = @@ -3761,10 +3770,8 @@ i40e_flow_parse_mpls_pattern(__rte_unused struct rte_eth_dev *dev, } break; case RTE_FLOW_ITEM_TYPE_MPLS: - mpls_spec = - (const struct rte_flow_item_mpls *)item->spec; - mpls_mask = - (const struct rte_flow_item_mpls *)item->mask; + mpls_spec = item->spec; + mpls_mask = item->mask; if (!mpls_spec || !mpls_mask) { rte_flow_error_set(error, EINVAL, @@ -3900,10 +3907,8 @@ i40e_flow_parse_gtp_pattern(struct rte_eth_dev *dev, break; case RTE_FLOW_ITEM_TYPE_GTPC: case RTE_FLOW_ITEM_TYPE_GTPU: - gtp_spec = - (const struct rte_flow_item_gtp *)item->spec; - gtp_mask = - (const struct rte_flow_item_gtp *)item->mask; + gtp_spec = item->spec; + gtp_mask = item->mask; if (!gtp_spec || !gtp_mask) { rte_flow_error_set(error, EINVAL, @@ -4014,10 +4019,8 @@ i40e_flow_parse_qinq_pattern(__rte_unused struct rte_eth_dev *dev, } break; case RTE_FLOW_ITEM_TYPE_VLAN: - vlan_spec = - (const struct rte_flow_item_vlan *)item->spec; - vlan_mask = - (const struct rte_flow_item_vlan *)item->mask; + vlan_spec = item->spec; + vlan_mask = item->mask; if (!(vlan_spec && vlan_mask)) { rte_flow_error_set(error, EINVAL, @@ -4094,6 +4097,317 @@ i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev, return ret; } +/** + * This function is used to do configuration i40e existing RSS with rte_flow. + * It also enable queue region configuration using flow API for i40e. + * pattern can be used indicate what parameters will be include in flow, + * like user_priority or flowtype for queue region or HASH function for RSS. + * Action is used to transmit parameter like queue index and HASH + * function for RSS, or flowtype for queue region configuration. + * For example: + * pattern: + * Case 1: only ETH, indicate flowtype for queue region will be parsed. + * Case 2: only VLAN, indicate user_priority for queue region will be parsed. + * Case 3: none, indicate RSS related will be parsed in action. + * Any pattern other the ETH or VLAN will be treated as invalid except END. + * So, pattern choice is depened on the purpose of configuration of + * that flow. + * action: + * action RSS will be uaed to transmit valid parameter with + * struct rte_flow_action_rss for all the 3 case. 
+ */ +static int +i40e_flow_parse_rss_pattern(__rte_unused struct rte_eth_dev *dev, + const struct rte_flow_item *pattern, + struct rte_flow_error *error, + uint8_t *action_flag, + struct i40e_queue_regions *info) +{ + const struct rte_flow_item_vlan *vlan_spec, *vlan_mask; + const struct rte_flow_item *item = pattern; + enum rte_flow_item_type item_type; + + if (item->type == RTE_FLOW_ITEM_TYPE_END) + return 0; + + for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { + if (item->last) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Not support range"); + return -rte_errno; + } + item_type = item->type; + switch (item_type) { + case RTE_FLOW_ITEM_TYPE_ETH: + *action_flag = 1; + break; + case RTE_FLOW_ITEM_TYPE_VLAN: + vlan_spec = item->spec; + vlan_mask = item->mask; + if (vlan_spec && vlan_mask) { + if (vlan_mask->tci == + rte_cpu_to_be_16(I40E_TCI_MASK)) { + info->region[0].user_priority[0] = + (vlan_spec->tci >> 13) & 0x7; + info->region[0].user_priority_num = 1; + info->queue_region_number = 1; + *action_flag = 0; + } + } + break; + default: + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Not support range"); + return -rte_errno; + } + } + + return 0; +} + +static int +i40e_flow_parse_rss_action(struct rte_eth_dev *dev, + const struct rte_flow_action *actions, + struct rte_flow_error *error, + uint8_t action_flag, + struct i40e_queue_regions *conf_info, + union i40e_filter_t *filter) +{ + const struct rte_flow_action *act; + const struct rte_flow_action_rss *rss; + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_queue_regions *info = &pf->queue_region; + struct i40e_rte_flow_rss_conf *rss_config = + &filter->rss_conf; + struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info; + uint16_t i, j, n, tmp; + uint32_t index = 0; + uint64_t hf_bit = 1; + + NEXT_ITEM_OF_ACTION(act, actions, index); + rss = act->conf; + + /** + * rss only supports forwarding, + * check if the first not void action is RSS. 
+ */ + if (act->type != RTE_FLOW_ACTION_TYPE_RSS) { + memset(rss_config, 0, sizeof(struct i40e_rte_flow_rss_conf)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, "Not supported action."); + return -rte_errno; + } + + if (action_flag) { + for (n = 0; n < 64; n++) { + if (rss->rss_conf->rss_hf & (hf_bit << n)) { + conf_info->region[0].hw_flowtype[0] = n; + conf_info->region[0].flowtype_num = 1; + conf_info->queue_region_number = 1; + break; + } + } + } + + for (n = 0; n < conf_info->queue_region_number; n++) { + if (conf_info->region[n].user_priority_num || + conf_info->region[n].flowtype_num) { + if (!((rte_is_power_of_2(rss->num)) && + rss->num <= 64)) { + PMD_DRV_LOG(ERR, "The region sizes should be any of the following values: 1, 2, 4, 8, 16, 32, 64 as long as the " + "total number of queues do not exceed the VSI allocation"); + return -rte_errno; + } + + if (conf_info->region[n].user_priority[n] >= + I40E_MAX_USER_PRIORITY) { + PMD_DRV_LOG(ERR, "the user priority max index is 7"); + return -rte_errno; + } + + if (conf_info->region[n].hw_flowtype[n] >= + I40E_FILTER_PCTYPE_MAX) { + PMD_DRV_LOG(ERR, "the hw_flowtype or PCTYPE max index is 63"); + return -rte_errno; + } + + if (rss_info->num < rss->num || + rss_info->queue[0] < rss->queue[0] || + (rss->queue[0] + rss->num > + rss_info->num + rss_info->queue[0])) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "no valid queues"); + return -rte_errno; + } + + for (i = 0; i < info->queue_region_number; i++) { + if (info->region[i].queue_num == rss->num && + info->region[i].queue_start_index == + rss->queue[0]) + break; + } + + if (i == info->queue_region_number) { + if (i > I40E_REGION_MAX_INDEX) { + PMD_DRV_LOG(ERR, "the queue region max index is 7"); + return -rte_errno; + } + + info->region[i].queue_num = + rss->num; + info->region[i].queue_start_index = + rss->queue[0]; + info->region[i].region_id = + info->queue_region_number; + + j = info->region[i].user_priority_num; + tmp = conf_info->region[n].user_priority[0]; + if (conf_info->region[n].user_priority_num) { + info->region[i].user_priority[j] = tmp; + info->region[i].user_priority_num++; + } + + j = info->region[i].flowtype_num; + tmp = conf_info->region[n].hw_flowtype[0]; + if (conf_info->region[n].flowtype_num) { + info->region[i].hw_flowtype[j] = tmp; + info->region[i].flowtype_num++; + } + info->queue_region_number++; + } else { + j = info->region[i].user_priority_num; + tmp = conf_info->region[n].user_priority[0]; + if (conf_info->region[n].user_priority_num) { + info->region[i].user_priority[j] = tmp; + info->region[i].user_priority_num++; + } + + j = info->region[i].flowtype_num; + tmp = conf_info->region[n].hw_flowtype[0]; + if (conf_info->region[n].flowtype_num) { + info->region[i].hw_flowtype[j] = tmp; + info->region[i].flowtype_num++; + } + } + } + + rss_config->queue_region_conf = TRUE; + } + + if (rss_config->queue_region_conf) + return 0; + + if (!rss || !rss->num) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "no valid queues"); + return -rte_errno; + } + + for (n = 0; n < rss->num; n++) { + if (rss->queue[n] >= dev->data->nb_rx_queues) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "queue id > max number of queues"); + return -rte_errno; + } + } + if (rss->rss_conf) + rss_config->rss_conf = *rss->rss_conf; + else + rss_config->rss_conf.rss_hf = + pf->adapter->flow_types_mask; + + for (n = 0; n < rss->num; ++n) + rss_config->queue[n] = rss->queue[n]; + 
rss_config->num = rss->num; + index++; + + /* check if the next not void action is END */ + NEXT_ITEM_OF_ACTION(act, actions, index); + if (act->type != RTE_FLOW_ACTION_TYPE_END) { + memset(rss_config, 0, sizeof(struct i40e_rte_flow_rss_conf)); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, "Not supported action."); + return -rte_errno; + } + rss_config->queue_region_conf = FALSE; + + return 0; +} + +static int +i40e_parse_rss_filter(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + union i40e_filter_t *filter, + struct rte_flow_error *error) +{ + int ret; + struct i40e_queue_regions info; + uint8_t action_flag = 0; + + memset(&info, 0, sizeof(struct i40e_queue_regions)); + + ret = i40e_flow_parse_rss_pattern(dev, pattern, + error, &action_flag, &info); + if (ret) + return ret; + + ret = i40e_flow_parse_rss_action(dev, actions, error, + action_flag, &info, filter); + if (ret) + return ret; + + ret = i40e_flow_parse_attr(attr, error); + if (ret) + return ret; + + cons_filter_type = RTE_ETH_FILTER_HASH; + + return 0; +} + +static int +i40e_config_rss_filter_set(struct rte_eth_dev *dev, + struct i40e_rte_flow_rss_conf *conf) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (conf->queue_region_conf) { + i40e_flush_queue_region_all_conf(dev, hw, pf, 1); + conf->queue_region_conf = 0; + } else { + i40e_config_rss_filter(pf, conf, 1); + } + return 0; +} + +static int +i40e_config_rss_filter_del(struct rte_eth_dev *dev, + struct i40e_rte_flow_rss_conf *conf) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + i40e_flush_queue_region_all_conf(dev, hw, pf, 0); + + i40e_config_rss_filter(pf, conf, 0); + return 0; +} + static int i40e_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, @@ -4130,6 +4444,17 @@ i40e_flow_validate(struct rte_eth_dev *dev, memset(&cons_filter, 0, sizeof(cons_filter)); + /* Get the non-void item of action */ + while ((actions + i)->type == RTE_FLOW_ACTION_TYPE_VOID) + i++; + + if ((actions + i)->type == RTE_FLOW_ACTION_TYPE_RSS) { + ret = i40e_parse_rss_filter(dev, attr, pattern, + actions, &cons_filter, error); + return ret; + } + + i = 0; /* Get the non-void item number of pattern */ while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) { if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID) @@ -4217,6 +4542,11 @@ i40e_flow_create(struct rte_eth_dev *dev, flow->rule = TAILQ_LAST(&pf->tunnel.tunnel_list, i40e_tunnel_filter_list); break; + case RTE_ETH_FILTER_HASH: + ret = i40e_config_rss_filter_set(dev, + &cons_filter.rss_conf); + flow->rule = &pf->rss_info; + break; default: goto free_flow; } @@ -4255,6 +4585,10 @@ i40e_flow_destroy(struct rte_eth_dev *dev, ret = i40e_flow_add_del_fdir_filter(dev, &((struct i40e_fdir_filter *)flow->rule)->fdir, 0); break; + case RTE_ETH_FILTER_HASH: + ret = i40e_config_rss_filter_del(dev, + (struct i40e_rte_flow_rss_conf *)flow->rule); + break; default: PMD_DRV_LOG(WARNING, "Filter type (%d) not supported", filter_type); @@ -4397,6 +4731,14 @@ i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error) return -rte_errno; } + ret = i40e_flow_flush_rss_filter(dev); + if (ret) { + rte_flow_error_set(error, -ret, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Failed to flush rss flows."); + return 
-rte_errno; + } + return ret; } @@ -4406,6 +4748,7 @@ i40e_flow_flush_fdir_filter(struct i40e_pf *pf) struct rte_eth_dev *dev = pf->adapter->eth_dev; struct i40e_fdir_info *fdir_info = &pf->fdir; struct i40e_fdir_filter *fdir_filter; + enum i40e_filter_pctype pctype; struct rte_flow *flow; void *temp; int ret; @@ -4427,6 +4770,10 @@ i40e_flow_flush_fdir_filter(struct i40e_pf *pf) rte_free(flow); } } + + for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP; + pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++) + pf->fdir.inset_flag[pctype] = 0; } return ret; @@ -4487,3 +4834,19 @@ i40e_flow_flush_tunnel_filter(struct i40e_pf *pf) return ret; } + +/* remove the rss filter */ +static int +i40e_flow_flush_rss_filter(struct rte_eth_dev *dev) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info; + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int32_t ret = -EINVAL; + + ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 0); + + if (rss_info->num) + ret = i40e_config_rss_filter(pf, rss_info, FALSE); + return ret; +} diff --git a/drivers/net/i40e/i40e_logs.h b/drivers/net/i40e/i40e_logs.h index 8e99cd52..b1049699 100644 --- a/drivers/net/i40e/i40e_logs.h +++ b/drivers/net/i40e/i40e_logs.h @@ -1,34 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2010-2015 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2015 Intel Corporation */ #ifndef _I40E_LOGS_H_ diff --git a/drivers/net/i40e/i40e_pf.c b/drivers/net/i40e/i40e_pf.c index 94bb0cfd..dd3962d3 100644 --- a/drivers/net/i40e/i40e_pf.c +++ b/drivers/net/i40e/i40e_pf.c @@ -1,34 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2010-2017 Intel Corporation. All rights reserved. - * All rights reserved. 
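For context, i40e_flow_flush_rss_filter() added earlier in i40e_flow.c is what lets rte_flow_flush() remove hash rules on this PMD alongside FDIR and tunnel rules. A minimal sketch of the application side, using only the generic rte_flow API:

#include <stdio.h>
#include <rte_flow.h>

/* Drop every flow rule (FDIR, tunnel and hash/RSS alike) on a port. */
static int
flush_all_flows(uint16_t port_id)
{
	struct rte_flow_error err;

	if (rte_flow_flush(port_id, &err) != 0) {
		printf("flow flush on port %u failed: %s\n", port_id,
		       err.message ? err.message : "(no message)");
		return -1;
	}
	return 0;
}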
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2017 Intel Corporation */ #include <sys/queue.h> @@ -43,7 +14,7 @@ #include <rte_string_fns.h> #include <rte_pci.h> #include <rte_ether.h> -#include <rte_ethdev.h> +#include <rte_ethdev_driver.h> #include <rte_malloc.h> #include <rte_memcpy.h> @@ -273,19 +244,23 @@ i40e_pf_host_send_msg_to_vf(struct i40e_pf_vf *vf, } static void -i40e_pf_host_process_cmd_version(struct i40e_pf_vf *vf, bool b_op) +i40e_pf_host_process_cmd_version(struct i40e_pf_vf *vf, uint8_t *msg, + bool b_op) { struct virtchnl_version_info info; - /* Respond like a Linux PF host in order to support both DPDK VF and - * Linux VF driver. The expense is original DPDK host specific feature + /* VF and PF drivers need to follow the Virtchnl definition, No matter + * it's DPDK or other kernel drivers. + * The original DPDK host specific feature * like CFG_VLAN_PVID and CONFIG_VSI_QUEUES_EXT will not available. - * - * DPDK VF also can't identify host driver by version number returned. - * It always assume talking with Linux PF. 
*/ + info.major = VIRTCHNL_VERSION_MAJOR; - info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS; + vf->version = *(struct virtchnl_version_info *)msg; + if (VF_IS_V10(&vf->version)) + info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS; + else + info.minor = VIRTCHNL_VERSION_MINOR; if (b_op) i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION, @@ -309,11 +284,13 @@ i40e_pf_host_process_cmd_reset_vf(struct i40e_pf_vf *vf) } static int -i40e_pf_host_process_cmd_get_vf_resource(struct i40e_pf_vf *vf, bool b_op) +i40e_pf_host_process_cmd_get_vf_resource(struct i40e_pf_vf *vf, uint8_t *msg, + bool b_op) { struct virtchnl_vf_resource *vf_res = NULL; struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf); uint32_t len = 0; + uint64_t default_hena = I40E_RSS_HENA_ALL; int ret = I40E_SUCCESS; if (!b_op) { @@ -337,11 +314,35 @@ i40e_pf_host_process_cmd_get_vf_resource(struct i40e_pf_vf *vf, bool b_op) goto send_msg; } - vf_res->vf_offload_flags = VIRTCHNL_VF_OFFLOAD_L2 | - VIRTCHNL_VF_OFFLOAD_VLAN; + if (VF_IS_V10(&vf->version)) /* doesn't support offload negotiate */ + vf->request_caps = VIRTCHNL_VF_OFFLOAD_L2 | + VIRTCHNL_VF_OFFLOAD_VLAN; + else + vf->request_caps = *(uint32_t *)msg; + + /* enable all RSS by default, + * doesn't support hena setting by virtchnnl yet. + */ + if (vf->request_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) { + I40E_WRITE_REG(hw, I40E_VFQF_HENA1(0, vf->vf_idx), + (uint32_t)default_hena); + I40E_WRITE_REG(hw, I40E_VFQF_HENA1(1, vf->vf_idx), + (uint32_t)(default_hena >> 32)); + I40E_WRITE_FLUSH(hw); + } + + vf_res->vf_cap_flags = vf->request_caps & + I40E_VIRTCHNL_OFFLOAD_CAPS; + /* For X722, it supports write back on ITR + * without binding queue to interrupt vector. + */ + if (hw->mac.type == I40E_MAC_X722) + vf_res->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_WB_ON_ITR; vf_res->max_vectors = hw->func_caps.num_msix_vectors_vf; vf_res->num_queue_pairs = vf->vsi->nb_qps; vf_res->num_vsis = I40E_DEFAULT_VF_VSI_NUM; + vf_res->rss_key_size = (I40E_PFQF_HKEY_MAX_INDEX + 1) * 4; + vf_res->rss_lut_size = (I40E_VFQF_HLUT1_MAX_INDEX + 1) * 4; /* Change below setting if PF host can support more VSIs for VF */ vf_res->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV; @@ -1090,6 +1091,84 @@ i40e_pf_host_process_cmd_disable_vlan_strip(struct i40e_pf_vf *vf, bool b_op) return ret; } +static int +i40e_pf_host_process_cmd_set_rss_lut(struct i40e_pf_vf *vf, + uint8_t *msg, + uint16_t msglen, + bool b_op) +{ + struct virtchnl_rss_lut *rss_lut = (struct virtchnl_rss_lut *)msg; + uint16_t valid_len; + int ret = I40E_SUCCESS; + + if (!b_op) { + i40e_pf_host_send_msg_to_vf( + vf, + VIRTCHNL_OP_CONFIG_RSS_LUT, + I40E_NOT_SUPPORTED, NULL, 0); + return ret; + } + + if (!msg || msglen <= sizeof(struct virtchnl_rss_lut)) { + PMD_DRV_LOG(ERR, "set_rss_lut argument too short"); + ret = I40E_ERR_PARAM; + goto send_msg; + } + valid_len = sizeof(struct virtchnl_rss_lut) + rss_lut->lut_entries - 1; + if (msglen < valid_len) { + PMD_DRV_LOG(ERR, "set_rss_lut length mismatch"); + ret = I40E_ERR_PARAM; + goto send_msg; + } + + ret = i40e_set_rss_lut(vf->vsi, rss_lut->lut, rss_lut->lut_entries); + +send_msg: + i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, + ret, NULL, 0); + + return ret; +} + +static int +i40e_pf_host_process_cmd_set_rss_key(struct i40e_pf_vf *vf, + uint8_t *msg, + uint16_t msglen, + bool b_op) +{ + struct virtchnl_rss_key *rss_key = (struct virtchnl_rss_key *)msg; + uint16_t valid_len; + int ret = I40E_SUCCESS; + + if (!b_op) { + i40e_pf_host_send_msg_to_vf( + vf, + VIRTCHNL_OP_DEL_VLAN, + VIRTCHNL_OP_CONFIG_RSS_KEY, NULL, 
0); + return ret; + } + + if (!msg || msglen <= sizeof(struct virtchnl_rss_key)) { + PMD_DRV_LOG(ERR, "set_rss_key argument too short"); + ret = I40E_ERR_PARAM; + goto send_msg; + } + valid_len = sizeof(struct virtchnl_rss_key) + rss_key->key_len - 1; + if (msglen < valid_len) { + PMD_DRV_LOG(ERR, "set_rss_key length mismatch"); + ret = I40E_ERR_PARAM; + goto send_msg; + } + + ret = i40e_set_rss_key(vf->vsi, rss_key->key, rss_key->key_len); + +send_msg: + i40e_pf_host_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, + ret, NULL, 0); + + return ret; +} + void i40e_notify_vf_link_status(struct rte_eth_dev *dev, struct i40e_pf_vf *vf) { @@ -1185,8 +1264,7 @@ i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev, * do nothing and send not_supported to VF. As PF must send a response * to VF and ACK/NACK is not defined. */ - _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX, - NULL, &ret_param); + _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX, &ret_param); if (ret_param.retval != RTE_PMD_I40E_MB_EVENT_PROCEED) { PMD_DRV_LOG(WARNING, "VF to PF message(%d) is not permitted!", opcode); @@ -1196,7 +1274,7 @@ i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev, switch (opcode) { case VIRTCHNL_OP_VERSION: PMD_DRV_LOG(INFO, "OP_VERSION received"); - i40e_pf_host_process_cmd_version(vf, b_op); + i40e_pf_host_process_cmd_version(vf, msg, b_op); break; case VIRTCHNL_OP_RESET_VF: PMD_DRV_LOG(INFO, "OP_RESET_VF received"); @@ -1204,7 +1282,7 @@ i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev, break; case VIRTCHNL_OP_GET_VF_RESOURCES: PMD_DRV_LOG(INFO, "OP_GET_VF_RESOURCES received"); - i40e_pf_host_process_cmd_get_vf_resource(vf, b_op); + i40e_pf_host_process_cmd_get_vf_resource(vf, msg, b_op); break; case VIRTCHNL_OP_CONFIG_VSI_QUEUES: PMD_DRV_LOG(INFO, "OP_CONFIG_VSI_QUEUES received"); @@ -1265,6 +1343,14 @@ i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev, PMD_DRV_LOG(INFO, "OP_DISABLE_VLAN_STRIPPING received"); i40e_pf_host_process_cmd_disable_vlan_strip(vf, b_op); break; + case VIRTCHNL_OP_CONFIG_RSS_LUT: + PMD_DRV_LOG(INFO, "OP_CONFIG_RSS_LUT received"); + i40e_pf_host_process_cmd_set_rss_lut(vf, msg, msglen, b_op); + break; + case VIRTCHNL_OP_CONFIG_RSS_KEY: + PMD_DRV_LOG(INFO, "OP_CONFIG_RSS_KEY received"); + i40e_pf_host_process_cmd_set_rss_key(vf, msg, msglen, b_op); + break; /* Don't add command supported below, which will * return an error code. */ diff --git a/drivers/net/i40e/i40e_pf.h b/drivers/net/i40e/i40e_pf.h index 04116637..1809ba4d 100644 --- a/drivers/net/i40e/i40e_pf.h +++ b/drivers/net/i40e/i40e_pf.h @@ -1,34 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2010-2017 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. 
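The set_rss_lut/set_rss_key handlers added above in i40e_pf.c validate variable-length virtchnl messages: each structure ends in a one-byte array that really carries lut_entries (or key_len) bytes, so the shortest valid message is sizeof(struct) - 1 plus the payload. A standalone sketch of that length check, using a stand-in structure rather than the real virtchnl layout:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Stand-in for a virtchnl-style message whose payload length is
 * carried in the header and whose payload overlays the trailing
 * one-byte array.
 */
struct var_len_msg {
	uint16_t vsi_id;
	uint16_t payload_len;
	uint8_t payload[1];	/* payload_len bytes follow in the buffer */
};

static bool
var_len_msg_ok(const uint8_t *msg, uint16_t msglen)
{
	const struct var_len_msg *m = (const struct var_len_msg *)msg;

	if (msg == NULL || msglen <= sizeof(*m))
		return false;
	return msglen >= sizeof(*m) + m->payload_len - 1;
}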
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2017 Intel Corporation */ #ifndef _I40E_PF_H_ @@ -37,6 +8,12 @@ /* Default setting on number of VSIs that VF can contain */ #define I40E_DEFAULT_VF_VSI_NUM 1 +#define I40E_VIRTCHNL_OFFLOAD_CAPS ( \ + VIRTCHNL_VF_OFFLOAD_L2 | \ + VIRTCHNL_VF_OFFLOAD_VLAN | \ + VIRTCHNL_VF_OFFLOAD_RSS_PF | \ + VIRTCHNL_VF_OFFLOAD_RX_POLLING) + struct virtchnl_vlan_offload_info { uint16_t vsi_id; uint8_t enable_vlan_strip; diff --git a/drivers/net/i40e/i40e_regs.h b/drivers/net/i40e/i40e_regs.h index 472c7a06..b19bb1d5 100644 --- a/drivers/net/i40e/i40e_regs.h +++ b/drivers/net/i40e/i40e_regs.h @@ -1,34 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2016 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2016 Intel Corporation */ struct i40e_reg_info { diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c index ad06b71e..1217e5a6 100644 --- a/drivers/net/i40e/i40e_rxtx.c +++ b/drivers/net/i40e/i40e_rxtx.c @@ -1,34 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2010-2016 Intel Corporation. All rights reserved. - * All rights reserved. 
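The I40E_VIRTCHNL_OFFLOAD_CAPS mask introduced in i40e_pf.h above is applied as a plain AND against whatever the VF requested, after which the PF may OR in extras of its own (WB_ON_ITR on X722, for instance). A sketch of that negotiation with placeholder flag values, not the real virtchnl encoding:

#include <stdint.h>

#define DEMO_CAP_L2        0x01u
#define DEMO_CAP_VLAN      0x02u
#define DEMO_CAP_RSS_PF    0x04u
#define DEMO_CAP_POLLING   0x08u
#define DEMO_PF_CAPS \
	(DEMO_CAP_L2 | DEMO_CAP_VLAN | DEMO_CAP_RSS_PF | DEMO_CAP_POLLING)

/* Grant only the intersection of the VF's request and what the PF
 * supports, then add any capabilities the PF always provides.
 */
static uint32_t
demo_grant_caps(uint32_t vf_request, uint32_t pf_extra)
{
	return (vf_request & DEMO_PF_CAPS) | pf_extra;
}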
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2016 Intel Corporation */ #include <stdio.h> @@ -46,7 +17,7 @@ #include <rte_mbuf.h> #include <rte_malloc.h> #include <rte_ether.h> -#include <rte_ethdev.h> +#include <rte_ethdev_driver.h> #include <rte_tcp.h> #include <rte_sctp.h> #include <rte_udp.h> @@ -99,10 +70,6 @@ #define I40E_TX_OFFLOAD_NOTSUP_MASK \ (PKT_TX_OFFLOAD_MASK ^ I40E_TX_OFFLOAD_MASK) -static uint16_t i40e_xmit_pkts_simple(void *tx_queue, - struct rte_mbuf **tx_pkts, - uint16_t nb_pkts); - static inline void i40e_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union i40e_rx_desc *rxdp) { @@ -1718,7 +1685,9 @@ i40e_dev_supported_ptypes_get(struct rte_eth_dev *dev) #endif dev->rx_pkt_burst == i40e_recv_scattered_pkts || dev->rx_pkt_burst == i40e_recv_scattered_pkts_vec || - dev->rx_pkt_burst == i40e_recv_pkts_vec) + dev->rx_pkt_burst == i40e_recv_pkts_vec || + dev->rx_pkt_burst == i40e_recv_scattered_pkts_vec_avx2 || + dev->rx_pkt_burst == i40e_recv_pkts_vec_avx2) return ptypes; return NULL; } @@ -2316,7 +2285,8 @@ i40e_tx_queue_release_mbufs(struct i40e_tx_queue *txq) * vPMD tx will not set sw_ring's mbuf to NULL after free, * so need to free remains more carefully. 
*/ - if (dev->tx_pkt_burst == i40e_xmit_pkts_vec) { + if (dev->tx_pkt_burst == i40e_xmit_pkts_vec_avx2 || + dev->tx_pkt_burst == i40e_xmit_pkts_vec) { i = txq->tx_next_dd - txq->tx_rs_thresh + 1; if (txq->tx_tail < i) { for (; i < txq->nb_tx_desc; i++) { @@ -2749,6 +2719,7 @@ i40e_fdir_setup_rx_resources(struct i40e_pf *pf) rxq->vsi = pf->fdir.fdir_vsi; rxq->rx_ring_phys_addr = rz->iova; + memset(rz->addr, 0, I40E_FDIR_NUM_RX_DESC * sizeof(union i40e_rx_desc)); rxq->rx_ring = (union i40e_rx_desc *)rz->addr; /* @@ -2839,6 +2810,17 @@ i40e_set_rx_function(struct rte_eth_dev *dev) dev->data->port_id); dev->rx_pkt_burst = i40e_recv_scattered_pkts_vec; +#ifdef RTE_ARCH_X86 + /* + * since AVX frequency can be different to base + * frequency, limit use of AVX2 version to later + * plaforms, not all those that could theoretically + * run it. + */ + if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F)) + dev->rx_pkt_burst = + i40e_recv_scattered_pkts_vec_avx2; +#endif } else { PMD_INIT_LOG(DEBUG, "Using a Scattered with bulk " "allocation callback (port=%d).", @@ -2858,6 +2840,16 @@ i40e_set_rx_function(struct rte_eth_dev *dev) dev->data->port_id); dev->rx_pkt_burst = i40e_recv_pkts_vec; +#ifdef RTE_ARCH_X86 + /* + * since AVX frequency can be different to base + * frequency, limit use of AVX2 version to later + * plaforms, not all those that could theoretically + * run it. + */ + if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F)) + dev->rx_pkt_burst = i40e_recv_pkts_vec_avx2; +#endif } else if (ad->rx_bulk_alloc_allowed) { PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are " "satisfied. Rx Burst Bulk Alloc function " @@ -2878,7 +2870,9 @@ i40e_set_rx_function(struct rte_eth_dev *dev) if (rte_eal_process_type() == RTE_PROC_PRIMARY) { rx_using_sse = (dev->rx_pkt_burst == i40e_recv_scattered_pkts_vec || - dev->rx_pkt_burst == i40e_recv_pkts_vec); + dev->rx_pkt_burst == i40e_recv_pkts_vec || + dev->rx_pkt_burst == i40e_recv_scattered_pkts_vec_avx2 || + dev->rx_pkt_burst == i40e_recv_pkts_vec_avx2); for (i = 0; i < dev->data->nb_rx_queues; i++) { struct i40e_rx_queue *rxq = dev->data->rx_queues[i]; @@ -2935,6 +2929,16 @@ i40e_set_tx_function(struct rte_eth_dev *dev) if (ad->tx_vec_allowed) { PMD_INIT_LOG(DEBUG, "Vector tx finally be used."); dev->tx_pkt_burst = i40e_xmit_pkts_vec; +#ifdef RTE_ARCH_X86 + /* + * since AVX frequency can be different to base + * frequency, limit use of AVX2 version to later + * plaforms, not all those that could theoretically + * run it. 
+ */ + if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F)) + dev->tx_pkt_burst = i40e_xmit_pkts_vec_avx2; +#endif } else { PMD_INIT_LOG(DEBUG, "Simple tx finally be used."); dev->tx_pkt_burst = i40e_xmit_pkts_simple; @@ -3041,6 +3045,22 @@ i40e_recv_scattered_pkts_vec( return 0; } +uint16_t __attribute__((weak)) +i40e_recv_pkts_vec_avx2(void __rte_unused *rx_queue, + struct rte_mbuf __rte_unused **rx_pkts, + uint16_t __rte_unused nb_pkts) +{ + return 0; +} + +uint16_t __attribute__((weak)) +i40e_recv_scattered_pkts_vec_avx2(void __rte_unused *rx_queue, + struct rte_mbuf __rte_unused **rx_pkts, + uint16_t __rte_unused nb_pkts) +{ + return 0; +} + int __attribute__((weak)) i40e_rxq_vec_setup(struct i40e_rx_queue __rte_unused *rxq) { @@ -3066,3 +3086,11 @@ i40e_xmit_fixed_burst_vec(void __rte_unused * tx_queue, { return 0; } + +uint16_t __attribute__((weak)) +i40e_xmit_pkts_vec_avx2(void __rte_unused * tx_queue, + struct rte_mbuf __rte_unused **tx_pkts, + uint16_t __rte_unused nb_pkts) +{ + return 0; +} diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h index 06c6a659..34cd7923 100644 --- a/drivers/net/i40e/i40e_rxtx.h +++ b/drivers/net/i40e/i40e_rxtx.h @@ -1,34 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2010-2015 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
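The __attribute__((weak)) stubs added to i40e_rxtx.c above let the PMD link even when i40e_rxtx_vec_avx2.c is not compiled in, while i40e_set_rx_function()/i40e_set_tx_function() only install the AVX2 routines after a runtime rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) check, deliberately limiting the AVX2 path to newer parts as the in-code comment explains. A reduced sketch of the same weak-fallback plus runtime-dispatch pattern, with illustrative names:

#include <stdint.h>

typedef uint16_t (*burst_fn)(void *queue, void **pkts, uint16_t nb);

/* Weak no-op: overridden by the real AVX2 object when it is built. */
uint16_t __attribute__((weak))
demo_burst_avx2(void *queue, void **pkts, uint16_t nb)
{
	(void)queue; (void)pkts; (void)nb;
	return 0;
}

/* Pick the AVX2 variant only when the runtime CPU check passed. */
static burst_fn
demo_select_burst(int cpu_flag_ok, burst_fn scalar_burst)
{
	return cpu_flag_ok ? demo_burst_avx2 : scalar_burst;
}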
+/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2015 Intel Corporation */ #ifndef _I40E_RXTX_H_ @@ -256,6 +227,12 @@ void i40e_set_tx_function_flag(struct rte_eth_dev *dev, void i40e_set_tx_function(struct rte_eth_dev *dev); void i40e_set_default_ptype_table(struct rte_eth_dev *dev); void i40e_set_default_pctype_table(struct rte_eth_dev *dev); +uint16_t i40e_recv_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); +uint16_t i40e_recv_scattered_pkts_vec_avx2(void *rx_queue, + struct rte_mbuf **rx_pkts, uint16_t nb_pkts); +uint16_t i40e_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); /* For each value it means, datasheet of hardware can tell more details * diff --git a/drivers/net/i40e/i40e_rxtx_vec_altivec.c b/drivers/net/i40e/i40e_rxtx_vec_altivec.c index 5e4e472a..f3fc8267 100644 --- a/drivers/net/i40e/i40e_rxtx_vec_altivec.c +++ b/drivers/net/i40e/i40e_rxtx_vec_altivec.c @@ -33,7 +33,7 @@ */ #include <stdint.h> -#include <rte_ethdev.h> +#include <rte_ethdev_driver.h> #include <rte_malloc.h> #include "base/i40e_prototype.h" diff --git a/drivers/net/i40e/i40e_rxtx_vec_avx2.c b/drivers/net/i40e/i40e_rxtx_vec_avx2.c new file mode 100644 index 00000000..dbcb61f3 --- /dev/null +++ b/drivers/net/i40e/i40e_rxtx_vec_avx2.c @@ -0,0 +1,792 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2017 Intel Corporation. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include <stdint.h> +#include <rte_ethdev_driver.h> +#include <rte_malloc.h> + +#include "base/i40e_prototype.h" +#include "base/i40e_type.h" +#include "i40e_ethdev.h" +#include "i40e_rxtx.h" +#include "i40e_rxtx_vec_common.h" + +#include <x86intrin.h> + +#ifndef __INTEL_COMPILER +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif + +static inline void +i40e_rxq_rearm(struct i40e_rx_queue *rxq) +{ + int i; + uint16_t rx_id; + volatile union i40e_rx_desc *rxdp; + struct i40e_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start]; + + rxdp = rxq->rx_ring + rxq->rxrearm_start; + + /* Pull 'n' more MBUFs into the software ring */ + if (rte_mempool_get_bulk(rxq->mp, + (void *)rxep, + RTE_I40E_RXQ_REARM_THRESH) < 0) { + if (rxq->rxrearm_nb + RTE_I40E_RXQ_REARM_THRESH >= + rxq->nb_rx_desc) { + __m128i dma_addr0; + dma_addr0 = _mm_setzero_si128(); + for (i = 0; i < RTE_I40E_DESCS_PER_LOOP; i++) { + rxep[i].mbuf = &rxq->fake_mbuf; + _mm_store_si128((__m128i *)&rxdp[i].read, + dma_addr0); + } + } + rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed += + RTE_I40E_RXQ_REARM_THRESH; + return; + } + +#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC + struct rte_mbuf *mb0, *mb1; + __m128i dma_addr0, dma_addr1; + __m128i hdr_room = _mm_set_epi64x(RTE_PKTMBUF_HEADROOM, + RTE_PKTMBUF_HEADROOM); + /* Initialize the mbufs in vector, process 2 mbufs in one loop */ + for (i = 0; i < RTE_I40E_RXQ_REARM_THRESH; i += 2, rxep += 2) { + __m128i vaddr0, vaddr1; + + mb0 = rxep[0].mbuf; + mb1 = rxep[1].mbuf; + + /* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */ + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_physaddr) != + offsetof(struct rte_mbuf, buf_addr) + 8); + vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr); + vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr); + + /* convert pa to dma_addr hdr/data */ + dma_addr0 = _mm_unpackhi_epi64(vaddr0, vaddr0); + dma_addr1 = _mm_unpackhi_epi64(vaddr1, vaddr1); + + /* add headroom to pa values */ + dma_addr0 = _mm_add_epi64(dma_addr0, hdr_room); + dma_addr1 = _mm_add_epi64(dma_addr1, hdr_room); + + /* flush desc with pa dma_addr */ + _mm_store_si128((__m128i *)&rxdp++->read, dma_addr0); + _mm_store_si128((__m128i *)&rxdp++->read, dma_addr1); + } +#else + struct rte_mbuf *mb0, *mb1, *mb2, *mb3; + __m256i dma_addr0_1, dma_addr2_3; + __m256i hdr_room = _mm256_set1_epi64x(RTE_PKTMBUF_HEADROOM); + /* Initialize the mbufs in vector, process 4 mbufs in one loop */ + for (i = 0; i < RTE_I40E_RXQ_REARM_THRESH; + i += 4, rxep += 4, rxdp += 4) { + __m128i vaddr0, vaddr1, vaddr2, vaddr3; + __m256i vaddr0_1, vaddr2_3; + + mb0 = rxep[0].mbuf; + mb1 = rxep[1].mbuf; + mb2 = rxep[2].mbuf; + mb3 = rxep[3].mbuf; + + /* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */ + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_physaddr) != + offsetof(struct rte_mbuf, buf_addr) + 8); + vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr); + vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr); + vaddr2 = _mm_loadu_si128((__m128i *)&mb2->buf_addr); + vaddr3 = _mm_loadu_si128((__m128i *)&mb3->buf_addr); + + /* + * merge 0 & 1, by casting 0 to 256-bit and inserting 1 + * into the high lanes. 
Similarly for 2 & 3 + */ + vaddr0_1 = _mm256_inserti128_si256( + _mm256_castsi128_si256(vaddr0), vaddr1, 1); + vaddr2_3 = _mm256_inserti128_si256( + _mm256_castsi128_si256(vaddr2), vaddr3, 1); + + /* convert pa to dma_addr hdr/data */ + dma_addr0_1 = _mm256_unpackhi_epi64(vaddr0_1, vaddr0_1); + dma_addr2_3 = _mm256_unpackhi_epi64(vaddr2_3, vaddr2_3); + + /* add headroom to pa values */ + dma_addr0_1 = _mm256_add_epi64(dma_addr0_1, hdr_room); + dma_addr2_3 = _mm256_add_epi64(dma_addr2_3, hdr_room); + + /* flush desc with pa dma_addr */ + _mm256_store_si256((__m256i *)&rxdp->read, dma_addr0_1); + _mm256_store_si256((__m256i *)&(rxdp + 2)->read, dma_addr2_3); + } + +#endif + + rxq->rxrearm_start += RTE_I40E_RXQ_REARM_THRESH; + if (rxq->rxrearm_start >= rxq->nb_rx_desc) + rxq->rxrearm_start = 0; + + rxq->rxrearm_nb -= RTE_I40E_RXQ_REARM_THRESH; + + rx_id = (uint16_t)((rxq->rxrearm_start == 0) ? + (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1)); + + /* Update the tail pointer on the NIC */ + I40E_PCI_REG_WRITE(rxq->qrx_tail, rx_id); +} + +#define PKTLEN_SHIFT 10 + +static inline uint16_t +_recv_raw_pkts_vec_avx2(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts, uint8_t *split_packet) +{ +#define RTE_I40E_DESCS_PER_LOOP_AVX 8 + + const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl; + const __m256i mbuf_init = _mm256_set_epi64x(0, 0, + 0, rxq->mbuf_initializer); + struct i40e_rx_entry *sw_ring = &rxq->sw_ring[rxq->rx_tail]; + volatile union i40e_rx_desc *rxdp = rxq->rx_ring + rxq->rx_tail; + const int avx_aligned = ((rxq->rx_tail & 1) == 0); + rte_prefetch0(rxdp); + + /* nb_pkts has to be floor-aligned to RTE_I40E_DESCS_PER_LOOP_AVX */ + nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_I40E_DESCS_PER_LOOP_AVX); + + /* See if we need to rearm the RX queue - gives the prefetch a bit + * of time to act + */ + while (rxq->rxrearm_nb > RTE_I40E_RXQ_REARM_THRESH) + i40e_rxq_rearm(rxq); + + /* Before we start moving massive data around, check to see if + * there is actually a packet available + */ + if (!(rxdp->wb.qword1.status_error_len & + rte_cpu_to_le_32(1 << I40E_RX_DESC_STATUS_DD_SHIFT))) + return 0; + + /* constants used in processing loop */ + const __m256i crc_adjust = _mm256_set_epi16( + /* first descriptor */ + 0, 0, 0, /* ignore non-length fields */ + -rxq->crc_len, /* sub crc on data_len */ + 0, /* ignore high-16bits of pkt_len */ + -rxq->crc_len, /* sub crc on pkt_len */ + 0, 0, /* ignore pkt_type field */ + /* second descriptor */ + 0, 0, 0, /* ignore non-length fields */ + -rxq->crc_len, /* sub crc on data_len */ + 0, /* ignore high-16bits of pkt_len */ + -rxq->crc_len, /* sub crc on pkt_len */ + 0, 0 /* ignore pkt_type field */ + ); + + /* 8 packets DD mask, LSB in each 32-bit value */ + const __m256i dd_check = _mm256_set1_epi32(1); + + /* 8 packets EOP mask, second-LSB in each 32-bit value */ + const __m256i eop_check = _mm256_slli_epi32(dd_check, + I40E_RX_DESC_STATUS_EOF_SHIFT); + + /* mask to shuffle from desc. 
to mbuf (2 descriptors)*/ + const __m256i shuf_msk = _mm256_set_epi8( + /* first descriptor */ + 7, 6, 5, 4, /* octet 4~7, 32bits rss */ + 3, 2, /* octet 2~3, low 16 bits vlan_macip */ + 15, 14, /* octet 15~14, 16 bits data_len */ + 0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */ + 15, 14, /* octet 15~14, low 16 bits pkt_len */ + 0xFF, 0xFF, /* pkt_type set as unknown */ + 0xFF, 0xFF, /*pkt_type set as unknown */ + /* second descriptor */ + 7, 6, 5, 4, /* octet 4~7, 32bits rss */ + 3, 2, /* octet 2~3, low 16 bits vlan_macip */ + 15, 14, /* octet 15~14, 16 bits data_len */ + 0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */ + 15, 14, /* octet 15~14, low 16 bits pkt_len */ + 0xFF, 0xFF, /* pkt_type set as unknown */ + 0xFF, 0xFF /*pkt_type set as unknown */ + ); + /* + * compile-time check the above crc and shuffle layout is correct. + * NOTE: the first field (lowest address) is given last in set_epi + * calls above. + */ + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 4); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, vlan_tci) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 10); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, hash) != + offsetof(struct rte_mbuf, rx_descriptor_fields1) + 12); + + /* Status/Error flag masks */ + /* + * mask everything except RSS, flow director and VLAN flags + * bit2 is for VLAN tag, bit11 for flow director indication + * bit13:12 for RSS indication. Bits 3-5 of error + * field (bits 22-24) are for IP/L4 checksum errors + */ + const __m256i flags_mask = _mm256_set1_epi32( + (1 << 2) | (1 << 11) | (3 << 12) | (7 << 22)); + /* + * data to be shuffled by result of flag mask. If VLAN bit is set, + * (bit 2), then position 4 in this array will be used in the + * destination + */ + const __m256i vlan_flags_shuf = _mm256_set_epi32( + 0, 0, PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, 0, + 0, 0, PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED, 0); + /* + * data to be shuffled by result of flag mask, shifted down 11. + * If RSS/FDIR bits are set, shuffle moves appropriate flags in + * place. + */ + const __m256i rss_flags_shuf = _mm256_set_epi8( + 0, 0, 0, 0, 0, 0, 0, 0, + PKT_RX_RSS_HASH | PKT_RX_FDIR, PKT_RX_RSS_HASH, 0, 0, + 0, 0, PKT_RX_FDIR, 0, /* end up 128-bits */ + 0, 0, 0, 0, 0, 0, 0, 0, + PKT_RX_RSS_HASH | PKT_RX_FDIR, PKT_RX_RSS_HASH, 0, 0, + 0, 0, PKT_RX_FDIR, 0); + + /* + * data to be shuffled by the result of the flags mask shifted by 22 + * bits. This gives use the l3_l4 flags. 
+ */ + const __m256i l3_l4_flags_shuf = _mm256_set_epi8(0, 0, 0, 0, 0, 0, 0, 0, + /* shift right 1 bit to make sure it not exceed 255 */ + (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD) >> 1, + (PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD) >> 1, + (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> 1, + PKT_RX_IP_CKSUM_BAD >> 1, + (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1, + /* second 128-bits */ + 0, 0, 0, 0, 0, 0, 0, 0, + (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD) >> 1, + (PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD) >> 1, + (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> 1, + PKT_RX_IP_CKSUM_BAD >> 1, + (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1); + + const __m256i cksum_mask = _mm256_set1_epi32( + PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD | + PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD | + PKT_RX_EIP_CKSUM_BAD); + + RTE_SET_USED(avx_aligned); /* for 32B descriptors we don't use this */ + + uint16_t i, received; + for (i = 0, received = 0; i < nb_pkts; + i += RTE_I40E_DESCS_PER_LOOP_AVX, + rxdp += RTE_I40E_DESCS_PER_LOOP_AVX) { + /* step 1, copy over 8 mbuf pointers to rx_pkts array */ + _mm256_storeu_si256((void *)&rx_pkts[i], + _mm256_loadu_si256((void *)&sw_ring[i])); +#ifdef RTE_ARCH_X86_64 + _mm256_storeu_si256((void *)&rx_pkts[i + 4], + _mm256_loadu_si256((void *)&sw_ring[i + 4])); +#endif + + __m256i raw_desc0_1, raw_desc2_3, raw_desc4_5, raw_desc6_7; +#ifdef RTE_LIBRTE_I40E_16BYTE_RX_DESC + /* for AVX we need alignment otherwise loads are not atomic */ + if (avx_aligned) { + /* load in descriptors, 2 at a time, in reverse order */ + raw_desc6_7 = _mm256_load_si256((void *)(rxdp + 6)); + rte_compiler_barrier(); + raw_desc4_5 = _mm256_load_si256((void *)(rxdp + 4)); + rte_compiler_barrier(); + raw_desc2_3 = _mm256_load_si256((void *)(rxdp + 2)); + rte_compiler_barrier(); + raw_desc0_1 = _mm256_load_si256((void *)(rxdp + 0)); + } else +#endif + do { + const __m128i raw_desc7 = _mm_load_si128((void *)(rxdp + 7)); + rte_compiler_barrier(); + const __m128i raw_desc6 = _mm_load_si128((void *)(rxdp + 6)); + rte_compiler_barrier(); + const __m128i raw_desc5 = _mm_load_si128((void *)(rxdp + 5)); + rte_compiler_barrier(); + const __m128i raw_desc4 = _mm_load_si128((void *)(rxdp + 4)); + rte_compiler_barrier(); + const __m128i raw_desc3 = _mm_load_si128((void *)(rxdp + 3)); + rte_compiler_barrier(); + const __m128i raw_desc2 = _mm_load_si128((void *)(rxdp + 2)); + rte_compiler_barrier(); + const __m128i raw_desc1 = _mm_load_si128((void *)(rxdp + 1)); + rte_compiler_barrier(); + const __m128i raw_desc0 = _mm_load_si128((void *)(rxdp + 0)); + + raw_desc6_7 = _mm256_inserti128_si256( + _mm256_castsi128_si256(raw_desc6), raw_desc7, 1); + raw_desc4_5 = _mm256_inserti128_si256( + _mm256_castsi128_si256(raw_desc4), raw_desc5, 1); + raw_desc2_3 = _mm256_inserti128_si256( + _mm256_castsi128_si256(raw_desc2), raw_desc3, 1); + raw_desc0_1 = _mm256_inserti128_si256( + _mm256_castsi128_si256(raw_desc0), raw_desc1, 1); + } while (0); + + if (split_packet) { + int j; + for (j = 0; j < RTE_I40E_DESCS_PER_LOOP_AVX; j++) + rte_mbuf_prefetch_part2(rx_pkts[i + j]); + } + + /* + * convert descriptors 
4-7 into mbufs, adjusting length and + * re-arranging fields. Then write into the mbuf + */ + const __m256i len6_7 = _mm256_slli_epi32(raw_desc6_7, PKTLEN_SHIFT); + const __m256i len4_5 = _mm256_slli_epi32(raw_desc4_5, PKTLEN_SHIFT); + const __m256i desc6_7 = _mm256_blend_epi16(raw_desc6_7, len6_7, 0x80); + const __m256i desc4_5 = _mm256_blend_epi16(raw_desc4_5, len4_5, 0x80); + __m256i mb6_7 = _mm256_shuffle_epi8(desc6_7, shuf_msk); + __m256i mb4_5 = _mm256_shuffle_epi8(desc4_5, shuf_msk); + mb6_7 = _mm256_add_epi16(mb6_7, crc_adjust); + mb4_5 = _mm256_add_epi16(mb4_5, crc_adjust); + /* + * to get packet types, shift 64-bit values down 30 bits + * and so ptype is in lower 8-bits in each + */ + const __m256i ptypes6_7 = _mm256_srli_epi64(desc6_7, 30); + const __m256i ptypes4_5 = _mm256_srli_epi64(desc4_5, 30); + const uint8_t ptype7 = _mm256_extract_epi8(ptypes6_7, 24); + const uint8_t ptype6 = _mm256_extract_epi8(ptypes6_7, 8); + const uint8_t ptype5 = _mm256_extract_epi8(ptypes4_5, 24); + const uint8_t ptype4 = _mm256_extract_epi8(ptypes4_5, 8); + mb6_7 = _mm256_insert_epi32(mb6_7, ptype_tbl[ptype7], 4); + mb6_7 = _mm256_insert_epi32(mb6_7, ptype_tbl[ptype6], 0); + mb4_5 = _mm256_insert_epi32(mb4_5, ptype_tbl[ptype5], 4); + mb4_5 = _mm256_insert_epi32(mb4_5, ptype_tbl[ptype4], 0); + /* merge the status bits into one register */ + const __m256i status4_7 = _mm256_unpackhi_epi32(desc6_7, + desc4_5); + + /* + * convert descriptors 0-3 into mbufs, adjusting length and + * re-arranging fields. Then write into the mbuf + */ + const __m256i len2_3 = _mm256_slli_epi32(raw_desc2_3, PKTLEN_SHIFT); + const __m256i len0_1 = _mm256_slli_epi32(raw_desc0_1, PKTLEN_SHIFT); + const __m256i desc2_3 = _mm256_blend_epi16(raw_desc2_3, len2_3, 0x80); + const __m256i desc0_1 = _mm256_blend_epi16(raw_desc0_1, len0_1, 0x80); + __m256i mb2_3 = _mm256_shuffle_epi8(desc2_3, shuf_msk); + __m256i mb0_1 = _mm256_shuffle_epi8(desc0_1, shuf_msk); + mb2_3 = _mm256_add_epi16(mb2_3, crc_adjust); + mb0_1 = _mm256_add_epi16(mb0_1, crc_adjust); + /* get the packet types */ + const __m256i ptypes2_3 = _mm256_srli_epi64(desc2_3, 30); + const __m256i ptypes0_1 = _mm256_srli_epi64(desc0_1, 30); + const uint8_t ptype3 = _mm256_extract_epi8(ptypes2_3, 24); + const uint8_t ptype2 = _mm256_extract_epi8(ptypes2_3, 8); + const uint8_t ptype1 = _mm256_extract_epi8(ptypes0_1, 24); + const uint8_t ptype0 = _mm256_extract_epi8(ptypes0_1, 8); + mb2_3 = _mm256_insert_epi32(mb2_3, ptype_tbl[ptype3], 4); + mb2_3 = _mm256_insert_epi32(mb2_3, ptype_tbl[ptype2], 0); + mb0_1 = _mm256_insert_epi32(mb0_1, ptype_tbl[ptype1], 4); + mb0_1 = _mm256_insert_epi32(mb0_1, ptype_tbl[ptype0], 0); + /* merge the status bits into one register */ + const __m256i status0_3 = _mm256_unpackhi_epi32(desc2_3, + desc0_1); + + /* + * take the two sets of status bits and merge to one + * After merge, the packets status flags are in the + * order (hi->lo): [1, 3, 5, 7, 0, 2, 4, 6] + */ + __m256i status0_7 = _mm256_unpacklo_epi64(status4_7, + status0_3); + + /* now do flag manipulation */ + + /* get only flag/error bits we want */ + const __m256i flag_bits = _mm256_and_si256( + status0_7, flags_mask); + /* set vlan and rss flags */ + const __m256i vlan_flags = _mm256_shuffle_epi8( + vlan_flags_shuf, flag_bits); + const __m256i rss_flags = _mm256_shuffle_epi8( + rss_flags_shuf, _mm256_srli_epi32(flag_bits, 11)); + /* + * l3_l4_error flags, shuffle, then shift to correct adjustment + * of flags in flags_shuf, and finally mask out extra bits + */ + __m256i l3_l4_flags = 
_mm256_shuffle_epi8(l3_l4_flags_shuf, + _mm256_srli_epi32(flag_bits, 22)); + l3_l4_flags = _mm256_slli_epi32(l3_l4_flags, 1); + l3_l4_flags = _mm256_and_si256(l3_l4_flags, cksum_mask); + + /* merge flags */ + const __m256i mbuf_flags = _mm256_or_si256(l3_l4_flags, + _mm256_or_si256(rss_flags, vlan_flags)); + /* + * At this point, we have the 8 sets of flags in the low 16-bits + * of each 32-bit value in vlan0. + * We want to extract these, and merge them with the mbuf init data + * so we can do a single write to the mbuf to set the flags + * and all the other initialization fields. Extracting the + * appropriate flags means that we have to do a shift and blend for + * each mbuf before we do the write. However, we can also + * add in the previously computed rx_descriptor fields to + * make a single 256-bit write per mbuf + */ + /* check the structure matches expectations */ + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) != + offsetof(struct rte_mbuf, rearm_data) + 8); + RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, rearm_data) != + RTE_ALIGN(offsetof(struct rte_mbuf, rearm_data), 16)); + /* build up data and do writes */ + __m256i rearm0, rearm1, rearm2, rearm3, rearm4, rearm5, + rearm6, rearm7; + rearm6 = _mm256_blend_epi32(mbuf_init, _mm256_slli_si256(mbuf_flags, 8), 0x04); + rearm4 = _mm256_blend_epi32(mbuf_init, _mm256_slli_si256(mbuf_flags, 4), 0x04); + rearm2 = _mm256_blend_epi32(mbuf_init, mbuf_flags, 0x04); + rearm0 = _mm256_blend_epi32(mbuf_init, _mm256_srli_si256(mbuf_flags, 4), 0x04); + /* permute to add in the rx_descriptor e.g. rss fields */ + rearm6 = _mm256_permute2f128_si256(rearm6, mb6_7, 0x20); + rearm4 = _mm256_permute2f128_si256(rearm4, mb4_5, 0x20); + rearm2 = _mm256_permute2f128_si256(rearm2, mb2_3, 0x20); + rearm0 = _mm256_permute2f128_si256(rearm0, mb0_1, 0x20); + /* write to mbuf */ + _mm256_storeu_si256((__m256i *)&rx_pkts[i + 6]->rearm_data, rearm6); + _mm256_storeu_si256((__m256i *)&rx_pkts[i + 4]->rearm_data, rearm4); + _mm256_storeu_si256((__m256i *)&rx_pkts[i + 2]->rearm_data, rearm2); + _mm256_storeu_si256((__m256i *)&rx_pkts[i + 0]->rearm_data, rearm0); + + /* repeat for the odd mbufs */ + const __m256i odd_flags = _mm256_castsi128_si256( + _mm256_extracti128_si256(mbuf_flags, 1)); + rearm7 = _mm256_blend_epi32(mbuf_init, _mm256_slli_si256(odd_flags, 8), 0x04); + rearm5 = _mm256_blend_epi32(mbuf_init, _mm256_slli_si256(odd_flags, 4), 0x04); + rearm3 = _mm256_blend_epi32(mbuf_init, odd_flags, 0x04); + rearm1 = _mm256_blend_epi32(mbuf_init, _mm256_srli_si256(odd_flags, 4), 0x04); + /* since odd mbufs are already in hi 128-bits use blend */ + rearm7 = _mm256_blend_epi32(rearm7, mb6_7, 0xF0); + rearm5 = _mm256_blend_epi32(rearm5, mb4_5, 0xF0); + rearm3 = _mm256_blend_epi32(rearm3, mb2_3, 0xF0); + rearm1 = _mm256_blend_epi32(rearm1, mb0_1, 0xF0); + /* again write to mbufs */ + _mm256_storeu_si256((__m256i *)&rx_pkts[i + 7]->rearm_data, rearm7); + _mm256_storeu_si256((__m256i *)&rx_pkts[i + 5]->rearm_data, rearm5); + _mm256_storeu_si256((__m256i *)&rx_pkts[i + 3]->rearm_data, rearm3); + _mm256_storeu_si256((__m256i *)&rx_pkts[i + 1]->rearm_data, rearm1); + + /* extract and record EOP bit */ + if (split_packet) { + const __m128i eop_mask = _mm_set1_epi16( + 1 << I40E_RX_DESC_STATUS_EOF_SHIFT); + const __m256i eop_bits256 = _mm256_and_si256(status0_7, + eop_check); + /* pack status bits into a single 128-bit register */ + const __m128i eop_bits = _mm_packus_epi32( + _mm256_castsi256_si128(eop_bits256), + _mm256_extractf128_si256(eop_bits256, 1)); + /* + * flip 
bits, and mask out the EOP bit, which is now + * a split-packet bit i.e. !EOP, rather than EOP one. + */ + __m128i split_bits = _mm_andnot_si128(eop_bits, + eop_mask); + /* + * eop bits are out of order, so we need to shuffle them + * back into order again. In doing so, only use low 8 + * bits, which acts like another pack instruction + * The original order is (hi->lo): 1,3,5,7,0,2,4,6 + * [Since we use epi8, the 16-bit positions are + * multiplied by 2 in the eop_shuffle value.] + */ + __m128i eop_shuffle = _mm_set_epi8( + 0xFF, 0xFF, 0xFF, 0xFF, /* zero hi 64b */ + 0xFF, 0xFF, 0xFF, 0xFF, + 8, 0, 10, 2, /* move values to lo 64b */ + 12, 4, 14, 6); + split_bits = _mm_shuffle_epi8(split_bits, eop_shuffle); + *(uint64_t *)split_packet = _mm_cvtsi128_si64(split_bits); + split_packet += RTE_I40E_DESCS_PER_LOOP_AVX; + } + + /* perform dd_check */ + status0_7 = _mm256_and_si256(status0_7, dd_check); + status0_7 = _mm256_packs_epi32(status0_7, + _mm256_setzero_si256()); + + uint64_t burst = __builtin_popcountll(_mm_cvtsi128_si64( + _mm256_extracti128_si256(status0_7, 1))); + burst += __builtin_popcountll(_mm_cvtsi128_si64( + _mm256_castsi256_si128(status0_7))); + received += burst; + if (burst != RTE_I40E_DESCS_PER_LOOP_AVX) + break; + } + + /* update tail pointers */ + rxq->rx_tail += received; + rxq->rx_tail &= (rxq->nb_rx_desc - 1); + if ((rxq->rx_tail & 1) == 1 && received > 1) { /* keep avx2 aligned */ + rxq->rx_tail--; + received--; + } + rxq->rxrearm_nb += received; + return received; +} + +/* + * Notice: + * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet + */ +uint16_t +i40e_recv_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + return _recv_raw_pkts_vec_avx2(rx_queue, rx_pkts, nb_pkts, NULL); +} + +/* + * vPMD receive routine that reassembles single burst of 32 scattered packets + * Notice: + * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet + */ +static uint16_t +i40e_recv_scattered_burst_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + struct i40e_rx_queue *rxq = rx_queue; + uint8_t split_flags[RTE_I40E_VPMD_RX_BURST] = {0}; + + /* get some new buffers */ + uint16_t nb_bufs = _recv_raw_pkts_vec_avx2(rxq, rx_pkts, nb_pkts, + split_flags); + if (nb_bufs == 0) + return 0; + + /* happy day case, full burst + no packets to be joined */ + const uint64_t *split_fl64 = (uint64_t *)split_flags; + + if (rxq->pkt_first_seg == NULL && + split_fl64[0] == 0 && split_fl64[1] == 0 && + split_fl64[2] == 0 && split_fl64[3] == 0) + return nb_bufs; + + /* reassemble any packets that need reassembly*/ + unsigned int i = 0; + + if (rxq->pkt_first_seg == NULL) { + /* find the first split flag, and only reassemble then*/ + while (i < nb_bufs && !split_flags[i]) + i++; + if (i == nb_bufs) + return nb_bufs; + } + return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i, + &split_flags[i]); +} + +/* + * vPMD receive routine that reassembles scattered packets. 
+ * Main receive routine that can handle arbitrary burst sizes + * Notice: + * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet + */ +uint16_t +i40e_recv_scattered_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + uint16_t retval = 0; + while (nb_pkts > RTE_I40E_VPMD_RX_BURST) { + uint16_t burst = i40e_recv_scattered_burst_vec_avx2(rx_queue, + rx_pkts + retval, RTE_I40E_VPMD_RX_BURST); + retval += burst; + nb_pkts -= burst; + if (burst < RTE_I40E_VPMD_RX_BURST) + return retval; + } + return retval + i40e_recv_scattered_burst_vec_avx2(rx_queue, + rx_pkts + retval, nb_pkts); +} + + +static inline void +vtx1(volatile struct i40e_tx_desc *txdp, + struct rte_mbuf *pkt, uint64_t flags) +{ + uint64_t high_qw = (I40E_TX_DESC_DTYPE_DATA | + ((uint64_t)flags << I40E_TXD_QW1_CMD_SHIFT) | + ((uint64_t)pkt->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT)); + + __m128i descriptor = _mm_set_epi64x(high_qw, + pkt->buf_physaddr + pkt->data_off); + _mm_store_si128((__m128i *)txdp, descriptor); +} + +static inline void +vtx(volatile struct i40e_tx_desc *txdp, + struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags) +{ + const uint64_t hi_qw_tmpl = (I40E_TX_DESC_DTYPE_DATA | + ((uint64_t)flags << I40E_TXD_QW1_CMD_SHIFT)); + + /* if unaligned on 32-bit boundary, do one to align */ + if (((uintptr_t)txdp & 0x1F) != 0 && nb_pkts != 0) { + vtx1(txdp, *pkt, flags); + nb_pkts--, txdp++, pkt++; + } + + /* do two at a time while possible, in bursts */ + for (; nb_pkts > 3; txdp += 4, pkt += 4, nb_pkts -= 4) { + uint64_t hi_qw3 = hi_qw_tmpl | + ((uint64_t)pkt[3]->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT); + uint64_t hi_qw2 = hi_qw_tmpl | + ((uint64_t)pkt[2]->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT); + uint64_t hi_qw1 = hi_qw_tmpl | + ((uint64_t)pkt[1]->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT); + uint64_t hi_qw0 = hi_qw_tmpl | + ((uint64_t)pkt[0]->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT); + + __m256i desc2_3 = _mm256_set_epi64x( + hi_qw3, pkt[3]->buf_physaddr + pkt[3]->data_off, + hi_qw2, pkt[2]->buf_physaddr + pkt[2]->data_off); + __m256i desc0_1 = _mm256_set_epi64x( + hi_qw1, pkt[1]->buf_physaddr + pkt[1]->data_off, + hi_qw0, pkt[0]->buf_physaddr + pkt[0]->data_off); + _mm256_store_si256((void *)(txdp + 2), desc2_3); + _mm256_store_si256((void *)txdp, desc0_1); + } + + /* do any last ones */ + while (nb_pkts) { + vtx1(txdp, *pkt, flags); + txdp++, pkt++, nb_pkts--; + } +} + +static inline uint16_t +i40e_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue; + volatile struct i40e_tx_desc *txdp; + struct i40e_tx_entry *txep; + uint16_t n, nb_commit, tx_id; + uint64_t flags = I40E_TD_CMD; + uint64_t rs = I40E_TX_DESC_CMD_RS | I40E_TD_CMD; + + /* cross rx_thresh boundary is not allowed */ + nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh); + + if (txq->nb_tx_free < txq->tx_free_thresh) + i40e_tx_free_bufs(txq); + + nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts); + if (unlikely(nb_pkts == 0)) + return 0; + + tx_id = txq->tx_tail; + txdp = &txq->tx_ring[tx_id]; + txep = &txq->sw_ring[tx_id]; + + txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts); + + n = (uint16_t)(txq->nb_tx_desc - tx_id); + if (nb_commit >= n) { + tx_backlog_entry(txep, tx_pkts, n); + + vtx(txdp, tx_pkts, n - 1, flags); + tx_pkts += (n - 1); + txdp += (n - 1); + + vtx1(txdp, *tx_pkts++, rs); + + nb_commit = (uint16_t)(nb_commit - n); + + tx_id = 0; + txq->tx_next_rs = 
(uint16_t)(txq->tx_rs_thresh - 1); + + /* avoid reach the end of ring */ + txdp = &txq->tx_ring[tx_id]; + txep = &txq->sw_ring[tx_id]; + } + + tx_backlog_entry(txep, tx_pkts, nb_commit); + + vtx(txdp, tx_pkts, nb_commit, flags); + + tx_id = (uint16_t)(tx_id + nb_commit); + if (tx_id > txq->tx_next_rs) { + txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |= + rte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) << + I40E_TXD_QW1_CMD_SHIFT); + txq->tx_next_rs = + (uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh); + } + + txq->tx_tail = tx_id; + + I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail); + + return nb_pkts; +} + +uint16_t +i40e_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + uint16_t nb_tx = 0; + struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue; + + while (nb_pkts) { + uint16_t ret, num; + + num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh); + ret = i40e_xmit_fixed_burst_vec_avx2(tx_queue, &tx_pkts[nb_tx], + num); + nb_tx += ret; + nb_pkts -= ret; + if (ret < num) + break; + } + + return nb_tx; +} diff --git a/drivers/net/i40e/i40e_rxtx_vec_common.h b/drivers/net/i40e/i40e_rxtx_vec_common.h index 39a6da06..3ffedcb9 100644 --- a/drivers/net/i40e/i40e_rxtx_vec_common.h +++ b/drivers/net/i40e/i40e_rxtx_vec_common.h @@ -1,40 +1,11 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2010-2015 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
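i40e_xmit_pkts_vec_avx2() just above may not let one call of the fixed-burst routine cross tx_rs_thresh, so it slices the caller's burst into tx_rs_thresh-sized pieces and stops as soon as a slice is only partially accepted (the descriptor ring is full). The same wrapper shape, reduced to a standalone sketch with illustrative names:

#include <stdint.h>

typedef uint16_t (*fixed_burst_fn)(void *txq, void **pkts, uint16_t nb);

static uint16_t
demo_xmit_chunked(void *txq, void **pkts, uint16_t nb_pkts,
		  uint16_t rs_thresh, fixed_burst_fn fixed_burst)
{
	uint16_t nb_tx = 0;

	while (nb_pkts) {
		uint16_t num = nb_pkts < rs_thresh ? nb_pkts : rs_thresh;
		uint16_t ret = fixed_burst(txq, &pkts[nb_tx], num);

		nb_tx += ret;
		nb_pkts -= ret;
		if (ret < num)	/* ring full: stop early for this call */
			break;
	}
	return nb_tx;
}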
+/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2015 Intel Corporation */ #ifndef _I40E_RXTX_VEC_COMMON_H_ #define _I40E_RXTX_VEC_COMMON_H_ #include <stdint.h> -#include <rte_ethdev.h> +#include <rte_ethdev_driver.h> #include <rte_malloc.h> #include "i40e_ethdev.h" diff --git a/drivers/net/i40e/i40e_rxtx_vec_neon.c b/drivers/net/i40e/i40e_rxtx_vec_neon.c index b5685e2b..e549d1e8 100644 --- a/drivers/net/i40e/i40e_rxtx_vec_neon.c +++ b/drivers/net/i40e/i40e_rxtx_vec_neon.c @@ -33,7 +33,7 @@ */ #include <stdint.h> -#include <rte_ethdev.h> +#include <rte_ethdev_driver.h> #include <rte_malloc.h> #include "base/i40e_prototype.h" diff --git a/drivers/net/i40e/i40e_rxtx_vec_sse.c b/drivers/net/i40e/i40e_rxtx_vec_sse.c index 9d2d1f83..3b22588c 100644 --- a/drivers/net/i40e/i40e_rxtx_vec_sse.c +++ b/drivers/net/i40e/i40e_rxtx_vec_sse.c @@ -1,38 +1,9 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2010-2015 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2015 Intel Corporation */ #include <stdint.h> -#include <rte_ethdev.h> +#include <rte_ethdev_driver.h> #include <rte_malloc.h> #include "base/i40e_prototype.h" diff --git a/drivers/net/i40e/i40e_tm.c b/drivers/net/i40e/i40e_tm.c index 44316f64..c76760c9 100644 --- a/drivers/net/i40e/i40e_tm.c +++ b/drivers/net/i40e/i40e_tm.c @@ -1,34 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2010-2017 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. 
- * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2017 Intel Corporation */ #include <rte_malloc.h> diff --git a/drivers/net/i40e/meson.build b/drivers/net/i40e/meson.build new file mode 100644 index 00000000..8764b0e5 --- /dev/null +++ b/drivers/net/i40e/meson.build @@ -0,0 +1,46 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2017 Intel Corporation + +cflags += ['-DPF_DRIVER', + '-DVF_DRIVER', + '-DINTEGRATED_VF', + '-DX722_A0_SUPPORT'] + +subdir('base') +objs = [base_objs] + +sources = files( + 'i40e_ethdev.c', + 'i40e_rxtx.c', + 'i40e_ethdev_vf.c', + 'i40e_pf.c', + 'i40e_fdir.c', + 'i40e_flow.c', + 'i40e_tm.c', + 'rte_pmd_i40e.c' + ) + +deps += ['hash'] + +if arch_subdir == 'x86' + dpdk_conf.set('RTE_LIBRTE_I40E_INC_VECTOR', 1) + sources += files('i40e_rxtx_vec_sse.c') + + # compile AVX2 version if either: + # a. we have AVX supported in minimum instruction set baseline + # b. it's not minimum instruction set, but supported by compiler + if dpdk_conf.has('RTE_MACHINE_CPUFLAG_AVX2') + sources += files('i40e_rxtx_vec_avx2.c') + elif cc.has_argument('-mavx2') + i40e_avx2_lib = static_library('i40e_avx2_lib', + 'i40e_rxtx_vec_avx2.c', + dependencies: [static_rte_ethdev, + static_rte_kvargs, static_rte_hash], + c_args: '-mavx2') + objs += i40e_avx2_lib.extract_objects('i40e_rxtx_vec_avx2.c') + endif +endif + +includes += include_directories('base') + +install_headers('rte_pmd_i40e.h') diff --git a/drivers/net/i40e/rte_pmd_i40e.c b/drivers/net/i40e/rte_pmd_i40e.c index aeb92af3..dae59e6d 100644 --- a/drivers/net/i40e/rte_pmd_i40e.c +++ b/drivers/net/i40e/rte_pmd_i40e.c @@ -1,34 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2010-2017 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2017 Intel Corporation */ #include <rte_malloc.h> @@ -1525,7 +1496,14 @@ i40e_check_profile_info(uint16_t port, uint8_t *profile_info_sec) struct rte_pmd_i40e_profile_info *pinfo, *p; uint32_t i; int ret; + static const uint32_t group_mask = 0x00ff0000; + pinfo = (struct rte_pmd_i40e_profile_info *)(profile_info_sec + + sizeof(struct i40e_profile_section_header)); + if (pinfo->track_id == 0) { + PMD_DRV_LOG(INFO, "Read-only profile."); + return 0; + } buff = rte_zmalloc("pinfo_list", (I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4), 0); @@ -1544,8 +1522,6 @@ i40e_check_profile_info(uint16_t port, uint8_t *profile_info_sec) return -1; } p_list = (struct rte_pmd_i40e_profile_list *)buff; - pinfo = (struct rte_pmd_i40e_profile_info *)(profile_info_sec + - sizeof(struct i40e_profile_section_header)); for (i = 0; i < p_list->p_count; i++) { p = &p_list->p_info[i]; if (pinfo->track_id == p->track_id) { @@ -1554,6 +1530,23 @@ i40e_check_profile_info(uint16_t port, uint8_t *profile_info_sec) return 1; } } + for (i = 0; i < p_list->p_count; i++) { + p = &p_list->p_info[i]; + if ((p->track_id & group_mask) == 0) { + PMD_DRV_LOG(INFO, "Profile of the group 0 exists."); + rte_free(buff); + return 2; + } + } + for (i = 0; i < p_list->p_count; i++) { + p = &p_list->p_info[i]; + if ((pinfo->track_id & group_mask) != + (p->track_id & group_mask)) { + PMD_DRV_LOG(INFO, "Profile of different group exists."); + rte_free(buff); + return 3; + } + } rte_free(buff); return 0; @@ -1573,6 +1566,7 @@ rte_pmd_i40e_process_ddp_package(uint16_t port, uint8_t *buff, uint8_t *profile_info_sec; int is_exist; enum i40e_status_code status = I40E_SUCCESS; + static const uint32_t type_mask = 0xff000000; if (op != RTE_PMD_I40E_PKG_OP_WR_ADD && op != RTE_PMD_I40E_PKG_OP_WR_ONLY && @@ -1624,6 +1618,10 @@ rte_pmd_i40e_process_ddp_package(uint16_t port, uint8_t *buff, return -EINVAL; } + /* force read-only track_id for type 0 */ + if ((track_id & type_mask) == 0) + track_id = 0; + /* Find profile segment */ profile_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_I40E, pkg_hdr); @@ -1657,12 +1655,17 @@ rte_pmd_i40e_process_ddp_package(uint16_t port, uint8_t *buff, if (op == RTE_PMD_I40E_PKG_OP_WR_ADD) { if (is_exist) { - PMD_DRV_LOG(ERR, "Profile already exists."); + if (is_exist == 1) + PMD_DRV_LOG(ERR, "Profile already exists."); + else if (is_exist == 2) + PMD_DRV_LOG(ERR, "Profile of group 0 already exists."); + else if (is_exist == 3) + PMD_DRV_LOG(ERR, "Profile of different group already exists"); rte_free(profile_info_sec); return -EEXIST; } } else if (op == RTE_PMD_I40E_PKG_OP_WR_DEL) { - if (!is_exist) { + if (is_exist != 1) { PMD_DRV_LOG(ERR, "Profile does not 
exist."); rte_free(profile_info_sec); return -EACCES; @@ -2082,7 +2085,8 @@ static int check_invalid_pkt_type(uint32_t pkt_type) l2 != RTE_PTYPE_L2_ETHER_LLDP && l2 != RTE_PTYPE_L2_ETHER_NSH && l2 != RTE_PTYPE_L2_ETHER_VLAN && - l2 != RTE_PTYPE_L2_ETHER_QINQ) + l2 != RTE_PTYPE_L2_ETHER_QINQ && + l2 != RTE_PTYPE_L2_ETHER_PPPOE) return -1; if (l3 && @@ -2111,7 +2115,8 @@ static int check_invalid_pkt_type(uint32_t pkt_type) tnl != RTE_PTYPE_TUNNEL_GENEVE && tnl != RTE_PTYPE_TUNNEL_GRENAT && tnl != RTE_PTYPE_TUNNEL_GTPC && - tnl != RTE_PTYPE_TUNNEL_GTPU) + tnl != RTE_PTYPE_TUNNEL_GTPU && + tnl != RTE_PTYPE_TUNNEL_L2TP) return -1; if (il2 && @@ -2845,22 +2850,23 @@ i40e_flush_queue_region_all_conf(struct rte_eth_dev *dev, return 0; } - info->queue_region_number = 1; - info->region[0].queue_num = main_vsi->nb_used_qps; - info->region[0].queue_start_index = 0; - - ret = i40e_vsi_update_queue_region_mapping(hw, pf); - if (ret != I40E_SUCCESS) - PMD_DRV_LOG(INFO, "Failed to flush queue region mapping."); + if (info->queue_region_number) { + info->queue_region_number = 1; + info->region[0].queue_num = main_vsi->nb_used_qps; + info->region[0].queue_start_index = 0; - ret = i40e_dcb_init_configure(dev, TRUE); - if (ret != I40E_SUCCESS) { - PMD_DRV_LOG(INFO, "Failed to flush dcb."); - pf->flags &= ~I40E_FLAG_DCB; - } + ret = i40e_vsi_update_queue_region_mapping(hw, pf); + if (ret != I40E_SUCCESS) + PMD_DRV_LOG(INFO, "Failed to flush queue region mapping."); - i40e_init_queue_region_conf(dev); + ret = i40e_dcb_init_configure(dev, TRUE); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(INFO, "Failed to flush dcb."); + pf->flags &= ~I40E_FLAG_DCB; + } + i40e_init_queue_region_conf(dev); + } return 0; } @@ -2985,3 +2991,144 @@ int rte_pmd_i40e_flow_add_del_packet_template( return i40e_flow_add_del_fdir_filter(dev, &filter_conf, add); } + +int +rte_pmd_i40e_inset_get(uint16_t port, uint8_t pctype, + struct rte_pmd_i40e_inset *inset, + enum rte_pmd_i40e_inset_type inset_type) +{ + struct rte_eth_dev *dev; + struct i40e_hw *hw; + uint64_t inset_reg; + uint32_t mask_reg[2]; + int i; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + + if (!is_i40e_supported(dev)) + return -ENOTSUP; + + if (pctype > 63) + return -EINVAL; + + hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + memset(inset, 0, sizeof(struct rte_pmd_i40e_inset)); + + switch (inset_type) { + case INSET_HASH: + /* Get input set */ + inset_reg = + i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, pctype)); + inset_reg <<= I40E_32_BIT_WIDTH; + inset_reg |= + i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, pctype)); + /* Get field mask */ + mask_reg[0] = + i40e_read_rx_ctl(hw, I40E_GLQF_HASH_MSK(0, pctype)); + mask_reg[1] = + i40e_read_rx_ctl(hw, I40E_GLQF_HASH_MSK(1, pctype)); + break; + case INSET_FDIR: + inset_reg = + i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 1)); + inset_reg <<= I40E_32_BIT_WIDTH; + inset_reg |= + i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 0)); + mask_reg[0] = + i40e_read_rx_ctl(hw, I40E_GLQF_FD_MSK(0, pctype)); + mask_reg[1] = + i40e_read_rx_ctl(hw, I40E_GLQF_FD_MSK(1, pctype)); + break; + case INSET_FDIR_FLX: + inset_reg = + i40e_read_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype)); + mask_reg[0] = + i40e_read_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, 0)); + mask_reg[1] = + i40e_read_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, 1)); + break; + default: + PMD_DRV_LOG(ERR, "Unsupported input set type."); + return -EINVAL; + } + + inset->inset = inset_reg; + + for (i = 0; i < 2; i++) { + 
inset->mask[i].field_idx = ((mask_reg[i] >> 16) & 0x3F); + inset->mask[i].mask = mask_reg[i] & 0xFFFF; + } + + return 0; +} + +int +rte_pmd_i40e_inset_set(uint16_t port, uint8_t pctype, + struct rte_pmd_i40e_inset *inset, + enum rte_pmd_i40e_inset_type inset_type) +{ + struct rte_eth_dev *dev; + struct i40e_hw *hw; + uint64_t inset_reg; + uint32_t mask_reg[2]; + int i; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + + if (!is_i40e_supported(dev)) + return -ENOTSUP; + + if (pctype > 63) + return -EINVAL; + + hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* Clear mask first */ + for (i = 0; i < 2; i++) + i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype), 0); + + inset_reg = inset->inset; + for (i = 0; i < 2; i++) + mask_reg[i] = (inset->mask[i].field_idx << 16) | + inset->mask[i].mask; + + switch (inset_type) { + case INSET_HASH: + i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(0, pctype), + (uint32_t)(inset_reg & UINT32_MAX)); + i40e_check_write_reg(hw, I40E_GLQF_HASH_INSET(1, pctype), + (uint32_t)((inset_reg >> + I40E_32_BIT_WIDTH) & UINT32_MAX)); + for (i = 0; i < 2; i++) + i40e_check_write_reg(hw, I40E_GLQF_HASH_MSK(i, pctype), + mask_reg[i]); + break; + case INSET_FDIR: + i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0), + (uint32_t)(inset_reg & UINT32_MAX)); + i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1), + (uint32_t)((inset_reg >> + I40E_32_BIT_WIDTH) & UINT32_MAX)); + for (i = 0; i < 2; i++) + i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype), + mask_reg[i]); + break; + case INSET_FDIR_FLX: + i40e_check_write_reg(hw, I40E_PRTQF_FD_FLXINSET(pctype), + (uint32_t)(inset_reg & UINT32_MAX)); + for (i = 0; i < 2; i++) + i40e_check_write_reg(hw, I40E_PRTQF_FD_MSK(pctype, i), + mask_reg[i]); + break; + default: + PMD_DRV_LOG(ERR, "Unsupported input set type."); + return -EINVAL; + } + + I40E_WRITE_FLUSH(hw); + return 0; +} diff --git a/drivers/net/i40e/rte_pmd_i40e.h b/drivers/net/i40e/rte_pmd_i40e.h index 580ca4ae..d248adb1 100644 --- a/drivers/net/i40e/rte_pmd_i40e.h +++ b/drivers/net/i40e/rte_pmd_i40e.h @@ -1,33 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright (c) 2017 Intel Corporation. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Intel Corporation */ #ifndef _PMD_I40E_H_ @@ -42,7 +14,7 @@ * */ -#include <rte_ethdev.h> +#include <rte_ethdev_driver.h> /** * Response sent back to i40e driver from user app after callback @@ -94,7 +66,7 @@ enum rte_pmd_i40e_package_info { RTE_PMD_I40E_PKG_INFO_PCTYPE_LIST, RTE_PMD_I40E_PKG_INFO_PTYPE_NUM, RTE_PMD_I40E_PKG_INFO_PTYPE_LIST, - RTE_PMD_I40E_PKG_INFO_MAX = 0xFFFFFFFF + RTE_PMD_I40E_PKG_INFO_MAX = (int)0xFFFFFFFF }; /** @@ -317,6 +289,23 @@ struct rte_pmd_i40e_pkt_template_conf { uint32_t soft_id; }; +enum rte_pmd_i40e_inset_type { + INSET_NONE = 0, + INSET_HASH, + INSET_FDIR, + INSET_FDIR_FLX, +}; + +struct rte_pmd_i40e_inset_mask { + uint8_t field_idx; + uint16_t mask; +}; + +struct rte_pmd_i40e_inset { + uint64_t inset; + struct rte_pmd_i40e_inset_mask mask[2]; +}; + /** * Add or remove raw packet template filter to Flow Director. * @@ -933,4 +922,125 @@ int rte_pmd_i40e_query_vfid_by_mac(uint16_t port, int rte_pmd_i40e_rss_queue_region_conf(uint16_t port_id, enum rte_pmd_i40e_queue_region_op op_type, void *arg); +int rte_pmd_i40e_cfg_hash_inset(uint16_t port, + uint64_t pctype, uint64_t inset); + +/** + * Get input set + * + * @param port + * The port identifier of the Ethernet device. + * @param pctype + * HW pctype. + * @param inset + * Buffer for input set info. + * @param inset_type + * Type of input set. + * @return + * - (0) if successful. + * - (-ENODEV) if *port* invalid. + * - (-EINVAL) if bad parameter. + * - (-ENOTSUP) if operation not supported. + */ +int rte_pmd_i40e_inset_get(uint16_t port, uint8_t pctype, + struct rte_pmd_i40e_inset *inset, + enum rte_pmd_i40e_inset_type inset_type); + +/** + * Set input set + * + * @param port + * The port identifier of the Ethernet device. + * @param pctype + * HW pctype. + * @param inset + * Input set info. + * @param inset_type + * Type of input set. + * @return + * - (0) if successful. + * - (-ENODEV) if *port* invalid. + * - (-EINVAL) if bad parameter. + * - (-ENOTSUP) if operation not supported. + */ +int rte_pmd_i40e_inset_set(uint16_t port, uint8_t pctype, + struct rte_pmd_i40e_inset *inset, + enum rte_pmd_i40e_inset_type inset_type); + +/** + * Get bit value for some field index + * + * @param inset + * Input set value. + * @param field_idx + * Field index for input set. + * @return + * - (1) if set. + * - (0) if cleared. + */ +static inline int +rte_pmd_i40e_inset_field_get(uint64_t inset, uint8_t field_idx) +{ + uint8_t bit_idx; + + if (field_idx > 63) + return 0; + + bit_idx = 63 - field_idx; + if (inset & (1ULL << bit_idx)) + return 1; + + return 0; +} + +/** + * Set bit value for some field index + * + * @param inset + * Input set value. + * @param field_idx + * Field index for input set. + * @return + * - (-1) if failed. + * - (0) if success. 
+ */ +static inline int +rte_pmd_i40e_inset_field_set(uint64_t *inset, uint8_t field_idx) +{ + uint8_t bit_idx; + + if (field_idx > 63) + return -1; + + bit_idx = 63 - field_idx; + *inset = *inset | (1ULL << bit_idx); + + return 0; +} + +/** + * Clear bit value for some field index + * + * @param inset + * Input set value. + * @param field_idx + * Field index for input set. + * @return + * - (-1) if failed. + * - (0) if success. + */ +static inline int +rte_pmd_i40e_inset_field_clear(uint64_t *inset, uint8_t field_idx) +{ + uint8_t bit_idx; + + if (field_idx > 63) + return -1; + + bit_idx = 63 - field_idx; + *inset = *inset & ~(1ULL << bit_idx); + + return 0; +} + #endif /* _PMD_I40E_H_ */ diff --git a/drivers/net/i40e/rte_pmd_i40e_version.map b/drivers/net/i40e/rte_pmd_i40e_version.map index ebbd24e0..cccd5768 100644 --- a/drivers/net/i40e/rte_pmd_i40e_version.map +++ b/drivers/net/i40e/rte_pmd_i40e_version.map @@ -58,3 +58,10 @@ DPDK_17.11 { rte_pmd_i40e_rss_queue_region_conf; } DPDK_17.08; + +DPDK_18.02 { + global: + + rte_pmd_i40e_inset_get; + rte_pmd_i40e_inset_set; +} DPDK_17.11;
\ No newline at end of file
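
The rte_pmd_i40e_inset_get()/rte_pmd_i40e_inset_set() calls and the inline field helpers declared in the rte_pmd_i40e.h hunks above (and exported under DPDK_18.02 in the version map) can be combined as in the following minimal sketch. The port, pctype and field index are caller-supplied placeholders used only for illustration; they are not values taken from this patch.

#include <rte_pmd_i40e.h>

/* Minimal sketch: read the hash input set for a HW pctype, enable one
 * extra field bit and program the result back.  Assumes an initialized
 * i40e port; the concrete port/pctype/field numbers are examples only.
 */
static int
example_enable_hash_inset_field(uint16_t port, uint8_t pctype,
				uint8_t field_idx)
{
	struct rte_pmd_i40e_inset inset;
	int ret;

	/* Fetch the current input set and field masks for this pctype. */
	ret = rte_pmd_i40e_inset_get(port, pctype, &inset, INSET_HASH);
	if (ret < 0)
		return ret;

	/* Nothing to do if the field is already part of the input set. */
	if (rte_pmd_i40e_inset_field_get(inset.inset, field_idx))
		return 0;

	/* Set the requested field bit and write the new input set back. */
	rte_pmd_i40e_inset_field_set(&inset.inset, field_idx);
	return rte_pmd_i40e_inset_set(port, pctype, &inset, INSET_HASH);
}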