Diffstat (limited to 'drivers/net/i40e')
-rw-r--r--  drivers/net/i40e/Makefile                                                        |   8
-rw-r--r--  drivers/net/i40e/base/i40e_adminq_cmd.h                                          | 132
-rw-r--r--  drivers/net/i40e/base/i40e_common.c                                              |  76
-rw-r--r--  drivers/net/i40e/base/i40e_devids.h                                              |   1
-rw-r--r--  drivers/net/i40e/base/i40e_prototype.h                                           |   3
-rw-r--r--  drivers/net/i40e/base/i40e_type.h                                                | 295
-rw-r--r--  drivers/net/i40e/i40e_ethdev.c                                                   | 340
-rw-r--r--  drivers/net/i40e/i40e_ethdev.h                                                   |  45
-rw-r--r--  drivers/net/i40e/i40e_ethdev_vf.c                                                |  46
-rw-r--r--  drivers/net/i40e/i40e_fdir.c                                                     |  46
-rw-r--r--  drivers/net/i40e/i40e_pf.c                                                       |   6
-rw-r--r--  drivers/net/i40e/i40e_rxtx.c                                                     | 665
-rw-r--r--  drivers/net/i40e/i40e_rxtx.h                                                     | 563
-rw-r--r--  drivers/net/i40e/i40e_rxtx_vec_common.h                                          | 251
-rw-r--r--  drivers/net/i40e/i40e_rxtx_vec_neon.c                                            | 614
-rw-r--r--  drivers/net/i40e/i40e_rxtx_vec_sse.c (renamed from drivers/net/i40e/i40e_rxtx_vec.c) | 257
16 files changed, 2373 insertions(+), 975 deletions(-)
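
A recurring theme across the hunks below is 25G support: the adminq header gains I40E_LINK_SPEED_25GB_SHIFT, i40e_parse_link_speeds() learns ETH_LINK_SPEED_25G, and the info, link-update and flow-control paths switch from device-ID checks to PHY-type checks. As a minimal standalone sketch of the speed-flag translation those hunks implement (the SPEED_FLAG_* names are invented stand-ins for the real ETH_LINK_SPEED_* flags; the shift values are copied from the i40e_adminq_cmd.h hunk):

#include <stdint.h>
#include <stdio.h>

/* Link-speed bit positions from the i40e_adminq_cmd.h hunk below. */
#define LINK_SPEED_100MB_SHIFT  0x1
#define LINK_SPEED_1000MB_SHIFT 0x2
#define LINK_SPEED_10GB_SHIFT   0x3
#define LINK_SPEED_40GB_SHIFT   0x4
#define LINK_SPEED_20GB_SHIFT   0x5
#define LINK_SPEED_25GB_SHIFT   0x6 /* new in this patch set */

enum aq_link_speed {
	LINK_SPEED_UNKNOWN = 0,
	LINK_SPEED_100MB = 1 << LINK_SPEED_100MB_SHIFT,
	LINK_SPEED_1GB   = 1 << LINK_SPEED_1000MB_SHIFT,
	LINK_SPEED_10GB  = 1 << LINK_SPEED_10GB_SHIFT,
	LINK_SPEED_40GB  = 1 << LINK_SPEED_40GB_SHIFT,
	LINK_SPEED_20GB  = 1 << LINK_SPEED_20GB_SHIFT,
	LINK_SPEED_25GB  = 1 << LINK_SPEED_25GB_SHIFT,
};

/* Hypothetical ethdev-style speed-request flags; the real driver consumes
 * ETH_LINK_SPEED_* from rte_ethdev. One bit per requested speed. */
#define SPEED_FLAG_10G (1u << 0)
#define SPEED_FLAG_25G (1u << 1)
#define SPEED_FLAG_40G (1u << 2)

/* Mirrors the shape of i40e_parse_link_speeds(): fold generic speed flags
 * into the adminq link-speed bitmap. */
static uint8_t parse_link_speeds(uint32_t flags)
{
	uint8_t speed = LINK_SPEED_UNKNOWN;

	if (flags & SPEED_FLAG_40G)
		speed |= LINK_SPEED_40GB;
	if (flags & SPEED_FLAG_25G)
		speed |= LINK_SPEED_25GB;
	if (flags & SPEED_FLAG_10G)
		speed |= LINK_SPEED_10GB;
	return speed;
}

int main(void)
{
	printf("10G+25G -> 0x%02x\n",
	       parse_link_speeds(SPEED_FLAG_10G | SPEED_FLAG_25G));
	return 0;
}

With 10G and 25G requested, the bitmap comes out as 0x48; in the driver this value would then be handed to i40e_phy_conf_link() and programmed through the set-PHY-config adminq command.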
diff --git a/drivers/net/i40e/Makefile b/drivers/net/i40e/Makefile index 53fe145f..13085fb7 100644 --- a/drivers/net/i40e/Makefile +++ b/drivers/net/i40e/Makefile @@ -97,14 +97,18 @@ SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_dcb.c SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_ethdev.c SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_rxtx.c -SRCS-$(CONFIG_RTE_LIBRTE_I40E_INC_VECTOR) += i40e_rxtx_vec.c +ifeq ($(CONFIG_RTE_ARCH_ARM64),y) +SRCS-$(CONFIG_RTE_LIBRTE_I40E_INC_VECTOR) += i40e_rxtx_vec_neon.c +else +SRCS-$(CONFIG_RTE_LIBRTE_I40E_INC_VECTOR) += i40e_rxtx_vec_sse.c +endif SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_ethdev_vf.c SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_pf.c SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_fdir.c # vector PMD driver needs SSE4.1 support ifeq ($(findstring RTE_MACHINE_CPUFLAG_SSE4_1,$(CFLAGS)),) -CFLAGS_i40e_rxtx_vec.o += -msse4.1 +CFLAGS_i40e_rxtx_vec_sse.o += -msse4.1 endif diff --git a/drivers/net/i40e/base/i40e_adminq_cmd.h b/drivers/net/i40e/base/i40e_adminq_cmd.h index 2b7a7608..4f067720 100644 --- a/drivers/net/i40e/base/i40e_adminq_cmd.h +++ b/drivers/net/i40e/base/i40e_adminq_cmd.h @@ -196,6 +196,7 @@ enum i40e_admin_queue_opc { i40e_aqc_opc_remove_control_packet_filter = 0x025B, i40e_aqc_opc_add_cloud_filters = 0x025C, i40e_aqc_opc_remove_cloud_filters = 0x025D, + i40e_aqc_opc_clear_wol_switch_filters = 0x025E, i40e_aqc_opc_add_mirror_rule = 0x0260, i40e_aqc_opc_delete_mirror_rule = 0x0261, @@ -223,6 +224,9 @@ enum i40e_admin_queue_opc { i40e_aqc_opc_suspend_port_tx = 0x041B, i40e_aqc_opc_resume_port_tx = 0x041C, i40e_aqc_opc_configure_partition_bw = 0x041D, + /* hmc */ + i40e_aqc_opc_query_hmc_resource_profile = 0x0500, + i40e_aqc_opc_set_hmc_resource_profile = 0x0501, /* phy commands*/ i40e_aqc_opc_get_phy_abilities = 0x0600, @@ -471,13 +475,15 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_cppm_configuration); /* Set ARP Proxy command / response (indirect 0x0104) */ struct i40e_aqc_arp_proxy_data { __le16 command_flags; -#define I40E_AQ_ARP_INIT_IPV4 0x0008 -#define I40E_AQ_ARP_UNSUP_CTL 0x0010 -#define I40E_AQ_ARP_ENA 0x0020 -#define I40E_AQ_ARP_ADD_IPV4 0x0040 -#define I40E_AQ_ARP_DEL_IPV4 0x0080 +#define I40E_AQ_ARP_INIT_IPV4 0x0800 +#define I40E_AQ_ARP_UNSUP_CTL 0x1000 +#define I40E_AQ_ARP_ENA 0x2000 +#define I40E_AQ_ARP_ADD_IPV4 0x4000 +#define I40E_AQ_ARP_DEL_IPV4 0x8000 __le16 table_id; - __le32 pfpm_proxyfc; + __le32 enabled_offloads; +#define I40E_AQ_ARP_DIRECTED_OFFLOAD_ENABLE 0x00000020 +#define I40E_AQ_ARP_OFFLOAD_ENABLE 0x00000800 __le32 ip_addr; u8 mac_addr[6]; u8 reserved[2]; @@ -492,17 +498,19 @@ struct i40e_aqc_ns_proxy_data { __le16 table_idx_ipv6_0; __le16 table_idx_ipv6_1; __le16 control; -#define I40E_AQ_NS_PROXY_ADD_0 0x0100 -#define I40E_AQ_NS_PROXY_DEL_0 0x0200 -#define I40E_AQ_NS_PROXY_ADD_1 0x0400 -#define I40E_AQ_NS_PROXY_DEL_1 0x0800 -#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x1000 -#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x2000 -#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x4000 -#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x8000 -#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0001 -#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0002 -#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0004 +#define I40E_AQ_NS_PROXY_ADD_0 0x0001 +#define I40E_AQ_NS_PROXY_DEL_0 0x0002 +#define I40E_AQ_NS_PROXY_ADD_1 0x0004 +#define I40E_AQ_NS_PROXY_DEL_1 0x0008 +#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x0010 +#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x0020 +#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x0040 +#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x0080 +#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0100 +#define 
I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0200 +#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0400 +#define I40E_AQ_NS_PROXY_OFFLOAD_ENABLE 0x0800 +#define I40E_AQ_NS_PROXY_DIRECTED_OFFLOAD_ENABLE 0x1000 u8 mac_addr_0[6]; u8 mac_addr_1[6]; u8 local_mac_addr[6]; @@ -552,6 +560,7 @@ I40E_CHECK_STRUCT_LEN(24, i40e_aqc_mac_address_read_data); /* Manage MAC Address Write Command (0x0108) */ struct i40e_aqc_mac_address_write { __le16 command_flags; +#define I40E_AQC_MC_MAG_EN 0x0100 #define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000 #define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000 #define I40E_AQC_WRITE_TYPE_PORT 0x8000 @@ -581,9 +590,18 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe); struct i40e_aqc_set_wol_filter { __le16 filter_index; #define I40E_AQC_MAX_NUM_WOL_FILTERS 8 +#define I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_SHIFT 15 +#define I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_MASK (0x1 << \ + I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_SHIFT) + +#define I40E_AQC_SET_WOL_FILTER_INDEX_SHIFT 0 +#define I40E_AQC_SET_WOL_FILTER_INDEX_MASK (0x7 << \ + I40E_AQC_SET_WOL_FILTER_INDEX_SHIFT) __le16 cmd_flags; #define I40E_AQC_SET_WOL_FILTER 0x8000 #define I40E_AQC_SET_WOL_FILTER_NO_TCO_WOL 0x4000 +#define I40E_AQC_SET_WOL_FILTER_ACTION_CLEAR 0 +#define I40E_AQC_SET_WOL_FILTER_ACTION_SET 1 __le16 valid_flags; #define I40E_AQC_SET_WOL_FILTER_ACTION_VALID 0x8000 #define I40E_AQC_SET_WOL_FILTER_NO_TCO_ACTION_VALID 0x4000 @@ -594,23 +612,29 @@ struct i40e_aqc_set_wol_filter { I40E_CHECK_CMD_LENGTH(i40e_aqc_set_wol_filter); +struct i40e_aqc_set_wol_filter_data { + u8 filter[128]; + u8 mask[16]; +}; + +I40E_CHECK_STRUCT_LEN(0x90, i40e_aqc_set_wol_filter_data); + /* Get Wake Reason (0x0121) */ struct i40e_aqc_get_wake_reason_completion { u8 reserved_1[2]; __le16 wake_reason; +#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_SHIFT 0 +#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_MASK (0xFF << \ + I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_SHIFT) +#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_SHIFT 8 +#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_MASK (0xFF << \ + I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_SHIFT) u8 reserved_2[12]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_get_wake_reason_completion); -struct i40e_aqc_set_wol_filter_data { - u8 filter[128]; - u8 mask[16]; -}; - -I40E_CHECK_STRUCT_LEN(0x90, i40e_aqc_set_wol_filter_data); - #endif /* X722_SUPPORT */ /* Switch configuration commands (0x02xx) */ @@ -694,6 +718,8 @@ struct i40e_aqc_set_port_parameters { #define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! */ #define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4 __le16 bad_frame_vsi; +#define I40E_AQ_SET_P_PARAMS_BFRAME_SEID_SHIFT 0x0 +#define I40E_AQ_SET_P_PARAMS_BFRAME_SEID_MASK 0x3FF __le16 default_seid; /* reserved for command */ u8 reserved[10]; }; @@ -745,6 +771,7 @@ I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_resource_alloc_element_resp); /* Set Switch Configuration (direct 0x0205) */ struct i40e_aqc_set_switch_config { __le16 flags; +/* flags used for both fields below */ #define I40E_AQ_SET_SWITCH_CFG_PROMISC 0x0001 #define I40E_AQ_SET_SWITCH_CFG_L2_FILTER 0x0002 __le16 valid_flags; @@ -1644,6 +1671,24 @@ struct i40e_aqc_configure_partition_bw_data { I40E_CHECK_STRUCT_LEN(0x22, i40e_aqc_configure_partition_bw_data); +/* Get and set the active HMC resource profile and status. 
+ * (direct 0x0500) and (direct 0x0501) + */ +struct i40e_aq_get_set_hmc_resource_profile { + u8 pm_profile; + u8 pe_vf_enabled; + u8 reserved[14]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile); + +enum i40e_aq_hmc_profile { + /* I40E_HMC_PROFILE_NO_CHANGE = 0, reserved */ + I40E_HMC_PROFILE_DEFAULT = 1, + I40E_HMC_PROFILE_FAVOR_VF = 2, + I40E_HMC_PROFILE_EQUAL = 3, +}; + /* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */ /* set in param0 for get phy abilities to report qualified modules */ @@ -1691,6 +1736,7 @@ enum i40e_aq_phy_type { #define I40E_LINK_SPEED_10GB_SHIFT 0x3 #define I40E_LINK_SPEED_40GB_SHIFT 0x4 #define I40E_LINK_SPEED_20GB_SHIFT 0x5 +#define I40E_LINK_SPEED_25GB_SHIFT 0x6 enum i40e_aq_link_speed { I40E_LINK_SPEED_UNKNOWN = 0, @@ -1698,7 +1744,8 @@ enum i40e_aq_link_speed { I40E_LINK_SPEED_1GB = (1 << I40E_LINK_SPEED_1000MB_SHIFT), I40E_LINK_SPEED_10GB = (1 << I40E_LINK_SPEED_10GB_SHIFT), I40E_LINK_SPEED_40GB = (1 << I40E_LINK_SPEED_40GB_SHIFT), - I40E_LINK_SPEED_20GB = (1 << I40E_LINK_SPEED_20GB_SHIFT) + I40E_LINK_SPEED_20GB = (1 << I40E_LINK_SPEED_20GB_SHIFT), + I40E_LINK_SPEED_25GB = (1 << I40E_LINK_SPEED_25GB_SHIFT), }; struct i40e_aqc_module_desc { @@ -1721,6 +1768,8 @@ struct i40e_aq_get_phy_abilities_resp { #define I40E_AQ_PHY_LINK_ENABLED 0x08 #define I40E_AQ_PHY_AN_ENABLED 0x10 #define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20 +#define I40E_AQ_PHY_FEC_ABILITY_KR 0x40 +#define I40E_AQ_PHY_FEC_ABILITY_RS 0x80 __le16 eee_capability; #define I40E_AQ_EEE_100BASE_TX 0x0002 #define I40E_AQ_EEE_1000BASE_T 0x0004 @@ -1731,7 +1780,13 @@ struct i40e_aq_get_phy_abilities_resp { __le32 eeer_val; u8 d3_lpan; #define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01 - u8 reserved[3]; + u8 phy_type_ext; +#define I40E_AQ_PHY_TYPE_EXT_25G_KR 0X01 +#define I40E_AQ_PHY_TYPE_EXT_25G_CR 0X02 +#define I40E_AQ_PHY_TYPE_EXT_25G_SR 0x04 +#define I40E_AQ_PHY_TYPE_EXT_25G_LR 0x08 + u8 mod_type_ext; + u8 ext_comp_code; u8 phy_id[4]; u8 module_type[3]; u8 qualified_module_count; @@ -1753,7 +1808,18 @@ struct i40e_aq_set_phy_config { /* same bits as above in all */ __le16 eee_capability; __le32 eeer; u8 low_power_ctrl; - u8 reserved[3]; + u8 phy_type_ext; +#define I40E_AQ_PHY_TYPE_EXT_25G_KR 0X01 +#define I40E_AQ_PHY_TYPE_EXT_25G_CR 0X02 +#define I40E_AQ_PHY_TYPE_EXT_25G_SR 0x04 +#define I40E_AQ_PHY_TYPE_EXT_25G_LR 0x08 + u8 fec_config; +#define I40E_AQ_SET_FEC_ABILITY_KR (1 << 0) +#define I40E_AQ_SET_FEC_ABILITY_RS (1 << 1) +#define I40E_AQ_SET_FEC_REQUEST_KR (1 << 2) +#define I40E_AQ_SET_FEC_REQUEST_RS (1 << 3) +#define I40E_AQ_SET_FEC_AUTO (1 << 4) + u8 reserved; }; I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config); @@ -1833,16 +1899,26 @@ struct i40e_aqc_get_link_status { #define I40E_AQ_LINK_TX_DRAINED 0x01 #define I40E_AQ_LINK_TX_FLUSHED 0x03 #define I40E_AQ_LINK_FORCED_40G 0x10 +/* 25G Error Codes */ +#define I40E_AQ_25G_NO_ERR 0X00 +#define I40E_AQ_25G_NOT_PRESENT 0X01 +#define I40E_AQ_25G_NVM_CRC_ERR 0X02 +#define I40E_AQ_25G_SBUS_UCODE_ERR 0X03 +#define I40E_AQ_25G_SERDES_UCODE_ERR 0X04 +#define I40E_AQ_25G_NIMB_UCODE_ERR 0X05 u8 loopback; /* use defines from i40e_aqc_set_lb_mode */ __le16 max_frame_size; u8 config; +#define I40E_AQ_CONFIG_FEC_KR_ENA 0x01 +#define I40E_AQ_CONFIG_FEC_RS_ENA 0x02 #define I40E_AQ_CONFIG_CRC_ENA 0x04 #define I40E_AQ_CONFIG_PACING_MASK 0x78 - u8 external_power_ability; + u8 power_desc; #define I40E_AQ_LINK_POWER_CLASS_1 0x00 #define I40E_AQ_LINK_POWER_CLASS_2 0x01 #define I40E_AQ_LINK_POWER_CLASS_3 0x02 #define I40E_AQ_LINK_POWER_CLASS_4 
0x03 +#define I40E_AQ_PWR_CLASS_MASK 0x03 u8 reserved[4]; }; diff --git a/drivers/net/i40e/base/i40e_common.c b/drivers/net/i40e/base/i40e_common.c index 4407f2d3..9a6b3ed6 100644 --- a/drivers/net/i40e/base/i40e_common.c +++ b/drivers/net/i40e/base/i40e_common.c @@ -81,7 +81,6 @@ STATIC enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw) case I40E_DEV_ID_1G_BASE_T_X722: case I40E_DEV_ID_10G_BASE_T_X722: case I40E_DEV_ID_SFP_I_X722: - case I40E_DEV_ID_QSFP_I_X722: hw->mac.type = I40E_MAC_X722; break; #endif @@ -383,8 +382,7 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc, d_buf[j] = buf[i]; i40e_debug(hw, mask, "\t0x%04X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n", - i_sav, d_buf[0], d_buf[1], - d_buf[2], d_buf[3], + i_sav, d_buf[0], d_buf[1], d_buf[2], d_buf[3], d_buf[4], d_buf[5], d_buf[6], d_buf[7], d_buf[8], d_buf[9], d_buf[10], d_buf[11], d_buf[12], d_buf[13], d_buf[14], d_buf[15]); @@ -1191,6 +1189,32 @@ void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable) } /** + * i40e_get_san_mac_addr - get SAN MAC address + * @hw: pointer to the HW structure + * @mac_addr: pointer to SAN MAC address + * + * Reads the adapter's SAN MAC address from NVM + **/ +enum i40e_status_code i40e_get_san_mac_addr(struct i40e_hw *hw, + u8 *mac_addr) +{ + struct i40e_aqc_mac_address_read_data addrs; + enum i40e_status_code status; + u16 flags = 0; + + status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL); + if (status) + return status; + + if (flags & I40E_AQC_SAN_ADDR_VALID) + memcpy(mac_addr, &addrs.pf_san_mac, sizeof(addrs.pf_san_mac)); + else + status = I40E_ERR_INVALID_MAC_ADDR; + + return status; +} + +/** * i40e_read_pba_string - Reads part number string from EEPROM * @hw: pointer to hardware structure * @pba_num: stores the part number string from the EEPROM @@ -1670,8 +1694,10 @@ enum i40e_status_code i40e_aq_get_phy_capabilities(struct i40e_hw *hw, if (hw->aq.asq_last_status == I40E_AQ_RC_EIO) status = I40E_ERR_UNKNOWN_PHY; - if (report_init) + if (report_init) { hw->phy.phy_types = LE32_TO_CPU(abilities->phy_type); + hw->phy.phy_types |= ((u64)abilities->phy_type_ext << 32); + } return status; } @@ -2215,6 +2241,34 @@ enum i40e_status_code i40e_aq_set_default_vsi(struct i40e_hw *hw, } /** + * i40e_aq_clear_default_vsi + * @hw: pointer to the hw struct + * @seid: vsi number + * @cmd_details: pointer to command details structure or NULL + **/ +enum i40e_status_code i40e_aq_clear_default_vsi(struct i40e_hw *hw, + u16 seid, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_set_vsi_promiscuous_modes *cmd = + (struct i40e_aqc_set_vsi_promiscuous_modes *) + &desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_vsi_promiscuous_modes); + + cmd->promiscuous_flags = CPU_TO_LE16(0); + cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_DEFAULT); + cmd->seid = CPU_TO_LE16(seid); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** * i40e_aq_set_vsi_unicast_promiscuous * @hw: pointer to the hw struct * @seid: vsi number @@ -3792,16 +3846,8 @@ STATIC void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff, if (p->fcoe) i40e_debug(hw, I40E_DEBUG_ALL, "device is FCoE capable\n"); -#ifdef I40E_FCOE_ENA - /* Software override ensuring FCoE is disabled if npar or mfp - * mode because it is not supported in these modes. 
- */ - if (p->npar_enable || p->flex10_enable) - p->fcoe = false; -#else /* Always disable FCoE if compiled without the I40E_FCOE_ENA flag */ p->fcoe = false; -#endif /* count the enabled ports (aka the "not disabled" ports) */ hw->num_ports = 0; @@ -5452,12 +5498,12 @@ STATIC void i40e_fix_up_geneve_vni( u16 tnl_type; u32 ti; - tnl_type = (le16_to_cpu(f[i].flags) & + tnl_type = (LE16_TO_CPU(f[i].flags) & I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >> I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT; if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) { - ti = le32_to_cpu(f[i].tenant_id); - f[i].tenant_id = cpu_to_le32(ti << 8); + ti = LE32_TO_CPU(f[i].tenant_id); + f[i].tenant_id = CPU_TO_LE32(ti << 8); } } } diff --git a/drivers/net/i40e/base/i40e_devids.h b/drivers/net/i40e/base/i40e_devids.h index ed73e1d2..8bd5793d 100644 --- a/drivers/net/i40e/base/i40e_devids.h +++ b/drivers/net/i40e/base/i40e_devids.h @@ -68,7 +68,6 @@ POSSIBILITY OF SUCH DAMAGE. #define I40E_DEV_ID_1G_BASE_T_X722 0x37D1 #define I40E_DEV_ID_10G_BASE_T_X722 0x37D2 #define I40E_DEV_ID_SFP_I_X722 0x37D3 -#define I40E_DEV_ID_QSFP_I_X722 0x37D4 #if defined(INTEGRATED_VF) || defined(VF_DRIVER) || defined(I40E_NDIS_SUPPORT) #define I40E_DEV_ID_X722_VF 0x37CD #define I40E_DEV_ID_X722_VF_HV 0x37D9 diff --git a/drivers/net/i40e/base/i40e_prototype.h b/drivers/net/i40e/base/i40e_prototype.h index 03dda937..3aab5ca9 100644 --- a/drivers/net/i40e/base/i40e_prototype.h +++ b/drivers/net/i40e/base/i40e_prototype.h @@ -124,6 +124,8 @@ enum i40e_status_code i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id, struct i40e_asq_cmd_details *cmd_details); +enum i40e_status_code i40e_aq_clear_default_vsi(struct i40e_hw *hw, u16 vsi_id, + struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_get_phy_capabilities(struct i40e_hw *hw, bool qualified_modules, bool report_init, struct i40e_aq_get_phy_abilities_resp *abilities, @@ -438,6 +440,7 @@ enum i40e_status_code i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr); enum i40e_status_code i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num, u32 pba_num_size); void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable); +enum i40e_status_code i40e_get_san_mac_addr(struct i40e_hw *hw, u8 *mac_addr); enum i40e_aq_link_speed i40e_get_link_speed(struct i40e_hw *hw); /* prototype for functions used for NVM access */ enum i40e_status_code i40e_init_nvm(struct i40e_hw *hw); diff --git a/drivers/net/i40e/base/i40e_type.h b/drivers/net/i40e/base/i40e_type.h index 5349419f..b5f72c32 100644 --- a/drivers/net/i40e/base/i40e_type.h +++ b/drivers/net/i40e/base/i40e_type.h @@ -157,8 +157,10 @@ enum i40e_debug_mask { #define I40E_PCI_LINK_SPEED_5000 0x2 #define I40E_PCI_LINK_SPEED_8000 0x3 -#define I40E_MDIO_STCODE 0 -#define I40E_MDIO_OPCODE_ADDRESS 0 +#define I40E_MDIO_STCODE I40E_MASK(0, \ + I40E_GLGEN_MSCA_STCODE_SHIFT) +#define I40E_MDIO_OPCODE_ADDRESS I40E_MASK(0, \ + I40E_GLGEN_MSCA_OPCODE_SHIFT) #define I40E_MDIO_OPCODE_WRITE I40E_MASK(1, \ I40E_GLGEN_MSCA_OPCODE_SHIFT) #define I40E_MDIO_OPCODE_READ_INC_ADDR I40E_MASK(2, \ @@ -292,46 +294,48 @@ struct i40e_link_status { #define I40E_MODULE_TYPE_1000BASE_T 0x08 }; -enum i40e_aq_capabilities_phy_type { - I40E_CAP_PHY_TYPE_SGMII = BIT(I40E_PHY_TYPE_SGMII), - I40E_CAP_PHY_TYPE_1000BASE_KX = BIT(I40E_PHY_TYPE_1000BASE_KX), - I40E_CAP_PHY_TYPE_10GBASE_KX4 = BIT(I40E_PHY_TYPE_10GBASE_KX4), - I40E_CAP_PHY_TYPE_10GBASE_KR = 
BIT(I40E_PHY_TYPE_10GBASE_KR), - I40E_CAP_PHY_TYPE_40GBASE_KR4 = BIT(I40E_PHY_TYPE_40GBASE_KR4), - I40E_CAP_PHY_TYPE_XAUI = BIT(I40E_PHY_TYPE_XAUI), - I40E_CAP_PHY_TYPE_XFI = BIT(I40E_PHY_TYPE_XFI), - I40E_CAP_PHY_TYPE_SFI = BIT(I40E_PHY_TYPE_SFI), - I40E_CAP_PHY_TYPE_XLAUI = BIT(I40E_PHY_TYPE_XLAUI), - I40E_CAP_PHY_TYPE_XLPPI = BIT(I40E_PHY_TYPE_XLPPI), - I40E_CAP_PHY_TYPE_40GBASE_CR4_CU = BIT(I40E_PHY_TYPE_40GBASE_CR4_CU), - I40E_CAP_PHY_TYPE_10GBASE_CR1_CU = BIT(I40E_PHY_TYPE_10GBASE_CR1_CU), - I40E_CAP_PHY_TYPE_10GBASE_AOC = BIT(I40E_PHY_TYPE_10GBASE_AOC), - I40E_CAP_PHY_TYPE_40GBASE_AOC = BIT(I40E_PHY_TYPE_40GBASE_AOC), - I40E_CAP_PHY_TYPE_100BASE_TX = BIT(I40E_PHY_TYPE_100BASE_TX), - I40E_CAP_PHY_TYPE_1000BASE_T = BIT(I40E_PHY_TYPE_1000BASE_T), - I40E_CAP_PHY_TYPE_10GBASE_T = BIT(I40E_PHY_TYPE_10GBASE_T), - I40E_CAP_PHY_TYPE_10GBASE_SR = BIT(I40E_PHY_TYPE_10GBASE_SR), - I40E_CAP_PHY_TYPE_10GBASE_LR = BIT(I40E_PHY_TYPE_10GBASE_LR), - I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU = BIT(I40E_PHY_TYPE_10GBASE_SFPP_CU), - I40E_CAP_PHY_TYPE_10GBASE_CR1 = BIT(I40E_PHY_TYPE_10GBASE_CR1), - I40E_CAP_PHY_TYPE_40GBASE_CR4 = BIT(I40E_PHY_TYPE_40GBASE_CR4), - I40E_CAP_PHY_TYPE_40GBASE_SR4 = BIT(I40E_PHY_TYPE_40GBASE_SR4), - I40E_CAP_PHY_TYPE_40GBASE_LR4 = BIT(I40E_PHY_TYPE_40GBASE_LR4), - I40E_CAP_PHY_TYPE_1000BASE_SX = BIT(I40E_PHY_TYPE_1000BASE_SX), - I40E_CAP_PHY_TYPE_1000BASE_LX = BIT(I40E_PHY_TYPE_1000BASE_LX), - I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL = BIT(I40E_PHY_TYPE_1000BASE_T_OPTICAL), - I40E_CAP_PHY_TYPE_20GBASE_KR2 = BIT(I40E_PHY_TYPE_20GBASE_KR2) -}; - struct i40e_phy_info { struct i40e_link_status link_info; struct i40e_link_status link_info_old; bool get_link_info; enum i40e_media_type media_type; /* all the phy types the NVM is capable of */ - u32 phy_types; -}; - + u64 phy_types; +}; + +#define I40E_CAP_PHY_TYPE_SGMII BIT_ULL(I40E_PHY_TYPE_SGMII) +#define I40E_CAP_PHY_TYPE_1000BASE_KX BIT_ULL(I40E_PHY_TYPE_1000BASE_KX) +#define I40E_CAP_PHY_TYPE_10GBASE_KX4 BIT_ULL(I40E_PHY_TYPE_10GBASE_KX4) +#define I40E_CAP_PHY_TYPE_10GBASE_KR BIT_ULL(I40E_PHY_TYPE_10GBASE_KR) +#define I40E_CAP_PHY_TYPE_40GBASE_KR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_KR4) +#define I40E_CAP_PHY_TYPE_XAUI BIT_ULL(I40E_PHY_TYPE_XAUI) +#define I40E_CAP_PHY_TYPE_XFI BIT_ULL(I40E_PHY_TYPE_XFI) +#define I40E_CAP_PHY_TYPE_SFI BIT_ULL(I40E_PHY_TYPE_SFI) +#define I40E_CAP_PHY_TYPE_XLAUI BIT_ULL(I40E_PHY_TYPE_XLAUI) +#define I40E_CAP_PHY_TYPE_XLPPI BIT_ULL(I40E_PHY_TYPE_XLPPI) +#define I40E_CAP_PHY_TYPE_40GBASE_CR4_CU BIT_ULL(I40E_PHY_TYPE_40GBASE_CR4_CU) +#define I40E_CAP_PHY_TYPE_10GBASE_CR1_CU BIT_ULL(I40E_PHY_TYPE_10GBASE_CR1_CU) +#define I40E_CAP_PHY_TYPE_10GBASE_AOC BIT_ULL(I40E_PHY_TYPE_10GBASE_AOC) +#define I40E_CAP_PHY_TYPE_40GBASE_AOC BIT_ULL(I40E_PHY_TYPE_40GBASE_AOC) +#define I40E_CAP_PHY_TYPE_100BASE_TX BIT_ULL(I40E_PHY_TYPE_100BASE_TX) +#define I40E_CAP_PHY_TYPE_1000BASE_T BIT_ULL(I40E_PHY_TYPE_1000BASE_T) +#define I40E_CAP_PHY_TYPE_10GBASE_T BIT_ULL(I40E_PHY_TYPE_10GBASE_T) +#define I40E_CAP_PHY_TYPE_10GBASE_SR BIT_ULL(I40E_PHY_TYPE_10GBASE_SR) +#define I40E_CAP_PHY_TYPE_10GBASE_LR BIT_ULL(I40E_PHY_TYPE_10GBASE_LR) +#define I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU BIT_ULL(I40E_PHY_TYPE_10GBASE_SFPP_CU) +#define I40E_CAP_PHY_TYPE_10GBASE_CR1 BIT_ULL(I40E_PHY_TYPE_10GBASE_CR1) +#define I40E_CAP_PHY_TYPE_40GBASE_CR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_CR4) +#define I40E_CAP_PHY_TYPE_40GBASE_SR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_SR4) +#define I40E_CAP_PHY_TYPE_40GBASE_LR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_LR4) +#define I40E_CAP_PHY_TYPE_1000BASE_SX 
BIT_ULL(I40E_PHY_TYPE_1000BASE_SX) +#define I40E_CAP_PHY_TYPE_1000BASE_LX BIT_ULL(I40E_PHY_TYPE_1000BASE_LX) +#define I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL \ + BIT_ULL(I40E_PHY_TYPE_1000BASE_T_OPTICAL) +#define I40E_CAP_PHY_TYPE_20GBASE_KR2 BIT_ULL(I40E_PHY_TYPE_20GBASE_KR2) +#define I40E_CAP_PHY_TYPE_25GBASE_KR BIT_ULL(I40E_AQ_PHY_TYPE_EXT_25G_KR + 32) +#define I40E_CAP_PHY_TYPE_25GBASE_CR BIT_ULL(I40E_AQ_PHY_TYPE_EXT_25G_CR + 32) +#define I40E_CAP_PHY_TYPE_25GBASE_SR BIT_ULL(I40E_AQ_PHY_TYPE_EXT_25G_SR + 32) +#define I40E_CAP_PHY_TYPE_25GBASE_LR BIT_ULL(I40E_AQ_PHY_TYPE_EXT_25G_LR + 32) #define I40E_HW_CAP_MAX_GPIO 30 #define I40E_HW_CAP_MDIO_PORT_MODE_MDIO 0 #define I40E_HW_CAP_MDIO_PORT_MODE_I2C 1 @@ -1388,6 +1392,23 @@ struct i40e_veb_tc_stats { u64 tc_tx_bytes[I40E_MAX_TRAFFIC_CLASS]; }; +/* Statistics collected per function for FCoE */ +struct i40e_fcoe_stats { + u64 rx_fcoe_packets; /* fcoeprc */ + u64 rx_fcoe_dwords; /* focedwrc */ + u64 rx_fcoe_dropped; /* fcoerpdc */ + u64 tx_fcoe_packets; /* fcoeptc */ + u64 tx_fcoe_dwords; /* focedwtc */ + u64 fcoe_bad_fccrc; /* fcoecrc */ + u64 fcoe_last_error; /* fcoelast */ + u64 fcoe_ddp_count; /* fcoeddpc */ +}; + +/* offset to per function FCoE statistics block */ +#define I40E_FCOE_VF_STAT_OFFSET 0 +#define I40E_FCOE_PF_STAT_OFFSET 128 +#define I40E_FCOE_STAT_MAX (I40E_FCOE_PF_STAT_OFFSET + I40E_MAX_PF) + /* Statistics collected by the MAC */ struct i40e_hw_port_stats { /* eth stats collected by the port */ @@ -1509,6 +1530,208 @@ struct i40e_hw_port_stats { #define I40E_SRRD_SRCTL_ATTEMPTS 100000 +/* FCoE Tx context descriptor - Use the i40e_tx_context_desc struct */ + +enum i40E_fcoe_tx_ctx_desc_cmd_bits { + I40E_FCOE_TX_CTX_DESC_OPCODE_SINGLE_SEND = 0x00, /* 4 BITS */ + I40E_FCOE_TX_CTX_DESC_OPCODE_TSO_FC_CLASS2 = 0x01, /* 4 BITS */ + I40E_FCOE_TX_CTX_DESC_OPCODE_TSO_FC_CLASS3 = 0x05, /* 4 BITS */ + I40E_FCOE_TX_CTX_DESC_OPCODE_ETSO_FC_CLASS2 = 0x02, /* 4 BITS */ + I40E_FCOE_TX_CTX_DESC_OPCODE_ETSO_FC_CLASS3 = 0x06, /* 4 BITS */ + I40E_FCOE_TX_CTX_DESC_OPCODE_DWO_FC_CLASS2 = 0x03, /* 4 BITS */ + I40E_FCOE_TX_CTX_DESC_OPCODE_DWO_FC_CLASS3 = 0x07, /* 4 BITS */ + I40E_FCOE_TX_CTX_DESC_OPCODE_DDP_CTX_INVL = 0x08, /* 4 BITS */ + I40E_FCOE_TX_CTX_DESC_OPCODE_DWO_CTX_INVL = 0x09, /* 4 BITS */ + I40E_FCOE_TX_CTX_DESC_RELOFF = 0x10, + I40E_FCOE_TX_CTX_DESC_CLRSEQ = 0x20, + I40E_FCOE_TX_CTX_DESC_DIFENA = 0x40, + I40E_FCOE_TX_CTX_DESC_IL2TAG2 = 0x80 +}; + +/* FCoE DIF/DIX Context descriptor */ +struct i40e_fcoe_difdix_context_desc { + __le64 flags_buff0_buff1_ref; + __le64 difapp_msk_bias; +}; + +#define I40E_FCOE_DIFDIX_CTX_QW0_FLAGS_SHIFT 0 +#define I40E_FCOE_DIFDIX_CTX_QW0_FLAGS_MASK (0xFFFULL << \ + I40E_FCOE_DIFDIX_CTX_QW0_FLAGS_SHIFT) + +enum i40e_fcoe_difdix_ctx_desc_flags_bits { + /* 2 BITS */ + I40E_FCOE_DIFDIX_CTX_DESC_RSVD = 0x0000, + /* 1 BIT */ + I40E_FCOE_DIFDIX_CTX_DESC_APPTYPE_TAGCHK = 0x0000, + /* 1 BIT */ + I40E_FCOE_DIFDIX_CTX_DESC_APPTYPE_TAGNOTCHK = 0x0004, + /* 2 BITS */ + I40E_FCOE_DIFDIX_CTX_DESC_GTYPE_OPAQUE = 0x0000, + /* 2 BITS */ + I40E_FCOE_DIFDIX_CTX_DESC_GTYPE_CHKINTEGRITY = 0x0008, + /* 2 BITS */ + I40E_FCOE_DIFDIX_CTX_DESC_GTYPE_CHKINTEGRITY_APPTAG = 0x0010, + /* 2 BITS */ + I40E_FCOE_DIFDIX_CTX_DESC_GTYPE_CHKINTEGRITY_APPREFTAG = 0x0018, + /* 2 BITS */ + I40E_FCOE_DIFDIX_CTX_DESC_REFTYPE_CNST = 0x0000, + /* 2 BITS */ + I40E_FCOE_DIFDIX_CTX_DESC_REFTYPE_INC1BLK = 0x0020, + /* 2 BITS */ + I40E_FCOE_DIFDIX_CTX_DESC_REFTYPE_APPTAG = 0x0040, + /* 2 BITS */ + I40E_FCOE_DIFDIX_CTX_DESC_REFTYPE_RSVD = 0x0060, + /* 1 BIT 
*/ + I40E_FCOE_DIFDIX_CTX_DESC_DIXMODE_XSUM = 0x0000, + /* 1 BIT */ + I40E_FCOE_DIFDIX_CTX_DESC_DIXMODE_CRC = 0x0080, + /* 2 BITS */ + I40E_FCOE_DIFDIX_CTX_DESC_DIFHOST_UNTAG = 0x0000, + /* 2 BITS */ + I40E_FCOE_DIFDIX_CTX_DESC_DIFHOST_BUF = 0x0100, + /* 2 BITS */ + I40E_FCOE_DIFDIX_CTX_DESC_DIFHOST_RSVD = 0x0200, + /* 2 BITS */ + I40E_FCOE_DIFDIX_CTX_DESC_DIFHOST_EMBDTAGS = 0x0300, + /* 1 BIT */ + I40E_FCOE_DIFDIX_CTX_DESC_DIFLAN_UNTAG = 0x0000, + /* 1 BIT */ + I40E_FCOE_DIFDIX_CTX_DESC_DIFLAN_TAG = 0x0400, + /* 1 BIT */ + I40E_FCOE_DIFDIX_CTX_DESC_DIFBLK_512B = 0x0000, + /* 1 BIT */ + I40E_FCOE_DIFDIX_CTX_DESC_DIFBLK_4K = 0x0800 +}; + +#define I40E_FCOE_DIFDIX_CTX_QW0_BUFF0_SHIFT 12 +#define I40E_FCOE_DIFDIX_CTX_QW0_BUFF0_MASK (0x3FFULL << \ + I40E_FCOE_DIFDIX_CTX_QW0_BUFF0_SHIFT) + +#define I40E_FCOE_DIFDIX_CTX_QW0_BUFF1_SHIFT 22 +#define I40E_FCOE_DIFDIX_CTX_QW0_BUFF1_MASK (0x3FFULL << \ + I40E_FCOE_DIFDIX_CTX_QW0_BUFF1_SHIFT) + +#define I40E_FCOE_DIFDIX_CTX_QW0_REF_SHIFT 32 +#define I40E_FCOE_DIFDIX_CTX_QW0_REF_MASK (0xFFFFFFFFULL << \ + I40E_FCOE_DIFDIX_CTX_QW0_REF_SHIFT) + +#define I40E_FCOE_DIFDIX_CTX_QW1_APP_SHIFT 0 +#define I40E_FCOE_DIFDIX_CTX_QW1_APP_MASK (0xFFFFULL << \ + I40E_FCOE_DIFDIX_CTX_QW1_APP_SHIFT) + +#define I40E_FCOE_DIFDIX_CTX_QW1_APP_MSK_SHIFT 16 +#define I40E_FCOE_DIFDIX_CTX_QW1_APP_MSK_MASK (0xFFFFULL << \ + I40E_FCOE_DIFDIX_CTX_QW1_APP_MSK_SHIFT) + +#define I40E_FCOE_DIFDIX_CTX_QW1_REF_BIAS_SHIFT 32 +#define I40E_FCOE_DIFDIX_CTX_QW0_REF_BIAS_MASK (0xFFFFFFFFULL << \ + I40E_FCOE_DIFDIX_CTX_QW1_REF_BIAS_SHIFT) + +/* FCoE DIF/DIX Buffers descriptor */ +struct i40e_fcoe_difdix_buffers_desc { + __le64 buff_addr0; + __le64 buff_addr1; +}; + +/* FCoE DDP Context descriptor */ +struct i40e_fcoe_ddp_context_desc { + __le64 rsvd; + __le64 type_cmd_foff_lsize; +}; + +#define I40E_FCOE_DDP_CTX_QW1_DTYPE_SHIFT 0 +#define I40E_FCOE_DDP_CTX_QW1_DTYPE_MASK (0xFULL << \ + I40E_FCOE_DDP_CTX_QW1_DTYPE_SHIFT) + +#define I40E_FCOE_DDP_CTX_QW1_CMD_SHIFT 4 +#define I40E_FCOE_DDP_CTX_QW1_CMD_MASK (0xFULL << \ + I40E_FCOE_DDP_CTX_QW1_CMD_SHIFT) + +enum i40e_fcoe_ddp_ctx_desc_cmd_bits { + I40E_FCOE_DDP_CTX_DESC_BSIZE_512B = 0x00, /* 2 BITS */ + I40E_FCOE_DDP_CTX_DESC_BSIZE_4K = 0x01, /* 2 BITS */ + I40E_FCOE_DDP_CTX_DESC_BSIZE_8K = 0x02, /* 2 BITS */ + I40E_FCOE_DDP_CTX_DESC_BSIZE_16K = 0x03, /* 2 BITS */ + I40E_FCOE_DDP_CTX_DESC_DIFENA = 0x04, /* 1 BIT */ + I40E_FCOE_DDP_CTX_DESC_LASTSEQH = 0x08, /* 1 BIT */ +}; + +#define I40E_FCOE_DDP_CTX_QW1_FOFF_SHIFT 16 +#define I40E_FCOE_DDP_CTX_QW1_FOFF_MASK (0x3FFFULL << \ + I40E_FCOE_DDP_CTX_QW1_FOFF_SHIFT) + +#define I40E_FCOE_DDP_CTX_QW1_LSIZE_SHIFT 32 +#define I40E_FCOE_DDP_CTX_QW1_LSIZE_MASK (0x3FFFULL << \ + I40E_FCOE_DDP_CTX_QW1_LSIZE_SHIFT) + +/* FCoE DDP/DWO Queue Context descriptor */ +struct i40e_fcoe_queue_context_desc { + __le64 dmaindx_fbase; /* 0:11 DMAINDX, 12:63 FBASE */ + __le64 flen_tph; /* 0:12 FLEN, 13:15 TPH */ +}; + +#define I40E_FCOE_QUEUE_CTX_QW0_DMAINDX_SHIFT 0 +#define I40E_FCOE_QUEUE_CTX_QW0_DMAINDX_MASK (0xFFFULL << \ + I40E_FCOE_QUEUE_CTX_QW0_DMAINDX_SHIFT) + +#define I40E_FCOE_QUEUE_CTX_QW0_FBASE_SHIFT 12 +#define I40E_FCOE_QUEUE_CTX_QW0_FBASE_MASK (0xFFFFFFFFFFFFFULL << \ + I40E_FCOE_QUEUE_CTX_QW0_FBASE_SHIFT) + +#define I40E_FCOE_QUEUE_CTX_QW1_FLEN_SHIFT 0 +#define I40E_FCOE_QUEUE_CTX_QW1_FLEN_MASK (0x1FFFULL << \ + I40E_FCOE_QUEUE_CTX_QW1_FLEN_SHIFT) + +#define I40E_FCOE_QUEUE_CTX_QW1_TPH_SHIFT 13 +#define I40E_FCOE_QUEUE_CTX_QW1_TPH_MASK (0x7ULL << \ + I40E_FCOE_QUEUE_CTX_QW1_FLEN_SHIFT) + +enum 
i40e_fcoe_queue_ctx_desc_tph_bits { + I40E_FCOE_QUEUE_CTX_DESC_TPHRDESC = 0x1, + I40E_FCOE_QUEUE_CTX_DESC_TPHDATA = 0x2 +}; + +#define I40E_FCOE_QUEUE_CTX_QW1_RECIPE_SHIFT 30 +#define I40E_FCOE_QUEUE_CTX_QW1_RECIPE_MASK (0x3ULL << \ + I40E_FCOE_QUEUE_CTX_QW1_RECIPE_SHIFT) + +/* FCoE DDP/DWO Filter Context descriptor */ +struct i40e_fcoe_filter_context_desc { + __le32 param; + __le16 seqn; + + /* 48:51(0:3) RSVD, 52:63(4:15) DMAINDX */ + __le16 rsvd_dmaindx; + + /* 0:7 FLAGS, 8:52 RSVD, 53:63 LANQ */ + __le64 flags_rsvd_lanq; +}; + +#define I40E_FCOE_FILTER_CTX_QW0_DMAINDX_SHIFT 4 +#define I40E_FCOE_FILTER_CTX_QW0_DMAINDX_MASK (0xFFF << \ + I40E_FCOE_FILTER_CTX_QW0_DMAINDX_SHIFT) + +enum i40e_fcoe_filter_ctx_desc_flags_bits { + I40E_FCOE_FILTER_CTX_DESC_CTYP_DDP = 0x00, + I40E_FCOE_FILTER_CTX_DESC_CTYP_DWO = 0x01, + I40E_FCOE_FILTER_CTX_DESC_ENODE_INIT = 0x00, + I40E_FCOE_FILTER_CTX_DESC_ENODE_RSP = 0x02, + I40E_FCOE_FILTER_CTX_DESC_FC_CLASS2 = 0x00, + I40E_FCOE_FILTER_CTX_DESC_FC_CLASS3 = 0x04 +}; + +#define I40E_FCOE_FILTER_CTX_QW1_FLAGS_SHIFT 0 +#define I40E_FCOE_FILTER_CTX_QW1_FLAGS_MASK (0xFFULL << \ + I40E_FCOE_FILTER_CTX_QW1_FLAGS_SHIFT) + +#define I40E_FCOE_FILTER_CTX_QW1_PCTYPE_SHIFT 8 +#define I40E_FCOE_FILTER_CTX_QW1_PCTYPE_MASK (0x3FULL << \ + I40E_FCOE_FILTER_CTX_QW1_PCTYPE_SHIFT) + +#define I40E_FCOE_FILTER_CTX_QW1_LANQINDX_SHIFT 53 +#define I40E_FCOE_FILTER_CTX_QW1_LANQINDX_MASK (0x7FFULL << \ + I40E_FCOE_FILTER_CTX_QW1_LANQINDX_SHIFT) + enum i40e_switch_element_types { I40E_SWITCH_ELEMENT_TYPE_MAC = 1, I40E_SWITCH_ELEMENT_TYPE_PF = 2, diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c index 13068cc4..67778baf 100644 --- a/drivers/net/i40e/i40e_ethdev.c +++ b/drivers/net/i40e/i40e_ethdev.c @@ -418,6 +418,7 @@ static int i40e_dev_filter_ctrl(struct rte_eth_dev *dev, void *arg); static int i40e_dev_get_dcb_info(struct rte_eth_dev *dev, struct rte_eth_dcb_info *dcb_info); +static int i40e_dev_sync_phy_type(struct i40e_hw *hw); static void i40e_configure_registers(struct i40e_hw *hw); static void i40e_hw_init(struct rte_eth_dev *dev); static int i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi); @@ -481,7 +482,6 @@ static const struct rte_pci_id pci_id_i40e_map[] = { { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722) }, { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722) }, { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_I_X722) }, - { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_I_X722) }, { .vendor_id = 0, /* sentinel */ }, }; @@ -670,10 +670,11 @@ static const struct rte_i40e_xstats_name_off rte_i40e_txq_prio_strings[] = { static struct eth_driver rte_i40e_pmd = { .pci_drv = { - .name = "rte_i40e_pmd", .id_table = pci_id_i40e_map, .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC | RTE_PCI_DRV_DETACHABLE, + .probe = rte_eth_dev_pci_probe, + .remove = rte_eth_dev_pci_remove, }, .eth_dev_init = eth_i40e_dev_init, .eth_dev_uninit = eth_i40e_dev_uninit, @@ -708,28 +709,8 @@ rte_i40e_dev_atomic_write_link_status(struct rte_eth_dev *dev, return 0; } -/* - * Driver initialization routine. - * Invoked once at EAL init time. - * Register itself as the [Poll Mode] Driver of PCI IXGBE devices. 
- */ -static int -rte_i40e_pmd_init(const char *name __rte_unused, - const char *params __rte_unused) -{ - PMD_INIT_FUNC_TRACE(); - rte_eth_driver_register(&rte_i40e_pmd); - - return 0; -} - -static struct rte_driver rte_i40e_driver = { - .type = PMD_PDEV, - .init = rte_i40e_pmd_init, -}; - -PMD_REGISTER_DRIVER(rte_i40e_driver, i40e); -DRIVER_REGISTER_PCI_TABLE(i40e, pci_id_i40e_map); +RTE_PMD_REGISTER_PCI(net_i40e, rte_i40e_pmd.pci_drv); +RTE_PMD_REGISTER_PCI_TABLE(net_i40e, pci_id_i40e_map); #ifndef I40E_GLQF_ORT #define I40E_GLQF_ORT(_i) (0x00268900 + ((_i) * 4)) @@ -933,8 +914,10 @@ config_floating_veb(struct rte_eth_dev *dev) memset(pf->floating_veb_list, 0, sizeof(pf->floating_veb_list)); if (hw->aq.fw_maj_ver >= FLOATING_VEB_SUPPORTED_FW_MAJ) { - pf->floating_veb = is_floating_veb_supported(pci_dev->devargs); - config_vf_floating_veb(pci_dev->devargs, pf->floating_veb, + pf->floating_veb = + is_floating_veb_supported(pci_dev->device.devargs); + config_vf_floating_veb(pci_dev->device.devargs, + pf->floating_veb, pf->floating_veb_list); } else { pf->floating_veb = false; @@ -1042,7 +1025,11 @@ eth_i40e_dev_init(struct rte_eth_dev *dev) config_floating_veb(dev); /* Clear PXE mode */ i40e_clear_pxe_mode(hw); - + ret = i40e_dev_sync_phy_type(hw); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to sync phy type: %d", ret); + goto err_sync_phy_type; + } /* * On X710, performance number is far from the expectation on recent * firmware versions. The fix for this issue may not be integrated in @@ -1207,6 +1194,7 @@ err_msix_pool_init: err_qp_pool_init: err_parameter_init: err_get_capabilities: +err_sync_phy_type: (void)i40e_shutdown_adminq(hw); return ret; @@ -1585,6 +1573,8 @@ i40e_parse_link_speeds(uint16_t link_speeds) if (link_speeds & ETH_LINK_SPEED_40G) link_speed |= I40E_LINK_SPEED_40GB; + if (link_speeds & ETH_LINK_SPEED_25G) + link_speed |= I40E_LINK_SPEED_25GB; if (link_speeds & ETH_LINK_SPEED_20G) link_speed |= I40E_LINK_SPEED_20GB; if (link_speeds & ETH_LINK_SPEED_10G) @@ -1610,6 +1600,7 @@ i40e_phy_conf_link(struct i40e_hw *hw, I40E_AQ_PHY_FLAG_PAUSE_RX | I40E_AQ_PHY_FLAG_LOW_POWER; const uint8_t advt = I40E_LINK_SPEED_40GB | + I40E_LINK_SPEED_25GB | I40E_LINK_SPEED_10GB | I40E_LINK_SPEED_1GB | I40E_LINK_SPEED_100MB; @@ -1662,13 +1653,14 @@ i40e_apply_link_speed(struct rte_eth_dev *dev) struct rte_eth_conf *conf = &dev->data->dev_conf; speed = i40e_parse_link_speeds(conf->link_speeds); - abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK; + if (!I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) + abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK; if (!(conf->link_speeds & ETH_LINK_SPEED_FIXED)) abilities |= I40E_AQ_PHY_AN_ENABLED; abilities |= I40E_AQ_PHY_LINK_ENABLED; /* Skip changing speed on 40G interfaces, FW does not support */ - if (i40e_is_40G_device(hw->device_id)) { + if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) { speed = I40E_LINK_SPEED_UNKNOWN; abilities |= I40E_AQ_PHY_AN_ENABLED; } @@ -1764,7 +1756,8 @@ i40e_dev_start(struct rte_eth_dev *dev) /* Apply link configure */ if (dev->data->dev_conf.link_speeds & ~(ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G | - ETH_LINK_SPEED_20G | ETH_LINK_SPEED_40G)) { + ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | + ETH_LINK_SPEED_40G)) { PMD_DRV_LOG(ERR, "Invalid link setting"); goto err_up; } @@ -1994,9 +1987,11 @@ static int i40e_dev_set_link_down(struct rte_eth_dev *dev) { uint8_t speed = I40E_LINK_SPEED_UNKNOWN; - uint8_t abilities = I40E_AQ_PHY_ENABLE_ATOMIC_LINK; + uint8_t abilities = 0; struct i40e_hw *hw = 
I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + if (!I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) + abilities = I40E_AQ_PHY_ENABLE_ATOMIC_LINK; return i40e_phy_conf_link(hw, abilities, speed); } @@ -2056,6 +2051,9 @@ i40e_dev_link_update(struct rte_eth_dev *dev, case I40E_LINK_SPEED_20GB: link.link_speed = ETH_SPEED_NUM_20G; break; + case I40E_LINK_SPEED_25GB: + link.link_speed = ETH_SPEED_NUM_25G; + break; case I40E_LINK_SPEED_40GB: link.link_speed = ETH_SPEED_NUM_40G; break; @@ -2605,7 +2603,11 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) DEV_TX_OFFLOAD_TCP_CKSUM | DEV_TX_OFFLOAD_SCTP_CKSUM | DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | - DEV_TX_OFFLOAD_TCP_TSO; + DEV_TX_OFFLOAD_TCP_TSO | + DEV_TX_OFFLOAD_VXLAN_TNL_TSO | + DEV_TX_OFFLOAD_GRE_TNL_TSO | + DEV_TX_OFFLOAD_IPIP_TNL_TSO | + DEV_TX_OFFLOAD_GENEVE_TNL_TSO; dev_info->hash_key_size = (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t); dev_info->reta_size = pf->hash_lut_size; @@ -2655,9 +2657,12 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) dev_info->max_tx_queues += dev_info->vmdq_queue_num; } - if (i40e_is_40G_device(hw->device_id)) + if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) /* For XL710 */ dev_info->speed_capa = ETH_LINK_SPEED_40G; + else if (I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) + /* For XXV710 */ + dev_info->speed_capa = ETH_LINK_SPEED_25G; else /* For X710 */ dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G; @@ -2903,7 +2908,7 @@ i40e_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) if (err < 0) return -ENOSYS; - if (i40e_is_40G_device(hw->device_id)) { + if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types)) { /* Configure flow control refresh threshold, * the value for stat_tx_pause_refresh_timer[8] * is used for global pause operation. 
@@ -5496,7 +5501,7 @@ i40e_dev_handle_aq_msg(struct rte_eth_dev *dev) if (!ret) { i40e_notify_all_vfs_link_status(dev); _rte_eth_dev_callback_process(dev, - RTE_ETH_EVENT_INTR_LSC); + RTE_ETH_EVENT_INTR_LSC, NULL); } break; default: @@ -5564,6 +5569,7 @@ i40e_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle, PMD_DRV_LOG(INFO, "ICR0: adminq event"); i40e_dev_handle_aq_msg(dev); } + done: /* Enable interrupt */ i40e_pf_enable_irq0(hw); @@ -6125,7 +6131,7 @@ DONE: /* Configure hash enable flags for RSS */ uint64_t -i40e_config_hena(uint64_t flags) +i40e_config_hena(uint64_t flags, enum i40e_mac_type type) { uint64_t hena = 0; @@ -6134,20 +6140,42 @@ i40e_config_hena(uint64_t flags) if (flags & ETH_RSS_FRAG_IPV4) hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4; - if (flags & ETH_RSS_NONFRAG_IPV4_TCP) - hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP; - if (flags & ETH_RSS_NONFRAG_IPV4_UDP) - hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP; + if (flags & ETH_RSS_NONFRAG_IPV4_TCP) { + if (type == I40E_MAC_X722) { + hena |= (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | + (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK); + } else + hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP; + } + if (flags & ETH_RSS_NONFRAG_IPV4_UDP) { + if (type == I40E_MAC_X722) { + hena |= (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | + (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | + (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP); + } else + hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP; + } if (flags & ETH_RSS_NONFRAG_IPV4_SCTP) hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP; if (flags & ETH_RSS_NONFRAG_IPV4_OTHER) hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER; if (flags & ETH_RSS_FRAG_IPV6) hena |= 1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6; - if (flags & ETH_RSS_NONFRAG_IPV6_TCP) - hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP; - if (flags & ETH_RSS_NONFRAG_IPV6_UDP) - hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP; + if (flags & ETH_RSS_NONFRAG_IPV6_TCP) { + if (type == I40E_MAC_X722) { + hena |= (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | + (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK); + } else + hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP; + } + if (flags & ETH_RSS_NONFRAG_IPV6_UDP) { + if (type == I40E_MAC_X722) { + hena |= (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | + (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | + (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP); + } else + hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP; + } if (flags & ETH_RSS_NONFRAG_IPV6_SCTP) hena |= 1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP; if (flags & ETH_RSS_NONFRAG_IPV6_OTHER) @@ -6170,8 +6198,18 @@ i40e_parse_hena(uint64_t flags) rss_hf |= ETH_RSS_FRAG_IPV4; if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP)) rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP; +#ifdef X722_SUPPORT + if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK)) + rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP; +#endif if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP)) rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP; +#ifdef X722_SUPPORT + if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP)) + rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP; + if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP)) + rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP; +#endif if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP)) rss_hf |= ETH_RSS_NONFRAG_IPV4_SCTP; if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER)) @@ -6180,8 +6218,18 @@ i40e_parse_hena(uint64_t flags) rss_hf |= ETH_RSS_FRAG_IPV6; if (flags & (1ULL << 
I40E_FILTER_PCTYPE_NONF_IPV6_TCP)) rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP; +#ifdef X722_SUPPORT + if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK)) + rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP; +#endif if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP)) rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP; +#ifdef X722_SUPPORT + if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP)) + rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP; + if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP)) + rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP; +#endif if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP)) rss_hf |= ETH_RSS_NONFRAG_IPV6_SCTP; if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER)) @@ -6201,7 +6249,10 @@ i40e_pf_disable_rss(struct i40e_pf *pf) hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)); hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32; - hena &= ~I40E_RSS_HENA_ALL; + if (hw->mac.type == I40E_MAC_X722) + hena &= ~I40E_RSS_HENA_ALL_X722; + else + hena &= ~I40E_RSS_HENA_ALL; i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena); i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32)); I40E_WRITE_FLUSH(hw); @@ -6288,8 +6339,11 @@ i40e_hw_rss_hash_set(struct i40e_pf *pf, struct rte_eth_rss_conf *rss_conf) rss_hf = rss_conf->rss_hf; hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)); hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32; - hena &= ~I40E_RSS_HENA_ALL; - hena |= i40e_config_hena(rss_hf); + if (hw->mac.type == I40E_MAC_X722) + hena &= ~I40E_RSS_HENA_ALL_X722; + else + hena &= ~I40E_RSS_HENA_ALL; + hena |= i40e_config_hena(rss_hf, hw->mac.type); i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena); i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32)); I40E_WRITE_FLUSH(hw); @@ -6308,7 +6362,9 @@ i40e_dev_rss_hash_update(struct rte_eth_dev *dev, hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)); hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32; - if (!(hena & I40E_RSS_HENA_ALL)) { /* RSS disabled */ + if (!(hena & ((hw->mac.type == I40E_MAC_X722) + ? 
I40E_RSS_HENA_ALL_X722 + : I40E_RSS_HENA_ALL))) { /* RSS disabled */ if (rss_hf != 0) /* Enable RSS */ return -EINVAL; return 0; /* Nothing to do */ @@ -7045,6 +7101,26 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST | I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT | I40E_INSET_FLEX_PAYLOAD, +#ifdef X722_SUPPORT + [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] = + I40E_INSET_DMAC | I40E_INSET_SMAC | + I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | + I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS | + I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL | + I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID | + I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST | + I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT | + I40E_INSET_FLEX_PAYLOAD, + [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] = + I40E_INSET_DMAC | I40E_INSET_SMAC | + I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | + I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS | + I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL | + I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID | + I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST | + I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT | + I40E_INSET_FLEX_PAYLOAD, +#endif [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] = I40E_INSET_DMAC | I40E_INSET_SMAC | I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | @@ -7054,6 +7130,17 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST | I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD, +#ifdef X722_SUPPORT + [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] = + I40E_INSET_DMAC | I40E_INSET_SMAC | + I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | + I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV4_TOS | + I40E_INSET_IPV4_PROTO | I40E_INSET_IPV4_TTL | + I40E_INSET_TUNNEL_DMAC | I40E_INSET_TUNNEL_ID | + I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST | + I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT | + I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD, +#endif [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] = I40E_INSET_DMAC | I40E_INSET_SMAC | I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | @@ -7087,6 +7174,26 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype, I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT | I40E_INSET_FLEX_PAYLOAD, +#ifdef X722_SUPPORT + [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] = + I40E_INSET_DMAC | I40E_INSET_SMAC | + I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | + I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC | + I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR | + I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC | + I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT | + I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS | + I40E_INSET_FLEX_PAYLOAD, + [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] = + I40E_INSET_DMAC | I40E_INSET_SMAC | + I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | + I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC | + I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR | + I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC | + I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT | + I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS | + I40E_INSET_FLEX_PAYLOAD, +#endif [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] = I40E_INSET_DMAC | I40E_INSET_SMAC | I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | @@ -7096,6 +7203,17 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype, I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD, +#ifdef X722_SUPPORT + [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] = + 
I40E_INSET_DMAC | I40E_INSET_SMAC | + I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | + I40E_INSET_VLAN_TUNNEL | I40E_INSET_IPV6_TC | + I40E_INSET_IPV6_FLOW | I40E_INSET_IPV6_NEXT_HDR | + I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC | + I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT | + I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS | + I40E_INSET_FLEX_PAYLOAD, +#endif [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] = I40E_INSET_DMAC | I40E_INSET_SMAC | I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | @@ -7135,11 +7253,30 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype, I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST | I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL | I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT, +#ifdef X722_SUPPORT + [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] = + I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | + I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST | + I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL | + I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT, + [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] = + I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | + I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST | + I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL | + I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT, +#endif [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] = I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST | I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL | I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT, +#ifdef X722_SUPPORT + [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] = + I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | + I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST | + I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL | + I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT, +#endif [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] = I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST | @@ -7161,11 +7298,30 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype, I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST | I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT, +#ifdef X722_SUPPORT + [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] = + I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | + I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST | + I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT | + I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT, + [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] = + I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | + I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST | + I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT | + I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT, +#endif [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] = I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST | I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT, +#ifdef X722_SUPPORT + [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] = + I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | + I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST | + I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT | + I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT, +#endif [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] = I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER | I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST | @@ -7218,9 +7374,22 @@ i40e_get_default_input_set(uint16_t pctype) [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] = I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST | I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT, +#ifdef X722_SUPPORT + [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] = + I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST | + I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT, + 
[I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] = + I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST | + I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT, +#endif [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] = I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST | I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT, +#ifdef X722_SUPPORT + [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] = + I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST | + I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT, +#endif [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] = I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST | I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT | @@ -7232,9 +7401,22 @@ i40e_get_default_input_set(uint16_t pctype) [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] = I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT, +#ifdef X722_SUPPORT + [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] = + I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST | + I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT, + [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] = + I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST | + I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT, +#endif [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] = I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT, +#ifdef X722_SUPPORT + [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] = + I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST | + I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT, +#endif [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] = I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT | @@ -7508,8 +7690,14 @@ i40e_filter_input_set_init(struct i40e_pf *pf) for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP; pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++) { - if (!I40E_VALID_PCTYPE(pctype)) - continue; + if (hw->mac.type == I40E_MAC_X722) { + if (!I40E_VALID_PCTYPE_X722(pctype)) + continue; + } else { + if (!I40E_VALID_PCTYPE(pctype)) + continue; + } + input_set = i40e_get_default_input_set(pctype); num = i40e_generate_inset_mask_reg(input_set, mask_reg, @@ -7575,7 +7763,15 @@ i40e_hash_filter_inset_select(struct i40e_hw *hw, PMD_DRV_LOG(ERR, "invalid flow_type input."); return -EINVAL; } - pctype = i40e_flowtype_to_pctype(conf->flow_type); + + if (hw->mac.type == I40E_MAC_X722) { + /* get translated pctype value in fd pctype register */ + pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(hw, + I40E_GLQF_FD_PCTYPES((int)i40e_flowtype_to_pctype( + conf->flow_type))); + } else + pctype = i40e_flowtype_to_pctype(conf->flow_type); + ret = i40e_parse_input_set(&input_set, pctype, conf->field, conf->inset_size); if (ret) { @@ -7644,7 +7840,9 @@ i40e_fdir_filter_inset_select(struct i40e_pf *pf, PMD_DRV_LOG(ERR, "invalid flow_type input."); return -EINVAL; } + pctype = i40e_flowtype_to_pctype(conf->flow_type); + ret = i40e_parse_input_set(&input_set, pctype, conf->field, conf->inset_size); if (ret) { @@ -8017,8 +8215,18 @@ i40e_pctype_to_flowtype(enum i40e_filter_pctype pctype) [I40E_FILTER_PCTYPE_FRAG_IPV4] = RTE_ETH_FLOW_FRAG_IPV4, [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] = RTE_ETH_FLOW_NONFRAG_IPV4_UDP, +#ifdef X722_SUPPORT + [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] = + RTE_ETH_FLOW_NONFRAG_IPV4_UDP, + [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] = + RTE_ETH_FLOW_NONFRAG_IPV4_UDP, +#endif [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] = RTE_ETH_FLOW_NONFRAG_IPV4_TCP, +#ifdef X722_SUPPORT + [I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] = + RTE_ETH_FLOW_NONFRAG_IPV4_TCP, +#endif [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] = RTE_ETH_FLOW_NONFRAG_IPV4_SCTP, [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] = @@ -8026,8 +8234,18 @@ i40e_pctype_to_flowtype(enum 
i40e_filter_pctype pctype) [I40E_FILTER_PCTYPE_FRAG_IPV6] = RTE_ETH_FLOW_FRAG_IPV6, [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] = RTE_ETH_FLOW_NONFRAG_IPV6_UDP, +#ifdef X722_SUPPORT + [I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] = + RTE_ETH_FLOW_NONFRAG_IPV6_UDP, + [I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] = + RTE_ETH_FLOW_NONFRAG_IPV6_UDP, +#endif [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] = RTE_ETH_FLOW_NONFRAG_IPV6_TCP, +#ifdef X722_SUPPORT + [I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] = + RTE_ETH_FLOW_NONFRAG_IPV6_TCP, +#endif [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] = RTE_ETH_FLOW_NONFRAG_IPV6_SCTP, [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] = @@ -8063,6 +8281,23 @@ i40e_pctype_to_flowtype(enum i40e_filter_pctype pctype) #define I40E_GL_SWR_PM_UP_THR_SF_VALUE 0x06060606 #define I40E_GL_SWR_PM_UP_THR 0x269FBC +static int +i40e_dev_sync_phy_type(struct i40e_hw *hw) +{ + enum i40e_status_code status; + struct i40e_aq_get_phy_abilities_resp phy_ab; + int ret = -ENOTSUP; + + status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_ab, + NULL); + + if (status) + return ret; + + return 0; +} + + static void i40e_configure_registers(struct i40e_hw *hw) { @@ -8080,7 +8315,8 @@ i40e_configure_registers(struct i40e_hw *hw) for (i = 0; i < RTE_DIM(reg_table); i++) { if (reg_table[i].addr == I40E_GL_SWR_PM_UP_THR) { - if (i40e_is_40G_device(hw->device_id)) /* For XL710 */ + if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types) || /* For XL710 */ + I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) /* For XXV710 */ reg_table[i].val = I40E_GL_SWR_PM_UP_THR_SF_VALUE; else /* For X710 */ diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h index 61dfa932..298cef48 100644 --- a/drivers/net/i40e/i40e_ethdev.h +++ b/drivers/net/i40e/i40e_ethdev.h @@ -149,6 +149,16 @@ enum i40e_flxpld_layer_idx { ETH_RSS_NONFRAG_IPV6_OTHER | \ ETH_RSS_L2_PAYLOAD) +/* All bits of RSS hash enable for X722*/ +#define I40E_RSS_HENA_ALL_X722 ( \ + (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \ + (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \ + (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \ + (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \ + (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) | \ + (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \ + I40E_RSS_HENA_ALL) + /* All bits of RSS hash enable */ #define I40E_RSS_HENA_ALL ( \ (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \ @@ -577,7 +587,7 @@ int i40e_vsi_vlan_pvid_set(struct i40e_vsi *vsi, struct i40e_vsi_vlan_pvid_info *info); int i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on); int i40e_vsi_config_vlan_filter(struct i40e_vsi *vsi, bool on); -uint64_t i40e_config_hena(uint64_t flags); +uint64_t i40e_config_hena(uint64_t flags, enum i40e_mac_type type); uint64_t i40e_parse_hena(uint64_t flags); enum i40e_status_code i40e_fdir_setup_tx_resources(struct i40e_pf *pf); enum i40e_status_code i40e_fdir_setup_rx_resources(struct i40e_pf *pf); @@ -701,6 +711,25 @@ i40e_calc_itr_interval(int16_t interval) (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV6_OTHER || \ (flow_type) == RTE_ETH_FLOW_L2_PAYLOAD) +#define I40E_VALID_PCTYPE_X722(pctype) \ + ((pctype) == I40E_FILTER_PCTYPE_FRAG_IPV4 || \ + (pctype) == I40E_FILTER_PCTYPE_NONF_IPV4_TCP || \ + (pctype) == I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK || \ + (pctype) == I40E_FILTER_PCTYPE_NONF_IPV4_UDP || \ + (pctype) == I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP || \ + (pctype) == I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP || \ + (pctype) == I40E_FILTER_PCTYPE_NONF_IPV4_SCTP || \ + 
(pctype) == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER || \ + (pctype) == I40E_FILTER_PCTYPE_FRAG_IPV6 || \ + (pctype) == I40E_FILTER_PCTYPE_NONF_IPV6_UDP || \ + (pctype) == I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP || \ + (pctype) == I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP || \ + (pctype) == I40E_FILTER_PCTYPE_NONF_IPV6_TCP || \ + (pctype) == I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK || \ + (pctype) == I40E_FILTER_PCTYPE_NONF_IPV6_SCTP || \ + (pctype) == I40E_FILTER_PCTYPE_NONF_IPV6_OTHER || \ + (pctype) == I40E_FILTER_PCTYPE_L2_PAYLOAD) + #define I40E_VALID_PCTYPE(pctype) \ ((pctype) == I40E_FILTER_PCTYPE_FRAG_IPV4 || \ (pctype) == I40E_FILTER_PCTYPE_NONF_IPV4_TCP || \ @@ -714,4 +743,18 @@ i40e_calc_itr_interval(int16_t interval) (pctype) == I40E_FILTER_PCTYPE_NONF_IPV6_OTHER || \ (pctype) == I40E_FILTER_PCTYPE_L2_PAYLOAD) +#define I40E_PHY_TYPE_SUPPORT_40G(phy_type) \ + (((phy_type) & I40E_CAP_PHY_TYPE_40GBASE_KR4) || \ + ((phy_type) & I40E_CAP_PHY_TYPE_40GBASE_CR4_CU) || \ + ((phy_type) & I40E_CAP_PHY_TYPE_40GBASE_AOC) || \ + ((phy_type) & I40E_CAP_PHY_TYPE_40GBASE_CR4) || \ + ((phy_type) & I40E_CAP_PHY_TYPE_40GBASE_SR4) || \ + ((phy_type) & I40E_CAP_PHY_TYPE_40GBASE_LR4)) + +#define I40E_PHY_TYPE_SUPPORT_25G(phy_type) \ + (((phy_type) & I40E_CAP_PHY_TYPE_25GBASE_KR) || \ + ((phy_type) & I40E_CAP_PHY_TYPE_25GBASE_CR) || \ + ((phy_type) & I40E_CAP_PHY_TYPE_25GBASE_SR) || \ + ((phy_type) & I40E_CAP_PHY_TYPE_25GBASE_LR)) + #endif /* _I40E_ETHDEV_H_ */ diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c index ba63a7f1..aa306d61 100644 --- a/drivers/net/i40e/i40e_ethdev_vf.c +++ b/drivers/net/i40e/i40e_ethdev_vf.c @@ -1314,7 +1314,7 @@ i40evf_handle_pf_event(__rte_unused struct rte_eth_dev *dev, switch (pf_msg->event) { case I40E_VIRTCHNL_EVENT_RESET_IMPENDING: PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_RESET_IMPENDING event\n"); - _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET); + _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, NULL); break; case I40E_VIRTCHNL_EVENT_LINK_CHANGE: PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_LINK_CHANGE event\n"); @@ -1527,38 +1527,18 @@ i40evf_dev_uninit(struct rte_eth_dev *eth_dev) */ static struct eth_driver rte_i40evf_pmd = { .pci_drv = { - .name = "rte_i40evf_pmd", .id_table = pci_id_i40evf_map, .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_DETACHABLE, + .probe = rte_eth_dev_pci_probe, + .remove = rte_eth_dev_pci_remove, }, .eth_dev_init = i40evf_dev_init, .eth_dev_uninit = i40evf_dev_uninit, .dev_private_size = sizeof(struct i40e_adapter), }; -/* - * VF Driver initialization routine. - * Invoked one at EAL init time. - * Register itself as the [Virtual Poll Mode] Driver of PCI Fortville devices. 
- */ -static int -rte_i40evf_pmd_init(const char *name __rte_unused, - const char *params __rte_unused) -{ - PMD_INIT_FUNC_TRACE(); - - rte_eth_driver_register(&rte_i40evf_pmd); - - return 0; -} - -static struct rte_driver rte_i40evf_driver = { - .type = PMD_PDEV, - .init = rte_i40evf_pmd_init, -}; - -PMD_REGISTER_DRIVER(rte_i40evf_driver, i40evf); -DRIVER_REGISTER_PCI_TABLE(i40evf, pci_id_i40evf_map); +RTE_PMD_REGISTER_PCI(net_i40e_vf, rte_i40evf_pmd.pci_drv); +RTE_PMD_REGISTER_PCI_TABLE(net_i40e_vf, pci_id_i40evf_map); static int i40evf_dev_configure(struct rte_eth_dev *dev) @@ -2539,8 +2519,11 @@ i40evf_hw_rss_hash_set(struct i40e_vf *vf, struct rte_eth_rss_conf *rss_conf) rss_hf = rss_conf->rss_hf; hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(0)); hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(1))) << 32; - hena &= ~I40E_RSS_HENA_ALL; - hena |= i40e_config_hena(rss_hf); + if (hw->mac.type == I40E_MAC_X722) + hena &= ~I40E_RSS_HENA_ALL_X722; + else + hena &= ~I40E_RSS_HENA_ALL; + hena |= i40e_config_hena(rss_hf, hw->mac.type); i40e_write_rx_ctl(hw, I40E_VFQF_HENA(0), (uint32_t)hena); i40e_write_rx_ctl(hw, I40E_VFQF_HENA(1), (uint32_t)(hena >> 32)); I40EVF_WRITE_FLUSH(hw); @@ -2556,7 +2539,10 @@ i40evf_disable_rss(struct i40e_vf *vf) hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(0)); hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(1))) << 32; - hena &= ~I40E_RSS_HENA_ALL; + if (hw->mac.type == I40E_MAC_X722) + hena &= ~I40E_RSS_HENA_ALL_X722; + else + hena &= ~I40E_RSS_HENA_ALL; i40e_write_rx_ctl(hw, I40E_VFQF_HENA(0), (uint32_t)hena); i40e_write_rx_ctl(hw, I40E_VFQF_HENA(1), (uint32_t)(hena >> 32)); I40EVF_WRITE_FLUSH(hw); @@ -2617,7 +2603,9 @@ i40evf_dev_rss_hash_update(struct rte_eth_dev *dev, hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(0)); hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(1))) << 32; - if (!(hena & I40E_RSS_HENA_ALL)) { /* RSS disabled */ + if (!(hena & ((hw->mac.type == I40E_MAC_X722) + ? 
I40E_RSS_HENA_ALL_X722 + : I40E_RSS_HENA_ALL))) { /* RSS disabled */ if (rss_hf != 0) /* Enable RSS */ return -EINVAL; return 0; diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c index f65c4110..335bf15c 100644 --- a/drivers/net/i40e/i40e_fdir.c +++ b/drivers/net/i40e/i40e_fdir.c @@ -251,7 +251,7 @@ i40e_fdir_setup(struct i40e_pf *pf) /* reserve memory for the fdir programming packet */ snprintf(z_name, sizeof(z_name), "%s_%s_%d", - eth_dev->driver->pci_drv.name, + eth_dev->driver->pci_drv.driver.name, I40E_FDIR_MZ_NAME, eth_dev->data->port_id); mz = i40e_memzone_reserve(z_name, I40E_FDIR_PKT_LEN, SOCKET_ID_ANY); @@ -353,8 +353,15 @@ i40e_init_flx_pld(struct i40e_pf *pf) /* initialize the masks */ for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP; pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++) { - if (!I40E_VALID_PCTYPE((enum i40e_filter_pctype)pctype)) - continue; + if (hw->mac.type == I40E_MAC_X722) { + if (!I40E_VALID_PCTYPE_X722( + (enum i40e_filter_pctype)pctype)) + continue; + } else { + if (!I40E_VALID_PCTYPE( + (enum i40e_filter_pctype)pctype)) + continue; + } pf->fdir.flex_mask[pctype].word_mask = 0; i40e_write_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype), 0); for (i = 0; i < I40E_FDIR_BITMASK_NUM_WORD; i++) { @@ -664,7 +671,16 @@ i40e_fdir_configure(struct rte_eth_dev *dev) i40e_set_flx_pld_cfg(pf, &conf->flex_set[i]); /* configure flex mask*/ for (i = 0; i < conf->nb_flexmasks; i++) { - pctype = i40e_flowtype_to_pctype(conf->flex_mask[i].flow_type); + if (hw->mac.type == I40E_MAC_X722) { + /* get translated pctype value in fd pctype register */ + pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl( + hw, I40E_GLQF_FD_PCTYPES( + (int)i40e_flowtype_to_pctype( + conf->flex_mask[i].flow_type))); + } else + pctype = i40e_flowtype_to_pctype( + conf->flex_mask[i].flow_type); + i40e_set_flex_mask_on_pctype(pf, pctype, &conf->flex_mask[i]); } @@ -1012,6 +1028,7 @@ i40e_add_del_fdir_filter(struct rte_eth_dev *dev, const struct rte_eth_fdir_filter *filter, bool add) { + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt; enum i40e_filter_pctype pctype; @@ -1044,7 +1061,16 @@ i40e_add_del_fdir_filter(struct rte_eth_dev *dev, PMD_DRV_LOG(ERR, "construct packet for fdir fails."); return ret; } - pctype = i40e_flowtype_to_pctype(filter->input.flow_type); + + if (hw->mac.type == I40E_MAC_X722) { + /* get translated pctype value in fd pctype register */ + pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl( + hw, I40E_GLQF_FD_PCTYPES( + (int)i40e_flowtype_to_pctype( + filter->input.flow_type))); + } else + pctype = i40e_flowtype_to_pctype(filter->input.flow_type); + ret = i40e_fdir_filter_programming(pf, pctype, filter, add); if (ret < 0) { PMD_DRV_LOG(ERR, "fdir programming fails for PCTYPE(%u).", @@ -1273,6 +1299,7 @@ i40e_fdir_info_get_flex_mask(struct i40e_pf *pf, { struct i40e_fdir_flex_mask *mask; struct rte_eth_fdir_flex_mask *ptr = flex_mask; + struct i40e_hw *hw = I40E_PF_TO_HW(pf); uint16_t flow_type; uint8_t i, j; uint16_t off_bytes, mask_tmp; @@ -1281,8 +1308,13 @@ i40e_fdir_info_get_flex_mask(struct i40e_pf *pf, i <= I40E_FILTER_PCTYPE_L2_PAYLOAD; i++) { mask = &pf->fdir.flex_mask[i]; - if (!I40E_VALID_PCTYPE((enum i40e_filter_pctype)i)) - continue; + if (hw->mac.type == I40E_MAC_X722) { + if (!I40E_VALID_PCTYPE_X722((enum i40e_filter_pctype)i)) + continue; + } else { + if (!I40E_VALID_PCTYPE((enum i40e_filter_pctype)i)) + 
continue; + } flow_type = i40e_pctype_to_flowtype((enum i40e_filter_pctype)i); for (j = 0; j < I40E_FDIR_MAX_FLEXWORD_NUM; j++) { if (mask->word_mask & I40E_FLEX_WORD_MASK(j)) { diff --git a/drivers/net/i40e/i40e_pf.c b/drivers/net/i40e/i40e_pf.c index 4e2f6b63..ddfc140d 100644 --- a/drivers/net/i40e/i40e_pf.c +++ b/drivers/net/i40e/i40e_pf.c @@ -904,11 +904,11 @@ i40e_notify_vf_link_status(struct rte_eth_dev *dev, struct i40e_pf_vf *vf) event.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE; event.event_data.link_event.link_status = - dev->data->dev_link.link_status; + dev->data->dev_link.link_status; event.event_data.link_event.link_speed = - (enum i40e_aq_link_speed)dev->data->dev_link.link_speed; + (enum i40e_aq_link_speed)dev->data->dev_link.link_speed; i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_EVENT, - I40E_SUCCESS, (uint8_t *)&event, sizeof(event)); + I40E_SUCCESS, (uint8_t *)&event, sizeof(event)); } void diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c index 0556a4d4..7ae7d9fb 100644 --- a/drivers/net/i40e/i40e_rxtx.c +++ b/drivers/net/i40e/i40e_rxtx.c @@ -142,8 +142,14 @@ i40e_rxd_error_to_pkt_flags(uint64_t qword) return flags; if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_IPE_SHIFT))) flags |= PKT_RX_IP_CKSUM_BAD; + else + flags |= PKT_RX_IP_CKSUM_GOOD; + if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT))) flags |= PKT_RX_L4_CKSUM_BAD; + else + flags |= PKT_RX_L4_CKSUM_GOOD; + if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))) flags |= PKT_RX_EIP_CKSUM_BAD; @@ -174,569 +180,6 @@ i40e_get_iee15888_flags(struct rte_mbuf *mb, uint64_t qword) } #endif -/* For each value it means, datasheet of hardware can tell more details - * - * @note: fix i40e_dev_supported_ptypes_get() if any change here. 
- */ -static inline uint32_t -i40e_rxd_pkt_type_mapping(uint8_t ptype) -{ - static const uint32_t type_table[UINT8_MAX + 1] __rte_cache_aligned = { - /* L2 types */ - /* [0] reserved */ - [1] = RTE_PTYPE_L2_ETHER, - [2] = RTE_PTYPE_L2_ETHER_TIMESYNC, - /* [3] - [5] reserved */ - [6] = RTE_PTYPE_L2_ETHER_LLDP, - /* [7] - [10] reserved */ - [11] = RTE_PTYPE_L2_ETHER_ARP, - /* [12] - [21] reserved */ - - /* Non tunneled IPv4 */ - [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_L4_FRAG, - [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_L4_NONFRAG, - [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_L4_UDP, - /* [25] reserved */ - [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_L4_TCP, - [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_L4_SCTP, - [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_L4_ICMP, - - /* IPv4 --> IPv4 */ - [29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_IP | - RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_FRAG, - [30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_IP | - RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_NONFRAG, - [31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_IP | - RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_UDP, - /* [32] reserved */ - [33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_IP | - RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_TCP, - [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_IP | - RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_SCTP, - [35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_IP | - RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_ICMP, - - /* IPv4 --> IPv6 */ - [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_IP | - RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_FRAG, - [37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_IP | - RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_NONFRAG, - [38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_IP | - RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_UDP, - /* [39] reserved */ - [40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_IP | - RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_TCP, - [41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_IP | - RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_SCTP, - [42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_IP | - RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_ICMP, - - /* IPv4 --> GRE/Teredo/VXLAN */ - [43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRENAT, - - /* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */ - [44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRENAT | - RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_FRAG, - [45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRENAT | - RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_NONFRAG, - [46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRENAT | - RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_UDP, - /* [47] reserved */ - [48] = 
RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRENAT | - RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_TCP, - [49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRENAT | - RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_SCTP, - [50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRENAT | - RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_ICMP, - - /* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */ - [51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRENAT | - RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_FRAG, - [52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRENAT | - RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_NONFRAG, - [53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRENAT | - RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_UDP, - /* [54] reserved */ - [55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRENAT | - RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_TCP, - [56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRENAT | - RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_SCTP, - [57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRENAT | - RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_ICMP, - - /* IPv4 --> GRE/Teredo/VXLAN --> MAC */ - [58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER, - - /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */ - [59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | - RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_FRAG, - [60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | - RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_NONFRAG, - [61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | - RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_UDP, - /* [62] reserved */ - [63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | - RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_TCP, - [64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | - RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_SCTP, - [65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | - RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_ICMP, - - /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */ - [66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | - RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_FRAG, - [67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | - RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_NONFRAG, - [68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | - RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_UDP, - /* [69] reserved */ - [70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER 
| - RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_TCP, - [71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | - RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_SCTP, - [72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | - RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_ICMP, - - /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN */ - [73] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRENAT | - RTE_PTYPE_INNER_L2_ETHER_VLAN, - - /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv4 */ - [74] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRENAT | - RTE_PTYPE_INNER_L2_ETHER_VLAN | - RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_FRAG, - [75] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRENAT | - RTE_PTYPE_INNER_L2_ETHER_VLAN | - RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_NONFRAG, - [76] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRENAT | - RTE_PTYPE_INNER_L2_ETHER_VLAN | - RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_UDP, - /* [77] reserved */ - [78] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRENAT | - RTE_PTYPE_INNER_L2_ETHER_VLAN | - RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_TCP, - [79] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRENAT | - RTE_PTYPE_INNER_L2_ETHER_VLAN | - RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_SCTP, - [80] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRENAT | - RTE_PTYPE_INNER_L2_ETHER_VLAN | - RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_ICMP, - - /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv6 */ - [81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRENAT | - RTE_PTYPE_INNER_L2_ETHER_VLAN | - RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_FRAG, - [82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRENAT | - RTE_PTYPE_INNER_L2_ETHER_VLAN | - RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_NONFRAG, - [83] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRENAT | - RTE_PTYPE_INNER_L2_ETHER_VLAN | - RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_UDP, - /* [84] reserved */ - [85] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRENAT | - RTE_PTYPE_INNER_L2_ETHER_VLAN | - RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_TCP, - [86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRENAT | - RTE_PTYPE_INNER_L2_ETHER_VLAN | - RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_SCTP, - [87] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRENAT | - RTE_PTYPE_INNER_L2_ETHER_VLAN | - RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_ICMP, - - /* Non tunneled IPv6 */ - [88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_L4_FRAG, - [89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_L4_NONFRAG, - [90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_L4_UDP, - /* [91] reserved */ - [92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_L4_TCP, - [93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_L4_SCTP, - [94] = 
RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_L4_ICMP, - - /* IPv6 --> IPv4 */ - [95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_IP | - RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_FRAG, - [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_IP | - RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_NONFRAG, - [97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_IP | - RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_UDP, - /* [98] reserved */ - [99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_IP | - RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_TCP, - [100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_IP | - RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_SCTP, - [101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_IP | - RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_ICMP, - - /* IPv6 --> IPv6 */ - [102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_IP | - RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_FRAG, - [103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_IP | - RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_NONFRAG, - [104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_IP | - RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_UDP, - /* [105] reserved */ - [106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_IP | - RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_TCP, - [107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_IP | - RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_SCTP, - [108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_IP | - RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_ICMP, - - /* IPv6 --> GRE/Teredo/VXLAN */ - [109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRENAT, - - /* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */ - [110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRENAT | - RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_FRAG, - [111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRENAT | - RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_NONFRAG, - [112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRENAT | - RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_UDP, - /* [113] reserved */ - [114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRENAT | - RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_TCP, - [115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRENAT | - RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_SCTP, - [116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRENAT | - RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_ICMP, - - /* IPv6 --> GRE/Teredo/VXLAN --> IPv6 */ - [117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRENAT | - RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_FRAG, - [118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRENAT | - RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_NONFRAG, - [119] = RTE_PTYPE_L2_ETHER | 
RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRENAT | - RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_UDP, - /* [120] reserved */ - [121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRENAT | - RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_TCP, - [122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRENAT | - RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_SCTP, - [123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRENAT | - RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_ICMP, - - /* IPv6 --> GRE/Teredo/VXLAN --> MAC */ - [124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER, - - /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */ - [125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | - RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_FRAG, - [126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | - RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_NONFRAG, - [127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | - RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_UDP, - /* [128] reserved */ - [129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | - RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_TCP, - [130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | - RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_SCTP, - [131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | - RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_ICMP, - - /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */ - [132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | - RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_FRAG, - [133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | - RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_NONFRAG, - [134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | - RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_UDP, - /* [135] reserved */ - [136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | - RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_TCP, - [137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | - RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_SCTP, - [138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | - RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_ICMP, - - /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN */ - [139] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRENAT | - RTE_PTYPE_INNER_L2_ETHER_VLAN, - - /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv4 */ - [140] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRENAT | - RTE_PTYPE_INNER_L2_ETHER_VLAN | - RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | - 
RTE_PTYPE_INNER_L4_FRAG, - [141] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRENAT | - RTE_PTYPE_INNER_L2_ETHER_VLAN | - RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_NONFRAG, - [142] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRENAT | - RTE_PTYPE_INNER_L2_ETHER_VLAN | - RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_UDP, - /* [143] reserved */ - [144] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRENAT | - RTE_PTYPE_INNER_L2_ETHER_VLAN | - RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_TCP, - [145] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRENAT | - RTE_PTYPE_INNER_L2_ETHER_VLAN | - RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_SCTP, - [146] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRENAT | - RTE_PTYPE_INNER_L2_ETHER_VLAN | - RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_ICMP, - - /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv6 */ - [147] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRENAT | - RTE_PTYPE_INNER_L2_ETHER_VLAN | - RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_FRAG, - [148] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRENAT | - RTE_PTYPE_INNER_L2_ETHER_VLAN | - RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_NONFRAG, - [149] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRENAT | - RTE_PTYPE_INNER_L2_ETHER_VLAN | - RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_UDP, - /* [150] reserved */ - [151] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRENAT | - RTE_PTYPE_INNER_L2_ETHER_VLAN | - RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_TCP, - [152] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRENAT | - RTE_PTYPE_INNER_L2_ETHER_VLAN | - RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_SCTP, - [153] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_TUNNEL_GRENAT | - RTE_PTYPE_INNER_L2_ETHER_VLAN | - RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_INNER_L4_ICMP, - - /* L2 NSH packet type */ - [154] = RTE_PTYPE_L2_ETHER_NSH, - [155] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_L4_FRAG, - [156] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_L4_NONFRAG, - [157] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_L4_UDP, - [158] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_L4_TCP, - [159] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_L4_SCTP, - [160] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | - RTE_PTYPE_L4_ICMP, - [161] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_L4_FRAG, - [162] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_L4_NONFRAG, - [163] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_L4_UDP, - [164] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_L4_TCP, - [165] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_L4_SCTP, - [166] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | - RTE_PTYPE_L4_ICMP, - - /* All others reserved */ - }; - - return type_table[ptype]; -} - #define I40E_RX_DESC_EXT_STATUS_FLEXBH_MASK 0x03 #define I40E_RX_DESC_EXT_STATUS_FLEXBH_FD_ID 0x01 #define 
I40E_RX_DESC_EXT_STATUS_FLEXBH_FLEX 0x02
@@ -779,33 +222,65 @@ i40e_rxd_build_fdir(volatile union i40e_rx_desc *rxdp, struct rte_mbuf *mb)
 #endif
 	return flags;
 }
+
+static inline void
+i40e_parse_tunneling_params(uint64_t ol_flags,
+			    union i40e_tx_offload tx_offload,
+			    uint32_t *cd_tunneling)
+{
+	/* EIPT: External (outer) IP header type */
+	if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
+		*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
+	else if (ol_flags & PKT_TX_OUTER_IPV4)
+		*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
+	else if (ol_flags & PKT_TX_OUTER_IPV6)
+		*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
+
+	/* EIPLEN: External (outer) IP header length, in DWords */
+	*cd_tunneling |= (tx_offload.outer_l3_len >> 2) <<
+		I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
+
+	/* L4TUNT: L4 Tunneling Type */
+	switch (ol_flags & PKT_TX_TUNNEL_MASK) {
+	case PKT_TX_TUNNEL_IPIP:
+		/* for non UDP / GRE tunneling, set to 00b */
+		break;
+	case PKT_TX_TUNNEL_VXLAN:
+	case PKT_TX_TUNNEL_GENEVE:
+		*cd_tunneling |= I40E_TXD_CTX_UDP_TUNNELING;
+		break;
+	case PKT_TX_TUNNEL_GRE:
+		*cd_tunneling |= I40E_TXD_CTX_GRE_TUNNELING;
+		break;
+	default:
+		PMD_TX_LOG(ERR, "Tunnel type not supported\n");
+		return;
+	}
+
+	/* L4TUNLEN: L4 Tunneling Length, in Words
+	 *
+	 * We depend on the app to set rte_mbuf.l2_len correctly.
+	 * For IP in GRE it should be set to the length of the GRE
+	 * header;
+	 * for MAC in GRE or MAC in UDP it should be set to the length
+	 * of the GRE or UDP headers plus the inner MAC up to and
+	 * including its last Ethertype.
+	 */
+	*cd_tunneling |= (tx_offload.l2_len >> 1) <<
+		I40E_TXD_CTX_QW0_NATLEN_SHIFT;
+}
+
 static inline void
 i40e_txd_enable_checksum(uint64_t ol_flags,
 			uint32_t *td_cmd,
 			uint32_t *td_offset,
-			union i40e_tx_offload tx_offload,
-			uint32_t *cd_tunneling)
+			union i40e_tx_offload tx_offload)
 {
-	/* UDP tunneling packet TX checksum offload */
-	if (ol_flags & PKT_TX_OUTER_IP_CKSUM) {
-
+	/* Set MACLEN */
+	if (ol_flags & PKT_TX_TUNNEL_MASK)
 		*td_offset |= (tx_offload.outer_l2_len >> 1)
 				<< I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
-
-		if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
-			*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
-		else if (ol_flags & PKT_TX_OUTER_IPV4)
-			*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
-		else if (ol_flags & PKT_TX_OUTER_IPV6)
-			*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
-
-		/* Now set the ctx descriptor fields */
-		*cd_tunneling |= (tx_offload.outer_l3_len >> 2) <<
-				I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT |
-				(tx_offload.l2_len >> 1) <<
-				I40E_TXD_CTX_QW0_NATLEN_SHIFT;
-
-	} else
+	else
 		*td_offset |= (tx_offload.l2_len >> 1)
 				<< I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
@@ -994,6 +469,8 @@ i40e_rx_scan_hw_ring(struct i40e_rx_queue *rxq)
 					I40E_RXD_QW1_STATUS_SHIFT;
 		}
 
+		rte_smp_rmb();
+
 		/* Compute how many status bits were set */
 		for (j = 0, nb_dd = 0; j < I40E_LOOK_AHEAD; j++)
 			nb_dd += s[j] & (1 << I40E_RX_DESC_STATUS_DD_SHIFT);
@@ -1484,7 +961,8 @@ i40e_calc_context_desc(uint64_t flags)
 {
 	static uint64_t mask = PKT_TX_OUTER_IP_CKSUM |
 		PKT_TX_TCP_SEG |
-		PKT_TX_QINQ_PKT;
+		PKT_TX_QINQ_PKT |
+		PKT_TX_TUNNEL_MASK;
 
 #ifdef RTE_LIBRTE_IEEE1588
 	mask |= PKT_TX_IEEE1588_TMST;
@@ -1506,7 +984,7 @@ i40e_set_tso_ctx(struct rte_mbuf *mbuf, union i40e_tx_offload tx_offload)
 	}
 
 	/**
-	 * in case of tunneling packet, the outer_l2_len and
+	 * in case of a non-tunneling packet, the outer_l2_len and
 	 * outer_l3_len must be 0.
*/ hdr_len = tx_offload.outer_l2_len + @@ -1623,12 +1101,15 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) /* Always enable CRC offload insertion */ td_cmd |= I40E_TX_DESC_CMD_ICRC; - /* Enable checksum offloading */ + /* Fill in tunneling parameters if necessary */ cd_tunneling_params = 0; - if (ol_flags & I40E_TX_CKSUM_OFFLOAD_MASK) { - i40e_txd_enable_checksum(ol_flags, &td_cmd, &td_offset, - tx_offload, &cd_tunneling_params); - } + if (ol_flags & PKT_TX_TUNNEL_MASK) + i40e_parse_tunneling_params(ol_flags, tx_offload, + &cd_tunneling_params); + /* Enable checksum offloading */ + if (ol_flags & I40E_TX_CKSUM_OFFLOAD_MASK) + i40e_txd_enable_checksum(ol_flags, &td_cmd, + &td_offset, tx_offload); if (nb_ctx) { /* Setup TX context descriptor if required */ @@ -2136,7 +1617,9 @@ i40e_dev_supported_ptypes_get(struct rte_eth_dev *dev) #ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC dev->rx_pkt_burst == i40e_recv_pkts_bulk_alloc || #endif - dev->rx_pkt_burst == i40e_recv_scattered_pkts) + dev->rx_pkt_burst == i40e_recv_scattered_pkts || + dev->rx_pkt_burst == i40e_recv_scattered_pkts_vec || + dev->rx_pkt_burst == i40e_recv_pkts_vec) return ptypes; return NULL; } diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h index 98179f00..ecdb13cb 100644 --- a/drivers/net/i40e/i40e_rxtx.h +++ b/drivers/net/i40e/i40e_rxtx.h @@ -255,4 +255,567 @@ void i40e_set_tx_function_flag(struct rte_eth_dev *dev, struct i40e_tx_queue *txq); void i40e_set_tx_function(struct rte_eth_dev *dev); +/* For each value it means, datasheet of hardware can tell more details + * + * @note: fix i40e_dev_supported_ptypes_get() if any change here. + */ +static inline uint32_t +i40e_rxd_pkt_type_mapping(uint8_t ptype) +{ + static const uint32_t type_table[UINT8_MAX + 1] __rte_cache_aligned = { + /* L2 types */ + /* [0] reserved */ + [1] = RTE_PTYPE_L2_ETHER, + [2] = RTE_PTYPE_L2_ETHER_TIMESYNC, + /* [3] - [5] reserved */ + [6] = RTE_PTYPE_L2_ETHER_LLDP, + /* [7] - [10] reserved */ + [11] = RTE_PTYPE_L2_ETHER_ARP, + /* [12] - [21] reserved */ + + /* Non tunneled IPv4 */ + [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_FRAG, + [23] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_NONFRAG, + [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_UDP, + /* [25] reserved */ + [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_TCP, + [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_SCTP, + [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_ICMP, + + /* IPv4 --> IPv4 */ + [29] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [30] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [31] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [32] reserved */ + [33] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [35] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + 
RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv4 --> IPv6 */ + [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [37] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [38] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [39] reserved */ + [40] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [41] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [42] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv4 --> GRE/Teredo/VXLAN */ + [43] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT, + + /* IPv4 --> GRE/Teredo/VXLAN --> IPv4 */ + [44] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [45] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [46] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [47] reserved */ + [48] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [49] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv4 --> GRE/Teredo/VXLAN --> IPv6 */ + [51] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [53] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [54] reserved */ + [55] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [56] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [57] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv4 --> GRE/Teredo/VXLAN --> MAC */ + [58] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER, + + /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */ + [59] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + 
RTE_PTYPE_INNER_L4_FRAG, + [60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [61] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [62] reserved */ + [63] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [65] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv4 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */ + [66] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [67] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [68] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [69] reserved */ + [70] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [71] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [72] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN */ + [73] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN, + + /* IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv4 */ + [74] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [75] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [76] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [77] reserved */ + [78] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [79] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [80] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* 
IPv4 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv6 */ + [81] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [82] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [83] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [84] reserved */ + [85] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [86] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [87] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* Non tunneled IPv6 */ + [88] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_FRAG, + [89] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_NONFRAG, + [90] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_UDP, + /* [91] reserved */ + [92] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_TCP, + [93] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_SCTP, + [94] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_ICMP, + + /* IPv6 --> IPv4 */ + [95] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [97] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [98] reserved */ + [99] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [100] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [101] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv6 --> IPv6 */ + [102] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [103] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [104] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [105] reserved */ + [106] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [107] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, 
+ [108] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv6 --> GRE/Teredo/VXLAN */ + [109] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT, + + /* IPv6 --> GRE/Teredo/VXLAN --> IPv4 */ + [110] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [111] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [112] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [113] reserved */ + [114] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [115] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [116] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv6 --> GRE/Teredo/VXLAN --> IPv6 */ + [117] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [118] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [119] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [120] reserved */ + [121] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [122] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [123] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv6 --> GRE/Teredo/VXLAN --> MAC */ + [124] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER, + + /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv4 */ + [125] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [126] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [127] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [128] reserved */ + [129] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [130] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [131] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + 
RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv6 --> GRE/Teredo/VXLAN --> MAC --> IPv6 */ + [132] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [133] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [134] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [135] reserved */ + [136] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [137] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [138] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN */ + [139] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN, + + /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv4 */ + [140] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [141] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [142] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [143] reserved */ + [144] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_TCP, + [145] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [146] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* IPv6 --> GRE/Teredo/VXLAN --> MAC/VLAN --> IPv6 */ + [147] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_FRAG, + [148] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_NONFRAG, + [149] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_UDP, + /* [150] reserved */ + [151] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + 
RTE_PTYPE_INNER_L4_TCP, + [152] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_SCTP, + [153] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_TUNNEL_GRENAT | + RTE_PTYPE_INNER_L2_ETHER_VLAN | + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_INNER_L4_ICMP, + + /* L2 NSH packet type */ + [154] = RTE_PTYPE_L2_ETHER_NSH, + [155] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_FRAG, + [156] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_NONFRAG, + [157] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_UDP, + [158] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_TCP, + [159] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_SCTP, + [160] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_ICMP, + [161] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_FRAG, + [162] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_NONFRAG, + [163] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_UDP, + [164] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_TCP, + [165] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_SCTP, + [166] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_ICMP, + + /* All others reserved */ + }; + + return type_table[ptype]; +} + #endif /* _I40E_RXTX_H_ */ diff --git a/drivers/net/i40e/i40e_rxtx_vec_common.h b/drivers/net/i40e/i40e_rxtx_vec_common.h new file mode 100644 index 00000000..6cb5dce9 --- /dev/null +++ b/drivers/net/i40e/i40e_rxtx_vec_common.h @@ -0,0 +1,251 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2015 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/drivers/net/i40e/i40e_rxtx_vec_common.h b/drivers/net/i40e/i40e_rxtx_vec_common.h
new file mode 100644
index 00000000..6cb5dce9
--- /dev/null
+++ b/drivers/net/i40e/i40e_rxtx_vec_common.h
@@ -0,0 +1,251 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _I40E_RXTX_VEC_COMMON_H_
+#define _I40E_RXTX_VEC_COMMON_H_
+#include <stdint.h>
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+
+#include "i40e_ethdev.h"
+#include "i40e_rxtx.h"
+
+static inline uint16_t
+reassemble_packets(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_bufs,
+		   uint16_t nb_bufs, uint8_t *split_flags)
+{
+	struct rte_mbuf *pkts[RTE_I40E_VPMD_RX_BURST]; /* finished pkts */
+	struct rte_mbuf *start = rxq->pkt_first_seg;
+	struct rte_mbuf *end = rxq->pkt_last_seg;
+	unsigned pkt_idx, buf_idx;
+
+	for (buf_idx = 0, pkt_idx = 0; buf_idx < nb_bufs; buf_idx++) {
+		if (end != NULL) {
+			/* processing a split packet */
+			end->next = rx_bufs[buf_idx];
+			rx_bufs[buf_idx]->data_len += rxq->crc_len;
+
+			start->nb_segs++;
+			start->pkt_len += rx_bufs[buf_idx]->data_len;
+			end = end->next;
+
+			if (!split_flags[buf_idx]) {
+				/* it's the last packet of the set */
+				start->hash = end->hash;
+				start->ol_flags = end->ol_flags;
+				/* we need to strip crc for the whole packet */
+				start->pkt_len -= rxq->crc_len;
+				if (end->data_len > rxq->crc_len) {
+					end->data_len -= rxq->crc_len;
+				} else {
+					/* free up last mbuf */
+					struct rte_mbuf *secondlast = start;
+
+					while (secondlast->next != end)
+						secondlast = secondlast->next;
+					secondlast->data_len -= (rxq->crc_len -
+							end->data_len);
+					secondlast->next = NULL;
+					rte_pktmbuf_free_seg(end);
+					end = secondlast;
+				}
+				pkts[pkt_idx++] = start;
+				start = end = NULL;
+			}
+		} else {
+			/* not processing a split packet */
+			if (!split_flags[buf_idx]) {
+				/* not a split packet, save and skip */
+				pkts[pkt_idx++] = rx_bufs[buf_idx];
+				continue;
+			}
+			end = start = rx_bufs[buf_idx];
+			rx_bufs[buf_idx]->data_len += rxq->crc_len;
+			rx_bufs[buf_idx]->pkt_len += rxq->crc_len;
+		}
+	}
+
+	/* save the partial packet for next time */
+	rxq->pkt_first_seg = start;
+	rxq->pkt_last_seg = end;
+	memcpy(rx_bufs, pkts, pkt_idx * (sizeof(*pkts)));
+	return pkt_idx;
+}
+
+static inline int __attribute__((always_inline))
+i40e_tx_free_bufs(struct i40e_tx_queue *txq)
+{
+	struct i40e_tx_entry *txep;
+	uint32_t n;
+	uint32_t i;
+	int nb_free = 0;
+	struct rte_mbuf *m, *free[RTE_I40E_TX_MAX_FREE_BUF_SZ];
+
+	/* check DD bits on threshold descriptor */
+	if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
+			rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) !=
+			rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
+		return 0;
+
+	n = txq->tx_rs_thresh;
+
+	/* first buffer to free from S/W ring is at index
+	 * tx_next_dd - (tx_rs_thresh - 1)
+	 */
+	txep = &txq->sw_ring[txq->tx_next_dd - (n - 1)];
+	m = __rte_pktmbuf_prefree_seg(txep[0].mbuf);
+	if (likely(m != NULL)) {
+		free[0] = m;
+		nb_free = 1;
+		for (i = 1; i < n; i++) {
+			m = __rte_pktmbuf_prefree_seg(txep[i].mbuf);
+			if (likely(m != NULL)) {
+				if (likely(m->pool == free[0]->pool)) {
+					free[nb_free++] = m;
+				} else {
+					rte_mempool_put_bulk(free[0]->pool,
+							     (void *)free,
+							     nb_free);
+					free[0] = m;
+					nb_free = 1;
+				}
+			}
+		}
+		rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
+	} else {
+		for (i = 1; i < n; i++) {
+			m = __rte_pktmbuf_prefree_seg(txep[i].mbuf);
+			if (m != NULL)
+				rte_mempool_put(m->pool, m);
+		}
+	}
+
+	/* buffers were freed, update counters */
+	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
+	txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
+	if (txq->tx_next_dd >= txq->nb_tx_desc)
+		txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
+
+	return txq->tx_rs_thresh;
+}
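i40e_tx_free_bufs above frees a whole tx_rs_thresh batch only once the hardware has set DESC_DONE on the threshold descriptor, and it batches mempool returns per pool so the common single-pool case is one bulk call. A standalone sketch of the same bulk-return idea, with generic names and a hypothetical fixed run size (not the driver code):

	#include <rte_mbuf.h>
	#include <rte_mempool.h>

	/* Illustrative bulk-return pattern: group consecutive mbufs that
	 * share a mempool and return each run with one
	 * rte_mempool_put_bulk() call instead of per-mbuf puts.
	 */
	static void
	bulk_return(struct rte_mbuf **mbufs, unsigned int n)
	{
		void *run[64];
		unsigned int cnt = 0;
		struct rte_mempool *pool = NULL;
		unsigned int i;

		for (i = 0; i < n; i++) {
			if (pool != mbufs[i]->pool || cnt == 64) {
				if (cnt)
					rte_mempool_put_bulk(pool, run, cnt);
				pool = mbufs[i]->pool;
				cnt = 0;
			}
			run[cnt++] = mbufs[i];
		}
		if (cnt)
			rte_mempool_put_bulk(pool, run, cnt);
	}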
+static inline void __attribute__((always_inline))
+tx_backlog_entry(struct i40e_tx_entry *txep,
+		 struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+	int i;
+
+	for (i = 0; i < (int)nb_pkts; ++i)
+		txep[i].mbuf = tx_pkts[i];
+}
+
+static inline void
+_i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq)
+{
+	const unsigned mask = rxq->nb_rx_desc - 1;
+	unsigned i;
+
+	if (rxq->sw_ring == NULL || rxq->rxrearm_nb >= rxq->nb_rx_desc)
+		return;
+
+	/* free all mbufs that are valid in the ring */
+	if (rxq->rxrearm_nb == 0) {
+		for (i = 0; i < rxq->nb_rx_desc; i++) {
+			if (rxq->sw_ring[i].mbuf != NULL)
+				rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
+		}
+	} else {
+		for (i = rxq->rx_tail;
+		     i != rxq->rxrearm_start;
+		     i = (i + 1) & mask) {
+			if (rxq->sw_ring[i].mbuf != NULL)
+				rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
+		}
+	}
+
+	rxq->rxrearm_nb = rxq->nb_rx_desc;
+
+	/* set all entries to NULL */
+	memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc);
+}
+
+static inline int
+i40e_rxq_vec_setup_default(struct i40e_rx_queue *rxq)
+{
+	uintptr_t p;
+	struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */
+
+	mb_def.nb_segs = 1;
+	mb_def.data_off = RTE_PKTMBUF_HEADROOM;
+	mb_def.port = rxq->port_id;
+	rte_mbuf_refcnt_set(&mb_def, 1);
+
+	/* prevent compiler reordering: rearm_data covers previous fields */
+	rte_compiler_barrier();
+	p = (uintptr_t)&mb_def.rearm_data;
+	rxq->mbuf_initializer = *(uint64_t *)p;
+	return 0;
+}
+
+static inline int
+i40e_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev)
+{
+#ifndef RTE_LIBRTE_IEEE1588
+	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+	struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
+
+#ifndef RTE_LIBRTE_I40E_RX_OLFLAGS_ENABLE
+	/* without Rx ol_flags, no VLAN/checksum flag report */
+	if (rxmode->hw_vlan_strip != 0 ||
+	    rxmode->hw_vlan_extend != 0 ||
+	    rxmode->hw_ip_checksum != 0)
+		return -1;
+#endif
+
+	/* no fdir support */
+	if (fconf->mode != RTE_FDIR_MODE_NONE)
+		return -1;
+
+	/* - no csum error report support
+	 * - no header split support
+	 */
+	if (rxmode->header_split == 1)
+		return -1;
+
+	return 0;
+#else
+	RTE_SET_USED(dev);
+	return -1;
+#endif
+}
+#endif
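i40e_rxq_vec_setup_default builds a 64-bit template covering an mbuf's rearm_data fields so the rearm loop can reinitialize a buffer with one 8-byte store instead of several field writes. A reduced sketch of the same trick; the exact fields behind rearm_data follow the rte_mbuf layout of the release in use, and the helper names here are invented for illustration:

	#include <stdint.h>
	#include <string.h>
	#include <rte_mbuf.h>

	/* Illustrative: capture the 8 bytes starting at rearm_data from a
	 * zeroed template mbuf, then stamp them onto a live mbuf in one go.
	 */
	static uint64_t
	make_rearm_template(uint16_t port_id)
	{
		struct rte_mbuf mb_def = { .buf_addr = 0 };
		uint64_t tmpl;

		mb_def.nb_segs = 1;
		mb_def.data_off = RTE_PKTMBUF_HEADROOM;
		mb_def.port = port_id;
		rte_mbuf_refcnt_set(&mb_def, 1);
		memcpy(&tmpl, &mb_def.rearm_data, sizeof(tmpl));
		return tmpl;
	}

	static void
	stamp_mbuf(struct rte_mbuf *m, uint64_t tmpl)
	{
		memcpy(&m->rearm_data, &tmpl, sizeof(tmpl));
	}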
diff --git a/drivers/net/i40e/i40e_rxtx_vec_neon.c b/drivers/net/i40e/i40e_rxtx_vec_neon.c
new file mode 100644
index 00000000..011c54e0
--- /dev/null
+++ b/drivers/net/i40e/i40e_rxtx_vec_neon.c
@@ -0,0 +1,614 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ *   Copyright(c) 2016, Linaro Limited
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+
+#include "base/i40e_prototype.h"
+#include "base/i40e_type.h"
+#include "i40e_ethdev.h"
+#include "i40e_rxtx.h"
+#include "i40e_rxtx_vec_common.h"
+
+#include <arm_neon.h>
+
+#pragma GCC diagnostic ignored "-Wcast-qual"
+
+static inline void
+i40e_rxq_rearm(struct i40e_rx_queue *rxq)
+{
+	int i;
+	uint16_t rx_id;
+	volatile union i40e_rx_desc *rxdp;
+	struct i40e_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];
+	struct rte_mbuf *mb0, *mb1;
+	uint64x2_t dma_addr0, dma_addr1;
+	uint64x2_t zero = vdupq_n_u64(0);
+	uint64_t paddr;
+	uint8x8_t p;
+
+	rxdp = rxq->rx_ring + rxq->rxrearm_start;
+
+	/* Pull 'n' more MBUFs into the software ring */
+	if (unlikely(rte_mempool_get_bulk(rxq->mp,
+					  (void *)rxep,
+					  RTE_I40E_RXQ_REARM_THRESH) < 0)) {
+		if (rxq->rxrearm_nb + RTE_I40E_RXQ_REARM_THRESH >=
+		    rxq->nb_rx_desc) {
+			for (i = 0; i < RTE_I40E_DESCS_PER_LOOP; i++) {
+				rxep[i].mbuf = &rxq->fake_mbuf;
+				vst1q_u64((uint64_t *)&rxdp[i].read, zero);
+			}
+		}
+		rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
+			RTE_I40E_RXQ_REARM_THRESH;
+		return;
+	}
+
+	p = vld1_u8((uint8_t *)&rxq->mbuf_initializer);
+
+	/* Initialize the mbufs in vector, process 2 mbufs in one loop */
+	for (i = 0; i < RTE_I40E_RXQ_REARM_THRESH; i += 2, rxep += 2) {
+		mb0 = rxep[0].mbuf;
+		mb1 = rxep[1].mbuf;
+
+		/* Flush mbuf with pkt template.
+		 * Data to be rearmed is 6 bytes long.
+		 * Though, RX will overwrite ol_flags that are coming next
+		 * anyway. So overwrite whole 8 bytes with one load:
+		 * 6 bytes of rearm_data plus first 2 bytes of ol_flags.
+		 */
+		vst1_u8((uint8_t *)&mb0->rearm_data, p);
+		paddr = mb0->buf_physaddr + RTE_PKTMBUF_HEADROOM;
+		dma_addr0 = vdupq_n_u64(paddr);
+
+		/* flush desc with pa dma_addr */
+		vst1q_u64((uint64_t *)&rxdp++->read, dma_addr0);
+
+		vst1_u8((uint8_t *)&mb1->rearm_data, p);
+		paddr = mb1->buf_physaddr + RTE_PKTMBUF_HEADROOM;
+		dma_addr1 = vdupq_n_u64(paddr);
+		vst1q_u64((uint64_t *)&rxdp++->read, dma_addr1);
+	}
+
+	rxq->rxrearm_start += RTE_I40E_RXQ_REARM_THRESH;
+	if (rxq->rxrearm_start >= rxq->nb_rx_desc)
+		rxq->rxrearm_start = 0;
+
+	rxq->rxrearm_nb -= RTE_I40E_RXQ_REARM_THRESH;
+
+	rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
+			   (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));
+
+	/* Update the tail pointer on the NIC */
+	I40E_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
+}
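The rearm loop above refreshes two descriptors per iteration: one 8-byte NEON store rewrites the mbuf's rearm_data from the prebuilt template, and vdupq_n_u64/vst1q_u64 write the buffer's DMA address into both qwords of the 16-byte descriptor. A self-contained fragment showing just that store pattern; the plain uint64_t array stands in for the real descriptor ring and is an assumption for illustration:

	#include <stdint.h>
	#include <arm_neon.h>

	/* Illustrative: fill one 16-byte RX descriptor (both qwords set to
	 * the packet-buffer DMA address here) with a single 128-bit store,
	 * as the rearm loop above does.
	 */
	static inline void
	write_desc(uint64_t *desc, uint64_t dma_addr)
	{
		uint64x2_t v = vdupq_n_u64(dma_addr);

		vst1q_u64(desc, v);
	}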
+/* Handling the offload flags (olflags) field takes computation
+ * time when receiving packets. Therefore we provide a flag to disable
+ * the processing of the olflags field when it is not needed. This
+ * gives improved performance, at the cost of losing the offload info
+ * in the received packet.
+ */
+#ifdef RTE_LIBRTE_I40E_RX_OLFLAGS_ENABLE
+
+static inline void
+desc_to_olflags_v(uint64x2_t descs[4], struct rte_mbuf **rx_pkts)
+{
+	uint32x4_t vlan0, vlan1, rss, l3_l4e;
+
+	/* mask everything except RSS, flow director and VLAN flags
+	 * bit2 is for VLAN tag, bit11 for flow director indication
+	 * bit13:12 for RSS indication.
+	 */
+	const uint32x4_t rss_vlan_msk = {
+			0x1c03804, 0x1c03804, 0x1c03804, 0x1c03804};
+
+	/* map rss and vlan type to rss hash and vlan flag */
+	const uint8x16_t vlan_flags = {
+			0, 0, 0, 0,
+			PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED, 0, 0, 0,
+			0, 0, 0, 0,
+			0, 0, 0, 0};
+
+	const uint8x16_t rss_flags = {
+			0, PKT_RX_FDIR, 0, 0,
+			0, 0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH | PKT_RX_FDIR,
+			0, 0, 0, 0,
+			0, 0, 0, 0};
+
+	const uint8x16_t l3_l4e_flags = {
+			0,
+			PKT_RX_IP_CKSUM_BAD,
+			PKT_RX_L4_CKSUM_BAD,
+			PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD,
+			PKT_RX_EIP_CKSUM_BAD,
+			PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD,
+			PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD,
+			PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
+				PKT_RX_IP_CKSUM_BAD,
+			0, 0, 0, 0, 0, 0, 0, 0};
+
+	vlan0 = vzipq_u32(vreinterpretq_u32_u64(descs[0]),
+			  vreinterpretq_u32_u64(descs[2])).val[1];
+	vlan1 = vzipq_u32(vreinterpretq_u32_u64(descs[1]),
+			  vreinterpretq_u32_u64(descs[3])).val[1];
+	vlan0 = vzipq_u32(vlan0, vlan1).val[0];
+
+	vlan1 = vandq_u32(vlan0, rss_vlan_msk);
+	vlan0 = vreinterpretq_u32_u8(vqtbl1q_u8(vlan_flags,
+						vreinterpretq_u8_u32(vlan1)));
+
+	rss = vshrq_n_u32(vlan1, 11);
+	rss = vreinterpretq_u32_u8(vqtbl1q_u8(rss_flags,
+					      vreinterpretq_u8_u32(rss)));
+
+	l3_l4e = vshrq_n_u32(vlan1, 22);
+	l3_l4e = vreinterpretq_u32_u8(vqtbl1q_u8(l3_l4e_flags,
+					vreinterpretq_u8_u32(l3_l4e)));
+
+	vlan0 = vorrq_u32(vlan0, rss);
+	vlan0 = vorrq_u32(vlan0, l3_l4e);
+
+	rx_pkts[0]->ol_flags = vgetq_lane_u32(vlan0, 0);
+	rx_pkts[1]->ol_flags = vgetq_lane_u32(vlan0, 1);
+	rx_pkts[2]->ol_flags = vgetq_lane_u32(vlan0, 2);
+	rx_pkts[3]->ol_flags = vgetq_lane_u32(vlan0, 3);
+}
+#else
+#define desc_to_olflags_v(descs, rx_pkts) do {} while (0)
+#endif
+
+#define PKTLEN_SHIFT     10
+
+#define I40E_VPMD_DESC_DD_MASK	0x0001000100010001ULL
+
+static inline void
+desc_to_ptype_v(uint64x2_t descs[4], struct rte_mbuf **rx_pkts)
+{
+	int i;
+	uint8_t ptype;
+	uint8x16_t tmp;
+
+	for (i = 0; i < 4; i++) {
+		tmp = vreinterpretq_u8_u64(vshrq_n_u64(descs[i], 30));
+		ptype = vgetq_lane_u8(tmp, 8);
+		/* index with i, not 0, so each packet gets its own ptype */
+		rx_pkts[i]->packet_type = i40e_rxd_pkt_type_mapping(ptype);
+	}
+}
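desc_to_olflags_v condenses four descriptors' status words into one vector, masks the VLAN/RSS/checksum bits, and converts each small bit-field to mbuf flags with vqtbl1q_u8 table lookups instead of per-packet branches. A minimal stand-alone example of that lookup idiom, with a toy table and inputs rather than the driver's tables:

	#include <stdint.h>
	#include <arm_neon.h>

	/* Illustrative: map 16 small indices to flag bytes in one
	 * instruction via a NEON table lookup, the same idiom
	 * desc_to_olflags_v uses above. vqtbl1q_u8 yields 0 for any
	 * out-of-range index (>= 16), which conveniently clears
	 * unmapped values.
	 */
	static uint8x16_t
	lookup_flags(uint8x16_t indices)
	{
		const uint8x16_t table = {
			0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		};

		return vqtbl1q_u8(table, indices);
	}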
+/*
+ * Notice:
+ * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
+ * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST
+ *   numbers of DD bits
+ */
+static inline uint16_t
+_recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+		   uint16_t nb_pkts, uint8_t *split_packet)
+{
+	volatile union i40e_rx_desc *rxdp;
+	struct i40e_rx_entry *sw_ring;
+	uint16_t nb_pkts_recd;
+	int pos;
+	uint64_t var;
+
+	/* mask to shuffle from desc. to mbuf */
+	uint8x16_t shuf_msk = {
+		0xFF, 0xFF,   /* pkt_type set as unknown */
+		0xFF, 0xFF,   /* pkt_type set as unknown */
+		14, 15,       /* octet 15~14, low 16 bits pkt_len */
+		0xFF, 0xFF,   /* skip high 16 bits pkt_len, zero out */
+		14, 15,       /* octet 15~14, 16 bits data_len */
+		2, 3,         /* octet 2~3, low 16 bits vlan_macip */
+		4, 5, 6, 7    /* octet 4~7, 32 bits rss */
+		};
+
+	uint8x16_t eop_check = {
+		0x02, 0x00, 0x02, 0x00,
+		0x02, 0x00, 0x02, 0x00,
+		0x00, 0x00, 0x00, 0x00,
+		0x00, 0x00, 0x00, 0x00
+		};
+
+	uint16x8_t crc_adjust = {
+		0, 0,         /* ignore pkt_type field */
+		rxq->crc_len, /* sub crc on pkt_len */
+		0,            /* ignore high-16bits of pkt_len */
+		rxq->crc_len, /* sub crc on data_len */
+		0, 0, 0       /* ignore non-length fields */
+		};
+
+	/* nb_pkts has to be less than or equal to RTE_I40E_MAX_RX_BURST */
+	nb_pkts = RTE_MIN(nb_pkts, RTE_I40E_MAX_RX_BURST);
+
+	/* nb_pkts has to be floor-aligned to RTE_I40E_DESCS_PER_LOOP */
+	nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_I40E_DESCS_PER_LOOP);
+
+	/* Just the act of getting into the function from the application is
+	 * going to cost about 7 cycles
+	 */
+	rxdp = rxq->rx_ring + rxq->rx_tail;
+
+	rte_prefetch_non_temporal(rxdp);
+
+	/* See if we need to rearm the RX queue - gives the prefetch a bit
+	 * of time to act
+	 */
+	if (rxq->rxrearm_nb > RTE_I40E_RXQ_REARM_THRESH)
+		i40e_rxq_rearm(rxq);
+
+	/* Before we start moving massive data around, check to see if
+	 * there is actually a packet available
+	 */
+	if (!(rxdp->wb.qword1.status_error_len &
+	      rte_cpu_to_le_32(1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
+		return 0;
+
+	/* Cache is empty -> need to scan the buffer rings, but first move
+	 * the next 'n' mbufs into the cache
+	 */
+	sw_ring = &rxq->sw_ring[rxq->rx_tail];
+	/* A. load 4 packets in one loop
+	 * [A*. mask out 4 unused dirty fields in desc]
+	 * B. copy 4 mbuf pointers from sw_ring to rx_pkts
+	 * C. calc the number of DD bits among the 4 packets
+	 * [C*. extract the end-of-packet bit, if requested]
+	 * D. fill info. from desc to mbuf
+	 */
+
+	for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts;
+	     pos += RTE_I40E_DESCS_PER_LOOP,
+	     rxdp += RTE_I40E_DESCS_PER_LOOP) {
+		uint64x2_t descs[RTE_I40E_DESCS_PER_LOOP];
+		uint8x16_t pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
+		uint16x8x2_t sterr_tmp1, sterr_tmp2;
+		uint64x2_t mbp1, mbp2;
+		uint16x8_t staterr;
+		uint16x8_t tmp;
+		uint64_t stat;
+
+		int32x4_t len_shl = {0, 0, 0, PKTLEN_SHIFT};
+
+		/* B.1 load 2 mbuf pointers */
+		mbp1 = vld1q_u64((uint64_t *)&sw_ring[pos]);
+		/* Read desc statuses backwards to avoid race condition */
+		/* A.1 load 4 pkts desc */
+		descs[3] = vld1q_u64((uint64_t *)(rxdp + 3));
+		rte_rmb();
+
+		/* B.2 copy 2 mbuf pointers into rx_pkts */
+		vst1q_u64((uint64_t *)&rx_pkts[pos], mbp1);
+
+		/* B.1 load 2 more mbuf pointers */
+		mbp2 = vld1q_u64((uint64_t *)&sw_ring[pos + 2]);
+
+		descs[2] = vld1q_u64((uint64_t *)(rxdp + 2));
+		/* A.1 load remaining 2 pkts desc */
+		descs[1] = vld1q_u64((uint64_t *)(rxdp + 1));
+		descs[0] = vld1q_u64((uint64_t *)(rxdp));
+
+		/* B.2 copy 2 mbuf pointers into rx_pkts */
+		vst1q_u64((uint64_t *)&rx_pkts[pos + 2], mbp2);
+
+		if (split_packet) {
+			rte_mbuf_prefetch_part2(rx_pkts[pos]);
+			rte_mbuf_prefetch_part2(rx_pkts[pos + 1]);
+			rte_mbuf_prefetch_part2(rx_pkts[pos + 2]);
+			rte_mbuf_prefetch_part2(rx_pkts[pos + 3]);
+		}
+
+		/* avoid compiler reorder optimization */
+		rte_compiler_barrier();
+
+		/* pkt 3,4 shift the pktlen field to be 16-bit aligned */
+		uint32x4_t len3 = vshlq_u32(vreinterpretq_u32_u64(descs[3]),
+					    len_shl);
+		descs[3] = vreinterpretq_u64_u32(len3);
+		uint32x4_t len2 = vshlq_u32(vreinterpretq_u32_u64(descs[2]),
+					    len_shl);
+		descs[2] = vreinterpretq_u64_u32(len2);
+
+		/* D.1 pkt 3,4 convert format from desc to pktmbuf */
+		pkt_mb4 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[3]), shuf_msk);
+		pkt_mb3 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[2]), shuf_msk);
+
+		/* C.1 4=>2 filter staterr info only */
+		sterr_tmp2 = vzipq_u16(vreinterpretq_u16_u64(descs[1]),
+				       vreinterpretq_u16_u64(descs[3]));
+		/* C.1 4=>2 filter staterr info only */
+		sterr_tmp1 = vzipq_u16(vreinterpretq_u16_u64(descs[0]),
+				       vreinterpretq_u16_u64(descs[2]));
+
+		/* C.2 get 4 pkts staterr value */
+		staterr = vzipq_u16(sterr_tmp1.val[1],
+				    sterr_tmp2.val[1]).val[0];
+		stat = vgetq_lane_u64(vreinterpretq_u64_u16(staterr), 0);
+
+		desc_to_olflags_v(descs, &rx_pkts[pos]);
+
+		/* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
+		tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb4), crc_adjust);
+		pkt_mb4 = vreinterpretq_u8_u16(tmp);
+		tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb3), crc_adjust);
+		pkt_mb3 = vreinterpretq_u8_u16(tmp);
+
+		/* pkt 1,2 shift the pktlen field to be 16-bit aligned */
+		uint32x4_t len1 = vshlq_u32(vreinterpretq_u32_u64(descs[1]),
+					    len_shl);
+		descs[1] = vreinterpretq_u64_u32(len1);
+		uint32x4_t len0 = vshlq_u32(vreinterpretq_u32_u64(descs[0]),
+					    len_shl);
+		descs[0] = vreinterpretq_u64_u32(len0);
+
+		/* D.1 pkt 1,2 convert format from desc to pktmbuf */
+		pkt_mb2 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[1]), shuf_msk);
+		pkt_mb1 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[0]), shuf_msk);
+
+		/* D.3 copy final 3,4 data to rx_pkts */
+		vst1q_u8((void *)&rx_pkts[pos + 3]->rx_descriptor_fields1,
+			 pkt_mb4);
+		vst1q_u8((void *)&rx_pkts[pos + 2]->rx_descriptor_fields1,
+			 pkt_mb3);
+
+		/* D.2 pkt 1,2 set in_port/nb_seg and remove crc */
+		tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb2), crc_adjust);
+		pkt_mb2 = vreinterpretq_u8_u16(tmp);
+		tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb1), crc_adjust);
+		pkt_mb1 = vreinterpretq_u8_u16(tmp);
+		/* C* extract and record EOP bit */
+		if (split_packet) {
+			uint8x16_t eop_shuf_mask = {
+					0x00, 0x02, 0x04, 0x06,
+					0xFF, 0xFF, 0xFF, 0xFF,
+					0xFF, 0xFF, 0xFF, 0xFF,
+					0xFF, 0xFF, 0xFF, 0xFF};
+			uint8x16_t eop_bits;
+
+			/* and with mask to extract bits, flipping 1-0 */
+			eop_bits = vmvnq_u8(vreinterpretq_u8_u16(staterr));
+			eop_bits = vandq_u8(eop_bits, eop_check);
+			/* the staterr values are not in order, as the count
+			 * of dd bits doesn't care. However, for end of
+			 * packet tracking, we do care, so shuffle. This also
+			 * compresses the 32-bit values to 8-bit
+			 */
+			eop_bits = vqtbl1q_u8(eop_bits, eop_shuf_mask);
+
+			/* store the resulting 32-bit value */
+			vst1q_lane_u32((uint32_t *)split_packet,
+				       vreinterpretq_u32_u8(eop_bits), 0);
+			split_packet += RTE_I40E_DESCS_PER_LOOP;
+
+			/* zero-out next pointers */
+			rx_pkts[pos]->next = NULL;
+			rx_pkts[pos + 1]->next = NULL;
+			rx_pkts[pos + 2]->next = NULL;
+			rx_pkts[pos + 3]->next = NULL;
+		}
+
+		rte_prefetch_non_temporal(rxdp + RTE_I40E_DESCS_PER_LOOP);
+
+		/* D.3 copy final 1,2 data to rx_pkts */
+		vst1q_u8((void *)&rx_pkts[pos + 1]->rx_descriptor_fields1,
+			 pkt_mb2);
+		vst1q_u8((void *)&rx_pkts[pos]->rx_descriptor_fields1,
+			 pkt_mb1);
+		desc_to_ptype_v(descs, &rx_pkts[pos]);
+		/* C.4 calc available number of desc */
+		var = __builtin_popcountll(stat & I40E_VPMD_DESC_DD_MASK);
+		nb_pkts_recd += var;
+		if (likely(var != RTE_I40E_DESCS_PER_LOOP))
+			break;
+	}
+
+	/* Update our internal tail pointer */
+	rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd);
+	rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1));
+	rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd);
+
+	return nb_pkts_recd;
+}
+
+/*
+ * Notice:
+ * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
+ * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST
+ *   numbers of DD bits
+ */
+uint16_t
+i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+		   uint16_t nb_pkts)
+{
+	return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
+}
+
+/* vPMD receive routine that reassembles scattered packets
+ * Notice:
+ * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
+ * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST
+ *   numbers of DD bits
+ */
+uint16_t
+i40e_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+			     uint16_t nb_pkts)
+{
+	struct i40e_rx_queue *rxq = rx_queue;
+	uint8_t split_flags[RTE_I40E_VPMD_RX_BURST] = {0};
+
+	/* get some new buffers */
+	uint16_t nb_bufs = _recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts,
+					      split_flags);
+	if (nb_bufs == 0)
+		return 0;
+
+	/* happy day case, full burst + no packets to be joined */
+	const uint64_t *split_fl64 = (uint64_t *)split_flags;
+
+	if (rxq->pkt_first_seg == NULL &&
+	    split_fl64[0] == 0 && split_fl64[1] == 0 &&
+	    split_fl64[2] == 0 && split_fl64[3] == 0)
+		return nb_bufs;
+
+	/* reassemble any packets that need reassembly */
+	unsigned i = 0;
+
+	if (rxq->pkt_first_seg == NULL) {
+		/* find the first split flag, and only reassemble from there */
+		while (i < nb_bufs && !split_flags[i])
+			i++;
+		if (i == nb_bufs)
+			return nb_bufs;
+	}
+	return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
+				      &split_flags[i]);
+}
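i40e_recv_scattered_pkts_vec first reads the 32-byte split_flags array back as four 64-bit words, so the common no-split case costs four compares rather than 32 byte tests. A small stand-alone sketch of that test; the size follows RTE_I40E_VPMD_RX_BURST = 32 as used above, and memcpy is used here to sidestep the strict-aliasing cast the driver performs directly:

	#include <stdint.h>
	#include <string.h>
	#include <stdbool.h>

	/* Illustrative: check 32 per-packet flag bytes for "any set" by
	 * treating them as four 64-bit words.
	 */
	static bool
	any_split(const uint8_t split_flags[32])
	{
		uint64_t w[4];

		memcpy(w, split_flags, sizeof(w));
		return (w[0] | w[1] | w[2] | w[3]) != 0;
	}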
+static inline void
+vtx1(volatile struct i40e_tx_desc *txdp,
+     struct rte_mbuf *pkt, uint64_t flags)
+{
+	uint64_t high_qw = (I40E_TX_DESC_DTYPE_DATA |
+			((uint64_t)flags << I40E_TXD_QW1_CMD_SHIFT) |
+			((uint64_t)pkt->data_len <<
+				I40E_TXD_QW1_TX_BUF_SZ_SHIFT));
+
+	uint64x2_t descriptor = {pkt->buf_physaddr + pkt->data_off, high_qw};
+	vst1q_u64((uint64_t *)txdp, descriptor);
+}
+
+static inline void
+vtx(volatile struct i40e_tx_desc *txdp,
+    struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags)
+{
+	int i;
+
+	for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt)
+		vtx1(txdp, *pkt, flags);
+}
+
+uint16_t
+i40e_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+		   uint16_t nb_pkts)
+{
+	struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue;
+	volatile struct i40e_tx_desc *txdp;
+	struct i40e_tx_entry *txep;
+	uint16_t n, nb_commit, tx_id;
+	uint64_t flags = I40E_TD_CMD;
+	uint64_t rs = I40E_TX_DESC_CMD_RS | I40E_TD_CMD;
+	int i;
+
+	/* crossing the tx_rs_thresh boundary is not allowed */
+	nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);
+
+	if (txq->nb_tx_free < txq->tx_free_thresh)
+		i40e_tx_free_bufs(txq);
+
+	nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
+	if (unlikely(nb_pkts == 0))
+		return 0;
+
+	tx_id = txq->tx_tail;
+	txdp = &txq->tx_ring[tx_id];
+	txep = &txq->sw_ring[tx_id];
+
+	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
+
+	n = (uint16_t)(txq->nb_tx_desc - tx_id);
+	if (nb_commit >= n) {
+		tx_backlog_entry(txep, tx_pkts, n);
+
+		for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp)
+			vtx1(txdp, *tx_pkts, flags);
+
+		vtx1(txdp, *tx_pkts++, rs);
+
+		nb_commit = (uint16_t)(nb_commit - n);
+
+		tx_id = 0;
+		txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
+
+		/* avoid reaching the end of the ring */
+		txdp = &txq->tx_ring[tx_id];
+		txep = &txq->sw_ring[tx_id];
+	}
+
+	tx_backlog_entry(txep, tx_pkts, nb_commit);
+
+	vtx(txdp, tx_pkts, nb_commit, flags);
+
+	tx_id = (uint16_t)(tx_id + nb_commit);
+	if (tx_id > txq->tx_next_rs) {
+		txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
+			rte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) <<
+					 I40E_TXD_QW1_CMD_SHIFT);
+		txq->tx_next_rs =
+			(uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
+	}
+
+	txq->tx_tail = tx_id;
+
+	I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
+
+	return nb_pkts;
+}
+
+void __attribute__((cold))
+i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq)
+{
+	_i40e_rx_queue_release_mbufs_vec(rxq);
+}
+
+int __attribute__((cold))
+i40e_rxq_vec_setup(struct i40e_rx_queue *rxq)
+{
+	return i40e_rxq_vec_setup_default(rxq);
+}
+
+int __attribute__((cold))
+i40e_txq_vec_setup(struct i40e_tx_queue __rte_unused *txq)
+{
+	return 0;
+}
+
+int __attribute__((cold))
+i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
+{
+	return i40e_rx_vec_dev_conf_condition_check_default(dev);
+}
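i40e_xmit_pkts_vec writes plain data descriptors, requests a completion (RS) only on the descriptor that crosses tx_next_rs, and splits a burst in two runs when it would wrap past the ring end. A compact sketch of just the wrap handling, over a generic power-of-two ring of opaque 64-bit slots (names and types invented for illustration):

	#include <stdint.h>

	/* Illustrative: copy nb items into a power-of-two ring in at most
	 * two runs, mirroring the two tx_backlog_entry()/vtx() calls above.
	 * Returns the new tail index.
	 */
	static uint16_t
	ring_put(uint64_t *ring, uint16_t ring_size, uint16_t tail,
		 const uint64_t *items, uint16_t nb)
	{
		uint16_t first = (uint16_t)(ring_size - tail);
		uint16_t i;

		if (nb < first)
			first = nb;
		for (i = 0; i < first; i++)
			ring[tail + i] = items[i];	/* run up to ring end */
		for (i = first; i < nb; i++)
			ring[i - first] = items[i];	/* wrapped run from 0 */
		return (uint16_t)((tail + nb) & (ring_size - 1));
	}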
diff --git a/drivers/net/i40e/i40e_rxtx_vec.c b/drivers/net/i40e/i40e_rxtx_vec_sse.c
index a9649d35..7c84a41a 100644
--- a/drivers/net/i40e/i40e_rxtx_vec.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_sse.c
@@ -39,6 +39,7 @@
 #include "base/i40e_type.h"
 #include "i40e_ethdev.h"
 #include "i40e_rxtx.h"
+#include "i40e_rxtx_vec_common.h"
 
 #include <tmmintrin.h>
 
@@ -138,19 +139,14 @@ i40e_rxq_rearm(struct i40e_rx_queue *rxq)
 static inline void
 desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
 {
-	__m128i vlan0, vlan1, rss;
-	union {
-		uint16_t e[4];
-		uint64_t dword;
-	} vol;
+	__m128i vlan0, vlan1, rss, l3_l4e;
 
 	/* mask everything except RSS, flow director and VLAN flags
 	 * bit2 is for VLAN tag, bit11 for flow director indication
 	 * bit13:12 for RSS indication.
 	 */
-	const __m128i rss_vlan_msk = _mm_set_epi16(
-			0x0000, 0x0000, 0x0000, 0x0000,
-			0x3804, 0x3804, 0x3804, 0x3804);
+	const __m128i rss_vlan_msk = _mm_set_epi32(
+			0x1c03804, 0x1c03804, 0x1c03804, 0x1c03804);
 
 	/* map rss and vlan type to rss hash and vlan flag */
 	const __m128i vlan_flags = _mm_set_epi8(0, 0, 0, 0,
@@ -163,23 +159,36 @@ desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
 			PKT_RX_RSS_HASH | PKT_RX_FDIR, PKT_RX_RSS_HASH, 0, 0,
 			0, 0, PKT_RX_FDIR, 0);
 
-	vlan0 = _mm_unpackhi_epi16(descs[0], descs[1]);
-	vlan1 = _mm_unpackhi_epi16(descs[2], descs[3]);
-	vlan0 = _mm_unpacklo_epi32(vlan0, vlan1);
+	const __m128i l3_l4e_flags = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
+			PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
+				PKT_RX_IP_CKSUM_BAD,
+			PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD,
+			PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD,
+			PKT_RX_EIP_CKSUM_BAD,
+			PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD,
+			PKT_RX_L4_CKSUM_BAD,
+			PKT_RX_IP_CKSUM_BAD,
+			0);
+
+	vlan0 = _mm_unpackhi_epi32(descs[0], descs[1]);
+	vlan1 = _mm_unpackhi_epi32(descs[2], descs[3]);
+	vlan0 = _mm_unpacklo_epi64(vlan0, vlan1);
 
 	vlan1 = _mm_and_si128(vlan0, rss_vlan_msk);
 	vlan0 = _mm_shuffle_epi8(vlan_flags, vlan1);
 
-	rss = _mm_srli_epi16(vlan1, 11);
+	rss = _mm_srli_epi32(vlan1, 11);
 	rss = _mm_shuffle_epi8(rss_flags, rss);
 
+	l3_l4e = _mm_srli_epi32(vlan1, 22);
+	l3_l4e = _mm_shuffle_epi8(l3_l4e_flags, l3_l4e);
+
 	vlan0 = _mm_or_si128(vlan0, rss);
+	vlan0 = _mm_or_si128(vlan0, l3_l4e);
 
-	vol.dword = _mm_cvtsi128_si64(vlan0);
-
-	rx_pkts[0]->ol_flags = vol.e[0];
-	rx_pkts[1]->ol_flags = vol.e[1];
-	rx_pkts[2]->ol_flags = vol.e[2];
-	rx_pkts[3]->ol_flags = vol.e[3];
+	rx_pkts[0]->ol_flags = _mm_extract_epi16(vlan0, 0);
+	rx_pkts[1]->ol_flags = _mm_extract_epi16(vlan0, 2);
+	rx_pkts[2]->ol_flags = _mm_extract_epi16(vlan0, 4);
+	rx_pkts[3]->ol_flags = _mm_extract_epi16(vlan0, 6);
 }
 #else
 #define desc_to_olflags_v(desc, rx_pkts) do {} while (0)
@@ -187,6 +196,21 @@ desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
 
 #define PKTLEN_SHIFT     10
 
+static inline void
+desc_to_ptype_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
+{
+	__m128i ptype0 = _mm_unpackhi_epi64(descs[0], descs[1]);
+	__m128i ptype1 = _mm_unpackhi_epi64(descs[2], descs[3]);
+
+	ptype0 = _mm_srli_epi64(ptype0, 30);
+	ptype1 = _mm_srli_epi64(ptype1, 30);
+
+	rx_pkts[0]->packet_type =
+		i40e_rxd_pkt_type_mapping(_mm_extract_epi8(ptype0, 0));
+	rx_pkts[1]->packet_type =
+		i40e_rxd_pkt_type_mapping(_mm_extract_epi8(ptype0, 8));
+	rx_pkts[2]->packet_type =
+		i40e_rxd_pkt_type_mapping(_mm_extract_epi8(ptype1, 0));
+	rx_pkts[3]->packet_type =
+		i40e_rxd_pkt_type_mapping(_mm_extract_epi8(ptype1, 8));
+}
+
 /*
  * Notice:
  * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
@@ -224,7 +248,7 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 	 */
 	rxdp = rxq->rx_ring + rxq->rx_tail;
 
-	_mm_prefetch((const void *)rxdp, _MM_HINT_T0);
+	rte_prefetch0(rxdp);
 
 	/* See if we need to rearm the RX queue - gives the prefetch a bit
 	 * of time to act
@@ -283,6 +307,7 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 		/* A.1 load 4 pkts desc */
 		descs[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));
 		rte_compiler_barrier();
+
 		/* B.2 copy 2 mbuf point into rx_pkts  */
 		_mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1);
 
@@ -395,6 +420,7 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 							 pkt_mb2);
 		_mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1,
 				 pkt_mb1);
+		desc_to_ptype_v(descs, &rx_pkts[pos]);
 		/* C.4 calc avaialbe number of desc */
 		var = __builtin_popcountll(_mm_cvtsi128_si64(staterr));
 		nb_pkts_recd += var;
@@ -423,68 +449,6 @@ i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
 	return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
 }
 
-static inline uint16_t
-reassemble_packets(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_bufs,
-		   uint16_t nb_bufs, uint8_t *split_flags)
-{
-	struct rte_mbuf *pkts[RTE_I40E_VPMD_RX_BURST]; /*finished pkts*/
-	struct rte_mbuf *start = rxq->pkt_first_seg;
-	struct rte_mbuf *end = rxq->pkt_last_seg;
-	unsigned pkt_idx, buf_idx;
-
-	for (buf_idx = 0, pkt_idx = 0; buf_idx < nb_bufs; buf_idx++) {
-		if (end != NULL) {
-			/* processing a split packet */
-			end->next = rx_bufs[buf_idx];
-			rx_bufs[buf_idx]->data_len += rxq->crc_len;
-
-			start->nb_segs++;
-			start->pkt_len += rx_bufs[buf_idx]->data_len;
-			end = end->next;
-
-			if (!split_flags[buf_idx]) {
-				/* it's the last packet of the set */
-				start->hash = end->hash;
-				start->ol_flags = end->ol_flags;
-				/* we need to strip crc for the whole packet */
-				start->pkt_len -= rxq->crc_len;
-				if (end->data_len > rxq->crc_len) {
-					end->data_len -= rxq->crc_len;
-				} else {
-					/* free up last mbuf */
-					struct rte_mbuf *secondlast = start;
-
-					while (secondlast->next != end)
-						secondlast = secondlast->next;
-					secondlast->data_len -= (rxq->crc_len -
-							end->data_len);
-					secondlast->next = NULL;
-					rte_pktmbuf_free_seg(end);
-					end = secondlast;
-				}
-				pkts[pkt_idx++] = start;
-				start = end = NULL;
-			}
-		} else {
-			/* not processing a split packet */
-			if (!split_flags[buf_idx]) {
-				/* not a split packet, save and skip */
-				pkts[pkt_idx++] = rx_bufs[buf_idx];
-				continue;
-			}
-			end = start = rx_bufs[buf_idx];
-			rx_bufs[buf_idx]->data_len += rxq->crc_len;
-			rx_bufs[buf_idx]->pkt_len += rxq->crc_len;
-		}
-	}
-
-	/* save the partial packet for next time */
-	rxq->pkt_first_seg = start;
-	rxq->pkt_last_seg = end;
-	memcpy(rx_bufs, pkts, pkt_idx * (sizeof(*pkts)));
-	return pkt_idx;
-}
-
 /* vPMD receive routine that reassembles scattered packets
  * Notice:
  * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
@@ -550,73 +514,6 @@ vtx(volatile struct i40e_tx_desc *txdp,
 		vtx1(txdp, *pkt, flags);
 }
 
-static inline int __attribute__((always_inline))
-i40e_tx_free_bufs(struct i40e_tx_queue *txq)
-{
-	struct i40e_tx_entry *txep;
-	uint32_t n;
-	uint32_t i;
-	int nb_free = 0;
-	struct rte_mbuf *m, *free[RTE_I40E_TX_MAX_FREE_BUF_SZ];
-
-	/* check DD bits on threshold descriptor */
-	if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
-			rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) !=
-			rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
-		return 0;
-
-	n = txq->tx_rs_thresh;
-
-	/* first buffer to free from S/W ring is at index
-	 * tx_next_dd - (tx_rs_thresh-1)
-	 */
-	txep = &txq->sw_ring[txq->tx_next_dd - (n - 1)];
-	m = __rte_pktmbuf_prefree_seg(txep[0].mbuf);
-	if (likely(m != NULL)) {
-		free[0] = m;
-		nb_free = 1;
-		for (i = 1; i < n; i++) {
-			m = __rte_pktmbuf_prefree_seg(txep[i].mbuf);
-			if (likely(m != NULL)) {
-				if (likely(m->pool == free[0]->pool)) {
-					free[nb_free++] = m;
-				} else {
-					rte_mempool_put_bulk(free[0]->pool,
-							     (void *)free,
-							     nb_free);
-					free[0] = m;
-					nb_free = 1;
-				}
-			}
-		}
-		rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
-	} else {
-		for (i = 1; i < n; i++) {
-			m = __rte_pktmbuf_prefree_seg(txep[i].mbuf);
-			if (m != NULL)
-				rte_mempool_put(m->pool, m);
-		}
-	}
-
-	/* buffers were freed, update counters */
-	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
-	txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
-	if (txq->tx_next_dd >= txq->nb_tx_desc)
-		txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
-
-	return txq->tx_rs_thresh;
-}
-
-static inline void __attribute__((always_inline))
-tx_backlog_entry(struct i40e_tx_entry *txep,
-		 struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
-{
-	int i;
-
-	for (i = 0; i < (int)nb_pkts; ++i)
-		txep[i].mbuf = tx_pkts[i];
-}
-
 uint16_t
 i40e_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 		   uint16_t nb_pkts)
@@ -687,49 +584,13 @@ i40e_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 void __attribute__((cold))
 i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq)
 {
-	const unsigned mask = rxq->nb_rx_desc - 1;
-	unsigned i;
-
-	if (rxq->sw_ring == NULL || rxq->rxrearm_nb >= rxq->nb_rx_desc)
-		return;
-
-	/* free all mbufs that are valid in the ring */
-	if (rxq->rxrearm_nb == 0) {
-		for (i = 0; i < rxq->nb_rx_desc; i++) {
-			if (rxq->sw_ring[i].mbuf != NULL)
-				rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
-		}
-	} else {
-		for (i = rxq->rx_tail;
-		     i != rxq->rxrearm_start;
-		     i = (i + 1) & mask) {
-			if (rxq->sw_ring[i].mbuf != NULL)
-				rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
-		}
-	}
-
-	rxq->rxrearm_nb = rxq->nb_rx_desc;
-
-	/* set all entries to NULL */
-	memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc);
+	_i40e_rx_queue_release_mbufs_vec(rxq);
 }
 
 int __attribute__((cold))
 i40e_rxq_vec_setup(struct i40e_rx_queue *rxq)
 {
-	uintptr_t p;
-	struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */
-
-	mb_def.nb_segs = 1;
-	mb_def.data_off = RTE_PKTMBUF_HEADROOM;
-	mb_def.port = rxq->port_id;
-	rte_mbuf_refcnt_set(&mb_def, 1);
-
-	/* prevent compiler reordering: rearm_data covers previous fields */
-	rte_compiler_barrier();
-	p = (uintptr_t)&mb_def.rearm_data;
-	rxq->mbuf_initializer = *(uint64_t *)p;
-	return 0;
+	return i40e_rxq_vec_setup_default(rxq);
 }
 
 int __attribute__((cold))
@@ -742,34 +603,10 @@ int __attribute__((cold))
 i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
 {
 #ifndef RTE_LIBRTE_IEEE1588
-	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
-	struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
-
 	/* need SSE4.1 support */
 	if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_1))
 		return -1;
-
-#ifndef RTE_LIBRTE_I40E_RX_OLFLAGS_ENABLE
-	/* whithout rx ol_flags, no VP flag report */
-	if (rxmode->hw_vlan_strip != 0 ||
-	    rxmode->hw_vlan_extend != 0)
-		return -1;
 #endif
-	/* no fdir support */
-	if (fconf->mode != RTE_FDIR_MODE_NONE)
-		return -1;
-
-	/* - no csum error report support
-	 * - no header split support
-	 */
-	if (rxmode->hw_ip_checksum == 1 ||
-	    rxmode->header_split == 1)
-		return -1;
-
-	return 0;
-#else
-	RTE_SET_USED(dev);
-	return -1;
-#endif
+	return i40e_rx_vec_dev_conf_condition_check_default(dev);
 }
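With the Makefile change at the top of this patch, ARM64 builds compile the NEON implementation and all other targets keep the SSE one; the burst entry points are unchanged, so applications reach these vector routines through the normal ethdev API. A minimal polling loop that would end up in them when the PMD selects the vector path; port/queue setup is omitted and the argument types follow current releases (illustrative only):

	#include <rte_ethdev.h>
	#include <rte_mbuf.h>

	#define BURST 32 /* matches RTE_I40E_VPMD_RX_BURST above */

	/* Illustrative: the vector RX/TX paths sit behind the regular
	 * rte_eth_rx_burst()/rte_eth_tx_burst() calls.
	 */
	static void
	forward_loop(uint16_t port, uint16_t queue)
	{
		struct rte_mbuf *pkts[BURST];

		for (;;) {
			uint16_t nb = rte_eth_rx_burst(port, queue, pkts, BURST);
			uint16_t sent = rte_eth_tx_burst(port, queue, pkts, nb);

			/* free anything the TX ring refused */
			while (sent < nb)
				rte_pktmbuf_free(pkts[sent++]);
		}
	}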