Diffstat (limited to 'drivers/net/qede')
-rw-r--r--  drivers/net/qede/Makefile                          1
-rw-r--r--  drivers/net/qede/base/bcm_osal.c                  89
-rw-r--r--  drivers/net/qede/base/bcm_osal.h                  15
-rw-r--r--  drivers/net/qede/base/common_hsi.h                68
-rw-r--r--  drivers/net/qede/base/ecore.h                      4
-rw-r--r--  drivers/net/qede/base/ecore_dev.c                474
-rw-r--r--  drivers/net/qede/base/ecore_dev_api.h             55
-rw-r--r--  drivers/net/qede/base/ecore_hsi_common.h          45
-rw-r--r--  drivers/net/qede/base/ecore_hsi_debug_tools.h     24
-rw-r--r--  drivers/net/qede/base/ecore_hsi_init_func.h        4
-rw-r--r--  drivers/net/qede/base/ecore_init_fw_funcs.c       94
-rw-r--r--  drivers/net/qede/base/ecore_init_fw_funcs.h       64
-rw-r--r--  drivers/net/qede/base/ecore_int.c                146
-rw-r--r--  drivers/net/qede/base/ecore_int.h                  3
-rw-r--r--  drivers/net/qede/base/ecore_iro_values.h          12
-rw-r--r--  drivers/net/qede/base/ecore_l2.c                 188
-rw-r--r--  drivers/net/qede/base/ecore_mcp.c                 48
-rw-r--r--  drivers/net/qede/base/ecore_mcp.h                 11
-rw-r--r--  drivers/net/qede/base/ecore_mcp_api.h             11
-rw-r--r--  drivers/net/qede/base/ecore_rt_defs.h            649
-rw-r--r--  drivers/net/qede/base/ecore_sp_commands.c         23
-rw-r--r--  drivers/net/qede/base/eth_common.h                 4
-rw-r--r--  drivers/net/qede/base/mcp_public.h                58
-rw-r--r--  drivers/net/qede/base/reg_addr.h                   5
-rw-r--r--  drivers/net/qede/qede_eth_if.c                   318
-rw-r--r--  drivers/net/qede/qede_eth_if.h                   132
-rw-r--r--  drivers/net/qede/qede_ethdev.c                   990
-rw-r--r--  drivers/net/qede/qede_ethdev.h                    41
-rw-r--r--  drivers/net/qede/qede_if.h                        81
-rw-r--r--  drivers/net/qede/qede_logs.h                      23
-rw-r--r--  drivers/net/qede/qede_main.c                      52
-rw-r--r--  drivers/net/qede/qede_rxtx.c                    1439
-rw-r--r--  drivers/net/qede/qede_rxtx.h                      38
33 files changed, 2708 insertions, 2501 deletions
diff --git a/drivers/net/qede/Makefile b/drivers/net/qede/Makefile
index 3323914c..f03441d9 100644
--- a/drivers/net/qede/Makefile
+++ b/drivers/net/qede/Makefile
@@ -101,7 +101,6 @@ SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += bcm_osal.c
SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += ecore_sriov.c
SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += ecore_vf.c
SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_ethdev.c
-SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_eth_if.c
SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_main.c
SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_rxtx.c
SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_fdir.c
diff --git a/drivers/net/qede/base/bcm_osal.c b/drivers/net/qede/base/bcm_osal.c
index 3f895cd4..2603a8b3 100644
--- a/drivers/net/qede/base/bcm_osal.c
+++ b/drivers/net/qede/base/bcm_osal.c
@@ -16,6 +16,10 @@
#include "ecore_mcp_api.h"
#include "ecore_l2_api.h"
+/* Array of memzone pointers */
+static const struct rte_memzone *ecore_mz_mapping[RTE_MAX_MEMZONE];
+/* Counter to track current memzone allocated */
+uint16_t ecore_mz_count;
unsigned long qede_log2_align(unsigned long n)
{
@@ -118,6 +122,13 @@ void *osal_dma_alloc_coherent(struct ecore_dev *p_dev,
uint32_t core_id = rte_lcore_id();
unsigned int socket_id;
+ if (ecore_mz_count >= RTE_MAX_MEMZONE) {
+ DP_ERR(p_dev, "Memzone allocation count exceeds %u\n",
+ RTE_MAX_MEMZONE);
+ *phys = 0;
+ return OSAL_NULL;
+ }
+
OSAL_MEM_ZERO(mz_name, sizeof(*mz_name));
snprintf(mz_name, sizeof(mz_name) - 1, "%lx",
(unsigned long)rte_get_timer_cycles());
@@ -134,9 +145,11 @@ void *osal_dma_alloc_coherent(struct ecore_dev *p_dev,
return OSAL_NULL;
}
*phys = mz->phys_addr;
- DP_VERBOSE(p_dev, ECORE_MSG_PROBE,
- "size=%zu phys=0x%" PRIx64 " virt=%p on socket=%u\n",
- mz->len, mz->phys_addr, mz->addr, socket_id);
+ ecore_mz_mapping[ecore_mz_count++] = mz;
+ DP_VERBOSE(p_dev, ECORE_MSG_SP,
+ "Allocated dma memory size=%zu phys=0x%lx"
+ " virt=%p core=%d\n",
+ mz->len, (unsigned long)mz->phys_addr, mz->addr, core_id);
return mz->addr;
}
@@ -148,6 +161,13 @@ void *osal_dma_alloc_coherent_aligned(struct ecore_dev *p_dev,
uint32_t core_id = rte_lcore_id();
unsigned int socket_id;
+ if (ecore_mz_count >= RTE_MAX_MEMZONE) {
+ DP_ERR(p_dev, "Memzone allocation count exceeds %u\n",
+ RTE_MAX_MEMZONE);
+ *phys = 0;
+ return OSAL_NULL;
+ }
+
OSAL_MEM_ZERO(mz_name, sizeof(*mz_name));
snprintf(mz_name, sizeof(mz_name) - 1, "%lx",
(unsigned long)rte_get_timer_cycles());
@@ -163,12 +183,30 @@ void *osal_dma_alloc_coherent_aligned(struct ecore_dev *p_dev,
return OSAL_NULL;
}
*phys = mz->phys_addr;
- DP_VERBOSE(p_dev, ECORE_MSG_PROBE,
- "aligned memory size=%zu phys=0x%" PRIx64 " virt=%p core=%d\n",
- mz->len, mz->phys_addr, mz->addr, core_id);
+ ecore_mz_mapping[ecore_mz_count++] = mz;
+ DP_VERBOSE(p_dev, ECORE_MSG_SP,
+ "Allocated aligned dma memory size=%zu phys=0x%lx"
+ " virt=%p core=%d\n",
+ mz->len, (unsigned long)mz->phys_addr, mz->addr, core_id);
return mz->addr;
}
+void osal_dma_free_mem(struct ecore_dev *p_dev, dma_addr_t phys)
+{
+ uint16_t j;
+
+ for (j = 0 ; j < ecore_mz_count; j++) {
+ if (phys == ecore_mz_mapping[j]->phys_addr) {
+ DP_VERBOSE(p_dev, ECORE_MSG_SP,
+ "Free memzone %s\n", ecore_mz_mapping[j]->name);
+ rte_memzone_free(ecore_mz_mapping[j]);
+ return;
+ }
+ }
+
+ DP_ERR(p_dev, "Unexpected memory free request\n");
+}
+
#ifdef CONFIG_ECORE_ZIPPED_FW
u32 qede_unzip_data(struct ecore_hwfn *p_hwfn, u32 input_len,
u8 *input_buf, u32 max_size, u8 *unzip_buf)
@@ -211,11 +249,46 @@ qede_get_mcp_proto_stats(struct ecore_dev *edev,
if (type == ECORE_MCP_LAN_STATS) {
ecore_get_vport_stats(edev, &lan_stats);
- stats->lan_stats.ucast_rx_pkts = lan_stats.rx_ucast_pkts;
- stats->lan_stats.ucast_tx_pkts = lan_stats.tx_ucast_pkts;
+
+ /* @DPDK */
+ stats->lan_stats.ucast_rx_pkts = lan_stats.common.rx_ucast_pkts;
+ stats->lan_stats.ucast_tx_pkts = lan_stats.common.tx_ucast_pkts;
+
stats->lan_stats.fcs_err = -1;
} else {
DP_INFO(edev, "Statistics request type %d not supported\n",
type);
}
}
+
+void
+qede_hw_err_notify(struct ecore_hwfn *p_hwfn, enum ecore_hw_err_type err_type)
+{
+ char err_str[64];
+
+ switch (err_type) {
+ case ECORE_HW_ERR_FAN_FAIL:
+ strcpy(err_str, "Fan Failure");
+ break;
+ case ECORE_HW_ERR_MFW_RESP_FAIL:
+ strcpy(err_str, "MFW Response Failure");
+ break;
+ case ECORE_HW_ERR_HW_ATTN:
+ strcpy(err_str, "HW Attention");
+ break;
+ case ECORE_HW_ERR_DMAE_FAIL:
+ strcpy(err_str, "DMAE Failure");
+ break;
+ case ECORE_HW_ERR_RAMROD_FAIL:
+ strcpy(err_str, "Ramrod Failure");
+ break;
+ case ECORE_HW_ERR_FW_ASSERT:
+ strcpy(err_str, "FW Assertion");
+ break;
+ default:
+ strcpy(err_str, "Unknown");
+ }
+
+ DP_ERR(p_hwfn, "HW error occurred [%s]\n", err_str);
+ ecore_int_attn_clr_enable(p_hwfn->p_dev, true);
+}
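
For illustration, a minimal caller-side sketch of how the allocation and free paths above pair up; the function name, `edev` and `size` are placeholders and not part of this patch (assumes bcm_osal.h is included):

static int qede_dma_buf_example(struct ecore_dev *edev, size_t size)
{
	dma_addr_t phys = 0;
	void *virt = OSAL_DMA_ALLOC_COHERENT(edev, &phys, size);

	if (virt == OSAL_NULL)
		return -1;

	/* ... use the DMA-coherent buffer ... */

	/* The free path added above matches 'phys' back to the memzone
	 * recorded in ecore_mz_mapping[] and releases it. */
	OSAL_DMA_FREE_COHERENT(edev, virt, phys, size);
	return 0;
}
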
diff --git a/drivers/net/qede/base/bcm_osal.h b/drivers/net/qede/base/bcm_osal.h
index 32c9b251..3acf8f7c 100644
--- a/drivers/net/qede/base/bcm_osal.h
+++ b/drivers/net/qede/base/bcm_osal.h
@@ -27,6 +27,7 @@ struct ecore_vf_acquire_sw_info;
struct vf_pf_resc_request;
enum ecore_mcp_protocol_type;
union ecore_mcp_protocol_stats;
+enum ecore_hw_err_type;
void qed_link_update(struct ecore_hwfn *hwfn);
@@ -107,14 +108,16 @@ void *osal_dma_alloc_coherent(struct ecore_dev *, dma_addr_t *, size_t);
void *osal_dma_alloc_coherent_aligned(struct ecore_dev *, dma_addr_t *,
size_t, int);
+void osal_dma_free_mem(struct ecore_dev *edev, dma_addr_t phys);
+
#define OSAL_DMA_ALLOC_COHERENT(dev, phys, size) \
osal_dma_alloc_coherent(dev, phys, size)
#define OSAL_DMA_ALLOC_COHERENT_ALIGNED(dev, phys, size, align) \
osal_dma_alloc_coherent_aligned(dev, phys, size, align)
-/* TODO: */
-#define OSAL_DMA_FREE_COHERENT(dev, virt, phys, size) nothing
+#define OSAL_DMA_FREE_COHERENT(dev, virt, phys, size) \
+ osal_dma_free_mem(dev, phys)
/* HW reads/writes */
@@ -348,6 +351,8 @@ u32 qede_unzip_data(struct ecore_hwfn *p_hwfn, u32 input_len,
u8 *input_buf, u32 max_size, u8 *unzip_buf);
void qede_vf_fill_driver_data(struct ecore_hwfn *, struct vf_pf_resc_request *,
struct ecore_vf_acquire_sw_info *);
+void qede_hw_err_notify(struct ecore_hwfn *p_hwfn,
+ enum ecore_hw_err_type err_type);
#define OSAL_VF_FILL_ACQUIRE_RESC_REQ(_dev_p, _resc_req, _os_info) \
qede_vf_fill_driver_data(_dev_p, _resc_req, _os_info)
@@ -356,7 +361,8 @@ void qede_vf_fill_driver_data(struct ecore_hwfn *, struct vf_pf_resc_request *,
/* TODO: */
#define OSAL_SCHEDULE_RECOVERY_HANDLER(hwfn) nothing
-#define OSAL_HW_ERROR_OCCURRED(hwfn, err_type) nothing
+#define OSAL_HW_ERROR_OCCURRED(hwfn, err_type) \
+ qede_hw_err_notify(hwfn, err_type)
#define OSAL_NVM_IS_ACCESS_ENABLED(hwfn) (1)
#define OSAL_NUM_ACTIVE_CPU() 0
@@ -421,6 +427,9 @@ void qede_get_mcp_proto_stats(struct ecore_dev *, enum ecore_mcp_protocol_type,
qede_get_mcp_proto_stats(dev, type, stats)
#define OSAL_SLOWPATH_IRQ_REQ(p_hwfn) (0)
+#define OSAL_CRC32(crc, buf, length) 0
+#define OSAL_CRC8_POPULATE(table, polynomial) nothing
+#define OSAL_CRC8(table, pdata, nbytes, crc) 0
#define OSAL_MFW_TLV_REQ(p_hwfn) (0)
#define OSAL_MFW_FILL_TLV_DATA(type, buf, data) (0)
#define OSAL_PF_VALIDATE_MODIFY_TUNN_CONFIG(p_hwfn, mask, b_update, tunn) 0
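
The OSAL_CRC32/OSAL_CRC8 hooks added above are stubbed out in this OSAL (they return 0 or do nothing). As a point of reference only, a self-contained sketch of the MSB-first, table-driven CRC-8 such hooks conventionally provide — this is an assumption, not code from the patch:

#include <stdint.h>
#include <stddef.h>

/* Build a 256-entry lookup table for an MSB-first CRC-8 with the given
 * polynomial (the CDU code in ecore_init_fw_funcs.c below populates its
 * table with polynomial 0x07). */
static void crc8_populate_msb(uint8_t table[256], uint8_t poly)
{
	int i, j;

	for (i = 0; i < 256; i++) {
		uint8_t crc = (uint8_t)i;

		for (j = 0; j < 8; j++)
			crc = (crc & 0x80) ? (uint8_t)((crc << 1) ^ poly)
					   : (uint8_t)(crc << 1);
		table[i] = crc;
	}
}

/* Table-driven CRC-8 over 'len' bytes, starting from 'crc'
 * (the CDU code starts from CRC8_INIT_VALUE, 0xFF). */
static uint8_t crc8(const uint8_t table[256], const uint8_t *data,
		    size_t len, uint8_t crc)
{
	while (len--)
		crc = table[crc ^ *data++];
	return crc;
}
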
diff --git a/drivers/net/qede/base/common_hsi.h b/drivers/net/qede/base/common_hsi.h
index cbcde227..bfe50e1f 100644
--- a/drivers/net/qede/base/common_hsi.h
+++ b/drivers/net/qede/base/common_hsi.h
@@ -97,8 +97,8 @@
#define FW_MAJOR_VERSION 8
-#define FW_MINOR_VERSION 18
-#define FW_REVISION_VERSION 9
+#define FW_MINOR_VERSION 20
+#define FW_REVISION_VERSION 0
#define FW_ENGINEERING_VERSION 0
/***********************/
@@ -913,24 +913,25 @@ struct db_l2_dpm_data {
__le16 bd_prod /* bd producer value to update */;
__le32 params;
/* Size in QWORD-s of the DPM burst */
-#define DB_L2_DPM_DATA_SIZE_MASK 0x3F
-#define DB_L2_DPM_DATA_SIZE_SHIFT 0
+#define DB_L2_DPM_DATA_SIZE_MASK 0x3F
+#define DB_L2_DPM_DATA_SIZE_SHIFT 0
/* Type of DPM transaction (DPM_L2_INLINE or DPM_L2_BD) (use enum db_dpm_type)
*/
-#define DB_L2_DPM_DATA_DPM_TYPE_MASK 0x3
-#define DB_L2_DPM_DATA_DPM_TYPE_SHIFT 6
-#define DB_L2_DPM_DATA_NUM_BDS_MASK 0xFF /* number of BD-s */
-#define DB_L2_DPM_DATA_NUM_BDS_SHIFT 8
+#define DB_L2_DPM_DATA_DPM_TYPE_MASK 0x3
+#define DB_L2_DPM_DATA_DPM_TYPE_SHIFT 6
+#define DB_L2_DPM_DATA_NUM_BDS_MASK 0xFF /* number of BD-s */
+#define DB_L2_DPM_DATA_NUM_BDS_SHIFT 8
/* size of the packet to be transmitted in bytes */
-#define DB_L2_DPM_DATA_PKT_SIZE_MASK 0x7FF
-#define DB_L2_DPM_DATA_PKT_SIZE_SHIFT 16
-#define DB_L2_DPM_DATA_RESERVED0_MASK 0x1
-#define DB_L2_DPM_DATA_RESERVED0_SHIFT 27
+#define DB_L2_DPM_DATA_PKT_SIZE_MASK 0x7FF
+#define DB_L2_DPM_DATA_PKT_SIZE_SHIFT 16
+#define DB_L2_DPM_DATA_RESERVED0_MASK 0x1
+#define DB_L2_DPM_DATA_RESERVED0_SHIFT 27
/* In DPM_L2_BD mode: the number of SGE-s */
-#define DB_L2_DPM_DATA_SGE_NUM_MASK 0x7
-#define DB_L2_DPM_DATA_SGE_NUM_SHIFT 28
-#define DB_L2_DPM_DATA_RESERVED1_MASK 0x1
-#define DB_L2_DPM_DATA_RESERVED1_SHIFT 31
+#define DB_L2_DPM_DATA_SGE_NUM_MASK 0x7
+#define DB_L2_DPM_DATA_SGE_NUM_SHIFT 28
+/* Flag indicating whether to enable GFS search */
+#define DB_L2_DPM_DATA_RESERVED1_MASK 0x1
+#define DB_L2_DPM_DATA_RESERVED1_SHIFT 31
};
/*
@@ -989,26 +990,29 @@ struct db_pwm_addr {
struct db_rdma_dpm_params {
__le32 params;
/* Size in QWORD-s of the DPM burst */
-#define DB_RDMA_DPM_PARAMS_SIZE_MASK 0x3F
-#define DB_RDMA_DPM_PARAMS_SIZE_SHIFT 0
+#define DB_RDMA_DPM_PARAMS_SIZE_MASK 0x3F
+#define DB_RDMA_DPM_PARAMS_SIZE_SHIFT 0
/* Type of DPM transacation (DPM_RDMA) (use enum db_dpm_type) */
-#define DB_RDMA_DPM_PARAMS_DPM_TYPE_MASK 0x3
-#define DB_RDMA_DPM_PARAMS_DPM_TYPE_SHIFT 6
+#define DB_RDMA_DPM_PARAMS_DPM_TYPE_MASK 0x3
+#define DB_RDMA_DPM_PARAMS_DPM_TYPE_SHIFT 6
/* opcode for RDMA operation */
-#define DB_RDMA_DPM_PARAMS_OPCODE_MASK 0xFF
-#define DB_RDMA_DPM_PARAMS_OPCODE_SHIFT 8
+#define DB_RDMA_DPM_PARAMS_OPCODE_MASK 0xFF
+#define DB_RDMA_DPM_PARAMS_OPCODE_SHIFT 8
/* the size of the WQE payload in bytes */
-#define DB_RDMA_DPM_PARAMS_WQE_SIZE_MASK 0x7FF
-#define DB_RDMA_DPM_PARAMS_WQE_SIZE_SHIFT 16
-#define DB_RDMA_DPM_PARAMS_RESERVED0_MASK 0x1
-#define DB_RDMA_DPM_PARAMS_RESERVED0_SHIFT 27
+#define DB_RDMA_DPM_PARAMS_WQE_SIZE_MASK 0x7FF
+#define DB_RDMA_DPM_PARAMS_WQE_SIZE_SHIFT 16
+#define DB_RDMA_DPM_PARAMS_RESERVED0_MASK 0x1
+#define DB_RDMA_DPM_PARAMS_RESERVED0_SHIFT 27
/* RoCE completion flag */
-#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_MASK 0x1
-#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_SHIFT 28
-#define DB_RDMA_DPM_PARAMS_S_FLG_MASK 0x1 /* RoCE S flag */
-#define DB_RDMA_DPM_PARAMS_S_FLG_SHIFT 29
-#define DB_RDMA_DPM_PARAMS_RESERVED1_MASK 0x3
-#define DB_RDMA_DPM_PARAMS_RESERVED1_SHIFT 30
+#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_MASK 0x1
+#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_SHIFT 28
+#define DB_RDMA_DPM_PARAMS_S_FLG_MASK 0x1 /* RoCE S flag */
+#define DB_RDMA_DPM_PARAMS_S_FLG_SHIFT 29
+#define DB_RDMA_DPM_PARAMS_RESERVED1_MASK 0x3
+#define DB_RDMA_DPM_PARAMS_RESERVED1_SHIFT 30
+/* Connection type is iWARP */
+#define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_MASK 0x1
+#define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_SHIFT 31
};
/*
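
For reference, a minimal sketch of how the MASK/SHIFT pairs above are meant to be combined when packing the 32-bit params word of struct db_l2_dpm_data; the helper name and field values are made up for the example, the macros are assumed in scope via common_hsi.h, and the result would still need the usual CPU-to-little-endian conversion before being written to the doorbell:

#include <stdint.h>

static uint32_t pack_l2_dpm_params(uint32_t size_qw, uint32_t dpm_type,
				   uint32_t num_bds, uint32_t pkt_size)
{
	uint32_t params = 0;

	/* Each field is masked to its width and shifted into position */
	params |= (size_qw  & DB_L2_DPM_DATA_SIZE_MASK) << DB_L2_DPM_DATA_SIZE_SHIFT;
	params |= (dpm_type & DB_L2_DPM_DATA_DPM_TYPE_MASK) << DB_L2_DPM_DATA_DPM_TYPE_SHIFT;
	params |= (num_bds  & DB_L2_DPM_DATA_NUM_BDS_MASK) << DB_L2_DPM_DATA_NUM_BDS_SHIFT;
	params |= (pkt_size & DB_L2_DPM_DATA_PKT_SIZE_MASK) << DB_L2_DPM_DATA_PKT_SIZE_SHIFT;

	return params;
}
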
diff --git a/drivers/net/qede/base/ecore.h b/drivers/net/qede/base/ecore.h
index 80b11a4c..0d68a9bc 100644
--- a/drivers/net/qede/base/ecore.h
+++ b/drivers/net/qede/base/ecore.h
@@ -31,7 +31,7 @@
#define ECORE_MAJOR_VERSION 8
#define ECORE_MINOR_VERSION 18
#define ECORE_REVISION_VERSION 7
-#define ECORE_ENGINEERING_VERSION 0
+#define ECORE_ENGINEERING_VERSION 1
#define ECORE_VERSION \
((ECORE_MAJOR_VERSION << 24) | (ECORE_MINOR_VERSION << 16) | \
@@ -770,7 +770,7 @@ struct ecore_dev {
bool attn_clr_en;
/* Indicates whether allowing the MFW to collect a crash dump */
- bool mdump_en;
+ bool allow_mdump;
/* Indicates if the reg_fifo is checked after any register access */
bool chk_reg_fifo;
diff --git a/drivers/net/qede/base/ecore_dev.c b/drivers/net/qede/base/ecore_dev.c
index 865103c6..65b89b8f 100644
--- a/drivers/net/qede/base/ecore_dev.c
+++ b/drivers/net/qede/base/ecore_dev.c
@@ -1080,7 +1080,7 @@ enum _ecore_status_t ecore_final_cleanup(struct ecore_hwfn *p_hwfn,
}
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
- "Sending final cleanup for PFVF[%d] [Command %08x\n]",
+ "Sending final cleanup for PFVF[%d] [Command %08x]\n",
id, command);
ecore_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN, command);
@@ -1776,13 +1776,6 @@ ecore_hw_init_pf(struct ecore_hwfn *p_hwfn,
/* perform debug configuration when chip is out of reset */
OSAL_BEFORE_PF_START((void *)p_hwfn->p_dev, p_hwfn->my_id);
- /* Cleanup chip from previous driver if such remains exist */
- rc = ecore_final_cleanup(p_hwfn, p_ptt, rel_pf_id, false);
- if (rc != ECORE_SUCCESS) {
- ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_RAMROD_FAIL);
- return rc;
- }
-
/* PF Init sequence */
rc = ecore_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode);
if (rc)
@@ -1866,17 +1859,17 @@ ecore_hw_init_pf(struct ecore_hwfn *p_hwfn,
return rc;
}
-static enum _ecore_status_t
-ecore_change_pci_hwfn(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, u8 enable)
+enum _ecore_status_t ecore_pglueb_set_pfid_enable(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ bool b_enable)
{
- u32 delay_idx = 0, val, set_val = enable ? 1 : 0;
+ u32 delay_idx = 0, val, set_val = b_enable ? 1 : 0;
- /* Change PF in PXP */
+ /* Configure the PF's internal FID_enable for master transactions */
ecore_wr(p_hwfn, p_ptt,
PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val);
- /* wait until value is set - try for 1 second every 50us */
+ /* Wait until value is set - try for 1 second every 50us */
for (delay_idx = 0; delay_idx < 20000; delay_idx++) {
val = ecore_rd(p_hwfn, p_ptt,
PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
@@ -1918,14 +1911,21 @@ enum _ecore_status_t ecore_vf_start(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
}
+static void ecore_pglueb_clear_err(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR,
+ 1 << p_hwfn->abs_pf_id);
+}
+
enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
struct ecore_hw_init_params *p_params)
{
struct ecore_load_req_params load_req_params;
- u32 load_code, param, drv_mb_param;
+ u32 load_code, resp, param, drv_mb_param;
bool b_default_mtu = true;
struct ecore_hwfn *p_hwfn;
- enum _ecore_status_t rc = ECORE_SUCCESS, mfw_rc;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
int i;
if ((p_params->int_mode == ECORE_INT_MODE_MSI) &&
@@ -1942,7 +1942,7 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
}
for_each_hwfn(p_dev, i) {
- struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+ p_hwfn = &p_dev->hwfns[i];
/* If management didn't provide a default, set one of our own */
if (!p_hwfn->hw_info.mtu) {
@@ -1955,11 +1955,6 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
continue;
}
- /* Enable DMAE in PXP */
- rc = ecore_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true);
- if (rc != ECORE_SUCCESS)
- return rc;
-
rc = ecore_calc_hw_mode(p_hwfn);
if (rc != ECORE_SUCCESS)
return rc;
@@ -2009,6 +2004,30 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
qm_lock_init = true;
}
+ /* Clean up chip from previous driver if such remains exist.
+ * This is not needed when the PF is the first one on the
+ * engine, since afterwards we are going to init the FW.
+ */
+ if (load_code != FW_MSG_CODE_DRV_LOAD_ENGINE) {
+ rc = ecore_final_cleanup(p_hwfn, p_hwfn->p_main_ptt,
+ p_hwfn->rel_pf_id, false);
+ if (rc != ECORE_SUCCESS) {
+ ecore_hw_err_notify(p_hwfn,
+ ECORE_HW_ERR_RAMROD_FAIL);
+ goto load_err;
+ }
+ }
+
+ /* Log and clean previous pglue_b errors if such exist */
+ ecore_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_main_ptt);
+ ecore_pglueb_clear_err(p_hwfn, p_hwfn->p_main_ptt);
+
+ /* Enable the PF's internal FID_enable in the PXP */
+ rc = ecore_pglueb_set_pfid_enable(p_hwfn, p_hwfn->p_main_ptt,
+ true);
+ if (rc != ECORE_SUCCESS)
+ goto load_err;
+
switch (load_code) {
case FW_MSG_CODE_DRV_LOAD_ENGINE:
rc = ecore_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
@@ -2037,35 +2056,28 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
break;
}
- if (rc != ECORE_SUCCESS)
+ if (rc != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, true,
"init phase failed for loadcode 0x%x (rc %d)\n",
load_code, rc);
+ goto load_err;
+ }
- /* ACK mfw regardless of success or failure of initialization */
- mfw_rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
- DRV_MSG_CODE_LOAD_DONE,
- 0, &load_code, &param);
+ rc = ecore_mcp_load_done(p_hwfn, p_hwfn->p_main_ptt);
if (rc != ECORE_SUCCESS)
return rc;
- if (mfw_rc != ECORE_SUCCESS) {
- DP_NOTICE(p_hwfn, true,
- "Failed sending a LOAD_DONE command\n");
- return mfw_rc;
- }
-
/* send DCBX attention request command */
DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
"sending phony dcbx set command to trigger DCBx attention handling\n");
- mfw_rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
- DRV_MSG_CODE_SET_DCBX,
- 1 << DRV_MB_PARAM_DCBX_NOTIFY_SHIFT,
- &load_code, &param);
- if (mfw_rc != ECORE_SUCCESS) {
+ rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
+ DRV_MSG_CODE_SET_DCBX,
+ 1 << DRV_MB_PARAM_DCBX_NOTIFY_SHIFT, &resp,
+ &param);
+ if (rc != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, true,
"Failed to send DCBX attention request\n");
- return mfw_rc;
+ return rc;
}
p_hwfn->hw_init_done = true;
@@ -2076,7 +2088,7 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
drv_mb_param = STORM_FW_VERSION;
rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER,
- drv_mb_param, &load_code, &param);
+ drv_mb_param, &resp, &param);
if (rc != ECORE_SUCCESS)
DP_INFO(p_hwfn, "Failed to update firmware version\n");
@@ -2094,6 +2106,14 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
}
return rc;
+
+load_err:
+ /* The MFW load lock should be released regardless of success or failure
+ * of initialization.
+ * TODO: replace this with an attempt to send cancel_load.
+ */
+ ecore_mcp_load_done(p_hwfn, p_hwfn->p_main_ptt);
+ return rc;
}
#define ECORE_HW_STOP_RETRY_LIMIT (10)
@@ -2261,18 +2281,20 @@ enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev)
}
} /* hwfn loop */
- if (IS_PF(p_dev)) {
+ if (IS_PF(p_dev) && !p_dev->recov_in_prog) {
p_hwfn = ECORE_LEADING_HWFN(p_dev);
p_ptt = ECORE_LEADING_HWFN(p_dev)->p_main_ptt;
- /* Disable DMAE in PXP - in CMT, this should only be done for
- * first hw-function, and only after all transactions have
- * stopped for all active hw-functions.
- */
- rc = ecore_change_pci_hwfn(p_hwfn, p_ptt, false);
+ /* Clear the PF's internal FID_enable in the PXP.
+ * In CMT this should only be done for first hw-function, and
+ * only after all transactions have stopped for all active
+ * hw-functions.
+ */
+ rc = ecore_pglueb_set_pfid_enable(p_hwfn, p_hwfn->p_main_ptt,
+ false);
if (rc != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, true,
- "ecore_change_pci_hwfn failed. rc = %d.\n",
+ "ecore_pglueb_set_pfid_enable() failed. rc = %d.\n",
rc);
rc2 = ECORE_UNKNOWN_ERROR;
}
@@ -2370,9 +2392,8 @@ static void ecore_hw_hwfn_prepare(struct ecore_hwfn *p_hwfn)
PGLUE_B_REG_PGL_ADDR_94_F0_BB, 0);
}
- /* Clean Previous errors if such exist */
- ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
- PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR, 1 << p_hwfn->abs_pf_id);
+ /* Clean previous pglue_b errors if such exist */
+ ecore_pglueb_clear_err(p_hwfn, p_hwfn->p_main_ptt);
/* enable internal target-read */
ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
@@ -3565,6 +3586,7 @@ enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev,
enum _ecore_status_t rc;
p_dev->chk_reg_fifo = p_params->chk_reg_fifo;
+ p_dev->allow_mdump = p_params->allow_mdump;
if (p_params->b_relaxed_probe)
p_params->p_relaxed_res = ECORE_HW_PREPARE_SUCCESS;
@@ -3718,7 +3740,7 @@ static void ecore_chain_free_pbl(struct ecore_dev *p_dev,
if (!p_chain->b_external_pbl)
OSAL_DMA_FREE_COHERENT(p_dev, p_chain->pbl_sp.p_virt_table,
p_chain->pbl_sp.p_phys_table, pbl_size);
- out:
+out:
OSAL_VFREE(p_dev, p_chain->pbl.pp_virt_addr_tbl);
}
@@ -3994,92 +4016,182 @@ enum _ecore_status_t ecore_fw_rss_eng(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
}
-enum _ecore_status_t ecore_llh_add_mac_filter(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u8 *p_filter)
+static enum _ecore_status_t
+ecore_llh_add_mac_filter_bb_ah(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u32 high, u32 low,
+ u32 *p_entry_num)
{
- u32 high, low, en;
+ u32 en;
int i;
- if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
- return ECORE_SUCCESS;
-
- high = p_filter[1] | (p_filter[0] << 8);
- low = p_filter[5] | (p_filter[4] << 8) |
- (p_filter[3] << 16) | (p_filter[2] << 24);
-
/* Find a free entry and utilize it */
for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
en = ecore_rd(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32));
+ NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 +
+ i * sizeof(u32));
if (en)
continue;
ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE +
+ NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
2 * i * sizeof(u32), low);
ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE +
+ NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
(2 * i + 1) * sizeof(u32), high);
ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32), 0);
+ NIG_REG_LLH_FUNC_FILTER_MODE_BB_K2 +
+ i * sizeof(u32), 0);
ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
+ NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_BB_K2 +
i * sizeof(u32), 0);
ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 1);
+ NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 +
+ i * sizeof(u32), 1);
break;
}
- if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) {
- DP_NOTICE(p_hwfn, false,
- "Failed to find an empty LLH filter to utilize\n");
- return ECORE_INVAL;
- }
- DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
- "MAC: %x:%x:%x:%x:%x:%x is added at %d\n",
- p_filter[0], p_filter[1], p_filter[2],
- p_filter[3], p_filter[4], p_filter[5], i);
+ if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE)
+ return ECORE_NORESOURCES;
+
+ *p_entry_num = i;
return ECORE_SUCCESS;
}
-void ecore_llh_remove_mac_filter(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, u8 *p_filter)
+enum _ecore_status_t ecore_llh_add_mac_filter(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u8 *p_filter)
{
- u32 high, low;
- int i;
+ u32 high, low, entry_num;
+ enum _ecore_status_t rc;
if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
- return;
+ return ECORE_SUCCESS;
high = p_filter[1] | (p_filter[0] << 8);
low = p_filter[5] | (p_filter[4] << 8) |
- (p_filter[3] << 16) | (p_filter[2] << 24);
+ (p_filter[3] << 16) | (p_filter[2] << 24);
+
+ if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev))
+ rc = ecore_llh_add_mac_filter_bb_ah(p_hwfn, p_ptt, high, low,
+ &entry_num);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to find an empty LLH filter to utilize\n");
+ return rc;
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "MAC: %02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx is added at %d\n",
+ p_filter[0], p_filter[1], p_filter[2], p_filter[3],
+ p_filter[4], p_filter[5], entry_num);
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
+ecore_llh_remove_mac_filter_bb_ah(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u32 high, u32 low,
+ u32 *p_entry_num)
+{
+ int i;
/* Find the entry and clean it */
for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
if (ecore_rd(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE +
+ NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
2 * i * sizeof(u32)) != low)
continue;
if (ecore_rd(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE +
+ NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
(2 * i + 1) * sizeof(u32)) != high)
continue;
ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 0);
+ NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + i * sizeof(u32), 0);
ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE +
+ NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
2 * i * sizeof(u32), 0);
ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE +
+ NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
(2 * i + 1) * sizeof(u32), 0);
break;
}
+
if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE)
+ return ECORE_INVAL;
+
+ *p_entry_num = i;
+
+ return ECORE_SUCCESS;
+}
+
+void ecore_llh_remove_mac_filter(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u8 *p_filter)
+{
+ u32 high, low, entry_num;
+ enum _ecore_status_t rc;
+
+ if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
+ return;
+
+ high = p_filter[1] | (p_filter[0] << 8);
+ low = p_filter[5] | (p_filter[4] << 8) |
+ (p_filter[3] << 16) | (p_filter[2] << 24);
+
+ if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev))
+ rc = ecore_llh_remove_mac_filter_bb_ah(p_hwfn, p_ptt, high,
+ low, &entry_num);
+ if (rc != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, false,
"Tried to remove a non-configured filter\n");
+ return;
+ }
+
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "MAC: %02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx was removed from %d\n",
+ p_filter[0], p_filter[1], p_filter[2], p_filter[3],
+ p_filter[4], p_filter[5], entry_num);
+}
+
+static enum _ecore_status_t
+ecore_llh_add_protocol_filter_bb_ah(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ enum ecore_llh_port_filter_type_t type,
+ u32 high, u32 low, u32 *p_entry_num)
+{
+ u32 en;
+ int i;
+
+ /* Find a free entry and utilize it */
+ for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
+ en = ecore_rd(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 +
+ i * sizeof(u32));
+ if (en)
+ continue;
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
+ 2 * i * sizeof(u32), low);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
+ (2 * i + 1) * sizeof(u32), high);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_MODE_BB_K2 +
+ i * sizeof(u32), 1);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_BB_K2 +
+ i * sizeof(u32), 1 << type);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + i * sizeof(u32), 1);
+ break;
+ }
+
+ if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE)
+ return ECORE_NORESOURCES;
+
+ *p_entry_num = i;
+
+ return ECORE_SUCCESS;
}
enum _ecore_status_t
@@ -4089,14 +4201,15 @@ ecore_llh_add_protocol_filter(struct ecore_hwfn *p_hwfn,
u16 dest_port,
enum ecore_llh_port_filter_type_t type)
{
- u32 high, low, en;
- int i;
+ u32 high, low, entry_num;
+ enum _ecore_status_t rc;
if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
return ECORE_SUCCESS;
high = 0;
low = 0;
+
switch (type) {
case ECORE_LLH_FILTER_ETHERTYPE:
high = source_port_or_eth_type;
@@ -4118,67 +4231,109 @@ ecore_llh_add_protocol_filter(struct ecore_hwfn *p_hwfn,
"Non valid LLH protocol filter type %d\n", type);
return ECORE_INVAL;
}
- /* Find a free entry and utilize it */
- for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
- en = ecore_rd(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32));
- if (en)
- continue;
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE +
- 2 * i * sizeof(u32), low);
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE +
- (2 * i + 1) * sizeof(u32), high);
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32), 1);
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
- i * sizeof(u32), 1 << type);
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 1);
- break;
- }
- if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) {
+
+ if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev))
+ rc = ecore_llh_add_protocol_filter_bb_ah(p_hwfn, p_ptt, type,
+ high, low, &entry_num);
+ if (rc != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, false,
"Failed to find an empty LLH filter to utilize\n");
- return ECORE_NORESOURCES;
+ return rc;
}
switch (type) {
case ECORE_LLH_FILTER_ETHERTYPE:
DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
"ETH type %x is added at %d\n",
- source_port_or_eth_type, i);
+ source_port_or_eth_type, entry_num);
break;
case ECORE_LLH_FILTER_TCP_SRC_PORT:
DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
"TCP src port %x is added at %d\n",
- source_port_or_eth_type, i);
+ source_port_or_eth_type, entry_num);
break;
case ECORE_LLH_FILTER_UDP_SRC_PORT:
DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
"UDP src port %x is added at %d\n",
- source_port_or_eth_type, i);
+ source_port_or_eth_type, entry_num);
break;
case ECORE_LLH_FILTER_TCP_DEST_PORT:
DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
- "TCP dst port %x is added at %d\n", dest_port, i);
+ "TCP dst port %x is added at %d\n", dest_port,
+ entry_num);
break;
case ECORE_LLH_FILTER_UDP_DEST_PORT:
DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
- "UDP dst port %x is added at %d\n", dest_port, i);
+ "UDP dst port %x is added at %d\n", dest_port,
+ entry_num);
break;
case ECORE_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
"TCP src/dst ports %x/%x are added at %d\n",
- source_port_or_eth_type, dest_port, i);
+ source_port_or_eth_type, dest_port, entry_num);
break;
case ECORE_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
"UDP src/dst ports %x/%x are added at %d\n",
- source_port_or_eth_type, dest_port, i);
+ source_port_or_eth_type, dest_port, entry_num);
break;
}
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
+ecore_llh_remove_protocol_filter_bb_ah(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ enum ecore_llh_port_filter_type_t type,
+ u32 high, u32 low, u32 *p_entry_num)
+{
+ int i;
+
+ /* Find the entry and clean it */
+ for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
+ if (!ecore_rd(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 +
+ i * sizeof(u32)))
+ continue;
+ if (!ecore_rd(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_MODE_BB_K2 +
+ i * sizeof(u32)))
+ continue;
+ if (!(ecore_rd(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_BB_K2 +
+ i * sizeof(u32)) & (1 << type)))
+ continue;
+ if (ecore_rd(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
+ 2 * i * sizeof(u32)) != low)
+ continue;
+ if (ecore_rd(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
+ (2 * i + 1) * sizeof(u32)) != high)
+ continue;
+
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + i * sizeof(u32), 0);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_MODE_BB_K2 +
+ i * sizeof(u32), 0);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_BB_K2 +
+ i * sizeof(u32), 0);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
+ 2 * i * sizeof(u32), 0);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
+ (2 * i + 1) * sizeof(u32), 0);
+ break;
+ }
+
+ if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE)
+ return ECORE_INVAL;
+
+ *p_entry_num = i;
+
return ECORE_SUCCESS;
}
@@ -4189,14 +4344,15 @@ ecore_llh_remove_protocol_filter(struct ecore_hwfn *p_hwfn,
u16 dest_port,
enum ecore_llh_port_filter_type_t type)
{
- u32 high, low;
- int i;
+ u32 high, low, entry_num;
+ enum _ecore_status_t rc;
if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
return;
high = 0;
low = 0;
+
switch (type) {
case ECORE_LLH_FILTER_ETHERTYPE:
high = source_port_or_eth_type;
@@ -4219,49 +4375,24 @@ ecore_llh_remove_protocol_filter(struct ecore_hwfn *p_hwfn,
return;
}
- for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
- if (!ecore_rd(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32)))
- continue;
- if (!ecore_rd(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32)))
- continue;
- if (!(ecore_rd(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
- i * sizeof(u32)) & (1 << type)))
- continue;
- if (ecore_rd(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE +
- 2 * i * sizeof(u32)) != low)
- continue;
- if (ecore_rd(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE +
- (2 * i + 1) * sizeof(u32)) != high)
- continue;
-
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 0);
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32), 0);
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
- i * sizeof(u32), 0);
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE +
- 2 * i * sizeof(u32), 0);
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE +
- (2 * i + 1) * sizeof(u32), 0);
- break;
+ if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev))
+ rc = ecore_llh_remove_protocol_filter_bb_ah(p_hwfn, p_ptt, type,
+ high, low,
+ &entry_num);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, false,
+ "Tried to remove a non-configured filter [type %d, source_port_or_eth_type 0x%x, dest_port 0x%x]\n",
+ type, source_port_or_eth_type, dest_port);
+ return;
}
- if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE)
- DP_NOTICE(p_hwfn, false,
- "Tried to remove a non-configured filter\n");
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "Protocol filter [type %d, source_port_or_eth_type 0x%x, dest_port 0x%x] was removed from %d\n",
+ type, source_port_or_eth_type, dest_port, entry_num);
}
-void ecore_llh_clear_all_filters(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt)
+static void ecore_llh_clear_all_filters_bb_ah(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
{
int i;
@@ -4270,16 +4401,27 @@ void ecore_llh_clear_all_filters(struct ecore_hwfn *p_hwfn,
for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 0);
+ NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 +
+ i * sizeof(u32), 0);
ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE +
+ NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
2 * i * sizeof(u32), 0);
ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE +
+ NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
(2 * i + 1) * sizeof(u32), 0);
}
}
+void ecore_llh_clear_all_filters(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
+ return;
+
+ if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev))
+ ecore_llh_clear_all_filters_bb_ah(p_hwfn, p_ptt);
+}
+
enum _ecore_status_t
ecore_llh_set_function_as_default(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
@@ -4396,7 +4538,7 @@ enum _ecore_status_t ecore_set_rxq_coalesce(struct ecore_hwfn *p_hwfn,
if (rc != ECORE_SUCCESS)
goto out;
- out:
+out:
return rc;
}
@@ -4434,7 +4576,7 @@ enum _ecore_status_t ecore_set_txq_coalesce(struct ecore_hwfn *p_hwfn,
rc = ecore_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone,
sizeof(struct xstorm_eth_queue_zone), timeset);
- out:
+out:
return rc;
}
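
For reference, the high/low packing used by the LLH MAC filter helpers above splits a 6-byte MAC address into the two 32-bit words written to the NIG_REG_LLH_FUNC_FILTER_VALUE registers; a standalone sketch (the helper name is made up):

#include <stdint.h>

static void llh_mac_to_filter_words(const uint8_t mac[6],
				    uint32_t *high, uint32_t *low)
{
	/* high holds MAC bytes 0-1, low holds bytes 2-5, with byte 0
	 * (the first transmitted octet) landing in the upper bits. */
	*high = mac[1] | ((uint32_t)mac[0] << 8);
	*low  = mac[5] | ((uint32_t)mac[4] << 8) |
		((uint32_t)mac[3] << 16) | ((uint32_t)mac[2] << 24);
}
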
diff --git a/drivers/net/qede/base/ecore_dev_api.h b/drivers/net/qede/base/ecore_dev_api.h
index e64a768d..9126cf95 100644
--- a/drivers/net/qede/base/ecore_dev_api.h
+++ b/drivers/net/qede/base/ecore_dev_api.h
@@ -186,6 +186,9 @@ struct ecore_hw_prepare_params {
/* The OS Epoch time in seconds */
u32 epoch;
+ /* Allow the MFW to collect a crash dump */
+ bool allow_mdump;
+
/* Allow prepare to pass even if some initializations are failing.
* If set, the `p_prepare_res' field would be set with the return,
* and might allow probe to pass even if there are certain issues.
@@ -238,7 +241,7 @@ void ecore_ptt_release(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
#ifndef __EXTRACT__LINUX__
-struct ecore_eth_stats {
+struct ecore_eth_stats_common {
u64 no_buff_discards;
u64 packet_too_big_discard;
u64 ttl0_discard;
@@ -270,11 +273,6 @@ struct ecore_eth_stats {
u64 rx_256_to_511_byte_packets;
u64 rx_512_to_1023_byte_packets;
u64 rx_1024_to_1518_byte_packets;
- u64 rx_1519_to_1522_byte_packets;
- u64 rx_1519_to_2047_byte_packets;
- u64 rx_2048_to_4095_byte_packets;
- u64 rx_4096_to_9216_byte_packets;
- u64 rx_9217_to_16383_byte_packets;
u64 rx_crc_errors;
u64 rx_mac_crtl_frames;
u64 rx_pause_frames;
@@ -291,14 +289,8 @@ struct ecore_eth_stats {
u64 tx_256_to_511_byte_packets;
u64 tx_512_to_1023_byte_packets;
u64 tx_1024_to_1518_byte_packets;
- u64 tx_1519_to_2047_byte_packets;
- u64 tx_2048_to_4095_byte_packets;
- u64 tx_4096_to_9216_byte_packets;
- u64 tx_9217_to_16383_byte_packets;
u64 tx_pause_frames;
u64 tx_pfc_frames;
- u64 tx_lpi_entry_count;
- u64 tx_total_collisions;
u64 brb_truncates;
u64 brb_discards;
u64 rx_mac_bytes;
@@ -312,6 +304,33 @@ struct ecore_eth_stats {
u64 tx_mac_bc_packets;
u64 tx_mac_ctrl_frames;
};
+
+struct ecore_eth_stats_bb {
+ u64 rx_1519_to_1522_byte_packets;
+ u64 rx_1519_to_2047_byte_packets;
+ u64 rx_2048_to_4095_byte_packets;
+ u64 rx_4096_to_9216_byte_packets;
+ u64 rx_9217_to_16383_byte_packets;
+ u64 tx_1519_to_2047_byte_packets;
+ u64 tx_2048_to_4095_byte_packets;
+ u64 tx_4096_to_9216_byte_packets;
+ u64 tx_9217_to_16383_byte_packets;
+ u64 tx_lpi_entry_count;
+ u64 tx_total_collisions;
+};
+
+struct ecore_eth_stats_ah {
+ u64 rx_1519_to_max_byte_packets;
+ u64 tx_1519_to_max_byte_packets;
+};
+
+struct ecore_eth_stats {
+ struct ecore_eth_stats_common common;
+ union {
+ struct ecore_eth_stats_bb bb;
+ struct ecore_eth_stats_ah ah;
+ };
+};
#endif
enum ecore_dmae_address_type_t {
@@ -581,4 +600,16 @@ enum _ecore_status_t
ecore_set_queue_coalesce(struct ecore_hwfn *p_hwfn, u16 rx_coal,
u16 tx_coal, void *p_handle);
+/**
+ * @brief ecore_pglueb_set_pfid_enable - Enable or disable PCI BUS MASTER
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param b_enable - true/false
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_pglueb_set_pfid_enable(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ bool b_enable);
#endif
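
With the stats split above, counters common to all chips live under the 'common' sub-struct while the BB-only and AH-only ranges sit in the union; a minimal access sketch, assuming the ecore headers are in scope (the printed counters are chosen for illustration only):

static void print_rx_counters(struct ecore_dev *p_dev,
			      struct ecore_eth_stats *stats)
{
	/* Chip-agnostic counters moved under stats->common */
	DP_INFO(p_dev, "rx_ucast_pkts: %lu\n",
		(unsigned long)stats->common.rx_ucast_pkts);

	/* Oversize-frame counters are chip specific */
	if (ECORE_IS_BB(p_dev))
		DP_INFO(p_dev, "rx 1519-2047: %lu\n",
			(unsigned long)stats->bb.rx_1519_to_2047_byte_packets);
	else if (ECORE_IS_AH(p_dev))
		DP_INFO(p_dev, "rx 1519-max: %lu\n",
			(unsigned long)stats->ah.rx_1519_to_max_byte_packets);
}
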
diff --git a/drivers/net/qede/base/ecore_hsi_common.h b/drivers/net/qede/base/ecore_hsi_common.h
index 3042ed55..5c2a08f9 100644
--- a/drivers/net/qede/base/ecore_hsi_common.h
+++ b/drivers/net/qede/base/ecore_hsi_common.h
@@ -856,7 +856,8 @@ struct core_rx_gsi_offload_cqe {
__le16 vlan /* 802.1q VLAN tag */;
__le32 src_mac_addrhi /* hi 4 bytes source mac address */;
__le16 src_mac_addrlo /* lo 2 bytes of source mac address */;
- u8 reserved1[2];
+/* These are the lower 16 bit of QP id in RoCE BTH header */
+ __le16 qp_id;
__le32 gid_dst[4] /* Gid destination address */;
};
@@ -998,11 +999,9 @@ struct core_tx_bd {
*/
#define CORE_TX_BD_L4_HDR_OFFSET_W_MASK 0x3FFF
#define CORE_TX_BD_L4_HDR_OFFSET_W_SHIFT 0
-/* Packet destination - Network, LB (use enum core_tx_dest) */
-#define CORE_TX_BD_TX_DST_MASK 0x1
+/* Packet destination - Network, Loopback or Drop (use enum core_tx_dest) */
+#define CORE_TX_BD_TX_DST_MASK 0x3
#define CORE_TX_BD_TX_DST_SHIFT 14
-#define CORE_TX_BD_RESERVED_MASK 0x1
-#define CORE_TX_BD_RESERVED_SHIFT 15
};
@@ -1011,8 +1010,10 @@ struct core_tx_bd {
* Light L2 TX Destination
*/
enum core_tx_dest {
- CORE_TX_DEST_NW /* Light L2 TX Destination to the Network */,
- CORE_TX_DEST_LB /* Light L2 TX Destination to the Loopback */,
+ CORE_TX_DEST_NW /* TX Destination to the Network */,
+ CORE_TX_DEST_LB /* TX Destination to the Loopback */,
+ CORE_TX_DEST_RESERVED,
+ CORE_TX_DEST_DROP /* TX Drop */,
MAX_CORE_TX_DEST
};
@@ -1337,20 +1338,14 @@ struct pf_start_tunnel_config {
* FW will use a default port
*/
u8 set_geneve_udp_port_flg;
- u8 tx_enable_vxlan /* If set, enable VXLAN tunnel in TX path. */;
-/* If set, enable l2 GENEVE tunnel in TX path. */
- u8 tx_enable_l2geneve;
-/* If set, enable IP GENEVE tunnel in TX path. */
- u8 tx_enable_ipgeneve;
- u8 tx_enable_l2gre /* If set, enable l2 GRE tunnel in TX path. */;
- u8 tx_enable_ipgre /* If set, enable IP GRE tunnel in TX path. */;
- u8 tunnel_clss_vxlan /* Classification scheme for VXLAN tunnel. */;
-/* Classification scheme for l2 GENEVE tunnel. */
+ u8 tunnel_clss_vxlan /* Rx classification scheme for VXLAN tunnel. */;
+/* Rx classification scheme for l2 GENEVE tunnel. */
u8 tunnel_clss_l2geneve;
-/* Classification scheme for ip GENEVE tunnel. */
+/* Rx classification scheme for ip GENEVE tunnel. */
u8 tunnel_clss_ipgeneve;
- u8 tunnel_clss_l2gre /* Classification scheme for l2 GRE tunnel. */;
- u8 tunnel_clss_ipgre /* Classification scheme for ip GRE tunnel. */;
+ u8 tunnel_clss_l2gre /* Rx classification scheme for l2 GRE tunnel. */;
+ u8 tunnel_clss_ipgre /* Rx classification scheme for ip GRE tunnel. */;
+ u8 reserved;
/* VXLAN tunnel UDP destination port. Valid if set_vxlan_udp_port_flg=1 */
__le16 vxlan_udp_port;
/* GENEVE tunnel UDP destination port. Valid if set_geneve_udp_port_flg=1 */
@@ -1366,6 +1361,7 @@ struct pf_start_ramrod_data {
struct regpair consolid_q_pbl_addr;
/* tunnel configuration. */
struct pf_start_tunnel_config tunnel_config;
+ __le32 reserved;
__le16 event_ring_sb_id /* Status block ID */;
/* All VfIds owned by Pf will be from baseVfId till baseVfId+numVfs */
u8 base_vf_id;
@@ -1425,19 +1421,10 @@ struct pf_update_tunnel_config {
* unicast outer MAC in NPAR mode.
*/
u8 update_rx_def_non_ucast_clss;
-/* Update TX per PF tunnel classification scheme. used by pf update. */
- u8 update_tx_pf_clss;
/* Update VXLAN tunnel UDP destination port. */
u8 set_vxlan_udp_port_flg;
/* Update GENEVE tunnel UDP destination port. */
u8 set_geneve_udp_port_flg;
- u8 tx_enable_vxlan /* If set, enable VXLAN tunnel in TX path. */;
-/* If set, enable l2 GENEVE tunnel in TX path. */
- u8 tx_enable_l2geneve;
-/* If set, enable IP GENEVE tunnel in TX path. */
- u8 tx_enable_ipgeneve;
- u8 tx_enable_l2gre /* If set, enable l2 GRE tunnel in TX path. */;
- u8 tx_enable_ipgre /* If set, enable IP GRE tunnel in TX path. */;
u8 tunnel_clss_vxlan /* Classification scheme for VXLAN tunnel. */;
/* Classification scheme for l2 GENEVE tunnel. */
u8 tunnel_clss_l2geneve;
@@ -1447,7 +1434,7 @@ struct pf_update_tunnel_config {
u8 tunnel_clss_ipgre /* Classification scheme for ip GRE tunnel. */;
__le16 vxlan_udp_port /* VXLAN tunnel UDP destination port. */;
__le16 geneve_udp_port /* GENEVE tunnel UDP destination port. */;
- __le16 reserved[2];
+ __le16 reserved;
};
/*
diff --git a/drivers/net/qede/base/ecore_hsi_debug_tools.h b/drivers/net/qede/base/ecore_hsi_debug_tools.h
index 917e8f4c..7443ff9d 100644
--- a/drivers/net/qede/base/ecore_hsi_debug_tools.h
+++ b/drivers/net/qede/base/ecore_hsi_debug_tools.h
@@ -67,6 +67,8 @@ enum block_addr {
GRCBASE_MULD = 0x4e0000,
GRCBASE_YULD = 0x4c8000,
GRCBASE_XYLD = 0x4c0000,
+ GRCBASE_PTLD = 0x590000,
+ GRCBASE_YPLD = 0x5b0000,
GRCBASE_PRM = 0x230000,
GRCBASE_PBF_PB1 = 0xda0000,
GRCBASE_PBF_PB2 = 0xda4000,
@@ -80,6 +82,10 @@ enum block_addr {
GRCBASE_TCFC = 0x2d0000,
GRCBASE_IGU = 0x180000,
GRCBASE_CAU = 0x1c0000,
+ GRCBASE_RGFS = 0xf00000,
+ GRCBASE_RGSRC = 0x320000,
+ GRCBASE_TGFS = 0xd00000,
+ GRCBASE_TGSRC = 0x322000,
GRCBASE_UMAC = 0x51000,
GRCBASE_XMAC = 0x210000,
GRCBASE_DBG = 0x10000,
@@ -93,12 +99,6 @@ enum block_addr {
GRCBASE_PHY_PCIE = 0x620000,
GRCBASE_LED = 0x6b8000,
GRCBASE_AVS_WRAP = 0x6b0000,
- GRCBASE_RGFS = 0x1fa0000,
- GRCBASE_RGSRC = 0x1fa8000,
- GRCBASE_TGFS = 0x1fb0000,
- GRCBASE_TGSRC = 0x1fb8000,
- GRCBASE_PTLD = 0x1fc0000,
- GRCBASE_YPLD = 0x1fe0000,
GRCBASE_MISC_AEU = 0x8000,
GRCBASE_BAR0_MAP = 0x1c00000,
MAX_BLOCK_ADDR
@@ -159,6 +159,8 @@ enum block_id {
BLOCK_MULD,
BLOCK_YULD,
BLOCK_XYLD,
+ BLOCK_PTLD,
+ BLOCK_YPLD,
BLOCK_PRM,
BLOCK_PBF_PB1,
BLOCK_PBF_PB2,
@@ -172,6 +174,10 @@ enum block_id {
BLOCK_TCFC,
BLOCK_IGU,
BLOCK_CAU,
+ BLOCK_RGFS,
+ BLOCK_RGSRC,
+ BLOCK_TGFS,
+ BLOCK_TGSRC,
BLOCK_UMAC,
BLOCK_XMAC,
BLOCK_DBG,
@@ -185,12 +191,6 @@ enum block_id {
BLOCK_PHY_PCIE,
BLOCK_LED,
BLOCK_AVS_WRAP,
- BLOCK_RGFS,
- BLOCK_RGSRC,
- BLOCK_TGFS,
- BLOCK_TGSRC,
- BLOCK_PTLD,
- BLOCK_YPLD,
BLOCK_MISC_AEU,
BLOCK_BAR0_MAP,
MAX_BLOCK_ID
diff --git a/drivers/net/qede/base/ecore_hsi_init_func.h b/drivers/net/qede/base/ecore_hsi_init_func.h
index fca74791..48b0048f 100644
--- a/drivers/net/qede/base/ecore_hsi_init_func.h
+++ b/drivers/net/qede/base/ecore_hsi_init_func.h
@@ -15,6 +15,10 @@
/* Number of VLAN priorities */
#define NUM_OF_VLAN_PRIORITIES 8
+/* Size of CRC8 lookup table */
+#ifndef LINUX_REMOVE
+#define CRC8_TABLE_SIZE 256
+#endif
/*
* BRB RAM init requirements
diff --git a/drivers/net/qede/base/ecore_init_fw_funcs.c b/drivers/net/qede/base/ecore_init_fw_funcs.c
index 004ab351..b5ef173e 100644
--- a/drivers/net/qede/base/ecore_init_fw_funcs.c
+++ b/drivers/net/qede/base/ecore_init_fw_funcs.c
@@ -1590,7 +1590,8 @@ void ecore_set_rfs_mode_enable(struct ecore_hwfn *p_hwfn,
/* Filters are per PF!! */
SET_FIELD(camLine.cam_line_mapped.camline,
- GFT_CAM_LINE_MAPPED_PF_ID_MASK, 1);
+ GFT_CAM_LINE_MAPPED_PF_ID_MASK,
+ GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK);
SET_FIELD(camLine.cam_line_mapped.camline,
GFT_CAM_LINE_MAPPED_PF_ID, pf_id);
@@ -1644,8 +1645,9 @@ void ecore_set_rfs_mode_enable(struct ecore_hwfn *p_hwfn,
i * REG_SIZE, *(ramLinePointer + i));
/* Set default profile so that no filter match will happen */
- ramLine.lo = 0xffff;
- ramLine.hi = 0xffff;
+ ramLine.lo = 0xffffffff;
+ ramLine.hi = 0x3ff;
+
for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++)
ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM +
RAM_LINE_SIZE * PRS_GFT_CAM_LINES_NO_MATCH +
@@ -1722,40 +1724,30 @@ u32 ecore_get_mstorm_eth_vf_prods_offset(struct ecore_hwfn *p_hwfn,
return offset;
}
-/* Calculate CRC8 of first 4 bytes in buf */
-static u8 ecore_calc_crc8(const u8 *buf)
-{
- u32 i, j, crc = 0xff << 8;
-
- /* CRC-8 polynomial */
- #define POLY 0x1070
-
- for (j = 0; j < 4; j++, buf++) {
- crc ^= (*buf << 8);
- for (i = 0; i < 8; i++) {
- if (crc & 0x8000)
- crc ^= (POLY << 3);
-
- crc <<= 1;
- }
- }
-
- return (u8)(crc >> 8);
-}
+#ifndef LINUX_REMOVE
+#define CRC8_INIT_VALUE 0xFF
+#define CRC8_TABLE_SIZE 256
+#endif
+static u8 cdu_crc8_table[CRC8_TABLE_SIZE];
-/* Calculate and return CDU validation byte per conneciton type / region /
+/* Calculate and return CDU validation byte per connection type / region /
* cid
*/
-static u8 ecore_calc_cdu_validation_byte(u8 conn_type, u8 region,
- u32 cid)
+static u8 ecore_calc_cdu_validation_byte(struct ecore_hwfn *p_hwfn,
+ u8 conn_type,
+ u8 region, u32 cid)
{
const u8 validation_cfg = CDU_VALIDATION_DEFAULT_CFG;
+
+ static u8 crc8_table_valid; /*automatically initialized to 0*/
u8 crc, validation_byte = 0;
u32 validation_string = 0;
- const u8 *data_to_crc_rev;
- u8 data_to_crc[4];
+ u32 data_to_crc;
- data_to_crc_rev = (const u8 *)&validation_string;
+ if (crc8_table_valid == 0) {
+ OSAL_CRC8_POPULATE(cdu_crc8_table, 0x07);
+ crc8_table_valid = 1;
+ }
/*
* The CRC is calculated on the String-to-compress:
@@ -1772,13 +1764,22 @@ static u8 ecore_calc_cdu_validation_byte(u8 conn_type, u8 region,
if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_TYPE) & 1)
validation_string |= (conn_type & 0xF);
- /* Convert to big-endian (ntoh())*/
- data_to_crc[0] = data_to_crc_rev[3];
- data_to_crc[1] = data_to_crc_rev[2];
- data_to_crc[2] = data_to_crc_rev[1];
- data_to_crc[3] = data_to_crc_rev[0];
-
- crc = ecore_calc_crc8(data_to_crc);
+ /* Convert to big-endian and calculate CRC8*/
+ data_to_crc = OSAL_BE32_TO_CPU(validation_string);
+
+ crc = OSAL_CRC8(cdu_crc8_table, (u8 *)&data_to_crc, sizeof(data_to_crc),
+ CRC8_INIT_VALUE);
+
+ /* The validation byte [7:0] is composed:
+ * for type A validation
+ * [7] = active configuration bit
+ * [6:0] = crc[6:0]
+ *
+ * for type B validation
+ * [7] = active configuration bit
+ * [6:3] = connection_type[3:0]
+ * [2:0] = crc[2:0]
+ */
validation_byte |= ((validation_cfg >>
CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE) & 1) << 7;
@@ -1793,8 +1794,9 @@ static u8 ecore_calc_cdu_validation_byte(u8 conn_type, u8 region,
}
/* Calcualte and set validation bytes for session context */
-void ecore_calc_session_ctx_validation(void *p_ctx_mem, u16 ctx_size,
- u8 ctx_type, u32 cid)
+void ecore_calc_session_ctx_validation(struct ecore_hwfn *p_hwfn,
+ void *p_ctx_mem,
+ u16 ctx_size, u8 ctx_type, u32 cid)
{
u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
@@ -1805,14 +1807,14 @@ void ecore_calc_session_ctx_validation(void *p_ctx_mem, u16 ctx_size,
OSAL_MEMSET(p_ctx, 0, ctx_size);
- *x_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 3, cid);
- *t_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 4, cid);
- *u_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 5, cid);
+ *x_val_ptr = ecore_calc_cdu_validation_byte(p_hwfn, ctx_type, 3, cid);
+ *t_val_ptr = ecore_calc_cdu_validation_byte(p_hwfn, ctx_type, 4, cid);
+ *u_val_ptr = ecore_calc_cdu_validation_byte(p_hwfn, ctx_type, 5, cid);
}
/* Calcualte and set validation bytes for task context */
-void ecore_calc_task_ctx_validation(void *p_ctx_mem, u16 ctx_size,
- u8 ctx_type, u32 tid)
+void ecore_calc_task_ctx_validation(struct ecore_hwfn *p_hwfn, void *p_ctx_mem,
+ u16 ctx_size, u8 ctx_type, u32 tid)
{
u8 *p_ctx, *region1_val_ptr;
@@ -1821,7 +1823,8 @@ void ecore_calc_task_ctx_validation(void *p_ctx_mem, u16 ctx_size,
OSAL_MEMSET(p_ctx, 0, ctx_size);
- *region1_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 1, tid);
+ *region1_val_ptr = ecore_calc_cdu_validation_byte(p_hwfn, ctx_type,
+ 1, tid);
}
/* Memset session context to 0 while preserving validation bytes */
@@ -1847,8 +1850,7 @@ void ecore_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
}
/* Memset task context to 0 while preserving validation bytes */
-void ecore_memset_task_ctx(void *p_ctx_mem, const u32 ctx_size,
- const u8 ctx_type)
+void ecore_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
{
u8 *p_ctx, *region1_val_ptr;
u8 region1_val;
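
For reference, a standalone sketch of the type-B validation-byte layout described in the comment added above ([7] active configuration bit, [6:3] connection type, [2:0] CRC); the helper name is made up and the active-configuration bit is passed in directly:

#include <stdint.h>

static uint8_t cdu_type_b_validation_byte(uint8_t crc, uint8_t conn_type,
					  int active_cfg)
{
	uint8_t val = 0;

	val |= (uint8_t)((active_cfg & 1) << 7);  /* [7]   active configuration bit */
	val |= (uint8_t)((conn_type & 0xF) << 3); /* [6:3] connection_type[3:0] */
	val |= crc & 0x7;                         /* [2:0] crc[2:0] */

	return val;
}
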
diff --git a/drivers/net/qede/base/ecore_init_fw_funcs.h b/drivers/net/qede/base/ecore_init_fw_funcs.h
index 4da3fc29..488dc005 100644
--- a/drivers/net/qede/base/ecore_init_fw_funcs.h
+++ b/drivers/net/qede/base/ecore_init_fw_funcs.h
@@ -424,48 +424,54 @@ void ecore_enable_context_validation(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
/**
* @brief ecore_calc_session_ctx_validation - Calcualte validation byte for
- * session context.
+ * session context.
*
- *
- * @param p_ctx_mem - pointer to context memory.
- * @param ctx_size - context size.
- * @param ctx_type - context type.
- * @param cid - context cid.
+ * @param p_ctx_mem - pointer to context memory.
+ * @param ctx_size - context size.
+ * @param ctx_type - context type.
+ * @param cid - context cid.
*/
-void ecore_calc_session_ctx_validation(void *p_ctx_mem, u16 ctx_size,
- u8 ctx_type, u32 cid);
+void ecore_calc_session_ctx_validation(struct ecore_hwfn *p_hwfn,
+ void *p_ctx_mem,
+ u16 ctx_size,
+ u8 ctx_type,
+ u32 cid);
/**
* @brief ecore_calc_task_ctx_validation - Calcualte validation byte for task
- * context.
- *
+ * context.
*
- * @param p_ctx_mem - pointer to context memory.
- * @param ctx_size - context size.
- * @param ctx_type - context type.
- * @param tid - context tid.
+ * @param p_hwfn - HW device data
+ * @param p_ctx_mem - pointer to context memory.
+ * @param ctx_size - context size.
+ * @param ctx_type - context type.
+ * @param tid - context tid.
*/
-void ecore_calc_task_ctx_validation(void *p_ctx_mem, u16 ctx_size,
- u8 ctx_type, u32 tid);
+void ecore_calc_task_ctx_validation(struct ecore_hwfn *p_hwfn,
+ void *p_ctx_mem,
+ u16 ctx_size,
+ u8 ctx_type,
+ u32 tid);
/**
* @brief ecore_memset_session_ctx - Memset session context to 0 while
- * preserving validation bytes.
+ * preserving validation bytes.
*
- *
- * @param p_ctx_mem - pointer to context memory.
- * @param ctx_size - size to initialzie.
- * @param ctx_type - context type.
+ * @param p_hwfn - HW device data
+ * @param p_ctx_mem - pointer to context memory.
+ * @param ctx_size - size to initialzie.
+ * @param ctx_type - context type.
*/
-void ecore_memset_session_ctx(void *p_ctx_mem, u32 ctx_size,
+void ecore_memset_session_ctx(void *p_ctx_mem,
+ u32 ctx_size,
u8 ctx_type);
/**
- * @brief ecore_memset_task_ctx - Memset session context to 0 while preserving
- * validation bytes.
- *
+ * @brief ecore_memset_task_ctx - Memset task context to 0 while preserving
+ * validation bytes.
*
- * @param p_ctx_mem - pointer to context memory.
- * @param ctx_size - size to initialzie.
- * @param ctx_type - context type.
+ * @param p_ctx_mem - pointer to context memory.
+ * @param ctx_size - size to initialzie.
+ * @param ctx_type - context type.
*/
-void ecore_memset_task_ctx(void *p_ctx_mem, u32 ctx_size,
+void ecore_memset_task_ctx(void *p_ctx_mem,
+ u32 ctx_size,
u8 ctx_type);
#endif
diff --git a/drivers/net/qede/base/ecore_int.c b/drivers/net/qede/base/ecore_int.c
index 8dc4d150..b57c510c 100644
--- a/drivers/net/qede/base/ecore_int.c
+++ b/drivers/net/qede/base/ecore_int.c
@@ -284,122 +284,119 @@ out:
#define ECORE_PGLUE_ATTENTION_ICPL_VALID (1 << 23)
#define ECORE_PGLUE_ATTENTION_ZLR_VALID (1 << 25)
#define ECORE_PGLUE_ATTENTION_ILT_VALID (1 << 23)
-static enum _ecore_status_t ecore_pglub_rbc_attn_cb(struct ecore_hwfn *p_hwfn)
+
+enum _ecore_status_t ecore_pglueb_rbc_attn_handler(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
{
u32 tmp;
- tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
- PGLUE_B_REG_TX_ERR_WR_DETAILS2);
+ tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS2);
if (tmp & ECORE_PGLUE_ATTENTION_VALID) {
u32 addr_lo, addr_hi, details;
- addr_lo = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ addr_lo = ecore_rd(p_hwfn, p_ptt,
PGLUE_B_REG_TX_ERR_WR_ADD_31_0);
- addr_hi = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ addr_hi = ecore_rd(p_hwfn, p_ptt,
PGLUE_B_REG_TX_ERR_WR_ADD_63_32);
- details = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ details = ecore_rd(p_hwfn, p_ptt,
PGLUE_B_REG_TX_ERR_WR_DETAILS);
- DP_INFO(p_hwfn,
- "Illegal write by chip to [%08x:%08x] blocked."
- "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]"
- " Details2 %08x [Was_error %02x BME deassert %02x"
- " FID_enable deassert %02x]\n",
- addr_hi, addr_lo, details,
- (u8)((details &
- ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >>
- ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT),
- (u8)((details &
- ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >>
- ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT),
- (u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID)
- ? 1 : 0), tmp,
- (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1
- : 0),
- (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ? 1 :
- 0),
- (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1
- : 0));
+ DP_NOTICE(p_hwfn, false,
+ "Illegal write by chip to [%08x:%08x] blocked. Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x] Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
+ addr_hi, addr_lo, details,
+ (u8)((details &
+ ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >>
+ ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT),
+ (u8)((details &
+ ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >>
+ ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT),
+ (u8)((details &
+ ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0),
+ tmp,
+ (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ?
+ 1 : 0),
+ (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ?
+ 1 : 0),
+ (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ?
+ 1 : 0));
}
- tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
- PGLUE_B_REG_TX_ERR_RD_DETAILS2);
+ tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_RD_DETAILS2);
if (tmp & ECORE_PGLUE_ATTENTION_RD_VALID) {
u32 addr_lo, addr_hi, details;
- addr_lo = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ addr_lo = ecore_rd(p_hwfn, p_ptt,
PGLUE_B_REG_TX_ERR_RD_ADD_31_0);
- addr_hi = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ addr_hi = ecore_rd(p_hwfn, p_ptt,
PGLUE_B_REG_TX_ERR_RD_ADD_63_32);
- details = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ details = ecore_rd(p_hwfn, p_ptt,
PGLUE_B_REG_TX_ERR_RD_DETAILS);
- DP_INFO(p_hwfn,
- "Illegal read by chip from [%08x:%08x] blocked."
- " Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]"
- " Details2 %08x [Was_error %02x BME deassert %02x"
- " FID_enable deassert %02x]\n",
- addr_hi, addr_lo, details,
- (u8)((details &
- ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >>
- ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT),
- (u8)((details &
- ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >>
- ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT),
- (u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID)
- ? 1 : 0), tmp,
- (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1
- : 0),
- (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ? 1 :
- 0),
- (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1
- : 0));
+ DP_NOTICE(p_hwfn, false,
+ "Illegal read by chip from [%08x:%08x] blocked. Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x] Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
+ addr_hi, addr_lo, details,
+ (u8)((details &
+ ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >>
+ ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT),
+ (u8)((details &
+ ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >>
+ ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT),
+ (u8)((details &
+ ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0),
+ tmp,
+ (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ?
+ 1 : 0),
+ (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ?
+ 1 : 0),
+ (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ?
+ 1 : 0));
}
- tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
- PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL);
+ tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL);
if (tmp & ECORE_PGLUE_ATTENTION_ICPL_VALID)
- DP_INFO(p_hwfn, "ICPL error - %08x\n", tmp);
+ DP_NOTICE(p_hwfn, false, "ICPL erorr - %08x\n", tmp);
- tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
- PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS);
+ tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS);
if (tmp & ECORE_PGLUE_ATTENTION_ZLR_VALID) {
u32 addr_hi, addr_lo;
- addr_lo = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ addr_lo = ecore_rd(p_hwfn, p_ptt,
PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0);
- addr_hi = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ addr_hi = ecore_rd(p_hwfn, p_ptt,
PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32);
- DP_INFO(p_hwfn, "ICPL error - %08x [Address %08x:%08x]\n",
- tmp, addr_hi, addr_lo);
+ DP_NOTICE(p_hwfn, false,
+ "ICPL erorr - %08x [Address %08x:%08x]\n",
+ tmp, addr_hi, addr_lo);
}
- tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
- PGLUE_B_REG_VF_ILT_ERR_DETAILS2);
+ tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_ILT_ERR_DETAILS2);
if (tmp & ECORE_PGLUE_ATTENTION_ILT_VALID) {
u32 addr_hi, addr_lo, details;
- addr_lo = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ addr_lo = ecore_rd(p_hwfn, p_ptt,
PGLUE_B_REG_VF_ILT_ERR_ADD_31_0);
- addr_hi = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ addr_hi = ecore_rd(p_hwfn, p_ptt,
PGLUE_B_REG_VF_ILT_ERR_ADD_63_32);
- details = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ details = ecore_rd(p_hwfn, p_ptt,
PGLUE_B_REG_VF_ILT_ERR_DETAILS);
- DP_INFO(p_hwfn,
- "ILT error - Details %08x Details2 %08x"
- " [Address %08x:%08x]\n",
- details, tmp, addr_hi, addr_lo);
+ DP_NOTICE(p_hwfn, false,
+ "ILT error - Details %08x Details2 %08x [Address %08x:%08x]\n",
+ details, tmp, addr_hi, addr_lo);
}
/* Clear the indications */
- ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt,
- PGLUE_B_REG_LATCHED_ERRORS_CLR, (1 << 2));
+ ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_LATCHED_ERRORS_CLR, (1 << 2));
return ECORE_SUCCESS;
}
+static enum _ecore_status_t ecore_pglueb_rbc_attn_cb(struct ecore_hwfn *p_hwfn)
+{
+ return ecore_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_dpc_ptt);
+}
+
static enum _ecore_status_t ecore_fw_assertion(struct ecore_hwfn *p_hwfn)
{
DP_NOTICE(p_hwfn, false, "FW assertion!\n");
@@ -505,7 +502,7 @@ static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = {
{ /* After Invert 2 */
{"PGLUE config_space", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
{"PGLUE misc_flr", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
- {"PGLUE B RBC", ATTENTION_PAR_INT, ecore_pglub_rbc_attn_cb,
+ {"PGLUE B RBC", ATTENTION_PAR_INT, ecore_pglueb_rbc_attn_cb,
BLOCK_PGLUE_B},
{"PGLUE misc_mctp", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
{"Flash event", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
@@ -827,8 +824,9 @@ ecore_int_deassertion_aeu_bit(struct ecore_hwfn *p_hwfn,
ATTN_TYPE_INTERRUPT, !b_fatal);
}
+ /* @DPDK */
/* Reach assertion if attention is fatal */
- if (b_fatal) {
+ if (b_fatal || (strcmp(p_bit_name, "PGLUE B RBC") == 0)) {
DP_NOTICE(p_hwfn, true, "`%s': Fatal attention\n",
p_bit_name);
@@ -842,7 +840,7 @@ ecore_int_deassertion_aeu_bit(struct ecore_hwfn *p_hwfn,
u32 mask = ~bitmask;
val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, (val & mask));
- DP_INFO(p_hwfn, "`%s' - Disabled future attentions\n",
+ DP_ERR(p_hwfn, "`%s' - Disabled future attentions\n",
p_bit_name);
}
diff --git a/drivers/net/qede/base/ecore_int.h b/drivers/net/qede/base/ecore_int.h
index 0c8929e3..067ed605 100644
--- a/drivers/net/qede/base/ecore_int.h
+++ b/drivers/net/qede/base/ecore_int.h
@@ -208,4 +208,7 @@ enum _ecore_status_t ecore_int_set_timer_res(struct ecore_hwfn *p_hwfn,
#define ECORE_MAPPING_MEMORY_SIZE(dev) NUM_OF_SBS(dev)
#endif
+enum _ecore_status_t ecore_pglueb_rbc_attn_handler(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
#endif /* __ECORE_INT_H__ */
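
The hunks above split the PGLUE B RBC attention logic into an exported handler that takes an explicit PTT, leaving the AEU callback as a thin wrapper around the DPC PTT. A minimal sketch of how another ecore flow holding its own PTT window might drain the latched indications through the exported handler; the wrapper function name is illustrative, and only ecore_pglueb_rbc_attn_handler(), ecore_ptt_acquire() and ecore_ptt_release() come from the base code:

/* Illustrative only: run the PGLUE B RBC handler from a flow that owns
 * its own PTT instead of the DPC PTT used by the attention callback.
 */
static enum _ecore_status_t example_clear_pglueb_rbc(struct ecore_hwfn *p_hwfn)
{
	struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
	enum _ecore_status_t rc;

	if (!p_ptt)
		return ECORE_AGAIN;

	rc = ecore_pglueb_rbc_attn_handler(p_hwfn, p_ptt);
	ecore_ptt_release(p_hwfn, p_ptt);

	return rc;
}
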
diff --git a/drivers/net/qede/base/ecore_iro_values.h b/drivers/net/qede/base/ecore_iro_values.h
index 6764bfa6..bc8df8f8 100644
--- a/drivers/net/qede/base/ecore_iro_values.h
+++ b/drivers/net/qede/base/ecore_iro_values.h
@@ -27,7 +27,7 @@ static const struct iro iro_arr[49] = {
/* USTORM_COMMON_QUEUE_CONS_OFFSET(queue_zone_id) */
{ 0x84, 0x8, 0x0, 0x0, 0x2},
/* XSTORM_INTEG_TEST_DATA_OFFSET */
- { 0x4bc0, 0x0, 0x0, 0x0, 0x78},
+ { 0x4c40, 0x0, 0x0, 0x0, 0x78},
/* YSTORM_INTEG_TEST_DATA_OFFSET */
{ 0x3df0, 0x0, 0x0, 0x0, 0x78},
/* PSTORM_INTEG_TEST_DATA_OFFSET */
@@ -37,13 +37,13 @@ static const struct iro iro_arr[49] = {
/* MSTORM_INTEG_TEST_DATA_OFFSET */
{ 0x4990, 0x0, 0x0, 0x0, 0x78},
/* USTORM_INTEG_TEST_DATA_OFFSET */
- { 0x7e48, 0x0, 0x0, 0x0, 0x78},
+ { 0x7f48, 0x0, 0x0, 0x0, 0x78},
/* TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) */
{ 0xa28, 0x8, 0x0, 0x0, 0x8},
/* CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) */
{ 0x61f8, 0x10, 0x0, 0x0, 0x10},
/* CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) */
- { 0xb820, 0x30, 0x0, 0x0, 0x30},
+ { 0xbd20, 0x30, 0x0, 0x0, 0x30},
/* CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) */
{ 0x95b8, 0x30, 0x0, 0x0, 0x30},
/* MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) */
@@ -57,9 +57,9 @@ static const struct iro iro_arr[49] = {
/* MSTORM_ETH_PF_STAT_OFFSET(pf_id) */
{ 0x4ba0, 0x80, 0x0, 0x0, 0x20},
/* USTORM_QUEUE_STAT_OFFSET(stat_counter_id) */
- { 0x8050, 0x40, 0x0, 0x0, 0x30},
+ { 0x8150, 0x40, 0x0, 0x0, 0x30},
/* USTORM_ETH_PF_STAT_OFFSET(pf_id) */
- { 0xe770, 0x60, 0x0, 0x0, 0x60},
+ { 0xec70, 0x60, 0x0, 0x0, 0x60},
/* PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) */
{ 0x2b48, 0x80, 0x0, 0x0, 0x38},
/* PSTORM_ETH_PF_STAT_OFFSET(pf_id) */
@@ -89,7 +89,7 @@ static const struct iro iro_arr[49] = {
/* MSTORM_ISCSI_RX_STATS_OFFSET(pf_id) */
{ 0x12988, 0x10, 0x0, 0x0, 0x8},
/* USTORM_ISCSI_RX_STATS_OFFSET(pf_id) */
- { 0x11aa0, 0x38, 0x0, 0x0, 0x18},
+ { 0x11fa0, 0x38, 0x0, 0x0, 0x18},
/* XSTORM_ISCSI_TX_STATS_OFFSET(pf_id) */
{ 0xa8c0, 0x38, 0x0, 0x0, 0x10},
/* YSTORM_ISCSI_TX_STATS_OFFSET(pf_id) */
diff --git a/drivers/net/qede/base/ecore_l2.c b/drivers/net/qede/base/ecore_l2.c
index 4ab8fd5f..e58b8fa0 100644
--- a/drivers/net/qede/base/ecore_l2.c
+++ b/drivers/net/qede/base/ecore_l2.c
@@ -1714,13 +1714,20 @@ static void __ecore_get_vport_pstats(struct ecore_hwfn *p_hwfn,
OSAL_MEMSET(&pstats, 0, sizeof(pstats));
ecore_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, pstats_len);
- p_stats->tx_ucast_bytes += HILO_64_REGPAIR(pstats.sent_ucast_bytes);
- p_stats->tx_mcast_bytes += HILO_64_REGPAIR(pstats.sent_mcast_bytes);
- p_stats->tx_bcast_bytes += HILO_64_REGPAIR(pstats.sent_bcast_bytes);
- p_stats->tx_ucast_pkts += HILO_64_REGPAIR(pstats.sent_ucast_pkts);
- p_stats->tx_mcast_pkts += HILO_64_REGPAIR(pstats.sent_mcast_pkts);
- p_stats->tx_bcast_pkts += HILO_64_REGPAIR(pstats.sent_bcast_pkts);
- p_stats->tx_err_drop_pkts += HILO_64_REGPAIR(pstats.error_drop_pkts);
+ p_stats->common.tx_ucast_bytes +=
+ HILO_64_REGPAIR(pstats.sent_ucast_bytes);
+ p_stats->common.tx_mcast_bytes +=
+ HILO_64_REGPAIR(pstats.sent_mcast_bytes);
+ p_stats->common.tx_bcast_bytes +=
+ HILO_64_REGPAIR(pstats.sent_bcast_bytes);
+ p_stats->common.tx_ucast_pkts +=
+ HILO_64_REGPAIR(pstats.sent_ucast_pkts);
+ p_stats->common.tx_mcast_pkts +=
+ HILO_64_REGPAIR(pstats.sent_mcast_pkts);
+ p_stats->common.tx_bcast_pkts +=
+ HILO_64_REGPAIR(pstats.sent_bcast_pkts);
+ p_stats->common.tx_err_drop_pkts +=
+ HILO_64_REGPAIR(pstats.error_drop_pkts);
}
static void __ecore_get_vport_tstats(struct ecore_hwfn *p_hwfn,
@@ -1746,10 +1753,10 @@ static void __ecore_get_vport_tstats(struct ecore_hwfn *p_hwfn,
OSAL_MEMSET(&tstats, 0, sizeof(tstats));
ecore_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, tstats_len);
- p_stats->mftag_filter_discards +=
- HILO_64_REGPAIR(tstats.mftag_filter_discard);
- p_stats->mac_filter_discards +=
- HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
+ p_stats->common.mftag_filter_discards +=
+ HILO_64_REGPAIR(tstats.mftag_filter_discard);
+ p_stats->common.mac_filter_discards +=
+ HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
}
static void __ecore_get_vport_ustats_addrlen(struct ecore_hwfn *p_hwfn,
@@ -1783,12 +1790,18 @@ static void __ecore_get_vport_ustats(struct ecore_hwfn *p_hwfn,
OSAL_MEMSET(&ustats, 0, sizeof(ustats));
ecore_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, ustats_len);
- p_stats->rx_ucast_bytes += HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
- p_stats->rx_mcast_bytes += HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
- p_stats->rx_bcast_bytes += HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
- p_stats->rx_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
- p_stats->rx_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
- p_stats->rx_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
+ p_stats->common.rx_ucast_bytes +=
+ HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
+ p_stats->common.rx_mcast_bytes +=
+ HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
+ p_stats->common.rx_bcast_bytes +=
+ HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
+ p_stats->common.rx_ucast_pkts +=
+ HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
+ p_stats->common.rx_mcast_pkts +=
+ HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
+ p_stats->common.rx_bcast_pkts +=
+ HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
}
static void __ecore_get_vport_mstats_addrlen(struct ecore_hwfn *p_hwfn,
@@ -1822,23 +1835,27 @@ static void __ecore_get_vport_mstats(struct ecore_hwfn *p_hwfn,
OSAL_MEMSET(&mstats, 0, sizeof(mstats));
ecore_memcpy_from(p_hwfn, p_ptt, &mstats, mstats_addr, mstats_len);
- p_stats->no_buff_discards += HILO_64_REGPAIR(mstats.no_buff_discard);
- p_stats->packet_too_big_discard +=
- HILO_64_REGPAIR(mstats.packet_too_big_discard);
- p_stats->ttl0_discard += HILO_64_REGPAIR(mstats.ttl0_discard);
- p_stats->tpa_coalesced_pkts +=
- HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
- p_stats->tpa_coalesced_events +=
- HILO_64_REGPAIR(mstats.tpa_coalesced_events);
- p_stats->tpa_aborts_num += HILO_64_REGPAIR(mstats.tpa_aborts_num);
- p_stats->tpa_coalesced_bytes +=
- HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
+ p_stats->common.no_buff_discards +=
+ HILO_64_REGPAIR(mstats.no_buff_discard);
+ p_stats->common.packet_too_big_discard +=
+ HILO_64_REGPAIR(mstats.packet_too_big_discard);
+ p_stats->common.ttl0_discard +=
+ HILO_64_REGPAIR(mstats.ttl0_discard);
+ p_stats->common.tpa_coalesced_pkts +=
+ HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
+ p_stats->common.tpa_coalesced_events +=
+ HILO_64_REGPAIR(mstats.tpa_coalesced_events);
+ p_stats->common.tpa_aborts_num +=
+ HILO_64_REGPAIR(mstats.tpa_aborts_num);
+ p_stats->common.tpa_coalesced_bytes +=
+ HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
}
static void __ecore_get_vport_port_stats(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_eth_stats *p_stats)
{
+ struct ecore_eth_stats_common *p_common = &p_stats->common;
struct port_stats port_stats;
int j;
@@ -1849,54 +1866,75 @@ static void __ecore_get_vport_port_stats(struct ecore_hwfn *p_hwfn,
OFFSETOF(struct public_port, stats),
sizeof(port_stats));
- p_stats->rx_64_byte_packets += port_stats.eth.r64;
- p_stats->rx_65_to_127_byte_packets += port_stats.eth.r127;
- p_stats->rx_128_to_255_byte_packets += port_stats.eth.r255;
- p_stats->rx_256_to_511_byte_packets += port_stats.eth.r511;
- p_stats->rx_512_to_1023_byte_packets += port_stats.eth.r1023;
- p_stats->rx_1024_to_1518_byte_packets += port_stats.eth.r1518;
- p_stats->rx_1519_to_1522_byte_packets += port_stats.eth.r1522;
- p_stats->rx_1519_to_2047_byte_packets += port_stats.eth.r2047;
- p_stats->rx_2048_to_4095_byte_packets += port_stats.eth.r4095;
- p_stats->rx_4096_to_9216_byte_packets += port_stats.eth.r9216;
- p_stats->rx_9217_to_16383_byte_packets += port_stats.eth.r16383;
- p_stats->rx_crc_errors += port_stats.eth.rfcs;
- p_stats->rx_mac_crtl_frames += port_stats.eth.rxcf;
- p_stats->rx_pause_frames += port_stats.eth.rxpf;
- p_stats->rx_pfc_frames += port_stats.eth.rxpp;
- p_stats->rx_align_errors += port_stats.eth.raln;
- p_stats->rx_carrier_errors += port_stats.eth.rfcr;
- p_stats->rx_oversize_packets += port_stats.eth.rovr;
- p_stats->rx_jabbers += port_stats.eth.rjbr;
- p_stats->rx_undersize_packets += port_stats.eth.rund;
- p_stats->rx_fragments += port_stats.eth.rfrg;
- p_stats->tx_64_byte_packets += port_stats.eth.t64;
- p_stats->tx_65_to_127_byte_packets += port_stats.eth.t127;
- p_stats->tx_128_to_255_byte_packets += port_stats.eth.t255;
- p_stats->tx_256_to_511_byte_packets += port_stats.eth.t511;
- p_stats->tx_512_to_1023_byte_packets += port_stats.eth.t1023;
- p_stats->tx_1024_to_1518_byte_packets += port_stats.eth.t1518;
- p_stats->tx_1519_to_2047_byte_packets += port_stats.eth.t2047;
- p_stats->tx_2048_to_4095_byte_packets += port_stats.eth.t4095;
- p_stats->tx_4096_to_9216_byte_packets += port_stats.eth.t9216;
- p_stats->tx_9217_to_16383_byte_packets += port_stats.eth.t16383;
- p_stats->tx_pause_frames += port_stats.eth.txpf;
- p_stats->tx_pfc_frames += port_stats.eth.txpp;
- p_stats->tx_lpi_entry_count += port_stats.eth.tlpiec;
- p_stats->tx_total_collisions += port_stats.eth.tncl;
- p_stats->rx_mac_bytes += port_stats.eth.rbyte;
- p_stats->rx_mac_uc_packets += port_stats.eth.rxuca;
- p_stats->rx_mac_mc_packets += port_stats.eth.rxmca;
- p_stats->rx_mac_bc_packets += port_stats.eth.rxbca;
- p_stats->rx_mac_frames_ok += port_stats.eth.rxpok;
- p_stats->tx_mac_bytes += port_stats.eth.tbyte;
- p_stats->tx_mac_uc_packets += port_stats.eth.txuca;
- p_stats->tx_mac_mc_packets += port_stats.eth.txmca;
- p_stats->tx_mac_bc_packets += port_stats.eth.txbca;
- p_stats->tx_mac_ctrl_frames += port_stats.eth.txcf;
+ p_common->rx_64_byte_packets += port_stats.eth.r64;
+ p_common->rx_65_to_127_byte_packets += port_stats.eth.r127;
+ p_common->rx_128_to_255_byte_packets += port_stats.eth.r255;
+ p_common->rx_256_to_511_byte_packets += port_stats.eth.r511;
+ p_common->rx_512_to_1023_byte_packets += port_stats.eth.r1023;
+ p_common->rx_1024_to_1518_byte_packets += port_stats.eth.r1518;
+ p_common->rx_crc_errors += port_stats.eth.rfcs;
+ p_common->rx_mac_crtl_frames += port_stats.eth.rxcf;
+ p_common->rx_pause_frames += port_stats.eth.rxpf;
+ p_common->rx_pfc_frames += port_stats.eth.rxpp;
+ p_common->rx_align_errors += port_stats.eth.raln;
+ p_common->rx_carrier_errors += port_stats.eth.rfcr;
+ p_common->rx_oversize_packets += port_stats.eth.rovr;
+ p_common->rx_jabbers += port_stats.eth.rjbr;
+ p_common->rx_undersize_packets += port_stats.eth.rund;
+ p_common->rx_fragments += port_stats.eth.rfrg;
+ p_common->tx_64_byte_packets += port_stats.eth.t64;
+ p_common->tx_65_to_127_byte_packets += port_stats.eth.t127;
+ p_common->tx_128_to_255_byte_packets += port_stats.eth.t255;
+ p_common->tx_256_to_511_byte_packets += port_stats.eth.t511;
+ p_common->tx_512_to_1023_byte_packets += port_stats.eth.t1023;
+ p_common->tx_1024_to_1518_byte_packets += port_stats.eth.t1518;
+ p_common->tx_pause_frames += port_stats.eth.txpf;
+ p_common->tx_pfc_frames += port_stats.eth.txpp;
+ p_common->rx_mac_bytes += port_stats.eth.rbyte;
+ p_common->rx_mac_uc_packets += port_stats.eth.rxuca;
+ p_common->rx_mac_mc_packets += port_stats.eth.rxmca;
+ p_common->rx_mac_bc_packets += port_stats.eth.rxbca;
+ p_common->rx_mac_frames_ok += port_stats.eth.rxpok;
+ p_common->tx_mac_bytes += port_stats.eth.tbyte;
+ p_common->tx_mac_uc_packets += port_stats.eth.txuca;
+ p_common->tx_mac_mc_packets += port_stats.eth.txmca;
+ p_common->tx_mac_bc_packets += port_stats.eth.txbca;
+ p_common->tx_mac_ctrl_frames += port_stats.eth.txcf;
for (j = 0; j < 8; j++) {
- p_stats->brb_truncates += port_stats.brb.brb_truncate[j];
- p_stats->brb_discards += port_stats.brb.brb_discard[j];
+ p_common->brb_truncates += port_stats.brb.brb_truncate[j];
+ p_common->brb_discards += port_stats.brb.brb_discard[j];
+ }
+
+ if (ECORE_IS_BB(p_hwfn->p_dev)) {
+ struct ecore_eth_stats_bb *p_bb = &p_stats->bb;
+
+ p_bb->rx_1519_to_1522_byte_packets +=
+ port_stats.eth.u0.bb0.r1522;
+ p_bb->rx_1519_to_2047_byte_packets +=
+ port_stats.eth.u0.bb0.r2047;
+ p_bb->rx_2048_to_4095_byte_packets +=
+ port_stats.eth.u0.bb0.r4095;
+ p_bb->rx_4096_to_9216_byte_packets +=
+ port_stats.eth.u0.bb0.r9216;
+ p_bb->rx_9217_to_16383_byte_packets +=
+ port_stats.eth.u0.bb0.r16383;
+ p_bb->tx_1519_to_2047_byte_packets +=
+ port_stats.eth.u1.bb1.t2047;
+ p_bb->tx_2048_to_4095_byte_packets +=
+ port_stats.eth.u1.bb1.t4095;
+ p_bb->tx_4096_to_9216_byte_packets +=
+ port_stats.eth.u1.bb1.t9216;
+ p_bb->tx_9217_to_16383_byte_packets +=
+ port_stats.eth.u1.bb1.t16383;
+ p_bb->tx_lpi_entry_count += port_stats.eth.u2.bb2.tlpiec;
+ p_bb->tx_total_collisions += port_stats.eth.u2.bb2.tncl;
+ } else {
+ struct ecore_eth_stats_ah *p_ah = &p_stats->ah;
+
+ p_ah->rx_1519_to_max_byte_packets +=
+ port_stats.eth.u0.ah0.r1519_to_max;
+ p_ah->tx_1519_to_max_byte_packets =
+ port_stats.eth.u1.ah1.t1519_to_max;
}
}
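
The stats refactor above routes the shared counters through ecore_eth_stats_common and keeps the chip-specific ranges in separate bb/ah blocks, with every 64-bit firmware counter built from a hi/lo register pair. A minimal sketch of the HILO_64_REGPAIR idea, assuming a plain 32-bit hi/lo layout and the u32/u64 typedefs from the base headers (the real macro and regpair type live elsewhere in the base code):

/* Sketch only: fold a 32-bit hi/lo pair into the 64-bit value that is
 * then accumulated into the driver-side counter.
 */
struct example_regpair {
	u32 lo;
	u32 hi;
};

static inline u64 example_hilo_64(struct example_regpair rp)
{
	return ((u64)rp.hi << 32) | rp.lo;
}
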
diff --git a/drivers/net/qede/base/ecore_mcp.c b/drivers/net/qede/base/ecore_mcp.c
index a834ac74..88c5ceb0 100644
--- a/drivers/net/qede/base/ecore_mcp.c
+++ b/drivers/net/qede/base/ecore_mcp.c
@@ -893,6 +893,30 @@ enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
}
+enum _ecore_status_t ecore_mcp_load_done(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 resp = 0, param = 0;
+ enum _ecore_status_t rc;
+
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_DONE, 0, &resp,
+ &param);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to send a LOAD_DONE command, rc = %d\n", rc);
+ return rc;
+ }
+
+#define FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR (1 << 0)
+
+ /* Check if there is a DID mismatch between nvm-cfg/efuse */
+ if (param & FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR)
+ DP_NOTICE(p_hwfn, false,
+ "warning: device configuration is not supported on this board type. The device may not function as expected.\n");
+
+ return ECORE_SUCCESS;
+}
+
enum _ecore_status_t ecore_mcp_unload_req(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
@@ -1556,10 +1580,9 @@ static void ecore_mcp_handle_critical_error(struct ecore_hwfn *p_hwfn,
DP_NOTICE(p_hwfn, false,
"Received a critical error notification from the MFW!\n");
- if (p_hwfn->p_dev->mdump_en) {
+ if (p_hwfn->p_dev->allow_mdump) {
DP_NOTICE(p_hwfn, false,
"Not acknowledging the notification to allow the MFW crash dump\n");
- p_hwfn->p_dev->mdump_en = false;
return;
}
@@ -2894,6 +2917,27 @@ struct ecore_resc_alloc_out_params {
u32 flags;
};
+#define ECORE_RECOVERY_PROLOG_SLEEP_MS 100
+
+enum _ecore_status_t ecore_recovery_prolog(struct ecore_dev *p_dev)
+{
+ struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt;
+ enum _ecore_status_t rc;
+
+ /* Allow ongoing PCIe transactions to complete */
+ OSAL_MSLEEP(ECORE_RECOVERY_PROLOG_SLEEP_MS);
+
+ /* Clear the PF's internal FID_enable in the PXP */
+ rc = ecore_pglueb_set_pfid_enable(p_hwfn, p_ptt, false);
+ if (rc != ECORE_SUCCESS)
+ DP_NOTICE(p_hwfn, false,
+ "ecore_pglueb_set_pfid_enable() failed. rc = %d.\n",
+ rc);
+
+ return rc;
+}
+
static enum _ecore_status_t
ecore_mcp_resc_allocation_msg(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
diff --git a/drivers/net/qede/base/ecore_mcp.h b/drivers/net/qede/base/ecore_mcp.h
index 37d1835f..77fb5a3c 100644
--- a/drivers/net/qede/base/ecore_mcp.h
+++ b/drivers/net/qede/base/ecore_mcp.h
@@ -171,6 +171,17 @@ enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
struct ecore_load_req_params *p_params);
/**
+ * @brief Sends a LOAD_DONE message to the MFW
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - Operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_load_done(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
* @brief Sends a UNLOAD_REQ message to the MFW
*
* @param p_hwfn
diff --git a/drivers/net/qede/base/ecore_mcp_api.h b/drivers/net/qede/base/ecore_mcp_api.h
index 190c1352..abc190c9 100644
--- a/drivers/net/qede/base/ecore_mcp_api.h
+++ b/drivers/net/qede/base/ecore_mcp_api.h
@@ -736,6 +736,17 @@ enum _ecore_status_t ecore_start_recovery_process(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
/**
+ * @brief A recovery handler must call this function as its first step.
+ * It is assumed that the handler is not run from an interrupt context.
+ *
+ * @param p_dev
+ * @param p_ptt
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_recovery_prolog(struct ecore_dev *p_dev);
+
+/**
* @brief Notify MFW about the change in base device properties
*
* @param p_hwfn
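
Per the comment above, ecore_recovery_prolog() is meant to be the first step of a recovery handler running in process context. A minimal sketch of that ordering; example_stop_datapath() and example_restart_device() are hypothetical driver hooks, not part of the ecore API:

/* hypothetical driver hooks, declared only to keep the sketch self-contained */
static void example_stop_datapath(struct ecore_dev *p_dev);
static void example_restart_device(struct ecore_dev *p_dev);

/* Illustrative ordering only: prolog first, then teardown and
 * re-initialization from process context.
 */
static int example_recovery_handler(struct ecore_dev *p_dev)
{
	enum _ecore_status_t rc;

	rc = ecore_recovery_prolog(p_dev);
	if (rc != ECORE_SUCCESS)
		return -1;

	example_stop_datapath(p_dev);
	example_restart_device(p_dev);

	return 0;
}
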
diff --git a/drivers/net/qede/base/ecore_rt_defs.h b/drivers/net/qede/base/ecore_rt_defs.h
index 846dc6d1..c9c23096 100644
--- a/drivers/net/qede/base/ecore_rt_defs.h
+++ b/drivers/net/qede/base/ecore_rt_defs.h
@@ -94,359 +94,358 @@
#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 6697
#define PSWRQ2_REG_VF_BASE_RT_OFFSET 6698
#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 6699
-#define PSWRQ2_REG_WR_MBS0_RT_OFFSET 6700
-#define PSWRQ2_REG_RD_MBS0_RT_OFFSET 6701
-#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6702
-#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6703
-#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6704
+#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6700
+#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6701
+#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6702
#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 22000
-#define PGLUE_REG_B_VF_BASE_RT_OFFSET 28704
-#define PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET 28705
-#define PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET 28706
-#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 28707
-#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 28708
-#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 28709
-#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 28710
-#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 28711
-#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 28712
-#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 28713
-#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 28714
-#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 28715
-#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 28716
+#define PGLUE_REG_B_VF_BASE_RT_OFFSET 28702
+#define PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET 28703
+#define PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET 28704
+#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 28705
+#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 28706
+#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 28707
+#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 28708
+#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 28709
+#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 28710
+#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 28711
+#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 28712
+#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 28713
+#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 28714
#define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416
-#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 29132
+#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 29130
#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 608
-#define QM_REG_MAXPQSIZE_0_RT_OFFSET 29740
-#define QM_REG_MAXPQSIZE_1_RT_OFFSET 29741
-#define QM_REG_MAXPQSIZE_2_RT_OFFSET 29742
-#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 29743
-#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 29744
-#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 29745
-#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 29746
-#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 29747
-#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 29748
-#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 29749
-#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 29750
-#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 29751
-#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 29752
-#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 29753
-#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 29754
-#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 29755
-#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 29756
-#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 29757
-#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 29758
-#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 29759
-#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 29760
-#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 29761
-#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 29762
-#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 29763
-#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 29764
-#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 29765
-#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 29766
-#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 29767
-#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 29768
-#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 29769
-#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 29770
-#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 29771
-#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 29772
-#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 29773
-#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 29774
-#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 29775
-#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 29776
-#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 29777
-#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 29778
-#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 29779
-#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 29780
-#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 29781
-#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 29782
-#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 29783
-#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 29784
-#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 29785
-#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 29786
-#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 29787
-#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 29788
-#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 29789
-#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 29790
-#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 29791
-#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 29792
-#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 29793
-#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 29794
-#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 29795
-#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 29796
-#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 29797
-#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 29798
-#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 29799
-#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 29800
-#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 29801
-#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 29802
-#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 29803
-#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 29804
-#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 29805
-#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 29806
-#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 29807
-#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29935
-#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29936
-#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29937
-#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29938
-#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29939
-#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29940
-#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29941
-#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29942
-#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29943
-#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29944
-#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29945
-#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29946
-#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29947
-#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29948
-#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29949
-#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29950
-#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29951
-#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29952
-#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29953
-#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29954
-#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29955
-#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29956
-#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29957
-#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29958
-#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29959
-#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29960
-#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29961
-#define QM_REG_PQTX2PF_0_RT_OFFSET 29962
-#define QM_REG_PQTX2PF_1_RT_OFFSET 29963
-#define QM_REG_PQTX2PF_2_RT_OFFSET 29964
-#define QM_REG_PQTX2PF_3_RT_OFFSET 29965
-#define QM_REG_PQTX2PF_4_RT_OFFSET 29966
-#define QM_REG_PQTX2PF_5_RT_OFFSET 29967
-#define QM_REG_PQTX2PF_6_RT_OFFSET 29968
-#define QM_REG_PQTX2PF_7_RT_OFFSET 29969
-#define QM_REG_PQTX2PF_8_RT_OFFSET 29970
-#define QM_REG_PQTX2PF_9_RT_OFFSET 29971
-#define QM_REG_PQTX2PF_10_RT_OFFSET 29972
-#define QM_REG_PQTX2PF_11_RT_OFFSET 29973
-#define QM_REG_PQTX2PF_12_RT_OFFSET 29974
-#define QM_REG_PQTX2PF_13_RT_OFFSET 29975
-#define QM_REG_PQTX2PF_14_RT_OFFSET 29976
-#define QM_REG_PQTX2PF_15_RT_OFFSET 29977
-#define QM_REG_PQTX2PF_16_RT_OFFSET 29978
-#define QM_REG_PQTX2PF_17_RT_OFFSET 29979
-#define QM_REG_PQTX2PF_18_RT_OFFSET 29980
-#define QM_REG_PQTX2PF_19_RT_OFFSET 29981
-#define QM_REG_PQTX2PF_20_RT_OFFSET 29982
-#define QM_REG_PQTX2PF_21_RT_OFFSET 29983
-#define QM_REG_PQTX2PF_22_RT_OFFSET 29984
-#define QM_REG_PQTX2PF_23_RT_OFFSET 29985
-#define QM_REG_PQTX2PF_24_RT_OFFSET 29986
-#define QM_REG_PQTX2PF_25_RT_OFFSET 29987
-#define QM_REG_PQTX2PF_26_RT_OFFSET 29988
-#define QM_REG_PQTX2PF_27_RT_OFFSET 29989
-#define QM_REG_PQTX2PF_28_RT_OFFSET 29990
-#define QM_REG_PQTX2PF_29_RT_OFFSET 29991
-#define QM_REG_PQTX2PF_30_RT_OFFSET 29992
-#define QM_REG_PQTX2PF_31_RT_OFFSET 29993
-#define QM_REG_PQTX2PF_32_RT_OFFSET 29994
-#define QM_REG_PQTX2PF_33_RT_OFFSET 29995
-#define QM_REG_PQTX2PF_34_RT_OFFSET 29996
-#define QM_REG_PQTX2PF_35_RT_OFFSET 29997
-#define QM_REG_PQTX2PF_36_RT_OFFSET 29998
-#define QM_REG_PQTX2PF_37_RT_OFFSET 29999
-#define QM_REG_PQTX2PF_38_RT_OFFSET 30000
-#define QM_REG_PQTX2PF_39_RT_OFFSET 30001
-#define QM_REG_PQTX2PF_40_RT_OFFSET 30002
-#define QM_REG_PQTX2PF_41_RT_OFFSET 30003
-#define QM_REG_PQTX2PF_42_RT_OFFSET 30004
-#define QM_REG_PQTX2PF_43_RT_OFFSET 30005
-#define QM_REG_PQTX2PF_44_RT_OFFSET 30006
-#define QM_REG_PQTX2PF_45_RT_OFFSET 30007
-#define QM_REG_PQTX2PF_46_RT_OFFSET 30008
-#define QM_REG_PQTX2PF_47_RT_OFFSET 30009
-#define QM_REG_PQTX2PF_48_RT_OFFSET 30010
-#define QM_REG_PQTX2PF_49_RT_OFFSET 30011
-#define QM_REG_PQTX2PF_50_RT_OFFSET 30012
-#define QM_REG_PQTX2PF_51_RT_OFFSET 30013
-#define QM_REG_PQTX2PF_52_RT_OFFSET 30014
-#define QM_REG_PQTX2PF_53_RT_OFFSET 30015
-#define QM_REG_PQTX2PF_54_RT_OFFSET 30016
-#define QM_REG_PQTX2PF_55_RT_OFFSET 30017
-#define QM_REG_PQTX2PF_56_RT_OFFSET 30018
-#define QM_REG_PQTX2PF_57_RT_OFFSET 30019
-#define QM_REG_PQTX2PF_58_RT_OFFSET 30020
-#define QM_REG_PQTX2PF_59_RT_OFFSET 30021
-#define QM_REG_PQTX2PF_60_RT_OFFSET 30022
-#define QM_REG_PQTX2PF_61_RT_OFFSET 30023
-#define QM_REG_PQTX2PF_62_RT_OFFSET 30024
-#define QM_REG_PQTX2PF_63_RT_OFFSET 30025
-#define QM_REG_PQOTHER2PF_0_RT_OFFSET 30026
-#define QM_REG_PQOTHER2PF_1_RT_OFFSET 30027
-#define QM_REG_PQOTHER2PF_2_RT_OFFSET 30028
-#define QM_REG_PQOTHER2PF_3_RT_OFFSET 30029
-#define QM_REG_PQOTHER2PF_4_RT_OFFSET 30030
-#define QM_REG_PQOTHER2PF_5_RT_OFFSET 30031
-#define QM_REG_PQOTHER2PF_6_RT_OFFSET 30032
-#define QM_REG_PQOTHER2PF_7_RT_OFFSET 30033
-#define QM_REG_PQOTHER2PF_8_RT_OFFSET 30034
-#define QM_REG_PQOTHER2PF_9_RT_OFFSET 30035
-#define QM_REG_PQOTHER2PF_10_RT_OFFSET 30036
-#define QM_REG_PQOTHER2PF_11_RT_OFFSET 30037
-#define QM_REG_PQOTHER2PF_12_RT_OFFSET 30038
-#define QM_REG_PQOTHER2PF_13_RT_OFFSET 30039
-#define QM_REG_PQOTHER2PF_14_RT_OFFSET 30040
-#define QM_REG_PQOTHER2PF_15_RT_OFFSET 30041
-#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 30042
-#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 30043
-#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 30044
-#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 30045
-#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 30046
-#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 30047
-#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 30048
-#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 30049
-#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 30050
-#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 30051
-#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 30052
-#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 30053
-#define QM_REG_RLGLBLINCVAL_RT_OFFSET 30054
+#define QM_REG_MAXPQSIZE_0_RT_OFFSET 29738
+#define QM_REG_MAXPQSIZE_1_RT_OFFSET 29739
+#define QM_REG_MAXPQSIZE_2_RT_OFFSET 29740
+#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 29741
+#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 29742
+#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 29743
+#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 29744
+#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 29745
+#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 29746
+#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 29747
+#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 29748
+#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 29749
+#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 29750
+#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 29751
+#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 29752
+#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 29753
+#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 29754
+#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 29755
+#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 29756
+#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 29757
+#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 29758
+#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 29759
+#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 29760
+#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 29761
+#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 29762
+#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 29763
+#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 29764
+#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 29765
+#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 29766
+#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 29767
+#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 29768
+#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 29769
+#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 29770
+#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 29771
+#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 29772
+#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 29773
+#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 29774
+#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 29775
+#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 29776
+#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 29777
+#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 29778
+#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 29779
+#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 29780
+#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 29781
+#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 29782
+#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 29783
+#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 29784
+#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 29785
+#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 29786
+#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 29787
+#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 29788
+#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 29789
+#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 29790
+#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 29791
+#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 29792
+#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 29793
+#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 29794
+#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 29795
+#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 29796
+#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 29797
+#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 29798
+#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 29799
+#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 29800
+#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 29801
+#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 29802
+#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 29803
+#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 29804
+#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 29805
+#define QM_REG_BASEADDROTHERPQ_RT_SIZE 128
+#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29933
+#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29934
+#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29935
+#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29936
+#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29937
+#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29938
+#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29939
+#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29940
+#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29941
+#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29942
+#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29943
+#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29944
+#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29945
+#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29946
+#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29947
+#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29948
+#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29949
+#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29950
+#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29951
+#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29952
+#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29953
+#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29954
+#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29955
+#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29956
+#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29957
+#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29958
+#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29959
+#define QM_REG_PQTX2PF_0_RT_OFFSET 29960
+#define QM_REG_PQTX2PF_1_RT_OFFSET 29961
+#define QM_REG_PQTX2PF_2_RT_OFFSET 29962
+#define QM_REG_PQTX2PF_3_RT_OFFSET 29963
+#define QM_REG_PQTX2PF_4_RT_OFFSET 29964
+#define QM_REG_PQTX2PF_5_RT_OFFSET 29965
+#define QM_REG_PQTX2PF_6_RT_OFFSET 29966
+#define QM_REG_PQTX2PF_7_RT_OFFSET 29967
+#define QM_REG_PQTX2PF_8_RT_OFFSET 29968
+#define QM_REG_PQTX2PF_9_RT_OFFSET 29969
+#define QM_REG_PQTX2PF_10_RT_OFFSET 29970
+#define QM_REG_PQTX2PF_11_RT_OFFSET 29971
+#define QM_REG_PQTX2PF_12_RT_OFFSET 29972
+#define QM_REG_PQTX2PF_13_RT_OFFSET 29973
+#define QM_REG_PQTX2PF_14_RT_OFFSET 29974
+#define QM_REG_PQTX2PF_15_RT_OFFSET 29975
+#define QM_REG_PQTX2PF_16_RT_OFFSET 29976
+#define QM_REG_PQTX2PF_17_RT_OFFSET 29977
+#define QM_REG_PQTX2PF_18_RT_OFFSET 29978
+#define QM_REG_PQTX2PF_19_RT_OFFSET 29979
+#define QM_REG_PQTX2PF_20_RT_OFFSET 29980
+#define QM_REG_PQTX2PF_21_RT_OFFSET 29981
+#define QM_REG_PQTX2PF_22_RT_OFFSET 29982
+#define QM_REG_PQTX2PF_23_RT_OFFSET 29983
+#define QM_REG_PQTX2PF_24_RT_OFFSET 29984
+#define QM_REG_PQTX2PF_25_RT_OFFSET 29985
+#define QM_REG_PQTX2PF_26_RT_OFFSET 29986
+#define QM_REG_PQTX2PF_27_RT_OFFSET 29987
+#define QM_REG_PQTX2PF_28_RT_OFFSET 29988
+#define QM_REG_PQTX2PF_29_RT_OFFSET 29989
+#define QM_REG_PQTX2PF_30_RT_OFFSET 29990
+#define QM_REG_PQTX2PF_31_RT_OFFSET 29991
+#define QM_REG_PQTX2PF_32_RT_OFFSET 29992
+#define QM_REG_PQTX2PF_33_RT_OFFSET 29993
+#define QM_REG_PQTX2PF_34_RT_OFFSET 29994
+#define QM_REG_PQTX2PF_35_RT_OFFSET 29995
+#define QM_REG_PQTX2PF_36_RT_OFFSET 29996
+#define QM_REG_PQTX2PF_37_RT_OFFSET 29997
+#define QM_REG_PQTX2PF_38_RT_OFFSET 29998
+#define QM_REG_PQTX2PF_39_RT_OFFSET 29999
+#define QM_REG_PQTX2PF_40_RT_OFFSET 30000
+#define QM_REG_PQTX2PF_41_RT_OFFSET 30001
+#define QM_REG_PQTX2PF_42_RT_OFFSET 30002
+#define QM_REG_PQTX2PF_43_RT_OFFSET 30003
+#define QM_REG_PQTX2PF_44_RT_OFFSET 30004
+#define QM_REG_PQTX2PF_45_RT_OFFSET 30005
+#define QM_REG_PQTX2PF_46_RT_OFFSET 30006
+#define QM_REG_PQTX2PF_47_RT_OFFSET 30007
+#define QM_REG_PQTX2PF_48_RT_OFFSET 30008
+#define QM_REG_PQTX2PF_49_RT_OFFSET 30009
+#define QM_REG_PQTX2PF_50_RT_OFFSET 30010
+#define QM_REG_PQTX2PF_51_RT_OFFSET 30011
+#define QM_REG_PQTX2PF_52_RT_OFFSET 30012
+#define QM_REG_PQTX2PF_53_RT_OFFSET 30013
+#define QM_REG_PQTX2PF_54_RT_OFFSET 30014
+#define QM_REG_PQTX2PF_55_RT_OFFSET 30015
+#define QM_REG_PQTX2PF_56_RT_OFFSET 30016
+#define QM_REG_PQTX2PF_57_RT_OFFSET 30017
+#define QM_REG_PQTX2PF_58_RT_OFFSET 30018
+#define QM_REG_PQTX2PF_59_RT_OFFSET 30019
+#define QM_REG_PQTX2PF_60_RT_OFFSET 30020
+#define QM_REG_PQTX2PF_61_RT_OFFSET 30021
+#define QM_REG_PQTX2PF_62_RT_OFFSET 30022
+#define QM_REG_PQTX2PF_63_RT_OFFSET 30023
+#define QM_REG_PQOTHER2PF_0_RT_OFFSET 30024
+#define QM_REG_PQOTHER2PF_1_RT_OFFSET 30025
+#define QM_REG_PQOTHER2PF_2_RT_OFFSET 30026
+#define QM_REG_PQOTHER2PF_3_RT_OFFSET 30027
+#define QM_REG_PQOTHER2PF_4_RT_OFFSET 30028
+#define QM_REG_PQOTHER2PF_5_RT_OFFSET 30029
+#define QM_REG_PQOTHER2PF_6_RT_OFFSET 30030
+#define QM_REG_PQOTHER2PF_7_RT_OFFSET 30031
+#define QM_REG_PQOTHER2PF_8_RT_OFFSET 30032
+#define QM_REG_PQOTHER2PF_9_RT_OFFSET 30033
+#define QM_REG_PQOTHER2PF_10_RT_OFFSET 30034
+#define QM_REG_PQOTHER2PF_11_RT_OFFSET 30035
+#define QM_REG_PQOTHER2PF_12_RT_OFFSET 30036
+#define QM_REG_PQOTHER2PF_13_RT_OFFSET 30037
+#define QM_REG_PQOTHER2PF_14_RT_OFFSET 30038
+#define QM_REG_PQOTHER2PF_15_RT_OFFSET 30039
+#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 30040
+#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 30041
+#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 30042
+#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 30043
+#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 30044
+#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 30045
+#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 30046
+#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 30047
+#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 30048
+#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 30049
+#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 30050
+#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 30051
+#define QM_REG_RLGLBLINCVAL_RT_OFFSET 30052
#define QM_REG_RLGLBLINCVAL_RT_SIZE 256
-#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 30310
+#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 30308
#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256
-#define QM_REG_RLGLBLCRD_RT_OFFSET 30566
+#define QM_REG_RLGLBLCRD_RT_OFFSET 30564
#define QM_REG_RLGLBLCRD_RT_SIZE 256
-#define QM_REG_RLGLBLENABLE_RT_OFFSET 30822
-#define QM_REG_RLPFPERIOD_RT_OFFSET 30823
-#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30824
-#define QM_REG_RLPFINCVAL_RT_OFFSET 30825
+#define QM_REG_RLGLBLENABLE_RT_OFFSET 30820
+#define QM_REG_RLPFPERIOD_RT_OFFSET 30821
+#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30822
+#define QM_REG_RLPFINCVAL_RT_OFFSET 30823
#define QM_REG_RLPFINCVAL_RT_SIZE 16
-#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30841
+#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30839
#define QM_REG_RLPFUPPERBOUND_RT_SIZE 16
-#define QM_REG_RLPFCRD_RT_OFFSET 30857
+#define QM_REG_RLPFCRD_RT_OFFSET 30855
#define QM_REG_RLPFCRD_RT_SIZE 16
-#define QM_REG_RLPFENABLE_RT_OFFSET 30873
-#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30874
-#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30875
+#define QM_REG_RLPFENABLE_RT_OFFSET 30871
+#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30872
+#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30873
#define QM_REG_WFQPFWEIGHT_RT_SIZE 16
-#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30891
+#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30889
#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16
-#define QM_REG_WFQPFCRD_RT_OFFSET 30907
+#define QM_REG_WFQPFCRD_RT_OFFSET 30905
#define QM_REG_WFQPFCRD_RT_SIZE 256
-#define QM_REG_WFQPFENABLE_RT_OFFSET 31163
-#define QM_REG_WFQVPENABLE_RT_OFFSET 31164
-#define QM_REG_BASEADDRTXPQ_RT_OFFSET 31165
+#define QM_REG_WFQPFENABLE_RT_OFFSET 31161
+#define QM_REG_WFQVPENABLE_RT_OFFSET 31162
+#define QM_REG_BASEADDRTXPQ_RT_OFFSET 31163
#define QM_REG_BASEADDRTXPQ_RT_SIZE 512
-#define QM_REG_TXPQMAP_RT_OFFSET 31677
+#define QM_REG_TXPQMAP_RT_OFFSET 31675
#define QM_REG_TXPQMAP_RT_SIZE 512
-#define QM_REG_WFQVPWEIGHT_RT_OFFSET 32189
+#define QM_REG_WFQVPWEIGHT_RT_OFFSET 32187
#define QM_REG_WFQVPWEIGHT_RT_SIZE 512
-#define QM_REG_WFQVPCRD_RT_OFFSET 32701
+#define QM_REG_WFQVPCRD_RT_OFFSET 32699
#define QM_REG_WFQVPCRD_RT_SIZE 512
-#define QM_REG_WFQVPMAP_RT_OFFSET 33213
+#define QM_REG_WFQVPMAP_RT_OFFSET 33211
#define QM_REG_WFQVPMAP_RT_SIZE 512
-#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 33725
+#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 33723
#define QM_REG_WFQPFCRD_MSB_RT_SIZE 320
-#define QM_REG_VOQCRDLINE_RT_OFFSET 34045
+#define QM_REG_VOQCRDLINE_RT_OFFSET 34043
#define QM_REG_VOQCRDLINE_RT_SIZE 36
-#define QM_REG_VOQINITCRDLINE_RT_OFFSET 34081
+#define QM_REG_VOQINITCRDLINE_RT_OFFSET 34079
#define QM_REG_VOQINITCRDLINE_RT_SIZE 36
-#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 34117
-#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 34118
-#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 34119
-#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 34120
-#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 34121
-#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 34122
-#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 34123
-#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 34124
+#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 34115
+#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 34116
+#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 34117
+#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 34118
+#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 34119
+#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 34120
+#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 34121
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 34122
#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 34128
+#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 34126
#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 34132
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 34130
#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 34136
-#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 34137
+#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 34134
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 34135
#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32
-#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 34169
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 34167
#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 34185
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 34183
#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 34201
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 34199
#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 34217
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 34215
#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16
-#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 34233
-#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 34234
-#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 34235
-#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 34236
-#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 34237
-#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 34238
-#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 34239
-#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 34240
-#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 34241
-#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 34242
-#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 34243
-#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 34244
-#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 34245
-#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 34246
-#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 34247
-#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 34248
-#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 34249
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 34250
-#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 34251
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 34252
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 34253
-#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 34254
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 34255
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 34256
-#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 34257
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 34258
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 34259
-#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 34260
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 34261
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 34262
-#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 34263
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 34264
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 34265
-#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 34266
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 34267
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 34268
-#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 34269
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 34270
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 34271
-#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 34272
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 34273
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 34274
-#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 34275
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 34276
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 34277
-#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 34278
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 34279
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 34280
-#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 34281
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 34282
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 34283
-#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 34284
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 34285
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 34286
-#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 34287
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 34288
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 34289
-#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 34290
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 34291
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 34292
-#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 34293
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 34294
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 34295
-#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 34296
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 34297
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 34298
-#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 34299
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 34300
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 34301
-#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 34302
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 34303
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 34304
-#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 34305
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 34306
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 34307
-#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 34308
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 34309
-#define XCM_REG_CON_PHY_Q3_RT_OFFSET 34310
+#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 34231
+#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 34232
+#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 34233
+#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 34234
+#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 34235
+#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 34236
+#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 34237
+#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 34238
+#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 34239
+#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 34240
+#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 34241
+#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 34242
+#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 34243
+#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 34244
+#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 34245
+#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 34246
+#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 34247
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 34248
+#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 34249
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 34250
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 34251
+#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 34252
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 34253
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 34254
+#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 34255
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 34256
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 34257
+#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 34258
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 34259
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 34260
+#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 34261
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 34262
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 34263
+#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 34264
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 34265
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 34266
+#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 34267
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 34268
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 34269
+#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 34270
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 34271
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 34272
+#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 34273
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 34274
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 34275
+#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 34276
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 34277
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 34278
+#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 34279
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 34280
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 34281
+#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 34282
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 34283
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 34284
+#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 34285
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 34286
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 34287
+#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 34288
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 34289
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 34290
+#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 34291
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 34292
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 34293
+#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 34294
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 34295
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 34296
+#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 34297
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 34298
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 34299
+#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 34300
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 34301
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 34302
+#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 34303
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 34304
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 34305
+#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 34306
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 34307
+#define XCM_REG_CON_PHY_Q3_RT_OFFSET 34308
-#define RUNTIME_ARRAY_SIZE 34311
+#define RUNTIME_ARRAY_SIZE 34309
#endif /* __RT_DEFS_H__ */
diff --git a/drivers/net/qede/base/ecore_sp_commands.c b/drivers/net/qede/base/ecore_sp_commands.c
index 8fd64d7a..d6e4b9e0 100644
--- a/drivers/net/qede/base/ecore_sp_commands.c
+++ b/drivers/net/qede/base/ecore_sp_commands.c
@@ -165,23 +165,19 @@ static void ecore_set_tunn_ports(struct ecore_tunnel_info *p_tun,
}
static void
-__ecore_set_ramrod_tunnel_param(u8 *p_tunn_cls, u8 *p_enable_tx_clas,
+__ecore_set_ramrod_tunnel_param(u8 *p_tunn_cls,
struct ecore_tunn_update_type *tun_type)
{
*p_tunn_cls = tun_type->tun_cls;
-
- if (tun_type->b_mode_enabled)
- *p_enable_tx_clas = 1;
}
static void
-ecore_set_ramrod_tunnel_param(u8 *p_tunn_cls, u8 *p_enable_tx_clas,
+ecore_set_ramrod_tunnel_param(u8 *p_tunn_cls,
struct ecore_tunn_update_type *tun_type,
u8 *p_update_port, __le16 *p_port,
struct ecore_tunn_update_udp_port *p_udp_port)
{
- __ecore_set_ramrod_tunnel_param(p_tunn_cls, p_enable_tx_clas,
- tun_type);
+ __ecore_set_ramrod_tunnel_param(p_tunn_cls, tun_type);
if (p_udp_port->b_update_port) {
*p_update_port = 1;
*p_port = OSAL_CPU_TO_LE16(p_udp_port->port);
@@ -200,33 +196,27 @@ ecore_tunn_set_pf_update_params(struct ecore_hwfn *p_hwfn,
ecore_set_tunn_ports(p_tun, p_src);
ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan,
- &p_tunn_cfg->tx_enable_vxlan,
&p_tun->vxlan,
&p_tunn_cfg->set_vxlan_udp_port_flg,
&p_tunn_cfg->vxlan_udp_port,
&p_tun->vxlan_port);
ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve,
- &p_tunn_cfg->tx_enable_l2geneve,
&p_tun->l2_geneve,
&p_tunn_cfg->set_geneve_udp_port_flg,
&p_tunn_cfg->geneve_udp_port,
&p_tun->geneve_port);
__ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve,
- &p_tunn_cfg->tx_enable_ipgeneve,
&p_tun->ip_geneve);
__ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre,
- &p_tunn_cfg->tx_enable_l2gre,
&p_tun->l2_gre);
__ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre,
- &p_tunn_cfg->tx_enable_ipgre,
&p_tun->ip_gre);
p_tunn_cfg->update_rx_pf_clss = p_tun->b_update_rx_cls;
- p_tunn_cfg->update_tx_pf_clss = p_tun->b_update_tx_cls;
}
static void ecore_set_hw_tunn_mode(struct ecore_hwfn *p_hwfn,
@@ -282,29 +272,24 @@ ecore_tunn_set_pf_start_params(struct ecore_hwfn *p_hwfn,
ecore_set_tunn_ports(p_tun, p_src);
ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan,
- &p_tunn_cfg->tx_enable_vxlan,
&p_tun->vxlan,
&p_tunn_cfg->set_vxlan_udp_port_flg,
&p_tunn_cfg->vxlan_udp_port,
&p_tun->vxlan_port);
ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve,
- &p_tunn_cfg->tx_enable_l2geneve,
&p_tun->l2_geneve,
&p_tunn_cfg->set_geneve_udp_port_flg,
&p_tunn_cfg->geneve_udp_port,
&p_tun->geneve_port);
__ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve,
- &p_tunn_cfg->tx_enable_ipgeneve,
&p_tun->ip_geneve);
__ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre,
- &p_tunn_cfg->tx_enable_l2gre,
&p_tun->l2_gre);
__ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre,
- &p_tunn_cfg->tx_enable_ipgre,
&p_tun->ip_gre);
}
@@ -345,7 +330,7 @@ enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
/* For easier debugging */
p_ramrod->dont_log_ramrods = 0;
- p_ramrod->log_type_mask = OSAL_CPU_TO_LE16(0xf);
+ p_ramrod->log_type_mask = OSAL_CPU_TO_LE16(0x8f);
switch (mode) {
case ECORE_MF_DEFAULT:
diff --git a/drivers/net/qede/base/eth_common.h b/drivers/net/qede/base/eth_common.h
index 6dc969b0..45a0356d 100644
--- a/drivers/net/qede/base/eth_common.h
+++ b/drivers/net/qede/base/eth_common.h
@@ -79,6 +79,10 @@
/* Maximum number of buffers, used for RX packet placement */
#define ETH_RX_MAX_BUFF_PER_PKT 5
+/* Minimum number of free BDs in the RX ring that guarantees reception of at
+ * least one RX packet.
+ */
+#define ETH_RX_BD_THRESHOLD 12
/* num of MAC/VLAN filters */
#define ETH_NUM_MAC_FILTERS 512
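A minimal sketch, not part of the patch, of how a receive path could use the new ETH_RX_BD_THRESHOLD: once the number of free BDs in the RX ring drops below it, reception of even one more packet (which may consume up to ETH_RX_MAX_BUFF_PER_PKT buffers) is no longer guaranteed, so the ring should be replenished. The ring structure and helper names below are hypothetical stand-ins for the PMD's real queue bookkeeping.

#include <stdint.h>
#include <stdbool.h>

#define ETH_RX_BD_THRESHOLD	12	/* from eth_common.h above */

struct rx_ring {
	uint16_t nb_desc;	/* total BDs in the ring */
	uint16_t sw_prod;	/* BDs posted to hardware */
	uint16_t sw_cons;	/* BDs completed and returned */
};

static uint16_t rx_ring_free_bds(const struct rx_ring *r)
{
	/* outstanding = posted - completed; uint16_t arithmetic handles wrap */
	uint16_t outstanding = (uint16_t)(r->sw_prod - r->sw_cons);

	return (uint16_t)(r->nb_desc - outstanding);
}

static bool rx_ring_needs_refill(const struct rx_ring *r)
{
	/* Below the threshold the hardware can no longer be guaranteed to
	 * place even a single maximum-fragment packet.
	 */
	return rx_ring_free_bds(r) < ETH_RX_BD_THRESHOLD;
}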
diff --git a/drivers/net/qede/base/mcp_public.h b/drivers/net/qede/base/mcp_public.h
index fcf98477..1ad8a962 100644
--- a/drivers/net/qede/base/mcp_public.h
+++ b/drivers/net/qede/base/mcp_public.h
@@ -132,13 +132,28 @@ struct eth_stats {
u64 r1023; /* 0x04 (Offset 0x20 ) RX 512 to 1023 byte frame counter*/
/* 0x05 (Offset 0x28 ) RX 1024 to 1518 byte frame counter */
u64 r1518;
+ union {
+ struct { /* bb */
/* 0x06 (Offset 0x30 ) RX 1519 to 1522 byte VLAN-tagged frame counter */
- u64 r1522;
- u64 r2047; /* 0x07 (Offset 0x38 ) RX 1519 to 2047 byte frame counter*/
- u64 r4095; /* 0x08 (Offset 0x40 ) RX 2048 to 4095 byte frame counter*/
- u64 r9216; /* 0x09 (Offset 0x48 ) RX 4096 to 9216 byte frame counter*/
+ u64 r1522;
+/* 0x07 (Offset 0x38 ) RX 1519 to 2047 byte frame counter*/
+ u64 r2047;
+/* 0x08 (Offset 0x40 ) RX 2048 to 4095 byte frame counter*/
+ u64 r4095;
+/* 0x09 (Offset 0x48 ) RX 4096 to 9216 byte frame counter*/
+ u64 r9216;
/* 0x0A (Offset 0x50 ) RX 9217 to 16383 byte frame counter */
- u64 r16383;
+ u64 r16383;
+ } bb0;
+ struct { /* ah */
+ u64 unused1;
+/* 0x07 (Offset 0x38 ) RX 1519 to max byte frame counter*/
+ u64 r1519_to_max;
+ u64 unused2;
+ u64 unused3;
+ u64 unused4;
+ } ah0;
+ } u0;
u64 rfcs; /* 0x0F (Offset 0x58 ) RX FCS error frame counter*/
u64 rxcf; /* 0x10 (Offset 0x60 ) RX control frame counter*/
u64 rxpf; /* 0x11 (Offset 0x68 ) RX pause frame counter*/
@@ -156,19 +171,40 @@ struct eth_stats {
u64 t1023; /* 0x44 (Offset 0xc8 ) TX 512 to 1023 byte frame counter*/
/* 0x45 (Offset 0xd0 ) TX 1024 to 1518 byte frame counter */
u64 t1518;
+ union {
+ struct { /* bb */
/* 0x47 (Offset 0xd8 ) TX 1519 to 2047 byte frame counter */
- u64 t2047;
+ u64 t2047;
/* 0x48 (Offset 0xe0 ) TX 2048 to 4095 byte frame counter */
- u64 t4095;
+ u64 t4095;
/* 0x49 (Offset 0xe8 ) TX 4096 to 9216 byte frame counter */
- u64 t9216;
+ u64 t9216;
/* 0x4A (Offset 0xf0 ) TX 9217 to 16383 byte frame counter */
- u64 t16383;
+ u64 t16383;
+ } bb1;
+ struct { /* ah */
+/* 0x47 (Offset 0xd8 ) TX 1519 to max byte frame counter */
+ u64 t1519_to_max;
+ u64 unused6;
+ u64 unused7;
+ u64 unused8;
+ } ah1;
+ } u1;
u64 txpf; /* 0x50 (Offset 0xf8 ) TX pause frame counter */
u64 txpp; /* 0x51 (Offset 0x100) TX PFC frame counter */
/* 0x6C (Offset 0x108) Transmit Logical Type LLFC message counter */
- u64 tlpiec;
- u64 tncl; /* 0x6E (Offset 0x110) Transmit Total Collision Counter */
+ union {
+ struct { /* bb */
+/* 0x6C (Offset 0x108) Transmit Logical Type LLFC message counter */
+ u64 tlpiec;
+/* 0x6E (Offset 0x110) Transmit Total Collision Counter */
+ u64 tncl;
+ } bb2;
+ struct { /* ah */
+ u64 unused9;
+ u64 unused10;
+ } ah2;
+ } u2;
u64 rbyte; /* 0x3d (Offset 0x118) RX byte counter */
u64 rxuca; /* 0x0c (Offset 0x120) RX UC frame counter */
u64 rxmca; /* 0x0d (Offset 0x128) RX MC frame counter */
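An illustrative sketch, not part of the patch, of how a reader of the reworked struct eth_stats picks the 1519+ byte RX counters per chip family: BB keeps per-range counters in u0.bb0 (r1522 being a VLAN-tagged subset of r2047), while AH keeps a single aggregate in u0.ah0. The is_chip_bb flag is a hypothetical stand-in for the ECORE_IS_BB() check, and the struct eth_stats definition from the hunk above is assumed to be in scope.

#include <stdint.h>

static uint64_t eth_stats_rx_1519_plus(const struct eth_stats *stats,
					int is_chip_bb)
{
	if (is_chip_bb)
		/* BB: sum the per-range counters for one 1519+ total;
		 * r1522 is excluded since it is a subset of r2047.
		 */
		return stats->u0.bb0.r2047 + stats->u0.bb0.r4095 +
		       stats->u0.bb0.r9216 + stats->u0.bb0.r16383;

	/* AH: firmware already maintains one aggregate counter */
	return stats->u0.ah0.r1519_to_max;
}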
diff --git a/drivers/net/qede/base/reg_addr.h b/drivers/net/qede/base/reg_addr.h
index f9920f37..60286545 100644
--- a/drivers/net/qede/base/reg_addr.h
+++ b/drivers/net/qede/base/reg_addr.h
@@ -1200,3 +1200,8 @@
#define PGLUE_B_REG_VF_BAR0_SIZE_K2_E5 0x2aaeb4UL
#define PRS_REG_OUTPUT_FORMAT_4_0_BB_K2 0x1f099cUL
+
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 0x501a00UL
+#define NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 0x501a80UL
+#define NIG_REG_LLH_FUNC_FILTER_MODE_BB_K2 0x501ac0UL
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_BB_K2 0x501b00UL
diff --git a/drivers/net/qede/qede_eth_if.c b/drivers/net/qede/qede_eth_if.c
deleted file mode 100644
index a3c0b137..00000000
--- a/drivers/net/qede/qede_eth_if.c
+++ /dev/null
@@ -1,318 +0,0 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
- * All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
- */
-
-#include "qede_ethdev.h"
-
-static int
-qed_start_vport(struct ecore_dev *edev, struct qed_start_vport_params *p_params)
-{
- int rc, i;
-
- for_each_hwfn(edev, i) {
- struct ecore_hwfn *p_hwfn = &edev->hwfns[i];
- u8 tx_switching = 0;
- struct ecore_sp_vport_start_params start = { 0 };
-
- start.tpa_mode = p_params->enable_lro ? ECORE_TPA_MODE_RSC :
- ECORE_TPA_MODE_NONE;
- start.remove_inner_vlan = p_params->remove_inner_vlan;
- start.tx_switching = tx_switching;
- start.only_untagged = false; /* untagged only */
- start.drop_ttl0 = p_params->drop_ttl0;
- start.concrete_fid = p_hwfn->hw_info.concrete_fid;
- start.opaque_fid = p_hwfn->hw_info.opaque_fid;
- start.concrete_fid = p_hwfn->hw_info.concrete_fid;
- start.handle_ptp_pkts = p_params->handle_ptp_pkts;
- start.vport_id = p_params->vport_id;
- start.mtu = p_params->mtu;
- /* @DPDK - Disable FW placement */
- start.zero_placement_offset = 1;
-
- rc = ecore_sp_vport_start(p_hwfn, &start);
- if (rc) {
- DP_ERR(edev, "Failed to start VPORT\n");
- return rc;
- }
-
- DP_VERBOSE(edev, ECORE_MSG_SPQ,
- "Started V-PORT %d with MTU %d\n",
- p_params->vport_id, p_params->mtu);
- }
-
- ecore_reset_vport_stats(edev);
-
- return 0;
-}
-
-static int qed_stop_vport(struct ecore_dev *edev, uint8_t vport_id)
-{
- int rc, i;
-
- for_each_hwfn(edev, i) {
- struct ecore_hwfn *p_hwfn = &edev->hwfns[i];
- rc = ecore_sp_vport_stop(p_hwfn,
- p_hwfn->hw_info.opaque_fid, vport_id);
-
- if (rc) {
- DP_ERR(edev, "Failed to stop VPORT\n");
- return rc;
- }
- }
-
- return 0;
-}
-
-static int
-qed_update_vport(struct ecore_dev *edev, struct qed_update_vport_params *params)
-{
- struct ecore_sp_vport_update_params sp_params;
- struct ecore_rss_params sp_rss_params;
- int rc, i;
-
- memset(&sp_params, 0, sizeof(sp_params));
- memset(&sp_rss_params, 0, sizeof(sp_rss_params));
-
- /* Translate protocol params into sp params */
- sp_params.vport_id = params->vport_id;
- sp_params.update_vport_active_rx_flg = params->update_vport_active_flg;
- sp_params.update_vport_active_tx_flg = params->update_vport_active_flg;
- sp_params.vport_active_rx_flg = params->vport_active_flg;
- sp_params.vport_active_tx_flg = params->vport_active_flg;
- sp_params.update_inner_vlan_removal_flg =
- params->update_inner_vlan_removal_flg;
- sp_params.inner_vlan_removal_flg = params->inner_vlan_removal_flg;
- sp_params.update_tx_switching_flg = params->update_tx_switching_flg;
- sp_params.tx_switching_flg = params->tx_switching_flg;
- sp_params.accept_any_vlan = params->accept_any_vlan;
- sp_params.update_accept_any_vlan_flg =
- params->update_accept_any_vlan_flg;
- sp_params.mtu = params->mtu;
- sp_params.sge_tpa_params = params->sge_tpa_params;
-
- for_each_hwfn(edev, i) {
- struct ecore_hwfn *p_hwfn = &edev->hwfns[i];
-
- sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
- rc = ecore_sp_vport_update(p_hwfn, &sp_params,
- ECORE_SPQ_MODE_EBLOCK, NULL);
- if (rc) {
- DP_ERR(edev, "Failed to update VPORT\n");
- return rc;
- }
-
- DP_VERBOSE(edev, ECORE_MSG_SPQ,
- "Updated V-PORT %d: active_flag %d [update %d]\n",
- params->vport_id, params->vport_active_flg,
- params->update_vport_active_flg);
- }
-
- return 0;
-}
-
-static int
-qed_start_rxq(struct ecore_dev *edev,
- uint8_t rss_num,
- struct ecore_queue_start_common_params *p_params,
- uint16_t bd_max_bytes,
- dma_addr_t bd_chain_phys_addr,
- dma_addr_t cqe_pbl_addr,
- uint16_t cqe_pbl_size,
- struct ecore_rxq_start_ret_params *ret_params)
-{
- struct ecore_hwfn *p_hwfn;
- int rc, hwfn_index;
-
- hwfn_index = rss_num % edev->num_hwfns;
- p_hwfn = &edev->hwfns[hwfn_index];
-
- p_params->queue_id = p_params->queue_id / edev->num_hwfns;
- p_params->stats_id = p_params->vport_id;
-
- rc = ecore_eth_rx_queue_start(p_hwfn,
- p_hwfn->hw_info.opaque_fid,
- p_params,
- bd_max_bytes,
- bd_chain_phys_addr,
- cqe_pbl_addr,
- cqe_pbl_size,
- ret_params);
-
- if (rc) {
- DP_ERR(edev, "Failed to start RXQ#%d\n", p_params->queue_id);
- return rc;
- }
-
- DP_VERBOSE(edev, ECORE_MSG_SPQ,
- "Started RX-Q %d [rss_num %d] on V-PORT %d and SB %d\n",
- p_params->queue_id, rss_num, p_params->vport_id,
- p_params->sb);
-
- return 0;
-}
-
-static int
-qed_stop_rxq(struct ecore_dev *edev, uint8_t rss_id, void *handle)
-{
- int rc, hwfn_index;
- struct ecore_hwfn *p_hwfn;
-
- hwfn_index = rss_id % edev->num_hwfns;
- p_hwfn = &edev->hwfns[hwfn_index];
-
- rc = ecore_eth_rx_queue_stop(p_hwfn, handle, true, false);
- if (rc) {
- DP_ERR(edev, "Failed to stop RXQ#%02x\n", rss_id);
- return rc;
- }
-
- return 0;
-}
-
-static int
-qed_start_txq(struct ecore_dev *edev,
- uint8_t rss_num,
- struct ecore_queue_start_common_params *p_params,
- dma_addr_t pbl_addr,
- uint16_t pbl_size,
- struct ecore_txq_start_ret_params *ret_params)
-{
- struct ecore_hwfn *p_hwfn;
- int rc, hwfn_index;
-
- hwfn_index = rss_num % edev->num_hwfns;
- p_hwfn = &edev->hwfns[hwfn_index];
-
- p_params->queue_id = p_params->queue_id / edev->num_hwfns;
- p_params->stats_id = p_params->vport_id;
-
- rc = ecore_eth_tx_queue_start(p_hwfn,
- p_hwfn->hw_info.opaque_fid,
- p_params, 0 /* tc */,
- pbl_addr, pbl_size,
- ret_params);
-
- if (rc) {
- DP_ERR(edev, "Failed to start TXQ#%d\n", p_params->queue_id);
- return rc;
- }
-
- DP_VERBOSE(edev, ECORE_MSG_SPQ,
- "Started TX-Q %d [rss_num %d] on V-PORT %d and SB %d\n",
- p_params->queue_id, rss_num, p_params->vport_id,
- p_params->sb);
-
- return 0;
-}
-
-static int
-qed_stop_txq(struct ecore_dev *edev, uint8_t rss_id, void *handle)
-{
- struct ecore_hwfn *p_hwfn;
- int rc, hwfn_index;
-
- hwfn_index = rss_id % edev->num_hwfns;
- p_hwfn = &edev->hwfns[hwfn_index];
-
- rc = ecore_eth_tx_queue_stop(p_hwfn, handle);
- if (rc) {
- DP_ERR(edev, "Failed to stop TXQ#%02x\n", rss_id);
- return rc;
- }
-
- return 0;
-}
-
-static int
-qed_fp_cqe_completion(struct ecore_dev *edev,
- uint8_t rss_id, struct eth_slow_path_rx_cqe *cqe)
-{
- return ecore_eth_cqe_completion(&edev->hwfns[rss_id % edev->num_hwfns],
- cqe);
-}
-
-static int qed_fastpath_stop(struct ecore_dev *edev)
-{
- ecore_hw_stop_fastpath(edev);
-
- return 0;
-}
-
-static void qed_fastpath_start(struct ecore_dev *edev)
-{
- struct ecore_hwfn *p_hwfn;
- int i;
-
- for_each_hwfn(edev, i) {
- p_hwfn = &edev->hwfns[i];
- ecore_hw_start_fastpath(p_hwfn);
- }
-}
-
-static void
-qed_get_vport_stats(struct ecore_dev *edev, struct ecore_eth_stats *stats)
-{
- ecore_get_vport_stats(edev, stats);
-}
-
-int qed_configure_filter_rx_mode(struct rte_eth_dev *eth_dev,
- enum qed_filter_rx_mode_type type)
-{
- struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
- struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- struct ecore_filter_accept_flags flags;
-
- memset(&flags, 0, sizeof(flags));
-
- flags.update_rx_mode_config = 1;
- flags.update_tx_mode_config = 1;
- flags.rx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
- ECORE_ACCEPT_MCAST_MATCHED |
- ECORE_ACCEPT_BCAST;
-
- flags.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
- ECORE_ACCEPT_MCAST_MATCHED |
- ECORE_ACCEPT_BCAST;
-
- if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) {
- flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
- if (IS_VF(edev)) {
- flags.tx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
- DP_INFO(edev, "Enabling Tx unmatched flag for VF\n");
- }
- } else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) {
- flags.rx_accept_filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
- } else if (type == (QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC |
- QED_FILTER_RX_MODE_TYPE_PROMISC)) {
- flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED |
- ECORE_ACCEPT_MCAST_UNMATCHED;
- }
-
- return ecore_filter_accept_cmd(edev, 0, flags, false, false,
- ECORE_SPQ_MODE_CB, NULL);
-}
-
-static const struct qed_eth_ops qed_eth_ops_pass = {
- INIT_STRUCT_FIELD(common, &qed_common_ops_pass),
- INIT_STRUCT_FIELD(fill_dev_info, &qed_fill_eth_dev_info),
- INIT_STRUCT_FIELD(vport_start, &qed_start_vport),
- INIT_STRUCT_FIELD(vport_stop, &qed_stop_vport),
- INIT_STRUCT_FIELD(vport_update, &qed_update_vport),
- INIT_STRUCT_FIELD(q_rx_start, &qed_start_rxq),
- INIT_STRUCT_FIELD(q_tx_start, &qed_start_txq),
- INIT_STRUCT_FIELD(q_rx_stop, &qed_stop_rxq),
- INIT_STRUCT_FIELD(q_tx_stop, &qed_stop_txq),
- INIT_STRUCT_FIELD(eth_cqe_completion, &qed_fp_cqe_completion),
- INIT_STRUCT_FIELD(fastpath_stop, &qed_fastpath_stop),
- INIT_STRUCT_FIELD(fastpath_start, &qed_fastpath_start),
- INIT_STRUCT_FIELD(get_vport_stats, &qed_get_vport_stats),
-};
-
-const struct qed_eth_ops *qed_get_eth_ops(void)
-{
- return &qed_eth_ops_pass;
-}
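The queue-to-engine mapping used throughout the deleted file (and retained elsewhere in the driver) interleaves queues across hwfns on 100G dual-engine devices: global queue q is served by hwfn q % num_hwfns and uses local id q / num_hwfns. A tiny illustrative sketch, assuming the same modulo/division scheme:

#include <stdio.h>

int main(void)
{
	const int num_hwfns = 2;	/* e.g. a 100G dual-engine adapter */
	int q;

	for (q = 0; q < 4; q++)
		printf("global q%d -> hwfn %d, local id %d\n",
		       q, q % num_hwfns, q / num_hwfns);

	return 0;	/* q0->hwfn0/0, q1->hwfn1/0, q2->hwfn0/1, q3->hwfn1/1 */
}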
diff --git a/drivers/net/qede/qede_eth_if.h b/drivers/net/qede/qede_eth_if.h
deleted file mode 100644
index 097e0257..00000000
--- a/drivers/net/qede/qede_eth_if.h
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
- * All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
- */
-
-#ifndef _QEDE_ETH_IF_H
-#define _QEDE_ETH_IF_H
-
-#include "qede_if.h"
-
-/*forward decl */
-struct eth_slow_path_rx_cqe;
-
-#define INIT_STRUCT_FIELD(field, value) .field = value
-
-#define QED_ETH_INTERFACE_VERSION 609
-
-#define QEDE_MAX_MCAST_FILTERS 64
-
-enum qed_filter_rx_mode_type {
- QED_FILTER_RX_MODE_TYPE_REGULAR,
- QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC,
- QED_FILTER_RX_MODE_TYPE_PROMISC,
-};
-
-enum qed_filter_type {
- QED_FILTER_TYPE_UCAST,
- QED_FILTER_TYPE_MCAST,
- QED_FILTER_TYPE_RX_MODE,
- QED_MAX_FILTER_TYPES,
-};
-
-struct qed_dev_eth_info {
- struct qed_dev_info common;
-
- uint8_t num_queues;
- uint8_t num_tc;
-
- struct ether_addr port_mac;
- uint16_t num_vlan_filters;
- uint32_t num_mac_filters;
-
- /* Legacy VF - this affects the datapath */
- bool is_legacy;
-};
-
-struct qed_update_vport_params {
- uint8_t vport_id;
- uint8_t update_vport_active_flg;
- uint8_t vport_active_flg;
- uint8_t update_inner_vlan_removal_flg;
- uint8_t inner_vlan_removal_flg;
- uint8_t update_tx_switching_flg;
- uint8_t tx_switching_flg;
- uint8_t update_accept_any_vlan_flg;
- uint8_t accept_any_vlan;
- uint8_t update_rss_flg;
- uint16_t mtu;
- struct ecore_sge_tpa_params *sge_tpa_params;
-};
-
-struct qed_start_vport_params {
- bool remove_inner_vlan;
- bool handle_ptp_pkts;
- bool enable_lro;
- bool drop_ttl0;
- uint8_t vport_id;
- uint16_t mtu;
- bool clear_stats;
-};
-
-struct qed_eth_ops {
- const struct qed_common_ops *common;
-
- int (*fill_dev_info)(struct ecore_dev *edev,
- struct qed_dev_eth_info *info);
-
- int (*vport_start)(struct ecore_dev *edev,
- struct qed_start_vport_params *params);
-
- int (*vport_stop)(struct ecore_dev *edev, uint8_t vport_id);
-
- int (*vport_update)(struct ecore_dev *edev,
- struct qed_update_vport_params *params);
-
- int (*q_rx_start)(struct ecore_dev *cdev,
- uint8_t rss_num,
- struct ecore_queue_start_common_params *p_params,
- uint16_t bd_max_bytes,
- dma_addr_t bd_chain_phys_addr,
- dma_addr_t cqe_pbl_addr,
- uint16_t cqe_pbl_size,
- struct ecore_rxq_start_ret_params *ret_params);
-
- int (*q_rx_stop)(struct ecore_dev *edev,
- uint8_t rss_id, void *handle);
-
- int (*q_tx_start)(struct ecore_dev *edev,
- uint8_t rss_num,
- struct ecore_queue_start_common_params *p_params,
- dma_addr_t pbl_addr,
- uint16_t pbl_size,
- struct ecore_txq_start_ret_params *ret_params);
-
- int (*q_tx_stop)(struct ecore_dev *edev,
- uint8_t rss_id, void *handle);
-
- int (*eth_cqe_completion)(struct ecore_dev *edev,
- uint8_t rss_id,
- struct eth_slow_path_rx_cqe *cqe);
-
- int (*fastpath_stop)(struct ecore_dev *edev);
-
- void (*fastpath_start)(struct ecore_dev *edev);
-
- void (*get_vport_stats)(struct ecore_dev *edev,
- struct ecore_eth_stats *stats);
-};
-
-/* externs */
-
-extern const struct qed_common_ops qed_common_ops_pass;
-
-const struct qed_eth_ops *qed_get_eth_ops(void);
-
-int qed_configure_filter_rx_mode(struct rte_eth_dev *eth_dev,
- enum qed_filter_rx_mode_type type);
-
-#endif /* _QEDE_ETH_IF_H */
diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
index 9fae40b6..0e059898 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
@@ -125,143 +125,199 @@ struct rte_qede_xstats_name_off {
};
static const struct rte_qede_xstats_name_off qede_xstats_strings[] = {
- {"rx_unicast_bytes", offsetof(struct ecore_eth_stats, rx_ucast_bytes)},
+ {"rx_unicast_bytes",
+ offsetof(struct ecore_eth_stats_common, rx_ucast_bytes)},
{"rx_multicast_bytes",
- offsetof(struct ecore_eth_stats, rx_mcast_bytes)},
+ offsetof(struct ecore_eth_stats_common, rx_mcast_bytes)},
{"rx_broadcast_bytes",
- offsetof(struct ecore_eth_stats, rx_bcast_bytes)},
- {"rx_unicast_packets", offsetof(struct ecore_eth_stats, rx_ucast_pkts)},
+ offsetof(struct ecore_eth_stats_common, rx_bcast_bytes)},
+ {"rx_unicast_packets",
+ offsetof(struct ecore_eth_stats_common, rx_ucast_pkts)},
{"rx_multicast_packets",
- offsetof(struct ecore_eth_stats, rx_mcast_pkts)},
+ offsetof(struct ecore_eth_stats_common, rx_mcast_pkts)},
{"rx_broadcast_packets",
- offsetof(struct ecore_eth_stats, rx_bcast_pkts)},
+ offsetof(struct ecore_eth_stats_common, rx_bcast_pkts)},
- {"tx_unicast_bytes", offsetof(struct ecore_eth_stats, tx_ucast_bytes)},
+ {"tx_unicast_bytes",
+ offsetof(struct ecore_eth_stats_common, tx_ucast_bytes)},
{"tx_multicast_bytes",
- offsetof(struct ecore_eth_stats, tx_mcast_bytes)},
+ offsetof(struct ecore_eth_stats_common, tx_mcast_bytes)},
{"tx_broadcast_bytes",
- offsetof(struct ecore_eth_stats, tx_bcast_bytes)},
- {"tx_unicast_packets", offsetof(struct ecore_eth_stats, tx_ucast_pkts)},
+ offsetof(struct ecore_eth_stats_common, tx_bcast_bytes)},
+ {"tx_unicast_packets",
+ offsetof(struct ecore_eth_stats_common, tx_ucast_pkts)},
{"tx_multicast_packets",
- offsetof(struct ecore_eth_stats, tx_mcast_pkts)},
+ offsetof(struct ecore_eth_stats_common, tx_mcast_pkts)},
{"tx_broadcast_packets",
- offsetof(struct ecore_eth_stats, tx_bcast_pkts)},
+ offsetof(struct ecore_eth_stats_common, tx_bcast_pkts)},
{"rx_64_byte_packets",
- offsetof(struct ecore_eth_stats, rx_64_byte_packets)},
+ offsetof(struct ecore_eth_stats_common, rx_64_byte_packets)},
{"rx_65_to_127_byte_packets",
- offsetof(struct ecore_eth_stats, rx_65_to_127_byte_packets)},
+ offsetof(struct ecore_eth_stats_common,
+ rx_65_to_127_byte_packets)},
{"rx_128_to_255_byte_packets",
- offsetof(struct ecore_eth_stats, rx_128_to_255_byte_packets)},
+ offsetof(struct ecore_eth_stats_common,
+ rx_128_to_255_byte_packets)},
{"rx_256_to_511_byte_packets",
- offsetof(struct ecore_eth_stats, rx_256_to_511_byte_packets)},
+ offsetof(struct ecore_eth_stats_common,
+ rx_256_to_511_byte_packets)},
{"rx_512_to_1023_byte_packets",
- offsetof(struct ecore_eth_stats, rx_512_to_1023_byte_packets)},
+ offsetof(struct ecore_eth_stats_common,
+ rx_512_to_1023_byte_packets)},
{"rx_1024_to_1518_byte_packets",
- offsetof(struct ecore_eth_stats, rx_1024_to_1518_byte_packets)},
- {"rx_1519_to_1522_byte_packets",
- offsetof(struct ecore_eth_stats, rx_1519_to_1522_byte_packets)},
- {"rx_1519_to_2047_byte_packets",
- offsetof(struct ecore_eth_stats, rx_1519_to_2047_byte_packets)},
- {"rx_2048_to_4095_byte_packets",
- offsetof(struct ecore_eth_stats, rx_2048_to_4095_byte_packets)},
- {"rx_4096_to_9216_byte_packets",
- offsetof(struct ecore_eth_stats, rx_4096_to_9216_byte_packets)},
- {"rx_9217_to_16383_byte_packets",
- offsetof(struct ecore_eth_stats,
- rx_9217_to_16383_byte_packets)},
+ offsetof(struct ecore_eth_stats_common,
+ rx_1024_to_1518_byte_packets)},
{"tx_64_byte_packets",
- offsetof(struct ecore_eth_stats, tx_64_byte_packets)},
+ offsetof(struct ecore_eth_stats_common, tx_64_byte_packets)},
{"tx_65_to_127_byte_packets",
- offsetof(struct ecore_eth_stats, tx_65_to_127_byte_packets)},
+ offsetof(struct ecore_eth_stats_common,
+ tx_65_to_127_byte_packets)},
{"tx_128_to_255_byte_packets",
- offsetof(struct ecore_eth_stats, tx_128_to_255_byte_packets)},
+ offsetof(struct ecore_eth_stats_common,
+ tx_128_to_255_byte_packets)},
{"tx_256_to_511_byte_packets",
- offsetof(struct ecore_eth_stats, tx_256_to_511_byte_packets)},
+ offsetof(struct ecore_eth_stats_common,
+ tx_256_to_511_byte_packets)},
{"tx_512_to_1023_byte_packets",
- offsetof(struct ecore_eth_stats, tx_512_to_1023_byte_packets)},
+ offsetof(struct ecore_eth_stats_common,
+ tx_512_to_1023_byte_packets)},
{"tx_1024_to_1518_byte_packets",
- offsetof(struct ecore_eth_stats, tx_1024_to_1518_byte_packets)},
- {"trx_1519_to_1522_byte_packets",
- offsetof(struct ecore_eth_stats, tx_1519_to_2047_byte_packets)},
- {"tx_2048_to_4095_byte_packets",
- offsetof(struct ecore_eth_stats, tx_2048_to_4095_byte_packets)},
- {"tx_4096_to_9216_byte_packets",
- offsetof(struct ecore_eth_stats, tx_4096_to_9216_byte_packets)},
- {"tx_9217_to_16383_byte_packets",
- offsetof(struct ecore_eth_stats,
- tx_9217_to_16383_byte_packets)},
+ offsetof(struct ecore_eth_stats_common,
+ tx_1024_to_1518_byte_packets)},
{"rx_mac_crtl_frames",
- offsetof(struct ecore_eth_stats, rx_mac_crtl_frames)},
+ offsetof(struct ecore_eth_stats_common, rx_mac_crtl_frames)},
{"tx_mac_control_frames",
- offsetof(struct ecore_eth_stats, tx_mac_ctrl_frames)},
- {"rx_pause_frames", offsetof(struct ecore_eth_stats, rx_pause_frames)},
- {"tx_pause_frames", offsetof(struct ecore_eth_stats, tx_pause_frames)},
+ offsetof(struct ecore_eth_stats_common, tx_mac_ctrl_frames)},
+ {"rx_pause_frames",
+ offsetof(struct ecore_eth_stats_common, rx_pause_frames)},
+ {"tx_pause_frames",
+ offsetof(struct ecore_eth_stats_common, tx_pause_frames)},
{"rx_priority_flow_control_frames",
- offsetof(struct ecore_eth_stats, rx_pfc_frames)},
+ offsetof(struct ecore_eth_stats_common, rx_pfc_frames)},
{"tx_priority_flow_control_frames",
- offsetof(struct ecore_eth_stats, tx_pfc_frames)},
+ offsetof(struct ecore_eth_stats_common, tx_pfc_frames)},
- {"rx_crc_errors", offsetof(struct ecore_eth_stats, rx_crc_errors)},
- {"rx_align_errors", offsetof(struct ecore_eth_stats, rx_align_errors)},
+ {"rx_crc_errors",
+ offsetof(struct ecore_eth_stats_common, rx_crc_errors)},
+ {"rx_align_errors",
+ offsetof(struct ecore_eth_stats_common, rx_align_errors)},
{"rx_carrier_errors",
- offsetof(struct ecore_eth_stats, rx_carrier_errors)},
+ offsetof(struct ecore_eth_stats_common, rx_carrier_errors)},
{"rx_oversize_packet_errors",
- offsetof(struct ecore_eth_stats, rx_oversize_packets)},
- {"rx_jabber_errors", offsetof(struct ecore_eth_stats, rx_jabbers)},
+ offsetof(struct ecore_eth_stats_common, rx_oversize_packets)},
+ {"rx_jabber_errors",
+ offsetof(struct ecore_eth_stats_common, rx_jabbers)},
{"rx_undersize_packet_errors",
- offsetof(struct ecore_eth_stats, rx_undersize_packets)},
- {"rx_fragments", offsetof(struct ecore_eth_stats, rx_fragments)},
+ offsetof(struct ecore_eth_stats_common, rx_undersize_packets)},
+ {"rx_fragments", offsetof(struct ecore_eth_stats_common, rx_fragments)},
{"rx_host_buffer_not_available",
- offsetof(struct ecore_eth_stats, no_buff_discards)},
+ offsetof(struct ecore_eth_stats_common, no_buff_discards)},
/* Number of packets discarded because they are bigger than MTU */
{"rx_packet_too_big_discards",
- offsetof(struct ecore_eth_stats, packet_too_big_discard)},
+ offsetof(struct ecore_eth_stats_common,
+ packet_too_big_discard)},
{"rx_ttl_zero_discards",
- offsetof(struct ecore_eth_stats, ttl0_discard)},
+ offsetof(struct ecore_eth_stats_common, ttl0_discard)},
{"rx_multi_function_tag_filter_discards",
- offsetof(struct ecore_eth_stats, mftag_filter_discards)},
+ offsetof(struct ecore_eth_stats_common, mftag_filter_discards)},
{"rx_mac_filter_discards",
- offsetof(struct ecore_eth_stats, mac_filter_discards)},
+ offsetof(struct ecore_eth_stats_common, mac_filter_discards)},
{"rx_hw_buffer_truncates",
- offsetof(struct ecore_eth_stats, brb_truncates)},
+ offsetof(struct ecore_eth_stats_common, brb_truncates)},
{"rx_hw_buffer_discards",
- offsetof(struct ecore_eth_stats, brb_discards)},
- {"tx_lpi_entry_count",
- offsetof(struct ecore_eth_stats, tx_lpi_entry_count)},
- {"tx_total_collisions",
- offsetof(struct ecore_eth_stats, tx_total_collisions)},
+ offsetof(struct ecore_eth_stats_common, brb_discards)},
{"tx_error_drop_packets",
- offsetof(struct ecore_eth_stats, tx_err_drop_pkts)},
+ offsetof(struct ecore_eth_stats_common, tx_err_drop_pkts)},
- {"rx_mac_bytes", offsetof(struct ecore_eth_stats, rx_mac_bytes)},
+ {"rx_mac_bytes", offsetof(struct ecore_eth_stats_common, rx_mac_bytes)},
{"rx_mac_unicast_packets",
- offsetof(struct ecore_eth_stats, rx_mac_uc_packets)},
+ offsetof(struct ecore_eth_stats_common, rx_mac_uc_packets)},
{"rx_mac_multicast_packets",
- offsetof(struct ecore_eth_stats, rx_mac_mc_packets)},
+ offsetof(struct ecore_eth_stats_common, rx_mac_mc_packets)},
{"rx_mac_broadcast_packets",
- offsetof(struct ecore_eth_stats, rx_mac_bc_packets)},
+ offsetof(struct ecore_eth_stats_common, rx_mac_bc_packets)},
{"rx_mac_frames_ok",
- offsetof(struct ecore_eth_stats, rx_mac_frames_ok)},
- {"tx_mac_bytes", offsetof(struct ecore_eth_stats, tx_mac_bytes)},
+ offsetof(struct ecore_eth_stats_common, rx_mac_frames_ok)},
+ {"tx_mac_bytes", offsetof(struct ecore_eth_stats_common, tx_mac_bytes)},
{"tx_mac_unicast_packets",
- offsetof(struct ecore_eth_stats, tx_mac_uc_packets)},
+ offsetof(struct ecore_eth_stats_common, tx_mac_uc_packets)},
{"tx_mac_multicast_packets",
- offsetof(struct ecore_eth_stats, tx_mac_mc_packets)},
+ offsetof(struct ecore_eth_stats_common, tx_mac_mc_packets)},
{"tx_mac_broadcast_packets",
- offsetof(struct ecore_eth_stats, tx_mac_bc_packets)},
+ offsetof(struct ecore_eth_stats_common, tx_mac_bc_packets)},
{"lro_coalesced_packets",
- offsetof(struct ecore_eth_stats, tpa_coalesced_pkts)},
+ offsetof(struct ecore_eth_stats_common, tpa_coalesced_pkts)},
{"lro_coalesced_events",
- offsetof(struct ecore_eth_stats, tpa_coalesced_events)},
+ offsetof(struct ecore_eth_stats_common, tpa_coalesced_events)},
{"lro_aborts_num",
- offsetof(struct ecore_eth_stats, tpa_aborts_num)},
+ offsetof(struct ecore_eth_stats_common, tpa_aborts_num)},
{"lro_not_coalesced_packets",
- offsetof(struct ecore_eth_stats, tpa_not_coalesced_pkts)},
+ offsetof(struct ecore_eth_stats_common,
+ tpa_not_coalesced_pkts)},
{"lro_coalesced_bytes",
- offsetof(struct ecore_eth_stats, tpa_coalesced_bytes)},
+ offsetof(struct ecore_eth_stats_common,
+ tpa_coalesced_bytes)},
+};
+
+static const struct rte_qede_xstats_name_off qede_bb_xstats_strings[] = {
+ {"rx_1519_to_1522_byte_packets",
+ offsetof(struct ecore_eth_stats, bb) +
+ offsetof(struct ecore_eth_stats_bb,
+ rx_1519_to_1522_byte_packets)},
+ {"rx_1519_to_2047_byte_packets",
+ offsetof(struct ecore_eth_stats, bb) +
+ offsetof(struct ecore_eth_stats_bb,
+ rx_1519_to_2047_byte_packets)},
+ {"rx_2048_to_4095_byte_packets",
+ offsetof(struct ecore_eth_stats, bb) +
+ offsetof(struct ecore_eth_stats_bb,
+ rx_2048_to_4095_byte_packets)},
+ {"rx_4096_to_9216_byte_packets",
+ offsetof(struct ecore_eth_stats, bb) +
+ offsetof(struct ecore_eth_stats_bb,
+ rx_4096_to_9216_byte_packets)},
+ {"rx_9217_to_16383_byte_packets",
+ offsetof(struct ecore_eth_stats, bb) +
+ offsetof(struct ecore_eth_stats_bb,
+ rx_9217_to_16383_byte_packets)},
+
+ {"tx_1519_to_2047_byte_packets",
+ offsetof(struct ecore_eth_stats, bb) +
+ offsetof(struct ecore_eth_stats_bb,
+ tx_1519_to_2047_byte_packets)},
+ {"tx_2048_to_4095_byte_packets",
+ offsetof(struct ecore_eth_stats, bb) +
+ offsetof(struct ecore_eth_stats_bb,
+ tx_2048_to_4095_byte_packets)},
+ {"tx_4096_to_9216_byte_packets",
+ offsetof(struct ecore_eth_stats, bb) +
+ offsetof(struct ecore_eth_stats_bb,
+ tx_4096_to_9216_byte_packets)},
+ {"tx_9217_to_16383_byte_packets",
+ offsetof(struct ecore_eth_stats, bb) +
+ offsetof(struct ecore_eth_stats_bb,
+ tx_9217_to_16383_byte_packets)},
+
+ {"tx_lpi_entry_count",
+ offsetof(struct ecore_eth_stats, bb) +
+ offsetof(struct ecore_eth_stats_bb, tx_lpi_entry_count)},
+ {"tx_total_collisions",
+ offsetof(struct ecore_eth_stats, bb) +
+ offsetof(struct ecore_eth_stats_bb, tx_total_collisions)},
+};
+
+static const struct rte_qede_xstats_name_off qede_ah_xstats_strings[] = {
+ {"rx_1519_to_max_byte_packets",
+ offsetof(struct ecore_eth_stats, ah) +
+ offsetof(struct ecore_eth_stats_ah,
+ rx_1519_to_max_byte_packets)},
+ {"tx_1519_to_max_byte_packets",
+ offsetof(struct ecore_eth_stats, ah) +
+ offsetof(struct ecore_eth_stats_ah,
+ tx_1519_to_max_byte_packets)},
};
static const struct rte_qede_xstats_name_off qede_rxq_xstats_strings[] = {
@@ -294,7 +350,6 @@ static void
qede_alloc_etherdev(struct qede_dev *qdev, struct qed_dev_eth_info *info)
{
rte_memcpy(&qdev->dev_info, info, sizeof(*info));
- qdev->num_tc = qdev->dev_info.num_tc;
qdev->ops = qed_ops;
}
@@ -308,9 +363,10 @@ static void qede_print_adapter_info(struct qede_dev *qdev)
DP_INFO(edev, "*********************************\n");
DP_INFO(edev, " DPDK version:%s\n", rte_version());
- DP_INFO(edev, " Chip details : %s%d\n",
+ DP_INFO(edev, " Chip details : %s %c%d\n",
ECORE_IS_BB(edev) ? "BB" : "AH",
- CHIP_REV_IS_A0(edev) ? 0 : 1);
+ 'A' + edev->chip_rev,
+ (int)edev->chip_metal);
snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%d.%d.%d.%d",
info->fw_major, info->fw_minor, info->fw_rev, info->fw_eng);
snprintf(drv_ver, QEDE_PMD_DRV_VER_STR_SIZE, "%s_%s",
@@ -329,6 +385,178 @@ static void qede_print_adapter_info(struct qede_dev *qdev)
}
#endif
+static int
+qede_start_vport(struct qede_dev *qdev, uint16_t mtu)
+{
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_sp_vport_start_params params;
+ struct ecore_hwfn *p_hwfn;
+ int rc;
+ int i;
+
+ memset(&params, 0, sizeof(params));
+ params.vport_id = 0;
+ params.mtu = mtu;
+ /* @DPDK - Disable FW placement */
+ params.zero_placement_offset = 1;
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ params.concrete_fid = p_hwfn->hw_info.concrete_fid;
+ params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ rc = ecore_sp_vport_start(p_hwfn, &params);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Start V-PORT failed %d\n", rc);
+ return rc;
+ }
+ }
+ ecore_reset_vport_stats(edev);
+ DP_INFO(edev, "VPORT started with MTU = %u\n", mtu);
+
+ return 0;
+}
+
+static int
+qede_stop_vport(struct ecore_dev *edev)
+{
+ struct ecore_hwfn *p_hwfn;
+ uint8_t vport_id;
+ int rc;
+ int i;
+
+ vport_id = 0;
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid,
+ vport_id);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Stop V-PORT failed rc = %d\n", rc);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+/* Activate or deactivate vport via vport-update */
+int qede_activate_vport(struct rte_eth_dev *eth_dev, bool flg)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_sp_vport_update_params params;
+ struct ecore_hwfn *p_hwfn;
+ uint8_t i;
+ int rc = -1;
+
+ memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
+ params.vport_id = 0;
+ params.update_vport_active_rx_flg = 1;
+ params.update_vport_active_tx_flg = 1;
+ params.vport_active_rx_flg = flg;
+ params.vport_active_tx_flg = flg;
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ rc = ecore_sp_vport_update(p_hwfn, &params,
+ ECORE_SPQ_MODE_EBLOCK, NULL);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Failed to update vport\n");
+ break;
+ }
+ }
+ DP_INFO(edev, "vport %s\n", flg ? "activated" : "deactivated");
+ return rc;
+}
+
+static void
+qede_update_sge_tpa_params(struct ecore_sge_tpa_params *sge_tpa_params,
+ uint16_t mtu, bool enable)
+{
+ /* Enable LRO in split mode */
+ sge_tpa_params->tpa_ipv4_en_flg = enable;
+ sge_tpa_params->tpa_ipv6_en_flg = enable;
+ sge_tpa_params->tpa_ipv4_tunn_en_flg = false;
+ sge_tpa_params->tpa_ipv6_tunn_en_flg = false;
+ /* set if tpa enable changes */
+ sge_tpa_params->update_tpa_en_flg = 1;
+ /* set if tpa parameters should be handled */
+ sge_tpa_params->update_tpa_param_flg = enable;
+
+ sge_tpa_params->max_buffers_per_cqe = 20;
+ /* Enable TPA in split mode. In this mode each TPA segment
+ * starts on the new BD, so there is one BD per segment.
+ */
+ sge_tpa_params->tpa_pkt_split_flg = 1;
+ sge_tpa_params->tpa_hdr_data_split_flg = 0;
+ sge_tpa_params->tpa_gro_consistent_flg = 0;
+ sge_tpa_params->tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
+ sge_tpa_params->tpa_max_size = 0x7FFF;
+ sge_tpa_params->tpa_min_size_to_start = mtu / 2;
+ sge_tpa_params->tpa_min_size_to_cont = mtu / 2;
+}
+
+/* Enable/disable LRO via vport-update */
+int qede_enable_tpa(struct rte_eth_dev *eth_dev, bool flg)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_sp_vport_update_params params;
+ struct ecore_sge_tpa_params tpa_params;
+ struct ecore_hwfn *p_hwfn;
+ int rc;
+ int i;
+
+ memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
+ memset(&tpa_params, 0, sizeof(struct ecore_sge_tpa_params));
+ qede_update_sge_tpa_params(&tpa_params, qdev->mtu, flg);
+ params.vport_id = 0;
+ params.sge_tpa_params = &tpa_params;
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ rc = ecore_sp_vport_update(p_hwfn, &params,
+ ECORE_SPQ_MODE_EBLOCK, NULL);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Failed to update LRO\n");
+ return -1;
+ }
+ }
+
+ DP_INFO(edev, "LRO is %s\n", flg ? "enabled" : "disabled");
+
+ return 0;
+}
+
+/* Update MTU via vport-update without doing port restart.
+ * The vport must be deactivated before calling this API.
+ */
+int qede_update_mtu(struct rte_eth_dev *eth_dev, uint16_t mtu)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_sp_vport_update_params params;
+ struct ecore_hwfn *p_hwfn;
+ int rc;
+ int i;
+
+ memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
+ params.vport_id = 0;
+ params.mtu = mtu;
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ rc = ecore_sp_vport_update(p_hwfn, &params,
+ ECORE_SPQ_MODE_EBLOCK, NULL);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Failed to update MTU\n");
+ return -1;
+ }
+ }
+ DP_INFO(edev, "MTU updated to %u\n", mtu);
+
+ return 0;
+}
+
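All of the helpers added above (vport activate, TPA enable, MTU update, and the accept-any-vlan and VLAN-stripping updates later in this patch) share one shape: zero an ecore_sp_vport_update_params, set only the update_*_flg fields of interest plus their values, then replay the ramrod on every hwfn. A minimal sketch of that shared pattern, not part of the patch, assuming the usual qede_ethdev.h includes are in scope; the fill callback type is hypothetical glue for the sketch.

typedef void (*vport_fill_t)(struct ecore_sp_vport_update_params *p, void *arg);

static int
qede_vport_update_all_hwfns(struct ecore_dev *edev, vport_fill_t fill, void *arg)
{
	struct ecore_sp_vport_update_params params;
	struct ecore_hwfn *p_hwfn;
	int rc = -1;
	int i;

	memset(&params, 0, sizeof(params));
	params.vport_id = 0;
	fill(&params, arg);	/* e.g. set update_vport_active_*_flg + values */

	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_update(p_hwfn, &params,
					   ECORE_SPQ_MODE_EBLOCK, NULL);
		if (rc != ECORE_SUCCESS)
			break;
	}

	return rc;
}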
static void qede_set_ucast_cmn_params(struct ecore_filter_ucast *ucast)
{
memset(ucast, 0, sizeof(struct ecore_filter_ucast));
@@ -337,6 +565,43 @@ static void qede_set_ucast_cmn_params(struct ecore_filter_ucast *ucast)
/* ucast->assert_on_error = true; - For debug */
}
+static int
+qed_configure_filter_rx_mode(struct rte_eth_dev *eth_dev,
+ enum qed_filter_rx_mode_type type)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_filter_accept_flags flags;
+
+ memset(&flags, 0, sizeof(flags));
+
+ flags.update_rx_mode_config = 1;
+ flags.update_tx_mode_config = 1;
+ flags.rx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
+ ECORE_ACCEPT_MCAST_MATCHED |
+ ECORE_ACCEPT_BCAST;
+
+ flags.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
+ ECORE_ACCEPT_MCAST_MATCHED |
+ ECORE_ACCEPT_BCAST;
+
+ if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) {
+ flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
+ if (IS_VF(edev)) {
+ flags.tx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
+ DP_INFO(edev, "Enabling Tx unmatched flag for VF\n");
+ }
+ } else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) {
+ flags.rx_accept_filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
+ } else if (type == (QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC |
+ QED_FILTER_RX_MODE_TYPE_PROMISC)) {
+ flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED |
+ ECORE_ACCEPT_MCAST_UNMATCHED;
+ }
+
+ return ecore_filter_accept_cmd(edev, 0, flags, false, false,
+ ECORE_SPQ_MODE_CB, NULL);
+}
static void qede_set_cmn_tunn_param(struct ecore_tunnel_info *p_tunn,
uint8_t clss, bool mode, bool mask)
{
@@ -363,6 +628,7 @@ qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
SLIST_FOREACH(tmp, &qdev->uc_list_head, list) {
if ((memcmp(mac_addr, &tmp->mac,
ETHER_ADDR_LEN) == 0) &&
+ ucast->vni == tmp->vni &&
ucast->vlan == tmp->vlan) {
DP_ERR(edev, "Unicast MAC is already added"
" with vlan = %u, vni = %u\n",
@@ -565,49 +831,57 @@ qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr)
qede_mac_addr_add(eth_dev, mac_addr, 0, 0);
}
-static void qede_config_accept_any_vlan(struct qede_dev *qdev, bool action)
+static void qede_config_accept_any_vlan(struct qede_dev *qdev, bool flg)
{
- struct ecore_dev *edev = &qdev->edev;
- struct qed_update_vport_params params = {
- .vport_id = 0,
- .accept_any_vlan = action,
- .update_accept_any_vlan_flg = 1,
- };
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_sp_vport_update_params params;
+ struct ecore_hwfn *p_hwfn;
+ uint8_t i;
int rc;
- /* Proceed only if action actually needs to be performed */
- if (qdev->accept_any_vlan == action)
- return;
-
- rc = qdev->ops->vport_update(edev, &params);
- if (rc) {
- DP_ERR(edev, "Failed to %s accept-any-vlan\n",
- action ? "enable" : "disable");
- } else {
- DP_INFO(edev, "%s accept-any-vlan\n",
- action ? "enabled" : "disabled");
- qdev->accept_any_vlan = action;
+ memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
+ params.vport_id = 0;
+ params.update_accept_any_vlan_flg = 1;
+ params.accept_any_vlan = flg;
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ rc = ecore_sp_vport_update(p_hwfn, &params,
+ ECORE_SPQ_MODE_EBLOCK, NULL);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Failed to configure accept-any-vlan\n");
+ return;
+ }
}
+
+ DP_INFO(edev, "%s accept-any-vlan\n", flg ? "enabled" : "disabled");
}
-static int qede_vlan_stripping(struct rte_eth_dev *eth_dev, bool set_stripping)
+static int qede_vlan_stripping(struct rte_eth_dev *eth_dev, bool flg)
{
- struct qed_update_vport_params vport_update_params;
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_sp_vport_update_params params;
+ struct ecore_hwfn *p_hwfn;
+ uint8_t i;
int rc;
- memset(&vport_update_params, 0, sizeof(vport_update_params));
- vport_update_params.vport_id = 0;
- vport_update_params.update_inner_vlan_removal_flg = 1;
- vport_update_params.inner_vlan_removal_flg = set_stripping;
- rc = qdev->ops->vport_update(edev, &vport_update_params);
- if (rc) {
- DP_ERR(edev, "Update V-PORT failed %d\n", rc);
- return rc;
+ memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
+ params.vport_id = 0;
+ params.update_inner_vlan_removal_flg = 1;
+ params.inner_vlan_removal_flg = flg;
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ rc = ecore_sp_vport_update(p_hwfn, &params,
+ ECORE_SPQ_MODE_EBLOCK, NULL);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Failed to update vport\n");
+ return -1;
+ }
}
- qdev->vlan_strip_flg = set_stripping;
+ DP_INFO(edev, "VLAN stripping %s\n", flg ? "enabled" : "disabled");
return 0;
}
@@ -741,33 +1015,6 @@ static void qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
mask, rxmode->hw_vlan_strip, rxmode->hw_vlan_filter);
}
-static int qede_init_vport(struct qede_dev *qdev)
-{
- struct ecore_dev *edev = &qdev->edev;
- struct qed_start_vport_params start = {0};
- int rc;
-
- start.remove_inner_vlan = 1;
- start.enable_lro = qdev->enable_lro;
- start.mtu = ETHER_MTU + QEDE_ETH_OVERHEAD;
- start.vport_id = 0;
- start.drop_ttl0 = false;
- start.clear_stats = 1;
- start.handle_ptp_pkts = 0;
-
- rc = qdev->ops->vport_start(edev, &start);
- if (rc) {
- DP_ERR(edev, "Start V-PORT failed %d\n", rc);
- return rc;
- }
-
- DP_INFO(edev,
- "Start vport ramrod passed, vport_id = %d, MTU = %u\n",
- start.vport_id, ETHER_MTU);
-
- return 0;
-}
-
static void qede_prandom_bytes(uint32_t *buff)
{
uint8_t i;
@@ -818,33 +1065,119 @@ int qede_config_rss(struct rte_eth_dev *eth_dev)
return 0;
}
+static void qede_fastpath_start(struct ecore_dev *edev)
+{
+ struct ecore_hwfn *p_hwfn;
+ int i;
+
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ ecore_hw_start_fastpath(p_hwfn);
+ }
+}
+
+static int qede_dev_start(struct rte_eth_dev *eth_dev)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+
+ PMD_INIT_FUNC_TRACE(edev);
+
+ /* Update MTU only if it has changed */
+ if (qdev->mtu != qdev->new_mtu) {
+ if (qede_update_mtu(eth_dev, qdev->new_mtu))
+ goto err;
+ qdev->mtu = qdev->new_mtu;
+ /* If MTU has changed then update TPA too */
+ if (qdev->enable_lro)
+ if (qede_enable_tpa(eth_dev, true))
+ goto err;
+ }
+
+ /* Start queues */
+ if (qede_start_queues(eth_dev))
+ goto err;
+
+ /* The newer SR-IOV PF driver expects RX/TX queues to be started before
+ * enabling RSS, so RSS configuration is deferred up to this point.
+ * We would also like to retain similar behavior in the PF case, so no
+ * PF/VF-specific check is done here.
+ */
+ if (eth_dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
+ if (qede_config_rss(eth_dev))
+ goto err;
+
+ /* Enable vport */
+ if (qede_activate_vport(eth_dev, true))
+ goto err;
+
+ /* Bring-up the link */
+ qede_dev_set_link_state(eth_dev, true);
+
+ /* Start/resume traffic */
+ qede_fastpath_start(edev);
+
+ DP_INFO(edev, "Device started\n");
+
+ return 0;
+err:
+ DP_ERR(edev, "Device start failed\n");
+ return -1; /* common error code is < 0 */
+}
+
+static void qede_dev_stop(struct rte_eth_dev *eth_dev)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+
+ PMD_INIT_FUNC_TRACE(edev);
+
+ /* Disable vport */
+ if (qede_activate_vport(eth_dev, false))
+ return;
+
+ if (qdev->enable_lro)
+ qede_enable_tpa(eth_dev, false);
+
+ /* TODO: Do we need to disable LRO or RSS? */
+ /* Stop queues */
+ qede_stop_queues(eth_dev);
+
+ /* Disable traffic */
+ ecore_hw_stop_fastpath(edev); /* TBD - loop */
+
+ /* Bring the link down */
+ qede_dev_set_link_state(eth_dev, false);
+
+ DP_INFO(edev, "Device is stopped\n");
+}
+
static int qede_dev_configure(struct rte_eth_dev *eth_dev)
{
- struct qede_dev *qdev = eth_dev->data->dev_private;
- struct ecore_dev *edev = &qdev->edev;
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
- int rc;
PMD_INIT_FUNC_TRACE(edev);
/* Check requirements for 100G mode */
if (edev->num_hwfns > 1) {
if (eth_dev->data->nb_rx_queues < 2 ||
- eth_dev->data->nb_tx_queues < 2) {
+ eth_dev->data->nb_tx_queues < 2) {
DP_ERR(edev, "100G mode needs min. 2 RX/TX queues\n");
return -EINVAL;
}
if ((eth_dev->data->nb_rx_queues % 2 != 0) ||
- (eth_dev->data->nb_tx_queues % 2 != 0)) {
+ (eth_dev->data->nb_tx_queues % 2 != 0)) {
DP_ERR(edev,
- "100G mode needs even no. of RX/TX queues\n");
+ "100G mode needs even no. of RX/TX queues\n");
return -EINVAL;
}
}
/* Sanity checks and throw warnings */
- if (rxmode->enable_scatter == 1)
+ if (rxmode->enable_scatter)
eth_dev->data->scattered_rx = 1;
if (!rxmode->hw_strip_crc)
@@ -852,83 +1185,77 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
if (!rxmode->hw_ip_checksum)
DP_INFO(edev, "IP/UDP/TCP checksum offload is always enabled "
- "in hw\n");
-
- if (rxmode->enable_lro) {
- qdev->enable_lro = true;
- /* Enable scatter mode for LRO */
- if (!rxmode->enable_scatter)
- eth_dev->data->scattered_rx = 1;
+ "in hw\n");
+ if (rxmode->header_split)
+ DP_INFO(edev, "Header split enable is not supported\n");
+ if (!(rxmode->mq_mode == ETH_MQ_RX_NONE || rxmode->mq_mode ==
+ ETH_MQ_RX_RSS)) {
+ DP_ERR(edev, "Unsupported multi-queue mode\n");
+ return -ENOTSUP;
}
+ /* Flow director mode check */
+ if (qede_check_fdir_support(eth_dev))
+ return -ENOTSUP;
- /* Check for the port restart case */
- if (qdev->state != QEDE_DEV_INIT) {
- rc = qdev->ops->vport_stop(edev, 0);
- if (rc != 0)
- return rc;
+ /* Deallocate resources held previously. This is needed only if the
+ * queue count has changed from the previous configuration. If it is
+ * going to change, RX/TX queue setup will be called again and the
+ * fastpath pointers will be reinitialized there.
+ */
+ if (qdev->num_tx_queues != eth_dev->data->nb_tx_queues ||
+ qdev->num_rx_queues != eth_dev->data->nb_rx_queues) {
qede_dealloc_fp_resc(eth_dev);
+ /* Proceed with updated queue count */
+ qdev->num_tx_queues = eth_dev->data->nb_tx_queues;
+ qdev->num_rx_queues = eth_dev->data->nb_rx_queues;
+ if (qede_alloc_fp_resc(qdev))
+ return -ENOMEM;
}
- qdev->fp_num_tx = eth_dev->data->nb_tx_queues;
- qdev->fp_num_rx = eth_dev->data->nb_rx_queues;
- qdev->num_queues = qdev->fp_num_tx + qdev->fp_num_rx;
-
- /* Fastpath status block should be initialized before sending
- * VPORT-START in the case of VF. Anyway, do it for both VF/PF.
+ /* The VF's MTU has to be set using vport-start, whereas the
+ * PF's MTU can be updated via vport-update.
*/
- rc = qede_alloc_fp_resc(qdev);
- if (rc != 0)
- return rc;
-
- /* Issue VPORT-START with default config values to allow
- * other port configurations early on.
- */
- rc = qede_init_vport(qdev);
- if (rc != 0)
- return rc;
-
- if (!(rxmode->mq_mode == ETH_MQ_RX_RSS ||
- rxmode->mq_mode == ETH_MQ_RX_NONE)) {
- DP_ERR(edev, "Unsupported RSS mode\n");
- qdev->ops->vport_stop(edev, 0);
- qede_dealloc_fp_resc(eth_dev);
- return -EINVAL;
+ if (IS_VF(edev)) {
+ if (qede_start_vport(qdev, rxmode->max_rx_pkt_len))
+ return -1;
+ } else {
+ if (qede_update_mtu(eth_dev, rxmode->max_rx_pkt_len))
+ return -1;
}
- /* Flow director mode check */
- rc = qede_check_fdir_support(eth_dev);
- if (rc) {
- qdev->ops->vport_stop(edev, 0);
- qede_dealloc_fp_resc(eth_dev);
- return -EINVAL;
- }
- SLIST_INIT(&qdev->fdir_info.fdir_list_head);
+ qdev->mtu = rxmode->max_rx_pkt_len;
+ qdev->new_mtu = qdev->mtu;
- SLIST_INIT(&qdev->vlan_list_head);
+ /* Configure TPA parameters */
+ if (rxmode->enable_lro) {
+ if (qede_enable_tpa(eth_dev, true))
+ return -EINVAL;
+ /* Enable scatter mode for LRO */
+ if (!rxmode->enable_scatter)
+ eth_dev->data->scattered_rx = 1;
+ }
+ qdev->enable_lro = rxmode->enable_lro;
/* Enable VLAN offloads by default */
qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK |
- ETH_VLAN_FILTER_MASK |
- ETH_VLAN_EXTEND_MASK);
+ ETH_VLAN_FILTER_MASK |
+ ETH_VLAN_EXTEND_MASK);
- qdev->state = QEDE_DEV_CONFIG;
-
- DP_INFO(edev, "Allocated RSS=%d TSS=%d (with CoS=%d)\n",
- (int)QEDE_RSS_COUNT(qdev), (int)QEDE_TSS_COUNT(qdev),
- qdev->num_tc);
+ DP_INFO(edev, "Device configured with RSS=%d TSS=%d\n",
+ QEDE_RSS_COUNT(qdev), QEDE_TSS_COUNT(qdev));
return 0;
}
/* Info about HW descriptor ring limitations */
static const struct rte_eth_desc_lim qede_rx_desc_lim = {
- .nb_max = NUM_RX_BDS_MAX,
+ .nb_max = 0x8000, /* 32K */
.nb_min = 128,
.nb_align = 128 /* lowest common multiple */
};
static const struct rte_eth_desc_lim qede_tx_desc_lim = {
- .nb_max = NUM_TX_BDS_MAX,
+ .nb_max = 0x8000, /* 32K */
.nb_min = 256,
.nb_align = 256,
.nb_seg_max = ETH_TX_MAX_BDS_PER_LSO_PACKET,
@@ -946,7 +1273,7 @@ qede_dev_info_get(struct rte_eth_dev *eth_dev,
PMD_INIT_FUNC_TRACE(edev);
- dev_info->pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+ dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
dev_info->min_rx_bufsize = (uint32_t)QEDE_MIN_RX_BUFF_SIZE;
dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN;
dev_info->rx_desc_lim = qede_rx_desc_lim;
@@ -1103,44 +1430,34 @@ static void qede_poll_sp_sb_cb(void *param)
static void qede_dev_close(struct rte_eth_dev *eth_dev)
{
- struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- int rc;
PMD_INIT_FUNC_TRACE(edev);
- qede_fdir_dealloc_resc(eth_dev);
-
/* dev_stop() shall cleanup fp resources in hw but without releasing
* dma memories and sw structures so that dev_start() can be called
* by the app without reconfiguration. However, in dev_close() we
* can release all the resources and device can be brought up newly
*/
- if (qdev->state != QEDE_DEV_STOP)
+ if (eth_dev->data->dev_started)
qede_dev_stop(eth_dev);
- else
- DP_INFO(edev, "Device is already stopped\n");
-
- rc = qdev->ops->vport_stop(edev, 0);
- if (rc != 0)
- DP_ERR(edev, "Failed to stop VPORT\n");
+ qede_stop_vport(edev);
+ qede_fdir_dealloc_resc(eth_dev);
qede_dealloc_fp_resc(eth_dev);
- qdev->ops->common->slowpath_stop(edev);
+ eth_dev->data->nb_rx_queues = 0;
+ eth_dev->data->nb_tx_queues = 0;
+ qdev->ops->common->slowpath_stop(edev);
qdev->ops->common->remove(edev);
-
rte_intr_disable(&pci_dev->intr_handle);
-
rte_intr_callback_unregister(&pci_dev->intr_handle,
qede_interrupt_handler, (void *)eth_dev);
-
if (edev->num_hwfns > 1)
rte_eal_alarm_cancel(qede_poll_sp_sb_cb, (void *)eth_dev);
-
- qdev->state = QEDE_DEV_INIT; /* Go back to init state */
}
static void
@@ -1153,35 +1470,36 @@ qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
unsigned int rxq_stat_cntrs, txq_stat_cntrs;
struct qede_tx_queue *txq;
- qdev->ops->get_vport_stats(edev, &stats);
+ ecore_get_vport_stats(edev, &stats);
/* RX Stats */
- eth_stats->ipackets = stats.rx_ucast_pkts +
- stats.rx_mcast_pkts + stats.rx_bcast_pkts;
+ eth_stats->ipackets = stats.common.rx_ucast_pkts +
+ stats.common.rx_mcast_pkts + stats.common.rx_bcast_pkts;
- eth_stats->ibytes = stats.rx_ucast_bytes +
- stats.rx_mcast_bytes + stats.rx_bcast_bytes;
+ eth_stats->ibytes = stats.common.rx_ucast_bytes +
+ stats.common.rx_mcast_bytes + stats.common.rx_bcast_bytes;
- eth_stats->ierrors = stats.rx_crc_errors +
- stats.rx_align_errors +
- stats.rx_carrier_errors +
- stats.rx_oversize_packets +
- stats.rx_jabbers + stats.rx_undersize_packets;
+ eth_stats->ierrors = stats.common.rx_crc_errors +
+ stats.common.rx_align_errors +
+ stats.common.rx_carrier_errors +
+ stats.common.rx_oversize_packets +
+ stats.common.rx_jabbers + stats.common.rx_undersize_packets;
- eth_stats->rx_nombuf = stats.no_buff_discards;
+ eth_stats->rx_nombuf = stats.common.no_buff_discards;
- eth_stats->imissed = stats.mftag_filter_discards +
- stats.mac_filter_discards +
- stats.no_buff_discards + stats.brb_truncates + stats.brb_discards;
+ eth_stats->imissed = stats.common.mftag_filter_discards +
+ stats.common.mac_filter_discards +
+ stats.common.no_buff_discards +
+ stats.common.brb_truncates + stats.common.brb_discards;
/* TX stats */
- eth_stats->opackets = stats.tx_ucast_pkts +
- stats.tx_mcast_pkts + stats.tx_bcast_pkts;
+ eth_stats->opackets = stats.common.tx_ucast_pkts +
+ stats.common.tx_mcast_pkts + stats.common.tx_bcast_pkts;
- eth_stats->obytes = stats.tx_ucast_bytes +
- stats.tx_mcast_bytes + stats.tx_bcast_bytes;
+ eth_stats->obytes = stats.common.tx_ucast_bytes +
+ stats.common.tx_mcast_bytes + stats.common.tx_bcast_bytes;
- eth_stats->oerrors = stats.tx_err_drop_pkts;
+ eth_stats->oerrors = stats.common.tx_err_drop_pkts;
/* Queue stats */
rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
@@ -1195,38 +1513,34 @@ qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
" RTE_ETHDEV_QUEUE_STAT_CNTRS config param"
" appropriately and retry.\n");
- for (qid = 0; qid < QEDE_QUEUE_CNT(qdev); qid++) {
- if (qdev->fp_array[qid].type & QEDE_FASTPATH_RX) {
- eth_stats->q_ipackets[i] =
- *(uint64_t *)(
- ((char *)(qdev->fp_array[(qid)].rxq)) +
- offsetof(struct qede_rx_queue,
- rcv_pkts));
- eth_stats->q_errors[i] =
- *(uint64_t *)(
- ((char *)(qdev->fp_array[(qid)].rxq)) +
- offsetof(struct qede_rx_queue,
- rx_hw_errors)) +
- *(uint64_t *)(
- ((char *)(qdev->fp_array[(qid)].rxq)) +
- offsetof(struct qede_rx_queue,
- rx_alloc_errors));
- i++;
- }
+ for_each_rss(qid) {
+ eth_stats->q_ipackets[i] =
+ *(uint64_t *)(
+ ((char *)(qdev->fp_array[qid].rxq)) +
+ offsetof(struct qede_rx_queue,
+ rcv_pkts));
+ eth_stats->q_errors[i] =
+ *(uint64_t *)(
+ ((char *)(qdev->fp_array[qid].rxq)) +
+ offsetof(struct qede_rx_queue,
+ rx_hw_errors)) +
+ *(uint64_t *)(
+ ((char *)(qdev->fp_array[qid].rxq)) +
+ offsetof(struct qede_rx_queue,
+ rx_alloc_errors));
+ i++;
if (i == rxq_stat_cntrs)
break;
}
- for (qid = 0; qid < QEDE_QUEUE_CNT(qdev); qid++) {
- if (qdev->fp_array[qid].type & QEDE_FASTPATH_TX) {
- txq = qdev->fp_array[(qid)].txqs[0];
- eth_stats->q_opackets[j] =
- *((uint64_t *)(uintptr_t)
- (((uint64_t)(uintptr_t)(txq)) +
- offsetof(struct qede_tx_queue,
- xmit_pkts)));
- j++;
- }
+ for_each_tss(qid) {
+ txq = qdev->fp_array[qid].txq;
+ eth_stats->q_opackets[j] =
+ *((uint64_t *)(uintptr_t)
+ (((uint64_t)(uintptr_t)(txq)) +
+ offsetof(struct qede_tx_queue,
+ xmit_pkts)));
+ j++;
if (j == txq_stat_cntrs)
break;
}
@@ -1234,18 +1548,27 @@ qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
static unsigned
qede_get_xstats_count(struct qede_dev *qdev) {
- return RTE_DIM(qede_xstats_strings) +
- (RTE_DIM(qede_rxq_xstats_strings) *
- RTE_MIN(QEDE_RSS_COUNT(qdev),
- RTE_ETHDEV_QUEUE_STAT_CNTRS));
+ if (ECORE_IS_BB(&qdev->edev))
+ return RTE_DIM(qede_xstats_strings) +
+ RTE_DIM(qede_bb_xstats_strings) +
+ (RTE_DIM(qede_rxq_xstats_strings) *
+ RTE_MIN(QEDE_RSS_COUNT(qdev),
+ RTE_ETHDEV_QUEUE_STAT_CNTRS));
+ else
+ return RTE_DIM(qede_xstats_strings) +
+ RTE_DIM(qede_ah_xstats_strings) +
+ (RTE_DIM(qede_rxq_xstats_strings) *
+ RTE_MIN(QEDE_RSS_COUNT(qdev),
+ RTE_ETHDEV_QUEUE_STAT_CNTRS));
}
static int
-qede_get_xstats_names(__rte_unused struct rte_eth_dev *dev,
+qede_get_xstats_names(struct rte_eth_dev *dev,
struct rte_eth_xstat_name *xstats_names,
__rte_unused unsigned int limit)
{
struct qede_dev *qdev = dev->data->dev_private;
+ struct ecore_dev *edev = &qdev->edev;
const unsigned int stat_cnt = qede_get_xstats_count(qdev);
unsigned int i, qid, stat_idx = 0;
unsigned int rxq_stat_cntrs;
@@ -1259,6 +1582,24 @@ qede_get_xstats_names(__rte_unused struct rte_eth_dev *dev,
stat_idx++;
}
+ if (ECORE_IS_BB(edev)) {
+ for (i = 0; i < RTE_DIM(qede_bb_xstats_strings); i++) {
+ snprintf(xstats_names[stat_idx].name,
+ sizeof(xstats_names[stat_idx].name),
+ "%s",
+ qede_bb_xstats_strings[i].name);
+ stat_idx++;
+ }
+ } else {
+ for (i = 0; i < RTE_DIM(qede_ah_xstats_strings); i++) {
+ snprintf(xstats_names[stat_idx].name,
+ sizeof(xstats_names[stat_idx].name),
+ "%s",
+ qede_ah_xstats_strings[i].name);
+ stat_idx++;
+ }
+ }
+
rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
RTE_ETHDEV_QUEUE_STAT_CNTRS);
for (qid = 0; qid < rxq_stat_cntrs; qid++) {
@@ -1290,7 +1631,7 @@ qede_get_xstats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
if (n < num)
return num;
- qdev->ops->get_vport_stats(edev, &stats);
+ ecore_get_vport_stats(edev, &stats);
for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
xstats[stat_idx].value = *(uint64_t *)(((char *)&stats) +
@@ -1299,13 +1640,31 @@ qede_get_xstats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
stat_idx++;
}
+ if (ECORE_IS_BB(edev)) {
+ for (i = 0; i < RTE_DIM(qede_bb_xstats_strings); i++) {
+ xstats[stat_idx].value =
+ *(uint64_t *)(((char *)&stats) +
+ qede_bb_xstats_strings[i].offset);
+ xstats[stat_idx].id = stat_idx;
+ stat_idx++;
+ }
+ } else {
+ for (i = 0; i < RTE_DIM(qede_ah_xstats_strings); i++) {
+ xstats[stat_idx].value =
+ *(uint64_t *)(((char *)&stats) +
+ qede_ah_xstats_strings[i].offset);
+ xstats[stat_idx].id = stat_idx;
+ stat_idx++;
+ }
+ }
+
rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
RTE_ETHDEV_QUEUE_STAT_CNTRS);
for (qid = 0; qid < rxq_stat_cntrs; qid++) {
- if (qdev->fp_array[qid].type & QEDE_FASTPATH_RX) {
+ for_each_rss(qid) {
for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
xstats[stat_idx].value = *(uint64_t *)(
- ((char *)(qdev->fp_array[(qid)].rxq)) +
+ ((char *)(qdev->fp_array[qid].rxq)) +
qede_rxq_xstats_strings[i].offset);
xstats[stat_idx].id = stat_idx;
stat_idx++;
@@ -1723,6 +2082,8 @@ static int qede_rss_reta_query(struct rte_eth_dev *eth_dev,
return 0;
}
+
+
static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
@@ -1756,19 +2117,17 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
rte_delay_ms(1000);
qdev->mtu = mtu;
/* Fix up RX buf size for all queues of the port */
- for_each_queue(i) {
+ for_each_rss(i) {
fp = &qdev->fp_array[i];
- if (fp->type & QEDE_FASTPATH_RX) {
- bufsz = (uint16_t)rte_pktmbuf_data_room_size(
- fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
- if (dev->data->scattered_rx)
- rx_buf_size = bufsz + QEDE_ETH_OVERHEAD;
- else
- rx_buf_size = mtu + QEDE_ETH_OVERHEAD;
- rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rx_buf_size);
- fp->rxq->rx_buf_size = rx_buf_size;
- DP_INFO(edev, "buf_size adjusted to %u\n", rx_buf_size);
- }
+ bufsz = (uint16_t)rte_pktmbuf_data_room_size(
+ fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
+ if (dev->data->scattered_rx)
+ rx_buf_size = bufsz + QEDE_ETH_OVERHEAD;
+ else
+ rx_buf_size = mtu + QEDE_ETH_OVERHEAD;
+ rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rx_buf_size);
+ fp->rxq->rx_buf_size = rx_buf_size;
+ DP_INFO(edev, "buf_size adjusted to %u\n", rx_buf_size);
}
qede_dev_start(dev);
if (frame_size > ETHER_MAX_LEN)
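
Note: the MTU-change path above recomputes the Rx buffer size per queue. As a hedged, stand-alone sketch of the same arithmetic (QEDE_ETH_OVERHEAD is defined in qede_rxtx.h in the real driver; the value below is only an assumption for illustration):

    #include <stdint.h>
    #include <rte_common.h>
    #include <rte_ether.h>
    #include <rte_mbuf.h>

    /* Assumed overhead for illustration: L2 header + CRC + firmware padding. */
    #define EXAMPLE_ETH_OVERHEAD (ETHER_HDR_LEN + ETHER_CRC_LEN + 8)

    static uint16_t example_rx_buf_size(uint16_t mtu, uint16_t data_room,
    				    int scattered_rx)
    {
    	uint16_t bufsz = data_room - RTE_PKTMBUF_HEADROOM;
    	uint16_t rx_buf_size;

    	if (scattered_rx)	/* each buffer only holds one segment */
    		rx_buf_size = bufsz + EXAMPLE_ETH_OVERHEAD;
    	else			/* the whole frame must fit in one buffer */
    		rx_buf_size = mtu + EXAMPLE_ETH_OVERHEAD;

    	/* Round up to a cache-line multiple, as the driver does. */
    	return RTE_ALIGN_CEIL(rx_buf_size, RTE_CACHE_LINE_SIZE);
    }
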
@@ -1910,6 +2269,8 @@ static int qede_vxlan_tunn_config(struct rte_eth_dev *eth_dev,
uint16_t filter_type;
int rc, i;
+ PMD_INIT_FUNC_TRACE(edev);
+
filter_type = conf->filter_type | qdev->vxlan_filter_type;
/* First determine if the given filter classification is supported */
qede_get_ecore_tunn_params(filter_type, &type, &clss, str);
@@ -2163,7 +2524,7 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
/* Extract key data structures */
adapter = eth_dev->data->dev_private;
edev = &adapter->edev;
- pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+ pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
pci_addr = pci_dev->addr;
PMD_INIT_FUNC_TRACE(edev);
@@ -2177,8 +2538,7 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
eth_dev->tx_pkt_prepare = qede_xmit_prep_pkts;
if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
- DP_NOTICE(edev, false,
- "Skipping device init from secondary process\n");
+ DP_ERR(edev, "Skipping device init from secondary process\n");
return 0;
}
@@ -2195,20 +2555,15 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
}
DP_INFO(edev, "Starting qede probe\n");
-
- rc = qed_ops->common->probe(edev, pci_dev, QED_PROTOCOL_ETH,
- dp_module, dp_level, is_vf);
-
+ rc = qed_ops->common->probe(edev, pci_dev, dp_module,
+ dp_level, is_vf);
if (rc != 0) {
DP_ERR(edev, "qede probe failed rc %d\n", rc);
return -ENODEV;
}
-
qede_update_pf_params(edev);
-
rte_intr_callback_register(&pci_dev->intr_handle,
qede_interrupt_handler, (void *)eth_dev);
-
if (rte_intr_enable(&pci_dev->intr_handle)) {
DP_ERR(edev, "rte_intr_enable() failed\n");
return -ENODEV;
@@ -2306,8 +2661,7 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
ether_addr_copy(&eth_dev->data->mac_addrs[0],
&adapter->primary_mac);
} else {
- DP_NOTICE(edev, false,
- "No VF macaddr assigned\n");
+ DP_ERR(edev, "No VF macaddr assigned\n");
}
}
}
@@ -2321,17 +2675,28 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
do_once = false;
}
- adapter->state = QEDE_DEV_INIT;
+ adapter->num_tx_queues = 0;
+ adapter->num_rx_queues = 0;
+ SLIST_INIT(&adapter->fdir_info.fdir_list_head);
+ SLIST_INIT(&adapter->vlan_list_head);
+ SLIST_INIT(&adapter->uc_list_head);
+ adapter->mtu = ETHER_MTU;
+ adapter->new_mtu = ETHER_MTU;
+ if (!is_vf)
+ if (qede_start_vport(adapter, adapter->mtu))
+ return -1;
- DP_NOTICE(edev, false, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n",
- adapter->primary_mac.addr_bytes[0],
- adapter->primary_mac.addr_bytes[1],
- adapter->primary_mac.addr_bytes[2],
- adapter->primary_mac.addr_bytes[3],
- adapter->primary_mac.addr_bytes[4],
- adapter->primary_mac.addr_bytes[5]);
+ DP_INFO(edev, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n",
+ adapter->primary_mac.addr_bytes[0],
+ adapter->primary_mac.addr_bytes[1],
+ adapter->primary_mac.addr_bytes[2],
+ adapter->primary_mac.addr_bytes[3],
+ adapter->primary_mac.addr_bytes[4],
+ adapter->primary_mac.addr_bytes[5]);
- return rc;
+ DP_INFO(edev, "Device initialized\n");
+
+ return 0;
}
static int qedevf_eth_dev_init(struct rte_eth_dev *eth_dev)
@@ -2346,6 +2711,13 @@ static int qede_eth_dev_init(struct rte_eth_dev *eth_dev)
static int qede_dev_common_uninit(struct rte_eth_dev *eth_dev)
{
+#ifdef RTE_LIBRTE_QEDE_DEBUG_INIT
+ struct qede_dev *qdev = eth_dev->data->dev_private;
+ struct ecore_dev *edev = &qdev->edev;
+
+ PMD_INIT_FUNC_TRACE(edev);
+#endif
+
/* only uninitialize in the primary process */
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
diff --git a/drivers/net/qede/qede_ethdev.h b/drivers/net/qede/qede_ethdev.h
index e4323a0d..a3254b12 100644
--- a/drivers/net/qede/qede_ethdev.h
+++ b/drivers/net/qede/qede_ethdev.h
@@ -41,8 +41,6 @@
#include "qede_logs.h"
#include "qede_if.h"
-#include "qede_eth_if.h"
-
#include "qede_rxtx.h"
#define qede_stringify1(x...) #x
@@ -51,8 +49,8 @@
/* Driver versions */
#define QEDE_PMD_VER_PREFIX "QEDE PMD"
#define QEDE_PMD_VERSION_MAJOR 2
-#define QEDE_PMD_VERSION_MINOR 4
-#define QEDE_PMD_VERSION_REVISION 0
+#define QEDE_PMD_VERSION_MINOR 5
+#define QEDE_PMD_VERSION_REVISION 2
#define QEDE_PMD_VERSION_PATCH 1
#define QEDE_PMD_VERSION qede_stringify(QEDE_PMD_VERSION_MAJOR) "." \
@@ -73,12 +71,8 @@
(edev)->dev_info.num_tc)
#define QEDE_QUEUE_CNT(qdev) ((qdev)->num_queues)
-#define QEDE_RSS_COUNT(qdev) ((qdev)->num_queues - (qdev)->fp_num_tx)
-#define QEDE_TSS_COUNT(qdev) (((qdev)->num_queues - (qdev)->fp_num_rx) * \
- (qdev)->num_tc)
-
-#define QEDE_FASTPATH_TX (1 << 0)
-#define QEDE_FASTPATH_RX (1 << 1)
+#define QEDE_RSS_COUNT(qdev) ((qdev)->num_rx_queues)
+#define QEDE_TSS_COUNT(qdev) ((qdev)->num_tx_queues)
#define QEDE_DUPLEX_FULL 1
#define QEDE_DUPLEX_HALF 2
@@ -138,12 +132,12 @@ extern char fw_file[];
/* Maximum number of flowdir filters */
#define QEDE_RFS_MAX_FLTR (256)
-/* Port/function states */
-enum qede_dev_state {
- QEDE_DEV_INIT, /* Init the chip and Slowpath */
- QEDE_DEV_CONFIG, /* Create Vport/Fastpath resources */
- QEDE_DEV_START, /* Start RX/TX queues, enable traffic */
- QEDE_DEV_STOP, /* Deactivate vport and stop traffic */
+#define QEDE_MAX_MCAST_FILTERS (64)
+
+enum qed_filter_rx_mode_type {
+ QED_FILTER_RX_MODE_TYPE_REGULAR,
+ QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC,
+ QED_FILTER_RX_MODE_TYPE_PROMISC,
};
struct qede_vlan_entry {
@@ -183,23 +177,20 @@ struct qede_fdir_info {
*/
struct qede_dev {
struct ecore_dev edev;
- uint8_t protocol;
const struct qed_eth_ops *ops;
struct qed_dev_eth_info dev_info;
struct ecore_sb_info *sb_array;
struct qede_fastpath *fp_array;
- uint8_t num_tc;
uint16_t mtu;
+ uint16_t new_mtu;
bool rss_enable;
struct rte_eth_rss_conf rss_conf;
uint16_t rss_ind_table[ECORE_RSS_IND_TABLE_SIZE];
uint64_t rss_hf;
uint8_t rss_key_len;
bool enable_lro;
- uint16_t num_queues;
- uint8_t fp_num_tx;
- uint8_t fp_num_rx;
- enum qede_dev_state state;
+ uint8_t num_rx_queues;
+ uint8_t num_tx_queues;
SLIST_HEAD(vlan_list_head, qede_vlan_entry)vlan_list_head;
uint16_t configured_vlans;
bool accept_any_vlan;
@@ -248,4 +239,10 @@ uint16_t qede_fdir_construct_pkt(struct rte_eth_dev *eth_dev,
void qede_fdir_dealloc_resc(struct rte_eth_dev *eth_dev);
+int qede_activate_vport(struct rte_eth_dev *eth_dev, bool flg);
+
+int qede_update_mtu(struct rte_eth_dev *eth_dev, uint16_t mtu);
+
+int qede_enable_tpa(struct rte_eth_dev *eth_dev, bool flg);
+
#endif /* _QEDE_ETHDEV_H_ */
diff --git a/drivers/net/qede/qede_if.h b/drivers/net/qede/qede_if.h
index 405c525e..9864bb44 100644
--- a/drivers/net/qede/qede_if.h
+++ b/drivers/net/qede/qede_if.h
@@ -50,14 +50,26 @@ struct qed_dev_info {
bool geneve_enable;
};
-enum qed_sb_type {
- QED_SB_TYPE_L2_QUEUE,
- QED_SB_TYPE_STORAGE,
- QED_SB_TYPE_CNQ,
+struct qed_dev_eth_info {
+ struct qed_dev_info common;
+
+ uint8_t num_queues;
+ uint8_t num_tc;
+
+ struct ether_addr port_mac;
+ uint16_t num_vlan_filters;
+ uint32_t num_mac_filters;
+
+ /* Legacy VF - this affects the datapath */
+ bool is_legacy;
};
-enum qed_protocol {
- QED_PROTOCOL_ETH,
+#define INIT_STRUCT_FIELD(field, value) .field = value
+
+struct qed_eth_ops {
+ const struct qed_common_ops *common;
+ int (*fill_dev_info)(struct ecore_dev *edev,
+ struct qed_dev_eth_info *info);
};
struct qed_link_params {
@@ -99,64 +111,13 @@ struct qed_slowpath_params {
uint8_t name[NAME_SIZE];
};
-#define ILT_PAGE_SIZE_TCFC 0x8000 /* 32KB */
-
-struct qed_eth_tlvs {
- u16 feat_flags;
- u8 mac[3][ETH_ALEN];
- u16 lso_maxoff;
- u16 lso_minseg;
- bool prom_mode;
- u16 num_txqs;
- u16 num_rxqs;
- u16 num_netqs;
- u16 flex_vlan;
- u32 tcp4_offloads;
- u32 tcp6_offloads;
- u16 tx_avg_qdepth;
- u16 rx_avg_qdepth;
- u8 txqs_empty;
- u8 rxqs_empty;
- u8 num_txqs_full;
- u8 num_rxqs_full;
-};
-
-struct qed_tunn_update_params {
- unsigned long tunn_mode_update_mask;
- unsigned long tunn_mode;
- u16 vxlan_udp_port;
- u16 geneve_udp_port;
- u8 update_rx_pf_clss;
- u8 update_tx_pf_clss;
- u8 update_vxlan_udp_port;
- u8 update_geneve_udp_port;
- u8 tunn_clss_vxlan;
- u8 tunn_clss_l2geneve;
- u8 tunn_clss_ipgeneve;
- u8 tunn_clss_l2gre;
- u8 tunn_clss_ipgre;
-};
-
struct qed_common_cb_ops {
void (*link_update)(void *dev, struct qed_link_output *link);
- void (*get_tlv_data)(void *dev, struct qed_eth_tlvs *data);
-};
-
-struct qed_selftest_ops {
-/**
- * @brief registers - Perform register tests
- *
- * @param edev
- *
- * @return 0 on success, error otherwise.
- */
- int (*registers)(struct ecore_dev *edev);
};
struct qed_common_ops {
int (*probe)(struct ecore_dev *edev,
struct rte_pci_device *pci_dev,
- enum qed_protocol protocol,
uint32_t dp_module, uint8_t dp_level, bool is_vf);
void (*set_name)(struct ecore_dev *edev, char name[]);
enum _ecore_status_t
@@ -196,7 +157,7 @@ struct qed_common_ops {
struct ecore_sb_info *sb_info,
void *sb_virt_addr,
dma_addr_t sb_phy_addr,
- uint16_t sb_id, enum qed_sb_type type);
+ uint16_t sb_id);
int (*get_sb_info)(struct ecore_dev *edev,
struct ecore_sb_info *sb, u16 qid,
@@ -210,4 +171,8 @@ struct qed_common_ops {
int (*send_drv_state)(struct ecore_dev *edev, bool active);
};
+/* Externs */
+
+const struct qed_eth_ops *qed_get_eth_ops(void);
+
#endif /* _QEDE_IF_H */
diff --git a/drivers/net/qede/qede_logs.h b/drivers/net/qede/qede_logs.h
index 25c14d8b..15821151 100644
--- a/drivers/net/qede/qede_logs.h
+++ b/drivers/net/qede/qede_logs.h
@@ -16,16 +16,21 @@
(p_dev)->name ? (p_dev)->name : "", \
##__VA_ARGS__)
-#ifdef RTE_LIBRTE_QEDE_DEBUG_INFO
#define DP_NOTICE(p_dev, is_assert, fmt, ...) \
- rte_log(RTE_LOG_NOTICE, RTE_LOGTYPE_PMD,\
- "[QEDE PMD: (%s)]%s:" fmt, \
- (p_dev)->name ? (p_dev)->name : "", \
- __func__, \
- ##__VA_ARGS__)
-#else
-#define DP_NOTICE(p_dev, fmt, ...) do { } while (0)
-#endif
+do { \
+ if (is_assert) \
+ rte_log(RTE_LOG_ERR, RTE_LOGTYPE_PMD,\
+ "[QEDE PMD: (%s)]%s:" fmt, \
+ (p_dev)->name ? (p_dev)->name : "", \
+ __func__, \
+ ##__VA_ARGS__); \
+ else \
+ rte_log(RTE_LOG_NOTICE, RTE_LOGTYPE_PMD,\
+ "[QEDE PMD: (%s)]%s:" fmt, \
+ (p_dev)->name ? (p_dev)->name : "", \
+ __func__, \
+ ##__VA_ARGS__); \
+} while (0)
#ifdef RTE_LIBRTE_QEDE_DEBUG_INFO
#define DP_INFO(p_dev, fmt, ...) \
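
Note: the reworked DP_NOTICE above keeps its severity argument meaningful instead of compiling the message away with the debug option; a short illustrative call site (messages are made up):

    /* Fatal condition: logged at RTE_LOG_ERR */
    DP_NOTICE(edev, true, "MFW command timed out\n");
    /* Informational condition: logged at RTE_LOG_NOTICE */
    DP_NOTICE(edev, false, "Link event with no state change\n");
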
diff --git a/drivers/net/qede/qede_main.c b/drivers/net/qede/qede_main.c
index 712c03fd..a6ff7af2 100644
--- a/drivers/net/qede/qede_main.c
+++ b/drivers/net/qede/qede_main.c
@@ -19,7 +19,7 @@
char fw_file[PATH_MAX];
const char *QEDE_DEFAULT_FIRMWARE =
- "/lib/firmware/qed/qed_init_values-8.18.9.0.bin";
+ "/lib/firmware/qed/qed_init_values-8.20.0.0.bin";
static void
qed_update_pf_params(struct ecore_dev *edev, struct ecore_pf_params *params)
@@ -40,16 +40,14 @@ static void qed_init_pci(struct ecore_dev *edev, struct rte_pci_device *pci_dev)
static int
qed_probe(struct ecore_dev *edev, struct rte_pci_device *pci_dev,
- enum qed_protocol protocol, uint32_t dp_module,
- uint8_t dp_level, bool is_vf)
+ uint32_t dp_module, uint8_t dp_level, bool is_vf)
{
struct ecore_hw_prepare_params hw_prepare_params;
- struct qede_dev *qdev = (struct qede_dev *)edev;
int rc;
ecore_init_struct(edev);
edev->drv_type = DRV_ID_DRV_TYPE_LINUX;
- qdev->protocol = protocol;
+ /* Protocol type is always fixed to PROTOCOL_ETH */
if (is_vf)
edev->b_is_vf = true;
@@ -62,6 +60,7 @@ qed_probe(struct ecore_dev *edev, struct rte_pci_device *pci_dev,
hw_prepare_params.drv_resc_alloc = false;
hw_prepare_params.chk_reg_fifo = false;
hw_prepare_params.initiate_pf_flr = true;
+ hw_prepare_params.allow_mdump = false;
hw_prepare_params.epoch = (u32)time(NULL);
rc = ecore_hw_prepare(edev, &hw_prepare_params);
if (rc) {
@@ -132,12 +131,12 @@ static int qed_load_firmware_data(struct ecore_dev *edev)
fd = open(fw_file, O_RDONLY);
if (fd < 0) {
- DP_NOTICE(edev, false, "Can't open firmware file\n");
+ DP_ERR(edev, "Can't open firmware file\n");
return -ENOENT;
}
if (fstat(fd, &st) < 0) {
- DP_NOTICE(edev, false, "Can't stat firmware file\n");
+ DP_ERR(edev, "Can't stat firmware file\n");
close(fd);
return -1;
}
@@ -145,20 +144,20 @@ static int qed_load_firmware_data(struct ecore_dev *edev)
edev->firmware = rte_zmalloc("qede_fw", st.st_size,
RTE_CACHE_LINE_SIZE);
if (!edev->firmware) {
- DP_NOTICE(edev, false, "Can't allocate memory for firmware\n");
+ DP_ERR(edev, "Can't allocate memory for firmware\n");
close(fd);
return -ENOMEM;
}
if (read(fd, edev->firmware, st.st_size) != st.st_size) {
- DP_NOTICE(edev, false, "Can't read firmware data\n");
+ DP_ERR(edev, "Can't read firmware data\n");
close(fd);
return -1;
}
edev->fw_len = st.st_size;
if (edev->fw_len < 104) {
- DP_NOTICE(edev, false, "Invalid fw size: %" PRIu64 "\n",
+ DP_ERR(edev, "Invalid fw size: %" PRIu64 "\n",
edev->fw_len);
close(fd);
return -EINVAL;
@@ -262,8 +261,7 @@ static int qed_slowpath_start(struct ecore_dev *edev,
/* Allocate stream for unzipping */
rc = qed_alloc_stream_mem(edev);
if (rc) {
- DP_NOTICE(edev, true,
- "Failed to allocate stream memory\n");
+ DP_ERR(edev, "Failed to allocate stream memory\n");
goto err1;
}
}
@@ -303,8 +301,7 @@ static int qed_slowpath_start(struct ecore_dev *edev,
rc = ecore_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
&drv_version);
if (rc) {
- DP_NOTICE(edev, true,
- "Failed sending drv version command\n");
+ DP_ERR(edev, "Failed sending drv version command\n");
goto err3;
}
}
@@ -460,23 +457,14 @@ static void qed_set_name(struct ecore_dev *edev, char name[NAME_SIZE])
static uint32_t
qed_sb_init(struct ecore_dev *edev, struct ecore_sb_info *sb_info,
- void *sb_virt_addr, dma_addr_t sb_phy_addr,
- uint16_t sb_id, enum qed_sb_type type)
+ void *sb_virt_addr, dma_addr_t sb_phy_addr, uint16_t sb_id)
{
struct ecore_hwfn *p_hwfn;
int hwfn_index;
uint16_t rel_sb_id;
- uint8_t n_hwfns;
+ uint8_t n_hwfns = edev->num_hwfns;
uint32_t rc;
- /* RoCE uses single engine and CMT uses two engines. When using both
- * we force only a single engine. Storage uses only engine 0 too.
- */
- if (type == QED_SB_TYPE_L2_QUEUE)
- n_hwfns = edev->num_hwfns;
- else
- n_hwfns = 1;
-
hwfn_index = sb_id % n_hwfns;
p_hwfn = &edev->hwfns[hwfn_index];
rel_sb_id = sb_id / n_hwfns;
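
Note: with the protocol argument gone, status blocks are always fanned out over every hardware function. A tiny sketch of the resulting mapping, with values assumed for a two-engine (CMT-mode) adapter:

    #include <stdio.h>
    #include <stdint.h>

    /* With two hwfns, even sb_ids land on engine 0 and odd ones on engine 1,
     * and each engine sees a compact zero-based relative index - mirroring
     * the arithmetic in qed_sb_init() above.
     */
    static void example_sb_mapping(uint16_t sb_id, uint8_t num_hwfns)
    {
    	int hwfn_index = sb_id % num_hwfns;
    	uint16_t rel_sb_id = sb_id / num_hwfns;

    	printf("sb %u -> hwfn %d, rel_sb_id %u\n",
    	       sb_id, hwfn_index, rel_sb_id);
    }
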
@@ -617,7 +605,7 @@ static int qed_drain(struct ecore_dev *edev)
hwfn = &edev->hwfns[i];
ptt = ecore_ptt_acquire(hwfn);
if (!ptt) {
- DP_NOTICE(hwfn, true, "Failed to drain NIG; No PTT\n");
+ DP_ERR(hwfn, "Failed to drain NIG; No PTT\n");
return -EBUSY;
}
rc = ecore_mcp_drain(hwfn, ptt);
@@ -710,7 +698,7 @@ static int qed_get_sb_info(struct ecore_dev *edev, struct ecore_sb_info *sb,
ptt = ecore_ptt_acquire(hwfn);
if (!ptt) {
- DP_NOTICE(hwfn, true, "Can't acquire PTT\n");
+ DP_ERR(hwfn, "Can't acquire PTT\n");
return -EAGAIN;
}
@@ -737,3 +725,13 @@ const struct qed_common_ops qed_common_ops_pass = {
INIT_STRUCT_FIELD(remove, &qed_remove),
INIT_STRUCT_FIELD(send_drv_state, &qed_send_drv_state),
};
+
+const struct qed_eth_ops qed_eth_ops_pass = {
+ INIT_STRUCT_FIELD(common, &qed_common_ops_pass),
+ INIT_STRUCT_FIELD(fill_dev_info, &qed_fill_eth_dev_info),
+};
+
+const struct qed_eth_ops *qed_get_eth_ops(void)
+{
+ return &qed_eth_ops_pass;
+}
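
Note: with qede_eth_if.c removed, this accessor is how the ethdev layer now reaches the common/eth ops. A hedged sketch of the expected probe-time usage (the real handling lives in qede_common_dev_init() and is more involved):

    #include <errno.h>
    #include "qede_if.h"

    /* Query the exported ops table and pull device capabilities from it. */
    static int example_query_eth_info(struct ecore_dev *edev,
    				  struct qed_dev_eth_info *info)
    {
    	const struct qed_eth_ops *ops = qed_get_eth_ops();

    	if (ops->fill_dev_info(edev, info) != 0)
    		return -ENODEV;

    	/* info->num_queues, info->port_mac, etc. then size the fastpath
    	 * arrays and seed the default MAC address.
    	 */
    	return 0;
    }
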
diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c
index f5aa43dd..5c3613c7 100644
--- a/drivers/net/qede/qede_rxtx.c
+++ b/drivers/net/qede/qede_rxtx.c
@@ -37,64 +37,20 @@ static inline int qede_alloc_rx_buffer(struct qede_rx_queue *rxq)
return 0;
}
-static void qede_rx_queue_release_mbufs(struct qede_rx_queue *rxq)
-{
- uint16_t i;
-
- if (rxq->sw_rx_ring != NULL) {
- for (i = 0; i < rxq->nb_rx_desc; i++) {
- if (rxq->sw_rx_ring[i].mbuf != NULL) {
- rte_pktmbuf_free(rxq->sw_rx_ring[i].mbuf);
- rxq->sw_rx_ring[i].mbuf = NULL;
- }
- }
- }
-}
-
-void qede_rx_queue_release(void *rx_queue)
-{
- struct qede_rx_queue *rxq = rx_queue;
-
- if (rxq != NULL) {
- qede_rx_queue_release_mbufs(rxq);
- rte_free(rxq->sw_rx_ring);
- rxq->sw_rx_ring = NULL;
- rte_free(rxq);
- rxq = NULL;
- }
-}
-
-static void qede_tx_queue_release_mbufs(struct qede_tx_queue *txq)
-{
- unsigned int i;
-
- PMD_TX_LOG(DEBUG, txq, "releasing %u mbufs", txq->nb_tx_desc);
-
- if (txq->sw_tx_ring) {
- for (i = 0; i < txq->nb_tx_desc; i++) {
- if (txq->sw_tx_ring[i].mbuf) {
- rte_pktmbuf_free(txq->sw_tx_ring[i].mbuf);
- txq->sw_tx_ring[i].mbuf = NULL;
- }
- }
- }
-}
-
int
qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc, unsigned int socket_id,
__rte_unused const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mp)
{
- struct qede_dev *qdev = dev->data->dev_private;
- struct ecore_dev *edev = &qdev->edev;
+ struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
struct qede_rx_queue *rxq;
uint16_t max_rx_pkt_len;
uint16_t bufsz;
size_t size;
int rc;
- int i;
PMD_INIT_FUNC_TRACE(edev);
@@ -126,6 +82,7 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
rxq->nb_rx_desc = nb_desc;
rxq->queue_id = queue_idx;
rxq->port_id = dev->data->port_id;
+
max_rx_pkt_len = (uint16_t)rxmode->max_rx_pkt_len;
qdev->mtu = max_rx_pkt_len;
@@ -138,6 +95,7 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
dev->data->scattered_rx = 1;
}
}
+
if (dev->data->scattered_rx)
rxq->rx_buf_size = bufsz + QEDE_ETH_OVERHEAD;
else
@@ -153,11 +111,9 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
rxq->sw_rx_ring = rte_zmalloc_socket("sw_rx_ring", size,
RTE_CACHE_LINE_SIZE, socket_id);
if (!rxq->sw_rx_ring) {
- DP_NOTICE(edev, false,
- "Unable to alloc memory for sw_rx_ring on socket %u\n",
- socket_id);
+ DP_ERR(edev, "Memory allocation fails for sw_rx_ring on"
+ " socket %u\n", socket_id);
rte_free(rxq);
- rxq = NULL;
return -ENOMEM;
}
@@ -172,13 +128,10 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
NULL);
if (rc != ECORE_SUCCESS) {
- DP_NOTICE(edev, false,
- "Unable to alloc memory for rxbd ring on socket %u\n",
- socket_id);
+ DP_ERR(edev, "Memory allocation fails for RX BD ring"
+ " on socket %u\n", socket_id);
rte_free(rxq->sw_rx_ring);
- rxq->sw_rx_ring = NULL;
rte_free(rxq);
- rxq = NULL;
return -ENOMEM;
}
@@ -193,50 +146,91 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
NULL);
if (rc != ECORE_SUCCESS) {
- DP_NOTICE(edev, false,
- "Unable to alloc memory for cqe ring on socket %u\n",
- socket_id);
- /* TBD: Freeing RX BD ring */
+ DP_ERR(edev, "Memory allocation fails for RX CQE ring"
+ " on socket %u\n", socket_id);
+ qdev->ops->common->chain_free(edev, &rxq->rx_bd_ring);
rte_free(rxq->sw_rx_ring);
- rxq->sw_rx_ring = NULL;
rte_free(rxq);
return -ENOMEM;
}
- /* Allocate buffers for the Rx ring */
- for (i = 0; i < rxq->nb_rx_desc; i++) {
- rc = qede_alloc_rx_buffer(rxq);
- if (rc) {
- DP_NOTICE(edev, false,
- "RX buffer allocation failed at idx=%d\n", i);
- goto err4;
- }
- }
-
dev->data->rx_queues[queue_idx] = rxq;
+ qdev->fp_array[queue_idx].rxq = rxq;
DP_INFO(edev, "rxq %d num_desc %u rx_buf_size=%u socket %u\n",
queue_idx, nb_desc, qdev->mtu, socket_id);
return 0;
-err4:
- qede_rx_queue_release(rxq);
- return -ENOMEM;
}
-void qede_tx_queue_release(void *tx_queue)
+static void
+qede_rx_queue_reset(__rte_unused struct qede_dev *qdev,
+ struct qede_rx_queue *rxq)
{
- struct qede_tx_queue *txq = tx_queue;
+ DP_INFO(&qdev->edev, "Reset RX queue %u\n", rxq->queue_id);
+ ecore_chain_reset(&rxq->rx_bd_ring);
+ ecore_chain_reset(&rxq->rx_comp_ring);
+ rxq->sw_rx_prod = 0;
+ rxq->sw_rx_cons = 0;
+ *rxq->hw_cons_ptr = 0;
+}
- if (txq != NULL) {
- qede_tx_queue_release_mbufs(txq);
- if (txq->sw_tx_ring) {
- rte_free(txq->sw_tx_ring);
- txq->sw_tx_ring = NULL;
+static void qede_rx_queue_release_mbufs(struct qede_rx_queue *rxq)
+{
+ uint16_t i;
+
+ if (rxq->sw_rx_ring) {
+ for (i = 0; i < rxq->nb_rx_desc; i++) {
+ if (rxq->sw_rx_ring[i].mbuf) {
+ rte_pktmbuf_free(rxq->sw_rx_ring[i].mbuf);
+ rxq->sw_rx_ring[i].mbuf = NULL;
+ }
}
- rte_free(txq);
}
- txq = NULL;
+}
+
+void qede_rx_queue_release(void *rx_queue)
+{
+ struct qede_rx_queue *rxq = rx_queue;
+
+ if (rxq) {
+ qede_rx_queue_release_mbufs(rxq);
+ rte_free(rxq->sw_rx_ring);
+ rte_free(rxq);
+ }
+}
+
+/* Stops a given RX queue in the HW */
+static int qede_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_hwfn *p_hwfn;
+ struct qede_rx_queue *rxq;
+ int hwfn_index;
+ int rc;
+
+ if (rx_queue_id < eth_dev->data->nb_rx_queues) {
+ rxq = eth_dev->data->rx_queues[rx_queue_id];
+ hwfn_index = rx_queue_id % edev->num_hwfns;
+ p_hwfn = &edev->hwfns[hwfn_index];
+ rc = ecore_eth_rx_queue_stop(p_hwfn, rxq->handle,
+ true, false);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "RX queue %u stop fails\n", rx_queue_id);
+ return -1;
+ }
+ qede_rx_queue_release_mbufs(rxq);
+ qede_rx_queue_reset(qdev, rxq);
+ eth_dev->data->rx_queue_state[rx_queue_id] =
+ RTE_ETH_QUEUE_STATE_STOPPED;
+ DP_INFO(edev, "RX queue %u stopped\n", rx_queue_id);
+ } else {
+ DP_ERR(edev, "RX queue %u is not in range\n", rx_queue_id);
+ rc = -EINVAL;
+ }
+
+ return rc;
}
int
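
Note: the per-queue stop helper above (and the matching start routine later in this file) pairs naturally with the generic ethdev per-queue hooks; whether qede registers them in dev_ops is outside this hunk, so the sketch below only illustrates the application side:

    #include <rte_ethdev.h>

    /* Stop, tweak, and restart a single Rx queue; returns -ENOTSUP if the
     * PMD does not implement the per-queue hooks.
     */
    static int example_restart_rxq(uint16_t port_id, uint16_t rxq_id)
    {
    	int rc = rte_eth_dev_rx_queue_stop(port_id, rxq_id);

    	if (rc != 0)
    		return rc;

    	/* ... adjust queue-local settings here ... */

    	return rte_eth_dev_rx_queue_start(port_id, rxq_id);
    }
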
@@ -305,6 +299,7 @@ qede_tx_queue_setup(struct rte_eth_dev *dev,
DP_ERR(edev,
"Unable to allocate memory for txbd ring on socket %u",
socket_id);
+ qdev->ops->common->chain_free(edev, &txq->tx_pbl);
qede_tx_queue_release(txq);
return -ENOMEM;
}
@@ -318,6 +313,7 @@ qede_tx_queue_setup(struct rte_eth_dev *dev,
(txq->nb_tx_desc - QEDE_DEFAULT_TX_FREE_THRESH);
dev->data->tx_queues[queue_idx] = txq;
+ qdev->fp_array[queue_idx].txq = txq;
DP_INFO(edev,
"txq %u num_desc %u tx_free_thresh %u socket %u\n",
@@ -326,71 +322,40 @@ qede_tx_queue_setup(struct rte_eth_dev *dev,
return 0;
}
-/* This function inits fp content and resets the SB, RXQ and TXQ arrays */
-static void qede_init_fp(struct qede_dev *qdev)
+static void
+qede_tx_queue_reset(__rte_unused struct qede_dev *qdev,
+ struct qede_tx_queue *txq)
{
- struct qede_fastpath *fp;
- uint8_t i;
- int fp_rx = qdev->fp_num_rx;
-
- memset((void *)qdev->fp_array, 0, (QEDE_QUEUE_CNT(qdev) *
- sizeof(*qdev->fp_array)));
- memset((void *)qdev->sb_array, 0, (QEDE_QUEUE_CNT(qdev) *
- sizeof(*qdev->sb_array)));
- for_each_queue(i) {
- fp = &qdev->fp_array[i];
- if (fp_rx) {
- fp->type = QEDE_FASTPATH_RX;
- fp_rx--;
- } else{
- fp->type = QEDE_FASTPATH_TX;
- }
- fp->qdev = qdev;
- fp->id = i;
- fp->sb_info = &qdev->sb_array[i];
- snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", "qdev", i);
- }
-
+ DP_INFO(&qdev->edev, "Reset TX queue %u\n", txq->queue_id);
+ ecore_chain_reset(&txq->tx_pbl);
+ txq->sw_tx_cons = 0;
+ txq->sw_tx_prod = 0;
+ *txq->hw_cons_ptr = 0;
}
-void qede_free_fp_arrays(struct qede_dev *qdev)
+static void qede_tx_queue_release_mbufs(struct qede_tx_queue *txq)
{
- /* It asseumes qede_free_mem_load() is called before */
- if (qdev->fp_array != NULL) {
- rte_free(qdev->fp_array);
- qdev->fp_array = NULL;
- }
+ uint16_t i;
- if (qdev->sb_array != NULL) {
- rte_free(qdev->sb_array);
- qdev->sb_array = NULL;
+ if (txq->sw_tx_ring) {
+ for (i = 0; i < txq->nb_tx_desc; i++) {
+ if (txq->sw_tx_ring[i].mbuf) {
+ rte_pktmbuf_free(txq->sw_tx_ring[i].mbuf);
+ txq->sw_tx_ring[i].mbuf = NULL;
+ }
+ }
}
}
-static int qede_alloc_fp_array(struct qede_dev *qdev)
+void qede_tx_queue_release(void *tx_queue)
{
- struct ecore_dev *edev = &qdev->edev;
-
- qdev->fp_array = rte_calloc("fp", QEDE_QUEUE_CNT(qdev),
- sizeof(*qdev->fp_array),
- RTE_CACHE_LINE_SIZE);
-
- if (!qdev->fp_array) {
- DP_ERR(edev, "fp array allocation failed\n");
- return -ENOMEM;
- }
-
- qdev->sb_array = rte_calloc("sb", QEDE_QUEUE_CNT(qdev),
- sizeof(*qdev->sb_array),
- RTE_CACHE_LINE_SIZE);
+ struct qede_tx_queue *txq = tx_queue;
- if (!qdev->sb_array) {
- DP_ERR(edev, "sb array allocation failed\n");
- rte_free(qdev->fp_array);
- return -ENOMEM;
+ if (txq) {
+ qede_tx_queue_release_mbufs(txq);
+ rte_free(txq->sw_tx_ring);
+ rte_free(txq);
}
-
- return 0;
}
/* This function allocates fast-path status block memory */
@@ -398,24 +363,23 @@ static int
qede_alloc_mem_sb(struct qede_dev *qdev, struct ecore_sb_info *sb_info,
uint16_t sb_id)
{
- struct ecore_dev *edev = &qdev->edev;
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
struct status_block *sb_virt;
dma_addr_t sb_phys;
int rc;
- sb_virt = OSAL_DMA_ALLOC_COHERENT(edev, &sb_phys, sizeof(*sb_virt));
-
+ sb_virt = OSAL_DMA_ALLOC_COHERENT(edev, &sb_phys,
+ sizeof(struct status_block));
if (!sb_virt) {
DP_ERR(edev, "Status block allocation failed\n");
return -ENOMEM;
}
-
- rc = qdev->ops->common->sb_init(edev, sb_info,
- sb_virt, sb_phys, sb_id,
- QED_SB_TYPE_L2_QUEUE);
+ rc = qdev->ops->common->sb_init(edev, sb_info, sb_virt,
+ sb_phys, sb_id);
if (rc) {
DP_ERR(edev, "Status block initialization failed\n");
- /* TBD: No dma_free_coherent possible */
+ OSAL_DMA_FREE_COHERENT(edev, sb_virt, sb_phys,
+ sizeof(struct status_block));
return rc;
}
@@ -427,9 +391,7 @@ int qede_alloc_fp_resc(struct qede_dev *qdev)
struct ecore_dev *edev = &qdev->edev;
struct qede_fastpath *fp;
uint32_t num_sbs;
- uint16_t i;
uint16_t sb_idx;
- int rc;
if (IS_VF(edev))
ecore_vf_get_num_sbs(ECORE_LEADING_HWFN(edev), &num_sbs);
@@ -442,25 +404,31 @@ int qede_alloc_fp_resc(struct qede_dev *qdev)
return -EINVAL;
}
- if (qdev->fp_array)
- qede_free_fp_arrays(qdev);
+ qdev->fp_array = rte_calloc("fp", QEDE_RXTX_MAX(qdev),
+ sizeof(*qdev->fp_array), RTE_CACHE_LINE_SIZE);
- rc = qede_alloc_fp_array(qdev);
- if (rc != 0)
- return rc;
+ if (!qdev->fp_array) {
+ DP_ERR(edev, "fp array allocation failed\n");
+ return -ENOMEM;
+ }
- qede_init_fp(qdev);
+ memset((void *)qdev->fp_array, 0, QEDE_RXTX_MAX(qdev) *
+ sizeof(*qdev->fp_array));
- for (i = 0; i < QEDE_QUEUE_CNT(qdev); i++) {
- fp = &qdev->fp_array[i];
- if (IS_VF(edev))
- sb_idx = i % num_sbs;
- else
- sb_idx = i;
+ for (sb_idx = 0; sb_idx < QEDE_RXTX_MAX(qdev); sb_idx++) {
+ fp = &qdev->fp_array[sb_idx];
+ fp->sb_info = rte_calloc("sb", 1, sizeof(struct ecore_sb_info),
+ RTE_CACHE_LINE_SIZE);
+ if (!fp->sb_info) {
+ DP_ERR(edev, "FP sb_info allocation fails\n");
+ return -1;
+ }
if (qede_alloc_mem_sb(qdev, fp->sb_info, sb_idx)) {
- qede_free_fp_arrays(qdev);
- return -ENOMEM;
+ DP_ERR(edev, "FP status block allocation fails\n");
+ return -1;
}
+ DP_INFO(edev, "sb_info idx 0x%x initialized\n",
+ fp->sb_info->igu_sb_id);
}
return 0;
@@ -469,9 +437,54 @@ int qede_alloc_fp_resc(struct qede_dev *qdev)
void qede_dealloc_fp_resc(struct rte_eth_dev *eth_dev)
{
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct qede_fastpath *fp;
+ struct qede_rx_queue *rxq;
+ struct qede_tx_queue *txq;
+ uint16_t sb_idx;
+ uint8_t i;
- qede_free_mem_load(eth_dev);
- qede_free_fp_arrays(qdev);
+ PMD_INIT_FUNC_TRACE(edev);
+
+ for (sb_idx = 0; sb_idx < QEDE_RXTX_MAX(qdev); sb_idx++) {
+ fp = &qdev->fp_array[sb_idx];
+		if (fp->sb_info) {
+			DP_INFO(edev, "Free sb_info index 0x%x\n",
+				fp->sb_info->igu_sb_id);
+			OSAL_DMA_FREE_COHERENT(edev, fp->sb_info->sb_virt,
+ fp->sb_info->sb_phys,
+ sizeof(struct status_block));
+ rte_free(fp->sb_info);
+ fp->sb_info = NULL;
+ }
+ }
+
+ /* Free packet buffers and ring memories */
+ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+ if (eth_dev->data->rx_queues[i]) {
+ qede_rx_queue_release(eth_dev->data->rx_queues[i]);
+ rxq = eth_dev->data->rx_queues[i];
+ qdev->ops->common->chain_free(edev,
+ &rxq->rx_bd_ring);
+ qdev->ops->common->chain_free(edev,
+ &rxq->rx_comp_ring);
+ eth_dev->data->rx_queues[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+ if (eth_dev->data->tx_queues[i]) {
+ txq = eth_dev->data->tx_queues[i];
+ qede_tx_queue_release(eth_dev->data->tx_queues[i]);
+ qdev->ops->common->chain_free(edev,
+ &txq->tx_pbl);
+ eth_dev->data->tx_queues[i] = NULL;
+ }
+ }
+
+ if (qdev->fp_array)
+ rte_free(qdev->fp_array);
+ qdev->fp_array = NULL;
}
static inline void
@@ -506,165 +519,297 @@ qede_update_rx_prod(__rte_unused struct qede_dev *edev,
PMD_RX_LOG(DEBUG, rxq, "bd_prod %u cqe_prod %u", bd_prod, cqe_prod);
}
-static void
-qede_update_sge_tpa_params(struct ecore_sge_tpa_params *sge_tpa_params,
- uint16_t mtu, bool enable)
-{
- /* Enable LRO in split mode */
- sge_tpa_params->tpa_ipv4_en_flg = enable;
- sge_tpa_params->tpa_ipv6_en_flg = enable;
- sge_tpa_params->tpa_ipv4_tunn_en_flg = false;
- sge_tpa_params->tpa_ipv6_tunn_en_flg = false;
- /* set if tpa enable changes */
- sge_tpa_params->update_tpa_en_flg = 1;
- /* set if tpa parameters should be handled */
- sge_tpa_params->update_tpa_param_flg = enable;
-
- sge_tpa_params->max_buffers_per_cqe = 20;
- /* Enable TPA in split mode. In this mode each TPA segment
- * starts on the new BD, so there is one BD per segment.
- */
- sge_tpa_params->tpa_pkt_split_flg = 1;
- sge_tpa_params->tpa_hdr_data_split_flg = 0;
- sge_tpa_params->tpa_gro_consistent_flg = 0;
- sge_tpa_params->tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
- sge_tpa_params->tpa_max_size = 0x7FFF;
- sge_tpa_params->tpa_min_size_to_start = mtu / 2;
- sge_tpa_params->tpa_min_size_to_cont = mtu / 2;
-}
-
-static int qede_start_queues(struct rte_eth_dev *eth_dev,
- __rte_unused bool clear_stats)
+/* Starts a given RX queue in HW */
+static int
+qede_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
- struct qede_dev *qdev = eth_dev->data->dev_private;
- struct ecore_dev *edev = &qdev->edev;
- struct ecore_queue_start_common_params q_params;
- struct qed_dev_info *qed_info = &qdev->dev_info.common;
- struct qed_update_vport_params vport_update_params;
- struct ecore_sge_tpa_params tpa_params;
- struct qede_tx_queue *txq;
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_queue_start_common_params params;
+ struct ecore_rxq_start_ret_params ret_params;
+ struct qede_rx_queue *rxq;
struct qede_fastpath *fp;
+ struct ecore_hwfn *p_hwfn;
dma_addr_t p_phys_table;
- int txq_index;
uint16_t page_cnt;
- int rc, tc, i;
-
- for_each_queue(i) {
- fp = &qdev->fp_array[i];
- if (fp->type & QEDE_FASTPATH_RX) {
- struct ecore_rxq_start_ret_params ret_params;
-
- p_phys_table =
- ecore_chain_get_pbl_phys(&fp->rxq->rx_comp_ring);
- page_cnt =
- ecore_chain_get_page_cnt(&fp->rxq->rx_comp_ring);
-
- memset(&ret_params, 0, sizeof(ret_params));
- memset(&q_params, 0, sizeof(q_params));
- q_params.queue_id = i;
- q_params.vport_id = 0;
- q_params.sb = fp->sb_info->igu_sb_id;
- q_params.sb_idx = RX_PI;
-
- ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);
-
- rc = qdev->ops->q_rx_start(edev, i, &q_params,
- fp->rxq->rx_buf_size,
- fp->rxq->rx_bd_ring.p_phys_addr,
- p_phys_table,
- page_cnt,
- &ret_params);
+ uint16_t j;
+ int hwfn_index;
+ int rc;
+
+ if (rx_queue_id < eth_dev->data->nb_rx_queues) {
+ fp = &qdev->fp_array[rx_queue_id];
+ rxq = eth_dev->data->rx_queues[rx_queue_id];
+ /* Allocate buffers for the Rx ring */
+ for (j = 0; j < rxq->nb_rx_desc; j++) {
+ rc = qede_alloc_rx_buffer(rxq);
if (rc) {
- DP_ERR(edev, "Start rxq #%d failed %d\n",
- fp->rxq->queue_id, rc);
- return rc;
+ DP_ERR(edev, "RX buffer allocation failed"
+ " for rxq = %u\n", rx_queue_id);
+ return -ENOMEM;
}
+ }
+ /* disable interrupts */
+ ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);
+ /* Prepare ramrod */
+ memset(&params, 0, sizeof(params));
+ params.queue_id = rx_queue_id / edev->num_hwfns;
+ params.vport_id = 0;
+ params.stats_id = params.vport_id;
+ params.sb = fp->sb_info->igu_sb_id;
+ DP_INFO(edev, "rxq %u igu_sb_id 0x%x\n",
+ fp->rxq->queue_id, fp->sb_info->igu_sb_id);
+ params.sb_idx = RX_PI;
+ hwfn_index = rx_queue_id % edev->num_hwfns;
+ p_hwfn = &edev->hwfns[hwfn_index];
+ p_phys_table = ecore_chain_get_pbl_phys(&fp->rxq->rx_comp_ring);
+ page_cnt = ecore_chain_get_page_cnt(&fp->rxq->rx_comp_ring);
+ memset(&ret_params, 0, sizeof(ret_params));
+ rc = ecore_eth_rx_queue_start(p_hwfn,
+ p_hwfn->hw_info.opaque_fid,
+ &params, fp->rxq->rx_buf_size,
+ fp->rxq->rx_bd_ring.p_phys_addr,
+ p_phys_table, page_cnt,
+ &ret_params);
+ if (rc) {
+ DP_ERR(edev, "RX queue %u could not be started, rc = %d\n",
+ rx_queue_id, rc);
+ return -1;
+ }
+ /* Update with the returned parameters */
+ fp->rxq->hw_rxq_prod_addr = ret_params.p_prod;
+ fp->rxq->handle = ret_params.p_handle;
+
+ fp->rxq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[RX_PI];
+ qede_update_rx_prod(qdev, fp->rxq);
+ eth_dev->data->rx_queue_state[rx_queue_id] =
+ RTE_ETH_QUEUE_STATE_STARTED;
+ DP_INFO(edev, "RX queue %u started\n", rx_queue_id);
+ } else {
+ DP_ERR(edev, "RX queue %u is not in range\n", rx_queue_id);
+ rc = -EINVAL;
+ }
- /* Use the return parameters */
- fp->rxq->hw_rxq_prod_addr = ret_params.p_prod;
- fp->rxq->handle = ret_params.p_handle;
+ return rc;
+}
- fp->rxq->hw_cons_ptr =
- &fp->sb_info->sb_virt->pi_array[RX_PI];
+static int
+qede_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_queue_start_common_params params;
+ struct ecore_txq_start_ret_params ret_params;
+ struct ecore_hwfn *p_hwfn;
+ dma_addr_t p_phys_table;
+ struct qede_tx_queue *txq;
+ struct qede_fastpath *fp;
+ uint16_t page_cnt;
+ int hwfn_index;
+ int rc;
- qede_update_rx_prod(qdev, fp->rxq);
+ if (tx_queue_id < eth_dev->data->nb_tx_queues) {
+ txq = eth_dev->data->tx_queues[tx_queue_id];
+ fp = &qdev->fp_array[tx_queue_id];
+ memset(&params, 0, sizeof(params));
+ params.queue_id = tx_queue_id / edev->num_hwfns;
+ params.vport_id = 0;
+ params.stats_id = params.vport_id;
+ params.sb = fp->sb_info->igu_sb_id;
+ DP_INFO(edev, "txq %u igu_sb_id 0x%x\n",
+ fp->txq->queue_id, fp->sb_info->igu_sb_id);
+ params.sb_idx = TX_PI(0); /* tc = 0 */
+ p_phys_table = ecore_chain_get_pbl_phys(&txq->tx_pbl);
+ page_cnt = ecore_chain_get_page_cnt(&txq->tx_pbl);
+ hwfn_index = tx_queue_id % edev->num_hwfns;
+ p_hwfn = &edev->hwfns[hwfn_index];
+ if (qdev->dev_info.is_legacy)
+ fp->txq->is_legacy = true;
+ rc = ecore_eth_tx_queue_start(p_hwfn,
+ p_hwfn->hw_info.opaque_fid,
+ &params, 0 /* tc */,
+ p_phys_table, page_cnt,
+ &ret_params);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "TX queue %u couldn't be started, rc=%d\n",
+ tx_queue_id, rc);
+ return -1;
}
+ txq->doorbell_addr = ret_params.p_doorbell;
+ txq->handle = ret_params.p_handle;
+
+ txq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[TX_PI(0)];
+ SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_DEST,
+ DB_DEST_XCM);
+ SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD,
+ DB_AGG_CMD_SET);
+ SET_FIELD(txq->tx_db.data.params,
+ ETH_DB_DATA_AGG_VAL_SEL,
+ DQ_XCM_ETH_TX_BD_PROD_CMD);
+ txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
+ eth_dev->data->tx_queue_state[tx_queue_id] =
+ RTE_ETH_QUEUE_STATE_STARTED;
+ DP_INFO(edev, "TX queue %u started\n", tx_queue_id);
+ } else {
+ DP_ERR(edev, "TX queue %u is not in range\n", tx_queue_id);
+ rc = -EINVAL;
+ }
- if (!(fp->type & QEDE_FASTPATH_TX))
- continue;
- for (tc = 0; tc < qdev->num_tc; tc++) {
- struct ecore_txq_start_ret_params ret_params;
+ return rc;
+}
- txq = fp->txqs[tc];
- txq_index = tc * QEDE_RSS_COUNT(qdev) + i;
+static inline void
+qede_free_tx_pkt(struct qede_tx_queue *txq)
+{
+ struct rte_mbuf *mbuf;
+ uint16_t nb_segs;
+ uint16_t idx;
- p_phys_table = ecore_chain_get_pbl_phys(&txq->tx_pbl);
- page_cnt = ecore_chain_get_page_cnt(&txq->tx_pbl);
+ idx = TX_CONS(txq);
+ mbuf = txq->sw_tx_ring[idx].mbuf;
+ if (mbuf) {
+ nb_segs = mbuf->nb_segs;
+ PMD_TX_LOG(DEBUG, txq, "nb_segs to free %u\n", nb_segs);
+ while (nb_segs) {
+ /* It's like consuming rxbuf in recv() */
+ ecore_chain_consume(&txq->tx_pbl);
+ txq->nb_tx_avail++;
+ nb_segs--;
+ }
+ rte_pktmbuf_free(mbuf);
+ txq->sw_tx_ring[idx].mbuf = NULL;
+ txq->sw_tx_cons++;
+ PMD_TX_LOG(DEBUG, txq, "Freed tx packet\n");
+ } else {
+ ecore_chain_consume(&txq->tx_pbl);
+ txq->nb_tx_avail++;
+ }
+}
- memset(&q_params, 0, sizeof(q_params));
- memset(&ret_params, 0, sizeof(ret_params));
- q_params.queue_id = txq->queue_id;
- q_params.vport_id = 0;
- q_params.sb = fp->sb_info->igu_sb_id;
- q_params.sb_idx = TX_PI(tc);
+static inline void
+qede_process_tx_compl(__rte_unused struct ecore_dev *edev,
+ struct qede_tx_queue *txq)
+{
+ uint16_t hw_bd_cons;
+#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
+ uint16_t sw_tx_cons;
+#endif
- rc = qdev->ops->q_tx_start(edev, i, &q_params,
- p_phys_table,
- page_cnt, /* **pp_doorbell */
- &ret_params);
- if (rc) {
- DP_ERR(edev, "Start txq %u failed %d\n",
- txq_index, rc);
- return rc;
+ rte_compiler_barrier();
+ hw_bd_cons = rte_le_to_cpu_16(*txq->hw_cons_ptr);
+#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
+ sw_tx_cons = ecore_chain_get_cons_idx(&txq->tx_pbl);
+ PMD_TX_LOG(DEBUG, txq, "Tx Completions = %u\n",
+ abs(hw_bd_cons - sw_tx_cons));
+#endif
+ while (hw_bd_cons != ecore_chain_get_cons_idx(&txq->tx_pbl))
+ qede_free_tx_pkt(txq);
+}
+
+static int qede_drain_txq(struct qede_dev *qdev,
+ struct qede_tx_queue *txq, bool allow_drain)
+{
+ struct ecore_dev *edev = &qdev->edev;
+ int rc, cnt = 1000;
+
+ while (txq->sw_tx_cons != txq->sw_tx_prod) {
+ qede_process_tx_compl(edev, txq);
+ if (!cnt) {
+ if (allow_drain) {
+				DP_ERR(edev, "Tx queue[%u] is stuck, "
+					"requesting MCP to drain\n",
+ txq->queue_id);
+ rc = qdev->ops->common->drain(edev);
+ if (rc)
+ return rc;
+ return qede_drain_txq(qdev, txq, false);
}
+			DP_ERR(edev, "Timeout waiting for tx queue[%d]: "
+				"PROD=%d, CONS=%d\n",
+ txq->queue_id, txq->sw_tx_prod,
+ txq->sw_tx_cons);
+ return -1;
+ }
+ cnt--;
+ DELAY(1000);
+ rte_compiler_barrier();
+ }
- txq->doorbell_addr = ret_params.p_doorbell;
- txq->handle = ret_params.p_handle;
+ /* FW finished processing, wait for HW to transmit all tx packets */
+ DELAY(2000);
- txq->hw_cons_ptr =
- &fp->sb_info->sb_virt->pi_array[TX_PI(tc)];
- SET_FIELD(txq->tx_db.data.params,
- ETH_DB_DATA_DEST, DB_DEST_XCM);
- SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD,
- DB_AGG_CMD_SET);
- SET_FIELD(txq->tx_db.data.params,
- ETH_DB_DATA_AGG_VAL_SEL,
- DQ_XCM_ETH_TX_BD_PROD_CMD);
+ return 0;
+}
+
+/* Stops a given TX queue in the HW */
+static int qede_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_hwfn *p_hwfn;
+ struct qede_tx_queue *txq;
+ int hwfn_index;
+ int rc;
- txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
+ if (tx_queue_id < eth_dev->data->nb_tx_queues) {
+ txq = eth_dev->data->tx_queues[tx_queue_id];
+ /* Drain txq */
+ if (qede_drain_txq(qdev, txq, true))
+ return -1; /* For the lack of retcodes */
+ /* Stop txq */
+ hwfn_index = tx_queue_id % edev->num_hwfns;
+ p_hwfn = &edev->hwfns[hwfn_index];
+ rc = ecore_eth_tx_queue_stop(p_hwfn, txq->handle);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "TX queue %u stop fails\n", tx_queue_id);
+ return -1;
}
+ qede_tx_queue_release_mbufs(txq);
+ qede_tx_queue_reset(qdev, txq);
+ eth_dev->data->tx_queue_state[tx_queue_id] =
+ RTE_ETH_QUEUE_STATE_STOPPED;
+ DP_INFO(edev, "TX queue %u stopped\n", tx_queue_id);
+ } else {
+ DP_ERR(edev, "TX queue %u is not in range\n", tx_queue_id);
+ rc = -EINVAL;
}
- /* Prepare and send the vport enable */
- memset(&vport_update_params, 0, sizeof(vport_update_params));
- /* Update MTU via vport update */
- vport_update_params.mtu = qdev->mtu;
- vport_update_params.vport_id = 0;
- vport_update_params.update_vport_active_flg = 1;
- vport_update_params.vport_active_flg = 1;
-
- /* @DPDK */
- if (qed_info->mf_mode == MF_NPAR && qed_info->tx_switching) {
- /* TBD: Check SRIOV enabled for VF */
- vport_update_params.update_tx_switching_flg = 1;
- vport_update_params.tx_switching_flg = 1;
+ return rc;
+}
+
+int qede_start_queues(struct rte_eth_dev *eth_dev)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ uint8_t id;
+ int rc;
+
+ for_each_rss(id) {
+ rc = qede_rx_queue_start(eth_dev, id);
+ if (rc != ECORE_SUCCESS)
+ return -1;
}
- /* TPA */
- if (qdev->enable_lro) {
- DP_INFO(edev, "Enabling LRO\n");
- memset(&tpa_params, 0, sizeof(struct ecore_sge_tpa_params));
- qede_update_sge_tpa_params(&tpa_params, qdev->mtu, true);
- vport_update_params.sge_tpa_params = &tpa_params;
+ for_each_tss(id) {
+ rc = qede_tx_queue_start(eth_dev, id);
+ if (rc != ECORE_SUCCESS)
+ return -1;
}
- rc = qdev->ops->vport_update(edev, &vport_update_params);
- if (rc) {
- DP_ERR(edev, "Update V-PORT failed %d\n", rc);
- return rc;
+ return rc;
+}
+
+void qede_stop_queues(struct rte_eth_dev *eth_dev)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ uint8_t id;
+
+ /* Stopping RX/TX queues */
+ for_each_tss(id) {
+ qede_tx_queue_stop(eth_dev, id);
}
- return 0;
+ for_each_rss(id) {
+ qede_rx_queue_stop(eth_dev, id);
+ }
}
static bool qede_tunn_exist(uint16_t flag)
@@ -937,7 +1082,7 @@ qede_process_sg_pkts(void *p_rxq, struct rte_mbuf *rx_mb,
pkt_len;
if (unlikely(!cur_size)) {
PMD_RX_LOG(ERR, rxq, "Length is 0 while %u BDs"
- " left for mapping jumbo", num_segs);
+ " left for mapping jumbo\n", num_segs);
qede_recycle_rx_bd_ring(rxq, qdev, num_segs);
return -EINVAL;
}
@@ -961,7 +1106,6 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
struct qede_rx_queue *rxq = p_rxq;
struct qede_dev *qdev = rxq->qdev;
struct ecore_dev *edev = &qdev->edev;
- struct qede_fastpath *fp = &qdev->fp_array[rxq->queue_id];
uint16_t hw_comp_cons, sw_comp_cons, sw_rx_index;
uint16_t rx_pkt = 0;
union eth_rx_cqe *cqe;
@@ -1047,7 +1191,8 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
goto tpa_end;
case ETH_RX_CQE_TYPE_SLOW_PATH:
PMD_RX_LOG(INFO, rxq, "Got unexpected slowpath CQE\n");
- qdev->ops->eth_cqe_completion(edev, fp->id,
+ ecore_eth_cqe_completion(
+ &edev->hwfns[rxq->queue_id % edev->num_hwfns],
(struct eth_slow_path_rx_cqe *)cqe);
/* fall-thru */
default:
@@ -1235,53 +1380,6 @@ next_cqe:
return rx_pkt;
}
-static inline void
-qede_free_tx_pkt(struct qede_tx_queue *txq)
-{
- struct rte_mbuf *mbuf;
- uint16_t nb_segs;
- uint16_t idx;
-
- idx = TX_CONS(txq);
- mbuf = txq->sw_tx_ring[idx].mbuf;
- if (mbuf) {
- nb_segs = mbuf->nb_segs;
- PMD_TX_LOG(DEBUG, txq, "nb_segs to free %u\n", nb_segs);
- while (nb_segs) {
- /* It's like consuming rxbuf in recv() */
- ecore_chain_consume(&txq->tx_pbl);
- txq->nb_tx_avail++;
- nb_segs--;
- }
- rte_pktmbuf_free(mbuf);
- txq->sw_tx_ring[idx].mbuf = NULL;
- txq->sw_tx_cons++;
- PMD_TX_LOG(DEBUG, txq, "Freed tx packet\n");
- } else {
- ecore_chain_consume(&txq->tx_pbl);
- txq->nb_tx_avail++;
- }
-}
-
-static inline void
-qede_process_tx_compl(__rte_unused struct ecore_dev *edev,
- struct qede_tx_queue *txq)
-{
- uint16_t hw_bd_cons;
-#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
- uint16_t sw_tx_cons;
-#endif
-
- rte_compiler_barrier();
- hw_bd_cons = rte_le_to_cpu_16(*txq->hw_cons_ptr);
-#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
- sw_tx_cons = ecore_chain_get_cons_idx(&txq->tx_pbl);
- PMD_TX_LOG(DEBUG, txq, "Tx Completions = %u\n",
- abs(hw_bd_cons - sw_tx_cons));
-#endif
- while (hw_bd_cons != ecore_chain_get_cons_idx(&txq->tx_pbl))
- qede_free_tx_pkt(txq);
-}
/* Populate scatter gather buffer descriptor fields */
static inline uint8_t
@@ -1378,7 +1476,9 @@ qede_xmit_prep_pkts(__rte_unused void *p_txq, struct rte_mbuf **tx_pkts,
uint64_t ol_flags;
struct rte_mbuf *m;
uint16_t i;
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
int ret;
+#endif
for (i = 0; i < nb_pkts; i++) {
m = tx_pkts[i];
@@ -1411,14 +1511,6 @@ qede_xmit_prep_pkts(__rte_unused void *p_txq, struct rte_mbuf **tx_pkts,
break;
}
#endif
- /* TBD: pseudo csum calcuation required iff
- * ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE not set?
- */
- ret = rte_net_intel_cksum_prepare(m);
- if (ret != 0) {
- rte_errno = ret;
- break;
- }
}
#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
@@ -1429,6 +1521,27 @@ qede_xmit_prep_pkts(__rte_unused void *p_txq, struct rte_mbuf **tx_pkts,
return i;
}
+#define MPLSINUDP_HDR_SIZE (12)
+
+#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
+static inline void
+qede_mpls_tunn_tx_sanity_check(struct rte_mbuf *mbuf,
+ struct qede_tx_queue *txq)
+{
+ if (((mbuf->outer_l2_len + mbuf->outer_l3_len) / 2) > 0xff)
+ PMD_TX_LOG(ERR, txq, "tunn_l4_hdr_start_offset overflow\n");
+ if (((mbuf->outer_l2_len + mbuf->outer_l3_len +
+ MPLSINUDP_HDR_SIZE) / 2) > 0xff)
+ PMD_TX_LOG(ERR, txq, "tunn_hdr_size overflow\n");
+ if (((mbuf->l2_len - MPLSINUDP_HDR_SIZE) / 2) >
+ ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK)
+ PMD_TX_LOG(ERR, txq, "inner_l2_hdr_size overflow\n");
+ if (((mbuf->l2_len - MPLSINUDP_HDR_SIZE + mbuf->l3_len) / 2) >
+ ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK)
+		PMD_TX_LOG(ERR, txq, "inner_l4_hdr_offset overflow\n");
+}
+#endif
+
uint16_t
qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
@@ -1443,14 +1556,30 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
uint16_t nb_frags;
uint16_t nb_pkt_sent = 0;
uint8_t nbds;
- bool ipv6_ext_flg;
bool lso_flg;
- bool tunn_flg;
+ bool mplsoudp_flg;
+ __rte_unused bool tunn_flg;
+ bool tunn_ipv6_ext_flg;
struct eth_tx_1st_bd *bd1;
struct eth_tx_2nd_bd *bd2;
struct eth_tx_3rd_bd *bd3;
uint64_t tx_ol_flags;
uint16_t hdr_size;
+ /* BD1 */
+ uint16_t bd1_bf;
+ uint8_t bd1_bd_flags_bf;
+ uint16_t vlan;
+ /* BD2 */
+ uint16_t bd2_bf1;
+ uint16_t bd2_bf2;
+ /* BD3 */
+ uint16_t mss;
+ uint16_t bd3_bf;
+
+ uint8_t tunn_l4_hdr_start_offset;
+ uint8_t tunn_hdr_size;
+ uint8_t inner_l2_hdr_size;
+ uint16_t inner_l4_hdr_offset;
if (unlikely(txq->nb_tx_avail < txq->tx_free_thresh)) {
PMD_TX_LOG(DEBUG, txq, "send=%u avail=%u free_thresh=%u",
@@ -1462,14 +1591,24 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
bd_prod = rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl));
while (nb_tx_pkts--) {
/* Init flags/values */
- ipv6_ext_flg = false;
tunn_flg = false;
lso_flg = false;
nbds = 0;
+ vlan = 0;
bd1 = NULL;
bd2 = NULL;
bd3 = NULL;
hdr_size = 0;
+ bd1_bf = 0;
+ bd1_bd_flags_bf = 0;
+ bd2_bf1 = 0;
+ bd2_bf2 = 0;
+ mss = 0;
+ bd3_bf = 0;
+ mplsoudp_flg = false;
+ tunn_ipv6_ext_flg = false;
+ tunn_hdr_size = 0;
+ tunn_l4_hdr_start_offset = 0;
mbuf = *tx_pkts++;
assert(mbuf);
@@ -1479,36 +1618,177 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
break;
tx_ol_flags = mbuf->ol_flags;
+ bd1_bd_flags_bf |= 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
-#define RTE_ETH_IS_IPV6_HDR_EXT(ptype) ((ptype) & RTE_PTYPE_L3_IPV6_EXT)
- if (RTE_ETH_IS_IPV6_HDR_EXT(mbuf->packet_type))
- ipv6_ext_flg = true;
-
- if (RTE_ETH_IS_TUNNEL_PKT(mbuf->packet_type))
+ /* TX prepare would have already checked supported tunnel Tx
+ * offloads. Don't rely on pkt_type marked by Rx, instead use
+ * tx_ol_flags to decide.
+ */
+ if (((tx_ol_flags & PKT_TX_TUNNEL_MASK) ==
+ PKT_TX_TUNNEL_VXLAN) ||
+ ((tx_ol_flags & PKT_TX_TUNNEL_MASK) ==
+ PKT_TX_TUNNEL_MPLSINUDP)) {
+ /* Check against max which is Tunnel IPv6 + ext */
+ if (unlikely(txq->nb_tx_avail <
+ ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT))
+ break;
tunn_flg = true;
+ /* First indicate its a tunnel pkt */
+ bd1_bf |= ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK <<
+ ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
+			/* Legacy FW had flipped behavior in regard to this bit,
+			 * i.e. it needed to be set to prevent FW from touching
+			 * encapsulated packets when it didn't need to.
+ */
+ if (unlikely(txq->is_legacy)) {
+ bd1_bf ^= 1 <<
+ ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
+ }
- if (tx_ol_flags & PKT_TX_TCP_SEG)
- lso_flg = true;
+ /* Outer IP checksum offload */
+ if (tx_ol_flags & (PKT_TX_OUTER_IP_CKSUM |
+ PKT_TX_OUTER_IPV4)) {
+ bd1_bd_flags_bf |=
+ ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK <<
+ ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT;
+ }
- if (lso_flg) {
+ /**
+ * Currently, only inner checksum offload in MPLS-in-UDP
+ * tunnel with one MPLS label is supported. Both outer
+ * and inner layers lengths need to be provided in
+ * mbuf.
+ */
+ if ((tx_ol_flags & PKT_TX_TUNNEL_MASK) ==
+ PKT_TX_TUNNEL_MPLSINUDP) {
+ mplsoudp_flg = true;
+#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
+ qede_mpls_tunn_tx_sanity_check(mbuf, txq);
+#endif
+ /* Outer L4 offset in two byte words */
+ tunn_l4_hdr_start_offset =
+ (mbuf->outer_l2_len + mbuf->outer_l3_len) / 2;
+ /* Tunnel header size in two byte words */
+ tunn_hdr_size = (mbuf->outer_l2_len +
+ mbuf->outer_l3_len +
+ MPLSINUDP_HDR_SIZE) / 2;
+ /* Inner L2 header size in two byte words */
+ inner_l2_hdr_size = (mbuf->l2_len -
+ MPLSINUDP_HDR_SIZE) / 2;
+				/* Inner L4 header offset from the beginning
+ * of inner packet in two byte words
+ */
+ inner_l4_hdr_offset = (mbuf->l2_len -
+ MPLSINUDP_HDR_SIZE + mbuf->l3_len) / 2;
+
+ /* Inner L2 size and address type */
+ bd2_bf1 |= (inner_l2_hdr_size &
+ ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK) <<
+ ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT;
+ bd2_bf1 |= (UNICAST_ADDRESS &
+ ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_MASK) <<
+ ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_SHIFT;
+ /* Treated as IPv6+Ext */
+ bd2_bf1 |=
+ 1 << ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT;
+
+ /* Mark inner IPv6 if present */
+ if (tx_ol_flags & PKT_TX_IPV6)
+ bd2_bf1 |=
+ 1 << ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT;
+
+ /* Inner L4 offsets */
+ if ((tx_ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6)) &&
+ (tx_ol_flags & (PKT_TX_UDP_CKSUM |
+ PKT_TX_TCP_CKSUM))) {
+ /* Determines if BD3 is needed */
+ tunn_ipv6_ext_flg = true;
+ if ((tx_ol_flags & PKT_TX_L4_MASK) ==
+ PKT_TX_UDP_CKSUM) {
+ bd2_bf1 |=
+ 1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT;
+ }
+
+ /* TODO other pseudo checksum modes are
+ * not supported
+ */
+ bd2_bf1 |=
+ ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH <<
+ ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT;
+ bd2_bf2 |= (inner_l4_hdr_offset &
+ ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK) <<
+ ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT;
+ }
+ } /* End MPLSoUDP */
+ } /* End Tunnel handling */
+
+ if (tx_ol_flags & PKT_TX_TCP_SEG) {
+ lso_flg = true;
if (unlikely(txq->nb_tx_avail <
ETH_TX_MIN_BDS_PER_LSO_PKT))
break;
+ /* For LSO, packet header and payload must reside on
+ * buffers pointed by different BDs. Using BD1 for HDR
+ * and BD2 onwards for data.
+ */
+ hdr_size = mbuf->l2_len + mbuf->l3_len + mbuf->l4_len;
+ bd1_bd_flags_bf |= 1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT;
+ bd1_bd_flags_bf |=
+ 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
+ /* PKT_TX_TCP_SEG implies PKT_TX_TCP_CKSUM */
+ bd1_bd_flags_bf |=
+ 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
+ mss = rte_cpu_to_le_16(mbuf->tso_segsz);
+ /* Using one header BD */
+ bd3_bf |= rte_cpu_to_le_16(1 <<
+ ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
} else {
if (unlikely(txq->nb_tx_avail <
ETH_TX_MIN_BDS_PER_NON_LSO_PKT))
break;
+ bd1_bf |=
+ (mbuf->pkt_len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK)
+ << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
}
- if (tunn_flg && ipv6_ext_flg) {
- if (unlikely(txq->nb_tx_avail <
- ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT))
- break;
+ /* Descriptor based VLAN insertion */
+ if (tx_ol_flags & (PKT_TX_VLAN_PKT | PKT_TX_QINQ_PKT)) {
+ vlan = rte_cpu_to_le_16(mbuf->vlan_tci);
+ bd1_bd_flags_bf |=
+ 1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
}
- if (ipv6_ext_flg) {
- if (unlikely(txq->nb_tx_avail <
- ETH_TX_MIN_BDS_PER_IPV6_WITH_EXT_PKT))
- break;
+
+ /* Offload the IP checksum in the hardware */
+ if (tx_ol_flags & PKT_TX_IP_CKSUM) {
+ bd1_bd_flags_bf |=
+ 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
+ /* There's no DPDK flag to request outer-L4 csum
+ * offload. But in the case of tunnel if inner L3 or L4
+ * csum offload is requested then we need to force
+ * recalculation of L4 tunnel header csum also.
+ */
+ if (tunn_flg) {
+ bd1_bd_flags_bf |=
+ ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK <<
+ ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;
+ }
+ }
+
+ /* L4 checksum offload (tcp or udp) */
+ if ((tx_ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6)) &&
+ (tx_ol_flags & (PKT_TX_UDP_CKSUM | PKT_TX_TCP_CKSUM))) {
+ bd1_bd_flags_bf |=
+ 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
+ /* There's no DPDK flag to request outer-L4 csum
+ * offload. But in the case of tunnel if inner L3 or L4
+ * csum offload is requested then we need to force
+ * recalculation of L4 tunnel header csum also.
+ */
+ if (tunn_flg) {
+ bd1_bd_flags_bf |=
+ ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK <<
+ ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;
+ }
}
/* Fill the entry in the SW ring and the BDs in the FW ring */
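
Note: all of the tunnel offsets computed above are expressed in two-byte words, which is the easiest place to slip up. A worked example under assumed header sizes (outer Ethernet + IPv4, UDP plus one MPLS label, inner Ethernet + IPv4):

    #include <stdint.h>

    /* Assumed mbuf lengths for illustration only:
     *   outer_l2_len = 14, outer_l3_len = 20,
     *   MPLSINUDP_HDR_SIZE = 12 (UDP header + one MPLS label),
     *   l2_len = 26 (12 bytes of tunnel framing + 14-byte inner Ethernet),
     *   l3_len = 20 (inner IPv4).
     */
    static void example_mplsoudp_offsets(void)
    {
    	const uint16_t outer_l2 = 14, outer_l3 = 20, mpls_hdr = 12;
    	const uint16_t l2_len = 26, l3_len = 20;

    	uint8_t l4_start = (outer_l2 + outer_l3) / 2;			/* 17 words */
    	uint8_t tunn_hdr = (outer_l2 + outer_l3 + mpls_hdr) / 2;	/* 23 words */
    	uint8_t inner_l2 = (l2_len - mpls_hdr) / 2;			/* 7 words */
    	uint16_t inner_l4 = (l2_len - mpls_hdr + l3_len) / 2;		/* 17 words */

    	(void)l4_start; (void)tunn_hdr; (void)inner_l2; (void)inner_l4;
    }
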
@@ -1520,107 +1800,49 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
memset(bd1, 0, sizeof(struct eth_tx_1st_bd));
nbds++;
- bd1->data.bd_flags.bitfields |=
- 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
- /* FW 8.10.x specific change */
- if (!lso_flg) {
- bd1->data.bitfields |=
- (mbuf->pkt_len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK)
- << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
- /* Map MBUF linear data for DMA and set in the BD1 */
- QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_dma_addr(mbuf),
- mbuf->data_len);
- } else {
- /* For LSO, packet header and payload must reside on
- * buffers pointed by different BDs. Using BD1 for HDR
- * and BD2 onwards for data.
- */
- hdr_size = mbuf->l2_len + mbuf->l3_len + mbuf->l4_len;
- QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_dma_addr(mbuf),
- hdr_size);
- }
+ /* Map MBUF linear data for DMA and set in the BD1 */
+ QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_dma_addr(mbuf),
+ mbuf->data_len);
+ bd1->data.bitfields = rte_cpu_to_le_16(bd1_bf);
+ bd1->data.bd_flags.bitfields = bd1_bd_flags_bf;
+ bd1->data.vlan = vlan;
- if (tunn_flg) {
- /* First indicate its a tunnel pkt */
- bd1->data.bitfields |=
- ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK <<
- ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
-
- /* Legacy FW had flipped behavior in regard to this bit
- * i.e. it needed to set to prevent FW from touching
- * encapsulated packets when it didn't need to.
- */
- if (unlikely(txq->is_legacy))
- bd1->data.bitfields ^=
- 1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
-
- /* Outer IP checksum offload */
- if (tx_ol_flags & PKT_TX_OUTER_IP_CKSUM) {
- bd1->data.bd_flags.bitfields |=
- ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK <<
- ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT;
- }
-
- /* Outer UDP checksum offload */
- bd1->data.bd_flags.bitfields |=
- ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK <<
- ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;
- }
-
- /* Descriptor based VLAN insertion */
- if (tx_ol_flags & (PKT_TX_VLAN_PKT | PKT_TX_QINQ_PKT)) {
- bd1->data.vlan = rte_cpu_to_le_16(mbuf->vlan_tci);
- bd1->data.bd_flags.bitfields |=
- 1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
- }
-
- if (lso_flg)
- bd1->data.bd_flags.bitfields |=
- 1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT;
-
- /* Offload the IP checksum in the hardware */
- if ((lso_flg) || (tx_ol_flags & PKT_TX_IP_CKSUM))
- bd1->data.bd_flags.bitfields |=
- 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
-
- /* L4 checksum offload (tcp or udp) */
- if ((lso_flg) || (tx_ol_flags & (PKT_TX_TCP_CKSUM |
- PKT_TX_UDP_CKSUM)))
- /* PKT_TX_TCP_SEG implies PKT_TX_TCP_CKSUM */
- bd1->data.bd_flags.bitfields |=
- 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
-
- /* BD2 */
- if (lso_flg || ipv6_ext_flg) {
+ if (lso_flg || mplsoudp_flg) {
bd2 = (struct eth_tx_2nd_bd *)ecore_chain_produce
(&txq->tx_pbl);
memset(bd2, 0, sizeof(struct eth_tx_2nd_bd));
nbds++;
- QEDE_BD_SET_ADDR_LEN(bd2,
- (hdr_size +
- rte_mbuf_data_dma_addr(mbuf)),
- mbuf->data_len - hdr_size);
- /* TBD: check pseudo csum iff tx_prepare not called? */
- if (ipv6_ext_flg) {
- bd2->data.bitfields1 |=
- ETH_L4_PSEUDO_CSUM_ZERO_LENGTH <<
- ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT;
- }
- }
- /* BD3 */
- if (lso_flg || ipv6_ext_flg) {
- bd3 = (struct eth_tx_3rd_bd *)ecore_chain_produce
- (&txq->tx_pbl);
- memset(bd3, 0, sizeof(struct eth_tx_3rd_bd));
- nbds++;
- if (lso_flg) {
- bd3->data.lso_mss =
- rte_cpu_to_le_16(mbuf->tso_segsz);
- /* Using one header BD */
- bd3->data.bitfields |=
- rte_cpu_to_le_16(1 <<
- ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
+ /* BD1 */
+ QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_dma_addr(mbuf),
+ hdr_size);
+ /* BD2 */
+ QEDE_BD_SET_ADDR_LEN(bd2, (hdr_size +
+ rte_mbuf_data_dma_addr(mbuf)),
+ mbuf->data_len - hdr_size);
+ bd2->data.bitfields1 = rte_cpu_to_le_16(bd2_bf1);
+ if (mplsoudp_flg) {
+ bd2->data.bitfields2 =
+ rte_cpu_to_le_16(bd2_bf2);
+ /* Outer L3 size */
+ bd2->data.tunn_ip_size =
+ rte_cpu_to_le_16(mbuf->outer_l3_len);
+ }
+ /* BD3 */
+ if (lso_flg || (mplsoudp_flg && tunn_ipv6_ext_flg)) {
+ bd3 = (struct eth_tx_3rd_bd *)
+ ecore_chain_produce(&txq->tx_pbl);
+ memset(bd3, 0, sizeof(struct eth_tx_3rd_bd));
+ nbds++;
+ bd3->data.bitfields = rte_cpu_to_le_16(bd3_bf);
+ if (lso_flg)
+ bd3->data.lso_mss = mss;
+ if (mplsoudp_flg) {
+ bd3->data.tunn_l4_hdr_start_offset_w =
+ tunn_l4_hdr_start_offset;
+ bd3->data.tunn_hdr_size_w =
+ tunn_hdr_size;
+ }
}
}
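The removed comment above captures the LSO constraint that survives in the reworked path: BD1 may carry only the L2+L3+L4 headers, and BD2 takes the remainder of the first segment. A minimal standalone sketch of that split; the struct and helper below are illustrative stand-ins, not driver code:

/* Illustrative only - mirrors how hdr_size and the BD1/BD2 split are derived */
#include <stdint.h>
#include <stdio.h>

struct pkt_seg {
	uint64_t dma_addr;		/* bus address of the segment data */
	uint16_t data_len;		/* bytes in this segment */
	uint16_t l2_len, l3_len, l4_len;
};

static void lso_bd_split(const struct pkt_seg *seg,
			 uint64_t *bd1_addr, uint16_t *bd1_len,
			 uint64_t *bd2_addr, uint16_t *bd2_len)
{
	/* BD1 covers only the headers, BD2 the rest of the segment */
	uint16_t hdr_size = seg->l2_len + seg->l3_len + seg->l4_len;

	*bd1_addr = seg->dma_addr;
	*bd1_len  = hdr_size;
	*bd2_addr = seg->dma_addr + hdr_size;
	*bd2_len  = seg->data_len - hdr_size;
}

int main(void)
{
	struct pkt_seg seg = { 0x1000, 1514, 14, 20, 20 };
	uint64_t a1, a2;
	uint16_t l1, l2;

	lso_bd_split(&seg, &a1, &l1, &a2, &l2);
	printf("BD1 %u bytes @0x%llx, BD2 %u bytes @0x%llx\n",
	       l1, (unsigned long long)a1, l2, (unsigned long long)a2);
	return 0;
}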
@@ -1636,8 +1858,7 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl));
#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
print_tx_bd_info(txq, bd1, bd2, bd3, tx_ol_flags);
- PMD_TX_LOG(INFO, txq, "lso=%d tunn=%d ipv6_ext=%d\n",
- lso_flg, tunn_flg, ipv6_ext_flg);
+ PMD_TX_LOG(INFO, txq, "lso=%d tunn=%d", lso_flg, tunn_flg);
#endif
nb_pkt_sent++;
txq->xmit_pkts++;
@@ -1659,290 +1880,6 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
return nb_pkt_sent;
}
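The flag handling deleted in the hunk above is what the reworked loop now precomputes into bd1_bd_flags_bf before the BD is written: LSO implies both IP and L4 checksum offload, and VLAN insertion is requested per descriptor. A compact sketch of that mapping, using stand-in flag values rather than the real ETH_TX_1ST_BD_FLAGS_* shifts:

/* Illustrative only - flag bits are stand-ins, not the HSI definitions */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define OL_IP_CKSUM	(1u << 0)
#define OL_L4_CKSUM	(1u << 1)
#define OL_TCP_SEG	(1u << 2)	/* LSO requested */
#define OL_VLAN		(1u << 3)

#define BD_START	(1u << 0)
#define BD_IP_CSUM	(1u << 1)
#define BD_L4_CSUM	(1u << 2)
#define BD_LSO		(1u << 3)
#define BD_VLAN_INS	(1u << 4)

static uint16_t build_bd1_flags(uint32_t ol_flags)
{
	uint16_t bd_flags = BD_START;
	bool lso = ol_flags & OL_TCP_SEG;

	if (lso)
		bd_flags |= BD_LSO;
	/* LSO implies both IP and L4 checksum offload */
	if (lso || (ol_flags & OL_IP_CKSUM))
		bd_flags |= BD_IP_CSUM;
	if (lso || (ol_flags & OL_L4_CKSUM))
		bd_flags |= BD_L4_CSUM;
	if (ol_flags & OL_VLAN)
		bd_flags |= BD_VLAN_INS;
	return bd_flags;
}

int main(void)
{
	printf("flags=0x%x\n", build_bd1_flags(OL_TCP_SEG | OL_VLAN));
	return 0;
}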
-static void qede_init_fp_queue(struct rte_eth_dev *eth_dev)
-{
- struct qede_dev *qdev = eth_dev->data->dev_private;
- struct qede_fastpath *fp;
- uint8_t i, txq_index, tc;
- int rxq = 0, txq = 0;
-
- for_each_queue(i) {
- fp = &qdev->fp_array[i];
- if (fp->type & QEDE_FASTPATH_RX) {
- fp->rxq = eth_dev->data->rx_queues[i];
- fp->rxq->queue_id = rxq++;
- }
-
- if (fp->type & QEDE_FASTPATH_TX) {
- for (tc = 0; tc < qdev->num_tc; tc++) {
- txq_index = tc * QEDE_TSS_COUNT(qdev) + txq;
- fp->txqs[tc] =
- eth_dev->data->tx_queues[txq_index];
- fp->txqs[tc]->queue_id = txq_index;
- if (qdev->dev_info.is_legacy)
- fp->txqs[tc]->is_legacy = true;
- }
- txq++;
- }
- }
-}
-
-int qede_dev_start(struct rte_eth_dev *eth_dev)
-{
- struct qede_dev *qdev = eth_dev->data->dev_private;
- struct ecore_dev *edev = &qdev->edev;
- int rc;
-
- DP_INFO(edev, "Device state is %d\n", qdev->state);
-
- if (qdev->state == QEDE_DEV_START) {
- DP_INFO(edev, "Port is already started\n");
- return 0;
- }
-
- if (qdev->state == QEDE_DEV_CONFIG)
- qede_init_fp_queue(eth_dev);
-
- rc = qede_start_queues(eth_dev, true);
- if (rc) {
- DP_ERR(edev, "Failed to start queues\n");
- /* TBD: free */
- return rc;
- }
-
- /* Newer SR-IOV PF driver expects RX/TX queues to be started before
- * enabling RSS. Hence RSS configuration is deferred upto this point.
- * Also, we would like to retain similar behavior in PF case, so we
- * don't do PF/VF specific check here.
- */
- if (eth_dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
- if (qede_config_rss(eth_dev))
- return -1;
-
- /* Bring-up the link */
- qede_dev_set_link_state(eth_dev, true);
-
- /* Start/resume traffic */
- qdev->ops->fastpath_start(edev);
-
- qdev->state = QEDE_DEV_START;
-
- DP_INFO(edev, "dev_state is QEDE_DEV_START\n");
-
- return 0;
-}
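The comment in the removed qede_dev_start() states the ordering it enforces: RX/TX queues must be started before RSS is configured, and only then is the link brought up and the fastpath enabled. A tiny sketch of that ordering with stand-in helpers, not the driver calls:

/* Illustrative ordering only */
#include <stdbool.h>
#include <stdio.h>

static int start_queues(void)    { puts("RX/TX queues started"); return 0; }
static int config_rss(void)      { puts("RSS indirection table programmed"); return 0; }
static void set_link(bool up)    { printf("link %s\n", up ? "up" : "down"); }
static void start_fastpath(void) { puts("fastpath enabled"); }

static int dev_start_order(bool rss_enabled)
{
	if (start_queues())
		return -1;
	/* RSS is configured only after the queues exist in firmware */
	if (rss_enabled && config_rss())
		return -1;
	set_link(true);
	start_fastpath();
	return 0;
}

int main(void)
{
	return dev_start_order(true);
}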
-
-static int qede_drain_txq(struct qede_dev *qdev,
- struct qede_tx_queue *txq, bool allow_drain)
-{
- struct ecore_dev *edev = &qdev->edev;
- int rc, cnt = 1000;
-
- while (txq->sw_tx_cons != txq->sw_tx_prod) {
- qede_process_tx_compl(edev, txq);
- if (!cnt) {
- if (allow_drain) {
- DP_ERR(edev, "Tx queue[%u] is stuck,"
- "requesting MCP to drain\n",
- txq->queue_id);
- rc = qdev->ops->common->drain(edev);
- if (rc)
- return rc;
- return qede_drain_txq(qdev, txq, false);
- }
- DP_ERR(edev, "Timeout waiting for tx queue[%d]:"
- "PROD=%d, CONS=%d\n",
- txq->queue_id, txq->sw_tx_prod,
- txq->sw_tx_cons);
- return -1;
- }
- cnt--;
- DELAY(1000);
- rte_compiler_barrier();
- }
-
- /* FW finished processing, wait for HW to transmit all tx packets */
- DELAY(2000);
-
- return 0;
-}
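The qede_drain_txq() removed above is a bounded poll: process completions until the software consumer catches up with the producer, allow roughly 1000 iterations of 1 ms each, escalate once by requesting a firmware drain, then fail. A self-contained sketch of the same pattern with a simulated ring:

/* Illustrative only - ring_empty() fakes one completion per poll */
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static bool ring_empty(int *pending)
{
	if (*pending > 0)
		(*pending)--;
	return *pending == 0;
}

static int drain_ring(int *pending, bool allow_escalate)
{
	int cnt = 1000;

	while (!ring_empty(pending)) {
		if (!cnt) {
			if (allow_escalate) {
				puts("ring stuck, requesting firmware drain");
				return drain_ring(pending, false);
			}
			return -1;	/* timed out twice, give up */
		}
		cnt--;
		usleep(1000);		/* DELAY(1000) in the driver */
	}
	return 0;
}

int main(void)
{
	int pending = 5;

	return drain_ring(&pending, true);
}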
-
-static int qede_stop_queues(struct qede_dev *qdev)
-{
- struct qed_update_vport_params vport_update_params;
- struct ecore_dev *edev = &qdev->edev;
- struct ecore_sge_tpa_params tpa_params;
- struct qede_fastpath *fp;
- int rc, tc, i;
-
- /* Disable the vport */
- memset(&vport_update_params, 0, sizeof(vport_update_params));
- vport_update_params.vport_id = 0;
- vport_update_params.update_vport_active_flg = 1;
- vport_update_params.vport_active_flg = 0;
- vport_update_params.update_rss_flg = 0;
- /* Disable TPA */
- if (qdev->enable_lro) {
- DP_INFO(edev, "Disabling LRO\n");
- memset(&tpa_params, 0, sizeof(struct ecore_sge_tpa_params));
- qede_update_sge_tpa_params(&tpa_params, qdev->mtu, false);
- vport_update_params.sge_tpa_params = &tpa_params;
- }
-
- DP_INFO(edev, "Deactivate vport\n");
- rc = qdev->ops->vport_update(edev, &vport_update_params);
- if (rc) {
- DP_ERR(edev, "Failed to update vport\n");
- return rc;
- }
-
- DP_INFO(edev, "Flushing tx queues\n");
-
- /* Flush Tx queues. If needed, request drain from MCP */
- for_each_queue(i) {
- fp = &qdev->fp_array[i];
-
- if (fp->type & QEDE_FASTPATH_TX) {
- for (tc = 0; tc < qdev->num_tc; tc++) {
- struct qede_tx_queue *txq = fp->txqs[tc];
-
- rc = qede_drain_txq(qdev, txq, true);
- if (rc)
- return rc;
- }
- }
- }
-
- /* Stop all Queues in reverse order */
- for (i = QEDE_QUEUE_CNT(qdev) - 1; i >= 0; i--) {
- fp = &qdev->fp_array[i];
-
- /* Stop the Tx Queue(s) */
- if (qdev->fp_array[i].type & QEDE_FASTPATH_TX) {
- for (tc = 0; tc < qdev->num_tc; tc++) {
- struct qede_tx_queue *txq = fp->txqs[tc];
- DP_INFO(edev, "Stopping tx queues\n");
- rc = qdev->ops->q_tx_stop(edev, i, txq->handle);
- if (rc) {
- DP_ERR(edev, "Failed to stop TXQ #%d\n",
- i);
- return rc;
- }
- }
- }
-
- /* Stop the Rx Queue */
- if (qdev->fp_array[i].type & QEDE_FASTPATH_RX) {
- DP_INFO(edev, "Stopping rx queues\n");
- rc = qdev->ops->q_rx_stop(edev, i, fp->rxq->handle);
- if (rc) {
- DP_ERR(edev, "Failed to stop RXQ #%d\n", i);
- return rc;
- }
- }
- }
- qede_reset_fp_rings(qdev);
-
- return 0;
-}
-
-int qede_reset_fp_rings(struct qede_dev *qdev)
-{
- struct qede_fastpath *fp;
- struct qede_tx_queue *txq;
- uint8_t tc;
- uint16_t id, i;
-
- for_each_queue(id) {
- fp = &qdev->fp_array[id];
-
- if (fp->type & QEDE_FASTPATH_RX) {
- DP_INFO(&qdev->edev,
- "Reset FP chain for RSS %u\n", id);
- qede_rx_queue_release_mbufs(fp->rxq);
- ecore_chain_reset(&fp->rxq->rx_bd_ring);
- ecore_chain_reset(&fp->rxq->rx_comp_ring);
- fp->rxq->sw_rx_prod = 0;
- fp->rxq->sw_rx_cons = 0;
- *fp->rxq->hw_cons_ptr = 0;
- for (i = 0; i < fp->rxq->nb_rx_desc; i++) {
- if (qede_alloc_rx_buffer(fp->rxq)) {
- DP_ERR(&qdev->edev,
- "RX buffer allocation failed\n");
- return -ENOMEM;
- }
- }
- }
- if (fp->type & QEDE_FASTPATH_TX) {
- for (tc = 0; tc < qdev->num_tc; tc++) {
- txq = fp->txqs[tc];
- qede_tx_queue_release_mbufs(txq);
- ecore_chain_reset(&txq->tx_pbl);
- txq->sw_tx_cons = 0;
- txq->sw_tx_prod = 0;
- *txq->hw_cons_ptr = 0;
- }
- }
- }
-
- return 0;
-}
-
-/* This function frees all memory of a single fp */
-void qede_free_mem_load(struct rte_eth_dev *eth_dev)
-{
- struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
- struct qede_fastpath *fp;
- uint16_t txq_idx;
- uint8_t id;
- uint8_t tc;
-
- for_each_queue(id) {
- fp = &qdev->fp_array[id];
- if (fp->type & QEDE_FASTPATH_RX) {
- if (!fp->rxq)
- continue;
- qede_rx_queue_release(fp->rxq);
- eth_dev->data->rx_queues[id] = NULL;
- } else {
- for (tc = 0; tc < qdev->num_tc; tc++) {
- if (!fp->txqs[tc])
- continue;
- txq_idx = fp->txqs[tc]->queue_id;
- qede_tx_queue_release(fp->txqs[tc]);
- eth_dev->data->tx_queues[txq_idx] = NULL;
- }
- }
- }
-}
-
-void qede_dev_stop(struct rte_eth_dev *eth_dev)
-{
- struct qede_dev *qdev = eth_dev->data->dev_private;
- struct ecore_dev *edev = &qdev->edev;
-
- DP_INFO(edev, "port %u\n", eth_dev->data->port_id);
-
- if (qdev->state != QEDE_DEV_START) {
- DP_INFO(edev, "Device not yet started\n");
- return;
- }
-
- if (qede_stop_queues(qdev))
- DP_ERR(edev, "Didn't succeed to close queues\n");
-
- DP_INFO(edev, "Stopped queues\n");
-
- qdev->ops->fastpath_stop(edev);
-
- /* Bring the link down */
- qede_dev_set_link_state(eth_dev, false);
-
- qdev->state = QEDE_DEV_STOP;
-
- DP_INFO(edev, "dev_state is QEDE_DEV_STOP\n");
-}
-
uint16_t
qede_rxtx_pkts_dummy(__rte_unused void *p_rxq,
__rte_unused struct rte_mbuf **pkts,
diff --git a/drivers/net/qede/qede_rxtx.h b/drivers/net/qede/qede_rxtx.h
index 21f2dacd..b551fd6a 100644
--- a/drivers/net/qede/qede_rxtx.h
+++ b/drivers/net/qede/qede_rxtx.h
@@ -77,10 +77,10 @@
#define QEDE_TXQ_FLAGS ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS)
-#define MAX_NUM_TC 8
-
-#define for_each_queue(i) for (i = 0; i < qdev->num_queues; i++)
-
+#define for_each_rss(i) for (i = 0; i < qdev->num_rx_queues; i++)
+#define for_each_tss(i) for (i = 0; i < qdev->num_tx_queues; i++)
+#define QEDE_RXTX_MAX(qdev) \
+ (RTE_MAX(QEDE_RSS_COUNT(qdev), QEDE_TSS_COUNT(qdev)))
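The new iteration macros rely on a variable named qdev being in scope and on the num_rx_queues/num_tx_queues counters they dereference. A small usage sketch; the stub struct below stands in for the real struct qede_dev:

/* Illustrative usage only */
#include <stdio.h>

struct qede_dev_stub {
	int num_rx_queues;
	int num_tx_queues;
};

#define for_each_rss(i) for (i = 0; i < qdev->num_rx_queues; i++)
#define for_each_tss(i) for (i = 0; i < qdev->num_tx_queues; i++)

int main(void)
{
	struct qede_dev_stub dev = { 4, 2 }, *qdev = &dev;
	int i;

	for_each_rss(i)
		printf("setup RX queue %d\n", i);
	for_each_tss(i)
		printf("setup TX queue %d\n", i);
	return 0;
}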
/* Macros for non-tunnel packet types lkup table */
#define QEDE_PKT_TYPE_UNKNOWN 0x0
@@ -135,7 +135,8 @@
#define QEDE_TX_OFFLOAD_MASK (QEDE_TX_CSUM_OFFLOAD_MASK | \
PKT_TX_QINQ_PKT | \
PKT_TX_VLAN_PKT | \
- PKT_TX_TUNNEL_VXLAN)
+ PKT_TX_TUNNEL_VXLAN | \
+ PKT_TX_TUNNEL_MPLSINUDP)
#define QEDE_TX_OFFLOAD_NOTSUP_MASK \
(PKT_TX_OFFLOAD_MASK ^ QEDE_TX_OFFLOAD_MASK)
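QEDE_TX_OFFLOAD_NOTSUP_MASK is the complement of the supported set within PKT_TX_OFFLOAD_MASK, so a single AND lets the Tx prepare path reject any mbuf requesting an offload the PMD cannot do (MPLS-in-UDP joins the supported set in this hunk). A sketch with stand-in flag values:

/* Illustrative only - mask values are stand-ins for the PKT_TX_* bits */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define TX_OFFLOAD_ALL		0x00FFu	/* everything the API can request */
#define TX_OFFLOAD_SUPPORTED	0x001Fu	/* what this PMD handles */
#define TX_OFFLOAD_NOTSUP	(TX_OFFLOAD_ALL ^ TX_OFFLOAD_SUPPORTED)

static int tx_prep_check(uint64_t ol_flags)
{
	/* any bit outside the supported set means the mbuf is rejected */
	if (ol_flags & TX_OFFLOAD_NOTSUP)
		return -ENOTSUP;
	return 0;
}

int main(void)
{
	printf("supported flags: %d, unsupported flag: %d\n",
	       tx_prep_check(0x0003), tx_prep_check(0x0020));
	return 0;
}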
@@ -165,6 +166,7 @@ struct qede_rx_queue {
uint16_t *hw_cons_ptr;
void OSAL_IOMEM *hw_rxq_prod_addr;
struct qede_rx_entry *sw_rx_ring;
+ struct ecore_sb_info *sb_info;
uint16_t sw_rx_cons;
uint16_t sw_rx_prod;
uint16_t nb_rx_desc;
@@ -213,13 +215,9 @@ struct qede_tx_queue {
};
struct qede_fastpath {
- struct qede_dev *qdev;
- u8 type;
- uint8_t id;
struct ecore_sb_info *sb_info;
struct qede_rx_queue *rxq;
- struct qede_tx_queue *txqs[MAX_NUM_TC];
- char name[80];
+ struct qede_tx_queue *txq;
};
/*
@@ -240,16 +238,6 @@ void qede_rx_queue_release(void *rx_queue);
void qede_tx_queue_release(void *tx_queue);
-int qede_dev_start(struct rte_eth_dev *eth_dev);
-
-void qede_dev_stop(struct rte_eth_dev *eth_dev);
-
-int qede_reset_fp_rings(struct qede_dev *qdev);
-
-void qede_free_fp_arrays(struct qede_dev *qdev);
-
-void qede_free_mem_load(struct rte_eth_dev *eth_dev);
-
uint16_t qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
@@ -259,9 +247,13 @@ uint16_t qede_xmit_prep_pkts(void *p_txq, struct rte_mbuf **tx_pkts,
uint16_t qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
-uint16_t qede_rxtx_pkts_dummy(__rte_unused void *p_rxq,
- __rte_unused struct rte_mbuf **pkts,
- __rte_unused uint16_t nb_pkts);
+uint16_t qede_rxtx_pkts_dummy(void *p_rxq,
+ struct rte_mbuf **pkts,
+ uint16_t nb_pkts);
+
+int qede_start_queues(struct rte_eth_dev *eth_dev);
+
+void qede_stop_queues(struct rte_eth_dev *eth_dev);
/* Fastpath resource alloc/dealloc helpers */
int qede_alloc_fp_resc(struct qede_dev *qdev);