Diffstat (limited to 'drivers/net/qede/base')
-rw-r--r--  drivers/net/qede/base/bcm_osal.c              |   22
-rw-r--r--  drivers/net/qede/base/bcm_osal.h              |   36
-rw-r--r--  drivers/net/qede/base/common_hsi.h            |  760
-rw-r--r--  drivers/net/qede/base/ecore.h                 |  232
-rw-r--r--  drivers/net/qede/base/ecore_cxt.c             |  150
-rw-r--r--  drivers/net/qede/base/ecore_cxt.h             |    6
-rw-r--r--  drivers/net/qede/base/ecore_dcbx.c            |  591
-rw-r--r--  drivers/net/qede/base/ecore_dcbx.h            |   13
-rw-r--r--  drivers/net/qede/base/ecore_dcbx_api.h        |   46
-rw-r--r--  drivers/net/qede/base/ecore_dev.c             | 1115
-rw-r--r--  drivers/net/qede/base/ecore_dev_api.h         |  113
-rw-r--r--  drivers/net/qede/base/ecore_hsi_common.h      |  245
-rw-r--r--  drivers/net/qede/base/ecore_hsi_debug_tools.h |    6
-rw-r--r--  drivers/net/qede/base/ecore_hsi_eth.h         |   65
-rw-r--r--  drivers/net/qede/base/ecore_hw.c              |   25
-rw-r--r--  drivers/net/qede/base/ecore_hw.h              |   44
-rw-r--r--  drivers/net/qede/base/ecore_init_fw_funcs.c   |  512
-rw-r--r--  drivers/net/qede/base/ecore_init_fw_funcs.h   |   98
-rw-r--r--  drivers/net/qede/base/ecore_init_ops.c        |   73
-rw-r--r--  drivers/net/qede/base/ecore_init_ops.h        |    3
-rw-r--r--  drivers/net/qede/base/ecore_int.c             | 1011
-rw-r--r--  drivers/net/qede/base/ecore_int.h             |   73
-rw-r--r--  drivers/net/qede/base/ecore_int_api.h         |   47
-rw-r--r--  drivers/net/qede/base/ecore_iov_api.h         |   55
-rw-r--r--  drivers/net/qede/base/ecore_iro.h             |    8
-rw-r--r--  drivers/net/qede/base/ecore_iro_values.h      |   44
-rw-r--r--  drivers/net/qede/base/ecore_l2.c              |  293
-rw-r--r--  drivers/net/qede/base/ecore_l2.h              |   82
-rw-r--r--  drivers/net/qede/base/ecore_l2_api.h          |   30
-rw-r--r--  drivers/net/qede/base/ecore_mcp.c             | 1651
-rw-r--r--  drivers/net/qede/base/ecore_mcp.h             |  195
-rw-r--r--  drivers/net/qede/base/ecore_mcp_api.h         |  190
-rw-r--r--  drivers/net/qede/base/ecore_mng_tlv.c         |    9
-rw-r--r--  drivers/net/qede/base/ecore_proto_if.h        |    5
-rw-r--r--  drivers/net/qede/base/ecore_rt_defs.h         |  858
-rw-r--r--  drivers/net/qede/base/ecore_sp_api.h          |    2
-rw-r--r--  drivers/net/qede/base/ecore_sp_commands.c     |  152
-rw-r--r--  drivers/net/qede/base/ecore_sp_commands.h     |   33
-rw-r--r--  drivers/net/qede/base/ecore_spq.c             |  111
-rw-r--r--  drivers/net/qede/base/ecore_spq.h             |   20
-rw-r--r--  drivers/net/qede/base/ecore_sriov.c           |  992
-rw-r--r--  drivers/net/qede/base/ecore_sriov.h           |   60
-rw-r--r--  drivers/net/qede/base/ecore_vf.c              |  447
-rw-r--r--  drivers/net/qede/base/ecore_vf.h              |   79
-rw-r--r--  drivers/net/qede/base/ecore_vf_api.h          |   13
-rw-r--r--  drivers/net/qede/base/ecore_vfpf_if.h         |   80
-rw-r--r--  drivers/net/qede/base/mcp_public.h            |  516
-rw-r--r--  drivers/net/qede/base/nvm_cfg.h               |  100
-rw-r--r--  drivers/net/qede/base/reg_addr.h              |   17
49 files changed, 7470 insertions(+), 3858 deletions(-)
diff --git a/drivers/net/qede/base/bcm_osal.c b/drivers/net/qede/base/bcm_osal.c
index 2603a8b3..fe42f325 100644
--- a/drivers/net/qede/base/bcm_osal.c
+++ b/drivers/net/qede/base/bcm_osal.c
@@ -144,12 +144,12 @@ void *osal_dma_alloc_coherent(struct ecore_dev *p_dev,
*phys = 0;
return OSAL_NULL;
}
- *phys = mz->phys_addr;
+ *phys = mz->iova;
ecore_mz_mapping[ecore_mz_count++] = mz;
DP_VERBOSE(p_dev, ECORE_MSG_SP,
"Allocated dma memory size=%zu phys=0x%lx"
" virt=%p core=%d\n",
- mz->len, (unsigned long)mz->phys_addr, mz->addr, core_id);
+ mz->len, (unsigned long)mz->iova, mz->addr, core_id);
return mz->addr;
}
@@ -182,12 +182,12 @@ void *osal_dma_alloc_coherent_aligned(struct ecore_dev *p_dev,
*phys = 0;
return OSAL_NULL;
}
- *phys = mz->phys_addr;
+ *phys = mz->iova;
ecore_mz_mapping[ecore_mz_count++] = mz;
DP_VERBOSE(p_dev, ECORE_MSG_SP,
"Allocated aligned dma memory size=%zu phys=0x%lx"
" virt=%p core=%d\n",
- mz->len, (unsigned long)mz->phys_addr, mz->addr, core_id);
+ mz->len, (unsigned long)mz->iova, mz->addr, core_id);
return mz->addr;
}
@@ -196,7 +196,7 @@ void osal_dma_free_mem(struct ecore_dev *p_dev, dma_addr_t phys)
uint16_t j;
for (j = 0 ; j < ecore_mz_count; j++) {
- if (phys == ecore_mz_mapping[j]->phys_addr) {
+ if (phys == ecore_mz_mapping[j]->iova) {
DP_VERBOSE(p_dev, ECORE_MSG_SP,
"Free memzone %s\n", ecore_mz_mapping[j]->name);
rte_memzone_free(ecore_mz_mapping[j]);
@@ -292,3 +292,15 @@ qede_hw_err_notify(struct ecore_hwfn *p_hwfn, enum ecore_hw_err_type err_type)
DP_ERR(p_hwfn, "HW error occurred [%s]\n", err_str);
ecore_int_attn_clr_enable(p_hwfn->p_dev, true);
}
+
+u32 qede_crc32(u32 crc, u8 *ptr, u32 length)
+{
+ int i;
+
+ while (length--) {
+ crc ^= *ptr++;
+ for (i = 0; i < 8; i++)
+ crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
+ }
+ return crc;
+}
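
The qede_crc32() routine added here is the bit-reflected CRC-32 core (polynomial 0xedb88320), one byte per outer iteration; it applies no initial or final inversion itself, so the caller chooses the seed. A minimal standalone sketch, assuming callers seed with 0xffffffff and invert the result to obtain the conventional CRC-32 (the test harness below is illustrative, not part of the patch):

#include <stdint.h>
#include <stdio.h>

typedef uint8_t u8;
typedef uint32_t u32;

/* Same body as the qede_crc32() added above: reflected CRC-32,
 * polynomial 0xedb88320, no implicit init/final XOR. */
static u32 qede_crc32(u32 crc, u8 *ptr, u32 length)
{
	int i;

	while (length--) {
		crc ^= *ptr++;
		for (i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
	}
	return crc;
}

int main(void)
{
	u8 buf[] = "123456789";

	/* Seeding with ~0 and inverting the output yields the standard
	 * CRC-32 check value 0xcbf43926 for "123456789". */
	printf("crc32 = 0x%08x\n", ~qede_crc32(0xffffffff, buf, 9));
	return 0;
}
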
diff --git a/drivers/net/qede/base/bcm_osal.h b/drivers/net/qede/base/bcm_osal.h
index 3acf8f7c..52c2f0ec 100644
--- a/drivers/net/qede/base/bcm_osal.h
+++ b/drivers/net/qede/base/bcm_osal.h
@@ -23,6 +23,7 @@
/* Forward declaration */
struct ecore_dev;
struct ecore_hwfn;
+struct ecore_ptt;
struct ecore_vf_acquire_sw_info;
struct vf_pf_resc_request;
enum ecore_mcp_protocol_type;
@@ -45,6 +46,8 @@ void qed_link_update(struct ecore_hwfn *hwfn);
#define OSAL_WARN(arg1, arg2, arg3, ...) (0)
+#define UNUSED(x) (void)(x)
+
/* Memory Types */
typedef uint8_t u8;
typedef uint16_t u16;
@@ -60,7 +63,7 @@ typedef u32 OSAL_BE32;
#define osal_uintptr_t uintptr_t
-typedef phys_addr_t dma_addr_t;
+typedef rte_iova_t dma_addr_t;
typedef rte_spinlock_t osal_spinlock_t;
@@ -147,6 +150,9 @@ void osal_dma_free_mem(struct ecore_dev *edev, dma_addr_t phys);
((u8 *)(uintptr_t)(_p_hwfn->doorbells) + \
(_db_addr)), (u32)_val)
+#define DIRECT_REG_WR64(hwfn, addr, value) nothing
+#define DIRECT_REG_RD64(hwfn, addr) 0
+
/* Mutexes */
typedef pthread_mutex_t osal_mutex_t;
@@ -161,7 +167,12 @@ typedef pthread_mutex_t osal_mutex_t;
#define OSAL_SPIN_LOCK_INIT(lock) rte_spinlock_init(lock)
#define OSAL_SPIN_LOCK(lock) rte_spinlock_lock(lock)
#define OSAL_SPIN_UNLOCK(lock) rte_spinlock_unlock(lock)
-#define OSAL_SPIN_LOCK_IRQSAVE(lock, flags) nothing
+#define OSAL_SPIN_LOCK_IRQSAVE(lock, flags) \
+ do { \
+ UNUSED(lock); \
+ flags = 0; \
+ UNUSED(flags); \
+ } while (0)
#define OSAL_SPIN_UNLOCK_IRQSAVE(lock, flags) nothing
#define OSAL_SPIN_LOCK_ALLOC(hwfn, lock) nothing
#define OSAL_SPIN_LOCK_DEALLOC(lock) nothing
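
The OSAL_SPIN_LOCK_IRQSAVE() rewrite above swaps the bare `nothing' for a do/while(0) body that merely consumes its arguments: in this userspace PMD there is no IRQ state to save, but evaluating `lock' and `flags' (and zeroing `flags') keeps unused-variable warnings quiet at call sites shared with the kernel-style ecore code. A hypothetical call site, just to show the expansion stays a single statement:

/* Hypothetical caller: the macro expands to one statement and consumes
 * both arguments, even though it takes no lock in this userspace build. */
static void example_critical_section(osal_spinlock_t *lock)
{
	unsigned long flags;

	OSAL_SPIN_LOCK_IRQSAVE(lock, flags);	/* flags set to 0, args used */
	/* ... state shared with the slowpath would be touched here ... */
	OSAL_SPIN_UNLOCK_IRQSAVE(lock, flags);	/* still a no-op (`nothing') */
}
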
@@ -328,6 +339,7 @@ u32 qede_find_first_zero_bit(unsigned long *, u32);
#define OSAL_BITMAP_WEIGHT(bitmap, count) 0
#define OSAL_LINK_UPDATE(hwfn) qed_link_update(hwfn)
+#define OSAL_TRANSCEIVER_UPDATE(hwfn) nothing
#define OSAL_DCBX_AEN(hwfn, mib_type) nothing
/* SR-IOV channel */
@@ -344,8 +356,9 @@ u32 qede_find_first_zero_bit(unsigned long *, u32);
#define OSAL_IOV_VF_VPORT_UPDATE(hwfn, vfid, p_params, p_mask) 0
#define OSAL_VF_UPDATE_ACQUIRE_RESC_RESP(_dev_p, _resc_resp) 0
#define OSAL_IOV_GET_OS_TYPE() 0
-#define OSAL_IOV_VF_MSG_TYPE(hwfn, vfid, vf_msg_type) 0
-#define OSAL_IOV_PF_RESP_TYPE(hwfn, vfid, pf_resp_type) 0
+#define OSAL_IOV_VF_MSG_TYPE(hwfn, vfid, vf_msg_type) nothing
+#define OSAL_IOV_PF_RESP_TYPE(hwfn, vfid, pf_resp_type) nothing
+#define OSAL_IOV_VF_VPORT_STOP(hwfn, vf) nothing
u32 qede_unzip_data(struct ecore_hwfn *p_hwfn, u32 input_len,
u8 *input_buf, u32 max_size, u8 *unzip_buf);
@@ -365,7 +378,7 @@ void qede_hw_err_notify(struct ecore_hwfn *p_hwfn,
qede_hw_err_notify(hwfn, err_type)
#define OSAL_NVM_IS_ACCESS_ENABLED(hwfn) (1)
-#define OSAL_NUM_ACTIVE_CPU() 0
+#define OSAL_NUM_CPUS() 0
/* Utility functions */
@@ -414,7 +427,9 @@ u32 qede_osal_log2(u32);
#define OSAL_REG_ADDR(_p_hwfn, _offset) \
(void *)((u8 *)(uintptr_t)(_p_hwfn->regview) + (_offset))
#define OSAL_PAGE_SIZE 4096
+#define OSAL_CACHE_LINE_SIZE RTE_CACHE_LINE_SIZE
#define OSAL_IOMEM volatile
+#define OSAL_UNUSED __attribute__((unused))
#define OSAL_UNLIKELY(x) __builtin_expect(!!(x), 0)
#define OSAL_MIN_T(type, __min1, __min2) \
((type)(__min1) < (type)(__min2) ? (type)(__min1) : (type)(__min2))
@@ -427,10 +442,17 @@ void qede_get_mcp_proto_stats(struct ecore_dev *, enum ecore_mcp_protocol_type,
qede_get_mcp_proto_stats(dev, type, stats)
#define OSAL_SLOWPATH_IRQ_REQ(p_hwfn) (0)
-#define OSAL_CRC32(crc, buf, length) 0
+
+u32 qede_crc32(u32 crc, u8 *ptr, u32 length);
+#define OSAL_CRC32(crc, buf, length) qede_crc32(crc, buf, length)
#define OSAL_CRC8_POPULATE(table, polynomial) nothing
#define OSAL_CRC8(table, pdata, nbytes, crc) 0
-#define OSAL_MFW_TLV_REQ(p_hwfn) (0)
+#define OSAL_MFW_TLV_REQ(p_hwfn) nothing
#define OSAL_MFW_FILL_TLV_DATA(type, buf, data) (0)
+#define OSAL_MFW_CMD_PREEMPT(p_hwfn) nothing
#define OSAL_PF_VALIDATE_MODIFY_TUNN_CONFIG(p_hwfn, mask, b_update, tunn) 0
+
+#define OSAL_DIV_S64(a, b) ((a) / (b))
+#define OSAL_LLDP_RX_TLVS(p_hwfn, tlv_buf, tlv_size) nothing
+
#endif /* __BCM_OSAL_H */
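
The bcm_osal.c hunks above track DPDK's rename of rte_memzone.phys_addr to rte_memzone.iova (and phys_addr_t to rte_iova_t): the address handed to the NIC is an I/O virtual address, which equals the physical address only when no IOMMU remapping is in place. A minimal sketch of the same allocation pattern against the memzone API (helper name and error handling are illustrative, not from the patch):

#include <rte_memzone.h>
#include <rte_memory.h>

/* Illustrative analogue of osal_dma_alloc_coherent(): reserve a
 * cache-line-aligned memzone and report its IOVA as the DMA address. */
static void *example_dma_alloc(const char *name, size_t size, rte_iova_t *phys)
{
	const struct rte_memzone *mz;

	mz = rte_memzone_reserve_aligned(name, size, SOCKET_ID_ANY,
					 0, RTE_CACHE_LINE_SIZE);
	if (mz == NULL) {
		*phys = 0;
		return NULL;
	}

	*phys = mz->iova;	/* bus address for the device */
	return mz->addr;	/* CPU virtual address */
}
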
diff --git a/drivers/net/qede/base/common_hsi.h b/drivers/net/qede/base/common_hsi.h
index bfe50e1f..9a6059ac 100644
--- a/drivers/net/qede/base/common_hsi.h
+++ b/drivers/net/qede/base/common_hsi.h
@@ -97,8 +97,8 @@
#define FW_MAJOR_VERSION 8
-#define FW_MINOR_VERSION 20
-#define FW_REVISION_VERSION 0
+#define FW_MINOR_VERSION 30
+#define FW_REVISION_VERSION 12
#define FW_ENGINEERING_VERSION 0
/***********************/
@@ -106,73 +106,70 @@
/***********************/
/* PCI functions */
-#define MAX_NUM_PORTS_K2 (4)
-#define MAX_NUM_PORTS_BB (2)
-#define MAX_NUM_PORTS (MAX_NUM_PORTS_K2)
-
-#define MAX_NUM_PFS_K2 (16)
-#define MAX_NUM_PFS_BB (8)
-#define MAX_NUM_PFS (MAX_NUM_PFS_K2)
-#define MAX_NUM_OF_PFS_IN_CHIP (16) /* On both engines */
-
-#define MAX_NUM_VFS_BB (120)
-#define MAX_NUM_VFS_K2 (192)
-#define E4_MAX_NUM_VFS (MAX_NUM_VFS_K2)
-#define COMMON_MAX_NUM_VFS (240)
-
-#define MAX_NUM_FUNCTIONS_BB (MAX_NUM_PFS_BB + MAX_NUM_VFS_BB)
-#define MAX_NUM_FUNCTIONS_K2 (MAX_NUM_PFS_K2 + MAX_NUM_VFS_K2)
-#define MAX_NUM_FUNCTIONS (MAX_NUM_PFS + E4_MAX_NUM_VFS)
+#define MAX_NUM_PORTS_BB (2)
+#define MAX_NUM_PORTS_K2 (4)
+#define MAX_NUM_PORTS_E5 (4)
+#define MAX_NUM_PORTS (MAX_NUM_PORTS_E5)
+
+#define MAX_NUM_PFS_BB (8)
+#define MAX_NUM_PFS_K2 (16)
+#define MAX_NUM_PFS_E5 (16)
+#define MAX_NUM_PFS (MAX_NUM_PFS_E5)
+#define MAX_NUM_OF_PFS_IN_CHIP (16) /* On both engines */
+
+#define MAX_NUM_VFS_BB (120)
+#define MAX_NUM_VFS_K2 (192)
+#define MAX_NUM_VFS_E4 (MAX_NUM_VFS_K2)
+#define MAX_NUM_VFS_E5 (240)
+#define COMMON_MAX_NUM_VFS (MAX_NUM_VFS_E5)
+
+#define MAX_NUM_FUNCTIONS_BB (MAX_NUM_PFS_BB + MAX_NUM_VFS_BB)
+#define MAX_NUM_FUNCTIONS_K2 (MAX_NUM_PFS_K2 + MAX_NUM_VFS_K2)
+#define MAX_NUM_FUNCTIONS (MAX_NUM_PFS + MAX_NUM_VFS_E4)
/* in both BB and K2, the VF number starts from 16. so for arrays containing all
* possible PFs and VFs - we need a constant for this size
*/
-#define MAX_FUNCTION_NUMBER_BB (MAX_NUM_PFS + MAX_NUM_VFS_BB)
-#define MAX_FUNCTION_NUMBER_K2 (MAX_NUM_PFS + MAX_NUM_VFS_K2)
-#define MAX_FUNCTION_NUMBER (MAX_NUM_PFS + E4_MAX_NUM_VFS)
-
-#define MAX_NUM_VPORTS_K2 (208)
-#define MAX_NUM_VPORTS_BB (160)
-#define MAX_NUM_VPORTS (MAX_NUM_VPORTS_K2)
+#define MAX_FUNCTION_NUMBER_BB (MAX_NUM_PFS + MAX_NUM_VFS_BB)
+#define MAX_FUNCTION_NUMBER_K2 (MAX_NUM_PFS + MAX_NUM_VFS_K2)
+#define MAX_FUNCTION_NUMBER_E4 (MAX_NUM_PFS + MAX_NUM_VFS_E4)
+#define MAX_FUNCTION_NUMBER_E5 (MAX_NUM_PFS + MAX_NUM_VFS_E5)
+#define COMMON_MAX_FUNCTION_NUMBER (MAX_NUM_PFS + MAX_NUM_VFS_E5)
+
+#define MAX_NUM_VPORTS_K2 (208)
+#define MAX_NUM_VPORTS_BB (160)
+#define MAX_NUM_VPORTS_E4 (MAX_NUM_VPORTS_K2)
+#define MAX_NUM_VPORTS_E5 (256)
+#define COMMON_MAX_NUM_VPORTS (MAX_NUM_VPORTS_E5)
-#define MAX_NUM_L2_QUEUES_K2 (320)
#define MAX_NUM_L2_QUEUES_BB (256)
-#define MAX_NUM_L2_QUEUES (MAX_NUM_L2_QUEUES_K2)
+#define MAX_NUM_L2_QUEUES_K2 (320)
+#define MAX_NUM_L2_QUEUES_E5 (320) /* TODO_E5_VITALY - fix to 512 */
+#define MAX_NUM_L2_QUEUES (MAX_NUM_L2_QUEUES_E5)
/* Traffic classes in network-facing blocks (PBF, BTB, NIG, BRB, PRS and QM) */
-/* 4-Port K2. */
-#define NUM_PHYS_TCS_4PORT_K2 (4)
-#define NUM_OF_PHYS_TCS (8)
-
-#define NUM_TCS_4PORT_K2 (NUM_PHYS_TCS_4PORT_K2 + 1)
-#define NUM_OF_TCS (NUM_OF_PHYS_TCS + 1)
-
-#define LB_TC (NUM_OF_PHYS_TCS)
-
-/* Num of possible traffic priority values */
-#define NUM_OF_PRIO (8)
-
-#define MAX_NUM_VOQS_K2 (NUM_TCS_4PORT_K2 * MAX_NUM_PORTS_K2)
-#define MAX_NUM_VOQS_BB (NUM_OF_TCS * MAX_NUM_PORTS_BB)
-#define MAX_NUM_VOQS (MAX_NUM_VOQS_K2)
-#define MAX_PHYS_VOQS (NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB)
+#define NUM_PHYS_TCS_4PORT_K2 4
+#define NUM_PHYS_TCS_4PORT_TX_E5 6
+#define NUM_PHYS_TCS_4PORT_RX_E5 4
+#define NUM_OF_PHYS_TCS 8
+#define PURE_LB_TC NUM_OF_PHYS_TCS
+#define NUM_TCS_4PORT_K2 (NUM_PHYS_TCS_4PORT_K2 + 1)
+#define NUM_TCS_4PORT_TX_E5 (NUM_PHYS_TCS_4PORT_TX_E5 + 1)
+#define NUM_TCS_4PORT_RX_E5 (NUM_PHYS_TCS_4PORT_RX_E5 + 1)
+#define NUM_OF_TCS (NUM_OF_PHYS_TCS + 1)
/* CIDs */
-#define E4_NUM_OF_CONNECTION_TYPES (8)
-#define NUM_OF_TASK_TYPES (8)
-#define NUM_OF_LCIDS (320)
-#define NUM_OF_LTIDS (320)
-
-/* Clock values */
-#define MASTER_CLK_FREQ_E4 (375e6)
-#define STORM_CLK_FREQ_E4 (1000e6)
-#define CLK25M_CLK_FREQ_E4 (25e6)
+#define NUM_OF_CONNECTION_TYPES_E4 (8)
+#define NUM_OF_CONNECTION_TYPES_E5 (16)
+#define NUM_OF_TASK_TYPES (8)
+#define NUM_OF_LCIDS (320)
+#define NUM_OF_LTIDS (320)
/* Global PXP windows (GTT) */
-#define NUM_OF_GTT 19
-#define GTT_DWORD_SIZE_BITS 10
-#define GTT_BYTE_SIZE_BITS (GTT_DWORD_SIZE_BITS + 2)
-#define GTT_DWORD_SIZE (1 << GTT_DWORD_SIZE_BITS)
+#define NUM_OF_GTT 19
+#define GTT_DWORD_SIZE_BITS 10
+#define GTT_BYTE_SIZE_BITS (GTT_DWORD_SIZE_BITS + 2)
+#define GTT_DWORD_SIZE (1 << GTT_DWORD_SIZE_BITS)
/* Tools Version */
#define TOOLS_VERSION 10
@@ -417,49 +414,51 @@
#define CAU_FSM_ETH_TX 1
/* Number of Protocol Indices per Status Block */
-#define PIS_PER_SB 12
+#define PIS_PER_SB_E4 12
+#define PIS_PER_SB_E5 8
+#define MAX_PIS_PER_SB_E4 OSAL_MAX_T(PIS_PER_SB_E4, PIS_PER_SB_E5)
/* fsm is stopped or not valid for this sb */
-#define CAU_HC_STOPPED_STATE 3
+#define CAU_HC_STOPPED_STATE 3
/* fsm is working without interrupt coalescing for this sb*/
-#define CAU_HC_DISABLE_STATE 4
+#define CAU_HC_DISABLE_STATE 4
/* fsm is working with interrupt coalescing for this sb*/
-#define CAU_HC_ENABLE_STATE 0
+#define CAU_HC_ENABLE_STATE 0
/*****************/
/* IGU CONSTANTS */
/*****************/
-#define MAX_SB_PER_PATH_K2 (368)
-#define MAX_SB_PER_PATH_BB (288)
-#define MAX_TOT_SB_PER_PATH \
- MAX_SB_PER_PATH_K2
+#define MAX_SB_PER_PATH_K2 (368)
+#define MAX_SB_PER_PATH_BB (288)
+#define MAX_SB_PER_PATH_E5 (512)
+#define MAX_TOT_SB_PER_PATH MAX_SB_PER_PATH_E5
-#define MAX_SB_PER_PF_MIMD 129
-#define MAX_SB_PER_PF_SIMD 64
-#define MAX_SB_PER_VF 64
+#define MAX_SB_PER_PF_MIMD 129
+#define MAX_SB_PER_PF_SIMD 64
+#define MAX_SB_PER_VF 64
/* Memory addresses on the BAR for the IGU Sub Block */
-#define IGU_MEM_BASE 0x0000
+#define IGU_MEM_BASE 0x0000
-#define IGU_MEM_MSIX_BASE 0x0000
-#define IGU_MEM_MSIX_UPPER 0x0101
-#define IGU_MEM_MSIX_RESERVED_UPPER 0x01ff
+#define IGU_MEM_MSIX_BASE 0x0000
+#define IGU_MEM_MSIX_UPPER 0x0101
+#define IGU_MEM_MSIX_RESERVED_UPPER 0x01ff
-#define IGU_MEM_PBA_MSIX_BASE 0x0200
-#define IGU_MEM_PBA_MSIX_UPPER 0x0202
-#define IGU_MEM_PBA_MSIX_RESERVED_UPPER 0x03ff
+#define IGU_MEM_PBA_MSIX_BASE 0x0200
+#define IGU_MEM_PBA_MSIX_UPPER 0x0202
+#define IGU_MEM_PBA_MSIX_RESERVED_UPPER 0x03ff
-#define IGU_CMD_INT_ACK_BASE 0x0400
-#define IGU_CMD_INT_ACK_UPPER (IGU_CMD_INT_ACK_BASE + \
- MAX_TOT_SB_PER_PATH - \
- 1)
-#define IGU_CMD_INT_ACK_RESERVED_UPPER 0x05ff
+#define IGU_CMD_INT_ACK_BASE 0x0400
+#define IGU_CMD_INT_ACK_UPPER (IGU_CMD_INT_ACK_BASE + \
+ MAX_TOT_SB_PER_PATH - \
+ 1)
+#define IGU_CMD_INT_ACK_RESERVED_UPPER 0x05ff
-#define IGU_CMD_ATTN_BIT_UPD_UPPER 0x05f0
-#define IGU_CMD_ATTN_BIT_SET_UPPER 0x05f1
-#define IGU_CMD_ATTN_BIT_CLR_UPPER 0x05f2
+#define IGU_CMD_ATTN_BIT_UPD_UPPER 0x05f0
+#define IGU_CMD_ATTN_BIT_SET_UPPER 0x05f1
+#define IGU_CMD_ATTN_BIT_CLR_UPPER 0x05f2
#define IGU_REG_SISR_MDPC_WMASK_UPPER 0x05f3
#define IGU_REG_SISR_MDPC_WMASK_LSB_UPPER 0x05f4
@@ -467,8 +466,8 @@
#define IGU_REG_SISR_MDPC_WOMASK_UPPER 0x05f6
#define IGU_CMD_PROD_UPD_BASE 0x0600
-#define IGU_CMD_PROD_UPD_UPPER (IGU_CMD_PROD_UPD_BASE +\
- MAX_TOT_SB_PER_PATH - \
+#define IGU_CMD_PROD_UPD_UPPER (IGU_CMD_PROD_UPD_BASE + \
+ MAX_TOT_SB_PER_PATH - \
1)
#define IGU_CMD_PROD_UPD_RESERVED_UPPER 0x07ff
@@ -491,16 +490,16 @@
#define PXP_PER_PF_ENTRY_SIZE 8
#define PXP_NUM_GLOBAL_WINDOWS 243
#define PXP_GLOBAL_ENTRY_SIZE 4
-#define PXP_ADMIN_WINDOW_ALLOWED_LENGTH 4
+#define PXP_ADMIN_WINDOW_ALLOWED_LENGTH 4
#define PXP_PF_WINDOW_ADMIN_START 0
#define PXP_PF_WINDOW_ADMIN_LENGTH 0x1000
#define PXP_PF_WINDOW_ADMIN_END (PXP_PF_WINDOW_ADMIN_START + \
- PXP_PF_WINDOW_ADMIN_LENGTH - 1)
+ PXP_PF_WINDOW_ADMIN_LENGTH - 1)
#define PXP_PF_WINDOW_ADMIN_PER_PF_START 0
#define PXP_PF_WINDOW_ADMIN_PER_PF_LENGTH (PXP_NUM_PF_WINDOWS * \
PXP_PER_PF_ENTRY_SIZE)
-#define PXP_PF_WINDOW_ADMIN_PER_PF_END (PXP_PF_WINDOW_ADMIN_PER_PF_START + \
- PXP_PF_WINDOW_ADMIN_PER_PF_LENGTH - 1)
+#define PXP_PF_WINDOW_ADMIN_PER_PF_END (PXP_PF_WINDOW_ADMIN_PER_PF_START + \
+ PXP_PF_WINDOW_ADMIN_PER_PF_LENGTH - 1)
#define PXP_PF_WINDOW_ADMIN_GLOBAL_START 0x200
#define PXP_PF_WINDOW_ADMIN_GLOBAL_LENGTH (PXP_NUM_GLOBAL_WINDOWS * \
PXP_GLOBAL_ENTRY_SIZE)
@@ -575,19 +574,79 @@
#define PXP_BAR0_FIRST_INVALID_ADDRESS \
(PXP_BAR0_END_PSDM + 1)
-#define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN 12
-#define PXP_ILT_BLOCK_FACTOR_MULTIPLIER 1024
-
-/* ILT Records */
+/* VF BAR */
+#define PXP_VF_BAR0 0
+
+#define PXP_VF_BAR0_START_IGU 0
+#define PXP_VF_BAR0_IGU_LENGTH 0x3000
+#define PXP_VF_BAR0_END_IGU \
+ (PXP_VF_BAR0_START_IGU + PXP_VF_BAR0_IGU_LENGTH - 1)
+
+#define PXP_VF_BAR0_START_DQ 0x3000
+#define PXP_VF_BAR0_DQ_LENGTH 0x200
+#define PXP_VF_BAR0_DQ_OPAQUE_OFFSET 0
+#define PXP_VF_BAR0_ME_OPAQUE_ADDRESS \
+ (PXP_VF_BAR0_START_DQ + PXP_VF_BAR0_DQ_OPAQUE_OFFSET)
+#define PXP_VF_BAR0_ME_CONCRETE_ADDRESS \
+ (PXP_VF_BAR0_ME_OPAQUE_ADDRESS + 4)
+#define PXP_VF_BAR0_END_DQ \
+ (PXP_VF_BAR0_START_DQ + PXP_VF_BAR0_DQ_LENGTH - 1)
+
+#define PXP_VF_BAR0_START_TSDM_ZONE_B 0x3200
+#define PXP_VF_BAR0_SDM_LENGTH_ZONE_B 0x200
+#define PXP_VF_BAR0_END_TSDM_ZONE_B \
+ (PXP_VF_BAR0_START_TSDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
+
+#define PXP_VF_BAR0_START_MSDM_ZONE_B 0x3400
+#define PXP_VF_BAR0_END_MSDM_ZONE_B \
+ (PXP_VF_BAR0_START_MSDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
+
+#define PXP_VF_BAR0_START_USDM_ZONE_B 0x3600
+#define PXP_VF_BAR0_END_USDM_ZONE_B \
+ (PXP_VF_BAR0_START_USDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
+
+#define PXP_VF_BAR0_START_XSDM_ZONE_B 0x3800
+#define PXP_VF_BAR0_END_XSDM_ZONE_B \
+ (PXP_VF_BAR0_START_XSDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
+
+#define PXP_VF_BAR0_START_YSDM_ZONE_B 0x3a00
+#define PXP_VF_BAR0_END_YSDM_ZONE_B \
+ (PXP_VF_BAR0_START_YSDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
+
+#define PXP_VF_BAR0_START_PSDM_ZONE_B 0x3c00
+#define PXP_VF_BAR0_END_PSDM_ZONE_B \
+ (PXP_VF_BAR0_START_PSDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
+
+#define PXP_VF_BAR0_START_GRC 0x3E00
+#define PXP_VF_BAR0_GRC_LENGTH 0x200
+#define PXP_VF_BAR0_END_GRC \
+ (PXP_VF_BAR0_START_GRC + PXP_VF_BAR0_GRC_LENGTH - 1)
+
+#define PXP_VF_BAR0_START_SDM_ZONE_A 0x4000
+#define PXP_VF_BAR0_END_SDM_ZONE_A 0x10000
+
+#define PXP_VF_BAR0_START_IGU2 0x10000
+#define PXP_VF_BAR0_IGU2_LENGTH 0xD000
+#define PXP_VF_BAR0_END_IGU2 \
+ (PXP_VF_BAR0_START_IGU2 + PXP_VF_BAR0_IGU2_LENGTH - 1)
+
+#define PXP_VF_BAR0_GRC_WINDOW_LENGTH 32
+
+#define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN 12
+#define PXP_ILT_BLOCK_FACTOR_MULTIPLIER 1024
+
+// ILT Records
#define PXP_NUM_ILT_RECORDS_BB 7600
#define PXP_NUM_ILT_RECORDS_K2 11000
-#define MAX_NUM_ILT_RECORDS MAX(PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2)
-
+#define MAX_NUM_ILT_RECORDS \
+ OSAL_MAX_T(PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2)
-/* Host Interface */
-#define PXP_QUEUES_ZONE_MAX_NUM 320
+#define PXP_NUM_ILT_RECORDS_E5 13664
+// Host Interface
+#define PXP_QUEUES_ZONE_MAX_NUM_E4 320
+#define PXP_QUEUES_ZONE_MAX_NUM_E5 512
/*****************/
@@ -635,7 +694,8 @@
/******************/
/* Number of PBF command queue lines. Each line is 32B. */
-#define PBF_MAX_CMD_LINES 3328
+#define PBF_MAX_CMD_LINES_E4 3328
+#define PBF_MAX_CMD_LINES_E5 5280
/* Number of BTB blocks. Each block is 256B. */
#define BTB_MAX_BLOCKS 1440
@@ -645,17 +705,6 @@
/*****************/
#define PRS_GFT_CAM_LINES_NO_MATCH 31
-/* Async data KCQ CQE */
-struct async_data {
- /* Context ID of the connection */
- __le32 cid;
- /* Task Id of the task (for error that happened on a a task) */
- __le16 itid;
- /* error code - relevant only if the opcode indicates its an error */
- u8 error_code;
- /* internal fw debug parameter */
- u8 fw_debug_param;
-};
/*
* Interrupt coalescing TimeSet
@@ -683,22 +732,29 @@ struct eth_rx_prod_data {
__le16 cqe_prod /* CQE producer. */;
};
-struct regpair {
- __le32 lo /* low word for reg-pair */;
- __le32 hi /* high word for reg-pair */;
+
+struct tcp_ulp_connect_done_params {
+ __le16 mss;
+ u8 snd_wnd_scale;
+ u8 flags;
+#define TCP_ULP_CONNECT_DONE_PARAMS_TS_EN_MASK 0x1
+#define TCP_ULP_CONNECT_DONE_PARAMS_TS_EN_SHIFT 0
+#define TCP_ULP_CONNECT_DONE_PARAMS_RESERVED_MASK 0x7F
+#define TCP_ULP_CONNECT_DONE_PARAMS_RESERVED_SHIFT 1
};
-/*
- * Event Ring VF-PF Channel data
- */
-struct vf_pf_channel_eqe_data {
- struct regpair msg_addr /* VF-PF message address */;
+struct iscsi_connect_done_results {
+ __le16 icid /* Context ID of the connection */;
+ __le16 conn_id /* Driver connection ID */;
+/* decided tcp params after connect done */
+ struct tcp_ulp_connect_done_params params;
};
+
struct iscsi_eqe_data {
- __le32 cid /* Context ID of the connection */;
- /* Task Id of the task (for error that happened on a a task) */;
- __le16 conn_id;
+ __le16 icid /* Context ID of the connection */;
+ __le16 conn_id /* Driver connection ID */;
+ __le16 reserved;
/* error code - relevant only if the opcode indicates its an error */
u8 error_code;
u8 error_pdu_opcode_reserved;
@@ -714,52 +770,10 @@ struct iscsi_eqe_data {
#define ISCSI_EQE_DATA_RESERVED0_SHIFT 7
};
-/*
- * Event Ring malicious VF data
- */
-struct malicious_vf_eqe_data {
- u8 vfId /* Malicious VF ID */;
- u8 errId /* Malicious VF error */;
- __le16 reserved[3];
-};
/*
- * Event Ring initial cleanup data
+ * Multi function mode
*/
-struct initial_cleanup_eqe_data {
- u8 vfId /* VF ID */;
- u8 reserved[7];
-};
-
-/*
- * Event Data Union
- */
-union event_ring_data {
- u8 bytes[8] /* Byte Array */;
- struct vf_pf_channel_eqe_data vf_pf_channel /* VF-PF Channel data */;
- struct iscsi_eqe_data iscsi_info /* Dedicated fields to iscsi data */;
- struct regpair roceHandle /* Dedicated field for RDMA data */;
- struct malicious_vf_eqe_data malicious_vf /* Malicious VF data */;
- struct initial_cleanup_eqe_data vf_init_cleanup
- /* VF Initial Cleanup data */;
-};
-/* Event Ring Entry */
-struct event_ring_entry {
- u8 protocol_id /* Event Protocol ID */;
- u8 opcode /* Event Opcode */;
- __le16 reserved0 /* Reserved */;
- __le16 echo /* Echo value from ramrod data on the host */;
- u8 fw_return_code /* FW return code for SP ramrods */;
- u8 flags;
-/* 0: synchronous EQE - a completion of SP message. 1: asynchronous EQE */
-#define EVENT_RING_ENTRY_ASYNC_MASK 0x1
-#define EVENT_RING_ENTRY_ASYNC_SHIFT 0
-#define EVENT_RING_ENTRY_RESERVED1_MASK 0x7F
-#define EVENT_RING_ENTRY_RESERVED1_SHIFT 1
- union event_ring_data data;
-};
-
-/* Multi function mode */
enum mf_mode {
ERROR_MODE /* Unsupported mode */,
MF_OVLAN /* Multi function based on outer VLAN */,
@@ -783,6 +797,12 @@ enum protocol_type {
};
+struct regpair {
+ __le32 lo /* low word for reg-pair */;
+ __le32 hi /* high word for reg-pair */;
+};
+
+
/*
* Ustorm Queue Zone
@@ -852,6 +872,18 @@ struct cau_sb_entry {
#define CAU_SB_ENTRY_TPH_SHIFT 31
};
+
+/*
+ * Igu cleanup bit values to distinguish between clean or producer consumer
+ * update.
+ */
+enum command_type_bit {
+ IGU_COMMAND_TYPE_NOP = 0,
+ IGU_COMMAND_TYPE_SET = 1,
+ MAX_COMMAND_TYPE_BIT
+};
+
+
/* core doorbell data */
struct core_db_data {
u8 params;
@@ -1008,7 +1040,7 @@ struct db_rdma_dpm_params {
#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_SHIFT 28
#define DB_RDMA_DPM_PARAMS_S_FLG_MASK 0x1 /* RoCE S flag */
#define DB_RDMA_DPM_PARAMS_S_FLG_SHIFT 29
-#define DB_RDMA_DPM_PARAMS_RESERVED1_MASK 0x3
+#define DB_RDMA_DPM_PARAMS_RESERVED1_MASK 0x1
#define DB_RDMA_DPM_PARAMS_RESERVED1_SHIFT 30
/* Connection type is iWARP */
#define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_MASK 0x1
@@ -1072,9 +1104,9 @@ enum igu_seg_access {
* to the last-ethertype)
*/
enum l3_type {
- e_l3Type_unknown,
- e_l3Type_ipv4,
- e_l3Type_ipv6,
+ e_l3_type_unknown,
+ e_l3_type_ipv4,
+ e_l3_type_ipv6,
MAX_L3_TYPE
};
@@ -1085,9 +1117,9 @@ enum l3_type {
* first fragment, the protocol-type should be set to none.
*/
enum l4_protocol {
- e_l4Protocol_none,
- e_l4Protocol_tcp,
- e_l4Protocol_udp,
+ e_l4_protocol_none,
+ e_l4_protocol_tcp,
+ e_l4_protocol_udp,
MAX_L4_PROTOCOL
};
@@ -1311,260 +1343,230 @@ struct pxp_vf_zone_a_permission {
* Rdif context
*/
struct rdif_task_context {
- __le32 initialRefTag;
- __le16 appTagValue;
- __le16 appTagMask;
+ __le32 initial_ref_tag;
+ __le16 app_tag_value;
+ __le16 app_tag_mask;
u8 flags0;
-#define RDIF_TASK_CONTEXT_IGNOREAPPTAG_MASK 0x1
-#define RDIF_TASK_CONTEXT_IGNOREAPPTAG_SHIFT 0
-#define RDIF_TASK_CONTEXT_INITIALREFTAGVALID_MASK 0x1
-#define RDIF_TASK_CONTEXT_INITIALREFTAGVALID_SHIFT 1
+#define RDIF_TASK_CONTEXT_IGNORE_APP_TAG_MASK 0x1
+#define RDIF_TASK_CONTEXT_IGNORE_APP_TAG_SHIFT 0
+#define RDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_MASK 0x1
+#define RDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_SHIFT 1
/* 0 = IP checksum, 1 = CRC */
-#define RDIF_TASK_CONTEXT_HOSTGUARDTYPE_MASK 0x1
-#define RDIF_TASK_CONTEXT_HOSTGUARDTYPE_SHIFT 2
-#define RDIF_TASK_CONTEXT_SETERRORWITHEOP_MASK 0x1
-#define RDIF_TASK_CONTEXT_SETERRORWITHEOP_SHIFT 3
+#define RDIF_TASK_CONTEXT_HOST_GUARD_TYPE_MASK 0x1
+#define RDIF_TASK_CONTEXT_HOST_GUARD_TYPE_SHIFT 2
+#define RDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_MASK 0x1
+#define RDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_SHIFT 3
/* 1/2/3 - Protection Type */
-#define RDIF_TASK_CONTEXT_PROTECTIONTYPE_MASK 0x3
-#define RDIF_TASK_CONTEXT_PROTECTIONTYPE_SHIFT 4
+#define RDIF_TASK_CONTEXT_PROTECTION_TYPE_MASK 0x3
+#define RDIF_TASK_CONTEXT_PROTECTION_TYPE_SHIFT 4
/* 0=0x0000, 1=0xffff */
-#define RDIF_TASK_CONTEXT_CRC_SEED_MASK 0x1
-#define RDIF_TASK_CONTEXT_CRC_SEED_SHIFT 6
+#define RDIF_TASK_CONTEXT_CRC_SEED_MASK 0x1
+#define RDIF_TASK_CONTEXT_CRC_SEED_SHIFT 6
/* Keep reference tag constant */
-#define RDIF_TASK_CONTEXT_KEEPREFTAGCONST_MASK 0x1
-#define RDIF_TASK_CONTEXT_KEEPREFTAGCONST_SHIFT 7
- u8 partialDifData[7];
- __le16 partialCrcValue;
- __le16 partialChecksumValue;
- __le32 offsetInIO;
+#define RDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_MASK 0x1
+#define RDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_SHIFT 7
+ u8 partial_dif_data[7];
+ __le16 partial_crc_value;
+ __le16 partial_checksum_value;
+ __le32 offset_in_io;
__le16 flags1;
-#define RDIF_TASK_CONTEXT_VALIDATEGUARD_MASK 0x1
-#define RDIF_TASK_CONTEXT_VALIDATEGUARD_SHIFT 0
-#define RDIF_TASK_CONTEXT_VALIDATEAPPTAG_MASK 0x1
-#define RDIF_TASK_CONTEXT_VALIDATEAPPTAG_SHIFT 1
-#define RDIF_TASK_CONTEXT_VALIDATEREFTAG_MASK 0x1
-#define RDIF_TASK_CONTEXT_VALIDATEREFTAG_SHIFT 2
-#define RDIF_TASK_CONTEXT_FORWARDGUARD_MASK 0x1
-#define RDIF_TASK_CONTEXT_FORWARDGUARD_SHIFT 3
-#define RDIF_TASK_CONTEXT_FORWARDAPPTAG_MASK 0x1
-#define RDIF_TASK_CONTEXT_FORWARDAPPTAG_SHIFT 4
-#define RDIF_TASK_CONTEXT_FORWARDREFTAG_MASK 0x1
-#define RDIF_TASK_CONTEXT_FORWARDREFTAG_SHIFT 5
+#define RDIF_TASK_CONTEXT_VALIDATE_GUARD_MASK 0x1
+#define RDIF_TASK_CONTEXT_VALIDATE_GUARD_SHIFT 0
+#define RDIF_TASK_CONTEXT_VALIDATE_APP_TAG_MASK 0x1
+#define RDIF_TASK_CONTEXT_VALIDATE_APP_TAG_SHIFT 1
+#define RDIF_TASK_CONTEXT_VALIDATE_REF_TAG_MASK 0x1
+#define RDIF_TASK_CONTEXT_VALIDATE_REF_TAG_SHIFT 2
+#define RDIF_TASK_CONTEXT_FORWARD_GUARD_MASK 0x1
+#define RDIF_TASK_CONTEXT_FORWARD_GUARD_SHIFT 3
+#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_MASK 0x1
+#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_SHIFT 4
+#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_MASK 0x1
+#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_SHIFT 5
/* 0=512B, 1=1KB, 2=2KB, 3=4KB, 4=8KB */
-#define RDIF_TASK_CONTEXT_INTERVALSIZE_MASK 0x7
-#define RDIF_TASK_CONTEXT_INTERVALSIZE_SHIFT 6
+#define RDIF_TASK_CONTEXT_INTERVAL_SIZE_MASK 0x7
+#define RDIF_TASK_CONTEXT_INTERVAL_SIZE_SHIFT 6
/* 0=None, 1=DIF, 2=DIX */
-#define RDIF_TASK_CONTEXT_HOSTINTERFACE_MASK 0x3
-#define RDIF_TASK_CONTEXT_HOSTINTERFACE_SHIFT 9
+#define RDIF_TASK_CONTEXT_HOST_INTERFACE_MASK 0x3
+#define RDIF_TASK_CONTEXT_HOST_INTERFACE_SHIFT 9
/* DIF tag right at the beginning of DIF interval */
-#define RDIF_TASK_CONTEXT_DIFBEFOREDATA_MASK 0x1
-#define RDIF_TASK_CONTEXT_DIFBEFOREDATA_SHIFT 11
-#define RDIF_TASK_CONTEXT_RESERVED0_MASK 0x1
-#define RDIF_TASK_CONTEXT_RESERVED0_SHIFT 12
+#define RDIF_TASK_CONTEXT_DIF_BEFORE_DATA_MASK 0x1
+#define RDIF_TASK_CONTEXT_DIF_BEFORE_DATA_SHIFT 11
+#define RDIF_TASK_CONTEXT_RESERVED0_MASK 0x1
+#define RDIF_TASK_CONTEXT_RESERVED0_SHIFT 12
/* 0=None, 1=DIF */
-#define RDIF_TASK_CONTEXT_NETWORKINTERFACE_MASK 0x1
-#define RDIF_TASK_CONTEXT_NETWORKINTERFACE_SHIFT 13
+#define RDIF_TASK_CONTEXT_NETWORK_INTERFACE_MASK 0x1
+#define RDIF_TASK_CONTEXT_NETWORK_INTERFACE_SHIFT 13
/* Forward application tag with mask */
-#define RDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_MASK 0x1
-#define RDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_SHIFT 14
+#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_MASK 0x1
+#define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_SHIFT 14
/* Forward reference tag with mask */
-#define RDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_MASK 0x1
-#define RDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_SHIFT 15
+#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_MASK 0x1
+#define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_SHIFT 15
__le16 state;
-#define RDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFT_MASK 0xF
-#define RDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFT_SHIFT 0
-#define RDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFT_MASK 0xF
-#define RDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFT_SHIFT 4
-#define RDIF_TASK_CONTEXT_ERRORINIO_MASK 0x1
-#define RDIF_TASK_CONTEXT_ERRORINIO_SHIFT 8
-#define RDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_MASK 0x1
-#define RDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_SHIFT 9
+#define RDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_MASK 0xF
+#define RDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_SHIFT 0
+#define RDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_MASK 0xF
+#define RDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_SHIFT 4
+#define RDIF_TASK_CONTEXT_ERROR_IN_IO_MASK 0x1
+#define RDIF_TASK_CONTEXT_ERROR_IN_IO_SHIFT 8
+#define RDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_MASK 0x1
+#define RDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_SHIFT 9
/* mask for refernce tag handling */
-#define RDIF_TASK_CONTEXT_REFTAGMASK_MASK 0xF
-#define RDIF_TASK_CONTEXT_REFTAGMASK_SHIFT 10
-#define RDIF_TASK_CONTEXT_RESERVED1_MASK 0x3
-#define RDIF_TASK_CONTEXT_RESERVED1_SHIFT 14
+#define RDIF_TASK_CONTEXT_REF_TAG_MASK_MASK 0xF
+#define RDIF_TASK_CONTEXT_REF_TAG_MASK_SHIFT 10
+#define RDIF_TASK_CONTEXT_RESERVED1_MASK 0x3
+#define RDIF_TASK_CONTEXT_RESERVED1_SHIFT 14
__le32 reserved2;
};
-/* RSS hash type */
+/*
+ * RSS hash type
+ */
enum rss_hash_type {
- RSS_HASH_TYPE_DEFAULT = 0,
- RSS_HASH_TYPE_IPV4 = 1,
- RSS_HASH_TYPE_TCP_IPV4 = 2,
- RSS_HASH_TYPE_IPV6 = 3,
- RSS_HASH_TYPE_TCP_IPV6 = 4,
- RSS_HASH_TYPE_UDP_IPV4 = 5,
- RSS_HASH_TYPE_UDP_IPV6 = 6,
+ RSS_HASH_TYPE_DEFAULT = 0,
+ RSS_HASH_TYPE_IPV4 = 1,
+ RSS_HASH_TYPE_TCP_IPV4 = 2,
+ RSS_HASH_TYPE_IPV6 = 3,
+ RSS_HASH_TYPE_TCP_IPV6 = 4,
+ RSS_HASH_TYPE_UDP_IPV4 = 5,
+ RSS_HASH_TYPE_UDP_IPV6 = 6,
MAX_RSS_HASH_TYPE
};
-/* status block structure */
-struct status_block {
- __le16 pi_array[PIS_PER_SB];
- __le32 sb_num;
-#define STATUS_BLOCK_SB_NUM_MASK 0x1FF
-#define STATUS_BLOCK_SB_NUM_SHIFT 0
-#define STATUS_BLOCK_ZERO_PAD_MASK 0x7F
-#define STATUS_BLOCK_ZERO_PAD_SHIFT 9
-#define STATUS_BLOCK_ZERO_PAD2_MASK 0xFFFF
-#define STATUS_BLOCK_ZERO_PAD2_SHIFT 16
+/*
+ * status block structure
+ */
+struct status_block_e4 {
+ __le16 pi_array[PIS_PER_SB_E4];
+ __le32 sb_num;
+#define STATUS_BLOCK_E4_SB_NUM_MASK 0x1FF
+#define STATUS_BLOCK_E4_SB_NUM_SHIFT 0
+#define STATUS_BLOCK_E4_ZERO_PAD_MASK 0x7F
+#define STATUS_BLOCK_E4_ZERO_PAD_SHIFT 9
+#define STATUS_BLOCK_E4_ZERO_PAD2_MASK 0xFFFF
+#define STATUS_BLOCK_E4_ZERO_PAD2_SHIFT 16
__le32 prod_index;
-#define STATUS_BLOCK_PROD_INDEX_MASK 0xFFFFFF
-#define STATUS_BLOCK_PROD_INDEX_SHIFT 0
-#define STATUS_BLOCK_ZERO_PAD3_MASK 0xFF
-#define STATUS_BLOCK_ZERO_PAD3_SHIFT 24
+#define STATUS_BLOCK_E4_PROD_INDEX_MASK 0xFFFFFF
+#define STATUS_BLOCK_E4_PROD_INDEX_SHIFT 0
+#define STATUS_BLOCK_E4_ZERO_PAD3_MASK 0xFF
+#define STATUS_BLOCK_E4_ZERO_PAD3_SHIFT 24
};
-/* VF BAR */
-#define PXP_VF_BAR0 0
-
-#define PXP_VF_BAR0_START_GRC 0x3E00
-#define PXP_VF_BAR0_GRC_LENGTH 0x200
-#define PXP_VF_BAR0_END_GRC \
-(PXP_VF_BAR0_START_GRC + PXP_VF_BAR0_GRC_LENGTH - 1)
-
-#define PXP_VF_BAR0_START_IGU 0
-#define PXP_VF_BAR0_IGU_LENGTH 0x3000
-#define PXP_VF_BAR0_END_IGU \
-(PXP_VF_BAR0_START_IGU + PXP_VF_BAR0_IGU_LENGTH - 1)
-
-#define PXP_VF_BAR0_START_DQ 0x3000
-#define PXP_VF_BAR0_DQ_LENGTH 0x200
-#define PXP_VF_BAR0_DQ_OPAQUE_OFFSET 0
-#define PXP_VF_BAR0_ME_OPAQUE_ADDRESS \
-(PXP_VF_BAR0_START_DQ + PXP_VF_BAR0_DQ_OPAQUE_OFFSET)
-#define PXP_VF_BAR0_ME_CONCRETE_ADDRESS \
-(PXP_VF_BAR0_ME_OPAQUE_ADDRESS + 4)
-#define PXP_VF_BAR0_END_DQ \
-(PXP_VF_BAR0_START_DQ + PXP_VF_BAR0_DQ_LENGTH - 1)
-
-#define PXP_VF_BAR0_START_TSDM_ZONE_B 0x3200
-#define PXP_VF_BAR0_SDM_LENGTH_ZONE_B 0x200
-#define PXP_VF_BAR0_END_TSDM_ZONE_B \
-(PXP_VF_BAR0_START_TSDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
-
-#define PXP_VF_BAR0_START_MSDM_ZONE_B 0x3400
-#define PXP_VF_BAR0_END_MSDM_ZONE_B \
-(PXP_VF_BAR0_START_MSDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
-
-#define PXP_VF_BAR0_START_USDM_ZONE_B 0x3600
-#define PXP_VF_BAR0_END_USDM_ZONE_B \
-(PXP_VF_BAR0_START_USDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
-
-#define PXP_VF_BAR0_START_XSDM_ZONE_B 0x3800
-#define PXP_VF_BAR0_END_XSDM_ZONE_B \
-(PXP_VF_BAR0_START_XSDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
-
-#define PXP_VF_BAR0_START_YSDM_ZONE_B 0x3a00
-#define PXP_VF_BAR0_END_YSDM_ZONE_B \
-(PXP_VF_BAR0_START_YSDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
-
-#define PXP_VF_BAR0_START_PSDM_ZONE_B 0x3c00
-#define PXP_VF_BAR0_END_PSDM_ZONE_B \
-(PXP_VF_BAR0_START_PSDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1)
-
-#define PXP_VF_BAR0_START_SDM_ZONE_A 0x4000
-#define PXP_VF_BAR0_END_SDM_ZONE_A 0x10000
-
-#define PXP_VF_BAR0_GRC_WINDOW_LENGTH 32
+/*
+ * status block structure
+ */
+struct status_block_e5 {
+ __le16 pi_array[PIS_PER_SB_E5];
+ __le32 sb_num;
+#define STATUS_BLOCK_E5_SB_NUM_MASK 0x1FF
+#define STATUS_BLOCK_E5_SB_NUM_SHIFT 0
+#define STATUS_BLOCK_E5_ZERO_PAD_MASK 0x7F
+#define STATUS_BLOCK_E5_ZERO_PAD_SHIFT 9
+#define STATUS_BLOCK_E5_ZERO_PAD2_MASK 0xFFFF
+#define STATUS_BLOCK_E5_ZERO_PAD2_SHIFT 16
+ __le32 prod_index;
+#define STATUS_BLOCK_E5_PROD_INDEX_MASK 0xFFFFFF
+#define STATUS_BLOCK_E5_PROD_INDEX_SHIFT 0
+#define STATUS_BLOCK_E5_ZERO_PAD3_MASK 0xFF
+#define STATUS_BLOCK_E5_ZERO_PAD3_SHIFT 24
+};
+
/*
* Tdif context
*/
struct tdif_task_context {
- __le32 initialRefTag;
- __le16 appTagValue;
- __le16 appTagMask;
- __le16 partialCrcValueB;
- __le16 partialChecksumValueB;
+ __le32 initial_ref_tag;
+ __le16 app_tag_value;
+ __le16 app_tag_mask;
+ __le16 partial_crc_value_b;
+ __le16 partial_checksum_value_b;
__le16 stateB;
-#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTB_MASK 0xF
-#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTB_SHIFT 0
-#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTB_MASK 0xF
-#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTB_SHIFT 4
-#define TDIF_TASK_CONTEXT_ERRORINIOB_MASK 0x1
-#define TDIF_TASK_CONTEXT_ERRORINIOB_SHIFT 8
-#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_MASK 0x1
-#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_SHIFT 9
-#define TDIF_TASK_CONTEXT_RESERVED0_MASK 0x3F
-#define TDIF_TASK_CONTEXT_RESERVED0_SHIFT 10
+#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_B_MASK 0xF
+#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_B_SHIFT 0
+#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_B_MASK 0xF
+#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_B_SHIFT 4
+#define TDIF_TASK_CONTEXT_ERROR_IN_IO_B_MASK 0x1
+#define TDIF_TASK_CONTEXT_ERROR_IN_IO_B_SHIFT 8
+#define TDIF_TASK_CONTEXT_CHECKSUM_VERFLOW_MASK 0x1
+#define TDIF_TASK_CONTEXT_CHECKSUM_VERFLOW_SHIFT 9
+#define TDIF_TASK_CONTEXT_RESERVED0_MASK 0x3F
+#define TDIF_TASK_CONTEXT_RESERVED0_SHIFT 10
u8 reserved1;
u8 flags0;
-#define TDIF_TASK_CONTEXT_IGNOREAPPTAG_MASK 0x1
-#define TDIF_TASK_CONTEXT_IGNOREAPPTAG_SHIFT 0
-#define TDIF_TASK_CONTEXT_INITIALREFTAGVALID_MASK 0x1
-#define TDIF_TASK_CONTEXT_INITIALREFTAGVALID_SHIFT 1
+#define TDIF_TASK_CONTEXT_IGNORE_APP_TAG_MASK 0x1
+#define TDIF_TASK_CONTEXT_IGNORE_APP_TAG_SHIFT 0
+#define TDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_MASK 0x1
+#define TDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_SHIFT 1
/* 0 = IP checksum, 1 = CRC */
-#define TDIF_TASK_CONTEXT_HOSTGUARDTYPE_MASK 0x1
-#define TDIF_TASK_CONTEXT_HOSTGUARDTYPE_SHIFT 2
-#define TDIF_TASK_CONTEXT_SETERRORWITHEOP_MASK 0x1
-#define TDIF_TASK_CONTEXT_SETERRORWITHEOP_SHIFT 3
+#define TDIF_TASK_CONTEXT_HOST_GUARD_TYPE_MASK 0x1
+#define TDIF_TASK_CONTEXT_HOST_GUARD_TYPE_SHIFT 2
+#define TDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_MASK 0x1
+#define TDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_SHIFT 3
/* 1/2/3 - Protection Type */
-#define TDIF_TASK_CONTEXT_PROTECTIONTYPE_MASK 0x3
-#define TDIF_TASK_CONTEXT_PROTECTIONTYPE_SHIFT 4
+#define TDIF_TASK_CONTEXT_PROTECTION_TYPE_MASK 0x3
+#define TDIF_TASK_CONTEXT_PROTECTION_TYPE_SHIFT 4
/* 0=0x0000, 1=0xffff */
-#define TDIF_TASK_CONTEXT_CRC_SEED_MASK 0x1
-#define TDIF_TASK_CONTEXT_CRC_SEED_SHIFT 6
-#define TDIF_TASK_CONTEXT_RESERVED2_MASK 0x1
-#define TDIF_TASK_CONTEXT_RESERVED2_SHIFT 7
+#define TDIF_TASK_CONTEXT_CRC_SEED_MASK 0x1
+#define TDIF_TASK_CONTEXT_CRC_SEED_SHIFT 6
+#define TDIF_TASK_CONTEXT_RESERVED2_MASK 0x1
+#define TDIF_TASK_CONTEXT_RESERVED2_SHIFT 7
__le32 flags1;
-#define TDIF_TASK_CONTEXT_VALIDATEGUARD_MASK 0x1
-#define TDIF_TASK_CONTEXT_VALIDATEGUARD_SHIFT 0
-#define TDIF_TASK_CONTEXT_VALIDATEAPPTAG_MASK 0x1
-#define TDIF_TASK_CONTEXT_VALIDATEAPPTAG_SHIFT 1
-#define TDIF_TASK_CONTEXT_VALIDATEREFTAG_MASK 0x1
-#define TDIF_TASK_CONTEXT_VALIDATEREFTAG_SHIFT 2
-#define TDIF_TASK_CONTEXT_FORWARDGUARD_MASK 0x1
-#define TDIF_TASK_CONTEXT_FORWARDGUARD_SHIFT 3
-#define TDIF_TASK_CONTEXT_FORWARDAPPTAG_MASK 0x1
-#define TDIF_TASK_CONTEXT_FORWARDAPPTAG_SHIFT 4
-#define TDIF_TASK_CONTEXT_FORWARDREFTAG_MASK 0x1
-#define TDIF_TASK_CONTEXT_FORWARDREFTAG_SHIFT 5
+#define TDIF_TASK_CONTEXT_VALIDATE_GUARD_MASK 0x1
+#define TDIF_TASK_CONTEXT_VALIDATE_GUARD_SHIFT 0
+#define TDIF_TASK_CONTEXT_VALIDATE_APP_TAG_MASK 0x1
+#define TDIF_TASK_CONTEXT_VALIDATE_APP_TAG_SHIFT 1
+#define TDIF_TASK_CONTEXT_VALIDATE_REF_TAG_MASK 0x1
+#define TDIF_TASK_CONTEXT_VALIDATE_REF_TAG_SHIFT 2
+#define TDIF_TASK_CONTEXT_FORWARD_GUARD_MASK 0x1
+#define TDIF_TASK_CONTEXT_FORWARD_GUARD_SHIFT 3
+#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_MASK 0x1
+#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_SHIFT 4
+#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_MASK 0x1
+#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_SHIFT 5
/* 0=512B, 1=1KB, 2=2KB, 3=4KB, 4=8KB */
-#define TDIF_TASK_CONTEXT_INTERVALSIZE_MASK 0x7
-#define TDIF_TASK_CONTEXT_INTERVALSIZE_SHIFT 6
+#define TDIF_TASK_CONTEXT_INTERVAL_SIZE_MASK 0x7
+#define TDIF_TASK_CONTEXT_INTERVAL_SIZE_SHIFT 6
/* 0=None, 1=DIF, 2=DIX */
-#define TDIF_TASK_CONTEXT_HOSTINTERFACE_MASK 0x3
-#define TDIF_TASK_CONTEXT_HOSTINTERFACE_SHIFT 9
+#define TDIF_TASK_CONTEXT_HOST_INTERFACE_MASK 0x3
+#define TDIF_TASK_CONTEXT_HOST_INTERFACE_SHIFT 9
/* DIF tag right at the beginning of DIF interval */
-#define TDIF_TASK_CONTEXT_DIFBEFOREDATA_MASK 0x1
-#define TDIF_TASK_CONTEXT_DIFBEFOREDATA_SHIFT 11
-/* reserved */
-#define TDIF_TASK_CONTEXT_RESERVED3_MASK 0x1
-#define TDIF_TASK_CONTEXT_RESERVED3_SHIFT 12
+#define TDIF_TASK_CONTEXT_DIF_BEFORE_DATA_MASK 0x1
+#define TDIF_TASK_CONTEXT_DIF_BEFORE_DATA_SHIFT 11
+#define TDIF_TASK_CONTEXT_RESERVED3_MASK 0x1 /* reserved */
+#define TDIF_TASK_CONTEXT_RESERVED3_SHIFT 12
/* 0=None, 1=DIF */
-#define TDIF_TASK_CONTEXT_NETWORKINTERFACE_MASK 0x1
-#define TDIF_TASK_CONTEXT_NETWORKINTERFACE_SHIFT 13
-#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTA_MASK 0xF
-#define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTA_SHIFT 14
-#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTA_MASK 0xF
-#define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTA_SHIFT 18
-#define TDIF_TASK_CONTEXT_ERRORINIOA_MASK 0x1
-#define TDIF_TASK_CONTEXT_ERRORINIOA_SHIFT 22
-#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOWA_MASK 0x1
-#define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOWA_SHIFT 23
+#define TDIF_TASK_CONTEXT_NETWORK_INTERFACE_MASK 0x1
+#define TDIF_TASK_CONTEXT_NETWORK_INTERFACE_SHIFT 13
+#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_A_MASK 0xF
+#define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_A_SHIFT 14
+#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_A_MASK 0xF
+#define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_A_SHIFT 18
+#define TDIF_TASK_CONTEXT_ERROR_IN_IO_A_MASK 0x1
+#define TDIF_TASK_CONTEXT_ERROR_IN_IO_A_SHIFT 22
+#define TDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_A_MASK 0x1
+#define TDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_A_SHIFT 23
/* mask for refernce tag handling */
-#define TDIF_TASK_CONTEXT_REFTAGMASK_MASK 0xF
-#define TDIF_TASK_CONTEXT_REFTAGMASK_SHIFT 24
+#define TDIF_TASK_CONTEXT_REF_TAG_MASK_MASK 0xF
+#define TDIF_TASK_CONTEXT_REF_TAG_MASK_SHIFT 24
/* Forward application tag with mask */
-#define TDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_MASK 0x1
-#define TDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_SHIFT 28
+#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_MASK 0x1
+#define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_SHIFT 28
/* Forward reference tag with mask */
-#define TDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_MASK 0x1
-#define TDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_SHIFT 29
+#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_MASK 0x1
+#define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_SHIFT 29
/* Keep reference tag constant */
-#define TDIF_TASK_CONTEXT_KEEPREFTAGCONST_MASK 0x1
-#define TDIF_TASK_CONTEXT_KEEPREFTAGCONST_SHIFT 30
-#define TDIF_TASK_CONTEXT_RESERVED4_MASK 0x1
-#define TDIF_TASK_CONTEXT_RESERVED4_SHIFT 31
- __le32 offsetInIOB;
- __le16 partialCrcValueA;
- __le16 partialChecksumValueA;
- __le32 offsetInIOA;
- u8 partialDifDataA[8];
- u8 partialDifDataB[8];
+#define TDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_MASK 0x1
+#define TDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_SHIFT 30
+#define TDIF_TASK_CONTEXT_RESERVED4_MASK 0x1
+#define TDIF_TASK_CONTEXT_RESERVED4_SHIFT 31
+ __le32 offset_in_io_b;
+ __le16 partial_crc_value_a;
+ __le16 partial_checksum_value_a;
+ __le32 offset_in_io_a;
+ u8 partial_dif_data_a[8];
+ u8 partial_dif_data_b[8];
};
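
Throughout these HSI structures, packed fields are described with paired *_MASK/*_SHIFT defines instead of C bitfields, so the wire layout is compiler-independent; drivers pull fields out with GET_FIELD, defined in ecore.h as ((value) >> name##_SHIFT) & name##_MASK. A sketch of reading the 24-bit producer index from the struct status_block_e4 added above (the OSAL_LE32_TO_CPU step assumes the usual little-endian conversion macro from bcm_osal.h):

/* Extract the producer index from an E4 status block; GET_FIELD pairs
 * with the STATUS_BLOCK_E4_PROD_INDEX_{MASK,SHIFT} defines above. */
static u32 example_sb_prod(struct status_block_e4 *sb)
{
	u32 prod = OSAL_LE32_TO_CPU(sb->prod_index);

	return GET_FIELD(prod, STATUS_BLOCK_E4_PROD_INDEX);
}
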
diff --git a/drivers/net/qede/base/ecore.h b/drivers/net/qede/base/ecore.h
index 0d68a9bc..ce5f3a90 100644
--- a/drivers/net/qede/base/ecore.h
+++ b/drivers/net/qede/base/ecore.h
@@ -29,9 +29,9 @@
#include "mcp_public.h"
#define ECORE_MAJOR_VERSION 8
-#define ECORE_MINOR_VERSION 18
-#define ECORE_REVISION_VERSION 7
-#define ECORE_ENGINEERING_VERSION 1
+#define ECORE_MINOR_VERSION 30
+#define ECORE_REVISION_VERSION 8
+#define ECORE_ENGINEERING_VERSION 0
#define ECORE_VERSION \
((ECORE_MAJOR_VERSION << 24) | (ECORE_MINOR_VERSION << 16) | \
@@ -50,6 +50,7 @@
#define FCOE_BDQ_ID(_port_id) (_port_id + 2)
/* Constants */
#define ECORE_WID_SIZE (1024)
+#define ECORE_MIN_WIDS (4)
/* Configurable */
#define ECORE_PF_DEMS_SIZE (4)
@@ -66,6 +67,7 @@ enum ecore_nvm_cmd {
ECORE_NVM_READ_NVRAM = DRV_MSG_CODE_NVM_READ_NVRAM,
ECORE_NVM_WRITE_NVRAM = DRV_MSG_CODE_NVM_WRITE_NVRAM,
ECORE_NVM_DEL_FILE = DRV_MSG_CODE_NVM_DEL_FILE,
+ ECORE_EXT_PHY_FW_UPGRADE = DRV_MSG_CODE_EXT_PHY_FW_UPGRADE,
ECORE_NVM_SET_SECURE_MODE = DRV_MSG_CODE_SET_SECURE_MODE,
ECORE_PHY_RAW_READ = DRV_MSG_CODE_PHY_RAW_READ,
ECORE_PHY_RAW_WRITE = DRV_MSG_CODE_PHY_RAW_WRITE,
@@ -97,16 +99,16 @@ do { \
#define GET_FIELD(value, name) \
(((value) >> (name##_SHIFT)) & name##_MASK)
-#endif
-#define ECORE_MFW_GET_FIELD(name, field) \
- (((name) & (field ## _MASK)) >> (field ## _SHIFT))
+#define GET_MFW_FIELD(name, field) \
+ (((name) & (field ## _MASK)) >> (field ## _OFFSET))
-#define ECORE_MFW_SET_FIELD(name, field, value) \
+#define SET_MFW_FIELD(name, field, value) \
do { \
- (name) &= ~(field ## _MASK); \
- (name) |= (((value) << (field ## _SHIFT)) & (field ## _MASK)); \
+ (name) &= ~((field ## _MASK)); \
+ (name) |= (((value) << (field ## _OFFSET)) & (field ## _MASK)); \
} while (0)
+#endif
static OSAL_INLINE u32 DB_ADDR(u32 cid, u32 DEMS)
{
@@ -279,7 +281,6 @@ struct ecore_qm_iids {
* is received from MFW.
*/
enum ecore_resources {
- ECORE_SB,
ECORE_L2_QUEUE,
ECORE_VPORT,
ECORE_RSS_ENG,
@@ -293,7 +294,13 @@ enum ecore_resources {
ECORE_CMDQS_CQS,
ECORE_RDMA_STATS_QUEUE,
ECORE_BDQ,
- ECORE_MAX_RESC, /* must be last */
+
+ /* This is needed only internally for matching against the IGU.
+ * In case of legacy MFW, would be set to `0'.
+ */
+ ECORE_SB,
+
+ ECORE_MAX_RESC,
};
/* Features that require resources, given as input to the resource management
@@ -345,22 +352,32 @@ enum ecore_hw_err_type {
};
#endif
+enum ecore_db_rec_exec {
+ DB_REC_DRY_RUN,
+ DB_REC_REAL_DEAL,
+ DB_REC_ONCE,
+};
+
struct ecore_hw_info {
/* PCI personality */
enum ecore_pci_personality personality;
-#define ECORE_IS_RDMA_PERSONALITY(dev) \
- ((dev)->hw_info.personality == ECORE_PCI_ETH_ROCE || \
+#define ECORE_IS_RDMA_PERSONALITY(dev) \
+ ((dev)->hw_info.personality == ECORE_PCI_ETH_ROCE || \
(dev)->hw_info.personality == ECORE_PCI_ETH_IWARP || \
(dev)->hw_info.personality == ECORE_PCI_ETH_RDMA)
-#define ECORE_IS_ROCE_PERSONALITY(dev) \
+#define ECORE_IS_ROCE_PERSONALITY(dev) \
((dev)->hw_info.personality == ECORE_PCI_ETH_ROCE || \
(dev)->hw_info.personality == ECORE_PCI_ETH_RDMA)
-#define ECORE_IS_IWARP_PERSONALITY(dev) \
+#define ECORE_IS_IWARP_PERSONALITY(dev) \
((dev)->hw_info.personality == ECORE_PCI_ETH_IWARP || \
(dev)->hw_info.personality == ECORE_PCI_ETH_RDMA)
-#define ECORE_IS_L2_PERSONALITY(dev) \
+#define ECORE_IS_L2_PERSONALITY(dev) \
((dev)->hw_info.personality == ECORE_PCI_ETH || \
ECORE_IS_RDMA_PERSONALITY(dev))
+#define ECORE_IS_FCOE_PERSONALITY(dev) \
+ ((dev)->hw_info.personality == ECORE_PCI_FCOE)
+#define ECORE_IS_ISCSI_PERSONALITY(dev) \
+ ((dev)->hw_info.personality == ECORE_PCI_ISCSI)
/* Resource Allocation scheme results */
u32 resc_start[ECORE_MAX_RESC];
@@ -473,6 +490,12 @@ struct ecore_qm_info {
u8 num_pf_rls;
};
+struct ecore_db_recovery_info {
+ osal_list_t list;
+ osal_spinlock_t lock;
+ u32 db_recovery_counter;
+};
+
struct storm_stats {
u32 address;
u32 len;
@@ -488,14 +511,60 @@ struct ecore_fw_data {
u32 init_ops_size;
};
+enum ecore_mf_mode_bit {
+ /* Supports PF-classification based on tag */
+ ECORE_MF_OVLAN_CLSS,
+
+ /* Supports PF-classification based on MAC */
+ ECORE_MF_LLH_MAC_CLSS,
+
+ /* Supports PF-classification based on protocol type */
+ ECORE_MF_LLH_PROTO_CLSS,
+
+ /* Requires a default PF to be set */
+ ECORE_MF_NEED_DEF_PF,
+
+ /* Allow LL2 to multicast/broadcast */
+ ECORE_MF_LL2_NON_UNICAST,
+
+ /* Allow Cross-PF [& child VFs] Tx-switching */
+ ECORE_MF_INTER_PF_SWITCH,
+
+ /* TODO - if we ever re-utilize any of this logic, we can rename */
+ ECORE_MF_UFP_SPECIFIC,
+
+ ECORE_MF_DISABLE_ARFS,
+};
+
+enum ecore_ufp_mode {
+ ECORE_UFP_MODE_ETS,
+ ECORE_UFP_MODE_VNIC_BW,
+};
+
+enum ecore_ufp_pri_type {
+ ECORE_UFP_PRI_OS,
+ ECORE_UFP_PRI_VNIC
+};
+
+struct ecore_ufp_info {
+ enum ecore_ufp_pri_type pri_type;
+ enum ecore_ufp_mode mode;
+ u8 tc;
+};
+
+enum BAR_ID {
+ BAR_ID_0, /* used for GRC */
+ BAR_ID_1 /* Used for doorbells */
+};
+
struct ecore_hwfn {
struct ecore_dev *p_dev;
u8 my_id; /* ID inside the PF */
#define IS_LEAD_HWFN(edev) (!((edev)->my_id))
u8 rel_pf_id; /* Relative to engine*/
u8 abs_pf_id;
- #define ECORE_PATH_ID(_p_hwfn) \
- (ECORE_IS_K2((_p_hwfn)->p_dev) ? 0 : ((_p_hwfn)->abs_pf_id & 1))
+#define ECORE_PATH_ID(_p_hwfn) \
+ (ECORE_IS_BB((_p_hwfn)->p_dev) ? ((_p_hwfn)->abs_pf_id & 1) : 0)
u8 port_id;
bool b_active;
@@ -556,10 +625,6 @@ struct ecore_hwfn {
bool b_rdma_enabled_in_prs;
u32 rdma_prs_search_reg;
- /* Array of sb_info of all status blocks */
- struct ecore_sb_info *sbs_info[MAX_SB_PER_PF_MIMD];
- u16 num_sbs;
-
struct ecore_cxt_mngr *p_cxt_mngr;
/* Flag indicating whether interrupts are enabled or not*/
@@ -573,6 +638,7 @@ struct ecore_hwfn {
struct ecore_pf_iov *pf_iov_info;
struct ecore_mcp_info *mcp_info;
struct ecore_dcbx_info *p_dcbx_info;
+ struct ecore_ufp_info ufp_info;
struct ecore_dmae_info dmae_info;
@@ -603,17 +669,19 @@ struct ecore_hwfn {
/* L2-related */
struct ecore_l2_info *p_l2_info;
+ /* Mechanism for recovering from doorbell drop */
+ struct ecore_db_recovery_info db_recovery_info;
+
/* @DPDK */
struct ecore_ptt *p_arfs_ptt;
};
-#ifndef __EXTRACT__LINUX__
enum ecore_mf_mode {
ECORE_MF_DEFAULT,
ECORE_MF_OVLAN,
ECORE_MF_NPAR,
+ ECORE_MF_UFP,
};
-#endif
/* @DPDK */
struct ecore_dbg_feature {
@@ -632,15 +700,18 @@ enum qed_dbg_features {
DBG_FEATURE_NUM
};
+enum ecore_dev_type {
+ ECORE_DEV_TYPE_BB,
+ ECORE_DEV_TYPE_AH,
+};
+
struct ecore_dev {
u32 dp_module;
u8 dp_level;
char name[NAME_SIZE];
void *dp_ctx;
- u8 type;
-#define ECORE_DEV_TYPE_BB (0 << 0)
-#define ECORE_DEV_TYPE_AH (1 << 0)
+ enum ecore_dev_type type;
/* Translate type/revision combo into the proper conditions */
#define ECORE_IS_BB(dev) ((dev)->type == ECORE_DEV_TYPE_BB)
#define ECORE_IS_BB_A0(dev) (ECORE_IS_BB(dev) && CHIP_REV_IS_A0(dev))
@@ -653,66 +724,68 @@ struct ecore_dev {
#define ECORE_IS_AH(dev) ((dev)->type == ECORE_DEV_TYPE_AH)
#define ECORE_IS_K2(dev) ECORE_IS_AH(dev)
+ u16 vendor_id;
+ u16 device_id;
#define ECORE_DEV_ID_MASK 0xff00
#define ECORE_DEV_ID_MASK_BB 0x1600
#define ECORE_DEV_ID_MASK_AH 0x8000
- u16 vendor_id;
- u16 device_id;
-
u16 chip_num;
- #define CHIP_NUM_MASK 0xffff
- #define CHIP_NUM_SHIFT 16
+#define CHIP_NUM_MASK 0xffff
+#define CHIP_NUM_SHIFT 0
- u16 chip_rev;
- #define CHIP_REV_MASK 0xf
- #define CHIP_REV_SHIFT 12
+ u8 chip_rev;
+#define CHIP_REV_MASK 0xf
+#define CHIP_REV_SHIFT 0
#ifndef ASIC_ONLY
- #define CHIP_REV_IS_TEDIBEAR(_p_dev) ((_p_dev)->chip_rev == 0x5)
- #define CHIP_REV_IS_EMUL_A0(_p_dev) ((_p_dev)->chip_rev == 0xe)
- #define CHIP_REV_IS_EMUL_B0(_p_dev) ((_p_dev)->chip_rev == 0xc)
- #define CHIP_REV_IS_EMUL(_p_dev) (CHIP_REV_IS_EMUL_A0(_p_dev) || \
- CHIP_REV_IS_EMUL_B0(_p_dev))
- #define CHIP_REV_IS_FPGA_A0(_p_dev) ((_p_dev)->chip_rev == 0xf)
- #define CHIP_REV_IS_FPGA_B0(_p_dev) ((_p_dev)->chip_rev == 0xd)
- #define CHIP_REV_IS_FPGA(_p_dev) (CHIP_REV_IS_FPGA_A0(_p_dev) || \
- CHIP_REV_IS_FPGA_B0(_p_dev))
- #define CHIP_REV_IS_SLOW(_p_dev) \
- (CHIP_REV_IS_EMUL(_p_dev) || CHIP_REV_IS_FPGA(_p_dev))
- #define CHIP_REV_IS_A0(_p_dev) \
- (CHIP_REV_IS_EMUL_A0(_p_dev) || \
- CHIP_REV_IS_FPGA_A0(_p_dev) || \
- !(_p_dev)->chip_rev)
- #define CHIP_REV_IS_B0(_p_dev) \
- (CHIP_REV_IS_EMUL_B0(_p_dev) || \
- CHIP_REV_IS_FPGA_B0(_p_dev) || \
- (_p_dev)->chip_rev == 1)
- #define CHIP_REV_IS_ASIC(_p_dev) !CHIP_REV_IS_SLOW(_p_dev)
+#define CHIP_REV_IS_TEDIBEAR(_p_dev) ((_p_dev)->chip_rev == 0x5)
+#define CHIP_REV_IS_EMUL_A0(_p_dev) ((_p_dev)->chip_rev == 0xe)
+#define CHIP_REV_IS_EMUL_B0(_p_dev) ((_p_dev)->chip_rev == 0xc)
+#define CHIP_REV_IS_EMUL(_p_dev) \
+ (CHIP_REV_IS_EMUL_A0(_p_dev) || CHIP_REV_IS_EMUL_B0(_p_dev))
+#define CHIP_REV_IS_FPGA_A0(_p_dev) ((_p_dev)->chip_rev == 0xf)
+#define CHIP_REV_IS_FPGA_B0(_p_dev) ((_p_dev)->chip_rev == 0xd)
+#define CHIP_REV_IS_FPGA(_p_dev) \
+ (CHIP_REV_IS_FPGA_A0(_p_dev) || CHIP_REV_IS_FPGA_B0(_p_dev))
+#define CHIP_REV_IS_SLOW(_p_dev) \
+ (CHIP_REV_IS_EMUL(_p_dev) || CHIP_REV_IS_FPGA(_p_dev))
+#define CHIP_REV_IS_A0(_p_dev) \
+ (CHIP_REV_IS_EMUL_A0(_p_dev) || CHIP_REV_IS_FPGA_A0(_p_dev) || \
+ (!(_p_dev)->chip_rev && !(_p_dev)->chip_metal))
+#define CHIP_REV_IS_B0(_p_dev) \
+ (CHIP_REV_IS_EMUL_B0(_p_dev) || CHIP_REV_IS_FPGA_B0(_p_dev) || \
+ ((_p_dev)->chip_rev == 1 && !(_p_dev)->chip_metal))
+#define CHIP_REV_IS_ASIC(_p_dev) !CHIP_REV_IS_SLOW(_p_dev)
#else
- #define CHIP_REV_IS_A0(_p_dev) (!(_p_dev)->chip_rev)
- #define CHIP_REV_IS_B0(_p_dev) ((_p_dev)->chip_rev == 1)
+#define CHIP_REV_IS_A0(_p_dev) \
+ (!(_p_dev)->chip_rev && !(_p_dev)->chip_metal)
+#define CHIP_REV_IS_B0(_p_dev) \
+ ((_p_dev)->chip_rev == 1 && !(_p_dev)->chip_metal)
#endif
- u16 chip_metal;
- #define CHIP_METAL_MASK 0xff
- #define CHIP_METAL_SHIFT 4
+ u8 chip_metal;
+#define CHIP_METAL_MASK 0xff
+#define CHIP_METAL_SHIFT 0
- u16 chip_bond_id;
- #define CHIP_BOND_ID_MASK 0xf
- #define CHIP_BOND_ID_SHIFT 0
+ u8 chip_bond_id;
+#define CHIP_BOND_ID_MASK 0xff
+#define CHIP_BOND_ID_SHIFT 0
u8 num_engines;
- u8 num_ports_in_engines;
+ u8 num_ports;
+ u8 num_ports_in_engine;
u8 num_funcs_in_port;
u8 path_id;
+
+ unsigned long mf_bits;
enum ecore_mf_mode mf_mode;
- #define IS_MF_DEFAULT(_p_hwfn) \
- (((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_DEFAULT)
- #define IS_MF_SI(_p_hwfn) \
- (((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_NPAR)
- #define IS_MF_SD(_p_hwfn) \
- (((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_OVLAN)
+#define IS_MF_DEFAULT(_p_hwfn) \
+ (((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_DEFAULT)
+#define IS_MF_SI(_p_hwfn) \
+ (((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_NPAR)
+#define IS_MF_SD(_p_hwfn) \
+ (((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_OVLAN)
int pcie_width;
int pcie_speed;
@@ -744,12 +817,14 @@ struct ecore_dev {
/* HW functions */
u8 num_hwfns;
struct ecore_hwfn hwfns[MAX_HWFNS_PER_DEVICE];
+#define ECORE_IS_CMT(dev) ((dev)->num_hwfns > 1)
/* SRIOV */
struct ecore_hw_sriov_info *p_iov_info;
#define IS_ECORE_SRIOV(p_dev) (!!(p_dev)->p_iov_info)
struct ecore_tunnel_info tunnel;
bool b_is_vf;
+ bool b_dont_override_vf_msix;
u32 drv_type;
@@ -800,6 +875,8 @@ struct ecore_dev {
#define NUM_OF_ENG_PFS(dev) (ECORE_IS_BB(dev) ? MAX_NUM_PFS_BB \
: MAX_NUM_PFS_K2)
+#define CRC8_TABLE_SIZE 256
+
/**
* @brief ecore_concrete_to_sw_fid - get the sw function id from
* the concrete value.
@@ -808,8 +885,7 @@ struct ecore_dev {
*
* @return OSAL_INLINE u8
*/
-static OSAL_INLINE u8
-ecore_concrete_to_sw_fid(__rte_unused struct ecore_dev *p_dev, u32 concrete_fid)
+static OSAL_INLINE u8 ecore_concrete_to_sw_fid(u32 concrete_fid)
{
u8 vfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID);
u8 pfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID);
@@ -824,11 +900,12 @@ ecore_concrete_to_sw_fid(__rte_unused struct ecore_dev *p_dev, u32 concrete_fid)
return sw_fid;
}
-#define PURE_LB_TC 8
#define PKT_LB_TC 9
+#define MAX_NUM_VOQS_E4 20
int ecore_configure_vport_wfq(struct ecore_dev *p_dev, u16 vp_id, u32 rate);
void ecore_configure_vp_wfq_on_link_change(struct ecore_dev *p_dev,
+ struct ecore_ptt *p_ptt,
u32 min_pf_rate);
int ecore_configure_pf_max_bandwidth(struct ecore_dev *p_dev, u8 max_bw);
@@ -854,6 +931,13 @@ u16 ecore_get_cm_pq_idx_mcos(struct ecore_hwfn *p_hwfn, u8 tc);
u16 ecore_get_cm_pq_idx_vf(struct ecore_hwfn *p_hwfn, u16 vf);
u16 ecore_get_cm_pq_idx_rl(struct ecore_hwfn *p_hwfn, u8 qpid);
+const char *ecore_hw_get_resc_name(enum ecore_resources res_id);
+
+/* doorbell recovery mechanism */
+void ecore_db_recovery_dp(struct ecore_hwfn *p_hwfn);
+void ecore_db_recovery_execute(struct ecore_hwfn *p_hwfn,
+ enum ecore_db_rec_exec);
+
/* amount of resources used in qm init */
u8 ecore_init_qm_get_num_tcs(struct ecore_hwfn *p_hwfn);
u16 ecore_init_qm_get_num_vfs(struct ecore_hwfn *p_hwfn);
@@ -863,6 +947,4 @@ u16 ecore_init_qm_get_num_pqs(struct ecore_hwfn *p_hwfn);
#define ECORE_LEADING_HWFN(dev) (&dev->hwfns[0])
-const char *ecore_hw_get_resc_name(enum ecore_resources res_id);
-
#endif /* __ECORE_H */
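
Two field-access conventions now coexist in ecore.h: GET_FIELD expects a right-aligned mask (shift first, then mask), while the renamed GET_MFW_FIELD expects an in-position mask (mask first, then shift by the _OFFSET), matching how the management-FW headers declare their constants. A worked illustration with hypothetical constants:

/* Hypothetical 4-bit field occupying bits [11:8] of a register,
 * declared once per convention. */
#define DEMO_FIELD_SHIFT	8
#define DEMO_FIELD_MASK		0xF	/* right-aligned: use GET_FIELD */

#define DEMO_MFW_FIELD_OFFSET	8
#define DEMO_MFW_FIELD_MASK	0xF00	/* in-position: use GET_MFW_FIELD */

static void demo_field_access(u32 reg)	/* e.g. reg == 0x00000a00 */
{
	u32 a = GET_FIELD(reg, DEMO_FIELD);	    /* (reg >> 8) & 0xF -> 0xa */
	u32 b = GET_MFW_FIELD(reg, DEMO_MFW_FIELD); /* (reg & 0xF00) >> 8 -> 0xa */

	(void)a;
	(void)b;
}
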
diff --git a/drivers/net/qede/base/ecore_cxt.c b/drivers/net/qede/base/ecore_cxt.c
index 688118bb..50bd66da 100644
--- a/drivers/net/qede/base/ecore_cxt.c
+++ b/drivers/net/qede/base/ecore_cxt.c
@@ -41,10 +41,7 @@
#define TM_ELEM_SIZE 4
/* ILT constants */
-/* If for some reason, HW P size is modified to be less than 32K,
- * special handling needs to be made for CDU initialization
- */
-#define ILT_DEFAULT_HW_P_SIZE 3
+#define ILT_DEFAULT_HW_P_SIZE 4
#define ILT_PAGE_IN_BYTES(hw_p_size) (1U << ((hw_p_size) + 12))
#define ILT_CFG_REG(cli, reg) PSWRQ2_REG_##cli##_##reg##_RT_OFFSET
@@ -59,8 +56,8 @@
/* connection context union */
union conn_context {
- struct core_conn_context core_ctx;
- struct eth_conn_context eth_ctx;
+ struct e4_core_conn_context core_ctx;
+ struct e4_eth_conn_context eth_ctx;
};
/* TYPE-0 task context - iSCSI, FCOE */
@@ -69,6 +66,7 @@ union type0_task_context {
/* TYPE-1 task context - ROCE */
union type1_task_context {
+ struct regpair reserved; /* @DPDK */
};
struct src_ent {
@@ -230,13 +228,6 @@ struct ecore_cxt_mngr {
/* TODO - VF arfs filters ? */
};
-/* check if resources/configuration is required according to protocol type */
-static OSAL_INLINE bool src_proto(struct ecore_hwfn *p_hwfn,
- enum protocol_type type)
-{
- return type == PROTOCOLID_TOE;
-}
-
static OSAL_INLINE bool tm_cid_proto(enum protocol_type type)
{
return type == PROTOCOLID_TOE;
@@ -270,16 +261,12 @@ struct ecore_src_iids {
u32 per_vf_cids;
};
-static OSAL_INLINE void ecore_cxt_src_iids(struct ecore_hwfn *p_hwfn,
- struct ecore_cxt_mngr *p_mngr,
- struct ecore_src_iids *iids)
+static void ecore_cxt_src_iids(struct ecore_cxt_mngr *p_mngr,
+ struct ecore_src_iids *iids)
{
u32 i;
for (i = 0; i < MAX_CONN_TYPES; i++) {
- if (!src_proto(p_hwfn, i))
- continue;
-
iids->pf_cids += p_mngr->conn_cfg[i].cid_count;
iids->per_vf_cids += p_mngr->conn_cfg[i].cids_per_vf;
}
@@ -297,8 +284,8 @@ struct ecore_tm_iids {
u32 per_vf_tids;
};
-static OSAL_INLINE void ecore_cxt_tm_iids(struct ecore_cxt_mngr *p_mngr,
- struct ecore_tm_iids *iids)
+static void ecore_cxt_tm_iids(struct ecore_cxt_mngr *p_mngr,
+ struct ecore_tm_iids *iids)
{
bool tm_vf_required = false;
bool tm_required = false;
@@ -397,6 +384,20 @@ static struct ecore_tid_seg *ecore_cxt_tid_seg_info(struct ecore_hwfn *p_hwfn,
return OSAL_NULL;
}
+static void ecore_cxt_set_srq_count(struct ecore_hwfn *p_hwfn, u32 num_srqs)
+{
+ struct ecore_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
+
+ p_mgr->srq_count = num_srqs;
+}
+
+u32 ecore_cxt_get_srq_count(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
+
+ return p_mgr->srq_count;
+}
+
/* set the iids (cid/tid) count per protocol */
static void ecore_cxt_set_proto_cid_count(struct ecore_hwfn *p_hwfn,
enum protocol_type type,
@@ -687,7 +688,7 @@ enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)
p_blk = &p_cli->pf_blks[0];
ecore_cxt_qm_iids(p_hwfn, &qm_iids);
- total = ecore_qm_pf_mem_size(p_hwfn->rel_pf_id, qm_iids.cids,
+ total = ecore_qm_pf_mem_size(qm_iids.cids,
qm_iids.vf_cids, qm_iids.tids,
p_hwfn->qm_info.num_pqs,
p_hwfn->qm_info.num_vf_pqs);
@@ -706,7 +707,7 @@ enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)
/* SRC */
p_cli = &p_mngr->clients[ILT_CLI_SRC];
- ecore_cxt_src_iids(p_hwfn, p_mngr, &src_iids);
+ ecore_cxt_src_iids(p_mngr, &src_iids);
/* Both the PF and VFs searcher connections are stored in the per PF
* database. Thus sum the PF searcher cids and all the VFs searcher
@@ -820,7 +821,7 @@ static enum _ecore_status_t ecore_cxt_src_t2_alloc(struct ecore_hwfn *p_hwfn)
if (!p_src->active)
return ECORE_SUCCESS;
- ecore_cxt_src_iids(p_hwfn, p_mngr, &src_iids);
+ ecore_cxt_src_iids(p_mngr, &src_iids);
conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
total_size = conn_num * sizeof(struct src_ent);
@@ -1156,7 +1157,7 @@ enum _ecore_status_t ecore_cxt_mngr_alloc(struct ecore_hwfn *p_hwfn)
clients[ILT_CLI_TSDM].last.reg = ILT_CFG_REG(TSDM, LAST_ILT);
clients[ILT_CLI_TSDM].p_size.reg = ILT_CFG_REG(TSDM, P_SIZE);
- /* default ILT page size for all clients is 32K */
+ /* default ILT page size for all clients is 64K */
for (i = 0; i < ILT_CLI_MAX; i++)
p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE;
@@ -1170,7 +1171,9 @@ enum _ecore_status_t ecore_cxt_mngr_alloc(struct ecore_hwfn *p_hwfn)
p_mngr->vf_count = p_hwfn->p_dev->p_iov_info->total_vfs;
/* Initialize the dynamic ILT allocation mutex */
+#ifdef CONFIG_ECORE_LOCK_ALLOC
OSAL_MUTEX_ALLOC(p_hwfn, &p_mngr->mutex);
+#endif
OSAL_MUTEX_INIT(&p_mngr->mutex);
	/* Set the cxt manager pointer prior to further allocations */
@@ -1219,7 +1222,9 @@ void ecore_cxt_mngr_free(struct ecore_hwfn *p_hwfn)
ecore_cid_map_free(p_hwfn);
ecore_cxt_src_t2_free(p_hwfn);
ecore_ilt_shadow_free(p_hwfn);
+#ifdef CONFIG_ECORE_LOCK_ALLOC
OSAL_MUTEX_DEALLOC(&p_hwfn->p_cxt_mngr->mutex);
+#endif
OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_cxt_mngr);
}
@@ -1422,29 +1427,32 @@ static void ecore_cdu_init_pf(struct ecore_hwfn *p_hwfn)
}
}
-void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn)
+void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+ struct ecore_mcp_link_state *p_link;
struct ecore_qm_iids iids;
OSAL_MEM_ZERO(&iids, sizeof(iids));
ecore_cxt_qm_iids(p_hwfn, &iids);
- ecore_qm_pf_rt_init(p_hwfn, p_hwfn->p_main_ptt, p_hwfn->port_id,
+ p_link = &ECORE_LEADING_HWFN(p_hwfn->p_dev)->mcp_info->link_output;
+
+ ecore_qm_pf_rt_init(p_hwfn, p_ptt, p_hwfn->port_id,
p_hwfn->rel_pf_id, qm_info->max_phys_tcs_per_port,
- p_hwfn->first_on_engine,
iids.cids, iids.vf_cids, iids.tids,
qm_info->start_pq,
qm_info->num_pqs - qm_info->num_vf_pqs,
qm_info->num_vf_pqs,
qm_info->start_vport,
qm_info->num_vports, qm_info->pf_wfq,
- qm_info->pf_rl, p_hwfn->qm_info.qm_pq_params,
+ qm_info->pf_rl, p_link->speed,
+ p_hwfn->qm_info.qm_pq_params,
p_hwfn->qm_info.qm_vport_params);
}
/* CM PF */
-void ecore_cm_init_pf(struct ecore_hwfn *p_hwfn)
+static void ecore_cm_init_pf(struct ecore_hwfn *p_hwfn)
{
STORE_RT_REG(p_hwfn, XCM_REG_CON_PHY_Q3_RT_OFFSET,
ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB));
@@ -1639,7 +1647,7 @@ static void ecore_src_init_pf(struct ecore_hwfn *p_hwfn)
struct ecore_src_iids src_iids;
OSAL_MEM_ZERO(&src_iids, sizeof(src_iids));
- ecore_cxt_src_iids(p_hwfn, p_mngr, &src_iids);
+ ecore_cxt_src_iids(p_mngr, &src_iids);
conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
if (!conn_num)
return;
@@ -1766,9 +1774,11 @@ static void ecore_tm_init_pf(struct ecore_hwfn *p_hwfn)
static void ecore_prs_init_pf(struct ecore_hwfn *p_hwfn)
{
struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
- struct ecore_conn_type_cfg *p_fcoe = &p_mngr->conn_cfg[PROTOCOLID_FCOE];
+ struct ecore_conn_type_cfg *p_fcoe;
struct ecore_tid_seg *p_tid;
+ p_fcoe = &p_mngr->conn_cfg[PROTOCOLID_FCOE];
+
/* If FCoE is active set the MAX OX_ID (tid) in the Parser */
if (!p_fcoe->cid_count)
return;
@@ -1785,9 +1795,9 @@ void ecore_cxt_hw_init_common(struct ecore_hwfn *p_hwfn)
ecore_cdu_init_common(p_hwfn);
}
-void ecore_cxt_hw_init_pf(struct ecore_hwfn *p_hwfn)
+void ecore_cxt_hw_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
- ecore_qm_init_pf(p_hwfn);
+ ecore_qm_init_pf(p_hwfn, p_ptt);
ecore_cm_init_pf(p_hwfn);
ecore_dq_init_pf(p_hwfn);
ecore_cdu_init_pf(p_hwfn);
@@ -1969,20 +1979,6 @@ enum _ecore_status_t ecore_cxt_get_cid_info(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
}
-static void ecore_cxt_set_srq_count(struct ecore_hwfn *p_hwfn, u32 num_srqs)
-{
- struct ecore_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
-
- p_mgr->srq_count = num_srqs;
-}
-
-u32 ecore_cxt_get_srq_count(struct ecore_hwfn *p_hwfn)
-{
- struct ecore_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
-
- return p_mgr->srq_count;
-}
-
enum _ecore_status_t ecore_cxt_set_pf_params(struct ecore_hwfn *p_hwfn)
{
/* Set the number of required CORE connections */
@@ -1993,19 +1989,24 @@ enum _ecore_status_t ecore_cxt_set_pf_params(struct ecore_hwfn *p_hwfn)
switch (p_hwfn->hw_info.personality) {
case ECORE_PCI_ETH:
{
- struct ecore_eth_pf_params *p_params =
+ u32 count = 0;
+
+ struct ecore_eth_pf_params *p_params =
&p_hwfn->pf_params.eth_pf_params;
- /* TODO - we probably want to add VF number to the PF
- * params;
- * As of now, allocates 16 * 2 per-VF [to retain regular
- * functionality].
- */
- ecore_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
- p_params->num_cons, 32);
- p_hwfn->p_cxt_mngr->arfs_count =
- p_params->num_arfs_filters;
- break;
+ if (!p_params->num_vf_cons)
+ p_params->num_vf_cons = ETH_PF_PARAMS_VF_CONS_DEFAULT;
+ ecore_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
+ p_params->num_cons,
+ p_params->num_vf_cons);
+
+ count = p_params->num_arfs_filters;
+
+ if (!OSAL_TEST_BIT(ECORE_MF_DISABLE_ARFS,
+ &p_hwfn->p_dev->mf_bits))
+ p_hwfn->p_cxt_mngr->arfs_count = count;
+
+ break;
}
default:
return ECORE_INVAL;
@@ -2220,34 +2221,3 @@ ecore_cxt_free_ilt_range(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
}
-
-enum _ecore_status_t ecore_cxt_free_proto_ilt(struct ecore_hwfn *p_hwfn,
- enum protocol_type proto)
-{
- enum _ecore_status_t rc;
- u32 cid;
-
- /* Free Connection CXT */
- rc = ecore_cxt_free_ilt_range(p_hwfn, ECORE_ELEM_CXT,
- ecore_cxt_get_proto_cid_start(p_hwfn,
- proto),
- ecore_cxt_get_proto_cid_count(p_hwfn,
- proto,
- &cid));
-
- if (rc)
- return rc;
-
- /* Free Task CXT */
- rc = ecore_cxt_free_ilt_range(p_hwfn, ECORE_ELEM_TASK, 0,
- ecore_cxt_get_proto_tid_count(p_hwfn,
- proto));
- if (rc)
- return rc;
-
- /* Free TSDM CXT */
- rc = ecore_cxt_free_ilt_range(p_hwfn, ECORE_ELEM_SRQ, 0,
- ecore_cxt_get_srq_count(p_hwfn));
-
- return rc;
-}
diff --git a/drivers/net/qede/base/ecore_cxt.h b/drivers/net/qede/base/ecore_cxt.h
index 6ff823a5..54761e4e 100644
--- a/drivers/net/qede/base/ecore_cxt.h
+++ b/drivers/net/qede/base/ecore_cxt.h
@@ -98,15 +98,17 @@ void ecore_cxt_hw_init_common(struct ecore_hwfn *p_hwfn);
 * @brief ecore_cxt_hw_init_pf - Initialize ILT and DQ, PF phase, per path.
*
* @param p_hwfn
+ * @param p_ptt
*/
-void ecore_cxt_hw_init_pf(struct ecore_hwfn *p_hwfn);
+void ecore_cxt_hw_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
/**
 * @brief ecore_qm_init_pf - Initialize the QM PF phase, per path
*
* @param p_hwfn
+ * @param p_ptt
*/
-void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn);
+void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
/**
* @brief Reconfigures QM pf on the fly
diff --git a/drivers/net/qede/base/ecore_dcbx.c b/drivers/net/qede/base/ecore_dcbx.c
index 4f1b0698..632297a7 100644
--- a/drivers/net/qede/base/ecore_dcbx.c
+++ b/drivers/net/qede/base/ecore_dcbx.c
@@ -28,13 +28,13 @@
static bool ecore_dcbx_app_ethtype(u32 app_info_bitmap)
{
- return !!(ECORE_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF) ==
+ return !!(GET_MFW_FIELD(app_info_bitmap, DCBX_APP_SF) ==
DCBX_APP_SF_ETHTYPE);
}
static bool ecore_dcbx_ieee_app_ethtype(u32 app_info_bitmap)
{
- u8 mfw_val = ECORE_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF_IEEE);
+ u8 mfw_val = GET_MFW_FIELD(app_info_bitmap, DCBX_APP_SF_IEEE);
/* Old MFW */
if (mfw_val == DCBX_APP_SF_IEEE_RESERVED)
@@ -45,13 +45,13 @@ static bool ecore_dcbx_ieee_app_ethtype(u32 app_info_bitmap)
static bool ecore_dcbx_app_port(u32 app_info_bitmap)
{
- return !!(ECORE_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF) ==
+ return !!(GET_MFW_FIELD(app_info_bitmap, DCBX_APP_SF) ==
DCBX_APP_SF_PORT);
}
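
The ECORE_MFW_GET_FIELD -> GET_MFW_FIELD renames that run through the rest of this file (together with the _SHIFT -> _OFFSET constant renames) are mechanical; both spellings wrap the usual mask-and-shift idiom over paired _MASK/_OFFSET constants. A sketch of that idiom under assumed names (the DEMO_* macros below are illustrative, not the literal ecore definitions):

    #define DEMO_FIELD_MASK   0x000000f0
    #define DEMO_FIELD_OFFSET 4

    /* read a field out of a packed register/shmem word */
    #define DEMO_GET_MFW_FIELD(val, field) \
            (((val) & field##_MASK) >> field##_OFFSET)

    /* replace a field inside a packed word, leaving the other bits intact */
    #define DEMO_SET_MFW_FIELD(var, field, v) \
            ((var) = ((var) & ~field##_MASK) | \
                     (((unsigned int)(v) << field##_OFFSET) & field##_MASK))
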
static bool ecore_dcbx_ieee_app_port(u32 app_info_bitmap, u8 type)
{
- u8 mfw_val = ECORE_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF_IEEE);
+ u8 mfw_val = GET_MFW_FIELD(app_info_bitmap, DCBX_APP_SF_IEEE);
/* Old MFW */
if (mfw_val == DCBX_APP_SF_IEEE_RESERVED)
@@ -114,6 +114,21 @@ ecore_dcbx_dp_protocol(struct ecore_hwfn *p_hwfn,
}
}
+u8 ecore_dcbx_get_dscp_value(struct ecore_hwfn *p_hwfn, u8 pri)
+{
+ struct ecore_dcbx_dscp_params *dscp = &p_hwfn->p_dcbx_info->get.dscp;
+ u8 i;
+
+ if (!dscp->enabled)
+ return ECORE_DCBX_DSCP_DISABLED;
+
+ for (i = 0; i < ECORE_DCBX_DSCP_SIZE; i++)
+ if (pri == dscp->dscp_pri_map[i])
+ return i;
+
+ return ECORE_DCBX_DSCP_DISABLED;
+}
+
static void
ecore_dcbx_set_params(struct ecore_dcbx_results *p_data,
struct ecore_hwfn *p_hwfn,
@@ -121,29 +136,18 @@ ecore_dcbx_set_params(struct ecore_dcbx_results *p_data,
enum dcbx_protocol_type type,
enum ecore_pci_personality personality)
{
- struct ecore_dcbx_dscp_params *dscp = &p_hwfn->p_dcbx_info->get.dscp;
-
/* PF update ramrod data */
p_data->arr[type].enable = enable;
p_data->arr[type].priority = prio;
p_data->arr[type].tc = tc;
- p_data->arr[type].dscp_enable = dscp->enabled;
- if (p_data->arr[type].dscp_enable) {
- u8 i;
-
- for (i = 0; i < ECORE_DCBX_DSCP_SIZE; i++)
- if (prio == dscp->dscp_pri_map[i]) {
- p_data->arr[type].dscp_val = i;
- break;
- }
+ p_data->arr[type].dscp_val = ecore_dcbx_get_dscp_value(p_hwfn, prio);
+ if (p_data->arr[type].dscp_val == ECORE_DCBX_DSCP_DISABLED) {
+ p_data->arr[type].dscp_enable = false;
+ p_data->arr[type].dscp_val = 0;
+ } else {
+ p_data->arr[type].dscp_enable = true;
}
-
- if (enable && p_data->arr[type].dscp_enable)
- p_data->arr[type].update = UPDATE_DCB_DSCP;
- else if (enable)
- p_data->arr[type].update = UPDATE_DCB;
- else
- p_data->arr[type].update = DONT_UPDATE_DCB_DSCP;
+ p_data->arr[type].update = UPDATE_DCB_DSCP;
/* QM reconf data */
if (p_hwfn->hw_info.personality == personality)
@@ -159,7 +163,6 @@ ecore_dcbx_update_app_info(struct ecore_dcbx_results *p_data,
{
enum ecore_pci_personality personality;
enum dcbx_protocol_type id;
- const char *name; /* @DPDK */
int i;
for (i = 0; i < OSAL_ARRAY_SIZE(ecore_dcbx_app_update); i++) {
@@ -169,7 +172,6 @@ ecore_dcbx_update_app_info(struct ecore_dcbx_results *p_data,
continue;
personality = ecore_dcbx_app_update[i].personality;
- name = ecore_dcbx_app_update[i].name;
ecore_dcbx_set_params(p_data, p_hwfn, enable,
prio, tc, type, personality);
@@ -224,7 +226,7 @@ ecore_dcbx_get_app_protocol_type(struct ecore_hwfn *p_hwfn,
return true;
}
-/* Parse app TLV's to update TC information in hw_info structure for
+/* Parse app TLV's to update TC information in hw_info structure for
* reconfiguring QM. Get protocol specific data for PF update ramrod command.
*/
static enum _ecore_status_t
@@ -234,8 +236,8 @@ ecore_dcbx_process_tlv(struct ecore_hwfn *p_hwfn,
int count, u8 dcbx_version)
{
enum dcbx_protocol_type type;
+ bool enable, ieee, eth_tlv;
u8 tc, priority_map;
- bool enable, ieee;
u16 protocol_id;
u8 priority;
enum _ecore_status_t rc = ECORE_SUCCESS;
@@ -246,12 +248,12 @@ ecore_dcbx_process_tlv(struct ecore_hwfn *p_hwfn,
count, pri_tc_tbl, dcbx_version);
ieee = (dcbx_version == DCBX_CONFIG_VERSION_IEEE);
+ eth_tlv = false;
/* Parse APP TLV */
for (i = 0; i < count; i++) {
- protocol_id = ECORE_MFW_GET_FIELD(p_tbl[i].entry,
- DCBX_APP_PROTOCOL_ID);
- priority_map = ECORE_MFW_GET_FIELD(p_tbl[i].entry,
- DCBX_APP_PRI_MAP);
+ protocol_id = GET_MFW_FIELD(p_tbl[i].entry,
+ DCBX_APP_PROTOCOL_ID);
+ priority_map = GET_MFW_FIELD(p_tbl[i].entry, DCBX_APP_PRI_MAP);
DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "Id = 0x%x pri_map = %u\n",
protocol_id, priority_map);
rc = ecore_dcbx_get_app_priority(priority_map, &priority);
@@ -270,12 +272,23 @@ ecore_dcbx_process_tlv(struct ecore_hwfn *p_hwfn,
* indication, but we only got here if there was an
* app tlv for the protocol, so dcbx must be enabled.
*/
- enable = !(type == DCBX_PROTOCOL_ETH);
+ if (type == DCBX_PROTOCOL_ETH) {
+ enable = false;
+ eth_tlv = true;
+ } else {
+ enable = true;
+ }
ecore_dcbx_update_app_info(p_data, p_hwfn, enable,
priority, tc, type);
}
}
+
+ /* If Eth TLV is not detected, use UFP TC as default TC */
+ if (OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC,
+ &p_hwfn->p_dev->mf_bits) && !eth_tlv)
+ p_data->arr[DCBX_PROTOCOL_ETH].tc = p_hwfn->ufp_info.tc;
+
/* Update ramrod protocol data and hw_info fields
* with default info when corresponding APP TLV's are not detected.
* The enabled field has a different logic for ethernet as only for
@@ -303,17 +316,17 @@ static enum _ecore_status_t
ecore_dcbx_process_mib_info(struct ecore_hwfn *p_hwfn)
{
struct dcbx_app_priority_feature *p_app;
- enum _ecore_status_t rc = ECORE_SUCCESS;
- struct ecore_dcbx_results data = { 0 };
struct dcbx_app_priority_entry *p_tbl;
+ struct ecore_dcbx_results data = { 0 };
struct dcbx_ets_feature *p_ets;
struct ecore_hw_info *p_info;
u32 pri_tc_tbl, flags;
u8 dcbx_version;
int num_entries;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
flags = p_hwfn->p_dcbx_info->operational.flags;
- dcbx_version = ECORE_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION);
+ dcbx_version = GET_MFW_FIELD(flags, DCBX_CONFIG_VERSION);
p_app = &p_hwfn->p_dcbx_info->operational.features.app;
p_tbl = p_app->app_pri_tbl;
@@ -322,16 +335,15 @@ ecore_dcbx_process_mib_info(struct ecore_hwfn *p_hwfn)
pri_tc_tbl = p_ets->pri_tc_tbl[0];
p_info = &p_hwfn->hw_info;
- num_entries = ECORE_MFW_GET_FIELD(p_app->flags, DCBX_APP_NUM_ENTRIES);
+ num_entries = GET_MFW_FIELD(p_app->flags, DCBX_APP_NUM_ENTRIES);
rc = ecore_dcbx_process_tlv(p_hwfn, &data, p_tbl, pri_tc_tbl,
num_entries, dcbx_version);
if (rc != ECORE_SUCCESS)
return rc;
- p_info->num_active_tc = ECORE_MFW_GET_FIELD(p_ets->flags,
- DCBX_ETS_MAX_TCS);
- p_hwfn->qm_info.ooo_tc = ECORE_MFW_GET_FIELD(p_ets->flags, DCBX_OOO_TC);
+ p_info->num_active_tc = GET_MFW_FIELD(p_ets->flags, DCBX_ETS_MAX_TCS);
+ p_hwfn->qm_info.ooo_tc = GET_MFW_FIELD(p_ets->flags, DCBX_OOO_TC);
data.pf_id = p_hwfn->rel_pf_id;
data.dcbx_enabled = !!dcbx_version;
@@ -349,9 +361,9 @@ ecore_dcbx_copy_mib(struct ecore_hwfn *p_hwfn,
struct ecore_dcbx_mib_meta_data *p_data,
enum ecore_mib_read_type type)
{
- enum _ecore_status_t rc = ECORE_SUCCESS;
u32 prefix_seq_num, suffix_seq_num;
int read_count = 0;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
/* The data is considered to be valid only if both sequence numbers are
* the same.
@@ -362,6 +374,12 @@ ecore_dcbx_copy_mib(struct ecore_hwfn *p_hwfn,
p_data->addr, p_data->size);
prefix_seq_num = p_data->lldp_remote->prefix_seq_num;
suffix_seq_num = p_data->lldp_remote->suffix_seq_num;
+ } else if (type == ECORE_DCBX_LLDP_TLVS) {
+ ecore_memcpy_from(p_hwfn, p_ptt, p_data->lldp_tlvs,
+ p_data->addr, p_data->size);
+ prefix_seq_num = p_data->lldp_tlvs->prefix_seq_num;
+ suffix_seq_num = p_data->lldp_tlvs->suffix_seq_num;
+
} else {
ecore_memcpy_from(p_hwfn, p_ptt, p_data->mib,
p_data->addr, p_data->size);
@@ -414,26 +432,24 @@ ecore_dcbx_get_app_data(struct ecore_hwfn *p_hwfn,
u8 pri_map;
int i;
- p_params->app_willing = ECORE_MFW_GET_FIELD(p_app->flags,
- DCBX_APP_WILLING);
- p_params->app_valid = ECORE_MFW_GET_FIELD(p_app->flags,
- DCBX_APP_ENABLED);
- p_params->app_error = ECORE_MFW_GET_FIELD(p_app->flags, DCBX_APP_ERROR);
- p_params->num_app_entries = ECORE_MFW_GET_FIELD(p_app->flags,
- DCBX_APP_NUM_ENTRIES);
- for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) {
+ p_params->app_willing = GET_MFW_FIELD(p_app->flags, DCBX_APP_WILLING);
+ p_params->app_valid = GET_MFW_FIELD(p_app->flags, DCBX_APP_ENABLED);
+ p_params->app_error = GET_MFW_FIELD(p_app->flags, DCBX_APP_ERROR);
+ p_params->num_app_entries = GET_MFW_FIELD(p_app->flags,
+ DCBX_APP_NUM_ENTRIES);
+ for (i = 0; i < p_params->num_app_entries; i++) {
entry = &p_params->app_entry[i];
if (ieee) {
u8 sf_ieee;
u32 val;
- sf_ieee = ECORE_MFW_GET_FIELD(p_tbl[i].entry,
- DCBX_APP_SF_IEEE);
+ sf_ieee = GET_MFW_FIELD(p_tbl[i].entry,
+ DCBX_APP_SF_IEEE);
switch (sf_ieee) {
case DCBX_APP_SF_IEEE_RESERVED:
/* Old MFW */
- val = ECORE_MFW_GET_FIELD(p_tbl[i].entry,
- DCBX_APP_SF);
+ val = GET_MFW_FIELD(p_tbl[i].entry,
+ DCBX_APP_SF);
entry->sf_ieee = val ?
ECORE_DCBX_SF_IEEE_TCP_UDP_PORT :
ECORE_DCBX_SF_IEEE_ETHTYPE;
@@ -453,14 +469,14 @@ ecore_dcbx_get_app_data(struct ecore_hwfn *p_hwfn,
break;
}
} else {
- entry->ethtype = !(ECORE_MFW_GET_FIELD(p_tbl[i].entry,
- DCBX_APP_SF));
+ entry->ethtype = !(GET_MFW_FIELD(p_tbl[i].entry,
+ DCBX_APP_SF));
}
- pri_map = ECORE_MFW_GET_FIELD(p_tbl[i].entry, DCBX_APP_PRI_MAP);
+ pri_map = GET_MFW_FIELD(p_tbl[i].entry, DCBX_APP_PRI_MAP);
ecore_dcbx_get_app_priority(pri_map, &entry->prio);
- entry->proto_id = ECORE_MFW_GET_FIELD(p_tbl[i].entry,
- DCBX_APP_PROTOCOL_ID);
+ entry->proto_id = GET_MFW_FIELD(p_tbl[i].entry,
+ DCBX_APP_PROTOCOL_ID);
ecore_dcbx_get_app_protocol_type(p_hwfn, p_tbl[i].entry,
entry->proto_id,
&entry->proto_type, ieee);
@@ -478,10 +494,10 @@ ecore_dcbx_get_pfc_data(struct ecore_hwfn *p_hwfn,
{
u8 pfc_map;
- p_params->pfc.willing = ECORE_MFW_GET_FIELD(pfc, DCBX_PFC_WILLING);
- p_params->pfc.max_tc = ECORE_MFW_GET_FIELD(pfc, DCBX_PFC_CAPS);
- p_params->pfc.enabled = ECORE_MFW_GET_FIELD(pfc, DCBX_PFC_ENABLED);
- pfc_map = ECORE_MFW_GET_FIELD(pfc, DCBX_PFC_PRI_EN_BITMAP);
+ p_params->pfc.willing = GET_MFW_FIELD(pfc, DCBX_PFC_WILLING);
+ p_params->pfc.max_tc = GET_MFW_FIELD(pfc, DCBX_PFC_CAPS);
+ p_params->pfc.enabled = GET_MFW_FIELD(pfc, DCBX_PFC_ENABLED);
+ pfc_map = GET_MFW_FIELD(pfc, DCBX_PFC_PRI_EN_BITMAP);
p_params->pfc.prio[0] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_0);
p_params->pfc.prio[1] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_1);
p_params->pfc.prio[2] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_2);
@@ -505,13 +521,10 @@ ecore_dcbx_get_ets_data(struct ecore_hwfn *p_hwfn,
u32 bw_map[2], tsa_map[2], pri_map;
int i;
- p_params->ets_willing = ECORE_MFW_GET_FIELD(p_ets->flags,
- DCBX_ETS_WILLING);
- p_params->ets_enabled = ECORE_MFW_GET_FIELD(p_ets->flags,
- DCBX_ETS_ENABLED);
- p_params->ets_cbs = ECORE_MFW_GET_FIELD(p_ets->flags, DCBX_ETS_CBS);
- p_params->max_ets_tc = ECORE_MFW_GET_FIELD(p_ets->flags,
- DCBX_ETS_MAX_TCS);
+ p_params->ets_willing = GET_MFW_FIELD(p_ets->flags, DCBX_ETS_WILLING);
+ p_params->ets_enabled = GET_MFW_FIELD(p_ets->flags, DCBX_ETS_ENABLED);
+ p_params->ets_cbs = GET_MFW_FIELD(p_ets->flags, DCBX_ETS_CBS);
+ p_params->max_ets_tc = GET_MFW_FIELD(p_ets->flags, DCBX_ETS_MAX_TCS);
DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
"ETS params: willing %d, enabled = %d ets_cbs %d pri_tc_tbl_0 %x max_ets_tc %d\n",
p_params->ets_willing, p_params->ets_enabled,
@@ -552,7 +565,6 @@ ecore_dcbx_get_common_params(struct ecore_hwfn *p_hwfn,
static void
ecore_dcbx_get_local_params(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
struct ecore_dcbx_get *params)
{
struct dcbx_features *p_feat;
@@ -566,7 +578,6 @@ ecore_dcbx_get_local_params(struct ecore_hwfn *p_hwfn,
static void
ecore_dcbx_get_remote_params(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
struct ecore_dcbx_get *params)
{
struct dcbx_features *p_feat;
@@ -579,9 +590,33 @@ ecore_dcbx_get_remote_params(struct ecore_hwfn *p_hwfn,
params->remote.valid = true;
}
-static enum _ecore_status_t
+static void ecore_dcbx_get_dscp_params(struct ecore_hwfn *p_hwfn,
+ struct ecore_dcbx_get *params)
+{
+ struct ecore_dcbx_dscp_params *p_dscp;
+ struct dcb_dscp_map *p_dscp_map;
+ int i, j, entry;
+ u32 pri_map;
+
+ p_dscp = &params->dscp;
+ p_dscp_map = &p_hwfn->p_dcbx_info->dscp_map;
+ p_dscp->enabled = GET_MFW_FIELD(p_dscp_map->flags, DCB_DSCP_ENABLE);
+
+	/* MFW encodes the 64 dscp entries into an 8-element array of u32s,
+	 * where each u32 holds the 4-bit priority map for 8 dscp entries.
+ */
+ for (i = 0, entry = 0; i < ECORE_DCBX_DSCP_SIZE / 8; i++) {
+ pri_map = OSAL_BE32_TO_CPU(p_dscp_map->dscp_pri_map[i]);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "elem %d pri_map 0x%x\n",
+ entry, pri_map);
+ for (j = 0; j < ECORE_DCBX_DSCP_SIZE / 8; j++, entry++)
+ p_dscp->dscp_pri_map[entry] = (u32)(pri_map >>
+ (j * 4)) & 0xf;
+ }
+}
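
Given that packing, the priority of a single codepoint can also be read directly instead of expanding the whole table; a minimal sketch of the index arithmetic, assuming map[] already holds the eight words after the OSAL_BE32_TO_CPU conversion above (dscp_to_pri is an illustrative helper, not an ecore function):

    /* 8 codepoints per u32, 4 priority bits per codepoint */
    static unsigned char dscp_to_pri(const unsigned int map[8], unsigned int dscp)
    {
            return (map[dscp / 8] >> ((dscp % 8) * 4)) & 0xf;
    }
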
+
+static void
ecore_dcbx_get_operational_params(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
struct ecore_dcbx_get *params)
{
struct ecore_dcbx_operational_params *p_operational;
@@ -597,27 +632,27 @@ ecore_dcbx_get_operational_params(struct ecore_hwfn *p_hwfn,
 * was successfully performed
*/
p_operational = &params->operational;
- enabled = !!(ECORE_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION) !=
+ enabled = !!(GET_MFW_FIELD(flags, DCBX_CONFIG_VERSION) !=
DCBX_CONFIG_VERSION_DISABLED);
if (!enabled) {
p_operational->enabled = enabled;
p_operational->valid = false;
DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "Dcbx is disabled\n");
- return ECORE_INVAL;
+ return;
}
p_feat = &p_hwfn->p_dcbx_info->operational.features;
p_results = &p_hwfn->p_dcbx_info->results;
- val = !!(ECORE_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION) ==
+ val = !!(GET_MFW_FIELD(flags, DCBX_CONFIG_VERSION) ==
DCBX_CONFIG_VERSION_IEEE);
p_operational->ieee = val;
- val = !!(ECORE_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION) ==
+ val = !!(GET_MFW_FIELD(flags, DCBX_CONFIG_VERSION) ==
DCBX_CONFIG_VERSION_CEE);
p_operational->cee = val;
- val = !!(ECORE_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION) ==
+ val = !!(GET_MFW_FIELD(flags, DCBX_CONFIG_VERSION) ==
DCBX_CONFIG_VERSION_STATIC);
p_operational->local = val;
@@ -632,45 +667,14 @@ ecore_dcbx_get_operational_params(struct ecore_hwfn *p_hwfn,
p_operational->ieee);
ecore_dcbx_get_priority_info(p_hwfn, &p_operational->app_prio,
p_results);
- err = ECORE_MFW_GET_FIELD(p_feat->app.flags, DCBX_APP_ERROR);
+ err = GET_MFW_FIELD(p_feat->app.flags, DCBX_APP_ERROR);
p_operational->err = err;
p_operational->enabled = enabled;
p_operational->valid = true;
-
- return ECORE_SUCCESS;
-}
-
-static void
-ecore_dcbx_get_dscp_params(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- struct ecore_dcbx_get *params)
-{
- struct ecore_dcbx_dscp_params *p_dscp;
- struct dcb_dscp_map *p_dscp_map;
- int i, j, entry;
- u32 pri_map;
-
- p_dscp = &params->dscp;
- p_dscp_map = &p_hwfn->p_dcbx_info->dscp_map;
- p_dscp->enabled = ECORE_MFW_GET_FIELD(p_dscp_map->flags,
- DCB_DSCP_ENABLE);
- /* MFW encodes 64 dscp entries into 8 element array of u32 entries,
- * where each entry holds the 4bit priority map for 8 dscp entries.
- */
- for (i = 0, entry = 0; i < ECORE_DCBX_DSCP_SIZE / 8; i++) {
- pri_map = OSAL_BE32_TO_CPU(p_dscp_map->dscp_pri_map[i]);
- DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "elem %d pri_map 0x%x\n",
- entry, pri_map);
- for (j = 0; j < ECORE_DCBX_DSCP_SIZE / 8; j++, entry++)
- p_dscp->dscp_pri_map[entry] = (u32)(pri_map >>
- (j * 4)) & 0xf;
- }
}
-static void
-ecore_dcbx_get_local_lldp_params(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- struct ecore_dcbx_get *params)
+static void ecore_dcbx_get_local_lldp_params(struct ecore_hwfn *p_hwfn,
+ struct ecore_dcbx_get *params)
{
struct lldp_config_params_s *p_local;
@@ -678,15 +682,13 @@ ecore_dcbx_get_local_lldp_params(struct ecore_hwfn *p_hwfn,
OSAL_MEMCPY(params->lldp_local.local_chassis_id,
p_local->local_chassis_id,
- OSAL_ARRAY_SIZE(p_local->local_chassis_id));
+ sizeof(params->lldp_local.local_chassis_id));
OSAL_MEMCPY(params->lldp_local.local_port_id, p_local->local_port_id,
- OSAL_ARRAY_SIZE(p_local->local_port_id));
+ sizeof(params->lldp_local.local_port_id));
}
-static void
-ecore_dcbx_get_remote_lldp_params(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- struct ecore_dcbx_get *params)
+static void ecore_dcbx_get_remote_lldp_params(struct ecore_hwfn *p_hwfn,
+ struct ecore_dcbx_get *params)
{
struct lldp_status_params_s *p_remote;
@@ -694,40 +696,38 @@ ecore_dcbx_get_remote_lldp_params(struct ecore_hwfn *p_hwfn,
OSAL_MEMCPY(params->lldp_remote.peer_chassis_id,
p_remote->peer_chassis_id,
- OSAL_ARRAY_SIZE(p_remote->peer_chassis_id));
+ sizeof(params->lldp_remote.peer_chassis_id));
OSAL_MEMCPY(params->lldp_remote.peer_port_id, p_remote->peer_port_id,
- OSAL_ARRAY_SIZE(p_remote->peer_port_id));
+ sizeof(params->lldp_remote.peer_port_id));
}
static enum _ecore_status_t
-ecore_dcbx_get_params(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ecore_dcbx_get_params(struct ecore_hwfn *p_hwfn,
struct ecore_dcbx_get *p_params,
enum ecore_mib_read_type type)
{
- enum _ecore_status_t rc = ECORE_SUCCESS;
-
switch (type) {
case ECORE_DCBX_REMOTE_MIB:
- ecore_dcbx_get_remote_params(p_hwfn, p_ptt, p_params);
+ ecore_dcbx_get_remote_params(p_hwfn, p_params);
break;
case ECORE_DCBX_LOCAL_MIB:
- ecore_dcbx_get_local_params(p_hwfn, p_ptt, p_params);
+ ecore_dcbx_get_local_params(p_hwfn, p_params);
break;
case ECORE_DCBX_OPERATIONAL_MIB:
- ecore_dcbx_get_operational_params(p_hwfn, p_ptt, p_params);
+ ecore_dcbx_get_operational_params(p_hwfn, p_params);
break;
case ECORE_DCBX_REMOTE_LLDP_MIB:
- ecore_dcbx_get_remote_lldp_params(p_hwfn, p_ptt, p_params);
+ ecore_dcbx_get_remote_lldp_params(p_hwfn, p_params);
break;
case ECORE_DCBX_LOCAL_LLDP_MIB:
- ecore_dcbx_get_local_lldp_params(p_hwfn, p_ptt, p_params);
+ ecore_dcbx_get_local_lldp_params(p_hwfn, p_params);
break;
default:
DP_ERR(p_hwfn, "MIB read err, unknown mib type %d\n", type);
return ECORE_INVAL;
}
- return rc;
+ return ECORE_SUCCESS;
}
static enum _ecore_status_t
@@ -857,7 +857,7 @@ static enum _ecore_status_t ecore_dcbx_read_mib(struct ecore_hwfn *p_hwfn,
DP_ERR(p_hwfn, "MIB read err, unknown mib type %d\n", type);
}
- return rc;
+ return ECORE_SUCCESS;
}
/*
@@ -876,33 +876,28 @@ ecore_dcbx_mib_update_event(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
return rc;
if (type == ECORE_DCBX_OPERATIONAL_MIB) {
- ecore_dcbx_get_dscp_params(p_hwfn, p_ptt,
- &p_hwfn->p_dcbx_info->get);
+ ecore_dcbx_get_dscp_params(p_hwfn, &p_hwfn->p_dcbx_info->get);
rc = ecore_dcbx_process_mib_info(p_hwfn);
if (!rc) {
- bool enabled;
-
/* reconfigure tcs of QM queues according
* to negotiation results
*/
ecore_qm_reconf(p_hwfn, p_ptt);
/* update storm FW with negotiation results */
- ecore_sp_pf_update(p_hwfn);
-
- /* set eagle enigne 1 flow control workaround
- * according to negotiation results
- */
- enabled = p_hwfn->p_dcbx_info->results.dcbx_enabled;
+ ecore_sp_pf_update_dcbx(p_hwfn);
}
}
- ecore_dcbx_get_params(p_hwfn, p_ptt, &p_hwfn->p_dcbx_info->get, type);
+
+ ecore_dcbx_get_params(p_hwfn, &p_hwfn->p_dcbx_info->get, type);
/* Update the DSCP to TC mapping bit if required */
if ((type == ECORE_DCBX_OPERATIONAL_MIB) &&
p_hwfn->p_dcbx_info->dscp_nig_update) {
- ecore_wr(p_hwfn, p_ptt, NIG_REG_DSCP_TO_TC_MAP_ENABLE, 0x1);
+ u8 val = !!p_hwfn->p_dcbx_info->get.dscp.enabled;
+
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_DSCP_TO_TC_MAP_ENABLE, val);
p_hwfn->p_dcbx_info->dscp_nig_update = false;
}
@@ -927,8 +922,7 @@ enum _ecore_status_t ecore_dcbx_info_alloc(struct ecore_hwfn *p_hwfn)
return ECORE_SUCCESS;
}
-void ecore_dcbx_info_free(struct ecore_hwfn *p_hwfn,
- struct ecore_dcbx_info *p_dcbx_info)
+void ecore_dcbx_info_free(struct ecore_hwfn *p_hwfn)
{
OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_dcbx_info);
}
@@ -951,8 +945,6 @@ void ecore_dcbx_set_pf_update_params(struct ecore_dcbx_results *p_src,
struct protocol_dcb_data *p_dcb_data;
u8 update_flag;
- p_dest->pf_id = p_src->pf_id;
-
update_flag = p_src->arr[DCBX_PROTOCOL_ETH].update;
p_dest->update_eth_dcb_data_mode = update_flag;
update_flag = p_src->arr[DCBX_PROTOCOL_IWARP].update;
@@ -975,17 +967,16 @@ enum _ecore_status_t ecore_dcbx_query_params(struct ecore_hwfn *p_hwfn,
return ECORE_INVAL;
p_ptt = ecore_ptt_acquire(p_hwfn);
- if (!p_ptt) {
- rc = ECORE_TIMEOUT;
- DP_ERR(p_hwfn, "rc = %d\n", rc);
- return rc;
- }
+ if (!p_ptt)
+ return ECORE_TIMEOUT;
rc = ecore_dcbx_read_mib(p_hwfn, p_ptt, type);
if (rc != ECORE_SUCCESS)
goto out;
- rc = ecore_dcbx_get_params(p_hwfn, p_ptt, p_get, type);
+ ecore_dcbx_get_dscp_params(p_hwfn, p_get);
+
+ rc = ecore_dcbx_get_params(p_hwfn, p_get, type);
out:
ecore_ptt_release(p_hwfn, p_ptt);
@@ -1010,13 +1001,13 @@ ecore_dcbx_set_pfc_data(struct ecore_hwfn *p_hwfn,
*pfc &= ~DCBX_PFC_ENABLED_MASK;
*pfc &= ~DCBX_PFC_CAPS_MASK;
- *pfc |= (u32)p_params->pfc.max_tc << DCBX_PFC_CAPS_SHIFT;
+ *pfc |= (u32)p_params->pfc.max_tc << DCBX_PFC_CAPS_OFFSET;
for (i = 0; i < ECORE_MAX_PFC_PRIORITIES; i++)
if (p_params->pfc.prio[i])
pfc_map |= (1 << i);
*pfc &= ~DCBX_PFC_PRI_EN_BITMAP_MASK;
- *pfc |= (pfc_map << DCBX_PFC_PRI_EN_BITMAP_SHIFT);
+ *pfc |= (pfc_map << DCBX_PFC_PRI_EN_BITMAP_OFFSET);
DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "pfc = 0x%x\n", *pfc);
}
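
The PRI_EN bitmap written here is a plain one-bit-per-priority encoding, and the get path earlier in this file decodes it with matching per-bit tests. A sketch of the round trip, assuming C99 bool and a caller-owned prio[] array as in struct ecore_dcbx_params:

    #include <stdbool.h>

    static void pfc_bitmap_roundtrip(bool prio[ECORE_MAX_PFC_PRIORITIES])
    {
            unsigned char pfc_map = 0;
            int i;

            /* encode, as in ecore_dcbx_set_pfc_data */
            for (i = 0; i < ECORE_MAX_PFC_PRIORITIES; i++)
                    if (prio[i])
                            pfc_map |= 1U << i;

            /* decode, as in ecore_dcbx_get_pfc_data */
            for (i = 0; i < ECORE_MAX_PFC_PRIORITIES; i++)
                    prio[i] = !!(pfc_map & (1U << i));
    }
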
@@ -1046,7 +1037,7 @@ ecore_dcbx_set_ets_data(struct ecore_hwfn *p_hwfn,
p_ets->flags &= ~DCBX_ETS_ENABLED_MASK;
p_ets->flags &= ~DCBX_ETS_MAX_TCS_MASK;
- p_ets->flags |= (u32)p_params->max_ets_tc << DCBX_ETS_MAX_TCS_SHIFT;
+ p_ets->flags |= (u32)p_params->max_ets_tc << DCBX_ETS_MAX_TCS_OFFSET;
bw_map = (u8 *)&p_ets->tc_bw_tbl[0];
tsa_map = (u8 *)&p_ets->tc_tsa_tbl[0];
@@ -1092,9 +1083,9 @@ ecore_dcbx_set_app_data(struct ecore_hwfn *p_hwfn,
p_app->flags &= ~DCBX_APP_NUM_ENTRIES_MASK;
p_app->flags |= (u32)p_params->num_app_entries <<
- DCBX_APP_NUM_ENTRIES_SHIFT;
+ DCBX_APP_NUM_ENTRIES_OFFSET;
- for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) {
+ for (i = 0; i < p_params->num_app_entries; i++) {
entry = &p_app->app_pri_tbl[i].entry;
*entry = 0;
if (ieee) {
@@ -1102,50 +1093,50 @@ ecore_dcbx_set_app_data(struct ecore_hwfn *p_hwfn,
switch (p_params->app_entry[i].sf_ieee) {
case ECORE_DCBX_SF_IEEE_ETHTYPE:
*entry |= ((u32)DCBX_APP_SF_IEEE_ETHTYPE <<
- DCBX_APP_SF_IEEE_SHIFT);
+ DCBX_APP_SF_IEEE_OFFSET);
*entry |= ((u32)DCBX_APP_SF_ETHTYPE <<
- DCBX_APP_SF_SHIFT);
+ DCBX_APP_SF_OFFSET);
break;
case ECORE_DCBX_SF_IEEE_TCP_PORT:
*entry |= ((u32)DCBX_APP_SF_IEEE_TCP_PORT <<
- DCBX_APP_SF_IEEE_SHIFT);
+ DCBX_APP_SF_IEEE_OFFSET);
*entry |= ((u32)DCBX_APP_SF_PORT <<
- DCBX_APP_SF_SHIFT);
+ DCBX_APP_SF_OFFSET);
break;
case ECORE_DCBX_SF_IEEE_UDP_PORT:
*entry |= ((u32)DCBX_APP_SF_IEEE_UDP_PORT <<
- DCBX_APP_SF_IEEE_SHIFT);
+ DCBX_APP_SF_IEEE_OFFSET);
*entry |= ((u32)DCBX_APP_SF_PORT <<
- DCBX_APP_SF_SHIFT);
+ DCBX_APP_SF_OFFSET);
break;
case ECORE_DCBX_SF_IEEE_TCP_UDP_PORT:
*entry |= (u32)DCBX_APP_SF_IEEE_TCP_UDP_PORT <<
- DCBX_APP_SF_IEEE_SHIFT;
+ DCBX_APP_SF_IEEE_OFFSET;
*entry |= ((u32)DCBX_APP_SF_PORT <<
- DCBX_APP_SF_SHIFT);
+ DCBX_APP_SF_OFFSET);
break;
}
} else {
*entry &= ~DCBX_APP_SF_MASK;
if (p_params->app_entry[i].ethtype)
*entry |= ((u32)DCBX_APP_SF_ETHTYPE <<
- DCBX_APP_SF_SHIFT);
+ DCBX_APP_SF_OFFSET);
else
*entry |= ((u32)DCBX_APP_SF_PORT <<
- DCBX_APP_SF_SHIFT);
+ DCBX_APP_SF_OFFSET);
}
*entry &= ~DCBX_APP_PROTOCOL_ID_MASK;
*entry |= ((u32)p_params->app_entry[i].proto_id <<
- DCBX_APP_PROTOCOL_ID_SHIFT);
+ DCBX_APP_PROTOCOL_ID_OFFSET);
*entry &= ~DCBX_APP_PRI_MAP_MASK;
*entry |= ((u32)(p_params->app_entry[i].prio) <<
- DCBX_APP_PRI_MAP_SHIFT);
+ DCBX_APP_PRI_MAP_OFFSET);
}
DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "flags = 0x%x\n", p_app->flags);
}
-static enum _ecore_status_t
+static void
ecore_dcbx_set_local_params(struct ecore_hwfn *p_hwfn,
struct dcbx_local_params *local_admin,
struct ecore_dcbx_set *params)
@@ -1164,6 +1155,9 @@ ecore_dcbx_set_local_params(struct ecore_hwfn *p_hwfn,
local_admin->config = DCBX_CONFIG_VERSION_DISABLED;
}
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "Dcbx version = %d\n",
+ local_admin->config);
+
if (params->override_flags & ECORE_DCBX_OVERRIDE_PFC_CFG)
ecore_dcbx_set_pfc_data(p_hwfn, &local_admin->features.pfc,
&params->config.params);
@@ -1175,8 +1169,6 @@ ecore_dcbx_set_local_params(struct ecore_hwfn *p_hwfn,
if (params->override_flags & ECORE_DCBX_OVERRIDE_APP_CFG)
ecore_dcbx_set_app_data(p_hwfn, &local_admin->features.app,
&params->config.params, ieee);
-
- return ECORE_SUCCESS;
}
static enum _ecore_status_t
@@ -1206,6 +1198,12 @@ ecore_dcbx_set_dscp_params(struct ecore_hwfn *p_hwfn,
p_hwfn->p_dcbx_info->dscp_nig_update = true;
DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "flags = 0x%x\n", p_dscp_map->flags);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
+ "pri_map[] = 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
+ p_dscp_map->dscp_pri_map[0], p_dscp_map->dscp_pri_map[1],
+ p_dscp_map->dscp_pri_map[2], p_dscp_map->dscp_pri_map[3],
+ p_dscp_map->dscp_pri_map[4], p_dscp_map->dscp_pri_map[5],
+ p_dscp_map->dscp_pri_map[6], p_dscp_map->dscp_pri_map[7]);
return ECORE_SUCCESS;
}
@@ -1221,15 +1219,10 @@ enum _ecore_status_t ecore_dcbx_config_params(struct ecore_hwfn *p_hwfn,
u32 resp = 0, param = 0;
enum _ecore_status_t rc = ECORE_SUCCESS;
- if (!hw_commit) {
- OSAL_MEMCPY(&p_hwfn->p_dcbx_info->set, params,
- sizeof(p_hwfn->p_dcbx_info->set));
+ OSAL_MEMCPY(&p_hwfn->p_dcbx_info->set, params,
+ sizeof(p_hwfn->p_dcbx_info->set));
+ if (!hw_commit)
return ECORE_SUCCESS;
- }
-
- /* clear set-parmas cache */
- OSAL_MEMSET(&p_hwfn->p_dcbx_info->set, 0,
- sizeof(struct ecore_dcbx_set));
OSAL_MEMSET(&local_admin, 0, sizeof(local_admin));
ecore_dcbx_set_local_params(p_hwfn, &local_admin, params);
@@ -1253,12 +1246,10 @@ enum _ecore_status_t ecore_dcbx_config_params(struct ecore_hwfn *p_hwfn,
}
rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_DCBX,
- 1 << DRV_MB_PARAM_LLDP_SEND_SHIFT, &resp, &param);
- if (rc != ECORE_SUCCESS) {
+ 1 << DRV_MB_PARAM_LLDP_SEND_OFFSET, &resp, &param);
+ if (rc != ECORE_SUCCESS)
DP_NOTICE(p_hwfn, false,
"Failed to send DCBX update request\n");
- return rc;
- }
return rc;
}
@@ -1277,10 +1268,8 @@ enum _ecore_status_t ecore_dcbx_get_config_params(struct ecore_hwfn *p_hwfn,
dcbx_info = OSAL_ALLOC(p_hwfn->p_dev, GFP_KERNEL,
sizeof(*dcbx_info));
- if (!dcbx_info) {
- DP_ERR(p_hwfn, "Failed to allocate struct ecore_dcbx_info\n");
+ if (!dcbx_info)
return ECORE_NOMEM;
- }
OSAL_MEMSET(dcbx_info, 0, sizeof(*dcbx_info));
rc = ecore_dcbx_query_params(p_hwfn, dcbx_info,
@@ -1300,9 +1289,12 @@ enum _ecore_status_t ecore_dcbx_get_config_params(struct ecore_hwfn *p_hwfn,
p_hwfn->p_dcbx_info->set.ver_num |= DCBX_CONFIG_VERSION_STATIC;
p_hwfn->p_dcbx_info->set.enabled = dcbx_info->operational.enabled;
+ OSAL_MEMCPY(&p_hwfn->p_dcbx_info->set.dscp,
+ &p_hwfn->p_dcbx_info->get.dscp,
+ sizeof(struct ecore_dcbx_dscp_params));
OSAL_MEMCPY(&p_hwfn->p_dcbx_info->set.config.params,
&dcbx_info->operational.params,
- sizeof(struct ecore_dcbx_admin_params));
+ sizeof(p_hwfn->p_dcbx_info->set.config.params));
p_hwfn->p_dcbx_info->set.config.valid = true;
OSAL_MEMCPY(params, &p_hwfn->p_dcbx_info->set,
@@ -1312,3 +1304,230 @@ enum _ecore_status_t ecore_dcbx_get_config_params(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
}
+
+enum _ecore_status_t ecore_lldp_register_tlv(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ enum ecore_lldp_agent agent,
+ u8 tlv_type)
+{
+ u32 mb_param = 0, mcp_resp = 0, mcp_param = 0, val = 0;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ switch (agent) {
+ case ECORE_LLDP_NEAREST_BRIDGE:
+ val = LLDP_NEAREST_BRIDGE;
+ break;
+ case ECORE_LLDP_NEAREST_NON_TPMR_BRIDGE:
+ val = LLDP_NEAREST_NON_TPMR_BRIDGE;
+ break;
+ case ECORE_LLDP_NEAREST_CUSTOMER_BRIDGE:
+ val = LLDP_NEAREST_CUSTOMER_BRIDGE;
+ break;
+ default:
+ DP_ERR(p_hwfn, "Invalid agent type %d\n", agent);
+ return ECORE_INVAL;
+ }
+
+ SET_MFW_FIELD(mb_param, DRV_MB_PARAM_LLDP_AGENT, val);
+ SET_MFW_FIELD(mb_param, DRV_MB_PARAM_LLDP_TLV_RX_TYPE, tlv_type);
+
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_REGISTER_LLDP_TLVS_RX,
+ mb_param, &mcp_resp, &mcp_param);
+ if (rc != ECORE_SUCCESS)
+ DP_NOTICE(p_hwfn, false, "Failed to register TLV\n");
+
+ return rc;
+}
+
+enum _ecore_status_t
+ecore_lldp_mib_update_event(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+ struct ecore_dcbx_mib_meta_data data;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ struct lldp_received_tlvs_s tlvs;
+	int i, j;
+
+ for (i = 0; i < LLDP_MAX_LLDP_AGENTS; i++) {
+ OSAL_MEM_ZERO(&data, sizeof(data));
+ data.addr = p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port, lldp_received_tlvs[i]);
+ data.lldp_tlvs = &tlvs;
+ data.size = sizeof(tlvs);
+ rc = ecore_dcbx_copy_mib(p_hwfn, p_ptt, &data,
+ ECORE_DCBX_LLDP_TLVS);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, false, "Failed to read lldp TLVs\n");
+ return rc;
+ }
+
+ if (!tlvs.length)
+ continue;
+
+		for (j = 0; j < MAX_TLV_BUFFER; j++)
+			tlvs.tlvs_buffer[j] =
+				OSAL_CPU_TO_BE32(tlvs.tlvs_buffer[j]);
+
+ OSAL_LLDP_RX_TLVS(p_hwfn, tlvs.tlvs_buffer, tlvs.length);
+ }
+
+ return rc;
+}
+
+enum _ecore_status_t
+ecore_lldp_get_params(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_lldp_config_params *p_params)
+{
+ struct lldp_config_params_s lldp_params;
+ u32 addr, val;
+ int i;
+
+ switch (p_params->agent) {
+ case ECORE_LLDP_NEAREST_BRIDGE:
+ val = LLDP_NEAREST_BRIDGE;
+ break;
+ case ECORE_LLDP_NEAREST_NON_TPMR_BRIDGE:
+ val = LLDP_NEAREST_NON_TPMR_BRIDGE;
+ break;
+ case ECORE_LLDP_NEAREST_CUSTOMER_BRIDGE:
+ val = LLDP_NEAREST_CUSTOMER_BRIDGE;
+ break;
+ default:
+ DP_ERR(p_hwfn, "Invalid agent type %d\n", p_params->agent);
+ return ECORE_INVAL;
+ }
+
+ addr = p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port, lldp_config_params[val]);
+
+ ecore_memcpy_from(p_hwfn, p_ptt, &lldp_params, addr,
+ sizeof(lldp_params));
+
+ p_params->tx_interval = GET_MFW_FIELD(lldp_params.config,
+ LLDP_CONFIG_TX_INTERVAL);
+ p_params->tx_hold = GET_MFW_FIELD(lldp_params.config, LLDP_CONFIG_HOLD);
+ p_params->tx_credit = GET_MFW_FIELD(lldp_params.config,
+ LLDP_CONFIG_MAX_CREDIT);
+ p_params->rx_enable = GET_MFW_FIELD(lldp_params.config,
+ LLDP_CONFIG_ENABLE_RX);
+ p_params->tx_enable = GET_MFW_FIELD(lldp_params.config,
+ LLDP_CONFIG_ENABLE_TX);
+
+ OSAL_MEMCPY(p_params->chassis_id_tlv, lldp_params.local_chassis_id,
+ sizeof(p_params->chassis_id_tlv));
+ for (i = 0; i < ECORE_LLDP_CHASSIS_ID_STAT_LEN; i++)
+ p_params->chassis_id_tlv[i] =
+ OSAL_BE32_TO_CPU(p_params->chassis_id_tlv[i]);
+
+ OSAL_MEMCPY(p_params->port_id_tlv, lldp_params.local_port_id,
+ sizeof(p_params->port_id_tlv));
+ for (i = 0; i < ECORE_LLDP_PORT_ID_STAT_LEN; i++)
+ p_params->port_id_tlv[i] =
+ OSAL_BE32_TO_CPU(p_params->port_id_tlv[i]);
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t
+ecore_lldp_set_params(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_lldp_config_params *p_params)
+{
+ u32 mb_param = 0, mcp_resp = 0, mcp_param = 0;
+ struct lldp_config_params_s lldp_params;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ u32 addr, val;
+ int i;
+
+ switch (p_params->agent) {
+ case ECORE_LLDP_NEAREST_BRIDGE:
+ val = LLDP_NEAREST_BRIDGE;
+ break;
+ case ECORE_LLDP_NEAREST_NON_TPMR_BRIDGE:
+ val = LLDP_NEAREST_NON_TPMR_BRIDGE;
+ break;
+ case ECORE_LLDP_NEAREST_CUSTOMER_BRIDGE:
+ val = LLDP_NEAREST_CUSTOMER_BRIDGE;
+ break;
+ default:
+ DP_ERR(p_hwfn, "Invalid agent type %d\n", p_params->agent);
+ return ECORE_INVAL;
+ }
+
+ SET_MFW_FIELD(mb_param, DRV_MB_PARAM_LLDP_AGENT, val);
+ addr = p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port, lldp_config_params[val]);
+
+ OSAL_MEMSET(&lldp_params, 0, sizeof(lldp_params));
+ SET_MFW_FIELD(lldp_params.config, LLDP_CONFIG_TX_INTERVAL,
+ p_params->tx_interval);
+ SET_MFW_FIELD(lldp_params.config, LLDP_CONFIG_HOLD, p_params->tx_hold);
+ SET_MFW_FIELD(lldp_params.config, LLDP_CONFIG_MAX_CREDIT,
+ p_params->tx_credit);
+ SET_MFW_FIELD(lldp_params.config, LLDP_CONFIG_ENABLE_RX,
+ !!p_params->rx_enable);
+ SET_MFW_FIELD(lldp_params.config, LLDP_CONFIG_ENABLE_TX,
+ !!p_params->tx_enable);
+
+ for (i = 0; i < ECORE_LLDP_CHASSIS_ID_STAT_LEN; i++)
+ p_params->chassis_id_tlv[i] =
+ OSAL_CPU_TO_BE32(p_params->chassis_id_tlv[i]);
+ OSAL_MEMCPY(lldp_params.local_chassis_id, p_params->chassis_id_tlv,
+ sizeof(lldp_params.local_chassis_id));
+
+ for (i = 0; i < ECORE_LLDP_PORT_ID_STAT_LEN; i++)
+ p_params->port_id_tlv[i] =
+ OSAL_CPU_TO_BE32(p_params->port_id_tlv[i]);
+ OSAL_MEMCPY(lldp_params.local_port_id, p_params->port_id_tlv,
+ sizeof(lldp_params.local_port_id));
+
+ ecore_memcpy_to(p_hwfn, p_ptt, addr, &lldp_params, sizeof(lldp_params));
+
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LLDP,
+ mb_param, &mcp_resp, &mcp_param);
+ if (rc != ECORE_SUCCESS)
+ DP_NOTICE(p_hwfn, false, "SET_LLDP failed, error = %d\n", rc);
+
+ return rc;
+}
+
+enum _ecore_status_t
+ecore_lldp_set_system_tlvs(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_lldp_sys_tlvs *p_params)
+{
+ u32 mb_param = 0, mcp_resp = 0, mcp_param = 0;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ struct lldp_system_tlvs_buffer_s lld_tlv_buf;
+ u32 addr, *p_val;
+ u8 len;
+ int i;
+
+ p_val = (u32 *)p_params->buf;
+ for (i = 0; i < ECORE_LLDP_SYS_TLV_SIZE / 4; i++)
+ p_val[i] = OSAL_CPU_TO_BE32(p_val[i]);
+
+ OSAL_MEMSET(&lld_tlv_buf, 0, sizeof(lld_tlv_buf));
+ SET_MFW_FIELD(lld_tlv_buf.flags, LLDP_SYSTEM_TLV_VALID, 1);
+ SET_MFW_FIELD(lld_tlv_buf.flags, LLDP_SYSTEM_TLV_MANDATORY,
+ !!p_params->discard_mandatory_tlv);
+ SET_MFW_FIELD(lld_tlv_buf.flags, LLDP_SYSTEM_TLV_LENGTH,
+ p_params->buf_size);
+ len = ECORE_LLDP_SYS_TLV_SIZE / 2;
+ OSAL_MEMCPY(lld_tlv_buf.data, p_params->buf, len);
+
+ addr = p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port, system_lldp_tlvs_buf);
+ ecore_memcpy_to(p_hwfn, p_ptt, addr, &lld_tlv_buf, sizeof(lld_tlv_buf));
+
+ if (p_params->buf_size > len) {
+ addr = p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port, system_lldp_tlvs_buf2);
+ ecore_memcpy_to(p_hwfn, p_ptt, addr, &p_params->buf[len],
+ ECORE_LLDP_SYS_TLV_SIZE / 2);
+ }
+
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LLDP,
+ mb_param, &mcp_resp, &mcp_param);
+ if (rc != ECORE_SUCCESS)
+ DP_NOTICE(p_hwfn, false, "SET_LLDP failed, error = %d\n", rc);
+
+ return rc;
+}
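
Taken together, the routines added above form a small read-modify-write API over the per-agent LLDP configuration kept in the port's shmem. A hedged usage sketch (the p_hwfn/p_ptt handles and the surrounding error handling are assumed to come from the caller's context; the chosen tx_interval value is arbitrary):

    struct ecore_lldp_config_params cfg;

    OSAL_MEMSET(&cfg, 0, sizeof(cfg));
    cfg.agent = ECORE_LLDP_NEAREST_BRIDGE;
    if (ecore_lldp_get_params(p_hwfn, p_ptt, &cfg) != ECORE_SUCCESS)
            return;

    /* tighten the TX interval, keep everything else as read */
    cfg.tx_interval = 5;
    cfg.tx_enable = true;
    (void)ecore_lldp_set_params(p_hwfn, p_ptt, &cfg);
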
diff --git a/drivers/net/qede/base/ecore_dcbx.h b/drivers/net/qede/base/ecore_dcbx.h
index eba2d91b..469e42dd 100644
--- a/drivers/net/qede/base/ecore_dcbx.h
+++ b/drivers/net/qede/base/ecore_dcbx.h
@@ -17,6 +17,8 @@
#include "ecore_hsi_common.h"
#include "ecore_dcbx_api.h"
+#define ECORE_DCBX_DSCP_DISABLED 0XFF
+
struct ecore_dcbx_info {
struct lldp_status_params_s lldp_remote[LLDP_MAX_LLDP_AGENTS];
struct lldp_config_params_s lldp_local[LLDP_MAX_LLDP_AGENTS];
@@ -35,6 +37,7 @@ struct ecore_dcbx_info {
struct ecore_dcbx_mib_meta_data {
struct lldp_config_params_s *lldp_local;
struct lldp_status_params_s *lldp_remote;
+ struct lldp_received_tlvs_s *lldp_tlvs;
struct dcbx_local_params *local_admin;
struct dcb_dscp_map *dscp_map;
struct dcbx_mib *mib;
@@ -47,11 +50,15 @@ enum _ecore_status_t
ecore_dcbx_mib_update_event(struct ecore_hwfn *, struct ecore_ptt *,
enum ecore_mib_read_type);
-enum _ecore_status_t ecore_dcbx_read_lldp_params(struct ecore_hwfn *,
- struct ecore_ptt *);
enum _ecore_status_t ecore_dcbx_info_alloc(struct ecore_hwfn *p_hwfn);
-void ecore_dcbx_info_free(struct ecore_hwfn *, struct ecore_dcbx_info *);
+void ecore_dcbx_info_free(struct ecore_hwfn *p_hwfn);
void ecore_dcbx_set_pf_update_params(struct ecore_dcbx_results *p_src,
struct pf_update_ramrod_data *p_dest);
+/* Returns the DSCP value for a given priority */
+u8 ecore_dcbx_get_dscp_value(struct ecore_hwfn *p_hwfn, u8 pri);
+
+enum _ecore_status_t
+ecore_lldp_mib_update_event(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
+
#endif /* __ECORE_DCBX_H__ */
diff --git a/drivers/net/qede/base/ecore_dcbx_api.h b/drivers/net/qede/base/ecore_dcbx_api.h
index 2dc76796..9ff4df4c 100644
--- a/drivers/net/qede/base/ecore_dcbx_api.h
+++ b/drivers/net/qede/base/ecore_dcbx_api.h
@@ -18,7 +18,8 @@ enum ecore_mib_read_type {
ECORE_DCBX_REMOTE_MIB,
ECORE_DCBX_LOCAL_MIB,
ECORE_DCBX_REMOTE_LLDP_MIB,
- ECORE_DCBX_LOCAL_LLDP_MIB
+ ECORE_DCBX_LOCAL_LLDP_MIB,
+ ECORE_DCBX_LLDP_TLVS
};
struct ecore_dcbx_app_data {
@@ -101,7 +102,6 @@ struct ecore_dcbx_params {
bool ets_willing;
bool ets_enabled;
bool ets_cbs;
- bool valid; /* Indicate validity of params */
u8 ets_pri_tc_tbl[ECORE_MAX_PFC_PRIORITIES];
u8 ets_tc_bw_tbl[ECORE_MAX_PFC_PRIORITIES];
u8 ets_tc_tsa_tbl[ECORE_MAX_PFC_PRIORITIES];
@@ -175,6 +175,31 @@ struct ecore_dcbx_app_metadata {
enum ecore_pci_personality personality;
};
+enum ecore_lldp_agent {
+ ECORE_LLDP_NEAREST_BRIDGE = 0,
+ ECORE_LLDP_NEAREST_NON_TPMR_BRIDGE,
+ ECORE_LLDP_NEAREST_CUSTOMER_BRIDGE,
+ ECORE_LLDP_MAX_AGENTS
+};
+
+struct ecore_lldp_config_params {
+ enum ecore_lldp_agent agent;
+ u8 tx_interval;
+ u8 tx_hold;
+ u8 tx_credit;
+ bool rx_enable;
+ bool tx_enable;
+ u32 chassis_id_tlv[ECORE_LLDP_CHASSIS_ID_STAT_LEN];
+ u32 port_id_tlv[ECORE_LLDP_PORT_ID_STAT_LEN];
+};
+
+#define ECORE_LLDP_SYS_TLV_SIZE 256
+struct ecore_lldp_sys_tlvs {
+ bool discard_mandatory_tlv;
+ u8 buf[ECORE_LLDP_SYS_TLV_SIZE];
+ u16 buf_size;
+};
+
enum _ecore_status_t ecore_dcbx_query_params(struct ecore_hwfn *,
struct ecore_dcbx_get *,
enum ecore_mib_read_type);
@@ -187,6 +212,23 @@ enum _ecore_status_t ecore_dcbx_config_params(struct ecore_hwfn *,
struct ecore_dcbx_set *,
bool);
+enum _ecore_status_t ecore_lldp_register_tlv(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ enum ecore_lldp_agent agent,
+ u8 tlv_type);
+
+enum _ecore_status_t
+ecore_lldp_get_params(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_lldp_config_params *p_params);
+
+enum _ecore_status_t
+ecore_lldp_set_params(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_lldp_config_params *p_params);
+
+enum _ecore_status_t
+ecore_lldp_set_system_tlvs(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_lldp_sys_tlvs *p_params);
+
static const struct ecore_dcbx_app_metadata ecore_dcbx_app_update[] = {
{DCBX_PROTOCOL_ISCSI, "ISCSI", ECORE_PCI_ISCSI},
{DCBX_PROTOCOL_FCOE, "FCOE", ECORE_PCI_FCOE},
diff --git a/drivers/net/qede/base/ecore_dev.c b/drivers/net/qede/base/ecore_dev.c
index 65b89b8f..da1830ce 100644
--- a/drivers/net/qede/base/ecore_dev.c
+++ b/drivers/net/qede/base/ecore_dev.c
@@ -28,7 +28,6 @@
#include "mcp_public.h"
#include "ecore_iro.h"
#include "nvm_cfg.h"
-#include "ecore_dev_api.h"
#include "ecore_dcbx.h"
#include "ecore_l2.h"
@@ -42,6 +41,318 @@
static osal_spinlock_t qm_lock;
static bool qm_lock_init;
+/******************** Doorbell Recovery *******************/
+/* The doorbell recovery mechanism consists of a list of entries which represent
+ * doorbelling entities (l2 queues, roce sq/rq/cqs, the slowpath spq, etc). Each
+ * entity needs to register with the mechanism and provide the parameters
+ * describing its doorbell, including a location where the last used doorbell data
+ * can be found. The doorbell execute function will traverse the list and
+ * doorbell all of the registered entries.
+ */
+struct ecore_db_recovery_entry {
+ osal_list_entry_t list_entry;
+ void OSAL_IOMEM *db_addr;
+ void *db_data;
+ enum ecore_db_rec_width db_width;
+ enum ecore_db_rec_space db_space;
+ u8 hwfn_idx;
+};
+
+/* display a single doorbell recovery entry */
+void ecore_db_recovery_dp_entry(struct ecore_hwfn *p_hwfn,
+ struct ecore_db_recovery_entry *db_entry,
+ const char *action)
+{
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "(%s: db_entry %p, addr %p, data %p, width %s, %s space, hwfn %d)\n",
+ action, db_entry, db_entry->db_addr, db_entry->db_data,
+ db_entry->db_width == DB_REC_WIDTH_32B ? "32b" : "64b",
+ db_entry->db_space == DB_REC_USER ? "user" : "kernel",
+ db_entry->hwfn_idx);
+}
+
+/* doorbell address sanity (address within doorbell bar range) */
+bool ecore_db_rec_sanity(struct ecore_dev *p_dev, void OSAL_IOMEM *db_addr,
+ void *db_data)
+{
+ /* make sure doorbell address is within the doorbell bar */
+ if (db_addr < p_dev->doorbells || (u8 *)db_addr >
+ (u8 *)p_dev->doorbells + p_dev->db_size) {
+ OSAL_WARN(true,
+ "Illegal doorbell address: %p. Legal range for doorbell addresses is [%p..%p]\n",
+ db_addr, p_dev->doorbells,
+ (u8 *)p_dev->doorbells + p_dev->db_size);
+ return false;
+ }
+
+ /* make sure doorbell data pointer is not null */
+ if (!db_data) {
+ OSAL_WARN(true, "Illegal doorbell data pointer: %p", db_data);
+ return false;
+ }
+
+ return true;
+}
+
+/* find hwfn according to the doorbell address */
+struct ecore_hwfn *ecore_db_rec_find_hwfn(struct ecore_dev *p_dev,
+ void OSAL_IOMEM *db_addr)
+{
+ struct ecore_hwfn *p_hwfn;
+
+	/* In CMT the doorbell bar is split down the middle between engine 0
+	 * and engine 1
+ */
+ if (ECORE_IS_CMT(p_dev))
+ p_hwfn = db_addr < p_dev->hwfns[1].doorbells ?
+ &p_dev->hwfns[0] : &p_dev->hwfns[1];
+ else
+ p_hwfn = ECORE_LEADING_HWFN(p_dev);
+
+ return p_hwfn;
+}
+
+/* add a new entry to the doorbell recovery mechanism */
+enum _ecore_status_t ecore_db_recovery_add(struct ecore_dev *p_dev,
+ void OSAL_IOMEM *db_addr,
+ void *db_data,
+ enum ecore_db_rec_width db_width,
+ enum ecore_db_rec_space db_space)
+{
+ struct ecore_db_recovery_entry *db_entry;
+ struct ecore_hwfn *p_hwfn;
+
+	/* short-circuit VFs, for now */
+ if (IS_VF(p_dev)) {
+ DP_VERBOSE(p_dev, ECORE_MSG_IOV, "db recovery - skipping VF doorbell\n");
+ return ECORE_SUCCESS;
+ }
+
+ /* sanitize doorbell address */
+ if (!ecore_db_rec_sanity(p_dev, db_addr, db_data))
+ return ECORE_INVAL;
+
+ /* obtain hwfn from doorbell address */
+ p_hwfn = ecore_db_rec_find_hwfn(p_dev, db_addr);
+
+ /* create entry */
+ db_entry = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*db_entry));
+ if (!db_entry) {
+ DP_NOTICE(p_dev, false, "Failed to allocate a db recovery entry\n");
+ return ECORE_NOMEM;
+ }
+
+ /* populate entry */
+ db_entry->db_addr = db_addr;
+ db_entry->db_data = db_data;
+ db_entry->db_width = db_width;
+ db_entry->db_space = db_space;
+ db_entry->hwfn_idx = p_hwfn->my_id;
+
+ /* display */
+ ecore_db_recovery_dp_entry(p_hwfn, db_entry, "Adding");
+
+ /* protect the list */
+ OSAL_SPIN_LOCK(&p_hwfn->db_recovery_info.lock);
+ OSAL_LIST_PUSH_TAIL(&db_entry->list_entry,
+ &p_hwfn->db_recovery_info.list);
+ OSAL_SPIN_UNLOCK(&p_hwfn->db_recovery_info.lock);
+
+ return ECORE_SUCCESS;
+}
+
+/* remove an entry from the doorbell recovery mechanism */
+enum _ecore_status_t ecore_db_recovery_del(struct ecore_dev *p_dev,
+ void OSAL_IOMEM *db_addr,
+ void *db_data)
+{
+ struct ecore_db_recovery_entry *db_entry = OSAL_NULL;
+ enum _ecore_status_t rc = ECORE_INVAL;
+ struct ecore_hwfn *p_hwfn;
+
+	/* short-circuit VFs, for now */
+ if (IS_VF(p_dev)) {
+ DP_VERBOSE(p_dev, ECORE_MSG_IOV, "db recovery - skipping VF doorbell\n");
+ return ECORE_SUCCESS;
+ }
+
+ /* sanitize doorbell address */
+ if (!ecore_db_rec_sanity(p_dev, db_addr, db_data))
+ return ECORE_INVAL;
+
+ /* obtain hwfn from doorbell address */
+ p_hwfn = ecore_db_rec_find_hwfn(p_dev, db_addr);
+
+ /* protect the list */
+ OSAL_SPIN_LOCK(&p_hwfn->db_recovery_info.lock);
+ OSAL_LIST_FOR_EACH_ENTRY(db_entry,
+ &p_hwfn->db_recovery_info.list,
+ list_entry,
+ struct ecore_db_recovery_entry) {
+ /* search according to db_data addr since db_addr is not unique
+ * (roce)
+ */
+ if (db_entry->db_data == db_data) {
+ ecore_db_recovery_dp_entry(p_hwfn, db_entry,
+ "Deleting");
+ OSAL_LIST_REMOVE_ENTRY(&db_entry->list_entry,
+ &p_hwfn->db_recovery_info.list);
+ rc = ECORE_SUCCESS;
+ break;
+ }
+ }
+
+ OSAL_SPIN_UNLOCK(&p_hwfn->db_recovery_info.lock);
+
+ if (rc == ECORE_INVAL)
+ /*OSAL_WARN(true,*/
+ DP_NOTICE(p_hwfn, false,
+ "Failed to find element in list. Key (db_data addr) was %p. db_addr was %p\n",
+ db_data, db_addr);
+ else
+ OSAL_FREE(p_dev, db_entry);
+
+ return rc;
+}
+
+/* initialize the doorbell recovery mechanism */
+enum _ecore_status_t ecore_db_recovery_setup(struct ecore_hwfn *p_hwfn)
+{
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "Setting up db recovery\n");
+
+ /* make sure db_size was set in p_dev */
+ if (!p_hwfn->p_dev->db_size) {
+ DP_ERR(p_hwfn->p_dev, "db_size not set\n");
+ return ECORE_INVAL;
+ }
+
+ OSAL_LIST_INIT(&p_hwfn->db_recovery_info.list);
+#ifdef CONFIG_ECORE_LOCK_ALLOC
+ OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_hwfn->db_recovery_info.lock);
+#endif
+ OSAL_SPIN_LOCK_INIT(&p_hwfn->db_recovery_info.lock);
+ p_hwfn->db_recovery_info.db_recovery_counter = 0;
+
+ return ECORE_SUCCESS;
+}
+
+/* destroy the doorbell recovery mechanism */
+void ecore_db_recovery_teardown(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_db_recovery_entry *db_entry = OSAL_NULL;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "Tearing down db recovery\n");
+ if (!OSAL_LIST_IS_EMPTY(&p_hwfn->db_recovery_info.list)) {
+		DP_VERBOSE(p_hwfn, false, "Doorbell recovery teardown found a non-empty doorbell recovery list (expected in a disorderly driver unload, e.g. recovery; otherwise some flow probably forgot to call ecore_db_recovery_del). Purging the doorbell recovery list...\n");
+ while (!OSAL_LIST_IS_EMPTY(&p_hwfn->db_recovery_info.list)) {
+ db_entry = OSAL_LIST_FIRST_ENTRY(
+ &p_hwfn->db_recovery_info.list,
+ struct ecore_db_recovery_entry,
+ list_entry);
+ ecore_db_recovery_dp_entry(p_hwfn, db_entry, "Purging");
+ OSAL_LIST_REMOVE_ENTRY(&db_entry->list_entry,
+ &p_hwfn->db_recovery_info.list);
+ OSAL_FREE(p_hwfn->p_dev, db_entry);
+ }
+ }
+#ifdef CONFIG_ECORE_LOCK_ALLOC
+ OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->db_recovery_info.lock);
+#endif
+ p_hwfn->db_recovery_info.db_recovery_counter = 0;
+}
+
+/* print the content of the doorbell recovery mechanism */
+void ecore_db_recovery_dp(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_db_recovery_entry *db_entry = OSAL_NULL;
+
+ DP_NOTICE(p_hwfn, false,
+ "Dispalying doorbell recovery database. Counter was %d\n",
+ p_hwfn->db_recovery_info.db_recovery_counter);
+
+ /* protect the list */
+ OSAL_SPIN_LOCK(&p_hwfn->db_recovery_info.lock);
+ OSAL_LIST_FOR_EACH_ENTRY(db_entry,
+ &p_hwfn->db_recovery_info.list,
+ list_entry,
+ struct ecore_db_recovery_entry) {
+ ecore_db_recovery_dp_entry(p_hwfn, db_entry, "Printing");
+ }
+
+ OSAL_SPIN_UNLOCK(&p_hwfn->db_recovery_info.lock);
+}
+
+/* ring the doorbell of a single doorbell recovery entry */
+void ecore_db_recovery_ring(struct ecore_hwfn *p_hwfn,
+ struct ecore_db_recovery_entry *db_entry,
+ enum ecore_db_rec_exec db_exec)
+{
+ /* Print according to width */
+ if (db_entry->db_width == DB_REC_WIDTH_32B)
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "%s doorbell address %p data %x\n",
+ db_exec == DB_REC_DRY_RUN ? "would have rung" : "ringing",
+ db_entry->db_addr, *(u32 *)db_entry->db_data);
+ else
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "%s doorbell address %p data %lx\n",
+ db_exec == DB_REC_DRY_RUN ? "would have rung" : "ringing",
+ db_entry->db_addr,
+ *(unsigned long *)(db_entry->db_data));
+
+ /* Sanity */
+ if (!ecore_db_rec_sanity(p_hwfn->p_dev, db_entry->db_addr,
+ db_entry->db_data))
+ return;
+
+ /* Flush the write combined buffer. Since there are multiple doorbelling
+ * entities using the same address, if we don't flush, a transaction
+ * could be lost.
+ */
+ OSAL_WMB(p_hwfn->p_dev);
+
+ /* Ring the doorbell */
+ if (db_exec == DB_REC_REAL_DEAL || db_exec == DB_REC_ONCE) {
+ if (db_entry->db_width == DB_REC_WIDTH_32B)
+ DIRECT_REG_WR(p_hwfn, db_entry->db_addr,
+ *(u32 *)(db_entry->db_data));
+ else
+ DIRECT_REG_WR64(p_hwfn, db_entry->db_addr,
+ *(u64 *)(db_entry->db_data));
+ }
+
+ /* Flush the write combined buffer. Next doorbell may come from a
+ * different entity to the same address...
+ */
+ OSAL_WMB(p_hwfn->p_dev);
+}
+
+/* traverse the doorbell recovery entry list and ring all the doorbells */
+void ecore_db_recovery_execute(struct ecore_hwfn *p_hwfn,
+ enum ecore_db_rec_exec db_exec)
+{
+ struct ecore_db_recovery_entry *db_entry = OSAL_NULL;
+
+ if (db_exec != DB_REC_ONCE) {
+ DP_NOTICE(p_hwfn, false, "Executing doorbell recovery. Counter was %d\n",
+ p_hwfn->db_recovery_info.db_recovery_counter);
+
+		/* track the number of times recovery was executed */
+ p_hwfn->db_recovery_info.db_recovery_counter++;
+ }
+
+ /* protect the list */
+ OSAL_SPIN_LOCK(&p_hwfn->db_recovery_info.lock);
+ OSAL_LIST_FOR_EACH_ENTRY(db_entry,
+ &p_hwfn->db_recovery_info.list,
+ list_entry,
+ struct ecore_db_recovery_entry) {
+ ecore_db_recovery_ring(p_hwfn, db_entry, db_exec);
+ if (db_exec == DB_REC_ONCE)
+ break;
+ }
+
+ OSAL_SPIN_UNLOCK(&p_hwfn->db_recovery_info.lock);
+}
+/******************** Doorbell Recovery end ****************/
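A minimal usage sketch of the mechanism above, assuming a hwfn whose list was
already populated via ecore_db_recovery_add(); the handler name is
hypothetical, the two calls are the ones defined in this file:

	static void example_db_overflow_handler(struct ecore_hwfn *p_hwfn)
	{
		/* Log which doorbells would be re-issued, without writing */
		ecore_db_recovery_execute(p_hwfn, DB_REC_DRY_RUN);

		/* Re-issue every registered doorbell; each write is fenced
		 * with OSAL_WMB() before and after inside
		 * ecore_db_recovery_ring().
		 */
		ecore_db_recovery_execute(p_hwfn, DB_REC_REAL_DEAL);
	}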
+
/* Configurable */
#define ECORE_MIN_DPIS (4) /* The minimal num of DPIs required to
* load the driver. The number was
@@ -49,28 +360,20 @@ static bool qm_lock_init;
*/
/* Derived */
-#define ECORE_MIN_PWM_REGION ((ECORE_WID_SIZE) * (ECORE_MIN_DPIS))
-
-enum BAR_ID {
- BAR_ID_0, /* used for GRC */
- BAR_ID_1 /* Used for doorbells */
-};
+#define ECORE_MIN_PWM_REGION (ECORE_WID_SIZE * ECORE_MIN_DPIS)
-static u32 ecore_hw_bar_size(struct ecore_hwfn *p_hwfn, enum BAR_ID bar_id)
+static u32 ecore_hw_bar_size(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ enum BAR_ID bar_id)
{
u32 bar_reg = (bar_id == BAR_ID_0 ?
PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
u32 val;
- if (IS_VF(p_hwfn->p_dev)) {
- /* TODO - assume each VF hwfn has 64Kb for Bar0; Bar1 can be
- * read from actual register, but we're currently not using
- * it for actual doorbelling.
- */
- return 1 << 17;
- }
+ if (IS_VF(p_hwfn->p_dev))
+ return ecore_vf_hw_bar_size(p_hwfn, bar_id);
- val = ecore_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);
+ val = ecore_rd(p_hwfn, p_ptt, bar_reg);
if (val)
return 1 << (val + 15);
@@ -78,15 +381,13 @@ static u32 ecore_hw_bar_size(struct ecore_hwfn *p_hwfn, enum BAR_ID bar_id)
* they were found to be useful MFW started updating them from 8.7.7.0.
* In older MFW versions they are set to 0 which means disabled.
*/
- if (p_hwfn->p_dev->num_hwfns > 1) {
- DP_NOTICE(p_hwfn, false,
- "BAR size not configured. Assuming BAR size of 256kB"
- " for GRC and 512kB for DB\n");
+ if (ECORE_IS_CMT(p_hwfn->p_dev)) {
+ DP_INFO(p_hwfn,
+ "BAR size not configured. Assuming BAR size of 256kB for GRC and 512kB for DB\n");
 		val = (bar_id == BAR_ID_0) ? 256 * 1024 : 512 * 1024;
} else {
- DP_NOTICE(p_hwfn, false,
- "BAR size not configured. Assuming BAR size of 512kB"
- " for GRC and 512kB for DB\n");
+ DP_INFO(p_hwfn,
+ "BAR size not configured. Assuming BAR size of 512kB for GRC and 512kB for DB\n");
val = 512 * 1024;
}
@@ -121,7 +422,9 @@ void ecore_init_struct(struct ecore_dev *p_dev)
p_hwfn->my_id = i;
p_hwfn->b_active = false;
+#ifdef CONFIG_ECORE_LOCK_ALLOC
OSAL_MUTEX_ALLOC(p_hwfn, &p_hwfn->dmae_info.mutex);
+#endif
OSAL_MUTEX_INIT(&p_hwfn->dmae_info.mutex);
}
@@ -168,8 +471,11 @@ void ecore_resc_free(struct ecore_dev *p_dev)
ecore_iov_free(p_hwfn);
ecore_l2_free(p_hwfn);
ecore_dmae_info_free(p_hwfn);
- ecore_dcbx_info_free(p_hwfn, p_hwfn->p_dcbx_info);
+ ecore_dcbx_info_free(p_hwfn);
/* @@@TBD Flush work-queue ? */
+
+ /* destroy doorbell recovery mechanism */
+ ecore_db_recovery_teardown(p_hwfn);
}
}
@@ -307,7 +613,7 @@ static void ecore_init_qm_params(struct ecore_hwfn *p_hwfn)
qm_info->vport_wfq_en = 1;
/* TC config is different for AH 4 port */
- four_port = p_hwfn->p_dev->num_ports_in_engines == MAX_NUM_PORTS_K2;
+ four_port = p_hwfn->p_dev->num_ports_in_engine == MAX_NUM_PORTS_K2;
/* in AH 4 port we have fewer TCs per port */
qm_info->max_phys_tcs_per_port = four_port ? NUM_PHYS_TCS_4PORT_K2 :
@@ -336,7 +642,7 @@ static void ecore_init_qm_vport_params(struct ecore_hwfn *p_hwfn)
static void ecore_init_qm_port_params(struct ecore_hwfn *p_hwfn)
{
/* Initialize qm port parameters */
- u8 i, active_phys_tcs, num_ports = p_hwfn->p_dev->num_ports_in_engines;
+ u8 i, active_phys_tcs, num_ports = p_hwfn->p_dev->num_ports_in_engine;
/* indicate how ooo and high pri traffic is dealt with */
active_phys_tcs = num_ports == MAX_NUM_PORTS_K2 ?
@@ -348,7 +654,7 @@ static void ecore_init_qm_port_params(struct ecore_hwfn *p_hwfn)
p_qm_port->active = 1;
p_qm_port->active_phys_tcs = active_phys_tcs;
- p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports;
+ p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES_E4 / num_ports;
p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports;
}
}
@@ -690,7 +996,7 @@ static void ecore_dp_init_qm_params(struct ecore_hwfn *p_hwfn)
qm_info->num_pf_rls, ecore_get_pq_flags(p_hwfn));
/* port table */
- for (i = 0; i < p_hwfn->p_dev->num_ports_in_engines; i++) {
+ for (i = 0; i < p_hwfn->p_dev->num_ports_in_engine; i++) {
port = &qm_info->qm_port_params[i];
DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
"port idx %d, active %d, active_phys_tcs %d,"
@@ -777,7 +1083,7 @@ enum _ecore_status_t ecore_qm_reconf(struct ecore_hwfn *p_hwfn,
ecore_init_clear_rt_data(p_hwfn);
/* prepare QM portion of runtime array */
- ecore_qm_init_pf(p_hwfn);
+ ecore_qm_init_pf(p_hwfn, p_ptt);
/* activate init tool on runtime array */
rc = ecore_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id,
@@ -819,7 +1125,7 @@ static enum _ecore_status_t ecore_alloc_qm_data(struct ecore_hwfn *p_hwfn)
qm_info->qm_port_params = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
sizeof(struct init_qm_port_params) *
- p_hwfn->p_dev->num_ports_in_engines);
+ p_hwfn->p_dev->num_ports_in_engine);
if (!qm_info->qm_port_params)
goto alloc_err;
@@ -861,12 +1167,17 @@ enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev)
struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
u32 n_eqes, num_cons;
+ /* initialize the doorbell recovery mechanism */
+ rc = ecore_db_recovery_setup(p_hwfn);
+ if (rc)
+ goto alloc_err;
+
/* First allocate the context manager structure */
rc = ecore_cxt_mngr_alloc(p_hwfn);
if (rc)
goto alloc_err;
- /* Set the HW cid/tid numbers (in the contest manager)
+ /* Set the HW cid/tid numbers (in the context manager)
* Must be done prior to any further computations.
*/
rc = ecore_cxt_set_pf_params(p_hwfn);
@@ -1036,7 +1347,7 @@ void ecore_resc_setup(struct ecore_dev *p_dev)
ecore_int_setup(p_hwfn, p_hwfn->p_main_ptt);
ecore_l2_setup(p_hwfn);
- ecore_iov_setup(p_hwfn, p_hwfn->p_main_ptt);
+ ecore_iov_setup(p_hwfn);
}
}
@@ -1116,7 +1427,7 @@ static enum _ecore_status_t ecore_calc_hw_mode(struct ecore_hwfn *p_hwfn)
}
/* Ports per engine is based on the values in CNIG_REG_NW_PORT_MODE */
- switch (p_hwfn->p_dev->num_ports_in_engines) {
+ switch (p_hwfn->p_dev->num_ports_in_engine) {
case 1:
hw_mode |= 1 << MODE_PORTS_PER_ENG_1;
break;
@@ -1129,23 +1440,15 @@ static enum _ecore_status_t ecore_calc_hw_mode(struct ecore_hwfn *p_hwfn)
default:
DP_NOTICE(p_hwfn, true,
"num_ports_in_engine = %d not supported\n",
- p_hwfn->p_dev->num_ports_in_engines);
+ p_hwfn->p_dev->num_ports_in_engine);
return ECORE_INVAL;
}
- switch (p_hwfn->p_dev->mf_mode) {
- case ECORE_MF_DEFAULT:
- case ECORE_MF_NPAR:
- hw_mode |= 1 << MODE_MF_SI;
- break;
- case ECORE_MF_OVLAN:
+ if (OSAL_TEST_BIT(ECORE_MF_OVLAN_CLSS,
+ &p_hwfn->p_dev->mf_bits))
hw_mode |= 1 << MODE_MF_SD;
- break;
- default:
- DP_NOTICE(p_hwfn, true,
- "Unsupported MF mode, init as DEFAULT\n");
+ else
hw_mode |= 1 << MODE_MF_SI;
- }
#ifndef ASIC_ONLY
if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
@@ -1161,7 +1464,7 @@ static enum _ecore_status_t ecore_calc_hw_mode(struct ecore_hwfn *p_hwfn)
#endif
hw_mode |= 1 << MODE_ASIC;
- if (p_hwfn->p_dev->num_hwfns > 1)
+ if (ECORE_IS_CMT(p_hwfn->p_dev))
hw_mode |= 1 << MODE_100G;
p_hwfn->hw_info.hw_mode = hw_mode;
@@ -1203,10 +1506,10 @@ static enum _ecore_status_t ecore_hw_init_chip(struct ecore_hwfn *p_hwfn,
if (ECORE_IS_AH(p_dev)) {
/* 2 for 4-port, 1 for 2-port, 0 for 1-port */
ecore_wr(p_hwfn, p_ptt, MISC_REG_PORT_MODE,
- (p_dev->num_ports_in_engines >> 1));
+ (p_dev->num_ports_in_engine >> 1));
ecore_wr(p_hwfn, p_ptt, MISC_REG_BLOCK_256B_EN,
- p_dev->num_ports_in_engines == 4 ? 0 : 3);
+ p_dev->num_ports_in_engine == 4 ? 0 : 3);
}
}
@@ -1232,7 +1535,7 @@ static enum _ecore_status_t ecore_hw_init_chip(struct ecore_hwfn *p_hwfn,
static void ecore_init_cau_rt_data(struct ecore_dev *p_dev)
{
u32 offset = CAU_REG_SB_VAR_MEMORY_RT_OFFSET;
- int i, sb_id;
+ int i, igu_sb_id;
for_each_hwfn(p_dev, i) {
struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
@@ -1242,20 +1545,77 @@ static void ecore_init_cau_rt_data(struct ecore_dev *p_dev)
p_igu_info = p_hwfn->hw_info.p_igu_info;
- for (sb_id = 0; sb_id < ECORE_MAPPING_MEMORY_SIZE(p_dev);
- sb_id++) {
- p_block = &p_igu_info->igu_map.igu_blocks[sb_id];
+ for (igu_sb_id = 0;
+ igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_dev);
+ igu_sb_id++) {
+ p_block = &p_igu_info->entry[igu_sb_id];
if (!p_block->is_pf)
continue;
ecore_init_cau_sb_entry(p_hwfn, &sb_entry,
p_block->function_id, 0, 0);
- STORE_RT_REG_AGG(p_hwfn, offset + sb_id * 2, sb_entry);
+ STORE_RT_REG_AGG(p_hwfn, offset + igu_sb_id * 2,
+ sb_entry);
}
}
}
+static void ecore_init_cache_line_size(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 val, wr_mbs, cache_line_size;
+
+ val = ecore_rd(p_hwfn, p_ptt, PSWRQ2_REG_WR_MBS0);
+ switch (val) {
+ case 0:
+ wr_mbs = 128;
+ break;
+ case 1:
+ wr_mbs = 256;
+ break;
+ case 2:
+ wr_mbs = 512;
+ break;
+ default:
+ DP_INFO(p_hwfn,
+ "Unexpected value of PSWRQ2_REG_WR_MBS0 [0x%x]. Avoid configuring PGLUE_B_REG_CACHE_LINE_SIZE.\n",
+ val);
+ return;
+ }
+
+ cache_line_size = OSAL_MIN_T(u32, OSAL_CACHE_LINE_SIZE, wr_mbs);
+ switch (cache_line_size) {
+ case 32:
+ val = 0;
+ break;
+ case 64:
+ val = 1;
+ break;
+ case 128:
+ val = 2;
+ break;
+ case 256:
+ val = 3;
+ break;
+	default:
+		DP_INFO(p_hwfn,
+			"Unexpected value of cache line size [0x%x]. Avoid configuring PGLUE_B_REG_CACHE_LINE_SIZE.\n",
+			cache_line_size);
+		return;
+	}
+
+ if (wr_mbs < OSAL_CACHE_LINE_SIZE)
+ DP_INFO(p_hwfn,
+ "The cache line size for padding is suboptimal for performance [OS cache line size 0x%x, wr mbs 0x%x]\n",
+ OSAL_CACHE_LINE_SIZE, wr_mbs);
+
+ STORE_RT_REG(p_hwfn, PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET, val);
+ if (val > 0) {
+ STORE_RT_REG(p_hwfn, PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET, val);
+ STORE_RT_REG(p_hwfn, PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET, val);
+ }
+}
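Illustration only: the cache-line switch above is a log2-based encoding, kept
explicit so that unexpected sizes land in the reported default branch instead
of being silently encoded. Assuming OSAL_LOG2 is the usual integer log2 (as
used elsewhere in this file), an equivalent would be:

	static u32 example_cache_line_encoding(u32 cache_line_size)
	{
		/* 32B -> 0, 64B -> 1, 128B -> 2, 256B -> 3 */
		return OSAL_LOG2(cache_line_size) - 5;
	}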
+
static enum _ecore_status_t ecore_hw_init_common(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
int hw_mode)
@@ -1270,11 +1630,11 @@ static enum _ecore_status_t ecore_hw_init_common(struct ecore_hwfn *p_hwfn,
ecore_init_cau_rt_data(p_dev);
/* Program GTT windows */
- ecore_gtt_init(p_hwfn);
+ ecore_gtt_init(p_hwfn, p_ptt);
#ifndef ASIC_ONLY
if (CHIP_REV_IS_EMUL(p_dev)) {
- rc = ecore_hw_init_chip(p_hwfn, p_hwfn->p_main_ptt);
+ rc = ecore_hw_init_chip(p_hwfn, p_ptt);
if (rc != ECORE_SUCCESS)
return rc;
}
@@ -1288,7 +1648,7 @@ static enum _ecore_status_t ecore_hw_init_common(struct ecore_hwfn *p_hwfn,
}
ecore_qm_common_rt_init(p_hwfn,
- p_dev->num_ports_in_engines,
+ p_dev->num_ports_in_engine,
qm_info->max_phys_tcs_per_port,
qm_info->pf_rl_en, qm_info->pf_wfq_en,
qm_info->vport_rl_en, qm_info->vport_wfq_en,
@@ -1296,6 +1656,8 @@ static enum _ecore_status_t ecore_hw_init_common(struct ecore_hwfn *p_hwfn,
ecore_cxt_hw_init_common(p_hwfn);
+ ecore_init_cache_line_size(p_hwfn, p_ptt);
+
rc = ecore_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode);
if (rc != ECORE_SUCCESS)
return rc;
@@ -1511,9 +1873,9 @@ static enum _ecore_status_t
ecore_hw_init_dpi_size(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u32 pwm_region_size, u32 n_cpus)
{
- u32 dpi_page_size_1, dpi_page_size_2, dpi_page_size;
- u32 dpi_bit_shift, dpi_count;
+ u32 dpi_bit_shift, dpi_count, dpi_page_size;
u32 min_dpis;
+ u32 n_wids;
/* Calculate DPI size
* ------------------
@@ -1536,12 +1898,11 @@ ecore_hw_init_dpi_size(struct ecore_hwfn *p_hwfn,
 	 * 0 is 4kB, 1 is 8kB, and so on. Hence the minimum size is 4,096
* containing 4 WIDs.
*/
- dpi_page_size_1 = ECORE_WID_SIZE * n_cpus;
- dpi_page_size_2 = OSAL_MAX_T(u32, ECORE_WID_SIZE, OSAL_PAGE_SIZE);
- dpi_page_size = OSAL_MAX_T(u32, dpi_page_size_1, dpi_page_size_2);
- dpi_page_size = OSAL_ROUNDUP_POW_OF_TWO(dpi_page_size);
+ n_wids = OSAL_MAX_T(u32, ECORE_MIN_WIDS, n_cpus);
+ dpi_page_size = ECORE_WID_SIZE * OSAL_ROUNDUP_POW_OF_TWO(n_wids);
+ dpi_page_size = (dpi_page_size + OSAL_PAGE_SIZE - 1) &
+ ~(OSAL_PAGE_SIZE - 1);
dpi_bit_shift = OSAL_LOG2(dpi_page_size / 4096);
-
dpi_count = pwm_region_size / dpi_page_size;
min_dpis = p_hwfn->pf_params.rdma_pf_params.min_dpis;
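Worked example for the sizing above, assuming ECORE_WID_SIZE == 1kB,
ECORE_MIN_WIDS == 4 and OSAL_PAGE_SIZE == 4kB:

	/* n_cpus = 6:
	 *	n_wids        = max(4, 6)             = 6
	 *	dpi_page_size = 1kB * roundup_pow2(6) = 8kB (4kB aligned)
	 *	dpi_bit_shift = log2(8kB / 4kB)       = 1
	 *	dpi_count     = pwm_region_size / 8kB
	 * i.e. each DPI spans 8 WIDs, one per CPU with two spare.
	 */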
@@ -1578,8 +1939,8 @@ ecore_hw_init_pf_doorbell_bar(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t rc = ECORE_SUCCESS;
u8 cond;
- db_bar_size = ecore_hw_bar_size(p_hwfn, BAR_ID_1);
- if (p_hwfn->p_dev->num_hwfns > 1)
+ db_bar_size = ecore_hw_bar_size(p_hwfn, p_ptt, BAR_ID_1);
+ if (ECORE_IS_CMT(p_hwfn->p_dev))
db_bar_size /= 2;
/* Calculate doorbell regions
@@ -1600,7 +1961,8 @@ ecore_hw_init_pf_doorbell_bar(struct ecore_hwfn *p_hwfn,
ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_CORE,
OSAL_NULL) +
ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, OSAL_NULL);
- norm_regsize = ROUNDUP(ECORE_PF_DEMS_SIZE * non_pwm_conn, 4096);
+ norm_regsize = ROUNDUP(ECORE_PF_DEMS_SIZE * non_pwm_conn,
+ OSAL_PAGE_SIZE);
min_addr_reg1 = norm_regsize / 4096;
pwm_regsize = db_bar_size - norm_regsize;
@@ -1626,7 +1988,7 @@ ecore_hw_init_pf_doorbell_bar(struct ecore_hwfn *p_hwfn,
/* Either EDPM is mandatory, or we are attempting to allocate a
* WID per CPU.
*/
- n_cpus = OSAL_NUM_ACTIVE_CPU();
+ n_cpus = OSAL_NUM_CPUS();
rc = ecore_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus);
}
@@ -1678,12 +2040,29 @@ static enum _ecore_status_t ecore_hw_init_port(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
int hw_mode)
{
+ u32 ppf_to_eng_sel[NIG_REG_PPF_TO_ENGINE_SEL_RT_SIZE];
+ u32 val;
enum _ecore_status_t rc = ECORE_SUCCESS;
+ u8 i;
+
+ /* In CMT for non-RoCE packets - use connection based classification */
+ val = ECORE_IS_CMT(p_hwfn->p_dev) ? 0x8 : 0x0;
+ for (i = 0; i < NIG_REG_PPF_TO_ENGINE_SEL_RT_SIZE; i++)
+ ppf_to_eng_sel[i] = val;
+ STORE_RT_REG_AGG(p_hwfn, NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET,
+ ppf_to_eng_sel);
+
+ /* In CMT the gate should be cleared by the 2nd hwfn */
+ if (!ECORE_IS_CMT(p_hwfn->p_dev) || !IS_LEAD_HWFN(p_hwfn))
+ STORE_RT_REG(p_hwfn, NIG_REG_BRB_GATE_DNTFWD_PORT_RT_OFFSET, 0);
rc = ecore_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id,
hw_mode);
if (rc != ECORE_SUCCESS)
return rc;
+
+ ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_WRITE_PAD_ENABLE, 0);
+
#ifndef ASIC_ONLY
if (CHIP_REV_IS_ASIC(p_hwfn->p_dev))
return ECORE_SUCCESS;
@@ -1694,7 +2073,7 @@ static enum _ecore_status_t ecore_hw_init_port(struct ecore_hwfn *p_hwfn,
else if (ECORE_IS_BB(p_hwfn->p_dev))
ecore_link_init_bb(p_hwfn, p_ptt, p_hwfn->port_id);
} else if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
- if (p_hwfn->p_dev->num_hwfns > 1) {
+ if (ECORE_IS_CMT(p_hwfn->p_dev)) {
/* Activate OPTE in CMT */
u32 val;
@@ -1746,7 +2125,7 @@ ecore_hw_init_pf(struct ecore_hwfn *p_hwfn,
/* Update rate limit once we'll actually have a link */
p_hwfn->qm_info.pf_rl = 100000;
}
- ecore_cxt_hw_init_pf(p_hwfn);
+ ecore_cxt_hw_init_pf(p_hwfn, p_ptt);
ecore_int_igu_init_rt(p_hwfn);
@@ -1756,6 +2135,11 @@ ecore_hw_init_pf(struct ecore_hwfn *p_hwfn,
STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1);
STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET,
p_hwfn->hw_info.ovlan);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "Configuring LLH_FUNC_FILTER_HDR_SEL\n");
+ STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET,
+ 1);
}
/* Enable classification by MAC if needed */
@@ -1815,7 +2199,7 @@ ecore_hw_init_pf(struct ecore_hwfn *p_hwfn,
return rc;
/* send function start command */
- rc = ecore_sp_pf_start(p_hwfn, p_tunn, p_hwfn->p_dev->mf_mode,
+ rc = ecore_sp_pf_start(p_hwfn, p_ptt, p_tunn,
allow_npar_tx_switch);
if (rc) {
DP_NOTICE(p_hwfn, true,
@@ -1898,6 +2282,37 @@ static void ecore_reset_mb_shadow(struct ecore_hwfn *p_hwfn,
p_hwfn->mcp_info->mfw_mb_length);
}
+static void ecore_pglueb_clear_err(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR,
+ 1 << p_hwfn->abs_pf_id);
+}
+
+static void
+ecore_fill_load_req_params(struct ecore_load_req_params *p_load_req,
+ struct ecore_drv_load_params *p_drv_load)
+{
+ /* Make sure that if ecore-client didn't provide inputs, all the
+ * expected defaults are indeed zero.
+ */
+ OSAL_BUILD_BUG_ON(ECORE_DRV_ROLE_OS != 0);
+ OSAL_BUILD_BUG_ON(ECORE_LOAD_REQ_LOCK_TO_DEFAULT != 0);
+ OSAL_BUILD_BUG_ON(ECORE_OVERRIDE_FORCE_LOAD_NONE != 0);
+
+ OSAL_MEM_ZERO(p_load_req, sizeof(*p_load_req));
+
+ if (p_drv_load != OSAL_NULL) {
+ p_load_req->drv_role = p_drv_load->is_crash_kernel ?
+ ECORE_DRV_ROLE_KDUMP :
+ ECORE_DRV_ROLE_OS;
+ p_load_req->timeout_val = p_drv_load->mfw_timeout_val;
+ p_load_req->avoid_eng_reset = p_drv_load->avoid_eng_reset;
+ p_load_req->override_force_load =
+ p_drv_load->override_force_load;
+ }
+}
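Illustration: since the OSAL_BUILD_BUG_ON checks pin all three defaults to
zero, a client that passes OSAL_NULL gets exactly the same request as one
that passes a zeroed structure:

	struct ecore_load_req_params req;

	ecore_fill_load_req_params(&req, OSAL_NULL);
	/* req.drv_role == ECORE_DRV_ROLE_OS,
	 * req.timeout_val == ECORE_LOAD_REQ_LOCK_TO_DEFAULT,
	 * req.override_force_load == ECORE_OVERRIDE_FORCE_LOAD_NONE
	 */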
+
enum _ecore_status_t ecore_vf_start(struct ecore_hwfn *p_hwfn,
struct ecore_hw_init_params *p_params)
{
@@ -1911,13 +2326,6 @@ enum _ecore_status_t ecore_vf_start(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
}
-static void ecore_pglueb_clear_err(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt)
-{
- ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR,
- 1 << p_hwfn->abs_pf_id);
-}
-
enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
struct ecore_hw_init_params *p_params)
{
@@ -1928,8 +2336,7 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
enum _ecore_status_t rc = ECORE_SUCCESS;
int i;
- if ((p_params->int_mode == ECORE_INT_MODE_MSI) &&
- (p_dev->num_hwfns > 1)) {
+ if ((p_params->int_mode == ECORE_INT_MODE_MSI) && ECORE_IS_CMT(p_dev)) {
DP_NOTICE(p_dev, false,
"MSI mode is not supported for CMT devices\n");
return ECORE_INVAL;
@@ -1959,12 +2366,8 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
if (rc != ECORE_SUCCESS)
return rc;
- OSAL_MEM_ZERO(&load_req_params, sizeof(load_req_params));
- load_req_params.drv_role = p_params->is_crash_kernel ?
- ECORE_DRV_ROLE_KDUMP :
- ECORE_DRV_ROLE_OS;
- load_req_params.timeout_val = p_params->mfw_timeout_val;
- load_req_params.avoid_eng_reset = p_params->avoid_eng_reset;
+ ecore_fill_load_req_params(&load_req_params,
+ p_params->p_drv_load_params);
rc = ecore_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt,
&load_req_params);
if (rc != ECORE_SUCCESS) {
@@ -1978,6 +2381,8 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
"Load request was sent. Load code: 0x%x\n",
load_code);
+ ecore_mcp_set_capabilities(p_hwfn, p_hwfn->p_main_ptt);
+
/* CQ75580:
 	 * When coming back from hibernate state, the registers from
* which shadow is read initially are not initialized. It turns
@@ -2072,7 +2477,7 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
"sending phony dcbx set command to trigger DCBx attention handling\n");
rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
DRV_MSG_CODE_SET_DCBX,
- 1 << DRV_MB_PARAM_DCBX_NOTIFY_SHIFT, &resp,
+ 1 << DRV_MB_PARAM_DCBX_NOTIFY_OFFSET, &resp,
&param);
if (rc != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, true,
@@ -2255,6 +2660,13 @@ enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev)
ecore_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0);
ecore_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0);
ecore_int_igu_init_pure_rt(p_hwfn, p_ptt, false, true);
+ rc = ecore_int_igu_reset_cam_default(p_hwfn, p_ptt);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, true,
+ "Failed to return IGU CAM to default\n");
+ rc2 = ECORE_UNKNOWN_ERROR;
+ }
+
/* Need to wait 1ms to guarantee SBs are cleared */
OSAL_MSLEEP(1);
@@ -2303,18 +2715,21 @@ enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev)
return rc2;
}
-void ecore_hw_stop_fastpath(struct ecore_dev *p_dev)
+enum _ecore_status_t ecore_hw_stop_fastpath(struct ecore_dev *p_dev)
{
int j;
for_each_hwfn(p_dev, j) {
struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j];
- struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt;
+ struct ecore_ptt *p_ptt;
if (IS_VF(p_dev)) {
ecore_vf_pf_int_cleanup(p_hwfn);
continue;
}
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return ECORE_AGAIN;
DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN,
"Shutting down the fastpath\n");
@@ -2336,15 +2751,22 @@ void ecore_hw_stop_fastpath(struct ecore_dev *p_dev)
ecore_int_igu_init_pure_rt(p_hwfn, p_ptt, false, false);
/* Need to wait 1ms to guarantee SBs are cleared */
OSAL_MSLEEP(1);
+ ecore_ptt_release(p_hwfn, p_ptt);
}
+
+ return ECORE_SUCCESS;
}
-void ecore_hw_start_fastpath(struct ecore_hwfn *p_hwfn)
+enum _ecore_status_t ecore_hw_start_fastpath(struct ecore_hwfn *p_hwfn)
{
- struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt;
+ struct ecore_ptt *p_ptt;
if (IS_VF(p_hwfn->p_dev))
- return;
+ return ECORE_SUCCESS;
+
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return ECORE_AGAIN;
/* If roce info is allocated it means roce is initialized and should
* be enabled in searcher.
@@ -2357,8 +2779,11 @@ void ecore_hw_start_fastpath(struct ecore_hwfn *p_hwfn)
}
/* Re-open incoming traffic */
- ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
+ ecore_wr(p_hwfn, p_ptt,
NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
+ ecore_ptt_release(p_hwfn, p_ptt);
+
+ return ECORE_SUCCESS;
}
/* Free hwfn memory and resources acquired in hw_hwfn_prepare */
@@ -2423,27 +2848,35 @@ static void get_function_id(struct ecore_hwfn *p_hwfn)
static void ecore_hw_set_feat(struct ecore_hwfn *p_hwfn)
{
u32 *feat_num = p_hwfn->hw_info.feat_num;
- struct ecore_sb_cnt_info sb_cnt_info;
- int num_features = 1;
+ struct ecore_sb_cnt_info sb_cnt;
+ u32 non_l2_sbs = 0;
+
+ OSAL_MEM_ZERO(&sb_cnt, sizeof(sb_cnt));
+ ecore_int_get_num_sbs(p_hwfn, &sb_cnt);
/* L2 Queues require each: 1 status block. 1 L2 queue */
- feat_num[ECORE_PF_L2_QUE] =
- OSAL_MIN_T(u32,
- RESC_NUM(p_hwfn, ECORE_SB) / num_features,
- RESC_NUM(p_hwfn, ECORE_L2_QUEUE));
-
- OSAL_MEM_ZERO(&sb_cnt_info, sizeof(sb_cnt_info));
- ecore_int_get_num_sbs(p_hwfn, &sb_cnt_info);
- feat_num[ECORE_VF_L2_QUE] =
- OSAL_MIN_T(u32,
- RESC_NUM(p_hwfn, ECORE_L2_QUEUE) -
- FEAT_NUM(p_hwfn, ECORE_PF_L2_QUE),
- sb_cnt_info.sb_iov_cnt);
-
- feat_num[ECORE_FCOE_CQ] = OSAL_MIN_T(u32, RESC_NUM(p_hwfn, ECORE_SB),
- RESC_NUM(p_hwfn, ECORE_CMDQS_CQS));
- feat_num[ECORE_ISCSI_CQ] = OSAL_MIN_T(u32, RESC_NUM(p_hwfn, ECORE_SB),
- RESC_NUM(p_hwfn, ECORE_CMDQS_CQS));
+ if (ECORE_IS_L2_PERSONALITY(p_hwfn)) {
+ /* Start by allocating VF queues, then PF's */
+ feat_num[ECORE_VF_L2_QUE] =
+ OSAL_MIN_T(u32,
+ RESC_NUM(p_hwfn, ECORE_L2_QUEUE),
+ sb_cnt.iov_cnt);
+ feat_num[ECORE_PF_L2_QUE] =
+ OSAL_MIN_T(u32,
+ sb_cnt.cnt - non_l2_sbs,
+ RESC_NUM(p_hwfn, ECORE_L2_QUEUE) -
+ FEAT_NUM(p_hwfn, ECORE_VF_L2_QUE));
+ }
+
+ if (ECORE_IS_FCOE_PERSONALITY(p_hwfn))
+ feat_num[ECORE_FCOE_CQ] =
+ OSAL_MIN_T(u32, sb_cnt.cnt, RESC_NUM(p_hwfn,
+ ECORE_CMDQS_CQS));
+
+ if (ECORE_IS_ISCSI_PERSONALITY(p_hwfn))
+ feat_num[ECORE_ISCSI_CQ] =
+ OSAL_MIN_T(u32, sb_cnt.cnt, RESC_NUM(p_hwfn,
+ ECORE_CMDQS_CQS));
DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE,
"#PF_L2_QUEUE=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d #FCOE_CQ=%d #ISCSI_CQ=%d #SB=%d\n",
@@ -2452,14 +2885,12 @@ static void ecore_hw_set_feat(struct ecore_hwfn *p_hwfn)
(int)FEAT_NUM(p_hwfn, ECORE_RDMA_CNQ),
(int)FEAT_NUM(p_hwfn, ECORE_FCOE_CQ),
(int)FEAT_NUM(p_hwfn, ECORE_ISCSI_CQ),
- RESC_NUM(p_hwfn, ECORE_SB));
+ (int)sb_cnt.cnt);
}
const char *ecore_hw_get_resc_name(enum ecore_resources res_id)
{
switch (res_id) {
- case ECORE_SB:
- return "SB";
case ECORE_L2_QUEUE:
return "L2_QUEUE";
case ECORE_VPORT:
@@ -2486,6 +2917,8 @@ const char *ecore_hw_get_resc_name(enum ecore_resources res_id)
return "RDMA_STATS_QUEUE";
case ECORE_BDQ:
return "BDQ";
+ case ECORE_SB:
+ return "SB";
default:
return "UNKNOWN_RESOURCE";
}
@@ -2493,12 +2926,14 @@ const char *ecore_hw_get_resc_name(enum ecore_resources res_id)
static enum _ecore_status_t
__ecore_hw_set_soft_resc_size(struct ecore_hwfn *p_hwfn,
- enum ecore_resources res_id, u32 resc_max_val,
+ struct ecore_ptt *p_ptt,
+ enum ecore_resources res_id,
+ u32 resc_max_val,
u32 *p_mcp_resp)
{
enum _ecore_status_t rc;
- rc = ecore_mcp_set_resc_max_val(p_hwfn, p_hwfn->p_main_ptt, res_id,
+ rc = ecore_mcp_set_resc_max_val(p_hwfn, p_ptt, res_id,
resc_max_val, p_mcp_resp);
if (rc != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, true,
@@ -2516,7 +2951,8 @@ __ecore_hw_set_soft_resc_size(struct ecore_hwfn *p_hwfn,
}
static enum _ecore_status_t
-ecore_hw_set_soft_resc_size(struct ecore_hwfn *p_hwfn)
+ecore_hw_set_soft_resc_size(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
{
bool b_ah = ECORE_IS_AH(p_hwfn->p_dev);
u32 resc_max_val, mcp_resp;
@@ -2536,7 +2972,7 @@ ecore_hw_set_soft_resc_size(struct ecore_hwfn *p_hwfn)
continue;
}
- rc = __ecore_hw_set_soft_resc_size(p_hwfn, res_id,
+ rc = __ecore_hw_set_soft_resc_size(p_hwfn, p_ptt, res_id,
resc_max_val, &mcp_resp);
if (rc != ECORE_SUCCESS)
return rc;
@@ -2561,14 +2997,8 @@ enum _ecore_status_t ecore_hw_get_dflt_resc(struct ecore_hwfn *p_hwfn,
{
u8 num_funcs = p_hwfn->num_funcs_on_engine;
bool b_ah = ECORE_IS_AH(p_hwfn->p_dev);
- struct ecore_sb_cnt_info sb_cnt_info;
switch (res_id) {
- case ECORE_SB:
- OSAL_MEM_ZERO(&sb_cnt_info, sizeof(sb_cnt_info));
- ecore_int_get_num_sbs(p_hwfn, &sb_cnt_info);
- *p_resc_num = sb_cnt_info.sb_cnt;
- break;
case ECORE_L2_QUEUE:
*p_resc_num = (b_ah ? MAX_NUM_L2_QUEUES_K2 :
MAX_NUM_L2_QUEUES_BB) / num_funcs;
@@ -2625,6 +3055,12 @@ enum _ecore_status_t ecore_hw_get_dflt_resc(struct ecore_hwfn *p_hwfn,
if (!*p_resc_num)
*p_resc_start = 0;
break;
+ case ECORE_SB:
+ /* Since we want its value to reflect whether MFW supports
+ * the new scheme, have a default of 0.
+ */
+ *p_resc_num = 0;
+ break;
default:
*p_resc_start = *p_resc_num * p_hwfn->enabled_func_idx;
break;
@@ -2689,14 +3125,9 @@ __ecore_hw_set_resc_info(struct ecore_hwfn *p_hwfn, enum ecore_resources res_id,
goto out;
}
- /* TBD - remove this when revising the handling of the SB resource */
- if (res_id == ECORE_SB) {
- /* Excluding the slowpath SB */
- *p_resc_num -= 1;
- *p_resc_start -= p_hwfn->enabled_func_idx;
- }
-
- if (*p_resc_num != dflt_resc_num || *p_resc_start != dflt_resc_start) {
+ if ((*p_resc_num != dflt_resc_num ||
+ *p_resc_start != dflt_resc_start) &&
+ res_id != ECORE_SB) {
DP_INFO(p_hwfn,
"MFW allocation for resource %d [%s] differs from default values [%d,%d vs. %d,%d]%s\n",
res_id, ecore_hw_get_resc_name(res_id), *p_resc_num,
@@ -2726,10 +3157,8 @@ static enum _ecore_status_t ecore_hw_set_resc_info(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
}
-#define ECORE_RESC_ALLOC_LOCK_RETRY_CNT 10
-#define ECORE_RESC_ALLOC_LOCK_RETRY_INTVL_US 10000 /* 10 msec */
-
static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
bool drv_resc_alloc)
{
struct ecore_resc_unlock_params resc_unlock_params;
@@ -2759,15 +3188,10 @@ static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn,
* Old drivers that don't acquire the lock can run in parallel, and
* their allocation values won't be affected by the updated max values.
*/
- OSAL_MEM_ZERO(&resc_lock_params, sizeof(resc_lock_params));
- resc_lock_params.resource = ECORE_RESC_LOCK_RESC_ALLOC;
- resc_lock_params.retry_num = ECORE_RESC_ALLOC_LOCK_RETRY_CNT;
- resc_lock_params.retry_interval = ECORE_RESC_ALLOC_LOCK_RETRY_INTVL_US;
- resc_lock_params.sleep_b4_retry = true;
- OSAL_MEM_ZERO(&resc_unlock_params, sizeof(resc_unlock_params));
- resc_unlock_params.resource = ECORE_RESC_LOCK_RESC_ALLOC;
-
- rc = ecore_mcp_resc_lock(p_hwfn, p_hwfn->p_main_ptt, &resc_lock_params);
+ ecore_mcp_resc_lock_default_init(&resc_lock_params, &resc_unlock_params,
+ ECORE_RESC_LOCK_RESC_ALLOC, false);
+
+ rc = ecore_mcp_resc_lock(p_hwfn, p_ptt, &resc_lock_params);
if (rc != ECORE_SUCCESS && rc != ECORE_NOTIMPL) {
return rc;
} else if (rc == ECORE_NOTIMPL) {
@@ -2779,7 +3203,7 @@ static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn,
rc = ECORE_BUSY;
goto unlock_and_exit;
} else {
- rc = ecore_hw_set_soft_resc_size(p_hwfn);
+ rc = ecore_hw_set_soft_resc_size(p_hwfn, p_ptt);
if (rc != ECORE_SUCCESS && rc != ECORE_NOTIMPL) {
DP_NOTICE(p_hwfn, false,
"Failed to set the max values of the soft resources\n");
@@ -2787,7 +3211,7 @@ static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn,
} else if (rc == ECORE_NOTIMPL) {
DP_INFO(p_hwfn,
"Skip the max values setting of the soft resources since it is not supported by the MFW\n");
- rc = ecore_mcp_resc_unlock(p_hwfn, p_hwfn->p_main_ptt,
+ rc = ecore_mcp_resc_unlock(p_hwfn, p_ptt,
&resc_unlock_params);
if (rc != ECORE_SUCCESS)
DP_INFO(p_hwfn,
@@ -2800,7 +3224,7 @@ static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn,
goto unlock_and_exit;
if (resc_lock_params.b_granted && !resc_unlock_params.b_released) {
- rc = ecore_mcp_resc_unlock(p_hwfn, p_hwfn->p_main_ptt,
+ rc = ecore_mcp_resc_unlock(p_hwfn, p_ptt,
&resc_unlock_params);
if (rc != ECORE_SUCCESS)
DP_INFO(p_hwfn,
@@ -2846,6 +3270,10 @@ static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn,
return ECORE_INVAL;
}
+ /* This will also learn the number of SBs from MFW */
+ if (ecore_int_igu_reset_cam(p_hwfn, p_ptt))
+ return ECORE_INVAL;
+
ecore_hw_set_feat(p_hwfn);
DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE,
@@ -2859,7 +3287,9 @@ static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
unlock_and_exit:
- ecore_mcp_resc_unlock(p_hwfn, p_hwfn->p_main_ptt, &resc_unlock_params);
+ if (resc_lock_params.b_granted && !resc_unlock_params.b_released)
+ ecore_mcp_resc_unlock(p_hwfn, p_ptt,
+ &resc_unlock_params);
return rc;
}
@@ -2870,6 +3300,7 @@ ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn,
{
u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg, dcbx_mode;
u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities;
+ struct ecore_mcp_link_capabilities *p_caps;
struct ecore_mcp_link_params *link;
enum _ecore_status_t rc;
@@ -2889,8 +3320,8 @@ ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn,
nvm_cfg1_offset = ecore_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
- OFFSETOF(struct nvm_cfg1, glob) + OFFSETOF(struct nvm_cfg1_glob,
- core_cfg);
+ OFFSETOF(struct nvm_cfg1, glob) +
+ OFFSETOF(struct nvm_cfg1_glob, core_cfg);
core_cfg = ecore_rd(p_hwfn, p_ptt, addr);
@@ -2959,6 +3390,7 @@ ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn,
/* Read default link configuration */
link = &p_hwfn->mcp_info->link_input;
+ p_caps = &p_hwfn->mcp_info->link_capabilities;
port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
OFFSETOF(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
link_temp = ecore_rd(p_hwfn, p_ptt,
@@ -2966,13 +3398,11 @@ ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn,
OFFSETOF(struct nvm_cfg1_port, speed_cap_mask));
link_temp &= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK;
link->speed.advertised_speeds = link_temp;
-
- link_temp = link->speed.advertised_speeds;
- p_hwfn->mcp_info->link_capabilities.speed_capabilities = link_temp;
+ p_caps->speed_capabilities = link->speed.advertised_speeds;
link_temp = ecore_rd(p_hwfn, p_ptt,
- port_cfg_addr +
- OFFSETOF(struct nvm_cfg1_port, link_settings));
+ port_cfg_addr +
+ OFFSETOF(struct nvm_cfg1_port, link_settings));
switch ((link_temp & NVM_CFG1_PORT_DRV_LINK_SPEED_MASK) >>
NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET) {
case NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG:
@@ -3000,10 +3430,8 @@ ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn,
DP_NOTICE(p_hwfn, true, "Unknown Speed in 0x%08x\n", link_temp);
}
- p_hwfn->mcp_info->link_capabilities.default_speed =
- link->speed.forced_speed;
- p_hwfn->mcp_info->link_capabilities.default_speed_autoneg =
- link->speed.autoneg;
+ p_caps->default_speed = link->speed.forced_speed;
+ p_caps->default_speed_autoneg = link->speed.autoneg;
link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK;
link_temp >>= NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET;
@@ -3015,15 +3443,47 @@ ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn,
NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX);
link->loopback_mode = 0;
+ if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) {
+ link_temp = ecore_rd(p_hwfn, p_ptt, port_cfg_addr +
+ OFFSETOF(struct nvm_cfg1_port, ext_phy));
+ link_temp &= NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_MASK;
+ link_temp >>= NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_OFFSET;
+ p_caps->default_eee = ECORE_MCP_EEE_ENABLED;
+ link->eee.enable = true;
+ switch (link_temp) {
+ case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_DISABLED:
+ p_caps->default_eee = ECORE_MCP_EEE_DISABLED;
+ link->eee.enable = false;
+ break;
+ case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_BALANCED:
+ p_caps->eee_lpi_timer = EEE_TX_TIMER_USEC_BALANCED_TIME;
+ break;
+ case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_AGGRESSIVE:
+ p_caps->eee_lpi_timer =
+ EEE_TX_TIMER_USEC_AGGRESSIVE_TIME;
+ break;
+ case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_LOW_LATENCY:
+ p_caps->eee_lpi_timer = EEE_TX_TIMER_USEC_LATENCY_TIME;
+ break;
+ }
+
+ link->eee.tx_lpi_timer = p_caps->eee_lpi_timer;
+ link->eee.tx_lpi_enable = link->eee.enable;
+ link->eee.adv_caps = ECORE_EEE_1G_ADV | ECORE_EEE_10G_ADV;
+ } else {
+ p_caps->default_eee = ECORE_MCP_EEE_UNSUPPORTED;
+ }
+
DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
- "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x\n",
+		   "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x, EEE: %02x [%08x usec]\n",
link->speed.forced_speed, link->speed.advertised_speeds,
- link->speed.autoneg, link->pause.autoneg);
+ link->speed.autoneg, link->pause.autoneg,
+ p_caps->default_eee, p_caps->eee_lpi_timer);
/* Read Multi-function information from shmem */
addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
- OFFSETOF(struct nvm_cfg1, glob) +
- OFFSETOF(struct nvm_cfg1_glob, generic_cont0);
+ OFFSETOF(struct nvm_cfg1, glob) +
+ OFFSETOF(struct nvm_cfg1_glob, generic_cont0);
generic_cont0 = ecore_rd(p_hwfn, p_ptt, addr);
@@ -3032,6 +3492,41 @@ ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn,
switch (mf_mode) {
case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
+ p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_OVLAN_CLSS;
+ break;
+ case NVM_CFG1_GLOB_MF_MODE_UFP:
+ p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_OVLAN_CLSS |
+ 1 << ECORE_MF_UFP_SPECIFIC;
+ break;
+
+ case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
+ p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_LLH_MAC_CLSS |
+ 1 << ECORE_MF_LLH_PROTO_CLSS |
+ 1 << ECORE_MF_LL2_NON_UNICAST |
+ 1 << ECORE_MF_INTER_PF_SWITCH |
+ 1 << ECORE_MF_DISABLE_ARFS;
+ break;
+ case NVM_CFG1_GLOB_MF_MODE_DEFAULT:
+ p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_LLH_MAC_CLSS |
+ 1 << ECORE_MF_LLH_PROTO_CLSS |
+ 1 << ECORE_MF_LL2_NON_UNICAST;
+ if (ECORE_IS_BB(p_hwfn->p_dev))
+ p_hwfn->p_dev->mf_bits |= 1 << ECORE_MF_NEED_DEF_PF;
+ break;
+ }
+ DP_INFO(p_hwfn, "Multi function mode is 0x%lx\n",
+ p_hwfn->p_dev->mf_bits);
+
+ if (ECORE_IS_CMT(p_hwfn->p_dev))
+ p_hwfn->p_dev->mf_bits |= (1 << ECORE_MF_DISABLE_ARFS);
+
+	/* It's funny since we have another switch, but it's easier
+	 * to throw this away in Linux this way. Long term, it might be
+	 * better to have getters for the needed ECORE_MF_* fields,
+	 * convert client code and eliminate this.
+	 */
+ switch (mf_mode) {
+ case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
p_hwfn->p_dev->mf_mode = ECORE_MF_OVLAN;
break;
case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
@@ -3040,31 +3535,32 @@ ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn,
case NVM_CFG1_GLOB_MF_MODE_DEFAULT:
p_hwfn->p_dev->mf_mode = ECORE_MF_DEFAULT;
break;
+ case NVM_CFG1_GLOB_MF_MODE_UFP:
+ p_hwfn->p_dev->mf_mode = ECORE_MF_UFP;
+ break;
}
- DP_INFO(p_hwfn, "Multi function mode is %08x\n",
- p_hwfn->p_dev->mf_mode);
/* Read Multi-function information from shmem */
addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
- OFFSETOF(struct nvm_cfg1, glob) +
- OFFSETOF(struct nvm_cfg1_glob, device_capabilities);
+ OFFSETOF(struct nvm_cfg1, glob) +
+ OFFSETOF(struct nvm_cfg1_glob, device_capabilities);
device_capabilities = ecore_rd(p_hwfn, p_ptt, addr);
if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET)
OSAL_SET_BIT(ECORE_DEV_CAP_ETH,
- &p_hwfn->hw_info.device_capabilities);
+ &p_hwfn->hw_info.device_capabilities);
if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_FCOE)
OSAL_SET_BIT(ECORE_DEV_CAP_FCOE,
- &p_hwfn->hw_info.device_capabilities);
+ &p_hwfn->hw_info.device_capabilities);
if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI)
OSAL_SET_BIT(ECORE_DEV_CAP_ISCSI,
- &p_hwfn->hw_info.device_capabilities);
+ &p_hwfn->hw_info.device_capabilities);
if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE)
OSAL_SET_BIT(ECORE_DEV_CAP_ROCE,
- &p_hwfn->hw_info.device_capabilities);
+ &p_hwfn->hw_info.device_capabilities);
if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_IWARP)
OSAL_SET_BIT(ECORE_DEV_CAP_IWARP,
- &p_hwfn->hw_info.device_capabilities);
+ &p_hwfn->hw_info.device_capabilities);
rc = ecore_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
if (rc != ECORE_SUCCESS && p_params->b_relaxed_probe) {
@@ -3101,7 +3597,7 @@ static void ecore_get_num_funcs(struct ecore_hwfn *p_hwfn,
if (reg_function_hide & 0x1) {
if (ECORE_IS_BB(p_dev)) {
- if (ECORE_PATH_ID(p_hwfn) && p_dev->num_hwfns == 1) {
+ if (ECORE_PATH_ID(p_hwfn) && !ECORE_IS_CMT(p_dev)) {
num_funcs = 0;
eng_mask = 0xaaaa;
} else {
@@ -3151,14 +3647,14 @@ static void ecore_get_num_funcs(struct ecore_hwfn *p_hwfn,
static void ecore_hw_info_port_num_bb(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
u32 port_mode;
#ifndef ASIC_ONLY
/* Read the port mode */
- if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
+ if (CHIP_REV_IS_FPGA(p_dev))
port_mode = 4;
- else if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) &&
- (p_hwfn->p_dev->num_hwfns > 1))
+ else if (CHIP_REV_IS_EMUL(p_dev) && ECORE_IS_CMT(p_dev))
/* In CMT on emulation, assume 1 port */
port_mode = 1;
else
@@ -3166,38 +3662,39 @@ static void ecore_hw_info_port_num_bb(struct ecore_hwfn *p_hwfn,
port_mode = ecore_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB);
if (port_mode < 3) {
- p_hwfn->p_dev->num_ports_in_engines = 1;
+ p_dev->num_ports_in_engine = 1;
} else if (port_mode <= 5) {
- p_hwfn->p_dev->num_ports_in_engines = 2;
+ p_dev->num_ports_in_engine = 2;
} else {
DP_NOTICE(p_hwfn, true, "PORT MODE: %d not supported\n",
- p_hwfn->p_dev->num_ports_in_engines);
+ p_dev->num_ports_in_engine);
- /* Default num_ports_in_engines to something */
- p_hwfn->p_dev->num_ports_in_engines = 1;
+ /* Default num_ports_in_engine to something */
+ p_dev->num_ports_in_engine = 1;
}
}
static void ecore_hw_info_port_num_ah_e5(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
u32 port;
int i;
- p_hwfn->p_dev->num_ports_in_engines = 0;
+ p_dev->num_ports_in_engine = 0;
#ifndef ASIC_ONLY
- if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
+ if (CHIP_REV_IS_EMUL(p_dev)) {
port = ecore_rd(p_hwfn, p_ptt, MISCS_REG_ECO_RESERVED);
switch ((port & 0xf000) >> 12) {
case 1:
- p_hwfn->p_dev->num_ports_in_engines = 1;
+ p_dev->num_ports_in_engine = 1;
break;
case 3:
- p_hwfn->p_dev->num_ports_in_engines = 2;
+ p_dev->num_ports_in_engine = 2;
break;
case 0xf:
- p_hwfn->p_dev->num_ports_in_engines = 4;
+ p_dev->num_ports_in_engine = 4;
break;
default:
DP_NOTICE(p_hwfn, false,
@@ -3211,17 +3708,68 @@ static void ecore_hw_info_port_num_ah_e5(struct ecore_hwfn *p_hwfn,
CNIG_REG_NIG_PORT0_CONF_K2_E5 +
(i * 4));
if (port & 1)
- p_hwfn->p_dev->num_ports_in_engines++;
+ p_dev->num_ports_in_engine++;
}
+
+ if (!p_dev->num_ports_in_engine) {
+ DP_NOTICE(p_hwfn, true, "All NIG ports are inactive\n");
+
+ /* Default num_ports_in_engine to something */
+ p_dev->num_ports_in_engine = 1;
+ }
}
static void ecore_hw_info_port_num(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
- if (ECORE_IS_BB(p_hwfn->p_dev))
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
+
+ /* Determine the number of ports per engine */
+ if (ECORE_IS_BB(p_dev))
ecore_hw_info_port_num_bb(p_hwfn, p_ptt);
else
ecore_hw_info_port_num_ah_e5(p_hwfn, p_ptt);
+
+ /* Get the total number of ports of the device */
+ if (ECORE_IS_CMT(p_dev)) {
+ /* In CMT there is always only one port */
+ p_dev->num_ports = 1;
+#ifndef ASIC_ONLY
+ } else if (CHIP_REV_IS_EMUL(p_dev) || CHIP_REV_IS_TEDIBEAR(p_dev)) {
+ p_dev->num_ports = p_dev->num_ports_in_engine *
+ ecore_device_num_engines(p_dev);
+#endif
+ } else {
+ u32 addr, global_offsize, global_addr;
+
+ addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+ PUBLIC_GLOBAL);
+ global_offsize = ecore_rd(p_hwfn, p_ptt, addr);
+ global_addr = SECTION_ADDR(global_offsize, 0);
+ addr = global_addr + OFFSETOF(struct public_global, max_ports);
+ p_dev->num_ports = (u8)ecore_rd(p_hwfn, p_ptt, addr);
+ }
+}
+
+static void ecore_mcp_get_eee_caps(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct ecore_mcp_link_capabilities *p_caps;
+ u32 eee_status;
+
+ p_caps = &p_hwfn->mcp_info->link_capabilities;
+ if (p_caps->default_eee == ECORE_MCP_EEE_UNSUPPORTED)
+ return;
+
+ p_caps->eee_speed_caps = 0;
+ eee_status = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
+ OFFSETOF(struct public_port, eee_status));
+ eee_status = (eee_status & EEE_SUPPORTED_SPEED_MASK) >>
+ EEE_SUPPORTED_SPEED_OFFSET;
+ if (eee_status & EEE_1G_SUPPORTED)
+ p_caps->eee_speed_caps |= ECORE_EEE_1G_ADV;
+	if (eee_status & EEE_10G_SUPPORTED)
+ p_caps->eee_speed_caps |= ECORE_EEE_10G_ADV;
}
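A hedged consumer-side sketch for the helper above; the names exist in this
patch, while the clamping policy itself is illustrative:

	static u32 example_clamp_eee_adv(struct ecore_hwfn *p_hwfn,
					 u32 requested_adv)
	{
		struct ecore_mcp_link_capabilities *p_caps =
			&p_hwfn->mcp_info->link_capabilities;

		if (p_caps->default_eee == ECORE_MCP_EEE_UNSUPPORTED)
			return 0;

		/* Advertise only speeds the MFW reported as EEE-capable */
		return requested_adv & p_caps->eee_speed_caps;
	}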
static enum _ecore_status_t
@@ -3244,14 +3792,10 @@ ecore_get_hw_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
}
}
- /* TODO In get_hw_info, amoungst others:
- * Get MCP FW revision and determine according to it the supported
- * featrues (e.g. DCB)
- * Get boot mode
- * ecore_get_pcie_width_speed, WOL capability.
- * Number of global CQ-s (for storage
- */
- ecore_hw_info_port_num(p_hwfn, p_ptt);
+ if (IS_LEAD_HWFN(p_hwfn))
+ ecore_hw_info_port_num(p_hwfn, p_ptt);
+
+ ecore_mcp_get_capabilities(p_hwfn, p_ptt);
#ifndef ASIC_ONLY
if (CHIP_REV_IS_ASIC(p_hwfn->p_dev)) {
@@ -3291,6 +3835,10 @@ ecore_get_hw_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
p_hwfn->mcp_info->func_info.ovlan;
ecore_mcp_cmd_port_init(p_hwfn, p_ptt);
+
+ ecore_mcp_get_eee_caps(p_hwfn, p_ptt);
+
+ ecore_mcp_read_ufp_config(p_hwfn, p_ptt);
}
if (personality != ECORE_PCI_DEFAULT) {
@@ -3336,7 +3884,7 @@ ecore_get_hw_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
* the resources/features depends on them.
* This order is not harmful if not forcing.
*/
- rc = ecore_hw_get_resc(p_hwfn, drv_resc_alloc);
+ rc = ecore_hw_get_resc(p_hwfn, p_ptt, drv_resc_alloc);
if (rc != ECORE_SUCCESS && p_params->b_relaxed_probe) {
rc = ECORE_SUCCESS;
p_params->p_relaxed_res = ECORE_HW_PREPARE_BAD_MCP;
@@ -3345,9 +3893,11 @@ ecore_get_hw_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
return rc;
}
-static enum _ecore_status_t ecore_get_dev_info(struct ecore_dev *p_dev)
+static enum _ecore_status_t ecore_get_dev_info(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
{
- struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
+ u16 device_id_mask;
u32 tmp;
/* Read Vendor Id / Device Id */
@@ -3357,21 +3907,27 @@ static enum _ecore_status_t ecore_get_dev_info(struct ecore_dev *p_dev)
&p_dev->device_id);
/* Determine type */
- if ((p_dev->device_id & ECORE_DEV_ID_MASK) == ECORE_DEV_ID_MASK_AH)
- p_dev->type = ECORE_DEV_TYPE_AH;
- else
+ device_id_mask = p_dev->device_id & ECORE_DEV_ID_MASK;
+ switch (device_id_mask) {
+ case ECORE_DEV_ID_MASK_BB:
p_dev->type = ECORE_DEV_TYPE_BB;
+ break;
+ case ECORE_DEV_ID_MASK_AH:
+ p_dev->type = ECORE_DEV_TYPE_AH;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, true, "Unknown device id 0x%x\n",
+ p_dev->device_id);
+ return ECORE_ABORTED;
+ }
- p_dev->chip_num = (u16)ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
- MISCS_REG_CHIP_NUM);
- p_dev->chip_rev = (u16)ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
- MISCS_REG_CHIP_REV);
-
- MASK_FIELD(CHIP_REV, p_dev->chip_rev);
+ tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_NUM);
+ p_dev->chip_num = (u16)GET_FIELD(tmp, CHIP_NUM);
+ tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_REV);
+ p_dev->chip_rev = (u8)GET_FIELD(tmp, CHIP_REV);
/* Learn number of HW-functions */
- tmp = ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
- MISCS_REG_CMT_ENABLED_FOR_PAIR);
+ tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_CMT_ENABLED_FOR_PAIR);
if (tmp & (1 << p_hwfn->rel_pf_id)) {
DP_NOTICE(p_dev->hwfns, false, "device in CMT mode\n");
@@ -3391,32 +3947,29 @@ static enum _ecore_status_t ecore_get_dev_info(struct ecore_dev *p_dev)
}
#endif
- p_dev->chip_bond_id = ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
- MISCS_REG_CHIP_TEST_REG) >> 4;
- MASK_FIELD(CHIP_BOND_ID, p_dev->chip_bond_id);
- p_dev->chip_metal = (u16)ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
- MISCS_REG_CHIP_METAL);
- MASK_FIELD(CHIP_METAL, p_dev->chip_metal);
+ tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_TEST_REG);
+ p_dev->chip_bond_id = (u8)GET_FIELD(tmp, CHIP_BOND_ID);
+ tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_METAL);
+ p_dev->chip_metal = (u8)GET_FIELD(tmp, CHIP_METAL);
+
DP_INFO(p_dev->hwfns,
- "Chip details - %s %c%d, Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n",
+ "Chip details - %s %c%d, Num: %04x Rev: %02x Bond id: %02x Metal: %02x\n",
ECORE_IS_BB(p_dev) ? "BB" : "AH",
'A' + p_dev->chip_rev, (int)p_dev->chip_metal,
p_dev->chip_num, p_dev->chip_rev, p_dev->chip_bond_id,
p_dev->chip_metal);
- if (ECORE_IS_BB(p_dev) && CHIP_REV_IS_A0(p_dev)) {
+ if (ECORE_IS_BB_A0(p_dev)) {
DP_NOTICE(p_dev->hwfns, false,
"The chip type/rev (BB A0) is not supported!\n");
return ECORE_ABORTED;
}
#ifndef ASIC_ONLY
if (CHIP_REV_IS_EMUL(p_dev) && ECORE_IS_AH(p_dev))
- ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
- MISCS_REG_PLL_MAIN_CTRL_4, 0x1);
+ ecore_wr(p_hwfn, p_ptt, MISCS_REG_PLL_MAIN_CTRL_4, 0x1);
if (CHIP_REV_IS_EMUL(p_dev)) {
- tmp = ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
- MISCS_REG_ECO_RESERVED);
+ tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_ECO_RESERVED);
if (tmp & (1 << 29)) {
DP_NOTICE(p_hwfn, false,
"Emulation: Running on a FULL build\n");
@@ -3446,7 +3999,6 @@ void ecore_prepare_hibernate(struct ecore_dev *p_dev)
"Mark hw/fw uninitialized\n");
p_hwfn->hw_init_done = false;
- p_hwfn->first_on_engine = false;
ecore_ptt_invalidate(p_hwfn);
}
@@ -3459,6 +4011,7 @@ ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn,
void OSAL_IOMEM * p_doorbells,
struct ecore_hw_prepare_params *p_params)
{
+ struct ecore_mdump_retain_data mdump_retain;
struct ecore_dev *p_dev = p_hwfn->p_dev;
struct ecore_mdump_info mdump_info;
enum _ecore_status_t rc = ECORE_SUCCESS;
@@ -3495,7 +4048,7 @@ ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn,
/* First hwfn learns basic information, e.g., number of hwfns */
if (!p_hwfn->my_id) {
- rc = ecore_get_dev_info(p_dev);
+ rc = ecore_get_dev_info(p_hwfn, p_hwfn->p_main_ptt);
if (rc != ECORE_SUCCESS) {
if (p_params->b_relaxed_probe)
p_params->p_relaxed_res =
@@ -3526,24 +4079,37 @@ ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn,
/* Sending a mailbox to the MFW should be after ecore_get_hw_info() is
* called, since among others it sets the ports number in an engine.
*/
- if (p_params->initiate_pf_flr && p_hwfn == ECORE_LEADING_HWFN(p_dev) &&
+ if (p_params->initiate_pf_flr && IS_LEAD_HWFN(p_hwfn) &&
!p_dev->recov_in_prog) {
rc = ecore_mcp_initiate_pf_flr(p_hwfn, p_hwfn->p_main_ptt);
if (rc != ECORE_SUCCESS)
DP_NOTICE(p_hwfn, false, "Failed to initiate PF FLR\n");
}
- /* Check if mdump logs are present and update the epoch value */
- if (p_hwfn == ECORE_LEADING_HWFN(p_hwfn->p_dev)) {
+ /* Check if mdump logs/data are present and update the epoch value */
+ if (IS_LEAD_HWFN(p_hwfn)) {
+#ifndef ASIC_ONLY
+ if (!CHIP_REV_IS_EMUL(p_dev)) {
+#endif
rc = ecore_mcp_mdump_get_info(p_hwfn, p_hwfn->p_main_ptt,
&mdump_info);
- if (rc == ECORE_SUCCESS && mdump_info.num_of_logs > 0) {
+ if (rc == ECORE_SUCCESS && mdump_info.num_of_logs)
DP_NOTICE(p_hwfn, false,
"* * * IMPORTANT - HW ERROR register dump captured by device * * *\n");
- }
+
+ rc = ecore_mcp_mdump_get_retain(p_hwfn, p_hwfn->p_main_ptt,
+ &mdump_retain);
+ if (rc == ECORE_SUCCESS && mdump_retain.valid)
+ DP_NOTICE(p_hwfn, false,
+ "mdump retained data: epoch 0x%08x, pf 0x%x, status 0x%08x\n",
+ mdump_retain.epoch, mdump_retain.pf,
+ mdump_retain.status);
ecore_mcp_mdump_set_values(p_hwfn, p_hwfn->p_main_ptt,
p_params->epoch);
+#ifndef ASIC_ONLY
+ }
+#endif
}
/* Allocate the init RT array and initialize the init-ops engine */
@@ -3605,17 +4171,21 @@ enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev,
p_params->personality = p_hwfn->hw_info.personality;
 	/* initialize 2nd hwfn if necessary */
- if (p_dev->num_hwfns > 1) {
+ if (ECORE_IS_CMT(p_dev)) {
void OSAL_IOMEM *p_regview, *p_doorbell;
u8 OSAL_IOMEM *addr;
/* adjust bar offset for second engine */
addr = (u8 OSAL_IOMEM *)p_dev->regview +
- ecore_hw_bar_size(p_hwfn, BAR_ID_0) / 2;
+ ecore_hw_bar_size(p_hwfn,
+ p_hwfn->p_main_ptt,
+ BAR_ID_0) / 2;
p_regview = (void OSAL_IOMEM *)addr;
addr = (u8 OSAL_IOMEM *)p_dev->doorbells +
- ecore_hw_bar_size(p_hwfn, BAR_ID_1) / 2;
+ ecore_hw_bar_size(p_hwfn,
+ p_hwfn->p_main_ptt,
+ BAR_ID_1) / 2;
p_doorbell = (void OSAL_IOMEM *)addr;
/* prepare second hw function */
@@ -3666,7 +4236,9 @@ void ecore_hw_remove(struct ecore_dev *p_dev)
ecore_hw_hwfn_free(p_hwfn);
ecore_mcp_free(p_hwfn);
+#ifdef CONFIG_ECORE_LOCK_ALLOC
OSAL_MUTEX_DEALLOC(&p_hwfn->dmae_info.mutex);
+#endif
}
ecore_iov_free_hw_info(p_dev);
@@ -3844,11 +4416,11 @@ ecore_chain_alloc_pbl(struct ecore_dev *p_dev,
struct ecore_chain *p_chain,
struct ecore_chain_ext_pbl *ext_pbl)
{
- void *p_virt = OSAL_NULL;
- u8 *p_pbl_virt = OSAL_NULL;
- void **pp_virt_addr_tbl = OSAL_NULL;
- dma_addr_t p_phys = 0, p_pbl_phys = 0;
u32 page_cnt = p_chain->page_cnt, size, i;
+ dma_addr_t p_phys = 0, p_pbl_phys = 0;
+ void **pp_virt_addr_tbl = OSAL_NULL;
+ u8 *p_pbl_virt = OSAL_NULL;
+ void *p_virt = OSAL_NULL;
size = page_cnt * sizeof(*pp_virt_addr_tbl);
pp_virt_addr_tbl = (void **)OSAL_VZALLOC(p_dev, size);
@@ -4061,9 +4633,10 @@ enum _ecore_status_t ecore_llh_add_mac_filter(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u8 *p_filter)
{
u32 high, low, entry_num;
- enum _ecore_status_t rc;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
- if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
+ if (!OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS,
+ &p_hwfn->p_dev->mf_bits))
return ECORE_SUCCESS;
high = p_filter[1] | (p_filter[0] << 8);
@@ -4084,7 +4657,7 @@ enum _ecore_status_t ecore_llh_add_mac_filter(struct ecore_hwfn *p_hwfn,
p_filter[0], p_filter[1], p_filter[2], p_filter[3],
p_filter[4], p_filter[5], entry_num);
- return ECORE_SUCCESS;
+ return rc;
}
static enum _ecore_status_t
@@ -4128,9 +4701,10 @@ void ecore_llh_remove_mac_filter(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u8 *p_filter)
{
u32 high, low, entry_num;
- enum _ecore_status_t rc;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
- if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
+ if (!OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS,
+ &p_hwfn->p_dev->mf_bits))
return;
high = p_filter[1] | (p_filter[0] << 8);
@@ -4202,10 +4776,11 @@ ecore_llh_add_protocol_filter(struct ecore_hwfn *p_hwfn,
enum ecore_llh_port_filter_type_t type)
{
u32 high, low, entry_num;
- enum _ecore_status_t rc;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
- if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
- return ECORE_SUCCESS;
+ if (!OSAL_TEST_BIT(ECORE_MF_LLH_PROTO_CLSS,
+ &p_hwfn->p_dev->mf_bits))
+ return rc;
high = 0;
low = 0;
@@ -4278,7 +4853,7 @@ ecore_llh_add_protocol_filter(struct ecore_hwfn *p_hwfn,
break;
}
- return ECORE_SUCCESS;
+ return rc;
}
static enum _ecore_status_t
@@ -4345,9 +4920,10 @@ ecore_llh_remove_protocol_filter(struct ecore_hwfn *p_hwfn,
enum ecore_llh_port_filter_type_t type)
{
u32 high, low, entry_num;
- enum _ecore_status_t rc;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
- if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
+ if (!OSAL_TEST_BIT(ECORE_MF_LLH_PROTO_CLSS,
+ &p_hwfn->p_dev->mf_bits))
return;
high = 0;
@@ -4415,7 +4991,10 @@ static void ecore_llh_clear_all_filters_bb_ah(struct ecore_hwfn *p_hwfn,
void ecore_llh_clear_all_filters(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
- if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
+ if (!OSAL_TEST_BIT(ECORE_MF_LLH_PROTO_CLSS,
+ &p_hwfn->p_dev->mf_bits) &&
+ !OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS,
+ &p_hwfn->p_dev->mf_bits))
return;
if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev))
@@ -4426,7 +5005,7 @@ enum _ecore_status_t
ecore_llh_set_function_as_default(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
- if (IS_MF_DEFAULT(p_hwfn) && ECORE_IS_BB(p_hwfn->p_dev)) {
+ if (OSAL_TEST_BIT(ECORE_MF_NEED_DEF_PF, &p_hwfn->p_dev->mf_bits)) {
ecore_wr(p_hwfn, p_ptt,
NIG_REG_LLH_TAGMAC_DEF_PF_VECTOR,
1 << p_hwfn->abs_pf_id / 2);
@@ -4526,7 +5105,7 @@ enum _ecore_status_t ecore_set_rxq_coalesce(struct ecore_hwfn *p_hwfn,
timeset = (u8)(coalesce >> timer_res);
rc = ecore_int_set_timer_res(p_hwfn, p_ptt, timer_res,
- p_cid->abs.sb_idx, false);
+ p_cid->sb_igu_id, false);
if (rc != ECORE_SUCCESS)
goto out;
@@ -4567,7 +5146,7 @@ enum _ecore_status_t ecore_set_txq_coalesce(struct ecore_hwfn *p_hwfn,
timeset = (u8)(coalesce >> timer_res);
rc = ecore_int_set_timer_res(p_hwfn, p_ptt, timer_res,
- p_cid->abs.sb_idx, true);
+ p_cid->sb_igu_id, true);
if (rc != ECORE_SUCCESS)
goto out;
@@ -4604,8 +5183,7 @@ static void ecore_configure_wfq_for_all_vports(struct ecore_hwfn *p_hwfn,
}
}
-static void
-ecore_init_wfq_default_param(struct ecore_hwfn *p_hwfn, u32 min_pf_rate)
+static void ecore_init_wfq_default_param(struct ecore_hwfn *p_hwfn)
{
int i;
@@ -4614,8 +5192,7 @@ ecore_init_wfq_default_param(struct ecore_hwfn *p_hwfn, u32 min_pf_rate)
}
static void ecore_disable_wfq_for_all_vports(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u32 min_pf_rate)
+ struct ecore_ptt *p_ptt)
{
struct init_qm_vport_params *vport_params;
int i;
@@ -4623,7 +5200,7 @@ static void ecore_disable_wfq_for_all_vports(struct ecore_hwfn *p_hwfn,
vport_params = p_hwfn->qm_info.qm_vport_params;
for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
- ecore_init_wfq_default_param(p_hwfn, min_pf_rate);
+ ecore_init_wfq_default_param(p_hwfn);
ecore_init_vport_wfq(p_hwfn, p_ptt,
vport_params[i].first_tx_pq_id,
vport_params[i].vport_wfq);
@@ -4664,13 +5241,6 @@ static enum _ecore_status_t ecore_init_wfq_param(struct ecore_hwfn *p_hwfn,
non_requested_count = num_vports - req_count;
/* validate possible error cases */
- if (req_rate > min_pf_rate) {
- DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
- "Vport [%d] - Requested rate[%d Mbps] is greater than configured PF min rate[%d Mbps]\n",
- vport_id, req_rate, min_pf_rate);
- return ECORE_INVAL;
- }
-
if (req_rate < min_pf_rate / ECORE_WFQ_UNIT) {
DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
"Vport [%d] - Requested rate[%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
@@ -4777,7 +5347,7 @@ static int __ecore_configure_vp_wfq_on_link_change(struct ecore_hwfn *p_hwfn,
if (rc == ECORE_SUCCESS && use_wfq)
ecore_configure_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
else
- ecore_disable_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
+ ecore_disable_wfq_for_all_vports(p_hwfn, p_ptt);
return rc;
}
@@ -4791,7 +5361,7 @@ int ecore_configure_vport_wfq(struct ecore_dev *p_dev, u16 vp_id, u32 rate)
int i, rc = ECORE_INVAL;
/* TBD - for multiple hardware functions - that is 100 gig */
- if (p_dev->num_hwfns > 1) {
+ if (ECORE_IS_CMT(p_dev)) {
DP_NOTICE(p_dev, false,
"WFQ configuration is not supported for this device\n");
return rc;
@@ -4820,12 +5390,13 @@ int ecore_configure_vport_wfq(struct ecore_dev *p_dev, u16 vp_id, u32 rate)
/* API to configure WFQ from mcp link change */
void ecore_configure_vp_wfq_on_link_change(struct ecore_dev *p_dev,
+ struct ecore_ptt *p_ptt,
u32 min_pf_rate)
{
int i;
/* TBD - for multiple hardware functions - that is 100 gig */
- if (p_dev->num_hwfns > 1) {
+ if (ECORE_IS_CMT(p_dev)) {
DP_VERBOSE(p_dev, ECORE_MSG_LINK,
"WFQ configuration is not supported for this device\n");
return;
@@ -4834,8 +5405,7 @@ void ecore_configure_vp_wfq_on_link_change(struct ecore_dev *p_dev,
for_each_hwfn(p_dev, i) {
struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
- __ecore_configure_vp_wfq_on_link_change(p_hwfn,
- p_hwfn->p_dpc_ptt,
+ __ecore_configure_vp_wfq_on_link_change(p_hwfn, p_ptt,
min_pf_rate);
}
}
@@ -4980,8 +5550,7 @@ void ecore_clean_wfq_db(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
p_link = &p_hwfn->mcp_info->link_output;
if (p_link->min_pf_rate)
- ecore_disable_wfq_for_all_vports(p_hwfn, p_ptt,
- p_link->min_pf_rate);
+ ecore_disable_wfq_for_all_vports(p_hwfn, p_ptt);
OSAL_MEMSET(p_hwfn->qm_info.wfq_data, 0,
sizeof(*p_hwfn->qm_info.wfq_data) *
@@ -4995,11 +5564,7 @@ int ecore_device_num_engines(struct ecore_dev *p_dev)
int ecore_device_num_ports(struct ecore_dev *p_dev)
{
- /* in CMT always only one port */
- if (p_dev->num_hwfns > 1)
- return 1;
-
- return p_dev->num_ports_in_engines * ecore_device_num_engines(p_dev);
+ return p_dev->num_ports;
}
void ecore_set_fw_mac_addr(__le16 *fw_msb,
diff --git a/drivers/net/qede/base/ecore_dev_api.h b/drivers/net/qede/base/ecore_dev_api.h
index 9126cf95..98bcabe8 100644
--- a/drivers/net/qede/base/ecore_dev_api.h
+++ b/drivers/net/qede/base/ecore_dev_api.h
@@ -57,22 +57,13 @@ enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev);
*/
void ecore_resc_setup(struct ecore_dev *p_dev);
-struct ecore_hw_init_params {
- /* Tunnelling parameters */
- struct ecore_tunnel_info *p_tunn;
-
- bool b_hw_start;
-
- /* Interrupt mode [msix, inta, etc.] to use */
- enum ecore_int_mode int_mode;
-
- /* NPAR tx switching to be used for vports configured for tx-switching
- */
- bool allow_npar_tx_switch;
-
- /* Binary fw data pointer in binary fw file */
- const u8 *bin_fw_data;
+enum ecore_override_force_load {
+ ECORE_OVERRIDE_FORCE_LOAD_NONE,
+ ECORE_OVERRIDE_FORCE_LOAD_ALWAYS,
+ ECORE_OVERRIDE_FORCE_LOAD_NEVER,
+};
+struct ecore_drv_load_params {
/* Indicates whether the driver is running over a crash kernel.
* As part of the load request, this will be used for providing the
* driver role to the MFW.
@@ -90,6 +81,29 @@ struct ecore_hw_init_params {
/* Avoid engine reset when first PF loads on it */
bool avoid_eng_reset;
+
+ /* Allow overriding the default force load behavior */
+ enum ecore_override_force_load override_force_load;
+};
+
+struct ecore_hw_init_params {
+ /* Tunneling parameters */
+ struct ecore_tunnel_info *p_tunn;
+
+ bool b_hw_start;
+
+ /* Interrupt mode [msix, inta, etc.] to use */
+ enum ecore_int_mode int_mode;
+
+ /* NPAR tx switching to be used for vports configured for tx-switching
+ */
+ bool allow_npar_tx_switch;
+
+ /* Binary fw data pointer in binary fw file */
+ const u8 *bin_fw_data;
+
+ /* Driver load parameters */
+ struct ecore_drv_load_params *p_drv_load_params;
};
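A hypothetical caller-side sketch of the new split, with the load-time knobs gathered in ecore_drv_load_params and attached through p_drv_load_params. ECORE_INT_MODE_MSIX and OSAL_MEM_ZERO are assumed from ecore_int_api.h and the OSAL layer respectively:

static void example_fill_hw_init_params(struct ecore_hw_init_params *p_params,
					struct ecore_drv_load_params *p_load,
					struct ecore_tunnel_info *p_tunn,
					const u8 *fw_data)
{
	OSAL_MEM_ZERO(p_load, sizeof(*p_load));
	p_load->avoid_eng_reset = false;
	p_load->override_force_load = ECORE_OVERRIDE_FORCE_LOAD_NONE;

	OSAL_MEM_ZERO(p_params, sizeof(*p_params));
	p_params->p_tunn = p_tunn;
	p_params->b_hw_start = true;
	p_params->int_mode = ECORE_INT_MODE_MSIX; /* assumed enum value */
	p_params->allow_npar_tx_switch = true;
	p_params->bin_fw_data = fw_data;
	p_params->p_drv_load_params = p_load;	/* new indirection */
}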
/**
@@ -128,8 +142,9 @@ enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev);
*
* @param p_dev
*
+ * @return enum _ecore_status_t
*/
-void ecore_hw_stop_fastpath(struct ecore_dev *p_dev);
+enum _ecore_status_t ecore_hw_stop_fastpath(struct ecore_dev *p_dev);
#ifndef LINUX_REMOVE
/**
@@ -140,16 +155,62 @@ void ecore_hw_stop_fastpath(struct ecore_dev *p_dev);
*
*/
void ecore_prepare_hibernate(struct ecore_dev *p_dev);
+
+enum ecore_db_rec_width {
+ DB_REC_WIDTH_32B,
+ DB_REC_WIDTH_64B,
+};
+
+enum ecore_db_rec_space {
+ DB_REC_KERNEL,
+ DB_REC_USER,
+};
+
+/**
+ * @brief db_recovery_add - add doorbell information to the doorbell
+ * recovery mechanism.
+ *
+ * @param p_dev
+ * @param db_addr - doorbell address
+ * @param db_data - address of where db_data is stored
+ * @param db_width - doorbell is 32b or 64b
+ * @param db_space - doorbell recovery addresses are user or kernel space
+ */
+enum _ecore_status_t ecore_db_recovery_add(struct ecore_dev *p_dev,
+ void OSAL_IOMEM *db_addr,
+ void *db_data,
+ enum ecore_db_rec_width db_width,
+ enum ecore_db_rec_space db_space);
+
+/**
+ * @brief db_recovery_del - remove doorbell information from the doorbell
+ * recovery mechanism. db_data serves as key (db_addr is not unique).
+ *
+ * @param p_dev
+ * @param db_addr - doorbell address
+ * @param db_data - address where db_data is stored. Serves as key for the
+ * entry to delete.
+ */
+enum _ecore_status_t ecore_db_recovery_del(struct ecore_dev *p_dev,
+ void OSAL_IOMEM *db_addr,
+ void *db_data);
+
+static OSAL_INLINE bool ecore_is_mf_ufp(struct ecore_hwfn *p_hwfn)
+{
+ return !!OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits);
+}
+
#endif
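A usage sketch for the recovery hooks declared above (hypothetical caller, not part of the patch). The same db_data pointer must be passed to both calls, since it serves as the lookup key:

static enum _ecore_status_t example_db_track(struct ecore_dev *p_dev,
					     void OSAL_IOMEM *db_addr,
					     u32 *db_data)
{
	enum _ecore_status_t rc;

	rc = ecore_db_recovery_add(p_dev, db_addr, db_data,
				   DB_REC_WIDTH_32B, DB_REC_KERNEL);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* ... doorbell in use; on a doorbell drop the mechanism can replay
	 * *db_data to db_addr ...
	 */

	return ecore_db_recovery_del(p_dev, db_addr, db_data);
}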
/**
 * @brief ecore_hw_start_fastpath - restart fastpath traffic,
* only if hw_stop_fastpath was called
- * @param p_dev
+ * @param p_hwfn
*
+ * @return enum _ecore_status_t
*/
-void ecore_hw_start_fastpath(struct ecore_hwfn *p_hwfn);
+enum _ecore_status_t ecore_hw_start_fastpath(struct ecore_hwfn *p_hwfn);
enum ecore_hw_prepare_result {
ECORE_HW_PREPARE_SUCCESS,
@@ -240,7 +301,6 @@ struct ecore_ptt *ecore_ptt_acquire(struct ecore_hwfn *p_hwfn);
void ecore_ptt_release(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
-#ifndef __EXTRACT__LINUX__
struct ecore_eth_stats_common {
u64 no_buff_discards;
u64 packet_too_big_discard;
@@ -331,7 +391,6 @@ struct ecore_eth_stats {
struct ecore_eth_stats_ah ah;
};
};
-#endif
enum ecore_dmae_address_type_t {
ECORE_DMAE_ADDRESS_HOST_VIRT,
@@ -580,6 +639,20 @@ enum _ecore_status_t ecore_final_cleanup(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u16 id,
bool is_vf);
+
+/**
+ * @brief ecore_get_queue_coalesce - Retrieve coalesce value for a given queue.
+ *
+ * @param p_hwfn
+ * @param coal - store coalesce value read from the hardware.
+ * @param handle
+ *
+ * @return enum _ecore_status_t
+ **/
+enum _ecore_status_t
+ecore_get_queue_coalesce(struct ecore_hwfn *p_hwfn, u16 *coal,
+ void *handle);
+
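A minimal read-back sketch, assuming p_rxq_handle is the opaque queue cookie returned by the queue-start path:

u16 coal = 0;

if (ecore_get_queue_coalesce(p_hwfn, &coal, p_rxq_handle) == ECORE_SUCCESS)
	DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "queue coalesce = %u usec\n", coal);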
/**
* @brief ecore_set_queue_coalesce - Configure coalesce parameters for Rx and
* Tx queue. The fact that we can configure coalescing to up to 511, but on
diff --git a/drivers/net/qede/base/ecore_hsi_common.h b/drivers/net/qede/base/ecore_hsi_common.h
index 5c2a08f9..d8abd604 100644
--- a/drivers/net/qede/base/ecore_hsi_common.h
+++ b/drivers/net/qede/base/ecore_hsi_common.h
@@ -618,7 +618,7 @@ struct ustorm_core_conn_st_ctx {
/*
* core connection context
*/
-struct core_conn_context {
+struct e4_core_conn_context {
/* ystorm storm context */
struct ystorm_core_conn_st_ctx ystorm_st_context;
struct regpair ystorm_st_padding[2] /* padding */;
@@ -661,6 +661,7 @@ enum core_event_opcode {
CORE_EVENT_RX_QUEUE_START,
CORE_EVENT_RX_QUEUE_STOP,
CORE_EVENT_RX_QUEUE_FLUSH,
+ CORE_EVENT_TX_QUEUE_UPDATE,
MAX_CORE_EVENT_OPCODE
};
@@ -745,6 +746,7 @@ enum core_ramrod_cmd_id {
CORE_RAMROD_RX_QUEUE_STOP /* RX Queue Stop Ramrod */,
CORE_RAMROD_TX_QUEUE_STOP /* TX Queue Stop Ramrod */,
CORE_RAMROD_RX_QUEUE_FLUSH /* RX Flush queue Ramrod */,
+ CORE_RAMROD_TX_QUEUE_UPDATE /* TX Queue Update Ramrod */,
MAX_CORE_RAMROD_CMD_ID
};
@@ -858,7 +860,8 @@ struct core_rx_gsi_offload_cqe {
__le16 src_mac_addrlo /* lo 2 bytes of source mac address */;
/* These are the lower 16 bit of QP id in RoCE BTH header */
__le16 qp_id;
- __le32 gid_dst[4] /* Gid destination address */;
+ __le32 src_qp /* Source QP from DETH header */;
+ __le32 reserved[3];
};
/*
@@ -899,7 +902,10 @@ struct core_rx_start_ramrod_data {
u8 drop_ttl0_flg /* drop packet with ttl0 if set */;
__le16 num_of_pbl_pages /* Num of pages in CQE PBL */;
/* if set, 802.1q tags will be removed and copied to CQE */
- u8 inner_vlan_removal_en;
+ u8 inner_vlan_stripping_en;
+/* if set, outer tag won't be stripped, valid only in MF OVLAN. */
+ u8 outer_vlan_stripping_dis;
u8 queue_id /* Light L2 RX Queue ID */;
u8 main_func_queue /* Is this the main queue for the PF */;
/* Duplicate broadcast packets to LL2 main queue in mf_si mode. Valid if
@@ -916,7 +922,7 @@ struct core_rx_start_ramrod_data {
struct core_rx_action_on_error action_on_error;
/* set when in GSI offload mode on ROCE connection */
u8 gsi_offload_flag;
- u8 reserved[7];
+ u8 reserved[6];
};
@@ -938,48 +944,51 @@ struct core_rx_stop_ramrod_data {
struct core_tx_bd_data {
__le16 as_bitfield;
/* Do not allow additional VLAN manipulations on this packet (DCB) */
-#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_MASK 0x1
-#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_SHIFT 0
+#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_MASK 0x1
+#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_SHIFT 0
/* Insert VLAN into packet */
-#define CORE_TX_BD_DATA_VLAN_INSERTION_MASK 0x1
-#define CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT 1
+#define CORE_TX_BD_DATA_VLAN_INSERTION_MASK 0x1
+#define CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT 1
/* This is the first BD of the packet (for debug) */
-#define CORE_TX_BD_DATA_START_BD_MASK 0x1
-#define CORE_TX_BD_DATA_START_BD_SHIFT 2
+#define CORE_TX_BD_DATA_START_BD_MASK 0x1
+#define CORE_TX_BD_DATA_START_BD_SHIFT 2
/* Calculate the IP checksum for the packet */
-#define CORE_TX_BD_DATA_IP_CSUM_MASK 0x1
-#define CORE_TX_BD_DATA_IP_CSUM_SHIFT 3
+#define CORE_TX_BD_DATA_IP_CSUM_MASK 0x1
+#define CORE_TX_BD_DATA_IP_CSUM_SHIFT 3
/* Calculate the L4 checksum for the packet */
-#define CORE_TX_BD_DATA_L4_CSUM_MASK 0x1
-#define CORE_TX_BD_DATA_L4_CSUM_SHIFT 4
+#define CORE_TX_BD_DATA_L4_CSUM_MASK 0x1
+#define CORE_TX_BD_DATA_L4_CSUM_SHIFT 4
/* Packet is IPv6 with extensions */
-#define CORE_TX_BD_DATA_IPV6_EXT_MASK 0x1
-#define CORE_TX_BD_DATA_IPV6_EXT_SHIFT 5
+#define CORE_TX_BD_DATA_IPV6_EXT_MASK 0x1
+#define CORE_TX_BD_DATA_IPV6_EXT_SHIFT 5
 /* If IPv6+ext, and if l4_csum is 1, then this field indicates L4 protocol:
* 0-TCP, 1-UDP
*/
-#define CORE_TX_BD_DATA_L4_PROTOCOL_MASK 0x1
-#define CORE_TX_BD_DATA_L4_PROTOCOL_SHIFT 6
+#define CORE_TX_BD_DATA_L4_PROTOCOL_MASK 0x1
+#define CORE_TX_BD_DATA_L4_PROTOCOL_SHIFT 6
/* The pseudo checksum mode to place in the L4 checksum field. Required only
* when IPv6+ext and l4_csum is set. (use enum core_l4_pseudo_checksum_mode)
*/
-#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_MASK 0x1
-#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_SHIFT 7
+#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_MASK 0x1
+#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_SHIFT 7
/* Number of BDs that make up one packet - width wide enough to present
* CORE_LL2_TX_MAX_BDS_PER_PACKET
*/
-#define CORE_TX_BD_DATA_NBDS_MASK 0xF
-#define CORE_TX_BD_DATA_NBDS_SHIFT 8
+#define CORE_TX_BD_DATA_NBDS_MASK 0xF
+#define CORE_TX_BD_DATA_NBDS_SHIFT 8
/* Use roce_flavor enum - Differentiate between Roce flavors is valid when
* connType is ROCE (use enum core_roce_flavor_type)
*/
-#define CORE_TX_BD_DATA_ROCE_FLAV_MASK 0x1
-#define CORE_TX_BD_DATA_ROCE_FLAV_SHIFT 12
+#define CORE_TX_BD_DATA_ROCE_FLAV_MASK 0x1
+#define CORE_TX_BD_DATA_ROCE_FLAV_SHIFT 12
/* Calculate ip length */
-#define CORE_TX_BD_DATA_IP_LEN_MASK 0x1
-#define CORE_TX_BD_DATA_IP_LEN_SHIFT 13
-#define CORE_TX_BD_DATA_RESERVED0_MASK 0x3
-#define CORE_TX_BD_DATA_RESERVED0_SHIFT 14
+#define CORE_TX_BD_DATA_IP_LEN_MASK 0x1
+#define CORE_TX_BD_DATA_IP_LEN_SHIFT 13
+/* disables the STAG insertion, relevant only in MF OVLAN mode. */
+#define CORE_TX_BD_DATA_DISABLE_STAG_INSERTION_MASK 0x1
+#define CORE_TX_BD_DATA_DISABLE_STAG_INSERTION_SHIFT 14
+#define CORE_TX_BD_DATA_RESERVED0_MASK 0x1
+#define CORE_TX_BD_DATA_RESERVED0_SHIFT 15
};
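The as_bitfield word above is composed with the generic SET_FIELD helper used throughout these headers, which shifts and masks via the name##_MASK / name##_SHIFT pairs. An illustrative composition for a two-BD packet that needs L4 checksum, where p_bd is a hypothetical struct core_tx_bd pointer:

u16 bd_data = 0;

SET_FIELD(bd_data, CORE_TX_BD_DATA_START_BD, 1);
SET_FIELD(bd_data, CORE_TX_BD_DATA_L4_CSUM, 1);
SET_FIELD(bd_data, CORE_TX_BD_DATA_NBDS, 2);
p_bd->bd_data.as_bitfield = OSAL_CPU_TO_LE16(bd_data);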
/*
@@ -1046,6 +1055,17 @@ struct core_tx_stop_ramrod_data {
/*
+ * Ramrod data for tx queue update ramrod
+ */
+struct core_tx_update_ramrod_data {
+ u8 update_qm_pq_id_flg /* Flag to Update QM PQ ID */;
+ u8 reserved0;
+ __le16 qm_pq_id /* Updated QM PQ ID */;
+ __le32 reserved1[1];
+};
+
+
+/*
* Enum flag for what type of dcb data to update
*/
enum dcb_dscp_update_mode {
@@ -1182,6 +1202,63 @@ struct eth_ustorm_per_queue_stat {
/*
+ * Event Ring VF-PF Channel data
+ */
+struct vf_pf_channel_eqe_data {
+ struct regpair msg_addr /* VF-PF message address */;
+};
+
+/*
+ * Event Ring malicious VF data
+ */
+struct malicious_vf_eqe_data {
+ u8 vf_id /* Malicious VF ID */;
+ u8 err_id /* Malicious VF error (use enum malicious_vf_error_id) */;
+ __le16 reserved[3];
+};
+
+/*
+ * Event Ring initial cleanup data
+ */
+struct initial_cleanup_eqe_data {
+ u8 vf_id /* VF ID */;
+ u8 reserved[7];
+};
+
+/*
+ * Event Data Union
+ */
+union event_ring_data {
+ u8 bytes[8] /* Byte Array */;
+ struct vf_pf_channel_eqe_data vf_pf_channel /* VF-PF Channel data */;
+ struct iscsi_eqe_data iscsi_info /* Dedicated fields to iscsi data */;
+/* Dedicated fields to iscsi connect done results */
+ struct iscsi_connect_done_results iscsi_conn_done_info;
+ struct malicious_vf_eqe_data malicious_vf /* Malicious VF data */;
+/* VF Initial Cleanup data */
+ struct initial_cleanup_eqe_data vf_init_cleanup;
+};
+
+
+/*
+ * Event Ring Entry
+ */
+struct event_ring_entry {
+ u8 protocol_id /* Event Protocol ID (use enum protocol_type) */;
+ u8 opcode /* Event Opcode */;
+ __le16 reserved0 /* Reserved */;
+ __le16 echo /* Echo value from ramrod data on the host */;
+ u8 fw_return_code /* FW return code for SP ramrods */;
+ u8 flags;
+/* 0: synchronous EQE - a completion of SP message. 1: asynchronous EQE */
+#define EVENT_RING_ENTRY_ASYNC_MASK 0x1
+#define EVENT_RING_ENTRY_ASYNC_SHIFT 0
+#define EVENT_RING_ENTRY_RESERVED1_MASK 0x7F
+#define EVENT_RING_ENTRY_RESERVED1_SHIFT 1
+ union event_ring_data data;
+};
+
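A consumption sketch for the new entry layout; GET_FIELD is the companion of SET_FIELD in these headers:

static bool example_eqe_is_async(const struct event_ring_entry *p_eqe)
{
	/* Bit 0 of flags: 0 = SP completion, 1 = asynchronous event */
	return GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC) != 0;
}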
+/*
* Event Ring Next Page Address
*/
struct event_ring_next_addr {
@@ -1211,6 +1288,18 @@ enum fw_flow_ctrl_mode {
/*
+ * GFT profile type.
+ */
+enum gft_profile_type {
+ GFT_PROFILE_TYPE_4_TUPLE /* 4 tuple, IP type and L4 type match. */,
+/* L4 destination port, IP type and L4 type match. */
+ GFT_PROFILE_TYPE_L4_DST_PORT,
+ GFT_PROFILE_TYPE_IP_DST_PORT /* IP destination port and IP type. */,
+ MAX_GFT_PROFILE_TYPE
+};
+
+
+/*
* Major and Minor hsi Versions
*/
struct hsi_fp_ver_struct {
@@ -1311,6 +1400,34 @@ struct mstorm_vf_zone {
/*
+ * vlan header including TPID and TCI fields
+ */
+struct vlan_header {
+ __le16 tpid /* Tag Protocol Identifier */;
+ __le16 tci /* Tag Control Information */;
+};
+
+/*
+ * outer tag configurations
+ */
+struct outer_tag_config_struct {
+/* Enables the STAG priority change. Should be 1 for Bette Davis and UFP with
+ * Host Control mode, else 0.
+ */
+ u8 enable_stag_pri_change;
+/* If inner_to_outer_pri_map is initialized then set pri_map_valid */
+ u8 pri_map_valid;
+ u8 reserved[2];
+/* In case mf_mode is MF_OVLAN, this field specifies the outer tag protocol
+ * identifier and outer tag control information
+ */
+ struct vlan_header outer_tag;
+/* Map from inner to outer priority. Set pri_map_valid when initializing it */
+ u8 inner_to_outer_pri_map[8];
+};
+
+
+/*
* personality per PF
*/
enum personality_type {
@@ -1361,7 +1478,6 @@ struct pf_start_ramrod_data {
struct regpair consolid_q_pbl_addr;
/* tunnel configuration. */
struct pf_start_tunnel_config tunnel_config;
- __le32 reserved;
__le16 event_ring_sb_id /* Status block ID */;
/* All VfIds owned by Pf will be from baseVfId till baseVfId+numVfs */
u8 base_vf_id;
@@ -1381,16 +1497,11 @@ struct pf_start_ramrod_data {
u8 integ_phase /* Integration phase */;
/* If set, inter-pf tx switching is allowed in Switch Independent func mode */
u8 allow_npar_tx_switching;
-/* Map from inner to outer priority. Set pri_map_valid when init map */
- u8 inner_to_outer_pri_map[8];
-/* If inner_to_outer_pri_map is initialize then set pri_map_valid */
- u8 pri_map_valid;
-/* In case mf_mode is MF_OVLAN, this field specifies the outer vlan
- * (lower 16 bits) and ethType to use (higher 16 bits)
- */
- __le32 outer_tag;
+ u8 reserved0;
/* FP HSI version to be used by FW */
struct hsi_fp_ver_struct hsi_fp_ver;
+/* Outer tag configurations */
+ struct outer_tag_config_struct outer_tag_config;
};
@@ -1441,15 +1552,19 @@ struct pf_update_tunnel_config {
* Data for port update ramrod
*/
struct pf_update_ramrod_data {
- u8 pf_id;
- u8 update_eth_dcb_data_mode /* Update Eth DCB data indication */;
- u8 update_fcoe_dcb_data_mode /* Update FCOE DCB data indication */;
- u8 update_iscsi_dcb_data_mode /* Update iSCSI DCB data indication */;
+/* Update Eth DCB data indication (use enum dcb_dscp_update_mode) */
+ u8 update_eth_dcb_data_mode;
+/* Update FCOE DCB data indication (use enum dcb_dscp_update_mode) */
+ u8 update_fcoe_dcb_data_mode;
+/* Update iSCSI DCB data indication (use enum dcb_dscp_update_mode) */
+ u8 update_iscsi_dcb_data_mode;
u8 update_roce_dcb_data_mode /* Update ROCE DCB data indication */;
/* Update RROCE (RoceV2) DCB data indication */
u8 update_rroce_dcb_data_mode;
u8 update_iwarp_dcb_data_mode /* Update IWARP DCB data indication */;
u8 update_mf_vlan_flag /* Update MF outer vlan Id */;
+/* Update Enable STAG Priority Change indication */
+ u8 update_enable_stag_pri_change;
struct protocol_dcb_data eth_dcb_data /* core eth related fields */;
struct protocol_dcb_data fcoe_dcb_data /* core fcoe related fields */;
/* core iscsi related fields */
@@ -1460,7 +1575,11 @@ struct pf_update_ramrod_data {
/* core iwarp related fields */
struct protocol_dcb_data iwarp_dcb_data;
__le16 mf_vlan /* new outer vlan id value */;
- __le16 reserved;
+/* Enables the inner to outer TAG priority mapping. Should be 1 for Bette
+ * Davis and UFP with Host Control mode, else 0.
+ */
+ u8 enable_stag_pri_change;
+ u8 reserved;
/* tunnel configuration. */
struct pf_update_tunnel_config tunnel_config;
};
@@ -1745,6 +1864,7 @@ enum vf_zone_size_mode {
+
/*
* Attentions status block
*/
@@ -1758,17 +1878,6 @@ struct atten_status_block {
/*
- * Igu cleanup bit values to distinguish between clean or producer consumer
- * update.
- */
-enum command_type_bit {
- IGU_COMMAND_TYPE_NOP = 0,
- IGU_COMMAND_TYPE_SET = 1,
- MAX_COMMAND_TYPE_BIT
-};
-
-
-/*
* DMAE command
*/
struct dmae_cmd {
@@ -2200,23 +2309,23 @@ struct qm_rf_opportunistic_mask {
/*
* QM hardware structure of QM map memory
*/
-struct qm_rf_pq_map {
+struct qm_rf_pq_map_e4 {
__le32 reg;
-#define QM_RF_PQ_MAP_PQ_VALID_MASK 0x1 /* PQ active */
-#define QM_RF_PQ_MAP_PQ_VALID_SHIFT 0
-#define QM_RF_PQ_MAP_RL_ID_MASK 0xFF /* RL ID */
-#define QM_RF_PQ_MAP_RL_ID_SHIFT 1
+#define QM_RF_PQ_MAP_E4_PQ_VALID_MASK 0x1 /* PQ active */
+#define QM_RF_PQ_MAP_E4_PQ_VALID_SHIFT 0
+#define QM_RF_PQ_MAP_E4_RL_ID_MASK 0xFF /* RL ID */
+#define QM_RF_PQ_MAP_E4_RL_ID_SHIFT 1
/* the first PQ associated with the VPORT and VOQ of this PQ */
-#define QM_RF_PQ_MAP_VP_PQ_ID_MASK 0x1FF
-#define QM_RF_PQ_MAP_VP_PQ_ID_SHIFT 9
-#define QM_RF_PQ_MAP_VOQ_MASK 0x1F /* VOQ */
-#define QM_RF_PQ_MAP_VOQ_SHIFT 18
-#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_MASK 0x3 /* WRR weight */
-#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_SHIFT 23
-#define QM_RF_PQ_MAP_RL_VALID_MASK 0x1 /* RL active */
-#define QM_RF_PQ_MAP_RL_VALID_SHIFT 25
-#define QM_RF_PQ_MAP_RESERVED_MASK 0x3F
-#define QM_RF_PQ_MAP_RESERVED_SHIFT 26
+#define QM_RF_PQ_MAP_E4_VP_PQ_ID_MASK 0x1FF
+#define QM_RF_PQ_MAP_E4_VP_PQ_ID_SHIFT 9
+#define QM_RF_PQ_MAP_E4_VOQ_MASK 0x1F /* VOQ */
+#define QM_RF_PQ_MAP_E4_VOQ_SHIFT 18
+#define QM_RF_PQ_MAP_E4_WRR_WEIGHT_GROUP_MASK 0x3 /* WRR weight */
+#define QM_RF_PQ_MAP_E4_WRR_WEIGHT_GROUP_SHIFT 23
+#define QM_RF_PQ_MAP_E4_RL_VALID_MASK 0x1 /* RL active */
+#define QM_RF_PQ_MAP_E4_RL_VALID_SHIFT 25
+#define QM_RF_PQ_MAP_E4_RESERVED_MASK 0x3F
+#define QM_RF_PQ_MAP_E4_RESERVED_SHIFT 26
};
diff --git a/drivers/net/qede/base/ecore_hsi_debug_tools.h b/drivers/net/qede/base/ecore_hsi_debug_tools.h
index 7443ff9d..ebb66482 100644
--- a/drivers/net/qede/base/ecore_hsi_debug_tools.h
+++ b/drivers/net/qede/base/ecore_hsi_debug_tools.h
@@ -1053,7 +1053,7 @@ enum dbg_status {
DBG_STATUS_MCP_TRACE_NO_META,
DBG_STATUS_MCP_COULD_NOT_HALT,
DBG_STATUS_MCP_COULD_NOT_RESUME,
- DBG_STATUS_DMAE_FAILED,
+ DBG_STATUS_RESERVED2,
DBG_STATUS_SEMI_FIFO_NOT_EMPTY,
DBG_STATUS_IGU_FIFO_BAD_DATA,
DBG_STATUS_MCP_COULD_NOT_MASK_PRTY,
@@ -1107,7 +1107,9 @@ struct dbg_tools_data {
u8 chip_id /* Chip ID (from enum chip_ids) */;
u8 platform_id /* Platform ID */;
u8 initialized /* Indicates if the data was initialized */;
- u8 reserved;
+ u8 use_dmae /* Indicates if DMAE should be used */;
+/* Number of registers that were read since the last log */
+ __le32 num_regs_read;
};
diff --git a/drivers/net/qede/base/ecore_hsi_eth.h b/drivers/net/qede/base/ecore_hsi_eth.h
index 397c408d..ffbf5c71 100644
--- a/drivers/net/qede/base/ecore_hsi_eth.h
+++ b/drivers/net/qede/base/ecore_hsi_eth.h
@@ -669,7 +669,7 @@ struct mstorm_eth_conn_st_ctx {
/*
* eth connection context
*/
-struct eth_conn_context {
+struct e4_eth_conn_context {
/* tstorm storm context */
struct tstorm_eth_conn_st_ctx tstorm_st_context;
struct regpair tstorm_st_padding[2] /* padding */;
@@ -765,6 +765,7 @@ enum eth_event_opcode {
ETH_EVENT_RX_DELETE_UDP_FILTER,
ETH_EVENT_RX_CREATE_GFT_ACTION,
ETH_EVENT_RX_GFT_UPDATE_FILTER,
+ ETH_EVENT_TX_QUEUE_UPDATE,
MAX_ETH_EVENT_OPCODE
};
@@ -882,6 +883,7 @@ enum eth_ramrod_cmd_id {
ETH_RAMROD_RX_CREATE_GFT_ACTION /* RX - Create a Gft Action */,
/* RX - Add/Delete a GFT Filter to the Searcher */
ETH_RAMROD_GFT_UPDATE_FILTER,
+ ETH_RAMROD_TX_QUEUE_UPDATE /* TX Queue Update Ramrod */,
MAX_ETH_RAMROD_CMD_ID
};
@@ -1092,7 +1094,7 @@ struct eth_vport_tx_mode {
/*
- * Ramrod data for rx create gft action
+ * GFT filter update action type.
*/
enum gft_filter_update_action {
GFT_ADD_FILTER,
@@ -1101,16 +1103,6 @@ enum gft_filter_update_action {
};
-/*
- * Ramrod data for rx create gft action
- */
-enum gft_logic_filter_type {
- GFT_FILTER_TYPE /* flow FW is GFT-logic as well */,
- RFS_FILTER_TYPE /* flow FW is A-RFS-logic */,
- MAX_GFT_LOGIC_FILTER_TYPE
-};
-
-
/*
@@ -1166,7 +1158,7 @@ struct rx_create_openflow_action_data {
*/
struct rx_queue_start_ramrod_data {
__le16 rx_queue_id /* ID of RX queue */;
- __le16 num_of_pbl_pages /* Num of pages in CQE PBL */;
+ __le16 num_of_pbl_pages /* Number of pages in CQE PBL */;
 __le16 bd_max_bytes /* maximal bytes that can be placed on the bd */;
__le16 sb_id /* Status block ID */;
u8 sb_index /* index of the protocol index */;
@@ -1254,26 +1246,34 @@ struct rx_udp_filter_data {
/*
- * Ramrod to add filter - filter is packet headr of type of packet wished to
- * pass certin FW flow
+ * add or delete a GFT filter - the filter is the packet header of the packet
+ * type wished to pass a certain FW flow
*/
struct rx_update_gft_filter_data {
/* Pointer to Packet Header That Defines GFT Filter */
struct regpair pkt_hdr_addr;
__le16 pkt_hdr_length /* Packet Header Length */;
-/* If is_rfs flag is set: Queue Id to associate filter with else: action icid */
- __le16 rx_qid_or_action_icid;
-/* Field is used if is_rfs flag is set: vport Id of which to associate filter
- * with
- */
- u8 vport_id;
-/* Use enum to set type of flow using gft HW logic blocks */
- u8 filter_type;
+/* Action icid. Valid if action_icid_valid flag set. */
+ __le16 action_icid;
+ __le16 rx_qid /* RX queue ID. Valid if rx_qid_valid set. */;
+ __le16 flow_id /* RX flow ID. Valid if flow_id_valid set. */;
+ u8 vport_id /* RX vport Id. */;
+/* If set, action_icid will be used for GFT filter update. */
+ u8 action_icid_valid;
+/* If set, rx_qid will be used for traffic steering, in addition to vport_id.
+ * flow_id_valid must be cleared. If cleared, the queue ID will be selected by
+ * RSS.
+ */
+ u8 rx_qid_valid;
+/* If set, flow_id will be reported by the CQE, and rx_qid_valid must be
+ * cleared. If cleared, flow_id 0 will be reported by the CQE.
+ */
+ u8 flow_id_valid;
u8 filter_action /* Use to set type of action on filter */;
/* 0 - dont assert in case of error. Just return an error code. 1 - assert in
* case of error.
*/
u8 assert_on_error;
+ u8 reserved[2];
};
@@ -1344,6 +1344,17 @@ struct tx_queue_stop_ramrod_data {
};
+/*
+ * Ramrod data for tx queue update ramrod
+ */
+struct tx_queue_update_ramrod_data {
+ __le16 update_qm_pq_id_flg /* Flag to Update QM PQ ID */;
+ __le16 qm_pq_id /* Updated QM PQ ID */;
+ __le32 reserved0;
+ struct regpair reserved1[5];
+};
+
+
/*
* Ramrod data for vport update ramrod
@@ -1388,9 +1399,9 @@ struct vport_start_ramrod_data {
/* If set, ETH header padding will not be inserted. placement_offset will be zero.
*/
u8 zero_placement_offset;
-/* If set, Contorl frames will be filtered according to MAC check. */
+/* If set, control frames will be filtered according to MAC check. */
u8 ctl_frame_mac_check_en;
-/* If set, Contorl frames will be filtered according to ethtype check. */
+/* If set, control frames will be filtered according to ethtype check. */
u8 ctl_frame_ethtype_check_en;
u8 reserved[5];
};
@@ -1456,9 +1467,9 @@ struct vport_update_ramrod_data_cmn {
* updated
*/
u8 update_ctl_frame_checks_en_flg;
-/* If set, Contorl frames will be filtered according to MAC check. */
+/* If set, control frames will be filtered according to MAC check. */
u8 ctl_frame_mac_check_en;
-/* If set, Contorl frames will be filtered according to ethtype check. */
+/* If set, control frames will be filtered according to ethtype check. */
u8 ctl_frame_ethtype_check_en;
u8 reserved[15];
};
diff --git a/drivers/net/qede/base/ecore_hw.c b/drivers/net/qede/base/ecore_hw.c
index 2bcc32d3..84f273b0 100644
--- a/drivers/net/qede/base/ecore_hw.c
+++ b/drivers/net/qede/base/ecore_hw.c
@@ -64,7 +64,9 @@ enum _ecore_status_t ecore_ptt_pool_alloc(struct ecore_hwfn *p_hwfn)
}
p_hwfn->p_ptt_pool = p_pool;
+#ifdef CONFIG_ECORE_LOCK_ALLOC
OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_pool->lock);
+#endif
OSAL_SPIN_LOCK_INIT(&p_pool->lock);
return ECORE_SUCCESS;
@@ -83,8 +85,10 @@ void ecore_ptt_invalidate(struct ecore_hwfn *p_hwfn)
void ecore_ptt_pool_free(struct ecore_hwfn *p_hwfn)
{
+#ifdef CONFIG_ECORE_LOCK_ALLOC
if (p_hwfn->p_ptt_pool)
OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->p_ptt_pool->lock);
+#endif
OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_ptt_pool);
}
@@ -132,7 +136,7 @@ void ecore_ptt_release(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
OSAL_SPIN_UNLOCK(&p_hwfn->p_ptt_pool->lock);
}
-u32 ecore_ptt_get_hw_addr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+static u32 ecore_ptt_get_hw_addr(struct ecore_ptt *p_ptt)
{
/* The HW is using DWORDS and we need to translate it to Bytes */
return OSAL_LE32_TO_CPU(p_ptt->pxp.offset) << 2;
@@ -155,7 +159,7 @@ void ecore_ptt_set_win(struct ecore_hwfn *p_hwfn,
{
u32 prev_hw_addr;
- prev_hw_addr = ecore_ptt_get_hw_addr(p_hwfn, p_ptt);
+ prev_hw_addr = ecore_ptt_get_hw_addr(p_ptt);
if (new_hw_addr == prev_hw_addr)
return;
@@ -177,7 +181,7 @@ void ecore_ptt_set_win(struct ecore_hwfn *p_hwfn,
static u32 ecore_set_ptt(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u32 hw_addr)
{
- u32 win_hw_addr = ecore_ptt_get_hw_addr(p_hwfn, p_ptt);
+ u32 win_hw_addr = ecore_ptt_get_hw_addr(p_ptt);
u32 offset;
offset = hw_addr - win_hw_addr;
@@ -740,10 +744,10 @@ ecore_dmae_execute_sub_operation(struct ecore_hwfn *p_hwfn,
if (ecore_status != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, ECORE_MSG_HW,
- "ecore_dmae_host2grc: Wait Failed. source_addr"
- " 0x%lx, grc_addr 0x%lx, size_in_dwords 0x%x\n",
+ "Wait Failed. source_addr 0x%lx, grc_addr 0x%lx, size_in_dwords 0x%x, intermediate buffer 0x%lx.\n",
(unsigned long)src_addr, (unsigned long)dst_addr,
- length_dw);
+ length_dw,
+ (unsigned long)p_hwfn->dmae_info.intermediate_buffer_phys_addr);
return ecore_status;
}
@@ -785,6 +789,15 @@ ecore_dmae_execute_command(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
}
+ if (!cmd) {
+ DP_NOTICE(p_hwfn, true,
+ "ecore_dmae_execute_sub_operation failed. Invalid state. source_addr 0x%lx, destination addr 0x%lx, size_in_dwords 0x%x\n",
+ (unsigned long)src_addr,
+ (unsigned long)dst_addr,
+ length_cur);
+ return ECORE_INVAL;
+ }
+
ecore_dmae_opcode(p_hwfn,
(src_type == ECORE_DMAE_ADDRESS_GRC),
(dst_type == ECORE_DMAE_ADDRESS_GRC), p_params);
diff --git a/drivers/net/qede/base/ecore_hw.h b/drivers/net/qede/base/ecore_hw.h
index 0750b2ed..0b9814f5 100644
--- a/drivers/net/qede/base/ecore_hw.h
+++ b/drivers/net/qede/base/ecore_hw.h
@@ -71,8 +71,10 @@ enum _dmae_cmd_crc_mask {
* @brief ecore_gtt_init - Initialize GTT windows
*
* @param p_hwfn
+* @param p_ptt
*/
-void ecore_gtt_init(struct ecore_hwfn *p_hwfn);
+void ecore_gtt_init(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
/**
* @brief ecore_ptt_invalidate - Forces all ptt entries to be re-configured
@@ -98,17 +100,6 @@ enum _ecore_status_t ecore_ptt_pool_alloc(struct ecore_hwfn *p_hwfn);
void ecore_ptt_pool_free(struct ecore_hwfn *p_hwfn);
/**
- * @brief ecore_ptt_get_hw_addr - Get PTT's GRC/HW address
- *
- * @param p_hwfn
- * @param p_ptt
- *
- * @return u32
- */
-u32 ecore_ptt_get_hw_addr(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt);
-
-/**
* @brief ecore_ptt_get_bar_addr - Get PPT's external BAR address
*
* @param p_hwfn
@@ -258,35 +249,6 @@ enum _ecore_status_t ecore_dmae_info_alloc(struct ecore_hwfn *p_hwfn);
*/
void ecore_dmae_info_free(struct ecore_hwfn *p_hwfn);
-union ecore_qm_pq_params {
- struct {
- u8 q_idx;
- } iscsi;
-
- struct {
- u8 tc;
- } core;
-
- struct {
- u8 is_vf;
- u8 vf_id;
- u8 tc;
- } eth;
-
- struct {
- u8 dcqcn;
- u8 qpid; /* roce relative */
- } roce;
-
- struct {
- u8 qidx;
- } iwarp;
-};
-
-u16 ecore_get_qm_pq(struct ecore_hwfn *p_hwfn,
- enum protocol_type proto,
- union ecore_qm_pq_params *params);
-
enum _ecore_status_t ecore_init_fw_data(struct ecore_dev *p_dev,
const u8 *fw_data);
diff --git a/drivers/net/qede/base/ecore_init_fw_funcs.c b/drivers/net/qede/base/ecore_init_fw_funcs.c
index b5ef173e..1da80a65 100644
--- a/drivers/net/qede/base/ecore_init_fw_funcs.c
+++ b/drivers/net/qede/base/ecore_init_fw_funcs.c
@@ -20,12 +20,12 @@
#define CDU_VALIDATION_DEFAULT_CFG 61
-static u16 con_region_offsets[3][E4_NUM_OF_CONNECTION_TYPES] = {
+static u16 con_region_offsets[3][NUM_OF_CONNECTION_TYPES_E4] = {
{ 400, 336, 352, 304, 304, 384, 416, 352}, /* region 3 offsets */
{ 528, 496, 416, 448, 448, 512, 544, 480}, /* region 4 offsets */
{ 608, 544, 496, 512, 576, 592, 624, 560} /* region 5 offsets */
};
-static u16 task_region_offsets[1][E4_NUM_OF_CONNECTION_TYPES] = {
+static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
{ 240, 240, 112, 0, 0, 0, 0, 96} /* region 1 offsets */
};
@@ -43,6 +43,9 @@ static u16 task_region_offsets[1][E4_NUM_OF_CONNECTION_TYPES] = {
/* Other PQ constants */
#define QM_OTHER_PQS_PER_PF 4
+/* VOQ constants */
+#define QM_E5_NUM_EXT_VOQ (MAX_NUM_PORTS_E5 * NUM_OF_TCS)
+
/* WFQ constants: */
/* Upper bound in MB, 10 * burst size of 1ms in 50Gbps */
@@ -52,18 +55,19 @@ static u16 task_region_offsets[1][E4_NUM_OF_CONNECTION_TYPES] = {
#define QM_WFQ_VP_PQ_VOQ_SHIFT 0
/* Bit of PF in WFQ VP PQ map */
-#define QM_WFQ_VP_PQ_PF_SHIFT 5
+#define QM_WFQ_VP_PQ_PF_E4_SHIFT 5
+#define QM_WFQ_VP_PQ_PF_E5_SHIFT 6
/* 0x9000 = 4*9*1024 */
#define QM_WFQ_INC_VAL(weight) ((weight) * 0x9000)
-/* 0.7 * upper bound (62500000) */
-#define QM_WFQ_MAX_INC_VAL 43750000
+/* Max WFQ increment value is 0.7 * upper bound */
+#define QM_WFQ_MAX_INC_VAL ((QM_WFQ_UPPER_BOUND * 7) / 10)
-/* RL constants: */
+/* Number of VOQs in E5 QmWfqCrd register */
+#define QM_WFQ_CRD_E5_NUM_VOQS 16
-/* Upper bound is set to 10 * burst size of 1ms in 50Gbps */
-#define QM_RL_UPPER_BOUND 62500000
+/* RL constants: */
/* Period in us */
#define QM_RL_PERIOD 5
@@ -71,18 +75,32 @@ static u16 task_region_offsets[1][E4_NUM_OF_CONNECTION_TYPES] = {
/* Period in 25MHz cycles */
#define QM_RL_PERIOD_CLK_25M (25 * QM_RL_PERIOD)
-/* 0.7 * upper bound (62500000) */
-#define QM_RL_MAX_INC_VAL 43750000
-
/* RL increment value - rate is specified in mbps. the factor of 1.01 was
- * added after seeing only 99% factor reached in a 25Gbps port with DPDK RFC
- * 2544 test. In this scenario the PF RL was reducing the line rate to 99%
- * although the credit increment value was the correct one and FW calculated
- * correct packet sizes. The reason for the inaccuracy of the RL is unknown at
- * this point.
- */
-#define QM_RL_INC_VAL(rate) OSAL_MAX_T(u32, (u32)(((rate ? rate : 1000000) * \
- QM_RL_PERIOD * 101) / (8 * 100)), 1)
+* added after seeing only 99% factor reached in a 25Gbps port with DPDK RFC
+* 2544 test. In this scenario the PF RL was reducing the line rate to 99%
+* although the credit increment value was the correct one and FW calculated
+* correct packet sizes. The reason for the inaccuracy of the RL is unknown at
+* this point.
+*/
+#define QM_RL_INC_VAL(rate) \
+ OSAL_MAX_T(u32, (u32)(((rate ? rate : 100000) * QM_RL_PERIOD * 101) / \
+ (8 * 100)), 1)
+
+/* PF RL Upper bound is set to 10 * burst size of 1ms in 50Gbps */
+#define QM_PF_RL_UPPER_BOUND 62500000
+
+/* Max PF RL increment value is 0.7 * upper bound */
+#define QM_PF_RL_MAX_INC_VAL ((QM_PF_RL_UPPER_BOUND * 7) / 10)
+
+/* Vport RL Upper bound, link speed is in Mbps */
+#define QM_VP_RL_UPPER_BOUND(speed) \
+ ((u32)OSAL_MAX_T(u32, QM_RL_INC_VAL(speed), 9700 + 1000))
+
+/* Max Vport RL increment value is the Vport RL upper bound */
+#define QM_VP_RL_MAX_INC_VAL(speed) QM_VP_RL_UPPER_BOUND(speed)
+
+/* Vport RL credit threshold in case of QM bypass */
+#define QM_VP_RL_BYPASS_THRESH_SPEED (QM_VP_RL_UPPER_BOUND(10000) - 1)
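A worked instance of the increment formula for a 25G port (rate in Mbps):

/* QM_RL_INC_VAL(25000)
 * = 25000 * QM_RL_PERIOD(5) * 101 / (8 * 100)
 * = 12625000 / 800
 * = 15781 bytes of credit per 5 us period
 * (the 101/100 term is the 1.01 correction factor described above).
 */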
/* AFullOprtnstcCrdMask constants */
#define QM_OPPOR_LINE_VOQ_DEF 1
@@ -94,13 +112,17 @@ static u16 task_region_offsets[1][E4_NUM_OF_CONNECTION_TYPES] = {
/* Pure LB CmdQ lines (+spare) */
#define PBF_CMDQ_PURE_LB_LINES 150
-#define PBF_CMDQ_LINES_RT_OFFSET(voq) \
- (PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + voq * \
+#define PBF_CMDQ_LINES_E5_RSVD_RATIO 8
+
+#define PBF_CMDQ_LINES_RT_OFFSET(ext_voq) \
+ (PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + \
+ ext_voq * \
(PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \
PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))
-#define PBF_BTB_GUARANTEED_RT_OFFSET(voq) \
- (PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + voq * \
+#define PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq) \
+ (PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + \
+ ext_voq * \
(PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - \
PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))
@@ -140,25 +162,58 @@ static u16 task_region_offsets[1][E4_NUM_OF_CONNECTION_TYPES] = {
#define QM_CMD_SET_FIELD(var, cmd, field, value) \
SET_FIELD(var[cmd##_##field##_OFFSET], cmd##_##field, value)
-/* QM: VOQ macros */
-#define PHYS_VOQ(port, tc, max_phys_tcs_per_port) \
- ((port) * (max_phys_tcs_per_port) + (tc))
-#define LB_VOQ(port) (MAX_PHYS_VOQS + (port))
-#define VOQ(port, tc, max_phys_tcs_per_port) \
- ((tc) < LB_TC ? PHYS_VOQ(port, tc, max_phys_tcs_per_port) : \
- LB_VOQ(port))
-
+#define QM_INIT_TX_PQ_MAP(p_hwfn, map, chip, pq_id, rl_valid, \
+ vp_pq_id, rl_id, ext_voq, wrr) \
+ do { \
+ OSAL_MEMSET(&map, 0, sizeof(map)); \
+ SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_PQ_VALID, 1); \
+ SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_RL_VALID, rl_valid); \
+ SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_VP_PQ_ID, vp_pq_id); \
+ SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_RL_ID, rl_id); \
+ SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_VOQ, ext_voq); \
+ SET_FIELD(map.reg, \
+ QM_RF_PQ_MAP_##chip##_WRR_WEIGHT_GROUP, wrr); \
+ STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + pq_id, \
+ *((u32 *)&map)); \
+ } while (0)
+
+#define WRITE_PQ_INFO_TO_RAM 1
+#define PQ_INFO_ELEMENT(vp, pf, tc, port, rl_valid, rl) \
+ (((vp) << 0) | ((pf) << 12) | ((tc) << 16) | \
+ ((port) << 20) | ((rl_valid) << 22) | ((rl) << 24))
+#define PQ_INFO_RAM_GRC_ADDRESS(pq_id) \
+ (XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + 21768 + (pq_id) * 4)
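Reading the shifts straight off the macro, the PQ info word packs vp into bits 0-11, pf into 12-15, tc into 16-19, port into 20-21, rl_valid into bit 22 and rl from bit 24. For example:

/* PQ_INFO_ELEMENT(vp = 5, pf = 2, tc = 3, port = 1, rl_valid = 1, rl = 7)
 * = (5 << 0) | (2 << 12) | (3 << 16) | (1 << 20) | (1 << 22) | (7 << 24)
 * = 0x07532005
 */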
/******************** INTERNAL IMPLEMENTATION *********************/
+/* Returns the external VOQ number */
+static u8 ecore_get_ext_voq(struct ecore_hwfn *p_hwfn,
+ u8 port_id,
+ u8 tc,
+ u8 max_phys_tcs_per_port)
+{
+ if (tc == PURE_LB_TC)
+ return NUM_OF_PHYS_TCS * (MAX_NUM_PORTS_BB) + port_id;
+ else
+ return port_id * (max_phys_tcs_per_port) + tc;
+}
+
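The resulting numbering, assuming a BB device with 4 physical TCs per port:

/* ecore_get_ext_voq() examples (max_phys_tcs_per_port = 4):
 *   port 0, tc 0..3       -> ext VOQ 0..3
 *   port 1, tc 0..3       -> ext VOQ 4..7
 *   port p, tc PURE_LB_TC -> ext VOQ NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB + p
 * i.e. the pure-LB VOQs are appended after all physical VOQs.
 */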
/* Prepare PF RL enable/disable runtime init values */
static void ecore_enable_pf_rl(struct ecore_hwfn *p_hwfn, bool pf_rl_en)
{
STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
if (pf_rl_en) {
+ u8 num_ext_voqs = MAX_NUM_VOQS_E4;
+ u64 voq_bit_mask = ((u64)1 << num_ext_voqs) - 1;
+
/* Enable RLs for all VOQs */
STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_RT_OFFSET,
- (1 << MAX_NUM_VOQS) - 1);
+ (u32)voq_bit_mask);
+#ifdef QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET
+ if (num_ext_voqs >= 32)
+ STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET,
+ (u32)(voq_bit_mask >> 32));
+#endif
/* Write RL period */
STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIOD_RT_OFFSET,
@@ -169,7 +224,7 @@ static void ecore_enable_pf_rl(struct ecore_hwfn *p_hwfn, bool pf_rl_en)
/* Set credit threshold for QM bypass flow */
if (QM_BYPASS_EN)
STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET,
- QM_RL_UPPER_BOUND);
+ QM_PF_RL_UPPER_BOUND);
}
}
@@ -200,7 +255,7 @@ static void ecore_enable_vport_rl(struct ecore_hwfn *p_hwfn, bool vport_rl_en)
if (QM_BYPASS_EN)
STORE_RT_REG(p_hwfn,
QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET,
- QM_RL_UPPER_BOUND);
+ QM_VP_RL_BYPASS_THRESH_SPEED);
}
}
@@ -220,17 +275,19 @@ static void ecore_enable_vport_wfq(struct ecore_hwfn *p_hwfn, bool vport_wfq_en)
* the specified VOQ
*/
static void ecore_cmdq_lines_voq_rt_init(struct ecore_hwfn *p_hwfn,
- u8 voq, u16 cmdq_lines)
+ u8 ext_voq,
+ u16 cmdq_lines)
{
u32 qm_line_crd;
qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);
- OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq),
+ OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq),
(u32)cmdq_lines);
- STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + voq, qm_line_crd);
- STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + voq,
- qm_line_crd);
+ STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + ext_voq,
+ qm_line_crd);
+ STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + ext_voq,
+ qm_line_crd);
}
/* Prepare runtime init values to allocate PBF command queue lines. */
@@ -240,11 +297,12 @@ static void ecore_cmdq_lines_rt_init(struct ecore_hwfn *p_hwfn,
struct init_qm_port_params
port_params[MAX_NUM_PORTS])
{
- u8 tc, voq, port_id, num_tcs_in_port;
+ u8 tc, ext_voq, port_id, num_tcs_in_port;
+ u8 num_ext_voqs = MAX_NUM_VOQS_E4;
- /* Clear PBF lines for all VOQs */
- for (voq = 0; voq < MAX_NUM_VOQS; voq++)
- STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq), 0);
+ /* Clear PBF lines of all VOQs */
+ for (ext_voq = 0; ext_voq < num_ext_voqs; ext_voq++)
+ STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq), 0);
for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
u16 phys_lines, phys_lines_per_tc;
@@ -252,31 +310,35 @@ static void ecore_cmdq_lines_rt_init(struct ecore_hwfn *p_hwfn,
if (!port_params[port_id].active)
continue;
- /* Find #lines to divide between the active physical TCs */
- phys_lines = port_params[port_id].num_pbf_cmd_lines -
- PBF_CMDQ_PURE_LB_LINES;
+ /* Find number of command queue lines to divide between the
+ * active physical TCs. In E5, 1/8 of the lines are reserved. The
+ * lines for the pure LB TC are subtracted.
+ */
+ phys_lines = port_params[port_id].num_pbf_cmd_lines;
+ phys_lines -= PBF_CMDQ_PURE_LB_LINES;
/* Find #lines per active physical TC */
num_tcs_in_port = 0;
- for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++)
+ for (tc = 0; tc < max_phys_tcs_per_port; tc++)
if (((port_params[port_id].active_phys_tcs >> tc) &
0x1) == 1)
num_tcs_in_port++;
phys_lines_per_tc = phys_lines / num_tcs_in_port;
/* Init registers per active TC */
- for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
+ for (tc = 0; tc < max_phys_tcs_per_port; tc++) {
+ ext_voq = ecore_get_ext_voq(p_hwfn, port_id, tc,
+ max_phys_tcs_per_port);
if (((port_params[port_id].active_phys_tcs >> tc) &
- 0x1) == 1) {
- voq = PHYS_VOQ(port_id, tc,
- max_phys_tcs_per_port);
- ecore_cmdq_lines_voq_rt_init(p_hwfn, voq,
+ 0x1) == 1)
+ ecore_cmdq_lines_voq_rt_init(p_hwfn, ext_voq,
phys_lines_per_tc);
- }
}
/* Init registers for pure LB TC */
- ecore_cmdq_lines_voq_rt_init(p_hwfn, LB_VOQ(port_id),
+ ext_voq = ecore_get_ext_voq(p_hwfn, port_id, PURE_LB_TC,
+ max_phys_tcs_per_port);
+ ecore_cmdq_lines_voq_rt_init(p_hwfn, ext_voq,
PBF_CMDQ_PURE_LB_LINES);
}
}
@@ -308,7 +370,7 @@ static void ecore_btb_blocks_rt_init(struct ecore_hwfn *p_hwfn,
port_params[MAX_NUM_PORTS])
{
u32 usable_blocks, pure_lb_blocks, phys_blocks;
- u8 tc, voq, port_id, num_tcs_in_port;
+ u8 tc, ext_voq, port_id, num_tcs_in_port;
for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
if (!port_params[port_id].active)
@@ -339,18 +401,19 @@ static void ecore_btb_blocks_rt_init(struct ecore_hwfn *p_hwfn,
/* Init physical TCs */
for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
if (((port_params[port_id].active_phys_tcs >> tc) &
- 0x1) == 1) {
- voq = PHYS_VOQ(port_id, tc,
- max_phys_tcs_per_port);
+ 0x1) == 1) {
+ ext_voq = ecore_get_ext_voq(p_hwfn, port_id, tc,
+ max_phys_tcs_per_port);
STORE_RT_REG(p_hwfn,
- PBF_BTB_GUARANTEED_RT_OFFSET(voq),
- phys_blocks);
+ PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq),
+ phys_blocks);
}
}
/* Init pure LB TC */
- STORE_RT_REG(p_hwfn,
- PBF_BTB_GUARANTEED_RT_OFFSET(LB_VOQ(port_id)),
+ ext_voq = ecore_get_ext_voq(p_hwfn, port_id, PURE_LB_TC,
+ max_phys_tcs_per_port);
+ STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq),
pure_lb_blocks);
}
}
@@ -361,7 +424,6 @@ static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
u8 port_id,
u8 pf_id,
u8 max_phys_tcs_per_port,
- bool is_first_pf,
u32 num_pf_cids,
u32 num_vf_cids,
u16 start_pq,
@@ -401,12 +463,12 @@ static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
/* Go over all Tx PQs */
for (i = 0, pq_id = start_pq; i < num_pqs; i++, pq_id++) {
u32 max_qm_global_rls = MAX_QM_GLOBAL_RLS;
- struct qm_rf_pq_map tx_pq_map;
+ u8 ext_voq, vport_id_in_pf;
bool is_vf_pq, rl_valid;
- u8 voq, vport_id_in_pf;
u16 first_tx_pq_id;
- voq = VOQ(port_id, pq_params[i].tc_id, max_phys_tcs_per_port);
+ ext_voq = ecore_get_ext_voq(p_hwfn, port_id, pq_params[i].tc_id,
+ max_phys_tcs_per_port);
is_vf_pq = (i >= num_pf_pqs);
rl_valid = pq_params[i].rl_valid && pq_params[i].vport_id <
max_qm_global_rls;
@@ -416,16 +478,17 @@ static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
first_tx_pq_id =
vport_params[vport_id_in_pf].first_tx_pq_id[pq_params[i].tc_id];
if (first_tx_pq_id == QM_INVALID_PQ_ID) {
+ u32 map_val = (ext_voq << QM_WFQ_VP_PQ_VOQ_SHIFT) |
+ (pf_id << (QM_WFQ_VP_PQ_PF_E4_SHIFT));
+
/* Create new VP PQ */
vport_params[vport_id_in_pf].
first_tx_pq_id[pq_params[i].tc_id] = pq_id;
first_tx_pq_id = pq_id;
/* Map VP PQ to VOQ and PF */
- STORE_RT_REG(p_hwfn,
- QM_REG_WFQVPMAP_RT_OFFSET + first_tx_pq_id,
- (voq << QM_WFQ_VP_PQ_VOQ_SHIFT) | (pf_id <<
- QM_WFQ_VP_PQ_PF_SHIFT));
+ STORE_RT_REG(p_hwfn, QM_REG_WFQVPMAP_RT_OFFSET +
+ first_tx_pq_id, map_val);
}
/* Check RL ID */
@@ -434,26 +497,29 @@ static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
DP_NOTICE(p_hwfn, true,
"Invalid VPORT ID for rate limiter config\n");
- /* Fill PQ map entry */
- OSAL_MEMSET(&tx_pq_map, 0, sizeof(tx_pq_map));
- SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_PQ_VALID, 1);
- SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_VALID,
- rl_valid ? 1 : 0);
- SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VP_PQ_ID, first_tx_pq_id);
- SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_ID,
- rl_valid ? pq_params[i].vport_id : 0);
- SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VOQ, voq);
- SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP,
- pq_params[i].wrr_group);
-
- /* Write PQ map entry to CAM */
- STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + pq_id,
- *((u32 *)&tx_pq_map));
+ /* Prepare PQ map entry */
+ struct qm_rf_pq_map_e4 tx_pq_map;
+ QM_INIT_TX_PQ_MAP(p_hwfn, tx_pq_map, E4, pq_id, rl_valid ?
+ 1 : 0,
+ first_tx_pq_id, rl_valid ?
+ pq_params[i].vport_id : 0,
+ ext_voq, pq_params[i].wrr_group);
/* Set base address */
STORE_RT_REG(p_hwfn, QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id,
mem_addr_4kb);
+ /* Write PQ info to RAM */
+ if (WRITE_PQ_INFO_TO_RAM != 0) {
+ u32 pq_info = 0;
+ pq_info = PQ_INFO_ELEMENT(first_tx_pq_id, pf_id,
+ pq_params[i].tc_id, port_id,
+ rl_valid ? 1 : 0, rl_valid ?
+ pq_params[i].vport_id : 0);
+ ecore_wr(p_hwfn, p_ptt, PQ_INFO_RAM_GRC_ADDRESS(pq_id),
+ pq_info);
+ }
+
/* If VF PQ, add indication to PQ VF mask */
if (is_vf_pq) {
tx_pq_vf_mask[pq_id / QM_PF_QUEUE_GROUP_SIZE] |=
@@ -473,10 +539,10 @@ static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
/* Prepare Other PQ mapping runtime init values for the specified PF */
static void ecore_other_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
- u8 port_id,
u8 pf_id,
u32 num_pf_cids,
- u32 num_tids, u32 base_mem_addr_4kb)
+ u32 num_tids,
+ u32 base_mem_addr_4kb)
{
u32 pq_size, pq_mem_4kb, mem_addr_4kb;
u16 i, pq_id, pq_group;
@@ -518,13 +584,9 @@ static int ecore_pf_wfq_rt_init(struct ecore_hwfn *p_hwfn,
struct init_qm_pq_params *pq_params)
{
u32 inc_val, crd_reg_offset;
- u8 voq;
+ u8 ext_voq;
u16 i;
- crd_reg_offset = (pf_id < MAX_NUM_PFS_BB ? QM_REG_WFQPFCRD_RT_OFFSET :
- QM_REG_WFQPFCRD_MSB_RT_OFFSET) +
- (pf_id % MAX_NUM_PFS_BB);
-
inc_val = QM_WFQ_INC_VAL(pf_wfq);
if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
DP_NOTICE(p_hwfn, true,
@@ -533,14 +595,21 @@ static int ecore_pf_wfq_rt_init(struct ecore_hwfn *p_hwfn,
}
for (i = 0; i < num_tx_pqs; i++) {
- voq = VOQ(port_id, pq_params[i].tc_id, max_phys_tcs_per_port);
- OVERWRITE_RT_REG(p_hwfn, crd_reg_offset + voq * MAX_NUM_PFS_BB,
+ ext_voq = ecore_get_ext_voq(p_hwfn, port_id, pq_params[i].tc_id,
+ max_phys_tcs_per_port);
+ crd_reg_offset = (pf_id < MAX_NUM_PFS_BB ?
+ QM_REG_WFQPFCRD_RT_OFFSET :
+ QM_REG_WFQPFCRD_MSB_RT_OFFSET) +
+ ext_voq * MAX_NUM_PFS_BB +
+ (pf_id % MAX_NUM_PFS_BB);
+ OVERWRITE_RT_REG(p_hwfn, crd_reg_offset,
(u32)QM_WFQ_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn, QM_REG_WFQPFUPPERBOUND_RT_OFFSET + pf_id,
+ QM_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + pf_id,
+ inc_val);
}
- STORE_RT_REG(p_hwfn, QM_REG_WFQPFUPPERBOUND_RT_OFFSET + pf_id,
- QM_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT);
- STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + pf_id, inc_val);
return 0;
}
@@ -552,7 +621,7 @@ static int ecore_pf_rl_rt_init(struct ecore_hwfn *p_hwfn, u8 pf_id, u32 pf_rl)
u32 inc_val;
inc_val = QM_RL_INC_VAL(pf_rl);
- if (inc_val > QM_RL_MAX_INC_VAL) {
+ if (inc_val > QM_PF_RL_MAX_INC_VAL) {
DP_NOTICE(p_hwfn, true,
"Invalid PF rate limit configuration\n");
return -1;
@@ -561,7 +630,7 @@ static int ecore_pf_rl_rt_init(struct ecore_hwfn *p_hwfn, u8 pf_id, u32 pf_rl)
STORE_RT_REG(p_hwfn, QM_REG_RLPFCRD_RT_OFFSET + pf_id,
(u32)QM_RL_CRD_REG_SIGN_BIT);
STORE_RT_REG(p_hwfn, QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id,
- QM_RL_UPPER_BOUND | (u32)QM_RL_CRD_REG_SIGN_BIT);
+ QM_PF_RL_UPPER_BOUND | (u32)QM_RL_CRD_REG_SIGN_BIT);
STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, inc_val);
return 0;
@@ -612,6 +681,7 @@ static int ecore_vp_wfq_rt_init(struct ecore_hwfn *p_hwfn,
static int ecore_vport_rl_rt_init(struct ecore_hwfn *p_hwfn,
u8 start_vport,
u8 num_vports,
+ u32 link_speed,
struct init_qm_vport_params *vport_params)
{
u8 i, vport_id;
@@ -625,8 +695,9 @@ static int ecore_vport_rl_rt_init(struct ecore_hwfn *p_hwfn,
/* Go over all PF VPORTs */
for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
- u32 inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl);
- if (inc_val > QM_RL_MAX_INC_VAL) {
+ inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl ?
+ vport_params[i].vport_rl : link_speed);
+ if (inc_val > QM_VP_RL_MAX_INC_VAL(link_speed)) {
DP_NOTICE(p_hwfn, true,
"Invalid VPORT rate-limit configuration\n");
return -1;
@@ -636,7 +707,8 @@ static int ecore_vport_rl_rt_init(struct ecore_hwfn *p_hwfn,
(u32)QM_RL_CRD_REG_SIGN_BIT);
STORE_RT_REG(p_hwfn,
QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + vport_id,
- QM_RL_UPPER_BOUND | (u32)QM_RL_CRD_REG_SIGN_BIT);
+ QM_VP_RL_UPPER_BOUND(link_speed) |
+ (u32)QM_RL_CRD_REG_SIGN_BIT);
STORE_RT_REG(p_hwfn, QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id,
inc_val);
}
@@ -667,7 +739,9 @@ static bool ecore_poll_on_qm_cmd_ready(struct ecore_hwfn *p_hwfn,
static bool ecore_send_qm_cmd(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
- u32 cmd_addr, u32 cmd_data_lsb, u32 cmd_data_msb)
+ u32 cmd_addr,
+ u32 cmd_data_lsb,
+ u32 cmd_data_msb)
{
if (!ecore_poll_on_qm_cmd_ready(p_hwfn, p_ptt))
return false;
@@ -684,10 +758,11 @@ static bool ecore_send_qm_cmd(struct ecore_hwfn *p_hwfn,
/******************** INTERFACE IMPLEMENTATION *********************/
-u32 ecore_qm_pf_mem_size(u8 pf_id,
- u32 num_pf_cids,
- u32 num_vf_cids,
- u32 num_tids, u16 num_pf_pqs, u16 num_vf_pqs)
+u32 ecore_qm_pf_mem_size(u32 num_pf_cids,
+ u32 num_vf_cids,
+ u32 num_tids,
+ u16 num_pf_pqs,
+ u16 num_vf_pqs)
{
return QM_PQ_MEM_4KB(num_pf_cids) * num_pf_pqs +
QM_PQ_MEM_4KB(num_vf_cids) * num_vf_pqs +
@@ -748,7 +823,6 @@ int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,
u8 port_id,
u8 pf_id,
u8 max_phys_tcs_per_port,
- bool is_first_pf,
u32 num_pf_cids,
u32 num_vf_cids,
u32 num_tids,
@@ -759,6 +833,7 @@ int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,
u8 num_vports,
u16 pf_wfq,
u32 pf_rl,
+ u32 link_speed,
struct init_qm_pq_params *pq_params,
struct init_qm_vport_params *vport_params)
{
@@ -775,16 +850,14 @@ int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,
/* Map Other PQs (if any) */
#if QM_OTHER_PQS_PER_PF > 0
- ecore_other_pq_map_rt_init(p_hwfn, port_id, pf_id, num_pf_cids,
- num_tids, 0);
+ ecore_other_pq_map_rt_init(p_hwfn, pf_id, num_pf_cids, num_tids, 0);
#endif
/* Map Tx PQs */
ecore_tx_pq_map_rt_init(p_hwfn, p_ptt, port_id, pf_id,
- max_phys_tcs_per_port, is_first_pf, num_pf_cids,
- num_vf_cids, start_pq, num_pf_pqs, num_vf_pqs,
- start_vport, other_mem_size_4kb, pq_params,
- vport_params);
+ max_phys_tcs_per_port, num_pf_cids, num_vf_cids,
+ start_pq, num_pf_pqs, num_vf_pqs, start_vport,
+ other_mem_size_4kb, pq_params, vport_params);
/* Init PF WFQ */
if (pf_wfq)
@@ -803,7 +876,7 @@ int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,
/* Set VPORT RL */
if (ecore_vport_rl_rt_init
- (p_hwfn, start_vport, num_vports, vport_params))
+ (p_hwfn, start_vport, num_vports, link_speed, vport_params))
return -1;
return 0;
@@ -832,7 +905,7 @@ int ecore_init_pf_rl(struct ecore_hwfn *p_hwfn,
u32 inc_val;
inc_val = QM_RL_INC_VAL(pf_rl);
- if (inc_val > QM_RL_MAX_INC_VAL) {
+ if (inc_val > QM_PF_RL_MAX_INC_VAL) {
DP_NOTICE(p_hwfn, true,
"Invalid PF rate limit configuration\n");
return -1;
@@ -872,7 +945,9 @@ int ecore_init_vport_wfq(struct ecore_hwfn *p_hwfn,
}
int ecore_init_vport_rl(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, u8 vport_id, u32 vport_rl)
+ struct ecore_ptt *p_ptt, u8 vport_id,
+ u32 vport_rl,
+ u32 link_speed)
{
u32 inc_val, max_qm_global_rls = MAX_QM_GLOBAL_RLS;
@@ -882,8 +957,8 @@ int ecore_init_vport_rl(struct ecore_hwfn *p_hwfn,
return -1;
}
- inc_val = QM_RL_INC_VAL(vport_rl);
- if (inc_val > QM_RL_MAX_INC_VAL) {
+ inc_val = QM_RL_INC_VAL(vport_rl ? vport_rl : link_speed);
+ if (inc_val > QM_VP_RL_MAX_INC_VAL(link_speed)) {
DP_NOTICE(p_hwfn, true,
"Invalid VPORT rate-limit configuration\n");
return -1;
@@ -1335,23 +1410,8 @@ void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn,
}
}
-/* In MF should be called once per engine to set EtherType of OuterTag */
-void ecore_set_engine_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, u32 ethType)
-{
- /* Update PRS register */
- STORE_RT_REG(p_hwfn, PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET, ethType);
-
- /* Update NIG register */
- STORE_RT_REG(p_hwfn, NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET, ethType);
-
- /* Update PBF register */
- STORE_RT_REG(p_hwfn, PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET, ethType);
-}
-
/* In MF should be called once per port to set EtherType of OuterTag */
-void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, u32 ethType)
+void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn, u32 ethType)
{
/* Update DORQ register */
STORE_RT_REG(p_hwfn, DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET, ethType);
@@ -1497,35 +1557,23 @@ void ecore_set_geneve_enable(struct ecore_hwfn *p_hwfn,
#define RAM_LINE_SIZE sizeof(u64)
#define REG_SIZE sizeof(u32)
-void ecore_set_rfs_mode_disable(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u16 pf_id)
+void ecore_gft_disable(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 pf_id)
{
- union gft_cam_line_union cam_line;
- struct gft_ram_line ram_line;
- u32 i, *ram_line_ptr;
-
- ram_line_ptr = (u32 *)&ram_line;
-
- /* Stop using gft logic, disable gft search */
+ /* disable gft search for PF */
ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 0);
- ecore_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, 0x0);
- /* Clean ram & cam for next rfs/gft session*/
+ /* Clean ram & cam for next gft session*/
/* Zero camline */
- OSAL_MEMSET(&cam_line, 0, sizeof(cam_line));
- ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id,
- cam_line.cam_line_mapped.camline);
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, 0);
/* Zero ramline */
- OSAL_MEMSET(&ram_line, 0, sizeof(ram_line));
-
- /* Each iteration write to reg */
- for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++)
- ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM +
- RAM_LINE_SIZE * pf_id +
- i * REG_SIZE, *(ram_line_ptr + i));
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM +
+ RAM_LINE_SIZE * pf_id, 0);
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM +
+ RAM_LINE_SIZE * pf_id + REG_SIZE, 0);
}
@@ -1543,115 +1591,110 @@ void ecore_set_gft_event_id_cm_hdr(struct ecore_hwfn *p_hwfn,
ecore_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, rfs_cm_hdr_event_id);
}
-void ecore_set_rfs_mode_enable(struct ecore_hwfn *p_hwfn,
+void ecore_gft_config(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u16 pf_id,
bool tcp,
bool udp,
bool ipv4,
- bool ipv6)
+ bool ipv6,
+ enum gft_profile_type profile_type)
{
- u32 rfs_cm_hdr_event_id = ecore_rd(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT);
- union gft_cam_line_union camLine;
- struct gft_ram_line ramLine;
- u32 *ramLinePointer = (u32 *)&ramLine;
- int i;
+ u32 reg_val, cam_line, ram_line_lo, ram_line_hi;
if (!ipv6 && !ipv4)
- DP_NOTICE(p_hwfn, true,
- "set_rfs_mode_enable: must accept at "
- "least on of - ipv4 or ipv6");
-
+ DP_NOTICE(p_hwfn, true, "gft_config: must accept at least on of - ipv4 or ipv6'\n");
if (!tcp && !udp)
- DP_NOTICE(p_hwfn, true,
- "set_rfs_mode_enable: must accept at "
- "least on of - udp or tcp");
+ DP_NOTICE(p_hwfn, true, "gft_config: must accept at least on of - udp or tcp\n");
+ if (profile_type >= MAX_GFT_PROFILE_TYPE)
+ DP_NOTICE(p_hwfn, true, "gft_config: unsupported gft_profile_type\n");
 /* Set RFS event ID to be awakened in Tstorm by PRS */
- rfs_cm_hdr_event_id |= T_ETH_PACKET_MATCH_RFS_EVENTID <<
- PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
- rfs_cm_hdr_event_id |= PARSER_ETH_CONN_CM_HDR <<
- PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
- ecore_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, rfs_cm_hdr_event_id);
+ reg_val = T_ETH_PACKET_MATCH_RFS_EVENTID <<
+ PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
+ reg_val |= PARSER_ETH_CONN_CM_HDR << PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, reg_val);
- /* Configure Registers for RFS mode */
+ /* Do not load context only cid in PRS on match. */
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_LOAD_L2_FILTER, 0);
- /* Enable gft search */
- ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);
- ecore_wr(p_hwfn, p_ptt, PRS_REG_LOAD_L2_FILTER, 0); /* do not load
- * context only cid
- * in PRS on match
- */
- camLine.cam_line_mapped.camline = 0;
+ /* Do not use tenant ID exist bit for gft search */
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TENANT_ID, 0);
- /* Cam line is now valid!! */
- SET_FIELD(camLine.cam_line_mapped.camline,
- GFT_CAM_LINE_MAPPED_VALID, 1);
+ /* Set Cam */
+ cam_line = 0;
+ SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_VALID, 1);
/* Filters are per PF!! */
- SET_FIELD(camLine.cam_line_mapped.camline,
- GFT_CAM_LINE_MAPPED_PF_ID_MASK,
+ SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_PF_ID_MASK,
GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK);
- SET_FIELD(camLine.cam_line_mapped.camline,
- GFT_CAM_LINE_MAPPED_PF_ID, pf_id);
+ SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_PF_ID, pf_id);
if (!(tcp && udp)) {
- SET_FIELD(camLine.cam_line_mapped.camline,
+ SET_FIELD(cam_line,
GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK,
GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK);
if (tcp)
- SET_FIELD(camLine.cam_line_mapped.camline,
+ SET_FIELD(cam_line,
GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
GFT_PROFILE_TCP_PROTOCOL);
else
- SET_FIELD(camLine.cam_line_mapped.camline,
+ SET_FIELD(cam_line,
GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
GFT_PROFILE_UDP_PROTOCOL);
}
if (!(ipv4 && ipv6)) {
- SET_FIELD(camLine.cam_line_mapped.camline,
- GFT_CAM_LINE_MAPPED_IP_VERSION_MASK, 1);
+ SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION_MASK, 1);
if (ipv4)
- SET_FIELD(camLine.cam_line_mapped.camline,
- GFT_CAM_LINE_MAPPED_IP_VERSION,
+ SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION,
GFT_PROFILE_IPV4);
else
- SET_FIELD(camLine.cam_line_mapped.camline,
- GFT_CAM_LINE_MAPPED_IP_VERSION,
+ SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION,
GFT_PROFILE_IPV6);
}
/* Write characteristics to cam */
ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id,
- camLine.cam_line_mapped.camline);
- camLine.cam_line_mapped.camline =
- ecore_rd(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id);
+ cam_line);
+ cam_line = ecore_rd(p_hwfn, p_ptt,
+ PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id);
/* Write line to RAM - compare to filter 4 tuple */
- ramLine.lo = 0;
- ramLine.hi = 0;
- SET_FIELD(ramLine.hi, GFT_RAM_LINE_DST_IP, 1);
- SET_FIELD(ramLine.hi, GFT_RAM_LINE_SRC_IP, 1);
- SET_FIELD(ramLine.hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
- SET_FIELD(ramLine.lo, GFT_RAM_LINE_ETHERTYPE, 1);
- SET_FIELD(ramLine.lo, GFT_RAM_LINE_SRC_PORT, 1);
- SET_FIELD(ramLine.lo, GFT_RAM_LINE_DST_PORT, 1);
-
- /* Each iteration write to reg */
- for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++)
- ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM +
- RAM_LINE_SIZE * pf_id +
- i * REG_SIZE, *(ramLinePointer + i));
+ ram_line_lo = 0;
+ ram_line_hi = 0;
+
+ if (profile_type == GFT_PROFILE_TYPE_4_TUPLE) {
+ SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1);
+ SET_FIELD(ram_line_hi, GFT_RAM_LINE_SRC_IP, 1);
+ SET_FIELD(ram_line_hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
+ SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
+ SET_FIELD(ram_line_lo, GFT_RAM_LINE_SRC_PORT, 1);
+ SET_FIELD(ram_line_lo, GFT_RAM_LINE_DST_PORT, 1);
+ } else if (profile_type == GFT_PROFILE_TYPE_L4_DST_PORT) {
+ SET_FIELD(ram_line_hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
+ SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
+ SET_FIELD(ram_line_lo, GFT_RAM_LINE_DST_PORT, 1);
+ } else if (profile_type == GFT_PROFILE_TYPE_IP_DST_PORT) {
+ SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1);
+ SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
+ }
+
+ ecore_wr(p_hwfn, p_ptt,
+ PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id,
+ ram_line_lo);
+ ecore_wr(p_hwfn, p_ptt,
+ PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id +
+ REG_SIZE, ram_line_hi);
/* Set default profile so that no filter match will happen */
- ramLine.lo = 0xffffffff;
- ramLine.hi = 0x3ff;
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
+ PRS_GFT_CAM_LINES_NO_MATCH, 0xffffffff);
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
+ PRS_GFT_CAM_LINES_NO_MATCH + REG_SIZE, 0x3ff);
- for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++)
- ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM +
- RAM_LINE_SIZE * PRS_GFT_CAM_LINES_NO_MATCH +
- i * REG_SIZE, *(ramLinePointer + i));
+ /* Enable gft search */
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);
}
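
As a side note on the hunk above: the profile type selects which header fields
participate in the GFT match, and each selection is just a pair of 32-bit mask
words written to the per-PF RAM line. A minimal standalone C sketch of that
selection logic, using illustrative bit positions rather than the real
GFT_RAM_LINE_* HSI fields:

	#include <stdint.h>

	/* Illustrative bit positions only -- the real GFT_RAM_LINE_*
	 * fields come from the firmware HSI headers. */
	#define RAM_HI_DST_IP		(1u << 0)
	#define RAM_HI_SRC_IP		(1u << 1)
	#define RAM_HI_OVER_IP_PROT	(1u << 2)
	#define RAM_LO_ETHERTYPE	(1u << 0)
	#define RAM_LO_SRC_PORT		(1u << 1)
	#define RAM_LO_DST_PORT		(1u << 2)

	enum gft_profile { FOUR_TUPLE, L4_DST_PORT, IP_DST_PORT };

	/* Select which packet fields participate in the GFT match. */
	static void gft_ram_masks(enum gft_profile p, uint32_t *hi, uint32_t *lo)
	{
		*hi = 0;
		*lo = 0;
		switch (p) {
		case FOUR_TUPLE:	/* src/dst IP + src/dst L4 port */
			*hi = RAM_HI_DST_IP | RAM_HI_SRC_IP | RAM_HI_OVER_IP_PROT;
			*lo = RAM_LO_ETHERTYPE | RAM_LO_SRC_PORT | RAM_LO_DST_PORT;
			break;
		case L4_DST_PORT:	/* IP protocol + destination port only */
			*hi = RAM_HI_OVER_IP_PROT;
			*lo = RAM_LO_ETHERTYPE | RAM_LO_DST_PORT;
			break;
		case IP_DST_PORT:	/* destination IP only */
			*hi = RAM_HI_DST_IP;
			*lo = RAM_LO_ETHERTYPE;
			break;
		}
	}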
/* Configure VF zone size mode */
@@ -1726,16 +1769,13 @@ u32 ecore_get_mstorm_eth_vf_prods_offset(struct ecore_hwfn *p_hwfn,
#ifndef LINUX_REMOVE
#define CRC8_INIT_VALUE 0xFF
-#define CRC8_TABLE_SIZE 256
#endif
static u8 cdu_crc8_table[CRC8_TABLE_SIZE];
/* Calculate and return CDU validation byte per connection type / region /
* cid
*/
-static u8 ecore_calc_cdu_validation_byte(struct ecore_hwfn *p_hwfn,
- u8 conn_type,
- u8 region, u32 cid)
+static u8 ecore_calc_cdu_validation_byte(u8 conn_type, u8 region, u32 cid)
{
const u8 validation_cfg = CDU_VALIDATION_DEFAULT_CFG;
@@ -1794,9 +1834,8 @@ static u8 ecore_calc_cdu_validation_byte(struct ecore_hwfn *p_hwfn,
}
/* Calculate and set validation bytes for session context */
-void ecore_calc_session_ctx_validation(struct ecore_hwfn *p_hwfn,
- void *p_ctx_mem,
- u16 ctx_size, u8 ctx_type, u32 cid)
+void ecore_calc_session_ctx_validation(void *p_ctx_mem, u16 ctx_size,
+ u8 ctx_type, u32 cid)
{
u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
@@ -1807,14 +1846,14 @@ void ecore_calc_session_ctx_validation(struct ecore_hwfn *p_hwfn,
OSAL_MEMSET(p_ctx, 0, ctx_size);
- *x_val_ptr = ecore_calc_cdu_validation_byte(p_hwfn, ctx_type, 3, cid);
- *t_val_ptr = ecore_calc_cdu_validation_byte(p_hwfn, ctx_type, 4, cid);
- *u_val_ptr = ecore_calc_cdu_validation_byte(p_hwfn, ctx_type, 5, cid);
+ *x_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 3, cid);
+ *t_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 4, cid);
+ *u_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 5, cid);
}
/* Calculate and set validation bytes for task context */
-void ecore_calc_task_ctx_validation(struct ecore_hwfn *p_hwfn, void *p_ctx_mem,
- u16 ctx_size, u8 ctx_type, u32 tid)
+void ecore_calc_task_ctx_validation(void *p_ctx_mem, u16 ctx_size, u8 ctx_type,
+ u32 tid)
{
u8 *p_ctx, *region1_val_ptr;
@@ -1823,8 +1862,7 @@ void ecore_calc_task_ctx_validation(struct ecore_hwfn *p_hwfn, void *p_ctx_mem,
OSAL_MEMSET(p_ctx, 0, ctx_size);
- *region1_val_ptr = ecore_calc_cdu_validation_byte(p_hwfn, ctx_type,
- 1, tid);
+ *region1_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 1, tid);
}
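
The validation byte folds the connection type, region and cid into a single
CRC8 value (seeded with CRC8_INIT_VALUE). A toy standalone sketch of the idea
-- the polynomial and byte layout here are illustrative, not the firmware's
actual scheme:

	#include <stdint.h>
	#include <string.h>

	/* Toy CRC8 (polynomial 0x07), seeded with 0xFF like CRC8_INIT_VALUE. */
	static uint8_t crc8_byte(uint8_t crc, uint8_t data)
	{
		crc ^= data;
		for (int i = 0; i < 8; i++)
			crc = (crc & 0x80) ? (uint8_t)((crc << 1) ^ 0x07)
					   : (uint8_t)(crc << 1);
		return crc;
	}

	/* Fold connection type, region and cid into one validation byte. */
	static uint8_t calc_validation_byte(uint8_t conn_type, uint8_t region,
					    uint32_t cid)
	{
		uint8_t buf[6], crc = 0xFF;

		buf[0] = conn_type;
		buf[1] = region;
		memcpy(&buf[2], &cid, sizeof(cid));
		for (size_t i = 0; i < sizeof(buf); i++)
			crc = crc8_byte(crc, buf[i]);
		return crc;
	}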
/* Memset session context to 0 while preserving validation bytes */
diff --git a/drivers/net/qede/base/ecore_init_fw_funcs.h b/drivers/net/qede/base/ecore_init_fw_funcs.h
index 488dc005..ab560e59 100644
--- a/drivers/net/qede/base/ecore_init_fw_funcs.h
+++ b/drivers/net/qede/base/ecore_init_fw_funcs.h
@@ -18,7 +18,6 @@ struct init_qm_pq_params;
* Returns the required host memory size in 4KB units.
* Must be called before all QM init HSI functions.
*
- * @param pf_id - physical function ID
* @param num_pf_cids - number of connections used by this PF
* @param num_vf_cids - number of connections used by VFs of this PF
* @param num_tids - number of tasks used by this PF
@@ -27,8 +26,7 @@ struct init_qm_pq_params;
*
* @return The required host memory size in 4KB units.
*/
-u32 ecore_qm_pf_mem_size(u8 pf_id,
- u32 num_pf_cids,
+u32 ecore_qm_pf_mem_size(u32 num_pf_cids,
u32 num_vf_cids,
u32 num_tids,
u16 num_pf_pqs,
@@ -66,7 +64,6 @@ int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn,
* @param port_id - port ID
* @param pf_id - PF ID
* @param max_phys_tcs_per_port - max number of physical TCs per port in HW
- * @param is_first_pf - 1 = first PF in engine, 0 = othwerwise
* @param num_pf_cids - number of connections used by this PF
* @param num_vf_cids - number of connections used by VFs of this PF
* @param num_tids - number of tasks used by this PF
@@ -80,6 +77,7 @@ int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn,
* be 0. otherwise, the weight must be non-zero.
* @param pf_rl - rate limit in Mb/sec units. a value of 0 means don't
* configure. ignored if PF RL is globally disabled.
+ * @param link_speed - link speed in Mbps.
* @param pq_params - array of size (num_pf_pqs+num_vf_pqs) with parameters for
* each Tx PQ associated with the specified PF.
* @param vport_params - array of size num_vports with parameters for each
@@ -88,23 +86,23 @@ int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn,
* @return 0 on success, -1 on error.
*/
int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u8 port_id,
- u8 pf_id,
- u8 max_phys_tcs_per_port,
- bool is_first_pf,
- u32 num_pf_cids,
- u32 num_vf_cids,
- u32 num_tids,
- u16 start_pq,
- u16 num_pf_pqs,
- u16 num_vf_pqs,
- u8 start_vport,
- u8 num_vports,
- u16 pf_wfq,
- u32 pf_rl,
- struct init_qm_pq_params *pq_params,
- struct init_qm_vport_params *vport_params);
+ struct ecore_ptt *p_ptt,
+ u8 port_id,
+ u8 pf_id,
+ u8 max_phys_tcs_per_port,
+ u32 num_pf_cids,
+ u32 num_vf_cids,
+ u32 num_tids,
+ u16 start_pq,
+ u16 num_pf_pqs,
+ u16 num_vf_pqs,
+ u8 start_vport,
+ u8 num_vports,
+ u16 pf_wfq,
+ u32 pf_rl,
+ u32 link_speed,
+ struct init_qm_pq_params *pq_params,
+ struct init_qm_vport_params *vport_params);
/**
* @brief ecore_init_pf_wfq Initializes the WFQ weight of the specified PF
@@ -157,17 +155,19 @@ int ecore_init_vport_wfq(struct ecore_hwfn *p_hwfn,
* @brief ecore_init_vport_rl - Initializes the rate limit of the specified
* VPORT.
*
- * @param p_hwfn - HW device data
- * @param p_ptt - ptt window used for writing the registers
- * @param vport_id - VPORT ID
- * @param vport_rl - rate limit in Mb/sec units
+ * @param p_hwfn - HW device data
+ * @param p_ptt - ptt window used for writing the registers
+ * @param vport_id - VPORT ID
+ * @param vport_rl - rate limit in Mb/sec units
+ * @param link_speed - link speed in Mbps.
*
* @return 0 on success, -1 on error.
*/
int ecore_init_vport_rl(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u8 vport_id,
- u32 vport_rl);
+ u32 vport_rl,
+ u32 link_speed);
/**
* @brief ecore_send_qm_stop_cmd Sends a stop command to the QM
@@ -261,28 +261,15 @@ void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn,
#ifndef UNUSED_HSI_FUNC
/**
- * @brief ecore_set_engine_mf_ovlan_eth_type - initializes Nig,Prs,Pbf and llh
- * ethType Regs to input ethType
- * should Be called once per engine
- * if engine
- * is in BD mode.
- *
- * @param p_ptt - ptt window used for writing the registers.
- * @param ethType - etherType to configure
- */
-void ecore_set_engine_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, u32 ethType);
-
-/**
* @brief ecore_set_port_mf_ovlan_eth_type - initializes DORQ ethType Regs to
 *                                            the input ethType; should be
 *                                            called once per port.
*
- * @param p_ptt - ptt window used for writing the registers.
+ * @param p_hwfn - HW device data
* @param ethType - etherType to configure
*/
void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, u32 ethType);
+ u32 ethType);
#endif /* UNUSED_HSI_FUNC */
/**
@@ -351,33 +338,35 @@ void ecore_set_gft_event_id_cm_hdr(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
/**
- * @brief ecore_set_rfs_mode_disable - Disable and configure HW for RFS
 + * @brief ecore_gft_disable - Disable GFT
*
* @param p_hwfn - HW device data
* @param p_ptt - ptt window used for writing the registers.
- * @param pf_id - pf on which to disable RFS.
+ * @param pf_id - pf on which to disable GFT.
*/
-void ecore_set_rfs_mode_disable(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u16 pf_id);
+void ecore_gft_disable(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 pf_id);
/**
-* @brief ecore_set_rfs_mode_enable - enable and configure HW for RFS
+ * @brief ecore_gft_config - Enable and configure HW for GFT
*
* @param p_ptt - ptt window used for writing the registers.
-* @param pf_id - pf on which to enable RFS.
+ * @param pf_id - pf on which to enable GFT.
* @param tcp - set profile tcp packets.
* @param udp - set profile udp packet.
* @param ipv4 - set profile ipv4 packet.
* @param ipv6 - set profile ipv6 packet.
 + * @param profile_type - defines which packet fields to match on. Use enum gft_profile_type.
*/
-void ecore_set_rfs_mode_enable(struct ecore_hwfn *p_hwfn,
+void ecore_gft_config(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u16 pf_id,
bool tcp,
bool udp,
bool ipv4,
- bool ipv6);
+ bool ipv6,
+ enum gft_profile_type profile_type);
#endif /* UNUSED_HSI_FUNC */
/**
@@ -431,26 +420,25 @@ void ecore_enable_context_validation(struct ecore_hwfn *p_hwfn,
* @param ctx_type - context type.
* @param cid - context cid.
*/
-void ecore_calc_session_ctx_validation(struct ecore_hwfn *p_hwfn,
- void *p_ctx_mem,
+void ecore_calc_session_ctx_validation(void *p_ctx_mem,
u16 ctx_size,
u8 ctx_type,
u32 cid);
+
/**
* @brief ecore_calc_task_ctx_validation - Calcualte validation byte for task
* context.
*
- * @param p_hwfn - HW device data
* @param p_ctx_mem - pointer to context memory.
* @param ctx_size - context size.
* @param ctx_type - context type.
* @param tid - context tid.
*/
-void ecore_calc_task_ctx_validation(struct ecore_hwfn *p_hwfn,
- void *p_ctx_mem,
+void ecore_calc_task_ctx_validation(void *p_ctx_mem,
u16 ctx_size,
u8 ctx_type,
u32 tid);
+
/**
* @brief ecore_memset_session_ctx - Memset session context to 0 while
* preserving validation bytes.
diff --git a/drivers/net/qede/base/ecore_init_ops.c b/drivers/net/qede/base/ecore_init_ops.c
index b907a95e..91633c11 100644
--- a/drivers/net/qede/base/ecore_init_ops.c
+++ b/drivers/net/qede/base/ecore_init_ops.c
@@ -40,6 +40,13 @@ void ecore_init_clear_rt_data(struct ecore_hwfn *p_hwfn)
void ecore_init_store_rt_reg(struct ecore_hwfn *p_hwfn, u32 rt_offset, u32 val)
{
+ if (rt_offset >= RUNTIME_ARRAY_SIZE) {
+ DP_ERR(p_hwfn,
+ "Avoid storing %u in rt_data at index %u since RUNTIME_ARRAY_SIZE is %u!\n",
+ val, rt_offset, RUNTIME_ARRAY_SIZE);
+ return;
+ }
+
p_hwfn->rt_data.init_val[rt_offset] = val;
p_hwfn->rt_data.b_valid[rt_offset] = true;
}
@@ -49,6 +56,14 @@ void ecore_init_store_rt_agg(struct ecore_hwfn *p_hwfn,
{
osal_size_t i;
+ if ((rt_offset + size - 1) >= RUNTIME_ARRAY_SIZE) {
+ DP_ERR(p_hwfn,
+ "Avoid storing values in rt_data at indices %u-%u since RUNTIME_ARRAY_SIZE is %u!\n",
+ rt_offset, (u32)(rt_offset + size - 1),
+ RUNTIME_ARRAY_SIZE);
+ return;
+ }
+
for (i = 0; i < size / sizeof(u32); i++) {
p_hwfn->rt_data.init_val[rt_offset + i] = p_val[i];
p_hwfn->rt_data.b_valid[rt_offset + i] = true;
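
The pattern added in these two hunks is a plain bounds check before indexing
the runtime array. A minimal standalone sketch under assumed names (rt_val and
rt_valid stand in for the rt_data fields):

	#include <stdint.h>
	#include <stdbool.h>
	#include <stdio.h>

	#define RT_ARRAY_SIZE 1024	/* stands in for RUNTIME_ARRAY_SIZE */

	static uint32_t rt_val[RT_ARRAY_SIZE];
	static bool rt_valid[RT_ARRAY_SIZE];

	/* Reject out-of-range offsets instead of silently corrupting memory. */
	static void rt_store(uint32_t offset, uint32_t val)
	{
		if (offset >= RT_ARRAY_SIZE) {
			fprintf(stderr, "rt_store: offset %u out of range (max %u)\n",
				offset, RT_ARRAY_SIZE - 1);
			return;
		}
		rt_val[offset] = val;
		rt_valid[offset] = true;
	}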
@@ -161,8 +176,7 @@ static enum _ecore_status_t ecore_init_array_dmae(struct ecore_hwfn *p_hwfn,
static enum _ecore_status_t ecore_init_fill_dmae(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
- u32 addr, u32 fill,
- u32 fill_count)
+ u32 addr, u32 fill_count)
{
static u32 zero_buffer[DMAE_MAX_RW_SIZE];
@@ -294,7 +308,7 @@ static enum _ecore_status_t ecore_init_cmd_wr(struct ecore_hwfn *p_hwfn,
case INIT_SRC_ZEROS:
data = OSAL_LE32_TO_CPU(p_cmd->args.zeros_count);
if (b_must_dmae || (b_can_dmae && (data >= 64)))
- rc = ecore_init_fill_dmae(p_hwfn, p_ptt, addr, 0, data);
+ rc = ecore_init_fill_dmae(p_hwfn, p_ptt, addr, data);
else
ecore_init_fill(p_hwfn, p_ptt, addr, 0, data);
break;
@@ -303,10 +317,10 @@ static enum _ecore_status_t ecore_init_cmd_wr(struct ecore_hwfn *p_hwfn,
b_must_dmae, b_can_dmae);
break;
case INIT_SRC_RUNTIME:
- ecore_init_rt(p_hwfn, p_ptt, addr,
- OSAL_LE16_TO_CPU(p_cmd->args.runtime.offset),
- OSAL_LE16_TO_CPU(p_cmd->args.runtime.size),
- b_must_dmae);
+ rc = ecore_init_rt(p_hwfn, p_ptt, addr,
+ OSAL_LE16_TO_CPU(p_cmd->args.runtime.offset),
+ OSAL_LE16_TO_CPU(p_cmd->args.runtime.size),
+ b_must_dmae);
break;
}
@@ -382,10 +396,13 @@ static void ecore_init_cmd_rd(struct ecore_hwfn *p_hwfn,
OSAL_LE32_TO_CPU(cmd->op_data));
}
-/* init_ops callbacks entry point */
+/* init_ops callbacks entry point.
+ * OSAL_UNUSED is temporarily used to avoid unused-parameter compilation warnings.
+ * Should be removed when the function is actually used.
+ */
static void ecore_init_cmd_cb(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- struct init_callback_op *p_cmd)
+ struct ecore_ptt OSAL_UNUSED * p_ptt,
+ struct init_callback_op OSAL_UNUSED * p_cmd)
{
DP_NOTICE(p_hwfn, true,
"Currently init values have no need of callbacks\n");
@@ -429,17 +446,16 @@ static u32 ecore_init_cmd_mode(struct ecore_hwfn *p_hwfn,
INIT_IF_MODE_OP_CMD_OFFSET);
}
-static u32 ecore_init_cmd_phase(struct ecore_hwfn *p_hwfn,
- struct init_if_phase_op *p_cmd,
+static u32 ecore_init_cmd_phase(struct init_if_phase_op *p_cmd,
u32 phase, u32 phase_id)
{
u32 data = OSAL_LE32_TO_CPU(p_cmd->phase_data);
+ u32 op_data = OSAL_LE32_TO_CPU(p_cmd->op_data);
if (!(GET_FIELD(data, INIT_IF_PHASE_OP_PHASE) == phase &&
(GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == ANY_PHASE_ID ||
GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == phase_id)))
- return GET_FIELD(OSAL_LE32_TO_CPU(p_cmd->op_data),
- INIT_IF_PHASE_OP_CMD_OFFSET);
+ return GET_FIELD(op_data, INIT_IF_PHASE_OP_CMD_OFFSET);
else
return 0;
}
@@ -485,8 +501,8 @@ enum _ecore_status_t ecore_init_run(struct ecore_hwfn *p_hwfn,
modes);
break;
case INIT_OP_IF_PHASE:
- cmd_num += ecore_init_cmd_phase(p_hwfn, &cmd->if_phase,
- phase, phase_id);
+ cmd_num += ecore_init_cmd_phase(&cmd->if_phase, phase,
+ phase_id);
b_dmae = GET_FIELD(data, INIT_IF_PHASE_OP_DMAE_ENABLE);
break;
case INIT_OP_DELAY:
@@ -510,7 +526,8 @@ enum _ecore_status_t ecore_init_run(struct ecore_hwfn *p_hwfn,
return rc;
}
-void ecore_gtt_init(struct ecore_hwfn *p_hwfn)
+void ecore_gtt_init(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
{
u32 gtt_base;
u32 i;
@@ -528,7 +545,7 @@ void ecore_gtt_init(struct ecore_hwfn *p_hwfn)
/* initialize PTT/GTT (poll for completion) */
if (!initialized) {
- ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
+ ecore_wr(p_hwfn, p_ptt,
PGLUE_B_REG_START_INIT_PTT_GTT, 1);
initialized = true;
}
@@ -537,7 +554,7 @@ void ecore_gtt_init(struct ecore_hwfn *p_hwfn)
/* ptt might be overridden by HW until this is done */
OSAL_UDELAY(10);
ecore_ptt_invalidate(p_hwfn);
- val = ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
+ val = ecore_rd(p_hwfn, p_ptt,
PGLUE_B_REG_INIT_DONE_PTT_GTT);
} while ((val != 1) && --poll_cnt);
@@ -557,7 +574,11 @@ void ecore_gtt_init(struct ecore_hwfn *p_hwfn)
}
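
ecore_gtt_init's wait is the classic poll-with-timeout idiom: kick the init,
then re-read a done register with a small delay until it reads 1 or the budget
is exhausted. A standalone sketch, with hw_read32() and udelay() as
hypothetical stand-ins for ecore_rd() and OSAL_UDELAY():

	#include <stdint.h>
	#include <stdbool.h>

	/* Hypothetical accessors standing in for the driver's helpers. */
	extern uint32_t hw_read32(uint32_t addr);
	extern void udelay(unsigned int usec);

	#define INIT_DONE_REG 0x1000	/* illustrative address */

	/* Poll a "done" register until it reads 1 or the budget runs out. */
	static bool poll_init_done(int max_polls)
	{
		uint32_t val = 0;

		do {
			udelay(10);	/* give HW time; it may override the window */
			val = hw_read32(INIT_DONE_REG);
		} while (val != 1 && --max_polls > 0);

		return val == 1;
	}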
enum _ecore_status_t ecore_init_fw_data(struct ecore_dev *p_dev,
- const u8 *data)
+#ifdef CONFIG_ECORE_BINARY_FW
+ const u8 *fw_data)
+#else
+ const u8 OSAL_UNUSED * fw_data)
+#endif
{
struct ecore_fw_data *fw = p_dev->fw_data;
@@ -565,24 +586,24 @@ enum _ecore_status_t ecore_init_fw_data(struct ecore_dev *p_dev,
struct bin_buffer_hdr *buf_hdr;
u32 offset, len;
- if (!data) {
+ if (!fw_data) {
DP_NOTICE(p_dev, true, "Invalid fw data\n");
return ECORE_INVAL;
}
- buf_hdr = (struct bin_buffer_hdr *)(uintptr_t)data;
+ buf_hdr = (struct bin_buffer_hdr *)(uintptr_t)fw_data;
offset = buf_hdr[BIN_BUF_INIT_FW_VER_INFO].offset;
- fw->fw_ver_info = (struct fw_ver_info *)((uintptr_t)(data + offset));
+ fw->fw_ver_info = (struct fw_ver_info *)((uintptr_t)(fw_data + offset));
offset = buf_hdr[BIN_BUF_INIT_CMD].offset;
- fw->init_ops = (union init_op *)((uintptr_t)(data + offset));
+ fw->init_ops = (union init_op *)((uintptr_t)(fw_data + offset));
offset = buf_hdr[BIN_BUF_INIT_VAL].offset;
- fw->arr_data = (u32 *)((uintptr_t)(data + offset));
+ fw->arr_data = (u32 *)((uintptr_t)(fw_data + offset));
offset = buf_hdr[BIN_BUF_INIT_MODE_TREE].offset;
- fw->modes_tree_buf = (u8 *)((uintptr_t)(data + offset));
+ fw->modes_tree_buf = (u8 *)((uintptr_t)(fw_data + offset));
len = buf_hdr[BIN_BUF_INIT_CMD].length;
fw->init_ops_size = len / sizeof(struct init_raw_op);
#else
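
ecore_init_fw_data resolves each firmware section as base pointer plus the
offset recorded in an array of {offset, length} headers at the start of the
blob. A simplified standalone sketch of that parsing (the struct and enum
names here are illustrative, not the real bin_buffer_hdr layout):

	#include <stdint.h>
	#include <stddef.h>

	/* Simplified view of the binary-firmware layout: a header array of
	 * {offset, length} records, each pointing into the same blob. */
	struct buf_hdr {
		uint32_t offset;
		uint32_t length;
	};

	enum { BUF_VER_INFO, BUF_INIT_CMD, BUF_INIT_VAL, BUF_MODE_TREE, BUF_MAX };

	struct fw_view {
		const void *ver_info;
		const void *init_ops;
		const uint32_t *arr_data;
		const uint8_t *modes_tree;
		size_t init_ops_len;
	};

	/* Resolve each section as blob base + recorded offset. */
	static void fw_parse(const uint8_t *blob, struct fw_view *out)
	{
		const struct buf_hdr *hdr = (const struct buf_hdr *)blob;

		out->ver_info     = blob + hdr[BUF_VER_INFO].offset;
		out->init_ops     = blob + hdr[BUF_INIT_CMD].offset;
		out->arr_data     = (const uint32_t *)(blob + hdr[BUF_INIT_VAL].offset);
		out->modes_tree   = blob + hdr[BUF_MODE_TREE].offset;
		out->init_ops_len = hdr[BUF_INIT_CMD].length;
	}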
diff --git a/drivers/net/qede/base/ecore_init_ops.h b/drivers/net/qede/base/ecore_init_ops.h
index d58c7d6a..e293a4a3 100644
--- a/drivers/net/qede/base/ecore_init_ops.h
+++ b/drivers/net/qede/base/ecore_init_ops.h
@@ -107,5 +107,6 @@ void ecore_init_store_rt_agg(struct ecore_hwfn *p_hwfn,
*
* @param p_hwfn
*/
-void ecore_gtt_init(struct ecore_hwfn *p_hwfn);
+void ecore_gtt_init(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
#endif /* __ECORE_INIT_OPS__ */
diff --git a/drivers/net/qede/base/ecore_int.c b/drivers/net/qede/base/ecore_int.c
index b57c510c..e6cef85b 100644
--- a/drivers/net/qede/base/ecore_int.c
+++ b/drivers/net/qede/base/ecore_int.c
@@ -9,7 +9,6 @@
#include "bcm_osal.h"
#include "ecore.h"
#include "ecore_spq.h"
-#include "reg_addr.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_init_ops.h"
#include "ecore_rt_defs.h"
@@ -30,7 +29,7 @@ struct ecore_pi_info {
struct ecore_sb_sp_info {
struct ecore_sb_info sb_info;
/* per protocol index data */
- struct ecore_pi_info pi_info_arr[PIS_PER_SB];
+ struct ecore_pi_info pi_info_arr[PIS_PER_SB_E4];
};
enum ecore_attention_type {
@@ -248,21 +247,21 @@ static enum _ecore_status_t ecore_grc_attn_cb(struct ecore_hwfn *p_hwfn)
tmp2 = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1);
- DP_INFO(p_hwfn->p_dev,
- "GRC timeout [%08x:%08x] - %s Address [%08x] [Master %s]"
- " [PF: %02x %s %02x]\n",
- tmp2, tmp,
- (tmp & ECORE_GRC_ATTENTION_RDWR_BIT) ? "Write to" : "Read from",
- (tmp & ECORE_GRC_ATTENTION_ADDRESS_MASK) << 2,
- grc_timeout_attn_master_to_str((tmp &
- ECORE_GRC_ATTENTION_MASTER_MASK) >>
- ECORE_GRC_ATTENTION_MASTER_SHIFT),
- (tmp2 & ECORE_GRC_ATTENTION_PF_MASK),
- (((tmp2 & ECORE_GRC_ATTENTION_PRIV_MASK) >>
+ DP_NOTICE(p_hwfn->p_dev, false,
+ "GRC timeout [%08x:%08x] - %s Address [%08x] [Master %s] [PF: %02x %s %02x]\n",
+ tmp2, tmp,
+ (tmp & ECORE_GRC_ATTENTION_RDWR_BIT) ? "Write to"
+ : "Read from",
+ (tmp & ECORE_GRC_ATTENTION_ADDRESS_MASK) << 2,
+ grc_timeout_attn_master_to_str(
+ (tmp & ECORE_GRC_ATTENTION_MASTER_MASK) >>
+ ECORE_GRC_ATTENTION_MASTER_SHIFT),
+ (tmp2 & ECORE_GRC_ATTENTION_PF_MASK),
+ (((tmp2 & ECORE_GRC_ATTENTION_PRIV_MASK) >>
ECORE_GRC_ATTENTION_PRIV_SHIFT) ==
- ECORE_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Irrelevant:)",
- (tmp2 & ECORE_GRC_ATTENTION_VF_MASK) >>
- ECORE_GRC_ATTENTION_VF_SHIFT);
+ ECORE_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Irrelevant:)",
+ (tmp2 & ECORE_GRC_ATTENTION_VF_MASK) >>
+ ECORE_GRC_ATTENTION_VF_SHIFT);
out:
/* Regardless of anything else, clear the validity bit */
@@ -414,31 +413,136 @@ ecore_general_attention_35(struct ecore_hwfn *p_hwfn)
return ECORE_SUCCESS;
}
-#define ECORE_DORQ_ATTENTION_REASON_MASK (0xfffff)
-#define ECORE_DORQ_ATTENTION_OPAQUE_MASK (0xffff)
-#define ECORE_DORQ_ATTENTION_SIZE_MASK (0x7f0000)
-#define ECORE_DORQ_ATTENTION_SIZE_SHIFT (16)
+#define ECORE_DORQ_ATTENTION_REASON_MASK (0xfffff)
+#define ECORE_DORQ_ATTENTION_OPAQUE_MASK (0xffff)
+#define ECORE_DORQ_ATTENTION_OPAQUE_SHIFT (0x0)
+#define ECORE_DORQ_ATTENTION_SIZE_MASK (0x7f)
+#define ECORE_DORQ_ATTENTION_SIZE_SHIFT (16)
+
+#define ECORE_DB_REC_COUNT 10
+#define ECORE_DB_REC_INTERVAL 100
+
+/* assumes sticky overflow indication was set for this PF */
+static enum _ecore_status_t ecore_db_rec_attn(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u8 count = ECORE_DB_REC_COUNT;
+ u32 usage = 1;
+
+	/* Wait for usage to drop to zero or for the count to run out. This is
+	 * necessary since EDPM doorbell transactions can take multiple 64b
+	 * cycles and, as such, can be split over the PCI bus. The doorbell
+	 * drop can thus happen with half an EDPM in the queue and the other
+	 * half dropped. Another EDPM doorbell to the same address (from the
+	 * doorbell recovery mechanism or from the doorbelling entity) could
+	 * then have its first half dropped and its second half interpreted as
+	 * a continuation of the first. To prevent such malformed doorbells
+	 * from reaching the device, flush the queue before releasing the
+	 * overflow sticky indication.
+ */
+ while (count-- && usage) {
+ usage = ecore_rd(p_hwfn, p_ptt, DORQ_REG_PF_USAGE_CNT);
+ OSAL_UDELAY(ECORE_DB_REC_INTERVAL);
+ }
+
+ /* should have been depleted by now */
+ if (usage) {
+ DP_NOTICE(p_hwfn->p_dev, false,
+ "DB recovery: doorbell usage failed to zero after %d usec. usage was %x\n",
+ ECORE_DB_REC_INTERVAL * ECORE_DB_REC_COUNT, usage);
+ return ECORE_TIMEOUT;
+ }
+
+	/* flush any pending (e)dpm as they may never arrive */
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_DPM_FORCE_ABORT, 0x1);
+
+ /* release overflow sticky indication (stop silently dropping
+ * everything)
+ */
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0);
+
+ /* repeat all last doorbells (doorbell drop recovery) */
+ ecore_db_recovery_execute(p_hwfn, DB_REC_REAL_DEAL);
+
+ return ECORE_SUCCESS;
+}
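
The recovery sequence above is strictly ordered: drain the usage counter,
force-abort any half-transferred EDPM, release the sticky drop indication,
then replay the recorded doorbells. A compact standalone sketch of the same
ordering, with hypothetical register addresses and helpers:

	#include <stdint.h>
	#include <stdbool.h>

	extern uint32_t hw_read32(uint32_t addr);
	extern void hw_write32(uint32_t addr, uint32_t val);
	extern void udelay(unsigned int usec);
	extern void replay_doorbells(void); /* stands in for ecore_db_recovery_execute() */

	/* Illustrative register addresses only. */
	#define REG_USAGE_CNT	0x100
	#define REG_FORCE_ABORT	0x104
	#define REG_OVFL_STICKY	0x108

	/* Drain -> flush -> release sticky -> replay; same order as above. */
	static bool db_recover(void)
	{
		int count = 10;
		uint32_t usage = 1;

		while (count-- && usage) {	/* wait for in-flight doorbells */
			usage = hw_read32(REG_USAGE_CNT);
			udelay(100);
		}
		if (usage)
			return false;		/* queue never drained */

		hw_write32(REG_FORCE_ABORT, 1);	/* flush half-split EDPMs */
		hw_write32(REG_OVFL_STICKY, 0);	/* stop dropping doorbells */
		replay_doorbells();		/* resend the last doorbells */
		return true;
	}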
static enum _ecore_status_t ecore_dorq_attn_cb(struct ecore_hwfn *p_hwfn)
{
- u32 reason;
+ u32 int_sts, first_drop_reason, details, address, overflow,
+ all_drops_reason;
+ struct ecore_ptt *p_ptt = p_hwfn->p_dpc_ptt;
+ enum _ecore_status_t rc;
- reason = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, DORQ_REG_DB_DROP_REASON) &
- ECORE_DORQ_ATTENTION_REASON_MASK;
- if (reason) {
- u32 details = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
- DORQ_REG_DB_DROP_DETAILS);
+ int_sts = ecore_rd(p_hwfn, p_ptt, DORQ_REG_INT_STS);
+ DP_NOTICE(p_hwfn->p_dev, false, "DORQ attention. int_sts was %x\n",
+ int_sts);
- DP_INFO(p_hwfn->p_dev,
- "DORQ db_drop: address 0x%08x Opaque FID 0x%04x"
- " Size [bytes] 0x%08x Reason: 0x%08x\n",
- ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
- DORQ_REG_DB_DROP_DETAILS_ADDRESS),
- (u16)(details & ECORE_DORQ_ATTENTION_OPAQUE_MASK),
- ((details & ECORE_DORQ_ATTENTION_SIZE_MASK) >>
- ECORE_DORQ_ATTENTION_SIZE_SHIFT) * 4, reason);
+	/* int_sts may be zero since all PFs were interrupted for doorbell
+	 * overflow, but another one already handled it; we can abort here. If
+	 * this PF also requires overflow recovery, we will be interrupted again.
+ */
+ if (!int_sts)
+ return ECORE_SUCCESS;
+
+ /* check if db_drop or overflow happened */
+ if (int_sts & (DORQ_REG_INT_STS_DB_DROP |
+ DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR)) {
+ /* obtain data about db drop/overflow */
+ first_drop_reason = ecore_rd(p_hwfn, p_ptt,
+ DORQ_REG_DB_DROP_REASON) &
+ ECORE_DORQ_ATTENTION_REASON_MASK;
+ details = ecore_rd(p_hwfn, p_ptt,
+ DORQ_REG_DB_DROP_DETAILS);
+ address = ecore_rd(p_hwfn, p_ptt,
+ DORQ_REG_DB_DROP_DETAILS_ADDRESS);
+ overflow = ecore_rd(p_hwfn, p_ptt,
+ DORQ_REG_PF_OVFL_STICKY);
+ all_drops_reason = ecore_rd(p_hwfn, p_ptt,
+ DORQ_REG_DB_DROP_DETAILS_REASON);
+
+ /* log info */
+ DP_NOTICE(p_hwfn->p_dev, false,
+ "Doorbell drop occurred\n"
+ "Address\t\t0x%08x\t(second BAR address)\n"
+ "FID\t\t0x%04x\t\t(Opaque FID)\n"
+ "Size\t\t0x%04x\t\t(in bytes)\n"
+ "1st drop reason\t0x%08x\t(details on first drop since last handling)\n"
+ "Sticky reasons\t0x%08x\t(all drop reasons since last handling)\n"
+ "Overflow\t0x%x\t\t(a per PF indication)\n",
+ address,
+ GET_FIELD(details, ECORE_DORQ_ATTENTION_OPAQUE),
+ GET_FIELD(details, ECORE_DORQ_ATTENTION_SIZE) * 4,
+ first_drop_reason, all_drops_reason, overflow);
+
+ /* if this PF caused overflow, initiate recovery */
+ if (overflow) {
+ rc = ecore_db_rec_attn(p_hwfn, p_ptt);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+ }
+
+ /* clear the doorbell drop details and prepare for next drop */
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS_REL, 0);
+
+		/* mark the interrupt as handled (note: even if the drop was
+		 * due to a reason other than overflow, we mark it as handled)
+ */
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_INT_STS_WR,
+ DORQ_REG_INT_STS_DB_DROP |
+ DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR);
+
+		/* if there are no indications other than drop indications,
+		 * report success
+ */
+ if ((int_sts & ~(DORQ_REG_INT_STS_DB_DROP |
+ DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR |
+ DORQ_REG_INT_STS_DORQ_FIFO_AFULL)) == 0)
+ return ECORE_SUCCESS;
}
+	/* some other indication was present - non-recoverable */
+ DP_INFO(p_hwfn, "DORQ fatal attention\n");
+
return ECORE_INVAL;
}
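
Decoding the drop-details register is straightforward field extraction; note
the size field is in 4-byte units, hence the multiply by 4. A minimal sketch
with an assumed field layout mirroring the ECORE_DORQ_ATTENTION_* masks:

	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative field layout for the drop-details register:
	 * bits 0..15 opaque FID, bits 16..22 size in 4-byte units. */
	#define DETAILS_OPAQUE_MASK	0xffffu
	#define DETAILS_OPAQUE_SHIFT	0
	#define DETAILS_SIZE_MASK	0x7fu
	#define DETAILS_SIZE_SHIFT	16

	static void log_db_drop(uint32_t address, uint32_t details)
	{
		uint16_t fid  = (details >> DETAILS_OPAQUE_SHIFT) & DETAILS_OPAQUE_MASK;
		uint32_t size = ((details >> DETAILS_SIZE_SHIFT) & DETAILS_SIZE_MASK) * 4;

		printf("doorbell drop: addr=0x%08x fid=0x%04x size=%u bytes\n",
		       address, fid, size);
	}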
@@ -851,32 +955,38 @@ ecore_int_deassertion_aeu_bit(struct ecore_hwfn *p_hwfn,
* @brief ecore_int_deassertion_parity - handle a single parity AEU source
*
* @param p_hwfn
- * @param p_aeu - descriptor of an AEU bit which caused the
- * parity
+ * @param p_aeu - descriptor of an AEU bit which caused the parity
+ * @param aeu_en_reg - address of the AEU enable register
* @param bit_index
*/
static void ecore_int_deassertion_parity(struct ecore_hwfn *p_hwfn,
struct aeu_invert_reg_bit *p_aeu,
- u8 bit_index)
+ u32 aeu_en_reg, u8 bit_index)
{
- u32 block_id = p_aeu->block_index;
+ u32 block_id = p_aeu->block_index, mask, val;
- DP_INFO(p_hwfn->p_dev, "%s[%d] parity attention is set\n",
- p_aeu->bit_name, bit_index);
-
- if (block_id == MAX_BLOCK_ID)
- return;
+ DP_NOTICE(p_hwfn->p_dev, false,
+ "%s parity attention is set [address 0x%08x, bit %d]\n",
+ p_aeu->bit_name, aeu_en_reg, bit_index);
- ecore_int_attn_print(p_hwfn, block_id,
- ATTN_TYPE_PARITY, false);
+ if (block_id != MAX_BLOCK_ID) {
+ ecore_int_attn_print(p_hwfn, block_id, ATTN_TYPE_PARITY, false);
- /* In A0, there's a single parity bit for several blocks */
- if (block_id == BLOCK_BTB) {
- ecore_int_attn_print(p_hwfn, BLOCK_OPTE,
- ATTN_TYPE_PARITY, false);
- ecore_int_attn_print(p_hwfn, BLOCK_MCP,
- ATTN_TYPE_PARITY, false);
+ /* In A0, there's a single parity bit for several blocks */
+ if (block_id == BLOCK_BTB) {
+ ecore_int_attn_print(p_hwfn, BLOCK_OPTE,
+ ATTN_TYPE_PARITY, false);
+ ecore_int_attn_print(p_hwfn, BLOCK_MCP,
+ ATTN_TYPE_PARITY, false);
+ }
}
+
+ /* Prevent this parity error from being re-asserted */
+ mask = ~(0x1 << bit_index);
+ val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
+ ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, val & mask);
+ DP_INFO(p_hwfn, "`%s' - Disabled future parity errors\n",
+ p_aeu->bit_name);
}
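
Disabling the offending bit is a single read-modify-write on the AEU enable
register, as sketched below (hw_read32()/hw_write32() are hypothetical
stand-ins for ecore_rd()/ecore_wr()):

	#include <stdint.h>

	extern uint32_t hw_read32(uint32_t addr);
	extern void hw_write32(uint32_t addr, uint32_t val);

	/* Clear one enable bit so the same parity error cannot re-assert. */
	static void mask_attention_bit(uint32_t aeu_en_reg, uint8_t bit_index)
	{
		uint32_t val = hw_read32(aeu_en_reg);

		val &= ~(1u << bit_index);	/* read-modify-write, single bit */
		hw_write32(aeu_en_reg, val);
	}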
/**
@@ -891,8 +1001,7 @@ static enum _ecore_status_t ecore_int_deassertion(struct ecore_hwfn *p_hwfn,
u16 deasserted_bits)
{
struct ecore_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
- u32 aeu_inv_arr[NUM_ATTN_REGS], aeu_mask;
- bool b_parity = false;
+ u32 aeu_inv_arr[NUM_ATTN_REGS], aeu_mask, aeu_en, en;
u8 i, j, k, bit_idx;
enum _ecore_status_t rc = ECORE_SUCCESS;
@@ -908,11 +1017,11 @@ static enum _ecore_status_t ecore_int_deassertion(struct ecore_hwfn *p_hwfn,
/* Handle parity attentions first */
for (i = 0; i < NUM_ATTN_REGS; i++) {
struct aeu_invert_reg *p_aeu = &sb_attn_sw->p_aeu_desc[i];
- u32 en = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
- MISC_REG_AEU_ENABLE1_IGU_OUT_0 +
- i * sizeof(u32));
+ u32 parities;
- u32 parities = sb_attn_sw->parity_mask[i] & aeu_inv_arr[i] & en;
+ aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 + i * sizeof(u32);
+ en = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);
+ parities = sb_attn_sw->parity_mask[i] & aeu_inv_arr[i] & en;
/* Skip register in which no parity bit is currently set */
if (!parities)
@@ -922,11 +1031,9 @@ static enum _ecore_status_t ecore_int_deassertion(struct ecore_hwfn *p_hwfn,
struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j];
if (ecore_int_is_parity_flag(p_hwfn, p_bit) &&
- !!(parities & (1 << bit_idx))) {
+ !!(parities & (1 << bit_idx)))
ecore_int_deassertion_parity(p_hwfn, p_bit,
- bit_idx);
- b_parity = true;
- }
+ aeu_en, bit_idx);
bit_idx += ATTENTION_LENGTH(p_bit->flags);
}
@@ -941,10 +1048,13 @@ static enum _ecore_status_t ecore_int_deassertion(struct ecore_hwfn *p_hwfn,
continue;
for (i = 0; i < NUM_ATTN_REGS; i++) {
- u32 aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 +
- i * sizeof(u32) + k * sizeof(u32) * NUM_ATTN_REGS;
- u32 en = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);
- u32 bits = aeu_inv_arr[i] & en;
+ u32 bits;
+
+ aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 +
+ i * sizeof(u32) +
+ k * sizeof(u32) * NUM_ATTN_REGS;
+ en = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);
+ bits = aeu_inv_arr[i] & en;
/* Skip if no bit from this group is currently set */
if (!bits)
@@ -1369,6 +1479,49 @@ void ecore_init_cau_sb_entry(struct ecore_hwfn *p_hwfn,
SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state);
}
+static void _ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 igu_sb_id, u32 pi_index,
+ enum ecore_coalescing_fsm coalescing_fsm,
+ u8 timeset)
+{
+ struct cau_pi_entry pi_entry;
+ u32 sb_offset, pi_offset;
+
+ if (IS_VF(p_hwfn->p_dev))
+		return; /* @@@TBD MichalK - VF CAU... */
+
+ sb_offset = igu_sb_id * PIS_PER_SB_E4;
+ OSAL_MEMSET(&pi_entry, 0, sizeof(struct cau_pi_entry));
+
+ SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset);
+ if (coalescing_fsm == ECORE_COAL_RX_STATE_MACHINE)
+ SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 0);
+ else
+ SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 1);
+
+ pi_offset = sb_offset + pi_index;
+ if (p_hwfn->hw_init_done) {
+ ecore_wr(p_hwfn, p_ptt,
+ CAU_REG_PI_MEMORY + pi_offset * sizeof(u32),
+ *((u32 *)&(pi_entry)));
+ } else {
+ STORE_RT_REG(p_hwfn,
+ CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset,
+ *((u32 *)&(pi_entry)));
+ }
+}
+
+void ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_sb_info *p_sb, u32 pi_index,
+ enum ecore_coalescing_fsm coalescing_fsm,
+ u8 timeset)
+{
+ _ecore_int_cau_conf_pi(p_hwfn, p_ptt, p_sb->igu_sb_id,
+ pi_index, coalescing_fsm, timeset);
+}
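
The PI addressing above is simple arithmetic: each SB owns PIS_PER_SB_E4
consecutive PI entries, so a PI's slot is sb * PIS_PER_SB_E4 + pi, scaled by
the 32-bit entry size. A sketch (PIS_PER_SB here is an assumed constant):

	#include <stdint.h>

	#define PIS_PER_SB	12	/* stands in for PIS_PER_SB_E4 */

	/* A PI's slot in the CAU memory is sb * PIS_PER_SB + pi. */
	static uint32_t cau_pi_offset(uint16_t igu_sb_id, uint32_t pi_index)
	{
		return (uint32_t)igu_sb_id * PIS_PER_SB + pi_index;
	}

	/* Byte address inside a hypothetical CAU PI memory window. */
	static uint32_t cau_pi_address(uint32_t pi_mem_base, uint16_t sb, uint32_t pi)
	{
		return pi_mem_base + cau_pi_offset(sb, pi) * sizeof(uint32_t);
	}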
+
void ecore_int_cau_conf_sb(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
dma_addr_t sb_phys, u16 igu_sb_id,
@@ -1420,8 +1573,9 @@ void ecore_int_cau_conf_sb(struct ecore_hwfn *p_hwfn,
else
timer_res = 2;
timeset = (u8)(p_hwfn->p_dev->rx_coalesce_usecs >> timer_res);
- ecore_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI,
- ECORE_COAL_RX_STATE_MACHINE, timeset);
+ _ecore_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI,
+ ECORE_COAL_RX_STATE_MACHINE,
+ timeset);
if (p_hwfn->p_dev->tx_coalesce_usecs <= 0x7F)
timer_res = 0;
@@ -1431,46 +1585,14 @@ void ecore_int_cau_conf_sb(struct ecore_hwfn *p_hwfn,
timer_res = 2;
timeset = (u8)(p_hwfn->p_dev->tx_coalesce_usecs >> timer_res);
for (i = 0; i < num_tc; i++) {
- ecore_int_cau_conf_pi(p_hwfn, p_ptt,
- igu_sb_id, TX_PI(i),
- ECORE_COAL_TX_STATE_MACHINE,
- timeset);
+ _ecore_int_cau_conf_pi(p_hwfn, p_ptt,
+ igu_sb_id, TX_PI(i),
+ ECORE_COAL_TX_STATE_MACHINE,
+ timeset);
}
}
}
-void ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u16 igu_sb_id, u32 pi_index,
- enum ecore_coalescing_fsm coalescing_fsm, u8 timeset)
-{
- struct cau_pi_entry pi_entry;
- u32 sb_offset, pi_offset;
-
- if (IS_VF(p_hwfn->p_dev))
- return; /* @@@TBD MichalK- VF CAU... */
-
- sb_offset = igu_sb_id * PIS_PER_SB;
- OSAL_MEMSET(&pi_entry, 0, sizeof(struct cau_pi_entry));
-
- SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset);
- if (coalescing_fsm == ECORE_COAL_RX_STATE_MACHINE)
- SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 0);
- else
- SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 1);
-
- pi_offset = sb_offset + pi_index;
- if (p_hwfn->hw_init_done) {
- ecore_wr(p_hwfn, p_ptt,
- CAU_REG_PI_MEMORY + pi_offset * sizeof(u32),
- *((u32 *)&(pi_entry)));
- } else {
- STORE_RT_REG(p_hwfn,
- CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset,
- *((u32 *)&(pi_entry)));
- }
-}
-
void ecore_int_sb_setup(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, struct ecore_sb_info *sb_info)
{
@@ -1483,16 +1605,50 @@ void ecore_int_sb_setup(struct ecore_hwfn *p_hwfn,
sb_info->igu_sb_id, 0, 0);
}
-/**
- * @brief ecore_get_igu_sb_id - given a sw sb_id return the
- * igu_sb_id
- *
- * @param p_hwfn
- * @param sb_id
- *
- * @return u16
- */
-static u16 ecore_get_igu_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id)
+struct ecore_igu_block *
+ecore_get_igu_free_sb(struct ecore_hwfn *p_hwfn, bool b_is_pf)
+{
+ struct ecore_igu_block *p_block;
+ u16 igu_id;
+
+ for (igu_id = 0; igu_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
+ igu_id++) {
+ p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id];
+
+ if (!(p_block->status & ECORE_IGU_STATUS_VALID) ||
+ !(p_block->status & ECORE_IGU_STATUS_FREE))
+ continue;
+
+ if (!!(p_block->status & ECORE_IGU_STATUS_PF) ==
+ b_is_pf)
+ return p_block;
+ }
+
+ return OSAL_NULL;
+}
+
+static u16 ecore_get_pf_igu_sb_id(struct ecore_hwfn *p_hwfn,
+ u16 vector_id)
+{
+ struct ecore_igu_block *p_block;
+ u16 igu_id;
+
+ for (igu_id = 0; igu_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
+ igu_id++) {
+ p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id];
+
+ if (!(p_block->status & ECORE_IGU_STATUS_VALID) ||
+ !p_block->is_pf ||
+ p_block->vector_number != vector_id)
+ continue;
+
+ return igu_id;
+ }
+
+ return ECORE_SB_INVALID_IDX;
+}
+
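
Both helpers above are linear scans over the CAM shadow keyed on status flags.
A standalone sketch of the free-SB lookup, with simplified flag names:

	#include <stdint.h>
	#include <stddef.h>
	#include <stdbool.h>

	#define STATUS_FREE	0x01
	#define STATUS_VALID	0x02
	#define STATUS_PF	0x04

	struct igu_entry {
		uint8_t status;
	};

	/* First entry that is valid, free and of the wanted kind wins. */
	static struct igu_entry *find_free_sb(struct igu_entry *tbl, size_t n,
					      bool want_pf)
	{
		for (size_t i = 0; i < n; i++) {
			struct igu_entry *e = &tbl[i];

			if (!(e->status & STATUS_VALID) ||
			    !(e->status & STATUS_FREE))
				continue;
			if (!!(e->status & STATUS_PF) == want_pf)
				return e;
		}
		return NULL;
	}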
+u16 ecore_get_igu_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id)
{
u16 igu_sb_id;
@@ -1500,11 +1656,15 @@ static u16 ecore_get_igu_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id)
if (sb_id == ECORE_SP_SB_ID)
igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
else if (IS_PF(p_hwfn->p_dev))
- igu_sb_id = sb_id + p_hwfn->hw_info.p_igu_info->igu_base_sb;
+ igu_sb_id = ecore_get_pf_igu_sb_id(p_hwfn, sb_id + 1);
else
igu_sb_id = ecore_vf_get_igu_sb_id(p_hwfn, sb_id);
- if (sb_id == ECORE_SP_SB_ID)
+ if (igu_sb_id == ECORE_SB_INVALID_IDX)
+ DP_NOTICE(p_hwfn, true,
+ "Slowpath SB vector %04x doesn't exist\n",
+ sb_id);
+ else if (sb_id == ECORE_SP_SB_ID)
DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
"Slowpath SB index in IGU is 0x%04x\n", igu_sb_id);
else
@@ -1525,9 +1685,24 @@ enum _ecore_status_t ecore_int_sb_init(struct ecore_hwfn *p_hwfn,
sb_info->igu_sb_id = ecore_get_igu_sb_id(p_hwfn, sb_id);
+ if (sb_info->igu_sb_id == ECORE_SB_INVALID_IDX)
+ return ECORE_INVAL;
+
+ /* Let the igu info reference the client's SB info */
if (sb_id != ECORE_SP_SB_ID) {
- p_hwfn->sbs_info[sb_id] = sb_info;
- p_hwfn->num_sbs++;
+ if (IS_PF(p_hwfn->p_dev)) {
+ struct ecore_igu_info *p_info;
+ struct ecore_igu_block *p_block;
+
+ p_info = p_hwfn->hw_info.p_igu_info;
+ p_block = &p_info->entry[sb_info->igu_sb_id];
+
+ p_block->sb_info = sb_info;
+ p_block->status &= ~ECORE_IGU_STATUS_FREE;
+ p_info->usage.free_cnt--;
+ } else {
+ ecore_vf_set_sb_info(p_hwfn, sb_id, sb_info);
+ }
}
#ifdef ECORE_CONFIG_DIRECT_HWFN
sb_info->p_hwfn = p_hwfn;
@@ -1559,20 +1734,35 @@ enum _ecore_status_t ecore_int_sb_release(struct ecore_hwfn *p_hwfn,
struct ecore_sb_info *sb_info,
u16 sb_id)
{
- if (sb_id == ECORE_SP_SB_ID) {
- DP_ERR(p_hwfn, "Do Not free sp sb using this function");
- return ECORE_INVAL;
- }
+ struct ecore_igu_info *p_info;
+ struct ecore_igu_block *p_block;
+
+ if (sb_info == OSAL_NULL)
+ return ECORE_SUCCESS;
/* zero status block and ack counter */
sb_info->sb_ack = 0;
OSAL_MEMSET(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
- if (p_hwfn->sbs_info[sb_id] != OSAL_NULL) {
- p_hwfn->sbs_info[sb_id] = OSAL_NULL;
- p_hwfn->num_sbs--;
+ if (IS_VF(p_hwfn->p_dev)) {
+ ecore_vf_set_sb_info(p_hwfn, sb_id, OSAL_NULL);
+ return ECORE_SUCCESS;
}
+ p_info = p_hwfn->hw_info.p_igu_info;
+ p_block = &p_info->entry[sb_info->igu_sb_id];
+
+ /* Vector 0 is reserved to Default SB */
+ if (p_block->vector_number == 0) {
+ DP_ERR(p_hwfn, "Do Not free sp sb using this function");
+ return ECORE_INVAL;
+ }
+
+ /* Lose reference to client's SB info, and fix counters */
+ p_block->sb_info = OSAL_NULL;
+ p_block->status |= ECORE_IGU_STATUS_FREE;
+ p_info->usage.free_cnt++;
+
return ECORE_SUCCESS;
}
@@ -1735,15 +1925,6 @@ ecore_int_igu_enable(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
enum ecore_int_mode int_mode)
{
enum _ecore_status_t rc = ECORE_SUCCESS;
- u32 tmp;
-
- /* @@@tmp - Starting with MFW 8.2.1.0 we've started hitting AVS stop
- * attentions. Since we're waiting for BRCM answer regarding this
- * attention, in the meanwhile we simply mask it.
- */
- tmp = ecore_rd(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE4_IGU_OUT_0);
- tmp &= ~0x800;
- ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE4_IGU_OUT_0, tmp);
ecore_int_igu_enable_attn(p_hwfn, p_ptt);
@@ -1778,11 +1959,13 @@ void ecore_int_igu_disable_int(struct ecore_hwfn *p_hwfn,
#define IGU_CLEANUP_SLEEP_LENGTH (1000)
static void ecore_int_igu_cleanup_sb(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u32 sb_id, bool cleanup_set, u16 opaque_fid)
+ struct ecore_ptt *p_ptt,
+ u32 igu_sb_id,
+ bool cleanup_set,
+ u16 opaque_fid)
{
u32 cmd_ctrl = 0, val = 0, sb_bit = 0, sb_bit_addr = 0, data = 0;
- u32 pxp_addr = IGU_CMD_INT_ACK_BASE + sb_id;
+ u32 pxp_addr = IGU_CMD_INT_ACK_BASE + igu_sb_id;
u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH;
u8 type = 0; /* FIXME MichalS type??? */
@@ -1813,8 +1996,8 @@ static void ecore_int_igu_cleanup_sb(struct ecore_hwfn *p_hwfn,
OSAL_MMIOWB(p_hwfn->p_dev);
/* calculate where to read the status bit from */
- sb_bit = 1 << (sb_id % 32);
- sb_bit_addr = sb_id / 32 * sizeof(u32);
+ sb_bit = 1 << (igu_sb_id % 32);
+ sb_bit_addr = igu_sb_id / 32 * sizeof(u32);
sb_bit_addr += IGU_REG_CLEANUP_STATUS_0 + (0x80 * type);
@@ -1829,21 +2012,28 @@ static void ecore_int_igu_cleanup_sb(struct ecore_hwfn *p_hwfn,
if (!sleep_cnt)
DP_NOTICE(p_hwfn, true,
"Timeout waiting for clear status 0x%08x [for sb %d]\n",
- val, sb_id);
+ val, igu_sb_id);
}
void ecore_int_igu_init_pure_rt_single(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
- u32 sb_id, u16 opaque, bool b_set)
+ u16 igu_sb_id, u16 opaque, bool b_set)
{
+ struct ecore_igu_block *p_block;
int pi, i;
+ p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id];
+ DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
+ "Cleaning SB [%04x]: func_id= %d is_pf = %d vector_num = 0x%0x\n",
+ igu_sb_id, p_block->function_id, p_block->is_pf,
+ p_block->vector_number);
+
/* Set */
if (b_set)
- ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 1, opaque);
+ ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 1, opaque);
/* Clear */
- ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 0, opaque);
+ ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 0, opaque);
/* Wait for the IGU SB to cleanup */
for (i = 0; i < IGU_CLEANUP_SLEEP_LENGTH; i++) {
@@ -1851,8 +2041,8 @@ void ecore_int_igu_init_pure_rt_single(struct ecore_hwfn *p_hwfn,
val = ecore_rd(p_hwfn, p_ptt,
IGU_REG_WRITE_DONE_PENDING +
- ((sb_id / 32) * 4));
- if (val & (1 << (sb_id % 32)))
+ ((igu_sb_id / 32) * 4));
+ if (val & (1 << (igu_sb_id % 32)))
OSAL_UDELAY(10);
else
break;
@@ -1860,21 +2050,22 @@ void ecore_int_igu_init_pure_rt_single(struct ecore_hwfn *p_hwfn,
if (i == IGU_CLEANUP_SLEEP_LENGTH)
DP_NOTICE(p_hwfn, true,
"Failed SB[0x%08x] still appearing in WRITE_DONE_PENDING\n",
- sb_id);
+ igu_sb_id);
/* Clear the CAU for the SB */
for (pi = 0; pi < 12; pi++)
ecore_wr(p_hwfn, p_ptt,
- CAU_REG_PI_MEMORY + (sb_id * 12 + pi) * 4, 0);
+ CAU_REG_PI_MEMORY + (igu_sb_id * 12 + pi) * 4, 0);
}
void ecore_int_igu_init_pure_rt(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
bool b_set, bool b_slowpath)
{
- u32 igu_base_sb = p_hwfn->hw_info.p_igu_info->igu_base_sb;
- u32 igu_sb_cnt = p_hwfn->hw_info.p_igu_info->igu_sb_cnt;
- u32 sb_id = 0, val = 0;
+ struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
+ struct ecore_igu_block *p_block;
+ u16 igu_sb_id = 0;
+ u32 val = 0;
/* @@@TBD MichalK temporary... should be moved to init-tool... */
val = ecore_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION);
@@ -1883,53 +2074,204 @@ void ecore_int_igu_init_pure_rt(struct ecore_hwfn *p_hwfn,
ecore_wr(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION, val);
/* end temporary */
- DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
- "IGU cleaning SBs [%d,...,%d]\n",
- igu_base_sb, igu_base_sb + igu_sb_cnt - 1);
+ for (igu_sb_id = 0;
+ igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
+ igu_sb_id++) {
+ p_block = &p_info->entry[igu_sb_id];
- for (sb_id = igu_base_sb; sb_id < igu_base_sb + igu_sb_cnt; sb_id++)
- ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id,
+ if (!(p_block->status & ECORE_IGU_STATUS_VALID) ||
+ !p_block->is_pf ||
+ (p_block->status & ECORE_IGU_STATUS_DSB))
+ continue;
+
+ ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt, igu_sb_id,
p_hwfn->hw_info.opaque_fid,
b_set);
+ }
- if (!b_slowpath)
- return;
+ if (b_slowpath)
+ ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
+ p_info->igu_dsb_id,
+ p_hwfn->hw_info.opaque_fid,
+ b_set);
+}
- sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
- DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
- "IGU cleaning slowpath SB [%d]\n", sb_id);
- ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id,
- p_hwfn->hw_info.opaque_fid, b_set);
+int ecore_int_igu_reset_cam(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
+ struct ecore_igu_block *p_block;
+ int pf_sbs, vf_sbs;
+ u16 igu_sb_id;
+ u32 val, rval;
+
+ if (!RESC_NUM(p_hwfn, ECORE_SB)) {
+ /* We're using an old MFW - have to prevent any switching
+ * of SBs between PF and VFs as later driver wouldn't be
+ * able to tell which belongs to which.
+ */
+ p_info->b_allow_pf_vf_change = false;
+ } else {
+ /* Use the numbers the MFW have provided -
+ * don't forget MFW accounts for the default SB as well.
+ */
+ p_info->b_allow_pf_vf_change = true;
+
+ if (p_info->usage.cnt != RESC_NUM(p_hwfn, ECORE_SB) - 1) {
+ DP_INFO(p_hwfn,
+ "MFW notifies of 0x%04x PF SBs; IGU indicates of only 0x%04x\n",
+ RESC_NUM(p_hwfn, ECORE_SB) - 1,
+ p_info->usage.cnt);
+ p_info->usage.cnt = RESC_NUM(p_hwfn, ECORE_SB) - 1;
+ }
+
+ /* TODO - how do we learn about VF SBs from MFW? */
+ if (IS_PF_SRIOV(p_hwfn)) {
+ u16 vfs = p_hwfn->p_dev->p_iov_info->total_vfs;
+
+ if (vfs != p_info->usage.iov_cnt)
+ DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
+ "0x%04x VF SBs in IGU CAM != PCI configuration 0x%04x\n",
+ p_info->usage.iov_cnt, vfs);
+
+ /* At this point we know how many SBs we have totally
+ * in IGU + number of PF SBs. So we can validate that
+ * we'd have sufficient for VF.
+ */
+ if (vfs > p_info->usage.free_cnt +
+ p_info->usage.free_cnt_iov -
+ p_info->usage.cnt) {
+ DP_NOTICE(p_hwfn, true,
+ "Not enough SBs for VFs - 0x%04x SBs, from which %04x PFs and %04x are required\n",
+ p_info->usage.free_cnt +
+ p_info->usage.free_cnt_iov,
+ p_info->usage.cnt, vfs);
+ return ECORE_INVAL;
+ }
+ }
+ }
+
+ /* Cap the number of VFs SBs by the number of VFs */
+ if (IS_PF_SRIOV(p_hwfn))
+ p_info->usage.iov_cnt = p_hwfn->p_dev->p_iov_info->total_vfs;
+
+ /* Mark all SBs as free, now in the right PF/VFs division */
+ p_info->usage.free_cnt = p_info->usage.cnt;
+ p_info->usage.free_cnt_iov = p_info->usage.iov_cnt;
+ p_info->usage.orig = p_info->usage.cnt;
+ p_info->usage.iov_orig = p_info->usage.iov_cnt;
+
+ /* We now proceed to re-configure the IGU cam to reflect the initial
+ * configuration. We can start with the Default SB.
+ */
+ pf_sbs = p_info->usage.cnt;
+ vf_sbs = p_info->usage.iov_cnt;
+
+ for (igu_sb_id = p_info->igu_dsb_id;
+ igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
+ igu_sb_id++) {
+ p_block = &p_info->entry[igu_sb_id];
+ val = 0;
+
+ if (!(p_block->status & ECORE_IGU_STATUS_VALID))
+ continue;
+
+ if (p_block->status & ECORE_IGU_STATUS_DSB) {
+ p_block->function_id = p_hwfn->rel_pf_id;
+ p_block->is_pf = 1;
+ p_block->vector_number = 0;
+ p_block->status = ECORE_IGU_STATUS_VALID |
+ ECORE_IGU_STATUS_PF |
+ ECORE_IGU_STATUS_DSB;
+ } else if (pf_sbs) {
+ pf_sbs--;
+ p_block->function_id = p_hwfn->rel_pf_id;
+ p_block->is_pf = 1;
+ p_block->vector_number = p_info->usage.cnt - pf_sbs;
+ p_block->status = ECORE_IGU_STATUS_VALID |
+ ECORE_IGU_STATUS_PF |
+ ECORE_IGU_STATUS_FREE;
+ } else if (vf_sbs) {
+ p_block->function_id =
+ p_hwfn->p_dev->p_iov_info->first_vf_in_pf +
+ p_info->usage.iov_cnt - vf_sbs;
+ p_block->is_pf = 0;
+ p_block->vector_number = 0;
+ p_block->status = ECORE_IGU_STATUS_VALID |
+ ECORE_IGU_STATUS_FREE;
+ vf_sbs--;
+ } else {
+ p_block->function_id = 0;
+ p_block->is_pf = 0;
+ p_block->vector_number = 0;
+ }
+
+ SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER,
+ p_block->function_id);
+ SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, p_block->is_pf);
+ SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER,
+ p_block->vector_number);
+
+		/* VF entries would be enabled when the VF is initialized */
+ SET_FIELD(val, IGU_MAPPING_LINE_VALID, p_block->is_pf);
+
+ rval = ecore_rd(p_hwfn, p_ptt,
+ IGU_REG_MAPPING_MEMORY +
+ sizeof(u32) * igu_sb_id);
+
+ if (rval != val) {
+ ecore_wr(p_hwfn, p_ptt,
+ IGU_REG_MAPPING_MEMORY +
+ sizeof(u32) * igu_sb_id,
+ val);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
+ "IGU reset: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x [%08x -> %08x]\n",
+ igu_sb_id, p_block->function_id,
+ p_block->is_pf, p_block->vector_number,
+ rval, val);
+ }
+ }
+
+ return 0;
+}
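
The reset loop hands out vectors in a fixed order: the default SB first, then
pf_sbs PF vectors numbered 1..pf_sbs (note vector_number counts up as pf_sbs
is decremented), then VF lines, then disabled leftovers. A small standalone
sketch of that distribution:

	#include <stdint.h>
	#include <stdio.h>

	/* Walk the CAM once: default SB, then PF vectors, then VF lines,
	 * then disabled leftovers. */
	static void assign_vectors(uint16_t total, uint16_t pf_sbs, uint16_t vf_sbs)
	{
		uint16_t pf_left = pf_sbs, vf_left = vf_sbs;

		for (uint16_t id = 0; id < total; id++) {
			if (id == 0) {
				printf("entry %u: default SB, vector 0\n", id);
			} else if (pf_left) {
				pf_left--;
				/* counts up: 1, 2, ..., pf_sbs */
				printf("entry %u: PF vector %u\n", id,
				       pf_sbs - pf_left);
			} else if (vf_left) {
				vf_left--;
				printf("entry %u: VF line (vector 0)\n", id);
			} else {
				printf("entry %u: disabled\n", id);
			}
		}
	}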
+
+int ecore_int_igu_reset_cam_default(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct ecore_sb_cnt_info *p_cnt = &p_hwfn->hw_info.p_igu_info->usage;
+
+ /* Return all the usage indications to default prior to the reset;
+ * The reset expects the !orig to reflect the initial status of the
+ * SBs, and would re-calculate the originals based on those.
+ */
+ p_cnt->cnt = p_cnt->orig;
+ p_cnt->free_cnt = p_cnt->orig;
+ p_cnt->iov_cnt = p_cnt->iov_orig;
+ p_cnt->free_cnt_iov = p_cnt->iov_orig;
+ p_cnt->orig = 0;
+ p_cnt->iov_orig = 0;
+
+ /* TODO - we probably need to re-configure the CAU as well... */
+ return ecore_int_igu_reset_cam(p_hwfn, p_ptt);
}
-static u32 ecore_int_igu_read_cam_block(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, u16 sb_id)
+static void ecore_int_igu_read_cam_block(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 igu_sb_id)
{
u32 val = ecore_rd(p_hwfn, p_ptt,
- IGU_REG_MAPPING_MEMORY + sizeof(u32) * sb_id);
+ IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id);
struct ecore_igu_block *p_block;
- p_block = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];
-
- /* stop scanning when hit first invalid PF entry */
- if (!GET_FIELD(val, IGU_MAPPING_LINE_VALID) &&
- GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID))
- goto out;
+ p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id];
/* Fill the block information */
- p_block->status = ECORE_IGU_STATUS_VALID;
p_block->function_id = GET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER);
p_block->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID);
p_block->vector_number = GET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER);
- DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
- "IGU_BLOCK: [SB 0x%04x, Value in CAM 0x%08x] func_id = %d"
- " is_pf = %d vector_num = 0x%x\n",
- sb_id, val, p_block->function_id, p_block->is_pf,
- p_block->vector_number);
-
-out:
- return val;
+ p_block->igu_sb_id = igu_sb_id;
}
enum _ecore_status_t ecore_int_igu_read_cam(struct ecore_hwfn *p_hwfn,
@@ -1937,140 +2279,217 @@ enum _ecore_status_t ecore_int_igu_read_cam(struct ecore_hwfn *p_hwfn,
{
struct ecore_igu_info *p_igu_info;
struct ecore_igu_block *p_block;
- u32 min_vf = 0, max_vf = 0, val;
- u16 sb_id, last_iov_sb_id = 0;
- u16 prev_sb_id = 0xFF;
+ u32 min_vf = 0, max_vf = 0;
+ u16 igu_sb_id;
- p_hwfn->hw_info.p_igu_info = OSAL_ALLOC(p_hwfn->p_dev,
- GFP_KERNEL,
- sizeof(*p_igu_info));
+ p_hwfn->hw_info.p_igu_info = OSAL_ZALLOC(p_hwfn->p_dev,
+ GFP_KERNEL,
+ sizeof(*p_igu_info));
if (!p_hwfn->hw_info.p_igu_info)
return ECORE_NOMEM;
-
- OSAL_MEMSET(p_hwfn->hw_info.p_igu_info, 0, sizeof(*p_igu_info));
-
p_igu_info = p_hwfn->hw_info.p_igu_info;
- /* Initialize base sb / sb cnt for PFs and VFs */
- p_igu_info->igu_base_sb = 0xffff;
- p_igu_info->igu_sb_cnt = 0;
- p_igu_info->igu_dsb_id = 0xffff;
- p_igu_info->igu_base_sb_iov = 0xffff;
+	/* Distinguish between existent and non-existent default SB */
+ p_igu_info->igu_dsb_id = ECORE_SB_INVALID_IDX;
	/* Find the range of VF ids whose SBs belong to this PF */
if (p_hwfn->p_dev->p_iov_info) {
struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;
min_vf = p_iov->first_vf_in_pf;
max_vf = p_iov->first_vf_in_pf + p_iov->total_vfs;
}
- for (sb_id = 0;
- sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
- sb_id++) {
- p_block = &p_igu_info->igu_map.igu_blocks[sb_id];
- val = ecore_int_igu_read_cam_block(p_hwfn, p_ptt, sb_id);
- if (!GET_FIELD(val, IGU_MAPPING_LINE_VALID) &&
- GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID))
- break;
- if (p_block->is_pf) {
- if (p_block->function_id == p_hwfn->rel_pf_id) {
- p_block->status |= ECORE_IGU_STATUS_PF;
-
- if (p_block->vector_number == 0) {
- if (p_igu_info->igu_dsb_id == 0xffff)
- p_igu_info->igu_dsb_id = sb_id;
- } else {
- if (p_igu_info->igu_base_sb == 0xffff) {
- p_igu_info->igu_base_sb = sb_id;
- } else if (prev_sb_id != sb_id - 1) {
- DP_NOTICE(p_hwfn->p_dev, false,
- "consecutive igu"
- " vectors for HWFN"
- " %x broken",
- p_hwfn->rel_pf_id);
- break;
- }
- prev_sb_id = sb_id;
- /* we don't count the default */
- (p_igu_info->igu_sb_cnt)++;
- }
- }
- } else {
- if ((p_block->function_id >= min_vf) &&
- (p_block->function_id < max_vf)) {
- /* Available for VFs of this PF */
- if (p_igu_info->igu_base_sb_iov == 0xffff) {
- p_igu_info->igu_base_sb_iov = sb_id;
- } else if (last_iov_sb_id != sb_id - 1) {
- if (!val)
- DP_VERBOSE(p_hwfn->p_dev,
- ECORE_MSG_INTR,
- "First uninited IGU"
- " CAM entry at"
- " index 0x%04x\n",
- sb_id);
- else
- DP_NOTICE(p_hwfn->p_dev, false,
- "Consecutive igu"
- " vectors for HWFN"
- " %x vfs is broken"
- " [jumps from %04x"
- " to %04x]\n",
- p_hwfn->rel_pf_id,
- last_iov_sb_id,
- sb_id);
- break;
- }
- p_block->status |= ECORE_IGU_STATUS_FREE;
- p_hwfn->hw_info.p_igu_info->free_blks++;
- last_iov_sb_id = sb_id;
- }
+ for (igu_sb_id = 0;
+ igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
+ igu_sb_id++) {
+		/* Read the current entry; note it might not belong to this PF */
+ ecore_int_igu_read_cam_block(p_hwfn, p_ptt, igu_sb_id);
+ p_block = &p_igu_info->entry[igu_sb_id];
+
+ if ((p_block->is_pf) &&
+ (p_block->function_id == p_hwfn->rel_pf_id)) {
+ p_block->status = ECORE_IGU_STATUS_PF |
+ ECORE_IGU_STATUS_VALID |
+ ECORE_IGU_STATUS_FREE;
+
+ if (p_igu_info->igu_dsb_id != ECORE_SB_INVALID_IDX)
+ p_igu_info->usage.cnt++;
+ } else if (!(p_block->is_pf) &&
+ (p_block->function_id >= min_vf) &&
+ (p_block->function_id < max_vf)) {
+ /* Available for VFs of this PF */
+ p_block->status = ECORE_IGU_STATUS_VALID |
+ ECORE_IGU_STATUS_FREE;
+
+ if (p_igu_info->igu_dsb_id != ECORE_SB_INVALID_IDX)
+ p_igu_info->usage.iov_cnt++;
+ }
+
+		/* Mark the first entry belonging to the PF or its VFs
+		 * as the default SB [we'll reset the IGU prior to first usage].
+ */
+ if ((p_block->status & ECORE_IGU_STATUS_VALID) &&
+ (p_igu_info->igu_dsb_id == ECORE_SB_INVALID_IDX)) {
+ p_igu_info->igu_dsb_id = igu_sb_id;
+ p_block->status |= ECORE_IGU_STATUS_DSB;
}
+
+ /* While this isn't suitable for all clients, limit number
+ * of prints by having each PF print only its entries with the
+ * exception of PF0 which would print everything.
+ */
+ if ((p_block->status & ECORE_IGU_STATUS_VALID) ||
+ (p_hwfn->abs_pf_id == 0))
+ DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
+ "IGU_BLOCK: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x\n",
+ igu_sb_id, p_block->function_id,
+ p_block->is_pf, p_block->vector_number);
+ }
+
+ if (p_igu_info->igu_dsb_id == ECORE_SB_INVALID_IDX) {
+ DP_NOTICE(p_hwfn, true,
+ "IGU CAM returned invalid values igu_dsb_id=0x%x\n",
+ p_igu_info->igu_dsb_id);
+ return ECORE_INVAL;
}
- /* There's a possibility the igu_sb_cnt_iov doesn't properly reflect
- * the number of VF SBs [especially for first VF on engine, as we can't
- * diffrentiate between empty entries and its entries].
- * Since we don't really support more SBs than VFs today, prevent any
- * such configuration by sanitizing the number of SBs to equal the
- * number of VFs.
+ /* All non default SB are considered free at this point */
+ p_igu_info->usage.free_cnt = p_igu_info->usage.cnt;
+ p_igu_info->usage.free_cnt_iov = p_igu_info->usage.iov_cnt;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
+ "igu_dsb_id=0x%x, num Free SBs - PF: %04x VF: %04x [might change after resource allocation]\n",
+ p_igu_info->igu_dsb_id, p_igu_info->usage.cnt,
+ p_igu_info->usage.iov_cnt);
+
+ return ECORE_SUCCESS;
+}
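
The CAM read is a single classifying pass: count this PF's SBs and its VFs'
SBs, and remember the first matching entry as the default SB (which is
deliberately excluded from the counts). A standalone sketch of that pass with
simplified types:

	#include <stdint.h>

	#define SB_INVALID 0xffff

	struct cam_line { uint8_t is_pf; uint8_t func_id; };

	/* One pass: count the PF's SBs and its VFs' SBs, and remember the
	 * first matching entry as the default SB. */
	static void classify(const struct cam_line *cam, uint16_t n, uint8_t pf_id,
			     uint8_t min_vf, uint8_t max_vf,
			     uint16_t *pf_cnt, uint16_t *vf_cnt, uint16_t *dsb)
	{
		*pf_cnt = *vf_cnt = 0;
		*dsb = SB_INVALID;

		for (uint16_t i = 0; i < n; i++) {
			const struct cam_line *l = &cam[i];
			int mine = 0;

			if (l->is_pf && l->func_id == pf_id) {
				mine = 1;
				if (*dsb != SB_INVALID)
					(*pf_cnt)++;	/* default SB isn't counted */
			} else if (!l->is_pf && l->func_id >= min_vf &&
				   l->func_id < max_vf) {
				mine = 1;
				if (*dsb != SB_INVALID)
					(*vf_cnt)++;
			}
			if (mine && *dsb == SB_INVALID)
				*dsb = i;	/* first hit becomes the default SB */
		}
	}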
+
+enum _ecore_status_t
+ecore_int_igu_relocate_sb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ u16 sb_id, bool b_to_vf)
+{
+ struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
+ struct ecore_igu_block *p_block = OSAL_NULL;
+ u16 igu_sb_id = 0, vf_num = 0;
+ u32 val = 0;
+
+ if (IS_VF(p_hwfn->p_dev) || !IS_PF_SRIOV(p_hwfn))
+ return ECORE_INVAL;
+
+ if (sb_id == ECORE_SP_SB_ID)
+ return ECORE_INVAL;
+
+ if (!p_info->b_allow_pf_vf_change) {
+ DP_INFO(p_hwfn, "Can't relocate SBs as MFW is too old.\n");
+ return ECORE_INVAL;
+ }
+
+ /* If we're moving a SB from PF to VF, the client had to specify
+ * which vector it wants to move.
*/
- if (IS_PF_SRIOV(p_hwfn)) {
- u16 total_vfs = p_hwfn->p_dev->p_iov_info->total_vfs;
-
- if (total_vfs < p_igu_info->free_blks) {
- DP_VERBOSE(p_hwfn, (ECORE_MSG_INTR | ECORE_MSG_IOV),
- "Limiting number of SBs for IOV - %04x --> %04x\n",
- p_igu_info->free_blks,
- p_hwfn->p_dev->p_iov_info->total_vfs);
- p_igu_info->free_blks = total_vfs;
- } else if (total_vfs > p_igu_info->free_blks) {
- DP_NOTICE(p_hwfn, true,
- "IGU has only %04x SBs for VFs while the device has %04x VFs\n",
- p_igu_info->free_blks, total_vfs);
+ if (b_to_vf) {
+ igu_sb_id = ecore_get_pf_igu_sb_id(p_hwfn, sb_id + 1);
+ if (igu_sb_id == ECORE_SB_INVALID_IDX)
return ECORE_INVAL;
- }
}
- p_igu_info->igu_sb_cnt_iov = p_igu_info->free_blks;
+ /* If we're moving a SB from VF to PF, need to validate there isn't
+ * already a line configured for that vector.
+ */
+ if (!b_to_vf) {
+ if (ecore_get_pf_igu_sb_id(p_hwfn, sb_id + 1) !=
+ ECORE_SB_INVALID_IDX)
+ return ECORE_INVAL;
+ }
- DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
- "IGU igu_base_sb=0x%x [IOV 0x%x] igu_sb_cnt=%d [IOV 0x%x] "
- "igu_dsb_id=0x%x\n",
- p_igu_info->igu_base_sb, p_igu_info->igu_base_sb_iov,
- p_igu_info->igu_sb_cnt, p_igu_info->igu_sb_cnt_iov,
- p_igu_info->igu_dsb_id);
-
- if (p_igu_info->igu_base_sb == 0xffff ||
- p_igu_info->igu_dsb_id == 0xffff || p_igu_info->igu_sb_cnt == 0) {
- DP_NOTICE(p_hwfn, true,
- "IGU CAM returned invalid values igu_base_sb=0x%x "
- "igu_sb_cnt=%d igu_dsb_id=0x%x\n",
- p_igu_info->igu_base_sb, p_igu_info->igu_sb_cnt,
- p_igu_info->igu_dsb_id);
+ /* We need to validate that the SB can actually be relocated.
+ * This would also handle the previous case where we've explicitly
+ * stated which IGU SB needs to move.
+ */
+ for (; igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
+ igu_sb_id++) {
+ p_block = &p_info->entry[igu_sb_id];
+
+ if (!(p_block->status & ECORE_IGU_STATUS_VALID) ||
+ !(p_block->status & ECORE_IGU_STATUS_FREE) ||
+ (!!(p_block->status & ECORE_IGU_STATUS_PF) != b_to_vf)) {
+ if (b_to_vf)
+ return ECORE_INVAL;
+ else
+ continue;
+ }
+
+ break;
+ }
+
+ if (igu_sb_id == ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev)) {
+ DP_VERBOSE(p_hwfn, (ECORE_MSG_INTR | ECORE_MSG_IOV),
+ "Failed to find a free SB to move\n");
return ECORE_INVAL;
}
+ /* At this point, p_block points to the SB we want to relocate */
+ if (b_to_vf) {
+ p_block->status &= ~ECORE_IGU_STATUS_PF;
+
+ /* It doesn't matter which VF number we choose, since we're
+		 * going to disable the line; but let's keep it in range.
+ */
+ vf_num = (u16)p_hwfn->p_dev->p_iov_info->first_vf_in_pf;
+
+ p_block->function_id = (u8)vf_num;
+ p_block->is_pf = 0;
+ p_block->vector_number = 0;
+
+ p_info->usage.cnt--;
+ p_info->usage.free_cnt--;
+ p_info->usage.iov_cnt++;
+ p_info->usage.free_cnt_iov++;
+
+		/* TODO - if SBs aren't really the limiting factor,
+		 * this might not be accurate [in the sense that we
+		 * might not need to decrement the feature].
+ */
+ p_hwfn->hw_info.feat_num[ECORE_PF_L2_QUE]--;
+ p_hwfn->hw_info.feat_num[ECORE_VF_L2_QUE]++;
+ } else {
+ p_block->status |= ECORE_IGU_STATUS_PF;
+ p_block->function_id = p_hwfn->rel_pf_id;
+ p_block->is_pf = 1;
+ p_block->vector_number = sb_id + 1;
+
+ p_info->usage.cnt++;
+ p_info->usage.free_cnt++;
+ p_info->usage.iov_cnt--;
+ p_info->usage.free_cnt_iov--;
+
+ p_hwfn->hw_info.feat_num[ECORE_PF_L2_QUE]++;
+ p_hwfn->hw_info.feat_num[ECORE_VF_L2_QUE]--;
+ }
+
+ /* Update the IGU and CAU with the new configuration */
+ SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER,
+ p_block->function_id);
+ SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, p_block->is_pf);
+ SET_FIELD(val, IGU_MAPPING_LINE_VALID, p_block->is_pf);
+ SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER,
+ p_block->vector_number);
+
+ ecore_wr(p_hwfn, p_ptt,
+ IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id,
+ val);
+
+ ecore_int_cau_conf_sb(p_hwfn, p_ptt, 0,
+ igu_sb_id, vf_num,
+ p_block->is_pf ? 0 : 1);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
+ "Relocation: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x\n",
+ igu_sb_id, p_block->function_id,
+ p_block->is_pf, p_block->vector_number);
+
return ECORE_SUCCESS;
}
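
The relocation itself is mostly bookkeeping: one free SB leaves one pool and
joins the other, so the four usage counters move in lock-step. A minimal
sketch of that counter swap:

	#include <stdint.h>
	#include <stdbool.h>

	struct sb_usage {
		uint16_t cnt, free_cnt;		/* PF SBs: total / unused */
		uint16_t iov_cnt, free_cnt_iov;	/* VF SBs: total / unused */
	};

	/* Moving one free SB between the PF pool and the VF pool just
	 * shifts the four counters together. */
	static void move_sb(struct sb_usage *u, bool to_vf)
	{
		if (to_vf) {
			u->cnt--;	u->free_cnt--;
			u->iov_cnt++;	u->free_cnt_iov++;
		} else {
			u->cnt++;	u->free_cnt++;
			u->iov_cnt--;	u->free_cnt_iov--;
		}
	}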
@@ -2170,14 +2589,13 @@ void ecore_int_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
void ecore_int_get_num_sbs(struct ecore_hwfn *p_hwfn,
struct ecore_sb_cnt_info *p_sb_cnt_info)
{
- struct ecore_igu_info *info = p_hwfn->hw_info.p_igu_info;
+ struct ecore_igu_info *p_igu_info = p_hwfn->hw_info.p_igu_info;
- if (!info || !p_sb_cnt_info)
+ if (!p_igu_info || !p_sb_cnt_info)
return;
- p_sb_cnt_info->sb_cnt = info->igu_sb_cnt;
- p_sb_cnt_info->sb_iov_cnt = info->igu_sb_cnt_iov;
- p_sb_cnt_info->sb_free_blk = info->free_blks;
+ OSAL_MEMCPY(p_sb_cnt_info, &p_igu_info->usage,
+ sizeof(*p_sb_cnt_info));
}
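
A minimal caller sketch of the reworked counter query, assuming the structures introduced in this series; the function name example_dump_sb_usage is hypothetical and not part of the patch:

static void example_dump_sb_usage(struct ecore_hwfn *p_hwfn)
{
	struct ecore_sb_cnt_info sb_cnt;

	/* ecore_int_get_num_sbs() now copies p_igu_info->usage wholesale */
	OSAL_MEM_ZERO(&sb_cnt, sizeof(sb_cnt));
	ecore_int_get_num_sbs(p_hwfn, &sb_cnt);

	DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
		   "PF SBs: orig %d cnt %d free %d; VF SBs: orig %d cnt %d free %d\n",
		   sb_cnt.orig, sb_cnt.cnt, sb_cnt.free_cnt,
		   sb_cnt.iov_orig, sb_cnt.iov_cnt, sb_cnt.free_cnt_iov);
}
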
void ecore_int_disable_post_isr_release(struct ecore_dev *p_dev)
@@ -2249,10 +2667,11 @@ enum _ecore_status_t ecore_int_get_sb_dbg(struct ecore_hwfn *p_hwfn,
p_info->igu_cons = ecore_rd(p_hwfn, p_ptt,
IGU_REG_CONSUMER_MEM + sbid * 4);
- for (i = 0; i < PIS_PER_SB; i++)
+ for (i = 0; i < PIS_PER_SB_E4; i++)
p_info->pi[i] = (u16)ecore_rd(p_hwfn, p_ptt,
CAU_REG_PI_MEMORY +
- sbid * 4 * PIS_PER_SB + i * 4);
+ sbid * 4 * PIS_PER_SB_E4 +
+ i * 4);
return ECORE_SUCCESS;
}
diff --git a/drivers/net/qede/base/ecore_int.h b/drivers/net/qede/base/ecore_int.h
index 067ed605..563051c3 100644
--- a/drivers/net/qede/base/ecore_int.h
+++ b/drivers/net/qede/base/ecore_int.h
@@ -19,33 +19,78 @@
#define ECORE_SB_EVENT_MASK 0x0003
#define SB_ALIGNED_SIZE(p_hwfn) \
- ALIGNED_TYPE_SIZE(struct status_block, p_hwfn)
+ ALIGNED_TYPE_SIZE(struct status_block_e4, p_hwfn)
+
+#define ECORE_SB_INVALID_IDX 0xffff
struct ecore_igu_block {
u8 status;
#define ECORE_IGU_STATUS_FREE 0x01
#define ECORE_IGU_STATUS_VALID 0x02
#define ECORE_IGU_STATUS_PF 0x04
+#define ECORE_IGU_STATUS_DSB 0x08
u8 vector_number;
u8 function_id;
u8 is_pf;
-};
-struct ecore_igu_map {
- struct ecore_igu_block igu_blocks[MAX_TOT_SB_PER_PATH];
+ /* Index inside IGU [meant for back reference] */
+ u16 igu_sb_id;
+
+ struct ecore_sb_info *sb_info;
};
struct ecore_igu_info {
- struct ecore_igu_map igu_map;
+ struct ecore_igu_block entry[MAX_TOT_SB_PER_PATH];
u16 igu_dsb_id;
- u16 igu_base_sb;
- u16 igu_base_sb_iov;
- u16 igu_sb_cnt;
- u16 igu_sb_cnt_iov;
- u16 free_blks;
+
+ /* The numbers can shift when using APIs to switch SBs between PF and
+ * VF.
+ */
+ struct ecore_sb_cnt_info usage;
+
+ /* Determine whether we can shift SBs between VFs and PFs */
+ bool b_allow_pf_vf_change;
};
+/**
+ * @brief - Make sure the IGU CAM reflects the resources provided by MFW
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+int ecore_int_igu_reset_cam(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief - Make sure IGU CAM reflects the default resources once again,
+ * starting with a 'dirty' SW database.
+ * @param p_hwfn
+ * @param p_ptt
+ */
+int ecore_int_igu_reset_cam_default(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief Translate the weakly-defined client sb-id into an IGU sb-id
+ *
+ * @param p_hwfn
+ * @param sb_id - user provided sb_id
+ *
+ * @return an index inside IGU CAM where the SB resides
+ */
+u16 ecore_get_igu_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id);
+
+/**
+ * @brief Return a pointer to an unused valid SB
+ *
+ * @param p_hwfn
+ * @param b_is_pf - true iff we want a SB belonging to a PF
+ *
+ * @return pointer to an igu_block, OSAL_NULL if none is available
+ */
+struct ecore_igu_block *
+ecore_get_igu_free_sb(struct ecore_hwfn *p_hwfn, bool b_is_pf);
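
A usage sketch for the free-SB lookup declared above; example_claim_free_pf_sb is a hypothetical helper:

static u16 example_claim_free_pf_sb(struct ecore_hwfn *p_hwfn)
{
	struct ecore_igu_block *p_block;

	p_block = ecore_get_igu_free_sb(p_hwfn, true /* b_is_pf */);
	if (p_block == OSAL_NULL)
		return ECORE_SB_INVALID_IDX;

	/* igu_sb_id is the new back-reference into the IGU CAM */
	return p_block->igu_sb_id;
}
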
/* TODO Names of function may change... */
void ecore_int_igu_init_pure_rt(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
@@ -125,9 +170,11 @@ u16 ecore_int_get_sp_sb_id(struct ecore_hwfn *p_hwfn);
* @param opaque - opaque fid of the sb owner.
* @param cleanup_set - set(1) / clear(0)
*/
-void ecore_int_igu_init_pure_rt_single(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u32 sb_id, u16 opaque, bool b_set);
+void ecore_int_igu_init_pure_rt_single(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 sb_id,
+ u16 opaque,
+ bool b_set);
/**
* @brief ecore_int_cau_conf - configure cau for a given status
diff --git a/drivers/net/qede/base/ecore_int_api.h b/drivers/net/qede/base/ecore_int_api.h
index 799fbe82..24cdf5ed 100644
--- a/drivers/net/qede/base/ecore_int_api.h
+++ b/drivers/net/qede/base/ecore_int_api.h
@@ -26,7 +26,7 @@ enum ecore_int_mode {
#endif
struct ecore_sb_info {
- struct status_block *sb_virt;
+ struct status_block_e4 *sb_virt;
dma_addr_t sb_phys;
u32 sb_ack; /* Last given ack */
u16 igu_sb_id;
@@ -44,13 +44,19 @@ struct ecore_sb_info {
struct ecore_sb_info_dbg {
u32 igu_prod;
u32 igu_cons;
- u16 pi[PIS_PER_SB];
+ u16 pi[PIS_PER_SB_E4];
};
struct ecore_sb_cnt_info {
- int sb_cnt;
- int sb_iov_cnt;
- int sb_free_blk;
+ /* Original, current, and free SBs for PF */
+ int orig;
+ int cnt;
+ int free_cnt;
+
+ /* Original, current, and free SBs for child VFs */
+ int iov_orig;
+ int iov_cnt;
+ int free_cnt_iov;
};
static OSAL_INLINE u16 ecore_sb_update_sb_idx(struct ecore_sb_info *sb_info)
@@ -61,7 +67,7 @@ static OSAL_INLINE u16 ecore_sb_update_sb_idx(struct ecore_sb_info *sb_info)
/* barrier(); status block is written to by the chip */
/* FIXME: need some sort of barrier. */
prod = OSAL_LE32_TO_CPU(sb_info->sb_virt->prod_index) &
- STATUS_BLOCK_PROD_INDEX_MASK;
+ STATUS_BLOCK_E4_PROD_INDEX_MASK;
if (sb_info->sb_ack != prod) {
sb_info->sb_ack = prod;
rc |= ECORE_SB_IDX;
@@ -173,17 +179,17 @@ enum ecore_coalescing_fsm {
*
* @param p_hwfn
* @param p_ptt
- * @param igu_sb_id
+ * @param p_sb
* @param pi_index
* @param state
* @param timeset
*/
-void ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u16 igu_sb_id,
- u32 pi_index,
- enum ecore_coalescing_fsm coalescing_fsm,
- u8 timeset);
+void ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_sb_info *p_sb,
+ u32 pi_index,
+ enum ecore_coalescing_fsm coalescing_fsm,
+ u8 timeset);
/**
*
@@ -219,6 +225,7 @@ void ecore_int_igu_disable_int(struct ecore_hwfn *p_hwfn,
u64 ecore_int_igu_read_sisr_reg(struct ecore_hwfn *p_hwfn);
#define ECORE_SP_SB_ID 0xffff
+
/**
* @brief ecore_int_sb_init - Initializes the sb_info structure.
*
@@ -324,4 +331,18 @@ enum _ecore_status_t ecore_int_get_sb_dbg(struct ecore_hwfn *p_hwfn,
struct ecore_sb_info *p_sb,
struct ecore_sb_info_dbg *p_info);
+/**
+ * @brief - Move a free Status block between PF and child VF
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param sb_id - The PF fastpath vector to be moved [re-assigned if claiming
+ * from VF, given-up if moving to VF]
+ * @param b_to_vf - PF->VF == true, VF->PF == false
+ *
+ * @return ECORE_SUCCESS if SB successfully moved.
+ */
+enum _ecore_status_t
+ecore_int_igu_relocate_sb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ u16 sb_id, bool b_to_vf);
#endif
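
A hedged usage sketch for the relocation API declared above; example_toggle_sb_ownership is hypothetical, and an acquired PTT is assumed:

static enum _ecore_status_t
example_toggle_sb_ownership(struct ecore_hwfn *p_hwfn,
			    struct ecore_ptt *p_ptt, u16 sb_id)
{
	enum _ecore_status_t rc;

	/* Give the PF fastpath vector up to a child VF... */
	rc = ecore_int_igu_relocate_sb(p_hwfn, p_ptt, sb_id, true);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* ...and later reclaim it for the PF */
	return ecore_int_igu_relocate_sb(p_hwfn, p_ptt, sb_id, false);
}
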
diff --git a/drivers/net/qede/base/ecore_iov_api.h b/drivers/net/qede/base/ecore_iov_api.h
index 50cb3f2b..218ef50b 100644
--- a/drivers/net/qede/base/ecore_iov_api.h
+++ b/drivers/net/qede/base/ecore_iov_api.h
@@ -345,21 +345,13 @@ ecore_iov_get_public_vf_info(struct ecore_hwfn *p_hwfn,
u16 vfid, bool b_enabled_only);
/**
- * @brief Set pending events bitmap for given @vfid
- *
- * @param p_hwfn
- * @param vfid
- */
-void ecore_iov_pf_add_pending_events(struct ecore_hwfn *p_hwfn, u8 vfid);
-
-/**
- * @brief Copy pending events bitmap in @events and clear
- * original copy of events
+ * @brief Fills a bitmask of all VFs which have pending, unhandled
+ * messages.
*
* @param p_hwfn
*/
-void ecore_iov_pf_get_and_clear_pending_events(struct ecore_hwfn *p_hwfn,
- u64 *events);
+void ecore_iov_pf_get_pending_events(struct ecore_hwfn *p_hwfn,
+ u64 *events);
/**
* @brief Copy VF's message to PF's buffer
@@ -693,25 +685,60 @@ bool ecore_iov_is_vf_started(struct ecore_hwfn *p_hwfn,
* @return - rate in Mbps
*/
int ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid);
+
#endif
/**
+ * @brief ecore_pf_configure_vf_queue_coalesce - PF configure coalesce
+ * parameters of VFs for Rx and Tx queue.
+ * While the API allows setting coalescing per-qid, all queues sharing a SB
+ * should be in the same range [i.e., either 0-0x7f, 0x80-0xff or 0x100-0x1ff],
+ * otherwise the configuration would break.
+ *
+ * @param p_hwfn
+ * @param rx_coal - Rx Coalesce value in micro seconds.
+ * @param tx_coal - TX Coalesce value in micro seconds.
+ * @param vf_id
+ * @param qid
+ *
+ * @return int
+ **/
+enum _ecore_status_t
+ecore_iov_pf_configure_vf_queue_coalesce(struct ecore_hwfn *p_hwfn,
+ u16 rx_coal, u16 tx_coal,
+ u16 vf_id, u16 qid);
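
A sketch of a conforming call, with hypothetical values chosen from a single range per the note above (48us and 96us both fall in 0-0x7f):

static enum _ecore_status_t
example_set_vf_coalesce(struct ecore_hwfn *p_hwfn)
{
	/* Rx 48us / Tx 96us for qid 0 of VF 0 */
	return ecore_iov_pf_configure_vf_queue_coalesce(p_hwfn, 48, 96,
							0 /* vf_id */,
							0 /* qid */);
}
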
+
+/**
* @brief - Given a VF index, return index of next [including that] active VF.
*
* @param p_hwfn
* @param rel_vf_id
*
- * @return E4_MAX_NUM_VFS in case no further active VFs, otherwise index.
+ * @return MAX_NUM_VFS_E4 in case no further active VFs, otherwise index.
*/
u16 ecore_iov_get_next_active_vf(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);
void ecore_iov_bulletin_set_udp_ports(struct ecore_hwfn *p_hwfn, int vfid,
u16 vxlan_port, u16 geneve_port);
+
+#ifdef CONFIG_ECORE_SW_CHANNEL
+/**
+ * @brief Set whether PF should communicate with VF using SW/HW channel
+ * Needs to be called for an enabled VF before acquire is over
+ * [latest good point for doing that is OSAL_IOV_VF_ACQUIRE()]
+ *
+ * @param p_hwfn
+ * @param vfid - relative vf index
+ * @param b_is_hw - true iff PF is to use HW channel for communication
+ */
+void ecore_iov_set_vf_hw_channel(struct ecore_hwfn *p_hwfn, int vfid,
+ bool b_is_hw);
+#endif
#endif /* CONFIG_ECORE_SRIOV */
#define ecore_for_each_vf(_p_hwfn, _i) \
for (_i = ecore_iov_get_next_active_vf(_p_hwfn, 0); \
- _i < E4_MAX_NUM_VFS; \
+ _i < MAX_NUM_VFS_E4; \
_i = ecore_iov_get_next_active_vf(_p_hwfn, _i + 1))
#endif
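
A sketch of iterating with the renamed bound; example_count_active_vfs is hypothetical:

static void example_count_active_vfs(struct ecore_hwfn *p_hwfn)
{
	u16 i, n_active = 0;

	/* `i' walks relative indices of active VFs until MAX_NUM_VFS_E4 */
	ecore_for_each_vf(p_hwfn, i)
		n_active++;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "%u active VFs\n", n_active);
}
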
diff --git a/drivers/net/qede/base/ecore_iro.h b/drivers/net/qede/base/ecore_iro.h
index b4bfe89f..360d7f88 100644
--- a/drivers/net/qede/base/ecore_iro.h
+++ b/drivers/net/qede/base/ecore_iro.h
@@ -193,5 +193,13 @@
#define TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) (IRO[48].base + \
((roce_pf_id) * IRO[48].m1))
#define TSTORM_ROCE_EVENTS_STAT_SIZE (IRO[48].size)
+/* DCQCN Received Statistics */
+#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(roce_pf_id) (IRO[49].base + \
+ ((roce_pf_id) * IRO[49].m1))
+#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_SIZE (IRO[49].size)
+/* DCQCN Sent Statistics */
+#define PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(roce_pf_id) (IRO[50].base + \
+ ((roce_pf_id) * IRO[50].m1))
+#define PSTORM_ROCE_DCQCN_SENT_STATS_SIZE (IRO[50].size)
#endif /* __IRO_H__ */
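
How the new DCQCN macros resolve: each iro_arr entry is {base, m1, m2, m3, size}, and the per-PF offset is base + roce_pf_id * m1. With the IRO[49] values added to ecore_iro_values.h below ({0xa398, 0x10, 0x0, 0x0, 0x10}), a worked example:

/* YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(2)
 *	== IRO[49].base + 2 * IRO[49].m1
 *	== 0xa398 + 2 * 0x10
 *	== 0xa3b8
 */
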
diff --git a/drivers/net/qede/base/ecore_iro_values.h b/drivers/net/qede/base/ecore_iro_values.h
index bc8df8f8..41532eeb 100644
--- a/drivers/net/qede/base/ecore_iro_values.h
+++ b/drivers/net/qede/base/ecore_iro_values.h
@@ -9,13 +9,13 @@
#ifndef __IRO_VALUES_H__
#define __IRO_VALUES_H__
-static const struct iro iro_arr[49] = {
+static const struct iro iro_arr[51] = {
/* YSTORM_FLOW_CONTROL_MODE_OFFSET */
{ 0x0, 0x0, 0x0, 0x0, 0x8},
/* TSTORM_PORT_STAT_OFFSET(port_id) */
{ 0x4cb0, 0x80, 0x0, 0x0, 0x80},
/* TSTORM_LL2_PORT_STAT_OFFSET(port_id) */
- { 0x6518, 0x20, 0x0, 0x0, 0x20},
+ { 0x6508, 0x20, 0x0, 0x0, 0x20},
/* USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) */
{ 0xb00, 0x8, 0x0, 0x0, 0x4},
/* USTORM_FLR_FINAL_ACK_OFFSET(pf_id) */
@@ -29,9 +29,9 @@ static const struct iro iro_arr[49] = {
/* XSTORM_INTEG_TEST_DATA_OFFSET */
{ 0x4c40, 0x0, 0x0, 0x0, 0x78},
/* YSTORM_INTEG_TEST_DATA_OFFSET */
- { 0x3df0, 0x0, 0x0, 0x0, 0x78},
+ { 0x3e10, 0x0, 0x0, 0x0, 0x78},
/* PSTORM_INTEG_TEST_DATA_OFFSET */
- { 0x29b0, 0x0, 0x0, 0x0, 0x78},
+ { 0x2b50, 0x0, 0x0, 0x0, 0x78},
/* TSTORM_INTEG_TEST_DATA_OFFSET */
{ 0x4c38, 0x0, 0x0, 0x0, 0x78},
/* MSTORM_INTEG_TEST_DATA_OFFSET */
@@ -41,11 +41,11 @@ static const struct iro iro_arr[49] = {
/* TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) */
{ 0xa28, 0x8, 0x0, 0x0, 0x8},
/* CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) */
- { 0x61f8, 0x10, 0x0, 0x0, 0x10},
+ { 0x61e8, 0x10, 0x0, 0x0, 0x10},
/* CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) */
- { 0xbd20, 0x30, 0x0, 0x0, 0x30},
+ { 0xb820, 0x30, 0x0, 0x0, 0x30},
/* CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) */
- { 0x95b8, 0x30, 0x0, 0x0, 0x30},
+ { 0x96b8, 0x30, 0x0, 0x0, 0x30},
/* MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) */
{ 0x4b60, 0x80, 0x0, 0x0, 0x40},
/* MSTORM_ETH_PF_PRODS_OFFSET(queue_id) */
@@ -59,11 +59,11 @@ static const struct iro iro_arr[49] = {
/* USTORM_QUEUE_STAT_OFFSET(stat_counter_id) */
{ 0x8150, 0x40, 0x0, 0x0, 0x30},
/* USTORM_ETH_PF_STAT_OFFSET(pf_id) */
- { 0xec70, 0x60, 0x0, 0x0, 0x60},
+ { 0xe770, 0x60, 0x0, 0x0, 0x60},
/* PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) */
- { 0x2b48, 0x80, 0x0, 0x0, 0x38},
+ { 0x2ce8, 0x80, 0x0, 0x0, 0x38},
/* PSTORM_ETH_PF_STAT_OFFSET(pf_id) */
- { 0xf1b0, 0x78, 0x0, 0x0, 0x78},
+ { 0xf2b0, 0x78, 0x0, 0x0, 0x78},
/* PSTORM_CTL_FRAME_ETHTYPE_OFFSET(ethType_id) */
{ 0x1f8, 0x4, 0x0, 0x0, 0x4},
/* TSTORM_ETH_PRS_INPUT_OFFSET */
@@ -81,33 +81,37 @@ static const struct iro iro_arr[49] = {
/* TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) */
{ 0x0, 0x8, 0x0, 0x0, 0x8},
/* TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id,bdq_id) */
- { 0x200, 0x10, 0x8, 0x0, 0x8},
+ { 0x200, 0x18, 0x8, 0x0, 0x8},
/* MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id,bdq_id) */
- { 0xb78, 0x10, 0x8, 0x0, 0x2},
+ { 0xb78, 0x18, 0x8, 0x0, 0x2},
/* TSTORM_ISCSI_RX_STATS_OFFSET(pf_id) */
- { 0xd9a8, 0x38, 0x0, 0x0, 0x24},
+ { 0xd878, 0x50, 0x0, 0x0, 0x3c},
/* MSTORM_ISCSI_RX_STATS_OFFSET(pf_id) */
- { 0x12988, 0x10, 0x0, 0x0, 0x8},
+ { 0x12908, 0x18, 0x0, 0x0, 0x10},
/* USTORM_ISCSI_RX_STATS_OFFSET(pf_id) */
- { 0x11fa0, 0x38, 0x0, 0x0, 0x18},
+ { 0x11aa8, 0x40, 0x0, 0x0, 0x18},
/* XSTORM_ISCSI_TX_STATS_OFFSET(pf_id) */
- { 0xa8c0, 0x38, 0x0, 0x0, 0x10},
+ { 0xa580, 0x50, 0x0, 0x0, 0x20},
/* YSTORM_ISCSI_TX_STATS_OFFSET(pf_id) */
- { 0x86f8, 0x30, 0x0, 0x0, 0x18},
+ { 0x86f8, 0x40, 0x0, 0x0, 0x28},
/* PSTORM_ISCSI_TX_STATS_OFFSET(pf_id) */
- { 0x101f8, 0x10, 0x0, 0x0, 0x10},
+ { 0x102f8, 0x18, 0x0, 0x0, 0x10},
/* TSTORM_FCOE_RX_STATS_OFFSET(pf_id) */
{ 0xde28, 0x48, 0x0, 0x0, 0x38},
/* PSTORM_FCOE_TX_STATS_OFFSET(pf_id) */
- { 0x10660, 0x20, 0x0, 0x0, 0x20},
+ { 0x10760, 0x20, 0x0, 0x0, 0x20},
/* PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) */
- { 0x2b80, 0x80, 0x0, 0x0, 0x10},
+ { 0x2d20, 0x80, 0x0, 0x0, 0x10},
/* TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) */
{ 0x5020, 0x10, 0x0, 0x0, 0x10},
/* XSTORM_IWARP_RXMIT_STATS_OFFSET(pf_id) */
{ 0xc9b0, 0x30, 0x0, 0x0, 0x10},
/* TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) */
{ 0xeec0, 0x10, 0x0, 0x0, 0x10},
+/* YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(roce_pf_id) */
+ { 0xa398, 0x10, 0x0, 0x0, 0x10},
+/* PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(roce_pf_id) */
+ { 0x13100, 0x8, 0x0, 0x0, 0x8},
};
#endif /* __IRO_VALUES_H__ */
diff --git a/drivers/net/qede/base/ecore_l2.c b/drivers/net/qede/base/ecore_l2.c
index e58b8fa0..e3afc8a3 100644
--- a/drivers/net/qede/base/ecore_l2.c
+++ b/drivers/net/qede/base/ecore_l2.c
@@ -173,16 +173,19 @@ static void ecore_eth_queue_qid_usage_del(struct ecore_hwfn *p_hwfn,
void ecore_eth_queue_cid_release(struct ecore_hwfn *p_hwfn,
struct ecore_queue_cid *p_cid)
{
- /* For VF-queues, stuff is a bit complicated as:
- * - They always maintain the qid_usage on their own.
- * - In legacy mode, they also maintain their CIDs.
- */
+ bool b_legacy_vf = !!(p_cid->vf_legacy &
+ ECORE_QCID_LEGACY_VF_CID);
- /* VFs' CIDs are 0-based in PF-view, and uninitialized on VF */
- if (IS_PF(p_hwfn->p_dev) && !p_cid->b_legacy_vf)
+ /* VFs' CIDs are 0-based in PF-view, and uninitialized on VF.
+ * For legacy vf-queues, the CID doesn't go through here.
+ */
+ if (IS_PF(p_hwfn->p_dev) && !b_legacy_vf)
_ecore_cxt_release_cid(p_hwfn, p_cid->cid, p_cid->vfid);
- if (!p_cid->b_legacy_vf)
+
+ /* VFs maintain the index inside queue-zone on their own */
+ if (p_cid->vfid == ECORE_QUEUE_CID_PF)
ecore_eth_queue_qid_usage_del(p_hwfn, p_cid);
+
OSAL_VFREE(p_hwfn->p_dev, p_cid);
}
@@ -193,6 +196,7 @@ static struct ecore_queue_cid *
_ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn,
u16 opaque_fid, u32 cid,
struct ecore_queue_start_common_params *p_params,
+ bool b_is_rx,
struct ecore_queue_cid_vf_params *p_vf_params)
{
struct ecore_queue_cid *p_cid;
@@ -204,14 +208,21 @@ _ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn,
p_cid->opaque_fid = opaque_fid;
p_cid->cid = cid;
- p_cid->rel = *p_params;
p_cid->p_owner = p_hwfn;
+ /* Fill in parameters */
+ p_cid->rel.vport_id = p_params->vport_id;
+ p_cid->rel.queue_id = p_params->queue_id;
+ p_cid->rel.stats_id = p_params->stats_id;
+ p_cid->sb_igu_id = p_params->p_sb->igu_sb_id;
+ p_cid->b_is_rx = b_is_rx;
+ p_cid->sb_idx = p_params->sb_idx;
+
/* Fill-in bits related to VFs' queues if information was provided */
if (p_vf_params != OSAL_NULL) {
p_cid->vfid = p_vf_params->vfid;
p_cid->vf_qid = p_vf_params->vf_qid;
- p_cid->b_legacy_vf = p_vf_params->b_legacy;
+ p_cid->vf_legacy = p_vf_params->vf_legacy;
} else {
p_cid->vfid = ECORE_QUEUE_CID_PF;
}
@@ -224,7 +235,7 @@ _ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn,
}
/* Calculate the engine-absolute indices of the resources.
- * The would guarantee they're valid later on.
+ * This would guarantee they're valid later on.
* In some cases [SBs] we already have the right values.
*/
rc = ecore_fw_vport(p_hwfn, p_cid->rel.vport_id, &p_cid->abs.vport_id);
@@ -248,10 +259,6 @@ _ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn,
p_cid->abs.stats_id = p_cid->rel.stats_id;
}
- /* SBs relevant information was already provided as absolute */
- p_cid->abs.sb = p_cid->rel.sb;
- p_cid->abs.sb_idx = p_cid->rel.sb_idx;
-
out:
/* VF-images have provided the qid_usage_idx on their own.
* Otherwise, we need to allocate a unique one.
@@ -270,7 +277,7 @@ out:
p_cid->rel.queue_id, p_cid->qid_usage_idx,
p_cid->abs.queue_id,
p_cid->rel.stats_id, p_cid->abs.stats_id,
- p_cid->abs.sb, p_cid->abs.sb_idx);
+ p_cid->sb_igu_id, p_cid->sb_idx);
return p_cid;
@@ -282,6 +289,7 @@ fail:
struct ecore_queue_cid *
ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
struct ecore_queue_start_common_params *p_params,
+ bool b_is_rx,
struct ecore_queue_cid_vf_params *p_vf_params)
{
struct ecore_queue_cid *p_cid;
@@ -296,7 +304,8 @@ ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
if (p_vf_params) {
vfid = p_vf_params->vfid;
- if (p_vf_params->b_legacy) {
+ if (p_vf_params->vf_legacy &
+ ECORE_QCID_LEGACY_VF_CID) {
b_legacy_vf = true;
cid = p_vf_params->vf_qid;
}
@@ -315,7 +324,7 @@ ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
}
p_cid = _ecore_eth_queue_to_cid(p_hwfn, opaque_fid, cid,
- p_params, p_vf_params);
+ p_params, b_is_rx, p_vf_params);
if ((p_cid == OSAL_NULL) && IS_PF(p_hwfn->p_dev) && !b_legacy_vf)
_ecore_cxt_release_cid(p_hwfn, cid, vfid);
@@ -324,9 +333,11 @@ ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
static struct ecore_queue_cid *
ecore_eth_queue_to_cid_pf(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
+ bool b_is_rx,
struct ecore_queue_start_common_params *p_params)
{
- return ecore_eth_queue_to_cid(p_hwfn, opaque_fid, p_params, OSAL_NULL);
+ return ecore_eth_queue_to_cid(p_hwfn, opaque_fid, p_params, b_is_rx,
+ OSAL_NULL);
}
enum _ecore_status_t
@@ -336,6 +347,7 @@ ecore_sp_eth_vport_start(struct ecore_hwfn *p_hwfn,
struct vport_start_ramrod_data *p_ramrod = OSAL_NULL;
struct ecore_spq_entry *p_ent = OSAL_NULL;
struct ecore_sp_init_data init_data;
+ struct eth_vport_tpa_param *p_tpa;
u16 rx_mode = 0, tx_err = 0;
u8 abs_vport_id = 0;
enum _ecore_status_t rc = ECORE_NOTIMPL;
@@ -360,8 +372,8 @@ ecore_sp_eth_vport_start(struct ecore_hwfn *p_hwfn,
p_ramrod->vport_id = abs_vport_id;
p_ramrod->mtu = OSAL_CPU_TO_LE16(p_params->mtu);
- p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan;
p_ramrod->handle_ptp_pkts = p_params->handle_ptp_pkts;
+ p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan;
p_ramrod->drop_ttl0_en = p_params->drop_ttl0;
p_ramrod->untagged = p_params->only_untagged;
p_ramrod->zero_placement_offset = p_params->zero_placement_offset;
@@ -396,22 +408,22 @@ ecore_sp_eth_vport_start(struct ecore_hwfn *p_hwfn,
p_ramrod->tx_err_behav.values = OSAL_CPU_TO_LE16(tx_err);
/* TPA related fields */
- OSAL_MEMSET(&p_ramrod->tpa_param, 0,
- sizeof(struct eth_vport_tpa_param));
- p_ramrod->tpa_param.max_buff_num = p_params->max_buffers_per_cqe;
+ p_tpa = &p_ramrod->tpa_param;
+ OSAL_MEMSET(p_tpa, 0, sizeof(struct eth_vport_tpa_param));
+ p_tpa->max_buff_num = p_params->max_buffers_per_cqe;
switch (p_params->tpa_mode) {
case ECORE_TPA_MODE_GRO:
- p_ramrod->tpa_param.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
- p_ramrod->tpa_param.tpa_max_size = (u16)-1;
- p_ramrod->tpa_param.tpa_min_size_to_cont = p_params->mtu / 2;
- p_ramrod->tpa_param.tpa_min_size_to_start = p_params->mtu / 2;
- p_ramrod->tpa_param.tpa_ipv4_en_flg = 1;
- p_ramrod->tpa_param.tpa_ipv6_en_flg = 1;
- p_ramrod->tpa_param.tpa_ipv4_tunn_en_flg = 1;
- p_ramrod->tpa_param.tpa_ipv6_tunn_en_flg = 1;
- p_ramrod->tpa_param.tpa_pkt_split_flg = 1;
- p_ramrod->tpa_param.tpa_gro_consistent_flg = 1;
+ p_tpa->tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
+ p_tpa->tpa_max_size = (u16)-1;
+ p_tpa->tpa_min_size_to_cont = p_params->mtu / 2;
+ p_tpa->tpa_min_size_to_start = p_params->mtu / 2;
+ p_tpa->tpa_ipv4_en_flg = 1;
+ p_tpa->tpa_ipv6_en_flg = 1;
+ p_tpa->tpa_ipv4_tunn_en_flg = 1;
+ p_tpa->tpa_ipv6_tunn_en_flg = 1;
+ p_tpa->tpa_pkt_split_flg = 1;
+ p_tpa->tpa_gro_consistent_flg = 1;
break;
default:
break;
@@ -427,8 +439,7 @@ ecore_sp_eth_vport_start(struct ecore_hwfn *p_hwfn,
p_ramrod->ctl_frame_ethtype_check_en = !!p_params->check_ethtype;
/* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */
- p_ramrod->sw_fid = ecore_concrete_to_sw_fid(p_hwfn->p_dev,
- p_params->concrete_fid);
+ p_ramrod->sw_fid = ecore_concrete_to_sw_fid(p_params->concrete_fid);
return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
@@ -454,6 +465,7 @@ ecore_sp_vport_update_rss(struct ecore_hwfn *p_hwfn,
struct ecore_rss_params *p_rss)
{
struct eth_vport_rss_config *p_config;
+ u16 capabilities = 0;
int i, table_size;
enum _ecore_status_t rc = ECORE_SUCCESS;
@@ -480,26 +492,26 @@ ecore_sp_vport_update_rss(struct ecore_hwfn *p_hwfn,
p_config->capabilities = 0;
- SET_FIELD(p_config->capabilities,
+ SET_FIELD(capabilities,
ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY,
!!(p_rss->rss_caps & ECORE_RSS_IPV4));
- SET_FIELD(p_config->capabilities,
+ SET_FIELD(capabilities,
ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY,
!!(p_rss->rss_caps & ECORE_RSS_IPV6));
- SET_FIELD(p_config->capabilities,
+ SET_FIELD(capabilities,
ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY,
!!(p_rss->rss_caps & ECORE_RSS_IPV4_TCP));
- SET_FIELD(p_config->capabilities,
+ SET_FIELD(capabilities,
ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY,
!!(p_rss->rss_caps & ECORE_RSS_IPV6_TCP));
- SET_FIELD(p_config->capabilities,
+ SET_FIELD(capabilities,
ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY,
!!(p_rss->rss_caps & ECORE_RSS_IPV4_UDP));
- SET_FIELD(p_config->capabilities,
+ SET_FIELD(capabilities,
ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY,
!!(p_rss->rss_caps & ECORE_RSS_IPV6_UDP));
p_config->tbl_size = p_rss->rss_table_size_log;
- p_config->capabilities = OSAL_CPU_TO_LE16(p_config->capabilities);
+ p_config->capabilities = OSAL_CPU_TO_LE16(capabilities);
DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
"update rss flag %d, rss_mode = %d, update_caps = %d, capabilities = %d, update_ind = %d, update_rss_key = %d\n",
@@ -627,11 +639,11 @@ ecore_sp_update_accept_mode(struct ecore_hwfn *p_hwfn,
}
static void
-ecore_sp_vport_update_sge_tpa(struct ecore_hwfn *p_hwfn,
- struct vport_update_ramrod_data *p_ramrod,
+ecore_sp_vport_update_sge_tpa(struct vport_update_ramrod_data *p_ramrod,
struct ecore_sge_tpa_params *p_params)
{
struct eth_vport_tpa_param *p_tpa;
+ u16 val;
if (!p_params) {
p_ramrod->common.update_tpa_param_flg = 0;
@@ -653,14 +665,16 @@ ecore_sp_vport_update_sge_tpa(struct ecore_hwfn *p_hwfn,
p_tpa->tpa_hdr_data_split_flg = p_params->tpa_hdr_data_split_flg;
p_tpa->tpa_gro_consistent_flg = p_params->tpa_gro_consistent_flg;
p_tpa->tpa_max_aggs_num = p_params->tpa_max_aggs_num;
- p_tpa->tpa_max_size = p_params->tpa_max_size;
- p_tpa->tpa_min_size_to_start = p_params->tpa_min_size_to_start;
- p_tpa->tpa_min_size_to_cont = p_params->tpa_min_size_to_cont;
+ val = p_params->tpa_max_size;
+ p_tpa->tpa_max_size = OSAL_CPU_TO_LE16(val);
+ val = p_params->tpa_min_size_to_start;
+ p_tpa->tpa_min_size_to_start = OSAL_CPU_TO_LE16(val);
+ val = p_params->tpa_min_size_to_cont;
+ p_tpa->tpa_min_size_to_cont = OSAL_CPU_TO_LE16(val);
}
static void
-ecore_sp_update_mcast_bin(struct ecore_hwfn *p_hwfn,
- struct vport_update_ramrod_data *p_ramrod,
+ecore_sp_update_mcast_bin(struct vport_update_ramrod_data *p_ramrod,
struct ecore_sp_vport_update_params *p_params)
{
int i;
@@ -769,11 +783,10 @@ ecore_sp_vport_update(struct ecore_hwfn *p_hwfn,
}
/* Update mcast bins for VFs, PF doesn't use this functionality */
- ecore_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params);
+ ecore_sp_update_mcast_bin(p_ramrod, p_params);
ecore_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags);
- ecore_sp_vport_update_sge_tpa(p_hwfn, p_ramrod,
- p_params->sge_tpa_params);
+ ecore_sp_vport_update_sge_tpa(p_ramrod, p_params->sge_tpa_params);
if (p_params->mtu) {
p_ramrod->common.update_mtu_flg = 1;
p_ramrod->common.mtu = OSAL_CPU_TO_LE16(p_params->mtu);
@@ -897,7 +910,7 @@ ecore_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn,
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
"opaque_fid=0x%x, cid=0x%x, rx_qzone=0x%x, vport_id=0x%x, sb_id=0x%x\n",
p_cid->opaque_fid, p_cid->cid, p_cid->abs.queue_id,
- p_cid->abs.vport_id, p_cid->abs.sb);
+ p_cid->abs.vport_id, p_cid->sb_igu_id);
/* Get SPQ entry */
OSAL_MEMSET(&init_data, 0, sizeof(init_data));
@@ -913,8 +926,8 @@ ecore_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn,
p_ramrod = &p_ent->ramrod.rx_queue_start;
- p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_cid->abs.sb);
- p_ramrod->sb_index = p_cid->abs.sb_idx;
+ p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_cid->sb_igu_id);
+ p_ramrod->sb_index = p_cid->sb_idx;
p_ramrod->vport_id = p_cid->abs.vport_id;
p_ramrod->stats_counter_id = p_cid->abs.stats_id;
p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
@@ -928,12 +941,15 @@ ecore_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn,
DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);
if (p_cid->vfid != ECORE_QUEUE_CID_PF) {
+ bool b_legacy_vf = !!(p_cid->vf_legacy &
+ ECORE_QCID_LEGACY_VF_RX_PROD);
+
p_ramrod->vf_rx_prod_index = p_cid->vf_qid;
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
"Queue%s is meant for VF rxq[%02x]\n",
- !!p_cid->b_legacy_vf ? " [legacy]" : "",
+ b_legacy_vf ? " [legacy]" : "",
p_cid->vf_qid);
- p_ramrod->vf_rx_prod_use_zone_a = !!p_cid->b_legacy_vf;
+ p_ramrod->vf_rx_prod_use_zone_a = b_legacy_vf;
}
return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
@@ -979,7 +995,7 @@ ecore_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t rc;
/* Allocate a CID for the queue */
- p_cid = ecore_eth_queue_to_cid_pf(p_hwfn, opaque_fid, p_params);
+ p_cid = ecore_eth_queue_to_cid_pf(p_hwfn, opaque_fid, true, p_params);
if (p_cid == OSAL_NULL)
return ECORE_NOMEM;
@@ -1146,8 +1162,8 @@ ecore_eth_txq_start_ramrod(struct ecore_hwfn *p_hwfn,
p_ramrod = &p_ent->ramrod.tx_queue_start;
p_ramrod->vport_id = p_cid->abs.vport_id;
- p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_cid->abs.sb);
- p_ramrod->sb_index = p_cid->abs.sb_idx;
+ p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_cid->sb_igu_id);
+ p_ramrod->sb_index = p_cid->sb_idx;
p_ramrod->stats_counter_id = p_cid->abs.stats_id;
p_ramrod->queue_zone_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
@@ -1195,7 +1211,7 @@ ecore_eth_tx_queue_start(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
struct ecore_queue_cid *p_cid;
enum _ecore_status_t rc;
- p_cid = ecore_eth_queue_to_cid_pf(p_hwfn, opaque_fid, p_params);
+ p_cid = ecore_eth_queue_to_cid_pf(p_hwfn, opaque_fid, false, p_params);
if (p_cid == OSAL_NULL)
return ECORE_INVAL;
@@ -1494,8 +1510,7 @@ ecore_sp_eth_filter_ucast(struct ecore_hwfn *p_hwfn,
* Note: crc32_length MUST be aligned to 8
* Return:
******************************************************************************/
-static u32 ecore_calc_crc32c(u8 *crc32_packet,
- u32 crc32_length, u32 crc32_seed, u8 complement)
+static u32 ecore_calc_crc32c(u8 *crc32_packet, u32 crc32_length, u32 crc32_seed)
{
u32 byte = 0, bit = 0, crc32_result = crc32_seed;
u8 msb = 0, current_byte = 0;
@@ -1520,25 +1535,23 @@ static u32 ecore_calc_crc32c(u8 *crc32_packet,
return crc32_result;
}
-static u32 ecore_crc32c_le(u32 seed, u8 *mac, u32 len)
+static u32 ecore_crc32c_le(u32 seed, u8 *mac)
{
u32 packet_buf[2] = { 0 };
OSAL_MEMCPY((u8 *)(&packet_buf[0]), &mac[0], 6);
- return ecore_calc_crc32c((u8 *)packet_buf, 8, seed, 0);
+ return ecore_calc_crc32c((u8 *)packet_buf, 8, seed);
}
u8 ecore_mcast_bin_from_mac(u8 *mac)
{
- u32 crc = ecore_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED,
- mac, ETH_ALEN);
+ u32 crc = ecore_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED, mac);
return crc & 0xff;
}
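
The bin is the low byte of a CRC32 over the 6-byte MAC zero-padded to 8 bytes. A self-contained sketch of the same scheme follows; the bit-serial loop direction and the CRC32c polynomial 0x1edc6f41 are assumptions here, and ecore_calc_crc32c() above remains the authoritative implementation:

#include <stdint.h>
#include <string.h>

static uint32_t example_crc32_bitwise(const uint8_t *buf, uint32_t len,
				      uint32_t seed)
{
	uint32_t crc = seed, byte, bit;

	for (byte = 0; byte < len; byte++)
		for (bit = 0; bit < 8; bit++) {
			uint32_t msb = crc >> 31;

			crc <<= 1;
			if (msb != ((buf[byte] >> bit) & 1))
				crc ^= 0x1edc6f41; /* assumed CRC32c poly */
		}
	return crc;
}

static uint8_t example_mcast_bin(const uint8_t mac[6], uint32_t seed)
{
	uint8_t packet_buf[8] = { 0 };

	memcpy(packet_buf, mac, 6); /* MAC zero-padded to 8 bytes */
	return (uint8_t)(example_crc32_bitwise(packet_buf, 8, seed) & 0xff);
}
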
static enum _ecore_status_t
ecore_sp_eth_filter_mcast(struct ecore_hwfn *p_hwfn,
- u16 opaque_fid,
struct ecore_filter_mcast *p_filter_cmd,
enum spq_mode comp_mode,
struct ecore_spq_comp_cb *p_comp_data)
@@ -1633,16 +1646,13 @@ ecore_filter_mcast_cmd(struct ecore_dev *p_dev,
for_each_hwfn(p_dev, i) {
struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
- u16 opaque_fid;
if (IS_VF(p_dev)) {
ecore_vf_pf_filter_mcast(p_hwfn, p_filter_cmd);
continue;
}
- opaque_fid = p_hwfn->hw_info.opaque_fid;
rc = ecore_sp_eth_filter_mcast(p_hwfn,
- opaque_fid,
p_filter_cmd,
comp_mode, p_comp_data);
if (rc != ECORE_SUCCESS)
@@ -1732,8 +1742,7 @@ static void __ecore_get_vport_pstats(struct ecore_hwfn *p_hwfn,
static void __ecore_get_vport_tstats(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
- struct ecore_eth_stats *p_stats,
- u16 statistics_bin)
+ struct ecore_eth_stats *p_stats)
{
struct tstorm_per_port_stat tstats;
u32 tstats_addr, tstats_len;
@@ -1945,7 +1954,7 @@ void __ecore_get_vport_stats(struct ecore_hwfn *p_hwfn,
{
__ecore_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin);
__ecore_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin);
- __ecore_get_vport_tstats(p_hwfn, p_ptt, stats, statistics_bin);
+ __ecore_get_vport_tstats(p_hwfn, p_ptt, stats);
__ecore_get_vport_pstats(p_hwfn, p_ptt, stats, statistics_bin);
#ifndef ASIC_ONLY
@@ -1970,6 +1979,7 @@ static void _ecore_get_vport_stats(struct ecore_dev *p_dev,
struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
struct ecore_ptt *p_ptt = IS_PF(p_dev) ?
ecore_ptt_acquire(p_hwfn) : OSAL_NULL;
+ bool b_get_port_stats;
if (IS_PF(p_dev)) {
/* The main vport index is relative first */
@@ -1984,8 +1994,9 @@ static void _ecore_get_vport_stats(struct ecore_dev *p_dev,
continue;
}
+ b_get_port_stats = IS_PF(p_dev) && IS_LEAD_HWFN(p_hwfn);
__ecore_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport,
- IS_PF(p_dev) ? true : false);
+ b_get_port_stats);
out:
if (IS_PF(p_dev) && p_ptt)
@@ -2061,12 +2072,16 @@ void ecore_arfs_mode_configure(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_arfs_config_params *p_cfg_params)
{
+ if (OSAL_TEST_BIT(ECORE_MF_DISABLE_ARFS, &p_hwfn->p_dev->mf_bits))
+ return;
+
if (p_cfg_params->arfs_enable) {
- ecore_set_rfs_mode_enable(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
- p_cfg_params->tcp,
- p_cfg_params->udp,
- p_cfg_params->ipv4,
- p_cfg_params->ipv6);
+ ecore_gft_config(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
+ p_cfg_params->tcp,
+ p_cfg_params->udp,
+ p_cfg_params->ipv4,
+ p_cfg_params->ipv6,
+ GFT_PROFILE_TYPE_4_TUPLE);
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
"tcp = %s, udp = %s, ipv4 = %s, ipv6 =%s\n",
p_cfg_params->tcp ? "Enable" : "Disable",
@@ -2074,7 +2089,7 @@ void ecore_arfs_mode_configure(struct ecore_hwfn *p_hwfn,
p_cfg_params->ipv4 ? "Enable" : "Disable",
p_cfg_params->ipv6 ? "Enable" : "Disable");
} else {
- ecore_set_rfs_mode_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
+ ecore_gft_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
}
DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Configured ARFS mode : %s\n",
p_cfg_params->arfs_enable ? "Enable" : "Disable");
@@ -2082,7 +2097,6 @@ void ecore_arfs_mode_configure(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t
ecore_configure_rfs_ntuple_filter(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
struct ecore_spq_comp_cb *p_cb,
dma_addr_t p_addr, u16 length,
u16 qid, u8 vport_id,
@@ -2126,9 +2140,17 @@ ecore_configure_rfs_ntuple_filter(struct ecore_hwfn *p_hwfn,
DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_addr);
p_ramrod->pkt_hdr_length = OSAL_CPU_TO_LE16(length);
- p_ramrod->rx_qid_or_action_icid = OSAL_CPU_TO_LE16(abs_rx_q_id);
+
+ p_ramrod->action_icid_valid = 0;
+ p_ramrod->action_icid = 0;
+
+ p_ramrod->rx_qid_valid = 1;
+ p_ramrod->rx_qid = OSAL_CPU_TO_LE16(abs_rx_q_id);
+
+ p_ramrod->flow_id_valid = 0;
+ p_ramrod->flow_id = 0;
+
p_ramrod->vport_id = abs_vport_id;
- p_ramrod->filter_type = RFS_FILTER_TYPE;
p_ramrod->filter_action = b_is_add ? GFT_ADD_FILTER
: GFT_DELETE_FILTER;
@@ -2140,3 +2162,108 @@ ecore_configure_rfs_ntuple_filter(struct ecore_hwfn *p_hwfn,
return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
+
+int ecore_get_rxq_coalesce(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_queue_cid *p_cid,
+ u16 *p_rx_coal)
+{
+ u32 coalesce, address, is_valid;
+ struct cau_sb_entry sb_entry;
+ u8 timer_res;
+ enum _ecore_status_t rc;
+
+ rc = ecore_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
+ p_cid->sb_igu_id * sizeof(u64),
+ (u64)(osal_uintptr_t)&sb_entry, 2, 0);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
+ return rc;
+ }
+
+ timer_res = GET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0);
+
+ address = BAR0_MAP_REG_USDM_RAM +
+ USTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
+ coalesce = ecore_rd(p_hwfn, p_ptt, address);
+
+ is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID);
+ if (!is_valid)
+ return ECORE_INVAL;
+
+ coalesce = GET_FIELD(coalesce, COALESCING_TIMESET_TIMESET);
+ *p_rx_coal = (u16)(coalesce << timer_res);
+
+ return ECORE_SUCCESS;
+}
+
+int ecore_get_txq_coalesce(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_queue_cid *p_cid,
+ u16 *p_tx_coal)
+{
+ u32 coalesce, address, is_valid;
+ struct cau_sb_entry sb_entry;
+ u8 timer_res;
+ enum _ecore_status_t rc;
+
+ rc = ecore_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
+ p_cid->sb_igu_id * sizeof(u64),
+ (u64)(osal_uintptr_t)&sb_entry, 2, 0);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
+ return rc;
+ }
+
+ timer_res = GET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1);
+
+ address = BAR0_MAP_REG_XSDM_RAM +
+ XSTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
+ coalesce = ecore_rd(p_hwfn, p_ptt, address);
+
+ is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID);
+ if (!is_valid)
+ return ECORE_INVAL;
+
+ coalesce = GET_FIELD(coalesce, COALESCING_TIMESET_TIMESET);
+ *p_tx_coal = (u16)(coalesce << timer_res);
+
+ return ECORE_SUCCESS;
+}
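+
+The stored timeset is a compressed value; the CAU timer resolution gives the
+number of left-shifts that recover microseconds. A worked sketch of the
+arithmetic used by both getters above:
+
+/* If CAU_SB_ENTRY_TIMER_RES1 == 2 and the queue zone's
+ * COALESCING_TIMESET_TIMESET == 24, then:
+ *	coalesce = 24 << 2 = 96 microseconds
+ */
+static u16 example_coal_from_timeset(u8 timeset, u8 timer_res)
+{
+	return (u16)(timeset << timer_res);
+}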
+
+enum _ecore_status_t
+ecore_get_queue_coalesce(struct ecore_hwfn *p_hwfn, u16 *p_coal,
+ void *handle)
+{
+ struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)handle;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ struct ecore_ptt *p_ptt;
+
+ if (IS_VF(p_hwfn->p_dev)) {
+ rc = ecore_vf_pf_get_coalesce(p_hwfn, p_coal, p_cid);
+ if (rc != ECORE_SUCCESS)
+ DP_NOTICE(p_hwfn, false,
+ "Unable to read queue calescing\n");
+
+ return rc;
+ }
+
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return ECORE_AGAIN;
+
+ if (p_cid->b_is_rx) {
+ rc = ecore_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, p_coal);
+ if (rc != ECORE_SUCCESS)
+ goto out;
+ } else {
+ rc = ecore_get_txq_coalesce(p_hwfn, p_ptt, p_cid, p_coal);
+ if (rc != ECORE_SUCCESS)
+ goto out;
+ }
+
+out:
+ ecore_ptt_release(p_hwfn, p_ptt);
+
+ return rc;
+}
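+
+A hedged sketch of reading back coalescing via the opaque CID handle returned
+at queue-start time; example_read_coalesce is hypothetical:
+
+static void example_read_coalesce(struct ecore_hwfn *p_hwfn, void *cid_handle)
+{
+	u16 coal_us = 0;
+
+	/* Works for PF and VF; the VF path uses ecore_vf_pf_get_coalesce() */
+	if (ecore_get_queue_coalesce(p_hwfn, &coal_us, cid_handle) ==
+	    ECORE_SUCCESS)
+		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+			   "queue coalescing = %u us\n", coal_us);
+}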
diff --git a/drivers/net/qede/base/ecore_l2.h b/drivers/net/qede/base/ecore_l2.h
index 7fe4cbcb..f4212cf2 100644
--- a/drivers/net/qede/base/ecore_l2.h
+++ b/drivers/net/qede/base/ecore_l2.h
@@ -18,7 +18,16 @@
#define MAX_QUEUES_PER_QZONE (sizeof(unsigned long) * 8)
#define ECORE_QUEUE_CID_PF (0xff)
-/* Additional parameters required for initialization of the queue_cid
+/* Almost identical to the ecore_queue_start_common_params,
+ * but here we maintain the SB index in IGU CAM.
+ */
+struct ecore_queue_cid_params {
+ u8 vport_id;
+ u16 queue_id;
+ u8 stats_id;
+};
+
+ /* Additional parameters required for initialization of the queue_cid
* and are relevant only for a PF initializing one for its VFs.
*/
struct ecore_queue_cid_vf_params {
@@ -34,7 +43,7 @@ struct ecore_queue_cid_vf_params {
* - Producers would be placed in a different place.
* - Makes assumptions regarding the CIDs.
*/
- bool b_legacy;
+ u8 vf_legacy;
/* For VFs, this index arrives via TLV to differentiate between
* different queues opened on the same qzone, and is passed
@@ -44,16 +53,19 @@ struct ecore_queue_cid_vf_params {
};
struct ecore_queue_cid {
- /* 'Relative' is a relative term ;-). Usually the indices [not counting
- * SBs] would be PF-relative, but there are some cases where that isn't
- * the case - specifically for a PF configuring its VF indices it's
- * possible some fields [E.g., stats-id] in 'rel' would already be abs.
- */
- struct ecore_queue_start_common_params rel;
- struct ecore_queue_start_common_params abs;
+ /* For stats-id, the `rel' is actually absolute as well */
+ struct ecore_queue_cid_params rel;
+ struct ecore_queue_cid_params abs;
+
+ /* These have no 'relative' meaning */
+ u16 sb_igu_id;
+ u8 sb_idx;
+
u32 cid;
u16 opaque_fid;
+ bool b_is_rx;
+
/* VFs queues are mapped differently, so we need to know the
* relative queue associated with them [0-based].
* Notice this is relevant on the *PF* queue-cid of its VF's queues,
@@ -69,7 +81,9 @@ struct ecore_queue_cid {
u8 qid_usage_idx;
/* Legacy VFs might have Rx producer located elsewhere */
- bool b_legacy_vf;
+ u8 vf_legacy;
+#define ECORE_QCID_LEGACY_VF_RX_PROD (1 << 0)
+#define ECORE_QCID_LEGACY_VF_CID (1 << 1)
struct ecore_hwfn *p_owner;
};
@@ -84,6 +98,7 @@ void ecore_eth_queue_cid_release(struct ecore_hwfn *p_hwfn,
struct ecore_queue_cid *
ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
struct ecore_queue_start_common_params *p_params,
+ bool b_is_rx,
struct ecore_queue_cid_vf_params *p_vf_params);
enum _ecore_status_t
@@ -129,31 +144,24 @@ ecore_eth_txq_start_ramrod(struct ecore_hwfn *p_hwfn,
u8 ecore_mcast_bin_from_mac(u8 *mac);
-/**
- * @brief - ecore_configure_rfs_ntuple_filter
- *
- * This ramrod should be used to add or remove arfs hw filter
- *
- * @params p_hwfn
- * @params p_ptt
- * @params p_cb Used for ECORE_SPQ_MODE_CB,where client would initialize
- it with cookie and callback function address, if not
- using this mode then client must pass NULL.
- * @params p_addr p_addr is an actual packet header that needs to be
- * filter. It has to mapped with IO to read prior to
- * calling this, [contains 4 tuples- src ip, dest ip,
- * src port, dest port].
- * @params length length of p_addr header up to past the transport header.
- * @params qid receive packet will be directed to this queue.
- * @params vport_id
- * @params b_is_add flag to add or remove filter.
- *
- */
-enum _ecore_status_t
-ecore_configure_rfs_ntuple_filter(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- struct ecore_spq_comp_cb *p_cb,
- dma_addr_t p_addr, u16 length,
- u16 qid, u8 vport_id,
- bool b_is_add);
+enum _ecore_status_t ecore_set_rxq_coalesce(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 coalesce,
+ struct ecore_queue_cid *p_cid);
+
+enum _ecore_status_t ecore_set_txq_coalesce(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 coalesce,
+ struct ecore_queue_cid *p_cid);
+
+enum _ecore_status_t ecore_get_rxq_coalesce(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_queue_cid *p_cid,
+ u16 *p_hw_coal);
+
+enum _ecore_status_t ecore_get_txq_coalesce(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_queue_cid *p_cid,
+ u16 *p_hw_coal);
+
#endif
diff --git a/drivers/net/qede/base/ecore_l2_api.h b/drivers/net/qede/base/ecore_l2_api.h
index d09f3c4a..ed9837bf 100644
--- a/drivers/net/qede/base/ecore_l2_api.h
+++ b/drivers/net/qede/base/ecore_l2_api.h
@@ -11,6 +11,7 @@
#include "ecore_status.h"
#include "ecore_sp_api.h"
+#include "ecore_int_api.h"
#ifndef __EXTRACT__LINUX__
enum ecore_rss_caps {
@@ -35,8 +36,7 @@ struct ecore_queue_start_common_params {
/* Relative, but relevant only for PFs */
u8 stats_id;
- /* These are always absolute */
- u16 sb;
+ struct ecore_sb_info *p_sb;
u8 sb_idx;
};
@@ -436,4 +436,30 @@ void ecore_reset_vport_stats(struct ecore_dev *p_dev);
void ecore_arfs_mode_configure(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_arfs_config_params *p_cfg_params);
+
+/**
+ * @brief - ecore_configure_rfs_ntuple_filter
+ *
+ * This ramrod should be used to add or remove an arfs hw filter
+ *
+ * @params p_hwfn
+ * @params p_cb Used for ECORE_SPQ_MODE_CB, where the client would
+ * initialize it with a cookie and callback function address;
+ * if not using this mode, the client must pass NULL.
+ * @params p_addr p_addr is the actual packet header that needs to be
+ * filtered. It has to be mapped with IO to be read prior to
+ * calling this [contains 4 tuples - src ip, dest ip,
+ * src port, dest port].
+ * @params length length of p_addr header up to past the transport header.
+ * @params qid receive packet will be directed to this queue.
+ * @params vport_id
+ * @params b_is_add flag to add or remove filter.
+ *
+ */
+enum _ecore_status_t
+ecore_configure_rfs_ntuple_filter(struct ecore_hwfn *p_hwfn,
+ struct ecore_spq_comp_cb *p_cb,
+ dma_addr_t p_addr, u16 length,
+ u16 qid, u8 vport_id,
+ bool b_is_add);
#endif
diff --git a/drivers/net/qede/base/ecore_mcp.c b/drivers/net/qede/base/ecore_mcp.c
index 88c5ceb0..8edd2e96 100644
--- a/drivers/net/qede/base/ecore_mcp.c
+++ b/drivers/net/qede/base/ecore_mcp.c
@@ -20,6 +20,8 @@
#include "ecore_gtt_reg_addr.h"
#include "ecore_iro.h"
#include "ecore_dcbx.h"
+#include "ecore_sp_commands.h"
+#include "ecore_cxt.h"
#define CHIP_MCP_RESP_ITER_US 10
#define EMUL_MCP_RESP_ITER_US (1000 * 1000)
@@ -43,9 +45,9 @@
OFFSETOF(struct public_drv_mb, _field))
#define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
- DRV_ID_PDA_COMP_VER_SHIFT)
+ DRV_ID_PDA_COMP_VER_OFFSET)
-#define MCP_BYTES_PER_MBIT_SHIFT 17
+#define MCP_BYTES_PER_MBIT_OFFSET 17
#ifndef ASIC_ONLY
static int loaded;
@@ -96,13 +98,81 @@ void ecore_mcp_read_mb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
}
}
+struct ecore_mcp_cmd_elem {
+ osal_list_entry_t list;
+ struct ecore_mcp_mb_params *p_mb_params;
+ u16 expected_seq_num;
+ bool b_is_completed;
+};
+
+/* Must be called while cmd_lock is acquired */
+static struct ecore_mcp_cmd_elem *
+ecore_mcp_cmd_add_elem(struct ecore_hwfn *p_hwfn,
+ struct ecore_mcp_mb_params *p_mb_params,
+ u16 expected_seq_num)
+{
+ struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL;
+
+ p_cmd_elem = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC,
+ sizeof(*p_cmd_elem));
+ if (!p_cmd_elem) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to allocate `struct ecore_mcp_cmd_elem'\n");
+ goto out;
+ }
+
+ p_cmd_elem->p_mb_params = p_mb_params;
+ p_cmd_elem->expected_seq_num = expected_seq_num;
+ OSAL_LIST_PUSH_HEAD(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list);
+out:
+ return p_cmd_elem;
+}
+
+/* Must be called while cmd_lock is acquired */
+static void ecore_mcp_cmd_del_elem(struct ecore_hwfn *p_hwfn,
+ struct ecore_mcp_cmd_elem *p_cmd_elem)
+{
+ OSAL_LIST_REMOVE_ENTRY(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list);
+ OSAL_FREE(p_hwfn->p_dev, p_cmd_elem);
+}
+
+/* Must be called while cmd_lock is acquired */
+static struct ecore_mcp_cmd_elem *
+ecore_mcp_cmd_get_elem(struct ecore_hwfn *p_hwfn, u16 seq_num)
+{
+ struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL;
+
+ OSAL_LIST_FOR_EACH_ENTRY(p_cmd_elem, &p_hwfn->mcp_info->cmd_list, list,
+ struct ecore_mcp_cmd_elem) {
+ if (p_cmd_elem->expected_seq_num == seq_num)
+ return p_cmd_elem;
+ }
+
+ return OSAL_NULL;
+}
+
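+
+A sketch of how a response is matched back to its command under the new
+scheme: the sequence number rides in the low bits of fw_mb_header
+(FW_MSG_SEQ_NUMBER_MASK), and lookup is by expected_seq_num. The helper name
+is hypothetical, and like the functions above it assumes cmd_lock is held:
+
+static bool example_response_has_owner(struct ecore_hwfn *p_hwfn,
+				       u32 fw_mb_header)
+{
+	u16 seq_num = (u16)(fw_mb_header & FW_MSG_SEQ_NUMBER_MASK);
+
+	/* cmd_lock must be held, as for ecore_mcp_cmd_get_elem() */
+	return ecore_mcp_cmd_get_elem(p_hwfn, seq_num) != OSAL_NULL;
+}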
enum _ecore_status_t ecore_mcp_free(struct ecore_hwfn *p_hwfn)
{
if (p_hwfn->mcp_info) {
+ struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL, *p_tmp;
+
OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_cur);
OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_shadow);
- OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->lock);
+
+ OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
+ OSAL_LIST_FOR_EACH_ENTRY_SAFE(p_cmd_elem, p_tmp,
+ &p_hwfn->mcp_info->cmd_list, list,
+ struct ecore_mcp_cmd_elem) {
+ ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
+ }
+ OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
+
+#ifdef CONFIG_ECORE_LOCK_ALLOC
+ OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->cmd_lock);
+ OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->link_lock);
+#endif
}
+
OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);
return ECORE_SUCCESS;
@@ -157,8 +227,7 @@ static enum _ecore_status_t ecore_load_mcp_offsets(struct ecore_hwfn *p_hwfn,
p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
DRV_PULSE_SEQ_MASK;
- p_info->mcp_hist = (u16)ecore_rd(p_hwfn, p_ptt,
- MISCS_REG_GENERIC_POR_0);
+ p_info->mcp_hist = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
return ECORE_SUCCESS;
}
@@ -176,6 +245,16 @@ enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn,
goto err;
p_info = p_hwfn->mcp_info;
+ /* Initialize the MFW spinlocks */
+#ifdef CONFIG_ECORE_LOCK_ALLOC
+ OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->cmd_lock);
+ OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->link_lock);
+#endif
+ OSAL_SPIN_LOCK_INIT(&p_info->cmd_lock);
+ OSAL_SPIN_LOCK_INIT(&p_info->link_lock);
+
+ OSAL_LIST_INIT(&p_info->cmd_list);
+
if (ecore_load_mcp_offsets(p_hwfn, p_ptt) != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, false, "MCP is not initialized\n");
/* Do not free mcp_info here, since public_base indicate that
@@ -190,10 +269,6 @@ enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn,
if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr)
goto err;
- /* Initialize the MFW spinlock */
- OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->lock);
- OSAL_SPIN_LOCK_INIT(&p_info->lock);
-
return ECORE_SUCCESS;
err:
@@ -202,58 +277,28 @@ err:
return ECORE_NOMEM;
}
-/* Locks the MFW mailbox of a PF to ensure a single access.
- * The lock is achieved in most cases by holding a spinlock, causing other
- * threads to wait till a previous access is done.
- * In some cases (currently when a [UN]LOAD_REQ commands are sent), the single
- * access is achieved by setting a blocking flag, which will fail other
- * competing contexts to send their mailboxes.
- */
-static enum _ecore_status_t ecore_mcp_mb_lock(struct ecore_hwfn *p_hwfn,
- u32 cmd)
-{
- OSAL_SPIN_LOCK(&p_hwfn->mcp_info->lock);
-
- /* The spinlock shouldn't be acquired when the mailbox command is
- * [UN]LOAD_REQ, since the engine is locked by the MFW, and a parallel
- * pending [UN]LOAD_REQ command of another PF together with a spinlock
- * (i.e. interrupts are disabled) - can lead to a deadlock.
- * It is assumed that for a single PF, no other mailbox commands can be
- * sent from another context while sending LOAD_REQ, and that any
- * parallel commands to UNLOAD_REQ can be cancelled.
- */
- if (cmd == DRV_MSG_CODE_LOAD_DONE || cmd == DRV_MSG_CODE_UNLOAD_DONE)
- p_hwfn->mcp_info->block_mb_sending = false;
+static void ecore_mcp_reread_offsets(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 generic_por_0 = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
- if (p_hwfn->mcp_info->block_mb_sending) {
- DP_NOTICE(p_hwfn, false,
- "Trying to send a MFW mailbox command [0x%x]"
- " in parallel to [UN]LOAD_REQ. Aborting.\n",
- cmd);
- OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
- return ECORE_BUSY;
- }
+ /* Use MCP history register to check if MCP reset occurred between init
+ * time and now.
+ */
+ if (p_hwfn->mcp_info->mcp_hist != generic_por_0) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Rereading MCP offsets [mcp_hist 0x%08x, generic_por_0 0x%08x]\n",
+ p_hwfn->mcp_info->mcp_hist, generic_por_0);
- if (cmd == DRV_MSG_CODE_LOAD_REQ || cmd == DRV_MSG_CODE_UNLOAD_REQ) {
- p_hwfn->mcp_info->block_mb_sending = true;
- OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
+ ecore_load_mcp_offsets(p_hwfn, p_ptt);
+ ecore_mcp_cmd_port_init(p_hwfn, p_ptt);
}
-
- return ECORE_SUCCESS;
-}
-
-static void ecore_mcp_mb_unlock(struct ecore_hwfn *p_hwfn, u32 cmd)
-{
- if (cmd != DRV_MSG_CODE_LOAD_REQ && cmd != DRV_MSG_CODE_UNLOAD_REQ)
- OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
}
enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
- u32 seq = ++p_hwfn->mcp_info->drv_mb_seq;
- u32 delay = CHIP_MCP_RESP_ITER_US;
- u32 org_mcp_reset_seq, cnt = 0;
+ u32 org_mcp_reset_seq, seq, delay = CHIP_MCP_RESP_ITER_US, cnt = 0;
enum _ecore_status_t rc = ECORE_SUCCESS;
#ifndef ASIC_ONLY
@@ -261,15 +306,20 @@ enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
delay = EMUL_MCP_RESP_ITER_US;
#endif
- /* Ensure that only a single thread is accessing the mailbox at a
- * certain time.
- */
- rc = ecore_mcp_mb_lock(p_hwfn, DRV_MSG_CODE_MCP_RESET);
- if (rc != ECORE_SUCCESS)
- return rc;
+ if (p_hwfn->mcp_info->b_block_cmd) {
+ DP_NOTICE(p_hwfn, false,
+ "The MFW is not responsive. Avoid sending MCP_RESET mailbox command.\n");
+ return ECORE_ABORTED;
+ }
+
+ /* Ensure that only a single thread is accessing the mailbox */
+ OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
- /* Set drv command along with the updated sequence */
org_mcp_reset_seq = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
+
+ /* Set drv command along with the updated sequence */
+ ecore_mcp_reread_offsets(p_hwfn, p_ptt);
+ seq = ++p_hwfn->mcp_info->drv_mb_seq;
DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));
do {
@@ -289,73 +339,238 @@ enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
rc = ECORE_AGAIN;
}
- ecore_mcp_mb_unlock(p_hwfn, DRV_MSG_CODE_MCP_RESET);
+ OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
return rc;
}
-static enum _ecore_status_t ecore_do_mcp_cmd(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u32 cmd, u32 param,
- u32 *o_mcp_resp,
- u32 *o_mcp_param)
+/* Must be called while cmd_lock is acquired */
+static bool ecore_mcp_has_pending_cmd(struct ecore_hwfn *p_hwfn)
{
- u32 delay = CHIP_MCP_RESP_ITER_US;
- u32 max_retries = ECORE_DRV_MB_MAX_RETRIES;
- u32 seq, cnt = 1, actual_mb_seq;
+ struct ecore_mcp_cmd_elem *p_cmd_elem = OSAL_NULL;
+
+ /* There is at most one pending command at any given time, and if it
+ * exists, it is placed at the HEAD of the list.
+ */
+ if (!OSAL_LIST_IS_EMPTY(&p_hwfn->mcp_info->cmd_list)) {
+ p_cmd_elem = OSAL_LIST_FIRST_ENTRY(&p_hwfn->mcp_info->cmd_list,
+ struct ecore_mcp_cmd_elem,
+ list);
+ return !p_cmd_elem->b_is_completed;
+ }
+
+ return false;
+}
+
+/* Must be called while cmd_lock is acquired */
+static enum _ecore_status_t
+ecore_mcp_update_pending_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+ struct ecore_mcp_mb_params *p_mb_params;
+ struct ecore_mcp_cmd_elem *p_cmd_elem;
+ u32 mcp_resp;
+ u16 seq_num;
+
+ mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
+ seq_num = (u16)(mcp_resp & FW_MSG_SEQ_NUMBER_MASK);
+
+ /* Return if no new unhandled response has been received */
+ if (seq_num != p_hwfn->mcp_info->drv_mb_seq)
+ return ECORE_AGAIN;
+
+ p_cmd_elem = ecore_mcp_cmd_get_elem(p_hwfn, seq_num);
+ if (!p_cmd_elem) {
+ DP_ERR(p_hwfn,
+ "Failed to find a pending mailbox cmd that expects sequence number %d\n",
+ seq_num);
+ return ECORE_UNKNOWN_ERROR;
+ }
+
+ p_mb_params = p_cmd_elem->p_mb_params;
+
+ /* Get the MFW response along with the sequence number */
+ p_mb_params->mcp_resp = mcp_resp;
+
+ /* Get the MFW param */
+ p_mb_params->mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
+
+ /* Get the union data */
+ if (p_mb_params->p_data_dst != OSAL_NULL &&
+ p_mb_params->data_dst_size) {
+ u32 union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
+ OFFSETOF(struct public_drv_mb,
+ union_data);
+ ecore_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
+ union_data_addr, p_mb_params->data_dst_size);
+ }
+
+ p_cmd_elem->b_is_completed = true;
+
+ return ECORE_SUCCESS;
+}
+
+/* Must be called while cmd_lock is acquired */
+static void __ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_mcp_mb_params *p_mb_params,
+ u16 seq_num)
+{
+ union drv_union_data union_data;
+ u32 union_data_addr;
+
+ /* Set the union data */
+ union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
+ OFFSETOF(struct public_drv_mb, union_data);
+ OSAL_MEM_ZERO(&union_data, sizeof(union_data));
+ if (p_mb_params->p_data_src != OSAL_NULL && p_mb_params->data_src_size)
+ OSAL_MEMCPY(&union_data, p_mb_params->p_data_src,
+ p_mb_params->data_src_size);
+ ecore_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data,
+ sizeof(union_data));
+
+ /* Set the drv param */
+ DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, p_mb_params->param);
+
+ /* Set the drv command along with the sequence number */
+ DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (p_mb_params->cmd | seq_num));
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "MFW mailbox: command 0x%08x param 0x%08x\n",
+ (p_mb_params->cmd | seq_num), p_mb_params->param);
+}
+
+static void ecore_mcp_cmd_set_blocking(struct ecore_hwfn *p_hwfn,
+ bool block_cmd)
+{
+ p_hwfn->mcp_info->b_block_cmd = block_cmd;
+
+ DP_INFO(p_hwfn, "%s sending of mailbox commands to the MFW\n",
+ block_cmd ? "Block" : "Unblock");
+}
+
+void ecore_mcp_print_cpu_info(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2;
+
+ cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
+ cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
+ cpu_pc_0 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
+ OSAL_UDELAY(CHIP_MCP_RESP_ITER_US);
+ cpu_pc_1 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
+ OSAL_UDELAY(CHIP_MCP_RESP_ITER_US);
+ cpu_pc_2 = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
+
+ DP_NOTICE(p_hwfn, false,
+ "MCP CPU info: mode 0x%08x, state 0x%08x, pc {0x%08x, 0x%08x, 0x%08x}\n",
+ cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2);
+}
+
+static enum _ecore_status_t
+_ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_mcp_mb_params *p_mb_params,
+ u32 max_retries, u32 delay)
+{
+ struct ecore_mcp_cmd_elem *p_cmd_elem;
+ u32 cnt = 0;
+ u16 seq_num;
enum _ecore_status_t rc = ECORE_SUCCESS;
-#ifndef ASIC_ONLY
- if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
- delay = EMUL_MCP_RESP_ITER_US;
- /* There is a built-in delay of 100usec in each MFW response read */
- if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
- max_retries /= 10;
-#endif
+ /* Wait until the mailbox is unoccupied */
+ do {
+ /* Exit the loop if there is no pending command, or if the
+ * pending command is completed during this iteration.
+ * The spinlock stays locked until the command is sent.
+ */
- /* Get actual driver mailbox sequence */
- actual_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
- DRV_MSG_SEQ_NUMBER_MASK;
+ OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
- /* Use MCP history register to check if MCP reset occurred between
- * init time and now.
- */
- if (p_hwfn->mcp_info->mcp_hist !=
- ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
- DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Rereading MCP offsets\n");
- ecore_load_mcp_offsets(p_hwfn, p_ptt);
- ecore_mcp_cmd_port_init(p_hwfn, p_ptt);
+ if (!ecore_mcp_has_pending_cmd(p_hwfn))
+ break;
+
+ rc = ecore_mcp_update_pending_cmd(p_hwfn, p_ptt);
+ if (rc == ECORE_SUCCESS)
+ break;
+ else if (rc != ECORE_AGAIN)
+ goto err;
+
+ OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
+ OSAL_UDELAY(delay);
+ OSAL_MFW_CMD_PREEMPT(p_hwfn);
+ } while (++cnt < max_retries);
+
+ if (cnt >= max_retries) {
+ DP_NOTICE(p_hwfn, false,
+ "The MFW mailbox is occupied by an uncompleted command. Failed to send command 0x%08x [param 0x%08x].\n",
+ p_mb_params->cmd, p_mb_params->param);
+ return ECORE_AGAIN;
}
- seq = ++p_hwfn->mcp_info->drv_mb_seq;
- /* Set drv param */
- DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, param);
+ /* Send the mailbox command */
+ ecore_mcp_reread_offsets(p_hwfn, p_ptt);
+ seq_num = ++p_hwfn->mcp_info->drv_mb_seq;
+ p_cmd_elem = ecore_mcp_cmd_add_elem(p_hwfn, p_mb_params, seq_num);
+ if (!p_cmd_elem) {
+ rc = ECORE_NOMEM;
+ goto err;
+ }
- /* Set drv command along with the updated sequence */
- DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (cmd | seq));
+ __ecore_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, seq_num);
+ OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
+ /* Wait for the MFW response */
do {
- /* Wait for MFW response */
+ /* Exit the loop if the command is already completed, or if the
+ * command is completed during this iteration.
+ * The spinlock stays locked until the list element is removed.
+ */
+
OSAL_UDELAY(delay);
- *o_mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
+ OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
- /* Give the FW up to 5 second (500*10ms) */
- } while ((seq != (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) &&
- (cnt++ < max_retries));
+ if (p_cmd_elem->b_is_completed)
+ break;
- /* Is this a reply to our command? */
- if (seq == (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) {
- *o_mcp_resp &= FW_MSG_CODE_MASK;
- /* Get the MCP param */
- *o_mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
- } else {
- /* FW BUG! */
- DP_ERR(p_hwfn, "MFW failed to respond [cmd 0x%x param 0x%x]\n",
- cmd, param);
- *o_mcp_resp = 0;
- rc = ECORE_AGAIN;
+ rc = ecore_mcp_update_pending_cmd(p_hwfn, p_ptt);
+ if (rc == ECORE_SUCCESS)
+ break;
+ else if (rc != ECORE_AGAIN)
+ goto err;
+
+ OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
+ OSAL_MFW_CMD_PREEMPT(p_hwfn);
+ } while (++cnt < max_retries);
+
+ if (cnt >= max_retries) {
+ DP_NOTICE(p_hwfn, false,
+ "The MFW failed to respond to command 0x%08x [param 0x%08x].\n",
+ p_mb_params->cmd, p_mb_params->param);
+ ecore_mcp_print_cpu_info(p_hwfn, p_ptt);
+
+ OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
+ ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
+ OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
+
+ ecore_mcp_cmd_set_blocking(p_hwfn, true);
ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_MFW_RESP_FAIL);
+ return ECORE_AGAIN;
}
+
+ ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
+ OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n",
+ p_mb_params->mcp_resp, p_mb_params->mcp_param,
+ (cnt * delay) / 1000, (cnt * delay) % 1000);
+
+ /* Clear the sequence number from the MFW response */
+ p_mb_params->mcp_resp &= FW_MSG_CODE_MASK;
+
+ return ECORE_SUCCESS;
+
+err:
+ OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
return rc;
}
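
A minimal, standalone sketch of the two-stage pattern implemented above
(wait for the mailbox to free up, send under the lock, then poll for the
response), with pthreads and a fake firmware standing in for the OSAL
shims and the MFW. All names here are illustrative, not the driver's API:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t cmd_lock = PTHREAD_MUTEX_INITIALIZER;
static bool mbox_busy;
static int polls_left;	/* fake MFW: "responds" after a few polls */

static bool fw_completed(void)
{
	return --polls_left <= 0;
}

static int send_mb_cmd(unsigned int max_retries, unsigned int delay_us)
{
	unsigned int cnt = 0;

	/* Stage 1: wait until no command occupies the mailbox. The lock
	 * is kept across the check so the send below cannot race.
	 */
	do {
		pthread_mutex_lock(&cmd_lock);
		if (!mbox_busy)
			break;
		pthread_mutex_unlock(&cmd_lock);
		usleep(delay_us);
	} while (++cnt < max_retries);
	if (cnt >= max_retries)
		return -1;		/* occupied by an uncompleted cmd */

	mbox_busy = true;		/* "send": write header + sequence */
	polls_left = 3;
	pthread_mutex_unlock(&cmd_lock);

	/* Stage 2: poll for the response under the same lock. */
	for (cnt = 0; cnt < max_retries; cnt++) {
		usleep(delay_us);
		pthread_mutex_lock(&cmd_lock);
		if (fw_completed()) {
			mbox_busy = false;
			pthread_mutex_unlock(&cmd_lock);
			return 0;
		}
		pthread_mutex_unlock(&cmd_lock);
	}
	return -1;			/* the MFW failed to respond */
}

int main(void)
{
	printf("send_mb_cmd -> %d\n", send_mb_cmd(10, 1000));
	return 0;
}
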
@@ -364,9 +579,17 @@ ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_mcp_mb_params *p_mb_params)
{
- union drv_union_data union_data;
- u32 union_data_addr;
- enum _ecore_status_t rc;
+ osal_size_t union_data_size = sizeof(union drv_union_data);
+ u32 max_retries = ECORE_DRV_MB_MAX_RETRIES;
+ u32 delay = CHIP_MCP_RESP_ITER_US;
+
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
+ delay = EMUL_MCP_RESP_ITER_US;
+ /* There is a built-in delay of 100usec in each MFW response read */
+ if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
+ max_retries /= 10;
+#endif
/* MCP not initialized */
if (!ecore_mcp_is_init(p_hwfn)) {
@@ -374,44 +597,24 @@ ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
return ECORE_BUSY;
}
- if (p_mb_params->data_src_size > sizeof(union_data) ||
- p_mb_params->data_dst_size > sizeof(union_data)) {
+ if (p_mb_params->data_src_size > union_data_size ||
+ p_mb_params->data_dst_size > union_data_size) {
DP_ERR(p_hwfn,
"The provided size is larger than the union data size [src_size %u, dst_size %u, union_data_size %zu]\n",
p_mb_params->data_src_size, p_mb_params->data_dst_size,
- sizeof(union_data));
+ union_data_size);
return ECORE_INVAL;
}
- union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
- OFFSETOF(struct public_drv_mb, union_data);
-
- /* Ensure that only a single thread is accessing the mailbox at a
- * certain time.
- */
- rc = ecore_mcp_mb_lock(p_hwfn, p_mb_params->cmd);
- if (rc != ECORE_SUCCESS)
- return rc;
-
- OSAL_MEM_ZERO(&union_data, sizeof(union_data));
- if (p_mb_params->p_data_src != OSAL_NULL && p_mb_params->data_src_size)
- OSAL_MEMCPY(&union_data, p_mb_params->p_data_src,
- p_mb_params->data_src_size);
- ecore_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data,
- sizeof(union_data));
-
- rc = ecore_do_mcp_cmd(p_hwfn, p_ptt, p_mb_params->cmd,
- p_mb_params->param, &p_mb_params->mcp_resp,
- &p_mb_params->mcp_param);
-
- if (p_mb_params->p_data_dst != OSAL_NULL &&
- p_mb_params->data_dst_size)
- ecore_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
- union_data_addr, p_mb_params->data_dst_size);
-
- ecore_mcp_mb_unlock(p_hwfn, p_mb_params->cmd);
+ if (p_hwfn->mcp_info->b_block_cmd) {
+ DP_NOTICE(p_hwfn, false,
+ "The MFW is not responsive. Avoid sending mailbox command 0x%08x [param 0x%08x].\n",
+ p_mb_params->cmd, p_mb_params->param);
+ return ECORE_ABORTED;
+ }
- return rc;
+ return _ecore_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries,
+ delay);
}
enum _ecore_status_t ecore_mcp_cmd(struct ecore_hwfn *p_hwfn,
@@ -520,7 +723,7 @@ static void ecore_mcp_mf_workaround(struct ecore_hwfn *p_hwfn,
load_phase = FW_MSG_CODE_DRV_LOAD_FUNCTION;
/* On CMT, always tell that it's engine */
- if (p_hwfn->p_dev->num_hwfns > 1)
+ if (ECORE_IS_CMT(p_hwfn->p_dev))
load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
*p_load_code = load_phase;
@@ -534,11 +737,28 @@ static void ecore_mcp_mf_workaround(struct ecore_hwfn *p_hwfn,
}
#endif
-static bool ecore_mcp_can_force_load(u8 drv_role, u8 exist_drv_role)
+static bool
+ecore_mcp_can_force_load(u8 drv_role, u8 exist_drv_role,
+ enum ecore_override_force_load override_force_load)
{
- return (drv_role == DRV_ROLE_OS &&
- exist_drv_role == DRV_ROLE_PREBOOT) ||
- (drv_role == DRV_ROLE_KDUMP && exist_drv_role == DRV_ROLE_OS);
+ bool can_force_load = false;
+
+ switch (override_force_load) {
+ case ECORE_OVERRIDE_FORCE_LOAD_ALWAYS:
+ can_force_load = true;
+ break;
+ case ECORE_OVERRIDE_FORCE_LOAD_NEVER:
+ can_force_load = false;
+ break;
+ default:
+ can_force_load = (drv_role == DRV_ROLE_OS &&
+ exist_drv_role == DRV_ROLE_PREBOOT) ||
+ (drv_role == DRV_ROLE_KDUMP &&
+ exist_drv_role == DRV_ROLE_OS);
+ break;
+ }
+
+ return can_force_load;
}
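
The override handling above reduces to a three-way switch around the
legacy role rules. A tiny standalone rendering (the enum names are
shorthand for this sketch, not the driver's):

#include <stdbool.h>
#include <stdio.h>

enum override { OVERRIDE_DEFAULT, OVERRIDE_ALWAYS, OVERRIDE_NEVER };
enum role { ROLE_OS, ROLE_PREBOOT, ROLE_KDUMP };

static bool can_force_load(enum role drv, enum role exist, enum override ov)
{
	if (ov == OVERRIDE_ALWAYS)
		return true;
	if (ov == OVERRIDE_NEVER)
		return false;
	/* Default: an OS driver may replace a preboot driver, and a
	 * kdump driver may replace an OS driver.
	 */
	return (drv == ROLE_OS && exist == ROLE_PREBOOT) ||
	       (drv == ROLE_KDUMP && exist == ROLE_OS);
}

int main(void)
{
	printf("%d %d %d\n",	/* expected: 1 0 1 */
	       can_force_load(ROLE_OS, ROLE_PREBOOT, OVERRIDE_DEFAULT),
	       can_force_load(ROLE_OS, ROLE_OS, OVERRIDE_DEFAULT),
	       can_force_load(ROLE_OS, ROLE_OS, OVERRIDE_ALWAYS));
	return 0;
}
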
static enum _ecore_status_t ecore_mcp_cancel_load_req(struct ecore_hwfn *p_hwfn,
@@ -631,18 +851,16 @@ __ecore_mcp_load_req(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
load_req.drv_ver_0 = p_in_params->drv_ver_0;
load_req.drv_ver_1 = p_in_params->drv_ver_1;
load_req.fw_ver = p_in_params->fw_ver;
- ECORE_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_ROLE,
- p_in_params->drv_role);
- ECORE_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO,
- p_in_params->timeout_val);
- ECORE_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FORCE,
- p_in_params->force_cmd);
- ECORE_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FLAGS0,
- p_in_params->avoid_eng_reset);
+ SET_MFW_FIELD(load_req.misc0, LOAD_REQ_ROLE, p_in_params->drv_role);
+ SET_MFW_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO,
+ p_in_params->timeout_val);
+ SET_MFW_FIELD(load_req.misc0, LOAD_REQ_FORCE, p_in_params->force_cmd);
+ SET_MFW_FIELD(load_req.misc0, LOAD_REQ_FLAGS0,
+ p_in_params->avoid_eng_reset);
hsi_ver = (p_in_params->hsi_ver == ECORE_LOAD_REQ_HSI_VER_DEFAULT) ?
DRV_ID_MCP_HSI_VER_CURRENT :
- (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_SHIFT);
+ (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_OFFSET);
OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
@@ -655,22 +873,20 @@ __ecore_mcp_load_req(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
"Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
mb_params.param,
- ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW),
- ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_TYPE),
- ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER),
- ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER));
+ GET_MFW_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW),
+ GET_MFW_FIELD(mb_params.param, DRV_ID_DRV_TYPE),
+ GET_MFW_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER),
+ GET_MFW_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER));
if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1)
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
"Load Request: drv_ver 0x%08x_0x%08x, fw_ver 0x%08x, misc0 0x%08x [role %d, timeout %d, force %d, flags0 0x%x]\n",
load_req.drv_ver_0, load_req.drv_ver_1,
load_req.fw_ver, load_req.misc0,
- ECORE_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_ROLE),
- ECORE_MFW_GET_FIELD(load_req.misc0,
- LOAD_REQ_LOCK_TO),
- ECORE_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_FORCE),
- ECORE_MFW_GET_FIELD(load_req.misc0,
- LOAD_REQ_FLAGS0));
+ GET_MFW_FIELD(load_req.misc0, LOAD_REQ_ROLE),
+ GET_MFW_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO),
+ GET_MFW_FIELD(load_req.misc0, LOAD_REQ_FORCE),
+ GET_MFW_FIELD(load_req.misc0, LOAD_REQ_FLAGS0));
rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
if (rc != ECORE_SUCCESS) {
@@ -689,29 +905,27 @@ __ecore_mcp_load_req(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
"Load Response: exist_drv_ver 0x%08x_0x%08x, exist_fw_ver 0x%08x, misc0 0x%08x [exist_role %d, mfw_hsi %d, flags0 0x%x]\n",
load_rsp.drv_ver_0, load_rsp.drv_ver_1,
load_rsp.fw_ver, load_rsp.misc0,
- ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE),
- ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI),
- ECORE_MFW_GET_FIELD(load_rsp.misc0,
- LOAD_RSP_FLAGS0));
+ GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_ROLE),
+ GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_HSI),
+ GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0));
p_out_params->exist_drv_ver_0 = load_rsp.drv_ver_0;
p_out_params->exist_drv_ver_1 = load_rsp.drv_ver_1;
p_out_params->exist_fw_ver = load_rsp.fw_ver;
p_out_params->exist_drv_role =
- ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE);
+ GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_ROLE);
p_out_params->mfw_hsi_ver =
- ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI);
+ GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_HSI);
p_out_params->drv_exists =
- ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0) &
+ GET_MFW_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0) &
LOAD_RSP_FLAGS0_DRV_EXISTS;
}
return ECORE_SUCCESS;
}
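
The SET_MFW_FIELD()/GET_MFW_FIELD() helpers used throughout the load
request follow the usual <FIELD>_MASK / <FIELD>_OFFSET convention. A
standalone sketch of the pair; the ROLE field layout below is assumed
for illustration only:

#include <stdint.h>
#include <stdio.h>

#define LOAD_REQ_ROLE_MASK	0x000000e0u	/* assumed layout */
#define LOAD_REQ_ROLE_OFFSET	5

#define SET_MFW_FIELD(var, field, val) \
	((var) = ((var) & ~field##_MASK) | \
		 (((uint32_t)(val) << field##_OFFSET) & field##_MASK))
#define GET_MFW_FIELD(var, field) \
	(((var) & field##_MASK) >> field##_OFFSET)

int main(void)
{
	uint32_t misc0 = 0;

	SET_MFW_FIELD(misc0, LOAD_REQ_ROLE, 2);
	printf("misc0=0x%08x role=%u\n", (unsigned int)misc0,
	       (unsigned int)GET_MFW_FIELD(misc0, LOAD_REQ_ROLE));
	return 0;
}
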
-static enum _ecore_status_t eocre_get_mfw_drv_role(struct ecore_hwfn *p_hwfn,
- enum ecore_drv_role drv_role,
- u8 *p_mfw_drv_role)
+static void ecore_get_mfw_drv_role(enum ecore_drv_role drv_role,
+ u8 *p_mfw_drv_role)
{
switch (drv_role) {
case ECORE_DRV_ROLE_OS:
@@ -720,12 +934,7 @@ static enum _ecore_status_t eocre_get_mfw_drv_role(struct ecore_hwfn *p_hwfn,
case ECORE_DRV_ROLE_KDUMP:
*p_mfw_drv_role = DRV_ROLE_KDUMP;
break;
- default:
- DP_ERR(p_hwfn, "Unexpected driver role %d\n", drv_role);
- return ECORE_INVAL;
}
-
- return ECORE_SUCCESS;
}
enum ecore_load_req_force {
@@ -734,10 +943,8 @@ enum ecore_load_req_force {
ECORE_LOAD_REQ_FORCE_ALL,
};
-static enum _ecore_status_t
-ecore_get_mfw_force_cmd(struct ecore_hwfn *p_hwfn,
- enum ecore_load_req_force force_cmd,
- u8 *p_mfw_force_cmd)
+static void ecore_get_mfw_force_cmd(enum ecore_load_req_force force_cmd,
+ u8 *p_mfw_force_cmd)
{
switch (force_cmd) {
case ECORE_LOAD_REQ_FORCE_NONE:
@@ -749,12 +956,7 @@ ecore_get_mfw_force_cmd(struct ecore_hwfn *p_hwfn,
case ECORE_LOAD_REQ_FORCE_ALL:
*p_mfw_force_cmd = LOAD_REQ_FORCE_ALL;
break;
- default:
- DP_ERR(p_hwfn, "Unexpected force value %d\n", force_cmd);
- return ECORE_INVAL;
}
-
- return ECORE_SUCCESS;
}
enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
@@ -763,7 +965,7 @@ enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
{
struct ecore_load_req_out_params out_params;
struct ecore_load_req_in_params in_params;
- u8 mfw_drv_role, mfw_force_cmd;
+ u8 mfw_drv_role = 0, mfw_force_cmd;
enum _ecore_status_t rc;
#ifndef ASIC_ONLY
@@ -778,17 +980,10 @@ enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
in_params.drv_ver_0 = ECORE_VERSION;
in_params.drv_ver_1 = ecore_get_config_bitmap();
in_params.fw_ver = STORM_FW_VERSION;
- rc = eocre_get_mfw_drv_role(p_hwfn, p_params->drv_role, &mfw_drv_role);
- if (rc != ECORE_SUCCESS)
- return rc;
-
+ ecore_get_mfw_drv_role(p_params->drv_role, &mfw_drv_role);
in_params.drv_role = mfw_drv_role;
in_params.timeout_val = p_params->timeout_val;
- rc = ecore_get_mfw_force_cmd(p_hwfn, ECORE_LOAD_REQ_FORCE_NONE,
- &mfw_force_cmd);
- if (rc != ECORE_SUCCESS)
- return rc;
-
+ ecore_get_mfw_force_cmd(ECORE_LOAD_REQ_FORCE_NONE, &mfw_force_cmd);
in_params.force_cmd = mfw_force_cmd;
in_params.avoid_eng_reset = p_params->avoid_eng_reset;
@@ -805,9 +1000,6 @@ enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
DP_INFO(p_hwfn,
"MFW refused a load request due to HSI > 1. Resending with HSI = 1.\n");
- /* The previous load request set the mailbox blocking */
- p_hwfn->mcp_info->block_mb_sending = false;
-
in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_1;
OSAL_MEM_ZERO(&out_params, sizeof(out_params));
rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
@@ -816,23 +1008,20 @@ enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
return rc;
} else if (out_params.load_code ==
FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE) {
- /* The previous load request set the mailbox blocking */
- p_hwfn->mcp_info->block_mb_sending = false;
-
if (ecore_mcp_can_force_load(in_params.drv_role,
- out_params.exist_drv_role)) {
+ out_params.exist_drv_role,
+ p_params->override_force_load)) {
DP_INFO(p_hwfn,
- "A force load is required [existing: role %d, fw_ver 0x%08x, drv_ver 0x%08x_0x%08x]. Sending a force load request.\n",
+ "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, 0x%08x_%08x}, existing={%d, 0x%08x, 0x%08x_%08x}]\n",
+ in_params.drv_role, in_params.fw_ver,
+ in_params.drv_ver_0, in_params.drv_ver_1,
out_params.exist_drv_role,
out_params.exist_fw_ver,
out_params.exist_drv_ver_0,
out_params.exist_drv_ver_1);
- rc = ecore_get_mfw_force_cmd(p_hwfn,
- ECORE_LOAD_REQ_FORCE_ALL,
- &mfw_force_cmd);
- if (rc != ECORE_SUCCESS)
- return rc;
+ ecore_get_mfw_force_cmd(ECORE_LOAD_REQ_FORCE_ALL,
+ &mfw_force_cmd);
in_params.force_cmd = mfw_force_cmd;
OSAL_MEM_ZERO(&out_params, sizeof(out_params));
@@ -842,7 +1031,9 @@ enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
return rc;
} else {
DP_NOTICE(p_hwfn, false,
- "A force load is required [existing: role %d, fw_ver 0x%08x, drv_ver 0x%08x_0x%08x]. Avoiding to prevent disruption of active PFs.\n",
+			  "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, 0x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}] - Avoid\n",
+ in_params.drv_role, in_params.fw_ver,
+ in_params.drv_ver_0, in_params.drv_ver_1,
out_params.exist_drv_role,
out_params.exist_fw_ver,
out_params.exist_drv_ver_0,
@@ -873,19 +1064,11 @@ enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
return ECORE_INVAL;
}
break;
- case FW_MSG_CODE_DRV_LOAD_REFUSED_PDA:
- case FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG:
- case FW_MSG_CODE_DRV_LOAD_REFUSED_HSI:
- case FW_MSG_CODE_DRV_LOAD_REFUSED_REJECT:
- DP_NOTICE(p_hwfn, false,
- "MFW refused a load request [resp 0x%08x]. Aborting.\n",
- out_params.load_code);
- return ECORE_BUSY;
default:
DP_NOTICE(p_hwfn, false,
- "Unexpected response to load request [resp 0x%08x]. Aborting.\n",
+			  "Unexpected refusal of the load request [resp 0x%08x]. Aborting.\n",
out_params.load_code);
- break;
+ return ECORE_BUSY;
}
p_params->load_code = out_params.load_code;
@@ -907,8 +1090,6 @@ enum _ecore_status_t ecore_mcp_load_done(struct ecore_hwfn *p_hwfn,
return rc;
}
-#define FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR (1 << 0)
-
/* Check if there is a DID mismatch between nvm-cfg/efuse */
if (param & FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR)
DP_NOTICE(p_hwfn, false,
@@ -1029,12 +1210,60 @@ static void ecore_mcp_handle_transceiver_change(struct ecore_hwfn *p_hwfn,
OFFSETOF(struct public_port,
transceiver_data)));
- transceiver_state = GET_FIELD(transceiver_state, ETH_TRANSCEIVER_STATE);
+ transceiver_state = GET_MFW_FIELD(transceiver_state,
+ ETH_TRANSCEIVER_STATE);
if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
DP_NOTICE(p_hwfn, false, "Transceiver is present.\n");
else
DP_NOTICE(p_hwfn, false, "Transceiver is unplugged.\n");
+
+ OSAL_TRANSCEIVER_UPDATE(p_hwfn);
+}
+
+static void ecore_mcp_read_eee_config(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_mcp_link_state *p_link)
+{
+ u32 eee_status, val;
+
+ p_link->eee_adv_caps = 0;
+ p_link->eee_lp_adv_caps = 0;
+ eee_status = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
+ OFFSETOF(struct public_port, eee_status));
+ p_link->eee_active = !!(eee_status & EEE_ACTIVE_BIT);
+ val = (eee_status & EEE_LD_ADV_STATUS_MASK) >> EEE_LD_ADV_STATUS_OFFSET;
+ if (val & EEE_1G_ADV)
+ p_link->eee_adv_caps |= ECORE_EEE_1G_ADV;
+ if (val & EEE_10G_ADV)
+ p_link->eee_adv_caps |= ECORE_EEE_10G_ADV;
+ val = (eee_status & EEE_LP_ADV_STATUS_MASK) >> EEE_LP_ADV_STATUS_OFFSET;
+ if (val & EEE_1G_ADV)
+ p_link->eee_lp_adv_caps |= ECORE_EEE_1G_ADV;
+ if (val & EEE_10G_ADV)
+ p_link->eee_lp_adv_caps |= ECORE_EEE_10G_ADV;
+}
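
To make the decoding above concrete, here is a standalone decode of a
sample eee_status word. The mask/offset/bit values are placeholders for
this sketch; the authoritative definitions live in the MFW HSI headers:

#include <stdint.h>
#include <stdio.h>

#define EEE_ACTIVE_BIT		(1u << 0)	/* placeholder layout */
#define EEE_LD_ADV_STATUS_MASK	0x000000f0u
#define EEE_LD_ADV_STATUS_OFFSET 4
#define EEE_LP_ADV_STATUS_MASK	0x00000f00u
#define EEE_LP_ADV_STATUS_OFFSET 8
#define EEE_1G_ADV		(1u << 1)
#define EEE_10G_ADV		(1u << 2)

int main(void)
{
	uint32_t eee_status = 0x00000661u;	/* sample register value */
	uint32_t ld, lp;

	ld = (eee_status & EEE_LD_ADV_STATUS_MASK) >>
	     EEE_LD_ADV_STATUS_OFFSET;
	lp = (eee_status & EEE_LP_ADV_STATUS_MASK) >>
	     EEE_LP_ADV_STATUS_OFFSET;
	printf("active=%d ld_1g=%d ld_10g=%d lp_1g=%d lp_10g=%d\n",
	       !!(eee_status & EEE_ACTIVE_BIT),
	       !!(ld & EEE_1G_ADV), !!(ld & EEE_10G_ADV),
	       !!(lp & EEE_1G_ADV), !!(lp & EEE_10G_ADV));
	return 0;
}
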
+
+static u32 ecore_mcp_get_shmem_func(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct public_func *p_data,
+ int pfid)
+{
+ u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+ PUBLIC_FUNC);
+ u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
+ u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
+ u32 i, size;
+
+ OSAL_MEM_ZERO(p_data, sizeof(*p_data));
+
+ size = OSAL_MIN_T(u32, sizeof(*p_data),
+ SECTION_SIZE(mfw_path_offsize));
+ for (i = 0; i < size / sizeof(u32); i++)
+ ((u32 *)p_data)[i] = ecore_rd(p_hwfn, p_ptt,
+ func_addr + (i << 2));
+
+ return size;
}
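
The helper above copies the per-PF shmem section one dword at a time,
bounded by both the destination size and the section size. A host-side
sketch of that pattern, with a plain array standing in for device memory
and ecore_rd():

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t shmem[64];		/* stands in for device memory */

static uint32_t rd32(uint32_t addr)	/* stands in for ecore_rd() */
{
	return shmem[addr / 4];
}

/* Copy min(dst_size, sec_size) bytes, one 32-bit read at a time. */
static uint32_t copy_section(void *dst, uint32_t dst_size,
			     uint32_t sec_addr, uint32_t sec_size)
{
	uint32_t size = dst_size < sec_size ? dst_size : sec_size;
	uint32_t i;

	memset(dst, 0, dst_size);
	for (i = 0; i < size / sizeof(uint32_t); i++)
		((uint32_t *)dst)[i] = rd32(sec_addr + (i << 2));
	return size;
}

int main(void)
{
	uint32_t buf[4];

	shmem[0] = 0x11111111;
	shmem[1] = 0x22222222;
	printf("copied %u bytes, buf[1]=0x%08x\n",
	       (unsigned int)copy_section(buf, sizeof(buf), 0, 8),
	       (unsigned int)buf[1]);
	return 0;
}
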
static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
@@ -1045,6 +1274,9 @@ static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
u8 max_bw, min_bw;
u32 status = 0;
+	/* Prevent the SW path and the attention handler from processing
+	 * a link change concurrently.
+	 */
+ OSAL_SPIN_LOCK(&p_hwfn->mcp_info->link_lock);
+
p_link = &p_hwfn->mcp_info->link_output;
OSAL_MEMSET(p_link, 0, sizeof(*p_link));
if (!b_reset) {
@@ -1060,13 +1292,27 @@ static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
} else {
DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
"Resetting link indications\n");
- return;
+ goto out;
}
- if (p_hwfn->b_drv_link_init)
- p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
- else
+ if (p_hwfn->b_drv_link_init) {
+ /* Link indication with modern MFW arrives as per-PF
+ * indication.
+ */
+ if (p_hwfn->mcp_info->capabilities &
+ FW_MB_PARAM_FEATURE_SUPPORT_VLINK) {
+ struct public_func shmem_info;
+
+ ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
+ MCP_PF_ID(p_hwfn));
+ p_link->link_up = !!(shmem_info.status &
+ FUNC_STATUS_VIRTUAL_LINK_UP);
+ } else {
+ p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
+ }
+ } else {
p_link->link_up = false;
+ }
p_link->full_duplex = true;
switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
@@ -1113,10 +1359,10 @@ static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
__ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt,
p_link, max_bw);
- /* Mintz bandwidth configuration */
+ /* Min bandwidth configuration */
__ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt,
p_link, min_bw);
- ecore_configure_vp_wfq_on_link_change(p_hwfn->p_dev,
+ ecore_configure_vp_wfq_on_link_change(p_hwfn->p_dev, p_ptt,
p_link->min_pf_rate);
p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
@@ -1171,7 +1417,12 @@ static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);
+ if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE)
+ ecore_mcp_read_eee_config(p_hwfn, p_ptt, p_link);
+
OSAL_LINK_UPDATE(p_hwfn);
+out:
+ OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->link_lock);
}
enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
@@ -1198,12 +1449,32 @@ enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
phy_cfg.adv_speed = params->speed.advertised_speeds;
phy_cfg.loopback_mode = params->loopback_mode;
+
+	/* Some MFWs advertise this capability regardless of whether it is
+	 * actually feasible. Since adv_caps would be set internally by ecore
+	 * in any case, make sure LFA would still work.
+	 */
+ if ((p_hwfn->mcp_info->capabilities &
+ FW_MB_PARAM_FEATURE_SUPPORT_EEE) &&
+ params->eee.enable) {
+ phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED;
+ if (params->eee.tx_lpi_enable)
+ phy_cfg.eee_cfg |= EEE_CFG_TX_LPI;
+ if (params->eee.adv_caps & ECORE_EEE_1G_ADV)
+ phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_1G;
+ if (params->eee.adv_caps & ECORE_EEE_10G_ADV)
+ phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_10G;
+ phy_cfg.eee_cfg |= (params->eee.tx_lpi_timer <<
+ EEE_TX_TIMER_USEC_OFFSET) &
+ EEE_TX_TIMER_USEC_MASK;
+ }
+
p_hwfn->b_drv_link_init = b_up;
if (b_up)
DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
- "Configuring Link: Speed 0x%08x, Pause 0x%08x,"
- " adv_speed 0x%08x, loopback 0x%08x\n",
+ "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x\n",
phy_cfg.speed, phy_cfg.pause, phy_cfg.adv_speed,
phy_cfg.loopback_mode);
else
@@ -1221,11 +1492,15 @@ enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
return rc;
}
- /* Reset the link status if needed */
- if (!b_up)
- ecore_mcp_handle_link_change(p_hwfn, p_ptt, true);
+ /* Mimic link-change attention, done for several reasons:
+ * - On reset, there's no guarantee MFW would trigger
+ * an attention.
+ * - On initialization, older MFWs might not indicate link change
+ * during LFA, so we'll never get an UP indication.
+ */
+ ecore_mcp_handle_link_change(p_hwfn, p_ptt, !b_up);
- return rc;
+ return ECORE_SUCCESS;
}
u32 ecore_get_process_kill_counter(struct ecore_hwfn *p_hwfn,
@@ -1300,7 +1575,8 @@ static void ecore_mcp_send_protocol_stats(struct ecore_hwfn *p_hwfn,
hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
break;
default:
- DP_INFO(p_hwfn, "Invalid protocol type %d\n", type);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Invalid protocol type %d\n", type);
return;
}
@@ -1331,7 +1607,7 @@ static void ecore_read_pf_bandwidth(struct ecore_hwfn *p_hwfn,
*/
p_info->bandwidth_min = (p_shmem_info->config &
FUNC_MF_CFG_MIN_BW_MASK) >>
- FUNC_MF_CFG_MIN_BW_SHIFT;
+ FUNC_MF_CFG_MIN_BW_OFFSET;
if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
DP_INFO(p_hwfn,
"bandwidth minimum out of bounds [%02x]. Set to 1\n",
@@ -1341,7 +1617,7 @@ static void ecore_read_pf_bandwidth(struct ecore_hwfn *p_hwfn,
p_info->bandwidth_max = (p_shmem_info->config &
FUNC_MF_CFG_MAX_BW_MASK) >>
- FUNC_MF_CFG_MAX_BW_SHIFT;
+ FUNC_MF_CFG_MAX_BW_OFFSET;
if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
DP_INFO(p_hwfn,
"bandwidth maximum out of bounds [%02x]. Set to 100\n",
@@ -1350,28 +1626,6 @@ static void ecore_read_pf_bandwidth(struct ecore_hwfn *p_hwfn,
}
}
-static u32 ecore_mcp_get_shmem_func(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- struct public_func *p_data,
- int pfid)
-{
- u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
- PUBLIC_FUNC);
- u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
- u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
- u32 i, size;
-
- OSAL_MEM_ZERO(p_data, sizeof(*p_data));
-
- size = OSAL_MIN_T(u32, sizeof(*p_data),
- SECTION_SIZE(mfw_path_offsize));
- for (i = 0; i < size / sizeof(u32); i++)
- ((u32 *)p_data)[i] = ecore_rd(p_hwfn, p_ptt,
- func_addr + (i << 2));
-
- return size;
-}
-
static void
ecore_mcp_update_bw(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
@@ -1394,8 +1648,7 @@ ecore_mcp_update_bw(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
&param);
}
-static void ecore_mcp_handle_fan_failure(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt)
+static void ecore_mcp_handle_fan_failure(struct ecore_hwfn *p_hwfn)
{
/* A single notification should be sent to upper driver in CMT mode */
if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
@@ -1436,11 +1689,16 @@ ecore_mcp_mdump_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
return rc;
p_mdump_cmd_params->mcp_resp = mb_params.mcp_resp;
+
if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_MDUMP_INVALID_CMD) {
- DP_NOTICE(p_hwfn, false,
- "MFW claims that the mdump command is illegal [mdump_cmd 0x%x]\n",
- p_mdump_cmd_params->cmd);
- rc = ECORE_INVAL;
+ DP_INFO(p_hwfn,
+			"The mdump sub-command is unsupported by the MFW [mdump_cmd 0x%x]\n",
+ p_mdump_cmd_params->cmd);
+ rc = ECORE_NOTIMPL;
+ } else if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
+ DP_INFO(p_hwfn,
+ "The mdump command is not supported by the MFW\n");
+ rc = ECORE_NOTIMPL;
}
return rc;
@@ -1498,16 +1756,10 @@ ecore_mcp_mdump_get_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
if (rc != ECORE_SUCCESS)
return rc;
- if (mdump_cmd_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
- DP_INFO(p_hwfn,
- "The mdump command is not supported by the MFW\n");
- return ECORE_NOTIMPL;
- }
-
if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) {
- DP_NOTICE(p_hwfn, false,
- "Failed to get the mdump configuration and logs info [mcp_resp 0x%x]\n",
- mdump_cmd_params.mcp_resp);
+ DP_INFO(p_hwfn,
+ "Failed to get the mdump configuration and logs info [mcp_resp 0x%x]\n",
+ mdump_cmd_params.mcp_resp);
rc = ECORE_UNKNOWN_ERROR;
}
@@ -1568,17 +1820,71 @@ enum _ecore_status_t ecore_mcp_mdump_clear_logs(struct ecore_hwfn *p_hwfn,
return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
}
+enum _ecore_status_t
+ecore_mcp_mdump_get_retain(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_mdump_retain_data *p_mdump_retain)
+{
+ struct ecore_mdump_cmd_params mdump_cmd_params;
+ struct mdump_retain_data_stc mfw_mdump_retain;
+ enum _ecore_status_t rc;
+
+ OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
+ mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_RETAIN;
+ mdump_cmd_params.p_data_dst = &mfw_mdump_retain;
+ mdump_cmd_params.data_dst_size = sizeof(mfw_mdump_retain);
+
+ rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) {
+ DP_INFO(p_hwfn,
+ "Failed to get the mdump retained data [mcp_resp 0x%x]\n",
+ mdump_cmd_params.mcp_resp);
+ return ECORE_UNKNOWN_ERROR;
+ }
+
+ p_mdump_retain->valid = mfw_mdump_retain.valid;
+ p_mdump_retain->epoch = mfw_mdump_retain.epoch;
+ p_mdump_retain->pf = mfw_mdump_retain.pf;
+ p_mdump_retain->status = mfw_mdump_retain.status;
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_mcp_mdump_clr_retain(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct ecore_mdump_cmd_params mdump_cmd_params;
+
+ OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
+ mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_CLR_RETAIN;
+
+ return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
+}
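
A possible caller of the two new mdump entry points might look as
follows. This is a hypothetical sketch that assumes the ecore headers
and a held PTT; it is not a standalone program:

/* Hypothetical helper; not part of the driver. */
static void report_and_clear_mdump_retain(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt)
{
	struct ecore_mdump_retain_data retain;

	if (ecore_mcp_mdump_get_retain(p_hwfn, p_ptt, &retain) !=
	    ECORE_SUCCESS || !retain.valid)
		return;

	DP_NOTICE(p_hwfn, false,
		  "mdump retain: epoch 0x%08x, pf 0x%x, status 0x%08x\n",
		  retain.epoch, retain.pf, retain.status);

	/* Clear the retained data once it has been logged. */
	ecore_mcp_mdump_clr_retain(p_hwfn, p_ptt);
}
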
+
static void ecore_mcp_handle_critical_error(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
+ struct ecore_mdump_retain_data mdump_retain;
+ enum _ecore_status_t rc;
+
/* In CMT mode - no need for more than a single acknowledgment to the
* MFW, and no more than a single notification to the upper driver.
*/
if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
return;
- DP_NOTICE(p_hwfn, false,
- "Received a critical error notification from the MFW!\n");
+ rc = ecore_mcp_mdump_get_retain(p_hwfn, p_ptt, &mdump_retain);
+ if (rc == ECORE_SUCCESS && mdump_retain.valid) {
+ DP_NOTICE(p_hwfn, false,
+ "The MFW notified that a critical error occurred in the device [epoch 0x%08x, pf 0x%x, status 0x%08x]\n",
+ mdump_retain.epoch, mdump_retain.pf,
+ mdump_retain.status);
+ } else {
+ DP_NOTICE(p_hwfn, false,
+ "The MFW notified that a critical error occurred in the device\n");
+ }
if (p_hwfn->p_dev->allow_mdump) {
DP_NOTICE(p_hwfn, false,
@@ -1586,10 +1892,80 @@ static void ecore_mcp_handle_critical_error(struct ecore_hwfn *p_hwfn,
return;
}
+ DP_NOTICE(p_hwfn, false,
+		  "Acknowledging the notification to disallow an MFW crash dump [driver debug data collection is preferable]\n");
ecore_mcp_mdump_ack(p_hwfn, p_ptt);
ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN);
}
+void
+ecore_mcp_read_ufp_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+ struct public_func shmem_info;
+ u32 port_cfg, val;
+
+ if (!OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits))
+ return;
+
+ OSAL_MEMSET(&p_hwfn->ufp_info, 0, sizeof(p_hwfn->ufp_info));
+ port_cfg = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
+ OFFSETOF(struct public_port, oem_cfg_port));
+ val = GET_MFW_FIELD(port_cfg, OEM_CFG_CHANNEL_TYPE);
+ if (val != OEM_CFG_CHANNEL_TYPE_STAGGED)
+ DP_NOTICE(p_hwfn, false, "Incorrect UFP Channel type %d\n",
+ val);
+
+ val = GET_MFW_FIELD(port_cfg, OEM_CFG_SCHED_TYPE);
+ if (val == OEM_CFG_SCHED_TYPE_ETS)
+ p_hwfn->ufp_info.mode = ECORE_UFP_MODE_ETS;
+ else if (val == OEM_CFG_SCHED_TYPE_VNIC_BW)
+ p_hwfn->ufp_info.mode = ECORE_UFP_MODE_VNIC_BW;
+ else
+ DP_NOTICE(p_hwfn, false, "Unknown UFP scheduling mode %d\n",
+ val);
+
+ ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
+ MCP_PF_ID(p_hwfn));
+ val = GET_MFW_FIELD(shmem_info.oem_cfg_func, OEM_CFG_FUNC_TC);
+ p_hwfn->ufp_info.tc = (u8)val;
+ val = GET_MFW_FIELD(shmem_info.oem_cfg_func,
+ OEM_CFG_FUNC_HOST_PRI_CTRL);
+ if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC)
+ p_hwfn->ufp_info.pri_type = ECORE_UFP_PRI_VNIC;
+ else if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_OS)
+ p_hwfn->ufp_info.pri_type = ECORE_UFP_PRI_OS;
+ else
+ DP_NOTICE(p_hwfn, false, "Unknown Host priority control %d\n",
+ val);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
+ "UFP shmem config: mode = %d tc = %d pri_type = %d\n",
+ p_hwfn->ufp_info.mode, p_hwfn->ufp_info.tc,
+ p_hwfn->ufp_info.pri_type);
+}
+
+static enum _ecore_status_t
+ecore_mcp_handle_ufp_event(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+ ecore_mcp_read_ufp_config(p_hwfn, p_ptt);
+
+ if (p_hwfn->ufp_info.mode == ECORE_UFP_MODE_VNIC_BW) {
+ p_hwfn->qm_info.ooo_tc = p_hwfn->ufp_info.tc;
+ p_hwfn->hw_info.offload_tc = p_hwfn->ufp_info.tc;
+
+ ecore_qm_reconf(p_hwfn, p_ptt);
+ } else {
+ /* Merge UFP TC with the dcbx TC data */
+ ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
+ ECORE_DCBX_OPERATIONAL_MIB);
+ }
+
+ /* update storm FW with negotiation results */
+ ecore_sp_pf_update_ufp(p_hwfn);
+
+ return ECORE_SUCCESS;
+}
+
enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
@@ -1632,6 +2008,15 @@ enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED:
ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
ECORE_DCBX_OPERATIONAL_MIB);
+ /* clear the user-config cache */
+ OSAL_MEMSET(&p_hwfn->p_dcbx_info->set, 0,
+ sizeof(struct ecore_dcbx_set));
+ break;
+ case MFW_DRV_MSG_LLDP_RECEIVED_TLVS_UPDATED:
+ ecore_lldp_mib_update_event(p_hwfn, p_ptt);
+ break;
+ case MFW_DRV_MSG_OEM_CFG_UPDATE:
+ ecore_mcp_handle_ufp_event(p_hwfn, p_ptt);
break;
case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
ecore_mcp_handle_transceiver_change(p_hwfn, p_ptt);
@@ -1649,7 +2034,7 @@ enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
ecore_mcp_update_bw(p_hwfn, p_ptt);
break;
case MFW_DRV_MSG_FAILURE_DETECTED:
- ecore_mcp_handle_fan_failure(p_hwfn, p_ptt);
+ ecore_mcp_handle_fan_failure(p_hwfn);
break;
case MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED:
ecore_mcp_handle_critical_error(p_hwfn, p_ptt);
@@ -1732,14 +2117,13 @@ enum _ecore_status_t ecore_mcp_get_mfw_ver(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
}
-enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_dev *p_dev,
+enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
u32 *p_media_type)
{
- struct ecore_hwfn *p_hwfn = &p_dev->hwfns[0];
- struct ecore_ptt *p_ptt;
/* TODO - Add support for VFs */
- if (IS_VF(p_dev))
+ if (IS_VF(p_hwfn->p_dev))
return ECORE_INVAL;
if (!ecore_mcp_is_init(p_hwfn)) {
@@ -1747,16 +2131,15 @@ enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_dev *p_dev,
return ECORE_BUSY;
}
- *p_media_type = MEDIA_UNSPECIFIED;
-
- p_ptt = ecore_ptt_acquire(p_hwfn);
- if (!p_ptt)
- return ECORE_BUSY;
-
- *p_media_type = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
- OFFSETOF(struct public_port, media_type));
-
- ecore_ptt_release(p_hwfn, p_ptt);
+ if (!p_ptt) {
+ *p_media_type = MEDIA_UNSPECIFIED;
+ return ECORE_INVAL;
+ } else {
+ *p_media_type = ecore_rd(p_hwfn, p_ptt,
+ p_hwfn->mcp_info->port_addr +
+ OFFSETOF(struct public_port,
+ media_type));
+ }
return ECORE_SUCCESS;
}
@@ -1928,42 +2311,6 @@ const struct ecore_mcp_function_info
return &p_hwfn->mcp_info->func_info;
}
-enum _ecore_status_t ecore_mcp_nvm_command(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- struct ecore_mcp_nvm_params *params)
-{
- enum _ecore_status_t rc;
-
- switch (params->type) {
- case ECORE_MCP_NVM_RD:
- rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
- params->nvm_common.offset,
- &params->nvm_common.resp,
- &params->nvm_common.param,
- params->nvm_rd.buf_size,
- params->nvm_rd.buf);
- break;
- case ECORE_MCP_CMD:
- rc = ecore_mcp_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
- params->nvm_common.offset,
- &params->nvm_common.resp,
- &params->nvm_common.param);
- break;
- case ECORE_MCP_NVM_WR:
- rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
- params->nvm_common.offset,
- &params->nvm_common.resp,
- &params->nvm_common.param,
- params->nvm_wr.buf_size,
- params->nvm_wr.buf);
- break;
- default:
- rc = ECORE_NOTIMPL;
- break;
- }
- return rc;
-}
-
int ecore_mcp_get_personality_cnt(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u32 personalities)
{
@@ -2009,8 +2356,8 @@ enum _ecore_status_t ecore_mcp_get_flash_size(struct ecore_hwfn *p_hwfn,
flash_size = ecore_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
- MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
- flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_SHIFT));
+ MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
+ flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_OFFSET));
*p_flash_size = flash_size;
@@ -2035,9 +2382,10 @@ enum _ecore_status_t ecore_start_recovery_process(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
}
-enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u8 vf_id, u8 num)
+static enum _ecore_status_t
+ecore_mcp_config_vf_msix_bb(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u8 vf_id, u8 num)
{
u32 resp = 0, param = 0, rc_param = 0;
enum _ecore_status_t rc;
@@ -2048,9 +2396,9 @@ enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
num *= p_hwfn->p_dev->num_hwfns;
- param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT) &
+ param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_OFFSET) &
DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
- param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT) &
+ param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_OFFSET) &
DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;
rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
@@ -2069,6 +2417,39 @@ enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn,
return rc;
}
+static enum _ecore_status_t
+ecore_mcp_config_vf_msix_ah(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u8 num)
+{
+ u32 resp = 0, param = num, rc_param = 0;
+ enum _ecore_status_t rc;
+
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_PF_VFS_MSIX,
+ param, &resp, &rc_param);
+
+ if (resp != FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE) {
+ DP_NOTICE(p_hwfn, true, "MFW failed to set MSI-X for VFs\n");
+ rc = ECORE_INVAL;
+ } else {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+			   "Requested 0x%02x MSI-X interrupts for VFs\n",
+ num);
+ }
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u8 vf_id, u8 num)
+{
+ if (ECORE_IS_BB(p_hwfn->p_dev))
+ return ecore_mcp_config_vf_msix_bb(p_hwfn, p_ptt, vf_id, num);
+ else
+ return ecore_mcp_config_vf_msix_ah(p_hwfn, p_ptt, num);
+}
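
On BB the request packs the VF index and the SB count into a single
param dword; on AH the param is simply the per-VF count. A standalone
sketch of the BB packing, with the field layout assumed for this
example only:

#include <stdint.h>
#include <stdio.h>

#define CFG_VF_MSIX_VF_ID_OFFSET	0	/* assumed layout */
#define CFG_VF_MSIX_VF_ID_MASK		0x000000ffu
#define CFG_VF_MSIX_SB_NUM_OFFSET	8
#define CFG_VF_MSIX_SB_NUM_MASK		0x0000ff00u

int main(void)
{
	uint8_t vf_id = 3, num = 16;
	uint32_t param = 0;

	param |= ((uint32_t)vf_id << CFG_VF_MSIX_VF_ID_OFFSET) &
		 CFG_VF_MSIX_VF_ID_MASK;
	param |= ((uint32_t)num << CFG_VF_MSIX_SB_NUM_OFFSET) &
		 CFG_VF_MSIX_SB_NUM_MASK;
	printf("param = 0x%08x\n", (unsigned int)param);  /* 0x00001003 */
	return 0;
}
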
+
enum _ecore_status_t
ecore_mcp_send_drv_version(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
struct ecore_mcp_drv_version *p_ver)
@@ -2106,33 +2487,68 @@ ecore_mcp_send_drv_version(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
return rc;
}
+/* A maximal 100 msec waiting time for the MCP to halt */
+#define ECORE_MCP_HALT_SLEEP_MS 10
+#define ECORE_MCP_HALT_MAX_RETRIES 10
+
enum _ecore_status_t ecore_mcp_halt(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
+ u32 resp = 0, param = 0, cpu_state, cnt = 0;
enum _ecore_status_t rc;
- u32 resp = 0, param = 0;
rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
&param);
- if (rc != ECORE_SUCCESS)
+ if (rc != ECORE_SUCCESS) {
DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+ return rc;
+ }
- return rc;
+ do {
+ OSAL_MSLEEP(ECORE_MCP_HALT_SLEEP_MS);
+ cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
+ if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED)
+ break;
+ } while (++cnt < ECORE_MCP_HALT_MAX_RETRIES);
+
+ if (cnt == ECORE_MCP_HALT_MAX_RETRIES) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to halt the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
+ ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE), cpu_state);
+ return ECORE_BUSY;
+ }
+
+ ecore_mcp_cmd_set_blocking(p_hwfn, true);
+
+ return ECORE_SUCCESS;
}
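
The halt flow above is a bounded poll: issue the command, then
sleep-and-recheck CPU_STATE within the 100 msec budget. The same
pattern as a standalone toy, with a fake register standing in for
MCP_REG_CPU_STATE and an assumed SOFT_HALTED bit:

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define HALT_SLEEP_MS		10
#define HALT_MAX_RETRIES	10	/* 10 x 10 ms = 100 ms budget */
#define SOFT_HALTED_BIT		0x80u	/* assumed bit position */

static unsigned int fake_cpu_state;	/* stands in for the register */

static bool soft_halted(void)
{
	fake_cpu_state += 0x20;		/* pretend the MCP makes progress */
	return fake_cpu_state & SOFT_HALTED_BIT;
}

static int wait_halted(void)
{
	unsigned int cnt = 0;

	do {
		usleep(HALT_SLEEP_MS * 1000);
		if (soft_halted())
			return 0;
	} while (++cnt < HALT_MAX_RETRIES);
	return -1;	/* busy: the MCP never reached the halted state */
}

int main(void)
{
	printf("wait_halted -> %d\n", wait_halted());
	return 0;
}
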
+#define ECORE_MCP_RESUME_SLEEP_MS 10
+
enum _ecore_status_t ecore_mcp_resume(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
- u32 value, cpu_mode;
+ u32 cpu_mode, cpu_state;
ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);
- value = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
- value &= ~MCP_REG_CPU_MODE_SOFT_HALT;
- ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, value);
cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
+ cpu_mode &= ~MCP_REG_CPU_MODE_SOFT_HALT;
+ ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, cpu_mode);
+
+ OSAL_MSLEEP(ECORE_MCP_RESUME_SLEEP_MS);
+ cpu_state = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
+
+ if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to resume the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
+ cpu_mode, cpu_state);
+ return ECORE_BUSY;
+ }
+
+ ecore_mcp_cmd_set_blocking(p_hwfn, false);
- return (cpu_mode & MCP_REG_CPU_MODE_SOFT_HALT) ? -1 : 0;
+ return ECORE_SUCCESS;
}
enum _ecore_status_t
@@ -2140,9 +2556,9 @@ ecore_mcp_ov_update_current_config(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
enum ecore_ov_client client)
{
- enum _ecore_status_t rc;
u32 resp = 0, param = 0;
u32 drv_mb_param;
+ enum _ecore_status_t rc;
switch (client) {
case ECORE_OV_CLIENT_DRV:
@@ -2172,9 +2588,9 @@ ecore_mcp_ov_update_driver_state(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
enum ecore_ov_driver_state drv_state)
{
- enum _ecore_status_t rc;
u32 resp = 0, param = 0;
u32 drv_mb_param;
+ enum _ecore_status_t rc;
switch (drv_state) {
case ECORE_OV_DRIVER_STATE_NOT_LOADED:
@@ -2247,8 +2663,8 @@ enum _ecore_status_t ecore_mcp_mask_parities(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 mask_parities)
{
- enum _ecore_status_t rc;
u32 resp = 0, param = 0;
+ enum _ecore_status_t rc;
rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES,
mask_parities, &resp, &param);
@@ -2270,7 +2686,7 @@ enum _ecore_status_t ecore_mcp_nvm_read(struct ecore_dev *p_dev, u32 addr,
{
struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
u32 bytes_left, offset, bytes_to_copy, buf_size;
- struct ecore_mcp_nvm_params params;
+ u32 nvm_offset, resp, param;
struct ecore_ptt *p_ptt;
enum _ecore_status_t rc = ECORE_SUCCESS;
@@ -2278,22 +2694,29 @@ enum _ecore_status_t ecore_mcp_nvm_read(struct ecore_dev *p_dev, u32 addr,
if (!p_ptt)
return ECORE_BUSY;
- OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
bytes_left = len;
offset = 0;
- params.type = ECORE_MCP_NVM_RD;
- params.nvm_rd.buf_size = &buf_size;
- params.nvm_common.cmd = DRV_MSG_CODE_NVM_READ_NVRAM;
while (bytes_left > 0) {
bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
MCP_DRV_NVM_BUF_LEN);
- params.nvm_common.offset = (addr + offset) |
- (bytes_to_copy << DRV_MB_PARAM_NVM_LEN_SHIFT);
- params.nvm_rd.buf = (u32 *)(p_buf + offset);
- rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
- if (rc != ECORE_SUCCESS || (params.nvm_common.resp !=
- FW_MSG_CODE_NVM_OK)) {
- DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
+ nvm_offset = (addr + offset) | (bytes_to_copy <<
+ DRV_MB_PARAM_NVM_LEN_OFFSET);
+ rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
+ DRV_MSG_CODE_NVM_READ_NVRAM,
+ nvm_offset, &resp, &param, &buf_size,
+ (u32 *)(p_buf + offset));
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_dev, false,
+ "ecore_mcp_nvm_rd_cmd() failed, rc = %d\n",
+ rc);
+ resp = FW_MSG_CODE_ERROR;
+ break;
+ }
+
+ if (resp != FW_MSG_CODE_NVM_OK) {
+ DP_NOTICE(p_dev, false,
+ "nvm read failed, resp = 0x%08x\n", resp);
+ rc = ECORE_UNKNOWN_ERROR;
break;
}
@@ -2301,14 +2724,14 @@ enum _ecore_status_t ecore_mcp_nvm_read(struct ecore_dev *p_dev, u32 addr,
* isn't preemptible. Sleep a bit to prevent CPU hogging.
*/
if (bytes_left % 0x1000 <
- (bytes_left - *params.nvm_rd.buf_size) % 0x1000)
+ (bytes_left - buf_size) % 0x1000)
OSAL_MSLEEP(1);
- offset += *params.nvm_rd.buf_size;
- bytes_left -= *params.nvm_rd.buf_size;
+ offset += buf_size;
+ bytes_left -= buf_size;
}
- p_dev->mcp_nvm_resp = params.nvm_common.resp;
+ p_dev->mcp_nvm_resp = resp;
ecore_ptt_release(p_hwfn, p_ptt);
return rc;
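
The read path above is a classic chunked transfer: each mailbox command
carries the address and the chunk length packed into one dword, capped
at MCP_DRV_NVM_BUF_LEN bytes per command. A standalone sketch of the
loop; the buffer limit and length-field position are assumed here:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NVM_BUF_LEN	32		/* per-command limit, assumed */
#define NVM_LEN_OFFSET	24		/* length-field position, assumed */

static uint8_t nvm[256];		/* stands in for the flash */

/* One "mailbox command": copies up to NVM_BUF_LEN bytes, returns count. */
static uint32_t nvm_read_cmd(uint32_t packed, uint8_t *dst)
{
	uint32_t addr = packed & ((1u << NVM_LEN_OFFSET) - 1);
	uint32_t len = packed >> NVM_LEN_OFFSET;

	memcpy(dst, &nvm[addr], len);
	return len;
}

static void nvm_read(uint32_t addr, uint8_t *buf, uint32_t len)
{
	uint32_t left = len, off = 0, chunk, got;

	while (left > 0) {
		chunk = left < NVM_BUF_LEN ? left : NVM_BUF_LEN;
		got = nvm_read_cmd((addr + off) |
				   (chunk << NVM_LEN_OFFSET), buf + off);
		off += got;
		left -= got;
	}
}

int main(void)
{
	uint8_t buf[100];

	memset(nvm, 0xab, sizeof(nvm));
	nvm_read(16, buf, sizeof(buf));
	printf("buf[99] = 0x%02x\n", buf[99]);	/* expect 0xab */
	return 0;
}
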
@@ -2318,26 +2741,23 @@ enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd,
u32 addr, u8 *p_buf, u32 len)
{
struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
- struct ecore_mcp_nvm_params params;
struct ecore_ptt *p_ptt;
+ u32 resp, param;
enum _ecore_status_t rc;
p_ptt = ecore_ptt_acquire(p_hwfn);
if (!p_ptt)
return ECORE_BUSY;
- OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
- params.type = ECORE_MCP_NVM_RD;
- params.nvm_rd.buf_size = &len;
- params.nvm_common.cmd = (cmd == ECORE_PHY_CORE_READ) ?
- DRV_MSG_CODE_PHY_CORE_READ : DRV_MSG_CODE_PHY_RAW_READ;
- params.nvm_common.offset = addr;
- params.nvm_rd.buf = (u32 *)p_buf;
- rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
+ rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
+ (cmd == ECORE_PHY_CORE_READ) ?
+ DRV_MSG_CODE_PHY_CORE_READ :
+ DRV_MSG_CODE_PHY_RAW_READ,
+ addr, &resp, &param, &len, (u32 *)p_buf);
if (rc != ECORE_SUCCESS)
DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
- p_dev->mcp_nvm_resp = params.nvm_common.resp;
+ p_dev->mcp_nvm_resp = resp;
ecore_ptt_release(p_hwfn, p_ptt);
return rc;
@@ -2346,14 +2766,12 @@ enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd,
enum _ecore_status_t ecore_mcp_nvm_resp(struct ecore_dev *p_dev, u8 *p_buf)
{
struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
- struct ecore_mcp_nvm_params params;
struct ecore_ptt *p_ptt;
p_ptt = ecore_ptt_acquire(p_hwfn);
if (!p_ptt)
return ECORE_BUSY;
- OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
OSAL_MEMCPY(p_buf, &p_dev->mcp_nvm_resp, sizeof(p_dev->mcp_nvm_resp));
ecore_ptt_release(p_hwfn, p_ptt);
@@ -2363,19 +2781,16 @@ enum _ecore_status_t ecore_mcp_nvm_resp(struct ecore_dev *p_dev, u8 *p_buf)
enum _ecore_status_t ecore_mcp_nvm_del_file(struct ecore_dev *p_dev, u32 addr)
{
struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
- struct ecore_mcp_nvm_params params;
struct ecore_ptt *p_ptt;
+ u32 resp, param;
enum _ecore_status_t rc;
p_ptt = ecore_ptt_acquire(p_hwfn);
if (!p_ptt)
return ECORE_BUSY;
- OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
- params.type = ECORE_MCP_CMD;
- params.nvm_common.cmd = DRV_MSG_CODE_NVM_DEL_FILE;
- params.nvm_common.offset = addr;
- rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
- p_dev->mcp_nvm_resp = params.nvm_common.resp;
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_DEL_FILE, addr,
+ &resp, &param);
+ p_dev->mcp_nvm_resp = resp;
ecore_ptt_release(p_hwfn, p_ptt);
return rc;
@@ -2385,19 +2800,16 @@ enum _ecore_status_t ecore_mcp_nvm_put_file_begin(struct ecore_dev *p_dev,
u32 addr)
{
struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
- struct ecore_mcp_nvm_params params;
struct ecore_ptt *p_ptt;
+ u32 resp, param;
enum _ecore_status_t rc;
p_ptt = ecore_ptt_acquire(p_hwfn);
if (!p_ptt)
return ECORE_BUSY;
- OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
- params.type = ECORE_MCP_CMD;
- params.nvm_common.cmd = DRV_MSG_CODE_NVM_PUT_FILE_BEGIN;
- params.nvm_common.offset = addr;
- rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
- p_dev->mcp_nvm_resp = params.nvm_common.resp;
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_PUT_FILE_BEGIN, addr,
+ &resp, &param);
+ p_dev->mcp_nvm_resp = resp;
ecore_ptt_release(p_hwfn, p_ptt);
return rc;
@@ -2409,37 +2821,58 @@ enum _ecore_status_t ecore_mcp_nvm_put_file_begin(struct ecore_dev *p_dev,
enum _ecore_status_t ecore_mcp_nvm_write(struct ecore_dev *p_dev, u32 cmd,
u32 addr, u8 *p_buf, u32 len)
{
+ u32 buf_idx, buf_size, nvm_cmd, nvm_offset, resp, param;
struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
enum _ecore_status_t rc = ECORE_INVAL;
- struct ecore_mcp_nvm_params params;
struct ecore_ptt *p_ptt;
- u32 buf_idx, buf_size;
p_ptt = ecore_ptt_acquire(p_hwfn);
if (!p_ptt)
return ECORE_BUSY;
- OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
- params.type = ECORE_MCP_NVM_WR;
- if (cmd == ECORE_PUT_FILE_DATA)
- params.nvm_common.cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA;
- else
- params.nvm_common.cmd = DRV_MSG_CODE_NVM_WRITE_NVRAM;
+ switch (cmd) {
+ case ECORE_PUT_FILE_DATA:
+ nvm_cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA;
+ break;
+ case ECORE_NVM_WRITE_NVRAM:
+ nvm_cmd = DRV_MSG_CODE_NVM_WRITE_NVRAM;
+ break;
+ case ECORE_EXT_PHY_FW_UPGRADE:
+ nvm_cmd = DRV_MSG_CODE_EXT_PHY_FW_UPGRADE;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, true, "Invalid nvm write command 0x%x\n",
+ cmd);
+ rc = ECORE_INVAL;
+ goto out;
+ }
+
buf_idx = 0;
while (buf_idx < len) {
buf_size = OSAL_MIN_T(u32, (len - buf_idx),
MCP_DRV_NVM_BUF_LEN);
- params.nvm_common.offset = ((buf_size <<
- DRV_MB_PARAM_NVM_LEN_SHIFT)
- | addr) + buf_idx;
- params.nvm_wr.buf_size = buf_size;
- params.nvm_wr.buf = (u32 *)&p_buf[buf_idx];
- rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
- if (rc != ECORE_SUCCESS ||
- ((params.nvm_common.resp != FW_MSG_CODE_NVM_OK) &&
- (params.nvm_common.resp !=
- FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK)))
- DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
+ nvm_offset = ((buf_size << DRV_MB_PARAM_NVM_LEN_OFFSET) |
+ addr) +
+ buf_idx;
+ rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, nvm_offset,
+ &resp, &param, buf_size,
+ (u32 *)&p_buf[buf_idx]);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_dev, false,
+				  "ecore_mcp_nvm_wr_cmd() failed, rc = %d\n",
+ rc);
+ resp = FW_MSG_CODE_ERROR;
+ break;
+ }
+
+ if (resp != FW_MSG_CODE_OK &&
+ resp != FW_MSG_CODE_NVM_OK &&
+ resp != FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK) {
+ DP_NOTICE(p_dev, false,
+ "nvm write failed, resp = 0x%08x\n", resp);
+ rc = ECORE_UNKNOWN_ERROR;
+ break;
+ }
/* This can be a lengthy process, and it's possible scheduler
* isn't preemptible. Sleep a bit to prevent CPU hogging.
@@ -2451,7 +2884,8 @@ enum _ecore_status_t ecore_mcp_nvm_write(struct ecore_dev *p_dev, u32 cmd,
buf_idx += buf_size;
}
- p_dev->mcp_nvm_resp = params.nvm_common.resp;
+ p_dev->mcp_nvm_resp = resp;
+out:
ecore_ptt_release(p_hwfn, p_ptt);
return rc;
@@ -2461,25 +2895,21 @@ enum _ecore_status_t ecore_mcp_phy_write(struct ecore_dev *p_dev, u32 cmd,
u32 addr, u8 *p_buf, u32 len)
{
struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
- struct ecore_mcp_nvm_params params;
struct ecore_ptt *p_ptt;
+ u32 resp, param, nvm_cmd;
enum _ecore_status_t rc;
p_ptt = ecore_ptt_acquire(p_hwfn);
if (!p_ptt)
return ECORE_BUSY;
- OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
- params.type = ECORE_MCP_NVM_WR;
- params.nvm_wr.buf_size = len;
- params.nvm_common.cmd = (cmd == ECORE_PHY_CORE_WRITE) ?
- DRV_MSG_CODE_PHY_CORE_WRITE : DRV_MSG_CODE_PHY_RAW_WRITE;
- params.nvm_common.offset = addr;
- params.nvm_wr.buf = (u32 *)p_buf;
- rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
+ nvm_cmd = (cmd == ECORE_PHY_CORE_WRITE) ? DRV_MSG_CODE_PHY_CORE_WRITE :
+ DRV_MSG_CODE_PHY_RAW_WRITE;
+ rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, addr,
+ &resp, &param, len, (u32 *)p_buf);
if (rc != ECORE_SUCCESS)
DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
- p_dev->mcp_nvm_resp = params.nvm_common.resp;
+ p_dev->mcp_nvm_resp = resp;
ecore_ptt_release(p_hwfn, p_ptt);
return rc;
@@ -2489,20 +2919,17 @@ enum _ecore_status_t ecore_mcp_nvm_set_secure_mode(struct ecore_dev *p_dev,
u32 addr)
{
struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
- struct ecore_mcp_nvm_params params;
struct ecore_ptt *p_ptt;
+ u32 resp, param;
enum _ecore_status_t rc;
p_ptt = ecore_ptt_acquire(p_hwfn);
if (!p_ptt)
return ECORE_BUSY;
- OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
- params.type = ECORE_MCP_CMD;
- params.nvm_common.cmd = DRV_MSG_CODE_SET_SECURE_MODE;
- params.nvm_common.offset = addr;
- rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
- p_dev->mcp_nvm_resp = params.nvm_common.resp;
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_SECURE_MODE, addr,
+ &resp, &param);
+ p_dev->mcp_nvm_resp = resp;
ecore_ptt_release(p_hwfn, p_ptt);
return rc;
@@ -2513,42 +2940,42 @@ enum _ecore_status_t ecore_mcp_phy_sfp_read(struct ecore_hwfn *p_hwfn,
u32 port, u32 addr, u32 offset,
u32 len, u8 *p_buf)
{
- struct ecore_mcp_nvm_params params;
+ u32 bytes_left, bytes_to_copy, buf_size, nvm_offset;
+ u32 resp, param;
enum _ecore_status_t rc;
- u32 bytes_left, bytes_to_copy, buf_size;
- OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
- params.nvm_common.offset =
- (port << DRV_MB_PARAM_TRANSCEIVER_PORT_SHIFT) |
- (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_SHIFT);
+ nvm_offset = (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) |
+ (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET);
addr = offset;
offset = 0;
bytes_left = len;
- params.type = ECORE_MCP_NVM_RD;
- params.nvm_rd.buf_size = &buf_size;
- params.nvm_common.cmd = DRV_MSG_CODE_TRANSCEIVER_READ;
while (bytes_left > 0) {
bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
MAX_I2C_TRANSACTION_SIZE);
- params.nvm_rd.buf = (u32 *)(p_buf + offset);
- params.nvm_common.offset &=
- (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
- DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
- params.nvm_common.offset |=
- ((addr + offset) <<
- DRV_MB_PARAM_TRANSCEIVER_OFFSET_SHIFT);
- params.nvm_common.offset |=
- (bytes_to_copy << DRV_MB_PARAM_TRANSCEIVER_SIZE_SHIFT);
- rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
- if ((params.nvm_common.resp & FW_MSG_CODE_MASK) ==
- FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) {
+ nvm_offset &= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
+ DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
+ nvm_offset |= ((addr + offset) <<
+ DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET);
+ nvm_offset |= (bytes_to_copy <<
+ DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET);
+ rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
+ DRV_MSG_CODE_TRANSCEIVER_READ,
+ nvm_offset, &resp, &param, &buf_size,
+ (u32 *)(p_buf + offset));
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to send a transceiver read command to the MFW. rc = %d.\n",
+ rc);
+ return rc;
+ }
+
+ if (resp == FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT)
return ECORE_NODEV;
- } else if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
- FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
+ else if (resp != FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
return ECORE_UNKNOWN_ERROR;
- offset += *params.nvm_rd.buf_size;
- bytes_left -= *params.nvm_rd.buf_size;
+ offset += buf_size;
+ bytes_left -= buf_size;
}
return ECORE_SUCCESS;
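
Each iteration above re-packs the transfer offset and size while
keeping the port and I2C-address bits intact. A standalone sketch of
that masking step; the field layout here is assumed, not the HSI's:

#include <stdint.h>
#include <stdio.h>

#define PORT_MASK	0x00000003u	/* assumed: port[1:0] */
#define PORT_OFFSET	0
#define I2C_ADDR_MASK	0x000003fcu	/* assumed: i2c addr[9:2] */
#define I2C_ADDR_OFFSET	2
#define XFER_OFF_OFFSET	10		/* assumed: offset[23:10] */
#define XFER_SIZE_OFFSET 24		/* assumed: size[31:24] */

int main(void)
{
	uint32_t pkt = (1u << PORT_OFFSET) | (0x50u << I2C_ADDR_OFFSET);
	uint32_t offset, size = 16;

	for (offset = 0; offset < 32; offset += size) {
		pkt &= (PORT_MASK | I2C_ADDR_MASK); /* keep port + addr */
		pkt |= (offset << XFER_OFF_OFFSET) |
		       (size << XFER_SIZE_OFFSET);
		printf("cmd param 0x%08x\n", (unsigned int)pkt);
	}
	return 0;
}
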
@@ -2559,36 +2986,35 @@ enum _ecore_status_t ecore_mcp_phy_sfp_write(struct ecore_hwfn *p_hwfn,
u32 port, u32 addr, u32 offset,
u32 len, u8 *p_buf)
{
- struct ecore_mcp_nvm_params params;
+ u32 buf_idx, buf_size, nvm_offset, resp, param;
enum _ecore_status_t rc;
- u32 buf_idx, buf_size;
-
- OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
- params.nvm_common.offset =
- (port << DRV_MB_PARAM_TRANSCEIVER_PORT_SHIFT) |
- (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_SHIFT);
- params.type = ECORE_MCP_NVM_WR;
- params.nvm_common.cmd = DRV_MSG_CODE_TRANSCEIVER_WRITE;
+
+ nvm_offset = (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) |
+ (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET);
buf_idx = 0;
while (buf_idx < len) {
buf_size = OSAL_MIN_T(u32, (len - buf_idx),
MAX_I2C_TRANSACTION_SIZE);
- params.nvm_common.offset &=
- (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
- DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
- params.nvm_common.offset |=
- ((offset + buf_idx) <<
- DRV_MB_PARAM_TRANSCEIVER_OFFSET_SHIFT);
- params.nvm_common.offset |=
- (buf_size << DRV_MB_PARAM_TRANSCEIVER_SIZE_SHIFT);
- params.nvm_wr.buf_size = buf_size;
- params.nvm_wr.buf = (u32 *)&p_buf[buf_idx];
- rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
- if ((params.nvm_common.resp & FW_MSG_CODE_MASK) ==
- FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) {
+ nvm_offset &= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
+ DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
+ nvm_offset |= ((offset + buf_idx) <<
+ DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET);
+ nvm_offset |= (buf_size <<
+ DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET);
+ rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt,
+ DRV_MSG_CODE_TRANSCEIVER_WRITE,
+ nvm_offset, &resp, &param, buf_size,
+ (u32 *)&p_buf[buf_idx]);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to send a transceiver write command to the MFW. rc = %d.\n",
+ rc);
+ return rc;
+ }
+
+ if (resp == FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT)
return ECORE_NODEV;
- } else if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
- FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
+ else if (resp != FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
return ECORE_UNKNOWN_ERROR;
buf_idx += buf_size;
@@ -2604,7 +3030,7 @@ enum _ecore_status_t ecore_mcp_gpio_read(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t rc = ECORE_SUCCESS;
u32 drv_mb_param = 0, rsp;
- drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_SHIFT);
+ drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET);
rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_READ,
drv_mb_param, &rsp, gpio_val);
@@ -2625,8 +3051,8 @@ enum _ecore_status_t ecore_mcp_gpio_write(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t rc = ECORE_SUCCESS;
u32 drv_mb_param = 0, param, rsp;
- drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_SHIFT) |
- (gpio_val << DRV_MB_PARAM_GPIO_VALUE_SHIFT);
+ drv_mb_param = (gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET) |
+ (gpio_val << DRV_MB_PARAM_GPIO_VALUE_OFFSET);
rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_WRITE,
drv_mb_param, &rsp, &param);
@@ -2648,7 +3074,7 @@ enum _ecore_status_t ecore_mcp_gpio_info(struct ecore_hwfn *p_hwfn,
u32 drv_mb_param = 0, rsp, val = 0;
enum _ecore_status_t rc = ECORE_SUCCESS;
- drv_mb_param = gpio << DRV_MB_PARAM_GPIO_NUMBER_SHIFT;
+ drv_mb_param = gpio << DRV_MB_PARAM_GPIO_NUMBER_OFFSET;
rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_INFO,
drv_mb_param, &rsp, &val);
@@ -2656,9 +3082,9 @@ enum _ecore_status_t ecore_mcp_gpio_info(struct ecore_hwfn *p_hwfn,
return rc;
*gpio_direction = (val & DRV_MB_PARAM_GPIO_DIRECTION_MASK) >>
- DRV_MB_PARAM_GPIO_DIRECTION_SHIFT;
+ DRV_MB_PARAM_GPIO_DIRECTION_OFFSET;
*gpio_ctrl = (val & DRV_MB_PARAM_GPIO_CTRL_MASK) >>
- DRV_MB_PARAM_GPIO_CTRL_SHIFT;
+ DRV_MB_PARAM_GPIO_CTRL_OFFSET;
if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
return ECORE_UNKNOWN_ERROR;
@@ -2673,7 +3099,7 @@ enum _ecore_status_t ecore_mcp_bist_register_test(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t rc = ECORE_SUCCESS;
drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST <<
- DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
+ DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);
rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
drv_mb_param, &rsp, &param);
@@ -2695,7 +3121,7 @@ enum _ecore_status_t ecore_mcp_bist_clock_test(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t rc = ECORE_SUCCESS;
drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST <<
- DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
+ DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);
rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
drv_mb_param, &rsp, &param);
@@ -2717,7 +3143,7 @@ enum _ecore_status_t ecore_mcp_bist_nvm_test_get_num_images(
enum _ecore_status_t rc = ECORE_SUCCESS;
drv_mb_param = (DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES <<
- DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
+ DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);
rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
drv_mb_param, &rsp, num_images);
@@ -2735,26 +3161,20 @@ enum _ecore_status_t ecore_mcp_bist_nvm_test_get_image_att(
struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
struct bist_nvm_image_att *p_image_att, u32 image_index)
{
- struct ecore_mcp_nvm_params params;
+ u32 buf_size, nvm_offset, resp, param;
enum _ecore_status_t rc;
- u32 buf_size;
-
- OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
- params.nvm_common.offset = (DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX <<
- DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
- params.nvm_common.offset |= (image_index <<
- DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT);
- params.type = ECORE_MCP_NVM_RD;
- params.nvm_rd.buf_size = &buf_size;
- params.nvm_common.cmd = DRV_MSG_CODE_BIST_TEST;
- params.nvm_rd.buf = (u32 *)p_image_att;
-
- rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
+ nvm_offset = (DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX <<
+ DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET);
+ nvm_offset |= (image_index <<
+ DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_OFFSET);
+ rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
+ nvm_offset, &resp, &param, &buf_size,
+ (u32 *)p_image_att);
if (rc != ECORE_SUCCESS)
return rc;
- if (((params.nvm_common.resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
+ if (((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
(p_image_att->return_code != 1))
rc = ECORE_UNKNOWN_ERROR;
@@ -2788,13 +3208,13 @@ ecore_mcp_get_temperature_info(struct ecore_hwfn *p_hwfn,
val = mfw_temp_info.sensor[i];
p_temp_sensor = &p_temp_info->sensors[i];
p_temp_sensor->sensor_location = (val & SENSOR_LOCATION_MASK) >>
- SENSOR_LOCATION_SHIFT;
+ SENSOR_LOCATION_OFFSET;
p_temp_sensor->threshold_high = (val & THRESHOLD_HIGH_MASK) >>
- THRESHOLD_HIGH_SHIFT;
+ THRESHOLD_HIGH_OFFSET;
p_temp_sensor->critical = (val & CRITICAL_TEMPERATURE_MASK) >>
- CRITICAL_TEMPERATURE_SHIFT;
+ CRITICAL_TEMPERATURE_OFFSET;
p_temp_sensor->current_temp = (val & CURRENT_TEMP_MASK) >>
- CURRENT_TEMP_SHIFT;
+ CURRENT_TEMP_OFFSET;
}
return ECORE_SUCCESS;
@@ -2805,23 +3225,17 @@ enum _ecore_status_t ecore_mcp_get_mba_versions(
struct ecore_ptt *p_ptt,
struct ecore_mba_vers *p_mba_vers)
{
- struct ecore_mcp_nvm_params params;
+ u32 buf_size, resp, param;
enum _ecore_status_t rc;
- u32 buf_size;
- OSAL_MEM_ZERO(&params, sizeof(params));
- params.type = ECORE_MCP_NVM_RD;
- params.nvm_common.cmd = DRV_MSG_CODE_GET_MBA_VERSION;
- params.nvm_common.offset = 0;
- params.nvm_rd.buf = &p_mba_vers->mba_vers[0];
- params.nvm_rd.buf_size = &buf_size;
- rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
+ rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MBA_VERSION,
+ 0, &resp, &param, &buf_size,
+ &p_mba_vers->mba_vers[0]);
if (rc != ECORE_SUCCESS)
return rc;
- if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
- FW_MSG_CODE_NVM_OK)
+ if ((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
rc = ECORE_UNKNOWN_ERROR;
if (buf_size != MCP_DRV_NVM_BUF_LEN)
@@ -2897,9 +3311,9 @@ ecore_mcp_get_mfw_res_id(enum ecore_resources res_id)
#define ECORE_RESC_ALLOC_VERSION_MINOR 0
#define ECORE_RESC_ALLOC_VERSION \
((ECORE_RESC_ALLOC_VERSION_MAJOR << \
- DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT) | \
+ DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_OFFSET) | \
(ECORE_RESC_ALLOC_VERSION_MINOR << \
- DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT))
+ DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_OFFSET))
struct ecore_resc_alloc_in_params {
u32 cmd;
@@ -2983,10 +3397,10 @@ ecore_mcp_resc_allocation_msg(struct ecore_hwfn *p_hwfn,
"Resource message request: cmd 0x%08x, res_id %d [%s], hsi_version %d.%d, val 0x%x\n",
p_in_params->cmd, p_in_params->res_id,
ecore_hw_get_resc_name(p_in_params->res_id),
- ECORE_MFW_GET_FIELD(mb_params.param,
- DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
- ECORE_MFW_GET_FIELD(mb_params.param,
- DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
+ GET_MFW_FIELD(mb_params.param,
+ DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
+ GET_MFW_FIELD(mb_params.param,
+ DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
p_in_params->resc_max_val);
rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
@@ -3003,10 +3417,10 @@ ecore_mcp_resc_allocation_msg(struct ecore_hwfn *p_hwfn,
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
"Resource message response: mfw_hsi_version %d.%d, num 0x%x, start 0x%x, vf_num 0x%x, vf_start 0x%x, flags 0x%08x\n",
- ECORE_MFW_GET_FIELD(p_out_params->mcp_param,
- FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
- ECORE_MFW_GET_FIELD(p_out_params->mcp_param,
- FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
+ GET_MFW_FIELD(p_out_params->mcp_param,
+ FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
+ GET_MFW_FIELD(p_out_params->mcp_param,
+ FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
p_out_params->resc_num, p_out_params->resc_start,
p_out_params->vf_resc_num, p_out_params->vf_resc_start,
p_out_params->flags);
@@ -3094,7 +3508,7 @@ static enum _ecore_status_t ecore_mcp_resource_cmd(struct ecore_hwfn *p_hwfn,
}
if (*p_mcp_param == RESOURCE_OPCODE_UNKNOWN_CMD) {
- u8 opcode = ECORE_MFW_GET_FIELD(param, RESOURCE_CMD_REQ_OPCODE);
+ u8 opcode = GET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE);
DP_NOTICE(p_hwfn, false,
"The resource command is unknown to the MFW [param 0x%08x, opcode %d]\n",
@@ -3127,9 +3541,9 @@ __ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
break;
}
- ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
- ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
- ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_AGE, p_params->timeout);
+ SET_MFW_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
+ SET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
+ SET_MFW_FIELD(param, RESOURCE_CMD_REQ_AGE, p_params->timeout);
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
"Resource lock request: param 0x%08x [age %d, opcode %d, resource %d]\n",
@@ -3142,9 +3556,8 @@ __ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
return rc;
/* Analyze the response */
- p_params->owner = ECORE_MFW_GET_FIELD(mcp_param,
- RESOURCE_CMD_RSP_OWNER);
- opcode = ECORE_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
+ p_params->owner = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OWNER);
+ opcode = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
"Resource lock response: mcp_param 0x%08x [opcode %d, owner %d]\n",
@@ -3199,6 +3612,36 @@ ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
return ECORE_SUCCESS;
}
+void ecore_mcp_resc_lock_default_init(struct ecore_resc_lock_params *p_lock,
+ struct ecore_resc_unlock_params *p_unlock,
+ enum ecore_resc_lock resource,
+ bool b_is_permanent)
+{
+ if (p_lock != OSAL_NULL) {
+ OSAL_MEM_ZERO(p_lock, sizeof(*p_lock));
+
+ /* Permanent resources don't require aging, and there's no
+		 * point in trying to acquire them more than once, since no
+		 * other entity is expected to release them.
+ */
+ if (b_is_permanent) {
+ p_lock->timeout = ECORE_MCP_RESC_LOCK_TO_NONE;
+ } else {
+ p_lock->retry_num = ECORE_MCP_RESC_LOCK_RETRY_CNT_DFLT;
+ p_lock->retry_interval =
+ ECORE_MCP_RESC_LOCK_RETRY_VAL_DFLT;
+ p_lock->sleep_b4_retry = true;
+ }
+
+ p_lock->resource = resource;
+ }
+
+ if (p_unlock != OSAL_NULL) {
+ OSAL_MEM_ZERO(p_unlock, sizeof(*p_unlock));
+ p_unlock->resource = resource;
+ }
+}
+
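ecore_mcp_resc_lock_default_init() only pre-fills the two parameter structs; the caller still drives the lock/unlock cycle itself. A minimal caller sketch, assuming a valid p_hwfn/p_ptt pair; the b_granted output field is assumed from the full definition of struct ecore_resc_lock_params, which this hunk does not show:

/* Caller sketch for the MFW resource lock; assumes valid p_hwfn/p_ptt. */
static enum _ecore_status_t
resc_lock_usage_sketch(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	struct ecore_resc_lock_params lock;
	struct ecore_resc_unlock_params unlock;
	enum _ecore_status_t rc;

	/* Non-permanent: 10 retries, 10000 usec apart, sleep between */
	ecore_mcp_resc_lock_default_init(&lock, &unlock,
					 ECORE_RESC_LOCK_RESC_ALLOC, false);

	rc = ecore_mcp_resc_lock(p_hwfn, p_ptt, &lock);
	if (rc != ECORE_SUCCESS)
		return rc;
	if (!lock.b_granted)	/* assumed output field, see note above */
		return ECORE_BUSY;

	/* ... critical section protected by the MFW resource lock ... */

	return ecore_mcp_resc_unlock(p_hwfn, p_ptt, &unlock);
}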
enum _ecore_status_t
ecore_mcp_resc_unlock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
struct ecore_resc_unlock_params *p_params)
@@ -3209,8 +3652,8 @@ ecore_mcp_resc_unlock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
opcode = p_params->b_force ? RESOURCE_OPCODE_FORCE_RELEASE
: RESOURCE_OPCODE_RELEASE;
- ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
- ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
+ SET_MFW_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
+ SET_MFW_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
"Resource unlock request: param 0x%08x [opcode %d, resource %d]\n",
@@ -3223,7 +3666,7 @@ ecore_mcp_resc_unlock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
return rc;
/* Analyze the response */
- opcode = ECORE_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
+ opcode = GET_MFW_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
"Resource unlock response: mcp_param 0x%08x [opcode %d]\n",
@@ -3250,3 +3693,137 @@ ecore_mcp_resc_unlock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
return ECORE_SUCCESS;
}
+
+bool ecore_mcp_is_smart_an_supported(struct ecore_hwfn *p_hwfn)
+{
+ return !!(p_hwfn->mcp_info->capabilities &
+ FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ);
+}
+
+enum _ecore_status_t ecore_mcp_get_capabilities(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 mcp_resp;
+ enum _ecore_status_t rc;
+
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT,
+ 0, &mcp_resp, &p_hwfn->mcp_info->capabilities);
+ if (rc == ECORE_SUCCESS)
+ DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_PROBE),
+ "MFW supported features: %08x\n",
+ p_hwfn->mcp_info->capabilities);
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_mcp_set_capabilities(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 mcp_resp, mcp_param, features;
+
+ features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_SMARTLINQ |
+ DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE |
+ DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK;
+
+ return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT,
+ features, &mcp_resp, &mcp_param);
+}
+
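ecore_mcp_get_capabilities() and ecore_mcp_set_capabilities() are meant to bracket early init: first learn the MFW's feature set, then report the driver's own. A condensed ordering sketch; the real call sites live in ecore_dev.c and are not part of this hunk:

/* Ordering sketch only, using the helpers introduced above. */
static enum _ecore_status_t
capability_handshake_sketch(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	enum _ecore_status_t rc;

	/* 1. Learn the MFW feature set (fills mcp_info->capabilities) */
	rc = ecore_mcp_get_capabilities(p_hwfn, p_ptt);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* 2. The negotiated bits can then be queried, e.g. SmartLinQ */
	if (ecore_mcp_is_smart_an_supported(p_hwfn))
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "SmartLinQ available\n");

	/* 3. Report the driver's own feature set back to the MFW */
	return ecore_mcp_set_capabilities(p_hwfn, p_ptt);
}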
+enum _ecore_status_t
+ecore_mcp_drv_attribute(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_mcp_drv_attr *p_drv_attr)
+{
+ struct attribute_cmd_write_stc attr_cmd_write;
+ enum _attribute_commands_e mfw_attr_cmd;
+ struct ecore_mcp_mb_params mb_params;
+ enum _ecore_status_t rc;
+
+ switch (p_drv_attr->attr_cmd) {
+ case ECORE_MCP_DRV_ATTR_CMD_READ:
+ mfw_attr_cmd = ATTRIBUTE_CMD_READ;
+ break;
+ case ECORE_MCP_DRV_ATTR_CMD_WRITE:
+ mfw_attr_cmd = ATTRIBUTE_CMD_WRITE;
+ break;
+ case ECORE_MCP_DRV_ATTR_CMD_READ_CLEAR:
+ mfw_attr_cmd = ATTRIBUTE_CMD_READ_CLEAR;
+ break;
+ case ECORE_MCP_DRV_ATTR_CMD_CLEAR:
+ mfw_attr_cmd = ATTRIBUTE_CMD_CLEAR;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, false, "Unknown attribute command %d\n",
+ p_drv_attr->attr_cmd);
+ return ECORE_INVAL;
+ }
+
+ OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
+ mb_params.cmd = DRV_MSG_CODE_ATTRIBUTE;
+ SET_MFW_FIELD(mb_params.param, DRV_MB_PARAM_ATTRIBUTE_KEY,
+ p_drv_attr->attr_num);
+ SET_MFW_FIELD(mb_params.param, DRV_MB_PARAM_ATTRIBUTE_CMD,
+ mfw_attr_cmd);
+ if (p_drv_attr->attr_cmd == ECORE_MCP_DRV_ATTR_CMD_WRITE) {
+ OSAL_MEM_ZERO(&attr_cmd_write, sizeof(attr_cmd_write));
+ attr_cmd_write.val = p_drv_attr->val;
+ attr_cmd_write.mask = p_drv_attr->mask;
+ attr_cmd_write.offset = p_drv_attr->offset;
+
+ mb_params.p_data_src = &attr_cmd_write;
+ mb_params.data_src_size = sizeof(attr_cmd_write);
+ }
+
+ rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
+ DP_INFO(p_hwfn,
+ "The attribute command is not supported by the MFW\n");
+ return ECORE_NOTIMPL;
+ } else if (mb_params.mcp_resp != FW_MSG_CODE_OK) {
+ DP_INFO(p_hwfn,
+ "Failed to send an attribute command [mcp_resp 0x%x, attr_cmd %d, attr_num %d]\n",
+ mb_params.mcp_resp, p_drv_attr->attr_cmd,
+ p_drv_attr->attr_num);
+ return ECORE_INVAL;
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Attribute Command: cmd %d [mfw_cmd %d], num %d, in={val 0x%08x, mask 0x%08x, offset 0x%08x}, out={val 0x%08x}\n",
+ p_drv_attr->attr_cmd, mfw_attr_cmd, p_drv_attr->attr_num,
+ p_drv_attr->val, p_drv_attr->mask, p_drv_attr->offset,
+ mb_params.mcp_param);
+
+ if (p_drv_attr->attr_cmd == ECORE_MCP_DRV_ATTR_CMD_READ ||
+ p_drv_attr->attr_cmd == ECORE_MCP_DRV_ATTR_CMD_READ_CLEAR)
+ p_drv_attr->val = mb_params.mcp_param;
+
+ return ECORE_SUCCESS;
+}
+
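For the attribute helper above, a read is the simplest case: zero the struct, pick the command and key, and collect the value on success. A sketch with a hypothetical attribute number:

/* Read-attribute sketch; the attribute number is hypothetical and only
 * illustrates filling struct ecore_mcp_drv_attr for a READ.
 */
static enum _ecore_status_t
drv_attr_read_sketch(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
		     u32 *p_val)
{
	struct ecore_mcp_drv_attr drv_attr;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&drv_attr, sizeof(drv_attr));
	drv_attr.attr_cmd = ECORE_MCP_DRV_ATTR_CMD_READ;
	drv_attr.attr_num = 5;	/* hypothetical attribute key */

	rc = ecore_mcp_drv_attribute(p_hwfn, p_ptt, &drv_attr);
	if (rc == ECORE_SUCCESS)
		*p_val = drv_attr.val;	/* READ/READ_CLEAR fill 'val' */

	return rc;
}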
+void ecore_mcp_wol_wr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ u32 offset, u32 val)
+{
+ struct ecore_mcp_mb_params mb_params = {0};
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ u32 dword = val;
+
+ mb_params.cmd = DRV_MSG_CODE_WRITE_WOL_REG;
+ mb_params.param = offset;
+ mb_params.p_data_src = &dword;
+ mb_params.data_src_size = sizeof(dword);
+
+ rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, false,
+			  "Failed to send WoL write request, rc = %d\n", rc);
+ }
+
+ if (mb_params.mcp_resp != FW_MSG_CODE_WOL_READ_WRITE_OK) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to write value 0x%x to offset 0x%x [mcp_resp 0x%x]\n",
+ val, offset, mb_params.mcp_resp);
+ rc = ECORE_UNKNOWN_ERROR;
+ }
+}
diff --git a/drivers/net/qede/base/ecore_mcp.h b/drivers/net/qede/base/ecore_mcp.h
index 77fb5a3c..6afaf7de 100644
--- a/drivers/net/qede/base/ecore_mcp.h
+++ b/drivers/net/qede/base/ecore_mcp.h
@@ -13,6 +13,7 @@
#include "mcp_public.h"
#include "ecore.h"
#include "ecore_mcp_api.h"
+#include "ecore_dev_api.h"
/* Using hwfn number (and not pf_num) is required since in CMT mode,
* same pf_num may be used by two different hwfn
@@ -24,17 +25,27 @@
((rel_pfid) | \
((p_hwfn)->abs_pf_id & 1) << 3) : \
rel_pfid)
-#define MCP_PF_ID(p_hwfn) MCP_PF_ID_BY_REL(p_hwfn, (p_hwfn)->rel_pf_id)
+#define MCP_PF_ID(p_hwfn) MCP_PF_ID_BY_REL(p_hwfn, (p_hwfn)->rel_pf_id)
#define MFW_PORT(_p_hwfn) ((_p_hwfn)->abs_pf_id % \
- ((_p_hwfn)->p_dev->num_ports_in_engines * \
- ecore_device_num_engines((_p_hwfn)->p_dev)))
+ ecore_device_num_ports((_p_hwfn)->p_dev))
struct ecore_mcp_info {
- /* Spinlock used for protecting the access to the MFW mailbox */
- osal_spinlock_t lock;
- /* Flag to indicate whether sending a MFW mailbox is forbidden */
- bool block_mb_sending;
+ /* List for mailbox commands which were sent and wait for a response */
+ osal_list_t cmd_list;
+
+ /* Spinlock used for protecting the access to the mailbox commands list
+ * and the sending of the commands.
+ */
+ osal_spinlock_t cmd_lock;
+
+	/* Flag to indicate whether sending an MFW mailbox command is blocked */
+ bool b_block_cmd;
+
+ /* Spinlock used for syncing SW link-changes and link-changes
+ * originating from attention context.
+ */
+ osal_spinlock_t link_lock;
/* Address of the MCP public area */
u32 public_base;
@@ -59,7 +70,10 @@ struct ecore_mcp_info {
u8 *mfw_mb_cur;
u8 *mfw_mb_shadow;
u16 mfw_mb_length;
- u16 mcp_hist;
+ u32 mcp_hist;
+
+	/* Capabilities negotiated with the MFW */
+ u32 capabilities;
};
struct ecore_mcp_mb_params {
@@ -97,7 +111,7 @@ enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn,
*
* @param p_hwfn
* @param p_ptt
- * Can only be called after `num_ports_in_engines' is set
+ * Can only be called after `num_ports_in_engine' is set
*/
void ecore_mcp_cmd_port_init(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
@@ -150,9 +164,13 @@ enum ecore_drv_role {
};
struct ecore_load_req_params {
+ /* Input params */
enum ecore_drv_role drv_role;
u8 timeout_val; /* 1..254, '0' - default value, '255' - no timeout */
bool avoid_eng_reset;
+ enum ecore_override_force_load override_force_load;
+
+ /* Output params */
u32 load_code;
};
@@ -247,56 +265,6 @@ enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
/**
- * @brief - Sends an NVM write command request to the MFW with
- * payload.
- *
- * @param p_hwfn
- * @param p_ptt
- * @param cmd - Command: Either DRV_MSG_CODE_NVM_WRITE_NVRAM or
- * DRV_MSG_CODE_NVM_PUT_FILE_DATA
- * @param param - [0:23] - Offset [24:31] - Size
- * @param o_mcp_resp - MCP response
- * @param o_mcp_param - MCP response param
- * @param i_txn_size - Buffer size
- * @param i_buf - Pointer to the buffer
- *
- * @param return ECORE_SUCCESS upon success.
- */
-enum _ecore_status_t ecore_mcp_nvm_wr_cmd(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u32 cmd,
- u32 param,
- u32 *o_mcp_resp,
- u32 *o_mcp_param,
- u32 i_txn_size,
- u32 *i_buf);
-
-/**
- * @brief - Sends an NVM read command request to the MFW to get
- * a buffer.
- *
- * @param p_hwfn
- * @param p_ptt
- * @param cmd - Command: DRV_MSG_CODE_NVM_GET_FILE_DATA or
- * DRV_MSG_CODE_NVM_READ_NVRAM commands
- * @param param - [0:23] - Offset [24:31] - Size
- * @param o_mcp_resp - MCP response
- * @param o_mcp_param - MCP response param
- * @param o_txn_size - Buffer size output
- * @param o_buf - Pointer to the buffer returned by the MFW.
- *
- * @param return ECORE_SUCCESS upon success.
- */
-enum _ecore_status_t ecore_mcp_nvm_rd_cmd(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u32 cmd,
- u32 param,
- u32 *o_mcp_resp,
- u32 *o_mcp_param,
- u32 *o_txn_size,
- u32 *o_buf);
-
-/**
* @brief indicates whether the MFW objects [under mcp_info] are accessible
*
* @param p_hwfn
@@ -368,12 +336,33 @@ enum _ecore_status_t ecore_mcp_mdump_set_values(struct ecore_hwfn *p_hwfn,
*
* @param p_hwfn
* @param p_ptt
+ * @param epoch
*
 * @return ECORE_SUCCESS upon success.
*/
enum _ecore_status_t ecore_mcp_mdump_trigger(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
+struct ecore_mdump_retain_data {
+ u32 valid;
+ u32 epoch;
+ u32 pf;
+ u32 status;
+};
+
+/**
+ * @brief - Gets the mdump retained data from the MFW.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_mdump_retain
+ *
+ * @return ECORE_SUCCESS upon success.
+ */
+enum _ecore_status_t
+ecore_mcp_mdump_get_retain(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_mdump_retain_data *p_mdump_retain);
+
/**
* @brief - Sets the MFW's max value for the given resource
*
@@ -426,7 +415,12 @@ enum ecore_resc_lock {
/* Locks that the MFW is aware of should be added here downwards */
/* Ecore only locks should be added here upwards */
- ECORE_RESC_LOCK_RESC_ALLOC = ECORE_MCP_RESC_LOCK_MAX_VAL
+ ECORE_RESC_LOCK_RESC_ALLOC = ECORE_MCP_RESC_LOCK_MAX_VAL,
+
+	/* A dummy value to be used by auxiliary functions that need to
+	 * return an 'error' value.
+ */
+ ECORE_RESC_LOCK_RESC_INVALID,
};
struct ecore_resc_lock_params {
@@ -440,9 +434,11 @@ struct ecore_resc_lock_params {
/* Number of times to retry locking */
u8 retry_num;
+#define ECORE_MCP_RESC_LOCK_RETRY_CNT_DFLT 10
/* The interval in usec between retries */
u16 retry_interval;
+#define ECORE_MCP_RESC_LOCK_RETRY_VAL_DFLT 10000
/* Use sleep or delay between retries */
bool sleep_b4_retry;
@@ -493,4 +489,83 @@ enum _ecore_status_t
ecore_mcp_resc_unlock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
struct ecore_resc_unlock_params *p_params);
+/**
+ * @brief - default initialization for lock/unlock resource structs
+ *
+ * @param p_lock - lock params struct to be initialized; can be OSAL_NULL
+ * @param p_unlock - unlock params struct to be initialized; can be OSAL_NULL
+ * @param resource - the requested resource
+ * @param b_is_permanent - disable retries & aging when set
+ */
+void ecore_mcp_resc_lock_default_init(struct ecore_resc_lock_params *p_lock,
+ struct ecore_resc_unlock_params *p_unlock,
+ enum ecore_resc_lock resource,
+ bool b_is_permanent);
+
+/**
+ * @brief Learn of supported MFW features; To be done during early init
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+enum _ecore_status_t ecore_mcp_get_capabilities(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief Inform MFW of set of features supported by driver. Should be done
+ *        inside the context of the LOAD_REQ.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+enum _ecore_status_t ecore_mcp_set_capabilities(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+enum ecore_mcp_drv_attr_cmd {
+ ECORE_MCP_DRV_ATTR_CMD_READ,
+ ECORE_MCP_DRV_ATTR_CMD_WRITE,
+ ECORE_MCP_DRV_ATTR_CMD_READ_CLEAR,
+ ECORE_MCP_DRV_ATTR_CMD_CLEAR,
+};
+
+struct ecore_mcp_drv_attr {
+ enum ecore_mcp_drv_attr_cmd attr_cmd;
+ u32 attr_num;
+
+ /* R/RC - will be set with the read value
+ * W - should hold the required value to be written
+ * C - DC
+ */
+ u32 val;
+
+ /* W - mask/offset to be applied on the given value
+ * R/RC/C - DC
+ */
+ u32 mask;
+ u32 offset;
+};
+
+/**
+ * @brief Handle the drivers' attributes that are kept by the MFW.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_drv_attr
+ */
+enum _ecore_status_t
+ecore_mcp_drv_attribute(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_mcp_drv_attr *p_drv_attr);
+
+/**
+ * @brief Read the UFP configuration from shared memory.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+void
+ecore_mcp_read_ufp_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
+
+void ecore_mcp_wol_wr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ u32 offset, u32 val);
+
#endif /* __ECORE_MCP_H__ */
diff --git a/drivers/net/qede/base/ecore_mcp_api.h b/drivers/net/qede/base/ecore_mcp_api.h
index abc190c9..be3e91f0 100644
--- a/drivers/net/qede/base/ecore_mcp_api.h
+++ b/drivers/net/qede/base/ecore_mcp_api.h
@@ -23,24 +23,51 @@ struct ecore_mcp_link_pause_params {
bool forced_tx;
};
+enum ecore_mcp_eee_mode {
+ ECORE_MCP_EEE_DISABLED,
+ ECORE_MCP_EEE_ENABLED,
+ ECORE_MCP_EEE_UNSUPPORTED
+};
+
+struct ecore_link_eee_params {
+ u32 tx_lpi_timer;
+#define ECORE_EEE_1G_ADV (1 << 0)
+#define ECORE_EEE_10G_ADV (1 << 1)
+ /* Capabilities are represented using ECORE_EEE_*_ADV values */
+ u8 adv_caps;
+ u8 lp_adv_caps;
+ bool enable;
+ bool tx_lpi_enable;
+};
+
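The new EEE block in ecore_mcp_link_params is plain configuration data. A sketch of populating it for 1G/10G advertisement; the timer value is illustrative, and applying the params (e.g. via the link-set path) is not shown in this hunk:

/* Populating the new EEE knobs; values are illustrative only. */
static void eee_params_fill_sketch(struct ecore_mcp_link_params *p_params)
{
	p_params->eee.enable = true;
	p_params->eee.tx_lpi_enable = true;
	p_params->eee.tx_lpi_timer = 250;	/* usec, illustrative */
	p_params->eee.adv_caps = ECORE_EEE_1G_ADV | ECORE_EEE_10G_ADV;
}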
struct ecore_mcp_link_params {
struct ecore_mcp_link_speed_params speed;
struct ecore_mcp_link_pause_params pause;
u32 loopback_mode; /* in PMM_LOOPBACK values */
+ struct ecore_link_eee_params eee;
};
struct ecore_mcp_link_capabilities {
u32 speed_capabilities;
	bool default_speed_autoneg;
u32 default_speed; /* In Mb/s */
+ enum ecore_mcp_eee_mode default_eee;
+ u32 eee_lpi_timer;
+ u8 eee_speed_caps;
};
struct ecore_mcp_link_state {
bool link_up;
- u32 line_speed; /* In Mb/s */
u32 min_pf_rate; /* In Mb/s */
- u32 speed; /* In Mb/s */
+
+ /* Actual link speed in Mb/s */
+ u32 line_speed;
+
+	/* PF max speed in Mb/s, deduced from line_speed
+ * according to PF max bandwidth configuration.
+ */
+ u32 speed;
bool full_duplex;
bool an;
@@ -67,6 +94,10 @@ struct ecore_mcp_link_state {
u8 partner_adv_pause;
bool sfp_tx_fault;
+
+ bool eee_active;
+ u8 eee_adv_caps;
+ u8 eee_lp_adv_caps;
};
struct ecore_mcp_function_info {
@@ -88,37 +119,6 @@ struct ecore_mcp_function_info {
u16 mtu;
};
-struct ecore_mcp_nvm_common {
- u32 offset;
- u32 param;
- u32 resp;
- u32 cmd;
-};
-
-struct ecore_mcp_nvm_rd {
- u32 *buf_size;
- u32 *buf;
-};
-
-struct ecore_mcp_nvm_wr {
- u32 buf_size;
- u32 *buf;
-};
-
-struct ecore_mcp_nvm_params {
-#define ECORE_MCP_CMD (1 << 0)
-#define ECORE_MCP_NVM_RD (1 << 1)
-#define ECORE_MCP_NVM_WR (1 << 2)
- u8 type;
-
- struct ecore_mcp_nvm_common nvm_common;
-
- union {
- struct ecore_mcp_nvm_rd nvm_rd;
- struct ecore_mcp_nvm_wr nvm_wr;
- };
-};
-
#ifndef __EXTRACT__LINUX__
enum ecore_nvm_images {
ECORE_NVM_IMAGE_ISCSI_CFG,
@@ -583,14 +583,16 @@ enum _ecore_status_t ecore_mcp_get_mfw_ver(struct ecore_hwfn *p_hwfn,
* @brief Get media type value of the port.
*
* @param p_dev - ecore dev pointer
+ * @param p_ptt
 * @param media_type - media type value
*
* @return enum _ecore_status_t -
* ECORE_SUCCESS - Operation was successful.
* ECORE_BUSY - Operation failed
*/
-enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_dev *p_dev,
- u32 *media_type);
+enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 *media_type);
/**
* @brief - Sends a command to the MCP mailbox.
@@ -598,9 +600,9 @@ enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_dev *p_dev,
* @param p_hwfn - hw function
* @param p_ptt - PTT required for register access
* @param cmd - command to be sent to the MCP
- * @param param - optional param
- * @param o_mcp_resp - the MCP response code (exclude sequence)
- * @param o_mcp_param - optional parameter provided by the MCP response
+ * @param param - Optional param
+ * @param o_mcp_resp - The MCP response code (exclude sequence)
+ * @param o_mcp_param - Optional parameter provided by the MCP response
*
* @return enum _ecore_status_t -
* ECORE_SUCCESS - operation was successful
@@ -632,44 +634,6 @@ const struct ecore_mcp_function_info
*ecore_mcp_get_function_info(struct ecore_hwfn *p_hwfn);
#endif
-/**
- * @brief - Function for reading/manipulating the nvram. Following are supported
- * functionalities.
- * 1. Read: Read the specified nvram offset.
- * input values:
- * type - ECORE_MCP_NVM_RD
- * cmd - command code (e.g. DRV_MSG_CODE_NVM_READ_NVRAM)
- * offset - nvm offset
- *
- * output values:
- * buf - buffer
- * buf_size - buffer size
- *
- * 2. Write: Write the data at the specified nvram offset
- * input values:
- * type - ECORE_MCP_NVM_WR
- * cmd - command code (e.g. DRV_MSG_CODE_NVM_WRITE_NVRAM)
- * offset - nvm offset
- * buf - buffer
- * buf_size - buffer size
- *
- * 3. Command: Send the NVM command to MCP.
- * input values:
- * type - ECORE_MCP_CMD
- * cmd - command code (e.g. DRV_MSG_CODE_NVM_DEL_FILE)
- * offset - nvm offset
- *
- *
- * @param p_hwfn
- * @param p_ptt
- * @param params
- *
- * @return ECORE_SUCCESS - operation was successful.
- */
-enum _ecore_status_t ecore_mcp_nvm_command(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- struct ecore_mcp_nvm_params *params);
-
#ifndef LINUX_REMOVE
/**
* @brief - count number of function with a matching personality on engine.
@@ -891,7 +855,7 @@ enum _ecore_status_t ecore_mcp_nvm_resp(struct ecore_dev *p_dev, u8 *p_buf);
* @param p_dev
* @param addr - nvm offset
* @param cmd - nvm command
- * @param p_buf - nvm write buffer
+ * @param p_buf - nvm read buffer
* @param len - buffer len
*
* @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
@@ -904,7 +868,7 @@ enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd,
*
* @param p_dev
* @param addr - nvm offset
- * @param p_buf - nvm write buffer
+ * @param p_buf - nvm read buffer
* @param len - buffer len
*
* @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
@@ -913,6 +877,56 @@ enum _ecore_status_t ecore_mcp_nvm_read(struct ecore_dev *p_dev, u32 addr,
u8 *p_buf, u32 len);
/**
+ * @brief - Sends an NVM write command request to the MFW with
+ * payload.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param cmd - Command: Either DRV_MSG_CODE_NVM_WRITE_NVRAM or
+ * DRV_MSG_CODE_NVM_PUT_FILE_DATA
+ * @param param - [0:23] - Offset [24:31] - Size
+ * @param o_mcp_resp - MCP response
+ * @param o_mcp_param - MCP response param
+ * @param i_txn_size - Buffer size
+ * @param i_buf - Pointer to the buffer
+ *
+ * @return ECORE_SUCCESS upon success.
+ */
+enum _ecore_status_t ecore_mcp_nvm_wr_cmd(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 cmd,
+ u32 param,
+ u32 *o_mcp_resp,
+ u32 *o_mcp_param,
+ u32 i_txn_size,
+ u32 *i_buf);
+
+/**
+ * @brief - Sends an NVM read command request to the MFW to get
+ * a buffer.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param cmd - Command: DRV_MSG_CODE_NVM_GET_FILE_DATA or
+ * DRV_MSG_CODE_NVM_READ_NVRAM commands
+ * @param param - [0:23] - Offset [24:31] - Size
+ * @param o_mcp_resp - MCP response
+ * @param o_mcp_param - MCP response param
+ * @param o_txn_size - Buffer size output
+ * @param o_buf - Pointer to the buffer returned by the MFW.
+ *
+ * @return ECORE_SUCCESS upon success.
+ */
+enum _ecore_status_t ecore_mcp_nvm_rd_cmd(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 cmd,
+ u32 param,
+ u32 *o_mcp_resp,
+ u32 *o_mcp_param,
+ u32 *o_txn_size,
+ u32 *o_buf);
+
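The packed 'param' word above carries the offset in bits [0:23] and the transaction size in bits [24:31]. A composition sketch for a single read chunk; the literal 24 stands in for the DRV_MB_PARAM_NVM_* length-shift macro, which this hunk does not show:

/* Composing the packed 'param' word for one NVM read transaction. */
static enum _ecore_status_t
nvm_read_chunk_sketch(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
		      u32 nvm_offset, u8 len, u32 *p_buf)
{
	u32 resp = 0, param = 0, txn_size = 0;

	return ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
				    DRV_MSG_CODE_NVM_READ_NVRAM,
				    nvm_offset | ((u32)len << 24),
				    &resp, &param, &txn_size, p_buf);
}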
+/**
* @brief Read from sfp
*
* @param p_hwfn - hw function
@@ -1123,6 +1137,17 @@ enum _ecore_status_t ecore_mcp_mdump_clear_logs(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
/**
+ * @brief - Clear the mdump retained data.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return ECORE_SUCCESS upon success.
+ */
+enum _ecore_status_t ecore_mcp_mdump_clr_retain(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
* @brief - Processes the TLV request from MFW i.e., get the required TLV info
* from the ecore client and send it to the MFW.
*
@@ -1134,4 +1159,13 @@ enum _ecore_status_t ecore_mcp_mdump_clear_logs(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t ecore_mfw_process_tlv_req(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
+
+/**
+ * @brief - Return whether management firmware supports smart AN
+ *
+ * @param p_hwfn
+ *
+ * @return bool - true iff feature is supported.
+ */
+bool ecore_mcp_is_smart_an_supported(struct ecore_hwfn *p_hwfn);
#endif
diff --git a/drivers/net/qede/base/ecore_mng_tlv.c b/drivers/net/qede/base/ecore_mng_tlv.c
index 0bf1be88..3a1de094 100644
--- a/drivers/net/qede/base/ecore_mng_tlv.c
+++ b/drivers/net/qede/base/ecore_mng_tlv.c
@@ -1403,9 +1403,9 @@ ecore_mfw_get_iscsi_tlv_value(struct ecore_drv_tlv_hdr *p_tlv,
return -1;
}
-static enum _ecore_status_t
-ecore_mfw_update_tlvs(u8 tlv_group, struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, u8 *p_mfw_buf, u32 size)
+static enum _ecore_status_t ecore_mfw_update_tlvs(struct ecore_hwfn *p_hwfn,
+ u8 tlv_group, u8 *p_mfw_buf,
+ u32 size)
{
union ecore_mfw_tlv_data *p_tlv_data;
struct ecore_drv_tlv_hdr tlv;
@@ -1512,8 +1512,7 @@ ecore_mfw_process_tlv_req(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
/* Update the TLV values in the local buffer */
for (id = ECORE_MFW_TLV_GENERIC; id < ECORE_MFW_TLV_MAX; id <<= 1) {
if (tlv_group & id) {
- if (ecore_mfw_update_tlvs(id, p_hwfn, p_ptt, p_mfw_buf,
- size))
+ if (ecore_mfw_update_tlvs(p_hwfn, id, p_mfw_buf, size))
goto drv_done;
}
}
diff --git a/drivers/net/qede/base/ecore_proto_if.h b/drivers/net/qede/base/ecore_proto_if.h
index 226e3d2a..66622323 100644
--- a/drivers/net/qede/base/ecore_proto_if.h
+++ b/drivers/net/qede/base/ecore_proto_if.h
@@ -22,6 +22,10 @@ struct ecore_eth_pf_params {
*/
u16 num_cons;
+ /* per-VF number of CIDs */
+ u8 num_vf_cons;
+#define ETH_PF_PARAMS_VF_CONS_DEFAULT (32)
+
	/* To enable arfs, a positive number needs to be set prior to
	 * HW-init [as filters require allocated searcher ILT memory].
* This will set the maximal number of configured steering-filters.
@@ -67,6 +71,7 @@ struct ecore_iscsi_pf_params {
u8 is_target;
u8 bdq_pbl_num_entries[2];
+ u8 disable_stats_collection;
};
enum ecore_rdma_protocol {
diff --git a/drivers/net/qede/base/ecore_rt_defs.h b/drivers/net/qede/base/ecore_rt_defs.h
index c9c23096..1d085815 100644
--- a/drivers/net/qede/base/ecore_rt_defs.h
+++ b/drivers/net/qede/base/ecore_rt_defs.h
@@ -28,424 +28,506 @@
#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET 15
#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET 16
#define DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET 17
-#define IGU_REG_PF_CONFIGURATION_RT_OFFSET 18
-#define IGU_REG_VF_CONFIGURATION_RT_OFFSET 19
-#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET 20
-#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET 21
-#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET 22
-#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET 23
-#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET 24
-#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 761
-#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736
-#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 761
-#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736
-#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET 1497
-#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE 736
-#define CAU_REG_PI_MEMORY_RT_OFFSET 2233
+#define DORQ_REG_GLB_MAX_ICID_0_RT_OFFSET 18
+#define DORQ_REG_GLB_MAX_ICID_1_RT_OFFSET 19
+#define DORQ_REG_GLB_RANGE2CONN_TYPE_0_RT_OFFSET 20
+#define DORQ_REG_GLB_RANGE2CONN_TYPE_1_RT_OFFSET 21
+#define DORQ_REG_PRV_PF_MAX_ICID_2_RT_OFFSET 22
+#define DORQ_REG_PRV_PF_MAX_ICID_3_RT_OFFSET 23
+#define DORQ_REG_PRV_PF_MAX_ICID_4_RT_OFFSET 24
+#define DORQ_REG_PRV_PF_MAX_ICID_5_RT_OFFSET 25
+#define DORQ_REG_PRV_VF_MAX_ICID_2_RT_OFFSET 26
+#define DORQ_REG_PRV_VF_MAX_ICID_3_RT_OFFSET 27
+#define DORQ_REG_PRV_VF_MAX_ICID_4_RT_OFFSET 28
+#define DORQ_REG_PRV_VF_MAX_ICID_5_RT_OFFSET 29
+#define DORQ_REG_PRV_PF_RANGE2CONN_TYPE_2_RT_OFFSET 30
+#define DORQ_REG_PRV_PF_RANGE2CONN_TYPE_3_RT_OFFSET 31
+#define DORQ_REG_PRV_PF_RANGE2CONN_TYPE_4_RT_OFFSET 32
+#define DORQ_REG_PRV_PF_RANGE2CONN_TYPE_5_RT_OFFSET 33
+#define DORQ_REG_PRV_VF_RANGE2CONN_TYPE_2_RT_OFFSET 34
+#define DORQ_REG_PRV_VF_RANGE2CONN_TYPE_3_RT_OFFSET 35
+#define DORQ_REG_PRV_VF_RANGE2CONN_TYPE_4_RT_OFFSET 36
+#define DORQ_REG_PRV_VF_RANGE2CONN_TYPE_5_RT_OFFSET 37
+#define IGU_REG_PF_CONFIGURATION_RT_OFFSET 38
+#define IGU_REG_VF_CONFIGURATION_RT_OFFSET 39
+#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET 40
+#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET 41
+#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET 42
+#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET 43
+#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET 44
+#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 45
+#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 1024
+#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET 1069
+#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE 1024
+#define CAU_REG_PI_MEMORY_RT_OFFSET 2093
#define CAU_REG_PI_MEMORY_RT_SIZE 4416
-#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET 6649
-#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET 6650
-#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET 6651
-#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET 6652
-#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET 6653
-#define PRS_REG_SEARCH_TCP_RT_OFFSET 6654
-#define PRS_REG_SEARCH_FCOE_RT_OFFSET 6655
-#define PRS_REG_SEARCH_ROCE_RT_OFFSET 6656
-#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET 6657
-#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET 6658
-#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET 6659
-#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET 6660
-#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET 6661
-#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET 6662
-#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET 6663
-#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET 6664
-#define SRC_REG_FIRSTFREE_RT_OFFSET 6665
+#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET 6509
+#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET 6510
+#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET 6511
+#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET 6512
+#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET 6513
+#define PRS_REG_SEARCH_TCP_RT_OFFSET 6514
+#define PRS_REG_SEARCH_FCOE_RT_OFFSET 6515
+#define PRS_REG_SEARCH_ROCE_RT_OFFSET 6516
+#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET 6517
+#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET 6518
+#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET 6519
+#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET 6520
+#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET 6521
+#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET 6522
+#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET 6523
+#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET 6524
+#define SRC_REG_FIRSTFREE_RT_OFFSET 6525
#define SRC_REG_FIRSTFREE_RT_SIZE 2
-#define SRC_REG_LASTFREE_RT_OFFSET 6667
+#define SRC_REG_LASTFREE_RT_OFFSET 6527
#define SRC_REG_LASTFREE_RT_SIZE 2
-#define SRC_REG_COUNTFREE_RT_OFFSET 6669
-#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET 6670
-#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET 6671
-#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET 6672
-#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 6673
-#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 6674
-#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET 6675
-#define PSWRQ2_REG_TSDM_P_SIZE_RT_OFFSET 6676
-#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET 6677
-#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET 6678
-#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET 6679
-#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET 6680
-#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET 6681
-#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET 6682
-#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET 6683
-#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET 6684
-#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET 6685
-#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET 6686
-#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET 6687
-#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET 6688
-#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6689
-#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6690
-#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6691
-#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET 6692
-#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET 6693
-#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET 6694
-#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET 6695
-#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET 6696
-#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 6697
-#define PSWRQ2_REG_VF_BASE_RT_OFFSET 6698
-#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 6699
-#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6700
-#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6701
-#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6702
-#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 22000
-#define PGLUE_REG_B_VF_BASE_RT_OFFSET 28702
-#define PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET 28703
-#define PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET 28704
-#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 28705
-#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 28706
-#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 28707
-#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 28708
-#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 28709
-#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 28710
-#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 28711
-#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 28712
-#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 28713
-#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 28714
+#define SRC_REG_COUNTFREE_RT_OFFSET 6529
+#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET 6530
+#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET 6531
+#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET 6532
+#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 6533
+#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 6534
+#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET 6535
+#define PSWRQ2_REG_TSDM_P_SIZE_RT_OFFSET 6536
+#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET 6537
+#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET 6538
+#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET 6539
+#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET 6540
+#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET 6541
+#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET 6542
+#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET 6543
+#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET 6544
+#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET 6545
+#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET 6546
+#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET 6547
+#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET 6548
+#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6549
+#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6550
+#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6551
+#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET 6552
+#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET 6553
+#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET 6554
+#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET 6555
+#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET 6556
+#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 6557
+#define PSWRQ2_REG_VF_BASE_RT_OFFSET 6558
+#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 6559
+#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6560
+#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6561
+#define PSWRQ2_REG_TGSRC_FIRST_ILT_RT_OFFSET 6562
+#define PSWRQ2_REG_RGSRC_FIRST_ILT_RT_OFFSET 6563
+#define PSWRQ2_REG_TGSRC_LAST_ILT_RT_OFFSET 6564
+#define PSWRQ2_REG_RGSRC_LAST_ILT_RT_OFFSET 6565
+#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6566
+#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 26414
+#define PGLUE_REG_B_VF_BASE_RT_OFFSET 32980
+#define PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET 32981
+#define PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET 32982
+#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 32983
+#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 32984
+#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 32985
+#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 32986
+#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 32987
+#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 32988
+#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 32989
+#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 32990
+#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 32991
+#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 32992
#define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416
-#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 29130
+#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 33408
#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 608
-#define QM_REG_MAXPQSIZE_0_RT_OFFSET 29738
-#define QM_REG_MAXPQSIZE_1_RT_OFFSET 29739
-#define QM_REG_MAXPQSIZE_2_RT_OFFSET 29740
-#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 29741
-#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 29742
-#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 29743
-#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 29744
-#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 29745
-#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 29746
-#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 29747
-#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 29748
-#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 29749
-#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 29750
-#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 29751
-#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 29752
-#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 29753
-#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 29754
-#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 29755
-#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 29756
-#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 29757
-#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 29758
-#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 29759
-#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 29760
-#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 29761
-#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 29762
-#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 29763
-#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 29764
-#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 29765
-#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 29766
-#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 29767
-#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 29768
-#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 29769
-#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 29770
-#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 29771
-#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 29772
-#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 29773
-#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 29774
-#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 29775
-#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 29776
-#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 29777
-#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 29778
-#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 29779
-#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 29780
-#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 29781
-#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 29782
-#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 29783
-#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 29784
-#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 29785
-#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 29786
-#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 29787
-#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 29788
-#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 29789
-#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 29790
-#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 29791
-#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 29792
-#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 29793
-#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 29794
-#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 29795
-#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 29796
-#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 29797
-#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 29798
-#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 29799
-#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 29800
-#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 29801
-#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 29802
-#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 29803
-#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 29804
-#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 29805
+#define QM_REG_MAXPQSIZE_0_RT_OFFSET 34016
+#define QM_REG_MAXPQSIZE_1_RT_OFFSET 34017
+#define QM_REG_MAXPQSIZE_2_RT_OFFSET 34018
+#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 34019
+#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 34020
+#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 34021
+#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 34022
+#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 34023
+#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 34024
+#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 34025
+#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 34026
+#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 34027
+#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 34028
+#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 34029
+#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 34030
+#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 34031
+#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 34032
+#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 34033
+#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 34034
+#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 34035
+#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 34036
+#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 34037
+#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 34038
+#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 34039
+#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 34040
+#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 34041
+#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 34042
+#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 34043
+#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 34044
+#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 34045
+#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 34046
+#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 34047
+#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 34048
+#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 34049
+#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 34050
+#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 34051
+#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 34052
+#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 34053
+#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 34054
+#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 34055
+#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 34056
+#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 34057
+#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 34058
+#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 34059
+#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 34060
+#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 34061
+#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 34062
+#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 34063
+#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 34064
+#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 34065
+#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 34066
+#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 34067
+#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 34068
+#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 34069
+#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 34070
+#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 34071
+#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 34072
+#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 34073
+#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 34074
+#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 34075
+#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 34076
+#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 34077
+#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 34078
+#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 34079
+#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 34080
+#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 34081
+#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 34082
+#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 34083
#define QM_REG_BASEADDROTHERPQ_RT_SIZE 128
-#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29933
-#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29934
-#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29935
-#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29936
-#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29937
-#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29938
-#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29939
-#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29940
-#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29941
-#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29942
-#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29943
-#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29944
-#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29945
-#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29946
-#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29947
-#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29948
-#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29949
-#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29950
-#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29951
-#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29952
-#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29953
-#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29954
-#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29955
-#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29956
-#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29957
-#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29958
-#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29959
-#define QM_REG_PQTX2PF_0_RT_OFFSET 29960
-#define QM_REG_PQTX2PF_1_RT_OFFSET 29961
-#define QM_REG_PQTX2PF_2_RT_OFFSET 29962
-#define QM_REG_PQTX2PF_3_RT_OFFSET 29963
-#define QM_REG_PQTX2PF_4_RT_OFFSET 29964
-#define QM_REG_PQTX2PF_5_RT_OFFSET 29965
-#define QM_REG_PQTX2PF_6_RT_OFFSET 29966
-#define QM_REG_PQTX2PF_7_RT_OFFSET 29967
-#define QM_REG_PQTX2PF_8_RT_OFFSET 29968
-#define QM_REG_PQTX2PF_9_RT_OFFSET 29969
-#define QM_REG_PQTX2PF_10_RT_OFFSET 29970
-#define QM_REG_PQTX2PF_11_RT_OFFSET 29971
-#define QM_REG_PQTX2PF_12_RT_OFFSET 29972
-#define QM_REG_PQTX2PF_13_RT_OFFSET 29973
-#define QM_REG_PQTX2PF_14_RT_OFFSET 29974
-#define QM_REG_PQTX2PF_15_RT_OFFSET 29975
-#define QM_REG_PQTX2PF_16_RT_OFFSET 29976
-#define QM_REG_PQTX2PF_17_RT_OFFSET 29977
-#define QM_REG_PQTX2PF_18_RT_OFFSET 29978
-#define QM_REG_PQTX2PF_19_RT_OFFSET 29979
-#define QM_REG_PQTX2PF_20_RT_OFFSET 29980
-#define QM_REG_PQTX2PF_21_RT_OFFSET 29981
-#define QM_REG_PQTX2PF_22_RT_OFFSET 29982
-#define QM_REG_PQTX2PF_23_RT_OFFSET 29983
-#define QM_REG_PQTX2PF_24_RT_OFFSET 29984
-#define QM_REG_PQTX2PF_25_RT_OFFSET 29985
-#define QM_REG_PQTX2PF_26_RT_OFFSET 29986
-#define QM_REG_PQTX2PF_27_RT_OFFSET 29987
-#define QM_REG_PQTX2PF_28_RT_OFFSET 29988
-#define QM_REG_PQTX2PF_29_RT_OFFSET 29989
-#define QM_REG_PQTX2PF_30_RT_OFFSET 29990
-#define QM_REG_PQTX2PF_31_RT_OFFSET 29991
-#define QM_REG_PQTX2PF_32_RT_OFFSET 29992
-#define QM_REG_PQTX2PF_33_RT_OFFSET 29993
-#define QM_REG_PQTX2PF_34_RT_OFFSET 29994
-#define QM_REG_PQTX2PF_35_RT_OFFSET 29995
-#define QM_REG_PQTX2PF_36_RT_OFFSET 29996
-#define QM_REG_PQTX2PF_37_RT_OFFSET 29997
-#define QM_REG_PQTX2PF_38_RT_OFFSET 29998
-#define QM_REG_PQTX2PF_39_RT_OFFSET 29999
-#define QM_REG_PQTX2PF_40_RT_OFFSET 30000
-#define QM_REG_PQTX2PF_41_RT_OFFSET 30001
-#define QM_REG_PQTX2PF_42_RT_OFFSET 30002
-#define QM_REG_PQTX2PF_43_RT_OFFSET 30003
-#define QM_REG_PQTX2PF_44_RT_OFFSET 30004
-#define QM_REG_PQTX2PF_45_RT_OFFSET 30005
-#define QM_REG_PQTX2PF_46_RT_OFFSET 30006
-#define QM_REG_PQTX2PF_47_RT_OFFSET 30007
-#define QM_REG_PQTX2PF_48_RT_OFFSET 30008
-#define QM_REG_PQTX2PF_49_RT_OFFSET 30009
-#define QM_REG_PQTX2PF_50_RT_OFFSET 30010
-#define QM_REG_PQTX2PF_51_RT_OFFSET 30011
-#define QM_REG_PQTX2PF_52_RT_OFFSET 30012
-#define QM_REG_PQTX2PF_53_RT_OFFSET 30013
-#define QM_REG_PQTX2PF_54_RT_OFFSET 30014
-#define QM_REG_PQTX2PF_55_RT_OFFSET 30015
-#define QM_REG_PQTX2PF_56_RT_OFFSET 30016
-#define QM_REG_PQTX2PF_57_RT_OFFSET 30017
-#define QM_REG_PQTX2PF_58_RT_OFFSET 30018
-#define QM_REG_PQTX2PF_59_RT_OFFSET 30019
-#define QM_REG_PQTX2PF_60_RT_OFFSET 30020
-#define QM_REG_PQTX2PF_61_RT_OFFSET 30021
-#define QM_REG_PQTX2PF_62_RT_OFFSET 30022
-#define QM_REG_PQTX2PF_63_RT_OFFSET 30023
-#define QM_REG_PQOTHER2PF_0_RT_OFFSET 30024
-#define QM_REG_PQOTHER2PF_1_RT_OFFSET 30025
-#define QM_REG_PQOTHER2PF_2_RT_OFFSET 30026
-#define QM_REG_PQOTHER2PF_3_RT_OFFSET 30027
-#define QM_REG_PQOTHER2PF_4_RT_OFFSET 30028
-#define QM_REG_PQOTHER2PF_5_RT_OFFSET 30029
-#define QM_REG_PQOTHER2PF_6_RT_OFFSET 30030
-#define QM_REG_PQOTHER2PF_7_RT_OFFSET 30031
-#define QM_REG_PQOTHER2PF_8_RT_OFFSET 30032
-#define QM_REG_PQOTHER2PF_9_RT_OFFSET 30033
-#define QM_REG_PQOTHER2PF_10_RT_OFFSET 30034
-#define QM_REG_PQOTHER2PF_11_RT_OFFSET 30035
-#define QM_REG_PQOTHER2PF_12_RT_OFFSET 30036
-#define QM_REG_PQOTHER2PF_13_RT_OFFSET 30037
-#define QM_REG_PQOTHER2PF_14_RT_OFFSET 30038
-#define QM_REG_PQOTHER2PF_15_RT_OFFSET 30039
-#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 30040
-#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 30041
-#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 30042
-#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 30043
-#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 30044
-#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 30045
-#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 30046
-#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 30047
-#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 30048
-#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 30049
-#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 30050
-#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 30051
-#define QM_REG_RLGLBLINCVAL_RT_OFFSET 30052
+#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 34211
+#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 34212
+#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 34213
+#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 34214
+#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 34215
+#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 34216
+#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 34217
+#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 34218
+#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 34219
+#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 34220
+#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 34221
+#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 34222
+#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 34223
+#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 34224
+#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 34225
+#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 34226
+#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 34227
+#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 34228
+#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 34229
+#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 34230
+#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 34231
+#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 34232
+#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 34233
+#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 34234
+#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 34235
+#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 34236
+#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 34237
+#define QM_REG_PQTX2PF_0_RT_OFFSET 34238
+#define QM_REG_PQTX2PF_1_RT_OFFSET 34239
+#define QM_REG_PQTX2PF_2_RT_OFFSET 34240
+#define QM_REG_PQTX2PF_3_RT_OFFSET 34241
+#define QM_REG_PQTX2PF_4_RT_OFFSET 34242
+#define QM_REG_PQTX2PF_5_RT_OFFSET 34243
+#define QM_REG_PQTX2PF_6_RT_OFFSET 34244
+#define QM_REG_PQTX2PF_7_RT_OFFSET 34245
+#define QM_REG_PQTX2PF_8_RT_OFFSET 34246
+#define QM_REG_PQTX2PF_9_RT_OFFSET 34247
+#define QM_REG_PQTX2PF_10_RT_OFFSET 34248
+#define QM_REG_PQTX2PF_11_RT_OFFSET 34249
+#define QM_REG_PQTX2PF_12_RT_OFFSET 34250
+#define QM_REG_PQTX2PF_13_RT_OFFSET 34251
+#define QM_REG_PQTX2PF_14_RT_OFFSET 34252
+#define QM_REG_PQTX2PF_15_RT_OFFSET 34253
+#define QM_REG_PQTX2PF_16_RT_OFFSET 34254
+#define QM_REG_PQTX2PF_17_RT_OFFSET 34255
+#define QM_REG_PQTX2PF_18_RT_OFFSET 34256
+#define QM_REG_PQTX2PF_19_RT_OFFSET 34257
+#define QM_REG_PQTX2PF_20_RT_OFFSET 34258
+#define QM_REG_PQTX2PF_21_RT_OFFSET 34259
+#define QM_REG_PQTX2PF_22_RT_OFFSET 34260
+#define QM_REG_PQTX2PF_23_RT_OFFSET 34261
+#define QM_REG_PQTX2PF_24_RT_OFFSET 34262
+#define QM_REG_PQTX2PF_25_RT_OFFSET 34263
+#define QM_REG_PQTX2PF_26_RT_OFFSET 34264
+#define QM_REG_PQTX2PF_27_RT_OFFSET 34265
+#define QM_REG_PQTX2PF_28_RT_OFFSET 34266
+#define QM_REG_PQTX2PF_29_RT_OFFSET 34267
+#define QM_REG_PQTX2PF_30_RT_OFFSET 34268
+#define QM_REG_PQTX2PF_31_RT_OFFSET 34269
+#define QM_REG_PQTX2PF_32_RT_OFFSET 34270
+#define QM_REG_PQTX2PF_33_RT_OFFSET 34271
+#define QM_REG_PQTX2PF_34_RT_OFFSET 34272
+#define QM_REG_PQTX2PF_35_RT_OFFSET 34273
+#define QM_REG_PQTX2PF_36_RT_OFFSET 34274
+#define QM_REG_PQTX2PF_37_RT_OFFSET 34275
+#define QM_REG_PQTX2PF_38_RT_OFFSET 34276
+#define QM_REG_PQTX2PF_39_RT_OFFSET 34277
+#define QM_REG_PQTX2PF_40_RT_OFFSET 34278
+#define QM_REG_PQTX2PF_41_RT_OFFSET 34279
+#define QM_REG_PQTX2PF_42_RT_OFFSET 34280
+#define QM_REG_PQTX2PF_43_RT_OFFSET 34281
+#define QM_REG_PQTX2PF_44_RT_OFFSET 34282
+#define QM_REG_PQTX2PF_45_RT_OFFSET 34283
+#define QM_REG_PQTX2PF_46_RT_OFFSET 34284
+#define QM_REG_PQTX2PF_47_RT_OFFSET 34285
+#define QM_REG_PQTX2PF_48_RT_OFFSET 34286
+#define QM_REG_PQTX2PF_49_RT_OFFSET 34287
+#define QM_REG_PQTX2PF_50_RT_OFFSET 34288
+#define QM_REG_PQTX2PF_51_RT_OFFSET 34289
+#define QM_REG_PQTX2PF_52_RT_OFFSET 34290
+#define QM_REG_PQTX2PF_53_RT_OFFSET 34291
+#define QM_REG_PQTX2PF_54_RT_OFFSET 34292
+#define QM_REG_PQTX2PF_55_RT_OFFSET 34293
+#define QM_REG_PQTX2PF_56_RT_OFFSET 34294
+#define QM_REG_PQTX2PF_57_RT_OFFSET 34295
+#define QM_REG_PQTX2PF_58_RT_OFFSET 34296
+#define QM_REG_PQTX2PF_59_RT_OFFSET 34297
+#define QM_REG_PQTX2PF_60_RT_OFFSET 34298
+#define QM_REG_PQTX2PF_61_RT_OFFSET 34299
+#define QM_REG_PQTX2PF_62_RT_OFFSET 34300
+#define QM_REG_PQTX2PF_63_RT_OFFSET 34301
+#define QM_REG_PQOTHER2PF_0_RT_OFFSET 34302
+#define QM_REG_PQOTHER2PF_1_RT_OFFSET 34303
+#define QM_REG_PQOTHER2PF_2_RT_OFFSET 34304
+#define QM_REG_PQOTHER2PF_3_RT_OFFSET 34305
+#define QM_REG_PQOTHER2PF_4_RT_OFFSET 34306
+#define QM_REG_PQOTHER2PF_5_RT_OFFSET 34307
+#define QM_REG_PQOTHER2PF_6_RT_OFFSET 34308
+#define QM_REG_PQOTHER2PF_7_RT_OFFSET 34309
+#define QM_REG_PQOTHER2PF_8_RT_OFFSET 34310
+#define QM_REG_PQOTHER2PF_9_RT_OFFSET 34311
+#define QM_REG_PQOTHER2PF_10_RT_OFFSET 34312
+#define QM_REG_PQOTHER2PF_11_RT_OFFSET 34313
+#define QM_REG_PQOTHER2PF_12_RT_OFFSET 34314
+#define QM_REG_PQOTHER2PF_13_RT_OFFSET 34315
+#define QM_REG_PQOTHER2PF_14_RT_OFFSET 34316
+#define QM_REG_PQOTHER2PF_15_RT_OFFSET 34317
+#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 34318
+#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 34319
+#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 34320
+#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 34321
+#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 34322
+#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 34323
+#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 34324
+#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 34325
+#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 34326
+#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 34327
+#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 34328
+#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 34329
+#define QM_REG_RLGLBLINCVAL_RT_OFFSET 34330
#define QM_REG_RLGLBLINCVAL_RT_SIZE 256
-#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 30308
+#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 34586
#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256
-#define QM_REG_RLGLBLCRD_RT_OFFSET 30564
+#define QM_REG_RLGLBLCRD_RT_OFFSET 34842
#define QM_REG_RLGLBLCRD_RT_SIZE 256
-#define QM_REG_RLGLBLENABLE_RT_OFFSET 30820
-#define QM_REG_RLPFPERIOD_RT_OFFSET 30821
-#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30822
-#define QM_REG_RLPFINCVAL_RT_OFFSET 30823
+#define QM_REG_RLGLBLENABLE_RT_OFFSET 35098
+#define QM_REG_RLPFPERIOD_RT_OFFSET 35099
+#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 35100
+#define QM_REG_RLPFINCVAL_RT_OFFSET 35101
#define QM_REG_RLPFINCVAL_RT_SIZE 16
-#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30839
+#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 35117
#define QM_REG_RLPFUPPERBOUND_RT_SIZE 16
-#define QM_REG_RLPFCRD_RT_OFFSET 30855
+#define QM_REG_RLPFCRD_RT_OFFSET 35133
#define QM_REG_RLPFCRD_RT_SIZE 16
-#define QM_REG_RLPFENABLE_RT_OFFSET 30871
-#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30872
-#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30873
+#define QM_REG_RLPFENABLE_RT_OFFSET 35149
+#define QM_REG_RLPFVOQENABLE_RT_OFFSET 35150
+#define QM_REG_WFQPFWEIGHT_RT_OFFSET 35151
#define QM_REG_WFQPFWEIGHT_RT_SIZE 16
-#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30889
+#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 35167
#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16
-#define QM_REG_WFQPFCRD_RT_OFFSET 30905
+#define QM_REG_WFQPFCRD_RT_OFFSET 35183
#define QM_REG_WFQPFCRD_RT_SIZE 256
-#define QM_REG_WFQPFENABLE_RT_OFFSET 31161
-#define QM_REG_WFQVPENABLE_RT_OFFSET 31162
-#define QM_REG_BASEADDRTXPQ_RT_OFFSET 31163
+#define QM_REG_WFQPFENABLE_RT_OFFSET 35439
+#define QM_REG_WFQVPENABLE_RT_OFFSET 35440
+#define QM_REG_BASEADDRTXPQ_RT_OFFSET 35441
#define QM_REG_BASEADDRTXPQ_RT_SIZE 512
-#define QM_REG_TXPQMAP_RT_OFFSET 31675
+#define QM_REG_TXPQMAP_RT_OFFSET 35953
#define QM_REG_TXPQMAP_RT_SIZE 512
-#define QM_REG_WFQVPWEIGHT_RT_OFFSET 32187
+#define QM_REG_WFQVPWEIGHT_RT_OFFSET 36465
#define QM_REG_WFQVPWEIGHT_RT_SIZE 512
-#define QM_REG_WFQVPCRD_RT_OFFSET 32699
+#define QM_REG_WFQVPCRD_RT_OFFSET 36977
#define QM_REG_WFQVPCRD_RT_SIZE 512
-#define QM_REG_WFQVPMAP_RT_OFFSET 33211
+#define QM_REG_WFQVPMAP_RT_OFFSET 37489
#define QM_REG_WFQVPMAP_RT_SIZE 512
-#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 33723
+#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 38001
#define QM_REG_WFQPFCRD_MSB_RT_SIZE 320
-#define QM_REG_VOQCRDLINE_RT_OFFSET 34043
+#define QM_REG_VOQCRDLINE_RT_OFFSET 38321
#define QM_REG_VOQCRDLINE_RT_SIZE 36
-#define QM_REG_VOQINITCRDLINE_RT_OFFSET 34079
+#define QM_REG_VOQINITCRDLINE_RT_OFFSET 38357
#define QM_REG_VOQINITCRDLINE_RT_SIZE 36
-#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 34115
-#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 34116
-#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 34117
-#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 34118
-#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 34119
-#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 34120
-#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 34121
-#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 34122
+#define QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET 38393
+#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 38394
+#define NIG_REG_BRB_GATE_DNTFWD_PORT_RT_OFFSET 38395
+#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 38396
+#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 38397
+#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 38398
+#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 38399
+#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 38400
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 38401
#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 34126
-#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 34130
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 38405
#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 34134
-#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 34135
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 38409
#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32
-#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 34167
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 38441
#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 34183
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 38457
#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 34199
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 38473
#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 34215
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 38489
#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16
-#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 34231
-#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 34232
-#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 34233
-#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 34234
-#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 34235
-#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 34236
-#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 34237
-#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 34238
-#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 34239
-#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 34240
-#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 34241
-#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 34242
-#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 34243
-#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 34244
-#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 34245
-#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 34246
-#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 34247
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 34248
-#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 34249
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 34250
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 34251
-#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 34252
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 34253
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 34254
-#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 34255
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 34256
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 34257
-#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 34258
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 34259
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 34260
-#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 34261
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 34262
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 34263
-#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 34264
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 34265
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 34266
-#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 34267
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 34268
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 34269
-#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 34270
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 34271
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 34272
-#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 34273
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 34274
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 34275
-#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 34276
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 34277
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 34278
-#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 34279
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 34280
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 34281
-#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 34282
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 34283
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 34284
-#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 34285
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 34286
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 34287
-#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 34288
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 34289
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 34290
-#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 34291
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 34292
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 34293
-#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 34294
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 34295
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 34296
-#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 34297
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 34298
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 34299
-#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 34300
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 34301
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 34302
-#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 34303
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 34304
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 34305
-#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 34306
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 34307
-#define XCM_REG_CON_PHY_Q3_RT_OFFSET 34308
+#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 38505
+#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 38506
+#define NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET 38507
+#define NIG_REG_PPF_TO_ENGINE_SEL_RT_SIZE 8
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_OFFSET 38515
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_SIZE 1024
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_OFFSET 39539
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_SIZE 512
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_OFFSET 40051
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_SIZE 512
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 40563
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 512
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_OFFSET 41075
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_SIZE 512
+#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_OFFSET 41587
+#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_SIZE 32
+#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 41619
+#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 41620
+#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 41621
+#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 41622
+#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 41623
+#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 41624
+#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 41625
+#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 41626
+#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 41627
+#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 41628
+#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 41629
+#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 41630
+#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 41631
+#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 41632
+#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 41633
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 41634
+#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 41635
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 41636
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 41637
+#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 41638
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 41639
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 41640
+#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 41641
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 41642
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 41643
+#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 41644
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 41645
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 41646
+#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 41647
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 41648
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 41649
+#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 41650
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 41651
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 41652
+#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 41653
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 41654
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 41655
+#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 41656
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 41657
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 41658
+#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 41659
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 41660
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 41661
+#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 41662
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 41663
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 41664
+#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 41665
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 41666
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 41667
+#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 41668
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 41669
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 41670
+#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 41671
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 41672
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 41673
+#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 41674
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 41675
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 41676
+#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 41677
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 41678
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 41679
+#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 41680
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 41681
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 41682
+#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 41683
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 41684
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 41685
+#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 41686
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 41687
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 41688
+#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 41689
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 41690
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 41691
+#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 41692
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 41693
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ20_RT_OFFSET 41694
+#define PBF_REG_BTB_GUARANTEED_VOQ20_RT_OFFSET 41695
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ20_RT_OFFSET 41696
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ21_RT_OFFSET 41697
+#define PBF_REG_BTB_GUARANTEED_VOQ21_RT_OFFSET 41698
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ21_RT_OFFSET 41699
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ22_RT_OFFSET 41700
+#define PBF_REG_BTB_GUARANTEED_VOQ22_RT_OFFSET 41701
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ22_RT_OFFSET 41702
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ23_RT_OFFSET 41703
+#define PBF_REG_BTB_GUARANTEED_VOQ23_RT_OFFSET 41704
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ23_RT_OFFSET 41705
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ24_RT_OFFSET 41706
+#define PBF_REG_BTB_GUARANTEED_VOQ24_RT_OFFSET 41707
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ24_RT_OFFSET 41708
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ25_RT_OFFSET 41709
+#define PBF_REG_BTB_GUARANTEED_VOQ25_RT_OFFSET 41710
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ25_RT_OFFSET 41711
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ26_RT_OFFSET 41712
+#define PBF_REG_BTB_GUARANTEED_VOQ26_RT_OFFSET 41713
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ26_RT_OFFSET 41714
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ27_RT_OFFSET 41715
+#define PBF_REG_BTB_GUARANTEED_VOQ27_RT_OFFSET 41716
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ27_RT_OFFSET 41717
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ28_RT_OFFSET 41718
+#define PBF_REG_BTB_GUARANTEED_VOQ28_RT_OFFSET 41719
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ28_RT_OFFSET 41720
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ29_RT_OFFSET 41721
+#define PBF_REG_BTB_GUARANTEED_VOQ29_RT_OFFSET 41722
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ29_RT_OFFSET 41723
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ30_RT_OFFSET 41724
+#define PBF_REG_BTB_GUARANTEED_VOQ30_RT_OFFSET 41725
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ30_RT_OFFSET 41726
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ31_RT_OFFSET 41727
+#define PBF_REG_BTB_GUARANTEED_VOQ31_RT_OFFSET 41728
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ31_RT_OFFSET 41729
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ32_RT_OFFSET 41730
+#define PBF_REG_BTB_GUARANTEED_VOQ32_RT_OFFSET 41731
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ32_RT_OFFSET 41732
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ33_RT_OFFSET 41733
+#define PBF_REG_BTB_GUARANTEED_VOQ33_RT_OFFSET 41734
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ33_RT_OFFSET 41735
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ34_RT_OFFSET 41736
+#define PBF_REG_BTB_GUARANTEED_VOQ34_RT_OFFSET 41737
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ34_RT_OFFSET 41738
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ35_RT_OFFSET 41739
+#define PBF_REG_BTB_GUARANTEED_VOQ35_RT_OFFSET 41740
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ35_RT_OFFSET 41741
+#define XCM_REG_CON_PHY_Q3_RT_OFFSET 41742
-#define RUNTIME_ARRAY_SIZE 34309
+#define RUNTIME_ARRAY_SIZE 41743
#endif /* __RT_DEFS_H__ */
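
The renumbered *_RT_OFFSET values above index the host-side runtime-init array, not the register file itself. A minimal sketch of how such an offset is typically consumed — assuming the STORE_RT_REG() accessor used elsewhere in ecore; the period value is illustrative only:

/* Hedged sketch: stage values in the runtime array at the given
 * offsets; the init tool later flushes them to the real QM registers.
 */
static void example_stage_rl_period(struct ecore_hwfn *p_hwfn)
{
	STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIOD_0_RT_OFFSET, 0x1000);
	STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIOD_1_RT_OFFSET, 0x1000);
}
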
diff --git a/drivers/net/qede/base/ecore_sp_api.h b/drivers/net/qede/base/ecore_sp_api.h
index c8e564f9..86e84964 100644
--- a/drivers/net/qede/base/ecore_sp_api.h
+++ b/drivers/net/qede/base/ecore_sp_api.h
@@ -49,6 +49,7 @@ enum _ecore_status_t ecore_eth_cqe_completion(struct ecore_hwfn *p_hwfn,
* for a physical function (PF).
*
* @param p_hwfn
+ * @param p_ptt
* @param p_tunn - pf update tunneling parameters
* @param comp_mode - completion mode
* @param p_comp_data - callback function
@@ -58,6 +59,7 @@ enum _ecore_status_t ecore_eth_cqe_completion(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t
ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
struct ecore_tunnel_info *p_tunn,
enum spq_mode comp_mode,
struct ecore_spq_comp_cb *p_comp_data);
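
With the PTT handle now explicit in the prototype, callers own its lifetime. A minimal hedged sketch of the expected call pattern (tunn_info is a hypothetical, already-filled struct ecore_tunnel_info):

/* ecore_ptt_acquire() may return NULL when no window is free,
 * so the result must be checked before use.
 */
struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);

if (p_ptt) {
	ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt, &tunn_info,
				    ECORE_SPQ_MODE_EBLOCK, OSAL_NULL);
	ecore_ptt_release(p_hwfn, p_ptt);
}
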
diff --git a/drivers/net/qede/base/ecore_sp_commands.c b/drivers/net/qede/base/ecore_sp_commands.c
index d6e4b9e0..7598e7a6 100644
--- a/drivers/net/qede/base/ecore_sp_commands.c
+++ b/drivers/net/qede/base/ecore_sp_commands.c
@@ -232,6 +232,7 @@ static void ecore_set_hw_tunn_mode(struct ecore_hwfn *p_hwfn,
}
static void ecore_set_hw_tunn_mode_port(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
struct ecore_tunnel_info *p_tunn)
{
if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
@@ -241,14 +242,14 @@ static void ecore_set_hw_tunn_mode_port(struct ecore_hwfn *p_hwfn,
}
if (p_tunn->vxlan_port.b_update_port)
- ecore_set_vxlan_dest_port(p_hwfn, p_hwfn->p_main_ptt,
+ ecore_set_vxlan_dest_port(p_hwfn, p_ptt,
p_tunn->vxlan_port.port);
if (p_tunn->geneve_port.b_update_port)
- ecore_set_geneve_dest_port(p_hwfn, p_hwfn->p_main_ptt,
+ ecore_set_geneve_dest_port(p_hwfn, p_ptt,
p_tunn->geneve_port.port);
- ecore_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt, p_tunn);
+ ecore_set_hw_tunn_mode(p_hwfn, p_ptt, p_tunn);
}
static void
@@ -293,9 +294,11 @@ ecore_tunn_set_pf_start_params(struct ecore_hwfn *p_hwfn,
&p_tun->ip_gre);
}
+#define ETH_P_8021Q 0x8100
+
enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
struct ecore_tunnel_info *p_tunn,
- enum ecore_mf_mode mode,
bool allow_npar_tx_switch)
{
struct pf_start_ramrod_data *p_ramrod = OSAL_NULL;
@@ -305,6 +308,7 @@ enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
struct ecore_sp_init_data init_data;
enum _ecore_status_t rc = ECORE_NOTIMPL;
u8 page_cnt;
+ int i;
/* update initial eq producer */
ecore_eq_prod_update(p_hwfn,
@@ -332,20 +336,26 @@ enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
p_ramrod->dont_log_ramrods = 0;
p_ramrod->log_type_mask = OSAL_CPU_TO_LE16(0x8f);
- switch (mode) {
- case ECORE_MF_DEFAULT:
- case ECORE_MF_NPAR:
- p_ramrod->mf_mode = MF_NPAR;
- break;
- case ECORE_MF_OVLAN:
+ if (OSAL_TEST_BIT(ECORE_MF_OVLAN_CLSS, &p_hwfn->p_dev->mf_bits))
p_ramrod->mf_mode = MF_OVLAN;
- break;
- default:
- DP_NOTICE(p_hwfn, true,
- "Unsupported MF mode, init as DEFAULT\n");
+ else
p_ramrod->mf_mode = MF_NPAR;
+
+ p_ramrod->outer_tag_config.outer_tag.tci =
+ OSAL_CPU_TO_LE16(p_hwfn->hw_info.ovlan);
+
+ if (OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits)) {
+ p_ramrod->outer_tag_config.outer_tag.tpid =
+ OSAL_CPU_TO_LE16(ETH_P_8021Q);
+ if (p_hwfn->ufp_info.pri_type == ECORE_UFP_PRI_OS)
+ p_ramrod->outer_tag_config.enable_stag_pri_change = 1;
+ else
+ p_ramrod->outer_tag_config.enable_stag_pri_change = 0;
+ p_ramrod->outer_tag_config.pri_map_valid = 1;
+ for (i = 0; i < 8; i++)
+ p_ramrod->outer_tag_config.inner_to_outer_pri_map[i] =
+ (u8)i;
}
- p_ramrod->outer_tag = p_hwfn->hw_info.ovlan;
/* Place EQ address in RAMROD */
DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
@@ -358,7 +368,8 @@ enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
ecore_tunn_set_pf_start_params(p_hwfn, p_tunn,
&p_ramrod->tunnel_config);
- if (IS_MF_SI(p_hwfn))
+ if (OSAL_TEST_BIT(ECORE_MF_INTER_PF_SWITCH,
+ &p_hwfn->p_dev->mf_bits))
p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch;
switch (p_hwfn->hw_info.personality) {
@@ -384,18 +395,20 @@ enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MINOR;
DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
- "Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n",
- sb, sb_index, p_ramrod->outer_tag);
+ "Setting event_ring_sb [id %04x index %02x], outer_tag.tpid [%d], outer_tag.tci [%d]\n",
+ sb, sb_index, p_ramrod->outer_tag_config.outer_tag.tpid,
+ p_ramrod->outer_tag_config.outer_tag.tci);
rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
if (p_tunn)
- ecore_set_hw_tunn_mode_port(p_hwfn, &p_hwfn->p_dev->tunnel);
+ ecore_set_hw_tunn_mode_port(p_hwfn, p_ptt,
+ &p_hwfn->p_dev->tunnel);
return rc;
}
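
The outer_tag.tci written in the ramrod above is the full 16-bit 802.1Q tag control field (PCP, DEI, VID). A hedged helper showing the layout — this is the generic IEEE 802.1Q encoding, not a qede-specific API:

/* Illustrative only: compose an 802.1Q TCI from priority (3 bits),
 * drop-eligible indicator (1 bit) and VLAN ID (12 bits).
 */
static u16 example_build_tci(u8 pcp, u8 dei, u16 vid)
{
	return (u16)(((pcp & 0x7) << 13) | ((dei & 0x1) << 12) |
		     (vid & 0xfff));
}
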
-enum _ecore_status_t ecore_sp_pf_update(struct ecore_hwfn *p_hwfn)
+enum _ecore_status_t ecore_sp_pf_update_dcbx(struct ecore_hwfn *p_hwfn)
{
struct ecore_spq_entry *p_ent = OSAL_NULL;
struct ecore_sp_init_data init_data;
@@ -419,6 +432,50 @@ enum _ecore_status_t ecore_sp_pf_update(struct ecore_hwfn *p_hwfn)
return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
+enum _ecore_status_t ecore_sp_pf_update_ufp(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_spq_entry *p_ent = OSAL_NULL;
+ struct ecore_sp_init_data init_data;
+ enum _ecore_status_t rc = ECORE_NOTIMPL;
+
+ /* Get SPQ entry */
+ OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+ init_data.cid = ecore_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = ECORE_SPQ_MODE_CB;
+
+ rc = ecore_sp_init_request(p_hwfn, &p_ent,
+ COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
+ &init_data);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ p_ent->ramrod.pf_update.update_enable_stag_pri_change = true;
+ if (p_hwfn->ufp_info.pri_type == ECORE_UFP_PRI_OS)
+ p_ent->ramrod.pf_update.enable_stag_pri_change = 1;
+ else
+ p_ent->ramrod.pf_update.enable_stag_pri_change = 0;
+
+ return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
+
+/* QM rate limiter resolution is 1.6Mbps */
+#define QM_RL_RESOLUTION(mb_val) ((mb_val) * 10 / 16)
+
+/* FW uses 1/64k to express gd */
+#define FW_GD_RESOLUTION(gd) (64 * 1024 / (gd))
+
+u16 ecore_sp_rl_mb_to_qm(u32 mb_val)
+{
+ return (u16)OSAL_MIN_T(u32, (u16)(~0U), QM_RL_RESOLUTION(mb_val));
+}
+
+u16 ecore_sp_rl_gd_denom(u32 gd)
+{
+ return gd ? (u16)OSAL_MIN_T(u32, (u16)(~0U), FW_GD_RESOLUTION(gd)) : 0;
+}
+
enum _ecore_status_t ecore_sp_rl_update(struct ecore_hwfn *p_hwfn,
struct ecore_rl_update_params *params)
{
@@ -450,21 +507,37 @@ enum _ecore_status_t ecore_sp_rl_update(struct ecore_hwfn *p_hwfn,
rl_update->rl_id_last = params->rl_id_last;
rl_update->rl_dc_qcn_flg = params->rl_dc_qcn_flg;
rl_update->rl_bc_rate = OSAL_CPU_TO_LE32(params->rl_bc_rate);
- rl_update->rl_max_rate = OSAL_CPU_TO_LE16(params->rl_max_rate);
- rl_update->rl_r_ai = OSAL_CPU_TO_LE16(params->rl_r_ai);
- rl_update->rl_r_hai = OSAL_CPU_TO_LE16(params->rl_r_hai);
- rl_update->dcqcn_g = OSAL_CPU_TO_LE16(params->dcqcn_g);
+ rl_update->rl_max_rate =
+ OSAL_CPU_TO_LE16(ecore_sp_rl_mb_to_qm(params->rl_max_rate));
+ rl_update->rl_r_ai =
+ OSAL_CPU_TO_LE16(ecore_sp_rl_mb_to_qm(params->rl_r_ai));
+ rl_update->rl_r_hai =
+ OSAL_CPU_TO_LE16(ecore_sp_rl_mb_to_qm(params->rl_r_hai));
+ rl_update->dcqcn_g =
+ OSAL_CPU_TO_LE16(ecore_sp_rl_gd_denom(params->dcqcn_gd));
rl_update->dcqcn_k_us = OSAL_CPU_TO_LE32(params->dcqcn_k_us);
- rl_update->dcqcn_timeuot_us = OSAL_CPU_TO_LE32(
- params->dcqcn_timeuot_us);
+ rl_update->dcqcn_timeuot_us =
+ OSAL_CPU_TO_LE32(params->dcqcn_timeuot_us);
rl_update->qcn_timeuot_us = OSAL_CPU_TO_LE32(params->qcn_timeuot_us);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "rl_params: qcn_update_param_flg %x, dcqcn_update_param_flg %x, rl_init_flg %x, rl_start_flg %x, rl_stop_flg %x, rl_id_first %x, rl_id_last %x, rl_dc_qcn_flg %x, rl_bc_rate %x, rl_max_rate %x, rl_r_ai %x, rl_r_hai %x, dcqcn_g %x, dcqcn_k_us %x, dcqcn_timeuot_us %x, qcn_timeuot_us %x\n",
+ rl_update->qcn_update_param_flg,
+ rl_update->dcqcn_update_param_flg,
+ rl_update->rl_init_flg, rl_update->rl_start_flg,
+ rl_update->rl_stop_flg, rl_update->rl_id_first,
+ rl_update->rl_id_last, rl_update->rl_dc_qcn_flg,
+ rl_update->rl_bc_rate, rl_update->rl_max_rate,
+ rl_update->rl_r_ai, rl_update->rl_r_hai,
+ rl_update->dcqcn_g, rl_update->dcqcn_k_us,
+ rl_update->dcqcn_timeuot_us, rl_update->qcn_timeuot_us);
+
return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
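
Worked example of the two conversions feeding the ramrod above (values illustrative): 10000 Mbps at the QM's 1.6 Mbps granularity becomes 10000 * 10 / 16 = 6250, and a caller gain denominator of 1024 (alpha gain 1/1024) becomes 64 * 1024 / 1024 = 64 in the firmware's 1/64K fixed-point units:

/* Illustrative values only; both helpers also clamp to u16. */
u16 qm_rate = ecore_sp_rl_mb_to_qm(10000); /* 10000 * 10 / 16 == 6250 */
u16 fw_gd = ecore_sp_rl_gd_denom(1024);    /* 64 * 1024 / 1024 == 64 */
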
/* Set pf update ramrod command params */
enum _ecore_status_t
ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
struct ecore_tunnel_info *p_tunn,
enum spq_mode comp_mode,
struct ecore_spq_comp_cb *p_comp_data)
@@ -505,7 +578,7 @@ ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn,
if (rc != ECORE_SUCCESS)
return rc;
- ecore_set_hw_tunn_mode_port(p_hwfn, &p_hwfn->p_dev->tunnel);
+ ecore_set_hw_tunn_mode_port(p_hwfn, p_ptt, &p_hwfn->p_dev->tunnel);
return rc;
}
@@ -551,3 +624,28 @@ enum _ecore_status_t ecore_sp_heartbeat_ramrod(struct ecore_hwfn *p_hwfn)
return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
+
+enum _ecore_status_t ecore_sp_pf_update_stag(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_spq_entry *p_ent = OSAL_NULL;
+ struct ecore_sp_init_data init_data;
+ enum _ecore_status_t rc = ECORE_NOTIMPL;
+
+ /* Get SPQ entry */
+ OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+ init_data.cid = ecore_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = ECORE_SPQ_MODE_CB;
+
+ rc = ecore_sp_init_request(p_hwfn, &p_ent,
+ COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
+ &init_data);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ p_ent->ramrod.pf_update.update_mf_vlan_flag = true;
+ p_ent->ramrod.pf_update.mf_vlan =
+ OSAL_CPU_TO_LE16(p_hwfn->hw_info.ovlan);
+
+ return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
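
A hedged sketch of a plausible call site for the new ramrod — e.g. after management firmware reports a changed S-tag; the notification path and new_stag are assumptions here, only the ramrod call itself comes from this patch:

/* Assumed flow: hw_info.ovlan has already been refreshed, then the
 * ramrod propagates it to the firmware.
 */
p_hwfn->hw_info.ovlan = new_stag;	/* new_stag: hypothetical input */
rc = ecore_sp_pf_update_stag(p_hwfn);
if (rc != ECORE_SUCCESS)
	DP_NOTICE(p_hwfn, false, "Failed to update S-tag\n");
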
diff --git a/drivers/net/qede/base/ecore_sp_commands.h b/drivers/net/qede/base/ecore_sp_commands.h
index 33e31e42..98009c65 100644
--- a/drivers/net/qede/base/ecore_sp_commands.h
+++ b/drivers/net/qede/base/ecore_sp_commands.h
@@ -59,8 +59,8 @@ enum _ecore_status_t ecore_sp_init_request(struct ecore_hwfn *p_hwfn,
* to the internal RAM of the UStorm by the Function Start Ramrod.
*
* @param p_hwfn
+ * @param p_ptt
* @param p_tunn - pf start tunneling configuration
- * @param mode
* @param allow_npar_tx_switch - npar tx switching to be used
* for vports configured for tx-switching.
*
@@ -68,8 +68,8 @@ enum _ecore_status_t ecore_sp_init_request(struct ecore_hwfn *p_hwfn,
*/
enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
struct ecore_tunnel_info *p_tunn,
- enum ecore_mf_mode mode,
bool allow_npar_tx_switch);
/**
@@ -85,7 +85,7 @@ enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
* @return enum _ecore_status_t
*/
-enum _ecore_status_t ecore_sp_pf_update(struct ecore_hwfn *p_hwfn);
+enum _ecore_status_t ecore_sp_pf_update_dcbx(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_sp_pf_stop - PF Function Stop Ramrod
@@ -123,10 +123,10 @@ struct ecore_rl_update_params {
u8 rl_id_last;
u8 rl_dc_qcn_flg; /* If set, RL will used for DCQCN */
u32 rl_bc_rate; /* Byte Counter Limit */
- u16 rl_max_rate; /* Maximum rate in 1.6 Mbps resolution */
- u16 rl_r_ai; /* Active increase rate */
- u16 rl_r_hai; /* Hyper active increase rate */
- u16 dcqcn_g; /* DCQCN Alpha update gain in 1/64K resolution */
+	u32 rl_max_rate; /* Maximum rate in Mbps */
+ u32 rl_r_ai; /* Active increase rate */
+ u32 rl_r_hai; /* Hyper active increase rate */
+ u32 dcqcn_gd; /* DCQCN Alpha update gain */
u32 dcqcn_k_us; /* DCQCN Alpha update interval */
u32 dcqcn_timeuot_us;
u32 qcn_timeuot_us;
@@ -143,4 +143,23 @@ struct ecore_rl_update_params {
enum _ecore_status_t ecore_sp_rl_update(struct ecore_hwfn *p_hwfn,
struct ecore_rl_update_params *params);
+/**
+ * @brief ecore_sp_pf_update_stag - PF STAG value update Ramrod
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_sp_pf_update_stag(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief ecore_sp_pf_update_ufp - PF ufp update Ramrod
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_sp_pf_update_ufp(struct ecore_hwfn *p_hwfn);
+
#endif /*__ECORE_SP_COMMANDS_H__*/
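
A minimal hedged sketch of filling the widened parameters; the flag and rate values are illustrative, and rates are now plain Mbps converted to QM units internally by ecore_sp_rl_update():

struct ecore_rl_update_params params;

OSAL_MEMSET(&params, 0, sizeof(params));
params.rl_init_flg = 1;			/* illustrative flags */
params.rl_start_flg = 1;
params.rl_id_first = 0;
params.rl_id_last = 7;
params.rl_max_rate = 25000;		/* 25 Gbps, in Mbps */
rc = ecore_sp_rl_update(p_hwfn, &params);
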
diff --git a/drivers/net/qede/base/ecore_spq.c b/drivers/net/qede/base/ecore_spq.c
index 3c1d05b3..70ffa8cd 100644
--- a/drivers/net/qede/base/ecore_spq.c
+++ b/drivers/net/qede/base/ecore_spq.c
@@ -36,9 +36,8 @@
/***************************************************************************
* Blocking Imp. (BLOCK/EBLOCK mode)
***************************************************************************/
-static void ecore_spq_blocking_cb(struct ecore_hwfn *p_hwfn,
- void *cookie,
- union event_ring_data *data,
+static void ecore_spq_blocking_cb(struct ecore_hwfn *p_hwfn, void *cookie,
+ union event_ring_data OSAL_UNUSED * data,
u8 fw_return_code)
{
struct ecore_spq_comp_done *comp_done;
@@ -87,6 +86,7 @@ static enum _ecore_status_t ecore_spq_block(struct ecore_hwfn *p_hwfn,
u8 *p_fw_ret, bool skip_quick_poll)
{
struct ecore_spq_comp_done *comp_done;
+ struct ecore_ptt *p_ptt;
enum _ecore_status_t rc;
/* A relatively short polling period w/o sleeping, to allow the FW to
@@ -103,8 +103,13 @@ static enum _ecore_status_t ecore_spq_block(struct ecore_hwfn *p_hwfn,
if (rc == ECORE_SUCCESS)
return ECORE_SUCCESS;
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return ECORE_AGAIN;
+
DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
- rc = ecore_mcp_drain(p_hwfn, p_hwfn->p_main_ptt);
+ rc = ecore_mcp_drain(p_hwfn, p_ptt);
+ ecore_ptt_release(p_hwfn, p_ptt);
if (rc != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, true, "MCP drain failed\n");
goto err;
@@ -173,10 +178,10 @@ ecore_spq_fill_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry *p_ent)
static void ecore_spq_hw_initialize(struct ecore_hwfn *p_hwfn,
struct ecore_spq *p_spq)
{
+ struct e4_core_conn_context *p_cxt;
struct ecore_cxt_info cxt_info;
- struct core_conn_context *p_cxt;
- enum _ecore_status_t rc;
u16 physical_q;
+ enum _ecore_status_t rc;
cxt_info.iid = p_spq->cid;
@@ -225,9 +230,9 @@ static enum _ecore_status_t ecore_spq_hw_post(struct ecore_hwfn *p_hwfn,
struct ecore_spq_entry *p_ent)
{
struct ecore_chain *p_chain = &p_hwfn->p_spq->chain;
+ struct core_db_data *p_db_data = &p_spq->db_data;
u16 echo = ecore_chain_get_prod_idx(p_chain);
struct slow_path_element *elem;
- struct core_db_data db;
p_ent->elem.hdr.echo = OSAL_CPU_TO_LE16(echo);
elem = ecore_chain_produce(p_chain);
@@ -236,31 +241,24 @@ static enum _ecore_status_t ecore_spq_hw_post(struct ecore_hwfn *p_hwfn,
return ECORE_INVAL;
}
- *elem = p_ent->elem; /* struct assignment */
+ *elem = p_ent->elem; /* Struct assignment */
- /* send a doorbell on the slow hwfn session */
- OSAL_MEMSET(&db, 0, sizeof(db));
- SET_FIELD(db.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
- SET_FIELD(db.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
- SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL,
- DQ_XCM_CORE_SPQ_PROD_CMD);
- db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
- db.spq_prod = OSAL_CPU_TO_LE16(ecore_chain_get_prod_idx(p_chain));
+ p_db_data->spq_prod =
+ OSAL_CPU_TO_LE16(ecore_chain_get_prod_idx(p_chain));
- /* make sure the SPQE is updated before the doorbell */
+ /* Make sure the SPQE is updated before the doorbell */
OSAL_WMB(p_hwfn->p_dev);
- DOORBELL(p_hwfn, DB_ADDR(p_spq->cid, DQ_DEMS_LEGACY),
- *(u32 *)&db);
+ DOORBELL(p_hwfn, p_spq->db_addr_offset, *(u32 *)p_db_data);
- /* make sure doorbell is rang */
+	/* Make sure the doorbell was rung */
OSAL_WMB(p_hwfn->p_dev);
DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
"Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x"
" agg_params: %02x, prod: %04x\n",
- DB_ADDR(p_spq->cid, DQ_DEMS_LEGACY), p_spq->cid, db.params,
- db.agg_flags, ecore_chain_get_prod_idx(p_chain));
+ p_spq->db_addr_offset, p_spq->cid, p_db_data->params,
+ p_db_data->agg_flags, ecore_chain_get_prod_idx(p_chain));
return ECORE_SUCCESS;
}
@@ -273,12 +271,16 @@ static enum _ecore_status_t
ecore_async_event_completion(struct ecore_hwfn *p_hwfn,
struct event_ring_entry *p_eqe)
{
- switch (p_eqe->protocol_id) {
- case PROTOCOLID_COMMON:
- return ecore_sriov_eqe_event(p_hwfn,
- p_eqe->opcode,
- p_eqe->echo, &p_eqe->data);
- default:
+ ecore_spq_async_comp_cb cb;
+
+ if (!p_hwfn->p_spq || (p_eqe->protocol_id >= MAX_PROTOCOL_TYPE))
+ return ECORE_INVAL;
+
+ cb = p_hwfn->p_spq->async_comp_cb[p_eqe->protocol_id];
+ if (cb) {
+ return cb(p_hwfn, p_eqe->opcode, p_eqe->echo,
+ &p_eqe->data, p_eqe->fw_return_code);
+ } else {
DP_NOTICE(p_hwfn,
true, "Unknown Async completion for protocol: %d\n",
p_eqe->protocol_id);
@@ -286,6 +288,28 @@ ecore_async_event_completion(struct ecore_hwfn *p_hwfn,
}
}
+enum _ecore_status_t
+ecore_spq_register_async_cb(struct ecore_hwfn *p_hwfn,
+ enum protocol_type protocol_id,
+ ecore_spq_async_comp_cb cb)
+{
+ if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
+ return ECORE_INVAL;
+
+ p_hwfn->p_spq->async_comp_cb[protocol_id] = cb;
+ return ECORE_SUCCESS;
+}
+
+void
+ecore_spq_unregister_async_cb(struct ecore_hwfn *p_hwfn,
+ enum protocol_type protocol_id)
+{
+ if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
+ return;
+
+ p_hwfn->p_spq->async_comp_cb[protocol_id] = OSAL_NULL;
+}
+
/***************************************************************************
* EQ API
***************************************************************************/
@@ -450,8 +474,11 @@ void ecore_spq_setup(struct ecore_hwfn *p_hwfn)
{
struct ecore_spq *p_spq = p_hwfn->p_spq;
struct ecore_spq_entry *p_virt = OSAL_NULL;
+ struct core_db_data *p_db_data;
+ void OSAL_IOMEM *db_addr;
dma_addr_t p_phys = 0;
u32 i, capacity;
+ enum _ecore_status_t rc;
OSAL_LIST_INIT(&p_spq->pending);
OSAL_LIST_INIT(&p_spq->completion_pending);
@@ -489,6 +516,24 @@ void ecore_spq_setup(struct ecore_hwfn *p_hwfn)
/* reset the chain itself */
ecore_chain_reset(&p_spq->chain);
+
+ /* Initialize the address/data of the SPQ doorbell */
+ p_spq->db_addr_offset = DB_ADDR(p_spq->cid, DQ_DEMS_LEGACY);
+ p_db_data = &p_spq->db_data;
+ OSAL_MEM_ZERO(p_db_data, sizeof(*p_db_data));
+ SET_FIELD(p_db_data->params, CORE_DB_DATA_DEST, DB_DEST_XCM);
+ SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_MAX);
+ SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_VAL_SEL,
+ DQ_XCM_CORE_SPQ_PROD_CMD);
+ p_db_data->agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
+
+ /* Register the SPQ doorbell with the doorbell recovery mechanism */
+ db_addr = (void *)((u8 *)p_hwfn->doorbells + p_spq->db_addr_offset);
+ rc = ecore_db_recovery_add(p_hwfn->p_dev, db_addr, &p_spq->db_data,
+ DB_REC_WIDTH_32B, DB_REC_KERNEL);
+ if (rc != ECORE_SUCCESS)
+ DP_INFO(p_hwfn,
+ "Failed to register the SPQ doorbell with the doorbell recovery mechanism\n");
}
enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn)
@@ -530,7 +575,9 @@ enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn)
p_spq->p_virt = p_virt;
p_spq->p_phys = p_phys;
+#ifdef CONFIG_ECORE_LOCK_ALLOC
OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_spq->lock);
+#endif
p_hwfn->p_spq = p_spq;
return ECORE_SUCCESS;
@@ -544,11 +591,16 @@ spq_allocate_fail:
void ecore_spq_free(struct ecore_hwfn *p_hwfn)
{
struct ecore_spq *p_spq = p_hwfn->p_spq;
+ void OSAL_IOMEM *db_addr;
u32 capacity;
if (!p_spq)
return;
+ /* Delete the SPQ doorbell from the doorbell recovery mechanism */
+ db_addr = (void *)((u8 *)p_hwfn->doorbells + p_spq->db_addr_offset);
+ ecore_db_recovery_del(p_hwfn->p_dev, db_addr, &p_spq->db_data);
+
if (p_spq->p_virt) {
capacity = ecore_chain_get_capacity(&p_spq->chain);
OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
@@ -559,7 +611,10 @@ void ecore_spq_free(struct ecore_hwfn *p_hwfn)
}
ecore_chain_free(p_hwfn->p_dev, &p_spq->chain);
+#ifdef CONFIG_ECORE_LOCK_ALLOC
OSAL_SPIN_LOCK_DEALLOC(&p_spq->lock);
+#endif
+
OSAL_FREE(p_hwfn->p_dev, p_spq);
}
diff --git a/drivers/net/qede/base/ecore_spq.h b/drivers/net/qede/base/ecore_spq.h
index e530f834..526cff08 100644
--- a/drivers/net/qede/base/ecore_spq.h
+++ b/drivers/net/qede/base/ecore_spq.h
@@ -86,6 +86,22 @@ struct ecore_consq {
struct ecore_chain chain;
};
+typedef enum _ecore_status_t
+(*ecore_spq_async_comp_cb)(struct ecore_hwfn *p_hwfn,
+ u8 opcode,
+ u16 echo,
+ union event_ring_data *data,
+ u8 fw_return_code);
+
+enum _ecore_status_t
+ecore_spq_register_async_cb(struct ecore_hwfn *p_hwfn,
+ enum protocol_type protocol_id,
+ ecore_spq_async_comp_cb cb);
+
+void
+ecore_spq_unregister_async_cb(struct ecore_hwfn *p_hwfn,
+ enum protocol_type protocol_id);
+
struct ecore_spq {
osal_spinlock_t lock;
@@ -124,6 +140,10 @@ struct ecore_spq {
u32 comp_count;
u32 cid;
+
+ u32 db_addr_offset;
+ struct core_db_data db_data;
+ ecore_spq_async_comp_cb async_comp_cb[MAX_PROTOCOL_TYPE];
};
struct ecore_port;
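
A hedged sketch of a protocol module wiring itself into the dispatch table above; the SR-IOV changes later in this patch do exactly this for PROTOCOLID_COMMON:

static enum _ecore_status_t
example_async_event(struct ecore_hwfn *p_hwfn, u8 opcode, u16 echo,
		    union event_ring_data *data, u8 fw_return_code)
{
	/* Decode the EQ element for this protocol */
	return ECORE_SUCCESS;
}

/* At alloc time: */
ecore_spq_register_async_cb(p_hwfn, PROTOCOLID_COMMON,
			    example_async_event);
/* At free time: */
ecore_spq_unregister_async_cb(p_hwfn, PROTOCOLID_COMMON);
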
diff --git a/drivers/net/qede/base/ecore_sriov.c b/drivers/net/qede/base/ecore_sriov.c
index db2873e7..b1e26d6f 100644
--- a/drivers/net/qede/base/ecore_sriov.c
+++ b/drivers/net/qede/base/ecore_sriov.c
@@ -27,6 +27,12 @@
#include "ecore_init_fw_funcs.h"
#include "ecore_sp_commands.h"
+static enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
+ u8 opcode,
+ __le16 echo,
+ union event_ring_data *data,
+ u8 fw_return_code);
+
const char *ecore_channel_tlvs_string[] = {
"CHANNEL_TLV_NONE", /* ends tlv sequence */
"CHANNEL_TLV_ACQUIRE",
@@ -53,9 +59,26 @@ const char *ecore_channel_tlvs_string[] = {
"CHANNEL_TLV_VPORT_UPDATE_SGE_TPA",
"CHANNEL_TLV_UPDATE_TUNN_PARAM",
"CHANNEL_TLV_COALESCE_UPDATE",
+ "CHANNEL_TLV_QID",
+ "CHANNEL_TLV_COALESCE_READ",
"CHANNEL_TLV_MAX"
};
+static u8 ecore_vf_calculate_legacy(struct ecore_vf_info *p_vf)
+{
+ u8 legacy = 0;
+
+ if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
+ ETH_HSI_VER_NO_PKT_LEN_TUNN)
+ legacy |= ECORE_QCID_LEGACY_VF_RX_PROD;
+
+ if (!(p_vf->acquire.vfdev_info.capabilities &
+ VFPF_ACQUIRE_CAP_QUEUE_QIDS))
+ legacy |= ECORE_QCID_LEGACY_VF_CID;
+
+ return legacy;
+}
+
/* IOV ramrods */
static enum _ecore_status_t ecore_sp_vf_start(struct ecore_hwfn *p_hwfn,
struct ecore_vf_info *p_vf)
@@ -193,9 +216,7 @@ struct ecore_vf_info *ecore_iov_get_vf_info(struct ecore_hwfn *p_hwfn,
}
static struct ecore_queue_cid *
-ecore_iov_get_vf_rx_queue_cid(struct ecore_hwfn *p_hwfn,
- struct ecore_vf_info *p_vf,
- struct ecore_vf_queue *p_queue)
+ecore_iov_get_vf_rx_queue_cid(struct ecore_vf_queue *p_queue)
{
int i;
@@ -214,8 +235,7 @@ enum ecore_iov_validate_q_mode {
ECORE_IOV_VALIDATE_Q_DISABLE,
};
-static bool ecore_iov_validate_queue_mode(struct ecore_hwfn *p_hwfn,
- struct ecore_vf_info *p_vf,
+static bool ecore_iov_validate_queue_mode(struct ecore_vf_info *p_vf,
u16 qid,
enum ecore_iov_validate_q_mode mode,
bool b_is_tx)
@@ -257,8 +277,7 @@ static bool ecore_iov_validate_rxq(struct ecore_hwfn *p_hwfn,
return false;
}
- return ecore_iov_validate_queue_mode(p_hwfn, p_vf, rx_qid,
- mode, false);
+ return ecore_iov_validate_queue_mode(p_vf, rx_qid, mode, false);
}
static bool ecore_iov_validate_txq(struct ecore_hwfn *p_hwfn,
@@ -274,8 +293,7 @@ static bool ecore_iov_validate_txq(struct ecore_hwfn *p_hwfn,
return false;
}
- return ecore_iov_validate_queue_mode(p_hwfn, p_vf, tx_qid,
- mode, true);
+ return ecore_iov_validate_queue_mode(p_vf, tx_qid, mode, true);
}
static bool ecore_iov_validate_sb(struct ecore_hwfn *p_hwfn,
@@ -297,13 +315,12 @@ static bool ecore_iov_validate_sb(struct ecore_hwfn *p_hwfn,
}
/* Is there at least 1 queue open? */
-static bool ecore_iov_validate_active_rxq(struct ecore_hwfn *p_hwfn,
- struct ecore_vf_info *p_vf)
+static bool ecore_iov_validate_active_rxq(struct ecore_vf_info *p_vf)
{
u8 i;
for (i = 0; i < p_vf->num_rxqs; i++)
- if (ecore_iov_validate_queue_mode(p_hwfn, p_vf, i,
+ if (ecore_iov_validate_queue_mode(p_vf, i,
ECORE_IOV_VALIDATE_Q_ENABLE,
false))
return true;
@@ -311,13 +328,12 @@ static bool ecore_iov_validate_active_rxq(struct ecore_hwfn *p_hwfn,
return false;
}
-static bool ecore_iov_validate_active_txq(struct ecore_hwfn *p_hwfn,
- struct ecore_vf_info *p_vf)
+static bool ecore_iov_validate_active_txq(struct ecore_vf_info *p_vf)
{
u8 i;
for (i = 0; i < p_vf->num_txqs; i++)
- if (ecore_iov_validate_queue_mode(p_hwfn, p_vf, i,
+ if (ecore_iov_validate_queue_mode(p_vf, i,
ECORE_IOV_VALIDATE_Q_ENABLE,
true))
return true;
@@ -325,19 +341,6 @@ static bool ecore_iov_validate_active_txq(struct ecore_hwfn *p_hwfn,
return false;
}
-/* TODO - this is linux crc32; Need a way to ifdef it out for linux */
-u32 ecore_crc32(u32 crc, u8 *ptr, u32 length)
-{
- int i;
-
- while (length--) {
- crc ^= *ptr++;
- for (i = 0; i < 8; i++)
- crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
- }
- return crc;
-}
-
enum _ecore_status_t ecore_iov_post_vf_bulletin(struct ecore_hwfn *p_hwfn,
int vfid,
struct ecore_ptt *p_ptt)
@@ -359,8 +362,8 @@ enum _ecore_status_t ecore_iov_post_vf_bulletin(struct ecore_hwfn *p_hwfn,
/* Increment bulletin board version and compute crc */
p_bulletin->version++;
- p_bulletin->crc = ecore_crc32(0, (u8 *)p_bulletin + crc_size,
- p_vf->bulletin.size - crc_size);
+ p_bulletin->crc = OSAL_CRC32(0, (u8 *)p_bulletin + crc_size,
+ p_vf->bulletin.size - crc_size);
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
"Posting Bulletin 0x%08x to VF[%d] (CRC 0x%08x)\n",
@@ -442,33 +445,6 @@ static enum _ecore_status_t ecore_iov_pci_cfg_info(struct ecore_dev *p_dev)
return ECORE_SUCCESS;
}
-static void ecore_iov_clear_vf_igu_blocks(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt)
-{
- struct ecore_igu_block *p_sb;
- u16 sb_id;
- u32 val;
-
- if (!p_hwfn->hw_info.p_igu_info) {
- DP_ERR(p_hwfn,
- "ecore_iov_clear_vf_igu_blocks IGU Info not inited\n");
- return;
- }
-
- for (sb_id = 0;
- sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev); sb_id++) {
- p_sb = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];
- if ((p_sb->status & ECORE_IGU_STATUS_FREE) &&
- !(p_sb->status & ECORE_IGU_STATUS_PF)) {
- val = ecore_rd(p_hwfn, p_ptt,
- IGU_REG_MAPPING_MEMORY + sb_id * 4);
- SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
- ecore_wr(p_hwfn, p_ptt,
- IGU_REG_MAPPING_MEMORY + 4 * sb_id, val);
- }
- }
-}
-
static void ecore_iov_setup_vfdb(struct ecore_hwfn *p_hwfn)
{
struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;
@@ -621,20 +597,24 @@ enum _ecore_status_t ecore_iov_alloc(struct ecore_hwfn *p_hwfn)
p_hwfn->pf_iov_info = p_sriov;
+ ecore_spq_register_async_cb(p_hwfn, PROTOCOLID_COMMON,
+ ecore_sriov_eqe_event);
+
return ecore_iov_allocate_vfdb(p_hwfn);
}
-void ecore_iov_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+void ecore_iov_setup(struct ecore_hwfn *p_hwfn)
{
if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn))
return;
ecore_iov_setup_vfdb(p_hwfn);
- ecore_iov_clear_vf_igu_blocks(p_hwfn, p_ptt);
}
void ecore_iov_free(struct ecore_hwfn *p_hwfn)
{
+ ecore_spq_unregister_async_cb(p_hwfn, PROTOCOLID_COMMON);
+
if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
ecore_iov_free_vfdb(p_hwfn);
OSAL_FREE(p_hwfn->p_dev, p_hwfn->pf_iov_info);
@@ -843,11 +823,52 @@ static void ecore_iov_vf_igu_set_int(struct ecore_hwfn *p_hwfn,
}
static enum _ecore_status_t
+ecore_iov_enable_vf_access_msix(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u8 abs_vf_id,
+ u8 num_sbs)
+{
+ u8 current_max = 0;
+ int i;
+
+ /* If client overrides this, don't do anything */
+ if (p_hwfn->p_dev->b_dont_override_vf_msix)
+ return ECORE_SUCCESS;
+
+ /* For AH onward, configuration is per-PF. Find maximum of all
+ * the currently enabled child VFs, and set the number to be that.
+ */
+ if (!ECORE_IS_BB(p_hwfn->p_dev)) {
+ ecore_for_each_vf(p_hwfn, i) {
+ struct ecore_vf_info *p_vf;
+
+ p_vf = ecore_iov_get_vf_info(p_hwfn, (u16)i, true);
+ if (!p_vf)
+ continue;
+
+ current_max = OSAL_MAX_T(u8, current_max,
+ p_vf->num_sbs);
+ }
+ }
+
+ if (num_sbs > current_max)
+ return ecore_mcp_config_vf_msix(p_hwfn, p_ptt,
+ abs_vf_id, num_sbs);
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
ecore_iov_enable_vf_access(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, struct ecore_vf_info *vf)
{
u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;
- enum _ecore_status_t rc;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ /* It's possible VF was previously considered malicious -
+ * clear the indication even if we're only going to disable VF.
+ */
+ vf->b_malicious = false;
if (vf->to_disable)
return ECORE_SUCCESS;
@@ -861,11 +882,8 @@ ecore_iov_enable_vf_access(struct ecore_hwfn *p_hwfn,
ecore_iov_vf_igu_reset(p_hwfn, p_ptt, vf);
- /* It's possible VF was previously considered malicious */
- vf->b_malicious = false;
-
- rc = ecore_mcp_config_vf_msix(p_hwfn, p_ptt,
- vf->abs_vf_id, vf->num_sbs);
+ rc = ecore_iov_enable_vf_access_msix(p_hwfn, p_ptt,
+ vf->abs_vf_id, vf->num_sbs);
if (rc != ECORE_SUCCESS)
return rc;
@@ -934,46 +952,38 @@ static u8 ecore_iov_alloc_vf_igu_sbs(struct ecore_hwfn *p_hwfn,
struct ecore_vf_info *vf,
u16 num_rx_queues)
{
- struct ecore_igu_block *igu_blocks;
- int qid = 0, igu_id = 0;
+ struct ecore_igu_block *p_block;
+ struct cau_sb_entry sb_entry;
+ int qid = 0;
u32 val = 0;
- igu_blocks = p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks;
-
- if (num_rx_queues > p_hwfn->hw_info.p_igu_info->free_blks)
- num_rx_queues = p_hwfn->hw_info.p_igu_info->free_blks;
-
- p_hwfn->hw_info.p_igu_info->free_blks -= num_rx_queues;
+ if (num_rx_queues > p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov)
+ num_rx_queues =
+ (u16)p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov;
+ p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov -= num_rx_queues;
SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);
SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);
SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0);
- while ((qid < num_rx_queues) &&
- (igu_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev))) {
- if (igu_blocks[igu_id].status & ECORE_IGU_STATUS_FREE) {
- struct cau_sb_entry sb_entry;
-
- vf->igu_sbs[qid] = (u16)igu_id;
- igu_blocks[igu_id].status &= ~ECORE_IGU_STATUS_FREE;
-
- SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);
-
- ecore_wr(p_hwfn, p_ptt,
- IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id,
- val);
-
- /* Configure igu sb in CAU which were marked valid */
- ecore_init_cau_sb_entry(p_hwfn, &sb_entry,
- p_hwfn->rel_pf_id,
- vf->abs_vf_id, 1);
- ecore_dmae_host2grc(p_hwfn, p_ptt,
- (u64)(osal_uintptr_t)&sb_entry,
- CAU_REG_SB_VAR_MEMORY +
- igu_id * sizeof(u64), 2, 0);
- qid++;
- }
- igu_id++;
+ for (qid = 0; qid < num_rx_queues; qid++) {
+ p_block = ecore_get_igu_free_sb(p_hwfn, false);
+ vf->igu_sbs[qid] = p_block->igu_sb_id;
+ p_block->status &= ~ECORE_IGU_STATUS_FREE;
+ SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);
+
+ ecore_wr(p_hwfn, p_ptt,
+ IGU_REG_MAPPING_MEMORY +
+ sizeof(u32) * p_block->igu_sb_id, val);
+
+ /* Configure igu sb in CAU which were marked valid */
+ ecore_init_cau_sb_entry(p_hwfn, &sb_entry,
+ p_hwfn->rel_pf_id,
+ vf->abs_vf_id, 1);
+ ecore_dmae_host2grc(p_hwfn, p_ptt,
+ (u64)(osal_uintptr_t)&sb_entry,
+ CAU_REG_SB_VAR_MEMORY +
+ p_block->igu_sb_id * sizeof(u64), 2, 0);
}
vf->num_sbs = (u8)num_rx_queues;
@@ -1009,10 +1019,8 @@ static void ecore_iov_free_vf_igu_sbs(struct ecore_hwfn *p_hwfn,
SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
ecore_wr(p_hwfn, p_ptt, addr, val);
- p_info->igu_map.igu_blocks[igu_id].status |=
- ECORE_IGU_STATUS_FREE;
-
- p_hwfn->hw_info.p_igu_info->free_blks++;
+ p_info->entry[igu_id].status |= ECORE_IGU_STATUS_FREE;
+ p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov++;
}
vf->num_sbs = 0;
@@ -1110,34 +1118,28 @@ ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
vf->vport_id = p_params->vport_id;
vf->rss_eng_id = p_params->rss_eng_id;
- /* Perform sanity checking on the requested queue_id */
+ /* Since it's possible to relocate SBs, it's a bit difficult to check
+ * things here. Simply check whether the index falls in the range
+ * belonging to the PF.
+ */
for (i = 0; i < p_params->num_queues; i++) {
- u16 min_vf_qzone = (u16)FEAT_NUM(p_hwfn, ECORE_PF_L2_QUE);
- u16 max_vf_qzone = min_vf_qzone +
- FEAT_NUM(p_hwfn, ECORE_VF_L2_QUE) - 1;
-
qid = p_params->req_rx_queue[i];
- if (qid < min_vf_qzone || qid > max_vf_qzone) {
+ if (qid > (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) {
DP_NOTICE(p_hwfn, true,
- "Can't enable Rx qid [%04x] for VF[%d]: qids [0x%04x,...,0x%04x] available\n",
+				  "Can't enable Rx qid [%04x] for VF[%d]: qids [0,...,0x%04x] available\n",
qid, p_params->rel_vf_id,
- min_vf_qzone, max_vf_qzone);
+ (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE));
return ECORE_INVAL;
}
qid = p_params->req_tx_queue[i];
- if (qid > max_vf_qzone) {
+ if (qid > (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) {
DP_NOTICE(p_hwfn, true,
- "Can't enable Tx qid [%04x] for VF[%d]: max qid 0x%04x\n",
- qid, p_params->rel_vf_id, max_vf_qzone);
+				  "Can't enable Tx qid [%04x] for VF[%d]: qids [0,...,0x%04x] available\n",
+ qid, p_params->rel_vf_id,
+ (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE));
return ECORE_INVAL;
}
-
- /* If client *really* wants, Tx qid can be shared with PF */
- if (qid < min_vf_qzone)
- DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
- "VF[%d] is using PF qid [0x%04x] for Txq[0x%02x]\n",
- p_params->rel_vf_id, qid, i);
}
/* Limit number of queues according to number of CIDs */
@@ -1307,8 +1309,7 @@ static void ecore_iov_unlock_vf_pf_channel(struct ecore_hwfn *p_hwfn,
}
/* place a given tlv on the tlv buffer, continuing current tlv list */
-void *ecore_add_tlv(struct ecore_hwfn *p_hwfn,
- u8 **offset, u16 type, u16 length)
+void *ecore_add_tlv(u8 **offset, u16 type, u16 length)
{
struct channel_tlv *tl = (struct channel_tlv *)*offset;
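
With the hwfn argument dropped, building a reply chain is a pure pointer walk. A sketch mirroring the prepare-response pattern used later in this patch (start at the reply buffer, append the typed TLV, terminate the list):

mbx->offset = (u8 *)mbx->reply_virt;
ecore_add_tlv(&mbx->offset, CHANNEL_TLV_VPORT_TEARDOWN,
	      sizeof(struct pfvf_def_resp_tlv));
ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
	      sizeof(struct channel_list_end_tlv));
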
@@ -1364,7 +1365,12 @@ void ecore_dp_tlv_list(struct ecore_hwfn *p_hwfn, void *tlvs_list)
static void ecore_iov_send_response(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_vf_info *p_vf,
- u16 length, u8 status)
+#ifdef CONFIG_ECORE_SW_CHANNEL
+ u16 length,
+#else
+ u16 OSAL_UNUSED length,
+#endif
+ u8 status)
{
struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
struct ecore_dmae_params params;
@@ -1378,7 +1384,7 @@ static void ecore_iov_send_response(struct ecore_hwfn *p_hwfn,
mbx->sw_mbx.response_size =
length + sizeof(struct channel_list_end_tlv);
- if (!p_hwfn->p_dev->b_hw_channel)
+ if (!p_vf->b_hw_channel)
return;
#endif
@@ -1394,17 +1400,22 @@ static void ecore_iov_send_response(struct ecore_hwfn *p_hwfn,
(sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,
&params);
+	/* Once PF copies the rc to the VF, the latter can continue
+	 * and send an additional message. So we have to make sure the
+ * channel would be re-set to ready prior to that.
+ */
+ REG_WR(p_hwfn,
+ GTT_BAR0_MAP_REG_USDM_RAM +
+ USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);
+
ecore_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
mbx->req_virt->first_tlv.reply_address,
sizeof(u64) / 4, &params);
- REG_WR(p_hwfn,
- GTT_BAR0_MAP_REG_USDM_RAM +
- USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);
+ OSAL_IOV_PF_RESP_TYPE(p_hwfn, p_vf->relative_vf_id, status);
}
-static u16 ecore_iov_vport_to_tlv(struct ecore_hwfn *p_hwfn,
- enum ecore_iov_vport_update_flag flag)
+static u16 ecore_iov_vport_to_tlv(enum ecore_iov_vport_update_flag flag)
{
switch (flag) {
case ECORE_IOV_VP_UPDATE_ACTIVATE:
@@ -1442,15 +1453,15 @@ static u16 ecore_iov_prep_vp_update_resp_tlvs(struct ecore_hwfn *p_hwfn,
size = sizeof(struct pfvf_def_resp_tlv);
total_len = size;
- ecore_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);
+ ecore_add_tlv(&p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);
/* Prepare response for all extended tlvs if they are found by PF */
for (i = 0; i < ECORE_IOV_VP_UPDATE_MAX; i++) {
if (!(tlvs_mask & (1 << i)))
continue;
- resp = ecore_add_tlv(p_hwfn, &p_mbx->offset,
- ecore_iov_vport_to_tlv(p_hwfn, i), size);
+ resp = ecore_add_tlv(&p_mbx->offset, ecore_iov_vport_to_tlv(i),
+ size);
if (tlvs_accepted & (1 << i))
resp->hdr.status = status;
@@ -1460,12 +1471,13 @@ static u16 ecore_iov_prep_vp_update_resp_tlvs(struct ecore_hwfn *p_hwfn,
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
"VF[%d] - vport_update resp: TLV %d, status %02x\n",
p_vf->relative_vf_id,
- ecore_iov_vport_to_tlv(p_hwfn, i), resp->hdr.status);
+ ecore_iov_vport_to_tlv(i),
+ resp->hdr.status);
total_len += size;
}
- ecore_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_LIST_END,
+ ecore_add_tlv(&p_mbx->offset, CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
return total_len;
@@ -1480,13 +1492,11 @@ static void ecore_iov_prepare_resp(struct ecore_hwfn *p_hwfn,
mbx->offset = (u8 *)mbx->reply_virt;
- ecore_add_tlv(p_hwfn, &mbx->offset, type, length);
- ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
+ ecore_add_tlv(&mbx->offset, type, length);
+ ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
ecore_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);
-
- OSAL_IOV_PF_RESP_TYPE(p_hwfn, vf_info->relative_vf_id, status);
}
struct ecore_public_vf_info
@@ -1535,6 +1545,60 @@ static void ecore_iov_vf_cleanup(struct ecore_hwfn *p_hwfn,
OSAL_IOV_VF_CLEANUP(p_hwfn, p_vf->relative_vf_id);
}
+/* Returns either 0, or log(size) */
+static u32 ecore_iov_vf_db_bar_size(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 val = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_BAR1_SIZE);
+
+ if (val)
+ return val + 11;
+ return 0;
+}
+
+static void
+ecore_iov_vf_mbx_acquire_resc_cids(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *p_vf,
+ struct vf_pf_resc_request *p_req,
+ struct pf_vf_resc *p_resp)
+{
+ u8 num_vf_cons = p_hwfn->pf_params.eth_pf_params.num_vf_cons;
+ u8 db_size = DB_ADDR_VF(1, DQ_DEMS_LEGACY) -
+ DB_ADDR_VF(0, DQ_DEMS_LEGACY);
+ u32 bar_size;
+
+ p_resp->num_cids = OSAL_MIN_T(u8, p_req->num_cids, num_vf_cons);
+
+	/* If VF didn't bother asking for QIDs then don't bother limiting
+ * number of CIDs. The VF doesn't care about the number, and this
+ * has the likely result of causing an additional acquisition.
+ */
+ if (!(p_vf->acquire.vfdev_info.capabilities &
+ VFPF_ACQUIRE_CAP_QUEUE_QIDS))
+ return;
+
+ /* If doorbell bar was mapped by VF, limit the VF CIDs to an amount
+ * that would make sure doorbells for all CIDs fall within the bar.
+ * If it doesn't, make sure regview window is sufficient.
+ */
+ if (p_vf->acquire.vfdev_info.capabilities &
+ VFPF_ACQUIRE_CAP_PHYSICAL_BAR) {
+ bar_size = ecore_iov_vf_db_bar_size(p_hwfn, p_ptt);
+ if (bar_size)
+ bar_size = 1 << bar_size;
+
+ if (ECORE_IS_CMT(p_hwfn->p_dev))
+ bar_size /= 2;
+ } else {
+ bar_size = PXP_VF_BAR0_DQ_LENGTH;
+ }
+
+ if (bar_size / db_size < 256)
+ p_resp->num_cids = OSAL_MIN_T(u8, p_resp->num_cids,
+ (u8)(bar_size / db_size));
+}
+
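
A worked example of the clamp above, with all numbers illustrative: if PGLUE_B_REG_VF_BAR1_SIZE reads 5, the helper returns 5 + 11 = 16, i.e. a 2^16 = 64 KiB doorbell bar; with, say, 1 KiB between consecutive legacy DEMS doorbells that is 64 slots, so num_cids is clamped to 64 (< 256):

/* Illustrative arithmetic only; db_size comes from DB_ADDR_VF(). */
u32 log_size = ecore_iov_vf_db_bar_size(p_hwfn, p_ptt);	/* e.g. 16 */
u32 bar_size = log_size ? (1U << log_size) : 0;		/* 64 KiB */

if (bar_size / db_size < 256)	/* 65536 / 1024 == 64 < 256 */
	p_resp->num_cids = OSAL_MIN_T(u8, p_resp->num_cids,
				      (u8)(bar_size / db_size));
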
static u8 ecore_iov_vf_mbx_acquire_resc(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_vf_info *p_vf,
@@ -1571,6 +1635,8 @@ static u8 ecore_iov_vf_mbx_acquire_resc(struct ecore_hwfn *p_hwfn,
p_resp->num_vlan_filters = OSAL_MIN_T(u8, p_vf->num_vlan_filters,
p_req->num_vlan_filters);
+ ecore_iov_vf_mbx_acquire_resc_cids(p_hwfn, p_ptt, p_vf, p_req, p_resp);
+
/* This isn't really needed/enforced, but some legacy VFs might depend
* on the correct filling of this field.
*/
@@ -1582,18 +1648,18 @@ static u8 ecore_iov_vf_mbx_acquire_resc(struct ecore_hwfn *p_hwfn,
p_resp->num_sbs < p_req->num_sbs ||
p_resp->num_mac_filters < p_req->num_mac_filters ||
p_resp->num_vlan_filters < p_req->num_vlan_filters ||
- p_resp->num_mc_filters < p_req->num_mc_filters) {
+ p_resp->num_mc_filters < p_req->num_mc_filters ||
+ p_resp->num_cids < p_req->num_cids) {
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
- "VF[%d] - Insufficient resources: rxq [%02x/%02x]"
- " txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x]"
- " vlan [%02x/%02x] mc [%02x/%02x]\n",
+ "VF[%d] - Insufficient resources: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]\n",
p_vf->abs_vf_id,
p_req->num_rxqs, p_resp->num_rxqs,
			   p_req->num_txqs, p_resp->num_txqs,
p_req->num_sbs, p_resp->num_sbs,
p_req->num_mac_filters, p_resp->num_mac_filters,
p_req->num_vlan_filters, p_resp->num_vlan_filters,
- p_req->num_mc_filters, p_resp->num_mc_filters);
+ p_req->num_mc_filters, p_resp->num_mc_filters,
+ p_req->num_cids, p_resp->num_cids);
/* Some legacy OSes are incapable of correctly handling this
* failure.
@@ -1610,8 +1676,7 @@ static u8 ecore_iov_vf_mbx_acquire_resc(struct ecore_hwfn *p_hwfn,
return PFVF_STATUS_SUCCESS;
}
-static void ecore_iov_vf_mbx_acquire_stats(struct ecore_hwfn *p_hwfn,
- struct pfvf_stats_info *p_stats)
+static void ecore_iov_vf_mbx_acquire_stats(struct pfvf_stats_info *p_stats)
{
p_stats->mstats.address = PXP_VF_BAR0_START_MSDM_ZONE_B +
OFFSETOF(struct mstorm_vf_zone,
@@ -1693,7 +1758,7 @@ static void ecore_iov_vf_mbx_acquire(struct ecore_hwfn *p_hwfn,
}
/* On 100g PFs, prevent old VFs from loading */
- if ((p_hwfn->p_dev->num_hwfns > 1) &&
+ if (ECORE_IS_CMT(p_hwfn->p_dev) &&
!(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) {
DP_INFO(p_hwfn,
"VF[%d] is running an old driver that doesn't support"
@@ -1721,14 +1786,24 @@ static void ecore_iov_vf_mbx_acquire(struct ecore_hwfn *p_hwfn,
/* fill in pfdev info */
pfdev_info->chip_num = p_hwfn->p_dev->chip_num;
pfdev_info->db_size = 0; /* @@@ TBD MichalK Vf Doorbells */
- pfdev_info->indices_per_sb = PIS_PER_SB;
+ pfdev_info->indices_per_sb = PIS_PER_SB_E4;
pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
- if (p_hwfn->p_dev->num_hwfns > 1)
+ if (ECORE_IS_CMT(p_hwfn->p_dev))
pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;
- ecore_iov_vf_mbx_acquire_stats(p_hwfn, &pfdev_info->stats_info);
+ /* Share our ability to use multiple queue-ids only with VFs
+ * that request it.
+ */
+ if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_QUEUE_QIDS)
+ pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_QUEUE_QIDS;
+
+ /* Share the sizes of the bars with VF */
+ resp->pfdev_info.bar_size = (u8)ecore_iov_vf_db_bar_size(p_hwfn,
+ p_ptt);
+
+ ecore_iov_vf_mbx_acquire_stats(&pfdev_info->stats_info);
OSAL_MEMCPY(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr,
ETH_ALEN);
@@ -1969,8 +2044,7 @@ ecore_iov_configure_vport_forced(struct ecore_hwfn *p_hwfn,
struct ecore_queue_cid *p_cid = OSAL_NULL;
/* There can be at most 1 Rx queue on qzone. Find it */
- p_cid = ecore_iov_get_vf_rx_queue_cid(p_hwfn, p_vf,
- p_queue);
+ p_cid = ecore_iov_get_vf_rx_queue_cid(p_queue);
if (p_cid == OSAL_NULL)
continue;
@@ -2106,16 +2180,19 @@ static void ecore_iov_vf_mbx_stop_vport(struct ecore_hwfn *p_hwfn,
u8 status = PFVF_STATUS_SUCCESS;
enum _ecore_status_t rc;
+ OSAL_IOV_VF_VPORT_STOP(p_hwfn, vf);
vf->vport_instance--;
vf->spoof_chk = false;
- if ((ecore_iov_validate_active_rxq(p_hwfn, vf)) ||
- (ecore_iov_validate_active_txq(p_hwfn, vf))) {
+ if ((ecore_iov_validate_active_rxq(vf)) ||
+ (ecore_iov_validate_active_txq(vf))) {
vf->b_malicious = true;
DP_NOTICE(p_hwfn, false,
"VF [%02x] - considered malicious;"
" Unable to stop RX/TX queuess\n",
vf->abs_vf_id);
+ status = PFVF_STATUS_MALICIOUS;
+ goto out;
}
rc = ecore_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
@@ -2129,6 +2206,7 @@ static void ecore_iov_vf_mbx_stop_vport(struct ecore_hwfn *p_hwfn,
vf->configured_features = 0;
OSAL_MEMSET(&vf->shadow_config, 0, sizeof(vf->shadow_config));
+out:
ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN,
sizeof(struct pfvf_def_resp_tlv), status);
}
@@ -2154,9 +2232,8 @@ static void ecore_iov_vf_mbx_start_rxq_resp(struct ecore_hwfn *p_hwfn,
else
length = sizeof(struct pfvf_def_resp_tlv);
- p_tlv = ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_RXQ,
- length);
- ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
+ p_tlv = ecore_add_tlv(&mbx->offset, CHANNEL_TLV_START_RXQ, length);
+ ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
/* Update the TLV with the response */
@@ -2171,6 +2248,42 @@ static void ecore_iov_vf_mbx_start_rxq_resp(struct ecore_hwfn *p_hwfn,
ecore_iov_send_response(p_hwfn, p_ptt, vf, length, status);
}
+static u8 ecore_iov_vf_mbx_qid(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *p_vf, bool b_is_tx)
+{
+ struct ecore_iov_vf_mbx *p_mbx = &p_vf->vf_mbx;
+ struct vfpf_qid_tlv *p_qid_tlv;
+
+	/* Search for the qid if the VF published it's going to provide it */
+ if (!(p_vf->acquire.vfdev_info.capabilities &
+ VFPF_ACQUIRE_CAP_QUEUE_QIDS)) {
+ if (b_is_tx)
+ return ECORE_IOV_LEGACY_QID_TX;
+ else
+ return ECORE_IOV_LEGACY_QID_RX;
+ }
+
+ p_qid_tlv = (struct vfpf_qid_tlv *)
+ ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
+ CHANNEL_TLV_QID);
+ if (p_qid_tlv == OSAL_NULL) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%2x]: Failed to provide qid\n",
+ p_vf->relative_vf_id);
+
+ return ECORE_IOV_QID_INVALID;
+ }
+
+ if (p_qid_tlv->qid >= MAX_QUEUES_PER_QZONE) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%02x]: Provided qid out-of-bounds %02x\n",
+ p_vf->relative_vf_id, p_qid_tlv->qid);
+ return ECORE_IOV_QID_INVALID;
+ }
+
+ return p_qid_tlv->qid;
+}
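
/* Sketch of the helper's contract (illustrative only): legacy VFs that never
 * negotiated VFPF_ACQUIRE_CAP_QUEUE_QIDS always map Rx to index 0 and Tx to
 * index 1, while QID-aware VFs must provide a valid index via CHANNEL_TLV_QID
 * or get rejected.
 */
static u8 sketch_expected_qid(bool b_legacy_vf, bool b_is_tx, u8 tlv_qid)
{
	if (b_legacy_vf)
		return b_is_tx ? ECORE_IOV_LEGACY_QID_TX
			       : ECORE_IOV_LEGACY_QID_RX;

	return (tlv_qid < MAX_QUEUES_PER_QZONE) ? tlv_qid
						: ECORE_IOV_QID_INVALID;
}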
+
static void ecore_iov_vf_mbx_start_rxq(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_vf_info *vf)
@@ -2179,11 +2292,11 @@ static void ecore_iov_vf_mbx_start_rxq(struct ecore_hwfn *p_hwfn,
struct ecore_queue_cid_vf_params vf_params;
struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
u8 status = PFVF_STATUS_NO_RESOURCE;
+ u8 qid_usage_idx, vf_legacy = 0;
struct ecore_vf_queue *p_queue;
struct vfpf_start_rxq_tlv *req;
struct ecore_queue_cid *p_cid;
- bool b_legacy_vf = false;
- u8 qid_usage_idx;
+ struct ecore_sb_info sb_dummy;
enum _ecore_status_t rc;
req = &mbx->req_virt->start_rxq;
@@ -2193,45 +2306,43 @@ static void ecore_iov_vf_mbx_start_rxq(struct ecore_hwfn *p_hwfn,
!ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb))
goto out;
- /* Legacy VFs made assumptions on the CID their queues connected to,
- * assuming queue X used CID X.
- * TODO - need to validate that there was no official release post
- * the current legacy scheme that still made that assumption.
- */
- if (vf->acquire.vfdev_info.eth_fp_hsi_minor ==
- ETH_HSI_VER_NO_PKT_LEN_TUNN)
- b_legacy_vf = true;
+ qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, false);
+ if (qid_usage_idx == ECORE_IOV_QID_INVALID)
+ goto out;
- /* Acquire a new queue-cid */
p_queue = &vf->vf_queues[req->rx_qid];
+ if (p_queue->cids[qid_usage_idx].p_cid)
+ goto out;
+
+ vf_legacy = ecore_vf_calculate_legacy(vf);
+ /* Acquire a new queue-cid */
OSAL_MEMSET(&params, 0, sizeof(params));
params.queue_id = (u8)p_queue->fw_rx_qid;
params.vport_id = vf->vport_id;
params.stats_id = vf->abs_vf_id + 0x10;
- params.sb = req->hw_sb;
- params.sb_idx = req->sb_index;
- /* TODO - set qid_usage_idx according to extended TLV. For now, use
- * '0' for Rx.
- */
- qid_usage_idx = 0;
+ /* Since IGU index is passed via sb_info, construct a dummy one */
+ OSAL_MEM_ZERO(&sb_dummy, sizeof(sb_dummy));
+ sb_dummy.igu_sb_id = req->hw_sb;
+ params.p_sb = &sb_dummy;
+ params.sb_idx = req->sb_index;
OSAL_MEM_ZERO(&vf_params, sizeof(vf_params));
vf_params.vfid = vf->relative_vf_id;
vf_params.vf_qid = (u8)req->rx_qid;
- vf_params.b_legacy = b_legacy_vf;
+ vf_params.vf_legacy = vf_legacy;
vf_params.qid_usage_idx = qid_usage_idx;
p_cid = ecore_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
- &params, &vf_params);
+ &params, true, &vf_params);
if (p_cid == OSAL_NULL)
goto out;
/* Legacy VFs have their Producers in a different location, which they
* calculate on their own and clean the producer prior to this.
*/
- if (!b_legacy_vf)
+ if (!(vf_legacy & ECORE_QCID_LEGACY_VF_RX_PROD))
REG_WR(p_hwfn,
GTT_BAR0_MAP_REG_MSDM_RAM +
MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid),
@@ -2254,7 +2365,8 @@ static void ecore_iov_vf_mbx_start_rxq(struct ecore_hwfn *p_hwfn,
out:
ecore_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status,
- b_legacy_vf);
+ !!(vf_legacy &
+ ECORE_QCID_LEGACY_VF_RX_PROD));
}
static void
@@ -2382,7 +2494,7 @@ static void ecore_iov_vf_mbx_update_tunn_param(struct ecore_hwfn *p_hwfn,
if (b_update_required) {
u16 geneve_port;
- rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, &tunn,
+ rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt, &tunn,
ECORE_SPQ_MODE_EBLOCK,
OSAL_NULL);
if (rc != ECORE_SUCCESS)
@@ -2397,11 +2509,11 @@ static void ecore_iov_vf_mbx_update_tunn_param(struct ecore_hwfn *p_hwfn,
}
send_resp:
- p_resp = ecore_add_tlv(p_hwfn, &mbx->offset,
+ p_resp = ecore_add_tlv(&mbx->offset,
CHANNEL_TLV_UPDATE_TUNN_PARAM, sizeof(*p_resp));
ecore_iov_pf_update_tun_response(p_resp, p_tun, tunn_feature_mask);
- ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
+ ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
ecore_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
@@ -2433,9 +2545,8 @@ static void ecore_iov_vf_mbx_start_txq_resp(struct ecore_hwfn *p_hwfn,
else
length = sizeof(struct pfvf_def_resp_tlv);
- p_tlv = ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_TXQ,
- length);
- ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
+ p_tlv = ecore_add_tlv(&mbx->offset, CHANNEL_TLV_START_TXQ, length);
+ ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
/* Update the TLV with the response */
@@ -2456,8 +2567,8 @@ static void ecore_iov_vf_mbx_start_txq(struct ecore_hwfn *p_hwfn,
struct ecore_vf_queue *p_queue;
struct vfpf_start_txq_tlv *req;
struct ecore_queue_cid *p_cid;
- bool b_legacy_vf = false;
- u8 qid_usage_idx;
+ struct ecore_sb_info sb_dummy;
+ u8 qid_usage_idx, vf_legacy;
u32 cid = 0;
enum _ecore_status_t rc;
u16 pq;
@@ -2470,39 +2581,35 @@ static void ecore_iov_vf_mbx_start_txq(struct ecore_hwfn *p_hwfn,
!ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb))
goto out;
- /* In case this is a legacy VF - need to know to use the right cids.
- * TODO - need to validate that there was no official release post
- * the current legacy scheme that still made that assumption.
- */
- if (vf->acquire.vfdev_info.eth_fp_hsi_minor ==
- ETH_HSI_VER_NO_PKT_LEN_TUNN)
- b_legacy_vf = true;
+ qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, true);
+ if (qid_usage_idx == ECORE_IOV_QID_INVALID)
+ goto out;
- /* Acquire a new queue-cid */
p_queue = &vf->vf_queues[req->tx_qid];
+ if (p_queue->cids[qid_usage_idx].p_cid)
+ goto out;
+
+ vf_legacy = ecore_vf_calculate_legacy(vf);
+ /* Acquire a new queue-cid */
params.queue_id = p_queue->fw_tx_qid;
params.vport_id = vf->vport_id;
params.stats_id = vf->abs_vf_id + 0x10;
- params.sb = req->hw_sb;
- params.sb_idx = req->sb_index;
-
- /* TODO - set qid_usage_idx according to extended TLV. For now, use
- * '1' for Tx.
- */
- qid_usage_idx = 1;
- if (p_queue->cids[qid_usage_idx].p_cid)
- goto out;
+ /* Since IGU index is passed via sb_info, construct a dummy one */
+ OSAL_MEM_ZERO(&sb_dummy, sizeof(sb_dummy));
+ sb_dummy.igu_sb_id = req->hw_sb;
+ params.p_sb = &sb_dummy;
+ params.sb_idx = req->sb_index;
OSAL_MEM_ZERO(&vf_params, sizeof(vf_params));
vf_params.vfid = vf->relative_vf_id;
vf_params.vf_qid = (u8)req->tx_qid;
- vf_params.b_legacy = b_legacy_vf;
+ vf_params.vf_legacy = vf_legacy;
vf_params.qid_usage_idx = qid_usage_idx;
p_cid = ecore_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
- &params, &vf_params);
+ &params, false, &vf_params);
if (p_cid == OSAL_NULL)
goto out;
@@ -2528,80 +2635,74 @@ out:
static enum _ecore_status_t ecore_iov_vf_stop_rxqs(struct ecore_hwfn *p_hwfn,
struct ecore_vf_info *vf,
u16 rxq_id,
- u8 num_rxqs,
+ u8 qid_usage_idx,
bool cqe_completion)
{
+ struct ecore_vf_queue *p_queue;
enum _ecore_status_t rc = ECORE_SUCCESS;
- int qid, i;
- /* TODO - improve validation [wrap around] */
- if (rxq_id + num_rxqs > OSAL_ARRAY_SIZE(vf->vf_queues))
+ if (!ecore_iov_validate_rxq(p_hwfn, vf, rxq_id,
+ ECORE_IOV_VALIDATE_Q_NA)) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d] Tried Closing Rx 0x%04x.%02x which is inactive\n",
+ vf->relative_vf_id, rxq_id, qid_usage_idx);
return ECORE_INVAL;
+ }
- for (qid = rxq_id; qid < rxq_id + num_rxqs; qid++) {
- struct ecore_vf_queue *p_queue = &vf->vf_queues[qid];
- struct ecore_queue_cid **pp_cid = OSAL_NULL;
+ p_queue = &vf->vf_queues[rxq_id];
- /* There can be at most a single Rx per qzone. Find it */
- for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
- if (p_queue->cids[i].p_cid &&
- !p_queue->cids[i].b_is_tx) {
- pp_cid = &p_queue->cids[i].p_cid;
- break;
- }
- }
- if (pp_cid == OSAL_NULL) {
- DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
- "Ignoring VF[%02x] request of closing Rx queue %04x - closed\n",
- vf->relative_vf_id, qid);
- continue;
- }
-
- rc = ecore_eth_rx_queue_stop(p_hwfn, *pp_cid,
- false, cqe_completion);
- if (rc != ECORE_SUCCESS)
- return rc;
+ /* We've validated the index and the existence of the active RXQ -
+ * now we need to make sure that it's using the correct qid.
+ */
+ if (!p_queue->cids[qid_usage_idx].p_cid ||
+ p_queue->cids[qid_usage_idx].b_is_tx) {
+ struct ecore_queue_cid *p_cid;
- *pp_cid = OSAL_NULL;
- vf->num_active_rxqs--;
+ p_cid = ecore_iov_get_vf_rx_queue_cid(p_queue);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d] - Tried Closing Rx 0x%04x.%02x, but Rx is at %04x.%02x\n",
+ vf->relative_vf_id, rxq_id, qid_usage_idx,
+ rxq_id, p_cid->qid_usage_idx);
+ return ECORE_INVAL;
}
- return rc;
+ /* Now that we know we have a valid Rx-queue - close it */
+ rc = ecore_eth_rx_queue_stop(p_hwfn,
+ p_queue->cids[qid_usage_idx].p_cid,
+ false, cqe_completion);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ p_queue->cids[qid_usage_idx].p_cid = OSAL_NULL;
+ vf->num_active_rxqs--;
+
+ return ECORE_SUCCESS;
}
static enum _ecore_status_t ecore_iov_vf_stop_txqs(struct ecore_hwfn *p_hwfn,
struct ecore_vf_info *vf,
- u16 txq_id, u8 num_txqs)
+ u16 txq_id,
+ u8 qid_usage_idx)
{
- enum _ecore_status_t rc = ECORE_SUCCESS;
struct ecore_vf_queue *p_queue;
- int qid, j;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
if (!ecore_iov_validate_txq(p_hwfn, vf, txq_id,
- ECORE_IOV_VALIDATE_Q_NA) ||
- !ecore_iov_validate_txq(p_hwfn, vf, txq_id + num_txqs,
ECORE_IOV_VALIDATE_Q_NA))
return ECORE_INVAL;
- for (qid = txq_id; qid < txq_id + num_txqs; qid++) {
- p_queue = &vf->vf_queues[qid];
- for (j = 0; j < MAX_QUEUES_PER_QZONE; j++) {
- if (p_queue->cids[j].p_cid == OSAL_NULL)
- continue;
-
- if (!p_queue->cids[j].b_is_tx)
- continue;
-
- rc = ecore_eth_tx_queue_stop(p_hwfn,
- p_queue->cids[j].p_cid);
- if (rc != ECORE_SUCCESS)
- return rc;
+ p_queue = &vf->vf_queues[txq_id];
+ if (!p_queue->cids[qid_usage_idx].p_cid ||
+ !p_queue->cids[qid_usage_idx].b_is_tx)
+ return ECORE_INVAL;
- p_queue->cids[j].p_cid = OSAL_NULL;
- }
- }
+ rc = ecore_eth_tx_queue_stop(p_hwfn,
+ p_queue->cids[qid_usage_idx].p_cid);
+ if (rc != ECORE_SUCCESS)
+ return rc;
- return rc;
+ p_queue->cids[qid_usage_idx].p_cid = OSAL_NULL;
+ return ECORE_SUCCESS;
}
static void ecore_iov_vf_mbx_stop_rxqs(struct ecore_hwfn *p_hwfn,
@@ -2610,20 +2711,34 @@ static void ecore_iov_vf_mbx_stop_rxqs(struct ecore_hwfn *p_hwfn,
{
u16 length = sizeof(struct pfvf_def_resp_tlv);
struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
- u8 status = PFVF_STATUS_SUCCESS;
+ u8 status = PFVF_STATUS_FAILURE;
struct vfpf_stop_rxqs_tlv *req;
+ u8 qid_usage_idx;
enum _ecore_status_t rc;
- /* We give the option of starting from qid != 0, in this case we
- * need to make sure that qid + num_qs doesn't exceed the actual
- * amount of queues that exist.
+ /* Starting with CHANNEL_TLV_QID, it's assumed the 'num_rxqs'
+ * would be one. Since no older ecore passed multiple queues
+	 * using this API, sanitize the value.
*/
req = &mbx->req_virt->stop_rxqs;
- rc = ecore_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
- req->num_rxqs, req->cqe_completion);
- if (rc)
- status = PFVF_STATUS_FAILURE;
+ if (req->num_rxqs != 1) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Odd; VF[%d] tried stopping multiple Rx queues\n",
+ vf->relative_vf_id);
+ status = PFVF_STATUS_NOT_SUPPORTED;
+ goto out;
+ }
+
+ /* Find which qid-index is associated with the queue */
+ qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, false);
+ if (qid_usage_idx == ECORE_IOV_QID_INVALID)
+ goto out;
+ rc = ecore_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
+ qid_usage_idx, req->cqe_completion);
+ if (rc == ECORE_SUCCESS)
+ status = PFVF_STATUS_SUCCESS;
+out:
ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS,
length, status);
}
@@ -2634,19 +2749,35 @@ static void ecore_iov_vf_mbx_stop_txqs(struct ecore_hwfn *p_hwfn,
{
u16 length = sizeof(struct pfvf_def_resp_tlv);
struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
- u8 status = PFVF_STATUS_SUCCESS;
+ u8 status = PFVF_STATUS_FAILURE;
struct vfpf_stop_txqs_tlv *req;
+ u8 qid_usage_idx;
enum _ecore_status_t rc;
- /* We give the option of starting from qid != 0, in this case we
- * need to make sure that qid + num_qs doesn't exceed the actual
- * amount of queues that exist.
+ /* Starting with CHANNEL_TLV_QID, it's assumed the 'num_txqs'
+ * would be one. Since no older ecore passed multiple queues
+	 * using this API, sanitize the value.
*/
req = &mbx->req_virt->stop_txqs;
- rc = ecore_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, req->num_txqs);
- if (rc)
- status = PFVF_STATUS_FAILURE;
+ if (req->num_txqs != 1) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Odd; VF[%d] tried stopping multiple Tx queues\n",
+ vf->relative_vf_id);
+ status = PFVF_STATUS_NOT_SUPPORTED;
+ goto out;
+ }
+
+ /* Find which qid-index is associated with the queue */
+ qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, true);
+ if (qid_usage_idx == ECORE_IOV_QID_INVALID)
+ goto out;
+ rc = ecore_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid,
+ qid_usage_idx);
+ if (rc == ECORE_SUCCESS)
+ status = PFVF_STATUS_SUCCESS;
+
+out:
ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS,
length, status);
}
@@ -2662,6 +2793,7 @@ static void ecore_iov_vf_mbx_update_rxqs(struct ecore_hwfn *p_hwfn,
u8 status = PFVF_STATUS_FAILURE;
u8 complete_event_flg;
u8 complete_cqe_flg;
+ u8 qid_usage_idx;
enum _ecore_status_t rc;
u16 i;
@@ -2669,10 +2801,30 @@ static void ecore_iov_vf_mbx_update_rxqs(struct ecore_hwfn *p_hwfn,
complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG);
complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);
- /* Validate inputs */
+ qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, false);
+ if (qid_usage_idx == ECORE_IOV_QID_INVALID)
+ goto out;
+
+ /* Starting with the addition of CHANNEL_TLV_QID, this API started
+ * expecting a single queue at a time. Validate this.
+ */
+ if ((vf->acquire.vfdev_info.capabilities &
+ VFPF_ACQUIRE_CAP_QUEUE_QIDS) &&
+ req->num_rxqs != 1) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d] supports QIDs but sends multiple queues\n",
+ vf->relative_vf_id);
+ goto out;
+ }
+
+ /* Validate inputs - for the legacy case this is still true since
+ * qid_usage_idx for each Rx queue would be LEGACY_QID_RX.
+ */
for (i = req->rx_qid; i < req->rx_qid + req->num_rxqs; i++) {
if (!ecore_iov_validate_rxq(p_hwfn, vf, i,
- ECORE_IOV_VALIDATE_Q_ENABLE)) {
+ ECORE_IOV_VALIDATE_Q_NA) ||
+ !vf->vf_queues[i].cids[qid_usage_idx].p_cid ||
+ vf->vf_queues[i].cids[qid_usage_idx].b_is_tx) {
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
"VF[%d]: Incorrect Rxqs [%04x, %02x]\n",
vf->relative_vf_id, req->rx_qid,
@@ -2682,12 +2834,9 @@ static void ecore_iov_vf_mbx_update_rxqs(struct ecore_hwfn *p_hwfn,
}
for (i = 0; i < req->num_rxqs; i++) {
- struct ecore_vf_queue *p_queue;
u16 qid = req->rx_qid + i;
- p_queue = &vf->vf_queues[qid];
- handlers[i] = ecore_iov_get_vf_rx_queue_cid(p_hwfn, vf,
- p_queue);
+ handlers[i] = vf->vf_queues[qid].cids[qid_usage_idx].p_cid;
}
rc = ecore_sp_eth_rx_queues_update(p_hwfn, (void **)&handlers,
@@ -2696,7 +2845,7 @@ static void ecore_iov_vf_mbx_update_rxqs(struct ecore_hwfn *p_hwfn,
complete_event_flg,
ECORE_SPQ_MODE_EBLOCK,
OSAL_NULL);
- if (rc)
+ if (rc != ECORE_SUCCESS)
goto out;
status = PFVF_STATUS_SUCCESS;
@@ -2931,8 +3080,7 @@ ecore_iov_vp_update_rss_param(struct ecore_hwfn *p_hwfn,
goto out;
}
- p_cid = ecore_iov_get_vf_rx_queue_cid(p_hwfn, vf,
- &vf->vf_queues[q_idx]);
+ p_cid = ecore_iov_get_vf_rx_queue_cid(&vf->vf_queues[q_idx]);
p_rss->rss_ind_table[i] = p_cid;
}
@@ -2945,7 +3093,6 @@ out:
static void
ecore_iov_vp_update_sge_tpa_param(struct ecore_hwfn *p_hwfn,
- struct ecore_vf_info *vf,
struct ecore_sp_vport_update_params *p_data,
struct ecore_sge_tpa_params *p_sge_tpa,
struct ecore_iov_vf_mbx *p_mbx,
@@ -3035,7 +3182,7 @@ static void ecore_iov_vf_mbx_vport_update(struct ecore_hwfn *p_hwfn,
ecore_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask);
ecore_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask);
ecore_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask);
- ecore_iov_vp_update_sge_tpa_param(p_hwfn, vf, &params,
+ ecore_iov_vp_update_sge_tpa_param(p_hwfn, &params,
&sge_tpa_params, mbx, &tlvs_mask);
tlvs_accepted = tlvs_mask;
@@ -3066,8 +3213,8 @@ static void ecore_iov_vf_mbx_vport_update(struct ecore_hwfn *p_hwfn,
"Upper-layer prevents said VF"
" configuration\n");
else
- DP_NOTICE(p_hwfn, true,
- "No feature tlvs found for vport update\n");
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "No feature tlvs found for vport update\n");
status = PFVF_STATUS_NOT_SUPPORTED;
goto out;
}
@@ -3272,12 +3419,13 @@ static void ecore_iov_vf_mbx_ucast_filter(struct ecore_hwfn *p_hwfn,
goto out;
}
- /* Update shadow copy of the VF configuration */
+	/* Update shadow copy of the VF configuration. In case the shadow
+	 * indicates the action should be blocked, return success to the VF
+	 * to imitate the firmware behaviour in such a case.
+	 */
if (ecore_iov_vf_update_unicast_shadow(p_hwfn, vf, &params) !=
- ECORE_SUCCESS) {
- status = PFVF_STATUS_FAILURE;
+ ECORE_SUCCESS)
goto out;
- }
	/* Determine if the unicast filtering is acceptable to the PF */
if ((p_bulletin->valid_bitmap & (1 << VLAN_ADDR_FORCED)) &&
@@ -3382,6 +3530,76 @@ static void ecore_iov_vf_mbx_release(struct ecore_hwfn *p_hwfn,
length, status);
}
+static void ecore_iov_vf_pf_get_coalesce(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *p_vf)
+{
+ struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
+ struct pfvf_read_coal_resp_tlv *p_resp;
+ struct vfpf_read_coal_req_tlv *req;
+ u8 status = PFVF_STATUS_FAILURE;
+ struct ecore_vf_queue *p_queue;
+ struct ecore_queue_cid *p_cid;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ u16 coal = 0, qid, i;
+ bool b_is_rx;
+
+ mbx->offset = (u8 *)mbx->reply_virt;
+ req = &mbx->req_virt->read_coal_req;
+
+ qid = req->qid;
+ b_is_rx = req->is_rx ? true : false;
+
+ if (b_is_rx) {
+ if (!ecore_iov_validate_rxq(p_hwfn, p_vf, qid,
+ ECORE_IOV_VALIDATE_Q_ENABLE)) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d]: Invalid Rx queue_id = %d\n",
+ p_vf->abs_vf_id, qid);
+ goto send_resp;
+ }
+
+ p_cid = ecore_iov_get_vf_rx_queue_cid(&p_vf->vf_queues[qid]);
+ rc = ecore_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, &coal);
+ if (rc != ECORE_SUCCESS)
+ goto send_resp;
+ } else {
+ if (!ecore_iov_validate_txq(p_hwfn, p_vf, qid,
+ ECORE_IOV_VALIDATE_Q_ENABLE)) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d]: Invalid Tx queue_id = %d\n",
+ p_vf->abs_vf_id, qid);
+ goto send_resp;
+ }
+ for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
+ p_queue = &p_vf->vf_queues[qid];
+ if ((p_queue->cids[i].p_cid == OSAL_NULL) ||
+ (!p_queue->cids[i].b_is_tx))
+ continue;
+
+ p_cid = p_queue->cids[i].p_cid;
+
+ rc = ecore_get_txq_coalesce(p_hwfn, p_ptt,
+ p_cid, &coal);
+ if (rc != ECORE_SUCCESS)
+ goto send_resp;
+ break;
+ }
+ }
+
+ status = PFVF_STATUS_SUCCESS;
+
+send_resp:
+ p_resp = ecore_add_tlv(&mbx->offset, CHANNEL_TLV_COALESCE_READ,
+ sizeof(*p_resp));
+ p_resp->coal = coal;
+
+ ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ ecore_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
+}
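
/* Sketch: the Tx branch above effectively reads the coalesce value of the
 * first active Tx cid in the queue-zone. A hypothetical helper expressing
 * that search (name is an assumption):
 */
static struct ecore_queue_cid *
sketch_first_tx_cid(struct ecore_vf_queue *p_queue)
{
	int i;

	for (i = 0; i < MAX_QUEUES_PER_QZONE; i++)
		if (p_queue->cids[i].p_cid && p_queue->cids[i].b_is_tx)
			return p_queue->cids[i].p_cid;

	return OSAL_NULL;
}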
+
static void ecore_iov_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_vf_info *vf)
@@ -3422,8 +3640,7 @@ static void ecore_iov_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn,
vf->abs_vf_id, rx_coal, tx_coal, qid);
if (rx_coal) {
- p_cid = ecore_iov_get_vf_rx_queue_cid(p_hwfn, vf,
- &vf->vf_queues[qid]);
+ p_cid = ecore_iov_get_vf_rx_queue_cid(&vf->vf_queues[qid]);
rc = ecore_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
if (rc != ECORE_SUCCESS) {
@@ -3432,6 +3649,7 @@ static void ecore_iov_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn,
vf->abs_vf_id, vf->vf_queues[qid].fw_rx_qid);
goto out;
}
+ vf->rx_coal = rx_coal;
}
/* TODO - in future, it might be possible to pass this in a per-cid
@@ -3456,6 +3674,7 @@ static void ecore_iov_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn,
goto out;
}
}
+ vf->tx_coal = tx_coal;
}
status = PFVF_STATUS_SUCCESS;
@@ -3464,6 +3683,92 @@ out:
sizeof(struct pfvf_def_resp_tlv), status);
}
+enum _ecore_status_t
+ecore_iov_pf_configure_vf_queue_coalesce(struct ecore_hwfn *p_hwfn,
+ u16 rx_coal, u16 tx_coal,
+ u16 vf_id, u16 qid)
+{
+ struct ecore_queue_cid *p_cid;
+ struct ecore_vf_info *vf;
+ struct ecore_ptt *p_ptt;
+ int i, rc = 0;
+
+ if (!ecore_iov_is_valid_vfid(p_hwfn, vf_id, true, true)) {
+ DP_NOTICE(p_hwfn, true,
+ "VF[%d] - Can not set coalescing: VF is not active\n",
+ vf_id);
+ return ECORE_INVAL;
+ }
+
+ vf = &p_hwfn->pf_iov_info->vfs_array[vf_id];
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return ECORE_AGAIN;
+
+	if (!ecore_iov_validate_rxq(p_hwfn, vf, qid,
+				    ECORE_IOV_VALIDATE_Q_ENABLE) &&
+	    rx_coal) {
+		DP_ERR(p_hwfn, "VF[%d]: Invalid Rx queue_id = %d\n",
+		       vf->abs_vf_id, qid);
+		rc = ECORE_INVAL;
+		goto out;
+	}
+
+	if (!ecore_iov_validate_txq(p_hwfn, vf, qid,
+				    ECORE_IOV_VALIDATE_Q_ENABLE) &&
+	    tx_coal) {
+		DP_ERR(p_hwfn, "VF[%d]: Invalid Tx queue_id = %d\n",
+		       vf->abs_vf_id, qid);
+		rc = ECORE_INVAL;
+		goto out;
+	}
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d]: Setting coalesce for VF rx_coal = %d, tx_coal = %d at queue = %d\n",
+ vf->abs_vf_id, rx_coal, tx_coal, qid);
+
+ if (rx_coal) {
+ p_cid = ecore_iov_get_vf_rx_queue_cid(&vf->vf_queues[qid]);
+
+ rc = ecore_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
+ if (rc != ECORE_SUCCESS) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d]: Unable to set rx queue = %d coalesce\n",
+ vf->abs_vf_id, vf->vf_queues[qid].fw_rx_qid);
+ goto out;
+ }
+ vf->rx_coal = rx_coal;
+ }
+
+ /* TODO - in future, it might be possible to pass this in a per-cid
+ * granularity. For now, do this for all Tx queues.
+ */
+ if (tx_coal) {
+ struct ecore_vf_queue *p_queue = &vf->vf_queues[qid];
+
+ for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
+ if (p_queue->cids[i].p_cid == OSAL_NULL)
+ continue;
+
+ if (!p_queue->cids[i].b_is_tx)
+ continue;
+
+ rc = ecore_set_txq_coalesce(p_hwfn, p_ptt, tx_coal,
+ p_queue->cids[i].p_cid);
+ if (rc != ECORE_SUCCESS) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d]: Unable to set tx queue coalesce\n",
+ vf->abs_vf_id);
+ goto out;
+ }
+ }
+ vf->tx_coal = tx_coal;
+ }
+
+out:
+ ecore_ptt_release(p_hwfn, p_ptt);
+
+ return rc;
+}
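
/* Usage sketch (hypothetical caller and values): set both Rx and Tx
 * coalescing for queue 0 of VF 3. A zero value leaves the respective
 * direction untouched, since the function only acts on non-zero
 * rx_coal/tx_coal.
 */
static enum _ecore_status_t sketch_tune_vf_coalesce(struct ecore_hwfn *p_hwfn)
{
	return ecore_iov_pf_configure_vf_queue_coalesce(p_hwfn,
							64 /* rx_coal */,
							128 /* tx_coal */,
							3 /* vf_id */,
							0 /* qid */);
}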
+
static enum _ecore_status_t
ecore_iov_vf_flr_poll_dorq(struct ecore_hwfn *p_hwfn,
struct ecore_vf_info *p_vf, struct ecore_ptt *p_ptt)
@@ -3495,11 +3800,11 @@ static enum _ecore_status_t
ecore_iov_vf_flr_poll_pbf(struct ecore_hwfn *p_hwfn,
struct ecore_vf_info *p_vf, struct ecore_ptt *p_ptt)
{
- u32 cons[MAX_NUM_VOQS], distance[MAX_NUM_VOQS];
+ u32 cons[MAX_NUM_VOQS_E4], distance[MAX_NUM_VOQS_E4];
int i, cnt;
/* Read initial consumers & producers */
- for (i = 0; i < MAX_NUM_VOQS; i++) {
+ for (i = 0; i < MAX_NUM_VOQS_E4; i++) {
u32 prod;
cons[i] = ecore_rd(p_hwfn, p_ptt,
@@ -3514,7 +3819,7 @@ ecore_iov_vf_flr_poll_pbf(struct ecore_hwfn *p_hwfn,
/* Wait for consumers to pass the producers */
i = 0;
for (cnt = 0; cnt < 50; cnt++) {
- for (; i < MAX_NUM_VOQS; i++) {
+ for (; i < MAX_NUM_VOQS_E4; i++) {
u32 tmp;
tmp = ecore_rd(p_hwfn, p_ptt,
@@ -3524,7 +3829,7 @@ ecore_iov_vf_flr_poll_pbf(struct ecore_hwfn *p_hwfn,
break;
}
- if (i == MAX_NUM_VOQS)
+ if (i == MAX_NUM_VOQS_E4)
break;
OSAL_MSLEEP(20);
@@ -3623,8 +3928,7 @@ cleanup:
ack_vfs[vfid / 32] |= (1 << (vfid % 32));
p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=
~(1ULL << (rel_vf_id % 64));
- p_hwfn->pf_iov_info->pending_events[rel_vf_id / 64] &=
- ~(1ULL << (rel_vf_id % 64));
+ p_vf->vf_mbx.b_pending_msg = false;
}
return rc;
@@ -3734,11 +4038,11 @@ void ecore_iov_get_link(struct ecore_hwfn *p_hwfn,
p_bulletin = p_vf->bulletin.p_virt;
if (p_params)
- __ecore_vf_get_link_params(p_hwfn, p_params, p_bulletin);
+ __ecore_vf_get_link_params(p_params, p_bulletin);
if (p_link)
- __ecore_vf_get_link_state(p_hwfn, p_link, p_bulletin);
+ __ecore_vf_get_link_state(p_link, p_bulletin);
if (p_caps)
- __ecore_vf_get_link_caps(p_hwfn, p_caps, p_bulletin);
+ __ecore_vf_get_link_caps(p_caps, p_bulletin);
}
void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,
@@ -3754,12 +4058,22 @@ void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,
mbx = &p_vf->vf_mbx;
/* ecore_iov_process_mbx_request */
- DP_VERBOSE(p_hwfn,
- ECORE_MSG_IOV,
- "VF[%02x]: Processing mailbox message\n", p_vf->abs_vf_id);
+#ifndef CONFIG_ECORE_SW_CHANNEL
+ if (!mbx->b_pending_msg) {
+ DP_NOTICE(p_hwfn, true,
+ "VF[%02x]: Trying to process mailbox message when none is pending\n",
+ p_vf->abs_vf_id);
+ return;
+ }
+ mbx->b_pending_msg = false;
+#endif
mbx->first_tlv = mbx->req_virt->first_tlv;
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%02x]: Processing mailbox message [type %04x]\n",
+ p_vf->abs_vf_id, mbx->first_tlv.tl.type);
+
OSAL_IOV_VF_MSG_TYPE(p_hwfn,
p_vf->relative_vf_id,
mbx->first_tlv.tl.type);
@@ -3820,6 +4134,9 @@ void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,
case CHANNEL_TLV_COALESCE_UPDATE:
ecore_iov_vf_pf_set_coalesce(p_hwfn, p_ptt, p_vf);
break;
+ case CHANNEL_TLV_COALESCE_READ:
+ ecore_iov_vf_pf_get_coalesce(p_hwfn, p_ptt, p_vf);
+ break;
}
} else if (ecore_iov_tlv_supported(mbx->first_tlv.tl.type)) {
/* If we've received a message from a VF we consider malicious
@@ -3884,26 +4201,20 @@ void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,
#endif
}
-void ecore_iov_pf_add_pending_events(struct ecore_hwfn *p_hwfn, u8 vfid)
+void ecore_iov_pf_get_pending_events(struct ecore_hwfn *p_hwfn,
+ u64 *events)
{
- u64 add_bit = 1ULL << (vfid % 64);
+ int i;
- /* TODO - add locking mechanisms [no atomics in ecore, so we can't
- * add the lock inside the ecore_pf_iov struct].
- */
- p_hwfn->pf_iov_info->pending_events[vfid / 64] |= add_bit;
-}
+ OSAL_MEM_ZERO(events, sizeof(u64) * ECORE_VF_ARRAY_LENGTH);
-void ecore_iov_pf_get_and_clear_pending_events(struct ecore_hwfn *p_hwfn,
- u64 *events)
-{
- u64 *p_pending_events = p_hwfn->pf_iov_info->pending_events;
+ ecore_for_each_vf(p_hwfn, i) {
+ struct ecore_vf_info *p_vf;
- /* TODO - Take a lock */
- OSAL_MEMCPY(events, p_pending_events,
- sizeof(u64) * ECORE_VF_ARRAY_LENGTH);
- OSAL_MEMSET(p_pending_events, 0,
- sizeof(u64) * ECORE_VF_ARRAY_LENGTH);
+ p_vf = &p_hwfn->pf_iov_info->vfs_array[i];
+ if (p_vf->vf_mbx.b_pending_msg)
+ events[i / 64] |= 1ULL << (i % 64);
+ }
}
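
/* Sketch: how a caller can consume the bitmap built above; each set bit
 * maps back to a relative VF id. Illustrative only.
 */
static void sketch_scan_pending_vfs(struct ecore_hwfn *p_hwfn)
{
	u64 events[ECORE_VF_ARRAY_LENGTH];
	u16 i;

	ecore_iov_pf_get_pending_events(p_hwfn, events);

	for (i = 0; i < MAX_NUM_VFS_E4; i++)
		if (events[i / 64] & (1ULL << (i % 64)))
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "VF[%d] has a pending mailbox message\n",
				   i);
}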
static struct ecore_vf_info *
@@ -3937,6 +4248,8 @@ static enum _ecore_status_t ecore_sriov_vfpf_msg(struct ecore_hwfn *p_hwfn,
*/
p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo;
+ p_vf->vf_mbx.b_pending_msg = true;
+
return OSAL_PF_VF_MSG(p_hwfn, p_vf->relative_vf_id);
}
@@ -3945,24 +4258,31 @@ static void ecore_sriov_vfpf_malicious(struct ecore_hwfn *p_hwfn,
{
struct ecore_vf_info *p_vf;
- p_vf = ecore_sriov_get_vf_from_absid(p_hwfn, p_data->vfId);
+ p_vf = ecore_sriov_get_vf_from_absid(p_hwfn, p_data->vf_id);
if (!p_vf)
return;
- DP_INFO(p_hwfn,
- "VF [%d] - Malicious behavior [%02x]\n",
- p_vf->abs_vf_id, p_data->errId);
+ if (!p_vf->b_malicious) {
+ DP_NOTICE(p_hwfn, false,
+ "VF [%d] - Malicious behavior [%02x]\n",
+ p_vf->abs_vf_id, p_data->err_id);
- p_vf->b_malicious = true;
+ p_vf->b_malicious = true;
+ } else {
+ DP_INFO(p_hwfn,
+ "VF [%d] - Malicious behavior [%02x]\n",
+ p_vf->abs_vf_id, p_data->err_id);
+ }
OSAL_PF_VF_MALICIOUS(p_hwfn, p_vf->relative_vf_id);
}
-enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
- u8 opcode,
- __le16 echo,
- union event_ring_data *data)
+static enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
+ u8 opcode,
+ __le16 echo,
+ union event_ring_data *data,
+ u8 OSAL_UNUSED fw_return_code)
{
switch (opcode) {
case COMMON_EVENT_VF_PF_CHANNEL:
@@ -4001,7 +4321,7 @@ u16 ecore_iov_get_next_active_vf(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
return i;
out:
- return E4_MAX_NUM_VFS;
+ return MAX_NUM_VFS_E4;
}
enum _ecore_status_t ecore_iov_copy_vf_msg(struct ecore_hwfn *p_hwfn,
@@ -4370,6 +4690,7 @@ enum _ecore_status_t ecore_iov_configure_tx_rate(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
int vfid, int val)
{
+ struct ecore_mcp_link_state *p_link;
struct ecore_vf_info *vf;
u8 abs_vp_id = 0;
enum _ecore_status_t rc;
@@ -4383,7 +4704,10 @@ enum _ecore_status_t ecore_iov_configure_tx_rate(struct ecore_hwfn *p_hwfn,
if (rc != ECORE_SUCCESS)
return rc;
- return ecore_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val);
+ p_link = &ECORE_LEADING_HWFN(p_hwfn->p_dev)->mcp_info->link_output;
+
+ return ecore_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val,
+ p_link->speed);
}
enum _ecore_status_t ecore_iov_get_vf_stats(struct ecore_hwfn *p_hwfn,
@@ -4513,3 +4837,17 @@ ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid)
else
return 0;
}
+
+#ifdef CONFIG_ECORE_SW_CHANNEL
+void ecore_iov_set_vf_hw_channel(struct ecore_hwfn *p_hwfn, int vfid,
+ bool b_is_hw)
+{
+ struct ecore_vf_info *vf_info;
+
+ vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!vf_info)
+ return;
+
+ vf_info->b_hw_channel = b_is_hw;
+}
+#endif
diff --git a/drivers/net/qede/base/ecore_sriov.h b/drivers/net/qede/base/ecore_sriov.h
index 3c2f58bd..850b1052 100644
--- a/drivers/net/qede/base/ecore_sriov.h
+++ b/drivers/net/qede/base/ecore_sriov.h
@@ -16,7 +16,7 @@
#include "ecore_l2.h"
#define ECORE_ETH_MAX_VF_NUM_VLAN_FILTERS \
- (E4_MAX_NUM_VFS * ECORE_ETH_VF_NUM_VLAN_FILTERS)
+ (MAX_NUM_VFS_E4 * ECORE_ETH_VF_NUM_VLAN_FILTERS)
/* Represents a full message. Both the request filled by VF
* and the response filled by the PF. The VF needs one copy
@@ -45,6 +45,9 @@ struct ecore_iov_vf_mbx {
/* Address in VF where a pending message is located */
dma_addr_t pending_req;
+ /* Message from VF awaits handling */
+ bool b_pending_msg;
+
u8 *offset;
#ifdef CONFIG_ECORE_SW_CHANNEL
@@ -63,6 +66,10 @@ struct ecore_iov_vf_mbx {
*/
};
+#define ECORE_IOV_LEGACY_QID_RX (0)
+#define ECORE_IOV_LEGACY_QID_TX (1)
+#define ECORE_IOV_QID_INVALID (0xFE)
+
struct ecore_vf_queue_cid {
bool b_is_tx;
struct ecore_queue_cid *p_cid;
@@ -110,6 +117,11 @@ struct ecore_vf_info {
struct ecore_bulletin bulletin;
dma_addr_t vf_bulletin;
+#ifdef CONFIG_ECORE_SW_CHANNEL
+	/* Determine whether PF communicates with VF using HW/SW channel */
+ bool b_hw_channel;
+#endif
+
/* PF saves a copy of the last VF acquire message */
struct vfpf_acquire_tlv acquire;
@@ -129,6 +141,9 @@ struct ecore_vf_info {
u8 num_rxqs;
u8 num_txqs;
+ u16 rx_coal;
+ u16 tx_coal;
+
u8 num_sbs;
u8 num_mac_filters;
@@ -160,8 +175,7 @@ struct ecore_vf_info {
* capability enabled.
*/
struct ecore_pf_iov {
- struct ecore_vf_info vfs_array[E4_MAX_NUM_VFS];
- u64 pending_events[ECORE_VF_ARRAY_LENGTH];
+ struct ecore_vf_info vfs_array[MAX_NUM_VFS_E4];
u64 pending_flr[ECORE_VF_ARRAY_LENGTH];
#ifndef REMOVE_DBG
@@ -197,17 +211,13 @@ enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_add_tlv - place a given tlv on the tlv buffer at next offset
*
- * @param p_hwfn
- * @param p_iov
+ * @param offset
* @param type
* @param length
*
* @return pointer to the newly placed tlv
*/
-void *ecore_add_tlv(struct ecore_hwfn *p_hwfn,
- u8 **offset,
- u16 type,
- u16 length);
+void *ecore_add_tlv(u8 **offset, u16 type, u16 length);
/**
* @brief list the types and lengths of the tlvs on the buffer
@@ -231,10 +241,8 @@ enum _ecore_status_t ecore_iov_alloc(struct ecore_hwfn *p_hwfn);
* @brief ecore_iov_setup - setup sriov related resources
*
* @param p_hwfn
- * @param p_ptt
*/
-void ecore_iov_setup(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt);
+void ecore_iov_setup(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_iov_free - free sriov related resources
@@ -251,38 +259,12 @@ void ecore_iov_free(struct ecore_hwfn *p_hwfn);
void ecore_iov_free_hw_info(struct ecore_dev *p_dev);
/**
- * @brief ecore_sriov_eqe_event - handle async sriov event arrived on eqe.
- *
- * @param p_hwfn
- * @param opcode
- * @param echo
- * @param data
- */
-enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
- u8 opcode,
- __le16 echo,
- union event_ring_data *data);
-
-/**
- * @brief calculate CRC for bulletin board validation
- *
- * @param basic crc seed
- * @param ptr to beginning of buffer
- * @length in bytes of buffer
- *
- * @return calculated crc over buffer [with respect to seed].
- */
-u32 ecore_crc32(u32 crc,
- u8 *ptr,
- u32 length);
-
-/**
* @brief Mark structs of vfs that have been FLR-ed.
*
* @param p_hwfn
* @param disabled_vfs - bitmask of all VFs on path that were FLRed
*
- * @return 1 iff one of the PF's vfs got FLRed. 0 otherwise.
+ * @return true iff one of the PF's vfs got FLRed. false otherwise.
*/
bool ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn,
u32 *disabled_vfs);
diff --git a/drivers/net/qede/base/ecore_vf.c b/drivers/net/qede/base/ecore_vf.c
index f4d331cf..25109dbd 100644
--- a/drivers/net/qede/base/ecore_vf.c
+++ b/drivers/net/qede/base/ecore_vf.c
@@ -44,7 +44,7 @@ static void *ecore_vf_pf_prep(struct ecore_hwfn *p_hwfn, u16 type, u16 length)
OSAL_MEMSET(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));
/* Init type and length */
- p_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset, type, length);
+ p_tlv = ecore_add_tlv(&p_iov->offset, type, length);
/* Init first tlv header */
((struct vfpf_first_tlv *)p_tlv)->reply_address =
@@ -65,6 +65,14 @@ static void ecore_vf_pf_req_end(struct ecore_hwfn *p_hwfn,
OSAL_MUTEX_RELEASE(&p_hwfn->vf_iov_info->mutex);
}
+#ifdef CONFIG_ECORE_SW_CHANNEL
+/* The SW channel implementation on Windows needs to know the 'exact'
+ * response size of any given message. That means that for future
+ * messages we'd be unable to send TLVs to the PF if it can't answer
+ * them, i.e., whenever |response| != |default response|.
+ * We'd need to handshake such capabilities in acquire.
+ */
+#endif
static enum _ecore_status_t
ecore_send_msg2pf(struct ecore_hwfn *p_hwfn,
u8 *done, u32 resp_size)
@@ -122,35 +130,118 @@ ecore_send_msg2pf(struct ecore_hwfn *p_hwfn,
}
if (!*done) {
- DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
- "VF <-- PF Timeout [Type %d]\n",
- p_req->first_tlv.tl.type);
+ DP_NOTICE(p_hwfn, true,
+ "VF <-- PF Timeout [Type %d]\n",
+ p_req->first_tlv.tl.type);
rc = ECORE_TIMEOUT;
} else {
- DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
- "PF response: %d [Type %d]\n",
- *done, p_req->first_tlv.tl.type);
+ if ((*done != PFVF_STATUS_SUCCESS) &&
+ (*done != PFVF_STATUS_NO_RESOURCE))
+ DP_NOTICE(p_hwfn, false,
+ "PF response: %d [Type %d]\n",
+ *done, p_req->first_tlv.tl.type);
+ else
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "PF response: %d [Type %d]\n",
+ *done, p_req->first_tlv.tl.type);
+ }
+
+ return rc;
+}
+
+static void ecore_vf_pf_add_qid(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid)
+{
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct vfpf_qid_tlv *p_qid_tlv;
+
+ /* Only add QIDs for the queue if it was negotiated with PF */
+ if (!(p_iov->acquire_resp.pfdev_info.capabilities &
+ PFVF_ACQUIRE_CAP_QUEUE_QIDS))
+ return;
+
+ p_qid_tlv = ecore_add_tlv(&p_iov->offset,
+ CHANNEL_TLV_QID, sizeof(*p_qid_tlv));
+ p_qid_tlv->qid = p_cid->qid_usage_idx;
+}
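
/* Sketch of the negotiation this helper relies on: the QID TLV is only
 * appended once the PF has echoed VFPF_ACQUIRE_CAP_QUEUE_QIDS back in its
 * acquire response. Helper name is an assumption.
 */
static bool sketch_vf_uses_qid_tlvs(struct ecore_vf_iov *p_iov)
{
	return !!(p_iov->acquire_resp.pfdev_info.capabilities &
		  PFVF_ACQUIRE_CAP_QUEUE_QIDS);
}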
+
+enum _ecore_status_t _ecore_vf_pf_release(struct ecore_hwfn *p_hwfn,
+ bool b_final)
+{
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_def_resp_tlv *resp;
+ struct vfpf_first_tlv *req;
+ u32 size;
+ enum _ecore_status_t rc;
+
+ /* clear mailbox and prep first tlv */
+ req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_RELEASE, sizeof(*req));
+
+ /* add list termination tlv */
+ ecore_add_tlv(&p_iov->offset,
+ CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ resp = &p_iov->pf2vf_reply->default_resp;
+ rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+
+ if (rc == ECORE_SUCCESS && resp->hdr.status != PFVF_STATUS_SUCCESS)
+ rc = ECORE_AGAIN;
+
+ ecore_vf_pf_req_end(p_hwfn, rc);
+ if (!b_final)
+ return rc;
+
+ p_hwfn->b_int_enabled = 0;
+
+ if (p_iov->vf2pf_request)
+ OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
+ p_iov->vf2pf_request,
+ p_iov->vf2pf_request_phys,
+ sizeof(union vfpf_tlvs));
+ if (p_iov->pf2vf_reply)
+ OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
+ p_iov->pf2vf_reply,
+ p_iov->pf2vf_reply_phys,
+ sizeof(union pfvf_tlvs));
+
+ if (p_iov->bulletin.p_virt) {
+ size = sizeof(struct ecore_bulletin_content);
+ OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
+ p_iov->bulletin.p_virt,
+ p_iov->bulletin.phys,
+ size);
}
+#ifdef CONFIG_ECORE_LOCK_ALLOC
+ OSAL_MUTEX_DEALLOC(&p_iov->mutex);
+#endif
+
+ OSAL_FREE(p_hwfn->p_dev, p_hwfn->vf_iov_info);
+ p_hwfn->vf_iov_info = OSAL_NULL;
+
return rc;
}
+enum _ecore_status_t ecore_vf_pf_release(struct ecore_hwfn *p_hwfn)
+{
+ return _ecore_vf_pf_release(p_hwfn, true);
+}
+
#define VF_ACQUIRE_THRESH 3
static void ecore_vf_pf_acquire_reduce_resc(struct ecore_hwfn *p_hwfn,
struct vf_pf_resc_request *p_req,
struct pf_vf_resc *p_resp)
{
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
- "PF unwilling to fullill resource request: rxq [%02x/%02x]"
- " txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x]"
- " vlan [%02x/%02x] mc [%02x/%02x]."
- " Try PF recommended amount\n",
+ "PF unwilling to fullill resource request: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]. Try PF recommended amount\n",
p_req->num_rxqs, p_resp->num_rxqs,
p_req->num_rxqs, p_resp->num_txqs,
p_req->num_sbs, p_resp->num_sbs,
p_req->num_mac_filters, p_resp->num_mac_filters,
p_req->num_vlan_filters, p_resp->num_vlan_filters,
- p_req->num_mc_filters, p_resp->num_mc_filters);
+ p_req->num_mc_filters, p_resp->num_mc_filters,
+ p_req->num_cids, p_resp->num_cids);
/* humble our request */
p_req->num_txqs = p_resp->num_txqs;
@@ -159,6 +250,7 @@ static void ecore_vf_pf_acquire_reduce_resc(struct ecore_hwfn *p_hwfn,
p_req->num_mac_filters = p_resp->num_mac_filters;
p_req->num_vlan_filters = p_resp->num_vlan_filters;
p_req->num_mc_filters = p_resp->num_mc_filters;
+ p_req->num_cids = p_resp->num_cids;
}
static enum _ecore_status_t ecore_vf_pf_acquire(struct ecore_hwfn *p_hwfn)
@@ -185,6 +277,7 @@ static enum _ecore_status_t ecore_vf_pf_acquire(struct ecore_hwfn *p_hwfn)
p_resc->num_sbs = ECORE_MAX_VF_CHAINS_PER_PF;
p_resc->num_mac_filters = ECORE_ETH_VF_NUM_MAC_FILTERS;
p_resc->num_vlan_filters = ECORE_ETH_VF_NUM_VLAN_FILTERS;
+ p_resc->num_cids = ECORE_ETH_VF_DEFAULT_NUM_CIDS;
OSAL_MEMSET(&vf_sw_info, 0, sizeof(vf_sw_info));
OSAL_VF_FILL_ACQUIRE_RESC_REQ(p_hwfn, &req->resc_request, &vf_sw_info);
@@ -201,12 +294,17 @@ static enum _ecore_status_t ecore_vf_pf_acquire(struct ecore_hwfn *p_hwfn)
/* Fill capability field with any non-deprecated config we support */
req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_100G;
+ /* If we've mapped the doorbell bar, try using queue qids */
+ if (p_iov->b_doorbell_bar)
+ req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_PHYSICAL_BAR |
+ VFPF_ACQUIRE_CAP_QUEUE_QIDS;
+
/* pf 2 vf bulletin board address */
req->bulletin_addr = p_iov->bulletin.phys;
req->bulletin_size = p_iov->bulletin.size;
/* add list termination tlv */
- ecore_add_tlv(p_hwfn, &p_iov->offset,
+ ecore_add_tlv(&p_iov->offset,
CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
@@ -221,10 +319,8 @@ static enum _ecore_status_t ecore_vf_pf_acquire(struct ecore_hwfn *p_hwfn)
/* send acquire request */
rc = ecore_send_msg2pf(p_hwfn,
&resp->hdr.status, sizeof(*resp));
-
- /* PF timeout */
- if (rc)
- return rc;
+ if (rc != ECORE_SUCCESS)
+ goto exit;
/* copy acquire response from buffer to p_hwfn */
OSAL_MEMCPY(&p_iov->acquire_resp,
@@ -310,6 +406,15 @@ static enum _ecore_status_t ecore_vf_pf_acquire(struct ecore_hwfn *p_hwfn)
VFPF_ACQUIRE_CAP_PRE_FP_HSI)
p_iov->b_pre_fp_hsi = true;
+ /* In case PF doesn't support multi-queue Tx, update the number of
+ * CIDs to reflect the number of queues [older PFs didn't fill that
+ * field].
+ */
+ if (!(resp->pfdev_info.capabilities &
+ PFVF_ACQUIRE_CAP_QUEUE_QIDS))
+ resp->resc.num_cids = resp->resc.num_rxqs +
+ resp->resc.num_txqs;
+
rc = OSAL_VF_UPDATE_ACQUIRE_RESC_RESP(p_hwfn, &resp->resc);
if (rc) {
DP_NOTICE(p_hwfn, true,
@@ -325,7 +430,7 @@ static enum _ecore_status_t ecore_vf_pf_acquire(struct ecore_hwfn *p_hwfn)
/* get HW info */
p_hwfn->p_dev->type = resp->pfdev_info.dev_type;
- p_hwfn->p_dev->chip_rev = resp->pfdev_info.chip_rev;
+ p_hwfn->p_dev->chip_rev = (u8)resp->pfdev_info.chip_rev;
DP_INFO(p_hwfn, "Chip details - %s%d\n",
ECORE_IS_BB(p_hwfn->p_dev) ? "BB" : "AH",
@@ -357,10 +462,28 @@ exit:
return rc;
}
+u32 ecore_vf_hw_bar_size(struct ecore_hwfn *p_hwfn,
+ enum BAR_ID bar_id)
+{
+ u32 bar_size;
+
+ /* Regview size is fixed */
+ if (bar_id == BAR_ID_0)
+ return 1 << 17;
+
+ /* Doorbell is received from PF */
+ bar_size = p_hwfn->vf_iov_info->acquire_resp.pfdev_info.bar_size;
+ if (bar_size)
+ return 1 << bar_size;
+ return 0;
+}
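
/* Sketch: bar_size travels as log2 of the size in bytes - e.g. a PF
 * reporting 17 means a 128KB doorbell bar, while 0 indicates a legacy PF
 * that never filled the field. Illustrative check only.
 */
static bool sketch_db_bar_size_known(struct ecore_hwfn *p_hwfn)
{
	return ecore_vf_hw_bar_size(p_hwfn, BAR_ID_1) != 0;
}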
+
enum _ecore_status_t ecore_vf_hw_prepare(struct ecore_hwfn *p_hwfn)
{
+ struct ecore_hwfn *p_lead = ECORE_LEADING_HWFN(p_hwfn->p_dev);
struct ecore_vf_iov *p_iov;
u32 reg;
+ enum _ecore_status_t rc;
/* Set number of hwfns - might be overridden once leading hwfn learns
* actual configuration from PF.
@@ -368,10 +491,6 @@ enum _ecore_status_t ecore_vf_hw_prepare(struct ecore_hwfn *p_hwfn)
if (IS_LEAD_HWFN(p_hwfn))
p_hwfn->p_dev->num_hwfns = 1;
- /* Set the doorbell bar. Assumption: regview is set */
- p_hwfn->doorbells = (u8 OSAL_IOMEM *)p_hwfn->regview +
- PXP_VF_BAR0_START_DQ;
-
reg = PXP_VF_BAR0_ME_OPAQUE_ADDRESS;
p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, reg);
@@ -386,6 +505,31 @@ enum _ecore_status_t ecore_vf_hw_prepare(struct ecore_hwfn *p_hwfn)
return ECORE_NOMEM;
}
+	/* Doorbells are tricky; the upper layer has already set the hwfn
+	 * doorbell value, but there are several incompatibility scenarios
+	 * where that would be incorrect and we'd need to override it.
+	 */
+ if (p_hwfn->doorbells == OSAL_NULL) {
+ p_hwfn->doorbells = (u8 OSAL_IOMEM *)p_hwfn->regview +
+ PXP_VF_BAR0_START_DQ;
+ } else if (p_hwfn == p_lead) {
+ /* For leading hw-function, value is always correct, but need
+ * to handle scenario where legacy PF would not support 100g
+ * mapped bars later.
+ */
+ p_iov->b_doorbell_bar = true;
+ } else {
+ /* here, value would be correct ONLY if the leading hwfn
+ * received indication that mapped-bars are supported.
+ */
+ if (p_lead->vf_iov_info->b_doorbell_bar)
+ p_iov->b_doorbell_bar = true;
+ else
+ p_hwfn->doorbells = (u8 OSAL_IOMEM *)
+ p_hwfn->regview +
+ PXP_VF_BAR0_START_DQ;
+ }
+
/* Allocate vf2pf msg */
p_iov->vf2pf_request = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
&p_iov->
@@ -428,14 +572,44 @@ enum _ecore_status_t ecore_vf_hw_prepare(struct ecore_hwfn *p_hwfn)
p_iov->bulletin.p_virt, (unsigned long)p_iov->bulletin.phys,
p_iov->bulletin.size);
+#ifdef CONFIG_ECORE_LOCK_ALLOC
OSAL_MUTEX_ALLOC(p_hwfn, &p_iov->mutex);
+#endif
OSAL_MUTEX_INIT(&p_iov->mutex);
p_hwfn->vf_iov_info = p_iov;
p_hwfn->hw_info.personality = ECORE_PCI_ETH;
- return ecore_vf_pf_acquire(p_hwfn);
+ rc = ecore_vf_pf_acquire(p_hwfn);
+
+ /* If VF is 100g using a mapped bar and PF is too old to support that,
+	 * acquisition would succeed - but the VF would have no way of
+	 * knowing the size of the doorbell bar configured in HW, and thus
+	 * would not know how to split it for the 2nd hw-function.
+ * In this case we re-try without the indication of the mapped
+ * doorbell.
+ */
+ if (rc == ECORE_SUCCESS &&
+ p_iov->b_doorbell_bar &&
+ !ecore_vf_hw_bar_size(p_hwfn, BAR_ID_1) &&
+ ECORE_IS_CMT(p_hwfn->p_dev)) {
+ rc = _ecore_vf_pf_release(p_hwfn, false);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ p_iov->b_doorbell_bar = false;
+ p_hwfn->doorbells = (u8 OSAL_IOMEM *)p_hwfn->regview +
+ PXP_VF_BAR0_START_DQ;
+ rc = ecore_vf_pf_acquire(p_hwfn);
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Regview [%p], Doorbell [%p], Device-doorbell [%p]\n",
+ p_hwfn->regview, p_hwfn->doorbells,
+ p_hwfn->p_dev->doorbells);
+
+ return rc;
free_vf2pf_request:
OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_iov->vf2pf_request,
@@ -583,7 +757,7 @@ ecore_vf_pf_tunnel_param_update(struct ecore_hwfn *p_hwfn,
ECORE_MODE_IPGRE_TUNN, &p_req->ipgre_clss);
/* add list termination tlv */
- ecore_add_tlv(p_hwfn, &p_iov->offset,
+ ecore_add_tlv(&p_iov->offset,
CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
@@ -627,8 +801,8 @@ ecore_vf_pf_rxq_start(struct ecore_hwfn *p_hwfn,
req->cqe_pbl_addr = cqe_pbl_addr;
req->cqe_pbl_size = cqe_pbl_size;
req->rxq_addr = bd_chain_phys_addr;
- req->hw_sb = p_cid->rel.sb;
- req->sb_index = p_cid->rel.sb_idx;
+ req->hw_sb = p_cid->sb_igu_id;
+ req->sb_index = p_cid->sb_idx;
req->bd_max_bytes = bd_max_bytes;
req->stat_id = -1; /* Keep initialized, for future compatibility */
@@ -649,8 +823,10 @@ ecore_vf_pf_rxq_start(struct ecore_hwfn *p_hwfn,
(u32 *)(&init_prod_val));
}
+ ecore_vf_pf_add_qid(p_hwfn, p_cid);
+
/* add list termination tlv */
- ecore_add_tlv(p_hwfn, &p_iov->offset,
+ ecore_add_tlv(&p_iov->offset,
CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
@@ -704,8 +880,10 @@ enum _ecore_status_t ecore_vf_pf_rxq_stop(struct ecore_hwfn *p_hwfn,
req->num_rxqs = 1;
req->cqe_completion = cqe_completion;
+ ecore_vf_pf_add_qid(p_hwfn, p_cid);
+
/* add list termination tlv */
- ecore_add_tlv(p_hwfn, &p_iov->offset,
+ ecore_add_tlv(&p_iov->offset,
CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
@@ -745,11 +923,13 @@ ecore_vf_pf_txq_start(struct ecore_hwfn *p_hwfn,
/* Tx */
req->pbl_addr = pbl_addr;
req->pbl_size = pbl_size;
- req->hw_sb = p_cid->rel.sb;
- req->sb_index = p_cid->rel.sb_idx;
+ req->hw_sb = p_cid->sb_igu_id;
+ req->sb_index = p_cid->sb_idx;
+
+ ecore_vf_pf_add_qid(p_hwfn, p_cid);
/* add list termination tlv */
- ecore_add_tlv(p_hwfn, &p_iov->offset,
+ ecore_add_tlv(&p_iov->offset,
CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
@@ -799,8 +979,10 @@ enum _ecore_status_t ecore_vf_pf_txq_stop(struct ecore_hwfn *p_hwfn,
req->tx_qid = p_cid->rel.queue_id;
req->num_txqs = 1;
+ ecore_vf_pf_add_qid(p_hwfn, p_cid);
+
/* add list termination tlv */
- ecore_add_tlv(p_hwfn, &p_iov->offset,
+ ecore_add_tlv(&p_iov->offset,
CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
@@ -831,34 +1013,32 @@ enum _ecore_status_t ecore_vf_pf_rxqs_update(struct ecore_hwfn *p_hwfn,
struct vfpf_update_rxq_tlv *req;
enum _ecore_status_t rc;
- /* TODO - API is limited to assuming continuous regions of queues,
- * but VF queues might not fullfil this requirement.
- * Need to consider whether we need new TLVs for this, or whether
- * simply doing it iteratively is good enough.
+ /* Starting with CHANNEL_TLV_QID and the need for additional queue
+ * information, this API stopped supporting multiple rxqs.
+ * TODO - remove this and change the API to accept a single queue-cid
+ * in a follow-up patch.
*/
- if (!num_rxqs)
+ if (num_rxqs != 1) {
+ DP_NOTICE(p_hwfn, true,
+ "VFs can no longer update more than a single queue\n");
return ECORE_INVAL;
+ }
-again:
/* clear mailbox and prep first tlv */
req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_UPDATE_RXQ, sizeof(*req));
- /* Find the length of the current contagious range of queues beginning
- * at first queue's index.
- */
req->rx_qid = (*pp_cid)->rel.queue_id;
- for (req->num_rxqs = 1; req->num_rxqs < num_rxqs; req->num_rxqs++)
- if (pp_cid[req->num_rxqs]->rel.queue_id !=
- req->rx_qid + req->num_rxqs)
- break;
+ req->num_rxqs = 1;
if (comp_cqe_flg)
req->flags |= VFPF_RXQ_UPD_COMPLETE_CQE_FLAG;
if (comp_event_flg)
req->flags |= VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG;
+ ecore_vf_pf_add_qid(p_hwfn, *pp_cid);
+
/* add list termination tlv */
- ecore_add_tlv(p_hwfn, &p_iov->offset,
+ ecore_add_tlv(&p_iov->offset,
CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
@@ -871,15 +1051,6 @@ again:
goto exit;
}
- /* Make sure we're done with all the queues */
- if (req->num_rxqs < num_rxqs) {
- num_rxqs -= req->num_rxqs;
- pp_cid += req->num_rxqs;
- /* TODO - should we give a non-locked variant instead? */
- ecore_vf_pf_req_end(p_hwfn, rc);
- goto again;
- }
-
exit:
ecore_vf_pf_req_end(p_hwfn, rc);
return rc;
@@ -908,12 +1079,15 @@ ecore_vf_pf_vport_start(struct ecore_hwfn *p_hwfn, u8 vport_id,
req->only_untagged = only_untagged;
/* status blocks */
- for (i = 0; i < p_hwfn->vf_iov_info->acquire_resp.resc.num_sbs; i++)
- if (p_hwfn->sbs_info[i])
- req->sb_addr[i] = p_hwfn->sbs_info[i]->sb_phys;
+ for (i = 0; i < p_hwfn->vf_iov_info->acquire_resp.resc.num_sbs; i++) {
+ struct ecore_sb_info *p_sb = p_hwfn->vf_iov_info->sbs_info[i];
+
+ if (p_sb)
+ req->sb_addr[i] = p_sb->sb_phys;
+ }
/* add list termination tlv */
- ecore_add_tlv(p_hwfn, &p_iov->offset,
+ ecore_add_tlv(&p_iov->offset,
CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
@@ -944,7 +1118,7 @@ enum _ecore_status_t ecore_vf_pf_vport_stop(struct ecore_hwfn *p_hwfn)
sizeof(struct vfpf_first_tlv));
/* add list termination tlv */
- ecore_add_tlv(p_hwfn, &p_iov->offset,
+ ecore_add_tlv(&p_iov->offset,
CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
@@ -1051,7 +1225,7 @@ ecore_vf_pf_vport_update(struct ecore_hwfn *p_hwfn,
struct vfpf_vport_update_activate_tlv *p_act_tlv;
size = sizeof(struct vfpf_vport_update_activate_tlv);
- p_act_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
+ p_act_tlv = ecore_add_tlv(&p_iov->offset,
CHANNEL_TLV_VPORT_UPDATE_ACTIVATE,
size);
resp_size += sizeof(struct pfvf_def_resp_tlv);
@@ -1071,7 +1245,7 @@ ecore_vf_pf_vport_update(struct ecore_hwfn *p_hwfn,
struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv;
size = sizeof(struct vfpf_vport_update_vlan_strip_tlv);
- p_vlan_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
+ p_vlan_tlv = ecore_add_tlv(&p_iov->offset,
CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP,
size);
resp_size += sizeof(struct pfvf_def_resp_tlv);
@@ -1084,7 +1258,7 @@ ecore_vf_pf_vport_update(struct ecore_hwfn *p_hwfn,
size = sizeof(struct vfpf_vport_update_tx_switch_tlv);
tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
- p_tx_switch_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
+ p_tx_switch_tlv = ecore_add_tlv(&p_iov->offset,
tlv, size);
resp_size += sizeof(struct pfvf_def_resp_tlv);
@@ -1095,7 +1269,7 @@ ecore_vf_pf_vport_update(struct ecore_hwfn *p_hwfn,
struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;
size = sizeof(struct vfpf_vport_update_mcast_bin_tlv);
- p_mcast_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
+ p_mcast_tlv = ecore_add_tlv(&p_iov->offset,
CHANNEL_TLV_VPORT_UPDATE_MCAST,
size);
resp_size += sizeof(struct pfvf_def_resp_tlv);
@@ -1113,7 +1287,7 @@ ecore_vf_pf_vport_update(struct ecore_hwfn *p_hwfn,
tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
size = sizeof(struct vfpf_vport_update_accept_param_tlv);
- p_accept_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset, tlv, size);
+ p_accept_tlv = ecore_add_tlv(&p_iov->offset, tlv, size);
resp_size += sizeof(struct pfvf_def_resp_tlv);
if (update_rx) {
@@ -1135,7 +1309,7 @@ ecore_vf_pf_vport_update(struct ecore_hwfn *p_hwfn,
int i, table_size;
size = sizeof(struct vfpf_vport_update_rss_tlv);
- p_rss_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
+ p_rss_tlv = ecore_add_tlv(&p_iov->offset,
CHANNEL_TLV_VPORT_UPDATE_RSS, size);
resp_size += sizeof(struct pfvf_def_resp_tlv);
@@ -1173,8 +1347,7 @@ ecore_vf_pf_vport_update(struct ecore_hwfn *p_hwfn,
size = sizeof(struct vfpf_vport_update_accept_any_vlan_tlv);
tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
- p_any_vlan_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
- tlv, size);
+ p_any_vlan_tlv = ecore_add_tlv(&p_iov->offset, tlv, size);
resp_size += sizeof(struct pfvf_def_resp_tlv);
p_any_vlan_tlv->accept_any_vlan = p_params->accept_any_vlan;
@@ -1188,7 +1361,7 @@ ecore_vf_pf_vport_update(struct ecore_hwfn *p_hwfn,
sge_tpa_params = p_params->sge_tpa_params;
size = sizeof(struct vfpf_vport_update_sge_tpa_tlv);
- p_sge_tpa_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
+ p_sge_tpa_tlv = ecore_add_tlv(&p_iov->offset,
CHANNEL_TLV_VPORT_UPDATE_SGE_TPA,
size);
resp_size += sizeof(struct pfvf_def_resp_tlv);
@@ -1226,7 +1399,7 @@ ecore_vf_pf_vport_update(struct ecore_hwfn *p_hwfn,
}
/* add list termination tlv */
- ecore_add_tlv(p_hwfn, &p_iov->offset,
+ ecore_add_tlv(&p_iov->offset,
CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
@@ -1258,7 +1431,7 @@ enum _ecore_status_t ecore_vf_pf_reset(struct ecore_hwfn *p_hwfn)
req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_CLOSE, sizeof(*req));
/* add list termination tlv */
- ecore_add_tlv(p_hwfn, &p_iov->offset,
+ ecore_add_tlv(&p_iov->offset,
CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
@@ -1280,55 +1453,6 @@ exit:
return rc;
}
-enum _ecore_status_t ecore_vf_pf_release(struct ecore_hwfn *p_hwfn)
-{
- struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
- struct pfvf_def_resp_tlv *resp;
- struct vfpf_first_tlv *req;
- u32 size;
- enum _ecore_status_t rc;
-
- /* clear mailbox and prep first tlv */
- req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_RELEASE, sizeof(*req));
-
- /* add list termination tlv */
- ecore_add_tlv(p_hwfn, &p_iov->offset,
- CHANNEL_TLV_LIST_END,
- sizeof(struct channel_list_end_tlv));
-
- resp = &p_iov->pf2vf_reply->default_resp;
- rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
-
- if (rc == ECORE_SUCCESS && resp->hdr.status != PFVF_STATUS_SUCCESS)
- rc = ECORE_AGAIN;
-
- ecore_vf_pf_req_end(p_hwfn, rc);
-
- p_hwfn->b_int_enabled = 0;
-
- if (p_iov->vf2pf_request)
- OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
- p_iov->vf2pf_request,
- p_iov->vf2pf_request_phys,
- sizeof(union vfpf_tlvs));
- if (p_iov->pf2vf_reply)
- OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
- p_iov->pf2vf_reply,
- p_iov->pf2vf_reply_phys,
- sizeof(union pfvf_tlvs));
-
- if (p_iov->bulletin.p_virt) {
- size = sizeof(struct ecore_bulletin_content);
- OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
- p_iov->bulletin.p_virt,
- p_iov->bulletin.phys, size);
- }
-
- OSAL_FREE(p_hwfn->p_dev, p_hwfn->vf_iov_info);
-
- return rc;
-}
-
void ecore_vf_pf_filter_mcast(struct ecore_hwfn *p_hwfn,
struct ecore_filter_mcast *p_filter_cmd)
{
@@ -1374,7 +1498,7 @@ enum _ecore_status_t ecore_vf_pf_filter_ucast(struct ecore_hwfn *p_hwfn,
req->vlan = p_ucast->vlan;
/* add list termination tlv */
- ecore_add_tlv(p_hwfn, &p_iov->offset,
+ ecore_add_tlv(&p_iov->offset,
CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
@@ -1405,7 +1529,7 @@ enum _ecore_status_t ecore_vf_pf_int_cleanup(struct ecore_hwfn *p_hwfn)
sizeof(struct vfpf_first_tlv));
/* add list termination tlv */
- ecore_add_tlv(p_hwfn, &p_iov->offset,
+ ecore_add_tlv(&p_iov->offset,
CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
@@ -1424,6 +1548,39 @@ exit:
return rc;
}
+enum _ecore_status_t ecore_vf_pf_get_coalesce(struct ecore_hwfn *p_hwfn,
+ u16 *p_coal,
+ struct ecore_queue_cid *p_cid)
+{
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_read_coal_resp_tlv *resp;
+ struct vfpf_read_coal_req_tlv *req;
+ enum _ecore_status_t rc;
+
+ /* clear mailbox and prep header tlv */
+ req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_COALESCE_READ,
+ sizeof(*req));
+ req->qid = p_cid->rel.queue_id;
+ req->is_rx = p_cid->b_is_rx ? 1 : 0;
+
+ ecore_add_tlv(&p_iov->offset, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+ resp = &p_iov->pf2vf_reply->read_coal_resp;
+
+ rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc != ECORE_SUCCESS)
+ goto exit;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+ goto exit;
+
+ *p_coal = resp->coal;
+exit:
+ ecore_vf_pf_req_end(p_hwfn, rc);
+
+ return rc;
+}
+
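The new read path mirrors the channel's standard request/response flow: prep
the first TLV, terminate the list, send, and always release the mailbox via
ecore_vf_pf_req_end(). A minimal caller sketch (the wrapper name below is
hypothetical, not part of this patch):

    static void example_read_rx_coal(struct ecore_hwfn *p_hwfn,
                                     struct ecore_queue_cid *p_cid)
    {
        u16 coal = 0;

        /* Queries the PF over the vf2pf channel; on success 'coal'
         * holds the Rx coalescing value in microseconds.
         */
        if (ecore_vf_pf_get_coalesce(p_hwfn, &coal, p_cid) == ECORE_SUCCESS)
            DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
                       "rx coalesce = %u usec\n", coal);
    }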
enum _ecore_status_t
ecore_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn, u16 rx_coal, u16 tx_coal,
struct ecore_queue_cid *p_cid)
@@ -1446,7 +1603,7 @@ ecore_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn, u16 rx_coal, u16 tx_coal,
rx_coal, tx_coal, req->qid);
/* add list termination tlv */
- ecore_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END,
+ ecore_add_tlv(&p_iov->offset, CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
resp = &p_iov->pf2vf_reply->default_resp;
@@ -1479,6 +1636,24 @@ u16 ecore_vf_get_igu_sb_id(struct ecore_hwfn *p_hwfn,
return p_iov->acquire_resp.resc.hw_sbs[sb_id].hw_sb_id;
}
+void ecore_vf_set_sb_info(struct ecore_hwfn *p_hwfn,
+ u16 sb_id, struct ecore_sb_info *p_sb)
+{
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+
+ if (!p_iov) {
+ DP_NOTICE(p_hwfn, true, "vf_sriov_info isn't initialized\n");
+ return;
+ }
+
+ if (sb_id >= PFVF_MAX_SBS_PER_VF) {
+ DP_NOTICE(p_hwfn, true, "Can't configure SB %04x\n", sb_id);
+ return;
+ }
+
+ p_iov->sbs_info[sb_id] = p_sb;
+}
+
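This setter doubles as the removal path; a sketch of the expected call
pattern (the init/teardown call sites are assumed, not shown in this patch):

    /* Fastpath init: store the SB so later lookups can reach it */
    ecore_vf_set_sb_info(p_hwfn, sb_id, p_sb);

    /* Fastpath teardown: drop the stored pointer */
    ecore_vf_set_sb_info(p_hwfn, sb_id, OSAL_NULL);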
enum _ecore_status_t ecore_vf_read_bulletin(struct ecore_hwfn *p_hwfn,
u8 *p_change)
{
@@ -1497,8 +1672,8 @@ enum _ecore_status_t ecore_vf_read_bulletin(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
/* Verify the bulletin we see is valid */
- crc = ecore_crc32(0, (u8 *)&shadow + crc_size,
- p_iov->bulletin.size - crc_size);
+ crc = OSAL_CRC32(0, (u8 *)&shadow + crc_size,
+ p_iov->bulletin.size - crc_size);
if (crc != shadow.crc)
return ECORE_AGAIN;
@@ -1513,8 +1688,7 @@ enum _ecore_status_t ecore_vf_read_bulletin(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
}
-void __ecore_vf_get_link_params(struct ecore_hwfn *p_hwfn,
- struct ecore_mcp_link_params *p_params,
+void __ecore_vf_get_link_params(struct ecore_mcp_link_params *p_params,
struct ecore_bulletin_content *p_bulletin)
{
OSAL_MEMSET(p_params, 0, sizeof(*p_params));
@@ -1531,12 +1705,11 @@ void __ecore_vf_get_link_params(struct ecore_hwfn *p_hwfn,
void ecore_vf_get_link_params(struct ecore_hwfn *p_hwfn,
struct ecore_mcp_link_params *params)
{
- __ecore_vf_get_link_params(p_hwfn, params,
+ __ecore_vf_get_link_params(params,
&p_hwfn->vf_iov_info->bulletin_shadow);
}
-void __ecore_vf_get_link_state(struct ecore_hwfn *p_hwfn,
- struct ecore_mcp_link_state *p_link,
+void __ecore_vf_get_link_state(struct ecore_mcp_link_state *p_link,
struct ecore_bulletin_content *p_bulletin)
{
OSAL_MEMSET(p_link, 0, sizeof(*p_link));
@@ -1558,12 +1731,11 @@ void __ecore_vf_get_link_state(struct ecore_hwfn *p_hwfn,
void ecore_vf_get_link_state(struct ecore_hwfn *p_hwfn,
struct ecore_mcp_link_state *link)
{
- __ecore_vf_get_link_state(p_hwfn, link,
+ __ecore_vf_get_link_state(link,
&p_hwfn->vf_iov_info->bulletin_shadow);
}
-void __ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn,
- struct ecore_mcp_link_capabilities *p_link_caps,
+void __ecore_vf_get_link_caps(struct ecore_mcp_link_capabilities *p_link_caps,
struct ecore_bulletin_content *p_bulletin)
{
OSAL_MEMSET(p_link_caps, 0, sizeof(*p_link_caps));
@@ -1573,7 +1745,7 @@ void __ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn,
void ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn,
struct ecore_mcp_link_capabilities *p_link_caps)
{
- __ecore_vf_get_link_caps(p_hwfn, p_link_caps,
+ __ecore_vf_get_link_caps(p_link_caps,
&p_hwfn->vf_iov_info->bulletin_shadow);
}
@@ -1703,3 +1875,10 @@ void ecore_vf_get_fw_version(struct ecore_hwfn *p_hwfn,
*fw_rev = info->fw_rev;
*fw_eng = info->fw_eng;
}
+
+#ifdef CONFIG_ECORE_SW_CHANNEL
+void ecore_vf_set_hw_channel(struct ecore_hwfn *p_hwfn, bool b_is_hw)
+{
+ p_hwfn->vf_iov_info->b_hw_channel = b_is_hw;
+}
+#endif
diff --git a/drivers/net/qede/base/ecore_vf.h b/drivers/net/qede/base/ecore_vf.h
index f4713884..de2758cb 100644
--- a/drivers/net/qede/base/ecore_vf.h
+++ b/drivers/net/qede/base/ecore_vf.h
@@ -14,6 +14,11 @@
#include "ecore_l2_api.h"
#include "ecore_vfpf_if.h"
+/* Default number of CIDs [total of both Rx and Tx] to be requested
+ * by default.
+ */
+#define ECORE_ETH_VF_DEFAULT_NUM_CIDS (32)
+
/* This data is held in the ecore_hwfn structure for VFs only. */
struct ecore_vf_iov {
union vfpf_tlvs *vf2pf_request;
@@ -36,25 +41,47 @@ struct ecore_vf_iov {
* this has to be propagated as it affects the fastpath.
*/
bool b_pre_fp_hsi;
-};
+ /* Current day VFs are passing the SBs physical address on vport
+ * start, and as they lack an IGU mapping they need to store the
+ * addresses of previously registered SBs.
+ * Even if we were to change configuration flow, due to backward
+ * compatibility [with older PFs] we'd still need to store these.
+ */
+ struct ecore_sb_info *sbs_info[PFVF_MAX_SBS_PER_VF];
+
+#ifdef CONFIG_ECORE_SW_CHANNEL
+	/* Would be set if the VF is to try communicating with its PF
+ * using a hw channel.
+ */
+ bool b_hw_channel;
+#endif
+
+ /* Determines whether VF utilizes doorbells via limited register
+ * bar or via the doorbell bar.
+ */
+ bool b_doorbell_bar;
+};
-enum _ecore_status_t ecore_set_rxq_coalesce(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u16 coalesce,
- struct ecore_queue_cid *p_cid);
-enum _ecore_status_t ecore_set_txq_coalesce(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u16 coalesce,
- struct ecore_queue_cid *p_cid);
+/**
+ * @brief VF - Get coalesce per VF's relative queue.
+ *
+ * @param p_hwfn
+ * @param p_coal - coalesce value in microseconds for VF queues.
+ * @param p_cid - queue cid
+ *
+ **/
+enum _ecore_status_t ecore_vf_pf_get_coalesce(struct ecore_hwfn *p_hwfn,
+ u16 *p_coal,
+ struct ecore_queue_cid *p_cid);
/**
* @brief VF - Set Rx/Tx coalesce per VF's relative queue.
- * Coalesce value '0' will omit the configuration.
+ * Coalesce value '0' will omit the configuration.
*
- * @param p_hwfn
- * @param rx_coal - coalesce value in micro second for rx queue
- * @param tx_coal - coalesce value in micro second for tx queue
- * @param queue_cid
+ * @param p_hwfn
+ * @param rx_coal - coalesce value in microseconds for rx queue
+ * @param tx_coal - coalesce value in microseconds for tx queue
+ * @param p_cid - queue cid
*
**/
enum _ecore_status_t ecore_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn,
@@ -200,6 +227,15 @@ enum _ecore_status_t ecore_vf_pf_release(struct ecore_hwfn *p_hwfn);
u16 ecore_vf_get_igu_sb_id(struct ecore_hwfn *p_hwfn,
u16 sb_id);
+/**
+ * @brief Stores [or removes] a configured sb_info.
+ *
+ * @param p_hwfn
+ * @param sb_id - zero-based SB index [for fastpath]
+ * @param p_sb - may be OSAL_NULL [during removal].
+ */
+void ecore_vf_set_sb_info(struct ecore_hwfn *p_hwfn,
+ u16 sb_id, struct ecore_sb_info *p_sb);
/**
* @brief ecore_vf_pf_vport_start - perform vport start for VF.
@@ -251,34 +287,28 @@ enum _ecore_status_t ecore_vf_pf_int_cleanup(struct ecore_hwfn *p_hwfn);
/**
* @brief - return the link params in a given bulletin board
*
- * @param p_hwfn
* @param p_params - pointer to a struct to fill with link params
* @param p_bulletin
*/
-void __ecore_vf_get_link_params(struct ecore_hwfn *p_hwfn,
- struct ecore_mcp_link_params *p_params,
+void __ecore_vf_get_link_params(struct ecore_mcp_link_params *p_params,
struct ecore_bulletin_content *p_bulletin);
/**
* @brief - return the link state in a given bulletin board
*
- * @param p_hwfn
* @param p_link - pointer to a struct to fill with link state
* @param p_bulletin
*/
-void __ecore_vf_get_link_state(struct ecore_hwfn *p_hwfn,
- struct ecore_mcp_link_state *p_link,
+void __ecore_vf_get_link_state(struct ecore_mcp_link_state *p_link,
struct ecore_bulletin_content *p_bulletin);
/**
* @brief - return the link capabilities in a given bulletin board
*
- * @param p_hwfn
* @param p_link - pointer to a struct to fill with link capabilities
* @param p_bulletin
*/
-void __ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn,
- struct ecore_mcp_link_capabilities *p_link_caps,
+void __ecore_vf_get_link_caps(struct ecore_mcp_link_capabilities *p_link_caps,
struct ecore_bulletin_content *p_bulletin);
enum _ecore_status_t
@@ -286,5 +316,8 @@ ecore_vf_pf_tunnel_param_update(struct ecore_hwfn *p_hwfn,
struct ecore_tunnel_info *p_tunn);
void ecore_vf_set_vf_start_tunn_update_param(struct ecore_tunnel_info *p_tun);
+
+u32 ecore_vf_hw_bar_size(struct ecore_hwfn *p_hwfn,
+ enum BAR_ID bar_id);
#endif
#endif /* __ECORE_VF_H__ */
diff --git a/drivers/net/qede/base/ecore_vf_api.h b/drivers/net/qede/base/ecore_vf_api.h
index be3a326b..9815cf8a 100644
--- a/drivers/net/qede/base/ecore_vf_api.h
+++ b/drivers/net/qede/base/ecore_vf_api.h
@@ -163,5 +163,18 @@ void ecore_vf_get_fw_version(struct ecore_hwfn *p_hwfn,
u16 *fw_eng);
void ecore_vf_bulletin_get_udp_ports(struct ecore_hwfn *p_hwfn,
u16 *p_vxlan_port, u16 *p_geneve_port);
+
+#ifdef CONFIG_ECORE_SW_CHANNEL
+/**
+ * @brief set the VF to use a SW/HW channel when communicating with PF.
+ * NOTICE: today the likely first place to call this from VF
+ * would be OSAL_VF_FILL_ACQUIRE_RESC_REQ(); might want to consider
+ * something a bit more appropriate.
+ *
+ * @param p_hwfn
+ * @param b_is_hw - true iff VF is to use a HW-channel
+ */
+void ecore_vf_set_hw_channel(struct ecore_hwfn *p_hwfn, bool b_is_hw);
+#endif
#endif
#endif
diff --git a/drivers/net/qede/base/ecore_vfpf_if.h b/drivers/net/qede/base/ecore_vfpf_if.h
index 66184421..3ccc7665 100644
--- a/drivers/net/qede/base/ecore_vfpf_if.h
+++ b/drivers/net/qede/base/ecore_vfpf_if.h
@@ -19,13 +19,14 @@
*
**/
struct vf_pf_resc_request {
- u8 num_rxqs;
- u8 num_txqs;
- u8 num_sbs;
- u8 num_mac_filters;
- u8 num_vlan_filters;
- u8 num_mc_filters; /* No limit so superfluous */
- u16 padding;
+ u8 num_rxqs;
+ u8 num_txqs;
+ u8 num_sbs;
+ u8 num_mac_filters;
+ u8 num_vlan_filters;
+ u8 num_mc_filters; /* No limit so superfluous */
+ u8 num_cids;
+ u8 padding;
};
struct hw_sb_info {
@@ -92,6 +93,20 @@ struct vfpf_acquire_tlv {
/* VF pre-FP hsi version */
#define VFPF_ACQUIRE_CAP_PRE_FP_HSI (1 << 0)
#define VFPF_ACQUIRE_CAP_100G (1 << 1) /* VF can support 100g */
+
+	/* A requirement for supporting multi-Tx queues on a single queue-zone:
+	 * the VF would pass qids as additional information whenever passing queue
+ * references.
+ * TODO - due to the CID limitations in Bar0, VFs currently don't pass
+ * this, and use the legacy CID scheme.
+ */
+#define VFPF_ACQUIRE_CAP_QUEUE_QIDS (1 << 2)
+
+ /* The VF is using the physical bar. While this is mostly internal
+ * to the VF, might affect the number of CIDs supported assuming
+ * QUEUE_QIDS is set.
+ */
+#define VFPF_ACQUIRE_CAP_PHYSICAL_BAR (1 << 3)
u64 capabilities;
u8 fw_major;
u8 fw_minor;
@@ -170,6 +185,9 @@ struct pfvf_acquire_resp_tlv {
#endif
#define PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE (1 << 2)
+ /* PF expects queues to be received with additional qids */
+#define PFVF_ACQUIRE_CAP_QUEUE_QIDS (1 << 3)
+
u16 db_size;
u8 indices_per_sb;
u8 os_type;
@@ -178,7 +196,8 @@ struct pfvf_acquire_resp_tlv {
u16 chip_rev;
u8 dev_type;
- u8 padding;
+ /* Doorbell bar size configured in HW: log(size) or 0 */
+ u8 bar_size;
struct pfvf_stats_info stats_info;
@@ -210,7 +229,8 @@ struct pfvf_acquire_resp_tlv {
u8 num_mac_filters;
u8 num_vlan_filters;
u8 num_mc_filters;
- u8 padding[2];
+ u8 num_cids;
+ u8 padding;
} resc;
u32 bulletin_size;
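Since bar_size carries log2 of the doorbell bar size (0 meaning the PF left
it unconfigured), a VF can recover the size in bytes with a shift; a sketch
(the resp pointer is assumed):

    /* bar_size is log2(bytes); 0 means not configured by the PF */
    u32 db_bar_bytes = resp->bar_size ? (1U << resp->bar_size) : 0;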
@@ -223,6 +243,16 @@ struct pfvf_start_queue_resp_tlv {
u8 padding[4];
};
+/* Extended queue information - additional index for reference inside qzone.
+ * If communicated between VF/PF, each TLV relating to queues should be
+ * extended by one such [or have a future base TLV that already contains info].
+ */
+struct vfpf_qid_tlv {
+ struct channel_tlv tl;
+ u8 qid;
+ u8 padding[3];
+};
+
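To illustrate the intended use (a sketch only; the qid_usage_idx field name
is assumed from the ecore queue-cid bookkeeping), a VF that negotiated
CAP_QUEUE_QIDS would append one such TLV after each queue-related TLV:

    /* Sketch: extend a queue request with the index inside the qzone */
    struct vfpf_qid_tlv *p_qid;

    p_qid = ecore_add_tlv(&p_iov->offset, CHANNEL_TLV_QID, sizeof(*p_qid));
    p_qid->qid = p_cid->qid_usage_idx;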
/* Setup Queue */
struct vfpf_start_rxq_tlv {
struct vfpf_first_tlv first_tlv;
@@ -265,7 +295,15 @@ struct vfpf_stop_rxqs_tlv {
struct vfpf_first_tlv first_tlv;
u16 rx_qid;
+
+ /* While the API supports multiple Rx-queues on a single TLV
+ * message, in practice older VFs always used it as one [ecore].
+ * And there are PFs [starting with the CHANNEL_TLV_QID] which
+ * would start assuming this is always a '1'. So in practice this
+ * field should be considered deprecated and *Always* set to '1'.
+ */
u8 num_rxqs;
+
u8 cqe_completion;
u8 padding[4];
};
@@ -275,6 +313,13 @@ struct vfpf_stop_txqs_tlv {
struct vfpf_first_tlv first_tlv;
u16 tx_qid;
+
+ /* While the API supports multiple Tx-queues on a single TLV
+ * message, in practice older VFs always used it as one [ecore].
+ * And there are PFs [starting with the CHANNEL_TLV_QID] which
+ * would start assuming this is always a '1'. So in practice this
+ * field should be considered deprecated and *Always* set to '1'.
+ */
u8 num_txqs;
u8 padding[5];
};
@@ -465,6 +510,19 @@ struct vfpf_update_coalesce {
u8 padding[2];
};
+struct vfpf_read_coal_req_tlv {
+ struct vfpf_first_tlv first_tlv;
+ u16 qid;
+ u8 is_rx;
+ u8 padding[5];
+};
+
+struct pfvf_read_coal_resp_tlv {
+ struct pfvf_tlv hdr;
+ u16 coal;
+ u8 padding[6];
+};
+
union vfpf_tlvs {
struct vfpf_first_tlv first_tlv;
struct vfpf_acquire_tlv acquire;
@@ -478,6 +536,7 @@ union vfpf_tlvs {
struct vfpf_ucast_filter_tlv ucast_filter;
struct vfpf_update_tunn_param_tlv tunn_param_update;
struct vfpf_update_coalesce update_coalesce;
+ struct vfpf_read_coal_req_tlv read_coal_req;
struct tlv_buffer_size tlv_buf_size;
};
@@ -487,6 +546,7 @@ union pfvf_tlvs {
struct tlv_buffer_size tlv_buf_size;
struct pfvf_start_queue_resp_tlv queue_start;
struct pfvf_update_tunn_param_tlv tunn_param_resp;
+ struct pfvf_read_coal_resp_tlv read_coal_resp;
};
/* This is a structure which is allocated in the VF, which the PF may update
@@ -605,6 +665,8 @@ enum {
CHANNEL_TLV_VPORT_UPDATE_SGE_TPA,
CHANNEL_TLV_UPDATE_TUNN_PARAM,
CHANNEL_TLV_COALESCE_UPDATE,
+ CHANNEL_TLV_QID,
+ CHANNEL_TLV_COALESCE_READ,
CHANNEL_TLV_MAX,
/* Required for iterating over vport-update tlvs.
diff --git a/drivers/net/qede/base/mcp_public.h b/drivers/net/qede/base/mcp_public.h
index 1ad8a962..81ca6634 100644
--- a/drivers/net/qede/base/mcp_public.h
+++ b/drivers/net/qede/base/mcp_public.h
@@ -28,19 +28,19 @@
typedef u32 offsize_t; /* In DWORDS !!! */
/* Offset from the beginning of the MCP scratchpad */
-#define OFFSIZE_OFFSET_SHIFT 0
+#define OFFSIZE_OFFSET_OFFSET 0
#define OFFSIZE_OFFSET_MASK 0x0000ffff
/* Size of specific element (not the whole array if any) */
-#define OFFSIZE_SIZE_SHIFT 16
+#define OFFSIZE_SIZE_OFFSET 16
#define OFFSIZE_SIZE_MASK 0xffff0000
/* SECTION_OFFSET calculates the offset in bytes out of offsize */
#define SECTION_OFFSET(_offsize) \
- ((((_offsize & OFFSIZE_OFFSET_MASK) >> OFFSIZE_OFFSET_SHIFT) << 2))
+ ((((_offsize & OFFSIZE_OFFSET_MASK) >> OFFSIZE_OFFSET_OFFSET) << 2))
/* SECTION_SIZE calculates the size in bytes out of offsize */
#define SECTION_SIZE(_offsize) \
- (((_offsize & OFFSIZE_SIZE_MASK) >> OFFSIZE_SIZE_SHIFT) << 2)
+ (((_offsize & OFFSIZE_SIZE_MASK) >> OFFSIZE_SIZE_OFFSET) << 2)
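A worked example of the offsize decoding (value chosen for illustration):

    /* _offsize = 0x00400123:
     * SECTION_OFFSET -> 0x0123 << 2 = 0x48c bytes into the scratchpad
     * SECTION_SIZE   -> 0x0040 << 2 = 0x100 bytes per element
     */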
/* SECTION_ADDR returns the GRC addr of a section, given offsize and index
* within section
@@ -59,7 +59,7 @@ struct eth_phy_cfg {
/* 0 = autoneg, 1000/10000/20000/25000/40000/50000/100000 */
u32 speed;
#define ETH_SPEED_AUTONEG 0
-#define ETH_SPEED_SMARTLINQ 0x8
+#define ETH_SPEED_SMARTLINQ 0x8 /* deprecated - use link_modes field instead */
u32 pause; /* bitmask */
#define ETH_PAUSE_NONE 0x0
@@ -84,38 +84,28 @@ struct eth_phy_cfg {
/* Remote Serdes Loopback (RX to TX) */
#define ETH_LOOPBACK_INT_PHY_FEA_AH_ONLY (9)
- /* Used to configure the EEE Tx LPI timer, has several modes of
- * operation, according to bits 29:28
- * 2'b00: Timer will be configured by nvram, output will be the value
- * from nvram.
- * 2'b01: Timer will be configured by nvram, output will be in
- * 16xmicroseconds.
- * 2'b10: bits 1:0 contain an nvram value which will be used instead
- * of the one located in the nvram. Output will be that value.
- * 2'b11: bits 19:0 contain the idle timer in microseconds; output
- * will be in 16xmicroseconds.
- * Bits 31:30 should be 2'b11 in order for EEE to be enabled.
- */
- u32 eee_mode;
-#define EEE_MODE_TIMER_USEC_MASK (0x000fffff)
-#define EEE_MODE_TIMER_USEC_OFFSET (0)
-#define EEE_MODE_TIMER_USEC_BALANCED_TIME (0xa00)
-#define EEE_MODE_TIMER_USEC_AGGRESSIVE_TIME (0x100)
-#define EEE_MODE_TIMER_USEC_LATENCY_TIME (0x6000)
-/* Set by the driver to request status timer will be in microseconds and and not
- * in EEE policy definition
+ u32 eee_cfg;
+/* EEE is enabled (configuration). Refer to eee_status->active for negotiated
+ * status
*/
-#define EEE_MODE_OUTPUT_TIME (1 << 28)
-/* Set by the driver to override default nvm timer */
-#define EEE_MODE_OVERRIDE_NVRAM (1 << 29)
-#define EEE_MODE_ENABLE_LPI (1 << 30) /* Set when */
-#define EEE_MODE_ADV_LPI (1 << 31) /* Set when EEE is enabled */
+#define EEE_CFG_EEE_ENABLED (1 << 0)
+#define EEE_CFG_TX_LPI (1 << 1)
+#define EEE_CFG_ADV_SPEED_1G (1 << 2)
+#define EEE_CFG_ADV_SPEED_10G (1 << 3)
+#define EEE_TX_TIMER_USEC_MASK (0xfffffff0)
+#define EEE_TX_TIMER_USEC_OFFSET 4
+#define EEE_TX_TIMER_USEC_BALANCED_TIME (0xa00)
+#define EEE_TX_TIMER_USEC_AGGRESSIVE_TIME (0x100)
+#define EEE_TX_TIMER_USEC_LATENCY_TIME (0x6000)
+
+ u32 link_modes; /* Additional link modes */
+#define LINK_MODE_SMARTLINQ_ENABLE 0x1 /* XXX Deprecate */
};
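For example, enabling EEE with 10G advertisement and the balanced Tx LPI
timer under the new eee_cfg layout would pack the word as follows (a sketch,
not code from this patch):

    u32 eee_cfg = EEE_CFG_EEE_ENABLED | EEE_CFG_TX_LPI |
                  EEE_CFG_ADV_SPEED_10G |
                  (EEE_TX_TIMER_USEC_BALANCED_TIME <<
                   EEE_TX_TIMER_USEC_OFFSET);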
struct port_mf_cfg {
u32 dynamic_cfg; /* device control channel */
#define PORT_MF_CFG_OV_TAG_MASK 0x0000ffff
-#define PORT_MF_CFG_OV_TAG_SHIFT 0
+#define PORT_MF_CFG_OV_TAG_OFFSET 0
#define PORT_MF_CFG_OV_TAG_DEFAULT PORT_MF_CFG_OV_TAG_MASK
u32 reserved[1];
@@ -274,11 +264,11 @@ struct couple_mode_teaming {
/**************************************
* LLDP and DCBX HSI structures
**************************************/
-#define LLDP_CHASSIS_ID_STAT_LEN 4
-#define LLDP_PORT_ID_STAT_LEN 4
+#define LLDP_CHASSIS_ID_STAT_LEN 4
+#define LLDP_PORT_ID_STAT_LEN 4
#define DCBX_MAX_APP_PROTOCOL 32
-#define MAX_SYSTEM_LLDP_TLV_DATA 32
-
+#define MAX_SYSTEM_LLDP_TLV_DATA 32 /* In dwords. 128 in bytes */
+#define MAX_TLV_BUFFER 128 /* In dwords. 512 in bytes */
typedef enum _lldp_agent_e {
LLDP_NEAREST_BRIDGE = 0,
LLDP_NEAREST_NON_TPMR_BRIDGE,
@@ -289,15 +279,15 @@ typedef enum _lldp_agent_e {
struct lldp_config_params_s {
u32 config;
#define LLDP_CONFIG_TX_INTERVAL_MASK 0x000000ff
-#define LLDP_CONFIG_TX_INTERVAL_SHIFT 0
+#define LLDP_CONFIG_TX_INTERVAL_OFFSET 0
#define LLDP_CONFIG_HOLD_MASK 0x00000f00
-#define LLDP_CONFIG_HOLD_SHIFT 8
+#define LLDP_CONFIG_HOLD_OFFSET 8
#define LLDP_CONFIG_MAX_CREDIT_MASK 0x0000f000
-#define LLDP_CONFIG_MAX_CREDIT_SHIFT 12
+#define LLDP_CONFIG_MAX_CREDIT_OFFSET 12
#define LLDP_CONFIG_ENABLE_RX_MASK 0x40000000
-#define LLDP_CONFIG_ENABLE_RX_SHIFT 30
+#define LLDP_CONFIG_ENABLE_RX_OFFSET 30
#define LLDP_CONFIG_ENABLE_TX_MASK 0x80000000
-#define LLDP_CONFIG_ENABLE_TX_SHIFT 31
+#define LLDP_CONFIG_ENABLE_TX_OFFSET 31
/* Holds local Chassis ID TLV header, subtype and 9B of payload.
 * If first byte is 0, then we will use default chassis ID
*/
@@ -321,17 +311,17 @@ struct lldp_status_params_s {
struct dcbx_ets_feature {
u32 flags;
#define DCBX_ETS_ENABLED_MASK 0x00000001
-#define DCBX_ETS_ENABLED_SHIFT 0
+#define DCBX_ETS_ENABLED_OFFSET 0
#define DCBX_ETS_WILLING_MASK 0x00000002
-#define DCBX_ETS_WILLING_SHIFT 1
+#define DCBX_ETS_WILLING_OFFSET 1
#define DCBX_ETS_ERROR_MASK 0x00000004
-#define DCBX_ETS_ERROR_SHIFT 2
+#define DCBX_ETS_ERROR_OFFSET 2
#define DCBX_ETS_CBS_MASK 0x00000008
-#define DCBX_ETS_CBS_SHIFT 3
+#define DCBX_ETS_CBS_OFFSET 3
#define DCBX_ETS_MAX_TCS_MASK 0x000000f0
-#define DCBX_ETS_MAX_TCS_SHIFT 4
+#define DCBX_ETS_MAX_TCS_OFFSET 4
#define DCBX_OOO_TC_MASK 0x00000f00
-#define DCBX_OOO_TC_SHIFT 8
+#define DCBX_OOO_TC_OFFSET 8
/* Entries in tc table are organized such that the left-most is pri 0, the
 * right-most is prio 7
*/
@@ -363,7 +353,7 @@ struct dcbx_ets_feature {
struct dcbx_app_priority_entry {
u32 entry;
#define DCBX_APP_PRI_MAP_MASK 0x000000ff
-#define DCBX_APP_PRI_MAP_SHIFT 0
+#define DCBX_APP_PRI_MAP_OFFSET 0
#define DCBX_APP_PRI_0 0x01
#define DCBX_APP_PRI_1 0x02
#define DCBX_APP_PRI_2 0x04
@@ -373,11 +363,11 @@ struct dcbx_app_priority_entry {
#define DCBX_APP_PRI_6 0x40
#define DCBX_APP_PRI_7 0x80
#define DCBX_APP_SF_MASK 0x00000300
-#define DCBX_APP_SF_SHIFT 8
+#define DCBX_APP_SF_OFFSET 8
#define DCBX_APP_SF_ETHTYPE 0
#define DCBX_APP_SF_PORT 1
#define DCBX_APP_SF_IEEE_MASK 0x0000f000
-#define DCBX_APP_SF_IEEE_SHIFT 12
+#define DCBX_APP_SF_IEEE_OFFSET 12
#define DCBX_APP_SF_IEEE_RESERVED 0
#define DCBX_APP_SF_IEEE_ETHTYPE 1
#define DCBX_APP_SF_IEEE_TCP_PORT 2
@@ -385,7 +375,7 @@ struct dcbx_app_priority_entry {
#define DCBX_APP_SF_IEEE_TCP_UDP_PORT 4
#define DCBX_APP_PROTOCOL_ID_MASK 0xffff0000
-#define DCBX_APP_PROTOCOL_ID_SHIFT 16
+#define DCBX_APP_PROTOCOL_ID_OFFSET 16
};
@@ -393,19 +383,19 @@ struct dcbx_app_priority_entry {
struct dcbx_app_priority_feature {
u32 flags;
#define DCBX_APP_ENABLED_MASK 0x00000001
-#define DCBX_APP_ENABLED_SHIFT 0
+#define DCBX_APP_ENABLED_OFFSET 0
#define DCBX_APP_WILLING_MASK 0x00000002
-#define DCBX_APP_WILLING_SHIFT 1
+#define DCBX_APP_WILLING_OFFSET 1
#define DCBX_APP_ERROR_MASK 0x00000004
-#define DCBX_APP_ERROR_SHIFT 2
+#define DCBX_APP_ERROR_OFFSET 2
/* Not in use
#define DCBX_APP_DEFAULT_PRI_MASK 0x00000f00
- #define DCBX_APP_DEFAULT_PRI_SHIFT 8
+ #define DCBX_APP_DEFAULT_PRI_OFFSET 8
*/
#define DCBX_APP_MAX_TCS_MASK 0x0000f000
-#define DCBX_APP_MAX_TCS_SHIFT 12
+#define DCBX_APP_MAX_TCS_OFFSET 12
#define DCBX_APP_NUM_ENTRIES_MASK 0x00ff0000
-#define DCBX_APP_NUM_ENTRIES_SHIFT 16
+#define DCBX_APP_NUM_ENTRIES_OFFSET 16
struct dcbx_app_priority_entry app_pri_tbl[DCBX_MAX_APP_PROTOCOL];
};
@@ -416,7 +406,7 @@ struct dcbx_features {
/* PFC feature */
u32 pfc;
#define DCBX_PFC_PRI_EN_BITMAP_MASK 0x000000ff
-#define DCBX_PFC_PRI_EN_BITMAP_SHIFT 0
+#define DCBX_PFC_PRI_EN_BITMAP_OFFSET 0
#define DCBX_PFC_PRI_EN_BITMAP_PRI_0 0x01
#define DCBX_PFC_PRI_EN_BITMAP_PRI_1 0x02
#define DCBX_PFC_PRI_EN_BITMAP_PRI_2 0x04
@@ -427,17 +417,17 @@ struct dcbx_features {
#define DCBX_PFC_PRI_EN_BITMAP_PRI_7 0x80
#define DCBX_PFC_FLAGS_MASK 0x0000ff00
-#define DCBX_PFC_FLAGS_SHIFT 8
+#define DCBX_PFC_FLAGS_OFFSET 8
#define DCBX_PFC_CAPS_MASK 0x00000f00
-#define DCBX_PFC_CAPS_SHIFT 8
+#define DCBX_PFC_CAPS_OFFSET 8
#define DCBX_PFC_MBC_MASK 0x00004000
-#define DCBX_PFC_MBC_SHIFT 14
+#define DCBX_PFC_MBC_OFFSET 14
#define DCBX_PFC_WILLING_MASK 0x00008000
-#define DCBX_PFC_WILLING_SHIFT 15
+#define DCBX_PFC_WILLING_OFFSET 15
#define DCBX_PFC_ENABLED_MASK 0x00010000
-#define DCBX_PFC_ENABLED_SHIFT 16
+#define DCBX_PFC_ENABLED_OFFSET 16
#define DCBX_PFC_ERROR_MASK 0x00020000
-#define DCBX_PFC_ERROR_SHIFT 17
+#define DCBX_PFC_ERROR_OFFSET 17
/* APP feature */
struct dcbx_app_priority_feature app;
@@ -446,10 +436,12 @@ struct dcbx_features {
struct dcbx_local_params {
u32 config;
#define DCBX_CONFIG_VERSION_MASK 0x00000007
-#define DCBX_CONFIG_VERSION_SHIFT 0
+#define DCBX_CONFIG_VERSION_OFFSET 0
#define DCBX_CONFIG_VERSION_DISABLED 0
#define DCBX_CONFIG_VERSION_IEEE 1
#define DCBX_CONFIG_VERSION_CEE 2
+#define DCBX_CONFIG_VERSION_DYNAMIC \
+ (DCBX_CONFIG_VERSION_IEEE | DCBX_CONFIG_VERSION_CEE)
#define DCBX_CONFIG_VERSION_STATIC 4
u32 flags;
@@ -461,7 +453,7 @@ struct dcbx_mib {
u32 flags;
/*
#define DCBX_CONFIG_VERSION_MASK 0x00000007
- #define DCBX_CONFIG_VERSION_SHIFT 0
+ #define DCBX_CONFIG_VERSION_OFFSET 0
#define DCBX_CONFIG_VERSION_DISABLED 0
#define DCBX_CONFIG_VERSION_IEEE 1
#define DCBX_CONFIG_VERSION_CEE 2
@@ -472,19 +464,49 @@ struct dcbx_mib {
};
struct lldp_system_tlvs_buffer_s {
- u16 valid;
- u16 length;
+ u32 flags;
+#define LLDP_SYSTEM_TLV_VALID_MASK 0x1
+#define LLDP_SYSTEM_TLV_VALID_OFFSET 0
+/* This bit defines whether the system TLVs replace the mandatory TLVs or
+ * are sent in addition to them. Set to 1 to replace the mandatory TLVs.
+ */
+#define LLDP_SYSTEM_TLV_MANDATORY_MASK 0x2
+#define LLDP_SYSTEM_TLV_MANDATORY_OFFSET 1
+#define LLDP_SYSTEM_TLV_LENGTH_MASK 0xffff0000
+#define LLDP_SYSTEM_TLV_LENGTH_OFFSET 16
u32 data[MAX_SYSTEM_LLDP_TLV_DATA];
};
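Reading the repacked flags word back is a pair of mask-and-shift operations;
for instance (illustrative only, the p_buf pointer is assumed):

    u8 valid = (p_buf->flags & LLDP_SYSTEM_TLV_VALID_MASK) >>
               LLDP_SYSTEM_TLV_VALID_OFFSET;
    u16 len = (p_buf->flags & LLDP_SYSTEM_TLV_LENGTH_MASK) >>
              LLDP_SYSTEM_TLV_LENGTH_OFFSET;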
+/* Since this struct is written by the MFW and read by the driver, it needs
+ * sequence guards (as in the case of the DCBX MIB)
+ */
+struct lldp_received_tlvs_s {
+ u32 prefix_seq_num;
+ u32 length;
+ u32 tlvs_buffer[MAX_TLV_BUFFER];
+ u32 suffix_seq_num;
+};
+
struct dcb_dscp_map {
u32 flags;
#define DCB_DSCP_ENABLE_MASK 0x1
-#define DCB_DSCP_ENABLE_SHIFT 0
+#define DCB_DSCP_ENABLE_OFFSET 0
#define DCB_DSCP_ENABLE 1
u32 dscp_pri_map[8];
};
+/**************************************
+ * Attributes commands
+ **************************************/
+
+enum _attribute_commands_e {
+ ATTRIBUTE_CMD_READ = 0,
+ ATTRIBUTE_CMD_WRITE,
+ ATTRIBUTE_CMD_READ_CLEAR,
+ ATTRIBUTE_CMD_CLEAR,
+ ATTRIBUTE_NUM_OF_COMMANDS
+};
+
/**************************************/
/* */
/* P U B L I C G L O B A L */
@@ -512,12 +534,12 @@ struct public_global {
#define MDUMP_REASON_DUMP_AGED (1 << 2)
u32 ext_phy_upgrade_fw;
#define EXT_PHY_FW_UPGRADE_STATUS_MASK (0x0000ffff)
-#define EXT_PHY_FW_UPGRADE_STATUS_SHIFT (0)
+#define EXT_PHY_FW_UPGRADE_STATUS_OFFSET (0)
#define EXT_PHY_FW_UPGRADE_STATUS_IN_PROGRESS (1)
#define EXT_PHY_FW_UPGRADE_STATUS_FAILED (2)
#define EXT_PHY_FW_UPGRADE_STATUS_SUCCESS (3)
#define EXT_PHY_FW_UPGRADE_TYPE_MASK (0xffff0000)
-#define EXT_PHY_FW_UPGRADE_TYPE_SHIFT (16)
+#define EXT_PHY_FW_UPGRADE_TYPE_OFFSET (16)
};
/**************************************/
@@ -567,9 +589,9 @@ struct public_path {
 /* Reset on mcp reset, and incremented for every process kill event. */
u32 process_kill;
#define PROCESS_KILL_COUNTER_MASK 0x0000ffff
-#define PROCESS_KILL_COUNTER_SHIFT 0
+#define PROCESS_KILL_COUNTER_OFFSET 0
#define PROCESS_KILL_GLOB_AEU_BIT_MASK 0xffff0000
-#define PROCESS_KILL_GLOB_AEU_BIT_SHIFT 16
+#define PROCESS_KILL_GLOB_AEU_BIT_OFFSET 16
#define GLOBAL_AEU_BIT(aeu_reg_id, aeu_bit) (aeu_reg_id * 32 + aeu_bit)
};
@@ -697,6 +719,8 @@ struct public_port {
#define LFA_SPEED_MISMATCH (1 << 3)
#define LFA_FLOW_CTRL_MISMATCH (1 << 4)
#define LFA_ADV_SPEED_MISMATCH (1 << 5)
+#define LFA_EEE_MISMATCH (1 << 6)
+#define LFA_LINK_MODES_MISMATCH (1 << 7)
#define LINK_FLAP_AVOIDANCE_COUNT_OFFSET 8
#define LINK_FLAP_AVOIDANCE_COUNT_MASK 0x0000ff00
#define LINK_FLAP_COUNT_OFFSET 16
@@ -721,13 +745,13 @@ struct public_port {
u32 fc_npiv_nvram_tbl_size;
u32 transceiver_data;
#define ETH_TRANSCEIVER_STATE_MASK 0x000000FF
-#define ETH_TRANSCEIVER_STATE_SHIFT 0x00000000
+#define ETH_TRANSCEIVER_STATE_OFFSET 0x00000000
#define ETH_TRANSCEIVER_STATE_UNPLUGGED 0x00000000
#define ETH_TRANSCEIVER_STATE_PRESENT 0x00000001
#define ETH_TRANSCEIVER_STATE_VALID 0x00000003
#define ETH_TRANSCEIVER_STATE_UPDATING 0x00000008
#define ETH_TRANSCEIVER_TYPE_MASK 0x0000FF00
-#define ETH_TRANSCEIVER_TYPE_SHIFT 0x00000008
+#define ETH_TRANSCEIVER_TYPE_OFFSET 0x00000008
#define ETH_TRANSCEIVER_TYPE_NONE 0x00000000
#define ETH_TRANSCEIVER_TYPE_UNKNOWN 0x000000FF
/* 1G Passive copper cable */
@@ -775,6 +799,7 @@ struct public_port {
#define ETH_TRANSCEIVER_TYPE_4x10G 0x1f
#define ETH_TRANSCEIVER_TYPE_4x25G_CR 0x20
+#define ETH_TRANSCEIVER_TYPE_1000BASET 0x21
#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR 0x30
#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR 0x31
#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR 0x32
@@ -787,38 +812,55 @@ struct public_port {
u32 wol_pkt_details;
struct dcb_dscp_map dcb_dscp_map;
- /* the status of EEE auto-negotiation
- * bits 19:0 the configured tx-lpi entry timer value. Depends on bit 31.
- * bits 23:20 the speeds advertised for EEE.
- * bits 27:24 the speeds the Link partner advertised for EEE.
- * The supported/adv. modes in bits 27:19 originate from the
- * SHMEM_EEE_XXX_ADV definitions (where XXX is replaced by speed).
- * bit 28 when 1'b1 EEE was requested.
- * bit 29 when 1'b1 tx lpi was requested.
- * bit 30 when 1'b1 EEE was negotiated. Tx lpi will be asserted if 30:29
- * are 2'b11.
- * bit 31 - When 1'b0 bits 15:0 contain
- * NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_XXX define as value.
- * When 1'b1 those bits contains a value times 16 microseconds.
- */
u32 eee_status;
-#define EEE_TIMER_MASK 0x000fffff
-#define EEE_ADV_STATUS_MASK 0x00f00000
-#define EEE_1G_ADV (1 << 1)
-#define EEE_10G_ADV (1 << 2)
-#define EEE_ADV_STATUS_SHIFT 20
-#define EEE_LP_ADV_STATUS_MASK 0x0f000000
-#define EEE_LP_ADV_STATUS_SHIFT 24
-#define EEE_REQUESTED_BIT 0x10000000
-#define EEE_LPI_REQUESTED_BIT 0x20000000
-#define EEE_ACTIVE_BIT 0x40000000
-#define EEE_TIME_OUTPUT_BIT 0x80000000
+/* Set when EEE negotiation is complete. */
+#define EEE_ACTIVE_BIT (1 << 0)
+
+/* Shows the Local Device EEE capabilities */
+#define EEE_LD_ADV_STATUS_MASK 0x000000f0
+#define EEE_LD_ADV_STATUS_OFFSET 4
+ #define EEE_1G_ADV (1 << 1)
+ #define EEE_10G_ADV (1 << 2)
+/* Same values as in EEE_LD_ADV, but for Link Partner */
+#define EEE_LP_ADV_STATUS_MASK 0x00000f00
+#define EEE_LP_ADV_STATUS_OFFSET 8
+
+/* Supported speeds for EEE */
+#define EEE_SUPPORTED_SPEED_MASK 0x0000f000
+#define EEE_SUPPORTED_SPEED_OFFSET 12
+ #define EEE_1G_SUPPORTED (1 << 1)
+ #define EEE_10G_SUPPORTED (1 << 2)
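Under the new layout a driver would check negotiated EEE and the partner's
advertisement like so (a sketch; the p_port pointer is assumed):

    bool eee_active = !!(p_port->eee_status & EEE_ACTIVE_BIT);
    u8 lp_adv = (p_port->eee_status & EEE_LP_ADV_STATUS_MASK) >>
                EEE_LP_ADV_STATUS_OFFSET; /* EEE_1G_ADV / EEE_10G_ADV bits */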
u32 eee_remote; /* Used for EEE in LLDP */
#define EEE_REMOTE_TW_TX_MASK 0x0000ffff
-#define EEE_REMOTE_TW_TX_SHIFT 0
+#define EEE_REMOTE_TW_TX_OFFSET 0
#define EEE_REMOTE_TW_RX_MASK 0xffff0000
-#define EEE_REMOTE_TW_RX_SHIFT 16
+#define EEE_REMOTE_TW_RX_OFFSET 16
+
+ u32 module_info;
+#define ETH_TRANSCEIVER_MONITORING_TYPE_MASK 0x000000FF
+#define ETH_TRANSCEIVER_MONITORING_TYPE_OFFSET 0
+#define ETH_TRANSCEIVER_ADDR_CHNG_REQUIRED (1 << 2)
+#define ETH_TRANSCEIVER_RCV_PWR_MEASURE_TYPE (1 << 3)
+#define ETH_TRANSCEIVER_EXTERNALLY_CALIBRATED (1 << 4)
+#define ETH_TRANSCEIVER_INTERNALLY_CALIBRATED (1 << 5)
+#define ETH_TRANSCEIVER_HAS_DIAGNOSTIC (1 << 6)
+#define ETH_TRANSCEIVER_IDENT_MASK 0x0000ff00
+#define ETH_TRANSCEIVER_IDENT_OFFSET 8
+
+ u32 oem_cfg_port;
+#define OEM_CFG_CHANNEL_TYPE_MASK 0x00000003
+#define OEM_CFG_CHANNEL_TYPE_OFFSET 0
+#define OEM_CFG_CHANNEL_TYPE_VLAN_PARTITION 0x1
+#define OEM_CFG_CHANNEL_TYPE_STAGGED 0x2
+
+#define OEM_CFG_SCHED_TYPE_MASK 0x0000000C
+#define OEM_CFG_SCHED_TYPE_OFFSET 2
+#define OEM_CFG_SCHED_TYPE_ETS 0x1
+#define OEM_CFG_SCHED_TYPE_VNIC_BW 0x2
+
+ struct lldp_received_tlvs_s lldp_received_tlvs[LLDP_MAX_LLDP_AGENTS];
+ u32 system_lldp_tlvs_buf2[MAX_SYSTEM_LLDP_TLV_DATA];
};
/**************************************/
@@ -857,11 +899,11 @@ struct public_func {
/* function 0 of each port cannot be hidden */
#define FUNC_MF_CFG_FUNC_HIDE 0x00000001
#define FUNC_MF_CFG_PAUSE_ON_HOST_RING 0x00000002
-#define FUNC_MF_CFG_PAUSE_ON_HOST_RING_SHIFT 0x00000001
+#define FUNC_MF_CFG_PAUSE_ON_HOST_RING_OFFSET 0x00000001
#define FUNC_MF_CFG_PROTOCOL_MASK 0x000000f0
-#define FUNC_MF_CFG_PROTOCOL_SHIFT 4
+#define FUNC_MF_CFG_PROTOCOL_OFFSET 4
#define FUNC_MF_CFG_PROTOCOL_ETHERNET 0x00000000
#define FUNC_MF_CFG_PROTOCOL_ISCSI 0x00000010
#define FUNC_MF_CFG_PROTOCOL_FCOE 0x00000020
@@ -871,18 +913,20 @@ struct public_func {
/* MINBW, MAXBW */
/* value range - 0..100, increments in 1 % */
#define FUNC_MF_CFG_MIN_BW_MASK 0x0000ff00
-#define FUNC_MF_CFG_MIN_BW_SHIFT 8
+#define FUNC_MF_CFG_MIN_BW_OFFSET 8
#define FUNC_MF_CFG_MIN_BW_DEFAULT 0x00000000
#define FUNC_MF_CFG_MAX_BW_MASK 0x00ff0000
-#define FUNC_MF_CFG_MAX_BW_SHIFT 16
+#define FUNC_MF_CFG_MAX_BW_OFFSET 16
#define FUNC_MF_CFG_MAX_BW_DEFAULT 0x00640000
u32 status;
-#define FUNC_STATUS_VLINK_DOWN 0x00000001
+#define FUNC_STATUS_VIRTUAL_LINK_UP 0x00000001
+#define FUNC_STATUS_LOGICAL_LINK_UP 0x00000002
+#define FUNC_STATUS_FORCED_LINK 0x00000004
u32 mac_upper; /* MAC */
#define FUNC_MF_CFG_UPPERMAC_MASK 0x0000ffff
-#define FUNC_MF_CFG_UPPERMAC_SHIFT 0
+#define FUNC_MF_CFG_UPPERMAC_OFFSET 0
#define FUNC_MF_CFG_UPPERMAC_DEFAULT FUNC_MF_CFG_UPPERMAC_MASK
u32 mac_lower;
#define FUNC_MF_CFG_LOWERMAC_DEFAULT 0xffffffff
@@ -895,7 +939,7 @@ struct public_func {
u32 ovlan_stag; /* tags */
#define FUNC_MF_CFG_OV_STAG_MASK 0x0000ffff
-#define FUNC_MF_CFG_OV_STAG_SHIFT 0
+#define FUNC_MF_CFG_OV_STAG_OFFSET 0
#define FUNC_MF_CFG_OV_STAG_DEFAULT FUNC_MF_CFG_OV_STAG_MASK
u32 pf_allocation; /* vf per pf */
@@ -912,29 +956,46 @@ struct public_func {
u32 drv_id;
#define DRV_ID_PDA_COMP_VER_MASK 0x0000ffff
-#define DRV_ID_PDA_COMP_VER_SHIFT 0
+#define DRV_ID_PDA_COMP_VER_OFFSET 0
#define LOAD_REQ_HSI_VERSION 2
#define DRV_ID_MCP_HSI_VER_MASK 0x00ff0000
-#define DRV_ID_MCP_HSI_VER_SHIFT 16
+#define DRV_ID_MCP_HSI_VER_OFFSET 16
#define DRV_ID_MCP_HSI_VER_CURRENT (LOAD_REQ_HSI_VERSION << \
- DRV_ID_MCP_HSI_VER_SHIFT)
+ DRV_ID_MCP_HSI_VER_OFFSET)
#define DRV_ID_DRV_TYPE_MASK 0x7f000000
-#define DRV_ID_DRV_TYPE_SHIFT 24
-#define DRV_ID_DRV_TYPE_UNKNOWN (0 << DRV_ID_DRV_TYPE_SHIFT)
-#define DRV_ID_DRV_TYPE_LINUX (1 << DRV_ID_DRV_TYPE_SHIFT)
-#define DRV_ID_DRV_TYPE_WINDOWS (2 << DRV_ID_DRV_TYPE_SHIFT)
-#define DRV_ID_DRV_TYPE_DIAG (3 << DRV_ID_DRV_TYPE_SHIFT)
-#define DRV_ID_DRV_TYPE_PREBOOT (4 << DRV_ID_DRV_TYPE_SHIFT)
-#define DRV_ID_DRV_TYPE_SOLARIS (5 << DRV_ID_DRV_TYPE_SHIFT)
-#define DRV_ID_DRV_TYPE_VMWARE (6 << DRV_ID_DRV_TYPE_SHIFT)
-#define DRV_ID_DRV_TYPE_FREEBSD (7 << DRV_ID_DRV_TYPE_SHIFT)
-#define DRV_ID_DRV_TYPE_AIX (8 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_OFFSET 24
+#define DRV_ID_DRV_TYPE_UNKNOWN (0 << DRV_ID_DRV_TYPE_OFFSET)
+#define DRV_ID_DRV_TYPE_LINUX (1 << DRV_ID_DRV_TYPE_OFFSET)
+#define DRV_ID_DRV_TYPE_WINDOWS (2 << DRV_ID_DRV_TYPE_OFFSET)
+#define DRV_ID_DRV_TYPE_DIAG (3 << DRV_ID_DRV_TYPE_OFFSET)
+#define DRV_ID_DRV_TYPE_PREBOOT (4 << DRV_ID_DRV_TYPE_OFFSET)
+#define DRV_ID_DRV_TYPE_SOLARIS (5 << DRV_ID_DRV_TYPE_OFFSET)
+#define DRV_ID_DRV_TYPE_VMWARE (6 << DRV_ID_DRV_TYPE_OFFSET)
+#define DRV_ID_DRV_TYPE_FREEBSD (7 << DRV_ID_DRV_TYPE_OFFSET)
+#define DRV_ID_DRV_TYPE_AIX (8 << DRV_ID_DRV_TYPE_OFFSET)
#define DRV_ID_DRV_INIT_HW_MASK 0x80000000
-#define DRV_ID_DRV_INIT_HW_SHIFT 31
-#define DRV_ID_DRV_INIT_HW_FLAG (1 << DRV_ID_DRV_INIT_HW_SHIFT)
+#define DRV_ID_DRV_INIT_HW_OFFSET 31
+#define DRV_ID_DRV_INIT_HW_FLAG (1 << DRV_ID_DRV_INIT_HW_OFFSET)
+
+ u32 oem_cfg_func;
+#define OEM_CFG_FUNC_TC_MASK 0x0000000F
+#define OEM_CFG_FUNC_TC_OFFSET 0
+#define OEM_CFG_FUNC_TC_0 0x0
+#define OEM_CFG_FUNC_TC_1 0x1
+#define OEM_CFG_FUNC_TC_2 0x2
+#define OEM_CFG_FUNC_TC_3 0x3
+#define OEM_CFG_FUNC_TC_4 0x4
+#define OEM_CFG_FUNC_TC_5 0x5
+#define OEM_CFG_FUNC_TC_6 0x6
+#define OEM_CFG_FUNC_TC_7 0x7
+
+#define OEM_CFG_FUNC_HOST_PRI_CTRL_MASK 0x00000030
+#define OEM_CFG_FUNC_HOST_PRI_CTRL_OFFSET 4
+#define OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC 0x1
+#define OEM_CFG_FUNC_HOST_PRI_CTRL_OS 0x2
};
/**************************************/
@@ -1019,13 +1080,13 @@ struct ocbb_data_stc {
#define MFW_SENSOR_LOCATION_EXTERNAL 2
#define MFW_SENSOR_LOCATION_SFP 3
-#define SENSOR_LOCATION_SHIFT 0
+#define SENSOR_LOCATION_OFFSET 0
#define SENSOR_LOCATION_MASK 0x000000ff
-#define THRESHOLD_HIGH_SHIFT 8
+#define THRESHOLD_HIGH_OFFSET 8
#define THRESHOLD_HIGH_MASK 0x0000ff00
-#define CRITICAL_TEMPERATURE_SHIFT 16
+#define CRITICAL_TEMPERATURE_OFFSET 16
#define CRITICAL_TEMPERATURE_MASK 0x00ff0000
-#define CURRENT_TEMP_SHIFT 24
+#define CURRENT_TEMP_OFFSET 24
#define CURRENT_TEMP_MASK 0xff000000
struct temperature_status_stc {
u32 num_of_sensors;
@@ -1090,18 +1151,18 @@ struct load_req_stc {
u32 fw_ver;
u32 misc0;
#define LOAD_REQ_ROLE_MASK 0x000000FF
-#define LOAD_REQ_ROLE_SHIFT 0
+#define LOAD_REQ_ROLE_OFFSET 0
#define LOAD_REQ_LOCK_TO_MASK 0x0000FF00
-#define LOAD_REQ_LOCK_TO_SHIFT 8
+#define LOAD_REQ_LOCK_TO_OFFSET 8
#define LOAD_REQ_LOCK_TO_DEFAULT 0
#define LOAD_REQ_LOCK_TO_NONE 255
#define LOAD_REQ_FORCE_MASK 0x000F0000
-#define LOAD_REQ_FORCE_SHIFT 16
+#define LOAD_REQ_FORCE_OFFSET 16
#define LOAD_REQ_FORCE_NONE 0
#define LOAD_REQ_FORCE_PF 1
#define LOAD_REQ_FORCE_ALL 2
#define LOAD_REQ_FLAGS0_MASK 0x00F00000
-#define LOAD_REQ_FLAGS0_SHIFT 20
+#define LOAD_REQ_FLAGS0_OFFSET 20
#define LOAD_REQ_FLAGS0_AVOID_RESET (0x1 << 0)
};
@@ -1111,14 +1172,27 @@ struct load_rsp_stc {
u32 fw_ver;
u32 misc0;
#define LOAD_RSP_ROLE_MASK 0x000000FF
-#define LOAD_RSP_ROLE_SHIFT 0
+#define LOAD_RSP_ROLE_OFFSET 0
#define LOAD_RSP_HSI_MASK 0x0000FF00
-#define LOAD_RSP_HSI_SHIFT 8
+#define LOAD_RSP_HSI_OFFSET 8
#define LOAD_RSP_FLAGS0_MASK 0x000F0000
-#define LOAD_RSP_FLAGS0_SHIFT 16
+#define LOAD_RSP_FLAGS0_OFFSET 16
#define LOAD_RSP_FLAGS0_DRV_EXISTS (0x1 << 0)
};
+struct mdump_retain_data_stc {
+ u32 valid;
+ u32 epoch;
+ u32 pf;
+ u32 status;
+};
+
+struct attribute_cmd_write_stc {
+ u32 val;
+ u32 mask;
+ u32 offset;
+};
+
union drv_union_data {
struct mcp_mac wol_mac; /* UNLOAD_DONE */
@@ -1149,6 +1223,8 @@ union drv_union_data {
struct load_req_stc load_req;
struct load_rsp_stc load_rsp;
+ struct mdump_retain_data_stc mdump_retain;
+ struct attribute_cmd_write_stc attribute_cmd_write;
/* ... */
};
@@ -1166,8 +1242,8 @@ struct public_drv_mb {
/* - DONT_CARE - Don't flap the link if up */
#define DRV_MSG_CODE_LINK_RESET 0x23000000
- /* Vitaly: LLDP commands */
#define DRV_MSG_CODE_SET_LLDP 0x24000000
+#define DRV_MSG_CODE_REGISTER_LLDP_TLVS_RX 0x24100000
#define DRV_MSG_CODE_SET_DCBX 0x25000000
/* OneView feature driver HSI*/
#define DRV_MSG_CODE_OV_UPDATE_CURR_CFG 0x26000000
@@ -1189,18 +1265,19 @@ struct public_drv_mb {
#define DRV_MSG_CODE_INITIATE_PF_FLR 0x02010000
#define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000
#define DRV_MSG_CODE_CFG_VF_MSIX 0xc0010000
+#define DRV_MSG_CODE_CFG_PF_VFS_MSIX 0xc0020000
/* Param is either DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MFW/IMAGE */
#define DRV_MSG_CODE_NVM_PUT_FILE_BEGIN 0x00010000
/* Param should be set to the transaction size (up to 64 bytes) */
#define DRV_MSG_CODE_NVM_PUT_FILE_DATA 0x00020000
/* MFW will place the file offset and len in file_att struct */
#define DRV_MSG_CODE_NVM_GET_FILE_ATT 0x00030000
-/* Read 32bytes of nvram data. Param is [0:23] – Offset [24:31] –
- * Len in Bytes
+/* Read 32bytes of nvram data. Param is [0:23] - Offset [24:31] -
+ * Len in Bytes
*/
#define DRV_MSG_CODE_NVM_READ_NVRAM 0x00050000
-/* Writes up to 32Bytes to nvram. Param is [0:23] – Offset [24:31] –
- * Len in Bytes. In case this address is in the range of secured file in
+/* Writes up to 32Bytes to nvram. Param is [0:23] - Offset [24:31]
+ * - Len in Bytes. In case this address is in the range of secured file in
* secured mode, the operation will fail
*/
#define DRV_MSG_CODE_NVM_WRITE_NVRAM 0x00060000
@@ -1242,7 +1319,7 @@ struct public_drv_mb {
* [3:0] - func, drv_data[7:0] - MAC/WWNN/WWPN
*/
#define DRV_MSG_CODE_GET_VMAC 0x00120000
-#define DRV_MSG_CODE_VMAC_TYPE_SHIFT 4
+#define DRV_MSG_CODE_VMAC_TYPE_OFFSET 4
#define DRV_MSG_CODE_VMAC_TYPE_MASK 0x30
#define DRV_MSG_CODE_VMAC_TYPE_MAC 1
#define DRV_MSG_CODE_VMAC_TYPE_WWNN 2
@@ -1270,9 +1347,9 @@ struct public_drv_mb {
/* Set function BW, params[15:8] - min, params[7:0] - max */
#define DRV_MSG_CODE_SET_BW 0x00190000
#define BW_MAX_MASK 0x000000ff
-#define BW_MAX_SHIFT 0
+#define BW_MAX_OFFSET 0
#define BW_MIN_MASK 0x0000ff00
-#define BW_MIN_SHIFT 8
+#define BW_MIN_OFFSET 8
/* When param is set to 1, all parities will be masked(disabled). When params
* are set to 0, parities will be unmasked again.
@@ -1305,9 +1382,9 @@ struct public_drv_mb {
#define DRV_MSG_CODE_RESOURCE_CMD 0x00230000
#define RESOURCE_CMD_REQ_RESC_MASK 0x0000001F
-#define RESOURCE_CMD_REQ_RESC_SHIFT 0
+#define RESOURCE_CMD_REQ_RESC_OFFSET 0
#define RESOURCE_CMD_REQ_OPCODE_MASK 0x000000E0
-#define RESOURCE_CMD_REQ_OPCODE_SHIFT 5
+#define RESOURCE_CMD_REQ_OPCODE_OFFSET 5
/* request resource ownership with default aging */
#define RESOURCE_OPCODE_REQ 1
/* request resource ownership without aging */
@@ -1318,12 +1395,12 @@ struct public_drv_mb {
/* force resource release */
#define RESOURCE_OPCODE_FORCE_RELEASE 5
#define RESOURCE_CMD_REQ_AGE_MASK 0x0000FF00
-#define RESOURCE_CMD_REQ_AGE_SHIFT 8
+#define RESOURCE_CMD_REQ_AGE_OFFSET 8
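Composing a resource-command request is then straightforward; e.g. taking
ownership of resource 3 with the default aging period (illustrative values):

    u32 param = (3 << RESOURCE_CMD_REQ_RESC_OFFSET) |
                (RESOURCE_OPCODE_REQ << RESOURCE_CMD_REQ_OPCODE_OFFSET);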
#define RESOURCE_CMD_RSP_OWNER_MASK 0x000000FF
-#define RESOURCE_CMD_RSP_OWNER_SHIFT 0
+#define RESOURCE_CMD_RSP_OWNER_OFFSET 0
#define RESOURCE_CMD_RSP_OPCODE_MASK 0x00000700
-#define RESOURCE_CMD_RSP_OPCODE_SHIFT 8
+#define RESOURCE_CMD_RSP_OPCODE_OFFSET 8
/* resource is free and granted to requester */
#define RESOURCE_OPCODE_GNT 1
/* resource is busy, param[7:0] indicates owner as follow 0-15 = PF0-15,
@@ -1361,6 +1438,8 @@ struct public_drv_mb {
#define DRV_MSG_CODE_MDUMP_SET_ENABLE 0x05
/* Clear all logs */
#define DRV_MSG_CODE_MDUMP_CLEAR_LOGS 0x06
+#define DRV_MSG_CODE_MDUMP_GET_RETAIN 0x07 /* Get retained data */
+#define DRV_MSG_CODE_MDUMP_CLR_RETAIN 0x08 /* Clear retain data */
#define DRV_MSG_CODE_MEM_ECC_EVENTS 0x00260000 /* Param: None */
/* Param: [0:15] - gpio number */
#define DRV_MSG_CODE_GPIO_INFO 0x00270000
@@ -1368,14 +1447,28 @@ struct public_drv_mb {
#define DRV_MSG_CODE_EXT_PHY_READ 0x00280000
/* Value should be placed in union */
#define DRV_MSG_CODE_EXT_PHY_WRITE 0x00290000
-#define DRV_MB_PARAM_ADDR_SHIFT 0
+#define DRV_MB_PARAM_ADDR_OFFSET 0
#define DRV_MB_PARAM_ADDR_MASK 0x0000FFFF
-#define DRV_MB_PARAM_DEVAD_SHIFT 16
+#define DRV_MB_PARAM_DEVAD_OFFSET 16
#define DRV_MB_PARAM_DEVAD_MASK 0x001F0000
-#define DRV_MB_PARAM_PORT_SHIFT 21
+#define DRV_MB_PARAM_PORT_OFFSET 21
#define DRV_MB_PARAM_PORT_MASK 0x00600000
#define DRV_MSG_CODE_EXT_PHY_FW_UPGRADE 0x002a0000
+#define DRV_MSG_CODE_GET_TLV_DONE 0x002f0000 /* Param: None */
+/* Param: Set DRV_MB_PARAM_FEATURE_SUPPORT_* */
+#define DRV_MSG_CODE_FEATURE_SUPPORT 0x00300000
+/* return FW_MB_PARAM_FEATURE_SUPPORT_* */
+#define DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT 0x00310000
+#define DRV_MSG_CODE_READ_WOL_REG 0X00320000
+#define DRV_MSG_CODE_WRITE_WOL_REG 0X00330000
+#define DRV_MSG_CODE_GET_WOL_BUFFER 0X00340000
+/* Param: [0:23] Attribute key, [24:31] Attribute sub command */
+#define DRV_MSG_CODE_ATTRIBUTE 0x00350000
+
+/* Param: Password len. Union: Plain Password */
+#define DRV_MSG_CODE_ENCRYPT_PASSWORD 0x00360000
+
#define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff
u32 drv_mb_param;
@@ -1393,45 +1486,56 @@ struct public_drv_mb {
#define DRV_MB_PARAM_INIT_PHY_DONT_CARE 0x00000002
 /* LLDP / DCBX params */
+ /* To be used with SET_LLDP command */
#define DRV_MB_PARAM_LLDP_SEND_MASK 0x00000001
-#define DRV_MB_PARAM_LLDP_SEND_SHIFT 0
+#define DRV_MB_PARAM_LLDP_SEND_OFFSET 0
+ /* To be used with SET_LLDP and REGISTER_LLDP_TLVS_RX commands */
#define DRV_MB_PARAM_LLDP_AGENT_MASK 0x00000006
-#define DRV_MB_PARAM_LLDP_AGENT_SHIFT 1
+#define DRV_MB_PARAM_LLDP_AGENT_OFFSET 1
+ /* To be used with REGISTER_LLDP_TLVS_RX command */
+#define DRV_MB_PARAM_LLDP_TLV_RX_VALID_MASK 0x00000001
+#define DRV_MB_PARAM_LLDP_TLV_RX_VALID_OFFSET 0
+#define DRV_MB_PARAM_LLDP_TLV_RX_TYPE_MASK 0x000007f0
+#define DRV_MB_PARAM_LLDP_TLV_RX_TYPE_OFFSET 4
+ /* To be used with SET_DCBX command */
#define DRV_MB_PARAM_DCBX_NOTIFY_MASK 0x00000008
-#define DRV_MB_PARAM_DCBX_NOTIFY_SHIFT 3
+#define DRV_MB_PARAM_DCBX_NOTIFY_OFFSET 3
#define DRV_MB_PARAM_NIG_DRAIN_PERIOD_MS_MASK 0x000000FF
-#define DRV_MB_PARAM_NIG_DRAIN_PERIOD_MS_SHIFT 0
+#define DRV_MB_PARAM_NIG_DRAIN_PERIOD_MS_OFFSET 0
#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MFW 0x1
#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_IMAGE 0x2
-#define DRV_MB_PARAM_NVM_OFFSET_SHIFT 0
+#define DRV_MB_PARAM_NVM_OFFSET_OFFSET 0
#define DRV_MB_PARAM_NVM_OFFSET_MASK 0x00FFFFFF
-#define DRV_MB_PARAM_NVM_LEN_SHIFT 24
+#define DRV_MB_PARAM_NVM_LEN_OFFSET 24
#define DRV_MB_PARAM_NVM_LEN_MASK 0xFF000000
-#define DRV_MB_PARAM_PHY_ADDR_SHIFT 0
+#define DRV_MB_PARAM_PHY_ADDR_OFFSET 0
#define DRV_MB_PARAM_PHY_ADDR_MASK 0x1FF0FFFF
-#define DRV_MB_PARAM_PHY_LANE_SHIFT 16
+#define DRV_MB_PARAM_PHY_LANE_OFFSET 16
#define DRV_MB_PARAM_PHY_LANE_MASK 0x000F0000
-#define DRV_MB_PARAM_PHY_SELECT_PORT_SHIFT 29
+#define DRV_MB_PARAM_PHY_SELECT_PORT_OFFSET 29
#define DRV_MB_PARAM_PHY_SELECT_PORT_MASK 0x20000000
-#define DRV_MB_PARAM_PHY_PORT_SHIFT 30
+#define DRV_MB_PARAM_PHY_PORT_OFFSET 30
#define DRV_MB_PARAM_PHY_PORT_MASK 0xc0000000
-#define DRV_MB_PARAM_PHYMOD_LANE_SHIFT 0
+#define DRV_MB_PARAM_PHYMOD_LANE_OFFSET 0
#define DRV_MB_PARAM_PHYMOD_LANE_MASK 0x000000FF
-#define DRV_MB_PARAM_PHYMOD_SIZE_SHIFT 8
+#define DRV_MB_PARAM_PHYMOD_SIZE_OFFSET 8
#define DRV_MB_PARAM_PHYMOD_SIZE_MASK 0x000FFF00
- /* configure vf MSIX params*/
-#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT 0
+ /* configure vf MSIX params BB */
+#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_OFFSET 0
#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK 0x000000FF
-#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT 8
+#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_OFFSET 8
#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK 0x0000FF00
+	/* configure vf MSIX for PF params AH */
+#define DRV_MB_PARAM_CFG_PF_VFS_MSIX_SB_NUM_OFFSET 0
+#define DRV_MB_PARAM_CFG_PF_VFS_MSIX_SB_NUM_MASK 0x000000FF
 /* OneView configuration parameters */
-#define DRV_MB_PARAM_OV_CURR_CFG_SHIFT 0
+#define DRV_MB_PARAM_OV_CURR_CFG_OFFSET 0
#define DRV_MB_PARAM_OV_CURR_CFG_MASK 0x0000000F
#define DRV_MB_PARAM_OV_CURR_CFG_NONE 0
#define DRV_MB_PARAM_OV_CURR_CFG_OS 1
@@ -1442,7 +1546,7 @@ struct public_drv_mb {
#define DRV_MB_PARAM_OV_CURR_CFG_DCI 6
#define DRV_MB_PARAM_OV_CURR_CFG_HII 7
-#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_SHIFT 0
+#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_OFFSET 0
#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_MASK 0x000000FF
#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_NONE (1 << 0)
#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_ISCSI_IP_ACQUIRED (1 << 1)
@@ -1455,17 +1559,17 @@ struct public_drv_mb {
#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_OS_HANDOFF (1 << 6)
#define DRV_MB_PARAM_OV_UPDATE_BOOT_COMPLETED 0
-#define DRV_MB_PARAM_OV_PCI_BUS_NUM_SHIFT 0
+#define DRV_MB_PARAM_OV_PCI_BUS_NUM_OFFSET 0
#define DRV_MB_PARAM_OV_PCI_BUS_NUM_MASK 0x000000FF
-#define DRV_MB_PARAM_OV_STORM_FW_VER_SHIFT 0
+#define DRV_MB_PARAM_OV_STORM_FW_VER_OFFSET 0
#define DRV_MB_PARAM_OV_STORM_FW_VER_MASK 0xFFFFFFFF
#define DRV_MB_PARAM_OV_STORM_FW_VER_MAJOR_MASK 0xFF000000
#define DRV_MB_PARAM_OV_STORM_FW_VER_MINOR_MASK 0x00FF0000
#define DRV_MB_PARAM_OV_STORM_FW_VER_BUILD_MASK 0x0000FF00
#define DRV_MB_PARAM_OV_STORM_FW_VER_DROP_MASK 0x000000FF
-#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_SHIFT 0
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_OFFSET 0
#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_MASK 0xF
#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_UNKNOWN 0x1
/* Not Installed*/
@@ -1476,36 +1580,36 @@ struct public_drv_mb {
/* installed and active */
#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE 0x5
-#define DRV_MB_PARAM_OV_MTU_SIZE_SHIFT 0
+#define DRV_MB_PARAM_OV_MTU_SIZE_OFFSET 0
#define DRV_MB_PARAM_OV_MTU_SIZE_MASK 0xFFFFFFFF
#define DRV_MB_PARAM_SET_LED_MODE_OPER 0x0
#define DRV_MB_PARAM_SET_LED_MODE_ON 0x1
#define DRV_MB_PARAM_SET_LED_MODE_OFF 0x2
-#define DRV_MB_PARAM_TRANSCEIVER_PORT_SHIFT 0
+#define DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET 0
#define DRV_MB_PARAM_TRANSCEIVER_PORT_MASK 0x00000003
-#define DRV_MB_PARAM_TRANSCEIVER_SIZE_SHIFT 2
+#define DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET 2
#define DRV_MB_PARAM_TRANSCEIVER_SIZE_MASK 0x000000FC
-#define DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_SHIFT 8
+#define DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET 8
#define DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK 0x0000FF00
-#define DRV_MB_PARAM_TRANSCEIVER_OFFSET_SHIFT 16
+#define DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET 16
#define DRV_MB_PARAM_TRANSCEIVER_OFFSET_MASK 0xFFFF0000
-#define DRV_MB_PARAM_GPIO_NUMBER_SHIFT 0
+#define DRV_MB_PARAM_GPIO_NUMBER_OFFSET 0
#define DRV_MB_PARAM_GPIO_NUMBER_MASK 0x0000FFFF
-#define DRV_MB_PARAM_GPIO_VALUE_SHIFT 16
+#define DRV_MB_PARAM_GPIO_VALUE_OFFSET 16
#define DRV_MB_PARAM_GPIO_VALUE_MASK 0xFFFF0000
-#define DRV_MB_PARAM_GPIO_DIRECTION_SHIFT 16
+#define DRV_MB_PARAM_GPIO_DIRECTION_OFFSET 16
#define DRV_MB_PARAM_GPIO_DIRECTION_MASK 0x00FF0000
-#define DRV_MB_PARAM_GPIO_CTRL_SHIFT 24
+#define DRV_MB_PARAM_GPIO_CTRL_OFFSET 24
#define DRV_MB_PARAM_GPIO_CTRL_MASK 0xFF000000
/* Resource Allocation params - Driver version support*/
#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xFFFF0000
-#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT 16
+#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_OFFSET 16
#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_MASK 0x0000FFFF
-#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT 0
+#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_OFFSET 0
#define DRV_MB_PARAM_BIST_UNKNOWN_TEST 0
#define DRV_MB_PARAM_BIST_REGISTER_TEST 1
@@ -1518,11 +1622,27 @@ struct public_drv_mb {
#define DRV_MB_PARAM_BIST_RC_FAILED 2
#define DRV_MB_PARAM_BIST_RC_INVALID_PARAMETER 3
-#define DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT 0
+#define DRV_MB_PARAM_BIST_TEST_INDEX_OFFSET 0
#define DRV_MB_PARAM_BIST_TEST_INDEX_MASK 0x000000FF
-#define DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT 8
+#define DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_OFFSET 8
#define DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_MASK 0x0000FF00
+#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_MASK 0x0000FFFF
+#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_OFFSET 0
+/* driver supports SmartLinQ parameter */
+#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_SMARTLINQ 0x00000001
+/* driver supports EEE parameter */
+#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE 0x00000002
+#define DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_MASK 0xFFFF0000
+#define DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_OFFSET 16
+/* driver supports virtual link parameter */
+#define DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK 0x00010000
+ /* Driver attributes params */
+#define DRV_MB_PARAM_ATTRIBUTE_KEY_OFFSET 0
+#define DRV_MB_PARAM_ATTRIBUTE_KEY_MASK 0x00FFFFFF
+#define DRV_MB_PARAM_ATTRIBUTE_CMD_OFFSET 24
+#define DRV_MB_PARAM_ATTRIBUTE_CMD_MASK 0xFF000000
+
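Packing the mailbox parameter for DRV_MSG_CODE_ATTRIBUTE follows the same
mask/offset convention (the key value 0x12 is illustrative only):

    /* Read attribute key 0x12 via the management mailbox */
    u32 param = (0x12 << DRV_MB_PARAM_ATTRIBUTE_KEY_OFFSET) |
                (ATTRIBUTE_CMD_READ << DRV_MB_PARAM_ATTRIBUTE_CMD_OFFSET);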
u32 fw_mb_header;
#define FW_MSG_CODE_MASK 0xffff0000
#define FW_MSG_CODE_UNSUPPORTED 0x00000000
@@ -1545,6 +1665,7 @@ struct public_drv_mb {
#define FW_MSG_CODE_LINK_RESET_DONE 0x23000000
#define FW_MSG_CODE_SET_LLDP_DONE 0x24000000
#define FW_MSG_CODE_SET_LLDP_UNSUPPORTED_AGENT 0x24010000
+#define FW_MSG_CODE_REGISTER_LLDP_TLVS_RX_DONE 0x24100000
#define FW_MSG_CODE_SET_DCBX_DONE 0x25000000
#define FW_MSG_CODE_UPDATE_CURR_CFG_DONE 0x26000000
#define FW_MSG_CODE_UPDATE_BUS_NUM_DONE 0x27000000
@@ -1597,6 +1718,7 @@ struct public_drv_mb {
#define FW_MSG_CODE_SET_SECURE_MODE_OK 0x00140000
#define FW_MSG_MODE_PHY_PRIVILEGE_ERROR 0x00150000
#define FW_MSG_CODE_OK 0x00160000
+#define FW_MSG_CODE_ERROR 0x00170000
#define FW_MSG_CODE_LED_MODE_INVALID 0x00170000
#define FW_MSG_CODE_PHY_DIAG_OK 0x00160000
#define FW_MSG_CODE_PHY_DIAG_ERROR 0x00170000
@@ -1628,16 +1750,37 @@ struct public_drv_mb {
#define FW_MSG_CODE_MDUMP_IN_PROGRESS 0x00040000
#define FW_MSG_CODE_MDUMP_WRITE_FAILED 0x00050000
+
+#define FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE 0x00870000
+#define FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_BAD_ASIC 0x00880000
+
+#define FW_MSG_CODE_WOL_READ_WRITE_OK 0x00820000
+#define FW_MSG_CODE_WOL_READ_WRITE_INVALID_VAL 0x00830000
+#define FW_MSG_CODE_WOL_READ_WRITE_INVALID_ADDR 0x00840000
+#define FW_MSG_CODE_WOL_READ_BUFFER_OK 0x00850000
+#define FW_MSG_CODE_WOL_READ_BUFFER_INVALID_VAL 0x00860000
+
#define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff
+#define FW_MSG_CODE_ATTRIBUTE_INVALID_KEY 0x00020000
+#define FW_MSG_CODE_ATTRIBUTE_INVALID_CMD 0x00030000
u32 fw_mb_param;
/* Resource Allocation params - MFW version support */
#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xFFFF0000
-#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT 16
+#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_OFFSET 16
#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_MASK 0x0000FFFF
-#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT 0
+#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_OFFSET 0
+
+/* get MFW feature support response */
+/* MFW supports SmartLinQ */
+#define FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ 0x00000001
+/* MFW supports EEE */
+#define FW_MB_PARAM_FEATURE_SUPPORT_EEE 0x00000002
+/* MFW supports virtual link */
+#define FW_MB_PARAM_FEATURE_SUPPORT_VLINK 0x00010000
+#define FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR (1 << 0)
u32 drv_pulse_mb;
#define DRV_PULSE_SEQ_MASK 0x00007fff
@@ -1702,6 +1845,9 @@ enum MFW_DRV_MSG_TYPE {
MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE,
MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED,
MFW_DRV_MSG_EEE_NEGOTIATION_COMPLETE,
+ MFW_DRV_MSG_GET_TLV_REQ,
+ MFW_DRV_MSG_OEM_CFG_UPDATE,
+ MFW_DRV_MSG_LLDP_RECEIVED_TLVS_UPDATED,
MFW_DRV_MSG_MAX
};
diff --git a/drivers/net/qede/base/nvm_cfg.h b/drivers/net/qede/base/nvm_cfg.h
index 4e588350..c99e805d 100644
--- a/drivers/net/qede/base/nvm_cfg.h
+++ b/drivers/net/qede/base/nvm_cfg.h
@@ -13,20 +13,20 @@
* Description: NVM config file - Generated file from nvm cfg excel.
* DO NOT MODIFY !!!
*
- * Created: 12/15/2016
+ * Created: 5/8/2017
*
****************************************************************************/
#ifndef NVM_CFG_H
#define NVM_CFG_H
-#define NVM_CFG_version 0x81805
+#define NVM_CFG_version 0x83000
-#define NVM_CFG_new_option_seq 15
+#define NVM_CFG_new_option_seq 23
-#define NVM_CFG_removed_option_seq 0
+#define NVM_CFG_removed_option_seq 1
-#define NVM_CFG_updated_value_seq 1
+#define NVM_CFG_updated_value_seq 4
struct nvm_cfg_mac_address {
u32 mac_addr_hi;
@@ -342,9 +342,8 @@ struct nvm_cfg1_glob {
#define NVM_CFG1_GLOB_VENDOR_ID_MASK 0x0000FFFF
#define NVM_CFG1_GLOB_VENDOR_ID_OFFSET 0
/* Set caution temperature */
- #define NVM_CFG1_GLOB_CAUTION_THRESHOLD_TEMPERATURE_MASK \
- 0x00FF0000
- #define NVM_CFG1_GLOB_CAUTION_THRESHOLD_TEMPERATURE_OFFSET 16
+ #define NVM_CFG1_GLOB_DEAD_TEMP_TH_TEMPERATURE_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_DEAD_TEMP_TH_TEMPERATURE_OFFSET 16
/* Set external thermal sensor I2C address */
#define NVM_CFG1_GLOB_EXTERNAL_THERMAL_SENSOR_ADDRESS_MASK \
0xFF000000
@@ -509,6 +508,10 @@ struct nvm_cfg1_glob {
#define NVM_CFG1_GLOB_PF_MAPPING_OFFSET 26
#define NVM_CFG1_GLOB_PF_MAPPING_CONTINUOUS 0x0
#define NVM_CFG1_GLOB_PF_MAPPING_FIXED 0x1
+ #define NVM_CFG1_GLOB_VOLTAGE_REGULATOR_TYPE_MASK 0x30000000
+ #define NVM_CFG1_GLOB_VOLTAGE_REGULATOR_TYPE_OFFSET 28
+ #define NVM_CFG1_GLOB_VOLTAGE_REGULATOR_TYPE_DISABLED 0x0
+ #define NVM_CFG1_GLOB_VOLTAGE_REGULATOR_TYPE_TI 0x1
u32 led_global_settings; /* 0x74 */
#define NVM_CFG1_GLOB_LED_SWAP_0_MASK 0x0000000F
#define NVM_CFG1_GLOB_LED_SWAP_0_OFFSET 0
@@ -1036,7 +1039,13 @@ struct nvm_cfg1_glob {
#define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO29 0x1E
#define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO30 0x1F
#define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO31 0x20
- u32 reserved[58]; /* 0x140 */
+ u32 preboot_debug_mode_std; /* 0x140 */
+ u32 preboot_debug_mode_ext; /* 0x144 */
+ u32 ext_phy_cfg1; /* 0x148 */
+ /* Ext PHY MDI pair swap value */
+ #define NVM_CFG1_GLOB_EXT_PHY_MDI_PAIR_SWAP_MASK 0x0000FFFF
+ #define NVM_CFG1_GLOB_EXT_PHY_MDI_PAIR_SWAP_OFFSET 0
+ u32 reserved[55]; /* 0x14C */
};
struct nvm_cfg1_path {
@@ -1134,6 +1143,7 @@ struct nvm_cfg1_port {
#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_OFFSET 0
#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G 0x1
#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G 0x2
+ #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G 0x4
#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G 0x8
#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G 0x10
#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G 0x20
@@ -1142,6 +1152,7 @@ struct nvm_cfg1_port {
#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_OFFSET 16
#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_1G 0x1
#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_10G 0x2
+ #define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_20G 0x4
#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_25G 0x8
#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_40G 0x10
#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_50G 0x20
@@ -1152,11 +1163,11 @@ struct nvm_cfg1_port {
#define NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG 0x0
#define NVM_CFG1_PORT_DRV_LINK_SPEED_1G 0x1
#define NVM_CFG1_PORT_DRV_LINK_SPEED_10G 0x2
+ #define NVM_CFG1_PORT_DRV_LINK_SPEED_20G 0x3
#define NVM_CFG1_PORT_DRV_LINK_SPEED_25G 0x4
#define NVM_CFG1_PORT_DRV_LINK_SPEED_40G 0x5
#define NVM_CFG1_PORT_DRV_LINK_SPEED_50G 0x6
#define NVM_CFG1_PORT_DRV_LINK_SPEED_BB_100G 0x7
- #define NVM_CFG1_PORT_DRV_LINK_SPEED_SMARTLINQ 0x8
#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK 0x00000070
#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET 4
#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG 0x1
@@ -1167,11 +1178,11 @@ struct nvm_cfg1_port {
#define NVM_CFG1_PORT_MFW_LINK_SPEED_AUTONEG 0x0
#define NVM_CFG1_PORT_MFW_LINK_SPEED_1G 0x1
#define NVM_CFG1_PORT_MFW_LINK_SPEED_10G 0x2
+ #define NVM_CFG1_PORT_MFW_LINK_SPEED_20G 0x3
#define NVM_CFG1_PORT_MFW_LINK_SPEED_25G 0x4
#define NVM_CFG1_PORT_MFW_LINK_SPEED_40G 0x5
#define NVM_CFG1_PORT_MFW_LINK_SPEED_50G 0x6
#define NVM_CFG1_PORT_MFW_LINK_SPEED_BB_100G 0x7
- #define NVM_CFG1_PORT_MFW_LINK_SPEED_SMARTLINQ 0x8
#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_MASK 0x00003800
#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_OFFSET 11
#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_AUTONEG 0x1
@@ -1203,6 +1214,14 @@ struct nvm_cfg1_port {
#define NVM_CFG1_PORT_FEC_AN_MODE_25G_RS 0x4
#define NVM_CFG1_PORT_FEC_AN_MODE_25G_FIRECODE_AND_RS 0x5
#define NVM_CFG1_PORT_FEC_AN_MODE_ALL 0x6
+ #define NVM_CFG1_PORT_SMARTLINQ_MODE_MASK 0x00800000
+ #define NVM_CFG1_PORT_SMARTLINQ_MODE_OFFSET 23
+ #define NVM_CFG1_PORT_SMARTLINQ_MODE_DISABLED 0x0
+ #define NVM_CFG1_PORT_SMARTLINQ_MODE_ENABLED 0x1
+ #define NVM_CFG1_PORT_RESERVED_WAS_MFW_SMARTLINQ_MASK 0x01000000
+ #define NVM_CFG1_PORT_RESERVED_WAS_MFW_SMARTLINQ_OFFSET 24
+ #define NVM_CFG1_PORT_RESERVED_WAS_MFW_SMARTLINQ_DISABLED 0x0
+ #define NVM_CFG1_PORT_RESERVED_WAS_MFW_SMARTLINQ_ENABLED 0x1
u32 phy_cfg; /* 0x1C */
#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_MASK 0x0000FFFF
#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_OFFSET 0
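
SmartLinQ is no longer a value (0x8) of the link-speed enums; it becomes a standalone enable bit at offset 23, with the old MFW bit kept as a named reserved slot. A sketch, assuming the bit lives in the same config word (link_cfg below) as NVM_CFG1_PORT_DRV_LINK_SPEED, as the hunk above suggests:

	bool smartlinq_en =
		((link_cfg & NVM_CFG1_PORT_SMARTLINQ_MODE_MASK) >>
		 NVM_CFG1_PORT_SMARTLINQ_MODE_OFFSET) ==
		NVM_CFG1_PORT_SMARTLINQ_MODE_ENABLED;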
@@ -1243,6 +1262,7 @@ struct nvm_cfg1_port {
#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_OFFSET 0
#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_NONE 0x0
#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_BCM8485X 0x1
+ #define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_BCM5422X 0x2
#define NVM_CFG1_PORT_EXTERNAL_PHY_ADDRESS_MASK 0x0000FF00
#define NVM_CFG1_PORT_EXTERNAL_PHY_ADDRESS_OFFSET 8
/* EEE power saving mode */
@@ -1276,19 +1296,27 @@ struct nvm_cfg1_port {
#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_AUTONEG 0x0
#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_1G 0x1
#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_10G 0x2
+ #define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_20G 0x3
#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_25G 0x4
#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_40G 0x5
#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_50G 0x6
#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_BB_100G 0x7
- #define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_SMARTLINQ 0x8
#define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_RETRY_COUNT_MASK \
0x00E00000
#define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_RETRY_COUNT_OFFSET 21
+ #define NVM_CFG1_PORT_RESERVED_WAS_PREBOOT_SMARTLINQ_MASK \
+ 0x01000000
+ #define NVM_CFG1_PORT_RESERVED_WAS_PREBOOT_SMARTLINQ_OFFSET 24
+ #define NVM_CFG1_PORT_RESERVED_WAS_PREBOOT_SMARTLINQ_DISABLED \
+ 0x0
+ #define NVM_CFG1_PORT_RESERVED_WAS_PREBOOT_SMARTLINQ_ENABLED 0x1
u32 mba_cfg2; /* 0x2C */
#define NVM_CFG1_PORT_RESERVED65_MASK 0x0000FFFF
#define NVM_CFG1_PORT_RESERVED65_OFFSET 0
#define NVM_CFG1_PORT_RESERVED66_MASK 0x00010000
#define NVM_CFG1_PORT_RESERVED66_OFFSET 16
+ #define NVM_CFG1_PORT_PREBOOT_LINK_UP_DELAY_MASK 0x01FE0000
+ #define NVM_CFG1_PORT_PREBOOT_LINK_UP_DELAY_OFFSET 17
u32 vf_cfg; /* 0x30 */
#define NVM_CFG1_PORT_RESERVED8_MASK 0x0000FFFF
#define NVM_CFG1_PORT_RESERVED8_OFFSET 0
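
The new preboot link-up delay is an 8-bit field (mask 0x01FE0000, offset 17) in the existing mba_cfg2 word; its units are not stated in this header. Extraction sketch, with p_port an assumed pointer to struct nvm_cfg1_port:

	u8 link_up_delay =
		(u8)((p_port->mba_cfg2 &
		      NVM_CFG1_PORT_PREBOOT_LINK_UP_DELAY_MASK) >>
		     NVM_CFG1_PORT_PREBOOT_LINK_UP_DELAY_OFFSET);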
@@ -1304,9 +1332,12 @@ struct nvm_cfg1_port {
#define NVM_CFG1_PORT_LANE_LED_SPD_2_SEL_OFFSET 16
#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_1G 0x1
#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_10G 0x2
- #define NVM_CFG1_PORT_LANE_LED_SPD__SEL_25G 0x8
- #define NVM_CFG1_PORT_LANE_LED_SPD__SEL_40G 0x10
- #define NVM_CFG1_PORT_LANE_LED_SPD__SEL_50G 0x20
+ #define NVM_CFG1_PORT_LANE_LED_SPD__SEL_AH_25G 0x4
+ #define NVM_CFG1_PORT_LANE_LED_SPD__SEL_BB_25G 0x8
+ #define NVM_CFG1_PORT_LANE_LED_SPD__SEL_AH_40G 0x8
+ #define NVM_CFG1_PORT_LANE_LED_SPD__SEL_BB_40G 0x10
+ #define NVM_CFG1_PORT_LANE_LED_SPD__SEL_AH_50G 0x10
+ #define NVM_CFG1_PORT_LANE_LED_SPD__SEL_BB_50G 0x20
#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_BB_100G 0x40
u32 transceiver_00; /* 0x40 */
 /* Mapping of the transceiver module-absent signal */
@@ -1412,6 +1443,7 @@ struct nvm_cfg1_port {
#define NVM_CFG1_PORT_MNM_10G_DRV_SPEED_CAPABILITY_MASK_OFFSET 0
#define NVM_CFG1_PORT_MNM_10G_DRV_SPEED_CAPABILITY_MASK_1G 0x1
#define NVM_CFG1_PORT_MNM_10G_DRV_SPEED_CAPABILITY_MASK_10G 0x2
+ #define NVM_CFG1_PORT_MNM_10G_DRV_SPEED_CAPABILITY_MASK_20G 0x4
#define NVM_CFG1_PORT_MNM_10G_DRV_SPEED_CAPABILITY_MASK_25G 0x8
#define NVM_CFG1_PORT_MNM_10G_DRV_SPEED_CAPABILITY_MASK_40G 0x10
#define NVM_CFG1_PORT_MNM_10G_DRV_SPEED_CAPABILITY_MASK_50G 0x20
@@ -1423,6 +1455,7 @@ struct nvm_cfg1_port {
16
#define NVM_CFG1_PORT_MNM_10G_MFW_SPEED_CAPABILITY_MASK_1G 0x1
#define NVM_CFG1_PORT_MNM_10G_MFW_SPEED_CAPABILITY_MASK_10G 0x2
+ #define NVM_CFG1_PORT_MNM_10G_MFW_SPEED_CAPABILITY_MASK_20G 0x4
#define NVM_CFG1_PORT_MNM_10G_MFW_SPEED_CAPABILITY_MASK_25G 0x8
#define NVM_CFG1_PORT_MNM_10G_MFW_SPEED_CAPABILITY_MASK_40G 0x10
#define NVM_CFG1_PORT_MNM_10G_MFW_SPEED_CAPABILITY_MASK_50G 0x20
@@ -1434,21 +1467,21 @@ struct nvm_cfg1_port {
#define NVM_CFG1_PORT_MNM_10G_DRV_LINK_SPEED_AUTONEG 0x0
#define NVM_CFG1_PORT_MNM_10G_DRV_LINK_SPEED_1G 0x1
#define NVM_CFG1_PORT_MNM_10G_DRV_LINK_SPEED_10G 0x2
+ #define NVM_CFG1_PORT_MNM_10G_DRV_LINK_SPEED_20G 0x3
#define NVM_CFG1_PORT_MNM_10G_DRV_LINK_SPEED_25G 0x4
#define NVM_CFG1_PORT_MNM_10G_DRV_LINK_SPEED_40G 0x5
#define NVM_CFG1_PORT_MNM_10G_DRV_LINK_SPEED_50G 0x6
#define NVM_CFG1_PORT_MNM_10G_DRV_LINK_SPEED_BB_100G 0x7
- #define NVM_CFG1_PORT_MNM_10G_DRV_LINK_SPEED_SMARTLINQ 0x8
#define NVM_CFG1_PORT_MNM_10G_MFW_LINK_SPEED_MASK 0x000000F0
#define NVM_CFG1_PORT_MNM_10G_MFW_LINK_SPEED_OFFSET 4
#define NVM_CFG1_PORT_MNM_10G_MFW_LINK_SPEED_AUTONEG 0x0
#define NVM_CFG1_PORT_MNM_10G_MFW_LINK_SPEED_1G 0x1
#define NVM_CFG1_PORT_MNM_10G_MFW_LINK_SPEED_10G 0x2
+ #define NVM_CFG1_PORT_MNM_10G_MFW_LINK_SPEED_20G 0x3
#define NVM_CFG1_PORT_MNM_10G_MFW_LINK_SPEED_25G 0x4
#define NVM_CFG1_PORT_MNM_10G_MFW_LINK_SPEED_40G 0x5
#define NVM_CFG1_PORT_MNM_10G_MFW_LINK_SPEED_50G 0x6
#define NVM_CFG1_PORT_MNM_10G_MFW_LINK_SPEED_BB_100G 0x7
- #define NVM_CFG1_PORT_MNM_10G_MFW_LINK_SPEED_SMARTLINQ 0x8
/* This field defines the board technology
 * (backplane, transceiver, external PHY)
*/
@@ -1490,6 +1523,7 @@ struct nvm_cfg1_port {
#define NVM_CFG1_PORT_MNM_25G_DRV_SPEED_CAPABILITY_MASK_OFFSET 0
#define NVM_CFG1_PORT_MNM_25G_DRV_SPEED_CAPABILITY_MASK_1G 0x1
#define NVM_CFG1_PORT_MNM_25G_DRV_SPEED_CAPABILITY_MASK_10G 0x2
+ #define NVM_CFG1_PORT_MNM_25G_DRV_SPEED_CAPABILITY_MASK_20G 0x4
#define NVM_CFG1_PORT_MNM_25G_DRV_SPEED_CAPABILITY_MASK_25G 0x8
#define NVM_CFG1_PORT_MNM_25G_DRV_SPEED_CAPABILITY_MASK_40G 0x10
#define NVM_CFG1_PORT_MNM_25G_DRV_SPEED_CAPABILITY_MASK_50G 0x20
@@ -1501,6 +1535,7 @@ struct nvm_cfg1_port {
16
#define NVM_CFG1_PORT_MNM_25G_MFW_SPEED_CAPABILITY_MASK_1G 0x1
#define NVM_CFG1_PORT_MNM_25G_MFW_SPEED_CAPABILITY_MASK_10G 0x2
+ #define NVM_CFG1_PORT_MNM_25G_MFW_SPEED_CAPABILITY_MASK_20G 0x4
#define NVM_CFG1_PORT_MNM_25G_MFW_SPEED_CAPABILITY_MASK_25G 0x8
#define NVM_CFG1_PORT_MNM_25G_MFW_SPEED_CAPABILITY_MASK_40G 0x10
#define NVM_CFG1_PORT_MNM_25G_MFW_SPEED_CAPABILITY_MASK_50G 0x20
@@ -1512,21 +1547,21 @@ struct nvm_cfg1_port {
#define NVM_CFG1_PORT_MNM_25G_DRV_LINK_SPEED_AUTONEG 0x0
#define NVM_CFG1_PORT_MNM_25G_DRV_LINK_SPEED_1G 0x1
#define NVM_CFG1_PORT_MNM_25G_DRV_LINK_SPEED_10G 0x2
+ #define NVM_CFG1_PORT_MNM_25G_DRV_LINK_SPEED_20G 0x3
#define NVM_CFG1_PORT_MNM_25G_DRV_LINK_SPEED_25G 0x4
#define NVM_CFG1_PORT_MNM_25G_DRV_LINK_SPEED_40G 0x5
#define NVM_CFG1_PORT_MNM_25G_DRV_LINK_SPEED_50G 0x6
#define NVM_CFG1_PORT_MNM_25G_DRV_LINK_SPEED_BB_100G 0x7
- #define NVM_CFG1_PORT_MNM_25G_DRV_LINK_SPEED_SMARTLINQ 0x8
#define NVM_CFG1_PORT_MNM_25G_MFW_LINK_SPEED_MASK 0x000000F0
#define NVM_CFG1_PORT_MNM_25G_MFW_LINK_SPEED_OFFSET 4
#define NVM_CFG1_PORT_MNM_25G_MFW_LINK_SPEED_AUTONEG 0x0
#define NVM_CFG1_PORT_MNM_25G_MFW_LINK_SPEED_1G 0x1
#define NVM_CFG1_PORT_MNM_25G_MFW_LINK_SPEED_10G 0x2
+ #define NVM_CFG1_PORT_MNM_25G_MFW_LINK_SPEED_20G 0x3
#define NVM_CFG1_PORT_MNM_25G_MFW_LINK_SPEED_25G 0x4
#define NVM_CFG1_PORT_MNM_25G_MFW_LINK_SPEED_40G 0x5
#define NVM_CFG1_PORT_MNM_25G_MFW_LINK_SPEED_50G 0x6
#define NVM_CFG1_PORT_MNM_25G_MFW_LINK_SPEED_BB_100G 0x7
- #define NVM_CFG1_PORT_MNM_25G_MFW_LINK_SPEED_SMARTLINQ 0x8
/* This field defines the board technology
 * (backplane, transceiver, external PHY)
*/
@@ -1568,6 +1603,7 @@ struct nvm_cfg1_port {
#define NVM_CFG1_PORT_MNM_40G_DRV_SPEED_CAPABILITY_MASK_OFFSET 0
#define NVM_CFG1_PORT_MNM_40G_DRV_SPEED_CAPABILITY_MASK_1G 0x1
#define NVM_CFG1_PORT_MNM_40G_DRV_SPEED_CAPABILITY_MASK_10G 0x2
+ #define NVM_CFG1_PORT_MNM_40G_DRV_SPEED_CAPABILITY_MASK_20G 0x4
#define NVM_CFG1_PORT_MNM_40G_DRV_SPEED_CAPABILITY_MASK_25G 0x8
#define NVM_CFG1_PORT_MNM_40G_DRV_SPEED_CAPABILITY_MASK_40G 0x10
#define NVM_CFG1_PORT_MNM_40G_DRV_SPEED_CAPABILITY_MASK_50G 0x20
@@ -1579,6 +1615,7 @@ struct nvm_cfg1_port {
16
#define NVM_CFG1_PORT_MNM_40G_MFW_SPEED_CAPABILITY_MASK_1G 0x1
#define NVM_CFG1_PORT_MNM_40G_MFW_SPEED_CAPABILITY_MASK_10G 0x2
+ #define NVM_CFG1_PORT_MNM_40G_MFW_SPEED_CAPABILITY_MASK_20G 0x4
#define NVM_CFG1_PORT_MNM_40G_MFW_SPEED_CAPABILITY_MASK_25G 0x8
#define NVM_CFG1_PORT_MNM_40G_MFW_SPEED_CAPABILITY_MASK_40G 0x10
#define NVM_CFG1_PORT_MNM_40G_MFW_SPEED_CAPABILITY_MASK_50G 0x20
@@ -1590,21 +1627,21 @@ struct nvm_cfg1_port {
#define NVM_CFG1_PORT_MNM_40G_DRV_LINK_SPEED_AUTONEG 0x0
#define NVM_CFG1_PORT_MNM_40G_DRV_LINK_SPEED_1G 0x1
#define NVM_CFG1_PORT_MNM_40G_DRV_LINK_SPEED_10G 0x2
+ #define NVM_CFG1_PORT_MNM_40G_DRV_LINK_SPEED_20G 0x3
#define NVM_CFG1_PORT_MNM_40G_DRV_LINK_SPEED_25G 0x4
#define NVM_CFG1_PORT_MNM_40G_DRV_LINK_SPEED_40G 0x5
#define NVM_CFG1_PORT_MNM_40G_DRV_LINK_SPEED_50G 0x6
#define NVM_CFG1_PORT_MNM_40G_DRV_LINK_SPEED_BB_100G 0x7
- #define NVM_CFG1_PORT_MNM_40G_DRV_LINK_SPEED_SMARTLINQ 0x8
#define NVM_CFG1_PORT_MNM_40G_MFW_LINK_SPEED_MASK 0x000000F0
#define NVM_CFG1_PORT_MNM_40G_MFW_LINK_SPEED_OFFSET 4
#define NVM_CFG1_PORT_MNM_40G_MFW_LINK_SPEED_AUTONEG 0x0
#define NVM_CFG1_PORT_MNM_40G_MFW_LINK_SPEED_1G 0x1
#define NVM_CFG1_PORT_MNM_40G_MFW_LINK_SPEED_10G 0x2
+ #define NVM_CFG1_PORT_MNM_40G_MFW_LINK_SPEED_20G 0x3
#define NVM_CFG1_PORT_MNM_40G_MFW_LINK_SPEED_25G 0x4
#define NVM_CFG1_PORT_MNM_40G_MFW_LINK_SPEED_40G 0x5
#define NVM_CFG1_PORT_MNM_40G_MFW_LINK_SPEED_50G 0x6
#define NVM_CFG1_PORT_MNM_40G_MFW_LINK_SPEED_BB_100G 0x7
- #define NVM_CFG1_PORT_MNM_40G_MFW_LINK_SPEED_SMARTLINQ 0x8
/* This field defines the board technology
 * (backplane, transceiver, external PHY)
*/
@@ -1646,6 +1683,7 @@ struct nvm_cfg1_port {
#define NVM_CFG1_PORT_MNM_50G_DRV_SPEED_CAPABILITY_MASK_OFFSET 0
#define NVM_CFG1_PORT_MNM_50G_DRV_SPEED_CAPABILITY_MASK_1G 0x1
#define NVM_CFG1_PORT_MNM_50G_DRV_SPEED_CAPABILITY_MASK_10G 0x2
+ #define NVM_CFG1_PORT_MNM_50G_DRV_SPEED_CAPABILITY_MASK_20G 0x4
#define NVM_CFG1_PORT_MNM_50G_DRV_SPEED_CAPABILITY_MASK_25G 0x8
#define NVM_CFG1_PORT_MNM_50G_DRV_SPEED_CAPABILITY_MASK_40G 0x10
#define NVM_CFG1_PORT_MNM_50G_DRV_SPEED_CAPABILITY_MASK_50G 0x20
@@ -1658,6 +1696,7 @@ struct nvm_cfg1_port {
16
#define NVM_CFG1_PORT_MNM_50G_MFW_SPEED_CAPABILITY_MASK_1G 0x1
#define NVM_CFG1_PORT_MNM_50G_MFW_SPEED_CAPABILITY_MASK_10G 0x2
+ #define NVM_CFG1_PORT_MNM_50G_MFW_SPEED_CAPABILITY_MASK_20G 0x4
#define NVM_CFG1_PORT_MNM_50G_MFW_SPEED_CAPABILITY_MASK_25G 0x8
#define NVM_CFG1_PORT_MNM_50G_MFW_SPEED_CAPABILITY_MASK_40G 0x10
#define NVM_CFG1_PORT_MNM_50G_MFW_SPEED_CAPABILITY_MASK_50G 0x20
@@ -1670,21 +1709,21 @@ struct nvm_cfg1_port {
#define NVM_CFG1_PORT_MNM_50G_DRV_LINK_SPEED_AUTONEG 0x0
#define NVM_CFG1_PORT_MNM_50G_DRV_LINK_SPEED_1G 0x1
#define NVM_CFG1_PORT_MNM_50G_DRV_LINK_SPEED_10G 0x2
+ #define NVM_CFG1_PORT_MNM_50G_DRV_LINK_SPEED_20G 0x3
#define NVM_CFG1_PORT_MNM_50G_DRV_LINK_SPEED_25G 0x4
#define NVM_CFG1_PORT_MNM_50G_DRV_LINK_SPEED_40G 0x5
#define NVM_CFG1_PORT_MNM_50G_DRV_LINK_SPEED_50G 0x6
#define NVM_CFG1_PORT_MNM_50G_DRV_LINK_SPEED_BB_100G 0x7
- #define NVM_CFG1_PORT_MNM_50G_DRV_LINK_SPEED_SMARTLINQ 0x8
#define NVM_CFG1_PORT_MNM_50G_MFW_LINK_SPEED_MASK 0x000000F0
#define NVM_CFG1_PORT_MNM_50G_MFW_LINK_SPEED_OFFSET 4
#define NVM_CFG1_PORT_MNM_50G_MFW_LINK_SPEED_AUTONEG 0x0
#define NVM_CFG1_PORT_MNM_50G_MFW_LINK_SPEED_1G 0x1
#define NVM_CFG1_PORT_MNM_50G_MFW_LINK_SPEED_10G 0x2
+ #define NVM_CFG1_PORT_MNM_50G_MFW_LINK_SPEED_20G 0x3
#define NVM_CFG1_PORT_MNM_50G_MFW_LINK_SPEED_25G 0x4
#define NVM_CFG1_PORT_MNM_50G_MFW_LINK_SPEED_40G 0x5
#define NVM_CFG1_PORT_MNM_50G_MFW_LINK_SPEED_50G 0x6
#define NVM_CFG1_PORT_MNM_50G_MFW_LINK_SPEED_BB_100G 0x7
- #define NVM_CFG1_PORT_MNM_50G_MFW_LINK_SPEED_SMARTLINQ 0x8
/* This field defines the board technology
 * (backplane, transceiver, external PHY)
*/
@@ -1726,6 +1765,7 @@ struct nvm_cfg1_port {
#define NVM_CFG1_PORT_MNM_100G_DRV_SPEED_CAP_MASK_OFFSET 0
#define NVM_CFG1_PORT_MNM_100G_DRV_SPEED_CAP_MASK_1G 0x1
#define NVM_CFG1_PORT_MNM_100G_DRV_SPEED_CAP_MASK_10G 0x2
+ #define NVM_CFG1_PORT_MNM_100G_DRV_SPEED_CAP_MASK_20G 0x4
#define NVM_CFG1_PORT_MNM_100G_DRV_SPEED_CAP_MASK_25G 0x8
#define NVM_CFG1_PORT_MNM_100G_DRV_SPEED_CAP_MASK_40G 0x10
#define NVM_CFG1_PORT_MNM_100G_DRV_SPEED_CAP_MASK_50G 0x20
@@ -1735,6 +1775,7 @@ struct nvm_cfg1_port {
#define NVM_CFG1_PORT_MNM_100G_MFW_SPEED_CAP_MASK_OFFSET 16
#define NVM_CFG1_PORT_MNM_100G_MFW_SPEED_CAP_MASK_1G 0x1
#define NVM_CFG1_PORT_MNM_100G_MFW_SPEED_CAP_MASK_10G 0x2
+ #define NVM_CFG1_PORT_MNM_100G_MFW_SPEED_CAP_MASK_20G 0x4
#define NVM_CFG1_PORT_MNM_100G_MFW_SPEED_CAP_MASK_25G 0x8
#define NVM_CFG1_PORT_MNM_100G_MFW_SPEED_CAP_MASK_40G 0x10
#define NVM_CFG1_PORT_MNM_100G_MFW_SPEED_CAP_MASK_50G 0x20
@@ -1745,21 +1786,21 @@ struct nvm_cfg1_port {
#define NVM_CFG1_PORT_MNM_100G_DRV_LINK_SPEED_AUTONEG 0x0
#define NVM_CFG1_PORT_MNM_100G_DRV_LINK_SPEED_1G 0x1
#define NVM_CFG1_PORT_MNM_100G_DRV_LINK_SPEED_10G 0x2
+ #define NVM_CFG1_PORT_MNM_100G_DRV_LINK_SPEED_20G 0x3
#define NVM_CFG1_PORT_MNM_100G_DRV_LINK_SPEED_25G 0x4
#define NVM_CFG1_PORT_MNM_100G_DRV_LINK_SPEED_40G 0x5
#define NVM_CFG1_PORT_MNM_100G_DRV_LINK_SPEED_50G 0x6
#define NVM_CFG1_PORT_MNM_100G_DRV_LINK_SPEED_BB_100G 0x7
- #define NVM_CFG1_PORT_MNM_100G_DRV_LINK_SPEED_SMARTLINQ 0x8
#define NVM_CFG1_PORT_MNM_100G_MFW_LINK_SPEED_MASK 0x000000F0
#define NVM_CFG1_PORT_MNM_100G_MFW_LINK_SPEED_OFFSET 4
#define NVM_CFG1_PORT_MNM_100G_MFW_LINK_SPEED_AUTONEG 0x0
#define NVM_CFG1_PORT_MNM_100G_MFW_LINK_SPEED_1G 0x1
#define NVM_CFG1_PORT_MNM_100G_MFW_LINK_SPEED_10G 0x2
+ #define NVM_CFG1_PORT_MNM_100G_MFW_LINK_SPEED_20G 0x3
#define NVM_CFG1_PORT_MNM_100G_MFW_LINK_SPEED_25G 0x4
#define NVM_CFG1_PORT_MNM_100G_MFW_LINK_SPEED_40G 0x5
#define NVM_CFG1_PORT_MNM_100G_MFW_LINK_SPEED_50G 0x6
#define NVM_CFG1_PORT_MNM_100G_MFW_LINK_SPEED_BB_100G 0x7
- #define NVM_CFG1_PORT_MNM_100G_MFW_LINK_SPEED_SMARTLINQ 0x8
/* This field defines the board technology
 * (backplane, transceiver, external PHY)
*/
@@ -1795,7 +1836,13 @@ struct nvm_cfg1_port {
#define NVM_CFG1_PORT_MNM_100G_FEC_FORCE_MODE_FIRECODE 0x1
#define NVM_CFG1_PORT_MNM_100G_FEC_FORCE_MODE_RS 0x2
#define NVM_CFG1_PORT_MNM_100G_FEC_FORCE_MODE_AUTO 0x7
- u32 reserved[116]; /* 0x88 */
+ u32 temperature; /* 0x88 */
+ #define NVM_CFG1_PORT_PHY_MODULE_DEAD_TEMP_TH_MASK 0x000000FF
+ #define NVM_CFG1_PORT_PHY_MODULE_DEAD_TEMP_TH_OFFSET 0
+ #define NVM_CFG1_PORT_PHY_MODULE_ALOM_FAN_ON_TEMP_TH_MASK \
+ 0x0000FF00
+ #define NVM_CFG1_PORT_PHY_MODULE_ALOM_FAN_ON_TEMP_TH_OFFSET 8
+ u32 reserved[115]; /* 0x8C */
};
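
The port's former reserved space now carries a temperature word packing two 8-bit thresholds (module-dead and fan-on). A short sketch, again with p_port an assumed pointer to struct nvm_cfg1_port:

	u8 dead_th = (u8)((p_port->temperature &
			   NVM_CFG1_PORT_PHY_MODULE_DEAD_TEMP_TH_MASK) >>
			  NVM_CFG1_PORT_PHY_MODULE_DEAD_TEMP_TH_OFFSET);
	u8 fan_on_th = (u8)((p_port->temperature &
			     NVM_CFG1_PORT_PHY_MODULE_ALOM_FAN_ON_TEMP_TH_MASK) >>
			    NVM_CFG1_PORT_PHY_MODULE_ALOM_FAN_ON_TEMP_TH_OFFSET);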
struct nvm_cfg1_func {
@@ -1910,6 +1957,7 @@ struct nvm_cfg1_func {
#define NVM_CFG1_FUNC_NPAR_ENABLED_PROTOCOL_ETHERNET 0x1
#define NVM_CFG1_FUNC_NPAR_ENABLED_PROTOCOL_FCOE 0x2
#define NVM_CFG1_FUNC_NPAR_ENABLED_PROTOCOL_ISCSI 0x4
+ #define NVM_CFG1_FUNC_NPAR_ENABLED_PROTOCOL_RDMA 0x8
u32 reserved[8]; /* 0x30 */
};
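
The NVM_CFG1_FUNC_NPAR_ENABLED_PROTOCOL_* values are one-hot bits, so the new RDMA bit can be enabled alongside Ethernet rather than replacing it. Sketch, with npar_cfg an assumed dword read from the function config:

	if (npar_cfg & NVM_CFG1_FUNC_NPAR_ENABLED_PROTOCOL_RDMA) {
		/* RDMA personality is enabled for this PF */
	}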
diff --git a/drivers/net/qede/base/reg_addr.h b/drivers/net/qede/base/reg_addr.h
index 60286545..ad15d28a 100644
--- a/drivers/net/qede/base/reg_addr.h
+++ b/drivers/net/qede/base/reg_addr.h
@@ -1205,3 +1205,20 @@
#define NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 0x501a80UL
#define NIG_REG_LLH_FUNC_FILTER_MODE_BB_K2 0x501ac0UL
#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_BB_K2 0x501b00UL
+
+#define PSWRQ2_REG_WR_MBS0 0x240400UL
+#define PGLUE_B_REG_MASTER_WRITE_PAD_ENABLE 0x2aae30UL
+#define DORQ_REG_PF_USAGE_CNT 0x1009c0UL
+#define DORQ_REG_DPM_FORCE_ABORT 0x1009d8UL
+#define DORQ_REG_PF_OVFL_STICKY 0x1009d0UL
+#define DORQ_REG_INT_STS 0x100180UL
+ #define DORQ_REG_INT_STS_DB_DROP (0x1 << 1)
+ #define DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR (0x1 << 2)
+ #define DORQ_REG_INT_STS_DORQ_FIFO_AFULL (0x1 << 3)
+#define DORQ_REG_DB_DROP_DETAILS_REL 0x100a28UL
+#define DORQ_REG_INT_STS_WR 0x100188UL
+#define DORQ_REG_DB_DROP_DETAILS_REASON 0x100a20UL
+#define MCP_REG_CPU_PROGRAM_COUNTER 0xe0501cUL
+ #define MCP_REG_CPU_STATE_SOFT_HALTED (0x1 << 10)
+#define PRS_REG_SEARCH_TENANT_ID 0x1f044cUL
+#define PGLUE_B_REG_VF_BAR1_SIZE 0x2aae68UL
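
The new DORQ registers give the driver visibility into doorbell drops. A sketch using ecore_rd(), the base driver's register-read helper; the recovery step shown is illustrative:

	u32 int_sts = ecore_rd(p_hwfn, p_ptt, DORQ_REG_INT_STS);
	if (int_sts & DORQ_REG_INT_STS_DB_DROP) {
		u32 reason = ecore_rd(p_hwfn, p_ptt,
				      DORQ_REG_DB_DROP_DETAILS_REASON);
		/* log 'reason', then e.g. write DORQ_REG_DPM_FORCE_ABORT
		 * to abort outstanding DPM doorbells and recover
		 */
	}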