path: root/drivers/net/qede/base
Diffstat (limited to 'drivers/net/qede/base')
-rw-r--r--  drivers/net/qede/base/bcm_osal.c |    4
-rw-r--r--  drivers/net/qede/base/bcm_osal.h |   40
-rw-r--r--  drivers/net/qede/base/common_hsi.h |  196
-rw-r--r--  drivers/net/qede/base/ecore.h |  193
-rw-r--r--  drivers/net/qede/base/ecore_chain.h |  142
-rw-r--r--  drivers/net/qede/base/ecore_cxt.c |  409
-rw-r--r--  drivers/net/qede/base/ecore_cxt.h |   68
-rw-r--r--  drivers/net/qede/base/ecore_cxt_api.h |   24
-rw-r--r--  drivers/net/qede/base/ecore_dcbx.c |  456
-rw-r--r--  drivers/net/qede/base/ecore_dcbx.h |   10
-rw-r--r--  drivers/net/qede/base/ecore_dcbx_api.h |    5
-rw-r--r--  drivers/net/qede/base/ecore_dev.c | 2342
-rw-r--r--  drivers/net/qede/base/ecore_dev_api.h |  127
-rw-r--r--  drivers/net/qede/base/ecore_gtt_reg_addr.h |   20
-rw-r--r--  drivers/net/qede/base/ecore_hsi_common.h |  911
-rw-r--r--  drivers/net/qede/base/ecore_hsi_debug_tools.h |  221
-rw-r--r--  drivers/net/qede/base/ecore_hsi_eth.h | 2079
-rw-r--r--  drivers/net/qede/base/ecore_hsi_init_tool.h |   90
-rw-r--r--  drivers/net/qede/base/ecore_hw.c |   55
-rw-r--r--  drivers/net/qede/base/ecore_init_fw_funcs.c | 1454
-rw-r--r--  drivers/net/qede/base/ecore_init_fw_funcs.h |  172
-rw-r--r--  drivers/net/qede/base/ecore_init_ops.c |   26
-rw-r--r--  drivers/net/qede/base/ecore_int.c |  171
-rw-r--r--  drivers/net/qede/base/ecore_int.h |   10
-rw-r--r--  drivers/net/qede/base/ecore_int_api.h |   51
-rw-r--r--  drivers/net/qede/base/ecore_iov_api.h |   69
-rw-r--r--  drivers/net/qede/base/ecore_iro.h |    8
-rw-r--r--  drivers/net/qede/base/ecore_iro_values.h |   32
-rw-r--r--  drivers/net/qede/base/ecore_l2.c |  892
-rw-r--r--  drivers/net/qede/base/ecore_l2.h |  149
-rw-r--r--  drivers/net/qede/base/ecore_l2_api.h |  137
-rw-r--r--  drivers/net/qede/base/ecore_mcp.c | 1099
-rw-r--r--  drivers/net/qede/base/ecore_mcp.h |  192
-rw-r--r--  drivers/net/qede/base/ecore_mcp_api.h |  349
-rw-r--r--  drivers/net/qede/base/ecore_mng_tlv.c | 1535
-rw-r--r--  drivers/net/qede/base/ecore_proto_if.h |   16
-rw-r--r--  drivers/net/qede/base/ecore_rt_defs.h |  623
-rw-r--r--  drivers/net/qede/base/ecore_sp_api.h |   19
-rw-r--r--  drivers/net/qede/base/ecore_sp_commands.c |  376
-rw-r--r--  drivers/net/qede/base/ecore_sp_commands.h |   23
-rw-r--r--  drivers/net/qede/base/ecore_spq.c |  108
-rw-r--r--  drivers/net/qede/base/ecore_spq.h |   36
-rw-r--r--  drivers/net/qede/base/ecore_sriov.c | 1221
-rw-r--r--  drivers/net/qede/base/ecore_sriov.h |   28
-rw-r--r--  drivers/net/qede/base/ecore_status.h |    1
-rw-r--r--  drivers/net/qede/base/ecore_utils.h |    6
-rw-r--r--  drivers/net/qede/base/ecore_vf.c |  418
-rw-r--r--  drivers/net/qede/base/ecore_vf.h |   85
-rw-r--r--  drivers/net/qede/base/ecore_vf_api.h |   21
-rw-r--r--  drivers/net/qede/base/ecore_vfpf_if.h |   55
-rw-r--r--  drivers/net/qede/base/eth_common.h |   36
-rw-r--r--  drivers/net/qede/base/mcp_public.h |  377
-rw-r--r--  drivers/net/qede/base/nvm_cfg.h |  541
-rw-r--r--  drivers/net/qede/base/reg_addr.h |   59
54 files changed, 12138 insertions(+), 5649 deletions(-)
diff --git a/drivers/net/qede/base/bcm_osal.c b/drivers/net/qede/base/bcm_osal.c
index 28be9587..3f895cd4 100644
--- a/drivers/net/qede/base/bcm_osal.c
+++ b/drivers/net/qede/base/bcm_osal.c
@@ -98,9 +98,7 @@ inline u32 qede_find_first_zero_bit(unsigned long *addr, u32 limit)
u32 nwords = 0;
OSAL_BUILD_BUG_ON(!limit);
nwords = (limit - 1) / OSAL_BITS_PER_UL + 1;
- for (i = 0; i < nwords; i++)
- if (~(addr[i] != 0))
- break;
+ for (i = 0; i < nwords && ~(addr[i]) == 0; i++);
return (i == nwords) ? limit : i * OSAL_BITS_PER_UL + qede_ffz(addr[i]);
}
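For illustration, a minimal sketch of the fixed scan semantics (qede_ffz() and OSAL_BITS_PER_UL come from bcm_osal.h; the map values are made up):

    /* 128-bit map on a 64-bit build: word 0 has no zero bits,
     * word 1 has bit 3 clear.
     */
    unsigned long map[2];
    map[0] = ~0UL;
    map[1] = ~0UL & ~(1UL << 3);
    /* The new loop skips word 0 (since ~map[0] == 0) and stops at word 1,
     * returning 1 * OSAL_BITS_PER_UL + qede_ffz(map[1]) = 64 + 3 = 67.
     * The old "if (~(addr[i] != 0)) break;" test was true for any word
     * value, so the scan always aborted on word 0.
     */
    u32 idx = qede_find_first_zero_bit(map, 128);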
diff --git a/drivers/net/qede/base/bcm_osal.h b/drivers/net/qede/base/bcm_osal.h
index 0b446f2e..32c9b251 100644
--- a/drivers/net/qede/base/bcm_osal.h
+++ b/drivers/net/qede/base/bcm_osal.h
@@ -18,6 +18,7 @@
#include <rte_cycles.h>
#include <rte_debug.h>
#include <rte_ether.h>
+#include <rte_io.h>
/* Forward declaration */
struct ecore_dev;
@@ -88,8 +89,12 @@ typedef int bool;
#define OSAL_ALLOC(dev, GFP, size) rte_malloc("qede", size, 0)
#define OSAL_ZALLOC(dev, GFP, size) rte_zmalloc("qede", size, 0)
#define OSAL_CALLOC(dev, GFP, num, size) rte_calloc("qede", num, size, 0)
-#define OSAL_VALLOC(dev, size) rte_malloc("qede", size, 0)
-#define OSAL_FREE(dev, memory) rte_free((void *)memory)
+#define OSAL_VZALLOC(dev, size) rte_zmalloc("qede", size, 0)
+#define OSAL_FREE(dev, memory) \
+ do { \
+ rte_free((void *)memory); \
+ memory = OSAL_NULL; \
+ } while (0)
#define OSAL_VFREE(dev, memory) OSAL_FREE(dev, memory)
#define OSAL_MEM_ZERO(mem, size) bzero(mem, size)
#define OSAL_MEMCPY(dst, src, size) rte_memcpy(dst, src, size)
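Since OSAL_FREE now clears the caller's pointer, an accidental repeated free degrades to rte_free(NULL), which is a no-op. A minimal sketch of the effect ('buf' is illustrative; any lvalue freed through the macro behaves the same way):

    void *buf = OSAL_ALLOC(p_dev, GFP_KERNEL, 64);
    OSAL_FREE(p_dev, buf); /* rte_free(buf); buf = OSAL_NULL */
    OSAL_FREE(p_dev, buf); /* now harmless: rte_free(NULL) does nothing */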
@@ -113,18 +118,18 @@ void *osal_dma_alloc_coherent_aligned(struct ecore_dev *, dma_addr_t *,
/* HW reads/writes */
-#define DIRECT_REG_RD(_dev, _reg_addr) \
- (*((volatile u32 *) (_reg_addr)))
+#define DIRECT_REG_RD(_dev, _reg_addr) rte_read32(_reg_addr)
#define REG_RD(_p_hwfn, _reg_offset) \
DIRECT_REG_RD(_p_hwfn, \
((u8 *)(uintptr_t)(_p_hwfn->regview) + (_reg_offset)))
-#define DIRECT_REG_WR16(_reg_addr, _val) \
- (*((volatile u16 *)(_reg_addr)) = _val)
+#define DIRECT_REG_WR16(_reg_addr, _val) rte_write16((_val), (_reg_addr))
+
+#define DIRECT_REG_WR(_dev, _reg_addr, _val) rte_write32((_val), (_reg_addr))
-#define DIRECT_REG_WR(_dev, _reg_addr, _val) \
- (*((volatile u32 *)(_reg_addr)) = _val)
+#define DIRECT_REG_WR_RELAXED(_dev, _reg_addr, _val) \
+ rte_write32_relaxed((_val), (_reg_addr))
#define REG_WR(_p_hwfn, _reg_offset, _val) \
DIRECT_REG_WR(NULL, \
@@ -134,9 +139,10 @@ void *osal_dma_alloc_coherent_aligned(struct ecore_dev *, dma_addr_t *,
DIRECT_REG_WR16(((u8 *)(uintptr_t)(_p_hwfn->regview) + \
(_reg_offset)), (u16)_val)
-#define DOORBELL(_p_hwfn, _db_addr, _val) \
- DIRECT_REG_WR(_p_hwfn, \
- ((u8 *)(uintptr_t)(_p_hwfn->doorbells) + (_db_addr)), (u32)_val)
+#define DOORBELL(_p_hwfn, _db_addr, _val) \
+ DIRECT_REG_WR_RELAXED((_p_hwfn), \
+ ((u8 *)(uintptr_t)(_p_hwfn->doorbells) + \
+ (_db_addr)), (u32)_val)
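rte_write32_relaxed() omits the barrier that rte_write32() issues before the store, so the doorbell macro now relies on the caller ordering its descriptor writes explicitly. A sketch of the expected ring pattern (db_offset and prod_val are illustrative):

    /* Make the BD writes globally visible, then ring. */
    OSAL_WMB(p_dev);                       /* rte_wmb() */
    DOORBELL(p_hwfn, db_offset, prod_val); /* relaxed 32-bit MMIO write */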
/* Mutexes */
@@ -162,6 +168,7 @@ typedef pthread_mutex_t osal_mutex_t;
#define OSAL_DPC_ALLOC(hwfn) OSAL_ALLOC(hwfn, GFP, sizeof(osal_dpc_t))
#define OSAL_DPC_INIT(dpc, hwfn) nothing
#define OSAL_POLL_MODE_DPC(hwfn) nothing
+#define OSAL_DPC_SYNC(hwfn) nothing
/* Lists */
@@ -286,7 +293,8 @@ typedef struct osal_list_t {
#define OSAL_WMB(dev) rte_wmb()
#define OSAL_DMA_SYNC(dev, addr, length, is_post) nothing
-#define OSAL_BITS_PER_BYTE (8)
+#define OSAL_BIT(nr) (1UL << (nr))
+#define OSAL_BITS_PER_BYTE (8)
#define OSAL_BITS_PER_UL (sizeof(unsigned long) * OSAL_BITS_PER_BYTE)
#define OSAL_BITS_PER_UL_MASK (OSAL_BITS_PER_UL - 1)
@@ -314,6 +322,8 @@ u32 qede_find_first_zero_bit(unsigned long *, u32);
#define OSAL_BUILD_BUG_ON(cond) nothing
#define ETH_ALEN ETHER_ADDR_LEN
+#define OSAL_BITMAP_WEIGHT(bitmap, count) 0
+
#define OSAL_LINK_UPDATE(hwfn) qed_link_update(hwfn)
#define OSAL_DCBX_AEN(hwfn, mib_type) nothing
@@ -323,6 +333,7 @@ u32 qede_find_first_zero_bit(unsigned long *, u32);
#define OSAL_VF_SEND_MSG2PF(dev, done, msg, reply_addr, msg_size, reply_size) 0
#define OSAL_VF_CQE_COMPLETION(_dev_p, _cqe, _protocol) (0)
#define OSAL_PF_VF_MSG(hwfn, vfid) 0
+#define OSAL_PF_VF_MALICIOUS(hwfn, vfid) nothing
#define OSAL_IOV_CHK_UCAST(hwfn, vfid, params) 0
#define OSAL_IOV_POST_START_VPORT(hwfn, vf, vport_id, opaque_fid) nothing
#define OSAL_IOV_VF_ACQUIRE(hwfn, vfid) 0
@@ -391,6 +402,7 @@ u32 qede_osal_log2(u32);
#define OSAL_STRCPY(dst, string) strcpy(dst, string)
#define OSAL_STRNCPY(dst, string, len) strncpy(dst, string, len)
#define OSAL_STRCMP(str1, str2) strcmp(str1, str2)
+#define OSAL_STRTOUL(str, base, res) 0
#define OSAL_INLINE inline
#define OSAL_REG_ADDR(_p_hwfn, _offset) \
@@ -409,5 +421,7 @@ void qede_get_mcp_proto_stats(struct ecore_dev *, enum ecore_mcp_protocol_type,
qede_get_mcp_proto_stats(dev, type, stats)
#define OSAL_SLOWPATH_IRQ_REQ(p_hwfn) (0)
-
+#define OSAL_MFW_TLV_REQ(p_hwfn) (0)
+#define OSAL_MFW_FILL_TLV_DATA(type, buf, data) (0)
+#define OSAL_PF_VALIDATE_MODIFY_TUNN_CONFIG(p_hwfn, mask, b_update, tunn) 0
#endif /* __BCM_OSAL_H */
diff --git a/drivers/net/qede/base/common_hsi.h b/drivers/net/qede/base/common_hsi.h
index b431c78d..cbcde227 100644
--- a/drivers/net/qede/base/common_hsi.h
+++ b/drivers/net/qede/base/common_hsi.h
@@ -78,8 +78,16 @@
#define CORE_SPQE_PAGE_SIZE_BYTES 4096
-#define MAX_NUM_LL2_RX_QUEUES 32
-#define MAX_NUM_LL2_TX_STATS_COUNTERS 32
+/*
+ * LL2 queues are usually opened in TX-RX pairs.
+ * There is a hard restriction on the number of RX queues (limited by
+ * Tstorm RAM) and on TX stats counters (Pstorm RAM), while the number
+ * of TX queues is almost unlimited.
+ * The constants differ so as to allow asymmetric LL2 connections.
+ */
+
+#define MAX_NUM_LL2_RX_QUEUES 48
+#define MAX_NUM_LL2_TX_STATS_COUNTERS 48
/****************************************************************************/
@@ -89,7 +97,7 @@
#define FW_MAJOR_VERSION 8
-#define FW_MINOR_VERSION 10
+#define FW_MINOR_VERSION 18
#define FW_REVISION_VERSION 9
#define FW_ENGINEERING_VERSION 0
@@ -107,20 +115,21 @@
#define MAX_NUM_PFS (MAX_NUM_PFS_K2)
#define MAX_NUM_OF_PFS_IN_CHIP (16) /* On both engines */
-#define MAX_NUM_VFS_K2 (192)
#define MAX_NUM_VFS_BB (120)
-#define MAX_NUM_VFS (MAX_NUM_VFS_K2)
+#define MAX_NUM_VFS_K2 (192)
+#define E4_MAX_NUM_VFS (MAX_NUM_VFS_K2)
+#define COMMON_MAX_NUM_VFS (240)
#define MAX_NUM_FUNCTIONS_BB (MAX_NUM_PFS_BB + MAX_NUM_VFS_BB)
#define MAX_NUM_FUNCTIONS_K2 (MAX_NUM_PFS_K2 + MAX_NUM_VFS_K2)
-#define MAX_NUM_FUNCTIONS (MAX_NUM_PFS + MAX_NUM_VFS)
+#define MAX_NUM_FUNCTIONS (MAX_NUM_PFS + E4_MAX_NUM_VFS)
/* in both BB and K2, the VF number starts from 16. so for arrays containing all
* possible PFs and VFs - we need a constant for this size
*/
#define MAX_FUNCTION_NUMBER_BB (MAX_NUM_PFS + MAX_NUM_VFS_BB)
#define MAX_FUNCTION_NUMBER_K2 (MAX_NUM_PFS + MAX_NUM_VFS_K2)
-#define MAX_FUNCTION_NUMBER (MAX_NUM_PFS + MAX_NUM_VFS)
+#define MAX_FUNCTION_NUMBER (MAX_NUM_PFS + E4_MAX_NUM_VFS)
#define MAX_NUM_VPORTS_K2 (208)
#define MAX_NUM_VPORTS_BB (160)
@@ -149,9 +158,10 @@
#define MAX_PHYS_VOQS (NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB)
/* CIDs */
-#define NUM_OF_CONNECTION_TYPES (8)
-#define NUM_OF_LCIDS (320)
-#define NUM_OF_LTIDS (320)
+#define E4_NUM_OF_CONNECTION_TYPES (8)
+#define NUM_OF_TASK_TYPES (8)
+#define NUM_OF_LCIDS (320)
+#define NUM_OF_LTIDS (320)
/* Clock values */
#define MASTER_CLK_FREQ_E4 (375e6)
@@ -176,6 +186,13 @@
#define CDU_VF_FL_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (12)
#define CDU_VF_FL_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0xfff)
+#define CDU_CONTEXT_VALIDATION_CFG_ENABLE_SHIFT (0)
+#define CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT (1)
+#define CDU_CONTEXT_VALIDATION_CFG_USE_TYPE (2)
+#define CDU_CONTEXT_VALIDATION_CFG_USE_REGION (3)
+#define CDU_CONTEXT_VALIDATION_CFG_USE_CID (4)
+#define CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE (5)
+
/*****************/
/* DQ CONSTANTS */
@@ -471,7 +488,6 @@
#define PXP_BAR_DQ 1
/* PTT and GTT */
-#define PXP_NUM_PF_WINDOWS 12
#define PXP_PER_PF_ENTRY_SIZE 8
#define PXP_NUM_GLOBAL_WINDOWS 243
#define PXP_GLOBAL_ENTRY_SIZE 4
@@ -496,6 +512,8 @@
#define PXP_PF_ME_OPAQUE_ADDR 0x1f8
#define PXP_PF_ME_CONCRETE_ADDR 0x1fc
+#define PXP_NUM_PF_WINDOWS 12
+
#define PXP_EXTERNAL_BAR_PF_WINDOW_START 0x1000
#define PXP_EXTERNAL_BAR_PF_WINDOW_NUM PXP_NUM_PF_WINDOWS
#define PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE 0x1000
@@ -518,8 +536,6 @@
PXP_EXTERNAL_BAR_GLOBAL_WINDOW_LENGTH - 1)
/* PF BAR */
-/*#define PXP_BAR0_START_GRC 0x1000 */
-/*#define PXP_BAR0_GRC_LENGTH 0xBFF000 */
#define PXP_BAR0_START_GRC 0x0000
#define PXP_BAR0_GRC_LENGTH 0x1C00000
#define PXP_BAR0_END_GRC \
@@ -588,7 +604,7 @@
#define SDM_OP_GEN_TRIG_AGG_INT 2
#define SDM_OP_GEN_TRIG_LOADER 4
#define SDM_OP_GEN_TRIG_INDICATE_ERROR 6
-#define SDM_OP_GEN_TRIG_RELEASE_THREAD 7
+#define SDM_OP_GEN_TRIG_INC_ORDER_CNT 9
/***********************************************************/
/* Completion types */
@@ -611,6 +627,7 @@
#define SDM_COMP_TYPE_RELEASE_THREAD 7
/* Write to local RAM as a completion */
#define SDM_COMP_TYPE_RAM 8
+#define SDM_COMP_TYPE_INC_ORDER_CNT 9 /* Applicable only for E4 */
/******************/
@@ -721,13 +738,10 @@ union event_ring_data {
u8 bytes[8] /* Byte Array */;
struct vf_pf_channel_eqe_data vf_pf_channel /* VF-PF Channel data */;
struct iscsi_eqe_data iscsi_info /* Dedicated fields to iscsi data */;
- /* Dedicated field for RoCE affiliated asynchronous error */;
- struct regpair roceHandle;
+ struct regpair roceHandle /* Dedicated field for RDMA data */;
struct malicious_vf_eqe_data malicious_vf /* Malicious VF data */;
struct initial_cleanup_eqe_data vf_init_cleanup
/* VF Initial Cleanup data */;
-/* Host handle for the Async Completions */
- struct regpair iwarp_handle;
};
/* Event Ring Entry */
struct event_ring_entry {
@@ -768,6 +782,8 @@ enum protocol_type {
MAX_PROTOCOL_TYPE
};
+
+
/*
* Ustorm Queue Zone
*/
@@ -881,7 +897,7 @@ enum db_dest {
*/
enum db_dpm_type {
DPM_LEGACY /* Legacy DPM- to Xstorm RAM */,
- DPM_ROCE /* RoCE DPM- to NIG */,
+ DPM_RDMA /* RDMA DPM (only RoCE in E4) - to NIG */,
/* L2 DPM inline- to PBF, with packet data on doorbell */
DPM_L2_INLINE,
DPM_L2_BD /* L2 DPM with BD- to PBF, with TX BD data on doorbell */,
@@ -968,42 +984,42 @@ struct db_pwm_addr {
};
/*
- * Parameters to RoCE firmware, passed in EDPM doorbell
+ * Parameters to RDMA firmware, passed in EDPM doorbell
*/
-struct db_roce_dpm_params {
+struct db_rdma_dpm_params {
__le32 params;
/* Size in QWORD-s of the DPM burst */
-#define DB_ROCE_DPM_PARAMS_SIZE_MASK 0x3F
-#define DB_ROCE_DPM_PARAMS_SIZE_SHIFT 0
-/* Type of DPM transacation (DPM_ROCE) (use enum db_dpm_type) */
-#define DB_ROCE_DPM_PARAMS_DPM_TYPE_MASK 0x3
-#define DB_ROCE_DPM_PARAMS_DPM_TYPE_SHIFT 6
-/* opcode for ROCE operation */
-#define DB_ROCE_DPM_PARAMS_OPCODE_MASK 0xFF
-#define DB_ROCE_DPM_PARAMS_OPCODE_SHIFT 8
+#define DB_RDMA_DPM_PARAMS_SIZE_MASK 0x3F
+#define DB_RDMA_DPM_PARAMS_SIZE_SHIFT 0
+/* Type of DPM transaction (DPM_RDMA) (use enum db_dpm_type) */
+#define DB_RDMA_DPM_PARAMS_DPM_TYPE_MASK 0x3
+#define DB_RDMA_DPM_PARAMS_DPM_TYPE_SHIFT 6
+/* opcode for RDMA operation */
+#define DB_RDMA_DPM_PARAMS_OPCODE_MASK 0xFF
+#define DB_RDMA_DPM_PARAMS_OPCODE_SHIFT 8
/* the size of the WQE payload in bytes */
-#define DB_ROCE_DPM_PARAMS_WQE_SIZE_MASK 0x7FF
-#define DB_ROCE_DPM_PARAMS_WQE_SIZE_SHIFT 16
-#define DB_ROCE_DPM_PARAMS_RESERVED0_MASK 0x1
-#define DB_ROCE_DPM_PARAMS_RESERVED0_SHIFT 27
+#define DB_RDMA_DPM_PARAMS_WQE_SIZE_MASK 0x7FF
+#define DB_RDMA_DPM_PARAMS_WQE_SIZE_SHIFT 16
+#define DB_RDMA_DPM_PARAMS_RESERVED0_MASK 0x1
+#define DB_RDMA_DPM_PARAMS_RESERVED0_SHIFT 27
/* RoCE completion flag */
-#define DB_ROCE_DPM_PARAMS_COMPLETION_FLG_MASK 0x1
-#define DB_ROCE_DPM_PARAMS_COMPLETION_FLG_SHIFT 28
-#define DB_ROCE_DPM_PARAMS_S_FLG_MASK 0x1 /* RoCE S flag */
-#define DB_ROCE_DPM_PARAMS_S_FLG_SHIFT 29
-#define DB_ROCE_DPM_PARAMS_RESERVED1_MASK 0x3
-#define DB_ROCE_DPM_PARAMS_RESERVED1_SHIFT 30
+#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_MASK 0x1
+#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_SHIFT 28
+#define DB_RDMA_DPM_PARAMS_S_FLG_MASK 0x1 /* RoCE S flag */
+#define DB_RDMA_DPM_PARAMS_S_FLG_SHIFT 29
+#define DB_RDMA_DPM_PARAMS_RESERVED1_MASK 0x3
+#define DB_RDMA_DPM_PARAMS_RESERVED1_SHIFT 30
};
/*
- * Structure for doorbell data, in ROCE DPM mode, for the first doorbell in a
+ * Structure for doorbell data, in RDMA DPM mode, for the first doorbell in a
* DPM burst
*/
-struct db_roce_dpm_data {
+struct db_rdma_dpm_data {
__le16 icid /* internal CID */;
__le16 prod_val /* aggregated value to update */;
-/* parameters passed to RoCE firmware */
- struct db_roce_dpm_params params;
+/* parameters passed to RDMA firmware */
+ struct db_rdma_dpm_params params;
};
/* Igu interrupt command */
@@ -1136,6 +1152,68 @@ struct parsing_and_err_flags {
/*
+ * Parsing error flags bitmap.
+ */
+struct parsing_err_flags {
+ __le16 flags;
+/* MAC error indication */
+#define PARSING_ERR_FLAGS_MAC_ERROR_MASK 0x1
+#define PARSING_ERR_FLAGS_MAC_ERROR_SHIFT 0
+/* truncation error indication */
+#define PARSING_ERR_FLAGS_TRUNC_ERROR_MASK 0x1
+#define PARSING_ERR_FLAGS_TRUNC_ERROR_SHIFT 1
+/* packet too small indication */
+#define PARSING_ERR_FLAGS_PKT_TOO_SMALL_MASK 0x1
+#define PARSING_ERR_FLAGS_PKT_TOO_SMALL_SHIFT 2
+/* Header Missing Tag */
+#define PARSING_ERR_FLAGS_ANY_HDR_MISSING_TAG_MASK 0x1
+#define PARSING_ERR_FLAGS_ANY_HDR_MISSING_TAG_SHIFT 3
+/* from frame cracker output */
+#define PARSING_ERR_FLAGS_ANY_HDR_IP_VER_MISMTCH_MASK 0x1
+#define PARSING_ERR_FLAGS_ANY_HDR_IP_VER_MISMTCH_SHIFT 4
+/* from frame cracker output */
+#define PARSING_ERR_FLAGS_ANY_HDR_IP_V4_HDR_LEN_TOO_SMALL_MASK 0x1
+#define PARSING_ERR_FLAGS_ANY_HDR_IP_V4_HDR_LEN_TOO_SMALL_SHIFT 5
+/* set this error if: 1. total-len is smaller than hdr-len; 2. total-ip-len
+ * indicates a number bigger than the real packet length; 3. tunneling:
+ * the total-ip-length of the outer header points to an offset smaller than
+ * the one pointed to by the total-ip-len of the inner hdr.
+ */
+#define PARSING_ERR_FLAGS_ANY_HDR_IP_BAD_TOTAL_LEN_MASK 0x1
+#define PARSING_ERR_FLAGS_ANY_HDR_IP_BAD_TOTAL_LEN_SHIFT 6
+/* from frame cracker output */
+#define PARSING_ERR_FLAGS_IP_V4_CHKSM_ERROR_MASK 0x1
+#define PARSING_ERR_FLAGS_IP_V4_CHKSM_ERROR_SHIFT 7
+/* from frame cracker output. for either TCP or UDP */
+#define PARSING_ERR_FLAGS_ANY_HDR_L4_IP_LEN_MISMTCH_MASK 0x1
+#define PARSING_ERR_FLAGS_ANY_HDR_L4_IP_LEN_MISMTCH_SHIFT 8
+/* from frame cracker output */
+#define PARSING_ERR_FLAGS_ZERO_UDP_IP_V6_CHKSM_MASK 0x1
+#define PARSING_ERR_FLAGS_ZERO_UDP_IP_V6_CHKSM_SHIFT 9
+/* checksum was calculated and the value isn't 0xffff, or the L4 checksum
+ * wasn't calculated for any reason (e.g. the udp/ipv4 checksum is 0).
+ */
+#define PARSING_ERR_FLAGS_INNER_L4_CHKSM_ERROR_MASK 0x1
+#define PARSING_ERR_FLAGS_INNER_L4_CHKSM_ERROR_SHIFT 10
+/* from frame cracker output */
+#define PARSING_ERR_FLAGS_ANY_HDR_ZERO_TTL_OR_HOP_LIM_MASK 0x1
+#define PARSING_ERR_FLAGS_ANY_HDR_ZERO_TTL_OR_HOP_LIM_SHIFT 11
+/* from frame cracker output */
+#define PARSING_ERR_FLAGS_NON_8021Q_TAG_EXISTS_IN_BOTH_HDRS_MASK 0x1
+#define PARSING_ERR_FLAGS_NON_8021Q_TAG_EXISTS_IN_BOTH_HDRS_SHIFT 12
+/* set if geneve option size was over 32 byte */
+#define PARSING_ERR_FLAGS_GENEVE_OPTION_OVERSIZED_MASK 0x1
+#define PARSING_ERR_FLAGS_GENEVE_OPTION_OVERSIZED_SHIFT 13
+/* from frame cracker output */
+#define PARSING_ERR_FLAGS_TUNNEL_IP_V4_CHKSM_ERROR_MASK 0x1
+#define PARSING_ERR_FLAGS_TUNNEL_IP_V4_CHKSM_ERROR_SHIFT 14
+/* from frame cracker output */
+#define PARSING_ERR_FLAGS_TUNNEL_L4_CHKSM_ERROR_MASK 0x1
+#define PARSING_ERR_FLAGS_TUNNEL_L4_CHKSM_ERROR_SHIFT 15
+};
+
+
+/*
* Pb context
*/
struct pb_context {
@@ -1492,49 +1570,57 @@ struct tdif_task_context {
struct timers_context {
__le32 logical_client_0;
/* Expiration time of logical client 0 */
-#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_MASK 0xFFFFFFF
+#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_MASK 0x7FFFFFF
#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_SHIFT 0
+#define TIMERS_CONTEXT_RESERVED0_MASK 0x1
+#define TIMERS_CONTEXT_RESERVED0_SHIFT 27
/* Valid bit of logical client 0 */
#define TIMERS_CONTEXT_VALIDLC0_MASK 0x1
#define TIMERS_CONTEXT_VALIDLC0_SHIFT 28
/* Active bit of logical client 0 */
#define TIMERS_CONTEXT_ACTIVELC0_MASK 0x1
#define TIMERS_CONTEXT_ACTIVELC0_SHIFT 29
-#define TIMERS_CONTEXT_RESERVED0_MASK 0x3
-#define TIMERS_CONTEXT_RESERVED0_SHIFT 30
+#define TIMERS_CONTEXT_RESERVED1_MASK 0x3
+#define TIMERS_CONTEXT_RESERVED1_SHIFT 30
__le32 logical_client_1;
/* Expiration time of logical client 1 */
-#define TIMERS_CONTEXT_EXPIRATIONTIMELC1_MASK 0xFFFFFFF
+#define TIMERS_CONTEXT_EXPIRATIONTIMELC1_MASK 0x7FFFFFF
#define TIMERS_CONTEXT_EXPIRATIONTIMELC1_SHIFT 0
+#define TIMERS_CONTEXT_RESERVED2_MASK 0x1
+#define TIMERS_CONTEXT_RESERVED2_SHIFT 27
/* Valid bit of logical client 1 */
#define TIMERS_CONTEXT_VALIDLC1_MASK 0x1
#define TIMERS_CONTEXT_VALIDLC1_SHIFT 28
/* Active bit of logical client 1 */
#define TIMERS_CONTEXT_ACTIVELC1_MASK 0x1
#define TIMERS_CONTEXT_ACTIVELC1_SHIFT 29
-#define TIMERS_CONTEXT_RESERVED1_MASK 0x3
-#define TIMERS_CONTEXT_RESERVED1_SHIFT 30
+#define TIMERS_CONTEXT_RESERVED3_MASK 0x3
+#define TIMERS_CONTEXT_RESERVED3_SHIFT 30
__le32 logical_client_2;
/* Expiration time of logical client 2 */
-#define TIMERS_CONTEXT_EXPIRATIONTIMELC2_MASK 0xFFFFFFF
+#define TIMERS_CONTEXT_EXPIRATIONTIMELC2_MASK 0x7FFFFFF
#define TIMERS_CONTEXT_EXPIRATIONTIMELC2_SHIFT 0
+#define TIMERS_CONTEXT_RESERVED4_MASK 0x1
+#define TIMERS_CONTEXT_RESERVED4_SHIFT 27
/* Valid bit of logical client 2 */
#define TIMERS_CONTEXT_VALIDLC2_MASK 0x1
#define TIMERS_CONTEXT_VALIDLC2_SHIFT 28
/* Active bit of logical client 2 */
#define TIMERS_CONTEXT_ACTIVELC2_MASK 0x1
#define TIMERS_CONTEXT_ACTIVELC2_SHIFT 29
-#define TIMERS_CONTEXT_RESERVED2_MASK 0x3
-#define TIMERS_CONTEXT_RESERVED2_SHIFT 30
+#define TIMERS_CONTEXT_RESERVED5_MASK 0x3
+#define TIMERS_CONTEXT_RESERVED5_SHIFT 30
__le32 host_expiration_fields;
/* Expiration time on host (closest one) */
-#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALUE_MASK 0xFFFFFFF
+#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALUE_MASK 0x7FFFFFF
#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALUE_SHIFT 0
+#define TIMERS_CONTEXT_RESERVED6_MASK 0x1
+#define TIMERS_CONTEXT_RESERVED6_SHIFT 27
/* Valid bit of host expiration */
#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALID_MASK 0x1
#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALID_SHIFT 28
-#define TIMERS_CONTEXT_RESERVED3_MASK 0x7
-#define TIMERS_CONTEXT_RESERVED3_SHIFT 29
+#define TIMERS_CONTEXT_RESERVED7_MASK 0x7
+#define TIMERS_CONTEXT_RESERVED7_SHIFT 29
};
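Each expiration field thus shrinks from 28 to 27 valid bits, with bit 27 newly reserved. A sketch of reading one back through the usual GET_FIELD idiom (endianness conversion omitted; 'ctx' is illustrative):

    struct timers_context ctx;
    u32 exp_lc0;

    /* GET_FIELD applies the 0x7FFFFFF mask, so the reserved bit 27 can
     * never leak into the extracted expiration time.
     */
    exp_lc0 = GET_FIELD(ctx.logical_client_0,
                        TIMERS_CONTEXT_EXPIRATIONTIMELC0);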
diff --git a/drivers/net/qede/base/ecore.h b/drivers/net/qede/base/ecore.h
index 907b35b9..80b11a4c 100644
--- a/drivers/net/qede/base/ecore.h
+++ b/drivers/net/qede/base/ecore.h
@@ -28,9 +28,21 @@
#include "ecore_proto_if.h"
#include "mcp_public.h"
-#define MAX_HWFNS_PER_DEVICE (4)
+#define ECORE_MAJOR_VERSION 8
+#define ECORE_MINOR_VERSION 18
+#define ECORE_REVISION_VERSION 7
+#define ECORE_ENGINEERING_VERSION 0
+
+#define ECORE_VERSION \
+ ((ECORE_MAJOR_VERSION << 24) | (ECORE_MINOR_VERSION << 16) | \
+ (ECORE_REVISION_VERSION << 8) | ECORE_ENGINEERING_VERSION)
+
+#define STORM_FW_VERSION \
+ ((FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) | \
+ (FW_REVISION_VERSION << 8) | FW_ENGINEERING_VERSION)
+
+#define MAX_HWFNS_PER_DEVICE 2
#define NAME_SIZE 128 /* @DPDK */
-#define VER_SIZE 16
#define ECORE_WFQ_UNIT 100
#include "../qede_logs.h" /* @DPDK */
@@ -80,13 +92,22 @@ enum ecore_nvm_cmd {
#define SET_FIELD(value, name, flag) \
do { \
(value) &= ~(name##_MASK << name##_SHIFT); \
- (value) |= (((u64)flag) << (name##_SHIFT)); \
+ (value) |= ((((u64)flag) & (u64)name##_MASK) << (name##_SHIFT));\
} while (0)
#define GET_FIELD(value, name) \
(((value) >> (name##_SHIFT)) & name##_MASK)
#endif
+#define ECORE_MFW_GET_FIELD(name, field) \
+ (((name) & (field ## _MASK)) >> (field ## _SHIFT))
+
+#define ECORE_MFW_SET_FIELD(name, field, value) \
+do { \
+ (name) &= ~(field ## _MASK); \
+ (name) |= (((value) << (field ## _SHIFT)) & (field ## _MASK)); \
+} while (0)
+
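The added '& name##_MASK' makes SET_FIELD clamp oversized values instead of letting them spill into neighboring bits. A minimal sketch with a hypothetical 2-bit field:

    #define FOO_MASK  0x3 /* hypothetical field in bits [5:4] */
    #define FOO_SHIFT 4

    u32 val = 0;
    SET_FIELD(val, FOO, 0x5); /* flag clamped to 0x5 & 0x3 = 0x1 */
    /* val == 0x10; without the mask, 0x5 << 4 would also set bit 6 */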
static OSAL_INLINE u32 DB_ADDR(u32 cid, u32 DEMS)
{
u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
@@ -158,8 +179,8 @@ enum DP_MODULE {
ECORE_MSG_CXT = 0x800000,
ECORE_MSG_LL2 = 0x1000000,
ECORE_MSG_ILT = 0x2000000,
- ECORE_MSG_RDMA = 0x4000000,
- ECORE_MSG_DEBUG = 0x8000000,
+ ECORE_MSG_RDMA = 0x4000000,
+ ECORE_MSG_DEBUG = 0x8000000,
/* to be added...up to 0x8000000 */
};
#endif
@@ -179,6 +200,7 @@ struct ecore_cxt_mngr;
struct ecore_dma_mem;
struct ecore_sb_sp_info;
struct ecore_ll2_info;
+struct ecore_l2_info;
struct ecore_igu_info;
struct ecore_mcp_info;
struct ecore_dcbx_info;
@@ -205,33 +227,29 @@ enum ecore_tunn_clss {
MAX_ECORE_TUNN_CLSS,
};
-struct ecore_tunn_start_params {
- unsigned long tunn_mode;
- u16 vxlan_udp_port;
- u16 geneve_udp_port;
- u8 update_vxlan_udp_port;
- u8 update_geneve_udp_port;
- u8 tunn_clss_vxlan;
- u8 tunn_clss_l2geneve;
- u8 tunn_clss_ipgeneve;
- u8 tunn_clss_l2gre;
- u8 tunn_clss_ipgre;
+struct ecore_tunn_update_type {
+ bool b_update_mode;
+ bool b_mode_enabled;
+ enum ecore_tunn_clss tun_cls;
+};
+
+struct ecore_tunn_update_udp_port {
+ bool b_update_port;
+ u16 port;
};
-struct ecore_tunn_update_params {
- unsigned long tunn_mode_update_mask;
- unsigned long tunn_mode;
- u16 vxlan_udp_port;
- u16 geneve_udp_port;
- u8 update_rx_pf_clss;
- u8 update_tx_pf_clss;
- u8 update_vxlan_udp_port;
- u8 update_geneve_udp_port;
- u8 tunn_clss_vxlan;
- u8 tunn_clss_l2geneve;
- u8 tunn_clss_ipgeneve;
- u8 tunn_clss_l2gre;
- u8 tunn_clss_ipgre;
+struct ecore_tunnel_info {
+ struct ecore_tunn_update_type vxlan;
+ struct ecore_tunn_update_type l2_geneve;
+ struct ecore_tunn_update_type ip_geneve;
+ struct ecore_tunn_update_type l2_gre;
+ struct ecore_tunn_update_type ip_gre;
+
+ struct ecore_tunn_update_udp_port vxlan_port;
+ struct ecore_tunn_update_udp_port geneve_port;
+
+ bool b_update_rx_cls;
+ bool b_update_tx_cls;
};
/* The PCI personality is not quite synonymous to protocol ID:
@@ -243,7 +261,8 @@ enum ecore_pci_personality {
ECORE_PCI_FCOE,
ECORE_PCI_ISCSI,
ECORE_PCI_ETH_ROCE,
- ECORE_PCI_IWARP,
+ ECORE_PCI_ETH_IWARP,
+ ECORE_PCI_ETH_RDMA,
ECORE_PCI_DEFAULT /* default in shmem */
};
@@ -273,6 +292,7 @@ enum ecore_resources {
ECORE_LL2_QUEUE,
ECORE_CMDQS_CQS,
ECORE_RDMA_STATS_QUEUE,
+ ECORE_BDQ,
ECORE_MAX_RESC, /* must be last */
};
@@ -288,6 +308,7 @@ enum ecore_feature {
ECORE_RDMA_CNQ,
ECORE_ISCSI_CQ,
ECORE_FCOE_CQ,
+ ECORE_VF_L2_QUE,
ECORE_MAX_FEATURES,
};
@@ -302,6 +323,7 @@ enum ecore_port_mode {
ECORE_PORT_MODE_DE_2X25G,
ECORE_PORT_MODE_DE_1X25G,
ECORE_PORT_MODE_DE_4X25G,
+ ECORE_PORT_MODE_DE_2X10G,
};
enum ecore_dev_cap {
@@ -326,6 +348,19 @@ enum ecore_hw_err_type {
struct ecore_hw_info {
/* PCI personality */
enum ecore_pci_personality personality;
+#define ECORE_IS_RDMA_PERSONALITY(dev) \
+ ((dev)->hw_info.personality == ECORE_PCI_ETH_ROCE || \
+ (dev)->hw_info.personality == ECORE_PCI_ETH_IWARP || \
+ (dev)->hw_info.personality == ECORE_PCI_ETH_RDMA)
+#define ECORE_IS_ROCE_PERSONALITY(dev) \
+ ((dev)->hw_info.personality == ECORE_PCI_ETH_ROCE || \
+ (dev)->hw_info.personality == ECORE_PCI_ETH_RDMA)
+#define ECORE_IS_IWARP_PERSONALITY(dev) \
+ ((dev)->hw_info.personality == ECORE_PCI_ETH_IWARP || \
+ (dev)->hw_info.personality == ECORE_PCI_ETH_RDMA)
+#define ECORE_IS_L2_PERSONALITY(dev) \
+ ((dev)->hw_info.personality == ECORE_PCI_ETH || \
+ ECORE_IS_RDMA_PERSONALITY(dev))
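ECORE_PCI_ETH_RDMA deliberately satisfies both the RoCE and the iWARP predicates, so shared code paths need only the combined check. A sketch (the helper name is hypothetical):

    if (ECORE_IS_RDMA_PERSONALITY(p_hwfn))
        /* taken for ETH_ROCE, ETH_IWARP and ETH_RDMA alike */
        ecore_alloc_rdma_resources(p_hwfn); /* hypothetical helper */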
/* Resource Allocation scheme results */
u32 resc_start[ECORE_MAX_RESC];
@@ -347,9 +382,6 @@ struct ecore_hw_info {
u8 num_active_tc;
- /* Traffic class used for tcp out of order traffic */
- u8 ooo_tc;
-
/* The traffic class used by PF for it's offloaded protocol */
u8 offload_tc;
@@ -372,16 +404,11 @@ struct ecore_hw_info {
u32 port_mode;
u32 hw_mode;
unsigned long device_capabilities;
-};
-struct ecore_hw_cid_data {
- u32 cid;
- bool b_cid_allocated;
- u8 vfid; /* 1-based; 0 signals this is for a PF */
+ /* Default DCBX mode */
+ u8 dcbx_mode;
- /* Additional identifiers */
- u16 opaque_fid;
- u8 vport_id;
+ u16 mtu;
};
/* maximum size of read/write commands (HW limit) */
@@ -424,15 +451,18 @@ struct ecore_qm_info {
struct init_qm_port_params *qm_port_params;
u16 start_pq;
u8 start_vport;
- u8 pure_lb_pq;
- u8 offload_pq;
- u8 pure_ack_pq;
- u8 ooo_pq;
- u8 vf_queues_offset;
+ u16 pure_lb_pq;
+ u16 offload_pq;
+ u16 pure_ack_pq;
+ u16 ooo_pq;
+ u16 first_vf_pq;
+ u16 first_mcos_pq;
+ u16 first_rl_pq;
u16 num_pqs;
u16 num_vf_pqs;
u8 num_vports;
u8 max_phys_tcs_per_port;
+ u8 ooo_tc;
bool pf_rl_en;
bool pf_wfq_en;
bool vport_rl_en;
@@ -472,7 +502,7 @@ struct ecore_hwfn {
u32 dp_module;
u8 dp_level;
char name[NAME_SIZE];
- void *dp_ctx;
+ void *dp_ctx;
bool first_on_engine;
bool hw_init_done;
@@ -527,8 +557,8 @@ struct ecore_hwfn {
u32 rdma_prs_search_reg;
/* Array of sb_info of all status blocks */
- struct ecore_sb_info *sbs_info[MAX_SB_PER_PF_MIMD];
- u16 num_sbs;
+ struct ecore_sb_info *sbs_info[MAX_SB_PER_PF_MIMD];
+ u16 num_sbs;
struct ecore_cxt_mngr *p_cxt_mngr;
@@ -544,9 +574,6 @@ struct ecore_hwfn {
struct ecore_mcp_info *mcp_info;
struct ecore_dcbx_info *p_dcbx_info;
- struct ecore_hw_cid_data *p_tx_cids;
- struct ecore_hw_cid_data *p_rx_cids;
-
struct ecore_dmae_info dmae_info;
/* QM init */
@@ -572,6 +599,12 @@ struct ecore_hwfn {
/* If one of the following is set then EDPM shouldn't be used */
u8 dcbx_no_edpm;
u8 db_bar_no_edpm;
+
+ /* L2-related */
+ struct ecore_l2_info *p_l2_info;
+
+ /* @DPDK */
+ struct ecore_ptt *p_arfs_ptt;
};
#ifndef __EXTRACT__LINUX__
@@ -603,7 +636,7 @@ struct ecore_dev {
u32 dp_module;
u8 dp_level;
char name[NAME_SIZE];
- void *dp_ctx;
+ void *dp_ctx;
u8 type;
#define ECORE_DEV_TYPE_BB (0 << 0)
@@ -620,6 +653,10 @@ struct ecore_dev {
#define ECORE_IS_AH(dev) ((dev)->type == ECORE_DEV_TYPE_AH)
#define ECORE_IS_K2(dev) ECORE_IS_AH(dev)
+#define ECORE_DEV_ID_MASK 0xff00
+#define ECORE_DEV_ID_MASK_BB 0x1600
+#define ECORE_DEV_ID_MASK_AH 0x8000
+
u16 vendor_id;
u16 device_id;
@@ -679,7 +716,7 @@ struct ecore_dev {
int pcie_width;
int pcie_speed;
- u8 ver_str[NAME_SIZE]; /* @DPDK */
+
/* Add MF related configuration */
u8 mcp_rev;
u8 boot_mode;
@@ -711,10 +748,7 @@ struct ecore_dev {
/* SRIOV */
struct ecore_hw_sriov_info *p_iov_info;
#define IS_ECORE_SRIOV(p_dev) (!!(p_dev)->p_iov_info)
- bool b_hw_channel;
-
- unsigned long tunn_mode;
-
+ struct ecore_tunnel_info tunnel;
bool b_is_vf;
u32 drv_type;
@@ -766,15 +800,6 @@ struct ecore_dev {
#define NUM_OF_ENG_PFS(dev) (ECORE_IS_BB(dev) ? MAX_NUM_PFS_BB \
: MAX_NUM_PFS_K2)
-#ifndef REAL_ASIC_ONLY
-#define ENABLE_EAGLE_ENG1_WORKAROUND(p_hwfn) ( \
- (ECORE_IS_BB_A0(p_hwfn->p_dev)) && \
- (ECORE_PATH_ID(p_hwfn) == 1) && \
- ((p_hwfn->hw_info.port_mode == ECORE_PORT_MODE_DE_2X40G) || \
- (p_hwfn->hw_info.port_mode == ECORE_PORT_MODE_DE_2X50G) || \
- (p_hwfn->hw_info.port_mode == ECORE_PORT_MODE_DE_2X25G)))
-#endif
-
/**
* @brief ecore_concrete_to_sw_fid - get the sw function id from
* the concrete value.
@@ -783,8 +808,8 @@ struct ecore_dev {
*
* @return OSAL_INLINE u8
*/
-static OSAL_INLINE u8 ecore_concrete_to_sw_fid(struct ecore_dev *p_dev,
- u32 concrete_fid)
+static OSAL_INLINE u8
+ecore_concrete_to_sw_fid(__rte_unused struct ecore_dev *p_dev, u32 concrete_fid)
{
u8 vfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID);
u8 pfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID);
@@ -800,7 +825,7 @@ static OSAL_INLINE u8 ecore_concrete_to_sw_fid(struct ecore_dev *p_dev,
}
#define PURE_LB_TC 8
-#define OOO_LB_TC 9
+#define PKT_LB_TC 9
int ecore_configure_vport_wfq(struct ecore_dev *p_dev, u16 vp_id, u32 rate);
void ecore_configure_vp_wfq_on_link_change(struct ecore_dev *p_dev,
@@ -811,7 +836,33 @@ int ecore_configure_pf_min_bandwidth(struct ecore_dev *p_dev, u8 min_bw);
void ecore_clean_wfq_db(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
int ecore_device_num_engines(struct ecore_dev *p_dev);
int ecore_device_num_ports(struct ecore_dev *p_dev);
+void ecore_set_fw_mac_addr(__le16 *fw_msb, __le16 *fw_mid, __le16 *fw_lsb,
+ u8 *mac);
+
+/* Flags for indication of required queues */
+#define PQ_FLAGS_RLS (1 << 0)
+#define PQ_FLAGS_MCOS (1 << 1)
+#define PQ_FLAGS_LB (1 << 2)
+#define PQ_FLAGS_OOO (1 << 3)
+#define PQ_FLAGS_ACK (1 << 4)
+#define PQ_FLAGS_OFLD (1 << 5)
+#define PQ_FLAGS_VFS (1 << 6)
+
+/* physical queue index for cm context initialization */
+u16 ecore_get_cm_pq_idx(struct ecore_hwfn *p_hwfn, u32 pq_flags);
+u16 ecore_get_cm_pq_idx_mcos(struct ecore_hwfn *p_hwfn, u8 tc);
+u16 ecore_get_cm_pq_idx_vf(struct ecore_hwfn *p_hwfn, u16 vf);
+u16 ecore_get_cm_pq_idx_rl(struct ecore_hwfn *p_hwfn, u8 qpid);
+
+/* number of resources used in qm init */
+u8 ecore_init_qm_get_num_tcs(struct ecore_hwfn *p_hwfn);
+u16 ecore_init_qm_get_num_vfs(struct ecore_hwfn *p_hwfn);
+u16 ecore_init_qm_get_num_pf_rls(struct ecore_hwfn *p_hwfn);
+u16 ecore_init_qm_get_num_vports(struct ecore_hwfn *p_hwfn);
+u16 ecore_init_qm_get_num_pqs(struct ecore_hwfn *p_hwfn);
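Consumers are meant to translate the PQ_FLAGS_* needs above into physical queue ids through these helpers rather than hard-coding offsets. A usage sketch (the tc and vf values are illustrative):

    u16 pq_lb = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);  /* pure-LB PQ */
    u16 pq_tc = ecore_get_cm_pq_idx_mcos(p_hwfn, 2);       /* TC 2 PQ */
    u16 pq_vf = ecore_get_cm_pq_idx_vf(p_hwfn, 7);         /* VF 7 PQ */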
#define ECORE_LEADING_HWFN(dev) (&dev->hwfns[0])
+const char *ecore_hw_get_resc_name(enum ecore_resources res_id);
+
#endif /* __ECORE_H */
diff --git a/drivers/net/qede/base/ecore_chain.h b/drivers/net/qede/base/ecore_chain.h
index 9ad1874f..ba272a91 100644
--- a/drivers/net/qede/base/ecore_chain.h
+++ b/drivers/net/qede/base/ecore_chain.h
@@ -54,21 +54,9 @@ struct ecore_chain_pbl_u32 {
u32 cons_page_idx;
};
-struct ecore_chain_pbl {
- /* Base address of a pre-allocated buffer for pbl */
- dma_addr_t p_phys_table;
- void *p_virt_table;
-
- /* Table for keeping the virtual addresses of the chain pages,
- * respectively to the physical addresses in the pbl table.
- */
- void **pp_virt_addr_tbl;
-
- /* Index to current used page by producer/consumer */
- union {
- struct ecore_chain_pbl_u16 pbl16;
- struct ecore_chain_pbl_u32 pbl32;
- } u;
+struct ecore_chain_ext_pbl {
+ dma_addr_t p_pbl_phys;
+ void *p_pbl_virt;
};
struct ecore_chain_u16 {
@@ -84,40 +72,75 @@ struct ecore_chain_u32 {
};
struct ecore_chain {
- /* Address of first page of the chain */
- void *p_virt_addr;
- dma_addr_t p_phys_addr;
-
+ /* fastpath portion of the chain - required for commands such
+ * as produce / consume.
+ */
/* Point to next element to produce/consume */
void *p_prod_elem;
void *p_cons_elem;
- enum ecore_chain_mode mode;
- enum ecore_chain_use_mode intended_use;
+ /* Fastpath portions of the PBL [if exists] */
+
+ struct {
+ /* Table for keeping the virtual addresses of the chain pages,
+ * respectively to the physical addresses in the pbl table.
+ */
+ void **pp_virt_addr_tbl;
+
+ union {
+ struct ecore_chain_pbl_u16 u16;
+ struct ecore_chain_pbl_u32 u32;
+ } c;
+ } pbl;
- enum ecore_chain_cnt_type cnt_type;
union {
struct ecore_chain_u16 chain16;
struct ecore_chain_u32 chain32;
} u;
- u32 page_cnt;
+ /* Capacity counts only usable elements */
+ u32 capacity;
+ u32 page_cnt;
- /* Number of elements - capacity is for usable elements only,
- * while size will contain total number of elements [for entire chain].
+ /* A u8 would suffice for mode, but it would save us a lot of headaches
+ * on castings & defaults.
*/
- u32 capacity;
- u32 size;
+ enum ecore_chain_mode mode;
/* Elements information for fast calculations */
u16 elem_per_page;
u16 elem_per_page_mask;
- u16 elem_unusable;
- u16 usable_per_page;
u16 elem_size;
u16 next_page_mask;
+ u16 usable_per_page;
+ u8 elem_unusable;
- struct ecore_chain_pbl pbl;
+ u8 cnt_type;
+
+ /* Slowpath of the chain - required for initialization and destruction,
+ * but isn't involved in regular functionality.
+ */
+
+ /* Base address of a pre-allocated buffer for pbl */
+ struct {
+ dma_addr_t p_phys_table;
+ void *p_virt_table;
+ } pbl_sp;
+
+ /* Address of first page of the chain - the address is required
+ * for fastpath operation [consume/produce] but only for the SINGLE
+ * flavour which isn't considered fastpath [== SPQ].
+ */
+ void *p_virt_addr;
+ dma_addr_t p_phys_addr;
+
+ /* Total number of elements [for entire chain] */
+ u32 size;
+
+ u8 intended_use;
+
+ /* TBD - do we really need this? Couldn't find usage for it */
+ bool b_external_pbl;
void *dp_ctx;
};
@@ -128,8 +151,8 @@ struct ecore_chain {
#define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode) \
((mode == ECORE_CHAIN_MODE_NEXT_PTR) ? \
- (1 + ((sizeof(struct ecore_chain_next) - 1) / \
- (elem_size))) : 0)
+ (u8)(1 + ((sizeof(struct ecore_chain_next) - 1) / \
+ (elem_size))) : 0)
#define USABLE_ELEMS_PER_PAGE(elem_size, mode) \
((u32)(ELEMS_PER_PAGE(elem_size) - \
@@ -238,7 +261,7 @@ u16 ecore_chain_get_usable_per_page(struct ecore_chain *p_chain)
}
static OSAL_INLINE
-u16 ecore_chain_get_unusable_per_page(struct ecore_chain *p_chain)
+u8 ecore_chain_get_unusable_per_page(struct ecore_chain *p_chain)
{
return p_chain->elem_unusable;
}
@@ -256,7 +279,7 @@ static OSAL_INLINE u32 ecore_chain_get_page_cnt(struct ecore_chain *p_chain)
static OSAL_INLINE
dma_addr_t ecore_chain_get_pbl_phys(struct ecore_chain *p_chain)
{
- return p_chain->pbl.p_phys_table;
+ return p_chain->pbl_sp.p_phys_table;
}
/**
@@ -281,9 +304,9 @@ ecore_chain_advance_page(struct ecore_chain *p_chain, void **p_next_elem,
p_next = (struct ecore_chain_next *)(*p_next_elem);
*p_next_elem = p_next->next_virt;
if (is_chain_u16(p_chain))
- *(u16 *)idx_to_inc += p_chain->elem_unusable;
+ *(u16 *)idx_to_inc += (u16)p_chain->elem_unusable;
else
- *(u32 *)idx_to_inc += p_chain->elem_unusable;
+ *(u32 *)idx_to_inc += (u16)p_chain->elem_unusable;
break;
case ECORE_CHAIN_MODE_SINGLE:
*p_next_elem = p_chain->p_virt_addr;
@@ -384,7 +407,7 @@ static OSAL_INLINE void *ecore_chain_produce(struct ecore_chain *p_chain)
if ((p_chain->u.chain16.prod_idx &
p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
p_prod_idx = &p_chain->u.chain16.prod_idx;
- p_prod_page_idx = &p_chain->pbl.u.pbl16.prod_page_idx;
+ p_prod_page_idx = &p_chain->pbl.c.u16.prod_page_idx;
ecore_chain_advance_page(p_chain, &p_chain->p_prod_elem,
p_prod_idx, p_prod_page_idx);
}
@@ -393,7 +416,7 @@ static OSAL_INLINE void *ecore_chain_produce(struct ecore_chain *p_chain)
if ((p_chain->u.chain32.prod_idx &
p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
p_prod_idx = &p_chain->u.chain32.prod_idx;
- p_prod_page_idx = &p_chain->pbl.u.pbl32.prod_page_idx;
+ p_prod_page_idx = &p_chain->pbl.c.u32.prod_page_idx;
ecore_chain_advance_page(p_chain, &p_chain->p_prod_elem,
p_prod_idx, p_prod_page_idx);
}
@@ -458,7 +481,7 @@ static OSAL_INLINE void *ecore_chain_consume(struct ecore_chain *p_chain)
if ((p_chain->u.chain16.cons_idx &
p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
p_cons_idx = &p_chain->u.chain16.cons_idx;
- p_cons_page_idx = &p_chain->pbl.u.pbl16.cons_page_idx;
+ p_cons_page_idx = &p_chain->pbl.c.u16.cons_page_idx;
ecore_chain_advance_page(p_chain, &p_chain->p_cons_elem,
p_cons_idx, p_cons_page_idx);
}
@@ -467,7 +490,7 @@ static OSAL_INLINE void *ecore_chain_consume(struct ecore_chain *p_chain)
if ((p_chain->u.chain32.cons_idx &
p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
p_cons_idx = &p_chain->u.chain32.cons_idx;
- p_cons_page_idx = &p_chain->pbl.u.pbl32.cons_page_idx;
+ p_cons_page_idx = &p_chain->pbl.c.u32.cons_page_idx;
ecore_chain_advance_page(p_chain, &p_chain->p_cons_elem,
p_cons_idx, p_cons_page_idx);
}
@@ -511,25 +534,26 @@ static OSAL_INLINE void ecore_chain_reset(struct ecore_chain *p_chain)
u32 reset_val = p_chain->page_cnt - 1;
if (is_chain_u16(p_chain)) {
- p_chain->pbl.u.pbl16.prod_page_idx = (u16)reset_val;
- p_chain->pbl.u.pbl16.cons_page_idx = (u16)reset_val;
+ p_chain->pbl.c.u16.prod_page_idx = (u16)reset_val;
+ p_chain->pbl.c.u16.cons_page_idx = (u16)reset_val;
} else {
- p_chain->pbl.u.pbl32.prod_page_idx = reset_val;
- p_chain->pbl.u.pbl32.cons_page_idx = reset_val;
+ p_chain->pbl.c.u32.prod_page_idx = reset_val;
+ p_chain->pbl.c.u32.cons_page_idx = reset_val;
}
}
switch (p_chain->intended_use) {
- case ECORE_CHAIN_USE_TO_CONSUME_PRODUCE:
- case ECORE_CHAIN_USE_TO_PRODUCE:
- /* Do nothing */
- break;
-
case ECORE_CHAIN_USE_TO_CONSUME:
- /* produce empty elements */
- for (i = 0; i < p_chain->capacity; i++)
+ /* produce empty elements */
+ for (i = 0; i < p_chain->capacity; i++)
ecore_chain_recycle_consumed(p_chain);
- break;
+ break;
+
+ case ECORE_CHAIN_USE_TO_CONSUME_PRODUCE:
+ case ECORE_CHAIN_USE_TO_PRODUCE:
+ default:
+ /* Do nothing */
+ break;
}
}
@@ -556,9 +580,9 @@ ecore_chain_init_params(struct ecore_chain *p_chain, u32 page_cnt, u8 elem_size,
p_chain->p_virt_addr = OSAL_NULL;
p_chain->p_phys_addr = 0;
p_chain->elem_size = elem_size;
- p_chain->intended_use = intended_use;
+ p_chain->intended_use = (u8)intended_use;
p_chain->mode = mode;
- p_chain->cnt_type = cnt_type;
+ p_chain->cnt_type = (u8)cnt_type;
p_chain->elem_per_page = ELEMS_PER_PAGE(elem_size);
p_chain->usable_per_page = USABLE_ELEMS_PER_PAGE(elem_size, mode);
@@ -570,9 +594,9 @@ ecore_chain_init_params(struct ecore_chain *p_chain, u32 page_cnt, u8 elem_size,
p_chain->page_cnt = page_cnt;
p_chain->capacity = p_chain->usable_per_page * page_cnt;
p_chain->size = p_chain->elem_per_page * page_cnt;
-
- p_chain->pbl.p_phys_table = 0;
- p_chain->pbl.p_virt_table = OSAL_NULL;
+ p_chain->b_external_pbl = false;
+ p_chain->pbl_sp.p_phys_table = 0;
+ p_chain->pbl_sp.p_virt_table = OSAL_NULL;
p_chain->pbl.pp_virt_addr_tbl = OSAL_NULL;
p_chain->dp_ctx = dp_ctx;
@@ -616,8 +640,8 @@ static OSAL_INLINE void ecore_chain_init_pbl_mem(struct ecore_chain *p_chain,
dma_addr_t p_phys_pbl,
void **pp_virt_addr_tbl)
{
- p_chain->pbl.p_phys_table = p_phys_pbl;
- p_chain->pbl.p_virt_table = p_virt_pbl;
+ p_chain->pbl_sp.p_phys_table = p_phys_pbl;
+ p_chain->pbl_sp.p_virt_table = p_virt_pbl;
p_chain->pbl.pp_virt_addr_tbl = pp_virt_addr_tbl;
}
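For reference, the per-page element math behind capacity vs. size works out as follows in next-ptr mode (a worked sketch assuming 4 KB pages, 16-byte elements and sizeof(struct ecore_chain_next) == 16; all three are assumptions for illustration):

    /* ELEMS_PER_PAGE(16)                 = 4096 / 16         = 256
     * UNUSABLE_ELEMS_PER_PAGE(16, NEXT)  = 1 + (16 - 1) / 16 = 1
     * USABLE_ELEMS_PER_PAGE(16, NEXT)    = 256 - 1           = 255
     * One slot per page is sacrificed to the next-page pointer, which
     * is exactly the gap between 'capacity' (usable) and 'size' (total).
     */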
diff --git a/drivers/net/qede/base/ecore_cxt.c b/drivers/net/qede/base/ecore_cxt.c
index 3dd953d9..688118bb 100644
--- a/drivers/net/qede/base/ecore_cxt.c
+++ b/drivers/net/qede/base/ecore_cxt.c
@@ -8,6 +8,7 @@
#include "bcm_osal.h"
#include "reg_addr.h"
+#include "common_hsi.h"
#include "ecore_hsi_common.h"
#include "ecore_hsi_eth.h"
#include "ecore_rt_defs.h"
@@ -19,6 +20,7 @@
#include "ecore_hw.h"
#include "ecore_dev_api.h"
#include "ecore_sriov.h"
+#include "ecore_mcp.h"
/* Max number of connection types in HW (DQ/CDU etc.) */
#define MAX_CONN_TYPES PROTOCOLID_COMMON
@@ -100,7 +102,6 @@ struct ecore_tid_seg {
struct ecore_conn_type_cfg {
u32 cid_count;
- u32 cid_start;
u32 cids_per_vf;
struct ecore_tid_seg tid_seg[TASK_SEGMENTS];
};
@@ -191,11 +192,11 @@ struct ecore_cxt_mngr {
*/
u32 vf_count;
- /* total number of SRQ's for this hwfn */
- u32 srq_count;
-
/* Acquired CIDs */
struct ecore_cid_acquired_map acquired[MAX_CONN_TYPES];
+ /* TBD - do we want this allocated to reserve space? */
+ struct ecore_cid_acquired_map
+ acquired_vf[MAX_CONN_TYPES][COMMON_MAX_NUM_VFS];
/* ILT shadow table */
struct ecore_dma_mem *ilt_shadow;
@@ -209,10 +210,29 @@ struct ecore_cxt_mngr {
u32 t2_num_pages;
u64 first_free;
u64 last_free;
+
+ /* The infrastructure originally was very generic and context/task
+ * oriented - per connection-type we would set how many of those
+ * are needed, and later, when determining how much memory a given
+ * block requires, we'd iterate over all the relevant connection-types.
+ * But since then we've added some resources, some of which
+ * require memory which is independent of the general context/task
+ * scheme. We add those here explicitly, per-feature.
+ */
+
+ /* total number of SRQ's for this hwfn */
+ u32 srq_count;
+
+ /* Maximal number of L2 steering filters */
+ u32 arfs_count;
+
+ /* TODO - VF arfs filters ? */
};
/* check if resources/configuration is required according to protocol type */
-static OSAL_INLINE bool src_proto(enum protocol_type type)
+static OSAL_INLINE bool src_proto(struct ecore_hwfn *p_hwfn,
+ enum protocol_type type)
{
return type == PROTOCOLID_TOE;
}
@@ -250,18 +270,22 @@ struct ecore_src_iids {
u32 per_vf_cids;
};
-static OSAL_INLINE void ecore_cxt_src_iids(struct ecore_cxt_mngr *p_mngr,
+static OSAL_INLINE void ecore_cxt_src_iids(struct ecore_hwfn *p_hwfn,
+ struct ecore_cxt_mngr *p_mngr,
struct ecore_src_iids *iids)
{
u32 i;
for (i = 0; i < MAX_CONN_TYPES; i++) {
- if (!src_proto(i))
+ if (!src_proto(p_hwfn, i))
continue;
iids->pf_cids += p_mngr->conn_cfg[i].cid_count;
iids->per_vf_cids += p_mngr->conn_cfg[i].cids_per_vf;
}
+
+ /* Add L2 filtering filters in addition */
+ iids->pf_cids += p_mngr->arfs_count;
}
/* counts the iids for the Timers block configuration */
@@ -276,14 +300,24 @@ struct ecore_tm_iids {
static OSAL_INLINE void ecore_cxt_tm_iids(struct ecore_cxt_mngr *p_mngr,
struct ecore_tm_iids *iids)
{
+ bool tm_vf_required = false;
+ bool tm_required = false;
u32 i, j;
for (i = 0; i < MAX_CONN_TYPES; i++) {
struct ecore_conn_type_cfg *p_cfg = &p_mngr->conn_cfg[i];
- if (tm_cid_proto(i)) {
+ if (tm_cid_proto(i) || tm_required) {
+ if (p_cfg->cid_count)
+ tm_required = true;
+
iids->pf_cids += p_cfg->cid_count;
- iids->per_vf_cids += p_cfg->cids_per_vf;
+ }
+
+ if (tm_cid_proto(i) || tm_vf_required) {
+ if (p_cfg->cids_per_vf)
+ tm_vf_required = true;
+
+ iids->per_vf_cids += p_cfg->cids_per_vf;
+ }
if (tm_tid_proto(i)) {
@@ -313,7 +347,8 @@ static OSAL_INLINE void ecore_cxt_tm_iids(struct ecore_cxt_mngr *p_mngr,
}
}
-void ecore_cxt_qm_iids(struct ecore_hwfn *p_hwfn, struct ecore_qm_iids *iids)
+static void ecore_cxt_qm_iids(struct ecore_hwfn *p_hwfn,
+ struct ecore_qm_iids *iids)
{
struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
struct ecore_tid_seg *segs;
@@ -671,7 +706,7 @@ enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)
/* SRC */
p_cli = &p_mngr->clients[ILT_CLI_SRC];
- ecore_cxt_src_iids(p_mngr, &src_iids);
+ ecore_cxt_src_iids(p_hwfn, p_mngr, &src_iids);
/* Both the PF and VFs searcher connections are stored in the per PF
* database. Thus sum the PF searcher cids and all the VFs searcher
@@ -718,12 +753,11 @@ enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)
ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
ILT_CLI_TM);
+ p_cli->vf_total_lines = curr_line - p_blk->start_line;
for (i = 1; i < p_mngr->vf_count; i++) {
ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
ILT_CLI_TM);
}
-
- p_cli->vf_total_lines = curr_line - p_blk->start_line;
}
/* TSDM (SRQ CONTEXT) */
@@ -766,7 +800,6 @@ static void ecore_cxt_src_t2_free(struct ecore_hwfn *p_hwfn)
p_mngr->t2[i].size);
OSAL_FREE(p_hwfn->p_dev, p_mngr->t2);
- p_mngr->t2 = OSAL_NULL;
}
static enum _ecore_status_t ecore_cxt_src_t2_alloc(struct ecore_hwfn *p_hwfn)
@@ -787,7 +820,7 @@ static enum _ecore_status_t ecore_cxt_src_t2_alloc(struct ecore_hwfn *p_hwfn)
if (!p_src->active)
return ECORE_SUCCESS;
- ecore_cxt_src_iids(p_mngr, &src_iids);
+ ecore_cxt_src_iids(p_hwfn, p_mngr, &src_iids);
conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
total_size = conn_num * sizeof(struct src_ent);
@@ -1006,44 +1039,75 @@ ilt_shadow_fail:
static void ecore_cid_map_free(struct ecore_hwfn *p_hwfn)
{
struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
- u32 type;
+ u32 type, vf;
for (type = 0; type < MAX_CONN_TYPES; type++) {
OSAL_FREE(p_hwfn->p_dev, p_mngr->acquired[type].cid_map);
p_mngr->acquired[type].max_count = 0;
p_mngr->acquired[type].start_cid = 0;
+
+ for (vf = 0; vf < COMMON_MAX_NUM_VFS; vf++) {
+ OSAL_FREE(p_hwfn->p_dev,
+ p_mngr->acquired_vf[type][vf].cid_map);
+ p_mngr->acquired_vf[type][vf].max_count = 0;
+ p_mngr->acquired_vf[type][vf].start_cid = 0;
+ }
}
}
+static enum _ecore_status_t
+ecore_cid_map_alloc_single(struct ecore_hwfn *p_hwfn, u32 type,
+ u32 cid_start, u32 cid_count,
+ struct ecore_cid_acquired_map *p_map)
+{
+ u32 size;
+
+ if (!cid_count)
+ return ECORE_SUCCESS;
+
+ size = MAP_WORD_SIZE * DIV_ROUND_UP(cid_count, BITS_PER_MAP_WORD);
+ p_map->cid_map = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
+ if (p_map->cid_map == OSAL_NULL)
+ return ECORE_NOMEM;
+
+ p_map->max_count = cid_count;
+ p_map->start_cid = cid_start;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_CXT,
+ "Type %08x start: %08x count %08x\n",
+ type, p_map->start_cid, p_map->max_count);
+
+ return ECORE_SUCCESS;
+}
+
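The bitmap sizing rounds up to whole map words. A worked sketch, assuming MAP_WORD_SIZE == sizeof(u32) and BITS_PER_MAP_WORD == 32 (stated here as assumptions):

    /* cid_count = 70:
     *   DIV_ROUND_UP(70, 32) = 3 words
     *   size = 4 * 3         = 12 bytes
     * enough bits to track CIDs cid_start .. cid_start + 69.
     */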
static enum _ecore_status_t ecore_cid_map_alloc(struct ecore_hwfn *p_hwfn)
{
struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
- u32 start_cid = 0;
- u32 type;
+ u32 start_cid = 0, vf_start_cid = 0;
+ u32 type, vf;
for (type = 0; type < MAX_CONN_TYPES; type++) {
- u32 cid_cnt = p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
- u32 size;
+ struct ecore_conn_type_cfg *p_cfg = &p_mngr->conn_cfg[type];
+ struct ecore_cid_acquired_map *p_map;
- if (cid_cnt == 0)
- continue;
-
- size = MAP_WORD_SIZE * DIV_ROUND_UP(cid_cnt, BITS_PER_MAP_WORD);
- p_mngr->acquired[type].cid_map = OSAL_ZALLOC(p_hwfn->p_dev,
- GFP_KERNEL, size);
- if (!p_mngr->acquired[type].cid_map)
+ /* Handle PF maps */
+ p_map = &p_mngr->acquired[type];
+ if (ecore_cid_map_alloc_single(p_hwfn, type, start_cid,
+ p_cfg->cid_count, p_map))
goto cid_map_fail;
- p_mngr->acquired[type].max_count = cid_cnt;
- p_mngr->acquired[type].start_cid = start_cid;
-
- p_hwfn->p_cxt_mngr->conn_cfg[type].cid_start = start_cid;
+ /* Handle VF maps */
+ for (vf = 0; vf < COMMON_MAX_NUM_VFS; vf++) {
+ p_map = &p_mngr->acquired_vf[type][vf];
+ if (ecore_cid_map_alloc_single(p_hwfn, type,
+ vf_start_cid,
+ p_cfg->cids_per_vf,
+ p_map))
+ goto cid_map_fail;
+ }
- DP_VERBOSE(p_hwfn, ECORE_MSG_CXT,
- "Type %08x start: %08x count %08x\n",
- type, p_mngr->acquired[type].start_cid,
- p_mngr->acquired[type].max_count);
- start_cid += cid_cnt;
+ start_cid += p_cfg->cid_count;
+ vf_start_cid += p_cfg->cids_per_vf;
}
return ECORE_SUCCESS;
@@ -1155,27 +1219,41 @@ void ecore_cxt_mngr_free(struct ecore_hwfn *p_hwfn)
ecore_cid_map_free(p_hwfn);
ecore_cxt_src_t2_free(p_hwfn);
ecore_ilt_shadow_free(p_hwfn);
- OSAL_MUTEX_DEALLOC(&p_mngr->mutex);
+ OSAL_MUTEX_DEALLOC(&p_hwfn->p_cxt_mngr->mutex);
OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_cxt_mngr);
-
- p_hwfn->p_cxt_mngr = OSAL_NULL;
}
void ecore_cxt_mngr_setup(struct ecore_hwfn *p_hwfn)
{
struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ struct ecore_cid_acquired_map *p_map;
+ struct ecore_conn_type_cfg *p_cfg;
int type;
+ u32 len;
/* Reset acquired cids */
for (type = 0; type < MAX_CONN_TYPES; type++) {
- u32 cid_cnt = p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
- u32 i;
+ u32 vf;
+
+ p_cfg = &p_mngr->conn_cfg[type];
+ if (p_cfg->cid_count) {
+ p_map = &p_mngr->acquired[type];
+ len = DIV_ROUND_UP(p_map->max_count,
+ BITS_PER_MAP_WORD) *
+ MAP_WORD_SIZE;
+ OSAL_MEM_ZERO(p_map->cid_map, len);
+ }
- if (cid_cnt == 0)
+ if (!p_cfg->cids_per_vf)
continue;
- for (i = 0; i < DIV_ROUND_UP(cid_cnt, BITS_PER_MAP_WORD); i++)
- p_mngr->acquired[type].cid_map[i] = 0;
+ for (vf = 0; vf < COMMON_MAX_NUM_VFS; vf++) {
+ p_map = &p_mngr->acquired_vf[type][vf];
+ len = DIV_ROUND_UP(p_map->max_count,
+ BITS_PER_MAP_WORD) *
+ MAP_WORD_SIZE;
+ OSAL_MEM_ZERO(p_map->cid_map, len);
+ }
}
}
@@ -1366,18 +1444,10 @@ void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn)
}
/* CM PF */
-static enum _ecore_status_t ecore_cm_init_pf(struct ecore_hwfn *p_hwfn)
+void ecore_cm_init_pf(struct ecore_hwfn *p_hwfn)
{
- union ecore_qm_pq_params pq_params;
- u16 pq;
-
- /* XCM pure-LB queue */
- OSAL_MEMSET(&pq_params, 0, sizeof(pq_params));
- pq_params.core.tc = LB_TC;
- pq = ecore_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
- STORE_RT_REG(p_hwfn, XCM_REG_CON_PHY_Q3_RT_OFFSET, pq);
-
- return ECORE_SUCCESS;
+ STORE_RT_REG(p_hwfn, XCM_REG_CON_PHY_Q3_RT_OFFSET,
+ ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB));
}
/* DQ PF */
@@ -1569,7 +1639,7 @@ static void ecore_src_init_pf(struct ecore_hwfn *p_hwfn)
struct ecore_src_iids src_iids;
OSAL_MEM_ZERO(&src_iids, sizeof(src_iids));
- ecore_cxt_src_iids(p_mngr, &src_iids);
+ ecore_cxt_src_iids(p_hwfn, p_mngr, &src_iids);
conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
if (!conn_num)
return;
@@ -1585,6 +1655,9 @@ static void ecore_src_init_pf(struct ecore_hwfn *p_hwfn)
p_hwfn->p_cxt_mngr->first_free);
STORE_RT_REG_AGG(p_hwfn, SRC_REG_LASTFREE_RT_OFFSET,
p_hwfn->p_cxt_mngr->last_free);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
+ "Configured SEARCHER for 0x%08x connections\n",
+ conn_num);
}
/* Timers PF */
@@ -1724,93 +1797,150 @@ void ecore_cxt_hw_init_pf(struct ecore_hwfn *p_hwfn)
ecore_prs_init_pf(p_hwfn);
}
-enum _ecore_status_t ecore_cxt_acquire_cid(struct ecore_hwfn *p_hwfn,
- enum protocol_type type, u32 *p_cid)
+enum _ecore_status_t _ecore_cxt_acquire_cid(struct ecore_hwfn *p_hwfn,
+ enum protocol_type type,
+ u32 *p_cid, u8 vfid)
{
struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ struct ecore_cid_acquired_map *p_map;
u32 rel_cid;
- if (type >= MAX_CONN_TYPES || !p_mngr->acquired[type].cid_map) {
+ if (type >= MAX_CONN_TYPES) {
+ DP_NOTICE(p_hwfn, true, "Invalid protocol type %d", type);
+ return ECORE_INVAL;
+ }
+
+ if (vfid >= COMMON_MAX_NUM_VFS && vfid != ECORE_CXT_PF_CID) {
+ DP_NOTICE(p_hwfn, true, "VF [%02x] is out of range\n", vfid);
+ return ECORE_INVAL;
+ }
+
+ /* Determine the right map to take this CID from */
+ if (vfid == ECORE_CXT_PF_CID)
+ p_map = &p_mngr->acquired[type];
+ else
+ p_map = &p_mngr->acquired_vf[type][vfid];
+
+ if (p_map->cid_map == OSAL_NULL) {
DP_NOTICE(p_hwfn, true, "Invalid protocol type %d", type);
return ECORE_INVAL;
}
- rel_cid = OSAL_FIND_FIRST_ZERO_BIT(p_mngr->acquired[type].cid_map,
- p_mngr->acquired[type].max_count);
+ rel_cid = OSAL_FIND_FIRST_ZERO_BIT(p_map->cid_map,
+ p_map->max_count);
- if (rel_cid >= p_mngr->acquired[type].max_count) {
+ if (rel_cid >= p_map->max_count) {
DP_NOTICE(p_hwfn, false, "no CID available for protocol %d\n",
type);
return ECORE_NORESOURCES;
}
- OSAL_SET_BIT(rel_cid, p_mngr->acquired[type].cid_map);
+ OSAL_SET_BIT(rel_cid, p_map->cid_map);
- *p_cid = rel_cid + p_mngr->acquired[type].start_cid;
+ *p_cid = rel_cid + p_map->start_cid;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_CXT,
+ "Acquired cid 0x%08x [rel. %08x] vfid %02x type %d\n",
+ *p_cid, rel_cid, vfid, type);
return ECORE_SUCCESS;
}
+enum _ecore_status_t ecore_cxt_acquire_cid(struct ecore_hwfn *p_hwfn,
+ enum protocol_type type,
+ u32 *p_cid)
+{
+ return _ecore_cxt_acquire_cid(p_hwfn, type, p_cid, ECORE_CXT_PF_CID);
+}
+
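Callers acting on behalf of a VF now pass its relative id, while the legacy wrapper keeps PF semantics. A usage sketch (VF id 5 is illustrative):

    enum _ecore_status_t rc;
    u32 cid;

    /* PF CID - call sites are unchanged */
    rc = ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &cid);

    /* CID drawn from VF 5's dedicated map; must be released against it */
    rc = _ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &cid, 5);
    _ecore_cxt_release_cid(p_hwfn, cid, 5);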
static bool ecore_cxt_test_cid_acquired(struct ecore_hwfn *p_hwfn,
- u32 cid, enum protocol_type *p_type)
+ u32 cid, u8 vfid,
+ enum protocol_type *p_type,
+ struct ecore_cid_acquired_map **pp_map)
{
struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
- struct ecore_cid_acquired_map *p_map;
- enum protocol_type p;
u32 rel_cid;
/* Iterate over protocols and find matching cid range */
- for (p = 0; p < MAX_CONN_TYPES; p++) {
- p_map = &p_mngr->acquired[p];
+ for (*p_type = 0; *p_type < MAX_CONN_TYPES; (*p_type)++) {
+ if (vfid == ECORE_CXT_PF_CID)
+ *pp_map = &p_mngr->acquired[*p_type];
+ else
+ *pp_map = &p_mngr->acquired_vf[*p_type][vfid];
- if (!p_map->cid_map)
+ if (!((*pp_map)->cid_map))
continue;
- if (cid >= p_map->start_cid &&
- cid < p_map->start_cid + p_map->max_count) {
+ if (cid >= (*pp_map)->start_cid &&
+ cid < (*pp_map)->start_cid + (*pp_map)->max_count) {
break;
}
}
- *p_type = p;
-
- if (p == MAX_CONN_TYPES) {
- DP_NOTICE(p_hwfn, true, "Invalid CID %d", cid);
- return false;
+ if (*p_type == MAX_CONN_TYPES) {
+ DP_NOTICE(p_hwfn, true, "Invalid CID %d vfid %02x", cid, vfid);
+ goto fail;
}
- rel_cid = cid - p_map->start_cid;
- if (!OSAL_TEST_BIT(rel_cid, p_map->cid_map)) {
- DP_NOTICE(p_hwfn, true, "CID %d not acquired", cid);
- return false;
+
+ rel_cid = cid - (*pp_map)->start_cid;
+ if (!OSAL_TEST_BIT(rel_cid, (*pp_map)->cid_map)) {
+ DP_NOTICE(p_hwfn, true,
+ "CID %d [vifd %02x] not acquired", cid, vfid);
+ goto fail;
}
+
return true;
+fail:
+ *p_type = MAX_CONN_TYPES;
+ *pp_map = OSAL_NULL;
+ return false;
}
-void ecore_cxt_release_cid(struct ecore_hwfn *p_hwfn, u32 cid)
+void _ecore_cxt_release_cid(struct ecore_hwfn *p_hwfn, u32 cid, u8 vfid)
{
- struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ struct ecore_cid_acquired_map *p_map = OSAL_NULL;
enum protocol_type type;
bool b_acquired;
u32 rel_cid;
+ if (vfid != ECORE_CXT_PF_CID && vfid >= COMMON_MAX_NUM_VFS) {
+ DP_NOTICE(p_hwfn, true,
+ "Trying to return incorrect CID belonging to VF %02x\n",
+ vfid);
+ return;
+ }
+
/* Test acquired and find matching per-protocol map */
- b_acquired = ecore_cxt_test_cid_acquired(p_hwfn, cid, &type);
+ b_acquired = ecore_cxt_test_cid_acquired(p_hwfn, cid, vfid,
+ &type, &p_map);
if (!b_acquired)
return;
- rel_cid = cid - p_mngr->acquired[type].start_cid;
- OSAL_CLEAR_BIT(rel_cid, p_mngr->acquired[type].cid_map);
+ rel_cid = cid - p_map->start_cid;
+ OSAL_CLEAR_BIT(rel_cid, p_map->cid_map);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_CXT,
+ "Released CID 0x%08x [rel. %08x] vfid %02x type %d\n",
+ cid, rel_cid, vfid, type);
+}
+
+void ecore_cxt_release_cid(struct ecore_hwfn *p_hwfn, u32 cid)
+{
+ _ecore_cxt_release_cid(p_hwfn, cid, ECORE_CXT_PF_CID);
}
enum _ecore_status_t ecore_cxt_get_cid_info(struct ecore_hwfn *p_hwfn,
struct ecore_cxt_info *p_info)
{
struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ struct ecore_cid_acquired_map *p_map = OSAL_NULL;
u32 conn_cxt_size, hw_p_size, cxts_per_p, line;
enum protocol_type type;
bool b_acquired;
/* Test acquired and find matching per-protocol map */
- b_acquired = ecore_cxt_test_cid_acquired(p_hwfn, p_info->iid, &type);
+ b_acquired = ecore_cxt_test_cid_acquired(p_hwfn, p_info->iid,
+ ECORE_CXT_PF_CID,
+ &type, &p_map);
if (!b_acquired)
return ECORE_INVAL;
@@ -1839,7 +1969,7 @@ enum _ecore_status_t ecore_cxt_get_cid_info(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
}
-void ecore_cxt_set_srq_count(struct ecore_hwfn *p_hwfn, u32 num_srqs)
+static void ecore_cxt_set_srq_count(struct ecore_hwfn *p_hwfn, u32 num_srqs)
{
struct ecore_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
@@ -1866,10 +1996,15 @@ enum _ecore_status_t ecore_cxt_set_pf_params(struct ecore_hwfn *p_hwfn)
struct ecore_eth_pf_params *p_params =
&p_hwfn->pf_params.eth_pf_params;
- ecore_cxt_set_proto_cid_count(p_hwfn,
- PROTOCOLID_ETH,
- p_params->num_cons, 1); /* FIXME VF count... */
-
+	/* TODO - we probably want to add the VF number to the PF
+	 * params; as of now, allocate 16 * 2 CIDs per VF [to retain
+	 * regular functionality].
+	 */
+ ecore_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
+ p_params->num_cons, 32);
+ p_hwfn->p_cxt_mngr->arfs_count =
+ p_params->num_arfs_filters;
break;
}
default:
@@ -1879,47 +2014,6 @@ enum _ecore_status_t ecore_cxt_set_pf_params(struct ecore_hwfn *p_hwfn)
return ECORE_SUCCESS;
}
-enum _ecore_status_t ecore_cxt_get_tid_mem_info(struct ecore_hwfn *p_hwfn,
- struct ecore_tid_mem *p_info)
-{
- struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
- u32 proto, seg, total_lines, i, shadow_line;
- struct ecore_ilt_client_cfg *p_cli;
- struct ecore_ilt_cli_blk *p_fl_seg;
- struct ecore_tid_seg *p_seg_info;
-
- /* Verify the personality */
- switch (p_hwfn->hw_info.personality) {
- default:
- return ECORE_INVAL;
- }
-
- p_cli = &p_mngr->clients[ILT_CLI_CDUT];
- if (!p_cli->active)
- return ECORE_INVAL;
-
- p_seg_info = &p_mngr->conn_cfg[proto].tid_seg[seg];
- if (!p_seg_info->has_fl_mem)
- return ECORE_INVAL;
-
- p_fl_seg = &p_cli->pf_blks[CDUT_FL_SEG_BLK(seg, PF)];
- total_lines = DIV_ROUND_UP(p_fl_seg->total_size,
- p_fl_seg->real_size_in_page);
-
- for (i = 0; i < total_lines; i++) {
- shadow_line = i + p_fl_seg->start_line -
- p_hwfn->p_cxt_mngr->pf_start_line;
- p_info->blocks[i] = p_mngr->ilt_shadow[shadow_line].p_virt;
- }
- p_info->waste = ILT_PAGE_IN_BYTES(p_cli->p_size.val) -
- p_fl_seg->real_size_in_page;
- p_info->tid_size = p_mngr->task_type_size[p_seg_info->type];
- p_info->num_tids_per_block = p_fl_seg->real_size_in_page /
- p_info->tid_size;
-
- return ECORE_SUCCESS;
-}
-
/* This function is very RoCE oriented, if another protocol in the future
* will want this feature we'll need to modify the function to be more generic
*/
@@ -2157,52 +2251,3 @@ enum _ecore_status_t ecore_cxt_free_proto_ilt(struct ecore_hwfn *p_hwfn,
return rc;
}
-
-enum _ecore_status_t ecore_cxt_get_task_ctx(struct ecore_hwfn *p_hwfn,
- u32 tid,
- u8 ctx_type, void **pp_task_ctx)
-{
- struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
- struct ecore_ilt_client_cfg *p_cli;
- struct ecore_ilt_cli_blk *p_seg;
- struct ecore_tid_seg *p_seg_info;
- u32 proto, seg;
- u32 total_lines;
- u32 tid_size, ilt_idx;
- u32 num_tids_per_block;
-
- /* Verify the personality */
- switch (p_hwfn->hw_info.personality) {
- default:
- return ECORE_INVAL;
- }
-
- p_cli = &p_mngr->clients[ILT_CLI_CDUT];
- if (!p_cli->active)
- return ECORE_INVAL;
-
- p_seg_info = &p_mngr->conn_cfg[proto].tid_seg[seg];
-
- if (ctx_type == ECORE_CTX_WORKING_MEM) {
- p_seg = &p_cli->pf_blks[CDUT_SEG_BLK(seg)];
- } else if (ctx_type == ECORE_CTX_FL_MEM) {
- if (!p_seg_info->has_fl_mem)
- return ECORE_INVAL;
- p_seg = &p_cli->pf_blks[CDUT_FL_SEG_BLK(seg, PF)];
- } else {
- return ECORE_INVAL;
- }
- total_lines = DIV_ROUND_UP(p_seg->total_size, p_seg->real_size_in_page);
- tid_size = p_mngr->task_type_size[p_seg_info->type];
- num_tids_per_block = p_seg->real_size_in_page / tid_size;
-
- if (total_lines < tid / num_tids_per_block)
- return ECORE_INVAL;
-
- ilt_idx = tid / num_tids_per_block + p_seg->start_line -
- p_mngr->pf_start_line;
- *pp_task_ctx = (u8 *)p_mngr->ilt_shadow[ilt_idx].p_virt +
- (tid % num_tids_per_block) * tid_size;
-
- return ECORE_SUCCESS;
-}
diff --git a/drivers/net/qede/base/ecore_cxt.h b/drivers/net/qede/base/ecore_cxt.h
index 5379d7bc..6ff823a5 100644
--- a/drivers/net/qede/base/ecore_cxt.h
+++ b/drivers/net/qede/base/ecore_cxt.h
@@ -35,17 +35,6 @@ u32 ecore_cxt_get_proto_cid_start(struct ecore_hwfn *p_hwfn,
enum protocol_type type);
u32 ecore_cxt_get_srq_count(struct ecore_hwfn *p_hwfn);
-#ifndef LINUX_REMOVE
-/**
- * @brief ecore_cxt_qm_iids - fills the cid/tid counts for the QM configuration
- *
- * @param p_hwfn
- * @param iids [out], a structure holding all the counters
- */
-void ecore_cxt_qm_iids(struct ecore_hwfn *p_hwfn,
- struct ecore_qm_iids *iids);
-#endif
-
/**
* @brief ecore_cxt_set_pf_params - Set the PF params for cxt init
*
@@ -130,14 +119,53 @@ void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn);
enum _ecore_status_t ecore_qm_reconf(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
+#define ECORE_CXT_PF_CID (0xff)
+
+/**
+ * @brief ecore_cxt_release_cid - Release a cid
+ *
+ * @param p_hwfn
+ * @param cid
+ */
+void ecore_cxt_release_cid(struct ecore_hwfn *p_hwfn, u32 cid);
+
+/**
+ * @brief _ecore_cxt_release_cid - Release a cid belonging to a vf-queue
+ *
+ * @param p_hwfn
+ * @param cid
+ * @param vfid - engine relative index. ECORE_CXT_PF_CID if belongs to PF
+ */
+void _ecore_cxt_release_cid(struct ecore_hwfn *p_hwfn,
+ u32 cid, u8 vfid);
+
+/**
+ * @brief ecore_cxt_acquire_cid - Acquire a new cid of a specific protocol type
+ *
+ * @param p_hwfn
+ * @param type
+ * @param p_cid
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_cxt_acquire_cid(struct ecore_hwfn *p_hwfn,
+ enum protocol_type type,
+ u32 *p_cid);
+
/**
-* @brief ecore_cxt_release - Release a cid
-*
-* @param p_hwfn
-* @param cid
-*/
-void ecore_cxt_release_cid(struct ecore_hwfn *p_hwfn,
- u32 cid);
+ * @brief _ecore_cxt_acquire_cid - Acquire a new cid of a specific protocol
+ * type for a vf-queue
+ * for a vf-queue
+ *
+ * @param p_hwfn
+ * @param type
+ * @param p_cid
+ * @param vfid - engine relative index. ECORE_CXT_PF_CID if belongs to PF
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t _ecore_cxt_acquire_cid(struct ecore_hwfn *p_hwfn,
+ enum protocol_type type,
+ u32 *p_cid, u8 vfid);
/**
* @brief ecore_cxt_get_tid_mem_info - function checks if the
@@ -169,9 +197,5 @@ enum _ecore_status_t ecore_cxt_free_proto_ilt(struct ecore_hwfn *p_hwfn,
#define ECORE_CTX_WORKING_MEM 0
#define ECORE_CTX_FL_MEM 1
-enum _ecore_status_t ecore_cxt_get_task_ctx(struct ecore_hwfn *p_hwfn,
- u32 tid,
- u8 ctx_type,
- void **task_ctx);
#endif /* _ECORE_CID_ */
diff --git a/drivers/net/qede/base/ecore_cxt_api.h b/drivers/net/qede/base/ecore_cxt_api.h
index 6a50412a..6d87620d 100644
--- a/drivers/net/qede/base/ecore_cxt_api.h
+++ b/drivers/net/qede/base/ecore_cxt_api.h
@@ -26,19 +26,6 @@ struct ecore_tid_mem {
};
/**
-* @brief ecore_cxt_acquire - Acquire a new cid of a specific protocol type
-*
-* @param p_hwfn
-* @param type
-* @param p_cid
-*
-* @return enum _ecore_status_t
-*/
-enum _ecore_status_t ecore_cxt_acquire_cid(struct ecore_hwfn *p_hwfn,
- enum protocol_type type,
- u32 *p_cid);
-
-/**
* @brief ecore_cxt_get_cid_info - Returns the context info for a specific cid
*
*
@@ -50,15 +37,4 @@ enum _ecore_status_t ecore_cxt_acquire_cid(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t ecore_cxt_get_cid_info(struct ecore_hwfn *p_hwfn,
struct ecore_cxt_info *p_info);
-/**
-* @brief ecore_cxt_get_tid_mem_info
-*
-* @param p_hwfn
-* @param p_info
-*
-* @return enum _ecore_status_t
-*/
-enum _ecore_status_t ecore_cxt_get_tid_mem_info(struct ecore_hwfn *p_hwfn,
- struct ecore_tid_mem *p_info);
-
#endif
diff --git a/drivers/net/qede/base/ecore_dcbx.c b/drivers/net/qede/base/ecore_dcbx.c
index 8175619a..4f1b0698 100644
--- a/drivers/net/qede/base/ecore_dcbx.c
+++ b/drivers/net/qede/base/ecore_dcbx.c
@@ -13,6 +13,7 @@
#include "ecore_cxt.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_iro.h"
+#include "ecore_iov_api.h"
#define ECORE_DCBX_MAX_MIB_READ_TRY (100)
#define ECORE_ETH_TYPE_DEFAULT (0)
@@ -27,14 +28,25 @@
static bool ecore_dcbx_app_ethtype(u32 app_info_bitmap)
{
- return (ECORE_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF) ==
- DCBX_APP_SF_ETHTYPE) ? true : false;
+ return !!(ECORE_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF) ==
+ DCBX_APP_SF_ETHTYPE);
+}
+
+static bool ecore_dcbx_ieee_app_ethtype(u32 app_info_bitmap)
+{
+ u8 mfw_val = ECORE_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF_IEEE);
+
+ /* Old MFW */
+ if (mfw_val == DCBX_APP_SF_IEEE_RESERVED)
+ return ecore_dcbx_app_ethtype(app_info_bitmap);
+
+ return !!(mfw_val == DCBX_APP_SF_IEEE_ETHTYPE);
}
static bool ecore_dcbx_app_port(u32 app_info_bitmap)
{
- return (ECORE_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF) ==
- DCBX_APP_SF_PORT) ? true : false;
+ return !!(ECORE_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF) ==
+ DCBX_APP_SF_PORT);
}
static bool ecore_dcbx_ieee_app_port(u32 app_info_bitmap, u8 type)
@@ -45,100 +57,67 @@ static bool ecore_dcbx_ieee_app_port(u32 app_info_bitmap, u8 type)
if (mfw_val == DCBX_APP_SF_IEEE_RESERVED)
return ecore_dcbx_app_port(app_info_bitmap);
- return (mfw_val == type || mfw_val == DCBX_APP_SF_IEEE_TCP_UDP_PORT) ?
- true : false;
+ return !!(mfw_val == type || mfw_val == DCBX_APP_SF_IEEE_TCP_UDP_PORT);
}
-static bool ecore_dcbx_default_tlv(u32 app_info_bitmap, u16 proto_id)
+static bool ecore_dcbx_default_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee)
{
- return (ecore_dcbx_app_ethtype(app_info_bitmap) &&
- proto_id == ECORE_ETH_TYPE_DEFAULT) ? true : false;
-}
+ bool ethtype;
-static bool ecore_dcbx_enabled(u32 dcbx_cfg_bitmap)
-{
- return (ECORE_MFW_GET_FIELD(dcbx_cfg_bitmap, DCBX_CONFIG_VERSION) ==
- DCBX_CONFIG_VERSION_DISABLED) ? false : true;
-}
+ if (ieee)
+ ethtype = ecore_dcbx_ieee_app_ethtype(app_info_bitmap);
+ else
+ ethtype = ecore_dcbx_app_ethtype(app_info_bitmap);
-static bool ecore_dcbx_cee(u32 dcbx_cfg_bitmap)
-{
- return (ECORE_MFW_GET_FIELD(dcbx_cfg_bitmap, DCBX_CONFIG_VERSION) ==
- DCBX_CONFIG_VERSION_CEE) ? true : false;
+ return !!(ethtype && (proto_id == ECORE_ETH_TYPE_DEFAULT));
}
-static bool ecore_dcbx_ieee(u32 dcbx_cfg_bitmap)
+static bool ecore_dcbx_iwarp_tlv(struct ecore_hwfn *p_hwfn, u32 app_info_bitmap,
+ u16 proto_id, bool ieee)
{
- return (ECORE_MFW_GET_FIELD(dcbx_cfg_bitmap, DCBX_CONFIG_VERSION) ==
- DCBX_CONFIG_VERSION_IEEE) ? true : false;
-}
+ bool port;
-static bool ecore_dcbx_local(u32 dcbx_cfg_bitmap)
-{
- return (ECORE_MFW_GET_FIELD(dcbx_cfg_bitmap, DCBX_CONFIG_VERSION) ==
- DCBX_CONFIG_VERSION_STATIC) ? true : false;
-}
+ if (!p_hwfn->p_dcbx_info->iwarp_port)
+ return false;
-/* @@@TBD A0 Eagle workaround */
-void ecore_dcbx_eagle_workaround(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, bool set_to_pfc)
-{
- if (!ENABLE_EAGLE_ENG1_WORKAROUND(p_hwfn))
- return;
-
- ecore_wr(p_hwfn, p_ptt,
- YSEM_REG_FAST_MEMORY + 0x20000 /* RAM in FASTMEM */ +
- YSTORM_FLOW_CONTROL_MODE_OFFSET,
- set_to_pfc ? flow_ctrl_pfc : flow_ctrl_pause);
- ecore_wr(p_hwfn, p_ptt, NIG_REG_FLOWCTRL_MODE,
- EAGLE_ENG1_WORKAROUND_NIG_FLOWCTRL_MODE);
+ if (ieee)
+ port = ecore_dcbx_ieee_app_port(app_info_bitmap,
+ DCBX_APP_SF_IEEE_TCP_PORT);
+ else
+ port = ecore_dcbx_app_port(app_info_bitmap);
+
+ return !!(port && (proto_id == p_hwfn->p_dcbx_info->iwarp_port));
}
static void
ecore_dcbx_dp_protocol(struct ecore_hwfn *p_hwfn,
struct ecore_dcbx_results *p_data)
{
- struct ecore_hw_info *p_info = &p_hwfn->hw_info;
enum dcbx_protocol_type id;
- u8 prio, tc, size, update;
- bool enable;
- const char *name; /* @DPDK */
int i;
- size = OSAL_ARRAY_SIZE(ecore_dcbx_app_update);
-
- DP_INFO(p_hwfn, "DCBX negotiated: %d\n", p_data->dcbx_enabled);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "DCBX negotiated: %d\n",
+ p_data->dcbx_enabled);
- for (i = 0; i < size; i++) {
+ for (i = 0; i < OSAL_ARRAY_SIZE(ecore_dcbx_app_update); i++) {
id = ecore_dcbx_app_update[i].id;
- name = ecore_dcbx_app_update[i].name;
-
- enable = p_data->arr[id].enable;
- update = p_data->arr[id].update;
- tc = p_data->arr[id].tc;
- prio = p_data->arr[id].priority;
- DP_INFO(p_hwfn,
- "%s info: update %d, enable %d, prio %d, tc %d,"
- " num_active_tc %d dscp_enable = %d dscp_val = %d\n",
- name, update, enable, prio, tc, p_info->num_active_tc,
- p_data->arr[id].dscp_enable, p_data->arr[id].dscp_val);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
+ "%s info: update %d, enable %d, prio %d, tc %d,"
+ " num_active_tc %d dscp_enable = %d dscp_val = %d\n",
+ ecore_dcbx_app_update[i].name,
+ p_data->arr[id].update,
+ p_data->arr[id].enable, p_data->arr[id].priority,
+ p_data->arr[id].tc, p_hwfn->hw_info.num_active_tc,
+ p_data->arr[id].dscp_enable,
+ p_data->arr[id].dscp_val);
}
}
static void
-ecore_dcbx_set_pf_tcs(struct ecore_hw_info *p_info,
- u8 tc, enum ecore_pci_personality personality)
-{
- /* QM reconf data */
- if (p_info->personality == personality)
- p_info->offload_tc = tc;
-}
-
-void
ecore_dcbx_set_params(struct ecore_dcbx_results *p_data,
struct ecore_hwfn *p_hwfn,
- bool enable, bool update, u8 prio, u8 tc,
+ bool enable, u8 prio, u8 tc,
enum dcbx_protocol_type type,
enum ecore_pci_personality personality)
{
@@ -164,27 +143,26 @@ ecore_dcbx_set_params(struct ecore_dcbx_results *p_data,
else if (enable)
p_data->arr[type].update = UPDATE_DCB;
else
- p_data->arr[type].update = DONT_UPDATE_DCB_DHCP;
+ p_data->arr[type].update = DONT_UPDATE_DCB_DSCP;
- ecore_dcbx_set_pf_tcs(&p_hwfn->hw_info, tc, personality);
+ /* QM reconf data */
+ if (p_hwfn->hw_info.personality == personality)
+ p_hwfn->hw_info.offload_tc = tc;
}
/* Update app protocol data and hw_info fields with the TLV info */
static void
ecore_dcbx_update_app_info(struct ecore_dcbx_results *p_data,
struct ecore_hwfn *p_hwfn,
- bool enable, bool update, u8 prio, u8 tc,
+ bool enable, u8 prio, u8 tc,
enum dcbx_protocol_type type)
{
enum ecore_pci_personality personality;
enum dcbx_protocol_type id;
const char *name; /* @DPDK */
- u8 size;
int i;
- size = OSAL_ARRAY_SIZE(ecore_dcbx_app_update);
-
- for (i = 0; i < size; i++) {
+ for (i = 0; i < OSAL_ARRAY_SIZE(ecore_dcbx_app_update); i++) {
id = ecore_dcbx_app_update[i].id;
if (type != id)
@@ -193,7 +171,7 @@ ecore_dcbx_update_app_info(struct ecore_dcbx_results *p_data,
personality = ecore_dcbx_app_update[i].personality;
name = ecore_dcbx_app_update[i].name;
- ecore_dcbx_set_params(p_data, p_hwfn, enable, update,
+ ecore_dcbx_set_params(p_data, p_hwfn, enable,
prio, tc, type, personality);
}
}
@@ -232,20 +210,18 @@ ecore_dcbx_get_app_protocol_type(struct ecore_hwfn *p_hwfn,
u32 app_prio_bitmap, u16 id,
enum dcbx_protocol_type *type, bool ieee)
{
- bool status = false;
-
- if (ecore_dcbx_default_tlv(app_prio_bitmap, id)) {
+ if (ecore_dcbx_default_tlv(app_prio_bitmap, id, ieee)) {
*type = DCBX_PROTOCOL_ETH;
- status = true;
} else {
*type = DCBX_MAX_PROTOCOL_TYPE;
DP_ERR(p_hwfn,
"No action required, App TLV id = 0x%x"
" app_prio_bitmap = 0x%x\n",
id, app_prio_bitmap);
+ return false;
}
- return status;
+ return true;
}
/* Parse app TLV's to update TC information in hw_info structure for
@@ -257,14 +233,17 @@ ecore_dcbx_process_tlv(struct ecore_hwfn *p_hwfn,
struct dcbx_app_priority_entry *p_tbl, u32 pri_tc_tbl,
int count, u8 dcbx_version)
{
- enum _ecore_status_t rc = ECORE_SUCCESS;
- u8 tc, priority, priority_map;
enum dcbx_protocol_type type;
+ u8 tc, priority_map;
bool enable, ieee;
u16 protocol_id;
+ u8 priority;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
int i;
- DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "Num APP entries = %d\n", count);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
+ "Num APP entries = %d pri_tc_tbl = 0x%x dcbx_version = %u\n",
+ count, pri_tc_tbl, dcbx_version);
ieee = (dcbx_version == DCBX_CONFIG_VERSION_IEEE);
/* Parse APP TLV */
@@ -273,10 +252,12 @@ ecore_dcbx_process_tlv(struct ecore_hwfn *p_hwfn,
DCBX_APP_PROTOCOL_ID);
priority_map = ECORE_MFW_GET_FIELD(p_tbl[i].entry,
DCBX_APP_PRI_MAP);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "Id = 0x%x pri_map = %u\n",
+ protocol_id, priority_map);
rc = ecore_dcbx_get_app_priority(priority_map, &priority);
if (rc == ECORE_INVAL) {
DP_ERR(p_hwfn, "Invalid priority\n");
- return rc;
+ return ECORE_INVAL;
}
tc = ECORE_DCBX_PRIO2TC(pri_tc_tbl, priority);
@@ -289,9 +270,9 @@ ecore_dcbx_process_tlv(struct ecore_hwfn *p_hwfn,
* indication, but we only got here if there was an
* app tlv for the protocol, so dcbx must be enabled.
*/
- enable = (type == DCBX_PROTOCOL_ETH ? false : true);
+ enable = !(type == DCBX_PROTOCOL_ETH);
- ecore_dcbx_update_app_info(p_data, p_hwfn, enable, true,
+ ecore_dcbx_update_app_info(p_data, p_hwfn, enable,
priority, tc, type);
}
}
@@ -308,7 +289,7 @@ ecore_dcbx_process_tlv(struct ecore_hwfn *p_hwfn,
continue;
enable = (type == DCBX_PROTOCOL_ETH) ? false : !!dcbx_version;
- ecore_dcbx_update_app_info(p_data, p_hwfn, enable, true,
+ ecore_dcbx_update_app_info(p_data, p_hwfn, enable,
priority, tc, type);
}
@@ -350,6 +331,7 @@ ecore_dcbx_process_mib_info(struct ecore_hwfn *p_hwfn)
p_info->num_active_tc = ECORE_MFW_GET_FIELD(p_ets->flags,
DCBX_ETS_MAX_TCS);
+ p_hwfn->qm_info.ooo_tc = ECORE_MFW_GET_FIELD(p_ets->flags, DCBX_OOO_TC);
data.pf_id = p_hwfn->rel_pf_id;
data.dcbx_enabled = !!dcbx_version;
@@ -371,6 +353,9 @@ ecore_dcbx_copy_mib(struct ecore_hwfn *p_hwfn,
u32 prefix_seq_num, suffix_seq_num;
int read_count = 0;
+ /* The data is considered to be valid only if both sequence numbers are
+ * the same.
+ */
do {
if (type == ECORE_DCBX_REMOTE_LLDP_MIB) {
ecore_memcpy_from(p_hwfn, p_ptt, p_data->lldp_remote,
@@ -403,21 +388,20 @@ ecore_dcbx_copy_mib(struct ecore_hwfn *p_hwfn,
return rc;
}
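
/* Editor's sketch (illustrative, not part of the patch) of the sequence
 * number handshake the copy loop above relies on: the MFW increments a
 * prefix counter before rewriting a shared MIB and a suffix counter
 * afterwards, so a copied snapshot is coherent only when the two match.
 * The struct below is a placeholder, not the real shmem layout.
 */
struct mib_snapshot_sketch {
	u32 prefix_seq_num;
	u32 mib_data[32];
	u32 suffix_seq_num;
};

static bool mib_snapshot_is_coherent(const struct mib_snapshot_sketch *p_mib)
{
	/* A torn read shows prefix != suffix; callers re-read up to
	 * ECORE_DCBX_MAX_MIB_READ_TRY times before giving up.
	 */
	return p_mib->prefix_seq_num == p_mib->suffix_seq_num;
}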
-static enum _ecore_status_t
+static void
ecore_dcbx_get_priority_info(struct ecore_hwfn *p_hwfn,
struct ecore_dcbx_app_prio *p_prio,
struct ecore_dcbx_results *p_results)
{
- enum _ecore_status_t rc = ECORE_SUCCESS;
+ u8 val;
if (p_results->arr[DCBX_PROTOCOL_ETH].update &&
- p_results->arr[DCBX_PROTOCOL_ETH].enable) {
+ p_results->arr[DCBX_PROTOCOL_ETH].enable)
p_prio->eth = p_results->arr[DCBX_PROTOCOL_ETH].priority;
- DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
- "Priority: eth %d\n", p_prio->eth);
- }
- return rc;
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
+ "Priorities: eth %d\n",
+ p_prio->eth);
}
static void
@@ -508,8 +492,9 @@ ecore_dcbx_get_pfc_data(struct ecore_hwfn *p_hwfn,
p_params->pfc.prio[7] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_7);
DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
- "PFC params: willing %d, pfc_bitmap %d\n",
- p_params->pfc.willing, pfc_map);
+ "PFC params: willing %d, pfc_bitmap %u max_tc = %u enabled = %d\n",
+ p_params->pfc.willing, pfc_map, p_params->pfc.max_tc,
+ p_params->pfc.enabled);
}
static void
@@ -528,10 +513,10 @@ ecore_dcbx_get_ets_data(struct ecore_hwfn *p_hwfn,
p_params->max_ets_tc = ECORE_MFW_GET_FIELD(p_ets->flags,
DCBX_ETS_MAX_TCS);
DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
- "ETS params: willing %d, ets_cbs %d pri_tc_tbl_0 %x"
- " max_ets_tc %d\n",
- p_params->ets_willing, p_params->ets_cbs,
- p_ets->pri_tc_tbl[0], p_params->max_ets_tc);
+ "ETS params: willing %d, enabled = %d ets_cbs %d pri_tc_tbl_0 %x max_ets_tc %d\n",
+ p_params->ets_willing, p_params->ets_enabled,
+ p_params->ets_cbs, p_ets->pri_tc_tbl[0],
+ p_params->max_ets_tc);
/* 8 bit tsa and bw data corresponding to each of the 8 TC's are
* encoded in a type u32 array of size 2.
@@ -540,7 +525,7 @@ ecore_dcbx_get_ets_data(struct ecore_hwfn *p_hwfn,
bw_map[1] = OSAL_BE32_TO_CPU(p_ets->tc_bw_tbl[1]);
tsa_map[0] = OSAL_BE32_TO_CPU(p_ets->tc_tsa_tbl[0]);
tsa_map[1] = OSAL_BE32_TO_CPU(p_ets->tc_tsa_tbl[1]);
- pri_map = OSAL_BE32_TO_CPU(p_ets->pri_tc_tbl[0]);
+ pri_map = p_ets->pri_tc_tbl[0];
for (i = 0; i < ECORE_MAX_PFC_PRIORITIES; i++) {
p_params->ets_tc_bw_tbl[i] = ((u8 *)bw_map)[i];
p_params->ets_tc_tsa_tbl[i] = ((u8 *)tsa_map)[i];
@@ -552,7 +537,7 @@ ecore_dcbx_get_ets_data(struct ecore_hwfn *p_hwfn,
}
}
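
/* Editor's note (illustrative, not part of the patch): each u32 word of
 * tc_bw_tbl/tc_tsa_tbl carries one byte per TC, so after the
 * OSAL_BE32_TO_CPU() swap the eight bandwidth/TSA values can be read as
 * a flat byte array - bw = ((u8 *)bw_map)[tc] for tc in 0..7 - which is
 * exactly what the loop above does.
 */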
-static enum _ecore_status_t
+static void
ecore_dcbx_get_common_params(struct ecore_hwfn *p_hwfn,
struct dcbx_app_priority_feature *p_app,
struct dcbx_app_priority_entry *p_tbl,
@@ -563,60 +548,35 @@ ecore_dcbx_get_common_params(struct ecore_hwfn *p_hwfn,
ecore_dcbx_get_app_data(p_hwfn, p_app, p_tbl, p_params, ieee);
ecore_dcbx_get_ets_data(p_hwfn, p_ets, p_params);
ecore_dcbx_get_pfc_data(p_hwfn, pfc, p_params);
-
- return ECORE_SUCCESS;
}
-static enum _ecore_status_t
+static void
ecore_dcbx_get_local_params(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_dcbx_get *params)
{
- struct ecore_dcbx_admin_params *p_local;
- struct dcbx_app_priority_feature *p_app;
- struct dcbx_app_priority_entry *p_tbl;
- struct ecore_dcbx_params *p_data;
- struct dcbx_ets_feature *p_ets;
- u32 pfc;
-
- p_local = &params->local;
- p_data = &p_local->params;
- p_app = &p_hwfn->p_dcbx_info->local_admin.features.app;
- p_tbl = p_app->app_pri_tbl;
- p_ets = &p_hwfn->p_dcbx_info->local_admin.features.ets;
- pfc = p_hwfn->p_dcbx_info->local_admin.features.pfc;
-
- ecore_dcbx_get_common_params(p_hwfn, p_app, p_tbl, p_ets, pfc, p_data,
- false);
- p_local->valid = true;
+ struct dcbx_features *p_feat;
- return ECORE_SUCCESS;
+ p_feat = &p_hwfn->p_dcbx_info->local_admin.features;
+ ecore_dcbx_get_common_params(p_hwfn, &p_feat->app,
+ p_feat->app.app_pri_tbl, &p_feat->ets,
+ p_feat->pfc, &params->local.params, false);
+ params->local.valid = true;
}
-static enum _ecore_status_t
+static void
ecore_dcbx_get_remote_params(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_dcbx_get *params)
{
- struct ecore_dcbx_remote_params *p_remote;
- struct dcbx_app_priority_feature *p_app;
- struct dcbx_app_priority_entry *p_tbl;
- struct ecore_dcbx_params *p_data;
- struct dcbx_ets_feature *p_ets;
- u32 pfc;
-
- p_remote = &params->remote;
- p_data = &p_remote->params;
- p_app = &p_hwfn->p_dcbx_info->remote.features.app;
- p_tbl = p_app->app_pri_tbl;
- p_ets = &p_hwfn->p_dcbx_info->remote.features.ets;
- pfc = p_hwfn->p_dcbx_info->remote.features.pfc;
+ struct dcbx_features *p_feat;
- ecore_dcbx_get_common_params(p_hwfn, p_app, p_tbl, p_ets, pfc, p_data,
+ p_feat = &p_hwfn->p_dcbx_info->remote.features;
+ ecore_dcbx_get_common_params(p_hwfn, &p_feat->app,
+ p_feat->app.app_pri_tbl, &p_feat->ets,
+ p_feat->pfc, &params->remote.params,
false);
- p_remote->valid = true;
-
- return ECORE_SUCCESS;
+ params->remote.valid = true;
}
static enum _ecore_status_t
@@ -625,14 +585,11 @@ ecore_dcbx_get_operational_params(struct ecore_hwfn *p_hwfn,
struct ecore_dcbx_get *params)
{
struct ecore_dcbx_operational_params *p_operational;
- enum _ecore_status_t rc = ECORE_SUCCESS;
- struct dcbx_app_priority_feature *p_app;
- struct dcbx_app_priority_entry *p_tbl;
struct ecore_dcbx_results *p_results;
- struct ecore_dcbx_params *p_data;
- struct dcbx_ets_feature *p_ets;
+ struct dcbx_features *p_feat;
bool enabled, err;
- u32 pfc, flags;
+ u32 flags;
+ bool val;
flags = p_hwfn->p_dcbx_info->operational.flags;
@@ -640,42 +597,50 @@ ecore_dcbx_get_operational_params(struct ecore_hwfn *p_hwfn,
* was successfully performed
*/
p_operational = &params->operational;
- enabled = ecore_dcbx_enabled(flags);
+ enabled = !!(ECORE_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION) !=
+ DCBX_CONFIG_VERSION_DISABLED);
if (!enabled) {
p_operational->enabled = enabled;
p_operational->valid = false;
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "Dcbx is disabled\n");
return ECORE_INVAL;
}
- p_data = &p_operational->params;
+ p_feat = &p_hwfn->p_dcbx_info->operational.features;
p_results = &p_hwfn->p_dcbx_info->results;
- p_app = &p_hwfn->p_dcbx_info->operational.features.app;
- p_tbl = p_app->app_pri_tbl;
- p_ets = &p_hwfn->p_dcbx_info->operational.features.ets;
- pfc = p_hwfn->p_dcbx_info->operational.features.pfc;
- p_operational->ieee = ecore_dcbx_ieee(flags);
- p_operational->cee = ecore_dcbx_cee(flags);
- p_operational->local = ecore_dcbx_local(flags);
+ val = !!(ECORE_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION) ==
+ DCBX_CONFIG_VERSION_IEEE);
+ p_operational->ieee = val;
+
+ val = !!(ECORE_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION) ==
+ DCBX_CONFIG_VERSION_CEE);
+ p_operational->cee = val;
+
+ val = !!(ECORE_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION) ==
+ DCBX_CONFIG_VERSION_STATIC);
+ p_operational->local = val;
DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
"Version support: ieee %d, cee %d, static %d\n",
p_operational->ieee, p_operational->cee,
p_operational->local);
- ecore_dcbx_get_common_params(p_hwfn, p_app, p_tbl, p_ets, pfc, p_data,
+ ecore_dcbx_get_common_params(p_hwfn, &p_feat->app,
+ p_feat->app.app_pri_tbl, &p_feat->ets,
+ p_feat->pfc, &params->operational.params,
p_operational->ieee);
ecore_dcbx_get_priority_info(p_hwfn, &p_operational->app_prio,
p_results);
- err = ECORE_MFW_GET_FIELD(p_app->flags, DCBX_APP_ERROR);
+ err = ECORE_MFW_GET_FIELD(p_feat->app.flags, DCBX_APP_ERROR);
p_operational->err = err;
p_operational->enabled = enabled;
p_operational->valid = true;
- return rc;
+ return ECORE_SUCCESS;
}
-static enum _ecore_status_t
+static void
ecore_dcbx_get_dscp_params(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_dcbx_get *params)
@@ -700,62 +665,46 @@ ecore_dcbx_get_dscp_params(struct ecore_hwfn *p_hwfn,
p_dscp->dscp_pri_map[entry] = (u32)(pri_map >>
(j * 4)) & 0xf;
}
-
- return ECORE_SUCCESS;
}
-static enum _ecore_status_t
+static void
ecore_dcbx_get_local_lldp_params(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_dcbx_get *params)
{
- struct ecore_dcbx_lldp_local *p_local;
- osal_size_t size;
- u32 *dest;
-
- p_local = &params->lldp_local;
-
- size = OSAL_ARRAY_SIZE(p_local->local_chassis_id);
- dest = p_hwfn->p_dcbx_info->get.lldp_local.local_chassis_id;
- OSAL_MEMCPY(dest, p_local->local_chassis_id, size);
+ struct lldp_config_params_s *p_local;
- size = OSAL_ARRAY_SIZE(p_local->local_port_id);
- dest = p_hwfn->p_dcbx_info->get.lldp_local.local_port_id;
- OSAL_MEMCPY(dest, p_local->local_port_id, size);
+ p_local = &p_hwfn->p_dcbx_info->lldp_local[LLDP_NEAREST_BRIDGE];
- return ECORE_SUCCESS;
+ OSAL_MEMCPY(params->lldp_local.local_chassis_id,
+ p_local->local_chassis_id,
+ OSAL_ARRAY_SIZE(p_local->local_chassis_id));
+ OSAL_MEMCPY(params->lldp_local.local_port_id, p_local->local_port_id,
+ OSAL_ARRAY_SIZE(p_local->local_port_id));
}
-static enum _ecore_status_t
+static void
ecore_dcbx_get_remote_lldp_params(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_dcbx_get *params)
{
- struct ecore_dcbx_lldp_remote *p_remote;
- osal_size_t size;
- u32 *dest;
-
- p_remote = &params->lldp_remote;
+ struct lldp_status_params_s *p_remote;
- size = OSAL_ARRAY_SIZE(p_remote->peer_chassis_id);
- dest = p_hwfn->p_dcbx_info->get.lldp_remote.peer_chassis_id;
- OSAL_MEMCPY(dest, p_remote->peer_chassis_id, size);
+ p_remote = &p_hwfn->p_dcbx_info->lldp_remote[LLDP_NEAREST_BRIDGE];
- size = OSAL_ARRAY_SIZE(p_remote->peer_port_id);
- dest = p_hwfn->p_dcbx_info->get.lldp_remote.peer_port_id;
- OSAL_MEMCPY(dest, p_remote->peer_port_id, size);
-
- return ECORE_SUCCESS;
+ OSAL_MEMCPY(params->lldp_remote.peer_chassis_id,
+ p_remote->peer_chassis_id,
+ OSAL_ARRAY_SIZE(p_remote->peer_chassis_id));
+ OSAL_MEMCPY(params->lldp_remote.peer_port_id, p_remote->peer_port_id,
+ OSAL_ARRAY_SIZE(p_remote->peer_port_id));
}
static enum _ecore_status_t
-ecore_dcbx_get_params(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, enum ecore_mib_read_type type)
+ecore_dcbx_get_params(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_dcbx_get *p_params,
+ enum ecore_mib_read_type type)
{
enum _ecore_status_t rc = ECORE_SUCCESS;
- struct ecore_dcbx_get *p_params;
-
- p_params = &p_hwfn->p_dcbx_info->get;
switch (type) {
case ECORE_DCBX_REMOTE_MIB:
@@ -768,10 +717,10 @@ ecore_dcbx_get_params(struct ecore_hwfn *p_hwfn,
ecore_dcbx_get_operational_params(p_hwfn, p_ptt, p_params);
break;
case ECORE_DCBX_REMOTE_LLDP_MIB:
- rc = ecore_dcbx_get_remote_lldp_params(p_hwfn, p_ptt, p_params);
+ ecore_dcbx_get_remote_lldp_params(p_hwfn, p_ptt, p_params);
break;
case ECORE_DCBX_LOCAL_LLDP_MIB:
- rc = ecore_dcbx_get_local_lldp_params(p_hwfn, p_ptt, p_params);
+ ecore_dcbx_get_local_lldp_params(p_hwfn, p_ptt, p_params);
break;
default:
DP_ERR(p_hwfn, "MIB read err, unknown mib type %d\n", type);
@@ -785,9 +734,10 @@ static enum _ecore_status_t
ecore_dcbx_read_local_lldp_mib(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
- enum _ecore_status_t rc = ECORE_SUCCESS;
struct ecore_dcbx_mib_meta_data data;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ OSAL_MEM_ZERO(&data, sizeof(data));
data.addr = p_hwfn->mcp_info->port_addr + offsetof(struct public_port,
lldp_config_params);
data.lldp_local = p_hwfn->p_dcbx_info->lldp_local;
@@ -802,8 +752,8 @@ ecore_dcbx_read_remote_lldp_mib(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
enum ecore_mib_read_type type)
{
- enum _ecore_status_t rc = ECORE_SUCCESS;
struct ecore_dcbx_mib_meta_data data;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
OSAL_MEM_ZERO(&data, sizeof(data));
data.addr = p_hwfn->mcp_info->port_addr + offsetof(struct public_port,
@@ -857,6 +807,7 @@ ecore_dcbx_read_local_mib(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
struct ecore_dcbx_mib_meta_data data;
enum _ecore_status_t rc = ECORE_SUCCESS;
+ OSAL_MEM_ZERO(&data, sizeof(data));
data.addr = p_hwfn->mcp_info->port_addr +
offsetof(struct public_port, local_admin_dcbx_mib);
data.local_admin = &p_hwfn->p_dcbx_info->local_admin;
@@ -883,7 +834,7 @@ static enum _ecore_status_t ecore_dcbx_read_mib(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
enum ecore_mib_read_type type)
{
- enum _ecore_status_t rc = ECORE_SUCCESS;
+ enum _ecore_status_t rc = ECORE_INVAL;
switch (type) {
case ECORE_DCBX_OPERATIONAL_MIB:
@@ -904,7 +855,6 @@ static enum _ecore_status_t ecore_dcbx_read_mib(struct ecore_hwfn *p_hwfn,
break;
default:
DP_ERR(p_hwfn, "MIB read err, unknown mib type %d\n", type);
- return ECORE_INVAL;
}
return rc;
@@ -945,10 +895,9 @@ ecore_dcbx_mib_update_event(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
* according to negotiation results
*/
enabled = p_hwfn->p_dcbx_info->results.dcbx_enabled;
- ecore_dcbx_eagle_workaround(p_hwfn, p_ptt, enabled);
}
}
- ecore_dcbx_get_params(p_hwfn, p_ptt, type);
+ ecore_dcbx_get_params(p_hwfn, p_ptt, &p_hwfn->p_dcbx_info->get, type);
/* Update the DSCP to TC mapping bit if required */
if ((type == ECORE_DCBX_OPERATIONAL_MIB) &&
@@ -964,17 +913,18 @@ ecore_dcbx_mib_update_event(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
enum _ecore_status_t ecore_dcbx_info_alloc(struct ecore_hwfn *p_hwfn)
{
- enum _ecore_status_t rc = ECORE_SUCCESS;
-
p_hwfn->p_dcbx_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
- sizeof(struct ecore_dcbx_info));
+ sizeof(*p_hwfn->p_dcbx_info));
if (!p_hwfn->p_dcbx_info) {
DP_NOTICE(p_hwfn, true,
"Failed to allocate `struct ecore_dcbx_info'");
- rc = ECORE_NOMEM;
+ return ECORE_NOMEM;
}
- return rc;
+ p_hwfn->p_dcbx_info->iwarp_port =
+ p_hwfn->pf_params.rdma_pf_params.iwarp_port;
+
+ return ECORE_SUCCESS;
}
void ecore_dcbx_info_free(struct ecore_hwfn *p_hwfn,
@@ -999,24 +949,31 @@ void ecore_dcbx_set_pf_update_params(struct ecore_dcbx_results *p_src,
struct pf_update_ramrod_data *p_dest)
{
struct protocol_dcb_data *p_dcb_data;
- bool update_flag = false;
+ u8 update_flag;
p_dest->pf_id = p_src->pf_id;
update_flag = p_src->arr[DCBX_PROTOCOL_ETH].update;
- p_dest->update_eth_dcb_data_flag = update_flag;
+ p_dest->update_eth_dcb_data_mode = update_flag;
+ update_flag = p_src->arr[DCBX_PROTOCOL_IWARP].update;
+ p_dest->update_iwarp_dcb_data_mode = update_flag;
p_dcb_data = &p_dest->eth_dcb_data;
ecore_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_ETH);
+ p_dcb_data = &p_dest->iwarp_dcb_data;
+ ecore_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_IWARP);
}
-static
-enum _ecore_status_t ecore_dcbx_query(struct ecore_hwfn *p_hwfn,
- enum ecore_mib_read_type type)
+enum _ecore_status_t ecore_dcbx_query_params(struct ecore_hwfn *p_hwfn,
+ struct ecore_dcbx_get *p_get,
+ enum ecore_mib_read_type type)
{
struct ecore_ptt *p_ptt;
enum _ecore_status_t rc;
+ if (IS_VF(p_hwfn->p_dev))
+ return ECORE_INVAL;
+
p_ptt = ecore_ptt_acquire(p_hwfn);
if (!p_ptt) {
rc = ECORE_TIMEOUT;
@@ -1028,30 +985,13 @@ enum _ecore_status_t ecore_dcbx_query(struct ecore_hwfn *p_hwfn,
if (rc != ECORE_SUCCESS)
goto out;
- rc = ecore_dcbx_get_params(p_hwfn, p_ptt, type);
+ rc = ecore_dcbx_get_params(p_hwfn, p_ptt, p_get, type);
out:
ecore_ptt_release(p_hwfn, p_ptt);
return rc;
}
-enum _ecore_status_t ecore_dcbx_query_params(struct ecore_hwfn *p_hwfn,
- struct ecore_dcbx_get *p_get,
- enum ecore_mib_read_type type)
-{
- enum _ecore_status_t rc;
-
- rc = ecore_dcbx_query(p_hwfn, type);
- if (rc)
- return rc;
-
- if (p_get != OSAL_NULL)
- OSAL_MEMCPY(p_get, &p_hwfn->p_dcbx_info->get,
- sizeof(struct ecore_dcbx_get));
-
- return rc;
-}
-
static void
ecore_dcbx_set_pfc_data(struct ecore_hwfn *p_hwfn,
u32 *pfc, struct ecore_dcbx_params *p_params)
@@ -1074,8 +1014,8 @@ ecore_dcbx_set_pfc_data(struct ecore_hwfn *p_hwfn,
for (i = 0; i < ECORE_MAX_PFC_PRIORITIES; i++)
if (p_params->pfc.prio[i])
- pfc_map |= (0x1 << i);
-
+ pfc_map |= (1 << i);
+ *pfc &= ~DCBX_PFC_PRI_EN_BITMAP_MASK;
*pfc |= (pfc_map << DCBX_PFC_PRI_EN_BITMAP_SHIFT);
DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "pfc = 0x%x\n", *pfc);
@@ -1087,6 +1027,7 @@ ecore_dcbx_set_ets_data(struct ecore_hwfn *p_hwfn,
struct ecore_dcbx_params *p_params)
{
u8 *bw_map, *tsa_map;
+ u32 val;
int i;
if (p_params->ets_willing)
@@ -1113,14 +1054,22 @@ ecore_dcbx_set_ets_data(struct ecore_hwfn *p_hwfn,
for (i = 0; i < ECORE_MAX_PFC_PRIORITIES; i++) {
bw_map[i] = p_params->ets_tc_bw_tbl[i];
tsa_map[i] = p_params->ets_tc_tsa_tbl[i];
- p_ets->pri_tc_tbl[0] |= (((u32)p_params->ets_pri_tc_tbl[i]) <<
- ((7 - i) * 4));
+ /* Copy the priority value to the corresponding 4 bits in the
+ * traffic class table.
+ */
+ val = (((u32)p_params->ets_pri_tc_tbl[i]) << ((7 - i) * 4));
+ p_ets->pri_tc_tbl[0] |= val;
}
- p_ets->pri_tc_tbl[0] = OSAL_CPU_TO_BE32(p_ets->pri_tc_tbl[0]);
for (i = 0; i < 2; i++) {
p_ets->tc_bw_tbl[i] = OSAL_CPU_TO_BE32(p_ets->tc_bw_tbl[i]);
p_ets->tc_tsa_tbl[i] = OSAL_CPU_TO_BE32(p_ets->tc_tsa_tbl[i]);
}
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
+ "flags = 0x%x pri_tc = 0x%x tc_bwl[] = {0x%x, 0x%x} tc_tsa = {0x%x, 0x%x}\n",
+ p_ets->flags, p_ets->pri_tc_tbl[0], p_ets->tc_bw_tbl[0],
+ p_ets->tc_bw_tbl[1], p_ets->tc_tsa_tbl[0],
+ p_ets->tc_tsa_tbl[1]);
}
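
/* Editor's worked example (not part of the patch): the loop above places
 * priority i in bits [(7 - i) * 4 + 3 : (7 - i) * 4] of pri_tc_tbl[0],
 * i.e. priority 0 occupies the top nibble. The packing is equivalent to
 * the sketch below; mapping priorities {0..7} to TCs {0, 0, 1, 1, 2, 2,
 * 3, 3} yields pri_tc_tbl[0] = 0x00112233.
 */
static u32 ecore_dcbx_pack_pri_tc_sketch(const u8 pri_tc[8])
{
	u32 val = 0;
	int pri;

	for (pri = 0; pri < 8; pri++)
		val |= ((u32)pri_tc[pri]) << ((7 - pri) * 4);

	return val;
}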
static void
@@ -1147,24 +1096,33 @@ ecore_dcbx_set_app_data(struct ecore_hwfn *p_hwfn,
for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) {
entry = &p_app->app_pri_tbl[i].entry;
+ *entry = 0;
if (ieee) {
- *entry &= ~DCBX_APP_SF_IEEE_MASK;
+ *entry &= ~(DCBX_APP_SF_IEEE_MASK | DCBX_APP_SF_MASK);
switch (p_params->app_entry[i].sf_ieee) {
case ECORE_DCBX_SF_IEEE_ETHTYPE:
*entry |= ((u32)DCBX_APP_SF_IEEE_ETHTYPE <<
DCBX_APP_SF_IEEE_SHIFT);
+ *entry |= ((u32)DCBX_APP_SF_ETHTYPE <<
+ DCBX_APP_SF_SHIFT);
break;
case ECORE_DCBX_SF_IEEE_TCP_PORT:
*entry |= ((u32)DCBX_APP_SF_IEEE_TCP_PORT <<
DCBX_APP_SF_IEEE_SHIFT);
+ *entry |= ((u32)DCBX_APP_SF_PORT <<
+ DCBX_APP_SF_SHIFT);
break;
case ECORE_DCBX_SF_IEEE_UDP_PORT:
*entry |= ((u32)DCBX_APP_SF_IEEE_UDP_PORT <<
DCBX_APP_SF_IEEE_SHIFT);
+ *entry |= ((u32)DCBX_APP_SF_PORT <<
+ DCBX_APP_SF_SHIFT);
break;
case ECORE_DCBX_SF_IEEE_TCP_UDP_PORT:
*entry |= (u32)DCBX_APP_SF_IEEE_TCP_UDP_PORT <<
DCBX_APP_SF_IEEE_SHIFT;
+ *entry |= ((u32)DCBX_APP_SF_PORT <<
+ DCBX_APP_SF_SHIFT);
break;
}
} else {
@@ -1183,6 +1141,8 @@ ecore_dcbx_set_app_data(struct ecore_hwfn *p_hwfn,
*entry |= ((u32)(p_params->app_entry[i].prio) <<
DCBX_APP_PRI_MAP_SHIFT);
}
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "flags = 0x%x\n", p_app->flags);
}
static enum _ecore_status_t
@@ -1195,7 +1155,7 @@ ecore_dcbx_set_local_params(struct ecore_hwfn *p_hwfn,
local_admin->flags = 0;
OSAL_MEMCPY(&local_admin->features,
&p_hwfn->p_dcbx_info->operational.features,
- sizeof(struct dcbx_features));
+ sizeof(local_admin->features));
if (params->enabled) {
local_admin->config = params->ver_num;
@@ -1230,10 +1190,9 @@ ecore_dcbx_set_dscp_params(struct ecore_hwfn *p_hwfn,
OSAL_MEMCPY(p_dscp_map, &p_hwfn->p_dcbx_info->dscp_map,
sizeof(*p_dscp_map));
+ p_dscp_map->flags &= ~DCB_DSCP_ENABLE_MASK;
if (p_params->dscp.enabled)
p_dscp_map->flags |= DCB_DSCP_ENABLE_MASK;
- else
- p_dscp_map->flags &= ~DCB_DSCP_ENABLE_MASK;
for (i = 0, entry = 0; i < 8; i++) {
val = 0;
@@ -1246,6 +1205,8 @@ ecore_dcbx_set_dscp_params(struct ecore_hwfn *p_hwfn,
p_hwfn->p_dcbx_info->dscp_nig_update = true;
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "flags = 0x%x\n", p_dscp_map->flags);
+
return ECORE_SUCCESS;
}
@@ -1254,15 +1215,15 @@ enum _ecore_status_t ecore_dcbx_config_params(struct ecore_hwfn *p_hwfn,
struct ecore_dcbx_set *params,
bool hw_commit)
{
- enum _ecore_status_t rc = ECORE_SUCCESS;
- struct ecore_dcbx_mib_meta_data data;
struct dcbx_local_params local_admin;
+ struct ecore_dcbx_mib_meta_data data;
struct dcb_dscp_map dscp_map;
u32 resp = 0, param = 0;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
if (!hw_commit) {
OSAL_MEMCPY(&p_hwfn->p_dcbx_info->set, params,
- sizeof(struct ecore_dcbx_set));
+ sizeof(p_hwfn->p_dcbx_info->set));
return ECORE_SUCCESS;
}
@@ -1315,12 +1276,13 @@ enum _ecore_status_t ecore_dcbx_get_config_params(struct ecore_hwfn *p_hwfn,
}
dcbx_info = OSAL_ALLOC(p_hwfn->p_dev, GFP_KERNEL,
- sizeof(struct ecore_dcbx_get));
+ sizeof(*dcbx_info));
if (!dcbx_info) {
DP_ERR(p_hwfn, "Failed to allocate struct ecore_dcbx_info\n");
return ECORE_NOMEM;
}
+ OSAL_MEMSET(dcbx_info, 0, sizeof(*dcbx_info));
rc = ecore_dcbx_query_params(p_hwfn, dcbx_info,
ECORE_DCBX_OPERATIONAL_MIB);
if (rc) {
diff --git a/drivers/net/qede/base/ecore_dcbx.h b/drivers/net/qede/base/ecore_dcbx.h
index 15186246..eba2d91b 100644
--- a/drivers/net/qede/base/ecore_dcbx.h
+++ b/drivers/net/qede/base/ecore_dcbx.h
@@ -17,9 +17,6 @@
#include "ecore_hsi_common.h"
#include "ecore_dcbx_api.h"
-#define ECORE_MFW_GET_FIELD(name, field) \
- (((name) & (field ## _MASK)) >> (field ## _SHIFT))
-
struct ecore_dcbx_info {
struct lldp_status_params_s lldp_remote[LLDP_MAX_LLDP_AGENTS];
struct lldp_config_params_s lldp_local[LLDP_MAX_LLDP_AGENTS];
@@ -32,6 +29,7 @@ struct ecore_dcbx_info {
struct ecore_dcbx_set set;
struct ecore_dcbx_get get;
u8 dcbx_cap;
+ u16 iwarp_port;
};
struct ecore_dcbx_mib_meta_data {
@@ -56,10 +54,4 @@ void ecore_dcbx_info_free(struct ecore_hwfn *, struct ecore_dcbx_info *);
void ecore_dcbx_set_pf_update_params(struct ecore_dcbx_results *p_src,
struct pf_update_ramrod_data *p_dest);
-#ifndef REAL_ASIC_ONLY
-/* @@@TBD eagle phy workaround */
-void ecore_dcbx_eagle_workaround(struct ecore_hwfn *, struct ecore_ptt *,
- bool set_to_pfc);
-#endif
-
#endif /* __ECORE_DCBX_H__ */
diff --git a/drivers/net/qede/base/ecore_dcbx_api.h b/drivers/net/qede/base/ecore_dcbx_api.h
index 82416e7f..2dc76796 100644
--- a/drivers/net/qede/base/ecore_dcbx_api.h
+++ b/drivers/net/qede/base/ecore_dcbx_api.h
@@ -37,6 +37,7 @@ enum dcbx_protocol_type {
DCBX_PROTOCOL_ROCE,
DCBX_PROTOCOL_ROCE_V2,
DCBX_PROTOCOL_ETH,
+ DCBX_PROTOCOL_IWARP,
DCBX_MAX_PROTOCOL_TYPE
};
@@ -147,6 +148,7 @@ struct ecore_dcbx_get {
#define ECORE_DCBX_VERSION_DISABLED 0
#define ECORE_DCBX_VERSION_IEEE 1
#define ECORE_DCBX_VERSION_CEE 2
+#define ECORE_DCBX_VERSION_DYNAMIC 3
struct ecore_dcbx_set {
#define ECORE_DCBX_OVERRIDE_STATE (1 << 0)
@@ -190,7 +192,8 @@ static const struct ecore_dcbx_app_metadata ecore_dcbx_app_update[] = {
{DCBX_PROTOCOL_FCOE, "FCOE", ECORE_PCI_FCOE},
{DCBX_PROTOCOL_ROCE, "ROCE", ECORE_PCI_ETH_ROCE},
{DCBX_PROTOCOL_ROCE_V2, "ROCE_V2", ECORE_PCI_ETH_ROCE},
- {DCBX_PROTOCOL_ETH, "ETH", ECORE_PCI_ETH}
+ {DCBX_PROTOCOL_ETH, "ETH", ECORE_PCI_ETH},
+ {DCBX_PROTOCOL_IWARP, "IWARP", ECORE_PCI_ETH_IWARP}
};
#endif /* __ECORE_DCBX_API_H__ */
diff --git a/drivers/net/qede/base/ecore_dev.c b/drivers/net/qede/base/ecore_dev.c
index 6060f9ee..865103c6 100644
--- a/drivers/net/qede/base/ecore_dev.c
+++ b/drivers/net/qede/base/ecore_dev.c
@@ -30,6 +30,7 @@
#include "nvm_cfg.h"
#include "ecore_dev_api.h"
#include "ecore_dcbx.h"
+#include "ecore_l2.h"
/* TODO - there's a bug in DCBx re-configuration flows in MF, as the QM
* registers involved are not split and thus configuration is a race where
@@ -70,28 +71,26 @@ static u32 ecore_hw_bar_size(struct ecore_hwfn *p_hwfn, enum BAR_ID bar_id)
}
val = ecore_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);
+ if (val)
+ return 1 << (val + 15);
/* The above registers were updated in the past only in CMT mode. Since
* they were found to be useful MFW started updating them from 8.7.7.0.
* In older MFW versions they are set to 0 which means disabled.
*/
- if (!val) {
- if (p_hwfn->p_dev->num_hwfns > 1) {
- DP_NOTICE(p_hwfn, false,
- "BAR size not configured. Assuming BAR size");
- DP_NOTICE(p_hwfn, false,
- "of 256kB for GRC and 512kB for DB\n");
- return BAR_ID_0 ? 256 * 1024 : 512 * 1024;
- } else {
- DP_NOTICE(p_hwfn, false,
- "BAR size not configured. Assuming BAR size");
- DP_NOTICE(p_hwfn, false,
- "of 512kB for GRC and 512kB for DB\n");
- return 512 * 1024;
- }
+ if (p_hwfn->p_dev->num_hwfns > 1) {
+ DP_NOTICE(p_hwfn, false,
+ "BAR size not configured. Assuming BAR size of 256kB"
+ " for GRC and 512kB for DB\n");
+		val = (bar_id == BAR_ID_0) ? 256 * 1024 : 512 * 1024;
+ } else {
+ DP_NOTICE(p_hwfn, false,
+ "BAR size not configured. Assuming BAR size of 512kB"
+ " for GRC and 512kB for DB\n");
+ val = 512 * 1024;
}
- return 1 << (val + 15);
+ return val;
}
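
/* Editor's note (illustrative, not part of the patch): the register
 * encodes the BAR size as a power of two in 32kB units, hence the
 * "1 << (val + 15)" above - val = 3 yields 1 << 18 = 256kB and val = 4
 * yields 1 << 19 = 512kB - while val = 0 means "not configured" and
 * falls back to the defaults printed in the notices.
 */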
void ecore_init_dp(struct ecore_dev *p_dev,
@@ -138,335 +137,620 @@ static void ecore_qm_info_free(struct ecore_hwfn *p_hwfn)
struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
OSAL_FREE(p_hwfn->p_dev, qm_info->qm_pq_params);
- qm_info->qm_pq_params = OSAL_NULL;
OSAL_FREE(p_hwfn->p_dev, qm_info->qm_vport_params);
- qm_info->qm_vport_params = OSAL_NULL;
OSAL_FREE(p_hwfn->p_dev, qm_info->qm_port_params);
- qm_info->qm_port_params = OSAL_NULL;
OSAL_FREE(p_hwfn->p_dev, qm_info->wfq_data);
- qm_info->wfq_data = OSAL_NULL;
}
void ecore_resc_free(struct ecore_dev *p_dev)
{
int i;
- if (IS_VF(p_dev))
+ if (IS_VF(p_dev)) {
+ for_each_hwfn(p_dev, i)
+ ecore_l2_free(&p_dev->hwfns[i]);
return;
+ }
OSAL_FREE(p_dev, p_dev->fw_data);
- p_dev->fw_data = OSAL_NULL;
OSAL_FREE(p_dev, p_dev->reset_stats);
for_each_hwfn(p_dev, i) {
struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
- OSAL_FREE(p_dev, p_hwfn->p_tx_cids);
- p_hwfn->p_tx_cids = OSAL_NULL;
- OSAL_FREE(p_dev, p_hwfn->p_rx_cids);
- p_hwfn->p_rx_cids = OSAL_NULL;
- }
-
- for_each_hwfn(p_dev, i) {
- struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
-
ecore_cxt_mngr_free(p_hwfn);
ecore_qm_info_free(p_hwfn);
ecore_spq_free(p_hwfn);
- ecore_eq_free(p_hwfn, p_hwfn->p_eq);
- ecore_consq_free(p_hwfn, p_hwfn->p_consq);
+ ecore_eq_free(p_hwfn);
+ ecore_consq_free(p_hwfn);
ecore_int_free(p_hwfn);
-#ifdef CONFIG_ECORE_LL2
- ecore_ll2_free(p_hwfn, p_hwfn->p_ll2_info);
-#endif
ecore_iov_free(p_hwfn);
+ ecore_l2_free(p_hwfn);
ecore_dmae_info_free(p_hwfn);
ecore_dcbx_info_free(p_hwfn, p_hwfn->p_dcbx_info);
/* @@@TBD Flush work-queue ? */
}
}
-static enum _ecore_status_t ecore_init_qm_info(struct ecore_hwfn *p_hwfn,
- bool b_sleepable)
+/******************** QM initialization *******************/
+
+/* bitmaps for indicating active traffic classes.
+ * Special case for Arrowhead 4 port
+ */
+/* 0..3 actually used, 4 serves OOO, 7 serves high priority stuff (e.g. DCQCN) */
+#define ACTIVE_TCS_BMAP 0x9f
+/* 0..3 actually used, OOO and high priority stuff all use 3 */
+#define ACTIVE_TCS_BMAP_4PORT_K2 0xf
+
+/* determines the physical queue flags for a given PF. */
+static u32 ecore_get_pq_flags(struct ecore_hwfn *p_hwfn)
{
- u8 num_vports, vf_offset = 0, i, vport_id, num_ports, curr_queue;
- struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
- struct init_qm_port_params *p_qm_port;
- bool init_rdma_offload_pq = false;
- bool init_pure_ack_pq = false;
- bool init_ooo_pq = false;
- u16 num_pqs, protocol_pqs;
- u16 num_pf_rls = 0;
- u16 num_vfs = 0;
- u32 pf_rl;
- u8 pf_wfq;
-
- /* @TMP - saving the existing min/max bw config before resetting the
- * qm_info to restore them.
- */
- pf_rl = qm_info->pf_rl;
- pf_wfq = qm_info->pf_wfq;
+ u32 flags;
-#ifdef CONFIG_ECORE_SRIOV
- if (p_hwfn->p_dev->p_iov_info)
- num_vfs = p_hwfn->p_dev->p_iov_info->total_vfs;
-#endif
- OSAL_MEM_ZERO(qm_info, sizeof(*qm_info));
+ /* common flags */
+ flags = PQ_FLAGS_LB;
-#ifndef ASIC_ONLY
- /* @TMP - Don't allocate QM queues for VFs on emulation */
- if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
- DP_NOTICE(p_hwfn, false,
- "Emulation - skip configuring QM queues for VFs\n");
- num_vfs = 0;
+ /* feature flags */
+ if (IS_ECORE_SRIOV(p_hwfn->p_dev))
+ flags |= PQ_FLAGS_VFS;
+
+ /* protocol flags */
+ switch (p_hwfn->hw_info.personality) {
+ case ECORE_PCI_ETH:
+ flags |= PQ_FLAGS_MCOS;
+ break;
+ case ECORE_PCI_FCOE:
+ flags |= PQ_FLAGS_OFLD;
+ break;
+ case ECORE_PCI_ISCSI:
+ flags |= PQ_FLAGS_ACK | PQ_FLAGS_OOO | PQ_FLAGS_OFLD;
+ break;
+ case ECORE_PCI_ETH_ROCE:
+ flags |= PQ_FLAGS_MCOS | PQ_FLAGS_OFLD;
+ break;
+ case ECORE_PCI_ETH_IWARP:
+ flags |= PQ_FLAGS_MCOS | PQ_FLAGS_ACK | PQ_FLAGS_OOO |
+ PQ_FLAGS_OFLD;
+ break;
+ default:
+ DP_ERR(p_hwfn, "unknown personality %d\n",
+ p_hwfn->hw_info.personality);
+ return 0;
}
-#endif
+ return flags;
+}
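
/* Editor's note (illustrative, not part of the patch): e.g. an iSCSI PF
 * resolves to PQ_FLAGS_LB | PQ_FLAGS_ACK | PQ_FLAGS_OOO | PQ_FLAGS_OFLD
 * (plus PQ_FLAGS_VFS when SR-IOV is active); the getters below turn
 * such a flag set into concrete pq/vport counts.
 */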
- /* ethernet PFs require a pq per tc. Even if only a subset of the TCs
- * active, we want physical queues allocated for all of them, since we
- * don't have a good recycle flow. Non ethernet PFs require only a
- * single physical queue.
- */
- if (p_hwfn->hw_info.personality == ECORE_PCI_ETH_ROCE ||
- p_hwfn->hw_info.personality == ECORE_PCI_IWARP ||
- p_hwfn->hw_info.personality == ECORE_PCI_ETH)
- protocol_pqs = p_hwfn->hw_info.num_hw_tc;
- else
- protocol_pqs = 1;
-
- num_pqs = protocol_pqs + num_vfs + 1; /* The '1' is for pure-LB */
- num_vports = (u8)RESC_NUM(p_hwfn, ECORE_VPORT);
-
- if (p_hwfn->hw_info.personality == ECORE_PCI_ETH_ROCE) {
- num_pqs++; /* for RoCE queue */
- init_rdma_offload_pq = true;
- if (p_hwfn->pf_params.rdma_pf_params.enable_dcqcn) {
- /* Due to FW assumption that rl==vport, we limit the
- * number of rate limiters by the minimum between its
- * allocated number and the allocated number of vports.
- * Another limitation is the number of supported qps
- * with rate limiters in FW.
- */
- num_pf_rls =
- (u16)OSAL_MIN_T(u32, RESC_NUM(p_hwfn, ECORE_RL),
- RESC_NUM(p_hwfn, ECORE_VPORT));
+/* Getters for resource amounts necessary for qm initialization */
+u8 ecore_init_qm_get_num_tcs(struct ecore_hwfn *p_hwfn)
+{
+ return p_hwfn->hw_info.num_hw_tc;
+}
- /* we subtract num_vfs because each one requires a rate
- * limiter, and one default rate limiter.
- */
- if (num_pf_rls < num_vfs + 1) {
- DP_ERR(p_hwfn, "No RL for DCQCN");
- DP_ERR(p_hwfn, "[num_pf_rls %d num_vfs %d]\n",
- num_pf_rls, num_vfs);
- return ECORE_INVAL;
- }
- num_pf_rls -= num_vfs + 1;
- }
+u16 ecore_init_qm_get_num_vfs(struct ecore_hwfn *p_hwfn)
+{
+ return IS_ECORE_SRIOV(p_hwfn->p_dev) ?
+ p_hwfn->p_dev->p_iov_info->total_vfs : 0;
+}
- num_pqs += num_pf_rls;
- qm_info->num_pf_rls = (u8)num_pf_rls;
- }
+#define NUM_DEFAULT_RLS 1
- if (p_hwfn->hw_info.personality == ECORE_PCI_IWARP) {
- num_pqs += 3; /* for iwarp queue / pure-ack / ooo */
- init_rdma_offload_pq = true;
- init_pure_ack_pq = true;
- init_ooo_pq = true;
- }
+u16 ecore_init_qm_get_num_pf_rls(struct ecore_hwfn *p_hwfn)
+{
+ u16 num_pf_rls, num_vfs = ecore_init_qm_get_num_vfs(p_hwfn);
- if (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) {
- num_pqs += 2; /* for iSCSI pure-ACK / OOO queue */
- init_pure_ack_pq = true;
- init_ooo_pq = true;
- }
+ /* @DPDK */
+	/* the number of RLs can't exceed the amount of RL or vport
+	 * resources, nor the number of dcqcn qps
+	 */
+ num_pf_rls = (u16)OSAL_MIN_T(u32, RESC_NUM(p_hwfn, ECORE_RL),
+ (u16)RESC_NUM(p_hwfn, ECORE_VPORT));
- /* Sanity checking that setup requires legal number of resources */
- if (num_pqs > RESC_NUM(p_hwfn, ECORE_PQ)) {
- DP_ERR(p_hwfn,
- "Need too many Physical queues - 0x%04x avail %04x",
- num_pqs, RESC_NUM(p_hwfn, ECORE_PQ));
- return ECORE_INVAL;
+ /* make sure after we reserve the default and VF rls we'll have
+ * something left
+ */
+ if (num_pf_rls < num_vfs + NUM_DEFAULT_RLS) {
+ DP_NOTICE(p_hwfn, false,
+ "no rate limiters left for PF rate limiting"
+ " [num_pf_rls %d num_vfs %d]\n", num_pf_rls, num_vfs);
+ return 0;
}
- /* PQs will be arranged as follows: First per-TC PQ, then pure-LB queue,
- * then special queues (iSCSI pure-ACK / RoCE), then per-VF PQ.
+ /* subtract rls necessary for VFs and one default one for the PF */
+ num_pf_rls -= num_vfs + NUM_DEFAULT_RLS;
+
+ return num_pf_rls;
+}
+
+u16 ecore_init_qm_get_num_vports(struct ecore_hwfn *p_hwfn)
+{
+ u32 pq_flags = ecore_get_pq_flags(p_hwfn);
+
+ /* all pqs share the same vport (hence the 1 below), except for vfs
+ * and pf_rl pqs
*/
- qm_info->qm_pq_params = OSAL_ZALLOC(p_hwfn->p_dev,
- b_sleepable ? GFP_KERNEL :
- GFP_ATOMIC,
- sizeof(struct init_qm_pq_params) *
- num_pqs);
- if (!qm_info->qm_pq_params)
- goto alloc_err;
+ return (!!(PQ_FLAGS_RLS & pq_flags)) *
+ ecore_init_qm_get_num_pf_rls(p_hwfn) +
+ (!!(PQ_FLAGS_VFS & pq_flags)) *
+ ecore_init_qm_get_num_vfs(p_hwfn) + 1;
+}
- qm_info->qm_vport_params = OSAL_ZALLOC(p_hwfn->p_dev,
- b_sleepable ? GFP_KERNEL :
- GFP_ATOMIC,
- sizeof(struct
- init_qm_vport_params) *
- num_vports);
- if (!qm_info->qm_vport_params)
- goto alloc_err;
+/* calc amount of PQs according to the requested flags */
+u16 ecore_init_qm_get_num_pqs(struct ecore_hwfn *p_hwfn)
+{
+ u32 pq_flags = ecore_get_pq_flags(p_hwfn);
+
+ return (!!(PQ_FLAGS_RLS & pq_flags)) *
+ ecore_init_qm_get_num_pf_rls(p_hwfn) +
+ (!!(PQ_FLAGS_MCOS & pq_flags)) *
+ ecore_init_qm_get_num_tcs(p_hwfn) +
+ (!!(PQ_FLAGS_LB & pq_flags)) +
+ (!!(PQ_FLAGS_OOO & pq_flags)) +
+ (!!(PQ_FLAGS_ACK & pq_flags)) +
+ (!!(PQ_FLAGS_OFLD & pq_flags)) +
+ (!!(PQ_FLAGS_VFS & pq_flags)) *
+ ecore_init_qm_get_num_vfs(p_hwfn);
+}
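
/* Editor's worked example (not part of the patch): an ETH PF with
 * SR-IOV gets PQ_FLAGS_LB | PQ_FLAGS_MCOS | PQ_FLAGS_VFS, so with 4 TCs
 * and 16 VFs the getters above evaluate to:
 *
 *	num_pqs    = 4 (mcos) + 1 (pure lb) + 16 (vfs) = 21
 *	num_vports = 16 (vf vports) + 1 (shared by all other pqs) = 17
 *
 * since only VF pqs (and PF-RL pqs, absent here) receive a vport of
 * their own.
 */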
- qm_info->qm_port_params = OSAL_ZALLOC(p_hwfn->p_dev,
- b_sleepable ? GFP_KERNEL :
- GFP_ATOMIC,
- sizeof(struct init_qm_port_params)
- * MAX_NUM_PORTS);
- if (!qm_info->qm_port_params)
- goto alloc_err;
+/* initialize the top level QM params */
+static void ecore_init_qm_params(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+ bool four_port;
- qm_info->wfq_data = OSAL_ZALLOC(p_hwfn->p_dev,
- b_sleepable ? GFP_KERNEL :
- GFP_ATOMIC,
- sizeof(struct ecore_wfq_data) *
- num_vports);
+ /* pq and vport bases for this PF */
+ qm_info->start_pq = (u16)RESC_START(p_hwfn, ECORE_PQ);
+ qm_info->start_vport = (u8)RESC_START(p_hwfn, ECORE_VPORT);
- if (!qm_info->wfq_data)
- goto alloc_err;
+ /* rate limiting and weighted fair queueing are always enabled */
+ qm_info->vport_rl_en = 1;
+ qm_info->vport_wfq_en = 1;
+
+ /* TC config is different for AH 4 port */
+ four_port = p_hwfn->p_dev->num_ports_in_engines == MAX_NUM_PORTS_K2;
- vport_id = (u8)RESC_START(p_hwfn, ECORE_VPORT);
+ /* in AH 4 port we have fewer TCs per port */
+ qm_info->max_phys_tcs_per_port = four_port ? NUM_PHYS_TCS_4PORT_K2 :
+ NUM_OF_PHYS_TCS;
- /* First init rate limited queues ( Due to RoCE assumption of
- * qpid=rlid )
+ /* unless MFW indicated otherwise, ooo_tc should be 3 for AH 4 port and
+ * 4 otherwise
*/
- for (curr_queue = 0; curr_queue < num_pf_rls; curr_queue++) {
- qm_info->qm_pq_params[curr_queue].vport_id = vport_id++;
- qm_info->qm_pq_params[curr_queue].tc_id =
- p_hwfn->hw_info.offload_tc;
- qm_info->qm_pq_params[curr_queue].wrr_group = 1;
- qm_info->qm_pq_params[curr_queue].rl_valid = 1;
- };
-
- /* Protocol PQs */
- for (i = 0; i < protocol_pqs; i++) {
- struct init_qm_pq_params *params =
- &qm_info->qm_pq_params[curr_queue++];
-
- if (p_hwfn->hw_info.personality == ECORE_PCI_ETH_ROCE ||
- p_hwfn->hw_info.personality == ECORE_PCI_IWARP ||
- p_hwfn->hw_info.personality == ECORE_PCI_ETH) {
- params->vport_id = vport_id;
- params->tc_id = i;
- /* Note: this assumes that if we had a configuration
- * with N tcs and subsequently another configuration
- * With Fewer TCs, the in flight traffic (in QM queues,
- * in FW, from driver to FW) will still trickle out and
- * not get "stuck" in the QM. This is determined by the
- * NIG_REG_TX_ARB_CLIENT_IS_SUBJECT2WFQ. Unused TCs are
- * supposed to be cleared in this map, allowing traffic
- * to flush out. If this is not the case, we would need
- * to set the TC of unused queues to 0, and reconfigure
- * QM every time num of TCs changes. Unused queues in
- * this context would mean those intended for TCs where
- * tc_id > hw_info.num_active_tcs.
- */
- params->wrr_group = 1; /* @@@TBD ECORE_WRR_MEDIUM */
- } else {
- params->vport_id = vport_id;
- params->tc_id = p_hwfn->hw_info.offload_tc;
- params->wrr_group = 1; /* @@@TBD ECORE_WRR_MEDIUM */
- }
- }
+ if (!qm_info->ooo_tc)
+ qm_info->ooo_tc = four_port ? DCBX_TCP_OOO_K2_4PORT_TC :
+ DCBX_TCP_OOO_TC;
+}
- /* Then init pure-LB PQ */
- qm_info->pure_lb_pq = curr_queue;
- qm_info->qm_pq_params[curr_queue].vport_id =
- (u8)RESC_START(p_hwfn, ECORE_VPORT);
- qm_info->qm_pq_params[curr_queue].tc_id = PURE_LB_TC;
- qm_info->qm_pq_params[curr_queue].wrr_group = 1;
- curr_queue++;
-
- qm_info->offload_pq = 0; /* Already initialized for iSCSI/FCoE */
- if (init_rdma_offload_pq) {
- qm_info->offload_pq = curr_queue;
- qm_info->qm_pq_params[curr_queue].vport_id = vport_id;
- qm_info->qm_pq_params[curr_queue].tc_id =
- p_hwfn->hw_info.offload_tc;
- qm_info->qm_pq_params[curr_queue].wrr_group = 1;
- curr_queue++;
- }
-
- if (init_pure_ack_pq) {
- qm_info->pure_ack_pq = curr_queue;
- qm_info->qm_pq_params[curr_queue].vport_id = vport_id;
- qm_info->qm_pq_params[curr_queue].tc_id =
- p_hwfn->hw_info.offload_tc;
- qm_info->qm_pq_params[curr_queue].wrr_group = 1;
- curr_queue++;
- }
-
- if (init_ooo_pq) {
- qm_info->ooo_pq = curr_queue;
- qm_info->qm_pq_params[curr_queue].vport_id = vport_id;
- qm_info->qm_pq_params[curr_queue].tc_id = DCBX_ISCSI_OOO_TC;
- qm_info->qm_pq_params[curr_queue].wrr_group = 1;
- curr_queue++;
- }
-
- /* Then init per-VF PQs */
- vf_offset = curr_queue;
- for (i = 0; i < num_vfs; i++) {
- /* First vport is used by the PF */
- qm_info->qm_pq_params[curr_queue].vport_id = vport_id + i + 1;
- /* @@@TBD VF Multi-cos */
- qm_info->qm_pq_params[curr_queue].tc_id = 0;
- qm_info->qm_pq_params[curr_queue].wrr_group = 1;
- qm_info->qm_pq_params[curr_queue].rl_valid = 1;
- curr_queue++;
- };
-
- qm_info->vf_queues_offset = vf_offset;
- qm_info->num_pqs = num_pqs;
- qm_info->num_vports = num_vports;
+/* initialize qm vport params */
+static void ecore_init_qm_vport_params(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+ u8 i;
+ /* all vports participate in weighted fair queueing */
+ for (i = 0; i < ecore_init_qm_get_num_vports(p_hwfn); i++)
+ qm_info->qm_vport_params[i].vport_wfq = 1;
+}
+
+/* initialize qm port params */
+static void ecore_init_qm_port_params(struct ecore_hwfn *p_hwfn)
+{
/* Initialize qm port parameters */
- num_ports = p_hwfn->p_dev->num_ports_in_engines;
+ u8 i, active_phys_tcs, num_ports = p_hwfn->p_dev->num_ports_in_engines;
+
+ /* indicate how ooo and high pri traffic is dealt with */
+ active_phys_tcs = num_ports == MAX_NUM_PORTS_K2 ?
+ ACTIVE_TCS_BMAP_4PORT_K2 : ACTIVE_TCS_BMAP;
+
for (i = 0; i < num_ports; i++) {
- p_qm_port = &qm_info->qm_port_params[i];
+ struct init_qm_port_params *p_qm_port =
+ &p_hwfn->qm_info.qm_port_params[i];
+
p_qm_port->active = 1;
- /* @@@TMP - was NUM_OF_PHYS_TCS; Changed until dcbx will
- * be in place
- */
- if (num_ports == 4)
- p_qm_port->active_phys_tcs = 0xf;
- else
- p_qm_port->active_phys_tcs = 0x9f;
+ p_qm_port->active_phys_tcs = active_phys_tcs;
p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports;
p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports;
}
+}
- if (ECORE_IS_AH(p_hwfn->p_dev) && (num_ports == 4))
- qm_info->max_phys_tcs_per_port = NUM_PHYS_TCS_4PORT_K2;
- else
- qm_info->max_phys_tcs_per_port = NUM_OF_PHYS_TCS;
+/* Reset the params which must be reset for qm init. QM init may be called as
+ * a result of flows other than driver load (e.g. dcbx renegotiation). Other
+ * params may be affected by the init but would simply recalculate to the same
+ * values. The allocations made for QM init, ports, vports, pqs and vfqs are not
+ * affected as these amounts stay the same.
+ */
+static void ecore_init_qm_reset_params(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
- qm_info->start_pq = (u16)RESC_START(p_hwfn, ECORE_PQ);
+ qm_info->num_pqs = 0;
+ qm_info->num_vports = 0;
+ qm_info->num_pf_rls = 0;
+ qm_info->num_vf_pqs = 0;
+ qm_info->first_vf_pq = 0;
+ qm_info->first_mcos_pq = 0;
+ qm_info->first_rl_pq = 0;
+}
+
+static void ecore_init_qm_advance_vport(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+
+ qm_info->num_vports++;
+
+ if (qm_info->num_vports > ecore_init_qm_get_num_vports(p_hwfn))
+ DP_ERR(p_hwfn,
+ "vport overflow! qm_info->num_vports %d,"
+ " qm_init_get_num_vports() %d\n",
+ qm_info->num_vports,
+ ecore_init_qm_get_num_vports(p_hwfn));
+}
+
+/* initialize a single pq and manage qm_info resources accounting.
+ * The pq_init_flags param determines whether the PQ is rate limited
+ * (for a VF or the PF) and whether a new vport is allocated to the pq
+ * or not (i.e. the vport will be shared).
+ */
+
+/* flags for pq init */
+#define PQ_INIT_SHARE_VPORT (1 << 0)
+#define PQ_INIT_PF_RL (1 << 1)
+#define PQ_INIT_VF_RL (1 << 2)
+
+/* defines for pq init */
+#define PQ_INIT_DEFAULT_WRR_GROUP 1
+#define PQ_INIT_DEFAULT_TC 0
+#define PQ_INIT_OFLD_TC (p_hwfn->hw_info.offload_tc)
+
+static void ecore_init_qm_pq(struct ecore_hwfn *p_hwfn,
+ struct ecore_qm_info *qm_info,
+ u8 tc, u32 pq_init_flags)
+{
+ u16 pq_idx = qm_info->num_pqs, max_pq =
+ ecore_init_qm_get_num_pqs(p_hwfn);
+
+	if (pq_idx >= max_pq)
+ DP_ERR(p_hwfn,
+ "pq overflow! pq %d, max pq %d\n", pq_idx, max_pq);
+
+ /* init pq params */
+ qm_info->qm_pq_params[pq_idx].vport_id = qm_info->start_vport +
+ qm_info->num_vports;
+ qm_info->qm_pq_params[pq_idx].tc_id = tc;
+ qm_info->qm_pq_params[pq_idx].wrr_group = PQ_INIT_DEFAULT_WRR_GROUP;
+ qm_info->qm_pq_params[pq_idx].rl_valid =
+ (pq_init_flags & PQ_INIT_PF_RL ||
+ pq_init_flags & PQ_INIT_VF_RL);
+
+ /* qm params accounting */
+ qm_info->num_pqs++;
+ if (!(pq_init_flags & PQ_INIT_SHARE_VPORT))
+ qm_info->num_vports++;
+
+ if (pq_init_flags & PQ_INIT_PF_RL)
+ qm_info->num_pf_rls++;
+
+ if (qm_info->num_vports > ecore_init_qm_get_num_vports(p_hwfn))
+ DP_ERR(p_hwfn,
+ "vport overflow! qm_info->num_vports %d,"
+ " qm_init_get_num_vports() %d\n",
+ qm_info->num_vports,
+ ecore_init_qm_get_num_vports(p_hwfn));
+
+ if (qm_info->num_pf_rls > ecore_init_qm_get_num_pf_rls(p_hwfn))
+ DP_ERR(p_hwfn, "rl overflow! qm_info->num_pf_rls %d,"
+ " qm_init_get_num_pf_rls() %d\n",
+ qm_info->num_pf_rls,
+ ecore_init_qm_get_num_pf_rls(p_hwfn));
+}
+
+/* get pq index according to PQ_FLAGS */
+static u16 *ecore_init_qm_get_idx_from_flags(struct ecore_hwfn *p_hwfn,
+ u32 pq_flags)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+
+ /* Can't have multiple flags set here; the weight must be taken
+ * over the number of bits, not bytes
+ */
+ if (OSAL_BITMAP_WEIGHT((unsigned long *)&pq_flags,
+ sizeof(pq_flags) * 8) > 1)
+ goto err;
+
+ switch (pq_flags) {
+ case PQ_FLAGS_RLS:
+ return &qm_info->first_rl_pq;
+ case PQ_FLAGS_MCOS:
+ return &qm_info->first_mcos_pq;
+ case PQ_FLAGS_LB:
+ return &qm_info->pure_lb_pq;
+ case PQ_FLAGS_OOO:
+ return &qm_info->ooo_pq;
+ case PQ_FLAGS_ACK:
+ return &qm_info->pure_ack_pq;
+ case PQ_FLAGS_OFLD:
+ return &qm_info->offload_pq;
+ case PQ_FLAGS_VFS:
+ return &qm_info->first_vf_pq;
+ default:
+ goto err;
+ }
+
+err:
+ DP_ERR(p_hwfn, "BAD pq flags %d\n", pq_flags);
+ return OSAL_NULL;
+}
+
+/* save pq index in qm info */
+static void ecore_init_qm_set_idx(struct ecore_hwfn *p_hwfn,
+ u32 pq_flags, u16 pq_val)
+{
+ u16 *base_pq_idx = ecore_init_qm_get_idx_from_flags(p_hwfn, pq_flags);
+
+ *base_pq_idx = p_hwfn->qm_info.start_pq + pq_val;
+}
+
+/* get tx pq index, with the PQ TX base already set (ready for context init) */
+u16 ecore_get_cm_pq_idx(struct ecore_hwfn *p_hwfn, u32 pq_flags)
+{
+ u16 *base_pq_idx = ecore_init_qm_get_idx_from_flags(p_hwfn, pq_flags);
+
+ return *base_pq_idx + CM_TX_PQ_BASE;
+}
+
+u16 ecore_get_cm_pq_idx_mcos(struct ecore_hwfn *p_hwfn, u8 tc)
+{
+ u8 max_tc = ecore_init_qm_get_num_tcs(p_hwfn);
+
+ if (tc >= max_tc)
+ DP_ERR(p_hwfn, "tc %d must be smaller than %d\n", tc, max_tc);
+
+ return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_MCOS) + tc;
+}
+
+u16 ecore_get_cm_pq_idx_vf(struct ecore_hwfn *p_hwfn, u16 vf)
+{
+ u16 max_vf = ecore_init_qm_get_num_vfs(p_hwfn);
+
+ if (vf >= max_vf)
+ DP_ERR(p_hwfn, "vf %d must be smaller than %d\n", vf, max_vf);
+
+ return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + vf;
+}
+
+u16 ecore_get_cm_pq_idx_rl(struct ecore_hwfn *p_hwfn, u8 rl)
+{
+ u16 max_rl = ecore_init_qm_get_num_pf_rls(p_hwfn);
+
+ if (rl >= max_rl)
+ DP_ERR(p_hwfn, "rl %d must be smaller than %d\n", rl, max_rl);
+
+ return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_RLS) + rl;
+}
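Taken together, these getters resolve an absolute CM PQ id as the per-flag base plus an offset, with overflow reported but not fatal. A short usage sketch (the tc and vf values here are made up for illustration):

	u16 lb_pq = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);  /* pure LB PQ */
	u16 tc_pq = ecore_get_cm_pq_idx_mcos(p_hwfn, 2);       /* PQ of TC 2 */
	u16 vf_pq = ecore_get_cm_pq_idx_vf(p_hwfn, 7);         /* PQ of VF 7 */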
+
+/* Functions for creating specific types of pqs */
+static void ecore_init_qm_lb_pq(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+
+ if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_LB))
+ return;
+
+ ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_LB, qm_info->num_pqs);
+ ecore_init_qm_pq(p_hwfn, qm_info, PURE_LB_TC, PQ_INIT_SHARE_VPORT);
+}
+
+static void ecore_init_qm_ooo_pq(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+
+ if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_OOO))
+ return;
+
+ ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_OOO, qm_info->num_pqs);
+ ecore_init_qm_pq(p_hwfn, qm_info, qm_info->ooo_tc, PQ_INIT_SHARE_VPORT);
+}
+
+static void ecore_init_qm_pure_ack_pq(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+
+ if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_ACK))
+ return;
+
+ ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_ACK, qm_info->num_pqs);
+ ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT);
+}
+
+static void ecore_init_qm_offload_pq(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+
+ if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_OFLD))
+ return;
+
+ ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_OFLD, qm_info->num_pqs);
+ ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT);
+}
+
+static void ecore_init_qm_mcos_pqs(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+ u8 tc_idx;
+
+ if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_MCOS))
+ return;
+
+ ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_MCOS, qm_info->num_pqs);
+ for (tc_idx = 0; tc_idx < ecore_init_qm_get_num_tcs(p_hwfn); tc_idx++)
+ ecore_init_qm_pq(p_hwfn, qm_info, tc_idx, PQ_INIT_SHARE_VPORT);
+}
+
+static void ecore_init_qm_vf_pqs(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+ u16 vf_idx, num_vfs = ecore_init_qm_get_num_vfs(p_hwfn);
+
+ if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_VFS))
+ return;
+
+ ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_VFS, qm_info->num_pqs);
qm_info->num_vf_pqs = num_vfs;
- qm_info->start_vport = (u8)RESC_START(p_hwfn, ECORE_VPORT);
+ for (vf_idx = 0; vf_idx < num_vfs; vf_idx++)
+ ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_DEFAULT_TC,
+ PQ_INIT_VF_RL);
+}
- for (i = 0; i < qm_info->num_vports; i++)
- qm_info->qm_vport_params[i].vport_wfq = 1;
+static void ecore_init_qm_rl_pqs(struct ecore_hwfn *p_hwfn)
+{
+ u16 pf_rls_idx, num_pf_rls = ecore_init_qm_get_num_pf_rls(p_hwfn);
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
- qm_info->vport_rl_en = 1;
- qm_info->vport_wfq_en = 1;
- qm_info->pf_rl = pf_rl;
- qm_info->pf_wfq = pf_wfq;
+ if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_RLS))
+ return;
+
+ ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_RLS, qm_info->num_pqs);
+ for (pf_rls_idx = 0; pf_rls_idx < num_pf_rls; pf_rls_idx++)
+ ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC,
+ PQ_INIT_PF_RL);
+}
+
+static void ecore_init_qm_pq_params(struct ecore_hwfn *p_hwfn)
+{
+ /* rate limited pqs, must come first (FW assumption) */
+ ecore_init_qm_rl_pqs(p_hwfn);
+
+ /* pqs for multi cos */
+ ecore_init_qm_mcos_pqs(p_hwfn);
+
+ /* pure loopback pq */
+ ecore_init_qm_lb_pq(p_hwfn);
+
+ /* out of order pq */
+ ecore_init_qm_ooo_pq(p_hwfn);
+
+ /* pure ack pq */
+ ecore_init_qm_pure_ack_pq(p_hwfn);
+
+ /* pq for offloaded protocol */
+ ecore_init_qm_offload_pq(p_hwfn);
+
+ /* done sharing vports */
+ ecore_init_qm_advance_vport(p_hwfn);
+
+ /* pqs for vfs */
+ ecore_init_qm_vf_pqs(p_hwfn);
+}
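Assuming every PQ_FLAGS bit is set, the call order above yields the following layout inside [start_pq, start_pq + num_pqs), rate-limited PQs first per the FW assumption noted in the first comment:

	first_rl_pq    num_pf_rls consecutive rate-limited PQs
	first_mcos_pq  one PQ per TC
	pure_lb_pq     single pure-loopback PQ
	ooo_pq         single out-of-order PQ
	pure_ack_pq    single pure-ack PQ
	offload_pq     single offload-protocol PQ
	first_vf_pq    num_vf_pqs per-VF PQs (own vports, after the advance)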
+
+/* compare values of getters against resource amounts */
+static enum _ecore_status_t ecore_init_qm_sanity(struct ecore_hwfn *p_hwfn)
+{
+ if (ecore_init_qm_get_num_vports(p_hwfn) >
+ RESC_NUM(p_hwfn, ECORE_VPORT)) {
+ DP_ERR(p_hwfn, "requested amount of vports exceeds resource\n");
+ return ECORE_INVAL;
+ }
+
+ if (ecore_init_qm_get_num_pqs(p_hwfn) > RESC_NUM(p_hwfn, ECORE_PQ)) {
+ DP_ERR(p_hwfn, "requested amount of pqs exceeds resource\n");
+ return ECORE_INVAL;
+ }
return ECORE_SUCCESS;
+}
- alloc_err:
- DP_NOTICE(p_hwfn, false, "Failed to allocate memory for QM params\n");
- ecore_qm_info_free(p_hwfn);
- return ECORE_NOMEM;
+/*
+ * Function for verbose printing of the qm initialization results
+ */
+static void ecore_dp_init_qm_params(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+ struct init_qm_vport_params *vport;
+ struct init_qm_port_params *port;
+ struct init_qm_pq_params *pq;
+ int i, tc;
+
+ /* top level params */
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "qm init top level params: start_pq %d, start_vport %d,"
+ " pure_lb_pq %d, offload_pq %d, pure_ack_pq %d\n",
+ qm_info->start_pq, qm_info->start_vport, qm_info->pure_lb_pq,
+ qm_info->offload_pq, qm_info->pure_ack_pq);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "ooo_pq %d, first_vf_pq %d, num_pqs %d, num_vf_pqs %d,"
+ " num_vports %d, max_phys_tcs_per_port %d\n",
+ qm_info->ooo_pq, qm_info->first_vf_pq, qm_info->num_pqs,
+ qm_info->num_vf_pqs, qm_info->num_vports,
+ qm_info->max_phys_tcs_per_port);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "pf_rl_en %d, pf_wfq_en %d, vport_rl_en %d, vport_wfq_en %d,"
+ " pf_wfq %d, pf_rl %d, num_pf_rls %d, pq_flags %x\n",
+ qm_info->pf_rl_en, qm_info->pf_wfq_en, qm_info->vport_rl_en,
+ qm_info->vport_wfq_en, qm_info->pf_wfq, qm_info->pf_rl,
+ qm_info->num_pf_rls, ecore_get_pq_flags(p_hwfn));
+
+ /* port table */
+ for (i = 0; i < p_hwfn->p_dev->num_ports_in_engines; i++) {
+ port = &qm_info->qm_port_params[i];
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "port idx %d, active %d, active_phys_tcs %d,"
+ " num_pbf_cmd_lines %d, num_btb_blocks %d,"
+ " reserved %d\n",
+ i, port->active, port->active_phys_tcs,
+ port->num_pbf_cmd_lines, port->num_btb_blocks,
+ port->reserved);
+ }
+
+ /* vport table */
+ for (i = 0; i < qm_info->num_vports; i++) {
+ vport = &qm_info->qm_vport_params[i];
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "vport idx %d, vport_rl %d, wfq %d,"
+ " first_tx_pq_id [ ",
+ qm_info->start_vport + i, vport->vport_rl,
+ vport->vport_wfq);
+ for (tc = 0; tc < NUM_OF_TCS; tc++)
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "%d ",
+ vport->first_tx_pq_id[tc]);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "]\n");
+ }
+
+ /* pq table */
+ for (i = 0; i < qm_info->num_pqs; i++) {
+ pq = &qm_info->qm_pq_params[i];
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "pq idx %d, vport_id %d, tc %d, wrr_grp %d,"
+ " rl_valid %d\n",
+ qm_info->start_pq + i, pq->vport_id, pq->tc_id,
+ pq->wrr_group, pq->rl_valid);
+ }
+}
+
+static void ecore_init_qm_info(struct ecore_hwfn *p_hwfn)
+{
+ /* reset params required for init run */
+ ecore_init_qm_reset_params(p_hwfn);
+
+ /* init QM top level params */
+ ecore_init_qm_params(p_hwfn);
+
+ /* init QM port params */
+ ecore_init_qm_port_params(p_hwfn);
+
+ /* init QM vport params */
+ ecore_init_qm_vport_params(p_hwfn);
+
+ /* init QM physical queue params */
+ ecore_init_qm_pq_params(p_hwfn);
+
+ /* display the results of the above init */
+ ecore_dp_init_qm_params(p_hwfn);
}
/* This function reconfigures the QM pf on the fly.
* For this purpose we:
* 1. reconfigure the QM database
- * 2. set new values to runtime arrat
+ * 2. set new values to runtime array
* 3. send an sdm_qm_cmd through the rbc interface to stop the QM
* 4. activate init tool in QM_PF stage
* 5. send an sdm_qm_cmd through rbc interface to release the QM
@@ -478,17 +762,8 @@ enum _ecore_status_t ecore_qm_reconf(struct ecore_hwfn *p_hwfn,
bool b_rc;
enum _ecore_status_t rc;
- /* qm_info is allocated in ecore_init_qm_info() which is already called
- * from ecore_resc_alloc() or previous call of ecore_qm_reconf().
- * The allocated size may change each init, so we free it before next
- * allocation.
- */
- ecore_qm_info_free(p_hwfn);
-
/* initialize ecore's qm data structure */
- rc = ecore_init_qm_info(p_hwfn, false);
- if (rc != ECORE_SUCCESS)
- return rc;
+ ecore_init_qm_info(p_hwfn);
/* stop PF's qm queues */
OSAL_SPIN_LOCK(&qm_lock);
@@ -521,51 +796,67 @@ enum _ecore_status_t ecore_qm_reconf(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
}
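In code, the five steps listed before ecore_qm_reconf() map roughly onto the sequence below. This is only a sketch: error handling and most arguments are elided, and it assumes ecore_qm_pf_rt_init() and ecore_send_qm_stop_cmd() keep their declarations from ecore_init_fw_funcs.h:

	ecore_init_qm_info(p_hwfn);                        /* 1. QM database   */
	ecore_qm_pf_rt_init(p_hwfn, p_ptt, ...);           /* 2. runtime array */
	ecore_send_qm_stop_cmd(p_hwfn, p_ptt, false, ...); /* 3. stop queues   */
	ecore_init_run(p_hwfn, p_ptt, PHASE_QM_PF,
		       p_hwfn->rel_pf_id, p_hwfn->hw_info.hw_mode); /* 4 */
	ecore_send_qm_stop_cmd(p_hwfn, p_ptt, true, ...);  /* 5. release       */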
+static enum _ecore_status_t ecore_alloc_qm_data(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+ enum _ecore_status_t rc;
+
+ rc = ecore_init_qm_sanity(p_hwfn);
+ if (rc != ECORE_SUCCESS)
+ goto alloc_err;
+
+ qm_info->qm_pq_params = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
+ sizeof(struct init_qm_pq_params) *
+ ecore_init_qm_get_num_pqs(p_hwfn));
+ if (!qm_info->qm_pq_params)
+ goto alloc_err;
+
+ qm_info->qm_vport_params = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
+ sizeof(struct init_qm_vport_params) *
+ ecore_init_qm_get_num_vports(p_hwfn));
+ if (!qm_info->qm_vport_params)
+ goto alloc_err;
+
+ qm_info->qm_port_params = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
+ sizeof(struct init_qm_port_params) *
+ p_hwfn->p_dev->num_ports_in_engines);
+ if (!qm_info->qm_port_params)
+ goto alloc_err;
+
+ qm_info->wfq_data = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
+ sizeof(struct ecore_wfq_data) *
+ ecore_init_qm_get_num_vports(p_hwfn));
+ if (!qm_info->wfq_data)
+ goto alloc_err;
+
+ return ECORE_SUCCESS;
+
+alloc_err:
+ DP_NOTICE(p_hwfn, false, "Failed to allocate memory for QM params\n");
+ ecore_qm_info_free(p_hwfn);
+ return ECORE_NOMEM;
+}
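Note the symmetry this creates: ecore_alloc_qm_data() sizes all four arrays from the same ecore_init_qm_get_*() getters that ecore_init_qm_info() consumes, which is what allows the reset comment earlier to promise that re-running the init (e.g. on dcbx renegotiation) never requires a reallocation.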
+/******************** End QM initialization ***************/
+
enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev)
{
- struct ecore_consq *p_consq;
- struct ecore_eq *p_eq;
-#ifdef CONFIG_ECORE_LL2
- struct ecore_ll2_info *p_ll2_info;
-#endif
enum _ecore_status_t rc = ECORE_SUCCESS;
int i;
- if (IS_VF(p_dev))
+ if (IS_VF(p_dev)) {
+ for_each_hwfn(p_dev, i) {
+ rc = ecore_l2_alloc(&p_dev->hwfns[i]);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+ }
return rc;
+ }
p_dev->fw_data = OSAL_ZALLOC(p_dev, GFP_KERNEL,
sizeof(*p_dev->fw_data));
if (!p_dev->fw_data)
return ECORE_NOMEM;
- /* Allocate Memory for the Queue->CID mapping */
- for_each_hwfn(p_dev, i) {
- struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
-
- /* @@@TMP - resc management, change to actual required size */
- int tx_size = sizeof(struct ecore_hw_cid_data) *
- RESC_NUM(p_hwfn, ECORE_L2_QUEUE);
- int rx_size = sizeof(struct ecore_hw_cid_data) *
- RESC_NUM(p_hwfn, ECORE_L2_QUEUE);
-
- p_hwfn->p_tx_cids = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
- tx_size);
- if (!p_hwfn->p_tx_cids) {
- DP_NOTICE(p_hwfn, true,
- "Failed to allocate memory for Tx Cids\n");
- goto alloc_no_mem;
- }
-
- p_hwfn->p_rx_cids = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
- rx_size);
- if (!p_hwfn->p_rx_cids) {
- DP_NOTICE(p_hwfn, true,
- "Failed to allocate memory for Rx Cids\n");
- goto alloc_no_mem;
- }
- }
-
for_each_hwfn(p_dev, i) {
struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
u32 n_eqes, num_cons;
@@ -582,11 +873,13 @@ enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev)
if (rc)
goto alloc_err;
- /* Prepare and process QM requirements */
- rc = ecore_init_qm_info(p_hwfn, true);
+ rc = ecore_alloc_qm_data(p_hwfn);
if (rc)
goto alloc_err;
+ /* init qm info */
+ ecore_init_qm_info(p_hwfn);
+
/* Compute the ILT client partition */
rc = ecore_cxt_cfg_ilt_compute(p_hwfn);
if (rc)
@@ -618,8 +911,7 @@ enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev)
/* EQ */
n_eqes = ecore_chain_get_capacity(&p_hwfn->p_spq->chain);
- if ((p_hwfn->hw_info.personality == ECORE_PCI_ETH_ROCE) ||
- (p_hwfn->hw_info.personality == ECORE_PCI_IWARP)) {
+ if (ECORE_IS_RDMA_PERSONALITY(p_hwfn)) {
/* Calculate the EQ size
* ---------------------
* Each ICID may generate up to one event at a time i.e.
@@ -629,37 +921,38 @@ enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev)
* worst case:
* - Core - according to SPQ.
* - RoCE - per QP there are a couple of ICIDs, one
- * responder and one requester, each can
- * generate an EQE => n_eqes_qp = 2 * n_qp.
- * Each CQ can generate an EQE. There are 2 CQs
- * per QP => n_eqes_cq = 2 * n_qp.
- * Hence the RoCE total is 4 * n_qp or
- * 2 * num_cons.
+ * responder and one requester, each can
+ * generate an EQE => n_eqes_qp = 2 * n_qp.
+ * Each CQ can generate an EQE. There are 2 CQs
+ * per QP => n_eqes_cq = 2 * n_qp.
+ * Hence the RoCE total is 4 * n_qp or
+ * 2 * num_cons.
* - ENet - There can be up to two events per VF. One
- * for VF-PF channel and another for VF FLR
- * initial cleanup. The number of VFs is
- * bounded by MAX_NUM_VFS_BB, and is much
- * smaller than RoCE's so we avoid exact
- * calculation.
+ * for VF-PF channel and another for VF FLR
+ * initial cleanup. The number of VFs is
+ * bounded by MAX_NUM_VFS_BB, and is much
+ * smaller than RoCE's so we avoid exact
+ * calculation.
*/
- if (p_hwfn->hw_info.personality == ECORE_PCI_ETH_ROCE) {
+ if (ECORE_IS_ROCE_PERSONALITY(p_hwfn)) {
num_cons =
ecore_cxt_get_proto_cid_count(
p_hwfn,
PROTOCOLID_ROCE,
- 0);
+ OSAL_NULL);
num_cons *= 2;
} else {
num_cons = ecore_cxt_get_proto_cid_count(
p_hwfn,
PROTOCOLID_IWARP,
- 0);
+ OSAL_NULL);
}
n_eqes += num_cons + 2 * MAX_NUM_VFS_BB;
} else if (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) {
num_cons =
ecore_cxt_get_proto_cid_count(p_hwfn,
- PROTOCOLID_ISCSI, 0);
+ PROTOCOLID_ISCSI,
+ OSAL_NULL);
n_eqes += 2 * num_cons;
}
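As a worked example of the sizing comment above (the QP count is hypothetical): for a RoCE personality with n_qp = 1,000, the protocol CID count is 2 * n_qp = 2,000 (one responder and one requester ICID per QP); the code doubles that again to cover the two CQs per QP, contributing num_cons = 4,000 EQEs, on top of which come 2 * MAX_NUM_VFS_BB VF events and the SPQ chain capacity. The total is then range-checked against the u16 chain limit just below.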
@@ -667,33 +960,27 @@ enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev)
DP_ERR(p_hwfn, "Cannot allocate 0x%x EQ elements."
"The maximum of a u16 chain is 0x%x\n",
n_eqes, 0xFFFF);
- goto alloc_err;
+ goto alloc_no_mem;
}
- p_eq = ecore_eq_alloc(p_hwfn, (u16)n_eqes);
- if (!p_eq)
- goto alloc_no_mem;
- p_hwfn->p_eq = p_eq;
+ rc = ecore_eq_alloc(p_hwfn, (u16)n_eqes);
+ if (rc)
+ goto alloc_err;
- p_consq = ecore_consq_alloc(p_hwfn);
- if (!p_consq)
- goto alloc_no_mem;
- p_hwfn->p_consq = p_consq;
-
-#ifdef CONFIG_ECORE_LL2
- if (p_hwfn->using_ll2) {
- p_ll2_info = ecore_ll2_alloc(p_hwfn);
- if (!p_ll2_info)
- goto alloc_no_mem;
- p_hwfn->p_ll2_info = p_ll2_info;
- }
-#endif
+ rc = ecore_consq_alloc(p_hwfn);
+ if (rc)
+ goto alloc_err;
+
+ rc = ecore_l2_alloc(p_hwfn);
+ if (rc != ECORE_SUCCESS)
+ goto alloc_err;
/* DMA info initialization */
rc = ecore_dmae_info_alloc(p_hwfn);
if (rc) {
DP_NOTICE(p_hwfn, true,
- "Failed to allocate memory for dmae_info structure\n");
+ "Failed to allocate memory for dmae_info"
+ " structure\n");
goto alloc_err;
}
@@ -707,7 +994,7 @@ enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev)
}
p_dev->reset_stats = OSAL_ZALLOC(p_dev, GFP_KERNEL,
- sizeof(struct ecore_eth_stats));
+ sizeof(*p_dev->reset_stats));
if (!p_dev->reset_stats) {
DP_NOTICE(p_dev, true, "Failed to allocate reset statistics\n");
goto alloc_no_mem;
@@ -715,9 +1002,9 @@ enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev)
return ECORE_SUCCESS;
- alloc_no_mem:
+alloc_no_mem:
rc = ECORE_NOMEM;
- alloc_err:
+alloc_err:
ecore_resc_free(p_dev);
return rc;
}
@@ -726,16 +1013,19 @@ void ecore_resc_setup(struct ecore_dev *p_dev)
{
int i;
- if (IS_VF(p_dev))
+ if (IS_VF(p_dev)) {
+ for_each_hwfn(p_dev, i)
+ ecore_l2_setup(&p_dev->hwfns[i]);
return;
+ }
for_each_hwfn(p_dev, i) {
struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
ecore_cxt_mngr_setup(p_hwfn);
ecore_spq_setup(p_hwfn);
- ecore_eq_setup(p_hwfn, p_hwfn->p_eq);
- ecore_consq_setup(p_hwfn, p_hwfn->p_consq);
+ ecore_eq_setup(p_hwfn);
+ ecore_consq_setup(p_hwfn);
/* Read shadow of current MFW mailbox */
ecore_mcp_read_mb(p_hwfn, p_hwfn->p_main_ptt);
@@ -745,11 +1035,8 @@ void ecore_resc_setup(struct ecore_dev *p_dev)
ecore_int_setup(p_hwfn, p_hwfn->p_main_ptt);
+ ecore_l2_setup(p_hwfn);
ecore_iov_setup(p_hwfn, p_hwfn->p_main_ptt);
-#ifdef CONFIG_ECORE_LL2
- if (p_hwfn->using_ll2)
- ecore_ll2_setup(p_hwfn, p_hwfn->p_ll2_info);
-#endif
}
}
@@ -794,10 +1081,9 @@ enum _ecore_status_t ecore_final_cleanup(struct ecore_hwfn *p_hwfn,
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
"Sending final cleanup for PFVF[%d] [Command %08x\n]",
- id, OSAL_CPU_TO_LE32(command));
+ id, command);
- ecore_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN,
- OSAL_CPU_TO_LE32(command));
+ ecore_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN, command);
/* Poll until completion */
while (!REG_RD(p_hwfn, addr) && count--)
@@ -819,10 +1105,8 @@ static enum _ecore_status_t ecore_calc_hw_mode(struct ecore_hwfn *p_hwfn)
{
int hw_mode = 0;
- if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
- hw_mode |= 1 << MODE_BB_A0;
- } else if (ECORE_IS_BB_B0(p_hwfn->p_dev)) {
- hw_mode |= 1 << MODE_BB_B0;
+ if (ECORE_IS_BB_B0(p_hwfn->p_dev)) {
+ hw_mode |= 1 << MODE_BB;
} else if (ECORE_IS_AH(p_hwfn->p_dev)) {
hw_mode |= 1 << MODE_K2;
} else {
@@ -877,11 +1161,6 @@ static enum _ecore_status_t ecore_calc_hw_mode(struct ecore_hwfn *p_hwfn)
#endif
hw_mode |= 1 << MODE_ASIC;
-#ifndef REAL_ASIC_ONLY
- if (ENABLE_EAGLE_ENG1_WORKAROUND(p_hwfn))
- hw_mode |= 1 << MODE_EAGLE_ENG1_WORKAROUND;
-#endif
-
if (p_hwfn->p_dev->num_hwfns > 1)
hw_mode |= 1 << MODE_100G;
@@ -899,29 +1178,36 @@ static enum _ecore_status_t ecore_calc_hw_mode(struct ecore_hwfn *p_hwfn)
static enum _ecore_status_t ecore_hw_init_chip(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
u32 pl_hv = 1;
int i;
- if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && ECORE_IS_AH(p_hwfn->p_dev))
- pl_hv |= 0x600;
+ if (CHIP_REV_IS_EMUL(p_dev)) {
+ if (ECORE_IS_AH(p_dev))
+ pl_hv |= 0x600;
+ }
ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV + 4, pl_hv);
- if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && ECORE_IS_AH(p_hwfn->p_dev))
- ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV_2, 0x3ffffff);
+ if (CHIP_REV_IS_EMUL(p_dev) && ECORE_IS_AH(p_dev))
+ ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV_2_K2_E5,
+ 0x3ffffff);
/* initialize port mode to 4x10G_E (10G with 4x10 SERDES) */
/* CNIG_REG_NW_PORT_MODE is same for A0 and B0 */
- if (!CHIP_REV_IS_EMUL(p_hwfn->p_dev) || !ECORE_IS_AH(p_hwfn->p_dev))
- ecore_wr(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB_B0, 4);
+ if (!CHIP_REV_IS_EMUL(p_dev) || ECORE_IS_BB(p_dev))
+ ecore_wr(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB, 4);
- if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && ECORE_IS_AH(p_hwfn->p_dev)) {
- /* 2 for 4-port, 1 for 2-port, 0 for 1-port */
- ecore_wr(p_hwfn, p_ptt, MISC_REG_PORT_MODE,
- (p_hwfn->p_dev->num_ports_in_engines >> 1));
+ if (CHIP_REV_IS_EMUL(p_dev)) {
+ if (ECORE_IS_AH(p_dev)) {
+ /* 2 for 4-port, 1 for 2-port, 0 for 1-port */
+ ecore_wr(p_hwfn, p_ptt, MISC_REG_PORT_MODE,
+ (p_dev->num_ports_in_engines >> 1));
- ecore_wr(p_hwfn, p_ptt, MISC_REG_BLOCK_256B_EN,
- p_hwfn->p_dev->num_ports_in_engines == 4 ? 0 : 3);
+ ecore_wr(p_hwfn, p_ptt, MISC_REG_BLOCK_256B_EN,
+ p_dev->num_ports_in_engines == 4 ? 0 : 3);
+ }
}
/* Poll on RBC */
@@ -987,7 +1273,7 @@ static enum _ecore_status_t ecore_hw_init_common(struct ecore_hwfn *p_hwfn,
ecore_gtt_init(p_hwfn);
#ifndef ASIC_ONLY
- if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
+ if (CHIP_REV_IS_EMUL(p_dev)) {
rc = ecore_hw_init_chip(p_hwfn, p_hwfn->p_main_ptt);
if (rc != ECORE_SUCCESS)
return rc;
@@ -1002,7 +1288,7 @@ static enum _ecore_status_t ecore_hw_init_common(struct ecore_hwfn *p_hwfn,
}
ecore_qm_common_rt_init(p_hwfn,
- p_hwfn->p_dev->num_ports_in_engines,
+ p_dev->num_ports_in_engines,
qm_info->max_phys_tcs_per_port,
qm_info->pf_rl_en, qm_info->pf_wfq_en,
qm_info->vport_rl_en, qm_info->vport_wfq_en,
@@ -1010,18 +1296,6 @@ static enum _ecore_status_t ecore_hw_init_common(struct ecore_hwfn *p_hwfn,
ecore_cxt_hw_init_common(p_hwfn);
- /* Close gate from NIG to BRB/Storm; By default they are open, but
- * we close them to prevent NIG from passing data to reset blocks.
- * Should have been done in the ENGINE phase, but init-tool lacks
- * proper port-pretend capabilities.
- */
- ecore_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
- ecore_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
- ecore_port_pretend(p_hwfn, p_ptt, p_hwfn->port_id ^ 1);
- ecore_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
- ecore_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
- ecore_port_unpretend(p_hwfn, p_ptt);
-
rc = ecore_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode);
if (rc != ECORE_SUCCESS)
return rc;
@@ -1032,11 +1306,11 @@ static enum _ecore_status_t ecore_hw_init_common(struct ecore_hwfn *p_hwfn,
ecore_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0);
ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1);
- if (ECORE_IS_BB(p_hwfn->p_dev)) {
+ if (ECORE_IS_BB(p_dev)) {
/* Workaround clears ROCE search for all functions to prevent
* involving non initialized function in processing ROCE packet.
*/
- num_pfs = NUM_OF_ENG_PFS(p_hwfn->p_dev);
+ num_pfs = NUM_OF_ENG_PFS(p_dev);
for (pf_id = 0; pf_id < num_pfs; pf_id++) {
ecore_fid_pretend(p_hwfn, p_ptt, pf_id);
ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
@@ -1052,8 +1326,7 @@ static enum _ecore_status_t ecore_hw_init_common(struct ecore_hwfn *p_hwfn,
* This is not done inside the init tool since it currently can't
* perform a pretending to VFs.
*/
- max_num_vfs = ECORE_IS_AH(p_hwfn->p_dev) ? MAX_NUM_VFS_K2
- : MAX_NUM_VFS_BB;
+ max_num_vfs = ECORE_IS_AH(p_dev) ? MAX_NUM_VFS_K2 : MAX_NUM_VFS_BB;
for (vf_id = 0; vf_id < max_num_vfs; vf_id++) {
concrete_fid = ecore_vfid_to_concrete(p_hwfn, vf_id);
ecore_fid_pretend(p_hwfn, p_ptt, (u16)concrete_fid);
@@ -1080,20 +1353,19 @@ static void ecore_wr_nw_port(struct ecore_hwfn *p_hwfn,
{
DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
"CMD: %08x, ADDR: 0x%08x, DATA: %08x:%08x\n",
- ecore_rd(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB_B0) |
+ ecore_rd(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB) |
(8 << PMEG_IF_BYTE_COUNT),
(reg_type << 25) | (addr << 8) | port,
(u32)((data >> 32) & 0xffffffff),
(u32)(data & 0xffffffff));
- ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB_B0,
- (ecore_rd(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB_B0) &
+ ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB,
+ (ecore_rd(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB) &
0xffff00fe) | (8 << PMEG_IF_BYTE_COUNT));
- ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_ADDR_BB_B0,
+ ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_ADDR_BB,
(reg_type << 25) | (addr << 8) | port);
- ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_WRDATA_BB_B0,
- data & 0xffffffff);
- ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_WRDATA_BB_B0,
+ ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_WRDATA_BB, data & 0xffffffff);
+ ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_WRDATA_BB,
(data >> 32) & 0xffffffff);
}
@@ -1109,48 +1381,13 @@ static void ecore_wr_nw_port(struct ecore_hwfn *p_hwfn,
#define XLMAC_PAUSE_CTRL (0x60d)
#define XLMAC_PFC_CTRL (0x60e)
-static void ecore_emul_link_init_ah(struct ecore_hwfn *p_hwfn,
+static void ecore_emul_link_init_bb(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
- u8 port = p_hwfn->port_id;
- u32 mac_base = NWM_REG_MAC0 + (port << 2) * NWM_REG_MAC0_SIZE;
-
- ecore_wr(p_hwfn, p_ptt, CNIG_REG_NIG_PORT0_CONF_K2 + (port << 2),
- (1 << CNIG_REG_NIG_PORT0_CONF_NIG_PORT_ENABLE_0_SHIFT) |
- (port << CNIG_REG_NIG_PORT0_CONF_NIG_PORT_NWM_PORT_MAP_0_SHIFT)
- | (0 << CNIG_REG_NIG_PORT0_CONF_NIG_PORT_RATE_0_SHIFT));
-
- ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_XIF_MODE,
- 1 << ETH_MAC_REG_XIF_MODE_XGMII_SHIFT);
-
- ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_FRM_LENGTH,
- 9018 << ETH_MAC_REG_FRM_LENGTH_FRM_LENGTH_SHIFT);
-
- ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_TX_IPG_LENGTH,
- 0xc << ETH_MAC_REG_TX_IPG_LENGTH_TXIPG_SHIFT);
-
- ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_RX_FIFO_SECTIONS,
- 8 << ETH_MAC_REG_RX_FIFO_SECTIONS_RX_SECTION_FULL_SHIFT);
-
- ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_TX_FIFO_SECTIONS,
- (0xA << ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_EMPTY_SHIFT) |
- (8 << ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_FULL_SHIFT));
-
- ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_COMMAND_CONFIG, 0xa853);
-}
-
-static void ecore_emul_link_init(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt)
-{
u8 loopback = 0, port = p_hwfn->port_id * 2;
DP_INFO(p_hwfn->p_dev, "Configurating Emulation Link %02x\n", port);
- if (ECORE_IS_AH(p_hwfn->p_dev)) {
- ecore_emul_link_init_ah(p_hwfn, p_ptt);
- return;
- }
-
/* XLPORT MAC MODE *//* 0 Quad, 4 Single... */
ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_MODE_REG, (0x4 << 4) | 0x4, 1,
port);
@@ -1179,8 +1416,53 @@ static void ecore_emul_link_init(struct ecore_hwfn *p_hwfn,
ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_ENABLE_REG, 0xf, 1, port);
}
-static void ecore_link_init(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, u8 port)
+static void ecore_emul_link_init_ah_e5(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u8 port = p_hwfn->port_id;
+ u32 mac_base = NWM_REG_MAC0_K2_E5 + (port << 2) * NWM_REG_MAC0_SIZE;
+
+ DP_INFO(p_hwfn->p_dev, "Configurating Emulation Link %02x\n", port);
+
+ ecore_wr(p_hwfn, p_ptt, CNIG_REG_NIG_PORT0_CONF_K2_E5 + (port << 2),
+ (1 << CNIG_REG_NIG_PORT0_CONF_NIG_PORT_ENABLE_0_K2_E5_SHIFT) |
+ (port <<
+ CNIG_REG_NIG_PORT0_CONF_NIG_PORT_NWM_PORT_MAP_0_K2_E5_SHIFT) |
+ (0 << CNIG_REG_NIG_PORT0_CONF_NIG_PORT_RATE_0_K2_E5_SHIFT));
+
+ ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_XIF_MODE_K2_E5,
+ 1 << ETH_MAC_REG_XIF_MODE_XGMII_K2_E5_SHIFT);
+
+ ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_FRM_LENGTH_K2_E5,
+ 9018 << ETH_MAC_REG_FRM_LENGTH_FRM_LENGTH_K2_E5_SHIFT);
+
+ ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_TX_IPG_LENGTH_K2_E5,
+ 0xc << ETH_MAC_REG_TX_IPG_LENGTH_TXIPG_K2_E5_SHIFT);
+
+ ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_RX_FIFO_SECTIONS_K2_E5,
+ 8 << ETH_MAC_REG_RX_FIFO_SECTIONS_RX_SECTION_FULL_K2_E5_SHIFT);
+
+ ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_TX_FIFO_SECTIONS_K2_E5,
+ (0xA <<
+ ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_EMPTY_K2_E5_SHIFT) |
+ (8 <<
+ ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_FULL_K2_E5_SHIFT));
+
+ ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_COMMAND_CONFIG_K2_E5,
+ 0xa853);
+}
+
+static void ecore_emul_link_init(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ if (ECORE_IS_AH(p_hwfn->p_dev))
+ ecore_emul_link_init_ah_e5(p_hwfn, p_ptt);
+ else /* BB */
+ ecore_emul_link_init_bb(p_hwfn, p_ptt);
+}
+
+static void ecore_link_init_bb(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u8 port)
{
int port_offset = port ? 0x800 : 0;
u32 xmac_rxctrl = 0;
@@ -1193,10 +1475,10 @@ static void ecore_link_init(struct ecore_hwfn *p_hwfn,
ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + sizeof(u32),
MISC_REG_RESET_REG_2_XMAC_BIT); /* Set */
- ecore_wr(p_hwfn, p_ptt, MISC_REG_XMAC_CORE_PORT_MODE, 1);
+ ecore_wr(p_hwfn, p_ptt, MISC_REG_XMAC_CORE_PORT_MODE_BB, 1);
/* Set the number of ports on the Warp Core to 10G */
- ecore_wr(p_hwfn, p_ptt, MISC_REG_XMAC_PHY_PORT_MODE, 3);
+ ecore_wr(p_hwfn, p_ptt, MISC_REG_XMAC_PHY_PORT_MODE_BB, 3);
/* Soft reset of XMAC */
ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + 2 * sizeof(u32),
@@ -1207,70 +1489,24 @@ static void ecore_link_init(struct ecore_hwfn *p_hwfn,
/* FIXME: move to common end */
if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
- ecore_wr(p_hwfn, p_ptt, XMAC_REG_MODE + port_offset, 0x20);
+ ecore_wr(p_hwfn, p_ptt, XMAC_REG_MODE_BB + port_offset, 0x20);
/* Set Max packet size: initialize XMAC block register for port 0 */
- ecore_wr(p_hwfn, p_ptt, XMAC_REG_RX_MAX_SIZE + port_offset, 0x2710);
+ ecore_wr(p_hwfn, p_ptt, XMAC_REG_RX_MAX_SIZE_BB + port_offset, 0x2710);
/* CRC append for Tx packets: init XMAC block register for port 1 */
- ecore_wr(p_hwfn, p_ptt, XMAC_REG_TX_CTRL_LO + port_offset, 0xC800);
+ ecore_wr(p_hwfn, p_ptt, XMAC_REG_TX_CTRL_LO_BB + port_offset, 0xC800);
/* Enable TX and RX: initialize XMAC block register for port 1 */
- ecore_wr(p_hwfn, p_ptt, XMAC_REG_CTRL + port_offset,
- XMAC_REG_CTRL_TX_EN | XMAC_REG_CTRL_RX_EN);
- xmac_rxctrl = ecore_rd(p_hwfn, p_ptt, XMAC_REG_RX_CTRL + port_offset);
- xmac_rxctrl |= XMAC_REG_RX_CTRL_PROCESS_VARIABLE_PREAMBLE;
- ecore_wr(p_hwfn, p_ptt, XMAC_REG_RX_CTRL + port_offset, xmac_rxctrl);
+ ecore_wr(p_hwfn, p_ptt, XMAC_REG_CTRL_BB + port_offset,
+ XMAC_REG_CTRL_TX_EN_BB | XMAC_REG_CTRL_RX_EN_BB);
+ xmac_rxctrl = ecore_rd(p_hwfn, p_ptt,
+ XMAC_REG_RX_CTRL_BB + port_offset);
+ xmac_rxctrl |= XMAC_REG_RX_CTRL_PROCESS_VARIABLE_PREAMBLE_BB;
+ ecore_wr(p_hwfn, p_ptt, XMAC_REG_RX_CTRL_BB + port_offset, xmac_rxctrl);
}
#endif
-static enum _ecore_status_t ecore_hw_init_port(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- int hw_mode)
-{
- enum _ecore_status_t rc = ECORE_SUCCESS;
-
- rc = ecore_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id,
- hw_mode);
- if (rc != ECORE_SUCCESS)
- return rc;
-#ifndef ASIC_ONLY
- if (CHIP_REV_IS_ASIC(p_hwfn->p_dev))
- return ECORE_SUCCESS;
-
- if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
- if (ECORE_IS_AH(p_hwfn->p_dev))
- return ECORE_SUCCESS;
- ecore_link_init(p_hwfn, p_ptt, p_hwfn->port_id);
- } else if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
- if (p_hwfn->p_dev->num_hwfns > 1) {
- /* Activate OPTE in CMT */
- u32 val;
-
- val = ecore_rd(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV);
- val |= 0x10;
- ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV, val);
- ecore_wr(p_hwfn, p_ptt, MISC_REG_CLK_100G_MODE, 1);
- ecore_wr(p_hwfn, p_ptt, MISCS_REG_CLK_100G_MODE, 1);
- ecore_wr(p_hwfn, p_ptt, MISC_REG_OPTE_MODE, 1);
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_ENG_CLS_TCP_4_TUPLE_SEARCH, 1);
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_ENG_CLS_ENG_ID_TBL, 0x55555555);
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_ENG_CLS_ENG_ID_TBL + 0x4,
- 0x55555555);
- }
-
- ecore_emul_link_init(p_hwfn, p_ptt);
- } else {
- DP_INFO(p_hwfn->p_dev, "link is not being configured\n");
- }
-#endif
-
- return rc;
-}
-
static enum _ecore_status_t
ecore_hw_init_dpi_size(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u32 pwm_region_size, u32 n_cpus)
@@ -1339,7 +1575,7 @@ ecore_hw_init_pf_doorbell_bar(struct ecore_hwfn *p_hwfn,
u32 db_bar_size, n_cpus;
u32 roce_edpm_mode;
u32 pf_dems_shift;
- int rc = ECORE_SUCCESS;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
u8 cond;
db_bar_size = ecore_hw_bar_size(p_hwfn, BAR_ID_1);
@@ -1394,8 +1630,9 @@ ecore_hw_init_pf_doorbell_bar(struct ecore_hwfn *p_hwfn,
rc = ecore_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus);
}
- cond = ((rc) && (roce_edpm_mode == ECORE_ROCE_EDPM_MODE_ENABLE)) ||
- (roce_edpm_mode == ECORE_ROCE_EDPM_MODE_DISABLE);
+ cond = ((rc != ECORE_SUCCESS) &&
+ (roce_edpm_mode == ECORE_ROCE_EDPM_MODE_ENABLE)) ||
+ (roce_edpm_mode == ECORE_ROCE_EDPM_MODE_DISABLE);
if (cond || p_hwfn->dcbx_no_edpm) {
/* Either EDPM is disabled from user configuration, or it is
* disabled via DCBx, or it is not mandatory and we failed to
@@ -1419,7 +1656,7 @@ ecore_hw_init_pf_doorbell_bar(struct ecore_hwfn *p_hwfn,
"disabled" : "enabled");
/* Check return codes from above calls */
- if (rc) {
+ if (rc != ECORE_SUCCESS) {
DP_ERR(p_hwfn,
"Failed to allocate enough DPIs\n");
return ECORE_NORESOURCES;
@@ -1437,10 +1674,58 @@ ecore_hw_init_pf_doorbell_bar(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
}
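Spelled out, the rewritten cond above takes the EDPM-disabled path in exactly two cases: the best-effort default (ECORE_ROCE_EDPM_MODE_ENABLE) was requested but ecore_hw_init_dpi_size() failed, or the user disabled EDPM outright (ECORE_ROCE_EDPM_MODE_DISABLE); p_hwfn->dcbx_no_edpm then ORs in the DCBx veto on top.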
+static enum _ecore_status_t ecore_hw_init_port(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ int hw_mode)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ rc = ecore_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id,
+ hw_mode);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_ASIC(p_hwfn->p_dev))
+ return ECORE_SUCCESS;
+
+ if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
+ if (ECORE_IS_AH(p_hwfn->p_dev))
+ return ECORE_SUCCESS;
+ else if (ECORE_IS_BB(p_hwfn->p_dev))
+ ecore_link_init_bb(p_hwfn, p_ptt, p_hwfn->port_id);
+ } else if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
+ if (p_hwfn->p_dev->num_hwfns > 1) {
+ /* Activate OPTE in CMT */
+ u32 val;
+
+ val = ecore_rd(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV);
+ val |= 0x10;
+ ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV, val);
+ ecore_wr(p_hwfn, p_ptt, MISC_REG_CLK_100G_MODE, 1);
+ ecore_wr(p_hwfn, p_ptt, MISCS_REG_CLK_100G_MODE, 1);
+ ecore_wr(p_hwfn, p_ptt, MISC_REG_OPTE_MODE, 1);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_ENG_CLS_TCP_4_TUPLE_SEARCH, 1);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_ENG_CLS_ENG_ID_TBL, 0x55555555);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_ENG_CLS_ENG_ID_TBL + 0x4,
+ 0x55555555);
+ }
+
+ ecore_emul_link_init(p_hwfn, p_ptt);
+ } else {
+ DP_INFO(p_hwfn->p_dev, "link is not being configured\n");
+ }
+#endif
+
+ return rc;
+}
+
static enum _ecore_status_t
ecore_hw_init_pf(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
- struct ecore_tunn_start_params *p_tunn,
+ struct ecore_tunnel_info *p_tunn,
int hw_mode,
bool b_hw_start,
enum ecore_int_mode int_mode, bool allow_npar_tx_switch)
@@ -1532,7 +1817,9 @@ ecore_hw_init_pf(struct ecore_hwfn *p_hwfn,
return rc;
if (b_hw_start) {
/* enable interrupts */
- ecore_int_igu_enable(p_hwfn, p_ptt, int_mode);
+ rc = ecore_int_igu_enable(p_hwfn, p_ptt, int_mode);
+ if (rc != ECORE_SUCCESS)
+ return rc;
/* send function start command */
rc = ecore_sp_pf_start(p_hwfn, p_tunn, p_hwfn->p_dev->mf_mode,
@@ -1618,14 +1905,31 @@ static void ecore_reset_mb_shadow(struct ecore_hwfn *p_hwfn,
p_hwfn->mcp_info->mfw_mb_length);
}
+enum _ecore_status_t ecore_vf_start(struct ecore_hwfn *p_hwfn,
+ struct ecore_hw_init_params *p_params)
+{
+ if (p_params->p_tunn) {
+ ecore_vf_set_vf_start_tunn_update_param(p_params->p_tunn);
+ ecore_vf_pf_tunnel_param_update(p_hwfn, p_params->p_tunn);
+ }
+
+ p_hwfn->b_int_enabled = 1;
+
+ return ECORE_SUCCESS;
+}
+
enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
struct ecore_hw_init_params *p_params)
{
- enum _ecore_status_t rc, mfw_rc;
- u32 load_code, param;
- int i, j;
+ struct ecore_load_req_params load_req_params;
+ u32 load_code, param, drv_mb_param;
+ bool b_default_mtu = true;
+ struct ecore_hwfn *p_hwfn;
+ enum _ecore_status_t rc = ECORE_SUCCESS, mfw_rc;
+ int i;
- if (p_params->int_mode == ECORE_INT_MODE_MSI && p_dev->num_hwfns > 1) {
+ if ((p_params->int_mode == ECORE_INT_MODE_MSI) &&
+ (p_dev->num_hwfns > 1)) {
DP_NOTICE(p_dev, false,
"MSI mode is not supported for CMT devices\n");
return ECORE_INVAL;
@@ -1640,8 +1944,14 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
for_each_hwfn(p_dev, i) {
struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+ /* If management didn't provide a default, set one of our own */
+ if (!p_hwfn->hw_info.mtu) {
+ p_hwfn->hw_info.mtu = 1500;
+ b_default_mtu = false;
+ }
+
if (IS_VF(p_dev)) {
- p_hwfn->b_int_enabled = 1;
+ ecore_vf_start(p_hwfn, p_params);
continue;
}
@@ -1654,33 +1964,37 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
if (rc != ECORE_SUCCESS)
return rc;
- /* @@@TBD need to add here:
- * Check for fan failure
- * Prev_unload
- */
- rc = ecore_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt, &load_code);
- if (rc) {
+ OSAL_MEM_ZERO(&load_req_params, sizeof(load_req_params));
+ load_req_params.drv_role = p_params->is_crash_kernel ?
+ ECORE_DRV_ROLE_KDUMP :
+ ECORE_DRV_ROLE_OS;
+ load_req_params.timeout_val = p_params->mfw_timeout_val;
+ load_req_params.avoid_eng_reset = p_params->avoid_eng_reset;
+ rc = ecore_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt,
+ &load_req_params);
+ if (rc != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, true,
- "Failed sending LOAD_REQ command\n");
+ "Failed sending a LOAD_REQ command\n");
return rc;
}
+ load_code = load_req_params.load_code;
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Load request was sent. Load code: 0x%x\n",
+ load_code);
+
/* CQ75580:
* When coming back from hibernate state, the registers from
* which shadow is read initially are not initialized. It turns
* out that these registers get initialized during the call to
* ecore_mcp_load_req request. So we need to reread them here
* to get the proper shadow register value.
- * Note: This is a workaround for the missinginig MFW
+ * Note: This is a workaround for the missing MFW
* initialization. It may be removed once the implementation
* is done.
*/
ecore_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt);
- DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
- "Load request was sent. Resp:0x%x, Load code: 0x%x\n",
- rc, load_code);
-
/* Only relevant for recovery:
* Clear the indication after the LOAD_REQ command is acknowledged
* by the MFW.
@@ -1699,33 +2013,14 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
case FW_MSG_CODE_DRV_LOAD_ENGINE:
rc = ecore_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
p_hwfn->hw_info.hw_mode);
- if (rc)
+ if (rc != ECORE_SUCCESS)
break;
/* Fall into */
case FW_MSG_CODE_DRV_LOAD_PORT:
rc = ecore_hw_init_port(p_hwfn, p_hwfn->p_main_ptt,
p_hwfn->hw_info.hw_mode);
- if (rc)
+ if (rc != ECORE_SUCCESS)
break;
-
-#ifndef REAL_ASIC_ONLY
- if (ENABLE_EAGLE_ENG1_WORKAROUND(p_hwfn)) {
- struct init_nig_pri_tc_map_req tc_map;
-
- OSAL_MEM_ZERO(&tc_map, sizeof(tc_map));
-
- /* remove this once flow control is
- * implemented
- */
- for (j = 0; j < NUM_OF_VLAN_PRIORITIES; j++) {
- tc_map.pri[j].tc_id = 0;
- tc_map.pri[j].valid = 1;
- }
- ecore_init_nig_pri_tc_map(p_hwfn,
- p_hwfn->p_main_ptt,
- &tc_map);
- }
-#endif
/* Fall into */
case FW_MSG_CODE_DRV_LOAD_FUNCTION:
rc = ecore_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
@@ -1736,6 +2031,8 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
p_params->allow_npar_tx_switch);
break;
default:
+ DP_NOTICE(p_hwfn, false,
+ "Unexpected load code [0x%08x]", load_code);
rc = ECORE_NOTIMPL;
break;
}
@@ -1751,16 +2048,13 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
0, &load_code, &param);
if (rc != ECORE_SUCCESS)
return rc;
+
if (mfw_rc != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, true,
- "Failed sending LOAD_DONE command\n");
+ "Failed sending a LOAD_DONE command\n");
return mfw_rc;
}
- ecore_mcp_mdump_get_info(p_hwfn, p_hwfn->p_main_ptt);
- ecore_mcp_mdump_set_values(p_hwfn, p_hwfn->p_main_ptt,
- p_params->epoch);
-
/* send DCBX attention request command */
DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
"sending phony dcbx set command to trigger DCBx attention handling\n");
@@ -1777,7 +2071,29 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
p_hwfn->hw_init_done = true;
}
- return ECORE_SUCCESS;
+ if (IS_PF(p_dev)) {
+ p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ drv_mb_param = STORM_FW_VERSION;
+ rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
+ DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER,
+ drv_mb_param, &load_code, &param);
+ if (rc != ECORE_SUCCESS)
+ DP_INFO(p_hwfn, "Failed to update firmware version\n");
+
+ if (!b_default_mtu) {
+ rc = ecore_mcp_ov_update_mtu(p_hwfn, p_hwfn->p_main_ptt,
+ p_hwfn->hw_info.mtu);
+ if (rc != ECORE_SUCCESS)
+ DP_INFO(p_hwfn, "Failed to update default mtu\n");
+ }
+
+ rc = ecore_mcp_ov_update_driver_state(p_hwfn,
+ p_hwfn->p_main_ptt,
+ ECORE_OV_DRIVER_STATE_DISABLED);
+ if (rc != ECORE_SUCCESS)
+ DP_INFO(p_hwfn, "Failed to update driver state\n");
+ }
+
+ return rc;
}
#define ECORE_HW_STOP_RETRY_LIMIT (10)
@@ -1802,13 +2118,14 @@ static void ecore_hw_timers_stop(struct ecore_dev *p_dev,
*/
OSAL_MSLEEP(1);
}
- if (i == ECORE_HW_STOP_RETRY_LIMIT)
- DP_NOTICE(p_hwfn, true,
- "Timers linear scans are not over [Connection %02x Tasks %02x]\n",
- (u8)ecore_rd(p_hwfn, p_ptt,
- TM_REG_PF_SCAN_ACTIVE_CONN),
- (u8)ecore_rd(p_hwfn, p_ptt,
- TM_REG_PF_SCAN_ACTIVE_TASK));
+
+ if (i < ECORE_HW_STOP_RETRY_LIMIT)
+ return;
+
+ DP_NOTICE(p_hwfn, true, "Timers linear scans are not over"
+ " [Connection %02x Tasks %02x]\n",
+ (u8)ecore_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_CONN),
+ (u8)ecore_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK));
}
void ecore_hw_timers_stop_all(struct ecore_dev *p_dev)
@@ -1823,32 +2140,77 @@ void ecore_hw_timers_stop_all(struct ecore_dev *p_dev)
}
}
+static enum _ecore_status_t ecore_verify_reg_val(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 addr, u32 expected_val)
+{
+ u32 val = ecore_rd(p_hwfn, p_ptt, addr);
+
+ if (val != expected_val) {
+ DP_NOTICE(p_hwfn, true,
+ "Value at address 0x%08x is 0x%08x while the expected value is 0x%08x\n",
+ addr, val, expected_val);
+ return ECORE_UNKNOWN_ERROR;
+ }
+
+ return ECORE_SUCCESS;
+}
+
enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev)
{
- enum _ecore_status_t rc = ECORE_SUCCESS, t_rc;
+ struct ecore_hwfn *p_hwfn;
+ struct ecore_ptt *p_ptt;
+ enum _ecore_status_t rc, rc2 = ECORE_SUCCESS;
int j;
for_each_hwfn(p_dev, j) {
- struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j];
- struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt;
+ p_hwfn = &p_dev->hwfns[j];
+ p_ptt = p_hwfn->p_main_ptt;
DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN, "Stopping hw/fw\n");
if (IS_VF(p_dev)) {
ecore_vf_pf_int_cleanup(p_hwfn);
+ rc = ecore_vf_pf_reset(p_hwfn);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, true,
+ "ecore_vf_pf_reset failed. rc = %d.\n",
+ rc);
+ rc2 = ECORE_UNKNOWN_ERROR;
+ }
continue;
}
/* mark the hw as uninitialized... */
p_hwfn->hw_init_done = false;
+ /* Send unload command to MCP */
+ if (!p_dev->recov_in_prog) {
+ rc = ecore_mcp_unload_req(p_hwfn, p_ptt);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, true,
+ "Failed sending a UNLOAD_REQ command. rc = %d.\n",
+ rc);
+ rc2 = ECORE_UNKNOWN_ERROR;
+ }
+ }
+
+ OSAL_DPC_SYNC(p_hwfn);
+
+ /* After this point no MFW attentions are expected, e.g. prevent
+ * race between pf stop and dcbx pf update.
+ */
+
rc = ecore_sp_pf_stop(p_hwfn);
- if (rc)
+ if (rc != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, true,
- "Failed to close PF against FW. Continue to stop HW to prevent illegal host access by the device\n");
+ "Failed to close PF against FW [rc = %d]. Continue to stop HW to prevent illegal host access by the device.\n",
+ rc);
+ rc2 = ECORE_UNKNOWN_ERROR;
+ }
/* perform debug action after PF stop was sent */
- OSAL_AFTER_PF_STOP((void *)p_hwfn->p_dev, p_hwfn->my_id);
+ OSAL_AFTER_PF_STOP((void *)p_dev, p_hwfn->my_id);
/* close NIG to BRB gate */
ecore_wr(p_hwfn, p_ptt,
@@ -1875,20 +2237,48 @@ enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev)
ecore_int_igu_init_pure_rt(p_hwfn, p_ptt, false, true);
/* Need to wait 1ms to guarantee SBs are cleared */
OSAL_MSLEEP(1);
- }
+
+ if (!p_dev->recov_in_prog) {
+ ecore_verify_reg_val(p_hwfn, p_ptt,
+ QM_REG_USG_CNT_PF_TX, 0);
+ ecore_verify_reg_val(p_hwfn, p_ptt,
+ QM_REG_USG_CNT_PF_OTHER, 0);
+ /* @@@TBD - assert on incorrect xCFC values (10.b) */
+ }
+
+ /* Disable PF in HW blocks */
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_DB_ENABLE, 0);
+ ecore_wr(p_hwfn, p_ptt, QM_REG_PF_EN, 0);
+
+ if (!p_dev->recov_in_prog) {
+ rc = ecore_mcp_unload_done(p_hwfn, p_ptt);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, true,
+ "Failed sending a UNLOAD_DONE command. rc = %d.\n",
+ rc);
+ rc2 = ECORE_UNKNOWN_ERROR;
+ }
+ }
+ } /* hwfn loop */
if (IS_PF(p_dev)) {
+ p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ p_ptt = ECORE_LEADING_HWFN(p_dev)->p_main_ptt;
+
/* Disable DMAE in PXP - in CMT, this should only be done for
* first hw-function, and only after all transactions have
* stopped for all active hw-functions.
*/
- t_rc = ecore_change_pci_hwfn(&p_dev->hwfns[0],
- p_dev->hwfns[0].p_main_ptt, false);
- if (t_rc != ECORE_SUCCESS)
- rc = t_rc;
+ rc = ecore_change_pci_hwfn(p_hwfn, p_ptt, false);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, true,
+ "ecore_change_pci_hwfn failed. rc = %d.\n",
+ rc);
+ rc2 = ECORE_UNKNOWN_ERROR;
+ }
}
- return rc;
+ return rc2;
}
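The reworked flow makes ecore_hw_stop() best-effort end to end: a failure at any per-hwfn step (UNLOAD_REQ, PF stop, register verification, UNLOAD_DONE) is recorded in rc2 instead of aborting, so the NIG gating, IGU cleanup and PF disable still run and the device is left quiesced even after a partially failed unload.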
void ecore_hw_stop_fastpath(struct ecore_dev *p_dev)
@@ -1949,84 +2339,6 @@ void ecore_hw_start_fastpath(struct ecore_hwfn *p_hwfn)
NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
}
-static enum _ecore_status_t ecore_reg_assert(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, u32 reg,
- bool expected)
-{
- u32 assert_val = ecore_rd(p_hwfn, p_ptt, reg);
-
- if (assert_val != expected) {
- DP_NOTICE(p_hwfn, true, "Value at address 0x%08x != 0x%08x\n",
- reg, expected);
- return ECORE_UNKNOWN_ERROR;
- }
-
- return 0;
-}
-
-enum _ecore_status_t ecore_hw_reset(struct ecore_dev *p_dev)
-{
- enum _ecore_status_t rc = ECORE_SUCCESS;
- u32 unload_resp, unload_param;
- int i;
-
- for_each_hwfn(p_dev, i) {
- struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
-
- if (IS_VF(p_dev)) {
- rc = ecore_vf_pf_reset(p_hwfn);
- if (rc)
- return rc;
- continue;
- }
-
- DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN, "Resetting hw/fw\n");
-
- /* Check for incorrect states */
- if (!p_dev->recov_in_prog) {
- ecore_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
- QM_REG_USG_CNT_PF_TX, 0);
- ecore_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
- QM_REG_USG_CNT_PF_OTHER, 0);
- /* @@@TBD - assert on incorrect xCFC values (10.b) */
- }
-
- /* Disable PF in HW blocks */
- ecore_wr(p_hwfn, p_hwfn->p_main_ptt, DORQ_REG_PF_DB_ENABLE, 0);
- ecore_wr(p_hwfn, p_hwfn->p_main_ptt, QM_REG_PF_EN, 0);
-
- if (p_dev->recov_in_prog) {
- DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN,
- "Recovery is in progress -> skip sending unload_req/done\n");
- break;
- }
-
- /* Send unload command to MCP */
- rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
- DRV_MSG_CODE_UNLOAD_REQ,
- DRV_MB_PARAM_UNLOAD_WOL_MCP,
- &unload_resp, &unload_param);
- if (rc != ECORE_SUCCESS) {
- DP_NOTICE(p_hwfn, true,
- "ecore_hw_reset: UNLOAD_REQ failed\n");
- /* @@TBD - what to do? for now, assume ENG. */
- unload_resp = FW_MSG_CODE_DRV_UNLOAD_ENGINE;
- }
-
- rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
- DRV_MSG_CODE_UNLOAD_DONE,
- 0, &unload_resp, &unload_param);
- if (rc != ECORE_SUCCESS) {
- DP_NOTICE(p_hwfn,
- true, "ecore_hw_reset: UNLOAD_DONE failed\n");
- /* @@@TBD - Should it really ASSERT here ? */
- return rc;
- }
- }
-
- return rc;
-}
-
/* Free hwfn memory and resources acquired in hw_hwfn_prepare */
static void ecore_hw_hwfn_free(struct ecore_hwfn *p_hwfn)
{
@@ -2040,22 +2352,22 @@ static void ecore_hw_hwfn_prepare(struct ecore_hwfn *p_hwfn)
/* clear indirect access */
if (ECORE_IS_AH(p_hwfn->p_dev)) {
ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
- PGLUE_B_REG_PGL_ADDR_E8_F0, 0);
+ PGLUE_B_REG_PGL_ADDR_E8_F0_K2_E5, 0);
ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
- PGLUE_B_REG_PGL_ADDR_EC_F0, 0);
+ PGLUE_B_REG_PGL_ADDR_EC_F0_K2_E5, 0);
ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
- PGLUE_B_REG_PGL_ADDR_F0_F0, 0);
+ PGLUE_B_REG_PGL_ADDR_F0_F0_K2_E5, 0);
ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
- PGLUE_B_REG_PGL_ADDR_F4_F0, 0);
+ PGLUE_B_REG_PGL_ADDR_F4_F0_K2_E5, 0);
} else {
ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
- PGLUE_B_REG_PGL_ADDR_88_F0, 0);
+ PGLUE_B_REG_PGL_ADDR_88_F0_BB, 0);
ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
- PGLUE_B_REG_PGL_ADDR_8C_F0, 0);
+ PGLUE_B_REG_PGL_ADDR_8C_F0_BB, 0);
ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
- PGLUE_B_REG_PGL_ADDR_90_F0, 0);
+ PGLUE_B_REG_PGL_ADDR_90_F0_BB, 0);
ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
- PGLUE_B_REG_PGL_ADDR_94_F0, 0);
+ PGLUE_B_REG_PGL_ADDR_94_F0_BB, 0);
}
/* Clean Previous errors if such exist */
@@ -2090,6 +2402,7 @@ static void get_function_id(struct ecore_hwfn *p_hwfn)
static void ecore_hw_set_feat(struct ecore_hwfn *p_hwfn)
{
u32 *feat_num = p_hwfn->hw_info.feat_num;
+ struct ecore_sb_cnt_info sb_cnt_info;
int num_features = 1;
/* L2 Queues require each: 1 status block. 1 L2 queue */
@@ -2098,145 +2411,226 @@ static void ecore_hw_set_feat(struct ecore_hwfn *p_hwfn)
RESC_NUM(p_hwfn, ECORE_SB) / num_features,
RESC_NUM(p_hwfn, ECORE_L2_QUEUE));
+ OSAL_MEM_ZERO(&sb_cnt_info, sizeof(sb_cnt_info));
+ ecore_int_get_num_sbs(p_hwfn, &sb_cnt_info);
+ feat_num[ECORE_VF_L2_QUE] =
+ OSAL_MIN_T(u32,
+ RESC_NUM(p_hwfn, ECORE_L2_QUEUE) -
+ FEAT_NUM(p_hwfn, ECORE_PF_L2_QUE),
+ sb_cnt_info.sb_iov_cnt);
+
+ feat_num[ECORE_FCOE_CQ] = OSAL_MIN_T(u32, RESC_NUM(p_hwfn, ECORE_SB),
+ RESC_NUM(p_hwfn, ECORE_CMDQS_CQS));
+ feat_num[ECORE_ISCSI_CQ] = OSAL_MIN_T(u32, RESC_NUM(p_hwfn, ECORE_SB),
+ RESC_NUM(p_hwfn, ECORE_CMDQS_CQS));
+
DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE,
- "#PF_L2_QUEUES=%d #ROCE_CNQ=%d #SBS=%d num_features=%d\n",
- feat_num[ECORE_PF_L2_QUE],
- feat_num[ECORE_RDMA_CNQ],
- RESC_NUM(p_hwfn, ECORE_SB), num_features);
+ "#PF_L2_QUEUE=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d #FCOE_CQ=%d #ISCSI_CQ=%d #SB=%d\n",
+ (int)FEAT_NUM(p_hwfn, ECORE_PF_L2_QUE),
+ (int)FEAT_NUM(p_hwfn, ECORE_VF_L2_QUE),
+ (int)FEAT_NUM(p_hwfn, ECORE_RDMA_CNQ),
+ (int)FEAT_NUM(p_hwfn, ECORE_FCOE_CQ),
+ (int)FEAT_NUM(p_hwfn, ECORE_ISCSI_CQ),
+ RESC_NUM(p_hwfn, ECORE_SB));
}
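A worked example with made-up resource counts: with RESC_NUM(ECORE_SB) = 64, RESC_NUM(ECORE_L2_QUEUE) = 128 and num_features = 1, ECORE_PF_L2_QUE = min(64 / 1, 128) = 64; if ecore_int_get_num_sbs() then reports sb_iov_cnt = 64, ECORE_VF_L2_QUE = min(128 - 64, 64) = 64. The PF's share is clamped by its status blocks first, and the VFs split whatever L2 queues remain.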
-static enum resource_id_enum
-ecore_hw_get_mfw_res_id(enum ecore_resources res_id)
+const char *ecore_hw_get_resc_name(enum ecore_resources res_id)
{
- enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID;
-
switch (res_id) {
case ECORE_SB:
- mfw_res_id = RESOURCE_NUM_SB_E;
- break;
+ return "SB";
case ECORE_L2_QUEUE:
- mfw_res_id = RESOURCE_NUM_L2_QUEUE_E;
- break;
+ return "L2_QUEUE";
case ECORE_VPORT:
- mfw_res_id = RESOURCE_NUM_VPORT_E;
- break;
+ return "VPORT";
case ECORE_RSS_ENG:
- mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E;
- break;
+ return "RSS_ENG";
case ECORE_PQ:
- mfw_res_id = RESOURCE_NUM_PQ_E;
- break;
+ return "PQ";
case ECORE_RL:
- mfw_res_id = RESOURCE_NUM_RL_E;
- break;
+ return "RL";
case ECORE_MAC:
+ return "MAC";
case ECORE_VLAN:
- /* Each VFC resource can accommodate both a MAC and a VLAN */
- mfw_res_id = RESOURCE_VFC_FILTER_E;
- break;
+ return "VLAN";
+ case ECORE_RDMA_CNQ_RAM:
+ return "RDMA_CNQ_RAM";
case ECORE_ILT:
- mfw_res_id = RESOURCE_ILT_E;
- break;
+ return "ILT";
case ECORE_LL2_QUEUE:
- mfw_res_id = RESOURCE_LL2_QUEUE_E;
- break;
- case ECORE_RDMA_CNQ_RAM:
+ return "LL2_QUEUE";
case ECORE_CMDQS_CQS:
- /* CNQ/CMDQS are the same resource */
- mfw_res_id = RESOURCE_CQS_E;
- break;
+ return "CMDQS_CQS";
case ECORE_RDMA_STATS_QUEUE:
- mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E;
- break;
+ return "RDMA_STATS_QUEUE";
+ case ECORE_BDQ:
+ return "BDQ";
default:
- break;
+ return "UNKNOWN_RESOURCE";
+ }
+}
+
+static enum _ecore_status_t
+__ecore_hw_set_soft_resc_size(struct ecore_hwfn *p_hwfn,
+ enum ecore_resources res_id, u32 resc_max_val,
+ u32 *p_mcp_resp)
+{
+ enum _ecore_status_t rc;
+
+ rc = ecore_mcp_set_resc_max_val(p_hwfn, p_hwfn->p_main_ptt, res_id,
+ resc_max_val, p_mcp_resp);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, true,
+ "MFW response failure for a max value setting of resource %d [%s]\n",
+ res_id, ecore_hw_get_resc_name(res_id));
+ return rc;
+ }
+
+ if (*p_mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK)
+ DP_INFO(p_hwfn,
+ "Failed to set the max value of resource %d [%s]. mcp_resp = 0x%08x.\n",
+ res_id, ecore_hw_get_resc_name(res_id), *p_mcp_resp);
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
+ecore_hw_set_soft_resc_size(struct ecore_hwfn *p_hwfn)
+{
+ bool b_ah = ECORE_IS_AH(p_hwfn->p_dev);
+ u32 resc_max_val, mcp_resp;
+ u8 res_id;
+ enum _ecore_status_t rc;
+
+ for (res_id = 0; res_id < ECORE_MAX_RESC; res_id++) {
+ /* @DPDK */
+ switch (res_id) {
+ case ECORE_LL2_QUEUE:
+ case ECORE_RDMA_CNQ_RAM:
+ case ECORE_RDMA_STATS_QUEUE:
+ case ECORE_BDQ:
+ resc_max_val = 0;
+ break;
+ default:
+ continue;
+ }
+
+ rc = __ecore_hw_set_soft_resc_size(p_hwfn, res_id,
+ resc_max_val, &mcp_resp);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ /* There's no point to continue to the next resource if the
+ * command is not supported by the MFW.
+ * We do continue if the command is supported but the resource
+ * is unknown to the MFW. Such a resource will be later
+ * configured with the default allocation values.
+ */
+ if (mcp_resp == FW_MSG_CODE_UNSUPPORTED)
+ return ECORE_NOTIMPL;
}
- return mfw_res_id;
+ return ECORE_SUCCESS;
}
-static u32 ecore_hw_get_dflt_resc_num(struct ecore_hwfn *p_hwfn,
- enum ecore_resources res_id)
+static
+enum _ecore_status_t ecore_hw_get_dflt_resc(struct ecore_hwfn *p_hwfn,
+ enum ecore_resources res_id,
+ u32 *p_resc_num, u32 *p_resc_start)
{
u8 num_funcs = p_hwfn->num_funcs_on_engine;
bool b_ah = ECORE_IS_AH(p_hwfn->p_dev);
struct ecore_sb_cnt_info sb_cnt_info;
- u32 dflt_resc_num = 0;
switch (res_id) {
case ECORE_SB:
OSAL_MEM_ZERO(&sb_cnt_info, sizeof(sb_cnt_info));
ecore_int_get_num_sbs(p_hwfn, &sb_cnt_info);
- dflt_resc_num = sb_cnt_info.sb_cnt;
+ *p_resc_num = sb_cnt_info.sb_cnt;
break;
case ECORE_L2_QUEUE:
- dflt_resc_num = (b_ah ? MAX_NUM_L2_QUEUES_K2 :
+ *p_resc_num = (b_ah ? MAX_NUM_L2_QUEUES_K2 :
MAX_NUM_L2_QUEUES_BB) / num_funcs;
break;
case ECORE_VPORT:
- dflt_resc_num = (b_ah ? MAX_NUM_VPORTS_K2 :
+ *p_resc_num = (b_ah ? MAX_NUM_VPORTS_K2 :
MAX_NUM_VPORTS_BB) / num_funcs;
break;
case ECORE_RSS_ENG:
- dflt_resc_num = (b_ah ? ETH_RSS_ENGINE_NUM_K2 :
+ *p_resc_num = (b_ah ? ETH_RSS_ENGINE_NUM_K2 :
ETH_RSS_ENGINE_NUM_BB) / num_funcs;
break;
case ECORE_PQ:
- dflt_resc_num = (b_ah ? MAX_QM_TX_QUEUES_K2 :
+ *p_resc_num = (b_ah ? MAX_QM_TX_QUEUES_K2 :
MAX_QM_TX_QUEUES_BB) / num_funcs;
break;
case ECORE_RL:
- dflt_resc_num = MAX_QM_GLOBAL_RLS / num_funcs;
+ *p_resc_num = MAX_QM_GLOBAL_RLS / num_funcs;
break;
case ECORE_MAC:
case ECORE_VLAN:
/* Each VFC resource can accommodate both a MAC and a VLAN */
- dflt_resc_num = ETH_NUM_MAC_FILTERS / num_funcs;
+ *p_resc_num = ETH_NUM_MAC_FILTERS / num_funcs;
break;
case ECORE_ILT:
- dflt_resc_num = (b_ah ? PXP_NUM_ILT_RECORDS_K2 :
+ *p_resc_num = (b_ah ? PXP_NUM_ILT_RECORDS_K2 :
PXP_NUM_ILT_RECORDS_BB) / num_funcs;
break;
case ECORE_LL2_QUEUE:
- dflt_resc_num = MAX_NUM_LL2_RX_QUEUES / num_funcs;
+ *p_resc_num = MAX_NUM_LL2_RX_QUEUES / num_funcs;
break;
case ECORE_RDMA_CNQ_RAM:
case ECORE_CMDQS_CQS:
/* CNQ/CMDQS are the same resource */
/* @DPDK */
- dflt_resc_num = (NUM_OF_GLOBAL_QUEUES / 2) / num_funcs;
+ *p_resc_num = (NUM_OF_GLOBAL_QUEUES / 2) / num_funcs;
break;
case ECORE_RDMA_STATS_QUEUE:
/* @DPDK */
- dflt_resc_num = (b_ah ? MAX_NUM_VPORTS_K2 :
+ *p_resc_num = (b_ah ? MAX_NUM_VPORTS_K2 :
MAX_NUM_VPORTS_BB) / num_funcs;
break;
+ case ECORE_BDQ:
+ /* @DPDK */
+ *p_resc_num = 0;
+ break;
default:
break;
}
- return dflt_resc_num;
+
+ switch (res_id) {
+ case ECORE_BDQ:
+ if (!*p_resc_num)
+ *p_resc_start = 0;
+ break;
+ default:
+ *p_resc_start = *p_resc_num * p_hwfn->enabled_func_idx;
+ break;
+ }
+
+ return ECORE_SUCCESS;
}
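A worked example of the default split (numbers hypothetical): on a BB device shared by num_funcs = 4 functions, each function's default ECORE_L2_QUEUE quota is MAX_NUM_L2_QUEUES_BB / 4, and for enabled_func_idx = 2 the default start is twice that quota. Apart from the BDQ special case, the second switch simply tiles every resource into disjoint per-function slices.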
-static enum _ecore_status_t ecore_hw_set_resc_info(struct ecore_hwfn *p_hwfn,
- enum ecore_resources res_id,
- bool drv_resc_alloc)
+static enum _ecore_status_t
+__ecore_hw_set_resc_info(struct ecore_hwfn *p_hwfn, enum ecore_resources res_id,
+ bool drv_resc_alloc)
{
- u32 dflt_resc_num = 0, dflt_resc_start = 0, mcp_resp, mcp_param;
- u32 *p_resc_num, *p_resc_start;
- struct resource_info resc_info;
+ u32 dflt_resc_num = 0, dflt_resc_start = 0;
+ u32 mcp_resp, *p_resc_num, *p_resc_start;
enum _ecore_status_t rc;
p_resc_num = &RESC_NUM(p_hwfn, res_id);
p_resc_start = &RESC_START(p_hwfn, res_id);
- dflt_resc_num = ecore_hw_get_dflt_resc_num(p_hwfn, res_id);
- if (!dflt_resc_num) {
- DP_ERR(p_hwfn, "Failed to get default amount for resource %d\n",
- res_id);
- return ECORE_INVAL;
+ rc = ecore_hw_get_dflt_resc(p_hwfn, res_id, &dflt_resc_num,
+ &dflt_resc_start);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(p_hwfn,
+ "Failed to get default amount for resource %d [%s]\n",
+ res_id, ecore_hw_get_resc_name(res_id));
+ return rc;
}
- dflt_resc_start = dflt_resc_num * p_hwfn->enabled_func_idx;
#ifndef ASIC_ONLY
if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
@@ -2246,21 +2640,13 @@ static enum _ecore_status_t ecore_hw_set_resc_info(struct ecore_hwfn *p_hwfn,
}
#endif
- OSAL_MEM_ZERO(&resc_info, sizeof(resc_info));
- resc_info.res_id = ecore_hw_get_mfw_res_id(res_id);
- if (resc_info.res_id == RESOURCE_NUM_INVALID) {
- DP_ERR(p_hwfn,
- "Failed to match resource %d with MFW resources\n",
- res_id);
- return ECORE_INVAL;
- }
-
- rc = ecore_mcp_get_resc_info(p_hwfn, p_hwfn->p_main_ptt, &resc_info,
- &mcp_resp, &mcp_param);
+ rc = ecore_mcp_get_resc_info(p_hwfn, p_hwfn->p_main_ptt, res_id,
+ &mcp_resp, p_resc_num, p_resc_start);
if (rc != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, true,
- "MFW resp failure for a resc alloc req [res_id %d]\n",
- res_id);
+ "MFW response failure for an allocation request for"
+ " resource %d [%s]\n",
+ res_id, ecore_hw_get_resc_name(res_id));
return rc;
}
@@ -2269,15 +2655,13 @@ static enum _ecore_status_t ecore_hw_set_resc_info(struct ecore_hwfn *p_hwfn,
* - There is an internal error in the MFW while processing the request
* - The resource ID is unknown to the MFW
*/
- if (mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK &&
- mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_DEPRECATED) {
- /* @DPDK */
- DP_INFO(p_hwfn,
- "No allocation info for resc %d [mcp_resp 0x%x].",
- res_id, mcp_resp);
+ if (mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK) {
DP_INFO(p_hwfn,
- "Applying default values [num %d, start %d].\n",
- dflt_resc_num, dflt_resc_start);
+ "Failed to receive allocation info for resource %d [%s]."
+ " mcp_resp = 0x%x. Applying default values"
+ " [%d,%d].\n",
+ res_id, ecore_hw_get_resc_name(res_id), mcp_resp,
+ dflt_resc_num, dflt_resc_start);
*p_resc_num = dflt_resc_num;
*p_resc_start = dflt_resc_start;
@@ -2287,71 +2671,51 @@ static enum _ecore_status_t ecore_hw_set_resc_info(struct ecore_hwfn *p_hwfn,
/* TBD - remove this when revising the handling of the SB resource */
if (res_id == ECORE_SB) {
/* Excluding the slowpath SB */
- resc_info.size -= 1;
- resc_info.offset -= p_hwfn->enabled_func_idx;
+ *p_resc_num -= 1;
+ *p_resc_start -= p_hwfn->enabled_func_idx;
}
- *p_resc_num = resc_info.size;
- *p_resc_start = resc_info.offset;
-
if (*p_resc_num != dflt_resc_num || *p_resc_start != dflt_resc_start) {
- DP_NOTICE(p_hwfn, false,
- "Resource %d: MFW allocation [num %d, start %d]",
- res_id, *p_resc_num, *p_resc_start);
- DP_NOTICE(p_hwfn, false,
- "differs from default values [num %d, start %d]%s\n",
- dflt_resc_num,
- dflt_resc_start,
- drv_resc_alloc ? " - applying default values" : "");
+ DP_INFO(p_hwfn,
+ "MFW allocation for resource %d [%s] differs from default values [%d,%d vs. %d,%d]%s\n",
+ res_id, ecore_hw_get_resc_name(res_id), *p_resc_num,
+ *p_resc_start, dflt_resc_num, dflt_resc_start,
+ drv_resc_alloc ? " - Applying default values" : "");
if (drv_resc_alloc) {
*p_resc_num = dflt_resc_num;
*p_resc_start = dflt_resc_start;
}
}
- out:
+out:
return ECORE_SUCCESS;
}
-static const char *ecore_hw_get_resc_name(enum ecore_resources res_id)
+static enum _ecore_status_t ecore_hw_set_resc_info(struct ecore_hwfn *p_hwfn,
+ bool drv_resc_alloc)
{
- switch (res_id) {
- case ECORE_SB:
- return "SB";
- case ECORE_L2_QUEUE:
- return "L2_QUEUE";
- case ECORE_VPORT:
- return "VPORT";
- case ECORE_RSS_ENG:
- return "RSS_ENG";
- case ECORE_PQ:
- return "PQ";
- case ECORE_RL:
- return "RL";
- case ECORE_MAC:
- return "MAC";
- case ECORE_VLAN:
- return "VLAN";
- case ECORE_RDMA_CNQ_RAM:
- return "RDMA_CNQ_RAM";
- case ECORE_ILT:
- return "ILT";
- case ECORE_LL2_QUEUE:
- return "LL2_QUEUE";
- case ECORE_CMDQS_CQS:
- return "CMDQS_CQS";
- case ECORE_RDMA_STATS_QUEUE:
- return "RDMA_STATS_QUEUE";
- default:
- return "UNKNOWN_RESOURCE";
+ enum _ecore_status_t rc;
+ u8 res_id;
+
+ for (res_id = 0; res_id < ECORE_MAX_RESC; res_id++) {
+ rc = __ecore_hw_set_resc_info(p_hwfn, res_id, drv_resc_alloc);
+ if (rc != ECORE_SUCCESS)
+ return rc;
}
+
+ return ECORE_SUCCESS;
}
+#define ECORE_RESC_ALLOC_LOCK_RETRY_CNT 10
+#define ECORE_RESC_ALLOC_LOCK_RETRY_INTVL_US 10000 /* 10 msec */
+
static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn,
bool drv_resc_alloc)
{
+ struct ecore_resc_unlock_params resc_unlock_params;
+ struct ecore_resc_lock_params resc_lock_params;
bool b_ah = ECORE_IS_AH(p_hwfn->p_dev);
- enum _ecore_status_t rc;
u8 res_id;
+ enum _ecore_status_t rc;
#ifndef ASIC_ONLY
u32 *resc_start = p_hwfn->hw_info.resc_start;
u32 *resc_num = p_hwfn->hw_info.resc_num;
@@ -2364,10 +2728,62 @@ static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn,
u32 roce_min_ilt_lines = PXP_NUM_ILT_RECORDS_BB / MAX_NUM_PFS_BB;
#endif
- for (res_id = 0; res_id < ECORE_MAX_RESC; res_id++) {
- rc = ecore_hw_set_resc_info(p_hwfn, res_id, drv_resc_alloc);
+	/* Setting the max values of the soft resources and the following
+	 * resource allocation queries should be atomic. Since several PFs can
+	 * run in parallel - a resource lock is needed.
+	 * If either the resource lock or resource set value commands are not
+	 * supported - skip the max values setting, release the lock if
+	 * needed, and proceed to the queries. Other failures, including a
+	 * failure to acquire the lock, will cause this function to fail.
+	 * Old drivers that don't acquire the lock can run in parallel, and
+	 * their allocation values won't be affected by the updated max values.
+	 */
+ OSAL_MEM_ZERO(&resc_lock_params, sizeof(resc_lock_params));
+ resc_lock_params.resource = ECORE_RESC_LOCK_RESC_ALLOC;
+ resc_lock_params.retry_num = ECORE_RESC_ALLOC_LOCK_RETRY_CNT;
+ resc_lock_params.retry_interval = ECORE_RESC_ALLOC_LOCK_RETRY_INTVL_US;
+ resc_lock_params.sleep_b4_retry = true;
+ OSAL_MEM_ZERO(&resc_unlock_params, sizeof(resc_unlock_params));
+ resc_unlock_params.resource = ECORE_RESC_LOCK_RESC_ALLOC;
+
+ rc = ecore_mcp_resc_lock(p_hwfn, p_hwfn->p_main_ptt, &resc_lock_params);
+ if (rc != ECORE_SUCCESS && rc != ECORE_NOTIMPL) {
+ return rc;
+ } else if (rc == ECORE_NOTIMPL) {
+ DP_INFO(p_hwfn,
+ "Skip the max values setting of the soft resources since the resource lock is not supported by the MFW\n");
+ } else if (rc == ECORE_SUCCESS && !resc_lock_params.b_granted) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to acquire the resource lock for the resource allocation commands\n");
+ rc = ECORE_BUSY;
+ goto unlock_and_exit;
+ } else {
+ rc = ecore_hw_set_soft_resc_size(p_hwfn);
+ if (rc != ECORE_SUCCESS && rc != ECORE_NOTIMPL) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to set the max values of the soft resources\n");
+ goto unlock_and_exit;
+ } else if (rc == ECORE_NOTIMPL) {
+ DP_INFO(p_hwfn,
+ "Skip the max values setting of the soft resources since it is not supported by the MFW\n");
+ rc = ecore_mcp_resc_unlock(p_hwfn, p_hwfn->p_main_ptt,
+ &resc_unlock_params);
+ if (rc != ECORE_SUCCESS)
+ DP_INFO(p_hwfn,
+ "Failed to release the resource lock for the resource allocation commands\n");
+ }
+ }
+
+ rc = ecore_hw_set_resc_info(p_hwfn, drv_resc_alloc);
+ if (rc != ECORE_SUCCESS)
+ goto unlock_and_exit;
+
+ if (resc_lock_params.b_granted && !resc_unlock_params.b_released) {
+ rc = ecore_mcp_resc_unlock(p_hwfn, p_hwfn->p_main_ptt,
+ &resc_unlock_params);
if (rc != ECORE_SUCCESS)
- return rc;
+ DP_INFO(p_hwfn,
+ "Failed to release the resource lock for the resource allocation commands\n");
}
#ifndef ASIC_ONLY
@@ -2420,14 +2836,21 @@ static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn,
RESC_START(p_hwfn, res_id));
return ECORE_SUCCESS;
+
+unlock_and_exit:
+ ecore_mcp_resc_unlock(p_hwfn, p_hwfn->p_main_ptt, &resc_unlock_params);
+ return rc;
}
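
The lock parameters above (retry_num, retry_interval, sleep_b4_retry) suggest a bounded retry loop: up to retry_num attempts, sleeping retry_interval microseconds between them. A minimal standalone sketch of that pattern, assuming a simple single-owner lock (the state and helper names are illustrative, not ecore API; not part of this patch):

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static int g_lock_owner = -1;	/* -1 means the lock is free */

static bool try_take_lock(int pf_id)
{
	if (g_lock_owner == -1) {
		g_lock_owner = pf_id;	/* granted */
		return true;
	}
	return false;			/* held by another PF */
}

static bool acquire_with_retries(int pf_id, int retry_num,
				 unsigned int retry_interval_us)
{
	int attempt;

	for (attempt = 0; attempt < retry_num; attempt++) {
		if (try_take_lock(pf_id))
			return true;
		usleep(retry_interval_us);	/* sleep_b4_retry == true */
	}
	return false;	/* caller would treat this as ECORE_BUSY */
}

int main(void)
{
	/* Same values as ECORE_RESC_ALLOC_LOCK_RETRY_CNT/_INTVL_US above */
	if (acquire_with_retries(0, 10, 10000))
		printf("lock granted\n");
	else
		printf("lock not granted\n");
	return 0;
}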
-static enum _ecore_status_t ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt)
+static enum _ecore_status_t
+ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_hw_prepare_params *p_params)
{
- u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg;
+ u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg, dcbx_mode;
u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities;
struct ecore_mcp_link_params *link;
+ enum _ecore_status_t rc;
/* Read global nvm_cfg address */
nvm_cfg_addr = ecore_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
@@ -2435,6 +2858,8 @@ static enum _ecore_status_t ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn,
/* Verify MCP has initialized it */
if (!nvm_cfg_addr) {
DP_NOTICE(p_hwfn, false, "Shared memory not initialized\n");
+ if (p_params->b_relaxed_probe)
+ p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_NVM;
return ECORE_INVAL;
}
@@ -2474,6 +2899,9 @@ static enum _ecore_status_t ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn,
case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G:
p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X25G;
break;
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G:
+ p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X10G;
+ break;
case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G:
p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_1X25G;
break;
@@ -2486,6 +2914,28 @@ static enum _ecore_status_t ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn,
break;
}
+ /* Read DCBX configuration */
+ port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+ OFFSETOF(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
+ dcbx_mode = ecore_rd(p_hwfn, p_ptt,
+ port_cfg_addr +
+ OFFSETOF(struct nvm_cfg1_port, generic_cont0));
+ dcbx_mode = (dcbx_mode & NVM_CFG1_PORT_DCBX_MODE_MASK)
+ >> NVM_CFG1_PORT_DCBX_MODE_OFFSET;
+ switch (dcbx_mode) {
+ case NVM_CFG1_PORT_DCBX_MODE_DYNAMIC:
+ p_hwfn->hw_info.dcbx_mode = ECORE_DCBX_VERSION_DYNAMIC;
+ break;
+ case NVM_CFG1_PORT_DCBX_MODE_CEE:
+ p_hwfn->hw_info.dcbx_mode = ECORE_DCBX_VERSION_CEE;
+ break;
+ case NVM_CFG1_PORT_DCBX_MODE_IEEE:
+ p_hwfn->hw_info.dcbx_mode = ECORE_DCBX_VERSION_IEEE;
+ break;
+ default:
+ p_hwfn->hw_info.dcbx_mode = ECORE_DCBX_VERSION_DISABLED;
+ }
+
/* Read default link configuration */
link = &p_hwfn->mcp_info->link_input;
port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
@@ -2595,7 +3045,13 @@ static enum _ecore_status_t ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn,
OSAL_SET_BIT(ECORE_DEV_CAP_IWARP,
&p_hwfn->hw_info.device_capabilities);
- return ecore_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
+ rc = ecore_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
+ if (rc != ECORE_SUCCESS && p_params->b_relaxed_probe) {
+ rc = ECORE_SUCCESS;
+ p_params->p_relaxed_res = ECORE_HW_PREPARE_BAD_MCP;
+ }
+
+ return rc;
}
static void ecore_get_num_funcs(struct ecore_hwfn *p_hwfn,
@@ -2615,7 +3071,12 @@ static void ecore_get_num_funcs(struct ecore_hwfn *p_hwfn,
* In case of CMT in BB, only the "even" functions are enabled, and thus
* the number of functions for both hwfns is learnt from the same bits.
*/
- reg_function_hide = ecore_rd(p_hwfn, p_ptt, MISCS_REG_FUNCTION_HIDE);
+ if (ECORE_IS_BB(p_dev) || ECORE_IS_AH(p_dev)) {
+ reg_function_hide = ecore_rd(p_hwfn, p_ptt,
+ MISCS_REG_FUNCTION_HIDE_BB_K2);
+ } else { /* E5 */
+ reg_function_hide = 0;
+ }
if (reg_function_hide & 0x1) {
if (ECORE_IS_BB(p_dev)) {
@@ -2681,8 +3142,7 @@ static void ecore_hw_info_port_num_bb(struct ecore_hwfn *p_hwfn,
port_mode = 1;
else
#endif
- port_mode = ecore_rd(p_hwfn, p_ptt,
- CNIG_REG_NW_PORT_MODE_BB_B0);
+ port_mode = ecore_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB);
if (port_mode < 3) {
p_hwfn->p_dev->num_ports_in_engines = 1;
@@ -2697,8 +3157,8 @@ static void ecore_hw_info_port_num_bb(struct ecore_hwfn *p_hwfn,
}
}
-static void ecore_hw_info_port_num_ah(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt)
+static void ecore_hw_info_port_num_ah_e5(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
{
u32 port;
int i;
@@ -2727,7 +3187,8 @@ static void ecore_hw_info_port_num_ah(struct ecore_hwfn *p_hwfn,
#endif
for (i = 0; i < MAX_NUM_PORTS_K2; i++) {
port = ecore_rd(p_hwfn, p_ptt,
- CNIG_REG_NIG_PORT0_CONF_K2 + (i * 4));
+ CNIG_REG_NIG_PORT0_CONF_K2_E5 +
+ (i * 4));
if (port & 1)
p_hwfn->p_dev->num_ports_in_engines++;
}
@@ -2739,20 +3200,27 @@ static void ecore_hw_info_port_num(struct ecore_hwfn *p_hwfn,
if (ECORE_IS_BB(p_hwfn->p_dev))
ecore_hw_info_port_num_bb(p_hwfn, p_ptt);
else
- ecore_hw_info_port_num_ah(p_hwfn, p_ptt);
+ ecore_hw_info_port_num_ah_e5(p_hwfn, p_ptt);
}
static enum _ecore_status_t
ecore_get_hw_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
- enum ecore_pci_personality personality, bool drv_resc_alloc)
+ enum ecore_pci_personality personality,
+ struct ecore_hw_prepare_params *p_params)
{
+ bool drv_resc_alloc = p_params->drv_resc_alloc;
enum _ecore_status_t rc;
/* Since all information is common, only first hwfns should do this */
if (IS_LEAD_HWFN(p_hwfn)) {
rc = ecore_iov_hw_info(p_hwfn);
- if (rc)
- return rc;
+ if (rc != ECORE_SUCCESS) {
+ if (p_params->b_relaxed_probe)
+ p_params->p_relaxed_res =
+ ECORE_HW_PREPARE_BAD_IOV;
+ else
+ return rc;
+ }
}
	/* TODO In get_hw_info, amongst others:
@@ -2765,13 +3233,22 @@ ecore_get_hw_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
ecore_hw_info_port_num(p_hwfn, p_ptt);
#ifndef ASIC_ONLY
- if (CHIP_REV_IS_ASIC(p_hwfn->p_dev))
+ if (CHIP_REV_IS_ASIC(p_hwfn->p_dev)) {
+#endif
+ rc = ecore_hw_get_nvm_info(p_hwfn, p_ptt, p_params);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+#ifndef ASIC_ONLY
+ }
#endif
- ecore_hw_get_nvm_info(p_hwfn, p_ptt);
rc = ecore_int_igu_read_cam(p_hwfn, p_ptt);
- if (rc)
- return rc;
+ if (rc != ECORE_SUCCESS) {
+ if (p_params->b_relaxed_probe)
+ p_params->p_relaxed_res = ECORE_HW_PREPARE_BAD_IGU;
+ else
+ return rc;
+ }
#ifndef ASIC_ONLY
if (CHIP_REV_IS_ASIC(p_hwfn->p_dev) && ecore_mcp_is_init(p_hwfn)) {
@@ -2795,11 +3272,14 @@ ecore_get_hw_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
ecore_mcp_cmd_port_init(p_hwfn, p_ptt);
}
- if (personality != ECORE_PCI_DEFAULT)
+ if (personality != ECORE_PCI_DEFAULT) {
p_hwfn->hw_info.personality = personality;
- else if (ecore_mcp_is_init(p_hwfn))
- p_hwfn->hw_info.personality =
- p_hwfn->mcp_info->func_info.protocol;
+ } else if (ecore_mcp_is_init(p_hwfn)) {
+ enum ecore_pci_personality protocol;
+
+ protocol = p_hwfn->mcp_info->func_info.protocol;
+ p_hwfn->hw_info.personality = protocol;
+ }
#ifndef ASIC_ONLY
	/* To overcome ILT lack for emulation, at least until we'll have
@@ -2826,18 +3306,23 @@ ecore_get_hw_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
ecore_get_num_funcs(p_hwfn, p_ptt);
+ if (ecore_mcp_is_init(p_hwfn))
+ p_hwfn->hw_info.mtu = p_hwfn->mcp_info->func_info.mtu;
+
/* In case of forcing the driver's default resource allocation, calling
* ecore_hw_get_resc() should come after initializing the personality
* and after getting the number of functions, since the calculation of
* the resources/features depends on them.
* This order is not harmful if not forcing.
*/
- return ecore_hw_get_resc(p_hwfn, drv_resc_alloc);
-}
+ rc = ecore_hw_get_resc(p_hwfn, drv_resc_alloc);
+ if (rc != ECORE_SUCCESS && p_params->b_relaxed_probe) {
+ rc = ECORE_SUCCESS;
+ p_params->p_relaxed_res = ECORE_HW_PREPARE_BAD_MCP;
+ }
-#define ECORE_DEV_ID_MASK 0xff00
-#define ECORE_DEV_ID_MASK_BB 0x1600
-#define ECORE_DEV_ID_MASK_AH 0x8000
+ return rc;
+}
static enum _ecore_status_t ecore_get_dev_info(struct ecore_dev *p_dev)
{
@@ -2892,9 +3377,9 @@ static enum _ecore_status_t ecore_get_dev_info(struct ecore_dev *p_dev)
MISCS_REG_CHIP_METAL);
MASK_FIELD(CHIP_METAL, p_dev->chip_metal);
DP_INFO(p_dev->hwfns,
- "Chip details - %s%d, Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n",
+ "Chip details - %s %c%d, Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n",
ECORE_IS_BB(p_dev) ? "BB" : "AH",
- CHIP_REV_IS_A0(p_dev) ? 0 : 1,
+ 'A' + p_dev->chip_rev, (int)p_dev->chip_metal,
p_dev->chip_num, p_dev->chip_rev, p_dev->chip_bond_id,
p_dev->chip_metal);
@@ -2948,11 +3433,13 @@ void ecore_prepare_hibernate(struct ecore_dev *p_dev)
#endif
static enum _ecore_status_t
-ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn, void OSAL_IOMEM *p_regview,
- void OSAL_IOMEM *p_doorbells,
+ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn,
+ void OSAL_IOMEM * p_regview,
+ void OSAL_IOMEM * p_doorbells,
struct ecore_hw_prepare_params *p_params)
{
struct ecore_dev *p_dev = p_hwfn->p_dev;
+ struct ecore_mdump_info mdump_info;
enum _ecore_status_t rc = ECORE_SUCCESS;
/* Split PCI bars evenly between hwfns */
@@ -2966,6 +3453,8 @@ ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn, void OSAL_IOMEM *p_regview,
if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) {
DP_ERR(p_hwfn,
"Reading the ME register returns all Fs; Preventing further chip access\n");
+ if (p_params->b_relaxed_probe)
+ p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_ME;
return ECORE_INVAL;
}
@@ -2975,6 +3464,8 @@ ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn, void OSAL_IOMEM *p_regview,
rc = ecore_ptt_pool_alloc(p_hwfn);
if (rc) {
DP_NOTICE(p_hwfn, true, "Failed to prepare hwfn's hw\n");
+ if (p_params->b_relaxed_probe)
+ p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_MEM;
goto err0;
}
@@ -2984,8 +3475,12 @@ ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn, void OSAL_IOMEM *p_regview,
/* First hwfn learns basic information, e.g., number of hwfns */
if (!p_hwfn->my_id) {
rc = ecore_get_dev_info(p_dev);
- if (rc != ECORE_SUCCESS)
+ if (rc != ECORE_SUCCESS) {
+ if (p_params->b_relaxed_probe)
+ p_params->p_relaxed_res =
+ ECORE_HW_PREPARE_FAILED_DEV;
goto err1;
+ }
}
ecore_hw_hwfn_prepare(p_hwfn);
@@ -2994,50 +3489,72 @@ ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn, void OSAL_IOMEM *p_regview,
rc = ecore_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt);
if (rc) {
DP_NOTICE(p_hwfn, true, "Failed initializing mcp command\n");
+ if (p_params->b_relaxed_probe)
+ p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_MEM;
goto err1;
}
- if (p_hwfn == ECORE_LEADING_HWFN(p_dev) && !p_dev->recov_in_prog) {
- rc = ecore_mcp_initiate_pf_flr(p_hwfn, p_hwfn->p_main_ptt);
- if (rc != ECORE_SUCCESS)
- DP_NOTICE(p_hwfn, false, "Failed to initiate PF FLR\n");
- }
-
/* Read the device configuration information from the HW and SHMEM */
rc = ecore_get_hw_info(p_hwfn, p_hwfn->p_main_ptt,
- p_params->personality, p_params->drv_resc_alloc);
+ p_params->personality, p_params);
if (rc) {
DP_NOTICE(p_hwfn, true, "Failed to get HW information\n");
goto err2;
}
+ /* Sending a mailbox to the MFW should be after ecore_get_hw_info() is
+	 * called, since among others it sets the number of ports in an engine.
+ */
+ if (p_params->initiate_pf_flr && p_hwfn == ECORE_LEADING_HWFN(p_dev) &&
+ !p_dev->recov_in_prog) {
+ rc = ecore_mcp_initiate_pf_flr(p_hwfn, p_hwfn->p_main_ptt);
+ if (rc != ECORE_SUCCESS)
+ DP_NOTICE(p_hwfn, false, "Failed to initiate PF FLR\n");
+ }
+
+ /* Check if mdump logs are present and update the epoch value */
+ if (p_hwfn == ECORE_LEADING_HWFN(p_hwfn->p_dev)) {
+ rc = ecore_mcp_mdump_get_info(p_hwfn, p_hwfn->p_main_ptt,
+ &mdump_info);
+ if (rc == ECORE_SUCCESS && mdump_info.num_of_logs > 0) {
+ DP_NOTICE(p_hwfn, false,
+ "* * * IMPORTANT - HW ERROR register dump captured by device * * *\n");
+ }
+
+ ecore_mcp_mdump_set_values(p_hwfn, p_hwfn->p_main_ptt,
+ p_params->epoch);
+ }
+
/* Allocate the init RT array and initialize the init-ops engine */
rc = ecore_init_alloc(p_hwfn);
if (rc) {
DP_NOTICE(p_hwfn, true, "Failed to allocate the init array\n");
+ if (p_params->b_relaxed_probe)
+ p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_MEM;
goto err2;
}
#ifndef ASIC_ONLY
if (CHIP_REV_IS_FPGA(p_dev)) {
DP_NOTICE(p_hwfn, false,
"FPGA: workaround; Prevent DMAE parities\n");
- ecore_wr(p_hwfn, p_hwfn->p_main_ptt, PCIE_REG_PRTY_MASK, 7);
+ ecore_wr(p_hwfn, p_hwfn->p_main_ptt, PCIE_REG_PRTY_MASK_K2_E5,
+ 7);
DP_NOTICE(p_hwfn, false,
"FPGA: workaround: Set VF bar0 size\n");
ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
- PGLUE_B_REG_VF_BAR0_SIZE, 4);
+ PGLUE_B_REG_VF_BAR0_SIZE_K2_E5, 4);
}
#endif
return rc;
- err2:
+err2:
if (IS_LEAD_HWFN(p_hwfn))
ecore_iov_free_hw_info(p_dev);
ecore_mcp_free(p_hwfn);
- err1:
+err1:
ecore_hw_hwfn_free(p_hwfn);
- err0:
+err0:
return rc;
}
@@ -3049,6 +3566,9 @@ enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev,
p_dev->chk_reg_fifo = p_params->chk_reg_fifo;
+ if (p_params->b_relaxed_probe)
+ p_params->p_relaxed_res = ECORE_HW_PREPARE_SUCCESS;
+
/* Store the precompiled init data ptrs */
if (IS_PF(p_dev))
ecore_init_iro_array(p_dev);
@@ -3084,6 +3604,10 @@ enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev,
	 * initialized hwfn 0.
*/
if (rc != ECORE_SUCCESS) {
+ if (p_params->b_relaxed_probe)
+ p_params->p_relaxed_res =
+ ECORE_HW_PREPARE_FAILED_ENG2;
+
if (IS_PF(p_dev)) {
ecore_init_free(p_hwfn);
ecore_mcp_free(p_hwfn);
@@ -3096,13 +3620,18 @@ enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev,
}
}
- return ECORE_SUCCESS;
+ return rc;
}
void ecore_hw_remove(struct ecore_dev *p_dev)
{
+ struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
int i;
+ if (IS_PF(p_dev))
+ ecore_mcp_ov_update_driver_state(p_hwfn, p_hwfn->p_main_ptt,
+ ECORE_OV_DRIVER_STATE_NOT_LOADED);
+
for_each_hwfn(p_dev, i) {
struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
@@ -3164,13 +3693,13 @@ static void ecore_chain_free_pbl(struct ecore_dev *p_dev,
struct ecore_chain *p_chain)
{
void **pp_virt_addr_tbl = p_chain->pbl.pp_virt_addr_tbl;
- u8 *p_pbl_virt = (u8 *)p_chain->pbl.p_virt_table;
+ u8 *p_pbl_virt = (u8 *)p_chain->pbl_sp.p_virt_table;
u32 page_cnt = p_chain->page_cnt, i, pbl_size;
if (!pp_virt_addr_tbl)
return;
- if (!p_chain->pbl.p_virt_table)
+ if (!p_pbl_virt)
goto out;
for (i = 0; i < page_cnt; i++) {
@@ -3185,8 +3714,10 @@ static void ecore_chain_free_pbl(struct ecore_dev *p_dev,
}
pbl_size = page_cnt * ECORE_CHAIN_PBL_ENTRY_SIZE;
- OSAL_DMA_FREE_COHERENT(p_dev, p_chain->pbl.p_virt_table,
- p_chain->pbl.p_phys_table, pbl_size);
+
+ if (!p_chain->b_external_pbl)
+ OSAL_DMA_FREE_COHERENT(p_dev, p_chain->pbl_sp.p_virt_table,
+ p_chain->pbl_sp.p_phys_table, pbl_size);
out:
OSAL_VFREE(p_dev, p_chain->pbl.pp_virt_addr_tbl);
}
@@ -3271,8 +3802,8 @@ ecore_chain_alloc_next_ptr(struct ecore_dev *p_dev, struct ecore_chain *p_chain)
static enum _ecore_status_t
ecore_chain_alloc_single(struct ecore_dev *p_dev, struct ecore_chain *p_chain)
{
- void *p_virt = OSAL_NULL;
dma_addr_t p_phys = 0;
+ void *p_virt = OSAL_NULL;
p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys, ECORE_CHAIN_PAGE_SIZE);
if (!p_virt) {
@@ -3286,8 +3817,10 @@ ecore_chain_alloc_single(struct ecore_dev *p_dev, struct ecore_chain *p_chain)
return ECORE_SUCCESS;
}
-static enum _ecore_status_t ecore_chain_alloc_pbl(struct ecore_dev *p_dev,
- struct ecore_chain *p_chain)
+static enum _ecore_status_t
+ecore_chain_alloc_pbl(struct ecore_dev *p_dev,
+ struct ecore_chain *p_chain,
+ struct ecore_chain_ext_pbl *ext_pbl)
{
void *p_virt = OSAL_NULL;
u8 *p_pbl_virt = OSAL_NULL;
@@ -3296,13 +3829,12 @@ static enum _ecore_status_t ecore_chain_alloc_pbl(struct ecore_dev *p_dev,
u32 page_cnt = p_chain->page_cnt, size, i;
size = page_cnt * sizeof(*pp_virt_addr_tbl);
- pp_virt_addr_tbl = (void **)OSAL_VALLOC(p_dev, size);
+ pp_virt_addr_tbl = (void **)OSAL_VZALLOC(p_dev, size);
if (!pp_virt_addr_tbl) {
DP_NOTICE(p_dev, true,
"Failed to allocate memory for the chain virtual addresses table\n");
return ECORE_NOMEM;
}
- OSAL_MEM_ZERO(pp_virt_addr_tbl, size);
/* The allocation of the PBL table is done with its full size, since it
* is expected to be successive.
@@ -3311,7 +3843,15 @@ static enum _ecore_status_t ecore_chain_alloc_pbl(struct ecore_dev *p_dev,
* should be saved to allow its freeing during the error flow.
*/
size = page_cnt * ECORE_CHAIN_PBL_ENTRY_SIZE;
- p_pbl_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_pbl_phys, size);
+
+ if (ext_pbl == OSAL_NULL) {
+ p_pbl_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_pbl_phys, size);
+ } else {
+ p_pbl_virt = ext_pbl->p_pbl_virt;
+ p_pbl_phys = ext_pbl->p_pbl_phys;
+ p_chain->b_external_pbl = true;
+ }
+
ecore_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys,
pp_virt_addr_tbl);
if (!p_pbl_virt) {
@@ -3349,7 +3889,8 @@ enum _ecore_status_t ecore_chain_alloc(struct ecore_dev *p_dev,
enum ecore_chain_mode mode,
enum ecore_chain_cnt_type cnt_type,
u32 num_elems, osal_size_t elem_size,
- struct ecore_chain *p_chain)
+ struct ecore_chain *p_chain,
+ struct ecore_chain_ext_pbl *ext_pbl)
{
u32 page_cnt;
enum _ecore_status_t rc = ECORE_SUCCESS;
@@ -3380,7 +3921,7 @@ enum _ecore_status_t ecore_chain_alloc(struct ecore_dev *p_dev,
rc = ecore_chain_alloc_single(p_dev, p_chain);
break;
case ECORE_CHAIN_MODE_PBL:
- rc = ecore_chain_alloc_pbl(p_dev, p_chain);
+ rc = ecore_chain_alloc_pbl(p_dev, p_chain, ext_pbl);
break;
}
if (rc)
@@ -3388,7 +3929,7 @@ enum _ecore_status_t ecore_chain_alloc(struct ecore_dev *p_dev,
return ECORE_SUCCESS;
- nomem:
+nomem:
ecore_chain_free(p_dev, p_chain);
return rc;
}
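
The ext_pbl path above introduces an ownership rule: a caller-supplied PBL is only borrowed (b_external_pbl is set), so ecore_chain_free_pbl() must skip freeing it. A minimal standalone sketch of that rule, with malloc/free standing in for the DMA-coherent allocators (hypothetical, not part of this patch):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct chain {
	void *pbl_virt;
	bool b_external_pbl;
};

static int chain_alloc_pbl(struct chain *c, void *ext_pbl, size_t size)
{
	if (ext_pbl == NULL) {
		c->pbl_virt = malloc(size);	/* chain owns this memory */
		c->b_external_pbl = false;
		return c->pbl_virt ? 0 : -1;
	}
	c->pbl_virt = ext_pbl;			/* borrowed from the caller */
	c->b_external_pbl = true;
	return 0;
}

static void chain_free_pbl(struct chain *c)
{
	if (!c->b_external_pbl)
		free(c->pbl_virt);	/* only free what we allocated */
	c->pbl_virt = NULL;
}

int main(void)
{
	struct chain c;

	if (chain_alloc_pbl(&c, NULL, 4096) == 0) {
		printf("internal PBL: chain frees it\n");
		chain_free_pbl(&c);
	}
	return 0;
}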
@@ -3764,19 +4305,14 @@ static enum _ecore_status_t ecore_set_coalesce(struct ecore_hwfn *p_hwfn,
{
struct coalescing_timeset *p_coal_timeset;
- if (IS_VF(p_hwfn->p_dev)) {
- DP_NOTICE(p_hwfn, true, "VF coalescing config not supported\n");
- return ECORE_INVAL;
- }
-
if (p_hwfn->p_dev->int_coalescing_mode != ECORE_COAL_MODE_ENABLE) {
DP_NOTICE(p_hwfn, true,
"Coalescing configuration not enabled\n");
return ECORE_INVAL;
}
- OSAL_MEMSET(p_eth_qzone, 0, eth_qzone_size);
p_coal_timeset = p_eth_qzone;
+ OSAL_MEMSET(p_eth_qzone, 0, eth_qzone_size);
SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_TIMESET, timeset);
SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_VALID, 1);
ecore_memcpy_to(p_hwfn, p_ptt, hw_addr, p_eth_qzone, eth_qzone_size);
@@ -3784,15 +4320,55 @@ static enum _ecore_status_t ecore_set_coalesce(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
}
+enum _ecore_status_t ecore_set_queue_coalesce(struct ecore_hwfn *p_hwfn,
+ u16 rx_coal, u16 tx_coal,
+ void *p_handle)
+{
+ struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)p_handle;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ struct ecore_ptt *p_ptt;
+
+	/* TODO - Configuring a single queue's coalescing, but
+	 * claiming all queues abide by the same configuration
+	 * for both PF and VF.
+	 */
+
+ if (IS_VF(p_hwfn->p_dev))
+ return ecore_vf_pf_set_coalesce(p_hwfn, rx_coal,
+ tx_coal, p_cid);
+
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return ECORE_AGAIN;
+
+ if (rx_coal) {
+ rc = ecore_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
+ if (rc)
+ goto out;
+ p_hwfn->p_dev->rx_coalesce_usecs = rx_coal;
+ }
+
+ if (tx_coal) {
+ rc = ecore_set_txq_coalesce(p_hwfn, p_ptt, tx_coal, p_cid);
+ if (rc)
+ goto out;
+ p_hwfn->p_dev->tx_coalesce_usecs = tx_coal;
+ }
+out:
+ ecore_ptt_release(p_hwfn, p_ptt);
+
+ return rc;
+}
+
enum _ecore_status_t ecore_set_rxq_coalesce(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
- u16 coalesce, u8 qid, u16 sb_id)
+ u16 coalesce,
+ struct ecore_queue_cid *p_cid)
{
struct ustorm_eth_queue_zone eth_qzone;
- u16 fw_qid = 0;
+ u8 timeset, timer_res;
u32 address;
enum _ecore_status_t rc;
- u8 timeset, timer_res;
/* Coalesce = (timeset << timer-resolution), timeset is 7bit wide */
if (coalesce <= 0x7F) {
@@ -3807,35 +4383,32 @@ enum _ecore_status_t ecore_set_rxq_coalesce(struct ecore_hwfn *p_hwfn,
}
timeset = (u8)(coalesce >> timer_res);
- rc = ecore_fw_l2_queue(p_hwfn, (u16)qid, &fw_qid);
- if (rc != ECORE_SUCCESS)
- return rc;
-
- rc = ecore_int_set_timer_res(p_hwfn, p_ptt, timer_res, sb_id, false);
+ rc = ecore_int_set_timer_res(p_hwfn, p_ptt, timer_res,
+ p_cid->abs.sb_idx, false);
if (rc != ECORE_SUCCESS)
goto out;
- address = BAR0_MAP_REG_USDM_RAM + USTORM_ETH_QUEUE_ZONE_OFFSET(fw_qid);
+ address = BAR0_MAP_REG_USDM_RAM +
+ USTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
rc = ecore_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone,
sizeof(struct ustorm_eth_queue_zone), timeset);
if (rc != ECORE_SUCCESS)
goto out;
- p_hwfn->p_dev->rx_coalesce_usecs = coalesce;
out:
return rc;
}
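
The timer-resolution split above encodes coalesce as (timeset << timer_res) with a 7-bit timeset, so larger requests lose their low-order bits - the source of the up-to-3-usec error documented in the API. A standalone worked example of the rounding (hypothetical harness, not part of this patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t coalesce = 0x153;	/* 339 usec requested */
	uint8_t timer_res, timeset;

	if (coalesce <= 0x7F)
		timer_res = 0;
	else if (coalesce <= 0xFF)
		timer_res = 1;
	else if (coalesce <= 0x1FF)
		timer_res = 2;
	else
		return 1;	/* out of range - driver returns an error */

	timeset = (uint8_t)(coalesce >> timer_res);
	/* Prints timer_res=2 timeset=0x54 effective=336: the low two bits
	 * of 339 are dropped, a 3 usec error - the worst case for this
	 * range. */
	printf("timer_res=%u timeset=0x%02x effective=%u\n",
	       timer_res, timeset, (unsigned int)timeset << timer_res);
	return 0;
}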
enum _ecore_status_t ecore_set_txq_coalesce(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
- u16 coalesce, u8 qid, u16 sb_id)
+ u16 coalesce,
+ struct ecore_queue_cid *p_cid)
{
struct xstorm_eth_queue_zone eth_qzone;
- u16 fw_qid = 0;
+ u8 timeset, timer_res;
u32 address;
enum _ecore_status_t rc;
- u8 timeset, timer_res;
/* Coalesce = (timeset << timer-resolution), timeset is 7bit wide */
if (coalesce <= 0x7F) {
@@ -3851,22 +4424,16 @@ enum _ecore_status_t ecore_set_txq_coalesce(struct ecore_hwfn *p_hwfn,
timeset = (u8)(coalesce >> timer_res);
- rc = ecore_fw_l2_queue(p_hwfn, (u16)qid, &fw_qid);
- if (rc != ECORE_SUCCESS)
- return rc;
-
- rc = ecore_int_set_timer_res(p_hwfn, p_ptt, timer_res, sb_id, true);
+ rc = ecore_int_set_timer_res(p_hwfn, p_ptt, timer_res,
+ p_cid->abs.sb_idx, true);
if (rc != ECORE_SUCCESS)
goto out;
- address = BAR0_MAP_REG_XSDM_RAM + XSTORM_ETH_QUEUE_ZONE_OFFSET(fw_qid);
+ address = BAR0_MAP_REG_XSDM_RAM +
+ XSTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
rc = ecore_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone,
sizeof(struct xstorm_eth_queue_zone), timeset);
- if (rc != ECORE_SUCCESS)
- goto out;
-
- p_hwfn->p_dev->tx_coalesce_usecs = coalesce;
out:
return rc;
}
@@ -4292,3 +4859,16 @@ int ecore_device_num_ports(struct ecore_dev *p_dev)
return p_dev->num_ports_in_engines * ecore_device_num_engines(p_dev);
}
+
+void ecore_set_fw_mac_addr(__le16 *fw_msb,
+ __le16 *fw_mid,
+ __le16 *fw_lsb,
+ u8 *mac)
+{
+ ((u8 *)fw_msb)[0] = mac[1];
+ ((u8 *)fw_msb)[1] = mac[0];
+ ((u8 *)fw_mid)[0] = mac[3];
+ ((u8 *)fw_mid)[1] = mac[2];
+ ((u8 *)fw_lsb)[0] = mac[5];
+ ((u8 *)fw_lsb)[1] = mac[4];
+}
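
ecore_set_fw_mac_addr() above swaps each pair of MAC bytes so the firmware's three little-endian words hold the address in network byte order. A standalone check with plain uint16_t standing in for __le16 (hypothetical harness, not part of this patch):

#include <stdint.h>
#include <stdio.h>

static void set_fw_mac_addr(uint16_t *fw_msb, uint16_t *fw_mid,
			    uint16_t *fw_lsb, const uint8_t *mac)
{
	((uint8_t *)fw_msb)[0] = mac[1];
	((uint8_t *)fw_msb)[1] = mac[0];
	((uint8_t *)fw_mid)[0] = mac[3];
	((uint8_t *)fw_mid)[1] = mac[2];
	((uint8_t *)fw_lsb)[0] = mac[5];
	((uint8_t *)fw_lsb)[1] = mac[4];
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint16_t msb, mid, lsb;

	set_fw_mac_addr(&msb, &mid, &lsb, mac);
	/* On a little-endian host this prints 0011 2233 4455: each word's
	 * numeric value is the big-endian reading of its two MAC bytes. */
	printf("%04x %04x %04x\n", msb, mid, lsb);
	return 0;
}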
diff --git a/drivers/net/qede/base/ecore_dev_api.h b/drivers/net/qede/base/ecore_dev_api.h
index 042c0af2..e64a768d 100644
--- a/drivers/net/qede/base/ecore_dev_api.h
+++ b/drivers/net/qede/base/ecore_dev_api.h
@@ -58,18 +58,38 @@ enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev);
void ecore_resc_setup(struct ecore_dev *p_dev);
struct ecore_hw_init_params {
- /* tunnelling parameters */
- struct ecore_tunn_start_params *p_tunn;
+ /* Tunnelling parameters */
+ struct ecore_tunnel_info *p_tunn;
+
bool b_hw_start;
- /* interrupt mode [msix, inta, etc.] to use */
+
+ /* Interrupt mode [msix, inta, etc.] to use */
enum ecore_int_mode int_mode;
-/* npar tx switching to be used for vports configured for tx-switching */
+ /* NPAR tx switching to be used for vports configured for tx-switching
+ */
bool allow_npar_tx_switch;
- /* binary fw data pointer in binary fw file */
+
+ /* Binary fw data pointer in binary fw file */
const u8 *bin_fw_data;
- /* the OS Epoch time in seconds */
- u32 epoch;
+
+ /* Indicates whether the driver is running over a crash kernel.
+ * As part of the load request, this will be used for providing the
+ * driver role to the MFW.
+ * In case of a crash kernel over PDA - this should be set to false.
+ */
+ bool is_crash_kernel;
+
+ /* The timeout value that the MFW should use when locking the engine for
+ * the driver load process.
+ * A value of '0' means the default value, and '255' means no timeout.
+ */
+ u8 mfw_timeout_val;
+#define ECORE_LOAD_REQ_LOCK_TO_DEFAULT 0
+#define ECORE_LOAD_REQ_LOCK_TO_NONE 255
+
+ /* Avoid engine reset when first PF loads on it */
+ bool avoid_eng_reset;
};
/**
@@ -131,22 +151,47 @@ void ecore_prepare_hibernate(struct ecore_dev *p_dev);
*/
void ecore_hw_start_fastpath(struct ecore_hwfn *p_hwfn);
-/**
- * @brief ecore_hw_reset -
- *
- * @param p_dev
- *
- * @return enum _ecore_status_t
- */
-enum _ecore_status_t ecore_hw_reset(struct ecore_dev *p_dev);
+enum ecore_hw_prepare_result {
+ ECORE_HW_PREPARE_SUCCESS,
+
+	/* FAILED results indicate the probe has failed & cleaned up */
+ ECORE_HW_PREPARE_FAILED_ENG2,
+ ECORE_HW_PREPARE_FAILED_ME,
+ ECORE_HW_PREPARE_FAILED_MEM,
+ ECORE_HW_PREPARE_FAILED_DEV,
+ ECORE_HW_PREPARE_FAILED_NVM,
+
+	/* BAD results indicate the probe has passed even though something
+	 * has gone wrong; trying to actually use it [i.e., hw_init()] might
+	 * have dire repercussions.
+	 */
+ ECORE_HW_PREPARE_BAD_IOV,
+ ECORE_HW_PREPARE_BAD_MCP,
+ ECORE_HW_PREPARE_BAD_IGU,
+};
struct ecore_hw_prepare_params {
- /* personality to initialize */
+ /* Personality to initialize */
int personality;
- /* force the driver's default resource allocation */
+
+ /* Force the driver's default resource allocation */
bool drv_resc_alloc;
- /* check the reg_fifo after any register access */
+
+ /* Check the reg_fifo after any register access */
bool chk_reg_fifo;
+
+ /* Request the MFW to initiate PF FLR */
+ bool initiate_pf_flr;
+
+ /* The OS Epoch time in seconds */
+ u32 epoch;
+
+	/* Allow prepare to pass even if some initializations fail.
+	 * If set, the `p_relaxed_res' field would be set with the result,
+	 * and might allow probe to pass even if there are certain issues.
+	 */
+ bool b_relaxed_probe;
+ enum ecore_hw_prepare_result p_relaxed_res;
};
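
One plausible caller-side reading of the relaxed-probe contract: set b_relaxed_probe, then inspect p_relaxed_res to tell a clean success from a degraded (BAD_*) pass. A self-contained sketch with trimmed stand-in types (hypothetical, not qede/ecore code):

#include <stdio.h>

enum prepare_result {		/* trimmed stand-in for the enum above */
	PREPARE_SUCCESS,
	PREPARE_BAD_MCP,
};

struct prepare_params {
	int b_relaxed_probe;
	enum prepare_result p_relaxed_res;
};

static int hw_prepare(struct prepare_params *p)
{
	/* Pretend reading MCP info failed: with a relaxed probe this is
	 * downgraded from a hard error to a BAD result. */
	if (p->b_relaxed_probe) {
		p->p_relaxed_res = PREPARE_BAD_MCP;
		return 0;	/* stands in for ECORE_SUCCESS */
	}
	return -1;		/* hard failure */
}

int main(void)
{
	struct prepare_params params = { .b_relaxed_probe = 1 };

	if (hw_prepare(&params) == 0 &&
	    params.p_relaxed_res != PREPARE_SUCCESS)
		printf("probe passed with warnings (res=%d)\n",
		       params.p_relaxed_res);
	return 0;
}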
/**
@@ -368,7 +413,8 @@ ecore_chain_alloc(struct ecore_dev *p_dev,
enum ecore_chain_cnt_type cnt_type,
u32 num_elems,
osal_size_t elem_size,
- struct ecore_chain *p_chain);
+ struct ecore_chain *p_chain,
+ struct ecore_chain_ext_pbl *ext_pbl);
/**
* @brief ecore_chain_free - Free chain DMA memory
@@ -515,41 +561,24 @@ enum _ecore_status_t ecore_final_cleanup(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u16 id,
bool is_vf);
-
/**
- * @brief ecore_set_rxq_coalesce - Configure coalesce parameters for an Rx queue
- * The fact that we can configure coalescing to up to 511, but on varying
- * accuracy [the bigger the value the less accurate] up to a mistake of 3usec
- * for the highest values.
- *
- * @param p_hwfn
- * @param p_ptt
- * @param coalesce - Coalesce value in micro seconds.
- * @param qid - Queue index.
- * @param qid - SB Id
- *
- * @return enum _ecore_status_t
- */
-enum _ecore_status_t ecore_set_rxq_coalesce(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u16 coalesce, u8 qid, u16 sb_id);
-
-/**
- * @brief ecore_set_txq_coalesce - Configure coalesce parameters for a Tx queue
- * While the API allows setting coalescing per-qid, all tx queues sharing a
- * SB should be in same range [i.e., either 0-0x7f, 0x80-0xff or 0x100-0x1ff]
+ * @brief ecore_set_queue_coalesce - Configure coalesce parameters for Rx and
+ * Tx queues. Coalescing can be configured up to 511 usec, with decreasing
+ * accuracy [the bigger the value the less accurate], up to an error of
+ * 3 usec for the highest values.
+ * While the API allows setting coalescing per-qid, all queues sharing a SB
+ * should be in the same range [i.e., either 0-0x7f, 0x80-0xff or 0x100-0x1ff]
* otherwise configuration would break.
*
* @param p_hwfn
- * @param p_ptt
- * @param coalesce - Coalesce value in micro seconds.
- * @param qid - Queue index.
- * @param qid - SB Id
+ * @param rx_coal - Rx coalesce value in microseconds.
+ * @param tx_coal - Tx coalesce value in microseconds.
+ * @param p_handle
*
* @return enum _ecore_status_t
- */
-enum _ecore_status_t ecore_set_txq_coalesce(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u16 coalesce, u8 qid, u16 sb_id);
+ **/
+enum _ecore_status_t
+ecore_set_queue_coalesce(struct ecore_hwfn *p_hwfn, u16 rx_coal,
+ u16 tx_coal, void *p_handle);
#endif
diff --git a/drivers/net/qede/base/ecore_gtt_reg_addr.h b/drivers/net/qede/base/ecore_gtt_reg_addr.h
index 6395b7cd..2acd864d 100644
--- a/drivers/net/qede/base/ecore_gtt_reg_addr.h
+++ b/drivers/net/qede/base/ecore_gtt_reg_addr.h
@@ -10,43 +10,43 @@
#define GTT_REG_ADDR_H
/* Win 2 */
-/* Access:RW DataWidth:0x20 Chips: BB_A0 BB_B0 K2 */
+/* Access:RW DataWidth:0x20 */
#define GTT_BAR0_MAP_REG_IGU_CMD 0x00f000UL
/* Win 3 */
-/* Access:RW DataWidth:0x20 Chips: BB_A0 BB_B0 K2 */
+/* Access:RW DataWidth:0x20 */
#define GTT_BAR0_MAP_REG_TSDM_RAM 0x010000UL
/* Win 4 */
-/* Access:RW DataWidth:0x20 Chips: BB_A0 BB_B0 K2 */
+/* Access:RW DataWidth:0x20 */
#define GTT_BAR0_MAP_REG_MSDM_RAM 0x011000UL
/* Win 5 */
-/* Access:RW DataWidth:0x20 Chips: BB_A0 BB_B0 K2 */
+/* Access:RW DataWidth:0x20 */
#define GTT_BAR0_MAP_REG_MSDM_RAM_1024 0x012000UL
/* Win 6 */
-/* Access:RW DataWidth:0x20 Chips: BB_A0 BB_B0 K2 */
+/* Access:RW DataWidth:0x20 */
#define GTT_BAR0_MAP_REG_USDM_RAM 0x013000UL
/* Win 7 */
-/* Access:RW DataWidth:0x20 Chips: BB_A0 BB_B0 K2 */
+/* Access:RW DataWidth:0x20 */
#define GTT_BAR0_MAP_REG_USDM_RAM_1024 0x014000UL
/* Win 8 */
-/* Access:RW DataWidth:0x20 Chips: BB_A0 BB_B0 K2 */
+/* Access:RW DataWidth:0x20 */
#define GTT_BAR0_MAP_REG_USDM_RAM_2048 0x015000UL
/* Win 9 */
-/* Access:RW DataWidth:0x20 Chips: BB_A0 BB_B0 K2 */
+/* Access:RW DataWidth:0x20 */
#define GTT_BAR0_MAP_REG_XSDM_RAM 0x016000UL
/* Win 10 */
-/* Access:RW DataWidth:0x20 Chips: BB_A0 BB_B0 K2 */
+/* Access:RW DataWidth:0x20 */
#define GTT_BAR0_MAP_REG_YSDM_RAM 0x017000UL
/* Win 11 */
-/* Access:RW DataWidth:0x20 Chips: BB_A0 BB_B0 K2 */
+/* Access:RW DataWidth:0x20 */
#define GTT_BAR0_MAP_REG_PSDM_RAM 0x018000UL
#endif
diff --git a/drivers/net/qede/base/ecore_hsi_common.h b/drivers/net/qede/base/ecore_hsi_common.h
index 179d410f..3042ed55 100644
--- a/drivers/net/qede/base/ecore_hsi_common.h
+++ b/drivers/net/qede/base/ecore_hsi_common.h
@@ -75,306 +75,306 @@ struct xstorm_core_conn_st_ctx {
__le32 reserved0[55] /* Pad to 15 cycles */;
};
-struct xstorm_core_conn_ag_ctx {
+struct e4_xstorm_core_conn_ag_ctx {
u8 reserved0 /* cdu_validation */;
u8 core_state /* state */;
u8 flags0;
/* exist_in_qm0 */
-#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
/* exist_in_qm1 */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT 1
/* exist_in_qm2 */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT 2
/* exist_in_qm3 */
-#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
/* bit4 */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT 4
/* cf_array_active */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT 5
/* bit6 */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT 6
/* bit7 */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT 7
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT 7
u8 flags1;
/* bit8 */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT 0
/* bit9 */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT 1
/* bit10 */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT 2
/* bit11 */
-#define XSTORM_CORE_CONN_AG_CTX_BIT11_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT 3
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT11_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT 3
/* bit12 */
-#define XSTORM_CORE_CONN_AG_CTX_BIT12_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT12_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT 4
/* bit13 */
-#define XSTORM_CORE_CONN_AG_CTX_BIT13_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT 5
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT13_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT 5
/* bit14 */
-#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
+#define E4_XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
/* bit15 */
-#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
+#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
u8 flags2;
/* timer0cf */
-#define XSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 0
/* timer1cf */
-#define XSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 2
/* timer2cf */
-#define XSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 4
/* timer_stop_all */
-#define XSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 6
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 6
u8 flags3;
-#define XSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
-#define XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
-#define XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
-#define XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
-#define XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 6
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 6
u8 flags4;
-#define XSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
-#define XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
-#define XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 2
/* cf10 */
-#define XSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 4
/* cf11 */
-#define XSTORM_CORE_CONN_AG_CTX_CF11_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT 6
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF11_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
/* cf12 */
-#define XSTORM_CORE_CONN_AG_CTX_CF12_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF12_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT 0
/* cf13 */
-#define XSTORM_CORE_CONN_AG_CTX_CF13_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF13_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT 2
/* cf14 */
-#define XSTORM_CORE_CONN_AG_CTX_CF14_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF14_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT 4
/* cf15 */
-#define XSTORM_CORE_CONN_AG_CTX_CF15_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT 6
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF15_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT 6
u8 flags6;
/* cf16 */
-#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT 0
/* cf_array_cf */
-#define XSTORM_CORE_CONN_AG_CTX_CF17_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF17_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT 2
/* cf18 */
-#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT 4
/* cf19 */
-#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
+#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
u8 flags7;
/* cf20 */
-#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
/* cf21 */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT 2
/* cf22 */
-#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT 4
/* cf0en */
-#define XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 6
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 6
/* cf1en */
-#define XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 7
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
/* cf2en */
-#define XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 0
/* cf3en */
-#define XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 1
/* cf4en */
-#define XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 2
/* cf5en */
-#define XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 3
/* cf6en */
-#define XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 4
/* cf7en */
-#define XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 5
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 5
/* cf8en */
-#define XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 6
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 6
/* cf9en */
-#define XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 7
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 7
u8 flags9;
/* cf10en */
-#define XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 0
/* cf11en */
-#define XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT 1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT 1
/* cf12en */
-#define XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT 2
/* cf13en */
-#define XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT 3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT 3
/* cf14en */
-#define XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT 4
/* cf15en */
-#define XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT 5
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT 5
/* cf16en */
-#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT 6
+#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT 6
/* cf_array_cf_en */
-#define XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT 7
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT 7
u8 flags10;
/* cf18en */
-#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
/* cf19en */
-#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
+#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
/* cf20en */
-#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
/* cf21en */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT 3
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT 3
/* cf22en */
-#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
/* cf23en */
-#define XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT 5
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT 5
/* rule0en */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT 6
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT 6
/* rule1en */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT 7
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT 7
u8 flags11;
/* rule2en */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT 0
/* rule3en */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT 1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT 1
/* rule4en */
-#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
/* rule5en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 3
/* rule6en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 4
/* rule7en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 5
/* rule8en */
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
/* rule9en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT 7
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT 7
u8 flags12;
/* rule10en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT 0
/* rule11en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT 1
/* rule12en */
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
/* rule13en */
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
/* rule14en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT 4
/* rule15en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT 5
/* rule16en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT 6
/* rule17en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT 7
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT 7
u8 flags13;
/* rule18en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT 0
/* rule19en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT 1
/* rule20en */
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
/* rule21en */
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
/* rule22en */
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
/* rule23en */
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
/* rule24en */
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
/* rule25en */
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
/* bit16 */
-#define XSTORM_CORE_CONN_AG_CTX_BIT16_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT16_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT 0
/* bit17 */
-#define XSTORM_CORE_CONN_AG_CTX_BIT17_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT 1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT17_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT 1
/* bit18 */
-#define XSTORM_CORE_CONN_AG_CTX_BIT18_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT18_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT 2
/* bit19 */
-#define XSTORM_CORE_CONN_AG_CTX_BIT19_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT 3
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT19_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT 3
/* bit20 */
-#define XSTORM_CORE_CONN_AG_CTX_BIT20_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT20_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT 4
/* bit21 */
-#define XSTORM_CORE_CONN_AG_CTX_BIT21_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT 5
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT21_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT 5
/* cf23 */
-#define XSTORM_CORE_CONN_AG_CTX_CF23_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT 6
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF23_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT 6
u8 byte2 /* byte2 */;
__le16 physical_q0 /* physical_q0 */;
__le16 consolid_prod /* physical_q1 */;
@@ -410,7 +410,7 @@ struct xstorm_core_conn_ag_ctx {
u8 byte13 /* byte13 */;
u8 byte14 /* byte14 */;
u8 byte15 /* byte15 */;
- u8 byte16 /* byte16 */;
+ u8 e5_reserved /* e5_reserved */;
__le16 word11 /* word11 */;
__le32 reg10 /* reg10 */;
__le32 reg11 /* reg11 */;
@@ -428,89 +428,89 @@ struct xstorm_core_conn_ag_ctx {
__le16 word15 /* word15 */;
};
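
For reference, a minimal sketch of how these renamed mask/shift pairs are consumed, assuming the generic SET_FIELD()/GET_FIELD() helpers from ecore.h (they paste _MASK/_SHIFT onto the name they are given, so callers only rename the macro stem):

	struct e4_xstorm_core_conn_ag_ctx ctx = { 0 };

	/* set rule10en (bit 0 of flags12) and read it back */
	SET_FIELD(ctx.flags12, E4_XSTORM_CORE_CONN_AG_CTX_RULE10EN, 1);
	if (GET_FIELD(ctx.flags12, E4_XSTORM_CORE_CONN_AG_CTX_RULE10EN))
		; /* rule 10 enabled */
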
-struct tstorm_core_conn_ag_ctx {
+struct e4_tstorm_core_conn_ag_ctx {
u8 byte0 /* cdu_validation */;
u8 byte1 /* state */;
u8 flags0;
-#define TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
-#define TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3
-#define TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5
-#define TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
-#define TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6
u8 flags1;
-#define TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
-#define TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
-#define TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
-#define TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
-#define TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6
u8 flags2;
-#define TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
-#define TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
-#define TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
-#define TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
-#define TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6
u8 flags3;
-#define TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
-#define TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
-#define TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
-#define TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
-#define TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5
-#define TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
-#define TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6
-#define TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
-#define TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7
u8 flags4;
-#define TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
-#define TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
-#define TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1
-#define TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
-#define TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */
-#define TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3
-#define TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */
-#define TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */
-#define TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5
-#define TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */
-#define TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6
-#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
-#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
__le32 reg0 /* reg0 */;
__le32 reg1 /* reg1 */;
__le32 reg2 /* reg2 */;
@@ -532,63 +532,63 @@ struct tstorm_core_conn_ag_ctx {
__le32 reg10 /* reg10 */;
};
-struct ustorm_core_conn_ag_ctx {
+struct e4_ustorm_core_conn_ag_ctx {
u8 reserved /* cdu_validation */;
u8 byte1 /* state */;
u8 flags0;
-#define USTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
-#define USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
-#define USTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
-#define USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
-#define USTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
-#define USTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
-#define USTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
-#define USTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
-#define USTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
-#define USTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
+#define E4_USTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
+#define E4_USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_USTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
+#define E4_USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_USTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_USTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_USTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define USTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
-#define USTORM_CORE_CONN_AG_CTX_CF3_SHIFT 0
-#define USTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
-#define USTORM_CORE_CONN_AG_CTX_CF4_SHIFT 2
-#define USTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
-#define USTORM_CORE_CONN_AG_CTX_CF5_SHIFT 4
-#define USTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
-#define USTORM_CORE_CONN_AG_CTX_CF6_SHIFT 6
+#define E4_USTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF3_SHIFT 0
+#define E4_USTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF4_SHIFT 2
+#define E4_USTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF5_SHIFT 4
+#define E4_USTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF6_SHIFT 6
u8 flags2;
-#define USTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
-#define USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
-#define USTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
-#define USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
-#define USTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
-#define USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
-#define USTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
-#define USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 3
-#define USTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
-#define USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 4
-#define USTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
-#define USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 5
-#define USTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
-#define USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 6
-#define USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
-#define USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define E4_USTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_USTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_USTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_USTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 3
+#define E4_USTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 4
+#define E4_USTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 5
+#define E4_USTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 6
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags3;
-#define USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
-#define USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
-#define USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
-#define USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
-#define USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
-#define USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
-#define USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
-#define USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
-#define USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 byte2 /* byte2 */;
u8 byte3 /* byte3 */;
__le16 word0 /* conn_dpi */;
@@ -628,11 +628,11 @@ struct core_conn_context {
/* xstorm storm context */
struct xstorm_core_conn_st_ctx xstorm_st_context;
/* xstorm aggregative context */
- struct xstorm_core_conn_ag_ctx xstorm_ag_context;
+ struct e4_xstorm_core_conn_ag_ctx xstorm_ag_context;
/* tstorm aggregative context */
- struct tstorm_core_conn_ag_ctx tstorm_ag_context;
+ struct e4_tstorm_core_conn_ag_ctx tstorm_ag_context;
/* ustorm aggregative context */
- struct ustorm_core_conn_ag_ctx ustorm_ag_context;
+ struct e4_ustorm_core_conn_ag_ctx ustorm_ag_context;
/* mstorm storm context */
struct mstorm_core_conn_st_ctx mstorm_st_context;
/* ustorm storm context */
@@ -660,6 +660,7 @@ enum core_event_opcode {
CORE_EVENT_TX_QUEUE_STOP,
CORE_EVENT_RX_QUEUE_START,
CORE_EVENT_RX_QUEUE_STOP,
+ CORE_EVENT_RX_QUEUE_FLUSH,
MAX_CORE_EVENT_OPCODE
};
@@ -743,6 +744,7 @@ enum core_ramrod_cmd_id {
CORE_RAMROD_TX_QUEUE_START /* TX Queue Start Ramrod */,
CORE_RAMROD_RX_QUEUE_STOP /* RX Queue Stop Ramrod */,
CORE_RAMROD_TX_QUEUE_STOP /* TX Queue Stop Ramrod */,
+ CORE_RAMROD_RX_QUEUE_FLUSH /* RX Flush queue Ramrod */,
MAX_CORE_RAMROD_CMD_ID
};
@@ -834,7 +836,12 @@ struct core_rx_fast_path_cqe {
__le16 packet_length /* Total packet length (from the parser) */;
__le16 vlan /* 802.1q VLAN tag */;
struct core_rx_cqe_opaque_data opaque_data /* Opaque Data */;
- __le32 reserved[4];
+/* bit-map: each bit represents a specific error. Error indications are
+ * provided by the cracker. See spec for detailed description.
+ */
+ struct parsing_err_flags err_flags;
+ __le16 reserved0;
+ __le32 reserved1[3];
};
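
A minimal sketch of consuming the new err_flags word on a fast-path CQE, assuming struct parsing_err_flags (common_hsi.h) wraps a little-endian 16-bit flags field named flags, and that OSAL_LE16_TO_CPU() is the byte-order helper from bcm_osal.h:

	u16 err = OSAL_LE16_TO_CPU(cqe->err_flags.flags);

	if (err != 0)
		; /* at least one cracker error bit is set; decode per spec */
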
/*
@@ -860,7 +867,8 @@ struct core_rx_slow_path_cqe {
u8 type /* CQE type */;
u8 ramrod_cmd_id;
__le16 echo;
- __le32 reserved1[7];
+ struct core_rx_cqe_opaque_data opaque_data /* Opaque Data */;
+ __le32 reserved1[5];
};
/*
@@ -926,36 +934,51 @@ struct core_rx_stop_ramrod_data {
/*
* Flags for Core TX BD
*/
-struct core_tx_bd_flags {
- u8 as_bitfield;
+struct core_tx_bd_data {
+ __le16 as_bitfield;
/* Do not allow additional VLAN manipulations on this packet (DCB) */
-#define CORE_TX_BD_FLAGS_FORCE_VLAN_MODE_MASK 0x1
-#define CORE_TX_BD_FLAGS_FORCE_VLAN_MODE_SHIFT 0
+#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_MASK 0x1
+#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_SHIFT 0
/* Insert VLAN into packet */
-#define CORE_TX_BD_FLAGS_VLAN_INSERTION_MASK 0x1
-#define CORE_TX_BD_FLAGS_VLAN_INSERTION_SHIFT 1
+#define CORE_TX_BD_DATA_VLAN_INSERTION_MASK 0x1
+#define CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT 1
/* This is the first BD of the packet (for debug) */
-#define CORE_TX_BD_FLAGS_START_BD_MASK 0x1
-#define CORE_TX_BD_FLAGS_START_BD_SHIFT 2
+#define CORE_TX_BD_DATA_START_BD_MASK 0x1
+#define CORE_TX_BD_DATA_START_BD_SHIFT 2
/* Calculate the IP checksum for the packet */
-#define CORE_TX_BD_FLAGS_IP_CSUM_MASK 0x1
-#define CORE_TX_BD_FLAGS_IP_CSUM_SHIFT 3
+#define CORE_TX_BD_DATA_IP_CSUM_MASK 0x1
+#define CORE_TX_BD_DATA_IP_CSUM_SHIFT 3
/* Calculate the L4 checksum for the packet */
-#define CORE_TX_BD_FLAGS_L4_CSUM_MASK 0x1
-#define CORE_TX_BD_FLAGS_L4_CSUM_SHIFT 4
+#define CORE_TX_BD_DATA_L4_CSUM_MASK 0x1
+#define CORE_TX_BD_DATA_L4_CSUM_SHIFT 4
/* Packet is IPv6 with extensions */
-#define CORE_TX_BD_FLAGS_IPV6_EXT_MASK 0x1
-#define CORE_TX_BD_FLAGS_IPV6_EXT_SHIFT 5
+#define CORE_TX_BD_DATA_IPV6_EXT_MASK 0x1
+#define CORE_TX_BD_DATA_IPV6_EXT_SHIFT 5
/* If IPv6+ext, and if l4_csum is 1, then this field indicates L4 protocol:
* 0-TCP, 1-UDP
*/
-#define CORE_TX_BD_FLAGS_L4_PROTOCOL_MASK 0x1
-#define CORE_TX_BD_FLAGS_L4_PROTOCOL_SHIFT 6
+#define CORE_TX_BD_DATA_L4_PROTOCOL_MASK 0x1
+#define CORE_TX_BD_DATA_L4_PROTOCOL_SHIFT 6
/* The pseudo checksum mode to place in the L4 checksum field. Required only
- * when IPv6+ext and l4_csum is set. (use enum core_l4_pseudo_checksum_mode)
+ * when IPv6+ext and l4_csum is set. (use enum core_l4_pseudo_checksum_mode)
*/
-#define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_MASK 0x1
-#define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_SHIFT 7
+#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_MASK 0x1
+#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_SHIFT 7
+/* Number of BDs that make up one packet - field wide enough to represent
+ * CORE_LL2_TX_MAX_BDS_PER_PACKET
+ */
+#define CORE_TX_BD_DATA_NBDS_MASK 0xF
+#define CORE_TX_BD_DATA_NBDS_SHIFT 8
+/* Use roce_flavor enum - differentiating between RoCE flavors is valid only
+ * when connType is ROCE (use enum core_roce_flavor_type)
+ */
+#define CORE_TX_BD_DATA_ROCE_FLAV_MASK 0x1
+#define CORE_TX_BD_DATA_ROCE_FLAV_SHIFT 12
+/* Calculate ip length */
+#define CORE_TX_BD_DATA_IP_LEN_MASK 0x1
+#define CORE_TX_BD_DATA_IP_LEN_SHIFT 13
+#define CORE_TX_BD_DATA_RESERVED0_MASK 0x3
+#define CORE_TX_BD_DATA_RESERVED0_SHIFT 14
};
/*
@@ -968,28 +991,18 @@ struct core_tx_bd {
* packets: echo data to pass to Rx
*/
__le16 nw_vlan_or_lb_echo;
- u8 bitfield0;
-/* Number of BDs that make up one packet - width wide enough to present
- * X_CORE_LL2_NUM_OF_BDS_ON_ST_CT
- */
-#define CORE_TX_BD_NBDS_MASK 0xF
-#define CORE_TX_BD_NBDS_SHIFT 0
-/* Use roce_flavor enum - Diffrentiate between Roce flavors is valid when
- * connType is ROCE (use enum core_roce_flavor_type)
- */
-#define CORE_TX_BD_ROCE_FLAV_MASK 0x1
-#define CORE_TX_BD_ROCE_FLAV_SHIFT 4
-#define CORE_TX_BD_RESERVED0_MASK 0x7
-#define CORE_TX_BD_RESERVED0_SHIFT 5
- struct core_tx_bd_flags bd_flags /* BD Flags */;
+ struct core_tx_bd_data bd_data /* BD Flags */;
__le16 bitfield1;
+/* L4 Header Offset from start of packet (in Words). This is needed if both
+ * l4_csum and ipv6_ext are set
+ */
#define CORE_TX_BD_L4_HDR_OFFSET_W_MASK 0x3FFF
#define CORE_TX_BD_L4_HDR_OFFSET_W_SHIFT 0
/* Packet destination - Network, LB (use enum core_tx_dest) */
#define CORE_TX_BD_TX_DST_MASK 0x1
#define CORE_TX_BD_TX_DST_SHIFT 14
-#define CORE_TX_BD_RESERVED1_MASK 0x1
-#define CORE_TX_BD_RESERVED1_SHIFT 15
+#define CORE_TX_BD_RESERVED_MASK 0x1
+#define CORE_TX_BD_RESERVED_SHIFT 15
};
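
A minimal sketch of filling a TX BD under the new layout, assuming SET_FIELD() from ecore.h and OSAL_CPU_TO_LE16() from bcm_osal.h; note that nbds and the RoCE flavor moved from the old 8-bit bitfield0 into the 16-bit bd_data word:

	struct core_tx_bd bd = { 0 };
	u16 data = 0, bf1 = 0;

	SET_FIELD(data, CORE_TX_BD_DATA_START_BD, 1);	/* first BD of packet */
	SET_FIELD(data, CORE_TX_BD_DATA_NBDS, 1);	/* single-BD packet */
	SET_FIELD(data, CORE_TX_BD_DATA_L4_CSUM, 1);
	bd.bd_data.as_bitfield = OSAL_CPU_TO_LE16(data);

	/* assuming 0 = network destination per enum core_tx_dest */
	SET_FIELD(bf1, CORE_TX_BD_TX_DST, 0);
	bd.bitfield1 = OSAL_CPU_TO_LE16(bf1);
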
@@ -1034,13 +1047,13 @@ struct core_tx_stop_ramrod_data {
/*
* Enum flag for what type of dcb data to update
*/
-enum dcb_dhcp_update_flag {
+enum dcb_dscp_update_mode {
/* use when no change should be done to dcb data */
- DONT_UPDATE_DCB_DHCP,
+ DONT_UPDATE_DCB_DSCP,
UPDATE_DCB /* use to update only l2 (vlan) priority */,
- UPDATE_DSCP /* use to update only l3 dhcp */,
- UPDATE_DCB_DSCP /* update vlan pri and dhcp */,
- MAX_DCB_DHCP_UPDATE_FLAG
+ UPDATE_DSCP /* use to update only l3 dscp */,
+ UPDATE_DCB_DSCP /* update vlan pri and dscp */,
+ MAX_DCB_DSCP_UPDATE_FLAG
};
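
A minimal sketch of how these modes are meant to be used with the update_*_dcb_data_mode fields this patch renames in pf_update_ramrod_data below (p_ramrod is a hypothetical pointer to that structure):

	/* leave Eth DCB data untouched; update both vlan-pri and dscp for RoCE */
	p_ramrod->update_eth_dcb_data_mode = DONT_UPDATE_DCB_DSCP;
	p_ramrod->update_roce_dcb_data_mode = UPDATE_DCB_DSCP;
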
@@ -1224,6 +1237,10 @@ enum iwarp_ll2_tx_queues {
IWARP_LL2_IN_ORDER_TX_QUEUE = 1,
/* LL2 queue for unaligned packets sent aligned by the driver */
IWARP_LL2_ALIGNED_TX_QUEUE,
+/* LL2 queue for unaligned packets sent aligned and right-trimmed by the
+ * driver
+ */
+ IWARP_LL2_ALIGNED_RIGHT_TRIMMED_TX_QUEUE,
IWARP_LL2_ERROR /* Error indication */,
MAX_IWARP_LL2_TX_QUEUES
};
@@ -1265,6 +1282,7 @@ enum malicious_vf_error_id {
/* Tunneled packet with IPv6+Ext without a proper number of BDs */
ETH_TUNN_IPV6_EXT_NBD_ERR,
ETH_CONTROL_PACKET_VIOLATION /* VF sent control frame such as PFC */,
+ ETH_ANTI_SPOOFING_ERR /* Anti-Spoofing verification failure */,
MAX_MALICIOUS_VF_ERROR_ID
};
@@ -1311,9 +1329,13 @@ enum personality_type {
* tunnel configuration
*/
struct pf_start_tunnel_config {
-/* Set VXLAN tunnel UDP destination port. */
+/* Set VXLAN tunnel UDP destination port to vxlan_udp_port. If not set -
+ * FW will use a default port
+ */
u8 set_vxlan_udp_port_flg;
-/* Set GENEVE tunnel UDP destination port. */
+/* Set GENEVE tunnel UDP destination port to geneve_udp_port. If not set -
+ * FW will use a default port
+ */
u8 set_geneve_udp_port_flg;
u8 tx_enable_vxlan /* If set, enable VXLAN tunnel in TX path. */;
/* If set, enable l2 GENEVE tunnel in TX path. */
@@ -1329,8 +1351,10 @@ struct pf_start_tunnel_config {
u8 tunnel_clss_ipgeneve;
u8 tunnel_clss_l2gre /* Classification scheme for l2 GRE tunnel. */;
u8 tunnel_clss_ipgre /* Classification scheme for ip GRE tunnel. */;
- __le16 vxlan_udp_port /* VXLAN tunnel UDP destination port. */;
- __le16 geneve_udp_port /* GENEVE tunnel UDP destination port. */;
+/* VXLAN tunnel UDP destination port. Valid if set_vxlan_udp_port_flg=1 */
+ __le16 vxlan_udp_port;
+/* GENEVE tunnel UDP destination port. Valid if set_geneve_udp_port_flg=1 */
+ __le16 geneve_udp_port;
};
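
A minimal sketch of the flag/port pairing described above (OSAL_CPU_TO_LE16() assumed from bcm_osal.h):

	struct pf_start_tunnel_config tun = { 0 };

	tun.set_vxlan_udp_port_flg = 1;			/* override FW default */
	tun.vxlan_udp_port = OSAL_CPU_TO_LE16(4789);	/* IANA VXLAN port */
	tun.set_geneve_udp_port_flg = 0;	/* geneve_udp_port is ignored */
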
/*
@@ -1431,13 +1455,13 @@ struct pf_update_tunnel_config {
*/
struct pf_update_ramrod_data {
u8 pf_id;
- u8 update_eth_dcb_data_flag /* Update Eth DCB data indication */;
- u8 update_fcoe_dcb_data_flag /* Update FCOE DCB data indication */;
- u8 update_iscsi_dcb_data_flag /* Update iSCSI DCB data indication */;
- u8 update_roce_dcb_data_flag /* Update ROCE DCB data indication */;
+ u8 update_eth_dcb_data_mode /* Update Eth DCB data indication */;
+ u8 update_fcoe_dcb_data_mode /* Update FCOE DCB data indication */;
+ u8 update_iscsi_dcb_data_mode /* Update iSCSI DCB data indication */;
+ u8 update_roce_dcb_data_mode /* Update ROCE DCB data indication */;
/* Update RROCE (RoceV2) DCB data indication */
- u8 update_rroce_dcb_data_flag;
- u8 update_iwarp_dcb_data_flag /* Update IWARP DCB data indication */;
+ u8 update_rroce_dcb_data_mode;
+ u8 update_iwarp_dcb_data_mode /* Update IWARP DCB data indication */;
u8 update_mf_vlan_flag /* Update MF outer vlan Id */;
struct protocol_dcb_data eth_dcb_data /* core eth related fields */;
struct protocol_dcb_data fcoe_dcb_data /* core fcoe related fields */;
@@ -1596,6 +1620,8 @@ struct tstorm_per_port_stat {
struct regpair fcoe_irregular_pkt;
/* packet is an ROCE irregular packet */
struct regpair roce_irregular_pkt;
+/* packet is an IWARP irregular packet */
+ struct regpair iwarp_irregular_pkt;
/* packet is an ETH irregular packet */
struct regpair eth_irregular_pkt;
/* packet is an TOE irregular packet */
@@ -1846,8 +1872,11 @@ struct dmae_cmd {
#define DMAE_CMD_SRC_VF_ID_SHIFT 0
#define DMAE_CMD_DST_VF_ID_MASK 0xFF /* Destination VF id */
#define DMAE_CMD_DST_VF_ID_SHIFT 8
- __le32 comp_addr_lo /* PCIe completion address low or grc address */;
-/* PCIe completion address high or reserved (if completion address is in GRC) */
+/* PCIe completion address low in bytes or GRC completion address in DW */
+ __le32 comp_addr_lo;
+/* PCIe completion address high in bytes or reserved (if completion address is
+ * GRC)
+ */
__le32 comp_addr_hi;
__le32 comp_val /* Value to write to completion address */;
__le32 crc32 /* crc16 result */;
@@ -1919,6 +1948,92 @@ enum dmae_cmd_src_enum {
};
+struct e4_mstorm_core_conn_ag_ctx {
+ u8 byte0 /* cdu_validation */;
+ u8 byte1 /* state */;
+ u8 flags0;
+#define E4_MSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
+#define E4_MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_MSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
+#define E4_MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* cf0 */
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* cf1 */
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
+ __le16 word0 /* word0 */;
+ __le16 word1 /* word1 */;
+ __le32 reg0 /* reg0 */;
+ __le32 reg1 /* reg1 */;
+};
+
+
+
+
+
+struct e4_ystorm_core_conn_ag_ctx {
+ u8 byte0 /* cdu_validation */;
+ u8 byte1 /* state */;
+ u8 flags0;
+#define E4_YSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
+#define E4_YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_YSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
+#define E4_YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* cf0 */
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* cf1 */
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
+ u8 byte2 /* byte2 */;
+ u8 byte3 /* byte3 */;
+ __le16 word0 /* word0 */;
+ __le32 reg0 /* reg0 */;
+ __le32 reg1 /* reg1 */;
+ __le16 word1 /* word1 */;
+ __le16 word2 /* word2 */;
+ __le16 word3 /* word3 */;
+ __le16 word4 /* word4 */;
+ __le32 reg2 /* reg2 */;
+ __le32 reg3 /* reg3 */;
+};
+
+
/*
* IGU cleanup command
*/
@@ -2002,44 +2117,6 @@ struct igu_msix_vector {
};
-struct mstorm_core_conn_ag_ctx {
- u8 byte0 /* cdu_validation */;
- u8 byte1 /* state */;
- u8 flags0;
-#define MSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
-#define MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
-#define MSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
-#define MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
-#define MSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* cf0 */
-#define MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
-#define MSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* cf1 */
-#define MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
-#define MSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */
-#define MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
- u8 flags1;
-#define MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
-#define MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
-#define MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
-#define MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
-#define MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
-#define MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
-#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
-#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
-#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
-#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
-#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
-#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
- __le16 word0 /* word0 */;
- __le16 word1 /* word1 */;
- __le32 reg0 /* reg0 */;
- __le32 reg1 /* reg1 */;
-};
-
-
/*
* per encapsulation type enabling flags
*/
@@ -2187,10 +2264,6 @@ struct sdm_op_gen {
#define SDM_OP_GEN_RESERVED_SHIFT 20
};
-
-
-
-
struct ystorm_core_conn_ag_ctx {
u8 byte0 /* cdu_validation */;
u8 byte1 /* state */;
diff --git a/drivers/net/qede/base/ecore_hsi_debug_tools.h b/drivers/net/qede/base/ecore_hsi_debug_tools.h
index e82b0d4c..917e8f4c 100644
--- a/drivers/net/qede/base/ecore_hsi_debug_tools.h
+++ b/drivers/net/qede/base/ecore_hsi_debug_tools.h
@@ -92,6 +92,13 @@ enum block_addr {
GRCBASE_MS = 0x6a0000,
GRCBASE_PHY_PCIE = 0x620000,
GRCBASE_LED = 0x6b8000,
+ GRCBASE_AVS_WRAP = 0x6b0000,
+ GRCBASE_RGFS = 0x1fa0000,
+ GRCBASE_RGSRC = 0x1fa8000,
+ GRCBASE_TGFS = 0x1fb0000,
+ GRCBASE_TGSRC = 0x1fb8000,
+ GRCBASE_PTLD = 0x1fc0000,
+ GRCBASE_YPLD = 0x1fe0000,
GRCBASE_MISC_AEU = 0x8000,
GRCBASE_BAR0_MAP = 0x1c00000,
MAX_BLOCK_ADDR
@@ -177,6 +184,13 @@ enum block_id {
BLOCK_MS,
BLOCK_PHY_PCIE,
BLOCK_LED,
+ BLOCK_AVS_WRAP,
+ BLOCK_RGFS,
+ BLOCK_RGSRC,
+ BLOCK_TGFS,
+ BLOCK_TGSRC,
+ BLOCK_PTLD,
+ BLOCK_YPLD,
BLOCK_MISC_AEU,
BLOCK_BAR0_MAP,
MAX_BLOCK_ID
@@ -198,6 +212,10 @@ enum bin_dbg_buffer_type {
BIN_BUF_DBG_ATTN_REGS /* Attention registers */,
BIN_BUF_DBG_ATTN_INDEXES /* Attention indexes */,
BIN_BUF_DBG_ATTN_NAME_OFFSETS /* Attention name offsets */,
+ BIN_BUF_DBG_BUS_BLOCKS /* Debug Bus blocks */,
+ BIN_BUF_DBG_BUS_LINES /* Debug Bus lines */,
+ BIN_BUF_DBG_BUS_BLOCKS_USER_DATA /* Debug Bus blocks user data */,
+ BIN_BUF_DBG_BUS_LINE_NAME_OFFSETS /* Debug Bus line name offsets */,
BIN_BUF_DBG_PARSING_STRINGS /* Debug Tools parsing strings */,
MAX_BIN_DBG_BUFFER_TYPE
};
@@ -209,8 +227,8 @@ enum bin_dbg_buffer_type {
struct dbg_attn_bit_mapping {
__le16 data;
/* The index of an attention in the blocks attentions list
- * (if is_unused_idx_cnt=0), or a number of consecutive unused attention bits
- * (if is_unused_idx_cnt=1)
+ * (if is_unused_bit_cnt=0), or a number of consecutive unused attention bits
+ * (if is_unused_bit_cnt=1)
*/
#define DBG_ATTN_BIT_MAPPING_VAL_MASK 0x7FFF
#define DBG_ATTN_BIT_MAPPING_VAL_SHIFT 0
@@ -259,10 +277,10 @@ struct dbg_attn_reg_result {
#define DBG_ATTN_REG_RESULT_STS_ADDRESS_MASK 0xFFFFFF
#define DBG_ATTN_REG_RESULT_STS_ADDRESS_SHIFT 0
/* Number of attention indexes in this register */
-#define DBG_ATTN_REG_RESULT_NUM_ATTN_IDX_MASK 0xFF
-#define DBG_ATTN_REG_RESULT_NUM_ATTN_IDX_SHIFT 24
-/* Offset of this registers block attention indexes (values in the range
- * 0..number of block attentions)
+#define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_MASK 0xFF
+#define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_SHIFT 24
+/* The offset of this registers attentions within the blocks attentions
+ * list (a value in the range 0..number of block attentions-1)
*/
__le16 attn_idx_offset;
__le16 reserved;
@@ -279,7 +297,7 @@ struct dbg_attn_block_result {
/* Value from dbg_attn_type enum */
#define DBG_ATTN_BLOCK_RESULT_ATTN_TYPE_MASK 0x3
#define DBG_ATTN_BLOCK_RESULT_ATTN_TYPE_SHIFT 0
-/* Number of registers in the blok in which at least one attention bit is set */
+/* Number of registers in block in which at least one attention bit is set */
#define DBG_ATTN_BLOCK_RESULT_NUM_REGS_MASK 0x3F
#define DBG_ATTN_BLOCK_RESULT_NUM_REGS_SHIFT 2
/* Offset of this registers block attention names in the attention name offsets
@@ -314,17 +332,17 @@ struct dbg_mode_hdr {
*/
struct dbg_attn_reg {
struct dbg_mode_hdr mode /* Mode header */;
-/* Offset of this registers block attention indexes (values in the range
- * 0..number of block attentions)
+/* The offset of this registers attentions within the blocks attentions
+ * list (a value in the range 0..number of block attentions-1)
*/
__le16 attn_idx_offset;
__le32 data;
/* STS attention register GRC address (in dwords) */
#define DBG_ATTN_REG_STS_ADDRESS_MASK 0xFFFFFF
#define DBG_ATTN_REG_STS_ADDRESS_SHIFT 0
-/* Number of attention indexes in this register */
-#define DBG_ATTN_REG_NUM_ATTN_IDX_MASK 0xFF
-#define DBG_ATTN_REG_NUM_ATTN_IDX_SHIFT 24
+/* Number of attentions in this register */
+#define DBG_ATTN_REG_NUM_REG_ATTN_MASK 0xFF
+#define DBG_ATTN_REG_NUM_REG_ATTN_SHIFT 24
/* STS_CLR attention register GRC address (in dwords) */
__le32 sts_clr_address;
/* MASK attention register GRC address (in dwords) */
@@ -344,6 +362,53 @@ enum dbg_attn_type {
/*
+ * Debug Bus block data
+ */
+struct dbg_bus_block {
+/* Number of debug lines in this block (excluding signature & latency events) */
+ u8 num_of_lines;
+/* Indicates if this block has a latency events debug line (0/1). */
+ u8 has_latency_events;
+/* Offset of this blocks lines in the Debug Bus lines array. */
+ __le16 lines_offset;
+};
+
+
+/*
+ * Debug Bus block user data
+ */
+struct dbg_bus_block_user_data {
+/* Number of debug lines in this block (excluding signature & latency events) */
+ u8 num_of_lines;
+/* Indicates if this block has a latency events debug line (0/1). */
+ u8 has_latency_events;
+/* Offset of this blocks lines in the debug bus line name offsets array. */
+ __le16 names_offset;
+};
+
+
+/*
+ * Block Debug line data
+ */
+struct dbg_bus_line {
+ u8 data;
+/* Number of groups in the line (0-3) */
+#define DBG_BUS_LINE_NUM_OF_GROUPS_MASK 0xF
+#define DBG_BUS_LINE_NUM_OF_GROUPS_SHIFT 0
+/* Indicates if this is a 128b line (0) or a 256b line (1). */
+#define DBG_BUS_LINE_IS_256B_MASK 0x1
+#define DBG_BUS_LINE_IS_256B_SHIFT 4
+#define DBG_BUS_LINE_RESERVED_MASK 0x7
+#define DBG_BUS_LINE_RESERVED_SHIFT 5
+/* Four 2-bit values, indicating the size of each group minus 1 (i.e.
+ * value=0 means size=1, value=1 means size=2, etc), starting from lsb.
+ * The sizes are in dwords (if is_256b=0) or in qwords (if is_256b=1).
+ */
+ u8 group_sizes;
+};
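
A minimal sketch of decoding group_sizes as described in the comment above; the returned size is in dwords or qwords depending on the is_256b bit:

	static u8 dbg_bus_line_group_size(const struct dbg_bus_line *line,
					  u8 group)
	{
		/* four 2-bit (size - 1) values, group 0 at the lsb */
		return ((line->group_sizes >> (group * 2)) & 0x3) + 1;
	}
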
+
+
+/*
* condition header for registers dump
*/
struct dbg_dump_cond_hdr {
@@ -367,8 +432,11 @@ struct dbg_dump_mem {
/* register size (in dwords) */
#define DBG_DUMP_MEM_LENGTH_MASK 0xFFFFFF
#define DBG_DUMP_MEM_LENGTH_SHIFT 0
-#define DBG_DUMP_MEM_RESERVED_MASK 0xFF
-#define DBG_DUMP_MEM_RESERVED_SHIFT 24
+/* indicates if the register is wide-bus */
+#define DBG_DUMP_MEM_WIDE_BUS_MASK 0x1
+#define DBG_DUMP_MEM_WIDE_BUS_SHIFT 24
+#define DBG_DUMP_MEM_RESERVED_MASK 0x7F
+#define DBG_DUMP_MEM_RESERVED_SHIFT 25
};
@@ -378,10 +446,13 @@ struct dbg_dump_mem {
struct dbg_dump_reg {
__le32 data;
/* register address (in dwords) */
-#define DBG_DUMP_REG_ADDRESS_MASK 0xFFFFFF
-#define DBG_DUMP_REG_ADDRESS_SHIFT 0
-#define DBG_DUMP_REG_LENGTH_MASK 0xFF /* register size (in dwords) */
-#define DBG_DUMP_REG_LENGTH_SHIFT 24
+#define DBG_DUMP_REG_ADDRESS_MASK 0x7FFFFF /* register address (in dwords) */
+#define DBG_DUMP_REG_ADDRESS_SHIFT 0
+/* indicates if the register is wide-bus */
+#define DBG_DUMP_REG_WIDE_BUS_MASK 0x1
+#define DBG_DUMP_REG_WIDE_BUS_SHIFT 23
+#define DBG_DUMP_REG_LENGTH_MASK 0xFF /* register size (in dwords) */
+#define DBG_DUMP_REG_LENGTH_SHIFT 24
};
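
A minimal sketch of unpacking a dump entry under the new layout, assuming GET_FIELD() from ecore.h and OSAL_LE32_TO_CPU() from bcm_osal.h:

	u32 data = OSAL_LE32_TO_CPU(reg->data);
	u32 addr_dwords = GET_FIELD(data, DBG_DUMP_REG_ADDRESS);
	u32 is_wide_bus = GET_FIELD(data, DBG_DUMP_REG_WIDE_BUS);
	u32 len_dwords = GET_FIELD(data, DBG_DUMP_REG_LENGTH);
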
@@ -414,8 +485,11 @@ struct dbg_idle_chk_cond_hdr {
struct dbg_idle_chk_cond_reg {
__le32 data;
/* Register GRC address (in dwords) */
-#define DBG_IDLE_CHK_COND_REG_ADDRESS_MASK 0xFFFFFF
+#define DBG_IDLE_CHK_COND_REG_ADDRESS_MASK 0x7FFFFF
#define DBG_IDLE_CHK_COND_REG_ADDRESS_SHIFT 0
+/* indicates if the register is wide-bus */
+#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_MASK 0x1
+#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_SHIFT 23
/* value from block_id enum */
#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_MASK 0xFF
#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_SHIFT 24
@@ -431,8 +505,11 @@ struct dbg_idle_chk_cond_reg {
struct dbg_idle_chk_info_reg {
__le32 data;
/* Register GRC address (in dwords) */
-#define DBG_IDLE_CHK_INFO_REG_ADDRESS_MASK 0xFFFFFF
+#define DBG_IDLE_CHK_INFO_REG_ADDRESS_MASK 0x7FFFFF
#define DBG_IDLE_CHK_INFO_REG_ADDRESS_SHIFT 0
+/* indicates if the register is wide-bus */
+#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_MASK 0x1
+#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_SHIFT 23
/* value from block_id enum */
#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_MASK 0xFF
#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_SHIFT 24
@@ -534,17 +611,21 @@ enum dbg_idle_chk_severity_types {
* Debug Bus block data
*/
struct dbg_bus_block_data {
-/* Indicates if the block is enabled for recording (0/1) */
- u8 enabled;
- u8 hw_id /* HW ID associated with the block */;
+ __le16 data;
+/* 4-bit value: bit i set -> dword/qword i is enabled. */
+#define DBG_BUS_BLOCK_DATA_ENABLE_MASK_MASK 0xF
+#define DBG_BUS_BLOCK_DATA_ENABLE_MASK_SHIFT 0
+/* Number of dwords/qwords by which to right-shift the debug data (0-3) */
+#define DBG_BUS_BLOCK_DATA_RIGHT_SHIFT_MASK 0xF
+#define DBG_BUS_BLOCK_DATA_RIGHT_SHIFT_SHIFT 4
+/* 4-bit value: bit i set -> dword/qword i is forced valid. */
+#define DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK_MASK 0xF
+#define DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK_SHIFT 8
+/* 4-bit value: bit i set -> dword/qword i frame bit is forced. */
+#define DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK_MASK 0xF
+#define DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK_SHIFT 12
u8 line_num /* Debug line number to select */;
- u8 right_shift /* Number of units to right the debug data (0-3) */;
- u8 cycle_en /* 4-bit value: bit i set -> unit i is enabled. */;
-/* 4-bit value: bit i set -> unit i is forced valid. */
- u8 force_valid;
-/* 4-bit value: bit i set -> unit i frame bit is forced. */
- u8 force_frame;
- u8 reserved;
+ u8 hw_id /* HW ID associated with the block */;
};
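
A minimal sketch of programming the repacked block data, assuming SET_FIELD() from ecore.h and OSAL_CPU_TO_LE16() from bcm_osal.h (block_data and line are hypothetical):

	u16 data = 0;

	SET_FIELD(data, DBG_BUS_BLOCK_DATA_ENABLE_MASK, 0xF); /* all 4 units */
	SET_FIELD(data, DBG_BUS_BLOCK_DATA_RIGHT_SHIFT, 0);
	block_data->data = OSAL_CPU_TO_LE16(data);
	block_data->line_num = line;
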
@@ -594,6 +675,21 @@ enum dbg_bus_constraint_ops {
/*
+ * Debug Bus trigger state data
+ */
+struct dbg_bus_trigger_state_data {
+ u8 data;
+/* 4-bit value: bit i set -> dword i of the trigger state block
+ * (after right shift) is enabled.
+ */
+#define DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK_MASK 0xF
+#define DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK_SHIFT 0
+/* 4-bit value: bit i set -> dword i is compared by a constraint */
+#define DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK_MASK 0xF
+#define DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK_SHIFT 4
+};
+
+/*
* Debug Bus memory address
*/
struct dbg_bus_mem_addr {
@@ -640,14 +736,8 @@ union dbg_bus_storm_eid_params {
* Debug Bus Storm data
*/
struct dbg_bus_storm_data {
-/* Indicates if the Storm is enabled for fast debug recording (0/1) */
- u8 fast_enabled;
-/* Fast debug Storm mode, valid only if fast_enabled is set */
- u8 fast_mode;
-/* Indicates if the Storm is enabled for slow debug recording (0/1) */
- u8 slow_enabled;
-/* Slow debug Storm mode, valid only if slow_enabled is set */
- u8 slow_mode;
+ u8 enabled /* indicates if the Storm is enabled for recording */;
+ u8 mode /* Storm debug mode, valid only if the Storm is enabled */;
u8 hw_id /* HW ID associated with the Storm */;
u8 eid_filter_en /* Indicates if EID filtering is performed (0/1) */;
/* 1 = EID range filter, 0 = EID mask filter. Valid only if eid_filter_en is
@@ -657,7 +747,6 @@ struct dbg_bus_storm_data {
u8 cid_filter_en /* Indicates if CID filtering is performed (0/1) */;
/* EID filter params to filter on. Valid only if eid_filter_en is set. */
union dbg_bus_storm_eid_params eid_filter_params;
- __le16 reserved;
/* CID to filter on. Valid only if cid_filter_en is set. */
__le32 cid;
};
@@ -669,20 +758,18 @@ struct dbg_bus_data {
__le32 app_version /* The tools version number of the application */;
u8 state /* The current debug bus state */;
u8 hw_dwords /* HW dwords per cycle */;
- u8 next_hw_id /* Next HW ID to be associated with an input */;
+/* The HW IDs of the recorded HW blocks, where bits i*3..i*3+2 contain the
+ * HW ID of dword/qword i
+ */
+ __le16 hw_id_mask;
u8 num_enabled_blocks /* Number of blocks enabled for recording */;
u8 num_enabled_storms /* Number of Storms enabled for recording */;
u8 target /* Output target */;
- u8 next_trigger_state /* ID of next trigger state to be added */;
-/* ID of next filter/trigger constraint to be added */
- u8 next_constraint_id;
u8 one_shot_en /* Indicates if one-shot mode is enabled (0/1) */;
u8 grc_input_en /* Indicates if GRC recording is enabled (0/1) */;
/* Indicates if timestamp recording is enabled (0/1) */
u8 timestamp_input_en;
u8 filter_en /* Indicates if the recording filter is enabled (0/1) */;
-/* Indicates if the recording trigger is enabled (0/1) */
- u8 trigger_en;
/* If true, the next added constraint belongs to the filter. Otherwise,
* it belongs to the last added trigger state. Valid only if either filter or
* triggers are enabled.
@@ -696,6 +783,14 @@ struct dbg_bus_data {
* Valid only if both filter and trigger are enabled (0/1)
*/
u8 filter_post_trigger;
+ __le16 reserved;
+/* Indicates if the recording trigger is enabled (0/1) */
+ u8 trigger_en;
+/* trigger states data */
+ struct dbg_bus_trigger_state_data trigger_states[3];
+ u8 next_trigger_state /* ID of next trigger state to be added */;
+/* ID of next filter/trigger constraint to be added */
+ u8 next_constraint_id;
/* If true, all inputs are associated with HW ID 0. Otherwise, each input is
* assigned a different HW ID (0/1)
*/
@@ -706,9 +801,8 @@ struct dbg_bus_data {
* DBG_BUS_TARGET_ID_PCI.
*/
struct dbg_bus_pci_buf_data pci_buf;
- __le16 reserved;
/* Debug Bus data for each block */
- struct dbg_bus_block_data blocks[80];
+ struct dbg_bus_block_data blocks[88];
/* Debug Bus data for each Storm */
struct dbg_bus_storm_data storms[6];
};
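
A minimal sketch of extracting the HW ID recorded for dword/qword i from the new hw_id_mask packing (OSAL_LE16_TO_CPU() assumed from bcm_osal.h, bus a hypothetical pointer to this structure):

	u16 mask = OSAL_LE16_TO_CPU(bus->hw_id_mask);
	u8 hw_id = (mask >> (i * 3)) & 0x7;	/* 3 bits per dword/qword */
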
@@ -738,17 +832,6 @@ enum dbg_bus_frame_modes {
/*
- * Debug bus input types
- */
-enum dbg_bus_input_types {
- DBG_BUS_INPUT_TYPE_STORM,
- DBG_BUS_INPUT_TYPE_BLOCK,
- MAX_DBG_BUS_INPUT_TYPES
-};
-
-
-
-/*
* Debug bus other engine mode
*/
enum dbg_bus_other_engine_modes {
@@ -842,16 +925,17 @@ enum dbg_bus_targets {
};
+
/*
* GRC Dump data
*/
struct dbg_grc_data {
+/* Indicates if the GRC parameters were initialized */
+ u8 params_initialized;
+ u8 reserved1;
+ __le16 reserved2;
/* Value of each GRC parameter. Array size must match enum dbg_grc_params. */
- __le32 param_val[40];
-/* Indicates for each GRC parameter if it was set by the user (0/1).
- * Array size must match the enum dbg_grc_params.
- */
- u8 param_set_by_user[40];
+ __le32 param_val[48];
};
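
A minimal sketch of reading a parameter under the new layout; param_val is indexed directly by enum dbg_grc_params and is only meaningful once params_initialized is set (OSAL_LE32_TO_CPU() assumed from bcm_osal.h):

	u32 val = 0;

	if (grc->params_initialized)
		val = OSAL_LE32_TO_CPU(grc->param_val[DBG_GRC_PARAM_DUMP_PHY]);
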
@@ -901,6 +985,8 @@ enum dbg_grc_params {
DBG_GRC_PARAM_PARITY_SAFE,
DBG_GRC_PARAM_DUMP_CM /* dump CM memories (0/1) */,
DBG_GRC_PARAM_DUMP_PHY /* dump PHY memories (0/1) */,
+	DBG_GRC_PARAM_NO_MCP /* don't perform MCP commands (0/1) */,
+	DBG_GRC_PARAM_NO_FW_VER /* don't read FW/MFW version (0/1) */,
MAX_DBG_GRC_PARAMS
};
@@ -975,7 +1061,10 @@ enum dbg_status {
DBG_STATUS_REG_FIFO_BAD_DATA,
DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA,
DBG_STATUS_DBG_ARRAY_NOT_SET,
- DBG_STATUS_MULTI_BLOCKS_WITH_FILTER,
+ DBG_STATUS_FILTER_BUG,
+ DBG_STATUS_NON_MATCHING_LINES,
+ DBG_STATUS_INVALID_TRIGGER_DWORD_OFFSET,
+ DBG_STATUS_DBG_BUS_IN_USE,
MAX_DBG_STATUS
};
@@ -1014,9 +1103,9 @@ struct dbg_tools_data {
struct idle_chk_data idle_chk /* Idle Check data */;
u8 mode_enable[40] /* Indicates if a mode is enabled (0/1) */;
/* Indicates if a block is in reset state (0/1) */
- u8 block_in_reset[80];
+ u8 block_in_reset[88];
u8 chip_id /* Chip ID (from enum chip_ids) */;
- u8 platform_id /* Platform ID (from enum platform_ids) */;
+ u8 platform_id /* Platform ID */;
u8 initialized /* Indicates if the data was initialized */;
u8 reserved;
};
diff --git a/drivers/net/qede/base/ecore_hsi_eth.h b/drivers/net/qede/base/ecore_hsi_eth.h
index e26c1833..397c408d 100644
--- a/drivers/net/qede/base/ecore_hsi_eth.h
+++ b/drivers/net/qede/base/ecore_hsi_eth.h
@@ -34,315 +34,315 @@ struct xstorm_eth_conn_st_ctx {
__le32 reserved[60];
};
-struct xstorm_eth_conn_ag_ctx {
+struct e4_xstorm_eth_conn_ag_ctx {
u8 reserved0 /* cdu_validation */;
u8 eth_state /* state */;
u8 flags0;
/* exist_in_qm0 */
-#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
/* exist_in_qm1 */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED1_SHIFT 1
/* exist_in_qm2 */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED2_SHIFT 2
/* exist_in_qm3 */
-#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
/* bit4 */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED3_SHIFT 4
/* cf_array_active */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED4_SHIFT 5
/* bit6 */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED5_SHIFT 6
/* bit7 */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_SHIFT 7
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED6_SHIFT 7
u8 flags1;
/* bit8 */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED7_SHIFT 0
/* bit9 */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED8_SHIFT 1
/* bit10 */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED9_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED9_SHIFT 2
/* bit11 */
-#define XSTORM_ETH_CONN_AG_CTX_BIT11_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_BIT11_SHIFT 3
+#define E4_XSTORM_ETH_CONN_AG_CTX_BIT11_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_BIT11_SHIFT 3
/* bit12 */
-#define XSTORM_ETH_CONN_AG_CTX_BIT12_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_BIT12_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_BIT12_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_BIT12_SHIFT 4
/* bit13 */
-#define XSTORM_ETH_CONN_AG_CTX_BIT13_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_BIT13_SHIFT 5
+#define E4_XSTORM_ETH_CONN_AG_CTX_BIT13_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_BIT13_SHIFT 5
/* bit14 */
-#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
/* bit15 */
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
+#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
u8 flags2;
/* timer0cf */
-#define XSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 0
/* timer1cf */
-#define XSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 2
/* timer2cf */
-#define XSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 4
/* timer_stop_all */
-#define XSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 6
u8 flags3;
/* cf4 */
-#define XSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 0
/* cf5 */
-#define XSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 2
/* cf6 */
-#define XSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 4
/* cf7 */
-#define XSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 6
u8 flags4;
/* cf8 */
-#define XSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 0
/* cf9 */
-#define XSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 2
/* cf10 */
-#define XSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 4
/* cf11 */
-#define XSTORM_ETH_CONN_AG_CTX_CF11_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF11_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF11_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
/* cf12 */
-#define XSTORM_ETH_CONN_AG_CTX_CF12_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF12_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF12_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF12_SHIFT 0
/* cf13 */
-#define XSTORM_ETH_CONN_AG_CTX_CF13_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF13_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF13_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF13_SHIFT 2
/* cf14 */
-#define XSTORM_ETH_CONN_AG_CTX_CF14_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF14_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF14_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF14_SHIFT 4
/* cf15 */
-#define XSTORM_ETH_CONN_AG_CTX_CF15_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF15_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF15_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF15_SHIFT 6
u8 flags6;
/* cf16 */
-#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0
/* cf_array_cf */
-#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2
/* cf18 */
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_SHIFT 4
/* cf19 */
-#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
u8 flags7;
/* cf20 */
-#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
/* cf21 */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED10_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED10_SHIFT 2
/* cf22 */
-#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_SHIFT 4
/* cf0en */
-#define XSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 6
/* cf1en */
-#define XSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 7
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
/* cf2en */
-#define XSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 0
/* cf3en */
-#define XSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 1
/* cf4en */
-#define XSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 2
/* cf5en */
-#define XSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 3
/* cf6en */
-#define XSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 4
/* cf7en */
-#define XSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 5
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 5
/* cf8en */
-#define XSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 6
/* cf9en */
-#define XSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 7
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 7
u8 flags9;
/* cf10en */
-#define XSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 0
/* cf11en */
-#define XSTORM_ETH_CONN_AG_CTX_CF11EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF11EN_SHIFT 1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF11EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF11EN_SHIFT 1
/* cf12en */
-#define XSTORM_ETH_CONN_AG_CTX_CF12EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF12EN_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF12EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF12EN_SHIFT 2
/* cf13en */
-#define XSTORM_ETH_CONN_AG_CTX_CF13EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF13EN_SHIFT 3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF13EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF13EN_SHIFT 3
/* cf14en */
-#define XSTORM_ETH_CONN_AG_CTX_CF14EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF14EN_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF14EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF14EN_SHIFT 4
/* cf15en */
-#define XSTORM_ETH_CONN_AG_CTX_CF15EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF15EN_SHIFT 5
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF15EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF15EN_SHIFT 5
/* cf16en */
-#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
/* cf_array_cf_en */
-#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
+#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
u8 flags10;
/* cf18en */
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
/* cf19en */
-#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
+#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
/* cf20en */
-#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
/* cf21en */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_SHIFT 3
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED11_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED11_SHIFT 3
/* cf22en */
-#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
/* cf23en */
-#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
+#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
/* rule0en */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED12_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED12_SHIFT 6
/* rule1en */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_SHIFT 7
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED13_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED13_SHIFT 7
u8 flags11;
/* rule2en */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED14_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED14_SHIFT 0
/* rule3en */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_SHIFT 1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED15_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED15_SHIFT 1
/* rule4en */
-#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
/* rule5en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 3
/* rule6en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 4
/* rule7en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 5
/* rule8en */
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
/* rule9en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_SHIFT 7
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE9EN_SHIFT 7
u8 flags12;
/* rule10en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE10EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE10EN_SHIFT 0
/* rule11en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE11EN_SHIFT 1
/* rule12en */
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
/* rule13en */
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
/* rule14en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE14EN_SHIFT 4
/* rule15en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE15EN_SHIFT 5
/* rule16en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE16EN_SHIFT 6
/* rule17en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_SHIFT 7
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE17EN_SHIFT 7
u8 flags13;
/* rule18en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE18EN_SHIFT 0
/* rule19en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE19EN_SHIFT 1
/* rule20en */
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
/* rule21en */
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
/* rule22en */
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
/* rule23en */
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
/* rule24en */
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
/* rule25en */
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
/* bit16 */
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0
/* bit17 */
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1
/* bit18 */
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
/* bit19 */
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
/* bit20 */
-#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4
/* bit21 */
-#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
+#define E4_XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
/* cf23 */
-#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
u8 edpm_event_id /* byte2 */;
__le16 physical_q0 /* physical_q0 */;
- __le16 quota /* physical_q1 */;
+ __le16 e5_reserved1 /* physical_q1 */;
__le16 edpm_num_bds /* physical_q2 */;
__le16 tx_bd_cons /* word3 */;
__le16 tx_bd_prod /* word4 */;
@@ -375,7 +375,7 @@ struct xstorm_eth_conn_ag_ctx {
u8 byte13 /* byte13 */;
u8 byte14 /* byte14 */;
u8 byte15 /* byte15 */;
- u8 byte16 /* byte16 */;
+ u8 e5_reserved /* e5_reserved */;
__le16 word11 /* word11 */;
__le32 reg10 /* reg10 */;
__le32 reg11 /* reg11 */;
@@ -400,47 +400,47 @@ struct ystorm_eth_conn_st_ctx {
__le32 reserved[8];
};
-struct ystorm_eth_conn_ag_ctx {
+struct e4_ystorm_eth_conn_ag_ctx {
u8 byte0 /* cdu_validation */;
u8 state /* state */;
u8 flags0;
/* exist_in_qm0 */
-#define YSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
-#define YSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_YSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
/* exist_in_qm1 */
-#define YSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
-#define YSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
-#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3 /* cf0 */
-#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 2
-#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_MASK 0x3 /* cf1 */
-#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_SHIFT 4
-#define YSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */
-#define YSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
+#define E4_YSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3 /* cf0 */
+#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 2
+#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_MASK 0x3 /* cf1 */
+#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_SHIFT 4
+#define E4_YSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */
+#define E4_YSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
/* cf0en */
-#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1
-#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 0
+#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 0
/* cf1en */
-#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_MASK 0x1
-#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_SHIFT 1
+#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_SHIFT 1
/* cf2en */
-#define YSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
-#define YSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_YSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
/* rule0en */
-#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
/* rule1en */
-#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
/* rule2en */
-#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
/* rule3en */
-#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
/* rule4en */
-#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 tx_q0_int_coallecing_timeset /* byte2 */;
u8 byte3 /* byte3 */;
__le16 word0 /* word0 */;
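
Each flagsN byte in these aggregative contexts packs several sub-fields, located by the paired _MASK/_SHIFT defines above. Below is a minimal sketch of the accessor pattern, assuming GET_FIELD/SET_FIELD helpers modeled on the usual ecore ones; the in-tree definitions may differ slightly.

/*
 * Illustrative field accessors; patterned on the common ecore helpers,
 * not necessarily the exact in-tree definitions.
 */
#define GET_FIELD(value, name) \
	(((value) >> (name##_SHIFT)) & (name##_MASK))

#define SET_FIELD(value, name, flag)					\
	do {								\
		(value) &= ~((name##_MASK) << (name##_SHIFT));		\
		(value) |= ((u8)(flag) << (name##_SHIFT));		\
	} while (0)

/* e.g. enable rule0 in an E4 ystorm aggregative context (flags1, bit 3): */
static void example_enable_rule0(struct e4_ystorm_eth_conn_ag_ctx *ctx)
{
	SET_FIELD(ctx->flags1, E4_YSTORM_ETH_CONN_AG_CTX_RULE0EN, 1);
}
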
@@ -454,89 +454,89 @@ struct ystorm_eth_conn_ag_ctx {
__le32 reg3 /* reg3 */;
};
-struct tstorm_eth_conn_ag_ctx {
+struct e4_tstorm_eth_conn_ag_ctx {
u8 byte0 /* cdu_validation */;
u8 byte1 /* state */;
u8 flags0;
-#define TSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
-#define TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
-#define TSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
-#define TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
-#define TSTORM_ETH_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */
-#define TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT 2
-#define TSTORM_ETH_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */
-#define TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT 3
-#define TSTORM_ETH_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */
-#define TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT 4
-#define TSTORM_ETH_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */
-#define TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT 5
-#define TSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
-#define TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 6
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT 2
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT 3
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT 4
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT 5
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 6
u8 flags1;
-#define TSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
-#define TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 0
-#define TSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
-#define TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 2
-#define TSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
-#define TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 4
-#define TSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
-#define TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 6
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 0
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 2
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 4
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 6
u8 flags2;
-#define TSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
-#define TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 0
-#define TSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
-#define TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 2
-#define TSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
-#define TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 4
-#define TSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
-#define TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 6
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 0
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 2
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 4
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 6
u8 flags3;
-#define TSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
-#define TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 0
-#define TSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
-#define TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 2
-#define TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
-#define TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 4
-#define TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
-#define TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 5
-#define TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
-#define TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 6
-#define TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
-#define TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 7
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 0
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 2
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 4
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 5
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 6
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 7
u8 flags4;
-#define TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
-#define TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 0
-#define TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
-#define TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 1
-#define TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
-#define TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 2
-#define TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */
-#define TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 3
-#define TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */
-#define TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 4
-#define TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */
-#define TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 5
-#define TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */
-#define TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 6
-#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 0
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 1
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 2
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 3
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 4
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 5
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 6
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
-#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK 0x1 /* rule6en */
-#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT 5
-#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define E4_TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK 0x1 /* rule6en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT 5
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
__le32 reg0 /* reg0 */;
__le32 reg1 /* reg1 */;
__le32 reg2 /* reg2 */;
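
A read-side counterpart to the earlier sketch, using the tstorm rule6en field renamed above (E4_TSTORM_ETH_CONN_AG_CTX_RX_BD_EN sits in flags5, bit 5); GET_FIELD is the same illustrative helper as before.

/* Sketch: query whether RX BD processing is enabled for this connection. */
static int example_rx_bd_enabled(const struct e4_tstorm_eth_conn_ag_ctx *ctx)
{
	return GET_FIELD(ctx->flags5, E4_TSTORM_ETH_CONN_AG_CTX_RX_BD_EN);
}
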
@@ -558,88 +558,88 @@ struct tstorm_eth_conn_ag_ctx {
__le32 reg10 /* reg10 */;
};
-struct ustorm_eth_conn_ag_ctx {
+struct e4_ustorm_eth_conn_ag_ctx {
u8 byte0 /* cdu_validation */;
u8 byte1 /* state */;
u8 flags0;
/* exist_in_qm0 */
-#define USTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_USTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
/* exist_in_qm1 */
-#define USTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_USTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
/* timer0cf */
-#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_MASK 0x3
-#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_SHIFT 2
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_MASK 0x3
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_SHIFT 2
/* timer1cf */
-#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_MASK 0x3
-#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_SHIFT 4
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_MASK 0x3
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_SHIFT 4
/* timer2cf */
-#define USTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
-#define USTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
+#define E4_USTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_USTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
/* timer_stop_all */
-#define USTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
-#define USTORM_ETH_CONN_AG_CTX_CF3_SHIFT 0
+#define E4_USTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
+#define E4_USTORM_ETH_CONN_AG_CTX_CF3_SHIFT 0
/* cf4 */
-#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_MASK 0x3
-#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_SHIFT 2
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_MASK 0x3
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_SHIFT 2
/* cf5 */
-#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_MASK 0x3
-#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_SHIFT 4
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_MASK 0x3
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_SHIFT 4
/* cf6 */
-#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3
-#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 6
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 6
u8 flags2;
/* cf0en */
-#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_SHIFT 0
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_SHIFT 0
/* cf1en */
-#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_SHIFT 1
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_SHIFT 1
/* cf2en */
-#define USTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_USTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
/* cf3en */
-#define USTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 3
+#define E4_USTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 3
/* cf4en */
-#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_SHIFT 4
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_SHIFT 4
/* cf5en */
-#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_SHIFT 5
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_SHIFT 5
/* cf6en */
-#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 6
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 6
/* rule0en */
-#define USTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags3;
/* rule1en */
-#define USTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
/* rule2en */
-#define USTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
/* rule3en */
-#define USTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
/* rule4en */
-#define USTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
/* rule5en */
-#define USTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
/* rule6en */
-#define USTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 5
/* rule7en */
-#define USTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
/* rule8en */
-#define USTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 byte2 /* byte2 */;
u8 byte3 /* byte3 */;
__le16 word0 /* conn_dpi */;
@@ -678,15 +678,15 @@ struct eth_conn_context {
/* xstorm storm context */
struct xstorm_eth_conn_st_ctx xstorm_st_context;
/* xstorm aggregative context */
- struct xstorm_eth_conn_ag_ctx xstorm_ag_context;
+ struct e4_xstorm_eth_conn_ag_ctx xstorm_ag_context;
/* ystorm storm context */
struct ystorm_eth_conn_st_ctx ystorm_st_context;
/* ystorm aggregative context */
- struct ystorm_eth_conn_ag_ctx ystorm_ag_context;
+ struct e4_ystorm_eth_conn_ag_ctx ystorm_ag_context;
/* tstorm aggregative context */
- struct tstorm_eth_conn_ag_ctx tstorm_ag_context;
+ struct e4_tstorm_eth_conn_ag_ctx tstorm_ag_context;
/* ustorm aggregative context */
- struct ustorm_eth_conn_ag_ctx ustorm_ag_context;
+ struct e4_ustorm_eth_conn_ag_ctx ustorm_ag_context;
/* ustorm storm context */
struct ustorm_eth_conn_st_ctx ustorm_st_context;
/* mstorm storm context */
@@ -739,6 +739,7 @@ enum eth_error_code {
ETH_FILTERS_VNI_ADD_FAIL_FULL,
/* vni add filters command failed due to duplicate VNI filter */
ETH_FILTERS_VNI_ADD_FAIL_DUP,
+ ETH_FILTERS_GFT_UPDATE_FAIL /* GFT filter update failed. */,
MAX_ETH_ERROR_CODE
};
@@ -982,8 +983,10 @@ struct eth_vport_rss_config {
u8 rss_id;
u8 rss_mode /* The RSS mode for this function */;
u8 update_rss_key /* if set update the rss key */;
- u8 update_rss_ind_table /* if set update the indirection table */;
- u8 update_rss_capabilities /* if set update the capabilities */;
+/* if set update the indirection table values */
+ u8 update_rss_ind_table;
+/* if set update the capabilities and indirection table size. */
+ u8 update_rss_capabilities;
u8 tbl_size /* rss mask (Tbl size) */;
__le32 reserved2[2];
/* RSS indirection table */
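
The update_* flags above let a single ramrod refresh the RSS key, indirection table, and capabilities independently. A minimal sketch of populating the flags for a key plus indirection-table update, touching only the fields visible in this hunk:

/*
 * Sketch: request an RSS key + indirection table update in one ramrod.
 * Only the update_* flags shown above are set here.
 */
static void example_fill_rss_update(struct eth_vport_rss_config *cfg)
{
	cfg->update_rss_key = 1;          /* re-load the key */
	cfg->update_rss_ind_table = 1;    /* re-load indirection entries */
	/* leave capabilities (and thus the table size) untouched */
	cfg->update_rss_capabilities = 0;
}
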
@@ -1267,7 +1270,10 @@ struct rx_update_gft_filter_data {
/* Use enum to set type of flow using gft HW logic blocks */
u8 filter_type;
u8 filter_action /* Use to set type of action on filter */;
- u8 reserved;
+/* 0 - don't assert in case of error, just return an error code. 1 - assert
+ * in case of error.
+ */
+ u8 assert_on_error;
};
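
A minimal sketch of building a GFT update request that returns an error code rather than asserting the firmware; filter_type and filter_action would come from their respective enums, which fall outside this hunk.

/*
 * Sketch: non-fatal GFT filter update. Only the assert_on_error field
 * added above is authoritative here.
 */
static void example_fill_gft_update(struct rx_update_gft_filter_data *d)
{
	d->assert_on_error = 0;	/* 0 - return an error code on failure */
	/* filter_type / filter_action would be set from their enums here */
}
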
@@ -1446,7 +1452,15 @@ struct vport_update_ramrod_data_cmn {
/* If set, MTU will be updated. Vport must not be active. */
u8 update_mtu_flg;
__le16 mtu /* New MTU value. Used if update_mtu_flg are set */;
- u8 reserved[2];
+/* If set, ctl_frame_mac_check_en and ctl_frame_ethtype_check_en will be
+ * updated
+ */
+ u8 update_ctl_frame_checks_en_flg;
+/* If set, Control frames will be filtered according to MAC check. */
+ u8 ctl_frame_mac_check_en;
+/* If set, Control frames will be filtered according to ethtype check. */
+ u8 ctl_frame_ethtype_check_en;
+ u8 reserved[15];
};
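
A minimal sketch of using the new control-frame check fields: update_ctl_frame_checks_en_flg gates whether the two *_check_en values are applied at all.

/* Sketch: turn on MAC-based control-frame filtering via the new fields. */
static void example_enable_ctl_frame_mac_check(
		struct vport_update_ramrod_data_cmn *cmn)
{
	cmn->update_ctl_frame_checks_en_flg = 1;
	cmn->ctl_frame_mac_check_en = 1;     /* filter by MAC check */
	cmn->ctl_frame_ethtype_check_en = 0; /* leave ethtype check off */
}
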
struct vport_update_ramrod_mcast {
@@ -1472,6 +1486,668 @@ struct vport_update_ramrod_data {
+struct E4XstormEthConnAgCtxDqExtLdPart {
+ u8 reserved0 /* cdu_validation */;
+ u8 eth_state /* state */;
+ u8 flags0;
+/* exist_in_qm0 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT 0
+/* exist_in_qm1 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_SHIFT 1
+/* exist_in_qm2 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_SHIFT 2
+/* exist_in_qm3 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT 3
+/* bit4 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_SHIFT 4
+/* cf_array_active */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_SHIFT 5
+/* bit6 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_SHIFT 6
+/* bit7 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_SHIFT 7
+ u8 flags1;
+/* bit8 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_SHIFT 0
+/* bit9 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_SHIFT 1
+/* bit10 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_SHIFT 2
+/* bit11 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_SHIFT 3
+/* bit12 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_BIT12_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_BIT12_SHIFT 4
+/* bit13 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_BIT13_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_BIT13_SHIFT 5
+/* bit14 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_SHIFT 6
+/* bit15 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_SHIFT 7
+ u8 flags2;
+/* timer0cf */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF0_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF0_SHIFT 0
+/* timer1cf */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF1_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF1_SHIFT 2
+/* timer2cf */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF2_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF2_SHIFT 4
+/* timer_stop_all */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF3_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF3_SHIFT 6
+ u8 flags3;
+/* cf4 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF4_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF4_SHIFT 0
+/* cf5 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF5_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF5_SHIFT 2
+/* cf6 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF6_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF6_SHIFT 4
+/* cf7 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF7_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF7_SHIFT 6
+ u8 flags4;
+/* cf8 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF8_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF8_SHIFT 0
+/* cf9 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF9_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF9_SHIFT 2
+/* cf10 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF10_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF10_SHIFT 4
+/* cf11 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF11_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF11_SHIFT 6
+ u8 flags5;
+/* cf12 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF12_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF12_SHIFT 0
+/* cf13 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF13_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF13_SHIFT 2
+/* cf14 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF14_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF14_SHIFT 4
+/* cf15 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF15_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF15_SHIFT 6
+ u8 flags6;
+/* cf16 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_SHIFT 0
+/* cf_array_cf */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_SHIFT 2
+/* cf18 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_SHIFT 4
+/* cf19 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_SHIFT 6
+ u8 flags7;
+/* cf20 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_SHIFT 0
+/* cf21 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_SHIFT 2
+/* cf22 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT 4
+/* cf0en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_SHIFT 6
+/* cf1en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_SHIFT 7
+ u8 flags8;
+/* cf2en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_SHIFT 0
+/* cf3en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_SHIFT 1
+/* cf4en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_SHIFT 2
+/* cf5en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_SHIFT 3
+/* cf6en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_SHIFT 4
+/* cf7en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_SHIFT 5
+/* cf8en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_SHIFT 6
+/* cf9en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_SHIFT 7
+ u8 flags9;
+/* cf10en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_SHIFT 0
+/* cf11en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_SHIFT 1
+/* cf12en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_SHIFT 2
+/* cf13en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_SHIFT 3
+/* cf14en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_SHIFT 4
+/* cf15en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_SHIFT 5
+/* cf16en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_SHIFT 6
+/* cf_array_cf_en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_SHIFT 7
+ u8 flags10;
+/* cf18en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_SHIFT 0
+/* cf19en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_SHIFT 1
+/* cf20en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_SHIFT 2
+/* cf21en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_SHIFT 3
+/* cf22en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT 4
+/* cf23en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_SHIFT 5
+/* rule0en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_SHIFT 6
+/* rule1en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_SHIFT 7
+ u8 flags11;
+/* rule2en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_SHIFT 0
+/* rule3en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_SHIFT 1
+/* rule4en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_SHIFT 2
+/* rule5en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_SHIFT 3
+/* rule6en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_SHIFT 4
+/* rule7en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_SHIFT 5
+/* rule8en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT 6
+/* rule9en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_SHIFT 7
+ u8 flags12;
+/* rule10en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_SHIFT 0
+/* rule11en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_SHIFT 1
+/* rule12en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT 2
+/* rule13en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT 3
+/* rule14en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_SHIFT 4
+/* rule15en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_SHIFT 5
+/* rule16en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_SHIFT 6
+/* rule17en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_SHIFT 7
+ u8 flags13;
+/* rule18en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_SHIFT 0
+/* rule19en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_SHIFT 1
+/* rule20en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT 2
+/* rule21en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT 3
+/* rule22en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT 4
+/* rule23en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT 5
+/* rule24en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT 6
+/* rule25en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+/* bit16 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_SHIFT 0
+/* bit17 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_SHIFT 1
+/* bit18 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_SHIFT 2
+/* bit19 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_SHIFT 3
+/* bit20 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_SHIFT 4
+/* bit21 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT 5
+/* cf23 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_SHIFT 6
+ u8 edpm_event_id /* byte2 */;
+ __le16 physical_q0 /* physical_q0 */;
+ __le16 e5_reserved1 /* physical_q1 */;
+ __le16 edpm_num_bds /* physical_q2 */;
+ __le16 tx_bd_cons /* word3 */;
+ __le16 tx_bd_prod /* word4 */;
+ __le16 tx_class /* word5 */;
+ __le16 conn_dpi /* conn_dpi */;
+ u8 byte3 /* byte3 */;
+ u8 byte4 /* byte4 */;
+ u8 byte5 /* byte5 */;
+ u8 byte6 /* byte6 */;
+ __le32 reg0 /* reg0 */;
+ __le32 reg1 /* reg1 */;
+ __le32 reg2 /* reg2 */;
+ __le32 reg3 /* reg3 */;
+ __le32 reg4 /* reg4 */;
+};
+
+
+struct e4_mstorm_eth_conn_ag_ctx {
+ u8 byte0 /* cdu_validation */;
+ u8 byte1 /* state */;
+ u8 flags0;
+#define E4_MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 /* exist_in_qm0 */
+#define E4_MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define E4_MSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
+#define E4_MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 /* cf0 */
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 /* cf1 */
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
+ __le16 word0 /* word0 */;
+ __le16 word1 /* word1 */;
+ __le32 reg0 /* reg0 */;
+ __le32 reg1 /* reg1 */;
+};
+
+
+
+
+
+struct e4_xstorm_eth_hw_conn_ag_ctx {
+ u8 reserved0 /* cdu_validation */;
+ u8 eth_state /* state */;
+ u8 flags0;
+/* exist_in_qm0 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+/* exist_in_qm1 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_SHIFT 1
+/* exist_in_qm2 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_SHIFT 2
+/* exist_in_qm3 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+/* bit4 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_SHIFT 4
+/* cf_array_active */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_MASK 0x1 /* bit6 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_MASK 0x1 /* bit7 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_SHIFT 7
+ u8 flags1;
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_MASK 0x1 /* bit8 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_MASK 0x1 /* bit9 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_SHIFT 1
+/* bit10 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_SHIFT 2
+/* bit11 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_BIT11_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_BIT11_SHIFT 3
+/* bit12 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_BIT12_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_BIT12_SHIFT 4
+/* bit13 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_BIT13_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_BIT13_SHIFT 5
+/* bit14 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
+/* bit15 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
+ u8 flags2;
+/* timer0cf */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0_SHIFT 0
+/* timer1cf */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1_SHIFT 2
+/* timer2cf */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2_SHIFT 4
+/* timer_stop_all */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3_SHIFT 6
+ u8 flags3;
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4_SHIFT 0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5_SHIFT 2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6_SHIFT 4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7_SHIFT 6
+ u8 flags4;
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8_SHIFT 0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9_SHIFT 2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10_SHIFT 4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11_MASK 0x3 /* cf11 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11_SHIFT 6
+ u8 flags5;
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12_MASK 0x3 /* cf12 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12_SHIFT 0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13_MASK 0x3 /* cf13 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13_SHIFT 2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14_MASK 0x3 /* cf14 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14_SHIFT 4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15_MASK 0x3 /* cf15 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15_SHIFT 6
+ u8 flags6;
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3 /* cf16 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0
+/* cf_array_cf */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_MASK 0x3 /* cf18 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_SHIFT 4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_MASK 0x3 /* cf19 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
+ u8 flags7;
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 /* cf20 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_MASK 0x3 /* cf21 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_SHIFT 2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_MASK 0x3 /* cf22 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+/* cf0en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_SHIFT 6
+/* cf1en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_SHIFT 7
+ u8 flags8;
+/* cf2en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_SHIFT 0
+/* cf3en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_SHIFT 1
+/* cf4en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_SHIFT 2
+/* cf5en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_SHIFT 3
+/* cf6en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_SHIFT 4
+/* cf7en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_SHIFT 5
+/* cf8en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_SHIFT 6
+/* cf9en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_SHIFT 7
+ u8 flags9;
+/* cf10en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_SHIFT 0
+/* cf11en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_SHIFT 1
+/* cf12en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_SHIFT 2
+/* cf13en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_SHIFT 3
+/* cf14en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_SHIFT 4
+/* cf15en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_SHIFT 5
+/* cf16en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
+/* cf_array_cf_en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
+ u8 flags10;
+/* cf18en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
+/* cf19en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
+/* cf20en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+/* cf21en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_SHIFT 3
+/* cf22en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+/* cf23en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
+/* rule0en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_SHIFT 6
+/* rule1en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_SHIFT 7
+ u8 flags11;
+/* rule2en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_SHIFT 0
+/* rule3en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_SHIFT 1
+/* rule4en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
+/* rule5en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_SHIFT 3
+/* rule6en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_SHIFT 4
+/* rule7en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_SHIFT 5
+/* rule8en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+/* rule9en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_SHIFT 7
+ u8 flags12;
+/* rule10en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_SHIFT 0
+/* rule11en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_SHIFT 1
+/* rule12en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+/* rule13en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+/* rule14en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_SHIFT 4
+/* rule15en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_SHIFT 5
+/* rule16en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_SHIFT 6
+/* rule17en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_SHIFT 7
+ u8 flags13;
+/* rule18en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_SHIFT 0
+/* rule19en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_SHIFT 1
+/* rule20en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+/* rule21en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+/* rule22en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+/* rule23en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+/* rule24en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+/* rule25en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+/* bit16 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0
+/* bit17 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1
+/* bit18 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
+/* bit19 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
+/* bit20 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4
+/* bit21 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_MASK 0x3 /* cf23 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
+ u8 edpm_event_id /* byte2 */;
+ __le16 physical_q0 /* physical_q0 */;
+ __le16 e5_reserved1 /* physical_q1 */;
+ __le16 edpm_num_bds /* physical_q2 */;
+ __le16 tx_bd_cons /* word3 */;
+ __le16 tx_bd_prod /* word4 */;
+ __le16 tx_class /* word5 */;
+ __le16 conn_dpi /* conn_dpi */;
+};
+
+
+
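The *_MASK/*_SHIFT pairs above are not meant to be used by hand; callers go through the generic GET_FIELD()/SET_FIELD() helpers from common_hsi.h, which paste the field name together with its _MASK and _SHIFT (the same idiom the ecore_tx_pq_map_rt_init() hunk later in this patch uses on qm_rf_pq_map). A minimal sketch of the idiom, assuming those helper definitions and the struct name e4_xstorm_eth_hw_conn_ag_ctx for the context whose tail is shown above:

    /* Sketch only: set and read back cf0en inside flags7 of the E4 HW
     * aggregation context. SET_FIELD()/GET_FIELD() come from
     * common_hsi.h; the field sits at bit 6 per the defines above.
     */
    struct e4_xstorm_eth_hw_conn_ag_ctx ctx = { 0 };

    SET_FIELD(ctx.flags7, E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0EN, 1);
    if (GET_FIELD(ctx.flags7, E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0EN))
            ; /* cf0 events are now enabled for this connection */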
/*
* GFT CAM line struct
*/
@@ -1620,8 +2296,7 @@ enum gft_profile_upper_protocol_type {
* GFT RAM line struct
*/
struct gft_ram_line {
- __le32 low32bits;
-/* (use enum gft_vlan_select) */
+ __le32 lo;
#define GFT_RAM_LINE_VLAN_SELECT_MASK 0x3
#define GFT_RAM_LINE_VLAN_SELECT_SHIFT 0
#define GFT_RAM_LINE_TUNNEL_ENTROPHY_MASK 0x1
@@ -1684,7 +2359,7 @@ struct gft_ram_line {
#define GFT_RAM_LINE_DST_PORT_SHIFT 30
#define GFT_RAM_LINE_SRC_PORT_MASK 0x1
#define GFT_RAM_LINE_SRC_PORT_SHIFT 31
- __le32 high32bits;
+ __le32 hi;
#define GFT_RAM_LINE_DSCP_MASK 0x1
#define GFT_RAM_LINE_DSCP_SHIFT 0
#define GFT_RAM_LINE_OVER_IP_PROTOCOL_MASK 0x1
@@ -1722,690 +2397,4 @@ enum gft_vlan_select {
};
-struct mstorm_eth_conn_ag_ctx {
- u8 byte0 /* cdu_validation */;
- u8 byte1 /* state */;
- u8 flags0;
-/* exist_in_qm0 */
-#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-/* exist_in_qm1 */
-#define MSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
-#define MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
-#define MSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 /* cf0 */
-#define MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 2
-#define MSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 /* cf1 */
-#define MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 4
-#define MSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */
-#define MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
- u8 flags1;
-#define MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
-#define MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 0
-#define MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
-#define MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 1
-#define MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
-#define MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
-#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
-#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
-#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
-#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
-#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
-#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
- __le16 word0 /* word0 */;
- __le16 word1 /* word1 */;
- __le32 reg0 /* reg0 */;
- __le32 reg1 /* reg1 */;
-};
-
-
-
-
-struct xstormEthConnAgCtxDqExtLdPart {
- u8 reserved0 /* cdu_validation */;
- u8 eth_state /* state */;
- u8 flags0;
-/* exist_in_qm0 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT 0
-/* exist_in_qm1 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_SHIFT 1
-/* exist_in_qm2 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_SHIFT 2
-/* exist_in_qm3 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT 3
-/* bit4 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_SHIFT 4
-/* cf_array_active */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_SHIFT 5
-/* bit6 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_SHIFT 6
-/* bit7 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_SHIFT 7
- u8 flags1;
-/* bit8 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_SHIFT 0
-/* bit9 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_SHIFT 1
-/* bit10 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_SHIFT 2
-/* bit11 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_SHIFT 3
-/* bit12 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT12_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT12_SHIFT 4
-/* bit13 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT13_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT13_SHIFT 5
-/* bit14 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_SHIFT 6
-/* bit15 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_SHIFT 7
- u8 flags2;
-/* timer0cf */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0_SHIFT 0
-/* timer1cf */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1_SHIFT 2
-/* timer2cf */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2_SHIFT 4
-/* timer_stop_all */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3_SHIFT 6
- u8 flags3;
-/* cf4 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4_SHIFT 0
-/* cf5 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5_SHIFT 2
-/* cf6 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6_SHIFT 4
-/* cf7 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7_SHIFT 6
- u8 flags4;
-/* cf8 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8_SHIFT 0
-/* cf9 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9_SHIFT 2
-/* cf10 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10_SHIFT 4
-/* cf11 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11_SHIFT 6
- u8 flags5;
-/* cf12 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12_SHIFT 0
-/* cf13 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13_SHIFT 2
-/* cf14 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14_SHIFT 4
-/* cf15 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15_SHIFT 6
- u8 flags6;
-/* cf16 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_SHIFT 0
-/* cf_array_cf */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_SHIFT 2
-/* cf18 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_SHIFT 4
-/* cf19 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_SHIFT 6
- u8 flags7;
-/* cf20 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_SHIFT 0
-/* cf21 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_SHIFT 2
-/* cf22 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT 4
-/* cf0en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_SHIFT 6
-/* cf1en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_SHIFT 7
- u8 flags8;
-/* cf2en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_SHIFT 0
-/* cf3en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_SHIFT 1
-/* cf4en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_SHIFT 2
-/* cf5en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_SHIFT 3
-/* cf6en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_SHIFT 4
-/* cf7en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_SHIFT 5
-/* cf8en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_SHIFT 6
-/* cf9en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_SHIFT 7
- u8 flags9;
-/* cf10en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_SHIFT 0
-/* cf11en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_SHIFT 1
-/* cf12en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_SHIFT 2
-/* cf13en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_SHIFT 3
-/* cf14en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_SHIFT 4
-/* cf15en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_SHIFT 5
-/* cf16en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_SHIFT 6
-/* cf_array_cf_en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_SHIFT 7
- u8 flags10;
-/* cf18en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_SHIFT 0
-/* cf19en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_SHIFT 1
-/* cf20en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_SHIFT 2
-/* cf21en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_SHIFT 3
-/* cf22en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT 4
-/* cf23en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_SHIFT 5
-/* rule0en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_SHIFT 6
-/* rule1en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_SHIFT 7
- u8 flags11;
-/* rule2en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_SHIFT 0
-/* rule3en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_SHIFT 1
-/* rule4en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_SHIFT 2
-/* rule5en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_SHIFT 3
-/* rule6en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_SHIFT 4
-/* rule7en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_SHIFT 5
-/* rule8en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT 6
-/* rule9en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_SHIFT 7
- u8 flags12;
-/* rule10en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_SHIFT 0
-/* rule11en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_SHIFT 1
-/* rule12en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT 2
-/* rule13en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT 3
-/* rule14en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_SHIFT 4
-/* rule15en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_SHIFT 5
-/* rule16en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_SHIFT 6
-/* rule17en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_SHIFT 7
- u8 flags13;
-/* rule18en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_SHIFT 0
-/* rule19en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_SHIFT 1
-/* rule20en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT 2
-/* rule21en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT 3
-/* rule22en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT 4
-/* rule23en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT 5
-/* rule24en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT 6
-/* rule25en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT 7
- u8 flags14;
-/* bit16 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_SHIFT 0
-/* bit17 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_SHIFT 1
-/* bit18 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_SHIFT 2
-/* bit19 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_SHIFT 3
-/* bit20 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_SHIFT 4
-/* bit21 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT 5
-/* cf23 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_SHIFT 6
- u8 edpm_event_id /* byte2 */;
- __le16 physical_q0 /* physical_q0 */;
- __le16 quota /* physical_q1 */;
- __le16 edpm_num_bds /* physical_q2 */;
- __le16 tx_bd_cons /* word3 */;
- __le16 tx_bd_prod /* word4 */;
- __le16 tx_class /* word5 */;
- __le16 conn_dpi /* conn_dpi */;
- u8 byte3 /* byte3 */;
- u8 byte4 /* byte4 */;
- u8 byte5 /* byte5 */;
- u8 byte6 /* byte6 */;
- __le32 reg0 /* reg0 */;
- __le32 reg1 /* reg1 */;
- __le32 reg2 /* reg2 */;
- __le32 reg3 /* reg3 */;
- __le32 reg4 /* reg4 */;
-};
-
-
-
-struct xstorm_eth_hw_conn_ag_ctx {
- u8 reserved0 /* cdu_validation */;
- u8 eth_state /* state */;
- u8 flags0;
-/* exist_in_qm0 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-/* exist_in_qm1 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_SHIFT 1
-/* exist_in_qm2 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_SHIFT 2
-/* exist_in_qm3 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
-/* bit4 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_SHIFT 4
-/* cf_array_active */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_SHIFT 5
-/* bit6 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_SHIFT 6
-/* bit7 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_SHIFT 7
- u8 flags1;
-/* bit8 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_SHIFT 0
-/* bit9 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_SHIFT 1
-/* bit10 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_SHIFT 2
-/* bit11 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_SHIFT 3
-/* bit12 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_BIT12_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_BIT12_SHIFT 4
-/* bit13 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_BIT13_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_BIT13_SHIFT 5
-/* bit14 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
-/* bit15 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
- u8 flags2;
-/* timer0cf */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_SHIFT 0
-/* timer1cf */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_SHIFT 2
-/* timer2cf */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_SHIFT 4
-/* timer_stop_all */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_SHIFT 6
- u8 flags3;
-/* cf4 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_SHIFT 0
-/* cf5 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_SHIFT 2
-/* cf6 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_SHIFT 4
-/* cf7 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_SHIFT 6
- u8 flags4;
-/* cf8 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_SHIFT 0
-/* cf9 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_SHIFT 2
-/* cf10 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_SHIFT 4
-/* cf11 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_SHIFT 6
- u8 flags5;
-/* cf12 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_SHIFT 0
-/* cf13 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_SHIFT 2
-/* cf14 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_SHIFT 4
-/* cf15 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_SHIFT 6
- u8 flags6;
-/* cf16 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0
-/* cf_array_cf */
-#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2
-/* cf18 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_SHIFT 4
-/* cf19 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
- u8 flags7;
-/* cf20 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
-/* cf21 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_SHIFT 2
-/* cf22 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_SHIFT 4
-/* cf0en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_SHIFT 6
-/* cf1en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_SHIFT 7
- u8 flags8;
-/* cf2en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_SHIFT 0
-/* cf3en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_SHIFT 1
-/* cf4en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_SHIFT 2
-/* cf5en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_SHIFT 3
-/* cf6en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_SHIFT 4
-/* cf7en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_SHIFT 5
-/* cf8en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_SHIFT 6
-/* cf9en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_SHIFT 7
- u8 flags9;
-/* cf10en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_SHIFT 0
-/* cf11en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_SHIFT 1
-/* cf12en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_SHIFT 2
-/* cf13en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_SHIFT 3
-/* cf14en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_SHIFT 4
-/* cf15en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_SHIFT 5
-/* cf16en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
-/* cf_array_cf_en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
- u8 flags10;
-/* cf18en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
-/* cf19en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
-/* cf20en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
-/* cf21en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_SHIFT 3
-/* cf22en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
-/* cf23en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
-/* rule0en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_SHIFT 6
-/* rule1en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_SHIFT 7
- u8 flags11;
-/* rule2en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_SHIFT 0
-/* rule3en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_SHIFT 1
-/* rule4en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
-/* rule5en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_SHIFT 3
-/* rule6en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_SHIFT 4
-/* rule7en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_SHIFT 5
-/* rule8en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
-/* rule9en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_SHIFT 7
- u8 flags12;
-/* rule10en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_SHIFT 0
-/* rule11en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_SHIFT 1
-/* rule12en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
-/* rule13en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
-/* rule14en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_SHIFT 4
-/* rule15en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_SHIFT 5
-/* rule16en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_SHIFT 6
-/* rule17en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_SHIFT 7
- u8 flags13;
-/* rule18en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_SHIFT 0
-/* rule19en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_SHIFT 1
-/* rule20en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
-/* rule21en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
-/* rule22en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
-/* rule23en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
-/* rule24en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
-/* rule25en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
- u8 flags14;
-/* bit16 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0
-/* bit17 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1
-/* bit18 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
-/* bit19 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
-/* bit20 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4
-/* bit21 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
-/* cf23 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
- u8 edpm_event_id /* byte2 */;
- __le16 physical_q0 /* physical_q0 */;
- __le16 quota /* physical_q1 */;
- __le16 edpm_num_bds /* physical_q2 */;
- __le16 tx_bd_cons /* word3 */;
- __le16 tx_bd_prod /* word4 */;
- __le16 tx_class /* word5 */;
- __le16 conn_dpi /* conn_dpi */;
-};
-
-
#endif /* __ECORE_HSI_ETH__ */
diff --git a/drivers/net/qede/base/ecore_hsi_init_tool.h b/drivers/net/qede/base/ecore_hsi_init_tool.h
index 410b0bcb..1f57e9b2 100644
--- a/drivers/net/qede/base/ecore_hsi_init_tool.h
+++ b/drivers/net/qede/base/ecore_hsi_init_tool.h
@@ -22,6 +22,13 @@
/* Max size in dwords of a zipped array */
#define MAX_ZIPPED_SIZE 8192
+enum chip_ids {
+ CHIP_BB,
+ CHIP_K2,
+ CHIP_E5,
+ MAX_CHIP_IDS
+};
+
struct fw_asserts_ram_section {
/* The offset of the section in the RAM in RAM lines (64-bit units) */
@@ -69,51 +76,6 @@ struct fw_info_location {
__le32 size;
};
-
-
-
-enum init_modes {
- MODE_BB_A0,
- MODE_BB_B0,
- MODE_K2,
- MODE_ASIC,
- MODE_EMUL_REDUCED,
- MODE_EMUL_FULL,
- MODE_FPGA,
- MODE_CHIPSIM,
- MODE_SF,
- MODE_MF_SD,
- MODE_MF_SI,
- MODE_PORTS_PER_ENG_1,
- MODE_PORTS_PER_ENG_2,
- MODE_PORTS_PER_ENG_4,
- MODE_100G,
- MODE_40G,
- MODE_EAGLE_ENG1_WORKAROUND,
- MAX_INIT_MODES
-};
-
-
-enum init_phases {
- PHASE_ENGINE,
- PHASE_PORT,
- PHASE_PF,
- PHASE_VF,
- PHASE_QM_PF,
- MAX_INIT_PHASES
-};
-
-
-enum init_split_types {
- SPLIT_TYPE_NONE,
- SPLIT_TYPE_PORT,
- SPLIT_TYPE_PF,
- SPLIT_TYPE_PORT_PF,
- SPLIT_TYPE_VF,
- MAX_INIT_SPLIT_TYPES
-};
-
-
/*
* Binary buffer header
*/
@@ -204,8 +166,46 @@ union init_array_hdr {
};
+enum init_modes {
+ MODE_BB_A0_DEPRECATED,
+ MODE_BB,
+ MODE_K2,
+ MODE_ASIC,
+ MODE_EMUL_REDUCED,
+ MODE_EMUL_FULL,
+ MODE_FPGA,
+ MODE_CHIPSIM,
+ MODE_SF,
+ MODE_MF_SD,
+ MODE_MF_SI,
+ MODE_PORTS_PER_ENG_1,
+ MODE_PORTS_PER_ENG_2,
+ MODE_PORTS_PER_ENG_4,
+ MODE_100G,
+ MODE_E5,
+ MAX_INIT_MODES
+};
+enum init_phases {
+ PHASE_ENGINE,
+ PHASE_PORT,
+ PHASE_PF,
+ PHASE_VF,
+ PHASE_QM_PF,
+ MAX_INIT_PHASES
+};
+
+
+enum init_split_types {
+ SPLIT_TYPE_NONE,
+ SPLIT_TYPE_PORT,
+ SPLIT_TYPE_PF,
+ SPLIT_TYPE_PORT_PF,
+ SPLIT_TYPE_VF,
+ MAX_INIT_SPLIT_TYPES
+};
+
/*
* init array types
diff --git a/drivers/net/qede/base/ecore_hw.c b/drivers/net/qede/base/ecore_hw.c
index 7f4db0a0..2bcc32d3 100644
--- a/drivers/net/qede/base/ecore_hw.c
+++ b/drivers/net/qede/base/ecore_hw.c
@@ -86,7 +86,6 @@ void ecore_ptt_pool_free(struct ecore_hwfn *p_hwfn)
if (p_hwfn->p_ptt_pool)
OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->p_ptt_pool->lock);
OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_ptt_pool);
- p_hwfn->p_ptt_pool = OSAL_NULL;
}
struct ecore_ptt *ecore_ptt_acquire(struct ecore_hwfn *p_hwfn)
@@ -496,8 +495,8 @@ static u32 ecore_dmae_idx_to_go_cmd(u8 idx)
return DMAE_REG_GO_C0 + (idx << 2);
}
-static enum _ecore_status_t
-ecore_dmae_post_command(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+static enum _ecore_status_t ecore_dmae_post_command(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
{
struct dmae_cmd *p_command = p_hwfn->dmae_info.p_dmae_cmd;
u8 idx_cmd = p_hwfn->dmae_info.channel, i;
@@ -774,6 +773,18 @@ ecore_dmae_execute_command(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t ecore_status = ECORE_SUCCESS;
u32 offset = 0;
+ if (p_hwfn->p_dev->recov_in_prog) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "Recovery is in progress. Avoid DMAE transaction [{src: addr 0x%lx, type %d}, {dst: addr 0x%lx, type %d}, size %d].\n",
+ (unsigned long)src_addr, src_type,
+ (unsigned long)dst_addr, dst_type,
+ size_in_dwords);
+ /* Return success to let the flow complete successfully
+ * w/o any error handling.
+ */
+ return ECORE_SUCCESS;
+ }
+
ecore_dmae_opcode(p_hwfn,
(src_type == ECORE_DMAE_ADDRESS_GRC),
(dst_type == ECORE_DMAE_ADDRESS_GRC), p_params);
@@ -906,44 +917,6 @@ ecore_dmae_host2host(struct ecore_hwfn *p_hwfn,
return rc;
}
-u16 ecore_get_qm_pq(struct ecore_hwfn *p_hwfn,
- enum protocol_type proto,
- union ecore_qm_pq_params *p_params)
-{
- u16 pq_id = 0;
-
- if ((proto == PROTOCOLID_CORE ||
- proto == PROTOCOLID_ETH) && !p_params) {
- DP_NOTICE(p_hwfn, true,
- "Protocol %d received NULL PQ params\n", proto);
- return 0;
- }
-
- switch (proto) {
- case PROTOCOLID_CORE:
- if (p_params->core.tc == LB_TC)
- pq_id = p_hwfn->qm_info.pure_lb_pq;
- else if (p_params->core.tc == OOO_LB_TC)
- pq_id = p_hwfn->qm_info.ooo_pq;
- else
- pq_id = p_hwfn->qm_info.offload_pq;
- break;
- case PROTOCOLID_ETH:
- pq_id = p_params->eth.tc;
- /* TODO - multi-CoS for VFs? */
- if (p_params->eth.is_vf)
- pq_id += p_hwfn->qm_info.vf_queues_offset +
- p_params->eth.vf_id;
- break;
- default:
- pq_id = 0;
- }
-
- pq_id = CM_TX_PQ_BASE + pq_id + RESC_START(p_hwfn, ECORE_PQ);
-
- return pq_id;
-}
-
void ecore_hw_err_notify(struct ecore_hwfn *p_hwfn,
enum ecore_hw_err_type err_type)
{
diff --git a/drivers/net/qede/base/ecore_init_fw_funcs.c b/drivers/net/qede/base/ecore_init_fw_funcs.c
index de08650b..004ab351 100644
--- a/drivers/net/qede/base/ecore_init_fw_funcs.c
+++ b/drivers/net/qede/base/ecore_init_fw_funcs.c
@@ -17,112 +17,156 @@
#include "ecore_hsi_init_tool.h"
#include "ecore_iro.h"
#include "ecore_init_fw_funcs.h"
-enum CmInterfaceEnum {
- MCM_SEC,
- MCM_PRI,
- UCM_SEC,
- UCM_PRI,
- TCM_SEC,
- TCM_PRI,
- YCM_SEC,
- YCM_PRI,
- XCM_SEC,
- XCM_PRI,
- NUM_OF_CM_INTERFACES
+
+#define CDU_VALIDATION_DEFAULT_CFG 61
+
+static u16 con_region_offsets[3][E4_NUM_OF_CONNECTION_TYPES] = {
+ { 400, 336, 352, 304, 304, 384, 416, 352}, /* region 3 offsets */
+ { 528, 496, 416, 448, 448, 512, 544, 480}, /* region 4 offsets */
+ { 608, 544, 496, 512, 576, 592, 624, 560} /* region 5 offsets */
+};
+static u16 task_region_offsets[1][E4_NUM_OF_CONNECTION_TYPES] = {
+ { 240, 240, 112, 0, 0, 0, 0, 96} /* region 1 offsets */
};
-/* general constants */
-#define QM_PQ_MEM_4KB(pq_size) \
-(pq_size ? DIV_ROUND_UP((pq_size + 1) * QM_PQ_ELEMENT_SIZE, 0x1000) : 0)
-#define QM_PQ_SIZE_256B(pq_size) \
-(pq_size ? DIV_ROUND_UP(pq_size, 0x100) - 1 : 0)
-#define QM_INVALID_PQ_ID 0xffff
-/* feature enable */
-#define QM_BYPASS_EN 1
-#define QM_BYTE_CRD_EN 1
-/* other PQ constants */
-#define QM_OTHER_PQS_PER_PF 4
-/* WFQ constants */
-#define QM_WFQ_UPPER_BOUND 62500000
+
+/* General constants */
+#define QM_PQ_MEM_4KB(pq_size) (pq_size ? DIV_ROUND_UP((pq_size + 1) * \
+ QM_PQ_ELEMENT_SIZE, 0x1000) : 0)
+#define QM_PQ_SIZE_256B(pq_size) (pq_size ? DIV_ROUND_UP(pq_size, 0x100) - 1 : \
+ 0)
+#define QM_INVALID_PQ_ID 0xffff
+
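Both sizing macros reduce to plain integer round-ups. A worked trace, assuming QM_PQ_ELEMENT_SIZE is 4 bytes (its definition lives in a header not shown in this diff):

    /* pq_size = 1000 CIDs, with QM_PQ_ELEMENT_SIZE assumed to be 4:
     *   QM_PQ_MEM_4KB(1000)   = DIV_ROUND_UP(1001 * 4, 0x1000) = 1 page
     *   QM_PQ_SIZE_256B(1000) = DIV_ROUND_UP(1000, 0x100) - 1  = 3
     * Both macros yield 0 for pq_size == 0, so unused PQs cost no memory.
     */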
+/* Feature enable */
+#define QM_BYPASS_EN 1
+#define QM_BYTE_CRD_EN 1
+
+/* Other PQ constants */
+#define QM_OTHER_PQS_PER_PF 4
+
+/* WFQ constants: */
+
+/* Upper bound in MB, 10 * burst size of 1ms in 50Gbps */
+#define QM_WFQ_UPPER_BOUND 62500000
+
+/* Bit of VOQ in WFQ VP PQ map */
#define QM_WFQ_VP_PQ_VOQ_SHIFT 0
+
+/* Bit of PF in WFQ VP PQ map */
#define QM_WFQ_VP_PQ_PF_SHIFT 5
+
+/* 0x9000 = 4*9*1024 */
#define QM_WFQ_INC_VAL(weight) ((weight) * 0x9000)
-#define QM_WFQ_MAX_INC_VAL 43750000
-/* RL constants */
-#define QM_RL_UPPER_BOUND 62500000
-#define QM_RL_PERIOD 5
+
+/* 0.7 * upper bound (62500000) */
+#define QM_WFQ_MAX_INC_VAL 43750000
+
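Since QM_WFQ_INC_VAL() is just a multiply by 0x9000 (36864), the bound above caps the usable weight range:

    /* QM_WFQ_INC_VAL(0xff) = 255 * 0x9000 = 9400320, well under
     * QM_WFQ_MAX_INC_VAL (43750000). Weights above 1186 would push the
     * increment past the bound and make ecore_pf_wfq_rt_init() below
     * reject the configuration with -1.
     */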
+/* RL constants: */
+
+/* Upper bound is set to 10 * burst size of 1ms in 50Gbps */
+#define QM_RL_UPPER_BOUND 62500000
+
+/* Period in us */
+#define QM_RL_PERIOD 5
+
+/* Period in 25MHz cycles */
#define QM_RL_PERIOD_CLK_25M (25 * QM_RL_PERIOD)
-#define QM_RL_MAX_INC_VAL 43750000
-/* RL increment value - the factor of 1.01 was added after seeing only
- * 99% factor reached in a 25Gbps port with DPDK RFC 2544 test.
- * In this scenario the PF RL was reducing the line rate to 99% although
- * the credit increment value was the correct one and FW calculated
- * correct packet sizes. The reason for the inaccuracy of the RL is
- * unknown at this point.
+
+/* 0.7 * upper bound (62500000) */
+#define QM_RL_MAX_INC_VAL 43750000
+
+/* RL increment value - the rate is specified in Mbps. The factor of 1.01 was
+ * added after seeing only 99% factor reached in a 25Gbps port with DPDK RFC
+ * 2544 test. In this scenario the PF RL was reducing the line rate to 99%
+ * although the credit increment value was the correct one and FW calculated
+ * correct packet sizes. The reason for the inaccuracy of the RL is unknown at
+ * this point.
*/
-/* rate in mbps */
#define QM_RL_INC_VAL(rate) OSAL_MAX_T(u32, (u32)(((rate ? rate : 1000000) * \
- QM_RL_PERIOD * 101) / (8 * 100)), 1)
+ QM_RL_PERIOD * 101) / (8 * 100)), 1)
+
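A worked trace of the credit arithmetic for one common rate:

    /* rate = 25000 (a 25G port, rate given in Mbps):
     *   bytes credited per 5us RL period = 25000 * 5 / 8         = 15625
     *   with the empirical 1.01 factor   = 25000 * 5 * 101 / 800 = 15781
     * i.e. QM_RL_INC_VAL(25000) == 15781, far below QM_RL_MAX_INC_VAL,
     * so ecore_pf_rl_rt_init() accepts it.
     */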
/* AFullOprtnstcCrdMask constants */
#define QM_OPPOR_LINE_VOQ_DEF 1
#define QM_OPPOR_FW_STOP_DEF 0
#define QM_OPPOR_PQ_EMPTY_DEF 1
-/* Command Queue constants */
-#define PBF_CMDQ_PURE_LB_LINES 150
+
+/* Command Queue constants: */
+
+/* Pure LB CmdQ lines (+spare) */
+#define PBF_CMDQ_PURE_LB_LINES 150
+
#define PBF_CMDQ_LINES_RT_OFFSET(voq) \
-(PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + \
-voq * (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET \
-- PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))
+ (PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + voq * \
+ (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \
+ PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))
+
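The per-VOQ runtime entries sit at a fixed stride, so the macro derives the stride from the first two VOQ offsets rather than hard-coding it. With hypothetical offsets (not the real runtime-array values):

    /* If VOQ0's runtime offset were 30000 and VOQ1's 30002 (made-up
     * numbers for illustration), the stride is 2 and
     * PBF_CMDQ_LINES_RT_OFFSET(5) resolves to 30000 + 5 * 2 = 30010.
     */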
#define PBF_BTB_GUARANTEED_RT_OFFSET(voq) \
-(PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + voq * \
-(PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))
+ (PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + voq * \
+ (PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - \
+ PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))
+
#define QM_VOQ_LINE_CRD(pbf_cmd_lines) \
((((pbf_cmd_lines) - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT)
+
/* BTB: blocks constants (block size = 256B) */
-#define BTB_JUMBO_PKT_BLOCKS 38 /* 256B blocks in 9700B packet */
-/* headroom per-port */
-#define BTB_HEADROOM_BLOCKS BTB_JUMBO_PKT_BLOCKS
+
+/* 256B blocks in 9700B packet */
+#define BTB_JUMBO_PKT_BLOCKS 38
+
+/* Headroom per-port */
+#define BTB_HEADROOM_BLOCKS BTB_JUMBO_PKT_BLOCKS
#define BTB_PURE_LB_FACTOR 10
-#define BTB_PURE_LB_RATIO 7 /* factored (hence really 0.7) */
+
+/* Factored (hence really 0.7) */
+#define BTB_PURE_LB_RATIO 7
+
/* QM stop command constants */
-#define QM_STOP_PQ_MASK_WIDTH 32
-#define QM_STOP_CMD_ADDR 0x2
-#define QM_STOP_CMD_STRUCT_SIZE 2
+#define QM_STOP_PQ_MASK_WIDTH 32
+#define QM_STOP_CMD_ADDR 2
+#define QM_STOP_CMD_STRUCT_SIZE 2
#define QM_STOP_CMD_PAUSE_MASK_OFFSET 0
#define QM_STOP_CMD_PAUSE_MASK_SHIFT 0
-#define QM_STOP_CMD_PAUSE_MASK_MASK 0xffffffff /* @DPDK */
-#define QM_STOP_CMD_GROUP_ID_OFFSET 1
-#define QM_STOP_CMD_GROUP_ID_SHIFT 16
-#define QM_STOP_CMD_GROUP_ID_MASK 15
-#define QM_STOP_CMD_PQ_TYPE_OFFSET 1
-#define QM_STOP_CMD_PQ_TYPE_SHIFT 24
-#define QM_STOP_CMD_PQ_TYPE_MASK 1
-#define QM_STOP_CMD_MAX_POLL_COUNT 100
-#define QM_STOP_CMD_POLL_PERIOD_US 500
+#define QM_STOP_CMD_PAUSE_MASK_MASK 0xffffffff /* @DPDK */
+#define QM_STOP_CMD_GROUP_ID_OFFSET 1
+#define QM_STOP_CMD_GROUP_ID_SHIFT 16
+#define QM_STOP_CMD_GROUP_ID_MASK 15
+#define QM_STOP_CMD_PQ_TYPE_OFFSET 1
+#define QM_STOP_CMD_PQ_TYPE_SHIFT 24
+#define QM_STOP_CMD_PQ_TYPE_MASK 1
+#define QM_STOP_CMD_MAX_POLL_COUNT 100
+#define QM_STOP_CMD_POLL_PERIOD_US 500
+
/* QM command macros */
-#define QM_CMD_STRUCT_SIZE(cmd) cmd##_STRUCT_SIZE
+#define QM_CMD_STRUCT_SIZE(cmd) cmd##_STRUCT_SIZE
#define QM_CMD_SET_FIELD(var, cmd, field, value) \
-SET_FIELD(var[cmd##_##field##_OFFSET], cmd##_##field, value)
+ SET_FIELD(var[cmd##_##field##_OFFSET], cmd##_##field, value)
+
/* QM: VOQ macros */
#define PHYS_VOQ(port, tc, max_phys_tcs_per_port) \
-((port) * (max_phys_tcs_per_port) + (tc))
-#define LB_VOQ(port) (MAX_PHYS_VOQS + (port))
+ ((port) * (max_phys_tcs_per_port) + (tc))
+#define LB_VOQ(port) (MAX_PHYS_VOQS + (port))
#define VOQ(port, tc, max_phys_tcs_per_port) \
-((tc) < LB_TC ? PHYS_VOQ(port, tc, max_phys_tcs_per_port) : LB_VOQ(port))
+ ((tc) < LB_TC ? PHYS_VOQ(port, tc, max_phys_tcs_per_port) : \
+ LB_VOQ(port))
+
+
/******************** INTERNAL IMPLEMENTATION *********************/
+
/* Prepare PF RL enable/disable runtime init values */
static void ecore_enable_pf_rl(struct ecore_hwfn *p_hwfn, bool pf_rl_en)
{
STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
if (pf_rl_en) {
- /* enable RLs for all VOQs */
+ /* Enable RLs for all VOQs */
STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_RT_OFFSET,
(1 << MAX_NUM_VOQS) - 1);
- /* write RL period */
+
+ /* Write RL period */
STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIOD_RT_OFFSET,
QM_RL_PERIOD_CLK_25M);
STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIODTIMER_RT_OFFSET,
QM_RL_PERIOD_CLK_25M);
- /* set credit threshold for QM bypass flow */
+
+ /* Set credit threshold for QM bypass flow */
if (QM_BYPASS_EN)
STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET,
QM_RL_UPPER_BOUND);
@@ -133,7 +177,8 @@ static void ecore_enable_pf_rl(struct ecore_hwfn *p_hwfn, bool pf_rl_en)
static void ecore_enable_pf_wfq(struct ecore_hwfn *p_hwfn, bool pf_wfq_en)
{
STORE_RT_REG(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET, pf_wfq_en ? 1 : 0);
- /* set credit threshold for QM bypass flow */
+
+ /* Set credit threshold for QM bypass flow */
if (pf_wfq_en && QM_BYPASS_EN)
STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET,
QM_WFQ_UPPER_BOUND);
@@ -145,12 +190,13 @@ static void ecore_enable_vport_rl(struct ecore_hwfn *p_hwfn, bool vport_rl_en)
STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET,
vport_rl_en ? 1 : 0);
if (vport_rl_en) {
- /* write RL period (use timer 0 only) */
+ /* Write RL period (use timer 0 only) */
STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIOD_0_RT_OFFSET,
QM_RL_PERIOD_CLK_25M);
STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET,
QM_RL_PERIOD_CLK_25M);
- /* set credit threshold for QM bypass flow */
+
+ /* Set credit threshold for QM bypass flow */
if (QM_BYPASS_EN)
STORE_RT_REG(p_hwfn,
QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET,
@@ -163,7 +209,8 @@ static void ecore_enable_vport_wfq(struct ecore_hwfn *p_hwfn, bool vport_wfq_en)
{
STORE_RT_REG(p_hwfn, QM_REG_WFQVPENABLE_RT_OFFSET,
vport_wfq_en ? 1 : 0);
- /* set credit threshold for QM bypass flow */
+
+ /* Set credit threshold for QM bypass flow */
if (vport_wfq_en && QM_BYPASS_EN)
STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET,
QM_WFQ_UPPER_BOUND);
@@ -176,13 +223,9 @@ static void ecore_cmdq_lines_voq_rt_init(struct ecore_hwfn *p_hwfn,
u8 voq, u16 cmdq_lines)
{
u32 qm_line_crd;
- /* In A0 - Limit the size of pbf queue so that only 511 commands
- * with the minimum size of 4 (FCoE minimum size)
- */
- bool is_bb_a0 = ECORE_IS_BB_A0(p_hwfn->p_dev);
- if (is_bb_a0)
- cmdq_lines = OSAL_MIN_T(u32, cmdq_lines, 1022);
+
qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);
+
OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq),
(u32)cmdq_lines);
STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + voq, qm_line_crd);
@@ -198,38 +241,43 @@ static void ecore_cmdq_lines_rt_init(struct ecore_hwfn *p_hwfn,
port_params[MAX_NUM_PORTS])
{
u8 tc, voq, port_id, num_tcs_in_port;
- /* clear PBF lines for all VOQs */
+
+ /* Clear PBF lines for all VOQs */
for (voq = 0; voq < MAX_NUM_VOQS; voq++)
STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq), 0);
+
for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
- if (port_params[port_id].active) {
- u16 phys_lines, phys_lines_per_tc;
- /* find #lines to divide between active physical TCs */
- phys_lines =
- port_params[port_id].num_pbf_cmd_lines -
- PBF_CMDQ_PURE_LB_LINES;
- /* find #lines per active physical TC */
- num_tcs_in_port = 0;
- for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
- if (((port_params[port_id].active_phys_tcs >>
- tc) & 0x1) == 1)
- num_tcs_in_port++;
- }
- phys_lines_per_tc = phys_lines / num_tcs_in_port;
- /* init registers per active TC */
- for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
- if (((port_params[port_id].active_phys_tcs >>
- tc) & 0x1) == 1) {
- voq = PHYS_VOQ(port_id, tc,
- max_phys_tcs_per_port);
- ecore_cmdq_lines_voq_rt_init(p_hwfn,
- voq, phys_lines_per_tc);
- }
+ u16 phys_lines, phys_lines_per_tc;
+
+ if (!port_params[port_id].active)
+ continue;
+
+ /* Find #lines to divide between the active physical TCs */
+ phys_lines = port_params[port_id].num_pbf_cmd_lines -
+ PBF_CMDQ_PURE_LB_LINES;
+
+ /* Find #lines per active physical TC */
+ num_tcs_in_port = 0;
+ for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++)
+ if (((port_params[port_id].active_phys_tcs >> tc) &
+ 0x1) == 1)
+ num_tcs_in_port++;
+ phys_lines_per_tc = phys_lines / num_tcs_in_port;
+
+ /* Init registers per active TC */
+ for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
+ if (((port_params[port_id].active_phys_tcs >> tc) &
+ 0x1) == 1) {
+ voq = PHYS_VOQ(port_id, tc,
+ max_phys_tcs_per_port);
+ ecore_cmdq_lines_voq_rt_init(p_hwfn, voq,
+ phys_lines_per_tc);
}
- /* init registers for pure LB TC */
- ecore_cmdq_lines_voq_rt_init(p_hwfn, LB_VOQ(port_id),
- PBF_CMDQ_PURE_LB_LINES);
}
+
+ /* Init registers for pure LB TC */
+ ecore_cmdq_lines_voq_rt_init(p_hwfn, LB_VOQ(port_id),
+ PBF_CMDQ_PURE_LB_LINES);
}
}
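A short numeric trace of the per-TC split above, using illustrative port parameters rather than values from any real NVM image:

    /* num_pbf_cmd_lines = 3491, active_phys_tcs = 0x7 (TC0..TC2):
     *   phys_lines        = 3491 - PBF_CMDQ_PURE_LB_LINES = 3341
     *   phys_lines_per_tc = 3341 / 3                      = 1113
     * Each active physical VOQ gets 1113 command lines; the port's
     * pure-LB VOQ then gets the fixed PBF_CMDQ_PURE_LB_LINES (150).
     */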
@@ -259,50 +307,51 @@ static void ecore_btb_blocks_rt_init(struct ecore_hwfn *p_hwfn,
struct init_qm_port_params
port_params[MAX_NUM_PORTS])
{
- u8 tc, voq, port_id, num_tcs_in_port;
u32 usable_blocks, pure_lb_blocks, phys_blocks;
+ u8 tc, voq, port_id, num_tcs_in_port;
+
for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
- if (port_params[port_id].active) {
- /* subtract headroom blocks */
- usable_blocks =
- port_params[port_id].num_btb_blocks -
- BTB_HEADROOM_BLOCKS;
-/* find blocks per physical TC. use factor to avoid floating arithmethic */
-
- num_tcs_in_port = 0;
- for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++)
- if (((port_params[port_id].active_phys_tcs >>
- tc) & 0x1) == 1)
- num_tcs_in_port++;
- pure_lb_blocks =
- (usable_blocks * BTB_PURE_LB_FACTOR) /
- (num_tcs_in_port *
- BTB_PURE_LB_FACTOR + BTB_PURE_LB_RATIO);
- pure_lb_blocks =
- OSAL_MAX_T(u32, BTB_JUMBO_PKT_BLOCKS,
- pure_lb_blocks / BTB_PURE_LB_FACTOR);
- phys_blocks =
- (usable_blocks -
- pure_lb_blocks) /
- num_tcs_in_port;
- /* init physical TCs */
- for (tc = 0;
- tc < NUM_OF_PHYS_TCS;
- tc++) {
- if (((port_params[port_id].active_phys_tcs >>
- tc) & 0x1) == 1) {
- voq = PHYS_VOQ(port_id, tc,
- max_phys_tcs_per_port);
- STORE_RT_REG(p_hwfn,
+ if (!port_params[port_id].active)
+ continue;
+
+ /* Subtract headroom blocks */
+ usable_blocks = port_params[port_id].num_btb_blocks -
+ BTB_HEADROOM_BLOCKS;
+
+ /* Find blocks per physical TC. Use a factor to avoid floating
+ * arithmetic.
+ */
+ num_tcs_in_port = 0;
+ for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++)
+ if (((port_params[port_id].active_phys_tcs >> tc) &
+ 0x1) == 1)
+ num_tcs_in_port++;
+
+ pure_lb_blocks = (usable_blocks * BTB_PURE_LB_FACTOR) /
+ (num_tcs_in_port * BTB_PURE_LB_FACTOR +
+ BTB_PURE_LB_RATIO);
+ pure_lb_blocks = OSAL_MAX_T(u32, BTB_JUMBO_PKT_BLOCKS,
+ pure_lb_blocks /
+ BTB_PURE_LB_FACTOR);
+ phys_blocks = (usable_blocks - pure_lb_blocks) /
+ num_tcs_in_port;
+
+ /* Init physical TCs */
+ for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
+ if (((port_params[port_id].active_phys_tcs >> tc) &
+ 0x1) == 1) {
+ voq = PHYS_VOQ(port_id, tc,
+ max_phys_tcs_per_port);
+ STORE_RT_REG(p_hwfn,
PBF_BTB_GUARANTEED_RT_OFFSET(voq),
phys_blocks);
- }
}
- /* init pure LB TC */
- STORE_RT_REG(p_hwfn,
- PBF_BTB_GUARANTEED_RT_OFFSET(
- LB_VOQ(port_id)), pure_lb_blocks);
}
+
+ /* Init pure LB TC */
+ STORE_RT_REG(p_hwfn,
+ PBF_BTB_GUARANTEED_RT_OFFSET(LB_VOQ(port_id)),
+ pure_lb_blocks);
}
}
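The BTB split uses the factor-of-10 trick the comment mentions to keep the 0.7 loopback ratio in integer math. An illustrative trace with made-up block counts:

    /* num_btb_blocks = 1000, 4 active TCs:
     *   usable_blocks  = 1000 - BTB_HEADROOM_BLOCKS = 962
     *   pure_lb_blocks = 962 * 10 / (4 * 10 + 7)    = 204
     *                    OSAL_MAX_T(38, 204 / 10)   = 38
     *   phys_blocks    = (962 - 38) / 4             = 231
     * Each physical VOQ is guaranteed 231 blocks, the pure-LB VOQ 38
     * (the BTB_JUMBO_PKT_BLOCKS floor, one full 9700B packet).
     */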
@@ -323,59 +372,69 @@ static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
struct init_qm_pq_params *pq_params,
struct init_qm_vport_params *vport_params)
{
- u16 i, pq_id, pq_group;
- u16 num_pqs = num_pf_pqs + num_vf_pqs;
- u16 first_pq_group = start_pq / QM_PF_QUEUE_GROUP_SIZE;
- u16 last_pq_group = (start_pq + num_pqs - 1) / QM_PF_QUEUE_GROUP_SIZE;
- bool is_bb_a0 = ECORE_IS_BB_A0(p_hwfn->p_dev);
- /* a bit per Tx PQ indicating if the PQ is associated with a VF */
+ /* A bit per Tx PQ indicating if the PQ is associated with a VF */
u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
- u32 tx_pq_vf_mask_width = is_bb_a0 ? 32 : QM_PF_QUEUE_GROUP_SIZE;
- u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / tx_pq_vf_mask_width;
- u32 pq_mem_4kb = QM_PQ_MEM_4KB(num_pf_cids);
- u32 vport_pq_mem_4kb = QM_PQ_MEM_4KB(num_vf_cids);
- u32 mem_addr_4kb = base_mem_addr_4kb;
- /* set mapping from PQ group to PF */
+ u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE;
+ u16 num_pqs, first_pq_group, last_pq_group, i, pq_id, pq_group;
+ u32 pq_mem_4kb, vport_pq_mem_4kb, mem_addr_4kb;
+
+ num_pqs = num_pf_pqs + num_vf_pqs;
+
+ first_pq_group = start_pq / QM_PF_QUEUE_GROUP_SIZE;
+ last_pq_group = (start_pq + num_pqs - 1) / QM_PF_QUEUE_GROUP_SIZE;
+
+ pq_mem_4kb = QM_PQ_MEM_4KB(num_pf_cids);
+ vport_pq_mem_4kb = QM_PQ_MEM_4KB(num_vf_cids);
+ mem_addr_4kb = base_mem_addr_4kb;
+
+ /* Set mapping from PQ group to PF */
for (pq_group = first_pq_group; pq_group <= last_pq_group; pq_group++)
STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group,
(u32)(pf_id));
- /* set PQ sizes */
+
+ /* Set PQ sizes */
STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET,
QM_PQ_SIZE_256B(num_pf_cids));
STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_1_RT_OFFSET,
QM_PQ_SIZE_256B(num_vf_cids));
- /* go over all Tx PQs */
+
+ /* Go over all Tx PQs */
for (i = 0, pq_id = start_pq; i < num_pqs; i++, pq_id++) {
- struct qm_rf_pq_map tx_pq_map;
- u8 voq =
- VOQ(port_id, pq_params[i].tc_id, max_phys_tcs_per_port);
- bool is_vf_pq = (i >= num_pf_pqs);
- /* added to avoid compilation warning */
u32 max_qm_global_rls = MAX_QM_GLOBAL_RLS;
- bool rl_valid = pq_params[i].rl_valid &&
- pq_params[i].vport_id < max_qm_global_rls;
- /* update first Tx PQ of VPORT/TC */
- u8 vport_id_in_pf = pq_params[i].vport_id - start_vport;
- u16 first_tx_pq_id =
- vport_params[vport_id_in_pf].first_tx_pq_id[pq_params[i].
- tc_id];
+ struct qm_rf_pq_map tx_pq_map;
+ bool is_vf_pq, rl_valid;
+ u8 voq, vport_id_in_pf;
+ u16 first_tx_pq_id;
+
+ voq = VOQ(port_id, pq_params[i].tc_id, max_phys_tcs_per_port);
+ is_vf_pq = (i >= num_pf_pqs);
+ rl_valid = pq_params[i].rl_valid && pq_params[i].vport_id <
+ max_qm_global_rls;
+
+ /* Update first Tx PQ of VPORT/TC */
+ vport_id_in_pf = pq_params[i].vport_id - start_vport;
+ first_tx_pq_id =
+ vport_params[vport_id_in_pf].first_tx_pq_id[pq_params[i].tc_id];
if (first_tx_pq_id == QM_INVALID_PQ_ID) {
- /* create new VP PQ */
+ /* Create new VP PQ */
vport_params[vport_id_in_pf].
first_tx_pq_id[pq_params[i].tc_id] = pq_id;
first_tx_pq_id = pq_id;
- /* map VP PQ to VOQ and PF */
+
+ /* Map VP PQ to VOQ and PF */
STORE_RT_REG(p_hwfn,
QM_REG_WFQVPMAP_RT_OFFSET + first_tx_pq_id,
(voq << QM_WFQ_VP_PQ_VOQ_SHIFT) | (pf_id <<
QM_WFQ_VP_PQ_PF_SHIFT));
}
- /* check RL ID */
+
+ /* Check RL ID */
if (pq_params[i].rl_valid && pq_params[i].vport_id >=
max_qm_global_rls)
DP_NOTICE(p_hwfn, true,
- "Invalid VPORT ID for rate limiter config");
- /* fill PQ map entry */
+ "Invalid VPORT ID for rate limiter config\n");
+
+ /* Fill PQ map entry */
OSAL_MEMSET(&tx_pq_map, 0, sizeof(tx_pq_map));
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_PQ_VALID, 1);
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_VALID,
@@ -386,44 +445,30 @@ static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VOQ, voq);
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP,
pq_params[i].wrr_group);
- /* write PQ map entry to CAM */
+
+ /* Write PQ map entry to CAM */
STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + pq_id,
*((u32 *)&tx_pq_map));
- /* set base address */
+
+ /* Set base address */
STORE_RT_REG(p_hwfn, QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id,
mem_addr_4kb);
- /* check if VF PQ */
+
+ /* If VF PQ, add indication to PQ VF mask */
if (is_vf_pq) {
- /* if PQ is associated with a VF, add indication to PQ
- * VF mask
- */
- tx_pq_vf_mask[pq_id / tx_pq_vf_mask_width] |=
- (1 << (pq_id % tx_pq_vf_mask_width));
+ tx_pq_vf_mask[pq_id / QM_PF_QUEUE_GROUP_SIZE] |=
+ (1 << (pq_id % QM_PF_QUEUE_GROUP_SIZE));
mem_addr_4kb += vport_pq_mem_4kb;
} else {
mem_addr_4kb += pq_mem_4kb;
}
}
- /* store Tx PQ VF mask to size select register */
- for (i = 0; i < num_tx_pq_vf_masks; i++) {
- if (tx_pq_vf_mask[i]) {
- if (is_bb_a0) {
- /* A0-only: perform read-modify-write
- *(fixed in B0)
- */
- u32 curr_mask =
- is_first_pf ? 0 : ecore_rd(p_hwfn, p_ptt,
- QM_REG_MAXPQSIZETXSEL_0
- + i * 4);
- STORE_RT_REG(p_hwfn,
- QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET +
- i, curr_mask | tx_pq_vf_mask[i]);
- } else
- STORE_RT_REG(p_hwfn,
- QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET +
- i, tx_pq_vf_mask[i]);
- }
- }
+
+ /* Store Tx PQ VF mask to size select register */
+ for (i = 0; i < num_tx_pq_vf_masks; i++)
+ if (tx_pq_vf_mask[i])
+ STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET +
+ i, tx_pq_vf_mask[i]);
}
/* Prepare Other PQ mapping runtime init values for the specified PF */
@@ -433,20 +478,26 @@ static void ecore_other_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
u32 num_pf_cids,
u32 num_tids, u32 base_mem_addr_4kb)
{
- u16 i, pq_id;
-/* a single other PQ grp is used in each PF, where PQ group i is used in PF i */
-
- u16 pq_group = pf_id;
- u32 pq_size = num_pf_cids + num_tids;
- u32 pq_mem_4kb = QM_PQ_MEM_4KB(pq_size);
- u32 mem_addr_4kb = base_mem_addr_4kb;
- /* map PQ group to PF */
+ u32 pq_size, pq_mem_4kb, mem_addr_4kb;
+ u16 i, pq_id, pq_group;
+
+ /* A single other PQ group is used in each PF, where PQ group i is used
+ * in PF i.
+ */
+ pq_group = pf_id;
+ pq_size = num_pf_cids + num_tids;
+ pq_mem_4kb = QM_PQ_MEM_4KB(pq_size);
+ mem_addr_4kb = base_mem_addr_4kb;
+
+ /* Map PQ group to PF */
STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group,
(u32)(pf_id));
- /* set PQ sizes */
+
+ /* Set PQ sizes */
STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET,
QM_PQ_SIZE_256B(pq_size));
- /* set base address */
+
+ /* Set base address */
for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE;
i < QM_OTHER_PQS_PER_PF; i++, pq_id++) {
STORE_RT_REG(p_hwfn, QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id,
@@ -454,7 +505,10 @@ static void ecore_other_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
mem_addr_4kb += pq_mem_4kb;
}
}
-/* Prepare PF WFQ runtime init values for specified PF. Return -1 on error. */
+
+/* Prepare PF WFQ runtime init values for the specified PF.
+ * Return -1 on error.
+ */
static int ecore_pf_wfq_rt_init(struct ecore_hwfn *p_hwfn,
u8 port_id,
u8 pf_id,
@@ -463,76 +517,89 @@ static int ecore_pf_wfq_rt_init(struct ecore_hwfn *p_hwfn,
u16 num_tx_pqs,
struct init_qm_pq_params *pq_params)
{
+ u32 inc_val, crd_reg_offset;
+ u8 voq;
u16 i;
- u32 inc_val;
- u32 crd_reg_offset =
- (pf_id <
- MAX_NUM_PFS_BB ? QM_REG_WFQPFCRD_RT_OFFSET :
- QM_REG_WFQPFCRD_MSB_RT_OFFSET) + (pf_id % MAX_NUM_PFS_BB);
+
+ crd_reg_offset = (pf_id < MAX_NUM_PFS_BB ? QM_REG_WFQPFCRD_RT_OFFSET :
+ QM_REG_WFQPFCRD_MSB_RT_OFFSET) +
+ (pf_id % MAX_NUM_PFS_BB);
+
inc_val = QM_WFQ_INC_VAL(pf_wfq);
- if (inc_val == 0 || inc_val > QM_WFQ_MAX_INC_VAL) {
- DP_NOTICE(p_hwfn, true, "Invalid PF WFQ weight configuration");
+ if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
+ DP_NOTICE(p_hwfn, true,
+ "Invalid PF WFQ weight configuration\n");
return -1;
}
+
for (i = 0; i < num_tx_pqs; i++) {
- u8 voq =
- VOQ(port_id, pq_params[i].tc_id, max_phys_tcs_per_port);
+ voq = VOQ(port_id, pq_params[i].tc_id, max_phys_tcs_per_port);
OVERWRITE_RT_REG(p_hwfn, crd_reg_offset + voq * MAX_NUM_PFS_BB,
(u32)QM_WFQ_CRD_REG_SIGN_BIT);
}
+
STORE_RT_REG(p_hwfn, QM_REG_WFQPFUPPERBOUND_RT_OFFSET + pf_id,
QM_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT);
STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + pf_id, inc_val);
return 0;
}
-/* Prepare PF RL runtime init values for specified PF. Return -1 on error. */
+
+/* Prepare PF RL runtime init values for the specified PF.
+ * Return -1 on error.
+ */
static int ecore_pf_rl_rt_init(struct ecore_hwfn *p_hwfn, u8 pf_id, u32 pf_rl)
{
- u32 inc_val = QM_RL_INC_VAL(pf_rl);
+ u32 inc_val;
+
+ inc_val = QM_RL_INC_VAL(pf_rl);
if (inc_val > QM_RL_MAX_INC_VAL) {
- DP_NOTICE(p_hwfn, true, "Invalid PF rate limit configuration");
+ DP_NOTICE(p_hwfn, true,
+ "Invalid PF rate limit configuration\n");
return -1;
}
+
STORE_RT_REG(p_hwfn, QM_REG_RLPFCRD_RT_OFFSET + pf_id,
(u32)QM_RL_CRD_REG_SIGN_BIT);
STORE_RT_REG(p_hwfn, QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id,
QM_RL_UPPER_BOUND | (u32)QM_RL_CRD_REG_SIGN_BIT);
STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, inc_val);
+
return 0;
}
-/* Prepare VPORT WFQ runtime init values for the specified VPORTs. Return -1 on
- * error.
+
+/* Prepare VPORT WFQ runtime init values for the specified VPORTs.
+ * Return -1 on error.
*/
static int ecore_vp_wfq_rt_init(struct ecore_hwfn *p_hwfn,
u8 num_vports,
struct init_qm_vport_params *vport_params)
{
- u8 tc, i;
+ u16 vport_pq_id;
u32 inc_val;
- /* go over all PF VPORTs */
+ u8 tc, i;
+
+ /* Go over all PF VPORTs */
for (i = 0; i < num_vports; i++) {
- if (vport_params[i].vport_wfq) {
- inc_val = QM_WFQ_INC_VAL(vport_params[i].vport_wfq);
- if (inc_val > QM_WFQ_MAX_INC_VAL) {
- DP_NOTICE(p_hwfn, true,
- "Invalid VPORT WFQ weight config");
- return -1;
- }
- /* each VPORT can have several VPORT PQ IDs for
- * different TCs
- */
- for (tc = 0; tc < NUM_OF_TCS; tc++) {
- u16 vport_pq_id =
- vport_params[i].first_tx_pq_id[tc];
- if (vport_pq_id != QM_INVALID_PQ_ID) {
- STORE_RT_REG(p_hwfn,
- QM_REG_WFQVPCRD_RT_OFFSET +
- vport_pq_id,
- (u32)QM_WFQ_CRD_REG_SIGN_BIT);
- STORE_RT_REG(p_hwfn,
- QM_REG_WFQVPWEIGHT_RT_OFFSET
- + vport_pq_id, inc_val);
- }
+ if (!vport_params[i].vport_wfq)
+ continue;
+
+ inc_val = QM_WFQ_INC_VAL(vport_params[i].vport_wfq);
+ if (inc_val > QM_WFQ_MAX_INC_VAL) {
+ DP_NOTICE(p_hwfn, true,
+ "Invalid VPORT WFQ weight configuration\n");
+ return -1;
+ }
+
+ /* Each VPORT can have several VPORT PQ IDs for various TCs */
+ for (tc = 0; tc < NUM_OF_TCS; tc++) {
+ vport_pq_id = vport_params[i].first_tx_pq_id[tc];
+ if (vport_pq_id != QM_INVALID_PQ_ID) {
+ STORE_RT_REG(p_hwfn, QM_REG_WFQVPCRD_RT_OFFSET +
+ vport_pq_id,
+ (u32)QM_WFQ_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn,
+ QM_REG_WFQVPWEIGHT_RT_OFFSET +
+ vport_pq_id, inc_val);
}
}
}
@@ -548,19 +615,23 @@ static int ecore_vport_rl_rt_init(struct ecore_hwfn *p_hwfn,
struct init_qm_vport_params *vport_params)
{
u8 i, vport_id;
+ u32 inc_val;
+
if (start_vport + num_vports >= MAX_QM_GLOBAL_RLS) {
DP_NOTICE(p_hwfn, true,
- "Invalid VPORT ID for rate limiter configuration");
+ "Invalid VPORT ID for rate limiter configuration\n");
return -1;
}
- /* go over all PF VPORTs */
+
+ /* Go over all PF VPORTs */
for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
u32 inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl);
if (inc_val > QM_RL_MAX_INC_VAL) {
DP_NOTICE(p_hwfn, true,
- "Invalid VPORT rate-limit configuration");
+ "Invalid VPORT rate-limit configuration\n");
return -1;
}
+
STORE_RT_REG(p_hwfn, QM_REG_RLGLBLCRD_RT_OFFSET + vport_id,
(u32)QM_RL_CRD_REG_SIGN_BIT);
STORE_RT_REG(p_hwfn,
@@ -569,6 +640,7 @@ static int ecore_vport_rl_rt_init(struct ecore_hwfn *p_hwfn,
STORE_RT_REG(p_hwfn, QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id,
inc_val);
}
+
return 0;
}
@@ -576,17 +648,20 @@ static bool ecore_poll_on_qm_cmd_ready(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
u32 reg_val, i;
- for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && reg_val == 0;
+
+ for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && !reg_val;
i++) {
OSAL_UDELAY(QM_STOP_CMD_POLL_PERIOD_US);
reg_val = ecore_rd(p_hwfn, p_ptt, QM_REG_SDMCMDREADY);
}
- /* check if timeout while waiting for SDM command ready */
+
+ /* Check if timeout while waiting for SDM command ready */
if (i == QM_STOP_CMD_MAX_POLL_COUNT) {
DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG,
"Timeout waiting for QM SDM cmd ready signal\n");
return false;
}
+
return true;
}
@@ -596,15 +671,19 @@ static bool ecore_send_qm_cmd(struct ecore_hwfn *p_hwfn,
{
if (!ecore_poll_on_qm_cmd_ready(p_hwfn, p_ptt))
return false;
+
ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDADDR, cmd_addr);
ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATALSB, cmd_data_lsb);
ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATAMSB, cmd_data_msb);
ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 1);
ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 0);
+
return ecore_poll_on_qm_cmd_ready(p_hwfn, p_ptt);
}
+
/******************** INTERFACE IMPLEMENTATION *********************/
+
u32 ecore_qm_pf_mem_size(u8 pf_id,
u32 num_pf_cids,
u32 num_vf_cids,
@@ -625,32 +704,42 @@ int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn,
struct init_qm_port_params
port_params[MAX_NUM_PORTS])
{
- /* init AFullOprtnstcCrdMask */
- u32 mask =
- (QM_OPPOR_LINE_VOQ_DEF << QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT) |
- (QM_BYTE_CRD_EN << QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT) |
- (pf_wfq_en << QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT) |
- (vport_wfq_en << QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT) |
- (pf_rl_en << QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT) |
- (vport_rl_en << QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT) |
- (QM_OPPOR_FW_STOP_DEF << QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT) |
- (QM_OPPOR_PQ_EMPTY_DEF <<
- QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT);
+ u32 mask;
+
+ /* Init AFullOprtnstcCrdMask */
+ mask = (QM_OPPOR_LINE_VOQ_DEF <<
+ QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT) |
+ (QM_BYTE_CRD_EN << QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT) |
+ (pf_wfq_en << QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT) |
+ (vport_wfq_en << QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT) |
+ (pf_rl_en << QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT) |
+ (vport_rl_en << QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT) |
+ (QM_OPPOR_FW_STOP_DEF <<
+ QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT) |
+ (QM_OPPOR_PQ_EMPTY_DEF <<
+ QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT);
STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask);
- /* enable/disable PF RL */
+
+ /* Enable/disable PF RL */
ecore_enable_pf_rl(p_hwfn, pf_rl_en);
- /* enable/disable PF WFQ */
+
+ /* Enable/disable PF WFQ */
ecore_enable_pf_wfq(p_hwfn, pf_wfq_en);
- /* enable/disable VPORT RL */
+
+ /* Enable/disable VPORT RL */
ecore_enable_vport_rl(p_hwfn, vport_rl_en);
- /* enable/disable VPORT WFQ */
+
+ /* Enable/disable VPORT WFQ */
ecore_enable_vport_wfq(p_hwfn, vport_wfq_en);
- /* init PBF CMDQ line credit */
+
+ /* Init PBF CMDQ line credit */
ecore_cmdq_lines_rt_init(p_hwfn, max_ports_per_engine,
max_phys_tcs_per_port, port_params);
- /* init BTB blocks in PBF */
+
+ /* Init BTB blocks in PBF */
ecore_btb_blocks_rt_init(p_hwfn, max_ports_per_engine,
max_phys_tcs_per_port, port_params);
+
return 0;
}
@@ -673,66 +762,86 @@ int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,
struct init_qm_pq_params *pq_params,
struct init_qm_vport_params *vport_params)
{
+ u32 other_mem_size_4kb;
u8 tc, i;
- u32 other_mem_size_4kb =
- QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF;
- /* clear first Tx PQ ID array for each VPORT */
+
+ other_mem_size_4kb = QM_PQ_MEM_4KB(num_pf_cids + num_tids) *
+ QM_OTHER_PQS_PER_PF;
+
+ /* Clear first Tx PQ ID array for each VPORT */
for (i = 0; i < num_vports; i++)
for (tc = 0; tc < NUM_OF_TCS; tc++)
vport_params[i].first_tx_pq_id[tc] = QM_INVALID_PQ_ID;
- /* map Other PQs (if any) */
+
+ /* Map Other PQs (if any) */
#if QM_OTHER_PQS_PER_PF > 0
ecore_other_pq_map_rt_init(p_hwfn, port_id, pf_id, num_pf_cids,
num_tids, 0);
#endif
- /* map Tx PQs */
+
+ /* Map Tx PQs */
ecore_tx_pq_map_rt_init(p_hwfn, p_ptt, port_id, pf_id,
max_phys_tcs_per_port, is_first_pf, num_pf_cids,
num_vf_cids, start_pq, num_pf_pqs, num_vf_pqs,
start_vport, other_mem_size_4kb, pq_params,
vport_params);
- /* init PF WFQ */
+
+ /* Init PF WFQ */
if (pf_wfq)
if (ecore_pf_wfq_rt_init
(p_hwfn, port_id, pf_id, pf_wfq, max_phys_tcs_per_port,
- num_pf_pqs + num_vf_pqs, pq_params) != 0)
+ num_pf_pqs + num_vf_pqs, pq_params))
return -1;
- /* init PF RL */
- if (ecore_pf_rl_rt_init(p_hwfn, pf_id, pf_rl) != 0)
+
+ /* Init PF RL */
+ if (ecore_pf_rl_rt_init(p_hwfn, pf_id, pf_rl))
return -1;
- /* set VPORT WFQ */
- if (ecore_vp_wfq_rt_init(p_hwfn, num_vports, vport_params) != 0)
+
+ /* Set VPORT WFQ */
+ if (ecore_vp_wfq_rt_init(p_hwfn, num_vports, vport_params))
return -1;
- /* set VPORT RL */
+
+ /* Set VPORT RL */
if (ecore_vport_rl_rt_init
- (p_hwfn, start_vport, num_vports, vport_params) != 0)
+ (p_hwfn, start_vport, num_vports, vport_params))
return -1;
+
return 0;
}
int ecore_init_pf_wfq(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u8 pf_id, u16 pf_wfq)
{
- u32 inc_val = QM_WFQ_INC_VAL(pf_wfq);
- if (inc_val == 0 || inc_val > QM_WFQ_MAX_INC_VAL) {
- DP_NOTICE(p_hwfn, true, "Invalid PF WFQ weight configuration");
+ u32 inc_val;
+
+ inc_val = QM_WFQ_INC_VAL(pf_wfq);
+ if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
+ DP_NOTICE(p_hwfn, true,
+ "Invalid PF WFQ weight configuration\n");
return -1;
}
+
ecore_wr(p_hwfn, p_ptt, QM_REG_WFQPFWEIGHT + pf_id * 4, inc_val);
+
return 0;
}
int ecore_init_pf_rl(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u8 pf_id, u32 pf_rl)
{
- u32 inc_val = QM_RL_INC_VAL(pf_rl);
+ u32 inc_val;
+
+ inc_val = QM_RL_INC_VAL(pf_rl);
if (inc_val > QM_RL_MAX_INC_VAL) {
- DP_NOTICE(p_hwfn, true, "Invalid PF rate limit configuration");
+ DP_NOTICE(p_hwfn, true,
+ "Invalid PF rate limit configuration\n");
return -1;
}
+
ecore_wr(p_hwfn, p_ptt, QM_REG_RLPFCRD + pf_id * 4,
(u32)QM_RL_CRD_REG_SIGN_BIT);
ecore_wr(p_hwfn, p_ptt, QM_REG_RLPFINCVAL + pf_id * 4, inc_val);
+
return 0;
}
@@ -740,20 +849,25 @@ int ecore_init_vport_wfq(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq)
{
+ u16 vport_pq_id;
+ u32 inc_val;
u8 tc;
- u32 inc_val = QM_WFQ_INC_VAL(vport_wfq);
- if (inc_val == 0 || inc_val > QM_WFQ_MAX_INC_VAL) {
+
+ inc_val = QM_WFQ_INC_VAL(vport_wfq);
+ if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
DP_NOTICE(p_hwfn, true,
- "Invalid VPORT WFQ weight configuration");
+ "Invalid VPORT WFQ weight configuration\n");
return -1;
}
+
for (tc = 0; tc < NUM_OF_TCS; tc++) {
- u16 vport_pq_id = first_tx_pq_id[tc];
+ vport_pq_id = first_tx_pq_id[tc];
if (vport_pq_id != QM_INVALID_PQ_ID) {
ecore_wr(p_hwfn, p_ptt,
QM_REG_WFQVPWEIGHT + vport_pq_id * 4, inc_val);
}
}
+
return 0;
}
@@ -761,20 +875,24 @@ int ecore_init_vport_rl(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u8 vport_id, u32 vport_rl)
{
u32 inc_val, max_qm_global_rls = MAX_QM_GLOBAL_RLS;
+
if (vport_id >= max_qm_global_rls) {
DP_NOTICE(p_hwfn, true,
- "Invalid VPORT ID for rate limiter configuration");
+ "Invalid VPORT ID for rate limiter configuration\n");
return -1;
}
+
inc_val = QM_RL_INC_VAL(vport_rl);
if (inc_val > QM_RL_MAX_INC_VAL) {
DP_NOTICE(p_hwfn, true,
- "Invalid VPORT rate-limit configuration");
+ "Invalid VPORT rate-limit configuration\n");
return -1;
}
+
ecore_wr(p_hwfn, p_ptt, QM_REG_RLGLBLCRD + vport_id * 4,
(u32)QM_RL_CRD_REG_SIGN_BIT);
ecore_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + vport_id * 4, inc_val);
+
return 0;
}
@@ -784,15 +902,20 @@ bool ecore_send_qm_stop_cmd(struct ecore_hwfn *p_hwfn,
bool is_tx_pq, u16 start_pq, u16 num_pqs)
{
u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 };
- u32 pq_mask = 0, last_pq = start_pq + num_pqs - 1, pq_id;
- /* set command's PQ type */
+ u32 pq_mask = 0, last_pq, pq_id;
+
+ last_pq = start_pq + num_pqs - 1;
+
+ /* Set command's PQ type */
QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, is_tx_pq ? 0 : 1);
- /* go over requested PQs */
+
+ /* Go over requested PQs */
for (pq_id = start_pq; pq_id <= last_pq; pq_id++) {
- /* set PQ bit in mask (stop command only) */
+ /* Set PQ bit in mask (stop command only) */
if (!is_release_cmd)
pq_mask |= (1 << (pq_id % QM_STOP_PQ_MASK_WIDTH));
- /* if last PQ or end of PQ mask, write command */
+
+ /* If last PQ or end of PQ mask, write command */
if ((pq_id == last_pq) ||
(pq_id % QM_STOP_PQ_MASK_WIDTH ==
(QM_STOP_PQ_MASK_WIDTH - 1))) {
@@ -807,68 +930,92 @@ bool ecore_send_qm_stop_cmd(struct ecore_hwfn *p_hwfn,
pq_mask = 0;
}
}
+
return true;
}
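An illustrative sketch of the batching pattern above (not part of the patch; assumes QM_STOP_PQ_MASK_WIDTH is 32, with flush_cmd() standing in for the real ecore_send_qm_cmd() call):

	for (pq_id = start_pq; pq_id <= last_pq; pq_id++) {
		pq_mask |= 1U << (pq_id % 32);	/* collect this PQ's bit */
		if (pq_id == last_pq || (pq_id % 32) == 31) {
			flush_cmd(pq_mask);	/* one command per full mask */
			pq_mask = 0;
		}
	}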
+
/* NIG: ETS configuration constants */
#define NIG_TX_ETS_CLIENT_OFFSET 4
#define NIG_LB_ETS_CLIENT_OFFSET 1
#define NIG_ETS_MIN_WFQ_BYTES 1600
+
/* NIG: ETS constants */
#define NIG_ETS_UP_BOUND(weight, mtu) \
-(2 * ((weight) > (mtu) ? (weight) : (mtu)))
+ (2 * ((weight) > (mtu) ? (weight) : (mtu)))
+
/* NIG: RL constants */
-#define NIG_RL_BASE_TYPE 1 /* byte base type */
-#define NIG_RL_PERIOD 1 /* in us */
+
+/* Byte base type value */
+#define NIG_RL_BASE_TYPE 1
+
+/* Period in us */
+#define NIG_RL_PERIOD 1
+
+/* Period in 25MHz cycles */
#define NIG_RL_PERIOD_CLK_25M (25 * NIG_RL_PERIOD)
+
+/* Rate in mbps */
#define NIG_RL_INC_VAL(rate) (((rate) * NIG_RL_PERIOD) / 8)
+
#define NIG_RL_MAX_VAL(inc_val, mtu) \
-(2 * ((inc_val) > (mtu) ? (inc_val) : (mtu)))
+ (2 * ((inc_val) > (mtu) ? (inc_val) : (mtu)))
+
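A worked check of the units (illustrative, not from the patch):

	/* At rate = 10000 Mbps, NIG_RL_INC_VAL(10000) = (10000 * 1) / 8 =
	 * 1250 bytes of credit per 1 us period, and that period equals
	 * NIG_RL_PERIOD_CLK_25M = 25 cycles of the 25 MHz clock.
	 */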
 /* NIG: packet priority configuration constants */
-#define NIG_PRIORITY_MAP_TC_BITS 4
+#define NIG_PRIORITY_MAP_TC_BITS 4
+
+
void ecore_init_nig_ets(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct init_ets_req *req, bool is_lb)
{
- u8 tc, sp_tc_map = 0, wfq_tc_map = 0;
- u8 num_tc = is_lb ? NUM_OF_TCS : NUM_OF_PHYS_TCS;
- u8 tc_client_offset =
- is_lb ? NIG_LB_ETS_CLIENT_OFFSET : NIG_TX_ETS_CLIENT_OFFSET;
- u32 min_weight = 0xffffffff;
- u32 tc_weight_base_addr =
- is_lb ? NIG_REG_LB_ARB_CREDIT_WEIGHT_0 :
- NIG_REG_TX_ARB_CREDIT_WEIGHT_0;
- u32 tc_weight_addr_diff =
- is_lb ? NIG_REG_LB_ARB_CREDIT_WEIGHT_1 -
- NIG_REG_LB_ARB_CREDIT_WEIGHT_0 : NIG_REG_TX_ARB_CREDIT_WEIGHT_1 -
- NIG_REG_TX_ARB_CREDIT_WEIGHT_0;
- u32 tc_bound_base_addr =
- is_lb ? NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_0 :
- NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_0;
- u32 tc_bound_addr_diff =
- is_lb ? NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_1 -
- NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_0 :
- NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_1 -
- NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_0;
+ u32 min_weight, tc_weight_base_addr, tc_weight_addr_diff;
+ u32 tc_bound_base_addr, tc_bound_addr_diff;
+ u8 sp_tc_map = 0, wfq_tc_map = 0;
+ u8 tc, num_tc, tc_client_offset;
+
+ num_tc = is_lb ? NUM_OF_TCS : NUM_OF_PHYS_TCS;
+ tc_client_offset = is_lb ? NIG_LB_ETS_CLIENT_OFFSET :
+ NIG_TX_ETS_CLIENT_OFFSET;
+ min_weight = 0xffffffff;
+ tc_weight_base_addr = is_lb ? NIG_REG_LB_ARB_CREDIT_WEIGHT_0 :
+ NIG_REG_TX_ARB_CREDIT_WEIGHT_0;
+ tc_weight_addr_diff = is_lb ? NIG_REG_LB_ARB_CREDIT_WEIGHT_1 -
+ NIG_REG_LB_ARB_CREDIT_WEIGHT_0 :
+ NIG_REG_TX_ARB_CREDIT_WEIGHT_1 -
+ NIG_REG_TX_ARB_CREDIT_WEIGHT_0;
+ tc_bound_base_addr = is_lb ? NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_0 :
+ NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_0;
+ tc_bound_addr_diff = is_lb ? NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_1 -
+ NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_0 :
+ NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_1 -
+ NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_0;
+
for (tc = 0; tc < num_tc; tc++) {
struct init_ets_tc_req *tc_req = &req->tc_req[tc];
- /* update SP map */
+
+ /* Update SP map */
if (tc_req->use_sp)
sp_tc_map |= (1 << tc);
- if (tc_req->use_wfq) {
- /* update WFQ map */
- wfq_tc_map |= (1 << tc);
- /* find minimal weight */
- if (tc_req->weight < min_weight)
- min_weight = tc_req->weight;
- }
+
+ if (!tc_req->use_wfq)
+ continue;
+
+ /* Update WFQ map */
+ wfq_tc_map |= (1 << tc);
+
+ /* Find minimal weight */
+ if (tc_req->weight < min_weight)
+ min_weight = tc_req->weight;
}
- /* write SP map */
+
+ /* Write SP map */
ecore_wr(p_hwfn, p_ptt,
is_lb ? NIG_REG_LB_ARB_CLIENT_IS_STRICT :
NIG_REG_TX_ARB_CLIENT_IS_STRICT,
(sp_tc_map << tc_client_offset));
- /* write WFQ map */
+
+ /* Write WFQ map */
ecore_wr(p_hwfn, p_ptt,
is_lb ? NIG_REG_LB_ARB_CLIENT_IS_SUBJECT2WFQ :
NIG_REG_TX_ARB_CLIENT_IS_SUBJECT2WFQ,
@@ -876,22 +1023,23 @@ void ecore_init_nig_ets(struct ecore_hwfn *p_hwfn,
/* write WFQ weights */
for (tc = 0; tc < num_tc; tc++, tc_client_offset++) {
struct init_ets_tc_req *tc_req = &req->tc_req[tc];
- if (tc_req->use_wfq) {
- /* translate weight to bytes */
- u32 byte_weight =
- (NIG_ETS_MIN_WFQ_BYTES * tc_req->weight) /
- min_weight;
- /* write WFQ weight */
- ecore_wr(p_hwfn, p_ptt,
- tc_weight_base_addr +
- tc_weight_addr_diff * tc_client_offset,
- byte_weight);
- /* write WFQ upper bound */
- ecore_wr(p_hwfn, p_ptt,
- tc_bound_base_addr +
- tc_bound_addr_diff * tc_client_offset,
- NIG_ETS_UP_BOUND(byte_weight, req->mtu));
- }
+ u32 byte_weight;
+
+ if (!tc_req->use_wfq)
+ continue;
+
+ /* Translate weight to bytes */
+ byte_weight = (NIG_ETS_MIN_WFQ_BYTES * tc_req->weight) /
+ min_weight;
+
+ /* Write WFQ weight */
+ ecore_wr(p_hwfn, p_ptt, tc_weight_base_addr +
+ tc_weight_addr_diff * tc_client_offset, byte_weight);
+
+ /* Write WFQ upper bound */
+ ecore_wr(p_hwfn, p_ptt, tc_bound_base_addr +
+ tc_bound_addr_diff * tc_client_offset,
+ NIG_ETS_UP_BOUND(byte_weight, req->mtu));
}
}
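A worked example of the weight translation above (illustrative, not from the patch):

	/* Two WFQ TCs with weights 1 and 2 give min_weight = 1, so the
	 * programmed byte weights are 1600 and 3200 (NIG_ETS_MIN_WFQ_BYTES *
	 * weight / min_weight), each bounded by NIG_ETS_UP_BOUND(byte_weight,
	 * mtu) = 2 * max(byte_weight, mtu).
	 */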
@@ -899,16 +1047,18 @@ void ecore_init_nig_lb_rl(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct init_nig_lb_rl_req *req)
{
- u8 tc;
u32 ctrl, inc_val, reg_offset;
- /* disable global MAC+LB RL */
+ u8 tc;
+
+ /* Disable global MAC+LB RL */
ctrl =
NIG_RL_BASE_TYPE <<
NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_BASE_TYPE_SHIFT;
ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_CTRL, ctrl);
- /* configure and enable global MAC+LB RL */
+
+ /* Configure and enable global MAC+LB RL */
if (req->lb_mac_rate) {
- /* configure */
+ /* Configure */
ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_INC_PERIOD,
NIG_RL_PERIOD_CLK_25M);
inc_val = NIG_RL_INC_VAL(req->lb_mac_rate);
@@ -916,20 +1066,23 @@ void ecore_init_nig_lb_rl(struct ecore_hwfn *p_hwfn,
inc_val);
ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_MAX_VALUE,
NIG_RL_MAX_VAL(inc_val, req->mtu));
- /* enable */
+
+ /* Enable */
ctrl |=
1 <<
NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_EN_SHIFT;
ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_CTRL, ctrl);
}
- /* disable global LB-only RL */
+
+ /* Disable global LB-only RL */
ctrl =
NIG_RL_BASE_TYPE <<
NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_BASE_TYPE_SHIFT;
ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_CTRL, ctrl);
- /* configure and enable global LB-only RL */
+
+ /* Configure and enable global LB-only RL */
if (req->lb_rate) {
- /* configure */
+ /* Configure */
ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_INC_PERIOD,
NIG_RL_PERIOD_CLK_25M);
inc_val = NIG_RL_INC_VAL(req->lb_rate);
@@ -937,41 +1090,41 @@ void ecore_init_nig_lb_rl(struct ecore_hwfn *p_hwfn,
inc_val);
ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_MAX_VALUE,
NIG_RL_MAX_VAL(inc_val, req->mtu));
- /* enable */
+
+ /* Enable */
ctrl |=
1 << NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_EN_SHIFT;
ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_CTRL, ctrl);
}
- /* per-TC RLs */
+
+ /* Per-TC RLs */
for (tc = 0, reg_offset = 0; tc < NUM_OF_PHYS_TCS;
tc++, reg_offset += 4) {
- /* disable TC RL */
+ /* Disable TC RL */
ctrl =
NIG_RL_BASE_TYPE <<
NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_BASE_TYPE_0_SHIFT;
ecore_wr(p_hwfn, p_ptt,
NIG_REG_LB_TCRATELIMIT_CTRL_0 + reg_offset, ctrl);
- /* configure and enable TC RL */
- if (req->tc_rate[tc]) {
- /* configure */
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LB_TCRATELIMIT_INC_PERIOD_0 +
- reg_offset, NIG_RL_PERIOD_CLK_25M);
- inc_val = NIG_RL_INC_VAL(req->tc_rate[tc]);
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LB_TCRATELIMIT_INC_VALUE_0 +
- reg_offset, inc_val);
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LB_TCRATELIMIT_MAX_VALUE_0 +
- reg_offset, NIG_RL_MAX_VAL(inc_val, req->mtu));
- /* enable */
- ctrl |=
- 1 <<
- NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_EN_0_SHIFT;
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LB_TCRATELIMIT_CTRL_0 + reg_offset,
- ctrl);
- }
+
+ /* Configure and enable TC RL */
+ if (!req->tc_rate[tc])
+ continue;
+
+ /* Configure */
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_INC_PERIOD_0 +
+ reg_offset, NIG_RL_PERIOD_CLK_25M);
+ inc_val = NIG_RL_INC_VAL(req->tc_rate[tc]);
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_INC_VALUE_0 +
+ reg_offset, inc_val);
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_MAX_VALUE_0 +
+ reg_offset, NIG_RL_MAX_VAL(inc_val, req->mtu));
+
+ /* Enable */
+ ctrl |= 1 <<
+ NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_EN_0_SHIFT;
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_CTRL_0 +
+ reg_offset, ctrl);
}
}
@@ -979,20 +1132,23 @@ void ecore_init_nig_pri_tc_map(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct init_nig_pri_tc_map_req *req)
{
- u8 pri, tc;
- u32 pri_tc_mask = 0;
u8 tc_pri_mask[NUM_OF_PHYS_TCS] = { 0 };
+ u32 pri_tc_mask = 0;
+ u8 pri, tc;
+
for (pri = 0; pri < NUM_OF_VLAN_PRIORITIES; pri++) {
- if (req->pri[pri].valid) {
- pri_tc_mask |=
- (req->pri[pri].
- tc_id << (pri * NIG_PRIORITY_MAP_TC_BITS));
- tc_pri_mask[req->pri[pri].tc_id] |= (1 << pri);
- }
+ if (!req->pri[pri].valid)
+ continue;
+
+ pri_tc_mask |= (req->pri[pri].tc_id <<
+ (pri * NIG_PRIORITY_MAP_TC_BITS));
+ tc_pri_mask[req->pri[pri].tc_id] |= (1 << pri);
}
- /* write priority -> TC mask */
+
+ /* Write priority -> TC mask */
ecore_wr(p_hwfn, p_ptt, NIG_REG_PKT_PRIORITY_TO_TC, pri_tc_mask);
- /* write TC -> priority mask */
+
+ /* Write TC -> priority mask */
for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
ecore_wr(p_hwfn, p_ptt, NIG_REG_PRIORITY_FOR_TC_0 + tc * 4,
tc_pri_mask[tc]);
@@ -1001,110 +1157,133 @@ void ecore_init_nig_pri_tc_map(struct ecore_hwfn *p_hwfn,
}
}
+
/* PRS: ETS configuration constants */
-#define PRS_ETS_MIN_WFQ_BYTES 1600
+#define PRS_ETS_MIN_WFQ_BYTES 1600
#define PRS_ETS_UP_BOUND(weight, mtu) \
-(2 * ((weight) > (mtu) ? (weight) : (mtu)))
+ (2 * ((weight) > (mtu) ? (weight) : (mtu)))
+
+
void ecore_init_prs_ets(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, struct init_ets_req *req)
{
+ u32 tc_weight_addr_diff, tc_bound_addr_diff, min_weight = 0xffffffff;
u8 tc, sp_tc_map = 0, wfq_tc_map = 0;
- u32 min_weight = 0xffffffff;
- u32 tc_weight_addr_diff =
- PRS_REG_ETS_ARB_CREDIT_WEIGHT_1 - PRS_REG_ETS_ARB_CREDIT_WEIGHT_0;
- u32 tc_bound_addr_diff =
- PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_1 -
- PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0;
+
+ tc_weight_addr_diff = PRS_REG_ETS_ARB_CREDIT_WEIGHT_1 -
+ PRS_REG_ETS_ARB_CREDIT_WEIGHT_0;
+ tc_bound_addr_diff = PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_1 -
+ PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0;
+
for (tc = 0; tc < NUM_OF_TCS; tc++) {
struct init_ets_tc_req *tc_req = &req->tc_req[tc];
- /* update SP map */
+
+ /* Update SP map */
if (tc_req->use_sp)
sp_tc_map |= (1 << tc);
- if (tc_req->use_wfq) {
- /* update WFQ map */
- wfq_tc_map |= (1 << tc);
- /* find minimal weight */
- if (tc_req->weight < min_weight)
- min_weight = tc_req->weight;
- }
+
+ if (!tc_req->use_wfq)
+ continue;
+
+ /* Update WFQ map */
+ wfq_tc_map |= (1 << tc);
+
+ /* Find minimal weight */
+ if (tc_req->weight < min_weight)
+ min_weight = tc_req->weight;
}
+
/* write SP map */
ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CLIENT_IS_STRICT, sp_tc_map);
+
/* write WFQ map */
ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ,
wfq_tc_map);
+
/* write WFQ weights */
for (tc = 0; tc < NUM_OF_TCS; tc++) {
struct init_ets_tc_req *tc_req = &req->tc_req[tc];
- if (tc_req->use_wfq) {
- /* translate weight to bytes */
- u32 byte_weight =
- (PRS_ETS_MIN_WFQ_BYTES * tc_req->weight) /
- min_weight;
- /* write WFQ weight */
- ecore_wr(p_hwfn, p_ptt,
- PRS_REG_ETS_ARB_CREDIT_WEIGHT_0 +
- tc * tc_weight_addr_diff, byte_weight);
- /* write WFQ upper bound */
- ecore_wr(p_hwfn, p_ptt,
- PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0 +
- tc * tc_bound_addr_diff,
- PRS_ETS_UP_BOUND(byte_weight, req->mtu));
- }
+ u32 byte_weight;
+
+ if (!tc_req->use_wfq)
+ continue;
+
+ /* Translate weight to bytes */
+ byte_weight = (PRS_ETS_MIN_WFQ_BYTES * tc_req->weight) /
+ min_weight;
+
+ /* Write WFQ weight */
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CREDIT_WEIGHT_0 + tc *
+ tc_weight_addr_diff, byte_weight);
+
+ /* Write WFQ upper bound */
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0 +
+ tc * tc_bound_addr_diff, PRS_ETS_UP_BOUND(byte_weight,
+ req->mtu));
}
}
+
/* BRB: RAM configuration constants */
#define BRB_TOTAL_RAM_BLOCKS_BB 4800
#define BRB_TOTAL_RAM_BLOCKS_K2 5632
-#define BRB_BLOCK_SIZE 128 /* in bytes */
+#define BRB_BLOCK_SIZE 128
#define BRB_MIN_BLOCKS_PER_TC 9
-#define BRB_HYST_BYTES 10240
-#define BRB_HYST_BLOCKS (BRB_HYST_BYTES / BRB_BLOCK_SIZE)
-/*
- * temporary big RAM allocation - should be updated
- */
+#define BRB_HYST_BYTES 10240
+#define BRB_HYST_BLOCKS (BRB_HYST_BYTES / BRB_BLOCK_SIZE)
+
+/* Temporary big RAM allocation - should be updated */
void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, struct init_brb_ram_req *req)
{
- u8 port, active_ports = 0;
+ u32 tc_headroom_blocks, min_pkt_size_blocks, total_blocks;
u32 active_port_blocks, reg_offset = 0;
- u32 tc_headroom_blocks =
- (u32)DIV_ROUND_UP(req->headroom_per_tc, BRB_BLOCK_SIZE);
- u32 min_pkt_size_blocks =
- (u32)DIV_ROUND_UP(req->min_pkt_size, BRB_BLOCK_SIZE);
- u32 total_blocks =
- ECORE_IS_K2(p_hwfn->
- p_dev) ? BRB_TOTAL_RAM_BLOCKS_K2 :
- BRB_TOTAL_RAM_BLOCKS_BB;
- /* find number of active ports */
+ u8 port, active_ports = 0;
+
+ tc_headroom_blocks = (u32)DIV_ROUND_UP(req->headroom_per_tc,
+ BRB_BLOCK_SIZE);
+ min_pkt_size_blocks = (u32)DIV_ROUND_UP(req->min_pkt_size,
+ BRB_BLOCK_SIZE);
+ total_blocks = ECORE_IS_K2(p_hwfn->p_dev) ? BRB_TOTAL_RAM_BLOCKS_K2 :
+ BRB_TOTAL_RAM_BLOCKS_BB;
+
+ /* Find number of active ports */
for (port = 0; port < MAX_NUM_PORTS; port++)
if (req->num_active_tcs[port])
active_ports++;
+
active_port_blocks = (u32)(total_blocks / active_ports);
+
for (port = 0; port < req->max_ports_per_engine; port++) {
- /* calculate per-port sizes */
- u32 tc_guaranteed_blocks =
- (u32)DIV_ROUND_UP(req->guranteed_per_tc, BRB_BLOCK_SIZE);
- u32 port_blocks =
- req->num_active_tcs[port] ? active_port_blocks : 0;
- u32 port_guaranteed_blocks =
- req->num_active_tcs[port] * tc_guaranteed_blocks;
- u32 port_shared_blocks = port_blocks - port_guaranteed_blocks;
- u32 full_xoff_th =
- req->num_active_tcs[port] * BRB_MIN_BLOCKS_PER_TC;
- u32 full_xon_th = full_xoff_th + min_pkt_size_blocks;
- u32 pause_xoff_th = tc_headroom_blocks;
- u32 pause_xon_th = pause_xoff_th + min_pkt_size_blocks;
+ u32 port_blocks, port_shared_blocks, port_guaranteed_blocks;
+ u32 full_xoff_th, full_xon_th, pause_xoff_th, pause_xon_th;
+ u32 tc_guaranteed_blocks;
u8 tc;
- /* init total size per port */
+
+ /* Calculate per-port sizes */
+ tc_guaranteed_blocks = (u32)DIV_ROUND_UP(req->guranteed_per_tc,
+ BRB_BLOCK_SIZE);
+ port_blocks = req->num_active_tcs[port] ? active_port_blocks :
+ 0;
+ port_guaranteed_blocks = req->num_active_tcs[port] *
+ tc_guaranteed_blocks;
+ port_shared_blocks = port_blocks - port_guaranteed_blocks;
+ full_xoff_th = req->num_active_tcs[port] *
+ BRB_MIN_BLOCKS_PER_TC;
+ full_xon_th = full_xoff_th + min_pkt_size_blocks;
+ pause_xoff_th = tc_headroom_blocks;
+ pause_xon_th = pause_xoff_th + min_pkt_size_blocks;
+
+ /* Init total size per port */
ecore_wr(p_hwfn, p_ptt, BRB_REG_TOTAL_MAC_SIZE + port * 4,
port_blocks);
- /* init shared size per port */
+
+ /* Init shared size per port */
ecore_wr(p_hwfn, p_ptt, BRB_REG_SHARED_HR_AREA + port * 4,
port_shared_blocks);
+
for (tc = 0; tc < NUM_OF_TCS; tc++, reg_offset += 4) {
- /* clear init values for non-active TCs */
+ /* Clear init values for non-active TCs */
if (tc == req->num_active_tcs[port]) {
tc_guaranteed_blocks = 0;
full_xoff_th = 0;
@@ -1112,15 +1291,18 @@ void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn,
pause_xoff_th = 0;
pause_xon_th = 0;
}
- /* init guaranteed size per TC */
+
+ /* Init guaranteed size per TC */
ecore_wr(p_hwfn, p_ptt,
BRB_REG_TC_GUARANTIED_0 + reg_offset,
tc_guaranteed_blocks);
ecore_wr(p_hwfn, p_ptt,
BRB_REG_MAIN_TC_GUARANTIED_HYST_0 + reg_offset,
BRB_HYST_BLOCKS);
-/* init pause/full thresholds per physical TC - for loopback traffic */
+ /* Init pause/full thresholds per physical TC - for
+ * loopback traffic.
+ */
ecore_wr(p_hwfn, p_ptt,
BRB_REG_LB_TC_FULL_XOFF_THRESHOLD_0 +
reg_offset, full_xoff_th);
@@ -1133,7 +1315,10 @@ void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn,
ecore_wr(p_hwfn, p_ptt,
BRB_REG_LB_TC_PAUSE_XON_THRESHOLD_0 +
reg_offset, pause_xon_th);
-/* init pause/full thresholds per physical TC - for main traffic */
+
+ /* Init pause/full thresholds per physical TC - for
+ * main traffic.
+ */
ecore_wr(p_hwfn, p_ptt,
BRB_REG_MAIN_TC_FULL_XOFF_THRESHOLD_0 +
reg_offset, full_xoff_th);
@@ -1150,23 +1335,25 @@ void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn,
}
}
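A worked example of the block arithmetic above (illustrative; the 4096-byte per-TC guarantee is an assumed input):

	/* On BB, total_blocks = 4800. With 2 active ports each gets 4800 / 2 =
	 * 2400 blocks of 128 bytes. For 4 active TCs and guranteed_per_tc =
	 * 4096 bytes: tc_guaranteed_blocks = ceil(4096 / 128) = 32,
	 * port_guaranteed_blocks = 4 * 32 = 128, port_shared_blocks = 2400 -
	 * 128 = 2272, and full_xoff_th = 4 * BRB_MIN_BLOCKS_PER_TC = 36.
	 */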
-/*In MF should be called once per engine to set EtherType of OuterTag*/
+/* In MF should be called once per engine to set EtherType of OuterTag */
void ecore_set_engine_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u32 ethType)
{
- /* update PRS register */
+ /* Update PRS register */
STORE_RT_REG(p_hwfn, PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET, ethType);
- /* update NIG register */
+
+ /* Update NIG register */
STORE_RT_REG(p_hwfn, NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET, ethType);
- /* update PBF register */
+
+ /* Update PBF register */
STORE_RT_REG(p_hwfn, PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET, ethType);
}
-/*In MF should be called once per port to set EtherType of OuterTag*/
+/* In MF should be called once per port to set EtherType of OuterTag */
void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u32 ethType)
{
- /* update DORQ register */
+ /* Update DORQ register */
STORE_RT_REG(p_hwfn, DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET, ethType);
}
@@ -1176,11 +1363,13 @@ void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
void ecore_set_vxlan_dest_port(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u16 dest_port)
{
- /* update PRS register */
+ /* Update PRS register */
ecore_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port);
- /* update NIG register */
+
+ /* Update NIG register */
ecore_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_CTRL, dest_port);
- /* update PBF register */
+
+ /* Update PBF register */
ecore_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port);
}
@@ -1188,23 +1377,26 @@ void ecore_set_vxlan_enable(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, bool vxlan_enable)
{
u32 reg_val;
- /* update PRS register */
+
+ /* Update PRS register */
reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT,
vxlan_enable);
ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
if (reg_val) {
- ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
(u32)PRS_ETH_TUNN_FIC_FORMAT);
}
- /* update NIG register */
+
+ /* Update NIG register */
reg_val = ecore_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT,
vxlan_enable);
ecore_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
- /* update DORQ register */
+
+ /* Update DORQ register */
ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN,
vxlan_enable ? 1 : 0);
}
@@ -1214,7 +1406,8 @@ void ecore_set_gre_enable(struct ecore_hwfn *p_hwfn,
bool eth_gre_enable, bool ip_gre_enable)
{
u32 reg_val;
- /* update PRS register */
+
+ /* Update PRS register */
reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT,
@@ -1224,10 +1417,11 @@ void ecore_set_gre_enable(struct ecore_hwfn *p_hwfn,
ip_gre_enable);
ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
if (reg_val) {
- ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
(u32)PRS_ETH_TUNN_FIC_FORMAT);
}
- /* update NIG register */
+
+ /* Update NIG register */
reg_val = ecore_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT,
@@ -1236,7 +1430,8 @@ void ecore_set_gre_enable(struct ecore_hwfn *p_hwfn,
NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT,
ip_gre_enable);
ecore_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
- /* update DORQ registers */
+
+ /* Update DORQ registers */
ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN,
eth_gre_enable ? 1 : 0);
ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN,
@@ -1246,14 +1441,13 @@ void ecore_set_gre_enable(struct ecore_hwfn *p_hwfn,
void ecore_set_geneve_dest_port(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u16 dest_port)
{
- /* geneve tunnel not supported in BB_A0 */
- if (ECORE_IS_BB_A0(p_hwfn->p_dev))
- return;
- /* update PRS register */
+ /* Update PRS register */
ecore_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port);
- /* update NIG register */
+
+ /* Update NIG register */
ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port);
- /* update PBF register */
+
+ /* Update PBF register */
ecore_wr(p_hwfn, p_ptt, PBF_REG_NGE_PORT, dest_port);
}
@@ -1262,10 +1456,8 @@ void ecore_set_geneve_enable(struct ecore_hwfn *p_hwfn,
bool eth_geneve_enable, bool ip_geneve_enable)
{
u32 reg_val;
- /* geneve tunnel not supported in BB_A0 */
- if (ECORE_IS_BB_A0(p_hwfn->p_dev))
- return;
- /* update PRS register */
+
+ /* Update PRS register */
reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT,
@@ -1275,42 +1467,75 @@ void ecore_set_geneve_enable(struct ecore_hwfn *p_hwfn,
ip_geneve_enable);
ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
if (reg_val) {
- ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
(u32)PRS_ETH_TUNN_FIC_FORMAT);
}
- /* update NIG register */
+
+ /* Update NIG register */
ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE,
eth_geneve_enable ? 1 : 0);
ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE,
ip_geneve_enable ? 1 : 0);
- /* comp ver */
- reg_val = (ip_geneve_enable || eth_geneve_enable) ? 1 : 0;
- ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_COMP_VER, reg_val);
- ecore_wr(p_hwfn, p_ptt, PBF_REG_NGE_COMP_VER, reg_val);
- ecore_wr(p_hwfn, p_ptt, PRS_REG_NGE_COMP_VER, reg_val);
- /* EDPM with geneve tunnel not supported in BB_B0 */
+
+ /* EDPM with geneve tunnel not supported in BB */
if (ECORE_IS_BB_B0(p_hwfn->p_dev))
return;
- /* update DORQ registers */
- ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN,
+
+ /* Update DORQ registers */
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2_E5,
eth_geneve_enable ? 1 : 0);
- ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN,
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2_E5,
ip_geneve_enable ? 1 : 0);
}
+
#define T_ETH_PACKET_ACTION_GFT_EVENTID 23
#define PARSER_ETH_CONN_GFT_ACTION_CM_HDR 272
#define T_ETH_PACKET_MATCH_RFS_EVENTID 25
-#define PARSER_ETH_CONN_CM_HDR (0x0)
+#define PARSER_ETH_CONN_CM_HDR 0
#define CAM_LINE_SIZE sizeof(u32)
#define RAM_LINE_SIZE sizeof(u64)
#define REG_SIZE sizeof(u32)
+void ecore_set_rfs_mode_disable(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 pf_id)
+{
+ union gft_cam_line_union cam_line;
+ struct gft_ram_line ram_line;
+ u32 i, *ram_line_ptr;
+
+ ram_line_ptr = (u32 *)&ram_line;
+
+ /* Stop using gft logic, disable gft search */
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 0);
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, 0x0);
+
+	/* Clean ram & cam for next rfs/gft session */
+
+ /* Zero camline */
+ OSAL_MEMSET(&cam_line, 0, sizeof(cam_line));
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id,
+ cam_line.cam_line_mapped.camline);
+
+ /* Zero ramline */
+ OSAL_MEMSET(&ram_line, 0, sizeof(ram_line));
+
+ /* Each iteration write to reg */
+ for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++)
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM +
+ RAM_LINE_SIZE * pf_id +
+ i * REG_SIZE, *(ram_line_ptr + i));
+}
+
+
void ecore_set_gft_event_id_cm_hdr(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
- /* set RFS event ID to be awakened i Tstorm By Prs */
- u32 rfs_cm_hdr_event_id = ecore_rd(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT);
+ u32 rfs_cm_hdr_event_id;
+
+	/* Set RFS event ID to be awakened in Tstorm by PRS */
+ rfs_cm_hdr_event_id = ecore_rd(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT);
rfs_cm_hdr_event_id |= T_ETH_PACKET_ACTION_GFT_EVENTID <<
PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
rfs_cm_hdr_event_id |= PARSER_ETH_CONN_GFT_ACTION_CM_HDR <<
@@ -1331,39 +1556,48 @@ void ecore_set_rfs_mode_enable(struct ecore_hwfn *p_hwfn,
struct gft_ram_line ramLine;
u32 *ramLinePointer = (u32 *)&ramLine;
int i;
+
if (!ipv6 && !ipv4)
DP_NOTICE(p_hwfn, true,
"set_rfs_mode_enable: must accept at "
"least on of - ipv4 or ipv6");
+
if (!tcp && !udp)
DP_NOTICE(p_hwfn, true,
"set_rfs_mode_enable: must accept at "
"least on of - udp or tcp");
- /* set RFS event ID to be awakened i Tstorm By Prs */
+
+	/* Set RFS event ID to be awakened in Tstorm by PRS */
rfs_cm_hdr_event_id |= T_ETH_PACKET_MATCH_RFS_EVENTID <<
PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
rfs_cm_hdr_event_id |= PARSER_ETH_CONN_CM_HDR <<
PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
ecore_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, rfs_cm_hdr_event_id);
+
/* Configure Registers for RFS mode */
-/* enable gft search */
+
+ /* Enable gft search */
ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);
	ecore_wr(p_hwfn, p_ptt, PRS_REG_LOAD_L2_FILTER, 0); /* do not load
							      * context, only cid,
							      * in PRS on match
							      */
camLine.cam_line_mapped.camline = 0;
- /* cam line is now valid!! */
+
+ /* Cam line is now valid!! */
SET_FIELD(camLine.cam_line_mapped.camline,
GFT_CAM_LINE_MAPPED_VALID, 1);
- /* filters are per PF!! */
+
+ /* Filters are per PF!! */
SET_FIELD(camLine.cam_line_mapped.camline,
GFT_CAM_LINE_MAPPED_PF_ID_MASK, 1);
SET_FIELD(camLine.cam_line_mapped.camline,
GFT_CAM_LINE_MAPPED_PF_ID, pf_id);
+
if (!(tcp && udp)) {
SET_FIELD(camLine.cam_line_mapped.camline,
- GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK, 1);
+ GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK,
+ GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK);
if (tcp)
SET_FIELD(camLine.cam_line_mapped.camline,
GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
@@ -1373,6 +1607,7 @@ void ecore_set_rfs_mode_enable(struct ecore_hwfn *p_hwfn,
GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
GFT_PROFILE_UDP_PROTOCOL);
}
+
if (!(ipv4 && ipv6)) {
SET_FIELD(camLine.cam_line_mapped.camline,
GFT_CAM_LINE_MAPPED_IP_VERSION_MASK, 1);
@@ -1385,44 +1620,53 @@ void ecore_set_rfs_mode_enable(struct ecore_hwfn *p_hwfn,
GFT_CAM_LINE_MAPPED_IP_VERSION,
GFT_PROFILE_IPV6);
}
- /* write characteristics to cam */
+
+ /* Write characteristics to cam */
ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id,
camLine.cam_line_mapped.camline);
camLine.cam_line_mapped.camline =
ecore_rd(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id);
- /* write line to RAM - compare to filter 4 tuple */
- ramLine.low32bits = 0;
- ramLine.high32bits = 0;
- SET_FIELD(ramLine.high32bits, GFT_RAM_LINE_DST_IP, 1);
- SET_FIELD(ramLine.high32bits, GFT_RAM_LINE_SRC_IP, 1);
- SET_FIELD(ramLine.low32bits, GFT_RAM_LINE_SRC_PORT, 1);
- SET_FIELD(ramLine.low32bits, GFT_RAM_LINE_DST_PORT, 1);
- /* each iteration write to reg */
+
+ /* Write line to RAM - compare to filter 4 tuple */
+ ramLine.lo = 0;
+ ramLine.hi = 0;
+ SET_FIELD(ramLine.hi, GFT_RAM_LINE_DST_IP, 1);
+ SET_FIELD(ramLine.hi, GFT_RAM_LINE_SRC_IP, 1);
+ SET_FIELD(ramLine.hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
+ SET_FIELD(ramLine.lo, GFT_RAM_LINE_ETHERTYPE, 1);
+ SET_FIELD(ramLine.lo, GFT_RAM_LINE_SRC_PORT, 1);
+ SET_FIELD(ramLine.lo, GFT_RAM_LINE_DST_PORT, 1);
+
+ /* Each iteration write to reg */
for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++)
ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM +
RAM_LINE_SIZE * pf_id +
i * REG_SIZE, *(ramLinePointer + i));
- /* set default profile so that no filter match will happen */
- ramLine.low32bits = 0xffff;
- ramLine.high32bits = 0xffff;
+
+ /* Set default profile so that no filter match will happen */
+ ramLine.lo = 0xffff;
+ ramLine.hi = 0xffff;
for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++)
ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM +
RAM_LINE_SIZE * PRS_GFT_CAM_LINES_NO_MATCH +
i * REG_SIZE, *(ramLinePointer + i));
}
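A hypothetical call site for the function above (pf_id is caller-provided), selecting TCP-over-IPv4 4-tuple matching only:

	ecore_set_rfs_mode_enable(p_hwfn, p_ptt, pf_id,
				  true /* tcp */, false /* udp */,
				  true /* ipv4 */, false /* ipv6 */);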
-/* Configure VF zone size mode*/
+/* Configure VF zone size mode */
void ecore_config_vf_zone_size_mode(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u16 mode,
bool runtime_init)
{
u32 msdm_vf_size_log = MSTORM_VF_ZONE_DEFAULT_SIZE_LOG;
u32 msdm_vf_offset_mask;
+
if (mode == VF_ZONE_SIZE_MODE_DOUBLE)
msdm_vf_size_log += 1;
else if (mode == VF_ZONE_SIZE_MODE_QUAD)
msdm_vf_size_log += 2;
+
msdm_vf_offset_mask = (1 << msdm_vf_size_log) - 1;
+
if (runtime_init) {
STORE_RT_REG(p_hwfn,
PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET,
@@ -1438,12 +1682,13 @@ void ecore_config_vf_zone_size_mode(struct ecore_hwfn *p_hwfn,
}
}
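A worked example (illustrative; the default size log of 7 is an assumption):

	/* If MSTORM_VF_ZONE_DEFAULT_SIZE_LOG were 7, VF_ZONE_SIZE_MODE_DOUBLE
	 * raises msdm_vf_size_log to 8, so msdm_vf_offset_mask =
	 * (1 << 8) - 1 = 0xff.
	 */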
-/* get mstorm statistics for offset by VF zone size mode*/
+/* Get mstorm statistics for offset by VF zone size mode */
u32 ecore_get_mstorm_queue_stat_offset(struct ecore_hwfn *p_hwfn,
u16 stat_cnt_id,
u16 vf_zone_size_mode)
{
u32 offset = MSTORM_QUEUE_STAT_OFFSET(stat_cnt_id);
+
if ((vf_zone_size_mode != VF_ZONE_SIZE_MODE_DEFAULT) &&
(stat_cnt_id > MAX_NUM_PFS)) {
if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_DOUBLE)
@@ -1453,16 +1698,18 @@ u32 ecore_get_mstorm_queue_stat_offset(struct ecore_hwfn *p_hwfn,
offset += 3 * (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
(stat_cnt_id - MAX_NUM_PFS);
}
+
return offset;
}
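A worked example (illustrative; MSTORM_VF_ZONE_DEFAULT_SIZE_LOG = 7 and MAX_NUM_PFS = 16 are assumed values):

	/* In VF_ZONE_SIZE_MODE_DOUBLE, stat_cnt_id = 20 adds (1 << 7) *
	 * (20 - 16) = 512 bytes to the default offset - one extra
	 * default-sized zone for each preceding VF.
	 */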
-/* get mstorm VF producer offset by VF zone size mode*/
+/* Get mstorm VF producer offset by VF zone size mode */
u32 ecore_get_mstorm_eth_vf_prods_offset(struct ecore_hwfn *p_hwfn,
u8 vf_id,
u8 vf_queue_id,
u16 vf_zone_size_mode)
{
u32 offset = MSTORM_ETH_VF_PRODS_OFFSET(vf_id, vf_queue_id);
+
if (vf_zone_size_mode != VF_ZONE_SIZE_MODE_DEFAULT) {
if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_DOUBLE)
offset += (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
@@ -1471,5 +1718,166 @@ u32 ecore_get_mstorm_eth_vf_prods_offset(struct ecore_hwfn *p_hwfn,
offset += 3 * (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
vf_id;
}
+
return offset;
}
+
+/* Calculate CRC8 of first 4 bytes in buf */
+static u8 ecore_calc_crc8(const u8 *buf)
+{
+ u32 i, j, crc = 0xff << 8;
+
+ /* CRC-8 polynomial */
+ #define POLY 0x1070
+
+ for (j = 0; j < 4; j++, buf++) {
+ crc ^= (*buf << 8);
+ for (i = 0; i < 8; i++) {
+ if (crc & 0x8000)
+ crc ^= (POLY << 3);
+
+ crc <<= 1;
+ }
+ }
+
+ return (u8)(crc >> 8);
+}
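An interpretation of the constant above (editorial, not from the patch):

	/* POLY == 0x1070 is the CRC-8 polynomial x^8 + x^2 + x + 1 (0x107)
	 * pre-shifted left by 4; combined with the "<< 3" in the loop, its
	 * top bit lines up with bit 15 of the 16-bit working register, so
	 * bytes are consumed MSB-first with an initial value of 0xff.
	 */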
+
+/* Calculate and return CDU validation byte per connection type / region /
+ * cid.
+ */
+static u8 ecore_calc_cdu_validation_byte(u8 conn_type, u8 region,
+ u32 cid)
+{
+ const u8 validation_cfg = CDU_VALIDATION_DEFAULT_CFG;
+ u8 crc, validation_byte = 0;
+ u32 validation_string = 0;
+ const u8 *data_to_crc_rev;
+ u8 data_to_crc[4];
+
+ data_to_crc_rev = (const u8 *)&validation_string;
+
+ /*
+ * The CRC is calculated on the String-to-compress:
+ * [31:8] = {CID[31:20],CID[11:0]}
+ * [7:4] = Region
+ * [3:0] = Type
+ */
+ if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_CID) & 1)
+ validation_string |= (cid & 0xFFF00000) | ((cid & 0xFFF) << 8);
+
+ if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_REGION) & 1)
+ validation_string |= ((region & 0xF) << 4);
+
+ if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_TYPE) & 1)
+ validation_string |= (conn_type & 0xF);
+
+	/* Convert to big-endian (ntoh()) */
+ data_to_crc[0] = data_to_crc_rev[3];
+ data_to_crc[1] = data_to_crc_rev[2];
+ data_to_crc[2] = data_to_crc_rev[1];
+ data_to_crc[3] = data_to_crc_rev[0];
+
+ crc = ecore_calc_crc8(data_to_crc);
+
+ validation_byte |= ((validation_cfg >>
+ CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE) & 1) << 7;
+
+ if ((validation_cfg >>
+ CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT) & 1)
+ validation_byte |= ((conn_type & 0xF) << 3) | (crc & 0x7);
+ else
+ validation_byte |= crc & 0x7F;
+
+ return validation_byte;
+}
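A worked example of the string-to-compress layout (illustrative):

	/* cid = 0x12345678, region = 4, type = 2 yields
	 * (0x12345678 & 0xFFF00000) | ((0x12345678 & 0xFFF) << 8) |
	 * (4 << 4) | 2 = 0x12367842, which is then byte-swapped and fed to
	 * ecore_calc_crc8().
	 */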
+
+/* Calculate and set validation bytes for session context */
+void ecore_calc_session_ctx_validation(void *p_ctx_mem, u16 ctx_size,
+ u8 ctx_type, u32 cid)
+{
+ u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
+
+ p_ctx = (u8 *)p_ctx_mem;
+ x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
+ t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
+ u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];
+
+ OSAL_MEMSET(p_ctx, 0, ctx_size);
+
+ *x_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 3, cid);
+ *t_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 4, cid);
+ *u_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 5, cid);
+}
+
+/* Calculate and set validation bytes for task context */
+void ecore_calc_task_ctx_validation(void *p_ctx_mem, u16 ctx_size,
+ u8 ctx_type, u32 tid)
+{
+ u8 *p_ctx, *region1_val_ptr;
+
+ p_ctx = (u8 *)p_ctx_mem;
+ region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];
+
+ OSAL_MEMSET(p_ctx, 0, ctx_size);
+
+ *region1_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 1, tid);
+}
+
+/* Memset session context to 0 while preserving validation bytes */
+void ecore_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
+{
+ u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
+ u8 x_val, t_val, u_val;
+
+ p_ctx = (u8 *)p_ctx_mem;
+ x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
+ t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
+ u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];
+
+ x_val = *x_val_ptr;
+ t_val = *t_val_ptr;
+ u_val = *u_val_ptr;
+
+ OSAL_MEMSET(p_ctx, 0, ctx_size);
+
+ *x_val_ptr = x_val;
+ *t_val_ptr = t_val;
+ *u_val_ptr = u_val;
+}
+
+/* Memset task context to 0 while preserving validation bytes */
+void ecore_memset_task_ctx(void *p_ctx_mem, const u32 ctx_size,
+ const u8 ctx_type)
+{
+ u8 *p_ctx, *region1_val_ptr;
+ u8 region1_val;
+
+ p_ctx = (u8 *)p_ctx_mem;
+ region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];
+
+ region1_val = *region1_val_ptr;
+
+ OSAL_MEMSET(p_ctx, 0, ctx_size);
+
+ *region1_val_ptr = region1_val;
+}
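A hypothetical usage sketch of the pair above (p_ctx, ctx_size, ctx_type and cid are caller-provided):

	/* Seed the validation bytes once per CID... */
	ecore_calc_session_ctx_validation(p_ctx, ctx_size, ctx_type, cid);
	/* ...then re-zero the context on reuse without losing them */
	ecore_memset_session_ctx(p_ctx, ctx_size, ctx_type);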
+
+/* Enable and configure context validation */
+void ecore_enable_context_validation(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 ctx_validation;
+
+ /* Enable validation for connection region 3 - bits [31:24] */
+ ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 24;
+ ecore_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID0, ctx_validation);
+
+ /* Enable validation for connection region 5 - bits [15: 8] */
+ ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
+ ecore_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID1, ctx_validation);
+
+ /* Enable validation for connection region 1 - bits [15: 8] */
+ ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
+ ecore_wr(p_hwfn, p_ptt, CDU_REG_TCFC_CTX_VALID0, ctx_validation);
+}
diff --git a/drivers/net/qede/base/ecore_init_fw_funcs.h b/drivers/net/qede/base/ecore_init_fw_funcs.h
index 9df0e7de..4da3fc29 100644
--- a/drivers/net/qede/base/ecore_init_fw_funcs.h
+++ b/drivers/net/qede/base/ecore_init_fw_funcs.h
@@ -8,20 +8,22 @@
#ifndef _INIT_FW_FUNCS_H
#define _INIT_FW_FUNCS_H
-/* forward declarations */
+/* Forward declarations */
+
struct init_qm_pq_params;
+
/**
- * @brief ecore_qm_pf_mem_size - prepare QM ILT sizes
+ * @brief ecore_qm_pf_mem_size - Prepare QM ILT sizes
*
* Returns the required host memory size in 4KB units.
* Must be called before all QM init HSI functions.
*
- * @param pf_id - physical function ID
- * @param num_pf_cids - number of connections used by this PF
- * @param num_vf_cids - number of connections used by VFs of this PF
- * @param num_tids - number of tasks used by this PF
- * @param num_pf_pqs - number of PQs used by this PF
- * @param num_vf_pqs - number of PQs used by VFs of this PF
+ * @param pf_id - physical function ID
+ * @param num_pf_cids - number of connections used by this PF
+ * @param num_vf_cids - number of connections used by VFs of this PF
+ * @param num_tids - number of tasks used by this PF
+ * @param num_pf_pqs - number of PQs used by this PF
+ * @param num_vf_pqs - number of PQs used by VFs of this PF
*
* @return The required host memory size in 4KB units.
*/
@@ -31,6 +33,7 @@ u32 ecore_qm_pf_mem_size(u8 pf_id,
u32 num_tids,
u16 num_pf_pqs,
u16 num_vf_pqs);
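A hypothetical call (all counts caller-provided), sizing the backing store before any QM init HSI call:

	u32 qm_ilt_4kb = ecore_qm_pf_mem_size(pf_id, num_pf_cids, num_vf_cids,
					      num_tids, num_pf_pqs,
					      num_vf_pqs);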
+
/**
* @brief ecore_qm_common_rt_init - Prepare QM runtime init values for engine
* phase
@@ -38,10 +41,10 @@ u32 ecore_qm_pf_mem_size(u8 pf_id,
* @param p_hwfn
* @param max_ports_per_engine - max number of ports per engine in HW
* @param max_phys_tcs_per_port - max number of physical TCs per port in HW
- * @param pf_rl_en - enable per-PF rate limiters
- * @param pf_wfq_en - enable per-PF WFQ
- * @param vport_rl_en - enable per-VPORT rate limiters
- * @param vport_wfq_en - enable per-VPORT WFQ
+ * @param pf_rl_en - enable per-PF rate limiters
+ * @param pf_wfq_en - enable per-PF WFQ
+ * @param vport_rl_en - enable per-VPORT rate limiters
+ * @param vport_wfq_en - enable per-VPORT WFQ
* @param port_params - array of size MAX_NUM_PORTS with params for each port
*
* @return 0 on success, -1 on error.
@@ -54,22 +57,24 @@ int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn,
bool vport_rl_en,
bool vport_wfq_en,
struct init_qm_port_params port_params[MAX_NUM_PORTS]);
+
/**
* @brief ecore_qm_pf_rt_init Prepare QM runtime init values for the PF phase
*
* @param p_hwfn
* @param p_ptt - ptt window used for writing the registers
- * @param port_id - port ID
- * @param pf_id - PF ID
+ * @param port_id - port ID
+ * @param pf_id - PF ID
* @param max_phys_tcs_per_port - max number of physical TCs per port in HW
- * @param is_first_pf - 1 = first PF in engine, 0 = othwerwise
- * @param num_pf_cids - number of connections used by this PF
+ * @param is_first_pf		- 1 = first PF in engine, 0 = otherwise
+ * @param num_pf_cids - number of connections used by this PF
* @param num_vf_cids - number of connections used by VFs of this PF
- * @param num_tids - number of tasks used by this PF
- * @param start_pq - first Tx PQ ID associated with this PF
- * @param num_pf_pqs - number of Tx PQs associated with this PF (non-VF)
- * @param num_vf_pqs - number of Tx PQs associated with a VF
- * @param start_vport - first VPORT ID associated with this PF
+ * @param num_tids - number of tasks used by this PF
+ * @param start_pq - first Tx PQ ID associated with this PF
+ * @param num_pf_pqs - number of Tx PQs associated with this PF
+ * (non-VF)
+ * @param num_vf_pqs - number of Tx PQs associated with a VF
+ * @param start_vport - first VPORT ID associated with this PF
* @param num_vports - number of VPORTs associated with this PF
* @param pf_wfq - WFQ weight. if PF WFQ is globally disabled, the weight must
* be 0. otherwise, the weight must be non-zero.
@@ -100,6 +105,7 @@ int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,
u32 pf_rl,
struct init_qm_pq_params *pq_params,
struct init_qm_vport_params *vport_params);
+
/**
* @brief ecore_init_pf_wfq Initializes the WFQ weight of the specified PF
*
@@ -114,11 +120,12 @@ int ecore_init_pf_wfq(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u8 pf_id,
u16 pf_wfq);
+
/**
- * @brief ecore_init_pf_rl Initializes the rate limit of the specified PF
+ * @brief ecore_init_pf_rl - Initializes the rate limit of the specified PF
*
* @param p_hwfn
- * @param p_ptt - ptt window used for writing the registers
+ * @param p_ptt - ptt window used for writing the registers
* @param pf_id - PF ID
* @param pf_rl - rate limit in Mb/sec units
*
@@ -128,6 +135,7 @@ int ecore_init_pf_rl(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u8 pf_id,
u32 pf_rl);
+
/**
* @brief ecore_init_vport_wfq Initializes the WFQ weight of specified VPORT
*
@@ -144,10 +152,12 @@ int ecore_init_vport_wfq(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u16 first_tx_pq_id[NUM_OF_TCS],
u16 vport_wfq);
+
/**
- * @brief ecore_init_vport_rl Initializes the rate limit of the specified VPORT
+ * @brief ecore_init_vport_rl - Initializes the rate limit of the specified
+ * VPORT.
*
- * @param p_hwfn
+ * @param p_hwfn - HW device data
* @param p_ptt - ptt window used for writing the registers
* @param vport_id - VPORT ID
* @param vport_rl - rate limit in Mb/sec units
@@ -158,6 +168,7 @@ int ecore_init_vport_rl(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u8 vport_id,
u32 vport_rl);
+
/**
* @brief ecore_send_qm_stop_cmd Sends a stop command to the QM
*
@@ -178,6 +189,7 @@ bool ecore_send_qm_stop_cmd(struct ecore_hwfn *p_hwfn,
u16 start_pq,
u16 num_pqs);
#ifndef UNUSED_HSI_FUNC
+
/**
* @brief ecore_init_nig_ets - initializes the NIG ETS arbiter
*
@@ -193,6 +205,7 @@ void ecore_init_nig_ets(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct init_ets_req *req,
bool is_lb);
+
/**
* @brief ecore_init_nig_lb_rl - initializes the NIG LB RLs
*
@@ -205,6 +218,7 @@ void ecore_init_nig_lb_rl(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct init_nig_lb_rl_req *req);
#endif /* UNUSED_HSI_FUNC */
+
/**
* @brief ecore_init_nig_pri_tc_map - initializes the NIG priority to TC map.
*
@@ -216,6 +230,7 @@ void ecore_init_nig_lb_rl(struct ecore_hwfn *p_hwfn,
void ecore_init_nig_pri_tc_map(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct init_nig_pri_tc_map_req *req);
+
#ifndef UNUSED_HSI_FUNC
/**
* @brief ecore_init_prs_ets - initializes the PRS Rx ETS arbiter
@@ -229,6 +244,7 @@ void ecore_init_prs_ets(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct init_ets_req *req);
#endif /* UNUSED_HSI_FUNC */
+
#ifndef UNUSED_HSI_FUNC
/**
* @brief ecore_init_brb_ram - initializes BRB RAM sizes per TC
@@ -242,6 +258,7 @@ void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct init_brb_ram_req *req);
#endif /* UNUSED_HSI_FUNC */
+
#ifndef UNUSED_HSI_FUNC
/**
* @brief ecore_set_engine_mf_ovlan_eth_type - initializes Nig,Prs,Pbf and llh
@@ -250,22 +267,24 @@ void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn,
* if engine
* is in BD mode.
*
- * @param p_ptt - ptt window used for writing the registers.
+ * @param p_ptt - ptt window used for writing the registers.
* @param ethType - etherType to configure
*/
void ecore_set_engine_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u32 ethType);
+
/**
* @brief ecore_set_port_mf_ovlan_eth_type - initializes DORQ ethType Regs to
 *                                           input ethType. Should be called
* once per port.
*
- * @param p_ptt - ptt window used for writing the registers.
+ * @param p_ptt - ptt window used for writing the registers.
* @param ethType - etherType to configure
*/
void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u32 ethType);
#endif /* UNUSED_HSI_FUNC */
+
/**
* @brief ecore_set_vxlan_dest_port - initializes vxlan tunnel destination udp
* port
@@ -276,15 +295,17 @@ void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
void ecore_set_vxlan_dest_port(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u16 dest_port);
+
/**
* @brief ecore_set_vxlan_enable - enable or disable VXLAN tunnel in HW
*
- * @param p_ptt - ptt window used for writing the registers.
- * @param vxlan_enable - vxlan enable flag.
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param vxlan_enable - vxlan enable flag.
*/
void ecore_set_vxlan_enable(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
bool vxlan_enable);
+
/**
* @brief ecore_set_gre_enable - enable or disable GRE tunnel in HW
*
@@ -296,6 +317,7 @@ void ecore_set_gre_enable(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
bool eth_gre_enable,
bool ip_gre_enable);
+
/**
* @brief ecore_set_geneve_dest_port - initializes geneve tunnel destination
* udp port
@@ -306,6 +328,7 @@ void ecore_set_gre_enable(struct ecore_hwfn *p_hwfn,
void ecore_set_geneve_dest_port(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u16 dest_port);
+
/**
 * @brief ecore_set_geneve_enable - enable or disable GENEVE tunnel in HW
*
@@ -318,6 +341,7 @@ void ecore_set_geneve_enable(struct ecore_hwfn *p_hwfn,
bool eth_geneve_enable,
bool ip_geneve_enable);
#ifndef UNUSED_HSI_FUNC
+
/**
* @brief ecore_set_gft_event_id_cm_hdr - configure GFT event id and cm header
*
@@ -325,16 +349,27 @@ void ecore_set_geneve_enable(struct ecore_hwfn *p_hwfn,
*/
void ecore_set_gft_event_id_cm_hdr(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
+
+/**
+ * @brief ecore_set_rfs_mode_disable - Disable and configure HW for RFS
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param pf_id - pf on which to disable RFS.
+ */
+void ecore_set_rfs_mode_disable(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 pf_id);
+
/**
* @brief ecore_set_rfs_mode_enable - enable and configure HW for RFS
*
-*
-* @param p_ptt - ptt window used for writing the registers.
-* @param pf_id - pf on which to enable RFS.
-* @param tcp - set profile tcp packets.
-* @param udp - set profile udp packet.
-* @param ipv4 - set profile ipv4 packet.
-* @param ipv6 - set profile ipv6 packet.
+* @param p_ptt - ptt window used for writing the registers.
+* @param pf_id - pf on which to enable RFS.
+* @param tcp - set profile tcp packets.
+* @param udp - set profile udp packet.
+* @param ipv4 - set profile ipv4 packet.
+* @param ipv6 - set profile ipv6 packet.
*/
void ecore_set_rfs_mode_enable(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
@@ -344,6 +379,7 @@ void ecore_set_rfs_mode_enable(struct ecore_hwfn *p_hwfn,
bool ipv4,
bool ipv6);
#endif /* UNUSED_HSI_FUNC */
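
The enable/disable pair above is meant to be symmetric: ecore_set_rfs_mode_enable()
programs the RFS profiles for the selected protocols and ecore_set_rfs_mode_disable()
reverts the PF. A minimal caller-side sketch (the rel_pf_id field and flag values are
illustrative assumptions; error handling omitted):

    /* Enable RFS classification for TCP/IPv4 flows on this PF, then
     * tear it down again later. Illustrative values only.
     */
    ecore_set_rfs_mode_enable(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
                              true  /* tcp  */, false /* udp  */,
                              true  /* ipv4 */, false /* ipv6 */);
    /* ... flow steering in use ... */
    ecore_set_rfs_mode_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id);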
+
/**
* @brief ecore_config_vf_zone_size_mode - Configure VF zone size mode. Must be
* used before first ETH queue started.
@@ -357,18 +393,20 @@ void ecore_set_rfs_mode_enable(struct ecore_hwfn *p_hwfn,
*/
void ecore_config_vf_zone_size_mode(struct ecore_hwfn *p_hwfn, struct ecore_ptt
*p_ptt, u16 mode, bool runtime_init);
+
/**
-* @brief ecore_get_mstorm_queue_stat_offset - get mstorm statistics offset by VF
-* zone size mode.
+ * @brief ecore_get_mstorm_queue_stat_offset - Get mstorm statistics offset by
+ * VF zone size mode.
*
* @param stat_cnt_id - statistic counter id
* @param vf_zone_size_mode - VF zone size mode. Use enum vf_zone_size_mode.
*/
u32 ecore_get_mstorm_queue_stat_offset(struct ecore_hwfn *p_hwfn,
u16 stat_cnt_id, u16 vf_zone_size_mode);
+
/**
-* @brief ecore_get_mstorm_eth_vf_prods_offset - VF producer offset by VF zone
-* size mode.
+ * @brief ecore_get_mstorm_eth_vf_prods_offset - VF producer offset by VF zone
+ * size mode.
*
* @param vf_id - vf id.
* @param vf_queue_id - per VF rx queue id.
@@ -376,4 +414,58 @@ u32 ecore_get_mstorm_queue_stat_offset(struct ecore_hwfn *p_hwfn,
*/
u32 ecore_get_mstorm_eth_vf_prods_offset(struct ecore_hwfn *p_hwfn, u8 vf_id, u8
vf_queue_id, u16 vf_zone_size_mode);
+/**
+ * @brief ecore_enable_context_validation - Enable and configure context
+ * validation.
+ *
+ * @param p_ptt - ptt window used for writing the registers.
+ */
+void ecore_enable_context_validation(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+/**
+ * @brief ecore_calc_session_ctx_validation - Calculate validation byte for
+ * session context.
+ *
+ * @param p_ctx_mem - pointer to context memory.
+ * @param ctx_size - context size.
+ * @param ctx_type - context type.
+ * @param cid - context cid.
+ */
+void ecore_calc_session_ctx_validation(void *p_ctx_mem, u16 ctx_size,
+ u8 ctx_type, u32 cid);
+/**
+ * @brief ecore_calc_task_ctx_validation - Calculate validation byte for task
+ * context.
+ *
+ * @param p_ctx_mem - pointer to context memory.
+ * @param ctx_size - context size.
+ * @param ctx_type - context type.
+ * @param tid - context tid.
+ */
+void ecore_calc_task_ctx_validation(void *p_ctx_mem, u16 ctx_size,
+ u8 ctx_type, u32 tid);
+/**
+ * @brief ecore_memset_session_ctx - Memset session context to 0 while
+ * preserving validation bytes.
+ *
+ * @param p_ctx_mem - pointer to context memory.
+ * @param ctx_size - size to initialize.
+ * @param ctx_type - context type.
+ */
+void ecore_memset_session_ctx(void *p_ctx_mem, u32 ctx_size,
+ u8 ctx_type);
+/**
+ * @brief ecore_memset_task_ctx - Memset task context to 0 while preserving
+ * validation bytes.
+ *
+ * @param p_ctx_mem - pointer to context memory.
+ * @param ctx_size - size to initialize.
+ * @param ctx_type - context type.
+ */
+void ecore_memset_task_ctx(void *p_ctx_mem, u32 ctx_size,
+ u8 ctx_type);
#endif
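
Taken together, these helpers let a driver recycle context memory without breaking
the firmware's context-validation scheme: the memset variants zero everything except
the validation bytes, and the calc variants stamp a fresh validation byte for a new
cid/tid. A hedged usage sketch (the buffer name, ctx_type constant and new_cid are
hypothetical, not taken from this patch):

    /* Clear a session context between uses, keeping validation bytes,
     * then compute the validation byte for the new connection.
     */
    ecore_memset_session_ctx(ctx, ctx_size, ctx_type);
    ecore_calc_session_ctx_validation(ctx, ctx_size, ctx_type, new_cid);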
diff --git a/drivers/net/qede/base/ecore_init_ops.c b/drivers/net/qede/base/ecore_init_ops.c
index 351e9467..b907a95e 100644
--- a/drivers/net/qede/base/ecore_init_ops.c
+++ b/drivers/net/qede/base/ecore_init_ops.c
@@ -63,8 +63,8 @@ static enum _ecore_status_t ecore_init_rt(struct ecore_hwfn *p_hwfn,
{
u32 *p_init_val = &p_hwfn->rt_data.init_val[rt_offset];
bool *p_valid = &p_hwfn->rt_data.b_valid[rt_offset];
- enum _ecore_status_t rc = ECORE_SUCCESS;
u16 i, segment;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
/* Since not all RT entries are initialized, go over the RT and
* for each segment of initialized values use DMA.
@@ -190,19 +190,19 @@ static enum _ecore_status_t ecore_init_cmd_array(struct ecore_hwfn *p_hwfn,
bool b_must_dmae,
bool b_can_dmae)
{
+ u32 dmae_array_offset = OSAL_LE32_TO_CPU(cmd->args.array_offset);
+ u32 data = OSAL_LE32_TO_CPU(cmd->data);
+ u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
#ifdef CONFIG_ECORE_ZIPPED_FW
u32 offset, output_len, input_len, max_size;
#endif
- u32 dmae_array_offset = OSAL_LE32_TO_CPU(cmd->args.array_offset);
struct ecore_dev *p_dev = p_hwfn->p_dev;
- enum _ecore_status_t rc = ECORE_SUCCESS;
union init_array_hdr *hdr;
const u32 *array_data;
- u32 size, addr, data;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ u32 size;
array_data = p_dev->fw_data->arr_data;
- data = OSAL_LE32_TO_CPU(cmd->data);
- addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
hdr = (union init_array_hdr *)
(uintptr_t)(array_data + dmae_array_offset);
@@ -272,13 +272,10 @@ static enum _ecore_status_t ecore_init_cmd_wr(struct ecore_hwfn *p_hwfn,
struct init_write_op *p_cmd,
bool b_can_dmae)
{
+ u32 data = OSAL_LE32_TO_CPU(p_cmd->data);
+ bool b_must_dmae = GET_FIELD(data, INIT_WRITE_OP_WIDE_BUS);
+ u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
enum _ecore_status_t rc = ECORE_SUCCESS;
- bool b_must_dmae;
- u32 addr, data;
-
- data = OSAL_LE32_TO_CPU(p_cmd->data);
- b_must_dmae = GET_FIELD(data, INIT_WRITE_OP_WIDE_BUS);
- addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
/* Sanitize */
if (b_must_dmae && !b_can_dmae) {
@@ -452,10 +449,10 @@ enum _ecore_status_t ecore_init_run(struct ecore_hwfn *p_hwfn,
int phase, int phase_id, int modes)
{
struct ecore_dev *p_dev = p_hwfn->p_dev;
- enum _ecore_status_t rc = ECORE_SUCCESS;
u32 cmd_num, num_init_ops;
union init_op *init_ops;
bool b_dmae = false;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
num_init_ops = p_dev->fw_data->init_ops_size;
init_ops = p_dev->fw_data->init_ops;
@@ -573,8 +570,7 @@ enum _ecore_status_t ecore_init_fw_data(struct ecore_dev *p_dev,
return ECORE_INVAL;
}
- /* First Dword contains metadata and should be skipped */
- buf_hdr = (struct bin_buffer_hdr *)((uintptr_t)(data + sizeof(u32)));
+ buf_hdr = (struct bin_buffer_hdr *)(uintptr_t)data;
offset = buf_hdr[BIN_BUF_INIT_FW_VER_INFO].offset;
fw->fw_ver_info = (struct fw_ver_info *)((uintptr_t)(data + offset));
diff --git a/drivers/net/qede/base/ecore_int.c b/drivers/net/qede/base/ecore_int.c
index 6fb037df..8dc4d150 100644
--- a/drivers/net/qede/base/ecore_int.c
+++ b/drivers/net/qede/base/ecore_int.c
@@ -59,6 +59,11 @@ struct aeu_invert_reg_bit {
#define ATTENTION_OFFSET_MASK (0x000ff000)
#define ATTENTION_OFFSET_SHIFT (12)
+#define ATTENTION_BB_MASK (0x00700000)
+#define ATTENTION_BB_SHIFT (20)
+#define ATTENTION_BB(value) ((value) << ATTENTION_BB_SHIFT)
+#define ATTENTION_BB_DIFFERENT (1 << 23)
+
#define ATTENTION_CLEAR_ENABLE (1 << 28)
unsigned int flags;
@@ -414,7 +419,7 @@ ecore_general_attention_35(struct ecore_hwfn *p_hwfn)
#define ECORE_DORQ_ATTENTION_REASON_MASK (0xfffff)
#define ECORE_DORQ_ATTENTION_OPAQUE_MASK (0xffff)
-#define ECORE_DORQ_ATTENTION_SIZE_MASK (0x7f)
+#define ECORE_DORQ_ATTENTION_SIZE_MASK (0x7f0000)
#define ECORE_DORQ_ATTENTION_SIZE_SHIFT (16)
static enum _ecore_status_t ecore_dorq_attn_cb(struct ecore_hwfn *p_hwfn)
@@ -468,7 +473,26 @@ static enum _ecore_status_t ecore_tm_attn_cb(struct ecore_hwfn *p_hwfn)
return ECORE_INVAL;
}
-/* Notice aeu_invert_reg must be defined in the same order of bits as HW; */
+/* Instead of major changes to the data-structure, we have some 'special'
+ * identifiers for sources that changed meaning between adapters.
+ */
+enum aeu_invert_reg_special_type {
+ AEU_INVERT_REG_SPECIAL_CNIG_0,
+ AEU_INVERT_REG_SPECIAL_CNIG_1,
+ AEU_INVERT_REG_SPECIAL_CNIG_2,
+ AEU_INVERT_REG_SPECIAL_CNIG_3,
+ AEU_INVERT_REG_SPECIAL_MAX,
+};
+
+static struct aeu_invert_reg_bit
+aeu_descs_special[AEU_INVERT_REG_SPECIAL_MAX] = {
+ {"CNIG port 0", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
+ {"CNIG port 1", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
+ {"CNIG port 2", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
+ {"CNIG port 3", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
+};
+
+/* Notice aeu_invert_reg must be defined in the same order of bits as HW; */
static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = {
{
{ /* After Invert 1 */
@@ -511,8 +535,18 @@ static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = {
OSAL_NULL, MAX_BLOCK_ID},
{"General Attention 35", ATTENTION_SINGLE | ATTENTION_CLEAR_ENABLE,
ecore_general_attention_35, MAX_BLOCK_ID},
- {"CNIG port %d", (4 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
- BLOCK_CNIG},
+ {"NWS Parity", ATTENTION_PAR | ATTENTION_BB_DIFFERENT |
+ ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_0),
+ OSAL_NULL, BLOCK_NWS},
+ {"NWS Interrupt", ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT |
+ ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_1),
+ OSAL_NULL, BLOCK_NWS},
+ {"NWM Parity", ATTENTION_PAR | ATTENTION_BB_DIFFERENT |
+ ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_2),
+ OSAL_NULL, BLOCK_NWM},
+ {"NWM Interrupt", ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT |
+ ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_3),
+ OSAL_NULL, BLOCK_NWM},
{"MCP CPU", ATTENTION_SINGLE, ecore_mcp_attn_cb, MAX_BLOCK_ID},
{"MCP Watchdog timer", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
{"MCP M2P", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
@@ -634,6 +668,27 @@ static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = {
};
+static struct aeu_invert_reg_bit *
+ecore_int_aeu_translate(struct ecore_hwfn *p_hwfn,
+ struct aeu_invert_reg_bit *p_bit)
+{
+ if (!ECORE_IS_BB(p_hwfn->p_dev))
+ return p_bit;
+
+ if (!(p_bit->flags & ATTENTION_BB_DIFFERENT))
+ return p_bit;
+
+ return &aeu_descs_special[(p_bit->flags & ATTENTION_BB_MASK) >>
+ ATTENTION_BB_SHIFT];
+}
+
+static bool ecore_int_is_parity_flag(struct ecore_hwfn *p_hwfn,
+ struct aeu_invert_reg_bit *p_bit)
+{
+ return !!(ecore_int_aeu_translate(p_hwfn, p_bit)->flags &
+ ATTENTION_PARITY);
+}
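
The BB-specific index is packed into bits 20-22 of the descriptor flags. A short
expansion (my arithmetic, not part of the patch) shows how ATTENTION_BB()
round-trips through the mask:

    /* ATTENTION_BB(2) == 2 << 20 == 0x00200000, which lies inside
     * ATTENTION_BB_MASK (0x00700000); unpacking recovers the index.
     */
    unsigned int flags = ATTENTION_PAR | ATTENTION_BB_DIFFERENT |
                         ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_2);
    unsigned int idx = (flags & ATTENTION_BB_MASK) >> ATTENTION_BB_SHIFT;
    /* idx == AEU_INVERT_REG_SPECIAL_CNIG_2 == 2 */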
+
#define ATTN_STATE_BITS (0xfff)
#define ATTN_BITS_MASKABLE (0x3ff)
struct ecore_sb_attn_info {
@@ -868,7 +923,7 @@ static enum _ecore_status_t ecore_int_deassertion(struct ecore_hwfn *p_hwfn,
for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j];
- if ((p_bit->flags & ATTENTION_PARITY) &&
+ if (ecore_int_is_parity_flag(p_hwfn, p_bit) &&
!!(parities & (1 << bit_idx))) {
ecore_int_deassertion_parity(p_hwfn, p_bit,
bit_idx);
@@ -905,26 +960,29 @@ static enum _ecore_status_t ecore_int_deassertion(struct ecore_hwfn *p_hwfn,
unsigned long int bitmask;
u8 bit, bit_len;
+ /* Need to account for bits with changed meaning */
p_aeu = &sb_attn_sw->p_aeu_desc[i].bits[j];
- /* No need to handle attention-only bits */
- if (p_aeu->flags == ATTENTION_PAR)
- continue;
-
bit = bit_idx;
bit_len = ATTENTION_LENGTH(p_aeu->flags);
- if (p_aeu->flags & ATTENTION_PAR_INT) {
+ if (ecore_int_is_parity_flag(p_hwfn, p_aeu)) {
/* Skip Parity */
bit++;
bit_len--;
}
+ /* Find the bits relating to the HW-block, then
+ * shift them so they become the LSB.
+ */
bitmask = bits & (((1 << bit_len) - 1) << bit);
+ bitmask >>= bit;
+
if (bitmask) {
u32 flags = p_aeu->flags;
char bit_name[30];
+ u8 num;
- bit = (u8)OSAL_FIND_FIRST_BIT(&bitmask,
+ num = (u8)OSAL_FIND_FIRST_BIT(&bitmask,
bit_len);
/* Some bits represent more than a
@@ -936,11 +994,17 @@ static enum _ecore_status_t ecore_int_deassertion(struct ecore_hwfn *p_hwfn,
ATTENTION_LENGTH(flags) > 1))
OSAL_SNPRINTF(bit_name, 30,
p_aeu->bit_name,
- bit);
+ num);
else
OSAL_STRNCPY(bit_name,
p_aeu->bit_name,
30);
+
+ /* We now need to pass the bitmask in its
+ * correct position.
+ */
+ bitmask <<= bit;
+
/* Handle source of the attention */
ecore_int_deassertion_aeu_bit(p_hwfn,
p_aeu,
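
The shift-down/shift-up handling in this hunk is easiest to follow with concrete
numbers; a worked example with invented values:

    /* Suppose a 4-bit attention source occupies bits 8..11 and the raw
     * deasserted bits are 0x0600 (bits 9 and 10 set).
     */
    unsigned long bits = 0x0600;
    u8 bit = 8, bit_len = 4;
    unsigned long bitmask = bits & (((1 << bit_len) - 1) << bit); /* 0x0600 */

    bitmask >>= bit; /* 0x6 - HW-block bits are now LSB-aligned, so
                      * OSAL_FIND_FIRST_BIT(&bitmask, bit_len) yields 1,
                      * the index used for "%d"-style bit names.
                      */
    bitmask <<= bit; /* 0x0600 - restored before reporting the source */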
@@ -1203,12 +1267,13 @@ static void ecore_int_sb_attn_init(struct ecore_hwfn *p_hwfn,
for (i = 0; i < NUM_ATTN_REGS; i++) {
/* j is array index, k is bit index */
for (j = 0, k = 0; k < 32; j++) {
- unsigned int flags = aeu_descs[i].bits[j].flags;
+ struct aeu_invert_reg_bit *p_aeu;
- if (flags & ATTENTION_PARITY)
+ p_aeu = &aeu_descs[i].bits[j];
+ if (ecore_int_is_parity_flag(p_hwfn, p_aeu))
sb_info->parity_mask[i] |= 1 << k;
- k += ATTENTION_LENGTH(flags);
+ k += ATTENTION_LENGTH(p_aeu->flags);
}
DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
"Attn Mask [Reg %d]: 0x%08x\n",
@@ -1234,7 +1299,7 @@ static enum _ecore_status_t ecore_int_sb_attn_alloc(struct ecore_hwfn *p_hwfn,
p_sb = OSAL_ALLOC(p_dev, GFP_KERNEL, sizeof(*p_sb));
if (!p_sb) {
DP_NOTICE(p_dev, true,
- "Failed to allocate `struct ecore_sb_attn_info'");
+ "Failed to allocate `struct ecore_sb_attn_info'\n");
return ECORE_NOMEM;
}
@@ -1243,7 +1308,7 @@ static enum _ecore_status_t ecore_int_sb_attn_alloc(struct ecore_hwfn *p_hwfn,
SB_ATTN_ALIGNED_SIZE(p_hwfn));
if (!p_virt) {
DP_NOTICE(p_dev, true,
- "Failed to allocate status block (attentions)");
+ "Failed to allocate status block (attentions)\n");
OSAL_FREE(p_dev, p_sb);
return ECORE_NOMEM;
}
@@ -1964,6 +2029,31 @@ enum _ecore_status_t ecore_int_igu_read_cam(struct ecore_hwfn *p_hwfn,
}
}
}
+
+ /* There's a possibility the igu_sb_cnt_iov doesn't properly reflect
+ * the number of VF SBs [especially for the first VF on the engine, as we
+ * can't differentiate between empty entries and its entries].
+ * Since we don't really support more SBs than VFs today, prevent any
+ * such configuration by sanitizing the number of SBs to equal the
+ * number of VFs.
+ */
+ if (IS_PF_SRIOV(p_hwfn)) {
+ u16 total_vfs = p_hwfn->p_dev->p_iov_info->total_vfs;
+
+ if (total_vfs < p_igu_info->free_blks) {
+ DP_VERBOSE(p_hwfn, (ECORE_MSG_INTR | ECORE_MSG_IOV),
+ "Limiting number of SBs for IOV - %04x --> %04x\n",
+ p_igu_info->free_blks,
+ p_hwfn->p_dev->p_iov_info->total_vfs);
+ p_igu_info->free_blks = total_vfs;
+ } else if (total_vfs > p_igu_info->free_blks) {
+ DP_NOTICE(p_hwfn, true,
+ "IGU has only %04x SBs for VFs while the device has %04x VFs\n",
+ p_igu_info->free_blks, total_vfs);
+ return ECORE_INVAL;
+ }
+ }
+
p_igu_info->igu_sb_cnt_iov = p_igu_info->free_blks;
DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
@@ -2092,24 +2182,6 @@ void ecore_int_get_num_sbs(struct ecore_hwfn *p_hwfn,
p_sb_cnt_info->sb_free_blk = info->free_blks;
}
-u16 ecore_int_queue_id_from_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id)
-{
- struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
-
- /* Determine origin of SB id */
- if ((sb_id >= p_info->igu_base_sb) &&
- (sb_id < p_info->igu_base_sb + p_info->igu_sb_cnt)) {
- return sb_id - p_info->igu_base_sb;
- } else if ((sb_id >= p_info->igu_base_sb_iov) &&
- (sb_id < p_info->igu_base_sb_iov + p_info->igu_sb_cnt_iov)) {
- return sb_id - p_info->igu_base_sb_iov + p_info->igu_sb_cnt;
- } else {
- DP_NOTICE(p_hwfn, true, "SB %d not in range for function\n",
- sb_id);
- return 0;
- }
-}
-
void ecore_int_disable_post_isr_release(struct ecore_dev *p_dev)
{
int i;
@@ -2127,8 +2199,8 @@ enum _ecore_status_t ecore_int_set_timer_res(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u8 timer_res, u16 sb_id, bool tx)
{
- enum _ecore_status_t rc;
struct cau_sb_entry sb_entry;
+ enum _ecore_status_t rc;
if (!p_hwfn->hw_init_done) {
DP_ERR(p_hwfn, "hardware not initialized yet\n");
@@ -2159,3 +2231,30 @@ enum _ecore_status_t ecore_int_set_timer_res(struct ecore_hwfn *p_hwfn,
return rc;
}
+
+enum _ecore_status_t ecore_int_get_sb_dbg(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_sb_info *p_sb,
+ struct ecore_sb_info_dbg *p_info)
+{
+ u16 sbid = p_sb->igu_sb_id;
+ int i;
+
+ if (IS_VF(p_hwfn->p_dev))
+ return ECORE_INVAL;
+
+ if (sbid > NUM_OF_SBS(p_hwfn->p_dev))
+ return ECORE_INVAL;
+
+ p_info->igu_prod = ecore_rd(p_hwfn, p_ptt,
+ IGU_REG_PRODUCER_MEMORY + sbid * 4);
+ p_info->igu_cons = ecore_rd(p_hwfn, p_ptt,
+ IGU_REG_CONSUMER_MEM + sbid * 4);
+
+ for (i = 0; i < PIS_PER_SB; i++)
+ p_info->pi[i] = (u16)ecore_rd(p_hwfn, p_ptt,
+ CAU_REG_PI_MEMORY +
+ sbid * 4 * PIS_PER_SB + i * 4);
+
+ return ECORE_SUCCESS;
+}
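
A hedged caller-side sketch of the new debug helper (the sb_info variable and the
log format are invented for illustration):

    struct ecore_sb_info_dbg dbg;
    int i;

    if (ecore_int_get_sb_dbg(p_hwfn, p_ptt, &sb_info, &dbg) == ECORE_SUCCESS) {
            DP_NOTICE(p_hwfn, false, "IGU prod %08x cons %08x\n",
                      dbg.igu_prod, dbg.igu_cons);
            for (i = 0; i < PIS_PER_SB; i++)
                    DP_NOTICE(p_hwfn, false, "PI[%d] = %04x\n",
                              i, dbg.pi[i]);
    }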
diff --git a/drivers/net/qede/base/ecore_int.h b/drivers/net/qede/base/ecore_int.h
index 45358b94..0c8929e3 100644
--- a/drivers/net/qede/base/ecore_int.h
+++ b/drivers/net/qede/base/ecore_int.h
@@ -172,16 +172,6 @@ void ecore_int_free(struct ecore_hwfn *p_hwfn);
void ecore_int_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
/**
- * @brief - Returns an Rx queue index appropriate for usage with given SB.
- *
- * @param p_hwfn
- * @param sb_id - absolute index of SB
- *
- * @return index of Rx queue
- */
-u16 ecore_int_queue_id_from_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id);
-
-/**
* @brief - Enable Interrupt & Attention for hw function
*
* @param p_hwfn
diff --git a/drivers/net/qede/base/ecore_int_api.h b/drivers/net/qede/base/ecore_int_api.h
index fc873e77..799fbe82 100644
--- a/drivers/net/qede/base/ecore_int_api.h
+++ b/drivers/net/qede/base/ecore_int_api.h
@@ -41,6 +41,12 @@ struct ecore_sb_info {
struct ecore_dev *p_dev;
};
+struct ecore_sb_info_dbg {
+ u32 igu_prod;
+ u32 igu_cons;
+ u16 pi[PIS_PER_SB];
+};
+
struct ecore_sb_cnt_info {
int sb_cnt;
int sb_iov_cnt;
@@ -108,7 +114,7 @@ static OSAL_INLINE void __internal_ram_wr(struct ecore_hwfn *p_hwfn,
void OSAL_IOMEM *addr,
int size, u32 *data)
#else
-static OSAL_INLINE void __internal_ram_wr(void *p_hwfn,
+static OSAL_INLINE void __internal_ram_wr(__rte_unused void *p_hwfn,
void OSAL_IOMEM *addr,
int size, u32 *data)
#endif
@@ -120,19 +126,37 @@ static OSAL_INLINE void __internal_ram_wr(void *p_hwfn,
}
#ifdef ECORE_CONFIG_DIRECT_HWFN
+static OSAL_INLINE void __internal_ram_wr_relaxed(struct ecore_hwfn *p_hwfn,
+ void OSAL_IOMEM * addr,
+ int size, u32 *data)
+#else
+static OSAL_INLINE void __internal_ram_wr_relaxed(__rte_unused void *p_hwfn,
+ void OSAL_IOMEM * addr,
+ int size, u32 *data)
+#endif
+{
+ unsigned int i;
+
+ for (i = 0; i < size / sizeof(*data); i++)
+ DIRECT_REG_WR_RELAXED(p_hwfn, &((u32 OSAL_IOMEM *)addr)[i],
+ data[i]);
+}
+
+#ifdef ECORE_CONFIG_DIRECT_HWFN
static OSAL_INLINE void internal_ram_wr(struct ecore_hwfn *p_hwfn,
- void OSAL_IOMEM *addr,
- int size, u32 *data)
+ void OSAL_IOMEM * addr,
+ int size, u32 *data)
{
- __internal_ram_wr(p_hwfn, addr, size, data);
+ __internal_ram_wr_relaxed(p_hwfn, addr, size, data);
}
#else
static OSAL_INLINE void internal_ram_wr(void OSAL_IOMEM *addr,
- int size, u32 *data)
+ int size, u32 *data)
{
- __internal_ram_wr(OSAL_NULL, addr, size, data);
+ __internal_ram_wr_relaxed(OSAL_NULL, addr, size, data);
}
#endif
+
#endif
struct ecore_hwfn;
@@ -285,4 +309,19 @@ void ecore_int_disable_post_isr_release(struct ecore_dev *p_dev);
*/
void ecore_int_attn_clr_enable(struct ecore_dev *p_dev, bool clr_enable);
+/**
+ * @brief Read debug information regarding a given SB.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_sb - point to Status block for which we want to get info.
+ * @param p_info - pointer to struct to fill with information regarding SB.
+ *
+ * @return ECORE_SUCCESS if pointer is filled; failure otherwise.
+ */
+enum _ecore_status_t ecore_int_get_sb_dbg(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_sb_info *p_sb,
+ struct ecore_sb_info_dbg *p_info);
+
#endif
diff --git a/drivers/net/qede/base/ecore_iov_api.h b/drivers/net/qede/base/ecore_iov_api.h
index bb8df82f..50cb3f2b 100644
--- a/drivers/net/qede/base/ecore_iov_api.h
+++ b/drivers/net/qede/base/ecore_iov_api.h
@@ -52,6 +52,7 @@ enum ecore_iov_pf_to_vf_status {
PFVF_STATUS_NOT_SUPPORTED,
PFVF_STATUS_NO_RESOURCE,
PFVF_STATUS_FORCED,
+ PFVF_STATUS_MALICIOUS,
};
struct ecore_mcp_link_params;
@@ -87,6 +88,28 @@ struct ecore_public_vf_info {
u16 forced_vlan;
};
+struct ecore_iov_vf_init_params {
+ u16 rel_vf_id;
+
+ /* Number of requested queues; currently, we don't support different
+ * numbers of Rx/Tx queues.
+ */
+ /* TODO - remove this limitation */
+ u16 num_queues;
+
+ /* Allow the client to choose which qzones to use for Rx/Tx,
+ * and which queue_base to use for Tx queues on a per-queue basis.
+ * Notice values should be relative to the PF resources.
+ */
+ u16 req_rx_queue[ECORE_MAX_VF_CHAINS_PER_PF];
+ u16 req_tx_queue[ECORE_MAX_VF_CHAINS_PER_PF];
+
+ u8 vport_id;
+
+ /* Should be set in case RSS is going to be used for VF */
+ u8 rss_eng_id;
+};
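
A hedged example of how a PF might fill the new parameter block before calling
ecore_iov_init_hw_for_vf() (all values are illustrative):

    struct ecore_iov_vf_init_params params;
    enum _ecore_status_t rc;
    u16 i;

    OSAL_MEMSET(&params, 0, sizeof(params));
    params.rel_vf_id = 0;           /* first VF                        */
    params.num_queues = 2;          /* same count used for Rx and Tx   */
    params.vport_id = 1;            /* PF-relative vport               */
    params.rss_eng_id = 1;          /* only needed if the VF uses RSS  */
    for (i = 0; i < params.num_queues; i++) {
            params.req_rx_queue[i] = i;     /* PF-relative qzones      */
            params.req_tx_queue[i] = i;
    }
    rc = ecore_iov_init_hw_for_vf(p_hwfn, p_ptt, &params);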
+
#ifdef CONFIG_ECORE_SW_CHANNEL
/* This is SW channel related only... */
enum mbx_state {
@@ -174,15 +197,14 @@ void ecore_iov_set_vf_to_disable(struct ecore_dev *p_dev,
*
* @param p_hwfn
* @param p_ptt
- * @param rel_vf_id
- * @param num_rx_queues
+ * @param p_params
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
- u16 rel_vf_id,
- u16 num_rx_queues);
+ struct ecore_iov_vf_init_params
+ *p_params);
/**
* @brief ecore_iov_process_mbx_req - process a request received
@@ -301,12 +323,13 @@ bool ecore_iov_is_vf_pending_flr(struct ecore_hwfn *p_hwfn,
* @param p_hwfn
* @param rel_vf_id - Relative VF ID
* @param b_enabled_only - consider only enabled VF
+ * @param b_non_malicious - true iff we want to validate the VF isn't malicious.
*
* @return bool - true for valid VF ID
*/
bool ecore_iov_is_valid_vfid(struct ecore_hwfn *p_hwfn,
int rel_vf_id,
- bool b_enabled_only);
+ bool b_enabled_only, bool b_non_malicious);
/**
* @brief Get VF's public info structure
@@ -399,16 +422,6 @@ void ecore_iov_get_vfs_opaque_fid(struct ecore_hwfn *p_hwfn, int vfid,
u16 *opaque_fid);
/**
- * @brief Get VFs VPORT id.
- *
- * @param p_hwfn
- * @param vfid
- * @param vport id
- */
-void ecore_iov_get_vfs_vport_id(struct ecore_hwfn *p_hwfn, int vfid,
- u8 *p_vport_id);
-
-/**
* @brief Set forced VLAN [pvid] in PFs copy of bulletin board
* and configures FW/HW to support the configuration.
* Setting of pvid 0 would clear the feature.
@@ -662,24 +675,24 @@ bool ecore_iov_is_vf_initialized(struct ecore_hwfn *p_hwfn,
u16 rel_vf_id);
/**
- * @brief - Get VF's vport min rate configured.
+ * @brief - Return true if the VF has started in FW
+ *
* @param p_hwfn
* @param rel_vf_id
*
- * @return - rate in Mbps
+ * @return
*/
-int ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid);
+bool ecore_iov_is_vf_started(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id);
/**
- * @brief - Configure min rate for VF's vport.
- * @param p_dev
- * @param vfid
- * @param - rate in Mbps
+ * @brief - Get VF's vport min rate configured.
+ * @param p_hwfn
+ * @param rel_vf_id
*
- * @return
+ * @return - rate in Mbps
*/
-enum _ecore_status_t ecore_iov_configure_min_tx_rate(struct ecore_dev *p_dev,
- int vfid, u32 rate);
+int ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid);
#endif
/**
@@ -688,15 +701,17 @@ enum _ecore_status_t ecore_iov_configure_min_tx_rate(struct ecore_dev *p_dev,
* @param p_hwfn
* @param rel_vf_id
*
- * @return MAX_NUM_VFS in case no further active VFs, otherwise index.
+ * @return E4_MAX_NUM_VFS in case no further active VFs, otherwise index.
*/
u16 ecore_iov_get_next_active_vf(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);
+void ecore_iov_bulletin_set_udp_ports(struct ecore_hwfn *p_hwfn, int vfid,
+ u16 vxlan_port, u16 geneve_port);
#endif /* CONFIG_ECORE_SRIOV */
#define ecore_for_each_vf(_p_hwfn, _i) \
for (_i = ecore_iov_get_next_active_vf(_p_hwfn, 0); \
- _i < MAX_NUM_VFS; \
+ _i < E4_MAX_NUM_VFS; \
_i = ecore_iov_get_next_active_vf(_p_hwfn, _i + 1))
#endif
diff --git a/drivers/net/qede/base/ecore_iro.h b/drivers/net/qede/base/ecore_iro.h
index aad90123..b4bfe89f 100644
--- a/drivers/net/qede/base/ecore_iro.h
+++ b/drivers/net/qede/base/ecore_iro.h
@@ -185,5 +185,13 @@
#define TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) (IRO[46].base + \
((rdma_stat_counter_id) * IRO[46].m1))
#define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[46].size)
+/* Xstorm iWARP rxmit stats */
+#define XSTORM_IWARP_RXMIT_STATS_OFFSET(pf_id) (IRO[47].base + \
+ ((pf_id) * IRO[47].m1))
+#define XSTORM_IWARP_RXMIT_STATS_SIZE (IRO[47].size)
+/* Tstorm RoCE Event Statistics */
+#define TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) (IRO[48].base + \
+ ((roce_pf_id) * IRO[48].m1))
+#define TSTORM_ROCE_EVENTS_STAT_SIZE (IRO[48].size)
#endif /* __IRO_H__ */
diff --git a/drivers/net/qede/base/ecore_iro_values.h b/drivers/net/qede/base/ecore_iro_values.h
index 43e01e47..6764bfa6 100644
--- a/drivers/net/qede/base/ecore_iro_values.h
+++ b/drivers/net/qede/base/ecore_iro_values.h
@@ -9,13 +9,13 @@
#ifndef __IRO_VALUES_H__
#define __IRO_VALUES_H__
-static const struct iro iro_arr[47] = {
+static const struct iro iro_arr[49] = {
/* YSTORM_FLOW_CONTROL_MODE_OFFSET */
{ 0x0, 0x0, 0x0, 0x0, 0x8},
/* TSTORM_PORT_STAT_OFFSET(port_id) */
- { 0x4cb0, 0x78, 0x0, 0x0, 0x78},
+ { 0x4cb0, 0x80, 0x0, 0x0, 0x80},
/* TSTORM_LL2_PORT_STAT_OFFSET(port_id) */
- { 0x6318, 0x20, 0x0, 0x0, 0x20},
+ { 0x6518, 0x20, 0x0, 0x0, 0x20},
/* USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) */
{ 0xb00, 0x8, 0x0, 0x0, 0x4},
/* USTORM_FLR_FINAL_ACK_OFFSET(pf_id) */
@@ -41,7 +41,7 @@ static const struct iro iro_arr[47] = {
/* TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) */
{ 0xa28, 0x8, 0x0, 0x0, 0x8},
/* CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) */
- { 0x60f8, 0x10, 0x0, 0x0, 0x10},
+ { 0x61f8, 0x10, 0x0, 0x0, 0x10},
/* CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) */
{ 0xb820, 0x30, 0x0, 0x0, 0x30},
/* CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) */
@@ -53,7 +53,7 @@ static const struct iro iro_arr[47] = {
/* MSTORM_ETH_VF_PRODS_OFFSET(vf_id,vf_queue_id) */
{ 0x53a0, 0x80, 0x4, 0x0, 0x4},
/* MSTORM_TPA_TIMEOUT_US_OFFSET */
- { 0xc8f0, 0x0, 0x0, 0x0, 0x4},
+ { 0xc7c8, 0x0, 0x0, 0x0, 0x4},
/* MSTORM_ETH_PF_STAT_OFFSET(pf_id) */
{ 0x4ba0, 0x80, 0x0, 0x0, 0x20},
/* USTORM_QUEUE_STAT_OFFSET(stat_counter_id) */
@@ -63,13 +63,13 @@ static const struct iro iro_arr[47] = {
/* PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) */
{ 0x2b48, 0x80, 0x0, 0x0, 0x38},
/* PSTORM_ETH_PF_STAT_OFFSET(pf_id) */
- { 0xf188, 0x78, 0x0, 0x0, 0x78},
+ { 0xf1b0, 0x78, 0x0, 0x0, 0x78},
/* PSTORM_CTL_FRAME_ETHTYPE_OFFSET(ethType_id) */
{ 0x1f8, 0x4, 0x0, 0x0, 0x4},
/* TSTORM_ETH_PRS_INPUT_OFFSET */
- { 0xacf0, 0x0, 0x0, 0x0, 0xf0},
+ { 0xaef8, 0x0, 0x0, 0x0, 0xf0},
/* ETH_RX_RATE_LIMIT_OFFSET(pf_id) */
- { 0xade0, 0x8, 0x0, 0x0, 0x8},
+ { 0xafe8, 0x8, 0x0, 0x0, 0x8},
/* XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) */
{ 0x1f8, 0x8, 0x0, 0x0, 0x8},
/* YSTORM_TOE_CQ_PROD_OFFSET(rss_id) */
@@ -85,25 +85,29 @@ static const struct iro iro_arr[47] = {
/* MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id,bdq_id) */
{ 0xb78, 0x10, 0x8, 0x0, 0x2},
/* TSTORM_ISCSI_RX_STATS_OFFSET(pf_id) */
- { 0xd888, 0x38, 0x0, 0x0, 0x24},
+ { 0xd9a8, 0x38, 0x0, 0x0, 0x24},
/* MSTORM_ISCSI_RX_STATS_OFFSET(pf_id) */
- { 0x12c38, 0x10, 0x0, 0x0, 0x8},
+ { 0x12988, 0x10, 0x0, 0x0, 0x8},
/* USTORM_ISCSI_RX_STATS_OFFSET(pf_id) */
{ 0x11aa0, 0x38, 0x0, 0x0, 0x18},
/* XSTORM_ISCSI_TX_STATS_OFFSET(pf_id) */
- { 0xa8c0, 0x30, 0x0, 0x0, 0x10},
+ { 0xa8c0, 0x38, 0x0, 0x0, 0x10},
/* YSTORM_ISCSI_TX_STATS_OFFSET(pf_id) */
- { 0x86f8, 0x28, 0x0, 0x0, 0x18},
+ { 0x86f8, 0x30, 0x0, 0x0, 0x18},
/* PSTORM_ISCSI_TX_STATS_OFFSET(pf_id) */
{ 0x101f8, 0x10, 0x0, 0x0, 0x10},
/* TSTORM_FCOE_RX_STATS_OFFSET(pf_id) */
- { 0xdd08, 0x48, 0x0, 0x0, 0x38},
+ { 0xde28, 0x48, 0x0, 0x0, 0x38},
/* PSTORM_FCOE_TX_STATS_OFFSET(pf_id) */
{ 0x10660, 0x20, 0x0, 0x0, 0x20},
/* PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) */
{ 0x2b80, 0x80, 0x0, 0x0, 0x10},
/* TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) */
- { 0x5000, 0x10, 0x0, 0x0, 0x10},
+ { 0x5020, 0x10, 0x0, 0x0, 0x10},
+/* XSTORM_IWARP_RXMIT_STATS_OFFSET(pf_id) */
+ { 0xc9b0, 0x30, 0x0, 0x0, 0x10},
+/* TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) */
+ { 0xeec0, 0x10, 0x0, 0x0, 0x10},
};
#endif /* __IRO_VALUES_H__ */
diff --git a/drivers/net/qede/base/ecore_l2.c b/drivers/net/qede/base/ecore_l2.c
index 74f61b00..4ab8fd5f 100644
--- a/drivers/net/qede/base/ecore_l2.c
+++ b/drivers/net/qede/base/ecore_l2.c
@@ -29,6 +29,306 @@
#define ECORE_MAX_SGES_NUM 16
#define CRC32_POLY 0x1edc6f41
+struct ecore_l2_info {
+ u32 queues;
+ unsigned long **pp_qid_usage;
+
+ /* The lock is meant to synchronize access to the qid usage */
+ osal_mutex_t lock;
+};
+
+enum _ecore_status_t ecore_l2_alloc(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_l2_info *p_l2_info;
+ unsigned long **pp_qids;
+ u32 i;
+
+ if (!ECORE_IS_L2_PERSONALITY(p_hwfn))
+ return ECORE_SUCCESS;
+
+ p_l2_info = OSAL_VZALLOC(p_hwfn->p_dev, sizeof(*p_l2_info));
+ if (!p_l2_info)
+ return ECORE_NOMEM;
+ p_hwfn->p_l2_info = p_l2_info;
+
+ if (IS_PF(p_hwfn->p_dev)) {
+ p_l2_info->queues = RESC_NUM(p_hwfn, ECORE_L2_QUEUE);
+ } else {
+ u8 rx = 0, tx = 0;
+
+ ecore_vf_get_num_rxqs(p_hwfn, &rx);
+ ecore_vf_get_num_txqs(p_hwfn, &tx);
+
+ p_l2_info->queues = (u32)OSAL_MAX_T(u8, rx, tx);
+ }
+
+ pp_qids = OSAL_VZALLOC(p_hwfn->p_dev,
+ sizeof(unsigned long *) *
+ p_l2_info->queues);
+ if (pp_qids == OSAL_NULL)
+ return ECORE_NOMEM;
+ p_l2_info->pp_qid_usage = pp_qids;
+
+ for (i = 0; i < p_l2_info->queues; i++) {
+ pp_qids[i] = OSAL_VZALLOC(p_hwfn->p_dev,
+ MAX_QUEUES_PER_QZONE / 8);
+ if (pp_qids[i] == OSAL_NULL)
+ return ECORE_NOMEM;
+ }
+
+#ifdef CONFIG_ECORE_LOCK_ALLOC
+ OSAL_MUTEX_ALLOC(p_hwfn, &p_l2_info->lock);
+#endif
+
+ return ECORE_SUCCESS;
+}
+
+void ecore_l2_setup(struct ecore_hwfn *p_hwfn)
+{
+ if (!ECORE_IS_L2_PERSONALITY(p_hwfn))
+ return;
+
+ OSAL_MUTEX_INIT(&p_hwfn->p_l2_info->lock);
+}
+
+void ecore_l2_free(struct ecore_hwfn *p_hwfn)
+{
+ u32 i;
+
+ if (!ECORE_IS_L2_PERSONALITY(p_hwfn))
+ return;
+
+ if (p_hwfn->p_l2_info == OSAL_NULL)
+ return;
+
+ if (p_hwfn->p_l2_info->pp_qid_usage == OSAL_NULL)
+ goto out_l2_info;
+
+ /* Free until hit first uninitialized entry */
+ for (i = 0; i < p_hwfn->p_l2_info->queues; i++) {
+ if (p_hwfn->p_l2_info->pp_qid_usage[i] == OSAL_NULL)
+ break;
+ OSAL_VFREE(p_hwfn->p_dev,
+ p_hwfn->p_l2_info->pp_qid_usage[i]);
+ }
+
+#ifdef CONFIG_ECORE_LOCK_ALLOC
+ /* Lock is last to initialize, if everything else was */
+ if (i == p_hwfn->p_l2_info->queues)
+ OSAL_MUTEX_DEALLOC(&p_hwfn->p_l2_info->lock);
+#endif
+
+ OSAL_VFREE(p_hwfn->p_dev, p_hwfn->p_l2_info->pp_qid_usage);
+
+out_l2_info:
+ OSAL_VFREE(p_hwfn->p_dev, p_hwfn->p_l2_info);
+ p_hwfn->p_l2_info = OSAL_NULL;
+}
+
+/* TODO - we'll need locking around these... */
+static bool ecore_eth_queue_qid_usage_add(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid)
+{
+ struct ecore_l2_info *p_l2_info = p_hwfn->p_l2_info;
+ u16 queue_id = p_cid->rel.queue_id;
+ bool b_rc = true;
+ u8 first;
+
+ OSAL_MUTEX_ACQUIRE(&p_l2_info->lock);
+
+ if (queue_id > p_l2_info->queues) {
+ DP_NOTICE(p_hwfn, true,
+ "Requested to increase usage for qzone %04x out of %08x\n",
+ queue_id, p_l2_info->queues);
+ b_rc = false;
+ goto out;
+ }
+
+ first = (u8)OSAL_FIND_FIRST_ZERO_BIT(p_l2_info->pp_qid_usage[queue_id],
+ MAX_QUEUES_PER_QZONE);
+ if (first >= MAX_QUEUES_PER_QZONE) {
+ b_rc = false;
+ goto out;
+ }
+
+ OSAL_SET_BIT(first, p_l2_info->pp_qid_usage[queue_id]);
+ p_cid->qid_usage_idx = first;
+
+out:
+ OSAL_MUTEX_RELEASE(&p_l2_info->lock);
+ return b_rc;
+}
+
+static void ecore_eth_queue_qid_usage_del(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid)
+{
+ OSAL_MUTEX_ACQUIRE(&p_hwfn->p_l2_info->lock);
+
+ OSAL_CLEAR_BIT(p_cid->qid_usage_idx,
+ p_hwfn->p_l2_info->pp_qid_usage[p_cid->rel.queue_id]);
+
+ OSAL_MUTEX_RELEASE(&p_hwfn->p_l2_info->lock);
+}
+
+void ecore_eth_queue_cid_release(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid)
+{
+ /* For VF-queues, things are a bit more involved as:
+ * - They always maintain the qid_usage on their own.
+ * - In legacy mode, they also maintain their CIDs.
+ */
+
+ /* VFs' CIDs are 0-based in PF-view, and uninitialized on VF */
+ if (IS_PF(p_hwfn->p_dev) && !p_cid->b_legacy_vf)
+ _ecore_cxt_release_cid(p_hwfn, p_cid->cid, p_cid->vfid);
+ if (!p_cid->b_legacy_vf)
+ ecore_eth_queue_qid_usage_del(p_hwfn, p_cid);
+ OSAL_VFREE(p_hwfn->p_dev, p_cid);
+}
+
+/* The internal variant is only meant to be called directly by PFs
+ * initializing CIDs for their VFs.
+ */
+static struct ecore_queue_cid *
+_ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn,
+ u16 opaque_fid, u32 cid,
+ struct ecore_queue_start_common_params *p_params,
+ struct ecore_queue_cid_vf_params *p_vf_params)
+{
+ struct ecore_queue_cid *p_cid;
+ enum _ecore_status_t rc;
+
+ p_cid = OSAL_VZALLOC(p_hwfn->p_dev, sizeof(*p_cid));
+ if (p_cid == OSAL_NULL)
+ return OSAL_NULL;
+
+ p_cid->opaque_fid = opaque_fid;
+ p_cid->cid = cid;
+ p_cid->rel = *p_params;
+ p_cid->p_owner = p_hwfn;
+
+ /* Fill-in bits related to VFs' queues if information was provided */
+ if (p_vf_params != OSAL_NULL) {
+ p_cid->vfid = p_vf_params->vfid;
+ p_cid->vf_qid = p_vf_params->vf_qid;
+ p_cid->b_legacy_vf = p_vf_params->b_legacy;
+ } else {
+ p_cid->vfid = ECORE_QUEUE_CID_PF;
+ }
+
+ /* Don't try calculating the absolute indices for VFs */
+ if (IS_VF(p_hwfn->p_dev)) {
+ p_cid->abs = p_cid->rel;
+
+ goto out;
+ }
+
+ /* Calculate the engine-absolute indices of the resources.
+ * This would guarantee they're valid later on.
+ * In some cases [SBs] we already have the right values.
+ */
+ rc = ecore_fw_vport(p_hwfn, p_cid->rel.vport_id, &p_cid->abs.vport_id);
+ if (rc != ECORE_SUCCESS)
+ goto fail;
+
+ rc = ecore_fw_l2_queue(p_hwfn, p_cid->rel.queue_id,
+ &p_cid->abs.queue_id);
+ if (rc != ECORE_SUCCESS)
+ goto fail;
+
+ /* In case of a PF configuring its VF's queues, the stats-id is already
+ * absolute [since there's a single index that's suitable per-VF].
+ */
+ if (p_cid->vfid == ECORE_QUEUE_CID_PF) {
+ rc = ecore_fw_vport(p_hwfn, p_cid->rel.stats_id,
+ &p_cid->abs.stats_id);
+ if (rc != ECORE_SUCCESS)
+ goto fail;
+ } else {
+ p_cid->abs.stats_id = p_cid->rel.stats_id;
+ }
+
+ /* SBs relevant information was already provided as absolute */
+ p_cid->abs.sb = p_cid->rel.sb;
+ p_cid->abs.sb_idx = p_cid->rel.sb_idx;
+
+out:
+ /* VF-images have provided the qid_usage_idx on their own.
+ * Otherwise, we need to allocate a unique one.
+ */
+ if (!p_vf_params) {
+ if (!ecore_eth_queue_qid_usage_add(p_hwfn, p_cid))
+ goto fail;
+ } else {
+ p_cid->qid_usage_idx = p_vf_params->qid_usage_idx;
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "opaque_fid: %04x CID %08x vport %02x [%02x] qzone %04x.%02x [%04x] stats %02x [%02x] SB %04x PI %02x\n",
+ p_cid->opaque_fid, p_cid->cid,
+ p_cid->rel.vport_id, p_cid->abs.vport_id,
+ p_cid->rel.queue_id, p_cid->qid_usage_idx,
+ p_cid->abs.queue_id,
+ p_cid->rel.stats_id, p_cid->abs.stats_id,
+ p_cid->abs.sb, p_cid->abs.sb_idx);
+
+ return p_cid;
+
+fail:
+ OSAL_VFREE(p_hwfn->p_dev, p_cid);
+ return OSAL_NULL;
+}
+
+struct ecore_queue_cid *
+ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
+ struct ecore_queue_start_common_params *p_params,
+ struct ecore_queue_cid_vf_params *p_vf_params)
+{
+ struct ecore_queue_cid *p_cid;
+ u8 vfid = ECORE_CXT_PF_CID;
+ bool b_legacy_vf = false;
+ u32 cid = 0;
+
+ /* In case of legacy VFs, the CID can be derived from the additional
+ * VF parameters - the VF assumes queue X uses CID X, so we can simply
+ * use the vf_qid for this purpose as well.
+ */
+ if (p_vf_params) {
+ vfid = p_vf_params->vfid;
+
+ if (p_vf_params->b_legacy) {
+ b_legacy_vf = true;
+ cid = p_vf_params->vf_qid;
+ }
+ }
+
+ /* Get a unique firmware CID for this queue, in case it's a PF.
+ * VFs don't need a CID as the queue configuration will be done
+ * by the PF.
+ */
+ if (IS_PF(p_hwfn->p_dev) && !b_legacy_vf) {
+ if (_ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
+ &cid, vfid) != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, true, "Failed to acquire cid\n");
+ return OSAL_NULL;
+ }
+ }
+
+ p_cid = _ecore_eth_queue_to_cid(p_hwfn, opaque_fid, cid,
+ p_params, p_vf_params);
+ if ((p_cid == OSAL_NULL) && IS_PF(p_hwfn->p_dev) && !b_legacy_vf)
+ _ecore_cxt_release_cid(p_hwfn, cid, vfid);
+
+ return p_cid;
+}
+
+static struct ecore_queue_cid *
+ecore_eth_queue_to_cid_pf(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
+ struct ecore_queue_start_common_params *p_params)
+{
+ return ecore_eth_queue_to_cid(p_hwfn, opaque_fid, p_params, OSAL_NULL);
+}
+
enum _ecore_status_t
ecore_sp_eth_vport_start(struct ecore_hwfn *p_hwfn,
struct ecore_sp_vport_start_params *p_params)
@@ -36,9 +336,9 @@ ecore_sp_eth_vport_start(struct ecore_hwfn *p_hwfn,
struct vport_start_ramrod_data *p_ramrod = OSAL_NULL;
struct ecore_spq_entry *p_ent = OSAL_NULL;
struct ecore_sp_init_data init_data;
+ u16 rx_mode = 0, tx_err = 0;
u8 abs_vport_id = 0;
enum _ecore_status_t rc = ECORE_NOTIMPL;
- u16 rx_mode = 0;
rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
if (rc != ECORE_SUCCESS)
@@ -71,6 +371,30 @@ ecore_sp_eth_vport_start(struct ecore_hwfn *p_hwfn,
p_ramrod->rx_mode.state = OSAL_CPU_TO_LE16(rx_mode);
+ /* Handle requests for strict behavior on transmission errors */
+ SET_FIELD(tx_err, ETH_TX_ERR_VALS_ILLEGAL_VLAN_MODE,
+ p_params->b_err_illegal_vlan_mode ?
+ ETH_TX_ERR_ASSERT_MALICIOUS : 0);
+ SET_FIELD(tx_err, ETH_TX_ERR_VALS_PACKET_TOO_SMALL,
+ p_params->b_err_small_pkt ?
+ ETH_TX_ERR_ASSERT_MALICIOUS : 0);
+ SET_FIELD(tx_err, ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR,
+ p_params->b_err_anti_spoof ?
+ ETH_TX_ERR_ASSERT_MALICIOUS : 0);
+ SET_FIELD(tx_err, ETH_TX_ERR_VALS_ILLEGAL_INBAND_TAGS,
+ p_params->b_err_illegal_inband_mode ?
+ ETH_TX_ERR_ASSERT_MALICIOUS : 0);
+ SET_FIELD(tx_err, ETH_TX_ERR_VALS_VLAN_INSERTION_W_INBAND_TAG,
+ p_params->b_err_vlan_insert_with_inband ?
+ ETH_TX_ERR_ASSERT_MALICIOUS : 0);
+ SET_FIELD(tx_err, ETH_TX_ERR_VALS_MTU_VIOLATION,
+ p_params->b_err_big_pkt ?
+ ETH_TX_ERR_ASSERT_MALICIOUS : 0);
+ SET_FIELD(tx_err, ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME,
+ p_params->b_err_ctrl_frame ?
+ ETH_TX_ERR_ASSERT_MALICIOUS : 0);
+ p_ramrod->tx_err_behav.values = OSAL_CPU_TO_LE16(tx_err);
+
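
Each boolean in the vport-start parameters maps one error class to
ETH_TX_ERR_ASSERT_MALICIOUS in the ramrod. A hedged caller-side sketch (the field
names are from this patch; the chosen values are illustrative):

    struct ecore_sp_vport_start_params start_params;
    enum _ecore_status_t rc;

    OSAL_MEMSET(&start_params, 0, sizeof(start_params));
    start_params.vport_id = 0;
    start_params.b_err_anti_spoof = true; /* spoofed source -> malicious */
    start_params.b_err_big_pkt = true;    /* MTU violation -> malicious  */
    rc = ecore_sp_eth_vport_start(p_hwfn, &start_params);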
/* TPA related fields */
OSAL_MEMSET(&p_ramrod->tpa_param, 0,
sizeof(struct eth_vport_tpa_param));
@@ -129,10 +453,9 @@ ecore_sp_vport_update_rss(struct ecore_hwfn *p_hwfn,
struct vport_update_ramrod_data *p_ramrod,
struct ecore_rss_params *p_rss)
{
- enum _ecore_status_t rc = ECORE_SUCCESS;
struct eth_vport_rss_config *p_config;
- u16 abs_l2_queue = 0;
- int i;
+ int i, table_size;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
if (!p_rss) {
p_ramrod->common.update_rss_flg = 0;
@@ -186,16 +509,40 @@ ecore_sp_vport_update_rss(struct ecore_hwfn *p_hwfn,
p_config->capabilities,
p_config->update_rss_ind_table, p_config->update_rss_key);
- for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
- rc = ecore_fw_l2_queue(p_hwfn,
- (u8)p_rss->rss_ind_table[i],
- &abs_l2_queue);
- if (rc != ECORE_SUCCESS)
- return rc;
+ table_size = OSAL_MIN_T(int, ECORE_RSS_IND_TABLE_SIZE,
+ 1 << p_config->tbl_size);
+ for (i = 0; i < table_size; i++) {
+ struct ecore_queue_cid *p_queue = p_rss->rss_ind_table[i];
+
+ if (!p_queue)
+ return ECORE_INVAL;
+
+ p_config->indirection_table[i] =
+ OSAL_CPU_TO_LE16(p_queue->abs.queue_id);
+ }
- p_config->indirection_table[i] = OSAL_CPU_TO_LE16(abs_l2_queue);
- DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP, "i= %d, queue = %d\n",
- i, p_config->indirection_table[i]);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
+ "Configured RSS indirection table [%d entries]:\n",
+ table_size);
+ for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i += 0x10) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
+ "%04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x\n",
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 1]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 2]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 3]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 4]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 5]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 6]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 7]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 8]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 9]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 10]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 11]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 12]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 13]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 14]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 15]));
}
for (i = 0; i < 10; i++)
@@ -250,8 +597,8 @@ ecore_sp_update_accept_mode(struct ecore_hwfn *p_hwfn,
p_ramrod->rx_mode.state = OSAL_CPU_TO_LE16(state);
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
- "p_ramrod->rx_mode.state = 0x%x\n",
- state);
+ "vport[%02x] p_ramrod->rx_mode.state = 0x%x\n",
+ p_ramrod->common.vport_id, state);
}
/* Set Tx mode accept flags */
@@ -274,8 +621,8 @@ ecore_sp_update_accept_mode(struct ecore_hwfn *p_hwfn,
p_ramrod->tx_mode.state = OSAL_CPU_TO_LE16(state);
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
- "p_ramrod->tx_mode.state = 0x%x\n",
- state);
+ "vport[%02x] p_ramrod->tx_mode.state = 0x%x\n",
+ p_ramrod->common.vport_id, state);
}
}
@@ -534,57 +881,28 @@ ecore_filter_accept_cmd(struct ecore_dev *p_dev,
return 0;
}
-static void ecore_sp_release_queue_cid(struct ecore_hwfn *p_hwfn,
- struct ecore_hw_cid_data *p_cid_data)
-{
- if (!p_cid_data->b_cid_allocated)
- return;
-
- ecore_cxt_release_cid(p_hwfn, p_cid_data->cid);
- p_cid_data->b_cid_allocated = false;
-}
-
enum _ecore_status_t
-ecore_sp_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn,
- u16 opaque_fid,
- u32 cid,
- struct ecore_queue_start_common_params *p_params,
- u16 bd_max_bytes,
- dma_addr_t bd_chain_phys_addr,
- dma_addr_t cqe_pbl_addr,
- u16 cqe_pbl_size, bool b_use_zone_a_prod)
+ecore_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size)
{
struct rx_queue_start_ramrod_data *p_ramrod = OSAL_NULL;
struct ecore_spq_entry *p_ent = OSAL_NULL;
struct ecore_sp_init_data init_data;
- struct ecore_hw_cid_data *p_rx_cid;
- u16 abs_rx_q_id = 0;
- u8 abs_vport_id = 0;
enum _ecore_status_t rc = ECORE_NOTIMPL;
- /* Store information for the stop */
- p_rx_cid = &p_hwfn->p_rx_cids[p_params->queue_id];
- p_rx_cid->cid = cid;
- p_rx_cid->opaque_fid = opaque_fid;
- p_rx_cid->vport_id = p_params->vport_id;
-
- rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
- if (rc != ECORE_SUCCESS)
- return rc;
-
- rc = ecore_fw_l2_queue(p_hwfn, p_params->queue_id, &abs_rx_q_id);
- if (rc != ECORE_SUCCESS)
- return rc;
-
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
- "opaque_fid=0x%x, cid=0x%x, rx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n",
- opaque_fid, cid, p_params->queue_id,
- p_params->vport_id, p_params->sb);
+ "opaque_fid=0x%x, cid=0x%x, rx_qzone=0x%x, vport_id=0x%x, sb_id=0x%x\n",
+ p_cid->opaque_fid, p_cid->cid, p_cid->abs.queue_id,
+ p_cid->abs.vport_id, p_cid->abs.sb);
/* Get SPQ entry */
OSAL_MEMSET(&init_data, 0, sizeof(init_data));
- init_data.cid = cid;
- init_data.opaque_fid = opaque_fid;
+ init_data.cid = p_cid->cid;
+ init_data.opaque_fid = p_cid->opaque_fid;
init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
rc = ecore_sp_init_request(p_hwfn, &p_ent,
@@ -595,11 +913,11 @@ ecore_sp_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn,
p_ramrod = &p_ent->ramrod.rx_queue_start;
- p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_params->sb);
- p_ramrod->sb_index = (u8)p_params->sb_idx;
- p_ramrod->vport_id = abs_vport_id;
- p_ramrod->stats_counter_id = p_params->stats_id;
- p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(abs_rx_q_id);
+ p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_cid->abs.sb);
+ p_ramrod->sb_index = p_cid->abs.sb_idx;
+ p_ramrod->vport_id = p_cid->abs.vport_id;
+ p_ramrod->stats_counter_id = p_cid->abs.stats_id;
+ p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
p_ramrod->complete_cqe_flg = 0;
p_ramrod->complete_event_flg = 1;
@@ -609,92 +927,88 @@ ecore_sp_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn,
p_ramrod->num_of_pbl_pages = OSAL_CPU_TO_LE16(cqe_pbl_size);
DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);
- if (p_params->vf_qid || b_use_zone_a_prod) {
- p_ramrod->vf_rx_prod_index = (u8)p_params->vf_qid;
+ if (p_cid->vfid != ECORE_QUEUE_CID_PF) {
+ p_ramrod->vf_rx_prod_index = p_cid->vf_qid;
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
"Queue%s is meant for VF rxq[%02x]\n",
- b_use_zone_a_prod ? " [legacy]" : "",
- p_params->vf_qid);
- p_ramrod->vf_rx_prod_use_zone_a = b_use_zone_a_prod;
+ !!p_cid->b_legacy_vf ? " [legacy]" : "",
+ p_cid->vf_qid);
+ p_ramrod->vf_rx_prod_use_zone_a = !!p_cid->b_legacy_vf;
}
return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
-enum _ecore_status_t
-ecore_sp_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
- u16 opaque_fid,
- struct ecore_queue_start_common_params *p_params,
+static enum _ecore_status_t
+ecore_eth_pf_rx_queue_start(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid,
u16 bd_max_bytes,
dma_addr_t bd_chain_phys_addr,
dma_addr_t cqe_pbl_addr,
u16 cqe_pbl_size,
void OSAL_IOMEM * *pp_prod)
{
- struct ecore_hw_cid_data *p_rx_cid;
u32 init_prod_val = 0;
- u16 abs_l2_queue = 0;
- u8 abs_stats_id = 0;
- enum _ecore_status_t rc;
- if (IS_VF(p_hwfn->p_dev)) {
- return ecore_vf_pf_rxq_start(p_hwfn,
- p_params->queue_id,
- p_params->sb,
- (u8)p_params->sb_idx,
- bd_max_bytes,
- bd_chain_phys_addr,
- cqe_pbl_addr,
- cqe_pbl_size, pp_prod);
- }
-
- rc = ecore_fw_l2_queue(p_hwfn, p_params->queue_id, &abs_l2_queue);
- if (rc != ECORE_SUCCESS)
- return rc;
-
- rc = ecore_fw_vport(p_hwfn, p_params->stats_id, &abs_stats_id);
- if (rc != ECORE_SUCCESS)
- return rc;
-
- *pp_prod = (u8 OSAL_IOMEM *)p_hwfn->regview +
- GTT_BAR0_MAP_REG_MSDM_RAM +
- MSTORM_ETH_PF_PRODS_OFFSET(abs_l2_queue);
+ *pp_prod = (u8 OSAL_IOMEM *)
+ p_hwfn->regview +
+ GTT_BAR0_MAP_REG_MSDM_RAM +
+ MSTORM_ETH_PF_PRODS_OFFSET(p_cid->abs.queue_id);
/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
(u32 *)(&init_prod_val));
+ return ecore_eth_rxq_start_ramrod(p_hwfn, p_cid,
+ bd_max_bytes,
+ bd_chain_phys_addr,
+ cqe_pbl_addr, cqe_pbl_size);
+}
+
+enum _ecore_status_t
+ecore_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct ecore_queue_start_common_params *p_params,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size,
+ struct ecore_rxq_start_ret_params *p_ret_params)
+{
+ struct ecore_queue_cid *p_cid;
+ enum _ecore_status_t rc;
+
/* Allocate a CID for the queue */
- p_rx_cid = &p_hwfn->p_rx_cids[p_params->queue_id];
- rc = ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
- &p_rx_cid->cid);
- if (rc != ECORE_SUCCESS) {
- DP_NOTICE(p_hwfn, true, "Failed to acquire cid\n");
- return rc;
- }
- p_rx_cid->b_cid_allocated = true;
- p_params->stats_id = abs_stats_id;
- p_params->vf_qid = 0;
-
- rc = ecore_sp_eth_rxq_start_ramrod(p_hwfn,
- opaque_fid,
- p_rx_cid->cid,
- p_params,
+ p_cid = ecore_eth_queue_to_cid_pf(p_hwfn, opaque_fid, p_params);
+ if (p_cid == OSAL_NULL)
+ return ECORE_NOMEM;
+
+ if (IS_PF(p_hwfn->p_dev))
+ rc = ecore_eth_pf_rx_queue_start(p_hwfn, p_cid,
+ bd_max_bytes,
+ bd_chain_phys_addr,
+ cqe_pbl_addr, cqe_pbl_size,
+ &p_ret_params->p_prod);
+ else
+ rc = ecore_vf_pf_rxq_start(p_hwfn, p_cid,
bd_max_bytes,
bd_chain_phys_addr,
cqe_pbl_addr,
cqe_pbl_size,
- false);
+ &p_ret_params->p_prod);
+ /* Provide the caller with a reference to use as a handle */
if (rc != ECORE_SUCCESS)
- ecore_sp_release_queue_cid(p_hwfn, p_rx_cid);
+ ecore_eth_queue_cid_release(p_hwfn, p_cid);
+ else
+ p_ret_params->p_handle = (void *)p_cid;
return rc;
}
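
The reworked API hands the caller an opaque queue handle instead of a queue index;
a hedged start/stop flow (parameter names invented for illustration):

    struct ecore_rxq_start_ret_params ret_params;
    enum _ecore_status_t rc;
    void *rxq_handle;

    rc = ecore_eth_rx_queue_start(p_hwfn, opaque_fid, &q_params,
                                  bd_max_bytes, bd_chain_phys_addr,
                                  cqe_pbl_addr, cqe_pbl_size,
                                  &ret_params);
    if (rc == ECORE_SUCCESS)
            rxq_handle = ret_params.p_handle; /* producer is in .p_prod */
    /* ... later ... */
    rc = ecore_eth_rx_queue_stop(p_hwfn, rxq_handle,
                                 false /* eq_completion_only */,
                                 false /* cqe_completion */);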
enum _ecore_status_t
ecore_sp_eth_rx_queues_update(struct ecore_hwfn *p_hwfn,
- u16 rx_queue_id,
+ void **pp_rxq_handles,
u8 num_rxqs,
u8 complete_cqe_flg,
u8 complete_event_flg,
@@ -704,14 +1018,14 @@ ecore_sp_eth_rx_queues_update(struct ecore_hwfn *p_hwfn,
struct rx_queue_update_ramrod_data *p_ramrod = OSAL_NULL;
struct ecore_spq_entry *p_ent = OSAL_NULL;
struct ecore_sp_init_data init_data;
- struct ecore_hw_cid_data *p_rx_cid;
- u16 qid, abs_rx_q_id = 0;
+ struct ecore_queue_cid *p_cid;
enum _ecore_status_t rc = ECORE_NOTIMPL;
u8 i;
if (IS_VF(p_hwfn->p_dev))
return ecore_vf_pf_rxqs_update(p_hwfn,
- rx_queue_id,
+ (struct ecore_queue_cid **)
+ pp_rxq_handles,
num_rxqs,
complete_cqe_flg,
complete_event_flg);
@@ -721,12 +1035,11 @@ ecore_sp_eth_rx_queues_update(struct ecore_hwfn *p_hwfn,
init_data.p_comp_data = p_comp_data;
for (i = 0; i < num_rxqs; i++) {
- qid = rx_queue_id + i;
- p_rx_cid = &p_hwfn->p_rx_cids[qid];
+ p_cid = ((struct ecore_queue_cid **)pp_rxq_handles)[i];
/* Get SPQ entry */
- init_data.cid = p_rx_cid->cid;
- init_data.opaque_fid = p_rx_cid->opaque_fid;
+ init_data.cid = p_cid->cid;
+ init_data.opaque_fid = p_cid->opaque_fid;
rc = ecore_sp_init_request(p_hwfn, &p_ent,
ETH_RAMROD_RX_QUEUE_UPDATE,
@@ -735,41 +1048,34 @@ ecore_sp_eth_rx_queues_update(struct ecore_hwfn *p_hwfn,
return rc;
p_ramrod = &p_ent->ramrod.rx_queue_update;
+ p_ramrod->vport_id = p_cid->abs.vport_id;
- ecore_fw_vport(p_hwfn, p_rx_cid->vport_id, &p_ramrod->vport_id);
- ecore_fw_l2_queue(p_hwfn, qid, &abs_rx_q_id);
- p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(abs_rx_q_id);
+ p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
p_ramrod->complete_cqe_flg = complete_cqe_flg;
p_ramrod->complete_event_flg = complete_event_flg;
rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
- if (rc)
+ if (rc != ECORE_SUCCESS)
return rc;
}
return rc;
}
-enum _ecore_status_t
-ecore_sp_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn,
- u16 rx_queue_id,
- bool eq_completion_only, bool cqe_completion)
+static enum _ecore_status_t
+ecore_eth_pf_rx_queue_stop(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid,
+ bool b_eq_completion_only,
+ bool b_cqe_completion)
{
- struct ecore_hw_cid_data *p_rx_cid = &p_hwfn->p_rx_cids[rx_queue_id];
struct rx_queue_stop_ramrod_data *p_ramrod = OSAL_NULL;
struct ecore_spq_entry *p_ent = OSAL_NULL;
struct ecore_sp_init_data init_data;
- u16 abs_rx_q_id = 0;
- enum _ecore_status_t rc = ECORE_NOTIMPL;
-
- if (IS_VF(p_hwfn->p_dev))
- return ecore_vf_pf_rxq_stop(p_hwfn, rx_queue_id,
- cqe_completion);
+ enum _ecore_status_t rc;
- /* Get SPQ entry */
OSAL_MEMSET(&init_data, 0, sizeof(init_data));
- init_data.cid = p_rx_cid->cid;
- init_data.opaque_fid = p_rx_cid->opaque_fid;
+ init_data.cid = p_cid->cid;
+ init_data.opaque_fid = p_cid->opaque_fid;
init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
rc = ecore_sp_init_request(p_hwfn, &p_ent,
@@ -779,64 +1085,56 @@ ecore_sp_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn,
return rc;
p_ramrod = &p_ent->ramrod.rx_queue_stop;
-
- ecore_fw_vport(p_hwfn, p_rx_cid->vport_id, &p_ramrod->vport_id);
- ecore_fw_l2_queue(p_hwfn, rx_queue_id, &abs_rx_q_id);
- p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(abs_rx_q_id);
+ p_ramrod->vport_id = p_cid->abs.vport_id;
+ p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
/* Cleaning the queue requires the completion to arrive there.
* In addition, VFs require the answer to come as eqe to PF.
*/
- p_ramrod->complete_cqe_flg = (!!(p_rx_cid->opaque_fid ==
- p_hwfn->hw_info.opaque_fid) &&
- !eq_completion_only) || cqe_completion;
- p_ramrod->complete_event_flg = !(p_rx_cid->opaque_fid ==
- p_hwfn->hw_info.opaque_fid) ||
- eq_completion_only;
+ p_ramrod->complete_cqe_flg = ((p_cid->vfid == ECORE_QUEUE_CID_PF) &&
+ !b_eq_completion_only) ||
+ b_cqe_completion;
+ p_ramrod->complete_event_flg = (p_cid->vfid != ECORE_QUEUE_CID_PF) ||
+ b_eq_completion_only;
- rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
- if (rc != ECORE_SUCCESS)
- return rc;
+ return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
- ecore_sp_release_queue_cid(p_hwfn, p_rx_cid);
+enum _ecore_status_t ecore_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn,
+ void *p_rxq,
+ bool eq_completion_only,
+ bool cqe_completion)
+{
+ struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)p_rxq;
+ enum _ecore_status_t rc = ECORE_NOTIMPL;
+ if (IS_PF(p_hwfn->p_dev))
+ rc = ecore_eth_pf_rx_queue_stop(p_hwfn, p_cid,
+ eq_completion_only,
+ cqe_completion);
+ else
+ rc = ecore_vf_pf_rxq_stop(p_hwfn, p_cid, cqe_completion);
+
+ if (rc == ECORE_SUCCESS)
+ ecore_eth_queue_cid_release(p_hwfn, p_cid);
return rc;
}
enum _ecore_status_t
-ecore_sp_eth_txq_start_ramrod(struct ecore_hwfn *p_hwfn,
- u16 opaque_fid,
- u32 cid,
- struct ecore_queue_start_common_params *p_params,
- dma_addr_t pbl_addr,
- u16 pbl_size,
- union ecore_qm_pq_params *p_pq_params)
+ecore_eth_txq_start_ramrod(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid,
+ dma_addr_t pbl_addr, u16 pbl_size,
+ u16 pq_id)
{
struct tx_queue_start_ramrod_data *p_ramrod = OSAL_NULL;
struct ecore_spq_entry *p_ent = OSAL_NULL;
struct ecore_sp_init_data init_data;
- struct ecore_hw_cid_data *p_tx_cid;
- u16 pq_id, abs_tx_q_id = 0;
- u8 abs_vport_id;
enum _ecore_status_t rc = ECORE_NOTIMPL;
- /* Store information for the stop */
- p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id];
- p_tx_cid->cid = cid;
- p_tx_cid->opaque_fid = opaque_fid;
-
- rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
- if (rc != ECORE_SUCCESS)
- return rc;
-
- rc = ecore_fw_l2_queue(p_hwfn, p_params->queue_id, &abs_tx_q_id);
- if (rc != ECORE_SUCCESS)
- return rc;
-
/* Get SPQ entry */
OSAL_MEMSET(&init_data, 0, sizeof(init_data));
- init_data.cid = cid;
- init_data.opaque_fid = opaque_fid;
+ init_data.cid = p_cid->cid;
+ init_data.opaque_fid = p_cid->opaque_fid;
init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
rc = ecore_sp_init_request(p_hwfn, &p_ent,
@@ -846,110 +1144,89 @@ ecore_sp_eth_txq_start_ramrod(struct ecore_hwfn *p_hwfn,
return rc;
p_ramrod = &p_ent->ramrod.tx_queue_start;
- p_ramrod->vport_id = abs_vport_id;
+ p_ramrod->vport_id = p_cid->abs.vport_id;
- p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_params->sb);
- p_ramrod->sb_index = (u8)p_params->sb_idx;
- p_ramrod->stats_counter_id = p_params->stats_id;
+ p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_cid->abs.sb);
+ p_ramrod->sb_index = p_cid->abs.sb_idx;
+ p_ramrod->stats_counter_id = p_cid->abs.stats_id;
- p_ramrod->queue_zone_id = OSAL_CPU_TO_LE16(abs_tx_q_id);
+ p_ramrod->queue_zone_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
+ p_ramrod->same_as_last_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
p_ramrod->pbl_size = OSAL_CPU_TO_LE16(pbl_size);
DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr);
- pq_id = ecore_get_qm_pq(p_hwfn, PROTOCOLID_ETH, p_pq_params);
p_ramrod->qm_pq_id = OSAL_CPU_TO_LE16(pq_id);
return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
-enum _ecore_status_t
-ecore_sp_eth_tx_queue_start(struct ecore_hwfn *p_hwfn,
- u16 opaque_fid,
- struct ecore_queue_start_common_params *p_params,
+static enum _ecore_status_t
+ecore_eth_pf_tx_queue_start(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid,
u8 tc,
- dma_addr_t pbl_addr,
- u16 pbl_size,
+ dma_addr_t pbl_addr, u16 pbl_size,
void OSAL_IOMEM * *pp_doorbell)
{
- struct ecore_hw_cid_data *p_tx_cid;
- union ecore_qm_pq_params pq_params;
- u8 abs_stats_id = 0;
enum _ecore_status_t rc;
- if (IS_VF(p_hwfn->p_dev)) {
- return ecore_vf_pf_txq_start(p_hwfn,
- p_params->queue_id,
- p_params->sb,
- (u8)p_params->sb_idx,
- pbl_addr,
- pbl_size,
- pp_doorbell);
- }
-
- rc = ecore_fw_vport(p_hwfn, p_params->stats_id, &abs_stats_id);
+ /* TODO - set tc in the pq_params for multi-cos */
+ rc = ecore_eth_txq_start_ramrod(p_hwfn, p_cid,
+ pbl_addr, pbl_size,
+ ecore_get_cm_pq_idx_mcos(p_hwfn, tc));
if (rc != ECORE_SUCCESS)
return rc;
- p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id];
- OSAL_MEMSET(p_tx_cid, 0, sizeof(*p_tx_cid));
- OSAL_MEMSET(&pq_params, 0, sizeof(pq_params));
-
- pq_params.eth.tc = tc;
-
- /* Allocate a CID for the queue */
- rc = ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &p_tx_cid->cid);
- if (rc != ECORE_SUCCESS) {
- DP_NOTICE(p_hwfn, true, "Failed to acquire cid\n");
- return rc;
- }
- p_tx_cid->b_cid_allocated = true;
+ /* Provide the caller with the necessary return values */
+ *pp_doorbell = (u8 OSAL_IOMEM *)
+ p_hwfn->doorbells +
+ DB_ADDR(p_cid->cid, DQ_DEMS_LEGACY);
- DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
- "opaque_fid=0x%x, cid=0x%x, tx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n",
- opaque_fid, p_tx_cid->cid, p_params->queue_id,
- p_params->vport_id, p_params->sb);
+ return ECORE_SUCCESS;
+}
- p_params->stats_id = abs_stats_id;
+enum _ecore_status_t
+ecore_eth_tx_queue_start(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
+ struct ecore_queue_start_common_params *p_params,
+ u8 tc,
+ dma_addr_t pbl_addr, u16 pbl_size,
+ struct ecore_txq_start_ret_params *p_ret_params)
+{
+ struct ecore_queue_cid *p_cid;
+ enum _ecore_status_t rc;
- /* TODO - set tc in the pq_params for multi-cos */
- rc = ecore_sp_eth_txq_start_ramrod(p_hwfn,
- opaque_fid,
- p_tx_cid->cid,
- p_params,
- pbl_addr,
- pbl_size,
- &pq_params);
+ p_cid = ecore_eth_queue_to_cid_pf(p_hwfn, opaque_fid, p_params);
+ if (p_cid == OSAL_NULL)
+ return ECORE_INVAL;
- *pp_doorbell = (u8 OSAL_IOMEM *)p_hwfn->doorbells +
- DB_ADDR(p_tx_cid->cid, DQ_DEMS_LEGACY);
+ if (IS_PF(p_hwfn->p_dev))
+ rc = ecore_eth_pf_tx_queue_start(p_hwfn, p_cid, tc,
+ pbl_addr, pbl_size,
+ &p_ret_params->p_doorbell);
+ else
+ rc = ecore_vf_pf_txq_start(p_hwfn, p_cid,
+ pbl_addr, pbl_size,
+ &p_ret_params->p_doorbell);
if (rc != ECORE_SUCCESS)
- ecore_sp_release_queue_cid(p_hwfn, p_tx_cid);
+ ecore_eth_queue_cid_release(p_hwfn, p_cid);
+ else
+ p_ret_params->p_handle = (void *)p_cid;
return rc;
}
-enum _ecore_status_t ecore_sp_eth_tx_queue_update(struct ecore_hwfn *p_hwfn)
-{
- return ECORE_NOTIMPL;
-}
-
-enum _ecore_status_t ecore_sp_eth_tx_queue_stop(struct ecore_hwfn *p_hwfn,
- u16 tx_queue_id)
+static enum _ecore_status_t
+ecore_eth_pf_tx_queue_stop(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid)
{
- struct ecore_hw_cid_data *p_tx_cid = &p_hwfn->p_tx_cids[tx_queue_id];
struct ecore_spq_entry *p_ent = OSAL_NULL;
struct ecore_sp_init_data init_data;
- enum _ecore_status_t rc = ECORE_NOTIMPL;
-
- if (IS_VF(p_hwfn->p_dev))
- return ecore_vf_pf_txq_stop(p_hwfn, tx_queue_id);
+ enum _ecore_status_t rc;
- /* Get SPQ entry */
OSAL_MEMSET(&init_data, 0, sizeof(init_data));
- init_data.cid = p_tx_cid->cid;
- init_data.opaque_fid = p_tx_cid->opaque_fid;
+ init_data.cid = p_cid->cid;
+ init_data.opaque_fid = p_cid->opaque_fid;
init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
rc = ecore_sp_init_request(p_hwfn, &p_ent,
@@ -958,11 +1235,22 @@ enum _ecore_status_t ecore_sp_eth_tx_queue_stop(struct ecore_hwfn *p_hwfn,
if (rc != ECORE_SUCCESS)
return rc;
- rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
- if (rc != ECORE_SUCCESS)
- return rc;
+ return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
+
+enum _ecore_status_t ecore_eth_tx_queue_stop(struct ecore_hwfn *p_hwfn,
+ void *p_handle)
+{
+ struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)p_handle;
+ enum _ecore_status_t rc;
- ecore_sp_release_queue_cid(p_hwfn, p_tx_cid);
+ if (IS_PF(p_hwfn->p_dev))
+ rc = ecore_eth_pf_tx_queue_stop(p_hwfn, p_cid);
+ else
+ rc = ecore_vf_pf_txq_stop(p_hwfn, p_cid);
+
+ if (rc == ECORE_SUCCESS)
+ ecore_eth_queue_cid_release(p_hwfn, p_cid);
return rc;
}
@@ -988,17 +1276,6 @@ ecore_filter_action(enum ecore_filter_opcode opcode)
return action;
}
-static void ecore_set_fw_mac_addr(__le16 *fw_msb,
- __le16 *fw_mid, __le16 *fw_lsb, u8 *mac)
-{
- ((u8 *)fw_msb)[0] = mac[1];
- ((u8 *)fw_msb)[1] = mac[0];
- ((u8 *)fw_mid)[0] = mac[3];
- ((u8 *)fw_mid)[1] = mac[2];
- ((u8 *)fw_lsb)[0] = mac[5];
- ((u8 *)fw_lsb)[1] = mac[4];
-}
-
static enum _ecore_status_t
ecore_filter_ucast_common(struct ecore_hwfn *p_hwfn,
u16 opaque_fid,
@@ -1093,6 +1370,9 @@ ecore_filter_ucast_common(struct ecore_hwfn *p_hwfn,
case ECORE_FILTER_VNI:
p_first_filter->type = ETH_FILTER_TYPE_VNI;
break;
+ case ECORE_FILTER_UNUSED: /* @DPDK */
+ p_first_filter->type = MAX_ETH_FILTER_TYPE;
+ break;
}
if ((p_first_filter->type == ETH_FILTER_TYPE_MAC) ||
@@ -1738,3 +2018,87 @@ void ecore_reset_vport_stats(struct ecore_dev *p_dev)
else
_ecore_get_vport_stats(p_dev, p_dev->reset_stats);
}
+
+void ecore_arfs_mode_configure(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_arfs_config_params *p_cfg_params)
+{
+ if (p_cfg_params->arfs_enable) {
+ ecore_set_rfs_mode_enable(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
+ p_cfg_params->tcp,
+ p_cfg_params->udp,
+ p_cfg_params->ipv4,
+ p_cfg_params->ipv6);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "tcp = %s, udp = %s, ipv4 = %s, ipv6 =%s\n",
+ p_cfg_params->tcp ? "Enable" : "Disable",
+ p_cfg_params->udp ? "Enable" : "Disable",
+ p_cfg_params->ipv4 ? "Enable" : "Disable",
+ p_cfg_params->ipv6 ? "Enable" : "Disable");
+ } else {
+ ecore_set_rfs_mode_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
+ }
+	DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Configured ARFS mode: %s\n",
+ p_cfg_params->arfs_enable ? "Enable" : "Disable");
+}
+
+enum _ecore_status_t
+ecore_configure_rfs_ntuple_filter(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_spq_comp_cb *p_cb,
+ dma_addr_t p_addr, u16 length,
+ u16 qid, u8 vport_id,
+ bool b_is_add)
+{
+ struct rx_update_gft_filter_data *p_ramrod = OSAL_NULL;
+ struct ecore_spq_entry *p_ent = OSAL_NULL;
+ struct ecore_sp_init_data init_data;
+ u16 abs_rx_q_id = 0;
+ u8 abs_vport_id = 0;
+ enum _ecore_status_t rc = ECORE_NOTIMPL;
+
+ rc = ecore_fw_vport(p_hwfn, vport_id, &abs_vport_id);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ rc = ecore_fw_l2_queue(p_hwfn, qid, &abs_rx_q_id);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ /* Get SPQ entry */
+ OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+ init_data.cid = ecore_spq_get_cid(p_hwfn);
+
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+
+ if (p_cb) {
+ init_data.comp_mode = ECORE_SPQ_MODE_CB;
+ init_data.p_comp_data = p_cb;
+ } else {
+ init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
+ }
+
+ rc = ecore_sp_init_request(p_hwfn, &p_ent,
+ ETH_RAMROD_GFT_UPDATE_FILTER,
+ PROTOCOLID_ETH, &init_data);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.rx_update_gft;
+
+ DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_addr);
+ p_ramrod->pkt_hdr_length = OSAL_CPU_TO_LE16(length);
+ p_ramrod->rx_qid_or_action_icid = OSAL_CPU_TO_LE16(abs_rx_q_id);
+ p_ramrod->vport_id = abs_vport_id;
+ p_ramrod->filter_type = RFS_FILTER_TYPE;
+ p_ramrod->filter_action = b_is_add ? GFT_ADD_FILTER
+ : GFT_DELETE_FILTER;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "V[%0x], Q[%04x] - %s filter from 0x%lx [length %04xb]\n",
+ abs_vport_id, abs_rx_q_id,
+ b_is_add ? "Adding" : "Removing",
+ (unsigned long)p_addr, length);
+
+ return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
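
For illustration only, a minimal caller-side sketch of the new filter API in blocking mode; the wrapper name, the qid/vport values and the pre-mapped header buffer are assumptions, not part of this patch:

	/* Illustrative sketch: add an aRFS 4-tuple filter on rx-queue 0 of
	 * vport 0. hdr_phys must already hold an IO-mapped copy of the
	 * packet header; passing OSAL_NULL for p_cb selects EBLOCK
	 * completion.
	 */
	static enum _ecore_status_t
	example_add_ntuple_filter(struct ecore_hwfn *p_hwfn,
				  struct ecore_ptt *p_ptt,
				  dma_addr_t hdr_phys, u16 hdr_len)
	{
		return ecore_configure_rfs_ntuple_filter(p_hwfn, p_ptt,
							 OSAL_NULL /* p_cb */,
							 hdr_phys, hdr_len,
							 0 /* qid */,
							 0 /* vport_id */,
							 true /* b_is_add */);
	}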
diff --git a/drivers/net/qede/base/ecore_l2.h b/drivers/net/qede/base/ecore_l2.h
index 9c1bd388..7fe4cbcb 100644
--- a/drivers/net/qede/base/ecore_l2.h
+++ b/drivers/net/qede/base/ecore_l2.h
@@ -15,59 +15,106 @@
#include "ecore_spq.h"
#include "ecore_l2_api.h"
-/**
- * @brief ecore_sp_eth_tx_queue_update -
- *
- * This ramrod updates a TX queue. It is used for setting the active
- * state of the queue.
- *
- * @note Final phase API.
- *
- * @param p_hwfn
- *
- * @return enum _ecore_status_t
+#define MAX_QUEUES_PER_QZONE (sizeof(unsigned long) * 8)
+#define ECORE_QUEUE_CID_PF (0xff)
+
+/* Additional parameters required for initialization of the queue_cid,
+ * relevant only for a PF initializing one for its VFs.
*/
-enum _ecore_status_t ecore_sp_eth_tx_queue_update(struct ecore_hwfn *p_hwfn);
+struct ecore_queue_cid_vf_params {
+ /* Should match the VF's relative index */
+ u8 vfid;
+
+ /* 0-based queue index. Should reflect the relative qzone the
+ * VF thinks is associated with it [in its range].
+ */
+ u8 vf_qid;
+
+	/* Indicates a VF is legacy, making it differ in several ways:
+ * - Producers would be placed in a different place.
+ * - Makes assumptions regarding the CIDs.
+ */
+ bool b_legacy;
+
+	/* For VFs, this index arrives via TLV to differentiate between
+	 * different queues opened on the same qzone, and is passed along
+	 * [where the PF would have allocated it internally for its own].
+ */
+ u8 qid_usage_idx;
+};
+
+struct ecore_queue_cid {
+ /* 'Relative' is a relative term ;-). Usually the indices [not counting
+ * SBs] would be PF-relative, but there are some cases where that isn't
+ * the case - specifically for a PF configuring its VF indices it's
+ * possible some fields [E.g., stats-id] in 'rel' would already be abs.
+ */
+ struct ecore_queue_start_common_params rel;
+ struct ecore_queue_start_common_params abs;
+ u32 cid;
+ u16 opaque_fid;
+
+ /* VFs queues are mapped differently, so we need to know the
+ * relative queue associated with them [0-based].
+ * Notice this is relevant on the *PF* queue-cid of its VF's queues,
+ * and not on the VF itself.
+ */
+ u8 vfid;
+ u8 vf_qid;
+
+	/* We need an additional index to differentiate between queues opened
+	 * for the same queue-zone, as VFs would have to communicate the info
+	 * to the PF [otherwise the PF has no way to differentiate].
+ */
+ u8 qid_usage_idx;
+
+ /* Legacy VFs might have Rx producer located elsewhere */
+ bool b_legacy_vf;
+
+ struct ecore_hwfn *p_owner;
+};
+
+enum _ecore_status_t ecore_l2_alloc(struct ecore_hwfn *p_hwfn);
+void ecore_l2_setup(struct ecore_hwfn *p_hwfn);
+void ecore_l2_free(struct ecore_hwfn *p_hwfn);
+
+void ecore_eth_queue_cid_release(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid);
+
+struct ecore_queue_cid *
+ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
+ struct ecore_queue_start_common_params *p_params,
+ struct ecore_queue_cid_vf_params *p_vf_params);
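
As a hedged sketch of the new helper (the VF indices below are illustrative values, not taken from this patch):

	/* Illustrative sketch: a PF preparing a queue-cid on behalf of
	 * VF 3's first queue. Returns OSAL_NULL if no CID could be acquired.
	 */
	static struct ecore_queue_cid *
	example_vf_queue_cid(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
			     struct ecore_queue_start_common_params *p_params)
	{
		struct ecore_queue_cid_vf_params vf_params;

		OSAL_MEMSET(&vf_params, 0, sizeof(vf_params));
		vf_params.vfid = 3;		/* VF's relative index */
		vf_params.vf_qid = 0;		/* VF-relative queue */
		vf_params.b_legacy = false;	/* non-legacy producers/CIDs */
		vf_params.qid_usage_idx = 0;	/* first opening of this qzone */

		return ecore_eth_queue_to_cid(p_hwfn, opaque_fid, p_params,
					      &vf_params);
	}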
enum _ecore_status_t
ecore_sp_eth_vport_start(struct ecore_hwfn *p_hwfn,
struct ecore_sp_vport_start_params *p_params);
/**
- * @brief - Starts an Rx queue; Should be used where contexts are handled
- * outside of the ramrod area [specifically iov scenarios]
+ * @brief - Starts an Rx queue, when queue_cid is already prepared
*
* @param p_hwfn
- * @param opaque_fid
- * @param cid
- * @param p_params [queue_id, vport_id, stats_id, sb, sb_idx, vf_qid]
- stats_id is absolute packed in p_params.
+ * @param p_cid
* @param bd_max_bytes
* @param bd_chain_phys_addr
* @param cqe_pbl_addr
* @param cqe_pbl_size
- * @param b_use_zone_a_prod - support legacy VF producers
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t
-ecore_sp_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn,
- u16 opaque_fid,
- u32 cid,
- struct ecore_queue_start_common_params *p_params,
- u16 bd_max_bytes,
- dma_addr_t bd_chain_phys_addr,
- dma_addr_t cqe_pbl_addr,
- u16 cqe_pbl_size, bool b_use_zone_a_prod);
+ecore_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size);
/**
- * @brief - Starts a Tx queue; Should be used where contexts are handled
- * outside of the ramrod area [specifically iov scenarios]
+ * @brief - Starts a Tx queue, when queue_cid is already prepared
*
* @param p_hwfn
- * @param opaque_fid
- * @param cid
- * @param p_params [queue_id, vport_id,stats_id, sb, sb_idx, vf_qid]
+ * @param p_cid
* @param pbl_addr
* @param pbl_size
 * @param pq_id - index of the QM physical queue serving this Tx queue
@@ -75,14 +122,38 @@ ecore_sp_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn,
* @return enum _ecore_status_t
*/
enum _ecore_status_t
-ecore_sp_eth_txq_start_ramrod(struct ecore_hwfn *p_hwfn,
- u16 opaque_fid,
- u32 cid,
- struct ecore_queue_start_common_params *p_params,
- dma_addr_t pbl_addr,
- u16 pbl_size,
- union ecore_qm_pq_params *p_pq_params);
+ecore_eth_txq_start_ramrod(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid,
+ dma_addr_t pbl_addr, u16 pbl_size,
+ u16 pq_id);
u8 ecore_mcast_bin_from_mac(u8 *mac);
+/**
+ * @brief - ecore_configure_rfs_ntuple_filter
+ *
+ * This ramrod should be used to add or remove an aRFS HW filter.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_cb Used for ECORE_SPQ_MODE_CB, where the client initializes
+ *             it with a cookie and a callback function address; when not
+ *             using this mode the client must pass NULL.
+ * @param p_addr Physical address of the actual packet header to be
+ *               filtered on. It has to be IO-mapped for reading prior to
+ *               calling this [contains the 4-tuple: src ip, dest ip,
+ *               src port, dest port].
+ * @param length Length of the header at p_addr, up to and including the
+ *               transport header.
+ * @param qid Received packets matching the filter are directed to this
+ *            queue.
+ * @param vport_id
+ * @param b_is_add Flag to add or remove the filter.
+ *
+ */
+enum _ecore_status_t
+ecore_configure_rfs_ntuple_filter(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_spq_comp_cb *p_cb,
+ dma_addr_t p_addr, u16 length,
+ u16 qid, u8 vport_id,
+ bool b_is_add);
#endif
diff --git a/drivers/net/qede/base/ecore_l2_api.h b/drivers/net/qede/base/ecore_l2_api.h
index 326fa45b..d09f3c4a 100644
--- a/drivers/net/qede/base/ecore_l2_api.h
+++ b/drivers/net/qede/base/ecore_l2_api.h
@@ -28,15 +28,26 @@ enum ecore_rss_caps {
#endif
struct ecore_queue_start_common_params {
- /* Rx/Tx queue id */
- u8 queue_id;
+ /* Should always be relative to entity sending this. */
u8 vport_id;
+ u16 queue_id;
- /* stats_id is relative or absolute depends on function */
+ /* Relative, but relevant only for PFs */
u8 stats_id;
+
+ /* These are always absolute */
u16 sb;
- u16 sb_idx;
- u16 vf_qid;
+ u8 sb_idx;
+};
+
+struct ecore_rxq_start_ret_params {
+ void OSAL_IOMEM *p_prod;
+ void *p_handle;
+};
+
+struct ecore_txq_start_ret_params {
+ void OSAL_IOMEM *p_doorbell;
+ void *p_handle;
};
struct ecore_rss_params {
@@ -48,7 +59,9 @@ struct ecore_rss_params {
u8 update_rss_key;
u8 rss_caps;
u8 rss_table_size_log; /* The table size is 2 ^ rss_table_size_log */
- u16 rss_ind_table[ECORE_RSS_IND_TABLE_SIZE];
+
+	/* Indirection table consists of Rx queue handles */
+ void *rss_ind_table[ECORE_RSS_IND_TABLE_SIZE];
u32 rss_key[ECORE_RSS_KEY_SIZE];
};
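
Since rss_ind_table now carries Rx-queue handles rather than queue ids, a hedged sketch of filling it (the handle array is assumed to come from earlier ecore_eth_rx_queue_start() calls):

	/* Illustrative sketch: spread the indirection table across the
	 * Rx-queue handles returned in ecore_rxq_start_ret_params.p_handle.
	 */
	static void example_fill_rss_ind_table(struct ecore_rss_params *p_rss,
					       void **pp_rxq_handles,
					       u16 num_rxqs)
	{
		u16 i;

		for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++)
			p_rss->rss_ind_table[i] = pp_rxq_handles[i % num_rxqs];
	}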
@@ -89,6 +102,7 @@ enum ecore_filter_ucast_type {
ECORE_FILTER_INNER_MAC_VNI_PAIR,
ECORE_FILTER_MAC_VNI_PAIR,
ECORE_FILTER_VNI,
+ ECORE_FILTER_UNUSED, /* @DPDK */
};
struct ecore_filter_ucast {
@@ -127,6 +141,14 @@ struct ecore_filter_accept_flags {
#define ECORE_ACCEPT_BCAST 0x20
};
+struct ecore_arfs_config_params {
+ bool tcp;
+ bool udp;
+ bool ipv4;
+ bool ipv6;
+ bool arfs_enable; /* Enable or disable arfs mode */
+};
+
/* Add / remove / move / remove-all unicast MAC-VLAN filters.
* FW will assert in the following cases, so driver should take care...:
* 1. Adding a filter to a full table.
@@ -159,42 +181,37 @@ ecore_filter_accept_cmd(
struct ecore_spq_comp_cb *p_comp_data);
/**
- * @brief ecore_sp_eth_rx_queue_start - RX Queue Start Ramrod
+ * @brief ecore_eth_rx_queue_start - RX Queue Start Ramrod
*
* This ramrod initializes an RX Queue for a VPort. An Assert is generated if
* the VPort ID is not currently initialized.
*
* @param p_hwfn
* @param opaque_fid
- * @p_params [stats_id is relative, packed in p_params]
+ * @param p_params Inputs; relative for PF [SB being an exception]
* @param bd_max_bytes Maximum bytes that can be placed on a BD
* @param bd_chain_phys_addr Physical address of BDs for receive.
* @param cqe_pbl_addr Physical address of the CQE PBL Table.
* @param cqe_pbl_size Size of the CQE PBL Table
- * @param pp_prod Pointer to place producer's
- * address for the Rx Q (May be
- * NULL).
+ * @param p_ret_params Pointed struct to be filled with outputs.
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t
-ecore_sp_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
- u16 opaque_fid,
- struct ecore_queue_start_common_params *p_params,
- u16 bd_max_bytes,
- dma_addr_t bd_chain_phys_addr,
- dma_addr_t cqe_pbl_addr,
- u16 cqe_pbl_size,
- void OSAL_IOMEM * *pp_prod);
+ecore_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct ecore_queue_start_common_params *p_params,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size,
+ struct ecore_rxq_start_ret_params *p_ret_params);
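
A hedged usage sketch of the reworked Rx start API; the wrapper itself and the bd_max_bytes value are illustrative assumptions:

	/* Illustrative sketch: start an Rx queue and keep the opaque handle
	 * that later stop/update calls expect. 0x600 bd_max_bytes is an
	 * arbitrary example value.
	 */
	static enum _ecore_status_t
	example_rx_queue_start(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
			       struct ecore_queue_start_common_params *p_params,
			       dma_addr_t bd_chain_phys_addr,
			       dma_addr_t cqe_pbl_addr,
			       u16 cqe_pbl_size, void **pp_handle)
	{
		struct ecore_rxq_start_ret_params ret_params;
		enum _ecore_status_t rc;

		OSAL_MEMSET(&ret_params, 0, sizeof(ret_params));
		rc = ecore_eth_rx_queue_start(p_hwfn, opaque_fid, p_params,
					      0x600 /* bd_max_bytes */,
					      bd_chain_phys_addr, cqe_pbl_addr,
					      cqe_pbl_size, &ret_params);
		if (rc != ECORE_SUCCESS)
			return rc;

		/* ret_params.p_prod is the producer address for the fast path */
		*pp_handle = ret_params.p_handle;
		return ECORE_SUCCESS;
	}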
/**
- * @brief ecore_sp_eth_rx_queue_stop -
- *
- * This ramrod closes an RX queue. It sends RX queue stop ramrod
- * + CFC delete ramrod
+ * @brief ecore_eth_rx_queue_stop - This ramrod closes an Rx queue
*
* @param p_hwfn
- * @param rx_queue_id RX Queue ID
+ * @param p_rxq Handle of the queue to close
* @param eq_completion_only If True completion will be on
* EQe, if False completion will be
* on EQe if p_hwfn opaque
@@ -205,13 +222,13 @@ ecore_sp_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
* @return enum _ecore_status_t
*/
enum _ecore_status_t
-ecore_sp_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn,
- u16 rx_queue_id,
- bool eq_completion_only,
- bool cqe_completion);
+ecore_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn,
+ void *p_rxq,
+ bool eq_completion_only,
+ bool cqe_completion);
/**
- * @brief ecore_sp_eth_tx_queue_start - TX Queue Start Ramrod
+ * @brief - TX Queue Start Ramrod
*
* This ramrod initializes a TX Queue for a VPort. An Assert is generated if
* the VPort is not currently initialized.
@@ -222,34 +239,29 @@ ecore_sp_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn,
* @param tc traffic class to use with this L2 txq
* @param pbl_addr address of the pbl array
* @param pbl_size number of entries in pbl
- * @param pp_doorbell Pointer to place doorbell pointer (May be NULL).
- * This address should be used with the
- * DIRECT_REG_WR macro.
+ * @param p_ret_params Pointer to fill the return parameters in.
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t
-ecore_sp_eth_tx_queue_start(struct ecore_hwfn *p_hwfn,
- u16 opaque_fid,
- struct ecore_queue_start_common_params *p_params,
- u8 tc,
- dma_addr_t pbl_addr,
- u16 pbl_size,
- void OSAL_IOMEM * *pp_doorbell);
+ecore_eth_tx_queue_start(struct ecore_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct ecore_queue_start_common_params *p_params,
+ u8 tc,
+ dma_addr_t pbl_addr,
+ u16 pbl_size,
+ struct ecore_txq_start_ret_params *p_ret_params);
/**
- * @brief ecore_sp_eth_tx_queue_stop -
- *
- * This ramrod closes a TX queue. It sends TX queue stop ramrod
- * + CFC delete ramrod
+ * @brief ecore_eth_tx_queue_stop - closes a Tx queue
*
* @param p_hwfn
- * @param tx_queue_id TX Queue ID
+ * @param p_txq - handle of the Tx queue to be closed
*
* @return enum _ecore_status_t
*/
-enum _ecore_status_t ecore_sp_eth_tx_queue_stop(struct ecore_hwfn *p_hwfn,
- u16 tx_queue_id);
+enum _ecore_status_t ecore_eth_tx_queue_stop(struct ecore_hwfn *p_hwfn,
+ void *p_txq);
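
And the Tx counterpart, again only as an illustrative sketch (TC 0 and the immediate teardown merely demonstrate the start/stop pairing):

	/* Illustrative sketch: start a Tx queue on TC 0, then tear it
	 * straight down again via the returned opaque handle. In a real
	 * driver ret_params.p_doorbell would be kept for the transmit
	 * fast path.
	 */
	static enum _ecore_status_t
	example_tx_queue_cycle(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
			       struct ecore_queue_start_common_params *p_params,
			       dma_addr_t pbl_addr, u16 pbl_size)
	{
		struct ecore_txq_start_ret_params ret_params;
		enum _ecore_status_t rc;

		OSAL_MEMSET(&ret_params, 0, sizeof(ret_params));
		rc = ecore_eth_tx_queue_start(p_hwfn, opaque_fid, p_params,
					      0 /* tc */, pbl_addr, pbl_size,
					      &ret_params);
		if (rc != ECORE_SUCCESS)
			return rc;

		return ecore_eth_tx_queue_stop(p_hwfn, ret_params.p_handle);
	}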
enum ecore_tpa_mode {
ECORE_TPA_MODE_NONE,
@@ -273,6 +285,15 @@ struct ecore_sp_vport_start_params {
bool zero_placement_offset;
bool check_mac;
bool check_ethtype;
+
+ /* Strict behavior on transmission errors */
+ bool b_err_illegal_vlan_mode;
+ bool b_err_illegal_inband_mode;
+ bool b_err_vlan_insert_with_inband;
+ bool b_err_small_pkt;
+ bool b_err_big_pkt;
+ bool b_err_anti_spoof;
+ bool b_err_ctrl_frame;
};
/**
@@ -372,19 +393,19 @@ ecore_sp_eth_filter_ucast(struct ecore_hwfn *p_hwfn,
* @note Final phase API.
*
* @param p_hwfn
- * @param rx_queue_id RX Queue ID
- * @param num_rxqs Allow to update multiple rx
- * queues, from rx_queue_id to
- * (rx_queue_id + num_rxqs)
+ * @param pp_rxq_handlers An array of queue handles to be updated.
+ * @param num_rxqs Number of queues to update.
* @param complete_cqe_flg Post completion to the CQE Ring if set
* @param complete_event_flg Post completion to the Event Ring if set
+ * @param comp_mode
+ * @param p_comp_data
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t
ecore_sp_eth_rx_queues_update(struct ecore_hwfn *p_hwfn,
- u16 rx_queue_id,
+ void **pp_rxq_handlers,
u8 num_rxqs,
u8 complete_cqe_flg,
u8 complete_event_flg,
@@ -401,4 +422,18 @@ void ecore_get_vport_stats(struct ecore_dev *p_dev,
void ecore_reset_vport_stats(struct ecore_dev *p_dev);
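
For the handle-based rx-queues-update above, a hedged sketch; the EBLOCK completion mode and the tail of the signature are assumptions based on the other SPQ users in this driver:

	/* Illustrative sketch: request a CQE-posted update for two Rx
	 * queues, addressed by the opaque handles their start calls
	 * returned.
	 */
	static enum _ecore_status_t
	example_update_rxqs(struct ecore_hwfn *p_hwfn, void *p_rxq0,
			    void *p_rxq1)
	{
		void *handles[2];

		handles[0] = p_rxq0;
		handles[1] = p_rxq1;

		return ecore_sp_eth_rx_queues_update(p_hwfn, handles, 2,
						     1 /* complete_cqe_flg */,
						     0 /* complete_event_flg */,
						     ECORE_SPQ_MODE_EBLOCK,
						     OSAL_NULL);
	}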
+/**
+ * @brief ecore_arfs_mode_configure -
+ *
+ * Enable or disable RFS mode. At least one of tcp or udp must be true,
+ * and at least one of ipv4 or ipv6 must be true, for RFS mode to be
+ * enabled.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_cfg_params aRFS mode configuration parameters.
+ *
+ */
+void ecore_arfs_mode_configure(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_arfs_config_params *p_cfg_params);
#endif
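
To close out the new aRFS knobs, a hedged configuration sketch (enabling TCP and UDP over IPv4, per the constraint documented above; the wrapper is illustrative):

	/* Illustrative sketch: enable aRFS classification for TCP and UDP
	 * over IPv4. At least one protocol and one IP version must be set.
	 */
	static void example_enable_arfs(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt)
	{
		struct ecore_arfs_config_params cfg;

		OSAL_MEMSET(&cfg, 0, sizeof(cfg));
		cfg.arfs_enable = true;
		cfg.tcp = true;
		cfg.udp = true;
		cfg.ipv4 = true;

		ecore_arfs_mode_configure(p_hwfn, p_ptt, &cfg);
	}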
diff --git a/drivers/net/qede/base/ecore_mcp.c b/drivers/net/qede/base/ecore_mcp.c
index 2ff97155..a834ac74 100644
--- a/drivers/net/qede/base/ecore_mcp.c
+++ b/drivers/net/qede/base/ecore_mcp.c
@@ -104,7 +104,6 @@ enum _ecore_status_t ecore_mcp_free(struct ecore_hwfn *p_hwfn)
OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->lock);
}
OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);
- p_hwfn->mcp_info = OSAL_NULL;
return ECORE_SUCCESS;
}
@@ -365,6 +364,7 @@ ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_mcp_mb_params *p_mb_params)
{
+ union drv_union_data union_data;
u32 union_data_addr;
enum _ecore_status_t rc;
@@ -374,6 +374,15 @@ ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
return ECORE_BUSY;
}
+ if (p_mb_params->data_src_size > sizeof(union_data) ||
+ p_mb_params->data_dst_size > sizeof(union_data)) {
+ DP_ERR(p_hwfn,
+ "The provided size is larger than the union data size [src_size %u, dst_size %u, union_data_size %zu]\n",
+ p_mb_params->data_src_size, p_mb_params->data_dst_size,
+ sizeof(union_data));
+ return ECORE_INVAL;
+ }
+
union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
OFFSETOF(struct public_drv_mb, union_data);
@@ -384,19 +393,21 @@ ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
if (rc != ECORE_SUCCESS)
return rc;
- if (p_mb_params->p_data_src != OSAL_NULL)
- ecore_memcpy_to(p_hwfn, p_ptt, union_data_addr,
- p_mb_params->p_data_src,
- sizeof(*p_mb_params->p_data_src));
+ OSAL_MEM_ZERO(&union_data, sizeof(union_data));
+ if (p_mb_params->p_data_src != OSAL_NULL && p_mb_params->data_src_size)
+ OSAL_MEMCPY(&union_data, p_mb_params->p_data_src,
+ p_mb_params->data_src_size);
+ ecore_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data,
+ sizeof(union_data));
rc = ecore_do_mcp_cmd(p_hwfn, p_ptt, p_mb_params->cmd,
p_mb_params->param, &p_mb_params->mcp_resp,
&p_mb_params->mcp_param);
- if (p_mb_params->p_data_dst != OSAL_NULL)
+ if (p_mb_params->p_data_dst != OSAL_NULL &&
+ p_mb_params->data_dst_size)
ecore_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
- union_data_addr,
- sizeof(*p_mb_params->p_data_dst));
+ union_data_addr, p_mb_params->data_dst_size);
ecore_mcp_mb_unlock(p_hwfn, p_mb_params->cmd);
@@ -444,14 +455,13 @@ enum _ecore_status_t ecore_mcp_nvm_wr_cmd(struct ecore_hwfn *p_hwfn,
u32 i_txn_size, u32 *i_buf)
{
struct ecore_mcp_mb_params mb_params;
- union drv_union_data union_data;
enum _ecore_status_t rc;
OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
mb_params.cmd = cmd;
mb_params.param = param;
- OSAL_MEMCPY((u32 *)&union_data.raw_data, i_buf, i_txn_size);
- mb_params.p_data_src = &union_data;
+ mb_params.p_data_src = i_buf;
+ mb_params.data_src_size = (u8)i_txn_size;
rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
if (rc != ECORE_SUCCESS)
return rc;
@@ -471,13 +481,17 @@ enum _ecore_status_t ecore_mcp_nvm_rd_cmd(struct ecore_hwfn *p_hwfn,
u32 *o_txn_size, u32 *o_buf)
{
struct ecore_mcp_mb_params mb_params;
- union drv_union_data union_data;
+ u8 raw_data[MCP_DRV_NVM_BUF_LEN];
enum _ecore_status_t rc;
OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
mb_params.cmd = cmd;
mb_params.param = param;
- mb_params.p_data_dst = &union_data;
+ mb_params.p_data_dst = raw_data;
+
+ /* Use the maximal value since the actual one is part of the response */
+ mb_params.data_dst_size = MCP_DRV_NVM_BUF_LEN;
+
rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
if (rc != ECORE_SUCCESS)
return rc;
@@ -486,7 +500,8 @@ enum _ecore_status_t ecore_mcp_nvm_rd_cmd(struct ecore_hwfn *p_hwfn,
*o_mcp_param = mb_params.mcp_param;
*o_txn_size = *o_mcp_param;
- OSAL_MEMCPY(o_buf, (u32 *)&union_data.raw_data, *o_txn_size);
+ /* @DPDK */
+ OSAL_MEMCPY(o_buf, raw_data, RTE_MIN(*o_txn_size, MCP_DRV_NVM_BUF_LEN));
return ECORE_SUCCESS;
}
@@ -519,57 +534,389 @@ static void ecore_mcp_mf_workaround(struct ecore_hwfn *p_hwfn,
}
#endif
+static bool ecore_mcp_can_force_load(u8 drv_role, u8 exist_drv_role)
+{
+ return (drv_role == DRV_ROLE_OS &&
+ exist_drv_role == DRV_ROLE_PREBOOT) ||
+ (drv_role == DRV_ROLE_KDUMP && exist_drv_role == DRV_ROLE_OS);
+}
+
+static enum _ecore_status_t ecore_mcp_cancel_load_req(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 resp = 0, param = 0;
+ enum _ecore_status_t rc;
+
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CANCEL_LOAD_REQ, 0,
+ &resp, &param);
+ if (rc != ECORE_SUCCESS)
+ DP_NOTICE(p_hwfn, false,
+ "Failed to send cancel load request, rc = %d\n", rc);
+
+ return rc;
+}
+
+#define CONFIG_ECORE_L2_BITMAP_IDX (0x1 << 0)
+#define CONFIG_ECORE_SRIOV_BITMAP_IDX (0x1 << 1)
+#define CONFIG_ECORE_ROCE_BITMAP_IDX (0x1 << 2)
+#define CONFIG_ECORE_IWARP_BITMAP_IDX (0x1 << 3)
+#define CONFIG_ECORE_FCOE_BITMAP_IDX (0x1 << 4)
+#define CONFIG_ECORE_ISCSI_BITMAP_IDX (0x1 << 5)
+#define CONFIG_ECORE_LL2_BITMAP_IDX (0x1 << 6)
+
+static u32 ecore_get_config_bitmap(void)
+{
+ u32 config_bitmap = 0x0;
+
+#ifdef CONFIG_ECORE_L2
+ config_bitmap |= CONFIG_ECORE_L2_BITMAP_IDX;
+#endif
+#ifdef CONFIG_ECORE_SRIOV
+ config_bitmap |= CONFIG_ECORE_SRIOV_BITMAP_IDX;
+#endif
+#ifdef CONFIG_ECORE_ROCE
+ config_bitmap |= CONFIG_ECORE_ROCE_BITMAP_IDX;
+#endif
+#ifdef CONFIG_ECORE_IWARP
+ config_bitmap |= CONFIG_ECORE_IWARP_BITMAP_IDX;
+#endif
+#ifdef CONFIG_ECORE_FCOE
+ config_bitmap |= CONFIG_ECORE_FCOE_BITMAP_IDX;
+#endif
+#ifdef CONFIG_ECORE_ISCSI
+ config_bitmap |= CONFIG_ECORE_ISCSI_BITMAP_IDX;
+#endif
+#ifdef CONFIG_ECORE_LL2
+ config_bitmap |= CONFIG_ECORE_LL2_BITMAP_IDX;
+#endif
+
+ return config_bitmap;
+}
+
+struct ecore_load_req_in_params {
+ u8 hsi_ver;
+#define ECORE_LOAD_REQ_HSI_VER_DEFAULT 0
+#define ECORE_LOAD_REQ_HSI_VER_1 1
+ u32 drv_ver_0;
+ u32 drv_ver_1;
+ u32 fw_ver;
+ u8 drv_role;
+ u8 timeout_val;
+ u8 force_cmd;
+ bool avoid_eng_reset;
+};
+
+struct ecore_load_req_out_params {
+ u32 load_code;
+ u32 exist_drv_ver_0;
+ u32 exist_drv_ver_1;
+ u32 exist_fw_ver;
+ u8 exist_drv_role;
+ u8 mfw_hsi_ver;
+ bool drv_exists;
+};
+
+static enum _ecore_status_t
+__ecore_mcp_load_req(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_load_req_in_params *p_in_params,
+ struct ecore_load_req_out_params *p_out_params)
+{
+ struct ecore_mcp_mb_params mb_params;
+ struct load_req_stc load_req;
+ struct load_rsp_stc load_rsp;
+ u32 hsi_ver;
+ enum _ecore_status_t rc;
+
+ OSAL_MEM_ZERO(&load_req, sizeof(load_req));
+ load_req.drv_ver_0 = p_in_params->drv_ver_0;
+ load_req.drv_ver_1 = p_in_params->drv_ver_1;
+ load_req.fw_ver = p_in_params->fw_ver;
+ ECORE_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_ROLE,
+ p_in_params->drv_role);
+ ECORE_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO,
+ p_in_params->timeout_val);
+ ECORE_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FORCE,
+ p_in_params->force_cmd);
+ ECORE_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FLAGS0,
+ p_in_params->avoid_eng_reset);
+
+ hsi_ver = (p_in_params->hsi_ver == ECORE_LOAD_REQ_HSI_VER_DEFAULT) ?
+ DRV_ID_MCP_HSI_VER_CURRENT :
+ (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_SHIFT);
+
+ OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
+ mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
+ mb_params.param = PDA_COMP | hsi_ver | p_hwfn->p_dev->drv_type;
+ mb_params.p_data_src = &load_req;
+ mb_params.data_src_size = sizeof(load_req);
+ mb_params.p_data_dst = &load_rsp;
+ mb_params.data_dst_size = sizeof(load_rsp);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
+ mb_params.param,
+ ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW),
+ ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_TYPE),
+ ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER),
+ ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER));
+
+ if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1)
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Load Request: drv_ver 0x%08x_0x%08x, fw_ver 0x%08x, misc0 0x%08x [role %d, timeout %d, force %d, flags0 0x%x]\n",
+ load_req.drv_ver_0, load_req.drv_ver_1,
+ load_req.fw_ver, load_req.misc0,
+ ECORE_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_ROLE),
+ ECORE_MFW_GET_FIELD(load_req.misc0,
+ LOAD_REQ_LOCK_TO),
+ ECORE_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_FORCE),
+ ECORE_MFW_GET_FIELD(load_req.misc0,
+ LOAD_REQ_FLAGS0));
+
+ rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to send load request, rc = %d\n", rc);
+ return rc;
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Load Response: resp 0x%08x\n", mb_params.mcp_resp);
+ p_out_params->load_code = mb_params.mcp_resp;
+
+ if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 &&
+ p_out_params->load_code != FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Load Response: exist_drv_ver 0x%08x_0x%08x, exist_fw_ver 0x%08x, misc0 0x%08x [exist_role %d, mfw_hsi %d, flags0 0x%x]\n",
+ load_rsp.drv_ver_0, load_rsp.drv_ver_1,
+ load_rsp.fw_ver, load_rsp.misc0,
+ ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE),
+ ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI),
+ ECORE_MFW_GET_FIELD(load_rsp.misc0,
+ LOAD_RSP_FLAGS0));
+
+ p_out_params->exist_drv_ver_0 = load_rsp.drv_ver_0;
+ p_out_params->exist_drv_ver_1 = load_rsp.drv_ver_1;
+ p_out_params->exist_fw_ver = load_rsp.fw_ver;
+ p_out_params->exist_drv_role =
+ ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE);
+ p_out_params->mfw_hsi_ver =
+ ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI);
+ p_out_params->drv_exists =
+ ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0) &
+ LOAD_RSP_FLAGS0_DRV_EXISTS;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t ecore_get_mfw_drv_role(struct ecore_hwfn *p_hwfn,
+ enum ecore_drv_role drv_role,
+ u8 *p_mfw_drv_role)
+{
+ switch (drv_role) {
+ case ECORE_DRV_ROLE_OS:
+ *p_mfw_drv_role = DRV_ROLE_OS;
+ break;
+ case ECORE_DRV_ROLE_KDUMP:
+ *p_mfw_drv_role = DRV_ROLE_KDUMP;
+ break;
+ default:
+ DP_ERR(p_hwfn, "Unexpected driver role %d\n", drv_role);
+ return ECORE_INVAL;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+enum ecore_load_req_force {
+ ECORE_LOAD_REQ_FORCE_NONE,
+ ECORE_LOAD_REQ_FORCE_PF,
+ ECORE_LOAD_REQ_FORCE_ALL,
+};
+
+static enum _ecore_status_t
+ecore_get_mfw_force_cmd(struct ecore_hwfn *p_hwfn,
+ enum ecore_load_req_force force_cmd,
+ u8 *p_mfw_force_cmd)
+{
+ switch (force_cmd) {
+ case ECORE_LOAD_REQ_FORCE_NONE:
+ *p_mfw_force_cmd = LOAD_REQ_FORCE_NONE;
+ break;
+ case ECORE_LOAD_REQ_FORCE_PF:
+ *p_mfw_force_cmd = LOAD_REQ_FORCE_PF;
+ break;
+ case ECORE_LOAD_REQ_FORCE_ALL:
+ *p_mfw_force_cmd = LOAD_REQ_FORCE_ALL;
+ break;
+ default:
+ DP_ERR(p_hwfn, "Unexpected force value %d\n", force_cmd);
+ return ECORE_INVAL;
+ }
+
+ return ECORE_SUCCESS;
+}
+
enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
- u32 *p_load_code)
+ struct ecore_load_req_params *p_params)
{
- struct ecore_dev *p_dev = p_hwfn->p_dev;
- struct ecore_mcp_mb_params mb_params;
- union drv_union_data union_data;
+ struct ecore_load_req_out_params out_params;
+ struct ecore_load_req_in_params in_params;
+ u8 mfw_drv_role, mfw_force_cmd;
enum _ecore_status_t rc;
#ifndef ASIC_ONLY
if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
- ecore_mcp_mf_workaround(p_hwfn, p_load_code);
+ ecore_mcp_mf_workaround(p_hwfn, &p_params->load_code);
return ECORE_SUCCESS;
}
#endif
- OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
- mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
- mb_params.param = PDA_COMP | DRV_ID_MCP_HSI_VER_CURRENT |
- p_dev->drv_type;
- OSAL_MEMCPY(&union_data.ver_str, p_dev->ver_str, MCP_DRV_VER_STR_SIZE);
- mb_params.p_data_src = &union_data;
- rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ OSAL_MEM_ZERO(&in_params, sizeof(in_params));
+ in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_DEFAULT;
+ in_params.drv_ver_0 = ECORE_VERSION;
+ in_params.drv_ver_1 = ecore_get_config_bitmap();
+ in_params.fw_ver = STORM_FW_VERSION;
+	rc = ecore_get_mfw_drv_role(p_hwfn, p_params->drv_role, &mfw_drv_role);
+ if (rc != ECORE_SUCCESS)
+ return rc;
- /* if mcp fails to respond we must abort */
- if (rc != ECORE_SUCCESS) {
- DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+ in_params.drv_role = mfw_drv_role;
+ in_params.timeout_val = p_params->timeout_val;
+ rc = ecore_get_mfw_force_cmd(p_hwfn, ECORE_LOAD_REQ_FORCE_NONE,
+ &mfw_force_cmd);
+ if (rc != ECORE_SUCCESS)
return rc;
- }
- *p_load_code = mb_params.mcp_resp;
+ in_params.force_cmd = mfw_force_cmd;
+ in_params.avoid_eng_reset = p_params->avoid_eng_reset;
+
+ OSAL_MEM_ZERO(&out_params, sizeof(out_params));
+ rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ /* First handle cases where another load request should/might be sent:
+ * - MFW expects the old interface [HSI version = 1]
+ * - MFW responds that a force load request is required
+ */
+ if (out_params.load_code == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
+ DP_INFO(p_hwfn,
+ "MFW refused a load request due to HSI > 1. Resending with HSI = 1.\n");
+
+ /* The previous load request set the mailbox blocking */
+ p_hwfn->mcp_info->block_mb_sending = false;
+
+ in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_1;
+ OSAL_MEM_ZERO(&out_params, sizeof(out_params));
+ rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
+ &out_params);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+ } else if (out_params.load_code ==
+ FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE) {
+ /* The previous load request set the mailbox blocking */
+ p_hwfn->mcp_info->block_mb_sending = false;
+
+ if (ecore_mcp_can_force_load(in_params.drv_role,
+ out_params.exist_drv_role)) {
+ DP_INFO(p_hwfn,
+ "A force load is required [existing: role %d, fw_ver 0x%08x, drv_ver 0x%08x_0x%08x]. Sending a force load request.\n",
+ out_params.exist_drv_role,
+ out_params.exist_fw_ver,
+ out_params.exist_drv_ver_0,
+ out_params.exist_drv_ver_1);
+
+ rc = ecore_get_mfw_force_cmd(p_hwfn,
+ ECORE_LOAD_REQ_FORCE_ALL,
+ &mfw_force_cmd);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ in_params.force_cmd = mfw_force_cmd;
+ OSAL_MEM_ZERO(&out_params, sizeof(out_params));
+ rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
+ &out_params);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+ } else {
+ DP_NOTICE(p_hwfn, false,
+ "A force load is required [existing: role %d, fw_ver 0x%08x, drv_ver 0x%08x_0x%08x]. Avoiding to prevent disruption of active PFs.\n",
+ out_params.exist_drv_role,
+ out_params.exist_fw_ver,
+ out_params.exist_drv_ver_0,
+ out_params.exist_drv_ver_1);
+
+ ecore_mcp_cancel_load_req(p_hwfn, p_ptt);
+ return ECORE_BUSY;
+ }
+ }
- /* If MFW refused (e.g. other port is in diagnostic mode) we
- * must abort. This can happen in the following cases:
- * - Other port is in diagnostic mode
- * - Previously loaded function on the engine is not compliant with
- * the requester.
- * - MFW cannot cope with the requester's DRV_MFW_HSI_VERSION.
- * -
+ /* Now handle the other types of responses.
+ * The "REFUSED_HSI_1" and "REFUSED_REQUIRES_FORCE" responses are not
+ * expected here after the additional revised load requests were sent.
*/
- if (!(*p_load_code) ||
- ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI) ||
- ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_PDA) ||
- ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG)) {
- DP_ERR(p_hwfn, "MCP refused load request, aborting\n");
+ switch (out_params.load_code) {
+ case FW_MSG_CODE_DRV_LOAD_ENGINE:
+ case FW_MSG_CODE_DRV_LOAD_PORT:
+ case FW_MSG_CODE_DRV_LOAD_FUNCTION:
+ if (out_params.mfw_hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 &&
+ out_params.drv_exists) {
+ /* The role and fw/driver version match, but the PF is
+ * already loaded and has not been unloaded gracefully.
+ * This is unexpected since a quasi-FLR request was
+ * previously sent as part of ecore_hw_prepare().
+ */
+ DP_NOTICE(p_hwfn, false,
+ "PF is already loaded - shouldn't have got here since a quasi-FLR request was previously sent!\n");
+ return ECORE_INVAL;
+ }
+ break;
+ case FW_MSG_CODE_DRV_LOAD_REFUSED_PDA:
+ case FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG:
+ case FW_MSG_CODE_DRV_LOAD_REFUSED_HSI:
+ case FW_MSG_CODE_DRV_LOAD_REFUSED_REJECT:
+ DP_NOTICE(p_hwfn, false,
+ "MFW refused a load request [resp 0x%08x]. Aborting.\n",
+ out_params.load_code);
return ECORE_BUSY;
+ default:
+ DP_NOTICE(p_hwfn, false,
+ "Unexpected response to load request [resp 0x%08x]. Aborting.\n",
+ out_params.load_code);
+ break;
}
+ p_params->load_code = out_params.load_code;
+
return ECORE_SUCCESS;
}
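
A hedged caller-side sketch of the reworked load request; struct ecore_load_req_params and its fields are assumed to be declared in ecore_mcp.h as used here, and timeout_val 0 is assumed to mean the MFW default:

	/* Illustrative sketch: issue a load request as a regular OS driver
	 * and branch on the granted initialization level.
	 */
	static enum _ecore_status_t
	example_load_req(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
	{
		struct ecore_load_req_params params;
		enum _ecore_status_t rc;

		OSAL_MEM_ZERO(&params, sizeof(params));
		params.drv_role = ECORE_DRV_ROLE_OS;
		params.timeout_val = 0;
		params.avoid_eng_reset = false;

		rc = ecore_mcp_load_req(p_hwfn, p_ptt, &params);
		if (rc != ECORE_SUCCESS)
			return rc;

		switch (params.load_code) {
		case FW_MSG_CODE_DRV_LOAD_ENGINE:
		case FW_MSG_CODE_DRV_LOAD_PORT:
		case FW_MSG_CODE_DRV_LOAD_FUNCTION:
			return ECORE_SUCCESS;	/* init the granted level */
		default:
			return ECORE_BUSY;
		}
	}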
+enum _ecore_status_t ecore_mcp_unload_req(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 wol_param, mcp_resp, mcp_param;
+
+ /* @DPDK */
+ wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;
+
+ return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_UNLOAD_REQ, wol_param,
+ &mcp_resp, &mcp_param);
+}
+
+enum _ecore_status_t ecore_mcp_unload_done(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct ecore_mcp_mb_params mb_params;
+
+ OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
+ mb_params.cmd = DRV_MSG_CODE_UNLOAD_DONE;
+
+ return ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+}
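
The matching unload handshake, sketched (placing the HW teardown between the two calls is the assumed convention):

	/* Illustrative sketch: announce the unload, stop the HW, then
	 * notify the MFW that teardown completed.
	 */
	static enum _ecore_status_t
	example_unload(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
	{
		enum _ecore_status_t rc;

		rc = ecore_mcp_unload_req(p_hwfn, p_ptt);
		if (rc != ECORE_SUCCESS)
			return rc;

		/* ... stop vports, queues and HW here ... */

		return ecore_mcp_unload_done(p_hwfn, p_ptt);
	}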
+
static void ecore_mcp_handle_vf_flr(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
@@ -611,7 +958,6 @@ enum _ecore_status_t ecore_mcp_ack_vf_flr(struct ecore_hwfn *p_hwfn,
u32 func_addr = SECTION_ADDR(mfw_func_offsize,
MCP_PF_ID(p_hwfn));
struct ecore_mcp_mb_params mb_params;
- union drv_union_data union_data;
enum _ecore_status_t rc;
int i;
@@ -622,8 +968,8 @@ enum _ecore_status_t ecore_mcp_ack_vf_flr(struct ecore_hwfn *p_hwfn,
OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
- OSAL_MEMCPY(&union_data.ack_vf_disabled, vfs_to_ack, VF_MAX_STATIC / 8);
- mb_params.p_data_src = &union_data;
+ mb_params.p_data_src = vfs_to_ack;
+ mb_params.data_src_size = VF_MAX_STATIC / 8;
rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt,
&mb_params);
if (rc != ECORE_SUCCESS) {
@@ -801,9 +1147,6 @@ static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);
- if (p_link->link_up)
- ecore_dcbx_eagle_workaround(p_hwfn, p_ptt, p_link->pfc_enabled);
-
OSAL_LINK_UPDATE(p_hwfn);
}
@@ -812,8 +1155,7 @@ enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
{
struct ecore_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
struct ecore_mcp_mb_params mb_params;
- union drv_union_data union_data;
- struct eth_phy_cfg *p_phy_cfg;
+ struct eth_phy_cfg phy_cfg;
enum _ecore_status_t rc = ECORE_SUCCESS;
u32 cmd;
@@ -823,32 +1165,30 @@ enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
#endif
/* Set the shmem configuration according to params */
- p_phy_cfg = &union_data.drv_phy_cfg;
- OSAL_MEMSET(p_phy_cfg, 0, sizeof(*p_phy_cfg));
+ OSAL_MEM_ZERO(&phy_cfg, sizeof(phy_cfg));
cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
if (!params->speed.autoneg)
- p_phy_cfg->speed = params->speed.forced_speed;
- p_phy_cfg->pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
- p_phy_cfg->pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
- p_phy_cfg->pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
- p_phy_cfg->adv_speed = params->speed.advertised_speeds;
- p_phy_cfg->loopback_mode = params->loopback_mode;
+ phy_cfg.speed = params->speed.forced_speed;
+ phy_cfg.pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
+ phy_cfg.pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
+ phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
+ phy_cfg.adv_speed = params->speed.advertised_speeds;
+ phy_cfg.loopback_mode = params->loopback_mode;
p_hwfn->b_drv_link_init = b_up;
if (b_up)
DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
"Configuring Link: Speed 0x%08x, Pause 0x%08x,"
- " adv_speed 0x%08x, loopback 0x%08x,"
- " features 0x%08x\n",
- p_phy_cfg->speed, p_phy_cfg->pause,
- p_phy_cfg->adv_speed, p_phy_cfg->loopback_mode,
- p_phy_cfg->feature_config_flags);
+ " adv_speed 0x%08x, loopback 0x%08x\n",
+ phy_cfg.speed, phy_cfg.pause, phy_cfg.adv_speed,
+ phy_cfg.loopback_mode);
else
DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, "Resetting link\n");
OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
mb_params.cmd = cmd;
- mb_params.p_data_src = &union_data;
+ mb_params.p_data_src = &phy_cfg;
+ mb_params.data_src_size = sizeof(phy_cfg);
rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
/* if mcp fails to respond we must abort */
@@ -927,8 +1267,8 @@ static void ecore_mcp_send_protocol_stats(struct ecore_hwfn *p_hwfn,
enum ecore_mcp_protocol_type stats_type;
union ecore_mcp_protocol_stats stats;
struct ecore_mcp_mb_params mb_params;
- union drv_union_data union_data;
u32 hsi_param;
+ enum _ecore_status_t rc;
switch (type) {
case MFW_DRV_MSG_GET_LAN_STATS:
@@ -936,7 +1276,7 @@ static void ecore_mcp_send_protocol_stats(struct ecore_hwfn *p_hwfn,
hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
break;
default:
- DP_NOTICE(p_hwfn, false, "Invalid protocol type %d\n", type);
+ DP_INFO(p_hwfn, "Invalid protocol type %d\n", type);
return;
}
@@ -945,14 +1285,15 @@ static void ecore_mcp_send_protocol_stats(struct ecore_hwfn *p_hwfn,
OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
mb_params.cmd = DRV_MSG_CODE_GET_STATS;
mb_params.param = hsi_param;
- OSAL_MEMCPY(&union_data, &stats, sizeof(stats));
- mb_params.p_data_src = &union_data;
- ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ mb_params.p_data_src = &stats;
+ mb_params.data_src_size = sizeof(stats);
+ rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc != ECORE_SUCCESS)
+ DP_ERR(p_hwfn, "Failed to send protocol stats, rc = %d\n", rc);
}
-static void
-ecore_read_pf_bandwidth(struct ecore_hwfn *p_hwfn,
- struct public_func *p_shmem_info)
+static void ecore_read_pf_bandwidth(struct ecore_hwfn *p_hwfn,
+ struct public_func *p_shmem_info)
{
struct ecore_mcp_function_info *p_info;
@@ -1043,28 +1384,38 @@ static void ecore_mcp_handle_fan_failure(struct ecore_hwfn *p_hwfn,
ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FAN_FAIL);
}
+struct ecore_mdump_cmd_params {
+ u32 cmd;
+ void *p_data_src;
+ u8 data_src_size;
+ void *p_data_dst;
+ u8 data_dst_size;
+ u32 mcp_resp;
+};
+
static enum _ecore_status_t
ecore_mcp_mdump_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
- u32 mdump_cmd, union drv_union_data *p_data_src,
- union drv_union_data *p_data_dst, u32 *p_mcp_resp)
+ struct ecore_mdump_cmd_params *p_mdump_cmd_params)
{
struct ecore_mcp_mb_params mb_params;
enum _ecore_status_t rc;
OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
mb_params.cmd = DRV_MSG_CODE_MDUMP_CMD;
- mb_params.param = mdump_cmd;
- mb_params.p_data_src = p_data_src;
- mb_params.p_data_dst = p_data_dst;
+ mb_params.param = p_mdump_cmd_params->cmd;
+ mb_params.p_data_src = p_mdump_cmd_params->p_data_src;
+ mb_params.data_src_size = p_mdump_cmd_params->data_src_size;
+ mb_params.p_data_dst = p_mdump_cmd_params->p_data_dst;
+ mb_params.data_dst_size = p_mdump_cmd_params->data_dst_size;
rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
if (rc != ECORE_SUCCESS)
return rc;
- *p_mcp_resp = mb_params.mcp_resp;
- if (*p_mcp_resp == FW_MSG_CODE_MDUMP_INVALID_CMD) {
+ p_mdump_cmd_params->mcp_resp = mb_params.mcp_resp;
+ if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_MDUMP_INVALID_CMD) {
DP_NOTICE(p_hwfn, false,
"MFW claims that the mdump command is illegal [mdump_cmd 0x%x]\n",
- mdump_cmd);
+ p_mdump_cmd_params->cmd);
rc = ECORE_INVAL;
}
@@ -1074,99 +1425,123 @@ ecore_mcp_mdump_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
static enum _ecore_status_t ecore_mcp_mdump_ack(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
- u32 mcp_resp;
+ struct ecore_mdump_cmd_params mdump_cmd_params;
- return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MDUMP_ACK,
- OSAL_NULL, OSAL_NULL, &mcp_resp);
+ OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
+ mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_ACK;
+
+ return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
}
enum _ecore_status_t ecore_mcp_mdump_set_values(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 epoch)
{
- union drv_union_data union_data;
- u32 mcp_resp;
+ struct ecore_mdump_cmd_params mdump_cmd_params;
- OSAL_MEMCPY(&union_data.raw_data, &epoch, sizeof(epoch));
+ OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
+ mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_SET_VALUES;
+ mdump_cmd_params.p_data_src = &epoch;
+ mdump_cmd_params.data_src_size = sizeof(epoch);
- return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MDUMP_SET_VALUES,
- &union_data, OSAL_NULL, &mcp_resp);
+ return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
}
enum _ecore_status_t ecore_mcp_mdump_trigger(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
- u32 mcp_resp;
+ struct ecore_mdump_cmd_params mdump_cmd_params;
- return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MDUMP_TRIGGER,
- OSAL_NULL, OSAL_NULL, &mcp_resp);
-}
+ OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
+ mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_TRIGGER;
-enum _ecore_status_t ecore_mcp_mdump_clear_logs(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt)
-{
- u32 mcp_resp;
-
- return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MDUMP_CLEAR_LOGS,
- OSAL_NULL, OSAL_NULL, &mcp_resp);
+ return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
}
static enum _ecore_status_t
ecore_mcp_mdump_get_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
struct mdump_config_stc *p_mdump_config)
{
- union drv_union_data union_data;
- u32 mcp_resp;
+ struct ecore_mdump_cmd_params mdump_cmd_params;
enum _ecore_status_t rc;
- rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MDUMP_GET_CONFIG,
- OSAL_NULL, &union_data, &mcp_resp);
+ OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
+ mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_CONFIG;
+ mdump_cmd_params.p_data_dst = p_mdump_config;
+ mdump_cmd_params.data_dst_size = sizeof(*p_mdump_config);
+
+ rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
if (rc != ECORE_SUCCESS)
return rc;
- /* A zero response implies that the mdump command is not supported */
- if (!mcp_resp)
+ if (mdump_cmd_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
+ DP_INFO(p_hwfn,
+ "The mdump command is not supported by the MFW\n");
return ECORE_NOTIMPL;
+ }
- if (mcp_resp != FW_MSG_CODE_OK) {
+ if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) {
DP_NOTICE(p_hwfn, false,
"Failed to get the mdump configuration and logs info [mcp_resp 0x%x]\n",
- mcp_resp);
+ mdump_cmd_params.mcp_resp);
rc = ECORE_UNKNOWN_ERROR;
}
- OSAL_MEMCPY(p_mdump_config, &union_data.mdump_config,
- sizeof(*p_mdump_config));
-
return rc;
}
-enum _ecore_status_t ecore_mcp_mdump_get_info(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt)
+enum _ecore_status_t
+ecore_mcp_mdump_get_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_mdump_info *p_mdump_info)
{
+ u32 addr, global_offsize, global_addr;
struct mdump_config_stc mdump_config;
enum _ecore_status_t rc;
- rc = ecore_mcp_mdump_get_config(p_hwfn, p_ptt, &mdump_config);
- if (rc != ECORE_SUCCESS)
- return rc;
+ OSAL_MEMSET(p_mdump_info, 0, sizeof(*p_mdump_info));
- DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
- "MFW mdump_config: version 0x%x, config 0x%x, epoch 0x%x, num_of_logs 0x%x, valid_logs 0x%x\n",
- mdump_config.version, mdump_config.config, mdump_config.epoc,
- mdump_config.num_of_logs, mdump_config.valid_logs);
+ addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+ PUBLIC_GLOBAL);
+ global_offsize = ecore_rd(p_hwfn, p_ptt, addr);
+ global_addr = SECTION_ADDR(global_offsize, 0);
+ p_mdump_info->reason = ecore_rd(p_hwfn, p_ptt,
+ global_addr +
+ OFFSETOF(struct public_global,
+ mdump_reason));
- if (mdump_config.valid_logs > 0) {
- DP_NOTICE(p_hwfn, false,
- "* * * IMPORTANT - HW ERROR register dump captured by device * * *\n");
+ if (p_mdump_info->reason) {
+ rc = ecore_mcp_mdump_get_config(p_hwfn, p_ptt, &mdump_config);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ p_mdump_info->version = mdump_config.version;
+ p_mdump_info->config = mdump_config.config;
+ p_mdump_info->epoch = mdump_config.epoc;
+ p_mdump_info->num_of_logs = mdump_config.num_of_logs;
+ p_mdump_info->valid_logs = mdump_config.valid_logs;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "MFW mdump info: reason %d, version 0x%x, config 0x%x, epoch 0x%x, num_of_logs 0x%x, valid_logs 0x%x\n",
+ p_mdump_info->reason, p_mdump_info->version,
+ p_mdump_info->config, p_mdump_info->epoch,
+ p_mdump_info->num_of_logs, p_mdump_info->valid_logs);
+ } else {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "MFW mdump info: reason %d\n", p_mdump_info->reason);
}
- return rc;
+ return ECORE_SUCCESS;
}
-void ecore_mcp_mdump_enable(struct ecore_dev *p_dev, bool mdump_enable)
+enum _ecore_status_t ecore_mcp_mdump_clear_logs(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
{
- p_dev->mdump_en = mdump_enable;
+ struct ecore_mdump_cmd_params mdump_cmd_params;
+
+ OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
+ mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_CLEAR_LOGS;
+
+ return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
}
static void ecore_mcp_handle_critical_error(struct ecore_hwfn *p_hwfn,
@@ -1184,6 +1559,7 @@ static void ecore_mcp_handle_critical_error(struct ecore_hwfn *p_hwfn,
if (p_hwfn->p_dev->mdump_en) {
DP_NOTICE(p_hwfn, false,
"Not acknowledging the notification to allow the MFW crash dump\n");
+ p_hwfn->p_dev->mdump_en = false;
return;
}
@@ -1256,9 +1632,7 @@ enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
ecore_mcp_handle_critical_error(p_hwfn, p_ptt);
break;
default:
- /* @DPDK */
- DP_NOTICE(p_hwfn, false,
- "Unimplemented MFW message %d\n", i);
+ DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i);
rc = ECORE_INVAL;
}
}
@@ -1364,16 +1738,47 @@ enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_dev *p_dev,
return ECORE_SUCCESS;
}
+/* @DPDK */
+/* Old MFW has a global configuration for all PFs regarding RDMA support */
+static void
+ecore_mcp_get_shmem_proto_legacy(struct ecore_hwfn *p_hwfn,
+ enum ecore_pci_personality *p_proto)
+{
+ *p_proto = ECORE_PCI_ETH;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
+ "According to Legacy capabilities, L2 personality is %08x\n",
+ (u32)*p_proto);
+}
+
+/* @DPDK */
+static enum _ecore_status_t
+ecore_mcp_get_shmem_proto_mfw(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ enum ecore_pci_personality *p_proto)
+{
+ u32 resp = 0, param = 0;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
+ "According to capabilities, L2 personality is %08x [resp %08x param %08x]\n",
+ (u32)*p_proto, resp, param);
+ return ECORE_SUCCESS;
+}
+
static enum _ecore_status_t
ecore_mcp_get_shmem_proto(struct ecore_hwfn *p_hwfn,
struct public_func *p_info,
+ struct ecore_ptt *p_ptt,
enum ecore_pci_personality *p_proto)
{
enum _ecore_status_t rc = ECORE_SUCCESS;
switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
case FUNC_MF_CFG_PROTOCOL_ETHERNET:
- *p_proto = ECORE_PCI_ETH;
+ if (ecore_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto) !=
+ ECORE_SUCCESS)
+ ecore_mcp_get_shmem_proto_legacy(p_hwfn, p_proto);
break;
default:
rc = ECORE_INVAL;
@@ -1394,7 +1799,8 @@ enum _ecore_status_t ecore_mcp_fill_shmem_func_info(struct ecore_hwfn *p_hwfn,
info->pause_on_host = (shmem_info.config &
FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;
- if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, &info->protocol)) {
+ if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
+ &info->protocol)) {
DP_ERR(p_hwfn, "Unknown personality %08x\n",
(u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
return ECORE_INVAL;
@@ -1422,6 +1828,13 @@ enum _ecore_status_t ecore_mcp_fill_shmem_func_info(struct ecore_hwfn *p_hwfn,
info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);
+	info->mtu = (u16)shmem_info.mtu_size;
+
+	/* Fall back to a default MTU if shmem reports none */
+	if (info->mtu == 0)
+		info->mtu = 1500;
+
DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IFUP),
"Read configuration from shmem: pause_on_host %02x"
" protocol %02x BW [%02x - %02x]"
@@ -1543,8 +1956,9 @@ int ecore_mcp_get_personality_cnt(struct ecore_hwfn *p_hwfn,
if (shmem_info.config & FUNC_MF_CFG_FUNC_HIDE)
continue;
- if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info,
- &protocol) != ECORE_SUCCESS)
+ if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
+ &protocol) !=
+ ECORE_SUCCESS)
continue;
if ((1 << ((u32)protocol)) & personalities)
@@ -1636,9 +2050,8 @@ enum _ecore_status_t
ecore_mcp_send_drv_version(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
struct ecore_mcp_drv_version *p_ver)
{
- struct drv_version_stc *p_drv_version;
struct ecore_mcp_mb_params mb_params;
- union drv_union_data union_data;
+ struct drv_version_stc drv_version;
u32 num_words, i;
void *p_name;
OSAL_BE32 val;
@@ -1649,18 +2062,20 @@ ecore_mcp_send_drv_version(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
return ECORE_SUCCESS;
#endif
- p_drv_version = &union_data.drv_version;
- p_drv_version->version = p_ver->version;
+ OSAL_MEM_ZERO(&drv_version, sizeof(drv_version));
+ drv_version.version = p_ver->version;
num_words = (MCP_DRV_VER_STR_SIZE - 4) / 4;
for (i = 0; i < num_words; i++) {
+ /* The driver name is expected to be in a big-endian format */
p_name = &p_ver->name[i * sizeof(u32)];
val = OSAL_CPU_TO_BE32(*(u32 *)p_name);
- *(u32 *)&p_drv_version->name[i * sizeof(u32)] = val;
+ *(u32 *)&drv_version.name[i * sizeof(u32)] = val;
}
OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
- mb_params.p_data_src = &union_data;
+ mb_params.p_data_src = &drv_version;
+ mb_params.data_src_size = sizeof(drv_version);
rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
if (rc != ECORE_SUCCESS)
DP_ERR(p_hwfn, "MCP response failure, aborting\n");
@@ -1700,22 +2115,24 @@ enum _ecore_status_t ecore_mcp_resume(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t
ecore_mcp_ov_update_current_config(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
- enum ecore_ov_config_method config,
enum ecore_ov_client client)
{
enum _ecore_status_t rc;
u32 resp = 0, param = 0;
u32 drv_mb_param;
- switch (config) {
+ switch (client) {
case ECORE_OV_CLIENT_DRV:
drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS;
break;
case ECORE_OV_CLIENT_USER:
drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER;
break;
+ case ECORE_OV_CLIENT_VENDOR_SPEC:
+ drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC;
+ break;
default:
- DP_NOTICE(p_hwfn, true, "Invalid client type %d\n", config);
+ DP_NOTICE(p_hwfn, true, "Invalid client type %d\n", client);
return ECORE_INVAL;
}
@@ -1752,9 +2169,9 @@ ecore_mcp_ov_update_driver_state(struct ecore_hwfn *p_hwfn,
}
rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE,
- drv_state, &resp, &param);
+ drv_mb_param, &resp, &param);
if (rc != ECORE_SUCCESS)
- DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+ DP_ERR(p_hwfn, "Failed to send driver state\n");
return rc;
}
@@ -2251,7 +2668,7 @@ enum _ecore_status_t ecore_mcp_bist_register_test(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t ecore_mcp_bist_clock_test(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
- u32 drv_mb_param = 0, rsp, param;
+ u32 drv_mb_param, rsp, param;
enum _ecore_status_t rc = ECORE_SUCCESS;
drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST <<
@@ -2327,28 +2744,25 @@ ecore_mcp_get_temperature_info(struct ecore_hwfn *p_hwfn,
struct ecore_temperature_info *p_temp_info)
{
struct ecore_temperature_sensor *p_temp_sensor;
- struct temperature_status_stc *p_mfw_temp_info;
+ struct temperature_status_stc mfw_temp_info;
struct ecore_mcp_mb_params mb_params;
- union drv_union_data union_data;
u32 val;
enum _ecore_status_t rc;
u8 i;
OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
mb_params.cmd = DRV_MSG_CODE_GET_TEMPERATURE;
- mb_params.p_data_dst = &union_data;
+ mb_params.p_data_dst = &mfw_temp_info;
+ mb_params.data_dst_size = sizeof(mfw_temp_info);
rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
if (rc != ECORE_SUCCESS)
return rc;
- p_mfw_temp_info = &union_data.temp_info;
-
OSAL_BUILD_BUG_ON(ECORE_MAX_NUM_OF_SENSORS != MAX_NUM_OF_SENSORS);
- p_temp_info->num_sensors = OSAL_MIN_T(u32,
- p_mfw_temp_info->num_of_sensors,
+ p_temp_info->num_sensors = OSAL_MIN_T(u32, mfw_temp_info.num_of_sensors,
ECORE_MAX_NUM_OF_SENSORS);
for (i = 0; i < p_temp_info->num_sensors; i++) {
- val = p_mfw_temp_info->sensor[i];
+ val = mfw_temp_info.sensor[i];
p_temp_sensor = &p_temp_info->sensors[i];
p_temp_sensor->sensor_location = (val & SENSOR_LOCATION_MASK) >>
SENSOR_LOCATION_SHIFT;
@@ -2403,7 +2817,60 @@ enum _ecore_status_t ecore_mcp_mem_ecc_events(struct ecore_hwfn *p_hwfn,
0, &rsp, (u32 *)num_events);
}
-#define ECORE_RESC_ALLOC_VERSION_MAJOR 1
+static enum resource_id_enum
+ecore_mcp_get_mfw_res_id(enum ecore_resources res_id)
+{
+ enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID;
+
+ switch (res_id) {
+ case ECORE_SB:
+ mfw_res_id = RESOURCE_NUM_SB_E;
+ break;
+ case ECORE_L2_QUEUE:
+ mfw_res_id = RESOURCE_NUM_L2_QUEUE_E;
+ break;
+ case ECORE_VPORT:
+ mfw_res_id = RESOURCE_NUM_VPORT_E;
+ break;
+ case ECORE_RSS_ENG:
+ mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E;
+ break;
+ case ECORE_PQ:
+ mfw_res_id = RESOURCE_NUM_PQ_E;
+ break;
+ case ECORE_RL:
+ mfw_res_id = RESOURCE_NUM_RL_E;
+ break;
+ case ECORE_MAC:
+ case ECORE_VLAN:
+ /* Each VFC resource can accommodate both a MAC and a VLAN */
+ mfw_res_id = RESOURCE_VFC_FILTER_E;
+ break;
+ case ECORE_ILT:
+ mfw_res_id = RESOURCE_ILT_E;
+ break;
+ case ECORE_LL2_QUEUE:
+ mfw_res_id = RESOURCE_LL2_QUEUE_E;
+ break;
+ case ECORE_RDMA_CNQ_RAM:
+ case ECORE_CMDQS_CQS:
+ /* CNQ/CMDQS are the same resource */
+ mfw_res_id = RESOURCE_CQS_E;
+ break;
+ case ECORE_RDMA_STATS_QUEUE:
+ mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E;
+ break;
+ case ECORE_BDQ:
+ mfw_res_id = RESOURCE_BDQ_E;
+ break;
+ default:
+ break;
+ }
+
+ return mfw_res_id;
+}
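As a quick illustration of the mapping above (a hedged sketch, assuming a caller inside this file where the static helper is visible): MAC and VLAN filters deliberately resolve to the same MFW resource.

/* Both queries return RESOURCE_VFC_FILTER_E, since one VFC entry can
 * hold either a MAC or a VLAN filter. */
enum resource_id_enum mac_res = ecore_mcp_get_mfw_res_id(ECORE_MAC);
enum resource_id_enum vlan_res = ecore_mcp_get_mfw_res_id(ECORE_VLAN);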
+
+#define ECORE_RESC_ALLOC_VERSION_MAJOR 2
#define ECORE_RESC_ALLOC_VERSION_MINOR 0
#define ECORE_RESC_ALLOC_VERSION \
((ECORE_RESC_ALLOC_VERSION_MAJOR << \
@@ -2411,34 +2878,146 @@ enum _ecore_status_t ecore_mcp_mem_ecc_events(struct ecore_hwfn *p_hwfn,
(ECORE_RESC_ALLOC_VERSION_MINOR << \
DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT))
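A worked example of the packed version dword (a sketch; the mask/shift field macros come from mcp_public.h): with major 2 and minor 0 the value is simply (2 << MAJOR_SHIFT), and the fields can be read back with ECORE_MFW_GET_FIELD().

u32 ver = ECORE_RESC_ALLOC_VERSION;
u8 major = ECORE_MFW_GET_FIELD(ver,
			       DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR);
u8 minor = ECORE_MFW_GET_FIELD(ver,
			       DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR);
/* major == 2, minor == 0 */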
-enum _ecore_status_t ecore_mcp_get_resc_info(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- struct resource_info *p_resc_info,
- u32 *p_mcp_resp, u32 *p_mcp_param)
+struct ecore_resc_alloc_in_params {
+ u32 cmd;
+ enum ecore_resources res_id;
+ u32 resc_max_val;
+};
+
+struct ecore_resc_alloc_out_params {
+ u32 mcp_resp;
+ u32 mcp_param;
+ u32 resc_num;
+ u32 resc_start;
+ u32 vf_resc_num;
+ u32 vf_resc_start;
+ u32 flags;
+};
+
+static enum _ecore_status_t
+ecore_mcp_resc_allocation_msg(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_resc_alloc_in_params *p_in_params,
+ struct ecore_resc_alloc_out_params *p_out_params)
{
struct ecore_mcp_mb_params mb_params;
- union drv_union_data *p_union_data;
+ struct resource_info mfw_resc_info;
enum _ecore_status_t rc;
+ OSAL_MEM_ZERO(&mfw_resc_info, sizeof(mfw_resc_info));
+
+ mfw_resc_info.res_id = ecore_mcp_get_mfw_res_id(p_in_params->res_id);
+ if (mfw_resc_info.res_id == RESOURCE_NUM_INVALID) {
+ DP_ERR(p_hwfn,
+ "Failed to match resource %d [%s] with the MFW resources\n",
+ p_in_params->res_id,
+ ecore_hw_get_resc_name(p_in_params->res_id));
+ return ECORE_INVAL;
+ }
+
+ switch (p_in_params->cmd) {
+ case DRV_MSG_SET_RESOURCE_VALUE_MSG:
+ mfw_resc_info.size = p_in_params->resc_max_val;
+ /* Fallthrough */
+ case DRV_MSG_GET_RESOURCE_ALLOC_MSG:
+ break;
+ default:
+ DP_ERR(p_hwfn, "Unexpected resource alloc command [0x%08x]\n",
+ p_in_params->cmd);
+ return ECORE_INVAL;
+ }
+
OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
- mb_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG;
+ mb_params.cmd = p_in_params->cmd;
mb_params.param = ECORE_RESC_ALLOC_VERSION;
- p_union_data = (union drv_union_data *)p_resc_info;
- mb_params.p_data_src = p_union_data;
- mb_params.p_data_dst = p_union_data;
+ mb_params.p_data_src = &mfw_resc_info;
+ mb_params.data_src_size = sizeof(mfw_resc_info);
+ mb_params.p_data_dst = mb_params.p_data_src;
+ mb_params.data_dst_size = mb_params.data_src_size;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Resource message request: cmd 0x%08x, res_id %d [%s], hsi_version %d.%d, val 0x%x\n",
+ p_in_params->cmd, p_in_params->res_id,
+ ecore_hw_get_resc_name(p_in_params->res_id),
+ ECORE_MFW_GET_FIELD(mb_params.param,
+ DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
+ ECORE_MFW_GET_FIELD(mb_params.param,
+ DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
+ p_in_params->resc_max_val);
+
rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
if (rc != ECORE_SUCCESS)
return rc;
- *p_mcp_resp = mb_params.mcp_resp;
- *p_mcp_param = mb_params.mcp_param;
+ p_out_params->mcp_resp = mb_params.mcp_resp;
+ p_out_params->mcp_param = mb_params.mcp_param;
+ p_out_params->resc_num = mfw_resc_info.size;
+ p_out_params->resc_start = mfw_resc_info.offset;
+ p_out_params->vf_resc_num = mfw_resc_info.vf_size;
+ p_out_params->vf_resc_start = mfw_resc_info.vf_offset;
+ p_out_params->flags = mfw_resc_info.flags;
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
- "MFW resource_info: version 0x%x, res_id 0x%x, size 0x%x,"
- " offset 0x%x, vf_size 0x%x, vf_offset 0x%x, flags 0x%x\n",
- *p_mcp_param, p_resc_info->res_id, p_resc_info->size,
- p_resc_info->offset, p_resc_info->vf_size,
- p_resc_info->vf_offset, p_resc_info->flags);
+ "Resource message response: mfw_hsi_version %d.%d, num 0x%x, start 0x%x, vf_num 0x%x, vf_start 0x%x, flags 0x%08x\n",
+ ECORE_MFW_GET_FIELD(p_out_params->mcp_param,
+ FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
+ ECORE_MFW_GET_FIELD(p_out_params->mcp_param,
+ FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
+ p_out_params->resc_num, p_out_params->resc_start,
+ p_out_params->vf_resc_num, p_out_params->vf_resc_start,
+ p_out_params->flags);
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t
+ecore_mcp_set_resc_max_val(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ enum ecore_resources res_id, u32 resc_max_val,
+ u32 *p_mcp_resp)
+{
+ struct ecore_resc_alloc_out_params out_params;
+ struct ecore_resc_alloc_in_params in_params;
+ enum _ecore_status_t rc;
+
+ OSAL_MEM_ZERO(&in_params, sizeof(in_params));
+ in_params.cmd = DRV_MSG_SET_RESOURCE_VALUE_MSG;
+ in_params.res_id = res_id;
+ in_params.resc_max_val = resc_max_val;
+ OSAL_MEM_ZERO(&out_params, sizeof(out_params));
+ rc = ecore_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
+ &out_params);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ *p_mcp_resp = out_params.mcp_resp;
+
+ return ECORE_SUCCESS;
+}
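A hedged usage sketch of the setter above ('max_rss' is a hypothetical value computed by the caller; p_hwfn/p_ptt are assumed to be already acquired):

u32 mcp_resp = 0;
enum _ecore_status_t rc;

rc = ecore_mcp_set_resc_max_val(p_hwfn, p_ptt, ECORE_RSS_ENG, max_rss,
				&mcp_resp);
if (rc != ECORE_SUCCESS)
	DP_NOTICE(p_hwfn, false, "Failed to set the max RSS engines\n");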
+
+enum _ecore_status_t
+ecore_mcp_get_resc_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ enum ecore_resources res_id, u32 *p_mcp_resp,
+ u32 *p_resc_num, u32 *p_resc_start)
+{
+ struct ecore_resc_alloc_out_params out_params;
+ struct ecore_resc_alloc_in_params in_params;
+ enum _ecore_status_t rc;
+
+ OSAL_MEM_ZERO(&in_params, sizeof(in_params));
+ in_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG;
+ in_params.res_id = res_id;
+ OSAL_MEM_ZERO(&out_params, sizeof(out_params));
+ rc = ecore_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
+ &out_params);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ *p_mcp_resp = out_params.mcp_resp;
+
+ if (*p_mcp_resp == FW_MSG_CODE_RESOURCE_ALLOC_OK) {
+ *p_resc_num = out_params.resc_num;
+ *p_resc_start = out_params.resc_start;
+ }
return ECORE_SUCCESS;
}
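And the matching query side, as a hedged sketch: the outputs are only meaningful when the MFW answers FW_MSG_CODE_RESOURCE_ALLOC_OK.

u32 mcp_resp = 0, num = 0, start = 0;
enum _ecore_status_t rc;

rc = ecore_mcp_get_resc_info(p_hwfn, p_ptt, ECORE_VPORT, &mcp_resp,
			     &num, &start);
if (rc == ECORE_SUCCESS && mcp_resp == FW_MSG_CODE_RESOURCE_ALLOC_OK)
	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "VPORT range: %u entries starting at %u\n", num, start);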
@@ -2448,6 +3027,182 @@ enum _ecore_status_t ecore_mcp_initiate_pf_flr(struct ecore_hwfn *p_hwfn,
{
u32 mcp_resp, mcp_param;
- return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_INITIATE_PF_FLR,
- 0, &mcp_resp, &mcp_param);
+ return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_INITIATE_PF_FLR, 0,
+ &mcp_resp, &mcp_param);
+}
+
+static enum _ecore_status_t ecore_mcp_resource_cmd(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 param, u32 *p_mcp_resp,
+ u32 *p_mcp_param)
+{
+ enum _ecore_status_t rc;
+
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_RESOURCE_CMD, param,
+ p_mcp_resp, p_mcp_param);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ if (*p_mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
+ DP_INFO(p_hwfn,
+ "The resource command is unsupported by the MFW\n");
+ return ECORE_NOTIMPL;
+ }
+
+ if (*p_mcp_param == RESOURCE_OPCODE_UNKNOWN_CMD) {
+ u8 opcode = ECORE_MFW_GET_FIELD(param, RESOURCE_CMD_REQ_OPCODE);
+
+ DP_NOTICE(p_hwfn, false,
+ "The resource command is unknown to the MFW [param 0x%08x, opcode %d]\n",
+ param, opcode);
+ return ECORE_INVAL;
+ }
+
+ return rc;
+}
+
+enum _ecore_status_t
+__ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_resc_lock_params *p_params)
+{
+ u32 param = 0, mcp_resp, mcp_param;
+ u8 opcode;
+ enum _ecore_status_t rc;
+
+ switch (p_params->timeout) {
+ case ECORE_MCP_RESC_LOCK_TO_DEFAULT:
+ opcode = RESOURCE_OPCODE_REQ;
+ p_params->timeout = 0;
+ break;
+ case ECORE_MCP_RESC_LOCK_TO_NONE:
+ opcode = RESOURCE_OPCODE_REQ_WO_AGING;
+ p_params->timeout = 0;
+ break;
+ default:
+ opcode = RESOURCE_OPCODE_REQ_W_AGING;
+ break;
+ }
+
+ ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
+ ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
+ ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_AGE, p_params->timeout);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Resource lock request: param 0x%08x [age %d, opcode %d, resource %d]\n",
+ param, p_params->timeout, opcode, p_params->resource);
+
+ /* Attempt to acquire the resource */
+ rc = ecore_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp,
+ &mcp_param);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ /* Analyze the response */
+ p_params->owner = ECORE_MFW_GET_FIELD(mcp_param,
+ RESOURCE_CMD_RSP_OWNER);
+ opcode = ECORE_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Resource lock response: mcp_param 0x%08x [opcode %d, owner %d]\n",
+ mcp_param, opcode, p_params->owner);
+
+ switch (opcode) {
+ case RESOURCE_OPCODE_GNT:
+ p_params->b_granted = true;
+ break;
+ case RESOURCE_OPCODE_BUSY:
+ p_params->b_granted = false;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, false,
+ "Unexpected opcode in resource lock response [mcp_param 0x%08x, opcode %d]\n",
+ mcp_param, opcode);
+ return ECORE_INVAL;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t
+ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_resc_lock_params *p_params)
+{
+ u32 retry_cnt = 0;
+ enum _ecore_status_t rc;
+
+ do {
+ /* No need for an interval before the first iteration */
+ if (retry_cnt) {
+ if (p_params->sleep_b4_retry) {
+ u16 retry_interval_in_ms =
+ DIV_ROUND_UP(p_params->retry_interval,
+ 1000);
+
+ OSAL_MSLEEP(retry_interval_in_ms);
+ } else {
+ OSAL_UDELAY(p_params->retry_interval);
+ }
+ }
+
+ rc = __ecore_mcp_resc_lock(p_hwfn, p_ptt, p_params);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ if (p_params->b_granted)
+ break;
+ } while (retry_cnt++ < p_params->retry_num);
+
+ return ECORE_SUCCESS;
+}
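A hedged caller sketch for the retry wrapper above (the retry values are illustrative): note that b_granted must still be checked, since the loop also ends when the retries are exhausted.

struct ecore_resc_lock_params lock_params;

OSAL_MEM_ZERO(&lock_params, sizeof(lock_params));
lock_params.resource = ECORE_RESC_LOCK_DBG_DUMP;
lock_params.timeout = ECORE_MCP_RESC_LOCK_TO_DEFAULT;
lock_params.retry_num = 10;
lock_params.retry_interval = 10000;	/* usec */
lock_params.sleep_b4_retry = true;

if (ecore_mcp_resc_lock(p_hwfn, p_ptt, &lock_params) != ECORE_SUCCESS ||
    !lock_params.b_granted)
	DP_NOTICE(p_hwfn, false, "Failed to acquire the resource lock\n");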
+
+enum _ecore_status_t
+ecore_mcp_resc_unlock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_resc_unlock_params *p_params)
+{
+ u32 param = 0, mcp_resp, mcp_param;
+ u8 opcode;
+ enum _ecore_status_t rc;
+
+ opcode = p_params->b_force ? RESOURCE_OPCODE_FORCE_RELEASE
+ : RESOURCE_OPCODE_RELEASE;
+ ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
+ ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Resource unlock request: param 0x%08x [opcode %d, resource %d]\n",
+ param, opcode, p_params->resource);
+
+ /* Attempt to release the resource */
+ rc = ecore_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp,
+ &mcp_param);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ /* Analyze the response */
+ opcode = ECORE_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Resource unlock response: mcp_param 0x%08x [opcode %d]\n",
+ mcp_param, opcode);
+
+ switch (opcode) {
+ case RESOURCE_OPCODE_RELEASED_PREVIOUS:
+ DP_INFO(p_hwfn,
+ "Resource unlock request for an already released resource [%d]\n",
+ p_params->resource);
+ /* Fallthrough */
+ case RESOURCE_OPCODE_RELEASED:
+ p_params->b_released = true;
+ break;
+ case RESOURCE_OPCODE_WRONG_OWNER:
+ p_params->b_released = false;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, false,
+ "Unexpected opcode in resource unlock response [mcp_param 0x%08x, opcode %d]\n",
+ mcp_param, opcode);
+ return ECORE_INVAL;
+ }
+
+ return ECORE_SUCCESS;
}
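The matching release, again as a hedged sketch; b_force is left false so only the owning PF can release the lock.

struct ecore_resc_unlock_params unlock_params;

OSAL_MEM_ZERO(&unlock_params, sizeof(unlock_params));
unlock_params.resource = ECORE_RESC_LOCK_DBG_DUMP;

if (ecore_mcp_resc_unlock(p_hwfn, p_ptt, &unlock_params) != ECORE_SUCCESS ||
    !unlock_params.b_released)
	DP_NOTICE(p_hwfn, false, "Failed to release the resource lock\n");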
diff --git a/drivers/net/qede/base/ecore_mcp.h b/drivers/net/qede/base/ecore_mcp.h
index 831890ca..37d1835f 100644
--- a/drivers/net/qede/base/ecore_mcp.h
+++ b/drivers/net/qede/base/ecore_mcp.h
@@ -11,6 +11,7 @@
#include "bcm_osal.h"
#include "mcp_public.h"
+#include "ecore.h"
#include "ecore_mcp_api.h"
/* Using hwfn number (and not pf_num) is required since in CMT mode,
@@ -64,12 +65,22 @@ struct ecore_mcp_info {
struct ecore_mcp_mb_params {
u32 cmd;
u32 param;
- union drv_union_data *p_data_src;
- union drv_union_data *p_data_dst;
+ void *p_data_src;
+ u8 data_src_size;
+ void *p_data_dst;
+ u8 data_dst_size;
u32 mcp_resp;
u32 mcp_param;
};
+struct ecore_drv_tlv_hdr {
+ u8 tlv_type; /* According to the enum below */
+ u8 tlv_length; /* In dwords - not including this header */
+ u8 tlv_reserved;
+#define ECORE_DRV_TLV_FLAGS_CHANGED 0x01
+ u8 tlv_flags;
+};
+
/**
* @brief Initialize the interface with the MCP
*
@@ -128,32 +139,58 @@ enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
* @param p_hwfn - hw function
* @param p_ptt - PTT required for register access
* @return enum _ecore_status_t - ECORE_SUCCESS - operation
- * was successul.
+ * was successful.
*/
enum _ecore_status_t ecore_issue_pulse(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
+enum ecore_drv_role {
+ ECORE_DRV_ROLE_OS,
+ ECORE_DRV_ROLE_KDUMP,
+};
+
+struct ecore_load_req_params {
+ enum ecore_drv_role drv_role;
+ u8 timeout_val; /* 1..254, '0' - default value, '255' - no timeout */
+ bool avoid_eng_reset;
+ u32 load_code;
+};
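A hedged sketch of filling these parameters for an ordinary OS driver load; the load code returned by the MFW tells the caller whether it is the first PF on the engine or port (FW_MSG_CODE_DRV_LOAD_ENGINE and friends).

struct ecore_load_req_params load_req_params;
enum _ecore_status_t rc;

OSAL_MEM_ZERO(&load_req_params, sizeof(load_req_params));
load_req_params.drv_role = ECORE_DRV_ROLE_OS;
load_req_params.timeout_val = 0;	/* default MFW timeout */

rc = ecore_mcp_load_req(p_hwfn, p_ptt, &load_req_params);
if (rc == ECORE_SUCCESS)
	DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Load code 0x%08x\n",
		   load_req_params.load_code);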
+
/**
- * @brief Sends a LOAD_REQ to the MFW, and in case operation
- * succeed, returns whether this PF is the first on the
- * chip/engine/port or function. This function should be
- * called when driver is ready to accept MFW events after
- * Storms initializations are done.
- *
- * @param p_hwfn - hw function
- * @param p_ptt - PTT required for register access
- * @param p_load_code - The MCP response param containing one
- * of the following:
- * FW_MSG_CODE_DRV_LOAD_ENGINE
- * FW_MSG_CODE_DRV_LOAD_PORT
- * FW_MSG_CODE_DRV_LOAD_FUNCTION
- * @return enum _ecore_status_t -
- * ECORE_SUCCESS - Operation was successul.
- * ECORE_BUSY - Operation failed
+ * @brief Sends a LOAD_REQ to the MFW, and in case the operation succeeds,
+ * returns whether this PF is the first on the engine/port or function.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_params
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - Operation was successful.
*/
enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
- u32 *p_load_code);
+ struct ecore_load_req_params *p_params);
+
+/**
+ * @brief Sends a UNLOAD_REQ message to the MFW
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - Operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_unload_req(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief Sends a UNLOAD_DONE message to the MFW
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - Operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_unload_done(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
/**
* @brief Read the MFW mailbox into Current buffer.
@@ -327,31 +364,37 @@ enum _ecore_status_t ecore_mcp_mdump_trigger(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
/**
- * @brief - Clears the MFW crash dump logs.
+ * @brief - Sets the MFW's max value for the given resource
*
- * @param p_hwfn
- * @param p_ptt
+ * @param p_hwfn
+ * @param p_ptt
+ * @param res_id
+ * @param resc_max_val
+ * @param p_mcp_resp
*
- * @param return ECORE_SUCCESS upon success.
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
*/
-enum _ecore_status_t ecore_mcp_mdump_clear_logs(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt);
+enum _ecore_status_t
+ecore_mcp_set_resc_max_val(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ enum ecore_resources res_id, u32 resc_max_val,
+ u32 *p_mcp_resp);
/**
- * @brief - Gets the MFW crash dump configuration and logs info.
+ * @brief - Gets the MFW allocation info for the given resource
*
- * @param p_hwfn
- * @param p_ptt
+ * @param p_hwfn
+ * @param p_ptt
+ * @param res_id
+ * @param p_mcp_resp
+ * @param p_resc_num
+ * @param p_resc_start
*
- * @param return ECORE_SUCCESS upon success.
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
*/
-enum _ecore_status_t ecore_mcp_mdump_get_info(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt);
-
-enum _ecore_status_t ecore_mcp_get_resc_info(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- struct resource_info *p_resc_info,
- u32 *p_mcp_resp, u32 *p_mcp_param);
+enum _ecore_status_t
+ecore_mcp_get_resc_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ enum ecore_resources res_id, u32 *p_mcp_resp,
+ u32 *p_resc_num, u32 *p_resc_start);
/**
* @brief - Initiates PF FLR
@@ -364,4 +407,79 @@ enum _ecore_status_t ecore_mcp_get_resc_info(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t ecore_mcp_initiate_pf_flr(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
+#define ECORE_MCP_RESC_LOCK_MIN_VAL RESOURCE_DUMP /* 0 */
+#define ECORE_MCP_RESC_LOCK_MAX_VAL 31
+
+enum ecore_resc_lock {
+ ECORE_RESC_LOCK_DBG_DUMP = ECORE_MCP_RESC_LOCK_MIN_VAL,
+ /* Locks that the MFW is aware of should be added here downwards */
+
+ /* Ecore-only locks should be added here upwards */
+ ECORE_RESC_LOCK_RESC_ALLOC = ECORE_MCP_RESC_LOCK_MAX_VAL
+};
+
+struct ecore_resc_lock_params {
+ /* Resource number [valid values are 0..31] */
+ u8 resource;
+
+ /* Lock timeout value in seconds [default, none or 1..254] */
+ u8 timeout;
+#define ECORE_MCP_RESC_LOCK_TO_DEFAULT 0
+#define ECORE_MCP_RESC_LOCK_TO_NONE 255
+
+ /* Number of times to retry locking */
+ u8 retry_num;
+
+ /* The interval in usec between retries */
+ u16 retry_interval;
+
+ /* Use sleep or delay between retries */
+ bool sleep_b4_retry;
+
+ /* Will be set to true if the resource is free and granted */
+ bool b_granted;
+
+ /* Will be filled with the resource owner.
+ * [0..15 = PF0-15, 16 = MFW, 17 = diag over serial]
+ */
+ u8 owner;
+};
+
+/**
+ * @brief Acquires MFW generic resource lock
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_params
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t
+ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_resc_lock_params *p_params);
+
+struct ecore_resc_unlock_params {
+ /* Resource number [valid values are 0..31] */
+ u8 resource;
+
+ /* Allow releasing the resource even if it belongs to another PF */
+ bool b_force;
+
+ /* Will be set to true if the resource is released */
+ bool b_released;
+};
+
+/**
+ * @brief Releases MFW generic resource lock
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_params
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t
+ecore_mcp_resc_unlock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_resc_unlock_params *p_params);
+
#endif /* __ECORE_MCP_H__ */
diff --git a/drivers/net/qede/base/ecore_mcp_api.h b/drivers/net/qede/base/ecore_mcp_api.h
index c26b4943..190c1352 100644
--- a/drivers/net/qede/base/ecore_mcp_api.h
+++ b/drivers/net/qede/base/ecore_mcp_api.h
@@ -84,6 +84,8 @@ struct ecore_mcp_function_info {
#define ECORE_MCP_VLAN_UNSET (0xffff)
u16 ovlan;
+
+ u16 mtu;
};
struct ecore_mcp_nvm_common {
@@ -173,15 +175,10 @@ union ecore_mcp_protocol_stats {
};
#endif
-enum ecore_ov_config_method {
- ECORE_OV_CONFIG_MTU,
- ECORE_OV_CONFIG_MAC,
- ECORE_OV_CONFIG_WOL
-};
-
enum ecore_ov_client {
ECORE_OV_CLIENT_DRV,
- ECORE_OV_CLIENT_USER
+ ECORE_OV_CLIENT_USER,
+ ECORE_OV_CLIENT_VENDOR_SPEC
};
enum ecore_ov_driver_state {
@@ -235,6 +232,297 @@ struct ecore_mba_vers {
u32 mba_vers[ECORE_MAX_NUM_OF_ROMIMG];
};
+enum ecore_mfw_tlv_type {
+ ECORE_MFW_TLV_GENERIC = 0x1, /* Core driver TLVs */
+ ECORE_MFW_TLV_ETH = 0x2, /* L2 driver TLVs */
+ ECORE_MFW_TLV_FCOE = 0x4, /* FCoE protocol TLVs */
+ ECORE_MFW_TLV_ISCSI = 0x8, /* SCSI protocol TLVs */
+ ECORE_MFW_TLV_MAX = 0x16,
+};
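The group values are one-hot flags, so a single TLV request may span several groups; a minimal sketch of building such a mask:

u8 tlv_group = 0;

tlv_group |= ECORE_MFW_TLV_GENERIC;	/* core driver TLVs */
tlv_group |= ECORE_MFW_TLV_ETH;		/* L2 driver TLVs */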
+
+struct ecore_mfw_tlv_generic {
+ u16 feat_flags;
+ bool feat_flags_set;
+ u64 local_mac;
+ bool local_mac_set;
+ u64 additional_mac1;
+ bool additional_mac1_set;
+ u64 additional_mac2;
+ bool additional_mac2_set;
+ u8 drv_state;
+ bool drv_state_set;
+ u8 pxe_progress;
+ bool pxe_progress_set;
+ u64 rx_frames;
+ bool rx_frames_set;
+ u64 rx_bytes;
+ bool rx_bytes_set;
+ u64 tx_frames;
+ bool tx_frames_set;
+ u64 tx_bytes;
+ bool tx_bytes_set;
+};
+
+struct ecore_mfw_tlv_eth {
+ u16 lso_maxoff_size;
+ bool lso_maxoff_size_set;
+ u16 lso_minseg_size;
+ bool lso_minseg_size_set;
+ u8 prom_mode;
+ bool prom_mode_set;
+ u16 tx_descr_size;
+ bool tx_descr_size_set;
+ u16 rx_descr_size;
+ bool rx_descr_size_set;
+ u16 netq_count;
+ bool netq_count_set;
+ u32 tcp4_offloads;
+ bool tcp4_offloads_set;
+ u32 tcp6_offloads;
+ bool tcp6_offloads_set;
+ u16 tx_descr_qdepth;
+ bool tx_descr_qdepth_set;
+ u16 rx_descr_qdepth;
+ bool rx_descr_qdepth_set;
+ u8 iov_offload;
+ bool iov_offload_set;
+ u8 txqs_empty;
+ bool txqs_empty_set;
+ u8 rxqs_empty;
+ bool rxqs_empty_set;
+ u8 num_txqs_full;
+ bool num_txqs_full_set;
+ u8 num_rxqs_full;
+ bool num_rxqs_full_set;
+};
+
+struct ecore_mfw_tlv_fcoe {
+ u8 scsi_timeout;
+ bool scsi_timeout_set;
+ u32 rt_tov;
+ bool rt_tov_set;
+ u32 ra_tov;
+ bool ra_tov_set;
+ u32 ed_tov;
+ bool ed_tov_set;
+ u32 cr_tov;
+ bool cr_tov_set;
+ u8 boot_type;
+ bool boot_type_set;
+ u8 npiv_state;
+ bool npiv_state_set;
+ u32 num_npiv_ids;
+ bool num_npiv_ids_set;
+ u8 switch_name[8];
+ bool switch_name_set;
+ u16 switch_portnum;
+ bool switch_portnum_set;
+ u8 switch_portid[3];
+ bool switch_portid_set;
+ u8 vendor_name[8];
+ bool vendor_name_set;
+ u8 switch_model[8];
+ bool switch_model_set;
+ u8 switch_fw_version[8];
+ bool switch_fw_version_set;
+ u8 qos_pri;
+ bool qos_pri_set;
+ u8 port_alias[3];
+ bool port_alias_set;
+ u8 port_state;
+ bool port_state_set;
+ u16 fip_tx_descr_size;
+ bool fip_tx_descr_size_set;
+ u16 fip_rx_descr_size;
+ bool fip_rx_descr_size_set;
+ u16 link_failures;
+ bool link_failures_set;
+ u8 fcoe_boot_progress;
+ bool fcoe_boot_progress_set;
+ u64 rx_bcast;
+ bool rx_bcast_set;
+ u64 tx_bcast;
+ bool tx_bcast_set;
+ u16 fcoe_txq_depth;
+ bool fcoe_txq_depth_set;
+ u16 fcoe_rxq_depth;
+ bool fcoe_rxq_depth_set;
+ u64 fcoe_rx_frames;
+ bool fcoe_rx_frames_set;
+ u64 fcoe_rx_bytes;
+ bool fcoe_rx_bytes_set;
+ u64 fcoe_tx_frames;
+ bool fcoe_tx_frames_set;
+ u64 fcoe_tx_bytes;
+ bool fcoe_tx_bytes_set;
+ u16 crc_count;
+ bool crc_count_set;
+ u32 crc_err_src_fcid[5];
+ bool crc_err_src_fcid_set[5];
+ u8 crc_err_tstamp[5][14];
+ bool crc_err_tstamp_set[5];
+ u16 losync_err;
+ bool losync_err_set;
+ u16 losig_err;
+ bool losig_err_set;
+ u16 primtive_err;
+ bool primtive_err_set;
+ u16 disparity_err;
+ bool disparity_err_set;
+ u16 code_violation_err;
+ bool code_violation_err_set;
+ u32 flogi_param[4];
+ bool flogi_param_set[4];
+ u8 flogi_tstamp[14];
+ bool flogi_tstamp_set;
+ u32 flogi_acc_param[4];
+ bool flogi_acc_param_set[4];
+ u8 flogi_acc_tstamp[14];
+ bool flogi_acc_tstamp_set;
+ u32 flogi_rjt;
+ bool flogi_rjt_set;
+ u8 flogi_rjt_tstamp[14];
+ bool flogi_rjt_tstamp_set;
+ u32 fdiscs;
+ bool fdiscs_set;
+ u8 fdisc_acc;
+ bool fdisc_acc_set;
+ u8 fdisc_rjt;
+ bool fdisc_rjt_set;
+ u8 plogi;
+ bool plogi_set;
+ u8 plogi_acc;
+ bool plogi_acc_set;
+ u8 plogi_rjt;
+ bool plogi_rjt_set;
+ u32 plogi_dst_fcid[5];
+ bool plogi_dst_fcid_set[5];
+ u8 plogi_tstamp[5][14];
+ bool plogi_tstamp_set[5];
+ u32 plogi_acc_src_fcid[5];
+ bool plogi_acc_src_fcid_set[5];
+ u8 plogi_acc_tstamp[5][14];
+ bool plogi_acc_tstamp_set[5];
+ u8 tx_plogos;
+ bool tx_plogos_set;
+ u8 plogo_acc;
+ bool plogo_acc_set;
+ u8 plogo_rjt;
+ bool plogo_rjt_set;
+ u32 plogo_src_fcid[5];
+ bool plogo_src_fcid_set[5];
+ u8 plogo_tstamp[5][14];
+ bool plogo_tstamp_set[5];
+ u8 rx_logos;
+ bool rx_logos_set;
+ u8 tx_accs;
+ bool tx_accs_set;
+ u8 tx_prlis;
+ bool tx_prlis_set;
+ u8 rx_accs;
+ bool rx_accs_set;
+ u8 tx_abts;
+ bool tx_abts_set;
+ u8 rx_abts_acc;
+ bool rx_abts_acc_set;
+ u8 rx_abts_rjt;
+ bool rx_abts_rjt_set;
+ u32 abts_dst_fcid[5];
+ bool abts_dst_fcid_set[5];
+ u8 abts_tstamp[5][14];
+ bool abts_tstamp_set[5];
+ u8 rx_rscn;
+ bool rx_rscn_set;
+ u32 rx_rscn_nport[4];
+ bool rx_rscn_nport_set[4];
+ u8 tx_lun_rst;
+ bool tx_lun_rst_set;
+ u8 abort_task_sets;
+ bool abort_task_sets_set;
+ u8 tx_tprlos;
+ bool tx_tprlos_set;
+ u8 tx_nos;
+ bool tx_nos_set;
+ u8 rx_nos;
+ bool rx_nos_set;
+ u8 ols;
+ bool ols_set;
+ u8 lr;
+ bool lr_set;
+ u8 lrr;
+ bool lrr_set;
+ u8 tx_lip;
+ bool tx_lip_set;
+ u8 rx_lip;
+ bool rx_lip_set;
+ u8 eofa;
+ bool eofa_set;
+ u8 eofni;
+ bool eofni_set;
+ u8 scsi_chks;
+ bool scsi_chks_set;
+ u8 scsi_cond_met;
+ bool scsi_cond_met_set;
+ u8 scsi_busy;
+ bool scsi_busy_set;
+ u8 scsi_inter;
+ bool scsi_inter_set;
+ u8 scsi_inter_cond_met;
+ bool scsi_inter_cond_met_set;
+ u8 scsi_rsv_conflicts;
+ bool scsi_rsv_conflicts_set;
+ u8 scsi_tsk_full;
+ bool scsi_tsk_full_set;
+ u8 scsi_aca_active;
+ bool scsi_aca_active_set;
+ u8 scsi_tsk_abort;
+ bool scsi_tsk_abort_set;
+ u32 scsi_rx_chk[5];
+ bool scsi_rx_chk_set[5];
+ u8 scsi_chk_tstamp[5][14];
+ bool scsi_chk_tstamp_set[5];
+};
+
+struct ecore_mfw_tlv_iscsi {
+ u8 target_llmnr;
+ bool target_llmnr_set;
+ u8 header_digest;
+ bool header_digest_set;
+ u8 data_digest;
+ bool data_digest_set;
+ u8 auth_method;
+ bool auth_method_set;
+ u16 boot_taget_portal;
+ bool boot_taget_portal_set;
+ u16 frame_size;
+ bool frame_size_set;
+ u16 tx_desc_size;
+ bool tx_desc_size_set;
+ u16 rx_desc_size;
+ bool rx_desc_size_set;
+ u8 boot_progress;
+ bool boot_progress_set;
+ u16 tx_desc_qdepth;
+ bool tx_desc_qdepth_set;
+ u16 rx_desc_qdepth;
+ bool rx_desc_qdepth_set;
+ u64 rx_frames;
+ bool rx_frames_set;
+ u64 rx_bytes;
+ bool rx_bytes_set;
+ u64 tx_frames;
+ bool tx_frames_set;
+ u64 tx_bytes;
+ bool tx_bytes_set;
+};
+
+union ecore_mfw_tlv_data {
+ struct ecore_mfw_tlv_generic generic;
+ struct ecore_mfw_tlv_eth eth;
+ struct ecore_mfw_tlv_fcoe fcoe;
+ struct ecore_mfw_tlv_iscsi iscsi;
+};
+
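These structures all follow a value-plus-flag pattern: a field is reported to the MFW only when its matching '_set' flag is true. A hedged sketch ('stats' is a hypothetical local holding driver counters):

union ecore_mfw_tlv_data tlv_data;

OSAL_MEM_ZERO(&tlv_data, sizeof(tlv_data));
tlv_data.generic.rx_frames = stats.rx_frames;
tlv_data.generic.rx_frames_set = true;
tlv_data.generic.tx_frames = stats.tx_frames;
tlv_data.generic.tx_frames_set = true;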
/**
* @brief - returns the link params of the hw function
*
@@ -452,7 +740,6 @@ enum _ecore_status_t ecore_start_recovery_process(struct ecore_hwfn *p_hwfn,
*
* @param p_hwfn
* @param p_ptt
- * @param config - Configuation that has been updated
* @param client - ecore client type
*
* @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
@@ -460,7 +747,6 @@ enum _ecore_status_t ecore_start_recovery_process(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t
ecore_mcp_ov_update_current_config(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
- enum ecore_ov_config_method config,
enum ecore_ov_client client);
/**
@@ -792,14 +1078,49 @@ enum _ecore_status_t ecore_mcp_mem_ecc_events(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u64 *num_events);
+struct ecore_mdump_info {
+ u32 reason;
+ u32 version;
+ u32 config;
+ u32 epoch;
+ u32 num_of_logs;
+ u32 valid_logs;
+};
+
+/**
+ * @brief - Gets the MFW crash dump configuration and logs info.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_mdump_info
+ *
+ * @return ECORE_SUCCESS upon success.
+ */
+enum _ecore_status_t
+ecore_mcp_mdump_get_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_mdump_info *p_mdump_info);
+
/**
- * @brief Sets whether a critical error notification from the MFW is acked, or
- * is it being ignored and thus allowing the MFW crash dump.
+ * @brief - Clears the MFW crash dump logs.
*
- * @param p_dev
- * @param mdump_enable
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return ECORE_SUCCESS upon success.
+ */
+enum _ecore_status_t ecore_mcp_mdump_clear_logs(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief - Processes the TLV request from the MFW, i.e., gets the required
+ * TLV info from the ecore client and sends it to the MFW.
+ *
+ * @param p_hwfn
+ * @param p_ptt
*
+ * @return ECORE_SUCCESS upon success.
*/
-void ecore_mcp_mdump_enable(struct ecore_dev *p_dev, bool mdump_enable);
+enum _ecore_status_t ecore_mfw_process_tlv_req(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
#endif
diff --git a/drivers/net/qede/base/ecore_mng_tlv.c b/drivers/net/qede/base/ecore_mng_tlv.c
new file mode 100644
index 00000000..0bf1be88
--- /dev/null
+++ b/drivers/net/qede/base/ecore_mng_tlv.c
@@ -0,0 +1,1535 @@
+#include "bcm_osal.h"
+#include "ecore.h"
+#include "ecore_status.h"
+#include "ecore_mcp.h"
+#include "ecore_hw.h"
+#include "reg_addr.h"
+
+#define TLV_TYPE(p) (p[0])
+#define TLV_LENGTH(p) (p[1])
+#define TLV_FLAGS(p) (p[3])
+
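These macros index a raw byte stream in which each TLV starts with a one-dword header (type, length, reserved, flags) followed by 'length' dwords of value. A hedged sketch of walking such a buffer ('walk_tlvs', 'buf', and 'size' are illustrative, and a zero type is assumed to terminate the stream):

static void walk_tlvs(u8 *buf, u32 size)
{
	u32 offset = 0;

	while (offset < size) {
		u8 *tlv = buf + offset;

		if (!TLV_TYPE(tlv))	/* assumed end-of-stream marker */
			break;
		/* ... dispatch on TLV_TYPE(tlv) here ... */
		offset += sizeof(u32) + TLV_LENGTH(tlv) * sizeof(u32);
	}
}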
+static enum _ecore_status_t
+ecore_mfw_get_tlv_group(u8 tlv_type, u8 *tlv_group)
+{
+ switch (tlv_type) {
+ case DRV_TLV_FEATURE_FLAGS:
+ case DRV_TLV_LOCAL_ADMIN_ADDR:
+ case DRV_TLV_ADDITIONAL_MAC_ADDR_1:
+ case DRV_TLV_ADDITIONAL_MAC_ADDR_2:
+ case DRV_TLV_OS_DRIVER_STATES:
+ case DRV_TLV_PXE_BOOT_PROGRESS:
+ case DRV_TLV_RX_FRAMES_RECEIVED:
+ case DRV_TLV_RX_BYTES_RECEIVED:
+ case DRV_TLV_TX_FRAMES_SENT:
+ case DRV_TLV_TX_BYTES_SENT:
+ *tlv_group |= ECORE_MFW_TLV_GENERIC;
+ break;
+ case DRV_TLV_LSO_MAX_OFFLOAD_SIZE:
+ case DRV_TLV_LSO_MIN_SEGMENT_COUNT:
+ case DRV_TLV_PROMISCUOUS_MODE:
+ case DRV_TLV_TX_DESCRIPTORS_QUEUE_SIZE:
+ case DRV_TLV_RX_DESCRIPTORS_QUEUE_SIZE:
+ case DRV_TLV_NUM_OF_NET_QUEUE_VMQ_CFG:
+ case DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV4:
+ case DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV6:
+ case DRV_TLV_TX_DESCRIPTOR_QUEUE_AVG_DEPTH:
+ case DRV_TLV_RX_DESCRIPTORS_QUEUE_AVG_DEPTH:
+ case DRV_TLV_IOV_OFFLOAD:
+ case DRV_TLV_TX_QUEUES_EMPTY:
+ case DRV_TLV_RX_QUEUES_EMPTY:
+ case DRV_TLV_TX_QUEUES_FULL:
+ case DRV_TLV_RX_QUEUES_FULL:
+ *tlv_group |= ECORE_MFW_TLV_ETH;
+ break;
+ case DRV_TLV_SCSI_TO:
+ case DRV_TLV_R_T_TOV:
+ case DRV_TLV_R_A_TOV:
+ case DRV_TLV_E_D_TOV:
+ case DRV_TLV_CR_TOV:
+ case DRV_TLV_BOOT_TYPE:
+ case DRV_TLV_NPIV_STATE:
+ case DRV_TLV_NUM_OF_NPIV_IDS:
+ case DRV_TLV_SWITCH_NAME:
+ case DRV_TLV_SWITCH_PORT_NUM:
+ case DRV_TLV_SWITCH_PORT_ID:
+ case DRV_TLV_VENDOR_NAME:
+ case DRV_TLV_SWITCH_MODEL:
+ case DRV_TLV_SWITCH_FW_VER:
+ case DRV_TLV_QOS_PRIORITY_PER_802_1P:
+ case DRV_TLV_PORT_ALIAS:
+ case DRV_TLV_PORT_STATE:
+ case DRV_TLV_FIP_TX_DESCRIPTORS_QUEUE_SIZE:
+ case DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_SIZE:
+ case DRV_TLV_LINK_FAILURE_COUNT:
+ case DRV_TLV_FCOE_BOOT_PROGRESS:
+ case DRV_TLV_RX_BROADCAST_PACKETS:
+ case DRV_TLV_TX_BROADCAST_PACKETS:
+ case DRV_TLV_FCOE_TX_DESCRIPTOR_QUEUE_AVG_DEPTH:
+ case DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_AVG_DEPTH:
+ case DRV_TLV_FCOE_RX_FRAMES_RECEIVED:
+ case DRV_TLV_FCOE_RX_BYTES_RECEIVED:
+ case DRV_TLV_FCOE_TX_FRAMES_SENT:
+ case DRV_TLV_FCOE_TX_BYTES_SENT:
+ case DRV_TLV_CRC_ERROR_COUNT:
+ case DRV_TLV_CRC_ERROR_1_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_CRC_ERROR_1_TIMESTAMP:
+ case DRV_TLV_CRC_ERROR_2_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_CRC_ERROR_2_TIMESTAMP:
+ case DRV_TLV_CRC_ERROR_3_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_CRC_ERROR_3_TIMESTAMP:
+ case DRV_TLV_CRC_ERROR_4_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_CRC_ERROR_4_TIMESTAMP:
+ case DRV_TLV_CRC_ERROR_5_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_CRC_ERROR_5_TIMESTAMP:
+ case DRV_TLV_LOSS_OF_SYNC_ERROR_COUNT:
+ case DRV_TLV_LOSS_OF_SIGNAL_ERRORS:
+ case DRV_TLV_PRIMITIVE_SEQUENCE_PROTOCOL_ERROR_COUNT:
+ case DRV_TLV_DISPARITY_ERROR_COUNT:
+ case DRV_TLV_CODE_VIOLATION_ERROR_COUNT:
+ case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_1:
+ case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_2:
+ case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_3:
+ case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_4:
+ case DRV_TLV_LAST_FLOGI_TIMESTAMP:
+ case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_1:
+ case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_2:
+ case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_3:
+ case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_4:
+ case DRV_TLV_LAST_FLOGI_ACC_TIMESTAMP:
+ case DRV_TLV_LAST_FLOGI_RJT:
+ case DRV_TLV_LAST_FLOGI_RJT_TIMESTAMP:
+ case DRV_TLV_FDISCS_SENT_COUNT:
+ case DRV_TLV_FDISC_ACCS_RECEIVED:
+ case DRV_TLV_FDISC_RJTS_RECEIVED:
+ case DRV_TLV_PLOGI_SENT_COUNT:
+ case DRV_TLV_PLOGI_ACCS_RECEIVED:
+ case DRV_TLV_PLOGI_RJTS_RECEIVED:
+ case DRV_TLV_PLOGI_1_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_PLOGI_1_TIMESTAMP:
+ case DRV_TLV_PLOGI_2_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_PLOGI_2_TIMESTAMP:
+ case DRV_TLV_PLOGI_3_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_PLOGI_3_TIMESTAMP:
+ case DRV_TLV_PLOGI_4_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_PLOGI_4_TIMESTAMP:
+ case DRV_TLV_PLOGI_5_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_PLOGI_5_TIMESTAMP:
+ case DRV_TLV_PLOGI_1_ACC_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_PLOGI_1_ACC_TIMESTAMP:
+ case DRV_TLV_PLOGI_2_ACC_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_PLOGI_2_ACC_TIMESTAMP:
+ case DRV_TLV_PLOGI_3_ACC_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_PLOGI_3_ACC_TIMESTAMP:
+ case DRV_TLV_PLOGI_4_ACC_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_PLOGI_4_ACC_TIMESTAMP:
+ case DRV_TLV_PLOGI_5_ACC_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_PLOGI_5_ACC_TIMESTAMP:
+ case DRV_TLV_LOGOS_ISSUED:
+ case DRV_TLV_LOGO_ACCS_RECEIVED:
+ case DRV_TLV_LOGO_RJTS_RECEIVED:
+ case DRV_TLV_LOGO_1_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_LOGO_1_TIMESTAMP:
+ case DRV_TLV_LOGO_2_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_LOGO_2_TIMESTAMP:
+ case DRV_TLV_LOGO_3_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_LOGO_3_TIMESTAMP:
+ case DRV_TLV_LOGO_4_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_LOGO_4_TIMESTAMP:
+ case DRV_TLV_LOGO_5_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_LOGO_5_TIMESTAMP:
+ case DRV_TLV_LOGOS_RECEIVED:
+ case DRV_TLV_ACCS_ISSUED:
+ case DRV_TLV_PRLIS_ISSUED:
+ case DRV_TLV_ACCS_RECEIVED:
+ case DRV_TLV_ABTS_SENT_COUNT:
+ case DRV_TLV_ABTS_ACCS_RECEIVED:
+ case DRV_TLV_ABTS_RJTS_RECEIVED:
+ case DRV_TLV_ABTS_1_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_ABTS_1_TIMESTAMP:
+ case DRV_TLV_ABTS_2_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_ABTS_2_TIMESTAMP:
+ case DRV_TLV_ABTS_3_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_ABTS_3_TIMESTAMP:
+ case DRV_TLV_ABTS_4_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_ABTS_4_TIMESTAMP:
+ case DRV_TLV_ABTS_5_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_ABTS_5_TIMESTAMP:
+ case DRV_TLV_RSCNS_RECEIVED:
+ case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_1:
+ case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_2:
+ case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_3:
+ case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_4:
+ case DRV_TLV_LUN_RESETS_ISSUED:
+ case DRV_TLV_ABORT_TASK_SETS_ISSUED:
+ case DRV_TLV_TPRLOS_SENT:
+ case DRV_TLV_NOS_SENT_COUNT:
+ case DRV_TLV_NOS_RECEIVED_COUNT:
+ case DRV_TLV_OLS_COUNT:
+ case DRV_TLV_LR_COUNT:
+ case DRV_TLV_LRR_COUNT:
+ case DRV_TLV_LIP_SENT_COUNT:
+ case DRV_TLV_LIP_RECEIVED_COUNT:
+ case DRV_TLV_EOFA_COUNT:
+ case DRV_TLV_EOFNI_COUNT:
+ case DRV_TLV_SCSI_STATUS_CHECK_CONDITION_COUNT:
+ case DRV_TLV_SCSI_STATUS_CONDITION_MET_COUNT:
+ case DRV_TLV_SCSI_STATUS_BUSY_COUNT:
+ case DRV_TLV_SCSI_STATUS_INTERMEDIATE_COUNT:
+ case DRV_TLV_SCSI_STATUS_INTERMEDIATE_CONDITION_MET_COUNT:
+ case DRV_TLV_SCSI_STATUS_RESERVATION_CONFLICT_COUNT:
+ case DRV_TLV_SCSI_STATUS_TASK_SET_FULL_COUNT:
+ case DRV_TLV_SCSI_STATUS_ACA_ACTIVE_COUNT:
+ case DRV_TLV_SCSI_STATUS_TASK_ABORTED_COUNT:
+ case DRV_TLV_SCSI_CHECK_CONDITION_1_RECEIVED_SK_ASC_ASCQ:
+ case DRV_TLV_SCSI_CHECK_1_TIMESTAMP:
+ case DRV_TLV_SCSI_CHECK_CONDITION_2_RECEIVED_SK_ASC_ASCQ:
+ case DRV_TLV_SCSI_CHECK_2_TIMESTAMP:
+ case DRV_TLV_SCSI_CHECK_CONDITION_3_RECEIVED_SK_ASC_ASCQ:
+ case DRV_TLV_SCSI_CHECK_3_TIMESTAMP:
+ case DRV_TLV_SCSI_CHECK_CONDITION_4_RECEIVED_SK_ASC_ASCQ:
+ case DRV_TLV_SCSI_CHECK_4_TIMESTAMP:
+ case DRV_TLV_SCSI_CHECK_CONDITION_5_RECEIVED_SK_ASC_ASCQ:
+ case DRV_TLV_SCSI_CHECK_5_TIMESTAMP:
+ *tlv_group |= ECORE_MFW_TLV_FCOE;
+ break;
+ case DRV_TLV_TARGET_LLMNR_ENABLED:
+ case DRV_TLV_HEADER_DIGEST_FLAG_ENABLED:
+ case DRV_TLV_DATA_DIGEST_FLAG_ENABLED:
+ case DRV_TLV_AUTHENTICATION_METHOD:
+ case DRV_TLV_ISCSI_BOOT_TARGET_PORTAL:
+ case DRV_TLV_MAX_FRAME_SIZE:
+ case DRV_TLV_PDU_TX_DESCRIPTORS_QUEUE_SIZE:
+ case DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_SIZE:
+ case DRV_TLV_ISCSI_BOOT_PROGRESS:
+ case DRV_TLV_PDU_TX_DESCRIPTOR_QUEUE_AVG_DEPTH:
+ case DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_AVG_DEPTH:
+ case DRV_TLV_ISCSI_PDU_RX_FRAMES_RECEIVED:
+ case DRV_TLV_ISCSI_PDU_RX_BYTES_RECEIVED:
+ case DRV_TLV_ISCSI_PDU_TX_FRAMES_SENT:
+ case DRV_TLV_ISCSI_PDU_TX_BYTES_SENT:
+ *tlv_group |= ECORE_MFW_TLV_ISCSI;
+ break;
+ default:
+ return ECORE_INVAL;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+static int
+ecore_mfw_get_gen_tlv_value(struct ecore_drv_tlv_hdr *p_tlv,
+ struct ecore_mfw_tlv_generic *p_drv_buf,
+ u8 **p_tlv_buf)
+{
+ switch (p_tlv->tlv_type) {
+ case DRV_TLV_FEATURE_FLAGS:
+ if (p_drv_buf->feat_flags_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->feat_flags;
+ return sizeof(p_drv_buf->feat_flags);
+ }
+ break;
+ case DRV_TLV_LOCAL_ADMIN_ADDR:
+ if (p_drv_buf->local_mac_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->local_mac;
+ return sizeof(p_drv_buf->local_mac);
+ }
+ break;
+ case DRV_TLV_ADDITIONAL_MAC_ADDR_1:
+ if (p_drv_buf->additional_mac1_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->additional_mac1;
+ return sizeof(p_drv_buf->additional_mac1);
+ }
+ break;
+ case DRV_TLV_ADDITIONAL_MAC_ADDR_2:
+ if (p_drv_buf->additional_mac2_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->additional_mac2;
+ return sizeof(p_drv_buf->additional_mac2);
+ }
+ break;
+ case DRV_TLV_OS_DRIVER_STATES:
+ if (p_drv_buf->drv_state_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->drv_state;
+ return sizeof(p_drv_buf->drv_state);
+ }
+ break;
+ case DRV_TLV_PXE_BOOT_PROGRESS:
+ if (p_drv_buf->pxe_progress_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->pxe_progress;
+ return sizeof(p_drv_buf->pxe_progress);
+ }
+ break;
+ case DRV_TLV_RX_FRAMES_RECEIVED:
+ if (p_drv_buf->rx_frames_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_frames;
+ return sizeof(p_drv_buf->rx_frames);
+ }
+ break;
+ case DRV_TLV_RX_BYTES_RECEIVED:
+ if (p_drv_buf->rx_bytes_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_bytes;
+ return sizeof(p_drv_buf->rx_bytes);
+ }
+ break;
+ case DRV_TLV_TX_FRAMES_SENT:
+ if (p_drv_buf->tx_frames_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_frames;
+ return sizeof(p_drv_buf->tx_frames);
+ }
+ break;
+ case DRV_TLV_TX_BYTES_SENT:
+ if (p_drv_buf->tx_bytes_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_bytes;
+ return sizeof(p_drv_buf->tx_bytes);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return -1;
+}
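The getters in this file all share one calling pattern: on a positive return, '*p_tlv_buf' points at the value and the return value is its byte length. A hedged caller sketch ('p_tlv', 'p_tlv_data', and 'p_mfw_buf' are hypothetical locals of the surrounding request handler):

u8 *p_value = OSAL_NULL;
int len;

len = ecore_mfw_get_gen_tlv_value(p_tlv, &p_tlv_data->generic, &p_value);
if (len > 0)
	OSAL_MEMCPY(p_mfw_buf, p_value, len);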
+
+static int
+ecore_mfw_get_eth_tlv_value(struct ecore_drv_tlv_hdr *p_tlv,
+ struct ecore_mfw_tlv_eth *p_drv_buf,
+ u8 **p_tlv_buf)
+{
+ switch (p_tlv->tlv_type) {
+ case DRV_TLV_LSO_MAX_OFFLOAD_SIZE:
+ if (p_drv_buf->lso_maxoff_size_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->lso_maxoff_size;
+ return sizeof(p_drv_buf->lso_maxoff_size);
+ }
+ break;
+ case DRV_TLV_LSO_MIN_SEGMENT_COUNT:
+ if (p_drv_buf->lso_minseg_size_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->lso_minseg_size;
+ return sizeof(p_drv_buf->lso_minseg_size);
+ }
+ break;
+ case DRV_TLV_PROMISCUOUS_MODE:
+ if (p_drv_buf->prom_mode_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->prom_mode;
+ return sizeof(p_drv_buf->prom_mode);
+ }
+ break;
+ case DRV_TLV_TX_DESCRIPTORS_QUEUE_SIZE:
+ if (p_drv_buf->tx_descr_size_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_descr_size;
+ return sizeof(p_drv_buf->tx_descr_size);
+ }
+ break;
+ case DRV_TLV_RX_DESCRIPTORS_QUEUE_SIZE:
+ if (p_drv_buf->rx_descr_size_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_descr_size;
+ return sizeof(p_drv_buf->rx_descr_size);
+ }
+ break;
+ case DRV_TLV_NUM_OF_NET_QUEUE_VMQ_CFG:
+ if (p_drv_buf->netq_count_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->netq_count;
+ return sizeof(p_drv_buf->netq_count);
+ }
+ break;
+ case DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV4:
+ if (p_drv_buf->tcp4_offloads_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tcp4_offloads;
+ return sizeof(p_drv_buf->tcp4_offloads);
+ }
+ break;
+ case DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV6:
+ if (p_drv_buf->tcp6_offloads_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tcp6_offloads;
+ return sizeof(p_drv_buf->tcp6_offloads);
+ }
+ break;
+ case DRV_TLV_TX_DESCRIPTOR_QUEUE_AVG_DEPTH:
+ if (p_drv_buf->tx_descr_qdepth_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_descr_qdepth;
+ return sizeof(p_drv_buf->tx_descr_qdepth);
+ }
+ break;
+ case DRV_TLV_RX_DESCRIPTORS_QUEUE_AVG_DEPTH:
+ if (p_drv_buf->rx_descr_qdepth_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_descr_qdepth;
+ return sizeof(p_drv_buf->rx_descr_qdepth);
+ }
+ break;
+ case DRV_TLV_IOV_OFFLOAD:
+ if (p_drv_buf->iov_offload_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->iov_offload;
+ return sizeof(p_drv_buf->iov_offload);
+ }
+ break;
+ case DRV_TLV_TX_QUEUES_EMPTY:
+ if (p_drv_buf->txqs_empty_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->txqs_empty;
+ return sizeof(p_drv_buf->txqs_empty);
+ }
+ break;
+ case DRV_TLV_RX_QUEUES_EMPTY:
+ if (p_drv_buf->rxqs_empty_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rxqs_empty;
+ return sizeof(p_drv_buf->rxqs_empty);
+ }
+ break;
+ case DRV_TLV_TX_QUEUES_FULL:
+ if (p_drv_buf->num_txqs_full_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->num_txqs_full;
+ return sizeof(p_drv_buf->num_txqs_full);
+ }
+ break;
+ case DRV_TLV_RX_QUEUES_FULL:
+ if (p_drv_buf->num_rxqs_full_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->num_rxqs_full;
+ return sizeof(p_drv_buf->num_rxqs_full);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return -1;
+}
+
+static int
+ecore_mfw_get_fcoe_tlv_value(struct ecore_drv_tlv_hdr *p_tlv,
+ struct ecore_mfw_tlv_fcoe *p_drv_buf,
+ u8 **p_tlv_buf)
+{
+ switch (p_tlv->tlv_type) {
+ case DRV_TLV_SCSI_TO:
+ if (p_drv_buf->scsi_timeout_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_timeout;
+ return sizeof(p_drv_buf->scsi_timeout);
+ }
+ break;
+ case DRV_TLV_R_T_TOV:
+ if (p_drv_buf->rt_tov_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rt_tov;
+ return sizeof(p_drv_buf->rt_tov);
+ }
+ break;
+ case DRV_TLV_R_A_TOV:
+ if (p_drv_buf->ra_tov_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->ra_tov;
+ return sizeof(p_drv_buf->ra_tov);
+ }
+ break;
+ case DRV_TLV_E_D_TOV:
+ if (p_drv_buf->ed_tov_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->ed_tov;
+ return sizeof(p_drv_buf->ed_tov);
+ }
+ break;
+ case DRV_TLV_CR_TOV:
+ if (p_drv_buf->cr_tov_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->cr_tov;
+ return sizeof(p_drv_buf->cr_tov);
+ }
+ break;
+ case DRV_TLV_BOOT_TYPE:
+ if (p_drv_buf->boot_type_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->boot_type;
+ return sizeof(p_drv_buf->boot_type);
+ }
+ break;
+ case DRV_TLV_NPIV_STATE:
+ if (p_drv_buf->npiv_state_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->npiv_state;
+ return sizeof(p_drv_buf->npiv_state);
+ }
+ break;
+ case DRV_TLV_NUM_OF_NPIV_IDS:
+ if (p_drv_buf->num_npiv_ids_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->num_npiv_ids;
+ return sizeof(p_drv_buf->num_npiv_ids);
+ }
+ break;
+ case DRV_TLV_SWITCH_NAME:
+ if (p_drv_buf->switch_name_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->switch_name;
+ return sizeof(p_drv_buf->switch_name);
+ }
+ break;
+ case DRV_TLV_SWITCH_PORT_NUM:
+ if (p_drv_buf->switch_portnum_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->switch_portnum;
+ return sizeof(p_drv_buf->switch_portnum);
+ }
+ break;
+ case DRV_TLV_SWITCH_PORT_ID:
+ if (p_drv_buf->switch_portid_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->switch_portid;
+ return sizeof(p_drv_buf->switch_portid);
+ }
+ break;
+ case DRV_TLV_VENDOR_NAME:
+ if (p_drv_buf->vendor_name_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->vendor_name;
+ return sizeof(p_drv_buf->vendor_name);
+ }
+ break;
+ case DRV_TLV_SWITCH_MODEL:
+ if (p_drv_buf->switch_model_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->switch_model;
+ return sizeof(p_drv_buf->switch_model);
+ }
+ break;
+ case DRV_TLV_SWITCH_FW_VER:
+ if (p_drv_buf->switch_fw_version_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->switch_fw_version;
+ return sizeof(p_drv_buf->switch_fw_version);
+ }
+ break;
+ case DRV_TLV_QOS_PRIORITY_PER_802_1P:
+ if (p_drv_buf->qos_pri_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->qos_pri;
+ return sizeof(p_drv_buf->qos_pri);
+ }
+ break;
+ case DRV_TLV_PORT_ALIAS:
+ if (p_drv_buf->port_alias_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->port_alias;
+ return sizeof(p_drv_buf->port_alias);
+ }
+ break;
+ case DRV_TLV_PORT_STATE:
+ if (p_drv_buf->port_state_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->port_state;
+ return sizeof(p_drv_buf->port_state);
+ }
+ break;
+ case DRV_TLV_FIP_TX_DESCRIPTORS_QUEUE_SIZE:
+ if (p_drv_buf->fip_tx_descr_size_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->fip_tx_descr_size;
+ return sizeof(p_drv_buf->fip_tx_descr_size);
+ }
+ break;
+ case DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_SIZE:
+ if (p_drv_buf->fip_rx_descr_size_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->fip_rx_descr_size;
+ return sizeof(p_drv_buf->fip_rx_descr_size);
+ }
+ break;
+ case DRV_TLV_LINK_FAILURE_COUNT:
+ if (p_drv_buf->link_failures_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->link_failures;
+ return sizeof(p_drv_buf->link_failures);
+ }
+ break;
+ case DRV_TLV_FCOE_BOOT_PROGRESS:
+ if (p_drv_buf->fcoe_boot_progress_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->fcoe_boot_progress;
+ return sizeof(p_drv_buf->fcoe_boot_progress);
+ }
+ break;
+ case DRV_TLV_RX_BROADCAST_PACKETS:
+ if (p_drv_buf->rx_bcast_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_bcast;
+ return sizeof(p_drv_buf->rx_bcast);
+ }
+ break;
+ case DRV_TLV_TX_BROADCAST_PACKETS:
+ if (p_drv_buf->tx_bcast_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_bcast;
+ return sizeof(p_drv_buf->tx_bcast);
+ }
+ break;
+ case DRV_TLV_FCOE_TX_DESCRIPTOR_QUEUE_AVG_DEPTH:
+ if (p_drv_buf->fcoe_txq_depth_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->fcoe_txq_depth;
+ return sizeof(p_drv_buf->fcoe_txq_depth);
+ }
+ break;
+ case DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_AVG_DEPTH:
+ if (p_drv_buf->fcoe_rxq_depth_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->fcoe_rxq_depth;
+ return sizeof(p_drv_buf->fcoe_rxq_depth);
+ }
+ break;
+ case DRV_TLV_FCOE_RX_FRAMES_RECEIVED:
+ if (p_drv_buf->fcoe_rx_frames_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->fcoe_rx_frames;
+ return sizeof(p_drv_buf->fcoe_rx_frames);
+ }
+ break;
+ case DRV_TLV_FCOE_RX_BYTES_RECEIVED:
+ if (p_drv_buf->fcoe_rx_bytes_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->fcoe_rx_bytes;
+ return sizeof(p_drv_buf->fcoe_rx_bytes);
+ }
+ break;
+ case DRV_TLV_FCOE_TX_FRAMES_SENT:
+ if (p_drv_buf->fcoe_tx_frames_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->fcoe_tx_frames;
+ return sizeof(p_drv_buf->fcoe_tx_frames);
+ }
+ break;
+ case DRV_TLV_FCOE_TX_BYTES_SENT:
+ if (p_drv_buf->fcoe_tx_bytes_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->fcoe_tx_bytes;
+ return sizeof(p_drv_buf->fcoe_tx_bytes);
+ }
+ break;
+ case DRV_TLV_CRC_ERROR_COUNT:
+ if (p_drv_buf->crc_count_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->crc_count;
+ return sizeof(p_drv_buf->crc_count);
+ }
+ break;
+ case DRV_TLV_CRC_ERROR_1_RECEIVED_SOURCE_FC_ID:
+ if (p_drv_buf->crc_err_src_fcid_set[0]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->crc_err_src_fcid[0];
+ return sizeof(p_drv_buf->crc_err_src_fcid[0]);
+ }
+ break;
+ case DRV_TLV_CRC_ERROR_2_RECEIVED_SOURCE_FC_ID:
+ if (p_drv_buf->crc_err_src_fcid_set[1]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->crc_err_src_fcid[1];
+ return sizeof(p_drv_buf->crc_err_src_fcid[1]);
+ }
+ break;
+ case DRV_TLV_CRC_ERROR_3_RECEIVED_SOURCE_FC_ID:
+ if (p_drv_buf->crc_err_src_fcid_set[2]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->crc_err_src_fcid[2];
+ return sizeof(p_drv_buf->crc_err_src_fcid[2]);
+ }
+ break;
+ case DRV_TLV_CRC_ERROR_4_RECEIVED_SOURCE_FC_ID:
+ if (p_drv_buf->crc_err_src_fcid_set[3]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->crc_err_src_fcid[3];
+ return sizeof(p_drv_buf->crc_err_src_fcid[3]);
+ }
+ break;
+ case DRV_TLV_CRC_ERROR_5_RECEIVED_SOURCE_FC_ID:
+ if (p_drv_buf->crc_err_src_fcid_set[4]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->crc_err_src_fcid[4];
+ return sizeof(p_drv_buf->crc_err_src_fcid[4]);
+ }
+ break;
+ case DRV_TLV_CRC_ERROR_1_TIMESTAMP:
+ if (p_drv_buf->crc_err_tstamp_set[0]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->crc_err_tstamp[0];
+ return sizeof(p_drv_buf->crc_err_tstamp[0]);
+ }
+ break;
+ case DRV_TLV_CRC_ERROR_2_TIMESTAMP:
+ if (p_drv_buf->crc_err_tstamp_set[1]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->crc_err_tstamp[1];
+ return sizeof(p_drv_buf->crc_err_tstamp[1]);
+ }
+ break;
+ case DRV_TLV_CRC_ERROR_3_TIMESTAMP:
+ if (p_drv_buf->crc_err_tstamp_set[2]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->crc_err_tstamp[2];
+ return sizeof(p_drv_buf->crc_err_tstamp[2]);
+ }
+ break;
+ case DRV_TLV_CRC_ERROR_4_TIMESTAMP:
+ if (p_drv_buf->crc_err_tstamp_set[3]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->crc_err_tstamp[3];
+ return sizeof(p_drv_buf->crc_err_tstamp[3]);
+ }
+ break;
+ case DRV_TLV_CRC_ERROR_5_TIMESTAMP:
+ if (p_drv_buf->crc_err_tstamp_set[4]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->crc_err_tstamp[4];
+ return sizeof(p_drv_buf->crc_err_tstamp[4]);
+ }
+ break;
+ case DRV_TLV_LOSS_OF_SYNC_ERROR_COUNT:
+ if (p_drv_buf->losync_err_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->losync_err;
+ return sizeof(p_drv_buf->losync_err);
+ }
+ break;
+ case DRV_TLV_LOSS_OF_SIGNAL_ERRORS:
+ if (p_drv_buf->losig_err_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->losig_err;
+ return sizeof(p_drv_buf->losig_err);
+ }
+ break;
+ case DRV_TLV_PRIMITIVE_SEQUENCE_PROTOCOL_ERROR_COUNT:
+ if (p_drv_buf->primtive_err_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->primtive_err;
+ return sizeof(p_drv_buf->primtive_err);
+ }
+ break;
+ case DRV_TLV_DISPARITY_ERROR_COUNT:
+ if (p_drv_buf->disparity_err_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->disparity_err;
+ return sizeof(p_drv_buf->disparity_err);
+ }
+ break;
+ case DRV_TLV_CODE_VIOLATION_ERROR_COUNT:
+ if (p_drv_buf->code_violation_err_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->code_violation_err;
+ return sizeof(p_drv_buf->code_violation_err);
+ }
+ break;
+ case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_1:
+ if (p_drv_buf->flogi_param_set[0]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->flogi_param[0];
+ return sizeof(p_drv_buf->flogi_param[0]);
+ }
+ break;
+ case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_2:
+ if (p_drv_buf->flogi_param_set[1]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->flogi_param[1];
+ return sizeof(p_drv_buf->flogi_param[1]);
+ }
+ break;
+ case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_3:
+ if (p_drv_buf->flogi_param_set[2]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->flogi_param[2];
+ return sizeof(p_drv_buf->flogi_param[2]);
+ }
+ break;
+ case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_4:
+ if (p_drv_buf->flogi_param_set[3]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->flogi_param[3];
+ return sizeof(p_drv_buf->flogi_param[3]);
+ }
+ break;
+ case DRV_TLV_LAST_FLOGI_TIMESTAMP:
+ if (p_drv_buf->flogi_tstamp_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->flogi_tstamp;
+ return sizeof(p_drv_buf->flogi_tstamp);
+ }
+ break;
+ case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_1:
+ if (p_drv_buf->flogi_acc_param_set[0]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->flogi_acc_param[0];
+ return sizeof(p_drv_buf->flogi_acc_param[0]);
+ }
+ break;
+ case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_2:
+ if (p_drv_buf->flogi_acc_param_set[1]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->flogi_acc_param[1];
+ return sizeof(p_drv_buf->flogi_acc_param[1]);
+ }
+ break;
+ case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_3:
+ if (p_drv_buf->flogi_acc_param_set[2]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->flogi_acc_param[2];
+ return sizeof(p_drv_buf->flogi_acc_param[2]);
+ }
+ break;
+ case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_4:
+ if (p_drv_buf->flogi_acc_param_set[3]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->flogi_acc_param[3];
+ return sizeof(p_drv_buf->flogi_acc_param[3]);
+ }
+ break;
+ case DRV_TLV_LAST_FLOGI_ACC_TIMESTAMP:
+ if (p_drv_buf->flogi_acc_tstamp_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->flogi_acc_tstamp;
+ return sizeof(p_drv_buf->flogi_acc_tstamp);
+ }
+ break;
+ case DRV_TLV_LAST_FLOGI_RJT:
+ if (p_drv_buf->flogi_rjt_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->flogi_rjt;
+ return sizeof(p_drv_buf->flogi_rjt);
+ }
+ break;
+ case DRV_TLV_LAST_FLOGI_RJT_TIMESTAMP:
+ if (p_drv_buf->flogi_rjt_tstamp_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->flogi_rjt_tstamp;
+ return sizeof(p_drv_buf->flogi_rjt_tstamp);
+ }
+ break;
+ case DRV_TLV_FDISCS_SENT_COUNT:
+ if (p_drv_buf->fdiscs_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->fdiscs;
+ return sizeof(p_drv_buf->fdiscs);
+ }
+ break;
+ case DRV_TLV_FDISC_ACCS_RECEIVED:
+ if (p_drv_buf->fdisc_acc_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->fdisc_acc;
+ return sizeof(p_drv_buf->fdisc_acc);
+ }
+ break;
+ case DRV_TLV_FDISC_RJTS_RECEIVED:
+ if (p_drv_buf->fdisc_rjt_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->fdisc_rjt;
+ return sizeof(p_drv_buf->fdisc_rjt);
+ }
+ break;
+ case DRV_TLV_PLOGI_SENT_COUNT:
+ if (p_drv_buf->plogi_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi;
+ return sizeof(p_drv_buf->plogi);
+ }
+ break;
+ case DRV_TLV_PLOGI_ACCS_RECEIVED:
+ if (p_drv_buf->plogi_acc_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_acc;
+ return sizeof(p_drv_buf->plogi_acc);
+ }
+ break;
+ case DRV_TLV_PLOGI_RJTS_RECEIVED:
+ if (p_drv_buf->plogi_rjt_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_rjt;
+ return sizeof(p_drv_buf->plogi_rjt);
+ }
+ break;
+ case DRV_TLV_PLOGI_1_SENT_DESTINATION_FC_ID:
+ if (p_drv_buf->plogi_dst_fcid_set[0]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_dst_fcid[0];
+ return sizeof(p_drv_buf->plogi_dst_fcid[0]);
+ }
+ break;
+ case DRV_TLV_PLOGI_2_SENT_DESTINATION_FC_ID:
+ if (p_drv_buf->plogi_dst_fcid_set[1]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_dst_fcid[1];
+ return sizeof(p_drv_buf->plogi_dst_fcid[1]);
+ }
+ break;
+ case DRV_TLV_PLOGI_3_SENT_DESTINATION_FC_ID:
+ if (p_drv_buf->plogi_dst_fcid_set[2]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_dst_fcid[2];
+ return sizeof(p_drv_buf->plogi_dst_fcid[2]);
+ }
+ break;
+ case DRV_TLV_PLOGI_4_SENT_DESTINATION_FC_ID:
+ if (p_drv_buf->plogi_dst_fcid_set[3]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_dst_fcid[3];
+ return sizeof(p_drv_buf->plogi_dst_fcid[3]);
+ }
+ break;
+ case DRV_TLV_PLOGI_5_SENT_DESTINATION_FC_ID:
+ if (p_drv_buf->plogi_dst_fcid_set[4]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_dst_fcid[4];
+ return sizeof(p_drv_buf->plogi_dst_fcid[4]);
+ }
+ break;
+ case DRV_TLV_PLOGI_1_TIMESTAMP:
+ if (p_drv_buf->plogi_tstamp_set[0]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_tstamp[0];
+ return sizeof(p_drv_buf->plogi_tstamp[0]);
+ }
+ break;
+ case DRV_TLV_PLOGI_2_TIMESTAMP:
+ if (p_drv_buf->plogi_tstamp_set[1]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_tstamp[1];
+ return sizeof(p_drv_buf->plogi_tstamp[1]);
+ }
+ break;
+ case DRV_TLV_PLOGI_3_TIMESTAMP:
+ if (p_drv_buf->plogi_tstamp_set[2]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_tstamp[2];
+ return sizeof(p_drv_buf->plogi_tstamp[2]);
+ }
+ break;
+ case DRV_TLV_PLOGI_4_TIMESTAMP:
+ if (p_drv_buf->plogi_tstamp_set[3]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_tstamp[3];
+ return sizeof(p_drv_buf->plogi_tstamp[3]);
+ }
+ break;
+ case DRV_TLV_PLOGI_5_TIMESTAMP:
+ if (p_drv_buf->plogi_tstamp_set[4]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_tstamp[4];
+ return sizeof(p_drv_buf->plogi_tstamp[4]);
+ }
+ break;
+ case DRV_TLV_PLOGI_1_ACC_RECEIVED_SOURCE_FC_ID:
+ if (p_drv_buf->plogi_acc_src_fcid_set[0]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_acc_src_fcid[0];
+ return sizeof(p_drv_buf->plogi_acc_src_fcid[0]);
+ }
+ break;
+ case DRV_TLV_PLOGI_2_ACC_RECEIVED_SOURCE_FC_ID:
+ if (p_drv_buf->plogi_acc_src_fcid_set[1]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_acc_src_fcid[1];
+ return sizeof(p_drv_buf->plogi_acc_src_fcid[1]);
+ }
+ break;
+ case DRV_TLV_PLOGI_3_ACC_RECEIVED_SOURCE_FC_ID:
+ if (p_drv_buf->plogi_acc_src_fcid_set[2]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_acc_src_fcid[2];
+ return sizeof(p_drv_buf->plogi_acc_src_fcid[2]);
+ }
+ break;
+ case DRV_TLV_PLOGI_4_ACC_RECEIVED_SOURCE_FC_ID:
+ if (p_drv_buf->plogi_acc_src_fcid_set[3]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_acc_src_fcid[3];
+ return sizeof(p_drv_buf->plogi_acc_src_fcid[3]);
+ }
+ break;
+ case DRV_TLV_PLOGI_5_ACC_RECEIVED_SOURCE_FC_ID:
+ if (p_drv_buf->plogi_acc_src_fcid_set[4]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_acc_src_fcid[4];
+ return sizeof(p_drv_buf->plogi_acc_src_fcid[4]);
+ }
+ break;
+ case DRV_TLV_PLOGI_1_ACC_TIMESTAMP:
+ if (p_drv_buf->plogi_acc_tstamp_set[0]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_acc_tstamp[0];
+ return sizeof(p_drv_buf->plogi_acc_tstamp[0]);
+ }
+ break;
+ case DRV_TLV_PLOGI_2_ACC_TIMESTAMP:
+ if (p_drv_buf->plogi_acc_tstamp_set[1]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_acc_tstamp[1];
+ return sizeof(p_drv_buf->plogi_acc_tstamp[1]);
+ }
+ break;
+ case DRV_TLV_PLOGI_3_ACC_TIMESTAMP:
+ if (p_drv_buf->plogi_acc_tstamp_set[2]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_acc_tstamp[2];
+ return sizeof(p_drv_buf->plogi_acc_tstamp[2]);
+ }
+ break;
+ case DRV_TLV_PLOGI_4_ACC_TIMESTAMP:
+ if (p_drv_buf->plogi_acc_tstamp_set[3]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_acc_tstamp[3];
+ return sizeof(p_drv_buf->plogi_acc_tstamp[3]);
+ }
+ break;
+ case DRV_TLV_PLOGI_5_ACC_TIMESTAMP:
+ if (p_drv_buf->plogi_acc_tstamp_set[4]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_acc_tstamp[4];
+ return sizeof(p_drv_buf->plogi_acc_tstamp[4]);
+ }
+ break;
+ case DRV_TLV_LOGOS_ISSUED:
+ if (p_drv_buf->tx_plogos_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_plogos;
+ return sizeof(p_drv_buf->tx_plogos);
+ }
+ break;
+ case DRV_TLV_LOGO_ACCS_RECEIVED:
+ if (p_drv_buf->plogo_acc_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogo_acc;
+ return sizeof(p_drv_buf->plogo_acc);
+ }
+ break;
+ case DRV_TLV_LOGO_RJTS_RECEIVED:
+ if (p_drv_buf->plogo_rjt_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogo_rjt;
+ return sizeof(p_drv_buf->plogo_rjt);
+ }
+ break;
+ case DRV_TLV_LOGO_1_RECEIVED_SOURCE_FC_ID:
+ if (p_drv_buf->plogo_src_fcid_set[0]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogo_src_fcid[0];
+ return sizeof(p_drv_buf->plogo_src_fcid[0]);
+ }
+ break;
+ case DRV_TLV_LOGO_2_RECEIVED_SOURCE_FC_ID:
+ if (p_drv_buf->plogo_src_fcid_set[1]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogo_src_fcid[1];
+ return sizeof(p_drv_buf->plogo_src_fcid[1]);
+ }
+ break;
+ case DRV_TLV_LOGO_3_RECEIVED_SOURCE_FC_ID:
+ if (p_drv_buf->plogo_src_fcid_set[2]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogo_src_fcid[2];
+ return sizeof(p_drv_buf->plogo_src_fcid[2]);
+ }
+ break;
+ case DRV_TLV_LOGO_4_RECEIVED_SOURCE_FC_ID:
+ if (p_drv_buf->plogo_src_fcid_set[3]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogo_src_fcid[3];
+ return sizeof(p_drv_buf->plogo_src_fcid[3]);
+ }
+ break;
+ case DRV_TLV_LOGO_5_RECEIVED_SOURCE_FC_ID:
+ if (p_drv_buf->plogo_src_fcid_set[4]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogo_src_fcid[4];
+ return sizeof(p_drv_buf->plogo_src_fcid[4]);
+ }
+ break;
+ case DRV_TLV_LOGO_1_TIMESTAMP:
+ if (p_drv_buf->plogo_tstamp_set[0]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogo_tstamp[0];
+ return sizeof(p_drv_buf->plogo_tstamp[0]);
+ }
+ break;
+ case DRV_TLV_LOGO_2_TIMESTAMP:
+ if (p_drv_buf->plogo_tstamp_set[1]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogo_tstamp[1];
+ return sizeof(p_drv_buf->plogo_tstamp[1]);
+ }
+ break;
+ case DRV_TLV_LOGO_3_TIMESTAMP:
+ if (p_drv_buf->plogo_tstamp_set[2]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogo_tstamp[2];
+ return sizeof(p_drv_buf->plogo_tstamp[2]);
+ }
+ break;
+ case DRV_TLV_LOGO_4_TIMESTAMP:
+ if (p_drv_buf->plogo_tstamp_set[3]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogo_tstamp[3];
+ return sizeof(p_drv_buf->plogo_tstamp[3]);
+ }
+ break;
+ case DRV_TLV_LOGO_5_TIMESTAMP:
+ if (p_drv_buf->plogo_tstamp_set[4]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogo_tstamp[4];
+ return sizeof(p_drv_buf->plogo_tstamp[4]);
+ }
+ break;
+ case DRV_TLV_LOGOS_RECEIVED:
+ if (p_drv_buf->rx_logos_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_logos;
+ return sizeof(p_drv_buf->rx_logos);
+ }
+ break;
+ case DRV_TLV_ACCS_ISSUED:
+ if (p_drv_buf->tx_accs_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_accs;
+ return sizeof(p_drv_buf->tx_accs);
+ }
+ break;
+ case DRV_TLV_PRLIS_ISSUED:
+ if (p_drv_buf->tx_prlis_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_prlis;
+ return sizeof(p_drv_buf->tx_prlis);
+ }
+ break;
+ case DRV_TLV_ACCS_RECEIVED:
+ if (p_drv_buf->rx_accs_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_accs;
+ return sizeof(p_drv_buf->rx_accs);
+ }
+ break;
+ case DRV_TLV_ABTS_SENT_COUNT:
+ if (p_drv_buf->tx_abts_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_abts;
+ return sizeof(p_drv_buf->tx_abts);
+ }
+ break;
+ case DRV_TLV_ABTS_ACCS_RECEIVED:
+ if (p_drv_buf->rx_abts_acc_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_abts_acc;
+ return sizeof(p_drv_buf->rx_abts_acc);
+ }
+ break;
+ case DRV_TLV_ABTS_RJTS_RECEIVED:
+ if (p_drv_buf->rx_abts_rjt_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_abts_rjt;
+ return sizeof(p_drv_buf->rx_abts_rjt);
+ }
+ break;
+ case DRV_TLV_ABTS_1_SENT_DESTINATION_FC_ID:
+ if (p_drv_buf->abts_dst_fcid_set[0]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->abts_dst_fcid[0];
+ return sizeof(p_drv_buf->abts_dst_fcid[0]);
+ }
+ break;
+ case DRV_TLV_ABTS_2_SENT_DESTINATION_FC_ID:
+ if (p_drv_buf->abts_dst_fcid_set[1]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->abts_dst_fcid[1];
+ return sizeof(p_drv_buf->abts_dst_fcid[1]);
+ }
+ break;
+ case DRV_TLV_ABTS_3_SENT_DESTINATION_FC_ID:
+ if (p_drv_buf->abts_dst_fcid_set[2]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->abts_dst_fcid[2];
+ return sizeof(p_drv_buf->abts_dst_fcid[2]);
+ }
+ break;
+ case DRV_TLV_ABTS_4_SENT_DESTINATION_FC_ID:
+ if (p_drv_buf->abts_dst_fcid_set[3]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->abts_dst_fcid[3];
+ return sizeof(p_drv_buf->abts_dst_fcid[3]);
+ }
+ break;
+ case DRV_TLV_ABTS_5_SENT_DESTINATION_FC_ID:
+ if (p_drv_buf->abts_dst_fcid_set[4]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->abts_dst_fcid[4];
+ return sizeof(p_drv_buf->abts_dst_fcid[4]);
+ }
+ break;
+ case DRV_TLV_ABTS_1_TIMESTAMP:
+ if (p_drv_buf->abts_tstamp_set[0]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->abts_tstamp[0];
+ return sizeof(p_drv_buf->abts_tstamp[0]);
+ }
+ break;
+ case DRV_TLV_ABTS_2_TIMESTAMP:
+ if (p_drv_buf->abts_tstamp_set[1]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->abts_tstamp[1];
+ return sizeof(p_drv_buf->abts_tstamp[1]);
+ }
+ break;
+ case DRV_TLV_ABTS_3_TIMESTAMP:
+ if (p_drv_buf->abts_tstamp_set[2]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->abts_tstamp[2];
+ return sizeof(p_drv_buf->abts_tstamp[2]);
+ }
+ break;
+ case DRV_TLV_ABTS_4_TIMESTAMP:
+ if (p_drv_buf->abts_tstamp_set[3]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->abts_tstamp[3];
+ return sizeof(p_drv_buf->abts_tstamp[3]);
+ }
+ break;
+ case DRV_TLV_ABTS_5_TIMESTAMP:
+ if (p_drv_buf->abts_tstamp_set[4]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->abts_tstamp[4];
+ return sizeof(p_drv_buf->abts_tstamp[4]);
+ }
+ break;
+ case DRV_TLV_RSCNS_RECEIVED:
+ if (p_drv_buf->rx_rscn_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_rscn;
+ return sizeof(p_drv_buf->rx_rscn);
+ }
+ break;
+ case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_1:
+ if (p_drv_buf->rx_rscn_nport_set[0]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_rscn_nport[0];
+ return sizeof(p_drv_buf->rx_rscn_nport[0]);
+ }
+ break;
+ case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_2:
+ if (p_drv_buf->rx_rscn_nport_set[1]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_rscn_nport[1];
+ return sizeof(p_drv_buf->rx_rscn_nport[1]);
+ }
+ break;
+ case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_3:
+ if (p_drv_buf->rx_rscn_nport_set[2]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_rscn_nport[2];
+ return sizeof(p_drv_buf->rx_rscn_nport[2]);
+ }
+ break;
+ case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_4:
+ if (p_drv_buf->rx_rscn_nport_set[3]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_rscn_nport[3];
+ return sizeof(p_drv_buf->rx_rscn_nport[3]);
+ }
+ break;
+ case DRV_TLV_LUN_RESETS_ISSUED:
+ if (p_drv_buf->tx_lun_rst_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_lun_rst;
+ return sizeof(p_drv_buf->tx_lun_rst);
+ }
+ break;
+ case DRV_TLV_ABORT_TASK_SETS_ISSUED:
+ if (p_drv_buf->abort_task_sets_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->abort_task_sets;
+ return sizeof(p_drv_buf->abort_task_sets);
+ }
+ break;
+ case DRV_TLV_TPRLOS_SENT:
+ if (p_drv_buf->tx_tprlos_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_tprlos;
+ return sizeof(p_drv_buf->tx_tprlos);
+ }
+ break;
+ case DRV_TLV_NOS_SENT_COUNT:
+ if (p_drv_buf->tx_nos_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_nos;
+ return sizeof(p_drv_buf->tx_nos);
+ }
+ break;
+ case DRV_TLV_NOS_RECEIVED_COUNT:
+ if (p_drv_buf->rx_nos_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_nos;
+ return sizeof(p_drv_buf->rx_nos);
+ }
+ break;
+ case DRV_TLV_OLS_COUNT:
+ if (p_drv_buf->ols_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->ols;
+ return sizeof(p_drv_buf->ols);
+ }
+ break;
+ case DRV_TLV_LR_COUNT:
+ if (p_drv_buf->lr_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->lr;
+ return sizeof(p_drv_buf->lr);
+ }
+ break;
+ case DRV_TLV_LRR_COUNT:
+ if (p_drv_buf->lrr_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->lrr;
+ return sizeof(p_drv_buf->lrr);
+ }
+ break;
+ case DRV_TLV_LIP_SENT_COUNT:
+ if (p_drv_buf->tx_lip_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_lip;
+ return sizeof(p_drv_buf->tx_lip);
+ }
+ break;
+ case DRV_TLV_LIP_RECEIVED_COUNT:
+ if (p_drv_buf->rx_lip_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_lip;
+ return sizeof(p_drv_buf->rx_lip);
+ }
+ break;
+ case DRV_TLV_EOFA_COUNT:
+ if (p_drv_buf->eofa_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->eofa;
+ return sizeof(p_drv_buf->eofa);
+ }
+ break;
+ case DRV_TLV_EOFNI_COUNT:
+ if (p_drv_buf->eofni_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->eofni;
+ return sizeof(p_drv_buf->eofni);
+ }
+ break;
+ case DRV_TLV_SCSI_STATUS_CHECK_CONDITION_COUNT:
+ if (p_drv_buf->scsi_chks_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_chks;
+ return sizeof(p_drv_buf->scsi_chks);
+ }
+ break;
+ case DRV_TLV_SCSI_STATUS_CONDITION_MET_COUNT:
+ if (p_drv_buf->scsi_cond_met_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_cond_met;
+ return sizeof(p_drv_buf->scsi_cond_met);
+ }
+ break;
+ case DRV_TLV_SCSI_STATUS_BUSY_COUNT:
+ if (p_drv_buf->scsi_busy_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_busy;
+ return sizeof(p_drv_buf->scsi_busy);
+ }
+ break;
+ case DRV_TLV_SCSI_STATUS_INTERMEDIATE_COUNT:
+ if (p_drv_buf->scsi_inter_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_inter;
+ return sizeof(p_drv_buf->scsi_inter);
+ }
+ break;
+ case DRV_TLV_SCSI_STATUS_INTERMEDIATE_CONDITION_MET_COUNT:
+ if (p_drv_buf->scsi_inter_cond_met_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_inter_cond_met;
+ return sizeof(p_drv_buf->scsi_inter_cond_met);
+ }
+ break;
+ case DRV_TLV_SCSI_STATUS_RESERVATION_CONFLICT_COUNT:
+ if (p_drv_buf->scsi_rsv_conflicts_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_rsv_conflicts;
+ return sizeof(p_drv_buf->scsi_rsv_conflicts);
+ }
+ break;
+ case DRV_TLV_SCSI_STATUS_TASK_SET_FULL_COUNT:
+ if (p_drv_buf->scsi_tsk_full_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_tsk_full;
+ return sizeof(p_drv_buf->scsi_tsk_full);
+ }
+ break;
+ case DRV_TLV_SCSI_STATUS_ACA_ACTIVE_COUNT:
+ if (p_drv_buf->scsi_aca_active_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_aca_active;
+ return sizeof(p_drv_buf->scsi_aca_active);
+ }
+ break;
+ case DRV_TLV_SCSI_STATUS_TASK_ABORTED_COUNT:
+ if (p_drv_buf->scsi_tsk_abort_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_tsk_abort;
+ return sizeof(p_drv_buf->scsi_tsk_abort);
+ }
+ break;
+ case DRV_TLV_SCSI_CHECK_CONDITION_1_RECEIVED_SK_ASC_ASCQ:
+ if (p_drv_buf->scsi_rx_chk_set[0]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_rx_chk[0];
+ return sizeof(p_drv_buf->scsi_rx_chk[0]);
+ }
+ break;
+ case DRV_TLV_SCSI_CHECK_CONDITION_2_RECEIVED_SK_ASC_ASCQ:
+ if (p_drv_buf->scsi_rx_chk_set[1]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_rx_chk[1];
+ return sizeof(p_drv_buf->scsi_rx_chk[1]);
+ }
+ break;
+ case DRV_TLV_SCSI_CHECK_CONDITION_3_RECEIVED_SK_ASC_ASCQ:
+ if (p_drv_buf->scsi_rx_chk_set[2]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_rx_chk[2];
+ return sizeof(p_drv_buf->scsi_rx_chk[2]);
+ }
+ break;
+ case DRV_TLV_SCSI_CHECK_CONDITION_4_RECEIVED_SK_ASC_ASCQ:
+ if (p_drv_buf->scsi_rx_chk_set[3]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_rx_chk[3];
+			return sizeof(p_drv_buf->scsi_rx_chk[3]);
+ }
+ break;
+ case DRV_TLV_SCSI_CHECK_CONDITION_5_RECEIVED_SK_ASC_ASCQ:
+ if (p_drv_buf->scsi_rx_chk_set[4]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_rx_chk[4];
+ return sizeof(p_drv_buf->scsi_rx_chk[4]);
+ }
+ break;
+ case DRV_TLV_SCSI_CHECK_1_TIMESTAMP:
+ if (p_drv_buf->scsi_chk_tstamp_set[0]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_chk_tstamp[0];
+ return sizeof(p_drv_buf->scsi_chk_tstamp[0]);
+ }
+ break;
+ case DRV_TLV_SCSI_CHECK_2_TIMESTAMP:
+ if (p_drv_buf->scsi_chk_tstamp_set[1]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_chk_tstamp[1];
+ return sizeof(p_drv_buf->scsi_chk_tstamp[1]);
+ }
+ break;
+ case DRV_TLV_SCSI_CHECK_3_TIMESTAMP:
+ if (p_drv_buf->scsi_chk_tstamp_set[2]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_chk_tstamp[2];
+ return sizeof(p_drv_buf->scsi_chk_tstamp[2]);
+ }
+ break;
+ case DRV_TLV_SCSI_CHECK_4_TIMESTAMP:
+ if (p_drv_buf->scsi_chk_tstamp_set[3]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_chk_tstamp[3];
+ return sizeof(p_drv_buf->scsi_chk_tstamp[3]);
+ }
+ break;
+ case DRV_TLV_SCSI_CHECK_5_TIMESTAMP:
+ if (p_drv_buf->scsi_chk_tstamp_set[4]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_chk_tstamp[4];
+ return sizeof(p_drv_buf->scsi_chk_tstamp[4]);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return -1;
+}
+
+static int
+ecore_mfw_get_iscsi_tlv_value(struct ecore_drv_tlv_hdr *p_tlv,
+ struct ecore_mfw_tlv_iscsi *p_drv_buf,
+ u8 **p_tlv_buf)
+{
+ switch (p_tlv->tlv_type) {
+ case DRV_TLV_TARGET_LLMNR_ENABLED:
+ if (p_drv_buf->target_llmnr_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->target_llmnr;
+ return sizeof(p_drv_buf->target_llmnr);
+ }
+ break;
+ case DRV_TLV_HEADER_DIGEST_FLAG_ENABLED:
+ if (p_drv_buf->header_digest_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->header_digest;
+ return sizeof(p_drv_buf->header_digest);
+ }
+ break;
+ case DRV_TLV_DATA_DIGEST_FLAG_ENABLED:
+ if (p_drv_buf->data_digest_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->data_digest;
+ return sizeof(p_drv_buf->data_digest);
+ }
+ break;
+ case DRV_TLV_AUTHENTICATION_METHOD:
+ if (p_drv_buf->auth_method_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->auth_method;
+ return sizeof(p_drv_buf->auth_method);
+ }
+ break;
+ case DRV_TLV_ISCSI_BOOT_TARGET_PORTAL:
+ if (p_drv_buf->boot_taget_portal_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->boot_taget_portal;
+ return sizeof(p_drv_buf->boot_taget_portal);
+ }
+ break;
+ case DRV_TLV_MAX_FRAME_SIZE:
+ if (p_drv_buf->frame_size_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->frame_size;
+ return sizeof(p_drv_buf->frame_size);
+ }
+ break;
+ case DRV_TLV_PDU_TX_DESCRIPTORS_QUEUE_SIZE:
+ if (p_drv_buf->tx_desc_size_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_desc_size;
+ return sizeof(p_drv_buf->tx_desc_size);
+ }
+ break;
+ case DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_SIZE:
+ if (p_drv_buf->rx_desc_size_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_desc_size;
+ return sizeof(p_drv_buf->rx_desc_size);
+ }
+ break;
+ case DRV_TLV_ISCSI_BOOT_PROGRESS:
+ if (p_drv_buf->boot_progress_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->boot_progress;
+ return sizeof(p_drv_buf->boot_progress);
+ }
+ break;
+ case DRV_TLV_PDU_TX_DESCRIPTOR_QUEUE_AVG_DEPTH:
+ if (p_drv_buf->tx_desc_qdepth_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_desc_qdepth;
+ return sizeof(p_drv_buf->tx_desc_qdepth);
+ }
+ break;
+ case DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_AVG_DEPTH:
+ if (p_drv_buf->rx_desc_qdepth_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_desc_qdepth;
+ return sizeof(p_drv_buf->rx_desc_qdepth);
+ }
+ break;
+ case DRV_TLV_ISCSI_PDU_RX_FRAMES_RECEIVED:
+ if (p_drv_buf->rx_frames_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_frames;
+ return sizeof(p_drv_buf->rx_frames);
+ }
+ break;
+ case DRV_TLV_ISCSI_PDU_RX_BYTES_RECEIVED:
+ if (p_drv_buf->rx_bytes_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_bytes;
+ return sizeof(p_drv_buf->rx_bytes);
+ }
+ break;
+ case DRV_TLV_ISCSI_PDU_TX_FRAMES_SENT:
+ if (p_drv_buf->tx_frames_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_frames;
+ return sizeof(p_drv_buf->tx_frames);
+ }
+ break;
+ case DRV_TLV_ISCSI_PDU_TX_BYTES_SENT:
+ if (p_drv_buf->tx_bytes_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_bytes;
+ return sizeof(p_drv_buf->tx_bytes);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return -1;
+}
+
+static enum _ecore_status_t
+ecore_mfw_update_tlvs(u8 tlv_group, struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u8 *p_mfw_buf, u32 size)
+{
+ union ecore_mfw_tlv_data *p_tlv_data;
+ struct ecore_drv_tlv_hdr tlv;
+ u8 *p_tlv_ptr = OSAL_NULL, *p_temp;
+ u32 offset;
+ int len;
+
+ p_tlv_data = OSAL_VZALLOC(p_hwfn->p_dev, sizeof(*p_tlv_data));
+ if (!p_tlv_data)
+ return ECORE_NOMEM;
+
+ if (OSAL_MFW_FILL_TLV_DATA(p_hwfn, tlv_group, p_tlv_data)) {
+ OSAL_VFREE(p_hwfn->p_dev, p_tlv_data);
+ return ECORE_INVAL;
+ }
+
+ offset = 0;
+ OSAL_MEMSET(&tlv, 0, sizeof(tlv));
+ while (offset < size) {
+ p_temp = &p_mfw_buf[offset];
+ tlv.tlv_type = TLV_TYPE(p_temp);
+ tlv.tlv_length = TLV_LENGTH(p_temp);
+ tlv.tlv_flags = TLV_FLAGS(p_temp);
+ DP_INFO(p_hwfn, "Type %d length = %d flags = 0x%x\n",
+ tlv.tlv_type, tlv.tlv_length, tlv.tlv_flags);
+
+ offset += sizeof(tlv);
+ if (tlv_group == ECORE_MFW_TLV_GENERIC)
+ len = ecore_mfw_get_gen_tlv_value(&tlv,
+ &p_tlv_data->generic, &p_tlv_ptr);
+ else if (tlv_group == ECORE_MFW_TLV_ETH)
+ len = ecore_mfw_get_eth_tlv_value(&tlv,
+ &p_tlv_data->eth, &p_tlv_ptr);
+ else if (tlv_group == ECORE_MFW_TLV_FCOE)
+ len = ecore_mfw_get_fcoe_tlv_value(&tlv,
+ &p_tlv_data->fcoe, &p_tlv_ptr);
+ else
+ len = ecore_mfw_get_iscsi_tlv_value(&tlv,
+ &p_tlv_data->iscsi, &p_tlv_ptr);
+
+ if (len > 0) {
+ OSAL_WARN(len > 4 * tlv.tlv_length,
+ "Incorrect MFW TLV length");
+ len = OSAL_MIN_T(int, len, 4 * tlv.tlv_length);
+ tlv.tlv_flags |= ECORE_DRV_TLV_FLAGS_CHANGED;
+ /* TODO: Endianness handling? */
+			/* Write the updated header back at its own offset,
+			 * not at the start of the buffer.
+			 */
+			OSAL_MEMCPY(p_mfw_buf + offset - sizeof(tlv), &tlv,
+				    sizeof(tlv));
+ OSAL_MEMCPY(p_mfw_buf + offset, p_tlv_ptr, len);
+ }
+
+ offset += sizeof(u32) * tlv.tlv_length;
+ }
+
+ OSAL_VFREE(p_hwfn->p_dev, p_tlv_data);
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t
+ecore_mfw_process_tlv_req(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+ u32 addr, size, offset, resp, param, val;
+ u8 tlv_group = 0, id, *p_mfw_buf = OSAL_NULL, *p_temp;
+ u32 global_offsize, global_addr;
+ enum _ecore_status_t rc;
+ struct ecore_drv_tlv_hdr tlv;
+
+ addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+ PUBLIC_GLOBAL);
+ global_offsize = ecore_rd(p_hwfn, p_ptt, addr);
+ global_addr = SECTION_ADDR(global_offsize, 0);
+ addr = global_addr + OFFSETOF(struct public_global, data_ptr);
+ size = ecore_rd(p_hwfn, p_ptt, global_addr +
+ OFFSETOF(struct public_global, data_size));
+
+ if (!size) {
+ DP_NOTICE(p_hwfn, false, "Invalid TLV req size = %d\n", size);
+ goto drv_done;
+ }
+
+ p_mfw_buf = (void *)OSAL_VZALLOC(p_hwfn->p_dev, size);
+ if (!p_mfw_buf) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed allocate memory for p_mfw_buf\n");
+ goto drv_done;
+ }
+
+ /* Read the TLV request to local buffer */
+ for (offset = 0; offset < size; offset += sizeof(u32)) {
+ val = ecore_rd(p_hwfn, p_ptt, addr + offset);
+ OSAL_MEMCPY(&p_mfw_buf[offset], &val, sizeof(u32));
+ }
+
+ /* Parse the headers to enumerate the requested TLV groups */
+ for (offset = 0; offset < size;
+ offset += sizeof(tlv) + sizeof(u32) * tlv.tlv_length) {
+ p_temp = &p_mfw_buf[offset];
+ tlv.tlv_type = TLV_TYPE(p_temp);
+ tlv.tlv_length = TLV_LENGTH(p_temp);
+ if (ecore_mfw_get_tlv_group(tlv.tlv_type, &tlv_group))
+ goto drv_done;
+ }
+
+ /* Update the TLV values in the local buffer */
+ for (id = ECORE_MFW_TLV_GENERIC; id < ECORE_MFW_TLV_MAX; id <<= 1) {
+ if (tlv_group & id) {
+ if (ecore_mfw_update_tlvs(id, p_hwfn, p_ptt, p_mfw_buf,
+ size))
+ goto drv_done;
+ }
+ }
+
+ /* Write the TLV data to shared memory */
+ for (offset = 0; offset < size; offset += sizeof(u32)) {
+		OSAL_MEMCPY(&val, &p_mfw_buf[offset], sizeof(u32));
+		ecore_wr(p_hwfn, p_ptt, addr + offset, val);
+ }
+
+drv_done:
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_TLV_DONE, 0, &resp,
+ &param);
+
+ OSAL_VFREE(p_hwfn->p_dev, p_mfw_buf);
+
+ return rc;
+}
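The request/update flow above relies on a fixed buffer layout: a packed sequence of one-dword TLV headers, each followed by tlv_length dwords of value, which is why every offset advance multiplies tlv_length by sizeof(u32). The following is a minimal sketch of a walker over such a buffer (not part of the patch), assuming the TLV_TYPE/TLV_LENGTH/TLV_FLAGS accessors from this file and a one-dword struct ecore_drv_tlv_hdr:

/* Sketch: count the entries in a raw MFW TLV buffer. Assumes the
 * same layout as ecore_mfw_update_tlvs() above: a one-dword header
 * followed by tlv_length dwords of value.
 */
static u32 tlv_buf_count_entries(u8 *p_buf, u32 size)
{
	struct ecore_drv_tlv_hdr tlv;
	u32 offset = 0, count = 0;

	while (offset < size) {
		u8 *p_temp = &p_buf[offset];

		tlv.tlv_type = TLV_TYPE(p_temp);
		tlv.tlv_length = TLV_LENGTH(p_temp); /* in dwords */
		tlv.tlv_flags = TLV_FLAGS(p_temp);

		/* header first, then the dword-granular value */
		offset += sizeof(tlv) + sizeof(u32) * tlv.tlv_length;
		count++;
	}

	return count;
}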
diff --git a/drivers/net/qede/base/ecore_proto_if.h b/drivers/net/qede/base/ecore_proto_if.h
index e252d528..226e3d2a 100644
--- a/drivers/net/qede/base/ecore_proto_if.h
+++ b/drivers/net/qede/base/ecore_proto_if.h
@@ -21,6 +21,12 @@ struct ecore_eth_pf_params {
* to update_pf_params routine invoked before slowpath start
*/
u16 num_cons;
+
+	/* To enable aRFS, a positive number needs to be set here prior to
+	 * HW-init [as the filters require allocated searcher ILT memory].
+	 * This sets the maximal number of configured steering-filters.
+	 */
+ u32 num_arfs_filters;
};
/* Most of the parameters below are described in the FW iSCSI / TCP HSI */
@@ -63,6 +69,12 @@ struct ecore_iscsi_pf_params {
u8 bdq_pbl_num_entries[2];
};
+enum ecore_rdma_protocol {
+ ECORE_RDMA_PROTOCOL_DEFAULT,
+ ECORE_RDMA_PROTOCOL_ROCE,
+ ECORE_RDMA_PROTOCOL_IWARP,
+};
+
struct ecore_rdma_pf_params {
/* Supplied to ECORE during resource allocation (may affect the ILT and
* the doorbell BAR).
@@ -76,6 +88,10 @@ struct ecore_rdma_pf_params {
/* Will allocate rate limiters to be used with QPs */
u8 enable_dcqcn;
+
+	/* TCP port number used for the iWARP traffic */
+ u16 iwarp_port;
+ enum ecore_rdma_protocol rdma_protocol;
};
struct ecore_pf_params {
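Both additions above are consumed before slowpath start: num_arfs_filters sizes the searcher ILT allocation, while rdma_protocol/iwarp_port select the RDMA personality. A minimal usage sketch (not part of the patch), assuming the ecore_pf_params members mirror the per-protocol struct names:

/* Sketch: pre-HW-init PF parameter setup. The member names
 * eth_pf_params/rdma_pf_params are assumed; the filter count is an
 * arbitrary example value.
 */
static void pf_params_setup_sketch(struct ecore_pf_params *p_params)
{
	/* Reserve searcher ILT memory for up to 64 steering filters */
	p_params->eth_pf_params.num_arfs_filters = 64;

	/* Let the management FW decide the RDMA personality */
	p_params->rdma_pf_params.rdma_protocol = ECORE_RDMA_PROTOCOL_DEFAULT;
}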
diff --git a/drivers/net/qede/base/ecore_rt_defs.h b/drivers/net/qede/base/ecore_rt_defs.h
index 01a29e31..846dc6d1 100644
--- a/drivers/net/qede/base/ecore_rt_defs.h
+++ b/drivers/net/qede/base/ecore_rt_defs.h
@@ -115,339 +115,338 @@
#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 28716
#define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416
#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 29132
-#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 512
-#define QM_REG_MAXPQSIZE_0_RT_OFFSET 29644
-#define QM_REG_MAXPQSIZE_1_RT_OFFSET 29645
-#define QM_REG_MAXPQSIZE_2_RT_OFFSET 29646
-#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 29647
-#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 29648
-#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 29649
-#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 29650
-#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 29651
-#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 29652
-#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 29653
-#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 29654
-#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 29655
-#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 29656
-#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 29657
-#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 29658
-#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 29659
-#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 29660
-#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 29661
-#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 29662
-#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 29663
-#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 29664
-#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 29665
-#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 29666
-#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 29667
-#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 29668
-#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 29669
-#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 29670
-#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 29671
-#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 29672
-#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 29673
-#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 29674
-#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 29675
-#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 29676
-#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 29677
-#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 29678
-#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 29679
-#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 29680
-#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 29681
-#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 29682
-#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 29683
-#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 29684
-#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 29685
-#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 29686
-#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 29687
-#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 29688
-#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 29689
-#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 29690
-#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 29691
-#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 29692
-#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 29693
-#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 29694
-#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 29695
-#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 29696
-#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 29697
-#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 29698
-#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 29699
-#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 29700
-#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 29701
-#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 29702
-#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 29703
-#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 29704
-#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 29705
-#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 29706
-#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 29707
-#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 29708
-#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 29709
-#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 29710
-#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 29711
-#define QM_REG_BASEADDROTHERPQ_RT_SIZE 128
-#define QM_REG_VOQCRDLINE_RT_OFFSET 29839
-#define QM_REG_VOQCRDLINE_RT_SIZE 20
-#define QM_REG_VOQINITCRDLINE_RT_OFFSET 29859
-#define QM_REG_VOQINITCRDLINE_RT_SIZE 20
-#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29879
-#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29880
-#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29881
-#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29882
-#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29883
-#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29884
-#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29885
-#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29886
-#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29887
-#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29888
-#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29889
-#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29890
-#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29891
-#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29892
-#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29893
-#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29894
-#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29895
-#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29896
-#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29897
-#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29898
-#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29899
-#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29900
-#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29901
-#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29902
-#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29903
-#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29904
-#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29905
-#define QM_REG_PQTX2PF_0_RT_OFFSET 29906
-#define QM_REG_PQTX2PF_1_RT_OFFSET 29907
-#define QM_REG_PQTX2PF_2_RT_OFFSET 29908
-#define QM_REG_PQTX2PF_3_RT_OFFSET 29909
-#define QM_REG_PQTX2PF_4_RT_OFFSET 29910
-#define QM_REG_PQTX2PF_5_RT_OFFSET 29911
-#define QM_REG_PQTX2PF_6_RT_OFFSET 29912
-#define QM_REG_PQTX2PF_7_RT_OFFSET 29913
-#define QM_REG_PQTX2PF_8_RT_OFFSET 29914
-#define QM_REG_PQTX2PF_9_RT_OFFSET 29915
-#define QM_REG_PQTX2PF_10_RT_OFFSET 29916
-#define QM_REG_PQTX2PF_11_RT_OFFSET 29917
-#define QM_REG_PQTX2PF_12_RT_OFFSET 29918
-#define QM_REG_PQTX2PF_13_RT_OFFSET 29919
-#define QM_REG_PQTX2PF_14_RT_OFFSET 29920
-#define QM_REG_PQTX2PF_15_RT_OFFSET 29921
-#define QM_REG_PQTX2PF_16_RT_OFFSET 29922
-#define QM_REG_PQTX2PF_17_RT_OFFSET 29923
-#define QM_REG_PQTX2PF_18_RT_OFFSET 29924
-#define QM_REG_PQTX2PF_19_RT_OFFSET 29925
-#define QM_REG_PQTX2PF_20_RT_OFFSET 29926
-#define QM_REG_PQTX2PF_21_RT_OFFSET 29927
-#define QM_REG_PQTX2PF_22_RT_OFFSET 29928
-#define QM_REG_PQTX2PF_23_RT_OFFSET 29929
-#define QM_REG_PQTX2PF_24_RT_OFFSET 29930
-#define QM_REG_PQTX2PF_25_RT_OFFSET 29931
-#define QM_REG_PQTX2PF_26_RT_OFFSET 29932
-#define QM_REG_PQTX2PF_27_RT_OFFSET 29933
-#define QM_REG_PQTX2PF_28_RT_OFFSET 29934
-#define QM_REG_PQTX2PF_29_RT_OFFSET 29935
-#define QM_REG_PQTX2PF_30_RT_OFFSET 29936
-#define QM_REG_PQTX2PF_31_RT_OFFSET 29937
-#define QM_REG_PQTX2PF_32_RT_OFFSET 29938
-#define QM_REG_PQTX2PF_33_RT_OFFSET 29939
-#define QM_REG_PQTX2PF_34_RT_OFFSET 29940
-#define QM_REG_PQTX2PF_35_RT_OFFSET 29941
-#define QM_REG_PQTX2PF_36_RT_OFFSET 29942
-#define QM_REG_PQTX2PF_37_RT_OFFSET 29943
-#define QM_REG_PQTX2PF_38_RT_OFFSET 29944
-#define QM_REG_PQTX2PF_39_RT_OFFSET 29945
-#define QM_REG_PQTX2PF_40_RT_OFFSET 29946
-#define QM_REG_PQTX2PF_41_RT_OFFSET 29947
-#define QM_REG_PQTX2PF_42_RT_OFFSET 29948
-#define QM_REG_PQTX2PF_43_RT_OFFSET 29949
-#define QM_REG_PQTX2PF_44_RT_OFFSET 29950
-#define QM_REG_PQTX2PF_45_RT_OFFSET 29951
-#define QM_REG_PQTX2PF_46_RT_OFFSET 29952
-#define QM_REG_PQTX2PF_47_RT_OFFSET 29953
-#define QM_REG_PQTX2PF_48_RT_OFFSET 29954
-#define QM_REG_PQTX2PF_49_RT_OFFSET 29955
-#define QM_REG_PQTX2PF_50_RT_OFFSET 29956
-#define QM_REG_PQTX2PF_51_RT_OFFSET 29957
-#define QM_REG_PQTX2PF_52_RT_OFFSET 29958
-#define QM_REG_PQTX2PF_53_RT_OFFSET 29959
-#define QM_REG_PQTX2PF_54_RT_OFFSET 29960
-#define QM_REG_PQTX2PF_55_RT_OFFSET 29961
-#define QM_REG_PQTX2PF_56_RT_OFFSET 29962
-#define QM_REG_PQTX2PF_57_RT_OFFSET 29963
-#define QM_REG_PQTX2PF_58_RT_OFFSET 29964
-#define QM_REG_PQTX2PF_59_RT_OFFSET 29965
-#define QM_REG_PQTX2PF_60_RT_OFFSET 29966
-#define QM_REG_PQTX2PF_61_RT_OFFSET 29967
-#define QM_REG_PQTX2PF_62_RT_OFFSET 29968
-#define QM_REG_PQTX2PF_63_RT_OFFSET 29969
-#define QM_REG_PQOTHER2PF_0_RT_OFFSET 29970
-#define QM_REG_PQOTHER2PF_1_RT_OFFSET 29971
-#define QM_REG_PQOTHER2PF_2_RT_OFFSET 29972
-#define QM_REG_PQOTHER2PF_3_RT_OFFSET 29973
-#define QM_REG_PQOTHER2PF_4_RT_OFFSET 29974
-#define QM_REG_PQOTHER2PF_5_RT_OFFSET 29975
-#define QM_REG_PQOTHER2PF_6_RT_OFFSET 29976
-#define QM_REG_PQOTHER2PF_7_RT_OFFSET 29977
-#define QM_REG_PQOTHER2PF_8_RT_OFFSET 29978
-#define QM_REG_PQOTHER2PF_9_RT_OFFSET 29979
-#define QM_REG_PQOTHER2PF_10_RT_OFFSET 29980
-#define QM_REG_PQOTHER2PF_11_RT_OFFSET 29981
-#define QM_REG_PQOTHER2PF_12_RT_OFFSET 29982
-#define QM_REG_PQOTHER2PF_13_RT_OFFSET 29983
-#define QM_REG_PQOTHER2PF_14_RT_OFFSET 29984
-#define QM_REG_PQOTHER2PF_15_RT_OFFSET 29985
-#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 29986
-#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 29987
-#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 29988
-#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 29989
-#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 29990
-#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 29991
-#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 29992
-#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 29993
-#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 29994
-#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 29995
-#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 29996
-#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 29997
-#define QM_REG_RLGLBLINCVAL_RT_OFFSET 29998
+#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 608
+#define QM_REG_MAXPQSIZE_0_RT_OFFSET 29740
+#define QM_REG_MAXPQSIZE_1_RT_OFFSET 29741
+#define QM_REG_MAXPQSIZE_2_RT_OFFSET 29742
+#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 29743
+#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 29744
+#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 29745
+#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 29746
+#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 29747
+#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 29748
+#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 29749
+#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 29750
+#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 29751
+#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 29752
+#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 29753
+#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 29754
+#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 29755
+#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 29756
+#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 29757
+#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 29758
+#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 29759
+#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 29760
+#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 29761
+#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 29762
+#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 29763
+#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 29764
+#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 29765
+#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 29766
+#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 29767
+#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 29768
+#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 29769
+#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 29770
+#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 29771
+#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 29772
+#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 29773
+#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 29774
+#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 29775
+#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 29776
+#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 29777
+#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 29778
+#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 29779
+#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 29780
+#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 29781
+#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 29782
+#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 29783
+#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 29784
+#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 29785
+#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 29786
+#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 29787
+#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 29788
+#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 29789
+#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 29790
+#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 29791
+#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 29792
+#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 29793
+#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 29794
+#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 29795
+#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 29796
+#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 29797
+#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 29798
+#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 29799
+#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 29800
+#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 29801
+#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 29802
+#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 29803
+#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 29804
+#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 29805
+#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 29806
+#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 29807
+#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29935
+#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29936
+#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29937
+#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29938
+#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29939
+#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29940
+#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29941
+#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29942
+#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29943
+#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29944
+#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29945
+#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29946
+#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29947
+#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29948
+#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29949
+#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29950
+#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29951
+#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29952
+#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29953
+#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29954
+#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29955
+#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29956
+#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29957
+#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29958
+#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29959
+#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29960
+#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29961
+#define QM_REG_PQTX2PF_0_RT_OFFSET 29962
+#define QM_REG_PQTX2PF_1_RT_OFFSET 29963
+#define QM_REG_PQTX2PF_2_RT_OFFSET 29964
+#define QM_REG_PQTX2PF_3_RT_OFFSET 29965
+#define QM_REG_PQTX2PF_4_RT_OFFSET 29966
+#define QM_REG_PQTX2PF_5_RT_OFFSET 29967
+#define QM_REG_PQTX2PF_6_RT_OFFSET 29968
+#define QM_REG_PQTX2PF_7_RT_OFFSET 29969
+#define QM_REG_PQTX2PF_8_RT_OFFSET 29970
+#define QM_REG_PQTX2PF_9_RT_OFFSET 29971
+#define QM_REG_PQTX2PF_10_RT_OFFSET 29972
+#define QM_REG_PQTX2PF_11_RT_OFFSET 29973
+#define QM_REG_PQTX2PF_12_RT_OFFSET 29974
+#define QM_REG_PQTX2PF_13_RT_OFFSET 29975
+#define QM_REG_PQTX2PF_14_RT_OFFSET 29976
+#define QM_REG_PQTX2PF_15_RT_OFFSET 29977
+#define QM_REG_PQTX2PF_16_RT_OFFSET 29978
+#define QM_REG_PQTX2PF_17_RT_OFFSET 29979
+#define QM_REG_PQTX2PF_18_RT_OFFSET 29980
+#define QM_REG_PQTX2PF_19_RT_OFFSET 29981
+#define QM_REG_PQTX2PF_20_RT_OFFSET 29982
+#define QM_REG_PQTX2PF_21_RT_OFFSET 29983
+#define QM_REG_PQTX2PF_22_RT_OFFSET 29984
+#define QM_REG_PQTX2PF_23_RT_OFFSET 29985
+#define QM_REG_PQTX2PF_24_RT_OFFSET 29986
+#define QM_REG_PQTX2PF_25_RT_OFFSET 29987
+#define QM_REG_PQTX2PF_26_RT_OFFSET 29988
+#define QM_REG_PQTX2PF_27_RT_OFFSET 29989
+#define QM_REG_PQTX2PF_28_RT_OFFSET 29990
+#define QM_REG_PQTX2PF_29_RT_OFFSET 29991
+#define QM_REG_PQTX2PF_30_RT_OFFSET 29992
+#define QM_REG_PQTX2PF_31_RT_OFFSET 29993
+#define QM_REG_PQTX2PF_32_RT_OFFSET 29994
+#define QM_REG_PQTX2PF_33_RT_OFFSET 29995
+#define QM_REG_PQTX2PF_34_RT_OFFSET 29996
+#define QM_REG_PQTX2PF_35_RT_OFFSET 29997
+#define QM_REG_PQTX2PF_36_RT_OFFSET 29998
+#define QM_REG_PQTX2PF_37_RT_OFFSET 29999
+#define QM_REG_PQTX2PF_38_RT_OFFSET 30000
+#define QM_REG_PQTX2PF_39_RT_OFFSET 30001
+#define QM_REG_PQTX2PF_40_RT_OFFSET 30002
+#define QM_REG_PQTX2PF_41_RT_OFFSET 30003
+#define QM_REG_PQTX2PF_42_RT_OFFSET 30004
+#define QM_REG_PQTX2PF_43_RT_OFFSET 30005
+#define QM_REG_PQTX2PF_44_RT_OFFSET 30006
+#define QM_REG_PQTX2PF_45_RT_OFFSET 30007
+#define QM_REG_PQTX2PF_46_RT_OFFSET 30008
+#define QM_REG_PQTX2PF_47_RT_OFFSET 30009
+#define QM_REG_PQTX2PF_48_RT_OFFSET 30010
+#define QM_REG_PQTX2PF_49_RT_OFFSET 30011
+#define QM_REG_PQTX2PF_50_RT_OFFSET 30012
+#define QM_REG_PQTX2PF_51_RT_OFFSET 30013
+#define QM_REG_PQTX2PF_52_RT_OFFSET 30014
+#define QM_REG_PQTX2PF_53_RT_OFFSET 30015
+#define QM_REG_PQTX2PF_54_RT_OFFSET 30016
+#define QM_REG_PQTX2PF_55_RT_OFFSET 30017
+#define QM_REG_PQTX2PF_56_RT_OFFSET 30018
+#define QM_REG_PQTX2PF_57_RT_OFFSET 30019
+#define QM_REG_PQTX2PF_58_RT_OFFSET 30020
+#define QM_REG_PQTX2PF_59_RT_OFFSET 30021
+#define QM_REG_PQTX2PF_60_RT_OFFSET 30022
+#define QM_REG_PQTX2PF_61_RT_OFFSET 30023
+#define QM_REG_PQTX2PF_62_RT_OFFSET 30024
+#define QM_REG_PQTX2PF_63_RT_OFFSET 30025
+#define QM_REG_PQOTHER2PF_0_RT_OFFSET 30026
+#define QM_REG_PQOTHER2PF_1_RT_OFFSET 30027
+#define QM_REG_PQOTHER2PF_2_RT_OFFSET 30028
+#define QM_REG_PQOTHER2PF_3_RT_OFFSET 30029
+#define QM_REG_PQOTHER2PF_4_RT_OFFSET 30030
+#define QM_REG_PQOTHER2PF_5_RT_OFFSET 30031
+#define QM_REG_PQOTHER2PF_6_RT_OFFSET 30032
+#define QM_REG_PQOTHER2PF_7_RT_OFFSET 30033
+#define QM_REG_PQOTHER2PF_8_RT_OFFSET 30034
+#define QM_REG_PQOTHER2PF_9_RT_OFFSET 30035
+#define QM_REG_PQOTHER2PF_10_RT_OFFSET 30036
+#define QM_REG_PQOTHER2PF_11_RT_OFFSET 30037
+#define QM_REG_PQOTHER2PF_12_RT_OFFSET 30038
+#define QM_REG_PQOTHER2PF_13_RT_OFFSET 30039
+#define QM_REG_PQOTHER2PF_14_RT_OFFSET 30040
+#define QM_REG_PQOTHER2PF_15_RT_OFFSET 30041
+#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 30042
+#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 30043
+#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 30044
+#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 30045
+#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 30046
+#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 30047
+#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 30048
+#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 30049
+#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 30050
+#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 30051
+#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 30052
+#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 30053
+#define QM_REG_RLGLBLINCVAL_RT_OFFSET 30054
#define QM_REG_RLGLBLINCVAL_RT_SIZE 256
-#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 30254
+#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 30310
#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256
-#define QM_REG_RLGLBLCRD_RT_OFFSET 30510
+#define QM_REG_RLGLBLCRD_RT_OFFSET 30566
#define QM_REG_RLGLBLCRD_RT_SIZE 256
-#define QM_REG_RLGLBLENABLE_RT_OFFSET 30766
-#define QM_REG_RLPFPERIOD_RT_OFFSET 30767
-#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30768
-#define QM_REG_RLPFINCVAL_RT_OFFSET 30769
+#define QM_REG_RLGLBLENABLE_RT_OFFSET 30822
+#define QM_REG_RLPFPERIOD_RT_OFFSET 30823
+#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30824
+#define QM_REG_RLPFINCVAL_RT_OFFSET 30825
#define QM_REG_RLPFINCVAL_RT_SIZE 16
-#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30785
+#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30841
#define QM_REG_RLPFUPPERBOUND_RT_SIZE 16
-#define QM_REG_RLPFCRD_RT_OFFSET 30801
+#define QM_REG_RLPFCRD_RT_OFFSET 30857
#define QM_REG_RLPFCRD_RT_SIZE 16
-#define QM_REG_RLPFENABLE_RT_OFFSET 30817
-#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30818
-#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30819
+#define QM_REG_RLPFENABLE_RT_OFFSET 30873
+#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30874
+#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30875
#define QM_REG_WFQPFWEIGHT_RT_SIZE 16
-#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30835
+#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30891
#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16
-#define QM_REG_WFQPFCRD_RT_OFFSET 30851
-#define QM_REG_WFQPFCRD_RT_SIZE 160
-#define QM_REG_WFQPFENABLE_RT_OFFSET 31011
-#define QM_REG_WFQVPENABLE_RT_OFFSET 31012
-#define QM_REG_BASEADDRTXPQ_RT_OFFSET 31013
+#define QM_REG_WFQPFCRD_RT_OFFSET 30907
+#define QM_REG_WFQPFCRD_RT_SIZE 256
+#define QM_REG_WFQPFENABLE_RT_OFFSET 31163
+#define QM_REG_WFQVPENABLE_RT_OFFSET 31164
+#define QM_REG_BASEADDRTXPQ_RT_OFFSET 31165
#define QM_REG_BASEADDRTXPQ_RT_SIZE 512
-#define QM_REG_TXPQMAP_RT_OFFSET 31525
+#define QM_REG_TXPQMAP_RT_OFFSET 31677
#define QM_REG_TXPQMAP_RT_SIZE 512
-#define QM_REG_WFQVPWEIGHT_RT_OFFSET 32037
+#define QM_REG_WFQVPWEIGHT_RT_OFFSET 32189
#define QM_REG_WFQVPWEIGHT_RT_SIZE 512
-#define QM_REG_WFQVPCRD_RT_OFFSET 32549
+#define QM_REG_WFQVPCRD_RT_OFFSET 32701
#define QM_REG_WFQVPCRD_RT_SIZE 512
-#define QM_REG_WFQVPMAP_RT_OFFSET 33061
+#define QM_REG_WFQVPMAP_RT_OFFSET 33213
#define QM_REG_WFQVPMAP_RT_SIZE 512
-#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 33573
-#define QM_REG_WFQPFCRD_MSB_RT_SIZE 160
-#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 33733
-#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 33734
-#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 33735
-#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 33736
-#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 33737
-#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 33738
-#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 33739
-#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 33740
+#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 33725
+#define QM_REG_WFQPFCRD_MSB_RT_SIZE 320
+#define QM_REG_VOQCRDLINE_RT_OFFSET 34045
+#define QM_REG_VOQCRDLINE_RT_SIZE 36
+#define QM_REG_VOQINITCRDLINE_RT_OFFSET 34081
+#define QM_REG_VOQINITCRDLINE_RT_SIZE 36
+#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 34117
+#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 34118
+#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 34119
+#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 34120
+#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 34121
+#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 34122
+#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 34123
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 34124
#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 33744
+#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 34128
#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 33748
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 34132
#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 33752
-#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 33753
+#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 34136
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 34137
#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32
-#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 33785
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 34169
#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 33801
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 34185
#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 33817
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 34201
#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 33833
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 34217
#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16
-#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 33849
-#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 33850
-#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 33851
-#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 33852
-#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 33853
-#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 33854
-#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 33855
-#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 33856
-#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 33857
-#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 33858
-#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 33859
-#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 33860
-#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 33861
-#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 33862
-#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 33863
-#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 33864
-#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 33865
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 33866
-#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 33867
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 33868
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 33869
-#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 33870
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 33871
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 33872
-#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 33873
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 33874
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 33875
-#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 33876
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 33877
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 33878
-#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 33879
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 33880
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 33881
-#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 33882
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 33883
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 33884
-#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 33885
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 33886
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 33887
-#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 33888
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 33889
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 33890
-#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 33891
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 33892
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 33893
-#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 33894
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 33895
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 33896
-#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 33897
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 33898
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 33899
-#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 33900
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 33901
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 33902
-#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 33903
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 33904
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 33905
-#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 33906
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 33907
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 33908
-#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 33909
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 33910
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 33911
-#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 33912
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 33913
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 33914
-#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 33915
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 33916
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 33917
-#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 33918
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 33919
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 33920
-#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 33921
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 33922
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 33923
-#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 33924
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 33925
-#define XCM_REG_CON_PHY_Q3_RT_OFFSET 33926
+#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 34233
+#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 34234
+#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 34235
+#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 34236
+#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 34237
+#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 34238
+#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 34239
+#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 34240
+#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 34241
+#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 34242
+#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 34243
+#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 34244
+#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 34245
+#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 34246
+#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 34247
+#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 34248
+#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 34249
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 34250
+#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 34251
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 34252
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 34253
+#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 34254
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 34255
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 34256
+#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 34257
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 34258
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 34259
+#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 34260
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 34261
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 34262
+#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 34263
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 34264
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 34265
+#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 34266
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 34267
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 34268
+#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 34269
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 34270
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 34271
+#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 34272
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 34273
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 34274
+#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 34275
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 34276
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 34277
+#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 34278
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 34279
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 34280
+#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 34281
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 34282
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 34283
+#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 34284
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 34285
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 34286
+#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 34287
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 34288
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 34289
+#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 34290
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 34291
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 34292
+#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 34293
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 34294
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 34295
+#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 34296
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 34297
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 34298
+#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 34299
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 34300
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 34301
+#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 34302
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 34303
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 34304
+#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 34305
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 34306
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 34307
+#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 34308
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 34309
+#define XCM_REG_CON_PHY_Q3_RT_OFFSET 34310
-#define RUNTIME_ARRAY_SIZE 33927
+#define RUNTIME_ARRAY_SIZE 34311
#endif /* __RT_DEFS_H__ */
diff --git a/drivers/net/qede/base/ecore_sp_api.h b/drivers/net/qede/base/ecore_sp_api.h
index a4cb507f..c8e564f9 100644
--- a/drivers/net/qede/base/ecore_sp_api.h
+++ b/drivers/net/qede/base/ecore_sp_api.h
@@ -41,5 +41,24 @@ struct ecore_spq_comp_cb {
*/
enum _ecore_status_t ecore_eth_cqe_completion(struct ecore_hwfn *p_hwfn,
struct eth_slow_path_rx_cqe *cqe);
+/**
+ * @brief ecore_sp_pf_update_tunn_cfg - PF tunnel configuration
+ * update ramrod
+ *
+ * This ramrod is sent to update the tunneling configuration
+ * of a physical function (PF).
+ *
+ * @param p_hwfn
+ * @param p_tunn - PF tunneling update parameters
+ * @param comp_mode - completion mode
+ * @param p_comp_data - completion callback data
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t
+ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn,
+ struct ecore_tunnel_info *p_tunn,
+ enum spq_mode comp_mode,
+ struct ecore_spq_comp_cb *p_comp_data);
#endif
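For callers, the new entry point takes the same ecore_tunnel_info used by the PF-start path in ecore_sp_commands.c below. A minimal sketch (not part of the patch) that enables VXLAN classification and updates its UDP port, assuming ECORE_SPQ_MODE_EBLOCK from the SPQ API:

/* Sketch: enable VXLAN and update its UDP port via the new ramrod.
 * Field names follow the ecore_tunnel_info usage in
 * ecore_sp_commands.c; the blocking completion mode is an assumption.
 */
static enum _ecore_status_t
enable_vxlan_sketch(struct ecore_hwfn *p_hwfn, u16 udp_port)
{
	struct ecore_tunnel_info tunn;

	OSAL_MEMSET(&tunn, 0, sizeof(tunn));
	tunn.vxlan.b_update_mode = true;
	tunn.vxlan.b_mode_enabled = true;
	tunn.vxlan_port.b_update_port = true;
	tunn.vxlan_port.port = udp_port;

	return ecore_sp_pf_update_tunn_cfg(p_hwfn, &tunn,
					   ECORE_SPQ_MODE_EBLOCK,
					   OSAL_NULL);
}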
diff --git a/drivers/net/qede/base/ecore_sp_commands.c b/drivers/net/qede/base/ecore_sp_commands.c
index b3736a8c..8fd64d7a 100644
--- a/drivers/net/qede/base/ecore_sp_commands.c
+++ b/drivers/net/qede/base/ecore_sp_commands.c
@@ -22,6 +22,7 @@
#include "ecore_hw.h"
#include "ecore_dcbx.h"
#include "ecore_sriov.h"
+#include "ecore_vf.h"
enum _ecore_status_t ecore_sp_init_request(struct ecore_hwfn *p_hwfn,
struct ecore_spq_entry **pp_ent,
@@ -31,7 +32,7 @@ enum _ecore_status_t ecore_sp_init_request(struct ecore_hwfn *p_hwfn,
{
u32 opaque_cid = p_data->opaque_fid << 16 | p_data->cid;
struct ecore_spq_entry *p_ent = OSAL_NULL;
- enum _ecore_status_t rc = ECORE_NOTIMPL;
+ enum _ecore_status_t rc;
if (!pp_ent)
return ECORE_INVAL;
@@ -88,7 +89,7 @@ enum _ecore_status_t ecore_sp_init_request(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
}
-static enum tunnel_clss ecore_tunn_get_clss_type(u8 type)
+static enum tunnel_clss ecore_tunn_clss_to_fw_clss(u8 type)
{
switch (type) {
case ECORE_TUNN_CLSS_MAC_VLAN:
@@ -107,224 +108,208 @@ static enum tunnel_clss ecore_tunn_get_clss_type(u8 type)
}
static void
-ecore_tunn_set_pf_fix_tunn_mode(struct ecore_hwfn *p_hwfn,
- struct ecore_tunn_update_params *p_src,
- struct pf_update_tunnel_config *p_tunn_cfg)
+ecore_set_pf_update_tunn_mode(struct ecore_tunnel_info *p_tun,
+ struct ecore_tunnel_info *p_src,
+ bool b_pf_start)
{
- unsigned long cached_tunn_mode = p_hwfn->p_dev->tunn_mode;
- unsigned long update_mask = p_src->tunn_mode_update_mask;
- unsigned long tunn_mode = p_src->tunn_mode;
- unsigned long new_tunn_mode = 0;
-
- if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &update_mask)) {
- if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &tunn_mode))
- OSAL_SET_BIT(ECORE_MODE_L2GRE_TUNN, &new_tunn_mode);
- } else {
- if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &cached_tunn_mode))
- OSAL_SET_BIT(ECORE_MODE_L2GRE_TUNN, &new_tunn_mode);
- }
-
- if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &update_mask)) {
- if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &tunn_mode))
- OSAL_SET_BIT(ECORE_MODE_IPGRE_TUNN, &new_tunn_mode);
- } else {
- if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &cached_tunn_mode))
- OSAL_SET_BIT(ECORE_MODE_IPGRE_TUNN, &new_tunn_mode);
- }
-
- if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &update_mask)) {
- if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &tunn_mode))
- OSAL_SET_BIT(ECORE_MODE_VXLAN_TUNN, &new_tunn_mode);
- } else {
- if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &cached_tunn_mode))
- OSAL_SET_BIT(ECORE_MODE_VXLAN_TUNN, &new_tunn_mode);
- }
-
- if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
- if (p_src->update_geneve_udp_port)
- DP_NOTICE(p_hwfn, true, "Geneve not supported\n");
- p_src->update_geneve_udp_port = 0;
- p_src->tunn_mode = new_tunn_mode;
- return;
- }
+ if (p_src->vxlan.b_update_mode || b_pf_start)
+ p_tun->vxlan.b_mode_enabled = p_src->vxlan.b_mode_enabled;
- if (p_src->update_geneve_udp_port) {
- p_tunn_cfg->set_geneve_udp_port_flg = 1;
- p_tunn_cfg->geneve_udp_port =
- OSAL_CPU_TO_LE16(p_src->geneve_udp_port);
- }
+ if (p_src->l2_gre.b_update_mode || b_pf_start)
+ p_tun->l2_gre.b_mode_enabled = p_src->l2_gre.b_mode_enabled;
- if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &update_mask)) {
- if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &tunn_mode))
- OSAL_SET_BIT(ECORE_MODE_L2GENEVE_TUNN, &new_tunn_mode);
- } else {
- if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &cached_tunn_mode))
- OSAL_SET_BIT(ECORE_MODE_L2GENEVE_TUNN, &new_tunn_mode);
- }
+ if (p_src->ip_gre.b_update_mode || b_pf_start)
+ p_tun->ip_gre.b_mode_enabled = p_src->ip_gre.b_mode_enabled;
- if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &update_mask)) {
- if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &tunn_mode))
- OSAL_SET_BIT(ECORE_MODE_IPGENEVE_TUNN, &new_tunn_mode);
- } else {
- if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &cached_tunn_mode))
- OSAL_SET_BIT(ECORE_MODE_IPGENEVE_TUNN, &new_tunn_mode);
- }
+ if (p_src->l2_geneve.b_update_mode || b_pf_start)
+ p_tun->l2_geneve.b_mode_enabled =
+ p_src->l2_geneve.b_mode_enabled;
- p_src->tunn_mode = new_tunn_mode;
+ if (p_src->ip_geneve.b_update_mode || b_pf_start)
+ p_tun->ip_geneve.b_mode_enabled =
+ p_src->ip_geneve.b_mode_enabled;
}
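
The rework above replaces the old tunn_mode bitmask bookkeeping with per-type flags: a mode is copied from the request only when its b_update_mode flag is set, or unconditionally on PF start. A minimal caller-side sketch, assuming the ecore headers that define struct ecore_tunnel_info (the helper name is illustrative, not part of the patch):

	static void example_request_vxlan_only(struct ecore_tunnel_info *p_req)
	{
		OSAL_MEMSET(p_req, 0, sizeof(*p_req));

		/* Only VXLAN carries b_update_mode here, so the PF-update
		 * path copies its b_mode_enabled and keeps the cached
		 * GRE/GENEVE modes untouched.
		 */
		p_req->vxlan.b_update_mode = true;
		p_req->vxlan.b_mode_enabled = true;
	}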
-static void
-ecore_tunn_set_pf_update_params(struct ecore_hwfn *p_hwfn,
- struct ecore_tunn_update_params *p_src,
- struct pf_update_tunnel_config *p_tunn_cfg)
+static void ecore_set_tunn_cls_info(struct ecore_tunnel_info *p_tun,
+ struct ecore_tunnel_info *p_src)
{
- unsigned long tunn_mode = p_src->tunn_mode;
enum tunnel_clss type;
- ecore_tunn_set_pf_fix_tunn_mode(p_hwfn, p_src, p_tunn_cfg);
- p_tunn_cfg->update_rx_pf_clss = p_src->update_rx_pf_clss;
- p_tunn_cfg->update_tx_pf_clss = p_src->update_tx_pf_clss;
-
- type = ecore_tunn_get_clss_type(p_src->tunn_clss_vxlan);
- p_tunn_cfg->tunnel_clss_vxlan = type;
- type = ecore_tunn_get_clss_type(p_src->tunn_clss_l2gre);
- p_tunn_cfg->tunnel_clss_l2gre = type;
- type = ecore_tunn_get_clss_type(p_src->tunn_clss_ipgre);
- p_tunn_cfg->tunnel_clss_ipgre = type;
-
- if (p_src->update_vxlan_udp_port) {
- p_tunn_cfg->set_vxlan_udp_port_flg = 1;
- p_tunn_cfg->vxlan_udp_port =
- OSAL_CPU_TO_LE16(p_src->vxlan_udp_port);
- }
-
- if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &tunn_mode))
- p_tunn_cfg->tx_enable_l2gre = 1;
+ p_tun->b_update_rx_cls = p_src->b_update_rx_cls;
+ p_tun->b_update_tx_cls = p_src->b_update_tx_cls;
+
+	/* @DPDK - typecast tunnel class */
+ type = ecore_tunn_clss_to_fw_clss(p_src->vxlan.tun_cls);
+ p_tun->vxlan.tun_cls = (enum ecore_tunn_clss)type;
+ type = ecore_tunn_clss_to_fw_clss(p_src->l2_gre.tun_cls);
+ p_tun->l2_gre.tun_cls = (enum ecore_tunn_clss)type;
+ type = ecore_tunn_clss_to_fw_clss(p_src->ip_gre.tun_cls);
+ p_tun->ip_gre.tun_cls = (enum ecore_tunn_clss)type;
+ type = ecore_tunn_clss_to_fw_clss(p_src->l2_geneve.tun_cls);
+ p_tun->l2_geneve.tun_cls = (enum ecore_tunn_clss)type;
+ type = ecore_tunn_clss_to_fw_clss(p_src->ip_geneve.tun_cls);
+ p_tun->ip_geneve.tun_cls = (enum ecore_tunn_clss)type;
+}
- if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &tunn_mode))
- p_tunn_cfg->tx_enable_ipgre = 1;
+static void ecore_set_tunn_ports(struct ecore_tunnel_info *p_tun,
+ struct ecore_tunnel_info *p_src)
+{
+ p_tun->geneve_port.b_update_port = p_src->geneve_port.b_update_port;
+ p_tun->vxlan_port.b_update_port = p_src->vxlan_port.b_update_port;
- if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &tunn_mode))
- p_tunn_cfg->tx_enable_vxlan = 1;
+ if (p_src->geneve_port.b_update_port)
+ p_tun->geneve_port.port = p_src->geneve_port.port;
- if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
- if (p_src->update_geneve_udp_port)
- DP_NOTICE(p_hwfn, true, "Geneve not supported\n");
- p_src->update_geneve_udp_port = 0;
- return;
- }
+ if (p_src->vxlan_port.b_update_port)
+ p_tun->vxlan_port.port = p_src->vxlan_port.port;
+}
- if (p_src->update_geneve_udp_port) {
- p_tunn_cfg->set_geneve_udp_port_flg = 1;
- p_tunn_cfg->geneve_udp_port =
- OSAL_CPU_TO_LE16(p_src->geneve_udp_port);
- }
+static void
+__ecore_set_ramrod_tunnel_param(u8 *p_tunn_cls, u8 *p_enable_tx_clas,
+ struct ecore_tunn_update_type *tun_type)
+{
+ *p_tunn_cls = tun_type->tun_cls;
- if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &tunn_mode))
- p_tunn_cfg->tx_enable_l2geneve = 1;
+ if (tun_type->b_mode_enabled)
+ *p_enable_tx_clas = 1;
+}
- if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &tunn_mode))
- p_tunn_cfg->tx_enable_ipgeneve = 1;
+static void
+ecore_set_ramrod_tunnel_param(u8 *p_tunn_cls, u8 *p_enable_tx_clas,
+ struct ecore_tunn_update_type *tun_type,
+ u8 *p_update_port, __le16 *p_port,
+ struct ecore_tunn_update_udp_port *p_udp_port)
+{
+ __ecore_set_ramrod_tunnel_param(p_tunn_cls, p_enable_tx_clas,
+ tun_type);
+ if (p_udp_port->b_update_port) {
+ *p_update_port = 1;
+ *p_port = OSAL_CPU_TO_LE16(p_udp_port->port);
+ }
+}
- type = ecore_tunn_get_clss_type(p_src->tunn_clss_l2geneve);
- p_tunn_cfg->tunnel_clss_l2geneve = type;
- type = ecore_tunn_get_clss_type(p_src->tunn_clss_ipgeneve);
- p_tunn_cfg->tunnel_clss_ipgeneve = type;
+static void
+ecore_tunn_set_pf_update_params(struct ecore_hwfn *p_hwfn,
+ struct ecore_tunnel_info *p_src,
+ struct pf_update_tunnel_config *p_tunn_cfg)
+{
+ struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel;
+
+ ecore_set_pf_update_tunn_mode(p_tun, p_src, false);
+ ecore_set_tunn_cls_info(p_tun, p_src);
+ ecore_set_tunn_ports(p_tun, p_src);
+
+ ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan,
+ &p_tunn_cfg->tx_enable_vxlan,
+ &p_tun->vxlan,
+ &p_tunn_cfg->set_vxlan_udp_port_flg,
+ &p_tunn_cfg->vxlan_udp_port,
+ &p_tun->vxlan_port);
+
+ ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve,
+ &p_tunn_cfg->tx_enable_l2geneve,
+ &p_tun->l2_geneve,
+ &p_tunn_cfg->set_geneve_udp_port_flg,
+ &p_tunn_cfg->geneve_udp_port,
+ &p_tun->geneve_port);
+
+ __ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve,
+ &p_tunn_cfg->tx_enable_ipgeneve,
+ &p_tun->ip_geneve);
+
+ __ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre,
+ &p_tunn_cfg->tx_enable_l2gre,
+ &p_tun->l2_gre);
+
+ __ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre,
+ &p_tunn_cfg->tx_enable_ipgre,
+ &p_tun->ip_gre);
+
+ p_tunn_cfg->update_rx_pf_clss = p_tun->b_update_rx_cls;
+ p_tunn_cfg->update_tx_pf_clss = p_tun->b_update_tx_cls;
}
static void ecore_set_hw_tunn_mode(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
- unsigned long tunn_mode)
+ struct ecore_tunnel_info *p_tun)
{
- u8 l2gre_enable = 0, ipgre_enable = 0, vxlan_enable = 0;
- u8 l2geneve_enable = 0, ipgeneve_enable = 0;
-
- if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &tunn_mode))
- l2gre_enable = 1;
-
- if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &tunn_mode))
- ipgre_enable = 1;
-
- if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &tunn_mode))
- vxlan_enable = 1;
+ ecore_set_gre_enable(p_hwfn, p_ptt, p_tun->l2_gre.b_mode_enabled,
+ p_tun->ip_gre.b_mode_enabled);
+ ecore_set_vxlan_enable(p_hwfn, p_ptt, p_tun->vxlan.b_mode_enabled);
- ecore_set_gre_enable(p_hwfn, p_ptt, l2gre_enable, ipgre_enable);
- ecore_set_vxlan_enable(p_hwfn, p_ptt, vxlan_enable);
+ ecore_set_geneve_enable(p_hwfn, p_ptt, p_tun->l2_geneve.b_mode_enabled,
+ p_tun->ip_geneve.b_mode_enabled);
+}
- if (ECORE_IS_BB_A0(p_hwfn->p_dev))
+static void ecore_set_hw_tunn_mode_port(struct ecore_hwfn *p_hwfn,
+ struct ecore_tunnel_info *p_tunn)
+{
+ if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
+ DP_NOTICE(p_hwfn, true,
+ "A0 chip: tunnel hw config is not supported\n");
return;
+ }
- if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &tunn_mode))
- l2geneve_enable = 1;
+ if (p_tunn->vxlan_port.b_update_port)
+ ecore_set_vxlan_dest_port(p_hwfn, p_hwfn->p_main_ptt,
+ p_tunn->vxlan_port.port);
- if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &tunn_mode))
- ipgeneve_enable = 1;
+ if (p_tunn->geneve_port.b_update_port)
+ ecore_set_geneve_dest_port(p_hwfn, p_hwfn->p_main_ptt,
+ p_tunn->geneve_port.port);
- ecore_set_geneve_enable(p_hwfn, p_ptt, l2geneve_enable,
- ipgeneve_enable);
+ ecore_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt, p_tunn);
}
static void
ecore_tunn_set_pf_start_params(struct ecore_hwfn *p_hwfn,
- struct ecore_tunn_start_params *p_src,
+ struct ecore_tunnel_info *p_src,
struct pf_start_tunnel_config *p_tunn_cfg)
{
- unsigned long tunn_mode;
- enum tunnel_clss type;
-
- if (!p_src)
- return;
-
- tunn_mode = p_src->tunn_mode;
- type = ecore_tunn_get_clss_type(p_src->tunn_clss_vxlan);
- p_tunn_cfg->tunnel_clss_vxlan = type;
- type = ecore_tunn_get_clss_type(p_src->tunn_clss_l2gre);
- p_tunn_cfg->tunnel_clss_l2gre = type;
- type = ecore_tunn_get_clss_type(p_src->tunn_clss_ipgre);
- p_tunn_cfg->tunnel_clss_ipgre = type;
-
- if (p_src->update_vxlan_udp_port) {
- p_tunn_cfg->set_vxlan_udp_port_flg = 1;
- p_tunn_cfg->vxlan_udp_port =
- OSAL_CPU_TO_LE16(p_src->vxlan_udp_port);
- }
-
- if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &tunn_mode))
- p_tunn_cfg->tx_enable_l2gre = 1;
-
- if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &tunn_mode))
- p_tunn_cfg->tx_enable_ipgre = 1;
-
- if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &tunn_mode))
- p_tunn_cfg->tx_enable_vxlan = 1;
+ struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel;
if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
- if (p_src->update_geneve_udp_port)
- DP_NOTICE(p_hwfn, true, "Geneve not supported\n");
- p_src->update_geneve_udp_port = 0;
+ DP_NOTICE(p_hwfn, true,
+ "A0 chip: tunnel pf start config is not supported\n");
return;
}
- if (p_src->update_geneve_udp_port) {
- p_tunn_cfg->set_geneve_udp_port_flg = 1;
- p_tunn_cfg->geneve_udp_port =
- OSAL_CPU_TO_LE16(p_src->geneve_udp_port);
- }
-
- if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &tunn_mode))
- p_tunn_cfg->tx_enable_l2geneve = 1;
-
- if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &tunn_mode))
- p_tunn_cfg->tx_enable_ipgeneve = 1;
+ if (!p_src)
+ return;
- type = ecore_tunn_get_clss_type(p_src->tunn_clss_l2geneve);
- p_tunn_cfg->tunnel_clss_l2geneve = type;
- type = ecore_tunn_get_clss_type(p_src->tunn_clss_ipgeneve);
- p_tunn_cfg->tunnel_clss_ipgeneve = type;
+ ecore_set_pf_update_tunn_mode(p_tun, p_src, true);
+ ecore_set_tunn_cls_info(p_tun, p_src);
+ ecore_set_tunn_ports(p_tun, p_src);
+
+ ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan,
+ &p_tunn_cfg->tx_enable_vxlan,
+ &p_tun->vxlan,
+ &p_tunn_cfg->set_vxlan_udp_port_flg,
+ &p_tunn_cfg->vxlan_udp_port,
+ &p_tun->vxlan_port);
+
+ ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve,
+ &p_tunn_cfg->tx_enable_l2geneve,
+ &p_tun->l2_geneve,
+ &p_tunn_cfg->set_geneve_udp_port_flg,
+ &p_tunn_cfg->geneve_udp_port,
+ &p_tun->geneve_port);
+
+ __ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve,
+ &p_tunn_cfg->tx_enable_ipgeneve,
+ &p_tun->ip_geneve);
+
+ __ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre,
+ &p_tunn_cfg->tx_enable_l2gre,
+ &p_tun->l2_gre);
+
+ __ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre,
+ &p_tunn_cfg->tx_enable_ipgre,
+ &p_tun->ip_gre);
}
enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
- struct ecore_tunn_start_params *p_tunn,
+ struct ecore_tunnel_info *p_tunn,
enum ecore_mf_mode mode,
bool allow_npar_tx_switch)
{
@@ -379,11 +364,11 @@ enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
/* Place EQ address in RAMROD */
DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
- p_hwfn->p_eq->chain.pbl.p_phys_table);
+ p_hwfn->p_eq->chain.pbl_sp.p_phys_table);
page_cnt = (u8)ecore_chain_get_page_cnt(&p_hwfn->p_eq->chain);
p_ramrod->event_ring_num_pages = page_cnt;
DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr,
- p_hwfn->p_consq->chain.pbl.p_phys_table);
+ p_hwfn->p_consq->chain.pbl_sp.p_phys_table);
ecore_tunn_set_pf_start_params(p_hwfn, p_tunn,
&p_ramrod->tunnel_config);
@@ -419,11 +404,8 @@ enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
- if (p_tunn) {
- ecore_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt,
- p_tunn->tunn_mode);
- p_hwfn->p_dev->tunn_mode = p_tunn->tunn_mode;
- }
+ if (p_tunn)
+ ecore_set_hw_tunn_mode_port(p_hwfn, &p_hwfn->p_dev->tunnel);
return rc;
}
@@ -498,7 +480,7 @@ enum _ecore_status_t ecore_sp_rl_update(struct ecore_hwfn *p_hwfn,
/* Set pf update ramrod command params */
enum _ecore_status_t
ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn,
- struct ecore_tunn_update_params *p_tunn,
+ struct ecore_tunnel_info *p_tunn,
enum spq_mode comp_mode,
struct ecore_spq_comp_cb *p_comp_data)
{
@@ -506,6 +488,18 @@ ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn,
struct ecore_sp_init_data init_data;
enum _ecore_status_t rc = ECORE_NOTIMPL;
+ if (IS_VF(p_hwfn->p_dev))
+ return ecore_vf_pf_tunnel_param_update(p_hwfn, p_tunn);
+
+ if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
+ DP_NOTICE(p_hwfn, true,
+ "A0 chip: tunnel pf update config is not supported\n");
+ return rc;
+ }
+
+ if (!p_tunn)
+ return ECORE_INVAL;
+
/* Get SPQ entry */
OSAL_MEMSET(&init_data, 0, sizeof(init_data));
init_data.cid = ecore_spq_get_cid(p_hwfn);
@@ -526,15 +520,7 @@ ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn,
if (rc != ECORE_SUCCESS)
return rc;
- if (p_tunn->update_vxlan_udp_port)
- ecore_set_vxlan_dest_port(p_hwfn, p_hwfn->p_main_ptt,
- p_tunn->vxlan_udp_port);
- if (p_tunn->update_geneve_udp_port)
- ecore_set_geneve_dest_port(p_hwfn, p_hwfn->p_main_ptt,
- p_tunn->geneve_udp_port);
-
- ecore_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt, p_tunn->tunn_mode);
- p_hwfn->p_dev->tunn_mode = p_tunn->tunn_mode;
+ ecore_set_hw_tunn_mode_port(p_hwfn, &p_hwfn->p_dev->tunnel);
return rc;
}
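
With the VF and A0 guards added above, one entry point now serves both flows: VFs are forwarded to ecore_vf_pf_tunnel_param_update(), while PFs build an SPQ entry. A hedged usage sketch for a UDP-port update (the wrapper name is illustrative):

	static enum _ecore_status_t
	example_update_vxlan_port(struct ecore_hwfn *p_hwfn, u16 udp_port)
	{
		struct ecore_tunnel_info tunn;

		OSAL_MEMSET(&tunn, 0, sizeof(tunn));
		tunn.vxlan_port.b_update_port = true;
		tunn.vxlan_port.port = udp_port;

		/* A0 parts bail out with ECORE_NOTIMPL before any SPQ entry
		 * is built; otherwise the ramrod is posted and the HW is
		 * programmed via ecore_set_hw_tunn_mode_port().
		 */
		return ecore_sp_pf_update_tunn_cfg(p_hwfn, &tunn,
						   ECORE_SPQ_MODE_EBLOCK,
						   OSAL_NULL);
	}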
@@ -564,7 +550,7 @@ enum _ecore_status_t ecore_sp_heartbeat_ramrod(struct ecore_hwfn *p_hwfn)
{
struct ecore_spq_entry *p_ent = OSAL_NULL;
struct ecore_sp_init_data init_data;
- enum _ecore_status_t rc = ECORE_NOTIMPL;
+ enum _ecore_status_t rc;
/* Get SPQ entry */
OSAL_MEMSET(&init_data, 0, sizeof(init_data));
diff --git a/drivers/net/qede/base/ecore_sp_commands.h b/drivers/net/qede/base/ecore_sp_commands.h
index 66c9a69b..33e31e42 100644
--- a/drivers/net/qede/base/ecore_sp_commands.h
+++ b/drivers/net/qede/base/ecore_sp_commands.h
@@ -68,32 +68,11 @@ enum _ecore_status_t ecore_sp_init_request(struct ecore_hwfn *p_hwfn,
*/
enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
- struct ecore_tunn_start_params *p_tunn,
+ struct ecore_tunnel_info *p_tunn,
enum ecore_mf_mode mode,
bool allow_npar_tx_switch);
/**
- * @brief ecore_sp_pf_update_tunn_cfg - PF Function Tunnel configuration
- * update Ramrod
- *
- * This ramrod is sent to update a tunneling configuration
- * for a physical function (PF).
- *
- * @param p_hwfn
- * @param p_tunn - pf update tunneling parameters
- * @param comp_mode - completion mode
- * @param p_comp_data - callback function
- *
- * @return enum _ecore_status_t
- */
-
-enum _ecore_status_t
-ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn,
- struct ecore_tunn_update_params *p_tunn,
- enum spq_mode comp_mode,
- struct ecore_spq_comp_cb *p_comp_data);
-
-/**
* @brief ecore_sp_pf_update - PF Function Update Ramrod
*
* This ramrod updates function-related parameters. Every parameter can be
diff --git a/drivers/net/qede/base/ecore_spq.c b/drivers/net/qede/base/ecore_spq.c
index 0d744ddd..3c1d05b3 100644
--- a/drivers/net/qede/base/ecore_spq.c
+++ b/drivers/net/qede/base/ecore_spq.c
@@ -173,11 +173,10 @@ ecore_spq_fill_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry *p_ent)
static void ecore_spq_hw_initialize(struct ecore_hwfn *p_hwfn,
struct ecore_spq *p_spq)
{
- u16 pq;
struct ecore_cxt_info cxt_info;
struct core_conn_context *p_cxt;
- union ecore_qm_pq_params pq_params;
enum _ecore_status_t rc;
+ u16 physical_q;
cxt_info.iid = p_spq->cid;
@@ -191,23 +190,26 @@ static void ecore_spq_hw_initialize(struct ecore_hwfn *p_hwfn,
p_cxt = cxt_info.p_cxt;
- SET_FIELD(p_cxt->xstorm_ag_context.flags10,
- XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
- SET_FIELD(p_cxt->xstorm_ag_context.flags1,
- XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
- /* SET_FIELD(p_cxt->xstorm_ag_context.flags10,
- * XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN, 1);
- */
- SET_FIELD(p_cxt->xstorm_ag_context.flags9,
- XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
+ /* @@@TBD we zero the context until we have ilt_reset implemented. */
+ OSAL_MEM_ZERO(p_cxt, sizeof(*p_cxt));
+
+ if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev)) {
+ SET_FIELD(p_cxt->xstorm_ag_context.flags10,
+ E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
+ SET_FIELD(p_cxt->xstorm_ag_context.flags1,
+ E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
+ /* SET_FIELD(p_cxt->xstorm_ag_context.flags10,
+ * E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN, 1);
+ */
+ SET_FIELD(p_cxt->xstorm_ag_context.flags9,
+ E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
+ }
/* CDU validation - FIXME currently disabled */
/* QM physical queue */
- OSAL_MEMSET(&pq_params, 0, sizeof(pq_params));
- pq_params.core.tc = LB_TC;
- pq = ecore_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
- p_cxt->xstorm_ag_context.physical_q0 = OSAL_CPU_TO_LE16(pq);
+ physical_q = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
+ p_cxt->xstorm_ag_context.physical_q0 = OSAL_CPU_TO_LE16(physical_q);
p_cxt->xstorm_st_context.spq_base_lo =
DMA_LO_LE(p_spq->chain.p_phys_addr);
@@ -248,7 +250,8 @@ static enum _ecore_status_t ecore_spq_hw_post(struct ecore_hwfn *p_hwfn,
/* make sure the SPQE is updated before the doorbell */
OSAL_WMB(p_hwfn->p_dev);
- DOORBELL(p_hwfn, DB_ADDR(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db);
+ DOORBELL(p_hwfn, DB_ADDR(p_spq->cid, DQ_DEMS_LEGACY),
+ *(u32 *)&db);
/* make sure doorbell is rang */
OSAL_WMB(p_hwfn->p_dev);
@@ -355,7 +358,7 @@ enum _ecore_status_t ecore_eq_completion(struct ecore_hwfn *p_hwfn,
return rc;
}
-struct ecore_eq *ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem)
+enum _ecore_status_t ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem)
{
struct ecore_eq *p_eq;
@@ -364,7 +367,7 @@ struct ecore_eq *ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem)
if (!p_eq) {
DP_NOTICE(p_hwfn, true,
"Failed to allocate `struct ecore_eq'\n");
- return OSAL_NULL;
+ return ECORE_NOMEM;
}
/* Allocate and initialize EQ chain*/
@@ -373,34 +376,38 @@ struct ecore_eq *ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem)
ECORE_CHAIN_MODE_PBL,
ECORE_CHAIN_CNT_TYPE_U16,
num_elem,
- sizeof(union event_ring_element), &p_eq->chain)) {
+ sizeof(union event_ring_element),
+ &p_eq->chain, OSAL_NULL) != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, true, "Failed to allocate eq chain\n");
goto eq_allocate_fail;
}
/* register EQ completion on the SP SB */
- ecore_int_register_cb(p_hwfn,
- ecore_eq_completion,
+ ecore_int_register_cb(p_hwfn, ecore_eq_completion,
p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);
- return p_eq;
+ p_hwfn->p_eq = p_eq;
+ return ECORE_SUCCESS;
eq_allocate_fail:
- ecore_eq_free(p_hwfn, p_eq);
- return OSAL_NULL;
+ OSAL_FREE(p_hwfn->p_dev, p_eq);
+ return ECORE_NOMEM;
}
-void ecore_eq_setup(struct ecore_hwfn *p_hwfn, struct ecore_eq *p_eq)
+void ecore_eq_setup(struct ecore_hwfn *p_hwfn)
{
- ecore_chain_reset(&p_eq->chain);
+ ecore_chain_reset(&p_hwfn->p_eq->chain);
}
-void ecore_eq_free(struct ecore_hwfn *p_hwfn, struct ecore_eq *p_eq)
+void ecore_eq_free(struct ecore_hwfn *p_hwfn)
{
- if (!p_eq)
+ if (!p_hwfn->p_eq)
return;
- ecore_chain_free(p_hwfn->p_dev, &p_eq->chain);
- OSAL_FREE(p_hwfn->p_dev, p_eq);
+
+ ecore_chain_free(p_hwfn->p_dev, &p_hwfn->p_eq->chain);
+
+ OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_eq);
+ p_hwfn->p_eq = OSAL_NULL;
}
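
The EQ is now owned by the hwfn, so the alloc/setup/free trio only takes p_hwfn and reports status codes instead of returning a pointer. A minimal lifecycle sketch (error handling abbreviated):

	static enum _ecore_status_t
	example_eq_lifecycle(struct ecore_hwfn *p_hwfn, u16 num_elem)
	{
		enum _ecore_status_t rc;

		rc = ecore_eq_alloc(p_hwfn, num_elem);	/* sets p_hwfn->p_eq */
		if (rc != ECORE_SUCCESS)
			return rc;		/* ECORE_NOMEM on failure */

		ecore_eq_setup(p_hwfn);		/* reset chain to start state */
		/* ... slowpath runs ... */
		ecore_eq_free(p_hwfn);		/* frees and clears p_hwfn->p_eq */

		return ECORE_SUCCESS;
	}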
/***************************************************************************
@@ -501,10 +508,13 @@ enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn)
}
/* SPQ ring */
- if (ecore_chain_alloc(p_hwfn->p_dev, ECORE_CHAIN_USE_TO_PRODUCE,
- ECORE_CHAIN_MODE_SINGLE, ECORE_CHAIN_CNT_TYPE_U16, 0,
- /* N/A when the mode is SINGLE */
- sizeof(struct slow_path_element), &p_spq->chain)) {
+ if (ecore_chain_alloc(p_hwfn->p_dev,
+ ECORE_CHAIN_USE_TO_PRODUCE,
+ ECORE_CHAIN_MODE_SINGLE,
+ ECORE_CHAIN_CNT_TYPE_U16,
+ 0, /* N/A when the mode is SINGLE */
+ sizeof(struct slow_path_element),
+ &p_spq->chain, OSAL_NULL)) {
DP_NOTICE(p_hwfn, true, "Failed to allocate spq chain\n");
goto spq_allocate_fail;
}
@@ -920,6 +930,9 @@ enum _ecore_status_t ecore_spq_completion(struct ecore_hwfn *p_hwfn,
if (found->comp_cb.function)
found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
fw_return_code);
+ else
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
+ "Got a completion without a callback function\n");
if ((found->comp_mode != ECORE_SPQ_MODE_EBLOCK) ||
(found->queue == &p_spq->unlimited_pending))
@@ -937,7 +950,7 @@ enum _ecore_status_t ecore_spq_completion(struct ecore_hwfn *p_hwfn,
return rc;
}
-struct ecore_consq *ecore_consq_alloc(struct ecore_hwfn *p_hwfn)
+enum _ecore_status_t ecore_consq_alloc(struct ecore_hwfn *p_hwfn)
{
struct ecore_consq *p_consq;
@@ -947,7 +960,7 @@ struct ecore_consq *ecore_consq_alloc(struct ecore_hwfn *p_hwfn)
if (!p_consq) {
DP_NOTICE(p_hwfn, true,
"Failed to allocate `struct ecore_consq'\n");
- return OSAL_NULL;
+ return ECORE_NOMEM;
}
/* Allocate and initialize EQ chain */
@@ -956,27 +969,30 @@ struct ecore_consq *ecore_consq_alloc(struct ecore_hwfn *p_hwfn)
ECORE_CHAIN_MODE_PBL,
ECORE_CHAIN_CNT_TYPE_U16,
ECORE_CHAIN_PAGE_SIZE / 0x80,
- 0x80, &p_consq->chain)) {
+ 0x80,
+ &p_consq->chain, OSAL_NULL) != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, true, "Failed to allocate consq chain");
goto consq_allocate_fail;
}
- return p_consq;
+ p_hwfn->p_consq = p_consq;
+ return ECORE_SUCCESS;
consq_allocate_fail:
- ecore_consq_free(p_hwfn, p_consq);
- return OSAL_NULL;
+ OSAL_FREE(p_hwfn->p_dev, p_consq);
+ return ECORE_NOMEM;
}
-void ecore_consq_setup(struct ecore_hwfn *p_hwfn, struct ecore_consq *p_consq)
+void ecore_consq_setup(struct ecore_hwfn *p_hwfn)
{
- ecore_chain_reset(&p_consq->chain);
+ ecore_chain_reset(&p_hwfn->p_consq->chain);
}
-void ecore_consq_free(struct ecore_hwfn *p_hwfn, struct ecore_consq *p_consq)
+void ecore_consq_free(struct ecore_hwfn *p_hwfn)
{
- if (!p_consq)
+ if (!p_hwfn->p_consq)
return;
- ecore_chain_free(p_hwfn->p_dev, &p_consq->chain);
- OSAL_FREE(p_hwfn->p_dev, p_consq);
+
+ ecore_chain_free(p_hwfn->p_dev, &p_hwfn->p_consq->chain);
+ OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_consq);
}
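
The ConsQ follows the same ownership model; a parallel sketch:

	static enum _ecore_status_t
	example_consq_lifecycle(struct ecore_hwfn *p_hwfn)
	{
		if (ecore_consq_alloc(p_hwfn) != ECORE_SUCCESS)
			return ECORE_NOMEM;	/* p_hwfn->p_consq set otherwise */

		ecore_consq_setup(p_hwfn);	/* reset the chain */
		/* ... */
		ecore_consq_free(p_hwfn);	/* frees chain and struct */

		return ECORE_SUCCESS;
	}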
diff --git a/drivers/net/qede/base/ecore_spq.h b/drivers/net/qede/base/ecore_spq.h
index 717ede30..e530f834 100644
--- a/drivers/net/qede/base/ecore_spq.h
+++ b/drivers/net/qede/base/ecore_spq.h
@@ -26,6 +26,7 @@ union ramrod_data {
struct tx_queue_stop_ramrod_data tx_queue_stop;
struct vport_start_ramrod_data vport_start;
struct vport_stop_ramrod_data vport_stop;
+ struct rx_update_gft_filter_data rx_update_gft;
struct vport_update_ramrod_data vport_update;
struct core_rx_start_ramrod_data core_rx_queue_start;
struct core_rx_stop_ramrod_data core_rx_queue_stop;
@@ -194,28 +195,23 @@ void ecore_spq_return_entry(struct ecore_hwfn *p_hwfn,
* @param p_hwfn
* @param num_elem number of elements in the eq
*
- * @return struct ecore_eq* - a newly allocated structure; NULL upon error.
+ * @return enum _ecore_status_t
*/
-struct ecore_eq *ecore_eq_alloc(struct ecore_hwfn *p_hwfn,
- u16 num_elem);
+enum _ecore_status_t ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem);
/**
- * @brief ecore_eq_setup - Reset the SPQ to its start state.
+ * @brief ecore_eq_setup - Reset the EQ to its start state.
*
* @param p_hwfn
- * @param p_eq
*/
-void ecore_eq_setup(struct ecore_hwfn *p_hwfn,
- struct ecore_eq *p_eq);
+void ecore_eq_setup(struct ecore_hwfn *p_hwfn);
/**
- * @brief ecore_eq_deallocate - deallocates the given EQ struct.
+ * @brief ecore_eq_free - deallocates the given EQ struct.
*
* @param p_hwfn
- * @param p_eq
*/
-void ecore_eq_free(struct ecore_hwfn *p_hwfn,
- struct ecore_eq *p_eq);
+void ecore_eq_free(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_eq_prod_update - update the FW with default EQ producer
@@ -261,32 +257,26 @@ enum _ecore_status_t ecore_spq_completion(struct ecore_hwfn *p_hwfn,
u32 ecore_spq_get_cid(struct ecore_hwfn *p_hwfn);
/**
- * @brief ecore_consq_alloc - Allocates & initializes an ConsQ
- * struct
+ * @brief ecore_consq_alloc - Allocates & initializes an ConsQ struct
*
* @param p_hwfn
*
- * @return struct ecore_eq* - a newly allocated structure; NULL upon error.
+ * @return enum _ecore_status_t
*/
-struct ecore_consq *ecore_consq_alloc(struct ecore_hwfn *p_hwfn);
+enum _ecore_status_t ecore_consq_alloc(struct ecore_hwfn *p_hwfn);
/**
- * @brief ecore_consq_setup - Reset the ConsQ to its start
- * state.
+ * @brief ecore_consq_setup - Reset the ConsQ to its start state.
*
* @param p_hwfn
- * @param p_eq
*/
-void ecore_consq_setup(struct ecore_hwfn *p_hwfn,
- struct ecore_consq *p_consq);
+void ecore_consq_setup(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_consq_free - deallocates the given ConsQ struct.
*
* @param p_hwfn
- * @param p_eq
*/
-void ecore_consq_free(struct ecore_hwfn *p_hwfn,
- struct ecore_consq *p_consq);
+void ecore_consq_free(struct ecore_hwfn *p_hwfn);
#endif /* __ECORE_SPQ_H__ */
diff --git a/drivers/net/qede/base/ecore_sriov.c b/drivers/net/qede/base/ecore_sriov.c
index b28d7281..db2873e7 100644
--- a/drivers/net/qede/base/ecore_sriov.c
+++ b/drivers/net/qede/base/ecore_sriov.c
@@ -51,6 +51,8 @@ const char *ecore_channel_tlvs_string[] = {
"CHANNEL_TLV_VPORT_UPDATE_RSS",
"CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN",
"CHANNEL_TLV_VPORT_UPDATE_SGE_TPA",
+ "CHANNEL_TLV_UPDATE_TUNN_PARAM",
+ "CHANNEL_TLV_COALESCE_UPDATE",
"CHANNEL_TLV_MAX"
};
@@ -86,6 +88,7 @@ static enum _ecore_status_t ecore_sp_vf_start(struct ecore_hwfn *p_hwfn,
p_ramrod->personality = PERSONALITY_ETH;
break;
case ECORE_PCI_ETH_ROCE:
+ case ECORE_PCI_ETH_IWARP:
p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
break;
default:
@@ -146,7 +149,7 @@ static enum _ecore_status_t ecore_sp_vf_stop(struct ecore_hwfn *p_hwfn,
}
bool ecore_iov_is_valid_vfid(struct ecore_hwfn *p_hwfn, int rel_vf_id,
- bool b_enabled_only)
+ bool b_enabled_only, bool b_non_malicious)
{
if (!p_hwfn->pf_iov_info) {
DP_NOTICE(p_hwfn->p_dev, true, "No iov info\n");
@@ -161,6 +164,10 @@ bool ecore_iov_is_valid_vfid(struct ecore_hwfn *p_hwfn, int rel_vf_id,
b_enabled_only)
return false;
+ if ((p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_malicious) &&
+ b_non_malicious)
+ return false;
+
return true;
}
@@ -175,7 +182,8 @@ struct ecore_vf_info *ecore_iov_get_vf_info(struct ecore_hwfn *p_hwfn,
return OSAL_NULL;
}
- if (ecore_iov_is_valid_vfid(p_hwfn, relative_vf_id, b_enabled_only))
+ if (ecore_iov_is_valid_vfid(p_hwfn, relative_vf_id,
+ b_enabled_only, false))
vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
else
DP_ERR(p_hwfn, "ecore_iov_get_vf_info: VF[%d] is not enabled\n",
@@ -184,28 +192,90 @@ struct ecore_vf_info *ecore_iov_get_vf_info(struct ecore_hwfn *p_hwfn,
return vf;
}
+static struct ecore_queue_cid *
+ecore_iov_get_vf_rx_queue_cid(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *p_vf,
+ struct ecore_vf_queue *p_queue)
+{
+ int i;
+
+ for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
+ if (p_queue->cids[i].p_cid &&
+ !p_queue->cids[i].b_is_tx)
+ return p_queue->cids[i].p_cid;
+ }
+
+ return OSAL_NULL;
+}
+
+enum ecore_iov_validate_q_mode {
+ ECORE_IOV_VALIDATE_Q_NA,
+ ECORE_IOV_VALIDATE_Q_ENABLE,
+ ECORE_IOV_VALIDATE_Q_DISABLE,
+};
+
+static bool ecore_iov_validate_queue_mode(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *p_vf,
+ u16 qid,
+ enum ecore_iov_validate_q_mode mode,
+ bool b_is_tx)
+{
+ int i;
+
+ if (mode == ECORE_IOV_VALIDATE_Q_NA)
+ return true;
+
+ for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
+ struct ecore_vf_queue_cid *p_qcid;
+
+ p_qcid = &p_vf->vf_queues[qid].cids[i];
+
+ if (p_qcid->p_cid == OSAL_NULL)
+ continue;
+
+ if (p_qcid->b_is_tx != b_is_tx)
+ continue;
+
+ /* Found. It's enabled. */
+ return (mode == ECORE_IOV_VALIDATE_Q_ENABLE);
+ }
+
+	/* In case we haven't found any valid cid, then it's disabled */
+ return (mode == ECORE_IOV_VALIDATE_Q_DISABLE);
+}
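+
+The tri-state mode argument makes each call site's intent explicit. An illustrative fragment using the names introduced in this patch:
+
+	/* Opening an Rx queue requires its qzone slot to be closed... */
+	if (!ecore_iov_validate_rxq(p_hwfn, p_vf, rx_qid,
+				    ECORE_IOV_VALIDATE_Q_DISABLE))
+		return ECORE_INVAL;
+
+	/* ...whereas ECORE_IOV_VALIDATE_Q_NA reduces to a range check. */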
+
static bool ecore_iov_validate_rxq(struct ecore_hwfn *p_hwfn,
struct ecore_vf_info *p_vf,
- u16 rx_qid)
+ u16 rx_qid,
+ enum ecore_iov_validate_q_mode mode)
{
- if (rx_qid >= p_vf->num_rxqs)
+ if (rx_qid >= p_vf->num_rxqs) {
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
"VF[0x%02x] - can't touch Rx queue[%04x];"
" Only 0x%04x are allocated\n",
p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs);
- return rx_qid < p_vf->num_rxqs;
+ return false;
+ }
+
+ return ecore_iov_validate_queue_mode(p_hwfn, p_vf, rx_qid,
+ mode, false);
}
static bool ecore_iov_validate_txq(struct ecore_hwfn *p_hwfn,
struct ecore_vf_info *p_vf,
- u16 tx_qid)
+ u16 tx_qid,
+ enum ecore_iov_validate_q_mode mode)
{
- if (tx_qid >= p_vf->num_txqs)
+ if (tx_qid >= p_vf->num_txqs) {
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
"VF[0x%02x] - can't touch Tx queue[%04x];"
" Only 0x%04x are allocated\n",
p_vf->abs_vf_id, tx_qid, p_vf->num_txqs);
- return tx_qid < p_vf->num_txqs;
+ return false;
+ }
+
+ return ecore_iov_validate_queue_mode(p_hwfn, p_vf, tx_qid,
+ mode, true);
}
static bool ecore_iov_validate_sb(struct ecore_hwfn *p_hwfn,
@@ -226,6 +296,35 @@ static bool ecore_iov_validate_sb(struct ecore_hwfn *p_hwfn,
return false;
}
+/* Is there at least 1 queue open? */
+static bool ecore_iov_validate_active_rxq(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *p_vf)
+{
+ u8 i;
+
+ for (i = 0; i < p_vf->num_rxqs; i++)
+ if (ecore_iov_validate_queue_mode(p_hwfn, p_vf, i,
+ ECORE_IOV_VALIDATE_Q_ENABLE,
+ false))
+ return true;
+
+ return false;
+}
+
+static bool ecore_iov_validate_active_txq(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *p_vf)
+{
+ u8 i;
+
+ for (i = 0; i < p_vf->num_txqs; i++)
+ if (ecore_iov_validate_queue_mode(p_hwfn, p_vf, i,
+ ECORE_IOV_VALIDATE_Q_ENABLE,
+ true))
+ return true;
+
+ return false;
+}
+
/* TODO - this is linux crc32; Need a way to ifdef it out for linux */
u32 ecore_crc32(u32 crc, u8 *ptr, u32 length)
{
@@ -317,10 +416,9 @@ static enum _ecore_status_t ecore_iov_pci_cfg_info(struct ecore_dev *p_dev)
OSAL_PCI_READ_CONFIG_BYTE(p_dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
- DP_VERBOSE(p_dev, ECORE_MSG_IOV, "IOV info[%d]: nres %d, cap 0x%x,"
+ DP_VERBOSE(p_dev, ECORE_MSG_IOV, "IOV info: nres %d, cap 0x%x,"
"ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d,"
- " stride %d, page size 0x%x\n", 0,
- /* @@@TBD MichalK - function id */
+ " stride %d, page size 0x%x\n",
iov->nres, iov->cap, iov->ctrl,
iov->total_vfs, iov->initial_vfs, iov->nr_virtfn,
iov->offset, iov->stride, iov->pgsz);
@@ -395,8 +493,6 @@ static void ecore_iov_setup_vfdb(struct ecore_hwfn *p_hwfn)
return;
}
- p_iov_info->base_vport_id = 1; /* @@@TBD resource allocation */
-
for (idx = 0; idx < p_iov->total_vfs; idx++) {
struct ecore_vf_info *vf = &p_iov_info->vfs_array[idx];
u32 concrete;
@@ -425,8 +521,6 @@ static void ecore_iov_setup_vfdb(struct ecore_hwfn *p_hwfn)
/* TODO - need to devise a better way of getting opaque */
vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) |
(vf->abs_vf_id << 8);
- /* @@TBD MichalK - add base vport_id of VFs to equation */
- vf->vport_id = p_iov_info->base_vport_id + idx;
vf->num_mac_filters = ECORE_ETH_VF_NUM_MAC_FILTERS;
vf->num_vlan_filters = ECORE_ETH_VF_NUM_VLAN_FILTERS;
@@ -550,7 +644,6 @@ void ecore_iov_free(struct ecore_hwfn *p_hwfn)
void ecore_iov_free_hw_info(struct ecore_dev *p_dev)
{
OSAL_FREE(p_dev, p_dev->p_iov_info);
- p_dev->p_iov_info = OSAL_NULL;
}
enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn *p_hwfn)
@@ -593,18 +686,33 @@ enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn *p_hwfn)
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
"IOV capabilities, but no VFs are published\n");
OSAL_FREE(p_dev, p_dev->p_iov_info);
- p_dev->p_iov_info = OSAL_NULL;
return ECORE_SUCCESS;
}
- /* Calculate the first VF index - this is a bit tricky; Basically,
- * VFs start at offset 16 relative to PF0, and 2nd engine VFs begin
- * after the first engine's VFs.
+ /* First VF index based on offset is tricky:
+ * - If ARI is supported [likely], offset - (16 - pf_id) would
+	 *    provide the number for eng0. 2nd engine VFs would begin
+	 *    after the first engine's VFs.
+	 *  - If !ARI, VFs would start on the next device,
+	 *    so offset - (256 - pf_id) would provide the number.
+	 * Utilize the fact that (256 - pf_id) is achieved only by the
+	 * latter to differentiate between the two.
*/
- p_dev->p_iov_info->first_vf_in_pf = p_hwfn->p_dev->p_iov_info->offset +
- p_hwfn->abs_pf_id - 16;
- if (ECORE_PATH_ID(p_hwfn))
- p_dev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;
+
+ if (p_hwfn->p_dev->p_iov_info->offset < (256 - p_hwfn->abs_pf_id)) {
+ u32 first = p_hwfn->p_dev->p_iov_info->offset +
+ p_hwfn->abs_pf_id - 16;
+
+ p_dev->p_iov_info->first_vf_in_pf = first;
+
+ if (ECORE_PATH_ID(p_hwfn))
+ p_dev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;
+ } else {
+ u32 first = p_hwfn->p_dev->p_iov_info->offset +
+ p_hwfn->abs_pf_id - 256;
+
+ p_dev->p_iov_info->first_vf_in_pf = first;
+ }
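+
+	/* Worked example of the heuristic above (values illustrative):
+	 * PF2, offset 14:  14 < (256 - 2), so the ARI branch applies and
+	 *                  first_vf_in_pf = 14 + 2 - 16 = 0.
+	 * PF2, offset 254: 254 >= (256 - 2), so the !ARI branch applies and
+	 *                  first_vf_in_pf = 254 + 2 - 256 = 0.
+	 */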
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
"First VF in hwfn 0x%08x\n",
@@ -613,7 +721,8 @@ enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn *p_hwfn)
return ECORE_SUCCESS;
}
-bool ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid)
+static bool _ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid,
+ bool b_fail_malicious)
{
/* Check PF supports sriov */
if (IS_VF(p_hwfn->p_dev) || !IS_ECORE_SRIOV(p_hwfn->p_dev) ||
@@ -621,12 +730,17 @@ bool ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid)
return false;
/* Check VF validity */
- if (!ecore_iov_is_valid_vfid(p_hwfn, vfid, true))
+ if (!ecore_iov_is_valid_vfid(p_hwfn, vfid, true, b_fail_malicious))
return false;
return true;
}
+bool ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid)
+{
+ return _ecore_iov_pf_sanity_check(p_hwfn, vfid, true);
+}
+
void ecore_iov_set_vf_to_disable(struct ecore_dev *p_dev,
u16 rel_vf_id, u8 to_disable)
{
@@ -747,6 +861,9 @@ ecore_iov_enable_vf_access(struct ecore_hwfn *p_hwfn,
ecore_iov_vf_igu_reset(p_hwfn, p_ptt, vf);
+ /* It's possible VF was previously considered malicious */
+ vf->b_malicious = false;
+
rc = ecore_mcp_config_vf_msix(p_hwfn, p_ptt,
vf->abs_vf_id, vf->num_sbs);
if (rc != ECORE_SUCCESS)
@@ -901,17 +1018,59 @@ static void ecore_iov_free_vf_igu_sbs(struct ecore_hwfn *p_hwfn,
vf->num_sbs = 0;
}
-enum _ecore_status_t ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u16 rel_vf_id, u16 num_rx_queues)
+void ecore_iov_set_link(struct ecore_hwfn *p_hwfn,
+ u16 vfid,
+ struct ecore_mcp_link_params *params,
+ struct ecore_mcp_link_state *link,
+ struct ecore_mcp_link_capabilities *p_caps)
+{
+ struct ecore_vf_info *p_vf = ecore_iov_get_vf_info(p_hwfn, vfid, false);
+ struct ecore_bulletin_content *p_bulletin;
+
+ if (!p_vf)
+ return;
+
+ p_bulletin = p_vf->bulletin.p_virt;
+ p_bulletin->req_autoneg = params->speed.autoneg;
+ p_bulletin->req_adv_speed = params->speed.advertised_speeds;
+ p_bulletin->req_forced_speed = params->speed.forced_speed;
+ p_bulletin->req_autoneg_pause = params->pause.autoneg;
+ p_bulletin->req_forced_rx = params->pause.forced_rx;
+ p_bulletin->req_forced_tx = params->pause.forced_tx;
+ p_bulletin->req_loopback = params->loopback_mode;
+
+ p_bulletin->link_up = link->link_up;
+ p_bulletin->speed = link->speed;
+ p_bulletin->full_duplex = link->full_duplex;
+ p_bulletin->autoneg = link->an;
+ p_bulletin->autoneg_complete = link->an_complete;
+ p_bulletin->parallel_detection = link->parallel_detection;
+ p_bulletin->pfc_enabled = link->pfc_enabled;
+ p_bulletin->partner_adv_speed = link->partner_adv_speed;
+ p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
+ p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
+ p_bulletin->partner_adv_pause = link->partner_adv_pause;
+ p_bulletin->sfp_tx_fault = link->sfp_tx_fault;
+
+ p_bulletin->capability_speed = p_caps->speed_capabilities;
+}
+
+enum _ecore_status_t
+ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_iov_vf_init_params *p_params)
{
+ struct ecore_mcp_link_capabilities link_caps;
+ struct ecore_mcp_link_params link_params;
+ struct ecore_mcp_link_state link_state;
u8 num_of_vf_available_chains = 0;
struct ecore_vf_info *vf = OSAL_NULL;
+ u16 qid, num_irqs;
enum _ecore_status_t rc = ECORE_SUCCESS;
u32 cids;
u8 i;
- vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false);
+ vf = ecore_iov_get_vf_info(p_hwfn, p_params->rel_vf_id, false);
if (!vf) {
DP_ERR(p_hwfn, "ecore_iov_init_hw_for_vf : vf is OSAL_NULL\n");
return ECORE_UNKNOWN_ERROR;
@@ -919,22 +1078,80 @@ enum _ecore_status_t ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
if (vf->b_init) {
DP_NOTICE(p_hwfn, true, "VF[%d] is already active.\n",
- rel_vf_id);
+ p_params->rel_vf_id);
+ return ECORE_INVAL;
+ }
+
+ /* Perform sanity checking on the requested vport/rss */
+ if (p_params->vport_id >= RESC_NUM(p_hwfn, ECORE_VPORT)) {
+ DP_NOTICE(p_hwfn, true, "VF[%d] - can't use VPORT %02x\n",
+ p_params->rel_vf_id, p_params->vport_id);
+ return ECORE_INVAL;
+ }
+
+ if ((p_params->num_queues > 1) &&
+ (p_params->rss_eng_id >= RESC_NUM(p_hwfn, ECORE_RSS_ENG))) {
+ DP_NOTICE(p_hwfn, true, "VF[%d] - can't use RSS_ENG %02x\n",
+ p_params->rel_vf_id, p_params->rss_eng_id);
return ECORE_INVAL;
}
+	/* TODO - remove this once we are confident in the change */
+ if (!p_params->vport_id) {
+ DP_NOTICE(p_hwfn, false,
+ "VF[%d] - Unlikely that VF uses vport0. Forgotten?\n",
+ p_params->rel_vf_id);
+ }
+ if ((!p_params->rss_eng_id) && (p_params->num_queues > 1)) {
+ DP_NOTICE(p_hwfn, false,
+ "VF[%d] - Unlikely that VF uses RSS_eng0. Forgotten?\n",
+ p_params->rel_vf_id);
+ }
+ vf->vport_id = p_params->vport_id;
+ vf->rss_eng_id = p_params->rss_eng_id;
+
+ /* Perform sanity checking on the requested queue_id */
+ for (i = 0; i < p_params->num_queues; i++) {
+ u16 min_vf_qzone = (u16)FEAT_NUM(p_hwfn, ECORE_PF_L2_QUE);
+ u16 max_vf_qzone = min_vf_qzone +
+ FEAT_NUM(p_hwfn, ECORE_VF_L2_QUE) - 1;
+
+ qid = p_params->req_rx_queue[i];
+ if (qid < min_vf_qzone || qid > max_vf_qzone) {
+ DP_NOTICE(p_hwfn, true,
+ "Can't enable Rx qid [%04x] for VF[%d]: qids [0x%04x,...,0x%04x] available\n",
+ qid, p_params->rel_vf_id,
+ min_vf_qzone, max_vf_qzone);
+ return ECORE_INVAL;
+ }
+
+ qid = p_params->req_tx_queue[i];
+ if (qid > max_vf_qzone) {
+ DP_NOTICE(p_hwfn, true,
+ "Can't enable Tx qid [%04x] for VF[%d]: max qid 0x%04x\n",
+ qid, p_params->rel_vf_id, max_vf_qzone);
+ return ECORE_INVAL;
+ }
+
+ /* If client *really* wants, Tx qid can be shared with PF */
+ if (qid < min_vf_qzone)
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d] is using PF qid [0x%04x] for Txq[0x%02x]\n",
+ p_params->rel_vf_id, qid, i);
+ }
+
/* Limit number of queues according to number of CIDs */
ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids);
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
"VF[%d] - requesting to initialize for 0x%04x queues"
" [0x%04x CIDs available]\n",
- vf->relative_vf_id, num_rx_queues, (u16)cids);
- num_rx_queues = OSAL_MIN_T(u16, num_rx_queues, ((u16)cids));
+ vf->relative_vf_id, p_params->num_queues, (u16)cids);
+ num_irqs = OSAL_MIN_T(u16, p_params->num_queues, ((u16)cids));
num_of_vf_available_chains = ecore_iov_alloc_vf_igu_sbs(p_hwfn,
p_ptt,
vf,
- num_rx_queues);
+ num_irqs);
if (num_of_vf_available_chains == 0) {
DP_ERR(p_hwfn, "no available igu sbs\n");
return ECORE_NOMEM;
@@ -945,28 +1162,28 @@ enum _ecore_status_t ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
vf->num_txqs = num_of_vf_available_chains;
for (i = 0; i < vf->num_rxqs; i++) {
- u16 queue_id = ecore_int_queue_id_from_sb_id(p_hwfn,
- vf->igu_sbs[i]);
+ struct ecore_vf_queue *p_queue = &vf->vf_queues[i];
- if (queue_id > RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) {
- DP_NOTICE(p_hwfn, true,
- "VF[%d] will require utilizing of"
- " out-of-bounds queues - %04x\n",
- vf->relative_vf_id, queue_id);
- /* TODO - cleanup the already allocate SBs */
- return ECORE_INVAL;
- }
-
- /* CIDs are per-VF, so no problem having them 0-based. */
- vf->vf_queues[i].fw_rx_qid = queue_id;
- vf->vf_queues[i].fw_tx_qid = queue_id;
- vf->vf_queues[i].fw_cid = i;
+ p_queue->fw_rx_qid = p_params->req_rx_queue[i];
+ p_queue->fw_tx_qid = p_params->req_tx_queue[i];
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
- "VF[%d] - [%d] SB %04x, Tx/Rx queue %04x CID %04x\n",
- vf->relative_vf_id, i, vf->igu_sbs[i], queue_id, i);
+ "VF[%d] - Q[%d] SB %04x, qid [Rx %04x Tx %04x]\n",
+ vf->relative_vf_id, i, vf->igu_sbs[i],
+ p_queue->fw_rx_qid, p_queue->fw_tx_qid);
}
+	/* Update the link configuration in bulletin */
+ OSAL_MEMCPY(&link_params, ecore_mcp_get_link_params(p_hwfn),
+ sizeof(link_params));
+ OSAL_MEMCPY(&link_state, ecore_mcp_get_link_state(p_hwfn),
+ sizeof(link_state));
+ OSAL_MEMCPY(&link_caps, ecore_mcp_get_link_capabilities(p_hwfn),
+ sizeof(link_caps));
+ ecore_iov_set_link(p_hwfn, p_params->rel_vf_id,
+ &link_params, &link_state, &link_caps);
+
rc = ecore_iov_enable_vf_access(p_hwfn, p_ptt, vf);
if (rc == ECORE_SUCCESS) {
@@ -981,43 +1198,6 @@ enum _ecore_status_t ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
return rc;
}
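
Callers now describe the whole VF up front. A hedged sketch of filling the new params structure (the queue ids are placeholders; real ones must fall inside the VF L2 queue-zone range checked above):

	static enum _ecore_status_t
	example_init_vf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
	{
		struct ecore_iov_vf_init_params params;

		OSAL_MEMSET(&params, 0, sizeof(params));
		params.rel_vf_id = 0;
		params.vport_id = 1;		/* vport 0 draws the notice above */
		params.rss_eng_id = 1;
		params.num_queues = 2;
		params.req_rx_queue[0] = 16;	/* placeholder qzone ids */
		params.req_rx_queue[1] = 17;
		params.req_tx_queue[0] = 16;
		params.req_tx_queue[1] = 17;

		return ecore_iov_init_hw_for_vf(p_hwfn, p_ptt, &params);
	}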
-void ecore_iov_set_link(struct ecore_hwfn *p_hwfn,
- u16 vfid,
- struct ecore_mcp_link_params *params,
- struct ecore_mcp_link_state *link,
- struct ecore_mcp_link_capabilities *p_caps)
-{
- struct ecore_vf_info *p_vf = ecore_iov_get_vf_info(p_hwfn, vfid, false);
- struct ecore_bulletin_content *p_bulletin;
-
- if (!p_vf)
- return;
-
- p_bulletin = p_vf->bulletin.p_virt;
- p_bulletin->req_autoneg = params->speed.autoneg;
- p_bulletin->req_adv_speed = params->speed.advertised_speeds;
- p_bulletin->req_forced_speed = params->speed.forced_speed;
- p_bulletin->req_autoneg_pause = params->pause.autoneg;
- p_bulletin->req_forced_rx = params->pause.forced_rx;
- p_bulletin->req_forced_tx = params->pause.forced_tx;
- p_bulletin->req_loopback = params->loopback_mode;
-
- p_bulletin->link_up = link->link_up;
- p_bulletin->speed = link->speed;
- p_bulletin->full_duplex = link->full_duplex;
- p_bulletin->autoneg = link->an;
- p_bulletin->autoneg_complete = link->an_complete;
- p_bulletin->parallel_detection = link->parallel_detection;
- p_bulletin->pfc_enabled = link->pfc_enabled;
- p_bulletin->partner_adv_speed = link->partner_adv_speed;
- p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
- p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
- p_bulletin->partner_adv_pause = link->partner_adv_pause;
- p_bulletin->sfp_tx_fault = link->sfp_tx_fault;
-
- p_bulletin->capability_speed = p_caps->speed_capabilities;
-}
-
enum _ecore_status_t ecore_iov_release_hw_for_vf(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u16 rel_vf_id)
@@ -1326,7 +1506,7 @@ struct ecore_public_vf_info
static void ecore_iov_vf_cleanup(struct ecore_hwfn *p_hwfn,
struct ecore_vf_info *p_vf)
{
- u32 i;
+ u32 i, j;
p_vf->vf_bulletin = 0;
p_vf->vport_instance = 0;
p_vf->configured_features = 0;
@@ -1337,8 +1517,18 @@ static void ecore_iov_vf_cleanup(struct ecore_hwfn *p_hwfn,
p_vf->num_active_rxqs = 0;
- for (i = 0; i < ECORE_MAX_VF_CHAINS_PER_PF; i++)
- p_vf->vf_queues[i].rxq_active = 0;
+ for (i = 0; i < ECORE_MAX_VF_CHAINS_PER_PF; i++) {
+ struct ecore_vf_queue *p_queue = &p_vf->vf_queues[i];
+
+ for (j = 0; j < MAX_QUEUES_PER_QZONE; j++) {
+ if (!p_queue->cids[j].p_cid)
+ continue;
+
+ ecore_eth_queue_cid_release(p_hwfn,
+ p_queue->cids[j].p_cid);
+ p_queue->cids[j].p_cid = OSAL_NULL;
+ }
+ }
OSAL_MEMSET(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config));
OSAL_MEMSET(&p_vf->acquire, 0, sizeof(p_vf->acquire));
@@ -1351,7 +1541,7 @@ static u8 ecore_iov_vf_mbx_acquire_resc(struct ecore_hwfn *p_hwfn,
struct vf_pf_resc_request *p_req,
struct pf_vf_resc *p_resp)
{
- int i;
+ u8 i;
/* Queue related information */
p_resp->num_rxqs = p_vf->num_rxqs;
@@ -1372,7 +1562,7 @@ static u8 ecore_iov_vf_mbx_acquire_resc(struct ecore_hwfn *p_hwfn,
for (i = 0; i < p_resp->num_rxqs; i++) {
ecore_fw_l2_queue(p_hwfn, p_vf->vf_queues[i].fw_rx_qid,
(u16 *)&p_resp->hw_qid[i]);
- p_resp->cid[i] = p_vf->vf_queues[i].fw_cid;
+ p_resp->cid[i] = i;
}
/* Filter related information */
@@ -1460,6 +1650,18 @@ static void ecore_iov_vf_mbx_acquire(struct ecore_hwfn *p_hwfn,
pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR;
pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR;
+ /* TODO - not doing anything is bad since we'll assert, but this isn't
+ * necessarily the right behavior - perhaps we should have allowed some
+ * versatility here.
+ */
+ if (vf->state != VF_FREE &&
+ vf->state != VF_STOPPED) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d] sent ACQUIRE but is already in state %d - fail request\n",
+ vf->abs_vf_id, vf->state);
+ goto out;
+ }
+
/* Validate FW compatibility */
if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) {
if (req->vfdev_info.capabilities &
@@ -1575,12 +1777,12 @@ static void ecore_iov_vf_mbx_acquire(struct ecore_hwfn *p_hwfn,
"VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x,"
" db_size=%d, idx_per_sb=%d, pf_cap=0x%lx\n"
"resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d,"
- " n_vlans-%d, n_mcs-%d\n",
+ " n_vlans-%d\n",
vf->abs_vf_id, resp->pfdev_info.chip_num,
resp->pfdev_info.db_size, resp->pfdev_info.indices_per_sb,
(unsigned long)resp->pfdev_info.capabilities, resc->num_rxqs,
resc->num_txqs, resc->num_sbs, resc->num_mac_filters,
- resc->num_vlan_filters, resc->num_mc_filters);
+ resc->num_vlan_filters);
vf->state = VF_ACQUIRED;
@@ -1650,11 +1852,9 @@ ecore_iov_reconfigure_unicast_vlan(struct ecore_hwfn *p_hwfn,
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
"Reconfiguring VLAN [0x%04x] for VF [%04x]\n",
filter.vlan, p_vf->relative_vf_id);
- rc = ecore_sp_eth_filter_ucast(p_hwfn,
- p_vf->opaque_fid,
- &filter,
- ECORE_SPQ_MODE_CB,
- OSAL_NULL);
+ rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
+ &filter, ECORE_SPQ_MODE_CB,
+ OSAL_NULL);
if (rc) {
DP_NOTICE(p_hwfn, true,
"Failed to configure VLAN [%04x]"
@@ -1682,9 +1882,10 @@ ecore_iov_reconfigure_unicast_shadow(struct ecore_hwfn *p_hwfn,
return rc;
}
-static int ecore_iov_configure_vport_forced(struct ecore_hwfn *p_hwfn,
- struct ecore_vf_info *p_vf,
- u64 events)
+static enum _ecore_status_t
+ecore_iov_configure_vport_forced(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *p_vf,
+ u64 events)
{
enum _ecore_status_t rc = ECORE_SUCCESS;
struct ecore_filter_ucast filter;
@@ -1764,14 +1965,17 @@ static int ecore_iov_configure_vport_forced(struct ecore_hwfn *p_hwfn,
/* Update all the Rx queues */
for (i = 0; i < ECORE_MAX_VF_CHAINS_PER_PF; i++) {
- u16 qid;
+ struct ecore_vf_queue *p_queue = &p_vf->vf_queues[i];
+ struct ecore_queue_cid *p_cid = OSAL_NULL;
- if (!p_vf->vf_queues[i].rxq_active)
+		/* There can be at most 1 Rx queue per qzone. Find it */
+ p_cid = ecore_iov_get_vf_rx_queue_cid(p_hwfn, p_vf,
+ p_queue);
+ if (p_cid == OSAL_NULL)
continue;
- qid = p_vf->vf_queues[i].fw_rx_qid;
-
- rc = ecore_sp_eth_rx_queues_update(p_hwfn, qid,
+ rc = ecore_sp_eth_rx_queues_update(p_hwfn,
+ (void **)&p_cid,
1, 0, 1,
ECORE_SPQ_MODE_EBLOCK,
OSAL_NULL);
@@ -1779,7 +1983,7 @@ static int ecore_iov_configure_vport_forced(struct ecore_hwfn *p_hwfn,
DP_NOTICE(p_hwfn, true,
"Failed to send Rx update"
" fo queue[0x%04x]\n",
- qid);
+ p_cid->rel.queue_id);
return rc;
}
}
@@ -1823,6 +2027,8 @@ static void ecore_iov_vf_mbx_start_vport(struct ecore_hwfn *p_hwfn,
vf->state = VF_ENABLED;
start = &mbx->req_virt->start_vport;
+ ecore_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);
+
/* Initialize Status block in CAU */
for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) {
if (!start->sb_addr[sb_id]) {
@@ -1837,7 +2043,6 @@ static void ecore_iov_vf_mbx_start_vport(struct ecore_hwfn *p_hwfn,
vf->igu_sbs[sb_id],
vf->abs_vf_id, 1);
}
- ecore_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);
vf->mtu = start->mtu;
vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal;
@@ -1904,6 +2109,15 @@ static void ecore_iov_vf_mbx_stop_vport(struct ecore_hwfn *p_hwfn,
vf->vport_instance--;
vf->spoof_chk = false;
+ if ((ecore_iov_validate_active_rxq(p_hwfn, vf)) ||
+ (ecore_iov_validate_active_txq(p_hwfn, vf))) {
+ vf->b_malicious = true;
+ DP_NOTICE(p_hwfn, false,
+ "VF [%02x] - considered malicious;"
+ " Unable to stop RX/TX queuess\n",
+ vf->abs_vf_id);
+ }
+
rc = ecore_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
if (rc != ECORE_SUCCESS) {
DP_ERR(p_hwfn,
@@ -1961,63 +2175,242 @@ static void ecore_iov_vf_mbx_start_rxq(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_vf_info *vf)
{
- struct ecore_queue_start_common_params p_params;
+ struct ecore_queue_start_common_params params;
+ struct ecore_queue_cid_vf_params vf_params;
struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
u8 status = PFVF_STATUS_NO_RESOURCE;
+ struct ecore_vf_queue *p_queue;
struct vfpf_start_rxq_tlv *req;
+ struct ecore_queue_cid *p_cid;
bool b_legacy_vf = false;
+ u8 qid_usage_idx;
enum _ecore_status_t rc;
req = &mbx->req_virt->start_rxq;
- OSAL_MEMSET(&p_params, 0, sizeof(p_params));
- p_params.queue_id = (u8)vf->vf_queues[req->rx_qid].fw_rx_qid;
- p_params.vf_qid = req->rx_qid;
- p_params.vport_id = vf->vport_id;
- p_params.stats_id = vf->abs_vf_id + 0x10,
- p_params.sb = req->hw_sb;
- p_params.sb_idx = req->sb_index;
-
- if (!ecore_iov_validate_rxq(p_hwfn, vf, req->rx_qid) ||
+
+ if (!ecore_iov_validate_rxq(p_hwfn, vf, req->rx_qid,
+ ECORE_IOV_VALIDATE_Q_DISABLE) ||
!ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb))
goto out;
- /* Legacy VFs have their Producers in a different location, which they
- * calculate on their own and clean the producer prior to this.
+	/* Legacy VFs assumed a fixed mapping between a queue and its CID:
+	 * queue X used CID X.
+	 * TODO - validate that no official release after the current
+	 * legacy scheme still made that assumption.
*/
if (vf->acquire.vfdev_info.eth_fp_hsi_minor ==
ETH_HSI_VER_NO_PKT_LEN_TUNN)
b_legacy_vf = true;
- else
+
+ /* Acquire a new queue-cid */
+ p_queue = &vf->vf_queues[req->rx_qid];
+
+ OSAL_MEMSET(&params, 0, sizeof(params));
+ params.queue_id = (u8)p_queue->fw_rx_qid;
+ params.vport_id = vf->vport_id;
+ params.stats_id = vf->abs_vf_id + 0x10;
+ params.sb = req->hw_sb;
+ params.sb_idx = req->sb_index;
+
+ /* TODO - set qid_usage_idx according to extended TLV. For now, use
+ * '0' for Rx.
+ */
+ qid_usage_idx = 0;
+
+ OSAL_MEM_ZERO(&vf_params, sizeof(vf_params));
+ vf_params.vfid = vf->relative_vf_id;
+ vf_params.vf_qid = (u8)req->rx_qid;
+ vf_params.b_legacy = b_legacy_vf;
+ vf_params.qid_usage_idx = qid_usage_idx;
+
+ p_cid = ecore_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
+ &params, &vf_params);
+ if (p_cid == OSAL_NULL)
+ goto out;
+
+ /* Legacy VFs have their Producers in a different location, which they
+ * calculate on their own and clean the producer prior to this.
+ */
+ if (!b_legacy_vf)
REG_WR(p_hwfn,
GTT_BAR0_MAP_REG_MSDM_RAM +
MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid),
0);
- rc = ecore_sp_eth_rxq_start_ramrod(p_hwfn, vf->opaque_fid,
- vf->vf_queues[req->rx_qid].fw_cid,
- &p_params,
- req->bd_max_bytes,
- req->rxq_addr,
- req->cqe_pbl_addr,
- req->cqe_pbl_size,
- b_legacy_vf);
-
- if (rc) {
+ rc = ecore_eth_rxq_start_ramrod(p_hwfn, p_cid,
+ req->bd_max_bytes,
+ req->rxq_addr,
+ req->cqe_pbl_addr,
+ req->cqe_pbl_size);
+ if (rc != ECORE_SUCCESS) {
status = PFVF_STATUS_FAILURE;
+ ecore_eth_queue_cid_release(p_hwfn, p_cid);
} else {
+ p_queue->cids[qid_usage_idx].p_cid = p_cid;
+ p_queue->cids[qid_usage_idx].b_is_tx = false;
status = PFVF_STATUS_SUCCESS;
- vf->vf_queues[req->rx_qid].rxq_active = true;
vf->num_active_rxqs++;
}
out:
- ecore_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf,
- status, b_legacy_vf);
+ ecore_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status,
+ b_legacy_vf);
+}
+
+static void
+ecore_iov_pf_update_tun_response(struct pfvf_update_tunn_param_tlv *p_resp,
+ struct ecore_tunnel_info *p_tun,
+ u16 tunn_feature_mask)
+{
+ p_resp->tunn_feature_mask = tunn_feature_mask;
+ p_resp->vxlan_mode = p_tun->vxlan.b_mode_enabled;
+ p_resp->l2geneve_mode = p_tun->l2_geneve.b_mode_enabled;
+ p_resp->ipgeneve_mode = p_tun->ip_geneve.b_mode_enabled;
+ p_resp->l2gre_mode = p_tun->l2_gre.b_mode_enabled;
+ p_resp->ipgre_mode = p_tun->l2_gre.b_mode_enabled;
+ p_resp->vxlan_clss = p_tun->vxlan.tun_cls;
+ p_resp->l2gre_clss = p_tun->l2_gre.tun_cls;
+ p_resp->ipgre_clss = p_tun->ip_gre.tun_cls;
+ p_resp->l2geneve_clss = p_tun->l2_geneve.tun_cls;
+ p_resp->ipgeneve_clss = p_tun->ip_geneve.tun_cls;
+ p_resp->geneve_udp_port = p_tun->geneve_port.port;
+ p_resp->vxlan_udp_port = p_tun->vxlan_port.port;
+}
+
+static void
+__ecore_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
+ struct ecore_tunn_update_type *p_tun,
+ enum ecore_tunn_mode mask, u8 tun_cls)
+{
+ if (p_req->tun_mode_update_mask & (1 << mask)) {
+ p_tun->b_update_mode = true;
+
+ if (p_req->tunn_mode & (1 << mask))
+ p_tun->b_mode_enabled = true;
+ }
+
+ p_tun->tun_cls = tun_cls;
+}
+
+static void
+ecore_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
+ struct ecore_tunn_update_type *p_tun,
+ struct ecore_tunn_update_udp_port *p_port,
+ enum ecore_tunn_mode mask,
+ u8 tun_cls, u8 update_port, u16 port)
+{
+ if (update_port) {
+ p_port->b_update_port = true;
+ p_port->port = port;
+ }
+
+ __ecore_iov_pf_update_tun_param(p_req, p_tun, mask, tun_cls);
+}
+
+static bool
+ecore_iov_pf_validate_tunn_param(struct vfpf_update_tunn_param_tlv *p_req)
+{
+ bool b_update_requested = false;
+
+ if (p_req->tun_mode_update_mask || p_req->update_tun_cls ||
+ p_req->update_geneve_port || p_req->update_vxlan_port)
+ b_update_requested = true;
+
+ return b_update_requested;
+}
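+
+On the VF side, a request that passes this validation might look like the following fragment (assuming p_req points at the VF's mailbox request; field names come from vfpf_update_tunn_param_tlv as used above, and the port value is the IANA-assigned VXLAN default):
+
+	p_req->tun_mode_update_mask |= (1 << ECORE_MODE_VXLAN_TUNN);
+	p_req->tunn_mode |= (1 << ECORE_MODE_VXLAN_TUNN);	/* enable VXLAN */
+	p_req->update_vxlan_port = 1;
+	p_req->vxlan_port = 4789;				/* UDP port */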
+
+static void ecore_iov_vf_mbx_update_tunn_param(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *p_vf)
+{
+ struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel;
+ struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
+ struct pfvf_update_tunn_param_tlv *p_resp;
+ struct vfpf_update_tunn_param_tlv *p_req;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ u8 status = PFVF_STATUS_SUCCESS;
+ bool b_update_required = false;
+ struct ecore_tunnel_info tunn;
+ u16 tunn_feature_mask = 0;
+ int i;
+
+ mbx->offset = (u8 *)mbx->reply_virt;
+
+ OSAL_MEM_ZERO(&tunn, sizeof(tunn));
+ p_req = &mbx->req_virt->tunn_param_update;
+
+ if (!ecore_iov_pf_validate_tunn_param(p_req)) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "No tunnel update requested by VF\n");
+ status = PFVF_STATUS_FAILURE;
+ goto send_resp;
+ }
+
+ tunn.b_update_rx_cls = p_req->update_tun_cls;
+ tunn.b_update_tx_cls = p_req->update_tun_cls;
+
+ ecore_iov_pf_update_tun_param(p_req, &tunn.vxlan, &tunn.vxlan_port,
+ ECORE_MODE_VXLAN_TUNN, p_req->vxlan_clss,
+ p_req->update_vxlan_port,
+ p_req->vxlan_port);
+ ecore_iov_pf_update_tun_param(p_req, &tunn.l2_geneve, &tunn.geneve_port,
+ ECORE_MODE_L2GENEVE_TUNN,
+ p_req->l2geneve_clss,
+ p_req->update_geneve_port,
+ p_req->geneve_port);
+ __ecore_iov_pf_update_tun_param(p_req, &tunn.ip_geneve,
+ ECORE_MODE_IPGENEVE_TUNN,
+ p_req->ipgeneve_clss);
+ __ecore_iov_pf_update_tun_param(p_req, &tunn.l2_gre,
+ ECORE_MODE_L2GRE_TUNN,
+ p_req->l2gre_clss);
+ __ecore_iov_pf_update_tun_param(p_req, &tunn.ip_gre,
+ ECORE_MODE_IPGRE_TUNN,
+ p_req->ipgre_clss);
+
+	/* Even if the PF modifies the VF's request, it should still
+	 * return an error for a partial or modified configuration,
+	 * as opposed to the requested one.
+ */
+ rc = OSAL_PF_VALIDATE_MODIFY_TUNN_CONFIG(p_hwfn, &tunn_feature_mask,
+ &b_update_required, &tunn);
+
+ if (rc != ECORE_SUCCESS)
+ status = PFVF_STATUS_FAILURE;
+
+	/* Is the ECORE client willing to update anything? */
+ if (b_update_required) {
+ u16 geneve_port;
+
+ rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, &tunn,
+ ECORE_SPQ_MODE_EBLOCK,
+ OSAL_NULL);
+ if (rc != ECORE_SUCCESS)
+ status = PFVF_STATUS_FAILURE;
+
+ geneve_port = p_tun->geneve_port.port;
+ ecore_for_each_vf(p_hwfn, i) {
+ ecore_iov_bulletin_set_udp_ports(p_hwfn, i,
+ p_tun->vxlan_port.port,
+ geneve_port);
+ }
+ }
+
+send_resp:
+ p_resp = ecore_add_tlv(p_hwfn, &mbx->offset,
+ CHANNEL_TLV_UPDATE_TUNN_PARAM, sizeof(*p_resp));
+
+ ecore_iov_pf_update_tun_response(p_resp, p_tun, tunn_feature_mask);
+ ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ ecore_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
}
static void ecore_iov_vf_mbx_start_txq_resp(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_vf_info *p_vf,
+ u32 cid,
u8 status)
{
struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
@@ -2046,12 +2439,8 @@ static void ecore_iov_vf_mbx_start_txq_resp(struct ecore_hwfn *p_hwfn,
sizeof(struct channel_list_end_tlv));
/* Update the TLV with the response */
- if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
- u16 qid = mbx->req_virt->start_txq.tx_qid;
-
- p_tlv->offset = DB_ADDR_VF(p_vf->vf_queues[qid].fw_cid,
- DQ_DEMS_LEGACY);
- }
+ if ((status == PFVF_STATUS_SUCCESS) && !b_legacy)
+ p_tlv->offset = DB_ADDR_VF(cid, DQ_DEMS_LEGACY);
ecore_iov_send_response(p_hwfn, p_ptt, p_vf, length, status);
}
@@ -2060,48 +2449,80 @@ static void ecore_iov_vf_mbx_start_txq(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_vf_info *vf)
{
- struct ecore_queue_start_common_params p_params;
+ struct ecore_queue_start_common_params params;
+ struct ecore_queue_cid_vf_params vf_params;
struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
u8 status = PFVF_STATUS_NO_RESOURCE;
- union ecore_qm_pq_params pq_params;
+ struct ecore_vf_queue *p_queue;
struct vfpf_start_txq_tlv *req;
+ struct ecore_queue_cid *p_cid;
+ bool b_legacy_vf = false;
+ u8 qid_usage_idx;
+ u32 cid = 0;
enum _ecore_status_t rc;
+ u16 pq;
- /* Prepare the parameters which would choose the right PQ */
- OSAL_MEMSET(&pq_params, 0, sizeof(pq_params));
- pq_params.eth.is_vf = 1;
- pq_params.eth.vf_id = vf->relative_vf_id;
-
+ OSAL_MEMSET(&params, 0, sizeof(params));
req = &mbx->req_virt->start_txq;
- OSAL_MEMSET(&p_params, 0, sizeof(p_params));
- p_params.queue_id = (u8)vf->vf_queues[req->tx_qid].fw_tx_qid;
- p_params.vport_id = vf->vport_id;
- p_params.stats_id = vf->abs_vf_id + 0x10,
- p_params.sb = req->hw_sb;
- p_params.sb_idx = req->sb_index;
-
- if (!ecore_iov_validate_txq(p_hwfn, vf, req->tx_qid) ||
+
+ if (!ecore_iov_validate_txq(p_hwfn, vf, req->tx_qid,
+ ECORE_IOV_VALIDATE_Q_NA) ||
!ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb))
goto out;
- rc = ecore_sp_eth_txq_start_ramrod(
- p_hwfn,
- vf->opaque_fid,
- vf->vf_queues[req->tx_qid].fw_cid,
- &p_params,
- req->pbl_addr,
- req->pbl_size,
- &pq_params);
+ /* In case this is a legacy VF - need to know in order to use the
+ * right CIDs. TODO - validate that no official release newer than
+ * the current legacy scheme still made that assumption.
+ */
+ if (vf->acquire.vfdev_info.eth_fp_hsi_minor ==
+ ETH_HSI_VER_NO_PKT_LEN_TUNN)
+ b_legacy_vf = true;
- if (rc)
+ /* Acquire a new queue-cid */
+ p_queue = &vf->vf_queues[req->tx_qid];
+
+ params.queue_id = p_queue->fw_tx_qid;
+ params.vport_id = vf->vport_id;
+ params.stats_id = vf->abs_vf_id + 0x10;
+ params.sb = req->hw_sb;
+ params.sb_idx = req->sb_index;
+
+ /* TODO - set qid_usage_idx according to extended TLV. For now, use
+ * '1' for Tx.
+ */
+ qid_usage_idx = 1;
+
+ if (p_queue->cids[qid_usage_idx].p_cid)
+ goto out;
+
+ OSAL_MEM_ZERO(&vf_params, sizeof(vf_params));
+ vf_params.vfid = vf->relative_vf_id;
+ vf_params.vf_qid = (u8)req->tx_qid;
+ vf_params.b_legacy = b_legacy_vf;
+ vf_params.qid_usage_idx = qid_usage_idx;
+
+ p_cid = ecore_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
+ &params, &vf_params);
+ if (p_cid == OSAL_NULL)
+ goto out;
+
+ pq = ecore_get_cm_pq_idx_vf(p_hwfn,
+ vf->relative_vf_id);
+ rc = ecore_eth_txq_start_ramrod(p_hwfn, p_cid,
+ req->pbl_addr, req->pbl_size, pq);
+ if (rc != ECORE_SUCCESS) {
status = PFVF_STATUS_FAILURE;
- else {
+ ecore_eth_queue_cid_release(p_hwfn, p_cid);
+ } else {
status = PFVF_STATUS_SUCCESS;
- vf->vf_queues[req->tx_qid].txq_active = true;
+ p_queue->cids[qid_usage_idx].p_cid = p_cid;
+ p_queue->cids[qid_usage_idx].b_is_tx = true;
+ cid = p_cid->cid;
}
out:
- ecore_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf, status);
+ ecore_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf,
+ cid, status);
}
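Each VF queue-zone can now host several CIDs - the cids[MAX_QUEUES_PER_QZONE] array - and, until an extended TLV carries the index, the Tx path claims a fixed usage index of '1'. A sketch of the claim-a-slot step with simplified ex_* stand-in types (MAX_QUEUES_PER_QZONE is assumed to be 2 here):

    #include <stdbool.h>
    #include <stddef.h>

    #define EX_MAX_QUEUES_PER_QZONE 2 /* assumption for this sketch */

    struct ex_queue_cid { int cid; };

    struct ex_vf_queue_cid {
        bool b_is_tx;
        struct ex_queue_cid *p_cid;
    };

    struct ex_vf_queue {
        struct ex_vf_queue_cid cids[EX_MAX_QUEUES_PER_QZONE];
    };

    /* Claim a usage index inside a queue-zone; fails when the slot is
     * already occupied, matching the "if (p_queue->cids[qid_usage_idx]
     * .p_cid) goto out;" check in the handler above.
     */
    static bool ex_claim_qzone_slot(struct ex_vf_queue *q, size_t idx,
                                    struct ex_queue_cid *cid, bool is_tx)
    {
        if (idx >= EX_MAX_QUEUES_PER_QZONE || q->cids[idx].p_cid != NULL)
            return false;
        q->cids[idx].p_cid = cid;
        q->cids[idx].b_is_tx = is_tx;
        return true;
    }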
static enum _ecore_status_t ecore_iov_vf_stop_rxqs(struct ecore_hwfn *p_hwfn,
@@ -2111,22 +2532,37 @@ static enum _ecore_status_t ecore_iov_vf_stop_rxqs(struct ecore_hwfn *p_hwfn,
bool cqe_completion)
{
enum _ecore_status_t rc = ECORE_SUCCESS;
- int qid;
+ int qid, i;
+ /* TODO - improve validation [wrap around] */
if (rxq_id + num_rxqs > OSAL_ARRAY_SIZE(vf->vf_queues))
return ECORE_INVAL;
for (qid = rxq_id; qid < rxq_id + num_rxqs; qid++) {
- if (vf->vf_queues[qid].rxq_active) {
- rc = ecore_sp_eth_rx_queue_stop(p_hwfn,
- vf->vf_queues[qid].
- fw_rx_qid, false,
- cqe_completion);
-
- if (rc)
- return rc;
+ struct ecore_vf_queue *p_queue = &vf->vf_queues[qid];
+ struct ecore_queue_cid **pp_cid = OSAL_NULL;
+
+ /* There can be at most a single Rx per qzone. Find it */
+ for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
+ if (p_queue->cids[i].p_cid &&
+ !p_queue->cids[i].b_is_tx) {
+ pp_cid = &p_queue->cids[i].p_cid;
+ break;
+ }
}
- vf->vf_queues[qid].rxq_active = false;
+ if (pp_cid == OSAL_NULL) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Ignoring VF[%02x] request of closing Rx queue %04x - closed\n",
+ vf->relative_vf_id, qid);
+ continue;
+ }
+
+ rc = ecore_eth_rx_queue_stop(p_hwfn, *pp_cid,
+ false, cqe_completion);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ *pp_cid = OSAL_NULL;
vf->num_active_rxqs--;
}
@@ -2138,22 +2574,33 @@ static enum _ecore_status_t ecore_iov_vf_stop_txqs(struct ecore_hwfn *p_hwfn,
u16 txq_id, u8 num_txqs)
{
enum _ecore_status_t rc = ECORE_SUCCESS;
- int qid;
+ struct ecore_vf_queue *p_queue;
+ int qid, j;
- if (txq_id + num_txqs > OSAL_ARRAY_SIZE(vf->vf_queues))
+ if (!ecore_iov_validate_txq(p_hwfn, vf, txq_id,
+ ECORE_IOV_VALIDATE_Q_NA) ||
+ !ecore_iov_validate_txq(p_hwfn, vf, txq_id + num_txqs,
+ ECORE_IOV_VALIDATE_Q_NA))
return ECORE_INVAL;
for (qid = txq_id; qid < txq_id + num_txqs; qid++) {
- if (vf->vf_queues[qid].txq_active) {
- rc = ecore_sp_eth_tx_queue_stop(p_hwfn,
- vf->vf_queues[qid].
- fw_tx_qid);
+ p_queue = &vf->vf_queues[qid];
+ for (j = 0; j < MAX_QUEUES_PER_QZONE; j++) {
+ if (p_queue->cids[j].p_cid == OSAL_NULL)
+ continue;
+
+ if (!p_queue->cids[j].b_is_tx)
+ continue;
- if (rc)
+ rc = ecore_eth_tx_queue_stop(p_hwfn,
+ p_queue->cids[j].p_cid);
+ if (rc != ECORE_SUCCESS)
return rc;
+
+ p_queue->cids[j].p_cid = OSAL_NULL;
}
- vf->vf_queues[qid].txq_active = false;
}
+
return rc;
}
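Stopping queues now walks the per-zone CID array instead of flipping a single active flag: a zone holds at most one Rx CID, and Tx CIDs are matched by their b_is_tx marker. Reusing the ex_vf_queue stand-in types from the sketch above, the Rx lookup reduces to:

    /* Return the address of the single Rx CID pointer in a queue-zone,
     * or NULL when the zone holds no Rx CID (i.e. already closed).
     */
    static struct ex_queue_cid **ex_find_rx_cid(struct ex_vf_queue *q)
    {
        size_t i;

        for (i = 0; i < EX_MAX_QUEUES_PER_QZONE; i++)
            if (q->cids[i].p_cid != NULL && !q->cids[i].b_is_tx)
                return &q->cids[i].p_cid;
        return NULL;
    }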
@@ -2208,44 +2655,52 @@ static void ecore_iov_vf_mbx_update_rxqs(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_vf_info *vf)
{
+ struct ecore_queue_cid *handlers[ECORE_MAX_VF_CHAINS_PER_PF];
u16 length = sizeof(struct pfvf_def_resp_tlv);
struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
struct vfpf_update_rxq_tlv *req;
- u8 status = PFVF_STATUS_SUCCESS;
+ u8 status = PFVF_STATUS_FAILURE;
u8 complete_event_flg;
u8 complete_cqe_flg;
- u16 qid;
enum _ecore_status_t rc;
- u8 i;
+ u16 i;
req = &mbx->req_virt->update_rxq;
complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG);
complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);
- for (i = 0; i < req->num_rxqs; i++) {
- qid = req->rx_qid + i;
-
- if (!vf->vf_queues[qid].rxq_active) {
- DP_NOTICE(p_hwfn, true,
- "VF rx_qid = %d isn`t active!\n", qid);
- status = PFVF_STATUS_FAILURE;
- break;
+ /* Validate inputs */
+ for (i = req->rx_qid; i < req->rx_qid + req->num_rxqs; i++) {
+ if (!ecore_iov_validate_rxq(p_hwfn, vf, i,
+ ECORE_IOV_VALIDATE_Q_ENABLE)) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d]: Incorrect Rxqs [%04x, %02x]\n",
+ vf->relative_vf_id, req->rx_qid,
+ req->num_rxqs);
+ goto out;
}
+ }
- rc = ecore_sp_eth_rx_queues_update(p_hwfn,
- vf->vf_queues[qid].fw_rx_qid,
- 1,
- complete_cqe_flg,
- complete_event_flg,
- ECORE_SPQ_MODE_EBLOCK,
- OSAL_NULL);
+ for (i = 0; i < req->num_rxqs; i++) {
+ struct ecore_vf_queue *p_queue;
+ u16 qid = req->rx_qid + i;
- if (rc) {
- status = PFVF_STATUS_FAILURE;
- break;
- }
+ p_queue = &vf->vf_queues[qid];
+ handlers[i] = ecore_iov_get_vf_rx_queue_cid(p_hwfn, vf,
+ p_queue);
}
+ rc = ecore_sp_eth_rx_queues_update(p_hwfn, (void **)&handlers,
+ req->num_rxqs,
+ complete_cqe_flg,
+ complete_event_flg,
+ ECORE_SPQ_MODE_EBLOCK,
+ OSAL_NULL);
+ if (rc)
+ goto out;
+
+ status = PFVF_STATUS_SUCCESS;
+out:
ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ,
length, status);
}
@@ -2422,12 +2877,14 @@ ecore_iov_vp_update_rss_param(struct ecore_hwfn *p_hwfn,
struct ecore_vf_info *vf,
struct ecore_sp_vport_update_params *p_data,
struct ecore_rss_params *p_rss,
- struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+ struct ecore_iov_vf_mbx *p_mbx,
+ u16 *tlvs_mask, u16 *tlvs_accepted)
{
struct vfpf_vport_update_rss_tlv *p_rss_tlv;
u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS;
- u16 i, q_idx, max_q_idx;
+ bool b_reject = false;
u16 table_size;
+ u16 i, q_idx;
p_rss_tlv = (struct vfpf_vport_update_rss_tlv *)
ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
@@ -2452,39 +2909,38 @@ ecore_iov_vp_update_rss_param(struct ecore_hwfn *p_hwfn,
VFPF_UPDATE_RSS_KEY_FLAG);
p_rss->rss_enable = p_rss_tlv->rss_enable;
- p_rss->rss_eng_id = vf->relative_vf_id + 1;
+ p_rss->rss_eng_id = vf->rss_eng_id;
p_rss->rss_caps = p_rss_tlv->rss_caps;
p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log;
- OSAL_MEMCPY(p_rss->rss_ind_table, p_rss_tlv->rss_ind_table,
- sizeof(p_rss->rss_ind_table));
OSAL_MEMCPY(p_rss->rss_key, p_rss_tlv->rss_key,
sizeof(p_rss->rss_key));
table_size = OSAL_MIN_T(u16, OSAL_ARRAY_SIZE(p_rss->rss_ind_table),
(1 << p_rss_tlv->rss_table_size_log));
- max_q_idx = OSAL_ARRAY_SIZE(vf->vf_queues);
-
for (i = 0; i < table_size; i++) {
- u16 index = vf->vf_queues[0].fw_rx_qid;
+ struct ecore_queue_cid *p_cid;
- q_idx = p_rss->rss_ind_table[i];
- if (q_idx >= max_q_idx)
- DP_NOTICE(p_hwfn, true,
- "rss_ind_table[%d] = %d,"
- " rxq is out of range\n",
- i, q_idx);
- else if (!vf->vf_queues[q_idx].rxq_active)
- DP_NOTICE(p_hwfn, true,
- "rss_ind_table[%d] = %d, rxq is not active\n",
- i, q_idx);
- else
- index = vf->vf_queues[q_idx].fw_rx_qid;
- p_rss->rss_ind_table[i] = index;
+ q_idx = p_rss_tlv->rss_ind_table[i];
+ if (!ecore_iov_validate_rxq(p_hwfn, vf, q_idx,
+ ECORE_IOV_VALIDATE_Q_ENABLE)) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d]: Omitting RSS due to wrong queue %04x\n",
+ vf->relative_vf_id, q_idx);
+ b_reject = true;
+ goto out;
+ }
+
+ p_cid = ecore_iov_get_vf_rx_queue_cid(p_hwfn, vf,
+ &vf->vf_queues[q_idx]);
+ p_rss->rss_ind_table[i] = p_cid;
}
p_data->rss_params = p_rss;
+out:
*tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_RSS;
+ if (!b_reject)
+ *tlvs_accepted |= 1 << ECORE_IOV_VP_UPDATE_RSS;
}
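The RSS TLV is the first extended TLV with separate accept bookkeeping: its bit is always set in tlvs_mask, but tlvs_accepted is set only when every indirection-table entry referenced a valid, enabled Rx queue. A condensed sketch of that logic; the bit position and the entry_valid input are illustrative:

    #include <stdbool.h>
    #include <stdint.h>

    #define EX_TLV_RSS_BIT 5u /* illustrative bit position */

    static void ex_update_rss_masks(const bool *entry_valid, int table_size,
                                    uint16_t *tlvs_mask,
                                    uint16_t *tlvs_accepted)
    {
        bool b_reject = false;
        int i;

        for (i = 0; i < table_size; i++) {
            if (!entry_valid[i]) {
                b_reject = true; /* one bad entry rejects the whole TLV */
                break;
            }
        }

        *tlvs_mask |= 1u << EX_TLV_RSS_BIT;
        if (!b_reject)
            *tlvs_accepted |= 1u << EX_TLV_RSS_BIT;
    }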
static void
@@ -2540,11 +2996,11 @@ static void ecore_iov_vf_mbx_vport_update(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_vf_info *vf)
{
+ struct ecore_rss_params *p_rss_params = OSAL_NULL;
struct ecore_sp_vport_update_params params;
struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
struct ecore_sge_tpa_params sge_tpa_params;
u16 tlvs_mask = 0, tlvs_accepted = 0;
- struct ecore_rss_params rss_params;
u8 status = PFVF_STATUS_SUCCESS;
u16 length;
enum _ecore_status_t rc;
@@ -2559,6 +3015,12 @@ static void ecore_iov_vf_mbx_vport_update(struct ecore_hwfn *p_hwfn,
goto out;
}
+ p_rss_params = OSAL_VZALLOC(p_hwfn->p_dev, sizeof(*p_rss_params));
+ if (p_rss_params == OSAL_NULL) {
+ status = PFVF_STATUS_FAILURE;
+ goto out;
+ }
+
OSAL_MEMSET(&params, 0, sizeof(params));
params.opaque_fid = vf->opaque_fid;
params.vport_id = vf->vport_id;
@@ -2572,20 +3034,24 @@ static void ecore_iov_vf_mbx_vport_update(struct ecore_hwfn *p_hwfn,
ecore_iov_vp_update_tx_switch(p_hwfn, &params, mbx, &tlvs_mask);
ecore_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask);
ecore_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask);
- ecore_iov_vp_update_rss_param(p_hwfn, vf, &params, &rss_params,
- mbx, &tlvs_mask);
ecore_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask);
ecore_iov_vp_update_sge_tpa_param(p_hwfn, vf, &params,
&sge_tpa_params, mbx, &tlvs_mask);
+ tlvs_accepted = tlvs_mask;
+
+ /* Some of the extended TLVs need to be validated first; in that case,
+ * they can update the mask without updating the accepted mask [so that
+ * the PF can communicate to the VF that it has rejected the request].
+ */
+ ecore_iov_vp_update_rss_param(p_hwfn, vf, &params, p_rss_params,
+ mbx, &tlvs_mask, &tlvs_accepted);
+
/* Just log a message if there is no single extended tlv in buffer.
* Once all features of the vport-update ramrod are requested by the VF
* as extended TLVs in the buffer, an error can be returned in the
* response if there is no extended TLV present in the buffer.
*/
- tlvs_accepted = tlvs_mask;
-
-#ifndef LINUX_REMOVE
if (OSAL_IOV_VF_VPORT_UPDATE(p_hwfn, vf->relative_vf_id,
&params, &tlvs_accepted) !=
ECORE_SUCCESS) {
@@ -2593,7 +3059,6 @@ static void ecore_iov_vf_mbx_vport_update(struct ecore_hwfn *p_hwfn,
status = PFVF_STATUS_NOT_SUPPORTED;
goto out;
}
-#endif
if (!tlvs_accepted) {
if (tlvs_mask)
@@ -2614,6 +3079,7 @@ static void ecore_iov_vf_mbx_vport_update(struct ecore_hwfn *p_hwfn,
status = PFVF_STATUS_FAILURE;
out:
+ OSAL_VFREE(p_hwfn->p_dev, p_rss_params);
length = ecore_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status,
tlvs_mask, tlvs_accepted);
ecore_iov_send_response(p_hwfn, p_ptt, vf, length, status);
@@ -2916,6 +3382,88 @@ static void ecore_iov_vf_mbx_release(struct ecore_hwfn *p_hwfn,
length, status);
}
+static void ecore_iov_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *vf)
+{
+ struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ struct vfpf_update_coalesce *req;
+ u8 status = PFVF_STATUS_FAILURE;
+ struct ecore_queue_cid *p_cid;
+ u16 rx_coal, tx_coal;
+ u16 qid;
+ int i;
+
+ req = &mbx->req_virt->update_coalesce;
+
+ rx_coal = req->rx_coal;
+ tx_coal = req->tx_coal;
+ qid = req->qid;
+
+ if (!ecore_iov_validate_rxq(p_hwfn, vf, qid,
+ ECORE_IOV_VALIDATE_Q_ENABLE) &&
+ rx_coal) {
+ DP_ERR(p_hwfn, "VF[%d]: Invalid Rx queue_id = %d\n",
+ vf->abs_vf_id, qid);
+ goto out;
+ }
+
+ if (!ecore_iov_validate_txq(p_hwfn, vf, qid,
+ ECORE_IOV_VALIDATE_Q_ENABLE) &&
+ tx_coal) {
+ DP_ERR(p_hwfn, "VF[%d]: Invalid Tx queue_id = %d\n",
+ vf->abs_vf_id, qid);
+ goto out;
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d]: Setting coalesce for VF rx_coal = %d, tx_coal = %d at queue = %d\n",
+ vf->abs_vf_id, rx_coal, tx_coal, qid);
+
+ if (rx_coal) {
+ p_cid = ecore_iov_get_vf_rx_queue_cid(p_hwfn, vf,
+ &vf->vf_queues[qid]);
+
+ rc = ecore_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
+ if (rc != ECORE_SUCCESS) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d]: Unable to set rx queue = %d coalesce\n",
+ vf->abs_vf_id, vf->vf_queues[qid].fw_rx_qid);
+ goto out;
+ }
+ }
+
+ /* TODO - in future, it might be possible to pass this in a per-cid
+ * granularity. For now, do this for all Tx queues.
+ */
+ if (tx_coal) {
+ struct ecore_vf_queue *p_queue = &vf->vf_queues[qid];
+
+ for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
+ if (p_queue->cids[i].p_cid == OSAL_NULL)
+ continue;
+
+ if (!p_queue->cids[i].b_is_tx)
+ continue;
+
+ rc = ecore_set_txq_coalesce(p_hwfn, p_ptt, tx_coal,
+ p_queue->cids[i].p_cid);
+ if (rc != ECORE_SUCCESS) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d]: Unable to set tx queue coalesce\n",
+ vf->abs_vf_id);
+ goto out;
+ }
+ }
+ }
+
+ status = PFVF_STATUS_SUCCESS;
+out:
+ ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_COALESCE_UPDATE,
+ sizeof(struct pfvf_def_resp_tlv), status);
+}
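A coalesce value of zero means "leave unchanged", so the handler validates the Rx (or Tx) queue only when the matching value is non-zero. That gating condition, condensed into a standalone predicate:

    #include <stdbool.h>
    #include <stdint.h>

    static bool ex_coalesce_args_ok(uint16_t rx_coal, uint16_t tx_coal,
                                    bool rxq_valid, bool txq_valid)
    {
        if (rx_coal && !rxq_valid)
            return false; /* Rx value given for an invalid Rx queue */
        if (tx_coal && !txq_valid)
            return false; /* Tx value given for an invalid Tx queue */
        return true;      /* zero values skip configuration entirely */
    }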
+
static enum _ecore_status_t
ecore_iov_vf_flr_poll_dorq(struct ecore_hwfn *p_hwfn,
struct ecore_vf_info *p_vf, struct ecore_ptt *p_ptt)
@@ -3049,6 +3597,13 @@ ecore_iov_execute_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
return rc;
}
+ /* Workaround to make VF-PF channel ready, as FW
+ * doesn't do that as a part of FLR.
+ */
+ REG_WR(p_hwfn,
+ GTT_BAR0_MAP_REG_USDM_RAM +
+ USTORM_VF_PF_CHANNEL_READY_OFFSET(vfid), 1);
+
/* VF_STOPPED has to be set only after final cleanup
* but prior to re-enabling the VF.
*/
@@ -3115,9 +3670,10 @@ ecore_iov_single_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
return rc;
}
-int ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn, u32 *p_disabled_vfs)
+bool ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn, u32 *p_disabled_vfs)
{
- u16 i, found = 0;
+ bool found = false;
+ u16 i;
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "Marking FLR-ed VFs\n");
for (i = 0; i < (VF_MAX_STATIC / 32); i++)
@@ -3127,7 +3683,7 @@ int ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn, u32 *p_disabled_vfs)
if (!p_hwfn->p_dev->p_iov_info) {
DP_NOTICE(p_hwfn, true, "VF flr but no IOV\n");
- return 0;
+ return false;
}
/* Mark VFs */
@@ -3156,7 +3712,7 @@ int ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn, u32 *p_disabled_vfs)
* VF flr until ACKs, we're safe.
*/
p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64);
- found = 1;
+ found = true;
}
}
@@ -3215,7 +3771,8 @@ void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,
p_vf, mbx->first_tlv.tl.type);
/* check if tlv type is known */
- if (ecore_iov_tlv_supported(mbx->first_tlv.tl.type)) {
+ if (ecore_iov_tlv_supported(mbx->first_tlv.tl.type) &&
+ !p_vf->b_malicious) {
/* switch on the opcode */
switch (mbx->first_tlv.tl.type) {
case CHANNEL_TLV_ACQUIRE:
@@ -3257,7 +3814,34 @@ void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,
case CHANNEL_TLV_RELEASE:
ecore_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf);
break;
+ case CHANNEL_TLV_UPDATE_TUNN_PARAM:
+ ecore_iov_vf_mbx_update_tunn_param(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_COALESCE_UPDATE:
+ ecore_iov_vf_pf_set_coalesce(p_hwfn, p_ptt, p_vf);
+ break;
}
+ } else if (ecore_iov_tlv_supported(mbx->first_tlv.tl.type)) {
+ /* If we've received a message from a VF we consider malicious,
+ * we ignore the message unless it's a RELEASE, in which case we
+ * give it the benefit of the doubt, allowing the next loaded
+ * driver to start again.
+ */
+ if (mbx->first_tlv.tl.type == CHANNEL_TLV_RELEASE) {
+ /* TODO - initiate FLR, remove malicious indication */
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF [%02x] - considered malicious, but wanted to RELEASE. TODO\n",
+ p_vf->abs_vf_id);
+ } else {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF [%02x] - considered malicious; Ignoring TLV [%04x]\n",
+ p_vf->abs_vf_id, mbx->first_tlv.tl.type);
+ }
+
+ ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
+ mbx->first_tlv.tl.type,
+ sizeof(struct pfvf_def_resp_tlv),
+ PFVF_STATUS_MALICIOUS);
} else {
/* unknown TLV - this may belong to a VF driver from the future
* - a version written after this PF driver was written, which
@@ -3322,21 +3906,31 @@ void ecore_iov_pf_get_and_clear_pending_events(struct ecore_hwfn *p_hwfn,
sizeof(u64) * ECORE_VF_ARRAY_LENGTH);
}
-static enum _ecore_status_t ecore_sriov_vfpf_msg(struct ecore_hwfn *p_hwfn,
- u16 abs_vfid,
- struct regpair *vf_msg)
+static struct ecore_vf_info *
+ecore_sriov_get_vf_from_absid(struct ecore_hwfn *p_hwfn, u16 abs_vfid)
{
u8 min = (u8)p_hwfn->p_dev->p_iov_info->first_vf_in_pf;
- struct ecore_vf_info *p_vf;
- if (!ecore_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min)) {
+ if (!_ecore_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min, false)) {
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
- "Got a message from VF [abs 0x%08x] that cannot be"
+ "Got indication for VF [abs 0x%08x] that cannot be"
" handled by PF\n",
abs_vfid);
- return ECORE_SUCCESS;
+ return OSAL_NULL;
}
- p_vf = &p_hwfn->pf_iov_info->vfs_array[(u8)abs_vfid - min];
+
+ return &p_hwfn->pf_iov_info->vfs_array[(u8)abs_vfid - min];
+}
+
+static enum _ecore_status_t ecore_sriov_vfpf_msg(struct ecore_hwfn *p_hwfn,
+ u16 abs_vfid,
+ struct regpair *vf_msg)
+{
+ struct ecore_vf_info *p_vf = ecore_sriov_get_vf_from_absid(p_hwfn,
+ abs_vfid);
+
+ if (!p_vf)
+ return ECORE_SUCCESS;
/* List the physical address of the request so that handler
* could later on copy the message from it.
@@ -3346,6 +3940,25 @@ static enum _ecore_status_t ecore_sriov_vfpf_msg(struct ecore_hwfn *p_hwfn,
return OSAL_PF_VF_MSG(p_hwfn, p_vf->relative_vf_id);
}
+static void ecore_sriov_vfpf_malicious(struct ecore_hwfn *p_hwfn,
+ struct malicious_vf_eqe_data *p_data)
+{
+ struct ecore_vf_info *p_vf;
+
+ p_vf = ecore_sriov_get_vf_from_absid(p_hwfn, p_data->vfId);
+
+ if (!p_vf)
+ return;
+
+ DP_INFO(p_hwfn,
+ "VF [%d] - Malicious behavior [%02x]\n",
+ p_vf->abs_vf_id, p_data->errId);
+
+ p_vf->b_malicious = true;
+
+ OSAL_PF_VF_MALICIOUS(p_hwfn, p_vf->relative_vf_id);
+}
+
enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
u8 opcode,
__le16 echo,
@@ -3359,6 +3972,9 @@ enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
"VF-FLR is still not supported\n");
return ECORE_SUCCESS;
+ case COMMON_EVENT_MALICIOUS_VF:
+ ecore_sriov_vfpf_malicious(p_hwfn, &data->malicious_vf);
+ return ECORE_SUCCESS;
default:
DP_INFO(p_hwfn->p_dev, "Unknown sriov eqe event 0x%02x\n",
opcode);
@@ -3381,11 +3997,11 @@ u16 ecore_iov_get_next_active_vf(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
goto out;
for (i = rel_vf_id; i < p_iov->total_vfs; i++)
- if (ecore_iov_is_valid_vfid(p_hwfn, rel_vf_id, true))
+ if (ecore_iov_is_valid_vfid(p_hwfn, rel_vf_id, true, false))
return i;
out:
- return MAX_NUM_VFS;
+ return E4_MAX_NUM_VFS;
}
enum _ecore_status_t ecore_iov_copy_vf_msg(struct ecore_hwfn *p_hwfn,
@@ -3427,6 +4043,12 @@ void ecore_iov_bulletin_set_forced_mac(struct ecore_hwfn *p_hwfn,
"Can not set forced MAC, invalid vfid [%d]\n", vfid);
return;
}
+ if (vf_info->b_malicious) {
+ DP_NOTICE(p_hwfn->p_dev, false,
+ "Can't set forced MAC to malicious VF [%d]\n",
+ vfid);
+ return;
+ }
feature = 1 << MAC_ADDR_FORCED;
OSAL_MEMCPY(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);
@@ -3451,6 +4073,12 @@ enum _ecore_status_t ecore_iov_bulletin_set_mac(struct ecore_hwfn *p_hwfn,
"Can not set MAC, invalid vfid [%d]\n", vfid);
return ECORE_INVAL;
}
+ if (vf_info->b_malicious) {
+ DP_NOTICE(p_hwfn->p_dev, false,
+ "Can't set MAC to malicious VF [%d]\n",
+ vfid);
+ return ECORE_INVAL;
+ }
if (vf_info->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)) {
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
@@ -3476,7 +4104,14 @@ ecore_iov_bulletin_set_forced_untagged_default(struct ecore_hwfn *p_hwfn,
vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
if (!vf_info) {
DP_NOTICE(p_hwfn->p_dev, true,
- "Can not set forced MAC, invalid vfid [%d]\n", vfid);
+ "Can not set untagged default, invalid vfid [%d]\n",
+ vfid);
+ return ECORE_INVAL;
+ }
+ if (vf_info->b_malicious) {
+ DP_NOTICE(p_hwfn->p_dev, false,
+ "Can't set untagged default to malicious VF [%d]\n",
+ vfid);
return ECORE_INVAL;
}
@@ -3516,18 +4151,6 @@ void ecore_iov_get_vfs_opaque_fid(struct ecore_hwfn *p_hwfn, int vfid,
*opaque_fid = vf_info->opaque_fid;
}
-void ecore_iov_get_vfs_vport_id(struct ecore_hwfn *p_hwfn, int vfid,
- u8 *p_vort_id)
-{
- struct ecore_vf_info *vf_info;
-
- vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
- if (!vf_info)
- return;
-
- *p_vort_id = vf_info->vport_id;
-}
-
void ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn *p_hwfn,
u16 pvid, int vfid)
{
@@ -3541,6 +4164,12 @@ void ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn *p_hwfn,
vfid);
return;
}
+ if (vf_info->b_malicious) {
+ DP_NOTICE(p_hwfn->p_dev, false,
+ "Can't set forced vlan to malicious VF [%d]\n",
+ vfid);
+ return;
+ }
feature = 1 << VLAN_ADDR_FORCED;
vf_info->bulletin.p_virt->pvid = pvid;
@@ -3552,6 +4181,29 @@ void ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn *p_hwfn,
ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);
}
+void ecore_iov_bulletin_set_udp_ports(struct ecore_hwfn *p_hwfn,
+ int vfid, u16 vxlan_port, u16 geneve_port)
+{
+ struct ecore_vf_info *vf_info;
+
+ vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!vf_info) {
+ DP_NOTICE(p_hwfn->p_dev, true,
+ "Can not set udp ports, invalid vfid [%d]\n", vfid);
+ return;
+ }
+
+ if (vf_info->b_malicious) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Can not set udp ports to malicious VF [%d]\n",
+ vfid);
+ return;
+ }
+
+ vf_info->bulletin.p_virt->vxlan_udp_port = vxlan_port;
+ vf_info->bulletin.p_virt->geneve_udp_port = geneve_port;
+}
+
bool ecore_iov_vf_has_vport_instance(struct ecore_hwfn *p_hwfn, int vfid)
{
struct ecore_vf_info *p_vf_info;
@@ -3734,30 +4386,6 @@ enum _ecore_status_t ecore_iov_configure_tx_rate(struct ecore_hwfn *p_hwfn,
return ecore_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val);
}
-enum _ecore_status_t ecore_iov_configure_min_tx_rate(struct ecore_dev *p_dev,
- int vfid, u32 rate)
-{
- struct ecore_vf_info *vf;
- u8 vport_id;
- int i;
-
- for_each_hwfn(p_dev, i) {
- struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
-
- if (!ecore_iov_pf_sanity_check(p_hwfn, vfid)) {
- DP_NOTICE(p_hwfn, true,
- "SR-IOV sanity check failed,"
- " can't set min rate\n");
- return ECORE_INVAL;
- }
- }
-
- vf = ecore_iov_get_vf_info(ECORE_LEADING_HWFN(p_dev), (u16)vfid, true);
- vport_id = vf->vport_id;
-
- return ecore_configure_vport_wfq(p_dev, vport_id, rate);
-}
-
enum _ecore_status_t ecore_iov_get_vf_stats(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
int vfid,
@@ -3856,7 +4484,20 @@ bool ecore_iov_is_vf_initialized(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
return (p_vf->state == VF_ENABLED);
}
-int ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid)
+bool ecore_iov_is_vf_started(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id)
+{
+ struct ecore_vf_info *p_vf;
+
+ p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+ if (!p_vf)
+ return false;
+
+ return (p_vf->state != VF_FREE && p_vf->state != VF_STOPPED);
+}
+
+enum _ecore_status_t
+ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid)
{
struct ecore_wfq_data *vf_vp_wfq;
struct ecore_vf_info *vf_info;
diff --git a/drivers/net/qede/base/ecore_sriov.h b/drivers/net/qede/base/ecore_sriov.h
index ed6ddc49..3c2f58bd 100644
--- a/drivers/net/qede/base/ecore_sriov.h
+++ b/drivers/net/qede/base/ecore_sriov.h
@@ -13,9 +13,10 @@
#include "ecore_vfpf_if.h"
#include "ecore_iov_api.h"
#include "ecore_hsi_common.h"
+#include "ecore_l2.h"
#define ECORE_ETH_MAX_VF_NUM_VLAN_FILTERS \
- (MAX_NUM_VFS * ECORE_ETH_VF_NUM_VLAN_FILTERS)
+ (E4_MAX_NUM_VFS * ECORE_ETH_VF_NUM_VLAN_FILTERS)
/* Represents a full message. Both the request filled by VF
* and the response filled by the PF. The VF needs one copy
@@ -62,12 +63,18 @@ struct ecore_iov_vf_mbx {
*/
};
-struct ecore_vf_q_info {
+struct ecore_vf_queue_cid {
+ bool b_is_tx;
+ struct ecore_queue_cid *p_cid;
+};
+
+/* Describes a qzone associated with the VF */
+struct ecore_vf_queue {
+ /* Input from the upper layer, mapping a relative queue to a queue-zone */
u16 fw_rx_qid;
u16 fw_tx_qid;
- u8 fw_cid;
- u8 rxq_active;
- u8 txq_active;
+
+ struct ecore_vf_queue_cid cids[MAX_QUEUES_PER_QZONE];
};
enum vf_state {
@@ -97,6 +104,7 @@ struct ecore_vf_info {
struct ecore_iov_vf_mbx vf_mbx;
enum vf_state state;
bool b_init;
+ bool b_malicious;
u8 to_disable;
struct ecore_bulletin bulletin;
@@ -110,6 +118,7 @@ struct ecore_vf_info {
u16 mtu;
u8 vport_id;
+ u8 rss_eng_id;
u8 relative_vf_id;
u8 abs_vf_id;
#define ECORE_VF_ABS_ID(p_hwfn, p_vf) (ECORE_PATH_ID(p_hwfn) ? \
@@ -125,7 +134,7 @@ struct ecore_vf_info {
u8 num_mac_filters;
u8 num_vlan_filters;
- struct ecore_vf_q_info vf_queues[ECORE_MAX_VF_CHAINS_PER_PF];
+ struct ecore_vf_queue vf_queues[ECORE_MAX_VF_CHAINS_PER_PF];
u16 igu_sbs[ECORE_MAX_VF_CHAINS_PER_PF];
/* TODO - Only windows is using it - should be removed */
@@ -151,10 +160,9 @@ struct ecore_vf_info {
* capability enabled.
*/
struct ecore_pf_iov {
- struct ecore_vf_info vfs_array[MAX_NUM_VFS];
+ struct ecore_vf_info vfs_array[E4_MAX_NUM_VFS];
u64 pending_events[ECORE_VF_ARRAY_LENGTH];
u64 pending_flr[ECORE_VF_ARRAY_LENGTH];
- u16 base_vport_id;
#ifndef REMOVE_DBG
/* This doesn't serve anything functionally, but it makes windows
@@ -276,8 +284,8 @@ u32 ecore_crc32(u32 crc,
*
* @return true iff one of the PF's VFs got FLRed, false otherwise.
*/
-int ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn,
- u32 *disabled_vfs);
+bool ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn,
+ u32 *disabled_vfs);
/**
* @brief Search extended TLVs in request/reply buffer.
diff --git a/drivers/net/qede/base/ecore_status.h b/drivers/net/qede/base/ecore_status.h
index 6277bc80..c77ec260 100644
--- a/drivers/net/qede/base/ecore_status.h
+++ b/drivers/net/qede/base/ecore_status.h
@@ -10,6 +10,7 @@
#define __ECORE_STATUS_H__
enum _ecore_status_t {
+ ECORE_CONN_RESET = -13,
ECORE_UNKNOWN_ERROR = -12,
ECORE_NORESOURCES = -11,
ECORE_NODEV = -10,
diff --git a/drivers/net/qede/base/ecore_utils.h b/drivers/net/qede/base/ecore_utils.h
index 616b44c2..034cf1eb 100644
--- a/drivers/net/qede/base/ecore_utils.h
+++ b/drivers/net/qede/base/ecore_utils.h
@@ -10,6 +10,12 @@
#define __ECORE_UTILS_H__
/* dma_addr_t manip */
+/* Suppress the "right shift count >= width of type" warning when the
+ * quantity is only 32 bits wide; this requires splitting the shift
+ * into two 16-bit halves: (x >> 16) >> 16.
+ */
+#define PTR_LO(x) ((u32)(((osal_uintptr_t)(x)) & 0xffffffff))
+#define PTR_HI(x) ((u32)((((osal_uintptr_t)(x)) >> 16) >> 16))
+
#define DMA_LO(x) ((u32)(((dma_addr_t)(x)) & 0xffffffff))
#define DMA_HI(x) ((u32)(((dma_addr_t)(x)) >> 32))
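The double shift exists because x >> 32 is undefined when osal_uintptr_t is only 32 bits wide, while (x >> 16) >> 16 is well-defined on both widths and yields 0 for the high word on a 32-bit build. A self-contained demonstration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uintptr_t p = (uintptr_t)&p; /* any pointer-sized value */
        uint32_t lo = (uint32_t)(p & 0xffffffff);
        uint32_t hi = (uint32_t)((p >> 16) >> 16); /* no UB on 32-bit */

        printf("hi=0x%08x lo=0x%08x\n", hi, lo);
        return 0;
    }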
diff --git a/drivers/net/qede/base/ecore_vf.c b/drivers/net/qede/base/ecore_vf.c
index be8b1ec4..f4d331cf 100644
--- a/drivers/net/qede/base/ecore_vf.c
+++ b/drivers/net/qede/base/ecore_vf.c
@@ -65,13 +65,15 @@ static void ecore_vf_pf_req_end(struct ecore_hwfn *p_hwfn,
OSAL_MUTEX_RELEASE(&p_hwfn->vf_iov_info->mutex);
}
-static int ecore_send_msg2pf(struct ecore_hwfn *p_hwfn,
- u8 *done, u32 resp_size)
+static enum _ecore_status_t
+ecore_send_msg2pf(struct ecore_hwfn *p_hwfn,
+ u8 *done, u32 resp_size)
{
union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request;
struct ustorm_trigger_vf_zone trigger;
struct ustorm_vf_zone *zone_data;
- int rc = ECORE_SUCCESS, time = 100;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ int time = 100;
zone_data = (struct ustorm_vf_zone *)PXP_VF_BAR0_START_USDM_ZONE_B;
@@ -81,16 +83,6 @@ static int ecore_send_msg2pf(struct ecore_hwfn *p_hwfn,
/* need to add the END TLV to the message size */
resp_size += sizeof(struct channel_list_end_tlv);
- if (!p_hwfn->p_dev->b_hw_channel) {
- rc = OSAL_VF_SEND_MSG2PF(p_hwfn->p_dev,
- done,
- p_req,
- p_hwfn->vf_iov_info->pf2vf_reply,
- sizeof(union vfpf_tlvs), resp_size);
- /* TODO - no prints about message ? */
- return rc;
- }
-
/* Send TLVs over HW channel */
OSAL_MEMSET(&trigger, 0, sizeof(struct ustorm_trigger_vf_zone));
trigger.vf_pf_msg_valid = 1;
@@ -134,7 +126,6 @@ static int ecore_send_msg2pf(struct ecore_hwfn *p_hwfn,
"VF <-- PF Timeout [Type %d]\n",
p_req->first_tlv.tl.type);
rc = ECORE_TIMEOUT;
- return rc;
} else {
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
"PF response: %d [Type %d]\n",
@@ -294,8 +285,17 @@ static enum _ecore_status_t ecore_vf_pf_acquire(struct ecore_hwfn *p_hwfn)
" override\n");
req->vfdev_info.capabilities |=
VFPF_ACQUIRE_CAP_PRE_FP_HSI;
+ continue;
}
}
+
+ /* If PF/VF are using the same Major, the PF must have had
+ * its reasons. Simply fail.
+ */
+ DP_NOTICE(p_hwfn, false,
+ "PF rejected acquisition by VF\n");
+ rc = ECORE_INVAL;
+ goto exit;
} else {
DP_ERR(p_hwfn,
"PF returned err %d to VF acquisition request\n",
@@ -386,8 +386,6 @@ enum _ecore_status_t ecore_vf_hw_prepare(struct ecore_hwfn *p_hwfn)
return ECORE_NOMEM;
}
- OSAL_MEMSET(p_iov, 0, sizeof(*p_iov));
-
/* Allocate vf2pf msg */
p_iov->vf2pf_request = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
&p_iov->
@@ -453,20 +451,174 @@ free_p_iov:
#define MSTORM_QZONE_START(dev) (TSTORM_QZONE_START + \
(TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev)))
-enum _ecore_status_t ecore_vf_pf_rxq_start(struct ecore_hwfn *p_hwfn,
- u8 rx_qid,
- u16 sb,
- u8 sb_index,
- u16 bd_max_bytes,
- dma_addr_t bd_chain_phys_addr,
- dma_addr_t cqe_pbl_addr,
- u16 cqe_pbl_size,
- void OSAL_IOMEM **pp_prod)
+/* @DPDK - changed enum ecore_tunn_clss to enum ecore_tunn_mode */
+static void
+__ecore_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
+ struct ecore_tunn_update_type *p_src,
+ enum ecore_tunn_mode mask, u8 *p_cls)
+{
+ if (p_src->b_update_mode) {
+ p_req->tun_mode_update_mask |= (1 << mask);
+
+ if (p_src->b_mode_enabled)
+ p_req->tunn_mode |= (1 << mask);
+ }
+
+ *p_cls = p_src->tun_cls;
+}
+
+/* @DPDK - changed enum ecore_tunn_clss to enum ecore_tunn_mode */
+static void
+ecore_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
+ struct ecore_tunn_update_type *p_src,
+ enum ecore_tunn_mode mask, u8 *p_cls,
+ struct ecore_tunn_update_udp_port *p_port,
+ u8 *p_update_port, u16 *p_udp_port)
+{
+ if (p_port->b_update_port) {
+ *p_update_port = 1;
+ *p_udp_port = p_port->port;
+ }
+
+ __ecore_vf_prep_tunn_req_tlv(p_req, p_src, mask, p_cls);
+}
+
+void ecore_vf_set_vf_start_tunn_update_param(struct ecore_tunnel_info *p_tun)
+{
+ if (p_tun->vxlan.b_mode_enabled)
+ p_tun->vxlan.b_update_mode = true;
+ if (p_tun->l2_geneve.b_mode_enabled)
+ p_tun->l2_geneve.b_update_mode = true;
+ if (p_tun->ip_geneve.b_mode_enabled)
+ p_tun->ip_geneve.b_update_mode = true;
+ if (p_tun->l2_gre.b_mode_enabled)
+ p_tun->l2_gre.b_update_mode = true;
+ if (p_tun->ip_gre.b_mode_enabled)
+ p_tun->ip_gre.b_update_mode = true;
+
+ p_tun->b_update_rx_cls = true;
+ p_tun->b_update_tx_cls = true;
+}
+
+static void
+__ecore_vf_update_tunn_param(struct ecore_tunn_update_type *p_tun,
+ u16 feature_mask, u8 tunn_mode, u8 tunn_cls,
+ enum ecore_tunn_mode val)
+{
+ if (feature_mask & (1 << val)) {
+ p_tun->b_mode_enabled = tunn_mode;
+ p_tun->tun_cls = tunn_cls;
+ } else {
+ p_tun->b_mode_enabled = false;
+ }
+}
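The per-tunnel update rule above is uniform: the mode/class pair from the PF response is applied only when that tunnel's bit is set in the feature mask, otherwise the mode is forced off. The same rule over plain scalars, with hypothetical ex_* naming:

    #include <stdbool.h>
    #include <stdint.h>

    static void ex_apply_tunn_update(bool *enabled, uint8_t *cls,
                                     uint16_t feat_mask, unsigned int bit,
                                     bool pf_mode, uint8_t pf_cls)
    {
        if (feat_mask & (1u << bit)) {
            *enabled = pf_mode; /* PF supports it - take its mode/class */
            *cls = pf_cls;
        } else {
            *enabled = false;   /* unsupported - force the mode off */
        }
    }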
+
+static void
+ecore_vf_update_tunn_param(struct ecore_hwfn *p_hwfn,
+ struct ecore_tunnel_info *p_tun,
+ struct pfvf_update_tunn_param_tlv *p_resp)
+{
+ /* Update mode and classes provided by PF */
+ u16 feat_mask = p_resp->tunn_feature_mask;
+
+ __ecore_vf_update_tunn_param(&p_tun->vxlan, feat_mask,
+ p_resp->vxlan_mode, p_resp->vxlan_clss,
+ ECORE_MODE_VXLAN_TUNN);
+ __ecore_vf_update_tunn_param(&p_tun->l2_geneve, feat_mask,
+ p_resp->l2geneve_mode,
+ p_resp->l2geneve_clss,
+ ECORE_MODE_L2GENEVE_TUNN);
+ __ecore_vf_update_tunn_param(&p_tun->ip_geneve, feat_mask,
+ p_resp->ipgeneve_mode,
+ p_resp->ipgeneve_clss,
+ ECORE_MODE_IPGENEVE_TUNN);
+ __ecore_vf_update_tunn_param(&p_tun->l2_gre, feat_mask,
+ p_resp->l2gre_mode, p_resp->l2gre_clss,
+ ECORE_MODE_L2GRE_TUNN);
+ __ecore_vf_update_tunn_param(&p_tun->ip_gre, feat_mask,
+ p_resp->ipgre_mode, p_resp->ipgre_clss,
+ ECORE_MODE_IPGRE_TUNN);
+ p_tun->geneve_port.port = p_resp->geneve_udp_port;
+ p_tun->vxlan_port.port = p_resp->vxlan_udp_port;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "tunn mode: vxlan=0x%x, l2geneve=0x%x, ipgeneve=0x%x, l2gre=0x%x, ipgre=0x%x",
+ p_tun->vxlan.b_mode_enabled, p_tun->l2_geneve.b_mode_enabled,
+ p_tun->ip_geneve.b_mode_enabled,
+ p_tun->l2_gre.b_mode_enabled,
+ p_tun->ip_gre.b_mode_enabled);
+}
+
+enum _ecore_status_t
+ecore_vf_pf_tunnel_param_update(struct ecore_hwfn *p_hwfn,
+ struct ecore_tunnel_info *p_src)
+{
+ struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel;
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_update_tunn_param_tlv *p_resp;
+ struct vfpf_update_tunn_param_tlv *p_req;
+ enum _ecore_status_t rc;
+
+ p_req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_UPDATE_TUNN_PARAM,
+ sizeof(*p_req));
+
+ if (p_src->b_update_rx_cls && p_src->b_update_tx_cls)
+ p_req->update_tun_cls = 1;
+
+ ecore_vf_prep_tunn_req_tlv(p_req, &p_src->vxlan, ECORE_MODE_VXLAN_TUNN,
+ &p_req->vxlan_clss, &p_src->vxlan_port,
+ &p_req->update_vxlan_port,
+ &p_req->vxlan_port);
+ ecore_vf_prep_tunn_req_tlv(p_req, &p_src->l2_geneve,
+ ECORE_MODE_L2GENEVE_TUNN,
+ &p_req->l2geneve_clss, &p_src->geneve_port,
+ &p_req->update_geneve_port,
+ &p_req->geneve_port);
+ __ecore_vf_prep_tunn_req_tlv(p_req, &p_src->ip_geneve,
+ ECORE_MODE_IPGENEVE_TUNN,
+ &p_req->ipgeneve_clss);
+ __ecore_vf_prep_tunn_req_tlv(p_req, &p_src->l2_gre,
+ ECORE_MODE_L2GRE_TUNN, &p_req->l2gre_clss);
+ __ecore_vf_prep_tunn_req_tlv(p_req, &p_src->ip_gre,
+ ECORE_MODE_IPGRE_TUNN, &p_req->ipgre_clss);
+
+ /* add list termination tlv */
+ ecore_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ p_resp = &p_iov->pf2vf_reply->tunn_param_resp;
+ rc = ecore_send_msg2pf(p_hwfn, &p_resp->hdr.status, sizeof(*p_resp));
+
+ if (rc)
+ goto exit;
+
+ if (p_resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Failed to update tunnel parameters\n");
+ rc = ECORE_INVAL;
+ }
+
+ ecore_vf_update_tunn_param(p_hwfn, p_tun, p_resp);
+exit:
+ ecore_vf_pf_req_end(p_hwfn, rc);
+ return rc;
+}
+
+enum _ecore_status_t
+ecore_vf_pf_rxq_start(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size,
+ void OSAL_IOMEM **pp_prod)
{
struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct pfvf_start_queue_resp_tlv *resp;
struct vfpf_start_rxq_tlv *req;
- int rc;
+ u16 rx_qid = p_cid->rel.queue_id;
+ enum _ecore_status_t rc;
/* clear mailbox and prep first tlv */
req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_RXQ, sizeof(*req));
@@ -475,19 +627,20 @@ enum _ecore_status_t ecore_vf_pf_rxq_start(struct ecore_hwfn *p_hwfn,
req->cqe_pbl_addr = cqe_pbl_addr;
req->cqe_pbl_size = cqe_pbl_size;
req->rxq_addr = bd_chain_phys_addr;
- req->hw_sb = sb;
- req->sb_index = sb_index;
+ req->hw_sb = p_cid->rel.sb;
+ req->sb_index = p_cid->rel.sb_idx;
req->bd_max_bytes = bd_max_bytes;
req->stat_id = -1; /* Keep initialized, for future compatibility */
/* If PF is legacy, we'll need to calculate producers ourselves
* as well as clean them.
*/
- if (pp_prod && p_iov->b_pre_fp_hsi) {
+ if (p_iov->b_pre_fp_hsi) {
u8 hw_qid = p_iov->acquire_resp.resc.hw_qid[rx_qid];
u32 init_prod_val = 0;
- *pp_prod = (u8 OSAL_IOMEM *)p_hwfn->regview +
+ *pp_prod = (u8 OSAL_IOMEM *)
+ p_hwfn->regview +
MSTORM_QZONE_START(p_hwfn->p_dev) +
(hw_qid) * MSTORM_QZONE_SIZE;
@@ -512,7 +665,7 @@ enum _ecore_status_t ecore_vf_pf_rxq_start(struct ecore_hwfn *p_hwfn,
}
/* Learn the address of the producer from the response */
- if (pp_prod && !p_iov->b_pre_fp_hsi) {
+ if (!p_iov->b_pre_fp_hsi) {
u32 init_prod_val = 0;
*pp_prod = (u8 OSAL_IOMEM *)p_hwfn->regview + resp->offset;
@@ -536,17 +689,18 @@ exit:
}
enum _ecore_status_t ecore_vf_pf_rxq_stop(struct ecore_hwfn *p_hwfn,
- u16 rx_qid, bool cqe_completion)
+ struct ecore_queue_cid *p_cid,
+ bool cqe_completion)
{
struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct vfpf_stop_rxqs_tlv *req;
struct pfvf_def_resp_tlv *resp;
- int rc;
+ enum _ecore_status_t rc;
/* clear mailbox and prep first tlv */
req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_RXQS, sizeof(*req));
- req->rx_qid = rx_qid;
+ req->rx_qid = p_cid->rel.queue_id;
req->num_rxqs = 1;
req->cqe_completion = cqe_completion;
@@ -571,29 +725,28 @@ exit:
return rc;
}
-enum _ecore_status_t ecore_vf_pf_txq_start(struct ecore_hwfn *p_hwfn,
- u16 tx_queue_id,
- u16 sb,
- u8 sb_index,
- dma_addr_t pbl_addr,
- u16 pbl_size,
- void OSAL_IOMEM **pp_doorbell)
+enum _ecore_status_t
+ecore_vf_pf_txq_start(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid,
+ dma_addr_t pbl_addr, u16 pbl_size,
+ void OSAL_IOMEM **pp_doorbell)
{
struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct pfvf_start_queue_resp_tlv *resp;
struct vfpf_start_txq_tlv *req;
- int rc;
+ u16 qid = p_cid->rel.queue_id;
+ enum _ecore_status_t rc;
/* clear mailbox and prep first tlv */
req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_TXQ, sizeof(*req));
- req->tx_qid = tx_queue_id;
+ req->tx_qid = qid;
/* Tx */
req->pbl_addr = pbl_addr;
req->pbl_size = pbl_size;
- req->hw_sb = sb;
- req->sb_index = sb_index;
+ req->hw_sb = p_cid->rel.sb;
+ req->sb_index = p_cid->rel.sb_idx;
/* add list termination tlv */
ecore_add_tlv(p_hwfn, &p_iov->offset,
@@ -610,42 +763,40 @@ enum _ecore_status_t ecore_vf_pf_txq_start(struct ecore_hwfn *p_hwfn,
goto exit;
}
- if (pp_doorbell) {
- /* Modern PFs provide the actual offsets, while legacy
- * provided only the queue id.
- */
- if (!p_iov->b_pre_fp_hsi) {
- *pp_doorbell = (u8 OSAL_IOMEM *)p_hwfn->doorbells +
- resp->offset;
- } else {
- u8 cid = p_iov->acquire_resp.resc.cid[tx_queue_id];
-
+ /* Modern PFs provide the actual offsets, while legacy
+ * provided only the queue id.
+ */
+ if (!p_iov->b_pre_fp_hsi) {
*pp_doorbell = (u8 OSAL_IOMEM *)p_hwfn->doorbells +
- DB_ADDR_VF(cid, DQ_DEMS_LEGACY);
- }
+ resp->offset;
+ } else {
+ u8 cid = p_iov->acquire_resp.resc.cid[qid];
- DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
- "Txq[0x%02x]: doorbell at %p [offset 0x%08x]\n",
- tx_queue_id, *pp_doorbell, resp->offset);
+ *pp_doorbell = (u8 OSAL_IOMEM *)p_hwfn->doorbells +
+ DB_ADDR_VF(cid, DQ_DEMS_LEGACY);
}
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Txq[0x%02x]: doorbell at %p [offset 0x%08x]\n",
+ qid, *pp_doorbell, resp->offset);
exit:
ecore_vf_pf_req_end(p_hwfn, rc);
return rc;
}
-enum _ecore_status_t ecore_vf_pf_txq_stop(struct ecore_hwfn *p_hwfn, u16 tx_qid)
+enum _ecore_status_t ecore_vf_pf_txq_stop(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid)
{
struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct vfpf_stop_txqs_tlv *req;
struct pfvf_def_resp_tlv *resp;
- int rc;
+ enum _ecore_status_t rc;
/* clear mailbox and prep first tlv */
req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_TXQS, sizeof(*req));
- req->tx_qid = tx_qid;
+ req->tx_qid = p_cid->rel.queue_id;
req->num_txqs = 1;
/* add list termination tlv */
@@ -670,20 +821,36 @@ exit:
}
enum _ecore_status_t ecore_vf_pf_rxqs_update(struct ecore_hwfn *p_hwfn,
- u16 rx_queue_id,
+ struct ecore_queue_cid **pp_cid,
u8 num_rxqs,
- u8 comp_cqe_flg, u8 comp_event_flg)
+ u8 comp_cqe_flg,
+ u8 comp_event_flg)
{
struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
struct vfpf_update_rxq_tlv *req;
- int rc;
+ enum _ecore_status_t rc;
+
+ /* TODO - the API is limited to assuming contiguous regions of queues,
+ * but VF queues might not fulfill this requirement.
+ * Need to consider whether we need new TLVs for this, or whether
+ * simply doing it iteratively is good enough.
+ */
+ if (!num_rxqs)
+ return ECORE_INVAL;
+again:
/* clear mailbox and prep first tlv */
req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_UPDATE_RXQ, sizeof(*req));
- req->rx_qid = rx_queue_id;
- req->num_rxqs = num_rxqs;
+ /* Find the length of the current contiguous range of queues, beginning
+ * at the first queue's index.
+ */
+ req->rx_qid = (*pp_cid)->rel.queue_id;
+ for (req->num_rxqs = 1; req->num_rxqs < num_rxqs; req->num_rxqs++)
+ if (pp_cid[req->num_rxqs]->rel.queue_id !=
+ req->rx_qid + req->num_rxqs)
+ break;
if (comp_cqe_flg)
req->flags |= VFPF_RXQ_UPD_COMPLETE_CQE_FLAG;
@@ -704,9 +871,17 @@ enum _ecore_status_t ecore_vf_pf_rxqs_update(struct ecore_hwfn *p_hwfn,
goto exit;
}
+ /* Make sure we're done with all the queues */
+ if (req->num_rxqs < num_rxqs) {
+ num_rxqs -= req->num_rxqs;
+ pp_cid += req->num_rxqs;
+ /* TODO - should we give a non-locked variant instead? */
+ ecore_vf_pf_req_end(p_hwfn, rc);
+ goto again;
+ }
+
exit:
ecore_vf_pf_req_end(p_hwfn, rc);
-
return rc;
}
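Because the update-rxq TLV can only describe a contiguous range of queue ids, the loop above slices the caller's queue-cid array into maximal contiguous runs and issues one request per run. The same slicing over plain ids, as a hypothetical standalone sketch:

    #include <stdint.h>
    #include <stdio.h>

    static void ex_for_each_contiguous_run(const uint16_t *qids, int n)
    {
        int start = 0;

        while (start < n) {
            int len = 1;

            /* extend the run while the ids stay consecutive */
            while (start + len < n &&
                   qids[start + len] == qids[start] + len)
                len++;

            printf("send one TLV: first qid %u, count %d\n",
                   (unsigned int)qids[start], len);
            start += len;
        }
    }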
@@ -719,7 +894,8 @@ ecore_vf_pf_vport_start(struct ecore_hwfn *p_hwfn, u8 vport_id,
struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct vfpf_vport_start_tlv *req;
struct pfvf_def_resp_tlv *resp;
- int rc, i;
+ enum _ecore_status_t rc;
+ int i;
/* clear mailbox and prep first tlv */
req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_START, sizeof(*req));
@@ -761,7 +937,7 @@ enum _ecore_status_t ecore_vf_pf_vport_stop(struct ecore_hwfn *p_hwfn)
{
struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
- int rc;
+ enum _ecore_status_t rc;
/* clear mailbox and prep first tlv */
ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_TEARDOWN,
@@ -859,7 +1035,7 @@ ecore_vf_pf_vport_update(struct ecore_hwfn *p_hwfn,
u8 update_rx, update_tx;
u32 resp_size = 0;
u16 size, tlv;
- int rc;
+ enum _ecore_status_t rc;
resp = &p_iov->pf2vf_reply->default_resp;
resp_size = sizeof(*resp);
@@ -956,6 +1132,7 @@ ecore_vf_pf_vport_update(struct ecore_hwfn *p_hwfn,
if (p_params->rss_params) {
struct ecore_rss_params *rss_params = p_params->rss_params;
struct vfpf_vport_update_rss_tlv *p_rss_tlv;
+ int i, table_size;
size = sizeof(struct vfpf_vport_update_rss_tlv);
p_rss_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
@@ -977,8 +1154,16 @@ ecore_vf_pf_vport_update(struct ecore_hwfn *p_hwfn,
p_rss_tlv->rss_enable = rss_params->rss_enable;
p_rss_tlv->rss_caps = rss_params->rss_caps;
p_rss_tlv->rss_table_size_log = rss_params->rss_table_size_log;
- OSAL_MEMCPY(p_rss_tlv->rss_ind_table, rss_params->rss_ind_table,
- sizeof(rss_params->rss_ind_table));
+
+ table_size = OSAL_MIN_T(int, T_ETH_INDIRECTION_TABLE_SIZE,
+ 1 << p_rss_tlv->rss_table_size_log);
+ for (i = 0; i < table_size; i++) {
+ struct ecore_queue_cid *p_queue;
+
+ p_queue = rss_params->rss_ind_table[i];
+ p_rss_tlv->rss_ind_table[i] = p_queue->rel.queue_id;
+ }
+
OSAL_MEMCPY(p_rss_tlv->rss_key, rss_params->rss_key,
sizeof(rss_params->rss_key));
}
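On the wire the RSS indirection table still carries relative queue ids, so the VF translates its queue-cid pointers back before sending. A sketch of that translation, with a hypothetical ex_cid standing in for struct ecore_queue_cid:

    #include <stdint.h>

    struct ex_cid { uint16_t rel_queue_id; }; /* hypothetical stand-in */

    static void ex_fill_wire_ind_table(uint16_t *wire_tbl,
                                       struct ex_cid **cids,
                                       int table_size)
    {
        int i;

        for (i = 0; i < table_size; i++)
            wire_tbl[i] = cids[i]->rel_queue_id;
    }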
@@ -1067,7 +1252,7 @@ enum _ecore_status_t ecore_vf_pf_reset(struct ecore_hwfn *p_hwfn)
struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct pfvf_def_resp_tlv *resp;
struct vfpf_first_tlv *req;
- int rc;
+ enum _ecore_status_t rc;
/* clear mailbox and prep first tlv */
req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_CLOSE, sizeof(*req));
@@ -1101,7 +1286,7 @@ enum _ecore_status_t ecore_vf_pf_release(struct ecore_hwfn *p_hwfn)
struct pfvf_def_resp_tlv *resp;
struct vfpf_first_tlv *req;
u32 size;
- int rc;
+ enum _ecore_status_t rc;
/* clear mailbox and prep first tlv */
req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_RELEASE, sizeof(*req));
@@ -1140,7 +1325,6 @@ enum _ecore_status_t ecore_vf_pf_release(struct ecore_hwfn *p_hwfn)
}
OSAL_FREE(p_hwfn->p_dev, p_hwfn->vf_iov_info);
- p_hwfn->vf_iov_info = OSAL_NULL;
return rc;
}
@@ -1173,7 +1357,7 @@ enum _ecore_status_t ecore_vf_pf_filter_ucast(struct ecore_hwfn *p_hwfn,
struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct vfpf_ucast_filter_tlv *req;
struct pfvf_def_resp_tlv *resp;
- int rc;
+ enum _ecore_status_t rc;
/* Sanitize */
if (p_ucast->opcode == ECORE_FILTER_MOVE) {
@@ -1214,7 +1398,7 @@ enum _ecore_status_t ecore_vf_pf_int_cleanup(struct ecore_hwfn *p_hwfn)
{
struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
- int rc;
+ enum _ecore_status_t rc;
/* clear mailbox and prep first tlv */
ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_INT_CLEANUP,
@@ -1240,6 +1424,48 @@ exit:
return rc;
}
+enum _ecore_status_t
+ecore_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn, u16 rx_coal, u16 tx_coal,
+ struct ecore_queue_cid *p_cid)
+{
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct vfpf_update_coalesce *req;
+ struct pfvf_def_resp_tlv *resp;
+ enum _ecore_status_t rc;
+
+ /* clear mailbox and prep header tlv */
+ req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_COALESCE_UPDATE,
+ sizeof(*req));
+
+ req->rx_coal = rx_coal;
+ req->tx_coal = tx_coal;
+ req->qid = p_cid->rel.queue_id;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Setting coalesce rx_coal = %d, tx_coal = %d at queue = %d\n",
+ rx_coal, tx_coal, req->qid);
+
+ /* add list termination tlv */
+ ecore_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ resp = &p_iov->pf2vf_reply->default_resp;
+ rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+
+ if (rc != ECORE_SUCCESS)
+ goto exit;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+ goto exit;
+
+ p_hwfn->p_dev->rx_coalesce_usecs = rx_coal;
+ p_hwfn->p_dev->tx_coalesce_usecs = tx_coal;
+
+exit:
+ ecore_vf_pf_req_end(p_hwfn, rc);
+ return rc;
+}
+
u16 ecore_vf_get_igu_sb_id(struct ecore_hwfn *p_hwfn,
u16 sb_id)
{
@@ -1356,6 +1582,12 @@ void ecore_vf_get_num_rxqs(struct ecore_hwfn *p_hwfn, u8 *num_rxqs)
*num_rxqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_rxqs;
}
+void ecore_vf_get_num_txqs(struct ecore_hwfn *p_hwfn,
+ u8 *num_txqs)
+{
+ *num_txqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_txqs;
+}
+
void ecore_vf_get_port_mac(struct ecore_hwfn *p_hwfn, u8 *port_mac)
{
OSAL_MEMCPY(port_mac,
@@ -1372,23 +1604,21 @@ void ecore_vf_get_num_vlan_filters(struct ecore_hwfn *p_hwfn,
*num_vlan_filters = p_vf->acquire_resp.resc.num_vlan_filters;
}
-/* @DPDK */
-void ecore_vf_get_num_mac_filters(struct ecore_hwfn *p_hwfn,
- u32 *num_mac)
+void ecore_vf_get_num_sbs(struct ecore_hwfn *p_hwfn,
+ u32 *num_sbs)
{
struct ecore_vf_iov *p_vf;
p_vf = p_hwfn->vf_iov_info;
- *num_mac = p_vf->acquire_resp.resc.num_mac_filters;
+ *num_sbs = (u32)p_vf->acquire_resp.resc.num_sbs;
}
-void ecore_vf_get_num_sbs(struct ecore_hwfn *p_hwfn,
- u32 *num_sbs)
+void ecore_vf_get_num_mac_filters(struct ecore_hwfn *p_hwfn,
+ u32 *num_mac_filters)
{
- struct ecore_vf_iov *p_vf;
+ struct ecore_vf_iov *p_vf = p_hwfn->vf_iov_info;
- p_vf = p_hwfn->vf_iov_info;
- *num_sbs = (u32)p_vf->acquire_resp.resc.num_sbs;
+ *num_mac_filters = p_vf->acquire_resp.resc.num_mac_filters;
}
bool ecore_vf_check_mac(struct ecore_hwfn *p_hwfn, u8 *mac)
@@ -1428,6 +1658,18 @@ bool ecore_vf_bulletin_get_forced_mac(struct ecore_hwfn *hwfn, u8 *dst_mac,
return true;
}
+void ecore_vf_bulletin_get_udp_ports(struct ecore_hwfn *p_hwfn,
+ u16 *p_vxlan_port,
+ u16 *p_geneve_port)
+{
+ struct ecore_bulletin_content *p_bulletin;
+
+ p_bulletin = &p_hwfn->vf_iov_info->bulletin_shadow;
+
+ *p_vxlan_port = p_bulletin->vxlan_udp_port;
+ *p_geneve_port = p_bulletin->geneve_udp_port;
+}
+
bool ecore_vf_bulletin_get_forced_vlan(struct ecore_hwfn *hwfn, u16 *dst_pvid)
{
struct ecore_bulletin_content *bulletin;
diff --git a/drivers/net/qede/base/ecore_vf.h b/drivers/net/qede/base/ecore_vf.h
index 6077d600..f4713884 100644
--- a/drivers/net/qede/base/ecore_vf.h
+++ b/drivers/net/qede/base/ecore_vf.h
@@ -38,10 +38,33 @@ struct ecore_vf_iov {
bool b_pre_fp_hsi;
};
+
+enum _ecore_status_t ecore_set_rxq_coalesce(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 coalesce,
+ struct ecore_queue_cid *p_cid);
+enum _ecore_status_t ecore_set_txq_coalesce(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 coalesce,
+ struct ecore_queue_cid *p_cid);
+/**
+ * @brief VF - Set Rx/Tx coalesce per VF's relative queue.
+ * Coalesce value '0' will omit the configuration.
+ *
+ * @param p_hwfn
+ * @param rx_coal - coalesce value in microseconds for the Rx queue
+ * @param tx_coal - coalesce value in microseconds for the Tx queue
+ * @param p_cid
+ *
+ **/
+enum _ecore_status_t ecore_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn,
+ u16 rx_coal, u16 tx_coal,
+ struct ecore_queue_cid *p_cid);
+
#ifdef CONFIG_ECORE_SRIOV
/**
* @brief hw preparation for VF
- * sends ACQUIRE message
+ * sends ACQUIRE message
*
* @param p_hwfn
*
@@ -53,10 +76,7 @@ enum _ecore_status_t ecore_vf_hw_prepare(struct ecore_hwfn *p_hwfn);
* @brief VF - start the RX Queue by sending a message to the PF
*
* @param p_hwfn
- * @param cid - zero based within the VF
- * @param rx_queue_id - zero based within the VF
- * @param sb - VF status block for this queue
- * @param sb_index - Index within the status block
+ * @param p_cid - Only relative fields are relevant
* @param bd_max_bytes - maximum number of bytes per bd
* @param bd_chain_phys_addr - physical address of bd chain
* @param cqe_pbl_addr - physical address of pbl
@@ -67,9 +87,7 @@ enum _ecore_status_t ecore_vf_hw_prepare(struct ecore_hwfn *p_hwfn);
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_vf_pf_rxq_start(struct ecore_hwfn *p_hwfn,
- u8 rx_queue_id,
- u16 sb,
- u8 sb_index,
+ struct ecore_queue_cid *p_cid,
u16 bd_max_bytes,
dma_addr_t bd_chain_phys_addr,
dma_addr_t cqe_pbl_addr,
@@ -81,46 +99,44 @@ enum _ecore_status_t ecore_vf_pf_rxq_start(struct ecore_hwfn *p_hwfn,
* PF.
*
* @param p_hwfn
- * @param tx_queue_id - zero based within the VF
- * @param sb - status block for this queue
- * @param sb_index - index within the status block
+ * @param p_cid
* @param bd_chain_phys_addr - physical address of tx chain
* @param pp_doorbell - pointer to address to which to
* write the doorbell to.
*
* @return enum _ecore_status_t
*/
-enum _ecore_status_t ecore_vf_pf_txq_start(struct ecore_hwfn *p_hwfn,
- u16 tx_queue_id,
- u16 sb,
- u8 sb_index,
- dma_addr_t pbl_addr,
- u16 pbl_size,
- void OSAL_IOMEM **pp_doorbell);
+enum _ecore_status_t
+ecore_vf_pf_txq_start(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid,
+ dma_addr_t pbl_addr, u16 pbl_size,
+ void OSAL_IOMEM **pp_doorbell);
/**
* @brief VF - stop the RX queue by sending a message to the PF
*
* @param p_hwfn
- * @param rx_qid
+ * @param p_cid
* @param cqe_completion
*
* @return enum _ecore_status_t
*/
-enum _ecore_status_t ecore_vf_pf_rxq_stop(struct ecore_hwfn *p_hwfn,
- u16 rx_qid,
- bool cqe_completion);
+enum _ecore_status_t ecore_vf_pf_rxq_stop(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid,
+ bool cqe_completion);
/**
* @brief VF - stop the TX queue by sending a message to the PF
*
* @param p_hwfn
- * @param tx_qid
+ * @param p_cid
*
* @return enum _ecore_status_t
*/
-enum _ecore_status_t ecore_vf_pf_txq_stop(struct ecore_hwfn *p_hwfn,
- u16 tx_qid);
+enum _ecore_status_t ecore_vf_pf_txq_stop(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid);
+
+/* TODO - fix all the !SRIOV prototypes */
#ifndef LINUX_REMOVE
/**
@@ -128,20 +144,18 @@ enum _ecore_status_t ecore_vf_pf_txq_stop(struct ecore_hwfn *p_hwfn,
* PF
*
* @param p_hwfn
- * @param rx_queue_id
+ * @param pp_cid - list of queue-cids which we want to update
* @param num_rxqs
- * @param init_sge_ring
* @param comp_cqe_flg
* @param comp_event_flg
*
* @return enum _ecore_status_t
*/
-enum _ecore_status_t ecore_vf_pf_rxqs_update(
- struct ecore_hwfn *p_hwfn,
- u16 rx_queue_id,
- u8 num_rxqs,
- u8 comp_cqe_flg,
- u8 comp_event_flg);
+enum _ecore_status_t ecore_vf_pf_rxqs_update(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid **pp_cid,
+ u8 num_rxqs,
+ u8 comp_cqe_flg,
+ u8 comp_event_flg);
#endif
/**
@@ -267,5 +281,10 @@ void __ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn,
struct ecore_mcp_link_capabilities *p_link_caps,
struct ecore_bulletin_content *p_bulletin);
+enum _ecore_status_t
+ecore_vf_pf_tunnel_param_update(struct ecore_hwfn *p_hwfn,
+ struct ecore_tunnel_info *p_tunn);
+
+void ecore_vf_set_vf_start_tunn_update_param(struct ecore_tunnel_info *p_tun);
#endif
#endif /* __ECORE_VF_H__ */
diff --git a/drivers/net/qede/base/ecore_vf_api.h b/drivers/net/qede/base/ecore_vf_api.h
index 571fd374..be3a326b 100644
--- a/drivers/net/qede/base/ecore_vf_api.h
+++ b/drivers/net/qede/base/ecore_vf_api.h
@@ -61,6 +61,15 @@ void ecore_vf_get_num_rxqs(struct ecore_hwfn *p_hwfn,
u8 *num_rxqs);
/**
+ * @brief Get number of Tx queues allocated for VF by ecore
+ *
+ * @param p_hwfn
+ * @param num_txqs - allocated TX queues
+ */
+void ecore_vf_get_num_txqs(struct ecore_hwfn *p_hwfn,
+ u8 *num_txqs);
+
+/**
* @brief Get port mac address for VF
*
* @param p_hwfn
@@ -78,18 +87,18 @@ void ecore_vf_get_port_mac(struct ecore_hwfn *p_hwfn,
void ecore_vf_get_num_vlan_filters(struct ecore_hwfn *p_hwfn,
u8 *num_vlan_filters);
+void ecore_vf_get_num_sbs(struct ecore_hwfn *p_hwfn,
+ u32 *num_sbs);
+
/**
* @brief Get number of MAC filters allocated for VF by ecore
*
- * @param p_hwfn
- * @param num_mac_filters - allocated MAC filters
+ * @param p_hwfn
+ * @param num_mac_filters - allocated MAC filters
*/
void ecore_vf_get_num_mac_filters(struct ecore_hwfn *p_hwfn,
u32 *num_mac_filters);
-void ecore_vf_get_num_sbs(struct ecore_hwfn *p_hwfn,
- u32 *num_sbs);
-
/**
* @brief Check if VF can set a MAC address
*
@@ -152,5 +161,7 @@ void ecore_vf_get_fw_version(struct ecore_hwfn *p_hwfn,
u16 *fw_minor,
u16 *fw_rev,
u16 *fw_eng);
+void ecore_vf_bulletin_get_udp_ports(struct ecore_hwfn *p_hwfn,
+ u16 *p_vxlan_port, u16 *p_geneve_port);
#endif
#endif
diff --git a/drivers/net/qede/base/ecore_vfpf_if.h b/drivers/net/qede/base/ecore_vfpf_if.h
index 149d092b..66184421 100644
--- a/drivers/net/qede/base/ecore_vfpf_if.h
+++ b/drivers/net/qede/base/ecore_vfpf_if.h
@@ -416,10 +416,55 @@ struct vfpf_ucast_filter_tlv {
u16 padding[3];
};
+/* tunnel update param tlv */
+struct vfpf_update_tunn_param_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ u8 tun_mode_update_mask;
+ u8 tunn_mode;
+ u8 update_tun_cls;
+ u8 vxlan_clss;
+ u8 l2gre_clss;
+ u8 ipgre_clss;
+ u8 l2geneve_clss;
+ u8 ipgeneve_clss;
+ u8 update_geneve_port;
+ u8 update_vxlan_port;
+ u16 geneve_port;
+ u16 vxlan_port;
+ u8 padding[2];
+};
+
+struct pfvf_update_tunn_param_tlv {
+ struct pfvf_tlv hdr;
+
+ u16 tunn_feature_mask;
+ u8 vxlan_mode;
+ u8 l2geneve_mode;
+ u8 ipgeneve_mode;
+ u8 l2gre_mode;
+ u8 ipgre_mode;
+ u8 vxlan_clss;
+ u8 l2gre_clss;
+ u8 ipgre_clss;
+ u8 l2geneve_clss;
+ u8 ipgeneve_clss;
+ u16 vxlan_udp_port;
+ u16 geneve_udp_port;
+};
+
struct tlv_buffer_size {
u8 tlv_buffer[TLV_BUFFER_SIZE];
};
+struct vfpf_update_coalesce {
+ struct vfpf_first_tlv first_tlv;
+ u16 rx_coal;
+ u16 tx_coal;
+ u16 qid;
+ u8 padding[2];
+};
+
union vfpf_tlvs {
struct vfpf_first_tlv first_tlv;
struct vfpf_acquire_tlv acquire;
@@ -431,6 +476,8 @@ union vfpf_tlvs {
struct vfpf_vport_start_tlv start_vport;
struct vfpf_vport_update_tlv vport_update;
struct vfpf_ucast_filter_tlv ucast_filter;
+ struct vfpf_update_tunn_param_tlv tunn_param_update;
+ struct vfpf_update_coalesce update_coalesce;
struct tlv_buffer_size tlv_buf_size;
};
@@ -439,6 +486,7 @@ union pfvf_tlvs {
struct pfvf_acquire_resp_tlv acquire_resp;
struct tlv_buffer_size tlv_buf_size;
struct pfvf_start_queue_resp_tlv queue_start;
+ struct pfvf_update_tunn_param_tlv tunn_param_resp;
};
/* This is a structure which is allocated in the VF, which the PF may update
@@ -506,9 +554,12 @@ struct ecore_bulletin_content {
u8 pfc_enabled;
u8 partner_tx_flow_ctrl_en;
u8 partner_rx_flow_ctrl_en;
+
u8 partner_adv_pause;
u8 sfp_tx_fault;
- u8 padding4[6];
+ u16 vxlan_udp_port;
+ u16 geneve_udp_port;
+ u8 padding4[2];
u32 speed;
u32 partner_adv_speed;
@@ -552,6 +603,8 @@ enum {
CHANNEL_TLV_VPORT_UPDATE_RSS,
CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN,
CHANNEL_TLV_VPORT_UPDATE_SGE_TPA,
+ CHANNEL_TLV_UPDATE_TUNN_PARAM,
+ CHANNEL_TLV_COALESCE_UPDATE,
CHANNEL_TLV_MAX,
/* Required for iterating over vport-update tlvs.
diff --git a/drivers/net/qede/base/eth_common.h b/drivers/net/qede/base/eth_common.h
index 32130709..6dc969b0 100644
--- a/drivers/net/qede/base/eth_common.h
+++ b/drivers/net/qede/base/eth_common.h
@@ -34,6 +34,14 @@
#define ETH_RX_CQE_PAGE_SIZE_BYTES 4096
#define ETH_RX_NUM_NEXT_PAGE_BDS 2
+/* Limitation for Tunneled LSO Packets on the offset (in bytes) of the inner IP
+ * header (relevant to LSO for tunneled packet):
+ */
+/* Offset is limited to 253 bytes (inclusive). */
+#define ETH_MAX_TUNN_LSO_INNER_IPV4_OFFSET 253
+/* Offset is limited to 251 bytes (inclusive). */
+#define ETH_MAX_TUNN_LSO_INNER_IPV6_OFFSET 251
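A transmit path could use these inclusive limits to decide whether a tunneled LSO frame is offloadable at all; a minimal sketch under that assumption (the literals mirror the two defines above):

    #include <stdbool.h>
    #include <stdint.h>

    static bool ex_tunn_lso_inner_off_ok(uint16_t inner_ip_off,
                                         bool inner_is_ipv6)
    {
        uint16_t limit = inner_is_ipv6 ? 251 : 253; /* inclusive */

        return inner_ip_off <= limit;
    }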
+
#define ETH_TX_MIN_BDS_PER_NON_LSO_PKT 1
#define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET 18
#define ETH_TX_MAX_BDS_PER_LSO_PACKET 255
@@ -141,16 +149,23 @@ struct eth_tx_1st_bd_flags {
/* Do not allow additional VLAN manipulations on this packet. */
#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_MASK 0x1
#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_SHIFT 1
-/* IP checksum recalculation in needed */
+/* Recalculate IP checksum. For tunneled packet - relevant to inner header. */
#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_MASK 0x1
#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT 2
-/* TCP/UDP checksum recalculation in needed */
+/* Recalculate TCP/UDP checksum.
+ * For tunneled packet - relevant to inner header.
+ */
#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_MASK 0x1
#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT 3
-/* If set, need to add the VLAN in vlan field to the packet. */
+/* If set, insert VLAN tag from vlan field to the packet.
+ * For tunneled packet - relevant to outer header.
+ */
#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_MASK 0x1
#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT 4
-/* If set, this is an LSO packet. */
+/* If set, this is an LSO packet. Note: For Tunneled LSO packets, the offset of
+ * the inner IPV4 (and IPV6) header is limited to 253 (and 251 respectively)
+ * bytes, inclusive.
+ */
#define ETH_TX_1ST_BD_FLAGS_LSO_MASK 0x1
#define ETH_TX_1ST_BD_FLAGS_LSO_SHIFT 5
/* Recalculate Tunnel IP Checksum (if Tunnel IP Header is IPv4) */
@@ -165,8 +180,9 @@ struct eth_tx_1st_bd_flags {
* The parsing information data for the first tx bd of a given packet.
*/
struct eth_tx_data_1st_bd {
- __le16 vlan /* VLAN tag to insert to packet (if needed). */;
-/* Number of BDs in packet. Should be at least 2 in non-LSO packet and at least
+/* VLAN tag to insert to packet (if enabled by vlan_insertion flag). */
+ __le16 vlan;
+/* Number of BDs in packet. Should be at least 1 in non-LSO packet and at least
* 3 in LSO (or Tunnel with IPv6+ext) packet.
*/
u8 nbds;
@@ -209,10 +225,14 @@ struct eth_tx_data_2nd_bd {
/* For LSO / Tunnel header with IPv6+ext - Set if inner header is IPv6 */
#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_MASK 0x1
#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT 11
-/* For LSO / Tunnel header with IPv6+ext - Set if outer header has IPv6+ext */
+/* In tunneling mode - Set to 1 when the Inner header is IPv6 with extension.
+ * Otherwise set to 1 if the header is IPv6 with extension.
+ */
#define ETH_TX_DATA_2ND_BD_IPV6_EXT_MASK 0x1
#define ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT 12
-/* Set if Tunnel header has IPv6 ext. (3rd BD is required) */
+/* Set to 1 if Tunnel (outer = encapsulating) header has IPv6 ext. (Note: 3rd BD
+ * is required, hence EDPM does not support Tunnel [outer] header with Ipv6Ext)
+ */
#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_MASK 0x1
#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT 13
/* Set if (inner) L4 protocol is UDP. (Required when IPv6+ext (or tunnel with
diff --git a/drivers/net/qede/base/mcp_public.h b/drivers/net/qede/base/mcp_public.h
index 96efc3c8..fcf98477 100644
--- a/drivers/net/qede/base/mcp_public.h
+++ b/drivers/net/qede/base/mcp_public.h
@@ -84,9 +84,32 @@ struct eth_phy_cfg {
/* Remote Serdes Loopback (RX to TX) */
#define ETH_LOOPBACK_INT_PHY_FEA_AH_ONLY (9)
- /* features */
- u32 feature_config_flags;
-#define ETH_EEE_MODE_ADV_LPI (1 << 0)
+ /* Used to configure the EEE Tx LPI timer, has several modes of
+ * operation, according to bits 29:28
+ * 2'b00: Timer will be configured by nvram, output will be the value
+ * from nvram.
+ * 2'b01: Timer will be configured by nvram, output will be in
+ * 16xmicroseconds.
+ * 2'b10: bits 1:0 contain an EEE power-saving mode value which will be
+ * used instead of the one located in the nvram. Output will be that value.
+ * 2'b11: bits 19:0 contain the idle timer in microseconds; output
+ * will be in 16xmicroseconds.
+ * Bits 31:30 should be 2'b11 in order for EEE to be enabled.
+ */
+ u32 eee_mode;
+#define EEE_MODE_TIMER_USEC_MASK (0x000fffff)
+#define EEE_MODE_TIMER_USEC_OFFSET (0)
+#define EEE_MODE_TIMER_USEC_BALANCED_TIME (0xa00)
+#define EEE_MODE_TIMER_USEC_AGGRESSIVE_TIME (0x100)
+#define EEE_MODE_TIMER_USEC_LATENCY_TIME (0x6000)
+/* Set by the driver to request that the status timer be in microseconds and
+ * not in EEE policy definition
+ */
+#define EEE_MODE_OUTPUT_TIME (1 << 28)
+/* Set by the driver to override default nvm timer */
+#define EEE_MODE_OVERRIDE_NVRAM (1 << 29)
+#define EEE_MODE_ENABLE_LPI (1 << 30) /* Set when */
+#define EEE_MODE_ADV_LPI (1 << 31) /* Set when EEE is enabled */
};
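Putting the eee_mode bits together: a driver that wants a fixed 1 ms idle timer taken from bits 19:0, reported back in 16-usec units, sets bits 29:28 to 2'b11 plus the enable/advertise bits. A sketch with the defines redeclared locally (1U is used for bit 31 to keep the shift well-defined in plain C):

#include <stdint.h>

#define EEE_MODE_TIMER_USEC_MASK	(0x000fffff)
#define EEE_MODE_OUTPUT_TIME		(1 << 28)
#define EEE_MODE_OVERRIDE_NVRAM		(1 << 29)
#define EEE_MODE_ENABLE_LPI		(1 << 30)
#define EEE_MODE_ADV_LPI		(1U << 31)

static uint32_t eee_mode_fixed_timer(uint32_t idle_usec)
{
	/* bits 29:28 = 2'b11: timer comes from bits 19:0 */
	return (idle_usec & EEE_MODE_TIMER_USEC_MASK) |
	       EEE_MODE_OUTPUT_TIME | EEE_MODE_OVERRIDE_NVRAM |
	       EEE_MODE_ENABLE_LPI | EEE_MODE_ADV_LPI;
}

Called as eee_mode_fixed_timer(1000) for a 1 ms idle timer.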
struct port_mf_cfg {
@@ -271,16 +294,20 @@ struct dcbx_ets_feature {
#define DCBX_ETS_CBS_SHIFT 3
#define DCBX_ETS_MAX_TCS_MASK 0x000000f0
#define DCBX_ETS_MAX_TCS_SHIFT 4
-#define DCBX_ISCSI_OOO_TC_MASK 0x00000f00
-#define DCBX_ISCSI_OOO_TC_SHIFT 8
+#define DCBX_OOO_TC_MASK 0x00000f00
+#define DCBX_OOO_TC_SHIFT 8
/* Entries in tc table are organized such that the leftmost entry is prio 0
 * and the rightmost is prio 7
*/
u32 pri_tc_tbl[1];
-#define DCBX_ISCSI_OOO_TC (4)
+/* Fixed TCP OOO TC usage is deprecated and used only for driver backward
+ * compatibility
+ */
+#define DCBX_TCP_OOO_TC (4)
+#define DCBX_TCP_OOO_K2_4PORT_TC (3)
-#define NIG_ETS_ISCSI_OOO_CLIENT_OFFSET (DCBX_ISCSI_OOO_TC + 1)
+#define NIG_ETS_ISCSI_OOO_CLIENT_OFFSET (DCBX_TCP_OOO_TC + 1)
#define DCBX_CEE_STRICT_PRIORITY 0xf
/* Entries in tc table are organized such that the leftmost entry is prio 0
 * and the rightmost is prio 7
@@ -447,6 +474,14 @@ struct public_global {
#define MDUMP_REASON_INTERNAL_ERROR (1 << 0)
#define MDUMP_REASON_EXTERNAL_TRIGGER (1 << 1)
#define MDUMP_REASON_DUMP_AGED (1 << 2)
+ u32 ext_phy_upgrade_fw;
+#define EXT_PHY_FW_UPGRADE_STATUS_MASK (0x0000ffff)
+#define EXT_PHY_FW_UPGRADE_STATUS_SHIFT (0)
+#define EXT_PHY_FW_UPGRADE_STATUS_IN_PROGRESS (1)
+#define EXT_PHY_FW_UPGRADE_STATUS_FAILED (2)
+#define EXT_PHY_FW_UPGRADE_STATUS_SUCCESS (3)
+#define EXT_PHY_FW_UPGRADE_TYPE_MASK (0xffff0000)
+#define EXT_PHY_FW_UPGRADE_TYPE_SHIFT (16)
};
/**************************************/
@@ -553,23 +588,20 @@ struct public_port {
#define MCP_VALIDITY_ACTIVE_MFW_NONE 0x000001c0
u32 link_status;
-#define LINK_STATUS_LINK_UP 0x00000001
-#define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001e
+#define LINK_STATUS_LINK_UP 0x00000001
+#define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001e
#define LINK_STATUS_SPEED_AND_DUPLEX_1000THD (1 << 1)
#define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD (2 << 1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_10G (3 << 1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_20G (4 << 1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_40G (5 << 1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_50G (6 << 1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_100G (7 << 1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_25G (8 << 1)
-
-#define LINK_STATUS_AUTO_NEGOTIATE_ENABLED 0x00000020
-
-#define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE 0x00000040
-#define LINK_STATUS_PARALLEL_DETECTION_USED 0x00000080
-
-#define LINK_STATUS_PFC_ENABLED 0x00000100
+#define LINK_STATUS_SPEED_AND_DUPLEX_10G (3 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_20G (4 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_40G (5 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_50G (6 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_100G (7 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_25G (8 << 1)
+#define LINK_STATUS_AUTO_NEGOTIATE_ENABLED 0x00000020
+#define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE 0x00000040
+#define LINK_STATUS_PARALLEL_DETECTION_USED 0x00000080
+#define LINK_STATUS_PFC_ENABLED 0x00000100
#define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE 0x00000200
#define LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE 0x00000400
#define LINK_STATUS_LINK_PARTNER_10G_CAPABLE 0x00000800
@@ -578,25 +610,23 @@ struct public_port {
#define LINK_STATUS_LINK_PARTNER_50G_CAPABLE 0x00004000
#define LINK_STATUS_LINK_PARTNER_100G_CAPABLE 0x00008000
#define LINK_STATUS_LINK_PARTNER_25G_CAPABLE 0x00010000
-
#define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK 0x000C0000
#define LINK_STATUS_LINK_PARTNER_NOT_PAUSE_CAPABLE (0 << 18)
#define LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE (1 << 18)
#define LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE (2 << 18)
-#define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE (3 << 18)
-
-#define LINK_STATUS_SFP_TX_FAULT 0x00100000
-#define LINK_STATUS_TX_FLOW_CONTROL_ENABLED 0x00200000
-#define LINK_STATUS_RX_FLOW_CONTROL_ENABLED 0x00400000
-#define LINK_STATUS_RX_SIGNAL_PRESENT 0x00800000
-#define LINK_STATUS_MAC_LOCAL_FAULT 0x01000000
-#define LINK_STATUS_MAC_REMOTE_FAULT 0x02000000
-#define LINK_STATUS_UNSUPPORTED_SPD_REQ 0x04000000
-
-#define LINK_STATUS_FEC_MODE_MASK 0x38000000
-#define LINK_STATUS_FEC_MODE_NONE (0 << 27)
-#define LINK_STATUS_FEC_MODE_FIRECODE_CL74 (1 << 27)
-#define LINK_STATUS_FEC_MODE_RS_CL91 (2 << 27)
+#define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE (3 << 18)
+#define LINK_STATUS_SFP_TX_FAULT 0x00100000
+#define LINK_STATUS_TX_FLOW_CONTROL_ENABLED 0x00200000
+#define LINK_STATUS_RX_FLOW_CONTROL_ENABLED 0x00400000
+#define LINK_STATUS_RX_SIGNAL_PRESENT 0x00800000
+#define LINK_STATUS_MAC_LOCAL_FAULT 0x01000000
+#define LINK_STATUS_MAC_REMOTE_FAULT 0x02000000
+#define LINK_STATUS_UNSUPPORTED_SPD_REQ 0x04000000
+#define LINK_STATUS_FEC_MODE_MASK 0x38000000
+#define LINK_STATUS_FEC_MODE_NONE (0 << 27)
+#define LINK_STATUS_FEC_MODE_FIRECODE_CL74 (1 << 27)
+#define LINK_STATUS_FEC_MODE_RS_CL91 (2 << 27)
+#define LINK_STATUS_EXT_PHY_LINK_UP 0x40000000
u32 link_status1;
u32 ext_phy_fw_version;
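Decoding link_status is a mask-and-compare exercise; a sketch mapping the speed/duplex field above to Mbps (unknown encodings fall through to 0):

#include <stdint.h>

#define LINK_STATUS_LINK_UP			0x00000001
#define LINK_STATUS_SPEED_AND_DUPLEX_MASK	0x0000001e

static uint32_t link_speed_mbps(uint32_t link_status)
{
	if (!(link_status & LINK_STATUS_LINK_UP))
		return 0;

	switch (link_status & LINK_STATUS_SPEED_AND_DUPLEX_MASK) {
	case (1 << 1):			/* 1000THD */
	case (2 << 1):			/* 1000TFD */
		return 1000;
	case (3 << 1): return 10000;
	case (4 << 1): return 20000;
	case (5 << 1): return 40000;
	case (6 << 1): return 50000;
	case (7 << 1): return 100000;
	case (8 << 1): return 25000;
	default:       return 0;
	}
}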
@@ -654,45 +684,47 @@ struct public_port {
u32 fc_npiv_nvram_tbl_addr;
u32 fc_npiv_nvram_tbl_size;
u32 transceiver_data;
-#define ETH_TRANSCEIVER_STATE_MASK 0x000000FF
-#define ETH_TRANSCEIVER_STATE_SHIFT 0x00000000
-#define ETH_TRANSCEIVER_STATE_UNPLUGGED 0x00000000
-#define ETH_TRANSCEIVER_STATE_PRESENT 0x00000001
-#define ETH_TRANSCEIVER_STATE_VALID 0x00000003
-#define ETH_TRANSCEIVER_STATE_UPDATING 0x00000008
-#define ETH_TRANSCEIVER_TYPE_MASK 0x0000FF00
-#define ETH_TRANSCEIVER_TYPE_SHIFT 0x00000008
-#define ETH_TRANSCEIVER_TYPE_NONE 0x00000000
-#define ETH_TRANSCEIVER_TYPE_UNKNOWN 0x000000FF
+#define ETH_TRANSCEIVER_STATE_MASK 0x000000FF
+#define ETH_TRANSCEIVER_STATE_SHIFT 0x00000000
+#define ETH_TRANSCEIVER_STATE_UNPLUGGED 0x00000000
+#define ETH_TRANSCEIVER_STATE_PRESENT 0x00000001
+#define ETH_TRANSCEIVER_STATE_VALID 0x00000003
+#define ETH_TRANSCEIVER_STATE_UPDATING 0x00000008
+#define ETH_TRANSCEIVER_TYPE_MASK 0x0000FF00
+#define ETH_TRANSCEIVER_TYPE_SHIFT 0x00000008
+#define ETH_TRANSCEIVER_TYPE_NONE 0x00000000
+#define ETH_TRANSCEIVER_TYPE_UNKNOWN 0x000000FF
/* 1G Passive copper cable */
-#define ETH_TRANSCEIVER_TYPE_1G_PCC 0x01
+#define ETH_TRANSCEIVER_TYPE_1G_PCC 0x01
/* 1G Active copper cable */
-#define ETH_TRANSCEIVER_TYPE_1G_ACC 0x02
-#define ETH_TRANSCEIVER_TYPE_1G_LX 0x03
-#define ETH_TRANSCEIVER_TYPE_1G_SX 0x04
-#define ETH_TRANSCEIVER_TYPE_10G_SR 0x05
-#define ETH_TRANSCEIVER_TYPE_10G_LR 0x06
-#define ETH_TRANSCEIVER_TYPE_10G_LRM 0x07
-#define ETH_TRANSCEIVER_TYPE_10G_ER 0x08
+#define ETH_TRANSCEIVER_TYPE_1G_ACC 0x02
+#define ETH_TRANSCEIVER_TYPE_1G_LX 0x03
+#define ETH_TRANSCEIVER_TYPE_1G_SX 0x04
+#define ETH_TRANSCEIVER_TYPE_10G_SR 0x05
+#define ETH_TRANSCEIVER_TYPE_10G_LR 0x06
+#define ETH_TRANSCEIVER_TYPE_10G_LRM 0x07
+#define ETH_TRANSCEIVER_TYPE_10G_ER 0x08
/* 10G Passive copper cable */
-#define ETH_TRANSCEIVER_TYPE_10G_PCC 0x09
+#define ETH_TRANSCEIVER_TYPE_10G_PCC 0x09
/* 10G Active copper cable */
-#define ETH_TRANSCEIVER_TYPE_10G_ACC 0x0a
-#define ETH_TRANSCEIVER_TYPE_XLPPI 0x0b
-#define ETH_TRANSCEIVER_TYPE_40G_LR4 0x0c
-#define ETH_TRANSCEIVER_TYPE_40G_SR4 0x0d
-#define ETH_TRANSCEIVER_TYPE_40G_CR4 0x0e
-#define ETH_TRANSCEIVER_TYPE_100G_AOC 0x0f /* Active optical cable */
-#define ETH_TRANSCEIVER_TYPE_100G_SR4 0x10
-#define ETH_TRANSCEIVER_TYPE_100G_LR4 0x11
-#define ETH_TRANSCEIVER_TYPE_100G_ER4 0x12
-#define ETH_TRANSCEIVER_TYPE_100G_ACC 0x13 /* Active copper cable */
-#define ETH_TRANSCEIVER_TYPE_100G_CR4 0x14
-#define ETH_TRANSCEIVER_TYPE_4x10G_SR 0x15
+#define ETH_TRANSCEIVER_TYPE_10G_ACC 0x0a
+#define ETH_TRANSCEIVER_TYPE_XLPPI 0x0b
+#define ETH_TRANSCEIVER_TYPE_40G_LR4 0x0c
+#define ETH_TRANSCEIVER_TYPE_40G_SR4 0x0d
+#define ETH_TRANSCEIVER_TYPE_40G_CR4 0x0e
+/* Active optical cable */
+#define ETH_TRANSCEIVER_TYPE_100G_AOC 0x0f
+#define ETH_TRANSCEIVER_TYPE_100G_SR4 0x10
+#define ETH_TRANSCEIVER_TYPE_100G_LR4 0x11
+#define ETH_TRANSCEIVER_TYPE_100G_ER4 0x12
+/* Active copper cable */
+#define ETH_TRANSCEIVER_TYPE_100G_ACC 0x13
+#define ETH_TRANSCEIVER_TYPE_100G_CR4 0x14
+#define ETH_TRANSCEIVER_TYPE_4x10G_SR 0x15
/* 25G Passive copper cable - short */
-#define ETH_TRANSCEIVER_TYPE_25G_CA_N 0x16
+#define ETH_TRANSCEIVER_TYPE_25G_CA_N 0x16
/* 25G Active copper cable - short */
-#define ETH_TRANSCEIVER_TYPE_25G_ACC_S 0x17
+#define ETH_TRANSCEIVER_TYPE_25G_ACC_S 0x17
/* 25G Passive copper cable - medium */
#define ETH_TRANSCEIVER_TYPE_25G_CA_S 0x18
/* 25G Active copper cable - medium */
@@ -718,6 +750,39 @@ struct public_port {
u32 wol_pkt_len;
u32 wol_pkt_details;
struct dcb_dscp_map dcb_dscp_map;
+
+ /* the status of EEE auto-negotiation
+ * bits 19:0 the configured tx-lpi entry timer value. Depends on bit 31.
+ * bits 23:20 the speeds advertised for EEE.
+ * bits 27:24 the speeds the Link partner advertised for EEE.
+ * The supported/adv. modes in bits 27:19 originate from the
+ * SHMEM_EEE_XXX_ADV definitions (where XXX is replaced by speed).
+ * bit 28 when 1'b1 EEE was requested.
+ * bit 29 when 1'b1 tx lpi was requested.
+ * bit 30 when 1'b1 EEE was negotiated. Tx lpi will be asserted if 30:29
+ * are 2'b11.
+ * bit 31 - When 1'b0 bits 15:0 contain
+ * NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_XXX define as value.
+ * When 1'b1 those bits contain a value in units of 16 microseconds.
+ */
+ u32 eee_status;
+#define EEE_TIMER_MASK 0x000fffff
+#define EEE_ADV_STATUS_MASK 0x00f00000
+#define EEE_1G_ADV (1 << 1)
+#define EEE_10G_ADV (1 << 2)
+#define EEE_ADV_STATUS_SHIFT 20
+#define EEE_LP_ADV_STATUS_MASK 0x0f000000
+#define EEE_LP_ADV_STATUS_SHIFT 24
+#define EEE_REQUESTED_BIT 0x10000000
+#define EEE_LPI_REQUESTED_BIT 0x20000000
+#define EEE_ACTIVE_BIT 0x40000000
+#define EEE_TIME_OUTPUT_BIT 0x80000000
+
+ u32 eee_remote; /* Used for EEE in LLDP */
+#define EEE_REMOTE_TW_TX_MASK 0x0000ffff
+#define EEE_REMOTE_TW_TX_SHIFT 0
+#define EEE_REMOTE_TW_RX_MASK 0xffff0000
+#define EEE_REMOTE_TW_RX_SHIFT 16
};
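Reading eee_status back follows the bit layout documented above; a small sketch extracting the locally advertised speeds and the "Tx LPI asserted" condition (bits 30:29 both set):

#include <stdbool.h>
#include <stdint.h>

#define EEE_ADV_STATUS_MASK	0x00f00000
#define EEE_ADV_STATUS_SHIFT	20
#define EEE_LPI_REQUESTED_BIT	0x20000000
#define EEE_ACTIVE_BIT		0x40000000

static uint8_t eee_adv_speeds(uint32_t eee_status)
{
	return (uint8_t)((eee_status & EEE_ADV_STATUS_MASK) >>
			 EEE_ADV_STATUS_SHIFT);
}

static bool eee_tx_lpi_asserted(uint32_t eee_status)
{
	uint32_t both = EEE_ACTIVE_BIT | EEE_LPI_REQUESTED_BIT;

	return (eee_status & both) == both;
}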
/**************************************/
@@ -813,9 +878,11 @@ struct public_func {
#define DRV_ID_PDA_COMP_VER_MASK 0x0000ffff
#define DRV_ID_PDA_COMP_VER_SHIFT 0
+#define LOAD_REQ_HSI_VERSION 2
#define DRV_ID_MCP_HSI_VER_MASK 0x00ff0000
#define DRV_ID_MCP_HSI_VER_SHIFT 16
-#define DRV_ID_MCP_HSI_VER_CURRENT (1 << DRV_ID_MCP_HSI_VER_SHIFT)
+#define DRV_ID_MCP_HSI_VER_CURRENT (LOAD_REQ_HSI_VERSION << \
+ DRV_ID_MCP_HSI_VER_SHIFT)
#define DRV_ID_DRV_TYPE_MASK 0x7f000000
#define DRV_ID_DRV_TYPE_SHIFT 24
@@ -958,6 +1025,7 @@ enum resource_id_enum {
RESOURCE_NUM_RSS_ENGINES_E = 14,
RESOURCE_LL2_QUEUE_E = 15,
RESOURCE_RDMA_STATS_QUEUE_E = 16,
+ RESOURCE_BDQ_E = 17,
RESOURCE_MAX_NUM,
RESOURCE_NUM_INVALID = 0xFFFFFFFF
};
@@ -975,8 +1043,47 @@ struct resource_info {
#define RESOURCE_ELEMENT_STRICT (1 << 0)
};
+#define DRV_ROLE_NONE 0
+#define DRV_ROLE_PREBOOT 1
+#define DRV_ROLE_OS 2
+#define DRV_ROLE_KDUMP 3
+
+struct load_req_stc {
+ u32 drv_ver_0;
+ u32 drv_ver_1;
+ u32 fw_ver;
+ u32 misc0;
+#define LOAD_REQ_ROLE_MASK 0x000000FF
+#define LOAD_REQ_ROLE_SHIFT 0
+#define LOAD_REQ_LOCK_TO_MASK 0x0000FF00
+#define LOAD_REQ_LOCK_TO_SHIFT 8
+#define LOAD_REQ_LOCK_TO_DEFAULT 0
+#define LOAD_REQ_LOCK_TO_NONE 255
+#define LOAD_REQ_FORCE_MASK 0x000F0000
+#define LOAD_REQ_FORCE_SHIFT 16
+#define LOAD_REQ_FORCE_NONE 0
+#define LOAD_REQ_FORCE_PF 1
+#define LOAD_REQ_FORCE_ALL 2
+#define LOAD_REQ_FLAGS0_MASK 0x00F00000
+#define LOAD_REQ_FLAGS0_SHIFT 20
+#define LOAD_REQ_FLAGS0_AVOID_RESET (0x1 << 0)
+};
+
+struct load_rsp_stc {
+ u32 drv_ver_0;
+ u32 drv_ver_1;
+ u32 fw_ver;
+ u32 misc0;
+#define LOAD_RSP_ROLE_MASK 0x000000FF
+#define LOAD_RSP_ROLE_SHIFT 0
+#define LOAD_RSP_HSI_MASK 0x0000FF00
+#define LOAD_RSP_HSI_SHIFT 8
+#define LOAD_RSP_FLAGS0_MASK 0x000F0000
+#define LOAD_RSP_FLAGS0_SHIFT 16
+#define LOAD_RSP_FLAGS0_DRV_EXISTS (0x1 << 0)
+};
+
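The driver role and lock timeout travel packed in misc0; a sketch composing the field for an OS driver with the default timeout and no force flags, using the shifts above (redeclared locally):

#include <stdint.h>

#define LOAD_REQ_ROLE_SHIFT	0
#define LOAD_REQ_LOCK_TO_SHIFT	8
#define LOAD_REQ_LOCK_TO_DEFAULT 0
#define LOAD_REQ_FORCE_SHIFT	16
#define LOAD_REQ_FORCE_NONE	0
#define DRV_ROLE_OS		2

static uint32_t load_req_misc0(void)
{
	return ((uint32_t)DRV_ROLE_OS << LOAD_REQ_ROLE_SHIFT) |
	       (LOAD_REQ_LOCK_TO_DEFAULT << LOAD_REQ_LOCK_TO_SHIFT) |
	       (LOAD_REQ_FORCE_NONE << LOAD_REQ_FORCE_SHIFT);
}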
union drv_union_data {
- u32 ver_str[MCP_DRV_VER_STR_SIZE_DWORD]; /* LOAD_REQ */
struct mcp_mac wol_mac; /* UNLOAD_DONE */
/* This configuration should be set by the driver for the LINK_SET command. */
@@ -995,13 +1102,17 @@ union drv_union_data {
struct lan_stats_stc lan_stats;
struct fcoe_stats_stc fcoe_stats;
- struct iscsi_stats_stc icsci_stats;
+ struct iscsi_stats_stc iscsi_stats;
struct rdma_stats_stc rdma_stats;
struct ocbb_data_stc ocbb_info;
struct temperature_status_stc temp_info;
struct resource_info resource;
struct bist_nvm_image_att nvm_image_att;
struct mdump_config_stc mdump_config;
+ u32 dword;
+
+ struct load_req_stc load_req;
+ struct load_rsp_stc load_rsp;
/* ... */
};
@@ -1011,6 +1122,7 @@ struct public_drv_mb {
#define DRV_MSG_CODE_LOAD_REQ 0x10000000
#define DRV_MSG_CODE_LOAD_DONE 0x11000000
#define DRV_MSG_CODE_INIT_HW 0x12000000
+#define DRV_MSG_CODE_CANCEL_LOAD_REQ 0x13000000
#define DRV_MSG_CODE_UNLOAD_REQ 0x20000000
#define DRV_MSG_CODE_UNLOAD_DONE 0x21000000
#define DRV_MSG_CODE_INIT_PHY 0x22000000
@@ -1026,16 +1138,15 @@ struct public_drv_mb {
#define DRV_MSG_CODE_OV_UPDATE_BUS_NUM 0x27000000
#define DRV_MSG_CODE_OV_UPDATE_BOOT_PROGRESS 0x28000000
#define DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER 0x29000000
+#define DRV_MSG_CODE_NIG_DRAIN 0x30000000
#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE 0x31000000
#define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000
#define DRV_MSG_CODE_OV_UPDATE_MTU 0x33000000
-
-#define DRV_MSG_CODE_NIG_DRAIN 0x30000000
-
/* DRV_MB Param: driver version supp, FW_MB param: MFW version supp,
* data: struct resource_info
*/
#define DRV_MSG_GET_RESOURCE_ALLOC_MSG 0x34000000
+#define DRV_MSG_SET_RESOURCE_VALUE_MSG 0x35000000
/* deprecated - don't use */
#define DRV_MSG_CODE_INITIATE_FLR_DEPRECATED 0x02000000
@@ -1087,19 +1198,6 @@ struct public_drv_mb {
* MCP_REG_CPU_STATE/MCP_REG_CPU_MODE registers.
*/
#define DRV_MSG_CODE_MCP_HALT 0x00100000
-/* Host shall provide buffer and size for MFW */
-#define DRV_MSG_CODE_PMD_DIAG_DUMP 0x00140000
-/* Host shall provide buffer and size for MFW */
-#define DRV_MSG_CODE_PMD_DIAG_EYE 0x00150000
-/* Param: [0:1] - Port, [2:7] - read size, [8:15] - I2C address,
- * [16:31] - offset
- */
-#define DRV_MSG_CODE_TRANSCEIVER_READ 0x00160000
-/* Param: [0:1] - Port, [2:7] - write size, [8:15] - I2C address,
- * [16:31] - offset
- */
-#define DRV_MSG_CODE_TRANSCEIVER_WRITE 0x00170000
-
/* Set virtual mac address, params [31:6] - reserved, [5:4] - type,
* [3:0] - func, drv_data[7:0] - MAC/WWNN/WWPN
*/
@@ -1108,20 +1206,31 @@ struct public_drv_mb {
* [3:0] - func, drv_data[7:0] - MAC/WWNN/WWPN
*/
#define DRV_MSG_CODE_GET_VMAC 0x00120000
+#define DRV_MSG_CODE_VMAC_TYPE_SHIFT 4
+#define DRV_MSG_CODE_VMAC_TYPE_MASK 0x30
#define DRV_MSG_CODE_VMAC_TYPE_MAC 1
#define DRV_MSG_CODE_VMAC_TYPE_WWNN 2
#define DRV_MSG_CODE_VMAC_TYPE_WWPN 3
-
/* Get statistics from pf, params [31:4] - reserved, [3:0] - stats type */
#define DRV_MSG_CODE_GET_STATS 0x00130000
#define DRV_MSG_CODE_STATS_TYPE_LAN 1
#define DRV_MSG_CODE_STATS_TYPE_FCOE 2
#define DRV_MSG_CODE_STATS_TYPE_ISCSI 3
-#define DRV_MSG_CODE_STATS_TYPE_RDMA 4
-
+#define DRV_MSG_CODE_STATS_TYPE_RDMA 4
+/* Host shall provide buffer and size for MFW */
+#define DRV_MSG_CODE_PMD_DIAG_DUMP 0x00140000
+/* Host shall provide buffer and size for MFW */
+#define DRV_MSG_CODE_PMD_DIAG_EYE 0x00150000
+/* Param: [0:1] - Port, [2:7] - read size, [8:15] - I2C address,
+ * [16:31] - offset
+ */
+#define DRV_MSG_CODE_TRANSCEIVER_READ 0x00160000
+/* Param: [0:1] - Port, [2:7] - write size, [8:15] - I2C address,
+ * [16:31] - offset
+ */
+#define DRV_MSG_CODE_TRANSCEIVER_WRITE 0x00170000
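The comment above packs four subfields into the 32-bit param; a sketch building the read variant (bit ranges as documented: [1:0] port, [7:2] size, [15:8] I2C address, [31:16] offset):

#include <stdint.h>

static uint32_t transceiver_read_param(uint8_t port, uint8_t size,
				       uint8_t i2c_addr, uint16_t offset)
{
	return ((uint32_t)port & 0x3) |
	       (((uint32_t)size & 0x3f) << 2) |
	       ((uint32_t)i2c_addr << 8) |
	       ((uint32_t)offset << 16);
}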
/* indicate OCBB related information */
#define DRV_MSG_CODE_OCBB_DATA 0x00180000
-
/* Set function BW, params[15:8] - min, params[7:0] - max */
#define DRV_MSG_CODE_SET_BW 0x00190000
#define BW_MAX_MASK 0x000000ff
@@ -1137,14 +1246,10 @@ struct public_drv_mb {
#define DRV_MSG_CODE_INDUCE_FAILURE 0x001b0000
#define DRV_MSG_FAN_FAILURE_TYPE (1 << 0)
#define DRV_MSG_TEMPERATURE_FAILURE_TYPE (1 << 1)
-
/* Param: [0:15] - gpio number */
#define DRV_MSG_CODE_GPIO_READ 0x001c0000
/* Param: [0:15] - gpio number, [16:31] - gpio value */
#define DRV_MSG_CODE_GPIO_WRITE 0x001d0000
-/* Param: [0:15] - gpio number */
-#define DRV_MSG_CODE_GPIO_INFO 0x00270000
-
/* Param: [0:7] - test enum, [8:15] - image index, [16:31] - reserved */
#define DRV_MSG_CODE_BIST_TEST 0x001e0000
#define DRV_MSG_CODE_GET_TEMPERATURE 0x001f0000
@@ -1157,11 +1262,16 @@ struct public_drv_mb {
#define DRV_MSG_CODE_TIMESTAMP 0x00210000
/* This is an empty mailbox just return OK*/
#define DRV_MSG_CODE_EMPTY_MB 0x00220000
+
/* Param[0:4] - resource number (0-31), Param[5:7] - opcode,
* param[15:8] - age
*/
#define DRV_MSG_CODE_RESOURCE_CMD 0x00230000
+#define RESOURCE_CMD_REQ_RESC_MASK 0x0000001F
+#define RESOURCE_CMD_REQ_RESC_SHIFT 0
+#define RESOURCE_CMD_REQ_OPCODE_MASK 0x000000E0
+#define RESOURCE_CMD_REQ_OPCODE_SHIFT 5
/* request resource ownership with default aging */
#define RESOURCE_OPCODE_REQ 1
/* request resource ownership without aging */
@@ -1169,8 +1279,15 @@ struct public_drv_mb {
/* request resource ownership with specific aging timer (in seconds) */
#define RESOURCE_OPCODE_REQ_W_AGING 3
#define RESOURCE_OPCODE_RELEASE 4 /* release resource */
-#define RESOURCE_OPCODE_FORCE_RELEASE 5 /* force resource release */
-
+/* force resource release */
+#define RESOURCE_OPCODE_FORCE_RELEASE 5
+#define RESOURCE_CMD_REQ_AGE_MASK 0x0000FF00
+#define RESOURCE_CMD_REQ_AGE_SHIFT 8
+
+#define RESOURCE_CMD_RSP_OWNER_MASK 0x000000FF
+#define RESOURCE_CMD_RSP_OWNER_SHIFT 0
+#define RESOURCE_CMD_RSP_OPCODE_MASK 0x00000700
+#define RESOURCE_CMD_RSP_OPCODE_SHIFT 8
/* resource is free and granted to requester */
#define RESOURCE_OPCODE_GNT 1
/* resource is busy, param[7:0] indicates owner as follow 0-15 = PF0-15,
@@ -1184,11 +1301,11 @@ struct public_drv_mb {
/* indicate wrong owner during release */
#define RESOURCE_OPCODE_WRONG_OWNER 5
#define RESOURCE_OPCODE_UNKNOWN_CMD 255
+
/* dedicate resource 0 for dump */
-#define RESOURCE_DUMP (1 << 0)
+#define RESOURCE_DUMP 0
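Combining the request fields: a sketch composing a RESOURCE_CMD param that asks for ownership of the dump resource with a 10-second aging timer (shifts copied from above):

#include <stdint.h>

#define RESOURCE_CMD_REQ_RESC_SHIFT	0
#define RESOURCE_CMD_REQ_OPCODE_SHIFT	5
#define RESOURCE_CMD_REQ_AGE_SHIFT	8
#define RESOURCE_OPCODE_REQ_W_AGING	3
#define RESOURCE_DUMP			0

static uint32_t resource_req_param(uint8_t age_sec)
{
	return (RESOURCE_DUMP << RESOURCE_CMD_REQ_RESC_SHIFT) |
	       (RESOURCE_OPCODE_REQ_W_AGING << RESOURCE_CMD_REQ_OPCODE_SHIFT) |
	       ((uint32_t)age_sec << RESOURCE_CMD_REQ_AGE_SHIFT);
}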
#define DRV_MSG_CODE_GET_MBA_VERSION 0x00240000 /* Get MBA version */
-
/* Send crash dump commands with param[3:0] - opcode */
#define DRV_MSG_CODE_MDUMP_CMD 0x00250000
#define MDUMP_DRV_PARAM_OPCODE_MASK 0x0000000f
@@ -1202,14 +1319,26 @@ struct public_drv_mb {
#define DRV_MSG_CODE_MDUMP_TRIGGER 0x03
/* Request valid logs and config words */
#define DRV_MSG_CODE_MDUMP_GET_CONFIG 0x04
-/* Set triggers mask. drv_mb_param should indicate (bitwise) which trigger
- * enabled
+/* Set triggers mask. drv_mb_param should indicate (bitwise) which
+ * triggers are enabled
*/
#define DRV_MSG_CODE_MDUMP_SET_ENABLE 0x05
-#define DRV_MSG_CODE_MDUMP_CLEAR_LOGS 0x06 /* Clear all logs */
-
-
+/* Clear all logs */
+#define DRV_MSG_CODE_MDUMP_CLEAR_LOGS 0x06
#define DRV_MSG_CODE_MEM_ECC_EVENTS 0x00260000 /* Param: None */
+/* Param: [0:15] - gpio number */
+#define DRV_MSG_CODE_GPIO_INFO 0x00270000
+/* Value will be placed in union */
+#define DRV_MSG_CODE_EXT_PHY_READ 0x00280000
+/* Value should be placed in union */
+#define DRV_MSG_CODE_EXT_PHY_WRITE 0x00290000
+#define DRV_MB_PARAM_ADDR_SHIFT 0
+#define DRV_MB_PARAM_ADDR_MASK 0x0000FFFF
+#define DRV_MB_PARAM_DEVAD_SHIFT 16
+#define DRV_MB_PARAM_DEVAD_MASK 0x001F0000
+#define DRV_MB_PARAM_PORT_SHIFT 21
+#define DRV_MB_PARAM_PORT_MASK 0x00600000
+#define DRV_MSG_CODE_EXT_PHY_FW_UPGRADE 0x002a0000
#define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff
@@ -1360,12 +1489,16 @@ struct public_drv_mb {
u32 fw_mb_header;
#define FW_MSG_CODE_MASK 0xffff0000
+#define FW_MSG_CODE_UNSUPPORTED 0x00000000
#define FW_MSG_CODE_DRV_LOAD_ENGINE 0x10100000
#define FW_MSG_CODE_DRV_LOAD_PORT 0x10110000
#define FW_MSG_CODE_DRV_LOAD_FUNCTION 0x10120000
#define FW_MSG_CODE_DRV_LOAD_REFUSED_PDA 0x10200000
-#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI 0x10210000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1 0x10210000
#define FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG 0x10220000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI 0x10230000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE 0x10300000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_REJECT 0x10310000
#define FW_MSG_CODE_DRV_LOAD_DONE 0x11100000
#define FW_MSG_CODE_DRV_UNLOAD_ENGINE 0x20110000
#define FW_MSG_CODE_DRV_UNLOAD_PORT 0x20120000
@@ -1418,6 +1551,10 @@ struct public_drv_mb {
#define FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK 0x00400000
/* MFW reject "mcp reset" command if one of the drivers is up */
#define FW_MSG_CODE_MCP_RESET_REJECT 0x00600000
+#define FW_MSG_CODE_NVM_FAILED_CALC_HASH 0x00310000
+#define FW_MSG_CODE_NVM_PUBLIC_KEY_MISSING 0x00320000
+#define FW_MSG_CODE_NVM_INVALID_PUBLIC_KEY 0x00330000
+
#define FW_MSG_CODE_PHY_OK 0x00110000
#define FW_MSG_CODE_PHY_ERROR 0x00120000
#define FW_MSG_CODE_SET_SECURE_MODE_ERROR 0x00130000
@@ -1425,25 +1562,30 @@ struct public_drv_mb {
#define FW_MSG_MODE_PHY_PRIVILEGE_ERROR 0x00150000
#define FW_MSG_CODE_OK 0x00160000
#define FW_MSG_CODE_LED_MODE_INVALID 0x00170000
-#define FW_MSG_CODE_PHY_DIAG_OK 0x00160000
-#define FW_MSG_CODE_PHY_DIAG_ERROR 0x00170000
+#define FW_MSG_CODE_PHY_DIAG_OK 0x00160000
+#define FW_MSG_CODE_PHY_DIAG_ERROR 0x00170000
#define FW_MSG_CODE_INIT_HW_FAILED_TO_ALLOCATE_PAGE 0x00040000
#define FW_MSG_CODE_INIT_HW_FAILED_BAD_STATE 0x00170000
#define FW_MSG_CODE_INIT_HW_FAILED_TO_SET_WINDOW 0x000d0000
#define FW_MSG_CODE_INIT_HW_FAILED_NO_IMAGE 0x000c0000
#define FW_MSG_CODE_INIT_HW_FAILED_VERSION_MISMATCH 0x00100000
-#define FW_MSG_CODE_TRANSCEIVER_DIAG_OK 0x00160000
-#define FW_MSG_CODE_TRANSCEIVER_DIAG_ERROR 0x00170000
+#define FW_MSG_CODE_TRANSCEIVER_DIAG_OK 0x00160000
+#define FW_MSG_CODE_TRANSCEIVER_DIAG_ERROR 0x00170000
#define FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT 0x00020000
-#define FW_MSG_CODE_TRANSCEIVER_BAD_BUFFER_SIZE 0x000f0000
-#define FW_MSG_CODE_GPIO_OK 0x00160000
-#define FW_MSG_CODE_GPIO_DIRECTION_ERR 0x00170000
+#define FW_MSG_CODE_TRANSCEIVER_BAD_BUFFER_SIZE 0x000f0000
+#define FW_MSG_CODE_GPIO_OK 0x00160000
+#define FW_MSG_CODE_GPIO_DIRECTION_ERR 0x00170000
#define FW_MSG_CODE_GPIO_CTRL_ERR 0x00020000
#define FW_MSG_CODE_GPIO_INVALID 0x000f0000
-#define FW_MSG_CODE_GPIO_INVALID_VALUE 0x00050000
+#define FW_MSG_CODE_GPIO_INVALID_VALUE 0x00050000
#define FW_MSG_CODE_BIST_TEST_INVALID 0x000f0000
+#define FW_MSG_CODE_EXTPHY_INVALID_IMAGE_HEADER 0x00700000
+#define FW_MSG_CODE_EXTPHY_INVALID_PHY_TYPE 0x00710000
+#define FW_MSG_CODE_EXTPHY_OPERATION_FAILED 0x00720000
+#define FW_MSG_CODE_EXTPHY_NO_PHY_DETECTED 0x00730000
+#define FW_MSG_CODE_RECOVERY_MODE 0x00740000
-/* mdump related response codes */
+ /* mdump related response codes */
#define FW_MSG_CODE_MDUMP_NO_IMAGE_FOUND 0x00010000
#define FW_MSG_CODE_MDUMP_ALLOC_FAILED 0x00020000
#define FW_MSG_CODE_MDUMP_INVALID_CMD 0x00030000
@@ -1454,7 +1596,7 @@ struct public_drv_mb {
u32 fw_mb_param;
- /* Resource Allocation params - MFW version support*/
+/* Resource Allocation params - MFW version support */
#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xFFFF0000
#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT 16
#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_MASK 0x0000FFFF
@@ -1523,6 +1665,7 @@ enum MFW_DRV_MSG_TYPE {
MFW_DRV_MSG_FAILURE_DETECTED,
MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE,
MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED,
+ MFW_DRV_MSG_EEE_NEGOTIATION_COMPLETE,
MFW_DRV_MSG_MAX
};
diff --git a/drivers/net/qede/base/nvm_cfg.h b/drivers/net/qede/base/nvm_cfg.h
index 8e9c08a7..4e588350 100644
--- a/drivers/net/qede/base/nvm_cfg.h
+++ b/drivers/net/qede/base/nvm_cfg.h
@@ -13,13 +13,21 @@
* Description: NVM config file - Generated file from nvm cfg excel.
* DO NOT MODIFY !!!
*
- * Created: 5/9/2016
+ * Created: 12/15/2016
*
****************************************************************************/
#ifndef NVM_CFG_H
#define NVM_CFG_H
+#define NVM_CFG_version 0x81805
+
+#define NVM_CFG_new_option_seq 15
+
+#define NVM_CFG_removed_option_seq 0
+
+#define NVM_CFG_updated_value_seq 1
+
struct nvm_cfg_mac_address {
u32 mac_addr_hi;
#define NVM_CFG_MAC_ADDRESS_HI_MASK 0x0000FFFF
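The MAC address is split across two dwords; a sketch reassembling it into bytes. The mac_addr_lo member and the exact byte order are assumptions here (following the usual ecore convention that the upper two bytes of the MAC live in the low 16 bits of mac_addr_hi):

#include <stdint.h>

static void nvm_cfg_mac_to_bytes(uint32_t mac_hi, uint32_t mac_lo,
				 uint8_t mac[6])
{
	/* Assumed layout: hi[15:0] = bytes 0-1, lo[31:0] = bytes 2-5 */
	mac[0] = (uint8_t)(mac_hi >> 8);
	mac[1] = (uint8_t)mac_hi;
	mac[2] = (uint8_t)(mac_lo >> 24);
	mac[3] = (uint8_t)(mac_lo >> 16);
	mac[4] = (uint8_t)(mac_lo >> 8);
	mac[5] = (uint8_t)mac_lo;
}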
@@ -64,10 +72,12 @@ struct nvm_cfg1_glob {
#define NVM_CFG1_GLOB_ENABLE_ATC_OFFSET 30
#define NVM_CFG1_GLOB_ENABLE_ATC_DISABLED 0x0
#define NVM_CFG1_GLOB_ENABLE_ATC_ENABLED 0x1
- #define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_MASK 0x80000000
- #define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_OFFSET 31
- #define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_DISABLED 0x0
- #define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_ENABLED 0x1
+ #define NVM_CFG1_GLOB_RESERVED__M_WAS_CLOCK_SLOWDOWN_MASK \
+ 0x80000000
+ #define NVM_CFG1_GLOB_RESERVED__M_WAS_CLOCK_SLOWDOWN_OFFSET 31
+ #define NVM_CFG1_GLOB_RESERVED__M_WAS_CLOCK_SLOWDOWN_DISABLED \
+ 0x0
+ #define NVM_CFG1_GLOB_RESERVED__M_WAS_CLOCK_SLOWDOWN_ENABLED 0x1
u32 engineering_change[3]; /* 0x4 */
u32 manufacturing_id; /* 0x10 */
u32 serial_number[4]; /* 0x14 */
@@ -144,6 +154,7 @@ struct nvm_cfg1_glob {
#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G 0xC
#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G 0xD
#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G 0xE
+ #define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G 0xF
#define NVM_CFG1_GLOB_MPS10_ENFORCE_TX_FIR_CFG_MASK 0x00000100
#define NVM_CFG1_GLOB_MPS10_ENFORCE_TX_FIR_CFG_OFFSET 8
#define NVM_CFG1_GLOB_MPS10_ENFORCE_TX_FIR_CFG_DISABLED 0x0
@@ -241,6 +252,11 @@ struct nvm_cfg1_glob {
#define NVM_CFG1_GLOB_PLDM_SENSOR_MODE_INTERNAL 0x0
#define NVM_CFG1_GLOB_PLDM_SENSOR_MODE_EXTERNAL 0x1
#define NVM_CFG1_GLOB_PLDM_SENSOR_MODE_BOTH 0x2
+ /* ROL enable */
+ #define NVM_CFG1_GLOB_RESET_ON_LAN_MASK 0x80000000
+ #define NVM_CFG1_GLOB_RESET_ON_LAN_OFFSET 31
+ #define NVM_CFG1_GLOB_RESET_ON_LAN_DISABLED 0x0
+ #define NVM_CFG1_GLOB_RESET_ON_LAN_ENABLED 0x1
u32 f_lane_cfg1; /* 0x38 */
#define NVM_CFG1_GLOB_RX_LANE0_SWAP_MASK 0x0000000F
#define NVM_CFG1_GLOB_RX_LANE0_SWAP_OFFSET 0
@@ -469,6 +485,15 @@ struct nvm_cfg1_glob {
#define NVM_CFG1_GLOB_MANUF3_VER_OFFSET 18
#define NVM_CFG1_GLOB_MANUF4_VER_MASK 0x3F000000
#define NVM_CFG1_GLOB_MANUF4_VER_OFFSET 24
+ /* Select package id method */
+ #define NVM_CFG1_GLOB_NCSI_PACKAGE_ID_IO_MASK 0x40000000
+ #define NVM_CFG1_GLOB_NCSI_PACKAGE_ID_IO_OFFSET 30
+ #define NVM_CFG1_GLOB_NCSI_PACKAGE_ID_IO_NVRAM 0x0
+ #define NVM_CFG1_GLOB_NCSI_PACKAGE_ID_IO_IO_PINS 0x1
+ #define NVM_CFG1_GLOB_RECOVERY_MODE_MASK 0x80000000
+ #define NVM_CFG1_GLOB_RECOVERY_MODE_OFFSET 31
+ #define NVM_CFG1_GLOB_RECOVERY_MODE_DISABLED 0x0
+ #define NVM_CFG1_GLOB_RECOVERY_MODE_ENABLED 0x1
u32 manufacture_time; /* 0x70 */
#define NVM_CFG1_GLOB_MANUF0_TIME_MASK 0x0000003F
#define NVM_CFG1_GLOB_MANUF0_TIME_OFFSET 0
@@ -476,6 +501,14 @@ struct nvm_cfg1_glob {
#define NVM_CFG1_GLOB_MANUF1_TIME_OFFSET 6
#define NVM_CFG1_GLOB_MANUF2_TIME_MASK 0x0003F000
#define NVM_CFG1_GLOB_MANUF2_TIME_OFFSET 12
+ /* Max MSIX for Ethernet in default mode */
+ #define NVM_CFG1_GLOB_MAX_MSIX_MASK 0x03FC0000
+ #define NVM_CFG1_GLOB_MAX_MSIX_OFFSET 18
+ /* PF Mapping */
+ #define NVM_CFG1_GLOB_PF_MAPPING_MASK 0x0C000000
+ #define NVM_CFG1_GLOB_PF_MAPPING_OFFSET 26
+ #define NVM_CFG1_GLOB_PF_MAPPING_CONTINUOUS 0x0
+ #define NVM_CFG1_GLOB_PF_MAPPING_FIXED 0x1
u32 led_global_settings; /* 0x74 */
#define NVM_CFG1_GLOB_LED_SWAP_0_MASK 0x0000000F
#define NVM_CFG1_GLOB_LED_SWAP_0_OFFSET 0
@@ -485,6 +518,47 @@ struct nvm_cfg1_glob {
#define NVM_CFG1_GLOB_LED_SWAP_2_OFFSET 8
#define NVM_CFG1_GLOB_LED_SWAP_3_MASK 0x0000F000
#define NVM_CFG1_GLOB_LED_SWAP_3_OFFSET 12
+ /* Max. continuous operating temperature */
+ #define NVM_CFG1_GLOB_MAX_CONT_OPERATING_TEMP_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_MAX_CONT_OPERATING_TEMP_OFFSET 16
+ /* GPIO which triggers run-time port swap according to the map
+ * specified in option 205
+ */
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_OFFSET 24
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_NA 0x0
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO0 0x1
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO1 0x2
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO2 0x3
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO3 0x4
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO4 0x5
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO5 0x6
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO6 0x7
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO7 0x8
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO8 0x9
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO9 0xA
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO10 0xB
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO11 0xC
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO12 0xD
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO13 0xE
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO14 0xF
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO15 0x10
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO16 0x11
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO17 0x12
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO18 0x13
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO19 0x14
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO20 0x15
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO21 0x16
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO22 0x17
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO23 0x18
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO24 0x19
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO25 0x1A
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO26 0x1B
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO27 0x1C
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO28 0x1D
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO29 0x1E
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO30 0x1F
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO31 0x20
u32 generic_cont1; /* 0x78 */
#define NVM_CFG1_GLOB_AVS_DAC_CODE_MASK 0x000003FF
#define NVM_CFG1_GLOB_AVS_DAC_CODE_OFFSET 0
@@ -496,6 +570,25 @@ struct nvm_cfg1_glob {
#define NVM_CFG1_GLOB_LANE2_SWAP_OFFSET 14
#define NVM_CFG1_GLOB_LANE3_SWAP_MASK 0x00030000
#define NVM_CFG1_GLOB_LANE3_SWAP_OFFSET 16
+ /* Enable option 195 - Overriding the PCIe Preset value */
+ #define NVM_CFG1_GLOB_OVERRIDE_PCIE_PRESET_EQUAL_MASK 0x00040000
+ #define NVM_CFG1_GLOB_OVERRIDE_PCIE_PRESET_EQUAL_OFFSET 18
+ #define NVM_CFG1_GLOB_OVERRIDE_PCIE_PRESET_EQUAL_DISABLED 0x0
+ #define NVM_CFG1_GLOB_OVERRIDE_PCIE_PRESET_EQUAL_ENABLED 0x1
+ /* PCIe Preset value - applies only if option 194 is enabled */
+ #define NVM_CFG1_GLOB_PCIE_PRESET_VALUE_MASK 0x00780000
+ #define NVM_CFG1_GLOB_PCIE_PRESET_VALUE_OFFSET 19
+ /* Port mapping to be used when the run-time GPIO for port-swap is
+ * defined and set.
+ */
+ #define NVM_CFG1_GLOB_RUNTIME_PORT0_SWAP_MAP_MASK 0x01800000
+ #define NVM_CFG1_GLOB_RUNTIME_PORT0_SWAP_MAP_OFFSET 23
+ #define NVM_CFG1_GLOB_RUNTIME_PORT1_SWAP_MAP_MASK 0x06000000
+ #define NVM_CFG1_GLOB_RUNTIME_PORT1_SWAP_MAP_OFFSET 25
+ #define NVM_CFG1_GLOB_RUNTIME_PORT2_SWAP_MAP_MASK 0x18000000
+ #define NVM_CFG1_GLOB_RUNTIME_PORT2_SWAP_MAP_OFFSET 27
+ #define NVM_CFG1_GLOB_RUNTIME_PORT3_SWAP_MAP_MASK 0x60000000
+ #define NVM_CFG1_GLOB_RUNTIME_PORT3_SWAP_MAP_OFFSET 29
u32 mbi_version; /* 0x7C */
#define NVM_CFG1_GLOB_MBI_VERSION_0_MASK 0x000000FF
#define NVM_CFG1_GLOB_MBI_VERSION_0_OFFSET 0
@@ -503,6 +596,44 @@ struct nvm_cfg1_glob {
#define NVM_CFG1_GLOB_MBI_VERSION_1_OFFSET 8
#define NVM_CFG1_GLOB_MBI_VERSION_2_MASK 0x00FF0000
#define NVM_CFG1_GLOB_MBI_VERSION_2_OFFSET 16
+ /* If set to other than NA, 0 - Normal operation, 1 - Thermal event
+ * occurred
+ */
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_OFFSET 24
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_NA 0x0
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO0 0x1
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO1 0x2
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO2 0x3
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO3 0x4
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO4 0x5
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO5 0x6
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO6 0x7
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO7 0x8
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO8 0x9
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO9 0xA
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO10 0xB
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO11 0xC
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO12 0xD
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO13 0xE
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO14 0xF
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO15 0x10
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO16 0x11
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO17 0x12
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO18 0x13
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO19 0x14
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO20 0x15
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO21 0x16
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO22 0x17
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO23 0x18
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO24 0x19
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO25 0x1A
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO26 0x1B
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO27 0x1C
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO28 0x1D
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO29 0x1E
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO30 0x1F
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO31 0x20
u32 mbi_date; /* 0x80 */
u32 misc_sig; /* 0x84 */
/* Define the GPIO mapping to switch i2c mux */
@@ -543,6 +674,81 @@ struct nvm_cfg1_glob {
#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO29 0x1E
#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO30 0x1F
#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO31 0x20
+ /* Interrupt signal used for SMBus/I2C management interface
+ * 0 = Interrupt event occurred
+ * 1 = Normal
+ */
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_OFFSET 16
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_NA 0x0
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO0 0x1
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO1 0x2
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO2 0x3
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO3 0x4
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO4 0x5
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO5 0x6
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO6 0x7
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO7 0x8
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO8 0x9
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO9 0xA
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO10 0xB
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO11 0xC
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO12 0xD
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO13 0xE
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO14 0xF
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO15 0x10
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO16 0x11
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO17 0x12
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO18 0x13
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO19 0x14
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO20 0x15
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO21 0x16
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO22 0x17
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO23 0x18
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO24 0x19
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO25 0x1A
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO26 0x1B
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO27 0x1C
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO28 0x1D
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO29 0x1E
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO30 0x1F
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO31 0x20
+ /* Set aLOM FAN on GPIO */
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_OFFSET 24
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_NA 0x0
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO0 0x1
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO1 0x2
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO2 0x3
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO3 0x4
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO4 0x5
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO5 0x6
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO6 0x7
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO7 0x8
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO8 0x9
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO9 0xA
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO10 0xB
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO11 0xC
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO12 0xD
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO13 0xE
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO14 0xF
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO15 0x10
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO16 0x11
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO17 0x12
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO18 0x13
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO19 0x14
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO20 0x15
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO21 0x16
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO22 0x17
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO23 0x18
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO24 0x19
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO25 0x1A
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO26 0x1B
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO27 0x1C
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO28 0x1D
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO29 0x1E
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO30 0x1F
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO31 0x20
u32 device_capabilities; /* 0x88 */
#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET 0x1
#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_FCOE 0x2
@@ -578,11 +784,263 @@ struct nvm_cfg1_glob {
#define NVM_CFG1_GLOB_MULTI_NETWORK_MODES_CAPABILITY_2X50G 0x40
#define NVM_CFG1_GLOB_MULTI_NETWORK_MODES_CAPABILITY_BB_1X100G \
0x80
- u32 reserved[41]; /* 0x9C */
+ #define NVM_CFG1_GLOB_MULTI_NETWORK_MODES_CAPABILITY_2X10G 0x100
+ /* @DPDK */
+ u32 reserved1[12]; /* 0x9C */
+ u32 oem1_number[8]; /* 0xCC */
+ u32 oem2_number[8]; /* 0xEC */
+ u32 mps25_active_txfir_pre; /* 0x10C */
+ #define NVM_CFG1_GLOB_LANE0_ACT_TXFIR_PRE_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_LANE0_ACT_TXFIR_PRE_OFFSET 0
+ #define NVM_CFG1_GLOB_LANE1_ACT_TXFIR_PRE_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_LANE1_ACT_TXFIR_PRE_OFFSET 8
+ #define NVM_CFG1_GLOB_LANE2_ACT_TXFIR_PRE_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_LANE2_ACT_TXFIR_PRE_OFFSET 16
+ #define NVM_CFG1_GLOB_LANE3_ACT_TXFIR_PRE_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_LANE3_ACT_TXFIR_PRE_OFFSET 24
+ u32 mps25_active_txfir_main; /* 0x110 */
+ #define NVM_CFG1_GLOB_LANE0_ACT_TXFIR_MAIN_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_LANE0_ACT_TXFIR_MAIN_OFFSET 0
+ #define NVM_CFG1_GLOB_LANE1_ACT_TXFIR_MAIN_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_LANE1_ACT_TXFIR_MAIN_OFFSET 8
+ #define NVM_CFG1_GLOB_LANE2_ACT_TXFIR_MAIN_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_LANE2_ACT_TXFIR_MAIN_OFFSET 16
+ #define NVM_CFG1_GLOB_LANE3_ACT_TXFIR_MAIN_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_LANE3_ACT_TXFIR_MAIN_OFFSET 24
+ u32 mps25_active_txfir_post; /* 0x114 */
+ #define NVM_CFG1_GLOB_LANE0_ACT_TXFIR_POST_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_LANE0_ACT_TXFIR_POST_OFFSET 0
+ #define NVM_CFG1_GLOB_LANE1_ACT_TXFIR_POST_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_LANE1_ACT_TXFIR_POST_OFFSET 8
+ #define NVM_CFG1_GLOB_LANE2_ACT_TXFIR_POST_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_LANE2_ACT_TXFIR_POST_OFFSET 16
+ #define NVM_CFG1_GLOB_LANE3_ACT_TXFIR_POST_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_LANE3_ACT_TXFIR_POST_OFFSET 24
+ u32 features; /* 0x118 */
+ /* Set the Aux Fan on temperature */
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_VALUE_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_VALUE_OFFSET 0
+ /* Set NC-SI package ID */
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_OFFSET 8
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_NA 0x0
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO0 0x1
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO1 0x2
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO2 0x3
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO3 0x4
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO4 0x5
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO5 0x6
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO6 0x7
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO7 0x8
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO8 0x9
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO9 0xA
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO10 0xB
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO11 0xC
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO12 0xD
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO13 0xE
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO14 0xF
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO15 0x10
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO16 0x11
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO17 0x12
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO18 0x13
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO19 0x14
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO20 0x15
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO21 0x16
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO22 0x17
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO23 0x18
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO24 0x19
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO25 0x1A
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO26 0x1B
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO27 0x1C
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO28 0x1D
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO29 0x1E
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO30 0x1F
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO31 0x20
+ /* PMBUS Clock GPIO */
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_OFFSET 16
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_NA 0x0
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO0 0x1
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO1 0x2
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO2 0x3
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO3 0x4
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO4 0x5
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO5 0x6
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO6 0x7
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO7 0x8
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO8 0x9
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO9 0xA
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO10 0xB
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO11 0xC
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO12 0xD
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO13 0xE
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO14 0xF
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO15 0x10
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO16 0x11
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO17 0x12
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO18 0x13
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO19 0x14
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO20 0x15
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO21 0x16
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO22 0x17
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO23 0x18
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO24 0x19
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO25 0x1A
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO26 0x1B
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO27 0x1C
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO28 0x1D
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO29 0x1E
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO30 0x1F
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO31 0x20
+ /* PMBUS Data GPIO */
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_OFFSET 24
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_NA 0x0
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO0 0x1
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO1 0x2
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO2 0x3
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO3 0x4
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO4 0x5
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO5 0x6
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO6 0x7
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO7 0x8
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO8 0x9
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO9 0xA
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO10 0xB
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO11 0xC
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO12 0xD
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO13 0xE
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO14 0xF
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO15 0x10
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO16 0x11
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO17 0x12
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO18 0x13
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO19 0x14
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO20 0x15
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO21 0x16
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO22 0x17
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO23 0x18
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO24 0x19
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO25 0x1A
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO26 0x1B
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO27 0x1C
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO28 0x1D
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO29 0x1E
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO30 0x1F
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO31 0x20
+ u32 tx_rx_eq_25g_hlpc; /* 0x11C */
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_25G_HLPC_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_25G_HLPC_OFFSET 0
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_25G_HLPC_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_25G_HLPC_OFFSET 8
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_25G_HLPC_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_25G_HLPC_OFFSET 16
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_25G_HLPC_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_25G_HLPC_OFFSET 24
+ u32 tx_rx_eq_25g_llpc; /* 0x120 */
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_25G_LLPC_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_25G_LLPC_OFFSET 0
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_25G_LLPC_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_25G_LLPC_OFFSET 8
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_25G_LLPC_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_25G_LLPC_OFFSET 16
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_25G_LLPC_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_25G_LLPC_OFFSET 24
+ u32 tx_rx_eq_25g_ac; /* 0x124 */
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_25G_AC_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_25G_AC_OFFSET 0
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_25G_AC_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_25G_AC_OFFSET 8
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_25G_AC_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_25G_AC_OFFSET 16
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_25G_AC_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_25G_AC_OFFSET 24
+ u32 tx_rx_eq_10g_pc; /* 0x128 */
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_10G_PC_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_10G_PC_OFFSET 0
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_10G_PC_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_10G_PC_OFFSET 8
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_10G_PC_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_10G_PC_OFFSET 16
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_10G_PC_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_10G_PC_OFFSET 24
+ u32 tx_rx_eq_10g_ac; /* 0x12C */
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_10G_AC_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_10G_AC_OFFSET 0
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_10G_AC_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_10G_AC_OFFSET 8
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_10G_AC_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_10G_AC_OFFSET 16
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_10G_AC_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_10G_AC_OFFSET 24
+ u32 tx_rx_eq_1g; /* 0x130 */
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_1G_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_1G_OFFSET 0
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_1G_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_1G_OFFSET 8
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_1G_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_1G_OFFSET 16
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_1G_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_1G_OFFSET 24
+ u32 tx_rx_eq_25g_bt; /* 0x134 */
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_25G_BT_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_25G_BT_OFFSET 0
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_25G_BT_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_25G_BT_OFFSET 8
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_25G_BT_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_25G_BT_OFFSET 16
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_25G_BT_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_25G_BT_OFFSET 24
+ u32 tx_rx_eq_10g_bt; /* 0x138 */
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_10G_BT_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_10G_BT_OFFSET 0
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_10G_BT_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_10G_BT_OFFSET 8
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_10G_BT_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_10G_BT_OFFSET 16
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_10G_BT_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_10G_BT_OFFSET 24
+ u32 generic_cont4; /* 0x13C */
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_OFFSET 0
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_NA 0x0
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO0 0x1
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO1 0x2
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO2 0x3
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO3 0x4
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO4 0x5
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO5 0x6
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO6 0x7
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO7 0x8
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO8 0x9
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO9 0xA
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO10 0xB
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO11 0xC
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO12 0xD
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO13 0xE
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO14 0xF
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO15 0x10
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO16 0x11
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO17 0x12
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO18 0x13
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO19 0x14
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO20 0x15
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO21 0x16
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO22 0x17
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO23 0x18
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO24 0x19
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO25 0x1A
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO26 0x1B
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO27 0x1C
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO28 0x1D
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO29 0x1E
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO30 0x1F
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO31 0x20
+ u32 reserved[58]; /* 0x140 */
};
struct nvm_cfg1_path {
- u32 reserved[30]; /* 0x0 */
+ u32 reserved[1]; /* 0x0 */
};
struct nvm_cfg1_port {
@@ -621,6 +1079,44 @@ struct nvm_cfg1_port {
#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ETHERNET 0x1
#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_FCOE 0x2
#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ISCSI 0x4
+ /* GPIO used for HW reset of the PHY. In case it is the same for all
+ * ports, the same value must be set for all ports
+ */
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_MASK 0xFF000000
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_OFFSET 24
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_NA 0x0
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO0 0x1
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO1 0x2
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO2 0x3
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO3 0x4
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO4 0x5
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO5 0x6
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO6 0x7
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO7 0x8
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO8 0x9
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO9 0xA
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO10 0xB
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO11 0xC
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO12 0xD
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO13 0xE
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO14 0xF
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO15 0x10
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO16 0x11
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO17 0x12
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO18 0x13
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO19 0x14
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO20 0x15
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO21 0x16
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO22 0x17
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO23 0x18
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO24 0x19
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO25 0x1A
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO26 0x1B
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO27 0x1C
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO28 0x1D
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO29 0x1E
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO30 0x1F
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO31 0x20
u32 pcie_cfg; /* 0xC */
#define NVM_CFG1_PORT_RESERVED15_MASK 0x00000007
#define NVM_CFG1_PORT_RESERVED15_OFFSET 0
@@ -697,6 +1193,16 @@ struct nvm_cfg1_port {
#define NVM_CFG1_PORT_FEC_FORCE_MODE_NONE 0x0
#define NVM_CFG1_PORT_FEC_FORCE_MODE_FIRECODE 0x1
#define NVM_CFG1_PORT_FEC_FORCE_MODE_RS 0x2
+ #define NVM_CFG1_PORT_FEC_FORCE_MODE_AUTO 0x7
+ #define NVM_CFG1_PORT_FEC_AN_MODE_MASK 0x00700000
+ #define NVM_CFG1_PORT_FEC_AN_MODE_OFFSET 20
+ #define NVM_CFG1_PORT_FEC_AN_MODE_NONE 0x0
+ #define NVM_CFG1_PORT_FEC_AN_MODE_10G_FIRECODE 0x1
+ #define NVM_CFG1_PORT_FEC_AN_MODE_25G_FIRECODE 0x2
+ #define NVM_CFG1_PORT_FEC_AN_MODE_10G_AND_25G_FIRECODE 0x3
+ #define NVM_CFG1_PORT_FEC_AN_MODE_25G_RS 0x4
+ #define NVM_CFG1_PORT_FEC_AN_MODE_25G_FIRECODE_AND_RS 0x5
+ #define NVM_CFG1_PORT_FEC_AN_MODE_ALL 0x6
u32 phy_cfg; /* 0x1C */
#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_MASK 0x0000FFFF
#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_OFFSET 0
@@ -736,9 +1242,16 @@ struct nvm_cfg1_port {
#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_MASK 0x000000FF
#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_OFFSET 0
#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_NONE 0x0
- #define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_BCM84844 0x1
+ #define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_BCM8485X 0x1
#define NVM_CFG1_PORT_EXTERNAL_PHY_ADDRESS_MASK 0x0000FF00
#define NVM_CFG1_PORT_EXTERNAL_PHY_ADDRESS_OFFSET 8
+ /* EEE power saving mode */
+ #define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_MASK 0x00FF0000
+ #define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_OFFSET 16
+ #define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_DISABLED 0x0
+ #define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_BALANCED 0x1
+ #define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_AGGRESSIVE 0x2
+ #define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_LOW_LATENCY 0x3
u32 mba_cfg1; /* 0x28 */
#define NVM_CFG1_PORT_PREBOOT_OPROM_MASK 0x00000001
#define NVM_CFG1_PORT_PREBOOT_OPROM_OFFSET 0
@@ -970,6 +1483,7 @@ struct nvm_cfg1_port {
#define NVM_CFG1_PORT_MNM_10G_FEC_FORCE_MODE_NONE 0x0
#define NVM_CFG1_PORT_MNM_10G_FEC_FORCE_MODE_FIRECODE 0x1
#define NVM_CFG1_PORT_MNM_10G_FEC_FORCE_MODE_RS 0x2
+ #define NVM_CFG1_PORT_MNM_10G_FEC_FORCE_MODE_AUTO 0x7
u32 mnm_25g_cap; /* 0x58 */
#define NVM_CFG1_PORT_MNM_25G_DRV_SPEED_CAPABILITY_MASK_MASK \
0x0000FFFF
@@ -1047,6 +1561,7 @@ struct nvm_cfg1_port {
#define NVM_CFG1_PORT_MNM_25G_FEC_FORCE_MODE_NONE 0x0
#define NVM_CFG1_PORT_MNM_25G_FEC_FORCE_MODE_FIRECODE 0x1
#define NVM_CFG1_PORT_MNM_25G_FEC_FORCE_MODE_RS 0x2
+ #define NVM_CFG1_PORT_MNM_25G_FEC_FORCE_MODE_AUTO 0x7
u32 mnm_40g_cap; /* 0x64 */
#define NVM_CFG1_PORT_MNM_40G_DRV_SPEED_CAPABILITY_MASK_MASK \
0x0000FFFF
@@ -1124,6 +1639,7 @@ struct nvm_cfg1_port {
#define NVM_CFG1_PORT_MNM_40G_FEC_FORCE_MODE_NONE 0x0
#define NVM_CFG1_PORT_MNM_40G_FEC_FORCE_MODE_FIRECODE 0x1
#define NVM_CFG1_PORT_MNM_40G_FEC_FORCE_MODE_RS 0x2
+ #define NVM_CFG1_PORT_MNM_40G_FEC_FORCE_MODE_AUTO 0x7
u32 mnm_50g_cap; /* 0x70 */
#define NVM_CFG1_PORT_MNM_50G_DRV_SPEED_CAPABILITY_MASK_MASK \
0x0000FFFF
@@ -1203,6 +1719,7 @@ struct nvm_cfg1_port {
#define NVM_CFG1_PORT_MNM_50G_FEC_FORCE_MODE_NONE 0x0
#define NVM_CFG1_PORT_MNM_50G_FEC_FORCE_MODE_FIRECODE 0x1
#define NVM_CFG1_PORT_MNM_50G_FEC_FORCE_MODE_RS 0x2
+ #define NVM_CFG1_PORT_MNM_50G_FEC_FORCE_MODE_AUTO 0x7
u32 mnm_100g_cap; /* 0x7C */
#define NVM_CFG1_PORT_MNM_100G_DRV_SPEED_CAP_MASK_MASK \
0x0000FFFF
@@ -1277,6 +1794,7 @@ struct nvm_cfg1_port {
#define NVM_CFG1_PORT_MNM_100G_FEC_FORCE_MODE_NONE 0x0
#define NVM_CFG1_PORT_MNM_100G_FEC_FORCE_MODE_FIRECODE 0x1
#define NVM_CFG1_PORT_MNM_100G_FEC_FORCE_MODE_RS 0x2
+ #define NVM_CFG1_PORT_MNM_100G_FEC_FORCE_MODE_AUTO 0x7
u32 reserved[116]; /* 0x88 */
};
@@ -1387,12 +1905,17 @@ struct nvm_cfg1_func {
#define NVM_CFG1_FUNC_PREBOOT_VLAN_VALUE_OFFSET 0
#define NVM_CFG1_FUNC_PREBOOT_VLAN_MASK 0x00010000
#define NVM_CFG1_FUNC_PREBOOT_VLAN_OFFSET 16
+ #define NVM_CFG1_FUNC_NPAR_ENABLED_PROTOCOL_MASK 0x001E0000
+ #define NVM_CFG1_FUNC_NPAR_ENABLED_PROTOCOL_OFFSET 17
+ #define NVM_CFG1_FUNC_NPAR_ENABLED_PROTOCOL_ETHERNET 0x1
+ #define NVM_CFG1_FUNC_NPAR_ENABLED_PROTOCOL_FCOE 0x2
+ #define NVM_CFG1_FUNC_NPAR_ENABLED_PROTOCOL_ISCSI 0x4
u32 reserved[8]; /* 0x30 */
};
struct nvm_cfg1 {
struct nvm_cfg1_glob glob; /* 0x0 */
- struct nvm_cfg1_path path[MCP_GLOB_PATH_MAX]; /* 0x140 */
+ struct nvm_cfg1_path path[MCP_GLOB_PATH_MAX]; /* 0x228 */
struct nvm_cfg1_port port[MCP_GLOB_PORT_MAX]; /* 0x230 */
struct nvm_cfg1_func func[MCP_GLOB_FUNC_MAX]; /* 0xB90 */
};
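Every nvm_cfg field in these structs is accessed through a _MASK/_OFFSET pair on its carrier dword. A minimal sketch of the extract/insert pattern, assuming `cfg` is the u32 that carries the EXT_PHY_RESET field; the helper names are illustrative (the driver open-codes this pattern rather than using such helpers):

	/* Extract: isolate the field with its mask, shift down by its offset. */
	static u32 nvm_get_field(u32 cfg, u32 mask, u32 offset)
	{
		return (cfg & mask) >> offset;
	}

	/* Insert: clear the field, then OR in the new value shifted into place. */
	static u32 nvm_set_field(u32 cfg, u32 mask, u32 offset, u32 val)
	{
		return (cfg & ~mask) | ((val << offset) & mask);
	}

	/* e.g. selecting GPIO4 as the external PHY reset pin: */
	static u32 select_ext_phy_reset_gpio4(u32 cfg)
	{
		return nvm_set_field(cfg, NVM_CFG1_PORT_EXT_PHY_RESET_MASK,
				     NVM_CFG1_PORT_EXT_PHY_RESET_OFFSET,
				     NVM_CFG1_PORT_EXT_PHY_RESET_GPIO4);
	}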
diff --git a/drivers/net/qede/base/reg_addr.h b/drivers/net/qede/base/reg_addr.h
index 3c369aa5..f9920f37 100644
--- a/drivers/net/qede/base/reg_addr.h
+++ b/drivers/net/qede/base/reg_addr.h
@@ -1141,3 +1141,62 @@
#define NIG_REG_LLH_TAGMAC_DEF_PF_VECTOR 0x50196cUL
#define PRS_REG_MSG_INFO 0x1f0a1cUL
#define BAR0_MAP_REG_XSDM_RAM 0x1e00000UL
+
+/* 8.18.7.0 FW */
+#define BRB_REG_INT_MASK_10 0x3401b8UL
+
+#define IGU_REG_PRODUCER_MEMORY 0x182000UL
+#define IGU_REG_CONSUMER_MEM 0x183000UL
+
+#define CDU_REG_CCFC_CTX_VALID0 0x580400UL
+#define CDU_REG_CCFC_CTX_VALID1 0x580404UL
+#define CDU_REG_TCFC_CTX_VALID0 0x580408UL
+
+#define DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2_E5 0x10092cUL
+#define DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2_E5 0x100930UL
+#define MISCS_REG_RESET_PL_HV_2_K2_E5 0x009150UL
+#define CNIG_REG_NW_PORT_MODE_BB 0x218200UL
+#define CNIG_REG_PMEG_IF_CMD_BB 0x21821cUL
+#define CNIG_REG_PMEG_IF_ADDR_BB 0x218224UL
+#define CNIG_REG_PMEG_IF_WRDATA_BB 0x218228UL
+#define NWM_REG_MAC0_K2_E5 0x800400UL
+#define CNIG_REG_NIG_PORT0_CONF_K2_E5 0x218200UL
+#define CNIG_REG_NIG_PORT0_CONF_NIG_PORT_ENABLE_0_K2_E5_SHIFT 0
+#define CNIG_REG_NIG_PORT0_CONF_NIG_PORT_NWM_PORT_MAP_0_K2_E5_SHIFT 1
+#define CNIG_REG_NIG_PORT0_CONF_NIG_PORT_RATE_0_K2_E5_SHIFT 3
+#define ETH_MAC_REG_XIF_MODE_K2_E5 0x000080UL
+#define ETH_MAC_REG_XIF_MODE_XGMII_K2_E5_SHIFT 0
+#define ETH_MAC_REG_FRM_LENGTH_K2_E5 0x000014UL
+#define ETH_MAC_REG_FRM_LENGTH_FRM_LENGTH_K2_E5_SHIFT 0
+#define ETH_MAC_REG_TX_IPG_LENGTH_K2_E5 0x000044UL
+#define ETH_MAC_REG_TX_IPG_LENGTH_TXIPG_K2_E5_SHIFT 0
+#define ETH_MAC_REG_RX_FIFO_SECTIONS_K2_E5 0x00001cUL
+#define ETH_MAC_REG_RX_FIFO_SECTIONS_RX_SECTION_FULL_K2_E5_SHIFT 0
+#define ETH_MAC_REG_TX_FIFO_SECTIONS_K2_E5 0x000020UL
+#define ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_EMPTY_K2_E5_SHIFT 16
+#define ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_FULL_K2_E5_SHIFT 0
+#define ETH_MAC_REG_COMMAND_CONFIG_K2_E5 0x000008UL
+#define MISC_REG_XMAC_CORE_PORT_MODE_BB 0x008c08UL
+#define MISC_REG_XMAC_PHY_PORT_MODE_BB 0x008c04UL
+#define XMAC_REG_MODE_BB 0x210008UL
+#define XMAC_REG_RX_MAX_SIZE_BB 0x210040UL
+#define XMAC_REG_TX_CTRL_LO_BB 0x210020UL
+#define XMAC_REG_CTRL_BB 0x210000UL
+#define XMAC_REG_CTRL_TX_EN_BB (0x1 << 0)
+#define XMAC_REG_CTRL_RX_EN_BB (0x1 << 1)
+#define XMAC_REG_RX_CTRL_BB 0x210030UL
+#define XMAC_REG_RX_CTRL_PROCESS_VARIABLE_PREAMBLE_BB (0x1 << 12)
+
+#define PGLUE_B_REG_PGL_ADDR_E8_F0_K2_E5 0x2aaf98UL
+#define PGLUE_B_REG_PGL_ADDR_EC_F0_K2_E5 0x2aaf9cUL
+#define PGLUE_B_REG_PGL_ADDR_F0_F0_K2_E5 0x2aafa0UL
+#define PGLUE_B_REG_PGL_ADDR_F4_F0_K2_E5 0x2aafa4UL
+#define PGLUE_B_REG_PGL_ADDR_88_F0_BB 0x2aa404UL
+#define PGLUE_B_REG_PGL_ADDR_8C_F0_BB 0x2aa408UL
+#define PGLUE_B_REG_PGL_ADDR_90_F0_BB 0x2aa40cUL
+#define PGLUE_B_REG_PGL_ADDR_94_F0_BB 0x2aa410UL
+#define MISCS_REG_FUNCTION_HIDE_BB_K2 0x0096f0UL
+#define PCIE_REG_PRTY_MASK_K2_E5 0x0547b4UL
+#define PGLUE_B_REG_VF_BAR0_SIZE_K2_E5 0x2aaeb4UL
+
+#define PRS_REG_OUTPUT_FORMAT_4_0_BB_K2 0x1f099cUL
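The XMAC_REG_CTRL_BB flags above are single-bit enables, so the usual way to set them is a read-modify-write through the ecore register accessors. A minimal sketch, assuming a valid p_hwfn/p_ptt pair and that ecore_rd()/ecore_wr() from ecore_hw.h are in scope; this is an illustration of the bit definitions, not a sequence taken from the driver:

	/* Enable XMAC TX and RX on Big Bear (BB) parts without disturbing
	 * the other control bits: read the register, OR in the enables,
	 * write it back.
	 */
	static void xmac_enable_tx_rx_bb(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt)
	{
		u32 ctrl = ecore_rd(p_hwfn, p_ptt, XMAC_REG_CTRL_BB);

		ctrl |= XMAC_REG_CTRL_TX_EN_BB | XMAC_REG_CTRL_RX_EN_BB;
		ecore_wr(p_hwfn, p_ptt, XMAC_REG_CTRL_BB, ctrl);
	}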