Diffstat (limited to 'drivers/net/qede')
-rw-r--r--  drivers/net/qede/Makefile | 2
-rw-r--r--  drivers/net/qede/base/bcm_osal.c | 2
-rw-r--r--  drivers/net/qede/base/bcm_osal.h | 3
-rw-r--r--  drivers/net/qede/base/common_hsi.h | 15
-rw-r--r--  drivers/net/qede/base/ecore.h | 62
-rw-r--r--  drivers/net/qede/base/ecore_cxt.c | 15
-rw-r--r--  drivers/net/qede/base/ecore_dcbx.c | 99
-rw-r--r--  drivers/net/qede/base/ecore_dcbx_api.h | 10
-rw-r--r--  drivers/net/qede/base/ecore_dev.c | 1892
-rw-r--r--  drivers/net/qede/base/ecore_dev_api.h | 173
-rw-r--r--  drivers/net/qede/base/ecore_hsi_common.h | 57
-rw-r--r--  drivers/net/qede/base/ecore_hsi_debug_tools.h | 15
-rw-r--r--  drivers/net/qede/base/ecore_hsi_eth.h | 57
-rw-r--r--  drivers/net/qede/base/ecore_hw.c | 127
-rw-r--r--  drivers/net/qede/base/ecore_hw.h | 40
-rw-r--r--  drivers/net/qede/base/ecore_init_fw_funcs.c | 93
-rw-r--r--  drivers/net/qede/base/ecore_init_fw_funcs.h | 42
-rw-r--r--  drivers/net/qede/base/ecore_init_ops.c | 26
-rw-r--r--  drivers/net/qede/base/ecore_int.c | 99
-rw-r--r--  drivers/net/qede/base/ecore_int.h | 1
-rw-r--r--  drivers/net/qede/base/ecore_int_api.h | 14
-rw-r--r--  drivers/net/qede/base/ecore_iov_api.h | 17
-rw-r--r--  drivers/net/qede/base/ecore_iro.h | 164
-rw-r--r--  drivers/net/qede/base/ecore_iro_values.h | 42
-rw-r--r--  drivers/net/qede/base/ecore_l2.c | 108
-rw-r--r--  drivers/net/qede/base/ecore_l2_api.h | 41
-rw-r--r--  drivers/net/qede/base/ecore_mcp.c | 280
-rw-r--r--  drivers/net/qede/base/ecore_mcp.h | 21
-rw-r--r--  drivers/net/qede/base/ecore_mcp_api.h | 40
-rw-r--r--  drivers/net/qede/base/ecore_rt_defs.h | 265
-rw-r--r--  drivers/net/qede/base/ecore_sp_commands.c | 8
-rw-r--r--  drivers/net/qede/base/ecore_sp_commands.h | 3
-rw-r--r--  drivers/net/qede/base/ecore_spq.c | 56
-rw-r--r--  drivers/net/qede/base/ecore_sriov.c | 94
-rw-r--r--  drivers/net/qede/base/ecore_vf.c | 25
-rw-r--r--  drivers/net/qede/base/ecore_vfpf_if.h | 2
-rw-r--r--  drivers/net/qede/base/eth_common.h | 5
-rw-r--r--  drivers/net/qede/base/mcp_public.h | 44
-rw-r--r--  drivers/net/qede/base/meson.build | 60
-rw-r--r--  drivers/net/qede/base/reg_addr.h | 76
-rw-r--r--  drivers/net/qede/meson.build | 12
-rw-r--r--  drivers/net/qede/qede_ethdev.c | 743
-rw-r--r--  drivers/net/qede/qede_ethdev.h | 69
-rw-r--r--  drivers/net/qede/qede_fdir.c | 470
-rw-r--r--  drivers/net/qede/qede_filter.c | 1546
-rw-r--r--  drivers/net/qede/qede_main.c | 15
-rw-r--r--  drivers/net/qede/qede_rxtx.c | 140
-rw-r--r--  drivers/net/qede/qede_rxtx.h | 17
48 files changed, 4971 insertions(+), 2236 deletions(-)
diff --git a/drivers/net/qede/Makefile b/drivers/net/qede/Makefile
index 488ca1d9..2ecbd8d2 100644
--- a/drivers/net/qede/Makefile
+++ b/drivers/net/qede/Makefile
@@ -105,6 +105,6 @@ SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += ecore_vf.c
SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_ethdev.c
SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_main.c
SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_rxtx.c
-SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_fdir.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_filter.c
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/qede/base/bcm_osal.c b/drivers/net/qede/base/bcm_osal.c
index d5d6f8e2..693328f1 100644
--- a/drivers/net/qede/base/bcm_osal.c
+++ b/drivers/net/qede/base/bcm_osal.c
@@ -17,7 +17,7 @@
/* Array of memzone pointers */
static const struct rte_memzone *ecore_mz_mapping[RTE_MAX_MEMZONE];
/* Counter to track current memzone allocated */
-uint16_t ecore_mz_count;
+static uint16_t ecore_mz_count;
unsigned long qede_log2_align(unsigned long n)
{
diff --git a/drivers/net/qede/base/bcm_osal.h b/drivers/net/qede/base/bcm_osal.h
index 630867fa..1abf44fa 100644
--- a/drivers/net/qede/base/bcm_osal.h
+++ b/drivers/net/qede/base/bcm_osal.h
@@ -447,10 +447,13 @@ u32 qede_crc32(u32 crc, u8 *ptr, u32 length);
#define OSAL_CRC8(table, pdata, nbytes, crc) 0
#define OSAL_MFW_TLV_REQ(p_hwfn) nothing
#define OSAL_MFW_FILL_TLV_DATA(type, buf, data) (0)
+#define OSAL_HW_INFO_CHANGE(p_hwfn, change) nothing
#define OSAL_MFW_CMD_PREEMPT(p_hwfn) nothing
#define OSAL_PF_VALIDATE_MODIFY_TUNN_CONFIG(p_hwfn, mask, b_update, tunn) 0
#define OSAL_DIV_S64(a, b) ((a) / (b))
#define OSAL_LLDP_RX_TLVS(p_hwfn, tlv_buf, tlv_size) nothing
+#define OSAL_DBG_ALLOC_USER_DATA(p_hwfn, user_data_ptr) (0)
+#define OSAL_DB_REC_OCCURRED(p_hwfn) nothing
#endif /* __BCM_OSAL_H */
diff --git a/drivers/net/qede/base/common_hsi.h b/drivers/net/qede/base/common_hsi.h
index ca8e59db..2aaf298f 100644
--- a/drivers/net/qede/base/common_hsi.h
+++ b/drivers/net/qede/base/common_hsi.h
@@ -95,8 +95,8 @@
#define FW_MAJOR_VERSION 8
-#define FW_MINOR_VERSION 33
-#define FW_REVISION_VERSION 12
+#define FW_MINOR_VERSION 37
+#define FW_REVISION_VERSION 7
#define FW_ENGINEERING_VERSION 0
/***********************/
@@ -1033,13 +1033,14 @@ struct db_rdma_dpm_params {
#define DB_RDMA_DPM_PARAMS_WQE_SIZE_SHIFT 16
#define DB_RDMA_DPM_PARAMS_RESERVED0_MASK 0x1
#define DB_RDMA_DPM_PARAMS_RESERVED0_SHIFT 27
-/* RoCE completion flag */
-#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_MASK 0x1
-#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_SHIFT 28
+/* RoCE ack request (will be set 1) */
+#define DB_RDMA_DPM_PARAMS_ACK_REQUEST_MASK 0x1
+#define DB_RDMA_DPM_PARAMS_ACK_REQUEST_SHIFT 28
#define DB_RDMA_DPM_PARAMS_S_FLG_MASK 0x1 /* RoCE S flag */
#define DB_RDMA_DPM_PARAMS_S_FLG_SHIFT 29
-#define DB_RDMA_DPM_PARAMS_RESERVED1_MASK 0x1
-#define DB_RDMA_DPM_PARAMS_RESERVED1_SHIFT 30
+/* RoCE completion flag for FW use */
+#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_MASK 0x1
+#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_SHIFT 30
/* Connection type is iWARP */
#define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_MASK 0x1
#define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_SHIFT 31
diff --git a/drivers/net/qede/base/ecore.h b/drivers/net/qede/base/ecore.h
index 5d79fdf0..524a1dd4 100644
--- a/drivers/net/qede/base/ecore.h
+++ b/drivers/net/qede/base/ecore.h
@@ -19,6 +19,7 @@
#include <zlib.h>
#endif
+#include "ecore_status.h"
#include "ecore_hsi_common.h"
#include "ecore_hsi_debug_tools.h"
#include "ecore_hsi_init_func.h"
@@ -27,8 +28,8 @@
#include "mcp_public.h"
#define ECORE_MAJOR_VERSION 8
-#define ECORE_MINOR_VERSION 30
-#define ECORE_REVISION_VERSION 8
+#define ECORE_MINOR_VERSION 37
+#define ECORE_REVISION_VERSION 20
#define ECORE_ENGINEERING_VERSION 0
#define ECORE_VERSION \
@@ -207,6 +208,7 @@ struct ecore_l2_info;
struct ecore_igu_info;
struct ecore_mcp_info;
struct ecore_dcbx_info;
+struct ecore_llh_info;
struct ecore_rt_data {
u32 *init_val;
@@ -543,6 +545,9 @@ enum ecore_mf_mode_bit {
/* Use stag for steering */
ECORE_MF_8021AD_TAGGING,
+
+ /* Allow FIP discovery fallback */
+ ECORE_MF_FIP_SPECIAL,
};
enum ecore_ufp_mode {
@@ -660,6 +665,7 @@ struct ecore_hwfn {
#endif
struct dbg_tools_data dbg_info;
+ void *dbg_user_info;
struct z_stream_s *stream;
@@ -739,6 +745,7 @@ struct ecore_dev {
#endif
#define ECORE_IS_AH(dev) ((dev)->type == ECORE_DEV_TYPE_AH)
#define ECORE_IS_K2(dev) ECORE_IS_AH(dev)
+#define ECORE_IS_E4(dev) (ECORE_IS_BB(dev) || ECORE_IS_AH(dev))
u16 vendor_id;
u16 device_id;
@@ -833,8 +840,26 @@ struct ecore_dev {
/* HW functions */
u8 num_hwfns;
struct ecore_hwfn hwfns[MAX_HWFNS_PER_DEVICE];
+#define ECORE_LEADING_HWFN(dev) (&dev->hwfns[0])
#define ECORE_IS_CMT(dev) ((dev)->num_hwfns > 1)
+ /* Engine affinity */
+ u8 l2_affin_hint;
+ u8 fir_affin;
+ u8 iwarp_affin;
+ /* Macro for getting the engine-affinitized hwfn for FCoE/iSCSI/RoCE */
+#define ECORE_FIR_AFFIN_HWFN(dev) (&dev->hwfns[dev->fir_affin])
+ /* Macro for getting the engine-affinitized hwfn for iWARP */
+#define ECORE_IWARP_AFFIN_HWFN(dev) (&dev->hwfns[dev->iwarp_affin])
+ /* Generic macro for getting the engine-affinitized hwfn */
+#define ECORE_AFFIN_HWFN(dev) \
+ (ECORE_IS_IWARP_PERSONALITY(ECORE_LEADING_HWFN(dev)) ? \
+ ECORE_IWARP_AFFIN_HWFN(dev) : \
+ ECORE_FIR_AFFIN_HWFN(dev))
+ /* Macro for getting the index (0/1) of the engine-affinitized hwfn */
+#define ECORE_AFFIN_HWFN_IDX(dev) \
+ (IS_LEAD_HWFN(ECORE_AFFIN_HWFN(dev)) ? 0 : 1)
+
/* SRIOV */
struct ecore_hw_sriov_info *p_iov_info;
#define IS_ECORE_SRIOV(p_dev) (!!(p_dev)->p_iov_info)
@@ -869,6 +894,12 @@ struct ecore_dev {
#ifndef ASIC_ONLY
bool b_is_emul_full;
#endif
+ /* LLH info */
+ u8 ppfid_bitmap;
+ struct ecore_llh_info *p_llh_info;
+
+ /* Indicates whether this PF serves a storage target */
+ bool b_is_target;
#ifdef CONFIG_ECORE_BINARY_FW /* @DPDK */
void *firmware;
@@ -958,6 +989,8 @@ void ecore_db_recovery_dp(struct ecore_hwfn *p_hwfn);
void ecore_db_recovery_execute(struct ecore_hwfn *p_hwfn,
enum ecore_db_rec_exec);
+bool ecore_edpm_enabled(struct ecore_hwfn *p_hwfn);
+
/* amount of resources used in qm init */
u8 ecore_init_qm_get_num_tcs(struct ecore_hwfn *p_hwfn);
u16 ecore_init_qm_get_num_vfs(struct ecore_hwfn *p_hwfn);
@@ -965,6 +998,29 @@ u16 ecore_init_qm_get_num_pf_rls(struct ecore_hwfn *p_hwfn);
u16 ecore_init_qm_get_num_vports(struct ecore_hwfn *p_hwfn);
u16 ecore_init_qm_get_num_pqs(struct ecore_hwfn *p_hwfn);
-#define ECORE_LEADING_HWFN(dev) (&dev->hwfns[0])
+#define MFW_PORT(_p_hwfn) ((_p_hwfn)->abs_pf_id % \
+ ecore_device_num_ports((_p_hwfn)->p_dev))
+
+/* The PFID<->PPFID calculation is based on the relative index of a PF on its
+ * port. In BB there is a bug in the LLH in which the PPFID is actually engine
+ * based, and thus it equals the PFID.
+ */
+#define ECORE_PFID_BY_PPFID(_p_hwfn, abs_ppfid) \
+ (ECORE_IS_BB((_p_hwfn)->p_dev) ? \
+ (abs_ppfid) : \
+ (abs_ppfid) * (_p_hwfn)->p_dev->num_ports_in_engine + \
+ MFW_PORT(_p_hwfn))
+#define ECORE_PPFID_BY_PFID(_p_hwfn) \
+ (ECORE_IS_BB((_p_hwfn)->p_dev) ? \
+ (_p_hwfn)->rel_pf_id : \
+ (_p_hwfn)->rel_pf_id / (_p_hwfn)->p_dev->num_ports_in_engine)
+
+enum _ecore_status_t ecore_all_ppfids_wr(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u32 addr,
+ u32 val);
+
+/* Utility functions for dumping the content of the NIG LLH filters */
+enum _ecore_status_t ecore_llh_dump_ppfid(struct ecore_dev *p_dev, u8 ppfid);
+enum _ecore_status_t ecore_llh_dump_all(struct ecore_dev *p_dev);
#endif /* __ECORE_H */
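The PFID<->PPFID macros above encode how a PF maps to its per-port function index in the LLH. A short worked sketch of that arithmetic, assuming a hypothetical non-BB (AH) device with two ports per engine; the device parameters below are illustrative and not taken from this patch:

#include <stdio.h>

/* Mirrors MFW_PORT(), ECORE_PPFID_BY_PFID() and ECORE_PFID_BY_PPFID() for an
 * assumed AH configuration.  On BB the LLH PPFID is engine based (see the
 * comment above), so both macros simply reduce to the PFID.
 */
int main(void)
{
	unsigned int num_ports_in_engine = 2;		/* assumed */
	unsigned int num_ports = 2;			/* assumed */
	unsigned int abs_pf_id = 5, rel_pf_id = 5;	/* assumed */

	unsigned int mfw_port = abs_pf_id % num_ports;			/* MFW_PORT -> 1 */
	unsigned int ppfid = rel_pf_id / num_ports_in_engine;		/* PPFID_BY_PFID -> 2 */
	unsigned int pfid = ppfid * num_ports_in_engine + mfw_port;	/* PFID_BY_PPFID -> 5 */

	printf("pf %u -> ppfid %u -> pf %u (port %u)\n",
	       rel_pf_id, ppfid, pfid, mfw_port);
	return 0;
}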
diff --git a/drivers/net/qede/base/ecore_cxt.c b/drivers/net/qede/base/ecore_cxt.c
index bf36ce58..5c3370e1 100644
--- a/drivers/net/qede/base/ecore_cxt.c
+++ b/drivers/net/qede/base/ecore_cxt.c
@@ -1133,6 +1133,9 @@ enum _ecore_status_t ecore_cxt_mngr_alloc(struct ecore_hwfn *p_hwfn)
return ECORE_NOMEM;
}
+ /* Set the cxt mangr pointer prior to further allocations */
+ p_hwfn->p_cxt_mngr = p_mngr;
+
/* Initialize ILT client registers */
clients = p_mngr->clients;
clients[ILT_CLI_CDUC].first.reg = ILT_CFG_REG(CDUC, FIRST_ILT);
@@ -1174,13 +1177,13 @@ enum _ecore_status_t ecore_cxt_mngr_alloc(struct ecore_hwfn *p_hwfn)
/* Initialize the dynamic ILT allocation mutex */
#ifdef CONFIG_ECORE_LOCK_ALLOC
- OSAL_MUTEX_ALLOC(p_hwfn, &p_mngr->mutex);
+ if (OSAL_MUTEX_ALLOC(p_hwfn, &p_mngr->mutex)) {
+ DP_NOTICE(p_hwfn, false, "Failed to alloc p_mngr->mutex\n");
+ return ECORE_NOMEM;
+ }
#endif
OSAL_MUTEX_INIT(&p_mngr->mutex);
- /* Set the cxt mangr pointer priori to further allocations */
- p_hwfn->p_cxt_mngr = p_mngr;
-
return ECORE_SUCCESS;
}
@@ -2111,7 +2114,7 @@ ecore_cxt_dynamic_ilt_alloc(struct ecore_hwfn *p_hwfn,
ecore_dmae_host2grc(p_hwfn, p_ptt, (u64)(osal_uintptr_t)&ilt_hw_entry,
reg_offset, sizeof(ilt_hw_entry) / sizeof(u32),
- 0 /* no flags */);
+ OSAL_NULL /* default parameters */);
if (elem_type == ECORE_ELEM_CXT) {
u32 last_cid_allocated = (1 + (iid / elems_per_p)) *
@@ -2218,7 +2221,7 @@ ecore_cxt_free_ilt_range(struct ecore_hwfn *p_hwfn,
(u64)(osal_uintptr_t)&ilt_hw_entry,
reg_offset,
sizeof(ilt_hw_entry) / sizeof(u32),
- 0 /* no flags */);
+ OSAL_NULL /* default parameters */);
}
ecore_ptt_release(p_hwfn, p_ptt);
diff --git a/drivers/net/qede/base/ecore_dcbx.c b/drivers/net/qede/base/ecore_dcbx.c
index 96678745..cbc69cde 100644
--- a/drivers/net/qede/base/ecore_dcbx.c
+++ b/drivers/net/qede/base/ecore_dcbx.c
@@ -129,7 +129,7 @@ u8 ecore_dcbx_get_dscp_value(struct ecore_hwfn *p_hwfn, u8 pri)
static void
ecore_dcbx_set_params(struct ecore_dcbx_results *p_data,
- struct ecore_hwfn *p_hwfn,
+ struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
bool enable, u8 prio, u8 tc,
enum dcbx_protocol_type type,
enum ecore_pci_personality personality)
@@ -154,12 +154,19 @@ ecore_dcbx_set_params(struct ecore_dcbx_results *p_data,
/* QM reconf data */
if (p_hwfn->hw_info.personality == personality)
p_hwfn->hw_info.offload_tc = tc;
+
+ /* Configure dcbx vlan priority in doorbell block for roce EDPM */
+ if (OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits) &&
+ (type == DCBX_PROTOCOL_ROCE)) {
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 1);
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_PCP_BB_K2, prio << 1);
+ }
}
/* Update app protocol data and hw_info fields with the TLV info */
static void
ecore_dcbx_update_app_info(struct ecore_dcbx_results *p_data,
- struct ecore_hwfn *p_hwfn,
+ struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
bool enable, u8 prio, u8 tc,
enum dcbx_protocol_type type)
{
@@ -175,7 +182,7 @@ ecore_dcbx_update_app_info(struct ecore_dcbx_results *p_data,
personality = ecore_dcbx_app_update[i].personality;
- ecore_dcbx_set_params(p_data, p_hwfn, enable,
+ ecore_dcbx_set_params(p_data, p_hwfn, p_ptt, enable,
prio, tc, type, personality);
}
}
@@ -231,7 +238,7 @@ ecore_dcbx_get_app_protocol_type(struct ecore_hwfn *p_hwfn,
* reconfiguring QM. Get protocol specific data for PF update ramrod command.
*/
static enum _ecore_status_t
-ecore_dcbx_process_tlv(struct ecore_hwfn *p_hwfn,
+ecore_dcbx_process_tlv(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
struct ecore_dcbx_results *p_data,
struct dcbx_app_priority_entry *p_tbl, u32 pri_tc_tbl,
int count, u8 dcbx_version)
@@ -280,8 +287,8 @@ ecore_dcbx_process_tlv(struct ecore_hwfn *p_hwfn,
enable = true;
}
- ecore_dcbx_update_app_info(p_data, p_hwfn, enable,
- priority, tc, type);
+ ecore_dcbx_update_app_info(p_data, p_hwfn, p_ptt,
+ enable, priority, tc, type);
}
}
@@ -302,8 +309,8 @@ ecore_dcbx_process_tlv(struct ecore_hwfn *p_hwfn,
if (p_data->arr[type].update)
continue;
- enable = (type == DCBX_PROTOCOL_ETH) ? false : !!dcbx_version;
- ecore_dcbx_update_app_info(p_data, p_hwfn, enable,
+ /* if no app tlv was present, don't override in FW */
+ ecore_dcbx_update_app_info(p_data, p_hwfn, p_ptt, false,
priority, tc, type);
}
@@ -314,11 +321,11 @@ ecore_dcbx_process_tlv(struct ecore_hwfn *p_hwfn,
* reconfiguring QM. Get protocol specific data for PF update ramrod command.
*/
static enum _ecore_status_t
-ecore_dcbx_process_mib_info(struct ecore_hwfn *p_hwfn)
+ecore_dcbx_process_mib_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
struct dcbx_app_priority_feature *p_app;
struct dcbx_app_priority_entry *p_tbl;
- struct ecore_dcbx_results data = { 0 };
+ struct ecore_dcbx_results data;
struct dcbx_ets_feature *p_ets;
struct ecore_hw_info *p_info;
u32 pri_tc_tbl, flags;
@@ -338,7 +345,8 @@ ecore_dcbx_process_mib_info(struct ecore_hwfn *p_hwfn)
p_info = &p_hwfn->hw_info;
num_entries = GET_MFW_FIELD(p_app->flags, DCBX_APP_NUM_ENTRIES);
- rc = ecore_dcbx_process_tlv(p_hwfn, &data, p_tbl, pri_tc_tbl,
+ OSAL_MEMSET(&data, 0, sizeof(struct ecore_dcbx_results));
+ rc = ecore_dcbx_process_tlv(p_hwfn, p_ptt, &data, p_tbl, pri_tc_tbl,
num_entries, dcbx_version);
if (rc != ECORE_SUCCESS)
return rc;
@@ -879,7 +887,7 @@ ecore_dcbx_mib_update_event(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
if (type == ECORE_DCBX_OPERATIONAL_MIB) {
ecore_dcbx_get_dscp_params(p_hwfn, &p_hwfn->p_dcbx_info->get);
- rc = ecore_dcbx_process_mib_info(p_hwfn);
+ rc = ecore_dcbx_process_mib_info(p_hwfn, p_ptt);
if (!rc) {
/* reconfigure tcs of QM queues according
* to negotiation results
@@ -893,12 +901,19 @@ ecore_dcbx_mib_update_event(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
ecore_dcbx_get_params(p_hwfn, &p_hwfn->p_dcbx_info->get, type);
- /* Update the DSCP to TC mapping bit if required */
+ /* Update the DSCP to TC mapping enable bit if required */
if ((type == ECORE_DCBX_OPERATIONAL_MIB) &&
p_hwfn->p_dcbx_info->dscp_nig_update) {
u8 val = !!p_hwfn->p_dcbx_info->get.dscp.enabled;
+ u32 addr = NIG_REG_DSCP_TO_TC_MAP_ENABLE;
+
+ rc = ecore_all_ppfids_wr(p_hwfn, p_ptt, addr, val);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to update the DSCP to TC mapping enable bit\n");
+ return rc;
+ }
- ecore_wr(p_hwfn, p_ptt, NIG_REG_DSCP_TO_TC_MAP_ENABLE, val);
p_hwfn->p_dcbx_info->dscp_nig_update = false;
}
@@ -1533,3 +1548,59 @@ ecore_lldp_set_system_tlvs(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
return rc;
}
+
+enum _ecore_status_t
+ecore_dcbx_get_dscp_priority(struct ecore_hwfn *p_hwfn,
+ u8 dscp_index, u8 *p_dscp_pri)
+{
+ struct ecore_dcbx_get *p_dcbx_info;
+ enum _ecore_status_t rc;
+
+ if (dscp_index >= ECORE_DCBX_DSCP_SIZE) {
+ DP_ERR(p_hwfn, "Invalid dscp index %d\n", dscp_index);
+ return ECORE_INVAL;
+ }
+
+ p_dcbx_info = OSAL_ALLOC(p_hwfn->p_dev, GFP_KERNEL,
+ sizeof(*p_dcbx_info));
+ if (!p_dcbx_info)
+ return ECORE_NOMEM;
+
+ OSAL_MEMSET(p_dcbx_info, 0, sizeof(*p_dcbx_info));
+ rc = ecore_dcbx_query_params(p_hwfn, p_dcbx_info,
+ ECORE_DCBX_OPERATIONAL_MIB);
+ if (rc) {
+ OSAL_FREE(p_hwfn->p_dev, p_dcbx_info);
+ return rc;
+ }
+
+ *p_dscp_pri = p_dcbx_info->dscp.dscp_pri_map[dscp_index];
+ OSAL_FREE(p_hwfn->p_dev, p_dcbx_info);
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t
+ecore_dcbx_set_dscp_priority(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ u8 dscp_index, u8 pri_val)
+{
+ struct ecore_dcbx_set dcbx_set;
+ enum _ecore_status_t rc;
+
+ if (dscp_index >= ECORE_DCBX_DSCP_SIZE ||
+ pri_val >= ECORE_MAX_PFC_PRIORITIES) {
+ DP_ERR(p_hwfn, "Invalid dscp params: index = %d pri = %d\n",
+ dscp_index, pri_val);
+ return ECORE_INVAL;
+ }
+
+ OSAL_MEMSET(&dcbx_set, 0, sizeof(dcbx_set));
+ rc = ecore_dcbx_get_config_params(p_hwfn, &dcbx_set);
+ if (rc)
+ return rc;
+
+ dcbx_set.override_flags = ECORE_DCBX_OVERRIDE_DSCP_CFG;
+ dcbx_set.dscp.dscp_pri_map[dscp_index] = pri_val;
+
+ return ecore_dcbx_config_params(p_hwfn, p_ptt, &dcbx_set, 1);
+}
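The two DSCP helpers added above are exported through ecore_dcbx_api.h in the next hunk. A minimal usage sketch, assuming a hwfn and an already-acquired PTT in the driver's usual ecore_ptt_acquire()/ecore_ptt_release() style; the wrapper name qede_dscp_remap_example and the chosen DSCP index/priority are illustrative only:

#include "ecore.h"
#include "ecore_dcbx_api.h"

static enum _ecore_status_t qede_dscp_remap_example(struct ecore_hwfn *p_hwfn,
						    struct ecore_ptt *p_ptt)
{
	enum _ecore_status_t rc;
	u8 pri = 0;

	/* Map DSCP index 24 to priority 3 in the DCBX DSCP configuration */
	rc = ecore_dcbx_set_dscp_priority(p_hwfn, p_ptt, 24, 3);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Read the mapping back from the operational MIB */
	rc = ecore_dcbx_get_dscp_priority(p_hwfn, 24, &pri);
	if (rc != ECORE_SUCCESS)
		return rc;

	DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "dscp 24 -> priority %d\n", pri);

	return ECORE_SUCCESS;
}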
diff --git a/drivers/net/qede/base/ecore_dcbx_api.h b/drivers/net/qede/base/ecore_dcbx_api.h
index eaf8e082..6fad2ecc 100644
--- a/drivers/net/qede/base/ecore_dcbx_api.h
+++ b/drivers/net/qede/base/ecore_dcbx_api.h
@@ -228,6 +228,16 @@ enum _ecore_status_t
ecore_lldp_set_system_tlvs(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
struct ecore_lldp_sys_tlvs *p_params);
+/* Returns priority value for a given dscp index */
+enum _ecore_status_t
+ecore_dcbx_get_dscp_priority(struct ecore_hwfn *p_hwfn,
+ u8 dscp_index, u8 *p_dscp_pri);
+
+/* Sets priority value for a given dscp index */
+enum _ecore_status_t
+ecore_dcbx_set_dscp_priority(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ u8 dscp_index, u8 pri_val);
+
static const struct ecore_dcbx_app_metadata ecore_dcbx_app_update[] = {
{DCBX_PROTOCOL_ISCSI, "ISCSI", ECORE_PCI_ISCSI},
{DCBX_PROTOCOL_FCOE, "FCOE", ECORE_PCI_FCOE},
diff --git a/drivers/net/qede/base/ecore_dev.c b/drivers/net/qede/base/ecore_dev.c
index 31f1f3ee..cf454b19 100644
--- a/drivers/net/qede/base/ecore_dev.c
+++ b/drivers/net/qede/base/ecore_dev.c
@@ -352,6 +352,1189 @@ void ecore_db_recovery_execute(struct ecore_hwfn *p_hwfn,
}
/******************** Doorbell Recovery end ****************/
+/********************************** NIG LLH ***********************************/
+
+enum ecore_llh_filter_type {
+ ECORE_LLH_FILTER_TYPE_MAC,
+ ECORE_LLH_FILTER_TYPE_PROTOCOL,
+};
+
+struct ecore_llh_mac_filter {
+ u8 addr[ETH_ALEN];
+};
+
+struct ecore_llh_protocol_filter {
+ enum ecore_llh_prot_filter_type_t type;
+ u16 source_port_or_eth_type;
+ u16 dest_port;
+};
+
+union ecore_llh_filter {
+ struct ecore_llh_mac_filter mac;
+ struct ecore_llh_protocol_filter protocol;
+};
+
+struct ecore_llh_filter_info {
+ bool b_enabled;
+ u32 ref_cnt;
+ enum ecore_llh_filter_type type;
+ union ecore_llh_filter filter;
+};
+
+struct ecore_llh_info {
+ /* Number of LLH filters banks */
+ u8 num_ppfid;
+
+#define MAX_NUM_PPFID 8
+ u8 ppfid_array[MAX_NUM_PPFID];
+
+ /* Array of filters arrays:
+ * "num_ppfid" elements of filters banks, where each is an array of
+ * "NIG_REG_LLH_FUNC_FILTER_EN_SIZE" filters.
+ */
+ struct ecore_llh_filter_info **pp_filters;
+};
+
+static void ecore_llh_free(struct ecore_dev *p_dev)
+{
+ struct ecore_llh_info *p_llh_info = p_dev->p_llh_info;
+ u32 i;
+
+ if (p_llh_info != OSAL_NULL) {
+ if (p_llh_info->pp_filters != OSAL_NULL) {
+ for (i = 0; i < p_llh_info->num_ppfid; i++)
+ OSAL_FREE(p_dev, p_llh_info->pp_filters[i]);
+ }
+
+ OSAL_FREE(p_dev, p_llh_info->pp_filters);
+ }
+
+ OSAL_FREE(p_dev, p_llh_info);
+ p_dev->p_llh_info = OSAL_NULL;
+}
+
+static enum _ecore_status_t ecore_llh_alloc(struct ecore_dev *p_dev)
+{
+ struct ecore_llh_info *p_llh_info;
+ u32 size;
+ u8 i;
+
+ p_llh_info = OSAL_ZALLOC(p_dev, GFP_KERNEL, sizeof(*p_llh_info));
+ if (!p_llh_info)
+ return ECORE_NOMEM;
+ p_dev->p_llh_info = p_llh_info;
+
+ for (i = 0; i < MAX_NUM_PPFID; i++) {
+ if (!(p_dev->ppfid_bitmap & (0x1 << i)))
+ continue;
+
+ p_llh_info->ppfid_array[p_llh_info->num_ppfid] = i;
+ DP_VERBOSE(p_dev, ECORE_MSG_SP, "ppfid_array[%d] = %hhd\n",
+ p_llh_info->num_ppfid, i);
+ p_llh_info->num_ppfid++;
+ }
+
+ size = p_llh_info->num_ppfid * sizeof(*p_llh_info->pp_filters);
+ p_llh_info->pp_filters = OSAL_ZALLOC(p_dev, GFP_KERNEL, size);
+ if (!p_llh_info->pp_filters)
+ return ECORE_NOMEM;
+
+ size = NIG_REG_LLH_FUNC_FILTER_EN_SIZE *
+ sizeof(**p_llh_info->pp_filters);
+ for (i = 0; i < p_llh_info->num_ppfid; i++) {
+ p_llh_info->pp_filters[i] = OSAL_ZALLOC(p_dev, GFP_KERNEL,
+ size);
+ if (!p_llh_info->pp_filters[i])
+ return ECORE_NOMEM;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t ecore_llh_shadow_sanity(struct ecore_dev *p_dev,
+ u8 ppfid, u8 filter_idx,
+ const char *action)
+{
+ struct ecore_llh_info *p_llh_info = p_dev->p_llh_info;
+
+ if (ppfid >= p_llh_info->num_ppfid) {
+ DP_NOTICE(p_dev, false,
+ "LLH shadow [%s]: using ppfid %d while only %d ppfids are available\n",
+ action, ppfid, p_llh_info->num_ppfid);
+ return ECORE_INVAL;
+ }
+
+ if (filter_idx >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) {
+ DP_NOTICE(p_dev, false,
+ "LLH shadow [%s]: using filter_idx %d while only %d filters are available\n",
+ action, filter_idx, NIG_REG_LLH_FUNC_FILTER_EN_SIZE);
+ return ECORE_INVAL;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+#define ECORE_LLH_INVALID_FILTER_IDX 0xff
+
+static enum _ecore_status_t
+ecore_llh_shadow_search_filter(struct ecore_dev *p_dev, u8 ppfid,
+ union ecore_llh_filter *p_filter,
+ u8 *p_filter_idx)
+{
+ struct ecore_llh_info *p_llh_info = p_dev->p_llh_info;
+ struct ecore_llh_filter_info *p_filters;
+ enum _ecore_status_t rc;
+ u8 i;
+
+ rc = ecore_llh_shadow_sanity(p_dev, ppfid, 0, "search");
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ *p_filter_idx = ECORE_LLH_INVALID_FILTER_IDX;
+
+ p_filters = p_llh_info->pp_filters[ppfid];
+ for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
+ if (!OSAL_MEMCMP(p_filter, &p_filters[i].filter,
+ sizeof(*p_filter))) {
+ *p_filter_idx = i;
+ break;
+ }
+ }
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
+ecore_llh_shadow_get_free_idx(struct ecore_dev *p_dev, u8 ppfid,
+ u8 *p_filter_idx)
+{
+ struct ecore_llh_info *p_llh_info = p_dev->p_llh_info;
+ struct ecore_llh_filter_info *p_filters;
+ enum _ecore_status_t rc;
+ u8 i;
+
+ rc = ecore_llh_shadow_sanity(p_dev, ppfid, 0, "get_free_idx");
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ *p_filter_idx = ECORE_LLH_INVALID_FILTER_IDX;
+
+ p_filters = p_llh_info->pp_filters[ppfid];
+ for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
+ if (!p_filters[i].b_enabled) {
+ *p_filter_idx = i;
+ break;
+ }
+ }
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
+__ecore_llh_shadow_add_filter(struct ecore_dev *p_dev, u8 ppfid, u8 filter_idx,
+ enum ecore_llh_filter_type type,
+ union ecore_llh_filter *p_filter, u32 *p_ref_cnt)
+{
+ struct ecore_llh_info *p_llh_info = p_dev->p_llh_info;
+ struct ecore_llh_filter_info *p_filters;
+ enum _ecore_status_t rc;
+
+ rc = ecore_llh_shadow_sanity(p_dev, ppfid, filter_idx, "add");
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ p_filters = p_llh_info->pp_filters[ppfid];
+ if (!p_filters[filter_idx].ref_cnt) {
+ p_filters[filter_idx].b_enabled = true;
+ p_filters[filter_idx].type = type;
+ OSAL_MEMCPY(&p_filters[filter_idx].filter, p_filter,
+ sizeof(p_filters[filter_idx].filter));
+ }
+
+ *p_ref_cnt = ++p_filters[filter_idx].ref_cnt;
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
+ecore_llh_shadow_add_filter(struct ecore_dev *p_dev, u8 ppfid,
+ enum ecore_llh_filter_type type,
+ union ecore_llh_filter *p_filter,
+ u8 *p_filter_idx, u32 *p_ref_cnt)
+{
+ enum _ecore_status_t rc;
+
+ /* Check if the same filter already exist */
+ rc = ecore_llh_shadow_search_filter(p_dev, ppfid, p_filter,
+ p_filter_idx);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ /* Find a new entry in case of a new filter */
+ if (*p_filter_idx == ECORE_LLH_INVALID_FILTER_IDX) {
+ rc = ecore_llh_shadow_get_free_idx(p_dev, ppfid, p_filter_idx);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+ }
+
+ /* No free entry was found */
+ if (*p_filter_idx == ECORE_LLH_INVALID_FILTER_IDX) {
+ DP_NOTICE(p_dev, false,
+ "Failed to find an empty LLH filter to utilize [ppfid %d]\n",
+ ppfid);
+ return ECORE_NORESOURCES;
+ }
+
+ return __ecore_llh_shadow_add_filter(p_dev, ppfid, *p_filter_idx, type,
+ p_filter, p_ref_cnt);
+}
+
+static enum _ecore_status_t
+__ecore_llh_shadow_remove_filter(struct ecore_dev *p_dev, u8 ppfid,
+ u8 filter_idx, u32 *p_ref_cnt)
+{
+ struct ecore_llh_info *p_llh_info = p_dev->p_llh_info;
+ struct ecore_llh_filter_info *p_filters;
+ enum _ecore_status_t rc;
+
+ rc = ecore_llh_shadow_sanity(p_dev, ppfid, filter_idx, "remove");
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ p_filters = p_llh_info->pp_filters[ppfid];
+ if (!p_filters[filter_idx].ref_cnt) {
+ DP_NOTICE(p_dev, false,
+ "LLH shadow: trying to remove a filter with ref_cnt=0\n");
+ return ECORE_INVAL;
+ }
+
+ *p_ref_cnt = --p_filters[filter_idx].ref_cnt;
+ if (!p_filters[filter_idx].ref_cnt)
+ OSAL_MEM_ZERO(&p_filters[filter_idx],
+ sizeof(p_filters[filter_idx]));
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
+ecore_llh_shadow_remove_filter(struct ecore_dev *p_dev, u8 ppfid,
+ union ecore_llh_filter *p_filter,
+ u8 *p_filter_idx, u32 *p_ref_cnt)
+{
+ enum _ecore_status_t rc;
+
+ rc = ecore_llh_shadow_search_filter(p_dev, ppfid, p_filter,
+ p_filter_idx);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ /* No matching filter was found */
+ if (*p_filter_idx == ECORE_LLH_INVALID_FILTER_IDX) {
+ DP_NOTICE(p_dev, false,
+ "Failed to find a filter in the LLH shadow\n");
+ return ECORE_INVAL;
+ }
+
+ return __ecore_llh_shadow_remove_filter(p_dev, ppfid, *p_filter_idx,
+ p_ref_cnt);
+}
+
+static enum _ecore_status_t
+ecore_llh_shadow_remove_all_filters(struct ecore_dev *p_dev, u8 ppfid)
+{
+ struct ecore_llh_info *p_llh_info = p_dev->p_llh_info;
+ struct ecore_llh_filter_info *p_filters;
+ enum _ecore_status_t rc;
+
+ rc = ecore_llh_shadow_sanity(p_dev, ppfid, 0, "remove_all");
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ p_filters = p_llh_info->pp_filters[ppfid];
+ OSAL_MEM_ZERO(p_filters,
+ NIG_REG_LLH_FUNC_FILTER_EN_SIZE * sizeof(*p_filters));
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t ecore_abs_ppfid(struct ecore_dev *p_dev,
+ u8 rel_ppfid, u8 *p_abs_ppfid)
+{
+ struct ecore_llh_info *p_llh_info = p_dev->p_llh_info;
+ u8 ppfids = p_llh_info->num_ppfid - 1;
+
+ if (rel_ppfid >= p_llh_info->num_ppfid) {
+ DP_NOTICE(p_dev, false,
+ "rel_ppfid %d is not valid, available indices are 0..%hhd\n",
+ rel_ppfid, ppfids);
+ return ECORE_INVAL;
+ }
+
+ *p_abs_ppfid = p_llh_info->ppfid_array[rel_ppfid];
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
+__ecore_llh_set_engine_affin(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
+ enum ecore_eng eng;
+ u8 ppfid;
+ enum _ecore_status_t rc;
+
+ rc = ecore_mcp_get_engine_config(p_hwfn, p_ptt);
+ if (rc != ECORE_SUCCESS && rc != ECORE_NOTIMPL) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to get the engine affinity configuration\n");
+ return rc;
+ }
+
+ /* RoCE PF is bound to a single engine */
+ if (ECORE_IS_ROCE_PERSONALITY(p_hwfn)) {
+ eng = p_dev->fir_affin ? ECORE_ENG1 : ECORE_ENG0;
+ rc = ecore_llh_set_roce_affinity(p_dev, eng);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_dev, false,
+ "Failed to set the RoCE engine affinity\n");
+ return rc;
+ }
+
+ DP_VERBOSE(p_dev, ECORE_MSG_SP,
+ "LLH: Set the engine affinity of RoCE packets as %d\n",
+ eng);
+ }
+
+ /* Storage PF is bound to a single engine while L2 PF uses both */
+ if (ECORE_IS_FCOE_PERSONALITY(p_hwfn) ||
+ ECORE_IS_ISCSI_PERSONALITY(p_hwfn))
+ eng = p_dev->fir_affin ? ECORE_ENG1 : ECORE_ENG0;
+ else /* L2_PERSONALITY */
+ eng = ECORE_BOTH_ENG;
+
+ for (ppfid = 0; ppfid < p_dev->p_llh_info->num_ppfid; ppfid++) {
+ rc = ecore_llh_set_ppfid_affinity(p_dev, ppfid, eng);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_dev, false,
+ "Failed to set the engine affinity of ppfid %d\n",
+ ppfid);
+ return rc;
+ }
+ }
+
+ DP_VERBOSE(p_dev, ECORE_MSG_SP,
+ "LLH: Set the engine affinity of non-RoCE packets as %d\n",
+ eng);
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
+ecore_llh_set_engine_affin(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ bool avoid_eng_affin)
+{
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
+ enum _ecore_status_t rc;
+
+ /* Backwards compatible mode:
+ * - RoCE packets - Use engine 0.
+ * - Non-RoCE packets - Use connection based classification for L2 PFs,
+ * and engine 0 otherwise.
+ */
+ if (avoid_eng_affin) {
+ enum ecore_eng eng;
+ u8 ppfid;
+
+ if (ECORE_IS_ROCE_PERSONALITY(p_hwfn)) {
+ eng = ECORE_ENG0;
+ rc = ecore_llh_set_roce_affinity(p_dev, eng);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_dev, false,
+ "Failed to set the RoCE engine affinity\n");
+ return rc;
+ }
+
+ DP_VERBOSE(p_dev, ECORE_MSG_SP,
+ "LLH [backwards compatible mode]: Set the engine affinity of RoCE packets as %d\n",
+ eng);
+ }
+
+ eng = (ECORE_IS_FCOE_PERSONALITY(p_hwfn) ||
+ ECORE_IS_ISCSI_PERSONALITY(p_hwfn)) ? ECORE_ENG0
+ : ECORE_BOTH_ENG;
+ for (ppfid = 0; ppfid < p_dev->p_llh_info->num_ppfid; ppfid++) {
+ rc = ecore_llh_set_ppfid_affinity(p_dev, ppfid, eng);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_dev, false,
+ "Failed to set the engine affinity of ppfid %d\n",
+ ppfid);
+ return rc;
+ }
+ }
+
+ DP_VERBOSE(p_dev, ECORE_MSG_SP,
+ "LLH [backwards compatible mode]: Set the engine affinity of non-RoCE packets as %d\n",
+ eng);
+
+ return ECORE_SUCCESS;
+ }
+
+ return __ecore_llh_set_engine_affin(p_hwfn, p_ptt);
+}
+
+static enum _ecore_status_t ecore_llh_hw_init_pf(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ bool avoid_eng_affin)
+{
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
+ u8 ppfid, abs_ppfid;
+ enum _ecore_status_t rc;
+
+ for (ppfid = 0; ppfid < p_dev->p_llh_info->num_ppfid; ppfid++) {
+ u32 addr;
+
+ rc = ecore_abs_ppfid(p_dev, ppfid, &abs_ppfid);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ addr = NIG_REG_LLH_PPFID2PFID_TBL_0 + abs_ppfid * 0x4;
+ ecore_wr(p_hwfn, p_ptt, addr, p_hwfn->rel_pf_id);
+ }
+
+ if (OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS, &p_dev->mf_bits) &&
+ !ECORE_IS_FCOE_PERSONALITY(p_hwfn)) {
+ rc = ecore_llh_add_mac_filter(p_dev, 0,
+ p_hwfn->hw_info.hw_mac_addr);
+ if (rc != ECORE_SUCCESS)
+ DP_NOTICE(p_dev, false,
+ "Failed to add an LLH filter with the primary MAC\n");
+ }
+
+ if (ECORE_IS_CMT(p_dev)) {
+ rc = ecore_llh_set_engine_affin(p_hwfn, p_ptt, avoid_eng_affin);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+u8 ecore_llh_get_num_ppfid(struct ecore_dev *p_dev)
+{
+ return p_dev->p_llh_info->num_ppfid;
+}
+
+enum ecore_eng ecore_llh_get_l2_affinity_hint(struct ecore_dev *p_dev)
+{
+ return p_dev->l2_affin_hint ? ECORE_ENG1 : ECORE_ENG0;
+}
+
+/* TBD - should be removed when these definitions are available in reg_addr.h */
+#define NIG_REG_PPF_TO_ENGINE_SEL_ROCE_MASK 0x3
+#define NIG_REG_PPF_TO_ENGINE_SEL_ROCE_SHIFT 0
+#define NIG_REG_PPF_TO_ENGINE_SEL_NON_ROCE_MASK 0x3
+#define NIG_REG_PPF_TO_ENGINE_SEL_NON_ROCE_SHIFT 2
+
+enum _ecore_status_t ecore_llh_set_ppfid_affinity(struct ecore_dev *p_dev,
+ u8 ppfid, enum ecore_eng eng)
+{
+ struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
+ u32 addr, val, eng_sel;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ u8 abs_ppfid;
+
+ if (p_ptt == OSAL_NULL)
+ return ECORE_AGAIN;
+
+ if (!ECORE_IS_CMT(p_dev))
+ goto out;
+
+ rc = ecore_abs_ppfid(p_dev, ppfid, &abs_ppfid);
+ if (rc != ECORE_SUCCESS)
+ goto out;
+
+ switch (eng) {
+ case ECORE_ENG0:
+ eng_sel = 0;
+ break;
+ case ECORE_ENG1:
+ eng_sel = 1;
+ break;
+ case ECORE_BOTH_ENG:
+ eng_sel = 2;
+ break;
+ default:
+ DP_NOTICE(p_dev, false,
+ "Invalid affinity value for ppfid [%d]\n", eng);
+ rc = ECORE_INVAL;
+ goto out;
+ }
+
+ addr = NIG_REG_PPF_TO_ENGINE_SEL + abs_ppfid * 0x4;
+ val = ecore_rd(p_hwfn, p_ptt, addr);
+ SET_FIELD(val, NIG_REG_PPF_TO_ENGINE_SEL_NON_ROCE, eng_sel);
+ ecore_wr(p_hwfn, p_ptt, addr, val);
+
+ /* The iWARP affinity is set as the affinity of ppfid 0 */
+ if (!ppfid && ECORE_IS_IWARP_PERSONALITY(p_hwfn))
+ p_dev->iwarp_affin = (eng == ECORE_ENG1) ? 1 : 0;
+out:
+ ecore_ptt_release(p_hwfn, p_ptt);
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_llh_set_roce_affinity(struct ecore_dev *p_dev,
+ enum ecore_eng eng)
+{
+ struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
+ u32 addr, val, eng_sel;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ u8 ppfid, abs_ppfid;
+
+ if (p_ptt == OSAL_NULL)
+ return ECORE_AGAIN;
+
+ if (!ECORE_IS_CMT(p_dev))
+ goto out;
+
+ switch (eng) {
+ case ECORE_ENG0:
+ eng_sel = 0;
+ break;
+ case ECORE_ENG1:
+ eng_sel = 1;
+ break;
+ case ECORE_BOTH_ENG:
+ eng_sel = 2;
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_LLH_ENG_CLS_ROCE_QP_SEL,
+ 0xf /* QP bit 15 */);
+ break;
+ default:
+ DP_NOTICE(p_dev, false,
+ "Invalid affinity value for RoCE [%d]\n", eng);
+ rc = ECORE_INVAL;
+ goto out;
+ }
+
+ for (ppfid = 0; ppfid < p_dev->p_llh_info->num_ppfid; ppfid++) {
+ rc = ecore_abs_ppfid(p_dev, ppfid, &abs_ppfid);
+ if (rc != ECORE_SUCCESS)
+ goto out;
+
+ addr = NIG_REG_PPF_TO_ENGINE_SEL + abs_ppfid * 0x4;
+ val = ecore_rd(p_hwfn, p_ptt, addr);
+ SET_FIELD(val, NIG_REG_PPF_TO_ENGINE_SEL_ROCE, eng_sel);
+ ecore_wr(p_hwfn, p_ptt, addr, val);
+ }
+out:
+ ecore_ptt_release(p_hwfn, p_ptt);
+
+ return rc;
+}
+
+struct ecore_llh_filter_e4_details {
+ u64 value;
+ u32 mode;
+ u32 protocol_type;
+ u32 hdr_sel;
+ u32 enable;
+};
+
+static enum _ecore_status_t
+ecore_llh_access_filter_e4(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u8 abs_ppfid, u8 filter_idx,
+ struct ecore_llh_filter_e4_details *p_details,
+ bool b_write_access)
+{
+ u8 pfid = ECORE_PFID_BY_PPFID(p_hwfn, abs_ppfid);
+ struct ecore_dmae_params params;
+ enum _ecore_status_t rc;
+ u32 addr;
+
+ /* The NIG/LLH registers that are accessed in this function have only 16
+ * rows which are exposed to a PF. I.e. only the 16 filters of its
+ * default ppfid
+ * Accessing filters of other ppfids requires pretending to other PFs,
+ * and thus the usage of the ecore_ppfid_rd/wr() functions.
+ */
+
+ /* Filter enable - should be done first when removing a filter */
+ if (b_write_access && !p_details->enable) {
+ addr = NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + filter_idx * 0x4;
+ ecore_ppfid_wr(p_hwfn, p_ptt, abs_ppfid, addr,
+ p_details->enable);
+ }
+
+ /* Filter value */
+ addr = NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 + 2 * filter_idx * 0x4;
+ OSAL_MEMSET(&params, 0, sizeof(params));
+
+ if (b_write_access) {
+ params.flags = ECORE_DMAE_FLAG_PF_DST;
+ params.dst_pfid = pfid;
+ rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
+ (u64)(osal_uintptr_t)&p_details->value,
+ addr, 2 /* size_in_dwords */, &params);
+ } else {
+ params.flags = ECORE_DMAE_FLAG_PF_SRC |
+ ECORE_DMAE_FLAG_COMPLETION_DST;
+ params.src_pfid = pfid;
+ rc = ecore_dmae_grc2host(p_hwfn, p_ptt, addr,
+ (u64)(osal_uintptr_t)&p_details->value,
+ 2 /* size_in_dwords */, &params);
+ }
+
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ /* Filter mode */
+ addr = NIG_REG_LLH_FUNC_FILTER_MODE_BB_K2 + filter_idx * 0x4;
+ if (b_write_access)
+ ecore_ppfid_wr(p_hwfn, p_ptt, abs_ppfid, addr, p_details->mode);
+ else
+ p_details->mode = ecore_ppfid_rd(p_hwfn, p_ptt, abs_ppfid,
+ addr);
+
+ /* Filter protocol type */
+ addr = NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_BB_K2 + filter_idx * 0x4;
+ if (b_write_access)
+ ecore_ppfid_wr(p_hwfn, p_ptt, abs_ppfid, addr,
+ p_details->protocol_type);
+ else
+ p_details->protocol_type = ecore_ppfid_rd(p_hwfn, p_ptt,
+ abs_ppfid, addr);
+
+ /* Filter header select */
+ addr = NIG_REG_LLH_FUNC_FILTER_HDR_SEL_BB_K2 + filter_idx * 0x4;
+ if (b_write_access)
+ ecore_ppfid_wr(p_hwfn, p_ptt, abs_ppfid, addr,
+ p_details->hdr_sel);
+ else
+ p_details->hdr_sel = ecore_ppfid_rd(p_hwfn, p_ptt, abs_ppfid,
+ addr);
+
+ /* Filter enable - should be done last when adding a filter */
+ if (!b_write_access || p_details->enable) {
+ addr = NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + filter_idx * 0x4;
+ if (b_write_access)
+ ecore_ppfid_wr(p_hwfn, p_ptt, abs_ppfid, addr,
+ p_details->enable);
+ else
+ p_details->enable = ecore_ppfid_rd(p_hwfn, p_ptt,
+ abs_ppfid, addr);
+ }
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
+ecore_llh_add_filter_e4(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ u8 abs_ppfid, u8 filter_idx, u8 filter_prot_type,
+ u32 high, u32 low)
+{
+ struct ecore_llh_filter_e4_details filter_details;
+
+ filter_details.enable = 1;
+ filter_details.value = ((u64)high << 32) | low;
+ filter_details.hdr_sel =
+ OSAL_TEST_BIT(ECORE_MF_OVLAN_CLSS, &p_hwfn->p_dev->mf_bits) ?
+ 1 : /* inner/encapsulated header */
+ 0; /* outer/tunnel header */
+ filter_details.protocol_type = filter_prot_type;
+ filter_details.mode = filter_prot_type ?
+ 1 : /* protocol-based classification */
+ 0; /* MAC-address based classification */
+
+ return ecore_llh_access_filter_e4(p_hwfn, p_ptt, abs_ppfid, filter_idx,
+ &filter_details,
+ true /* write access */);
+}
+
+static enum _ecore_status_t
+ecore_llh_remove_filter_e4(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u8 abs_ppfid, u8 filter_idx)
+{
+ struct ecore_llh_filter_e4_details filter_details;
+
+ OSAL_MEMSET(&filter_details, 0, sizeof(filter_details));
+
+ return ecore_llh_access_filter_e4(p_hwfn, p_ptt, abs_ppfid, filter_idx,
+ &filter_details,
+ true /* write access */);
+}
+
+static enum _ecore_status_t
+ecore_llh_add_filter(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ u8 abs_ppfid, u8 filter_idx, u8 filter_prot_type, u32 high,
+ u32 low)
+{
+ return ecore_llh_add_filter_e4(p_hwfn, p_ptt, abs_ppfid,
+ filter_idx, filter_prot_type,
+ high, low);
+}
+
+static enum _ecore_status_t
+ecore_llh_remove_filter(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ u8 abs_ppfid, u8 filter_idx)
+{
+ return ecore_llh_remove_filter_e4(p_hwfn, p_ptt, abs_ppfid,
+ filter_idx);
+}
+
+enum _ecore_status_t ecore_llh_add_mac_filter(struct ecore_dev *p_dev, u8 ppfid,
+ u8 mac_addr[ETH_ALEN])
+{
+ struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
+ union ecore_llh_filter filter;
+ u8 filter_idx, abs_ppfid;
+ u32 high, low, ref_cnt;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ if (p_ptt == OSAL_NULL)
+ return ECORE_AGAIN;
+
+ if (!OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS, &p_dev->mf_bits))
+ goto out;
+
+ OSAL_MEM_ZERO(&filter, sizeof(filter));
+ OSAL_MEMCPY(filter.mac.addr, mac_addr, ETH_ALEN);
+ rc = ecore_llh_shadow_add_filter(p_dev, ppfid,
+ ECORE_LLH_FILTER_TYPE_MAC,
+ &filter, &filter_idx, &ref_cnt);
+ if (rc != ECORE_SUCCESS)
+ goto err;
+
+ rc = ecore_abs_ppfid(p_dev, ppfid, &abs_ppfid);
+ if (rc != ECORE_SUCCESS)
+ goto err;
+
+ /* Configure the LLH only in case of a new filter */
+ if (ref_cnt == 1) {
+ high = mac_addr[1] | (mac_addr[0] << 8);
+ low = mac_addr[5] | (mac_addr[4] << 8) | (mac_addr[3] << 16) |
+ (mac_addr[2] << 24);
+ rc = ecore_llh_add_filter(p_hwfn, p_ptt, abs_ppfid, filter_idx,
+ 0, high, low);
+ if (rc != ECORE_SUCCESS)
+ goto err;
+ }
+
+ DP_VERBOSE(p_dev, ECORE_MSG_SP,
+ "LLH: Added MAC filter [%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx] to ppfid %hhd [abs %hhd] at idx %hhd [ref_cnt %d]\n",
+ mac_addr[0], mac_addr[1], mac_addr[2], mac_addr[3],
+ mac_addr[4], mac_addr[5], ppfid, abs_ppfid, filter_idx,
+ ref_cnt);
+
+ goto out;
+
+err:
+ DP_NOTICE(p_dev, false,
+ "LLH: Failed to add MAC filter [%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx] to ppfid %hhd\n",
+ mac_addr[0], mac_addr[1], mac_addr[2], mac_addr[3],
+ mac_addr[4], mac_addr[5], ppfid);
+out:
+ ecore_ptt_release(p_hwfn, p_ptt);
+
+ return rc;
+}
+
+static enum _ecore_status_t
+ecore_llh_protocol_filter_stringify(struct ecore_dev *p_dev,
+ enum ecore_llh_prot_filter_type_t type,
+ u16 source_port_or_eth_type, u16 dest_port,
+ char *str, osal_size_t str_len)
+{
+ switch (type) {
+ case ECORE_LLH_FILTER_ETHERTYPE:
+ OSAL_SNPRINTF(str, str_len, "Ethertype 0x%04x",
+ source_port_or_eth_type);
+ break;
+ case ECORE_LLH_FILTER_TCP_SRC_PORT:
+ OSAL_SNPRINTF(str, str_len, "TCP src port 0x%04x",
+ source_port_or_eth_type);
+ break;
+ case ECORE_LLH_FILTER_UDP_SRC_PORT:
+ OSAL_SNPRINTF(str, str_len, "UDP src port 0x%04x",
+ source_port_or_eth_type);
+ break;
+ case ECORE_LLH_FILTER_TCP_DEST_PORT:
+ OSAL_SNPRINTF(str, str_len, "TCP dst port 0x%04x", dest_port);
+ break;
+ case ECORE_LLH_FILTER_UDP_DEST_PORT:
+ OSAL_SNPRINTF(str, str_len, "UDP dst port 0x%04x", dest_port);
+ break;
+ case ECORE_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
+ OSAL_SNPRINTF(str, str_len, "TCP src/dst ports 0x%04x/0x%04x",
+ source_port_or_eth_type, dest_port);
+ break;
+ case ECORE_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
+ OSAL_SNPRINTF(str, str_len, "UDP src/dst ports 0x%04x/0x%04x",
+ source_port_or_eth_type, dest_port);
+ break;
+ default:
+ DP_NOTICE(p_dev, true,
+ "Non valid LLH protocol filter type %d\n", type);
+ return ECORE_INVAL;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
+ecore_llh_protocol_filter_to_hilo(struct ecore_dev *p_dev,
+ enum ecore_llh_prot_filter_type_t type,
+ u16 source_port_or_eth_type, u16 dest_port,
+ u32 *p_high, u32 *p_low)
+{
+ *p_high = 0;
+ *p_low = 0;
+
+ switch (type) {
+ case ECORE_LLH_FILTER_ETHERTYPE:
+ *p_high = source_port_or_eth_type;
+ break;
+ case ECORE_LLH_FILTER_TCP_SRC_PORT:
+ case ECORE_LLH_FILTER_UDP_SRC_PORT:
+ *p_low = source_port_or_eth_type << 16;
+ break;
+ case ECORE_LLH_FILTER_TCP_DEST_PORT:
+ case ECORE_LLH_FILTER_UDP_DEST_PORT:
+ *p_low = dest_port;
+ break;
+ case ECORE_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
+ case ECORE_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
+ *p_low = (source_port_or_eth_type << 16) | dest_port;
+ break;
+ default:
+ DP_NOTICE(p_dev, true,
+ "Non valid LLH protocol filter type %d\n", type);
+ return ECORE_INVAL;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t
+ecore_llh_add_protocol_filter(struct ecore_dev *p_dev, u8 ppfid,
+ enum ecore_llh_prot_filter_type_t type,
+ u16 source_port_or_eth_type, u16 dest_port)
+{
+ struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
+ u8 filter_idx, abs_ppfid, type_bitmap;
+ char str[32];
+ union ecore_llh_filter filter;
+ u32 high, low, ref_cnt;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ if (p_ptt == OSAL_NULL)
+ return ECORE_AGAIN;
+
+ if (!OSAL_TEST_BIT(ECORE_MF_LLH_PROTO_CLSS, &p_dev->mf_bits))
+ goto out;
+
+ rc = ecore_llh_protocol_filter_stringify(p_dev, type,
+ source_port_or_eth_type,
+ dest_port, str, sizeof(str));
+ if (rc != ECORE_SUCCESS)
+ goto err;
+
+ OSAL_MEM_ZERO(&filter, sizeof(filter));
+ filter.protocol.type = type;
+ filter.protocol.source_port_or_eth_type = source_port_or_eth_type;
+ filter.protocol.dest_port = dest_port;
+ rc = ecore_llh_shadow_add_filter(p_dev, ppfid,
+ ECORE_LLH_FILTER_TYPE_PROTOCOL,
+ &filter, &filter_idx, &ref_cnt);
+ if (rc != ECORE_SUCCESS)
+ goto err;
+
+ rc = ecore_abs_ppfid(p_dev, ppfid, &abs_ppfid);
+ if (rc != ECORE_SUCCESS)
+ goto err;
+
+ /* Configure the LLH only in case of a new filter */
+ if (ref_cnt == 1) {
+ rc = ecore_llh_protocol_filter_to_hilo(p_dev, type,
+ source_port_or_eth_type,
+ dest_port, &high, &low);
+ if (rc != ECORE_SUCCESS)
+ goto err;
+
+ type_bitmap = 0x1 << type;
+ rc = ecore_llh_add_filter(p_hwfn, p_ptt, abs_ppfid, filter_idx,
+ type_bitmap, high, low);
+ if (rc != ECORE_SUCCESS)
+ goto err;
+ }
+
+ DP_VERBOSE(p_dev, ECORE_MSG_SP,
+ "LLH: Added protocol filter [%s] to ppfid %hhd [abs %hhd] at idx %hhd [ref_cnt %d]\n",
+ str, ppfid, abs_ppfid, filter_idx, ref_cnt);
+
+ goto out;
+
+err:
+ DP_NOTICE(p_hwfn, false,
+ "LLH: Failed to add protocol filter [%s] to ppfid %hhd\n",
+ str, ppfid);
+out:
+ ecore_ptt_release(p_hwfn, p_ptt);
+
+ return rc;
+}
+
+void ecore_llh_remove_mac_filter(struct ecore_dev *p_dev, u8 ppfid,
+ u8 mac_addr[ETH_ALEN])
+{
+ struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
+ union ecore_llh_filter filter;
+ u8 filter_idx, abs_ppfid;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ u32 ref_cnt;
+
+ if (p_ptt == OSAL_NULL)
+ return;
+
+ if (!OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS, &p_dev->mf_bits))
+ goto out;
+
+ OSAL_MEM_ZERO(&filter, sizeof(filter));
+ OSAL_MEMCPY(filter.mac.addr, mac_addr, ETH_ALEN);
+ rc = ecore_llh_shadow_remove_filter(p_dev, ppfid, &filter, &filter_idx,
+ &ref_cnt);
+ if (rc != ECORE_SUCCESS)
+ goto err;
+
+ rc = ecore_abs_ppfid(p_dev, ppfid, &abs_ppfid);
+ if (rc != ECORE_SUCCESS)
+ goto err;
+
+ /* Remove from the LLH in case the filter is not in use */
+ if (!ref_cnt) {
+ rc = ecore_llh_remove_filter(p_hwfn, p_ptt, abs_ppfid,
+ filter_idx);
+ if (rc != ECORE_SUCCESS)
+ goto err;
+ }
+
+ DP_VERBOSE(p_dev, ECORE_MSG_SP,
+ "LLH: Removed MAC filter [%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx] from ppfid %hhd [abs %hhd] at idx %hhd [ref_cnt %d]\n",
+ mac_addr[0], mac_addr[1], mac_addr[2], mac_addr[3],
+ mac_addr[4], mac_addr[5], ppfid, abs_ppfid, filter_idx,
+ ref_cnt);
+
+ goto out;
+
+err:
+ DP_NOTICE(p_dev, false,
+ "LLH: Failed to remove MAC filter [%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx] from ppfid %hhd\n",
+ mac_addr[0], mac_addr[1], mac_addr[2], mac_addr[3],
+ mac_addr[4], mac_addr[5], ppfid);
+out:
+ ecore_ptt_release(p_hwfn, p_ptt);
+}
+
+void ecore_llh_remove_protocol_filter(struct ecore_dev *p_dev, u8 ppfid,
+ enum ecore_llh_prot_filter_type_t type,
+ u16 source_port_or_eth_type,
+ u16 dest_port)
+{
+ struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
+ u8 filter_idx, abs_ppfid;
+ char str[32];
+ union ecore_llh_filter filter;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ u32 ref_cnt;
+
+ if (p_ptt == OSAL_NULL)
+ return;
+
+ if (!OSAL_TEST_BIT(ECORE_MF_LLH_PROTO_CLSS, &p_dev->mf_bits))
+ goto out;
+
+ rc = ecore_llh_protocol_filter_stringify(p_dev, type,
+ source_port_or_eth_type,
+ dest_port, str, sizeof(str));
+ if (rc != ECORE_SUCCESS)
+ goto err;
+
+ OSAL_MEM_ZERO(&filter, sizeof(filter));
+ filter.protocol.type = type;
+ filter.protocol.source_port_or_eth_type = source_port_or_eth_type;
+ filter.protocol.dest_port = dest_port;
+ rc = ecore_llh_shadow_remove_filter(p_dev, ppfid, &filter, &filter_idx,
+ &ref_cnt);
+ if (rc != ECORE_SUCCESS)
+ goto err;
+
+ rc = ecore_abs_ppfid(p_dev, ppfid, &abs_ppfid);
+ if (rc != ECORE_SUCCESS)
+ goto err;
+
+ /* Remove from the LLH in case the filter is not in use */
+ if (!ref_cnt) {
+ rc = ecore_llh_remove_filter(p_hwfn, p_ptt, abs_ppfid,
+ filter_idx);
+ if (rc != ECORE_SUCCESS)
+ goto err;
+ }
+
+ DP_VERBOSE(p_dev, ECORE_MSG_SP,
+ "LLH: Removed protocol filter [%s] from ppfid %hhd [abs %hhd] at idx %hhd [ref_cnt %d]\n",
+ str, ppfid, abs_ppfid, filter_idx, ref_cnt);
+
+ goto out;
+
+err:
+ DP_NOTICE(p_dev, false,
+ "LLH: Failed to remove protocol filter [%s] from ppfid %hhd\n",
+ str, ppfid);
+out:
+ ecore_ptt_release(p_hwfn, p_ptt);
+}
+
+void ecore_llh_clear_ppfid_filters(struct ecore_dev *p_dev, u8 ppfid)
+{
+ struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
+ u8 filter_idx, abs_ppfid;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ if (p_ptt == OSAL_NULL)
+ return;
+
+ if (!OSAL_TEST_BIT(ECORE_MF_LLH_PROTO_CLSS, &p_dev->mf_bits) &&
+ !OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS, &p_dev->mf_bits))
+ goto out;
+
+ rc = ecore_abs_ppfid(p_dev, ppfid, &abs_ppfid);
+ if (rc != ECORE_SUCCESS)
+ goto out;
+
+ rc = ecore_llh_shadow_remove_all_filters(p_dev, ppfid);
+ if (rc != ECORE_SUCCESS)
+ goto out;
+
+ for (filter_idx = 0; filter_idx < NIG_REG_LLH_FUNC_FILTER_EN_SIZE;
+ filter_idx++) {
+ rc = ecore_llh_remove_filter_e4(p_hwfn, p_ptt,
+ abs_ppfid, filter_idx);
+ if (rc != ECORE_SUCCESS)
+ goto out;
+ }
+out:
+ ecore_ptt_release(p_hwfn, p_ptt);
+}
+
+void ecore_llh_clear_all_filters(struct ecore_dev *p_dev)
+{
+ u8 ppfid;
+
+ if (!OSAL_TEST_BIT(ECORE_MF_LLH_PROTO_CLSS, &p_dev->mf_bits) &&
+ !OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS, &p_dev->mf_bits))
+ return;
+
+ for (ppfid = 0; ppfid < p_dev->p_llh_info->num_ppfid; ppfid++)
+ ecore_llh_clear_ppfid_filters(p_dev, ppfid);
+}
+
+enum _ecore_status_t ecore_all_ppfids_wr(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u32 addr,
+ u32 val)
+{
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
+ u8 ppfid, abs_ppfid;
+ enum _ecore_status_t rc;
+
+ for (ppfid = 0; ppfid < p_dev->p_llh_info->num_ppfid; ppfid++) {
+ rc = ecore_abs_ppfid(p_dev, ppfid, &abs_ppfid);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ ecore_ppfid_wr(p_hwfn, p_ptt, abs_ppfid, addr, val);
+ }
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
+ecore_llh_dump_ppfid_e4(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ u8 ppfid)
+{
+ struct ecore_llh_filter_e4_details filter_details;
+ u8 abs_ppfid, filter_idx;
+ u32 addr;
+ enum _ecore_status_t rc;
+
+ rc = ecore_abs_ppfid(p_hwfn->p_dev, ppfid, &abs_ppfid);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ addr = NIG_REG_PPF_TO_ENGINE_SEL + abs_ppfid * 0x4;
+ DP_NOTICE(p_hwfn, false,
+ "[rel_pf_id %hhd, ppfid={rel %hhd, abs %hhd}, engine_sel 0x%x]\n",
+ p_hwfn->rel_pf_id, ppfid, abs_ppfid,
+ ecore_rd(p_hwfn, p_ptt, addr));
+
+ for (filter_idx = 0; filter_idx < NIG_REG_LLH_FUNC_FILTER_EN_SIZE;
+ filter_idx++) {
+ OSAL_MEMSET(&filter_details, 0, sizeof(filter_details));
+ rc = ecore_llh_access_filter_e4(p_hwfn, p_ptt, abs_ppfid,
+ filter_idx, &filter_details,
+ false /* read access */);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ DP_NOTICE(p_hwfn, false,
+ "filter %2hhd: enable %d, value 0x%016lx, mode %d, protocol_type 0x%x, hdr_sel 0x%x\n",
+ filter_idx, filter_details.enable,
+ (unsigned long)filter_details.value,
+ filter_details.mode,
+ filter_details.protocol_type, filter_details.hdr_sel);
+ }
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_llh_dump_ppfid(struct ecore_dev *p_dev, u8 ppfid)
+{
+ struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
+ enum _ecore_status_t rc;
+
+ if (p_ptt == OSAL_NULL)
+ return ECORE_AGAIN;
+
+ rc = ecore_llh_dump_ppfid_e4(p_hwfn, p_ptt, ppfid);
+
+ ecore_ptt_release(p_hwfn, p_ptt);
+
+ return rc;
+}
+
+enum _ecore_status_t ecore_llh_dump_all(struct ecore_dev *p_dev)
+{
+ u8 ppfid;
+ enum _ecore_status_t rc;
+
+ for (ppfid = 0; ppfid < p_dev->p_llh_info->num_ppfid; ppfid++) {
+ rc = ecore_llh_dump_ppfid(p_dev, ppfid);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+/******************************* NIG LLH - End ********************************/
+
/* Configurable */
#define ECORE_MIN_DPIS (4) /* The minimal num of DPIs required to
* load the driver. The number was
@@ -456,6 +1639,12 @@ static void ecore_qm_info_free(struct ecore_hwfn *p_hwfn)
OSAL_FREE(p_hwfn->p_dev, qm_info->wfq_data);
}
+static void ecore_dbg_user_data_free(struct ecore_hwfn *p_hwfn)
+{
+ OSAL_FREE(p_hwfn->p_dev, p_hwfn->dbg_user_info);
+ p_hwfn->dbg_user_info = OSAL_NULL;
+}
+
void ecore_resc_free(struct ecore_dev *p_dev)
{
int i;
@@ -470,6 +1659,8 @@ void ecore_resc_free(struct ecore_dev *p_dev)
OSAL_FREE(p_dev, p_dev->reset_stats);
+ ecore_llh_free(p_dev);
+
for_each_hwfn(p_dev, i) {
struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
@@ -483,6 +1674,7 @@ void ecore_resc_free(struct ecore_dev *p_dev)
ecore_l2_free(p_hwfn);
ecore_dmae_info_free(p_hwfn);
ecore_dcbx_info_free(p_hwfn);
+ ecore_dbg_user_data_free(p_hwfn);
/* @@@TBD Flush work-queue ? */
/* destroy doorbell recovery mechanism */
@@ -562,12 +1754,11 @@ u16 ecore_init_qm_get_num_pf_rls(struct ecore_hwfn *p_hwfn)
{
u16 num_pf_rls, num_vfs = ecore_init_qm_get_num_vfs(p_hwfn);
- /* @DPDK */
/* num RLs can't exceed resource amount of rls or vports or the
* dcqcn qps
*/
num_pf_rls = (u16)OSAL_MIN_T(u32, RESC_NUM(p_hwfn, ECORE_RL),
- (u16)RESC_NUM(p_hwfn, ECORE_VPORT));
+ RESC_NUM(p_hwfn, ECORE_VPORT));
/* make sure after we reserve the default and VF rls we'll have
* something left
@@ -828,7 +2019,7 @@ u16 ecore_get_cm_pq_idx_mcos(struct ecore_hwfn *p_hwfn, u8 tc)
if (tc > max_tc)
DP_ERR(p_hwfn, "tc %d must be smaller than %d\n", tc, max_tc);
- return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_MCOS) + tc;
+ return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_MCOS) + (tc % max_tc);
}
u16 ecore_get_cm_pq_idx_vf(struct ecore_hwfn *p_hwfn, u16 vf)
@@ -838,17 +2029,17 @@ u16 ecore_get_cm_pq_idx_vf(struct ecore_hwfn *p_hwfn, u16 vf)
if (vf > max_vf)
DP_ERR(p_hwfn, "vf %d must be smaller than %d\n", vf, max_vf);
- return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + vf;
+ return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + (vf % max_vf);
}
u16 ecore_get_cm_pq_idx_rl(struct ecore_hwfn *p_hwfn, u16 rl)
{
u16 max_rl = ecore_init_qm_get_num_pf_rls(p_hwfn);
- if (rl > max_rl)
- DP_ERR(p_hwfn, "rl %d must be smaller than %d\n", rl, max_rl);
-
- return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_RLS) + rl;
+ /* for rate limiters, it is okay to use the modulo behavior - no
+ * DP_ERR
+ */
+ return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_RLS) + (rl % max_rl);
}
u16 ecore_get_qm_vport_idx_rl(struct ecore_hwfn *p_hwfn, u16 rl)
@@ -1334,6 +2525,20 @@ enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev)
"Failed to allocate memory for dcbx structure\n");
goto alloc_err;
}
+
+ rc = OSAL_DBG_ALLOC_USER_DATA(p_hwfn, &p_hwfn->dbg_user_info);
+ if (rc) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to allocate dbg user info structure\n");
+ goto alloc_err;
+ }
+ } /* hwfn loop */
+
+ rc = ecore_llh_alloc(p_dev);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_dev, true,
+ "Failed to allocate memory for the llh_info structure\n");
+ goto alloc_err;
}
p_dev->reset_stats = OSAL_ZALLOC(p_dev, GFP_KERNEL,
@@ -1476,8 +2681,7 @@ static enum _ecore_status_t ecore_calc_hw_mode(struct ecore_hwfn *p_hwfn)
return ECORE_INVAL;
}
- if (OSAL_TEST_BIT(ECORE_MF_OVLAN_CLSS,
- &p_hwfn->p_dev->mf_bits))
+ if (OSAL_TEST_BIT(ECORE_MF_OVLAN_CLSS, &p_hwfn->p_dev->mf_bits))
hw_mode |= 1 << MODE_MF_SD;
else
hw_mode |= 1 << MODE_MF_SI;
@@ -1960,6 +3164,14 @@ enum ECORE_ROCE_EDPM_MODE {
ECORE_ROCE_EDPM_MODE_DISABLE = 2,
};
+bool ecore_edpm_enabled(struct ecore_hwfn *p_hwfn)
+{
+ if (p_hwfn->dcbx_no_edpm || p_hwfn->db_bar_no_edpm)
+ return false;
+
+ return true;
+}
+
static enum _ecore_status_t
ecore_hw_init_pf_doorbell_bar(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
@@ -2047,7 +3259,7 @@ ecore_hw_init_pf_doorbell_bar(struct ecore_hwfn *p_hwfn,
DP_INFO(p_hwfn,
" dpi_size=%d, dpi_count=%d, roce_edpm=%s\n",
p_hwfn->dpi_size, p_hwfn->dpi_count,
- ((p_hwfn->dcbx_no_edpm) || (p_hwfn->db_bar_no_edpm)) ?
+ (!ecore_edpm_enabled(p_hwfn)) ?
"disabled" : "enabled");
/* Check return codes from above calls */
@@ -2073,17 +3285,7 @@ static enum _ecore_status_t ecore_hw_init_port(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
int hw_mode)
{
- u32 ppf_to_eng_sel[NIG_REG_PPF_TO_ENGINE_SEL_RT_SIZE];
- u32 val;
enum _ecore_status_t rc = ECORE_SUCCESS;
- u8 i;
-
- /* In CMT for non-RoCE packets - use connection based classification */
- val = ECORE_IS_CMT(p_hwfn->p_dev) ? 0x8 : 0x0;
- for (i = 0; i < NIG_REG_PPF_TO_ENGINE_SEL_RT_SIZE; i++)
- ppf_to_eng_sel[i] = val;
- STORE_RT_REG_AGG(p_hwfn, NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET,
- ppf_to_eng_sel);
/* In CMT the gate should be cleared by the 2nd hwfn */
if (!ECORE_IS_CMT(p_hwfn->p_dev) || !IS_LEAD_HWFN(p_hwfn))
@@ -2135,12 +3337,8 @@ static enum _ecore_status_t ecore_hw_init_port(struct ecore_hwfn *p_hwfn,
}
static enum _ecore_status_t
-ecore_hw_init_pf(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- struct ecore_tunnel_info *p_tunn,
- int hw_mode,
- bool b_hw_start,
- enum ecore_int_mode int_mode, bool allow_npar_tx_switch)
+ecore_hw_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ int hw_mode, struct ecore_hw_init_params *p_params)
{
u8 rel_pf_id = p_hwfn->rel_pf_id;
u32 prs_reg;
@@ -2228,17 +3426,18 @@ ecore_hw_init_pf(struct ecore_hwfn *p_hwfn,
*/
rc = ecore_hw_init_pf_doorbell_bar(p_hwfn, p_ptt);
- if (rc)
+ if (rc != ECORE_SUCCESS)
return rc;
- if (b_hw_start) {
+
+ if (p_params->b_hw_start) {
/* enable interrupts */
- rc = ecore_int_igu_enable(p_hwfn, p_ptt, int_mode);
+ rc = ecore_int_igu_enable(p_hwfn, p_ptt, p_params->int_mode);
if (rc != ECORE_SUCCESS)
return rc;
/* send function start command */
- rc = ecore_sp_pf_start(p_hwfn, p_ptt, p_tunn,
- allow_npar_tx_switch);
+ rc = ecore_sp_pf_start(p_hwfn, p_ptt, p_params->p_tunn,
+ p_params->allow_npar_tx_switch);
if (rc) {
DP_NOTICE(p_hwfn, true,
"Function start ramrod failed\n");
@@ -2410,6 +3609,7 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
bool b_default_mtu = true;
struct ecore_hwfn *p_hwfn;
enum _ecore_status_t rc = ECORE_SUCCESS;
+ u16 ether_type;
int i;
if ((p_params->int_mode == ECORE_INT_MODE_MSI) && ECORE_IS_CMT(p_dev)) {
@@ -2442,6 +3642,25 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
if (rc != ECORE_SUCCESS)
return rc;
+ if (IS_PF(p_dev) && (OSAL_TEST_BIT(ECORE_MF_8021Q_TAGGING,
+ &p_dev->mf_bits) ||
+ OSAL_TEST_BIT(ECORE_MF_8021AD_TAGGING,
+ &p_dev->mf_bits))) {
+ if (OSAL_TEST_BIT(ECORE_MF_8021Q_TAGGING,
+ &p_dev->mf_bits))
+ ether_type = ETHER_TYPE_VLAN;
+ else
+ ether_type = ETHER_TYPE_QINQ;
+ STORE_RT_REG(p_hwfn, PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET,
+ ether_type);
+ STORE_RT_REG(p_hwfn, NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET,
+ ether_type);
+ STORE_RT_REG(p_hwfn, PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET,
+ ether_type);
+ STORE_RT_REG(p_hwfn, DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET,
+ ether_type);
+ }
+
ecore_set_spq_block_timeout(p_hwfn, p_params->spq_timeout_ms);
rc = ecore_fill_load_req_params(p_hwfn, &load_req_params,
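
For reference, the standard IEEE outer-tag values behind the constants used above (not defined in this hunk):

	ETHER_TYPE_VLAN = 0x8100   /* 802.1Q  outer tag */
	ETHER_TYPE_QINQ = 0x88A8   /* 802.1ad outer tag */

The selected ethertype is written to the PRS, NIG, PBF and DORQ runtime offsets so every block classifies the S-tag consistently.
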
@@ -2542,11 +3761,8 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
/* Fall into */
case FW_MSG_CODE_DRV_LOAD_FUNCTION:
rc = ecore_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
- p_params->p_tunn,
p_hwfn->hw_info.hw_mode,
- p_params->b_hw_start,
- p_params->int_mode,
- p_params->allow_npar_tx_switch);
+ p_params);
break;
default:
DP_NOTICE(p_hwfn, false,
@@ -2591,6 +3807,34 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
}
if (IS_PF(p_dev)) {
+ /* Get pre-negotiated values for stag, bandwidth etc. */
+ p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
+ "Sending GET_OEM_UPDATES command to trigger stag/bandwidth attention handling\n");
+ rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
+ DRV_MSG_CODE_GET_OEM_UPDATES,
+ 1 << DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET,
+ &resp, &param);
+ if (rc != ECORE_SUCCESS)
+ DP_NOTICE(p_hwfn, false,
+ "Failed to send GET_OEM_UPDATES attention request\n");
+ }
+
+ if (IS_PF(p_dev)) {
+ /* Get pre-negotiated values for stag, bandwidth etc. */
+ p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
+ "Sending GET_OEM_UPDATES command to trigger stag/bandwidth attention handling\n");
+ rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
+ DRV_MSG_CODE_GET_OEM_UPDATES,
+ 1 << DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET,
+ &resp, &param);
+ if (rc != ECORE_SUCCESS)
+ DP_NOTICE(p_hwfn, false,
+ "Failed to send GET_OEM_UPDATES attention request\n");
+ }
+
+ if (IS_PF(p_dev)) {
p_hwfn = ECORE_LEADING_HWFN(p_dev);
drv_mb_param = STORM_FW_VERSION;
rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
@@ -2599,17 +3843,23 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
if (rc != ECORE_SUCCESS)
DP_INFO(p_hwfn, "Failed to update firmware version\n");
- if (!b_default_mtu)
+ if (!b_default_mtu) {
rc = ecore_mcp_ov_update_mtu(p_hwfn, p_hwfn->p_main_ptt,
p_hwfn->hw_info.mtu);
- if (rc != ECORE_SUCCESS)
- DP_INFO(p_hwfn, "Failed to update default mtu\n");
+ if (rc != ECORE_SUCCESS)
+ DP_INFO(p_hwfn, "Failed to update default mtu\n");
+ }
rc = ecore_mcp_ov_update_driver_state(p_hwfn,
p_hwfn->p_main_ptt,
ECORE_OV_DRIVER_STATE_DISABLED);
if (rc != ECORE_SUCCESS)
DP_INFO(p_hwfn, "Failed to update driver state\n");
+
+ rc = ecore_mcp_ov_update_eswitch(p_hwfn, p_hwfn->p_main_ptt,
+ ECORE_OV_ESWITCH_NONE);
+ if (rc != ECORE_SUCCESS)
+ DP_INFO(p_hwfn, "Failed to update eswitch mode\n");
}
return rc;
@@ -2742,6 +3992,12 @@ enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev)
rc2 = ECORE_UNKNOWN_ERROR;
}
+ OSAL_DPC_SYNC(p_hwfn);
+
+ /* After this point we don't expect the FW to send us async
+ * events
+ */
+
/* perform debug action after PF stop was sent */
OSAL_AFTER_PF_STOP((void *)p_dev, p_hwfn->my_id);
@@ -2778,6 +4034,12 @@ enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev)
/* Need to wait 1ms to guarantee SBs are cleared */
OSAL_MSLEEP(1);
+ if (IS_LEAD_HWFN(p_hwfn) &&
+ OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS, &p_dev->mf_bits) &&
+ !ECORE_IS_FCOE_PERSONALITY(p_hwfn))
+ ecore_llh_remove_mac_filter(p_dev, 0,
+ p_hwfn->hw_info.hw_mac_addr);
+
if (!p_dev->recov_in_prog) {
ecore_verify_reg_val(p_hwfn, p_ptt,
QM_REG_USG_CNT_PF_TX, 0);
@@ -2987,15 +4249,30 @@ static void ecore_hw_set_feat(struct ecore_hwfn *p_hwfn)
FEAT_NUM(p_hwfn, ECORE_VF_L2_QUE));
}
- if (ECORE_IS_FCOE_PERSONALITY(p_hwfn))
- feat_num[ECORE_FCOE_CQ] =
- OSAL_MIN_T(u32, sb_cnt.cnt, RESC_NUM(p_hwfn,
- ECORE_CMDQS_CQS));
+ if (ECORE_IS_FCOE_PERSONALITY(p_hwfn) ||
+ ECORE_IS_ISCSI_PERSONALITY(p_hwfn)) {
+ u32 *p_storage_feat = ECORE_IS_FCOE_PERSONALITY(p_hwfn) ?
+ &feat_num[ECORE_FCOE_CQ] :
+ &feat_num[ECORE_ISCSI_CQ];
+ u32 limit = sb_cnt.cnt;
- if (ECORE_IS_ISCSI_PERSONALITY(p_hwfn))
- feat_num[ECORE_ISCSI_CQ] =
- OSAL_MIN_T(u32, sb_cnt.cnt, RESC_NUM(p_hwfn,
- ECORE_CMDQS_CQS));
+ /* The number of queues should not exceed the number of FP SBs.
+ * In storage target, the queues are divided into pairs of a CQ
+ * and a CmdQ, and each pair uses a single SB. The limit in
+ * this case should allow a max ratio of 2:1 instead of 1:1.
+ */
+ if (p_hwfn->p_dev->b_is_target)
+ limit *= 2;
+ *p_storage_feat = OSAL_MIN_T(u32, limit,
+ RESC_NUM(p_hwfn, ECORE_CMDQS_CQS));
+
+ /* @DPDK */
+ /* The size of "cq_cmdq_sb_num_arr" in the fcoe/iscsi init
+ * ramrod is limited to "NUM_OF_GLOBAL_QUEUES / 2".
+ */
+ *p_storage_feat = OSAL_MIN_T(u32, *p_storage_feat,
+ (NUM_OF_GLOBAL_QUEUES / 2));
+ }
DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE,
"#PF_L2_QUEUE=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d #FCOE_CQ=%d #ISCSI_CQ=%d #SB=%d\n",
@@ -3276,6 +4553,59 @@ static enum _ecore_status_t ecore_hw_set_resc_info(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
}
+#define ECORE_NONUSED_PPFID_MASK_BB_4P_LO_PORTS 0xaa
+#define ECORE_NONUSED_PPFID_MASK_BB_4P_HI_PORTS 0x55
+#define ECORE_NONUSED_PPFID_MASK_AH_4P 0xf0
+
+static enum _ecore_status_t ecore_hw_get_ppfid_bitmap(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u8 native_ppfid_idx = ECORE_PPFID_BY_PFID(p_hwfn), new_bitmap;
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
+ enum _ecore_status_t rc;
+
+ rc = ecore_mcp_get_ppfid_bitmap(p_hwfn, p_ptt);
+ if (rc != ECORE_SUCCESS && rc != ECORE_NOTIMPL)
+ return rc;
+ else if (rc == ECORE_NOTIMPL)
+ p_dev->ppfid_bitmap = 0x1 << native_ppfid_idx;
+
+ /* 4-ports mode has limitations that should be enforced:
+ * - BB: the MFW can access only PPFIDs whose corresponding PFIDs
+ * belong to this port.
+ * - AH/E5: only 4 PPFIDs per port are available.
+ */
+ if (ecore_device_num_ports(p_dev) == 4) {
+ u8 mask;
+
+ if (ECORE_IS_BB(p_dev))
+ mask = MFW_PORT(p_hwfn) > 1 ?
+ ECORE_NONUSED_PPFID_MASK_BB_4P_HI_PORTS :
+ ECORE_NONUSED_PPFID_MASK_BB_4P_LO_PORTS;
+ else
+ mask = ECORE_NONUSED_PPFID_MASK_AH_4P;
+
+ if (p_dev->ppfid_bitmap & mask) {
+ new_bitmap = p_dev->ppfid_bitmap & ~mask;
+ DP_INFO(p_hwfn,
+ "Fix the PPFID bitmap for 4-ports mode: 0x%hhx -> 0x%hhx\n",
+ p_dev->ppfid_bitmap, new_bitmap);
+ p_dev->ppfid_bitmap = new_bitmap;
+ }
+ }
+
+ /* The native PPFID is expected to be part of the allocated bitmap */
+ if (!(p_dev->ppfid_bitmap & (0x1 << native_ppfid_idx))) {
+ new_bitmap = 0x1 << native_ppfid_idx;
+ DP_INFO(p_hwfn,
+ "Fix the PPFID bitmap to inculde the native PPFID: %hhd -> 0x%hhx\n",
+ p_dev->ppfid_bitmap, new_bitmap);
+ p_dev->ppfid_bitmap = new_bitmap;
+ }
+
+ return ECORE_SUCCESS;
+}
+
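
A walk-through of the 4-ports fixup in ecore_hw_get_ppfid_bitmap() above, with invented values:

	/* BB device, MFW_PORT(p_hwfn) = 2 (a "high" port), bitmap reported
	 * by the MFW = 0x3c:
	 *
	 *   mask        = ECORE_NONUSED_PPFID_MASK_BB_4P_HI_PORTS = 0x55
	 *   0x3c & 0x55 = 0x14  -> disallowed PPFIDs are set, fix the bitmap
	 *   new_bitmap  = 0x3c & ~0x55 = 0x28
	 *
	 * If the native PPFID were, say, 1 (bit 0x02), it is missing from
	 * 0x28, so the bitmap would then be reset to 0x02 alone.
	 */
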
static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
bool drv_resc_alloc)
@@ -3350,6 +4680,13 @@ static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn,
"Failed to release the resource lock for the resource allocation commands\n");
}
+ /* PPFID bitmap */
+ if (IS_LEAD_HWFN(p_hwfn)) {
+ rc = ecore_hw_get_ppfid_bitmap(p_hwfn, p_ptt);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+ }
+
#ifndef ASIC_ONLY
if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
/* Reduced build contains less PQs */
@@ -3621,7 +4958,8 @@ ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn,
case NVM_CFG1_GLOB_MF_MODE_BD:
p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_OVLAN_CLSS |
1 << ECORE_MF_LLH_PROTO_CLSS |
- 1 << ECORE_MF_8021AD_TAGGING;
+ 1 << ECORE_MF_8021AD_TAGGING |
+ 1 << ECORE_MF_FIP_SPECIAL;
break;
case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_LLH_MAC_CLSS |
@@ -4139,9 +5477,8 @@ void ecore_prepare_hibernate(struct ecore_dev *p_dev)
#endif
static enum _ecore_status_t
-ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn,
- void OSAL_IOMEM * p_regview,
- void OSAL_IOMEM * p_doorbells,
+ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn, void OSAL_IOMEM *p_regview,
+ void OSAL_IOMEM *p_doorbells, u64 db_phys_addr,
struct ecore_hw_prepare_params *p_params)
{
struct ecore_mdump_retain_data mdump_retain;
@@ -4152,6 +5489,7 @@ ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn,
/* Split PCI bars evenly between hwfns */
p_hwfn->regview = p_regview;
p_hwfn->doorbells = p_doorbells;
+ p_hwfn->db_phys_addr = db_phys_addr;
if (IS_VF(p_dev))
return ecore_vf_hw_prepare(p_hwfn);
@@ -4217,6 +5555,13 @@ ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn,
rc = ecore_mcp_initiate_pf_flr(p_hwfn, p_hwfn->p_main_ptt);
if (rc != ECORE_SUCCESS)
DP_NOTICE(p_hwfn, false, "Failed to initiate PF FLR\n");
+
+ /* Workaround for MFW issue where PF FLR does not cleanup
+ * IGU block
+ */
+ if (!(p_hwfn->mcp_info->capabilities &
+ FW_MB_PARAM_FEATURE_SUPPORT_IGU_CLEANUP))
+ ecore_pf_flr_igu_cleanup(p_hwfn);
}
/* Check if mdump logs/data are present and update the epoch value */
@@ -4287,6 +5632,7 @@ enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev,
p_dev->chk_reg_fifo = p_params->chk_reg_fifo;
p_dev->allow_mdump = p_params->allow_mdump;
p_hwfn->b_en_pacing = p_params->b_en_pacing;
+ p_dev->b_is_target = p_params->b_is_target;
if (p_params->b_relaxed_probe)
p_params->p_relaxed_res = ECORE_HW_PREPARE_SUCCESS;
@@ -4296,9 +5642,9 @@ enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev,
ecore_init_iro_array(p_dev);
/* Initialize the first hwfn - will learn number of hwfns */
- rc = ecore_hw_prepare_single(p_hwfn,
- p_dev->regview,
- p_dev->doorbells, p_params);
+ rc = ecore_hw_prepare_single(p_hwfn, p_dev->regview,
+ p_dev->doorbells, p_dev->db_phys_addr,
+ p_params);
if (rc != ECORE_SUCCESS)
return rc;
@@ -4308,24 +5654,26 @@ enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev,
if (ECORE_IS_CMT(p_dev)) {
void OSAL_IOMEM *p_regview, *p_doorbell;
u8 OSAL_IOMEM *addr;
+ u64 db_phys_addr;
+ u32 offset;
/* adjust bar offset for second engine */
- addr = (u8 OSAL_IOMEM *)p_dev->regview +
- ecore_hw_bar_size(p_hwfn,
- p_hwfn->p_main_ptt,
- BAR_ID_0) / 2;
+ offset = ecore_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt,
+ BAR_ID_0) / 2;
+ addr = (u8 OSAL_IOMEM *)p_dev->regview + offset;
p_regview = (void OSAL_IOMEM *)addr;
- addr = (u8 OSAL_IOMEM *)p_dev->doorbells +
- ecore_hw_bar_size(p_hwfn,
- p_hwfn->p_main_ptt,
- BAR_ID_1) / 2;
+ offset = ecore_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt,
+ BAR_ID_1) / 2;
+ addr = (u8 OSAL_IOMEM *)p_dev->doorbells + offset;
p_doorbell = (void OSAL_IOMEM *)addr;
+ db_phys_addr = p_dev->db_phys_addr + offset;
p_dev->hwfns[1].b_en_pacing = p_params->b_en_pacing;
/* prepare second hw function */
rc = ecore_hw_prepare_single(&p_dev->hwfns[1], p_regview,
- p_doorbell, p_params);
+ p_doorbell, db_phys_addr,
+ p_params);
/* in case of error, need to free the previously
* initiliazed hwfn 0.
@@ -4722,419 +6070,6 @@ enum _ecore_status_t ecore_fw_rss_eng(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
}
-static enum _ecore_status_t
-ecore_llh_add_mac_filter_bb_ah(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, u32 high, u32 low,
- u32 *p_entry_num)
-{
- u32 en;
- int i;
-
- /* Find a free entry and utilize it */
- for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
- en = ecore_rd(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 +
- i * sizeof(u32));
- if (en)
- continue;
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
- 2 * i * sizeof(u32), low);
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
- (2 * i + 1) * sizeof(u32), high);
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_MODE_BB_K2 +
- i * sizeof(u32), 0);
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_BB_K2 +
- i * sizeof(u32), 0);
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 +
- i * sizeof(u32), 1);
- break;
- }
-
- if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE)
- return ECORE_NORESOURCES;
-
- *p_entry_num = i;
-
- return ECORE_SUCCESS;
-}
-
-enum _ecore_status_t ecore_llh_add_mac_filter(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, u8 *p_filter)
-{
- u32 high, low, entry_num;
- enum _ecore_status_t rc = ECORE_SUCCESS;
-
- if (!OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS,
- &p_hwfn->p_dev->mf_bits))
- return ECORE_SUCCESS;
-
- high = p_filter[1] | (p_filter[0] << 8);
- low = p_filter[5] | (p_filter[4] << 8) |
- (p_filter[3] << 16) | (p_filter[2] << 24);
-
- if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev))
- rc = ecore_llh_add_mac_filter_bb_ah(p_hwfn, p_ptt, high, low,
- &entry_num);
- if (rc != ECORE_SUCCESS) {
- DP_NOTICE(p_hwfn, false,
- "Failed to find an empty LLH filter to utilize\n");
- return rc;
- }
-
- DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
- "MAC: %02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx is added at %d\n",
- p_filter[0], p_filter[1], p_filter[2], p_filter[3],
- p_filter[4], p_filter[5], entry_num);
-
- return rc;
-}
-
-static enum _ecore_status_t
-ecore_llh_remove_mac_filter_bb_ah(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, u32 high, u32 low,
- u32 *p_entry_num)
-{
- int i;
-
- /* Find the entry and clean it */
- for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
- if (ecore_rd(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
- 2 * i * sizeof(u32)) != low)
- continue;
- if (ecore_rd(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
- (2 * i + 1) * sizeof(u32)) != high)
- continue;
-
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + i * sizeof(u32), 0);
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
- 2 * i * sizeof(u32), 0);
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
- (2 * i + 1) * sizeof(u32), 0);
- break;
- }
-
- if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE)
- return ECORE_INVAL;
-
- *p_entry_num = i;
-
- return ECORE_SUCCESS;
-}
-
-void ecore_llh_remove_mac_filter(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, u8 *p_filter)
-{
- u32 high, low, entry_num;
- enum _ecore_status_t rc = ECORE_SUCCESS;
-
- if (!OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS,
- &p_hwfn->p_dev->mf_bits))
- return;
-
- high = p_filter[1] | (p_filter[0] << 8);
- low = p_filter[5] | (p_filter[4] << 8) |
- (p_filter[3] << 16) | (p_filter[2] << 24);
-
- if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev))
- rc = ecore_llh_remove_mac_filter_bb_ah(p_hwfn, p_ptt, high,
- low, &entry_num);
- if (rc != ECORE_SUCCESS) {
- DP_NOTICE(p_hwfn, false,
- "Tried to remove a non-configured filter\n");
- return;
- }
-
-
- DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
- "MAC: %02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx was removed from %d\n",
- p_filter[0], p_filter[1], p_filter[2], p_filter[3],
- p_filter[4], p_filter[5], entry_num);
-}
-
-static enum _ecore_status_t
-ecore_llh_add_protocol_filter_bb_ah(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- enum ecore_llh_port_filter_type_t type,
- u32 high, u32 low, u32 *p_entry_num)
-{
- u32 en;
- int i;
-
- /* Find a free entry and utilize it */
- for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
- en = ecore_rd(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 +
- i * sizeof(u32));
- if (en)
- continue;
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
- 2 * i * sizeof(u32), low);
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
- (2 * i + 1) * sizeof(u32), high);
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_MODE_BB_K2 +
- i * sizeof(u32), 1);
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_BB_K2 +
- i * sizeof(u32), 1 << type);
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + i * sizeof(u32), 1);
- break;
- }
-
- if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE)
- return ECORE_NORESOURCES;
-
- *p_entry_num = i;
-
- return ECORE_SUCCESS;
-}
-
-enum _ecore_status_t
-ecore_llh_add_protocol_filter(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u16 source_port_or_eth_type,
- u16 dest_port,
- enum ecore_llh_port_filter_type_t type)
-{
- u32 high, low, entry_num;
- enum _ecore_status_t rc = ECORE_SUCCESS;
-
- if (!OSAL_TEST_BIT(ECORE_MF_LLH_PROTO_CLSS,
- &p_hwfn->p_dev->mf_bits))
- return rc;
-
- high = 0;
- low = 0;
-
- switch (type) {
- case ECORE_LLH_FILTER_ETHERTYPE:
- high = source_port_or_eth_type;
- break;
- case ECORE_LLH_FILTER_TCP_SRC_PORT:
- case ECORE_LLH_FILTER_UDP_SRC_PORT:
- low = source_port_or_eth_type << 16;
- break;
- case ECORE_LLH_FILTER_TCP_DEST_PORT:
- case ECORE_LLH_FILTER_UDP_DEST_PORT:
- low = dest_port;
- break;
- case ECORE_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
- case ECORE_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
- low = (source_port_or_eth_type << 16) | dest_port;
- break;
- default:
- DP_NOTICE(p_hwfn, true,
- "Non valid LLH protocol filter type %d\n", type);
- return ECORE_INVAL;
- }
-
- if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev))
- rc = ecore_llh_add_protocol_filter_bb_ah(p_hwfn, p_ptt, type,
- high, low, &entry_num);
- if (rc != ECORE_SUCCESS) {
- DP_NOTICE(p_hwfn, false,
- "Failed to find an empty LLH filter to utilize\n");
- return rc;
- }
- switch (type) {
- case ECORE_LLH_FILTER_ETHERTYPE:
- DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
- "ETH type %x is added at %d\n",
- source_port_or_eth_type, entry_num);
- break;
- case ECORE_LLH_FILTER_TCP_SRC_PORT:
- DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
- "TCP src port %x is added at %d\n",
- source_port_or_eth_type, entry_num);
- break;
- case ECORE_LLH_FILTER_UDP_SRC_PORT:
- DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
- "UDP src port %x is added at %d\n",
- source_port_or_eth_type, entry_num);
- break;
- case ECORE_LLH_FILTER_TCP_DEST_PORT:
- DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
- "TCP dst port %x is added at %d\n", dest_port,
- entry_num);
- break;
- case ECORE_LLH_FILTER_UDP_DEST_PORT:
- DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
- "UDP dst port %x is added at %d\n", dest_port,
- entry_num);
- break;
- case ECORE_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
- DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
- "TCP src/dst ports %x/%x are added at %d\n",
- source_port_or_eth_type, dest_port, entry_num);
- break;
- case ECORE_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
- DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
- "UDP src/dst ports %x/%x are added at %d\n",
- source_port_or_eth_type, dest_port, entry_num);
- break;
- }
-
- return rc;
-}
-
-static enum _ecore_status_t
-ecore_llh_remove_protocol_filter_bb_ah(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- enum ecore_llh_port_filter_type_t type,
- u32 high, u32 low, u32 *p_entry_num)
-{
- int i;
-
- /* Find the entry and clean it */
- for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
- if (!ecore_rd(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 +
- i * sizeof(u32)))
- continue;
- if (!ecore_rd(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_MODE_BB_K2 +
- i * sizeof(u32)))
- continue;
- if (!(ecore_rd(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_BB_K2 +
- i * sizeof(u32)) & (1 << type)))
- continue;
- if (ecore_rd(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
- 2 * i * sizeof(u32)) != low)
- continue;
- if (ecore_rd(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
- (2 * i + 1) * sizeof(u32)) != high)
- continue;
-
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 + i * sizeof(u32), 0);
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_MODE_BB_K2 +
- i * sizeof(u32), 0);
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_BB_K2 +
- i * sizeof(u32), 0);
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
- 2 * i * sizeof(u32), 0);
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
- (2 * i + 1) * sizeof(u32), 0);
- break;
- }
-
- if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE)
- return ECORE_INVAL;
-
- *p_entry_num = i;
-
- return ECORE_SUCCESS;
-}
-
-void
-ecore_llh_remove_protocol_filter(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u16 source_port_or_eth_type,
- u16 dest_port,
- enum ecore_llh_port_filter_type_t type)
-{
- u32 high, low, entry_num;
- enum _ecore_status_t rc = ECORE_SUCCESS;
-
- if (!OSAL_TEST_BIT(ECORE_MF_LLH_PROTO_CLSS,
- &p_hwfn->p_dev->mf_bits))
- return;
-
- high = 0;
- low = 0;
-
- switch (type) {
- case ECORE_LLH_FILTER_ETHERTYPE:
- high = source_port_or_eth_type;
- break;
- case ECORE_LLH_FILTER_TCP_SRC_PORT:
- case ECORE_LLH_FILTER_UDP_SRC_PORT:
- low = source_port_or_eth_type << 16;
- break;
- case ECORE_LLH_FILTER_TCP_DEST_PORT:
- case ECORE_LLH_FILTER_UDP_DEST_PORT:
- low = dest_port;
- break;
- case ECORE_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
- case ECORE_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
- low = (source_port_or_eth_type << 16) | dest_port;
- break;
- default:
- DP_NOTICE(p_hwfn, true,
- "Non valid LLH protocol filter type %d\n", type);
- return;
- }
-
- if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev))
- rc = ecore_llh_remove_protocol_filter_bb_ah(p_hwfn, p_ptt, type,
- high, low,
- &entry_num);
- if (rc != ECORE_SUCCESS) {
- DP_NOTICE(p_hwfn, false,
- "Tried to remove a non-configured filter [type %d, source_port_or_eth_type 0x%x, dest_port 0x%x]\n",
- type, source_port_or_eth_type, dest_port);
- return;
- }
-
- DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
- "Protocol filter [type %d, source_port_or_eth_type 0x%x, dest_port 0x%x] was removed from %d\n",
- type, source_port_or_eth_type, dest_port, entry_num);
-}
-
-static void ecore_llh_clear_all_filters_bb_ah(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt)
-{
- int i;
-
- if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
- return;
-
- for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_EN_BB_K2 +
- i * sizeof(u32), 0);
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
- 2 * i * sizeof(u32), 0);
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_FILTER_VALUE_BB_K2 +
- (2 * i + 1) * sizeof(u32), 0);
- }
-}
-
-void ecore_llh_clear_all_filters(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt)
-{
- if (!OSAL_TEST_BIT(ECORE_MF_LLH_PROTO_CLSS,
- &p_hwfn->p_dev->mf_bits) &&
- !OSAL_TEST_BIT(ECORE_MF_LLH_MAC_CLSS,
- &p_hwfn->p_dev->mf_bits))
- return;
-
- if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev))
- ecore_llh_clear_all_filters_bb_ah(p_hwfn, p_ptt);
-}
-
enum _ecore_status_t
ecore_llh_set_function_as_default(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
@@ -5713,3 +6648,8 @@ void ecore_set_fw_mac_addr(__le16 *fw_msb,
((u8 *)fw_lsb)[0] = mac[5];
((u8 *)fw_lsb)[1] = mac[4];
}
+
+bool ecore_is_mf_fip_special(struct ecore_dev *p_dev)
+{
+ return !!OSAL_TEST_BIT(ECORE_MF_FIP_SPECIAL, &p_dev->mf_bits);
+}
diff --git a/drivers/net/qede/base/ecore_dev_api.h b/drivers/net/qede/base/ecore_dev_api.h
index 02bacc22..73080632 100644
--- a/drivers/net/qede/base/ecore_dev_api.h
+++ b/drivers/net/qede/base/ecore_dev_api.h
@@ -114,6 +114,9 @@ struct ecore_hw_init_params {
/* Driver load parameters */
struct ecore_drv_load_params *p_drv_load_params;
+ /* Avoid engine affinity for RoCE/storage in case of CMT mode */
+ bool avoid_eng_affin;
+
/* SPQ block timeout in msec */
u32 spq_timeout_ms;
};
@@ -271,6 +274,9 @@ struct ecore_hw_prepare_params {
/* Enable/disable request by ecore client for pacing */
bool b_en_pacing;
+
+ /* Indicates whether this PF serves a storage target */
+ bool b_is_target;
};
/**
@@ -425,11 +431,17 @@ enum ecore_dmae_address_type_t {
#define ECORE_DMAE_FLAG_VF_SRC 0x00000002
#define ECORE_DMAE_FLAG_VF_DST 0x00000004
#define ECORE_DMAE_FLAG_COMPLETION_DST 0x00000008
+#define ECORE_DMAE_FLAG_PORT 0x00000010
+#define ECORE_DMAE_FLAG_PF_SRC 0x00000020
+#define ECORE_DMAE_FLAG_PF_DST 0x00000040
struct ecore_dmae_params {
u32 flags; /* consists of ECORE_DMAE_FLAG_* values */
u8 src_vfid;
u8 dst_vfid;
+ u8 port_id;
+ u8 src_pfid;
+ u8 dst_pfid;
};
/**
@@ -441,7 +453,9 @@ struct ecore_dmae_params {
* @param source_addr
* @param grc_addr (dmae_data_offset)
* @param size_in_dwords
- * @param flags (one of the flags defined above)
+ * @param p_params (default parameters will be used in case of OSAL_NULL)
+ *
+ * @return enum _ecore_status_t
*/
enum _ecore_status_t
ecore_dmae_host2grc(struct ecore_hwfn *p_hwfn,
@@ -449,7 +463,7 @@ ecore_dmae_host2grc(struct ecore_hwfn *p_hwfn,
u64 source_addr,
u32 grc_addr,
u32 size_in_dwords,
- u32 flags);
+ struct ecore_dmae_params *p_params);
/**
* @brief ecore_dmae_grc2host - Read data from dmae data offset
@@ -459,7 +473,9 @@ ecore_dmae_host2grc(struct ecore_hwfn *p_hwfn,
* @param grc_addr (dmae_data_offset)
* @param dest_addr
* @param size_in_dwords
- * @param flags - one of the flags defined above
+ * @param p_params (default parameters will be used in case of OSAL_NULL)
+ *
+ * @return enum _ecore_status_t
*/
enum _ecore_status_t
ecore_dmae_grc2host(struct ecore_hwfn *p_hwfn,
@@ -467,7 +483,7 @@ ecore_dmae_grc2host(struct ecore_hwfn *p_hwfn,
u32 grc_addr,
dma_addr_t dest_addr,
u32 size_in_dwords,
- u32 flags);
+ struct ecore_dmae_params *p_params);
/**
* @brief ecore_dmae_host2host - copy data from to source address
@@ -478,7 +494,9 @@ ecore_dmae_grc2host(struct ecore_hwfn *p_hwfn,
* @param source_addr
* @param dest_addr
* @param size_in_dwords
- * @param params
+ * @param p_params (default parameters will be used in case of OSAL_NULL)
+ *
+ * @return enum _ecore_status_t
*/
enum _ecore_status_t
ecore_dmae_host2host(struct ecore_hwfn *p_hwfn,
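
A caller-side sketch of the reworked DMAE interface (the surrounding variables are assumed to exist; the flag, the port_id field and the prototypes come from this header). Passing OSAL_NULL keeps the old default behaviour:

	struct ecore_dmae_params params;

	/* Default behaviour - equivalent to the old flags = 0 call */
	rc = ecore_dmae_host2grc(p_hwfn, p_ptt, host_addr, grc_addr,
				 len_dw, OSAL_NULL);

	/* Hypothetical usage: issue the copy on behalf of another port */
	OSAL_MEMSET(&params, 0, sizeof(params));
	params.flags = ECORE_DMAE_FLAG_PORT;
	params.port_id = 1;
	rc = ecore_dmae_host2grc(p_hwfn, p_ptt, host_addr, grc_addr,
				 len_dw, &params);
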
@@ -559,28 +577,79 @@ enum _ecore_status_t ecore_fw_rss_eng(struct ecore_hwfn *p_hwfn,
u8 *dst_id);
/**
- * @brief ecore_llh_add_mac_filter - configures a MAC filter in llh
+ * @brief ecore_llh_get_num_ppfid - Return the number of LLH filter
+ * banks that are allocated to the PF.
*
- * @param p_hwfn
- * @param p_ptt
- * @param p_filter - MAC to add
+ * @param p_dev
+ *
+ * @return u8 - Number of LLH filter banks
*/
-enum _ecore_status_t ecore_llh_add_mac_filter(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u8 *p_filter);
+u8 ecore_llh_get_num_ppfid(struct ecore_dev *p_dev);
+
+enum ecore_eng {
+ ECORE_ENG0,
+ ECORE_ENG1,
+ ECORE_BOTH_ENG,
+};
/**
- * @brief ecore_llh_remove_mac_filter - removes a MAC filtre from llh
+ * @brief ecore_llh_get_l2_affinity_hint - Return the hint for the L2 affinity
*
- * @param p_hwfn
- * @param p_ptt
- * @param p_filter - MAC to remove
+ * @param p_dev
+ *
+ * @return enum ecore_eng - L2 affinity hint
+ */
+enum ecore_eng ecore_llh_get_l2_affinity_hint(struct ecore_dev *p_dev);
+
+/**
+ * @brief ecore_llh_set_ppfid_affinity - Set the engine affinity for the given
+ * LLH filter bank.
+ *
+ * @param p_dev
+ * @param ppfid - relative within the allocated ppfids ('0' is the default one).
+ * @param eng
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_llh_set_ppfid_affinity(struct ecore_dev *p_dev,
+ u8 ppfid, enum ecore_eng eng);
+
+/**
+ * @brief ecore_llh_set_roce_affinity - Set the RoCE engine affinity
+ *
+ * @param p_dev
+ * @param eng
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_llh_set_roce_affinity(struct ecore_dev *p_dev,
+ enum ecore_eng eng);
+
+/**
+ * @brief ecore_llh_add_mac_filter - Add a LLH MAC filter into the given filter
+ * bank.
+ *
+ * @param p_dev
+ * @param ppfid - relative within the allocated ppfids ('0' is the default one).
+ * @param mac_addr - MAC to add
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_llh_add_mac_filter(struct ecore_dev *p_dev, u8 ppfid,
+ u8 mac_addr[ETH_ALEN]);
+
+/**
+ * @brief ecore_llh_remove_mac_filter - Remove a LLH MAC filter from the given
+ * filter bank.
+ *
+ * @param p_dev
+ * @param ppfid - relative within the allocated ppfids ('0' is the default one).
+ * @param mac_addr - MAC to remove
*/
-void ecore_llh_remove_mac_filter(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u8 *p_filter);
+void ecore_llh_remove_mac_filter(struct ecore_dev *p_dev, u8 ppfid,
+ u8 mac_addr[ETH_ALEN]);
-enum ecore_llh_port_filter_type_t {
+enum ecore_llh_prot_filter_type_t {
ECORE_LLH_FILTER_ETHERTYPE,
ECORE_LLH_FILTER_TCP_SRC_PORT,
ECORE_LLH_FILTER_TCP_DEST_PORT,
@@ -591,45 +660,52 @@ enum ecore_llh_port_filter_type_t {
};
/**
- * @brief ecore_llh_add_protocol_filter - configures a protocol filter in llh
+ * @brief ecore_llh_add_protocol_filter - Add a LLH protocol filter into the
+ * given filter bank.
*
- * @param p_hwfn
- * @param p_ptt
+ * @param p_dev
+ * @param ppfid - relative within the allocated ppfids ('0' is the default one).
+ * @param type - type of filters and comparing
* @param source_port_or_eth_type - source port or ethertype to add
* @param dest_port - destination port to add
- * @param type - type of filters and comparing
+ *
+ * @return enum _ecore_status_t
*/
enum _ecore_status_t
-ecore_llh_add_protocol_filter(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u16 source_port_or_eth_type,
- u16 dest_port,
- enum ecore_llh_port_filter_type_t type);
+ecore_llh_add_protocol_filter(struct ecore_dev *p_dev, u8 ppfid,
+ enum ecore_llh_prot_filter_type_t type,
+ u16 source_port_or_eth_type, u16 dest_port);
/**
- * @brief ecore_llh_remove_protocol_filter - remove a protocol filter in llh
+ * @brief ecore_llh_remove_protocol_filter - Remove a LLH protocol filter from
+ * the given filter bank.
*
- * @param p_hwfn
- * @param p_ptt
+ * @param p_dev
+ * @param ppfid - relative within the allocated ppfids ('0' is the default one).
+ * @param type - type of filters and comparing
* @param source_port_or_eth_type - source port or ethertype to add
* @param dest_port - destination port to add
- * @param type - type of filters and comparing
*/
-void
-ecore_llh_remove_protocol_filter(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u16 source_port_or_eth_type,
- u16 dest_port,
- enum ecore_llh_port_filter_type_t type);
+void ecore_llh_remove_protocol_filter(struct ecore_dev *p_dev, u8 ppfid,
+ enum ecore_llh_prot_filter_type_t type,
+ u16 source_port_or_eth_type,
+ u16 dest_port);
/**
- * @brief ecore_llh_clear_all_filters - removes all MAC filters from llh
+ * @brief ecore_llh_clear_ppfid_filters - Remove all LLH filters from the given
+ * filter bank.
*
- * @param p_hwfn
- * @param p_ptt
+ * @param p_dev
+ * @param ppfid - relative within the allocated ppfids ('0' is the default one).
*/
-void ecore_llh_clear_all_filters(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt);
+void ecore_llh_clear_ppfid_filters(struct ecore_dev *p_dev, u8 ppfid);
+
+/**
+ * @brief ecore_llh_clear_all_filters - Remove all LLH filters
+ *
+ * @param p_dev
+ */
+void ecore_llh_clear_all_filters(struct ecore_dev *p_dev);
/**
* @brief ecore_llh_set_function_as_default - set function as default per port
@@ -701,4 +777,13 @@ ecore_set_queue_coalesce(struct ecore_hwfn *p_hwfn, u16 rx_coal,
enum _ecore_status_t ecore_pglueb_set_pfid_enable(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
bool b_enable);
+
+/**
+ * @brief Whether FIP discovery fallback special mode is enabled or not.
+ *
+ * @param p_dev
+ *
+ * @return true if device is in FIP special mode, false otherwise.
+ */
+bool ecore_is_mf_fip_special(struct ecore_dev *p_dev);
#endif
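
A hedged usage sketch for the reworked per-device LLH filter API declared above (the surrounding function, the MAC address and the use of the default bank are invented; 0x8914 is the standard FIP ethertype):

	static enum _ecore_status_t example_llh_setup(struct ecore_dev *p_dev)
	{
		u8 mac[ETH_ALEN] = { 0x00, 0x0e, 0x1e, 0x11, 0x22, 0x33 };
		enum _ecore_status_t rc;

		/* Filters now take the device and a PPFID ('0' is the default
		 * bank) instead of a hwfn/ptt pair.
		 */
		rc = ecore_llh_add_mac_filter(p_dev, 0, mac);
		if (rc != ECORE_SUCCESS)
			return rc;

		rc = ecore_llh_add_protocol_filter(p_dev, 0,
						   ECORE_LLH_FILTER_ETHERTYPE,
						   0x8914 /* FIP */, 0);
		if (rc != ECORE_SUCCESS)
			ecore_llh_remove_mac_filter(p_dev, 0, mac);

		return rc;
	}

On teardown, ecore_llh_clear_ppfid_filters(p_dev, 0) (or ecore_llh_clear_all_filters(p_dev)) drops whatever was configured on the bank.
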
diff --git a/drivers/net/qede/base/ecore_hsi_common.h b/drivers/net/qede/base/ecore_hsi_common.h
index 2d761b97..6d4a4dd7 100644
--- a/drivers/net/qede/base/ecore_hsi_common.h
+++ b/drivers/net/qede/base/ecore_hsi_common.h
@@ -922,7 +922,11 @@ struct core_rx_start_ramrod_data {
struct core_rx_action_on_error action_on_error;
/* set when in GSI offload mode on ROCE connection */
u8 gsi_offload_flag;
- u8 reserved[6];
+/* If set, the inner vlan (802.1q tag) priority that is written to cqe will be
+ * zeroed out, used for TenantDcb
+ */
+ u8 wipe_inner_vlan_pri_en;
+ u8 reserved[5];
};
@@ -1044,7 +1048,11 @@ struct core_tx_start_ramrod_data {
__le16 qm_pq_id /* QM PQ ID */;
/* set when in GSI offload mode on ROCE connection */
u8 gsi_offload_flag;
- u8 resrved[3];
+/* vport id of the current connection, used to access non_rdma_in_to_in_pri_map
+ * which is per vport
+ */
+ u8 vport_id;
+ u8 resrved[2];
};
@@ -1171,6 +1179,25 @@ struct eth_rx_rate_limit {
};
+/* Update RSS indirection table entry command. One outstanding command supported
+ * per PF.
+ */
+struct eth_tstorm_rss_update_data {
+/* Valid flag. Driver must set this flag; FW clears it when ready for a new
+ * RSS update command.
+ */
+ u8 valid;
+/* Global VPORT ID. If RSS is disabled for VPORT, RSS update command will be
+ * ignored.
+ */
+ u8 vport_id;
+ u8 ind_table_index /* RSS indirect table index that will be updated. */;
+ u8 reserved;
+ __le16 ind_table_value /* RSS indirect table new value. */;
+ __le16 reserved1 /* reserved. */;
+};
+
+
struct eth_ustorm_per_pf_stat {
/* number of total ucast bytes received on loopback port without errors */
struct regpair rcv_lb_ucast_bytes;
@@ -1463,6 +1490,10 @@ struct pf_start_tunnel_config {
* FW will use a default port
*/
u8 set_geneve_udp_port_flg;
+/* Set no-inner-L2 VXLAN tunnel UDP destination port to
+ * no_inner_l2_vxlan_udp_port. If not set - FW will use a default port
+ */
+ u8 set_no_inner_l2_vxlan_udp_port_flg;
u8 tunnel_clss_vxlan /* Rx classification scheme for VXLAN tunnel. */;
/* Rx classification scheme for l2 GENEVE tunnel. */
u8 tunnel_clss_l2geneve;
@@ -1470,11 +1501,15 @@ struct pf_start_tunnel_config {
u8 tunnel_clss_ipgeneve;
u8 tunnel_clss_l2gre /* Rx classification scheme for l2 GRE tunnel. */;
u8 tunnel_clss_ipgre /* Rx classification scheme for ip GRE tunnel. */;
- u8 reserved;
/* VXLAN tunnel UDP destination port. Valid if set_vxlan_udp_port_flg=1 */
__le16 vxlan_udp_port;
/* GENEVE tunnel UDP destination port. Valid if set_geneve_udp_port_flg=1 */
__le16 geneve_udp_port;
+/* no-inner-L2 VXLAN tunnel UDP destination port. Valid if
+ * set_no_inner_l2_vxlan_udp_port_flg=1
+ */
+ __le16 no_inner_l2_vxlan_udp_port;
+ __le16 reserved[3];
};
/*
@@ -1547,6 +1582,8 @@ struct pf_update_tunnel_config {
u8 set_vxlan_udp_port_flg;
/* Update GENEVE tunnel UDP destination port. */
u8 set_geneve_udp_port_flg;
+/* Update no-inner-L2 VXLAN tunnel UDP destination port. */
+ u8 set_no_inner_l2_vxlan_udp_port_flg;
u8 tunnel_clss_vxlan /* Classification scheme for VXLAN tunnel. */;
/* Classification scheme for l2 GENEVE tunnel. */
u8 tunnel_clss_l2geneve;
@@ -1554,9 +1591,12 @@ struct pf_update_tunnel_config {
u8 tunnel_clss_ipgeneve;
u8 tunnel_clss_l2gre /* Classification scheme for l2 GRE tunnel. */;
u8 tunnel_clss_ipgre /* Classification scheme for ip GRE tunnel. */;
+ u8 reserved;
__le16 vxlan_udp_port /* VXLAN tunnel UDP destination port. */;
__le16 geneve_udp_port /* GENEVE tunnel UDP destination port. */;
- __le16 reserved;
+/* no-inner-L2 VXLAN tunnel UDP destination port. */
+ __le16 no_inner_l2_vxlan_udp_port;
+ __le16 reserved1[3];
};
/*
@@ -1686,6 +1726,13 @@ struct rl_update_ramrod_data {
/* ID of last RL, that will be updated. If clear, single RL will updated. */
u8 rl_id_last;
u8 rl_dc_qcn_flg /* If set, RL will used for DCQCN. */;
+/* If set, alpha will be reset to 1 when the state machine is idle. */
+ u8 dcqcn_reset_alpha_on_idle;
+/* Byte counter threshold to change rate increase stage. */
+ u8 rl_bc_stage_th;
+/* Timer threshold to change rate increase stage. */
+ u8 rl_timer_stage_th;
+ u8 reserved1;
__le32 rl_bc_rate /* Byte Counter Limit. */;
__le16 rl_max_rate /* Maximum rate in 1.6 Mbps resolution. */;
__le16 rl_r_ai /* Active increase rate. */;
@@ -1694,7 +1741,7 @@ struct rl_update_ramrod_data {
__le32 dcqcn_k_us /* DCQCN Alpha update interval. */;
__le32 dcqcn_timeuot_us /* DCQCN timeout. */;
__le32 qcn_timeuot_us /* QCN timeout. */;
- __le32 reserved[2];
+ __le32 reserved2;
};
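
A hedged sketch of filling the new TSTORM RSS-update request (how the structure is written to storm RAM is outside this header, so only the field usage is shown; the values are invented):

	struct eth_tstorm_rss_update_data upd = { 0 };

	upd.vport_id        = 5;                    /* global vport to touch */
	upd.ind_table_index = 17;                   /* entry to change       */
	upd.ind_table_value = OSAL_CPU_TO_LE16(3);  /* new entry value       */
	upd.valid           = 1;  /* FW clears this once it consumes the cmd */
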
diff --git a/drivers/net/qede/base/ecore_hsi_debug_tools.h b/drivers/net/qede/base/ecore_hsi_debug_tools.h
index bf548722..085af0a3 100644
--- a/drivers/net/qede/base/ecore_hsi_debug_tools.h
+++ b/drivers/net/qede/base/ecore_hsi_debug_tools.h
@@ -1091,6 +1091,15 @@ struct idle_chk_data {
};
/*
+ * Pretend parameters
+ */
+struct pretend_params {
+ u8 split_type /* Pretend split type (from enum init_split_types) */;
+ u8 reserved;
+ u16 split_id /* Pretend split ID (within the pretend split type) */;
+};
+
+/*
* Debug Tools data (per HW function)
*/
struct dbg_tools_data {
@@ -1102,11 +1111,17 @@ struct dbg_tools_data {
u8 block_in_reset[88];
u8 chip_id /* Chip ID (from enum chip_ids) */;
u8 platform_id /* Platform ID */;
+ u8 num_ports /* Number of ports in the chip */;
+ u8 num_pfs_per_port /* Number of PFs in each port */;
+ u8 num_vfs /* Number of VFs in the chip */;
u8 initialized /* Indicates if the data was initialized */;
u8 use_dmae /* Indicates if DMAE should be used */;
+ u8 reserved;
+ struct pretend_params pretend /* Current pretend parameters */;
/* Numbers of registers that were read since last log */
u32 num_regs_read;
};
+
#endif /* __ECORE_HSI_DEBUG_TOOLS__ */
diff --git a/drivers/net/qede/base/ecore_hsi_eth.h b/drivers/net/qede/base/ecore_hsi_eth.h
index 6b512305..158ca673 100644
--- a/drivers/net/qede/base/ecore_hsi_eth.h
+++ b/drivers/net/qede/base/ecore_hsi_eth.h
@@ -832,6 +832,26 @@ enum eth_filter_type {
/*
+ * inner to inner vlan priority translation configurations
+ */
+struct eth_in_to_in_pri_map_cfg {
+/* If set, non_rdma_in_to_in_pri_map or rdma_in_to_in_pri_map will be used for
+ * inner to inner priority mapping depending on protocol type
+ */
+ u8 inner_vlan_pri_remap_en;
+ u8 reserved[7];
+/* Map for inner to inner vlan priority translation for Non RDMA protocols, used
+ * for TenantDcb. Set inner_vlan_pri_remap_en, when init the map.
+ */
+ u8 non_rdma_in_to_in_pri_map[8];
+/* Map for inner to inner vlan priority translation for RDMA protocols, used for
+ * TenantDcb. Set inner_vlan_pri_remap_en, when init the map.
+ */
+ u8 rdma_in_to_in_pri_map[8];
+};
+
+
+/*
* eth IPv4 Fragment Type
*/
enum eth_ipv4_frag_type {
@@ -1030,8 +1050,11 @@ struct eth_vport_rx_mode {
/* accept all broadcast packets (subject to vlan) */
#define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_MASK 0x1
#define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_SHIFT 5
-#define ETH_VPORT_RX_MODE_RESERVED1_MASK 0x3FF
-#define ETH_VPORT_RX_MODE_RESERVED1_SHIFT 6
+/* accept any VNI in tunnel VNI classification. Used for default queue. */
+#define ETH_VPORT_RX_MODE_ACCEPT_ANY_VNI_MASK 0x1
+#define ETH_VPORT_RX_MODE_ACCEPT_ANY_VNI_SHIFT 6
+#define ETH_VPORT_RX_MODE_RESERVED1_MASK 0x1FF
+#define ETH_VPORT_RX_MODE_RESERVED1_SHIFT 7
};
@@ -1357,6 +1380,20 @@ struct tx_queue_update_ramrod_data {
};
+/*
+ * Inner to Inner VLAN priority map update mode
+ */
+enum update_in_to_in_pri_map_mode_enum {
+/* Inner to Inner VLAN priority map update Disabled */
+ ETH_IN_TO_IN_PRI_MAP_UPDATE_DISABLED,
+/* Update Inner to Inner VLAN priority map for non RDMA protocols */
+ ETH_IN_TO_IN_PRI_MAP_UPDATE_NON_RDMA_TBL,
+/* Update Inner to Inner VLAN priority map for RDMA protocols */
+ ETH_IN_TO_IN_PRI_MAP_UPDATE_RDMA_TBL,
+ MAX_UPDATE_IN_TO_IN_PRI_MAP_MODE_ENUM
+};
+
+
/*
* Ramrod data for vport update ramrod
@@ -1405,7 +1442,12 @@ struct vport_start_ramrod_data {
u8 ctl_frame_mac_check_en;
/* If set, control frames will be filtered according to ethtype check. */
u8 ctl_frame_ethtype_check_en;
- u8 reserved[1];
+/* If set, the inner vlan (802.1q tag) priority that is written to cqe will be
+ * zeroed out, used for TenantDcb
+ */
+ u8 wipe_inner_vlan_pri_en;
+/* inner to inner vlan priority translation configurations */
+ struct eth_in_to_in_pri_map_cfg in_to_in_vlan_pri_map_cfg;
};
@@ -1473,7 +1515,14 @@ struct vport_update_ramrod_data_cmn {
u8 ctl_frame_mac_check_en;
/* If set, control frames will be filtered according to ethtype check. */
u8 ctl_frame_ethtype_check_en;
- u8 reserved[15];
+/* Indicates to update RDMA or NON-RDMA vlan remapping priority table according
+ * to update_in_to_in_pri_map_mode_enum, used for TenantDcb (use enum
+ * update_in_to_in_pri_map_mode_enum)
+ */
+ u8 update_in_to_in_pri_map_mode;
+/* Map for inner to inner vlan priority translation, used for TenantDcb. */
+ u8 in_to_in_pri_map[8];
+ u8 reserved[6];
};
struct vport_update_ramrod_mcast {
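
A hedged sketch of using the new rx-mode bit above (the local variable and its final placement into the ramrod are assumptions; the field name comes from this header):

	u16 state = 0;

	/* Let the default queue accept any VNI in tunnel classification */
	SET_FIELD(state, ETH_VPORT_RX_MODE_ACCEPT_ANY_VNI, 1);
	/* ... state is then assigned to eth_vport_rx_mode.state (LE16) ... */
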
diff --git a/drivers/net/qede/base/ecore_hw.c b/drivers/net/qede/base/ecore_hw.c
index 51bba27e..72cd7e9c 100644
--- a/drivers/net/qede/base/ecore_hw.c
+++ b/drivers/net/qede/base/ecore_hw.c
@@ -407,6 +407,30 @@ void ecore_port_unpretend(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
*(u32 *)&p_ptt->pxp.pretend);
}
+void ecore_port_fid_pretend(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ u8 port_id, u16 fid)
+{
+ u16 control = 0;
+
+ SET_FIELD(control, PXP_PRETEND_CMD_PORT, port_id);
+ SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 1);
+ SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
+
+ SET_FIELD(control, PXP_PRETEND_CMD_IS_CONCRETE, 1);
+ SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_FUNCTION, 1);
+
+ if (!GET_FIELD(fid, PXP_CONCRETE_FID_VFVALID))
+ fid = GET_FIELD(fid, PXP_CONCRETE_FID_PFID);
+
+ p_ptt->pxp.pretend.control = OSAL_CPU_TO_LE16(control);
+ p_ptt->pxp.pretend.fid.concrete_fid.fid = OSAL_CPU_TO_LE16(fid);
+
+ REG_WR(p_hwfn,
+ ecore_ptt_config_addr(p_ptt) +
+ OFFSETOF(struct pxp_ptt_entry, pretend),
+ *(u32 *)&p_ptt->pxp.pretend);
+}
+
u32 ecore_vfid_to_concrete(struct ecore_hwfn *p_hwfn, u8 vfid)
{
u32 concrete_fid = 0;
@@ -426,14 +450,17 @@ u32 ecore_vfid_to_concrete(struct ecore_hwfn *p_hwfn, u8 vfid)
* If this changes, this needs to be revisted.
*/
-/* Ecore DMAE
- * =============
- */
+/* DMAE */
+
+#define ECORE_DMAE_FLAGS_IS_SET(params, flag) \
+ ((params) != OSAL_NULL && ((params)->flags & ECORE_DMAE_FLAG_##flag))
+
static void ecore_dmae_opcode(struct ecore_hwfn *p_hwfn,
const u8 is_src_type_grc,
const u8 is_dst_type_grc,
struct ecore_dmae_params *p_params)
{
+ u8 src_pfid, dst_pfid, port_id;
u16 opcode_b = 0;
u32 opcode = 0;
@@ -443,16 +470,20 @@ static void ecore_dmae_opcode(struct ecore_hwfn *p_hwfn,
*/
opcode |= (is_src_type_grc ? DMAE_CMD_SRC_MASK_GRC
: DMAE_CMD_SRC_MASK_PCIE) << DMAE_CMD_SRC_SHIFT;
- opcode |= (p_hwfn->rel_pf_id & DMAE_CMD_SRC_PF_ID_MASK) <<
- DMAE_CMD_SRC_PF_ID_SHIFT;
+ src_pfid = ECORE_DMAE_FLAGS_IS_SET(p_params, PF_SRC) ?
+ p_params->src_pfid : p_hwfn->rel_pf_id;
+ opcode |= (src_pfid & DMAE_CMD_SRC_PF_ID_MASK) <<
+ DMAE_CMD_SRC_PF_ID_SHIFT;
/* The destination of the DMA can be: 0-None 1-PCIe 2-GRC 3-None */
opcode |= (is_dst_type_grc ? DMAE_CMD_DST_MASK_GRC
: DMAE_CMD_DST_MASK_PCIE) << DMAE_CMD_DST_SHIFT;
- opcode |= (p_hwfn->rel_pf_id & DMAE_CMD_DST_PF_ID_MASK) <<
- DMAE_CMD_DST_PF_ID_SHIFT;
+ dst_pfid = ECORE_DMAE_FLAGS_IS_SET(p_params, PF_DST) ?
+ p_params->dst_pfid : p_hwfn->rel_pf_id;
+ opcode |= (dst_pfid & DMAE_CMD_DST_PF_ID_MASK) <<
+ DMAE_CMD_DST_PF_ID_SHIFT;
- /* DMAE_E4_TODO need to check which value to specifiy here. */
+ /* DMAE_E4_TODO need to check which value to specify here. */
/* opcode |= (!b_complete_to_host)<< DMAE_CMD_C_DST_SHIFT; */
/* Whether to write a completion word to the completion destination:
@@ -462,7 +493,7 @@ static void ecore_dmae_opcode(struct ecore_hwfn *p_hwfn,
opcode |= DMAE_CMD_COMP_WORD_EN_MASK << DMAE_CMD_COMP_WORD_EN_SHIFT;
opcode |= DMAE_CMD_SRC_ADDR_RESET_MASK << DMAE_CMD_SRC_ADDR_RESET_SHIFT;
- if (p_params->flags & ECORE_DMAE_FLAG_COMPLETION_DST)
+ if (ECORE_DMAE_FLAGS_IS_SET(p_params, COMPLETION_DST))
opcode |= 1 << DMAE_CMD_COMP_FUNC_SHIFT;
/* swapping mode 3 - big endian there should be a define ifdefed in
@@ -470,7 +501,9 @@ static void ecore_dmae_opcode(struct ecore_hwfn *p_hwfn,
*/
opcode |= DMAE_CMD_ENDIANITY << DMAE_CMD_ENDIANITY_MODE_SHIFT;
- opcode |= p_hwfn->port_id << DMAE_CMD_PORT_ID_SHIFT;
+ port_id = (ECORE_DMAE_FLAGS_IS_SET(p_params, PORT)) ?
+ p_params->port_id : p_hwfn->port_id;
+ opcode |= port_id << DMAE_CMD_PORT_ID_SHIFT;
/* reset source address in next go */
opcode |= DMAE_CMD_SRC_ADDR_RESET_MASK << DMAE_CMD_SRC_ADDR_RESET_SHIFT;
@@ -479,14 +512,14 @@ static void ecore_dmae_opcode(struct ecore_hwfn *p_hwfn,
opcode |= DMAE_CMD_DST_ADDR_RESET_MASK << DMAE_CMD_DST_ADDR_RESET_SHIFT;
/* SRC/DST VFID: all 1's - pf, otherwise VF id */
- if (p_params->flags & ECORE_DMAE_FLAG_VF_SRC) {
+ if (ECORE_DMAE_FLAGS_IS_SET(p_params, VF_SRC)) {
opcode |= (1 << DMAE_CMD_SRC_VF_ID_VALID_SHIFT);
opcode_b |= (p_params->src_vfid << DMAE_CMD_SRC_VF_ID_SHIFT);
} else {
opcode_b |= (DMAE_CMD_SRC_VF_ID_MASK <<
DMAE_CMD_SRC_VF_ID_SHIFT);
}
- if (p_params->flags & ECORE_DMAE_FLAG_VF_DST) {
+ if (ECORE_DMAE_FLAGS_IS_SET(p_params, VF_DST)) {
opcode |= 1 << DMAE_CMD_DST_VF_ID_VALID_SHIFT;
opcode_b |= p_params->dst_vfid << DMAE_CMD_DST_VF_ID_SHIFT;
} else {
@@ -831,7 +864,7 @@ ecore_dmae_execute_command(struct ecore_hwfn *p_hwfn,
for (i = 0; i <= cnt_split; i++) {
offset = length_limit * i;
- if (!(p_params->flags & ECORE_DMAE_FLAG_RW_REPL_SRC)) {
+ if (!ECORE_DMAE_FLAGS_IS_SET(p_params, RW_REPL_SRC)) {
if (src_type == ECORE_DMAE_ADDRESS_GRC)
src_addr_split = src_addr + offset;
else
@@ -872,51 +905,45 @@ ecore_dmae_execute_command(struct ecore_hwfn *p_hwfn,
return ecore_status;
}
-enum _ecore_status_t
-ecore_dmae_host2grc(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u64 source_addr,
- u32 grc_addr, u32 size_in_dwords, u32 flags)
+enum _ecore_status_t ecore_dmae_host2grc(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u64 source_addr,
+ u32 grc_addr,
+ u32 size_in_dwords,
+ struct ecore_dmae_params *p_params)
{
u32 grc_addr_in_dw = grc_addr / sizeof(u32);
- struct ecore_dmae_params params;
enum _ecore_status_t rc;
- OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
- params.flags = flags;
-
OSAL_SPIN_LOCK(&p_hwfn->dmae_info.lock);
rc = ecore_dmae_execute_command(p_hwfn, p_ptt, source_addr,
grc_addr_in_dw,
ECORE_DMAE_ADDRESS_HOST_VIRT,
ECORE_DMAE_ADDRESS_GRC,
- size_in_dwords, &params);
+ size_in_dwords, p_params);
OSAL_SPIN_UNLOCK(&p_hwfn->dmae_info.lock);
return rc;
}
-enum _ecore_status_t
-ecore_dmae_grc2host(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u32 grc_addr,
- dma_addr_t dest_addr, u32 size_in_dwords, u32 flags)
+enum _ecore_status_t ecore_dmae_grc2host(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 grc_addr,
+ dma_addr_t dest_addr,
+ u32 size_in_dwords,
+ struct ecore_dmae_params *p_params)
{
u32 grc_addr_in_dw = grc_addr / sizeof(u32);
- struct ecore_dmae_params params;
enum _ecore_status_t rc;
- OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
- params.flags = flags;
-
OSAL_SPIN_LOCK(&p_hwfn->dmae_info.lock);
rc = ecore_dmae_execute_command(p_hwfn, p_ptt, grc_addr_in_dw,
dest_addr, ECORE_DMAE_ADDRESS_GRC,
ECORE_DMAE_ADDRESS_HOST_VIRT,
- size_in_dwords, &params);
+ size_in_dwords, p_params);
OSAL_SPIN_UNLOCK(&p_hwfn->dmae_info.lock);
@@ -965,7 +992,6 @@ enum _ecore_status_t ecore_dmae_sanity(struct ecore_hwfn *p_hwfn,
const char *phase)
{
u32 size = OSAL_PAGE_SIZE / 2, val;
- struct ecore_dmae_params params;
enum _ecore_status_t rc = ECORE_SUCCESS;
dma_addr_t p_phys;
void *p_virt;
@@ -997,9 +1023,9 @@ enum _ecore_status_t ecore_dmae_sanity(struct ecore_hwfn *p_hwfn,
(unsigned long)(p_phys + size),
(u8 *)p_virt + size, size);
- OSAL_MEMSET(&params, 0, sizeof(params));
rc = ecore_dmae_host2host(p_hwfn, p_ptt, p_phys, p_phys + size,
- size / 4 /* size_in_dwords */, &params);
+ size / 4 /* size_in_dwords */,
+ OSAL_NULL /* default parameters */);
if (rc != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, false,
"DMAE sanity [%s]: ecore_dmae_host2host() failed. rc = %d.\n",
@@ -1030,3 +1056,32 @@ out:
OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_virt, p_phys, 2 * size);
return rc;
}
+
+void ecore_ppfid_wr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ u8 abs_ppfid, u32 hw_addr, u32 val)
+{
+ u8 pfid = ECORE_PFID_BY_PPFID(p_hwfn, abs_ppfid);
+
+ ecore_fid_pretend(p_hwfn, p_ptt,
+ pfid << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT);
+ ecore_wr(p_hwfn, p_ptt, hw_addr, val);
+ ecore_fid_pretend(p_hwfn, p_ptt,
+ p_hwfn->rel_pf_id <<
+ PXP_PRETEND_CONCRETE_FID_PFID_SHIFT);
+}
+
+u32 ecore_ppfid_rd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ u8 abs_ppfid, u32 hw_addr)
+{
+ u8 pfid = ECORE_PFID_BY_PPFID(p_hwfn, abs_ppfid);
+ u32 val;
+
+ ecore_fid_pretend(p_hwfn, p_ptt,
+ pfid << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT);
+ val = ecore_rd(p_hwfn, p_ptt, hw_addr);
+ ecore_fid_pretend(p_hwfn, p_ptt,
+ p_hwfn->rel_pf_id <<
+ PXP_PRETEND_CONCRETE_FID_PFID_SHIFT);
+
+ return val;
+}
diff --git a/drivers/net/qede/base/ecore_hw.h b/drivers/net/qede/base/ecore_hw.h
index 394207eb..0b5b40c4 100644
--- a/drivers/net/qede/base/ecore_hw.h
+++ b/drivers/net/qede/base/ecore_hw.h
@@ -134,8 +134,8 @@ struct ecore_ptt *ecore_get_reserved_ptt(struct ecore_hwfn *p_hwfn,
*
* @param p_hwfn
* @param p_ptt
- * @param val
* @param hw_addr
+ * @param val
*/
void ecore_wr(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
@@ -147,7 +147,6 @@ void ecore_wr(struct ecore_hwfn *p_hwfn,
*
* @param p_hwfn
* @param p_ptt
- * @param val
* @param hw_addr
*/
u32 ecore_rd(struct ecore_hwfn *p_hwfn,
@@ -223,6 +222,18 @@ void ecore_port_unpretend(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
/**
+ * @brief ecore_port_fid_pretend - pretend to another port and another function
+ * when accessing the ptt window
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param port_id - the port to pretend to
+ * @param fid - fid field of pxp_pretend structure. Can contain either pf / vf.
+ */
+void ecore_port_fid_pretend(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ u8 port_id, u16 fid);
+
+/**
* @brief ecore_vfid_to_concrete - build a concrete FID for a
* given VF ID
*
@@ -257,4 +268,29 @@ enum _ecore_status_t ecore_dmae_sanity(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
const char *phase);
+/**
+ * @brief ecore_ppfid_wr - Write value to BAR using the given ptt while
+ * pretending to a PF to which the given PPFID pertains.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param abs_ppfid
+ * @param hw_addr
+ * @param val
+ */
+void ecore_ppfid_wr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ u8 abs_ppfid, u32 hw_addr, u32 val);
+
+/**
+ * @brief ecore_ppfid_rd - Read value from BAR using the given ptt while
+ * pretending to a PF to which the given PPFID pertains.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param abs_ppfid
+ * @param hw_addr
+ */
+u32 ecore_ppfid_rd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ u8 abs_ppfid, u32 hw_addr);
+
#endif /* __ECORE_HW_H__ */
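
A hedged usage sketch of the per-PPFID accessors declared above (the register name is hypothetical; the helpers pretend to the PF that owns the given PPFID around the access):

	u32 val;

	val = ecore_ppfid_rd(p_hwfn, p_ptt, abs_ppfid, SOME_NIG_LLH_REG_ADDR);
	ecore_ppfid_wr(p_hwfn, p_ptt, abs_ppfid, SOME_NIG_LLH_REG_ADDR,
		       val | 0x1);
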
diff --git a/drivers/net/qede/base/ecore_init_fw_funcs.c b/drivers/net/qede/base/ecore_init_fw_funcs.c
index b8496cb2..cfc1156e 100644
--- a/drivers/net/qede/base/ecore_init_fw_funcs.c
+++ b/drivers/net/qede/base/ecore_init_fw_funcs.c
@@ -1665,7 +1665,7 @@ void ecore_gft_config(struct ecore_hwfn *p_hwfn,
bool ipv6,
enum gft_profile_type profile_type)
{
- u32 reg_val, cam_line, ram_line_lo, ram_line_hi;
+ u32 reg_val, cam_line, ram_line_lo, ram_line_hi, search_non_ip_as_gft;
if (!ipv6 && !ipv4)
DP_NOTICE(p_hwfn, true, "gft_config: must accept at least on of - ipv4 or ipv6'\n");
@@ -1729,6 +1729,9 @@ void ecore_gft_config(struct ecore_hwfn *p_hwfn,
ram_line_lo = 0;
ram_line_hi = 0;
+ /* Search no IP as GFT */
+ search_non_ip_as_gft = 0;
+
/* Tunnel type */
SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_DST_PORT, 1);
SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL, 1);
@@ -1752,8 +1755,13 @@ void ecore_gft_config(struct ecore_hwfn *p_hwfn,
SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
} else if (profile_type == GFT_PROFILE_TYPE_TUNNEL_TYPE) {
SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_ETHERTYPE, 1);
+
+ /* Allow tunneled traffic without inner IP */
+ search_non_ip_as_gft = 1;
}
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_NON_IP_AS_GFT,
+ search_non_ip_as_gft);
ecore_wr(p_hwfn, p_ptt,
PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id,
ram_line_lo);
@@ -1996,52 +2004,49 @@ void ecore_enable_context_validation(struct ecore_hwfn *p_hwfn,
ecore_wr(p_hwfn, p_ptt, CDU_REG_TCFC_CTX_VALID0, ctx_validation);
}
-#define RSS_IND_TABLE_BASE_ADDR 4112
-#define RSS_IND_TABLE_VPORT_SIZE 16
-#define RSS_IND_TABLE_ENTRY_PER_LINE 8
-/* Update RSS indirection table entry. */
-void ecore_update_eth_rss_ind_table_entry(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u8 rss_id,
- u8 ind_table_index,
- u16 ind_table_value)
+/*******************************************************************************
+ * File name : rdma_init.c
+ * Author : Michael Shteinbok
+ *******************************************************************************
+ *******************************************************************************
+ * Description:
+ * RDMA HSI functions
+ *
+ *******************************************************************************
+ * Notes: This is the input to the auto generated file drv_init_fw_funcs.c
+ *
+ *******************************************************************************
+ */
+static u32 ecore_get_rdma_assert_ram_addr(struct ecore_hwfn *p_hwfn,
+ u8 storm_id)
{
- u32 cnt, rss_addr;
- u32 *reg_val;
- u16 rss_ind_entry[RSS_IND_TABLE_ENTRY_PER_LINE];
- u16 rss_ind_mask[RSS_IND_TABLE_ENTRY_PER_LINE];
-
- /* get entry address */
- rss_addr = RSS_IND_TABLE_BASE_ADDR +
- RSS_IND_TABLE_VPORT_SIZE * rss_id +
- ind_table_index / RSS_IND_TABLE_ENTRY_PER_LINE;
-
- /* prepare update command */
- ind_table_index %= RSS_IND_TABLE_ENTRY_PER_LINE;
-
- for (cnt = 0; cnt < RSS_IND_TABLE_ENTRY_PER_LINE; cnt++) {
- if (cnt == ind_table_index) {
- rss_ind_entry[cnt] = ind_table_value;
- rss_ind_mask[cnt] = 0xFFFF;
- } else {
- rss_ind_entry[cnt] = 0;
- rss_ind_mask[cnt] = 0;
- }
+ switch (storm_id) {
+ case 0: return TSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
+ TSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
+ case 1: return MSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
+ MSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
+ case 2: return USEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
+ USTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
+ case 3: return XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
+ XSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
+ case 4: return YSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
+ YSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
+ case 5: return PSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
+ PSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
+
+ default: return 0;
}
+}
- /* Update entry in HW*/
- ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, rss_addr);
-
- reg_val = (u32 *)rss_ind_mask;
- ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_MASK, reg_val[0]);
- ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_MASK + 4, reg_val[1]);
- ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_MASK + 8, reg_val[2]);
- ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_MASK + 12, reg_val[3]);
+void ecore_set_rdma_error_level(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u8 assert_level[NUM_STORMS])
+{
+ u8 storm_id;
+ for (storm_id = 0; storm_id < NUM_STORMS; storm_id++) {
+ u32 ram_addr = ecore_get_rdma_assert_ram_addr(p_hwfn, storm_id);
- reg_val = (u32 *)rss_ind_entry;
- ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_DATA, reg_val[0]);
- ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_DATA + 4, reg_val[1]);
- ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_DATA + 8, reg_val[2]);
- ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_DATA + 12, reg_val[3]);
+ ecore_wr(p_hwfn, p_ptt, ram_addr, assert_level[storm_id]);
+ }
}
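
A hedged sketch of driving the new helper (the chosen level is arbitrary; NUM_STORMS and the prototype are in ecore_init_fw_funcs.h below):

	u8 levels[NUM_STORMS] = { 1, 1, 1, 1, 1, 1 };

	/* FW asserts whenever an RDMA error of severity above 1 is hit */
	ecore_set_rdma_error_level(p_hwfn, p_ptt, levels);
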
diff --git a/drivers/net/qede/base/ecore_init_fw_funcs.h b/drivers/net/qede/base/ecore_init_fw_funcs.h
index 1024bb26..3503a90c 100644
--- a/drivers/net/qede/base/ecore_init_fw_funcs.h
+++ b/drivers/net/qede/base/ecore_init_fw_funcs.h
@@ -472,21 +472,35 @@ void ecore_memset_task_ctx(void *p_ctx_mem,
u32 ctx_size,
u8 ctx_type);
-/**
- * @brief ecore_update_eth_rss_ind_table_entry - Update RSS indirection table
- * entry.
- * The function must run in exclusive mode to prevent wrong RSS configuration.
+
+/*******************************************************************************
+ * File name : rdma_init.h
+ * Author : Michael Shteinbok
+ *******************************************************************************
+ *******************************************************************************
+ * Description:
+ * RDMA HSI functions header
+ *
+ *******************************************************************************
+ * Notes: This is the input to the auto generated file drv_init_fw_funcs.h
*
- * @param p_hwfn - HW device data
- * @param p_ptt - ptt window used for writing the registers.
- * @param rss_id - RSS engine ID.
- * @param ind_table_index - RSS indirect table index.
- * @param ind_table_value - RSS indirect table new value.
+ *******************************************************************************
*/
-void ecore_update_eth_rss_ind_table_entry(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u8 rss_id,
- u8 ind_table_index,
- u16 ind_table_value);
+#define NUM_STORMS 6
+
+
+
+/**
+ * @brief ecore_set_rdma_error_level - Sets the RDMA assert level.
+ *                                      If the severity of an error exceeds
+ *                                      this level, the FW will assert.
+ * @param p_hwfn - HW device data
+ * @param p_ptt - ptt window used for writing the registers
+ * @param assert_level - An array of assert levels for each storm.
+ */
+void ecore_set_rdma_error_level(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u8 assert_level[NUM_STORMS]);
+
#endif
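
Editorial note: a minimal usage sketch for the new ecore_set_rdma_error_level() API, not part of the patch. It assumes the caller already owns a valid p_hwfn/p_ptt pair obtained through the usual ecore acquire paths; the threshold value of 2 is purely illustrative.

static void example_set_rdma_assert_levels(struct ecore_hwfn *p_hwfn,
					   struct ecore_ptt *p_ptt)
{
	u8 levels[NUM_STORMS];
	u8 i;

	/* Same hypothetical threshold for every storm; any per-storm
	 * policy could be applied here instead.
	 */
	for (i = 0; i < NUM_STORMS; i++)
		levels[i] = 2;

	ecore_set_rdma_error_level(p_hwfn, p_ptt, levels);
}
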
diff --git a/drivers/net/qede/base/ecore_init_ops.c b/drivers/net/qede/base/ecore_init_ops.c
index b7636f36..044308bf 100644
--- a/drivers/net/qede/base/ecore_init_ops.c
+++ b/drivers/net/qede/base/ecore_init_ops.c
@@ -101,7 +101,8 @@ static enum _ecore_status_t ecore_init_rt(struct ecore_hwfn *p_hwfn,
rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
(osal_uintptr_t)(p_init_val + i),
- addr + (i << 2), segment, 0);
+ addr + (i << 2), segment,
+ OSAL_NULL /* default parameters */);
if (rc != ECORE_SUCCESS)
return rc;
@@ -165,8 +166,9 @@ static enum _ecore_status_t ecore_init_array_dmae(struct ecore_hwfn *p_hwfn,
} else {
rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
(osal_uintptr_t)(p_buf +
- dmae_data_offset),
- addr, size, 0);
+ dmae_data_offset),
+ addr, size,
+ OSAL_NULL /* default parameters */);
}
return rc;
@@ -177,13 +179,15 @@ static enum _ecore_status_t ecore_init_fill_dmae(struct ecore_hwfn *p_hwfn,
u32 addr, u32 fill_count)
{
static u32 zero_buffer[DMAE_MAX_RW_SIZE];
+ struct ecore_dmae_params params;
OSAL_MEMSET(zero_buffer, 0, sizeof(u32) * DMAE_MAX_RW_SIZE);
+ OSAL_MEMSET(&params, 0, sizeof(params));
+ params.flags = ECORE_DMAE_FLAG_RW_REPL_SRC;
return ecore_dmae_host2grc(p_hwfn, p_ptt,
(osal_uintptr_t)&zero_buffer[0],
- addr, fill_count,
- ECORE_DMAE_FLAG_RW_REPL_SRC);
+ addr, fill_count, &params);
}
static void ecore_init_fill(struct ecore_hwfn *p_hwfn,
@@ -416,11 +420,11 @@ static u8 ecore_init_cmd_mode_match(struct ecore_hwfn *p_hwfn,
u16 *p_offset, int modes)
{
struct ecore_dev *p_dev = p_hwfn->p_dev;
- const u8 *modes_tree_buf;
u8 arg1, arg2, tree_val;
+ const u8 *modes_tree;
- modes_tree_buf = p_dev->fw_data->modes_tree_buf;
- tree_val = modes_tree_buf[(*p_offset)++];
+ modes_tree = p_dev->fw_data->modes_tree_buf;
+ tree_val = modes_tree[(*p_offset)++];
switch (tree_val) {
case INIT_MODE_OP_NOT:
return ecore_init_cmd_mode_match(p_hwfn, p_offset, modes) ^ 1;
@@ -470,12 +474,12 @@ enum _ecore_status_t ecore_init_run(struct ecore_hwfn *p_hwfn,
{
struct ecore_dev *p_dev = p_hwfn->p_dev;
u32 cmd_num, num_init_ops;
- union init_op *init_ops;
+ union init_op *init;
bool b_dmae = false;
enum _ecore_status_t rc = ECORE_SUCCESS;
num_init_ops = p_dev->fw_data->init_ops_size;
- init_ops = p_dev->fw_data->init_ops;
+ init = p_dev->fw_data->init_ops;
#ifdef CONFIG_ECORE_ZIPPED_FW
p_hwfn->unzip_buf = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC,
@@ -487,7 +491,7 @@ enum _ecore_status_t ecore_init_run(struct ecore_hwfn *p_hwfn,
#endif
for (cmd_num = 0; cmd_num < num_init_ops; cmd_num++) {
- union init_op *cmd = &init_ops[cmd_num];
+ union init_op *cmd = &init[cmd_num];
u32 data = OSAL_LE32_TO_CPU(cmd->raw.op_data);
switch (GET_FIELD(data, INIT_CALLBACK_OP_OP)) {
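
Editorial note: a minimal sketch of the DMAE calling convention this patch moves to -- OSAL_NULL for default parameters, or a zeroed struct ecore_dmae_params with only the needed flag set. Only the flags field is exercised here; any other fields of the struct are assumed to stay zero for default behaviour.

static enum _ecore_status_t example_dmae_writes(struct ecore_hwfn *p_hwfn,
						struct ecore_ptt *p_ptt,
						u32 *p_src, u32 grc_addr,
						u32 dwords)
{
	struct ecore_dmae_params params;
	enum _ecore_status_t rc;

	/* Default behaviour: no parameter structure at all */
	rc = ecore_dmae_host2grc(p_hwfn, p_ptt, (osal_uintptr_t)p_src,
				 grc_addr, dwords,
				 OSAL_NULL /* default parameters */);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Replicate-source fill: zero the struct, then set only the flag */
	OSAL_MEMSET(&params, 0, sizeof(params));
	params.flags = ECORE_DMAE_FLAG_RW_REPL_SRC;
	return ecore_dmae_host2grc(p_hwfn, p_ptt, (osal_uintptr_t)p_src,
				   grc_addr, dwords, &params);
}
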
diff --git a/drivers/net/qede/base/ecore_int.c b/drivers/net/qede/base/ecore_int.c
index 4c271d35..7368d55f 100644
--- a/drivers/net/qede/base/ecore_int.c
+++ b/drivers/net/qede/base/ecore_int.c
@@ -428,14 +428,13 @@ ecore_general_attention_35(struct ecore_hwfn *p_hwfn)
#define ECORE_DORQ_ATTENTION_SIZE_MASK (0x7f)
#define ECORE_DORQ_ATTENTION_SIZE_SHIFT (16)
-#define ECORE_DB_REC_COUNT 10
+#define ECORE_DB_REC_COUNT 1000
#define ECORE_DB_REC_INTERVAL 100
-/* assumes sticky overflow indication was set for this PF */
-static enum _ecore_status_t ecore_db_rec_attn(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt)
+static enum _ecore_status_t ecore_db_rec_flush_queue(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
{
- u8 count = ECORE_DB_REC_COUNT;
+ u32 count = ECORE_DB_REC_COUNT;
u32 usage = 1;
/* wait for usage to zero or count to run out. This is necessary since
@@ -461,6 +460,28 @@ static enum _ecore_status_t ecore_db_rec_attn(struct ecore_hwfn *p_hwfn,
return ECORE_TIMEOUT;
}
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_db_rec_handler(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 overflow;
+ enum _ecore_status_t rc;
+
+ overflow = ecore_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY);
+ DP_NOTICE(p_hwfn, false, "PF Overflow sticky 0x%x\n", overflow);
+ if (!overflow) {
+ ecore_db_recovery_execute(p_hwfn, DB_REC_ONCE);
+ return ECORE_SUCCESS;
+ }
+
+ if (ecore_edpm_enabled(p_hwfn)) {
+ rc = ecore_db_rec_flush_queue(p_hwfn, p_ptt);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+ }
+
 	/* flush any pending (e)dpm as they may never arrive */
ecore_wr(p_hwfn, p_ptt, DORQ_REG_DPM_FORCE_ABORT, 0x1);
@@ -477,8 +498,7 @@ static enum _ecore_status_t ecore_db_rec_attn(struct ecore_hwfn *p_hwfn,
static enum _ecore_status_t ecore_dorq_attn_cb(struct ecore_hwfn *p_hwfn)
{
- u32 int_sts, first_drop_reason, details, address, overflow,
- all_drops_reason;
+ u32 int_sts, first_drop_reason, details, address, all_drops_reason;
struct ecore_ptt *p_ptt = p_hwfn->p_dpc_ptt;
enum _ecore_status_t rc;
@@ -504,8 +524,6 @@ static enum _ecore_status_t ecore_dorq_attn_cb(struct ecore_hwfn *p_hwfn)
DORQ_REG_DB_DROP_DETAILS);
address = ecore_rd(p_hwfn, p_ptt,
DORQ_REG_DB_DROP_DETAILS_ADDRESS);
- overflow = ecore_rd(p_hwfn, p_ptt,
- DORQ_REG_PF_OVFL_STICKY);
all_drops_reason = ecore_rd(p_hwfn, p_ptt,
DORQ_REG_DB_DROP_DETAILS_REASON);
@@ -516,19 +534,16 @@ static enum _ecore_status_t ecore_dorq_attn_cb(struct ecore_hwfn *p_hwfn)
"FID\t\t0x%04x\t\t(Opaque FID)\n"
"Size\t\t0x%04x\t\t(in bytes)\n"
"1st drop reason\t0x%08x\t(details on first drop since last handling)\n"
- "Sticky reasons\t0x%08x\t(all drop reasons since last handling)\n"
- "Overflow\t0x%x\t\t(a per PF indication)\n",
+ "Sticky reasons\t0x%08x\t(all drop reasons since last handling)\n",
address,
GET_FIELD(details, ECORE_DORQ_ATTENTION_OPAQUE),
GET_FIELD(details, ECORE_DORQ_ATTENTION_SIZE) * 4,
- first_drop_reason, all_drops_reason, overflow);
+ first_drop_reason, all_drops_reason);
- /* if this PF caused overflow, initiate recovery */
- if (overflow) {
- rc = ecore_db_rec_attn(p_hwfn, p_ptt);
- if (rc != ECORE_SUCCESS)
- return rc;
- }
+ rc = ecore_db_rec_handler(p_hwfn, p_ptt);
+ OSAL_DB_REC_OCCURRED(p_hwfn);
+ if (rc != ECORE_SUCCESS)
+ return rc;
/* clear the doorbell drop details and prepare for next drop */
ecore_wr(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS_REL, 0);
@@ -1209,8 +1224,9 @@ static enum _ecore_status_t ecore_int_attentions(struct ecore_hwfn *p_hwfn)
static void ecore_sb_ack_attn(struct ecore_hwfn *p_hwfn,
void OSAL_IOMEM *igu_addr, u32 ack_cons)
{
- struct igu_prod_cons_update igu_ack = { 0 };
+ struct igu_prod_cons_update igu_ack;
+ OSAL_MEMSET(&igu_ack, 0, sizeof(struct igu_prod_cons_update));
igu_ack.sb_id_and_flags =
((ack_cons << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
(1 << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
@@ -1546,11 +1562,13 @@ void ecore_int_cau_conf_sb(struct ecore_hwfn *p_hwfn,
ecore_dmae_host2grc(p_hwfn, p_ptt,
(u64)(osal_uintptr_t)&phys_addr,
CAU_REG_SB_ADDR_MEMORY +
- igu_sb_id * sizeof(u64), 2, 0);
+ igu_sb_id * sizeof(u64), 2,
+ OSAL_NULL /* default parameters */);
ecore_dmae_host2grc(p_hwfn, p_ptt,
(u64)(osal_uintptr_t)&sb_entry,
CAU_REG_SB_VAR_MEMORY +
- igu_sb_id * sizeof(u64), 2, 0);
+ igu_sb_id * sizeof(u64), 2,
+ OSAL_NULL /* default parameters */);
} else {
/* Initialize Status Block Address */
STORE_RT_REG_AGG(p_hwfn,
@@ -2631,7 +2649,8 @@ enum _ecore_status_t ecore_int_set_timer_res(struct ecore_hwfn *p_hwfn,
rc = ecore_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
sb_id * sizeof(u64),
- (u64)(osal_uintptr_t)&sb_entry, 2, 0);
+ (u64)(osal_uintptr_t)&sb_entry, 2,
+ OSAL_NULL /* default parameters */);
if (rc != ECORE_SUCCESS) {
DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
return rc;
@@ -2644,8 +2663,8 @@ enum _ecore_status_t ecore_int_set_timer_res(struct ecore_hwfn *p_hwfn,
rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
(u64)(osal_uintptr_t)&sb_entry,
- CAU_REG_SB_VAR_MEMORY +
- sb_id * sizeof(u64), 2, 0);
+ CAU_REG_SB_VAR_MEMORY + sb_id * sizeof(u64), 2,
+ OSAL_NULL /* default parameters */);
if (rc != ECORE_SUCCESS) {
DP_ERR(p_hwfn, "dmae_host2grc failed %d\n", rc);
return rc;
@@ -2681,3 +2700,35 @@ enum _ecore_status_t ecore_int_get_sb_dbg(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
}
+
+void ecore_pf_flr_igu_cleanup(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt;
+ struct ecore_ptt *p_dpc_ptt = ecore_get_reserved_ptt(p_hwfn,
+ RESERVED_PTT_DPC);
+ int i;
+
+ /* Do not reorder the following cleanup sequence */
+ /* Ack all attentions */
+ ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ACK_BITS, 0xfff);
+
+ /* Clear driver attention */
+ ecore_wr(p_hwfn, p_dpc_ptt,
+ ((p_hwfn->rel_pf_id << 3) + MISC_REG_AEU_GENERAL_ATTN_0), 0);
+
+ /* Clear per-PF IGU registers to restore them as if the IGU
+ * was reset for this PF
+ */
+ ecore_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0);
+ ecore_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0);
+ ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0);
+
+	/* Execute IGU cleanup */
+ ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_FUNCTIONAL_CLEANUP, 1);
+
+ /* Clear Stats */
+ ecore_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_OF_INTA_ASSERTED, 0);
+
+ for (i = 0; i < IGU_REG_PBA_STS_PF_SIZE; i++)
+ ecore_wr(p_hwfn, p_ptt, IGU_REG_PBA_STS_PF + i * 4, 0);
+}
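
Editorial note: a sketch of the bounded-poll pattern that ecore_db_rec_flush_queue() relies on, showing why the counter was widened to u32 when ECORE_DB_REC_COUNT grew from 10 to 1000. The helper ecore_db_rec_read_usage() is a hypothetical placeholder for the DORQ usage read the real loop performs, and OSAL_MSLEEP() is assumed as the delay primitive.

static enum _ecore_status_t example_db_rec_poll(struct ecore_hwfn *p_hwfn,
						struct ecore_ptt *p_ptt)
{
	u32 count = ECORE_DB_REC_COUNT;	/* was u8; 1000 no longer fits */
	u32 usage = 1;

	while (count-- && usage) {
		OSAL_MSLEEP(ECORE_DB_REC_INTERVAL);	/* assumed delay */
		/* hypothetical stand-in for the real DORQ usage read */
		usage = ecore_db_rec_read_usage(p_hwfn, p_ptt);
	}

	return usage ? ECORE_TIMEOUT : ECORE_SUCCESS;
}
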
diff --git a/drivers/net/qede/base/ecore_int.h b/drivers/net/qede/base/ecore_int.h
index 041240d7..ff2310cf 100644
--- a/drivers/net/qede/base/ecore_int.h
+++ b/drivers/net/qede/base/ecore_int.h
@@ -256,5 +256,6 @@ enum _ecore_status_t ecore_int_set_timer_res(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t ecore_pglueb_rbc_attn_handler(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
bool is_hw_init);
+void ecore_pf_flr_igu_cleanup(struct ecore_hwfn *p_hwfn);
#endif /* __ECORE_INT_H__ */
diff --git a/drivers/net/qede/base/ecore_int_api.h b/drivers/net/qede/base/ecore_int_api.h
index aeaf469e..42538a46 100644
--- a/drivers/net/qede/base/ecore_int_api.h
+++ b/drivers/net/qede/base/ecore_int_api.h
@@ -92,8 +92,9 @@ static OSAL_INLINE u16 ecore_sb_update_sb_idx(struct ecore_sb_info *sb_info)
static OSAL_INLINE void ecore_sb_ack(struct ecore_sb_info *sb_info,
enum igu_int_cmd int_cmd, u8 upd_flg)
{
- struct igu_prod_cons_update igu_ack = { 0 };
+ struct igu_prod_cons_update igu_ack;
+ OSAL_MEMSET(&igu_ack, 0, sizeof(struct igu_prod_cons_update));
igu_ack.sb_id_and_flags =
((sb_info->sb_ack << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
(upd_flg << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
@@ -343,4 +344,15 @@ enum _ecore_status_t ecore_int_get_sb_dbg(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t
ecore_int_igu_relocate_sb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
u16 sb_id, bool b_to_vf);
+
+/**
+ * @brief - Doorbell Recovery handler.
+ *          Run DB_REC_REAL_DEAL doorbell recovery in case of PF overflow
+ * (and flush DORQ if needed), otherwise run DB_REC_ONCE.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+enum _ecore_status_t ecore_db_rec_handler(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
#endif
diff --git a/drivers/net/qede/base/ecore_iov_api.h b/drivers/net/qede/base/ecore_iov_api.h
index 29001d71..55de7086 100644
--- a/drivers/net/qede/base/ecore_iov_api.h
+++ b/drivers/net/qede/base/ecore_iov_api.h
@@ -84,6 +84,13 @@ struct ecore_public_vf_info {
*/
u8 forced_mac[ETH_ALEN];
u16 forced_vlan;
+
+ /* Trusted VFs can configure promiscuous mode and
+	 * set a MAC address even though the PF has set a forced MAC.
+ * Also store shadow promisc configuration if needed.
+ */
+ bool is_trusted_configured;
+ bool is_trusted_request;
};
struct ecore_iov_vf_init_params {
@@ -695,6 +702,16 @@ bool ecore_iov_is_vf_started(struct ecore_hwfn *p_hwfn,
*/
int ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid);
+/**
+ * @brief - Configure min rate for VF's vport.
+ * @param p_dev
+ * @param vfid
+ * @param rate - rate in Mbps
+ *
+ * @return
+ */
+enum _ecore_status_t ecore_iov_configure_min_tx_rate(struct ecore_dev *p_dev,
+ int vfid, u32 rate);
#endif
/**
diff --git a/drivers/net/qede/base/ecore_iro.h b/drivers/net/qede/base/ecore_iro.h
index 05693029..12d45c1c 100644
--- a/drivers/net/qede/base/ecore_iro.h
+++ b/drivers/net/qede/base/ecore_iro.h
@@ -113,91 +113,129 @@
/* Tstorm Eth limit Rx rate */
#define ETH_RX_RATE_LIMIT_OFFSET(pf_id) (IRO[29].base + ((pf_id) * IRO[29].m1))
#define ETH_RX_RATE_LIMIT_SIZE (IRO[29].size)
+/* RSS indirection table entry update command per PF offset in TSTORM PF BAR0.
+ * Use eth_tstorm_rss_update_data for update.
+ */
+#define TSTORM_ETH_RSS_UPDATE_OFFSET(pf_id) (IRO[30].base + \
+ ((pf_id) * IRO[30].m1))
+#define TSTORM_ETH_RSS_UPDATE_SIZE (IRO[30].size)
/* Xstorm queue zone */
-#define XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) (IRO[30].base + \
- ((queue_id) * IRO[30].m1))
-#define XSTORM_ETH_QUEUE_ZONE_SIZE (IRO[30].size)
+#define XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) (IRO[31].base + \
+ ((queue_id) * IRO[31].m1))
+#define XSTORM_ETH_QUEUE_ZONE_SIZE (IRO[31].size)
/* Ystorm cqe producer */
-#define YSTORM_TOE_CQ_PROD_OFFSET(rss_id) (IRO[31].base + \
- ((rss_id) * IRO[31].m1))
-#define YSTORM_TOE_CQ_PROD_SIZE (IRO[31].size)
-/* Ustorm cqe producer */
-#define USTORM_TOE_CQ_PROD_OFFSET(rss_id) (IRO[32].base + \
+#define YSTORM_TOE_CQ_PROD_OFFSET(rss_id) (IRO[32].base + \
((rss_id) * IRO[32].m1))
-#define USTORM_TOE_CQ_PROD_SIZE (IRO[32].size)
+#define YSTORM_TOE_CQ_PROD_SIZE (IRO[32].size)
+/* Ustorm cqe producer */
+#define USTORM_TOE_CQ_PROD_OFFSET(rss_id) (IRO[33].base + \
+ ((rss_id) * IRO[33].m1))
+#define USTORM_TOE_CQ_PROD_SIZE (IRO[33].size)
/* Ustorm grq producer */
-#define USTORM_TOE_GRQ_PROD_OFFSET(pf_id) (IRO[33].base + \
- ((pf_id) * IRO[33].m1))
-#define USTORM_TOE_GRQ_PROD_SIZE (IRO[33].size)
+#define USTORM_TOE_GRQ_PROD_OFFSET(pf_id) (IRO[34].base + \
+ ((pf_id) * IRO[34].m1))
+#define USTORM_TOE_GRQ_PROD_SIZE (IRO[34].size)
/* Tstorm cmdq-cons of given command queue-id */
-#define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) (IRO[34].base + \
- ((cmdq_queue_id) * IRO[34].m1))
-#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[34].size)
+#define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) (IRO[35].base + \
+ ((cmdq_queue_id) * IRO[35].m1))
+#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[35].size)
/* Tstorm (reflects M-Storm) bdq-external-producer of given function ID,
* BDqueue-id
*/
-#define TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) (IRO[35].base + \
- ((func_id) * IRO[35].m1) + ((bdq_id) * IRO[35].m2))
-#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[35].size)
-/* Mstorm bdq-external-producer of given BDQ resource ID, BDqueue-id */
-#define MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) (IRO[36].base + \
+#define TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) (IRO[36].base + \
((func_id) * IRO[36].m1) + ((bdq_id) * IRO[36].m2))
-#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[36].size)
+#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[36].size)
+/* Mstorm bdq-external-producer of given BDQ resource ID, BDqueue-id */
+#define MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) (IRO[37].base + \
+ ((func_id) * IRO[37].m1) + ((bdq_id) * IRO[37].m2))
+#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[37].size)
/* Tstorm iSCSI RX stats */
-#define TSTORM_ISCSI_RX_STATS_OFFSET(pf_id) (IRO[37].base + \
- ((pf_id) * IRO[37].m1))
-#define TSTORM_ISCSI_RX_STATS_SIZE (IRO[37].size)
-/* Mstorm iSCSI RX stats */
-#define MSTORM_ISCSI_RX_STATS_OFFSET(pf_id) (IRO[38].base + \
+#define TSTORM_ISCSI_RX_STATS_OFFSET(pf_id) (IRO[38].base + \
((pf_id) * IRO[38].m1))
-#define MSTORM_ISCSI_RX_STATS_SIZE (IRO[38].size)
-/* Ustorm iSCSI RX stats */
-#define USTORM_ISCSI_RX_STATS_OFFSET(pf_id) (IRO[39].base + \
+#define TSTORM_ISCSI_RX_STATS_SIZE (IRO[38].size)
+/* Mstorm iSCSI RX stats */
+#define MSTORM_ISCSI_RX_STATS_OFFSET(pf_id) (IRO[39].base + \
((pf_id) * IRO[39].m1))
-#define USTORM_ISCSI_RX_STATS_SIZE (IRO[39].size)
-/* Xstorm iSCSI TX stats */
-#define XSTORM_ISCSI_TX_STATS_OFFSET(pf_id) (IRO[40].base + \
+#define MSTORM_ISCSI_RX_STATS_SIZE (IRO[39].size)
+/* Ustorm iSCSI RX stats */
+#define USTORM_ISCSI_RX_STATS_OFFSET(pf_id) (IRO[40].base + \
((pf_id) * IRO[40].m1))
-#define XSTORM_ISCSI_TX_STATS_SIZE (IRO[40].size)
-/* Ystorm iSCSI TX stats */
-#define YSTORM_ISCSI_TX_STATS_OFFSET(pf_id) (IRO[41].base + \
+#define USTORM_ISCSI_RX_STATS_SIZE (IRO[40].size)
+/* Xstorm iSCSI TX stats */
+#define XSTORM_ISCSI_TX_STATS_OFFSET(pf_id) (IRO[41].base + \
((pf_id) * IRO[41].m1))
-#define YSTORM_ISCSI_TX_STATS_SIZE (IRO[41].size)
-/* Pstorm iSCSI TX stats */
-#define PSTORM_ISCSI_TX_STATS_OFFSET(pf_id) (IRO[42].base + \
+#define XSTORM_ISCSI_TX_STATS_SIZE (IRO[41].size)
+/* Ystorm iSCSI TX stats */
+#define YSTORM_ISCSI_TX_STATS_OFFSET(pf_id) (IRO[42].base + \
((pf_id) * IRO[42].m1))
-#define PSTORM_ISCSI_TX_STATS_SIZE (IRO[42].size)
-/* Tstorm FCoE RX stats */
-#define TSTORM_FCOE_RX_STATS_OFFSET(pf_id) (IRO[43].base + \
+#define YSTORM_ISCSI_TX_STATS_SIZE (IRO[42].size)
+/* Pstorm iSCSI TX stats */
+#define PSTORM_ISCSI_TX_STATS_OFFSET(pf_id) (IRO[43].base + \
((pf_id) * IRO[43].m1))
-#define TSTORM_FCOE_RX_STATS_SIZE (IRO[43].size)
-/* Pstorm FCoE TX stats */
-#define PSTORM_FCOE_TX_STATS_OFFSET(pf_id) (IRO[44].base + \
+#define PSTORM_ISCSI_TX_STATS_SIZE (IRO[43].size)
+/* Tstorm FCoE RX stats */
+#define TSTORM_FCOE_RX_STATS_OFFSET(pf_id) (IRO[44].base + \
((pf_id) * IRO[44].m1))
-#define PSTORM_FCOE_TX_STATS_SIZE (IRO[44].size)
+#define TSTORM_FCOE_RX_STATS_SIZE (IRO[44].size)
+/* Pstorm FCoE TX stats */
+#define PSTORM_FCOE_TX_STATS_OFFSET(pf_id) (IRO[45].base + \
+ ((pf_id) * IRO[45].m1))
+#define PSTORM_FCOE_TX_STATS_SIZE (IRO[45].size)
/* Pstorm RDMA queue statistics */
-#define PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \
- (IRO[45].base + ((rdma_stat_counter_id) * IRO[45].m1))
-#define PSTORM_RDMA_QUEUE_STAT_SIZE (IRO[45].size)
-/* Tstorm RDMA queue statistics */
-#define TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) (IRO[46].base + \
+#define PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) (IRO[46].base + \
((rdma_stat_counter_id) * IRO[46].m1))
-#define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[46].size)
+#define PSTORM_RDMA_QUEUE_STAT_SIZE (IRO[46].size)
+/* Tstorm RDMA queue statistics */
+#define TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) (IRO[47].base + \
+ ((rdma_stat_counter_id) * IRO[47].m1))
+#define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[47].size)
+/* Xstorm error level for assert */
+#define XSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) (IRO[48].base + \
+ ((pf_id) * IRO[48].m1))
+#define XSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[48].size)
+/* Ystorm error level for assert */
+#define YSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) (IRO[49].base + \
+ ((pf_id) * IRO[49].m1))
+#define YSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[49].size)
+/* Pstorm error level for assert */
+#define PSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) (IRO[50].base + \
+ ((pf_id) * IRO[50].m1))
+#define PSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[50].size)
+/* Tstorm error level for assert */
+#define TSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) (IRO[51].base + \
+ ((pf_id) * IRO[51].m1))
+#define TSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[51].size)
+/* Mstorm error level for assert */
+#define MSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) (IRO[52].base + \
+ ((pf_id) * IRO[52].m1))
+#define MSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[52].size)
+/* Ustorm error level for assert */
+#define USTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) (IRO[53].base + \
+ ((pf_id) * IRO[53].m1))
+#define USTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[53].size)
/* Xstorm iWARP rxmit stats */
-#define XSTORM_IWARP_RXMIT_STATS_OFFSET(pf_id) (IRO[47].base + \
- ((pf_id) * IRO[47].m1))
-#define XSTORM_IWARP_RXMIT_STATS_SIZE (IRO[47].size)
+#define XSTORM_IWARP_RXMIT_STATS_OFFSET(pf_id) (IRO[54].base + \
+ ((pf_id) * IRO[54].m1))
+#define XSTORM_IWARP_RXMIT_STATS_SIZE (IRO[54].size)
/* Tstorm RoCE Event Statistics */
-#define TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) (IRO[48].base + \
- ((roce_pf_id) * IRO[48].m1))
-#define TSTORM_ROCE_EVENTS_STAT_SIZE (IRO[48].size)
+#define TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) (IRO[55].base + \
+ ((roce_pf_id) * IRO[55].m1))
+#define TSTORM_ROCE_EVENTS_STAT_SIZE (IRO[55].size)
/* DCQCN Received Statistics */
-#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(roce_pf_id) (IRO[49].base + \
- ((roce_pf_id) * IRO[49].m1))
-#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_SIZE (IRO[49].size)
+#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(roce_pf_id) (IRO[56].base + \
+ ((roce_pf_id) * IRO[56].m1))
+#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_SIZE (IRO[56].size)
+/* RoCE Error Statistics */
+#define YSTORM_ROCE_ERROR_STATS_OFFSET(roce_pf_id) (IRO[57].base + \
+ ((roce_pf_id) * IRO[57].m1))
+#define YSTORM_ROCE_ERROR_STATS_SIZE (IRO[57].size)
/* DCQCN Sent Statistics */
-#define PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(roce_pf_id) (IRO[50].base + \
- ((roce_pf_id) * IRO[50].m1))
-#define PSTORM_ROCE_DCQCN_SENT_STATS_SIZE (IRO[50].size)
+#define PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(roce_pf_id) (IRO[58].base + \
+ ((roce_pf_id) * IRO[58].m1))
+#define PSTORM_ROCE_DCQCN_SENT_STATS_SIZE (IRO[58].size)
+/* RoCE CQEs Statistics */
+#define USTORM_ROCE_CQE_STATS_OFFSET(roce_pf_id) (IRO[59].base + \
+ ((roce_pf_id) * IRO[59].m1))
+#define USTORM_ROCE_CQE_STATS_SIZE (IRO[59].size)
#endif /* __IRO_H__ */
diff --git a/drivers/net/qede/base/ecore_iro_values.h b/drivers/net/qede/base/ecore_iro_values.h
index 685fa2e8..30e632ce 100644
--- a/drivers/net/qede/base/ecore_iro_values.h
+++ b/drivers/net/qede/base/ecore_iro_values.h
@@ -7,7 +7,7 @@
#ifndef __IRO_VALUES_H__
#define __IRO_VALUES_H__
-static const struct iro iro_arr[51] = {
+static const struct iro iro_arr[60] = {
/* YSTORM_FLOW_CONTROL_MODE_OFFSET */
{ 0x0, 0x0, 0x0, 0x0, 0x8},
/* TSTORM_PORT_STAT_OFFSET(port_id) */
@@ -29,7 +29,7 @@ static const struct iro iro_arr[51] = {
/* YSTORM_INTEG_TEST_DATA_OFFSET */
{ 0x3e38, 0x0, 0x0, 0x0, 0x78},
/* PSTORM_INTEG_TEST_DATA_OFFSET */
- { 0x2b78, 0x0, 0x0, 0x0, 0x78},
+ { 0x3ef8, 0x0, 0x0, 0x0, 0x78},
/* TSTORM_INTEG_TEST_DATA_OFFSET */
{ 0x4c40, 0x0, 0x0, 0x0, 0x78},
/* MSTORM_INTEG_TEST_DATA_OFFSET */
@@ -43,7 +43,7 @@ static const struct iro iro_arr[51] = {
/* CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) */
{ 0xb820, 0x30, 0x0, 0x0, 0x30},
/* CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) */
- { 0x96c0, 0x30, 0x0, 0x0, 0x30},
+ { 0xa990, 0x30, 0x0, 0x0, 0x30},
/* MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) */
{ 0x4b68, 0x80, 0x0, 0x0, 0x40},
/* MSTORM_ETH_PF_PRODS_OFFSET(queue_id) */
@@ -59,15 +59,17 @@ static const struct iro iro_arr[51] = {
/* USTORM_ETH_PF_STAT_OFFSET(pf_id) */
{ 0xe770, 0x60, 0x0, 0x0, 0x60},
/* PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) */
- { 0x2d10, 0x80, 0x0, 0x0, 0x38},
+ { 0x4090, 0x80, 0x0, 0x0, 0x38},
/* PSTORM_ETH_PF_STAT_OFFSET(pf_id) */
- { 0xf2b8, 0x78, 0x0, 0x0, 0x78},
+ { 0xfea8, 0x78, 0x0, 0x0, 0x78},
/* PSTORM_CTL_FRAME_ETHTYPE_OFFSET(ethType_id) */
{ 0x1f8, 0x4, 0x0, 0x0, 0x4},
/* TSTORM_ETH_PRS_INPUT_OFFSET */
{ 0xaf20, 0x0, 0x0, 0x0, 0xf0},
/* ETH_RX_RATE_LIMIT_OFFSET(pf_id) */
{ 0xb010, 0x8, 0x0, 0x0, 0x8},
+/* TSTORM_ETH_RSS_UPDATE_OFFSET(pf_id) */
+ { 0xc00, 0x8, 0x0, 0x0, 0x8},
/* XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) */
{ 0x1f8, 0x8, 0x0, 0x0, 0x8},
/* YSTORM_TOE_CQ_PROD_OFFSET(rss_id) */
@@ -91,25 +93,41 @@ static const struct iro iro_arr[51] = {
/* XSTORM_ISCSI_TX_STATS_OFFSET(pf_id) */
{ 0xa588, 0x50, 0x0, 0x0, 0x20},
/* YSTORM_ISCSI_TX_STATS_OFFSET(pf_id) */
- { 0x8700, 0x40, 0x0, 0x0, 0x28},
+ { 0x8f00, 0x40, 0x0, 0x0, 0x28},
/* PSTORM_ISCSI_TX_STATS_OFFSET(pf_id) */
- { 0x10300, 0x18, 0x0, 0x0, 0x10},
+ { 0x10e30, 0x18, 0x0, 0x0, 0x10},
/* TSTORM_FCOE_RX_STATS_OFFSET(pf_id) */
{ 0xde48, 0x48, 0x0, 0x0, 0x38},
/* PSTORM_FCOE_TX_STATS_OFFSET(pf_id) */
- { 0x10768, 0x20, 0x0, 0x0, 0x20},
+ { 0x11298, 0x20, 0x0, 0x0, 0x20},
/* PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) */
- { 0x2d48, 0x80, 0x0, 0x0, 0x10},
+ { 0x40c8, 0x80, 0x0, 0x0, 0x10},
/* TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) */
{ 0x5048, 0x10, 0x0, 0x0, 0x10},
+/* XSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) */
+ { 0xa928, 0x8, 0x0, 0x0, 0x1},
+/* YSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) */
+ { 0xa128, 0x8, 0x0, 0x0, 0x1},
+/* PSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) */
+ { 0x11a30, 0x8, 0x0, 0x0, 0x1},
+/* TSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) */
+ { 0xf030, 0x8, 0x0, 0x0, 0x1},
+/* MSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) */
+ { 0x13028, 0x8, 0x0, 0x0, 0x1},
+/* USTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) */
+ { 0x12c58, 0x8, 0x0, 0x0, 0x1},
/* XSTORM_IWARP_RXMIT_STATS_OFFSET(pf_id) */
{ 0xc9b8, 0x30, 0x0, 0x0, 0x10},
/* TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) */
- { 0xed90, 0x10, 0x0, 0x0, 0x10},
+ { 0xed90, 0x28, 0x0, 0x0, 0x28},
/* YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(roce_pf_id) */
- { 0xa520, 0x10, 0x0, 0x0, 0x10},
+ { 0xad20, 0x18, 0x0, 0x0, 0x18},
+/* YSTORM_ROCE_ERROR_STATS_OFFSET(roce_pf_id) */
+ { 0xaea0, 0x8, 0x0, 0x0, 0x8},
/* PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(roce_pf_id) */
- { 0x13108, 0x8, 0x0, 0x0, 0x8},
+ { 0x13c38, 0x8, 0x0, 0x0, 0x8},
+/* USTORM_ROCE_CQE_STATS_OFFSET(roce_pf_id) */
+ { 0x13c50, 0x18, 0x0, 0x0, 0x18},
};
#endif /* __IRO_VALUES_H__ */
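
Editorial note: a self-contained sketch of how the renumbered IRO table and the offset macros fit together. The struct layout mirrors the five columns of iro_arr[] as used by the macros (base, m1, m2, size; the unused middle column is labelled m3 here only for illustration); the values are the ones added for TSTORM_ETH_RSS_UPDATE_OFFSET, and pf_id 3 is arbitrary.

#include <stdio.h>

struct iro_example {
	unsigned int base, m1, m2, m3, size;
};

int main(void)
{
	/* IRO[30] as introduced above: { 0xc00, 0x8, 0x0, 0x0, 0x8 } */
	struct iro_example rss_update = { 0xc00, 0x8, 0x0, 0x0, 0x8 };
	unsigned int pf_id = 3;

	/* TSTORM_ETH_RSS_UPDATE_OFFSET(pf_id) expands to base + pf_id * m1 */
	printf("pf %u: offset 0x%x, size %u bytes\n", pf_id,
	       rss_update.base + pf_id * rss_update.m1, rss_update.size);
	return 0;	/* prints: pf 3: offset 0xc18, size 8 bytes */
}
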
diff --git a/drivers/net/qede/base/ecore_l2.c b/drivers/net/qede/base/ecore_l2.c
index d71f4616..8b9817eb 100644
--- a/drivers/net/qede/base/ecore_l2.c
+++ b/drivers/net/qede/base/ecore_l2.c
@@ -608,6 +608,9 @@ ecore_sp_update_accept_mode(struct ecore_hwfn *p_hwfn,
SET_FIELD(state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL,
!!(accept_filter & ECORE_ACCEPT_BCAST));
+ SET_FIELD(state, ETH_VPORT_RX_MODE_ACCEPT_ANY_VNI,
+ !!(accept_filter & ECORE_ACCEPT_ANY_VNI));
+
p_ramrod->rx_mode.state = OSAL_CPU_TO_LE16(state);
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
"vport[%02x] p_ramrod->rx_mode.state = 0x%x\n",
@@ -783,6 +786,11 @@ ecore_sp_vport_update(struct ecore_hwfn *p_hwfn,
return rc;
}
+ if (p_params->update_ctl_frame_check) {
+ p_cmn->ctl_frame_mac_check_en = p_params->mac_chk_en;
+ p_cmn->ctl_frame_ethtype_check_en = p_params->ethtype_chk_en;
+ }
+
/* Update mcast bins for VFs, PF doesn't use this functionality */
ecore_sp_update_mcast_bin(p_ramrod, p_params);
@@ -2084,6 +2092,24 @@ void ecore_reset_vport_stats(struct ecore_dev *p_dev)
}
}
+static enum gft_profile_type
+ecore_arfs_mode_to_hsi(enum ecore_filter_config_mode mode)
+{
+ if (mode == ECORE_FILTER_CONFIG_MODE_5_TUPLE)
+ return GFT_PROFILE_TYPE_4_TUPLE;
+
+ if (mode == ECORE_FILTER_CONFIG_MODE_IP_DEST)
+ return GFT_PROFILE_TYPE_IP_DST_ADDR;
+
+ if (mode == ECORE_FILTER_CONFIG_MODE_TUNN_TYPE)
+ return GFT_PROFILE_TYPE_TUNNEL_TYPE;
+
+ if (mode == ECORE_FILTER_CONFIG_MODE_IP_SRC)
+ return GFT_PROFILE_TYPE_IP_SRC_ADDR;
+
+ return GFT_PROFILE_TYPE_L4_DST_PORT;
+}
+
void ecore_arfs_mode_configure(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_arfs_config_params *p_cfg_params)
@@ -2091,13 +2117,13 @@ void ecore_arfs_mode_configure(struct ecore_hwfn *p_hwfn,
if (OSAL_TEST_BIT(ECORE_MF_DISABLE_ARFS, &p_hwfn->p_dev->mf_bits))
return;
- if (p_cfg_params->arfs_enable) {
+ if (p_cfg_params->mode != ECORE_FILTER_CONFIG_MODE_DISABLE) {
ecore_gft_config(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
p_cfg_params->tcp,
p_cfg_params->udp,
p_cfg_params->ipv4,
p_cfg_params->ipv6,
- GFT_PROFILE_TYPE_4_TUPLE);
+ ecore_arfs_mode_to_hsi(p_cfg_params->mode));
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
"tcp = %s, udp = %s, ipv4 = %s, ipv6 =%s\n",
p_cfg_params->tcp ? "Enable" : "Disable",
@@ -2107,8 +2133,8 @@ void ecore_arfs_mode_configure(struct ecore_hwfn *p_hwfn,
} else {
ecore_gft_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
}
- DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Configured ARFS mode : %s\n",
- p_cfg_params->arfs_enable ? "Enable" : "Disable");
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Configured ARFS mode : %d\n",
+ (int)p_cfg_params->mode);
}
enum _ecore_status_t
@@ -2179,10 +2205,10 @@ ecore_configure_rfs_ntuple_filter(struct ecore_hwfn *p_hwfn,
return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
-int ecore_get_rxq_coalesce(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- struct ecore_queue_cid *p_cid,
- u16 *p_rx_coal)
+enum _ecore_status_t ecore_get_rxq_coalesce(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_queue_cid *p_cid,
+ u16 *p_rx_coal)
{
u32 coalesce, address, is_valid;
struct cau_sb_entry sb_entry;
@@ -2191,7 +2217,8 @@ int ecore_get_rxq_coalesce(struct ecore_hwfn *p_hwfn,
rc = ecore_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
p_cid->sb_igu_id * sizeof(u64),
- (u64)(osal_uintptr_t)&sb_entry, 2, 0);
+ (u64)(osal_uintptr_t)&sb_entry, 2,
+ OSAL_NULL /* default parameters */);
if (rc != ECORE_SUCCESS) {
DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
return rc;
@@ -2213,10 +2240,10 @@ int ecore_get_rxq_coalesce(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
}
-int ecore_get_txq_coalesce(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- struct ecore_queue_cid *p_cid,
- u16 *p_tx_coal)
+enum _ecore_status_t ecore_get_txq_coalesce(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_queue_cid *p_cid,
+ u16 *p_tx_coal)
{
u32 coalesce, address, is_valid;
struct cau_sb_entry sb_entry;
@@ -2225,7 +2252,8 @@ int ecore_get_txq_coalesce(struct ecore_hwfn *p_hwfn,
rc = ecore_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
p_cid->sb_igu_id * sizeof(u64),
- (u64)(osal_uintptr_t)&sb_entry, 2, 0);
+ (u64)(osal_uintptr_t)&sb_entry, 2,
+ OSAL_NULL /* default parameters */);
if (rc != ECORE_SUCCESS) {
DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
return rc;
@@ -2302,3 +2330,55 @@ ecore_eth_tx_queue_maxrate(struct ecore_hwfn *p_hwfn,
return ecore_init_vport_rl(p_hwfn, p_ptt, vport, rate,
p_link->speed);
}
+
+#define RSS_TSTORM_UPDATE_STATUS_MAX_POLL_COUNT 100
+#define RSS_TSTORM_UPDATE_STATUS_POLL_PERIOD_US 1
+
+enum _ecore_status_t
+ecore_update_eth_rss_ind_table_entry(struct ecore_hwfn *p_hwfn,
+ u8 vport_id,
+ u8 ind_table_index,
+ u16 ind_table_value)
+{
+ struct eth_tstorm_rss_update_data update_data = { 0 };
+ void OSAL_IOMEM *addr = OSAL_NULL;
+ enum _ecore_status_t rc;
+ u8 abs_vport_id;
+ u32 cnt = 0;
+
+ OSAL_BUILD_BUG_ON(sizeof(update_data) != sizeof(u64));
+
+ rc = ecore_fw_vport(p_hwfn, vport_id, &abs_vport_id);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ addr = (u8 OSAL_IOMEM *)p_hwfn->regview +
+ GTT_BAR0_MAP_REG_TSDM_RAM +
+ TSTORM_ETH_RSS_UPDATE_OFFSET(p_hwfn->rel_pf_id);
+
+ *(u64 *)(&update_data) = DIRECT_REG_RD64(p_hwfn, addr);
+
+ for (cnt = 0; update_data.valid &&
+ cnt < RSS_TSTORM_UPDATE_STATUS_MAX_POLL_COUNT; cnt++) {
+ OSAL_UDELAY(RSS_TSTORM_UPDATE_STATUS_POLL_PERIOD_US);
+ *(u64 *)(&update_data) = DIRECT_REG_RD64(p_hwfn, addr);
+ }
+
+ if (update_data.valid) {
+ DP_NOTICE(p_hwfn, true,
+			  "rss update valid status is not clear! valid=0x%x vport id=%d ind_table_idx=%d ind_table_value=%d.\n",
+ update_data.valid, vport_id, ind_table_index,
+ ind_table_value);
+
+ return ECORE_AGAIN;
+ }
+
+ update_data.valid = 1;
+ update_data.ind_table_index = ind_table_index;
+ update_data.ind_table_value = ind_table_value;
+ update_data.vport_id = abs_vport_id;
+
+ DIRECT_REG_WR64(p_hwfn, addr, *(u64 *)(&update_data));
+
+ return ECORE_SUCCESS;
+}
diff --git a/drivers/net/qede/base/ecore_l2_api.h b/drivers/net/qede/base/ecore_l2_api.h
index 575b9e3a..004fb61b 100644
--- a/drivers/net/qede/base/ecore_l2_api.h
+++ b/drivers/net/qede/base/ecore_l2_api.h
@@ -137,6 +137,16 @@ struct ecore_filter_accept_flags {
#define ECORE_ACCEPT_MCAST_MATCHED 0x08
#define ECORE_ACCEPT_MCAST_UNMATCHED 0x10
#define ECORE_ACCEPT_BCAST 0x20
+#define ECORE_ACCEPT_ANY_VNI 0x40
+};
+
+enum ecore_filter_config_mode {
+ ECORE_FILTER_CONFIG_MODE_DISABLE,
+ ECORE_FILTER_CONFIG_MODE_5_TUPLE,
+ ECORE_FILTER_CONFIG_MODE_L4_PORT,
+ ECORE_FILTER_CONFIG_MODE_IP_DEST,
+ ECORE_FILTER_CONFIG_MODE_TUNN_TYPE,
+ ECORE_FILTER_CONFIG_MODE_IP_SRC,
};
struct ecore_arfs_config_params {
@@ -144,7 +154,7 @@ struct ecore_arfs_config_params {
bool udp;
bool ipv4;
bool ipv6;
- bool arfs_enable; /* Enable or disable arfs mode */
+ enum ecore_filter_config_mode mode;
};
/* Add / remove / move / remove-all unicast MAC-VLAN filters.
@@ -337,7 +347,10 @@ struct ecore_sp_vport_update_params {
/* MTU change - notice this requires the vport to be disabled.
* If non-zero, value would be used.
*/
- u16 mtu;
+ u16 mtu;
+ u8 update_ctl_frame_check;
+ u8 mac_chk_en;
+ u8 ethtype_chk_en;
};
/**
@@ -460,4 +473,28 @@ ecore_configure_rfs_ntuple_filter(struct ecore_hwfn *p_hwfn,
dma_addr_t p_addr, u16 length,
u16 qid, u8 vport_id,
bool b_is_add);
+
+/**
+ * @brief - ecore_update_eth_rss_ind_table_entry
+ *
+ * This function is used to update an RSS indirection table entry in FW RAM
+ * instead of using the SP vport-update ramrod with RSS params.
+ *
+ * Notice:
+ * This function supports only one outstanding command per engine. Ecore
+ * clients which use this function should call ecore_mcp_ind_table_lock() prior
+ * to it and ecore_mcp_ind_table_unlock() after it.
+ *
+ * @param p_hwfn
+ * @param vport_id
+ * @param ind_table_index
+ * @param ind_table_value
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t
+ecore_update_eth_rss_ind_table_entry(struct ecore_hwfn *p_hwfn,
+ u8 vport_id,
+ u8 ind_table_index,
+ u16 ind_table_value);
#endif
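
Editorial note: a sketch of a caller honouring the locking requirement documented above when rewriting a whole indirection table through the new per-entry interface. The exact signatures of ecore_mcp_ind_table_lock()/ecore_mcp_ind_table_unlock() are assumed from the comment; the table contents and vport id are illustrative.

static enum _ecore_status_t
example_update_ind_table(struct ecore_hwfn *p_hwfn, u8 vport_id,
			 const u16 *p_table, u16 num_entries)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u16 i;

	ecore_mcp_ind_table_lock(p_hwfn);	/* assumed signature */
	for (i = 0; i < num_entries; i++) {
		rc = ecore_update_eth_rss_ind_table_entry(p_hwfn, vport_id,
							  (u8)i, p_table[i]);
		if (rc != ECORE_SUCCESS)
			break;
	}
	ecore_mcp_ind_table_unlock(p_hwfn);	/* assumed signature */

	return rc;
}
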
diff --git a/drivers/net/qede/base/ecore_mcp.c b/drivers/net/qede/base/ecore_mcp.c
index ea14c172..6c656068 100644
--- a/drivers/net/qede/base/ecore_mcp.c
+++ b/drivers/net/qede/base/ecore_mcp.c
@@ -177,10 +177,16 @@ enum _ecore_status_t ecore_mcp_free(struct ecore_hwfn *p_hwfn)
return ECORE_SUCCESS;
}
+/* Maximum of 1 sec to wait for the SHMEM ready indication */
+#define ECORE_MCP_SHMEM_RDY_MAX_RETRIES 20
+#define ECORE_MCP_SHMEM_RDY_ITER_MS 50
+
static enum _ecore_status_t ecore_load_mcp_offsets(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
struct ecore_mcp_info *p_info = p_hwfn->mcp_info;
+ u8 cnt = ECORE_MCP_SHMEM_RDY_MAX_RETRIES;
+ u8 msec = ECORE_MCP_SHMEM_RDY_ITER_MS;
u32 drv_mb_offsize, mfw_mb_offsize;
u32 mcp_pf_id = MCP_PF_ID(p_hwfn);
@@ -198,6 +204,35 @@ static enum _ecore_status_t ecore_load_mcp_offsets(struct ecore_hwfn *p_hwfn,
p_info->public_base |= GRCBASE_MCP;
+ /* Get the MFW MB address and number of supported messages */
+ mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt,
+ SECTION_OFFSIZE_ADDR(p_info->public_base,
+ PUBLIC_MFW_MB));
+ p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
+ p_info->mfw_mb_length = (u16)ecore_rd(p_hwfn, p_ptt,
+ p_info->mfw_mb_addr);
+
+ /* @@@TBD:
+ * The driver can notify that there was an MCP reset, and read the SHMEM
+ * values before the MFW has completed initializing them.
+ * As a temporary solution, the "sup_msgs" field is used as a data ready
+ * indication.
+ * This should be replaced with an actual indication when it is provided
+ * by the MFW.
+ */
+ while (!p_info->mfw_mb_length && cnt--) {
+ OSAL_MSLEEP(msec);
+ p_info->mfw_mb_length = (u16)ecore_rd(p_hwfn, p_ptt,
+ p_info->mfw_mb_addr);
+ }
+
+ if (!cnt) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to get the SHMEM ready notification after %d msec\n",
+ ECORE_MCP_SHMEM_RDY_MAX_RETRIES * msec);
+ return ECORE_TIMEOUT;
+ }
+
/* Calculate the driver and MFW mailbox address */
drv_mb_offsize = ecore_rd(p_hwfn, p_ptt,
SECTION_OFFSIZE_ADDR(p_info->public_base,
@@ -208,14 +243,6 @@ static enum _ecore_status_t ecore_load_mcp_offsets(struct ecore_hwfn *p_hwfn,
" mcp_pf_id = 0x%x\n",
drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);
- /* Set the MFW MB address */
- mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt,
- SECTION_OFFSIZE_ADDR(p_info->public_base,
- PUBLIC_MFW_MB));
- p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
- p_info->mfw_mb_length = (u16)ecore_rd(p_hwfn, p_ptt,
- p_info->mfw_mb_addr);
-
/* Get the current driver mailbox sequence before sending
* the first command
*/
@@ -1656,6 +1683,49 @@ ecore_mcp_update_bw(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
&param);
}
+static void ecore_mcp_update_stag(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct public_func shmem_info;
+ u32 resp = 0, param = 0;
+
+ ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
+ MCP_PF_ID(p_hwfn));
+
+ p_hwfn->mcp_info->func_info.ovlan = (u16)shmem_info.ovlan_stag &
+ FUNC_MF_CFG_OV_STAG_MASK;
+ p_hwfn->hw_info.ovlan = p_hwfn->mcp_info->func_info.ovlan;
+ if (OSAL_TEST_BIT(ECORE_MF_OVLAN_CLSS, &p_hwfn->p_dev->mf_bits)) {
+ if (p_hwfn->hw_info.ovlan != ECORE_MCP_VLAN_UNSET) {
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE,
+ p_hwfn->hw_info.ovlan);
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 1);
+
+ /* Configure DB to add external vlan to EDPM packets */
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 1);
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID_BB_K2,
+ p_hwfn->hw_info.ovlan);
+ } else {
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 0);
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE, 0);
+
+ /* Configure DB to add external vlan to EDPM packets */
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 0);
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID_BB_K2, 0);
+ }
+
+ ecore_sp_pf_update_stag(p_hwfn);
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "ovlan = %d hw_mode = 0x%x\n",
+ p_hwfn->mcp_info->func_info.ovlan, p_hwfn->hw_info.hw_mode);
+ OSAL_HW_INFO_CHANGE(p_hwfn, ECORE_HW_INFO_CHANGE_OVLAN);
+
+ /* Acknowledge the MFW */
+ ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_S_TAG_UPDATE_ACK, 0,
+ &resp, &param);
+}
+
static void ecore_mcp_handle_fan_failure(struct ecore_hwfn *p_hwfn)
{
/* A single notification should be sent to upper driver in CMT mode */
@@ -1946,7 +2016,7 @@ ecore_mcp_read_ufp_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
DP_NOTICE(p_hwfn, false, "Unknown Host priority control %d\n",
val);
- DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
"UFP shmem config: mode = %d tc = %d pri_type = %d\n",
p_hwfn->ufp_info.mode, p_hwfn->ufp_info.tc,
p_hwfn->ufp_info.pri_type);
@@ -2041,6 +2111,9 @@ enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
case MFW_DRV_MSG_BW_UPDATE:
ecore_mcp_update_bw(p_hwfn, p_ptt);
break;
+ case MFW_DRV_MSG_S_TAG_UPDATE:
+ ecore_mcp_update_stag(p_hwfn, p_ptt);
+ break;
case MFW_DRV_MSG_FAILURE_DETECTED:
ecore_mcp_handle_fan_failure(p_hwfn);
break;
@@ -2155,8 +2228,10 @@ enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t ecore_mcp_get_transceiver_data(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
- u32 *p_tranceiver_type)
+ u32 *p_transceiver_state,
+ u32 *p_transceiver_type)
{
+ u32 transceiver_info;
enum _ecore_status_t rc = ECORE_SUCCESS;
/* TODO - Add support for VFs */
@@ -2167,14 +2242,23 @@ enum _ecore_status_t ecore_mcp_get_transceiver_data(struct ecore_hwfn *p_hwfn,
DP_NOTICE(p_hwfn, false, "MFW is not initialized!\n");
return ECORE_BUSY;
}
- if (!p_ptt) {
- *p_tranceiver_type = ETH_TRANSCEIVER_TYPE_NONE;
- rc = ECORE_INVAL;
+
+ *p_transceiver_type = ETH_TRANSCEIVER_TYPE_NONE;
+ *p_transceiver_state = ETH_TRANSCEIVER_STATE_UPDATING;
+
+ transceiver_info = ecore_rd(p_hwfn, p_ptt,
+ p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port,
+ transceiver_data));
+
+ *p_transceiver_state = GET_MFW_FIELD(transceiver_info,
+ ETH_TRANSCEIVER_STATE);
+
+ if (*p_transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT) {
+ *p_transceiver_type = GET_MFW_FIELD(transceiver_info,
+ ETH_TRANSCEIVER_TYPE);
} else {
- *p_tranceiver_type = ecore_rd(p_hwfn, p_ptt,
- p_hwfn->mcp_info->port_addr +
- offsetof(struct public_port,
- transceiver_data));
+ *p_transceiver_type = ETH_TRANSCEIVER_TYPE_UNKNOWN;
}
return rc;
@@ -2194,15 +2278,11 @@ enum _ecore_status_t ecore_mcp_trans_speed_mask(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 *p_speed_mask)
{
- u32 transceiver_data, transceiver_type, transceiver_state;
+ u32 transceiver_type, transceiver_state;
- ecore_mcp_get_transceiver_data(p_hwfn, p_ptt, &transceiver_data);
+ ecore_mcp_get_transceiver_data(p_hwfn, p_ptt, &transceiver_state,
+ &transceiver_type);
- transceiver_state = GET_MFW_FIELD(transceiver_data,
- ETH_TRANSCEIVER_STATE);
-
- transceiver_type = GET_MFW_FIELD(transceiver_data,
- ETH_TRANSCEIVER_TYPE);
if (is_transceiver_ready(transceiver_state, transceiver_type) == 0)
return ECORE_INVAL;
@@ -2823,10 +2903,72 @@ ecore_mcp_ov_get_fc_npiv(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
}
enum _ecore_status_t
-ecore_mcp_ov_update_mtu(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, u16 mtu)
+ecore_mcp_ov_update_mtu(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ u16 mtu)
{
- return 0;
+ u32 resp = 0, param = 0, drv_mb_param = 0;
+ enum _ecore_status_t rc;
+
+ SET_MFW_FIELD(drv_mb_param, DRV_MB_PARAM_OV_MTU_SIZE, (u32)mtu);
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_MTU,
+ drv_mb_param, &resp, &param);
+ if (rc != ECORE_SUCCESS)
+ DP_ERR(p_hwfn, "Failed to send mtu value, rc = %d\n", rc);
+
+ return rc;
+}
+
+enum _ecore_status_t
+ecore_mcp_ov_update_mac(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ u8 *mac)
+{
+ struct ecore_mcp_mb_params mb_params;
+ union drv_union_data union_data;
+ enum _ecore_status_t rc;
+
+ OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
+ mb_params.cmd = DRV_MSG_CODE_SET_VMAC;
+ SET_MFW_FIELD(mb_params.param, DRV_MSG_CODE_VMAC_TYPE,
+ DRV_MSG_CODE_VMAC_TYPE_MAC);
+ mb_params.param |= MCP_PF_ID(p_hwfn);
+ OSAL_MEMCPY(&union_data.raw_data, mac, ETH_ALEN);
+ mb_params.p_data_src = &union_data;
+ rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc != ECORE_SUCCESS)
+ DP_ERR(p_hwfn, "Failed to send mac address, rc = %d\n", rc);
+
+ return rc;
+}
+
+enum _ecore_status_t
+ecore_mcp_ov_update_eswitch(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ enum ecore_ov_eswitch eswitch)
+{
+ enum _ecore_status_t rc;
+ u32 resp = 0, param = 0;
+ u32 drv_mb_param;
+
+ switch (eswitch) {
+ case ECORE_OV_ESWITCH_NONE:
+ drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_NONE;
+ break;
+ case ECORE_OV_ESWITCH_VEB:
+ drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEB;
+ break;
+ case ECORE_OV_ESWITCH_VEPA:
+ drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEPA;
+ break;
+ default:
+ DP_ERR(p_hwfn, "Invalid eswitch mode %d\n", eswitch);
+ return ECORE_INVAL;
+ }
+
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE,
+ drv_mb_param, &resp, &param);
+ if (rc != ECORE_SUCCESS)
+ DP_ERR(p_hwfn, "Failed to send eswitch mode, rc = %d\n", rc);
+
+ return rc;
}
enum _ecore_status_t ecore_mcp_set_led(struct ecore_hwfn *p_hwfn,
@@ -2938,11 +3080,11 @@ enum _ecore_status_t ecore_mcp_nvm_read(struct ecore_dev *p_dev, u32 addr,
}
enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd,
- u32 addr, u8 *p_buf, u32 len)
+ u32 addr, u8 *p_buf, u32 *p_len)
{
struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
struct ecore_ptt *p_ptt;
- u32 resp, param;
+ u32 resp = 0, param;
enum _ecore_status_t rc;
p_ptt = ecore_ptt_acquire(p_hwfn);
@@ -2953,7 +3095,7 @@ enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd,
(cmd == ECORE_PHY_CORE_READ) ?
DRV_MSG_CODE_PHY_CORE_READ :
DRV_MSG_CODE_PHY_RAW_READ,
- addr, &resp, &param, &len, (u32 *)p_buf);
+ addr, &resp, &param, p_len, (u32 *)p_buf);
if (rc != ECORE_SUCCESS)
DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
@@ -2982,7 +3124,7 @@ enum _ecore_status_t ecore_mcp_nvm_del_file(struct ecore_dev *p_dev, u32 addr)
{
struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
struct ecore_ptt *p_ptt;
- u32 resp, param;
+ u32 resp = 0, param;
enum _ecore_status_t rc;
p_ptt = ecore_ptt_acquire(p_hwfn);
@@ -3001,7 +3143,7 @@ enum _ecore_status_t ecore_mcp_nvm_put_file_begin(struct ecore_dev *p_dev,
{
struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
struct ecore_ptt *p_ptt;
- u32 resp, param;
+ u32 resp = 0, param;
enum _ecore_status_t rc;
p_ptt = ecore_ptt_acquire(p_hwfn);
@@ -3095,8 +3237,8 @@ enum _ecore_status_t ecore_mcp_phy_write(struct ecore_dev *p_dev, u32 cmd,
u32 addr, u8 *p_buf, u32 len)
{
struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ u32 resp = 0, param, nvm_cmd;
struct ecore_ptt *p_ptt;
- u32 resp, param, nvm_cmd;
enum _ecore_status_t rc;
p_ptt = ecore_ptt_acquire(p_hwfn);
@@ -4002,13 +4144,83 @@ ecore_mcp_drv_attribute(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
return ECORE_SUCCESS;
}
+enum _ecore_status_t ecore_mcp_get_engine_config(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
+ struct ecore_mcp_mb_params mb_params;
+ u8 fir_valid, l2_valid;
+ enum _ecore_status_t rc;
+
+ OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
+ mb_params.cmd = DRV_MSG_CODE_GET_ENGINE_CONFIG;
+ rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
+ DP_INFO(p_hwfn,
+ "The get_engine_config command is unsupported by the MFW\n");
+ return ECORE_NOTIMPL;
+ }
+
+ fir_valid = GET_MFW_FIELD(mb_params.mcp_param,
+ FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID);
+ if (fir_valid)
+ p_dev->fir_affin =
+ GET_MFW_FIELD(mb_params.mcp_param,
+ FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE);
+
+ l2_valid = GET_MFW_FIELD(mb_params.mcp_param,
+ FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID);
+ if (l2_valid)
+ p_dev->l2_affin_hint =
+ GET_MFW_FIELD(mb_params.mcp_param,
+ FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE);
+
+ DP_INFO(p_hwfn,
+ "Engine affinity config: FIR={valid %hhd, value %hhd}, L2_hint={valid %hhd, value %hhd}\n",
+ fir_valid, p_dev->fir_affin, l2_valid, p_dev->l2_affin_hint);
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_mcp_get_ppfid_bitmap(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
+ struct ecore_mcp_mb_params mb_params;
+ enum _ecore_status_t rc;
+
+ OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
+ mb_params.cmd = DRV_MSG_CODE_GET_PPFID_BITMAP;
+ rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
+ DP_INFO(p_hwfn,
+ "The get_ppfid_bitmap command is unsupported by the MFW\n");
+ return ECORE_NOTIMPL;
+ }
+
+ p_dev->ppfid_bitmap = GET_MFW_FIELD(mb_params.mcp_param,
+ FW_MB_PARAM_PPFID_BITMAP);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "PPFID bitmap 0x%hhx\n",
+ p_dev->ppfid_bitmap);
+
+ return ECORE_SUCCESS;
+}
+
void ecore_mcp_wol_wr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
u32 offset, u32 val)
{
- struct ecore_mcp_mb_params mb_params = {0};
enum _ecore_status_t rc = ECORE_SUCCESS;
u32 dword = val;
+ struct ecore_mcp_mb_params mb_params;
+ OSAL_MEMSET(&mb_params, 0, sizeof(struct ecore_mcp_mb_params));
mb_params.cmd = DRV_MSG_CODE_WRITE_WOL_REG;
mb_params.param = offset;
mb_params.p_data_src = &dword;
diff --git a/drivers/net/qede/base/ecore_mcp.h b/drivers/net/qede/base/ecore_mcp.h
index 8e125310..2c052b7f 100644
--- a/drivers/net/qede/base/ecore_mcp.h
+++ b/drivers/net/qede/base/ecore_mcp.h
@@ -25,9 +25,6 @@
rel_pfid)
#define MCP_PF_ID(p_hwfn) MCP_PF_ID_BY_REL(p_hwfn, (p_hwfn)->rel_pf_id)
-#define MFW_PORT(_p_hwfn) ((_p_hwfn)->abs_pf_id % \
- ecore_device_num_ports((_p_hwfn)->p_dev))
-
struct ecore_mcp_info {
/* List for mailbox commands which were sent and wait for a response */
osal_list_t cmd_list;
@@ -566,4 +563,22 @@ ecore_mcp_read_ufp_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
void ecore_mcp_wol_wr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
u32 offset, u32 val);
+/**
+ * @brief Get the engine affinity configuration.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+enum _ecore_status_t ecore_mcp_get_engine_config(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief Get the PPFID bitmap.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+enum _ecore_status_t ecore_mcp_get_ppfid_bitmap(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
#endif /* __ECORE_MCP_H__ */
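
Editorial note: a sketch of how an init path could consume the two new MFW queries declared above. ECORE_NOTIMPL only means the MFW predates the commands, so it is treated as non-fatal here; whether a particular caller should do the same is an assumption.

static enum _ecore_status_t example_query_mfw_cfg(struct ecore_hwfn *p_hwfn,
						  struct ecore_ptt *p_ptt)
{
	enum _ecore_status_t rc;

	rc = ecore_mcp_get_engine_config(p_hwfn, p_ptt);
	if (rc != ECORE_SUCCESS && rc != ECORE_NOTIMPL)
		return rc;

	rc = ecore_mcp_get_ppfid_bitmap(p_hwfn, p_ptt);
	if (rc != ECORE_SUCCESS && rc != ECORE_NOTIMPL)
		return rc;

	return ECORE_SUCCESS;
}
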
diff --git a/drivers/net/qede/base/ecore_mcp_api.h b/drivers/net/qede/base/ecore_mcp_api.h
index cfb9f99d..7327074f 100644
--- a/drivers/net/qede/base/ecore_mcp_api.h
+++ b/drivers/net/qede/base/ecore_mcp_api.h
@@ -185,6 +185,12 @@ enum ecore_ov_driver_state {
ECORE_OV_DRIVER_STATE_ACTIVE
};
+enum ecore_ov_eswitch {
+ ECORE_OV_ESWITCH_NONE,
+ ECORE_OV_ESWITCH_VEB,
+ ECORE_OV_ESWITCH_VEPA
+};
+
#define ECORE_MAX_NPIV_ENTRIES 128
#define ECORE_WWN_SIZE 8
struct ecore_fc_npiv_tbl {
@@ -521,6 +527,10 @@ union ecore_mfw_tlv_data {
struct ecore_mfw_tlv_iscsi iscsi;
};
+enum ecore_hw_info_change {
+ ECORE_HW_INFO_CHANGE_OVLAN,
+};
+
/**
* @brief - returns the link params of the hw function
*
@@ -597,6 +607,7 @@ enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_hwfn *p_hwfn,
*
* @param p_dev - ecore dev pointer
* @param p_ptt
+ * @param p_transceiver_state - transceiver state.
* @param p_transceiver_type - media type value
*
* @return enum _ecore_status_t -
@@ -605,6 +616,7 @@ enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_hwfn *p_hwfn,
*/
enum _ecore_status_t ecore_mcp_get_transceiver_data(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
+ u32 *p_transceiver_state,
u32 *p_tranceiver_type);
/**
@@ -810,6 +822,32 @@ enum _ecore_status_t ecore_mcp_ov_update_mtu(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u16 mtu);
/**
+ * @brief Send MAC address to MFW
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param mac - MAC address
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t
+ecore_mcp_ov_update_mac(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ u8 *mac);
+
+/**
+ * @brief Send eswitch mode to MFW
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param eswitch - eswitch mode
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t
+ecore_mcp_ov_update_eswitch(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ enum ecore_ov_eswitch eswitch);
+
+/**
* @brief Set LED status
*
* @param p_hwfn
@@ -905,7 +943,7 @@ enum _ecore_status_t ecore_mcp_nvm_resp(struct ecore_dev *p_dev, u8 *p_buf);
* @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
*/
enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd,
- u32 addr, u8 *p_buf, u32 len);
+ u32 addr, u8 *p_buf, u32 *p_len);
/**
* @brief Read from nvm
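
Editorial note: a sketch of how the newly implemented OV helpers might be chained by a PF driver once the link-level values are known. The ordering, the VEB choice and the ignored return values are illustrative assumptions, not part of the patch.

static void example_ov_report(struct ecore_hwfn *p_hwfn,
			      struct ecore_ptt *p_ptt,
			      u8 *p_mac, u16 mtu)
{
	/* Each call logs its own failure; errors are ignored here only
	 * to keep the sketch short.
	 */
	(void)ecore_mcp_ov_update_mtu(p_hwfn, p_ptt, mtu);
	(void)ecore_mcp_ov_update_mac(p_hwfn, p_ptt, p_mac);
	(void)ecore_mcp_ov_update_eswitch(p_hwfn, p_ptt,
					  ECORE_OV_ESWITCH_VEB);
}
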
diff --git a/drivers/net/qede/base/ecore_rt_defs.h b/drivers/net/qede/base/ecore_rt_defs.h
index 721b8c15..3860e1a5 100644
--- a/drivers/net/qede/base/ecore_rt_defs.h
+++ b/drivers/net/qede/base/ecore_rt_defs.h
@@ -390,147 +390,146 @@
#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 39769
#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16
#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 39785
-#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 39786
-#define NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET 39787
+#define NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET 39786
#define NIG_REG_PPF_TO_ENGINE_SEL_RT_SIZE 8
-#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_OFFSET 39795
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_OFFSET 39794
#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_SIZE 1024
-#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_OFFSET 40819
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_OFFSET 40818
#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_SIZE 512
-#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_OFFSET 41331
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_OFFSET 41330
#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_SIZE 512
-#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 41843
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 41842
#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 512
-#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_OFFSET 42355
+#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_OFFSET 42354
#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_SIZE 512
-#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_OFFSET 42867
+#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_OFFSET 42866
#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_SIZE 32
-#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 42899
-#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 42900
-#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 42901
-#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 42902
-#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 42903
-#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 42904
-#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 42905
-#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 42906
-#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 42907
-#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 42908
-#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 42909
-#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 42910
-#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 42911
-#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 42912
-#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 42913
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 42914
-#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 42915
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 42916
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 42917
-#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 42918
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 42919
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 42920
-#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 42921
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 42922
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 42923
-#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 42924
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 42925
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 42926
-#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 42927
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 42928
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 42929
-#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 42930
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 42931
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 42932
-#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 42933
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 42934
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 42935
-#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 42936
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 42937
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 42938
-#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 42939
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 42940
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 42941
-#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 42942
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 42943
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 42944
-#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 42945
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 42946
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 42947
-#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 42948
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 42949
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 42950
-#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 42951
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 42952
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 42953
-#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 42954
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 42955
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 42956
-#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 42957
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 42958
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 42959
-#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 42960
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 42961
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 42962
-#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 42963
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 42964
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 42965
-#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 42966
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 42967
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 42968
-#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 42969
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 42970
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 42971
-#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 42972
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 42973
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ20_RT_OFFSET 42974
-#define PBF_REG_BTB_GUARANTEED_VOQ20_RT_OFFSET 42975
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ20_RT_OFFSET 42976
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ21_RT_OFFSET 42977
-#define PBF_REG_BTB_GUARANTEED_VOQ21_RT_OFFSET 42978
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ21_RT_OFFSET 42979
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ22_RT_OFFSET 42980
-#define PBF_REG_BTB_GUARANTEED_VOQ22_RT_OFFSET 42981
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ22_RT_OFFSET 42982
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ23_RT_OFFSET 42983
-#define PBF_REG_BTB_GUARANTEED_VOQ23_RT_OFFSET 42984
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ23_RT_OFFSET 42985
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ24_RT_OFFSET 42986
-#define PBF_REG_BTB_GUARANTEED_VOQ24_RT_OFFSET 42987
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ24_RT_OFFSET 42988
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ25_RT_OFFSET 42989
-#define PBF_REG_BTB_GUARANTEED_VOQ25_RT_OFFSET 42990
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ25_RT_OFFSET 42991
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ26_RT_OFFSET 42992
-#define PBF_REG_BTB_GUARANTEED_VOQ26_RT_OFFSET 42993
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ26_RT_OFFSET 42994
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ27_RT_OFFSET 42995
-#define PBF_REG_BTB_GUARANTEED_VOQ27_RT_OFFSET 42996
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ27_RT_OFFSET 42997
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ28_RT_OFFSET 42998
-#define PBF_REG_BTB_GUARANTEED_VOQ28_RT_OFFSET 42999
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ28_RT_OFFSET 43000
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ29_RT_OFFSET 43001
-#define PBF_REG_BTB_GUARANTEED_VOQ29_RT_OFFSET 43002
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ29_RT_OFFSET 43003
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ30_RT_OFFSET 43004
-#define PBF_REG_BTB_GUARANTEED_VOQ30_RT_OFFSET 43005
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ30_RT_OFFSET 43006
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ31_RT_OFFSET 43007
-#define PBF_REG_BTB_GUARANTEED_VOQ31_RT_OFFSET 43008
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ31_RT_OFFSET 43009
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ32_RT_OFFSET 43010
-#define PBF_REG_BTB_GUARANTEED_VOQ32_RT_OFFSET 43011
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ32_RT_OFFSET 43012
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ33_RT_OFFSET 43013
-#define PBF_REG_BTB_GUARANTEED_VOQ33_RT_OFFSET 43014
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ33_RT_OFFSET 43015
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ34_RT_OFFSET 43016
-#define PBF_REG_BTB_GUARANTEED_VOQ34_RT_OFFSET 43017
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ34_RT_OFFSET 43018
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ35_RT_OFFSET 43019
-#define PBF_REG_BTB_GUARANTEED_VOQ35_RT_OFFSET 43020
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ35_RT_OFFSET 43021
-#define XCM_REG_CON_PHY_Q3_RT_OFFSET 43022
+#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 42898
+#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 42899
+#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 42900
+#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 42901
+#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 42902
+#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 42903
+#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 42904
+#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 42905
+#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 42906
+#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 42907
+#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 42908
+#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 42909
+#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 42910
+#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 42911
+#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 42912
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 42913
+#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 42914
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 42915
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 42916
+#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 42917
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 42918
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 42919
+#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 42920
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 42921
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 42922
+#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 42923
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 42924
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 42925
+#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 42926
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 42927
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 42928
+#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 42929
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 42930
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 42931
+#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 42932
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 42933
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 42934
+#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 42935
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 42936
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 42937
+#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 42938
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 42939
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 42940
+#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 42941
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 42942
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 42943
+#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 42944
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 42945
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 42946
+#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 42947
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 42948
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 42949
+#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 42950
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 42951
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 42952
+#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 42953
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 42954
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 42955
+#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 42956
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 42957
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 42958
+#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 42959
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 42960
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 42961
+#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 42962
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 42963
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 42964
+#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 42965
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 42966
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 42967
+#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 42968
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 42969
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 42970
+#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 42971
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 42972
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ20_RT_OFFSET 42973
+#define PBF_REG_BTB_GUARANTEED_VOQ20_RT_OFFSET 42974
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ20_RT_OFFSET 42975
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ21_RT_OFFSET 42976
+#define PBF_REG_BTB_GUARANTEED_VOQ21_RT_OFFSET 42977
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ21_RT_OFFSET 42978
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ22_RT_OFFSET 42979
+#define PBF_REG_BTB_GUARANTEED_VOQ22_RT_OFFSET 42980
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ22_RT_OFFSET 42981
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ23_RT_OFFSET 42982
+#define PBF_REG_BTB_GUARANTEED_VOQ23_RT_OFFSET 42983
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ23_RT_OFFSET 42984
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ24_RT_OFFSET 42985
+#define PBF_REG_BTB_GUARANTEED_VOQ24_RT_OFFSET 42986
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ24_RT_OFFSET 42987
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ25_RT_OFFSET 42988
+#define PBF_REG_BTB_GUARANTEED_VOQ25_RT_OFFSET 42989
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ25_RT_OFFSET 42990
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ26_RT_OFFSET 42991
+#define PBF_REG_BTB_GUARANTEED_VOQ26_RT_OFFSET 42992
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ26_RT_OFFSET 42993
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ27_RT_OFFSET 42994
+#define PBF_REG_BTB_GUARANTEED_VOQ27_RT_OFFSET 42995
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ27_RT_OFFSET 42996
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ28_RT_OFFSET 42997
+#define PBF_REG_BTB_GUARANTEED_VOQ28_RT_OFFSET 42998
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ28_RT_OFFSET 42999
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ29_RT_OFFSET 43000
+#define PBF_REG_BTB_GUARANTEED_VOQ29_RT_OFFSET 43001
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ29_RT_OFFSET 43002
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ30_RT_OFFSET 43003
+#define PBF_REG_BTB_GUARANTEED_VOQ30_RT_OFFSET 43004
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ30_RT_OFFSET 43005
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ31_RT_OFFSET 43006
+#define PBF_REG_BTB_GUARANTEED_VOQ31_RT_OFFSET 43007
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ31_RT_OFFSET 43008
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ32_RT_OFFSET 43009
+#define PBF_REG_BTB_GUARANTEED_VOQ32_RT_OFFSET 43010
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ32_RT_OFFSET 43011
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ33_RT_OFFSET 43012
+#define PBF_REG_BTB_GUARANTEED_VOQ33_RT_OFFSET 43013
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ33_RT_OFFSET 43014
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ34_RT_OFFSET 43015
+#define PBF_REG_BTB_GUARANTEED_VOQ34_RT_OFFSET 43016
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ34_RT_OFFSET 43017
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ35_RT_OFFSET 43018
+#define PBF_REG_BTB_GUARANTEED_VOQ35_RT_OFFSET 43019
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ35_RT_OFFSET 43020
+#define XCM_REG_CON_PHY_Q3_RT_OFFSET 43021
-#define RUNTIME_ARRAY_SIZE 43023
+#define RUNTIME_ARRAY_SIZE 43022
/* Init Callbacks */
#define DMAE_READY_CB 0
diff --git a/drivers/net/qede/base/ecore_sp_commands.c b/drivers/net/qede/base/ecore_sp_commands.c
index b43baf9d..49a5ff55 100644
--- a/drivers/net/qede/base/ecore_sp_commands.c
+++ b/drivers/net/qede/base/ecore_sp_commands.c
@@ -515,6 +515,10 @@ enum _ecore_status_t ecore_sp_rl_update(struct ecore_hwfn *p_hwfn,
rl_update->rl_id_first = params->rl_id_first;
rl_update->rl_id_last = params->rl_id_last;
rl_update->rl_dc_qcn_flg = params->rl_dc_qcn_flg;
+ rl_update->dcqcn_reset_alpha_on_idle =
+ params->dcqcn_reset_alpha_on_idle;
+ rl_update->rl_bc_stage_th = params->rl_bc_stage_th;
+ rl_update->rl_timer_stage_th = params->rl_timer_stage_th;
rl_update->rl_bc_rate = OSAL_CPU_TO_LE32(params->rl_bc_rate);
rl_update->rl_max_rate =
OSAL_CPU_TO_LE16(ecore_sp_rl_mb_to_qm(params->rl_max_rate));
@@ -529,12 +533,14 @@ enum _ecore_status_t ecore_sp_rl_update(struct ecore_hwfn *p_hwfn,
OSAL_CPU_TO_LE32(params->dcqcn_timeuot_us);
rl_update->qcn_timeuot_us = OSAL_CPU_TO_LE32(params->qcn_timeuot_us);
- DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "rl_params: qcn_update_param_flg %x, dcqcn_update_param_flg %x, rl_init_flg %x, rl_start_flg %x, rl_stop_flg %x, rl_id_first %x, rl_id_last %x, rl_dc_qcn_flg %x, rl_bc_rate %x, rl_max_rate %x, rl_r_ai %x, rl_r_hai %x, dcqcn_g %x, dcqcn_k_us %x, dcqcn_timeuot_us %x, qcn_timeuot_us %x\n",
+	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "rl_params: qcn_update_param_flg %x, dcqcn_update_param_flg %x, rl_init_flg %x, rl_start_flg %x, rl_stop_flg %x, rl_id_first %x, rl_id_last %x, rl_dc_qcn_flg %x, dcqcn_reset_alpha_on_idle %x, rl_bc_stage_th %x, rl_timer_stage_th %x, rl_bc_rate %x, rl_max_rate %x, rl_r_ai %x, rl_r_hai %x, dcqcn_g %x, dcqcn_k_us %x, dcqcn_timeuot_us %x, qcn_timeuot_us %x\n",
rl_update->qcn_update_param_flg,
rl_update->dcqcn_update_param_flg,
rl_update->rl_init_flg, rl_update->rl_start_flg,
rl_update->rl_stop_flg, rl_update->rl_id_first,
rl_update->rl_id_last, rl_update->rl_dc_qcn_flg,
+ rl_update->dcqcn_reset_alpha_on_idle,
+ rl_update->rl_bc_stage_th, rl_update->rl_timer_stage_th,
rl_update->rl_bc_rate, rl_update->rl_max_rate,
rl_update->rl_r_ai, rl_update->rl_r_hai,
rl_update->dcqcn_g, rl_update->dcqcn_k_us,
diff --git a/drivers/net/qede/base/ecore_sp_commands.h b/drivers/net/qede/base/ecore_sp_commands.h
index e57414cf..524fe57a 100644
--- a/drivers/net/qede/base/ecore_sp_commands.h
+++ b/drivers/net/qede/base/ecore_sp_commands.h
@@ -119,6 +119,9 @@ struct ecore_rl_update_params {
u8 rl_stop_flg;
u8 rl_id_first;
u8 rl_id_last;
+ u8 dcqcn_reset_alpha_on_idle;
+ u8 rl_bc_stage_th;
+ u8 rl_timer_stage_th;
u8 rl_dc_qcn_flg; /* If set, RL will used for DCQCN */
u32 rl_bc_rate; /* Byte Counter Limit */
u32 rl_max_rate; /* Maximum rate in Mbps resolution */
diff --git a/drivers/net/qede/base/ecore_spq.c b/drivers/net/qede/base/ecore_spq.c
index 776c86f7..88ad961e 100644
--- a/drivers/net/qede/base/ecore_spq.c
+++ b/drivers/net/qede/base/ecore_spq.c
@@ -282,20 +282,30 @@ ecore_async_event_completion(struct ecore_hwfn *p_hwfn,
struct event_ring_entry *p_eqe)
{
ecore_spq_async_comp_cb cb;
+ enum _ecore_status_t rc;
- if (!p_hwfn->p_spq || (p_eqe->protocol_id >= MAX_PROTOCOL_TYPE))
+ if (p_eqe->protocol_id >= MAX_PROTOCOL_TYPE) {
+ DP_ERR(p_hwfn, "Wrong protocol: %d\n", p_eqe->protocol_id);
return ECORE_INVAL;
+ }
cb = p_hwfn->p_spq->async_comp_cb[p_eqe->protocol_id];
- if (cb) {
- return cb(p_hwfn, p_eqe->opcode, p_eqe->echo,
- &p_eqe->data, p_eqe->fw_return_code);
- } else {
+ if (!cb) {
DP_NOTICE(p_hwfn,
true, "Unknown Async completion for protocol: %d\n",
p_eqe->protocol_id);
return ECORE_INVAL;
}
+
+ rc = cb(p_hwfn, p_eqe->opcode, p_eqe->echo,
+ &p_eqe->data, p_eqe->fw_return_code);
+ if (rc != ECORE_SUCCESS)
+ DP_NOTICE(p_hwfn, true,
+ "Async completion callback failed, rc = %d [opcode %x, echo %x, fw_return_code %x]",
+ rc, p_eqe->opcode, p_eqe->echo,
+ p_eqe->fw_return_code);
+
+ return rc;
}
enum _ecore_status_t
@@ -339,10 +349,16 @@ enum _ecore_status_t ecore_eq_completion(struct ecore_hwfn *p_hwfn,
{
struct ecore_eq *p_eq = cookie;
struct ecore_chain *p_chain = &p_eq->chain;
- enum _ecore_status_t rc = 0;
+ u16 fw_cons_idx = 0;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ if (!p_hwfn->p_spq) {
+ DP_ERR(p_hwfn, "Unexpected NULL p_spq\n");
+ return ECORE_INVAL;
+ }
/* take a snapshot of the FW consumer */
- u16 fw_cons_idx = OSAL_LE16_TO_CPU(*p_eq->p_fw_cons);
+ fw_cons_idx = OSAL_LE16_TO_CPU(*p_eq->p_fw_cons);
DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);
@@ -358,7 +374,8 @@ enum _ecore_status_t ecore_eq_completion(struct ecore_hwfn *p_hwfn,
while (fw_cons_idx != ecore_chain_get_cons_idx(p_chain)) {
struct event_ring_entry *p_eqe = ecore_chain_consume(p_chain);
if (!p_eqe) {
- rc = ECORE_INVAL;
+ DP_ERR(p_hwfn,
+ "Unexpected NULL chain consumer entry\n");
break;
}
@@ -374,15 +391,13 @@ enum _ecore_status_t ecore_eq_completion(struct ecore_hwfn *p_hwfn,
*/
p_eqe->flags);
- if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
- if (ecore_async_event_completion(p_hwfn, p_eqe))
- rc = ECORE_INVAL;
- } else if (ecore_spq_completion(p_hwfn,
- p_eqe->echo,
- p_eqe->fw_return_code,
- &p_eqe->data)) {
- rc = ECORE_INVAL;
- }
+ if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC))
+ ecore_async_event_completion(p_hwfn, p_eqe);
+ else
+ ecore_spq_completion(p_hwfn,
+ p_eqe->echo,
+ p_eqe->fw_return_code,
+ &p_eqe->data);
ecore_chain_recycle_consumed(p_chain);
}
@@ -928,12 +943,11 @@ enum _ecore_status_t ecore_spq_completion(struct ecore_hwfn *p_hwfn,
struct ecore_spq_entry *found = OSAL_NULL;
enum _ecore_status_t rc;
- if (!p_hwfn)
- return ECORE_INVAL;
-
p_spq = p_hwfn->p_spq;
- if (!p_spq)
+ if (!p_spq) {
+ DP_ERR(p_hwfn, "Unexpected NULL p_spq\n");
return ECORE_INVAL;
+ }
OSAL_SPIN_LOCK(&p_spq->lock);
OSAL_LIST_FOR_EACH_ENTRY_SAFE(p_ent,
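
For readability, a minimal C sketch of the dispatch-and-log shape the ecore_spq.c rework above settles on: validate the protocol index, look up the registered callback, and log a failing callback instead of aborting the event-ring drain. The type and constant names below are simplified stand-ins, not the real ecore definitions.

#include <stdio.h>

#define MAX_PROTOCOL_TYPE 4	/* illustrative value only */

typedef int (*async_comp_cb)(unsigned char opcode);

static async_comp_cb async_cb[MAX_PROTOCOL_TYPE];

static int async_event_completion(unsigned int protocol_id, unsigned char opcode)
{
	async_comp_cb cb;
	int rc;

	if (protocol_id >= MAX_PROTOCOL_TYPE) {
		fprintf(stderr, "Wrong protocol: %u\n", protocol_id);
		return -1;
	}

	cb = async_cb[protocol_id];
	if (!cb) {
		fprintf(stderr, "Unknown async completion for protocol: %u\n",
			protocol_id);
		return -1;
	}

	rc = cb(opcode);
	if (rc)
		fprintf(stderr,
			"Async completion callback failed, rc = %d [opcode %x]\n",
			rc, opcode);

	/* The EQ loop keeps draining the ring regardless of rc. */
	return rc;
}
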
diff --git a/drivers/net/qede/base/ecore_sriov.c b/drivers/net/qede/base/ecore_sriov.c
index f7ebf7ad..7d73ef9f 100644
--- a/drivers/net/qede/base/ecore_sriov.c
+++ b/drivers/net/qede/base/ecore_sriov.c
@@ -31,7 +31,7 @@ static enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
union event_ring_data *data,
u8 fw_return_code);
-const char *ecore_channel_tlvs_string[] = {
+const char *qede_ecore_channel_tlvs_string[] = {
"CHANNEL_TLV_NONE", /* ends tlv sequence */
"CHANNEL_TLV_ACQUIRE",
"CHANNEL_TLV_VPORT_START",
@@ -218,7 +218,7 @@ struct ecore_vf_info *ecore_iov_get_vf_info(struct ecore_hwfn *p_hwfn,
static struct ecore_queue_cid *
ecore_iov_get_vf_rx_queue_cid(struct ecore_vf_queue *p_queue)
{
- int i;
+ u32 i;
for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
if (p_queue->cids[i].p_cid &&
@@ -240,7 +240,7 @@ static bool ecore_iov_validate_queue_mode(struct ecore_vf_info *p_vf,
enum ecore_iov_validate_q_mode mode,
bool b_is_tx)
{
- int i;
+ u32 i;
if (mode == ECORE_IOV_VALIDATE_Q_NA)
return true;
@@ -979,10 +979,12 @@ static u8 ecore_iov_alloc_vf_igu_sbs(struct ecore_hwfn *p_hwfn,
ecore_init_cau_sb_entry(p_hwfn, &sb_entry,
p_hwfn->rel_pf_id,
vf->abs_vf_id, 1);
+
ecore_dmae_host2grc(p_hwfn, p_ptt,
(u64)(osal_uintptr_t)&sb_entry,
CAU_REG_SB_VAR_MEMORY +
- p_block->igu_sb_id * sizeof(u64), 2, 0);
+ p_block->igu_sb_id * sizeof(u64), 2,
+ OSAL_NULL /* default parameters */);
}
vf->num_sbs = (u8)num_rx_queues;
@@ -1278,7 +1280,7 @@ static void ecore_iov_lock_vf_pf_channel(struct ecore_hwfn *p_hwfn,
ECORE_MSG_IOV,
"VF[%d]: vf pf channel locked by %s\n",
vf->abs_vf_id,
- ecore_channel_tlvs_string[tlv]);
+ qede_ecore_channel_tlvs_string[tlv]);
else
DP_VERBOSE(p_hwfn,
ECORE_MSG_IOV,
@@ -1296,7 +1298,7 @@ static void ecore_iov_unlock_vf_pf_channel(struct ecore_hwfn *p_hwfn,
ECORE_MSG_IOV,
"VF[%d]: vf pf channel unlocked by %s\n",
vf->abs_vf_id,
- ecore_channel_tlvs_string[expected_tlv]);
+ qede_ecore_channel_tlvs_string[expected_tlv]);
else
DP_VERBOSE(p_hwfn,
ECORE_MSG_IOV,
@@ -1336,7 +1338,7 @@ void ecore_dp_tlv_list(struct ecore_hwfn *p_hwfn, void *tlvs_list)
if (ecore_iov_tlv_supported(tlv->type))
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
"TLV number %d: type %s, length %d\n",
- i, ecore_channel_tlvs_string[tlv->type],
+ i, qede_ecore_channel_tlvs_string[tlv->type],
tlv->length);
else
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
@@ -1968,7 +1970,8 @@ ecore_iov_configure_vport_forced(struct ecore_hwfn *p_hwfn,
return ECORE_INVAL;
if ((events & (1 << MAC_ADDR_FORCED)) ||
- p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change) {
+ p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change ||
+ p_vf->p_vf_info.is_trusted_configured) {
/* Since there's no way [currently] of removing the MAC,
* we can always assume this means we need to force it.
*/
@@ -1989,7 +1992,8 @@ ecore_iov_configure_vport_forced(struct ecore_hwfn *p_hwfn,
return rc;
}
- if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change)
+ if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change ||
+ p_vf->p_vf_info.is_trusted_configured)
p_vf->configured_features |=
1 << VFPF_BULLETIN_MAC_ADDR;
else
@@ -2085,8 +2089,8 @@ static void ecore_iov_vf_mbx_start_vport(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_vf_info *vf)
{
- struct ecore_sp_vport_start_params params = { 0 };
struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
+ struct ecore_sp_vport_start_params params;
struct vfpf_vport_start_tlv *start;
u8 status = PFVF_STATUS_SUCCESS;
struct ecore_vf_info *vf_info;
@@ -2137,6 +2141,7 @@ static void ecore_iov_vf_mbx_start_vport(struct ecore_hwfn *p_hwfn,
*p_bitmap |= 1 << VFPF_BULLETIN_UNTAGGED_DEFAULT;
}
+ OSAL_MEMSET(&params, 0, sizeof(struct ecore_sp_vport_start_params));
params.tpa_mode = start->tpa_mode;
params.remove_inner_vlan = start->inner_vlan_removal;
params.tx_switching = true;
@@ -2156,7 +2161,9 @@ static void ecore_iov_vf_mbx_start_vport(struct ecore_hwfn *p_hwfn,
params.vport_id = vf->vport_id;
params.max_buffers_per_cqe = start->max_buffers_per_cqe;
params.mtu = vf->mtu;
- params.check_mac = true;
+
+ /* Non trusted VFs should enable control frame filtering */
+ params.check_mac = !vf->p_vf_info.is_trusted_configured;
rc = ecore_sp_eth_vport_start(p_hwfn, &params);
if (rc != ECORE_SUCCESS) {
@@ -2912,7 +2919,7 @@ void *ecore_iov_search_list_tlvs(struct ecore_hwfn *p_hwfn,
if (p_tlv->type == req_type) {
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
"Extended tlv type %s, length %d found\n",
- ecore_channel_tlvs_string[p_tlv->type],
+ qede_ecore_channel_tlvs_string[p_tlv->type],
p_tlv->length);
return p_tlv;
}
@@ -3351,6 +3358,15 @@ ecore_iov_vf_update_mac_shadow(struct ecore_hwfn *p_hwfn,
if (p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED))
return ECORE_SUCCESS;
+ /* Since we don't have the implementation of the logic for removing
+ * a forced MAC and restoring shadow MAC, let's not worry about
+ * processing shadow copies of MAC as long as VF trust mode is ON,
+ * to keep things simple.
+ */
+ if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change ||
+ p_vf->p_vf_info.is_trusted_configured)
+ return ECORE_SUCCESS;
+
/* First remove entries and then add new ones */
if (p_params->opcode == ECORE_FILTER_REMOVE) {
for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++) {
@@ -3653,7 +3669,7 @@ static void ecore_iov_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn,
struct ecore_queue_cid *p_cid;
u16 rx_coal, tx_coal;
u16 qid;
- int i;
+ u32 i;
req = &mbx->req_virt->update_coalesce;
@@ -3733,7 +3749,8 @@ ecore_iov_pf_configure_vf_queue_coalesce(struct ecore_hwfn *p_hwfn,
struct ecore_queue_cid *p_cid;
struct ecore_vf_info *vf;
struct ecore_ptt *p_ptt;
- int i, rc = 0;
+ int rc = 0;
+ u32 i;
if (!ecore_iov_is_valid_vfid(p_hwfn, vf_id, true, true)) {
DP_NOTICE(p_hwfn, true,
@@ -4415,17 +4432,23 @@ void ecore_iov_bulletin_set_forced_mac(struct ecore_hwfn *p_hwfn,
return;
}
- if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change)
+ if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change ||
+ vf_info->p_vf_info.is_trusted_configured) {
feature = 1 << VFPF_BULLETIN_MAC_ADDR;
- else
+ /* Trust mode will disable Forced MAC */
+ vf_info->bulletin.p_virt->valid_bitmap &=
+ ~(1 << MAC_ADDR_FORCED);
+ } else {
feature = 1 << MAC_ADDR_FORCED;
+ /* Forced MAC will disable MAC_ADDR */
+ vf_info->bulletin.p_virt->valid_bitmap &=
+ ~(1 << VFPF_BULLETIN_MAC_ADDR);
+ }
- OSAL_MEMCPY(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);
+ OSAL_MEMCPY(vf_info->bulletin.p_virt->mac,
+ mac, ETH_ALEN);
vf_info->bulletin.p_virt->valid_bitmap |= feature;
- /* Forced MAC will disable MAC_ADDR */
- vf_info->bulletin.p_virt->valid_bitmap &=
- ~(1 << VFPF_BULLETIN_MAC_ADDR);
ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);
}
@@ -4460,7 +4483,8 @@ enum _ecore_status_t ecore_iov_bulletin_set_mac(struct ecore_hwfn *p_hwfn,
vf_info->bulletin.p_virt->valid_bitmap |= feature;
- if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change)
+ if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change ||
+ vf_info->p_vf_info.is_trusted_configured)
ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);
return ECORE_SUCCESS;
@@ -4780,6 +4804,32 @@ enum _ecore_status_t ecore_iov_configure_tx_rate(struct ecore_hwfn *p_hwfn,
p_link->speed);
}
+enum _ecore_status_t ecore_iov_configure_min_tx_rate(struct ecore_dev *p_dev,
+ int vfid, u32 rate)
+{
+ struct ecore_vf_info *vf;
+ int i;
+
+ for_each_hwfn(p_dev, i) {
+ struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+
+ if (!ecore_iov_pf_sanity_check(p_hwfn, vfid)) {
+ DP_NOTICE(p_hwfn, true,
+ "SR-IOV sanity check failed, can't set min rate\n");
+ return ECORE_INVAL;
+ }
+ }
+
+ vf = ecore_iov_get_vf_info(ECORE_LEADING_HWFN(p_dev), (u16)vfid, true);
+ if (!vf) {
+ DP_NOTICE(p_dev, true,
+ "Getting vf info failed, can't set min rate\n");
+ return ECORE_INVAL;
+ }
+
+ return ecore_configure_vport_wfq(p_dev, vf->vport_id, rate);
+}
+
enum _ecore_status_t ecore_iov_get_vf_stats(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
int vfid,
@@ -4890,7 +4940,7 @@ bool ecore_iov_is_vf_started(struct ecore_hwfn *p_hwfn,
return (p_vf->state != VF_FREE && p_vf->state != VF_STOPPED);
}
-enum _ecore_status_t
+int
ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid)
{
struct ecore_wfq_data *vf_vp_wfq;
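
A short sketch of the bulletin-flag rule the ecore_sriov.c changes above converge on: a trusted VF (or one with allow_vf_mac_change) advertises a plain MAC address and clears the forced bit, while an untrusted VF gets a forced MAC and clears the plain bit. The bit positions below are illustrative placeholders, not the driver's real MAC_ADDR_FORCED/VFPF_BULLETIN_MAC_ADDR encodings.

/* Illustrative placeholders for the two bulletin bits. */
#define BULLETIN_MAC_ADDR	(1u << 0)
#define BULLETIN_MAC_FORCED	(1u << 1)

static unsigned int set_bulletin_mac_flags(unsigned int valid_bitmap, int trusted)
{
	if (trusted) {
		/* Trust mode disables Forced MAC. */
		valid_bitmap |= BULLETIN_MAC_ADDR;
		valid_bitmap &= ~BULLETIN_MAC_FORCED;
	} else {
		/* Forced MAC disables MAC_ADDR. */
		valid_bitmap |= BULLETIN_MAC_FORCED;
		valid_bitmap &= ~BULLETIN_MAC_ADDR;
	}
	return valid_bitmap;
}
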
diff --git a/drivers/net/qede/base/ecore_vf.c b/drivers/net/qede/base/ecore_vf.c
index d2213f79..3ba6a0cf 100644
--- a/drivers/net/qede/base/ecore_vf.c
+++ b/drivers/net/qede/base/ecore_vf.c
@@ -32,7 +32,7 @@ static void *ecore_vf_pf_prep(struct ecore_hwfn *p_hwfn, u16 type, u16 length)
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
"preparing to send %s tlv over vf pf channel\n",
- ecore_channel_tlvs_string[type]);
+ qede_ecore_channel_tlvs_string[type]);
/* Reset Request offset */
p_iov->offset = (u8 *)(p_iov->vf2pf_request);
@@ -565,13 +565,20 @@ enum _ecore_status_t ecore_vf_hw_prepare(struct ecore_hwfn *p_hwfn)
phys,
p_iov->bulletin.
size);
+ if (!p_iov->bulletin.p_virt) {
+ DP_NOTICE(p_hwfn, false, "Failed to alloc bulletin memory\n");
+ goto free_pf2vf_reply;
+ }
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
"VF's bulletin Board [%p virt 0x%lx phys 0x%08x bytes]\n",
p_iov->bulletin.p_virt, (unsigned long)p_iov->bulletin.phys,
p_iov->bulletin.size);
#ifdef CONFIG_ECORE_LOCK_ALLOC
- OSAL_MUTEX_ALLOC(p_hwfn, &p_iov->mutex);
+ if (OSAL_MUTEX_ALLOC(p_hwfn, &p_iov->mutex)) {
+ DP_NOTICE(p_hwfn, false, "Failed to allocate p_iov->mutex\n");
+ goto free_bulletin_mem;
+ }
#endif
OSAL_MUTEX_INIT(&p_iov->mutex);
@@ -609,6 +616,16 @@ enum _ecore_status_t ecore_vf_hw_prepare(struct ecore_hwfn *p_hwfn)
return rc;
+#ifdef CONFIG_ECORE_LOCK_ALLOC
+free_bulletin_mem:
+ OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_iov->bulletin.p_virt,
+ p_iov->bulletin.phys,
+ p_iov->bulletin.size);
+#endif
+free_pf2vf_reply:
+ OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_iov->pf2vf_reply,
+ p_iov->pf2vf_reply_phys,
+ sizeof(union pfvf_tlvs));
free_vf2pf_request:
OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_iov->vf2pf_request,
p_iov->vf2pf_request_phys,
@@ -1167,7 +1184,7 @@ ecore_vf_handle_vp_update_is_needed(struct ecore_hwfn *p_hwfn,
return !!p_data->sge_tpa_params;
default:
DP_INFO(p_hwfn, "Unexpected vport-update TLV[%d] %s\n",
- tlv, ecore_channel_tlvs_string[tlv]);
+ tlv, qede_ecore_channel_tlvs_string[tlv]);
return false;
}
}
@@ -1191,7 +1208,7 @@ ecore_vf_handle_vp_update_tlvs_resp(struct ecore_hwfn *p_hwfn,
if (p_resp && p_resp->hdr.status)
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
"TLV[%d] type %s Configuration %s\n",
- tlv, ecore_channel_tlvs_string[tlv],
+ tlv, qede_ecore_channel_tlvs_string[tlv],
(p_resp && p_resp->hdr.status) ? "succeeded"
: "failed");
}
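
The ecore_vf.c hunks above add failure checks for the bulletin and mutex allocations and unwind through labels in reverse allocation order. A bare-bones sketch of that unwind shape, using plain malloc/free as hypothetical stand-ins for the OSAL DMA helpers:

#include <stdlib.h>

struct iov_bufs {
	void *req;
	void *reply;
	void *bulletin;
};

/* Sketch only: later allocations are freed first on failure. */
static int iov_bufs_alloc(struct iov_bufs *b)
{
	b->req = malloc(64);
	if (!b->req)
		return -1;

	b->reply = malloc(64);
	if (!b->reply)
		goto free_req;

	b->bulletin = malloc(64);
	if (!b->bulletin)
		goto free_reply;

	return 0;

free_reply:
	free(b->reply);
free_req:
	free(b->req);
	return -1;
}
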
diff --git a/drivers/net/qede/base/ecore_vfpf_if.h b/drivers/net/qede/base/ecore_vfpf_if.h
index c30677ab..c7ecb01c 100644
--- a/drivers/net/qede/base/ecore_vfpf_if.h
+++ b/drivers/net/qede/base/ecore_vfpf_if.h
@@ -698,6 +698,6 @@ enum {
/*!!!!! Make sure to update STRINGS structure accordingly !!!!!*/
};
-extern const char *ecore_channel_tlvs_string[];
+extern const char *qede_ecore_channel_tlvs_string[];
#endif /* __ECORE_VF_PF_IF_H__ */
diff --git a/drivers/net/qede/base/eth_common.h b/drivers/net/qede/base/eth_common.h
index abfa6854..9a401ed4 100644
--- a/drivers/net/qede/base/eth_common.h
+++ b/drivers/net/qede/base/eth_common.h
@@ -178,6 +178,11 @@ struct eth_tx_1st_bd_flags {
#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT 6
/* Recalculate Tunnel UDP/GRE Checksum (Depending on Tunnel Type) */
#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK 0x1
+/* Recalculate Tunnel UDP/GRE Checksum (Depending on Tunnel Type). In case of
+ * GRE tunnel, this flag means GRE CSO, and in this case GRE checksum field
+ * Must be present.
+ */
+#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK 0x1
#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT 7
};
diff --git a/drivers/net/qede/base/mcp_public.h b/drivers/net/qede/base/mcp_public.h
index 81aa88e7..13c2e2d1 100644
--- a/drivers/net/qede/base/mcp_public.h
+++ b/drivers/net/qede/base/mcp_public.h
@@ -1258,6 +1258,17 @@ struct public_drv_mb {
*/
#define DRV_MSG_GET_RESOURCE_ALLOC_MSG 0x34000000
#define DRV_MSG_SET_RESOURCE_VALUE_MSG 0x35000000
+#define DRV_MSG_CODE_OV_UPDATE_WOL 0x38000000
+#define DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE 0x39000000
+#define DRV_MSG_CODE_S_TAG_UPDATE_ACK 0x3b000000
+#define DRV_MSG_CODE_OEM_UPDATE_FCOE_CVID 0x3c000000
+#define DRV_MSG_CODE_OEM_UPDATE_FCOE_FABRIC_NAME 0x3d000000
+#define DRV_MSG_CODE_OEM_UPDATE_BOOT_CFG 0x3e000000
+#define DRV_MSG_CODE_OEM_RESET_TO_DEFAULT 0x3f000000
+#define DRV_MSG_CODE_OV_GET_CURR_CFG 0x40000000
+#define DRV_MSG_CODE_GET_OEM_UPDATES 0x41000000
+/* params [31:8] - reserved, [7:0] - bitmap */
+#define DRV_MSG_CODE_GET_PPFID_BITMAP 0x43000000
/*deprecated don't use*/
#define DRV_MSG_CODE_INITIATE_FLR_DEPRECATED 0x02000000
@@ -1467,6 +1478,7 @@ struct public_drv_mb {
/* Param: Password len. Union: Plain Password */
#define DRV_MSG_CODE_ENCRYPT_PASSWORD 0x00360000
+#define DRV_MSG_CODE_GET_ENGINE_CONFIG 0x00370000 /* Param: None */
#define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff
@@ -1582,6 +1594,16 @@ struct public_drv_mb {
#define DRV_MB_PARAM_OV_MTU_SIZE_OFFSET 0
#define DRV_MB_PARAM_OV_MTU_SIZE_MASK 0xFFFFFFFF
+#define DRV_MB_PARAM_ESWITCH_MODE_MASK (DRV_MB_PARAM_ESWITCH_MODE_NONE | \
+ DRV_MB_PARAM_ESWITCH_MODE_VEB | \
+ DRV_MB_PARAM_ESWITCH_MODE_VEPA)
+#define DRV_MB_PARAM_ESWITCH_MODE_NONE 0x0
+#define DRV_MB_PARAM_ESWITCH_MODE_VEB 0x1
+#define DRV_MB_PARAM_ESWITCH_MODE_VEPA 0x2
+
+#define DRV_MB_PARAM_DUMMY_OEM_UPDATES_MASK 0x1
+#define DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET 0
+
#define DRV_MB_PARAM_SET_LED_MODE_OPER 0x0
#define DRV_MB_PARAM_SET_LED_MODE_ON 0x1
#define DRV_MB_PARAM_SET_LED_MODE_OFF 0x2
@@ -1677,6 +1699,8 @@ struct public_drv_mb {
#define FW_MSG_CODE_RESOURCE_ALLOC_UNKNOWN 0x35000000
#define FW_MSG_CODE_RESOURCE_ALLOC_DEPRECATED 0x36000000
#define FW_MSG_CODE_RESOURCE_ALLOC_GEN_ERR 0x37000000
+#define FW_MSG_CODE_GET_OEM_UPDATES_DONE 0x41000000
+
#define FW_MSG_CODE_NIG_DRAIN_DONE 0x30000000
#define FW_MSG_CODE_VF_DISABLED_DONE 0xb0000000
#define FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE 0xb0010000
@@ -1778,11 +1802,31 @@ struct public_drv_mb {
#define FW_MB_PARAM_FEATURE_SUPPORT_EEE 0x00000002
/* MFW supports DRV_LOAD Timeout */
#define FW_MB_PARAM_FEATURE_SUPPORT_DRV_LOAD_TO 0x00000004
+/* MFW support complete IGU cleanup upon FLR */
+#define FW_MB_PARAM_FEATURE_SUPPORT_IGU_CLEANUP 0x00000080
/* MFW supports virtual link */
#define FW_MB_PARAM_FEATURE_SUPPORT_VLINK 0x00010000
#define FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR (1 << 0)
+#define FW_MB_PARAM_OEM_UPDATE_MASK 0xFF
+#define FW_MB_PARAM_OEM_UPDATE_OFFSET 0
+#define FW_MB_PARAM_OEM_UPDATE_BW 0x01
+#define FW_MB_PARAM_OEM_UPDATE_S_TAG 0x02
+#define FW_MB_PARAM_OEM_UPDATE_CFG 0x04
+
+#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID_MASK 0x00000001
+#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID_OFFSET 0
+#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE_MASK 0x00000002
+#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE_OFFSET 1
+#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID_MASK 0x00000004
+#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID_OFFSET 2
+#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE_MASK 0x00000008
+#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE_OFFSET 3
+
+#define FW_MB_PARAM_PPFID_BITMAP_MASK 0xFF
+#define FW_MB_PARAM_PPFID_BITMAP_OFFSET 0
+
u32 drv_pulse_mb;
#define DRV_PULSE_SEQ_MASK 0x00007fff
#define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000
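
The new FW_MB_PARAM_* defines above come as MASK/OFFSET pairs; the intended read pattern is the usual mask-and-shift, roughly as in this self-contained sketch (FIELD_GET is a local stand-in here, not the driver's own accessor macro):

#include <stdint.h>
#include <stdio.h>

#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE_MASK   0x00000008
#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE_OFFSET 3

/* Local stand-in for a mask-and-shift field accessor. */
#define FIELD_GET(val, mask, offset) (((val) & (mask)) >> (offset))

int main(void)
{
	uint32_t fw_param = 0x0000000f; /* pretend GET_ENGINE_CONFIG response */

	printf("L2 affinity value: %u\n",
	       (unsigned int)FIELD_GET(fw_param,
				       FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE_MASK,
				       FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE_OFFSET));
	return 0;
}
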
diff --git a/drivers/net/qede/base/meson.build b/drivers/net/qede/base/meson.build
new file mode 100644
index 00000000..71b89737
--- /dev/null
+++ b/drivers/net/qede/base/meson.build
@@ -0,0 +1,60 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Luca Boccassi <bluca@debian.org>
+
+sources = [
+ 'bcm_osal.c',
+ 'ecore_cxt.c',
+ 'ecore_dcbx.c',
+ 'ecore_dev.c',
+ 'ecore_hw.c',
+ 'ecore_init_fw_funcs.c',
+ 'ecore_init_ops.c',
+ 'ecore_int.c',
+ 'ecore_l2.c',
+ 'ecore_mcp.c',
+ 'ecore_sp_commands.c',
+ 'ecore_spq.c',
+ 'ecore_sriov.c',
+ 'ecore_vf.c',
+]
+
+
+error_cflags = [
+ '-Wno-unused-parameter',
+ '-Wno-sign-compare',
+ '-Wno-missing-prototypes',
+ '-Wno-cast-qual',
+ '-Wno-unused-function',
+ '-Wno-unused-variable',
+ '-Wno-strict-aliasing',
+ '-Wno-missing-prototypes',
+ '-Wno-unused-value',
+ '-Wno-format-nonliteral',
+ '-Wno-shift-negative-value',
+ '-Wno-unused-but-set-variable',
+ '-Wno-missing-declarations',
+ '-Wno-maybe-uninitialized',
+ '-Wno-strict-prototypes',
+ '-Wno-shift-negative-value',
+ '-Wno-implicit-fallthrough',
+ '-Wno-format-extra-args',
+ '-Wno-visibility',
+ '-Wno-empty-body',
+ '-Wno-invalid-source-encoding',
+ '-Wno-sometimes-uninitialized',
+ '-Wno-pointer-bool-conversion',
+]
+c_args = cflags
+if allow_experimental_apis
+ c_args += '-DALLOW_EXPERIMENTAL_API'
+endif
+foreach flag: error_cflags
+ if cc.has_argument(flag)
+ c_args += flag
+ endif
+endforeach
+
+base_lib = static_library('qede_base', sources,
+ dependencies: static_rte_net,
+ c_args: c_args)
+base_objs = base_lib.extract_all_objects()
diff --git a/drivers/net/qede/base/reg_addr.h b/drivers/net/qede/base/reg_addr.h
index 402f6204..be59f773 100644
--- a/drivers/net/qede/base/reg_addr.h
+++ b/drivers/net/qede/base/reg_addr.h
@@ -8,13 +8,13 @@
0
#define CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE ( \
- 0xfff << 0)
+ 0xfffUL << 0)
#define CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE_SHIFT \
12
#define CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE ( \
- 0xfff << 12)
+ 0xfffUL << 12)
#define CDU_REG_CID_ADDR_PARAMS_NCIB_SHIFT \
24
@@ -322,6 +322,21 @@
0x180820UL
#define IGU_REG_ATTN_MSG_ADDR_H \
0x180824UL
+#define IGU_REG_LEADING_EDGE_LATCH \
+ 0x18082cUL
+#define IGU_REG_TRAILING_EDGE_LATCH \
+ 0x180830UL
+#define IGU_REG_ATTENTION_ACK_BITS \
+ 0x180838UL
+#define IGU_REG_PBA_STS_PF \
+ 0x180d20UL
+#define IGU_REG_PF_FUNCTIONAL_CLEANUP \
+ 0x181210UL
+#define IGU_REG_STATISTIC_NUM_OF_INTA_ASSERTED \
+ 0x18042cUL
+#define IGU_REG_PBA_STS_PF_SIZE 5
+#define IGU_REG_PBA_STS_PF \
+ 0x180d20UL
#define MISC_REG_AEU_GENERAL_ATTN_0 \
0x008400UL
#define CAU_REG_SB_ADDR_MEMORY \
@@ -351,9 +366,9 @@
#define IGU_REG_COMMAND_REG_CTRL \
0x180848UL
#define IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN ( \
- 0x1 << 1)
+ 0x1UL << 1)
#define IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN ( \
- 0x1 << 0)
+ 0x1UL << 0)
#define IGU_REG_MAPPING_MEMORY \
0x184000UL
#define MISCS_REG_GENERIC_POR_0 \
@@ -361,7 +376,7 @@
#define MCP_REG_NVM_CFG4 \
0xe0642cUL
#define MCP_REG_NVM_CFG4_FLASH_SIZE ( \
- 0x7 << 0)
+ 0x7UL << 0)
#define MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT \
0
#define CCFC_REG_STRONG_ENABLE_VF 0x2e070cUL
@@ -394,7 +409,7 @@
#define XMAC_REG_TX_CTRL_LO 0x210020UL
#define XMAC_REG_CTRL 0x210000UL
#define XMAC_REG_RX_CTRL 0x210030UL
-#define XMAC_REG_RX_CTRL_PROCESS_VARIABLE_PREAMBLE (0x1 << 12)
+#define XMAC_REG_RX_CTRL_PROCESS_VARIABLE_PREAMBLE (0x1UL << 12)
#define MISC_REG_CLK_100G_MODE 0x008c10UL
#define MISC_REG_OPTE_MODE 0x008c0cUL
#define NIG_REG_LLH_ENG_CLS_TCP_4_TUPLE_SEARCH 0x501b84UL
@@ -424,16 +439,16 @@
#define NIG_REG_LLH_FUNC_FILTER_EN 0x501a80UL
#define NIG_REG_LLH_FUNC_FILTER_EN_SIZE 16
#define NIG_REG_LLH_FUNC_FILTER_VALUE 0x501a00UL
-#define XMAC_REG_CTRL_TX_EN (0x1 << 0)
-#define XMAC_REG_CTRL_RX_EN (0x1 << 1)
+#define XMAC_REG_CTRL_TX_EN (0x1UL << 0)
+#define XMAC_REG_CTRL_RX_EN (0x1UL << 1)
#define CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE (0xffUL << 24) /* @DPDK */
-#define CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE (0xff << 16)
+#define CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE (0xffUL << 16)
#define CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE_SHIFT 16
-#define CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE (0xff << 16)
+#define CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE (0xffUL << 16)
#define CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE (0xffUL << 24) /* @DPDK */
-#define CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK (0xfff << 0)
+#define CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK (0xfffUL << 0)
#define CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK_SHIFT 0
-#define CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK (0xfff << 0)
+#define CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK (0xfffUL << 0)
#define CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK_SHIFT 0
#define PSWRQ2_REG_ILT_MEMORY 0x260000UL
#define QM_REG_WFQPFWEIGHT 0x2f4e80UL
@@ -521,7 +536,7 @@
#define MISC_REG_AEU_GENERAL_ATTN_35 0x00848cUL
#define MCP_REG_CPU_STATE 0xe05004UL
#define MCP_REG_CPU_MODE 0xe05000UL
-#define MCP_REG_CPU_MODE_SOFT_HALT (0x1 << 10)
+#define MCP_REG_CPU_MODE_SOFT_HALT (0x1UL << 10)
#define MCP_REG_CPU_EVENT_MASK 0xe05008UL
#define PSWHST_REG_VF_DISABLED_ERROR_VALID 0x2a0060UL
#define PSWHST_REG_VF_DISABLED_ERROR_ADDRESS 0x2a0064UL
@@ -550,15 +565,15 @@
#define PGLUE_B_REG_VF_ILT_ERR_ADD_63_32 0x2aae78UL
#define PGLUE_B_REG_VF_ILT_ERR_DETAILS 0x2aae7cUL
#define PGLUE_B_REG_LATCHED_ERRORS_CLR 0x2aa3bcUL
-#define NIG_REG_INT_MASK_3_P0_LB_TC1_PAUSE_TOO_LONG_INT (0x1 << 10)
+#define NIG_REG_INT_MASK_3_P0_LB_TC1_PAUSE_TOO_LONG_INT (0x1UL << 10)
#define DORQ_REG_DB_DROP_REASON 0x100a2cUL
#define DORQ_REG_DB_DROP_DETAILS 0x100a24UL
#define TM_REG_INT_STS_1 0x2c0190UL
-#define TM_REG_INT_STS_1_PEND_TASK_SCAN (0x1 << 6)
-#define TM_REG_INT_STS_1_PEND_CONN_SCAN (0x1 << 5)
+#define TM_REG_INT_STS_1_PEND_TASK_SCAN (0x1UL << 6)
+#define TM_REG_INT_STS_1_PEND_CONN_SCAN (0x1UL << 5)
#define TM_REG_INT_MASK_1 0x2c0194UL
-#define TM_REG_INT_MASK_1_PEND_CONN_SCAN (0x1 << 5)
-#define TM_REG_INT_MASK_1_PEND_TASK_SCAN (0x1 << 6)
+#define TM_REG_INT_MASK_1_PEND_CONN_SCAN (0x1UL << 5)
+#define TM_REG_INT_MASK_1_PEND_TASK_SCAN (0x1UL << 6)
#define MISC_REG_AEU_AFTER_INVERT_1_IGU 0x0087b4UL
#define MISC_REG_AEU_ENABLE4_IGU_OUT_0 0x0084a8UL
#define MISC_REG_AEU_ENABLE3_IGU_OUT_0 0x0084a4UL
@@ -1172,10 +1187,10 @@
#define XMAC_REG_RX_MAX_SIZE_BB 0x210040UL
#define XMAC_REG_TX_CTRL_LO_BB 0x210020UL
#define XMAC_REG_CTRL_BB 0x210000UL
-#define XMAC_REG_CTRL_TX_EN_BB (0x1 << 0)
-#define XMAC_REG_CTRL_RX_EN_BB (0x1 << 1)
+#define XMAC_REG_CTRL_TX_EN_BB (0x1UL << 0)
+#define XMAC_REG_CTRL_RX_EN_BB (0x1UL << 1)
#define XMAC_REG_RX_CTRL_BB 0x210030UL
-#define XMAC_REG_RX_CTRL_PROCESS_VARIABLE_PREAMBLE_BB (0x1 << 12)
+#define XMAC_REG_RX_CTRL_PROCESS_VARIABLE_PREAMBLE_BB (0x1UL << 12)
#define PGLUE_B_REG_PGL_ADDR_E8_F0_K2_E5 0x2aaf98UL
#define PGLUE_B_REG_PGL_ADDR_EC_F0_K2_E5 0x2aaf9cUL
@@ -1202,15 +1217,26 @@
#define DORQ_REG_DPM_FORCE_ABORT 0x1009d8UL
#define DORQ_REG_PF_OVFL_STICKY 0x1009d0UL
#define DORQ_REG_INT_STS 0x100180UL
- #define DORQ_REG_INT_STS_DB_DROP (0x1 << 1)
- #define DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR (0x1 << 2)
- #define DORQ_REG_INT_STS_DORQ_FIFO_AFULL (0x1 << 3)
+ #define DORQ_REG_INT_STS_DB_DROP (0x1UL << 1)
+ #define DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR (0x1UL << 2)
+ #define DORQ_REG_INT_STS_DORQ_FIFO_AFULL (0x1UL << 3)
#define DORQ_REG_DB_DROP_DETAILS_REL 0x100a28UL
#define DORQ_REG_INT_STS_WR 0x100188UL
#define DORQ_REG_DB_DROP_DETAILS_REASON 0x100a20UL
#define MCP_REG_CPU_PROGRAM_COUNTER 0xe0501cUL
- #define MCP_REG_CPU_STATE_SOFT_HALTED (0x1 << 10)
+ #define MCP_REG_CPU_STATE_SOFT_HALTED (0x1UL << 10)
#define PRS_REG_SEARCH_TENANT_ID 0x1f044cUL
#define PGLUE_B_REG_VF_BAR1_SIZE 0x2aae68UL
#define RSS_REG_RSS_RAM_MASK 0x238c10UL
+
+#define NIG_REG_LLH_FUNC_TAG_EN 0x5019b0UL
+#define NIG_REG_LLH_FUNC_TAG_VALUE 0x5019d0UL
+#define DORQ_REG_TAG1_OVRD_MODE 0x1008b4UL
+#define DORQ_REG_PF_PCP_BB_K2 0x1008c4UL
+#define DORQ_REG_PF_EXT_VID_BB_K2 0x1008c8UL
+#define PRS_REG_SEARCH_NON_IP_AS_GFT 0x1f11c0UL
+#define NIG_REG_LLH_PPFID2PFID_TBL_0 0x501970UL
+#define NIG_REG_PPF_TO_ENGINE_SEL 0x508900UL
+#define NIG_REG_LLH_ENG_CLS_ROCE_QP_SEL 0x501b98UL
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_BB_K2 0x501b40UL
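
Much of the reg_addr.h churn above simply appends a UL suffix to the mask constants, presumably to keep them unsigned so that shifts near bit 31 and later widening to 64-bit register values behave predictably. A small standalone illustration of the difference (not taken from the driver):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* With a plain int constant the shift can reach the sign bit; the
	 * result sign-extends (and is formally undefined) when widened.
	 */
	uint64_t plain = (uint64_t)(0xff << 24);
	/* The UL form stays unsigned and zero-extends as expected. */
	uint64_t ul = (uint64_t)(0xffUL << 24);

	printf("plain: %#llx, UL: %#llx\n",
	       (unsigned long long)plain, (unsigned long long)ul);
	return 0;
}
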
diff --git a/drivers/net/qede/meson.build b/drivers/net/qede/meson.build
new file mode 100644
index 00000000..12388a68
--- /dev/null
+++ b/drivers/net/qede/meson.build
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Luca Boccassi <bluca@debian.org>
+
+subdir('base')
+objs = [base_objs]
+
+sources = files(
+ 'qede_ethdev.c',
+ 'qede_filter.c',
+ 'qede_main.c',
+ 'qede_rxtx.c',
+)
diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
index df52ea92..518673dc 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
@@ -14,112 +14,10 @@ int qede_logtype_init;
int qede_logtype_driver;
static const struct qed_eth_ops *qed_ops;
-#define QEDE_SP_TIMER_PERIOD 10000 /* 100ms */
+static int qede_eth_dev_uninit(struct rte_eth_dev *eth_dev);
+static int qede_eth_dev_init(struct rte_eth_dev *eth_dev);
-/* VXLAN tunnel classification mapping */
-const struct _qede_udp_tunn_types {
- uint16_t rte_filter_type;
- enum ecore_filter_ucast_type qede_type;
- enum ecore_tunn_clss qede_tunn_clss;
- const char *string;
-} qede_tunn_types[] = {
- {
- ETH_TUNNEL_FILTER_OMAC,
- ECORE_FILTER_MAC,
- ECORE_TUNN_CLSS_MAC_VLAN,
- "outer-mac"
- },
- {
- ETH_TUNNEL_FILTER_TENID,
- ECORE_FILTER_VNI,
- ECORE_TUNN_CLSS_MAC_VNI,
- "vni"
- },
- {
- ETH_TUNNEL_FILTER_IMAC,
- ECORE_FILTER_INNER_MAC,
- ECORE_TUNN_CLSS_INNER_MAC_VLAN,
- "inner-mac"
- },
- {
- ETH_TUNNEL_FILTER_IVLAN,
- ECORE_FILTER_INNER_VLAN,
- ECORE_TUNN_CLSS_INNER_MAC_VLAN,
- "inner-vlan"
- },
- {
- ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID,
- ECORE_FILTER_MAC_VNI_PAIR,
- ECORE_TUNN_CLSS_MAC_VNI,
- "outer-mac and vni"
- },
- {
- ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IMAC,
- ECORE_FILTER_UNUSED,
- MAX_ECORE_TUNN_CLSS,
- "outer-mac and inner-mac"
- },
- {
- ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IVLAN,
- ECORE_FILTER_UNUSED,
- MAX_ECORE_TUNN_CLSS,
- "outer-mac and inner-vlan"
- },
- {
- ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IMAC,
- ECORE_FILTER_INNER_MAC_VNI_PAIR,
- ECORE_TUNN_CLSS_INNER_MAC_VNI,
- "vni and inner-mac",
- },
- {
- ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IVLAN,
- ECORE_FILTER_UNUSED,
- MAX_ECORE_TUNN_CLSS,
- "vni and inner-vlan",
- },
- {
- ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
- ECORE_FILTER_INNER_PAIR,
- ECORE_TUNN_CLSS_INNER_MAC_VLAN,
- "inner-mac and inner-vlan",
- },
- {
- ETH_TUNNEL_FILTER_OIP,
- ECORE_FILTER_UNUSED,
- MAX_ECORE_TUNN_CLSS,
- "outer-IP"
- },
- {
- ETH_TUNNEL_FILTER_IIP,
- ECORE_FILTER_UNUSED,
- MAX_ECORE_TUNN_CLSS,
- "inner-IP"
- },
- {
- RTE_TUNNEL_FILTER_IMAC_IVLAN,
- ECORE_FILTER_UNUSED,
- MAX_ECORE_TUNN_CLSS,
- "IMAC_IVLAN"
- },
- {
- RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID,
- ECORE_FILTER_UNUSED,
- MAX_ECORE_TUNN_CLSS,
- "IMAC_IVLAN_TENID"
- },
- {
- RTE_TUNNEL_FILTER_IMAC_TENID,
- ECORE_FILTER_UNUSED,
- MAX_ECORE_TUNN_CLSS,
- "IMAC_TENID"
- },
- {
- RTE_TUNNEL_FILTER_OMAC_TENID_IMAC,
- ECORE_FILTER_UNUSED,
- MAX_ECORE_TUNN_CLSS,
- "OMAC_TENID_IMAC"
- },
-};
+#define QEDE_SP_TIMER_PERIOD 10000 /* 100ms */
struct rte_qede_xstats_name_off {
char name[RTE_ETH_XSTATS_NAME_SIZE];
@@ -399,7 +297,7 @@ static void qede_print_adapter_info(struct qede_dev *qdev)
(info->mfw_rev >> 16) & 0xff,
(info->mfw_rev >> 8) & 0xff, (info->mfw_rev) & 0xff);
DP_INFO(edev, " Management Firmware version : %s\n", ver_str);
- DP_INFO(edev, " Firmware file : %s\n", fw_file);
+ DP_INFO(edev, " Firmware file : %s\n", qede_fw_file);
DP_INFO(edev, "*********************************\n");
}
@@ -614,14 +512,6 @@ int qede_enable_tpa(struct rte_eth_dev *eth_dev, bool flg)
return 0;
}
-static void qede_set_ucast_cmn_params(struct ecore_filter_ucast *ucast)
-{
- memset(ucast, 0, sizeof(struct ecore_filter_ucast));
- ucast->is_rx_filter = true;
- ucast->is_tx_filter = true;
- /* ucast->assert_on_error = true; - For debug */
-}
-
static int
qed_configure_filter_rx_mode(struct rte_eth_dev *eth_dev,
enum qed_filter_rx_mode_type type)
@@ -660,167 +550,7 @@ qed_configure_filter_rx_mode(struct rte_eth_dev *eth_dev,
ECORE_SPQ_MODE_CB, NULL);
}
-static int
-qede_tunnel_update(struct qede_dev *qdev,
- struct ecore_tunnel_info *tunn_info)
-{
- struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- enum _ecore_status_t rc = ECORE_INVAL;
- struct ecore_hwfn *p_hwfn;
- struct ecore_ptt *p_ptt;
- int i;
-
- for_each_hwfn(edev, i) {
- p_hwfn = &edev->hwfns[i];
- if (IS_PF(edev)) {
- p_ptt = ecore_ptt_acquire(p_hwfn);
- if (!p_ptt) {
- DP_ERR(p_hwfn, "Can't acquire PTT\n");
- return -EAGAIN;
- }
- } else {
- p_ptt = NULL;
- }
-
- rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt,
- tunn_info, ECORE_SPQ_MODE_CB, NULL);
- if (IS_PF(edev))
- ecore_ptt_release(p_hwfn, p_ptt);
-
- if (rc != ECORE_SUCCESS)
- break;
- }
-
- return rc;
-}
-
-static int
-qede_vxlan_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
- bool enable)
-{
- struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
- struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- enum _ecore_status_t rc = ECORE_INVAL;
- struct ecore_tunnel_info tunn;
-
- if (qdev->vxlan.enable == enable)
- return ECORE_SUCCESS;
-
- memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
- tunn.vxlan.b_update_mode = true;
- tunn.vxlan.b_mode_enabled = enable;
- tunn.b_update_rx_cls = true;
- tunn.b_update_tx_cls = true;
- tunn.vxlan.tun_cls = clss;
-
- tunn.vxlan_port.b_update_port = true;
- tunn.vxlan_port.port = enable ? QEDE_VXLAN_DEF_PORT : 0;
-
- rc = qede_tunnel_update(qdev, &tunn);
- if (rc == ECORE_SUCCESS) {
- qdev->vxlan.enable = enable;
- qdev->vxlan.udp_port = (enable) ? QEDE_VXLAN_DEF_PORT : 0;
- DP_INFO(edev, "vxlan is %s, UDP port = %d\n",
- enable ? "enabled" : "disabled", qdev->vxlan.udp_port);
- } else {
- DP_ERR(edev, "Failed to update tunn_clss %u\n",
- tunn.vxlan.tun_cls);
- }
-
- return rc;
-}
-
-static int
-qede_geneve_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
- bool enable)
-{
- struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
- struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- enum _ecore_status_t rc = ECORE_INVAL;
- struct ecore_tunnel_info tunn;
-
- memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
- tunn.l2_geneve.b_update_mode = true;
- tunn.l2_geneve.b_mode_enabled = enable;
- tunn.ip_geneve.b_update_mode = true;
- tunn.ip_geneve.b_mode_enabled = enable;
- tunn.l2_geneve.tun_cls = clss;
- tunn.ip_geneve.tun_cls = clss;
- tunn.b_update_rx_cls = true;
- tunn.b_update_tx_cls = true;
-
- tunn.geneve_port.b_update_port = true;
- tunn.geneve_port.port = enable ? QEDE_GENEVE_DEF_PORT : 0;
-
- rc = qede_tunnel_update(qdev, &tunn);
- if (rc == ECORE_SUCCESS) {
- qdev->geneve.enable = enable;
- qdev->geneve.udp_port = (enable) ? QEDE_GENEVE_DEF_PORT : 0;
- DP_INFO(edev, "GENEVE is %s, UDP port = %d\n",
- enable ? "enabled" : "disabled", qdev->geneve.udp_port);
- } else {
- DP_ERR(edev, "Failed to update tunn_clss %u\n",
- clss);
- }
-
- return rc;
-}
-
-static int
-qede_ipgre_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
- bool enable)
-{
- struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
- struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- enum _ecore_status_t rc = ECORE_INVAL;
- struct ecore_tunnel_info tunn;
-
- memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
- tunn.ip_gre.b_update_mode = true;
- tunn.ip_gre.b_mode_enabled = enable;
- tunn.ip_gre.tun_cls = clss;
- tunn.ip_gre.tun_cls = clss;
- tunn.b_update_rx_cls = true;
- tunn.b_update_tx_cls = true;
-
- rc = qede_tunnel_update(qdev, &tunn);
- if (rc == ECORE_SUCCESS) {
- qdev->ipgre.enable = enable;
- DP_INFO(edev, "IPGRE is %s\n",
- enable ? "enabled" : "disabled");
- } else {
- DP_ERR(edev, "Failed to update tunn_clss %u\n",
- clss);
- }
-
- return rc;
-}
-
-static int
-qede_tunn_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
- enum rte_eth_tunnel_type tunn_type, bool enable)
-{
- int rc = -EINVAL;
-
- switch (tunn_type) {
- case RTE_TUNNEL_TYPE_VXLAN:
- rc = qede_vxlan_enable(eth_dev, clss, enable);
- break;
- case RTE_TUNNEL_TYPE_GENEVE:
- rc = qede_geneve_enable(eth_dev, clss, enable);
- break;
- case RTE_TUNNEL_TYPE_IP_IN_GRE:
- rc = qede_ipgre_enable(eth_dev, clss, enable);
- break;
- default:
- rc = -EINVAL;
- break;
- }
-
- return rc;
-}
-
-static int
+int
qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
bool add)
{
@@ -941,7 +671,7 @@ static int qede_del_mcast_filters(struct rte_eth_dev *eth_dev)
return 0;
}
-static enum _ecore_status_t
+enum _ecore_status_t
qede_mac_int_ops(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
bool add)
{
@@ -1033,7 +763,7 @@ qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr)
return qede_mac_addr_add(eth_dev, mac_addr, 0, 0);
}
-static void qede_config_accept_any_vlan(struct qede_dev *qdev, bool flg)
+void qede_config_accept_any_vlan(struct qede_dev *qdev, bool flg)
{
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
struct ecore_sp_vport_update_params params;
@@ -1359,7 +1089,7 @@ static void qede_dev_stop(struct rte_eth_dev *eth_dev)
DP_INFO(edev, "Device is stopped\n");
}
-const char *valid_args[] = {
+static const char * const valid_args[] = {
QEDE_NPAR_TX_SWITCHING,
QEDE_VF_TX_SWITCHING,
NULL,
@@ -1483,7 +1213,7 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
eth_dev->data->mtu =
eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
- ETHER_HDR_LEN - ETHER_CRC_LEN;
+ ETHER_HDR_LEN - QEDE_ETH_OVERHEAD;
if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER)
eth_dev->data->scattered_rx = 1;
@@ -1554,7 +1284,6 @@ qede_dev_info_get(struct rte_eth_dev *eth_dev,
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
DEV_RX_OFFLOAD_TCP_LRO |
- DEV_RX_OFFLOAD_CRC_STRIP |
DEV_RX_OFFLOAD_KEEP_CRC |
DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_JUMBO_FRAME |
@@ -1651,14 +1380,11 @@ qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
static void qede_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
-#ifdef RTE_LIBRTE_QEDE_DEBUG_INIT
struct qede_dev *qdev = eth_dev->data->dev_private;
struct ecore_dev *edev = &qdev->edev;
+ enum qed_filter_rx_mode_type type = QED_FILTER_RX_MODE_TYPE_PROMISC;
PMD_INIT_FUNC_TRACE(edev);
-#endif
-
- enum qed_filter_rx_mode_type type = QED_FILTER_RX_MODE_TYPE_PROMISC;
if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
type |= QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
@@ -1668,12 +1394,10 @@ static void qede_promiscuous_enable(struct rte_eth_dev *eth_dev)
static void qede_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
-#ifdef RTE_LIBRTE_QEDE_DEBUG_INIT
struct qede_dev *qdev = eth_dev->data->dev_private;
struct ecore_dev *edev = &qdev->edev;
PMD_INIT_FUNC_TRACE(edev);
-#endif
if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
qed_configure_filter_rx_mode(eth_dev,
@@ -2499,19 +2223,18 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
struct qede_fastpath *fp;
uint32_t max_rx_pkt_len;
uint32_t frame_size;
- uint16_t rx_buf_size;
uint16_t bufsz;
bool restart = false;
- int i;
+ int i, rc;
PMD_INIT_FUNC_TRACE(edev);
qede_dev_info_get(dev, &dev_info);
- max_rx_pkt_len = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
- frame_size = max_rx_pkt_len + QEDE_ETH_OVERHEAD;
+ max_rx_pkt_len = mtu + QEDE_MAX_ETHER_HDR_LEN;
+ frame_size = max_rx_pkt_len;
if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen)) {
DP_ERR(edev, "MTU %u out of range, %u is maximum allowable\n",
mtu, dev_info.max_rx_pktlen - ETHER_HDR_LEN -
- ETHER_CRC_LEN - QEDE_ETH_OVERHEAD);
+ QEDE_ETH_OVERHEAD);
return -EINVAL;
}
if (!dev->data->scattered_rx &&
@@ -2539,14 +2262,15 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
if (fp->rxq != NULL) {
bufsz = (uint16_t)rte_pktmbuf_data_room_size(
fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
- if (dev->data->scattered_rx)
- rx_buf_size = bufsz + ETHER_HDR_LEN +
- ETHER_CRC_LEN + QEDE_ETH_OVERHEAD;
- else
- rx_buf_size = frame_size;
- rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rx_buf_size);
- fp->rxq->rx_buf_size = rx_buf_size;
- DP_INFO(edev, "RX buffer size %u\n", rx_buf_size);
+			/* cache align the mbuf size to simplify rx_buf_size
+ * calculation
+ */
+ bufsz = QEDE_FLOOR_TO_CACHE_LINE_SIZE(bufsz);
+ rc = qede_calc_rx_buf_size(dev, bufsz, frame_size);
+ if (rc < 0)
+ return rc;
+
+ fp->rxq->rx_buf_size = rc;
}
}
if (max_rx_pkt_len > ETHER_MAX_LEN)
@@ -2569,411 +2293,15 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
}
static int
-qede_udp_dst_port_del(struct rte_eth_dev *eth_dev,
- struct rte_eth_udp_tunnel *tunnel_udp)
-{
- struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
- struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- struct ecore_tunnel_info tunn; /* @DPDK */
- uint16_t udp_port;
- int rc;
-
- PMD_INIT_FUNC_TRACE(edev);
-
- memset(&tunn, 0, sizeof(tunn));
-
- switch (tunnel_udp->prot_type) {
- case RTE_TUNNEL_TYPE_VXLAN:
- if (qdev->vxlan.udp_port != tunnel_udp->udp_port) {
- DP_ERR(edev, "UDP port %u doesn't exist\n",
- tunnel_udp->udp_port);
- return ECORE_INVAL;
- }
- udp_port = 0;
-
- tunn.vxlan_port.b_update_port = true;
- tunn.vxlan_port.port = udp_port;
-
- rc = qede_tunnel_update(qdev, &tunn);
- if (rc != ECORE_SUCCESS) {
- DP_ERR(edev, "Unable to config UDP port %u\n",
- tunn.vxlan_port.port);
- return rc;
- }
-
- qdev->vxlan.udp_port = udp_port;
- /* If the request is to delete UDP port and if the number of
- * VXLAN filters have reached 0 then VxLAN offload can be be
- * disabled.
- */
- if (qdev->vxlan.enable && qdev->vxlan.num_filters == 0)
- return qede_vxlan_enable(eth_dev,
- ECORE_TUNN_CLSS_MAC_VLAN, false);
-
- break;
- case RTE_TUNNEL_TYPE_GENEVE:
- if (qdev->geneve.udp_port != tunnel_udp->udp_port) {
- DP_ERR(edev, "UDP port %u doesn't exist\n",
- tunnel_udp->udp_port);
- return ECORE_INVAL;
- }
-
- udp_port = 0;
-
- tunn.geneve_port.b_update_port = true;
- tunn.geneve_port.port = udp_port;
-
- rc = qede_tunnel_update(qdev, &tunn);
- if (rc != ECORE_SUCCESS) {
- DP_ERR(edev, "Unable to config UDP port %u\n",
- tunn.vxlan_port.port);
- return rc;
- }
-
- qdev->vxlan.udp_port = udp_port;
- /* If the request is to delete UDP port and if the number of
- * GENEVE filters have reached 0 then GENEVE offload can be be
- * disabled.
- */
- if (qdev->geneve.enable && qdev->geneve.num_filters == 0)
- return qede_geneve_enable(eth_dev,
- ECORE_TUNN_CLSS_MAC_VLAN, false);
-
- break;
-
- default:
- return ECORE_INVAL;
- }
-
- return 0;
-
-}
-static int
-qede_udp_dst_port_add(struct rte_eth_dev *eth_dev,
- struct rte_eth_udp_tunnel *tunnel_udp)
-{
- struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
- struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- struct ecore_tunnel_info tunn; /* @DPDK */
- uint16_t udp_port;
- int rc;
-
- PMD_INIT_FUNC_TRACE(edev);
-
- memset(&tunn, 0, sizeof(tunn));
-
- switch (tunnel_udp->prot_type) {
- case RTE_TUNNEL_TYPE_VXLAN:
- if (qdev->vxlan.udp_port == tunnel_udp->udp_port) {
- DP_INFO(edev,
- "UDP port %u for VXLAN was already configured\n",
- tunnel_udp->udp_port);
- return ECORE_SUCCESS;
- }
-
- /* Enable VxLAN tunnel with default MAC/VLAN classification if
- * it was not enabled while adding VXLAN filter before UDP port
- * update.
- */
- if (!qdev->vxlan.enable) {
- rc = qede_vxlan_enable(eth_dev,
- ECORE_TUNN_CLSS_MAC_VLAN, true);
- if (rc != ECORE_SUCCESS) {
- DP_ERR(edev, "Failed to enable VXLAN "
- "prior to updating UDP port\n");
- return rc;
- }
- }
- udp_port = tunnel_udp->udp_port;
-
- tunn.vxlan_port.b_update_port = true;
- tunn.vxlan_port.port = udp_port;
-
- rc = qede_tunnel_update(qdev, &tunn);
- if (rc != ECORE_SUCCESS) {
- DP_ERR(edev, "Unable to config UDP port %u for VXLAN\n",
- udp_port);
- return rc;
- }
-
- DP_INFO(edev, "Updated UDP port %u for VXLAN\n", udp_port);
-
- qdev->vxlan.udp_port = udp_port;
- break;
- case RTE_TUNNEL_TYPE_GENEVE:
- if (qdev->geneve.udp_port == tunnel_udp->udp_port) {
- DP_INFO(edev,
- "UDP port %u for GENEVE was already configured\n",
- tunnel_udp->udp_port);
- return ECORE_SUCCESS;
- }
-
- /* Enable GENEVE tunnel with default MAC/VLAN classification if
- * it was not enabled while adding GENEVE filter before UDP port
- * update.
- */
- if (!qdev->geneve.enable) {
- rc = qede_geneve_enable(eth_dev,
- ECORE_TUNN_CLSS_MAC_VLAN, true);
- if (rc != ECORE_SUCCESS) {
- DP_ERR(edev, "Failed to enable GENEVE "
- "prior to updating UDP port\n");
- return rc;
- }
- }
- udp_port = tunnel_udp->udp_port;
-
- tunn.geneve_port.b_update_port = true;
- tunn.geneve_port.port = udp_port;
-
- rc = qede_tunnel_update(qdev, &tunn);
- if (rc != ECORE_SUCCESS) {
- DP_ERR(edev, "Unable to config UDP port %u for GENEVE\n",
- udp_port);
- return rc;
- }
-
- DP_INFO(edev, "Updated UDP port %u for GENEVE\n", udp_port);
-
- qdev->geneve.udp_port = udp_port;
- break;
- default:
- return ECORE_INVAL;
- }
-
- return 0;
-}
-
-static void qede_get_ecore_tunn_params(uint32_t filter, uint32_t *type,
- uint32_t *clss, char *str)
-{
- uint16_t j;
- *clss = MAX_ECORE_TUNN_CLSS;
-
- for (j = 0; j < RTE_DIM(qede_tunn_types); j++) {
- if (filter == qede_tunn_types[j].rte_filter_type) {
- *type = qede_tunn_types[j].qede_type;
- *clss = qede_tunn_types[j].qede_tunn_clss;
- strcpy(str, qede_tunn_types[j].string);
- return;
- }
- }
-}
-
-static int
-qede_set_ucast_tunn_cmn_param(struct ecore_filter_ucast *ucast,
- const struct rte_eth_tunnel_filter_conf *conf,
- uint32_t type)
-{
- /* Init commmon ucast params first */
- qede_set_ucast_cmn_params(ucast);
-
- /* Copy out the required fields based on classification type */
- ucast->type = type;
-
- switch (type) {
- case ECORE_FILTER_VNI:
- ucast->vni = conf->tenant_id;
- break;
- case ECORE_FILTER_INNER_VLAN:
- ucast->vlan = conf->inner_vlan;
- break;
- case ECORE_FILTER_MAC:
- memcpy(ucast->mac, conf->outer_mac.addr_bytes,
- ETHER_ADDR_LEN);
- break;
- case ECORE_FILTER_INNER_MAC:
- memcpy(ucast->mac, conf->inner_mac.addr_bytes,
- ETHER_ADDR_LEN);
- break;
- case ECORE_FILTER_MAC_VNI_PAIR:
- memcpy(ucast->mac, conf->outer_mac.addr_bytes,
- ETHER_ADDR_LEN);
- ucast->vni = conf->tenant_id;
- break;
- case ECORE_FILTER_INNER_MAC_VNI_PAIR:
- memcpy(ucast->mac, conf->inner_mac.addr_bytes,
- ETHER_ADDR_LEN);
- ucast->vni = conf->tenant_id;
- break;
- case ECORE_FILTER_INNER_PAIR:
- memcpy(ucast->mac, conf->inner_mac.addr_bytes,
- ETHER_ADDR_LEN);
- ucast->vlan = conf->inner_vlan;
- break;
- default:
- return -EINVAL;
- }
-
- return ECORE_SUCCESS;
-}
-
-static int
-_qede_tunn_filter_config(struct rte_eth_dev *eth_dev,
- const struct rte_eth_tunnel_filter_conf *conf,
- __attribute__((unused)) enum rte_filter_op filter_op,
- enum ecore_tunn_clss *clss,
- bool add)
-{
- struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
- struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- struct ecore_filter_ucast ucast = {0};
- enum ecore_filter_ucast_type type;
- uint16_t filter_type = 0;
- char str[80];
- int rc;
-
- filter_type = conf->filter_type;
- /* Determine if the given filter classification is supported */
- qede_get_ecore_tunn_params(filter_type, &type, clss, str);
- if (*clss == MAX_ECORE_TUNN_CLSS) {
- DP_ERR(edev, "Unsupported filter type\n");
- return -EINVAL;
- }
- /* Init tunnel ucast params */
- rc = qede_set_ucast_tunn_cmn_param(&ucast, conf, type);
- if (rc != ECORE_SUCCESS) {
- DP_ERR(edev, "Unsupported Tunnel filter type 0x%x\n",
- conf->filter_type);
- return rc;
- }
- DP_INFO(edev, "Rule: \"%s\", op %d, type 0x%x\n",
- str, filter_op, ucast.type);
-
- ucast.opcode = add ? ECORE_FILTER_ADD : ECORE_FILTER_REMOVE;
-
- /* Skip MAC/VLAN if filter is based on VNI */
- if (!(filter_type & ETH_TUNNEL_FILTER_TENID)) {
- rc = qede_mac_int_ops(eth_dev, &ucast, add);
- if ((rc == 0) && add) {
- /* Enable accept anyvlan */
- qede_config_accept_any_vlan(qdev, true);
- }
- } else {
- rc = qede_ucast_filter(eth_dev, &ucast, add);
- if (rc == 0)
- rc = ecore_filter_ucast_cmd(edev, &ucast,
- ECORE_SPQ_MODE_CB, NULL);
- }
-
- return rc;
-}
-
-static int
-qede_tunn_filter_config(struct rte_eth_dev *eth_dev,
- enum rte_filter_op filter_op,
- const struct rte_eth_tunnel_filter_conf *conf)
+qede_dev_reset(struct rte_eth_dev *dev)
{
- struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
- struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- enum ecore_tunn_clss clss = MAX_ECORE_TUNN_CLSS;
- bool add;
- int rc;
-
- PMD_INIT_FUNC_TRACE(edev);
-
- switch (filter_op) {
- case RTE_ETH_FILTER_ADD:
- add = true;
- break;
- case RTE_ETH_FILTER_DELETE:
- add = false;
- break;
- default:
- DP_ERR(edev, "Unsupported operation %d\n", filter_op);
- return -EINVAL;
- }
-
- if (IS_VF(edev))
- return qede_tunn_enable(eth_dev,
- ECORE_TUNN_CLSS_MAC_VLAN,
- conf->tunnel_type, add);
-
- rc = _qede_tunn_filter_config(eth_dev, conf, filter_op, &clss, add);
- if (rc != ECORE_SUCCESS)
- return rc;
-
- if (add) {
- if (conf->tunnel_type == RTE_TUNNEL_TYPE_VXLAN) {
- qdev->vxlan.num_filters++;
- qdev->vxlan.filter_type = conf->filter_type;
- } else { /* GENEVE */
- qdev->geneve.num_filters++;
- qdev->geneve.filter_type = conf->filter_type;
- }
-
- if (!qdev->vxlan.enable || !qdev->geneve.enable ||
- !qdev->ipgre.enable)
- return qede_tunn_enable(eth_dev, clss,
- conf->tunnel_type,
- true);
- } else {
- if (conf->tunnel_type == RTE_TUNNEL_TYPE_VXLAN)
- qdev->vxlan.num_filters--;
- else /*GENEVE*/
- qdev->geneve.num_filters--;
-
- /* Disable VXLAN if VXLAN filters become 0 */
- if ((qdev->vxlan.num_filters == 0) ||
- (qdev->geneve.num_filters == 0))
- return qede_tunn_enable(eth_dev, clss,
- conf->tunnel_type,
- false);
- }
-
- return 0;
-}
+ int ret;
-int qede_dev_filter_ctrl(struct rte_eth_dev *eth_dev,
- enum rte_filter_type filter_type,
- enum rte_filter_op filter_op,
- void *arg)
-{
- struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
- struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- struct rte_eth_tunnel_filter_conf *filter_conf =
- (struct rte_eth_tunnel_filter_conf *)arg;
-
- switch (filter_type) {
- case RTE_ETH_FILTER_TUNNEL:
- switch (filter_conf->tunnel_type) {
- case RTE_TUNNEL_TYPE_VXLAN:
- case RTE_TUNNEL_TYPE_GENEVE:
- case RTE_TUNNEL_TYPE_IP_IN_GRE:
- DP_INFO(edev,
- "Packet steering to the specified Rx queue"
- " is not supported with UDP tunneling");
- return(qede_tunn_filter_config(eth_dev, filter_op,
- filter_conf));
- case RTE_TUNNEL_TYPE_TEREDO:
- case RTE_TUNNEL_TYPE_NVGRE:
- case RTE_L2_TUNNEL_TYPE_E_TAG:
- DP_ERR(edev, "Unsupported tunnel type %d\n",
- filter_conf->tunnel_type);
- return -EINVAL;
- case RTE_TUNNEL_TYPE_NONE:
- default:
- return 0;
- }
- break;
- case RTE_ETH_FILTER_FDIR:
- return qede_fdir_filter_conf(eth_dev, filter_op, arg);
- case RTE_ETH_FILTER_NTUPLE:
- return qede_ntuple_filter_conf(eth_dev, filter_op, arg);
- case RTE_ETH_FILTER_MACVLAN:
- case RTE_ETH_FILTER_ETHERTYPE:
- case RTE_ETH_FILTER_FLEXIBLE:
- case RTE_ETH_FILTER_SYN:
- case RTE_ETH_FILTER_HASH:
- case RTE_ETH_FILTER_L2_TUNNEL:
- case RTE_ETH_FILTER_MAX:
- default:
- DP_ERR(edev, "Unsupported filter type %d\n",
- filter_type);
- return -EINVAL;
- }
+ ret = qede_eth_dev_uninit(dev);
+ if (ret)
+ return ret;
- return 0;
+ return qede_eth_dev_init(dev);
}
static const struct eth_dev_ops qede_eth_dev_ops = {
@@ -2981,9 +2309,11 @@ static const struct eth_dev_ops qede_eth_dev_ops = {
.dev_infos_get = qede_dev_info_get,
.rx_queue_setup = qede_rx_queue_setup,
.rx_queue_release = qede_rx_queue_release,
+ .rx_descriptor_status = qede_rx_descriptor_status,
.tx_queue_setup = qede_tx_queue_setup,
.tx_queue_release = qede_tx_queue_release,
.dev_start = qede_dev_start,
+ .dev_reset = qede_dev_reset,
.dev_set_link_up = qede_dev_set_link_up,
.dev_set_link_down = qede_dev_set_link_down,
.link_update = qede_link_update,
@@ -3022,9 +2352,11 @@ static const struct eth_dev_ops qede_eth_vf_dev_ops = {
.dev_infos_get = qede_dev_info_get,
.rx_queue_setup = qede_rx_queue_setup,
.rx_queue_release = qede_rx_queue_release,
+ .rx_descriptor_status = qede_rx_descriptor_status,
.tx_queue_setup = qede_tx_queue_setup,
.tx_queue_release = qede_tx_queue_release,
.dev_start = qede_dev_start,
+ .dev_reset = qede_dev_reset,
.dev_set_link_up = qede_dev_set_link_up,
.dev_set_link_down = qede_dev_set_link_down,
.link_update = qede_link_update,
@@ -3257,7 +2589,7 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
adapter->num_tx_queues = 0;
adapter->num_rx_queues = 0;
- SLIST_INIT(&adapter->fdir_info.fdir_list_head);
+ SLIST_INIT(&adapter->arfs_info.arfs_list_head);
SLIST_INIT(&adapter->vlan_list_head);
SLIST_INIT(&adapter->uc_list_head);
SLIST_INIT(&adapter->mc_list_head);
@@ -3311,12 +2643,10 @@ static int qede_eth_dev_init(struct rte_eth_dev *eth_dev)
static int qede_dev_common_uninit(struct rte_eth_dev *eth_dev)
{
-#ifdef RTE_LIBRTE_QEDE_DEBUG_INIT
struct qede_dev *qdev = eth_dev->data->dev_private;
struct ecore_dev *edev = &qdev->edev;
PMD_INIT_FUNC_TRACE(edev);
-#endif
/* only uninitialize in the primary process */
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
@@ -3329,11 +2659,6 @@ static int qede_dev_common_uninit(struct rte_eth_dev *eth_dev)
eth_dev->rx_pkt_burst = NULL;
eth_dev->tx_pkt_burst = NULL;
- if (eth_dev->data->mac_addrs)
- rte_free(eth_dev->data->mac_addrs);
-
- eth_dev->data->mac_addrs = NULL;
-
return 0;
}
diff --git a/drivers/net/qede/qede_ethdev.h b/drivers/net/qede/qede_ethdev.h
index 6e9a5b4b..c06274d9 100644
--- a/drivers/net/qede/qede_ethdev.h
+++ b/drivers/net/qede/qede_ethdev.h
@@ -44,7 +44,7 @@
/* Driver versions */
#define QEDE_PMD_VER_PREFIX "QEDE PMD"
#define QEDE_PMD_VERSION_MAJOR 2
-#define QEDE_PMD_VERSION_MINOR 9
+#define QEDE_PMD_VERSION_MINOR 10
#define QEDE_PMD_VERSION_REVISION 0
#define QEDE_PMD_VERSION_PATCH 1
@@ -118,7 +118,7 @@
-extern char fw_file[];
+extern char qede_fw_file[];
/* Number of PF connections - 32 RX + 32 TX */
#define QEDE_PF_NUM_CONNS (64)
@@ -151,18 +151,48 @@ struct qede_ucast_entry {
SLIST_ENTRY(qede_ucast_entry) list;
};
-struct qede_fdir_entry {
+#ifndef IPV6_ADDR_LEN
+#define IPV6_ADDR_LEN (16)
+#endif
+
+struct qede_arfs_tuple {
+ union {
+ uint32_t src_ipv4;
+ uint8_t src_ipv6[IPV6_ADDR_LEN];
+ };
+
+ union {
+ uint32_t dst_ipv4;
+ uint8_t dst_ipv6[IPV6_ADDR_LEN];
+ };
+
+ uint16_t src_port;
+ uint16_t dst_port;
+ uint16_t eth_proto;
+ uint8_t ip_proto;
+
+ /* Describe filtering mode needed for this kind of filter */
+ enum ecore_filter_config_mode mode;
+};
+
+struct qede_arfs_entry {
uint32_t soft_id; /* unused for now */
uint16_t pkt_len; /* actual packet length to match */
uint16_t rx_queue; /* queue to be steered to */
const struct rte_memzone *mz; /* mz used to hold L2 frame */
- SLIST_ENTRY(qede_fdir_entry) list;
+ struct qede_arfs_tuple tuple;
+ SLIST_ENTRY(qede_arfs_entry) list;
};
-struct qede_fdir_info {
+/* Opaque handle for rte flow managed by PMD */
+struct rte_flow {
+ struct qede_arfs_entry entry;
+};
+
+struct qede_arfs_info {
struct ecore_arfs_config_params arfs;
uint16_t filter_count;
- SLIST_HEAD(fdir_list_head, qede_fdir_entry)fdir_list_head;
+ SLIST_HEAD(arfs_list_head, qede_arfs_entry)arfs_list_head;
};
/* IANA assigned default UDP ports for encapsulation protocols */
@@ -207,7 +237,7 @@ struct qede_dev {
struct qede_tunn_params vxlan;
struct qede_tunn_params geneve;
struct qede_tunn_params ipgre;
- struct qede_fdir_info fdir_info;
+ struct qede_arfs_info arfs_info;
bool vlan_strip_flg;
char drv_ver[QEDE_PMD_DRV_VER_STR_SIZE];
bool vport_started;
@@ -215,6 +245,15 @@ struct qede_dev {
void *ethdev;
};
+static inline void qede_set_ucast_cmn_params(struct ecore_filter_ucast *ucast)
+{
+ memset(ucast, 0, sizeof(struct ecore_filter_ucast));
+ ucast->is_rx_filter = true;
+ ucast->is_tx_filter = true;
+ /* ucast->assert_on_error = true; - For debug */
+}
+
+
/* Non-static functions */
int qede_config_rss(struct rte_eth_dev *eth_dev);
@@ -235,9 +274,6 @@ int qede_link_update(struct rte_eth_dev *eth_dev,
int qede_dev_filter_ctrl(struct rte_eth_dev *dev, enum rte_filter_type type,
enum rte_filter_op op, void *arg);
-int qede_fdir_filter_conf(struct rte_eth_dev *eth_dev,
- enum rte_filter_op filter_op, void *arg);
-
int qede_ntuple_filter_conf(struct rte_eth_dev *eth_dev,
enum rte_filter_op filter_op, void *arg);
@@ -255,5 +291,16 @@ int qede_activate_vport(struct rte_eth_dev *eth_dev, bool flg);
int qede_update_mtu(struct rte_eth_dev *eth_dev, uint16_t mtu);
int qede_enable_tpa(struct rte_eth_dev *eth_dev, bool flg);
-
+int qede_udp_dst_port_del(struct rte_eth_dev *eth_dev,
+ struct rte_eth_udp_tunnel *tunnel_udp);
+int qede_udp_dst_port_add(struct rte_eth_dev *eth_dev,
+ struct rte_eth_udp_tunnel *tunnel_udp);
+
+enum _ecore_status_t
+qede_mac_int_ops(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
+ bool add);
+void qede_config_accept_any_vlan(struct qede_dev *qdev, bool flg);
+int qede_ucast_filter(struct rte_eth_dev *eth_dev,
+ struct ecore_filter_ucast *ucast,
+ bool add);
#endif /* _QEDE_ETHDEV_H_ */
diff --git a/drivers/net/qede/qede_fdir.c b/drivers/net/qede/qede_fdir.c
deleted file mode 100644
index 83580d04..00000000
--- a/drivers/net/qede/qede_fdir.c
+++ /dev/null
@@ -1,470 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright (c) 2017 Cavium Inc.
- * All rights reserved.
- * www.cavium.com
- */
-
-#include <rte_udp.h>
-#include <rte_tcp.h>
-#include <rte_sctp.h>
-#include <rte_errno.h>
-
-#include "qede_ethdev.h"
-
-#define IP_VERSION (0x40)
-#define IP_HDRLEN (0x5)
-#define QEDE_FDIR_IP_DEFAULT_VERSION_IHL (IP_VERSION | IP_HDRLEN)
-#define QEDE_FDIR_TCP_DEFAULT_DATAOFF (0x50)
-#define QEDE_FDIR_IPV4_DEF_TTL (64)
-
-/* Sum of length of header types of L2, L3, L4.
- * L2 : ether_hdr + vlan_hdr + vxlan_hdr
- * L3 : ipv6_hdr
- * L4 : tcp_hdr
- */
-#define QEDE_MAX_FDIR_PKT_LEN (86)
-
-#ifndef IPV6_ADDR_LEN
-#define IPV6_ADDR_LEN (16)
-#endif
-
-#define QEDE_VALID_FLOW(flow_type) \
- ((flow_type) == RTE_ETH_FLOW_NONFRAG_IPV4_TCP || \
- (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV4_UDP || \
- (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV6_TCP || \
- (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV6_UDP)
-
-/* Note: Flowdir support is only partial.
- * For ex: drop_queue, FDIR masks, flex_conf are not supported.
- * Parameters like pballoc/status fields are irrelevant here.
- */
-int qede_check_fdir_support(struct rte_eth_dev *eth_dev)
-{
- struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
- struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- struct rte_fdir_conf *fdir = &eth_dev->data->dev_conf.fdir_conf;
-
- /* check FDIR modes */
- switch (fdir->mode) {
- case RTE_FDIR_MODE_NONE:
- qdev->fdir_info.arfs.arfs_enable = false;
- DP_INFO(edev, "flowdir is disabled\n");
- break;
- case RTE_FDIR_MODE_PERFECT:
- if (ECORE_IS_CMT(edev)) {
- DP_ERR(edev, "flowdir is not supported in 100G mode\n");
- qdev->fdir_info.arfs.arfs_enable = false;
- return -ENOTSUP;
- }
- qdev->fdir_info.arfs.arfs_enable = true;
- DP_INFO(edev, "flowdir is enabled\n");
- break;
- case RTE_FDIR_MODE_PERFECT_TUNNEL:
- case RTE_FDIR_MODE_SIGNATURE:
- case RTE_FDIR_MODE_PERFECT_MAC_VLAN:
- DP_ERR(edev, "Unsupported flowdir mode %d\n", fdir->mode);
- return -ENOTSUP;
- }
-
- return 0;
-}
-
-void qede_fdir_dealloc_resc(struct rte_eth_dev *eth_dev)
-{
- struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
- struct qede_fdir_entry *tmp = NULL;
-
- SLIST_FOREACH(tmp, &qdev->fdir_info.fdir_list_head, list) {
- if (tmp) {
- if (tmp->mz)
- rte_memzone_free(tmp->mz);
- SLIST_REMOVE(&qdev->fdir_info.fdir_list_head, tmp,
- qede_fdir_entry, list);
- rte_free(tmp);
- }
- }
-}
-
-static int
-qede_config_cmn_fdir_filter(struct rte_eth_dev *eth_dev,
- struct rte_eth_fdir_filter *fdir_filter,
- bool add)
-{
- struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
- struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- char mz_name[RTE_MEMZONE_NAMESIZE] = {0};
- struct qede_fdir_entry *tmp = NULL;
- struct qede_fdir_entry *fdir = NULL;
- const struct rte_memzone *mz;
- struct ecore_hwfn *p_hwfn;
- enum _ecore_status_t rc;
- uint16_t pkt_len;
- void *pkt;
-
- if (add) {
- if (qdev->fdir_info.filter_count == QEDE_RFS_MAX_FLTR - 1) {
- DP_ERR(edev, "Reached max flowdir filter limit\n");
- return -EINVAL;
- }
- fdir = rte_malloc(NULL, sizeof(struct qede_fdir_entry),
- RTE_CACHE_LINE_SIZE);
- if (!fdir) {
- DP_ERR(edev, "Did not allocate memory for fdir\n");
- return -ENOMEM;
- }
- }
- /* soft_id could have been used as memzone string, but soft_id is
- * not currently used so it has no significance.
- */
- snprintf(mz_name, sizeof(mz_name) - 1, "%lx",
- (unsigned long)rte_get_timer_cycles());
- mz = rte_memzone_reserve_aligned(mz_name, QEDE_MAX_FDIR_PKT_LEN,
- SOCKET_ID_ANY, 0, RTE_CACHE_LINE_SIZE);
- if (!mz) {
- DP_ERR(edev, "Failed to allocate memzone for fdir, err = %s\n",
- rte_strerror(rte_errno));
- rc = -rte_errno;
- goto err1;
- }
-
- pkt = mz->addr;
- memset(pkt, 0, QEDE_MAX_FDIR_PKT_LEN);
- pkt_len = qede_fdir_construct_pkt(eth_dev, fdir_filter, pkt,
- &qdev->fdir_info.arfs);
- if (pkt_len == 0) {
- rc = -EINVAL;
- goto err2;
- }
- DP_INFO(edev, "pkt_len = %u memzone = %s\n", pkt_len, mz_name);
- if (add) {
- SLIST_FOREACH(tmp, &qdev->fdir_info.fdir_list_head, list) {
- if (memcmp(tmp->mz->addr, pkt, pkt_len) == 0) {
- DP_INFO(edev, "flowdir filter exist\n");
- rc = 0;
- goto err2;
- }
- }
- } else {
- SLIST_FOREACH(tmp, &qdev->fdir_info.fdir_list_head, list) {
- if (memcmp(tmp->mz->addr, pkt, pkt_len) == 0)
- break;
- }
- if (!tmp) {
- DP_ERR(edev, "flowdir filter does not exist\n");
- rc = -EEXIST;
- goto err2;
- }
- }
- p_hwfn = ECORE_LEADING_HWFN(edev);
- if (add) {
- if (!qdev->fdir_info.arfs.arfs_enable) {
- /* Force update */
- eth_dev->data->dev_conf.fdir_conf.mode =
- RTE_FDIR_MODE_PERFECT;
- qdev->fdir_info.arfs.arfs_enable = true;
- DP_INFO(edev, "Force enable flowdir in perfect mode\n");
- }
- /* Enable ARFS searcher with updated flow_types */
- ecore_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
- &qdev->fdir_info.arfs);
- }
- /* configure filter with ECORE_SPQ_MODE_EBLOCK */
- rc = ecore_configure_rfs_ntuple_filter(p_hwfn, NULL,
- (dma_addr_t)mz->iova,
- pkt_len,
- fdir_filter->action.rx_queue,
- 0, add);
- if (rc == ECORE_SUCCESS) {
- if (add) {
- fdir->rx_queue = fdir_filter->action.rx_queue;
- fdir->pkt_len = pkt_len;
- fdir->mz = mz;
- SLIST_INSERT_HEAD(&qdev->fdir_info.fdir_list_head,
- fdir, list);
- qdev->fdir_info.filter_count++;
- DP_INFO(edev, "flowdir filter added, count = %d\n",
- qdev->fdir_info.filter_count);
- } else {
- rte_memzone_free(tmp->mz);
- SLIST_REMOVE(&qdev->fdir_info.fdir_list_head, tmp,
- qede_fdir_entry, list);
- rte_free(tmp); /* the node deleted */
- rte_memzone_free(mz); /* temp node allocated */
- qdev->fdir_info.filter_count--;
- DP_INFO(edev, "Fdir filter deleted, count = %d\n",
- qdev->fdir_info.filter_count);
- }
- } else {
- DP_ERR(edev, "flowdir filter failed, rc=%d filter_count=%d\n",
- rc, qdev->fdir_info.filter_count);
- }
-
- /* Disable ARFS searcher if there are no more filters */
- if (qdev->fdir_info.filter_count == 0) {
- memset(&qdev->fdir_info.arfs, 0,
- sizeof(struct ecore_arfs_config_params));
- DP_INFO(edev, "Disabling flowdir\n");
- qdev->fdir_info.arfs.arfs_enable = false;
- ecore_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
- &qdev->fdir_info.arfs);
- }
- return 0;
-
-err2:
- rte_memzone_free(mz);
-err1:
- if (add)
- rte_free(fdir);
- return rc;
-}
-
-static int
-qede_fdir_filter_add(struct rte_eth_dev *eth_dev,
- struct rte_eth_fdir_filter *fdir,
- bool add)
-{
- struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
- struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
-
- if (!QEDE_VALID_FLOW(fdir->input.flow_type)) {
- DP_ERR(edev, "invalid flow_type input\n");
- return -EINVAL;
- }
-
- if (fdir->action.rx_queue >= QEDE_RSS_COUNT(qdev)) {
- DP_ERR(edev, "invalid queue number %u\n",
- fdir->action.rx_queue);
- return -EINVAL;
- }
-
- if (fdir->input.flow_ext.is_vf) {
- DP_ERR(edev, "flowdir is not supported over VF\n");
- return -EINVAL;
- }
-
- return qede_config_cmn_fdir_filter(eth_dev, fdir, add);
-}
-
-/* Fills the L3/L4 headers and returns the actual length of flowdir packet */
-uint16_t
-qede_fdir_construct_pkt(struct rte_eth_dev *eth_dev,
- struct rte_eth_fdir_filter *fdir,
- void *buff,
- struct ecore_arfs_config_params *params)
-
-{
- struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
- struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- uint16_t *ether_type;
- uint8_t *raw_pkt;
- struct rte_eth_fdir_input *input;
- static uint8_t vlan_frame[] = {0x81, 0, 0, 0};
- struct ipv4_hdr *ip;
- struct ipv6_hdr *ip6;
- struct udp_hdr *udp;
- struct tcp_hdr *tcp;
- uint16_t len;
- static const uint8_t next_proto[] = {
- [RTE_ETH_FLOW_NONFRAG_IPV4_TCP] = IPPROTO_TCP,
- [RTE_ETH_FLOW_NONFRAG_IPV4_UDP] = IPPROTO_UDP,
- [RTE_ETH_FLOW_NONFRAG_IPV6_TCP] = IPPROTO_TCP,
- [RTE_ETH_FLOW_NONFRAG_IPV6_UDP] = IPPROTO_UDP,
- };
- raw_pkt = (uint8_t *)buff;
- input = &fdir->input;
- DP_INFO(edev, "flow_type %d\n", input->flow_type);
-
- len = 2 * sizeof(struct ether_addr);
- raw_pkt += 2 * sizeof(struct ether_addr);
- if (input->flow_ext.vlan_tci) {
- DP_INFO(edev, "adding VLAN header\n");
- rte_memcpy(raw_pkt, vlan_frame, sizeof(vlan_frame));
- rte_memcpy(raw_pkt + sizeof(uint16_t),
- &input->flow_ext.vlan_tci,
- sizeof(uint16_t));
- raw_pkt += sizeof(vlan_frame);
- len += sizeof(vlan_frame);
- }
- ether_type = (uint16_t *)raw_pkt;
- raw_pkt += sizeof(uint16_t);
- len += sizeof(uint16_t);
-
- switch (input->flow_type) {
- case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
- case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
- /* fill the common ip header */
- ip = (struct ipv4_hdr *)raw_pkt;
- *ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
- ip->version_ihl = QEDE_FDIR_IP_DEFAULT_VERSION_IHL;
- ip->total_length = sizeof(struct ipv4_hdr);
- ip->next_proto_id = input->flow.ip4_flow.proto ?
- input->flow.ip4_flow.proto :
- next_proto[input->flow_type];
- ip->time_to_live = input->flow.ip4_flow.ttl ?
- input->flow.ip4_flow.ttl :
- QEDE_FDIR_IPV4_DEF_TTL;
- ip->type_of_service = input->flow.ip4_flow.tos;
- ip->dst_addr = input->flow.ip4_flow.dst_ip;
- ip->src_addr = input->flow.ip4_flow.src_ip;
- len += sizeof(struct ipv4_hdr);
- params->ipv4 = true;
-
- raw_pkt = (uint8_t *)buff;
- /* UDP */
- if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP) {
- udp = (struct udp_hdr *)(raw_pkt + len);
- udp->dst_port = input->flow.udp4_flow.dst_port;
- udp->src_port = input->flow.udp4_flow.src_port;
- udp->dgram_len = sizeof(struct udp_hdr);
- len += sizeof(struct udp_hdr);
- /* adjust ip total_length */
- ip->total_length += sizeof(struct udp_hdr);
- params->udp = true;
- } else { /* TCP */
- tcp = (struct tcp_hdr *)(raw_pkt + len);
- tcp->src_port = input->flow.tcp4_flow.src_port;
- tcp->dst_port = input->flow.tcp4_flow.dst_port;
- tcp->data_off = QEDE_FDIR_TCP_DEFAULT_DATAOFF;
- len += sizeof(struct tcp_hdr);
- /* adjust ip total_length */
- ip->total_length += sizeof(struct tcp_hdr);
- params->tcp = true;
- }
- break;
- case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
- case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
- ip6 = (struct ipv6_hdr *)raw_pkt;
- *ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
- ip6->proto = input->flow.ipv6_flow.proto ?
- input->flow.ipv6_flow.proto :
- next_proto[input->flow_type];
- rte_memcpy(&ip6->src_addr, &input->flow.ipv6_flow.dst_ip,
- IPV6_ADDR_LEN);
- rte_memcpy(&ip6->dst_addr, &input->flow.ipv6_flow.src_ip,
- IPV6_ADDR_LEN);
- len += sizeof(struct ipv6_hdr);
-
- raw_pkt = (uint8_t *)buff;
- /* UDP */
- if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP) {
- udp = (struct udp_hdr *)(raw_pkt + len);
- udp->src_port = input->flow.udp6_flow.dst_port;
- udp->dst_port = input->flow.udp6_flow.src_port;
- len += sizeof(struct udp_hdr);
- params->udp = true;
- } else { /* TCP */
- tcp = (struct tcp_hdr *)(raw_pkt + len);
- tcp->src_port = input->flow.tcp4_flow.src_port;
- tcp->dst_port = input->flow.tcp4_flow.dst_port;
- tcp->data_off = QEDE_FDIR_TCP_DEFAULT_DATAOFF;
- len += sizeof(struct tcp_hdr);
- params->tcp = true;
- }
- break;
- default:
- DP_ERR(edev, "Unsupported flow_type %u\n",
- input->flow_type);
- return 0;
- }
-
- return len;
-}
-
-int
-qede_fdir_filter_conf(struct rte_eth_dev *eth_dev,
- enum rte_filter_op filter_op,
- void *arg)
-{
- struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
- struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- struct rte_eth_fdir_filter *fdir;
- int ret;
-
- fdir = (struct rte_eth_fdir_filter *)arg;
- switch (filter_op) {
- case RTE_ETH_FILTER_NOP:
- /* Typically used to query flowdir support */
- if (ECORE_IS_CMT(edev)) {
- DP_ERR(edev, "flowdir is not supported in 100G mode\n");
- return -ENOTSUP;
- }
- return 0; /* means supported */
- case RTE_ETH_FILTER_ADD:
- ret = qede_fdir_filter_add(eth_dev, fdir, 1);
- break;
- case RTE_ETH_FILTER_DELETE:
- ret = qede_fdir_filter_add(eth_dev, fdir, 0);
- break;
- case RTE_ETH_FILTER_FLUSH:
- case RTE_ETH_FILTER_UPDATE:
- case RTE_ETH_FILTER_INFO:
- return -ENOTSUP;
- break;
- default:
- DP_ERR(edev, "unknown operation %u", filter_op);
- ret = -EINVAL;
- }
-
- return ret;
-}
-
-int qede_ntuple_filter_conf(struct rte_eth_dev *eth_dev,
- enum rte_filter_op filter_op,
- void *arg)
-{
- struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
- struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- struct rte_eth_ntuple_filter *ntuple;
- struct rte_eth_fdir_filter fdir_entry;
- struct rte_eth_tcpv4_flow *tcpv4_flow;
- struct rte_eth_udpv4_flow *udpv4_flow;
- bool add = false;
-
- switch (filter_op) {
- case RTE_ETH_FILTER_NOP:
- /* Typically used to query fdir support */
- if (ECORE_IS_CMT(edev)) {
- DP_ERR(edev, "flowdir is not supported in 100G mode\n");
- return -ENOTSUP;
- }
- return 0; /* means supported */
- case RTE_ETH_FILTER_ADD:
- add = true;
- break;
- case RTE_ETH_FILTER_DELETE:
- break;
- case RTE_ETH_FILTER_INFO:
- case RTE_ETH_FILTER_GET:
- case RTE_ETH_FILTER_UPDATE:
- case RTE_ETH_FILTER_FLUSH:
- case RTE_ETH_FILTER_SET:
- case RTE_ETH_FILTER_STATS:
- case RTE_ETH_FILTER_OP_MAX:
- DP_ERR(edev, "Unsupported filter_op %d\n", filter_op);
- return -ENOTSUP;
- }
- ntuple = (struct rte_eth_ntuple_filter *)arg;
- /* Internally convert ntuple to fdir entry */
- memset(&fdir_entry, 0, sizeof(fdir_entry));
- if (ntuple->proto == IPPROTO_TCP) {
- fdir_entry.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
- tcpv4_flow = &fdir_entry.input.flow.tcp4_flow;
- tcpv4_flow->ip.src_ip = ntuple->src_ip;
- tcpv4_flow->ip.dst_ip = ntuple->dst_ip;
- tcpv4_flow->ip.proto = IPPROTO_TCP;
- tcpv4_flow->src_port = ntuple->src_port;
- tcpv4_flow->dst_port = ntuple->dst_port;
- } else {
- fdir_entry.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
- udpv4_flow = &fdir_entry.input.flow.udp4_flow;
- udpv4_flow->ip.src_ip = ntuple->src_ip;
- udpv4_flow->ip.dst_ip = ntuple->dst_ip;
- udpv4_flow->ip.proto = IPPROTO_TCP;
- udpv4_flow->src_port = ntuple->src_port;
- udpv4_flow->dst_port = ntuple->dst_port;
- }
-
- fdir_entry.action.rx_queue = ntuple->queue;
-
- return qede_config_cmn_fdir_filter(eth_dev, &fdir_entry, add);
-}
diff --git a/drivers/net/qede/qede_filter.c b/drivers/net/qede/qede_filter.c
new file mode 100644
index 00000000..5e6571ca
--- /dev/null
+++ b/drivers/net/qede/qede_filter.c
@@ -0,0 +1,1546 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2017 Cavium Inc.
+ * All rights reserved.
+ * www.cavium.com
+ */
+
+#include <rte_udp.h>
+#include <rte_tcp.h>
+#include <rte_sctp.h>
+#include <rte_errno.h>
+#include <rte_flow_driver.h>
+
+#include "qede_ethdev.h"
+
+/* VXLAN tunnel classification mapping */
+const struct _qede_udp_tunn_types {
+ uint16_t rte_filter_type;
+ enum ecore_filter_ucast_type qede_type;
+ enum ecore_tunn_clss qede_tunn_clss;
+ const char *string;
+} qede_tunn_types[] = {
+ {
+ ETH_TUNNEL_FILTER_OMAC,
+ ECORE_FILTER_MAC,
+ ECORE_TUNN_CLSS_MAC_VLAN,
+ "outer-mac"
+ },
+ {
+ ETH_TUNNEL_FILTER_TENID,
+ ECORE_FILTER_VNI,
+ ECORE_TUNN_CLSS_MAC_VNI,
+ "vni"
+ },
+ {
+ ETH_TUNNEL_FILTER_IMAC,
+ ECORE_FILTER_INNER_MAC,
+ ECORE_TUNN_CLSS_INNER_MAC_VLAN,
+ "inner-mac"
+ },
+ {
+ ETH_TUNNEL_FILTER_IVLAN,
+ ECORE_FILTER_INNER_VLAN,
+ ECORE_TUNN_CLSS_INNER_MAC_VLAN,
+ "inner-vlan"
+ },
+ {
+ ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID,
+ ECORE_FILTER_MAC_VNI_PAIR,
+ ECORE_TUNN_CLSS_MAC_VNI,
+ "outer-mac and vni"
+ },
+ {
+ ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IMAC,
+ ECORE_FILTER_UNUSED,
+ MAX_ECORE_TUNN_CLSS,
+ "outer-mac and inner-mac"
+ },
+ {
+ ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IVLAN,
+ ECORE_FILTER_UNUSED,
+ MAX_ECORE_TUNN_CLSS,
+ "outer-mac and inner-vlan"
+ },
+ {
+ ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IMAC,
+ ECORE_FILTER_INNER_MAC_VNI_PAIR,
+ ECORE_TUNN_CLSS_INNER_MAC_VNI,
+ "vni and inner-mac",
+ },
+ {
+ ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IVLAN,
+ ECORE_FILTER_UNUSED,
+ MAX_ECORE_TUNN_CLSS,
+ "vni and inner-vlan",
+ },
+ {
+ ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
+ ECORE_FILTER_INNER_PAIR,
+ ECORE_TUNN_CLSS_INNER_MAC_VLAN,
+ "inner-mac and inner-vlan",
+ },
+ {
+ ETH_TUNNEL_FILTER_OIP,
+ ECORE_FILTER_UNUSED,
+ MAX_ECORE_TUNN_CLSS,
+ "outer-IP"
+ },
+ {
+ ETH_TUNNEL_FILTER_IIP,
+ ECORE_FILTER_UNUSED,
+ MAX_ECORE_TUNN_CLSS,
+ "inner-IP"
+ },
+ {
+ RTE_TUNNEL_FILTER_IMAC_IVLAN,
+ ECORE_FILTER_UNUSED,
+ MAX_ECORE_TUNN_CLSS,
+ "IMAC_IVLAN"
+ },
+ {
+ RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID,
+ ECORE_FILTER_UNUSED,
+ MAX_ECORE_TUNN_CLSS,
+ "IMAC_IVLAN_TENID"
+ },
+ {
+ RTE_TUNNEL_FILTER_IMAC_TENID,
+ ECORE_FILTER_UNUSED,
+ MAX_ECORE_TUNN_CLSS,
+ "IMAC_TENID"
+ },
+ {
+ RTE_TUNNEL_FILTER_OMAC_TENID_IMAC,
+ ECORE_FILTER_UNUSED,
+ MAX_ECORE_TUNN_CLSS,
+ "OMAC_TENID_IMAC"
+ },
+};
+
+#define IP_VERSION (0x40)
+#define IP_HDRLEN (0x5)
+#define QEDE_FDIR_IP_DEFAULT_VERSION_IHL (IP_VERSION | IP_HDRLEN)
+#define QEDE_FDIR_TCP_DEFAULT_DATAOFF (0x50)
+#define QEDE_FDIR_IPV4_DEF_TTL (64)
+#define QEDE_FDIR_IPV6_DEFAULT_VTC_FLOW (0x60000000)
+/* Sum of length of header types of L2, L3, L4.
+ * L2 : ether_hdr + vlan_hdr + vxlan_hdr
+ * L3 : ipv6_hdr
+ * L4 : tcp_hdr
+ */
+#define QEDE_MAX_FDIR_PKT_LEN (86)
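(For reference, the worst case enumerated in the comment above works out to 14 (ether_hdr) + 4 (vlan_hdr) + 8 (vxlan_hdr) + 40 (ipv6_hdr) + 20 (tcp_hdr) = 86 bytes, which is where the 86 comes from.)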
+
+static inline bool qede_valid_flow(uint16_t flow_type)
+{
+ return ((flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP) ||
+ (flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP) ||
+ (flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_TCP) ||
+ (flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP));
+}
+
+static uint16_t
+qede_arfs_construct_pkt(struct rte_eth_dev *eth_dev,
+ struct qede_arfs_entry *arfs,
+ void *buff,
+ struct ecore_arfs_config_params *params);
+
+/* Note: Flowdir support is only partial.
+ * For ex: drop_queue, FDIR masks, flex_conf are not supported.
+ * Parameters like pballoc/status fields are irrelevant here.
+ */
+int qede_check_fdir_support(struct rte_eth_dev *eth_dev)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct rte_fdir_conf *fdir = &eth_dev->data->dev_conf.fdir_conf;
+
+ /* check FDIR modes */
+ switch (fdir->mode) {
+ case RTE_FDIR_MODE_NONE:
+ qdev->arfs_info.arfs.mode = ECORE_FILTER_CONFIG_MODE_DISABLE;
+ DP_INFO(edev, "flowdir is disabled\n");
+ break;
+ case RTE_FDIR_MODE_PERFECT:
+ if (ECORE_IS_CMT(edev)) {
+ DP_ERR(edev, "flowdir is not supported in 100G mode\n");
+ qdev->arfs_info.arfs.mode =
+ ECORE_FILTER_CONFIG_MODE_DISABLE;
+ return -ENOTSUP;
+ }
+ qdev->arfs_info.arfs.mode =
+ ECORE_FILTER_CONFIG_MODE_5_TUPLE;
+ DP_INFO(edev, "flowdir is enabled (5 Tuple mode)\n");
+ break;
+ case RTE_FDIR_MODE_PERFECT_TUNNEL:
+ case RTE_FDIR_MODE_SIGNATURE:
+ case RTE_FDIR_MODE_PERFECT_MAC_VLAN:
+ DP_ERR(edev, "Unsupported flowdir mode %d\n", fdir->mode);
+ return -ENOTSUP;
+ }
+
+ return 0;
+}
+
+void qede_fdir_dealloc_resc(struct rte_eth_dev *eth_dev)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct qede_arfs_entry *tmp = NULL;
+
+ SLIST_FOREACH(tmp, &qdev->arfs_info.arfs_list_head, list) {
+ if (tmp) {
+ if (tmp->mz)
+ rte_memzone_free(tmp->mz);
+ SLIST_REMOVE(&qdev->arfs_info.arfs_list_head, tmp,
+ qede_arfs_entry, list);
+ rte_free(tmp);
+ }
+ }
+}
+
+static int
+qede_fdir_to_arfs_filter(struct rte_eth_dev *eth_dev,
+ struct rte_eth_fdir_filter *fdir,
+ struct qede_arfs_entry *arfs)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct rte_eth_fdir_input *input;
+
+ static const uint8_t next_proto[] = {
+ [RTE_ETH_FLOW_NONFRAG_IPV4_TCP] = IPPROTO_TCP,
+ [RTE_ETH_FLOW_NONFRAG_IPV4_UDP] = IPPROTO_UDP,
+ [RTE_ETH_FLOW_NONFRAG_IPV6_TCP] = IPPROTO_TCP,
+ [RTE_ETH_FLOW_NONFRAG_IPV6_UDP] = IPPROTO_UDP,
+ };
+
+ input = &fdir->input;
+
+ DP_INFO(edev, "flow_type %d\n", input->flow_type);
+
+ switch (input->flow_type) {
+ case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
+ case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
+ /* fill the common ip header */
+ arfs->tuple.eth_proto = ETHER_TYPE_IPv4;
+ arfs->tuple.dst_ipv4 = input->flow.ip4_flow.dst_ip;
+ arfs->tuple.src_ipv4 = input->flow.ip4_flow.src_ip;
+ arfs->tuple.ip_proto = next_proto[input->flow_type];
+
+ /* UDP */
+ if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP) {
+ arfs->tuple.dst_port = input->flow.udp4_flow.dst_port;
+ arfs->tuple.src_port = input->flow.udp4_flow.src_port;
+ } else { /* TCP */
+ arfs->tuple.dst_port = input->flow.tcp4_flow.dst_port;
+ arfs->tuple.src_port = input->flow.tcp4_flow.src_port;
+ }
+ break;
+ case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
+ case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
+ arfs->tuple.eth_proto = ETHER_TYPE_IPv6;
+ arfs->tuple.ip_proto = next_proto[input->flow_type];
+ rte_memcpy(arfs->tuple.dst_ipv6,
+ &input->flow.ipv6_flow.dst_ip,
+ IPV6_ADDR_LEN);
+ rte_memcpy(arfs->tuple.src_ipv6,
+ &input->flow.ipv6_flow.src_ip,
+ IPV6_ADDR_LEN);
+
+ /* UDP */
+ if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP) {
+ arfs->tuple.dst_port = input->flow.udp6_flow.dst_port;
+ arfs->tuple.src_port = input->flow.udp6_flow.src_port;
+ } else { /* TCP */
+ arfs->tuple.dst_port = input->flow.tcp6_flow.dst_port;
+ arfs->tuple.src_port = input->flow.tcp6_flow.src_port;
+ }
+ break;
+ default:
+ DP_ERR(edev, "Unsupported flow_type %u\n",
+ input->flow_type);
+ return -ENOTSUP;
+ }
+
+ arfs->rx_queue = fdir->action.rx_queue;
+ return 0;
+}
+
+static int
+qede_config_arfs_filter(struct rte_eth_dev *eth_dev,
+ struct qede_arfs_entry *arfs,
+ bool add)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ char mz_name[RTE_MEMZONE_NAMESIZE] = {0};
+ struct qede_arfs_entry *tmp = NULL;
+ const struct rte_memzone *mz;
+ struct ecore_hwfn *p_hwfn;
+ enum _ecore_status_t rc;
+ uint16_t pkt_len;
+ void *pkt;
+
+ if (add) {
+ if (qdev->arfs_info.filter_count == QEDE_RFS_MAX_FLTR - 1) {
+ DP_ERR(edev, "Reached max flowdir filter limit\n");
+ return -EINVAL;
+ }
+ }
+
+ /* soft_id could have been used as memzone string, but soft_id is
+ * not currently used so it has no significance.
+ */
+ snprintf(mz_name, sizeof(mz_name) - 1, "%lx",
+ (unsigned long)rte_get_timer_cycles());
+ mz = rte_memzone_reserve_aligned(mz_name, QEDE_MAX_FDIR_PKT_LEN,
+ SOCKET_ID_ANY, 0, RTE_CACHE_LINE_SIZE);
+ if (!mz) {
+ DP_ERR(edev, "Failed to allocate memzone for fdir, err = %s\n",
+ rte_strerror(rte_errno));
+ return -rte_errno;
+ }
+
+ pkt = mz->addr;
+ memset(pkt, 0, QEDE_MAX_FDIR_PKT_LEN);
+ pkt_len = qede_arfs_construct_pkt(eth_dev, arfs, pkt,
+ &qdev->arfs_info.arfs);
+ if (pkt_len == 0) {
+ rc = -EINVAL;
+ goto err1;
+ }
+
+ DP_INFO(edev, "pkt_len = %u memzone = %s\n", pkt_len, mz_name);
+ if (add) {
+ SLIST_FOREACH(tmp, &qdev->arfs_info.arfs_list_head, list) {
+ if (memcmp(tmp->mz->addr, pkt, pkt_len) == 0) {
+				DP_INFO(edev, "flowdir filter already exists\n");
+ rc = -EEXIST;
+ goto err1;
+ }
+ }
+ } else {
+ SLIST_FOREACH(tmp, &qdev->arfs_info.arfs_list_head, list) {
+ if (memcmp(tmp->mz->addr, pkt, pkt_len) == 0)
+ break;
+ }
+ if (!tmp) {
+ DP_ERR(edev, "flowdir filter does not exist\n");
+ rc = -EEXIST;
+ goto err1;
+ }
+ }
+ p_hwfn = ECORE_LEADING_HWFN(edev);
+ if (add) {
+ if (qdev->arfs_info.arfs.mode ==
+ ECORE_FILTER_CONFIG_MODE_DISABLE) {
+ /* Force update */
+ eth_dev->data->dev_conf.fdir_conf.mode =
+ RTE_FDIR_MODE_PERFECT;
+ qdev->arfs_info.arfs.mode =
+ ECORE_FILTER_CONFIG_MODE_5_TUPLE;
+ DP_INFO(edev, "Force enable flowdir in perfect mode\n");
+ }
+ /* Enable ARFS searcher with updated flow_types */
+ ecore_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
+ &qdev->arfs_info.arfs);
+ }
+ /* configure filter with ECORE_SPQ_MODE_EBLOCK */
+ rc = ecore_configure_rfs_ntuple_filter(p_hwfn, NULL,
+ (dma_addr_t)mz->iova,
+ pkt_len,
+ arfs->rx_queue,
+ 0, add);
+ if (rc == ECORE_SUCCESS) {
+ if (add) {
+ arfs->pkt_len = pkt_len;
+ arfs->mz = mz;
+ SLIST_INSERT_HEAD(&qdev->arfs_info.arfs_list_head,
+ arfs, list);
+ qdev->arfs_info.filter_count++;
+ DP_INFO(edev, "flowdir filter added, count = %d\n",
+ qdev->arfs_info.filter_count);
+ } else {
+ rte_memzone_free(tmp->mz);
+ SLIST_REMOVE(&qdev->arfs_info.arfs_list_head, tmp,
+ qede_arfs_entry, list);
+ rte_free(tmp); /* the node deleted */
+ rte_memzone_free(mz); /* temp node allocated */
+ qdev->arfs_info.filter_count--;
+ DP_INFO(edev, "Fdir filter deleted, count = %d\n",
+ qdev->arfs_info.filter_count);
+ }
+ } else {
+ DP_ERR(edev, "flowdir filter failed, rc=%d filter_count=%d\n",
+ rc, qdev->arfs_info.filter_count);
+ }
+
+ /* Disable ARFS searcher if there are no more filters */
+ if (qdev->arfs_info.filter_count == 0) {
+ memset(&qdev->arfs_info.arfs, 0,
+ sizeof(struct ecore_arfs_config_params));
+ DP_INFO(edev, "Disabling flowdir\n");
+ qdev->arfs_info.arfs.mode = ECORE_FILTER_CONFIG_MODE_DISABLE;
+ ecore_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
+ &qdev->arfs_info.arfs);
+ }
+ return 0;
+
+err1:
+ rte_memzone_free(mz);
+ return rc;
+}
+
+static int
+qede_config_cmn_fdir_filter(struct rte_eth_dev *eth_dev,
+ struct rte_eth_fdir_filter *fdir_filter,
+ bool add)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct qede_arfs_entry *arfs = NULL;
+ int rc = 0;
+
+ arfs = rte_malloc(NULL, sizeof(struct qede_arfs_entry),
+ RTE_CACHE_LINE_SIZE);
+ if (!arfs) {
+ DP_ERR(edev, "Did not allocate memory for arfs\n");
+ return -ENOMEM;
+ }
+
+ rc = qede_fdir_to_arfs_filter(eth_dev, fdir_filter, arfs);
+	if (rc < 0) {
+		rte_free(arfs);
+		return rc;
+	}
+
+ rc = qede_config_arfs_filter(eth_dev, arfs, add);
+ if (rc < 0)
+ rte_free(arfs);
+
+ return rc;
+}
+
+static int
+qede_fdir_filter_add(struct rte_eth_dev *eth_dev,
+ struct rte_eth_fdir_filter *fdir,
+ bool add)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+
+ if (!qede_valid_flow(fdir->input.flow_type)) {
+ DP_ERR(edev, "invalid flow_type input\n");
+ return -EINVAL;
+ }
+
+ if (fdir->action.rx_queue >= QEDE_RSS_COUNT(qdev)) {
+ DP_ERR(edev, "invalid queue number %u\n",
+ fdir->action.rx_queue);
+ return -EINVAL;
+ }
+
+ if (fdir->input.flow_ext.is_vf) {
+ DP_ERR(edev, "flowdir is not supported over VF\n");
+ return -EINVAL;
+ }
+
+ return qede_config_cmn_fdir_filter(eth_dev, fdir, add);
+}
+
+/* Fills the L3/L4 headers and returns the actual length of flowdir packet */
+static uint16_t
+qede_arfs_construct_pkt(struct rte_eth_dev *eth_dev,
+ struct qede_arfs_entry *arfs,
+ void *buff,
+ struct ecore_arfs_config_params *params)
+
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ uint16_t *ether_type;
+ uint8_t *raw_pkt;
+ struct ipv4_hdr *ip;
+ struct ipv6_hdr *ip6;
+ struct udp_hdr *udp;
+ struct tcp_hdr *tcp;
+ uint16_t len;
+
+ raw_pkt = (uint8_t *)buff;
+
+ len = 2 * sizeof(struct ether_addr);
+ raw_pkt += 2 * sizeof(struct ether_addr);
+ ether_type = (uint16_t *)raw_pkt;
+ raw_pkt += sizeof(uint16_t);
+ len += sizeof(uint16_t);
+
+ *ether_type = rte_cpu_to_be_16(arfs->tuple.eth_proto);
+ switch (arfs->tuple.eth_proto) {
+ case ETHER_TYPE_IPv4:
+ ip = (struct ipv4_hdr *)raw_pkt;
+ ip->version_ihl = QEDE_FDIR_IP_DEFAULT_VERSION_IHL;
+ ip->total_length = sizeof(struct ipv4_hdr);
+ ip->next_proto_id = arfs->tuple.ip_proto;
+ ip->time_to_live = QEDE_FDIR_IPV4_DEF_TTL;
+ ip->dst_addr = arfs->tuple.dst_ipv4;
+ ip->src_addr = arfs->tuple.src_ipv4;
+ len += sizeof(struct ipv4_hdr);
+ params->ipv4 = true;
+
+ raw_pkt = (uint8_t *)buff;
+ /* UDP */
+ if (arfs->tuple.ip_proto == IPPROTO_UDP) {
+ udp = (struct udp_hdr *)(raw_pkt + len);
+ udp->dst_port = arfs->tuple.dst_port;
+ udp->src_port = arfs->tuple.src_port;
+ udp->dgram_len = sizeof(struct udp_hdr);
+ len += sizeof(struct udp_hdr);
+ /* adjust ip total_length */
+ ip->total_length += sizeof(struct udp_hdr);
+ params->udp = true;
+ } else { /* TCP */
+ tcp = (struct tcp_hdr *)(raw_pkt + len);
+ tcp->src_port = arfs->tuple.src_port;
+ tcp->dst_port = arfs->tuple.dst_port;
+ tcp->data_off = QEDE_FDIR_TCP_DEFAULT_DATAOFF;
+ len += sizeof(struct tcp_hdr);
+ /* adjust ip total_length */
+ ip->total_length += sizeof(struct tcp_hdr);
+ params->tcp = true;
+ }
+ break;
+ case ETHER_TYPE_IPv6:
+ ip6 = (struct ipv6_hdr *)raw_pkt;
+ ip6->proto = arfs->tuple.ip_proto;
+ ip6->vtc_flow =
+ rte_cpu_to_be_32(QEDE_FDIR_IPV6_DEFAULT_VTC_FLOW);
+
+ rte_memcpy(&ip6->src_addr, arfs->tuple.src_ipv6,
+ IPV6_ADDR_LEN);
+ rte_memcpy(&ip6->dst_addr, arfs->tuple.dst_ipv6,
+ IPV6_ADDR_LEN);
+ len += sizeof(struct ipv6_hdr);
+ params->ipv6 = true;
+
+ raw_pkt = (uint8_t *)buff;
+ /* UDP */
+ if (arfs->tuple.ip_proto == IPPROTO_UDP) {
+ udp = (struct udp_hdr *)(raw_pkt + len);
+ udp->src_port = arfs->tuple.src_port;
+ udp->dst_port = arfs->tuple.dst_port;
+ len += sizeof(struct udp_hdr);
+ params->udp = true;
+ } else { /* TCP */
+ tcp = (struct tcp_hdr *)(raw_pkt + len);
+ tcp->src_port = arfs->tuple.src_port;
+ tcp->dst_port = arfs->tuple.dst_port;
+ tcp->data_off = QEDE_FDIR_TCP_DEFAULT_DATAOFF;
+ len += sizeof(struct tcp_hdr);
+ params->tcp = true;
+ }
+ break;
+ default:
+ DP_ERR(edev, "Unsupported eth_proto %u\n",
+ arfs->tuple.eth_proto);
+ return 0;
+ }
+
+ return len;
+}
+
+static int
+qede_fdir_filter_conf(struct rte_eth_dev *eth_dev,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct rte_eth_fdir_filter *fdir;
+ int ret;
+
+ fdir = (struct rte_eth_fdir_filter *)arg;
+ switch (filter_op) {
+ case RTE_ETH_FILTER_NOP:
+ /* Typically used to query flowdir support */
+ if (ECORE_IS_CMT(edev)) {
+ DP_ERR(edev, "flowdir is not supported in 100G mode\n");
+ return -ENOTSUP;
+ }
+ return 0; /* means supported */
+ case RTE_ETH_FILTER_ADD:
+ ret = qede_fdir_filter_add(eth_dev, fdir, 1);
+ break;
+ case RTE_ETH_FILTER_DELETE:
+ ret = qede_fdir_filter_add(eth_dev, fdir, 0);
+ break;
+ case RTE_ETH_FILTER_FLUSH:
+ case RTE_ETH_FILTER_UPDATE:
+ case RTE_ETH_FILTER_INFO:
+ return -ENOTSUP;
+ break;
+ default:
+ DP_ERR(edev, "unknown operation %u", filter_op);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+int qede_ntuple_filter_conf(struct rte_eth_dev *eth_dev,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct rte_eth_ntuple_filter *ntuple;
+ struct rte_eth_fdir_filter fdir_entry;
+ struct rte_eth_tcpv4_flow *tcpv4_flow;
+ struct rte_eth_udpv4_flow *udpv4_flow;
+ bool add = false;
+
+ switch (filter_op) {
+ case RTE_ETH_FILTER_NOP:
+ /* Typically used to query fdir support */
+ if (ECORE_IS_CMT(edev)) {
+ DP_ERR(edev, "flowdir is not supported in 100G mode\n");
+ return -ENOTSUP;
+ }
+ return 0; /* means supported */
+ case RTE_ETH_FILTER_ADD:
+ add = true;
+ break;
+ case RTE_ETH_FILTER_DELETE:
+ break;
+ case RTE_ETH_FILTER_INFO:
+ case RTE_ETH_FILTER_GET:
+ case RTE_ETH_FILTER_UPDATE:
+ case RTE_ETH_FILTER_FLUSH:
+ case RTE_ETH_FILTER_SET:
+ case RTE_ETH_FILTER_STATS:
+ case RTE_ETH_FILTER_OP_MAX:
+ DP_ERR(edev, "Unsupported filter_op %d\n", filter_op);
+ return -ENOTSUP;
+ }
+ ntuple = (struct rte_eth_ntuple_filter *)arg;
+ /* Internally convert ntuple to fdir entry */
+ memset(&fdir_entry, 0, sizeof(fdir_entry));
+ if (ntuple->proto == IPPROTO_TCP) {
+ fdir_entry.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
+ tcpv4_flow = &fdir_entry.input.flow.tcp4_flow;
+ tcpv4_flow->ip.src_ip = ntuple->src_ip;
+ tcpv4_flow->ip.dst_ip = ntuple->dst_ip;
+ tcpv4_flow->ip.proto = IPPROTO_TCP;
+ tcpv4_flow->src_port = ntuple->src_port;
+ tcpv4_flow->dst_port = ntuple->dst_port;
+ } else {
+ fdir_entry.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
+ udpv4_flow = &fdir_entry.input.flow.udp4_flow;
+ udpv4_flow->ip.src_ip = ntuple->src_ip;
+ udpv4_flow->ip.dst_ip = ntuple->dst_ip;
+		udpv4_flow->ip.proto = IPPROTO_UDP;
+ udpv4_flow->src_port = ntuple->src_port;
+ udpv4_flow->dst_port = ntuple->dst_port;
+ }
+
+ fdir_entry.action.rx_queue = ntuple->queue;
+
+ return qede_config_cmn_fdir_filter(eth_dev, &fdir_entry, add);
+}
+
+static int
+qede_tunnel_update(struct qede_dev *qdev,
+ struct ecore_tunnel_info *tunn_info)
+{
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ enum _ecore_status_t rc = ECORE_INVAL;
+ struct ecore_hwfn *p_hwfn;
+ struct ecore_ptt *p_ptt;
+ int i;
+
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ if (IS_PF(edev)) {
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt) {
+ DP_ERR(p_hwfn, "Can't acquire PTT\n");
+ return -EAGAIN;
+ }
+ } else {
+ p_ptt = NULL;
+ }
+
+ rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt,
+ tunn_info, ECORE_SPQ_MODE_CB, NULL);
+ if (IS_PF(edev))
+ ecore_ptt_release(p_hwfn, p_ptt);
+
+ if (rc != ECORE_SUCCESS)
+ break;
+ }
+
+ return rc;
+}
+
+static int
+qede_vxlan_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
+ bool enable)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ enum _ecore_status_t rc = ECORE_INVAL;
+ struct ecore_tunnel_info tunn;
+
+ if (qdev->vxlan.enable == enable)
+ return ECORE_SUCCESS;
+
+ memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
+ tunn.vxlan.b_update_mode = true;
+ tunn.vxlan.b_mode_enabled = enable;
+ tunn.b_update_rx_cls = true;
+ tunn.b_update_tx_cls = true;
+ tunn.vxlan.tun_cls = clss;
+
+ tunn.vxlan_port.b_update_port = true;
+ tunn.vxlan_port.port = enable ? QEDE_VXLAN_DEF_PORT : 0;
+
+ rc = qede_tunnel_update(qdev, &tunn);
+ if (rc == ECORE_SUCCESS) {
+ qdev->vxlan.enable = enable;
+ qdev->vxlan.udp_port = (enable) ? QEDE_VXLAN_DEF_PORT : 0;
+ DP_INFO(edev, "vxlan is %s, UDP port = %d\n",
+ enable ? "enabled" : "disabled", qdev->vxlan.udp_port);
+ } else {
+ DP_ERR(edev, "Failed to update tunn_clss %u\n",
+ tunn.vxlan.tun_cls);
+ }
+
+ return rc;
+}
+
+static int
+qede_geneve_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
+ bool enable)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ enum _ecore_status_t rc = ECORE_INVAL;
+ struct ecore_tunnel_info tunn;
+
+ memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
+ tunn.l2_geneve.b_update_mode = true;
+ tunn.l2_geneve.b_mode_enabled = enable;
+ tunn.ip_geneve.b_update_mode = true;
+ tunn.ip_geneve.b_mode_enabled = enable;
+ tunn.l2_geneve.tun_cls = clss;
+ tunn.ip_geneve.tun_cls = clss;
+ tunn.b_update_rx_cls = true;
+ tunn.b_update_tx_cls = true;
+
+ tunn.geneve_port.b_update_port = true;
+ tunn.geneve_port.port = enable ? QEDE_GENEVE_DEF_PORT : 0;
+
+ rc = qede_tunnel_update(qdev, &tunn);
+ if (rc == ECORE_SUCCESS) {
+ qdev->geneve.enable = enable;
+ qdev->geneve.udp_port = (enable) ? QEDE_GENEVE_DEF_PORT : 0;
+ DP_INFO(edev, "GENEVE is %s, UDP port = %d\n",
+ enable ? "enabled" : "disabled", qdev->geneve.udp_port);
+ } else {
+ DP_ERR(edev, "Failed to update tunn_clss %u\n",
+ clss);
+ }
+
+ return rc;
+}
+
+static int
+qede_ipgre_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
+ bool enable)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ enum _ecore_status_t rc = ECORE_INVAL;
+ struct ecore_tunnel_info tunn;
+
+ memset(&tunn, 0, sizeof(struct ecore_tunnel_info));
+ tunn.ip_gre.b_update_mode = true;
+ tunn.ip_gre.b_mode_enabled = enable;
+	tunn.ip_gre.tun_cls = clss;
+ tunn.b_update_rx_cls = true;
+ tunn.b_update_tx_cls = true;
+
+ rc = qede_tunnel_update(qdev, &tunn);
+ if (rc == ECORE_SUCCESS) {
+ qdev->ipgre.enable = enable;
+ DP_INFO(edev, "IPGRE is %s\n",
+ enable ? "enabled" : "disabled");
+ } else {
+ DP_ERR(edev, "Failed to update tunn_clss %u\n",
+ clss);
+ }
+
+ return rc;
+}
+
+int
+qede_udp_dst_port_del(struct rte_eth_dev *eth_dev,
+ struct rte_eth_udp_tunnel *tunnel_udp)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_tunnel_info tunn; /* @DPDK */
+ uint16_t udp_port;
+ int rc;
+
+ PMD_INIT_FUNC_TRACE(edev);
+
+ memset(&tunn, 0, sizeof(tunn));
+
+ switch (tunnel_udp->prot_type) {
+ case RTE_TUNNEL_TYPE_VXLAN:
+ if (qdev->vxlan.udp_port != tunnel_udp->udp_port) {
+ DP_ERR(edev, "UDP port %u doesn't exist\n",
+ tunnel_udp->udp_port);
+ return ECORE_INVAL;
+ }
+ udp_port = 0;
+
+ tunn.vxlan_port.b_update_port = true;
+ tunn.vxlan_port.port = udp_port;
+
+ rc = qede_tunnel_update(qdev, &tunn);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Unable to config UDP port %u\n",
+ tunn.vxlan_port.port);
+ return rc;
+ }
+
+ qdev->vxlan.udp_port = udp_port;
+		/* If the request is to delete the UDP port and the number of
+		 * VXLAN filters has reached 0, then VXLAN offload can be
+		 * disabled.
+		 */
+ if (qdev->vxlan.enable && qdev->vxlan.num_filters == 0)
+ return qede_vxlan_enable(eth_dev,
+ ECORE_TUNN_CLSS_MAC_VLAN, false);
+
+ break;
+ case RTE_TUNNEL_TYPE_GENEVE:
+ if (qdev->geneve.udp_port != tunnel_udp->udp_port) {
+ DP_ERR(edev, "UDP port %u doesn't exist\n",
+ tunnel_udp->udp_port);
+ return ECORE_INVAL;
+ }
+
+ udp_port = 0;
+
+ tunn.geneve_port.b_update_port = true;
+ tunn.geneve_port.port = udp_port;
+
+ rc = qede_tunnel_update(qdev, &tunn);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Unable to config UDP port %u\n",
+				tunn.geneve_port.port);
+ return rc;
+ }
+
+		qdev->geneve.udp_port = udp_port;
+		/* If the request is to delete the UDP port and the number of
+		 * GENEVE filters has reached 0, then GENEVE offload can be
+		 * disabled.
+		 */
+ if (qdev->geneve.enable && qdev->geneve.num_filters == 0)
+ return qede_geneve_enable(eth_dev,
+ ECORE_TUNN_CLSS_MAC_VLAN, false);
+
+ break;
+
+ default:
+ return ECORE_INVAL;
+ }
+
+ return 0;
+}
+
+int
+qede_udp_dst_port_add(struct rte_eth_dev *eth_dev,
+ struct rte_eth_udp_tunnel *tunnel_udp)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_tunnel_info tunn; /* @DPDK */
+ uint16_t udp_port;
+ int rc;
+
+ PMD_INIT_FUNC_TRACE(edev);
+
+ memset(&tunn, 0, sizeof(tunn));
+
+ switch (tunnel_udp->prot_type) {
+ case RTE_TUNNEL_TYPE_VXLAN:
+ if (qdev->vxlan.udp_port == tunnel_udp->udp_port) {
+ DP_INFO(edev,
+ "UDP port %u for VXLAN was already configured\n",
+ tunnel_udp->udp_port);
+ return ECORE_SUCCESS;
+ }
+
+ /* Enable VxLAN tunnel with default MAC/VLAN classification if
+ * it was not enabled while adding VXLAN filter before UDP port
+ * update.
+ */
+ if (!qdev->vxlan.enable) {
+ rc = qede_vxlan_enable(eth_dev,
+ ECORE_TUNN_CLSS_MAC_VLAN, true);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Failed to enable VXLAN "
+ "prior to updating UDP port\n");
+ return rc;
+ }
+ }
+ udp_port = tunnel_udp->udp_port;
+
+ tunn.vxlan_port.b_update_port = true;
+ tunn.vxlan_port.port = udp_port;
+
+ rc = qede_tunnel_update(qdev, &tunn);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Unable to config UDP port %u for VXLAN\n",
+ udp_port);
+ return rc;
+ }
+
+ DP_INFO(edev, "Updated UDP port %u for VXLAN\n", udp_port);
+
+ qdev->vxlan.udp_port = udp_port;
+ break;
+ case RTE_TUNNEL_TYPE_GENEVE:
+ if (qdev->geneve.udp_port == tunnel_udp->udp_port) {
+ DP_INFO(edev,
+ "UDP port %u for GENEVE was already configured\n",
+ tunnel_udp->udp_port);
+ return ECORE_SUCCESS;
+ }
+
+ /* Enable GENEVE tunnel with default MAC/VLAN classification if
+ * it was not enabled while adding GENEVE filter before UDP port
+ * update.
+ */
+ if (!qdev->geneve.enable) {
+ rc = qede_geneve_enable(eth_dev,
+ ECORE_TUNN_CLSS_MAC_VLAN, true);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Failed to enable GENEVE "
+ "prior to updating UDP port\n");
+ return rc;
+ }
+ }
+ udp_port = tunnel_udp->udp_port;
+
+ tunn.geneve_port.b_update_port = true;
+ tunn.geneve_port.port = udp_port;
+
+ rc = qede_tunnel_update(qdev, &tunn);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Unable to config UDP port %u for GENEVE\n",
+ udp_port);
+ return rc;
+ }
+
+ DP_INFO(edev, "Updated UDP port %u for GENEVE\n", udp_port);
+
+ qdev->geneve.udp_port = udp_port;
+ break;
+ default:
+ return ECORE_INVAL;
+ }
+
+ return 0;
+}
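As a usage sketch (not part of this patch), an application normally reaches the UDP tunnel-port handlers above through the generic ethdev API; the port number and port_id below are illustrative values only, assuming <rte_ethdev.h> is included:

	struct rte_eth_udp_tunnel tunnel_udp = {
		.udp_port = 4790,			/* example VXLAN UDP port */
		.prot_type = RTE_TUNNEL_TYPE_VXLAN,
	};
	/* port_id identifies the qede ethdev owned by the application */
	int ret = rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel_udp);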
+
+static void qede_get_ecore_tunn_params(uint32_t filter, uint32_t *type,
+ uint32_t *clss, char *str)
+{
+ uint16_t j;
+ *clss = MAX_ECORE_TUNN_CLSS;
+
+ for (j = 0; j < RTE_DIM(qede_tunn_types); j++) {
+ if (filter == qede_tunn_types[j].rte_filter_type) {
+ *type = qede_tunn_types[j].qede_type;
+ *clss = qede_tunn_types[j].qede_tunn_clss;
+ strcpy(str, qede_tunn_types[j].string);
+ return;
+ }
+ }
+}
+
+static int
+qede_set_ucast_tunn_cmn_param(struct ecore_filter_ucast *ucast,
+ const struct rte_eth_tunnel_filter_conf *conf,
+ uint32_t type)
+{
+	/* Init common ucast params first */
+ qede_set_ucast_cmn_params(ucast);
+
+ /* Copy out the required fields based on classification type */
+ ucast->type = type;
+
+ switch (type) {
+ case ECORE_FILTER_VNI:
+ ucast->vni = conf->tenant_id;
+ break;
+ case ECORE_FILTER_INNER_VLAN:
+ ucast->vlan = conf->inner_vlan;
+ break;
+ case ECORE_FILTER_MAC:
+ memcpy(ucast->mac, conf->outer_mac.addr_bytes,
+ ETHER_ADDR_LEN);
+ break;
+ case ECORE_FILTER_INNER_MAC:
+ memcpy(ucast->mac, conf->inner_mac.addr_bytes,
+ ETHER_ADDR_LEN);
+ break;
+ case ECORE_FILTER_MAC_VNI_PAIR:
+ memcpy(ucast->mac, conf->outer_mac.addr_bytes,
+ ETHER_ADDR_LEN);
+ ucast->vni = conf->tenant_id;
+ break;
+ case ECORE_FILTER_INNER_MAC_VNI_PAIR:
+ memcpy(ucast->mac, conf->inner_mac.addr_bytes,
+ ETHER_ADDR_LEN);
+ ucast->vni = conf->tenant_id;
+ break;
+ case ECORE_FILTER_INNER_PAIR:
+ memcpy(ucast->mac, conf->inner_mac.addr_bytes,
+ ETHER_ADDR_LEN);
+ ucast->vlan = conf->inner_vlan;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+static int
+_qede_tunn_filter_config(struct rte_eth_dev *eth_dev,
+ const struct rte_eth_tunnel_filter_conf *conf,
+ __attribute__((unused)) enum rte_filter_op filter_op,
+ enum ecore_tunn_clss *clss,
+ bool add)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_filter_ucast ucast = {0};
+ enum ecore_filter_ucast_type type;
+ uint16_t filter_type = 0;
+ char str[80];
+ int rc;
+
+ filter_type = conf->filter_type;
+ /* Determine if the given filter classification is supported */
+ qede_get_ecore_tunn_params(filter_type, &type, clss, str);
+ if (*clss == MAX_ECORE_TUNN_CLSS) {
+ DP_ERR(edev, "Unsupported filter type\n");
+ return -EINVAL;
+ }
+ /* Init tunnel ucast params */
+ rc = qede_set_ucast_tunn_cmn_param(&ucast, conf, type);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Unsupported Tunnel filter type 0x%x\n",
+ conf->filter_type);
+ return rc;
+ }
+ DP_INFO(edev, "Rule: \"%s\", op %d, type 0x%x\n",
+ str, filter_op, ucast.type);
+
+ ucast.opcode = add ? ECORE_FILTER_ADD : ECORE_FILTER_REMOVE;
+
+ /* Skip MAC/VLAN if filter is based on VNI */
+ if (!(filter_type & ETH_TUNNEL_FILTER_TENID)) {
+ rc = qede_mac_int_ops(eth_dev, &ucast, add);
+ if (rc == 0 && add) {
+ /* Enable accept anyvlan */
+ qede_config_accept_any_vlan(qdev, true);
+ }
+ } else {
+ rc = qede_ucast_filter(eth_dev, &ucast, add);
+ if (rc == 0)
+ rc = ecore_filter_ucast_cmd(edev, &ucast,
+ ECORE_SPQ_MODE_CB, NULL);
+ }
+
+ return rc;
+}
+
+static int
+qede_tunn_enable(struct rte_eth_dev *eth_dev, uint8_t clss,
+ enum rte_eth_tunnel_type tunn_type, bool enable)
+{
+ int rc = -EINVAL;
+
+ switch (tunn_type) {
+ case RTE_TUNNEL_TYPE_VXLAN:
+ rc = qede_vxlan_enable(eth_dev, clss, enable);
+ break;
+ case RTE_TUNNEL_TYPE_GENEVE:
+ rc = qede_geneve_enable(eth_dev, clss, enable);
+ break;
+ case RTE_TUNNEL_TYPE_IP_IN_GRE:
+ rc = qede_ipgre_enable(eth_dev, clss, enable);
+ break;
+ default:
+ rc = -EINVAL;
+ break;
+ }
+
+ return rc;
+}
+
+static int
+qede_tunn_filter_config(struct rte_eth_dev *eth_dev,
+ enum rte_filter_op filter_op,
+ const struct rte_eth_tunnel_filter_conf *conf)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ enum ecore_tunn_clss clss = MAX_ECORE_TUNN_CLSS;
+ bool add;
+ int rc;
+
+ PMD_INIT_FUNC_TRACE(edev);
+
+ switch (filter_op) {
+ case RTE_ETH_FILTER_ADD:
+ add = true;
+ break;
+ case RTE_ETH_FILTER_DELETE:
+ add = false;
+ break;
+ default:
+ DP_ERR(edev, "Unsupported operation %d\n", filter_op);
+ return -EINVAL;
+ }
+
+ if (IS_VF(edev))
+ return qede_tunn_enable(eth_dev,
+ ECORE_TUNN_CLSS_MAC_VLAN,
+ conf->tunnel_type, add);
+
+ rc = _qede_tunn_filter_config(eth_dev, conf, filter_op, &clss, add);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ if (add) {
+ if (conf->tunnel_type == RTE_TUNNEL_TYPE_VXLAN) {
+ qdev->vxlan.num_filters++;
+ qdev->vxlan.filter_type = conf->filter_type;
+ } else { /* GENEVE */
+ qdev->geneve.num_filters++;
+ qdev->geneve.filter_type = conf->filter_type;
+ }
+
+ if (!qdev->vxlan.enable || !qdev->geneve.enable ||
+ !qdev->ipgre.enable)
+ return qede_tunn_enable(eth_dev, clss,
+ conf->tunnel_type,
+ true);
+ } else {
+ if (conf->tunnel_type == RTE_TUNNEL_TYPE_VXLAN)
+ qdev->vxlan.num_filters--;
+ else /*GENEVE*/
+ qdev->geneve.num_filters--;
+
+		/* Disable tunnel offload if the VXLAN or GENEVE filter count drops to 0 */
+ if (qdev->vxlan.num_filters == 0 ||
+ qdev->geneve.num_filters == 0)
+ return qede_tunn_enable(eth_dev, clss,
+ conf->tunnel_type,
+ false);
+ }
+
+ return 0;
+}
+
+static int
+qede_flow_validate_attr(__attribute__((unused))struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
+{
+ if (attr == NULL) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR, NULL,
+ "NULL attribute");
+ return -rte_errno;
+ }
+
+ if (attr->group != 0) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
+ "Groups are not supported");
+ return -rte_errno;
+ }
+
+ if (attr->priority != 0) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, attr,
+ "Priorities are not supported");
+ return -rte_errno;
+ }
+
+ if (attr->egress != 0) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
+ "Egress is not supported");
+ return -rte_errno;
+ }
+
+ if (attr->transfer != 0) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, attr,
+ "Transfer is not supported");
+ return -rte_errno;
+ }
+
+ if (attr->ingress == 0) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
+ "Only ingress is supported");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static int
+qede_flow_parse_pattern(__attribute__((unused))struct rte_eth_dev *dev,
+ const struct rte_flow_item pattern[],
+ struct rte_flow_error *error,
+ struct rte_flow *flow)
+{
+ bool l3 = false, l4 = false;
+
+ if (pattern == NULL) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
+ "NULL pattern");
+ return -rte_errno;
+ }
+
+ for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
+ if (!pattern->spec) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ pattern,
+ "Item spec not defined");
+ return -rte_errno;
+ }
+
+ if (pattern->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ pattern,
+ "Item last not supported");
+ return -rte_errno;
+ }
+
+ if (pattern->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ pattern,
+ "Item mask not supported");
+ return -rte_errno;
+ }
+
+ /* The validation below is only for the 4-tuple flow
+ * (GFT_PROFILE_TYPE_4_TUPLE):
+ * - src and dst L3 address (IPv4 or IPv6)
+ * - src and dst L4 port (TCP or UDP)
+ */
+
+ switch (pattern->type) {
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ l3 = true;
+
+ if (flow) {
+ const struct rte_flow_item_ipv4 *spec;
+
+ spec = pattern->spec;
+ flow->entry.tuple.src_ipv4 = spec->hdr.src_addr;
+ flow->entry.tuple.dst_ipv4 = spec->hdr.dst_addr;
+ flow->entry.tuple.eth_proto = ETHER_TYPE_IPv4;
+ }
+ break;
+
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ l3 = true;
+
+ if (flow) {
+ const struct rte_flow_item_ipv6 *spec;
+
+ spec = pattern->spec;
+ rte_memcpy(flow->entry.tuple.src_ipv6,
+ spec->hdr.src_addr,
+ IPV6_ADDR_LEN);
+ rte_memcpy(flow->entry.tuple.dst_ipv6,
+ spec->hdr.dst_addr,
+ IPV6_ADDR_LEN);
+ flow->entry.tuple.eth_proto = ETHER_TYPE_IPv6;
+ }
+ break;
+
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ l4 = true;
+
+ if (flow) {
+ const struct rte_flow_item_udp *spec;
+
+ spec = pattern->spec;
+ flow->entry.tuple.src_port =
+ spec->hdr.src_port;
+ flow->entry.tuple.dst_port =
+ spec->hdr.dst_port;
+ flow->entry.tuple.ip_proto = IPPROTO_UDP;
+ }
+ break;
+
+ case RTE_FLOW_ITEM_TYPE_TCP:
+ l4 = true;
+
+ if (flow) {
+ const struct rte_flow_item_tcp *spec;
+
+ spec = pattern->spec;
+ flow->entry.tuple.src_port =
+ spec->hdr.src_port;
+ flow->entry.tuple.dst_port =
+ spec->hdr.dst_port;
+ flow->entry.tuple.ip_proto = IPPROTO_TCP;
+ }
+
+ break;
+ default:
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ pattern,
+ "Only 4 tuple (IPV4, IPV6, UDP and TCP) item types supported");
+ return -rte_errno;
+ }
+ }
+
+ if (!(l3 && l4)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ pattern,
+ "Item types need to have both L3 and L4 protocols");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static int
+qede_flow_parse_actions(struct rte_eth_dev *dev,
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ struct rte_flow *flow)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
+ const struct rte_flow_action_queue *queue;
+
+ if (actions == NULL) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
+ "NULL actions");
+ return -rte_errno;
+ }
+
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+ switch (actions->type) {
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ queue = actions->conf;
+
+ if (queue->index >= QEDE_RSS_COUNT(qdev)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "Bad QUEUE action");
+ return -rte_errno;
+ }
+
+ if (flow)
+ flow->entry.rx_queue = queue->index;
+
+ break;
+
+ default:
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "Action is not supported - only ACTION_TYPE_QUEUE supported");
+ return -rte_errno;
+ }
+ }
+
+ return 0;
+}
+
+static int
+qede_flow_parse(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item patterns[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ struct rte_flow *flow)
+
+{
+ int rc = 0;
+
+ rc = qede_flow_validate_attr(dev, attr, error);
+ if (rc)
+ return rc;
+
+ /* Parse and validate the item pattern and actions.
+ * The given item list and actions are translated into the qede
+ * PMD-specific arfs structure.
+ */
+ rc = qede_flow_parse_pattern(dev, patterns, error, flow);
+ if (rc)
+ return rc;
+
+ rc = qede_flow_parse_actions(dev, actions, error, flow);
+
+ return rc;
+}
+
+static int
+qede_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item patterns[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ return qede_flow_parse(dev, attr, patterns, actions, error, NULL);
+}
+
+static struct rte_flow *
+qede_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct rte_flow *flow = NULL;
+ int rc;
+
+ flow = rte_zmalloc("qede_rte_flow", sizeof(*flow), 0);
+ if (flow == NULL) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Failed to allocate memory");
+ return NULL;
+ }
+
+ rc = qede_flow_parse(dev, attr, pattern, actions, error, flow);
+ if (rc < 0) {
+ rte_free(flow);
+ return NULL;
+ }
+
+ rc = qede_config_arfs_filter(dev, &flow->entry, true);
+ if (rc < 0) {
+ rte_flow_error_set(error, rc,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to configure flow filter");
+ rte_free(flow);
+ return NULL;
+ }
+
+ return flow;
+}
+
+static int
+qede_flow_destroy(struct rte_eth_dev *eth_dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ int rc = 0;
+
+ rc = qede_config_arfs_filter(eth_dev, &flow->entry, false);
+ if (rc < 0) {
+ rte_flow_error_set(error, rc,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to delete flow filter");
+ rte_free(flow);
+ }
+
+ return rc;
+}
+
+const struct rte_flow_ops qede_flow_ops = {
+ .validate = qede_flow_validate,
+ .create = qede_flow_create,
+ .destroy = qede_flow_destroy,
+};
+
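The ops table above is reached through the generic rte_flow API once qede_dev_filter_ctrl() (below) returns it for RTE_ETH_FILTER_GENERIC. A minimal usage sketch of the 4-tuple match this PMD accepts - an L3 and L4 spec with no mask or last, and a single QUEUE action; the addresses, ports, queue index and helper name are illustrative only, not part of this patch:

#include <rte_flow.h>
#include <rte_byteorder.h>

static struct rte_flow *
example_install_4tuple_flow(uint16_t port_id, struct rte_flow_error *err)
{
	struct rte_flow_attr attr = { .ingress = 1 };	/* only ingress is accepted */
	struct rte_flow_item_ipv4 ip_spec = {
		.hdr.src_addr = rte_cpu_to_be_32(0x0a000001),	/* 10.0.0.1 */
		.hdr.dst_addr = rte_cpu_to_be_32(0x0a000002),	/* 10.0.0.2 */
	};
	struct rte_flow_item_udp udp_spec = {
		.hdr.src_port = rte_cpu_to_be_16(6000),
		.hdr.dst_port = rte_cpu_to_be_16(6001),
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ip_spec },	/* mask/last stay NULL */
		{ .type = RTE_FLOW_ITEM_TYPE_UDP,  .spec = &udp_spec },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 1 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	if (rte_flow_validate(port_id, &attr, pattern, actions, err) != 0)
		return NULL;
	return rte_flow_create(port_id, &attr, pattern, actions, err);
}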
+int qede_dev_filter_ctrl(struct rte_eth_dev *eth_dev,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct rte_eth_tunnel_filter_conf *filter_conf =
+ (struct rte_eth_tunnel_filter_conf *)arg;
+
+ switch (filter_type) {
+ case RTE_ETH_FILTER_TUNNEL:
+ switch (filter_conf->tunnel_type) {
+ case RTE_TUNNEL_TYPE_VXLAN:
+ case RTE_TUNNEL_TYPE_GENEVE:
+ case RTE_TUNNEL_TYPE_IP_IN_GRE:
+ DP_INFO(edev,
+ "Packet steering to the specified Rx queue"
+ " is not supported with UDP tunneling");
+ return(qede_tunn_filter_config(eth_dev, filter_op,
+ filter_conf));
+ case RTE_TUNNEL_TYPE_TEREDO:
+ case RTE_TUNNEL_TYPE_NVGRE:
+ case RTE_L2_TUNNEL_TYPE_E_TAG:
+ DP_ERR(edev, "Unsupported tunnel type %d\n",
+ filter_conf->tunnel_type);
+ return -EINVAL;
+ case RTE_TUNNEL_TYPE_NONE:
+ default:
+ return 0;
+ }
+ break;
+ case RTE_ETH_FILTER_FDIR:
+ return qede_fdir_filter_conf(eth_dev, filter_op, arg);
+ case RTE_ETH_FILTER_NTUPLE:
+ return qede_ntuple_filter_conf(eth_dev, filter_op, arg);
+ case RTE_ETH_FILTER_GENERIC:
+ if (ECORE_IS_CMT(edev)) {
+ DP_ERR(edev, "flowdir is not supported in 100G mode\n");
+ return -ENOTSUP;
+ }
+
+ if (filter_op != RTE_ETH_FILTER_GET)
+ return -EINVAL;
+
+ *(const void **)arg = &qede_flow_ops;
+ return 0;
+ case RTE_ETH_FILTER_MACVLAN:
+ case RTE_ETH_FILTER_ETHERTYPE:
+ case RTE_ETH_FILTER_FLEXIBLE:
+ case RTE_ETH_FILTER_SYN:
+ case RTE_ETH_FILTER_HASH:
+ case RTE_ETH_FILTER_L2_TUNNEL:
+ case RTE_ETH_FILTER_MAX:
+ default:
+ DP_ERR(edev, "Unsupported filter type %d\n",
+ filter_type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
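For the legacy RTE_ETH_FILTER_TUNNEL path handled above, applications go through rte_eth_dev_filter_ctrl(). A minimal sketch, assuming classification on the inner MAC of a VXLAN tunnel; the MAC address, VNI, port and helper name are illustrative values only:

#include <rte_ethdev.h>

static int example_add_vxlan_filter(uint16_t port_id)
{
	/* Note: steering to queue_id is not honoured by this PMD for UDP
	 * tunnels (see the DP_INFO message above).
	 */
	const struct ether_addr inner_mac = {
		.addr_bytes = { 0x52, 0x54, 0x00, 0xaa, 0xbb, 0xcc }
	};
	struct rte_eth_tunnel_filter_conf conf = {
		.inner_mac = inner_mac,
		.filter_type = ETH_TUNNEL_FILTER_IMAC,	/* match on inner MAC */
		.tunnel_type = RTE_TUNNEL_TYPE_VXLAN,
		.tenant_id = 100,			/* example VNI */
		.queue_id = 0,
	};

	return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_TUNNEL,
				       RTE_ETH_FILTER_ADD, &conf);
}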
diff --git a/drivers/net/qede/qede_main.c b/drivers/net/qede/qede_main.c
index 46fa8371..df83666f 100644
--- a/drivers/net/qede/qede_main.c
+++ b/drivers/net/qede/qede_main.c
@@ -15,10 +15,10 @@
#define QEDE_ALARM_TIMEOUT_US 100000
/* Global variable to hold absolute path of fw file */
-char fw_file[PATH_MAX];
+char qede_fw_file[PATH_MAX];
-const char *QEDE_DEFAULT_FIRMWARE =
- "/lib/firmware/qed/qed_init_values-8.33.12.0.bin";
+static const char * const QEDE_DEFAULT_FIRMWARE =
+ "/lib/firmware/qed/qed_init_values-8.37.7.0.bin";
static void
qed_update_pf_params(struct ecore_dev *edev, struct ecore_pf_params *params)
@@ -126,11 +126,11 @@ static int qed_load_firmware_data(struct ecore_dev *edev)
const char *fw = RTE_LIBRTE_QEDE_FW;
if (strcmp(fw, "") == 0)
- strcpy(fw_file, QEDE_DEFAULT_FIRMWARE);
+ strcpy(qede_fw_file, QEDE_DEFAULT_FIRMWARE);
else
- strcpy(fw_file, fw);
+ strcpy(qede_fw_file, fw);
- fd = open(fw_file, O_RDONLY);
+ fd = open(qede_fw_file, O_RDONLY);
if (fd < 0) {
DP_ERR(edev, "Can't open firmware file\n");
return -ENOENT;
@@ -234,7 +234,8 @@ static int qed_slowpath_start(struct ecore_dev *edev,
#ifdef CONFIG_ECORE_BINARY_FW
rc = qed_load_firmware_data(edev);
if (rc) {
- DP_ERR(edev, "Failed to find fw file %s\n", fw_file);
+ DP_ERR(edev, "Failed to find fw file %s\n",
+ qede_fw_file);
goto err;
}
#endif
diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c
index 0f157ded..8a4772f4 100644
--- a/drivers/net/qede/qede_rxtx.c
+++ b/drivers/net/qede/qede_rxtx.c
@@ -35,6 +35,49 @@ static inline int qede_alloc_rx_buffer(struct qede_rx_queue *rxq)
return 0;
}
+/* Criteria for calculating the Rx buffer size -
+ * 1) rx_buf_size should not exceed the size of the mbuf
+ * 2) In scattered_rx mode - the minimum rx_buf_size should be
+ * (MTU + Maximum L2 Header Size + 2) / ETH_RX_MAX_BUFF_PER_PKT
+ * 3) In regular mode - the minimum rx_buf_size should be
+ * (MTU + Maximum L2 Header Size + 2)
+ * In the above cases, +2 corresponds to 2 bytes of padding in front
+ * of the L2 header.
+ * 4) rx_buf_size should be cache-line aligned. Considering criterion 1,
+ * the size is floor-aligned rather than ceiling-aligned so that
+ * rx_buf_size never exceeds the mbuf size.
+ */
+int
+qede_calc_rx_buf_size(struct rte_eth_dev *dev, uint16_t mbufsz,
+ uint16_t max_frame_size)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ int rx_buf_size;
+
+ if (dev->data->scattered_rx) {
+ /* Per HW limitation, at most ETH_RX_MAX_BUFF_PER_PKT buffers can
+ * be used for a single packet, so make sure the mbuf size is
+ * large enough to cover this.
+ */
+ if ((mbufsz * ETH_RX_MAX_BUFF_PER_PKT) <
+ (max_frame_size + QEDE_ETH_OVERHEAD)) {
+ DP_ERR(edev, "mbuf %d size is not enough to hold max fragments (%d) for max rx packet length (%d)\n",
+ mbufsz, ETH_RX_MAX_BUFF_PER_PKT, max_frame_size);
+ return -EINVAL;
+ }
+
+ rx_buf_size = RTE_MAX(mbufsz,
+ (max_frame_size + QEDE_ETH_OVERHEAD) /
+ ETH_RX_MAX_BUFF_PER_PKT);
+ } else {
+ rx_buf_size = max_frame_size + QEDE_ETH_OVERHEAD;
+ }
+
+ /* Align to cache-line size if needed */
+ return QEDE_FLOOR_TO_CACHE_LINE_SIZE(rx_buf_size);
+}
+
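A worked example of qede_calc_rx_buf_size() above, assuming ETH_RX_MAX_BUFF_PER_PKT is 5 and a 64-byte cache line (both come from the HW/platform headers and are treated as assumptions here), and QEDE_ETH_OVERHEAD = 2 * 4 (VLAN tags) + 8 (LLC/SNAP) + 2 (front padding) = 18 bytes:

/* Scattered Rx with illustrative numbers:
 *   mbufsz         = 2048
 *   max_frame_size = 9018
 *   per-BD minimum = (9018 + 18) / 5 = 1807
 *   rx_buf_size    = RTE_MAX(2048, 1807) = 2048, already 64-byte aligned
 *   sanity check   : 5 * 2048 = 10240 >= 9036, so the -EINVAL path is not hit
 */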
int
qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc, unsigned int socket_id,
@@ -85,6 +128,8 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
/* Fix up RX buffer size */
bufsz = (uint16_t)rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
+ /* Cache-align the mbuf size to simplify the rx_buf_size calculation */
+ bufsz = QEDE_FLOOR_TO_CACHE_LINE_SIZE(bufsz);
if ((rxmode->offloads & DEV_RX_OFFLOAD_SCATTER) ||
(max_rx_pkt_len + QEDE_ETH_OVERHEAD) > bufsz) {
if (!dev->data->scattered_rx) {
@@ -93,13 +138,13 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
}
}
- if (dev->data->scattered_rx)
- rxq->rx_buf_size = bufsz + ETHER_HDR_LEN +
- ETHER_CRC_LEN + QEDE_ETH_OVERHEAD;
- else
- rxq->rx_buf_size = max_rx_pkt_len + QEDE_ETH_OVERHEAD;
- /* Align to cache-line size if needed */
- rxq->rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rxq->rx_buf_size);
+ rc = qede_calc_rx_buf_size(dev, bufsz, max_rx_pkt_len);
+ if (rc < 0) {
+ rte_free(rxq);
+ return rc;
+ }
+
+ rxq->rx_buf_size = rc;
DP_INFO(edev, "mtu %u mbufsz %u bd_max_bytes %u scatter_mode %d\n",
qdev->mtu, bufsz, rxq->rx_buf_size, dev->data->scattered_rx);
@@ -2106,3 +2151,84 @@ qede_rxtx_pkts_dummy(__rte_unused void *p_rxq,
{
return 0;
}
+
+
+/* This function does a dry-run walk over the completion queue to
+ * calculate the number of BDs used by the HW, and then restores the
+ * completion queue to its original state.
+ */
+static uint16_t
+qede_parse_fp_cqe(struct qede_rx_queue *rxq)
+{
+ uint16_t hw_comp_cons, sw_comp_cons, bd_count = 0;
+ union eth_rx_cqe *cqe, *orig_cqe = NULL;
+
+ hw_comp_cons = rte_le_to_cpu_16(*rxq->hw_cons_ptr);
+ sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
+
+ if (hw_comp_cons == sw_comp_cons)
+ return 0;
+
+ /* Get the CQE from the completion ring */
+ cqe = (union eth_rx_cqe *)ecore_chain_consume(&rxq->rx_comp_ring);
+ orig_cqe = cqe;
+
+ while (sw_comp_cons != hw_comp_cons) {
+ switch (cqe->fast_path_regular.type) {
+ case ETH_RX_CQE_TYPE_REGULAR:
+ bd_count += cqe->fast_path_regular.bd_num;
+ break;
+ case ETH_RX_CQE_TYPE_TPA_END:
+ bd_count += cqe->fast_path_tpa_end.num_of_bds;
+ break;
+ default:
+ break;
+ }
+
+ cqe =
+ (union eth_rx_cqe *)ecore_chain_consume(&rxq->rx_comp_ring);
+ sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
+ }
+
+ /* revert comp_ring to original state */
+ ecore_chain_set_cons(&rxq->rx_comp_ring, sw_comp_cons, orig_cqe);
+
+ return bd_count;
+}
+
+int
+qede_rx_descriptor_status(void *p_rxq, uint16_t offset)
+{
+ uint16_t hw_bd_cons, sw_bd_cons, sw_bd_prod;
+ uint16_t produced, consumed;
+ struct qede_rx_queue *rxq = p_rxq;
+
+ if (offset > rxq->nb_rx_desc)
+ return -EINVAL;
+
+ sw_bd_cons = ecore_chain_get_cons_idx(&rxq->rx_bd_ring);
+ sw_bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring);
+
+ /* find BDs used by HW from completion queue elements */
+ hw_bd_cons = sw_bd_cons + qede_parse_fp_cqe(rxq);
+
+ if (hw_bd_cons < sw_bd_cons)
+ /* wraparound case */
+ consumed = (0xffff - sw_bd_cons) + hw_bd_cons;
+ else
+ consumed = hw_bd_cons - sw_bd_cons;
+
+ if (offset <= consumed)
+ return RTE_ETH_RX_DESC_DONE;
+
+ if (sw_bd_prod < sw_bd_cons)
+ /* wraparound case */
+ produced = (0xffff - sw_bd_cons) + sw_bd_prod;
+ else
+ produced = sw_bd_prod - sw_bd_cons;
+
+ if (offset <= produced)
+ return RTE_ETH_RX_DESC_AVAIL;
+
+ return RTE_ETH_RX_DESC_UNAVAIL;
+}
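A short worked example of the index arithmetic in qede_rx_descriptor_status() above; the 16-bit ring indices are illustrative values:

/* sw_bd_cons = 0xfff0, hw_bd_cons = 0x0010, sw_bd_prod = 0x0020
 *   consumed = (0xffff - 0xfff0) + 0x0010 = 0x1f -> offsets 0..0x1f are DONE
 *   produced = (0xffff - 0xfff0) + 0x0020 = 0x2f -> offsets 0x20..0x2f are AVAIL
 *   offsets above 0x2f (and not above nb_rx_desc) -> UNAVAIL
 */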
diff --git a/drivers/net/qede/qede_rxtx.h b/drivers/net/qede/qede_rxtx.h
index e710fbae..d3a41e92 100644
--- a/drivers/net/qede/qede_rxtx.h
+++ b/drivers/net/qede/qede_rxtx.h
@@ -61,9 +61,16 @@
#define QEDE_FW_RX_ALIGN_END (1UL << QEDE_RX_ALIGN_SHIFT)
#define QEDE_CEIL_TO_CACHE_LINE_SIZE(n) (((n) + (QEDE_FW_RX_ALIGN_END - 1)) & \
~(QEDE_FW_RX_ALIGN_END - 1))
-/* Note: QEDE_LLC_SNAP_HDR_LEN is optional */
-#define QEDE_ETH_OVERHEAD (((2 * QEDE_VLAN_TAG_SIZE)) - (ETHER_CRC_LEN) \
- + (QEDE_LLC_SNAP_HDR_LEN))
+#define QEDE_FLOOR_TO_CACHE_LINE_SIZE(n) RTE_ALIGN_FLOOR(n, \
+ QEDE_FW_RX_ALIGN_END)
+
+/* Note: QEDE_LLC_SNAP_HDR_LEN is optional,
+ * +2 is for padding in front of L2 header
+ */
+#define QEDE_ETH_OVERHEAD (((2 * QEDE_VLAN_TAG_SIZE)) \
+ + (QEDE_LLC_SNAP_HDR_LEN) + 2)
+
+#define QEDE_MAX_ETHER_HDR_LEN (ETHER_HDR_LEN + QEDE_ETH_OVERHEAD)
#define QEDE_RSS_OFFLOAD_ALL (ETH_RSS_IPV4 |\
ETH_RSS_NONFRAG_IPV4_TCP |\
@@ -267,6 +274,10 @@ uint16_t qede_rxtx_pkts_dummy(void *p_rxq,
int qede_start_queues(struct rte_eth_dev *eth_dev);
void qede_stop_queues(struct rte_eth_dev *eth_dev);
+int qede_calc_rx_buf_size(struct rte_eth_dev *dev, uint16_t mbufsz,
+ uint16_t max_frame_size);
+int
+qede_rx_descriptor_status(void *rxq, uint16_t offset);
/* Fastpath resource alloc/dealloc helpers */
int qede_alloc_fp_resc(struct qede_dev *qdev);