Diffstat (limited to 'drivers/net/qede/base')
60 files changed, 1979 insertions, 1091 deletions
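The first hunks below, in bcm_osal.c, move the driver's coherent DMA allocations onto the master lcore's NUMA socket and request IOVA-contiguous memzones instead of plain reservations; the same hunk also compacts the ecore_mz_mapping[] tracking array on free. A minimal sketch of the allocation pattern, assuming DPDK 18.05+ memzone APIs (the helper name and the pointer-based memzone name are illustrative, not the driver's own):

#include <stdio.h>
#include <rte_lcore.h>
#include <rte_memzone.h>

/* Illustrative helper, not driver code: reserve device-DMA-able memory. */
static void *dma_alloc_contig(size_t size, rte_iova_t *iova)
{
	char name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;
	unsigned int lcore = rte_lcore_id();

	/* Non-EAL threads report LCORE_ID_ANY; fall back to the master lcore
	 * so the memzone lands on a deterministic NUMA socket.
	 */
	if (lcore == LCORE_ID_ANY)
		lcore = rte_get_master_lcore();

	snprintf(name, sizeof(name), "dma_%p", (void *)iova);

	/* RTE_MEMZONE_IOVA_CONTIG asks EAL for IOVA-contiguous memory, which
	 * the device's DMA engine needs for multi-page buffers.
	 */
	mz = rte_memzone_reserve_aligned(name, size,
					 rte_lcore_to_socket_id(lcore),
					 RTE_MEMZONE_IOVA_CONTIG,
					 RTE_CACHE_LINE_SIZE);
	if (mz == NULL)
		return NULL;

	*iova = mz->iova;
	return mz->addr;
}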
diff --git a/drivers/net/qede/base/bcm_osal.c b/drivers/net/qede/base/bcm_osal.c index fe42f325..d5d6f8e2 100644 --- a/drivers/net/qede/base/bcm_osal.c +++ b/drivers/net/qede/base/bcm_osal.c @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #include <rte_memzone.h> @@ -133,10 +131,10 @@ void *osal_dma_alloc_coherent(struct ecore_dev *p_dev, snprintf(mz_name, sizeof(mz_name) - 1, "%lx", (unsigned long)rte_get_timer_cycles()); if (core_id == (unsigned int)LCORE_ID_ANY) - core_id = 0; + core_id = rte_get_master_lcore(); socket_id = rte_lcore_to_socket_id(core_id); - mz = rte_memzone_reserve_aligned(mz_name, size, - socket_id, 0, RTE_CACHE_LINE_SIZE); + mz = rte_memzone_reserve_aligned(mz_name, size, socket_id, + RTE_MEMZONE_IOVA_CONTIG, RTE_CACHE_LINE_SIZE); if (!mz) { DP_ERR(p_dev, "Unable to allocate DMA memory " "of size %zu bytes - %s\n", @@ -172,9 +170,10 @@ void *osal_dma_alloc_coherent_aligned(struct ecore_dev *p_dev, snprintf(mz_name, sizeof(mz_name) - 1, "%lx", (unsigned long)rte_get_timer_cycles()); if (core_id == (unsigned int)LCORE_ID_ANY) - core_id = 0; + core_id = rte_get_master_lcore(); socket_id = rte_lcore_to_socket_id(core_id); - mz = rte_memzone_reserve_aligned(mz_name, size, socket_id, 0, align); + mz = rte_memzone_reserve_aligned(mz_name, size, socket_id, + RTE_MEMZONE_IOVA_CONTIG, align); if (!mz) { DP_ERR(p_dev, "Unable to allocate DMA memory " "of size %zu bytes - %s\n", @@ -200,6 +199,11 @@ void osal_dma_free_mem(struct ecore_dev *p_dev, dma_addr_t phys) DP_VERBOSE(p_dev, ECORE_MSG_SP, "Free memzone %s\n", ecore_mz_mapping[j]->name); rte_memzone_free(ecore_mz_mapping[j]); + while (j < ecore_mz_count - 1) { + ecore_mz_mapping[j] = ecore_mz_mapping[j + 1]; + j++; + } + ecore_mz_count--; return; } } diff --git a/drivers/net/qede/base/bcm_osal.h b/drivers/net/qede/base/bcm_osal.h index 52c2f0ec..630867fa 100644 --- a/drivers/net/qede/base/bcm_osal.h +++ b/drivers/net/qede/base/bcm_osal.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef __BCM_OSAL_H diff --git a/drivers/net/qede/base/common_hsi.h b/drivers/net/qede/base/common_hsi.h index 9a6059ac..ca8e59db 100644 --- a/drivers/net/qede/base/common_hsi.h +++ b/drivers/net/qede/base/common_hsi.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. 
+ * www.cavium.com */ #ifndef __COMMON_HSI__ @@ -96,10 +94,10 @@ /****************************************************************************/ -#define FW_MAJOR_VERSION 8 -#define FW_MINOR_VERSION 30 -#define FW_REVISION_VERSION 12 -#define FW_ENGINEERING_VERSION 0 +#define FW_MAJOR_VERSION 8 +#define FW_MINOR_VERSION 33 +#define FW_REVISION_VERSION 12 +#define FW_ENGINEERING_VERSION 0 /***********************/ /* COMMON HW CONSTANTS */ diff --git a/drivers/net/qede/base/ecore.h b/drivers/net/qede/base/ecore.h index ce5f3a90..5d79fdf0 100644 --- a/drivers/net/qede/base/ecore.h +++ b/drivers/net/qede/base/ecore.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef __ECORE_H @@ -41,6 +39,9 @@ ((FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) | \ (FW_REVISION_VERSION << 8) | FW_ENGINEERING_VERSION) +#define IS_ECORE_PACING(p_hwfn) \ + (!!(p_hwfn->b_en_pacing)) + #define MAX_HWFNS_PER_DEVICE 2 #define NAME_SIZE 128 /* @DPDK */ #define ECORE_WFQ_UNIT 100 @@ -432,8 +433,10 @@ struct ecore_hw_info { #define DMAE_MAX_RW_SIZE 0x2000 struct ecore_dmae_info { - /* Mutex for synchronizing access to functions */ - osal_mutex_t mutex; + /* Spinlock for synchronizing access to functions */ + osal_spinlock_t lock; + + bool b_mem_ready; u8 channel; @@ -534,6 +537,12 @@ enum ecore_mf_mode_bit { ECORE_MF_UFP_SPECIFIC, ECORE_MF_DISABLE_ARFS, + + /* Use vlan for steering */ + ECORE_MF_8021Q_TAGGING, + + /* Use stag for steering */ + ECORE_MF_8021AD_TAGGING, }; enum ecore_ufp_mode { @@ -672,6 +681,13 @@ struct ecore_hwfn { /* Mechanism for recovering from doorbell drop */ struct ecore_db_recovery_info db_recovery_info; + /* Enable/disable pacing, if request to enable then + * IOV and mcos configuration will be skipped. + * this actually reflects the value requested in + * struct ecore_hw_prepare_params by ecore client. + */ + bool b_en_pacing; + /* @DPDK */ struct ecore_ptt *p_arfs_ptt; }; @@ -924,12 +940,16 @@ void ecore_set_fw_mac_addr(__le16 *fw_msb, __le16 *fw_mid, __le16 *fw_lsb, #define PQ_FLAGS_ACK (1 << 4) #define PQ_FLAGS_OFLD (1 << 5) #define PQ_FLAGS_VFS (1 << 6) +#define PQ_FLAGS_LLT (1 << 7) /* physical queue index for cm context intialization */ u16 ecore_get_cm_pq_idx(struct ecore_hwfn *p_hwfn, u32 pq_flags); u16 ecore_get_cm_pq_idx_mcos(struct ecore_hwfn *p_hwfn, u8 tc); u16 ecore_get_cm_pq_idx_vf(struct ecore_hwfn *p_hwfn, u16 vf); -u16 ecore_get_cm_pq_idx_rl(struct ecore_hwfn *p_hwfn, u8 qpid); +u16 ecore_get_cm_pq_idx_rl(struct ecore_hwfn *p_hwfn, u16 rl); + +/* qm vport for rate limit configuration */ +u16 ecore_get_qm_vport_idx_rl(struct ecore_hwfn *p_hwfn, u16 rl); const char *ecore_hw_get_resc_name(enum ecore_resources res_id); diff --git a/drivers/net/qede/base/ecore_attn_values.h b/drivers/net/qede/base/ecore_attn_values.h index d8951bcc..ec773fbd 100644 --- a/drivers/net/qede/base/ecore_attn_values.h +++ b/drivers/net/qede/base/ecore_attn_values.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. 
+ * www.cavium.com */ #ifndef __ATTN_VALUES_H__ diff --git a/drivers/net/qede/base/ecore_chain.h b/drivers/net/qede/base/ecore_chain.h index d8f69ad6..6d0382d3 100644 --- a/drivers/net/qede/base/ecore_chain.h +++ b/drivers/net/qede/base/ecore_chain.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef __ECORE_CHAIN_H__ @@ -526,7 +524,7 @@ static OSAL_INLINE void ecore_chain_reset(struct ecore_chain *p_chain) p_chain->p_prod_elem = p_chain->p_virt_addr; if (p_chain->mode == ECORE_CHAIN_MODE_PBL) { - /* Use (page_cnt - 1) as a reset value for the prod/cons page's + /* Use "page_cnt-1" as a reset value for the prod/cons page's * indices, to avoid unnecessary page advancing on the first * call to ecore_chain_produce/consume. Instead, the indices * will be advanced to page_cnt and then will be wrapped to 0. @@ -726,6 +724,21 @@ out: static OSAL_INLINE void ecore_chain_set_prod(struct ecore_chain *p_chain, u32 prod_idx, void *p_prod_elem) { + if (p_chain->mode == ECORE_CHAIN_MODE_PBL) { + /* Use "prod_idx-1" since ecore_chain_produce() advances the + * page index before the producer index when getting to + * "next_page_mask". + */ + u32 elem_idx = + (prod_idx - 1 + p_chain->capacity) % p_chain->capacity; + u32 page_idx = elem_idx / p_chain->elem_per_page; + + if (is_chain_u16(p_chain)) + p_chain->pbl.c.u16.prod_page_idx = (u16)page_idx; + else + p_chain->pbl.c.u32.prod_page_idx = page_idx; + } + if (is_chain_u16(p_chain)) p_chain->u.chain16.prod_idx = (u16)prod_idx; else @@ -734,6 +747,38 @@ static OSAL_INLINE void ecore_chain_set_prod(struct ecore_chain *p_chain, } /** + * @brief ecore_chain_set_cons - sets the cons to the given value + * + * @param cons_idx + * @param p_cons_elem + */ +static OSAL_INLINE void ecore_chain_set_cons(struct ecore_chain *p_chain, + u32 cons_idx, void *p_cons_elem) +{ + if (p_chain->mode == ECORE_CHAIN_MODE_PBL) { + /* Use "cons_idx-1" since ecore_chain_consume() advances the + * page index before the consumer index when getting to + * "next_page_mask". + */ + u32 elem_idx = + (cons_idx - 1 + p_chain->capacity) % p_chain->capacity; + u32 page_idx = elem_idx / p_chain->elem_per_page; + + if (is_chain_u16(p_chain)) + p_chain->pbl.c.u16.cons_page_idx = (u16)page_idx; + else + p_chain->pbl.c.u32.cons_page_idx = page_idx; + } + + if (is_chain_u16(p_chain)) + p_chain->u.chain16.cons_idx = (u16)cons_idx; + else + p_chain->u.chain32.cons_idx = cons_idx; + + p_chain->p_cons_elem = p_cons_elem; +} + +/** * @brief ecore_chain_pbl_zero_mem - set chain memory to 0 * * @param p_chain diff --git a/drivers/net/qede/base/ecore_cxt.c b/drivers/net/qede/base/ecore_cxt.c index 50bd66da..bf36ce58 100644 --- a/drivers/net/qede/base/ecore_cxt.c +++ b/drivers/net/qede/base/ecore_cxt.c @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. 
+ * www.cavium.com */ #include "bcm_osal.h" @@ -834,7 +832,7 @@ static enum _ecore_status_t ecore_cxt_src_t2_alloc(struct ecore_hwfn *p_hwfn) p_mngr->t2_num_pages * sizeof(struct ecore_dma_mem)); if (!p_mngr->t2) { - DP_NOTICE(p_hwfn, true, "Failed to allocate t2 table\n"); + DP_NOTICE(p_hwfn, false, "Failed to allocate t2 table\n"); rc = ECORE_NOMEM; goto t2_fail; } @@ -919,6 +917,9 @@ static void ecore_ilt_shadow_free(struct ecore_hwfn *p_hwfn) struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; u32 ilt_size, i; + if (p_mngr->ilt_shadow == OSAL_NULL) + return; + ilt_size = ecore_cxt_ilt_shadow_size(p_cli); for (i = 0; p_mngr->ilt_shadow && i < ilt_size; i++) { @@ -931,6 +932,7 @@ static void ecore_ilt_shadow_free(struct ecore_hwfn *p_hwfn) p_dma->p_virt = OSAL_NULL; } OSAL_FREE(p_hwfn->p_dev, p_mngr->ilt_shadow); + p_mngr->ilt_shadow = OSAL_NULL; } static enum _ecore_status_t @@ -1000,8 +1002,7 @@ static enum _ecore_status_t ecore_ilt_shadow_alloc(struct ecore_hwfn *p_hwfn) size * sizeof(struct ecore_dma_mem)); if (!p_mngr->ilt_shadow) { - DP_NOTICE(p_hwfn, true, - "Failed to allocate ilt shadow table\n"); + DP_NOTICE(p_hwfn, false, "Failed to allocate ilt shadow table\n"); rc = ECORE_NOMEM; goto ilt_shadow_fail; } @@ -1044,12 +1045,14 @@ static void ecore_cid_map_free(struct ecore_hwfn *p_hwfn) for (type = 0; type < MAX_CONN_TYPES; type++) { OSAL_FREE(p_hwfn->p_dev, p_mngr->acquired[type].cid_map); + p_mngr->acquired[type].cid_map = OSAL_NULL; p_mngr->acquired[type].max_count = 0; p_mngr->acquired[type].start_cid = 0; for (vf = 0; vf < COMMON_MAX_NUM_VFS; vf++) { OSAL_FREE(p_hwfn->p_dev, p_mngr->acquired_vf[type][vf].cid_map); + p_mngr->acquired_vf[type][vf].cid_map = OSAL_NULL; p_mngr->acquired_vf[type][vf].max_count = 0; p_mngr->acquired_vf[type][vf].start_cid = 0; } @@ -1126,8 +1129,7 @@ enum _ecore_status_t ecore_cxt_mngr_alloc(struct ecore_hwfn *p_hwfn) p_mngr = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_mngr)); if (!p_mngr) { - DP_NOTICE(p_hwfn, true, - "Failed to allocate `struct ecore_cxt_mngr'\n"); + DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_cxt_mngr'\n"); return ECORE_NOMEM; } @@ -1189,21 +1191,21 @@ enum _ecore_status_t ecore_cxt_tables_alloc(struct ecore_hwfn *p_hwfn) /* Allocate the ILT shadow table */ rc = ecore_ilt_shadow_alloc(p_hwfn); if (rc) { - DP_NOTICE(p_hwfn, true, "Failed to allocate ilt memory\n"); + DP_NOTICE(p_hwfn, false, "Failed to allocate ilt memory\n"); goto tables_alloc_fail; } /* Allocate the T2 table */ rc = ecore_cxt_src_t2_alloc(p_hwfn); if (rc) { - DP_NOTICE(p_hwfn, true, "Failed to allocate T2 memory\n"); + DP_NOTICE(p_hwfn, false, "Failed to allocate T2 memory\n"); goto tables_alloc_fail; } /* Allocate and initialize the acquired cids bitmaps */ rc = ecore_cid_map_alloc(p_hwfn); if (rc) { - DP_NOTICE(p_hwfn, true, "Failed to allocate cid maps\n"); + DP_NOTICE(p_hwfn, false, "Failed to allocate cid maps\n"); goto tables_alloc_fail; } @@ -1427,7 +1429,8 @@ static void ecore_cdu_init_pf(struct ecore_hwfn *p_hwfn) } } -void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) +void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, + bool is_pf_loading) { struct ecore_qm_info *qm_info = &p_hwfn->qm_info; struct ecore_mcp_link_state *p_link; @@ -1438,8 +1441,9 @@ void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) p_link = &ECORE_LEADING_HWFN(p_hwfn->p_dev)->mcp_info->link_output; - ecore_qm_pf_rt_init(p_hwfn, p_ptt, p_hwfn->port_id, - p_hwfn->rel_pf_id, 
qm_info->max_phys_tcs_per_port, + ecore_qm_pf_rt_init(p_hwfn, p_ptt, p_hwfn->rel_pf_id, + qm_info->max_phys_tcs_per_port, + is_pf_loading, iids.cids, iids.vf_cids, iids.tids, qm_info->start_pq, qm_info->num_pqs - qm_info->num_vf_pqs, @@ -1797,7 +1801,7 @@ void ecore_cxt_hw_init_common(struct ecore_hwfn *p_hwfn) void ecore_cxt_hw_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) { - ecore_qm_init_pf(p_hwfn, p_ptt); + ecore_qm_init_pf(p_hwfn, p_ptt, true); ecore_cm_init_pf(p_hwfn); ecore_dq_init_pf(p_hwfn); ecore_cdu_init_pf(p_hwfn); diff --git a/drivers/net/qede/base/ecore_cxt.h b/drivers/net/qede/base/ecore_cxt.h index 54761e4e..f8c955ca 100644 --- a/drivers/net/qede/base/ecore_cxt.h +++ b/drivers/net/qede/base/ecore_cxt.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef _ECORE_CID_ @@ -107,8 +105,10 @@ void ecore_cxt_hw_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt); * * @param p_hwfn * @param p_ptt + * @param is_pf_loading */ -void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt); +void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, + bool is_pf_loading); /** * @brief Reconfigures QM pf on the fly diff --git a/drivers/net/qede/base/ecore_cxt_api.h b/drivers/net/qede/base/ecore_cxt_api.h index 6d87620d..6c8b2831 100644 --- a/drivers/net/qede/base/ecore_cxt_api.h +++ b/drivers/net/qede/base/ecore_cxt_api.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef __ECORE_CXT_API_H__ diff --git a/drivers/net/qede/base/ecore_dcbx.c b/drivers/net/qede/base/ecore_dcbx.c index 21ddda92..96678745 100644 --- a/drivers/net/qede/base/ecore_dcbx.c +++ b/drivers/net/qede/base/ecore_dcbx.c @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. 
+ * www.cavium.com */ #include "bcm_osal.h" @@ -149,6 +147,10 @@ ecore_dcbx_set_params(struct ecore_dcbx_results *p_data, } p_data->arr[type].update = UPDATE_DCB_DSCP; + /* Do not add valn tag 0 when DCB is enabled and port is in UFP mode */ + if (OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits)) + p_data->arr[type].dont_add_vlan0 = true; + /* QM reconf data */ if (p_hwfn->hw_info.personality == personality) p_hwfn->hw_info.offload_tc = tc; @@ -910,7 +912,7 @@ enum _ecore_status_t ecore_dcbx_info_alloc(struct ecore_hwfn *p_hwfn) p_hwfn->p_dcbx_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_hwfn->p_dcbx_info)); if (!p_hwfn->p_dcbx_info) { - DP_NOTICE(p_hwfn, true, + DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_dcbx_info'"); return ECORE_NOMEM; } @@ -935,6 +937,7 @@ static void ecore_dcbx_update_protocol_data(struct protocol_dcb_data *p_data, p_data->dcb_tc = p_src->arr[type].tc; p_data->dscp_enable_flag = p_src->arr[type].dscp_enable; p_data->dscp_val = p_src->arr[type].dscp_val; + p_data->dcb_dont_add_vlan0 = p_src->arr[type].dont_add_vlan0; } /* Set pf update ramrod command params */ diff --git a/drivers/net/qede/base/ecore_dcbx.h b/drivers/net/qede/base/ecore_dcbx.h index 469e42dd..519e6cea 100644 --- a/drivers/net/qede/base/ecore_dcbx.h +++ b/drivers/net/qede/base/ecore_dcbx.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef __ECORE_DCBX_H__ diff --git a/drivers/net/qede/base/ecore_dcbx_api.h b/drivers/net/qede/base/ecore_dcbx_api.h index 9ff4df4c..eaf8e082 100644 --- a/drivers/net/qede/base/ecore_dcbx_api.h +++ b/drivers/net/qede/base/ecore_dcbx_api.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef __ECORE_DCBX_API_H__ @@ -29,6 +27,7 @@ struct ecore_dcbx_app_data { u8 tc; /* Traffic Class */ bool dscp_enable; /* DSCP enabled */ u8 dscp_val; /* DSCP value */ + bool dont_add_vlan0; /* Do not insert a vlan tag with id 0 */ }; #ifndef __EXTRACT__LINUX__ diff --git a/drivers/net/qede/base/ecore_dev.c b/drivers/net/qede/base/ecore_dev.c index 744d2043..31f1f3ee 100644 --- a/drivers/net/qede/base/ecore_dev.c +++ b/drivers/net/qede/base/ecore_dev.c @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #include "bcm_osal.h" @@ -39,7 +37,7 @@ * there's more than a single compiled ecore component in system]. 
*/ static osal_spinlock_t qm_lock; -static bool qm_lock_init; +static u32 qm_lock_ref_cnt; /******************** Doorbell Recovery *******************/ /* The doorbell recovery mechanism consists of a list of entries which represent @@ -227,7 +225,8 @@ enum _ecore_status_t ecore_db_recovery_setup(struct ecore_hwfn *p_hwfn) OSAL_LIST_INIT(&p_hwfn->db_recovery_info.list); #ifdef CONFIG_ECORE_LOCK_ALLOC - OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_hwfn->db_recovery_info.lock); + if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_hwfn->db_recovery_info.lock)) + return ECORE_NOMEM; #endif OSAL_SPIN_LOCK_INIT(&p_hwfn->db_recovery_info.lock); p_hwfn->db_recovery_info.db_recovery_counter = 0; @@ -411,7 +410,7 @@ void ecore_init_dp(struct ecore_dev *p_dev, } } -void ecore_init_struct(struct ecore_dev *p_dev) +enum _ecore_status_t ecore_init_struct(struct ecore_dev *p_dev) { u8 i; @@ -423,9 +422,10 @@ void ecore_init_struct(struct ecore_dev *p_dev) p_hwfn->b_active = false; #ifdef CONFIG_ECORE_LOCK_ALLOC - OSAL_MUTEX_ALLOC(p_hwfn, &p_hwfn->dmae_info.mutex); + if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_hwfn->dmae_info.lock)) + goto handle_err; #endif - OSAL_MUTEX_INIT(&p_hwfn->dmae_info.mutex); + OSAL_SPIN_LOCK_INIT(&p_hwfn->dmae_info.lock); } /* hwfn 0 is always active */ @@ -433,6 +433,17 @@ void ecore_init_struct(struct ecore_dev *p_dev) /* set the default cache alignment to 128 (may be overridden later) */ p_dev->cache_shift = 7; + return ECORE_SUCCESS; +#ifdef CONFIG_ECORE_LOCK_ALLOC +handle_err: + while (--i) { + struct ecore_hwfn *p_hwfn = OSAL_NULL; + + p_hwfn = &p_dev->hwfns[i]; + OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->dmae_info.lock); + } + return ECORE_NOMEM; +#endif } static void ecore_qm_info_free(struct ecore_hwfn *p_hwfn) @@ -500,11 +511,14 @@ static u32 ecore_get_pq_flags(struct ecore_hwfn *p_hwfn) /* feature flags */ if (IS_ECORE_SRIOV(p_hwfn->p_dev)) flags |= PQ_FLAGS_VFS; + if (IS_ECORE_PACING(p_hwfn)) + flags |= PQ_FLAGS_RLS; /* protocol flags */ switch (p_hwfn->hw_info.personality) { case ECORE_PCI_ETH: - flags |= PQ_FLAGS_MCOS; + if (!IS_ECORE_PACING(p_hwfn)) + flags |= PQ_FLAGS_MCOS; break; case ECORE_PCI_FCOE: flags |= PQ_FLAGS_OFLD; @@ -513,11 +527,14 @@ static u32 ecore_get_pq_flags(struct ecore_hwfn *p_hwfn) flags |= PQ_FLAGS_ACK | PQ_FLAGS_OOO | PQ_FLAGS_OFLD; break; case ECORE_PCI_ETH_ROCE: - flags |= PQ_FLAGS_MCOS | PQ_FLAGS_OFLD; + flags |= PQ_FLAGS_OFLD | PQ_FLAGS_LLT; + if (!IS_ECORE_PACING(p_hwfn)) + flags |= PQ_FLAGS_MCOS; break; case ECORE_PCI_ETH_IWARP: - flags |= PQ_FLAGS_MCOS | PQ_FLAGS_ACK | PQ_FLAGS_OOO | - PQ_FLAGS_OFLD; + flags |= PQ_FLAGS_ACK | PQ_FLAGS_OOO | PQ_FLAGS_OFLD; + if (!IS_ECORE_PACING(p_hwfn)) + flags |= PQ_FLAGS_MCOS; break; default: DP_ERR(p_hwfn, "unknown personality %d\n", @@ -721,6 +738,7 @@ static void ecore_init_qm_pq(struct ecore_hwfn *p_hwfn, "pq overflow! 
pq %d, max pq %d\n", pq_idx, max_pq); /* init pq params */ + qm_info->qm_pq_params[pq_idx].port_id = p_hwfn->port_id; qm_info->qm_pq_params[pq_idx].vport_id = qm_info->start_vport + qm_info->num_vports; qm_info->qm_pq_params[pq_idx].tc_id = tc; @@ -823,7 +841,7 @@ u16 ecore_get_cm_pq_idx_vf(struct ecore_hwfn *p_hwfn, u16 vf) return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + vf; } -u16 ecore_get_cm_pq_idx_rl(struct ecore_hwfn *p_hwfn, u8 rl) +u16 ecore_get_cm_pq_idx_rl(struct ecore_hwfn *p_hwfn, u16 rl) { u16 max_rl = ecore_init_qm_get_num_pf_rls(p_hwfn); @@ -833,6 +851,23 @@ u16 ecore_get_cm_pq_idx_rl(struct ecore_hwfn *p_hwfn, u8 rl) return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_RLS) + rl; } +u16 ecore_get_qm_vport_idx_rl(struct ecore_hwfn *p_hwfn, u16 rl) +{ + u16 start_pq, pq, qm_pq_idx; + + pq = ecore_get_cm_pq_idx_rl(p_hwfn, rl); + start_pq = p_hwfn->qm_info.start_pq; + qm_pq_idx = pq - start_pq - CM_TX_PQ_BASE; + + if (qm_pq_idx > p_hwfn->qm_info.num_pqs) { + DP_ERR(p_hwfn, + "qm_pq_idx %d must be smaller than %d\n", + qm_pq_idx, p_hwfn->qm_info.num_pqs); + } + + return p_hwfn->qm_info.qm_pq_params[qm_pq_idx].vport_id; +} + /* Functions for creating specific types of pqs */ static void ecore_init_qm_lb_pq(struct ecore_hwfn *p_hwfn) { @@ -1025,10 +1060,9 @@ static void ecore_dp_init_qm_params(struct ecore_hwfn *p_hwfn) for (i = 0; i < qm_info->num_pqs; i++) { pq = &qm_info->qm_pq_params[i]; DP_VERBOSE(p_hwfn, ECORE_MSG_HW, - "pq idx %d, vport_id %d, tc %d, wrr_grp %d," - " rl_valid %d\n", - qm_info->start_pq + i, pq->vport_id, pq->tc_id, - pq->wrr_group, pq->rl_valid); + "pq idx %d, port %d, vport_id %d, tc %d, wrr_grp %d, rl_valid %d\n", + qm_info->start_pq + i, pq->port_id, pq->vport_id, + pq->tc_id, pq->wrr_group, pq->rl_valid); } } @@ -1083,7 +1117,7 @@ enum _ecore_status_t ecore_qm_reconf(struct ecore_hwfn *p_hwfn, ecore_init_clear_rt_data(p_hwfn); /* prepare QM portion of runtime array */ - ecore_qm_init_pf(p_hwfn, p_ptt); + ecore_qm_init_pf(p_hwfn, p_ptt, false); /* activate init tool on runtime array */ rc = ecore_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id, @@ -1289,16 +1323,14 @@ enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev) /* DMA info initialization */ rc = ecore_dmae_info_alloc(p_hwfn); if (rc) { - DP_NOTICE(p_hwfn, true, - "Failed to allocate memory for dmae_info" - " structure\n"); + DP_NOTICE(p_hwfn, false, "Failed to allocate memory for dmae_info structure\n"); goto alloc_err; } /* DCBX initialization */ rc = ecore_dcbx_info_alloc(p_hwfn); if (rc) { - DP_NOTICE(p_hwfn, true, + DP_NOTICE(p_hwfn, false, "Failed to allocate memory for dcbx structure\n"); goto alloc_err; } @@ -1307,7 +1339,7 @@ enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev) p_dev->reset_stats = OSAL_ZALLOC(p_dev, GFP_KERNEL, sizeof(*p_dev->reset_stats)); if (!p_dev->reset_stats) { - DP_NOTICE(p_dev, true, "Failed to allocate reset statistics\n"); + DP_NOTICE(p_dev, false, "Failed to allocate reset statistics\n"); goto alloc_no_mem; } @@ -1658,7 +1690,8 @@ static enum _ecore_status_t ecore_hw_init_common(struct ecore_hwfn *p_hwfn, ecore_init_cache_line_size(p_hwfn, p_ptt); - rc = ecore_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode); + rc = ecore_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ECORE_PATH_ID(p_hwfn), + hw_mode); if (rc != ECORE_SUCCESS) return rc; @@ -2160,6 +2193,11 @@ ecore_hw_init_pf(struct ecore_hwfn *p_hwfn, /* perform debug configuration when chip is out of reset */ OSAL_BEFORE_PF_START((void *)p_hwfn->p_dev, p_hwfn->my_id); + /* Sanity 
check before the PF init sequence that uses DMAE */ + rc = ecore_dmae_sanity(p_hwfn, p_ptt, "pf_phase"); + if (rc) + return rc; + /* PF Init sequence */ rc = ecore_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode); if (rc) @@ -2205,42 +2243,43 @@ ecore_hw_init_pf(struct ecore_hwfn *p_hwfn, DP_NOTICE(p_hwfn, true, "Function start ramrod failed\n"); } else { - prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1); - DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, - "PRS_REG_SEARCH_TAG1: %x\n", prs_reg); - - if (p_hwfn->hw_info.personality == ECORE_PCI_FCOE) { - ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1, - (1 << 2)); - ecore_wr(p_hwfn, p_ptt, - PRS_REG_PKT_LEN_STAT_TAGS_NOT_COUNTED_FIRST, - 0x100); - } - DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, - "PRS_REG_SEARCH registers after start PFn\n"); - prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP); - DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, - "PRS_REG_SEARCH_TCP: %x\n", prs_reg); - prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP); - DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, - "PRS_REG_SEARCH_UDP: %x\n", prs_reg); - prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE); - DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, - "PRS_REG_SEARCH_FCOE: %x\n", prs_reg); - prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE); - DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, - "PRS_REG_SEARCH_ROCE: %x\n", prs_reg); - prs_reg = ecore_rd(p_hwfn, p_ptt, - PRS_REG_SEARCH_TCP_FIRST_FRAG); - DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, - "PRS_REG_SEARCH_TCP_FIRST_FRAG: %x\n", - prs_reg); - prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1); - DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, - "PRS_REG_SEARCH_TAG1: %x\n", prs_reg); + return rc; } + prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1); + DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, + "PRS_REG_SEARCH_TAG1: %x\n", prs_reg); + + if (p_hwfn->hw_info.personality == ECORE_PCI_FCOE) { + ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1, + (1 << 2)); + ecore_wr(p_hwfn, p_ptt, + PRS_REG_PKT_LEN_STAT_TAGS_NOT_COUNTED_FIRST, + 0x100); + } + DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, + "PRS_REG_SEARCH registers after start PFn\n"); + prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP); + DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, + "PRS_REG_SEARCH_TCP: %x\n", prs_reg); + prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP); + DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, + "PRS_REG_SEARCH_UDP: %x\n", prs_reg); + prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE); + DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, + "PRS_REG_SEARCH_FCOE: %x\n", prs_reg); + prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE); + DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, + "PRS_REG_SEARCH_ROCE: %x\n", prs_reg); + prs_reg = ecore_rd(p_hwfn, p_ptt, + PRS_REG_SEARCH_TCP_FIRST_FRAG); + DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, + "PRS_REG_SEARCH_TCP_FIRST_FRAG: %x\n", + prs_reg); + prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1); + DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, + "PRS_REG_SEARCH_TAG1: %x\n", prs_reg); } - return rc; + return ECORE_SUCCESS; } enum _ecore_status_t ecore_pglueb_set_pfid_enable(struct ecore_hwfn *p_hwfn, @@ -2283,14 +2322,15 @@ static void ecore_reset_mb_shadow(struct ecore_hwfn *p_hwfn, } static void ecore_pglueb_clear_err(struct ecore_hwfn *p_hwfn, - struct ecore_ptt *p_ptt) + struct ecore_ptt *p_ptt) { ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR, 1 << p_hwfn->abs_pf_id); } -static void -ecore_fill_load_req_params(struct ecore_load_req_params *p_load_req, +static enum _ecore_status_t +ecore_fill_load_req_params(struct ecore_hwfn *p_hwfn, + struct 
ecore_load_req_params *p_load_req, struct ecore_drv_load_params *p_drv_load) { /* Make sure that if ecore-client didn't provide inputs, all the @@ -2302,15 +2342,51 @@ ecore_fill_load_req_params(struct ecore_load_req_params *p_load_req, OSAL_MEM_ZERO(p_load_req, sizeof(*p_load_req)); - if (p_drv_load != OSAL_NULL) { - p_load_req->drv_role = p_drv_load->is_crash_kernel ? - ECORE_DRV_ROLE_KDUMP : - ECORE_DRV_ROLE_OS; + if (p_drv_load == OSAL_NULL) + goto out; + + p_load_req->drv_role = p_drv_load->is_crash_kernel ? + ECORE_DRV_ROLE_KDUMP : + ECORE_DRV_ROLE_OS; + p_load_req->avoid_eng_reset = p_drv_load->avoid_eng_reset; + p_load_req->override_force_load = p_drv_load->override_force_load; + + /* Old MFW versions don't support timeout values other than default and + * none, so these values are replaced according to the fall-back action. + */ + + if (p_drv_load->mfw_timeout_val == ECORE_LOAD_REQ_LOCK_TO_DEFAULT || + p_drv_load->mfw_timeout_val == ECORE_LOAD_REQ_LOCK_TO_NONE || + (p_hwfn->mcp_info->capabilities & + FW_MB_PARAM_FEATURE_SUPPORT_DRV_LOAD_TO)) { p_load_req->timeout_val = p_drv_load->mfw_timeout_val; - p_load_req->avoid_eng_reset = p_drv_load->avoid_eng_reset; - p_load_req->override_force_load = - p_drv_load->override_force_load; + goto out; + } + + switch (p_drv_load->mfw_timeout_fallback) { + case ECORE_TO_FALLBACK_TO_NONE: + p_load_req->timeout_val = ECORE_LOAD_REQ_LOCK_TO_NONE; + break; + case ECORE_TO_FALLBACK_TO_DEFAULT: + p_load_req->timeout_val = ECORE_LOAD_REQ_LOCK_TO_DEFAULT; + break; + case ECORE_TO_FALLBACK_FAIL_LOAD: + DP_NOTICE(p_hwfn, false, + "Received %d as a value for MFW timeout while the MFW supports only default [%d] or none [%d]. Abort.\n", + p_drv_load->mfw_timeout_val, + ECORE_LOAD_REQ_LOCK_TO_DEFAULT, + ECORE_LOAD_REQ_LOCK_TO_NONE); + return ECORE_ABORTED; } + + DP_INFO(p_hwfn, + "Modified the MFW timeout value from %d to %s [%d] due to lack of MFW support\n", + p_drv_load->mfw_timeout_val, + (p_load_req->timeout_val == ECORE_LOAD_REQ_LOCK_TO_DEFAULT) ? + "default" : "none", + p_load_req->timeout_val); +out: + return ECORE_SUCCESS; } enum _ecore_status_t ecore_vf_start(struct ecore_hwfn *p_hwfn, @@ -2366,12 +2442,17 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev, if (rc != ECORE_SUCCESS) return rc; - ecore_fill_load_req_params(&load_req_params, - p_params->p_drv_load_params); + ecore_set_spq_block_timeout(p_hwfn, p_params->spq_timeout_ms); + + rc = ecore_fill_load_req_params(p_hwfn, &load_req_params, + p_params->p_drv_load_params); + if (rc != ECORE_SUCCESS) + return rc; + rc = ecore_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt, &load_req_params); if (rc != ECORE_SUCCESS) { - DP_NOTICE(p_hwfn, true, + DP_NOTICE(p_hwfn, false, "Failed sending a LOAD_REQ command\n"); return rc; } @@ -2404,10 +2485,17 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev, p_hwfn->first_on_engine = (load_code == FW_MSG_CODE_DRV_LOAD_ENGINE); - if (!qm_lock_init) { + if (!qm_lock_ref_cnt) { +#ifdef CONFIG_ECORE_LOCK_ALLOC + rc = OSAL_SPIN_LOCK_ALLOC(p_hwfn, &qm_lock); + if (rc) { + DP_ERR(p_hwfn, "qm_lock allocation failed\n"); + goto qm_lock_fail; + } +#endif OSAL_SPIN_LOCK_INIT(&qm_lock); - qm_lock_init = true; } + ++qm_lock_ref_cnt; /* Clean up chip from previous driver if such remains exist. 
* This is not needed when the PF is the first one on the @@ -2423,9 +2511,8 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev, } } - /* Log and clean previous pglue_b errors if such exist */ - ecore_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_main_ptt); - ecore_pglueb_clear_err(p_hwfn, p_hwfn->p_main_ptt); + /* Log and clear previous pglue_b errors if such exist */ + ecore_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_main_ptt, true); /* Enable the PF's internal FID_enable in the PXP */ rc = ecore_pglueb_set_pfid_enable(p_hwfn, p_hwfn->p_main_ptt, @@ -2433,6 +2520,13 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev, if (rc != ECORE_SUCCESS) goto load_err; + /* Clear the pglue_b was_error indication. + * In E4 it must be done after the BME and the internal + * FID_enable for the PF are set, since VDMs may cause the + * indication to be set again. + */ + ecore_pglueb_clear_err(p_hwfn, p_hwfn->p_main_ptt); + switch (load_code) { case FW_MSG_CODE_DRV_LOAD_ENGINE: rc = ecore_hw_init_common(p_hwfn, p_hwfn->p_main_ptt, @@ -2462,15 +2556,23 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev, } if (rc != ECORE_SUCCESS) { - DP_NOTICE(p_hwfn, true, + DP_NOTICE(p_hwfn, false, "init phase failed for loadcode 0x%x (rc %d)\n", load_code, rc); goto load_err; } rc = ecore_mcp_load_done(p_hwfn, p_hwfn->p_main_ptt); - if (rc != ECORE_SUCCESS) + if (rc != ECORE_SUCCESS) { + DP_NOTICE(p_hwfn, false, + "Sending load done failed, rc = %d\n", rc); + if (rc == ECORE_NOMEM) { + DP_NOTICE(p_hwfn, false, + "Sending load done was failed due to memory allocation failure\n"); + goto load_err; + } return rc; + } /* send DCBX attention request command */ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, @@ -2480,7 +2582,7 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev, 1 << DRV_MB_PARAM_DCBX_NOTIFY_OFFSET, &resp, ¶m); if (rc != ECORE_SUCCESS) { - DP_NOTICE(p_hwfn, true, + DP_NOTICE(p_hwfn, false, "Failed to send DCBX attention request\n"); return rc; } @@ -2513,6 +2615,12 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev, return rc; load_err: + --qm_lock_ref_cnt; +#ifdef CONFIG_ECORE_LOCK_ALLOC + if (!qm_lock_ref_cnt) + OSAL_SPIN_LOCK_DEALLOC(&qm_lock); +qm_lock_fail: +#endif /* The MFW load lock should be released regardless of success or failure * of initialization. * TODO: replace this with an attempt to send cancel_load. @@ -2547,8 +2655,8 @@ static void ecore_hw_timers_stop(struct ecore_dev *p_dev, if (i < ECORE_HW_STOP_RETRY_LIMIT) return; - DP_NOTICE(p_hwfn, true, "Timers linear scans are not over" - " [Connection %02x Tasks %02x]\n", + DP_NOTICE(p_hwfn, false, + "Timers linear scans are not over [Connection %02x Tasks %02x]\n", (u8)ecore_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_CONN), (u8)ecore_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK)); } @@ -2613,7 +2721,7 @@ enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev) if (!p_dev->recov_in_prog) { rc = ecore_mcp_unload_req(p_hwfn, p_ptt); if (rc != ECORE_SUCCESS) { - DP_NOTICE(p_hwfn, true, + DP_NOTICE(p_hwfn, false, "Failed sending a UNLOAD_REQ command. rc = %d.\n", rc); rc2 = ECORE_UNKNOWN_ERROR; @@ -2628,7 +2736,7 @@ enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev) rc = ecore_sp_pf_stop(p_hwfn); if (rc != ECORE_SUCCESS) { - DP_NOTICE(p_hwfn, true, + DP_NOTICE(p_hwfn, false, "Failed to close PF against FW [rc = %d]. 
Continue to stop HW to prevent illegal host access by the device.\n", rc); rc2 = ECORE_UNKNOWN_ERROR; @@ -2682,10 +2790,21 @@ enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev) ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_DB_ENABLE, 0); ecore_wr(p_hwfn, p_ptt, QM_REG_PF_EN, 0); + --qm_lock_ref_cnt; +#ifdef CONFIG_ECORE_LOCK_ALLOC + if (!qm_lock_ref_cnt) + OSAL_SPIN_LOCK_DEALLOC(&qm_lock); +#endif + if (!p_dev->recov_in_prog) { - ecore_mcp_unload_done(p_hwfn, p_ptt); + rc = ecore_mcp_unload_done(p_hwfn, p_ptt); + if (rc == ECORE_NOMEM) { + DP_NOTICE(p_hwfn, false, + "Failed sending an UNLOAD_DONE command due to a memory allocation failure. Resending.\n"); + rc = ecore_mcp_unload_done(p_hwfn, p_ptt); + } if (rc != ECORE_SUCCESS) { - DP_NOTICE(p_hwfn, true, + DP_NOTICE(p_hwfn, false, "Failed sending a UNLOAD_DONE command. rc = %d.\n", rc); rc2 = ECORE_UNKNOWN_ERROR; @@ -2936,7 +3055,7 @@ __ecore_hw_set_soft_resc_size(struct ecore_hwfn *p_hwfn, rc = ecore_mcp_set_resc_max_val(p_hwfn, p_ptt, res_id, resc_max_val, p_mcp_resp); if (rc != ECORE_SUCCESS) { - DP_NOTICE(p_hwfn, true, + DP_NOTICE(p_hwfn, false, "MFW response failure for a max value setting of resource %d [%s]\n", res_id, ecore_hw_get_resc_name(res_id)); return rc; @@ -3496,9 +3615,14 @@ ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn, break; case NVM_CFG1_GLOB_MF_MODE_UFP: p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_OVLAN_CLSS | - 1 << ECORE_MF_UFP_SPECIFIC; + 1 << ECORE_MF_UFP_SPECIFIC | + 1 << ECORE_MF_8021Q_TAGGING; + break; + case NVM_CFG1_GLOB_MF_MODE_BD: + p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_OVLAN_CLSS | + 1 << ECORE_MF_LLH_PROTO_CLSS | + 1 << ECORE_MF_8021AD_TAGGING; break; - case NVM_CFG1_GLOB_MF_MODE_NPAR1_0: p_hwfn->p_dev->mf_bits = 1 << ECORE_MF_LLH_MAC_CLSS | 1 << ECORE_MF_LLH_PROTO_CLSS | @@ -3527,6 +3651,7 @@ ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn, */ switch (mf_mode) { case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED: + case NVM_CFG1_GLOB_MF_MODE_BD: p_hwfn->p_dev->mf_mode = ECORE_MF_OVLAN; break; case NVM_CFG1_GLOB_MF_MODE_NPAR1_0: @@ -3780,8 +3905,13 @@ ecore_get_hw_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, bool drv_resc_alloc = p_params->drv_resc_alloc; enum _ecore_status_t rc; + if (IS_ECORE_PACING(p_hwfn)) { + DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_IOV, + "Skipping IOV as packet pacing is requested\n"); + } + /* Since all information is common, only first hwfns should do this */ - if (IS_LEAD_HWFN(p_hwfn)) { + if (IS_LEAD_HWFN(p_hwfn) && !IS_ECORE_PACING(p_hwfn)) { rc = ecore_iov_hw_info(p_hwfn); if (rc != ECORE_SUCCESS) { if (p_params->b_relaxed_probe) @@ -3866,7 +3996,10 @@ ecore_get_hw_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, * that can result in performance penalty in some cases. 4 * represents a good tradeoff between performance and flexibility. */ - p_hwfn->hw_info.num_hw_tc = NUM_PHYS_TCS_4PORT_K2; + if (IS_ECORE_PACING(p_hwfn)) + p_hwfn->hw_info.num_hw_tc = 1; + else + p_hwfn->hw_info.num_hw_tc = NUM_PHYS_TCS_4PORT_K2; /* start out with a single active tc. 
This can be increased either * by dcbx negotiation or by upper layer driver @@ -4037,7 +4170,7 @@ ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn, /* Allocate PTT pool */ rc = ecore_ptt_pool_alloc(p_hwfn); if (rc) { - DP_NOTICE(p_hwfn, true, "Failed to prepare hwfn's hw\n"); + DP_NOTICE(p_hwfn, false, "Failed to prepare hwfn's hw\n"); if (p_params->b_relaxed_probe) p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_MEM; goto err0; @@ -4062,7 +4195,7 @@ ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn, /* Initialize MCP structure */ rc = ecore_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt); if (rc) { - DP_NOTICE(p_hwfn, true, "Failed initializing mcp command\n"); + DP_NOTICE(p_hwfn, false, "Failed initializing mcp command\n"); if (p_params->b_relaxed_probe) p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_MEM; goto err1; @@ -4072,7 +4205,7 @@ ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn, rc = ecore_get_hw_info(p_hwfn, p_hwfn->p_main_ptt, p_params->personality, p_params); if (rc) { - DP_NOTICE(p_hwfn, true, "Failed to get HW information\n"); + DP_NOTICE(p_hwfn, false, "Failed to get HW information\n"); goto err2; } @@ -4115,7 +4248,7 @@ ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn, /* Allocate the init RT array and initialize the init-ops engine */ rc = ecore_init_alloc(p_hwfn); if (rc) { - DP_NOTICE(p_hwfn, true, "Failed to allocate the init array\n"); + DP_NOTICE(p_hwfn, false, "Failed to allocate the init array\n"); if (p_params->b_relaxed_probe) p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_MEM; goto err2; @@ -4153,6 +4286,7 @@ enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev, p_dev->chk_reg_fifo = p_params->chk_reg_fifo; p_dev->allow_mdump = p_params->allow_mdump; + p_hwfn->b_en_pacing = p_params->b_en_pacing; if (p_params->b_relaxed_probe) p_params->p_relaxed_res = ECORE_HW_PREPARE_SUCCESS; @@ -4188,6 +4322,7 @@ enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev, BAR_ID_1) / 2; p_doorbell = (void OSAL_IOMEM *)addr; + p_dev->hwfns[1].b_en_pacing = p_params->b_en_pacing; /* prepare second hw function */ rc = ecore_hw_prepare_single(&p_dev->hwfns[1], p_regview, p_doorbell, p_params); @@ -4205,8 +4340,7 @@ enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev, ecore_mcp_free(p_hwfn); ecore_hw_hwfn_free(p_hwfn); } else { - DP_NOTICE(p_dev, true, - "What do we need to free when VF hwfn1 init fails\n"); + DP_NOTICE(p_dev, false, "What do we need to free when VF hwfn1 init fails\n"); } return rc; } @@ -4237,7 +4371,7 @@ void ecore_hw_remove(struct ecore_dev *p_dev) ecore_mcp_free(p_hwfn); #ifdef CONFIG_ECORE_LOCK_ALLOC - OSAL_MUTEX_DEALLOC(&p_hwfn->dmae_info.mutex); + OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->dmae_info.lock); #endif } @@ -4368,7 +4502,7 @@ ecore_chain_alloc_next_ptr(struct ecore_dev *p_dev, struct ecore_chain *p_chain) p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys, ECORE_CHAIN_PAGE_SIZE); if (!p_virt) { - DP_NOTICE(p_dev, true, + DP_NOTICE(p_dev, false, "Failed to allocate chain memory\n"); return ECORE_NOMEM; } @@ -4401,7 +4535,7 @@ ecore_chain_alloc_single(struct ecore_dev *p_dev, struct ecore_chain *p_chain) p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys, ECORE_CHAIN_PAGE_SIZE); if (!p_virt) { - DP_NOTICE(p_dev, true, "Failed to allocate chain memory\n"); + DP_NOTICE(p_dev, false, "Failed to allocate chain memory\n"); return ECORE_NOMEM; } @@ -4425,7 +4559,7 @@ ecore_chain_alloc_pbl(struct ecore_dev *p_dev, size = page_cnt * sizeof(*pp_virt_addr_tbl); pp_virt_addr_tbl = (void **)OSAL_VZALLOC(p_dev, size); if (!pp_virt_addr_tbl) { - 
DP_NOTICE(p_dev, true, + DP_NOTICE(p_dev, false, "Failed to allocate memory for the chain virtual addresses table\n"); return ECORE_NOMEM; } @@ -4449,7 +4583,7 @@ ecore_chain_alloc_pbl(struct ecore_dev *p_dev, ecore_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys, pp_virt_addr_tbl); if (!p_pbl_virt) { - DP_NOTICE(p_dev, true, "Failed to allocate chain pbl memory\n"); + DP_NOTICE(p_dev, false, "Failed to allocate chain pbl memory\n"); return ECORE_NOMEM; } @@ -4457,7 +4591,7 @@ ecore_chain_alloc_pbl(struct ecore_dev *p_dev, p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys, ECORE_CHAIN_PAGE_SIZE); if (!p_virt) { - DP_NOTICE(p_dev, true, + DP_NOTICE(p_dev, false, "Failed to allocate chain memory\n"); return ECORE_NOMEM; } @@ -4497,7 +4631,7 @@ enum _ecore_status_t ecore_chain_alloc(struct ecore_dev *p_dev, rc = ecore_chain_alloc_sanity_check(p_dev, cnt_type, elem_size, page_cnt); if (rc) { - DP_NOTICE(p_dev, true, + DP_NOTICE(p_dev, false, "Cannot allocate a chain with the given arguments:\n" "[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu]\n", intended_use, mode, cnt_type, num_elems, elem_size); diff --git a/drivers/net/qede/base/ecore_dev_api.h b/drivers/net/qede/base/ecore_dev_api.h index 98bcabe8..02bacc22 100644 --- a/drivers/net/qede/base/ecore_dev_api.h +++ b/drivers/net/qede/base/ecore_dev_api.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef __ECORE_DEV_API_H__ @@ -32,7 +30,7 @@ void ecore_init_dp(struct ecore_dev *p_dev, * * @param p_dev */ -void ecore_init_struct(struct ecore_dev *p_dev); +enum _ecore_status_t ecore_init_struct(struct ecore_dev *p_dev); /** * @brief ecore_resc_free - @@ -57,6 +55,12 @@ enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev); */ void ecore_resc_setup(struct ecore_dev *p_dev); +enum ecore_mfw_timeout_fallback { + ECORE_TO_FALLBACK_TO_NONE, + ECORE_TO_FALLBACK_TO_DEFAULT, + ECORE_TO_FALLBACK_FAIL_LOAD, +}; + enum ecore_override_force_load { ECORE_OVERRIDE_FORCE_LOAD_NONE, ECORE_OVERRIDE_FORCE_LOAD_ALWAYS, @@ -79,6 +83,11 @@ struct ecore_drv_load_params { #define ECORE_LOAD_REQ_LOCK_TO_DEFAULT 0 #define ECORE_LOAD_REQ_LOCK_TO_NONE 255 + /* Action to take in case the MFW doesn't support timeout values other + * than default and none. + */ + enum ecore_mfw_timeout_fallback mfw_timeout_fallback; + /* Avoid engine reset when first PF loads on it */ bool avoid_eng_reset; @@ -104,6 +113,9 @@ struct ecore_hw_init_params { /* Driver load parameters */ struct ecore_drv_load_params *p_drv_load_params; + + /* SPQ block timeout in msec */ + u32 spq_timeout_ms; }; /** @@ -256,6 +268,9 @@ struct ecore_hw_prepare_params { */ bool b_relaxed_probe; enum ecore_hw_prepare_result p_relaxed_res; + + /* Enable/disable request by ecore client for pacing */ + bool b_en_pacing; }; /** @@ -363,6 +378,7 @@ struct ecore_eth_stats_common { u64 tx_mac_mc_packets; u64 tx_mac_bc_packets; u64 tx_mac_ctrl_frames; + u64 link_change_count; }; struct ecore_eth_stats_bb { diff --git a/drivers/net/qede/base/ecore_gtt_reg_addr.h b/drivers/net/qede/base/ecore_gtt_reg_addr.h index 2acd864d..8c8fed4e 100644 --- a/drivers/net/qede/base/ecore_gtt_reg_addr.h +++ b/drivers/net/qede/base/ecore_gtt_reg_addr.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. 
+/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef GTT_REG_ADDR_H diff --git a/drivers/net/qede/base/ecore_gtt_values.h b/drivers/net/qede/base/ecore_gtt_values.h index 2ddc5f19..adc20c0c 100644 --- a/drivers/net/qede/base/ecore_gtt_values.h +++ b/drivers/net/qede/base/ecore_gtt_values.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef __PREVENT_PXP_GLOBAL_WIN__ diff --git a/drivers/net/qede/base/ecore_hsi_common.h b/drivers/net/qede/base/ecore_hsi_common.h index d8abd604..2d761b97 100644 --- a/drivers/net/qede/base/ecore_hsi_common.h +++ b/drivers/net/qede/base/ecore_hsi_common.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef __ECORE_HSI_COMMON__ @@ -381,7 +379,7 @@ struct e4_xstorm_core_conn_ag_ctx { __le16 reserved16 /* physical_q2 */; __le16 tx_bd_cons /* word3 */; __le16 tx_bd_or_spq_prod /* word4 */; - __le16 word5 /* word5 */; + __le16 updated_qm_pq_id /* word5 */; __le16 conn_dpi /* conn_dpi */; u8 byte3 /* byte3 */; u8 byte4 /* byte4 */; @@ -904,8 +902,10 @@ struct core_rx_start_ramrod_data { /* if set, 802.1q tags will be removed and copied to CQE */ /* if set, 802.1q tags will be removed and copied to CQE */ u8 inner_vlan_stripping_en; -/* if set, outer tag wont be stripped, valid only in MF OVLAN. */ - u8 outer_vlan_stripping_dis; +/* if set and inner vlan does not exist, the outer vlan will copied to CQE as + * inner vlan. should be used in MF_OVLAN mode only. + */ + u8 report_outer_vlan; u8 queue_id /* Light L2 RX Queue ID */; u8 main_func_queue /* Is this the main queue for the PF */; /* Duplicate broadcast packets to LL2 main queue in mf_si mode. Valid if @@ -946,7 +946,9 @@ struct core_tx_bd_data { /* Do not allow additional VLAN manipulations on this packet (DCB) */ #define CORE_TX_BD_DATA_FORCE_VLAN_MODE_MASK 0x1 #define CORE_TX_BD_DATA_FORCE_VLAN_MODE_SHIFT 0 -/* Insert VLAN into packet */ +/* Insert VLAN into packet. Cannot be set for LB packets + * (tx_dst == CORE_TX_DEST_LB) + */ #define CORE_TX_BD_DATA_VLAN_INSERTION_MASK 0x1 #define CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT 1 /* This is the first BD of the packet (for debug) */ @@ -1069,11 +1071,11 @@ struct core_tx_update_ramrod_data { * Enum flag for what type of dcb data to update */ enum dcb_dscp_update_mode { -/* use when no change should be done to dcb data */ +/* use when no change should be done to DCB data */ DONT_UPDATE_DCB_DSCP, - UPDATE_DCB /* use to update only l2 (vlan) priority */, - UPDATE_DSCP /* use to update only l3 dscp */, - UPDATE_DCB_DSCP /* update vlan pri and dscp */, + UPDATE_DCB /* use to update only L2 (vlan) priority */, + UPDATE_DSCP /* use to update only IP DSCP */, + UPDATE_DCB_DSCP /* update vlan pri and DSCP */, MAX_DCB_DSCP_UPDATE_FLAG }; @@ -1291,10 +1293,15 @@ enum fw_flow_ctrl_mode { * GFT profile type. */ enum gft_profile_type { - GFT_PROFILE_TYPE_4_TUPLE /* 4 tuple, IP type and L4 type match. */, -/* L4 destination port, IP type and L4 type match. 
*/ +/* tunnel type, inner 4 tuple, IP type and L4 type match. */ + GFT_PROFILE_TYPE_4_TUPLE, +/* tunnel type, inner L4 destination port, IP type and L4 type match. */ GFT_PROFILE_TYPE_L4_DST_PORT, - GFT_PROFILE_TYPE_IP_DST_PORT /* IP destination port and IP type. */, +/* tunnel type, inner IP destination address and IP type match. */ + GFT_PROFILE_TYPE_IP_DST_ADDR, +/* tunnel type, inner IP source address and IP type match. */ + GFT_PROFILE_TYPE_IP_SRC_ADDR, + GFT_PROFILE_TYPE_TUNNEL_TYPE /* tunnel type and outer IP type match. */, MAX_GFT_PROFILE_TYPE }; @@ -1411,8 +1418,9 @@ struct vlan_header { * outer tag configurations */ struct outer_tag_config_struct { -/* Enables the STAG Priority Change , Should be 1 for Bette Davis and UFP with - * Host Control mode. Else - 0 +/* Enables updating S-tag priority from inner tag or DCB. Should be 1 for Bette + * Davis, UFP with Host Control mode, and UFP with DCB over base interface. + * else - 0. */ u8 enable_stag_pri_change; /* If inner_to_outer_pri_map is initialize then set pri_map_valid */ @@ -1507,15 +1515,18 @@ struct pf_start_ramrod_data { /* - * Data for port update ramrod + * Per protocol DCB data */ struct protocol_dcb_data { - u8 dcb_enable_flag /* dcbEnable flag value */; - u8 dscp_enable_flag /* If set use dscp value */; - u8 dcb_priority /* dcbPri flag value */; - u8 dcb_tc /* dcb TC value */; - u8 dscp_val /* dscp value to write if dscp_enable_flag is set */; - u8 reserved0; + u8 dcb_enable_flag /* Enable DCB */; + u8 dscp_enable_flag /* Enable updating DSCP value */; + u8 dcb_priority /* DCB priority */; + u8 dcb_tc /* DCB TC */; + u8 dscp_val /* DSCP value to write if dscp_enable_flag is set */; +/* When DCB is enabled - if this flag is set, dont add VLAN 0 tag to untagged + * frames + */ + u8 dcb_dont_add_vlan0; }; /* @@ -1575,8 +1586,9 @@ struct pf_update_ramrod_data { /* core iwarp related fields */ struct protocol_dcb_data iwarp_dcb_data; __le16 mf_vlan /* new outer vlan id value */; -/* enables the inner to outer TAG priority mapping. Should be 1 for Bette Davis - * and UFP with Host Control mode, else - 0. +/* enables updating S-tag priority from inner tag or DCB. Should be 1 for Bette + * Davis, UFP with Host Control mode, and UFP with DCB over base interface. 
+ * else - 0 */ u8 enable_stag_pri_change; u8 reserved; @@ -1739,6 +1751,7 @@ struct tstorm_per_port_stat { struct regpair eth_vxlan_tunn_filter_discard; /* GENEVE dropped packets */ struct regpair eth_geneve_tunn_filter_discard; + struct regpair eth_gft_drop_pkt /* GFT dropped packets */; }; @@ -2130,6 +2143,53 @@ struct e4_ystorm_core_conn_ag_ctx { }; +struct fw_asserts_ram_section { +/* The offset of the section in the RAM in RAM lines (64-bit units) */ + __le16 section_ram_line_offset; +/* The size of the section in RAM lines (64-bit units) */ + __le16 section_ram_line_size; +/* The offset of the asserts list within the section in dwords */ + u8 list_dword_offset; +/* The size of an assert list element in dwords */ + u8 list_element_dword_size; + u8 list_num_elements /* The number of elements in the asserts list */; +/* The offset of the next list index field within the section in dwords */ + u8 list_next_index_dword_offset; +}; + + +struct fw_ver_num { + u8 major /* Firmware major version number */; + u8 minor /* Firmware minor version number */; + u8 rev /* Firmware revision version number */; + u8 eng /* Firmware engineering version number (for bootleg versions) */; +}; + +struct fw_ver_info { + __le16 tools_ver /* Tools version number */; + u8 image_id /* FW image ID (e.g. main, l2b, kuku) */; + u8 reserved1; + struct fw_ver_num num /* FW version number */; + __le32 timestamp /* FW Timestamp in unix time (sec. since 1970) */; + __le32 reserved2; +}; + +struct fw_info { + struct fw_ver_info ver /* FW version information */; +/* Info regarding the FW asserts section in the Storm RAM */ + struct fw_asserts_ram_section fw_asserts_section; +}; + + +struct fw_info_location { + __le32 grc_addr /* GRC address where the fw_info struct is located. */; +/* Size of the fw_info structure (thats located at the grc_addr). */ + __le32 size; +}; + + + + /* * IGU cleanup command */ diff --git a/drivers/net/qede/base/ecore_hsi_debug_tools.h b/drivers/net/qede/base/ecore_hsi_debug_tools.h index ebb66482..bf548722 100644 --- a/drivers/net/qede/base/ecore_hsi_debug_tools.h +++ b/drivers/net/qede/base/ecore_hsi_debug_tools.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. 
+ * www.cavium.com */ #ifndef __ECORE_HSI_DEBUG_TOOLS__ @@ -225,7 +223,7 @@ enum bin_dbg_buffer_type { * Attention bit mapping */ struct dbg_attn_bit_mapping { - __le16 data; + u16 data; /* The index of an attention in the blocks attentions list * (if is_unused_bit_cnt=0), or a number of consecutive unused attention bits * (if is_unused_bit_cnt=1) @@ -247,14 +245,14 @@ struct dbg_attn_block_type_data { /* Offset of this block attention names in the debug attention name offsets * array */ - __le16 names_offset; - __le16 reserved1; + u16 names_offset; + u16 reserved1; u8 num_regs /* Number of attention registers in this block */; u8 reserved2; /* Offset of this blocks attention registers in the attention registers array * (in dbg_attn_reg units) */ - __le16 regs_offset; + u16 regs_offset; }; /* @@ -272,20 +270,20 @@ struct dbg_attn_block { * Attention register result */ struct dbg_attn_reg_result { - __le32 data; + u32 data; /* STS attention register GRC address (in dwords) */ #define DBG_ATTN_REG_RESULT_STS_ADDRESS_MASK 0xFFFFFF #define DBG_ATTN_REG_RESULT_STS_ADDRESS_SHIFT 0 /* Number of attention indexes in this register */ #define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_MASK 0xFF #define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_SHIFT 24 -/* The offset of this registers attentions within the blocks attentions - * list (a value in the range 0..number of block attentions-1) +/* The offset of this registers attentions within the blocks attentions list + * (a value in the range 0..number of block attentions-1) */ - __le16 attn_idx_offset; - __le16 reserved; - __le32 sts_val /* Value read from the STS attention register */; - __le32 mask_val /* Value read from the MASK attention register */; + u16 block_attn_offset; + u16 reserved; + u32 sts_val /* Value read from the STS attention register */; + u32 mask_val /* Value read from the MASK attention register */; }; /* @@ -303,7 +301,7 @@ struct dbg_attn_block_result { /* Offset of this registers block attention names in the attention name offsets * array */ - __le16 names_offset; + u16 names_offset; /* result data for each register in the block in which at least one attention * bit is set */ @@ -316,7 +314,7 @@ struct dbg_attn_block_result { * mode header */ struct dbg_mode_hdr { - __le16 data; + u16 data; /* indicates if a mode expression should be evaluated (0/1) */ #define DBG_MODE_HDR_EVAL_MODE_MASK 0x1 #define DBG_MODE_HDR_EVAL_MODE_SHIFT 0 @@ -331,12 +329,11 @@ struct dbg_mode_hdr { * Attention register */ struct dbg_attn_reg { - struct dbg_mode_hdr mode /* Mode header */; -/* The offset of this registers attentions within the blocks attentions - * list (a value in the range 0..number of block attentions-1) +/* The offset of this registers attentions within the blocks attentions list + * (a value in the range 0..number of block attentions-1) */ - __le16 attn_idx_offset; - __le32 data; + u16 block_attn_offset; + u32 data; /* STS attention register GRC address (in dwords) */ #define DBG_ATTN_REG_STS_ADDRESS_MASK 0xFFFFFF #define DBG_ATTN_REG_STS_ADDRESS_SHIFT 0 @@ -344,9 +341,8 @@ struct dbg_attn_reg { #define DBG_ATTN_REG_NUM_REG_ATTN_MASK 0xFF #define DBG_ATTN_REG_NUM_REG_ATTN_SHIFT 24 /* STS_CLR attention register GRC address (in dwords) */ - __le32 sts_clr_address; -/* MASK attention register GRC address (in dwords) */ - __le32 mask_address; + u32 sts_clr_address; + u32 mask_address /* MASK attention register GRC address (in dwords) */; }; @@ -370,7 +366,7 @@ struct dbg_bus_block { /* Indicates if this block has a latency events debug line (0/1). 
*/ u8 has_latency_events; /* Offset of this blocks lines in the Debug Bus lines array. */ - __le16 lines_offset; + u16 lines_offset; }; @@ -383,7 +379,7 @@ struct dbg_bus_block_user_data { /* Indicates if this block has a latency events debug line (0/1). */ u8 has_latency_events; /* Offset of this blocks lines in the debug bus line name offsets array. */ - __le16 names_offset; + u16 names_offset; }; @@ -422,13 +418,13 @@ struct dbg_dump_cond_hdr { * memory data for registers dump */ struct dbg_dump_mem { - __le32 dword0; + u32 dword0; /* register address (in dwords) */ #define DBG_DUMP_MEM_ADDRESS_MASK 0xFFFFFF #define DBG_DUMP_MEM_ADDRESS_SHIFT 0 #define DBG_DUMP_MEM_MEM_GROUP_ID_MASK 0xFF /* memory group ID */ #define DBG_DUMP_MEM_MEM_GROUP_ID_SHIFT 24 - __le32 dword1; + u32 dword1; /* register size (in dwords) */ #define DBG_DUMP_MEM_LENGTH_MASK 0xFFFFFF #define DBG_DUMP_MEM_LENGTH_SHIFT 0 @@ -444,7 +440,7 @@ struct dbg_dump_mem { * register data for registers dump */ struct dbg_dump_reg { - __le32 data; + u32 data; /* register address (in dwords) */ #define DBG_DUMP_REG_ADDRESS_MASK 0x7FFFFF /* register address (in dwords) */ #define DBG_DUMP_REG_ADDRESS_SHIFT 0 @@ -460,7 +456,7 @@ struct dbg_dump_reg { * split header for registers dump */ struct dbg_dump_split_hdr { - __le32 hdr; + u32 hdr; /* size in dwords of the data following this header */ #define DBG_DUMP_SPLIT_HDR_DATA_SIZE_MASK 0xFFFFFF #define DBG_DUMP_SPLIT_HDR_DATA_SIZE_SHIFT 0 @@ -474,8 +470,7 @@ struct dbg_dump_split_hdr { */ struct dbg_idle_chk_cond_hdr { struct dbg_mode_hdr mode /* Mode header */; -/* size in dwords of the data following this header */ - __le16 data_size; + u16 data_size /* size in dwords of the data following this header */; }; @@ -483,7 +478,7 @@ struct dbg_idle_chk_cond_hdr { * Idle Check condition register */ struct dbg_idle_chk_cond_reg { - __le32 data; + u32 data; /* Register GRC address (in dwords) */ #define DBG_IDLE_CHK_COND_REG_ADDRESS_MASK 0x7FFFFF #define DBG_IDLE_CHK_COND_REG_ADDRESS_SHIFT 0 @@ -493,7 +488,7 @@ struct dbg_idle_chk_cond_reg { /* value from block_id enum */ #define DBG_IDLE_CHK_COND_REG_BLOCK_ID_MASK 0xFF #define DBG_IDLE_CHK_COND_REG_BLOCK_ID_SHIFT 24 - __le16 num_entries /* number of registers entries to check */; + u16 num_entries /* number of registers entries to check */; u8 entry_size /* size of registers entry (in dwords) */; u8 start_entry /* index of the first entry to check */; }; @@ -503,7 +498,7 @@ struct dbg_idle_chk_cond_reg { * Idle Check info register */ struct dbg_idle_chk_info_reg { - __le32 data; + u32 data; /* Register GRC address (in dwords) */ #define DBG_IDLE_CHK_INFO_REG_ADDRESS_MASK 0x7FFFFF #define DBG_IDLE_CHK_INFO_REG_ADDRESS_SHIFT 0 @@ -513,7 +508,7 @@ struct dbg_idle_chk_info_reg { /* value from block_id enum */ #define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_MASK 0xFF #define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_SHIFT 24 - __le16 size /* register size in dwords */; + u16 size /* register size in dwords */; struct dbg_mode_hdr mode /* Mode header */; }; @@ -531,8 +526,8 @@ union dbg_idle_chk_reg { * Idle Check result header */ struct dbg_idle_chk_result_hdr { - __le16 rule_id /* Failing rule index */; - __le16 mem_entry_id /* Failing memory entry index */; + u16 rule_id /* Failing rule index */; + u16 mem_entry_id /* Failing memory entry index */; u8 num_dumped_cond_regs /* number of dumped condition registers */; u8 num_dumped_info_regs /* number of dumped condition registers */; u8 severity /* from dbg_idle_chk_severity_types enum */; @@ -552,7 +547,7 @@ 
struct dbg_idle_chk_result_reg_hdr { #define DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID_MASK 0x7F #define DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID_SHIFT 1 u8 start_entry /* index of the first checked entry */; - __le16 size /* register size in dwords */; + u16 size /* register size in dwords */; }; @@ -560,7 +555,7 @@ struct dbg_idle_chk_result_reg_hdr { * Idle Check rule */ struct dbg_idle_chk_rule { - __le16 rule_id /* Idle Check rule ID */; + u16 rule_id /* Idle Check rule ID */; u8 severity /* value from dbg_idle_chk_severity_types enum */; u8 cond_id /* Condition ID */; u8 num_cond_regs /* number of condition registers */; @@ -570,11 +565,11 @@ struct dbg_idle_chk_rule { /* offset of this rules registers in the idle check register array * (in dbg_idle_chk_reg units) */ - __le16 reg_offset; + u16 reg_offset; /* offset of this rules immediate values in the immediate values array * (in dwords) */ - __le16 imm_offset; + u16 imm_offset; }; @@ -582,7 +577,7 @@ struct dbg_idle_chk_rule { * Idle Check rule parsing data */ struct dbg_idle_chk_rule_parsing_data { - __le32 data; + u32 data; /* indicates if this register has a FW message */ #define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_MASK 0x1 #define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_SHIFT 0 @@ -693,8 +688,8 @@ struct dbg_bus_trigger_state_data { * Debug Bus memory address */ struct dbg_bus_mem_addr { - __le32 lo; - __le32 hi; + u32 lo; + u32 hi; }; /* @@ -703,7 +698,7 @@ struct dbg_bus_mem_addr { struct dbg_bus_pci_buf_data { struct dbg_bus_mem_addr phys_addr /* PCI buffer physical address */; struct dbg_bus_mem_addr virt_addr /* PCI buffer virtual address */; - __le32 size /* PCI buffer size in bytes */; + u32 size /* PCI buffer size in bytes */; }; /* @@ -747,21 +742,20 @@ struct dbg_bus_storm_data { u8 cid_filter_en /* Indicates if CID filtering is performed (0/1) */; /* EID filter params to filter on. Valid only if eid_filter_en is set. */ union dbg_bus_storm_eid_params eid_filter_params; -/* CID to filter on. Valid only if cid_filter_en is set. */ - __le32 cid; + u32 cid /* CID to filter on. Valid only if cid_filter_en is set. */; }; /* * Debug Bus data */ struct dbg_bus_data { - __le32 app_version /* The tools version number of the application */; + u32 app_version /* The tools version number of the application */; u8 state /* The current debug bus state */; u8 hw_dwords /* HW dwords per cycle */; /* The HW IDs of the recorded HW blocks, where bits i*3..i*3+2 contain the * HW ID of dword/qword i */ - __le16 hw_id_mask; + u16 hw_id_mask; u8 num_enabled_blocks /* Number of blocks enabled for recording */; u8 num_enabled_storms /* Number of Storms enabled for recording */; u8 target /* Output target */; @@ -783,7 +777,7 @@ struct dbg_bus_data { * Valid only if both filter and trigger are enabled (0/1) */ u8 filter_post_trigger; - __le16 reserved; + u16 reserved; /* Indicates if the recording trigger is enabled (0/1) */ u8 trigger_en; /* trigger states data */ @@ -933,9 +927,10 @@ struct dbg_grc_data { /* Indicates if the GRC parameters were initialized */ u8 params_initialized; u8 reserved1; - __le16 reserved2; -/* Value of each GRC parameter. Array size must match enum dbg_grc_params. */ - __le32 param_val[48]; + u16 reserved2; +/* Value of each GRC parameter. Array size must match the enum dbg_grc_params. 
+ */ + u32 param_val[48]; }; @@ -960,7 +955,8 @@ enum dbg_grc_params { DBG_GRC_PARAM_DUMP_CAU /* dump CAU memories (0/1) */, DBG_GRC_PARAM_DUMP_QM /* dump QM memories (0/1) */, DBG_GRC_PARAM_DUMP_MCP /* dump MCP memories (0/1) */, - DBG_GRC_PARAM_RESERVED /* reserved */, +/* MCP Trace meta data size in bytes */ + DBG_GRC_PARAM_MCP_TRACE_META_SIZE, DBG_GRC_PARAM_DUMP_CFC /* dump CFC memories (0/1) */, DBG_GRC_PARAM_DUMP_IGU /* dump IGU memories (0/1) */, DBG_GRC_PARAM_DUMP_BRB /* dump BRB memories (0/1) */, @@ -1087,11 +1083,11 @@ enum dbg_storms { * Idle Check data */ struct idle_chk_data { - __le32 buf_size /* Idle check buffer size in dwords */; + u32 buf_size /* Idle check buffer size in dwords */; /* Indicates if the idle check buffer size was set (0/1) */ u8 buf_size_set; u8 reserved1; - __le16 reserved2; + u16 reserved2; }; /* @@ -1109,7 +1105,7 @@ struct dbg_tools_data { u8 initialized /* Indicates if the data was initialized */; u8 use_dmae /* Indicates if DMAE should be used */; /* Numbers of registers that were read since last log */ - __le32 num_regs_read; + u32 num_regs_read; }; diff --git a/drivers/net/qede/base/ecore_hsi_eth.h b/drivers/net/qede/base/ecore_hsi_eth.h index ffbf5c71..6b512305 100644 --- a/drivers/net/qede/base/ecore_hsi_eth.h +++ b/drivers/net/qede/base/ecore_hsi_eth.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef __ECORE_HSI_ETH__ @@ -346,7 +344,7 @@ struct e4_xstorm_eth_conn_ag_ctx { __le16 edpm_num_bds /* physical_q2 */; __le16 tx_bd_cons /* word3 */; __le16 tx_bd_prod /* word4 */; - __le16 tx_class /* word5 */; + __le16 updated_qm_pq_id /* word5 */; __le16 conn_dpi /* conn_dpi */; u8 byte3 /* byte3 */; u8 byte4 /* byte4 */; @@ -1034,7 +1032,6 @@ struct eth_vport_rx_mode { #define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_SHIFT 5 #define ETH_VPORT_RX_MODE_RESERVED1_MASK 0x3FF #define ETH_VPORT_RX_MODE_RESERVED1_SHIFT 6 - __le16 reserved2[3]; }; @@ -1046,11 +1043,11 @@ struct eth_vport_tpa_param { u8 tpa_ipv6_en_flg /* Enable TPA for IPv6 packets */; u8 tpa_ipv4_tunn_en_flg /* Enable TPA for IPv4 over tunnel */; u8 tpa_ipv6_tunn_en_flg /* Enable TPA for IPv6 over tunnel */; -/* If set, start each tpa segment on new SGE (GRO mode). One SGE per segment - * allowed +/* If set, start each TPA segment on new BD (GRO mode). One BD per segment + * allowed. */ u8 tpa_pkt_split_flg; -/* If set, put header of first TPA segment on bd and data on SGE */ +/* If set, put header of first TPA segment on first BD and data on second BD. */ u8 tpa_hdr_data_split_flg; /* If set, GRO data consistent will checked for TPA continue */ u8 tpa_gro_consistent_flg; @@ -1089,7 +1086,6 @@ struct eth_vport_tx_mode { #define ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL_SHIFT 4 #define ETH_VPORT_TX_MODE_RESERVED1_MASK 0x7FF #define ETH_VPORT_TX_MODE_RESERVED1_SHIFT 5 - __le16 reserved2[3]; }; @@ -1216,7 +1212,9 @@ struct rx_queue_update_ramrod_data { u8 complete_cqe_flg /* post completion to the CQE ring if set */; u8 complete_event_flg /* post completion to the event ring if set */; u8 vport_id /* ID of virtual port */; - u8 reserved[4]; +/* If set, update default rss queue to this RX queue. */ + u8 set_default_rss_queue; + u8 reserved[3]; u8 reserved1 /* FW reserved. */; u8 reserved2 /* FW reserved. */; u8 reserved3 /* FW reserved. 
*/; @@ -1257,7 +1255,8 @@ struct rx_update_gft_filter_data { __le16 action_icid; __le16 rx_qid /* RX queue ID. Valid if rx_qid_valid set. */; __le16 flow_id /* RX flow ID. Valid if flow_id_valid set. */; - u8 vport_id /* RX vport Id. */; +/* RX vport Id. For drop flow, set to ETH_GFT_TRASHCAN_VPORT. */ + __le16 vport_id; /* If set, action_icid will used for GFT filter update. */ u8 action_icid_valid; /* If set, rx_qid will used for traffic steering, in additional to vport_id. @@ -1273,7 +1272,10 @@ struct rx_update_gft_filter_data { * case of error. */ u8 assert_on_error; - u8 reserved[2]; +/* If set, inner VLAN will be removed regardless to VPORT configuration. + * Supported by E4 only. + */ + u8 inner_vlan_removal_en; }; @@ -1403,7 +1405,7 @@ struct vport_start_ramrod_data { u8 ctl_frame_mac_check_en; /* If set, control frames will be filtered according to ethtype check. */ u8 ctl_frame_ethtype_check_en; - u8 reserved[5]; + u8 reserved[1]; }; @@ -1486,6 +1488,7 @@ struct vport_update_ramrod_data { struct vport_update_ramrod_data_cmn common; struct eth_vport_rx_mode rx_mode /* vport rx mode bitmap */; struct eth_vport_tx_mode tx_mode /* vport tx mode bitmap */; + __le32 reserved[3]; /* TPA configuration parameters */ struct eth_vport_tpa_param tpa_param; struct vport_update_ramrod_mcast approx_mcast; @@ -1809,7 +1812,7 @@ struct E4XstormEthConnAgCtxDqExtLdPart { __le16 edpm_num_bds /* physical_q2 */; __le16 tx_bd_cons /* word3 */; __le16 tx_bd_prod /* word4 */; - __le16 tx_class /* word5 */; + __le16 updated_qm_pq_id /* word5 */; __le16 conn_dpi /* conn_dpi */; u8 byte3 /* byte3 */; u8 byte4 /* byte4 */; @@ -2153,7 +2156,7 @@ struct e4_xstorm_eth_hw_conn_ag_ctx { __le16 edpm_num_bds /* physical_q2 */; __le16 tx_bd_cons /* word3 */; __le16 tx_bd_prod /* word4 */; - __le16 tx_class /* word5 */; + __le16 updated_qm_pq_id /* word5 */; __le16 conn_dpi /* conn_dpi */; }; diff --git a/drivers/net/qede/base/ecore_hsi_init_func.h b/drivers/net/qede/base/ecore_hsi_init_func.h index 48b0048f..d77edaa1 100644 --- a/drivers/net/qede/base/ecore_hsi_init_func.h +++ b/drivers/net/qede/base/ecore_hsi_init_func.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef __ECORE_HSI_INIT_FUNC__ @@ -24,10 +22,10 @@ * BRB RAM init requirements */ struct init_brb_ram_req { - __le32 guranteed_per_tc /* guaranteed size per TC, in bytes */; - __le32 headroom_per_tc /* headroom size per TC, in bytes */; - __le32 min_pkt_size /* min packet size, in bytes */; - __le32 max_ports_per_engine /* min packet size, in bytes */; + u32 guranteed_per_tc /* guaranteed size per TC, in bytes */; + u32 headroom_per_tc /* headroom size per TC, in bytes */; + u32 min_pkt_size /* min packet size, in bytes */; + u32 max_ports_per_engine /* min packet size, in bytes */; u8 num_active_tcs[MAX_NUM_PORTS] /* number of active TCs per port */; }; @@ -44,15 +42,14 @@ struct init_ets_tc_req { * (indicated by the weight field) */ u8 use_wfq; -/* An arbitration weight. Valid only if use_wfq is set. */ - __le16 weight; + u16 weight /* An arbitration weight. Valid only if use_wfq is set. */; }; /* * ETS init requirements */ struct init_ets_req { - __le32 mtu /* Max packet size (in bytes) */; + u32 mtu /* Max packet size (in bytes) */; /* ETS initialization requirements per TC. 
*/ struct init_ets_tc_req tc_req[NUM_OF_TCS]; }; @@ -64,12 +61,12 @@ struct init_ets_req { */ struct init_nig_lb_rl_req { /* Global MAC+LB RL rate (in Mbps). If set to 0, the RL will be disabled. */ - __le16 lb_mac_rate; + u16 lb_mac_rate; /* Global LB RL rate (in Mbps). If set to 0, the RL will be disabled. */ - __le16 lb_rate; - __le32 mtu /* Max packet size (in bytes) */; + u16 lb_rate; + u32 mtu /* Max packet size (in bytes) */; /* RL rate per physical TC (in Mbps). If set to 0, the RL will be disabled. */ - __le16 tc_rate[NUM_OF_PHYS_TCS]; + u16 tc_rate[NUM_OF_PHYS_TCS]; }; @@ -98,10 +95,10 @@ struct init_qm_port_params { /* Vector of valid bits for active TCs used by this port */ u8 active_phys_tcs; /* number of PBF command lines that can be used by this port */ - __le16 num_pbf_cmd_lines; + u16 num_pbf_cmd_lines; /* number of BTB blocks that can be used by this port */ - __le16 num_btb_blocks; - __le16 reserved; + u16 num_btb_blocks; + u16 reserved; }; @@ -114,6 +111,9 @@ struct init_qm_pq_params { u8 wrr_group /* WRR group */; /* Indicates if a rate limiter should be allocated for the PQ (0/1) */ u8 rl_valid; + u8 port_id /* Port ID */; + u8 reserved0; + u16 reserved1; }; @@ -124,13 +124,13 @@ struct init_qm_vport_params { /* rate limit in Mb/sec units. a value of 0 means dont configure. ignored if * VPORT RL is globally disabled. */ - __le32 vport_rl; + u32 vport_rl; /* WFQ weight. A value of 0 means dont configure. ignored if VPORT WFQ is * globally disabled. */ - __le16 vport_wfq; + u16 vport_wfq; /* the first Tx PQ ID associated with this VPORT for each TC. */ - __le16 first_tx_pq_id[NUM_OF_TCS]; + u16 first_tx_pq_id[NUM_OF_TCS]; }; #endif /* __ECORE_HSI_INIT_FUNC__ */ diff --git a/drivers/net/qede/base/ecore_hsi_init_tool.h b/drivers/net/qede/base/ecore_hsi_init_tool.h index 1f57e9b2..0e157f9b 100644 --- a/drivers/net/qede/base/ecore_hsi_init_tool.h +++ b/drivers/net/qede/base/ecore_hsi_init_tool.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef __ECORE_HSI_INIT_TOOL__ @@ -30,59 +28,13 @@ enum chip_ids { }; -struct fw_asserts_ram_section { -/* The offset of the section in the RAM in RAM lines (64-bit units) */ - __le16 section_ram_line_offset; -/* The size of the section in RAM lines (64-bit units) */ - __le16 section_ram_line_size; -/* The offset of the asserts list within the section in dwords */ - u8 list_dword_offset; -/* The size of an assert list element in dwords */ - u8 list_element_dword_size; - u8 list_num_elements /* The number of elements in the asserts list */; -/* The offset of the next list index field within the section in dwords */ - u8 list_next_index_dword_offset; -}; - - -struct fw_ver_num { - u8 major /* Firmware major version number */; - u8 minor /* Firmware minor version number */; - u8 rev /* Firmware revision version number */; -/* Firmware engineering version number (for bootleg versions) */ - u8 eng; -}; - -struct fw_ver_info { - __le16 tools_ver /* Tools version number */; - u8 image_id /* FW image ID (e.g. main, l2b, kuku) */; - u8 reserved1; - struct fw_ver_num num /* FW version number */; - __le32 timestamp /* FW Timestamp in unix time (sec. 
since 1970) */; - __le32 reserved2; -}; - -struct fw_info { - struct fw_ver_info ver /* FW version information */; -/* Info regarding the FW asserts section in the Storm RAM */ - struct fw_asserts_ram_section fw_asserts_section; -}; - - -struct fw_info_location { -/* GRC address where the fw_info struct is located. */ - __le32 grc_addr; -/* Size of the fw_info structure (thats located at the grc_addr). */ - __le32 size; -}; - /* * Binary buffer header */ struct bin_buffer_hdr { /* buffer offset in bytes from the beginning of the binary file */ - __le32 offset; - __le32 length /* buffer length in bytes */; + u32 offset; + u32 length /* buffer length in bytes */; }; @@ -103,7 +55,7 @@ enum bin_init_buffer_type { * init array header: raw */ struct init_array_raw_hdr { - __le32 data; + u32 data; /* Init array type, from init_array_types enum */ #define INIT_ARRAY_RAW_HDR_TYPE_MASK 0xF #define INIT_ARRAY_RAW_HDR_TYPE_SHIFT 0 @@ -116,7 +68,7 @@ struct init_array_raw_hdr { * init array header: standard */ struct init_array_standard_hdr { - __le32 data; + u32 data; /* Init array type, from init_array_types enum */ #define INIT_ARRAY_STANDARD_HDR_TYPE_MASK 0xF #define INIT_ARRAY_STANDARD_HDR_TYPE_SHIFT 0 @@ -129,7 +81,7 @@ struct init_array_standard_hdr { * init array header: zipped */ struct init_array_zipped_hdr { - __le32 data; + u32 data; /* Init array type, from init_array_types enum */ #define INIT_ARRAY_ZIPPED_HDR_TYPE_MASK 0xF #define INIT_ARRAY_ZIPPED_HDR_TYPE_SHIFT 0 @@ -142,7 +94,7 @@ struct init_array_zipped_hdr { * init array header: pattern */ struct init_array_pattern_hdr { - __le32 data; + u32 data; /* Init array type, from init_array_types enum */ #define INIT_ARRAY_PATTERN_HDR_TYPE_MASK 0xF #define INIT_ARRAY_PATTERN_HDR_TYPE_SHIFT 0 @@ -223,14 +175,14 @@ enum init_array_types { * init operation: callback */ struct init_callback_op { - __le32 op_data; + u32 op_data; /* Init operation, from init_op_types enum */ #define INIT_CALLBACK_OP_OP_MASK 0xF #define INIT_CALLBACK_OP_OP_SHIFT 0 #define INIT_CALLBACK_OP_RESERVED_MASK 0xFFFFFFF #define INIT_CALLBACK_OP_RESERVED_SHIFT 4 - __le16 callback_id /* Callback ID */; - __le16 block_id /* Blocks ID */; + u16 callback_id /* Callback ID */; + u16 block_id /* Blocks ID */; }; @@ -238,7 +190,7 @@ struct init_callback_op { * init operation: delay */ struct init_delay_op { - __le32 op_data; + u32 op_data; /* Init operation, from init_op_types enum */ #define INIT_DELAY_OP_OP_MASK 0xF #define INIT_DELAY_OP_OP_SHIFT 0 @@ -252,7 +204,7 @@ struct init_delay_op { * init operation: if_mode */ struct init_if_mode_op { - __le32 op_data; + u32 op_data; /* Init operation, from init_op_types enum */ #define INIT_IF_MODE_OP_OP_MASK 0xF #define INIT_IF_MODE_OP_OP_SHIFT 0 @@ -261,9 +213,8 @@ struct init_if_mode_op { /* Commands to skip if the modes dont match */ #define INIT_IF_MODE_OP_CMD_OFFSET_MASK 0xFFFF #define INIT_IF_MODE_OP_CMD_OFFSET_SHIFT 16 - __le16 reserved2; -/* offset (in bytes) in modes expression buffer */ - __le16 modes_buf_offset; + u16 reserved2; + u16 modes_buf_offset /* offset (in bytes) in modes expression buffer */; }; @@ -271,7 +222,7 @@ struct init_if_mode_op { * init operation: if_phase */ struct init_if_phase_op { - __le32 op_data; + u32 op_data; /* Init operation, from init_op_types enum */ #define INIT_IF_PHASE_OP_OP_MASK 0xF #define INIT_IF_PHASE_OP_OP_SHIFT 0 @@ -283,7 +234,7 @@ struct init_if_phase_op { /* Commands to skip if the phases dont match */ #define INIT_IF_PHASE_OP_CMD_OFFSET_MASK 0xFFFF #define 
INIT_IF_PHASE_OP_CMD_OFFSET_SHIFT 16 - __le32 phase_data; + u32 phase_data; #define INIT_IF_PHASE_OP_PHASE_MASK 0xFF /* Init phase */ #define INIT_IF_PHASE_OP_PHASE_SHIFT 0 #define INIT_IF_PHASE_OP_RESERVED2_MASK 0xFF @@ -308,21 +259,21 @@ enum init_mode_ops { * init operation: raw */ struct init_raw_op { - __le32 op_data; + u32 op_data; /* Init operation, from init_op_types enum */ #define INIT_RAW_OP_OP_MASK 0xF #define INIT_RAW_OP_OP_SHIFT 0 #define INIT_RAW_OP_PARAM1_MASK 0xFFFFFFF /* init param 1 */ #define INIT_RAW_OP_PARAM1_SHIFT 4 - __le32 param2 /* Init param 2 */; + u32 param2 /* Init param 2 */; }; /* * init array params */ struct init_op_array_params { - __le16 size /* array size in dwords */; - __le16 offset /* array start offset in dwords */; + u16 size /* array size in dwords */; + u16 offset /* array start offset in dwords */; }; /* @@ -330,11 +281,11 @@ struct init_op_array_params { */ union init_write_args { /* value to write, used when init source is INIT_SRC_INLINE */ - __le32 inline_val; + u32 inline_val; /* number of zeros to write, used when init source is INIT_SRC_ZEROS */ - __le32 zeros_count; + u32 zeros_count; /* array offset to write, used when init source is INIT_SRC_ARRAY */ - __le32 array_offset; + u32 array_offset; /* runtime array params to write, used when init source is INIT_SRC_RUNTIME */ struct init_op_array_params runtime; }; @@ -343,7 +294,7 @@ union init_write_args { * init operation: write */ struct init_write_op { - __le32 data; + u32 data; /* init operation, from init_op_types enum */ #define INIT_WRITE_OP_OP_MASK 0xF #define INIT_WRITE_OP_OP_SHIFT 0 @@ -365,7 +316,7 @@ struct init_write_op { * init operation: read */ struct init_read_op { - __le32 op_data; + u32 op_data; /* init operation, from init_op_types enum */ #define INIT_READ_OP_OP_MASK 0xF #define INIT_READ_OP_OP_SHIFT 0 @@ -378,7 +329,7 @@ struct init_read_op { #define INIT_READ_OP_ADDRESS_MASK 0x7FFFFF #define INIT_READ_OP_ADDRESS_SHIFT 9 /* expected polling value, used only when polling is done */ - __le32 expected_val; + u32 expected_val; }; /* @@ -444,11 +395,11 @@ enum init_source_types { * Internal RAM Offsets macro data */ struct iro { - __le32 base /* RAM field offset */; - __le16 m1 /* multiplier 1 */; - __le16 m2 /* multiplier 2 */; - __le16 m3 /* multiplier 3 */; - __le16 size /* RAM field size */; + u32 base /* RAM field offset */; + u16 m1 /* multiplier 1 */; + u16 m2 /* multiplier 2 */; + u16 m3 /* multiplier 3 */; + u16 size /* RAM field size */; }; #endif /* __ECORE_HSI_INIT_TOOL__ */ diff --git a/drivers/net/qede/base/ecore_hw.c b/drivers/net/qede/base/ecore_hw.c index 84f273b0..51bba27e 100644 --- a/drivers/net/qede/base/ecore_hw.c +++ b/drivers/net/qede/base/ecore_hw.c @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. 
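The iro structure defined above backs the iro_arr table that appears further down in ecore_iro_values.h; the per-Storm offset macros in ecore_iro.h are typically built from it by scaling the macro parameter with the multiplier fields. The sketch below shows that expansion pattern only as a hypothetical example (the array index and macro name are illustrative, modeled on the per-port TSTORM statistics entry).

/* Hypothetical expansion pattern for an IRO-based offset macro: the offset
 * is the entry's base plus the parameter times the first multiplier, and the
 * size field gives the per-element stride in the Storm RAM.
 */
#define EXAMPLE_PORT_STAT_OFFSET(port_id) \
	(iro_arr[1].base + ((port_id) * iro_arr[1].m1))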
+ * www.cavium.com */ #include "bcm_osal.h" @@ -38,6 +36,12 @@ struct ecore_ptt_pool { struct ecore_ptt ptts[PXP_EXTERNAL_BAR_PF_WINDOW_NUM]; }; +void __ecore_ptt_pool_free(struct ecore_hwfn *p_hwfn) +{ + OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_ptt_pool); + p_hwfn->p_ptt_pool = OSAL_NULL; +} + enum _ecore_status_t ecore_ptt_pool_alloc(struct ecore_hwfn *p_hwfn) { struct ecore_ptt_pool *p_pool = OSAL_ALLOC(p_hwfn->p_dev, @@ -65,10 +69,12 @@ enum _ecore_status_t ecore_ptt_pool_alloc(struct ecore_hwfn *p_hwfn) p_hwfn->p_ptt_pool = p_pool; #ifdef CONFIG_ECORE_LOCK_ALLOC - OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_pool->lock); + if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_pool->lock)) { + __ecore_ptt_pool_free(p_hwfn); + return ECORE_NOMEM; + } #endif OSAL_SPIN_LOCK_INIT(&p_pool->lock); - return ECORE_SUCCESS; } @@ -89,7 +95,7 @@ void ecore_ptt_pool_free(struct ecore_hwfn *p_hwfn) if (p_hwfn->p_ptt_pool) OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->p_ptt_pool->lock); #endif - OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_ptt_pool); + __ecore_ptt_pool_free(p_hwfn); } struct ecore_ptt *ecore_ptt_acquire(struct ecore_hwfn *p_hwfn) @@ -569,7 +575,7 @@ enum _ecore_status_t ecore_dmae_info_alloc(struct ecore_hwfn *p_hwfn) *p_comp = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, p_addr, sizeof(u32)); if (*p_comp == OSAL_NULL) { - DP_NOTICE(p_hwfn, true, + DP_NOTICE(p_hwfn, false, "Failed to allocate `p_completion_word'\n"); goto err; } @@ -578,7 +584,7 @@ enum _ecore_status_t ecore_dmae_info_alloc(struct ecore_hwfn *p_hwfn) *p_cmd = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, p_addr, sizeof(struct dmae_cmd)); if (*p_cmd == OSAL_NULL) { - DP_NOTICE(p_hwfn, true, + DP_NOTICE(p_hwfn, false, "Failed to allocate `struct dmae_cmd'\n"); goto err; } @@ -587,12 +593,13 @@ enum _ecore_status_t ecore_dmae_info_alloc(struct ecore_hwfn *p_hwfn) *p_buff = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, p_addr, sizeof(u32) * DMAE_MAX_RW_SIZE); if (*p_buff == OSAL_NULL) { - DP_NOTICE(p_hwfn, true, + DP_NOTICE(p_hwfn, false, "Failed to allocate `intermediate_buffer'\n"); goto err; } - p_hwfn->dmae_info.channel = p_hwfn->rel_pf_id; + p_hwfn->dmae_info.channel = p_hwfn->rel_pf_id; + p_hwfn->dmae_info.b_mem_ready = true; return ECORE_SUCCESS; err: @@ -604,8 +611,9 @@ void ecore_dmae_info_free(struct ecore_hwfn *p_hwfn) { dma_addr_t p_phys; - /* Just make sure no one is in the middle */ - OSAL_MUTEX_ACQUIRE(&p_hwfn->dmae_info.mutex); + OSAL_SPIN_LOCK(&p_hwfn->dmae_info.lock); + p_hwfn->dmae_info.b_mem_ready = false; + OSAL_SPIN_UNLOCK(&p_hwfn->dmae_info.lock); if (p_hwfn->dmae_info.p_completion_word != OSAL_NULL) { p_phys = p_hwfn->dmae_info.completion_word_phys_addr; @@ -630,8 +638,6 @@ void ecore_dmae_info_free(struct ecore_hwfn *p_hwfn) p_phys, sizeof(u32) * DMAE_MAX_RW_SIZE); p_hwfn->dmae_info.p_intermediate_buffer = OSAL_NULL; } - - OSAL_MUTEX_RELEASE(&p_hwfn->dmae_info.mutex); } static enum _ecore_status_t ecore_dmae_operation_wait(struct ecore_hwfn *p_hwfn) @@ -777,6 +783,15 @@ ecore_dmae_execute_command(struct ecore_hwfn *p_hwfn, enum _ecore_status_t ecore_status = ECORE_SUCCESS; u32 offset = 0; + if (!p_hwfn->dmae_info.b_mem_ready) { + DP_VERBOSE(p_hwfn, ECORE_MSG_HW, + "No buffers allocated. Avoid DMAE transaction [{src: addr 0x%lx, type %d}, {dst: addr 0x%lx, type %d}, size %d].\n", + (unsigned long)src_addr, src_type, + (unsigned long)dst_addr, dst_type, + size_in_dwords); + return ECORE_NOMEM; + } + if (p_hwfn->p_dev->recov_in_prog) { DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "Recovery is in progress. 
Avoid DMAE transaction [{src: addr 0x%lx, type %d}, {dst: addr 0x%lx, type %d}, size %d].\n", @@ -870,7 +885,7 @@ ecore_dmae_host2grc(struct ecore_hwfn *p_hwfn, OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params)); params.flags = flags; - OSAL_MUTEX_ACQUIRE(&p_hwfn->dmae_info.mutex); + OSAL_SPIN_LOCK(&p_hwfn->dmae_info.lock); rc = ecore_dmae_execute_command(p_hwfn, p_ptt, source_addr, grc_addr_in_dw, @@ -878,7 +893,7 @@ ecore_dmae_host2grc(struct ecore_hwfn *p_hwfn, ECORE_DMAE_ADDRESS_GRC, size_in_dwords, &params); - OSAL_MUTEX_RELEASE(&p_hwfn->dmae_info.mutex); + OSAL_SPIN_UNLOCK(&p_hwfn->dmae_info.lock); return rc; } @@ -896,14 +911,14 @@ ecore_dmae_grc2host(struct ecore_hwfn *p_hwfn, OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params)); params.flags = flags; - OSAL_MUTEX_ACQUIRE(&p_hwfn->dmae_info.mutex); + OSAL_SPIN_LOCK(&p_hwfn->dmae_info.lock); rc = ecore_dmae_execute_command(p_hwfn, p_ptt, grc_addr_in_dw, dest_addr, ECORE_DMAE_ADDRESS_GRC, ECORE_DMAE_ADDRESS_HOST_VIRT, size_in_dwords, &params); - OSAL_MUTEX_RELEASE(&p_hwfn->dmae_info.mutex); + OSAL_SPIN_UNLOCK(&p_hwfn->dmae_info.lock); return rc; } @@ -917,7 +932,7 @@ ecore_dmae_host2host(struct ecore_hwfn *p_hwfn, { enum _ecore_status_t rc; - OSAL_MUTEX_ACQUIRE(&p_hwfn->dmae_info.mutex); + OSAL_SPIN_LOCK(&p_hwfn->dmae_info.lock); rc = ecore_dmae_execute_command(p_hwfn, p_ptt, source_addr, dest_addr, @@ -925,7 +940,7 @@ ecore_dmae_host2host(struct ecore_hwfn *p_hwfn, ECORE_DMAE_ADDRESS_HOST_PHYS, size_in_dwords, p_params); - OSAL_MUTEX_RELEASE(&p_hwfn->dmae_info.mutex); + OSAL_SPIN_UNLOCK(&p_hwfn->dmae_info.lock); return rc; } @@ -944,3 +959,74 @@ void ecore_hw_err_notify(struct ecore_hwfn *p_hwfn, OSAL_HW_ERROR_OCCURRED(p_hwfn, err_type); } + +enum _ecore_status_t ecore_dmae_sanity(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + const char *phase) +{ + u32 size = OSAL_PAGE_SIZE / 2, val; + struct ecore_dmae_params params; + enum _ecore_status_t rc = ECORE_SUCCESS; + dma_addr_t p_phys; + void *p_virt; + u32 *p_tmp; + + p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &p_phys, 2 * size); + if (!p_virt) { + DP_NOTICE(p_hwfn, false, + "DMAE sanity [%s]: failed to allocate memory\n", + phase); + return ECORE_NOMEM; + } + + /* Fill the bottom half of the allocated memory with a known pattern */ + for (p_tmp = (u32 *)p_virt; + p_tmp < (u32 *)((u8 *)p_virt + size); + p_tmp++) { + /* Save the address itself as the value */ + val = (u32)(osal_uintptr_t)p_tmp; + *p_tmp = val; + } + + /* Zero the top half of the allocated memory */ + OSAL_MEM_ZERO((u8 *)p_virt + size, size); + + DP_VERBOSE(p_hwfn, ECORE_MSG_SP, + "DMAE sanity [%s]: src_addr={phys 0x%lx, virt %p}, dst_addr={phys 0x%lx, virt %p}, size 0x%x\n", + phase, (unsigned long)p_phys, p_virt, + (unsigned long)(p_phys + size), + (u8 *)p_virt + size, size); + + OSAL_MEMSET(&params, 0, sizeof(params)); + rc = ecore_dmae_host2host(p_hwfn, p_ptt, p_phys, p_phys + size, + size / 4 /* size_in_dwords */, &params); + if (rc != ECORE_SUCCESS) { + DP_NOTICE(p_hwfn, false, + "DMAE sanity [%s]: ecore_dmae_host2host() failed.
rc = %d.\n", + phase, rc); + goto out; + } + + /* Verify that the top half of the allocated memory has the pattern */ + for (p_tmp = (u32 *)((u8 *)p_virt + size); + p_tmp < (u32 *)((u8 *)p_virt + (2 * size)); + p_tmp++) { + /* The corresponding address in the bottom half */ + val = (u32)(osal_uintptr_t)p_tmp - size; + + if (*p_tmp != val) { + DP_NOTICE(p_hwfn, false, + "DMAE sanity [%s]: addr={phys 0x%lx, virt %p}, read_val 0x%08x, expected_val 0x%08x\n", + phase, + (unsigned long)p_phys + + ((u8 *)p_tmp - (u8 *)p_virt), + p_tmp, *p_tmp, val); + rc = ECORE_UNKNOWN_ERROR; + goto out; + } + } + +out: + OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_virt, p_phys, 2 * size); + return rc; +} diff --git a/drivers/net/qede/base/ecore_hw.h b/drivers/net/qede/base/ecore_hw.h index 0b9814f5..394207eb 100644 --- a/drivers/net/qede/base/ecore_hw.h +++ b/drivers/net/qede/base/ecore_hw.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef __ECORE_HW_H__ @@ -255,4 +253,8 @@ enum _ecore_status_t ecore_init_fw_data(struct ecore_dev *p_dev, void ecore_hw_err_notify(struct ecore_hwfn *p_hwfn, enum ecore_hw_err_type err_type); +enum _ecore_status_t ecore_dmae_sanity(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + const char *phase); + #endif /* __ECORE_HW_H__ */ diff --git a/drivers/net/qede/base/ecore_hw_defs.h b/drivers/net/qede/base/ecore_hw_defs.h index 4456af43..b8c2686f 100644 --- a/drivers/net/qede/base/ecore_hw_defs.h +++ b/drivers/net/qede/base/ecore_hw_defs.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef _ECORE_IGU_DEF_H_ diff --git a/drivers/net/qede/base/ecore_init_fw_funcs.c b/drivers/net/qede/base/ecore_init_fw_funcs.c index 1da80a65..b8496cb2 100644 --- a/drivers/net/qede/base/ecore_init_fw_funcs.c +++ b/drivers/net/qede/base/ecore_init_fw_funcs.c @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #include "bcm_osal.h" @@ -76,12 +74,12 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = { #define QM_RL_PERIOD_CLK_25M (25 * QM_RL_PERIOD) /* RL increment value - rate is specified in mbps. the factor of 1.01 was -* added after seeing only 99% factor reached in a 25Gbps port with DPDK RFC -* 2544 test. In this scenario the PF RL was reducing the line rate to 99% -* although the credit increment value was the correct one and FW calculated -* correct packet sizes. The reason for the inaccuracy of the RL is unknown at -* this point. -*/ + * added after seeing only 99% factor reached in a 25Gbps port with DPDK RFC + * 2544 test. In this scenario the PF RL was reducing the line rate to 99% + * although the credit increment value was the correct one and FW calculated + * correct packet sizes. The reason for the inaccuracy of the RL is unknown at + * this point. + */ #define QM_RL_INC_VAL(rate) \ OSAL_MAX_T(u32, (u32)(((rate ? 
rate : 100000) * QM_RL_PERIOD * 101) / \ (8 * 100)), 1) @@ -182,7 +180,7 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = { (((vp) << 0) | ((pf) << 12) | ((tc) << 16) | \ ((port) << 20) | ((rl_valid) << 22) | ((rl) << 24)) #define PQ_INFO_RAM_GRC_ADDRESS(pq_id) \ - (XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + 21768 + (pq_id) * 4) + (XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + 21776 + (pq_id) * 4) /******************** INTERNAL IMPLEMENTATION *********************/ @@ -421,9 +419,9 @@ static void ecore_btb_blocks_rt_init(struct ecore_hwfn *p_hwfn, /* Prepare Tx PQ mapping runtime init values for the specified PF */ static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, - u8 port_id, u8 pf_id, u8 max_phys_tcs_per_port, + bool is_pf_loading, u32 num_pf_cids, u32 num_vf_cids, u16 start_pq, @@ -437,7 +435,7 @@ static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn, /* A bit per Tx PQ indicating if the PQ is associated with a VF */ u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 }; u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE; - u16 num_pqs, first_pq_group, last_pq_group, i, pq_id, pq_group; + u16 num_pqs, first_pq_group, last_pq_group, i, j, pq_id, pq_group; u32 pq_mem_4kb, vport_pq_mem_4kb, mem_addr_4kb; num_pqs = num_pf_pqs + num_vf_pqs; @@ -467,11 +465,11 @@ static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn, bool is_vf_pq, rl_valid; u16 first_tx_pq_id; - ext_voq = ecore_get_ext_voq(p_hwfn, port_id, pq_params[i].tc_id, + ext_voq = ecore_get_ext_voq(p_hwfn, pq_params[i].port_id, + pq_params[i].tc_id, max_phys_tcs_per_port); is_vf_pq = (i >= num_pf_pqs); - rl_valid = pq_params[i].rl_valid && pq_params[i].vport_id < - max_qm_global_rls; + rl_valid = pq_params[i].rl_valid > 0; /* Update first Tx PQ of VPORT/TC */ vport_id_in_pf = pq_params[i].vport_id - start_vport; @@ -492,28 +490,38 @@ static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn, } /* Check RL ID */ - if (pq_params[i].rl_valid && pq_params[i].vport_id >= - max_qm_global_rls) + if (rl_valid && pq_params[i].vport_id >= max_qm_global_rls) { DP_NOTICE(p_hwfn, true, "Invalid VPORT ID for rate limiter config\n"); + rl_valid = false; + } /* Prepare PQ map entry */ struct qm_rf_pq_map_e4 tx_pq_map; + QM_INIT_TX_PQ_MAP(p_hwfn, tx_pq_map, E4, pq_id, rl_valid ? 1 : 0, first_tx_pq_id, rl_valid ? pq_params[i].vport_id : 0, ext_voq, pq_params[i].wrr_group); - /* Set base address */ + /* Set PQ base address */ STORE_RT_REG(p_hwfn, QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id, mem_addr_4kb); + /* Clear PQ pointer table entry (64 bit) */ + if (is_pf_loading) + for (j = 0; j < 2; j++) + STORE_RT_REG(p_hwfn, QM_REG_PTRTBLTX_RT_OFFSET + + (pq_id * 2) + j, 0); + /* Write PQ info to RAM */ if (WRITE_PQ_INFO_TO_RAM != 0) { u32 pq_info = 0; + pq_info = PQ_INFO_ELEMENT(first_tx_pq_id, pf_id, - pq_params[i].tc_id, port_id, + pq_params[i].tc_id, + pq_params[i].port_id, rl_valid ? 1 : 0, rl_valid ? 
pq_params[i].vport_id : 0); ecore_wr(p_hwfn, p_ptt, PQ_INFO_RAM_GRC_ADDRESS(pq_id), @@ -540,12 +548,13 @@ static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn, /* Prepare Other PQ mapping runtime init values for the specified PF */ static void ecore_other_pq_map_rt_init(struct ecore_hwfn *p_hwfn, u8 pf_id, + bool is_pf_loading, u32 num_pf_cids, u32 num_tids, u32 base_mem_addr_4kb) { u32 pq_size, pq_mem_4kb, mem_addr_4kb; - u16 i, pq_id, pq_group; + u16 i, j, pq_id, pq_group; /* A single other PQ group is used in each PF, where PQ group i is used * in PF i. @@ -563,11 +572,19 @@ static void ecore_other_pq_map_rt_init(struct ecore_hwfn *p_hwfn, STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET, QM_PQ_SIZE_256B(pq_size)); - /* Set base address */ for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE; i < QM_OTHER_PQS_PER_PF; i++, pq_id++) { + /* Set PQ base address */ STORE_RT_REG(p_hwfn, QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id, mem_addr_4kb); + + /* Clear PQ pointer table entry */ + if (is_pf_loading) + for (j = 0; j < 2; j++) + STORE_RT_REG(p_hwfn, + QM_REG_PTRTBLOTHER_RT_OFFSET + + (pq_id * 2) + j, 0); + mem_addr_4kb += pq_mem_4kb; } } @@ -576,7 +593,6 @@ static void ecore_other_pq_map_rt_init(struct ecore_hwfn *p_hwfn, * Return -1 on error. */ static int ecore_pf_wfq_rt_init(struct ecore_hwfn *p_hwfn, - u8 port_id, u8 pf_id, u16 pf_wfq, u8 max_phys_tcs_per_port, @@ -595,7 +611,8 @@ static int ecore_pf_wfq_rt_init(struct ecore_hwfn *p_hwfn, } for (i = 0; i < num_tx_pqs; i++) { - ext_voq = ecore_get_ext_voq(p_hwfn, port_id, pq_params[i].tc_id, + ext_voq = ecore_get_ext_voq(p_hwfn, pq_params[i].port_id, + pq_params[i].tc_id, max_phys_tcs_per_port); crd_reg_offset = (pf_id < MAX_NUM_PFS_BB ? QM_REG_WFQPFCRD_RT_OFFSET : @@ -604,12 +621,12 @@ static int ecore_pf_wfq_rt_init(struct ecore_hwfn *p_hwfn, (pf_id % MAX_NUM_PFS_BB); OVERWRITE_RT_REG(p_hwfn, crd_reg_offset, (u32)QM_WFQ_CRD_REG_SIGN_BIT); - STORE_RT_REG(p_hwfn, QM_REG_WFQPFUPPERBOUND_RT_OFFSET + pf_id, - QM_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT); - STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + pf_id, - inc_val); } + STORE_RT_REG(p_hwfn, QM_REG_WFQPFUPPERBOUND_RT_OFFSET + + pf_id, QM_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT); + STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + pf_id, inc_val); + return 0; } @@ -820,9 +837,9 @@ int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn, int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, - u8 port_id, u8 pf_id, u8 max_phys_tcs_per_port, + bool is_pf_loading, u32 num_pf_cids, u32 num_vf_cids, u32 num_tids, @@ -850,20 +867,21 @@ int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn, /* Map Other PQs (if any) */ #if QM_OTHER_PQS_PER_PF > 0 - ecore_other_pq_map_rt_init(p_hwfn, pf_id, num_pf_cids, num_tids, 0); + ecore_other_pq_map_rt_init(p_hwfn, pf_id, is_pf_loading, num_pf_cids, + num_tids, 0); #endif /* Map Tx PQs */ - ecore_tx_pq_map_rt_init(p_hwfn, p_ptt, port_id, pf_id, - max_phys_tcs_per_port, num_pf_cids, num_vf_cids, + ecore_tx_pq_map_rt_init(p_hwfn, p_ptt, pf_id, max_phys_tcs_per_port, + is_pf_loading, num_pf_cids, num_vf_cids, start_pq, num_pf_pqs, num_vf_pqs, start_vport, other_mem_size_4kb, pq_params, vport_params); /* Init PF WFQ */ if (pf_wfq) - if (ecore_pf_wfq_rt_init - (p_hwfn, port_id, pf_id, pf_wfq, max_phys_tcs_per_port, - num_pf_pqs + num_vf_pqs, pq_params)) + if (ecore_pf_wfq_rt_init(p_hwfn, pf_id, pf_wfq, + max_phys_tcs_per_port, + num_pf_pqs + num_vf_pqs, pq_params)) return -1; /* Init PF RL */ @@ -1419,7 +1437,9 @@ void 
ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn, u32 ethType) #define SET_TUNNEL_TYPE_ENABLE_BIT(var, offset, enable) \ (var = ((var) & ~(1 << (offset))) | ((enable) ? (1 << (offset)) : 0)) -#define PRS_ETH_TUNN_FIC_FORMAT -188897008 +#define PRS_ETH_TUNN_OUTPUT_FORMAT -188897008 +#define PRS_ETH_OUTPUT_FORMAT -46832 + void ecore_set_vxlan_dest_port(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u16 dest_port) { @@ -1444,9 +1464,14 @@ void ecore_set_vxlan_enable(struct ecore_hwfn *p_hwfn, PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT, vxlan_enable); ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val); - if (reg_val) { - ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2, - (u32)PRS_ETH_TUNN_FIC_FORMAT); + if (reg_val) { /* TODO: handle E5 init */ + reg_val = ecore_rd(p_hwfn, p_ptt, + PRS_REG_OUTPUT_FORMAT_4_0_BB_K2); + + /* Update output only if tunnel blocks not included. */ + if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT) + ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2, + (u32)PRS_ETH_TUNN_OUTPUT_FORMAT); } /* Update NIG register */ @@ -1476,9 +1501,14 @@ void ecore_set_gre_enable(struct ecore_hwfn *p_hwfn, PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT, ip_gre_enable); ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val); - if (reg_val) { - ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2, - (u32)PRS_ETH_TUNN_FIC_FORMAT); + if (reg_val) { /* TODO: handle E5 init */ + reg_val = ecore_rd(p_hwfn, p_ptt, + PRS_REG_OUTPUT_FORMAT_4_0_BB_K2); + + /* Update output only if tunnel blocks not included. */ + if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT) + ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2, + (u32)PRS_ETH_TUNN_OUTPUT_FORMAT); } /* Update NIG register */ @@ -1526,9 +1556,14 @@ void ecore_set_geneve_enable(struct ecore_hwfn *p_hwfn, PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT, ip_geneve_enable); ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val); - if (reg_val) { - ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2, - (u32)PRS_ETH_TUNN_FIC_FORMAT); + if (reg_val) { /* TODO: handle E5 init */ + reg_val = ecore_rd(p_hwfn, p_ptt, + PRS_REG_OUTPUT_FORMAT_4_0_BB_K2); + + /* Update output only if tunnel blocks not included. */ + if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT) + ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2, + (u32)PRS_ETH_TUNN_OUTPUT_FORMAT); } /* Update NIG register */ @@ -1548,6 +1583,36 @@ void ecore_set_geneve_enable(struct ecore_hwfn *p_hwfn, ip_geneve_enable ? 
1 : 0); } +#define PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET 4 +#define PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT -927094512 + +void ecore_set_vxlan_no_l2_enable(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + bool enable) +{ + u32 reg_val, cfg_mask; + + /* read PRS config register */ + reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_MSG_INFO); + + /* set VXLAN_NO_L2_ENABLE mask */ + cfg_mask = (1 << PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET); + + if (enable) { + /* set VXLAN_NO_L2_ENABLE flag */ + reg_val |= cfg_mask; + + /* update PRS FIC register */ + ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2, + (u32)PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT); + } else { + /* clear VXLAN_NO_L2_ENABLE flag */ + reg_val &= ~cfg_mask; + } + + /* write PRS config register */ + ecore_wr(p_hwfn, p_ptt, PRS_REG_MSG_INFO, reg_val); +} #define T_ETH_PACKET_ACTION_GFT_EVENTID 23 #define PARSER_ETH_CONN_GFT_ACTION_CM_HDR 272 @@ -1664,6 +1729,10 @@ void ecore_gft_config(struct ecore_hwfn *p_hwfn, ram_line_lo = 0; ram_line_hi = 0; + /* Tunnel type */ + SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_DST_PORT, 1); + SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL, 1); + if (profile_type == GFT_PROFILE_TYPE_4_TUPLE) { SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1); SET_FIELD(ram_line_hi, GFT_RAM_LINE_SRC_IP, 1); @@ -1675,9 +1744,14 @@ void ecore_gft_config(struct ecore_hwfn *p_hwfn, SET_FIELD(ram_line_hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1); SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1); SET_FIELD(ram_line_lo, GFT_RAM_LINE_DST_PORT, 1); - } else if (profile_type == GFT_PROFILE_TYPE_IP_DST_PORT) { + } else if (profile_type == GFT_PROFILE_TYPE_IP_DST_ADDR) { SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1); SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1); + } else if (profile_type == GFT_PROFILE_TYPE_IP_SRC_ADDR) { + SET_FIELD(ram_line_hi, GFT_RAM_LINE_SRC_IP, 1); + SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1); + } else if (profile_type == GFT_PROFILE_TYPE_TUNNEL_TYPE) { + SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_ETHERTYPE, 1); } ecore_wr(p_hwfn, p_ptt, @@ -1921,3 +1995,53 @@ void ecore_enable_context_validation(struct ecore_hwfn *p_hwfn, ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8; ecore_wr(p_hwfn, p_ptt, CDU_REG_TCFC_CTX_VALID0, ctx_validation); } + +#define RSS_IND_TABLE_BASE_ADDR 4112 +#define RSS_IND_TABLE_VPORT_SIZE 16 +#define RSS_IND_TABLE_ENTRY_PER_LINE 8 + +/* Update RSS indirection table entry. 
*/ +void ecore_update_eth_rss_ind_table_entry(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u8 rss_id, + u8 ind_table_index, + u16 ind_table_value) +{ + u32 cnt, rss_addr; + u32 *reg_val; + u16 rss_ind_entry[RSS_IND_TABLE_ENTRY_PER_LINE]; + u16 rss_ind_mask[RSS_IND_TABLE_ENTRY_PER_LINE]; + + /* get entry address */ + rss_addr = RSS_IND_TABLE_BASE_ADDR + + RSS_IND_TABLE_VPORT_SIZE * rss_id + + ind_table_index / RSS_IND_TABLE_ENTRY_PER_LINE; + + /* prepare update command */ + ind_table_index %= RSS_IND_TABLE_ENTRY_PER_LINE; + + for (cnt = 0; cnt < RSS_IND_TABLE_ENTRY_PER_LINE; cnt++) { + if (cnt == ind_table_index) { + rss_ind_entry[cnt] = ind_table_value; + rss_ind_mask[cnt] = 0xFFFF; + } else { + rss_ind_entry[cnt] = 0; + rss_ind_mask[cnt] = 0; + } + } + + /* Update entry in HW*/ + ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, rss_addr); + + reg_val = (u32 *)rss_ind_mask; + ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_MASK, reg_val[0]); + ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_MASK + 4, reg_val[1]); + ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_MASK + 8, reg_val[2]); + ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_MASK + 12, reg_val[3]); + + reg_val = (u32 *)rss_ind_entry; + ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_DATA, reg_val[0]); + ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_DATA + 4, reg_val[1]); + ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_DATA + 8, reg_val[2]); + ecore_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_DATA + 12, reg_val[3]); +} diff --git a/drivers/net/qede/base/ecore_init_fw_funcs.h b/drivers/net/qede/base/ecore_init_fw_funcs.h index ab560e59..1024bb26 100644 --- a/drivers/net/qede/base/ecore_init_fw_funcs.h +++ b/drivers/net/qede/base/ecore_init_fw_funcs.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef _INIT_FW_FUNCS_H @@ -61,9 +59,10 @@ int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn, * * @param p_hwfn * @param p_ptt - ptt window used for writing the registers - * @param port_id - port ID * @param pf_id - PF ID * @param max_phys_tcs_per_port - max number of physical TCs per port in HW + * @param is_pf_loading - indicates if the PF is currently loading, + * i.e. it has no allocated QM resources. * @param num_pf_cids - number of connections used by this PF * @param num_vf_cids - number of connections used by VFs of this PF * @param num_tids - number of tasks used by this PF @@ -87,9 +86,9 @@ int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn, */ int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, - u8 port_id, u8 pf_id, u8 max_phys_tcs_per_port, + bool is_pf_loading, u32 num_pf_cids, u32 num_vf_cids, u32 num_tids, @@ -259,6 +258,16 @@ void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn, struct init_brb_ram_req *req); #endif /* UNUSED_HSI_FUNC */ +/** + * @brief ecore_set_vxlan_no_l2_enable - enable or disable VXLAN no L2 parsing + * + * @param p_ptt - ptt window used for writing the registers. + * @param enable - VXLAN no L2 enable flag. 
+ */ +void ecore_set_vxlan_no_l2_enable(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + bool enable); + #ifndef UNUSED_HSI_FUNC /** * @brief ecore_set_port_mf_ovlan_eth_type - initializes DORQ ethType Regs to @@ -462,4 +471,22 @@ void ecore_memset_session_ctx(void *p_ctx_mem, void ecore_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type); + +/** + * @brief ecore_update_eth_rss_ind_table_entry - Update RSS indirection table + * entry. + * The function must run in exclusive mode to prevent wrong RSS configuration. + * + * @param p_hwfn - HW device data + * @param p_ptt - ptt window used for writing the registers. + * @param rss_id - RSS engine ID. + * @param ind_table_index - RSS indirect table index. + * @param ind_table_value - RSS indirect table new value. + */ +void ecore_update_eth_rss_ind_table_entry(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u8 rss_id, + u8 ind_table_index, + u16 ind_table_value); + #endif diff --git a/drivers/net/qede/base/ecore_init_ops.c b/drivers/net/qede/base/ecore_init_ops.c index 91633c11..b7636f36 100644 --- a/drivers/net/qede/base/ecore_init_ops.c +++ b/drivers/net/qede/base/ecore_init_ops.c @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ /* include the precompiled configuration values - only once */ @@ -389,23 +387,29 @@ static void ecore_init_cmd_rd(struct ecore_hwfn *p_hwfn, } if (i == ECORE_INIT_MAX_POLL_COUNT) - DP_ERR(p_hwfn, - "Timeout when polling reg: 0x%08x [ Waiting-for: %08x" - " Got: %08x (comparsion %08x)]\n", + DP_ERR(p_hwfn, "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparison %08x)]\n", addr, OSAL_LE32_TO_CPU(cmd->expected_val), val, OSAL_LE32_TO_CPU(cmd->op_data)); } -/* init_ops callbacks entry point. - * OSAL_UNUSED is temporary used to avoid unused-parameter compilation warnings. - * Should be removed when the function is actually used. - */ -static void ecore_init_cmd_cb(struct ecore_hwfn *p_hwfn, - struct ecore_ptt OSAL_UNUSED * p_ptt, - struct init_callback_op OSAL_UNUSED * p_cmd) +/* init_ops callbacks entry point */ +static enum _ecore_status_t ecore_init_cmd_cb(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct init_callback_op *p_cmd) { - DP_NOTICE(p_hwfn, true, - "Currently init values have no need of callbacks\n"); + enum _ecore_status_t rc; + + switch (p_cmd->callback_id) { + case DMAE_READY_CB: + rc = ecore_dmae_sanity(p_hwfn, p_ptt, "engine_phase"); + break; + default: + DP_NOTICE(p_hwfn, false, "Unexpected init op callback ID %d\n", + p_cmd->callback_id); + return ECORE_INVAL; + } + + return rc; } static u8 ecore_init_cmd_mode_match(struct ecore_hwfn *p_hwfn, @@ -513,7 +517,7 @@ enum _ecore_status_t ecore_init_run(struct ecore_hwfn *p_hwfn, break; case INIT_OP_CALLBACK: - ecore_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback); + rc = ecore_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback); break; } diff --git a/drivers/net/qede/base/ecore_init_ops.h b/drivers/net/qede/base/ecore_init_ops.h index e293a4a3..de7846d4 100644 --- a/drivers/net/qede/base/ecore_init_ops.h +++ b/drivers/net/qede/base/ecore_init_ops.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. 
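A worked example of the address arithmetic in ecore_update_eth_rss_ind_table_entry() above, using hypothetical arguments, may help when reading the RAM-line update logic; the numbers follow directly from the constants and modulo step shown in that function.

/* Hypothetical call: rss_id = 1, ind_table_index = 10, ind_table_value = 7.
 * rss_addr = 4112 + 16 * 1 + 10 / 8 = 4129, i.e. the RAM line that holds
 * entries 8..15 of engine 1's indirection table.
 * ind_table_index %= 8 gives 2, so rss_ind_entry[2] = 7 and
 * rss_ind_mask[2] = 0xFFFF while every other entry and mask stays 0;
 * only the third 16-bit entry of that RAM line is rewritten in HW.
 */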
- * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef __ECORE_INIT_OPS__ diff --git a/drivers/net/qede/base/ecore_int.c b/drivers/net/qede/base/ecore_int.c index e6cef85b..4c271d35 100644 --- a/drivers/net/qede/base/ecore_int.c +++ b/drivers/net/qede/base/ecore_int.c @@ -1,11 +1,11 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ +#include <rte_string_fns.h> + #include "bcm_osal.h" #include "ecore.h" #include "ecore_spq.h" @@ -231,15 +231,19 @@ static const char *grc_timeout_attn_master_to_str(u8 master) static enum _ecore_status_t ecore_grc_attn_cb(struct ecore_hwfn *p_hwfn) { + enum _ecore_status_t rc = ECORE_SUCCESS; u32 tmp, tmp2; /* We've already cleared the timeout interrupt register, so we learn - * of interrupts via the validity register + * of interrupts via the validity register. + * Any attention which is not for a timeout event is treated as fatal. */ tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, GRC_REG_TIMEOUT_ATTN_ACCESS_VALID); - if (!(tmp & ECORE_GRC_ATTENTION_VALID_BIT)) + if (!(tmp & ECORE_GRC_ATTENTION_VALID_BIT)) { + rc = ECORE_INVAL; goto out; + } /* Read the GRC timeout information */ tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, @@ -263,11 +267,11 @@ static enum _ecore_status_t ecore_grc_attn_cb(struct ecore_hwfn *p_hwfn) (tmp2 & ECORE_GRC_ATTENTION_VF_MASK) >> ECORE_GRC_ATTENTION_VF_SHIFT); -out: - /* Regardles of anything else, clean the validity bit */ + /* Clean the validity bit */ ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, GRC_REG_TIMEOUT_ATTN_ACCESS_VALID, 0); - return ECORE_SUCCESS; +out: + return rc; } #define ECORE_PGLUE_ATTENTION_VALID (1 << 29) @@ -285,9 +289,11 @@ out: #define ECORE_PGLUE_ATTENTION_ILT_VALID (1 << 23) enum _ecore_status_t ecore_pglueb_rbc_attn_handler(struct ecore_hwfn *p_hwfn, - struct ecore_ptt *p_ptt) + struct ecore_ptt *p_ptt, + bool is_hw_init) { u32 tmp; + char str[512] = {0}; tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS2); if (tmp & ECORE_PGLUE_ATTENTION_VALID) { @@ -299,9 +305,8 @@ enum _ecore_status_t ecore_pglueb_rbc_attn_handler(struct ecore_hwfn *p_hwfn, PGLUE_B_REG_TX_ERR_WR_ADD_63_32); details = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS); - - DP_NOTICE(p_hwfn, false, - "Illegal write by chip to [%08x:%08x] blocked. Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x] Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n", + OSAL_SNPRINTF(str, 512, + "Illegal write by chip to [%08x:%08x] blocked. Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x] Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n", addr_hi, addr_lo, details, (u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >> @@ -318,6 +323,10 @@ enum _ecore_status_t ecore_pglueb_rbc_attn_handler(struct ecore_hwfn *p_hwfn, 1 : 0), (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ? 
1 : 0)); + if (is_hw_init) + DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "%s", str); + else + DP_NOTICE(p_hwfn, false, "%s", str); } tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_RD_DETAILS2); @@ -393,7 +402,7 @@ enum _ecore_status_t ecore_pglueb_rbc_attn_handler(struct ecore_hwfn *p_hwfn, static enum _ecore_status_t ecore_pglueb_rbc_attn_cb(struct ecore_hwfn *p_hwfn) { - return ecore_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_dpc_ptt); + return ecore_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_dpc_ptt, false); } static enum _ecore_status_t ecore_fw_assertion(struct ecore_hwfn *p_hwfn) @@ -1104,9 +1113,9 @@ static enum _ecore_status_t ecore_int_deassertion(struct ecore_hwfn *p_hwfn, p_aeu->bit_name, num); else - OSAL_STRNCPY(bit_name, - p_aeu->bit_name, - 30); + strlcpy(bit_name, + p_aeu->bit_name, + sizeof(bit_name)); /* We now need to pass bitmask in its * correct position. @@ -1406,8 +1415,7 @@ static enum _ecore_status_t ecore_int_sb_attn_alloc(struct ecore_hwfn *p_hwfn, /* SB struct */ p_sb = OSAL_ALLOC(p_dev, GFP_KERNEL, sizeof(*p_sb)); if (!p_sb) { - DP_NOTICE(p_dev, true, - "Failed to allocate `struct ecore_sb_attn_info'\n"); + DP_NOTICE(p_dev, false, "Failed to allocate `struct ecore_sb_attn_info'\n"); return ECORE_NOMEM; } @@ -1415,8 +1423,7 @@ static enum _ecore_status_t ecore_int_sb_attn_alloc(struct ecore_hwfn *p_hwfn, p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys, SB_ATTN_ALIGNED_SIZE(p_hwfn)); if (!p_virt) { - DP_NOTICE(p_dev, true, - "Failed to allocate status block (attentions)\n"); + DP_NOTICE(p_dev, false, "Failed to allocate status block (attentions)\n"); OSAL_FREE(p_dev, p_sb); return ECORE_NOMEM; } @@ -1795,8 +1802,7 @@ static enum _ecore_status_t ecore_int_sp_sb_alloc(struct ecore_hwfn *p_hwfn, OSAL_ALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_sb)); if (!p_sb) { - DP_NOTICE(p_hwfn, true, - "Failed to allocate `struct ecore_sb_info'\n"); + DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_sb_info'\n"); return ECORE_NOMEM; } @@ -1804,7 +1810,7 @@ static enum _ecore_status_t ecore_int_sp_sb_alloc(struct ecore_hwfn *p_hwfn, p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &p_phys, SB_ALIGNED_SIZE(p_hwfn)); if (!p_virt) { - DP_NOTICE(p_hwfn, true, "Failed to allocate status block\n"); + DP_NOTICE(p_hwfn, false, "Failed to allocate status block\n"); OSAL_FREE(p_hwfn->p_dev, p_sb); return ECORE_NOMEM; } diff --git a/drivers/net/qede/base/ecore_int.h b/drivers/net/qede/base/ecore_int.h index 563051c3..041240d7 100644 --- a/drivers/net/qede/base/ecore_int.h +++ b/drivers/net/qede/base/ecore_int.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef __ECORE_INT_H__ @@ -256,6 +254,7 @@ enum _ecore_status_t ecore_int_set_timer_res(struct ecore_hwfn *p_hwfn, #endif enum _ecore_status_t ecore_pglueb_rbc_attn_handler(struct ecore_hwfn *p_hwfn, - struct ecore_ptt *p_ptt); + struct ecore_ptt *p_ptt, + bool is_hw_init); #endif /* __ECORE_INT_H__ */ diff --git a/drivers/net/qede/base/ecore_int_api.h b/drivers/net/qede/base/ecore_int_api.h index 24cdf5ed..aeaf469e 100644 --- a/drivers/net/qede/base/ecore_int_api.h +++ b/drivers/net/qede/base/ecore_int_api.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. 
- * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef __ECORE_INT_API_H__ diff --git a/drivers/net/qede/base/ecore_iov_api.h b/drivers/net/qede/base/ecore_iov_api.h index 218ef50b..29001d71 100644 --- a/drivers/net/qede/base/ecore_iov_api.h +++ b/drivers/net/qede/base/ecore_iov_api.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef __ECORE_SRIOV_API_H__ @@ -540,6 +538,17 @@ bool ecore_iov_is_valid_vfpf_msg_length(u32 length); u32 ecore_iov_pfvf_msg_length(void); /** + * @brief Returns MAC address if one is configured + * + * @parm p_hwfn + * @parm rel_vf_id + * + * @return OSAL_NULL if mac isn't set; Otherwise, returns MAC. + */ +u8 *ecore_iov_bulletin_get_mac(struct ecore_hwfn *p_hwfn, + u16 rel_vf_id); + +/** * @brief Returns forced MAC address if one is configured * * @parm p_hwfn diff --git a/drivers/net/qede/base/ecore_iro.h b/drivers/net/qede/base/ecore_iro.h index 360d7f88..05693029 100644 --- a/drivers/net/qede/base/ecore_iro.h +++ b/drivers/net/qede/base/ecore_iro.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef __IRO_H__ diff --git a/drivers/net/qede/base/ecore_iro_values.h b/drivers/net/qede/base/ecore_iro_values.h index 41532eeb..685fa2e8 100644 --- a/drivers/net/qede/base/ecore_iro_values.h +++ b/drivers/net/qede/base/ecore_iro_values.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. 
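For context, a minimal sketch (not part of the patch) of how a PF-side caller might consume the new ecore_iov_bulletin_get_mac() helper declared in the ecore_iov_api.h hunk above. It assumes the driver's usual includes (bcm_osal.h, ecore.h, ecore_iov_api.h); the function name, the rel_vf_id handling and the ECORE_MSG_IOV verbosity level are illustrative assumptions:

	static void example_log_vf_bulletin_mac(struct ecore_hwfn *p_hwfn,
						u16 rel_vf_id)
	{
		u8 *mac = ecore_iov_bulletin_get_mac(p_hwfn, rel_vf_id);

		if (mac == OSAL_NULL) {
			/* No bulletin MAC has been posted for this VF yet */
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "VF %u: no bulletin MAC configured\n",
				   rel_vf_id);
			return;
		}

		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF %u bulletin MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
			   rel_vf_id, mac[0], mac[1], mac[2],
			   mac[3], mac[4], mac[5]);
	}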
+ * www.cavium.com */ #ifndef __IRO_VALUES_H__ @@ -13,9 +11,9 @@ static const struct iro iro_arr[51] = { /* YSTORM_FLOW_CONTROL_MODE_OFFSET */ { 0x0, 0x0, 0x0, 0x0, 0x8}, /* TSTORM_PORT_STAT_OFFSET(port_id) */ - { 0x4cb0, 0x80, 0x0, 0x0, 0x80}, + { 0x4cb8, 0x88, 0x0, 0x0, 0x88}, /* TSTORM_LL2_PORT_STAT_OFFSET(port_id) */ - { 0x6508, 0x20, 0x0, 0x0, 0x20}, + { 0x6530, 0x20, 0x0, 0x0, 0x20}, /* USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) */ { 0xb00, 0x8, 0x0, 0x0, 0x4}, /* USTORM_FLR_FINAL_ACK_OFFSET(pf_id) */ @@ -27,49 +25,49 @@ static const struct iro iro_arr[51] = { /* USTORM_COMMON_QUEUE_CONS_OFFSET(queue_zone_id) */ { 0x84, 0x8, 0x0, 0x0, 0x2}, /* XSTORM_INTEG_TEST_DATA_OFFSET */ - { 0x4c40, 0x0, 0x0, 0x0, 0x78}, + { 0x4c48, 0x0, 0x0, 0x0, 0x78}, /* YSTORM_INTEG_TEST_DATA_OFFSET */ - { 0x3e10, 0x0, 0x0, 0x0, 0x78}, + { 0x3e38, 0x0, 0x0, 0x0, 0x78}, /* PSTORM_INTEG_TEST_DATA_OFFSET */ - { 0x2b50, 0x0, 0x0, 0x0, 0x78}, + { 0x2b78, 0x0, 0x0, 0x0, 0x78}, /* TSTORM_INTEG_TEST_DATA_OFFSET */ - { 0x4c38, 0x0, 0x0, 0x0, 0x78}, + { 0x4c40, 0x0, 0x0, 0x0, 0x78}, /* MSTORM_INTEG_TEST_DATA_OFFSET */ - { 0x4990, 0x0, 0x0, 0x0, 0x78}, + { 0x4998, 0x0, 0x0, 0x0, 0x78}, /* USTORM_INTEG_TEST_DATA_OFFSET */ - { 0x7f48, 0x0, 0x0, 0x0, 0x78}, + { 0x7f50, 0x0, 0x0, 0x0, 0x78}, /* TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) */ { 0xa28, 0x8, 0x0, 0x0, 0x8}, /* CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) */ - { 0x61e8, 0x10, 0x0, 0x0, 0x10}, + { 0x6210, 0x10, 0x0, 0x0, 0x10}, /* CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) */ { 0xb820, 0x30, 0x0, 0x0, 0x30}, /* CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) */ - { 0x96b8, 0x30, 0x0, 0x0, 0x30}, + { 0x96c0, 0x30, 0x0, 0x0, 0x30}, /* MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) */ - { 0x4b60, 0x80, 0x0, 0x0, 0x40}, + { 0x4b68, 0x80, 0x0, 0x0, 0x40}, /* MSTORM_ETH_PF_PRODS_OFFSET(queue_id) */ { 0x1f8, 0x4, 0x0, 0x0, 0x4}, /* MSTORM_ETH_VF_PRODS_OFFSET(vf_id,vf_queue_id) */ - { 0x53a0, 0x80, 0x4, 0x0, 0x4}, + { 0x53a8, 0x80, 0x4, 0x0, 0x4}, /* MSTORM_TPA_TIMEOUT_US_OFFSET */ - { 0xc7c8, 0x0, 0x0, 0x0, 0x4}, + { 0xc7d0, 0x0, 0x0, 0x0, 0x4}, /* MSTORM_ETH_PF_STAT_OFFSET(pf_id) */ - { 0x4ba0, 0x80, 0x0, 0x0, 0x20}, + { 0x4ba8, 0x80, 0x0, 0x0, 0x20}, /* USTORM_QUEUE_STAT_OFFSET(stat_counter_id) */ - { 0x8150, 0x40, 0x0, 0x0, 0x30}, + { 0x8158, 0x40, 0x0, 0x0, 0x30}, /* USTORM_ETH_PF_STAT_OFFSET(pf_id) */ { 0xe770, 0x60, 0x0, 0x0, 0x60}, /* PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) */ - { 0x2ce8, 0x80, 0x0, 0x0, 0x38}, + { 0x2d10, 0x80, 0x0, 0x0, 0x38}, /* PSTORM_ETH_PF_STAT_OFFSET(pf_id) */ - { 0xf2b0, 0x78, 0x0, 0x0, 0x78}, + { 0xf2b8, 0x78, 0x0, 0x0, 0x78}, /* PSTORM_CTL_FRAME_ETHTYPE_OFFSET(ethType_id) */ { 0x1f8, 0x4, 0x0, 0x0, 0x4}, /* TSTORM_ETH_PRS_INPUT_OFFSET */ - { 0xaef8, 0x0, 0x0, 0x0, 0xf0}, + { 0xaf20, 0x0, 0x0, 0x0, 0xf0}, /* ETH_RX_RATE_LIMIT_OFFSET(pf_id) */ - { 0xafe8, 0x8, 0x0, 0x0, 0x8}, + { 0xb010, 0x8, 0x0, 0x0, 0x8}, /* XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) */ { 0x1f8, 0x8, 0x0, 0x0, 0x8}, /* YSTORM_TOE_CQ_PROD_OFFSET(rss_id) */ @@ -81,37 +79,37 @@ static const struct iro iro_arr[51] = { /* TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) */ { 0x0, 0x8, 0x0, 0x0, 0x8}, /* TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id,bdq_id) */ - { 0x200, 0x18, 0x8, 0x0, 0x8}, + { 0x400, 0x18, 0x8, 0x0, 0x8}, /* MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id,bdq_id) */ { 0xb78, 0x18, 0x8, 0x0, 0x2}, /* TSTORM_ISCSI_RX_STATS_OFFSET(pf_id) */ - { 0xd878, 0x50, 0x0, 0x0, 0x3c}, + { 0xd898, 0x50, 0x0, 0x0, 0x3c}, /* MSTORM_ISCSI_RX_STATS_OFFSET(pf_id) */ { 
0x12908, 0x18, 0x0, 0x0, 0x10}, /* USTORM_ISCSI_RX_STATS_OFFSET(pf_id) */ { 0x11aa8, 0x40, 0x0, 0x0, 0x18}, /* XSTORM_ISCSI_TX_STATS_OFFSET(pf_id) */ - { 0xa580, 0x50, 0x0, 0x0, 0x20}, + { 0xa588, 0x50, 0x0, 0x0, 0x20}, /* YSTORM_ISCSI_TX_STATS_OFFSET(pf_id) */ - { 0x86f8, 0x40, 0x0, 0x0, 0x28}, + { 0x8700, 0x40, 0x0, 0x0, 0x28}, /* PSTORM_ISCSI_TX_STATS_OFFSET(pf_id) */ - { 0x102f8, 0x18, 0x0, 0x0, 0x10}, + { 0x10300, 0x18, 0x0, 0x0, 0x10}, /* TSTORM_FCOE_RX_STATS_OFFSET(pf_id) */ - { 0xde28, 0x48, 0x0, 0x0, 0x38}, + { 0xde48, 0x48, 0x0, 0x0, 0x38}, /* PSTORM_FCOE_TX_STATS_OFFSET(pf_id) */ - { 0x10760, 0x20, 0x0, 0x0, 0x20}, + { 0x10768, 0x20, 0x0, 0x0, 0x20}, /* PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) */ - { 0x2d20, 0x80, 0x0, 0x0, 0x10}, + { 0x2d48, 0x80, 0x0, 0x0, 0x10}, /* TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) */ - { 0x5020, 0x10, 0x0, 0x0, 0x10}, + { 0x5048, 0x10, 0x0, 0x0, 0x10}, /* XSTORM_IWARP_RXMIT_STATS_OFFSET(pf_id) */ - { 0xc9b0, 0x30, 0x0, 0x0, 0x10}, + { 0xc9b8, 0x30, 0x0, 0x0, 0x10}, /* TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) */ - { 0xeec0, 0x10, 0x0, 0x0, 0x10}, + { 0xed90, 0x10, 0x0, 0x0, 0x10}, /* YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(roce_pf_id) */ - { 0xa398, 0x10, 0x0, 0x0, 0x10}, + { 0xa520, 0x10, 0x0, 0x0, 0x10}, /* PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(roce_pf_id) */ - { 0x13100, 0x8, 0x0, 0x0, 0x8}, + { 0x13108, 0x8, 0x0, 0x0, 0x8}, }; #endif /* __IRO_VALUES_H__ */ diff --git a/drivers/net/qede/base/ecore_l2.c b/drivers/net/qede/base/ecore_l2.c index e3afc8a3..d71f4616 100644 --- a/drivers/net/qede/base/ecore_l2.c +++ b/drivers/net/qede/base/ecore_l2.c @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #include "bcm_osal.h" @@ -77,7 +75,8 @@ enum _ecore_status_t ecore_l2_alloc(struct ecore_hwfn *p_hwfn) } #ifdef CONFIG_ECORE_LOCK_ALLOC - OSAL_MUTEX_ALLOC(p_hwfn, &p_l2_info->lock); + if (OSAL_MUTEX_ALLOC(p_hwfn, &p_l2_info->lock)) + return ECORE_NOMEM; #endif return ECORE_SUCCESS; @@ -110,6 +109,7 @@ void ecore_l2_free(struct ecore_hwfn *p_hwfn) break; OSAL_VFREE(p_hwfn->p_dev, p_hwfn->p_l2_info->pp_qid_usage[i]); + p_hwfn->p_l2_info->pp_qid_usage[i] = OSAL_NULL; } #ifdef CONFIG_ECORE_LOCK_ALLOC @@ -119,6 +119,7 @@ void ecore_l2_free(struct ecore_hwfn *p_hwfn) #endif OSAL_VFREE(p_hwfn->p_dev, p_hwfn->p_l2_info->pp_qid_usage); + p_hwfn->p_l2_info->pp_qid_usage = OSAL_NULL; out_l2_info: OSAL_VFREE(p_hwfn->p_dev, p_hwfn->p_l2_info); @@ -687,7 +688,7 @@ ecore_sp_update_mcast_bin(struct vport_update_ramrod_data *p_ramrod, p_ramrod->common.update_approx_mcast_flg = 1; for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) { - u32 *p_bins = (u32 *)p_params->bins; + u32 *p_bins = p_params->bins; p_ramrod->approx_mcast.bins[i] = OSAL_CPU_TO_LE32(p_bins[i]); } @@ -1185,11 +1186,20 @@ ecore_eth_pf_tx_queue_start(struct ecore_hwfn *p_hwfn, void OSAL_IOMEM * *pp_doorbell) { enum _ecore_status_t rc; + u16 pq_id; + + /* TODO - set tc in the pq_params for multi-cos. + * If pacing is enabled then select queue according to + * rate limiter availability otherwise select queue based + * on multi cos. 
+ */ + if (IS_ECORE_PACING(p_hwfn)) + pq_id = ecore_get_cm_pq_idx_rl(p_hwfn, p_cid->rel.queue_id); + else + pq_id = ecore_get_cm_pq_idx_mcos(p_hwfn, tc); - /* TODO - set tc in the pq_params for multi-cos */ - rc = ecore_eth_txq_start_ramrod(p_hwfn, p_cid, - pbl_addr, pbl_size, - ecore_get_cm_pq_idx_mcos(p_hwfn, tc)); + rc = ecore_eth_txq_start_ramrod(p_hwfn, p_cid, pbl_addr, + pbl_size, pq_id); if (rc != ECORE_SUCCESS) return rc; @@ -1556,8 +1566,8 @@ ecore_sp_eth_filter_mcast(struct ecore_hwfn *p_hwfn, enum spq_mode comp_mode, struct ecore_spq_comp_cb *p_comp_data) { - unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS]; struct vport_update_ramrod_data *p_ramrod = OSAL_NULL; + u32 bins[ETH_MULTICAST_MAC_BINS_IN_REGS]; struct ecore_spq_entry *p_ent = OSAL_NULL; struct ecore_sp_init_data init_data; u8 abs_vport_id = 0; @@ -1596,8 +1606,7 @@ ecore_sp_eth_filter_mcast(struct ecore_hwfn *p_hwfn, /* explicitly clear out the entire vector */ OSAL_MEMSET(&p_ramrod->approx_mcast.bins, 0, sizeof(p_ramrod->approx_mcast.bins)); - OSAL_MEMSET(bins, 0, sizeof(unsigned long) * - ETH_MULTICAST_MAC_BINS_IN_REGS); + OSAL_MEMSET(bins, 0, sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS); /* filter ADD op is explicit set op and it removes * any existing filters for the vport. */ @@ -1606,16 +1615,15 @@ ecore_sp_eth_filter_mcast(struct ecore_hwfn *p_hwfn, u32 bit; bit = ecore_mcast_bin_from_mac(p_filter_cmd->mac[i]); - OSAL_SET_BIT(bit, bins); + bins[bit / 32] |= 1 << (bit % 32); } /* Convert to correct endianity */ for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) { struct vport_update_ramrod_mcast *p_ramrod_bins; - u32 *p_bins = (u32 *)bins; p_ramrod_bins = &p_ramrod->approx_mcast; - p_ramrod_bins->bins[i] = OSAL_CPU_TO_LE32(p_bins[i]); + p_ramrod_bins->bins[i] = OSAL_CPU_TO_LE32(bins[i]); } } @@ -1945,6 +1953,11 @@ static void __ecore_get_vport_port_stats(struct ecore_hwfn *p_hwfn, p_ah->tx_1519_to_max_byte_packets = port_stats.eth.u1.ah1.t1519_to_max; } + + p_common->link_change_count = ecore_rd(p_hwfn, p_ptt, + p_hwfn->mcp_info->port_addr + + OFFSETOF(struct public_port, + link_change_count)); } void __ecore_get_vport_stats(struct ecore_hwfn *p_hwfn, @@ -2061,11 +2074,14 @@ void ecore_reset_vport_stats(struct ecore_dev *p_dev) /* PORT statistics are not necessarily reset, so we need to * read and create a baseline for future statistics. + * Link change stat is maintained by MFW, return its value as is. */ if (!p_dev->reset_stats) DP_INFO(p_dev, "Reset stats not allocated\n"); - else + else { _ecore_get_vport_stats(p_dev, p_dev->reset_stats); + p_dev->reset_stats->common.link_change_count = 0; + } } void ecore_arfs_mode_configure(struct ecore_hwfn *p_hwfn, @@ -2150,7 +2166,7 @@ ecore_configure_rfs_ntuple_filter(struct ecore_hwfn *p_hwfn, p_ramrod->flow_id_valid = 0; p_ramrod->flow_id = 0; - p_ramrod->vport_id = abs_vport_id; + p_ramrod->vport_id = OSAL_CPU_TO_LE16((u16)abs_vport_id); p_ramrod->filter_action = b_is_add ? 
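To illustrate the multicast-bin change in the ecore_l2.c hunk above — the bins are now kept as u32 words (u32 bins[8]) and set with explicit /32 and %32 arithmetic instead of OSAL_SET_BIT() on an unsigned long array, so each bin lands in a fixed 32-bit word and bit regardless of the host's unsigned long width or byte order — here is a small standalone sketch; bin value 37 is just an example stand-in for ecore_mcast_bin_from_mac():

	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	#define MCAST_BINS_IN_REGS 8	/* mirrors ETH_MULTICAST_MAC_BINS_IN_REGS */

	int main(void)
	{
		uint32_t bins[MCAST_BINS_IN_REGS] = { 0 };
		unsigned int bin = 37;	/* example bin index */

		/* bin 37 -> word 1, bit 5, matching the little-endian
		 * 32-bit layout the approx_mcast ramrod registers expect.
		 */
		bins[bin / 32] |= 1u << (bin % 32);

		printf("bin %u -> bins[%u] = 0x%08" PRIx32 "\n",
		       bin, bin / 32, bins[bin / 32]);
		return 0;
	}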
GFT_ADD_FILTER : GFT_DELETE_FILTER; @@ -2267,3 +2283,22 @@ out: return rc; } + +enum _ecore_status_t +ecore_eth_tx_queue_maxrate(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_queue_cid *p_cid, u32 rate) +{ + struct ecore_mcp_link_state *p_link; + u8 vport; + + vport = (u8)ecore_get_qm_vport_idx_rl(p_hwfn, p_cid->rel.queue_id); + p_link = &ECORE_LEADING_HWFN(p_hwfn->p_dev)->mcp_info->link_output; + + DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, + "About to rate limit qm vport %d for queue %d with rate %d\n", + vport, p_cid->rel.queue_id, rate); + + return ecore_init_vport_rl(p_hwfn, p_ptt, vport, rate, + p_link->speed); +} diff --git a/drivers/net/qede/base/ecore_l2.h b/drivers/net/qede/base/ecore_l2.h index f4212cf2..8fa40302 100644 --- a/drivers/net/qede/base/ecore_l2.h +++ b/drivers/net/qede/base/ecore_l2.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef __ECORE_L2_H__ diff --git a/drivers/net/qede/base/ecore_l2_api.h b/drivers/net/qede/base/ecore_l2_api.h index ed9837bf..575b9e3a 100644 --- a/drivers/net/qede/base/ecore_l2_api.h +++ b/drivers/net/qede/base/ecore_l2_api.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef __ECORE_L2_API_H__ @@ -332,7 +330,7 @@ struct ecore_sp_vport_update_params { u8 anti_spoofing_en; u8 update_accept_any_vlan_flg; u8 accept_any_vlan; - unsigned long bins[8]; + u32 bins[8]; struct ecore_rss_params *rss_params; struct ecore_filter_accept_flags accept_flags; struct ecore_sge_tpa_params *sge_tpa_params; diff --git a/drivers/net/qede/base/ecore_mcp.c b/drivers/net/qede/base/ecore_mcp.c index 8edd2e96..ea14c172 100644 --- a/drivers/net/qede/base/ecore_mcp.c +++ b/drivers/net/qede/base/ecore_mcp.c @@ -1,14 +1,13 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. 
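A hypothetical caller (not in the patch) of the new ecore_eth_tx_queue_maxrate() helper added above, capping a single Tx queue. p_hwfn, p_ptt and p_cid are assumed to come from the normal queue-start path, and the rate unit is assumed to be Mb/s, as consumed by ecore_init_vport_rl():

	static enum _ecore_status_t example_cap_txq(struct ecore_hwfn *p_hwfn,
						    struct ecore_ptt *p_ptt,
						    struct ecore_queue_cid *p_cid)
	{
		u32 rate = 1000;	/* assumed unit: Mb/s */

		/* Programs the QM vport rate limiter backing this queue */
		return ecore_eth_tx_queue_maxrate(p_hwfn, p_ptt, p_cid, rate);
	}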
+ * www.cavium.com */ #include "bcm_osal.h" #include "ecore.h" #include "ecore_status.h" +#include "nvm_cfg.h" #include "ecore_mcp.h" #include "mcp_public.h" #include "reg_addr.h" @@ -240,15 +239,24 @@ enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn, /* Allocate mcp_info structure */ p_hwfn->mcp_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, - sizeof(*p_hwfn->mcp_info)); - if (!p_hwfn->mcp_info) - goto err; + sizeof(*p_hwfn->mcp_info)); + if (!p_hwfn->mcp_info) { + DP_NOTICE(p_hwfn, false, "Failed to allocate mcp_info\n"); + return ECORE_NOMEM; + } p_info = p_hwfn->mcp_info; /* Initialize the MFW spinlocks */ #ifdef CONFIG_ECORE_LOCK_ALLOC - OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->cmd_lock); - OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->link_lock); + if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->cmd_lock)) { + OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info); + return ECORE_NOMEM; + } + if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->link_lock)) { + OSAL_SPIN_LOCK_DEALLOC(&p_info->cmd_lock); + OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info); + return ECORE_NOMEM; + } #endif OSAL_SPIN_LOCK_INIT(&p_info->cmd_lock); OSAL_SPIN_LOCK_INIT(&p_info->link_lock); @@ -272,7 +280,7 @@ enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn, return ECORE_SUCCESS; err: - DP_NOTICE(p_hwfn, true, "Failed to allocate mcp memory\n"); + DP_NOTICE(p_hwfn, false, "Failed to allocate mcp memory\n"); ecore_mcp_free(p_hwfn); return ECORE_NOMEM; } @@ -593,7 +601,7 @@ ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn, /* MCP not initialized */ if (!ecore_mcp_is_init(p_hwfn)) { - DP_NOTICE(p_hwfn, true, "MFW is not initialized !\n"); + DP_NOTICE(p_hwfn, true, "MFW is not initialized!\n"); return ECORE_BUSY; } @@ -2121,19 +2129,20 @@ enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 *p_media_type) { + enum _ecore_status_t rc = ECORE_SUCCESS; /* TODO - Add support for VFs */ if (IS_VF(p_hwfn->p_dev)) return ECORE_INVAL; if (!ecore_mcp_is_init(p_hwfn)) { - DP_NOTICE(p_hwfn, true, "MFW is not initialized !\n"); + DP_NOTICE(p_hwfn, false, "MFW is not initialized!\n"); return ECORE_BUSY; } if (!p_ptt) { *p_media_type = MEDIA_UNSPECIFIED; - return ECORE_INVAL; + rc = ECORE_INVAL; } else { *p_media_type = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr + @@ -2144,6 +2153,197 @@ enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_hwfn *p_hwfn, return ECORE_SUCCESS; } +enum _ecore_status_t ecore_mcp_get_transceiver_data(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 *p_tranceiver_type) +{ + enum _ecore_status_t rc = ECORE_SUCCESS; + + /* TODO - Add support for VFs */ + if (IS_VF(p_hwfn->p_dev)) + return ECORE_INVAL; + + if (!ecore_mcp_is_init(p_hwfn)) { + DP_NOTICE(p_hwfn, false, "MFW is not initialized!\n"); + return ECORE_BUSY; + } + if (!p_ptt) { + *p_tranceiver_type = ETH_TRANSCEIVER_TYPE_NONE; + rc = ECORE_INVAL; + } else { + *p_tranceiver_type = ecore_rd(p_hwfn, p_ptt, + p_hwfn->mcp_info->port_addr + + offsetof(struct public_port, + transceiver_data)); + } + + return rc; +} + +static int is_transceiver_ready(u32 transceiver_state, u32 transceiver_type) +{ + if ((transceiver_state & ETH_TRANSCEIVER_STATE_PRESENT) && + ((transceiver_state & ETH_TRANSCEIVER_STATE_UPDATING) == 0x0) && + (transceiver_type != ETH_TRANSCEIVER_TYPE_NONE)) + return 1; + + return 0; +} + +enum _ecore_status_t ecore_mcp_trans_speed_mask(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 *p_speed_mask) +{ + u32 transceiver_data, transceiver_type, transceiver_state; + 
+ ecore_mcp_get_transceiver_data(p_hwfn, p_ptt, &transceiver_data); + + transceiver_state = GET_MFW_FIELD(transceiver_data, + ETH_TRANSCEIVER_STATE); + + transceiver_type = GET_MFW_FIELD(transceiver_data, + ETH_TRANSCEIVER_TYPE); + + if (is_transceiver_ready(transceiver_state, transceiver_type) == 0) + return ECORE_INVAL; + + switch (transceiver_type) { + case ETH_TRANSCEIVER_TYPE_1G_LX: + case ETH_TRANSCEIVER_TYPE_1G_SX: + case ETH_TRANSCEIVER_TYPE_1G_PCC: + case ETH_TRANSCEIVER_TYPE_1G_ACC: + case ETH_TRANSCEIVER_TYPE_1000BASET: + *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; + break; + + case ETH_TRANSCEIVER_TYPE_10G_SR: + case ETH_TRANSCEIVER_TYPE_10G_LR: + case ETH_TRANSCEIVER_TYPE_10G_LRM: + case ETH_TRANSCEIVER_TYPE_10G_ER: + case ETH_TRANSCEIVER_TYPE_10G_PCC: + case ETH_TRANSCEIVER_TYPE_10G_ACC: + case ETH_TRANSCEIVER_TYPE_4x10G: + *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G; + break; + + case ETH_TRANSCEIVER_TYPE_40G_LR4: + case ETH_TRANSCEIVER_TYPE_40G_SR4: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR: + *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G; + break; + + case ETH_TRANSCEIVER_TYPE_100G_AOC: + case ETH_TRANSCEIVER_TYPE_100G_SR4: + case ETH_TRANSCEIVER_TYPE_100G_LR4: + case ETH_TRANSCEIVER_TYPE_100G_ER4: + case ETH_TRANSCEIVER_TYPE_100G_ACC: + *p_speed_mask = + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G; + break; + + case ETH_TRANSCEIVER_TYPE_25G_SR: + case ETH_TRANSCEIVER_TYPE_25G_LR: + case ETH_TRANSCEIVER_TYPE_25G_AOC: + case ETH_TRANSCEIVER_TYPE_25G_ACC_S: + case ETH_TRANSCEIVER_TYPE_25G_ACC_M: + case ETH_TRANSCEIVER_TYPE_25G_ACC_L: + *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G; + break; + + case ETH_TRANSCEIVER_TYPE_25G_CA_N: + case ETH_TRANSCEIVER_TYPE_25G_CA_S: + case ETH_TRANSCEIVER_TYPE_25G_CA_L: + case ETH_TRANSCEIVER_TYPE_4x25G_CR: + *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; + break; + + case ETH_TRANSCEIVER_TYPE_40G_CR4: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR: + *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; + break; + + case ETH_TRANSCEIVER_TYPE_100G_CR4: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR: + *p_speed_mask = + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; + break; + + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_AOC: + *p_speed_mask = + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G; + break; + + case ETH_TRANSCEIVER_TYPE_XLPPI: + *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G; + break; + + case ETH_TRANSCEIVER_TYPE_10G_BASET: + *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; + break; + + default: + 
DP_INFO(p_hwfn, "Unknown transcevier type 0x%x\n", + transceiver_type); + *p_speed_mask = 0xff; + break; + } + + return ECORE_SUCCESS; +} + +enum _ecore_status_t ecore_mcp_get_board_config(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 *p_board_config) +{ + u32 nvm_cfg_addr, nvm_cfg1_offset, port_cfg_addr; + enum _ecore_status_t rc = ECORE_SUCCESS; + + /* TODO - Add support for VFs */ + if (IS_VF(p_hwfn->p_dev)) + return ECORE_INVAL; + + if (!ecore_mcp_is_init(p_hwfn)) { + DP_NOTICE(p_hwfn, false, "MFW is not initialized!\n"); + return ECORE_BUSY; + } + if (!p_ptt) { + *p_board_config = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED; + rc = ECORE_INVAL; + } else { + nvm_cfg_addr = ecore_rd(p_hwfn, p_ptt, + MISC_REG_GEN_PURP_CR0); + nvm_cfg1_offset = ecore_rd(p_hwfn, p_ptt, + nvm_cfg_addr + 4); + port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset + + offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]); + *p_board_config = ecore_rd(p_hwfn, p_ptt, + port_cfg_addr + + offsetof(struct nvm_cfg1_port, + board_cfg)); + } + + return rc; +} + /* @DPDK */ /* Old MFW has a global configuration for all PFs regarding RDMA support */ static void diff --git a/drivers/net/qede/base/ecore_mcp.h b/drivers/net/qede/base/ecore_mcp.h index 6afaf7de..8e125310 100644 --- a/drivers/net/qede/base/ecore_mcp.h +++ b/drivers/net/qede/base/ecore_mcp.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef __ECORE_MCP_H__ diff --git a/drivers/net/qede/base/ecore_mcp_api.h b/drivers/net/qede/base/ecore_mcp_api.h index 225890e2..cfb9f99d 100644 --- a/drivers/net/qede/base/ecore_mcp_api.h +++ b/drivers/net/qede/base/ecore_mcp_api.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef __ECORE_MCP_API_H__ @@ -595,6 +593,52 @@ enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_hwfn *p_hwfn, u32 *media_type); /** + * @brief Get transceiver data of the port. + * + * @param p_dev - ecore dev pointer + * @param p_ptt + * @param p_transceiver_type - media type value + * + * @return enum _ecore_status_t - + * ECORE_SUCCESS - Operation was successful. + * ECORE_BUSY - Operation failed + */ +enum _ecore_status_t ecore_mcp_get_transceiver_data(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 *p_tranceiver_type); + +/** + * @brief Get transceiver supported speed mask. + * + * @param p_dev - ecore dev pointer + * @param p_ptt + * @param p_speed_mask - Bit mask of all supported speeds. + * + * @return enum _ecore_status_t - + * ECORE_SUCCESS - Operation was successful. + * ECORE_BUSY - Operation failed + */ + +enum _ecore_status_t ecore_mcp_trans_speed_mask(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 *p_speed_mask); + +/** + * @brief Get board configuration. + * + * @param p_dev - ecore dev pointer + * @param p_ptt + * @param p_board_config - Board config. + * + * @return enum _ecore_status_t - + * ECORE_SUCCESS - Operation was successful. 
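Illustrative only: one way a link-configuration path might use the MCP transceiver helpers declared above, masking the driver's configured speed capabilities by what the plugged transceiver reports. The caller-supplied configured_speed_mask and the fallback policy on a not-ready transceiver are assumptions, not part of the patch:

	static u32 example_supported_speeds(struct ecore_hwfn *p_hwfn,
					    struct ecore_ptt *p_ptt,
					    u32 configured_speed_mask)
	{
		u32 xcvr_speed_mask = 0;

		/* Returns non-success when the transceiver is absent,
		 * still updating, or of an unknown type.
		 */
		if (ecore_mcp_trans_speed_mask(p_hwfn, p_ptt,
					       &xcvr_speed_mask) != ECORE_SUCCESS)
			return configured_speed_mask;

		/* Both masks use NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_* bits */
		return configured_speed_mask & xcvr_speed_mask;
	}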
+ * ECORE_BUSY - Operation failed + */ +enum _ecore_status_t ecore_mcp_get_board_config(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 *p_board_config); + +/** * @brief - Sends a command to the MCP mailbox. * * @param p_hwfn - hw function diff --git a/drivers/net/qede/base/ecore_mng_tlv.c b/drivers/net/qede/base/ecore_mng_tlv.c index 3a1de094..f7666472 100644 --- a/drivers/net/qede/base/ecore_mng_tlv.c +++ b/drivers/net/qede/base/ecore_mng_tlv.c @@ -1,3 +1,9 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. + * All rights reserved. + * www.cavium.com + */ + #include "bcm_osal.h" #include "ecore.h" #include "ecore_status.h" diff --git a/drivers/net/qede/base/ecore_proto_if.h b/drivers/net/qede/base/ecore_proto_if.h index abca7408..f91b25e2 100644 --- a/drivers/net/qede/base/ecore_proto_if.h +++ b/drivers/net/qede/base/ecore_proto_if.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef __ECORE_PROTO_IF_H__ @@ -31,6 +29,9 @@ struct ecore_eth_pf_params { * This will set the maximal number of configured steering-filters. */ u32 num_arfs_filters; + + /* To allow VF to change its MAC despite of PF set forced MAC. */ + bool allow_vf_mac_change; }; /* Most of the parameters below are described in the FW iSCSI / TCP HSI */ diff --git a/drivers/net/qede/base/ecore_rt_defs.h b/drivers/net/qede/base/ecore_rt_defs.h index 1d085815..721b8c15 100644 --- a/drivers/net/qede/base/ecore_rt_defs.h +++ b/drivers/net/qede/base/ecore_rt_defs.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. 
+ * www.cavium.com */ #ifndef __RT_DEFS_H__ @@ -205,329 +203,336 @@ #define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 34082 #define QM_REG_BASEADDROTHERPQ_RT_OFFSET 34083 #define QM_REG_BASEADDROTHERPQ_RT_SIZE 128 -#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 34211 -#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 34212 -#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 34213 -#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 34214 -#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 34215 -#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 34216 -#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 34217 -#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 34218 -#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 34219 -#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 34220 -#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 34221 -#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 34222 -#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 34223 -#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 34224 -#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 34225 -#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 34226 -#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 34227 -#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 34228 -#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 34229 -#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 34230 -#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 34231 -#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 34232 -#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 34233 -#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 34234 -#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 34235 -#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 34236 -#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 34237 -#define QM_REG_PQTX2PF_0_RT_OFFSET 34238 -#define QM_REG_PQTX2PF_1_RT_OFFSET 34239 -#define QM_REG_PQTX2PF_2_RT_OFFSET 34240 -#define QM_REG_PQTX2PF_3_RT_OFFSET 34241 -#define QM_REG_PQTX2PF_4_RT_OFFSET 34242 -#define QM_REG_PQTX2PF_5_RT_OFFSET 34243 -#define QM_REG_PQTX2PF_6_RT_OFFSET 34244 -#define QM_REG_PQTX2PF_7_RT_OFFSET 34245 -#define QM_REG_PQTX2PF_8_RT_OFFSET 34246 -#define QM_REG_PQTX2PF_9_RT_OFFSET 34247 -#define QM_REG_PQTX2PF_10_RT_OFFSET 34248 -#define QM_REG_PQTX2PF_11_RT_OFFSET 34249 -#define QM_REG_PQTX2PF_12_RT_OFFSET 34250 -#define QM_REG_PQTX2PF_13_RT_OFFSET 34251 -#define QM_REG_PQTX2PF_14_RT_OFFSET 34252 -#define QM_REG_PQTX2PF_15_RT_OFFSET 34253 -#define QM_REG_PQTX2PF_16_RT_OFFSET 34254 -#define QM_REG_PQTX2PF_17_RT_OFFSET 34255 -#define QM_REG_PQTX2PF_18_RT_OFFSET 34256 -#define QM_REG_PQTX2PF_19_RT_OFFSET 34257 -#define QM_REG_PQTX2PF_20_RT_OFFSET 34258 -#define QM_REG_PQTX2PF_21_RT_OFFSET 34259 -#define QM_REG_PQTX2PF_22_RT_OFFSET 34260 -#define QM_REG_PQTX2PF_23_RT_OFFSET 34261 -#define QM_REG_PQTX2PF_24_RT_OFFSET 34262 -#define QM_REG_PQTX2PF_25_RT_OFFSET 34263 -#define QM_REG_PQTX2PF_26_RT_OFFSET 34264 -#define QM_REG_PQTX2PF_27_RT_OFFSET 34265 -#define QM_REG_PQTX2PF_28_RT_OFFSET 34266 -#define QM_REG_PQTX2PF_29_RT_OFFSET 34267 -#define QM_REG_PQTX2PF_30_RT_OFFSET 34268 -#define QM_REG_PQTX2PF_31_RT_OFFSET 34269 -#define QM_REG_PQTX2PF_32_RT_OFFSET 34270 -#define QM_REG_PQTX2PF_33_RT_OFFSET 34271 -#define QM_REG_PQTX2PF_34_RT_OFFSET 34272 -#define QM_REG_PQTX2PF_35_RT_OFFSET 34273 -#define QM_REG_PQTX2PF_36_RT_OFFSET 34274 -#define QM_REG_PQTX2PF_37_RT_OFFSET 34275 -#define QM_REG_PQTX2PF_38_RT_OFFSET 34276 -#define QM_REG_PQTX2PF_39_RT_OFFSET 34277 -#define QM_REG_PQTX2PF_40_RT_OFFSET 34278 -#define QM_REG_PQTX2PF_41_RT_OFFSET 34279 -#define QM_REG_PQTX2PF_42_RT_OFFSET 34280 -#define QM_REG_PQTX2PF_43_RT_OFFSET 34281 -#define QM_REG_PQTX2PF_44_RT_OFFSET 34282 -#define QM_REG_PQTX2PF_45_RT_OFFSET 34283 -#define QM_REG_PQTX2PF_46_RT_OFFSET 34284 
-#define QM_REG_PQTX2PF_47_RT_OFFSET 34285 -#define QM_REG_PQTX2PF_48_RT_OFFSET 34286 -#define QM_REG_PQTX2PF_49_RT_OFFSET 34287 -#define QM_REG_PQTX2PF_50_RT_OFFSET 34288 -#define QM_REG_PQTX2PF_51_RT_OFFSET 34289 -#define QM_REG_PQTX2PF_52_RT_OFFSET 34290 -#define QM_REG_PQTX2PF_53_RT_OFFSET 34291 -#define QM_REG_PQTX2PF_54_RT_OFFSET 34292 -#define QM_REG_PQTX2PF_55_RT_OFFSET 34293 -#define QM_REG_PQTX2PF_56_RT_OFFSET 34294 -#define QM_REG_PQTX2PF_57_RT_OFFSET 34295 -#define QM_REG_PQTX2PF_58_RT_OFFSET 34296 -#define QM_REG_PQTX2PF_59_RT_OFFSET 34297 -#define QM_REG_PQTX2PF_60_RT_OFFSET 34298 -#define QM_REG_PQTX2PF_61_RT_OFFSET 34299 -#define QM_REG_PQTX2PF_62_RT_OFFSET 34300 -#define QM_REG_PQTX2PF_63_RT_OFFSET 34301 -#define QM_REG_PQOTHER2PF_0_RT_OFFSET 34302 -#define QM_REG_PQOTHER2PF_1_RT_OFFSET 34303 -#define QM_REG_PQOTHER2PF_2_RT_OFFSET 34304 -#define QM_REG_PQOTHER2PF_3_RT_OFFSET 34305 -#define QM_REG_PQOTHER2PF_4_RT_OFFSET 34306 -#define QM_REG_PQOTHER2PF_5_RT_OFFSET 34307 -#define QM_REG_PQOTHER2PF_6_RT_OFFSET 34308 -#define QM_REG_PQOTHER2PF_7_RT_OFFSET 34309 -#define QM_REG_PQOTHER2PF_8_RT_OFFSET 34310 -#define QM_REG_PQOTHER2PF_9_RT_OFFSET 34311 -#define QM_REG_PQOTHER2PF_10_RT_OFFSET 34312 -#define QM_REG_PQOTHER2PF_11_RT_OFFSET 34313 -#define QM_REG_PQOTHER2PF_12_RT_OFFSET 34314 -#define QM_REG_PQOTHER2PF_13_RT_OFFSET 34315 -#define QM_REG_PQOTHER2PF_14_RT_OFFSET 34316 -#define QM_REG_PQOTHER2PF_15_RT_OFFSET 34317 -#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 34318 -#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 34319 -#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 34320 -#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 34321 -#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 34322 -#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 34323 -#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 34324 -#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 34325 -#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 34326 -#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 34327 -#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 34328 -#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 34329 -#define QM_REG_RLGLBLINCVAL_RT_OFFSET 34330 +#define QM_REG_PTRTBLOTHER_RT_OFFSET 34211 +#define QM_REG_PTRTBLOTHER_RT_SIZE 256 +#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 34467 +#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 34468 +#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 34469 +#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 34470 +#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 34471 +#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 34472 +#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 34473 +#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 34474 +#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 34475 +#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 34476 +#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 34477 +#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 34478 +#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 34479 +#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 34480 +#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 34481 +#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 34482 +#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 34483 +#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 34484 +#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 34485 +#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 34486 +#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 34487 +#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 34488 +#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 34489 +#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 34490 +#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 34491 +#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 34492 +#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 34493 +#define 
QM_REG_PQTX2PF_0_RT_OFFSET 34494 +#define QM_REG_PQTX2PF_1_RT_OFFSET 34495 +#define QM_REG_PQTX2PF_2_RT_OFFSET 34496 +#define QM_REG_PQTX2PF_3_RT_OFFSET 34497 +#define QM_REG_PQTX2PF_4_RT_OFFSET 34498 +#define QM_REG_PQTX2PF_5_RT_OFFSET 34499 +#define QM_REG_PQTX2PF_6_RT_OFFSET 34500 +#define QM_REG_PQTX2PF_7_RT_OFFSET 34501 +#define QM_REG_PQTX2PF_8_RT_OFFSET 34502 +#define QM_REG_PQTX2PF_9_RT_OFFSET 34503 +#define QM_REG_PQTX2PF_10_RT_OFFSET 34504 +#define QM_REG_PQTX2PF_11_RT_OFFSET 34505 +#define QM_REG_PQTX2PF_12_RT_OFFSET 34506 +#define QM_REG_PQTX2PF_13_RT_OFFSET 34507 +#define QM_REG_PQTX2PF_14_RT_OFFSET 34508 +#define QM_REG_PQTX2PF_15_RT_OFFSET 34509 +#define QM_REG_PQTX2PF_16_RT_OFFSET 34510 +#define QM_REG_PQTX2PF_17_RT_OFFSET 34511 +#define QM_REG_PQTX2PF_18_RT_OFFSET 34512 +#define QM_REG_PQTX2PF_19_RT_OFFSET 34513 +#define QM_REG_PQTX2PF_20_RT_OFFSET 34514 +#define QM_REG_PQTX2PF_21_RT_OFFSET 34515 +#define QM_REG_PQTX2PF_22_RT_OFFSET 34516 +#define QM_REG_PQTX2PF_23_RT_OFFSET 34517 +#define QM_REG_PQTX2PF_24_RT_OFFSET 34518 +#define QM_REG_PQTX2PF_25_RT_OFFSET 34519 +#define QM_REG_PQTX2PF_26_RT_OFFSET 34520 +#define QM_REG_PQTX2PF_27_RT_OFFSET 34521 +#define QM_REG_PQTX2PF_28_RT_OFFSET 34522 +#define QM_REG_PQTX2PF_29_RT_OFFSET 34523 +#define QM_REG_PQTX2PF_30_RT_OFFSET 34524 +#define QM_REG_PQTX2PF_31_RT_OFFSET 34525 +#define QM_REG_PQTX2PF_32_RT_OFFSET 34526 +#define QM_REG_PQTX2PF_33_RT_OFFSET 34527 +#define QM_REG_PQTX2PF_34_RT_OFFSET 34528 +#define QM_REG_PQTX2PF_35_RT_OFFSET 34529 +#define QM_REG_PQTX2PF_36_RT_OFFSET 34530 +#define QM_REG_PQTX2PF_37_RT_OFFSET 34531 +#define QM_REG_PQTX2PF_38_RT_OFFSET 34532 +#define QM_REG_PQTX2PF_39_RT_OFFSET 34533 +#define QM_REG_PQTX2PF_40_RT_OFFSET 34534 +#define QM_REG_PQTX2PF_41_RT_OFFSET 34535 +#define QM_REG_PQTX2PF_42_RT_OFFSET 34536 +#define QM_REG_PQTX2PF_43_RT_OFFSET 34537 +#define QM_REG_PQTX2PF_44_RT_OFFSET 34538 +#define QM_REG_PQTX2PF_45_RT_OFFSET 34539 +#define QM_REG_PQTX2PF_46_RT_OFFSET 34540 +#define QM_REG_PQTX2PF_47_RT_OFFSET 34541 +#define QM_REG_PQTX2PF_48_RT_OFFSET 34542 +#define QM_REG_PQTX2PF_49_RT_OFFSET 34543 +#define QM_REG_PQTX2PF_50_RT_OFFSET 34544 +#define QM_REG_PQTX2PF_51_RT_OFFSET 34545 +#define QM_REG_PQTX2PF_52_RT_OFFSET 34546 +#define QM_REG_PQTX2PF_53_RT_OFFSET 34547 +#define QM_REG_PQTX2PF_54_RT_OFFSET 34548 +#define QM_REG_PQTX2PF_55_RT_OFFSET 34549 +#define QM_REG_PQTX2PF_56_RT_OFFSET 34550 +#define QM_REG_PQTX2PF_57_RT_OFFSET 34551 +#define QM_REG_PQTX2PF_58_RT_OFFSET 34552 +#define QM_REG_PQTX2PF_59_RT_OFFSET 34553 +#define QM_REG_PQTX2PF_60_RT_OFFSET 34554 +#define QM_REG_PQTX2PF_61_RT_OFFSET 34555 +#define QM_REG_PQTX2PF_62_RT_OFFSET 34556 +#define QM_REG_PQTX2PF_63_RT_OFFSET 34557 +#define QM_REG_PQOTHER2PF_0_RT_OFFSET 34558 +#define QM_REG_PQOTHER2PF_1_RT_OFFSET 34559 +#define QM_REG_PQOTHER2PF_2_RT_OFFSET 34560 +#define QM_REG_PQOTHER2PF_3_RT_OFFSET 34561 +#define QM_REG_PQOTHER2PF_4_RT_OFFSET 34562 +#define QM_REG_PQOTHER2PF_5_RT_OFFSET 34563 +#define QM_REG_PQOTHER2PF_6_RT_OFFSET 34564 +#define QM_REG_PQOTHER2PF_7_RT_OFFSET 34565 +#define QM_REG_PQOTHER2PF_8_RT_OFFSET 34566 +#define QM_REG_PQOTHER2PF_9_RT_OFFSET 34567 +#define QM_REG_PQOTHER2PF_10_RT_OFFSET 34568 +#define QM_REG_PQOTHER2PF_11_RT_OFFSET 34569 +#define QM_REG_PQOTHER2PF_12_RT_OFFSET 34570 +#define QM_REG_PQOTHER2PF_13_RT_OFFSET 34571 +#define QM_REG_PQOTHER2PF_14_RT_OFFSET 34572 +#define QM_REG_PQOTHER2PF_15_RT_OFFSET 34573 +#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 34574 +#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 34575 
+#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 34576 +#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 34577 +#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 34578 +#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 34579 +#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 34580 +#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 34581 +#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 34582 +#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 34583 +#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 34584 +#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 34585 +#define QM_REG_RLGLBLINCVAL_RT_OFFSET 34586 #define QM_REG_RLGLBLINCVAL_RT_SIZE 256 -#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 34586 +#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 34842 #define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256 -#define QM_REG_RLGLBLCRD_RT_OFFSET 34842 +#define QM_REG_RLGLBLCRD_RT_OFFSET 35098 #define QM_REG_RLGLBLCRD_RT_SIZE 256 -#define QM_REG_RLGLBLENABLE_RT_OFFSET 35098 -#define QM_REG_RLPFPERIOD_RT_OFFSET 35099 -#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 35100 -#define QM_REG_RLPFINCVAL_RT_OFFSET 35101 +#define QM_REG_RLGLBLENABLE_RT_OFFSET 35354 +#define QM_REG_RLPFPERIOD_RT_OFFSET 35355 +#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 35356 +#define QM_REG_RLPFINCVAL_RT_OFFSET 35357 #define QM_REG_RLPFINCVAL_RT_SIZE 16 -#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 35117 +#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 35373 #define QM_REG_RLPFUPPERBOUND_RT_SIZE 16 -#define QM_REG_RLPFCRD_RT_OFFSET 35133 +#define QM_REG_RLPFCRD_RT_OFFSET 35389 #define QM_REG_RLPFCRD_RT_SIZE 16 -#define QM_REG_RLPFENABLE_RT_OFFSET 35149 -#define QM_REG_RLPFVOQENABLE_RT_OFFSET 35150 -#define QM_REG_WFQPFWEIGHT_RT_OFFSET 35151 +#define QM_REG_RLPFENABLE_RT_OFFSET 35405 +#define QM_REG_RLPFVOQENABLE_RT_OFFSET 35406 +#define QM_REG_WFQPFWEIGHT_RT_OFFSET 35407 #define QM_REG_WFQPFWEIGHT_RT_SIZE 16 -#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 35167 +#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 35423 #define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16 -#define QM_REG_WFQPFCRD_RT_OFFSET 35183 +#define QM_REG_WFQPFCRD_RT_OFFSET 35439 #define QM_REG_WFQPFCRD_RT_SIZE 256 -#define QM_REG_WFQPFENABLE_RT_OFFSET 35439 -#define QM_REG_WFQVPENABLE_RT_OFFSET 35440 -#define QM_REG_BASEADDRTXPQ_RT_OFFSET 35441 +#define QM_REG_WFQPFENABLE_RT_OFFSET 35695 +#define QM_REG_WFQVPENABLE_RT_OFFSET 35696 +#define QM_REG_BASEADDRTXPQ_RT_OFFSET 35697 #define QM_REG_BASEADDRTXPQ_RT_SIZE 512 -#define QM_REG_TXPQMAP_RT_OFFSET 35953 +#define QM_REG_TXPQMAP_RT_OFFSET 36209 #define QM_REG_TXPQMAP_RT_SIZE 512 -#define QM_REG_WFQVPWEIGHT_RT_OFFSET 36465 +#define QM_REG_WFQVPWEIGHT_RT_OFFSET 36721 #define QM_REG_WFQVPWEIGHT_RT_SIZE 512 -#define QM_REG_WFQVPCRD_RT_OFFSET 36977 +#define QM_REG_WFQVPCRD_RT_OFFSET 37233 #define QM_REG_WFQVPCRD_RT_SIZE 512 -#define QM_REG_WFQVPMAP_RT_OFFSET 37489 +#define QM_REG_WFQVPMAP_RT_OFFSET 37745 #define QM_REG_WFQVPMAP_RT_SIZE 512 -#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 38001 +#define QM_REG_PTRTBLTX_RT_OFFSET 38257 +#define QM_REG_PTRTBLTX_RT_SIZE 1024 +#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 39281 #define QM_REG_WFQPFCRD_MSB_RT_SIZE 320 -#define QM_REG_VOQCRDLINE_RT_OFFSET 38321 +#define QM_REG_VOQCRDLINE_RT_OFFSET 39601 #define QM_REG_VOQCRDLINE_RT_SIZE 36 -#define QM_REG_VOQINITCRDLINE_RT_OFFSET 38357 +#define QM_REG_VOQINITCRDLINE_RT_OFFSET 39637 #define QM_REG_VOQINITCRDLINE_RT_SIZE 36 -#define QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET 38393 -#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 38394 -#define NIG_REG_BRB_GATE_DNTFWD_PORT_RT_OFFSET 38395 -#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 38396 -#define 
NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 38397 -#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 38398 -#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 38399 -#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 38400 -#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 38401 +#define QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET 39673 +#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 39674 +#define NIG_REG_BRB_GATE_DNTFWD_PORT_RT_OFFSET 39675 +#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 39676 +#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 39677 +#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 39678 +#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 39679 +#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 39680 +#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 39681 #define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4 -#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 38405 +#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 39685 #define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4 -#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 38409 +#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 39689 #define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32 -#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 38441 +#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 39721 #define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16 -#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 38457 +#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 39737 #define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16 -#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 38473 +#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 39753 #define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16 -#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 38489 +#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 39769 #define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16 -#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 38505 -#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 38506 -#define NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET 38507 +#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 39785 +#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 39786 +#define NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET 39787 #define NIG_REG_PPF_TO_ENGINE_SEL_RT_SIZE 8 -#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_OFFSET 38515 +#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_OFFSET 39795 #define NIG_REG_LLH_PF_CLS_FUNC_FILTER_VALUE_RT_SIZE 1024 -#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_OFFSET 39539 +#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_OFFSET 40819 #define NIG_REG_LLH_PF_CLS_FUNC_FILTER_EN_RT_SIZE 512 -#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_OFFSET 40051 +#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_OFFSET 41331 #define NIG_REG_LLH_PF_CLS_FUNC_FILTER_MODE_RT_SIZE 512 -#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 40563 +#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 41843 #define NIG_REG_LLH_PF_CLS_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 512 -#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_OFFSET 41075 +#define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_OFFSET 42355 #define NIG_REG_LLH_PF_CLS_FUNC_FILTER_HDR_SEL_RT_SIZE 512 -#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_OFFSET 41587 +#define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_OFFSET 42867 #define NIG_REG_LLH_PF_CLS_FILTERS_MAP_RT_SIZE 32 -#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 41619 -#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 41620 -#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 41621 -#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 41622 -#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 41623 -#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 41624 -#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 41625 -#define 
CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 41626 -#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 41627 -#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 41628 -#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 41629 -#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 41630 -#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 41631 -#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 41632 -#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 41633 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 41634 -#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 41635 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 41636 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 41637 -#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 41638 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 41639 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 41640 -#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 41641 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 41642 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 41643 -#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 41644 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 41645 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 41646 -#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 41647 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 41648 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 41649 -#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 41650 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 41651 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 41652 -#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 41653 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 41654 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 41655 -#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 41656 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 41657 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 41658 -#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 41659 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 41660 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 41661 -#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 41662 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 41663 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 41664 -#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 41665 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 41666 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 41667 -#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 41668 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 41669 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 41670 -#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 41671 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 41672 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 41673 -#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 41674 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 41675 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 41676 -#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 41677 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 41678 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 41679 -#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 41680 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 41681 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 41682 -#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 41683 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 41684 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 41685 -#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 41686 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 41687 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 41688 
-#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 41689 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 41690 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 41691 -#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 41692 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 41693 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ20_RT_OFFSET 41694 -#define PBF_REG_BTB_GUARANTEED_VOQ20_RT_OFFSET 41695 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ20_RT_OFFSET 41696 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ21_RT_OFFSET 41697 -#define PBF_REG_BTB_GUARANTEED_VOQ21_RT_OFFSET 41698 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ21_RT_OFFSET 41699 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ22_RT_OFFSET 41700 -#define PBF_REG_BTB_GUARANTEED_VOQ22_RT_OFFSET 41701 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ22_RT_OFFSET 41702 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ23_RT_OFFSET 41703 -#define PBF_REG_BTB_GUARANTEED_VOQ23_RT_OFFSET 41704 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ23_RT_OFFSET 41705 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ24_RT_OFFSET 41706 -#define PBF_REG_BTB_GUARANTEED_VOQ24_RT_OFFSET 41707 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ24_RT_OFFSET 41708 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ25_RT_OFFSET 41709 -#define PBF_REG_BTB_GUARANTEED_VOQ25_RT_OFFSET 41710 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ25_RT_OFFSET 41711 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ26_RT_OFFSET 41712 -#define PBF_REG_BTB_GUARANTEED_VOQ26_RT_OFFSET 41713 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ26_RT_OFFSET 41714 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ27_RT_OFFSET 41715 -#define PBF_REG_BTB_GUARANTEED_VOQ27_RT_OFFSET 41716 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ27_RT_OFFSET 41717 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ28_RT_OFFSET 41718 -#define PBF_REG_BTB_GUARANTEED_VOQ28_RT_OFFSET 41719 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ28_RT_OFFSET 41720 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ29_RT_OFFSET 41721 -#define PBF_REG_BTB_GUARANTEED_VOQ29_RT_OFFSET 41722 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ29_RT_OFFSET 41723 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ30_RT_OFFSET 41724 -#define PBF_REG_BTB_GUARANTEED_VOQ30_RT_OFFSET 41725 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ30_RT_OFFSET 41726 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ31_RT_OFFSET 41727 -#define PBF_REG_BTB_GUARANTEED_VOQ31_RT_OFFSET 41728 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ31_RT_OFFSET 41729 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ32_RT_OFFSET 41730 -#define PBF_REG_BTB_GUARANTEED_VOQ32_RT_OFFSET 41731 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ32_RT_OFFSET 41732 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ33_RT_OFFSET 41733 -#define PBF_REG_BTB_GUARANTEED_VOQ33_RT_OFFSET 41734 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ33_RT_OFFSET 41735 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ34_RT_OFFSET 41736 -#define PBF_REG_BTB_GUARANTEED_VOQ34_RT_OFFSET 41737 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ34_RT_OFFSET 41738 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ35_RT_OFFSET 41739 -#define PBF_REG_BTB_GUARANTEED_VOQ35_RT_OFFSET 41740 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ35_RT_OFFSET 41741 -#define XCM_REG_CON_PHY_Q3_RT_OFFSET 41742 +#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 42899 +#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 42900 +#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 42901 +#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 42902 +#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 42903 +#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 42904 +#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 42905 +#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 42906 +#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 42907 
+#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 42908 +#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 42909 +#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 42910 +#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 42911 +#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 42912 +#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 42913 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 42914 +#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 42915 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 42916 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 42917 +#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 42918 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 42919 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 42920 +#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 42921 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 42922 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 42923 +#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 42924 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 42925 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 42926 +#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 42927 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 42928 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 42929 +#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 42930 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 42931 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 42932 +#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 42933 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 42934 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 42935 +#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 42936 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 42937 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 42938 +#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 42939 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 42940 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 42941 +#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 42942 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 42943 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 42944 +#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 42945 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 42946 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 42947 +#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 42948 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 42949 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 42950 +#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 42951 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 42952 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 42953 +#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 42954 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 42955 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 42956 +#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 42957 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 42958 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 42959 +#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 42960 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 42961 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 42962 +#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 42963 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 42964 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 42965 +#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 42966 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 42967 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 42968 +#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 42969 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 
42970 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 42971 +#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 42972 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 42973 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ20_RT_OFFSET 42974 +#define PBF_REG_BTB_GUARANTEED_VOQ20_RT_OFFSET 42975 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ20_RT_OFFSET 42976 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ21_RT_OFFSET 42977 +#define PBF_REG_BTB_GUARANTEED_VOQ21_RT_OFFSET 42978 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ21_RT_OFFSET 42979 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ22_RT_OFFSET 42980 +#define PBF_REG_BTB_GUARANTEED_VOQ22_RT_OFFSET 42981 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ22_RT_OFFSET 42982 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ23_RT_OFFSET 42983 +#define PBF_REG_BTB_GUARANTEED_VOQ23_RT_OFFSET 42984 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ23_RT_OFFSET 42985 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ24_RT_OFFSET 42986 +#define PBF_REG_BTB_GUARANTEED_VOQ24_RT_OFFSET 42987 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ24_RT_OFFSET 42988 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ25_RT_OFFSET 42989 +#define PBF_REG_BTB_GUARANTEED_VOQ25_RT_OFFSET 42990 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ25_RT_OFFSET 42991 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ26_RT_OFFSET 42992 +#define PBF_REG_BTB_GUARANTEED_VOQ26_RT_OFFSET 42993 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ26_RT_OFFSET 42994 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ27_RT_OFFSET 42995 +#define PBF_REG_BTB_GUARANTEED_VOQ27_RT_OFFSET 42996 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ27_RT_OFFSET 42997 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ28_RT_OFFSET 42998 +#define PBF_REG_BTB_GUARANTEED_VOQ28_RT_OFFSET 42999 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ28_RT_OFFSET 43000 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ29_RT_OFFSET 43001 +#define PBF_REG_BTB_GUARANTEED_VOQ29_RT_OFFSET 43002 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ29_RT_OFFSET 43003 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ30_RT_OFFSET 43004 +#define PBF_REG_BTB_GUARANTEED_VOQ30_RT_OFFSET 43005 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ30_RT_OFFSET 43006 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ31_RT_OFFSET 43007 +#define PBF_REG_BTB_GUARANTEED_VOQ31_RT_OFFSET 43008 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ31_RT_OFFSET 43009 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ32_RT_OFFSET 43010 +#define PBF_REG_BTB_GUARANTEED_VOQ32_RT_OFFSET 43011 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ32_RT_OFFSET 43012 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ33_RT_OFFSET 43013 +#define PBF_REG_BTB_GUARANTEED_VOQ33_RT_OFFSET 43014 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ33_RT_OFFSET 43015 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ34_RT_OFFSET 43016 +#define PBF_REG_BTB_GUARANTEED_VOQ34_RT_OFFSET 43017 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ34_RT_OFFSET 43018 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ35_RT_OFFSET 43019 +#define PBF_REG_BTB_GUARANTEED_VOQ35_RT_OFFSET 43020 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ35_RT_OFFSET 43021 +#define XCM_REG_CON_PHY_Q3_RT_OFFSET 43022 -#define RUNTIME_ARRAY_SIZE 41743 +#define RUNTIME_ARRAY_SIZE 43023 + +/* Init Callbacks */ +#define DMAE_READY_CB 0 #endif /* __RT_DEFS_H__ */ diff --git a/drivers/net/qede/base/ecore_sp_api.h b/drivers/net/qede/base/ecore_sp_api.h index 86e84964..4633dbeb 100644 --- a/drivers/net/qede/base/ecore_sp_api.h +++ b/drivers/net/qede/base/ecore_sp_api.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. 
- * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef __ECORE_SP_API_H__ diff --git a/drivers/net/qede/base/ecore_sp_commands.c b/drivers/net/qede/base/ecore_sp_commands.c index 7598e7a6..b43baf9d 100644 --- a/drivers/net/qede/base/ecore_sp_commands.c +++ b/drivers/net/qede/base/ecore_sp_commands.c @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #include "bcm_osal.h" @@ -295,6 +293,7 @@ ecore_tunn_set_pf_start_params(struct ecore_hwfn *p_hwfn, } #define ETH_P_8021Q 0x8100 +#define ETH_P_8021AD 0x88A8 /* 802.1ad Service VLAN */ enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, @@ -308,7 +307,7 @@ enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn, struct ecore_sp_init_data init_data; enum _ecore_status_t rc = ECORE_NOTIMPL; u8 page_cnt; - int i; + u8 i; /* update initial eq producer */ ecore_eq_prod_update(p_hwfn, @@ -343,18 +342,27 @@ enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn, p_ramrod->outer_tag_config.outer_tag.tci = OSAL_CPU_TO_LE16(p_hwfn->hw_info.ovlan); + if (OSAL_TEST_BIT(ECORE_MF_8021Q_TAGGING, &p_hwfn->p_dev->mf_bits)) { + p_ramrod->outer_tag_config.outer_tag.tpid = ETH_P_8021Q; + } else if (OSAL_TEST_BIT(ECORE_MF_8021AD_TAGGING, + &p_hwfn->p_dev->mf_bits)) { + p_ramrod->outer_tag_config.outer_tag.tpid = ETH_P_8021AD; + p_ramrod->outer_tag_config.enable_stag_pri_change = 1; + } + + p_ramrod->outer_tag_config.pri_map_valid = 1; + for (i = 0; i < ECORE_MAX_PFC_PRIORITIES; i++) + p_ramrod->outer_tag_config.inner_to_outer_pri_map[i] = i; + /* enable_stag_pri_change should be set if port is in BD mode or, + * UFP with Host Control mode or, UFP with DCB over base interface. + */ if (OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits)) { - p_ramrod->outer_tag_config.outer_tag.tpid = - OSAL_CPU_TO_LE16(ETH_P_8021Q); - if (p_hwfn->ufp_info.pri_type == ECORE_UFP_PRI_OS) + if ((p_hwfn->ufp_info.pri_type == ECORE_UFP_PRI_OS) || + (p_hwfn->p_dcbx_info->results.dcbx_enabled)) p_ramrod->outer_tag_config.enable_stag_pri_change = 1; else p_ramrod->outer_tag_config.enable_stag_pri_change = 0; - p_ramrod->outer_tag_config.pri_map_valid = 1; - for (i = 0; i < 8; i++) - p_ramrod->outer_tag_config.inner_to_outer_pri_map[i] = - (u8)i; } /* Place EQ address in RAMROD */ @@ -451,7 +459,8 @@ enum _ecore_status_t ecore_sp_pf_update_ufp(struct ecore_hwfn *p_hwfn) return rc; p_ent->ramrod.pf_update.update_enable_stag_pri_change = true; - if (p_hwfn->ufp_info.pri_type == ECORE_UFP_PRI_OS) + if ((p_hwfn->ufp_info.pri_type == ECORE_UFP_PRI_OS) || + (p_hwfn->p_dcbx_info->results.dcbx_enabled)) p_ent->ramrod.pf_update.enable_stag_pri_change = 1; else p_ent->ramrod.pf_update.enable_stag_pri_change = 0; diff --git a/drivers/net/qede/base/ecore_sp_commands.h b/drivers/net/qede/base/ecore_sp_commands.h index 98009c65..e57414cf 100644 --- a/drivers/net/qede/base/ecore_sp_commands.h +++ b/drivers/net/qede/base/ecore_sp_commands.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. 
+ * www.cavium.com */ #ifndef __ECORE_SP_COMMANDS_H__ diff --git a/drivers/net/qede/base/ecore_spq.c b/drivers/net/qede/base/ecore_spq.c index 70ffa8cd..776c86f7 100644 --- a/drivers/net/qede/base/ecore_spq.c +++ b/drivers/net/qede/base/ecore_spq.c @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #include "bcm_osal.h" @@ -30,7 +28,7 @@ #define SPQ_BLOCK_DELAY_MAX_ITER (10) #define SPQ_BLOCK_DELAY_US (10) -#define SPQ_BLOCK_SLEEP_MAX_ITER (1000) +#define SPQ_BLOCK_SLEEP_MAX_ITER (200) #define SPQ_BLOCK_SLEEP_MS (5) /*************************************************************************** @@ -60,8 +58,12 @@ static enum _ecore_status_t __ecore_spq_block(struct ecore_hwfn *p_hwfn, u32 iter_cnt; comp_done = (struct ecore_spq_comp_done *)p_ent->comp_cb.cookie; - iter_cnt = sleep_between_iter ? SPQ_BLOCK_SLEEP_MAX_ITER + iter_cnt = sleep_between_iter ? p_hwfn->p_spq->block_sleep_max_iter : SPQ_BLOCK_DELAY_MAX_ITER; +#ifndef ASIC_ONLY + if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && sleep_between_iter) + iter_cnt *= 5; +#endif while (iter_cnt--) { OSAL_POLL_MODE_DPC(p_hwfn); @@ -138,6 +140,14 @@ err: return ECORE_BUSY; } +void ecore_set_spq_block_timeout(struct ecore_hwfn *p_hwfn, + u32 spq_timeout_ms) +{ + p_hwfn->p_spq->block_sleep_max_iter = spq_timeout_ms ? + spq_timeout_ms / SPQ_BLOCK_SLEEP_MS : + SPQ_BLOCK_SLEEP_MAX_ITER; +} + /*************************************************************************** * SPQ entries inner API ***************************************************************************/ @@ -389,7 +399,7 @@ enum _ecore_status_t ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem) /* Allocate EQ struct */ p_eq = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_eq)); if (!p_eq) { - DP_NOTICE(p_hwfn, true, + DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_eq'\n"); return ECORE_NOMEM; } @@ -402,7 +412,7 @@ enum _ecore_status_t ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem) num_elem, sizeof(union event_ring_element), &p_eq->chain, OSAL_NULL) != ECORE_SUCCESS) { - DP_NOTICE(p_hwfn, true, "Failed to allocate eq chain\n"); + DP_NOTICE(p_hwfn, false, "Failed to allocate eq chain\n"); goto eq_allocate_fail; } @@ -547,8 +557,7 @@ enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn) p_spq = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(struct ecore_spq)); if (!p_spq) { - DP_NOTICE(p_hwfn, true, - "Failed to allocate `struct ecore_spq'\n"); + DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_spq'\n"); return ECORE_NOMEM; } @@ -560,7 +569,7 @@ enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn) 0, /* N/A when the mode is SINGLE */ sizeof(struct slow_path_element), &p_spq->chain, OSAL_NULL)) { - DP_NOTICE(p_hwfn, true, "Failed to allocate spq chain\n"); + DP_NOTICE(p_hwfn, false, "Failed to allocate spq chain\n"); goto spq_allocate_fail; } @@ -576,7 +585,8 @@ enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn) p_spq->p_phys = p_phys; #ifdef CONFIG_ECORE_LOCK_ALLOC - OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_spq->lock); + if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_spq->lock)) + goto spq_allocate_fail; #endif p_hwfn->p_spq = p_spq; @@ -630,9 +640,7 @@ ecore_spq_get_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry **pp_ent) if (OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) { p_ent = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC, 
sizeof(*p_ent)); if (!p_ent) { - DP_NOTICE(p_hwfn, true, - "Failed to allocate an SPQ entry for a pending" - " ramrod\n"); + DP_NOTICE(p_hwfn, false, "Failed to allocate an SPQ entry for a pending ramrod\n"); rc = ECORE_NOMEM; goto out_unlock; } @@ -1013,7 +1021,7 @@ enum _ecore_status_t ecore_consq_alloc(struct ecore_hwfn *p_hwfn) p_consq = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_consq)); if (!p_consq) { - DP_NOTICE(p_hwfn, true, + DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_consq'\n"); return ECORE_NOMEM; } @@ -1026,7 +1034,7 @@ enum _ecore_status_t ecore_consq_alloc(struct ecore_hwfn *p_hwfn) ECORE_CHAIN_PAGE_SIZE / 0x80, 0x80, &p_consq->chain, OSAL_NULL) != ECORE_SUCCESS) { - DP_NOTICE(p_hwfn, true, "Failed to allocate consq chain"); + DP_NOTICE(p_hwfn, false, "Failed to allocate consq chain"); goto consq_allocate_fail; } diff --git a/drivers/net/qede/base/ecore_spq.h b/drivers/net/qede/base/ecore_spq.h index 526cff08..6142c399 100644 --- a/drivers/net/qede/base/ecore_spq.h +++ b/drivers/net/qede/base/ecore_spq.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef __ECORE_SPQ_H__ @@ -116,6 +114,9 @@ struct ecore_spq { dma_addr_t p_phys; struct ecore_spq_entry *p_virt; + /* SPQ max sleep iterations used in __ecore_spq_block() */ + u32 block_sleep_max_iter; + /* Bitmap for handling out-of-order completions */ #define SPQ_RING_SIZE \ (CORE_SPQE_PAGE_SIZE_BYTES / sizeof(struct slow_path_element)) @@ -150,6 +151,16 @@ struct ecore_port; struct ecore_hwfn; /** + * @brief ecore_set_spq_block_timeout - calculates the maximum sleep + * iterations used in __ecore_spq_block(); + * + * @param p_hwfn + * @param spq_timeout_ms + */ +void ecore_set_spq_block_timeout(struct ecore_hwfn *p_hwfn, + u32 spq_timeout_ms); + +/** * @brief ecore_spq_post - Posts a Slow hwfn request to FW, or lacking that * Pends it to the future list. * diff --git a/drivers/net/qede/base/ecore_sriov.c b/drivers/net/qede/base/ecore_sriov.c index b1e26d6f..f7ebf7ad 100644 --- a/drivers/net/qede/base/ecore_sriov.c +++ b/drivers/net/qede/base/ecore_sriov.c @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. 
+ * www.cavium.com */ #include "bcm_osal.h" @@ -61,6 +59,8 @@ const char *ecore_channel_tlvs_string[] = { "CHANNEL_TLV_COALESCE_UPDATE", "CHANNEL_TLV_QID", "CHANNEL_TLV_COALESCE_READ", + "CHANNEL_TLV_BULLETIN_UPDATE_MAC", + "CHANNEL_TLV_UPDATE_MTU", "CHANNEL_TLV_MAX" }; @@ -590,8 +590,7 @@ enum _ecore_status_t ecore_iov_alloc(struct ecore_hwfn *p_hwfn) p_sriov = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_sriov)); if (!p_sriov) { - DP_NOTICE(p_hwfn, true, - "Failed to allocate `struct ecore_sriov'\n"); + DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_sriov'\n"); return ECORE_NOMEM; } @@ -648,7 +647,7 @@ enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn *p_hwfn) p_dev->p_iov_info = OSAL_ZALLOC(p_dev, GFP_KERNEL, sizeof(*p_dev->p_iov_info)); if (!p_dev->p_iov_info) { - DP_NOTICE(p_hwfn, true, + DP_NOTICE(p_hwfn, false, "Can't support IOV due to lack of memory\n"); return ECORE_NOMEM; } @@ -1968,7 +1967,8 @@ ecore_iov_configure_vport_forced(struct ecore_hwfn *p_hwfn, if (!p_vf->vport_instance) return ECORE_INVAL; - if (events & (1 << MAC_ADDR_FORCED)) { + if ((events & (1 << MAC_ADDR_FORCED)) || + p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change) { /* Since there's no way [currently] of removing the MAC, * we can always assume this means we need to force it. */ @@ -1989,7 +1989,11 @@ ecore_iov_configure_vport_forced(struct ecore_hwfn *p_hwfn, return rc; } - p_vf->configured_features |= 1 << MAC_ADDR_FORCED; + if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change) + p_vf->configured_features |= + 1 << VFPF_BULLETIN_MAC_ADDR; + else + p_vf->configured_features |= 1 << MAC_ADDR_FORCED; } if (events & (1 << VLAN_ADDR_FORCED)) { @@ -2854,6 +2858,45 @@ out: length, status); } +static enum _ecore_status_t +ecore_iov_vf_pf_update_mtu(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_vf_info *p_vf) +{ + struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx; + struct ecore_sp_vport_update_params params; + enum _ecore_status_t rc = ECORE_SUCCESS; + struct vfpf_update_mtu_tlv *p_req; + u8 status = PFVF_STATUS_SUCCESS; + + /* Valiate PF can send such a request */ + if (!p_vf->vport_instance) { + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "No VPORT instance available for VF[%d], failing MTU update\n", + p_vf->abs_vf_id); + status = PFVF_STATUS_FAILURE; + goto send_status; + } + + p_req = &mbx->req_virt->update_mtu; + + OSAL_MEMSET(¶ms, 0, sizeof(params)); + params.opaque_fid = p_vf->opaque_fid; + params.vport_id = p_vf->vport_id; + params.mtu = p_req->mtu; + rc = ecore_sp_vport_update(p_hwfn, ¶ms, ECORE_SPQ_MODE_EBLOCK, + OSAL_NULL); + + if (rc) + status = PFVF_STATUS_FAILURE; +send_status: + ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf, + CHANNEL_TLV_UPDATE_MTU, + sizeof(struct pfvf_def_resp_tlv), + status); + return rc; +} + void *ecore_iov_search_list_tlvs(struct ecore_hwfn *p_hwfn, void *p_tlvs_list, u16 req_type) { @@ -2975,8 +3018,7 @@ ecore_iov_vp_update_mcast_bin_param(struct ecore_hwfn *p_hwfn, p_data->update_approx_mcast_flg = 1; OSAL_MEMCPY(p_data->bins, p_mcast_tlv->bins, - sizeof(unsigned long) * - ETH_MULTICAST_MAC_BINS_IN_REGS); + sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS); *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_MCAST; } @@ -4137,6 +4179,9 @@ void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn, case CHANNEL_TLV_COALESCE_READ: ecore_iov_vf_pf_get_coalesce(p_hwfn, p_ptt, p_vf); break; + case CHANNEL_TLV_UPDATE_MTU: + ecore_iov_vf_pf_update_mtu(p_hwfn, p_ptt, p_vf); + break; } } else if (ecore_iov_tlv_supported(mbx->first_tlv.tl.type)) { /* If we've 
received a message from a VF we consider malicious @@ -4370,7 +4415,11 @@ void ecore_iov_bulletin_set_forced_mac(struct ecore_hwfn *p_hwfn, return; } - feature = 1 << MAC_ADDR_FORCED; + if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change) + feature = 1 << VFPF_BULLETIN_MAC_ADDR; + else + feature = 1 << MAC_ADDR_FORCED; + OSAL_MEMCPY(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN); vf_info->bulletin.p_virt->valid_bitmap |= feature; @@ -4411,9 +4460,13 @@ enum _ecore_status_t ecore_iov_bulletin_set_mac(struct ecore_hwfn *p_hwfn, vf_info->bulletin.p_virt->valid_bitmap |= feature; + if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change) + ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature); + return ECORE_SUCCESS; } +#ifndef LINUX_REMOVE enum _ecore_status_t ecore_iov_bulletin_set_forced_untagged_default(struct ecore_hwfn *p_hwfn, bool b_untagged_only, int vfid) @@ -4470,6 +4523,7 @@ void ecore_iov_get_vfs_opaque_fid(struct ecore_hwfn *p_hwfn, int vfid, *opaque_fid = vf_info->opaque_fid; } +#endif void ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn *p_hwfn, u16 pvid, int vfid) @@ -4657,6 +4711,22 @@ u32 ecore_iov_pfvf_msg_length(void) return sizeof(union pfvf_tlvs); } +u8 *ecore_iov_bulletin_get_mac(struct ecore_hwfn *p_hwfn, + u16 rel_vf_id) +{ + struct ecore_vf_info *p_vf; + + p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true); + if (!p_vf || !p_vf->bulletin.p_virt) + return OSAL_NULL; + + if (!(p_vf->bulletin.p_virt->valid_bitmap & + (1 << VFPF_BULLETIN_MAC_ADDR))) + return OSAL_NULL; + + return p_vf->bulletin.p_virt->mac; +} + u8 *ecore_iov_bulletin_get_forced_mac(struct ecore_hwfn *p_hwfn, u16 rel_vf_id) { struct ecore_vf_info *p_vf; diff --git a/drivers/net/qede/base/ecore_sriov.h b/drivers/net/qede/base/ecore_sriov.h index 850b1052..50c7d2c9 100644 --- a/drivers/net/qede/base/ecore_sriov.h +++ b/drivers/net/qede/base/ecore_sriov.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef __ECORE_SRIOV_H__ diff --git a/drivers/net/qede/base/ecore_status.h b/drivers/net/qede/base/ecore_status.h index c77ec260..b893f1d4 100644 --- a/drivers/net/qede/base/ecore_status.h +++ b/drivers/net/qede/base/ecore_status.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef __ECORE_STATUS_H__ diff --git a/drivers/net/qede/base/ecore_utils.h b/drivers/net/qede/base/ecore_utils.h index 034cf1eb..249136b0 100644 --- a/drivers/net/qede/base/ecore_utils.h +++ b/drivers/net/qede/base/ecore_utils.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef __ECORE_UTILS_H__ diff --git a/drivers/net/qede/base/ecore_vf.c b/drivers/net/qede/base/ecore_vf.c index e0f2dd5a..d2213f79 100644 --- a/drivers/net/qede/base/ecore_vf.c +++ b/drivers/net/qede/base/ecore_vf.c @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. 
* All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #include "bcm_osal.h" @@ -1275,8 +1273,7 @@ ecore_vf_pf_vport_update(struct ecore_hwfn *p_hwfn, resp_size += sizeof(struct pfvf_def_resp_tlv); OSAL_MEMCPY(p_mcast_tlv->bins, p_params->bins, - sizeof(unsigned long) * - ETH_MULTICAST_MAC_BINS_IN_REGS); + sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS); } update_rx = p_params->accept_flags.update_rx_mode_config; @@ -1473,7 +1470,7 @@ void ecore_vf_pf_filter_mcast(struct ecore_hwfn *p_hwfn, u32 bit; bit = ecore_mcast_bin_from_mac(p_filter_cmd->mac[i]); - OSAL_SET_BIT(bit, sp_params.bins); + sp_params.bins[bit / 32] |= 1 << (bit % 32); } } @@ -1629,6 +1626,39 @@ exit: return rc; } +enum _ecore_status_t +ecore_vf_pf_update_mtu(struct ecore_hwfn *p_hwfn, u16 mtu) +{ + struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct vfpf_update_mtu_tlv *p_req; + struct pfvf_def_resp_tlv *p_resp; + enum _ecore_status_t rc; + + if (!mtu) + return ECORE_INVAL; + + /* clear mailbox and prep header tlv */ + p_req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_UPDATE_MTU, + sizeof(*p_req)); + p_req->mtu = mtu; + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "Requesting MTU update to %d\n", mtu); + + /* add list termination tlv */ + ecore_add_tlv(&p_iov->offset, + CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + p_resp = &p_iov->pf2vf_reply->default_resp; + rc = ecore_send_msg2pf(p_hwfn, &p_resp->hdr.status, sizeof(*p_resp)); + if (p_resp->hdr.status == PFVF_STATUS_NOT_SUPPORTED) + rc = ECORE_INVAL; + + ecore_vf_pf_req_end(p_hwfn, rc); + + return rc; +} + u16 ecore_vf_get_igu_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id) { diff --git a/drivers/net/qede/base/ecore_vf.h b/drivers/net/qede/base/ecore_vf.h index de2758cb..a07f82eb 100644 --- a/drivers/net/qede/base/ecore_vf.h +++ b/drivers/net/qede/base/ecore_vf.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef __ECORE_VF_H__ @@ -319,5 +317,14 @@ void ecore_vf_set_vf_start_tunn_update_param(struct ecore_tunnel_info *p_tun); u32 ecore_vf_hw_bar_size(struct ecore_hwfn *p_hwfn, enum BAR_ID bar_id); + +/** + * @brief - ecore_vf_pf_update_mtu Update MTU for VF. + * + * @param p_hwfn + * @param - mtu + */ +enum _ecore_status_t +ecore_vf_pf_update_mtu(struct ecore_hwfn *p_hwfn, u16 mtu); #endif #endif /* __ECORE_VF_H__ */ diff --git a/drivers/net/qede/base/ecore_vf_api.h b/drivers/net/qede/base/ecore_vf_api.h index 9815cf8a..1a9fb3b1 100644 --- a/drivers/net/qede/base/ecore_vf_api.h +++ b/drivers/net/qede/base/ecore_vf_api.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef __ECORE_VF_API_H__ diff --git a/drivers/net/qede/base/ecore_vfpf_if.h b/drivers/net/qede/base/ecore_vfpf_if.h index ecb00649..c30677ab 100644 --- a/drivers/net/qede/base/ecore_vfpf_if.h +++ b/drivers/net/qede/base/ecore_vfpf_if.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. 
- * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef __ECORE_VF_PF_IF_H__ @@ -396,7 +394,13 @@ struct vfpf_vport_update_mcast_bin_tlv { struct channel_tlv tl; u8 padding[4]; - u64 bins[8]; + /* This was a mistake; There are only 256 approx bins, + * and in HSI they're divided into 32-bit values. + * As old VFs used to set-bit to the values on its side, + * the upper half of the array is never expected to contain any data. + */ + u64 bins[4]; + u64 obsolete_bins[4]; }; struct vfpf_vport_update_accept_param_tlv { @@ -525,6 +529,18 @@ struct pfvf_read_coal_resp_tlv { u8 padding[6]; }; +struct vfpf_bulletin_update_mac_tlv { + struct vfpf_first_tlv first_tlv; + u8 mac[ETH_ALEN]; + u8 padding[2]; +}; + +struct vfpf_update_mtu_tlv { + struct vfpf_first_tlv first_tlv; + u16 mtu; + u8 padding[6]; +}; + union vfpf_tlvs { struct vfpf_first_tlv first_tlv; struct vfpf_acquire_tlv acquire; @@ -539,6 +555,8 @@ union vfpf_tlvs { struct vfpf_update_tunn_param_tlv tunn_param_update; struct vfpf_update_coalesce update_coalesce; struct vfpf_read_coal_req_tlv read_coal_req; + struct vfpf_bulletin_update_mac_tlv bulletin_update_mac; + struct vfpf_update_mtu_tlv update_mtu; struct tlv_buffer_size tlv_buf_size; }; @@ -669,6 +687,8 @@ enum { CHANNEL_TLV_COALESCE_UPDATE, CHANNEL_TLV_QID, CHANNEL_TLV_COALESCE_READ, + CHANNEL_TLV_BULLETIN_UPDATE_MAC, + CHANNEL_TLV_UPDATE_MTU, CHANNEL_TLV_MAX, /* Required for iterating over vport-update tlvs. diff --git a/drivers/net/qede/base/eth_common.h b/drivers/net/qede/base/eth_common.h index 45a0356d..abfa6854 100644 --- a/drivers/net/qede/base/eth_common.h +++ b/drivers/net/qede/base/eth_common.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #ifndef __ETH_COMMON__ @@ -119,6 +117,9 @@ /* Number of etherType values configured by driver for control frame check */ #define ETH_CTL_FRAME_ETH_TYPE_NUM 4 +/* GFS constants */ +#define ETH_GFT_TRASHCAN_VPORT 0x1FF /* GFT drop flow vport number */ + /* diff --git a/drivers/net/qede/base/mcp_public.h b/drivers/net/qede/base/mcp_public.h index 81ca6634..81aa88e7 100644 --- a/drivers/net/qede/base/mcp_public.h +++ b/drivers/net/qede/base/mcp_public.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. 
+ * www.cavium.com */ /**************************************************************************** @@ -800,6 +798,7 @@ struct public_port { #define ETH_TRANSCEIVER_TYPE_4x10G 0x1f #define ETH_TRANSCEIVER_TYPE_4x25G_CR 0x20 #define ETH_TRANSCEIVER_TYPE_1000BASET 0x21 +#define ETH_TRANSCEIVER_TYPE_10G_BASET 0x22 #define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR 0x30 #define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR 0x31 #define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR 0x32 @@ -1777,6 +1776,8 @@ struct public_drv_mb { #define FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ 0x00000001 /* MFW supports EEE */ #define FW_MB_PARAM_FEATURE_SUPPORT_EEE 0x00000002 +/* MFW supports DRV_LOAD Timeout */ +#define FW_MB_PARAM_FEATURE_SUPPORT_DRV_LOAD_TO 0x00000004 /* MFW supports virtual link */ #define FW_MB_PARAM_FEATURE_SUPPORT_VLINK 0x00010000 diff --git a/drivers/net/qede/base/nvm_cfg.h b/drivers/net/qede/base/nvm_cfg.h index c99e805d..ab86260e 100644 --- a/drivers/net/qede/base/nvm_cfg.h +++ b/drivers/net/qede/base/nvm_cfg.h @@ -1,9 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ /**************************************************************************** diff --git a/drivers/net/qede/base/reg_addr.h b/drivers/net/qede/base/reg_addr.h index ad15d28a..402f6204 100644 --- a/drivers/net/qede/base/reg_addr.h +++ b/drivers/net/qede/base/reg_addr.h @@ -1,17 +1,7 @@ -/* - * Copyright (c) 2016 QLogic Corporation. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2016 - 2018 Cavium Inc. * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. - */ - -/* - * Copyright (c) 2016 QLogic Corporation. - * All rights reserved. - * www.qlogic.com - * - * See LICENSE.qede_pmd for copyright and licensing details. + * www.cavium.com */ #define CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE_SHIFT \ @@ -1222,3 +1212,5 @@ #define MCP_REG_CPU_STATE_SOFT_HALTED (0x1 << 10) #define PRS_REG_SEARCH_TENANT_ID 0x1f044cUL #define PGLUE_B_REG_VF_BAR1_SIZE 0x2aae68UL + +#define RSS_REG_RSS_RAM_MASK 0x238c10UL |
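The multicast-bin hunks above (ecore_sriov.c, ecore_vf.c and the vfpf_vport_update_mcast_bin_tlv layout in ecore_vfpf_if.h) all move the approximate-match bitmap from "unsigned long" storage to 32-bit words, setting bits via bins[bit / 32] |= 1 << (bit % 32). Below is a minimal standalone sketch of that indexing, not driver code: it assumes the usual 256 approximate bins packed into eight u32 register words (ETH_MULTICAST_MAC_BINS_IN_REGS) and uses a placeholder hash where the real driver calls ecore_mcast_bin_from_mac(), which is CRC-based.

/*
 * Standalone illustration of the u32-based multicast bin indexing.
 * fake_mcast_bin_from_mac() is a stand-in only; the real driver derives
 * the bin from a CRC of the MAC address via ecore_mcast_bin_from_mac().
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MCAST_BINS      256                 /* assumed total approximate bins */
#define MCAST_BIN_WORDS (MCAST_BINS / 32)   /* assumed ETH_MULTICAST_MAC_BINS_IN_REGS */

/* Placeholder hash, NOT the driver's algorithm. */
static uint32_t fake_mcast_bin_from_mac(const uint8_t mac[6])
{
	uint32_t acc = 0;
	int i;

	for (i = 0; i < 6; i++)
		acc = (acc * 31) + mac[i];
	return acc % MCAST_BINS;
}

int main(void)
{
	uint32_t bins[MCAST_BIN_WORDS];
	const uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };
	uint32_t bit;

	memset(bins, 0, sizeof(bins));

	/* Same word/mask arithmetic as the patched ecore_vf_pf_filter_mcast(). */
	bit = fake_mcast_bin_from_mac(mac);
	bins[bit / 32] |= 1u << (bit % 32);

	printf("bin %u -> word %u, mask 0x%08x\n", bit, bit / 32, 1u << (bit % 32));
	return 0;
}

Packing the bins as u32 words matches how the firmware HSI divides the 256 bins across registers, which is also why the TLV comment in the patch keeps only the lower half of the old u64 array in use and marks the upper half obsolete.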