Diffstat (limited to 'drivers/net/qede/base/ecore_l2.c')
-rw-r--r--  drivers/net/qede/base/ecore_l2.c   73
1 file changed, 54 insertions(+), 19 deletions(-)
diff --git a/drivers/net/qede/base/ecore_l2.c b/drivers/net/qede/base/ecore_l2.c
index e3afc8a3..d71f4616 100644
--- a/drivers/net/qede/base/ecore_l2.c
+++ b/drivers/net/qede/base/ecore_l2.c
@@ -1,9 +1,7 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#include "bcm_osal.h"
@@ -77,7 +75,8 @@ enum _ecore_status_t ecore_l2_alloc(struct ecore_hwfn *p_hwfn)
}
#ifdef CONFIG_ECORE_LOCK_ALLOC
- OSAL_MUTEX_ALLOC(p_hwfn, &p_l2_info->lock);
+ if (OSAL_MUTEX_ALLOC(p_hwfn, &p_l2_info->lock))
+ return ECORE_NOMEM;
#endif
return ECORE_SUCCESS;
@@ -110,6 +109,7 @@ void ecore_l2_free(struct ecore_hwfn *p_hwfn)
break;
OSAL_VFREE(p_hwfn->p_dev,
p_hwfn->p_l2_info->pp_qid_usage[i]);
+ p_hwfn->p_l2_info->pp_qid_usage[i] = OSAL_NULL;
}
#ifdef CONFIG_ECORE_LOCK_ALLOC
@@ -119,6 +119,7 @@ void ecore_l2_free(struct ecore_hwfn *p_hwfn)
#endif
OSAL_VFREE(p_hwfn->p_dev, p_hwfn->p_l2_info->pp_qid_usage);
+ p_hwfn->p_l2_info->pp_qid_usage = OSAL_NULL;
out_l2_info:
OSAL_VFREE(p_hwfn->p_dev, p_hwfn->p_l2_info);
@@ -687,7 +688,7 @@ ecore_sp_update_mcast_bin(struct vport_update_ramrod_data *p_ramrod,
p_ramrod->common.update_approx_mcast_flg = 1;
for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
- u32 *p_bins = (u32 *)p_params->bins;
+ u32 *p_bins = p_params->bins;
p_ramrod->approx_mcast.bins[i] = OSAL_CPU_TO_LE32(p_bins[i]);
}
@@ -1185,11 +1186,20 @@ ecore_eth_pf_tx_queue_start(struct ecore_hwfn *p_hwfn,
void OSAL_IOMEM * *pp_doorbell)
{
enum _ecore_status_t rc;
+ u16 pq_id;
+
+ /* TODO - set tc in the pq_params for multi-cos.
+ * If pacing is enabled then select queue according to
+ * rate limiter availability otherwise select queue based
+ * on multi cos.
+ */
+ if (IS_ECORE_PACING(p_hwfn))
+ pq_id = ecore_get_cm_pq_idx_rl(p_hwfn, p_cid->rel.queue_id);
+ else
+ pq_id = ecore_get_cm_pq_idx_mcos(p_hwfn, tc);
- /* TODO - set tc in the pq_params for multi-cos */
- rc = ecore_eth_txq_start_ramrod(p_hwfn, p_cid,
- pbl_addr, pbl_size,
- ecore_get_cm_pq_idx_mcos(p_hwfn, tc));
+ rc = ecore_eth_txq_start_ramrod(p_hwfn, p_cid, pbl_addr,
+ pbl_size, pq_id);
if (rc != ECORE_SUCCESS)
return rc;
@@ -1556,8 +1566,8 @@ ecore_sp_eth_filter_mcast(struct ecore_hwfn *p_hwfn,
enum spq_mode comp_mode,
struct ecore_spq_comp_cb *p_comp_data)
{
- unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
struct vport_update_ramrod_data *p_ramrod = OSAL_NULL;
+ u32 bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
struct ecore_spq_entry *p_ent = OSAL_NULL;
struct ecore_sp_init_data init_data;
u8 abs_vport_id = 0;
@@ -1596,8 +1606,7 @@ ecore_sp_eth_filter_mcast(struct ecore_hwfn *p_hwfn,
/* explicitly clear out the entire vector */
OSAL_MEMSET(&p_ramrod->approx_mcast.bins,
0, sizeof(p_ramrod->approx_mcast.bins));
- OSAL_MEMSET(bins, 0, sizeof(unsigned long) *
- ETH_MULTICAST_MAC_BINS_IN_REGS);
+ OSAL_MEMSET(bins, 0, sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS);
/* filter ADD op is explicit set op and it removes
* any existing filters for the vport.
*/
@@ -1606,16 +1615,15 @@ ecore_sp_eth_filter_mcast(struct ecore_hwfn *p_hwfn,
u32 bit;
bit = ecore_mcast_bin_from_mac(p_filter_cmd->mac[i]);
- OSAL_SET_BIT(bit, bins);
+ bins[bit / 32] |= 1 << (bit % 32);
}
/* Convert to correct endianity */
for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
struct vport_update_ramrod_mcast *p_ramrod_bins;
- u32 *p_bins = (u32 *)bins;
p_ramrod_bins = &p_ramrod->approx_mcast;
- p_ramrod_bins->bins[i] = OSAL_CPU_TO_LE32(p_bins[i]);
+ p_ramrod_bins->bins[i] = OSAL_CPU_TO_LE32(bins[i]);
}
}
@@ -1945,6 +1953,11 @@ static void __ecore_get_vport_port_stats(struct ecore_hwfn *p_hwfn,
p_ah->tx_1519_to_max_byte_packets =
port_stats.eth.u1.ah1.t1519_to_max;
}
+
+ p_common->link_change_count = ecore_rd(p_hwfn, p_ptt,
+ p_hwfn->mcp_info->port_addr +
+ OFFSETOF(struct public_port,
+ link_change_count));
}
void __ecore_get_vport_stats(struct ecore_hwfn *p_hwfn,
@@ -2061,11 +2074,14 @@ void ecore_reset_vport_stats(struct ecore_dev *p_dev)
/* PORT statistics are not necessarily reset, so we need to
* read and create a baseline for future statistics.
+ * Link change stat is maintained by MFW, return its value as is.
*/
if (!p_dev->reset_stats)
DP_INFO(p_dev, "Reset stats not allocated\n");
- else
+ else {
_ecore_get_vport_stats(p_dev, p_dev->reset_stats);
+ p_dev->reset_stats->common.link_change_count = 0;
+ }
}
void ecore_arfs_mode_configure(struct ecore_hwfn *p_hwfn,
@@ -2150,7 +2166,7 @@ ecore_configure_rfs_ntuple_filter(struct ecore_hwfn *p_hwfn,
p_ramrod->flow_id_valid = 0;
p_ramrod->flow_id = 0;
- p_ramrod->vport_id = abs_vport_id;
+ p_ramrod->vport_id = OSAL_CPU_TO_LE16((u16)abs_vport_id);
p_ramrod->filter_action = b_is_add ? GFT_ADD_FILTER
: GFT_DELETE_FILTER;
@@ -2267,3 +2283,22 @@ out:
return rc;
}
+
+enum _ecore_status_t
+ecore_eth_tx_queue_maxrate(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_queue_cid *p_cid, u32 rate)
+{
+ struct ecore_mcp_link_state *p_link;
+ u8 vport;
+
+ vport = (u8)ecore_get_qm_vport_idx_rl(p_hwfn, p_cid->rel.queue_id);
+ p_link = &ECORE_LEADING_HWFN(p_hwfn->p_dev)->mcp_info->link_output;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
+ "About to rate limit qm vport %d for queue %d with rate %d\n",
+ vport, p_cid->rel.queue_id, rate);
+
+ return ecore_init_vport_rl(p_hwfn, p_ptt, vport, rate,
+ p_link->speed);
+}
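
A note on the ecore_l2_free() hunks: the patch clears each pointer after OSAL_VFREE() so that a repeated teardown, or an error path that re-enters the free routine, sees OSAL_NULL and skips the buffer instead of freeing it twice. A minimal standalone sketch of that free-and-NULL pattern, with plain libc free() standing in for OSAL_VFREE():

#include <stdlib.h>

/* Free a heap buffer and clear the caller's pointer, so a second call
 * with the same pointer is a harmless no-op rather than a double free.
 */
static void free_and_null(void **pp)
{
	free(*pp);      /* free(NULL) is defined to do nothing */
	*pp = NULL;
}

int main(void)
{
	void *buf = malloc(64);

	free_and_null(&buf);
	free_and_null(&buf);    /* safe: buf is already NULL */
	return 0;
}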
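
On the multicast hunks: the scratch array changes from unsigned long bins[] filled with OSAL_SET_BIT() to u32 bins[] filled with plain bit arithmetic, so the in-memory layout matches the 32-bit registers the ramrod expects regardless of whether the host's unsigned long is 32 or 64 bits wide; each word is then byte-swapped with OSAL_CPU_TO_LE32(). A small standalone sketch of the same bin-filling pattern; the bin count and the mac_to_bin() hash below are stand-ins, not the driver's ecore_mcast_bin_from_mac():

#include <stdint.h>
#include <stdio.h>

/* Assumed sizing: 256 approximate-multicast bins packed into eight
 * 32-bit registers (mirrors how ETH_MULTICAST_MAC_BINS_IN_REGS is used).
 */
#define MCAST_BINS          256
#define MCAST_BINS_IN_REGS  (MCAST_BINS / 32)

/* Hypothetical hash from MAC address to bin index. */
static uint32_t mac_to_bin(const uint8_t mac[6])
{
	uint32_t bin = 0;
	int i;

	for (i = 0; i < 6; i++)
		bin = bin * 31 + mac[i];
	return bin % MCAST_BINS;
}

int main(void)
{
	const uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };
	uint32_t bins[MCAST_BINS_IN_REGS] = { 0 };
	uint32_t bit = mac_to_bin(mac);
	unsigned int i;

	/* Same pattern as the patch: pick the 32-bit word, set one bit. */
	bins[bit / 32] |= 1u << (bit % 32);

	for (i = 0; i < MCAST_BINS_IN_REGS; i++)
		printf("reg %u: 0x%08x\n", i, bins[i]);
	return 0;
}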
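
On the link_change_count hunks: a vport statistics reset cannot clear the firmware-maintained port counters, so the driver snapshots them into reset_stats and later reports current-minus-baseline (that subtraction happens in ecore code outside this diff). Zeroing the baseline's link_change_count therefore keeps the MFW-maintained counter absolute rather than relative to the last reset. A toy sketch of that baseline idea, using a made-up two-field stats struct rather than struct ecore_eth_stats:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical, cut-down stats block. */
struct toy_stats {
	uint64_t rx_pkts;            /* reported relative to last reset */
	uint64_t link_change_count;  /* maintained by MFW, reported as-is */
};

/* Pretend hardware/MFW readout: counters only ever grow. */
static void read_hw_stats(struct toy_stats *s)
{
	static uint64_t rx = 1000, links = 3;

	rx += 250;
	s->rx_pkts = rx;
	s->link_change_count = links;
}

int main(void)
{
	struct toy_stats baseline, now;

	/* "Reset": snapshot the counters, but zero the MFW counter's
	 * baseline so it stays absolute (mirrors the patch).
	 */
	read_hw_stats(&baseline);
	baseline.link_change_count = 0;

	/* Later query: report current minus baseline. */
	read_hw_stats(&now);
	printf("rx since reset: %llu\n",
	       (unsigned long long)(now.rx_pkts - baseline.rx_pkts));
	printf("link changes (absolute): %llu\n",
	       (unsigned long long)(now.link_change_count -
				    baseline.link_change_count));
	return 0;
}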