author     Luca Boccassi <luca.boccassi@gmail.com>  2017-08-16 18:42:05 +0100
committer  Luca Boccassi <luca.boccassi@gmail.com>  2017-08-16 18:46:04 +0100
commit     f239aed5e674965691846e8ce3f187dd47523689 (patch)
tree       a153a3125c6e183c73871a8ecaa4b285fed5fbd5 /drivers/net/qede/qede_ethdev.c
parent     bf7567fd2a5b0b28ab724046143c24561d38d015 (diff)
New upstream version 17.08
Change-Id: I288b50990f52646089d6b1f3aaa6ba2f091a51d7
Signed-off-by: Luca Boccassi <luca.boccassi@gmail.com>
Diffstat (limited to 'drivers/net/qede/qede_ethdev.c')
-rw-r--r--  drivers/net/qede/qede_ethdev.c | 990
1 file changed, 681 insertions(+), 309 deletions(-)
diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
index 9fae40b6..0e059898 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
@@ -125,143 +125,199 @@ struct rte_qede_xstats_name_off {
};
static const struct rte_qede_xstats_name_off qede_xstats_strings[] = {
- {"rx_unicast_bytes", offsetof(struct ecore_eth_stats, rx_ucast_bytes)},
+ {"rx_unicast_bytes",
+ offsetof(struct ecore_eth_stats_common, rx_ucast_bytes)},
{"rx_multicast_bytes",
- offsetof(struct ecore_eth_stats, rx_mcast_bytes)},
+ offsetof(struct ecore_eth_stats_common, rx_mcast_bytes)},
{"rx_broadcast_bytes",
- offsetof(struct ecore_eth_stats, rx_bcast_bytes)},
- {"rx_unicast_packets", offsetof(struct ecore_eth_stats, rx_ucast_pkts)},
+ offsetof(struct ecore_eth_stats_common, rx_bcast_bytes)},
+ {"rx_unicast_packets",
+ offsetof(struct ecore_eth_stats_common, rx_ucast_pkts)},
{"rx_multicast_packets",
- offsetof(struct ecore_eth_stats, rx_mcast_pkts)},
+ offsetof(struct ecore_eth_stats_common, rx_mcast_pkts)},
{"rx_broadcast_packets",
- offsetof(struct ecore_eth_stats, rx_bcast_pkts)},
+ offsetof(struct ecore_eth_stats_common, rx_bcast_pkts)},
- {"tx_unicast_bytes", offsetof(struct ecore_eth_stats, tx_ucast_bytes)},
+ {"tx_unicast_bytes",
+ offsetof(struct ecore_eth_stats_common, tx_ucast_bytes)},
{"tx_multicast_bytes",
- offsetof(struct ecore_eth_stats, tx_mcast_bytes)},
+ offsetof(struct ecore_eth_stats_common, tx_mcast_bytes)},
{"tx_broadcast_bytes",
- offsetof(struct ecore_eth_stats, tx_bcast_bytes)},
- {"tx_unicast_packets", offsetof(struct ecore_eth_stats, tx_ucast_pkts)},
+ offsetof(struct ecore_eth_stats_common, tx_bcast_bytes)},
+ {"tx_unicast_packets",
+ offsetof(struct ecore_eth_stats_common, tx_ucast_pkts)},
{"tx_multicast_packets",
- offsetof(struct ecore_eth_stats, tx_mcast_pkts)},
+ offsetof(struct ecore_eth_stats_common, tx_mcast_pkts)},
{"tx_broadcast_packets",
- offsetof(struct ecore_eth_stats, tx_bcast_pkts)},
+ offsetof(struct ecore_eth_stats_common, tx_bcast_pkts)},
{"rx_64_byte_packets",
- offsetof(struct ecore_eth_stats, rx_64_byte_packets)},
+ offsetof(struct ecore_eth_stats_common, rx_64_byte_packets)},
{"rx_65_to_127_byte_packets",
- offsetof(struct ecore_eth_stats, rx_65_to_127_byte_packets)},
+ offsetof(struct ecore_eth_stats_common,
+ rx_65_to_127_byte_packets)},
{"rx_128_to_255_byte_packets",
- offsetof(struct ecore_eth_stats, rx_128_to_255_byte_packets)},
+ offsetof(struct ecore_eth_stats_common,
+ rx_128_to_255_byte_packets)},
{"rx_256_to_511_byte_packets",
- offsetof(struct ecore_eth_stats, rx_256_to_511_byte_packets)},
+ offsetof(struct ecore_eth_stats_common,
+ rx_256_to_511_byte_packets)},
{"rx_512_to_1023_byte_packets",
- offsetof(struct ecore_eth_stats, rx_512_to_1023_byte_packets)},
+ offsetof(struct ecore_eth_stats_common,
+ rx_512_to_1023_byte_packets)},
{"rx_1024_to_1518_byte_packets",
- offsetof(struct ecore_eth_stats, rx_1024_to_1518_byte_packets)},
- {"rx_1519_to_1522_byte_packets",
- offsetof(struct ecore_eth_stats, rx_1519_to_1522_byte_packets)},
- {"rx_1519_to_2047_byte_packets",
- offsetof(struct ecore_eth_stats, rx_1519_to_2047_byte_packets)},
- {"rx_2048_to_4095_byte_packets",
- offsetof(struct ecore_eth_stats, rx_2048_to_4095_byte_packets)},
- {"rx_4096_to_9216_byte_packets",
- offsetof(struct ecore_eth_stats, rx_4096_to_9216_byte_packets)},
- {"rx_9217_to_16383_byte_packets",
- offsetof(struct ecore_eth_stats,
- rx_9217_to_16383_byte_packets)},
+ offsetof(struct ecore_eth_stats_common,
+ rx_1024_to_1518_byte_packets)},
{"tx_64_byte_packets",
- offsetof(struct ecore_eth_stats, tx_64_byte_packets)},
+ offsetof(struct ecore_eth_stats_common, tx_64_byte_packets)},
{"tx_65_to_127_byte_packets",
- offsetof(struct ecore_eth_stats, tx_65_to_127_byte_packets)},
+ offsetof(struct ecore_eth_stats_common,
+ tx_65_to_127_byte_packets)},
{"tx_128_to_255_byte_packets",
- offsetof(struct ecore_eth_stats, tx_128_to_255_byte_packets)},
+ offsetof(struct ecore_eth_stats_common,
+ tx_128_to_255_byte_packets)},
{"tx_256_to_511_byte_packets",
- offsetof(struct ecore_eth_stats, tx_256_to_511_byte_packets)},
+ offsetof(struct ecore_eth_stats_common,
+ tx_256_to_511_byte_packets)},
{"tx_512_to_1023_byte_packets",
- offsetof(struct ecore_eth_stats, tx_512_to_1023_byte_packets)},
+ offsetof(struct ecore_eth_stats_common,
+ tx_512_to_1023_byte_packets)},
{"tx_1024_to_1518_byte_packets",
- offsetof(struct ecore_eth_stats, tx_1024_to_1518_byte_packets)},
- {"trx_1519_to_1522_byte_packets",
- offsetof(struct ecore_eth_stats, tx_1519_to_2047_byte_packets)},
- {"tx_2048_to_4095_byte_packets",
- offsetof(struct ecore_eth_stats, tx_2048_to_4095_byte_packets)},
- {"tx_4096_to_9216_byte_packets",
- offsetof(struct ecore_eth_stats, tx_4096_to_9216_byte_packets)},
- {"tx_9217_to_16383_byte_packets",
- offsetof(struct ecore_eth_stats,
- tx_9217_to_16383_byte_packets)},
+ offsetof(struct ecore_eth_stats_common,
+ tx_1024_to_1518_byte_packets)},
{"rx_mac_crtl_frames",
- offsetof(struct ecore_eth_stats, rx_mac_crtl_frames)},
+ offsetof(struct ecore_eth_stats_common, rx_mac_crtl_frames)},
{"tx_mac_control_frames",
- offsetof(struct ecore_eth_stats, tx_mac_ctrl_frames)},
- {"rx_pause_frames", offsetof(struct ecore_eth_stats, rx_pause_frames)},
- {"tx_pause_frames", offsetof(struct ecore_eth_stats, tx_pause_frames)},
+ offsetof(struct ecore_eth_stats_common, tx_mac_ctrl_frames)},
+ {"rx_pause_frames",
+ offsetof(struct ecore_eth_stats_common, rx_pause_frames)},
+ {"tx_pause_frames",
+ offsetof(struct ecore_eth_stats_common, tx_pause_frames)},
{"rx_priority_flow_control_frames",
- offsetof(struct ecore_eth_stats, rx_pfc_frames)},
+ offsetof(struct ecore_eth_stats_common, rx_pfc_frames)},
{"tx_priority_flow_control_frames",
- offsetof(struct ecore_eth_stats, tx_pfc_frames)},
+ offsetof(struct ecore_eth_stats_common, tx_pfc_frames)},
- {"rx_crc_errors", offsetof(struct ecore_eth_stats, rx_crc_errors)},
- {"rx_align_errors", offsetof(struct ecore_eth_stats, rx_align_errors)},
+ {"rx_crc_errors",
+ offsetof(struct ecore_eth_stats_common, rx_crc_errors)},
+ {"rx_align_errors",
+ offsetof(struct ecore_eth_stats_common, rx_align_errors)},
{"rx_carrier_errors",
- offsetof(struct ecore_eth_stats, rx_carrier_errors)},
+ offsetof(struct ecore_eth_stats_common, rx_carrier_errors)},
{"rx_oversize_packet_errors",
- offsetof(struct ecore_eth_stats, rx_oversize_packets)},
- {"rx_jabber_errors", offsetof(struct ecore_eth_stats, rx_jabbers)},
+ offsetof(struct ecore_eth_stats_common, rx_oversize_packets)},
+ {"rx_jabber_errors",
+ offsetof(struct ecore_eth_stats_common, rx_jabbers)},
{"rx_undersize_packet_errors",
- offsetof(struct ecore_eth_stats, rx_undersize_packets)},
- {"rx_fragments", offsetof(struct ecore_eth_stats, rx_fragments)},
+ offsetof(struct ecore_eth_stats_common, rx_undersize_packets)},
+ {"rx_fragments", offsetof(struct ecore_eth_stats_common, rx_fragments)},
{"rx_host_buffer_not_available",
- offsetof(struct ecore_eth_stats, no_buff_discards)},
+ offsetof(struct ecore_eth_stats_common, no_buff_discards)},
/* Number of packets discarded because they are bigger than MTU */
{"rx_packet_too_big_discards",
- offsetof(struct ecore_eth_stats, packet_too_big_discard)},
+ offsetof(struct ecore_eth_stats_common,
+ packet_too_big_discard)},
{"rx_ttl_zero_discards",
- offsetof(struct ecore_eth_stats, ttl0_discard)},
+ offsetof(struct ecore_eth_stats_common, ttl0_discard)},
{"rx_multi_function_tag_filter_discards",
- offsetof(struct ecore_eth_stats, mftag_filter_discards)},
+ offsetof(struct ecore_eth_stats_common, mftag_filter_discards)},
{"rx_mac_filter_discards",
- offsetof(struct ecore_eth_stats, mac_filter_discards)},
+ offsetof(struct ecore_eth_stats_common, mac_filter_discards)},
{"rx_hw_buffer_truncates",
- offsetof(struct ecore_eth_stats, brb_truncates)},
+ offsetof(struct ecore_eth_stats_common, brb_truncates)},
{"rx_hw_buffer_discards",
- offsetof(struct ecore_eth_stats, brb_discards)},
- {"tx_lpi_entry_count",
- offsetof(struct ecore_eth_stats, tx_lpi_entry_count)},
- {"tx_total_collisions",
- offsetof(struct ecore_eth_stats, tx_total_collisions)},
+ offsetof(struct ecore_eth_stats_common, brb_discards)},
{"tx_error_drop_packets",
- offsetof(struct ecore_eth_stats, tx_err_drop_pkts)},
+ offsetof(struct ecore_eth_stats_common, tx_err_drop_pkts)},
- {"rx_mac_bytes", offsetof(struct ecore_eth_stats, rx_mac_bytes)},
+ {"rx_mac_bytes", offsetof(struct ecore_eth_stats_common, rx_mac_bytes)},
{"rx_mac_unicast_packets",
- offsetof(struct ecore_eth_stats, rx_mac_uc_packets)},
+ offsetof(struct ecore_eth_stats_common, rx_mac_uc_packets)},
{"rx_mac_multicast_packets",
- offsetof(struct ecore_eth_stats, rx_mac_mc_packets)},
+ offsetof(struct ecore_eth_stats_common, rx_mac_mc_packets)},
{"rx_mac_broadcast_packets",
- offsetof(struct ecore_eth_stats, rx_mac_bc_packets)},
+ offsetof(struct ecore_eth_stats_common, rx_mac_bc_packets)},
{"rx_mac_frames_ok",
- offsetof(struct ecore_eth_stats, rx_mac_frames_ok)},
- {"tx_mac_bytes", offsetof(struct ecore_eth_stats, tx_mac_bytes)},
+ offsetof(struct ecore_eth_stats_common, rx_mac_frames_ok)},
+ {"tx_mac_bytes", offsetof(struct ecore_eth_stats_common, tx_mac_bytes)},
{"tx_mac_unicast_packets",
- offsetof(struct ecore_eth_stats, tx_mac_uc_packets)},
+ offsetof(struct ecore_eth_stats_common, tx_mac_uc_packets)},
{"tx_mac_multicast_packets",
- offsetof(struct ecore_eth_stats, tx_mac_mc_packets)},
+ offsetof(struct ecore_eth_stats_common, tx_mac_mc_packets)},
{"tx_mac_broadcast_packets",
- offsetof(struct ecore_eth_stats, tx_mac_bc_packets)},
+ offsetof(struct ecore_eth_stats_common, tx_mac_bc_packets)},
{"lro_coalesced_packets",
- offsetof(struct ecore_eth_stats, tpa_coalesced_pkts)},
+ offsetof(struct ecore_eth_stats_common, tpa_coalesced_pkts)},
{"lro_coalesced_events",
- offsetof(struct ecore_eth_stats, tpa_coalesced_events)},
+ offsetof(struct ecore_eth_stats_common, tpa_coalesced_events)},
{"lro_aborts_num",
- offsetof(struct ecore_eth_stats, tpa_aborts_num)},
+ offsetof(struct ecore_eth_stats_common, tpa_aborts_num)},
{"lro_not_coalesced_packets",
- offsetof(struct ecore_eth_stats, tpa_not_coalesced_pkts)},
+ offsetof(struct ecore_eth_stats_common,
+ tpa_not_coalesced_pkts)},
{"lro_coalesced_bytes",
- offsetof(struct ecore_eth_stats, tpa_coalesced_bytes)},
+ offsetof(struct ecore_eth_stats_common,
+ tpa_coalesced_bytes)},
+};
+
+static const struct rte_qede_xstats_name_off qede_bb_xstats_strings[] = {
+ {"rx_1519_to_1522_byte_packets",
+ offsetof(struct ecore_eth_stats, bb) +
+ offsetof(struct ecore_eth_stats_bb,
+ rx_1519_to_1522_byte_packets)},
+ {"rx_1519_to_2047_byte_packets",
+ offsetof(struct ecore_eth_stats, bb) +
+ offsetof(struct ecore_eth_stats_bb,
+ rx_1519_to_2047_byte_packets)},
+ {"rx_2048_to_4095_byte_packets",
+ offsetof(struct ecore_eth_stats, bb) +
+ offsetof(struct ecore_eth_stats_bb,
+ rx_2048_to_4095_byte_packets)},
+ {"rx_4096_to_9216_byte_packets",
+ offsetof(struct ecore_eth_stats, bb) +
+ offsetof(struct ecore_eth_stats_bb,
+ rx_4096_to_9216_byte_packets)},
+ {"rx_9217_to_16383_byte_packets",
+ offsetof(struct ecore_eth_stats, bb) +
+ offsetof(struct ecore_eth_stats_bb,
+ rx_9217_to_16383_byte_packets)},
+
+ {"tx_1519_to_2047_byte_packets",
+ offsetof(struct ecore_eth_stats, bb) +
+ offsetof(struct ecore_eth_stats_bb,
+ tx_1519_to_2047_byte_packets)},
+ {"tx_2048_to_4095_byte_packets",
+ offsetof(struct ecore_eth_stats, bb) +
+ offsetof(struct ecore_eth_stats_bb,
+ tx_2048_to_4095_byte_packets)},
+ {"tx_4096_to_9216_byte_packets",
+ offsetof(struct ecore_eth_stats, bb) +
+ offsetof(struct ecore_eth_stats_bb,
+ tx_4096_to_9216_byte_packets)},
+ {"tx_9217_to_16383_byte_packets",
+ offsetof(struct ecore_eth_stats, bb) +
+ offsetof(struct ecore_eth_stats_bb,
+ tx_9217_to_16383_byte_packets)},
+
+ {"tx_lpi_entry_count",
+ offsetof(struct ecore_eth_stats, bb) +
+ offsetof(struct ecore_eth_stats_bb, tx_lpi_entry_count)},
+ {"tx_total_collisions",
+ offsetof(struct ecore_eth_stats, bb) +
+ offsetof(struct ecore_eth_stats_bb, tx_total_collisions)},
+};
+
+static const struct rte_qede_xstats_name_off qede_ah_xstats_strings[] = {
+ {"rx_1519_to_max_byte_packets",
+ offsetof(struct ecore_eth_stats, ah) +
+ offsetof(struct ecore_eth_stats_ah,
+ rx_1519_to_max_byte_packets)},
+ {"tx_1519_to_max_byte_packets",
+ offsetof(struct ecore_eth_stats, ah) +
+ offsetof(struct ecore_eth_stats_ah,
+ tx_1519_to_max_byte_packets)},
};
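
The tables above drive the xstats API generically: each entry pairs a user-visible counter name with a byte offset computed by offsetof(), and the query path reads the counter by adding that offset to the base address of the stats structure. For the chip-specific BB/AH tables, two offsetof() calls are chained so the offset reaches into the nested bb/ah sub-struct. Below is a minimal, self-contained sketch of the pattern — struct my_stats and struct name_off are hypothetical stand-ins for ecore_eth_stats_common and rte_qede_xstats_name_off:

#include <inttypes.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct my_stats {		/* stand-in for ecore_eth_stats_common */
	uint64_t rx_ucast_pkts;
	uint64_t tx_ucast_pkts;
};

struct name_off {		/* mirrors rte_qede_xstats_name_off */
	const char *name;
	uint64_t offset;
};

static const struct name_off table[] = {
	{"rx_unicast_packets", offsetof(struct my_stats, rx_ucast_pkts)},
	{"tx_unicast_packets", offsetof(struct my_stats, tx_ucast_pkts)},
};

int main(void)
{
	struct my_stats stats = { .rx_ucast_pkts = 42, .tx_ucast_pkts = 7 };
	size_t i;

	/* read each counter through its recorded byte offset */
	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		printf("%s = %" PRIu64 "\n", table[i].name,
		       *(uint64_t *)((char *)&stats + table[i].offset));
	return 0;
}
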
static const struct rte_qede_xstats_name_off qede_rxq_xstats_strings[] = {
@@ -294,7 +350,6 @@ static void
qede_alloc_etherdev(struct qede_dev *qdev, struct qed_dev_eth_info *info)
{
rte_memcpy(&qdev->dev_info, info, sizeof(*info));
- qdev->num_tc = qdev->dev_info.num_tc;
qdev->ops = qed_ops;
}
@@ -308,9 +363,10 @@ static void qede_print_adapter_info(struct qede_dev *qdev)
DP_INFO(edev, "*********************************\n");
DP_INFO(edev, " DPDK version:%s\n", rte_version());
- DP_INFO(edev, " Chip details : %s%d\n",
+ DP_INFO(edev, " Chip details : %s %c%d\n",
ECORE_IS_BB(edev) ? "BB" : "AH",
- CHIP_REV_IS_A0(edev) ? 0 : 1);
+ 'A' + edev->chip_rev,
+ (int)edev->chip_metal);
snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%d.%d.%d.%d",
info->fw_major, info->fw_minor, info->fw_rev, info->fw_eng);
snprintf(drv_ver, QEDE_PMD_DRV_VER_STR_SIZE, "%s_%s",
@@ -329,6 +385,178 @@ static void qede_print_adapter_info(struct qede_dev *qdev)
}
#endif
+static int
+qede_start_vport(struct qede_dev *qdev, uint16_t mtu)
+{
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_sp_vport_start_params params;
+ struct ecore_hwfn *p_hwfn;
+ int rc;
+ int i;
+
+ memset(&params, 0, sizeof(params));
+ params.vport_id = 0;
+ params.mtu = mtu;
+ /* @DPDK - Disable FW placement */
+ params.zero_placement_offset = 1;
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ params.concrete_fid = p_hwfn->hw_info.concrete_fid;
+ params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ rc = ecore_sp_vport_start(p_hwfn, &params);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Start V-PORT failed %d\n", rc);
+ return rc;
+ }
+ }
+ ecore_reset_vport_stats(edev);
+ DP_INFO(edev, "VPORT started with MTU = %u\n", mtu);
+
+ return 0;
+}
+
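
qede_start_vport() issues one VPORT-START ramrod per hardware function. On 100G (CMT) adapters the device exposes two hwfns, one per engine, so for_each_hwfn executes the body twice; single-engine parts run it once. A rough sketch of that idiom under pared-down, stand-in types (the real for_each_hwfn and ecore structures live in the ecore headers):

#include <stdint.h>

/* Illustrative stand-ins only, not the ecore API. */
struct hwfn { uint16_t opaque_fid; };
struct dev { int num_hwfns; struct hwfn hwfns[2]; };

#define for_each_hwfn(edev, i) \
	for ((i) = 0; (i) < (edev)->num_hwfns; (i)++)

static int send_ramrod(struct hwfn *p_hwfn)
{
	(void)p_hwfn;	/* stub: pretend the ramrod completed */
	return 0;
}

static int start_on_all_engines(struct dev *edev)
{
	int i;

	for_each_hwfn(edev, i) {
		/* one VPORT-START per engine; abort on first failure */
		if (send_ramrod(&edev->hwfns[i]) != 0)
			return -1;
	}
	return 0;
}

int main(void)
{
	struct dev edev = { .num_hwfns = 2 };	/* 100G CMT: two engines */

	return start_on_all_engines(&edev);
}
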
+static int
+qede_stop_vport(struct ecore_dev *edev)
+{
+ struct ecore_hwfn *p_hwfn;
+ uint8_t vport_id;
+ int rc;
+ int i;
+
+ vport_id = 0;
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid,
+ vport_id);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Stop V-PORT failed rc = %d\n", rc);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+/* Activate or deactivate vport via vport-update */
+int qede_activate_vport(struct rte_eth_dev *eth_dev, bool flg)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_sp_vport_update_params params;
+ struct ecore_hwfn *p_hwfn;
+ uint8_t i;
+ int rc = -1;
+
+ memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
+ params.vport_id = 0;
+ params.update_vport_active_rx_flg = 1;
+ params.update_vport_active_tx_flg = 1;
+ params.vport_active_rx_flg = flg;
+ params.vport_active_tx_flg = flg;
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ rc = ecore_sp_vport_update(p_hwfn, &params,
+ ECORE_SPQ_MODE_EBLOCK, NULL);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Failed to update vport\n");
+ break;
+ }
+ }
+ DP_INFO(edev, "vport %s\n", flg ? "activated" : "deactivated");
+ return rc;
+}
+
+static void
+qede_update_sge_tpa_params(struct ecore_sge_tpa_params *sge_tpa_params,
+ uint16_t mtu, bool enable)
+{
+ /* Enable LRO in split mode */
+ sge_tpa_params->tpa_ipv4_en_flg = enable;
+ sge_tpa_params->tpa_ipv6_en_flg = enable;
+ sge_tpa_params->tpa_ipv4_tunn_en_flg = false;
+ sge_tpa_params->tpa_ipv6_tunn_en_flg = false;
+ /* set if tpa enable changes */
+ sge_tpa_params->update_tpa_en_flg = 1;
+ /* set if tpa parameters should be handled */
+ sge_tpa_params->update_tpa_param_flg = enable;
+
+ sge_tpa_params->max_buffers_per_cqe = 20;
+ /* Enable TPA in split mode. In this mode each TPA segment
+ * starts on a new BD, so there is one BD per segment.
+ */
+ sge_tpa_params->tpa_pkt_split_flg = 1;
+ sge_tpa_params->tpa_hdr_data_split_flg = 0;
+ sge_tpa_params->tpa_gro_consistent_flg = 0;
+ sge_tpa_params->tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
+ sge_tpa_params->tpa_max_size = 0x7FFF;
+ sge_tpa_params->tpa_min_size_to_start = mtu / 2;
+ sge_tpa_params->tpa_min_size_to_cont = mtu / 2;
+}
+
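
The TPA thresholds above scale with the MTU: aggregation starts only once a packet reaches mtu/2 bytes and continues only while segments stay above that size, while tpa_max_size caps the aggregated frame at 0x7FFF bytes. A tiny worked example of those values, assuming an illustrative 1500-byte MTU:

#include <stdio.h>

int main(void)
{
	unsigned int mtu = 1500;	/* example MTU, not from the driver */

	printf("tpa_min_size_to_start = %u\n", mtu / 2);	/* 750 */
	printf("tpa_min_size_to_cont  = %u\n", mtu / 2);	/* 750 */
	printf("tpa_max_size          = %u\n", 0x7FFFu);	/* 32767 */
	return 0;
}
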
+/* Enable/disable LRO via vport-update */
+int qede_enable_tpa(struct rte_eth_dev *eth_dev, bool flg)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_sp_vport_update_params params;
+ struct ecore_sge_tpa_params tpa_params;
+ struct ecore_hwfn *p_hwfn;
+ int rc;
+ int i;
+
+ memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
+ memset(&tpa_params, 0, sizeof(struct ecore_sge_tpa_params));
+ qede_update_sge_tpa_params(&tpa_params, qdev->mtu, flg);
+ params.vport_id = 0;
+ params.sge_tpa_params = &tpa_params;
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ rc = ecore_sp_vport_update(p_hwfn, &params,
+ ECORE_SPQ_MODE_EBLOCK, NULL);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Failed to update LRO\n");
+ return -1;
+ }
+ }
+
+ DP_INFO(edev, "LRO is %s\n", flg ? "enabled" : "disabled");
+
+ return 0;
+}
+
+/* Update MTU via vport-update without doing a port restart.
+ * The vport must be deactivated before calling this API.
+ */
+int qede_update_mtu(struct rte_eth_dev *eth_dev, uint16_t mtu)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_sp_vport_update_params params;
+ struct ecore_hwfn *p_hwfn;
+ int rc;
+ int i;
+
+ memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
+ params.vport_id = 0;
+ params.mtu = mtu;
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ rc = ecore_sp_vport_update(p_hwfn, &params,
+ ECORE_SPQ_MODE_EBLOCK, NULL);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Failed to update MTU\n");
+ return -1;
+ }
+ }
+ DP_INFO(edev, "MTU updated to %u\n", mtu);
+
+ return 0;
+}
+
static void qede_set_ucast_cmn_params(struct ecore_filter_ucast *ucast)
{
memset(ucast, 0, sizeof(struct ecore_filter_ucast));
@@ -337,6 +565,43 @@ static void qede_set_ucast_cmn_params(struct ecore_filter_ucast *ucast)
/* ucast->assert_on_error = true; - For debug */
}
+static int
+qed_configure_filter_rx_mode(struct rte_eth_dev *eth_dev,
+ enum qed_filter_rx_mode_type type)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_filter_accept_flags flags;
+
+ memset(&flags, 0, sizeof(flags));
+
+ flags.update_rx_mode_config = 1;
+ flags.update_tx_mode_config = 1;
+ flags.rx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
+ ECORE_ACCEPT_MCAST_MATCHED |
+ ECORE_ACCEPT_BCAST;
+
+ flags.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
+ ECORE_ACCEPT_MCAST_MATCHED |
+ ECORE_ACCEPT_BCAST;
+
+ if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) {
+ flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
+ if (IS_VF(edev)) {
+ flags.tx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
+ DP_INFO(edev, "Enabling Tx unmatched flag for VF\n");
+ }
+ } else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) {
+ flags.rx_accept_filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
+ } else if (type == (QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC |
+ QED_FILTER_RX_MODE_TYPE_PROMISC)) {
+ flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED |
+ ECORE_ACCEPT_MCAST_UNMATCHED;
+ }
+
+ return ecore_filter_accept_cmd(edev, 0, flags, false, false,
+ ECORE_SPQ_MODE_CB, NULL);
+}
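
The rx-mode configuration above is plain bitmask composition: matched unicast and multicast plus broadcast form the baseline accept filter, and each promiscuous variant ORs in the corresponding *_UNMATCHED bit. A hedged sketch of that composition — the flag values below are illustrative, not the real ECORE_ACCEPT_* constants:

#include <stdint.h>
#include <stdio.h>

/* Illustrative values only; ecore defines the real constants. */
#define ACCEPT_UCAST_MATCHED   (1 << 0)
#define ACCEPT_UCAST_UNMATCHED (1 << 1)
#define ACCEPT_MCAST_MATCHED   (1 << 2)
#define ACCEPT_MCAST_UNMATCHED (1 << 3)
#define ACCEPT_BCAST           (1 << 4)

int main(void)
{
	uint8_t rx = ACCEPT_UCAST_MATCHED | ACCEPT_MCAST_MATCHED |
		     ACCEPT_BCAST;		/* baseline accept filter */
	int promisc = 1;			/* example: promisc enabled */

	if (promisc)
		rx |= ACCEPT_UCAST_UNMATCHED;	/* accept all unicast too */
	printf("rx_accept_filter = 0x%02x\n", (unsigned int)rx);
	return 0;
}
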
static void qede_set_cmn_tunn_param(struct ecore_tunnel_info *p_tunn,
uint8_t clss, bool mode, bool mask)
{
@@ -363,6 +628,7 @@ qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
SLIST_FOREACH(tmp, &qdev->uc_list_head, list) {
if ((memcmp(mac_addr, &tmp->mac,
ETHER_ADDR_LEN) == 0) &&
+ ucast->vni == tmp->vni &&
ucast->vlan == tmp->vlan) {
DP_ERR(edev, "Unicast MAC is already added"
" with vlan = %u, vni = %u\n",
@@ -565,49 +831,57 @@ qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr)
qede_mac_addr_add(eth_dev, mac_addr, 0, 0);
}
-static void qede_config_accept_any_vlan(struct qede_dev *qdev, bool action)
+static void qede_config_accept_any_vlan(struct qede_dev *qdev, bool flg)
{
- struct ecore_dev *edev = &qdev->edev;
- struct qed_update_vport_params params = {
- .vport_id = 0,
- .accept_any_vlan = action,
- .update_accept_any_vlan_flg = 1,
- };
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_sp_vport_update_params params;
+ struct ecore_hwfn *p_hwfn;
+ uint8_t i;
int rc;
- /* Proceed only if action actually needs to be performed */
- if (qdev->accept_any_vlan == action)
- return;
-
- rc = qdev->ops->vport_update(edev, &params);
- if (rc) {
- DP_ERR(edev, "Failed to %s accept-any-vlan\n",
- action ? "enable" : "disable");
- } else {
- DP_INFO(edev, "%s accept-any-vlan\n",
- action ? "enabled" : "disabled");
- qdev->accept_any_vlan = action;
+ memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
+ params.vport_id = 0;
+ params.update_accept_any_vlan_flg = 1;
+ params.accept_any_vlan = flg;
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ rc = ecore_sp_vport_update(p_hwfn, &params,
+ ECORE_SPQ_MODE_EBLOCK, NULL);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Failed to configure accept-any-vlan\n");
+ return;
+ }
}
+
+ DP_INFO(edev, "%s accept-any-vlan\n", flg ? "enabled" : "disabled");
}
-static int qede_vlan_stripping(struct rte_eth_dev *eth_dev, bool set_stripping)
+static int qede_vlan_stripping(struct rte_eth_dev *eth_dev, bool flg)
{
- struct qed_update_vport_params vport_update_params;
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_sp_vport_update_params params;
+ struct ecore_hwfn *p_hwfn;
+ uint8_t i;
int rc;
- memset(&vport_update_params, 0, sizeof(vport_update_params));
- vport_update_params.vport_id = 0;
- vport_update_params.update_inner_vlan_removal_flg = 1;
- vport_update_params.inner_vlan_removal_flg = set_stripping;
- rc = qdev->ops->vport_update(edev, &vport_update_params);
- if (rc) {
- DP_ERR(edev, "Update V-PORT failed %d\n", rc);
- return rc;
+ memset(&params, 0, sizeof(struct ecore_sp_vport_update_params));
+ params.vport_id = 0;
+ params.update_inner_vlan_removal_flg = 1;
+ params.inner_vlan_removal_flg = flg;
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ rc = ecore_sp_vport_update(p_hwfn, &params,
+ ECORE_SPQ_MODE_EBLOCK, NULL);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Failed to update vport\n");
+ return -1;
+ }
}
- qdev->vlan_strip_flg = set_stripping;
+ DP_INFO(edev, "VLAN stripping %s\n", flg ? "enabled" : "disabled");
return 0;
}
@@ -741,33 +1015,6 @@ static void qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
mask, rxmode->hw_vlan_strip, rxmode->hw_vlan_filter);
}
-static int qede_init_vport(struct qede_dev *qdev)
-{
- struct ecore_dev *edev = &qdev->edev;
- struct qed_start_vport_params start = {0};
- int rc;
-
- start.remove_inner_vlan = 1;
- start.enable_lro = qdev->enable_lro;
- start.mtu = ETHER_MTU + QEDE_ETH_OVERHEAD;
- start.vport_id = 0;
- start.drop_ttl0 = false;
- start.clear_stats = 1;
- start.handle_ptp_pkts = 0;
-
- rc = qdev->ops->vport_start(edev, &start);
- if (rc) {
- DP_ERR(edev, "Start V-PORT failed %d\n", rc);
- return rc;
- }
-
- DP_INFO(edev,
- "Start vport ramrod passed, vport_id = %d, MTU = %u\n",
- start.vport_id, ETHER_MTU);
-
- return 0;
-}
-
static void qede_prandom_bytes(uint32_t *buff)
{
uint8_t i;
@@ -818,33 +1065,119 @@ int qede_config_rss(struct rte_eth_dev *eth_dev)
return 0;
}
+static void qede_fastpath_start(struct ecore_dev *edev)
+{
+ struct ecore_hwfn *p_hwfn;
+ int i;
+
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ ecore_hw_start_fastpath(p_hwfn);
+ }
+}
+
+static int qede_dev_start(struct rte_eth_dev *eth_dev)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+
+ PMD_INIT_FUNC_TRACE(edev);
+
+ /* Update MTU only if it has changed */
+ if (qdev->mtu != qdev->new_mtu) {
+ if (qede_update_mtu(eth_dev, qdev->new_mtu))
+ goto err;
+ qdev->mtu = qdev->new_mtu;
+ /* If MTU has changed then update TPA too */
+ if (qdev->enable_lro)
+ if (qede_enable_tpa(eth_dev, true))
+ goto err;
+ }
+
+ /* Start queues */
+ if (qede_start_queues(eth_dev))
+ goto err;
+
+ /* The newer SR-IOV PF driver expects RX/TX queues to be started before
+ * enabling RSS. Hence RSS configuration is deferred up to this point.
+ * Also, we would like to retain similar behavior in the PF case, so we
+ * don't do a PF/VF-specific check here.
+ */
+ if (eth_dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
+ if (qede_config_rss(eth_dev))
+ goto err;
+
+ /* Enable vport */
+ if (qede_activate_vport(eth_dev, true))
+ goto err;
+
+ /* Bring-up the link */
+ qede_dev_set_link_state(eth_dev, true);
+
+ /* Start/resume traffic */
+ qede_fastpath_start(edev);
+
+ DP_INFO(edev, "Device started\n");
+
+ return 0;
+err:
+ DP_ERR(edev, "Device start fails\n");
+ return -1; /* common error code is < 0 */
+}
+
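
qede_dev_start() relies on a single goto-based error exit so every failing step funnels into one cleanup/report path, and the ordering is deliberate: queues before RSS (as the comment above notes), RSS before vport activation, link and fastpath last. A minimal sketch of that error-handling shape with hypothetical step functions:

#include <stdio.h>

/* Each step returns 0 on success, non-zero on failure;
 * this stub always succeeds so the sketch runs cleanly. */
static int step_ok(const char *name)
{
	printf("%s\n", name);
	return 0;
}

static int dev_start(void)
{
	if (step_ok("start queues"))
		goto err;
	if (step_ok("config rss"))
		goto err;
	if (step_ok("activate vport"))
		goto err;
	return 0;
err:
	fprintf(stderr, "device start failed\n");
	return -1;		/* common error code is < 0 */
}

int main(void)
{
	return dev_start() ? 1 : 0;
}
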
+static void qede_dev_stop(struct rte_eth_dev *eth_dev)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+
+ PMD_INIT_FUNC_TRACE(edev);
+
+ /* Disable vport */
+ if (qede_activate_vport(eth_dev, false))
+ return;
+
+ if (qdev->enable_lro)
+ qede_enable_tpa(eth_dev, false);
+
+ /* TODO: Do we need to disable LRO or RSS? */
+ /* Stop queues */
+ qede_stop_queues(eth_dev);
+
+ /* Disable traffic */
+ ecore_hw_stop_fastpath(edev); /* TBD - loop */
+
+ /* Bring the link down */
+ qede_dev_set_link_state(eth_dev, false);
+
+ DP_INFO(edev, "Device is stopped\n");
+}
+
static int qede_dev_configure(struct rte_eth_dev *eth_dev)
{
- struct qede_dev *qdev = eth_dev->data->dev_private;
- struct ecore_dev *edev = &qdev->edev;
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
- int rc;
PMD_INIT_FUNC_TRACE(edev);
/* Check requirements for 100G mode */
if (edev->num_hwfns > 1) {
if (eth_dev->data->nb_rx_queues < 2 ||
- eth_dev->data->nb_tx_queues < 2) {
+ eth_dev->data->nb_tx_queues < 2) {
DP_ERR(edev, "100G mode needs min. 2 RX/TX queues\n");
return -EINVAL;
}
if ((eth_dev->data->nb_rx_queues % 2 != 0) ||
- (eth_dev->data->nb_tx_queues % 2 != 0)) {
+ (eth_dev->data->nb_tx_queues % 2 != 0)) {
DP_ERR(edev,
- "100G mode needs even no. of RX/TX queues\n");
+ "100G mode needs even no. of RX/TX queues\n");
return -EINVAL;
}
}
/* Sanity checks and throw warnings */
- if (rxmode->enable_scatter == 1)
+ if (rxmode->enable_scatter)
eth_dev->data->scattered_rx = 1;
if (!rxmode->hw_strip_crc)
@@ -852,83 +1185,77 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
if (!rxmode->hw_ip_checksum)
DP_INFO(edev, "IP/UDP/TCP checksum offload is always enabled "
- "in hw\n");
-
- if (rxmode->enable_lro) {
- qdev->enable_lro = true;
- /* Enable scatter mode for LRO */
- if (!rxmode->enable_scatter)
- eth_dev->data->scattered_rx = 1;
+ "in hw\n");
+ if (rxmode->header_split)
+ DP_INFO(edev, "Header split enable is not supported\n");
+ if (!(rxmode->mq_mode == ETH_MQ_RX_NONE || rxmode->mq_mode ==
+ ETH_MQ_RX_RSS)) {
+ DP_ERR(edev, "Unsupported multi-queue mode\n");
+ return -ENOTSUP;
}
+ /* Flow director mode check */
+ if (qede_check_fdir_support(eth_dev))
+ return -ENOTSUP;
- /* Check for the port restart case */
- if (qdev->state != QEDE_DEV_INIT) {
- rc = qdev->ops->vport_stop(edev, 0);
- if (rc != 0)
- return rc;
+ /* Deallocate resources if held previously. It is needed only if the
+ * queue count has changed from the previous configuration. If it is
+ * going to change, RX/TX queue setup will be called again and the
+ * fastpath pointers will be reinitialized there.
+ */
+ if (qdev->num_tx_queues != eth_dev->data->nb_tx_queues ||
+ qdev->num_rx_queues != eth_dev->data->nb_rx_queues) {
qede_dealloc_fp_resc(eth_dev);
+ /* Proceed with updated queue count */
+ qdev->num_tx_queues = eth_dev->data->nb_tx_queues;
+ qdev->num_rx_queues = eth_dev->data->nb_rx_queues;
+ if (qede_alloc_fp_resc(qdev))
+ return -ENOMEM;
}
- qdev->fp_num_tx = eth_dev->data->nb_tx_queues;
- qdev->fp_num_rx = eth_dev->data->nb_rx_queues;
- qdev->num_queues = qdev->fp_num_tx + qdev->fp_num_rx;
-
- /* Fastpath status block should be initialized before sending
- * VPORT-START in the case of VF. Anyway, do it for both VF/PF.
+ /* VF's MTU has to be set using vport-start, whereas
+ * PF's MTU can be updated via vport-update.
*/
- rc = qede_alloc_fp_resc(qdev);
- if (rc != 0)
- return rc;
-
- /* Issue VPORT-START with default config values to allow
- * other port configurations early on.
- */
- rc = qede_init_vport(qdev);
- if (rc != 0)
- return rc;
-
- if (!(rxmode->mq_mode == ETH_MQ_RX_RSS ||
- rxmode->mq_mode == ETH_MQ_RX_NONE)) {
- DP_ERR(edev, "Unsupported RSS mode\n");
- qdev->ops->vport_stop(edev, 0);
- qede_dealloc_fp_resc(eth_dev);
- return -EINVAL;
+ if (IS_VF(edev)) {
+ if (qede_start_vport(qdev, rxmode->max_rx_pkt_len))
+ return -1;
+ } else {
+ if (qede_update_mtu(eth_dev, rxmode->max_rx_pkt_len))
+ return -1;
}
- /* Flow director mode check */
- rc = qede_check_fdir_support(eth_dev);
- if (rc) {
- qdev->ops->vport_stop(edev, 0);
- qede_dealloc_fp_resc(eth_dev);
- return -EINVAL;
- }
- SLIST_INIT(&qdev->fdir_info.fdir_list_head);
+ qdev->mtu = rxmode->max_rx_pkt_len;
+ qdev->new_mtu = qdev->mtu;
- SLIST_INIT(&qdev->vlan_list_head);
+ /* Configure TPA parameters */
+ if (rxmode->enable_lro) {
+ if (qede_enable_tpa(eth_dev, true))
+ return -EINVAL;
+ /* Enable scatter mode for LRO */
+ if (!rxmode->enable_scatter)
+ eth_dev->data->scattered_rx = 1;
+ }
+ qdev->enable_lro = rxmode->enable_lro;
/* Enable VLAN offloads by default */
qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK |
- ETH_VLAN_FILTER_MASK |
- ETH_VLAN_EXTEND_MASK);
+ ETH_VLAN_FILTER_MASK |
+ ETH_VLAN_EXTEND_MASK);
- qdev->state = QEDE_DEV_CONFIG;
-
- DP_INFO(edev, "Allocated RSS=%d TSS=%d (with CoS=%d)\n",
- (int)QEDE_RSS_COUNT(qdev), (int)QEDE_TSS_COUNT(qdev),
- qdev->num_tc);
+ DP_INFO(edev, "Device configured with RSS=%d TSS=%d\n",
+ QEDE_RSS_COUNT(qdev), QEDE_TSS_COUNT(qdev));
return 0;
}
/* Info about HW descriptor ring limitations */
static const struct rte_eth_desc_lim qede_rx_desc_lim = {
- .nb_max = NUM_RX_BDS_MAX,
+ .nb_max = 0x8000, /* 32K */
.nb_min = 128,
.nb_align = 128 /* lowest common multiple */
};
static const struct rte_eth_desc_lim qede_tx_desc_lim = {
- .nb_max = NUM_TX_BDS_MAX,
+ .nb_max = 0x8000, /* 32K */
.nb_min = 256,
.nb_align = 256,
.nb_seg_max = ETH_TX_MAX_BDS_PER_LSO_PACKET,
@@ -946,7 +1273,7 @@ qede_dev_info_get(struct rte_eth_dev *eth_dev,
PMD_INIT_FUNC_TRACE(edev);
- dev_info->pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+ dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
dev_info->min_rx_bufsize = (uint32_t)QEDE_MIN_RX_BUFF_SIZE;
dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN;
dev_info->rx_desc_lim = qede_rx_desc_lim;
@@ -1103,44 +1430,34 @@ static void qede_poll_sp_sb_cb(void *param)
static void qede_dev_close(struct rte_eth_dev *eth_dev)
{
- struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- int rc;
PMD_INIT_FUNC_TRACE(edev);
- qede_fdir_dealloc_resc(eth_dev);
-
/* dev_stop() shall cleanup fp resources in hw but without releasing
* dma memories and sw structures so that dev_start() can be called
* by the app without reconfiguration. However, in dev_close() we
* can release all the resources and device can be brought up newly
*/
- if (qdev->state != QEDE_DEV_STOP)
+ if (eth_dev->data->dev_started)
qede_dev_stop(eth_dev);
- else
- DP_INFO(edev, "Device is already stopped\n");
-
- rc = qdev->ops->vport_stop(edev, 0);
- if (rc != 0)
- DP_ERR(edev, "Failed to stop VPORT\n");
+ qede_stop_vport(edev);
+ qede_fdir_dealloc_resc(eth_dev);
qede_dealloc_fp_resc(eth_dev);
- qdev->ops->common->slowpath_stop(edev);
+ eth_dev->data->nb_rx_queues = 0;
+ eth_dev->data->nb_tx_queues = 0;
+ qdev->ops->common->slowpath_stop(edev);
qdev->ops->common->remove(edev);
-
rte_intr_disable(&pci_dev->intr_handle);
-
rte_intr_callback_unregister(&pci_dev->intr_handle,
qede_interrupt_handler, (void *)eth_dev);
-
if (edev->num_hwfns > 1)
rte_eal_alarm_cancel(qede_poll_sp_sb_cb, (void *)eth_dev);
-
- qdev->state = QEDE_DEV_INIT; /* Go back to init state */
}
static void
@@ -1153,35 +1470,36 @@ qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
unsigned int rxq_stat_cntrs, txq_stat_cntrs;
struct qede_tx_queue *txq;
- qdev->ops->get_vport_stats(edev, &stats);
+ ecore_get_vport_stats(edev, &stats);
/* RX Stats */
- eth_stats->ipackets = stats.rx_ucast_pkts +
- stats.rx_mcast_pkts + stats.rx_bcast_pkts;
+ eth_stats->ipackets = stats.common.rx_ucast_pkts +
+ stats.common.rx_mcast_pkts + stats.common.rx_bcast_pkts;
- eth_stats->ibytes = stats.rx_ucast_bytes +
- stats.rx_mcast_bytes + stats.rx_bcast_bytes;
+ eth_stats->ibytes = stats.common.rx_ucast_bytes +
+ stats.common.rx_mcast_bytes + stats.common.rx_bcast_bytes;
- eth_stats->ierrors = stats.rx_crc_errors +
- stats.rx_align_errors +
- stats.rx_carrier_errors +
- stats.rx_oversize_packets +
- stats.rx_jabbers + stats.rx_undersize_packets;
+ eth_stats->ierrors = stats.common.rx_crc_errors +
+ stats.common.rx_align_errors +
+ stats.common.rx_carrier_errors +
+ stats.common.rx_oversize_packets +
+ stats.common.rx_jabbers + stats.common.rx_undersize_packets;
- eth_stats->rx_nombuf = stats.no_buff_discards;
+ eth_stats->rx_nombuf = stats.common.no_buff_discards;
- eth_stats->imissed = stats.mftag_filter_discards +
- stats.mac_filter_discards +
- stats.no_buff_discards + stats.brb_truncates + stats.brb_discards;
+ eth_stats->imissed = stats.common.mftag_filter_discards +
+ stats.common.mac_filter_discards +
+ stats.common.no_buff_discards +
+ stats.common.brb_truncates + stats.common.brb_discards;
/* TX stats */
- eth_stats->opackets = stats.tx_ucast_pkts +
- stats.tx_mcast_pkts + stats.tx_bcast_pkts;
+ eth_stats->opackets = stats.common.tx_ucast_pkts +
+ stats.common.tx_mcast_pkts + stats.common.tx_bcast_pkts;
- eth_stats->obytes = stats.tx_ucast_bytes +
- stats.tx_mcast_bytes + stats.tx_bcast_bytes;
+ eth_stats->obytes = stats.common.tx_ucast_bytes +
+ stats.common.tx_mcast_bytes + stats.common.tx_bcast_bytes;
- eth_stats->oerrors = stats.tx_err_drop_pkts;
+ eth_stats->oerrors = stats.common.tx_err_drop_pkts;
/* Queue stats */
rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
@@ -1195,38 +1513,34 @@ qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
" RTE_ETHDEV_QUEUE_STAT_CNTRS config param"
" appropriately and retry.\n");
- for (qid = 0; qid < QEDE_QUEUE_CNT(qdev); qid++) {
- if (qdev->fp_array[qid].type & QEDE_FASTPATH_RX) {
- eth_stats->q_ipackets[i] =
- *(uint64_t *)(
- ((char *)(qdev->fp_array[(qid)].rxq)) +
- offsetof(struct qede_rx_queue,
- rcv_pkts));
- eth_stats->q_errors[i] =
- *(uint64_t *)(
- ((char *)(qdev->fp_array[(qid)].rxq)) +
- offsetof(struct qede_rx_queue,
- rx_hw_errors)) +
- *(uint64_t *)(
- ((char *)(qdev->fp_array[(qid)].rxq)) +
- offsetof(struct qede_rx_queue,
- rx_alloc_errors));
- i++;
- }
+ for_each_rss(qid) {
+ eth_stats->q_ipackets[i] =
+ *(uint64_t *)(
+ ((char *)(qdev->fp_array[qid].rxq)) +
+ offsetof(struct qede_rx_queue,
+ rcv_pkts));
+ eth_stats->q_errors[i] =
+ *(uint64_t *)(
+ ((char *)(qdev->fp_array[qid].rxq)) +
+ offsetof(struct qede_rx_queue,
+ rx_hw_errors)) +
+ *(uint64_t *)(
+ ((char *)(qdev->fp_array[qid].rxq)) +
+ offsetof(struct qede_rx_queue,
+ rx_alloc_errors));
+ i++;
if (i == rxq_stat_cntrs)
break;
}
- for (qid = 0; qid < QEDE_QUEUE_CNT(qdev); qid++) {
- if (qdev->fp_array[qid].type & QEDE_FASTPATH_TX) {
- txq = qdev->fp_array[(qid)].txqs[0];
- eth_stats->q_opackets[j] =
- *((uint64_t *)(uintptr_t)
- (((uint64_t)(uintptr_t)(txq)) +
- offsetof(struct qede_tx_queue,
- xmit_pkts)));
- j++;
- }
+ for_each_tss(qid) {
+ txq = qdev->fp_array[qid].txq;
+ eth_stats->q_opackets[j] =
+ *((uint64_t *)(uintptr_t)
+ (((uint64_t)(uintptr_t)(txq)) +
+ offsetof(struct qede_tx_queue,
+ xmit_pkts)));
+ j++;
if (j == txq_stat_cntrs)
break;
}
@@ -1234,18 +1548,27 @@ qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
static unsigned
qede_get_xstats_count(struct qede_dev *qdev) {
- return RTE_DIM(qede_xstats_strings) +
- (RTE_DIM(qede_rxq_xstats_strings) *
- RTE_MIN(QEDE_RSS_COUNT(qdev),
- RTE_ETHDEV_QUEUE_STAT_CNTRS));
+ if (ECORE_IS_BB(&qdev->edev))
+ return RTE_DIM(qede_xstats_strings) +
+ RTE_DIM(qede_bb_xstats_strings) +
+ (RTE_DIM(qede_rxq_xstats_strings) *
+ RTE_MIN(QEDE_RSS_COUNT(qdev),
+ RTE_ETHDEV_QUEUE_STAT_CNTRS));
+ else
+ return RTE_DIM(qede_xstats_strings) +
+ RTE_DIM(qede_ah_xstats_strings) +
+ (RTE_DIM(qede_rxq_xstats_strings) *
+ RTE_MIN(QEDE_RSS_COUNT(qdev),
+ RTE_ETHDEV_QUEUE_STAT_CNTRS));
}
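
The count above is computed at runtime: the common string table, plus either the BB or the AH table depending on the chip, plus the per-rxq strings multiplied by the capped queue count. RTE_DIM(a) is simply sizeof(a)/sizeof((a)[0]). A small sketch of the same arithmetic with stand-in table sizes:

#include <stdio.h>

#define DIM(a) (sizeof(a) / sizeof((a)[0]))
#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	/* stand-in tables; real sizes come from the string arrays above */
	const char *common[] = {"a", "b", "c"};
	const char *bb[] = {"d", "e"};
	const char *per_rxq[] = {"f"};
	unsigned int rss_count = 8, stat_cntrs_cap = 16;

	unsigned int n = DIM(common) + DIM(bb) +
			 DIM(per_rxq) * MIN(rss_count, stat_cntrs_cap);
	printf("xstats count = %u\n", n);	/* 3 + 2 + 1*8 = 13 */
	return 0;
}
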
static int
-qede_get_xstats_names(__rte_unused struct rte_eth_dev *dev,
+qede_get_xstats_names(struct rte_eth_dev *dev,
struct rte_eth_xstat_name *xstats_names,
__rte_unused unsigned int limit)
{
struct qede_dev *qdev = dev->data->dev_private;
+ struct ecore_dev *edev = &qdev->edev;
const unsigned int stat_cnt = qede_get_xstats_count(qdev);
unsigned int i, qid, stat_idx = 0;
unsigned int rxq_stat_cntrs;
@@ -1259,6 +1582,24 @@ qede_get_xstats_names(__rte_unused struct rte_eth_dev *dev,
stat_idx++;
}
+ if (ECORE_IS_BB(edev)) {
+ for (i = 0; i < RTE_DIM(qede_bb_xstats_strings); i++) {
+ snprintf(xstats_names[stat_idx].name,
+ sizeof(xstats_names[stat_idx].name),
+ "%s",
+ qede_bb_xstats_strings[i].name);
+ stat_idx++;
+ }
+ } else {
+ for (i = 0; i < RTE_DIM(qede_ah_xstats_strings); i++) {
+ snprintf(xstats_names[stat_idx].name,
+ sizeof(xstats_names[stat_idx].name),
+ "%s",
+ qede_ah_xstats_strings[i].name);
+ stat_idx++;
+ }
+ }
+
rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
RTE_ETHDEV_QUEUE_STAT_CNTRS);
for (qid = 0; qid < rxq_stat_cntrs; qid++) {
@@ -1290,7 +1631,7 @@ qede_get_xstats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
if (n < num)
return num;
- qdev->ops->get_vport_stats(edev, &stats);
+ ecore_get_vport_stats(edev, &stats);
for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) {
xstats[stat_idx].value = *(uint64_t *)(((char *)&stats) +
@@ -1299,13 +1640,31 @@ qede_get_xstats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
stat_idx++;
}
+ if (ECORE_IS_BB(edev)) {
+ for (i = 0; i < RTE_DIM(qede_bb_xstats_strings); i++) {
+ xstats[stat_idx].value =
+ *(uint64_t *)(((char *)&stats) +
+ qede_bb_xstats_strings[i].offset);
+ xstats[stat_idx].id = stat_idx;
+ stat_idx++;
+ }
+ } else {
+ for (i = 0; i < RTE_DIM(qede_ah_xstats_strings); i++) {
+ xstats[stat_idx].value =
+ *(uint64_t *)(((char *)&stats) +
+ qede_ah_xstats_strings[i].offset);
+ xstats[stat_idx].id = stat_idx;
+ stat_idx++;
+ }
+ }
+
rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
RTE_ETHDEV_QUEUE_STAT_CNTRS);
for (qid = 0; qid < rxq_stat_cntrs; qid++) {
- if (qdev->fp_array[qid].type & QEDE_FASTPATH_RX) {
+ for_each_rss(qid) {
for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {
xstats[stat_idx].value = *(uint64_t *)(
- ((char *)(qdev->fp_array[(qid)].rxq)) +
+ ((char *)(qdev->fp_array[qid].rxq)) +
qede_rxq_xstats_strings[i].offset);
xstats[stat_idx].id = stat_idx;
stat_idx++;
@@ -1723,6 +2082,8 @@ static int qede_rss_reta_query(struct rte_eth_dev *eth_dev,
return 0;
}
+
+
static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
@@ -1756,19 +2117,17 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
rte_delay_ms(1000);
qdev->mtu = mtu;
/* Fix up RX buf size for all queues of the port */
- for_each_queue(i) {
+ for_each_rss(i) {
fp = &qdev->fp_array[i];
- if (fp->type & QEDE_FASTPATH_RX) {
- bufsz = (uint16_t)rte_pktmbuf_data_room_size(
- fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
- if (dev->data->scattered_rx)
- rx_buf_size = bufsz + QEDE_ETH_OVERHEAD;
- else
- rx_buf_size = mtu + QEDE_ETH_OVERHEAD;
- rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rx_buf_size);
- fp->rxq->rx_buf_size = rx_buf_size;
- DP_INFO(edev, "buf_size adjusted to %u\n", rx_buf_size);
- }
+ bufsz = (uint16_t)rte_pktmbuf_data_room_size(
+ fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
+ if (dev->data->scattered_rx)
+ rx_buf_size = bufsz + QEDE_ETH_OVERHEAD;
+ else
+ rx_buf_size = mtu + QEDE_ETH_OVERHEAD;
+ rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rx_buf_size);
+ fp->rxq->rx_buf_size = rx_buf_size;
+ DP_INFO(edev, "buf_size adjusted to %u\n", rx_buf_size);
}
qede_dev_start(dev);
if (frame_size > ETHER_MAX_LEN)
@@ -1910,6 +2269,8 @@ static int qede_vxlan_tunn_config(struct rte_eth_dev *eth_dev,
uint16_t filter_type;
int rc, i;
+ PMD_INIT_FUNC_TRACE(edev);
+
filter_type = conf->filter_type | qdev->vxlan_filter_type;
/* First determine if the given filter classification is supported */
qede_get_ecore_tunn_params(filter_type, &type, &clss, str);
@@ -2163,7 +2524,7 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
/* Extract key data structures */
adapter = eth_dev->data->dev_private;
edev = &adapter->edev;
- pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+ pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
pci_addr = pci_dev->addr;
PMD_INIT_FUNC_TRACE(edev);
@@ -2177,8 +2538,7 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
eth_dev->tx_pkt_prepare = qede_xmit_prep_pkts;
if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
- DP_NOTICE(edev, false,
- "Skipping device init from secondary process\n");
+ DP_ERR(edev, "Skipping device init from secondary process\n");
return 0;
}
@@ -2195,20 +2555,15 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
}
DP_INFO(edev, "Starting qede probe\n");
-
- rc = qed_ops->common->probe(edev, pci_dev, QED_PROTOCOL_ETH,
- dp_module, dp_level, is_vf);
-
+ rc = qed_ops->common->probe(edev, pci_dev, dp_module,
+ dp_level, is_vf);
if (rc != 0) {
DP_ERR(edev, "qede probe failed rc %d\n", rc);
return -ENODEV;
}
-
qede_update_pf_params(edev);
-
rte_intr_callback_register(&pci_dev->intr_handle,
qede_interrupt_handler, (void *)eth_dev);
-
if (rte_intr_enable(&pci_dev->intr_handle)) {
DP_ERR(edev, "rte_intr_enable() failed\n");
return -ENODEV;
@@ -2306,8 +2661,7 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
ether_addr_copy(&eth_dev->data->mac_addrs[0],
&adapter->primary_mac);
} else {
- DP_NOTICE(edev, false,
- "No VF macaddr assigned\n");
+ DP_ERR(edev, "No VF macaddr assigned\n");
}
}
}
@@ -2321,17 +2675,28 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
do_once = false;
}
- adapter->state = QEDE_DEV_INIT;
+ adapter->num_tx_queues = 0;
+ adapter->num_rx_queues = 0;
+ SLIST_INIT(&adapter->fdir_info.fdir_list_head);
+ SLIST_INIT(&adapter->vlan_list_head);
+ SLIST_INIT(&adapter->uc_list_head);
+ adapter->mtu = ETHER_MTU;
+ adapter->new_mtu = ETHER_MTU;
+ if (!is_vf)
+ if (qede_start_vport(adapter, adapter->mtu))
+ return -1;
- DP_NOTICE(edev, false, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n",
- adapter->primary_mac.addr_bytes[0],
- adapter->primary_mac.addr_bytes[1],
- adapter->primary_mac.addr_bytes[2],
- adapter->primary_mac.addr_bytes[3],
- adapter->primary_mac.addr_bytes[4],
- adapter->primary_mac.addr_bytes[5]);
+ DP_INFO(edev, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n",
+ adapter->primary_mac.addr_bytes[0],
+ adapter->primary_mac.addr_bytes[1],
+ adapter->primary_mac.addr_bytes[2],
+ adapter->primary_mac.addr_bytes[3],
+ adapter->primary_mac.addr_bytes[4],
+ adapter->primary_mac.addr_bytes[5]);
- return rc;
+ DP_INFO(edev, "Device initialized\n");
+
+ return 0;
}
static int qedevf_eth_dev_init(struct rte_eth_dev *eth_dev)
@@ -2346,6 +2711,13 @@ static int qede_eth_dev_init(struct rte_eth_dev *eth_dev)
static int qede_dev_common_uninit(struct rte_eth_dev *eth_dev)
{
+#ifdef RTE_LIBRTE_QEDE_DEBUG_INIT
+ struct qede_dev *qdev = eth_dev->data->dev_private;
+ struct ecore_dev *edev = &qdev->edev;
+
+ PMD_INIT_FUNC_TRACE(edev);
+#endif
+
/* only uninitialize in the primary process */
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;