Diffstat (limited to 'drivers/net/bnxt')
-rw-r--r--  drivers/net/bnxt/bnxt.h                   68
-rw-r--r--  drivers/net/bnxt/bnxt_cpr.c               20
-rw-r--r--  drivers/net/bnxt/bnxt_ethdev.c           626
-rw-r--r--  drivers/net/bnxt/bnxt_filter.c            89
-rw-r--r--  drivers/net/bnxt/bnxt_filter.h             1
-rw-r--r--  drivers/net/bnxt/bnxt_hwrm.c             339
-rw-r--r--  drivers/net/bnxt/bnxt_hwrm.h              14
-rw-r--r--  drivers/net/bnxt/bnxt_irq.c                4
-rw-r--r--  drivers/net/bnxt/bnxt_ring.c              19
-rw-r--r--  drivers/net/bnxt/bnxt_ring.h               5
-rw-r--r--  drivers/net/bnxt/bnxt_rxq.c               82
-rw-r--r--  drivers/net/bnxt/bnxt_rxq.h                8
-rw-r--r--  drivers/net/bnxt/bnxt_rxr.c              126
-rw-r--r--  drivers/net/bnxt/bnxt_rxr.h                3
-rw-r--r--  drivers/net/bnxt/bnxt_stats.c             25
-rw-r--r--  drivers/net/bnxt/bnxt_stats.h              2
-rw-r--r--  drivers/net/bnxt/bnxt_txq.c               15
-rw-r--r--  drivers/net/bnxt/bnxt_txq.h                1
-rw-r--r--  drivers/net/bnxt/bnxt_txr.c               53
-rw-r--r--  drivers/net/bnxt/bnxt_txr.h                2
-rw-r--r--  drivers/net/bnxt/bnxt_vnic.c              14
-rw-r--r--  drivers/net/bnxt/bnxt_vnic.h               1
-rw-r--r--  drivers/net/bnxt/hsi_struct_def_dpdk.h   226
-rw-r--r--  drivers/net/bnxt/rte_pmd_bnxt.c           52
-rw-r--r--  drivers/net/bnxt/rte_pmd_bnxt.h            2
25 files changed, 1377 insertions, 420 deletions
diff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h
index 8ab1c7f8..b5a0badf 100644
--- a/drivers/net/bnxt/bnxt.h
+++ b/drivers/net/bnxt/bnxt.h
@@ -40,10 +40,11 @@
#include <rte_pci.h>
#include <rte_bus_pci.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_memory.h>
#include <rte_lcore.h>
#include <rte_spinlock.h>
+#include <rte_time.h>
#include "bnxt_cpr.h"
@@ -162,6 +163,7 @@ struct bnxt_link_info {
uint16_t link_speed;
uint16_t support_speeds;
uint16_t auto_link_speed;
+ uint16_t force_link_speed;
uint16_t auto_link_speed_mask;
uint32_t preemphasis;
uint8_t phy_type;
@@ -180,6 +182,53 @@ struct rte_flow {
struct bnxt_vnic_info *vnic;
};
+struct bnxt_ptp_cfg {
+#define BNXT_GRCPF_REG_WINDOW_BASE_OUT 0x400
+#define BNXT_GRCPF_REG_SYNC_TIME 0x480
+#define BNXT_CYCLECOUNTER_MASK 0xffffffffffffffffULL
+ struct rte_timecounter tc;
+ struct rte_timecounter tx_tstamp_tc;
+ struct rte_timecounter rx_tstamp_tc;
+ struct bnxt *bp;
+#define BNXT_MAX_TX_TS 1
+ uint16_t rxctl;
+#define BNXT_PTP_MSG_SYNC (1 << 0)
+#define BNXT_PTP_MSG_DELAY_REQ (1 << 1)
+#define BNXT_PTP_MSG_PDELAY_REQ (1 << 2)
+#define BNXT_PTP_MSG_PDELAY_RESP (1 << 3)
+#define BNXT_PTP_MSG_FOLLOW_UP (1 << 8)
+#define BNXT_PTP_MSG_DELAY_RESP (1 << 9)
+#define BNXT_PTP_MSG_PDELAY_RESP_FOLLOW_UP (1 << 10)
+#define BNXT_PTP_MSG_ANNOUNCE (1 << 11)
+#define BNXT_PTP_MSG_SIGNALING (1 << 12)
+#define BNXT_PTP_MSG_MANAGEMENT (1 << 13)
+#define BNXT_PTP_MSG_EVENTS (BNXT_PTP_MSG_SYNC | \
+ BNXT_PTP_MSG_DELAY_REQ | \
+ BNXT_PTP_MSG_PDELAY_REQ | \
+ BNXT_PTP_MSG_PDELAY_RESP)
+ uint8_t tx_tstamp_en:1;
+ int rx_filter;
+
+#define BNXT_PTP_RX_TS_L 0
+#define BNXT_PTP_RX_TS_H 1
+#define BNXT_PTP_RX_SEQ 2
+#define BNXT_PTP_RX_FIFO 3
+#define BNXT_PTP_RX_FIFO_PENDING 0x1
+#define BNXT_PTP_RX_FIFO_ADV 4
+#define BNXT_PTP_RX_REGS 5
+
+#define BNXT_PTP_TX_TS_L 0
+#define BNXT_PTP_TX_TS_H 1
+#define BNXT_PTP_TX_SEQ 2
+#define BNXT_PTP_TX_FIFO 3
+#define BNXT_PTP_TX_FIFO_EMPTY 0x2
+#define BNXT_PTP_TX_REGS 4
+ uint32_t rx_regs[BNXT_PTP_RX_REGS];
+ uint32_t rx_mapped_regs[BNXT_PTP_RX_REGS];
+ uint32_t tx_regs[BNXT_PTP_TX_REGS];
+ uint32_t tx_mapped_regs[BNXT_PTP_TX_REGS];
+};
+
#define BNXT_HWRM_SHORT_REQ_LEN sizeof(struct hwrm_short_input)
struct bnxt {
void *bar0;
@@ -195,10 +244,14 @@ struct bnxt {
#define BNXT_FLAG_JUMBO (1 << 3)
#define BNXT_FLAG_SHORT_CMD (1 << 4)
#define BNXT_FLAG_UPDATE_HASH (1 << 5)
+#define BNXT_FLAG_PTP_SUPPORTED (1 << 6)
+#define BNXT_FLAG_MULTI_HOST (1 << 7)
+#define BNXT_FLAG_INIT_DONE (1 << 31)
#define BNXT_PF(bp) (!((bp)->flags & BNXT_FLAG_VF))
#define BNXT_VF(bp) ((bp)->flags & BNXT_FLAG_VF)
-#define BNXT_NPAR_ENABLED(bp) ((bp)->port_partition_type)
-#define BNXT_NPAR_PF(bp) (BNXT_PF(bp) && BNXT_NPAR_ENABLED(bp))
+#define BNXT_NPAR(bp) ((bp)->port_partition_type)
+#define BNXT_MH(bp) ((bp)->flags & BNXT_FLAG_MULTI_HOST)
+#define BNXT_SINGLE_PF(bp) (BNXT_PF(bp) && !BNXT_NPAR(bp) && !BNXT_MH(bp))
unsigned int rx_nr_rings;
unsigned int rx_cp_nr_rings;
@@ -272,6 +325,7 @@ struct bnxt {
struct bnxt_led_info leds[BNXT_MAX_LED];
uint8_t num_leds;
+ struct bnxt_ptp_cfg *ptp_cfg;
};
int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete);
@@ -281,4 +335,12 @@ int bnxt_rcv_msg_from_vf(struct bnxt *bp, uint16_t vf_id, void *msg);
bool is_bnxt_supported(struct rte_eth_dev *dev);
extern const struct rte_flow_ops bnxt_flow_ops;
+
+extern int bnxt_logtype_driver;
+#define PMD_DRV_LOG_RAW(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, bnxt_logtype_driver, "%s(): " fmt, \
+ __func__, ## args)
+
+#define PMD_DRV_LOG(level, fmt, args...) \
+ PMD_DRV_LOG_RAW(level, fmt, ## args)
#endif
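
A note on the new logging scheme before the next file: PMD_DRV_LOG_RAW prepends the calling function name via __func__ and routes messages to the "pmd.bnxt.driver" dynamic log type registered by bnxt_init_log() later in this patch. A minimal, hypothetical caller (probe_example() is illustrative only, not part of the patch):

static void probe_example(void)
{
	/* Emits "probe_example(): no ring groups left" on log type
	 * "pmd.bnxt.driver"; callers supply the trailing newline. */
	PMD_DRV_LOG(ERR, "no ring groups left\n");
}

The level defaults to RTE_LOG_NOTICE (see bnxt_init_log() below) and can be raised at run time with rte_log_set_level(bnxt_logtype_driver, RTE_LOG_DEBUG).
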
diff --git a/drivers/net/bnxt/bnxt_cpr.c b/drivers/net/bnxt/bnxt_cpr.c
index 19c684ca..737bb060 100644
--- a/drivers/net/bnxt/bnxt_cpr.c
+++ b/drivers/net/bnxt/bnxt_cpr.c
@@ -57,8 +57,17 @@ void bnxt_handle_async_event(struct bnxt *bp,
case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE:
bnxt_link_update_op(bp->eth_dev, 1);
break;
+ case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
+ PMD_DRV_LOG(INFO, "Async event: PF driver unloaded\n");
+ break;
+ case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
+ PMD_DRV_LOG(INFO, "Async event: VF config changed\n");
+ break;
+ case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED:
+ PMD_DRV_LOG(INFO, "Port conn async event\n");
+ break;
default:
- RTE_LOG(DEBUG, PMD, "handle_async_event id = 0x%x\n", event_id);
+ PMD_DRV_LOG(INFO, "handle_async_event id = 0x%x\n", event_id);
break;
}
}
@@ -74,7 +83,7 @@ void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmpl)
int rc;
if (bp->pf.active_vfs <= 0) {
- RTE_LOG(ERR, PMD, "Forwarded VF with no active VFs\n");
+ PMD_DRV_LOG(ERR, "Forwarded VF with no active VFs\n");
return;
}
@@ -93,7 +102,7 @@ void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmpl)
if (fw_vf_id < bp->pf.first_vf_id ||
fw_vf_id >= (bp->pf.first_vf_id) + bp->pf.active_vfs) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"FWD req's source_id 0x%x out of range 0x%x - 0x%x (%d %d)\n",
fw_vf_id, bp->pf.first_vf_id,
(bp->pf.first_vf_id) + bp->pf.active_vfs - 1,
@@ -130,7 +139,7 @@ void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmpl)
/* Forward */
rc = bnxt_hwrm_exec_fwd_resp(bp, fw_vf_id, fwd_cmd, req_len);
if (rc) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Failed to send FWD req VF 0x%x, type 0x%x.\n",
fw_vf_id - bp->pf.first_vf_id,
rte_le_to_cpu_16(fwd_cmd->req_type));
@@ -141,7 +150,7 @@ void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmpl)
reject:
rc = bnxt_hwrm_reject_fwd_resp(bp, fw_vf_id, fwd_cmd, req_len);
if (rc) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Failed to send REJECT req VF 0x%x, type 0x%x.\n",
fw_vf_id - bp->pf.first_vf_id,
rte_le_to_cpu_16(fwd_cmd->req_type));
@@ -165,7 +174,6 @@ int bnxt_alloc_def_cp_ring(struct bnxt *bp)
goto err_out;
cpr->cp_doorbell = bp->pdev->mem_resource[2].addr;
B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
- bp->grp_info[0].cp_fw_ring_id = cp_ring->fw_ring_id;
if (BNXT_PF(bp))
rc = bnxt_hwrm_func_cfg_def_cp(bp);
else
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index 3b6813cb..21c46f83 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -35,7 +35,7 @@
#include <stdbool.h>
#include <rte_dev.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
@@ -58,6 +58,7 @@
#define DRV_MODULE_NAME "bnxt"
static const char bnxt_version[] =
"Broadcom Cumulus driver " DRV_MODULE_NAME "\n";
+int bnxt_logtype_driver;
#define PCI_VENDOR_ID_BROADCOM 0x14E4
@@ -201,7 +202,7 @@ alloc_mem_err:
static int bnxt_init_chip(struct bnxt *bp)
{
- unsigned int i, rss_idx, fw_idx;
+ unsigned int i;
struct rte_eth_link new;
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(bp->eth_dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
@@ -223,25 +224,25 @@ static int bnxt_init_chip(struct bnxt *bp)
rc = bnxt_alloc_all_hwrm_stat_ctxs(bp);
if (rc) {
- RTE_LOG(ERR, PMD, "HWRM stat ctx alloc failure rc: %x\n", rc);
+ PMD_DRV_LOG(ERR, "HWRM stat ctx alloc failure rc: %x\n", rc);
goto err_out;
}
rc = bnxt_alloc_hwrm_rings(bp);
if (rc) {
- RTE_LOG(ERR, PMD, "HWRM ring alloc failure rc: %x\n", rc);
+ PMD_DRV_LOG(ERR, "HWRM ring alloc failure rc: %x\n", rc);
goto err_out;
}
rc = bnxt_alloc_all_hwrm_ring_grps(bp);
if (rc) {
- RTE_LOG(ERR, PMD, "HWRM ring grp alloc failure: %x\n", rc);
+ PMD_DRV_LOG(ERR, "HWRM ring grp alloc failure: %x\n", rc);
goto err_out;
}
rc = bnxt_mq_rx_configure(bp);
if (rc) {
- RTE_LOG(ERR, PMD, "MQ mode configure failure rc: %x\n", rc);
+ PMD_DRV_LOG(ERR, "MQ mode configure failure rc: %x\n", rc);
goto err_out;
}
@@ -251,14 +252,14 @@ static int bnxt_init_chip(struct bnxt *bp)
rc = bnxt_hwrm_vnic_alloc(bp, vnic);
if (rc) {
- RTE_LOG(ERR, PMD, "HWRM vnic %d alloc failure rc: %x\n",
+ PMD_DRV_LOG(ERR, "HWRM vnic %d alloc failure rc: %x\n",
i, rc);
goto err_out;
}
rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic);
if (rc) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"HWRM vnic %d ctx alloc failure rc: %x\n",
i, rc);
goto err_out;
@@ -266,39 +267,24 @@ static int bnxt_init_chip(struct bnxt *bp)
rc = bnxt_hwrm_vnic_cfg(bp, vnic);
if (rc) {
- RTE_LOG(ERR, PMD, "HWRM vnic %d cfg failure rc: %x\n",
+ PMD_DRV_LOG(ERR, "HWRM vnic %d cfg failure rc: %x\n",
i, rc);
goto err_out;
}
rc = bnxt_set_hwrm_vnic_filters(bp, vnic);
if (rc) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"HWRM vnic %d filter failure rc: %x\n",
i, rc);
goto err_out;
}
- if (vnic->rss_table && vnic->hash_type) {
- /*
- * Fill the RSS hash & redirection table with
- * ring group ids for all VNICs
- */
- for (rss_idx = 0, fw_idx = 0;
- rss_idx < HW_HASH_INDEX_SIZE;
- rss_idx++, fw_idx++) {
- if (vnic->fw_grp_ids[fw_idx] ==
- INVALID_HW_RING_ID)
- fw_idx = 0;
- vnic->rss_table[rss_idx] =
- vnic->fw_grp_ids[fw_idx];
- }
- rc = bnxt_hwrm_vnic_rss_cfg(bp, vnic);
- if (rc) {
- RTE_LOG(ERR, PMD,
- "HWRM vnic %d set RSS failure rc: %x\n",
- i, rc);
- goto err_out;
- }
+
+ rc = bnxt_vnic_rss_configure(bp, vnic);
+ if (rc) {
+ PMD_DRV_LOG(ERR,
+ "HWRM vnic set RSS failure rc: %x\n", rc);
+ goto err_out;
}
bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
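
The open-coded RSS table fill deleted above moves into the new bnxt_vnic_rss_configure() helper added to bnxt_vnic.c in this patch. A sketch of the logic implied by the deleted lines (example_vnic_rss_configure() is a reconstruction, not a verbatim copy of the new helper):

static int example_vnic_rss_configure(struct bnxt *bp,
				      struct bnxt_vnic_info *vnic)
{
	unsigned int rss_idx, fw_idx;

	if (!vnic->rss_table || !vnic->hash_type)
		return 0;

	/* Fill the redirection table with ring group IDs, wrapping back
	 * to the first group once INVALID_HW_RING_ID marks the end of
	 * the active groups, then push the table to firmware. */
	for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE;
	     rss_idx++, fw_idx++) {
		if (vnic->fw_grp_ids[fw_idx] == INVALID_HW_RING_ID)
			fw_idx = 0;
		vnic->rss_table[rss_idx] = vnic->fw_grp_ids[fw_idx];
	}
	return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
}
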
@@ -310,7 +296,7 @@ static int bnxt_init_chip(struct bnxt *bp)
}
rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, &bp->vnic_info[0], 0, NULL);
if (rc) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"HWRM cfa l2 rx mask failure rc: %x\n", rc);
goto err_out;
}
@@ -320,10 +306,9 @@ static int bnxt_init_chip(struct bnxt *bp)
!RTE_ETH_DEV_SRIOV(bp->eth_dev).active) &&
bp->eth_dev->data->dev_conf.intr_conf.rxq != 0) {
intr_vector = bp->eth_dev->data->nb_rx_queues;
- RTE_LOG(INFO, PMD, "%s(): intr_vector = %d\n", __func__,
- intr_vector);
+ PMD_DRV_LOG(DEBUG, "intr_vector = %d\n", intr_vector);
if (intr_vector > bp->rx_cp_nr_rings) {
- RTE_LOG(ERR, PMD, "At most %d intr queues supported",
+ PMD_DRV_LOG(ERR, "At most %d intr queues supported",
bp->rx_cp_nr_rings);
return -ENOTSUP;
}
@@ -337,13 +322,13 @@ static int bnxt_init_chip(struct bnxt *bp)
bp->eth_dev->data->nb_rx_queues *
sizeof(int), 0);
if (intr_handle->intr_vec == NULL) {
- RTE_LOG(ERR, PMD, "Failed to allocate %d rx_queues"
+ PMD_DRV_LOG(ERR, "Failed to allocate %d rx_queues"
" intr_vec", bp->eth_dev->data->nb_rx_queues);
return -ENOMEM;
}
- RTE_LOG(DEBUG, PMD, "%s(): intr_handle->intr_vec = %p "
+ PMD_DRV_LOG(DEBUG, "intr_handle->intr_vec = %p "
"intr_handle->nb_efd = %d intr_handle->max_intr = %d\n",
- __func__, intr_handle->intr_vec, intr_handle->nb_efd,
+ intr_handle->intr_vec, intr_handle->nb_efd,
intr_handle->max_intr);
}
@@ -359,14 +344,14 @@ static int bnxt_init_chip(struct bnxt *bp)
rc = bnxt_get_hwrm_link_config(bp, &new);
if (rc) {
- RTE_LOG(ERR, PMD, "HWRM Get link config failure rc: %x\n", rc);
+ PMD_DRV_LOG(ERR, "HWRM Get link config failure rc: %x\n", rc);
goto err_out;
}
if (!bp->link_info.link_up) {
rc = bnxt_set_hwrm_link_config(bp, true);
if (rc) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"HWRM link config failure rc: %x\n", rc);
goto err_out;
}
@@ -378,6 +363,10 @@ static int bnxt_init_chip(struct bnxt *bp)
err_out:
bnxt_free_all_hwrm_resources(bp);
+ /* Some of the error status returned by FW may not be from errno.h */
+ if (rc > 0)
+ rc = -EIO;
+
return rc;
}
@@ -393,7 +382,10 @@ static int bnxt_init_nic(struct bnxt *bp)
{
int rc;
- bnxt_init_ring_grps(bp);
+ rc = bnxt_init_ring_grps(bp);
+ if (rc)
+ return rc;
+
bnxt_init_vnics(bp);
bnxt_init_filters(bp);
@@ -523,6 +515,26 @@ static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
bp->tx_queues = (void *)eth_dev->data->tx_queues;
/* Inherit new configurations */
+ if (eth_dev->data->nb_rx_queues > bp->max_rx_rings ||
+ eth_dev->data->nb_tx_queues > bp->max_tx_rings ||
+ eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues + 1 >
+ bp->max_cp_rings ||
+ eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues >
+ bp->max_stat_ctx ||
+ (uint32_t)(eth_dev->data->nb_rx_queues + 1) > bp->max_ring_grps) {
+ PMD_DRV_LOG(ERR,
+ "Insufficient resources to support requested config\n");
+ PMD_DRV_LOG(ERR,
+ "Num Queues Requested: Tx %d, Rx %d\n",
+ eth_dev->data->nb_tx_queues,
+ eth_dev->data->nb_rx_queues);
+ PMD_DRV_LOG(ERR,
+ "Res available: TxQ %d, RxQ %d, CQ %d Stat %d, Grp %d\n",
+ bp->max_tx_rings, bp->max_rx_rings, bp->max_cp_rings,
+ bp->max_stat_ctx, bp->max_ring_grps);
+ return -ENOSPC;
+ }
+
bp->rx_nr_rings = eth_dev->data->nb_rx_queues;
bp->tx_nr_rings = eth_dev->data->nb_tx_queues;
bp->rx_cp_nr_rings = bp->rx_nr_rings;
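
Worked example for the new resource check: on a function reporting max_cp_rings = 16, at most nb_rx_queues + nb_tx_queues = 15 passes, because one extra completion ring is budgeted for the default (async) completion ring; likewise nb_rx_queues + 1 must fit within max_ring_grps.
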
@@ -540,13 +552,13 @@ static void bnxt_print_link_info(struct rte_eth_dev *eth_dev)
struct rte_eth_link *link = &eth_dev->data->dev_link;
if (link->link_status)
- RTE_LOG(INFO, PMD, "Port %d Link Up - speed %u Mbps - %s\n",
+ PMD_DRV_LOG(INFO, "Port %d Link Up - speed %u Mbps - %s\n",
eth_dev->data->port_id,
(uint32_t)link->link_speed,
(link->link_duplex == ETH_LINK_FULL_DUPLEX) ?
("full-duplex") : ("half-duplex\n"));
else
- RTE_LOG(INFO, PMD, "Port %d Link Down\n",
+ PMD_DRV_LOG(INFO, "Port %d Link Down\n",
eth_dev->data->port_id);
}
@@ -563,7 +575,7 @@ static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
int rc;
if (bp->rx_cp_nr_rings > RTE_ETHDEV_QUEUE_STAT_CNTRS) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"RxQ cnt %d > CONFIG_RTE_ETHDEV_QUEUE_STAT_CNTRS %d\n",
bp->rx_cp_nr_rings, RTE_ETHDEV_QUEUE_STAT_CNTRS);
}
@@ -583,6 +595,7 @@ static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
if (rc)
goto error;
+ bp->flags |= BNXT_FLAG_INIT_DONE;
return 0;
error:
@@ -628,6 +641,7 @@ static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
}
bnxt_set_hwrm_link_config(bp, false);
bnxt_hwrm_port_clr_stats(bp);
+ bp->flags &= ~BNXT_FLAG_INIT_DONE;
bnxt_shutdown_nic(bp);
bp->dev_stopped = 1;
}
@@ -700,25 +714,25 @@ static int bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev,
struct bnxt_filter_info *filter;
if (BNXT_VF(bp)) {
- RTE_LOG(ERR, PMD, "Cannot add MAC address to a VF interface\n");
+ PMD_DRV_LOG(ERR, "Cannot add MAC address to a VF interface\n");
return -ENOTSUP;
}
if (!vnic) {
- RTE_LOG(ERR, PMD, "VNIC not found for pool %d!\n", pool);
+ PMD_DRV_LOG(ERR, "VNIC not found for pool %d!\n", pool);
return -EINVAL;
}
/* Attach requested MAC address to the new l2_filter */
STAILQ_FOREACH(filter, &vnic->filter, next) {
if (filter->mac_index == index) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"MAC addr already existed for pool %d\n", pool);
- return -EINVAL;
+ return 0;
}
}
filter = bnxt_alloc_filter(bp);
if (!filter) {
- RTE_LOG(ERR, PMD, "L2 filter alloc failed\n");
+ PMD_DRV_LOG(ERR, "L2 filter alloc failed\n");
return -ENODEV;
}
STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
@@ -741,7 +755,7 @@ int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete)
if (rc) {
new.link_speed = ETH_LINK_SPEED_100M;
new.link_duplex = ETH_LINK_FULL_DUPLEX;
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Failed to retrieve link rc = 0x%x!\n", rc);
goto out;
}
@@ -832,7 +846,7 @@ static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev,
return -EINVAL;
if (reta_size != HW_HASH_INDEX_SIZE) {
- RTE_LOG(ERR, PMD, "The configured hash table lookup size "
+ PMD_DRV_LOG(ERR, "The configured hash table lookup size "
"(%d) must equal the size supported by the hardware "
"(%d)\n", reta_size, HW_HASH_INDEX_SIZE);
return -EINVAL;
@@ -864,12 +878,12 @@ static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev,
return -EINVAL;
if (reta_size != HW_HASH_INDEX_SIZE) {
- RTE_LOG(ERR, PMD, "The configured hash table lookup size "
+ PMD_DRV_LOG(ERR, "The configured hash table lookup size "
"(%d) must equal the size supported by the hardware "
"(%d)\n", reta_size, HW_HASH_INDEX_SIZE);
return -EINVAL;
}
- /* EW - need to revisit here copying from u64 to u16 */
+ /* EW - need to revisit here copying from uint64_t to uint16_t */
memcpy(reta_conf, vnic->rss_table, reta_size);
if (rte_intr_allow_others(intr_handle)) {
@@ -895,7 +909,7 @@ static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev,
*/
if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
if (!rss_conf->rss_hf)
- RTE_LOG(ERR, PMD, "Hash type NONE\n");
+ PMD_DRV_LOG(ERR, "Hash type NONE\n");
} else {
if (rss_conf->rss_hf & BNXT_ETH_RSS_SUPPORT)
return -EINVAL;
@@ -984,7 +998,7 @@ static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev,
~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
}
if (hash_types) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Unknwon RSS config from firmware (%08x), RSS disabled",
vnic->hash_type);
return -ENOTSUP;
@@ -1032,8 +1046,8 @@ static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev,
{
struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
- if (BNXT_NPAR_PF(bp) || BNXT_VF(bp)) {
- RTE_LOG(ERR, PMD, "Flow Control Settings cannot be modified\n");
+ if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp)) {
+ PMD_DRV_LOG(ERR, "Flow Control Settings cannot be modified\n");
return -ENOTSUP;
}
@@ -1093,10 +1107,10 @@ bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev,
switch (udp_tunnel->prot_type) {
case RTE_TUNNEL_TYPE_VXLAN:
if (bp->vxlan_port_cnt) {
- RTE_LOG(ERR, PMD, "Tunnel Port %d already programmed\n",
+ PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n",
udp_tunnel->udp_port);
if (bp->vxlan_port != udp_tunnel->udp_port) {
- RTE_LOG(ERR, PMD, "Only one port allowed\n");
+ PMD_DRV_LOG(ERR, "Only one port allowed\n");
return -ENOSPC;
}
bp->vxlan_port_cnt++;
@@ -1108,10 +1122,10 @@ bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev,
break;
case RTE_TUNNEL_TYPE_GENEVE:
if (bp->geneve_port_cnt) {
- RTE_LOG(ERR, PMD, "Tunnel Port %d already programmed\n",
+ PMD_DRV_LOG(ERR, "Tunnel Port %d already programmed\n",
udp_tunnel->udp_port);
if (bp->geneve_port != udp_tunnel->udp_port) {
- RTE_LOG(ERR, PMD, "Only one port allowed\n");
+ PMD_DRV_LOG(ERR, "Only one port allowed\n");
return -ENOSPC;
}
bp->geneve_port_cnt++;
@@ -1122,7 +1136,7 @@ bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *eth_dev,
bp->geneve_port_cnt++;
break;
default:
- RTE_LOG(ERR, PMD, "Tunnel type is not supported\n");
+ PMD_DRV_LOG(ERR, "Tunnel type is not supported\n");
return -ENOTSUP;
}
rc = bnxt_hwrm_tunnel_dst_port_alloc(bp, udp_tunnel->udp_port,
@@ -1142,11 +1156,11 @@ bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev,
switch (udp_tunnel->prot_type) {
case RTE_TUNNEL_TYPE_VXLAN:
if (!bp->vxlan_port_cnt) {
- RTE_LOG(ERR, PMD, "No Tunnel port configured yet\n");
+ PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n");
return -EINVAL;
}
if (bp->vxlan_port != udp_tunnel->udp_port) {
- RTE_LOG(ERR, PMD, "Req Port: %d. Configured port: %d\n",
+ PMD_DRV_LOG(ERR, "Req Port: %d. Configured port: %d\n",
udp_tunnel->udp_port, bp->vxlan_port);
return -EINVAL;
}
@@ -1159,11 +1173,11 @@ bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev,
break;
case RTE_TUNNEL_TYPE_GENEVE:
if (!bp->geneve_port_cnt) {
- RTE_LOG(ERR, PMD, "No Tunnel port configured yet\n");
+ PMD_DRV_LOG(ERR, "No Tunnel port configured yet\n");
return -EINVAL;
}
if (bp->geneve_port != udp_tunnel->udp_port) {
- RTE_LOG(ERR, PMD, "Req Port: %d. Configured port: %d\n",
+ PMD_DRV_LOG(ERR, "Req Port: %d. Configured port: %d\n",
udp_tunnel->udp_port, bp->geneve_port);
return -EINVAL;
}
@@ -1175,7 +1189,7 @@ bnxt_udp_tunnel_port_del_op(struct rte_eth_dev *eth_dev,
port = bp->geneve_fw_dst_port_id;
break;
default:
- RTE_LOG(ERR, PMD, "Tunnel type is not supported\n");
+ PMD_DRV_LOG(ERR, "Tunnel type is not supported\n");
return -ENOTSUP;
}
@@ -1232,7 +1246,7 @@ static int bnxt_del_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
new_filter = bnxt_alloc_filter(bp);
if (!new_filter) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"MAC/VLAN filter alloc failed\n");
rc = -ENOMEM;
goto exit;
@@ -1250,7 +1264,7 @@ static int bnxt_del_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
new_filter);
if (rc)
goto exit;
- RTE_LOG(INFO, PMD,
+ PMD_DRV_LOG(INFO,
"Del Vlan filter for %d\n",
vlan_id);
}
@@ -1305,7 +1319,7 @@ static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
}
new_filter = bnxt_alloc_filter(bp);
if (!new_filter) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"MAC/VLAN filter alloc failed\n");
rc = -ENOMEM;
goto exit;
@@ -1325,7 +1339,7 @@ static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
new_filter);
if (rc)
goto exit;
- RTE_LOG(INFO, PMD,
+ PMD_DRV_LOG(INFO,
"Added Vlan filter for %d\n", vlan_id);
cont:
filter = temp_filter;
@@ -1360,7 +1374,7 @@ bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask)
for (i = 0; i < 4095; i++)
bnxt_del_vlan_filter(bp, i);
}
- RTE_LOG(INFO, PMD, "VLAN Filtering: %d\n",
+ PMD_DRV_LOG(DEBUG, "VLAN Filtering: %d\n",
dev->data->dev_conf.rxmode.hw_vlan_filter);
}
@@ -1374,12 +1388,12 @@ bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask)
vnic->vlan_strip = false;
bnxt_hwrm_vnic_cfg(bp, vnic);
}
- RTE_LOG(INFO, PMD, "VLAN Strip Offload: %d\n",
+ PMD_DRV_LOG(DEBUG, "VLAN Strip Offload: %d\n",
dev->data->dev_conf.rxmode.hw_vlan_strip);
}
if (mask & ETH_VLAN_EXTEND_MASK)
- RTE_LOG(ERR, PMD, "Extend VLAN Not supported\n");
+ PMD_DRV_LOG(ERR, "Extend VLAN Not supported\n");
return 0;
}
@@ -1397,7 +1411,6 @@ bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, struct ether_addr *addr)
return;
memcpy(bp->mac_addr, addr, sizeof(bp->mac_addr));
- memcpy(&dev->data->mac_addrs[0], bp->mac_addr, ETHER_ADDR_LEN);
STAILQ_FOREACH(filter, &vnic->filter, next) {
/* Default Filter is at Index 0 */
@@ -1416,7 +1429,7 @@ bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, struct ether_addr *addr)
if (rc)
break;
filter->mac_index = 0;
- RTE_LOG(DEBUG, PMD, "Set MAC addr\n");
+ PMD_DRV_LOG(DEBUG, "Set MAC addr\n");
}
}
@@ -1519,7 +1532,7 @@ static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
ETHER_HDR_LEN - ETHER_CRC_LEN - VLAN_TAG_SIZE * 2;
if (new_mtu < ETHER_MIN_MTU || new_mtu > max_dev_mtu) {
- RTE_LOG(ERR, PMD, "MTU requested must be within (%d, %d)\n",
+ PMD_DRV_LOG(ERR, "MTU requested must be within (%d, %d)\n",
ETHER_MIN_MTU, max_dev_mtu);
return -EINVAL;
}
@@ -1537,7 +1550,7 @@ static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
new_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + VLAN_TAG_SIZE * 2;
eth_dev->data->mtu = new_mtu;
- RTE_LOG(INFO, PMD, "New MTU is %d\n", eth_dev->data->mtu);
+ PMD_DRV_LOG(INFO, "New MTU is %d\n", eth_dev->data->mtu);
for (i = 0; i < bp->nr_vnics; i++) {
struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
@@ -1563,8 +1576,8 @@ bnxt_vlan_pvid_set_op(struct rte_eth_dev *dev, uint16_t pvid, int on)
uint16_t vlan = bp->vlan;
int rc;
- if (BNXT_NPAR_PF(bp) || BNXT_VF(bp)) {
- RTE_LOG(ERR, PMD,
+ if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp)) {
+ PMD_DRV_LOG(ERR,
"PVID cannot be modified for this function\n");
return -ENOTSUP;
}
@@ -1723,15 +1736,15 @@ bnxt_match_and_validate_ether_filter(struct bnxt *bp,
int match = 0;
*ret = 0;
- if (efilter->ether_type != ETHER_TYPE_IPv4 &&
- efilter->ether_type != ETHER_TYPE_IPv6) {
- RTE_LOG(ERR, PMD, "unsupported ether_type(0x%04x) in"
+ if (efilter->ether_type == ETHER_TYPE_IPv4 ||
+ efilter->ether_type == ETHER_TYPE_IPv6) {
+ PMD_DRV_LOG(ERR, "invalid ether_type(0x%04x) in"
" ethertype filter.", efilter->ether_type);
*ret = -EINVAL;
goto exit;
}
if (efilter->queue >= bp->rx_nr_rings) {
- RTE_LOG(ERR, PMD, "Invalid queue %d\n", efilter->queue);
+ PMD_DRV_LOG(ERR, "Invalid queue %d\n", efilter->queue);
*ret = -EINVAL;
goto exit;
}
@@ -1739,7 +1752,7 @@ bnxt_match_and_validate_ether_filter(struct bnxt *bp,
vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
vnic = STAILQ_FIRST(&bp->ff_pool[efilter->queue]);
if (vnic == NULL) {
- RTE_LOG(ERR, PMD, "Invalid queue %d\n", efilter->queue);
+ PMD_DRV_LOG(ERR, "Invalid queue %d\n", efilter->queue);
*ret = -EINVAL;
goto exit;
}
@@ -1790,7 +1803,7 @@ bnxt_ethertype_filter(struct rte_eth_dev *dev,
return 0;
if (arg == NULL) {
- RTE_LOG(ERR, PMD, "arg shouldn't be NULL for operation %u.",
+ PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
filter_op);
return -EINVAL;
}
@@ -1807,7 +1820,7 @@ bnxt_ethertype_filter(struct rte_eth_dev *dev,
bfilter = bnxt_get_unused_filter(bp);
if (bfilter == NULL) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Not enough resources for a new filter.\n");
return -ENOMEM;
}
@@ -1851,11 +1864,11 @@ bnxt_ethertype_filter(struct rte_eth_dev *dev,
next);
bnxt_free_filter(bp, filter1);
} else if (ret == 0) {
- RTE_LOG(ERR, PMD, "No matching filter found\n");
+ PMD_DRV_LOG(ERR, "No matching filter found\n");
}
break;
default:
- RTE_LOG(ERR, PMD, "unsupported operation %u.", filter_op);
+ PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
ret = -EINVAL;
goto error;
}
@@ -1874,7 +1887,7 @@ parse_ntuple_filter(struct bnxt *bp,
uint32_t en = 0;
if (nfilter->queue >= bp->rx_nr_rings) {
- RTE_LOG(ERR, PMD, "Invalid queue %d\n", nfilter->queue);
+ PMD_DRV_LOG(ERR, "Invalid queue %d\n", nfilter->queue);
return -EINVAL;
}
@@ -1886,7 +1899,7 @@ parse_ntuple_filter(struct bnxt *bp,
NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
break;
default:
- RTE_LOG(ERR, PMD, "invalid dst_port mask.");
+ PMD_DRV_LOG(ERR, "invalid dst_port mask.");
return -EINVAL;
}
@@ -1904,7 +1917,7 @@ parse_ntuple_filter(struct bnxt *bp,
en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
break;
default:
- RTE_LOG(ERR, PMD, "invalid protocol mask.");
+ PMD_DRV_LOG(ERR, "invalid protocol mask.");
return -EINVAL;
}
@@ -1916,7 +1929,7 @@ parse_ntuple_filter(struct bnxt *bp,
NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
break;
default:
- RTE_LOG(ERR, PMD, "invalid dst_ip mask.");
+ PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
return -EINVAL;
}
@@ -1928,7 +1941,7 @@ parse_ntuple_filter(struct bnxt *bp,
NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
break;
default:
- RTE_LOG(ERR, PMD, "invalid src_ip mask.");
+ PMD_DRV_LOG(ERR, "invalid src_ip mask.");
return -EINVAL;
}
@@ -1940,7 +1953,7 @@ parse_ntuple_filter(struct bnxt *bp,
NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
break;
default:
- RTE_LOG(ERR, PMD, "invalid src_port mask.");
+ PMD_DRV_LOG(ERR, "invalid src_port mask.");
return -EINVAL;
}
@@ -1953,7 +1966,8 @@ parse_ntuple_filter(struct bnxt *bp,
static struct bnxt_filter_info*
bnxt_match_ntuple_filter(struct bnxt *bp,
- struct bnxt_filter_info *bfilter)
+ struct bnxt_filter_info *bfilter,
+ struct bnxt_vnic_info **mvnic)
{
struct bnxt_filter_info *mfilter = NULL;
int i;
@@ -1972,8 +1986,11 @@ bnxt_match_ntuple_filter(struct bnxt *bp,
bfilter->dst_port == mfilter->dst_port &&
bfilter->dst_port_mask == mfilter->dst_port_mask &&
bfilter->flags == mfilter->flags &&
- bfilter->enables == mfilter->enables)
+ bfilter->enables == mfilter->enables) {
+ if (mvnic)
+ *mvnic = vnic;
return mfilter;
+ }
}
}
return NULL;
@@ -1985,22 +2002,22 @@ bnxt_cfg_ntuple_filter(struct bnxt *bp,
enum rte_filter_op filter_op)
{
struct bnxt_filter_info *bfilter, *mfilter, *filter1;
- struct bnxt_vnic_info *vnic, *vnic0;
+ struct bnxt_vnic_info *vnic, *vnic0, *mvnic;
int ret;
if (nfilter->flags != RTE_5TUPLE_FLAGS) {
- RTE_LOG(ERR, PMD, "only 5tuple is supported.");
+ PMD_DRV_LOG(ERR, "only 5tuple is supported.");
return -EINVAL;
}
if (nfilter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
- RTE_LOG(ERR, PMD, "Ntuple filter: TCP flags not supported\n");
+ PMD_DRV_LOG(ERR, "Ntuple filter: TCP flags not supported\n");
return -EINVAL;
}
bfilter = bnxt_get_unused_filter(bp);
if (bfilter == NULL) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Not enough resources for a new filter.\n");
return -ENOMEM;
}
@@ -2023,15 +2040,25 @@ bnxt_cfg_ntuple_filter(struct bnxt *bp,
bfilter->ethertype = 0x800;
bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
- mfilter = bnxt_match_ntuple_filter(bp, bfilter);
+ mfilter = bnxt_match_ntuple_filter(bp, bfilter, &mvnic);
- if (mfilter != NULL && filter_op == RTE_ETH_FILTER_ADD) {
- RTE_LOG(ERR, PMD, "filter exists.");
+ if (mfilter != NULL && filter_op == RTE_ETH_FILTER_ADD &&
+ bfilter->dst_id == mfilter->dst_id) {
+ PMD_DRV_LOG(ERR, "filter exists.\n");
ret = -EEXIST;
goto free_filter;
+ } else if (mfilter != NULL && filter_op == RTE_ETH_FILTER_ADD &&
+ bfilter->dst_id != mfilter->dst_id) {
+ mfilter->dst_id = vnic->fw_vnic_id;
+ ret = bnxt_hwrm_set_ntuple_filter(bp, mfilter->dst_id, mfilter);
+ STAILQ_REMOVE(&mvnic->filter, mfilter, bnxt_filter_info, next);
+ STAILQ_INSERT_TAIL(&vnic->filter, mfilter, next);
+ PMD_DRV_LOG(ERR, "filter with matching pattern exists.\n");
+ PMD_DRV_LOG(ERR, " Updated it to the new destination queue\n");
+ goto free_filter;
}
if (mfilter == NULL && filter_op == RTE_ETH_FILTER_DELETE) {
- RTE_LOG(ERR, PMD, "filter doesn't exist.");
+ PMD_DRV_LOG(ERR, "filter doesn't exist.");
ret = -ENOENT;
goto free_filter;
}
@@ -2050,11 +2077,11 @@ bnxt_cfg_ntuple_filter(struct bnxt *bp,
}
ret = bnxt_hwrm_clear_ntuple_filter(bp, mfilter);
- STAILQ_REMOVE(&vnic->filter, mfilter, bnxt_filter_info,
- next);
+ STAILQ_REMOVE(&vnic->filter, mfilter, bnxt_filter_info, next);
bnxt_free_filter(bp, mfilter);
- bfilter->fw_l2_filter_id = -1;
+ mfilter->fw_l2_filter_id = -1;
bnxt_free_filter(bp, bfilter);
+ bfilter->fw_l2_filter_id = -1;
}
return 0;
@@ -2076,7 +2103,7 @@ bnxt_ntuple_filter(struct rte_eth_dev *dev,
return 0;
if (arg == NULL) {
- RTE_LOG(ERR, PMD, "arg shouldn't be NULL for operation %u.",
+ PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
filter_op);
return -EINVAL;
}
@@ -2093,7 +2120,7 @@ bnxt_ntuple_filter(struct rte_eth_dev *dev,
filter_op);
break;
default:
- RTE_LOG(ERR, PMD, "unsupported operation %u.", filter_op);
+ PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
ret = -EINVAL;
break;
}
@@ -2295,7 +2322,7 @@ bnxt_parse_fdir_filter(struct bnxt *bp,
vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
vnic = STAILQ_FIRST(&bp->ff_pool[fdir->action.rx_queue]);
if (vnic == NULL) {
- RTE_LOG(ERR, PMD, "Invalid queue %d\n", fdir->action.rx_queue);
+ PMD_DRV_LOG(ERR, "Invalid queue %d\n", fdir->action.rx_queue);
return -EINVAL;
}
@@ -2399,7 +2426,7 @@ bnxt_fdir_filter(struct rte_eth_dev *dev,
/* FALLTHROUGH */
filter = bnxt_get_unused_filter(bp);
if (filter == NULL) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Not enough resources for a new flow.\n");
return -ENOMEM;
}
@@ -2411,12 +2438,12 @@ bnxt_fdir_filter(struct rte_eth_dev *dev,
match = bnxt_match_fdir(bp, filter);
if (match != NULL && filter_op == RTE_ETH_FILTER_ADD) {
- RTE_LOG(ERR, PMD, "Flow already exists.\n");
+ PMD_DRV_LOG(ERR, "Flow already exists.\n");
ret = -EEXIST;
goto free_filter;
}
if (match == NULL && filter_op == RTE_ETH_FILTER_DELETE) {
- RTE_LOG(ERR, PMD, "Flow does not exist.\n");
+ PMD_DRV_LOG(ERR, "Flow does not exist.\n");
ret = -ENOENT;
goto free_filter;
}
@@ -2463,10 +2490,10 @@ bnxt_fdir_filter(struct rte_eth_dev *dev,
case RTE_ETH_FILTER_STATS:
case RTE_ETH_FILTER_INFO:
/* FALLTHROUGH */
- RTE_LOG(ERR, PMD, "operation %u not implemented", filter_op);
+ PMD_DRV_LOG(ERR, "operation %u not implemented", filter_op);
break;
default:
- RTE_LOG(ERR, PMD, "unknown operation %u", filter_op);
+ PMD_DRV_LOG(ERR, "unknown operation %u", filter_op);
ret = -EINVAL;
break;
}
@@ -2487,7 +2514,7 @@ bnxt_filter_ctrl_op(struct rte_eth_dev *dev __rte_unused,
switch (filter_type) {
case RTE_ETH_FILTER_TUNNEL:
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"filter type: %d: To be implemented\n", filter_type);
break;
case RTE_ETH_FILTER_FDIR:
@@ -2505,7 +2532,7 @@ bnxt_filter_ctrl_op(struct rte_eth_dev *dev __rte_unused,
*(const void **)arg = &bnxt_flow_ops;
break;
default:
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Filter type (%d) not supported", filter_type);
ret = -EINVAL;
break;
@@ -2536,7 +2563,260 @@ bnxt_dev_supported_ptypes_get_op(struct rte_eth_dev *dev)
return NULL;
}
+static int bnxt_map_regs(struct bnxt *bp, uint32_t *reg_arr, int count,
+ int reg_win)
+{
+ uint32_t reg_base = *reg_arr & 0xfffff000;
+ uint32_t win_off;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ if ((reg_arr[i] & 0xfffff000) != reg_base)
+ return -ERANGE;
+ }
+ win_off = BNXT_GRCPF_REG_WINDOW_BASE_OUT + (reg_win - 1) * 4;
+ rte_cpu_to_le_32(rte_write32(reg_base, (uint8_t *)bp->bar0 + win_off));
+ return 0;
+}
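
bnxt_map_regs() programs a GRC window: writing a 4 KB-aligned register base into window slot N (1-based) makes that page of register space visible through BAR0 at offset N * 0x1000, which is why the RX registers mapped into window 5 below resolve to 0x5000 + (reg & 0xfff) and the TX registers in window 6 to 0x6000 + (reg & 0xfff). A hedged sketch of a read through such a window (grc_window_read() is a hypothetical helper, not part of the patch):

static uint32_t grc_window_read(struct bnxt *bp, int win, uint32_t reg)
{
	/* Assumes bnxt_map_regs(bp, ..., win) already programmed this
	 * window with the 4 KB-aligned base page containing `reg`. */
	uint32_t off = win * 0x1000 + (reg & 0xfff);

	return rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 + off));
}
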
+
+static int bnxt_map_ptp_regs(struct bnxt *bp)
+{
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ uint32_t *reg_arr;
+ int rc, i;
+
+ reg_arr = ptp->rx_regs;
+ rc = bnxt_map_regs(bp, reg_arr, BNXT_PTP_RX_REGS, 5);
+ if (rc)
+ return rc;
+
+ reg_arr = ptp->tx_regs;
+ rc = bnxt_map_regs(bp, reg_arr, BNXT_PTP_TX_REGS, 6);
+ if (rc)
+ return rc;
+
+ for (i = 0; i < BNXT_PTP_RX_REGS; i++)
+ ptp->rx_mapped_regs[i] = 0x5000 + (ptp->rx_regs[i] & 0xfff);
+
+ for (i = 0; i < BNXT_PTP_TX_REGS; i++)
+ ptp->tx_mapped_regs[i] = 0x6000 + (ptp->tx_regs[i] & 0xfff);
+ return 0;
+}
+
+static void bnxt_unmap_ptp_regs(struct bnxt *bp)
+{
+ rte_cpu_to_le_32(rte_write32(0, (uint8_t *)bp->bar0 +
+ BNXT_GRCPF_REG_WINDOW_BASE_OUT + 16));
+ rte_cpu_to_le_32(rte_write32(0, (uint8_t *)bp->bar0 +
+ BNXT_GRCPF_REG_WINDOW_BASE_OUT + 20));
+}
+
+static uint64_t bnxt_cc_read(struct bnxt *bp)
+{
+ uint64_t ns;
+
+ ns = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
+ BNXT_GRCPF_REG_SYNC_TIME));
+ ns |= (uint64_t)(rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
+ BNXT_GRCPF_REG_SYNC_TIME + 4))) << 32;
+ return ns;
+}
+
+static int bnxt_get_tx_ts(struct bnxt *bp, uint64_t *ts)
+{
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ uint32_t fifo;
+
+ fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
+ ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO]));
+ if (fifo & BNXT_PTP_TX_FIFO_EMPTY)
+ return -EAGAIN;
+
+ fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
+ ptp->tx_mapped_regs[BNXT_PTP_TX_FIFO]));
+ *ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
+ ptp->tx_mapped_regs[BNXT_PTP_TX_TS_L]));
+ *ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
+ ptp->tx_mapped_regs[BNXT_PTP_TX_TS_H])) << 32;
+
+ return 0;
+}
+
+static int bnxt_get_rx_ts(struct bnxt *bp, uint64_t *ts)
+{
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ struct bnxt_pf_info *pf = &bp->pf;
+ uint16_t port_id;
+ uint32_t fifo;
+
+ if (!ptp)
+ return -ENODEV;
+
+ fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
+ ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO]));
+ if (!(fifo & BNXT_PTP_RX_FIFO_PENDING))
+ return -EAGAIN;
+
+ port_id = pf->port_id;
+ rte_cpu_to_le_32(rte_write32(1 << port_id, (uint8_t *)bp->bar0 +
+ ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO_ADV]));
+
+ fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
+ ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO]));
+ if (fifo & BNXT_PTP_RX_FIFO_PENDING) {
+/* bnxt_clr_rx_ts(bp); TBD */
+ return -EBUSY;
+ }
+
+ *ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
+ ptp->rx_mapped_regs[BNXT_PTP_RX_TS_L]));
+ *ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
+ ptp->rx_mapped_regs[BNXT_PTP_RX_TS_H])) << 32;
+
+ return 0;
+}
+
+static int
+bnxt_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
+{
+ uint64_t ns;
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+
+ if (!ptp)
+ return 0;
+
+ ns = rte_timespec_to_ns(ts);
+ /* Set the timecounters to a new value. */
+ ptp->tc.nsec = ns;
+
+ return 0;
+}
+
+static int
+bnxt_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
+{
+ uint64_t ns, systime_cycles;
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+
+ if (!ptp)
+ return 0;
+
+ systime_cycles = bnxt_cc_read(bp);
+ ns = rte_timecounter_update(&ptp->tc, systime_cycles);
+ *ts = rte_ns_to_timespec(ns);
+
+ return 0;
+}
+static int
+bnxt_timesync_enable(struct rte_eth_dev *dev)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ uint32_t shift = 0;
+
+ if (!ptp)
+ return 0;
+
+ ptp->rx_filter = 1;
+ ptp->tx_tstamp_en = 1;
+ ptp->rxctl = BNXT_PTP_MSG_EVENTS;
+
+ if (!bnxt_hwrm_ptp_cfg(bp))
+ bnxt_map_ptp_regs(bp);
+
+ memset(&ptp->tc, 0, sizeof(struct rte_timecounter));
+ memset(&ptp->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
+ memset(&ptp->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
+
+ ptp->tc.cc_mask = BNXT_CYCLECOUNTER_MASK;
+ ptp->tc.cc_shift = shift;
+ ptp->tc.nsec_mask = (1ULL << shift) - 1;
+
+ ptp->rx_tstamp_tc.cc_mask = BNXT_CYCLECOUNTER_MASK;
+ ptp->rx_tstamp_tc.cc_shift = shift;
+ ptp->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
+
+ ptp->tx_tstamp_tc.cc_mask = BNXT_CYCLECOUNTER_MASK;
+ ptp->tx_tstamp_tc.cc_shift = shift;
+ ptp->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
+
+ return 0;
+}
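
With cc_shift fixed at 0 and cc_mask covering all 64 bits, rte_timecounter_update() applies no scaling: each counter delta is accumulated directly as nanoseconds, which implies the free-running counter behind BNXT_GRCPF_REG_SYNC_TIME ticks in nanoseconds — an inference from the chosen constants rather than something the patch states.
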
+
+static int
+bnxt_timesync_disable(struct rte_eth_dev *dev)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+
+ if (!ptp)
+ return 0;
+
+ ptp->rx_filter = 0;
+ ptp->tx_tstamp_en = 0;
+ ptp->rxctl = 0;
+
+ bnxt_hwrm_ptp_cfg(bp);
+
+ bnxt_unmap_ptp_regs(bp);
+
+ return 0;
+}
+
+static int
+bnxt_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
+ struct timespec *timestamp,
+ uint32_t flags __rte_unused)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ uint64_t rx_tstamp_cycles = 0;
+ uint64_t ns;
+
+ if (!ptp)
+ return 0;
+
+ bnxt_get_rx_ts(bp, &rx_tstamp_cycles);
+ ns = rte_timecounter_update(&ptp->rx_tstamp_tc, rx_tstamp_cycles);
+ *timestamp = rte_ns_to_timespec(ns);
+ return 0;
+}
+
+static int
+bnxt_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
+ struct timespec *timestamp)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ uint64_t tx_tstamp_cycles = 0;
+ uint64_t ns;
+
+ if (!ptp)
+ return 0;
+
+ bnxt_get_tx_ts(bp, &tx_tstamp_cycles);
+ ns = rte_timecounter_update(&ptp->tx_tstamp_tc, tx_tstamp_cycles);
+ *timestamp = rte_ns_to_timespec(ns);
+
+ return 0;
+}
+
+static int
+bnxt_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+
+ if (!ptp)
+ return 0;
+
+ ptp->tc.nsec += delta;
+
+ return 0;
+}
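
With the timesync_* ops wired into bnxt_dev_ops below, applications drive the PTP hardware clock through the generic ethdev API. A brief usage sketch against a port that reports PTP support (illustrative only, not from the patch):

#include <stdio.h>
#include <rte_ethdev.h>

static void example_ptp_read(uint16_t port_id)
{
	struct timespec ts;

	rte_eth_timesync_enable(port_id);	/* -> bnxt_timesync_enable */
	if (rte_eth_timesync_read_rx_timestamp(port_id, &ts, 0) == 0)
		printf("PTP RX ts: %ld.%09ld\n",
		       (long)ts.tv_sec, (long)ts.tv_nsec);
	rte_eth_timesync_adjust_time(port_id, 1000);	/* +1 us */
	rte_eth_timesync_disable(port_id);
}
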
static int
bnxt_get_eeprom_length_op(struct rte_eth_dev *dev)
@@ -2546,8 +2826,8 @@ bnxt_get_eeprom_length_op(struct rte_eth_dev *dev)
uint32_t dir_entries;
uint32_t entry_length;
- RTE_LOG(INFO, PMD, "%s(): %04x:%02x:%02x:%02x\n",
- __func__, bp->pdev->addr.domain, bp->pdev->addr.bus,
+ PMD_DRV_LOG(INFO, "%04x:%02x:%02x:%02x\n",
+ bp->pdev->addr.domain, bp->pdev->addr.bus,
bp->pdev->addr.devid, bp->pdev->addr.function);
rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
@@ -2565,8 +2845,8 @@ bnxt_get_eeprom_op(struct rte_eth_dev *dev,
uint32_t index;
uint32_t offset;
- RTE_LOG(INFO, PMD, "%s(): %04x:%02x:%02x:%02x in_eeprom->offset = %d "
- "len = %d\n", __func__, bp->pdev->addr.domain,
+ PMD_DRV_LOG(INFO, "%04x:%02x:%02x:%02x in_eeprom->offset = %d "
+ "len = %d\n", bp->pdev->addr.domain,
bp->pdev->addr.bus, bp->pdev->addr.devid,
bp->pdev->addr.function, in_eeprom->offset, in_eeprom->length);
@@ -2634,13 +2914,13 @@ bnxt_set_eeprom_op(struct rte_eth_dev *dev,
uint8_t index, dir_op;
uint16_t type, ext, ordinal, attr;
- RTE_LOG(INFO, PMD, "%s(): %04x:%02x:%02x:%02x in_eeprom->offset = %d "
- "len = %d\n", __func__, bp->pdev->addr.domain,
+ PMD_DRV_LOG(INFO, "%04x:%02x:%02x:%02x in_eeprom->offset = %d "
+ "len = %d\n", bp->pdev->addr.domain,
bp->pdev->addr.bus, bp->pdev->addr.devid,
bp->pdev->addr.function, in_eeprom->offset, in_eeprom->length);
if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD, "NVM write not supported from a VF\n");
+ PMD_DRV_LOG(ERR, "NVM write not supported from a VF\n");
return -EINVAL;
}
@@ -2727,11 +3007,22 @@ static const struct eth_dev_ops bnxt_dev_ops = {
.rx_queue_count = bnxt_rx_queue_count_op,
.rx_descriptor_status = bnxt_rx_descriptor_status_op,
.tx_descriptor_status = bnxt_tx_descriptor_status_op,
+ .rx_queue_start = bnxt_rx_queue_start,
+ .rx_queue_stop = bnxt_rx_queue_stop,
+ .tx_queue_start = bnxt_tx_queue_start,
+ .tx_queue_stop = bnxt_tx_queue_stop,
.filter_ctrl = bnxt_filter_ctrl_op,
.dev_supported_ptypes_get = bnxt_dev_supported_ptypes_get_op,
.get_eeprom_length = bnxt_get_eeprom_length_op,
.get_eeprom = bnxt_get_eeprom_op,
.set_eeprom = bnxt_set_eeprom_op,
+ .timesync_enable = bnxt_timesync_enable,
+ .timesync_disable = bnxt_timesync_disable,
+ .timesync_read_time = bnxt_timesync_read_time,
+ .timesync_write_time = bnxt_timesync_write_time,
+ .timesync_adjust_time = bnxt_timesync_adjust_time,
+ .timesync_read_rx_timestamp = bnxt_timesync_read_rx_timestamp,
+ .timesync_read_tx_timestamp = bnxt_timesync_read_tx_timestamp,
};
static bool bnxt_vf_pciid(uint16_t id)
@@ -2754,7 +3045,7 @@ static int bnxt_init_board(struct rte_eth_dev *eth_dev)
/* enable device (incl. PCI PM wakeup), and bus-mastering */
if (!pci_dev->mem_resource[0].addr) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Cannot find PCI device base address, aborting\n");
rc = -ENODEV;
goto init_err_disable;
@@ -2765,7 +3056,7 @@ static int bnxt_init_board(struct rte_eth_dev *eth_dev)
bp->bar0 = (void *)pci_dev->mem_resource[0].addr;
if (!bp->bar0) {
- RTE_LOG(ERR, PMD, "Cannot map device registers, aborting\n");
+ PMD_DRV_LOG(ERR, "Cannot map device registers, aborting\n");
rc = -ENOMEM;
goto init_err_release;
}
@@ -2801,7 +3092,7 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
int rc;
if (version_printed++ == 0)
- RTE_LOG(INFO, PMD, "%s\n", bnxt_version);
+ PMD_DRV_LOG(INFO, "%s\n", bnxt_version);
rte_eth_copy_pci_info(eth_dev, pci_dev);
@@ -2818,7 +3109,7 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
rc = bnxt_init_board(eth_dev);
if (rc) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Board initialization failed rc: %x\n", rc);
goto error;
}
@@ -2849,13 +3140,13 @@ skip_init:
memset(mz->addr, 0, mz->len);
mz_phys_addr = mz->iova;
if ((unsigned long)mz->addr == mz_phys_addr) {
- RTE_LOG(WARNING, PMD,
+ PMD_DRV_LOG(WARNING,
"Memzone physical address same as virtual.\n");
- RTE_LOG(WARNING, PMD,
+ PMD_DRV_LOG(WARNING,
"Using rte_mem_virt2iova()\n");
mz_phys_addr = rte_mem_virt2iova(mz->addr);
if (mz_phys_addr == 0) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"unable to map address to physical memory\n");
return -ENOMEM;
}
@@ -2884,13 +3175,13 @@ skip_init:
memset(mz->addr, 0, mz->len);
mz_phys_addr = mz->iova;
if ((unsigned long)mz->addr == mz_phys_addr) {
- RTE_LOG(WARNING, PMD,
+ PMD_DRV_LOG(WARNING,
"Memzone physical address same as virtual.\n");
- RTE_LOG(WARNING, PMD,
+ PMD_DRV_LOG(WARNING,
"Using rte_mem_virt2iova()\n");
mz_phys_addr = rte_mem_virt2iova(mz->addr);
if (mz_phys_addr == 0) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"unable to map address to physical memory\n");
return -ENOMEM;
}
@@ -2905,45 +3196,71 @@ skip_init:
rc = bnxt_alloc_hwrm_resources(bp);
if (rc) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"hwrm resource allocation failure rc: %x\n", rc);
goto error_free;
}
rc = bnxt_hwrm_ver_get(bp);
if (rc)
goto error_free;
- bnxt_hwrm_queue_qportcfg(bp);
+ rc = bnxt_hwrm_queue_qportcfg(bp);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "hwrm queue qportcfg failed\n");
+ goto error_free;
+ }
- bnxt_hwrm_func_qcfg(bp);
+ rc = bnxt_hwrm_func_qcfg(bp);
+ if (rc) {
+ PMD_DRV_LOG(ERR, "hwrm func qcfg failed\n");
+ goto error_free;
+ }
/* Get the MAX capabilities for this function */
rc = bnxt_hwrm_func_qcaps(bp);
if (rc) {
- RTE_LOG(ERR, PMD, "hwrm query capability failure rc: %x\n", rc);
+ PMD_DRV_LOG(ERR, "hwrm query capability failure rc: %x\n", rc);
goto error_free;
}
if (bp->max_tx_rings == 0) {
- RTE_LOG(ERR, PMD, "No TX rings available!\n");
+ PMD_DRV_LOG(ERR, "No TX rings available!\n");
rc = -EBUSY;
goto error_free;
}
eth_dev->data->mac_addrs = rte_zmalloc("bnxt_mac_addr_tbl",
ETHER_ADDR_LEN * bp->max_l2_ctx, 0);
if (eth_dev->data->mac_addrs == NULL) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Failed to alloc %u bytes needed to store MAC addr tbl",
ETHER_ADDR_LEN * bp->max_l2_ctx);
rc = -ENOMEM;
goto error_free;
}
+
+ if (check_zero_bytes(bp->dflt_mac_addr, ETHER_ADDR_LEN)) {
+ PMD_DRV_LOG(ERR,
+ "Invalid MAC addr %02X:%02X:%02X:%02X:%02X:%02X\n",
+ bp->dflt_mac_addr[0], bp->dflt_mac_addr[1],
+ bp->dflt_mac_addr[2], bp->dflt_mac_addr[3],
+ bp->dflt_mac_addr[4], bp->dflt_mac_addr[5]);
+ rc = -EINVAL;
+ goto error_free;
+ }
/* Copy the permanent MAC from the qcap response address now. */
memcpy(bp->mac_addr, bp->dflt_mac_addr, sizeof(bp->mac_addr));
memcpy(&eth_dev->data->mac_addrs[0], bp->mac_addr, ETHER_ADDR_LEN);
+
+ if (bp->max_ring_grps < bp->rx_cp_nr_rings) {
+ /* 1 ring is for default completion ring */
+ PMD_DRV_LOG(ERR, "Insufficient resource: Ring Group\n");
+ rc = -ENOSPC;
+ goto error_free;
+ }
+
bp->grp_info = rte_zmalloc("bnxt_grp_info",
sizeof(*bp->grp_info) * bp->max_ring_grps, 0);
if (!bp->grp_info) {
- RTE_LOG(ERR, PMD,
- "Failed to alloc %zu bytes needed to store group info table\n",
+ PMD_DRV_LOG(ERR,
+ "Failed to alloc %zu bytes to store group info table\n",
sizeof(*bp->grp_info) * bp->max_ring_grps);
rc = -ENOMEM;
goto error_free;
@@ -2955,7 +3272,7 @@ skip_init:
((bp->fw_ver >= ((20 << 24) | (8 << 16))))) {
memset(bp->pf.vf_req_fwd, 0xff, sizeof(bp->pf.vf_req_fwd));
} else {
- RTE_LOG(WARNING, PMD,
+ PMD_DRV_LOG(WARNING,
"Firmware too old for VF mailbox functionality\n");
memset(bp->pf.vf_req_fwd, 0, sizeof(bp->pf.vf_req_fwd));
}
@@ -2975,21 +3292,21 @@ skip_init:
ALLOW_FUNC(HWRM_VNIC_TPA_CFG);
rc = bnxt_hwrm_func_driver_register(bp);
if (rc) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Failed to register driver");
rc = -EBUSY;
goto error_free;
}
- RTE_LOG(INFO, PMD,
+ PMD_DRV_LOG(INFO,
DRV_MODULE_NAME " found at mem %" PRIx64 ", node addr %pM\n",
pci_dev->mem_resource[0].phys_addr,
pci_dev->mem_resource[0].addr);
rc = bnxt_hwrm_func_reset(bp);
if (rc) {
- RTE_LOG(ERR, PMD, "hwrm chip reset failure rc: %x\n", rc);
- rc = -1;
+ PMD_DRV_LOG(ERR, "hwrm chip reset failure rc: %x\n", rc);
+ rc = -EIO;
goto error_free;
}
@@ -3000,13 +3317,13 @@ skip_init:
if (bp->pdev->max_vfs) {
rc = bnxt_hwrm_allocate_vfs(bp, bp->pdev->max_vfs);
if (rc) {
- RTE_LOG(ERR, PMD, "Failed to allocate VFs\n");
+ PMD_DRV_LOG(ERR, "Failed to allocate VFs\n");
goto error_free;
}
} else {
rc = bnxt_hwrm_allocate_pf_only(bp);
if (rc) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Failed to allocate PF resources\n");
goto error_free;
}
@@ -3115,6 +3432,15 @@ bool is_bnxt_supported(struct rte_eth_dev *dev)
return is_device_supported(dev, &bnxt_rte_pmd);
}
+RTE_INIT(bnxt_init_log);
+static void
+bnxt_init_log(void)
+{
+ bnxt_logtype_driver = rte_log_register("pmd.bnxt.driver");
+ if (bnxt_logtype_driver >= 0)
+ rte_log_set_level(bnxt_logtype_driver, RTE_LOG_NOTICE);
+}
+
RTE_PMD_REGISTER_PCI(net_bnxt, bnxt_rte_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_bnxt, bnxt_pci_id_map);
RTE_PMD_REGISTER_KMOD_DEP(net_bnxt, "* igb_uio | uio_pci_generic | vfio-pci");
diff --git a/drivers/net/bnxt/bnxt_filter.c b/drivers/net/bnxt/bnxt_filter.c
index 65d30fb3..032e8eed 100644
--- a/drivers/net/bnxt/bnxt_filter.c
+++ b/drivers/net/bnxt/bnxt_filter.c
@@ -56,7 +56,7 @@ struct bnxt_filter_info *bnxt_alloc_filter(struct bnxt *bp)
/* Find the 1st unused filter from the free_filter_list pool*/
filter = STAILQ_FIRST(&bp->free_filter_list);
if (!filter) {
- RTE_LOG(ERR, PMD, "No more free filter resources\n");
+ PMD_DRV_LOG(ERR, "No more free filter resources\n");
return NULL;
}
STAILQ_REMOVE_HEAD(&bp->free_filter_list, next);
@@ -77,7 +77,7 @@ struct bnxt_filter_info *bnxt_alloc_vf_filter(struct bnxt *bp, uint16_t vf)
filter = rte_zmalloc("bnxt_vf_filter_info", sizeof(*filter), 0);
if (!filter) {
- RTE_LOG(ERR, PMD, "Failed to alloc memory for VF %hu filters\n",
+ PMD_DRV_LOG(ERR, "Failed to alloc memory for VF %hu filters\n",
vf);
return NULL;
}
@@ -145,11 +145,11 @@ void bnxt_free_filter_mem(struct bnxt *bp)
for (i = 0; i < max_filters; i++) {
filter = &bp->filter_info[i];
if (filter->fw_l2_filter_id != ((uint64_t)-1)) {
- RTE_LOG(ERR, PMD, "HWRM filter is not freed??\n");
+ PMD_DRV_LOG(ERR, "HWRM filter is not freed??\n");
/* Call HWRM to try to free filter again */
rc = bnxt_hwrm_clear_l2_filter(bp, filter);
if (rc)
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"HWRM filter cannot be freed rc = %d\n",
rc);
}
@@ -172,7 +172,7 @@ int bnxt_alloc_filter_mem(struct bnxt *bp)
max_filters * sizeof(struct bnxt_filter_info),
0);
if (filter_mem == NULL) {
- RTE_LOG(ERR, PMD, "Failed to alloc memory for %d filters",
+ PMD_DRV_LOG(ERR, "Failed to alloc memory for %d filters",
max_filters);
return -ENOMEM;
}
@@ -187,7 +187,7 @@ struct bnxt_filter_info *bnxt_get_unused_filter(struct bnxt *bp)
/* Find the 1st unused filter from the free_filter_list pool*/
filter = STAILQ_FIRST(&bp->free_filter_list);
if (!filter) {
- RTE_LOG(ERR, PMD, "No more free filter resources\n");
+ PMD_DRV_LOG(ERR, "No more free filter resources\n");
return NULL;
}
STAILQ_REMOVE_HEAD(&bp->free_filter_list, next);
@@ -250,7 +250,7 @@ nxt_non_void_action(const struct rte_flow_action *cur)
}
}
-static inline int check_zero_bytes(const uint8_t *bytes, int len)
+int check_zero_bytes(const uint8_t *bytes, int len)
{
int i;
for (i = 0; i < len; i++)
@@ -281,7 +281,7 @@ bnxt_filter_type_check(const struct rte_flow_item pattern[],
/* FALLTHROUGH */
/* need ntuple match, reset exact match */
if (!use_ntuple) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"VLAN flow cannot use NTUPLE filter\n");
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
@@ -292,7 +292,7 @@ bnxt_filter_type_check(const struct rte_flow_item pattern[],
use_ntuple |= 1;
break;
default:
- RTE_LOG(ERR, PMD, "Unknown Flow type");
+ PMD_DRV_LOG(ERR, "Unknown Flow type");
use_ntuple |= 1;
}
item++;
@@ -329,7 +329,7 @@ bnxt_validate_and_parse_flow_type(struct bnxt *bp,
int dflt_vnic;
use_ntuple = bnxt_filter_type_check(pattern, error);
- RTE_LOG(DEBUG, PMD, "Use NTUPLE %d\n", use_ntuple);
+ PMD_DRV_LOG(DEBUG, "Use NTUPLE %d\n", use_ntuple);
if (use_ntuple < 0)
return use_ntuple;
@@ -791,7 +791,7 @@ bnxt_get_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf,
return f0;
//This flow needs DST MAC which is not same as port/l2
- RTE_LOG(DEBUG, PMD, "Create L2 filter for DST MAC\n");
+ PMD_DRV_LOG(DEBUG, "Create L2 filter for DST MAC\n");
filter1 = bnxt_get_unused_filter(bp);
if (filter1 == NULL)
return NULL;
@@ -806,7 +806,6 @@ bnxt_get_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf,
bnxt_free_filter(bp, filter1);
return NULL;
}
- STAILQ_INSERT_TAIL(&vnic->filter, filter1, next);
return filter1;
}
@@ -829,7 +828,7 @@ bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
int rc;
if (bp->eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
- RTE_LOG(ERR, PMD, "Cannot create flow on RSS queues\n");
+ PMD_DRV_LOG(ERR, "Cannot create flow on RSS queues\n");
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"Cannot create flow on RSS queues");
@@ -858,7 +857,7 @@ bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
rc = -rte_errno;
goto ret;
}
- RTE_LOG(DEBUG, PMD, "Queue index %d\n", act_q->index);
+ PMD_DRV_LOG(DEBUG, "Queue index %d\n", act_q->index);
vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
vnic = STAILQ_FIRST(&bp->ff_pool[act_q->index]);
@@ -876,7 +875,7 @@ bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
goto ret;
}
filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
- RTE_LOG(DEBUG, PMD, "VNIC found\n");
+ PMD_DRV_LOG(DEBUG, "VNIC found\n");
break;
case RTE_FLOW_ACTION_TYPE_DROP:
vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
@@ -957,7 +956,11 @@ bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
goto ret;
}
-//done:
+ if (filter1) {
+ bnxt_free_filter(bp, filter1);
+ filter1->fw_l2_filter_id = -1;
+ }
+
act = nxt_non_void_action(++act);
if (act->type != RTE_FLOW_ACTION_TYPE_END) {
rte_flow_error_set(error, EINVAL,
@@ -987,7 +990,7 @@ bnxt_flow_validate(struct rte_eth_dev *dev,
filter = bnxt_get_unused_filter(bp);
if (filter == NULL) {
- RTE_LOG(ERR, PMD, "Not enough resources for a new flow.\n");
+ PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
return -ENOMEM;
}
@@ -1042,8 +1045,23 @@ bnxt_match_filter(struct bnxt *bp, struct bnxt_filter_info *nf)
!memcmp(mf->dst_ipaddr, nf->dst_ipaddr,
sizeof(nf->dst_ipaddr)) &&
!memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask,
- sizeof(nf->dst_ipaddr_mask)))
- return -EEXIST;
+ sizeof(nf->dst_ipaddr_mask))) {
+ if (mf->dst_id == nf->dst_id)
+ return -EEXIST;
+ /* Same Flow, Different queue
+ * Clear the old ntuple filter
+ */
+ if (nf->filter_type == HWRM_CFA_EM_FILTER)
+ bnxt_hwrm_clear_em_filter(bp, mf);
+ if (nf->filter_type == HWRM_CFA_NTUPLE_FILTER)
+ bnxt_hwrm_clear_ntuple_filter(bp, mf);
+ /* Free the old filter, update flow
+ * with new filter
+ */
+ bnxt_free_filter(bp, mf);
+ flow->filter = nf;
+ return -EXDEV;
+ }
}
}
return 0;
@@ -1059,6 +1077,7 @@ bnxt_flow_create(struct rte_eth_dev *dev,
struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
struct bnxt_filter_info *filter;
struct bnxt_vnic_info *vnic = NULL;
+ bool update_flow = false;
struct rte_flow *flow;
unsigned int i;
int ret = 0;
@@ -1073,13 +1092,13 @@ bnxt_flow_create(struct rte_eth_dev *dev,
ret = bnxt_flow_agrs_validate(attr, pattern, actions, error);
if (ret != 0) {
- RTE_LOG(ERR, PMD, "Not a valid flow.\n");
+ PMD_DRV_LOG(ERR, "Not a valid flow.\n");
goto free_flow;
}
filter = bnxt_get_unused_filter(bp);
if (filter == NULL) {
- RTE_LOG(ERR, PMD, "Not enough resources for a new flow.\n");
+ PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
goto free_flow;
}
@@ -1089,9 +1108,17 @@ bnxt_flow_create(struct rte_eth_dev *dev,
goto free_filter;
ret = bnxt_match_filter(bp, filter);
- if (ret != 0) {
- RTE_LOG(DEBUG, PMD, "Flow already exists.\n");
+ if (ret == -EEXIST) {
+ PMD_DRV_LOG(DEBUG, "Flow already exists.\n");
+ /* Clear the filter that was created as part of
+ * validate_and_parse_flow() above
+ */
+ bnxt_hwrm_clear_l2_filter(bp, filter);
goto free_filter;
+ } else if (ret == -EXDEV) {
+ PMD_DRV_LOG(DEBUG, "Flow with same pattern exists");
+ PMD_DRV_LOG(DEBUG, "Updating with different destination\n");
+ update_flow = true;
}
if (filter->filter_type == HWRM_CFA_EM_FILTER) {
@@ -1114,22 +1141,29 @@ bnxt_flow_create(struct rte_eth_dev *dev,
if (!ret) {
flow->filter = filter;
flow->vnic = vnic;
- RTE_LOG(ERR, PMD, "Successfully created flow.\n");
+ if (update_flow) {
+ ret = -EXDEV;
+ goto free_flow;
+ }
+ PMD_DRV_LOG(ERR, "Successfully created flow.\n");
STAILQ_INSERT_TAIL(&vnic->flow_list, flow, next);
return flow;
}
free_filter:
- filter->fw_l2_filter_id = -1;
bnxt_free_filter(bp, filter);
free_flow:
if (ret == -EEXIST)
rte_flow_error_set(error, ret,
RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
"Matching Flow exists.");
+ else if (ret == -EXDEV)
+ rte_flow_error_set(error, ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Flow with pattern exists, updating destination queue");
else
rte_flow_error_set(error, -ret,
RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
- "Failed to create flow.");
+ "Failed to create flow.");
rte_free(flow);
flow = NULL;
return flow;
@@ -1147,12 +1181,13 @@ bnxt_flow_destroy(struct rte_eth_dev *dev,
ret = bnxt_match_filter(bp, filter);
if (ret == 0)
- RTE_LOG(ERR, PMD, "Could not find matching flow\n");
+ PMD_DRV_LOG(ERR, "Could not find matching flow\n");
if (filter->filter_type == HWRM_CFA_EM_FILTER)
ret = bnxt_hwrm_clear_em_filter(bp, filter);
if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);
+ bnxt_hwrm_clear_l2_filter(bp, filter);
if (!ret) {
STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
rte_free(flow);
diff --git a/drivers/net/bnxt/bnxt_filter.h b/drivers/net/bnxt/bnxt_filter.h
index 2591a87e..a3c702df 100644
--- a/drivers/net/bnxt/bnxt_filter.h
+++ b/drivers/net/bnxt/bnxt_filter.h
@@ -97,6 +97,7 @@ struct bnxt_filter_info *bnxt_get_unused_filter(struct bnxt *bp);
void bnxt_free_filter(struct bnxt *bp, struct bnxt_filter_info *filter);
struct bnxt_filter_info *bnxt_get_l2_filter(struct bnxt *bp,
struct bnxt_filter_info *nf, struct bnxt_vnic_info *vnic);
+int check_zero_bytes(const uint8_t *bytes, int len);
#define NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_MACADDR \
HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index d2c800dd..b7843afe 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -79,7 +79,7 @@ static int page_getenum(size_t size)
return 22;
if (size <= 1 << 30)
return 30;
- RTE_LOG(ERR, PMD, "Page size %zu out of range\n", size);
+ PMD_DRV_LOG(ERR, "Page size %zu out of range\n", size);
return sizeof(void *) * 8 - 1;
}
@@ -161,7 +161,7 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
}
if (i >= HWRM_CMD_TIMEOUT) {
- RTE_LOG(ERR, PMD, "Error sending msg 0x%04x\n",
+ PMD_DRV_LOG(ERR, "Error sending msg 0x%04x\n",
req->req_type);
goto err_ret;
}
@@ -194,8 +194,7 @@ err_ret:
#define HWRM_CHECK_RESULT() do {\
if (rc) { \
- RTE_LOG(ERR, PMD, "%s failed rc:%d\n", \
- __func__, rc); \
+ PMD_DRV_LOG(ERR, "failed rc:%d\n", rc); \
rte_spinlock_unlock(&bp->hwrm_lock); \
return rc; \
} \
@@ -204,18 +203,15 @@ err_ret:
if (resp->resp_len >= 16) { \
struct hwrm_err_output *tmp_hwrm_err_op = \
(void *)resp; \
- RTE_LOG(ERR, PMD, \
- "%s error %d:%d:%08x:%04x\n", \
- __func__, \
+ PMD_DRV_LOG(ERR, \
+ "error %d:%d:%08x:%04x\n", \
rc, tmp_hwrm_err_op->cmd_err, \
rte_le_to_cpu_32(\
tmp_hwrm_err_op->opaque_0), \
rte_le_to_cpu_16(\
tmp_hwrm_err_op->opaque_1)); \
- } \
- else { \
- RTE_LOG(ERR, PMD, \
- "%s error %d\n", __func__, rc); \
+ } else { \
+ PMD_DRV_LOG(ERR, "error %d\n", rc); \
} \
rte_spinlock_unlock(&bp->hwrm_lock); \
return rc; \
@@ -369,7 +365,7 @@ int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
//TODO: Is there a better way to add VLANs to each VNIC in case of VMDQ
if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) &&
conf->pool_map[j].pools & (1UL << j)) {
- RTE_LOG(DEBUG, PMD,
+ PMD_DRV_LOG(DEBUG,
"Add vlan %u to vmdq pool %u\n",
conf->pool_map[j].vlan_id, j);
@@ -427,12 +423,95 @@ int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
return rc;
}
+int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
+{
+ struct hwrm_port_mac_cfg_input req = {.req_type = 0};
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ uint32_t flags = 0;
+ int rc;
+
+ if (!ptp)
+ return 0;
+
+ HWRM_PREP(req, PORT_MAC_CFG);
+
+ if (ptp->rx_filter)
+ flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
+ else
+ flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE;
+ if (ptp->tx_tstamp_en)
+ flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE;
+ else
+ flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE;
+ req.flags = rte_cpu_to_le_32(flags);
+ req.enables =
+ rte_cpu_to_le_32(PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
+ req.rx_ts_capture_ptp_msg_type = rte_cpu_to_le_16(ptp->rxctl);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ HWRM_UNLOCK();
+
+ return rc;
+}
+
+static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
+{
+ int rc = 0;
+ struct hwrm_port_mac_ptp_qcfg_input req = {.req_type = 0};
+ struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+
+/* if (bp->hwrm_spec_code < 0x10801 || ptp) TBD */
+ if (ptp)
+ return 0;
+
+ HWRM_PREP(req, PORT_MAC_PTP_QCFG);
+
+ req.port_id = rte_cpu_to_le_16(bp->pf.port_id);
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT();
+
+ if (!(resp->flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS))
+ return 0;
+
+ ptp = rte_zmalloc("ptp_cfg", sizeof(*ptp), 0);
+ if (!ptp)
+ return -ENOMEM;
+
+ ptp->rx_regs[BNXT_PTP_RX_TS_L] =
+ rte_le_to_cpu_32(resp->rx_ts_reg_off_lower);
+ ptp->rx_regs[BNXT_PTP_RX_TS_H] =
+ rte_le_to_cpu_32(resp->rx_ts_reg_off_upper);
+ ptp->rx_regs[BNXT_PTP_RX_SEQ] =
+ rte_le_to_cpu_32(resp->rx_ts_reg_off_seq_id);
+ ptp->rx_regs[BNXT_PTP_RX_FIFO] =
+ rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo);
+ ptp->rx_regs[BNXT_PTP_RX_FIFO_ADV] =
+ rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo_adv);
+ ptp->tx_regs[BNXT_PTP_TX_TS_L] =
+ rte_le_to_cpu_32(resp->tx_ts_reg_off_lower);
+ ptp->tx_regs[BNXT_PTP_TX_TS_H] =
+ rte_le_to_cpu_32(resp->tx_ts_reg_off_upper);
+ ptp->tx_regs[BNXT_PTP_TX_SEQ] =
+ rte_le_to_cpu_32(resp->tx_ts_reg_off_seq_id);
+ ptp->tx_regs[BNXT_PTP_TX_FIFO] =
+ rte_le_to_cpu_32(resp->tx_ts_reg_off_fifo);
+
+ ptp->bp = bp;
+ bp->ptp_cfg = ptp;
+
+ return 0;
+}
+
int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
int rc = 0;
struct hwrm_func_qcaps_input req = {.req_type = 0 };
struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
uint16_t new_max_vfs;
+ uint32_t flags;
int i;
HWRM_PREP(req, FUNC_QCAPS);
@@ -444,6 +523,7 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
HWRM_CHECK_RESULT();
bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
+ flags = rte_le_to_cpu_32(resp->flags);
if (BNXT_PF(bp)) {
bp->pf.port_id = resp->port_id;
bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
@@ -461,7 +541,7 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
getpagesize(),
getpagesize());
if (bp->pf.vf_info[i].vlan_table == NULL)
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Fail to alloc VLAN table for VF %d\n",
i);
else
@@ -472,7 +552,7 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
getpagesize(),
getpagesize());
if (bp->pf.vf_info[i].vlan_as_table == NULL)
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Alloc VLAN AS table for VF %d fail\n",
i);
else
@@ -500,8 +580,16 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
bp->max_vnics = 1;
}
bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
- if (BNXT_PF(bp))
+ if (BNXT_PF(bp)) {
bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);
+ if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED) {
+ bp->flags |= BNXT_FLAG_PTP_SUPPORTED;
+ PMD_DRV_LOG(INFO, "PTP SUPPORTED\n");
+ HWRM_UNLOCK();
+ bnxt_hwrm_ptp_qcfg(bp);
+ }
+ }
+
HWRM_UNLOCK();
return rc;
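
FUNC_QCAPS now reports PTP_SUPPORTED, which triggers bnxt_hwrm_ptp_qcfg() above to discover the direct-access timestamp registers; bnxt_hwrm_ptp_cfg() later switches Rx/Tx capture on and off. Assuming the generic ethdev timesync callbacks are wired to these helpers in bnxt_ethdev.c (changed by this commit but not shown in this excerpt), an application would consume the feature roughly as in this sketch:

#include <stdio.h>
#include <time.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Sketch: fetch the Rx timestamp of a PTP event packet flagged by the
 * driver. rte_eth_timesync_enable(port_id) is assumed to have been
 * called once at setup; that call is what reaches bnxt_hwrm_ptp_cfg(). */
static void
ptp_rx_example(uint16_t port_id, const struct rte_mbuf *m)
{
        struct timespec ts;

        if ((m->ol_flags & PKT_RX_IEEE1588_TMST) &&
            rte_eth_timesync_read_rx_timestamp(port_id, &ts, 0) == 0)
                printf("PTP Rx timestamp: %lld.%09ld\n",
                       (long long)ts.tv_sec, ts.tv_nsec);
}
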
@@ -549,8 +637,13 @@ int bnxt_hwrm_func_driver_register(struct bnxt *bp)
sizeof(bp->pf.vf_req_fwd)));
}
- req.async_event_fwd[0] |= rte_cpu_to_le_32(0x1); /* TODO: Use MACRO */
- //memset(req.async_event_fwd, 0xff, sizeof(req.async_event_fwd));
+ req.async_event_fwd[0] |=
+ rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_LINK_STATUS_CHANGE |
+ ASYNC_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED |
+ ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE);
+ req.async_event_fwd[1] |=
+ rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD |
+ ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
@@ -584,13 +677,13 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
HWRM_CHECK_RESULT();
- RTE_LOG(INFO, PMD, "%d.%d.%d:%d.%d.%d\n",
+ PMD_DRV_LOG(INFO, "%d.%d.%d:%d.%d.%d\n",
resp->hwrm_intf_maj, resp->hwrm_intf_min,
resp->hwrm_intf_upd,
resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
bp->fw_ver = (resp->hwrm_fw_maj << 24) | (resp->hwrm_fw_min << 16) |
(resp->hwrm_fw_bld << 8) | resp->hwrm_fw_rsvd;
- RTE_LOG(INFO, PMD, "Driver HWRM version: %d.%d.%d\n",
+ PMD_DRV_LOG(INFO, "Driver HWRM version: %d.%d.%d\n",
HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);
my_version = HWRM_VERSION_MAJOR << 16;
@@ -602,28 +695,28 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
fw_version |= resp->hwrm_intf_upd;
if (resp->hwrm_intf_maj != HWRM_VERSION_MAJOR) {
- RTE_LOG(ERR, PMD, "Unsupported firmware API version\n");
+ PMD_DRV_LOG(ERR, "Unsupported firmware API version\n");
rc = -EINVAL;
goto error;
}
if (my_version != fw_version) {
- RTE_LOG(INFO, PMD, "BNXT Driver/HWRM API mismatch.\n");
+ PMD_DRV_LOG(INFO, "BNXT Driver/HWRM API mismatch.\n");
if (my_version < fw_version) {
- RTE_LOG(INFO, PMD,
+ PMD_DRV_LOG(INFO,
"Firmware API version is newer than driver.\n");
- RTE_LOG(INFO, PMD,
+ PMD_DRV_LOG(INFO,
"The driver may be missing features.\n");
} else {
- RTE_LOG(INFO, PMD,
+ PMD_DRV_LOG(INFO,
"Firmware API version is older than driver.\n");
- RTE_LOG(INFO, PMD,
+ PMD_DRV_LOG(INFO,
"Not all driver features may be functional.\n");
}
}
if (bp->max_req_len > resp->max_req_win_len) {
- RTE_LOG(ERR, PMD, "Unsupported request length\n");
+ PMD_DRV_LOG(ERR, "Unsupported request length\n");
rc = -EINVAL;
}
bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
@@ -646,7 +739,7 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
bp->hwrm_cmd_resp_dma_addr =
rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
if (bp->hwrm_cmd_resp_dma_addr == 0) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Unable to map response buffer to physical memory.\n");
rc = -ENOMEM;
goto error;
@@ -658,7 +751,7 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
(dev_caps_cfg &
HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_INPUTUIRED)) {
- RTE_LOG(DEBUG, PMD, "Short command supported\n");
+ PMD_DRV_LOG(DEBUG, "Short command supported\n");
rte_free(bp->hwrm_short_cmd_req_addr);
@@ -673,7 +766,7 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
rte_mem_virt2iova(bp->hwrm_short_cmd_req_addr);
if (bp->hwrm_short_cmd_req_dma_addr == 0) {
rte_free(bp->hwrm_short_cmd_req_addr);
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Unable to map buffer to physical memory.\n");
rc = -ENOMEM;
goto error;
@@ -722,7 +815,7 @@ static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
/* Setting Fixed Speed. But AutoNeg is ON, So disable it */
if (bp->link_info.auto_mode && conf->link_speed) {
req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
- RTE_LOG(DEBUG, PMD, "Disabling AutoNeg\n");
+ PMD_DRV_LOG(DEBUG, "Disabling AutoNeg\n");
}
req.flags = rte_cpu_to_le_32(conf->phy_flags);
@@ -738,7 +831,8 @@ static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
}
/* AutoNeg - Advertise speeds specified. */
- if (conf->auto_link_speed_mask) {
+ if (conf->auto_link_speed_mask &&
+ !(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE)) {
req.auto_mode =
HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
req.auto_link_speed_mask =
@@ -761,7 +855,7 @@ static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
} else {
req.flags =
rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
- RTE_LOG(INFO, PMD, "Force Link Down\n");
+ PMD_DRV_LOG(INFO, "Force Link Down\n");
}
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
@@ -801,12 +895,22 @@ static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
+ link_info->force_link_speed = rte_le_to_cpu_16(resp->force_link_speed);
link_info->phy_ver[0] = resp->phy_maj;
link_info->phy_ver[1] = resp->phy_min;
link_info->phy_ver[2] = resp->phy_bld;
HWRM_UNLOCK();
+ PMD_DRV_LOG(DEBUG, "Link Speed %d\n", link_info->link_speed);
+ PMD_DRV_LOG(DEBUG, "Auto Mode %d\n", link_info->auto_mode);
+ PMD_DRV_LOG(DEBUG, "Support Speeds %x\n", link_info->support_speeds);
+ PMD_DRV_LOG(DEBUG, "Auto Link Speed %x\n", link_info->auto_link_speed);
+ PMD_DRV_LOG(DEBUG, "Auto Link Speed Mask %x\n",
+ link_info->auto_link_speed_mask);
+ PMD_DRV_LOG(DEBUG, "Forced Link Speed %x\n",
+ link_info->force_link_speed);
+
return rc;
}
@@ -879,7 +983,7 @@ int bnxt_hwrm_ring_alloc(struct bnxt *bp,
req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
break;
default:
- RTE_LOG(ERR, PMD, "hwrm alloc invalid ring type %d\n",
+ PMD_DRV_LOG(ERR, "hwrm alloc invalid ring type %d\n",
ring_type);
HWRM_UNLOCK();
return -1;
@@ -893,22 +997,22 @@ int bnxt_hwrm_ring_alloc(struct bnxt *bp,
rc = rte_le_to_cpu_16(resp->error_code);
switch (ring_type) {
case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"hwrm_ring_alloc cp failed. rc:%d\n", rc);
HWRM_UNLOCK();
return rc;
case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"hwrm_ring_alloc rx failed. rc:%d\n", rc);
HWRM_UNLOCK();
return rc;
case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"hwrm_ring_alloc tx failed. rc:%d\n", rc);
HWRM_UNLOCK();
return rc;
default:
- RTE_LOG(ERR, PMD, "Invalid ring. rc:%d\n", rc);
+ PMD_DRV_LOG(ERR, "Invalid ring. rc:%d\n", rc);
HWRM_UNLOCK();
return rc;
}
@@ -940,19 +1044,19 @@ int bnxt_hwrm_ring_free(struct bnxt *bp,
switch (ring_type) {
case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
- RTE_LOG(ERR, PMD, "hwrm_ring_free cp failed. rc:%d\n",
+ PMD_DRV_LOG(ERR, "hwrm_ring_free cp failed. rc:%d\n",
rc);
return rc;
case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
- RTE_LOG(ERR, PMD, "hwrm_ring_free rx failed. rc:%d\n",
+ PMD_DRV_LOG(ERR, "hwrm_ring_free rx failed. rc:%d\n",
rc);
return rc;
case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
- RTE_LOG(ERR, PMD, "hwrm_ring_free tx failed. rc:%d\n",
+ PMD_DRV_LOG(ERR, "hwrm_ring_free tx failed. rc:%d\n",
rc);
return rc;
default:
- RTE_LOG(ERR, PMD, "Invalid ring, rc:%d\n", rc);
+ PMD_DRV_LOG(ERR, "Invalid ring, rc:%d\n", rc);
return rc;
}
}
@@ -1046,7 +1150,6 @@ int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);
HWRM_UNLOCK();
- bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;
return rc;
}
@@ -1077,7 +1180,7 @@ int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
/* map ring groups to this vnic */
- RTE_LOG(DEBUG, PMD, "Alloc VNIC. Start %x, End %x\n",
+ PMD_DRV_LOG(DEBUG, "Alloc VNIC. Start %x, End %x\n",
vnic->start_grp_id, vnic->end_grp_id);
for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++)
vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
@@ -1097,7 +1200,7 @@ int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
HWRM_UNLOCK();
- RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id);
+ PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
return rc;
}
@@ -1167,7 +1270,7 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
struct bnxt_plcmodes_cfg pmodes;
if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
- RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id);
+ PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
return rc;
}
@@ -1232,7 +1335,7 @@ int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
- RTE_LOG(DEBUG, PMD, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
+ PMD_DRV_LOG(DEBUG, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
return rc;
}
HWRM_PREP(req, VNIC_QCFG);
@@ -1284,7 +1387,7 @@ int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
HWRM_UNLOCK();
- RTE_LOG(DEBUG, PMD, "VNIC RSS Rule %x\n", vnic->rss_rule);
+ PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
return rc;
}
@@ -1297,7 +1400,7 @@ int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
bp->hwrm_cmd_resp_addr;
if (vnic->rss_rule == 0xffff) {
- RTE_LOG(DEBUG, PMD, "VNIC RSS Rule %x\n", vnic->rss_rule);
+ PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
return rc;
}
HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE);
@@ -1321,7 +1424,7 @@ int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;
if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
- RTE_LOG(DEBUG, PMD, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
+ PMD_DRV_LOG(DEBUG, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
return rc;
}
@@ -1569,19 +1672,15 @@ int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
- if (i >= bp->rx_cp_nr_rings)
+ if (i >= bp->rx_cp_nr_rings) {
cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
- else
+ } else {
cpr = bp->rx_queues[i]->cp_ring;
+ bp->grp_info[i].fw_stats_ctx = -1;
+ }
if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
- /*
- * TODO. Need a better way to reset grp_info.stats_ctx
- * for Rx rings only. stats_ctx is not saved for Tx
- * in grp_info.
- */
- bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
if (rc)
return rc;
}
@@ -1641,7 +1740,6 @@ static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
bnxt_hwrm_ring_free(bp, cp_ring,
HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
cp_ring->fw_ring_id = INVALID_HW_RING_ID;
- bp->grp_info[idx].cp_fw_ring_id = INVALID_HW_RING_ID;
memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
sizeof(*cpr->cp_desc_ring));
cpr->cp_raw_cons = 0;
@@ -1697,10 +1795,17 @@ int bnxt_free_all_hwrm_rings(struct bnxt *bp)
rxr->rx_ring_struct->ring_size *
sizeof(*rxr->rx_buf_ring));
rxr->rx_prod = 0;
+ }
+ ring = rxr->ag_ring_struct;
+ if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+ bnxt_hwrm_ring_free(bp, ring,
+ HWRM_RING_FREE_INPUT_RING_TYPE_RX);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
memset(rxr->ag_buf_ring, 0,
- rxr->ag_ring_struct->ring_size *
- sizeof(*rxr->ag_buf_ring));
+ rxr->ag_ring_struct->ring_size *
+ sizeof(*rxr->ag_buf_ring));
rxr->ag_prod = 0;
+ bp->grp_info[i].ag_fw_ring_id = INVALID_HW_RING_ID;
}
if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
bnxt_free_cp_ring(bp, cpr, idx);
@@ -1761,7 +1866,7 @@ int bnxt_alloc_hwrm_resources(struct bnxt *bp)
bp->hwrm_cmd_resp_dma_addr =
rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
if (bp->hwrm_cmd_resp_dma_addr == 0) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"unable to map response address to physical memory\n");
return -ENOMEM;
}
@@ -1797,7 +1902,7 @@ bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic)
STAILQ_FOREACH(flow, &vnic->flow_list, next) {
filter = flow->filter;
- RTE_LOG(ERR, PMD, "filter type %d\n", filter->filter_type);
+ PMD_DRV_LOG(ERR, "filter type %d\n", filter->filter_type);
if (filter->filter_type == HWRM_CFA_EM_FILTER)
rc = bnxt_hwrm_clear_em_filter(bp, filter);
else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
@@ -1938,8 +2043,12 @@ static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
eth_link_speed =
HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
break;
+ case ETH_LINK_SPEED_100G:
+ eth_link_speed =
+ HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB;
+ break;
default:
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Unsupported link speed %d; default to AUTO\n",
conf_link_speed);
break;
@@ -1950,7 +2059,7 @@ static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
#define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
- ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G)
+ ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G)
static int bnxt_valid_link_speed(uint32_t link_speed, uint16_t port_id)
{
@@ -1963,20 +2072,20 @@ static int bnxt_valid_link_speed(uint32_t link_speed, uint16_t port_id)
one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
if (one_speed & (one_speed - 1)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Invalid advertised speeds (%u) for port %u\n",
link_speed, port_id);
return -EINVAL;
}
if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Unsupported advertised speed (%u) for port %u\n",
link_speed, port_id);
return -EINVAL;
}
} else {
if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Unsupported advertised speeds (%u) for port %u\n",
link_speed, port_id);
return -EINVAL;
@@ -2014,6 +2123,8 @@ bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
if (link_speed & ETH_LINK_SPEED_50G)
ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
+ if (link_speed & ETH_LINK_SPEED_100G)
+ ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB;
return ret;
}
@@ -2046,9 +2157,12 @@ static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
eth_link_speed = ETH_SPEED_NUM_50G;
break;
+ case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
+ eth_link_speed = ETH_SPEED_NUM_100G;
+ break;
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
default:
- RTE_LOG(ERR, PMD, "HWRM link speed %d not defined\n",
+ PMD_DRV_LOG(ERR, "HWRM link speed %d not defined\n",
hw_link_speed);
break;
}
@@ -2068,7 +2182,7 @@ static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
eth_link_duplex = ETH_LINK_HALF_DUPLEX;
break;
default:
- RTE_LOG(ERR, PMD, "HWRM link duplex %d not defined\n",
+ PMD_DRV_LOG(ERR, "HWRM link duplex %d not defined\n",
hw_link_duplex);
break;
}
@@ -2082,7 +2196,7 @@ int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
if (rc) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Get link config failed with rc %d\n", rc);
goto exit;
}
@@ -2107,7 +2221,7 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
struct bnxt_link_info link_req;
uint16_t speed, autoneg;
- if (BNXT_NPAR_PF(bp) || BNXT_VF(bp))
+ if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp))
return 0;
rc = bnxt_valid_link_speed(dev_conf->link_speeds,
@@ -2123,7 +2237,9 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
- if (autoneg == 1) {
+ /* Autoneg can be done only when the FW allows */
+ if (autoneg == 1 && !(bp->link_info.auto_link_speed ||
+ bp->link_info.force_link_speed)) {
link_req.phy_flags |=
HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
link_req.auto_link_speed_mask =
@@ -2136,12 +2252,18 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
bp->link_info.media_type ==
HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
- RTE_LOG(ERR, PMD, "10GBase-T devices must autoneg\n");
+ PMD_DRV_LOG(ERR, "10GBase-T devices must autoneg\n");
return -EINVAL;
}
link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
- link_req.link_speed = speed;
+ /* If user wants a particular speed try that first. */
+ if (speed)
+ link_req.link_speed = speed;
+ else if (bp->link_info.force_link_speed)
+ link_req.link_speed = bp->link_info.force_link_speed;
+ else
+ link_req.link_speed = bp->link_info.auto_link_speed;
}
link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
link_req.auto_pause = bp->link_info.auto_pause;
@@ -2150,7 +2272,7 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
port_phy_cfg:
rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
if (rc) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Set link config failed with rc %d\n", rc);
}
@@ -2163,6 +2285,7 @@ int bnxt_hwrm_func_qcfg(struct bnxt *bp)
{
struct hwrm_func_qcfg_input req = {0};
struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+ uint16_t flags;
int rc = 0;
HWRM_PREP(req, FUNC_QCFG);
@@ -2174,6 +2297,9 @@ int bnxt_hwrm_func_qcfg(struct bnxt *bp)
/* Hard Coded.. 0xfff VLAN ID mask */
bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
+ flags = rte_le_to_cpu_16(resp->flags);
+ if (BNXT_PF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST))
+ bp->flags |= BNXT_FLAG_MULTI_HOST;
switch (resp->port_partition_type) {
case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
@@ -2323,11 +2449,11 @@ static void reserve_resources_from_vf(struct bnxt *bp,
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
if (rc) {
- RTE_LOG(ERR, PMD, "hwrm_func_qcaps failed rc:%d\n", rc);
+ PMD_DRV_LOG(ERR, "hwrm_func_qcaps failed rc:%d\n", rc);
copy_func_cfg_to_qcaps(cfg_req, resp);
} else if (resp->error_code) {
rc = rte_le_to_cpu_16(resp->error_code);
- RTE_LOG(ERR, PMD, "hwrm_func_qcaps error %d\n", rc);
+ PMD_DRV_LOG(ERR, "hwrm_func_qcaps error %d\n", rc);
copy_func_cfg_to_qcaps(cfg_req, resp);
}
@@ -2358,11 +2484,11 @@ int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
if (rc) {
- RTE_LOG(ERR, PMD, "hwrm_func_qcfg failed rc:%d\n", rc);
+ PMD_DRV_LOG(ERR, "hwrm_func_qcfg failed rc:%d\n", rc);
return -1;
} else if (resp->error_code) {
rc = rte_le_to_cpu_16(resp->error_code);
- RTE_LOG(ERR, PMD, "hwrm_func_qcfg error %d\n", rc);
+ PMD_DRV_LOG(ERR, "hwrm_func_qcfg error %d\n", rc);
return -1;
}
rc = rte_le_to_cpu_16(resp->vlan);
@@ -2398,7 +2524,7 @@ int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
int rc;
if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD, "Attempt to allcoate VFs on a VF!\n");
+ PMD_DRV_LOG(ERR, "Attempt to allcoate VFs on a VF!\n");
return -1;
}
@@ -2425,7 +2551,7 @@ int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
size_t req_buf_sz;
if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD, "Attempt to allcoate VFs on a VF!\n");
+ PMD_DRV_LOG(ERR, "Attempt to allcoate VFs on a VF!\n");
return -1;
}
@@ -2491,9 +2617,9 @@ int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
if (rc || resp->error_code) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Failed to initizlie VF %d\n", i);
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Not all VFs available. (%d, %d)\n",
rc, resp->error_code);
HWRM_UNLOCK();
@@ -2643,7 +2769,7 @@ int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
req.req_buf_page_addr[0] =
rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf));
if (req.req_buf_page_addr[0] == 0) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"unable to map buffer address to physical memory\n");
return -ENOMEM;
}
@@ -3065,7 +3191,7 @@ int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
return -ENOMEM;
dma_handle = rte_mem_virt2iova(buf);
if (dma_handle == 0) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"unable to map response address to physical memory\n");
return -ENOMEM;
}
@@ -3101,7 +3227,7 @@ int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
dma_handle = rte_mem_virt2iova(buf);
if (dma_handle == 0) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"unable to map response address to physical memory\n");
return -ENOMEM;
}
@@ -3162,7 +3288,7 @@ int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
dma_handle = rte_mem_virt2iova(buf);
if (dma_handle == 0) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"unable to map response address to physical memory\n");
return -ENOMEM;
}
@@ -3219,19 +3345,19 @@ static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
if (req.vnic_id_tbl_addr == 0) {
HWRM_UNLOCK();
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"unable to map VNIC ID table address to physical memory\n");
return -ENOMEM;
}
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
if (rc) {
HWRM_UNLOCK();
- RTE_LOG(ERR, PMD, "hwrm_func_vf_vnic_query failed rc:%d\n", rc);
+ PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query failed rc:%d\n", rc);
return -1;
} else if (resp->error_code) {
rc = rte_le_to_cpu_16(resp->error_code);
HWRM_UNLOCK();
- RTE_LOG(ERR, PMD, "hwrm_func_vf_vnic_query error %d\n", rc);
+ PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query error %d\n", rc);
return -1;
}
rc = rte_le_to_cpu_32(resp->vnic_id_cnt);
@@ -3362,7 +3488,7 @@ int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
}
}
/* Could not find a default VNIC. */
- RTE_LOG(ERR, PMD, "No default VNIC\n");
+ PMD_DRV_LOG(ERR, "No default VNIC\n");
exit:
rte_free(vnic_ids);
return -1;
@@ -3452,7 +3578,7 @@ int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
if (filter->fw_em_filter_id == UINT64_MAX)
return 0;
- RTE_LOG(ERR, PMD, "Clear EM filter\n");
+ PMD_DRV_LOG(ERR, "Clear EM filter\n");
HWRM_PREP(req, CFA_EM_FLOW_FREE);
req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id);
@@ -3575,7 +3701,34 @@ int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,
HWRM_UNLOCK();
filter->fw_ntuple_filter_id = -1;
- filter->fw_l2_filter_id = -1;
return 0;
}
+
+int bnxt_vnic_rss_configure(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+ unsigned int rss_idx, fw_idx, i;
+
+ if (vnic->rss_table && vnic->hash_type) {
+ /*
+ * Fill the RSS hash & redirection table with
+ * ring group ids for all VNICs
+ */
+ for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE;
+ rss_idx++, fw_idx++) {
+ for (i = 0; i < bp->rx_cp_nr_rings; i++) {
+ fw_idx %= bp->rx_cp_nr_rings;
+ if (vnic->fw_grp_ids[fw_idx] !=
+ INVALID_HW_RING_ID)
+ break;
+ fw_idx++;
+ }
+ if (i == bp->rx_cp_nr_rings)
+ return 0;
+ vnic->rss_table[rss_idx] =
+ vnic->fw_grp_ids[fw_idx];
+ }
+ return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
+ }
+ return 0;
+}
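
bnxt_vnic_rss_configure() is what lets per-queue stop/start coexist with RSS: it walks the ring-group array modulo the ring count and skips the entries that bnxt_rx_queue_stop() invalidated. A standalone model of that scan, with sizes and sentinels as stand-ins for the driver's HW_HASH_INDEX_SIZE and INVALID_HW_RING_ID:

#include <stdint.h>

#define HASH_TABLE_SIZE 128     /* stand-in for HW_HASH_INDEX_SIZE (0x80) */
#define INVALID_ID      0xFFFFu /* stand-in for INVALID_HW_RING_ID */

/* Fill reta[] round-robin from grp[], skipping ring groups whose queue
 * is stopped. Returns 0 when every queue is stopped (nothing to program),
 * 1 when the table is ready to be sent to firmware. */
static int
fill_reta(uint16_t reta[HASH_TABLE_SIZE], const uint16_t *grp,
          unsigned int nr_rings)
{
        unsigned int rss_idx, fw_idx = 0, i;

        for (rss_idx = 0; rss_idx < HASH_TABLE_SIZE; rss_idx++, fw_idx++) {
                for (i = 0; i < nr_rings; i++) {
                        fw_idx %= nr_rings;
                        if (grp[fw_idx] != INVALID_ID)
                                break;  /* found a live ring group */
                        fw_idx++;
                }
                if (i == nr_rings)
                        return 0;
                reta[rss_idx] = grp[fw_idx];
        }
        return 1;
}
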
diff --git a/drivers/net/bnxt/bnxt_hwrm.h b/drivers/net/bnxt/bnxt_hwrm.h
index 85083e61..f11e72a3 100644
--- a/drivers/net/bnxt/bnxt_hwrm.h
+++ b/drivers/net/bnxt/bnxt_hwrm.h
@@ -42,6 +42,17 @@ struct bnxt_filter_info;
struct bnxt_cp_ring_info;
#define HWRM_SEQ_ID_INVALID -1U
+/* Convert Bit field location to value */
+#define ASYNC_CMPL_EVENT_ID_LINK_STATUS_CHANGE \
+ (1 << HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE)
+#define ASYNC_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED \
+ (1 << HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED)
+#define ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE \
+ (1 << HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE)
+#define ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD \
+ (1 << (HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD - 32))
+#define ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE \
+ (1 << (HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE - 32))
int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp,
struct bnxt_vnic_info *vnic);
@@ -175,4 +186,7 @@ int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
uint16_t dir_ordinal, uint16_t dir_ext,
uint16_t dir_attr, const uint8_t *data,
size_t data_len);
+int bnxt_hwrm_ptp_cfg(struct bnxt *bp);
+int bnxt_vnic_rss_configure(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic);
#endif
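
The ASYNC_CMPL_EVENT_ID_* macros turn HSI event IDs into bit positions within the func_drv_rgtr async_event_fwd[] bitmap; IDs of 32 and above land in the second 32-bit word, hence the "- 32" for PF_DRVR_UNLOAD and VF_CFG_CHANGE. The general rule, as a sketch:

#include <stdint.h>

/* Generic form of the macros above: async event id N occupies bit
 * (N % 32) of 32-bit word (N / 32) in the forwarding bitmap, so e.g.
 * LINK_STATUS_CHANGE (0x00) is fwd[0] bit 0 and PF_DRVR_UNLOAD (0x20)
 * is fwd[1] bit 0. */
static void
async_event_fwd_set(uint32_t *fwd, unsigned int event_id)
{
        fwd[event_id / 32] |= UINT32_C(1) << (event_id % 32);
}
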
diff --git a/drivers/net/bnxt/bnxt_irq.c b/drivers/net/bnxt/bnxt_irq.c
index 49436cfd..8ab98693 100644
--- a/drivers/net/bnxt/bnxt_irq.c
+++ b/drivers/net/bnxt/bnxt_irq.c
@@ -84,7 +84,7 @@ static void bnxt_int_handler(void *param)
cpr->cp_ring_struct))
goto no_more;
}
- RTE_LOG(INFO, PMD,
+ PMD_DRV_LOG(INFO,
"Ignoring %02x completion\n", CMP_TYPE(cmp));
break;
}
@@ -154,7 +154,7 @@ int bnxt_setup_int(struct bnxt *bp)
return 0;
setup_exit:
- RTE_LOG(ERR, PMD, "bnxt_irq_tbl setup failed\n");
+ PMD_DRV_LOG(ERR, "bnxt_irq_tbl setup failed\n");
return rc;
}
diff --git a/drivers/net/bnxt/bnxt_ring.c b/drivers/net/bnxt/bnxt_ring.c
index 0fa2f0c0..8fb89721 100644
--- a/drivers/net/bnxt/bnxt_ring.c
+++ b/drivers/net/bnxt/bnxt_ring.c
@@ -63,13 +63,15 @@ void bnxt_free_ring(struct bnxt_ring *ring)
* Ring groups
*/
-void bnxt_init_ring_grps(struct bnxt *bp)
+int bnxt_init_ring_grps(struct bnxt *bp)
{
unsigned int i;
for (i = 0; i < bp->max_ring_grps; i++)
memset(&bp->grp_info[i], (uint8_t)HWRM_NA_SIGNATURE,
sizeof(struct bnxt_ring_grp_info));
+
+ return 0;
}
/*
@@ -174,15 +176,15 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
memset(mz->addr, 0, mz->len);
mz_phys_addr = mz->iova;
if ((unsigned long)mz->addr == mz_phys_addr) {
- RTE_LOG(WARNING, PMD,
+ PMD_DRV_LOG(WARNING,
"Memzone physical address same as virtual.\n");
- RTE_LOG(WARNING, PMD,
+ PMD_DRV_LOG(WARNING,
"Using rte_mem_virt2iova()\n");
for (sz = 0; sz < total_alloc_len; sz += getpagesize())
rte_mem_lock_page(((char *)mz->addr) + sz);
mz_phys_addr = rte_mem_virt2iova(mz->addr);
if (mz_phys_addr == 0) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"unable to map ring address to physical memory\n");
return -ENOMEM;
}
@@ -324,7 +326,7 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
ring = rxr->ag_ring_struct;
/* Agg ring */
if (ring == NULL) {
- RTE_LOG(ERR, PMD, "Alloc AGG Ring is NULL!\n");
+ PMD_DRV_LOG(ERR, "Alloc AGG Ring is NULL!\n");
goto err_out;
}
@@ -334,7 +336,7 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
cp_ring->fw_ring_id);
if (rc)
goto err_out;
- RTE_LOG(DEBUG, PMD, "Alloc AGG Done!\n");
+ PMD_DRV_LOG(DEBUG, "Alloc AGG Done!\n");
rxr->ag_prod = 0;
rxr->ag_doorbell =
(char *)pci_dev->mem_resource[2].addr +
@@ -345,7 +347,7 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
rxq->rx_buf_use_size = BNXT_MAX_MTU + ETHER_HDR_LEN +
ETHER_CRC_LEN + (2 * VLAN_TAG_SIZE);
if (bnxt_init_one_rx_ring(rxq)) {
- RTE_LOG(ERR, PMD, "bnxt_init_one_rx_ring failed!\n");
+ PMD_DRV_LOG(ERR, "bnxt_init_one_rx_ring failed!\n");
bnxt_rx_queue_release_op(rxq);
return -ENOMEM;
}
@@ -362,9 +364,6 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
struct bnxt_ring *ring = txr->tx_ring_struct;
unsigned int idx = i + 1 + bp->rx_cp_nr_rings;
- /* Account for AGG Rings. AGG ring cnt = Rx Cmpl ring cnt */
- idx += bp->rx_cp_nr_rings;
-
/* Tx cmpl */
rc = bnxt_hwrm_ring_alloc(bp, cp_ring,
HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
diff --git a/drivers/net/bnxt/bnxt_ring.h b/drivers/net/bnxt/bnxt_ring.h
index 164f482e..ebf7228e 100644
--- a/drivers/net/bnxt/bnxt_ring.h
+++ b/drivers/net/bnxt/bnxt_ring.h
@@ -40,9 +40,6 @@
#define RING_NEXT(ring, idx) (((idx) + 1) & (ring)->ring_mask)
-#define RTE_MBUF_DATA_DMA_ADDR(mb) \
- ((uint64_t)((mb)->buf_iova + (mb)->data_off))
-
#define DB_IDX_MASK 0xffffff
#define DB_IDX_VALID (0x1 << 26)
#define DB_IRQ_DIS (0x1 << 27)
@@ -94,7 +91,7 @@ struct bnxt_tx_ring_info;
struct bnxt_rx_ring_info;
struct bnxt_cp_ring_info;
void bnxt_free_ring(struct bnxt_ring *ring);
-void bnxt_init_ring_grps(struct bnxt *bp);
+int bnxt_init_ring_grps(struct bnxt *bp);
int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
struct bnxt_tx_ring_info *tx_ring_info,
struct bnxt_rx_ring_info *rx_ring_info,
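
The deleted RTE_MBUF_DATA_DMA_ADDR() macro open-coded what the mbuf API already provides: rte_mbuf_data_iova() for a populated mbuf, and rte_mbuf_data_iova_default() (used on freshly allocated Rx buffers) which applies the configured headroom instead of the current data_off. A minimal equivalence sketch:

#include <rte_mbuf.h>

/* What the removed macro computed by hand ... */
static inline rte_iova_t
old_macro_equivalent(const struct rte_mbuf *mb)
{
        return (rte_iova_t)(mb->buf_iova + mb->data_off);
}

/* ... and the upstream helper that replaces it in the Rx/Tx paths. */
static inline rte_iova_t
new_call(const struct rte_mbuf *mb)
{
        return rte_mbuf_data_iova(mb);
}
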
diff --git a/drivers/net/bnxt/bnxt_rxq.c b/drivers/net/bnxt/bnxt_rxq.c
index c4da474e..d49f3546 100644
--- a/drivers/net/bnxt/bnxt_rxq.c
+++ b/drivers/net/bnxt/bnxt_rxq.c
@@ -75,7 +75,7 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
if (bp->rx_cp_nr_rings < 2) {
vnic = bnxt_alloc_vnic(bp);
if (!vnic) {
- RTE_LOG(ERR, PMD, "VNIC alloc failed\n");
+ PMD_DRV_LOG(ERR, "VNIC alloc failed\n");
rc = -ENOMEM;
goto err_out;
}
@@ -92,7 +92,7 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
vnic->end_grp_id = vnic->start_grp_id;
filter = bnxt_alloc_filter(bp);
if (!filter) {
- RTE_LOG(ERR, PMD, "L2 filter alloc failed\n");
+ PMD_DRV_LOG(ERR, "L2 filter alloc failed\n");
rc = -ENOMEM;
goto err_out;
}
@@ -118,10 +118,10 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
pools = max_pools;
break;
case ETH_MQ_RX_RSS:
- pools = bp->rx_cp_nr_rings;
+ pools = 1;
break;
default:
- RTE_LOG(ERR, PMD, "Unsupported mq_mod %d\n",
+ PMD_DRV_LOG(ERR, "Unsupported mq_mod %d\n",
dev_conf->rxmode.mq_mode);
rc = -EINVAL;
goto err_out;
@@ -135,7 +135,7 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
for (i = 0; i < pools; i++) {
vnic = bnxt_alloc_vnic(bp);
if (!vnic) {
- RTE_LOG(ERR, PMD, "VNIC alloc failed\n");
+ PMD_DRV_LOG(ERR, "VNIC alloc failed\n");
rc = -ENOMEM;
goto err_out;
}
@@ -166,7 +166,7 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
}
filter = bnxt_alloc_filter(bp);
if (!filter) {
- RTE_LOG(ERR, PMD, "L2 filter alloc failed\n");
+ PMD_DRV_LOG(ERR, "L2 filter alloc failed\n");
rc = -ENOMEM;
goto err_out;
}
@@ -311,8 +311,15 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
struct bnxt_rx_queue *rxq;
int rc = 0;
+ if (queue_idx >= bp->max_rx_rings) {
+ PMD_DRV_LOG(ERR,
+ "Cannot create Rx ring %d. Only %d rings available\n",
+ queue_idx, bp->max_rx_rings);
+ return -ENOSPC;
+ }
+
if (!nb_desc || nb_desc > MAX_RX_DESC_CNT) {
- RTE_LOG(ERR, PMD, "nb_desc %d is invalid\n", nb_desc);
+ PMD_DRV_LOG(ERR, "nb_desc %d is invalid\n", nb_desc);
rc = -EINVAL;
goto out;
}
@@ -325,7 +332,7 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
rxq = rte_zmalloc_socket("bnxt_rx_queue", sizeof(struct bnxt_rx_queue),
RTE_CACHE_LINE_SIZE, socket_id);
if (!rxq) {
- RTE_LOG(ERR, PMD, "bnxt_rx_queue allocation failed!\n");
+ PMD_DRV_LOG(ERR, "bnxt_rx_queue allocation failed!\n");
rc = -ENOMEM;
goto out;
}
@@ -334,8 +341,8 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
rxq->nb_rx_desc = nb_desc;
rxq->rx_free_thresh = rx_conf->rx_free_thresh;
- RTE_LOG(DEBUG, PMD, "RX Buf size is %d\n", rxq->rx_buf_use_size);
- RTE_LOG(DEBUG, PMD, "RX Buf MTU %d\n", eth_dev->data->mtu);
+ PMD_DRV_LOG(DEBUG, "RX Buf size is %d\n", rxq->rx_buf_use_size);
+ PMD_DRV_LOG(DEBUG, "RX Buf MTU %d\n", eth_dev->data->mtu);
rc = bnxt_init_rx_ring_struct(rxq, socket_id);
if (rc)
@@ -350,7 +357,7 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
/* Allocate RX ring hardware descriptors */
if (bnxt_alloc_rings(bp, queue_idx, NULL, rxq->rx_ring, rxq->cp_ring,
"rxr")) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"ring_dma_zone_reserve for rx_ring failed!\n");
bnxt_rx_queue_release_op(rxq);
rc = -ENOMEM;
@@ -398,3 +405,56 @@ bnxt_rx_queue_intr_disable_op(struct rte_eth_dev *eth_dev, uint16_t queue_id)
}
return rc;
}
+
+int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
+ struct bnxt_rx_queue *rxq = bp->rx_queues[rx_queue_id];
+ struct bnxt_vnic_info *vnic = NULL;
+
+ if (rxq == NULL) {
+ PMD_DRV_LOG(ERR, "Invalid Rx queue %d\n", rx_queue_id);
+ return -EINVAL;
+ }
+
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+ rxq->rx_deferred_start = false;
+ PMD_DRV_LOG(INFO, "Rx queue started %d\n", rx_queue_id);
+ if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+ vnic = rxq->vnic;
+ if (vnic->fw_grp_ids[rx_queue_id] != INVALID_HW_RING_ID)
+ return 0;
+ PMD_DRV_LOG(DEBUG, "vnic = %p fw_grp_id = %d\n",
+ vnic, bp->grp_info[rx_queue_id + 1].fw_grp_id);
+ vnic->fw_grp_ids[rx_queue_id] =
+ bp->grp_info[rx_queue_id + 1].fw_grp_id;
+ return bnxt_vnic_rss_configure(bp, vnic);
+ }
+
+ return 0;
+}
+
+int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
+ struct bnxt_rx_queue *rxq = bp->rx_queues[rx_queue_id];
+ struct bnxt_vnic_info *vnic = NULL;
+
+ if (rxq == NULL) {
+ PMD_DRV_LOG(ERR, "Invalid Rx queue %d\n", rx_queue_id);
+ return -EINVAL;
+ }
+
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+ rxq->rx_deferred_start = true;
+ PMD_DRV_LOG(DEBUG, "Rx queue stopped\n");
+
+ if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+ vnic = rxq->vnic;
+ vnic->fw_grp_ids[rx_queue_id] = INVALID_HW_RING_ID;
+ return bnxt_vnic_rss_configure(bp, vnic);
+ }
+ return 0;
+}
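
These callbacks back the generic per-queue start/stop API, so one Rx queue can be quiesced, and dropped out of the RSS redirection table, without restarting the port. A caller-side sketch:

#include <rte_ethdev.h>

/* Sketch: take Rx queue qid of a port out of service and bring it back;
 * bnxt removes and reinserts its ring group in the RSS table underneath. */
static int
bounce_rx_queue(uint16_t port_id, uint16_t qid)
{
        int rc;

        rc = rte_eth_dev_rx_queue_stop(port_id, qid);    /* bnxt_rx_queue_stop() */
        if (rc != 0)
                return rc;
        /* ... drain or reconfigure ... */
        return rte_eth_dev_rx_queue_start(port_id, qid); /* bnxt_rx_queue_start() */
}
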
diff --git a/drivers/net/bnxt/bnxt_rxq.h b/drivers/net/bnxt/bnxt_rxq.h
index 508731ee..c7acaa75 100644
--- a/drivers/net/bnxt/bnxt_rxq.h
+++ b/drivers/net/bnxt/bnxt_rxq.h
@@ -50,6 +50,7 @@ struct bnxt_rx_queue {
uint16_t reg_idx; /* RX queue register index */
uint16_t port_id; /* Device port identifier */
uint8_t crc_len; /* 0 if CRC stripped, 4 otherwise */
+ uint8_t rx_deferred_start; /* not started by dev_start */
struct bnxt *bp;
int index;
@@ -59,8 +60,6 @@ struct bnxt_rx_queue {
uint32_t rx_buf_use_size; /* useable size */
struct bnxt_rx_ring_info *rx_ring;
struct bnxt_cp_ring_info *cp_ring;
-
- struct bnxt_tpa_info *rx_tpa;
};
void bnxt_free_rxq_stats(struct bnxt_rx_queue *rxq);
@@ -77,5 +76,8 @@ int bnxt_rx_queue_intr_enable_op(struct rte_eth_dev *eth_dev,
uint16_t queue_id);
int bnxt_rx_queue_intr_disable_op(struct rte_eth_dev *eth_dev,
uint16_t queue_id);
-
+int bnxt_rx_queue_start(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id);
+int bnxt_rx_queue_stop(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id);
#endif
diff --git a/drivers/net/bnxt/bnxt_rxr.c b/drivers/net/bnxt/bnxt_rxr.c
index 30891b74..aae9a635 100644
--- a/drivers/net/bnxt/bnxt_rxr.c
+++ b/drivers/net/bnxt/bnxt_rxr.c
@@ -65,17 +65,17 @@ static inline int bnxt_alloc_rx_data(struct bnxt_rx_queue *rxq,
{
struct rx_prod_pkt_bd *rxbd = &rxr->rx_desc_ring[prod];
struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
- struct rte_mbuf *data;
+ struct rte_mbuf *mbuf;
- data = __bnxt_alloc_rx_data(rxq->mb_pool);
- if (!data) {
+ mbuf = __bnxt_alloc_rx_data(rxq->mb_pool);
+ if (!mbuf) {
rte_atomic64_inc(&rxq->bp->rx_mbuf_alloc_fail);
return -ENOMEM;
}
- rx_buf->mbuf = data;
+ rx_buf->mbuf = mbuf;
- rxbd->addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR(rx_buf->mbuf));
+ rxbd->addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
return 0;
}
@@ -86,23 +86,23 @@ static inline int bnxt_alloc_ag_data(struct bnxt_rx_queue *rxq,
{
struct rx_prod_pkt_bd *rxbd = &rxr->ag_desc_ring[prod];
struct bnxt_sw_rx_bd *rx_buf = &rxr->ag_buf_ring[prod];
- struct rte_mbuf *data;
+ struct rte_mbuf *mbuf;
- data = __bnxt_alloc_rx_data(rxq->mb_pool);
- if (!data) {
+ mbuf = __bnxt_alloc_rx_data(rxq->mb_pool);
+ if (!mbuf) {
rte_atomic64_inc(&rxq->bp->rx_mbuf_alloc_fail);
return -ENOMEM;
}
if (rxbd == NULL)
- RTE_LOG(ERR, PMD, "Jumbo Frame. rxbd is NULL\n");
+ PMD_DRV_LOG(ERR, "Jumbo Frame. rxbd is NULL\n");
if (rx_buf == NULL)
- RTE_LOG(ERR, PMD, "Jumbo Frame. rx_buf is NULL\n");
+ PMD_DRV_LOG(ERR, "Jumbo Frame. rx_buf is NULL\n");
- rx_buf->mbuf = data;
+ rx_buf->mbuf = mbuf;
- rxbd->addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR(rx_buf->mbuf));
+ rxbd->addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
return 0;
}
@@ -123,7 +123,7 @@ static inline void bnxt_reuse_rx_mbuf(struct bnxt_rx_ring_info *rxr,
prod_bd = &rxr->rx_desc_ring[prod];
- prod_bd->addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR(mbuf));
+ prod_bd->addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
rxr->rx_prod = prod;
}
@@ -234,7 +234,7 @@ static int bnxt_prod_ag_mbuf(struct bnxt_rx_queue *rxq)
/* TODO batch allocation for better performance */
while (rte_bitmap_get(rxr->ag_bitmap, next)) {
if (unlikely(bnxt_alloc_ag_data(rxq, rxr, next))) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"agg mbuf alloc failed: prod=0x%x\n", next);
break;
}
@@ -338,41 +338,57 @@ static inline struct rte_mbuf *bnxt_tpa_end(
static uint32_t
bnxt_parse_pkt_type(struct rx_pkt_cmpl *rxcmp, struct rx_pkt_cmpl_hi *rxcmp1)
{
- uint32_t pkt_type = 0;
- uint32_t t_ipcs = 0, ip = 0, ip6 = 0;
- uint32_t tcp = 0, udp = 0, icmp = 0;
- uint32_t vlan = 0;
+ uint32_t l3, pkt_type = 0;
+ uint32_t t_ipcs = 0, ip6 = 0, vlan = 0;
+ uint32_t flags_type;
vlan = !!(rxcmp1->flags2 &
rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN));
+ pkt_type |= vlan ? RTE_PTYPE_L2_ETHER_VLAN : RTE_PTYPE_L2_ETHER;
+
t_ipcs = !!(rxcmp1->flags2 &
rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC));
ip6 = !!(rxcmp1->flags2 &
rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_TYPE));
- icmp = !!(rxcmp->flags_type &
- rte_cpu_to_le_16(RX_PKT_CMPL_FLAGS_ITYPE_ICMP));
- tcp = !!(rxcmp->flags_type &
- rte_cpu_to_le_16(RX_PKT_CMPL_FLAGS_ITYPE_TCP));
- udp = !!(rxcmp->flags_type &
- rte_cpu_to_le_16(RX_PKT_CMPL_FLAGS_ITYPE_UDP));
- ip = !!(rxcmp->flags_type &
- rte_cpu_to_le_16(RX_PKT_CMPL_FLAGS_ITYPE_IP));
-
- pkt_type |= ((ip || tcp || udp || icmp) && !t_ipcs && !ip6) ?
- RTE_PTYPE_L3_IPV4_EXT_UNKNOWN : 0;
- pkt_type |= ((ip || tcp || udp || icmp) && !t_ipcs && ip6) ?
- RTE_PTYPE_L3_IPV6_EXT_UNKNOWN : 0;
- pkt_type |= (!t_ipcs && icmp) ? RTE_PTYPE_L4_ICMP : 0;
- pkt_type |= (!t_ipcs && udp) ? RTE_PTYPE_L4_UDP : 0;
- pkt_type |= (!t_ipcs && tcp) ? RTE_PTYPE_L4_TCP : 0;
- pkt_type |= ((ip || tcp || udp || icmp) && t_ipcs && !ip6) ?
- RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN : 0;
- pkt_type |= ((ip || tcp || udp || icmp) && t_ipcs && ip6) ?
- RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN : 0;
- pkt_type |= (t_ipcs && icmp) ? RTE_PTYPE_INNER_L4_ICMP : 0;
- pkt_type |= (t_ipcs && udp) ? RTE_PTYPE_INNER_L4_UDP : 0;
- pkt_type |= (t_ipcs && tcp) ? RTE_PTYPE_INNER_L4_TCP : 0;
- pkt_type |= vlan ? RTE_PTYPE_L2_ETHER_VLAN : 0;
+
+ flags_type = rxcmp->flags_type &
+ rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS_ITYPE_MASK);
+
+ if (!t_ipcs && !ip6)
+ l3 = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
+ else if (!t_ipcs && ip6)
+ l3 = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
+ else if (t_ipcs && !ip6)
+ l3 = RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
+ else
+ l3 = RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
+
+ switch (flags_type) {
+ case RTE_LE32(RX_PKT_CMPL_FLAGS_ITYPE_ICMP):
+ if (!t_ipcs)
+ pkt_type |= l3 | RTE_PTYPE_L4_ICMP;
+ else
+ pkt_type |= l3 | RTE_PTYPE_INNER_L4_ICMP;
+ break;
+
+ case RTE_LE32(RX_PKT_CMPL_FLAGS_ITYPE_TCP):
+ if (!t_ipcs)
+ pkt_type |= l3 | RTE_PTYPE_L4_TCP;
+ else
+ pkt_type |= l3 | RTE_PTYPE_INNER_L4_TCP;
+ break;
+
+ case RTE_LE32(RX_PKT_CMPL_FLAGS_ITYPE_UDP):
+ if (!t_ipcs)
+ pkt_type |= l3 | RTE_PTYPE_L4_UDP;
+ else
+ pkt_type |= l3 | RTE_PTYPE_INNER_L4_UDP;
+ break;
+
+ case RTE_LE32(RX_PKT_CMPL_FLAGS_ITYPE_IP):
+ pkt_type |= l3;
+ break;
+ }
return pkt_type;
}
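
Since the rewritten parser always assigns an L2 type and pairs each ITYPE with exactly one L3/L4 combination, consumers can classify with the standard ptype masks; a minimal sketch:

#include <rte_mbuf.h>

/* Returns nonzero for tunnelled TCP exactly as bnxt_parse_pkt_type()
 * now reports it. */
static int
is_inner_tcp(const struct rte_mbuf *m)
{
        return (m->packet_type & RTE_PTYPE_INNER_L4_MASK) ==
               RTE_PTYPE_INNER_L4_TCP;
}
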
@@ -442,6 +458,7 @@ static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
rte_prefetch0(mbuf);
+ mbuf->data_off = RTE_PKTMBUF_HEADROOM;
mbuf->nb_segs = 1;
mbuf->next = NULL;
mbuf->pkt_len = rxcmp->len;
@@ -456,6 +473,10 @@ static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
}
+ if ((rxcmp->flags_type & rte_cpu_to_le_16(RX_PKT_CMPL_FLAGS_MASK)) ==
+ RX_PKT_CMPL_FLAGS_ITYPE_PTP_W_TIMESTAMP)
+ mbuf->ol_flags |= PKT_RX_IEEE1588_PTP | PKT_RX_IEEE1588_TMST;
+
if (agg_buf)
bnxt_rx_pages(rxq, mbuf, &tmp_raw_cons, agg_buf);
@@ -470,12 +491,12 @@ static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
if (likely(RX_CMP_IP_CS_OK(rxcmp1)))
mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
else
- mbuf->ol_flags |= PKT_RX_IP_CKSUM_NONE;
+ mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
if (likely(RX_CMP_L4_CS_OK(rxcmp1)))
mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
else
- mbuf->ol_flags |= PKT_RX_L4_CKSUM_NONE;
+ mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
mbuf->packet_type = bnxt_parse_pkt_type(rxcmp, rxcmp1);
@@ -507,7 +528,7 @@ static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
*/
prod = RING_NEXT(rxr->rx_ring_struct, prod);
if (bnxt_alloc_rx_data(rxq, rxr, prod)) {
- RTE_LOG(ERR, PMD, "mbuf alloc failed with prod=0x%x\n", prod);
+ PMD_DRV_LOG(ERR, "mbuf alloc failed with prod=0x%x\n", prod);
rc = -ENOMEM;
goto rx;
}
@@ -540,6 +561,10 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t ag_prod = rxr->ag_prod;
int rc = 0;
+ /* If the Rx queue was stopped, return immediately */
+ if (rxq->rx_deferred_start)
+ return 0;
+
/* Handle RX burst request */
while (1) {
cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
@@ -596,7 +621,7 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
rxr->rx_prod = i;
B_RX_DB(rxr->rx_doorbell, rxr->rx_prod);
} else {
- RTE_LOG(ERR, PMD, "Alloc mbuf failed\n");
+ PMD_DRV_LOG(ERR, "Alloc mbuf failed\n");
break;
}
}
@@ -739,7 +764,7 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
prod = rxr->rx_prod;
for (i = 0; i < ring->ring_size; i++) {
if (bnxt_alloc_rx_data(rxq, rxr, prod) != 0) {
- RTE_LOG(WARNING, PMD,
+ PMD_DRV_LOG(WARNING,
"init'ed rx ring %d with %d/%d mbufs only\n",
rxq->queue_id, i, ring->ring_size);
break;
@@ -747,7 +772,6 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
rxr->rx_prod = prod;
prod = RING_NEXT(rxr->rx_ring_struct, prod);
}
- RTE_LOG(DEBUG, PMD, "%s\n", __func__);
ring = rxr->ag_ring_struct;
type = RX_PROD_AGG_BD_TYPE_RX_PROD_AGG;
@@ -756,7 +780,7 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
for (i = 0; i < ring->ring_size; i++) {
if (bnxt_alloc_ag_data(rxq, rxr, prod) != 0) {
- RTE_LOG(WARNING, PMD,
+ PMD_DRV_LOG(WARNING,
"init'ed AG ring %d with %d/%d mbufs only\n",
rxq->queue_id, i, ring->ring_size);
break;
@@ -764,7 +788,7 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
rxr->ag_prod = prod;
prod = RING_NEXT(rxr->ag_ring_struct, prod);
}
- RTE_LOG(DEBUG, PMD, "%s AGG Done!\n", __func__);
+ PMD_DRV_LOG(DEBUG, "AGG Done!\n");
if (rxr->tpa_info) {
for (i = 0; i < BNXT_TPA_MAX; i++) {
@@ -776,7 +800,7 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
}
}
}
- RTE_LOG(DEBUG, PMD, "%s TPA alloc Done!\n", __func__);
+ PMD_DRV_LOG(DEBUG, "TPA alloc Done!\n");
return 0;
}
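
Reporting *_CKSUM_BAD instead of *_CKSUM_NONE on failure is a semantic fix: NONE means the hardware did not validate the checksum, BAD means it validated and the packet is corrupt. A receiver can now make a drop decision, e.g.:

#include <rte_mbuf.h>

/* Drop-decision sketch: hardware-verified-bad checksums are fatal;
 * unvalidated packets (neither GOOD nor BAD set) fall through to a
 * software check. */
static int
should_drop(const struct rte_mbuf *m)
{
        return (m->ol_flags &
                (PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD)) != 0;
}
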
diff --git a/drivers/net/bnxt/bnxt_rxr.h b/drivers/net/bnxt/bnxt_rxr.h
index a94373d1..f3ed49bd 100644
--- a/drivers/net/bnxt/bnxt_rxr.h
+++ b/drivers/net/bnxt/bnxt_rxr.h
@@ -120,5 +120,6 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
void bnxt_free_rx_rings(struct bnxt *bp);
int bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq, unsigned int socket_id);
int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq);
-
+int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
#endif
diff --git a/drivers/net/bnxt/bnxt_stats.c b/drivers/net/bnxt/bnxt_stats.c
index fe83d370..bd93cc83 100644
--- a/drivers/net/bnxt/bnxt_stats.c
+++ b/drivers/net/bnxt/bnxt_stats.c
@@ -236,6 +236,10 @@ int bnxt_stats_get_op(struct rte_eth_dev *eth_dev,
struct bnxt *bp = eth_dev->data->dev_private;
memset(bnxt_stats, 0, sizeof(*bnxt_stats));
+ if (!(bp->flags & BNXT_FLAG_INIT_DONE)) {
+ PMD_DRV_LOG(ERR, "Device Initialization not complete!\n");
+ return 0;
+ }
for (i = 0; i < bp->rx_cp_nr_rings; i++) {
struct bnxt_rx_queue *rxq = bp->rx_queues[i];
@@ -267,6 +271,11 @@ void bnxt_stats_reset_op(struct rte_eth_dev *eth_dev)
{
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ if (!(bp->flags & BNXT_FLAG_INIT_DONE)) {
+ PMD_DRV_LOG(ERR, "Device Initialization not complete!\n");
+ return;
+ }
+
bnxt_clear_all_hwrm_stat_ctxs(bp);
rte_atomic64_clear(&bp->rx_mbuf_alloc_fail);
}
@@ -280,7 +289,7 @@ int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev,
uint64_t tx_drop_pkts;
if (!(bp->flags & BNXT_FLAG_PORT_STATS)) {
- RTE_LOG(ERR, PMD, "xstats not supported for VF\n");
+ PMD_DRV_LOG(ERR, "xstats not supported for VF\n");
return 0;
}
@@ -358,15 +367,15 @@ void bnxt_dev_xstats_reset_op(struct rte_eth_dev *eth_dev)
{
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
- if (bp->flags & BNXT_FLAG_PORT_STATS && !BNXT_NPAR_PF(bp))
+ if (bp->flags & BNXT_FLAG_PORT_STATS && BNXT_SINGLE_PF(bp))
bnxt_hwrm_port_clr_stats(bp);
if (BNXT_VF(bp))
- RTE_LOG(ERR, PMD, "Operation not supported on a VF device\n");
- if (BNXT_NPAR_PF(bp))
- RTE_LOG(ERR, PMD, "Operation not supported on a MF device\n");
+ PMD_DRV_LOG(ERR, "Operation not supported on a VF device\n");
+ if (!BNXT_SINGLE_PF(bp))
+ PMD_DRV_LOG(ERR, "Operation not supported on a MF device\n");
if (!(bp->flags & BNXT_FLAG_PORT_STATS))
- RTE_LOG(ERR, PMD, "Operation not supported\n");
+ PMD_DRV_LOG(ERR, "Operation not supported\n");
}
int bnxt_dev_xstats_get_by_id_op(struct rte_eth_dev *dev, const uint64_t *ids,
@@ -385,7 +394,7 @@ int bnxt_dev_xstats_get_by_id_op(struct rte_eth_dev *dev, const uint64_t *ids,
bnxt_dev_xstats_get_by_id_op(dev, NULL, values_copy, stat_cnt);
for (i = 0; i < limit; i++) {
if (ids[i] >= stat_cnt) {
- RTE_LOG(ERR, PMD, "id value isn't valid");
+ PMD_DRV_LOG(ERR, "id value isn't valid");
return -1;
}
values[i] = values_copy[ids[i]];
@@ -411,7 +420,7 @@ int bnxt_dev_xstats_get_names_by_id_op(struct rte_eth_dev *dev,
for (i = 0; i < limit; i++) {
if (ids[i] >= stat_cnt) {
- RTE_LOG(ERR, PMD, "id value isn't valid");
+ PMD_DRV_LOG(ERR, "id value isn't valid");
return -1;
}
strcpy(xstats_names[i].name,
diff --git a/drivers/net/bnxt/bnxt_stats.h b/drivers/net/bnxt/bnxt_stats.h
index 51d16f5d..c1c83d57 100644
--- a/drivers/net/bnxt/bnxt_stats.h
+++ b/drivers/net/bnxt/bnxt_stats.h
@@ -34,7 +34,7 @@
#ifndef _BNXT_STATS_H_
#define _BNXT_STATS_H_
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
void bnxt_free_stats(struct bnxt *bp);
int bnxt_stats_get_op(struct rte_eth_dev *eth_dev,
diff --git a/drivers/net/bnxt/bnxt_txq.c b/drivers/net/bnxt/bnxt_txq.c
index 99dddddf..53524346 100644
--- a/drivers/net/bnxt/bnxt_txq.c
+++ b/drivers/net/bnxt/bnxt_txq.c
@@ -108,8 +108,15 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
struct bnxt_tx_queue *txq;
int rc = 0;
+ if (queue_idx >= bp->max_tx_rings) {
+ PMD_DRV_LOG(ERR,
+ "Cannot create Tx ring %d. Only %d rings available\n",
+ queue_idx, bp->max_tx_rings);
+ return -ENOSPC;
+ }
+
if (!nb_desc || nb_desc > MAX_TX_DESC_CNT) {
- RTE_LOG(ERR, PMD, "nb_desc %d is invalid", nb_desc);
+ PMD_DRV_LOG(ERR, "nb_desc %d is invalid", nb_desc);
rc = -EINVAL;
goto out;
}
@@ -124,7 +131,7 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
txq = rte_zmalloc_socket("bnxt_tx_queue", sizeof(struct bnxt_tx_queue),
RTE_CACHE_LINE_SIZE, socket_id);
if (!txq) {
- RTE_LOG(ERR, PMD, "bnxt_tx_queue allocation failed!");
+ PMD_DRV_LOG(ERR, "bnxt_tx_queue allocation failed!");
rc = -ENOMEM;
goto out;
}
@@ -142,14 +149,14 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
/* Allocate TX ring hardware descriptors */
if (bnxt_alloc_rings(bp, queue_idx, txq->tx_ring, NULL, txq->cp_ring,
"txr")) {
- RTE_LOG(ERR, PMD, "ring_dma_zone_reserve for tx_ring failed!");
+ PMD_DRV_LOG(ERR, "ring_dma_zone_reserve for tx_ring failed!");
bnxt_tx_queue_release_op(txq);
rc = -ENOMEM;
goto out;
}
if (bnxt_init_one_tx_ring(txq)) {
- RTE_LOG(ERR, PMD, "bnxt_init_one_tx_ring failed!");
+ PMD_DRV_LOG(ERR, "bnxt_init_one_tx_ring failed!");
bnxt_tx_queue_release_op(txq);
rc = -ENOMEM;
goto out;
diff --git a/drivers/net/bnxt/bnxt_txq.h b/drivers/net/bnxt/bnxt_txq.h
index f753c10f..e27c34fa 100644
--- a/drivers/net/bnxt/bnxt_txq.h
+++ b/drivers/net/bnxt/bnxt_txq.h
@@ -71,5 +71,4 @@ int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev,
uint16_t nb_desc,
unsigned int socket_id,
const struct rte_eth_txconf *tx_conf);
-
#endif
diff --git a/drivers/net/bnxt/bnxt_txr.c b/drivers/net/bnxt/bnxt_txr.c
index 8ca4bbd8..2c81a37c 100644
--- a/drivers/net/bnxt/bnxt_txr.c
+++ b/drivers/net/bnxt/bnxt_txr.c
@@ -101,7 +101,7 @@ int bnxt_init_tx_ring_struct(struct bnxt_tx_queue *txq, unsigned int socket_id)
if (ring == NULL)
return -ENOMEM;
txr->tx_ring_struct = ring;
- ring->ring_size = rte_align32pow2(txq->nb_tx_desc + 1);
+ ring->ring_size = rte_align32pow2(txq->nb_tx_desc);
ring->ring_mask = ring->ring_size - 1;
ring->bd = (void *)txr->tx_desc_ring;
ring->bd_dma = txr->tx_desc_mapping;
@@ -181,7 +181,7 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
txbd->flags_type |= TX_BD_LONG_FLAGS_LHINT_GTE2K;
else
txbd->flags_type |= lhint_arr[txbd->len >> 9];
- txbd->addr = rte_cpu_to_le_32(RTE_MBUF_DATA_DMA_ADDR(tx_buf->mbuf));
+ txbd->addr = rte_cpu_to_le_32(rte_mbuf_data_iova(tx_buf->mbuf));
if (long_bd) {
txbd->flags_type |= TX_BD_LONG_TYPE_TX_BD_LONG;
@@ -217,23 +217,28 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
tx_pkt->outer_l3_len;
txbd1->mss = tx_pkt->tso_segsz;
- } else if (tx_pkt->ol_flags & PKT_TX_OIP_IIP_TCP_UDP_CKSUM) {
+ } else if ((tx_pkt->ol_flags & PKT_TX_OIP_IIP_TCP_UDP_CKSUM) ==
+ PKT_TX_OIP_IIP_TCP_UDP_CKSUM) {
/* Outer IP, Inner IP, Inner TCP/UDP CSO */
txbd1->lflags |= TX_BD_FLG_TIP_IP_TCP_UDP_CHKSUM;
txbd1->mss = 0;
- } else if (tx_pkt->ol_flags & PKT_TX_IIP_TCP_UDP_CKSUM) {
+ } else if ((tx_pkt->ol_flags & PKT_TX_IIP_TCP_UDP_CKSUM) ==
+ PKT_TX_IIP_TCP_UDP_CKSUM) {
/* (Inner) IP, (Inner) TCP/UDP CSO */
txbd1->lflags |= TX_BD_FLG_IP_TCP_UDP_CHKSUM;
txbd1->mss = 0;
- } else if (tx_pkt->ol_flags & PKT_TX_OIP_TCP_UDP_CKSUM) {
+ } else if ((tx_pkt->ol_flags & PKT_TX_OIP_TCP_UDP_CKSUM) ==
+ PKT_TX_OIP_TCP_UDP_CKSUM) {
/* Outer IP, (Inner) TCP/UDP CSO */
txbd1->lflags |= TX_BD_FLG_TIP_TCP_UDP_CHKSUM;
txbd1->mss = 0;
- } else if (tx_pkt->ol_flags & PKT_TX_OIP_IIP_CKSUM) {
+ } else if ((tx_pkt->ol_flags & PKT_TX_OIP_IIP_CKSUM) ==
+ PKT_TX_OIP_IIP_CKSUM) {
/* Outer IP, Inner IP CSO */
txbd1->lflags |= TX_BD_FLG_TIP_IP_CHKSUM;
txbd1->mss = 0;
- } else if (tx_pkt->ol_flags & PKT_TX_TCP_UDP_CKSUM) {
+ } else if ((tx_pkt->ol_flags & PKT_TX_TCP_UDP_CKSUM) ==
+ PKT_TX_TCP_UDP_CKSUM) {
/* TCP/UDP CSO */
txbd1->lflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM;
txbd1->mss = 0;
@@ -257,7 +262,7 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
tx_buf = &txr->tx_buf_ring[txr->tx_prod];
txbd = &txr->tx_desc_ring[txr->tx_prod];
- txbd->addr = rte_cpu_to_le_32(RTE_MBUF_DATA_DMA_ADDR(m_seg));
+ txbd->addr = rte_cpu_to_le_32(rte_mbuf_data_iova(m_seg));
txbd->flags_type = TX_BD_SHORT_TYPE_TX_BD_SHORT;
txbd->len = m_seg->data_len;
@@ -344,6 +349,11 @@ uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
/* Handle TX completions */
bnxt_handle_tx_cp(txq);
+ /* Tx queue was stopped; wait for it to be restarted */
+ if (txq->tx_deferred_start) {
+ PMD_DRV_LOG(DEBUG, "Tx q stopped;return\n");
+ return 0;
+ }
/* Handle TX burst request */
for (nb_tx_pkts = 0; nb_tx_pkts < nb_pkts; nb_tx_pkts++) {
if (bnxt_start_xmit(tx_pkts[nb_tx_pkts], txq)) {
@@ -359,3 +369,30 @@ uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
return nb_tx_pkts;
}
+
+int bnxt_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt_tx_queue *txq = bp->tx_queues[tx_queue_id];
+
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+ txq->tx_deferred_start = false;
+ PMD_DRV_LOG(DEBUG, "Tx queue started\n");
+
+ return 0;
+}
+
+int bnxt_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+ struct bnxt_tx_queue *txq = bp->tx_queues[tx_queue_id];
+
+ /* Handle TX completions */
+ bnxt_handle_tx_cp(txq);
+
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+ txq->tx_deferred_start = true;
+ PMD_DRV_LOG(DEBUG, "Tx queue stopped\n");
+
+ return 0;
+}
diff --git a/drivers/net/bnxt/bnxt_txr.h b/drivers/net/bnxt/bnxt_txr.h
index 2feac51d..d88b15ab 100644
--- a/drivers/net/bnxt/bnxt_txr.h
+++ b/drivers/net/bnxt/bnxt_txr.h
@@ -68,6 +68,8 @@ int bnxt_init_one_tx_ring(struct bnxt_tx_queue *txq);
int bnxt_init_tx_ring_struct(struct bnxt_tx_queue *txq, unsigned int socket_id);
uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
+int bnxt_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+int bnxt_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
#define PKT_TX_OIP_IIP_TCP_UDP_CKSUM (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM | \
PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM)
diff --git a/drivers/net/bnxt/bnxt_vnic.c b/drivers/net/bnxt/bnxt_vnic.c
index 5bac2605..d4aeb4ca 100644
--- a/drivers/net/bnxt/bnxt_vnic.c
+++ b/drivers/net/bnxt/bnxt_vnic.c
@@ -107,7 +107,7 @@ int bnxt_free_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic,
}
temp = STAILQ_NEXT(temp, next);
}
- RTE_LOG(ERR, PMD, "VNIC %p is not found in pool[%d]\n", vnic, pool);
+ PMD_DRV_LOG(ERR, "VNIC %p is not found in pool[%d]\n", vnic, pool);
return -EINVAL;
}
@@ -118,7 +118,7 @@ struct bnxt_vnic_info *bnxt_alloc_vnic(struct bnxt *bp)
/* Find the 1st unused vnic from the free_vnic_list pool*/
vnic = STAILQ_FIRST(&bp->free_vnic_list);
if (!vnic) {
- RTE_LOG(ERR, PMD, "No more free VNIC resources\n");
+ PMD_DRV_LOG(ERR, "No more free VNIC resources\n");
return NULL;
}
STAILQ_REMOVE_HEAD(&bp->free_vnic_list, next);
@@ -194,13 +194,13 @@ int bnxt_alloc_vnic_attributes(struct bnxt *bp)
}
mz_phys_addr = mz->iova;
if ((unsigned long)mz->addr == mz_phys_addr) {
- RTE_LOG(WARNING, PMD,
+ PMD_DRV_LOG(WARNING,
"Memzone physical address same as virtual.\n");
- RTE_LOG(WARNING, PMD,
+ PMD_DRV_LOG(WARNING,
"Using rte_mem_virt2iova()\n");
mz_phys_addr = rte_mem_virt2iova(mz->addr);
if (mz_phys_addr == 0) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"unable to map vnic address to physical memory\n");
return -ENOMEM;
}
@@ -241,7 +241,7 @@ void bnxt_free_vnic_mem(struct bnxt *bp)
for (i = 0; i < max_vnics; i++) {
vnic = &bp->vnic_info[i];
if (vnic->fw_vnic_id != (uint16_t)HWRM_NA_SIGNATURE) {
- RTE_LOG(ERR, PMD, "VNIC is not freed yet!\n");
+ PMD_DRV_LOG(ERR, "VNIC is not freed yet!\n");
/* TODO Call HWRM to free VNIC */
}
}
@@ -260,7 +260,7 @@ int bnxt_alloc_vnic_mem(struct bnxt *bp)
vnic_mem = rte_zmalloc("bnxt_vnic_info",
max_vnics * sizeof(struct bnxt_vnic_info), 0);
if (vnic_mem == NULL) {
- RTE_LOG(ERR, PMD, "Failed to alloc memory for %d VNICs",
+ PMD_DRV_LOG(ERR, "Failed to alloc memory for %d VNICs",
max_vnics);
return -ENOMEM;
}
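The RTE_LOG(..., PMD, ...) to PMD_DRV_LOG(...) conversion in this file (and across the series) funnels all bnxt logging through one driver-local wrapper. Its definition lives in bnxt.h and is not part of this hunk; a plausible shape, shown purely for orientation, is a thin macro that prefixes the calling function's name:

    /* Illustrative only - the real definition is in bnxt.h. */
    #define PMD_DRV_LOG(level, fmt, args...) \
            RTE_LOG(level, PMD, "%s(): " fmt, __func__, ## args)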
diff --git a/drivers/net/bnxt/bnxt_vnic.h b/drivers/net/bnxt/bnxt_vnic.h
index 875dc3c1..d8d35c7d 100644
--- a/drivers/net/bnxt/bnxt_vnic.h
+++ b/drivers/net/bnxt/bnxt_vnic.h
@@ -93,5 +93,4 @@ void bnxt_free_vnic_attributes(struct bnxt *bp);
int bnxt_alloc_vnic_attributes(struct bnxt *bp);
void bnxt_free_vnic_mem(struct bnxt *bp);
int bnxt_alloc_vnic_mem(struct bnxt *bp);
-
#endif
diff --git a/drivers/net/bnxt/hsi_struct_def_dpdk.h b/drivers/net/bnxt/hsi_struct_def_dpdk.h
index c16edbad..1e9c39f4 100644
--- a/drivers/net/bnxt/hsi_struct_def_dpdk.h
+++ b/drivers/net/bnxt/hsi_struct_def_dpdk.h
@@ -85,6 +85,7 @@
#define HWRM_PORT_CLR_STATS (UINT32_C(0x25))
#define HWRM_PORT_PHY_QCFG (UINT32_C(0x27))
#define HWRM_PORT_MAC_QCFG (UINT32_C(0x28))
+#define HWRM_PORT_MAC_PTP_QCFG (UINT32_C(0x29))
#define HWRM_PORT_PHY_QCAPS (UINT32_C(0x2a))
#define HWRM_PORT_LED_CFG (UINT32_C(0x2d))
#define HWRM_PORT_LED_QCFG (UINT32_C(0x2e))
@@ -909,7 +910,7 @@ struct rx_pkt_cmpl {
* This is the length of the data for the packet stored in the
* buffer(s) identified by the opaque value. This includes the
* packet BD and any associated buffer BDs. This does not
- * include the the length of any data places in aggregation BDs.
+ * include the length of any data placed in aggregation BDs.
*/
uint32_t opaque;
/*
@@ -3275,7 +3276,7 @@ struct hwrm_func_cfg_input {
uint16_t fid;
/*
* Function ID of the function that is being configured. If set
- * to 0xFF... (All Fs), then the the configuration is for the
+ * to 0xFF... (All Fs), then the configuration is for the
* requesting function.
*/
uint8_t unused_0;
@@ -7121,6 +7122,227 @@ struct hwrm_queue_qportcfg_output {
*/
} __attribute__((packed));
+/*********************
+ * hwrm_port_mac_cfg *
+ *********************/
+
+
+/* hwrm_port_mac_cfg_input (size:320b/40B) */
+struct hwrm_port_mac_cfg_input {
+ uint16_t req_type;
+ uint16_t cmpl_ring;
+ uint16_t seq_id;
+ uint16_t target_id;
+ uint64_t resp_addr;
+ uint32_t flags;
+ #define PORT_MAC_CFG_REQ_FLAGS_MATCH_LINK 0x1UL
+ #define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_ENABLE 0x2UL
+ #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_ENABLE 0x4UL
+ #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_ENABLE 0x8UL
+ #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE 0x10UL
+ #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE 0x20UL
+ #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE 0x40UL
+ #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE 0x80UL
+ #define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_ENABLE 0x100UL
+ #define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_DISABLE 0x200UL
+ #define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_DISABLE 0x400UL
+ #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_DISABLE 0x800UL
+ #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_DISABLE 0x1000UL
+ uint32_t enables;
+ #define PORT_MAC_CFG_REQ_ENABLES_IPG 0x1UL
+ #define PORT_MAC_CFG_REQ_ENABLES_LPBK 0x2UL
+ #define PORT_MAC_CFG_REQ_ENABLES_VLAN_PRI2COS_MAP_PRI 0x4UL
+ #define PORT_MAC_CFG_REQ_ENABLES_TUNNEL_PRI2COS_MAP_PRI 0x10UL
+ #define PORT_MAC_CFG_REQ_ENABLES_DSCP2COS_MAP_PRI 0x20UL
+ #define PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE 0x40UL
+ #define PORT_MAC_CFG_REQ_ENABLES_TX_TS_CAPTURE_PTP_MSG_TYPE 0x80UL
+ #define PORT_MAC_CFG_REQ_ENABLES_COS_FIELD_CFG 0x100UL
+ uint16_t port_id;
+ uint8_t ipg;
+ uint8_t lpbk;
+ #define PORT_MAC_CFG_REQ_LPBK_NONE 0x0UL
+ #define PORT_MAC_CFG_REQ_LPBK_LOCAL 0x1UL
+ #define PORT_MAC_CFG_REQ_LPBK_REMOTE 0x2UL
+ #define PORT_MAC_CFG_REQ_LPBK_LAST PORT_MAC_CFG_REQ_LPBK_REMOTE
+ uint8_t vlan_pri2cos_map_pri;
+ uint8_t reserved1;
+ uint8_t tunnel_pri2cos_map_pri;
+ uint8_t dscp2pri_map_pri;
+ uint16_t rx_ts_capture_ptp_msg_type;
+ uint16_t tx_ts_capture_ptp_msg_type;
+ uint8_t cos_field_cfg;
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_RSVD1 0x1UL
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_MASK 0x6UL
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_SFT 1
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_INNERMOST \
+ (0x0UL << 1)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTER \
+ (0x1UL << 1)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTERMOST \
+ (0x2UL << 1)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED \
+ (0x3UL << 1)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_LAST \
+ PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_MASK 0x18UL
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_SFT 3
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_INNERMOST \
+ (0x0UL << 3)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTER \
+ (0x1UL << 3)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTERMOST \
+ (0x2UL << 3)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED \
+ (0x3UL << 3)
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_LAST \
+ PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_MASK 0xe0UL
+ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_SFT 5
+ uint8_t unused_0[3];
+};
+
+
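To turn on PTP timestamp capture with this command, a driver sets the matching *_TS_CAPTURE_ENABLE flag, marks the message-type field as valid in enables, and supplies a bitmap of PTP event messages to stamp. A minimal sketch of building such a request; the ptp_event_msgs bitmap and the send helper stand in for the driver's own HWRM plumbing:

    struct hwrm_port_mac_cfg_input req = { 0 };

    req.flags = rte_cpu_to_le_32(
            PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE);
    req.enables = rte_cpu_to_le_32(
            PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
    /* Bitmap of PTP event messages (SYNC, DELAY_REQ, ...) to timestamp. */
    req.rx_ts_capture_ptp_msg_type = rte_cpu_to_le_16(ptp_event_msgs);
    rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));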
+/* hwrm_port_mac_cfg_output (size:128b/16B) */
+struct hwrm_port_mac_cfg_output {
+ uint16_t error_code;
+ uint16_t req_type;
+ uint16_t seq_id;
+ uint16_t resp_len;
+ uint16_t mru;
+ uint16_t mtu;
+ uint8_t ipg;
+ uint8_t lpbk;
+ #define PORT_MAC_CFG_RESP_LPBK_NONE 0x0UL
+ #define PORT_MAC_CFG_RESP_LPBK_LOCAL 0x1UL
+ #define PORT_MAC_CFG_RESP_LPBK_REMOTE 0x2UL
+ #define PORT_MAC_CFG_RESP_LPBK_LAST PORT_MAC_CFG_RESP_LPBK_REMOTE
+ uint8_t unused_0;
+ uint8_t valid;
+};
+
+
+/**********************
+ * hwrm_port_mac_qcfg *
+ **********************/
+
+
+/* hwrm_port_mac_qcfg_input (size:192b/24B) */
+struct hwrm_port_mac_qcfg_input {
+ uint16_t req_type;
+ uint16_t cmpl_ring;
+ uint16_t seq_id;
+ uint16_t target_id;
+ uint64_t resp_addr;
+ uint16_t port_id;
+ uint8_t unused_0[6];
+};
+
+
+/* hwrm_port_mac_qcfg_output (size:192b/24B) */
+struct hwrm_port_mac_qcfg_output {
+ uint16_t error_code;
+ uint16_t req_type;
+ uint16_t seq_id;
+ uint16_t resp_len;
+ uint16_t mru;
+ uint16_t mtu;
+ uint8_t ipg;
+ uint8_t lpbk;
+ #define PORT_MAC_QCFG_RESP_LPBK_NONE 0x0UL
+ #define PORT_MAC_QCFG_RESP_LPBK_LOCAL 0x1UL
+ #define PORT_MAC_QCFG_RESP_LPBK_REMOTE 0x2UL
+ #define PORT_MAC_QCFG_RESP_LPBK_LAST PORT_MAC_QCFG_RESP_LPBK_REMOTE
+ uint8_t vlan_pri2cos_map_pri;
+ uint8_t flags;
+ #define PORT_MAC_QCFG_RESP_FLAGS_VLAN_PRI2COS_ENABLE 0x1UL
+ #define PORT_MAC_QCFG_RESP_FLAGS_TUNNEL_PRI2COS_ENABLE 0x2UL
+ #define PORT_MAC_QCFG_RESP_FLAGS_IP_DSCP2COS_ENABLE 0x4UL
+ #define PORT_MAC_QCFG_RESP_FLAGS_OOB_WOL_ENABLE 0x8UL
+ #define PORT_MAC_QCFG_RESP_FLAGS_PTP_RX_TS_CAPTURE_ENABLE 0x10UL
+ #define PORT_MAC_QCFG_RESP_FLAGS_PTP_TX_TS_CAPTURE_ENABLE 0x20UL
+ uint8_t tunnel_pri2cos_map_pri;
+ uint8_t dscp2pri_map_pri;
+ uint16_t rx_ts_capture_ptp_msg_type;
+ uint16_t tx_ts_capture_ptp_msg_type;
+ uint8_t cos_field_cfg;
+ #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_RSVD 0x1UL
+ #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_MASK 0x6UL
+ #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_SFT 1
+ #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_INNERMOST \
+ (0x0UL << 1)
+ #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_OUTER \
+ (0x1UL << 1)
+ #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_OUTERMOST \
+ (0x2UL << 1)
+ #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED \
+ (0x3UL << 1)
+ #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_LAST \
+ PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED
+ #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_MASK 0x18UL
+ #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_SFT 3
+ #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_INNERMOST \
+ (0x0UL << 3)
+ #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTER \
+ (0x1UL << 3)
+ #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTERMOST \
+ (0x2UL << 3)
+ #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED \
+ (0x3UL << 3)
+ #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_LAST \
+ PORT_MAC_QCFG_RESP_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED
+ #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_DEFAULT_COS_MASK 0xe0UL
+ #define PORT_MAC_QCFG_RESP_COS_FIELD_CFG_DEFAULT_COS_SFT 5
+ uint8_t valid;
+};
+
+
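The cos_field_cfg byte packs two 2-bit VLAN priority selectors and a 3-bit default CoS value; callers recover them with the usual mask-and-shift idiom over the defines above:

    uint8_t cfg = resp->cos_field_cfg;
    uint8_t vlan_pri_sel =
            (cfg & PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_MASK) >>
            PORT_MAC_QCFG_RESP_COS_FIELD_CFG_VLAN_PRI_SEL_SFT;
    uint8_t default_cos =
            (cfg & PORT_MAC_QCFG_RESP_COS_FIELD_CFG_DEFAULT_COS_MASK) >>
            PORT_MAC_QCFG_RESP_COS_FIELD_CFG_DEFAULT_COS_SFT;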
+/**************************
+ * hwrm_port_mac_ptp_qcfg *
+ **************************/
+
+
+/* hwrm_port_mac_ptp_qcfg_input (size:192b/24B) */
+struct hwrm_port_mac_ptp_qcfg_input {
+ uint16_t req_type;
+ uint16_t cmpl_ring;
+ uint16_t seq_id;
+ uint16_t target_id;
+ uint64_t resp_addr;
+ uint16_t port_id;
+ uint8_t unused_0[6];
+};
+
+
+/* hwrm_port_mac_ptp_qcfg_output (size:640b/80B) */
+struct hwrm_port_mac_ptp_qcfg_output {
+ uint16_t error_code;
+ uint16_t req_type;
+ uint16_t seq_id;
+ uint16_t resp_len;
+ uint8_t flags;
+ #define PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS 0x1UL
+ #define PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS 0x2UL
+ uint8_t unused_0[3];
+ uint32_t rx_ts_reg_off_lower;
+ uint32_t rx_ts_reg_off_upper;
+ uint32_t rx_ts_reg_off_seq_id;
+ uint32_t rx_ts_reg_off_src_id_0;
+ uint32_t rx_ts_reg_off_src_id_1;
+ uint32_t rx_ts_reg_off_src_id_2;
+ uint32_t rx_ts_reg_off_domain_id;
+ uint32_t rx_ts_reg_off_fifo;
+ uint32_t rx_ts_reg_off_fifo_adv;
+ uint32_t rx_ts_reg_off_granularity;
+ uint32_t tx_ts_reg_off_lower;
+ uint32_t tx_ts_reg_off_upper;
+ uint32_t tx_ts_reg_off_seq_id;
+ uint32_t tx_ts_reg_off_fifo;
+ uint32_t tx_ts_reg_off_granularity;
+ uint8_t unused_1[7];
+ uint8_t valid;
+};
+
+
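When the response reports PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS, the *_reg_off_* fields are offsets into the device BAR at which the PMD can read timestamps directly. A minimal sketch of assembling a 64-bit Rx timestamp from those offsets; read_reg() is a hypothetical 32-bit BAR read helper and endianness handling of the response fields is elided:

    static uint32_t read_reg(void *bar0, uint32_t off)
    {
            /* Hypothetical: 32-bit little-endian register read from BAR0. */
            return rte_le_to_cpu_32(*(volatile uint32_t *)
                                    ((char *)bar0 + off));
    }

    static int read_rx_tstamp(void *bar0,
                              struct hwrm_port_mac_ptp_qcfg_output *r,
                              uint64_t *ts)
    {
            /* No pending FIFO entry means no valid timestamp to read. */
            if (!(read_reg(bar0, r->rx_ts_reg_off_fifo) & 0x1))
                    return -1;
            *ts = ((uint64_t)read_reg(bar0, r->rx_ts_reg_off_upper) << 32) |
                  read_reg(bar0, r->rx_ts_reg_off_lower);
            return 0;
    }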
/* hwrm_vnic_alloc */
/*
* Description: This VNIC is a resource in the RX side of the chip that is used
diff --git a/drivers/net/bnxt/rte_pmd_bnxt.c b/drivers/net/bnxt/rte_pmd_bnxt.c
index a3134074..cae95f8f 100644
--- a/drivers/net/bnxt/rte_pmd_bnxt.c
+++ b/drivers/net/bnxt/rte_pmd_bnxt.c
@@ -36,7 +36,7 @@
#include <unistd.h>
#include <rte_dev.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_byteorder.h>
@@ -57,7 +57,7 @@ int bnxt_rcv_msg_from_vf(struct bnxt *bp, uint16_t vf_id, void *msg)
ret_param.msg = msg;
_rte_eth_dev_callback_process(bp->eth_dev, RTE_ETH_EVENT_VF_MBOX,
- NULL, &ret_param);
+ &ret_param);
/* Default to approve */
if (ret_param.retval == RTE_PMD_BNXT_MB_EVENT_PROCEED)
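The dropped NULL tracks the updated internal callback helper, whose prototype in rte_ethdev_driver.h (as of the release this patch targets) no longer takes a separate user argument:

    int _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
                                      enum rte_eth_event_type event,
                                      void *ret_param);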
@@ -85,7 +85,7 @@ int rte_pmd_bnxt_set_tx_loopback(uint16_t port, uint8_t on)
bp = (struct bnxt *)eth_dev->data->dev_private;
if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Attempt to set Tx loopback on non-PF port %d!\n",
port);
return -ENOTSUP;
@@ -127,7 +127,7 @@ int rte_pmd_bnxt_set_all_queues_drop_en(uint16_t port, uint8_t on)
bp = (struct bnxt *)eth_dev->data->dev_private;
if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Attempt to set all queues drop on non-PF port!\n");
return -ENOTSUP;
}
@@ -140,7 +140,7 @@ int rte_pmd_bnxt_set_all_queues_drop_en(uint16_t port, uint8_t on)
bp->vnic_info[i].bd_stall = !on;
rc = bnxt_hwrm_vnic_cfg(bp, &bp->vnic_info[i]);
if (rc) {
- RTE_LOG(ERR, PMD, "Failed to update PF VNIC %d.\n", i);
+ PMD_DRV_LOG(ERR, "Failed to update PF VNIC %d.\n", i);
return rc;
}
}
@@ -151,7 +151,7 @@ int rte_pmd_bnxt_set_all_queues_drop_en(uint16_t port, uint8_t on)
rte_pmd_bnxt_set_all_queues_drop_en_cb, &on,
bnxt_hwrm_vnic_cfg);
if (rc) {
- RTE_LOG(ERR, PMD, "Failed to update VF VNIC %d.\n", i);
+ PMD_DRV_LOG(ERR, "Failed to update VF VNIC %d.\n", i);
break;
}
}
@@ -180,7 +180,7 @@ int rte_pmd_bnxt_set_vf_mac_addr(uint16_t port, uint16_t vf,
return -EINVAL;
if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Attempt to set VF %d mac address on non-PF port %d!\n",
vf, port);
return -ENOTSUP;
@@ -224,7 +224,7 @@ int rte_pmd_bnxt_set_vf_rate_limit(uint16_t port, uint16_t vf,
/* Requested BW can't be greater than link speed */
if (tot_rate > eth_dev->data->dev_link.link_speed) {
- RTE_LOG(ERR, PMD, "Rate > Link speed. Set to %d\n", tot_rate);
+ PMD_DRV_LOG(ERR, "Rate > Link speed. Set to %d\n", tot_rate);
return -EINVAL;
}
@@ -262,7 +262,7 @@ int rte_pmd_bnxt_set_vf_mac_anti_spoof(uint16_t port, uint16_t vf, uint8_t on)
bp = (struct bnxt *)dev->data->dev_private;
if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Attempt to set mac spoof on non-PF port %d!\n", port);
return -EINVAL;
}
@@ -314,7 +314,7 @@ int rte_pmd_bnxt_set_vf_vlan_anti_spoof(uint16_t port, uint16_t vf, uint8_t on)
bp = (struct bnxt *)dev->data->dev_private;
if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Attempt to set VLAN spoof on non-PF port %d!\n", port);
return -EINVAL;
}
@@ -333,7 +333,7 @@ int rte_pmd_bnxt_set_vf_vlan_anti_spoof(uint16_t port, uint16_t vf, uint8_t on)
rc = -1;
}
} else {
- RTE_LOG(ERR, PMD, "Failed to update VF VNIC %d.\n", vf);
+ PMD_DRV_LOG(ERR, "Failed to update VF VNIC %d.\n", vf);
}
return rc;
@@ -367,7 +367,7 @@ rte_pmd_bnxt_set_vf_vlan_stripq(uint16_t port, uint16_t vf, uint8_t on)
return -EINVAL;
if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Attempt to set VF %d stripq on non-PF port %d!\n",
vf, port);
return -ENOTSUP;
@@ -377,7 +377,7 @@ rte_pmd_bnxt_set_vf_vlan_stripq(uint16_t port, uint16_t vf, uint8_t on)
rte_pmd_bnxt_set_vf_vlan_stripq_cb, &on,
bnxt_hwrm_vnic_cfg);
if (rc)
- RTE_LOG(ERR, PMD, "Failed to update VF VNIC %d.\n", vf);
+ PMD_DRV_LOG(ERR, "Failed to update VF VNIC %d.\n", vf);
return rc;
}
@@ -407,7 +407,7 @@ int rte_pmd_bnxt_set_vf_rxmode(uint16_t port, uint16_t vf,
return -EINVAL;
if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG) {
- RTE_LOG(ERR, PMD, "Currently cannot toggle this setting\n");
+ PMD_DRV_LOG(ERR, "Currently cannot toggle this setting\n");
return -ENOTSUP;
}
@@ -430,7 +430,7 @@ int rte_pmd_bnxt_set_vf_rxmode(uint16_t port, uint16_t vf,
&bp->pf.vf_info[vf].l2_rx_mask,
bnxt_set_rx_mask_no_vlan);
if (rc)
- RTE_LOG(ERR, PMD, "bnxt_hwrm_func_vf_vnic_set_rxmask failed\n");
+ PMD_DRV_LOG(ERR, "bnxt_hwrm_func_vf_vnic_set_rxmask failed\n");
return rc;
}
@@ -442,7 +442,7 @@ static int bnxt_set_vf_table(struct bnxt *bp, uint16_t vf)
struct bnxt_vnic_info vnic;
if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Attempt to set VLAN table on non-PF port!\n");
return -EINVAL;
}
@@ -455,7 +455,7 @@ static int bnxt_set_vf_table(struct bnxt *bp, uint16_t vf)
/* This simply indicates there's no driver loaded.
* This is not an error.
*/
- RTE_LOG(ERR, PMD, "Unable to get default VNIC for VF %d\n", vf);
+ PMD_DRV_LOG(ERR, "Unable to get default VNIC for VF %d\n", vf);
} else {
memset(&vnic, 0, sizeof(vnic));
vnic.fw_vnic_id = dflt_vnic;
@@ -518,9 +518,9 @@ int rte_pmd_bnxt_set_vf_vlan_filter(uint16_t port, uint16_t vlan,
/* Now check that there's space */
if (cnt == getpagesize() / sizeof(struct
bnxt_vlan_antispoof_table_entry)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"VLAN anti-spoof table is full\n");
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"VF %d cannot add VLAN %u\n",
i, vlan);
rc = -1;
@@ -585,7 +585,7 @@ int rte_pmd_bnxt_get_vf_stats(uint16_t port,
return -EINVAL;
if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Attempt to get VF %d stats on non-PF port %d!\n",
vf_id, port);
return -ENOTSUP;
@@ -612,7 +612,7 @@ int rte_pmd_bnxt_reset_vf_stats(uint16_t port,
return -EINVAL;
if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Attempt to reset VF %d stats on non-PF port %d!\n",
vf_id, port);
return -ENOTSUP;
@@ -638,7 +638,7 @@ int rte_pmd_bnxt_get_vf_rx_status(uint16_t port, uint16_t vf_id)
return -EINVAL;
if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Attempt to query VF %d RX stats on non-PF port %d!\n",
vf_id, port);
return -ENOTSUP;
@@ -665,7 +665,7 @@ int rte_pmd_bnxt_get_vf_tx_drop_count(uint16_t port, uint16_t vf_id,
return -EINVAL;
if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Attempt to query VF %d TX drops on non-PF port %d!\n",
vf_id, port);
return -ENOTSUP;
@@ -697,7 +697,7 @@ int rte_pmd_bnxt_mac_addr_add(uint16_t port, struct ether_addr *addr,
return -EINVAL;
if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Attempt to config VF %d MAC on non-PF port %d!\n",
vf_id, port);
return -ENOTSUP;
@@ -773,7 +773,7 @@ rte_pmd_bnxt_set_vf_vlan_insert(uint16_t port, uint16_t vf,
return -EINVAL;
if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Attempt to set VF %d vlan insert on non-PF port %d!\n",
vf, port);
return -ENOTSUP;
@@ -807,7 +807,7 @@ int rte_pmd_bnxt_set_vf_persist_stats(uint16_t port, uint16_t vf, uint8_t on)
bp = (struct bnxt *)dev->data->dev_private;
if (!BNXT_PF(bp)) {
- RTE_LOG(ERR, PMD,
+ PMD_DRV_LOG(ERR,
"Attempt to set persist stats on non-PF port %d!\n",
port);
return -EINVAL;
diff --git a/drivers/net/bnxt/rte_pmd_bnxt.h b/drivers/net/bnxt/rte_pmd_bnxt.h
index f881d30d..cd7227ac 100644
--- a/drivers/net/bnxt/rte_pmd_bnxt.h
+++ b/drivers/net/bnxt/rte_pmd_bnxt.h
@@ -34,7 +34,7 @@
#ifndef _PMD_BNXT_H_
#define _PMD_BNXT_H_
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
/*
* Response sent back to the caller after callback