Diffstat (limited to 'drivers/net/bnxt')
-rw-r--r--  drivers/net/bnxt/bnxt_ethdev.c   35
-rw-r--r--  drivers/net/bnxt/bnxt_filter.c    6
-rw-r--r--  drivers/net/bnxt/bnxt_hwrm.c     51
-rw-r--r--  drivers/net/bnxt/bnxt_stats.c     3
-rw-r--r--  drivers/net/bnxt/bnxt_txr.c      51
-rw-r--r--  drivers/net/bnxt/bnxt_txr.h      10
-rw-r--r--  drivers/net/bnxt/bnxt_vnic.c      5
-rw-r--r--  drivers/net/bnxt/bnxt_vnic.h      6
8 files changed, 128 insertions, 39 deletions
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index 52c511ee..7466a642 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -248,6 +248,17 @@ static int bnxt_init_chip(struct bnxt *bp)
/* VNIC configuration */
for (i = 0; i < bp->nr_vnics; i++) {
struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
+ uint32_t size = sizeof(*vnic->fw_grp_ids) * bp->max_ring_grps;
+
+ vnic->fw_grp_ids = rte_zmalloc("vnic_fw_grp_ids", size, 0);
+ if (!vnic->fw_grp_ids) {
+ RTE_LOG(ERR, PMD,
+ "Failed to alloc %d bytes for group ids\n",
+ size);
+ rc = -ENOMEM;
+ goto err_out;
+ }
+ memset(vnic->fw_grp_ids, -1, size);
rc = bnxt_hwrm_vnic_alloc(bp, vnic);
if (rc) {
@@ -429,7 +440,7 @@ static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
/* For the sake of symmetry, max_rx_queues = max_tx_queues */
dev_info->max_rx_queues = max_rx_rings;
dev_info->max_tx_queues = max_rx_rings;
- dev_info->reta_size = bp->max_rsscos_ctx;
+ dev_info->reta_size = HW_HASH_INDEX_SIZE;
dev_info->hash_key_size = 40;
max_vnics = bp->max_vnics;
@@ -1268,9 +1279,9 @@ static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
struct bnxt_vnic_info *vnic;
unsigned int i;
int rc = 0;
- uint32_t en = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN |
- HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK;
- uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN;
+ uint32_t en = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
+ HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
+ uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN;
/* Cycle through all VNICs */
for (i = 0; i < bp->nr_vnics; i++) {
@@ -1317,8 +1328,8 @@ static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
memcpy(new_filter->l2_addr, filter->l2_addr,
ETHER_ADDR_LEN);
/* MAC + VLAN ID filter */
- new_filter->l2_ovlan = vlan_id;
- new_filter->l2_ovlan_mask = 0xF000;
+ new_filter->l2_ivlan = vlan_id;
+ new_filter->l2_ivlan_mask = 0xF000;
new_filter->enables |= en;
rc = bnxt_hwrm_set_l2_filter(bp,
vnic->fw_vnic_id,
@@ -1541,6 +1552,7 @@ static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
for (i = 0; i < bp->nr_vnics; i++) {
struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
+ uint16_t size = 0;
vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
ETHER_CRC_LEN + VLAN_TAG_SIZE * 2;
@@ -1548,9 +1560,14 @@ static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
if (rc)
break;
- rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
- if (rc)
- return rc;
+ size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
+ size -= RTE_PKTMBUF_HEADROOM;
+
+ if (size < new_mtu) {
+ rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
+ if (rc)
+ return rc;
+ }
}
return rc;
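The bnxt_mtu_set_op() hunk above only reprograms the VNIC placement mode when a single mbuf can no longer hold the new MTU. A minimal sketch of that check, assuming an mbuf pool created by the application (the helper name is made up for illustration):

    #include <stdbool.h>
    #include <stdint.h>
    #include <rte_mbuf.h>

    /* Return true when one mbuf's data area still fits the new MTU, in
     * which case bnxt_hwrm_vnic_plcmode_cfg() does not need to be called. */
    static bool
    mtu_fits_single_mbuf(struct rte_mempool *pool, uint16_t new_mtu)
    {
        uint16_t room = rte_pktmbuf_data_room_size(pool) - RTE_PKTMBUF_HEADROOM;

        return room >= new_mtu;
    }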
diff --git a/drivers/net/bnxt/bnxt_filter.c b/drivers/net/bnxt/bnxt_filter.c
index 8d3ddf1d..67daec41 100644
--- a/drivers/net/bnxt/bnxt_filter.c
+++ b/drivers/net/bnxt/bnxt_filter.c
@@ -1053,9 +1053,13 @@ bnxt_match_filter(struct bnxt *bp, struct bnxt_filter_info *nf)
sizeof(nf->dst_ipaddr_mask))) {
if (mf->dst_id == nf->dst_id)
return -EEXIST;
- /* Same Flow, Different queue
+ /*
+ * Same Flow, Different queue
* Clear the old ntuple filter
+ * Reuse the matching L2 filter
+ * ID for the new filter
*/
+ nf->fw_l2_filter_id = mf->fw_l2_filter_id;
if (nf->filter_type == HWRM_CFA_EM_FILTER)
bnxt_hwrm_clear_em_filter(bp, mf);
if (nf->filter_type == HWRM_CFA_NTUPLE_FILTER)
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index 22f092f1..db3222f4 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -197,6 +197,10 @@ err_ret:
RTE_LOG(ERR, PMD, "%s failed rc:%d\n", \
__func__, rc); \
rte_spinlock_unlock(&bp->hwrm_lock); \
+ if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
+ rc = -EACCES; \
+ else if (rc > 0) \
+ rc = -EINVAL; \
return rc; \
} \
if (resp->error_code) { \
@@ -218,6 +222,10 @@ err_ret:
"%s error %d\n", __func__, rc); \
} \
rte_spinlock_unlock(&bp->hwrm_lock); \
+ if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
+ rc = -EACCES; \
+ else if (rc > 0) \
+ rc = -EINVAL; \
return rc; \
} \
} while (0)
@@ -406,13 +414,13 @@ int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
req.l2_ovlan = filter->l2_ovlan;
if (enables &
HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN)
- req.l2_ovlan = filter->l2_ivlan;
+ req.l2_ivlan = filter->l2_ivlan;
if (enables &
HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
req.l2_ovlan_mask = filter->l2_ovlan_mask;
if (enables &
HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK)
- req.l2_ovlan_mask = filter->l2_ivlan_mask;
+ req.l2_ivlan_mask = filter->l2_ivlan_mask;
if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
req.src_id = rte_cpu_to_le_32(filter->src_id);
if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
@@ -1092,8 +1100,9 @@ int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
/* map ring groups to this vnic */
RTE_LOG(DEBUG, PMD, "Alloc VNIC. Start %x, End %x\n",
vnic->start_grp_id, vnic->end_grp_id);
- for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++)
+ for (i = vnic->start_grp_id, j = 0; i < vnic->end_grp_id; i++, j++)
vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
+
vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
@@ -1385,6 +1394,11 @@ int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
uint16_t size;
+ if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
+ RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id);
+ return rc;
+ }
+
HWRM_PREP(req, VNIC_PLCMODES_CFG);
req.flags = rte_cpu_to_le_32(
@@ -1798,6 +1812,7 @@ int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
else
rc = bnxt_hwrm_clear_l2_filter(bp, filter);
+ STAILQ_REMOVE(&vnic->filter, filter, bnxt_filter_info, next);
//if (rc)
//break;
}
@@ -1885,6 +1900,8 @@ void bnxt_free_all_hwrm_resources(struct bnxt *bp)
bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
bnxt_hwrm_vnic_free(bp, vnic);
+
+ rte_free(vnic->fw_grp_ids);
}
/* Ring resources */
bnxt_free_all_hwrm_rings(bp);
@@ -3097,13 +3114,12 @@ int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
- HWRM_CHECK_RESULT();
- HWRM_UNLOCK();
-
if (rc == 0)
memcpy(data, buf, len > buflen ? buflen : len);
rte_free(buf);
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
return rc;
}
@@ -3135,12 +3151,13 @@ int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
req.offset = rte_cpu_to_le_32(offset);
req.len = rte_cpu_to_le_32(length);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
- HWRM_CHECK_RESULT();
- HWRM_UNLOCK();
if (rc == 0)
memcpy(data, buf, length);
rte_free(buf);
+ HWRM_CHECK_RESULT();
+ HWRM_UNLOCK();
+
return rc;
}
@@ -3171,14 +3188,6 @@ int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
rte_iova_t dma_handle;
uint8_t *buf;
- HWRM_PREP(req, NVM_WRITE);
-
- req.dir_type = rte_cpu_to_le_16(dir_type);
- req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal);
- req.dir_ext = rte_cpu_to_le_16(dir_ext);
- req.dir_attr = rte_cpu_to_le_16(dir_attr);
- req.dir_data_length = rte_cpu_to_le_32(data_len);
-
buf = rte_malloc("nvm_write", data_len, 0);
rte_mem_lock_page(buf);
if (!buf)
@@ -3191,14 +3200,22 @@ int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
return -ENOMEM;
}
memcpy(buf, data, data_len);
+
+ HWRM_PREP(req, NVM_WRITE);
+
+ req.dir_type = rte_cpu_to_le_16(dir_type);
+ req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal);
+ req.dir_ext = rte_cpu_to_le_16(dir_ext);
+ req.dir_attr = rte_cpu_to_le_16(dir_attr);
+ req.dir_data_length = rte_cpu_to_le_32(data_len);
req.host_src_addr = rte_cpu_to_le_64(dma_handle);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+ rte_free(buf);
HWRM_CHECK_RESULT();
HWRM_UNLOCK();
- rte_free(buf);
return rc;
}
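The HWRM_CHECK_RESULT changes above convert positive firmware status codes into negative errnos before returning, so callers can rely on the usual "negative on error" convention. A sketch of that mapping, assuming the HWRM status codes from the driver's HSI header (the helper function is illustrative, not part of the driver):

    #include <errno.h>

    /* Map a raw HWRM completion status to a negative errno. */
    static int
    hwrm_status_to_errno(int rc)
    {
        if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED)
            return -EACCES;
        if (rc > 0)           /* any other firmware error code */
            return -EINVAL;
        return rc;            /* 0 or an already-negative errno */
    }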
diff --git a/drivers/net/bnxt/bnxt_stats.c b/drivers/net/bnxt/bnxt_stats.c
index fe83d370..f8bb4ed9 100644
--- a/drivers/net/bnxt/bnxt_stats.c
+++ b/drivers/net/bnxt/bnxt_stats.c
@@ -296,6 +296,7 @@ int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev,
count = 0;
for (i = 0; i < RTE_DIM(bnxt_rx_stats_strings); i++) {
uint64_t *rx_stats = (uint64_t *)bp->hw_rx_port_stats;
+ xstats[count].id = count;
xstats[count].value = rte_le_to_cpu_64(
*(uint64_t *)((char *)rx_stats +
bnxt_rx_stats_strings[i].offset));
@@ -304,6 +305,7 @@ int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev,
for (i = 0; i < RTE_DIM(bnxt_tx_stats_strings); i++) {
uint64_t *tx_stats = (uint64_t *)bp->hw_tx_port_stats;
+ xstats[count].id = count;
xstats[count].value = rte_le_to_cpu_64(
*(uint64_t *)((char *)tx_stats +
bnxt_tx_stats_strings[i].offset));
@@ -311,6 +313,7 @@ int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev,
}
/* The Tx drop pkts aka the Anti spoof counter */
+ xstats[count].id = count;
xstats[count].value = rte_le_to_cpu_64(tx_drop_pkts);
count++;
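The xstats hunks set the id field on every entry in addition to the value. A sketch of why that matters on the application side, assuming a started port (dump_xstats() is a hypothetical helper, not part of the driver):

    #include <inttypes.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <rte_ethdev.h>

    static void
    dump_xstats(uint16_t port_id)
    {
        int n = rte_eth_xstats_get(port_id, NULL, 0);
        if (n <= 0)
            return;

        struct rte_eth_xstat *vals = calloc(n, sizeof(*vals));
        struct rte_eth_xstat_name *names = calloc(n, sizeof(*names));

        if (vals != NULL && names != NULL) {
            rte_eth_xstats_get_names(port_id, names, n);
            rte_eth_xstats_get(port_id, vals, n);

            /* vals[i].id indexes the name table; without the id filled
             * in by the PMD, this association would be lost. */
            for (int i = 0; i < n; i++)
                printf("%s: %" PRIu64 "\n",
                       names[vals[i].id].name, vals[i].value);
        }

        free(vals);
        free(names);
    }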
diff --git a/drivers/net/bnxt/bnxt_txr.c b/drivers/net/bnxt/bnxt_txr.c
index 03d2652c..f5ed03f1 100644
--- a/drivers/net/bnxt/bnxt_txr.c
+++ b/drivers/net/bnxt/bnxt_txr.c
@@ -161,7 +161,9 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
if (tx_pkt->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_TCP_CKSUM |
PKT_TX_UDP_CKSUM | PKT_TX_IP_CKSUM |
- PKT_TX_VLAN_PKT | PKT_TX_OUTER_IP_CKSUM))
+ PKT_TX_VLAN_PKT | PKT_TX_OUTER_IP_CKSUM |
+ PKT_TX_TUNNEL_GRE | PKT_TX_TUNNEL_VXLAN |
+ PKT_TX_TUNNEL_GENEVE))
long_bd = true;
tx_buf = &txr->tx_buf_ring[txr->tx_prod];
@@ -222,16 +224,46 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
/* Outer IP, Inner IP, Inner TCP/UDP CSO */
txbd1->lflags |= TX_BD_FLG_TIP_IP_TCP_UDP_CHKSUM;
txbd1->mss = 0;
+ } else if ((tx_pkt->ol_flags & PKT_TX_OIP_IIP_TCP_CKSUM) ==
+ PKT_TX_OIP_IIP_TCP_CKSUM) {
+ /* Outer IP, Inner IP, Inner TCP/UDP CSO */
+ txbd1->lflags |= TX_BD_FLG_TIP_IP_TCP_UDP_CHKSUM;
+ txbd1->mss = 0;
+ } else if ((tx_pkt->ol_flags & PKT_TX_OIP_IIP_UDP_CKSUM) ==
+ PKT_TX_OIP_IIP_UDP_CKSUM) {
+ /* Outer IP, Inner IP, Inner TCP/UDP CSO */
+ txbd1->lflags |= TX_BD_FLG_TIP_IP_TCP_UDP_CHKSUM;
+ txbd1->mss = 0;
} else if ((tx_pkt->ol_flags & PKT_TX_IIP_TCP_UDP_CKSUM) ==
PKT_TX_IIP_TCP_UDP_CKSUM) {
/* (Inner) IP, (Inner) TCP/UDP CSO */
txbd1->lflags |= TX_BD_FLG_IP_TCP_UDP_CHKSUM;
txbd1->mss = 0;
+ } else if ((tx_pkt->ol_flags & PKT_TX_IIP_UDP_CKSUM) ==
+ PKT_TX_IIP_UDP_CKSUM) {
+ /* (Inner) IP, (Inner) TCP/UDP CSO */
+ txbd1->lflags |= TX_BD_FLG_IP_TCP_UDP_CHKSUM;
+ txbd1->mss = 0;
+ } else if ((tx_pkt->ol_flags & PKT_TX_IIP_TCP_CKSUM) ==
+ PKT_TX_IIP_TCP_CKSUM) {
+ /* (Inner) IP, (Inner) TCP/UDP CSO */
+ txbd1->lflags |= TX_BD_FLG_IP_TCP_UDP_CHKSUM;
+ txbd1->mss = 0;
} else if ((tx_pkt->ol_flags & PKT_TX_OIP_TCP_UDP_CKSUM) ==
PKT_TX_OIP_TCP_UDP_CKSUM) {
/* Outer IP, (Inner) TCP/UDP CSO */
txbd1->lflags |= TX_BD_FLG_TIP_TCP_UDP_CHKSUM;
txbd1->mss = 0;
+ } else if ((tx_pkt->ol_flags & PKT_TX_OIP_UDP_CKSUM) ==
+ PKT_TX_OIP_UDP_CKSUM) {
+ /* Outer IP, (Inner) TCP/UDP CSO */
+ txbd1->lflags |= TX_BD_FLG_TIP_TCP_UDP_CHKSUM;
+ txbd1->mss = 0;
+ } else if ((tx_pkt->ol_flags & PKT_TX_OIP_TCP_CKSUM) ==
+ PKT_TX_OIP_TCP_CKSUM) {
+ /* Outer IP, (Inner) TCP/UDP CSO */
+ txbd1->lflags |= TX_BD_FLG_TIP_TCP_UDP_CHKSUM;
+ txbd1->mss = 0;
} else if ((tx_pkt->ol_flags & PKT_TX_OIP_IIP_CKSUM) ==
PKT_TX_OIP_IIP_CKSUM) {
/* Outer IP, Inner IP CSO */
@@ -242,11 +274,23 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
/* TCP/UDP CSO */
txbd1->lflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM;
txbd1->mss = 0;
- } else if (tx_pkt->ol_flags & PKT_TX_IP_CKSUM) {
+ } else if ((tx_pkt->ol_flags & PKT_TX_TCP_CKSUM) ==
+ PKT_TX_TCP_CKSUM) {
+ /* TCP/UDP CSO */
+ txbd1->lflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM;
+ txbd1->mss = 0;
+ } else if ((tx_pkt->ol_flags & PKT_TX_UDP_CKSUM) ==
+ PKT_TX_UDP_CKSUM) {
+ /* TCP/UDP CSO */
+ txbd1->lflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM;
+ txbd1->mss = 0;
+ } else if ((tx_pkt->ol_flags & PKT_TX_IP_CKSUM) ==
+ PKT_TX_IP_CKSUM) {
/* IP CSO */
txbd1->lflags |= TX_BD_LONG_LFLAGS_IP_CHKSUM;
txbd1->mss = 0;
- } else if (tx_pkt->ol_flags & PKT_TX_OUTER_IP_CKSUM) {
+ } else if ((tx_pkt->ol_flags & PKT_TX_OUTER_IP_CKSUM) ==
+ PKT_TX_OUTER_IP_CKSUM) {
/* IP CSO */
txbd1->lflags |= TX_BD_LONG_LFLAGS_T_IP_CHKSUM;
txbd1->mss = 0;
@@ -270,6 +314,7 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
}
txbd->flags_type |= TX_BD_LONG_FLAGS_PACKET_END;
+ txbd1->lflags = rte_cpu_to_le_32(txbd1->lflags);
txr->tx_prod = RING_NEXT(txr->tx_ring_struct, txr->tx_prod);
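The new checksum branches above all use the same masked-equality pattern, checked from the most specific flag combination down to the least specific, so that, for example, an outer-IP + inner-IP + inner-L4 packet is not caught early by the plain PKT_TX_IP_CKSUM test. A sketch of the pattern (ol_flags_match() is illustrative, not a driver function):

    #include <stdbool.h>
    #include <stdint.h>

    /* A branch fires only when every bit of the combination is set. */
    static inline bool
    ol_flags_match(uint64_t ol_flags, uint64_t combo)
    {
        return (ol_flags & combo) == combo;
    }

    /* Usage mirrors the ordering above, e.g.:
     *   if (ol_flags_match(tx_pkt->ol_flags, PKT_TX_OIP_IIP_TCP_UDP_CKSUM))
     *       ... most specific case first ...
     *   else if (ol_flags_match(tx_pkt->ol_flags, PKT_TX_IIP_TCP_UDP_CKSUM))
     *       ...
     */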
diff --git a/drivers/net/bnxt/bnxt_txr.h b/drivers/net/bnxt/bnxt_txr.h
index 2feac51d..0bc217f0 100644
--- a/drivers/net/bnxt/bnxt_txr.h
+++ b/drivers/net/bnxt/bnxt_txr.h
@@ -71,10 +71,20 @@ uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
#define PKT_TX_OIP_IIP_TCP_UDP_CKSUM (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM | \
PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM)
+#define PKT_TX_OIP_IIP_UDP_CKSUM (PKT_TX_UDP_CKSUM | \
+ PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM)
+#define PKT_TX_OIP_IIP_TCP_CKSUM (PKT_TX_TCP_CKSUM | \
+ PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM)
#define PKT_TX_IIP_TCP_UDP_CKSUM (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM | \
PKT_TX_IP_CKSUM)
+#define PKT_TX_IIP_TCP_CKSUM (PKT_TX_TCP_CKSUM | PKT_TX_IP_CKSUM)
+#define PKT_TX_IIP_UDP_CKSUM (PKT_TX_UDP_CKSUM | PKT_TX_IP_CKSUM)
#define PKT_TX_OIP_TCP_UDP_CKSUM (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM | \
PKT_TX_OUTER_IP_CKSUM)
+#define PKT_TX_OIP_UDP_CKSUM (PKT_TX_UDP_CKSUM | \
+ PKT_TX_OUTER_IP_CKSUM)
+#define PKT_TX_OIP_TCP_CKSUM (PKT_TX_TCP_CKSUM | \
+ PKT_TX_OUTER_IP_CKSUM)
#define PKT_TX_OIP_IIP_CKSUM (PKT_TX_IP_CKSUM | \
PKT_TX_OUTER_IP_CKSUM)
#define PKT_TX_TCP_UDP_CKSUM (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)
diff --git a/drivers/net/bnxt/bnxt_vnic.c b/drivers/net/bnxt/bnxt_vnic.c
index 5bac2605..293f9604 100644
--- a/drivers/net/bnxt/bnxt_vnic.c
+++ b/drivers/net/bnxt/bnxt_vnic.c
@@ -67,7 +67,7 @@ void bnxt_init_vnics(struct bnxt *bp)
{
struct bnxt_vnic_info *vnic;
uint16_t max_vnics;
- int i, j;
+ int i;
max_vnics = bp->max_vnics;
STAILQ_INIT(&bp->free_vnic_list);
@@ -78,9 +78,6 @@ void bnxt_init_vnics(struct bnxt *bp)
vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
- for (j = 0; j < MAX_QUEUES_PER_VNIC; j++)
- vnic->fw_grp_ids[j] = (uint16_t)HWRM_NA_SIGNATURE;
-
prandom_bytes(vnic->rss_hash_key, HW_HASH_KEY_SIZE);
STAILQ_INIT(&vnic->filter);
STAILQ_INIT(&vnic->flow_list);
diff --git a/drivers/net/bnxt/bnxt_vnic.h b/drivers/net/bnxt/bnxt_vnic.h
index 875dc3c1..f18fb0a4 100644
--- a/drivers/net/bnxt/bnxt_vnic.h
+++ b/drivers/net/bnxt/bnxt_vnic.h
@@ -43,13 +43,9 @@ struct bnxt_vnic_info {
uint16_t fw_vnic_id; /* returned by Chimp during alloc */
uint16_t rss_rule;
-#define MAX_NUM_TRAFFIC_CLASSES 8
-#define MAX_NUM_RSS_QUEUES_PER_VNIC 16
-#define MAX_QUEUES_PER_VNIC (MAX_NUM_RSS_QUEUES_PER_VNIC + \
- MAX_NUM_TRAFFIC_CLASSES)
uint16_t start_grp_id;
uint16_t end_grp_id;
- uint16_t fw_grp_ids[MAX_QUEUES_PER_VNIC];
+ uint16_t *fw_grp_ids;
uint16_t dflt_ring_grp;
uint16_t mru;
uint16_t hash_type;
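Taken together, the bnxt_vnic.h and bnxt_ethdev.c hunks replace the fixed fw_grp_ids array with one sized at run time from max_ring_grps. A minimal sketch of the resulting allocate-and-initialize step, assuming the driver's own bnxt_vnic_info definition (the helper name is made up):

    #include <errno.h>
    #include <stdint.h>
    #include <string.h>
    #include <rte_malloc.h>

    static int
    vnic_alloc_fw_grp_ids(struct bnxt_vnic_info *vnic, uint16_t max_ring_grps)
    {
        uint32_t size = sizeof(*vnic->fw_grp_ids) * max_ring_grps;

        vnic->fw_grp_ids = rte_zmalloc("vnic_fw_grp_ids", size, 0);
        if (vnic->fw_grp_ids == NULL)
            return -ENOMEM;

        /* 0xFF bytes leave each 16-bit entry as 0xFFFF, i.e. "not yet
         * mapped to a ring group"; bnxt_hwrm_vnic_alloc() fills in the
         * real group ids, and bnxt_free_all_hwrm_resources() must
         * rte_free() the array. */
        memset(vnic->fw_grp_ids, -1, size);
        return 0;
    }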