author    | Luca Boccassi <luca.boccassi@gmail.com> | 2017-11-08 14:15:11 +0000
committer | Luca Boccassi <luca.boccassi@gmail.com> | 2017-11-08 14:45:54 +0000
commit    | 055c52583a2794da8ba1e85a48cce3832372b12f (patch)
tree      | 8ceb1cb78fbb46a0f341f8ee24feb3c6b5540013 /drivers/net/bnxt
parent    | f239aed5e674965691846e8ce3f187dd47523689 (diff)
New upstream version 17.11-rc3
Change-Id: I6a5baa40612fe0c20f30b5fa773a6cbbac63a685
Signed-off-by: Luca Boccassi <luca.boccassi@gmail.com>
Diffstat (limited to 'drivers/net/bnxt')
28 files changed, 5507 insertions, 844 deletions
diff --git a/drivers/net/bnxt/Makefile b/drivers/net/bnxt/Makefile
index b03f65dc..2aa04411 100644
--- a/drivers/net/bnxt/Makefile
+++ b/drivers/net/bnxt/Makefile
@@ -40,10 +40,13 @@ LIB = librte_pmd_bnxt.a
 
 EXPORT_MAP := rte_pmd_bnxt_version.map
 
-LIBABIVER := 1
+LIBABIVER := 2
 
 CFLAGS += -O3
 CFLAGS += $(WERROR_FLAGS)
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
+LDLIBS += -lrte_bus_pci
 
 EXPORT_MAP := rte_pmd_bnxt_version.map
diff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h
index 405d94de..646fe79e 100644
--- a/drivers/net/bnxt/bnxt.h
+++ b/drivers/net/bnxt/bnxt.h
@@ -39,6 +39,7 @@
 #include <sys/queue.h>
 
 #include <rte_pci.h>
+#include <rte_bus_pci.h>
 #include <rte_ethdev.h>
 #include <rte_memory.h>
 #include <rte_lcore.h>
@@ -126,13 +127,13 @@ struct bnxt_pf_info {
 #define BNXT_FIRST_VF_FID	128
 #define BNXT_PF_RINGS_USED(bp)	bnxt_get_num_queues(bp)
 #define BNXT_PF_RINGS_AVAIL(bp)	(bp->pf.max_cp_rings - BNXT_PF_RINGS_USED(bp))
-	uint8_t		port_id;
+	uint16_t	port_id;
 	uint16_t	first_vf_id;
 	uint16_t	active_vfs;
 	uint16_t	max_vfs;
 	uint32_t	func_cfg_flags;
 	void		*vf_req_buf;
-	phys_addr_t	vf_req_buf_dma_addr;
+	rte_iova_t	vf_req_buf_dma_addr;
 	uint32_t	vf_req_fwd[8];
 	uint16_t	total_vnics;
 	struct bnxt_child_vf_info	*vf_info;
@@ -171,11 +172,18 @@ struct bnxt_cos_queue_info {
 	uint8_t	profile;
 };
 
+struct rte_flow {
+	STAILQ_ENTRY(rte_flow) next;
+	struct bnxt_filter_info *filter;
+	struct bnxt_vnic_info	*vnic;
+};
+
 #define BNXT_HWRM_SHORT_REQ_LEN	sizeof(struct hwrm_short_input)
 struct bnxt {
 	void		*bar0;
 
 	struct rte_eth_dev	*eth_dev;
+	struct rte_eth_rss_conf	rss_conf;
 	struct rte_pci_device	*pdev;
 
 	uint32_t	flags;
@@ -184,6 +192,7 @@ struct bnxt {
 #define BNXT_FLAG_PORT_STATS	(1 << 2)
 #define BNXT_FLAG_JUMBO		(1 << 3)
 #define BNXT_FLAG_SHORT_CMD	(1 << 4)
+#define BNXT_FLAG_UPDATE_HASH	(1 << 5)
 #define BNXT_PF(bp)		(!((bp)->flags & BNXT_FLAG_VF))
 #define BNXT_VF(bp)		((bp)->flags & BNXT_FLAG_VF)
 #define BNXT_NPAR_ENABLED(bp)	((bp)->port_partition_type)
@@ -194,14 +203,14 @@ struct bnxt {
 	struct bnxt_rx_queue **rx_queues;
 	const void	*rx_mem_zone;
 	struct rx_port_stats	*hw_rx_port_stats;
-	phys_addr_t	hw_rx_port_stats_map;
+	rte_iova_t	hw_rx_port_stats_map;
 
 	unsigned int	tx_nr_rings;
 	unsigned int	tx_cp_nr_rings;
 	struct bnxt_tx_queue **tx_queues;
 	const void	*tx_mem_zone;
 	struct tx_port_stats	*hw_tx_port_stats;
-	phys_addr_t	hw_tx_port_stats_map;
+	rte_iova_t	hw_tx_port_stats_map;
 
 	/* Default completion ring */
 	struct bnxt_cp_ring_info	*def_cp_ring;
@@ -217,7 +226,7 @@ struct bnxt {
 	STAILQ_HEAD(, bnxt_filter_info)	free_filter_list;
 
 	/* VNIC pointer for flow filter (VMDq) pools */
-#define MAX_FF_POOLS	ETH_64_POOLS
+#define MAX_FF_POOLS	256
 	STAILQ_HEAD(, bnxt_vnic_info)	ff_pool[MAX_FF_POOLS];
 
 	struct bnxt_irq	*irq_tbl;
@@ -227,9 +236,9 @@ struct bnxt {
 	uint16_t	hwrm_cmd_seq;
 	void		*hwrm_cmd_resp_addr;
-	phys_addr_t	hwrm_cmd_resp_dma_addr;
+	rte_iova_t	hwrm_cmd_resp_dma_addr;
 	void		*hwrm_short_cmd_req_addr;
-	phys_addr_t	hwrm_short_cmd_req_dma_addr;
+	rte_iova_t	hwrm_short_cmd_req_dma_addr;
 	rte_spinlock_t	hwrm_lock;
 	uint16_t	max_req_len;
 	uint16_t	max_resp_len;
@@ -269,4 +278,5 @@ int bnxt_rcv_msg_from_vf(struct bnxt *bp, uint16_t vf_id, void *msg);
 #define RX_PROD_AGG_BD_TYPE_RX_PROD_AGG		0x6
 
 bool is_bnxt_supported(struct rte_eth_dev *dev);
+extern const struct rte_flow_ops bnxt_flow_ops;
 #endif
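The recurring phys_addr_t to rte_iova_t substitutions track the 17.11 IOVA rework: DMA addresses are now taken from mz->iova rather than mz->phys_addr. A minimal sketch (helper name invented, not part of the patch) of the lookup-and-fallback pattern this diff adopts later in bnxt_dev_init():

```c
#include <stdint.h>
#include <rte_memzone.h>
#include <rte_memory.h>

/* Prefer the memzone's IOVA; fall back to rte_mem_virt2iova() when the
 * zone looks identity-mapped, mirroring the patch's warning path. */
static rte_iova_t zone_iova(const struct rte_memzone *mz)
{
	rte_iova_t iova = mz->iova;

	if ((uintptr_t)mz->addr == iova)
		iova = rte_mem_virt2iova(mz->addr);
	return iova;
}
```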
diff --git a/drivers/net/bnxt/bnxt_cpr.c b/drivers/net/bnxt/bnxt_cpr.c
index 68979bc4..26b2755e 100644
--- a/drivers/net/bnxt/bnxt_cpr.c
+++ b/drivers/net/bnxt/bnxt_cpr.c
@@ -183,8 +183,10 @@ void bnxt_free_def_cp_ring(struct bnxt *bp)
 		return;
 
 	bnxt_free_ring(cpr->cp_ring_struct);
+	cpr->cp_ring_struct = NULL;
 	rte_free(cpr->cp_ring_struct);
 	rte_free(cpr);
+	bp->def_cp_ring = NULL;
 }
 
 /* For the default completion ring only */
diff --git a/drivers/net/bnxt/bnxt_cpr.h b/drivers/net/bnxt/bnxt_cpr.h
index a6e87858..ce2b0cb8 100644
--- a/drivers/net/bnxt/bnxt_cpr.h
+++ b/drivers/net/bnxt/bnxt_cpr.h
@@ -41,6 +41,9 @@
 	(!!(((struct cmpl_base *)(cmp))->info3_v & CMPL_BASE_V) == \
 	!((raw_cons) & ((ring)->ring_size)))
 
+#define CMPL_VALID(cmp, v) \
+	(!!(((struct cmpl_base *)(cmp))->info3_v & CMPL_BASE_V) == !(v))
+
 #define CMP_TYPE(cmp) \
 	(((struct cmpl_base *)cmp)->type & CMPL_BASE_TYPE_MASK)
 
@@ -48,6 +51,7 @@
 #define NEXT_RAW_CMP(idx) ADV_RAW_CMP(idx, 1)
 #define RING_CMP(ring, idx) ((idx) & (ring)->ring_mask)
 #define NEXT_CMP(idx) RING_CMP(ADV_RAW_CMP(idx, 1))
+#define FLIP_VALID(cons, mask, val) ((cons) >= (mask) ? !(val) : (val))
 
 #define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID)
 #define DB_CP_FLAGS (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
@@ -82,15 +86,15 @@ struct bnxt_cp_ring_info {
 	struct cmpl_base *cp_desc_ring;
-	phys_addr_t cp_desc_mapping;
+	rte_iova_t cp_desc_mapping;
 
 	struct ctx_hw_stats *hw_stats;
-	phys_addr_t hw_stats_map;
+	rte_iova_t hw_stats_map;
 	uint32_t hw_stats_ctx_id;
 
 	struct bnxt_ring *cp_ring_struct;
 	uint16_t cp_cons;
-	bool v;
+	bool valid;
 };
 
 #define RX_CMP_L2_ERRORS \
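CMPL_VALID and FLIP_VALID implement a phase-valid completion ring: hardware toggles the CMPL_BASE_V bit on every pass over the ring, so the consumer flips its expected phase at the wrap point instead of zeroing consumed entries. A self-contained toy model of the same idea (all names invented):

```c
#include <stdbool.h>
#include <stdint.h>

#define TOY_RING_SIZE 8			/* power of two, as in the driver */
#define TOY_RING_MASK (TOY_RING_SIZE - 1)

struct toy_cmpl { uint32_t info3_v; };	/* bit 0 stands in for CMPL_BASE_V */

/* Count consecutive completions the producer has published, flipping the
 * expected phase whenever the consumer index wraps the ring. */
static unsigned int toy_count_ready(const struct toy_cmpl *ring, bool phase)
{
	unsigned int raw, n = 0;

	for (raw = 0; raw < TOY_RING_SIZE; raw++) {
		unsigned int cons = raw & TOY_RING_MASK;

		if ((ring[cons].info3_v & 1) != (unsigned int)phase)
			break;			/* not written yet */
		if (cons >= TOY_RING_MASK)
			phase = !phase;		/* wrapped: expectation flips */
		n++;
	}
	return n;
}
```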
RTE_LOG(ERR, PMD, "Failed to allocate %d rx_queues" + " intr_vec", bp->eth_dev->data->nb_rx_queues); + return -ENOMEM; + } + RTE_LOG(DEBUG, PMD, "%s(): intr_handle->intr_vec = %p " + "intr_handle->nb_efd = %d intr_handle->max_intr = %d\n", + __func__, intr_handle->intr_vec, intr_handle->nb_efd, + intr_handle->max_intr); + } + + for (queue_id = 0; queue_id < bp->eth_dev->data->nb_rx_queues; + queue_id++) { + intr_handle->intr_vec[queue_id] = vec; + if (vec < base + intr_handle->nb_efd - 1) + vec++; + } + + /* enable uio/vfio intr/eventfd mapping */ + rte_intr_enable(intr_handle); + rc = bnxt_get_hwrm_link_config(bp, &new); if (rc) { RTE_LOG(ERR, PMD, "HWRM Get link config failure rc: %x\n", rc); @@ -360,27 +411,38 @@ static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev, { struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; uint16_t max_vnics, i, j, vpool, vrxq; + unsigned int max_rx_rings; dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); /* MAC Specifics */ - dev_info->max_mac_addrs = MAX_NUM_MAC_ADDR; + dev_info->max_mac_addrs = bp->max_l2_ctx; dev_info->max_hash_mac_addrs = 0; /* PF/VF specifics */ if (BNXT_PF(bp)) dev_info->max_vfs = bp->pdev->max_vfs; - dev_info->max_rx_queues = bp->max_rx_rings; - dev_info->max_tx_queues = bp->max_tx_rings; + max_rx_rings = RTE_MIN(bp->max_vnics, RTE_MIN(bp->max_l2_ctx, + RTE_MIN(bp->max_rsscos_ctx, + bp->max_stat_ctx))); + /* For the sake of symmetry, max_rx_queues = max_tx_queues */ + dev_info->max_rx_queues = max_rx_rings; + dev_info->max_tx_queues = max_rx_rings; dev_info->reta_size = bp->max_rsscos_ctx; + dev_info->hash_key_size = 40; max_vnics = bp->max_vnics; /* Fast path specifics */ dev_info->min_rx_bufsize = 1; dev_info->max_rx_pktlen = BNXT_MAX_MTU + ETHER_HDR_LEN + ETHER_CRC_LEN + VLAN_TAG_SIZE; - dev_info->rx_offload_capa = 0; - dev_info->tx_offload_capa = DEV_TX_OFFLOAD_IPV4_CKSUM | + dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP | + DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM | + DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM; + dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT | + DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_TCP_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM | DEV_TX_OFFLOAD_TCP_TSO | @@ -414,6 +476,8 @@ static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev, }; eth_dev->data->dev_conf.intr_conf.lsc = 1; + eth_dev->data->dev_conf.intr_conf.rxq = 1; + /* *INDENT-ON* */ /* @@ -489,13 +553,13 @@ static void bnxt_print_link_info(struct rte_eth_dev *eth_dev) if (link->link_status) RTE_LOG(INFO, PMD, "Port %d Link Up - speed %u Mbps - %s\n", - (uint8_t)(eth_dev->data->port_id), + eth_dev->data->port_id, (uint32_t)link->link_speed, (link->link_duplex == ETH_LINK_FULL_DUPLEX) ? 
("full-duplex") : ("half-duplex\n")); else RTE_LOG(INFO, PMD, "Port %d Link Down\n", - (uint8_t)(eth_dev->data->port_id)); + eth_dev->data->port_id); } static int bnxt_dev_lsc_intr_setup(struct rte_eth_dev *eth_dev) @@ -510,6 +574,11 @@ static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev) int vlan_mask = 0; int rc; + if (bp->rx_cp_nr_rings > RTE_ETHDEV_QUEUE_STAT_CNTRS) { + RTE_LOG(ERR, PMD, + "RxQ cnt %d > CONFIG_RTE_ETHDEV_QUEUE_STAT_CNTRS %d\n", + bp->rx_cp_nr_rings, RTE_ETHDEV_QUEUE_STAT_CNTRS); + } bp->dev_stopped = 0; rc = bnxt_init_nic(bp); @@ -522,7 +591,9 @@ static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev) vlan_mask |= ETH_VLAN_FILTER_MASK; if (eth_dev->data->dev_conf.rxmode.hw_vlan_strip) vlan_mask |= ETH_VLAN_STRIP_MASK; - bnxt_vlan_offload_set_op(eth_dev, vlan_mask); + rc = bnxt_vlan_offload_set_op(eth_dev, vlan_mask); + if (rc) + goto error; return 0; @@ -593,13 +664,14 @@ static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev, uint64_t pool_mask = eth_dev->data->mac_pool_sel[index]; struct bnxt_vnic_info *vnic; struct bnxt_filter_info *filter, *temp_filter; - int i; + uint32_t pool = RTE_MIN(MAX_FF_POOLS, ETH_64_POOLS); + uint32_t i; /* * Loop through all VNICs from the specified filter flow pools to * remove the corresponding MAC addr filter */ - for (i = 0; i < MAX_FF_POOLS; i++) { + for (i = 0; i < pool; i++) { if (!(pool_mask & (1ULL << i))) continue; @@ -610,7 +682,7 @@ static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev, if (filter->mac_index == index) { STAILQ_REMOVE(&vnic->filter, filter, bnxt_filter_info, next); - bnxt_hwrm_clear_filter(bp, filter); + bnxt_hwrm_clear_l2_filter(bp, filter); filter->mac_index = INVALID_MAC_INDEX; memset(&filter->l2_addr, 0, ETHER_ADDR_LEN); @@ -657,7 +729,7 @@ static int bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev, STAILQ_INSERT_TAIL(&vnic->filter, filter, next); filter->mac_index = index; memcpy(filter->l2_addr, mac_addr, ETHER_ADDR_LEN); - return bnxt_hwrm_set_filter(bp, vnic->fw_vnic_id, filter); + return bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter); } int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete) @@ -827,11 +899,15 @@ static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev, */ if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) { if (!rss_conf->rss_hf) - return -EINVAL; + RTE_LOG(ERR, PMD, "Hash type NONE\n"); } else { if (rss_conf->rss_hf & BNXT_ETH_RSS_SUPPORT) return -EINVAL; } + + bp->flags |= BNXT_FLAG_UPDATE_HASH; + memcpy(&bp->rss_conf, rss_conf, sizeof(*rss_conf)); + if (rss_conf->rss_hf & ETH_RSS_IPV4) hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4; if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) @@ -1147,7 +1223,7 @@ static int bnxt_del_vlan_filter(struct bnxt *bp, uint16_t vlan_id) /* Must delete the filter */ STAILQ_REMOVE(&vnic->filter, filter, bnxt_filter_info, next); - bnxt_hwrm_clear_filter(bp, filter); + bnxt_hwrm_clear_l2_filter(bp, filter); STAILQ_INSERT_TAIL( &bp->free_filter_list, filter, next); @@ -1173,7 +1249,7 @@ static int bnxt_del_vlan_filter(struct bnxt *bp, uint16_t vlan_id) memcpy(new_filter->l2_addr, filter->l2_addr, ETHER_ADDR_LEN); /* MAC only filter */ - rc = bnxt_hwrm_set_filter(bp, + rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, new_filter); if (rc) @@ -1225,7 +1301,7 @@ static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id) /* Must delete the MAC filter */ STAILQ_REMOVE(&vnic->filter, filter, bnxt_filter_info, next); - bnxt_hwrm_clear_filter(bp, filter); + bnxt_hwrm_clear_l2_filter(bp, 
@@ -1147,7 +1223,7 @@ static int bnxt_del_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
 				/* Must delete the filter */
 				STAILQ_REMOVE(&vnic->filter, filter,
 					      bnxt_filter_info, next);
-				bnxt_hwrm_clear_filter(bp, filter);
+				bnxt_hwrm_clear_l2_filter(bp, filter);
 				STAILQ_INSERT_TAIL(
 						&bp->free_filter_list,
 						filter, next);
@@ -1173,7 +1249,7 @@ static int bnxt_del_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
 				memcpy(new_filter->l2_addr, filter->l2_addr,
 				       ETHER_ADDR_LEN);
 				/* MAC only filter */
-				rc = bnxt_hwrm_set_filter(bp,
+				rc = bnxt_hwrm_set_l2_filter(bp,
 							vnic->fw_vnic_id,
 							new_filter);
 				if (rc)
@@ -1225,7 +1301,7 @@ static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
 				/* Must delete the MAC filter */
 				STAILQ_REMOVE(&vnic->filter, filter,
 					      bnxt_filter_info, next);
-				bnxt_hwrm_clear_filter(bp, filter);
+				bnxt_hwrm_clear_l2_filter(bp, filter);
 				filter->l2_ovlan = 0;
 				STAILQ_INSERT_TAIL(
 						&bp->free_filter_list,
@@ -1248,8 +1324,9 @@ static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id)
 			new_filter->l2_ovlan = vlan_id;
 			new_filter->l2_ovlan_mask = 0xF000;
 			new_filter->enables |= en;
-			rc = bnxt_hwrm_set_filter(bp, vnic->fw_vnic_id,
-						  new_filter);
+			rc = bnxt_hwrm_set_l2_filter(bp,
+						     vnic->fw_vnic_id,
+						     new_filter);
 			if (rc)
 				goto exit;
 			RTE_LOG(INFO, PMD,
@@ -1275,7 +1352,7 @@ static int bnxt_vlan_filter_set_op(struct rte_eth_dev *eth_dev,
 		return bnxt_del_vlan_filter(bp, vlan_id);
 }
 
-static void
+static int
 bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask)
 {
 	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
@@ -1307,6 +1384,8 @@ bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask)
 
 	if (mask & ETH_VLAN_EXTEND_MASK)
 		RTE_LOG(ERR, PMD, "Extend VLAN Not supported\n");
+
+	return 0;
 }
 
 static void
@@ -1328,7 +1407,7 @@ bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, struct ether_addr *addr)
 		/* Default Filter is at Index 0 */
 		if (filter->mac_index != 0)
 			continue;
-		rc = bnxt_hwrm_clear_filter(bp, filter);
+		rc = bnxt_hwrm_clear_l2_filter(bp, filter);
 		if (rc)
 			break;
 		memcpy(filter->l2_addr, bp->mac_addr, ETHER_ADDR_LEN);
@@ -1337,7 +1416,7 @@ bnxt_set_default_mac_addr_op(struct rte_eth_dev *dev, struct ether_addr *addr)
 		filter->enables |=
 			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
 			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK;
-		rc = bnxt_hwrm_set_filter(bp, vnic->fw_vnic_id, filter);
+		rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, filter);
 		if (rc)
 			break;
 		filter->mac_index = 0;
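bnxt_vlan_offload_set_op() now returns int so that bnxt_dev_start_op() can abort on failure. For reference, a sketch of the ethdev call that lands here (mask semantics abbreviated; extend mode stays unsupported):

```c
#include <rte_ethdev.h>

static int enable_vlan_offloads(uint16_t port_id)
{
	/* request strip + filter reprogramming on the port */
	return rte_eth_dev_set_vlan_offload(port_id,
			ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK);
}
```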
@@ -1517,6 +1596,1083 @@ bnxt_dev_led_off_op(struct rte_eth_dev *dev)
 	return bnxt_hwrm_port_led_cfg(bp, false);
 }
 
+static uint32_t
+bnxt_rx_queue_count_op(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+	uint32_t desc = 0, raw_cons = 0, cons;
+	struct bnxt_cp_ring_info *cpr;
+	struct bnxt_rx_queue *rxq;
+	struct rx_pkt_cmpl *rxcmp;
+	uint16_t cmp_type;
+	uint8_t cmp = 1;
+	bool valid;
+
+	rxq = dev->data->rx_queues[rx_queue_id];
+	cpr = rxq->cp_ring;
+	valid = cpr->valid;
+
+	while (raw_cons < rxq->nb_rx_desc) {
+		cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
+		rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
+
+		if (!CMPL_VALID(rxcmp, valid))
+			goto nothing_to_do;
+		valid = FLIP_VALID(cons, cpr->cp_ring_struct->ring_mask, valid);
+		cmp_type = CMP_TYPE(rxcmp);
+		if (cmp_type == RX_TPA_END_CMPL_TYPE_RX_TPA_END) {
+			cmp = (rte_le_to_cpu_32(
+					((struct rx_tpa_end_cmpl *)
+					 (rxcmp))->agg_bufs_v1) &
+			       RX_TPA_END_CMPL_AGG_BUFS_MASK) >>
+				RX_TPA_END_CMPL_AGG_BUFS_SFT;
+			desc++;
+		} else if (cmp_type == 0x11) {
+			desc++;
+			cmp = (rxcmp->agg_bufs_v1 &
+			       RX_PKT_CMPL_AGG_BUFS_MASK) >>
+				RX_PKT_CMPL_AGG_BUFS_SFT;
+		} else {
+			cmp = 1;
+		}
+nothing_to_do:
+		raw_cons += cmp ? cmp : 2;
+	}
+
+	return desc;
+}
+
+static int
+bnxt_rx_descriptor_status_op(void *rx_queue, uint16_t offset)
+{
+	struct bnxt_rx_queue *rxq = (struct bnxt_rx_queue *)rx_queue;
+	struct bnxt_rx_ring_info *rxr;
+	struct bnxt_cp_ring_info *cpr;
+	struct bnxt_sw_rx_bd *rx_buf;
+	struct rx_pkt_cmpl *rxcmp;
+	uint32_t cons, cp_cons;
+
+	if (!rxq)
+		return -EINVAL;
+
+	cpr = rxq->cp_ring;
+	rxr = rxq->rx_ring;
+
+	if (offset >= rxq->nb_rx_desc)
+		return -EINVAL;
+
+	cons = RING_CMP(cpr->cp_ring_struct, offset);
+	cp_cons = cpr->cp_raw_cons;
+	rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
+
+	if (cons > cp_cons) {
+		if (CMPL_VALID(rxcmp, cpr->valid))
+			return RTE_ETH_RX_DESC_DONE;
+	} else {
+		if (CMPL_VALID(rxcmp, !cpr->valid))
+			return RTE_ETH_RX_DESC_DONE;
+	}
+	rx_buf = &rxr->rx_buf_ring[cons];
+	if (rx_buf->mbuf == NULL)
+		return RTE_ETH_RX_DESC_UNAVAIL;
+
+	return RTE_ETH_RX_DESC_AVAIL;
+}
+
+static int
+bnxt_tx_descriptor_status_op(void *tx_queue, uint16_t offset)
+{
+	struct bnxt_tx_queue *txq = (struct bnxt_tx_queue *)tx_queue;
+	struct bnxt_tx_ring_info *txr;
+	struct bnxt_cp_ring_info *cpr;
+	struct bnxt_sw_tx_bd *tx_buf;
+	struct tx_pkt_cmpl *txcmp;
+	uint32_t cons, cp_cons;
+
+	if (!txq)
+		return -EINVAL;
+
+	cpr = txq->cp_ring;
+	txr = txq->tx_ring;
+
+	if (offset >= txq->nb_tx_desc)
+		return -EINVAL;
+
+	cons = RING_CMP(cpr->cp_ring_struct, offset);
+	txcmp = (struct tx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
+	cp_cons = cpr->cp_raw_cons;
+
+	if (cons > cp_cons) {
+		if (CMPL_VALID(txcmp, cpr->valid))
+			return RTE_ETH_TX_DESC_UNAVAIL;
+	} else {
+		if (CMPL_VALID(txcmp, !cpr->valid))
+			return RTE_ETH_TX_DESC_UNAVAIL;
+	}
+	tx_buf = &txr->tx_buf_ring[cons];
+	if (tx_buf->mbuf == NULL)
+		return RTE_ETH_TX_DESC_DONE;
+
+	return RTE_ETH_TX_DESC_FULL;
+}
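These three hooks back rte_eth_rx_queue_count() and the rte_eth_*_descriptor_status() helpers. A short application-side usage sketch (values invented):

```c
#include <stdio.h>
#include <rte_ethdev.h>

static void probe_rxq(uint16_t port_id, uint16_t queue_id)
{
	int used = rte_eth_rx_queue_count(port_id, queue_id);
	int head = rte_eth_rx_descriptor_status(port_id, queue_id, 0);

	if (head == RTE_ETH_RX_DESC_DONE)
		printf("rxq %u: %d completions pending\n", queue_id, used);
}
```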
+static struct bnxt_filter_info *
+bnxt_match_and_validate_ether_filter(struct bnxt *bp,
+				struct rte_eth_ethertype_filter *efilter,
+				struct bnxt_vnic_info *vnic0,
+				struct bnxt_vnic_info *vnic,
+				int *ret)
+{
+	struct bnxt_filter_info *mfilter = NULL;
+	int match = 0;
+	*ret = 0;
+
+	if (efilter->ether_type != ETHER_TYPE_IPv4 &&
+	    efilter->ether_type != ETHER_TYPE_IPv6) {
+		RTE_LOG(ERR, PMD, "unsupported ether_type(0x%04x) in"
+			" ethertype filter.", efilter->ether_type);
+		*ret = -EINVAL;
+		goto exit;
+	}
+	if (efilter->queue >= bp->rx_nr_rings) {
+		RTE_LOG(ERR, PMD, "Invalid queue %d\n", efilter->queue);
+		*ret = -EINVAL;
+		goto exit;
+	}
+
+	vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
+	vnic = STAILQ_FIRST(&bp->ff_pool[efilter->queue]);
+	if (vnic == NULL) {
+		RTE_LOG(ERR, PMD, "Invalid queue %d\n", efilter->queue);
+		*ret = -EINVAL;
+		goto exit;
+	}
+
+	if (efilter->flags & RTE_ETHTYPE_FLAGS_DROP) {
+		STAILQ_FOREACH(mfilter, &vnic0->filter, next) {
+			if ((!memcmp(efilter->mac_addr.addr_bytes,
+				     mfilter->l2_addr, ETHER_ADDR_LEN) &&
+			     mfilter->flags ==
+			     HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP &&
+			     mfilter->ethertype == efilter->ether_type)) {
+				match = 1;
+				break;
+			}
+		}
+	} else {
+		STAILQ_FOREACH(mfilter, &vnic->filter, next)
+			if ((!memcmp(efilter->mac_addr.addr_bytes,
+				     mfilter->l2_addr, ETHER_ADDR_LEN) &&
+			     mfilter->ethertype == efilter->ether_type &&
+			     mfilter->flags ==
+			     HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX)) {
+				match = 1;
+				break;
+			}
+	}
+
+	if (match)
+		*ret = -EEXIST;
+
+exit:
+	return mfilter;
+}
+
+static int
+bnxt_ethertype_filter(struct rte_eth_dev *dev,
+			enum rte_filter_op filter_op,
+			void *arg)
+{
+	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+	struct rte_eth_ethertype_filter *efilter =
+			(struct rte_eth_ethertype_filter *)arg;
+	struct bnxt_filter_info *bfilter, *filter1;
+	struct bnxt_vnic_info *vnic, *vnic0;
+	int ret;
+
+	if (filter_op == RTE_ETH_FILTER_NOP)
+		return 0;
+
+	if (arg == NULL) {
+		RTE_LOG(ERR, PMD, "arg shouldn't be NULL for operation %u.",
+			filter_op);
+		return -EINVAL;
+	}
+
+	vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
+	vnic = STAILQ_FIRST(&bp->ff_pool[efilter->queue]);
+
+	switch (filter_op) {
+	case RTE_ETH_FILTER_ADD:
+		bnxt_match_and_validate_ether_filter(bp, efilter,
+						     vnic0, vnic, &ret);
+		if (ret < 0)
+			return ret;
+
+		bfilter = bnxt_get_unused_filter(bp);
+		if (bfilter == NULL) {
+			RTE_LOG(ERR, PMD,
+				"Not enough resources for a new filter.\n");
+			return -ENOMEM;
+		}
+		bfilter->filter_type = HWRM_CFA_NTUPLE_FILTER;
+		memcpy(bfilter->l2_addr, efilter->mac_addr.addr_bytes,
+		       ETHER_ADDR_LEN);
+		memcpy(bfilter->dst_macaddr, efilter->mac_addr.addr_bytes,
+		       ETHER_ADDR_LEN);
+		bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR;
+		bfilter->ethertype = efilter->ether_type;
+		bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
+
+		filter1 = bnxt_get_l2_filter(bp, bfilter, vnic0);
+		if (filter1 == NULL) {
+			ret = -1;
+			goto cleanup;
+		}
+		bfilter->enables |=
+			HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
+		bfilter->fw_l2_filter_id = filter1->fw_l2_filter_id;
+
+		bfilter->dst_id = vnic->fw_vnic_id;
+
+		if (efilter->flags & RTE_ETHTYPE_FLAGS_DROP) {
+			bfilter->flags =
+				HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
+		}
+
+		ret = bnxt_hwrm_set_ntuple_filter(bp, bfilter->dst_id, bfilter);
+		if (ret)
+			goto cleanup;
+		STAILQ_INSERT_TAIL(&vnic->filter, bfilter, next);
+		break;
+	case RTE_ETH_FILTER_DELETE:
+		filter1 = bnxt_match_and_validate_ether_filter(bp, efilter,
+							       vnic0, vnic, &ret);
+		if (ret == -EEXIST) {
+			ret = bnxt_hwrm_clear_ntuple_filter(bp, filter1);
+
+			STAILQ_REMOVE(&vnic->filter, filter1, bnxt_filter_info,
+				      next);
+			bnxt_free_filter(bp, filter1);
+		} else if (ret == 0) {
+			RTE_LOG(ERR, PMD, "No matching filter found\n");
+		}
+		break;
+	default:
+		RTE_LOG(ERR, PMD, "unsupported operation %u.", filter_op);
+		ret = -EINVAL;
+		goto error;
+	}
+	return ret;
+cleanup:
+	bnxt_free_filter(bp, bfilter);
+error:
+	return ret;
+}
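bnxt_ethertype_filter() is reached through the legacy filter_ctrl API; the PMD converts the request into an HWRM ntuple filter keyed on the ethertype. A hedged usage sketch (queue number invented):

```c
#include <rte_ethdev.h>
#include <rte_eth_ctrl.h>

static int steer_ipv4_to_queue1(uint16_t port_id)
{
	struct rte_eth_ethertype_filter f = {
		.ether_type = 0x0800,	/* ETHER_TYPE_IPv4 */
		.queue = 1,
	};

	return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_ETHERTYPE,
				       RTE_ETH_FILTER_ADD, &f);
}
```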
+static inline int
+parse_ntuple_filter(struct bnxt *bp,
+		    struct rte_eth_ntuple_filter *nfilter,
+		    struct bnxt_filter_info *bfilter)
+{
+	uint32_t en = 0;
+
+	if (nfilter->queue >= bp->rx_nr_rings) {
+		RTE_LOG(ERR, PMD, "Invalid queue %d\n", nfilter->queue);
+		return -EINVAL;
+	}
+
+	switch (nfilter->dst_port_mask) {
+	case UINT16_MAX:
+		bfilter->dst_port_mask = -1;
+		bfilter->dst_port = nfilter->dst_port;
+		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT |
+			NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
+		break;
+	default:
+		RTE_LOG(ERR, PMD, "invalid dst_port mask.");
+		return -EINVAL;
+	}
+
+	bfilter->ip_addr_type = NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
+	en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
+
+	switch (nfilter->proto_mask) {
+	case UINT8_MAX:
+		if (nfilter->proto == 17) /* IPPROTO_UDP */
+			bfilter->ip_protocol = 17;
+		else if (nfilter->proto == 6) /* IPPROTO_TCP */
+			bfilter->ip_protocol = 6;
+		else
+			return -EINVAL;
+		en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
+		break;
+	default:
+		RTE_LOG(ERR, PMD, "invalid protocol mask.");
+		return -EINVAL;
+	}
+
+	switch (nfilter->dst_ip_mask) {
+	case UINT32_MAX:
+		bfilter->dst_ipaddr_mask[0] = -1;
+		bfilter->dst_ipaddr[0] = nfilter->dst_ip;
+		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR |
+			NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
+		break;
+	default:
+		RTE_LOG(ERR, PMD, "invalid dst_ip mask.");
+		return -EINVAL;
+	}
+
+	switch (nfilter->src_ip_mask) {
+	case UINT32_MAX:
+		bfilter->src_ipaddr_mask[0] = -1;
+		bfilter->src_ipaddr[0] = nfilter->src_ip;
+		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
+			NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
+		break;
+	default:
+		RTE_LOG(ERR, PMD, "invalid src_ip mask.");
+		return -EINVAL;
+	}
+
+	switch (nfilter->src_port_mask) {
+	case UINT16_MAX:
+		bfilter->src_port_mask = -1;
+		bfilter->src_port = nfilter->src_port;
+		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
+			NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
+		break;
+	default:
+		RTE_LOG(ERR, PMD, "invalid src_port mask.");
+		return -EINVAL;
+	}
+
+	//TODO Priority
+	//nfilter->priority = (uint8_t)filter->priority;
+
+	bfilter->enables = en;
+	return 0;
+}
+
+static struct bnxt_filter_info*
+bnxt_match_ntuple_filter(struct bnxt_vnic_info *vnic,
+			 struct bnxt_filter_info *bfilter)
+{
+	struct bnxt_filter_info *mfilter = NULL;
+
+	STAILQ_FOREACH(mfilter, &vnic->filter, next) {
+		if (bfilter->src_ipaddr[0] == mfilter->src_ipaddr[0] &&
+		    bfilter->src_ipaddr_mask[0] ==
+		    mfilter->src_ipaddr_mask[0] &&
+		    bfilter->src_port == mfilter->src_port &&
+		    bfilter->src_port_mask == mfilter->src_port_mask &&
+		    bfilter->dst_ipaddr[0] == mfilter->dst_ipaddr[0] &&
+		    bfilter->dst_ipaddr_mask[0] ==
+		    mfilter->dst_ipaddr_mask[0] &&
+		    bfilter->dst_port == mfilter->dst_port &&
+		    bfilter->dst_port_mask == mfilter->dst_port_mask &&
+		    bfilter->flags == mfilter->flags &&
+		    bfilter->enables == mfilter->enables)
+			return mfilter;
+	}
+	return NULL;
+}
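parse_ntuple_filter() accepts only exact (all-ones) masks, as the switch arms above show. A matching application-side request might look like this sketch (addresses and ports invented; address/port fields are big-endian per rte_eth_ctrl.h):

```c
#include <stdint.h>
#include <rte_ethdev.h>
#include <rte_eth_ctrl.h>
#include <rte_byteorder.h>

static int add_udp_5tuple(uint16_t port_id)
{
	struct rte_eth_ntuple_filter f = {
		.flags = RTE_5TUPLE_FLAGS,
		.dst_ip = rte_cpu_to_be_32(0x0a000002),	/* 10.0.0.2 */
		.dst_ip_mask = UINT32_MAX,
		.src_ip = rte_cpu_to_be_32(0x0a000001),	/* 10.0.0.1 */
		.src_ip_mask = UINT32_MAX,
		.dst_port = rte_cpu_to_be_16(4000),
		.dst_port_mask = UINT16_MAX,
		.src_port = rte_cpu_to_be_16(53),
		.src_port_mask = UINT16_MAX,
		.proto = 17,				/* IPPROTO_UDP */
		.proto_mask = UINT8_MAX,
		.queue = 2,
	};

	return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_NTUPLE,
				       RTE_ETH_FILTER_ADD, &f);
}
```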
+static int
+bnxt_cfg_ntuple_filter(struct bnxt *bp,
+		       struct rte_eth_ntuple_filter *nfilter,
+		       enum rte_filter_op filter_op)
+{
+	struct bnxt_filter_info *bfilter, *mfilter, *filter1;
+	struct bnxt_vnic_info *vnic, *vnic0;
+	int ret;
+
+	if (nfilter->flags != RTE_5TUPLE_FLAGS) {
+		RTE_LOG(ERR, PMD, "only 5tuple is supported.");
+		return -EINVAL;
+	}
+
+	if (nfilter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
+		RTE_LOG(ERR, PMD, "Ntuple filter: TCP flags not supported\n");
+		return -EINVAL;
+	}
+
+	bfilter = bnxt_get_unused_filter(bp);
+	if (bfilter == NULL) {
+		RTE_LOG(ERR, PMD,
+			"Not enough resources for a new filter.\n");
+		return -ENOMEM;
+	}
+	ret = parse_ntuple_filter(bp, nfilter, bfilter);
+	if (ret < 0)
+		goto free_filter;
+
+	vnic = STAILQ_FIRST(&bp->ff_pool[nfilter->queue]);
+	vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
+	filter1 = STAILQ_FIRST(&vnic0->filter);
+	if (filter1 == NULL) {
+		ret = -1;
+		goto free_filter;
+	}
+
+	bfilter->dst_id = vnic->fw_vnic_id;
+	bfilter->fw_l2_filter_id = filter1->fw_l2_filter_id;
+	bfilter->enables |=
+		HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
+	bfilter->ethertype = 0x800;
+	bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
+
+	mfilter = bnxt_match_ntuple_filter(vnic, bfilter);
+
+	if (mfilter != NULL && filter_op == RTE_ETH_FILTER_ADD) {
+		RTE_LOG(ERR, PMD, "filter exists.");
+		ret = -EEXIST;
+		goto free_filter;
+	}
+	if (mfilter == NULL && filter_op == RTE_ETH_FILTER_DELETE) {
+		RTE_LOG(ERR, PMD, "filter doesn't exist.");
+		ret = -ENOENT;
+		goto free_filter;
+	}
+
+	if (filter_op == RTE_ETH_FILTER_ADD) {
+		bfilter->filter_type = HWRM_CFA_NTUPLE_FILTER;
+		ret = bnxt_hwrm_set_ntuple_filter(bp, bfilter->dst_id, bfilter);
+		if (ret)
+			goto free_filter;
+		STAILQ_INSERT_TAIL(&vnic->filter, bfilter, next);
+	} else {
+		if (mfilter == NULL) {
+			/* This should not happen. But for Coverity! */
+			ret = -ENOENT;
+			goto free_filter;
+		}
+		ret = bnxt_hwrm_clear_ntuple_filter(bp, mfilter);
+
+		STAILQ_REMOVE(&vnic->filter, mfilter, bnxt_filter_info,
+			      next);
+		bnxt_free_filter(bp, mfilter);
+		bfilter->fw_l2_filter_id = -1;
+		bnxt_free_filter(bp, bfilter);
+	}
+
+	return 0;
+free_filter:
+	bfilter->fw_l2_filter_id = -1;
+	bnxt_free_filter(bp, bfilter);
+	return ret;
+}
+
+static int
+bnxt_ntuple_filter(struct rte_eth_dev *dev,
+		   enum rte_filter_op filter_op,
+		   void *arg)
+{
+	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+	int ret;
+
+	if (filter_op == RTE_ETH_FILTER_NOP)
+		return 0;
+
+	if (arg == NULL) {
+		RTE_LOG(ERR, PMD, "arg shouldn't be NULL for operation %u.",
+			filter_op);
+		return -EINVAL;
+	}
+
+	switch (filter_op) {
+	case RTE_ETH_FILTER_ADD:
+		ret = bnxt_cfg_ntuple_filter(bp,
+			(struct rte_eth_ntuple_filter *)arg,
+			filter_op);
+		break;
+	case RTE_ETH_FILTER_DELETE:
+		ret = bnxt_cfg_ntuple_filter(bp,
+			(struct rte_eth_ntuple_filter *)arg,
+			filter_op);
+		break;
+	default:
+		RTE_LOG(ERR, PMD, "unsupported operation %u.", filter_op);
+		ret = -EINVAL;
+		break;
+	}
+	return ret;
+}
+static int
+bnxt_parse_fdir_filter(struct bnxt *bp,
+		       struct rte_eth_fdir_filter *fdir,
+		       struct bnxt_filter_info *filter)
+{
+	enum rte_fdir_mode fdir_mode =
+		bp->eth_dev->data->dev_conf.fdir_conf.mode;
+	struct bnxt_vnic_info *vnic0, *vnic;
+	struct bnxt_filter_info *filter1;
+	uint32_t en = 0;
+	int i;
+
+	if (fdir_mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
+		return -EINVAL;
+
+	filter->l2_ovlan = fdir->input.flow_ext.vlan_tci;
+	en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID;
+
+	switch (fdir->input.flow_type) {
+	case RTE_ETH_FLOW_IPV4:
+	case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
+		/* FALLTHROUGH */
+		filter->src_ipaddr[0] = fdir->input.flow.ip4_flow.src_ip;
+		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
+		filter->dst_ipaddr[0] = fdir->input.flow.ip4_flow.dst_ip;
+		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
+		filter->ip_protocol = fdir->input.flow.ip4_flow.proto;
+		en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
+		filter->ip_addr_type =
+			NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
+		filter->src_ipaddr_mask[0] = 0xffffffff;
+		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
+		filter->dst_ipaddr_mask[0] = 0xffffffff;
+		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
+		filter->ethertype = 0x800;
+		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
+		break;
+	case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
+		filter->src_port = fdir->input.flow.tcp4_flow.src_port;
+		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
+		filter->dst_port = fdir->input.flow.tcp4_flow.dst_port;
+		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
+		filter->dst_port_mask = 0xffff;
+		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
+		filter->src_port_mask = 0xffff;
+		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
+		filter->src_ipaddr[0] = fdir->input.flow.tcp4_flow.ip.src_ip;
+		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
+		filter->dst_ipaddr[0] = fdir->input.flow.tcp4_flow.ip.dst_ip;
+		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
+		filter->ip_protocol = 6;
+		en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
+		filter->ip_addr_type =
+			NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
+		filter->src_ipaddr_mask[0] = 0xffffffff;
+		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
+		filter->dst_ipaddr_mask[0] = 0xffffffff;
+		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
+		filter->ethertype = 0x800;
+		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
+		break;
+	case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
+		filter->src_port = fdir->input.flow.udp4_flow.src_port;
+		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
+		filter->dst_port = fdir->input.flow.udp4_flow.dst_port;
+		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
+		filter->dst_port_mask = 0xffff;
+		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
+		filter->src_port_mask = 0xffff;
+		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
+		filter->src_ipaddr[0] = fdir->input.flow.udp4_flow.ip.src_ip;
+		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
+		filter->dst_ipaddr[0] = fdir->input.flow.udp4_flow.ip.dst_ip;
+		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
+		filter->ip_protocol = 17;
+		en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
+		filter->ip_addr_type =
+			NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
+		filter->src_ipaddr_mask[0] = 0xffffffff;
+		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
+		filter->dst_ipaddr_mask[0] = 0xffffffff;
+		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
+		filter->ethertype = 0x800;
+		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
+		break;
+	case RTE_ETH_FLOW_IPV6:
+	case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
+		/* FALLTHROUGH */
+		filter->ip_addr_type =
+			NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
+		filter->ip_protocol = fdir->input.flow.ipv6_flow.proto;
+		en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
+		rte_memcpy(filter->src_ipaddr,
+			   fdir->input.flow.ipv6_flow.src_ip, 16);
+		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
+		rte_memcpy(filter->dst_ipaddr,
+			   fdir->input.flow.ipv6_flow.dst_ip, 16);
+		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
+		memset(filter->dst_ipaddr_mask, 0xff, 16);
+		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
+		memset(filter->src_ipaddr_mask, 0xff, 16);
+		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
+		filter->ethertype = 0x86dd;
+		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
+		break;
+	case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
+		filter->src_port = fdir->input.flow.tcp6_flow.src_port;
+		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
+		filter->dst_port = fdir->input.flow.tcp6_flow.dst_port;
+		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
+		filter->dst_port_mask = 0xffff;
+		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
+		filter->src_port_mask = 0xffff;
+		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
+		filter->ip_addr_type =
+			NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
+		filter->ip_protocol = fdir->input.flow.tcp6_flow.ip.proto;
+		en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
+		rte_memcpy(filter->src_ipaddr,
+			   fdir->input.flow.tcp6_flow.ip.src_ip, 16);
+		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
+		rte_memcpy(filter->dst_ipaddr,
+			   fdir->input.flow.tcp6_flow.ip.dst_ip, 16);
+		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
+		memset(filter->dst_ipaddr_mask, 0xff, 16);
+		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
+		memset(filter->src_ipaddr_mask, 0xff, 16);
+		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
+		filter->ethertype = 0x86dd;
+		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
+		break;
+	case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
+		filter->src_port = fdir->input.flow.udp6_flow.src_port;
+		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT;
+		filter->dst_port = fdir->input.flow.udp6_flow.dst_port;
+		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
+		filter->dst_port_mask = 0xffff;
+		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
+		filter->src_port_mask = 0xffff;
+		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
+		filter->ip_addr_type =
+			NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
+		filter->ip_protocol = fdir->input.flow.udp6_flow.ip.proto;
+		en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
+		rte_memcpy(filter->src_ipaddr,
+			   fdir->input.flow.udp6_flow.ip.src_ip, 16);
+		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR;
+		rte_memcpy(filter->dst_ipaddr,
+			   fdir->input.flow.udp6_flow.ip.dst_ip, 16);
+		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
+		memset(filter->dst_ipaddr_mask, 0xff, 16);
+		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
+		memset(filter->src_ipaddr_mask, 0xff, 16);
+		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
+		filter->ethertype = 0x86dd;
+		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
+		break;
+	case RTE_ETH_FLOW_L2_PAYLOAD:
+		filter->ethertype = fdir->input.flow.l2_flow.ether_type;
+		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
+		break;
+	case RTE_ETH_FLOW_VXLAN:
+		if (fdir->action.behavior == RTE_ETH_FDIR_REJECT)
+			return -EINVAL;
+		filter->vni = fdir->input.flow.tunnel_flow.tunnel_id;
+		filter->tunnel_type =
+			CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
+		en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE;
+		break;
+	case RTE_ETH_FLOW_NVGRE:
+		if (fdir->action.behavior == RTE_ETH_FDIR_REJECT)
+			return -EINVAL;
+		filter->vni = fdir->input.flow.tunnel_flow.tunnel_id;
+		filter->tunnel_type =
+			CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
+		en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE;
+		break;
+	case RTE_ETH_FLOW_UNKNOWN:
+	case RTE_ETH_FLOW_RAW:
+	case RTE_ETH_FLOW_FRAG_IPV4:
+	case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
+	case RTE_ETH_FLOW_FRAG_IPV6:
+	case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
+	case RTE_ETH_FLOW_IPV6_EX:
+	case RTE_ETH_FLOW_IPV6_TCP_EX:
+	case RTE_ETH_FLOW_IPV6_UDP_EX:
+	case RTE_ETH_FLOW_GENEVE:
+		/* FALLTHROUGH */
+	default:
+		return -EINVAL;
+	}
+
+	vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
+	vnic = STAILQ_FIRST(&bp->ff_pool[fdir->action.rx_queue]);
+	if (vnic == NULL) {
+		RTE_LOG(ERR, PMD, "Invalid queue %d\n", fdir->action.rx_queue);
+		return -EINVAL;
+	}
+
+	if (fdir_mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
+		rte_memcpy(filter->dst_macaddr,
+			fdir->input.flow.mac_vlan_flow.mac_addr.addr_bytes, 6);
+		en |= NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR;
+	}
+
+	if (fdir->action.behavior == RTE_ETH_FDIR_REJECT) {
+		filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
+		filter1 = STAILQ_FIRST(&vnic0->filter);
+		//filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
+	} else {
+		filter->dst_id = vnic->fw_vnic_id;
+		for (i = 0; i < ETHER_ADDR_LEN; i++)
+			if (filter->dst_macaddr[i] == 0x00)
+				filter1 = STAILQ_FIRST(&vnic0->filter);
+			else
+				filter1 = bnxt_get_l2_filter(bp, filter, vnic);
+	}
+
+	if (filter1 == NULL)
+		return -EINVAL;
+
+	en |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
+	filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
+
+	filter->enables = en;
+
+	return 0;
+}
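bnxt_parse_fdir_filter() folds every flow-director flavor into the same HWRM ntuple representation. A sketch of a request exercising the NONFRAG_IPV4_UDP branch above (all values invented):

```c
#include <rte_ethdev.h>
#include <rte_eth_ctrl.h>
#include <rte_byteorder.h>

static int add_fdir_udp4(uint16_t port_id)
{
	struct rte_eth_fdir_filter f = { 0 };

	f.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
	f.input.flow.udp4_flow.ip.src_ip = rte_cpu_to_be_32(0x0a000001);
	f.input.flow.udp4_flow.ip.dst_ip = rte_cpu_to_be_32(0x0a000002);
	f.input.flow.udp4_flow.src_port = rte_cpu_to_be_16(1000);
	f.input.flow.udp4_flow.dst_port = rte_cpu_to_be_16(2000);
	f.action.behavior = RTE_ETH_FDIR_ACCEPT;
	f.action.rx_queue = 3;

	return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
				       RTE_ETH_FILTER_ADD, &f);
}
```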
+static struct bnxt_filter_info *
+bnxt_match_fdir(struct bnxt *bp, struct bnxt_filter_info *nf)
+{
+	struct bnxt_filter_info *mf = NULL;
+	int i;
+
+	for (i = bp->nr_vnics - 1; i >= 0; i--) {
+		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
+
+		STAILQ_FOREACH(mf, &vnic->filter, next) {
+			if (mf->filter_type == nf->filter_type &&
+			    mf->flags == nf->flags &&
+			    mf->src_port == nf->src_port &&
+			    mf->src_port_mask == nf->src_port_mask &&
+			    mf->dst_port == nf->dst_port &&
+			    mf->dst_port_mask == nf->dst_port_mask &&
+			    mf->ip_protocol == nf->ip_protocol &&
+			    mf->ip_addr_type == nf->ip_addr_type &&
+			    mf->ethertype == nf->ethertype &&
+			    mf->vni == nf->vni &&
+			    mf->tunnel_type == nf->tunnel_type &&
+			    mf->l2_ovlan == nf->l2_ovlan &&
+			    mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
+			    mf->l2_ivlan == nf->l2_ivlan &&
+			    mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
+			    !memcmp(mf->l2_addr, nf->l2_addr, ETHER_ADDR_LEN) &&
+			    !memcmp(mf->l2_addr_mask, nf->l2_addr_mask,
+				    ETHER_ADDR_LEN) &&
+			    !memcmp(mf->src_macaddr, nf->src_macaddr,
+				    ETHER_ADDR_LEN) &&
+			    !memcmp(mf->dst_macaddr, nf->dst_macaddr,
+				    ETHER_ADDR_LEN) &&
+			    !memcmp(mf->src_ipaddr, nf->src_ipaddr,
+				    sizeof(nf->src_ipaddr)) &&
+			    !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask,
+				    sizeof(nf->src_ipaddr_mask)) &&
+			    !memcmp(mf->dst_ipaddr, nf->dst_ipaddr,
+				    sizeof(nf->dst_ipaddr)) &&
+			    !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask,
+				    sizeof(nf->dst_ipaddr_mask)))
+				return mf;
+		}
+	}
+	return NULL;
+}
+
+static int
+bnxt_fdir_filter(struct rte_eth_dev *dev,
+		 enum rte_filter_op filter_op,
+		 void *arg)
+{
+	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+	struct rte_eth_fdir_filter *fdir = (struct rte_eth_fdir_filter *)arg;
+	struct bnxt_filter_info *filter, *match;
+	struct bnxt_vnic_info *vnic;
+	int ret = 0, i;
+
+	if (filter_op == RTE_ETH_FILTER_NOP)
+		return 0;
+
+	if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH)
+		return -EINVAL;
+
+	switch (filter_op) {
+	case RTE_ETH_FILTER_ADD:
+	case RTE_ETH_FILTER_DELETE:
+		/* FALLTHROUGH */
+		filter = bnxt_get_unused_filter(bp);
+		if (filter == NULL) {
+			RTE_LOG(ERR, PMD,
+				"Not enough resources for a new flow.\n");
+			return -ENOMEM;
+		}
+
+		ret = bnxt_parse_fdir_filter(bp, fdir, filter);
+		if (ret != 0)
+			goto free_filter;
+		filter->filter_type = HWRM_CFA_NTUPLE_FILTER;
+
+		match = bnxt_match_fdir(bp, filter);
+		if (match != NULL && filter_op == RTE_ETH_FILTER_ADD) {
+			RTE_LOG(ERR, PMD, "Flow already exists.\n");
+			ret = -EEXIST;
+			goto free_filter;
+		}
+		if (match == NULL && filter_op == RTE_ETH_FILTER_DELETE) {
+			RTE_LOG(ERR, PMD, "Flow does not exist.\n");
+			ret = -ENOENT;
+			goto free_filter;
+		}
+
+		if (fdir->action.behavior == RTE_ETH_FDIR_REJECT)
+			vnic = STAILQ_FIRST(&bp->ff_pool[0]);
+		else
+			vnic =
+			STAILQ_FIRST(&bp->ff_pool[fdir->action.rx_queue]);
+
+		if (filter_op == RTE_ETH_FILTER_ADD) {
+			ret = bnxt_hwrm_set_ntuple_filter(bp,
+							  filter->dst_id,
+							  filter);
+			if (ret)
+				goto free_filter;
+			STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
+		} else {
+			ret = bnxt_hwrm_clear_ntuple_filter(bp, match);
+			STAILQ_REMOVE(&vnic->filter, match,
+				      bnxt_filter_info, next);
+			bnxt_free_filter(bp, match);
+			filter->fw_l2_filter_id = -1;
+			bnxt_free_filter(bp, filter);
+		}
+		break;
+	case RTE_ETH_FILTER_FLUSH:
+		for (i = bp->nr_vnics - 1; i >= 0; i--) {
+			struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
+
+			STAILQ_FOREACH(filter, &vnic->filter, next) {
+				if (filter->filter_type ==
+				    HWRM_CFA_NTUPLE_FILTER) {
+					ret =
+					bnxt_hwrm_clear_ntuple_filter(bp,
+								      filter);
+					STAILQ_REMOVE(&vnic->filter, filter,
+						      bnxt_filter_info, next);
+				}
+			}
+		}
+		return ret;
+	case RTE_ETH_FILTER_UPDATE:
+	case RTE_ETH_FILTER_STATS:
+	case RTE_ETH_FILTER_INFO:
+		/* FALLTHROUGH */
+		RTE_LOG(ERR, PMD, "operation %u not implemented", filter_op);
+		break;
+	default:
+		RTE_LOG(ERR, PMD, "unknown operation %u", filter_op);
+		ret = -EINVAL;
+		break;
+	}
+	return ret;
+
+free_filter:
+	filter->fw_l2_filter_id = -1;
+	bnxt_free_filter(bp, filter);
+	return ret;
+}
+static int
+bnxt_filter_ctrl_op(struct rte_eth_dev *dev __rte_unused,
+		    enum rte_filter_type filter_type,
+		    enum rte_filter_op filter_op, void *arg)
+{
+	int ret = 0;
+
+	switch (filter_type) {
+	case RTE_ETH_FILTER_TUNNEL:
+		RTE_LOG(ERR, PMD,
+			"filter type: %d: To be implemented\n", filter_type);
+		break;
+	case RTE_ETH_FILTER_FDIR:
+		ret = bnxt_fdir_filter(dev, filter_op, arg);
+		break;
+	case RTE_ETH_FILTER_NTUPLE:
+		ret = bnxt_ntuple_filter(dev, filter_op, arg);
+		break;
+	case RTE_ETH_FILTER_ETHERTYPE:
+		ret = bnxt_ethertype_filter(dev, filter_op, arg);
+		break;
+	case RTE_ETH_FILTER_GENERIC:
+		if (filter_op != RTE_ETH_FILTER_GET)
+			return -EINVAL;
+		*(const void **)arg = &bnxt_flow_ops;
+		break;
+	default:
+		RTE_LOG(ERR, PMD,
+			"Filter type (%d) not supported", filter_type);
+		ret = -EINVAL;
+		break;
+	}
+	return ret;
+}
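The RTE_ETH_FILTER_GENERIC arm is how rte_flow discovers the PMD: rte_flow_ops_get() issues exactly this GET query and receives &bnxt_flow_ops back. A sketch of that plumbing from the caller's side:

```c
#include <rte_ethdev.h>
#include <rte_flow_driver.h>

static const struct rte_flow_ops *flow_ops_of(uint16_t port_id)
{
	const struct rte_flow_ops *ops = NULL;

	if (rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_GENERIC,
				    RTE_ETH_FILTER_GET, &ops) < 0)
		return NULL;
	return ops;
}
```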
+static const uint32_t *
+bnxt_dev_supported_ptypes_get_op(struct rte_eth_dev *dev)
+{
+	static const uint32_t ptypes[] = {
+		RTE_PTYPE_L2_ETHER_VLAN,
+		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
+		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
+		RTE_PTYPE_L4_ICMP,
+		RTE_PTYPE_L4_TCP,
+		RTE_PTYPE_L4_UDP,
+		RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
+		RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
+		RTE_PTYPE_INNER_L4_ICMP,
+		RTE_PTYPE_INNER_L4_TCP,
+		RTE_PTYPE_INNER_L4_UDP,
+		RTE_PTYPE_UNKNOWN
+	};
+
+	if (dev->rx_pkt_burst == bnxt_recv_pkts)
+		return ptypes;
+	return NULL;
+}
+
+static int
+bnxt_get_eeprom_length_op(struct rte_eth_dev *dev)
+{
+	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+	int rc;
+	uint32_t dir_entries;
+	uint32_t entry_length;
+
+	RTE_LOG(INFO, PMD, "%s(): %04x:%02x:%02x:%02x\n",
+		__func__, bp->pdev->addr.domain, bp->pdev->addr.bus,
+		bp->pdev->addr.devid, bp->pdev->addr.function);
+
+	rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
+	if (rc != 0)
+		return rc;
+
+	return dir_entries * entry_length;
+}
+
+static int
+bnxt_get_eeprom_op(struct rte_eth_dev *dev,
+		   struct rte_dev_eeprom_info *in_eeprom)
+{
+	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+	uint32_t index;
+	uint32_t offset;
+
+	RTE_LOG(INFO, PMD, "%s(): %04x:%02x:%02x:%02x in_eeprom->offset = %d "
+		"len = %d\n", __func__, bp->pdev->addr.domain,
+		bp->pdev->addr.bus, bp->pdev->addr.devid,
+		bp->pdev->addr.function, in_eeprom->offset, in_eeprom->length);
+
+	if (in_eeprom->offset == 0) /* special offset value to get directory */
+		return bnxt_get_nvram_directory(bp, in_eeprom->length,
+						in_eeprom->data);
+
+	index = in_eeprom->offset >> 24;
+	offset = in_eeprom->offset & 0xffffff;
+
+	if (index != 0)
+		return bnxt_hwrm_get_nvram_item(bp, index - 1, offset,
+						in_eeprom->length,
+						in_eeprom->data);
+
+	return 0;
+}
+
+static bool bnxt_dir_type_is_ape_bin_format(uint16_t dir_type)
+{
+	switch (dir_type) {
+	case BNX_DIR_TYPE_CHIMP_PATCH:
+	case BNX_DIR_TYPE_BOOTCODE:
+	case BNX_DIR_TYPE_BOOTCODE_2:
+	case BNX_DIR_TYPE_APE_FW:
+	case BNX_DIR_TYPE_APE_PATCH:
+	case BNX_DIR_TYPE_KONG_FW:
+	case BNX_DIR_TYPE_KONG_PATCH:
+	case BNX_DIR_TYPE_BONO_FW:
+	case BNX_DIR_TYPE_BONO_PATCH:
+		return true;
+	}
+
+	return false;
+}
+
+static bool bnxt_dir_type_is_other_exec_format(uint16_t dir_type)
+{
+	switch (dir_type) {
+	case BNX_DIR_TYPE_AVS:
+	case BNX_DIR_TYPE_EXP_ROM_MBA:
+	case BNX_DIR_TYPE_PCIE:
+	case BNX_DIR_TYPE_TSCF_UCODE:
+	case BNX_DIR_TYPE_EXT_PHY:
+	case BNX_DIR_TYPE_CCM:
+	case BNX_DIR_TYPE_ISCSI_BOOT:
+	case BNX_DIR_TYPE_ISCSI_BOOT_IPV6:
+	case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6:
+		return true;
+	}
+
+	return false;
+}
+
+static bool bnxt_dir_type_is_executable(uint16_t dir_type)
+{
+	return bnxt_dir_type_is_ape_bin_format(dir_type) ||
+	       bnxt_dir_type_is_other_exec_format(dir_type);
+}
+
+static int
+bnxt_set_eeprom_op(struct rte_eth_dev *dev,
+		   struct rte_dev_eeprom_info *in_eeprom)
+{
+	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
+	uint8_t index, dir_op;
+	uint16_t type, ext, ordinal, attr;
+
+	RTE_LOG(INFO, PMD, "%s(): %04x:%02x:%02x:%02x in_eeprom->offset = %d "
+		"len = %d\n", __func__, bp->pdev->addr.domain,
+		bp->pdev->addr.bus, bp->pdev->addr.devid,
+		bp->pdev->addr.function, in_eeprom->offset, in_eeprom->length);
+
+	if (!BNXT_PF(bp)) {
+		RTE_LOG(ERR, PMD, "NVM write not supported from a VF\n");
+		return -EINVAL;
+	}
+
+	type = in_eeprom->magic >> 16;
+
+	if (type == 0xffff) { /* special value for directory operations */
+		index = in_eeprom->magic & 0xff;
+		dir_op = in_eeprom->magic >> 8;
+		if (index == 0)
+			return -EINVAL;
+		switch (dir_op) {
+		case 0x0e: /* erase */
+			if (in_eeprom->offset != ~in_eeprom->magic)
+				return -EINVAL;
+			return bnxt_hwrm_erase_nvram_directory(bp, index - 1);
+		default:
+			return -EINVAL;
+		}
+	}
+
+	/* Create or re-write an NVM item: */
+	if (bnxt_dir_type_is_executable(type) == true)
+		return -EOPNOTSUPP;
+	ext = in_eeprom->magic & 0xffff;
+	ordinal = in_eeprom->offset >> 16;
+	attr = in_eeprom->offset & 0xffff;
+
+	return bnxt_hwrm_flash_nvram(bp, type, ordinal, ext, attr,
+				     in_eeprom->data, in_eeprom->length);
+	return 0;
+}
+
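The eeprom hooks expose the NVM directory: offset 0 returns the directory itself, while higher offsets encode an index/offset pair, as bnxt_get_eeprom_op() shows. A read sketch (buffer handling invented):

```c
#include <stdint.h>
#include <rte_ethdev.h>

static int read_nvm_directory(uint16_t port_id, void *buf, uint32_t len)
{
	struct rte_dev_eeprom_info info = {
		.data = buf,
		.offset = 0,	/* special: fetch the directory */
		.length = len,
	};

	return rte_eth_dev_get_eeprom(port_id, &info);
}
```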
 /*
  * Initialization
  */
@@ -1535,6 +2691,8 @@ static const struct eth_dev_ops bnxt_dev_ops = {
 	.rx_queue_release = bnxt_rx_queue_release_op,
 	.tx_queue_setup = bnxt_tx_queue_setup_op,
 	.tx_queue_release = bnxt_tx_queue_release_op,
+	.rx_queue_intr_enable = bnxt_rx_queue_intr_enable_op,
+	.rx_queue_intr_disable = bnxt_rx_queue_intr_disable_op,
 	.reta_update = bnxt_reta_update_op,
 	.reta_query = bnxt_reta_query_op,
 	.rss_hash_update = bnxt_rss_hash_update_op,
@@ -1564,6 +2722,16 @@ static const struct eth_dev_ops bnxt_dev_ops = {
 	.txq_info_get = bnxt_txq_info_get_op,
 	.dev_led_on = bnxt_dev_led_on_op,
 	.dev_led_off = bnxt_dev_led_off_op,
+	.xstats_get_by_id = bnxt_dev_xstats_get_by_id_op,
+	.xstats_get_names_by_id = bnxt_dev_xstats_get_names_by_id_op,
+	.rx_queue_count = bnxt_rx_queue_count_op,
+	.rx_descriptor_status = bnxt_rx_descriptor_status_op,
+	.tx_descriptor_status = bnxt_tx_descriptor_status_op,
+	.filter_ctrl = bnxt_filter_ctrl_op,
+	.dev_supported_ptypes_get = bnxt_dev_supported_ptypes_get_op,
+	.get_eeprom_length = bnxt_get_eeprom_length_op,
+	.get_eeprom = bnxt_get_eeprom_op,
+	.set_eeprom = bnxt_set_eeprom_op,
 };
 
 static bool bnxt_vf_pciid(uint16_t id)
@@ -1628,7 +2796,7 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
 	const struct rte_memzone *mz = NULL;
 	static int version_printed;
 	uint32_t total_alloc_len;
-	phys_addr_t mz_phys_addr;
+	rte_iova_t mz_phys_addr;
 	struct bnxt *bp;
 	int rc;
 
@@ -1636,13 +2804,15 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
 		RTE_LOG(INFO, PMD, "%s\n", bnxt_version);
 
 	rte_eth_copy_pci_info(eth_dev, pci_dev);
-	eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
 
 	bp = eth_dev->data->dev_private;
 
 	rte_atomic64_init(&bp->rx_mbuf_alloc_fail);
 	bp->dev_stopped = 1;
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		goto skip_init;
+
 	if (bnxt_vf_pciid(pci_dev->id.device_id))
 		bp->flags |= BNXT_FLAG_VF;
 
@@ -1652,7 +2822,10 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
 			"Board initialization failed rc: %x\n", rc);
 		goto error;
 	}
+skip_init:
 	eth_dev->dev_ops = &bnxt_dev_ops;
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
 	eth_dev->rx_pkt_burst = &bnxt_recv_pkts;
 	eth_dev->tx_pkt_burst = &bnxt_xmit_pkts;
 
@@ -1674,13 +2847,13 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
 		return -ENOMEM;
 	}
 	memset(mz->addr, 0, mz->len);
-	mz_phys_addr = mz->phys_addr;
+	mz_phys_addr = mz->iova;
 	if ((unsigned long)mz->addr == mz_phys_addr) {
 		RTE_LOG(WARNING, PMD,
 			"Memzone physical address same as virtual.\n");
 		RTE_LOG(WARNING, PMD,
-			"Using rte_mem_virt2phy()\n");
-		mz_phys_addr = rte_mem_virt2phy(mz->addr);
+			"Using rte_mem_virt2iova()\n");
+		mz_phys_addr = rte_mem_virt2iova(mz->addr);
 		if (mz_phys_addr == 0) {
 			RTE_LOG(ERR, PMD,
 				"unable to map address to physical memory\n");
@@ -1709,13 +2882,13 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
 		return -ENOMEM;
 	}
 	memset(mz->addr, 0, mz->len);
-	mz_phys_addr = mz->phys_addr;
+	mz_phys_addr = mz->iova;
 	if ((unsigned long)mz->addr == mz_phys_addr) {
 		RTE_LOG(WARNING, PMD,
 			"Memzone physical address same as virtual.\n");
 		RTE_LOG(WARNING, PMD,
-			"Using rte_mem_virt2phy()\n");
-		mz_phys_addr = rte_mem_virt2phy(mz->addr);
+			"Using rte_mem_virt2iova()\n");
+		mz_phys_addr = rte_mem_virt2iova(mz->addr);
 		if (mz_phys_addr == 0) {
 			RTE_LOG(ERR, PMD,
 				"unable to map address to physical memory\n");
@@ -1755,11 +2928,11 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
 		goto error_free;
 	}
 	eth_dev->data->mac_addrs = rte_zmalloc("bnxt_mac_addr_tbl",
-					ETHER_ADDR_LEN * MAX_NUM_MAC_ADDR, 0);
+					ETHER_ADDR_LEN * bp->max_l2_ctx, 0);
 	if (eth_dev->data->mac_addrs == NULL) {
 		RTE_LOG(ERR, PMD,
 			"Failed to alloc %u bytes needed to store MAC addr tbl",
-			ETHER_ADDR_LEN * MAX_NUM_MAC_ADDR);
+			ETHER_ADDR_LEN * bp->max_l2_ctx);
 		rc = -ENOMEM;
 		goto error_free;
 	}
@@ -1798,6 +2971,8 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
 	ALLOW_FUNC(HWRM_VNIC_RSS_COS_LB_CTX_FREE);
 	ALLOW_FUNC(HWRM_CFA_L2_FILTER_FREE);
 	ALLOW_FUNC(HWRM_STAT_CTX_FREE);
+	ALLOW_FUNC(HWRM_PORT_PHY_QCFG);
+	ALLOW_FUNC(HWRM_VNIC_TPA_CFG);
 	rc = bnxt_hwrm_func_driver_register(bp);
 	if (rc) {
 		RTE_LOG(ERR, PMD,
@@ -1877,6 +3052,9 @@ bnxt_dev_uninit(struct rte_eth_dev *eth_dev)
 	struct bnxt *bp = eth_dev->data->dev_private;
 	int rc;
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return -EPERM;
+
 	bnxt_disable_int(bp);
 	bnxt_free_int(bp);
 	bnxt_free_mem(bp);
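bnxt_dev_init() and bnxt_dev_uninit() are now multi-process aware: only the primary process touches hardware, while a secondary just attaches to shared state and hooks up function pointers. A minimal sketch of the same guard, assuming the patch's pattern:

```c
#include <rte_common.h>
#include <rte_eal.h>
#include <rte_ethdev.h>

static int role_aware_init(struct rte_eth_dev *eth_dev __rte_unused)
{
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;	/* secondary: reuse the primary's setup */

	/* primary-only hardware initialization would go here */
	return 0;
}
```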
diff --git a/drivers/net/bnxt/bnxt_filter.c b/drivers/net/bnxt/bnxt_filter.c
index e9aac271..65d30fb3 100644
--- a/drivers/net/bnxt/bnxt_filter.c
+++ b/drivers/net/bnxt/bnxt_filter.c
@@ -35,6 +35,9 @@
 #include <rte_log.h>
 #include <rte_malloc.h>
+#include <rte_flow.h>
+#include <rte_flow_driver.h>
+#include <rte_tailq.h>
 
 #include "bnxt.h"
 #include "bnxt_filter.h"
@@ -94,6 +97,8 @@ void bnxt_init_filters(struct bnxt *bp)
 	for (i = 0; i < max_filters; i++) {
 		filter = &bp->filter_info[i];
 		filter->fw_l2_filter_id = -1;
+		filter->fw_em_filter_id = -1;
+		filter->fw_ntuple_filter_id = -1;
 		STAILQ_INSERT_TAIL(&bp->free_filter_list, filter, next);
 	}
 }
@@ -121,7 +126,7 @@ void bnxt_free_all_filters(struct bnxt *bp)
 	for (i = 0; i < bp->pf.max_vfs; i++) {
 		STAILQ_FOREACH(filter, &bp->pf.vf_info[i].filter, next) {
-			bnxt_hwrm_clear_filter(bp, filter);
+			bnxt_hwrm_clear_l2_filter(bp, filter);
 		}
 	}
 }
@@ -142,7 +147,7 @@ void bnxt_free_filter_mem(struct bnxt *bp)
 		if (filter->fw_l2_filter_id != ((uint64_t)-1)) {
 			RTE_LOG(ERR, PMD, "HWRM filter is not freed??\n");
 			/* Call HWRM to try to free filter again */
-			rc = bnxt_hwrm_clear_filter(bp, filter);
+			rc = bnxt_hwrm_clear_l2_filter(bp, filter);
 			if (rc)
 				RTE_LOG(ERR, PMD,
 					"HWRM filter cannot be freed rc = %d\n",
@@ -174,3 +179,1031 @@ int bnxt_alloc_filter_mem(struct bnxt *bp)
 	bp->filter_info = filter_mem;
 	return 0;
 }
+
+struct bnxt_filter_info *bnxt_get_unused_filter(struct bnxt *bp)
+{
+	struct bnxt_filter_info *filter;
+
+	/* Find the 1st unused filter from the free_filter_list pool*/
+	filter = STAILQ_FIRST(&bp->free_filter_list);
+	if (!filter) {
+		RTE_LOG(ERR, PMD, "No more free filter resources\n");
+		return NULL;
+	}
+	STAILQ_REMOVE_HEAD(&bp->free_filter_list, next);
+
+	return filter;
+}
+
+void bnxt_free_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
+{
+	STAILQ_INSERT_TAIL(&bp->free_filter_list, filter, next);
+}
+
+static int
+bnxt_flow_agrs_validate(const struct rte_flow_attr *attr,
+			const struct rte_flow_item pattern[],
+			const struct rte_flow_action actions[],
+			struct rte_flow_error *error)
+{
+	if (!pattern) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+				   NULL, "NULL pattern.");
+		return -rte_errno;
+	}
+
+	if (!actions) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+				   NULL, "NULL action.");
+		return -rte_errno;
+	}
+
+	if (!attr) {
+		rte_flow_error_set(error, EINVAL,
+				   RTE_FLOW_ERROR_TYPE_ATTR,
+				   NULL, "NULL attribute.");
+		return -rte_errno;
+	}
+
+	return 0;
+}
+
+static const struct rte_flow_item *
+nxt_non_void_pattern(const struct rte_flow_item *cur)
+{
+	while (1) {
+		if (cur->type != RTE_FLOW_ITEM_TYPE_VOID)
+			return cur;
+		cur++;
+	}
+}
+
+static const struct rte_flow_action *
+nxt_non_void_action(const struct rte_flow_action *cur)
+{
+	while (1) {
+		if (cur->type != RTE_FLOW_ACTION_TYPE_VOID)
+			return cur;
+		cur++;
+	}
+}
+
+static inline int check_zero_bytes(const uint8_t *bytes, int len)
+{
+	int i;
+
+	for (i = 0; i < len; i++)
+		if (bytes[i] != 0x00)
+			return 0;
+	return 1;
+}
+
+static int
+bnxt_filter_type_check(const struct rte_flow_item pattern[],
+		       struct rte_flow_error *error __rte_unused)
+{
+	const struct rte_flow_item *item = nxt_non_void_pattern(pattern);
+	int use_ntuple = 1;
+
+	while (item->type != RTE_FLOW_ITEM_TYPE_END) {
+		switch (item->type) {
+		case RTE_FLOW_ITEM_TYPE_ETH:
+			use_ntuple = 1;
+			break;
+		case RTE_FLOW_ITEM_TYPE_VLAN:
+			use_ntuple = 0;
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV4:
+		case RTE_FLOW_ITEM_TYPE_IPV6:
+		case RTE_FLOW_ITEM_TYPE_TCP:
+		case RTE_FLOW_ITEM_TYPE_UDP:
+			/* FALLTHROUGH */
+			/* need ntuple match, reset exact match */
+			if (!use_ntuple) {
+				RTE_LOG(ERR, PMD,
+					"VLAN flow cannot use NTUPLE filter\n");
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Cannot use VLAN with NTUPLE");
+				return -rte_errno;
+			}
+			use_ntuple |= 1;
+			break;
+		default:
+			RTE_LOG(ERR, PMD, "Unknown Flow type");
+			use_ntuple |= 1;
+		}
+		item++;
+	}
+	return use_ntuple;
+}
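bnxt_filter_type_check() decides between HWRM ntuple and exact-match filters from the pattern alone (a VLAN item forces exact match). A pattern the parser below would classify as ntuple, sketched with invented addresses; note that this parser requires both spec and mask on every item:

```c
#include <rte_flow.h>
#include <rte_byteorder.h>

static struct rte_flow *udp4_to_queue(uint16_t port_id, uint16_t rxq)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_ipv4 ip_spec = {
		.hdr = { .dst_addr = rte_cpu_to_be_32(0x0a000002) } };
	struct rte_flow_item_ipv4 ip_mask = {
		.hdr = { .dst_addr = rte_cpu_to_be_32(0xffffffff) } };
	struct rte_flow_item_udp udp_spec = {
		.hdr = { .dst_port = rte_cpu_to_be_16(4789) } };
	struct rte_flow_item_udp udp_mask = {
		.hdr = { .dst_port = rte_cpu_to_be_16(0xffff) } };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ip_spec, .mask = &ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
		  .spec = &udp_spec, .mask = &udp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue q = { .index = rxq };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &q },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;

	return rte_flow_create(port_id, &attr, pattern, actions, &err);
}
```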
+static int
+bnxt_validate_and_parse_flow_type(struct bnxt *bp,
+				  const struct rte_flow_item pattern[],
+				  struct rte_flow_error *error,
+				  struct bnxt_filter_info *filter)
+{
+	const struct rte_flow_item *item = nxt_non_void_pattern(pattern);
+	const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
+	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
+	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
+	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
+	const struct rte_flow_item_udp *udp_spec, *udp_mask;
+	const struct rte_flow_item_eth *eth_spec, *eth_mask;
+	const struct rte_flow_item_nvgre *nvgre_spec;
+	const struct rte_flow_item_nvgre *nvgre_mask;
+	const struct rte_flow_item_vxlan *vxlan_spec;
+	const struct rte_flow_item_vxlan *vxlan_mask;
+	uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
+	uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
+	const struct rte_flow_item_vf *vf_spec;
+	uint32_t tenant_id_be = 0;
+	bool vni_masked = 0;
+	bool tni_masked = 0;
+	uint32_t vf = 0;
+	int use_ntuple;
+	uint32_t en = 0;
+	int dflt_vnic;
+
+	use_ntuple = bnxt_filter_type_check(pattern, error);
+	RTE_LOG(DEBUG, PMD, "Use NTUPLE %d\n", use_ntuple);
+	if (use_ntuple < 0)
+		return use_ntuple;
+
+	filter->filter_type = use_ntuple ?
+		HWRM_CFA_NTUPLE_FILTER : HWRM_CFA_EM_FILTER;
+
+	while (item->type != RTE_FLOW_ITEM_TYPE_END) {
+		if (item->last) {
+			/* last or range is NOT supported as match criteria */
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "No support for range");
+			return -rte_errno;
+		}
+		if (!item->spec || !item->mask) {
+			rte_flow_error_set(error, EINVAL,
+					   RTE_FLOW_ERROR_TYPE_ITEM,
+					   item,
+					   "spec/mask is NULL");
+			return -rte_errno;
+		}
+		switch (item->type) {
+		case RTE_FLOW_ITEM_TYPE_ETH:
+			eth_spec = (const struct rte_flow_item_eth *)item->spec;
+			eth_mask = (const struct rte_flow_item_eth *)item->mask;
+
+			/* Source MAC address mask cannot be partially set.
+			 * Should be All 0's or all 1's.
+			 * Destination MAC address mask must not be partially
+			 * set. Should be all 1's or all 0's.
+			 */
+			if ((!is_zero_ether_addr(&eth_mask->src) &&
+			     !is_broadcast_ether_addr(&eth_mask->src)) ||
+			    (!is_zero_ether_addr(&eth_mask->dst) &&
+			     !is_broadcast_ether_addr(&eth_mask->dst))) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "MAC_addr mask not valid");
+				return -rte_errno;
+			}
+
+			/* Mask is not allowed. Only exact matches are */
+			if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "ethertype mask not valid");
+				return -rte_errno;
+			}
+
+			if (is_broadcast_ether_addr(&eth_mask->dst)) {
+				rte_memcpy(filter->dst_macaddr,
+					   &eth_spec->dst, 6);
+				en |= use_ntuple ?
+					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR :
+					EM_FLOW_ALLOC_INPUT_EN_DST_MACADDR;
+			}
+			if (is_broadcast_ether_addr(&eth_mask->src)) {
+				rte_memcpy(filter->src_macaddr,
+					   &eth_spec->src, 6);
+				en |= use_ntuple ?
+					NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_MACADDR :
+					EM_FLOW_ALLOC_INPUT_EN_SRC_MACADDR;
+			} /*
+			   * else {
+			   *	RTE_LOG(ERR, PMD, "Handle this condition\n");
+			   * }
+			   */
+			if (eth_spec->type) {
+				filter->ethertype =
+					rte_be_to_cpu_16(eth_spec->type);
+				en |= use_ntuple ?
+					NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE :
+					EM_FLOW_ALLOC_INPUT_EN_ETHERTYPE;
+			}
+
+			break;
+		case RTE_FLOW_ITEM_TYPE_VLAN:
+			vlan_spec =
+				(const struct rte_flow_item_vlan *)item->spec;
+			vlan_mask =
+				(const struct rte_flow_item_vlan *)item->mask;
+			if (vlan_mask->tci & 0xFFFF && !vlan_mask->tpid) {
+				/* Only the VLAN ID can be matched. */
+				filter->l2_ovlan =
+					rte_be_to_cpu_16(vlan_spec->tci &
+							 0xFFF);
+				en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID;
+			} else {
+				rte_flow_error_set(error, EINVAL,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "VLAN mask is invalid");
+				return -rte_errno;
+			}
+
+			break;
*/ + if (ipv4_mask->hdr.version_ihl || + ipv4_mask->hdr.type_of_service || + ipv4_mask->hdr.total_length || + ipv4_mask->hdr.packet_id || + ipv4_mask->hdr.fragment_offset || + ipv4_mask->hdr.time_to_live || + ipv4_mask->hdr.next_proto_id || + ipv4_mask->hdr.hdr_checksum) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid IPv4 mask."); + return -rte_errno; + } + filter->dst_ipaddr[0] = ipv4_spec->hdr.dst_addr; + filter->src_ipaddr[0] = ipv4_spec->hdr.src_addr; + if (use_ntuple) + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR | + NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; + else + en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR | + EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR; + if (ipv4_mask->hdr.src_addr) { + filter->src_ipaddr_mask[0] = + ipv4_mask->hdr.src_addr; + en |= !use_ntuple ? 0 : + NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; + } + if (ipv4_mask->hdr.dst_addr) { + filter->dst_ipaddr_mask[0] = + ipv4_mask->hdr.dst_addr; + en |= !use_ntuple ? 0 : + NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; + } + filter->ip_addr_type = use_ntuple ? + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 : + HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV4; + if (ipv4_spec->hdr.next_proto_id) { + filter->ip_protocol = + ipv4_spec->hdr.next_proto_id; + if (use_ntuple) + en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; + else + en |= EM_FLOW_ALLOC_INPUT_EN_IP_PROTO; + } + break; + case RTE_FLOW_ITEM_TYPE_IPV6: + ipv6_spec = + (const struct rte_flow_item_ipv6 *)item->spec; + ipv6_mask = + (const struct rte_flow_item_ipv6 *)item->mask; + + /* Only IP DST and SRC fields are maskable. */ + if (ipv6_mask->hdr.vtc_flow || + ipv6_mask->hdr.payload_len || + ipv6_mask->hdr.proto || + ipv6_mask->hdr.hop_limits) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid IPv6 mask."); + return -rte_errno; + } + + if (use_ntuple) + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR | + NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; + else + en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR | + EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR; + rte_memcpy(filter->src_ipaddr, + ipv6_spec->hdr.src_addr, 16); + rte_memcpy(filter->dst_ipaddr, + ipv6_spec->hdr.dst_addr, 16); + if (!check_zero_bytes(ipv6_mask->hdr.src_addr, 16)) { + rte_memcpy(filter->src_ipaddr_mask, + ipv6_mask->hdr.src_addr, 16); + en |= !use_ntuple ? 0 : + NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; + } + if (!check_zero_bytes(ipv6_mask->hdr.dst_addr, 16)) { + rte_memcpy(filter->dst_ipaddr_mask, + ipv6_mask->hdr.dst_addr, 16); + en |= !use_ntuple ? 0 : + NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; + } + filter->ip_addr_type = use_ntuple ? + NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 : + EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6; + break; + case RTE_FLOW_ITEM_TYPE_TCP: + tcp_spec = (const struct rte_flow_item_tcp *)item->spec; + tcp_mask = (const struct rte_flow_item_tcp *)item->mask; + + /* Check TCP mask. 
Only DST & SRC ports are maskable */ + if (tcp_mask->hdr.sent_seq || + tcp_mask->hdr.recv_ack || + tcp_mask->hdr.data_off || + tcp_mask->hdr.tcp_flags || + tcp_mask->hdr.rx_win || + tcp_mask->hdr.cksum || + tcp_mask->hdr.tcp_urp) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid TCP mask"); + return -rte_errno; + } + filter->src_port = tcp_spec->hdr.src_port; + filter->dst_port = tcp_spec->hdr.dst_port; + if (use_ntuple) + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT | + NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT; + else + en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT | + EM_FLOW_ALLOC_INPUT_EN_DST_PORT; + if (tcp_mask->hdr.dst_port) { + filter->dst_port_mask = tcp_mask->hdr.dst_port; + en |= !use_ntuple ? 0 : + NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK; + } + if (tcp_mask->hdr.src_port) { + filter->src_port_mask = tcp_mask->hdr.src_port; + en |= !use_ntuple ? 0 : + NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK; + } + break; + case RTE_FLOW_ITEM_TYPE_UDP: + udp_spec = (const struct rte_flow_item_udp *)item->spec; + udp_mask = (const struct rte_flow_item_udp *)item->mask; + + if (udp_mask->hdr.dgram_len || + udp_mask->hdr.dgram_cksum) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid UDP mask"); + return -rte_errno; + } + + filter->src_port = udp_spec->hdr.src_port; + filter->dst_port = udp_spec->hdr.dst_port; + if (use_ntuple) + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT | + NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT; + else + en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT | + EM_FLOW_ALLOC_INPUT_EN_DST_PORT; + + if (udp_mask->hdr.dst_port) { + filter->dst_port_mask = udp_mask->hdr.dst_port; + en |= !use_ntuple ? 0 : + NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK; + } + if (udp_mask->hdr.src_port) { + filter->src_port_mask = udp_mask->hdr.src_port; + en |= !use_ntuple ? 0 : + NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK; + } + break; + case RTE_FLOW_ITEM_TYPE_VXLAN: + vxlan_spec = + (const struct rte_flow_item_vxlan *)item->spec; + vxlan_mask = + (const struct rte_flow_item_vxlan *)item->mask; + /* Check if VXLAN item is used to describe protocol. + * If yes, both spec and mask should be NULL. + * If no, both spec and mask shouldn't be NULL. + */ + if ((!vxlan_spec && vxlan_mask) || + (vxlan_spec && !vxlan_mask)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid VXLAN item"); + return -rte_errno; + } + + if (vxlan_spec->rsvd1 || vxlan_spec->rsvd0[0] || + vxlan_spec->rsvd0[1] || vxlan_spec->rsvd0[2] || + vxlan_spec->flags != 0x8) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid VXLAN item"); + return -rte_errno; + } + + /* Check if VNI is masked. */ + if (vxlan_spec && vxlan_mask) { + vni_masked = + !!memcmp(vxlan_mask->vni, vni_mask, + RTE_DIM(vni_mask)); + if (vni_masked) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid VNI mask"); + return -rte_errno; + } + + rte_memcpy(((uint8_t *)&tenant_id_be + 1), + vxlan_spec->vni, 3); + filter->vni = + rte_be_to_cpu_32(tenant_id_be); + filter->tunnel_type = + CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN; + } + break; + case RTE_FLOW_ITEM_TYPE_NVGRE: + nvgre_spec = + (const struct rte_flow_item_nvgre *)item->spec; + nvgre_mask = + (const struct rte_flow_item_nvgre *)item->mask; + /* Check if NVGRE item is used to describe protocol. + * If yes, both spec and mask should be NULL. + * If no, both spec and mask shouldn't be NULL. 
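* A sketch of spec/mask values the checks below accept (illustrative
* only): 0x2000 sets the GRE K bit, 0x6558 is Transparent Ethernet
* Bridging, and the TNI mask must cover all three bytes:
*
*	struct rte_flow_item_nvgre spec = {
*		.c_k_s_rsvd0_ver = 0x2000,
*		.protocol = 0x6558,
*		.tni = { 0x12, 0x34, 0x56 },
*	};
*	struct rte_flow_item_nvgre mask = {
*		.tni = { 0xFF, 0xFF, 0xFF },
*	};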
+ */ + if ((!nvgre_spec && nvgre_mask) || + (nvgre_spec && !nvgre_mask)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid NVGRE item"); + return -rte_errno; + } + + if (nvgre_spec->c_k_s_rsvd0_ver != 0x2000 || + nvgre_spec->protocol != 0x6558) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid NVGRE item"); + return -rte_errno; + } + + if (nvgre_spec && nvgre_mask) { + tni_masked = + !!memcmp(nvgre_mask->tni, tni_mask, + RTE_DIM(tni_mask)); + if (tni_masked) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid TNI mask"); + return -rte_errno; + } + rte_memcpy(((uint8_t *)&tenant_id_be + 1), + nvgre_spec->tni, 3); + filter->vni = + rte_be_to_cpu_32(tenant_id_be); + filter->tunnel_type = + CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE; + } + break; + case RTE_FLOW_ITEM_TYPE_VF: + vf_spec = (const struct rte_flow_item_vf *)item->spec; + vf = vf_spec->id; + if (!BNXT_PF(bp)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Configuring on a VF!"); + return -rte_errno; + } + + if (vf >= bp->pdev->max_vfs) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Incorrect VF id!"); + return -rte_errno; + } + + filter->mirror_vnic_id = + dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf); + if (dflt_vnic < 0) { + /* This simply indicates there's no driver + * loaded. This is not an error. + */ + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Unable to get default VNIC for VF"); + return -rte_errno; + } + filter->mirror_vnic_id = dflt_vnic; + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID; + break; + default: + break; + } + item++; + } + filter->enables = en; + + return 0; +} + +/* Parse attributes */ +static int +bnxt_flow_parse_attr(const struct rte_flow_attr *attr, + struct rte_flow_error *error) +{ + /* Must be input direction */ + if (!attr->ingress) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, + attr, "Only support ingress."); + return -rte_errno; + } + + /* Not supported */ + if (attr->egress) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, + attr, "No support for egress."); + return -rte_errno; + } + + /* Not supported */ + if (attr->priority) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, + attr, "No support for priority."); + return -rte_errno; + } + + /* Not supported */ + if (attr->group) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_GROUP, + attr, "No support for group."); + return -rte_errno; + } + + return 0; +} + +struct bnxt_filter_info * +bnxt_get_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf, + struct bnxt_vnic_info *vnic) +{ + struct bnxt_filter_info *filter1, *f0; + struct bnxt_vnic_info *vnic0; + int rc; + + vnic0 = STAILQ_FIRST(&bp->ff_pool[0]); + f0 = STAILQ_FIRST(&vnic0->filter); + + //This flow has same DST MAC as the port/l2 filter. 
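//(f0 here is presumably the default L2 filter created for vnic0 at
// init time with the port MAC address; when the flow's destination
// MAC matches it, that filter is reused rather than allocating a
// second one.)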
+ if (memcmp(f0->l2_addr, nf->dst_macaddr, ETHER_ADDR_LEN) == 0) + return f0; + + //This flow needs DST MAC which is not same as port/l2 + RTE_LOG(DEBUG, PMD, "Create L2 filter for DST MAC\n"); + filter1 = bnxt_get_unused_filter(bp); + if (filter1 == NULL) + return NULL; + filter1->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX; + filter1->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR | + L2_FILTER_ALLOC_INPUT_EN_L2_ADDR_MASK; + memcpy(filter1->l2_addr, nf->dst_macaddr, ETHER_ADDR_LEN); + memset(filter1->l2_addr_mask, 0xff, ETHER_ADDR_LEN); + rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, + filter1); + if (rc) { + bnxt_free_filter(bp, filter1); + return NULL; + } + STAILQ_INSERT_TAIL(&vnic->filter, filter1, next); + return filter1; +} + +static int +bnxt_validate_and_parse_flow(struct rte_eth_dev *dev, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + const struct rte_flow_attr *attr, + struct rte_flow_error *error, + struct bnxt_filter_info *filter) +{ + const struct rte_flow_action *act = nxt_non_void_action(actions); + struct bnxt *bp = (struct bnxt *)dev->data->dev_private; + const struct rte_flow_action_queue *act_q; + const struct rte_flow_action_vf *act_vf; + struct bnxt_vnic_info *vnic, *vnic0; + struct bnxt_filter_info *filter1; + uint32_t vf = 0; + int dflt_vnic; + int rc; + + if (bp->eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) { + RTE_LOG(ERR, PMD, "Cannot create flow on RSS queues\n"); + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "Cannot create flow on RSS queues"); + rc = -rte_errno; + goto ret; + } + + rc = bnxt_validate_and_parse_flow_type(bp, pattern, error, filter); + if (rc != 0) + goto ret; + + rc = bnxt_flow_parse_attr(attr, error); + if (rc != 0) + goto ret; + //Since we support ingress attribute only - right now. + filter->flags = HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX; + + switch (act->type) { + case RTE_FLOW_ACTION_TYPE_QUEUE: + /* Allow this flow. Redirect to a VNIC. 
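* The caller typically reaches this path with an action array such
* as the following sketch (the queue index is a hypothetical value):
*
*	struct rte_flow_action_queue queue = { .index = 1 };
*	struct rte_flow_action actions[] = {
*		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
*		{ .type = RTE_FLOW_ACTION_TYPE_END },
*	};
*
* queue.index is validated against rx_nr_rings and then mapped to
* the VNIC backing that RX queue.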
*/ + act_q = (const struct rte_flow_action_queue *)act->conf; + if (act_q->index >= bp->rx_nr_rings) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, act, + "Invalid queue ID."); + rc = -rte_errno; + goto ret; + } + RTE_LOG(DEBUG, PMD, "Queue index %d\n", act_q->index); + + vnic0 = STAILQ_FIRST(&bp->ff_pool[0]); + vnic = STAILQ_FIRST(&bp->ff_pool[act_q->index]); + if (vnic == NULL) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, act, + "No matching VNIC for queue ID."); + rc = -rte_errno; + goto ret; + } + filter->dst_id = vnic->fw_vnic_id; + filter1 = bnxt_get_l2_filter(bp, filter, vnic); + if (filter1 == NULL) { + rc = -ENOSPC; + goto ret; + } + filter->fw_l2_filter_id = filter1->fw_l2_filter_id; + RTE_LOG(DEBUG, PMD, "VNIC found\n"); + break; + case RTE_FLOW_ACTION_TYPE_DROP: + vnic0 = STAILQ_FIRST(&bp->ff_pool[0]); + filter1 = bnxt_get_l2_filter(bp, filter, vnic0); + if (filter1 == NULL) { + rc = -ENOSPC; + goto ret; + } + filter->fw_l2_filter_id = filter1->fw_l2_filter_id; + if (filter->filter_type == HWRM_CFA_EM_FILTER) + filter->flags = + HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DROP; + else + filter->flags = + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP; + break; + case RTE_FLOW_ACTION_TYPE_COUNT: + vnic0 = STAILQ_FIRST(&bp->ff_pool[0]); + filter1 = bnxt_get_l2_filter(bp, filter, vnic0); + if (filter1 == NULL) { + rc = -ENOSPC; + goto ret; + } + filter->fw_l2_filter_id = filter1->fw_l2_filter_id; + filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_METER; + break; + case RTE_FLOW_ACTION_TYPE_VF: + act_vf = (const struct rte_flow_action_vf *)act->conf; + vf = act_vf->id; + if (!BNXT_PF(bp)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "Configuring on a VF!"); + rc = -rte_errno; + goto ret; + } + + if (vf >= bp->pdev->max_vfs) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "Incorrect VF id!"); + rc = -rte_errno; + goto ret; + } + + filter->mirror_vnic_id = + dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf); + if (dflt_vnic < 0) { + /* This simply indicates there's no driver loaded. + * This is not an error. 
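* (When no driver is loaded on the VF there is no default VNIC to
* mirror to, so the flow request is rejected back to the caller.)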
+ */ + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "Unable to get default VNIC for VF"); + rc = -rte_errno; + goto ret; + } + filter->mirror_vnic_id = dflt_vnic; + filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID; + + vnic0 = STAILQ_FIRST(&bp->ff_pool[0]); + filter1 = bnxt_get_l2_filter(bp, filter, vnic0); + if (filter1 == NULL) { + rc = -ENOSPC; + goto ret; + } + filter->fw_l2_filter_id = filter1->fw_l2_filter_id; + break; + + default: + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, act, + "Invalid action."); + rc = -rte_errno; + goto ret; + } + +//done: + act = nxt_non_void_action(++act); + if (act->type != RTE_FLOW_ACTION_TYPE_END) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, "Invalid action."); + rc = -rte_errno; + goto ret; + } +ret: + return rc; +} + +static int +bnxt_flow_validate(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + struct bnxt *bp = (struct bnxt *)dev->data->dev_private; + struct bnxt_filter_info *filter; + int ret = 0; + + ret = bnxt_flow_agrs_validate(attr, pattern, actions, error); + if (ret != 0) + return ret; + + filter = bnxt_get_unused_filter(bp); + if (filter == NULL) { + RTE_LOG(ERR, PMD, "Not enough resources for a new flow.\n"); + return -ENOMEM; + } + + ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr, + error, filter); + /* No need to hold on to this filter if we are just validating flow */ + filter->fw_l2_filter_id = -1; + bnxt_free_filter(bp, filter); + + return ret; +} + +static int +bnxt_match_filter(struct bnxt *bp, struct bnxt_filter_info *nf) +{ + struct bnxt_filter_info *mf; + struct rte_flow *flow; + int i; + + for (i = bp->nr_vnics - 1; i >= 0; i--) { + struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; + + STAILQ_FOREACH(flow, &vnic->flow_list, next) { + mf = flow->filter; + + if (mf->filter_type == nf->filter_type && + mf->flags == nf->flags && + mf->src_port == nf->src_port && + mf->src_port_mask == nf->src_port_mask && + mf->dst_port == nf->dst_port && + mf->dst_port_mask == nf->dst_port_mask && + mf->ip_protocol == nf->ip_protocol && + mf->ip_addr_type == nf->ip_addr_type && + mf->ethertype == nf->ethertype && + mf->vni == nf->vni && + mf->tunnel_type == nf->tunnel_type && + mf->l2_ovlan == nf->l2_ovlan && + mf->l2_ovlan_mask == nf->l2_ovlan_mask && + mf->l2_ivlan == nf->l2_ivlan && + mf->l2_ivlan_mask == nf->l2_ivlan_mask && + !memcmp(mf->l2_addr, nf->l2_addr, ETHER_ADDR_LEN) && + !memcmp(mf->l2_addr_mask, nf->l2_addr_mask, + ETHER_ADDR_LEN) && + !memcmp(mf->src_macaddr, nf->src_macaddr, + ETHER_ADDR_LEN) && + !memcmp(mf->dst_macaddr, nf->dst_macaddr, + ETHER_ADDR_LEN) && + !memcmp(mf->src_ipaddr, nf->src_ipaddr, + sizeof(nf->src_ipaddr)) && + !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask, + sizeof(nf->src_ipaddr_mask)) && + !memcmp(mf->dst_ipaddr, nf->dst_ipaddr, + sizeof(nf->dst_ipaddr)) && + !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask, + sizeof(nf->dst_ipaddr_mask))) + return -EEXIST; + } + } + return 0; +} + +static struct rte_flow * +bnxt_flow_create(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + struct bnxt *bp = (struct bnxt *)dev->data->dev_private; + struct bnxt_filter_info *filter; + struct bnxt_vnic_info *vnic = NULL; + struct rte_flow *flow; + unsigned int i; + 
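/* Sketch of the sequence below: validate the argument arrays, parse
 * the pattern/actions into a bnxt_filter_info, reject duplicates via
 * bnxt_match_filter(), program the EM or ntuple filter through HWRM,
 * and finally link the flow into the owning VNIC's flow_list.
 */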
int ret = 0; + + flow = rte_zmalloc("bnxt_flow", sizeof(struct rte_flow), 0); + if (!flow) { + rte_flow_error_set(error, ENOMEM, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Failed to allocate memory"); + return flow; + } + + ret = bnxt_flow_agrs_validate(attr, pattern, actions, error); + if (ret != 0) { + RTE_LOG(ERR, PMD, "Not a validate flow.\n"); + goto free_flow; + } + + filter = bnxt_get_unused_filter(bp); + if (filter == NULL) { + RTE_LOG(ERR, PMD, "Not enough resources for a new flow.\n"); + goto free_flow; + } + + ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr, + error, filter); + if (ret != 0) + goto free_filter; + + ret = bnxt_match_filter(bp, filter); + if (ret != 0) { + RTE_LOG(DEBUG, PMD, "Flow already exists.\n"); + goto free_filter; + } + + if (filter->filter_type == HWRM_CFA_EM_FILTER) { + filter->enables |= + HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID; + ret = bnxt_hwrm_set_em_filter(bp, filter->dst_id, filter); + } + if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) { + filter->enables |= + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID; + ret = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id, filter); + } + + for (i = 0; i < bp->nr_vnics; i++) { + vnic = &bp->vnic_info[i]; + if (filter->dst_id == vnic->fw_vnic_id) + break; + } + + if (!ret) { + flow->filter = filter; + flow->vnic = vnic; + RTE_LOG(ERR, PMD, "Successfully created flow.\n"); + STAILQ_INSERT_TAIL(&vnic->flow_list, flow, next); + return flow; + } +free_filter: + filter->fw_l2_filter_id = -1; + bnxt_free_filter(bp, filter); +free_flow: + if (ret == -EEXIST) + rte_flow_error_set(error, ret, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Matching Flow exists."); + else + rte_flow_error_set(error, -ret, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Failed to create flow."); + rte_free(flow); + flow = NULL; + return flow; +} + +static int +bnxt_flow_destroy(struct rte_eth_dev *dev, + struct rte_flow *flow, + struct rte_flow_error *error) +{ + struct bnxt *bp = (struct bnxt *)dev->data->dev_private; + struct bnxt_filter_info *filter = flow->filter; + struct bnxt_vnic_info *vnic = flow->vnic; + int ret = 0; + + ret = bnxt_match_filter(bp, filter); + if (ret == 0) + RTE_LOG(ERR, PMD, "Could not find matching flow\n"); + if (filter->filter_type == HWRM_CFA_EM_FILTER) + ret = bnxt_hwrm_clear_em_filter(bp, filter); + if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) + ret = bnxt_hwrm_clear_ntuple_filter(bp, filter); + + if (!ret) { + STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next); + rte_free(flow); + } else { + rte_flow_error_set(error, -ret, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Failed to destroy flow."); + } + + return ret; +} + +static int +bnxt_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error) +{ + struct bnxt *bp = (struct bnxt *)dev->data->dev_private; + struct bnxt_vnic_info *vnic; + struct rte_flow *flow; + unsigned int i; + int ret = 0; + + for (i = 0; i < bp->nr_vnics; i++) { + vnic = &bp->vnic_info[i]; + STAILQ_FOREACH(flow, &vnic->flow_list, next) { + struct bnxt_filter_info *filter = flow->filter; + + if (filter->filter_type == HWRM_CFA_EM_FILTER) + ret = bnxt_hwrm_clear_em_filter(bp, filter); + if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) + ret = bnxt_hwrm_clear_ntuple_filter(bp, filter); + + if (ret) { + rte_flow_error_set(error, -ret, + RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, + "Failed to flush flow in HW."); + return -rte_errno; + } + + STAILQ_REMOVE(&vnic->flow_list, flow, + rte_flow, next); + rte_free(flow); + } + } + + return ret; +} + +const struct 
rte_flow_ops bnxt_flow_ops = { + .validate = bnxt_flow_validate, + .create = bnxt_flow_create, + .destroy = bnxt_flow_destroy, + .flush = bnxt_flow_flush, +}; diff --git a/drivers/net/bnxt/bnxt_filter.h b/drivers/net/bnxt/bnxt_filter.h index 613b2eea..2591a87e 100644 --- a/drivers/net/bnxt/bnxt_filter.h +++ b/drivers/net/bnxt/bnxt_filter.h @@ -40,8 +40,15 @@ struct bnxt; struct bnxt_filter_info { STAILQ_ENTRY(bnxt_filter_info) next; uint64_t fw_l2_filter_id; + uint64_t fw_em_filter_id; + uint64_t fw_ntuple_filter_id; #define INVALID_MAC_INDEX ((uint16_t)-1) uint16_t mac_index; +#define HWRM_CFA_L2_FILTER 0 +#define HWRM_CFA_EM_FILTER 1 +#define HWRM_CFA_NTUPLE_FILTER 2 + uint8_t filter_type; //L2 or EM or NTUPLE filter + uint32_t dst_id; /* Filter Characteristics */ uint32_t flags; @@ -65,6 +72,19 @@ struct bnxt_filter_info { uint64_t l2_filter_id_hint; uint32_t src_id; uint8_t src_type; + uint8_t src_macaddr[6]; + uint8_t dst_macaddr[6]; + uint32_t dst_ipaddr[4]; + uint32_t dst_ipaddr_mask[4]; + uint32_t src_ipaddr[4]; + uint32_t src_ipaddr_mask[4]; + uint16_t dst_port; + uint16_t dst_port_mask; + uint16_t src_port; + uint16_t src_port_mask; + uint16_t ip_protocol; + uint16_t ip_addr_type; + uint16_t ethertype; }; struct bnxt_filter_info *bnxt_alloc_filter(struct bnxt *bp); @@ -73,5 +93,73 @@ void bnxt_init_filters(struct bnxt *bp); void bnxt_free_all_filters(struct bnxt *bp); void bnxt_free_filter_mem(struct bnxt *bp); int bnxt_alloc_filter_mem(struct bnxt *bp); +struct bnxt_filter_info *bnxt_get_unused_filter(struct bnxt *bp); +void bnxt_free_filter(struct bnxt *bp, struct bnxt_filter_info *filter); +struct bnxt_filter_info *bnxt_get_l2_filter(struct bnxt *bp, + struct bnxt_filter_info *nf, struct bnxt_vnic_info *vnic); +#define NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_MACADDR \ + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR +#define EM_FLOW_ALLOC_INPUT_EN_SRC_MACADDR \ + HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR +#define NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR \ + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_MACADDR +#define EM_FLOW_ALLOC_INPUT_EN_DST_MACADDR \ + HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR +#define NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE \ + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE +#define EM_FLOW_ALLOC_INPUT_EN_ETHERTYPE \ + HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE +#define EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID \ + HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID +#define NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR \ + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR +#define NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK \ + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK +#define NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR \ + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR +#define NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK \ + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK +#define NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT \ + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT +#define NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK \ + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK +#define NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT \ + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT +#define NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK \ + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK +#define NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO \ + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL +#define EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR \ + HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR +#define EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR \ 
+ HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR +#define EM_FLOW_ALLOC_INPUT_EN_SRC_PORT \ + HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT +#define EM_FLOW_ALLOC_INPUT_EN_DST_PORT \ + HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT +#define EM_FLOW_ALLOC_INPUT_EN_IP_PROTO \ + HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL +#define EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 \ + HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 +#define NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 \ + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 +#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN \ + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN +#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE \ + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NVGRE +#define L2_FILTER_ALLOC_INPUT_EN_L2_ADDR_MASK \ + HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK +#define NTUPLE_FLTR_ALLOC_INPUT_IP_PROTOCOL_UDP \ + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_PROTOCOL_UDP +#define NTUPLE_FLTR_ALLOC_INPUT_IP_PROTOCOL_TCP \ + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_PROTOCOL_TCP +#define NTUPLE_FLTR_ALLOC_INPUT_IP_PROTOCOL_UNKNOWN \ + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_PROTOCOL_UNKNOWN +#define NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 \ + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 +#define NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID \ + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID +#define NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID \ + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID #endif diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c index e710e636..bf1fb469 100644 --- a/drivers/net/bnxt/bnxt_hwrm.c +++ b/drivers/net/bnxt/bnxt_hwrm.c @@ -54,7 +54,7 @@ #include <rte_io.h> -#define HWRM_CMD_TIMEOUT 2000 +#define HWRM_CMD_TIMEOUT 10000 struct bnxt_plcmodes_cfg { uint32_t flags; @@ -95,7 +95,7 @@ static int page_roundup(size_t size) * command was failed by the ChiMP. */ -static int bnxt_hwrm_send_message_locked(struct bnxt *bp, void *msg, +static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg, uint32_t msg_len) { unsigned int i; @@ -171,52 +171,58 @@ err_ret: return -1; } -static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg, uint32_t msg_len) -{ - int rc; - - rte_spinlock_lock(&bp->hwrm_lock); - rc = bnxt_hwrm_send_message_locked(bp, msg, msg_len); - rte_spinlock_unlock(&bp->hwrm_lock); - return rc; -} - -#define HWRM_PREP(req, type, cr, resp) \ +/* + * HWRM_PREP() should be used to prepare *ALL* HWRM commands. It grabs the + * spinlock, and does initial processing. + * + * HWRM_CHECK_RESULT() returns errors on failure and may not be used. It + * releases the spinlock only if it returns. If the regular int return codes + * are not used by the function, HWRM_CHECK_RESULT() should not be used + * directly, rather it should be copied and modified to suit the function. + * + * HWRM_UNLOCK() must be called after all response processing is completed. 
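*
* A minimal caller, following the converted functions below (the
* command and fields are just one example from this file):
*
*	HWRM_PREP(req, CFA_L2_SET_RX_MASK);
*	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
*	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
*	HWRM_CHECK_RESULT();
*	...read any resp fields while the lock is still held...
*	HWRM_UNLOCK();
*	return rc;
*
* HWRM_CHECK_RESULT() unlocks and returns early on failure, so the
* happy path above still holds the lock when it touches resp.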
+ */ +#define HWRM_PREP(req, type) do { \ + rte_spinlock_lock(&bp->hwrm_lock); \ memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \ req.req_type = rte_cpu_to_le_16(HWRM_##type); \ - req.cmpl_ring = rte_cpu_to_le_16(cr); \ + req.cmpl_ring = rte_cpu_to_le_16(-1); \ req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \ req.target_id = rte_cpu_to_le_16(0xffff); \ - req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr) - -#define HWRM_CHECK_RESULT \ - { \ - if (rc) { \ - RTE_LOG(ERR, PMD, "%s failed rc:%d\n", \ - __func__, rc); \ - return rc; \ + req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \ +} while (0) + +#define HWRM_CHECK_RESULT() do {\ + if (rc) { \ + RTE_LOG(ERR, PMD, "%s failed rc:%d\n", \ + __func__, rc); \ + rte_spinlock_unlock(&bp->hwrm_lock); \ + return rc; \ + } \ + if (resp->error_code) { \ + rc = rte_le_to_cpu_16(resp->error_code); \ + if (resp->resp_len >= 16) { \ + struct hwrm_err_output *tmp_hwrm_err_op = \ + (void *)resp; \ + RTE_LOG(ERR, PMD, \ + "%s error %d:%d:%08x:%04x\n", \ + __func__, \ + rc, tmp_hwrm_err_op->cmd_err, \ + rte_le_to_cpu_32(\ + tmp_hwrm_err_op->opaque_0), \ + rte_le_to_cpu_16(\ + tmp_hwrm_err_op->opaque_1)); \ } \ - if (resp->error_code) { \ - rc = rte_le_to_cpu_16(resp->error_code); \ - if (resp->resp_len >= 16) { \ - struct hwrm_err_output *tmp_hwrm_err_op = \ - (void *)resp; \ - RTE_LOG(ERR, PMD, \ - "%s error %d:%d:%08x:%04x\n", \ - __func__, \ - rc, tmp_hwrm_err_op->cmd_err, \ - rte_le_to_cpu_32(\ - tmp_hwrm_err_op->opaque_0), \ - rte_le_to_cpu_16(\ - tmp_hwrm_err_op->opaque_1)); \ - } \ - else { \ - RTE_LOG(ERR, PMD, \ - "%s error %d\n", __func__, rc); \ - } \ - return rc; \ + else { \ + RTE_LOG(ERR, PMD, \ + "%s error %d\n", __func__, rc); \ } \ - } + rte_spinlock_unlock(&bp->hwrm_lock); \ + return rc; \ + } \ +} while (0) + +#define HWRM_UNLOCK() rte_spinlock_unlock(&bp->hwrm_lock) int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic) { @@ -224,13 +230,14 @@ int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic) struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 }; struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr; - HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp); + HWRM_PREP(req, CFA_L2_SET_RX_MASK); req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id); req.mask = 0; rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); return rc; } @@ -245,14 +252,14 @@ int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr; uint32_t mask = 0; - HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp); + HWRM_PREP(req, CFA_L2_SET_RX_MASK); req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id); /* FIXME add multicast flag, when multicast adding options is supported * by ethtool. 
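* Note the flag bits below are now accumulated with |= (the old code
* both assigned mask and forced the BCAST bit into req.mask), so a
* VNIC with e.g. BNXT_VNIC_INFO_BCAST and BNXT_VNIC_INFO_PROMISC set
* requests both HWRM mask bits in a single call.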
*/ if (vnic->flags & BNXT_VNIC_INFO_BCAST) - mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST; + mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST; if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED) mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN; if (vnic->flags & BNXT_VNIC_INFO_PROMISC) @@ -269,16 +276,16 @@ int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, if (vlan_table) { if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN)) mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY; - req.vlan_tag_tbl_addr = rte_cpu_to_le_16( - rte_mem_virt2phy(vlan_table)); + req.vlan_tag_tbl_addr = rte_cpu_to_le_64( + rte_mem_virt2iova(vlan_table)); req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count); } - req.mask = rte_cpu_to_le_32(HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST | - mask); + req.mask = rte_cpu_to_le_32(mask); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); return rc; } @@ -307,21 +314,22 @@ int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid, return 0; } } - HWRM_PREP(req, CFA_VLAN_ANTISPOOF_CFG, -1, resp); + HWRM_PREP(req, CFA_VLAN_ANTISPOOF_CFG); req.fid = rte_cpu_to_le_16(fid); req.vlan_tag_mask_tbl_addr = - rte_cpu_to_le_64(rte_mem_virt2phy(vlan_table)); + rte_cpu_to_le_64(rte_mem_virt2iova(vlan_table)); req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); return rc; } -int bnxt_hwrm_clear_filter(struct bnxt *bp, +int bnxt_hwrm_clear_l2_filter(struct bnxt *bp, struct bnxt_filter_info *filter) { int rc = 0; @@ -331,32 +339,50 @@ int bnxt_hwrm_clear_filter(struct bnxt *bp, if (filter->fw_l2_filter_id == UINT64_MAX) return 0; - HWRM_PREP(req, CFA_L2_FILTER_FREE, -1, resp); + HWRM_PREP(req, CFA_L2_FILTER_FREE); req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); filter->fw_l2_filter_id = -1; return 0; } -int bnxt_hwrm_set_filter(struct bnxt *bp, +int bnxt_hwrm_set_l2_filter(struct bnxt *bp, uint16_t dst_id, struct bnxt_filter_info *filter) { int rc = 0; struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 }; struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr; + struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; + const struct rte_eth_vmdq_rx_conf *conf = + &dev_conf->rx_adv_conf.vmdq_rx_conf; uint32_t enables = 0; + uint16_t j = dst_id - 1; + + //TODO: Is there a better way to add VLANs to each VNIC in case of VMDQ + if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) && + conf->pool_map[j].pools & (1UL << j)) { + RTE_LOG(DEBUG, PMD, + "Add vlan %u to vmdq pool %u\n", + conf->pool_map[j].vlan_id, j); + + filter->l2_ivlan = conf->pool_map[j].vlan_id; + filter->enables |= + HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN | + HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK; + } if (filter->fw_l2_filter_id != UINT64_MAX) - bnxt_hwrm_clear_filter(bp, filter); + bnxt_hwrm_clear_l2_filter(bp, filter); - HWRM_PREP(req, CFA_L2_FILTER_ALLOC, -1, resp); + HWRM_PREP(req, CFA_L2_FILTER_ALLOC); req.flags = rte_cpu_to_le_32(filter->flags); @@ -376,8 +402,14 @@ int bnxt_hwrm_set_filter(struct bnxt *bp, HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN) req.l2_ovlan = filter->l2_ovlan; if (enables & + HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN) + req.l2_ovlan = filter->l2_ivlan; + if (enables & 
HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK) req.l2_ovlan_mask = filter->l2_ovlan_mask; + if (enables & + HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK) + req.l2_ovlan_mask = filter->l2_ivlan_mask; if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID) req.src_id = rte_cpu_to_le_32(filter->src_id); if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE) @@ -387,9 +419,10 @@ int bnxt_hwrm_set_filter(struct bnxt *bp, rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + HWRM_CHECK_RESULT(); filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id); + HWRM_UNLOCK(); return rc; } @@ -402,13 +435,13 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp) uint16_t new_max_vfs; int i; - HWRM_PREP(req, FUNC_QCAPS, -1, resp); + HWRM_PREP(req, FUNC_QCAPS); req.fid = rte_cpu_to_le_16(0xffff); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + HWRM_CHECK_RESULT(); bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps); if (BNXT_PF(bp)) { @@ -469,6 +502,7 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp) bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx); if (BNXT_PF(bp)) bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics); + HWRM_UNLOCK(); return rc; } @@ -479,13 +513,14 @@ int bnxt_hwrm_func_reset(struct bnxt *bp) struct hwrm_func_reset_input req = {.req_type = 0 }; struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr; - HWRM_PREP(req, FUNC_RESET, -1, resp); + HWRM_PREP(req, FUNC_RESET); req.enables = rte_cpu_to_le_32(0); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); return rc; } @@ -499,7 +534,7 @@ int bnxt_hwrm_func_driver_register(struct bnxt *bp) if (bp->flags & BNXT_FLAG_REGISTERED) return 0; - HWRM_PREP(req, FUNC_DRV_RGTR, -1, resp); + HWRM_PREP(req, FUNC_DRV_RGTR); req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER | HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD); req.ver_maj = RTE_VER_YEAR; @@ -519,7 +554,8 @@ int bnxt_hwrm_func_driver_register(struct bnxt *bp) rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); bp->flags |= BNXT_FLAG_REGISTERED; @@ -538,19 +574,15 @@ int bnxt_hwrm_ver_get(struct bnxt *bp) uint32_t dev_caps_cfg; bp->max_req_len = HWRM_MAX_REQ_LEN; - HWRM_PREP(req, VER_GET, -1, resp); + HWRM_PREP(req, VER_GET); req.hwrm_intf_maj = HWRM_VERSION_MAJOR; req.hwrm_intf_min = HWRM_VERSION_MINOR; req.hwrm_intf_upd = HWRM_VERSION_UPDATE; - /* - * Hold the lock since we may be adjusting the response pointers. 
- */ - rte_spinlock_lock(&bp->hwrm_lock); - rc = bnxt_hwrm_send_message_locked(bp, &req, sizeof(req)); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + HWRM_CHECK_RESULT(); RTE_LOG(INFO, PMD, "%d.%d.%d:%d.%d.%d\n", resp->hwrm_intf_maj, resp->hwrm_intf_min, @@ -612,7 +644,7 @@ int bnxt_hwrm_ver_get(struct bnxt *bp) } rte_mem_lock_page(bp->hwrm_cmd_resp_addr); bp->hwrm_cmd_resp_dma_addr = - rte_mem_virt2phy(bp->hwrm_cmd_resp_addr); + rte_mem_virt2iova(bp->hwrm_cmd_resp_addr); if (bp->hwrm_cmd_resp_dma_addr == 0) { RTE_LOG(ERR, PMD, "Unable to map response buffer to physical memory.\n"); @@ -638,7 +670,7 @@ int bnxt_hwrm_ver_get(struct bnxt *bp) } rte_mem_lock_page(bp->hwrm_short_cmd_req_addr); bp->hwrm_short_cmd_req_dma_addr = - rte_mem_virt2phy(bp->hwrm_short_cmd_req_addr); + rte_mem_virt2iova(bp->hwrm_short_cmd_req_addr); if (bp->hwrm_short_cmd_req_dma_addr == 0) { rte_free(bp->hwrm_short_cmd_req_addr); RTE_LOG(ERR, PMD, @@ -651,7 +683,7 @@ int bnxt_hwrm_ver_get(struct bnxt *bp) } error: - rte_spinlock_unlock(&bp->hwrm_lock); + HWRM_UNLOCK(); return rc; } @@ -664,12 +696,13 @@ int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags) if (!(bp->flags & BNXT_FLAG_REGISTERED)) return 0; - HWRM_PREP(req, FUNC_DRV_UNRGTR, -1, resp); + HWRM_PREP(req, FUNC_DRV_UNRGTR); req.flags = flags; rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); bp->flags &= ~BNXT_FLAG_REGISTERED; @@ -685,7 +718,7 @@ static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf) uint32_t link_speed_mask = HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK; - HWRM_PREP(req, PORT_PHY_CFG, -1, resp); + HWRM_PREP(req, PORT_PHY_CFG); if (conf->link_up) { req.flags = rte_cpu_to_le_32(conf->phy_flags); @@ -729,7 +762,8 @@ static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf) rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); return rc; } @@ -741,18 +775,18 @@ static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp, struct hwrm_port_phy_qcfg_input req = {0}; struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr; - HWRM_PREP(req, PORT_PHY_QCFG, -1, resp); + HWRM_PREP(req, PORT_PHY_QCFG); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + HWRM_CHECK_RESULT(); link_info->phy_link_status = resp->link; link_info->link_up = (link_info->phy_link_status == HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 
1 : 0; link_info->link_speed = rte_le_to_cpu_16(resp->link_speed); - link_info->duplex = resp->duplex; + link_info->duplex = resp->duplex_cfg; link_info->pause = resp->pause; link_info->auto_pause = resp->auto_pause; link_info->force_pause = resp->force_pause; @@ -765,6 +799,8 @@ static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp, link_info->phy_ver[1] = resp->phy_min; link_info->phy_ver[2] = resp->phy_bld; + HWRM_UNLOCK(); + return rc; } @@ -774,11 +810,11 @@ int bnxt_hwrm_queue_qportcfg(struct bnxt *bp) struct hwrm_queue_qportcfg_input req = {.req_type = 0 }; struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr; - HWRM_PREP(req, QUEUE_QPORTCFG, -1, resp); + HWRM_PREP(req, QUEUE_QPORTCFG); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + HWRM_CHECK_RESULT(); #define GET_QUEUE_INFO(x) \ bp->cos_queue[x].id = resp->queue_id##x; \ @@ -793,6 +829,8 @@ int bnxt_hwrm_queue_qportcfg(struct bnxt *bp) GET_QUEUE_INFO(6); GET_QUEUE_INFO(7); + HWRM_UNLOCK(); + return rc; } @@ -806,7 +844,7 @@ int bnxt_hwrm_ring_alloc(struct bnxt *bp, struct hwrm_ring_alloc_input req = {.req_type = 0 }; struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr; - HWRM_PREP(req, RING_ALLOC, -1, resp); + HWRM_PREP(req, RING_ALLOC); req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma); req.fbo = rte_cpu_to_le_32(0); @@ -837,6 +875,7 @@ int bnxt_hwrm_ring_alloc(struct bnxt *bp, default: RTE_LOG(ERR, PMD, "hwrm alloc invalid ring type %d\n", ring_type); + HWRM_UNLOCK(); return -1; } req.enables = rte_cpu_to_le_32(enables); @@ -850,22 +889,27 @@ int bnxt_hwrm_ring_alloc(struct bnxt *bp, case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL: RTE_LOG(ERR, PMD, "hwrm_ring_alloc cp failed. rc:%d\n", rc); + HWRM_UNLOCK(); return rc; case HWRM_RING_FREE_INPUT_RING_TYPE_RX: RTE_LOG(ERR, PMD, "hwrm_ring_alloc rx failed. rc:%d\n", rc); + HWRM_UNLOCK(); return rc; case HWRM_RING_FREE_INPUT_RING_TYPE_TX: RTE_LOG(ERR, PMD, "hwrm_ring_alloc tx failed. rc:%d\n", rc); + HWRM_UNLOCK(); return rc; default: RTE_LOG(ERR, PMD, "Invalid ring. 
rc:%d\n", rc); + HWRM_UNLOCK(); return rc; } } ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id); + HWRM_UNLOCK(); return rc; } @@ -876,7 +920,7 @@ int bnxt_hwrm_ring_free(struct bnxt *bp, struct hwrm_ring_free_input req = {.req_type = 0 }; struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr; - HWRM_PREP(req, RING_FREE, -1, resp); + HWRM_PREP(req, RING_FREE); req.ring_type = ring_type; req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id); @@ -886,6 +930,7 @@ int bnxt_hwrm_ring_free(struct bnxt *bp, if (rc || resp->error_code) { if (rc == 0 && resp->error_code) rc = rte_le_to_cpu_16(resp->error_code); + HWRM_UNLOCK(); switch (ring_type) { case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL: @@ -905,6 +950,7 @@ int bnxt_hwrm_ring_free(struct bnxt *bp, return rc; } } + HWRM_UNLOCK(); return 0; } @@ -914,7 +960,7 @@ int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx) struct hwrm_ring_grp_alloc_input req = {.req_type = 0 }; struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr; - HWRM_PREP(req, RING_GRP_ALLOC, -1, resp); + HWRM_PREP(req, RING_GRP_ALLOC); req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id); req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id); @@ -923,11 +969,13 @@ int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx) rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + HWRM_CHECK_RESULT(); bp->grp_info[idx].fw_grp_id = rte_le_to_cpu_16(resp->ring_group_id); + HWRM_UNLOCK(); + return rc; } @@ -937,13 +985,14 @@ int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx) struct hwrm_ring_grp_free_input req = {.req_type = 0 }; struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr; - HWRM_PREP(req, RING_GRP_FREE, -1, resp); + HWRM_PREP(req, RING_GRP_FREE); req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID; return rc; @@ -958,13 +1007,14 @@ int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr) if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE) return rc; - HWRM_PREP(req, STAT_CTX_CLR_STATS, -1, resp); + HWRM_PREP(req, STAT_CTX_CLR_STATS); req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); return rc; } @@ -976,7 +1026,7 @@ int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 }; struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr; - HWRM_PREP(req, STAT_CTX_ALLOC, -1, resp); + HWRM_PREP(req, STAT_CTX_ALLOC); req.update_period_ms = rte_cpu_to_le_32(0); @@ -985,10 +1035,13 @@ int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + HWRM_CHECK_RESULT(); cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id); + HWRM_UNLOCK(); + bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id; + return rc; } @@ -999,13 +1052,14 @@ int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, struct hwrm_stat_ctx_free_input req = {.req_type = 0 }; struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr; - HWRM_PREP(req, STAT_CTX_FREE, -1, resp); + HWRM_PREP(req, STAT_CTX_FREE); req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - 
HWRM_CHECK_RESULT; + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); return rc; } @@ -1027,15 +1081,16 @@ int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic) vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE; vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + VLAN_TAG_SIZE; - HWRM_PREP(req, VNIC_ALLOC, -1, resp); + HWRM_PREP(req, VNIC_ALLOC); if (vnic->func_default) req.flags = HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT; rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + HWRM_CHECK_RESULT(); vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id); + HWRM_UNLOCK(); RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id); return rc; } @@ -1048,13 +1103,13 @@ static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp, struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 }; struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr; - HWRM_PREP(req, VNIC_PLCMODES_QCFG, -1, resp); + HWRM_PREP(req, VNIC_PLCMODES_QCFG); req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + HWRM_CHECK_RESULT(); pmode->flags = rte_le_to_cpu_32(resp->flags); /* dflt_vnic bit doesn't exist in the _cfg command */ @@ -1063,6 +1118,8 @@ static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp, pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset); pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold); + HWRM_UNLOCK(); + return rc; } @@ -1074,7 +1131,7 @@ static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp, struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 }; struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr; - HWRM_PREP(req, VNIC_PLCMODES_CFG, -1, resp); + HWRM_PREP(req, VNIC_PLCMODES_CFG); req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id); req.flags = rte_cpu_to_le_32(pmode->flags); @@ -1089,7 +1146,8 @@ static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp, rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); return rc; } @@ -1099,7 +1157,7 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic) int rc = 0; struct hwrm_vnic_cfg_input req = {.req_type = 0 }; struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr; - uint32_t ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE; + uint32_t ctx_enable_flag = 0; struct bnxt_plcmodes_cfg pmodes; if (vnic->fw_vnic_id == INVALID_HW_RING_ID) { @@ -1111,18 +1169,19 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic) if (rc) return rc; - HWRM_PREP(req, VNIC_CFG, -1, resp); + HWRM_PREP(req, VNIC_CFG); /* Only RSS support for now TBD: COS & LB */ req.enables = - rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP | - HWRM_VNIC_CFG_INPUT_ENABLES_MRU); + rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP); if (vnic->lb_rule != 0xffff) - ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE; + ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE; if (vnic->cos_rule != 0xffff) - ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE; - if (vnic->rss_rule != 0xffff) - ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE; + ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE; + if (vnic->rss_rule != 0xffff) { + ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_MRU; + ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE; + } req.enables |= rte_cpu_to_le_32(ctx_enable_flag); req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id); req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp); @@ -1151,7 +1210,8 @@ int 
bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic) rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes); @@ -1169,7 +1229,7 @@ int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic, RTE_LOG(DEBUG, PMD, "VNIC QCFG ID %d\n", vnic->fw_vnic_id); return rc; } - HWRM_PREP(req, VNIC_QCFG, -1, resp); + HWRM_PREP(req, VNIC_QCFG); req.enables = rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID); @@ -1178,7 +1238,7 @@ int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic, rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + HWRM_CHECK_RESULT(); vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp); vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule); @@ -1198,6 +1258,8 @@ int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic, vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE; + HWRM_UNLOCK(); + return rc; } @@ -1208,13 +1270,14 @@ int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic) struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr; - HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC, -1, resp); + HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + HWRM_CHECK_RESULT(); vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id); + HWRM_UNLOCK(); RTE_LOG(DEBUG, PMD, "VNIC RSS Rule %x\n", vnic->rss_rule); return rc; @@ -1231,13 +1294,14 @@ int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic) RTE_LOG(DEBUG, PMD, "VNIC RSS Rule %x\n", vnic->rss_rule); return rc; } - HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE, -1, resp); + HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE); req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->rss_rule); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); vnic->rss_rule = INVALID_HW_RING_ID; @@ -1255,13 +1319,14 @@ int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic) return rc; } - HWRM_PREP(req, VNIC_FREE, -1, resp); + HWRM_PREP(req, VNIC_FREE); req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); vnic->fw_vnic_id = INVALID_HW_RING_ID; return rc; @@ -1274,7 +1339,7 @@ int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp, struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 }; struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr; - HWRM_PREP(req, VNIC_RSS_CFG, -1, resp); + HWRM_PREP(req, VNIC_RSS_CFG); req.hash_type = rte_cpu_to_le_32(vnic->hash_type); @@ -1286,7 +1351,8 @@ int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp, rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); return rc; } @@ -1299,7 +1365,7 @@ int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp, struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr; uint16_t size; - HWRM_PREP(req, VNIC_PLCMODES_CFG, -1, resp); + HWRM_PREP(req, VNIC_PLCMODES_CFG); req.flags = rte_cpu_to_le_32( HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT); @@ -1315,7 +1381,8 @@ int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp, rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); return rc; } @@ -1327,7 +1394,7 @@ int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp, struct 
hwrm_vnic_tpa_cfg_input req = {.req_type = 0 }; struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr; - HWRM_PREP(req, VNIC_TPA_CFG, -1, resp); + HWRM_PREP(req, VNIC_TPA_CFG); if (enable) { req.enables = rte_cpu_to_le_32( @@ -1350,7 +1417,8 @@ int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp, rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); return rc; } @@ -1367,10 +1435,11 @@ int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr) memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr)); req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid); - HWRM_PREP(req, FUNC_CFG, -1, resp); + HWRM_PREP(req, FUNC_CFG); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); bp->pf.vf_info[vf].random_mac = false; @@ -1384,17 +1453,19 @@ int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid, struct hwrm_func_qstats_input req = {.req_type = 0}; struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr; - HWRM_PREP(req, FUNC_QSTATS, -1, resp); + HWRM_PREP(req, FUNC_QSTATS); req.fid = rte_cpu_to_le_16(fid); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + HWRM_CHECK_RESULT(); if (dropped) *dropped = rte_le_to_cpu_64(resp->tx_drop_pkts); + HWRM_UNLOCK(); + return rc; } @@ -1405,13 +1476,13 @@ int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid, struct hwrm_func_qstats_input req = {.req_type = 0}; struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr; - HWRM_PREP(req, FUNC_QSTATS, -1, resp); + HWRM_PREP(req, FUNC_QSTATS); req.fid = rte_cpu_to_le_16(fid); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + HWRM_CHECK_RESULT(); stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts); stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts); @@ -1432,6 +1503,8 @@ int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid, stats->imissed = rte_le_to_cpu_64(resp->rx_drop_pkts); + HWRM_UNLOCK(); + return rc; } @@ -1441,13 +1514,14 @@ int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid) struct hwrm_func_clr_stats_input req = {.req_type = 0}; struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr; - HWRM_PREP(req, FUNC_CLR_STATS, -1, resp); + HWRM_PREP(req, FUNC_CLR_STATS); req.fid = rte_cpu_to_le_16(fid); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); return rc; } @@ -1542,12 +1616,8 @@ int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp) for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) { - if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID) { - RTE_LOG(ERR, PMD, - "Attempt to free invalid ring group %d\n", - idx); + if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID) continue; - } rc = bnxt_hwrm_ring_grp_free(bp, idx); @@ -1683,7 +1753,7 @@ int bnxt_alloc_hwrm_resources(struct bnxt *bp) if (bp->hwrm_cmd_resp_addr == NULL) return -ENOMEM; bp->hwrm_cmd_resp_dma_addr = - rte_mem_virt2phy(bp->hwrm_cmd_resp_addr); + rte_mem_virt2iova(bp->hwrm_cmd_resp_addr); if (bp->hwrm_cmd_resp_dma_addr == 0) { RTE_LOG(ERR, PMD, "unable to map response address to physical memory\n"); @@ -1700,9 +1770,39 @@ int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic) int rc = 0; STAILQ_FOREACH(filter, &vnic->filter, next) { - rc = bnxt_hwrm_clear_filter(bp, filter); - if (rc) - break; + if (filter->filter_type == HWRM_CFA_EM_FILTER) + rc = bnxt_hwrm_clear_em_filter(bp, filter); + else if (filter->filter_type == 
HWRM_CFA_NTUPLE_FILTER) + rc = bnxt_hwrm_clear_ntuple_filter(bp, filter); + else + rc = bnxt_hwrm_clear_l2_filter(bp, filter); + //if (rc) + //break; + } + return rc; +} + +static int +bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic) +{ + struct bnxt_filter_info *filter; + struct rte_flow *flow; + int rc = 0; + + STAILQ_FOREACH(flow, &vnic->flow_list, next) { + filter = flow->filter; + RTE_LOG(ERR, PMD, "filter type %d\n", filter->filter_type); + if (filter->filter_type == HWRM_CFA_EM_FILTER) + rc = bnxt_hwrm_clear_em_filter(bp, filter); + else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) + rc = bnxt_hwrm_clear_ntuple_filter(bp, filter); + else + rc = bnxt_hwrm_clear_l2_filter(bp, filter); + + STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next); + rte_free(flow); + //if (rc) + //break; } return rc; } @@ -1713,7 +1813,15 @@ int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic) int rc = 0; STAILQ_FOREACH(filter, &vnic->filter, next) { - rc = bnxt_hwrm_set_filter(bp, vnic->fw_vnic_id, filter); + if (filter->filter_type == HWRM_CFA_EM_FILTER) + rc = bnxt_hwrm_set_em_filter(bp, filter->dst_id, + filter); + else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) + rc = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id, + filter); + else + rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, + filter); if (rc) break; } @@ -1734,20 +1842,20 @@ void bnxt_free_tunnel_ports(struct bnxt *bp) void bnxt_free_all_hwrm_resources(struct bnxt *bp) { - struct bnxt_vnic_info *vnic; - unsigned int i; + int i; if (bp->vnic_info == NULL) return; - vnic = &bp->vnic_info[0]; - if (BNXT_PF(bp)) - bnxt_hwrm_cfa_l2_clear_rx_mask(bp, vnic); - - /* VNIC resources */ - for (i = 0; i < bp->nr_vnics; i++) { + /* + * Cleanup VNICs in reverse order, to make sure the L2 filter + * from vnic0 is last to be cleaned up. + */ + for (i = bp->nr_vnics - 1; i >= 0; i--) { struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; + bnxt_clear_hwrm_vnic_flows(bp, vnic); + bnxt_clear_hwrm_vnic_filters(bp, vnic); bnxt_hwrm_vnic_ctx_free(bp, vnic); @@ -1833,7 +1941,7 @@ static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed) ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \ ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G) -static int bnxt_valid_link_speed(uint32_t link_speed, uint8_t port_id) +static int bnxt_valid_link_speed(uint32_t link_speed, uint16_t port_id) { uint32_t one_speed; @@ -2038,12 +2146,12 @@ int bnxt_hwrm_func_qcfg(struct bnxt *bp) struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; int rc = 0; - HWRM_PREP(req, FUNC_QCFG, -1, resp); + HWRM_PREP(req, FUNC_QCFG); req.fid = rte_cpu_to_le_16(0xffff); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + HWRM_CHECK_RESULT(); /* Hard Coded.. 
0xfff VLAN ID mask */ bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff; @@ -2059,6 +2167,8 @@ int bnxt_hwrm_func_qcfg(struct bnxt *bp) break; } + HWRM_UNLOCK(); + return rc; } @@ -2118,10 +2228,12 @@ static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings) req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps); req.fid = rte_cpu_to_le_16(0xffff); - HWRM_PREP(req, FUNC_CFG, -1, resp); + HWRM_PREP(req, FUNC_CFG); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); return rc; } @@ -2187,7 +2299,7 @@ static void reserve_resources_from_vf(struct bnxt *bp, int rc; /* Get the actual allocated values now */ - HWRM_PREP(req, FUNC_QCAPS, -1, resp); + HWRM_PREP(req, FUNC_QCAPS); req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); @@ -2212,6 +2324,8 @@ static void reserve_resources_from_vf(struct bnxt *bp, */ //bp->max_vnics -= rte_le_to_cpu_16(esp->max_vnics); bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps); + + HWRM_UNLOCK(); } int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf) @@ -2221,7 +2335,7 @@ int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf) int rc; /* Check for zero MAC address */ - HWRM_PREP(req, FUNC_QCFG, -1, resp); + HWRM_PREP(req, FUNC_QCFG); req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); if (rc) { @@ -2232,7 +2346,11 @@ int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf) RTE_LOG(ERR, PMD, "hwrm_func_qcfg error %d\n", rc); return -1; } - return rte_le_to_cpu_16(resp->vlan); + rc = rte_le_to_cpu_16(resp->vlan); + + HWRM_UNLOCK(); + + return rc; } static int update_pf_resource_max(struct bnxt *bp) @@ -2242,15 +2360,17 @@ static int update_pf_resource_max(struct bnxt *bp) int rc; /* And copy the allocated numbers into the pf struct */ - HWRM_PREP(req, FUNC_QCFG, -1, resp); + HWRM_PREP(req, FUNC_QCFG); req.fid = rte_cpu_to_le_16(0xffff); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + HWRM_CHECK_RESULT(); /* Only TX ring value reflects actual allocation? TODO */ bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings); bp->pf.evb_mode = resp->evb_mode; + HWRM_UNLOCK(); + return rc; } @@ -2342,7 +2462,7 @@ int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs) for (i = 0; i < num_vfs; i++) { add_random_mac_if_needed(bp, &req, i); - HWRM_PREP(req, FUNC_CFG, -1, resp); + HWRM_PREP(req, FUNC_CFG); req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags); req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); @@ -2357,9 +2477,12 @@ int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs) RTE_LOG(ERR, PMD, "Not all VFs available. 
(%d, %d)\n", rc, resp->error_code); + HWRM_UNLOCK(); break; } + HWRM_UNLOCK(); + reserve_resources_from_vf(bp, &req, i); bp->pf.active_vfs++; bnxt_hwrm_func_clr_stats(bp, bp->pf.vf_info[i].fid); @@ -2392,14 +2515,15 @@ int bnxt_hwrm_pf_evb_mode(struct bnxt *bp) struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr; int rc; - HWRM_PREP(req, FUNC_CFG, -1, resp); + HWRM_PREP(req, FUNC_CFG); req.fid = rte_cpu_to_le_16(0xffff); req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE); req.evb_mode = bp->pf.evb_mode; rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); return rc; } @@ -2411,11 +2535,11 @@ int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port, struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr; int rc = 0; - HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC, -1, resp); + HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC); req.tunnel_type = tunnel_type; req.tunnel_dst_port_val = port; rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + HWRM_CHECK_RESULT(); switch (tunnel_type) { case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN: @@ -2429,6 +2553,9 @@ int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port, default: break; } + + HWRM_UNLOCK(); + return rc; } @@ -2439,11 +2566,14 @@ int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port, struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr; int rc = 0; - HWRM_PREP(req, TUNNEL_DST_PORT_FREE, -1, resp); + HWRM_PREP(req, TUNNEL_DST_PORT_FREE); + req.tunnel_type = tunnel_type; req.tunnel_dst_port_id = rte_cpu_to_be_16(port); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); return rc; } @@ -2455,11 +2585,14 @@ int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf, struct hwrm_func_cfg_input req = {0}; int rc; - HWRM_PREP(req, FUNC_CFG, -1, resp); + HWRM_PREP(req, FUNC_CFG); + req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid); req.flags = rte_cpu_to_le_32(flags); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); return rc; } @@ -2482,14 +2615,14 @@ int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp) struct hwrm_func_buf_rgtr_input req = {.req_type = 0 }; struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr; - HWRM_PREP(req, FUNC_BUF_RGTR, -1, resp); + HWRM_PREP(req, FUNC_BUF_RGTR); req.req_buf_num_pages = rte_cpu_to_le_16(1); req.req_buf_page_size = rte_cpu_to_le_16( page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN)); req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN); req.req_buf_page_addr[0] = - rte_cpu_to_le_64(rte_mem_virt2phy(bp->pf.vf_req_buf)); + rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf)); if (req.req_buf_page_addr[0] == 0) { RTE_LOG(ERR, PMD, "unable to map buffer address to physical memory\n"); @@ -2498,7 +2631,8 @@ int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp) rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); return rc; } @@ -2509,11 +2643,12 @@ int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp) struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 }; struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr; - HWRM_PREP(req, FUNC_BUF_UNRGTR, -1, resp); + HWRM_PREP(req, FUNC_BUF_UNRGTR); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); return rc; } @@ -2524,7 +2659,8 @@ int 
bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp) struct hwrm_func_cfg_input req = {0}; int rc; - HWRM_PREP(req, FUNC_CFG, -1, resp); + HWRM_PREP(req, FUNC_CFG); + req.fid = rte_cpu_to_le_16(0xffff); req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags); req.enables = rte_cpu_to_le_32( @@ -2532,7 +2668,9 @@ int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp) req.async_event_cr = rte_cpu_to_le_16( bp->def_cp_ring->cp_ring_struct->fw_ring_id); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); return rc; } @@ -2543,13 +2681,16 @@ int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp) struct hwrm_func_vf_cfg_input req = {0}; int rc; - HWRM_PREP(req, FUNC_VF_CFG, -1, resp); + HWRM_PREP(req, FUNC_VF_CFG); + req.enables = rte_cpu_to_le_32( HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR); req.async_event_cr = rte_cpu_to_le_16( bp->def_cp_ring->cp_ring_struct->fw_ring_id); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); return rc; } @@ -2562,7 +2703,7 @@ int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf) uint32_t func_cfg_flags; int rc = 0; - HWRM_PREP(req, FUNC_CFG, -1, resp); + HWRM_PREP(req, FUNC_CFG); if (is_vf) { dflt_vlan = bp->pf.vf_info[vf].dflt_vlan; @@ -2580,7 +2721,9 @@ int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf) req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); return rc; } @@ -2592,13 +2735,16 @@ int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf, struct hwrm_func_cfg_input req = {0}; int rc; - HWRM_PREP(req, FUNC_CFG, -1, resp); + HWRM_PREP(req, FUNC_CFG); + req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid); req.enables |= rte_cpu_to_le_32(enables); req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags); req.max_bw = rte_cpu_to_le_32(max_bw); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); return rc; } @@ -2609,14 +2755,17 @@ int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf) struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr; int rc = 0; - HWRM_PREP(req, FUNC_CFG, -1, resp); + HWRM_PREP(req, FUNC_CFG); + req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags); req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid); req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN); req.dflt_vlan = rte_cpu_to_le_16(bp->pf.vf_info[vf].dflt_vlan); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); return rc; } @@ -2631,14 +2780,15 @@ int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id, if (ec_size > sizeof(req.encap_request)) return -1; - HWRM_PREP(req, REJECT_FWD_RESP, -1, resp); + HWRM_PREP(req, REJECT_FWD_RESP); req.encap_resp_target_id = rte_cpu_to_le_16(target_id); memcpy(req.encap_request, encaped, ec_size); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); return rc; } @@ -2650,13 +2800,17 @@ int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf, struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; int rc; - HWRM_PREP(req, FUNC_QCFG, -1, resp); + HWRM_PREP(req, FUNC_QCFG); + req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + HWRM_CHECK_RESULT(); memcpy(mac->addr_bytes, resp->mac_address, 
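/* copy out of *resp while hwrm_lock is still held; HWRM_UNLOCK() below
 * hands the shared response buffer back to the next HWRM caller */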
ETHER_ADDR_LEN); + + HWRM_UNLOCK(); + return rc; } @@ -2670,50 +2824,55 @@ int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id, if (ec_size > sizeof(req.encap_request)) return -1; - HWRM_PREP(req, EXEC_FWD_RESP, -1, resp); + HWRM_PREP(req, EXEC_FWD_RESP); req.encap_resp_target_id = rte_cpu_to_le_16(target_id); memcpy(req.encap_request, encaped, ec_size); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); return rc; } int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx, - struct rte_eth_stats *stats) + struct rte_eth_stats *stats, uint8_t rx) { int rc = 0; struct hwrm_stat_ctx_query_input req = {.req_type = 0}; struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr; - HWRM_PREP(req, STAT_CTX_QUERY, -1, resp); + HWRM_PREP(req, STAT_CTX_QUERY); req.stat_ctx_id = rte_cpu_to_le_32(cid); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; - - stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts); - stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts); - stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts); - stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes); - stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes); - stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes); + HWRM_CHECK_RESULT(); + + if (rx) { + stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts); + stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts); + stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts); + stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes); + stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes); + stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes); + stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_err_pkts); + stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_drop_pkts); + } else { + stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts); + stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts); + stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts); + stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes); + stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes); + stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes); + stats->q_errors[idx] += rte_le_to_cpu_64(resp->tx_err_pkts); + } - stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts); - stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts); - stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts); - stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes); - stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes); - stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes); - stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_err_pkts); - stats->q_errors[idx] += rte_le_to_cpu_64(resp->tx_err_pkts); - stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_drop_pkts); + HWRM_UNLOCK(); return rc; } @@ -2728,12 +2887,16 @@ int bnxt_hwrm_port_qstats(struct bnxt *bp) if (!(bp->flags & BNXT_FLAG_PORT_STATS)) return 0; - HWRM_PREP(req, PORT_QSTATS, -1, resp); + HWRM_PREP(req, PORT_QSTATS); + req.port_id = rte_cpu_to_le_16(pf->port_id); req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map); req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); + return rc; } @@ -2747,10 +2910,14 @@ int 
bnxt_hwrm_port_clr_stats(struct bnxt *bp) if (!(bp->flags & BNXT_FLAG_PORT_STATS)) return 0; - HWRM_PREP(req, PORT_CLR_STATS, -1, resp); + HWRM_PREP(req, PORT_CLR_STATS); + req.port_id = rte_cpu_to_le_16(pf->port_id); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); + return rc; } @@ -2763,10 +2930,11 @@ int bnxt_hwrm_port_led_qcaps(struct bnxt *bp) if (BNXT_VF(bp)) return 0; - HWRM_PREP(req, PORT_LED_QCAPS, -1, resp); + HWRM_PREP(req, PORT_LED_QCAPS); req.port_id = bp->pf.port_id; rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + + HWRM_CHECK_RESULT(); if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) { unsigned int i; @@ -2786,6 +2954,9 @@ int bnxt_hwrm_port_led_qcaps(struct bnxt *bp) } } } + + HWRM_UNLOCK(); + return rc; } @@ -2801,7 +2972,8 @@ int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on) if (!bp->num_leds || BNXT_VF(bp)) return -EOPNOTSUPP; - HWRM_PREP(req, PORT_LED_CFG, -1, resp); + HWRM_PREP(req, PORT_LED_CFG); + if (led_on) { led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT; duration = rte_cpu_to_le_16(500); @@ -2819,8 +2991,171 @@ int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on) } rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); + + return rc; +} + +int bnxt_hwrm_nvm_get_dir_info(struct bnxt *bp, uint32_t *entries, + uint32_t *length) +{ + int rc; + struct hwrm_nvm_get_dir_info_input req = {0}; + struct hwrm_nvm_get_dir_info_output *resp = bp->hwrm_cmd_resp_addr; + + HWRM_PREP(req, NVM_GET_DIR_INFO); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); + + if (!rc) { + *entries = rte_le_to_cpu_32(resp->entries); + *length = rte_le_to_cpu_32(resp->entry_length); + } + return rc; +} + +int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data) +{ + int rc; + uint32_t dir_entries; + uint32_t entry_length; + uint8_t *buf; + size_t buflen; + rte_iova_t dma_handle; + struct hwrm_nvm_get_dir_entries_input req = {0}; + struct hwrm_nvm_get_dir_entries_output *resp = bp->hwrm_cmd_resp_addr; + + rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length); + if (rc != 0) + return rc; + + *data++ = dir_entries; + *data++ = entry_length; + len -= 2; + memset(data, 0xff, len); + + buflen = dir_entries * entry_length; + buf = rte_malloc("nvm_dir", buflen, 0); + rte_mem_lock_page(buf); + if (buf == NULL) + return -ENOMEM; + dma_handle = rte_mem_virt2iova(buf); + if (dma_handle == 0) { + RTE_LOG(ERR, PMD, + "unable to map response address to physical memory\n"); + return -ENOMEM; + } + HWRM_PREP(req, NVM_GET_DIR_ENTRIES); + req.host_dest_addr = rte_cpu_to_le_64(dma_handle); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); + + if (rc == 0) + memcpy(data, buf, len > buflen ? 
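/* bounded copy: never write more than the caller-supplied len */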
buflen : len); + + rte_free(buf); + + return rc; +} + +int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index, + uint32_t offset, uint32_t length, + uint8_t *data) +{ + int rc; + uint8_t *buf; + rte_iova_t dma_handle; + struct hwrm_nvm_read_input req = {0}; + struct hwrm_nvm_read_output *resp = bp->hwrm_cmd_resp_addr; + + buf = rte_malloc("nvm_item", length, 0); + rte_mem_lock_page(buf); + if (!buf) + return -ENOMEM; + + dma_handle = rte_mem_virt2iova(buf); + if (dma_handle == 0) { + RTE_LOG(ERR, PMD, + "unable to map response address to physical memory\n"); + return -ENOMEM; + } + HWRM_PREP(req, NVM_READ); + req.host_dest_addr = rte_cpu_to_le_64(dma_handle); + req.dir_idx = rte_cpu_to_le_16(index); + req.offset = rte_cpu_to_le_32(offset); + req.len = rte_cpu_to_le_32(length); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); + if (rc == 0) + memcpy(data, buf, length); + + rte_free(buf); + return rc; +} + +int bnxt_hwrm_erase_nvram_directory(struct bnxt *bp, uint8_t index) +{ + int rc; + struct hwrm_nvm_erase_dir_entry_input req = {0}; + struct hwrm_nvm_erase_dir_entry_output *resp = bp->hwrm_cmd_resp_addr; + + HWRM_PREP(req, NVM_ERASE_DIR_ENTRY); + req.dir_idx = rte_cpu_to_le_16(index); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); + + return rc; +} + + +int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type, + uint16_t dir_ordinal, uint16_t dir_ext, + uint16_t dir_attr, const uint8_t *data, + size_t data_len) +{ + int rc; + struct hwrm_nvm_write_input req = {0}; + struct hwrm_nvm_write_output *resp = bp->hwrm_cmd_resp_addr; + rte_iova_t dma_handle; + uint8_t *buf; + + HWRM_PREP(req, NVM_WRITE); + + req.dir_type = rte_cpu_to_le_16(dir_type); + req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal); + req.dir_ext = rte_cpu_to_le_16(dir_ext); + req.dir_attr = rte_cpu_to_le_16(dir_attr); + req.dir_data_length = rte_cpu_to_le_32(data_len); + + buf = rte_malloc("nvm_write", data_len, 0); + rte_mem_lock_page(buf); + if (!buf) + return -ENOMEM; + + dma_handle = rte_mem_virt2iova(buf); + if (dma_handle == 0) { + RTE_LOG(ERR, PMD, + "unable to map response address to physical memory\n"); + return -ENOMEM; + } + memcpy(buf, data, data_len); + req.host_src_addr = rte_cpu_to_le_64(dma_handle); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); + + rte_free(buf); return rc; } @@ -2857,28 +3192,34 @@ static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf, int rc; /* First query all VNIC ids */ - HWRM_PREP(req, FUNC_VF_VNIC_IDS_QUERY, -1, resp_vf_vnic_ids); + HWRM_PREP(req, FUNC_VF_VNIC_IDS_QUERY); req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf); req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics); - req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2phy(vnic_ids)); + req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2iova(vnic_ids)); if (req.vnic_id_tbl_addr == 0) { + HWRM_UNLOCK(); RTE_LOG(ERR, PMD, "unable to map VNIC ID table address to physical memory\n"); return -ENOMEM; } rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); if (rc) { + HWRM_UNLOCK(); RTE_LOG(ERR, PMD, "hwrm_func_vf_vnic_query failed rc:%d\n", rc); return -1; } else if (resp->error_code) { rc = rte_le_to_cpu_16(resp->error_code); + HWRM_UNLOCK(); RTE_LOG(ERR, PMD, "hwrm_func_vf_vnic_query error %d\n", rc); return -1; } + rc = rte_le_to_cpu_32(resp->vnic_id_cnt); + + HWRM_UNLOCK(); - return rte_le_to_cpu_32(resp->vnic_id_cnt); + return rc; } /* @@ -2943,7 
+3284,8 @@ int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf, struct hwrm_func_cfg_input req = {0}; int rc; - HWRM_PREP(req, FUNC_CFG, -1, resp); + HWRM_PREP(req, FUNC_CFG); + req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid); req.enables |= rte_cpu_to_le_32( HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE); @@ -2951,7 +3293,9 @@ int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf, HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN : HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK; rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT; + + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); return rc; } @@ -3004,3 +3348,215 @@ exit: rte_free(vnic_ids); return -1; } + +int bnxt_hwrm_set_em_filter(struct bnxt *bp, + uint16_t dst_id, + struct bnxt_filter_info *filter) +{ + int rc = 0; + struct hwrm_cfa_em_flow_alloc_input req = {.req_type = 0 }; + struct hwrm_cfa_em_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr; + uint32_t enables = 0; + + if (filter->fw_em_filter_id != UINT64_MAX) + bnxt_hwrm_clear_em_filter(bp, filter); + + HWRM_PREP(req, CFA_EM_FLOW_ALLOC); + + req.flags = rte_cpu_to_le_32(filter->flags); + + enables = filter->enables | + HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID; + req.dst_id = rte_cpu_to_le_16(dst_id); + + if (filter->ip_addr_type) { + req.ip_addr_type = filter->ip_addr_type; + enables |= HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE; + } + if (enables & + HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID) + req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id); + if (enables & + HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR) + memcpy(req.src_macaddr, filter->src_macaddr, + ETHER_ADDR_LEN); + if (enables & + HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR) + memcpy(req.dst_macaddr, filter->dst_macaddr, + ETHER_ADDR_LEN); + if (enables & + HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID) + req.ovlan_vid = filter->l2_ovlan; + if (enables & + HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IVLAN_VID) + req.ivlan_vid = filter->l2_ivlan; + if (enables & + HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE) + req.ethertype = rte_cpu_to_be_16(filter->ethertype); + if (enables & + HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL) + req.ip_protocol = filter->ip_protocol; + if (enables & + HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR) + req.src_ipaddr[0] = rte_cpu_to_be_32(filter->src_ipaddr[0]); + if (enables & + HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR) + req.dst_ipaddr[0] = rte_cpu_to_be_32(filter->dst_ipaddr[0]); + if (enables & + HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT) + req.src_port = rte_cpu_to_be_16(filter->src_port); + if (enables & + HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT) + req.dst_port = rte_cpu_to_be_16(filter->dst_port); + if (enables & + HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID) + req.mirror_vnic_id = filter->mirror_vnic_id; + + req.enables = rte_cpu_to_le_32(enables); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + + HWRM_CHECK_RESULT(); + + filter->fw_em_filter_id = rte_le_to_cpu_64(resp->em_filter_id); + HWRM_UNLOCK(); + + return rc; +} + +int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter) +{ + int rc = 0; + struct hwrm_cfa_em_flow_free_input req = {.req_type = 0 }; + struct hwrm_cfa_em_flow_free_output *resp = bp->hwrm_cmd_resp_addr; + + if (filter->fw_em_filter_id == UINT64_MAX) + return 0; + + RTE_LOG(ERR, PMD, "Clear EM filter\n"); + HWRM_PREP(req, CFA_EM_FLOW_FREE); + + req.em_filter_id = 
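/* handle returned by CFA_EM_FLOW_ALLOC; UINT64_MAX means none installed */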
rte_cpu_to_le_64(filter->fw_em_filter_id); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); + + filter->fw_em_filter_id = -1; + filter->fw_l2_filter_id = -1; + + return 0; +} + +int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp, + uint16_t dst_id, + struct bnxt_filter_info *filter) +{ + int rc = 0; + struct hwrm_cfa_ntuple_filter_alloc_input req = {.req_type = 0 }; + struct hwrm_cfa_ntuple_filter_alloc_output *resp = + bp->hwrm_cmd_resp_addr; + uint32_t enables = 0; + + if (filter->fw_ntuple_filter_id != UINT64_MAX) + bnxt_hwrm_clear_ntuple_filter(bp, filter); + + HWRM_PREP(req, CFA_NTUPLE_FILTER_ALLOC); + + req.flags = rte_cpu_to_le_32(filter->flags); + + enables = filter->enables | + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID; + req.dst_id = rte_cpu_to_le_16(dst_id); + + + if (filter->ip_addr_type) { + req.ip_addr_type = filter->ip_addr_type; + enables |= + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE; + } + if (enables & + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID) + req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id); + if (enables & + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR) + memcpy(req.src_macaddr, filter->src_macaddr, + ETHER_ADDR_LEN); + //if (enables & + //HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_MACADDR) + //memcpy(req.dst_macaddr, filter->dst_macaddr, + //ETHER_ADDR_LEN); + if (enables & + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE) + req.ethertype = rte_cpu_to_be_16(filter->ethertype); + if (enables & + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL) + req.ip_protocol = filter->ip_protocol; + if (enables & + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR) + req.src_ipaddr[0] = rte_cpu_to_le_32(filter->src_ipaddr[0]); + if (enables & + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK) + req.src_ipaddr_mask[0] = + rte_cpu_to_le_32(filter->src_ipaddr_mask[0]); + if (enables & + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR) + req.dst_ipaddr[0] = rte_cpu_to_le_32(filter->dst_ipaddr[0]); + if (enables & + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK) + req.dst_ipaddr_mask[0] = + rte_cpu_to_be_32(filter->dst_ipaddr_mask[0]); + if (enables & + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT) + req.src_port = rte_cpu_to_le_16(filter->src_port); + if (enables & + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK) + req.src_port_mask = rte_cpu_to_le_16(filter->src_port_mask); + if (enables & + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT) + req.dst_port = rte_cpu_to_le_16(filter->dst_port); + if (enables & + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK) + req.dst_port_mask = rte_cpu_to_le_16(filter->dst_port_mask); + if (enables & + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID) + req.mirror_vnic_id = filter->mirror_vnic_id; + + req.enables = rte_cpu_to_le_32(enables); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + + HWRM_CHECK_RESULT(); + + filter->fw_ntuple_filter_id = rte_le_to_cpu_64(resp->ntuple_filter_id); + HWRM_UNLOCK(); + + return rc; +} + +int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp, + struct bnxt_filter_info *filter) +{ + int rc = 0; + struct hwrm_cfa_ntuple_filter_free_input req = {.req_type = 0 }; + struct hwrm_cfa_ntuple_filter_free_output *resp = + bp->hwrm_cmd_resp_addr; + + if (filter->fw_ntuple_filter_id == UINT64_MAX) + return 0; + + HWRM_PREP(req, CFA_NTUPLE_FILTER_FREE); + + req.ntuple_filter_id = rte_cpu_to_le_64(filter->fw_ntuple_filter_id); + 
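/*
 * Together with the EM variants above, this gives flow code a simple
 * install/remove lifecycle. A hypothetical caller, assuming a filter
 * obtained from bnxt_alloc_filter() and an illustrative "ip" value:
 *
 *     filter->filter_type = HWRM_CFA_NTUPLE_FILTER;
 *     filter->enables = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR;
 *     filter->src_ipaddr[0] = ip;
 *     rc = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id, filter);
 *     if (rc == 0)
 *         ...
 *     rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
 */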
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); + + filter->fw_ntuple_filter_id = -1; + filter->fw_l2_filter_id = -1; + + return 0; +} diff --git a/drivers/net/bnxt/bnxt_hwrm.h b/drivers/net/bnxt/bnxt_hwrm.h index 51cd0dd4..85083e61 100644 --- a/drivers/net/bnxt/bnxt_hwrm.h +++ b/drivers/net/bnxt/bnxt_hwrm.h @@ -51,9 +51,9 @@ int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic, int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid, uint16_t vlan_count, struct bnxt_vlan_antispoof_table_entry *vlan_table); -int bnxt_hwrm_clear_filter(struct bnxt *bp, +int bnxt_hwrm_clear_l2_filter(struct bnxt *bp, struct bnxt_filter_info *filter); -int bnxt_hwrm_set_filter(struct bnxt *bp, +int bnxt_hwrm_set_l2_filter(struct bnxt *bp, uint16_t dst_id, struct bnxt_filter_info *filter); int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id, @@ -92,7 +92,7 @@ int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, unsigned int idx); int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx, - struct rte_eth_stats *stats); + struct rte_eth_stats *stats, uint8_t rx); int bnxt_hwrm_ver_get(struct bnxt *bp); @@ -156,4 +156,23 @@ int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf, int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf, bool on); int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf); +int bnxt_hwrm_set_em_filter(struct bnxt *bp, uint16_t dst_id, + struct bnxt_filter_info *filter); +int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter); + +int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp, uint16_t dst_id, + struct bnxt_filter_info *filter); +int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp, + struct bnxt_filter_info *filter); +int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data); +int bnxt_hwrm_nvm_get_dir_info(struct bnxt *bp, uint32_t *entries, + uint32_t *length); +int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index, + uint32_t offset, uint32_t length, + uint8_t *data); +int bnxt_hwrm_erase_nvram_directory(struct bnxt *bp, uint8_t index); +int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type, + uint16_t dir_ordinal, uint16_t dir_ext, + uint16_t dir_attr, const uint8_t *data, + size_t data_len); #endif diff --git a/drivers/net/bnxt/bnxt_irq.c b/drivers/net/bnxt/bnxt_irq.c index 47cda7e5..49436cfd 100644 --- a/drivers/net/bnxt/bnxt_irq.c +++ b/drivers/net/bnxt/bnxt_irq.c @@ -50,11 +50,18 @@ static void bnxt_int_handler(void *param) struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param; struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; struct bnxt_cp_ring_info *cpr = bp->def_cp_ring; - uint32_t raw_cons = cpr->cp_raw_cons; - uint32_t cons; struct cmpl_base *cmp; + uint32_t raw_cons; + uint32_t cons; + if (cpr == NULL) + return; + + raw_cons = cpr->cp_raw_cons; while (1) { + if (!cpr || !cpr->cp_ring_struct) + return; + cons = RING_CMP(cpr->cp_ring_struct, raw_cons); cmp = &cpr->cp_desc_ring[cons]; diff --git a/drivers/net/bnxt/bnxt_irq.h b/drivers/net/bnxt/bnxt_irq.h index e21bec56..4d2f7af9 100644 --- a/drivers/net/bnxt/bnxt_irq.h +++ b/drivers/net/bnxt/bnxt_irq.h @@ -34,6 +34,9 @@ #ifndef _BNXT_IRQ_H_ #define _BNXT_IRQ_H_ +#define BNXT_MISC_VEC_ID RTE_INTR_VEC_ZERO_OFFSET +#define BNXT_RX_VEC_START RTE_INTR_VEC_RXTX_OFFSET + struct bnxt_irq { rte_intr_callback_fn handler; unsigned int vector; diff --git 
a/drivers/net/bnxt/bnxt_nvm_defs.h b/drivers/net/bnxt/bnxt_nvm_defs.h new file mode 100644 index 00000000..c5ccc9bc --- /dev/null +++ b/drivers/net/bnxt/bnxt_nvm_defs.h @@ -0,0 +1,75 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2014-2016 Broadcom Corporation + * Copyright (c) 2016-2017 Broadcom Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +#ifndef _BNXT_NVM_DEFS_H_ +#define _BNXT_NVM_DEFS_H_ + +enum bnxt_nvm_directory_type { + BNX_DIR_TYPE_UNUSED = 0, + BNX_DIR_TYPE_PKG_LOG = 1, + BNX_DIR_TYPE_UPDATE = 2, + BNX_DIR_TYPE_CHIMP_PATCH = 3, + BNX_DIR_TYPE_BOOTCODE = 4, + BNX_DIR_TYPE_VPD = 5, + BNX_DIR_TYPE_EXP_ROM_MBA = 6, + BNX_DIR_TYPE_AVS = 7, + BNX_DIR_TYPE_PCIE = 8, + BNX_DIR_TYPE_PORT_MACRO = 9, + BNX_DIR_TYPE_APE_FW = 10, + BNX_DIR_TYPE_APE_PATCH = 11, + BNX_DIR_TYPE_KONG_FW = 12, + BNX_DIR_TYPE_KONG_PATCH = 13, + BNX_DIR_TYPE_BONO_FW = 14, + BNX_DIR_TYPE_BONO_PATCH = 15, + BNX_DIR_TYPE_TANG_FW = 16, + BNX_DIR_TYPE_TANG_PATCH = 17, + BNX_DIR_TYPE_BOOTCODE_2 = 18, + BNX_DIR_TYPE_CCM = 19, + BNX_DIR_TYPE_PCI_CFG = 20, + BNX_DIR_TYPE_TSCF_UCODE = 21, + BNX_DIR_TYPE_ISCSI_BOOT = 22, + BNX_DIR_TYPE_ISCSI_BOOT_IPV6 = 24, + BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6 = 25, + BNX_DIR_TYPE_ISCSI_BOOT_CFG6 = 26, + BNX_DIR_TYPE_EXT_PHY = 27, + BNX_DIR_TYPE_SHARED_CFG = 40, + BNX_DIR_TYPE_PORT_CFG = 41, + BNX_DIR_TYPE_FUNC_CFG = 42, + BNX_DIR_TYPE_MGMT_CFG = 48, + BNX_DIR_TYPE_MGMT_DATA = 49, + BNX_DIR_TYPE_MGMT_WEB_DATA = 50, + BNX_DIR_TYPE_MGMT_WEB_META = 51, + BNX_DIR_TYPE_MGMT_EVENT_LOG = 52, + BNX_DIR_TYPE_MGMT_AUDIT_LOG = 53 +}; + +#define BNX_DIR_ORDINAL_FIRST 0 + +#define BNX_DIR_EXT_NONE 0 +#define BNX_DIR_EXT_INACTIVE (1 << 0) +#define BNX_DIR_EXT_UPDATE (1 << 1) + +#define BNX_DIR_ATTR_NONE 0 +#define BNX_DIR_ATTR_NO_CHKSUM (1 << 0) +#define BNX_DIR_ATTR_PROP_STREAM (1 << 1) + +#define BNX_PKG_LOG_MAX_LENGTH 4096 + +enum bnxnvm_pkglog_field_index { + BNX_PKG_LOG_FIELD_IDX_INSTALLED_TIMESTAMP = 0, + BNX_PKG_LOG_FIELD_IDX_PKG_DESCRIPTION = 1, + BNX_PKG_LOG_FIELD_IDX_PKG_VERSION = 2, + BNX_PKG_LOG_FIELD_IDX_PKG_TIMESTAMP = 3, + BNX_PKG_LOG_FIELD_IDX_PKG_CHECKSUM = 4, + BNX_PKG_LOG_FIELD_IDX_INSTALLED_ITEMS = 5, + BNX_PKG_LOG_FIELD_IDX_INSTALLED_MASK = 6 +}; + +#endif /* Don't add anything after this line */ diff --git a/drivers/net/bnxt/bnxt_ring.c b/drivers/net/bnxt/bnxt_ring.c index 9d0ae277..0fa2f0c0 100644 --- a/drivers/net/bnxt/bnxt_ring.c +++ b/drivers/net/bnxt/bnxt_ring.c @@ -98,7 +98,7 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx, struct rte_pci_device *pdev = bp->pdev; const struct rte_memzone *mz = NULL; char mz_name[RTE_MEMZONE_NAMESIZE]; - phys_addr_t mz_phys_addr; + rte_iova_t mz_phys_addr; int sz; int stats_len = (tx_ring_info || rx_ring_info) ? 
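DPDK 17.11 moves the memory APIs to an IOVA-centric model: phys_addr_t gives way to rte_iova_t, memzones expose mz->iova instead of mz->phys_addr, and rte_mem_virt2phy() callers migrate to rte_mem_virt2iova(). The ring, VNIC and HWRM buffer hunks in this patch all follow the same fallback pattern when the memzone reports no usable IOVA (address identical to the VA); a condensed sketch using only names that appear in the surrounding hunks:

    rte_iova_t mz_phys_addr = mz->iova;
    if ((unsigned long)mz->addr == mz_phys_addr) {
        /* no IOVA from the memzone: lock the pages, then translate */
        for (sz = 0; sz < total_alloc_len; sz += getpagesize())
            rte_mem_lock_page(((char *)mz->addr) + sz);
        mz_phys_addr = rte_mem_virt2iova(mz->addr);
        if (mz_phys_addr == 0)
            return -ENOMEM; /* mapping failed */
    }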
@@ -172,15 +172,15 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx, return -ENOMEM; } memset(mz->addr, 0, mz->len); - mz_phys_addr = mz->phys_addr; + mz_phys_addr = mz->iova; if ((unsigned long)mz->addr == mz_phys_addr) { RTE_LOG(WARNING, PMD, "Memzone physical address same as virtual.\n"); RTE_LOG(WARNING, PMD, - "Using rte_mem_virt2phy()\n"); + "Using rte_mem_virt2iova()\n"); for (sz = 0; sz < total_alloc_len; sz += getpagesize()) rte_mem_lock_page(((char *)mz->addr) + sz); - mz_phys_addr = rte_mem_virt2phy(mz->addr); + mz_phys_addr = rte_mem_virt2iova(mz->addr); if (mz_phys_addr == 0) { RTE_LOG(ERR, PMD, "unable to map ring address to physical memory\n"); @@ -231,7 +231,7 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx, rx_ring->bd = ((char *)mz->addr + ag_ring_start); rx_ring_info->ag_desc_ring = (struct rx_prod_pkt_bd *)rx_ring->bd; - rx_ring->bd_dma = mz->phys_addr + ag_ring_start; + rx_ring->bd_dma = mz->iova + ag_ring_start; rx_ring_info->ag_desc_mapping = rx_ring->bd_dma; rx_ring->mem_zone = (const void *)mz; @@ -323,8 +323,10 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp) ring = rxr->ag_ring_struct; /* Agg ring */ - if (ring == NULL) + if (ring == NULL) { RTE_LOG(ERR, PMD, "Alloc AGG Ring is NULL!\n"); + goto err_out; + } rc = bnxt_hwrm_ring_alloc(bp, ring, HWRM_RING_ALLOC_INPUT_RING_TYPE_RX, diff --git a/drivers/net/bnxt/bnxt_ring.h b/drivers/net/bnxt/bnxt_ring.h index 6d1eb588..164f482e 100644 --- a/drivers/net/bnxt/bnxt_ring.h +++ b/drivers/net/bnxt/bnxt_ring.h @@ -41,7 +41,7 @@ #define RING_NEXT(ring, idx) (((idx) + 1) & (ring)->ring_mask) #define RTE_MBUF_DATA_DMA_ADDR(mb) \ - ((uint64_t)((mb)->buf_physaddr + (mb)->data_off)) + ((uint64_t)((mb)->buf_iova + (mb)->data_off)) #define DB_IDX_MASK 0xffffff #define DB_IDX_VALID (0x1 << 26) @@ -70,7 +70,7 @@ struct bnxt_ring { void *bd; - phys_addr_t bd_dma; + rte_iova_t bd_dma; uint32_t ring_size; uint32_t ring_mask; diff --git a/drivers/net/bnxt/bnxt_rxq.c b/drivers/net/bnxt/bnxt_rxq.c index 0793820b..c4da474e 100644 --- a/drivers/net/bnxt/bnxt_rxq.c +++ b/drivers/net/bnxt/bnxt_rxq.c @@ -60,10 +60,13 @@ void bnxt_free_rxq_stats(struct bnxt_rx_queue *rxq) int bnxt_mq_rx_configure(struct bnxt *bp) { struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; - unsigned int i, j, nb_q_per_grp, ring_idx; - int start_grp_id, end_grp_id, rc = 0; + const struct rte_eth_vmdq_rx_conf *conf = + &dev_conf->rx_adv_conf.vmdq_rx_conf; + unsigned int i, j, nb_q_per_grp = 1, ring_idx = 0; + int start_grp_id, end_grp_id = 1, rc = 0; struct bnxt_vnic_info *vnic; struct bnxt_filter_info *filter; + enum rte_eth_nb_pools pools = bp->rx_cp_nr_rings, max_pools = 0; struct bnxt_rx_queue *rxq; bp->nr_vnics = 0; @@ -98,117 +101,125 @@ int bnxt_mq_rx_configure(struct bnxt *bp) } /* Multi-queue mode */ - if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) { + if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB_RSS) { /* VMDq ONLY, VMDq+RSS, VMDq+DCB, VMDq+DCB+RSS */ - enum rte_eth_nb_pools pools; switch (dev_conf->rxmode.mq_mode) { case ETH_MQ_RX_VMDQ_RSS: case ETH_MQ_RX_VMDQ_ONLY: - { - const struct rte_eth_vmdq_rx_conf *conf = - &dev_conf->rx_adv_conf.vmdq_rx_conf; - - /* ETH_8/64_POOLs */ - pools = conf->nb_queue_pools; - break; - } + /* ETH_8/64_POOLs */ + pools = conf->nb_queue_pools; + /* For each pool, allocate MACVLAN CFA rule & VNIC */ + max_pools = RTE_MIN(bp->max_vnics, + RTE_MIN(bp->max_l2_ctx, + RTE_MIN(bp->max_rsscos_ctx, + ETH_64_POOLS))); + if (pools > max_pools) + pools = max_pools; + break; + case ETH_MQ_RX_RSS: + pools = 
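/* plain RSS: one pool, and therefore one VNIC, per Rx completion ring */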
bp->rx_cp_nr_rings; + break; default: RTE_LOG(ERR, PMD, "Unsupported mq_mod %d\n", dev_conf->rxmode.mq_mode); rc = -EINVAL; goto err_out; } - /* For each pool, allocate MACVLAN CFA rule & VNIC */ - if (!pools) { - pools = RTE_MIN(bp->max_vnics, - RTE_MIN(bp->max_l2_ctx, - RTE_MIN(bp->max_rsscos_ctx, ETH_64_POOLS))); - RTE_LOG(ERR, PMD, - "VMDq pool not set, defaulted to 64\n"); - pools = ETH_64_POOLS; + } + + nb_q_per_grp = bp->rx_cp_nr_rings / pools; + start_grp_id = 0; + end_grp_id = nb_q_per_grp; + + for (i = 0; i < pools; i++) { + vnic = bnxt_alloc_vnic(bp); + if (!vnic) { + RTE_LOG(ERR, PMD, "VNIC alloc failed\n"); + rc = -ENOMEM; + goto err_out; } - nb_q_per_grp = bp->rx_cp_nr_rings / pools; - start_grp_id = 0; - end_grp_id = nb_q_per_grp; - - ring_idx = 0; - for (i = 0; i < pools; i++) { - vnic = bnxt_alloc_vnic(bp); - if (!vnic) { - RTE_LOG(ERR, PMD, - "VNIC alloc failed\n"); - rc = -ENOMEM; - goto err_out; - } - vnic->flags |= BNXT_VNIC_INFO_BCAST; - STAILQ_INSERT_TAIL(&bp->ff_pool[i], vnic, next); - bp->nr_vnics++; + vnic->flags |= BNXT_VNIC_INFO_BCAST; + STAILQ_INSERT_TAIL(&bp->ff_pool[i], vnic, next); + bp->nr_vnics++; - for (j = 0; j < nb_q_per_grp; j++, ring_idx++) { - rxq = bp->eth_dev->data->rx_queues[ring_idx]; - rxq->vnic = vnic; - } - if (i == 0) - vnic->func_default = true; - vnic->ff_pool_idx = i; - vnic->start_grp_id = start_grp_id; - vnic->end_grp_id = end_grp_id; - - filter = bnxt_alloc_filter(bp); - if (!filter) { - RTE_LOG(ERR, PMD, - "L2 filter alloc failed\n"); - rc = -ENOMEM; - goto err_out; + for (j = 0; j < nb_q_per_grp; j++, ring_idx++) { + rxq = bp->eth_dev->data->rx_queues[ring_idx]; + rxq->vnic = vnic; + } + if (i == 0) { + if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB) { + bp->eth_dev->data->promiscuous = 1; + vnic->flags |= BNXT_VNIC_INFO_PROMISC; } - /* - * TODO: Configure & associate CFA rule for - * each VNIC for each VMDq with MACVLAN, MACVLAN+TC - */ - STAILQ_INSERT_TAIL(&vnic->filter, filter, next); - - start_grp_id = end_grp_id + 1; - end_grp_id += nb_q_per_grp; + vnic->func_default = true; } - goto out; - } + vnic->ff_pool_idx = i; + vnic->start_grp_id = start_grp_id; + vnic->end_grp_id = end_grp_id; + + if (i) { + if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_DCB || + !(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS)) + vnic->rss_dflt_cr = true; + goto skip_filter_allocation; + } + filter = bnxt_alloc_filter(bp); + if (!filter) { + RTE_LOG(ERR, PMD, "L2 filter alloc failed\n"); + rc = -ENOMEM; + goto err_out; + } + /* + * TODO: Configure & associate CFA rule for + * each VNIC for each VMDq with MACVLAN, MACVLAN+TC + */ + STAILQ_INSERT_TAIL(&vnic->filter, filter, next); - /* Non-VMDq mode - RSS, DCB, RSS+DCB */ - /* Init default VNIC for RSS or DCB only */ - vnic = bnxt_alloc_vnic(bp); - if (!vnic) { - RTE_LOG(ERR, PMD, "VNIC alloc failed\n"); - rc = -ENOMEM; - goto err_out; - } - vnic->flags |= BNXT_VNIC_INFO_BCAST; - /* Partition the rx queues for the single pool */ - for (i = 0; i < bp->rx_cp_nr_rings; i++) { - rxq = bp->eth_dev->data->rx_queues[i]; - rxq->vnic = vnic; - } - STAILQ_INSERT_TAIL(&bp->ff_pool[0], vnic, next); - bp->nr_vnics++; - - vnic->func_default = true; - vnic->ff_pool_idx = 0; - vnic->start_grp_id = 0; - vnic->end_grp_id = bp->rx_cp_nr_rings; - filter = bnxt_alloc_filter(bp); - if (!filter) { - RTE_LOG(ERR, PMD, "L2 filter alloc failed\n"); - rc = -ENOMEM; - goto err_out; +skip_filter_allocation: + start_grp_id = end_grp_id; + end_grp_id += nb_q_per_grp; } - STAILQ_INSERT_TAIL(&vnic->filter, filter, next); - - if 
(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) - vnic->hash_type = - HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4 | - HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6; out: + if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) { + struct rte_eth_rss_conf *rss = &dev_conf->rx_adv_conf.rss_conf; + uint16_t hash_type = 0; + + if (bp->flags & BNXT_FLAG_UPDATE_HASH) { + rss = &bp->rss_conf; + bp->flags &= ~BNXT_FLAG_UPDATE_HASH; + } + + if (rss->rss_hf & ETH_RSS_IPV4) + hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4; + if (rss->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) + hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4; + if (rss->rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) + hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4; + if (rss->rss_hf & ETH_RSS_IPV6) + hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6; + if (rss->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) + hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6; + if (rss->rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) + hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6; + + for (i = 0; i < bp->nr_vnics; i++) { + STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) { + vnic->hash_type = hash_type; + + /* + * Use the supplied key if the key length is + * acceptable and the rss_key is not NULL + */ + if (rss->rss_key && + rss->rss_key_len <= HW_HASH_KEY_SIZE) + memcpy(vnic->rss_hash_key, + rss->rss_key, rss->rss_key_len); + } + } + } + return rc; err_out: @@ -349,3 +360,41 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev, out: return rc; } + +int +bnxt_rx_queue_intr_enable_op(struct rte_eth_dev *eth_dev, uint16_t queue_id) +{ + struct bnxt_rx_queue *rxq; + struct bnxt_cp_ring_info *cpr; + int rc = 0; + + if (eth_dev->data->rx_queues) { + rxq = eth_dev->data->rx_queues[queue_id]; + if (!rxq) { + rc = -EINVAL; + return rc; + } + cpr = rxq->cp_ring; + B_CP_DB_ARM(cpr); + } + return rc; +} + +int +bnxt_rx_queue_intr_disable_op(struct rte_eth_dev *eth_dev, uint16_t queue_id) +{ + struct bnxt_rx_queue *rxq; + struct bnxt_cp_ring_info *cpr; + int rc = 0; + + if (eth_dev->data->rx_queues) { + rxq = eth_dev->data->rx_queues[queue_id]; + if (!rxq) { + rc = -EINVAL; + return rc; + } + cpr = rxq->cp_ring; + B_CP_DB_DISARM(cpr); + } + return rc; +} diff --git a/drivers/net/bnxt/bnxt_rxq.h b/drivers/net/bnxt/bnxt_rxq.h index 01aaa007..508731ee 100644 --- a/drivers/net/bnxt/bnxt_rxq.h +++ b/drivers/net/bnxt/bnxt_rxq.h @@ -48,7 +48,7 @@ struct bnxt_rx_queue { uint16_t rx_free_thresh; /* max free RX desc to hold */ uint16_t queue_id; /* RX queue index */ uint16_t reg_idx; /* RX queue register index */ - uint8_t port_id; /* Device port identifier */ + uint16_t port_id; /* Device port identifier */ uint8_t crc_len; /* 0 if CRC stripped, 4 otherwise */ struct bnxt *bp; @@ -73,5 +73,9 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev, const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp); void bnxt_free_rx_mbufs(struct bnxt *bp); +int bnxt_rx_queue_intr_enable_op(struct rte_eth_dev *eth_dev, + uint16_t queue_id); +int bnxt_rx_queue_intr_disable_op(struct rte_eth_dev *eth_dev, + uint16_t queue_id); #endif diff --git a/drivers/net/bnxt/bnxt_rxr.c b/drivers/net/bnxt/bnxt_rxr.c index bee67d33..30891b74 100644 --- a/drivers/net/bnxt/bnxt_rxr.c +++ b/drivers/net/bnxt/bnxt_rxr.c @@ -199,7 +199,7 @@ static void bnxt_tpa_start(struct bnxt_rx_queue *rxq, if (tpa_start1->flags2 & rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS2_META_FORMAT_VLAN)) { mbuf->vlan_tci = rte_le_to_cpu_32(tpa_start1->metadata); - mbuf->ol_flags |= PKT_RX_VLAN_PKT; + mbuf->ol_flags |= PKT_RX_VLAN; } if 
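/* PKT_RX_VLAN is the 17.11 name for the deprecated PKT_RX_VLAN_PKT */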
(likely(tpa_start1->flags2 & rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS2_L4_CS_CALC))) @@ -219,6 +219,9 @@ static int bnxt_agg_bufs_valid(struct bnxt_cp_ring_info *cpr, raw_cp_cons = ADV_RAW_CMP(raw_cp_cons, agg_bufs); last_cp_cons = RING_CMP(cpr->cp_ring_struct, raw_cp_cons); agg_cmpl = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[last_cp_cons]; + cpr->valid = FLIP_VALID(raw_cp_cons, + cpr->cp_ring_struct->ring_mask, + cpr->valid); return CMP_VALID(agg_cmpl, raw_cp_cons, cpr->cp_ring_struct); } @@ -332,6 +335,48 @@ static inline struct rte_mbuf *bnxt_tpa_end( return mbuf; } +static uint32_t +bnxt_parse_pkt_type(struct rx_pkt_cmpl *rxcmp, struct rx_pkt_cmpl_hi *rxcmp1) +{ + uint32_t pkt_type = 0; + uint32_t t_ipcs = 0, ip = 0, ip6 = 0; + uint32_t tcp = 0, udp = 0, icmp = 0; + uint32_t vlan = 0; + + vlan = !!(rxcmp1->flags2 & + rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN)); + t_ipcs = !!(rxcmp1->flags2 & + rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC)); + ip6 = !!(rxcmp1->flags2 & + rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_TYPE)); + icmp = !!(rxcmp->flags_type & + rte_cpu_to_le_16(RX_PKT_CMPL_FLAGS_ITYPE_ICMP)); + tcp = !!(rxcmp->flags_type & + rte_cpu_to_le_16(RX_PKT_CMPL_FLAGS_ITYPE_TCP)); + udp = !!(rxcmp->flags_type & + rte_cpu_to_le_16(RX_PKT_CMPL_FLAGS_ITYPE_UDP)); + ip = !!(rxcmp->flags_type & + rte_cpu_to_le_16(RX_PKT_CMPL_FLAGS_ITYPE_IP)); + + pkt_type |= ((ip || tcp || udp || icmp) && !t_ipcs && !ip6) ? + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN : 0; + pkt_type |= ((ip || tcp || udp || icmp) && !t_ipcs && ip6) ? + RTE_PTYPE_L3_IPV6_EXT_UNKNOWN : 0; + pkt_type |= (!t_ipcs && icmp) ? RTE_PTYPE_L4_ICMP : 0; + pkt_type |= (!t_ipcs && udp) ? RTE_PTYPE_L4_UDP : 0; + pkt_type |= (!t_ipcs && tcp) ? RTE_PTYPE_L4_TCP : 0; + pkt_type |= ((ip || tcp || udp || icmp) && t_ipcs && !ip6) ? + RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN : 0; + pkt_type |= ((ip || tcp || udp || icmp) && t_ipcs && ip6) ? + RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN : 0; + pkt_type |= (t_ipcs && icmp) ? RTE_PTYPE_INNER_L4_ICMP : 0; + pkt_type |= (t_ipcs && udp) ? RTE_PTYPE_INNER_L4_UDP : 0; + pkt_type |= (t_ipcs && tcp) ? RTE_PTYPE_INNER_L4_TCP : 0; + pkt_type |= vlan ? 
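/* t_ipcs set means the completion is for tunneled traffic, hence the
 * RTE_PTYPE_INNER_* variants in the branches above */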
RTE_PTYPE_L2_ETHER_VLAN : 0; + + return pkt_type; +} + static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt, struct bnxt_rx_queue *rxq, uint32_t *raw_cons) { @@ -360,13 +405,17 @@ static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt, if (!CMP_VALID(rxcmp1, tmp_raw_cons, cpr->cp_ring_struct)) return -EBUSY; + cpr->valid = FLIP_VALID(cp_cons, + cpr->cp_ring_struct->ring_mask, + cpr->valid); + cmp_type = CMP_TYPE(rxcmp); - if (cmp_type == RX_PKT_CMPL_TYPE_RX_L2_TPA_START) { + if (cmp_type == RX_TPA_START_CMPL_TYPE_RX_TPA_START) { bnxt_tpa_start(rxq, (struct rx_tpa_start_cmpl *)rxcmp, (struct rx_tpa_start_cmpl_hi *)rxcmp1); rc = -EINVAL; /* Continue w/o new mbuf */ goto next_rx; - } else if (cmp_type == RX_PKT_CMPL_TYPE_RX_L2_TPA_END) { + } else if (cmp_type == RX_TPA_END_CMPL_TYPE_RX_TPA_END) { mbuf = bnxt_tpa_end(rxq, &tmp_raw_cons, (struct rx_tpa_end_cmpl *)rxcmp, (struct rx_tpa_end_cmpl_hi *)rxcmp1); @@ -388,10 +437,10 @@ static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt, cons = rxcmp->opaque; mbuf = bnxt_consume_rx_buf(rxr, cons); - rte_prefetch0(mbuf); - if (mbuf == NULL) - return -ENOMEM; + return -EBUSY; + + rte_prefetch0(mbuf); mbuf->nb_segs = 1; mbuf->next = NULL; @@ -415,9 +464,21 @@ static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt, (RX_PKT_CMPL_METADATA_VID_MASK | RX_PKT_CMPL_METADATA_DE | RX_PKT_CMPL_METADATA_PRI_MASK); - mbuf->ol_flags |= PKT_RX_VLAN_PKT; + mbuf->ol_flags |= PKT_RX_VLAN; } + if (likely(RX_CMP_IP_CS_OK(rxcmp1))) + mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD; + else + mbuf->ol_flags |= PKT_RX_IP_CKSUM_NONE; + + if (likely(RX_CMP_L4_CS_OK(rxcmp1))) + mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD; + else + mbuf->ol_flags |= PKT_RX_L4_CKSUM_NONE; + + mbuf->packet_type = bnxt_parse_pkt_type(rxcmp, rxcmp1); + #ifdef BNXT_DEBUG if (rxcmp1->errors_v2 & RX_CMP_L2_ERRORS) { /* Re-install the mbuf back to the rx ring */ @@ -448,13 +509,14 @@ static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt, if (bnxt_alloc_rx_data(rxq, rxr, prod)) { RTE_LOG(ERR, PMD, "mbuf alloc failed with prod=0x%x\n", prod); rc = -ENOMEM; + goto rx; } rxr->rx_prod = prod; /* * All MBUFs are allocated with the same size under DPDK, * no optimization for rx_copy_thresh */ - +rx: *rx_pkt = mbuf; next_rx: @@ -476,22 +538,24 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, struct rx_pkt_cmpl *rxcmp; uint16_t prod = rxr->rx_prod; uint16_t ag_prod = rxr->ag_prod; + int rc = 0; /* Handle RX burst request */ while (1) { - int rc; - cons = RING_CMP(cpr->cp_ring_struct, raw_cons); rte_prefetch0(&cpr->cp_desc_ring[cons]); rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons]; if (!CMP_VALID(rxcmp, raw_cons, cpr->cp_ring_struct)) break; + cpr->valid = FLIP_VALID(cons, + cpr->cp_ring_struct->ring_mask, + cpr->valid); /* TODO: Avoid magic numbers... */ if ((CMP_TYPE(rxcmp) & 0x30) == 0x10) { rc = bnxt_rx_pkt(&rx_pkts[nb_rx_pkts], rxq, &raw_cons); - if (likely(!rc)) + if (likely(!rc) || rc == -ENOMEM) nb_rx_pkts++; if (rc == -EBUSY) /* partial completion */ break; @@ -514,6 +578,30 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, B_RX_DB(rxr->rx_doorbell, rxr->rx_prod); /* Ring the AGG ring DB */ B_RX_DB(rxr->ag_doorbell, rxr->ag_prod); + + /* Attempt to alloc Rx buf in case of a previous allocation failure. */ + if (rc == -ENOMEM) { + int i; + + for (i = prod; i <= nb_rx_pkts; + i = RING_NEXT(rxr->rx_ring_struct, i)) { + struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i]; + + /* Buffer already allocated for this index. */ + if (rx_buf->mbuf != NULL) + continue; + + /* This slot is empty. 
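An earlier bnxt_alloc_rx_data() call returned -ENOMEM for this index and left mbuf == NULL.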
Alloc buffer for Rx */ + if (!bnxt_alloc_rx_data(rxq, rxr, i)) { + rxr->rx_prod = i; + B_RX_DB(rxr->rx_doorbell, rxr->rx_prod); + } else { + RTE_LOG(ERR, PMD, "Alloc mbuf failed\n"); + break; + } + } + } + return nb_rx_pkts; } diff --git a/drivers/net/bnxt/bnxt_rxr.h b/drivers/net/bnxt/bnxt_rxr.h index f8d6dc80..a94373d1 100644 --- a/drivers/net/bnxt/bnxt_rxr.h +++ b/drivers/net/bnxt/bnxt_rxr.h @@ -52,6 +52,22 @@ #define BNXT_TPA_OUTER_L3_OFF(hdr_info) \ ((hdr_info) & 0x1ff) +#define RX_CMP_L4_CS_BITS rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_L4_CS_CALC) + +#define RX_CMP_L4_CS_ERR_BITS rte_cpu_to_le_32(RX_PKT_CMPL_ERRORS_L4_CS_ERROR) + +#define RX_CMP_L4_CS_OK(rxcmp1) \ + (((rxcmp1)->flags2 & RX_CMP_L4_CS_BITS) && \ + !((rxcmp1)->errors_v2 & RX_CMP_L4_CS_ERR_BITS)) + +#define RX_CMP_IP_CS_ERR_BITS rte_cpu_to_le_32(RX_PKT_CMPL_ERRORS_IP_CS_ERROR) + +#define RX_CMP_IP_CS_BITS rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_CS_CALC) + +#define RX_CMP_IP_CS_OK(rxcmp1) \ + (((rxcmp1)->flags2 & RX_CMP_IP_CS_BITS) && \ + !((rxcmp1)->errors_v2 & RX_CMP_IP_CS_ERR_BITS)) + enum pkt_hash_types { PKT_HASH_TYPE_NONE, /* Undefined type */ PKT_HASH_TYPE_L2, /* Input: src_MAC, dest_MAC */ @@ -85,8 +101,8 @@ struct bnxt_rx_ring_info { struct bnxt_sw_rx_bd *rx_buf_ring; /* sw ring */ struct bnxt_sw_rx_bd *ag_buf_ring; /* sw ring */ - phys_addr_t rx_desc_mapping; - phys_addr_t ag_desc_mapping; + rte_iova_t rx_desc_mapping; + rte_iova_t ag_desc_mapping; struct bnxt_ring *rx_ring_struct; struct bnxt_ring *ag_ring_struct; diff --git a/drivers/net/bnxt/bnxt_stats.c b/drivers/net/bnxt/bnxt_stats.c index d7d0e35c..fe83d370 100644 --- a/drivers/net/bnxt/bnxt_stats.c +++ b/drivers/net/bnxt/bnxt_stats.c @@ -228,9 +228,10 @@ void bnxt_free_stats(struct bnxt *bp) } } -void bnxt_stats_get_op(struct rte_eth_dev *eth_dev, +int bnxt_stats_get_op(struct rte_eth_dev *eth_dev, struct rte_eth_stats *bnxt_stats) { + int rc = 0; unsigned int i; struct bnxt *bp = eth_dev->data->dev_private; @@ -240,17 +241,26 @@ void bnxt_stats_get_op(struct rte_eth_dev *eth_dev, struct bnxt_rx_queue *rxq = bp->rx_queues[i]; struct bnxt_cp_ring_info *cpr = rxq->cp_ring; - bnxt_hwrm_ctx_qstats(bp, cpr->hw_stats_ctx_id, i, bnxt_stats); + rc = bnxt_hwrm_ctx_qstats(bp, cpr->hw_stats_ctx_id, i, + bnxt_stats, 1); + if (unlikely(rc)) + return rc; } for (i = 0; i < bp->tx_cp_nr_rings; i++) { struct bnxt_tx_queue *txq = bp->tx_queues[i]; struct bnxt_cp_ring_info *cpr = txq->cp_ring; - bnxt_hwrm_ctx_qstats(bp, cpr->hw_stats_ctx_id, i, bnxt_stats); + rc = bnxt_hwrm_ctx_qstats(bp, cpr->hw_stats_ctx_id, i, + bnxt_stats, 0); + if (unlikely(rc)) + return rc; } - bnxt_hwrm_func_qstats(bp, 0xffff, bnxt_stats); + rc = bnxt_hwrm_func_qstats(bp, 0xffff, bnxt_stats); + if (unlikely(rc)) + return rc; bnxt_stats->rx_nombuf = rte_atomic64_read(&bp->rx_mbuf_alloc_fail); + return rc; } void bnxt_stats_reset_op(struct rte_eth_dev *eth_dev) @@ -358,3 +368,54 @@ void bnxt_dev_xstats_reset_op(struct rte_eth_dev *eth_dev) if (!(bp->flags & BNXT_FLAG_PORT_STATS)) RTE_LOG(ERR, PMD, "Operation not supported\n"); } + +int bnxt_dev_xstats_get_by_id_op(struct rte_eth_dev *dev, const uint64_t *ids, + uint64_t *values, unsigned int limit) +{ + /* Account for the Tx drop pkts aka the Anti spoof counter */ + const unsigned int stat_cnt = RTE_DIM(bnxt_rx_stats_strings) + + RTE_DIM(bnxt_tx_stats_strings) + 1; + struct rte_eth_xstat xstats[stat_cnt]; + uint64_t values_copy[stat_cnt]; + uint16_t i; + + if (!ids) + return bnxt_dev_xstats_get_op(dev, xstats, stat_cnt); + + bnxt_dev_xstats_get_by_id_op(dev, 
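/* recurse with ids == NULL to snapshot the full counter table first */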
NULL, values_copy, stat_cnt); + for (i = 0; i < limit; i++) { + if (ids[i] >= stat_cnt) { + RTE_LOG(ERR, PMD, "id value isn't valid"); + return -1; + } + values[i] = values_copy[ids[i]]; + } + return stat_cnt; +} + +int bnxt_dev_xstats_get_names_by_id_op(struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, + const uint64_t *ids, unsigned int limit) +{ + /* Account for the Tx drop pkts aka the Anti spoof counter */ + const unsigned int stat_cnt = RTE_DIM(bnxt_rx_stats_strings) + + RTE_DIM(bnxt_tx_stats_strings) + 1; + struct rte_eth_xstat_name xstats_names_copy[stat_cnt]; + uint16_t i; + + if (!ids) + return bnxt_dev_xstats_get_names_op(dev, xstats_names, + stat_cnt); + bnxt_dev_xstats_get_names_by_id_op(dev, xstats_names_copy, NULL, + stat_cnt); + + for (i = 0; i < limit; i++) { + if (ids[i] >= stat_cnt) { + RTE_LOG(ERR, PMD, "id value isn't valid"); + return -1; + } + strcpy(xstats_names[i].name, + xstats_names_copy[ids[i]].name); + } + return stat_cnt; +} diff --git a/drivers/net/bnxt/bnxt_stats.h b/drivers/net/bnxt/bnxt_stats.h index b6d133ef..51d16f5d 100644 --- a/drivers/net/bnxt/bnxt_stats.h +++ b/drivers/net/bnxt/bnxt_stats.h @@ -37,7 +37,7 @@ #include <rte_ethdev.h> void bnxt_free_stats(struct bnxt *bp); -void bnxt_stats_get_op(struct rte_eth_dev *eth_dev, +int bnxt_stats_get_op(struct rte_eth_dev *eth_dev, struct rte_eth_stats *bnxt_stats); void bnxt_stats_reset_op(struct rte_eth_dev *eth_dev); int bnxt_dev_xstats_get_names_op(__rte_unused struct rte_eth_dev *eth_dev, @@ -46,6 +46,11 @@ int bnxt_dev_xstats_get_names_op(__rte_unused struct rte_eth_dev *eth_dev, int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev, struct rte_eth_xstat *xstats, unsigned int n); void bnxt_dev_xstats_reset_op(struct rte_eth_dev *eth_dev); +int bnxt_dev_xstats_get_by_id_op(struct rte_eth_dev *dev, const uint64_t *ids, + uint64_t *values, unsigned int limit); +int bnxt_dev_xstats_get_names_by_id_op(struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, + const uint64_t *ids, unsigned int limit); struct bnxt_xstats_name_off { char name[RTE_ETH_XSTATS_NAME_SIZE]; diff --git a/drivers/net/bnxt/bnxt_txq.h b/drivers/net/bnxt/bnxt_txq.h index 16f3a0bd..f753c10f 100644 --- a/drivers/net/bnxt/bnxt_txq.h +++ b/drivers/net/bnxt/bnxt_txq.h @@ -46,7 +46,7 @@ struct bnxt_tx_queue { uint16_t tx_next_rs; /* next desc to set RS bit */ uint16_t queue_id; /* TX queue index */ uint16_t reg_idx; /* TX queue register index */ - uint8_t port_id; /* Device port identifier */ + uint16_t port_id; /* Device port identifier */ uint8_t pthresh; /* Prefetch threshold register */ uint8_t hthresh; /* Host threshold register */ uint8_t wthresh; /* Write-back threshold reg */ diff --git a/drivers/net/bnxt/bnxt_txr.c b/drivers/net/bnxt/bnxt_txr.c index 6870b16d..8ca4bbd8 100644 --- a/drivers/net/bnxt/bnxt_txr.c +++ b/drivers/net/bnxt/bnxt_txr.c @@ -161,7 +161,7 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt, if (tx_pkt->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM | PKT_TX_IP_CKSUM | - PKT_TX_VLAN_PKT)) + PKT_TX_VLAN_PKT | PKT_TX_OUTER_IP_CKSUM)) long_bd = true; tx_buf = &txr->tx_buf_ring[txr->tx_prod]; @@ -211,21 +211,39 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt, if (tx_pkt->ol_flags & PKT_TX_TCP_SEG) { /* TSO */ - txbd1->lflags = TX_BD_LONG_LFLAGS_LSO; + txbd1->lflags |= TX_BD_LONG_LFLAGS_LSO; txbd1->hdr_size = tx_pkt->l2_len + tx_pkt->l3_len + tx_pkt->l4_len + tx_pkt->outer_l2_len + tx_pkt->outer_l3_len; txbd1->mss = tx_pkt->tso_segsz; - } else if 
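/* the composite PKT_TX_* masks defined in bnxt_txr.h overlap, so this
 * ladder must test the widest combination (outer IP + inner IP + L4)
 * before its subsets */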
(tx_pkt->ol_flags & (PKT_TX_TCP_CKSUM | - PKT_TX_UDP_CKSUM)) { + } else if (tx_pkt->ol_flags & PKT_TX_OIP_IIP_TCP_UDP_CKSUM) { + /* Outer IP, Inner IP, Inner TCP/UDP CSO */ + txbd1->lflags |= TX_BD_FLG_TIP_IP_TCP_UDP_CHKSUM; + txbd1->mss = 0; + } else if (tx_pkt->ol_flags & PKT_TX_IIP_TCP_UDP_CKSUM) { + /* (Inner) IP, (Inner) TCP/UDP CSO */ + txbd1->lflags |= TX_BD_FLG_IP_TCP_UDP_CHKSUM; + txbd1->mss = 0; + } else if (tx_pkt->ol_flags & PKT_TX_OIP_TCP_UDP_CKSUM) { + /* Outer IP, (Inner) TCP/UDP CSO */ + txbd1->lflags |= TX_BD_FLG_TIP_TCP_UDP_CHKSUM; + txbd1->mss = 0; + } else if (tx_pkt->ol_flags & PKT_TX_OIP_IIP_CKSUM) { + /* Outer IP, Inner IP CSO */ + txbd1->lflags |= TX_BD_FLG_TIP_IP_CHKSUM; + txbd1->mss = 0; + } else if (tx_pkt->ol_flags & PKT_TX_TCP_UDP_CKSUM) { /* TCP/UDP CSO */ - txbd1->lflags = TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM; + txbd1->lflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM; txbd1->mss = 0; - } else if (tx_pkt->ol_flags & PKT_TX_IP_CKSUM) { /* IP CSO */ - txbd1->lflags = TX_BD_LONG_LFLAGS_IP_CHKSUM; + txbd1->lflags |= TX_BD_LONG_LFLAGS_IP_CHKSUM; + txbd1->mss = 0; + } else if (tx_pkt->ol_flags & PKT_TX_OUTER_IP_CKSUM) { + /* IP CSO */ + txbd1->lflags |= TX_BD_LONG_LFLAGS_T_IP_CHKSUM; txbd1->mss = 0; } } else { @@ -295,6 +313,9 @@ static int bnxt_handle_tx_cp(struct bnxt_tx_queue *txq) if (!CMP_VALID(txcmp, raw_cons, cpr->cp_ring_struct)) break; + cpr->valid = FLIP_VALID(cons, + cpr->cp_ring_struct->ring_mask, + cpr->valid); if (CMP_TYPE(txcmp) == TX_CMPL_TYPE_TX_L2) nb_tx_pkts++; diff --git a/drivers/net/bnxt/bnxt_txr.h b/drivers/net/bnxt/bnxt_txr.h index 5b097114..2feac51d 100644 --- a/drivers/net/bnxt/bnxt_txr.h +++ b/drivers/net/bnxt/bnxt_txr.h @@ -49,7 +49,7 @@ struct bnxt_tx_ring_info { struct tx_bd_long *tx_desc_ring; struct bnxt_sw_tx_bd *tx_buf_ring; - phys_addr_t tx_desc_mapping; + rte_iova_t tx_desc_mapping; #define BNXT_DEV_STATE_CLOSING 0x1 uint32_t dev_state; @@ -69,4 +69,25 @@ int bnxt_init_tx_ring_struct(struct bnxt_tx_queue *txq, unsigned int socket_id); uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts); +#define PKT_TX_OIP_IIP_TCP_UDP_CKSUM (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM | \ + PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM) +#define PKT_TX_IIP_TCP_UDP_CKSUM (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM | \ + PKT_TX_IP_CKSUM) +#define PKT_TX_OIP_TCP_UDP_CKSUM (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM | \ + PKT_TX_OUTER_IP_CKSUM) +#define PKT_TX_OIP_IIP_CKSUM (PKT_TX_IP_CKSUM | \ + PKT_TX_OUTER_IP_CKSUM) +#define PKT_TX_TCP_UDP_CKSUM (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM) + + +#define TX_BD_FLG_TIP_IP_TCP_UDP_CHKSUM (TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM | \ + TX_BD_LONG_LFLAGS_T_IP_CHKSUM | \ + TX_BD_LONG_LFLAGS_IP_CHKSUM) +#define TX_BD_FLG_IP_TCP_UDP_CHKSUM (TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM | \ + TX_BD_LONG_LFLAGS_IP_CHKSUM) +#define TX_BD_FLG_TIP_IP_CHKSUM (TX_BD_LONG_LFLAGS_T_IP_CHKSUM | \ + TX_BD_LONG_LFLAGS_IP_CHKSUM) +#define TX_BD_FLG_TIP_TCP_UDP_CHKSUM (TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM | \ + TX_BD_LONG_LFLAGS_T_IP_CHKSUM) + #endif diff --git a/drivers/net/bnxt/bnxt_vnic.c b/drivers/net/bnxt/bnxt_vnic.c index db9fb079..5bac2605 100644 --- a/drivers/net/bnxt/bnxt_vnic.c +++ b/drivers/net/bnxt/bnxt_vnic.c @@ -83,6 +83,7 @@ void bnxt_init_vnics(struct bnxt *bp) prandom_bytes(vnic->rss_hash_key, HW_HASH_KEY_SIZE); STAILQ_INIT(&vnic->filter); + STAILQ_INIT(&vnic->flow_list); STAILQ_INSERT_TAIL(&bp->free_vnic_list, vnic, next); } for (i = 0; i < MAX_FF_POOLS; i++) @@ -174,7 +175,7 @@ int bnxt_alloc_vnic_attributes(struct bnxt *bp) 
BNXT_MAX_MC_ADDRS * ETHER_ADDR_LEN); uint16_t max_vnics; int i; - phys_addr_t mz_phys_addr; + rte_iova_t mz_phys_addr; max_vnics = bp->max_vnics; snprintf(mz_name, RTE_MEMZONE_NAMESIZE, @@ -191,13 +192,13 @@ int bnxt_alloc_vnic_attributes(struct bnxt *bp) if (!mz) return -ENOMEM; } - mz_phys_addr = mz->phys_addr; + mz_phys_addr = mz->iova; if ((unsigned long)mz->addr == mz_phys_addr) { RTE_LOG(WARNING, PMD, "Memzone physical address same as virtual.\n"); RTE_LOG(WARNING, PMD, - "Using rte_mem_virt2phy()\n"); - mz_phys_addr = rte_mem_virt2phy(mz->addr); + "Using rte_mem_virt2iova()\n"); + mz_phys_addr = rte_mem_virt2iova(mz->addr); if (mz_phys_addr == 0) { RTE_LOG(ERR, PMD, "unable to map vnic address to physical memory\n"); diff --git a/drivers/net/bnxt/bnxt_vnic.h b/drivers/net/bnxt/bnxt_vnic.h index 993f2212..875dc3c1 100644 --- a/drivers/net/bnxt/bnxt_vnic.h +++ b/drivers/net/bnxt/bnxt_vnic.h @@ -53,11 +53,11 @@ struct bnxt_vnic_info { uint16_t dflt_ring_grp; uint16_t mru; uint16_t hash_type; - phys_addr_t rss_table_dma_addr; + rte_iova_t rss_table_dma_addr; uint16_t *rss_table; - phys_addr_t rss_hash_key_dma_addr; + rte_iova_t rss_hash_key_dma_addr; void *rss_hash_key; - phys_addr_t mc_list_dma_addr; + rte_iova_t mc_list_dma_addr; char *mc_list; uint32_t mc_addr_cnt; #define BNXT_MAX_MC_ADDRS 16 @@ -80,6 +80,7 @@ struct bnxt_vnic_info { bool rss_dflt_cr; STAILQ_HEAD(, bnxt_filter_info) filter; + STAILQ_HEAD(, rte_flow) flow_list; }; struct bnxt; diff --git a/drivers/net/bnxt/hsi_struct_def_dpdk.h b/drivers/net/bnxt/hsi_struct_def_dpdk.h index cb8660af..c16edbad 100644 --- a/drivers/net/bnxt/hsi_struct_def_dpdk.h +++ b/drivers/net/bnxt/hsi_struct_def_dpdk.h @@ -33,25 +33,27 @@ #ifndef _HSI_STRUCT_DEF_DPDK_ #define _HSI_STRUCT_DEF_DPDK_ -/* HSI and HWRM Specification 1.7.7 */ +/* HSI and HWRM Specification 1.8.2 */ #define HWRM_VERSION_MAJOR 1 -#define HWRM_VERSION_MINOR 7 -#define HWRM_VERSION_UPDATE 7 +#define HWRM_VERSION_MINOR 8 +#define HWRM_VERSION_UPDATE 2 -#define HWRM_VERSION_STR "1.7.7" +#define HWRM_VERSION_RSVD 0 /* non-zero means beta version */ + +#define HWRM_VERSION_STR "1.8.2.0" /* * Following is the signature for HWRM message field that indicates not * applicable (All F's). Need to cast it the size of the field if needed. */ #define HWRM_NA_SIGNATURE ((uint32_t)(-1)) #define HWRM_MAX_REQ_LEN (128) /* hwrm_func_buf_rgtr */ -#define HWRM_MAX_RESP_LEN (248) /* hwrm_selftest_qlist */ +#define HWRM_MAX_RESP_LEN (280) /* hwrm_selftest_qlist */ #define HW_HASH_INDEX_SIZE 0x80 /* 7 bit indirection table index. */ #define HW_HASH_KEY_SIZE 40 #define HWRM_RESP_VALID_KEY 1 /* valid key for HWRM response */ #define HWRM_ROCE_SP_HSI_VERSION_MAJOR 1 -#define HWRM_ROCE_SP_HSI_VERSION_MINOR 7 -#define HWRM_ROCE_SP_HSI_VERSION_UPDATE 4 +#define HWRM_ROCE_SP_HSI_VERSION_MINOR 8 +#define HWRM_ROCE_SP_HSI_VERSION_UPDATE 2 /* * Request types @@ -129,6 +131,9 @@ #define HWRM_CFA_NTUPLE_FILTER_ALLOC (UINT32_C(0x99)) #define HWRM_CFA_NTUPLE_FILTER_FREE (UINT32_C(0x9a)) #define HWRM_CFA_NTUPLE_FILTER_CFG (UINT32_C(0x9b)) +#define HWRM_CFA_EM_FLOW_ALLOC (UINT32_C(0x9c)) +#define HWRM_CFA_EM_FLOW_FREE (UINT32_C(0x9d)) +#define HWRM_CFA_EM_FLOW_CFG (UINT32_C(0x9e)) #define HWRM_TUNNEL_DST_PORT_QUERY (UINT32_C(0xa0)) #define HWRM_TUNNEL_DST_PORT_ALLOC (UINT32_C(0xa1)) #define HWRM_TUNNEL_DST_PORT_FREE (UINT32_C(0xa2)) @@ -815,8 +820,6 @@ struct rx_pkt_cmpl { * packet. 
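/*
 * A minimal illustrative sketch, not from the patch: the rte_iova_t
 * pattern the phys_addr_t conversion above applies throughout the
 * driver. The helper name and the failure convention (returning 0) are
 * placeholders.
 */
#include <rte_memory.h>
#include <rte_memzone.h>

static rte_iova_t alloc_dma_mem(const char *mz_name, size_t len, void **va)
{
	const struct rte_memzone *mz;
	rte_iova_t iova;

	mz = rte_memzone_reserve(mz_name, len, SOCKET_ID_ANY, 0);
	if (mz == NULL)
		return 0;
	iova = mz->iova;                     /* was mz->phys_addr */
	if ((unsigned long)mz->addr == iova) /* VA == IOVA: re-translate */
		iova = rte_mem_virt2iova(mz->addr); /* was rte_mem_virt2phy() */
	*va = mz->addr;
	return iova;
}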
Length = 32B */ #define RX_PKT_CMPL_TYPE_RX_L2 UINT32_C(0x11) - #define RX_PKT_CMPL_TYPE_RX_L2_TPA_START UINT32_C(0x13) - #define RX_PKT_CMPL_TYPE_RX_L2_TPA_END UINT32_C(0x15) /* * When this bit is '1', it indicates a packet that has an error * of some type. Type of error is indicated in error_flags. @@ -1800,6 +1803,8 @@ struct hwrm_async_event_cmpl { UINT32_C(0x32) /* VF Configuration Change */ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE UINT32_C(0x33) + /* LLFC/PFC Configuration Change */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LLFC_PFC_CHANGE UINT32_C(0x34) /* HWRM Error */ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR UINT32_C(0xff) uint32_t event_data2; @@ -2117,9 +2122,18 @@ struct hwrm_ver_get_output { * This field returns the default request timeout value in * milliseconds. */ + uint8_t init_pending; + /* + * This field will indicate if any subsystems is not fully + * initialized. + */ + /* + * If set to 1, device is not ready. If set to 0, device is + * ready to accept all HWRM commands. + */ + #define HWRM_VER_GET_OUTPUT_INIT_PENDING_DEV_NOT_RDY UINT32_C(0x1) uint8_t unused_0; uint8_t unused_1; - uint8_t unused_2; uint8_t valid; /* * This field is used in Output records to indicate that the @@ -2246,6 +2260,122 @@ struct hwrm_func_reset_output { */ } __attribute__((packed)); +/* hwrm_func_vf_cfg */ +/* + * Description: This command allows configuration of a VF by its driver. If this + * function is called by a PF driver, then the HWRM shall fail this command. If + * guest VLAN and/or MAC address are provided in this command, then the HWRM + * shall set up appropriate MAC/VLAN filters for the VF that is being + * configured. A VF driver should set VF MTU/MRU using this command prior to + * allocating RX VNICs or TX rings for the corresponding VF. + */ +/* Input (32 bytes) */ +struct hwrm_func_vf_cfg_input { + uint16_t req_type; + /* + * This value indicates what type of request this is. The format for the + * rest of the command is determined by this field. + */ + uint16_t cmpl_ring; + /* + * This value indicates the what completion ring the request will be + * optionally completed on. If the value is -1, then no CR completion + * will be generated. Any other value must be a valid CR ring_id value + * for this function. + */ + uint16_t seq_id; + /* This value indicates the command sequence number. */ + uint16_t target_id; + /* + * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids + * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM + */ + uint64_t resp_addr; + /* + * This is the host address where the response will be written when the + * request is complete. This area must be 16B aligned and must be + * cleared to zero before the request is made. + */ + uint32_t enables; + /* This bit must be '1' for the mtu field to be configured. */ + #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_MTU UINT32_C(0x1) + /* This bit must be '1' for the guest_vlan field to be configured. */ + #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_GUEST_VLAN UINT32_C(0x2) + /* + * This bit must be '1' for the async_event_cr field to be configured. + */ + #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_ASYNC_EVENT_CR UINT32_C(0x4) + /* This bit must be '1' for the dflt_mac_addr field to be configured. */ + #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_DFLT_MAC_ADDR UINT32_C(0x8) + uint16_t mtu; + /* + * The maximum transmission unit requested on the function. 
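/*
 * A minimal illustrative sketch, not from the patch: the new
 * init_pending byte lets a driver poll HWRM_VER_GET until firmware is
 * ready. "resp" is assumed to point at a completed VER_GET response.
 */
#include <stdbool.h>
#include "hsi_struct_def_dpdk.h"

static bool bnxt_fw_ready(const struct hwrm_ver_get_output *resp)
{
	/* DEV_NOT_RDY set: a subsystem is still initializing and the
	 * device cannot yet accept all HWRM commands. */
	return !(resp->init_pending &
		 HWRM_VER_GET_OUTPUT_INIT_PENDING_DEV_NOT_RDY);
}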
The HWRM + * should make sure that the mtu of the function does not exceed the mtu + * of the physical port that this function is associated with. In + * addition to requesting mtu per function, it is possible to configure + * mtu per transmit ring. By default, the mtu of each transmit ring + * associated with a function is equal to the mtu of the function. The + * HWRM should make sure that the mtu of each transmit ring that is + * assigned to a function has a valid mtu. + */ + uint16_t guest_vlan; + /* + * The guest VLAN for the function being configured. This field's format + * is same as 802.1Q Tag's Tag Control Information (TCI) format that + * includes both Priority Code Point (PCP) and VLAN Identifier (VID). + */ + uint16_t async_event_cr; + /* + * ID of the target completion ring for receiving asynchronous event + * completions. If this field is not valid, then the HWRM shall use the + * default completion ring of the function that is being configured as + * the target completion ring for providing any asynchronous event + * completions for that function. If this field is valid, then the HWRM + * shall use the completion ring identified by this ID as the target + * completion ring for providing any asynchronous event completions for + * the function that is being configured. + */ + uint8_t dflt_mac_addr[6]; + /* + * This value is the current MAC address requested by the VF driver to + * be configured on this VF. A value of 00-00-00-00-00-00 indicates no + * MAC address configuration is requested by the VF driver. The parent + * PF driver may reject or overwrite this MAC address. + */ +} __attribute__((packed)); + +/* Output (16 bytes) */ + +struct hwrm_func_vf_cfg_output { + uint16_t error_code; + /* + * Pass/Fail or error type Note: receiver to verify the in parameters, + * and fail the call with an error when appropriate + */ + uint16_t req_type; + /* This field returns the type of original request. */ + uint16_t seq_id; + /* This field provides original sequence number of the command. */ + uint16_t resp_len; + /* + * This field is the length of the response in bytes. The last + * byte of the response is a valid flag that will read as '1' + * when the command has been completely written to memory. + */ + uint32_t unused_0; + uint8_t unused_1; + uint8_t unused_2; + uint8_t unused_3; + uint8_t valid; + /* + * This field is used in Output records to indicate that the output is + * completely written to RAM. This field should be read as '1' to + * indicate that the output has been completely written. When writing a + * command completion or response to an internal processor, the order of + * writes has to be such that this field is written last. + */ +} __attribute__((packed)); + /* hwrm_func_qcaps */ /* * Description: This command returns capabilities of a function. The input FID @@ -2727,8 +2857,16 @@ struct hwrm_func_qcfg_output { #define HWRM_FUNC_QCFG_OUTPUT_PORT_PF_CNT_UNAVAIL UINT32_C(0x0) uint16_t dflt_vnic_id; /* The default VNIC ID assigned to a function that is being queried. */ - uint8_t unused_0; - uint8_t unused_1; + uint16_t max_mtu_configured; + /* + * This value specifies the MAX MTU that can be configured by + * host drivers. This 'max_mtu_configure' can be HW max MTU or + * OEM applications specified value. Host drivers can't + * configure the MTU greater than this value. Host drivers + * should read this value prior to configuring the MTU. FW will + * fail the host request with MTU greater than + * 'max_mtu_configured'. 
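/*
 * A minimal illustrative sketch, not from the patch: honoring the new
 * max_mtu_configured ceiling from HWRM_FUNC_QCFG before issuing a
 * configuration request. The helper name is hypothetical.
 */
#include <errno.h>
#include <stdint.h>
#include "hsi_struct_def_dpdk.h"

static int bnxt_check_mtu(const struct hwrm_func_qcfg_output *qcfg,
			  uint16_t requested_mtu)
{
	/* Firmware fails any request above this ceiling, so reject it
	 * up front instead of round-tripping to the HWRM. */
	if (requested_mtu > qcfg->max_mtu_configured)
		return -EINVAL;
	return 0;
}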
+ */ uint32_t min_bw; /* * Minimum BW allocated for this function. The HWRM will @@ -2826,7 +2964,7 @@ struct hwrm_func_qcfg_output { #define HWRM_FUNC_QCFG_OUTPUT_EVB_MODE_VEB UINT32_C(0x1) /* Virtual Ethernet Port Aggregator (VEPA) */ #define HWRM_FUNC_QCFG_OUTPUT_EVB_MODE_VEPA UINT32_C(0x2) - uint8_t unused_2; + uint8_t unused_0; uint16_t alloc_vfs; /* * The number of VFs that are allocated to the function. This is @@ -2846,7 +2984,7 @@ struct hwrm_func_qcfg_output { * The number of strict priority transmit rings out of currently * allocated TX rings to the function (alloc_tx_rings). */ - uint8_t unused_3; + uint8_t unused_1; uint8_t valid; /* * This field is used in Output records to indicate that the @@ -3199,6 +3337,14 @@ struct hwrm_func_cfg_input { */ #define HWRM_FUNC_CFG_INPUT_FLAGS_NO_AUTOCLEAR_STATISTIC \ UINT32_C(0x1000) + /* + * This bit requests that the firmware test to see if all the + * assets requested in this command (i.e. number of TX rings) + * are available. The firmware will return an error if the + * requested assets are not available. The firwmare will NOT + * reserve the assets if they are available. + */ + #define HWRM_FUNC_CFG_INPUT_FLAGS_TX_ASSETS_TEST UINT32_C(0x2000) uint32_t enables; /* This bit must be '1' for the mtu field to be configured. */ #define HWRM_FUNC_CFG_INPUT_ENABLES_MTU UINT32_C(0x1) @@ -4236,123 +4382,6 @@ struct hwrm_func_buf_unrgtr_output { */ } __attribute__((packed)); -/* hwrm_func_vf_cfg */ -/* - * Description: This command allows configuration of a VF by its driver. If this - * function is called by a PF driver, then the HWRM shall fail this command. If - * guest VLAN and/or MAC address are provided in this command, then the HWRM - * shall set up appropriate MAC/VLAN filters for the VF that is being - * configured. A VF driver should set VF MTU/MRU using this command prior to - * allocating RX VNICs or TX rings for the corresponding VF. - */ -/* Input (32 bytes) */ - -struct hwrm_func_vf_cfg_input { - uint16_t req_type; - /* - * This value indicates what type of request this is. The format for the - * rest of the command is determined by this field. - */ - uint16_t cmpl_ring; - /* - * This value indicates the what completion ring the request will be - * optionally completed on. If the value is -1, then no CR completion - * will be generated. Any other value must be a valid CR ring_id value - * for this function. - */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; - /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids - * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM - */ - uint64_t resp_addr; - /* - * This is the host address where the response will be written when the - * request is complete. This area must be 16B aligned and must be - * cleared to zero before the request is made. - */ - uint32_t enables; - /* This bit must be '1' for the mtu field to be configured. */ - #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_MTU UINT32_C(0x1) - /* This bit must be '1' for the guest_vlan field to be configured. */ - #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_GUEST_VLAN UINT32_C(0x2) - /* - * This bit must be '1' for the async_event_cr field to be configured. - */ - #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_ASYNC_EVENT_CR UINT32_C(0x4) - /* This bit must be '1' for the dflt_mac_addr field to be configured. */ - #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_DFLT_MAC_ADDR UINT32_C(0x8) - uint16_t mtu; - /* - * The maximum transmission unit requested on the function. 
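/*
 * A minimal illustrative sketch, not from the patch: the new
 * TX_ASSETS_TEST flag turns HWRM_FUNC_CFG into a dry run; firmware
 * errors out if the requested TX rings are unavailable but reserves
 * nothing on success. num_tx_rings and its enables bit come from the
 * full HSI header (the hunk above is truncated before them), and the
 * send path is elided.
 */
#include <rte_byteorder.h>
#include "hsi_struct_def_dpdk.h"

static void bnxt_probe_tx_rings(struct hwrm_func_cfg_input *req,
				uint16_t num_tx_rings)
{
	req->flags |= rte_cpu_to_le_32(
		HWRM_FUNC_CFG_INPUT_FLAGS_TX_ASSETS_TEST);
	req->enables |= rte_cpu_to_le_32(
		HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS);
	req->num_tx_rings = rte_cpu_to_le_16(num_tx_rings);
	/* Send via the usual HWRM path; an error reply means the
	 * requested assets are not available. */
}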
The HWRM - * should make sure that the mtu of the function does not exceed the mtu - * of the physical port that this function is associated with. In - * addition to requesting mtu per function, it is possible to configure - * mtu per transmit ring. By default, the mtu of each transmit ring - * associated with a function is equal to the mtu of the function. The - * HWRM should make sure that the mtu of each transmit ring that is - * assigned to a function has a valid mtu. - */ - uint16_t guest_vlan; - /* - * The guest VLAN for the function being configured. This field's format - * is same as 802.1Q Tag's Tag Control Information (TCI) format that - * includes both Priority Code Point (PCP) and VLAN Identifier (VID). - */ - uint16_t async_event_cr; - /* - * ID of the target completion ring for receiving asynchronous event - * completions. If this field is not valid, then the HWRM shall use the - * default completion ring of the function that is being configured as - * the target completion ring for providing any asynchronous event - * completions for that function. If this field is valid, then the HWRM - * shall use the completion ring identified by this ID as the target - * completion ring for providing any asynchronous event completions for - * the function that is being configured. - */ - uint8_t dflt_mac_addr[6]; - /* - * This value is the current MAC address requested by the VF driver to - * be configured on this VF. A value of 00-00-00-00-00-00 indicates no - * MAC address configuration is requested by the VF driver. The parent - * PF driver may reject or overwrite this MAC address. - */ -} __attribute__((packed)); - -/* Output (16 bytes) */ - -struct hwrm_func_vf_cfg_output { - uint16_t error_code; - /* - * Pass/Fail or error type Note: receiver to verify the in parameters, - * and fail the call with an error when appropriate - */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; - /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. - */ - uint32_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t unused_3; - uint8_t valid; - /* - * This field is used in Output records to indicate that the output is - * completely written to RAM. This field should be read as '1' to - * indicate that the output has been completely written. When writing a - * command completion or response to an internal processor, the order of - * writes has to be such that this field is written last. - */ -} __attribute__((packed)); - /* hwrm_port_phy_cfg */ /* * Description: This command configures the PHY device for the port. It allows @@ -4917,12 +4946,12 @@ struct hwrm_port_phy_qcfg_output { #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB UINT32_C(0x3e8) /* 10Mb link speed */ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10MB UINT32_C(0xffff) - uint8_t duplex; + uint8_t duplex_cfg; /* This value is indicates the duplex of the current connection. */ /* Half Duplex connection. */ - #define HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_HALF UINT32_C(0x0) + #define HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_CFG_HALF UINT32_C(0x0) /* Full duplex connection. 
*/ - #define HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_FULL UINT32_C(0x1) + #define HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_CFG_FULL UINT32_C(0x1) uint8_t pause; /* * This value is used to indicate the current pause @@ -5250,6 +5279,11 @@ struct hwrm_port_phy_qcfg_output { /* 40G_ACTIVE_CABLE */ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_ACTIVE_CABLE \ UINT32_C(0x18) + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASET UINT32_C(0x19) + /* 1G_baseSX */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASESX UINT32_C(0x1a) + /* 1G_baseCX */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASECX UINT32_C(0x1b) uint8_t media_type; /* This value represents a media type. */ /* Unknown */ @@ -5576,8 +5610,16 @@ struct hwrm_port_phy_qcfg_output { */ #define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_CLAUSE91_ENABLED \ UINT32_C(0x40) + uint8_t duplex_state; + /* + * This value is indicates the duplex of the current connection + * state. + */ + /* Half Duplex connection. */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_STATE_HALF UINT32_C(0x0) + /* Full duplex connection. */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_STATE_FULL UINT32_C(0x1) uint8_t unused_1; - uint8_t unused_2; char phy_vendor_name[16]; /* * Up to 16 bytes of null padded ASCII string representing PHY @@ -5591,10 +5633,10 @@ struct hwrm_port_phy_qcfg_output { * to null, then the vendor specific part number is not * available. */ - uint32_t unused_3; + uint32_t unused_2; + uint8_t unused_3; uint8_t unused_4; uint8_t unused_5; - uint8_t unused_6; uint8_t valid; /* * This field is used in Output records to indicate that the @@ -7314,6 +7356,14 @@ struct hwrm_vnic_cfg_input { * that is used for computing RSS hash only. */ #define HWRM_VNIC_CFG_INPUT_FLAGS_RSS_DFLT_CR_MODE UINT32_C(0x20) + /* + * When this bit is '1', the VNIC is being configured to receive + * both RoCE and non-RoCE traffic, but forward only the RoCE + * traffic further. Also, RoCE traffic can be mirrored to L2 + * driver. + */ + #define HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE \ + UINT32_C(0x40) uint32_t enables; /* * This bit must be '1' for the dflt_ring_grp field to be @@ -7523,6 +7573,13 @@ struct hwrm_vnic_qcfg_output { * is not configured. */ #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE UINT32_C(0x20) + /* + * When this bit is '1', the VNIC is configured to receive both + * RoCE and non-RoCE traffic, but forward only RoCE traffic + * further. Also RoCE traffic can be mirrored to L2 driver. + */ + #define HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE \ + UINT32_C(0x40) uint32_t unused_2; uint8_t unused_3; uint8_t unused_4; @@ -7538,6 +7595,183 @@ struct hwrm_vnic_qcfg_output { */ } __attribute__((packed)); + +/* hwrm_vnic_tpa_cfg */ +/* Description: This function is used to enable/configure TPA on the VNIC. */ +/* Input (40 bytes) */ +struct hwrm_vnic_tpa_cfg_input { + uint16_t req_type; + /* + * This value indicates what type of request this is. The format + * for the rest of the command is determined by this field. + */ + uint16_t cmpl_ring; + /* + * This value indicates the what completion ring the request + * will be optionally completed on. If the value is -1, then no + * CR completion will be generated. Any other value must be a + * valid CR ring_id value for this function. + */ + uint16_t seq_id; + /* This value indicates the command sequence number. */ + uint16_t target_id; + /* + * Target ID of this command. 
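/*
 * A minimal illustrative sketch, not from the patch: the qcfg output now
 * carries both the configured duplex (duplex_cfg, renamed above) and the
 * duplex of the current connection state (duplex_state, added above).
 * A reader of the latter:
 */
#include <stdbool.h>
#include "hsi_struct_def_dpdk.h"

static bool
bnxt_link_full_duplex(const struct hwrm_port_phy_qcfg_output *resp)
{
	return resp->duplex_state ==
	       HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_STATE_FULL;
}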
0x0 - 0xFFF8 - Used for function + * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF + * - HWRM + */ + uint64_t resp_addr; + /* + * This is the host address where the response will be written + * when the request is complete. This area must be 16B aligned + * and must be cleared to zero before the request is made. + */ + uint32_t flags; + /* + * When this bit is '1', the VNIC shall be configured to perform + * transparent packet aggregation (TPA) of non-tunneled TCP + * packets. + */ + #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA UINT32_C(0x1) + /* + * When this bit is '1', the VNIC shall be configured to perform + * transparent packet aggregation (TPA) of tunneled TCP packets. + */ + #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA UINT32_C(0x2) + /* + * When this bit is '1', the VNIC shall be configured to perform + * transparent packet aggregation (TPA) according to Windows + * Receive Segment Coalescing (RSC) rules. + */ + #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE UINT32_C(0x4) + /* + * When this bit is '1', the VNIC shall be configured to perform + * transparent packet aggregation (TPA) according to Linux + * Generic Receive Offload (GRO) rules. + */ + #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO UINT32_C(0x8) + /* + * When this bit is '1', the VNIC shall be configured to perform + * transparent packet aggregation (TPA) for TCP packets with IP + * ECN set to non-zero. + */ + #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN UINT32_C(0x10) + /* + * When this bit is '1', the VNIC shall be configured to perform + * transparent packet aggregation (TPA) for GRE tunneled TCP + * packets only if all packets have the same GRE sequence. + */ + #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ \ + UINT32_C(0x20) + /* + * When this bit is '1' and the GRO mode is enabled, the VNIC + * shall be configured to perform transparent packet aggregation + * (TPA) for TCP/IPv4 packets with consecutively increasing + * IPIDs. In other words, the last packet that is being + * aggregated to an already existing aggregation context shall + * have IPID 1 more than the IPID of the last packet that was + * aggregated in that aggregation context. + */ + #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO_IPID_CHECK UINT32_C(0x40) + /* + * When this bit is '1' and the GRO mode is enabled, the VNIC + * shall be configured to perform transparent packet aggregation + * (TPA) for TCP packets with the same TTL (IPv4) or Hop limit + * (IPv6) value. + */ + #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO_TTL_CHECK UINT32_C(0x80) + uint32_t enables; + /* This bit must be '1' for the max_agg_segs field to be configured. */ + #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS UINT32_C(0x1) + /* This bit must be '1' for the max_aggs field to be configured. */ + #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS UINT32_C(0x2) + /* + * This bit must be '1' for the max_agg_timer field to be + * configured. + */ + #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_TIMER UINT32_C(0x4) + /* This bit must be '1' for the min_agg_len field to be configured. */ + #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN UINT32_C(0x8) + uint16_t vnic_id; + /* Logical vnic ID */ + uint16_t max_agg_segs; + /* + * This is the maximum number of TCP segments that can be + * aggregated (unit is Log2). Max value is 31. 
+ */ + /* 1 segment */ + #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_1 UINT32_C(0x0) + /* 2 segments */ + #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_2 UINT32_C(0x1) + /* 4 segments */ + #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_4 UINT32_C(0x2) + /* 8 segments */ + #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_8 UINT32_C(0x3) + /* Any segment size larger than this is not valid */ + #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_MAX UINT32_C(0x1f) + uint16_t max_aggs; + /* + * This is the maximum number of aggregations this VNIC is + * allowed (unit is Log2). Max value is 7 + */ + /* 1 aggregation */ + #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_1 UINT32_C(0x0) + /* 2 aggregations */ + #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_2 UINT32_C(0x1) + /* 4 aggregations */ + #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_4 UINT32_C(0x2) + /* 8 aggregations */ + #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_8 UINT32_C(0x3) + /* 16 aggregations */ + #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_16 UINT32_C(0x4) + /* Any aggregation size larger than this is not valid */ + #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX UINT32_C(0x7) + uint8_t unused_0; + uint8_t unused_1; + uint32_t max_agg_timer; + /* + * This is the maximum amount of time allowed for an aggregation + * context to complete after it was initiated. + */ + uint32_t min_agg_len; + /* + * This is the minimum amount of payload length required to + * start an aggregation context. + */ +} __attribute__((packed)); + +/* Output (16 bytes) */ +struct hwrm_vnic_tpa_cfg_output { + uint16_t error_code; + /* + * Pass/Fail or error type Note: receiver to verify the in + * parameters, and fail the call with an error when appropriate + */ + uint16_t req_type; + /* This field returns the type of original request. */ + uint16_t seq_id; + /* This field provides original sequence number of the command. */ + uint16_t resp_len; + /* + * This field is the length of the response in bytes. The last + * byte of the response is a valid flag that will read as '1' + * when the command has been completely written to memory. + */ + uint32_t unused_0; + uint8_t unused_1; + uint8_t unused_2; + uint8_t unused_3; + uint8_t valid; + /* + * This field is used in Output records to indicate that the + * output is completely written to RAM. This field should be + * read as '1' to indicate that the output has been completely + * written. When writing a command completion or response to an + * internal processor, the order of writes has to be such that + * this field is written last. + */ +} __attribute__((packed)); + /* hwrm_vnic_rss_cfg */ /* Description: This function is used to enable RSS configuration. */ /* Input (48 bytes) */ @@ -7651,7 +7885,6 @@ struct hwrm_vnic_rss_cfg_output { * the VNIC. */ /* Input (40 bytes) */ - struct hwrm_vnic_plcmodes_cfg_input { uint16_t req_type; /* @@ -7770,7 +8003,6 @@ struct hwrm_vnic_plcmodes_cfg_input { } __attribute__((packed)); /* Output (16 bytes) */ - struct hwrm_vnic_plcmodes_cfg_output { uint16_t error_code; /* @@ -7807,7 +8039,6 @@ struct hwrm_vnic_plcmodes_cfg_output { * of the VNIC. */ /* Input (24 bytes) */ - struct hwrm_vnic_plcmodes_qcfg_input { uint16_t req_type; /* @@ -7840,7 +8071,6 @@ struct hwrm_vnic_plcmodes_qcfg_input { } __attribute__((packed)); /* Output (24 bytes) */ - struct hwrm_vnic_plcmodes_qcfg_output { uint16_t error_code; /* @@ -8065,182 +8295,6 @@ struct hwrm_vnic_rss_cos_lb_ctx_free_output { */ } __attribute__((packed)); -/* hwrm_vnic_tpa_cfg */ -/* Description: This function is used to enable/configure TPA on the VNIC. 
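/*
 * A minimal illustrative sketch, not from the patch: filling the
 * relocated hwrm_vnic_tpa_cfg request. max_agg_segs and max_aggs are
 * log2 encodings per the comments above; the flag choice and limits
 * here are examples, and the send path is elided.
 */
#include <rte_byteorder.h>
#include "hsi_struct_def_dpdk.h"

static void bnxt_fill_tpa_req(struct hwrm_vnic_tpa_cfg_input *req,
			      uint16_t fw_vnic_id)
{
	req->flags = rte_cpu_to_le_32(
		HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
		HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
		HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO);
	req->enables = rte_cpu_to_le_32(
		HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
		HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS);
	req->vnic_id = rte_cpu_to_le_16(fw_vnic_id);
	/* Log2 units: 8 segments per aggregation, 16 aggregations. */
	req->max_agg_segs = rte_cpu_to_le_16(
		HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_8);
	req->max_aggs = rte_cpu_to_le_16(
		HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_16);
}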
*/ -/* Input (40 bytes) */ -struct hwrm_vnic_tpa_cfg_input { - uint16_t req_type; - /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. - */ - uint16_t cmpl_ring; - /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. - */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; - /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM - */ - uint64_t resp_addr; - /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. - */ - uint32_t flags; - /* - * When this bit is '1', the VNIC shall be configured to perform - * transparent packet aggregation (TPA) of non-tunneled TCP - * packets. - */ - #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA UINT32_C(0x1) - /* - * When this bit is '1', the VNIC shall be configured to perform - * transparent packet aggregation (TPA) of tunneled TCP packets. - */ - #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA UINT32_C(0x2) - /* - * When this bit is '1', the VNIC shall be configured to perform - * transparent packet aggregation (TPA) according to Windows - * Receive Segment Coalescing (RSC) rules. - */ - #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE UINT32_C(0x4) - /* - * When this bit is '1', the VNIC shall be configured to perform - * transparent packet aggregation (TPA) according to Linux - * Generic Receive Offload (GRO) rules. - */ - #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO UINT32_C(0x8) - /* - * When this bit is '1', the VNIC shall be configured to perform - * transparent packet aggregation (TPA) for TCP packets with IP - * ECN set to non-zero. - */ - #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN UINT32_C(0x10) - /* - * When this bit is '1', the VNIC shall be configured to perform - * transparent packet aggregation (TPA) for GRE tunneled TCP - * packets only if all packets have the same GRE sequence. - */ - #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ \ - UINT32_C(0x20) - /* - * When this bit is '1' and the GRO mode is enabled, the VNIC - * shall be configured to perform transparent packet aggregation - * (TPA) for TCP/IPv4 packets with consecutively increasing - * IPIDs. In other words, the last packet that is being - * aggregated to an already existing aggregation context shall - * have IPID 1 more than the IPID of the last packet that was - * aggregated in that aggregation context. - */ - #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO_IPID_CHECK UINT32_C(0x40) - /* - * When this bit is '1' and the GRO mode is enabled, the VNIC - * shall be configured to perform transparent packet aggregation - * (TPA) for TCP packets with the same TTL (IPv4) or Hop limit - * (IPv6) value. - */ - #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO_TTL_CHECK UINT32_C(0x80) - uint32_t enables; - /* This bit must be '1' for the max_agg_segs field to be configured. */ - #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS UINT32_C(0x1) - /* This bit must be '1' for the max_aggs field to be configured. */ - #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS UINT32_C(0x2) - /* - * This bit must be '1' for the max_agg_timer field to be - * configured. 
- */ - #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_TIMER UINT32_C(0x4) - /* This bit must be '1' for the min_agg_len field to be configured. */ - #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN UINT32_C(0x8) - uint16_t vnic_id; - /* Logical vnic ID */ - uint16_t max_agg_segs; - /* - * This is the maximum number of TCP segments that can be - * aggregated (unit is Log2). Max value is 31. - */ - /* 1 segment */ - #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_1 UINT32_C(0x0) - /* 2 segments */ - #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_2 UINT32_C(0x1) - /* 4 segments */ - #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_4 UINT32_C(0x2) - /* 8 segments */ - #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_8 UINT32_C(0x3) - /* Any segment size larger than this is not valid */ - #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_MAX UINT32_C(0x1f) - uint16_t max_aggs; - /* - * This is the maximum number of aggregations this VNIC is - * allowed (unit is Log2). Max value is 7 - */ - /* 1 aggregation */ - #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_1 UINT32_C(0x0) - /* 2 aggregations */ - #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_2 UINT32_C(0x1) - /* 4 aggregations */ - #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_4 UINT32_C(0x2) - /* 8 aggregations */ - #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_8 UINT32_C(0x3) - /* 16 aggregations */ - #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_16 UINT32_C(0x4) - /* Any aggregation size larger than this is not valid */ - #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX UINT32_C(0x7) - uint8_t unused_0; - uint8_t unused_1; - uint32_t max_agg_timer; - /* - * This is the maximum amount of time allowed for an aggregation - * context to complete after it was initiated. - */ - uint32_t min_agg_len; - /* - * This is the minimum amount of payload length required to - * start an aggregation context. - */ -} __attribute__((packed)); - -/* Output (16 bytes) */ -struct hwrm_vnic_tpa_cfg_output { - uint16_t error_code; - /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate - */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; - /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. - */ - uint32_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t unused_3; - uint8_t valid; - /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. - */ -} __attribute__((packed)); - /* hwrm_ring_alloc */ /* * Description: This command allocates and does basic preparation for a ring. 
@@ -9046,6 +9100,12 @@ struct hwrm_cfa_l2_filter_alloc_input { * datagram payload */ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPGRE UINT32_C(0x8) + /* + * IPV4 over virtual eXtensible Local Area + * Network (IPV4oVXLAN) + */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_V4 \ + UINT32_C(0x9) /* Any tunneled traffic */ #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL \ UINT32_C(0xff) @@ -9471,6 +9531,25 @@ struct hwrm_cfa_l2_set_rx_mask_output { */ } __attribute__((packed)); +/* Command specific Error Codes (8 bytes) */ +struct hwrm_cfa_l2_set_rx_mask_cmd_err { + uint8_t code; + /* + * command specific error codes that goes to the cmd_err field + * in Common HWRM Error Response. + */ + /* Unknown error */ + #define HWRM_CFA_L2_SET_RX_MASK_CMD_ERR_CODE_UNKNOWN UINT32_C(0x0) + /* + * Unable to complete operation due to conflict + * with Ntuple Filter + */ + #define \ + HWRM_CFA_L2_SET_RX_MASK_CMD_ERR_CODE_NTUPLE_FILTER_CONFLICT_ERR \ + UINT32_C(0x1) + uint8_t unused_0[7]; +} __attribute__((packed)); + /* hwrm_cfa_vlan_antispoof_cfg */ /* Description: Configures vlan anti-spoof filters for VF. */ /* Input (32 bytes) */ @@ -9550,6 +9629,1010 @@ struct hwrm_cfa_vlan_antispoof_cfg_output { */ }; +/* hwrm_cfa_ntuple_filter_alloc */ +/* + * Description: This is a ntuple filter that uses fields from L4/L3 header and + * optionally fields from L2. The ntuple filters apply to receive traffic only. + * All L2/L3/L4 header fields are specified in network byte order. These filters + * can be used for Receive Flow Steering (RFS). # For ethertype value, only + * 0x0800 (IPv4) and 0x86dd (IPv6) shall be supported for ntuple filters. # If a + * field specified in this command is not enabled as a valid field, then that + * field shall not be used in matching packet header fields against this filter. + */ +/* Input (128 bytes) */ +struct hwrm_cfa_ntuple_filter_alloc_input { + uint16_t req_type; + /* + * This value indicates what type of request this is. The format + * for the rest of the command is determined by this field. + */ + uint16_t cmpl_ring; + /* + * This value indicates the what completion ring the request + * will be optionally completed on. If the value is -1, then no + * CR completion will be generated. Any other value must be a + * valid CR ring_id value for this function. + */ + uint16_t seq_id; + /* This value indicates the command sequence number. */ + uint16_t target_id; + /* + * Target ID of this command. 0x0 - 0xFFF8 - Used for function + * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF + * - HWRM + */ + uint64_t resp_addr; + /* + * This is the host address where the response will be written + * when the request is complete. This area must be 16B aligned + * and must be cleared to zero before the request is made. + */ + uint32_t flags; + /* + * Setting of this flag indicates the applicability to the + * loopback path. + */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_LOOPBACK \ + UINT32_C(0x1) + /* + * Setting of this flag indicates drop action. If this flag is + * not set, then it should be considered accept action. + */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP UINT32_C(0x2) + /* + * Setting of this flag indicates that a meter is expected to be + * attached to this flow. This hint can be used when choosing + * the action record format required for the flow. + */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_METER UINT32_C(0x4) + uint32_t enables; + /* This bit must be '1' for the l2_filter_id field to be configured. 
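/*
 * A minimal illustrative sketch, not from the patch: decoding the new
 * command-specific error block for HWRM_CFA_L2_SET_RX_MASK. Obtaining
 * cmd_err from the common HWRM error response is assumed.
 */
#include "hsi_struct_def_dpdk.h"

static const char *
bnxt_rx_mask_strerror(const struct hwrm_cfa_l2_set_rx_mask_cmd_err *cmd_err)
{
	switch (cmd_err->code) {
	case HWRM_CFA_L2_SET_RX_MASK_CMD_ERR_CODE_NTUPLE_FILTER_CONFLICT_ERR:
		return "rx mask conflicts with an existing ntuple filter";
	case HWRM_CFA_L2_SET_RX_MASK_CMD_ERR_CODE_UNKNOWN:
	default:
		return "unknown rx mask error";
	}
}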
*/ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID \ + UINT32_C(0x1) + /* This bit must be '1' for the ethertype field to be configured. */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE \ + UINT32_C(0x2) + /* This bit must be '1' for the tunnel_type field to be configured. */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE \ + UINT32_C(0x4) + /* This bit must be '1' for the src_macaddr field to be configured. */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR \ + UINT32_C(0x8) + /* This bit must be '1' for the ipaddr_type field to be configured. */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE \ + UINT32_C(0x10) + /* This bit must be '1' for the src_ipaddr field to be configured. */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR \ + UINT32_C(0x20) + /* + * This bit must be '1' for the src_ipaddr_mask field to be + * configured. + */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK \ + UINT32_C(0x40) + /* This bit must be '1' for the dst_ipaddr field to be configured. */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR \ + UINT32_C(0x80) + /* + * This bit must be '1' for the dst_ipaddr_mask field to be + * configured. + */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK \ + UINT32_C(0x100) + /* This bit must be '1' for the ip_protocol field to be configured. */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL \ + UINT32_C(0x200) + /* This bit must be '1' for the src_port field to be configured. */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT \ + UINT32_C(0x400) + /* + * This bit must be '1' for the src_port_mask field to be + * configured. + */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK \ + UINT32_C(0x800) + /* This bit must be '1' for the dst_port field to be configured. */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT \ + UINT32_C(0x1000) + /* + * This bit must be '1' for the dst_port_mask field to be + * configured. + */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK \ + UINT32_C(0x2000) + /* This bit must be '1' for the pri_hint field to be configured. */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_PRI_HINT \ + UINT32_C(0x4000) + /* + * This bit must be '1' for the ntuple_filter_id field to be + * configured. + */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_NTUPLE_FILTER_ID \ + UINT32_C(0x8000) + /* This bit must be '1' for the dst_id field to be configured. */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID \ + UINT32_C(0x10000) + /* + * This bit must be '1' for the mirror_vnic_id field to be + * configured. + */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID \ + UINT32_C(0x20000) + /* This bit must be '1' for the dst_macaddr field to be configured. */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_MACADDR \ + UINT32_C(0x40000) + uint64_t l2_filter_id; + /* + * This value identifies a set of CFA data structures used for + * an L2 context. + */ + uint8_t src_macaddr[6]; + /* + * This value indicates the source MAC address in the Ethernet + * header. + */ + uint16_t ethertype; + /* This value indicates the ethertype in the Ethernet header. */ + uint8_t ip_addr_type; + /* + * This value indicates the type of IP address. 4 - IPv4 6 - + * IPv6 All others are invalid. 
+ */ + /* invalid */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_UNKNOWN \ + UINT32_C(0x0) + /* IPv4 */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 \ + UINT32_C(0x4) + /* IPv6 */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 \ + UINT32_C(0x6) + uint8_t ip_protocol; + /* + * The value of protocol filed in IP header. Applies to UDP and + * TCP traffic. 6 - TCP 17 - UDP + */ + /* invalid */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_PROTOCOL_UNKNOWN \ + UINT32_C(0x0) + /* TCP */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_PROTOCOL_TCP \ + UINT32_C(0x6) + /* UDP */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_PROTOCOL_UDP \ + UINT32_C(0x11) + uint16_t dst_id; + /* + * If set, this value shall represent the Logical VNIC ID of the + * destination VNIC for the RX path and network port id of the + * destination port for the TX path. + */ + uint16_t mirror_vnic_id; + /* Logical VNIC ID of the VNIC where traffic is mirrored. */ + uint8_t tunnel_type; + /* + * This value indicates the tunnel type for this filter. If this + * field is not specified, then the filter shall apply to both + * non-tunneled and tunneled packets. If this field conflicts + * with the tunnel_type specified in the l2_filter_id, then the + * HWRM shall return an error for this command. + */ + /* Non-tunnel */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NONTUNNEL \ + UINT32_C(0x0) + /* Virtual eXtensible Local Area Network (VXLAN) */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN \ + UINT32_C(0x1) + /* + * Network Virtualization Generic Routing + * Encapsulation (NVGRE) + */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NVGRE \ + UINT32_C(0x2) + /* + * Generic Routing Encapsulation (GRE) inside + * Ethernet payload + */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_L2GRE \ + UINT32_C(0x3) + /* IP in IP */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPIP \ + UINT32_C(0x4) + /* Generic Network Virtualization Encapsulation (Geneve) */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_GENEVE \ + UINT32_C(0x5) + /* Multi-Protocol Lable Switching (MPLS) */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_MPLS \ + UINT32_C(0x6) + /* Stateless Transport Tunnel (STT) */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_STT UINT32_C(0x7) + /* + * Generic Routing Encapsulation (GRE) inside IP + * datagram payload + */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPGRE \ + UINT32_C(0x8) + /* Any tunneled traffic */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL \ + UINT32_C(0xff) + uint8_t pri_hint; + /* + * This hint is provided to help in placing the filter in the + * filter table. + */ + /* No preference */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_PRI_HINT_NO_PREFER \ + UINT32_C(0x0) + /* Above the given filter */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_PRI_HINT_ABOVE UINT32_C(0x1) + /* Below the given filter */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_PRI_HINT_BELOW UINT32_C(0x2) + /* As high as possible */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_PRI_HINT_HIGHEST \ + UINT32_C(0x3) + /* As low as possible */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_PRI_HINT_LOWEST UINT32_C(0x4) + uint32_t src_ipaddr[4]; + /* + * The value of source IP address to be used in filtering. For + * IPv4, first four bytes represent the IP address. + */ + uint32_t src_ipaddr_mask[4]; + /* + * The value of source IP address mask to be used in filtering. 
+ * For IPv4, first four bytes represent the IP address mask. + */ + uint32_t dst_ipaddr[4]; + /* + * The value of destination IP address to be used in filtering. + * For IPv4, first four bytes represent the IP address. + */ + uint32_t dst_ipaddr_mask[4]; + /* + * The value of destination IP address mask to be used in + * filtering. For IPv4, first four bytes represent the IP + * address mask. + */ + uint16_t src_port; + /* + * The value of source port to be used in filtering. Applies to + * UDP and TCP traffic. + */ + uint16_t src_port_mask; + /* + * The value of source port mask to be used in filtering. + * Applies to UDP and TCP traffic. + */ + uint16_t dst_port; + /* + * The value of destination port to be used in filtering. + * Applies to UDP and TCP traffic. + */ + uint16_t dst_port_mask; + /* + * The value of destination port mask to be used in filtering. + * Applies to UDP and TCP traffic. + */ + uint64_t ntuple_filter_id_hint; + /* This is the ID of the filter that goes along with the pri_hint. */ +} __attribute__((packed)); + +/* Output (24 bytes) */ +struct hwrm_cfa_ntuple_filter_alloc_output { + uint16_t error_code; + /* + * Pass/Fail or error type Note: receiver to verify the in + * parameters, and fail the call with an error when appropriate + */ + uint16_t req_type; + /* This field returns the type of original request. */ + uint16_t seq_id; + /* This field provides original sequence number of the command. */ + uint16_t resp_len; + /* + * This field is the length of the response in bytes. The last + * byte of the response is a valid flag that will read as '1' + * when the command has been completely written to memory. + */ + uint64_t ntuple_filter_id; + /* This value is an opaque id into CFA data structures. */ + uint32_t flow_id; + /* + * This is the ID of the flow associated with this filter. This + * value shall be used to match and associate the flow + * identifier returned in completion records. A value of + * 0xFFFFFFFF shall indicate no flow id. + */ + uint8_t unused_0; + uint8_t unused_1; + uint8_t unused_2; + uint8_t valid; + /* + * This field is used in Output records to indicate that the + * output is completely written to RAM. This field should be + * read as '1' to indicate that the output has been completely + * written. When writing a command completion or response to an + * internal processor, the order of writes has to be such that + * this field is written last. + */ +} __attribute__((packed)); + +/* Command specific Error Codes (8 bytes) */ +struct hwrm_cfa_ntuple_filter_alloc_cmd_err { + uint8_t code; + /* + * command specific error codes that goes to the cmd_err field + * in Common HWRM Error Response. + */ + /* Unknown error */ + #define HWRM_CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_UNKNOWN UINT32_C(0x0) + /* + * Unable to complete operation due to conflict + * with Rx Mask VLAN + */ + #define \ + HWRM_CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_RX_MASK_VLAN_CONFLICT_ERR \ + UINT32_C(0x1) + uint8_t unused_0[7]; +} __attribute__((packed)); + +/* hwrm_cfa_ntuple_filter_free */ +/* Description: Free an ntuple filter */ +/* Input (24 bytes) */ +struct hwrm_cfa_ntuple_filter_free_input { + uint16_t req_type; + /* + * This value indicates what type of request this is. The format + * for the rest of the command is determined by this field. + */ + uint16_t cmpl_ring; + /* + * This value indicates the what completion ring the request + * will be optionally completed on. If the value is -1, then no + * CR completion will be generated. 
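/*
 * A minimal illustrative sketch, not from the patch: a receive-side
 * ntuple filter keyed on destination IPv4 address and TCP destination
 * port. Header fields are network byte order per the description above;
 * l2_filter_id and vnic_id are placeholders, and the send path is
 * elided.
 */
#include <stdint.h>
#include <rte_byteorder.h>
#include "hsi_struct_def_dpdk.h"

static void
bnxt_fill_ntuple_req(struct hwrm_cfa_ntuple_filter_alloc_input *req,
		     uint64_t l2_filter_id, uint16_t vnic_id,
		     rte_be32_t dst_ip, rte_be16_t dst_port)
{
	req->l2_filter_id = l2_filter_id;            /* opaque CFA id */
	req->ethertype = rte_cpu_to_be_16(0x0800);   /* IPv4, per spec */
	req->ip_addr_type =
		HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
	req->ip_protocol =
		HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_PROTOCOL_TCP;
	req->dst_ipaddr[0] = dst_ip;          /* first 4 bytes for IPv4 */
	req->dst_ipaddr_mask[0] = rte_cpu_to_be_32(UINT32_MAX);
	req->dst_port = dst_port;
	req->dst_port_mask = rte_cpu_to_be_16(UINT16_MAX);
	req->dst_id = rte_cpu_to_le_16(vnic_id);     /* RX: target VNIC */
	req->enables = rte_cpu_to_le_32(
		HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID |
		HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE |
		HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE |
		HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL |
		HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR |
		HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK |
		HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT |
		HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK |
		HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID);
}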
Any other value must be a + * valid CR ring_id value for this function. + */ + uint16_t seq_id; + /* This value indicates the command sequence number. */ + uint16_t target_id; + /* + * Target ID of this command. 0x0 - 0xFFF8 - Used for function + * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF + * - HWRM + */ + uint64_t resp_addr; + /* + * This is the host address where the response will be written + * when the request is complete. This area must be 16B aligned + * and must be cleared to zero before the request is made. + */ + uint64_t ntuple_filter_id; + /* This value is an opaque id into CFA data structures. */ +} __attribute__((packed)); + +/* Output (16 bytes) */ +struct hwrm_cfa_ntuple_filter_free_output { + uint16_t error_code; + /* + * Pass/Fail or error type Note: receiver to verify the in + * parameters, and fail the call with an error when appropriate + */ + uint16_t req_type; + /* This field returns the type of original request. */ + uint16_t seq_id; + /* This field provides original sequence number of the command. */ + uint16_t resp_len; + /* + * This field is the length of the response in bytes. The last + * byte of the response is a valid flag that will read as '1' + * when the command has been completely written to memory. + */ + uint32_t unused_0; + uint8_t unused_1; + uint8_t unused_2; + uint8_t unused_3; + uint8_t valid; + /* + * This field is used in Output records to indicate that the + * output is completely written to RAM. This field should be + * read as '1' to indicate that the output has been completely + * written. When writing a command completion or response to an + * internal processor, the order of writes has to be such that + * this field is written last. + */ +} __attribute__((packed)); + +/* hwrm_cfa_ntuple_filter_cfg */ +/* + * Description: Configure an ntuple filter with a new destination VNIC and/or + * meter. + */ +/* Input (48 bytes) */ +struct hwrm_cfa_ntuple_filter_cfg_input { + uint16_t req_type; + /* + * This value indicates what type of request this is. The format + * for the rest of the command is determined by this field. + */ + uint16_t cmpl_ring; + /* + * This value indicates the what completion ring the request + * will be optionally completed on. If the value is -1, then no + * CR completion will be generated. Any other value must be a + * valid CR ring_id value for this function. + */ + uint16_t seq_id; + /* This value indicates the command sequence number. */ + uint16_t target_id; + /* + * Target ID of this command. 0x0 - 0xFFF8 - Used for function + * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF + * - HWRM + */ + uint64_t resp_addr; + /* + * This is the host address where the response will be written + * when the request is complete. This area must be 16B aligned + * and must be cleared to zero before the request is made. + */ + uint32_t enables; + /* This bit must be '1' for the new_dst_id field to be configured. */ + #define HWRM_CFA_NTUPLE_FILTER_CFG_INPUT_ENABLES_NEW_DST_ID \ + UINT32_C(0x1) + /* + * This bit must be '1' for the new_mirror_vnic_id field to be + * configured. + */ + #define HWRM_CFA_NTUPLE_FILTER_CFG_INPUT_ENABLES_NEW_MIRROR_VNIC_ID \ + UINT32_C(0x2) + /* + * This bit must be '1' for the new_meter_instance_id field to + * be configured. + */ + #define HWRM_CFA_NTUPLE_FILTER_CFG_INPUT_ENABLES_NEW_METER_INSTANCE_ID \ + UINT32_C(0x4) + uint32_t unused_0; + uint64_t ntuple_filter_id; + /* This value is an opaque id into CFA data structures. 
*/ + uint32_t new_dst_id; + /* + * If set, this value shall represent the new Logical VNIC ID of + * the destination VNIC for the RX path and new network port id + * of the destination port for the TX path. + */ + uint32_t new_mirror_vnic_id; + /* New Logical VNIC ID of the VNIC where traffic is mirrored. */ + uint16_t new_meter_instance_id; + /* + * New meter to attach to the flow. Specifying the invalid + * instance ID is used to remove any existing meter from the + * flow. + */ + /* + * A value of 0xfff is considered invalid and + * implies the instance is not configured. + */ + #define HWRM_CFA_NTUPLE_FILTER_CFG_INPUT_NEW_METER_INSTANCE_ID_INVALID \ + UINT32_C(0xffff) + uint16_t unused_1[3]; +} __attribute__((packed)); + +/* Output (16 bytes) */ +struct hwrm_cfa_ntuple_filter_cfg_output { + uint16_t error_code; + /* + * Pass/Fail or error type Note: receiver to verify the in + * parameters, and fail the call with an error when appropriate + */ + uint16_t req_type; + /* This field returns the type of original request. */ + uint16_t seq_id; + /* This field provides original sequence number of the command. */ + uint16_t resp_len; + /* + * This field is the length of the response in bytes. The last + * byte of the response is a valid flag that will read as '1' + * when the command has been completely written to memory. + */ + uint32_t unused_0; + uint8_t unused_1; + uint8_t unused_2; + uint8_t unused_3; + uint8_t valid; + /* + * This field is used in Output records to indicate that the + * output is completely written to RAM. This field should be + * read as '1' to indicate that the output has been completely + * written. When writing a command completion or response to an + * internal processor, the order of writes has to be such that + * this field is written last. + */ +} __attribute__((packed)); + +/* hwrm_cfa_em_flow_alloc */ +/* + * Description: This is a generic Exact Match (EM) flow that uses fields from + * L4/L3/L2 headers. The EM flows apply to transmit and receive traffic. All + * L2/L3/L4 header fields are specified in network byte order. For each EM flow, + * there is an associated set of actions specified. For tunneled packets, all + * L2/L3/L4 fields specified are fields of inner headers unless otherwise + * specified. # If a field specified in this command is not enabled as a valid + * field, then that field shall not be used in matching packet header fields + * against this EM flow entry. + */ +/* Input (112 bytes) */ +struct hwrm_cfa_em_flow_alloc_input { + uint16_t req_type; + /* + * This value indicates what type of request this is. The format + * for the rest of the command is determined by this field. + */ + uint16_t cmpl_ring; + /* + * This value indicates the what completion ring the request + * will be optionally completed on. If the value is -1, then no + * CR completion will be generated. Any other value must be a + * valid CR ring_id value for this function. + */ + uint16_t seq_id; + /* This value indicates the command sequence number. */ + uint16_t target_id; + /* + * Target ID of this command. 0x0 - 0xFFF8 - Used for function + * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF + * - HWRM + */ + uint64_t resp_addr; + /* + * This is the host address where the response will be written + * when the request is complete. This area must be 16B aligned + * and must be cleared to zero before the request is made. + */ + uint32_t flags; + /* + * Enumeration denoting the RX, TX type of the resource. 
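/*
 * A minimal illustrative sketch, not from the patch: retargeting an
 * existing ntuple filter to a new destination VNIC without re-creating
 * it. The filter id comes from the alloc response above; the send path
 * is elided.
 */
#include <rte_byteorder.h>
#include "hsi_struct_def_dpdk.h"

static void
bnxt_fill_ntuple_retarget(struct hwrm_cfa_ntuple_filter_cfg_input *req,
			  uint64_t ntuple_filter_id, uint32_t new_vnic_id)
{
	req->ntuple_filter_id = ntuple_filter_id;   /* opaque CFA id */
	req->new_dst_id = rte_cpu_to_le_32(new_vnic_id);
	req->enables = rte_cpu_to_le_32(
		HWRM_CFA_NTUPLE_FILTER_CFG_INPUT_ENABLES_NEW_DST_ID);
}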
This + * enumeration is used for resources that are similar for both + * TX and RX paths of the chip. + */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH UINT32_C(0x1) + /* tx path */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_TX \ + (UINT32_C(0x0) << 0) + /* rx path */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX \ + (UINT32_C(0x1) << 0) + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_LAST \ + CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX + /* + * Setting of this flag indicates enabling of a byte counter for + * a given flow. + */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_BYTE_CTR UINT32_C(0x2) + /* + * Setting of this flag indicates enabling of a packet counter + * for a given flow. + */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PKT_CTR UINT32_C(0x4) + /* + * Setting of this flag indicates de-capsulation action for the + * given flow. + */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DECAP UINT32_C(0x8) + /* + * Setting of this flag indicates encapsulation action for the + * given flow. + */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_ENCAP UINT32_C(0x10) + /* + * Setting of this flag indicates drop action. If this flag is + * not set, then it should be considered accept action. + */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DROP UINT32_C(0x20) + /* + * Setting of this flag indicates that a meter is expected to be + * attached to this flow. This hint can be used when choosing + * the action record format required for the flow. + */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_METER UINT32_C(0x40) + uint32_t enables; + /* This bit must be '1' for the l2_filter_id field to be configured. */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID UINT32_C(0x1) + /* This bit must be '1' for the tunnel_type field to be configured. */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_TUNNEL_TYPE UINT32_C(0x2) + /* This bit must be '1' for the tunnel_id field to be configured. */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_TUNNEL_ID UINT32_C(0x4) + /* This bit must be '1' for the src_macaddr field to be configured. */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR UINT32_C(0x8) + /* This bit must be '1' for the dst_macaddr field to be configured. */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR UINT32_C(0x10) + /* This bit must be '1' for the ovlan_vid field to be configured. */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID UINT32_C(0x20) + /* This bit must be '1' for the ivlan_vid field to be configured. */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IVLAN_VID UINT32_C(0x40) + /* This bit must be '1' for the ethertype field to be configured. */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE UINT32_C(0x80) + /* This bit must be '1' for the src_ipaddr field to be configured. */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR UINT32_C(0x100) + /* This bit must be '1' for the dst_ipaddr field to be configured. */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR UINT32_C(0x200) + /* This bit must be '1' for the ipaddr_type field to be configured. */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE UINT32_C(0x400) + /* This bit must be '1' for the ip_protocol field to be configured. */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL UINT32_C(0x800) + /* This bit must be '1' for the src_port field to be configured. */ + #define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT UINT32_C(0x1000) + /* This bit must be '1' for the dst_port field to be configured. 
+	#define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT	UINT32_C(0x2000)
+	/* This bit must be '1' for the dst_id field to be configured. */
+	#define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID	UINT32_C(0x4000)
+	/*
+	 * This bit must be '1' for the mirror_vnic_id field to be
+	 * configured.
+	 */
+	#define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID \
+		UINT32_C(0x8000)
+	/*
+	 * This bit must be '1' for the encap_record_id field to be
+	 * configured.
+	 */
+	#define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ENCAP_RECORD_ID \
+		UINT32_C(0x10000)
+	/*
+	 * This bit must be '1' for the meter_instance_id field to be
+	 * configured.
+	 */
+	#define HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_METER_INSTANCE_ID \
+		UINT32_C(0x20000)
+	uint64_t l2_filter_id;
+	/*
+	 * This value identifies a set of CFA data structures used for
+	 * an L2 context.
+	 */
+	uint8_t tunnel_type;
+	/* Tunnel Type. */
+	/* Non-tunnel */
+	#define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_NONTUNNEL \
+		UINT32_C(0x0)
+	/* Virtual eXtensible Local Area Network (VXLAN) */
+	#define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_VXLAN	UINT32_C(0x1)
+	/*
+	 * Network Virtualization Generic Routing
+	 * Encapsulation (NVGRE)
+	 */
+	#define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_NVGRE	UINT32_C(0x2)
+	/*
+	 * Generic Routing Encapsulation (GRE) inside
+	 * Ethernet payload
+	 */
+	#define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_L2GRE	UINT32_C(0x3)
+	/* IP in IP */
+	#define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_IPIP	UINT32_C(0x4)
+	/* Generic Network Virtualization Encapsulation (Geneve) */
+	#define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_GENEVE	UINT32_C(0x5)
+	/* Multi-Protocol Label Switching (MPLS) */
+	#define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_MPLS	UINT32_C(0x6)
+	/* Stateless Transport Tunnel (STT) */
+	#define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_STT	UINT32_C(0x7)
+	/*
+	 * Generic Routing Encapsulation (GRE) inside IP
+	 * datagram payload
+	 */
+	#define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_IPGRE	UINT32_C(0x8)
+	/*
+	 * IPV4 over virtual eXtensible Local Area
+	 * Network (IPV4oVXLAN)
+	 */
+	#define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_V4 UINT32_C(0x9)
+	/* Any tunneled traffic */
+	#define HWRM_CFA_EM_FLOW_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL \
+		UINT32_C(0xff)
+	uint8_t unused_0;
+	uint16_t unused_1;
+	uint32_t tunnel_id;
+	/*
+	 * Tunnel identifier. Virtual Network Identifier (VNI). Only
+	 * valid with tunnel_types VXLAN, NVGRE, and Geneve. Only lower
+	 * 24-bits of VNI field are used in setting up the filter.
+	 */
+	uint8_t src_macaddr[6];
+	/*
+	 * This value indicates the source MAC address in the Ethernet
+	 * header.
+	 */
+	uint16_t meter_instance_id;
+	/* The meter instance to attach to the flow. */
+	/*
+	 * A value of 0xffff is considered invalid and
+	 * implies the instance is not configured.
+	 */
+	#define HWRM_CFA_EM_FLOW_ALLOC_INPUT_METER_INSTANCE_ID_INVALID \
+		UINT32_C(0xffff)
+	uint8_t dst_macaddr[6];
+	/*
+	 * This value indicates the destination MAC address in the
+	 * Ethernet header.
+	 */
+	uint16_t ovlan_vid;
+	/*
+	 * This value indicates the VLAN ID of the outer VLAN tag in the
+	 * Ethernet header.
+	 */
+	uint16_t ivlan_vid;
+	/*
+	 * This value indicates the VLAN ID of the inner VLAN tag in the
+	 * Ethernet header.
+	 */
+	uint16_t ethertype;
+	/* This value indicates the ethertype in the Ethernet header. */
+	uint8_t ip_addr_type;
+	/*
+	 * This value indicates the type of IP address. 4 - IPv4 6 -
+	 * IPv6 All others are invalid.
+	 */
+	/* invalid */
+	#define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_UNKNOWN UINT32_C(0x0)
+	/* IPv4 */
+	#define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV4	UINT32_C(0x4)
+	/* IPv6 */
+	#define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6	UINT32_C(0x6)
+	uint8_t ip_protocol;
+	/*
+	 * The value of the protocol field in the IP header. Applies to
+	 * UDP and TCP traffic. 6 - TCP 17 - UDP
+	 */
+	/* invalid */
+	#define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_PROTOCOL_UNKNOWN UINT32_C(0x0)
+	/* TCP */
+	#define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_PROTOCOL_TCP	UINT32_C(0x6)
+	/* UDP */
+	#define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_PROTOCOL_UDP	UINT32_C(0x11)
+	uint8_t unused_2;
+	uint8_t unused_3;
+	uint32_t src_ipaddr[4];
+	/*
+	 * The value of source IP address to be used in filtering. For
+	 * IPv4, first four bytes represent the IP address.
+	 */
+	uint32_t dst_ipaddr[4];
+	/*
+	 * The value of destination IP address (big endian) to be used
+	 * in filtering. For IPv4, first four bytes represent the IP
+	 * address.
+	 */
+	uint16_t src_port;
+	/*
+	 * The value of source port to be used in filtering. Applies to
+	 * UDP and TCP traffic.
+	 */
+	uint16_t dst_port;
+	/*
+	 * The value of destination port to be used in filtering.
+	 * Applies to UDP and TCP traffic.
+	 */
+	uint16_t dst_id;
+	/*
+	 * If set, this value shall represent the Logical VNIC ID of the
+	 * destination VNIC for the RX path and network port id of the
+	 * destination port for the TX path.
+	 */
+	uint16_t mirror_vnic_id;
+	/* Logical VNIC ID of the VNIC where traffic is mirrored. */
+	uint32_t encap_record_id;
+	/* Logical ID of the encapsulation record. */
+	uint32_t unused_4;
+} __attribute__((packed));
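
As a usage sketch (not taken from this patch): populate the request for an RX exact-match flow on inner IPv4/TCP destination address and port, steering matches to a VNIC. Per the description above, L3/L4 match fields are passed in network byte order, while control fields are little-endian; the l2_filter_id and vnic_id values are hypothetical inputs from earlier HWRM calls.

	/* Sketch: build an RX EM flow matching dst IP + dst TCP port. */
	static void
	bnxt_sketch_em_flow_alloc_req(struct hwrm_cfa_em_flow_alloc_input *req,
				      uint64_t l2_filter_id, rte_be32_t dst_ip_be,
				      rte_be16_t dst_port_be, uint16_t vnic_id)
	{
		memset(req, 0, sizeof(*req));
		req->flags = rte_cpu_to_le_32(
			HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX |
			HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PKT_CTR);
		req->enables = rte_cpu_to_le_32(
			HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID |
			HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE |
			HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL |
			HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR |
			HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT |
			HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID);
		req->l2_filter_id = l2_filter_id; /* opaque id from L2 filter alloc */
		req->ip_addr_type = HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
		req->ip_protocol = HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_PROTOCOL_TCP;
		req->dst_ipaddr[0] = dst_ip_be;	/* IPv4 uses the first word only */
		req->dst_port = dst_port_be;	/* network byte order, per spec */
		req->dst_id = rte_cpu_to_le_16(vnic_id);
	}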
+
+/* Output (24 bytes) */
+struct hwrm_cfa_em_flow_alloc_output {
+	uint16_t error_code;
+	/*
+	 * Pass/Fail or error type Note: receiver to verify the in
+	 * parameters, and fail the call with an error when appropriate
+	 */
+	uint16_t req_type;
+	/* This field returns the type of original request. */
+	uint16_t seq_id;
+	/* This field provides original sequence number of the command. */
+	uint16_t resp_len;
+	/*
+	 * This field is the length of the response in bytes. The last
+	 * byte of the response is a valid flag that will read as '1'
+	 * when the command has been completely written to memory.
+	 */
+	uint64_t em_filter_id;
+	/* This value is an opaque id into CFA data structures. */
+	uint32_t flow_id;
+	/*
+	 * This is the ID of the flow associated with this filter. This
+	 * value shall be used to match and associate the flow
+	 * identifier returned in completion records. A value of
+	 * 0xFFFFFFFF shall indicate no flow id.
+	 */
+	uint8_t unused_0;
+	uint8_t unused_1;
+	uint8_t unused_2;
+	uint8_t valid;
+	/*
+	 * This field is used in Output records to indicate that the
+	 * output is completely written to RAM. This field should be
+	 * read as '1' to indicate that the output has been completely
+	 * written. When writing a command completion or response to an
+	 * internal processor, the order of writes has to be such that
+	 * this field is written last.
+	 */
+} __attribute__((packed));
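
The em_filter_id returned above is the handle later passed to hwrm_cfa_em_flow_free and hwrm_cfa_em_flow_cfg. A hedged sketch of response handling, following the valid-byte convention documented in the structure (the real driver's polling loop lives in bnxt_hwrm.c and is not shown here); <errno.h> is assumed:

	/* Sketch: accept the response only once the valid byte, which the
	 * HWRM writes last, reads 1, then keep the filter handle. */
	static int
	bnxt_sketch_em_alloc_done(const struct hwrm_cfa_em_flow_alloc_output *resp,
				  uint64_t *em_filter_id, uint32_t *flow_id)
	{
		if (resp->valid != 1)
			return -EAGAIN;	/* output not completely written yet */
		if (rte_le_to_cpu_16(resp->error_code))
			return -EIO;
		*em_filter_id = resp->em_filter_id; /* for free/cfg later */
		*flow_id = rte_le_to_cpu_32(resp->flow_id); /* 0xFFFFFFFF = none */
		return 0;
	}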
+
+/* hwrm_cfa_em_flow_free */
+/* Description: Free an EM flow table entry */
+/* Input (24 bytes) */
+struct hwrm_cfa_em_flow_free_input {
+	uint16_t req_type;
+	/*
+	 * This value indicates what type of request this is. The format
+	 * for the rest of the command is determined by this field.
+	 */
+	uint16_t cmpl_ring;
+	/*
+	 * This value indicates what completion ring the request will be
+	 * optionally completed on. If the value is -1, then no CR
+	 * completion will be generated. Any other value must be a valid
+	 * CR ring_id value for this function.
+	 */
+	uint16_t seq_id;
+	/* This value indicates the command sequence number. */
+	uint16_t target_id;
+	/*
+	 * Target ID of this command. 0x0 - 0xFFF8 - Used for function
+	 * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
+	 * - HWRM
+	 */
+	uint64_t resp_addr;
+	/*
+	 * This is the host address where the response will be written
+	 * when the request is complete. This area must be 16B aligned
+	 * and must be cleared to zero before the request is made.
+	 */
+	uint64_t em_filter_id;
+	/* This value is an opaque id into CFA data structures. */
+} __attribute__((packed));
+
+/* Output (16 bytes) */
+struct hwrm_cfa_em_flow_free_output {
+	uint16_t error_code;
+	/*
+	 * Pass/Fail or error type Note: receiver to verify the in
+	 * parameters, and fail the call with an error when appropriate
+	 */
+	uint16_t req_type;
+	/* This field returns the type of original request. */
+	uint16_t seq_id;
+	/* This field provides original sequence number of the command. */
+	uint16_t resp_len;
+	/*
+	 * This field is the length of the response in bytes. The last
+	 * byte of the response is a valid flag that will read as '1'
+	 * when the command has been completely written to memory.
+	 */
+	uint32_t unused_0;
+	uint8_t unused_1;
+	uint8_t unused_2;
+	uint8_t unused_3;
+	uint8_t valid;
+	/*
+	 * This field is used in Output records to indicate that the
+	 * output is completely written to RAM. This field should be
+	 * read as '1' to indicate that the output has been completely
+	 * written. When writing a command completion or response to an
+	 * internal processor, the order of writes has to be such that
+	 * this field is written last.
+	 */
+} __attribute__((packed));
+
+/* hwrm_cfa_em_flow_cfg */
+/*
+ * Description: Configure an EM flow with a new destination VNIC and/or meter.
+ */
+/* Input (48 bytes) */
+struct hwrm_cfa_em_flow_cfg_input {
+	uint16_t req_type;
+	/*
+	 * This value indicates what type of request this is. The format
+	 * for the rest of the command is determined by this field.
+	 */
+	uint16_t cmpl_ring;
+	/*
+	 * This value indicates what completion ring the request will be
+	 * optionally completed on. If the value is -1, then no CR
+	 * completion will be generated. Any other value must be a valid
+	 * CR ring_id value for this function.
+	 */
+	uint16_t seq_id;
+	/* This value indicates the command sequence number. */
+	uint16_t target_id;
+	/*
+	 * Target ID of this command. 0x0 - 0xFFF8 - Used for function
+	 * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
+	 * - HWRM
+	 */
+	uint64_t resp_addr;
+	/*
+	 * This is the host address where the response will be written
+	 * when the request is complete. This area must be 16B aligned
+	 * and must be cleared to zero before the request is made.
+	 */
+	uint32_t enables;
+	/* This bit must be '1' for the new_dst_id field to be configured. */
+	#define HWRM_CFA_EM_FLOW_CFG_INPUT_ENABLES_NEW_DST_ID	UINT32_C(0x1)
+	/*
+	 * This bit must be '1' for the new_mirror_vnic_id field to be
+	 * configured.
+	 */
+	#define HWRM_CFA_EM_FLOW_CFG_INPUT_ENABLES_NEW_MIRROR_VNIC_ID \
+		UINT32_C(0x2)
+	/*
+	 * This bit must be '1' for the new_meter_instance_id field to
+	 * be configured.
+	 */
+	#define HWRM_CFA_EM_FLOW_CFG_INPUT_ENABLES_NEW_METER_INSTANCE_ID \
+		UINT32_C(0x4)
+	uint32_t unused_0;
+	uint64_t em_filter_id;
+	/* This value is an opaque id into CFA data structures. */
+	uint32_t new_dst_id;
+	/*
+	 * If set, this value shall represent the new Logical VNIC ID of
+	 * the destination VNIC for the RX path and network port id of
+	 * the destination port for the TX path.
+	 */
+	uint32_t new_mirror_vnic_id;
+	/* New Logical VNIC ID of the VNIC where traffic is mirrored. */
+	uint16_t new_meter_instance_id;
+	/*
+	 * New meter to attach to the flow. Specifying the invalid
+	 * instance ID is used to remove any existing meter from the
+	 * flow.
+	 */
+	/*
+	 * A value of 0xffff is considered invalid and
+	 * implies the instance is not configured.
+	 */
+	#define HWRM_CFA_EM_FLOW_CFG_INPUT_NEW_METER_INSTANCE_ID_INVALID \
+		UINT32_C(0xffff)
+	uint16_t unused_1[3];
+} __attribute__((packed));
+
+/* Output (16 bytes) */
+struct hwrm_cfa_em_flow_cfg_output {
+	uint16_t error_code;
+	/*
+	 * Pass/Fail or error type Note: receiver to verify the in
+	 * parameters, and fail the call with an error when appropriate
+	 */
+	uint16_t req_type;
+	/* This field returns the type of original request. */
+	uint16_t seq_id;
+	/* This field provides original sequence number of the command. */
+	uint16_t resp_len;
+	/*
+	 * This field is the length of the response in bytes. The last
+	 * byte of the response is a valid flag that will read as '1'
+	 * when the command has been completely written to memory.
+	 */
+	uint32_t unused_0;
+	uint8_t unused_1;
+	uint8_t unused_2;
+	uint8_t unused_3;
+	uint8_t valid;
+	/*
+	 * This field is used in Output records to indicate that the
+	 * output is completely written to RAM. This field should be
+	 * read as '1' to indicate that the output has been completely
+	 * written. When writing a command completion or response to an
+	 * internal processor, the order of writes has to be such that
+	 * this field is written last.
+	 */
+} __attribute__((packed));
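
A companion sketch for this command, using only the fields and enables defined above: re-point an existing EM flow at a new destination VNIC and detach any attached meter in a single call.

	/* Sketch: move an EM flow to a new VNIC and drop its meter. */
	static void
	bnxt_sketch_em_flow_cfg(struct hwrm_cfa_em_flow_cfg_input *req,
				uint64_t em_filter_id, uint32_t new_vnic_id)
	{
		memset(req, 0, sizeof(*req));
		req->em_filter_id = em_filter_id; /* handle from em_flow_alloc */
		req->enables = rte_cpu_to_le_32(
			HWRM_CFA_EM_FLOW_CFG_INPUT_ENABLES_NEW_DST_ID |
			HWRM_CFA_EM_FLOW_CFG_INPUT_ENABLES_NEW_METER_INSTANCE_ID);
		req->new_dst_id = rte_cpu_to_le_32(new_vnic_id);
		/* 0xffff is the invalid id: removes any existing meter */
		req->new_meter_instance_id = rte_cpu_to_le_16(
			HWRM_CFA_EM_FLOW_CFG_INPUT_NEW_METER_INSTANCE_ID_INVALID);
	}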
+
 /* hwrm_tunnel_dst_port_query */
 /*
  * Description: This function is called by a driver to query tunnel type
@@ -9591,6 +10674,12 @@ struct hwrm_tunnel_dst_port_query_input {
 	/* Generic Network Virtualization Encapsulation (Geneve) */
 	#define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_GENEVE \
 		UINT32_C(0x5)
+	/*
+	 * IPV4 over virtual eXtensible Local Area
+	 * Network (IPV4oVXLAN)
+	 */
+	#define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_VXLAN_V4 \
+		UINT32_C(0x9)
 	uint8_t unused_0[7];
 } __attribute__((packed));
@@ -9691,6 +10780,12 @@ struct hwrm_tunnel_dst_port_alloc_input {
 	/* Generic Network Virtualization Encapsulation (Geneve) */
 	#define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE \
 		UINT32_C(0x5)
+	/*
+	 * IPV4 over virtual eXtensible Local Area
+	 * Network (IPV4oVXLAN)
+	 */
+	#define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_V4 \
+		UINT32_C(0x9)
 	uint8_t unused_0;
 	uint16_t tunnel_dst_port_val;
 	/*
@@ -9781,6 +10876,12 @@ struct hwrm_tunnel_dst_port_free_input {
 	#define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN UINT32_C(0x1)
 	/* Generic Network Virtualization Encapsulation (Geneve) */
 	#define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE UINT32_C(0x5)
+	/*
+	 * IPV4 over virtual eXtensible Local Area
+	 * Network (IPV4oVXLAN)
+	 */
+	#define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN_V4 \
+		UINT32_C(0x9)
 	uint8_t unused_0;
 	uint16_t tunnel_dst_port_id;
 	/*
@@ -9984,77 +11085,9 @@ struct hwrm_stat_ctx_free_output {
 	 */
 } __attribute__((packed));
 
-/* hwrm_stat_ctx_clr_stats */
-/* Description: This command clears statistics of a context.
*/ -/* Input (24 bytes) */ -struct hwrm_stat_ctx_clr_stats_input { - uint16_t req_type; - /* - * This value indicates what type of request this is. The format - * for the rest of the command is determined by this field. - */ - uint16_t cmpl_ring; - /* - * This value indicates the what completion ring the request - * will be optionally completed on. If the value is -1, then no - * CR completion will be generated. Any other value must be a - * valid CR ring_id value for this function. - */ - uint16_t seq_id; - /* This value indicates the command sequence number. */ - uint16_t target_id; - /* - * Target ID of this command. 0x0 - 0xFFF8 - Used for function - * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - * - HWRM - */ - uint64_t resp_addr; - /* - * This is the host address where the response will be written - * when the request is complete. This area must be 16B aligned - * and must be cleared to zero before the request is made. - */ - uint32_t stat_ctx_id; - /* ID of the statistics context that is being queried. */ - uint32_t unused_0; -} __attribute__((packed)); - -/* Output (16 bytes) */ -struct hwrm_stat_ctx_clr_stats_output { - uint16_t error_code; - /* - * Pass/Fail or error type Note: receiver to verify the in - * parameters, and fail the call with an error when appropriate - */ - uint16_t req_type; - /* This field returns the type of original request. */ - uint16_t seq_id; - /* This field provides original sequence number of the command. */ - uint16_t resp_len; - /* - * This field is the length of the response in bytes. The last - * byte of the response is a valid flag that will read as '1' - * when the command has been completely written to memory. - */ - uint32_t unused_0; - uint8_t unused_1; - uint8_t unused_2; - uint8_t unused_3; - uint8_t valid; - /* - * This field is used in Output records to indicate that the - * output is completely written to RAM. This field should be - * read as '1' to indicate that the output has been completely - * written. When writing a command completion or response to an - * internal processor, the order of writes has to be such that - * this field is written last. - */ -} __attribute__((packed)); - /* hwrm_stat_ctx_query */ /* Description: This command returns statistics of a context. */ /* Input (24 bytes) */ - struct hwrm_stat_ctx_query_input { uint16_t req_type; /* @@ -10087,7 +11120,6 @@ struct hwrm_stat_ctx_query_input { } __attribute__((packed)); /* Output (176 bytes) */ - struct hwrm_stat_ctx_query_output { uint16_t error_code; /* @@ -10158,6 +11190,73 @@ struct hwrm_stat_ctx_query_output { */ } __attribute__((packed)); +/* hwrm_stat_ctx_clr_stats */ +/* Description: This command clears statistics of a context. */ +/* Input (24 bytes) */ +struct hwrm_stat_ctx_clr_stats_input { + uint16_t req_type; + /* + * This value indicates what type of request this is. The format + * for the rest of the command is determined by this field. + */ + uint16_t cmpl_ring; + /* + * This value indicates the what completion ring the request + * will be optionally completed on. If the value is -1, then no + * CR completion will be generated. Any other value must be a + * valid CR ring_id value for this function. + */ + uint16_t seq_id; + /* This value indicates the command sequence number. */ + uint16_t target_id; + /* + * Target ID of this command. 
0x0 - 0xFFF8 - Used for function + * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF + * - HWRM + */ + uint64_t resp_addr; + /* + * This is the host address where the response will be written + * when the request is complete. This area must be 16B aligned + * and must be cleared to zero before the request is made. + */ + uint32_t stat_ctx_id; + /* ID of the statistics context that is being queried. */ + uint32_t unused_0; +} __attribute__((packed)); + +/* Output (16 bytes) */ +struct hwrm_stat_ctx_clr_stats_output { + uint16_t error_code; + /* + * Pass/Fail or error type Note: receiver to verify the in + * parameters, and fail the call with an error when appropriate + */ + uint16_t req_type; + /* This field returns the type of original request. */ + uint16_t seq_id; + /* This field provides original sequence number of the command. */ + uint16_t resp_len; + /* + * This field is the length of the response in bytes. The last + * byte of the response is a valid flag that will read as '1' + * when the command has been completely written to memory. + */ + uint32_t unused_0; + uint8_t unused_1; + uint8_t unused_2; + uint8_t unused_3; + uint8_t valid; + /* + * This field is used in Output records to indicate that the + * output is completely written to RAM. This field should be + * read as '1' to indicate that the output has been completely + * written. When writing a command completion or response to an + * internal processor, the order of writes has to be such that + * this field is written last. + */ +} __attribute__((packed)); + /* hwrm_exec_fwd_resp */ /* * Description: This command is used to send an encapsulated request to the @@ -10331,6 +11430,310 @@ struct hwrm_reject_fwd_resp_output { */ } __attribute__((packed)); +/* hwrm_nvm_get_dir_entries */ +/* Input (24 bytes) */ +struct hwrm_nvm_get_dir_entries_input { + uint16_t req_type; + uint16_t cmpl_ring; + uint16_t seq_id; + uint16_t target_id; + uint64_t resp_addr; + uint64_t host_dest_addr; +} __attribute__((packed)); + +/* Output (16 bytes) */ +struct hwrm_nvm_get_dir_entries_output { + uint16_t error_code; + uint16_t req_type; + uint16_t seq_id; + uint16_t resp_len; + uint32_t unused_0; + uint8_t unused_1; + uint8_t unused_2; + uint8_t unused_3; + uint8_t valid; +} __attribute__((packed)); + + +/* hwrm_nvm_erase_dir_entry */ +/* Input (24 bytes) */ +struct hwrm_nvm_erase_dir_entry_input { + uint16_t req_type; + uint16_t cmpl_ring; + uint16_t seq_id; + uint16_t target_id; + uint64_t resp_addr; + uint16_t dir_idx; + uint16_t unused_0[3]; +}; + +/* Output (16 bytes) */ +struct hwrm_nvm_erase_dir_entry_output { + uint16_t error_code; + uint16_t req_type; + uint16_t seq_id; + uint16_t resp_len; + uint32_t unused_0; + uint8_t unused_1; + uint8_t unused_2; + uint8_t unused_3; + uint8_t valid; +}; + +/* hwrm_nvm_get_dir_info */ +/* Input (16 bytes) */ +struct hwrm_nvm_get_dir_info_input { + uint16_t req_type; + uint16_t cmpl_ring; + uint16_t seq_id; + uint16_t target_id; + uint64_t resp_addr; +} __attribute__((packed)); + +/* Output (24 bytes) */ +struct hwrm_nvm_get_dir_info_output { + uint16_t error_code; + /* + * Pass/Fail or error type Note: receiver to verify the in + * parameters, and fail the call with an error when appropriate + */ + uint16_t req_type; + /* This field returns the type of original request. */ + uint16_t seq_id; + /* This field provides original sequence number of the command. */ + uint16_t resp_len; + /* + * This field is the length of the response in bytes. 
The last + * byte of the response is a valid flag that will read as '1' + * when the command has been completely written to memory. + */ + uint32_t entries; + /* Number of directory entries in the directory. */ + uint32_t entry_length; + /* Size of each directory entry, in bytes. */ + uint32_t unused_0; + uint8_t unused_1; + uint8_t unused_2; + uint8_t unused_3; + uint8_t valid; + /* + * This field is used in Output records to indicate that the + * output is completely written to RAM. This field should be + * read as '1' to indicate that the output has been completely + * written. When writing a command completion or response to an + * internal processor, the order of writes has to be such that + * this field is written last. + */ +} __attribute__((packed)); + +/* hwrm_nvm_write */ +/* + * Note: Write to the allocated NVRAM of an item referenced by an existing + * directory entry. + */ +/* Input (48 bytes) */ +struct hwrm_nvm_write_input { + uint16_t req_type; + /* + * This value indicates what type of request this is. The format + * for the rest of the command is determined by this field. + */ + uint16_t cmpl_ring; + /* + * This value indicates the what completion ring the request + * will be optionally completed on. If the value is -1, then no + * CR completion will be generated. Any other value must be a + * valid CR ring_id value for this function. + */ + uint16_t seq_id; + /* This value indicates the command sequence number. */ + uint16_t target_id; + /* + * Target ID of this command. 0x0 - 0xFFF8 - Used for function + * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF + * - HWRM + */ + uint64_t resp_addr; + /* + * This is the host address where the response will be written + * when the request is complete. This area must be 16B aligned + * and must be cleared to zero before the request is made. + */ + uint64_t host_src_addr; + /* 64-bit Host Source Address. This is where the source data is. */ + uint16_t dir_type; + /* + * The Directory Entry Type (valid values are defined in the + * bnxnvm_directory_type enum defined in the file + * bnxnvm_defs.h). + */ + uint16_t dir_ordinal; + /* + * Directory ordinal. The 0-based instance of the combined + * Directory Entry Type and Extension. + */ + uint16_t dir_ext; + /* + * The Directory Entry Extension flags (see BNX_DIR_EXT_* in the + * file bnxnvm_defs.h). + */ + uint16_t dir_attr; + /* + * Directory Entry Attribute flags (see BNX_DIR_ATTR_* in the + * file bnxnvm_defs.h). + */ + uint32_t dir_data_length; + /* + * Length of data to write, in bytes. May be less than or equal + * to the allocated size for the directory entry. The data + * length stored in the directory entry will be updated to + * reflect this value once the write is complete. + */ + uint16_t option; + /* Option. */ + uint16_t flags; + /* + * When this bit is '1', the original active image will not be + * removed. TBD: what purpose is this? + */ + #define HWRM_NVM_WRITE_INPUT_FLAGS_KEEP_ORIG_ACTIVE_IMG UINT32_C(0x1) + uint32_t dir_item_length; + /* + * The requested length of the allocated NVM for the item, in + * bytes. This value may be greater than or equal to the + * specified data length (dir_data_length). If this value is + * less than the specified data length, it will be ignored. The + * response will contain the actual allocated item length, which + * may be greater than the requested item length. 
The purpose + * for allocating more than the required number of bytes for an + * item's data is to pre-allocate extra storage (padding) to + * accommodate the potential future growth of an item (e.g. + * upgraded firmware with a size increase, log growth, expanded + * configuration data). + */ + uint32_t unused_0; +} __attribute__((packed)); + +/* Output (16 bytes) */ +struct hwrm_nvm_write_output { + uint16_t error_code; + /* + * Pass/Fail or error type Note: receiver to verify the in + * parameters, and fail the call with an error when appropriate + */ + uint16_t req_type; + /* This field returns the type of original request. */ + uint16_t seq_id; + /* This field provides original sequence number of the command. */ + uint16_t resp_len; + /* + * This field is the length of the response in bytes. The last + * byte of the response is a valid flag that will read as '1' + * when the command has been completely written to memory. + */ + uint32_t dir_item_length; + /* + * Length of the allocated NVM for the item, in bytes. The value + * may be greater than or equal to the specified data length or + * the requested item length. The actual item length used when + * creating a new directory entry will be a multiple of an NVM + * block size. + */ + uint16_t dir_idx; + /* The directory index of the created or modified item. */ + uint8_t unused_0; + uint8_t valid; + /* + * This field is used in Output records to indicate that the + * output is completely written to RAM. This field should be + * read as '1' to indicate that the output has been completely + * written. When writing a command completion or response to an + * internal processor, the order of writes has to be such that + * this field is written last. + */ +} __attribute__((packed)); + +/* hwrm_nvm_read */ +/* + * Note: Read the contents of an NVRAM item as referenced (indexed) by an + * existing directory entry. + */ +/* Input (40 bytes) */ +struct hwrm_nvm_read_input { + uint16_t req_type; + /* + * This value indicates what type of request this is. The format + * for the rest of the command is determined by this field. + */ + uint16_t cmpl_ring; + /* + * This value indicates the what completion ring the request + * will be optionally completed on. If the value is -1, then no + * CR completion will be generated. Any other value must be a + * valid CR ring_id value for this function. + */ + uint16_t seq_id; + /* This value indicates the command sequence number. */ + uint16_t target_id; + /* + * Target ID of this command. 0x0 - 0xFFF8 - Used for function + * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF + * - HWRM + */ + uint64_t resp_addr; + /* + * This is the host address where the response will be written + * when the request is complete. This area must be 16B aligned + * and must be cleared to zero before the request is made. + */ + uint64_t host_dest_addr; + /* + * 64-bit Host Destination Address. This is the host address + * where the data will be written to. + */ + uint16_t dir_idx; + /* The 0-based index of the directory entry. */ + uint8_t unused_0; + uint8_t unused_1; + uint32_t offset; + /* The NVRAM byte-offset to read from. */ + uint32_t len; + /* The length of the data to be read, in bytes. 
 */
+	uint32_t unused_2;
+} __attribute__((packed));
+
+/* Output (16 bytes) */
+struct hwrm_nvm_read_output {
+	uint16_t error_code;
+	/*
+	 * Pass/Fail or error type Note: receiver to verify the in
+	 * parameters, and fail the call with an error when appropriate
+	 */
+	uint16_t req_type;
+	/* This field returns the type of original request. */
+	uint16_t seq_id;
+	/* This field provides original sequence number of the command. */
+	uint16_t resp_len;
+	/*
+	 * This field is the length of the response in bytes. The last
+	 * byte of the response is a valid flag that will read as '1'
+	 * when the command has been completely written to memory.
+	 */
+	uint32_t unused_0;
+	uint8_t unused_1;
+	uint8_t unused_2;
+	uint8_t unused_3;
+	uint8_t valid;
+	/*
+	 * This field is used in Output records to indicate that the
+	 * output is completely written to RAM. This field should be
+	 * read as '1' to indicate that the output has been completely
+	 * written. When writing a command completion or response to an
+	 * internal processor, the order of writes has to be such that
+	 * this field is written last.
+	 */
+} __attribute__((packed));
+
 /* Hardware Resource Manager Specification */
 /* Description: This structure is used to specify port description. */
 /*
@@ -10391,11 +11794,28 @@ struct output {
 /* Short Command Structure (16 bytes) */
 struct hwrm_short_input {
 	uint16_t req_type;
+	/*
+	 * This field indicates the type of request in the request
+	 * buffer. The format for the rest of the command (request) is
+	 * determined by this field.
+	 */
 	uint16_t signature;
-	#define HWRM_SHORT_REQ_SIGNATURE_SHORT_CMD	(UINT32_C(0x4321))
+	/*
+	 * This field indicates a signature that is used to identify the
+	 * short form of the command listed here. This field shall be
+	 * set to 17185 (0x4321).
+	 */
+	/* Signature indicating this is a short form of HWRM command */
+	#define HWRM_SHORT_REQ_SIGNATURE_SHORT_CMD	UINT32_C(0x4321)
 	uint16_t unused_0;
+	/* Reserved for future use. */
 	uint16_t size;
+	/* This value indicates the length of the request. */
 	uint64_t req_addr;
+	/*
+	 * This is the host address where the request was written. This
+	 * area must be 16B aligned.
+	 */
 } __attribute__((packed));
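
When the short command mode is negotiated (BNXT_FLAG_SHORT_CMD and the hwrm_short_cmd_req_addr/rte_iova_t fields added to struct bnxt earlier in this patch), the driver writes the full request into a DMA buffer and passes only this 16-byte descriptor to the firmware. A minimal sketch of building the descriptor; buffer allocation and the actual mailbox write are assumed to happen elsewhere (e.g. in bnxt_hwrm.c):

	/* Sketch: wrap a long-form request in the short command format. */
	static void
	bnxt_sketch_short_req(struct hwrm_short_input *short_req,
			      uint16_t req_type, uint16_t req_len,
			      uint64_t req_dma_addr)
	{
		short_req->req_type = rte_cpu_to_le_16(req_type);
		short_req->signature =
			rte_cpu_to_le_16(HWRM_SHORT_REQ_SIGNATURE_SHORT_CMD);
		short_req->unused_0 = 0;
		short_req->size = rte_cpu_to_le_16(req_len);
		/* 16B-aligned IOVA of the full request written earlier */
		short_req->req_addr = rte_cpu_to_le_64(req_dma_addr);
	}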
 
 #define HWRM_GET_HWRM_ERROR_CODE(arg) \
diff --git a/drivers/net/bnxt/rte_pmd_bnxt.c b/drivers/net/bnxt/rte_pmd_bnxt.c
index c343d903..a3134074 100644
--- a/drivers/net/bnxt/rte_pmd_bnxt.c
+++ b/drivers/net/bnxt/rte_pmd_bnxt.c
@@ -67,7 +67,7 @@ int bnxt_rcv_msg_from_vf(struct bnxt *bp, uint16_t vf_id, void *msg)
 		true : false;
 }
 
-int rte_pmd_bnxt_set_tx_loopback(uint8_t port, uint8_t on)
+int rte_pmd_bnxt_set_tx_loopback(uint16_t port, uint8_t on)
 {
 	struct rte_eth_dev *eth_dev;
 	struct bnxt *bp;
@@ -108,12 +108,12 @@ rte_pmd_bnxt_set_all_queues_drop_en_cb(struct bnxt_vnic_info *vnic, void *onptr)
 	vnic->bd_stall = !(*on);
 }
 
-int rte_pmd_bnxt_set_all_queues_drop_en(uint8_t port, uint8_t on)
+int rte_pmd_bnxt_set_all_queues_drop_en(uint16_t port, uint8_t on)
 {
 	struct rte_eth_dev *eth_dev;
 	struct bnxt *bp;
 	uint32_t i;
-	int rc;
+	int rc = -EINVAL;
 
 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
 
@@ -159,7 +159,7 @@ int rte_pmd_bnxt_set_all_queues_drop_en(uint8_t port, uint8_t on)
 	return rc;
 }
 
-int rte_pmd_bnxt_set_vf_mac_addr(uint8_t port, uint16_t vf,
+int rte_pmd_bnxt_set_vf_mac_addr(uint16_t port, uint16_t vf,
 				struct ether_addr *mac_addr)
 {
 	struct rte_eth_dev *dev;
@@ -191,7 +191,7 @@ int rte_pmd_bnxt_set_vf_mac_addr(uint8_t port, uint16_t vf,
 	return rc;
 }
 
-int rte_pmd_bnxt_set_vf_rate_limit(uint8_t port, uint16_t vf,
+int rte_pmd_bnxt_set_vf_rate_limit(uint16_t port, uint16_t vf,
 				uint16_t tx_rate, uint64_t q_msk)
 {
 	struct rte_eth_dev *eth_dev;
@@ -241,7 +241,7 @@ int rte_pmd_bnxt_set_vf_rate_limit(uint8_t port, uint16_t vf,
 	return rc;
 }
 
-int rte_pmd_bnxt_set_vf_mac_anti_spoof(uint8_t port, uint16_t vf, uint8_t on)
+int rte_pmd_bnxt_set_vf_mac_anti_spoof(uint16_t port, uint16_t vf, uint8_t on)
 {
 	struct rte_eth_dev_info dev_info;
 	struct rte_eth_dev *dev;
@@ -294,7 +294,7 @@ int rte_pmd_bnxt_set_vf_mac_anti_spoof(uint8_t port, uint16_t vf, uint8_t on)
 	return rc;
 }
 
-int rte_pmd_bnxt_set_vf_vlan_anti_spoof(uint8_t port, uint16_t vf, uint8_t on)
+int rte_pmd_bnxt_set_vf_vlan_anti_spoof(uint16_t port, uint16_t vf, uint8_t on)
 {
 	struct rte_eth_dev_info dev_info;
 	struct rte_eth_dev *dev;
@@ -322,9 +322,6 @@ int rte_pmd_bnxt_set_vf_vlan_anti_spoof(uint8_t port, uint16_t vf, uint8_t on)
 	if (vf >= dev_info.max_vfs)
 		return -EINVAL;
 
-	if (on == bp->pf.vf_info[vf].vlan_spoof_en)
-		return 0;
-
 	rc = bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(bp, vf, on);
 	if (!rc) {
 		bp->pf.vf_info[vf].vlan_spoof_en = on;
@@ -350,7 +347,7 @@ rte_pmd_bnxt_set_vf_vlan_stripq_cb(struct bnxt_vnic_info *vnic, void *onptr)
 }
 
 int
-rte_pmd_bnxt_set_vf_vlan_stripq(uint8_t port, uint16_t vf, uint8_t on)
+rte_pmd_bnxt_set_vf_vlan_stripq(uint16_t port, uint16_t vf, uint8_t on)
 {
 	struct rte_eth_dev *dev;
 	struct rte_eth_dev_info dev_info;
@@ -385,7 +382,7 @@ rte_pmd_bnxt_set_vf_vlan_stripq(uint8_t port, uint16_t vf, uint8_t on)
 	return rc;
 }
 
-int rte_pmd_bnxt_set_vf_rxmode(uint8_t port, uint16_t vf,
+int rte_pmd_bnxt_set_vf_rxmode(uint16_t port, uint16_t vf,
 				uint16_t rx_mask, uint8_t on)
 {
 	struct rte_eth_dev *dev;
@@ -409,20 +406,19 @@ int rte_pmd_bnxt_set_vf_rxmode(uint8_t port, uint16_t vf,
 	if (vf >= bp->pdev->max_vfs)
 		return -EINVAL;
 
-	if (rx_mask & (ETH_VMDQ_ACCEPT_UNTAG | ETH_VMDQ_ACCEPT_HASH_MC)) {
+	if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG) {
 		RTE_LOG(ERR, PMD, "Currently cannot toggle this setting\n");
 		return -ENOTSUP;
 	}
 
-	if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC && !on) {
-		RTE_LOG(ERR, PMD, "Currently cannot disable UC Rx\n");
-		return -ENOTSUP;
-	}
+	/* Is this really the correct mapping?
VFd seems to think it is. */ + if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC) + flag |= BNXT_VNIC_INFO_PROMISC; if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST) flag |= BNXT_VNIC_INFO_BCAST; if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST) - flag |= BNXT_VNIC_INFO_ALLMULTI; + flag |= BNXT_VNIC_INFO_ALLMULTI | BNXT_VNIC_INFO_MCAST; if (on) bp->pf.vf_info[vf].l2_rx_mask |= flag; @@ -477,7 +473,7 @@ static int bnxt_set_vf_table(struct bnxt *bp, uint16_t vf) return rc; } -int rte_pmd_bnxt_set_vf_vlan_filter(uint8_t port, uint16_t vlan, +int rte_pmd_bnxt_set_vf_vlan_filter(uint16_t port, uint16_t vlan, uint64_t vf_mask, uint8_t vlan_on) { struct bnxt_vlan_table_entry *ve; @@ -570,7 +566,7 @@ int rte_pmd_bnxt_set_vf_vlan_filter(uint8_t port, uint16_t vlan, return rc; } -int rte_pmd_bnxt_get_vf_stats(uint8_t port, +int rte_pmd_bnxt_get_vf_stats(uint16_t port, uint16_t vf_id, struct rte_eth_stats *stats) { @@ -598,7 +594,7 @@ int rte_pmd_bnxt_get_vf_stats(uint8_t port, return bnxt_hwrm_func_qstats(bp, bp->pf.first_vf_id + vf_id, stats); } -int rte_pmd_bnxt_reset_vf_stats(uint8_t port, +int rte_pmd_bnxt_reset_vf_stats(uint16_t port, uint16_t vf_id) { struct rte_eth_dev *dev; @@ -625,7 +621,7 @@ int rte_pmd_bnxt_reset_vf_stats(uint8_t port, return bnxt_hwrm_func_clr_stats(bp, bp->pf.first_vf_id + vf_id); } -int rte_pmd_bnxt_get_vf_rx_status(uint8_t port, uint16_t vf_id) +int rte_pmd_bnxt_get_vf_rx_status(uint16_t port, uint16_t vf_id) { struct rte_eth_dev *dev; struct rte_eth_dev_info dev_info; @@ -651,7 +647,7 @@ int rte_pmd_bnxt_get_vf_rx_status(uint8_t port, uint16_t vf_id) return bnxt_vf_vnic_count(bp, vf_id); } -int rte_pmd_bnxt_get_vf_tx_drop_count(uint8_t port, uint16_t vf_id, +int rte_pmd_bnxt_get_vf_tx_drop_count(uint16_t port, uint16_t vf_id, uint64_t *count) { struct rte_eth_dev *dev; @@ -679,7 +675,7 @@ int rte_pmd_bnxt_get_vf_tx_drop_count(uint8_t port, uint16_t vf_id, count); } -int rte_pmd_bnxt_mac_addr_add(uint8_t port, struct ether_addr *addr, +int rte_pmd_bnxt_mac_addr_add(uint16_t port, struct ether_addr *addr, uint32_t vf_id) { struct rte_eth_dev *dev; @@ -710,7 +706,7 @@ int rte_pmd_bnxt_mac_addr_add(uint8_t port, struct ether_addr *addr, /* If the VF currently uses a random MAC, update default to this one */ if (bp->pf.vf_info[vf_id].random_mac) { if (rte_pmd_bnxt_get_vf_rx_status(port, vf_id) <= 0) - rc = bnxt_hwrm_func_vf_mac(bp, vf_id, (uint8_t *)addr); + bnxt_hwrm_func_vf_mac(bp, vf_id, (uint8_t *)addr); } /* query the default VNIC id used by the function */ @@ -731,7 +727,7 @@ int rte_pmd_bnxt_mac_addr_add(uint8_t port, struct ether_addr *addr, (HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR | HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK) && memcmp(addr, filter->l2_addr, ETHER_ADDR_LEN) == 0) { - bnxt_hwrm_clear_filter(bp, filter); + bnxt_hwrm_clear_l2_filter(bp, filter); break; } } @@ -749,14 +745,14 @@ int rte_pmd_bnxt_mac_addr_add(uint8_t port, struct ether_addr *addr, /* Do not add a filter for the default MAC */ if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf_id, &dflt_mac) || memcmp(filter->l2_addr, dflt_mac.addr_bytes, ETHER_ADDR_LEN)) - rc = bnxt_hwrm_set_filter(bp, vnic.fw_vnic_id, filter); + rc = bnxt_hwrm_set_l2_filter(bp, vnic.fw_vnic_id, filter); exit: return rc; } int -rte_pmd_bnxt_set_vf_vlan_insert(uint8_t port, uint16_t vf, +rte_pmd_bnxt_set_vf_vlan_insert(uint16_t port, uint16_t vf, uint16_t vlan_id) { struct rte_eth_dev *dev; @@ -793,7 +789,7 @@ rte_pmd_bnxt_set_vf_vlan_insert(uint8_t port, uint16_t vf, return rc; } -int rte_pmd_bnxt_set_vf_persist_stats(uint8_t port, 
uint16_t vf, uint8_t on) +int rte_pmd_bnxt_set_vf_persist_stats(uint16_t port, uint16_t vf, uint8_t on) { struct rte_eth_dev_info dev_info; struct rte_eth_dev *dev; diff --git a/drivers/net/bnxt/rte_pmd_bnxt.h b/drivers/net/bnxt/rte_pmd_bnxt.h index c4c4770e..f881d30d 100644 --- a/drivers/net/bnxt/rte_pmd_bnxt.h +++ b/drivers/net/bnxt/rte_pmd_bnxt.h @@ -78,7 +78,7 @@ struct rte_pmd_bnxt_mb_event_param { * - (-ENODEV) if *port* invalid. * - (-EINVAL) if bad parameter. */ -int rte_pmd_bnxt_set_vf_mac_anti_spoof(uint8_t port, uint16_t vf, uint8_t on); +int rte_pmd_bnxt_set_vf_mac_anti_spoof(uint16_t port, uint16_t vf, uint8_t on); /** * Set the VF MAC address. @@ -94,7 +94,7 @@ int rte_pmd_bnxt_set_vf_mac_anti_spoof(uint8_t port, uint16_t vf, uint8_t on); * - (-ENODEV) if *port* invalid. * - (-EINVAL) if *vf* or *mac_addr* is invalid. */ -int rte_pmd_bnxt_set_vf_mac_addr(uint8_t port, uint16_t vf, +int rte_pmd_bnxt_set_vf_mac_addr(uint16_t port, uint16_t vf, struct ether_addr *mac_addr); /** @@ -115,7 +115,7 @@ int rte_pmd_bnxt_set_vf_mac_addr(uint8_t port, uint16_t vf, * - (-EINVAL) if bad parameter. */ int -rte_pmd_bnxt_set_vf_vlan_stripq(uint8_t port, uint16_t vf, uint8_t on); +rte_pmd_bnxt_set_vf_vlan_stripq(uint16_t port, uint16_t vf, uint8_t on); /** * Enable/Disable vf vlan insert @@ -134,7 +134,7 @@ rte_pmd_bnxt_set_vf_vlan_stripq(uint8_t port, uint16_t vf, uint8_t on); * - (-EINVAL) if bad parameter. */ int -rte_pmd_bnxt_set_vf_vlan_insert(uint8_t port, uint16_t vf, +rte_pmd_bnxt_set_vf_vlan_insert(uint16_t port, uint16_t vf, uint16_t vlan_id); /** @@ -156,7 +156,7 @@ rte_pmd_bnxt_set_vf_vlan_insert(uint8_t port, uint16_t vf, * - (-ENODEV) if *port_id* invalid. * - (-EINVAL) if bad parameter. */ -int rte_pmd_bnxt_set_vf_vlan_filter(uint8_t port, uint16_t vlan, +int rte_pmd_bnxt_set_vf_vlan_filter(uint16_t port, uint16_t vlan, uint64_t vf_mask, uint8_t vlan_on); /** @@ -173,7 +173,7 @@ int rte_pmd_bnxt_set_vf_vlan_filter(uint8_t port, uint16_t vlan, * - (-ENODEV) if *port* invalid. * - (-EINVAL) if bad parameter. */ -int rte_pmd_bnxt_set_tx_loopback(uint8_t port, uint8_t on); +int rte_pmd_bnxt_set_tx_loopback(uint16_t port, uint8_t on); /** * set all queues drop enable bit @@ -189,7 +189,7 @@ int rte_pmd_bnxt_set_tx_loopback(uint8_t port, uint8_t on); * - (-ENODEV) if *port* invalid. * - (-EINVAL) if bad parameter. */ -int rte_pmd_bnxt_set_all_queues_drop_en(uint8_t port, uint8_t on); +int rte_pmd_bnxt_set_all_queues_drop_en(uint16_t port, uint8_t on); /** * Set the VF rate limit. @@ -207,7 +207,7 @@ int rte_pmd_bnxt_set_all_queues_drop_en(uint8_t port, uint8_t on); * - (-ENODEV) if *port* invalid. * - (-EINVAL) if *vf* or *mac_addr* is invalid. */ -int rte_pmd_bnxt_set_vf_rate_limit(uint8_t port, uint16_t vf, +int rte_pmd_bnxt_set_vf_rate_limit(uint16_t port, uint16_t vf, uint16_t tx_rate, uint64_t q_msk); /** @@ -226,7 +226,7 @@ int rte_pmd_bnxt_set_vf_rate_limit(uint8_t port, uint16_t vf, * - (-EINVAL) if bad parameter. */ -int rte_pmd_bnxt_get_vf_stats(uint8_t port, +int rte_pmd_bnxt_get_vf_stats(uint16_t port, uint16_t vf_id, struct rte_eth_stats *stats); @@ -242,7 +242,7 @@ int rte_pmd_bnxt_get_vf_stats(uint8_t port, * - (-ENODEV) if *port* invalid. * - (-EINVAL) if bad parameter. */ -int rte_pmd_bnxt_reset_vf_stats(uint8_t port, +int rte_pmd_bnxt_reset_vf_stats(uint16_t port, uint16_t vf_id); /** @@ -261,7 +261,7 @@ int rte_pmd_bnxt_reset_vf_stats(uint8_t port, * - (-ENODEV) if *port* invalid. * - (-EINVAL) if bad parameter. 
 */
-int rte_pmd_bnxt_set_vf_vlan_anti_spoof(uint8_t port, uint16_t vf, uint8_t on);
+int rte_pmd_bnxt_set_vf_vlan_anti_spoof(uint16_t port, uint16_t vf, uint8_t on);
 
 /**
  * Set RX L2 Filtering mode of a VF of an Ethernet device.
@@ -280,7 +280,7 @@ int rte_pmd_bnxt_set_vf_vlan_anti_spoof(uint8_t port, uint16_t vf, uint8_t on);
  * - (-ENODEV) if *port_id* invalid.
  * - (-EINVAL) if bad parameter.
  */
-int rte_pmd_bnxt_set_vf_rxmode(uint8_t port, uint16_t vf,
+int rte_pmd_bnxt_set_vf_rxmode(uint16_t port, uint16_t vf,
 				uint16_t rx_mask, uint8_t on);
 
 /**
@@ -297,7 +297,7 @@ int rte_pmd_bnxt_set_vf_rxmode(uint8_t port, uint16_t vf,
 * - (-ENOMEM) on an allocation failure
 * - (-1) firmware interface error
 */
-int rte_pmd_bnxt_get_vf_rx_status(uint8_t port, uint16_t vf_id);
+int rte_pmd_bnxt_get_vf_rx_status(uint16_t port, uint16_t vf_id);
 
 /**
 * Queries the TX drop counter for the function
@@ -313,7 +313,7 @@ int rte_pmd_bnxt_get_vf_rx_status(uint8_t port, uint16_t vf_id);
 * - (-EINVAL) invalid vf_id specified.
 * - (-ENOTSUP) Ethernet device is not a PF
 */
-int rte_pmd_bnxt_get_vf_tx_drop_count(uint8_t port, uint16_t vf_id,
+int rte_pmd_bnxt_get_vf_tx_drop_count(uint16_t port, uint16_t vf_id,
 				      uint64_t *count);
 
 /**
@@ -331,7 +331,7 @@ int rte_pmd_bnxt_get_vf_tx_drop_count(uint8_t port, uint16_t vf_id,
 * - (-ENOTSUP) Ethernet device is not a PF
 * - (-ENOMEM) on an allocation failure
 */
-int rte_pmd_bnxt_mac_addr_add(uint8_t port, struct ether_addr *mac_addr,
+int rte_pmd_bnxt_mac_addr_add(uint16_t port, struct ether_addr *mac_addr,
 			      uint32_t vf_id);
 
 /**
@@ -350,5 +350,5 @@ int rte_pmd_bnxt_mac_addr_add(uint8_t port, struct ether_addr *mac_addr,
 * - (-ENODEV) if *port* invalid.
 * - (-EINVAL) if bad parameter.
 */
-int rte_pmd_bnxt_set_vf_persist_stats(uint8_t port, uint16_t vf, uint8_t on);
+int rte_pmd_bnxt_set_vf_persist_stats(uint16_t port, uint16_t vf, uint8_t on);
 #endif /* _PMD_BNXT_H_ */
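
With the port argument widened to uint16_t across this API, a hypothetical PF application might configure a VF as below; vf_lockdown() and its tx_rate parameter are illustrative names, not part of the header, and error handling is trimmed to the minimum.

	#include <stdint.h>
	#include <rte_ethdev.h>
	#include "rte_pmd_bnxt.h"

	/* Hypothetical helper: lock down one VF on a bnxt PF port. */
	static int
	vf_lockdown(uint16_t port, uint16_t vf, uint16_t tx_rate)
	{
		int rc;

		rc = rte_pmd_bnxt_set_vf_mac_anti_spoof(port, vf, 1);
		if (rc)
			return rc;
		rc = rte_pmd_bnxt_set_vf_vlan_anti_spoof(port, vf, 1);
		if (rc)
			return rc;
		/* apply the rate limit to every queue of the VF */
		return rte_pmd_bnxt_set_vf_rate_limit(port, vf, tx_rate,
						      UINT64_MAX);
	}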