Diffstat (limited to 'drivers/net/bnxt')
25 files changed, 2003 insertions, 1251 deletions
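For context before the raw diff: the bnxt.h hunk below introduces interrupt-coalescing defaults (BNXT_INT_LAT_TMR_*, BNXT_NUM_CMPL_*, BNXT_CMPL_AGGR_*) and a struct bnxt_coal to carry them, but this section shows only the definitions, not the code that consumes them. As a hedged sketch under those definitions, a helper that seeds the struct with the defaults before passing it to the HWRM ring-configuration call might look like the following; the bnxt_init_coal name and its placement are assumptions, not shown in this diff.

	/* Constants and struct copied from the bnxt.h hunk below; the
	 * helper itself is a sketch -- name and call site are assumed. */
	#include <stdint.h>

	#define BNXT_INT_LAT_TMR_MIN			75
	#define BNXT_INT_LAT_TMR_MAX			150
	#define BNXT_NUM_CMPL_AGGR_INT			36
	#define BNXT_CMPL_AGGR_DMA_TMR			37
	#define BNXT_NUM_CMPL_DMA_AGGR			36
	#define BNXT_CMPL_AGGR_DMA_TMR_DURING_INT	50
	#define BNXT_NUM_CMPL_DMA_AGGR_DURING_INT	12

	struct bnxt_coal {
		uint16_t num_cmpl_aggr_int;
		uint16_t num_cmpl_dma_aggr;
		uint16_t num_cmpl_dma_aggr_during_int;
		uint16_t int_lat_tmr_max;
		uint16_t int_lat_tmr_min;
		uint16_t cmpl_aggr_dma_tmr;
		uint16_t cmpl_aggr_dma_tmr_during_int;
	};

	/* Seed a bnxt_coal with the defaults above before handing it to
	 * the HWRM ring-configuration request (not part of this section). */
	static void bnxt_init_coal(struct bnxt_coal *coal)
	{
		coal->num_cmpl_aggr_int = BNXT_NUM_CMPL_AGGR_INT;
		coal->num_cmpl_dma_aggr = BNXT_NUM_CMPL_DMA_AGGR;
		coal->num_cmpl_dma_aggr_during_int =
			BNXT_NUM_CMPL_DMA_AGGR_DURING_INT;
		coal->int_lat_tmr_max = BNXT_INT_LAT_TMR_MAX;
		coal->int_lat_tmr_min = BNXT_INT_LAT_TMR_MIN;
		coal->cmpl_aggr_dma_tmr = BNXT_CMPL_AGGR_DMA_TMR;
		coal->cmpl_aggr_dma_tmr_during_int =
			BNXT_CMPL_AGGR_DMA_TMR_DURING_INT;
	}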
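Similarly, the bnxt_cpr.h hunk below adds RING_CMPL() and NEXT_CMPL(), which track a completion-ring consumer index together with a phase ("valid") bit that flips on every ring wrap, and B_CP_DB(), which masks the index with RING_CMPL() when writing the doorbell. A minimal, standalone sketch of the consume loop is below; the cp_ring/cp_ring_info types and the printf stand in for the real bnxt_ring/bnxt_cp_ring_info and completion handling, while the two macros are copied from the hunk.

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define unlikely(x) __builtin_expect(!!(x), 0)

	struct cp_ring { uint32_t ring_size; };	/* stand-in for bnxt_ring */
	struct cp_ring_info {			/* stand-in for bnxt_cp_ring_info */
		struct cp_ring *cp_ring_struct;
	};

	#define RING_CMPL(ring_mask, idx) ((idx) & (ring_mask))
	#define NEXT_CMPL(cpr, idx, v, inc) do { \
		(idx) += (inc); \
		if (unlikely((idx) == (cpr)->cp_ring_struct->ring_size)) { \
			(v) = !(v); /* phase bit flips on every wrap */ \
			(idx) = 0; \
		} \
	} while (0)

	int main(void)
	{
		struct cp_ring ring = { .ring_size = 8 };
		struct cp_ring_info cpr = { .cp_ring_struct = &ring };
		uint32_t raw_cons = 0;
		bool valid = true; /* entries written in the current phase */

		/* Consume 20 completions across two wraps of an 8-entry
		 * ring; a real consumer would compare each entry's valid
		 * bit against 'valid' before touching it. */
		for (int i = 0; i < 20; i++) {
			/* Identity here since NEXT_CMPL already wraps idx;
			 * B_CP_DB applies this mask before the doorbell write. */
			uint32_t cons = RING_CMPL(ring.ring_size - 1, raw_cons);

			printf("cmpl %2d -> slot %u, expected phase %d\n",
			       i, cons, valid);
			NEXT_CMPL(&cpr, raw_cons, valid, 1);
		}
		return 0;
	}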
diff --git a/drivers/net/bnxt/Makefile b/drivers/net/bnxt/Makefile index fd0cb523..8be3cb0e 100644 --- a/drivers/net/bnxt/Makefile +++ b/drivers/net/bnxt/Makefile @@ -29,6 +29,7 @@ EXPORT_MAP := rte_pmd_bnxt_version.map SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_cpr.c SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_ethdev.c SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_filter.c +SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_flow.c SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_hwrm.c SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_ring.c SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_rxq.c @@ -38,6 +39,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_txq.c SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_txr.c SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_vnic.c SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_irq.c +SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_util.c SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += rte_pmd_bnxt.c # diff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h index afaaf8c4..db5c4eb0 100644 --- a/drivers/net/bnxt/bnxt.h +++ b/drivers/net/bnxt/bnxt.h @@ -22,8 +22,24 @@ #define BNXT_MAX_MTU 9500 #define VLAN_TAG_SIZE 4 +#define BNXT_VF_RSV_NUM_RSS_CTX 1 +#define BNXT_VF_RSV_NUM_L2_CTX 4 +/* TODO: For now, do not support VMDq/RFS on VFs. */ +#define BNXT_VF_RSV_NUM_VNIC 1 #define BNXT_MAX_LED 4 #define BNXT_NUM_VLANS 2 +#define BNXT_MIN_RING_DESC 16 +#define BNXT_MAX_TX_RING_DESC 4096 +#define BNXT_MAX_RX_RING_DESC 8192 +#define BNXT_DB_SIZE 0x80 + +#define BNXT_INT_LAT_TMR_MIN 75 +#define BNXT_INT_LAT_TMR_MAX 150 +#define BNXT_NUM_CMPL_AGGR_INT 36 +#define BNXT_CMPL_AGGR_DMA_TMR 37 +#define BNXT_NUM_CMPL_DMA_AGGR 36 +#define BNXT_CMPL_AGGR_DMA_TMR_DURING_INT 50 +#define BNXT_NUM_CMPL_DMA_AGGR_DURING_INT 12 struct bnxt_led_info { uint8_t led_id; @@ -98,6 +114,7 @@ struct bnxt_child_vf_info { struct bnxt_pf_info { #define BNXT_FIRST_PF_FID 1 #define BNXT_MAX_VFS(bp) (bp->pf.max_vfs) +#define BNXT_TOTAL_VFS(bp) ((bp)->pf.total_vfs) #define BNXT_FIRST_VF_FID 128 #define BNXT_PF_RINGS_USED(bp) bnxt_get_num_queues(bp) #define BNXT_PF_RINGS_AVAIL(bp) (bp->pf.max_cp_rings - BNXT_PF_RINGS_USED(bp)) @@ -105,6 +122,9 @@ struct bnxt_pf_info { uint16_t first_vf_id; uint16_t active_vfs; uint16_t max_vfs; + uint16_t total_vfs; /* Total VFs possible. + * Not necessarily enabled. 
+ */ uint32_t func_cfg_flags; void *vf_req_buf; rte_iova_t vf_req_buf_dma_addr; @@ -202,6 +222,16 @@ struct bnxt_ptp_cfg { uint32_t tx_mapped_regs[BNXT_PTP_TX_REGS]; }; +struct bnxt_coal { + uint16_t num_cmpl_aggr_int; + uint16_t num_cmpl_dma_aggr; + uint16_t num_cmpl_dma_aggr_during_int; + uint16_t int_lat_tmr_max; + uint16_t int_lat_tmr_min; + uint16_t cmpl_aggr_dma_tmr; + uint16_t cmpl_aggr_dma_tmr_during_int; +}; + #define BNXT_HWRM_SHORT_REQ_LEN sizeof(struct hwrm_short_input) struct bnxt { void *bar0; @@ -302,12 +332,14 @@ struct bnxt { struct bnxt_led_info leds[BNXT_MAX_LED]; uint8_t num_leds; struct bnxt_ptp_cfg *ptp_cfg; + uint16_t vf_resv_strategy; }; int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete); int bnxt_rcv_msg_from_vf(struct bnxt *bp, uint16_t vf_id, void *msg); bool is_bnxt_supported(struct rte_eth_dev *dev); +bool bnxt_stratus_device(struct bnxt *bp); extern const struct rte_flow_ops bnxt_flow_ops; extern int bnxt_logtype_driver; diff --git a/drivers/net/bnxt/bnxt_cpr.h b/drivers/net/bnxt/bnxt_cpr.h index 6c1e6d2b..c7af5698 100644 --- a/drivers/net/bnxt/bnxt_cpr.h +++ b/drivers/net/bnxt/bnxt_cpr.h @@ -22,12 +22,20 @@ #define ADV_RAW_CMP(idx, n) ((idx) + (n)) #define NEXT_RAW_CMP(idx) ADV_RAW_CMP(idx, 1) #define RING_CMP(ring, idx) ((idx) & (ring)->ring_mask) +#define RING_CMPL(ring_mask, idx) ((idx) & (ring_mask)) #define NEXT_CMP(idx) RING_CMP(ADV_RAW_CMP(idx, 1)) #define FLIP_VALID(cons, mask, val) ((cons) >= (mask) ? !(val) : (val)) #define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID) #define DB_CP_FLAGS (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS) +#define NEXT_CMPL(cpr, idx, v, inc) do { \ + (idx) += (inc); \ + if (unlikely((idx) == (cpr)->cp_ring_struct->ring_size)) { \ + (v) = !(v); \ + (idx) = 0; \ + } \ +} while (0) #define B_CP_DB_REARM(cpr, raw_cons) \ rte_write32((DB_CP_REARM_FLAGS | \ RING_CMP(((cpr)->cp_ring_struct), raw_cons)), \ @@ -50,6 +58,10 @@ rte_write32((DB_CP_FLAGS | \ RING_CMP(((cpr)->cp_ring_struct), raw_cons)), \ ((cpr)->cp_doorbell)) +#define B_CP_DB(cpr, raw_cons, ring_mask) \ + rte_write32((DB_CP_FLAGS | \ + RING_CMPL((ring_mask), raw_cons)), \ + ((cpr)->cp_doorbell)) struct bnxt_ring; struct bnxt_cp_ring_info { diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c index 6e56bfd3..cc7e4391 100644 --- a/drivers/net/bnxt/bnxt_ethdev.c +++ b/drivers/net/bnxt/bnxt_ethdev.c @@ -26,6 +26,7 @@ #include "bnxt_vnic.h" #include "hsi_struct_def_dpdk.h" #include "bnxt_nvm_defs.h" +#include "bnxt_util.h" #define DRV_MODULE_NAME "bnxt" static const char bnxt_version[] = @@ -73,6 +74,7 @@ int bnxt_logtype_driver; #define BROADCOM_DEV_ID_58802 0xd802 #define BROADCOM_DEV_ID_58804 0xd804 #define BROADCOM_DEV_ID_58808 0x16f0 +#define BROADCOM_DEV_ID_58802_VF 0xd800 static const struct rte_pci_id bnxt_pci_id_map[] = { { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, @@ -116,6 +118,7 @@ static const struct rte_pci_id bnxt_pci_id_map[] = { { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802) }, { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58804) }, { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58808) }, + { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_58802_VF) }, { .vendor_id = 0, /* sentinel */ }, }; @@ -147,11 +150,13 @@ static const struct rte_pci_id bnxt_pci_id_map[] = { DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | \ DEV_RX_OFFLOAD_JUMBO_FRAME | \ DEV_RX_OFFLOAD_CRC_STRIP | \ + DEV_RX_OFFLOAD_KEEP_CRC | \ DEV_RX_OFFLOAD_TCP_LRO) static int bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, 
int mask); static void bnxt_print_link_info(struct rte_eth_dev *eth_dev); static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu); +static int bnxt_dev_uninit(struct rte_eth_dev *eth_dev); /***********************/ @@ -195,13 +200,14 @@ alloc_mem_err: static int bnxt_init_chip(struct bnxt *bp) { - unsigned int i; + struct bnxt_rx_queue *rxq; struct rte_eth_link new; struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(bp->eth_dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; uint32_t intr_vector = 0; uint32_t queue_id, base = BNXT_MISC_VEC_ID; uint32_t vec = BNXT_MISC_VEC_ID; + unsigned int i, j; int rc; /* disable uio/vfio intr/eventfd mapping */ @@ -243,7 +249,19 @@ static int bnxt_init_chip(struct bnxt *bp) /* VNIC configuration */ for (i = 0; i < bp->nr_vnics; i++) { + struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; + uint32_t size = sizeof(*vnic->fw_grp_ids) * bp->max_ring_grps; + + vnic->fw_grp_ids = rte_zmalloc("vnic_fw_grp_ids", size, 0); + if (!vnic->fw_grp_ids) { + PMD_DRV_LOG(ERR, + "Failed to alloc %d bytes for group ids\n", + size); + rc = -ENOMEM; + goto err_out; + } + memset(vnic->fw_grp_ids, -1, size); rc = bnxt_hwrm_vnic_alloc(bp, vnic); if (rc) { @@ -252,12 +270,15 @@ static int bnxt_init_chip(struct bnxt *bp) goto err_out; } - rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic); - if (rc) { - PMD_DRV_LOG(ERR, - "HWRM vnic %d ctx alloc failure rc: %x\n", - i, rc); - goto err_out; + /* Alloc RSS context only if RSS mode is enabled */ + if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS) { + rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic); + if (rc) { + PMD_DRV_LOG(ERR, + "HWRM vnic %d ctx alloc failure rc: %x\n", + i, rc); + goto err_out; + } } rc = bnxt_hwrm_vnic_cfg(bp, vnic); @@ -275,6 +296,13 @@ static int bnxt_init_chip(struct bnxt *bp) goto err_out; } + for (j = 0; j < bp->rx_nr_rings; j++) { + rxq = bp->eth_dev->data->rx_queues[j]; + + if (rxq->rx_deferred_start) + rxq->vnic->fw_grp_ids[j] = INVALID_HW_RING_ID; + } + rc = bnxt_vnic_rss_configure(bp, vnic); if (rc) { PMD_DRV_LOG(ERR, @@ -410,7 +438,7 @@ static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev, /* For the sake of symmetry, max_rx_queues = max_tx_queues */ dev_info->max_rx_queues = max_rx_rings; dev_info->max_tx_queues = max_rx_rings; - dev_info->reta_size = bp->max_rsscos_ctx; + dev_info->reta_size = HW_HASH_INDEX_SIZE; dev_info->hash_key_size = 40; max_vnics = bp->max_vnics; @@ -449,6 +477,10 @@ static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev, eth_dev->data->dev_conf.intr_conf.lsc = 1; eth_dev->data->dev_conf.intr_conf.rxq = 1; + dev_info->rx_desc_lim.nb_min = BNXT_MIN_RING_DESC; + dev_info->rx_desc_lim.nb_max = BNXT_MAX_RX_RING_DESC; + dev_info->tx_desc_lim.nb_min = BNXT_MIN_RING_DESC; + dev_info->tx_desc_lim.nb_max = BNXT_MAX_TX_RING_DESC; /* *INDENT-ON* */ @@ -489,6 +521,7 @@ static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev) { struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads; + int rc; bp->rx_queues = (void *)eth_dev->data->rx_queues; bp->tx_queues = (void *)eth_dev->data->tx_queues; @@ -496,19 +529,23 @@ static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev) bp->rx_nr_rings = eth_dev->data->nb_rx_queues; if (BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)) { - int rc; + rc = bnxt_hwrm_check_vf_rings(bp); + if (rc) { + PMD_DRV_LOG(ERR, "HWRM insufficient resources\n"); + return -ENOSPC; + } - rc = 
bnxt_hwrm_func_reserve_vf_resc(bp); + rc = bnxt_hwrm_func_reserve_vf_resc(bp, false); if (rc) { PMD_DRV_LOG(ERR, "HWRM resource alloc fail:%x\n", rc); return -ENOSPC; } - + } else { /* legacy driver needs to get updated values */ rc = bnxt_hwrm_func_qcaps(bp); if (rc) { PMD_DRV_LOG(ERR, "hwrm func qcaps fail:%d\n", rc); - return -ENOSPC; + return rc; } } @@ -519,7 +556,9 @@ static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev) bp->max_cp_rings || eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues > bp->max_stat_ctx || - (uint32_t)(eth_dev->data->nb_rx_queues) > bp->max_ring_grps) { + (uint32_t)(eth_dev->data->nb_rx_queues) > bp->max_ring_grps || + (!(eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) && + bp->max_vnics < eth_dev->data->nb_rx_queues)) { PMD_DRV_LOG(ERR, "Insufficient resources to support requested config\n"); PMD_DRV_LOG(ERR, @@ -527,9 +566,9 @@ static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev) eth_dev->data->nb_tx_queues, eth_dev->data->nb_rx_queues); PMD_DRV_LOG(ERR, - "Res available: TxQ %d, RxQ %d, CQ %d Stat %d, Grp %d\n", + "MAX: TxQ %d, RxQ %d, CQ %d Stat %d, Grp %d, Vnic %d\n", bp->max_tx_rings, bp->max_rx_rings, bp->max_cp_rings, - bp->max_stat_ctx, bp->max_ring_grps); + bp->max_stat_ctx, bp->max_ring_grps, bp->max_vnics); return -ENOSPC; } @@ -664,6 +703,8 @@ static void bnxt_dev_close_op(struct rte_eth_dev *eth_dev) rte_free(bp->grp_info); bp->grp_info = NULL; } + + bnxt_dev_uninit(eth_dev); } static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev, @@ -1287,9 +1328,9 @@ static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id) struct bnxt_vnic_info *vnic; unsigned int i; int rc = 0; - uint32_t en = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN | - HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK; - uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN; + uint32_t en = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN | + HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK; + uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN; /* Cycle through all VNICs */ for (i = 0; i < bp->nr_vnics; i++) { @@ -1336,8 +1377,8 @@ static int bnxt_add_vlan_filter(struct bnxt *bp, uint16_t vlan_id) memcpy(new_filter->l2_addr, filter->l2_addr, ETHER_ADDR_LEN); /* MAC + VLAN ID filter */ - new_filter->l2_ovlan = vlan_id; - new_filter->l2_ovlan_mask = 0xF000; + new_filter->l2_ivlan = vlan_id; + new_filter->l2_ivlan_mask = 0xF000; new_filter->enables |= en; rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, @@ -1563,6 +1604,7 @@ static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu) for (i = 0; i < bp->nr_vnics; i++) { struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; + uint16_t size = 0; vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + VLAN_TAG_SIZE * 2; @@ -1570,9 +1612,14 @@ static int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu) if (rc) break; - rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic); - if (rc) - return rc; + size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool); + size -= RTE_PKTMBUF_HEADROOM; + + if (size < new_mtu) { + rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic); + if (rc) + return rc; + } } return rc; @@ -3061,6 +3108,18 @@ static bool bnxt_vf_pciid(uint16_t id) id == BROADCOM_DEV_ID_5741X_VF || id == BROADCOM_DEV_ID_57414_VF || id == BROADCOM_DEV_ID_STRATUS_NIC_VF1 || + id == BROADCOM_DEV_ID_STRATUS_NIC_VF2 || + id == BROADCOM_DEV_ID_58802_VF) + return true; + return false; +} + +bool bnxt_stratus_device(struct bnxt *bp) +{ + uint16_t id = 
bp->pdev->id.device_id; + + if (id == BROADCOM_DEV_ID_STRATUS_NIC || + id == BROADCOM_DEV_ID_STRATUS_NIC_VF1 || id == BROADCOM_DEV_ID_STRATUS_NIC_VF2) return true; return false; @@ -3112,7 +3171,6 @@ init_err_disable: return rc; } -static int bnxt_dev_uninit(struct rte_eth_dev *eth_dev); #define ALLOW_FUNC(x) \ { \ @@ -3404,13 +3462,15 @@ error: } static int -bnxt_dev_uninit(struct rte_eth_dev *eth_dev) { +bnxt_dev_uninit(struct rte_eth_dev *eth_dev) +{ struct bnxt *bp = eth_dev->data->dev_private; int rc; if (rte_eal_process_type() != RTE_PROC_PRIMARY) return -EPERM; + PMD_DRV_LOG(DEBUG, "Calling Device uninit\n"); bnxt_disable_int(bp); bnxt_free_int(bp); bnxt_free_mem(bp); @@ -3424,8 +3484,17 @@ bnxt_dev_uninit(struct rte_eth_dev *eth_dev) { } rc = bnxt_hwrm_func_driver_unregister(bp, 0); bnxt_free_hwrm_resources(bp); - rte_memzone_free((const struct rte_memzone *)bp->tx_mem_zone); - rte_memzone_free((const struct rte_memzone *)bp->rx_mem_zone); + + if (bp->tx_mem_zone) { + rte_memzone_free((const struct rte_memzone *)bp->tx_mem_zone); + bp->tx_mem_zone = NULL; + } + + if (bp->rx_mem_zone) { + rte_memzone_free((const struct rte_memzone *)bp->rx_mem_zone); + bp->rx_mem_zone = NULL; + } + if (bp->dev_stopped == 0) bnxt_dev_close_op(eth_dev); if (bp->pf.vf_info) @@ -3471,9 +3540,7 @@ bool is_bnxt_supported(struct rte_eth_dev *dev) return is_device_supported(dev, &bnxt_rte_pmd); } -RTE_INIT(bnxt_init_log); -static void -bnxt_init_log(void) +RTE_INIT(bnxt_init_log) { bnxt_logtype_driver = rte_log_register("pmd.bnxt.driver"); if (bnxt_logtype_driver >= 0) diff --git a/drivers/net/bnxt/bnxt_filter.c b/drivers/net/bnxt/bnxt_filter.c index e36da997..1038941e 100644 --- a/drivers/net/bnxt/bnxt_filter.c +++ b/drivers/net/bnxt/bnxt_filter.c @@ -117,16 +117,29 @@ void bnxt_free_filter_mem(struct bnxt *bp) max_filters = bp->max_l2_ctx; for (i = 0; i < max_filters; i++) { filter = &bp->filter_info[i]; - if (filter->fw_l2_filter_id != ((uint64_t)-1)) { - PMD_DRV_LOG(ERR, "HWRM filter is not freed??\n"); + if (filter->fw_l2_filter_id != ((uint64_t)-1) && + filter->filter_type == HWRM_CFA_L2_FILTER) { + PMD_DRV_LOG(ERR, "L2 filter is not free\n"); /* Call HWRM to try to free filter again */ rc = bnxt_hwrm_clear_l2_filter(bp, filter); if (rc) PMD_DRV_LOG(ERR, - "HWRM filter cannot be freed rc = %d\n", - rc); + "Cannot free L2 filter: %d\n", + rc); } filter->fw_l2_filter_id = UINT64_MAX; + + if (filter->fw_ntuple_filter_id != ((uint64_t)-1) && + filter->filter_type == HWRM_CFA_NTUPLE_FILTER) { + PMD_DRV_LOG(ERR, "NTUPLE filter is not free\n"); + /* Call HWRM to try to free filter again */ + rc = bnxt_hwrm_clear_ntuple_filter(bp, filter); + if (rc) + PMD_DRV_LOG(ERR, + "Cannot free NTUPLE filter: %d\n", + rc); + } + filter->fw_ntuple_filter_id = UINT64_MAX; } STAILQ_INIT(&bp->free_filter_list); @@ -180,1072 +193,3 @@ void bnxt_free_filter(struct bnxt *bp, struct bnxt_filter_info *filter) { STAILQ_INSERT_TAIL(&bp->free_filter_list, filter, next); } - -static int -bnxt_flow_agrs_validate(const struct rte_flow_attr *attr, - const struct rte_flow_item pattern[], - const struct rte_flow_action actions[], - struct rte_flow_error *error) -{ - if (!pattern) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM_NUM, - NULL, "NULL pattern."); - return -rte_errno; - } - - if (!actions) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION_NUM, - NULL, "NULL action."); - return -rte_errno; - } - - if (!attr) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ATTR, - NULL, "NULL 
attribute."); - return -rte_errno; - } - - return 0; -} - -static const struct rte_flow_item * -nxt_non_void_pattern(const struct rte_flow_item *cur) -{ - while (1) { - if (cur->type != RTE_FLOW_ITEM_TYPE_VOID) - return cur; - cur++; - } -} - -static const struct rte_flow_action * -nxt_non_void_action(const struct rte_flow_action *cur) -{ - while (1) { - if (cur->type != RTE_FLOW_ACTION_TYPE_VOID) - return cur; - cur++; - } -} - -int bnxt_check_zero_bytes(const uint8_t *bytes, int len) -{ - int i; - for (i = 0; i < len; i++) - if (bytes[i] != 0x00) - return 0; - return 1; -} - -static int -bnxt_filter_type_check(const struct rte_flow_item pattern[], - struct rte_flow_error *error __rte_unused) -{ - const struct rte_flow_item *item = nxt_non_void_pattern(pattern); - int use_ntuple = 1; - - while (item->type != RTE_FLOW_ITEM_TYPE_END) { - switch (item->type) { - case RTE_FLOW_ITEM_TYPE_ETH: - use_ntuple = 1; - break; - case RTE_FLOW_ITEM_TYPE_VLAN: - use_ntuple = 0; - break; - case RTE_FLOW_ITEM_TYPE_IPV4: - case RTE_FLOW_ITEM_TYPE_IPV6: - case RTE_FLOW_ITEM_TYPE_TCP: - case RTE_FLOW_ITEM_TYPE_UDP: - /* FALLTHROUGH */ - /* need ntuple match, reset exact match */ - if (!use_ntuple) { - PMD_DRV_LOG(ERR, - "VLAN flow cannot use NTUPLE filter\n"); - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, - "Cannot use VLAN with NTUPLE"); - return -rte_errno; - } - use_ntuple |= 1; - break; - default: - PMD_DRV_LOG(ERR, "Unknown Flow type"); - use_ntuple |= 1; - } - item++; - } - return use_ntuple; -} - -static int -bnxt_validate_and_parse_flow_type(struct bnxt *bp, - const struct rte_flow_attr *attr, - const struct rte_flow_item pattern[], - struct rte_flow_error *error, - struct bnxt_filter_info *filter) -{ - const struct rte_flow_item *item = nxt_non_void_pattern(pattern); - const struct rte_flow_item_vlan *vlan_spec, *vlan_mask; - const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask; - const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask; - const struct rte_flow_item_tcp *tcp_spec, *tcp_mask; - const struct rte_flow_item_udp *udp_spec, *udp_mask; - const struct rte_flow_item_eth *eth_spec, *eth_mask; - const struct rte_flow_item_nvgre *nvgre_spec; - const struct rte_flow_item_nvgre *nvgre_mask; - const struct rte_flow_item_vxlan *vxlan_spec; - const struct rte_flow_item_vxlan *vxlan_mask; - uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF}; - uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF}; - const struct rte_flow_item_vf *vf_spec; - uint32_t tenant_id_be = 0; - bool vni_masked = 0; - bool tni_masked = 0; - uint32_t vf = 0; - int use_ntuple; - uint32_t en = 0; - uint32_t en_ethertype; - int dflt_vnic; - - use_ntuple = bnxt_filter_type_check(pattern, error); - PMD_DRV_LOG(DEBUG, "Use NTUPLE %d\n", use_ntuple); - if (use_ntuple < 0) - return use_ntuple; - - filter->filter_type = use_ntuple ? - HWRM_CFA_NTUPLE_FILTER : HWRM_CFA_EM_FILTER; - en_ethertype = use_ntuple ? 
- NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE : - EM_FLOW_ALLOC_INPUT_EN_ETHERTYPE; - - while (item->type != RTE_FLOW_ITEM_TYPE_END) { - if (item->last) { - /* last or range is NOT supported as match criteria */ - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, - "No support for range"); - return -rte_errno; - } - if (!item->spec || !item->mask) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, - "spec/mask is NULL"); - return -rte_errno; - } - switch (item->type) { - case RTE_FLOW_ITEM_TYPE_ETH: - eth_spec = item->spec; - eth_mask = item->mask; - - /* Source MAC address mask cannot be partially set. - * Should be All 0's or all 1's. - * Destination MAC address mask must not be partially - * set. Should be all 1's or all 0's. - */ - if ((!is_zero_ether_addr(ð_mask->src) && - !is_broadcast_ether_addr(ð_mask->src)) || - (!is_zero_ether_addr(ð_mask->dst) && - !is_broadcast_ether_addr(ð_mask->dst))) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, - "MAC_addr mask not valid"); - return -rte_errno; - } - - /* Mask is not allowed. Only exact matches are */ - if (eth_mask->type && - eth_mask->type != RTE_BE16(0xffff)) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, - "ethertype mask not valid"); - return -rte_errno; - } - - if (is_broadcast_ether_addr(ð_mask->dst)) { - rte_memcpy(filter->dst_macaddr, - ð_spec->dst, 6); - en |= use_ntuple ? - NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR : - EM_FLOW_ALLOC_INPUT_EN_DST_MACADDR; - } - if (is_broadcast_ether_addr(ð_mask->src)) { - rte_memcpy(filter->src_macaddr, - ð_spec->src, 6); - en |= use_ntuple ? - NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_MACADDR : - EM_FLOW_ALLOC_INPUT_EN_SRC_MACADDR; - } /* - * else { - * RTE_LOG(ERR, PMD, "Handle this condition\n"); - * } - */ - if (eth_mask->type) { - filter->ethertype = - rte_be_to_cpu_16(eth_spec->type); - en |= en_ethertype; - } - - break; - case RTE_FLOW_ITEM_TYPE_VLAN: - vlan_spec = item->spec; - vlan_mask = item->mask; - if (en & en_ethertype) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, - "VLAN TPID matching is not" - " supported"); - return -rte_errno; - } - if (vlan_mask->tci && - vlan_mask->tci == RTE_BE16(0x0fff)) { - /* Only the VLAN ID can be matched. */ - filter->l2_ovlan = - rte_be_to_cpu_16(vlan_spec->tci & - RTE_BE16(0x0fff)); - en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID; - } else if (vlan_mask->tci) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, - "VLAN mask is invalid"); - return -rte_errno; - } - if (vlan_mask->inner_type && - vlan_mask->inner_type != RTE_BE16(0xffff)) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, - "inner ethertype mask not" - " valid"); - return -rte_errno; - } - if (vlan_mask->inner_type) { - filter->ethertype = - rte_be_to_cpu_16(vlan_spec->inner_type); - en |= en_ethertype; - } - - break; - case RTE_FLOW_ITEM_TYPE_IPV4: - /* If mask is not involved, we could use EM filters. */ - ipv4_spec = item->spec; - ipv4_mask = item->mask; - /* Only IP DST and SRC fields are maskable. 
*/ - if (ipv4_mask->hdr.version_ihl || - ipv4_mask->hdr.type_of_service || - ipv4_mask->hdr.total_length || - ipv4_mask->hdr.packet_id || - ipv4_mask->hdr.fragment_offset || - ipv4_mask->hdr.time_to_live || - ipv4_mask->hdr.next_proto_id || - ipv4_mask->hdr.hdr_checksum) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, - "Invalid IPv4 mask."); - return -rte_errno; - } - filter->dst_ipaddr[0] = ipv4_spec->hdr.dst_addr; - filter->src_ipaddr[0] = ipv4_spec->hdr.src_addr; - if (use_ntuple) - en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR | - NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; - else - en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR | - EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR; - if (ipv4_mask->hdr.src_addr) { - filter->src_ipaddr_mask[0] = - ipv4_mask->hdr.src_addr; - en |= !use_ntuple ? 0 : - NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; - } - if (ipv4_mask->hdr.dst_addr) { - filter->dst_ipaddr_mask[0] = - ipv4_mask->hdr.dst_addr; - en |= !use_ntuple ? 0 : - NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; - } - filter->ip_addr_type = use_ntuple ? - HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 : - HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV4; - if (ipv4_spec->hdr.next_proto_id) { - filter->ip_protocol = - ipv4_spec->hdr.next_proto_id; - if (use_ntuple) - en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; - else - en |= EM_FLOW_ALLOC_INPUT_EN_IP_PROTO; - } - break; - case RTE_FLOW_ITEM_TYPE_IPV6: - ipv6_spec = item->spec; - ipv6_mask = item->mask; - - /* Only IP DST and SRC fields are maskable. */ - if (ipv6_mask->hdr.vtc_flow || - ipv6_mask->hdr.payload_len || - ipv6_mask->hdr.proto || - ipv6_mask->hdr.hop_limits) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, - "Invalid IPv6 mask."); - return -rte_errno; - } - - if (use_ntuple) - en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR | - NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; - else - en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR | - EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR; - rte_memcpy(filter->src_ipaddr, - ipv6_spec->hdr.src_addr, 16); - rte_memcpy(filter->dst_ipaddr, - ipv6_spec->hdr.dst_addr, 16); - if (!bnxt_check_zero_bytes(ipv6_mask->hdr.src_addr, - 16)) { - rte_memcpy(filter->src_ipaddr_mask, - ipv6_mask->hdr.src_addr, 16); - en |= !use_ntuple ? 0 : - NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; - } - if (!bnxt_check_zero_bytes(ipv6_mask->hdr.dst_addr, - 16)) { - rte_memcpy(filter->dst_ipaddr_mask, - ipv6_mask->hdr.dst_addr, 16); - en |= !use_ntuple ? 0 : - NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; - } - filter->ip_addr_type = use_ntuple ? - NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 : - EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6; - break; - case RTE_FLOW_ITEM_TYPE_TCP: - tcp_spec = item->spec; - tcp_mask = item->mask; - - /* Check TCP mask. Only DST & SRC ports are maskable */ - if (tcp_mask->hdr.sent_seq || - tcp_mask->hdr.recv_ack || - tcp_mask->hdr.data_off || - tcp_mask->hdr.tcp_flags || - tcp_mask->hdr.rx_win || - tcp_mask->hdr.cksum || - tcp_mask->hdr.tcp_urp) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, - "Invalid TCP mask"); - return -rte_errno; - } - filter->src_port = tcp_spec->hdr.src_port; - filter->dst_port = tcp_spec->hdr.dst_port; - if (use_ntuple) - en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT | - NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT; - else - en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT | - EM_FLOW_ALLOC_INPUT_EN_DST_PORT; - if (tcp_mask->hdr.dst_port) { - filter->dst_port_mask = tcp_mask->hdr.dst_port; - en |= !use_ntuple ? 
0 : - NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK; - } - if (tcp_mask->hdr.src_port) { - filter->src_port_mask = tcp_mask->hdr.src_port; - en |= !use_ntuple ? 0 : - NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK; - } - break; - case RTE_FLOW_ITEM_TYPE_UDP: - udp_spec = item->spec; - udp_mask = item->mask; - - if (udp_mask->hdr.dgram_len || - udp_mask->hdr.dgram_cksum) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, - "Invalid UDP mask"); - return -rte_errno; - } - - filter->src_port = udp_spec->hdr.src_port; - filter->dst_port = udp_spec->hdr.dst_port; - if (use_ntuple) - en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT | - NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT; - else - en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT | - EM_FLOW_ALLOC_INPUT_EN_DST_PORT; - - if (udp_mask->hdr.dst_port) { - filter->dst_port_mask = udp_mask->hdr.dst_port; - en |= !use_ntuple ? 0 : - NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK; - } - if (udp_mask->hdr.src_port) { - filter->src_port_mask = udp_mask->hdr.src_port; - en |= !use_ntuple ? 0 : - NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK; - } - break; - case RTE_FLOW_ITEM_TYPE_VXLAN: - vxlan_spec = item->spec; - vxlan_mask = item->mask; - /* Check if VXLAN item is used to describe protocol. - * If yes, both spec and mask should be NULL. - * If no, both spec and mask shouldn't be NULL. - */ - if ((!vxlan_spec && vxlan_mask) || - (vxlan_spec && !vxlan_mask)) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, - "Invalid VXLAN item"); - return -rte_errno; - } - - if (vxlan_spec->rsvd1 || vxlan_spec->rsvd0[0] || - vxlan_spec->rsvd0[1] || vxlan_spec->rsvd0[2] || - vxlan_spec->flags != 0x8) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, - "Invalid VXLAN item"); - return -rte_errno; - } - - /* Check if VNI is masked. */ - if (vxlan_spec && vxlan_mask) { - vni_masked = - !!memcmp(vxlan_mask->vni, vni_mask, - RTE_DIM(vni_mask)); - if (vni_masked) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, - "Invalid VNI mask"); - return -rte_errno; - } - - rte_memcpy(((uint8_t *)&tenant_id_be + 1), - vxlan_spec->vni, 3); - filter->vni = - rte_be_to_cpu_32(tenant_id_be); - filter->tunnel_type = - CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN; - } - break; - case RTE_FLOW_ITEM_TYPE_NVGRE: - nvgre_spec = item->spec; - nvgre_mask = item->mask; - /* Check if NVGRE item is used to describe protocol. - * If yes, both spec and mask should be NULL. - * If no, both spec and mask shouldn't be NULL. 
- */ - if ((!nvgre_spec && nvgre_mask) || - (nvgre_spec && !nvgre_mask)) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, - "Invalid NVGRE item"); - return -rte_errno; - } - - if (nvgre_spec->c_k_s_rsvd0_ver != 0x2000 || - nvgre_spec->protocol != 0x6558) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, - "Invalid NVGRE item"); - return -rte_errno; - } - - if (nvgre_spec && nvgre_mask) { - tni_masked = - !!memcmp(nvgre_mask->tni, tni_mask, - RTE_DIM(tni_mask)); - if (tni_masked) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, - "Invalid TNI mask"); - return -rte_errno; - } - rte_memcpy(((uint8_t *)&tenant_id_be + 1), - nvgre_spec->tni, 3); - filter->vni = - rte_be_to_cpu_32(tenant_id_be); - filter->tunnel_type = - CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE; - } - break; - case RTE_FLOW_ITEM_TYPE_VF: - vf_spec = item->spec; - vf = vf_spec->id; - if (!BNXT_PF(bp)) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, - "Configuring on a VF!"); - return -rte_errno; - } - - if (vf >= bp->pdev->max_vfs) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, - "Incorrect VF id!"); - return -rte_errno; - } - - if (!attr->transfer) { - rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ITEM, - item, - "Matching VF traffic without" - " affecting it (transfer attribute)" - " is unsupported"); - return -rte_errno; - } - - filter->mirror_vnic_id = - dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf); - if (dflt_vnic < 0) { - /* This simply indicates there's no driver - * loaded. This is not an error. - */ - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, - item, - "Unable to get default VNIC for VF"); - return -rte_errno; - } - filter->mirror_vnic_id = dflt_vnic; - en |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID; - break; - default: - break; - } - item++; - } - filter->enables = en; - - return 0; -} - -/* Parse attributes */ -static int -bnxt_flow_parse_attr(const struct rte_flow_attr *attr, - struct rte_flow_error *error) -{ - /* Must be input direction */ - if (!attr->ingress) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, - attr, "Only support ingress."); - return -rte_errno; - } - - /* Not supported */ - if (attr->egress) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, - attr, "No support for egress."); - return -rte_errno; - } - - /* Not supported */ - if (attr->priority) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, - attr, "No support for priority."); - return -rte_errno; - } - - /* Not supported */ - if (attr->group) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ATTR_GROUP, - attr, "No support for group."); - return -rte_errno; - } - - return 0; -} - -struct bnxt_filter_info * -bnxt_get_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf, - struct bnxt_vnic_info *vnic) -{ - struct bnxt_filter_info *filter1, *f0; - struct bnxt_vnic_info *vnic0; - int rc; - - vnic0 = STAILQ_FIRST(&bp->ff_pool[0]); - f0 = STAILQ_FIRST(&vnic0->filter); - - //This flow has same DST MAC as the port/l2 filter. 
- if (memcmp(f0->l2_addr, nf->dst_macaddr, ETHER_ADDR_LEN) == 0) - return f0; - - //This flow needs DST MAC which is not same as port/l2 - PMD_DRV_LOG(DEBUG, "Create L2 filter for DST MAC\n"); - filter1 = bnxt_get_unused_filter(bp); - if (filter1 == NULL) - return NULL; - filter1->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX; - filter1->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR | - L2_FILTER_ALLOC_INPUT_EN_L2_ADDR_MASK; - memcpy(filter1->l2_addr, nf->dst_macaddr, ETHER_ADDR_LEN); - memset(filter1->l2_addr_mask, 0xff, ETHER_ADDR_LEN); - rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, - filter1); - if (rc) { - bnxt_free_filter(bp, filter1); - return NULL; - } - return filter1; -} - -static int -bnxt_validate_and_parse_flow(struct rte_eth_dev *dev, - const struct rte_flow_item pattern[], - const struct rte_flow_action actions[], - const struct rte_flow_attr *attr, - struct rte_flow_error *error, - struct bnxt_filter_info *filter) -{ - const struct rte_flow_action *act = nxt_non_void_action(actions); - struct bnxt *bp = (struct bnxt *)dev->data->dev_private; - const struct rte_flow_action_queue *act_q; - const struct rte_flow_action_vf *act_vf; - struct bnxt_vnic_info *vnic, *vnic0; - struct bnxt_filter_info *filter1; - uint32_t vf = 0; - int dflt_vnic; - int rc; - - if (bp->eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) { - PMD_DRV_LOG(ERR, "Cannot create flow on RSS queues\n"); - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, - "Cannot create flow on RSS queues"); - rc = -rte_errno; - goto ret; - } - - rc = bnxt_validate_and_parse_flow_type(bp, attr, pattern, error, - filter); - if (rc != 0) - goto ret; - - rc = bnxt_flow_parse_attr(attr, error); - if (rc != 0) - goto ret; - //Since we support ingress attribute only - right now. - if (filter->filter_type == HWRM_CFA_EM_FILTER) - filter->flags = HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX; - - switch (act->type) { - case RTE_FLOW_ACTION_TYPE_QUEUE: - /* Allow this flow. Redirect to a VNIC. 
*/ - act_q = (const struct rte_flow_action_queue *)act->conf; - if (act_q->index >= bp->rx_nr_rings) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION, act, - "Invalid queue ID."); - rc = -rte_errno; - goto ret; - } - PMD_DRV_LOG(DEBUG, "Queue index %d\n", act_q->index); - - vnic0 = STAILQ_FIRST(&bp->ff_pool[0]); - vnic = STAILQ_FIRST(&bp->ff_pool[act_q->index]); - if (vnic == NULL) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION, act, - "No matching VNIC for queue ID."); - rc = -rte_errno; - goto ret; - } - filter->dst_id = vnic->fw_vnic_id; - filter1 = bnxt_get_l2_filter(bp, filter, vnic); - if (filter1 == NULL) { - rc = -ENOSPC; - goto ret; - } - filter->fw_l2_filter_id = filter1->fw_l2_filter_id; - PMD_DRV_LOG(DEBUG, "VNIC found\n"); - break; - case RTE_FLOW_ACTION_TYPE_DROP: - vnic0 = STAILQ_FIRST(&bp->ff_pool[0]); - filter1 = bnxt_get_l2_filter(bp, filter, vnic0); - if (filter1 == NULL) { - rc = -ENOSPC; - goto ret; - } - filter->fw_l2_filter_id = filter1->fw_l2_filter_id; - if (filter->filter_type == HWRM_CFA_EM_FILTER) - filter->flags = - HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DROP; - else - filter->flags = - HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP; - break; - case RTE_FLOW_ACTION_TYPE_COUNT: - vnic0 = STAILQ_FIRST(&bp->ff_pool[0]); - filter1 = bnxt_get_l2_filter(bp, filter, vnic0); - if (filter1 == NULL) { - rc = -ENOSPC; - goto ret; - } - filter->fw_l2_filter_id = filter1->fw_l2_filter_id; - filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_METER; - break; - case RTE_FLOW_ACTION_TYPE_VF: - act_vf = (const struct rte_flow_action_vf *)act->conf; - vf = act_vf->id; - if (!BNXT_PF(bp)) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION, - act, - "Configuring on a VF!"); - rc = -rte_errno; - goto ret; - } - - if (vf >= bp->pdev->max_vfs) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION, - act, - "Incorrect VF id!"); - rc = -rte_errno; - goto ret; - } - - filter->mirror_vnic_id = - dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf); - if (dflt_vnic < 0) { - /* This simply indicates there's no driver loaded. - * This is not an error. 
- */ - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION, - act, - "Unable to get default VNIC for VF"); - rc = -rte_errno; - goto ret; - } - filter->mirror_vnic_id = dflt_vnic; - filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID; - - vnic0 = STAILQ_FIRST(&bp->ff_pool[0]); - filter1 = bnxt_get_l2_filter(bp, filter, vnic0); - if (filter1 == NULL) { - rc = -ENOSPC; - goto ret; - } - filter->fw_l2_filter_id = filter1->fw_l2_filter_id; - break; - - default: - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION, act, - "Invalid action."); - rc = -rte_errno; - goto ret; - } - - act = nxt_non_void_action(++act); - if (act->type != RTE_FLOW_ACTION_TYPE_END) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION, - act, "Invalid action."); - rc = -rte_errno; - goto ret; - } -ret: - return rc; -} - -static int -bnxt_flow_validate(struct rte_eth_dev *dev, - const struct rte_flow_attr *attr, - const struct rte_flow_item pattern[], - const struct rte_flow_action actions[], - struct rte_flow_error *error) -{ - struct bnxt *bp = (struct bnxt *)dev->data->dev_private; - struct bnxt_filter_info *filter; - int ret = 0; - - ret = bnxt_flow_agrs_validate(attr, pattern, actions, error); - if (ret != 0) - return ret; - - filter = bnxt_get_unused_filter(bp); - if (filter == NULL) { - PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n"); - return -ENOMEM; - } - - ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr, - error, filter); - /* No need to hold on to this filter if we are just validating flow */ - filter->fw_l2_filter_id = UINT64_MAX; - bnxt_free_filter(bp, filter); - - return ret; -} - -static int -bnxt_match_filter(struct bnxt *bp, struct bnxt_filter_info *nf) -{ - struct bnxt_filter_info *mf; - struct rte_flow *flow; - int i; - - for (i = bp->nr_vnics - 1; i >= 0; i--) { - struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; - - STAILQ_FOREACH(flow, &vnic->flow_list, next) { - mf = flow->filter; - - if (mf->filter_type == nf->filter_type && - mf->flags == nf->flags && - mf->src_port == nf->src_port && - mf->src_port_mask == nf->src_port_mask && - mf->dst_port == nf->dst_port && - mf->dst_port_mask == nf->dst_port_mask && - mf->ip_protocol == nf->ip_protocol && - mf->ip_addr_type == nf->ip_addr_type && - mf->ethertype == nf->ethertype && - mf->vni == nf->vni && - mf->tunnel_type == nf->tunnel_type && - mf->l2_ovlan == nf->l2_ovlan && - mf->l2_ovlan_mask == nf->l2_ovlan_mask && - mf->l2_ivlan == nf->l2_ivlan && - mf->l2_ivlan_mask == nf->l2_ivlan_mask && - !memcmp(mf->l2_addr, nf->l2_addr, ETHER_ADDR_LEN) && - !memcmp(mf->l2_addr_mask, nf->l2_addr_mask, - ETHER_ADDR_LEN) && - !memcmp(mf->src_macaddr, nf->src_macaddr, - ETHER_ADDR_LEN) && - !memcmp(mf->dst_macaddr, nf->dst_macaddr, - ETHER_ADDR_LEN) && - !memcmp(mf->src_ipaddr, nf->src_ipaddr, - sizeof(nf->src_ipaddr)) && - !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask, - sizeof(nf->src_ipaddr_mask)) && - !memcmp(mf->dst_ipaddr, nf->dst_ipaddr, - sizeof(nf->dst_ipaddr)) && - !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask, - sizeof(nf->dst_ipaddr_mask))) { - if (mf->dst_id == nf->dst_id) - return -EEXIST; - /* Same Flow, Different queue - * Clear the old ntuple filter - */ - if (nf->filter_type == HWRM_CFA_EM_FILTER) - bnxt_hwrm_clear_em_filter(bp, mf); - if (nf->filter_type == HWRM_CFA_NTUPLE_FILTER) - bnxt_hwrm_clear_ntuple_filter(bp, mf); - /* Free the old filter, update flow - * with new filter - */ - bnxt_free_filter(bp, mf); - flow->filter = nf; - return -EXDEV; - } - } - } - return 0; 
-} - -static struct rte_flow * -bnxt_flow_create(struct rte_eth_dev *dev, - const struct rte_flow_attr *attr, - const struct rte_flow_item pattern[], - const struct rte_flow_action actions[], - struct rte_flow_error *error) -{ - struct bnxt *bp = (struct bnxt *)dev->data->dev_private; - struct bnxt_filter_info *filter; - struct bnxt_vnic_info *vnic = NULL; - bool update_flow = false; - struct rte_flow *flow; - unsigned int i; - int ret = 0; - - flow = rte_zmalloc("bnxt_flow", sizeof(struct rte_flow), 0); - if (!flow) { - rte_flow_error_set(error, ENOMEM, - RTE_FLOW_ERROR_TYPE_HANDLE, NULL, - "Failed to allocate memory"); - return flow; - } - - ret = bnxt_flow_agrs_validate(attr, pattern, actions, error); - if (ret != 0) { - PMD_DRV_LOG(ERR, "Not a validate flow.\n"); - goto free_flow; - } - - filter = bnxt_get_unused_filter(bp); - if (filter == NULL) { - PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n"); - goto free_flow; - } - - ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr, - error, filter); - if (ret != 0) - goto free_filter; - - ret = bnxt_match_filter(bp, filter); - if (ret == -EEXIST) { - PMD_DRV_LOG(DEBUG, "Flow already exists.\n"); - /* Clear the filter that was created as part of - * validate_and_parse_flow() above - */ - bnxt_hwrm_clear_l2_filter(bp, filter); - goto free_filter; - } else if (ret == -EXDEV) { - PMD_DRV_LOG(DEBUG, "Flow with same pattern exists"); - PMD_DRV_LOG(DEBUG, "Updating with different destination\n"); - update_flow = true; - } - - if (filter->filter_type == HWRM_CFA_EM_FILTER) { - filter->enables |= - HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID; - ret = bnxt_hwrm_set_em_filter(bp, filter->dst_id, filter); - } - if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) { - filter->enables |= - HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID; - ret = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id, filter); - } - - for (i = 0; i < bp->nr_vnics; i++) { - vnic = &bp->vnic_info[i]; - if (filter->dst_id == vnic->fw_vnic_id) - break; - } - - if (!ret) { - flow->filter = filter; - flow->vnic = vnic; - if (update_flow) { - ret = -EXDEV; - goto free_flow; - } - PMD_DRV_LOG(ERR, "Successfully created flow.\n"); - STAILQ_INSERT_TAIL(&vnic->flow_list, flow, next); - return flow; - } -free_filter: - bnxt_free_filter(bp, filter); -free_flow: - if (ret == -EEXIST) - rte_flow_error_set(error, ret, - RTE_FLOW_ERROR_TYPE_HANDLE, NULL, - "Matching Flow exists."); - else if (ret == -EXDEV) - rte_flow_error_set(error, ret, - RTE_FLOW_ERROR_TYPE_HANDLE, NULL, - "Flow with pattern exists, updating destination queue"); - else - rte_flow_error_set(error, -ret, - RTE_FLOW_ERROR_TYPE_HANDLE, NULL, - "Failed to create flow."); - rte_free(flow); - flow = NULL; - return flow; -} - -static int -bnxt_flow_destroy(struct rte_eth_dev *dev, - struct rte_flow *flow, - struct rte_flow_error *error) -{ - struct bnxt *bp = (struct bnxt *)dev->data->dev_private; - struct bnxt_filter_info *filter = flow->filter; - struct bnxt_vnic_info *vnic = flow->vnic; - int ret = 0; - - ret = bnxt_match_filter(bp, filter); - if (ret == 0) - PMD_DRV_LOG(ERR, "Could not find matching flow\n"); - if (filter->filter_type == HWRM_CFA_EM_FILTER) - ret = bnxt_hwrm_clear_em_filter(bp, filter); - if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) - ret = bnxt_hwrm_clear_ntuple_filter(bp, filter); - else - ret = bnxt_hwrm_clear_l2_filter(bp, filter); - if (!ret) { - STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next); - rte_free(flow); - } else { - rte_flow_error_set(error, -ret, - 
RTE_FLOW_ERROR_TYPE_HANDLE, NULL, - "Failed to destroy flow."); - } - - return ret; -} - -static int -bnxt_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error) -{ - struct bnxt *bp = (struct bnxt *)dev->data->dev_private; - struct bnxt_vnic_info *vnic; - struct rte_flow *flow; - unsigned int i; - int ret = 0; - - for (i = 0; i < bp->nr_vnics; i++) { - vnic = &bp->vnic_info[i]; - STAILQ_FOREACH(flow, &vnic->flow_list, next) { - struct bnxt_filter_info *filter = flow->filter; - - if (filter->filter_type == HWRM_CFA_EM_FILTER) - ret = bnxt_hwrm_clear_em_filter(bp, filter); - if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) - ret = bnxt_hwrm_clear_ntuple_filter(bp, filter); - - if (ret) { - rte_flow_error_set(error, -ret, - RTE_FLOW_ERROR_TYPE_HANDLE, - NULL, - "Failed to flush flow in HW."); - return -rte_errno; - } - - STAILQ_REMOVE(&vnic->flow_list, flow, - rte_flow, next); - rte_free(flow); - } - } - - return ret; -} - -const struct rte_flow_ops bnxt_flow_ops = { - .validate = bnxt_flow_validate, - .create = bnxt_flow_create, - .destroy = bnxt_flow_destroy, - .flush = bnxt_flow_flush, -}; diff --git a/drivers/net/bnxt/bnxt_filter.h b/drivers/net/bnxt/bnxt_filter.h index d27be703..a1ecfb19 100644 --- a/drivers/net/bnxt/bnxt_filter.h +++ b/drivers/net/bnxt/bnxt_filter.h @@ -69,7 +69,6 @@ struct bnxt_filter_info *bnxt_get_unused_filter(struct bnxt *bp); void bnxt_free_filter(struct bnxt *bp, struct bnxt_filter_info *filter); struct bnxt_filter_info *bnxt_get_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf, struct bnxt_vnic_info *vnic); -int bnxt_check_zero_bytes(const uint8_t *bytes, int len); #define NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_MACADDR \ HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR diff --git a/drivers/net/bnxt/bnxt_flow.c b/drivers/net/bnxt/bnxt_flow.c new file mode 100644 index 00000000..ac765674 --- /dev/null +++ b/drivers/net/bnxt/bnxt_flow.c @@ -0,0 +1,1171 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Broadcom + * All rights reserved. 
+ */ + +#include <sys/queue.h> + +#include <rte_log.h> +#include <rte_malloc.h> +#include <rte_flow.h> +#include <rte_flow_driver.h> +#include <rte_tailq.h> + +#include "bnxt.h" +#include "bnxt_filter.h" +#include "bnxt_hwrm.h" +#include "bnxt_vnic.h" +#include "bnxt_util.h" +#include "hsi_struct_def_dpdk.h" + +static int +bnxt_flow_args_validate(const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + if (!pattern) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_NUM, + NULL, + "NULL pattern."); + return -rte_errno; + } + + if (!actions) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_NUM, + NULL, + "NULL action."); + return -rte_errno; + } + + if (!attr) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR, + NULL, + "NULL attribute."); + return -rte_errno; + } + + return 0; +} + +static const struct rte_flow_item * +bnxt_flow_non_void_item(const struct rte_flow_item *cur) +{ + while (1) { + if (cur->type != RTE_FLOW_ITEM_TYPE_VOID) + return cur; + cur++; + } +} + +static const struct rte_flow_action * +bnxt_flow_non_void_action(const struct rte_flow_action *cur) +{ + while (1) { + if (cur->type != RTE_FLOW_ACTION_TYPE_VOID) + return cur; + cur++; + } +} + +static int +bnxt_filter_type_check(const struct rte_flow_item pattern[], + struct rte_flow_error *error __rte_unused) +{ + const struct rte_flow_item *item = + bnxt_flow_non_void_item(pattern); + int use_ntuple = 1; + + while (item->type != RTE_FLOW_ITEM_TYPE_END) { + switch (item->type) { + case RTE_FLOW_ITEM_TYPE_ETH: + use_ntuple = 1; + break; + case RTE_FLOW_ITEM_TYPE_VLAN: + use_ntuple = 0; + break; + case RTE_FLOW_ITEM_TYPE_IPV4: + case RTE_FLOW_ITEM_TYPE_IPV6: + case RTE_FLOW_ITEM_TYPE_TCP: + case RTE_FLOW_ITEM_TYPE_UDP: + /* FALLTHROUGH */ + /* need ntuple match, reset exact match */ + if (!use_ntuple) { + PMD_DRV_LOG(ERR, + "VLAN flow cannot use NTUPLE filter\n"); + rte_flow_error_set + (error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Cannot use VLAN with NTUPLE"); + return -rte_errno; + } + use_ntuple |= 1; + break; + default: + PMD_DRV_LOG(ERR, "Unknown Flow type\n"); + use_ntuple |= 1; + } + item++; + } + return use_ntuple; +} + +static int +bnxt_validate_and_parse_flow_type(struct bnxt *bp, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + struct rte_flow_error *error, + struct bnxt_filter_info *filter) +{ + const struct rte_flow_item *item = bnxt_flow_non_void_item(pattern); + const struct rte_flow_item_vlan *vlan_spec, *vlan_mask; + const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask; + const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask; + const struct rte_flow_item_tcp *tcp_spec, *tcp_mask; + const struct rte_flow_item_udp *udp_spec, *udp_mask; + const struct rte_flow_item_eth *eth_spec, *eth_mask; + const struct rte_flow_item_nvgre *nvgre_spec; + const struct rte_flow_item_nvgre *nvgre_mask; + const struct rte_flow_item_vxlan *vxlan_spec; + const struct rte_flow_item_vxlan *vxlan_mask; + uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF}; + uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF}; + const struct rte_flow_item_vf *vf_spec; + uint32_t tenant_id_be = 0; + bool vni_masked = 0; + bool tni_masked = 0; + uint32_t vf = 0; + int use_ntuple; + uint32_t en = 0; + uint32_t en_ethertype; + int dflt_vnic; + + use_ntuple = bnxt_filter_type_check(pattern, error); + PMD_DRV_LOG(DEBUG, "Use NTUPLE %d\n", use_ntuple); + if (use_ntuple < 0) + return 
use_ntuple; + + filter->filter_type = use_ntuple ? + HWRM_CFA_NTUPLE_FILTER : HWRM_CFA_EM_FILTER; + en_ethertype = use_ntuple ? + NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE : + EM_FLOW_ALLOC_INPUT_EN_ETHERTYPE; + + while (item->type != RTE_FLOW_ITEM_TYPE_END) { + if (item->last) { + /* last or range is NOT supported as match criteria */ + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "No support for range"); + return -rte_errno; + } + + if (!item->spec || !item->mask) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "spec/mask is NULL"); + return -rte_errno; + } + + switch (item->type) { + case RTE_FLOW_ITEM_TYPE_ETH: + eth_spec = item->spec; + eth_mask = item->mask; + + /* Source MAC address mask cannot be partially set. + * Should be All 0's or all 1's. + * Destination MAC address mask must not be partially + * set. Should be all 1's or all 0's. + */ + if ((!is_zero_ether_addr(ð_mask->src) && + !is_broadcast_ether_addr(ð_mask->src)) || + (!is_zero_ether_addr(ð_mask->dst) && + !is_broadcast_ether_addr(ð_mask->dst))) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "MAC_addr mask not valid"); + return -rte_errno; + } + + /* Mask is not allowed. Only exact matches are */ + if (eth_mask->type && + eth_mask->type != RTE_BE16(0xffff)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "ethertype mask not valid"); + return -rte_errno; + } + + if (is_broadcast_ether_addr(ð_mask->dst)) { + rte_memcpy(filter->dst_macaddr, + ð_spec->dst, 6); + en |= use_ntuple ? + NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR : + EM_FLOW_ALLOC_INPUT_EN_DST_MACADDR; + } + + if (is_broadcast_ether_addr(ð_mask->src)) { + rte_memcpy(filter->src_macaddr, + ð_spec->src, 6); + en |= use_ntuple ? + NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_MACADDR : + EM_FLOW_ALLOC_INPUT_EN_SRC_MACADDR; + } /* + * else { + * PMD_DRV_LOG(ERR, "Handle this condition\n"); + * } + */ + if (eth_mask->type) { + filter->ethertype = + rte_be_to_cpu_16(eth_spec->type); + en |= en_ethertype; + } + + break; + case RTE_FLOW_ITEM_TYPE_VLAN: + vlan_spec = item->spec; + vlan_mask = item->mask; + if (en & en_ethertype) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "VLAN TPID matching is not" + " supported"); + return -rte_errno; + } + if (vlan_mask->tci && + vlan_mask->tci == RTE_BE16(0x0fff)) { + /* Only the VLAN ID can be matched. */ + filter->l2_ovlan = + rte_be_to_cpu_16(vlan_spec->tci & + RTE_BE16(0x0fff)); + en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID; + } else { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "VLAN mask is invalid"); + return -rte_errno; + } + if (vlan_mask->inner_type && + vlan_mask->inner_type != RTE_BE16(0xffff)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "inner ethertype mask not" + " valid"); + return -rte_errno; + } + if (vlan_mask->inner_type) { + filter->ethertype = + rte_be_to_cpu_16(vlan_spec->inner_type); + en |= en_ethertype; + } + + break; + case RTE_FLOW_ITEM_TYPE_IPV4: + /* If mask is not involved, we could use EM filters. */ + ipv4_spec = item->spec; + ipv4_mask = item->mask; + /* Only IP DST and SRC fields are maskable. 
*/ + if (ipv4_mask->hdr.version_ihl || + ipv4_mask->hdr.type_of_service || + ipv4_mask->hdr.total_length || + ipv4_mask->hdr.packet_id || + ipv4_mask->hdr.fragment_offset || + ipv4_mask->hdr.time_to_live || + ipv4_mask->hdr.next_proto_id || + ipv4_mask->hdr.hdr_checksum) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid IPv4 mask."); + return -rte_errno; + } + + filter->dst_ipaddr[0] = ipv4_spec->hdr.dst_addr; + filter->src_ipaddr[0] = ipv4_spec->hdr.src_addr; + + if (use_ntuple) + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR | + NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; + else + en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR | + EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR; + + if (ipv4_mask->hdr.src_addr) { + filter->src_ipaddr_mask[0] = + ipv4_mask->hdr.src_addr; + en |= !use_ntuple ? 0 : + NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; + } + + if (ipv4_mask->hdr.dst_addr) { + filter->dst_ipaddr_mask[0] = + ipv4_mask->hdr.dst_addr; + en |= !use_ntuple ? 0 : + NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; + } + + filter->ip_addr_type = use_ntuple ? + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 : + HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV4; + + if (ipv4_spec->hdr.next_proto_id) { + filter->ip_protocol = + ipv4_spec->hdr.next_proto_id; + if (use_ntuple) + en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO; + else + en |= EM_FLOW_ALLOC_INPUT_EN_IP_PROTO; + } + break; + case RTE_FLOW_ITEM_TYPE_IPV6: + ipv6_spec = item->spec; + ipv6_mask = item->mask; + + /* Only IP DST and SRC fields are maskable. */ + if (ipv6_mask->hdr.vtc_flow || + ipv6_mask->hdr.payload_len || + ipv6_mask->hdr.proto || + ipv6_mask->hdr.hop_limits) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid IPv6 mask."); + return -rte_errno; + } + + if (use_ntuple) + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR | + NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR; + else + en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR | + EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR; + + rte_memcpy(filter->src_ipaddr, + ipv6_spec->hdr.src_addr, 16); + rte_memcpy(filter->dst_ipaddr, + ipv6_spec->hdr.dst_addr, 16); + + if (!bnxt_check_zero_bytes(ipv6_mask->hdr.src_addr, + 16)) { + rte_memcpy(filter->src_ipaddr_mask, + ipv6_mask->hdr.src_addr, 16); + en |= !use_ntuple ? 0 : + NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK; + } + + if (!bnxt_check_zero_bytes(ipv6_mask->hdr.dst_addr, + 16)) { + rte_memcpy(filter->dst_ipaddr_mask, + ipv6_mask->hdr.dst_addr, 16); + en |= !use_ntuple ? 0 : + NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK; + } + + filter->ip_addr_type = use_ntuple ? + NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 : + EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6; + break; + case RTE_FLOW_ITEM_TYPE_TCP: + tcp_spec = item->spec; + tcp_mask = item->mask; + + /* Check TCP mask. Only DST & SRC ports are maskable */ + if (tcp_mask->hdr.sent_seq || + tcp_mask->hdr.recv_ack || + tcp_mask->hdr.data_off || + tcp_mask->hdr.tcp_flags || + tcp_mask->hdr.rx_win || + tcp_mask->hdr.cksum || + tcp_mask->hdr.tcp_urp) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid TCP mask"); + return -rte_errno; + } + + filter->src_port = tcp_spec->hdr.src_port; + filter->dst_port = tcp_spec->hdr.dst_port; + + if (use_ntuple) + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT | + NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT; + else + en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT | + EM_FLOW_ALLOC_INPUT_EN_DST_PORT; + + if (tcp_mask->hdr.dst_port) { + filter->dst_port_mask = tcp_mask->hdr.dst_port; + en |= !use_ntuple ? 
0 : + NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK; + } + + if (tcp_mask->hdr.src_port) { + filter->src_port_mask = tcp_mask->hdr.src_port; + en |= !use_ntuple ? 0 : + NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK; + } + break; + case RTE_FLOW_ITEM_TYPE_UDP: + udp_spec = item->spec; + udp_mask = item->mask; + + if (udp_mask->hdr.dgram_len || + udp_mask->hdr.dgram_cksum) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid UDP mask"); + return -rte_errno; + } + + filter->src_port = udp_spec->hdr.src_port; + filter->dst_port = udp_spec->hdr.dst_port; + + if (use_ntuple) + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT | + NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT; + else + en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT | + EM_FLOW_ALLOC_INPUT_EN_DST_PORT; + + if (udp_mask->hdr.dst_port) { + filter->dst_port_mask = udp_mask->hdr.dst_port; + en |= !use_ntuple ? 0 : + NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK; + } + + if (udp_mask->hdr.src_port) { + filter->src_port_mask = udp_mask->hdr.src_port; + en |= !use_ntuple ? 0 : + NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK; + } + break; + case RTE_FLOW_ITEM_TYPE_VXLAN: + vxlan_spec = item->spec; + vxlan_mask = item->mask; + /* Check if VXLAN item is used to describe protocol. + * If yes, both spec and mask should be NULL. + * If no, both spec and mask shouldn't be NULL. + */ + if ((!vxlan_spec && vxlan_mask) || + (vxlan_spec && !vxlan_mask)) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid VXLAN item"); + return -rte_errno; + } + + if (vxlan_spec->rsvd1 || vxlan_spec->rsvd0[0] || + vxlan_spec->rsvd0[1] || vxlan_spec->rsvd0[2] || + vxlan_spec->flags != 0x8) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid VXLAN item"); + return -rte_errno; + } + + /* Check if VNI is masked. */ + if (vxlan_spec && vxlan_mask) { + vni_masked = + !!memcmp(vxlan_mask->vni, vni_mask, + RTE_DIM(vni_mask)); + if (vni_masked) { + rte_flow_error_set + (error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid VNI mask"); + return -rte_errno; + } + + rte_memcpy(((uint8_t *)&tenant_id_be + 1), + vxlan_spec->vni, 3); + filter->vni = + rte_be_to_cpu_32(tenant_id_be); + filter->tunnel_type = + CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN; + } + break; + case RTE_FLOW_ITEM_TYPE_NVGRE: + nvgre_spec = item->spec; + nvgre_mask = item->mask; + /* Check if NVGRE item is used to describe protocol. + * If yes, both spec and mask should be NULL. + * If no, both spec and mask shouldn't be NULL. 
+ */ + if ((!nvgre_spec && nvgre_mask) || + (nvgre_spec && !nvgre_mask)) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid NVGRE item"); + return -rte_errno; + } + + if (nvgre_spec->c_k_s_rsvd0_ver != 0x2000 || + nvgre_spec->protocol != 0x6558) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid NVGRE item"); + return -rte_errno; + } + + if (nvgre_spec && nvgre_mask) { + tni_masked = + !!memcmp(nvgre_mask->tni, tni_mask, + RTE_DIM(tni_mask)); + if (tni_masked) { + rte_flow_error_set + (error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid TNI mask"); + return -rte_errno; + } + rte_memcpy(((uint8_t *)&tenant_id_be + 1), + nvgre_spec->tni, 3); + filter->vni = + rte_be_to_cpu_32(tenant_id_be); + filter->tunnel_type = + CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE; + } + break; + case RTE_FLOW_ITEM_TYPE_VF: + vf_spec = item->spec; + vf = vf_spec->id; + + if (!BNXT_PF(bp)) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Configuring on a VF!"); + return -rte_errno; + } + + if (vf >= bp->pdev->max_vfs) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Incorrect VF id!"); + return -rte_errno; + } + + if (!attr->transfer) { + rte_flow_error_set(error, + ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Matching VF traffic without" + " affecting it (transfer attribute)" + " is unsupported"); + return -rte_errno; + } + + filter->mirror_vnic_id = + dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf); + if (dflt_vnic < 0) { + /* This simply indicates there's no driver + * loaded. This is not an error. + */ + rte_flow_error_set + (error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Unable to get default VNIC for VF"); + return -rte_errno; + } + + filter->mirror_vnic_id = dflt_vnic; + en |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID; + break; + default: + break; + } + item++; + } + filter->enables = en; + + return 0; +} + +/* Parse attributes */ +static int +bnxt_flow_parse_attr(const struct rte_flow_attr *attr, + struct rte_flow_error *error) +{ + /* Must be input direction */ + if (!attr->ingress) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, + attr, + "Only support ingress."); + return -rte_errno; + } + + /* Not supported */ + if (attr->egress) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, + attr, + "No support for egress."); + return -rte_errno; + } + + /* Not supported */ + if (attr->priority) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, + attr, + "No support for priority."); + return -rte_errno; + } + + /* Not supported */ + if (attr->group) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_GROUP, + attr, + "No support for group."); + return -rte_errno; + } + + return 0; +} + +struct bnxt_filter_info * +bnxt_get_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf, + struct bnxt_vnic_info *vnic) +{ + struct bnxt_filter_info *filter1, *f0; + struct bnxt_vnic_info *vnic0; + int rc; + + vnic0 = STAILQ_FIRST(&bp->ff_pool[0]); + f0 = STAILQ_FIRST(&vnic0->filter); + + /* This flow has same DST MAC as the port/l2 filter. 
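+ * For example, with a (hypothetical) port MAC of 00:11:22:33:44:55, a
+ * rule matching 'eth dst is 00:11:22:33:44:55' reuses f0 below rather
+ * than allocating a second L2 filter.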
*/ + if (memcmp(f0->l2_addr, nf->dst_macaddr, ETHER_ADDR_LEN) == 0) + return f0; + + /* This flow needs DST MAC which is not same as port/l2 */ + PMD_DRV_LOG(DEBUG, "Create L2 filter for DST MAC\n"); + filter1 = bnxt_get_unused_filter(bp); + if (filter1 == NULL) + return NULL; + + filter1->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX; + filter1->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR | + L2_FILTER_ALLOC_INPUT_EN_L2_ADDR_MASK; + memcpy(filter1->l2_addr, nf->dst_macaddr, ETHER_ADDR_LEN); + memset(filter1->l2_addr_mask, 0xff, ETHER_ADDR_LEN); + rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, + filter1); + if (rc) { + bnxt_free_filter(bp, filter1); + return NULL; + } + return filter1; +} + +static int +bnxt_validate_and_parse_flow(struct rte_eth_dev *dev, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + const struct rte_flow_attr *attr, + struct rte_flow_error *error, + struct bnxt_filter_info *filter) +{ + const struct rte_flow_action *act = + bnxt_flow_non_void_action(actions); + struct bnxt *bp = (struct bnxt *)dev->data->dev_private; + const struct rte_flow_action_queue *act_q; + const struct rte_flow_action_vf *act_vf; + struct bnxt_vnic_info *vnic, *vnic0; + struct bnxt_filter_info *filter1; + uint32_t vf = 0; + int dflt_vnic; + int rc; + + if (bp->eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) { + PMD_DRV_LOG(ERR, "Cannot create flow on RSS queues\n"); + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "Cannot create flow on RSS queues"); + rc = -rte_errno; + goto ret; + } + + rc = + bnxt_validate_and_parse_flow_type(bp, attr, pattern, error, filter); + if (rc != 0) + goto ret; + + rc = bnxt_flow_parse_attr(attr, error); + if (rc != 0) + goto ret; + + /* Since we support ingress attribute only - right now. */ + if (filter->filter_type == HWRM_CFA_EM_FILTER) + filter->flags = HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX; + + switch (act->type) { + case RTE_FLOW_ACTION_TYPE_QUEUE: + /* Allow this flow. Redirect to a VNIC. 
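+ * For example, a (hypothetical) 'actions queue index 3 / end' resolves
+ * to the VNIC backing Rx queue 3; that VNIC's fw_vnic_id then becomes
+ * the filter's dst_id below.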
*/ + act_q = (const struct rte_flow_action_queue *)act->conf; + if (act_q->index >= bp->rx_nr_rings) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "Invalid queue ID."); + rc = -rte_errno; + goto ret; + } + PMD_DRV_LOG(DEBUG, "Queue index %d\n", act_q->index); + + vnic0 = STAILQ_FIRST(&bp->ff_pool[0]); + vnic = STAILQ_FIRST(&bp->ff_pool[act_q->index]); + if (vnic == NULL) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "No matching VNIC for queue ID."); + rc = -rte_errno; + goto ret; + } + + filter->dst_id = vnic->fw_vnic_id; + filter1 = bnxt_get_l2_filter(bp, filter, vnic); + if (filter1 == NULL) { + rc = -ENOSPC; + goto ret; + } + + filter->fw_l2_filter_id = filter1->fw_l2_filter_id; + PMD_DRV_LOG(DEBUG, "VNIC found\n"); + break; + case RTE_FLOW_ACTION_TYPE_DROP: + vnic0 = STAILQ_FIRST(&bp->ff_pool[0]); + filter1 = bnxt_get_l2_filter(bp, filter, vnic0); + if (filter1 == NULL) { + rc = -ENOSPC; + goto ret; + } + + filter->fw_l2_filter_id = filter1->fw_l2_filter_id; + if (filter->filter_type == HWRM_CFA_EM_FILTER) + filter->flags = + HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DROP; + else + filter->flags = + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP; + break; + case RTE_FLOW_ACTION_TYPE_COUNT: + vnic0 = STAILQ_FIRST(&bp->ff_pool[0]); + filter1 = bnxt_get_l2_filter(bp, filter, vnic0); + if (filter1 == NULL) { + rc = -ENOSPC; + goto ret; + } + + filter->fw_l2_filter_id = filter1->fw_l2_filter_id; + filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_METER; + break; + case RTE_FLOW_ACTION_TYPE_VF: + act_vf = (const struct rte_flow_action_vf *)act->conf; + vf = act_vf->id; + + if (!BNXT_PF(bp)) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "Configuring on a VF!"); + rc = -rte_errno; + goto ret; + } + + if (vf >= bp->pdev->max_vfs) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "Incorrect VF id!"); + rc = -rte_errno; + goto ret; + } + + filter->mirror_vnic_id = + dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf); + if (dflt_vnic < 0) { + /* This simply indicates there's no driver loaded. + * This is not an error. 
+ */ + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "Unable to get default VNIC for VF"); + rc = -rte_errno; + goto ret; + } + + filter->mirror_vnic_id = dflt_vnic; + filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID; + + vnic0 = STAILQ_FIRST(&bp->ff_pool[0]); + filter1 = bnxt_get_l2_filter(bp, filter, vnic0); + if (filter1 == NULL) { + rc = -ENOSPC; + goto ret; + } + + filter->fw_l2_filter_id = filter1->fw_l2_filter_id; + break; + + default: + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "Invalid action."); + rc = -rte_errno; + goto ret; + } + + if (filter1) { + bnxt_free_filter(bp, filter1); + filter1->fw_l2_filter_id = -1; + } + + act = bnxt_flow_non_void_action(++act); + if (act->type != RTE_FLOW_ACTION_TYPE_END) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + act, + "Invalid action."); + rc = -rte_errno; + goto ret; + } +ret: + return rc; +} + +static int +bnxt_flow_validate(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + struct bnxt *bp = (struct bnxt *)dev->data->dev_private; + struct bnxt_filter_info *filter; + int ret = 0; + + ret = bnxt_flow_args_validate(attr, pattern, actions, error); + if (ret != 0) + return ret; + + filter = bnxt_get_unused_filter(bp); + if (filter == NULL) { + PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n"); + return -ENOMEM; + } + + ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr, + error, filter); + /* No need to hold on to this filter if we are just validating flow */ + filter->fw_l2_filter_id = UINT64_MAX; + bnxt_free_filter(bp, filter); + + return ret; +} + +static int +bnxt_match_filter(struct bnxt *bp, struct bnxt_filter_info *nf) +{ + struct bnxt_filter_info *mf; + struct rte_flow *flow; + int i; + + for (i = bp->nr_vnics - 1; i >= 0; i--) { + struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; + + STAILQ_FOREACH(flow, &vnic->flow_list, next) { + mf = flow->filter; + + if (mf->filter_type == nf->filter_type && + mf->flags == nf->flags && + mf->src_port == nf->src_port && + mf->src_port_mask == nf->src_port_mask && + mf->dst_port == nf->dst_port && + mf->dst_port_mask == nf->dst_port_mask && + mf->ip_protocol == nf->ip_protocol && + mf->ip_addr_type == nf->ip_addr_type && + mf->ethertype == nf->ethertype && + mf->vni == nf->vni && + mf->tunnel_type == nf->tunnel_type && + mf->l2_ovlan == nf->l2_ovlan && + mf->l2_ovlan_mask == nf->l2_ovlan_mask && + mf->l2_ivlan == nf->l2_ivlan && + mf->l2_ivlan_mask == nf->l2_ivlan_mask && + !memcmp(mf->l2_addr, nf->l2_addr, ETHER_ADDR_LEN) && + !memcmp(mf->l2_addr_mask, nf->l2_addr_mask, + ETHER_ADDR_LEN) && + !memcmp(mf->src_macaddr, nf->src_macaddr, + ETHER_ADDR_LEN) && + !memcmp(mf->dst_macaddr, nf->dst_macaddr, + ETHER_ADDR_LEN) && + !memcmp(mf->src_ipaddr, nf->src_ipaddr, + sizeof(nf->src_ipaddr)) && + !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask, + sizeof(nf->src_ipaddr_mask)) && + !memcmp(mf->dst_ipaddr, nf->dst_ipaddr, + sizeof(nf->dst_ipaddr)) && + !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask, + sizeof(nf->dst_ipaddr_mask))) { + if (mf->dst_id == nf->dst_id) + return -EEXIST; + /* + * Same Flow, Different queue + * Clear the old ntuple filter + * Reuse the matching L2 filter + * ID for the new filter + */ + nf->fw_l2_filter_id = mf->fw_l2_filter_id; + if (nf->filter_type == HWRM_CFA_EM_FILTER) + bnxt_hwrm_clear_em_filter(bp, mf); + if (nf->filter_type == 
HWRM_CFA_NTUPLE_FILTER) + bnxt_hwrm_clear_ntuple_filter(bp, mf); + /* Free the old filter, update flow + * with new filter + */ + bnxt_free_filter(bp, mf); + flow->filter = nf; + return -EXDEV; + } + } + } + return 0; +} + +static struct rte_flow * +bnxt_flow_create(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + struct bnxt *bp = (struct bnxt *)dev->data->dev_private; + struct bnxt_filter_info *filter; + struct bnxt_vnic_info *vnic = NULL; + bool update_flow = false; + struct rte_flow *flow; + unsigned int i; + int ret = 0; + + flow = rte_zmalloc("bnxt_flow", sizeof(struct rte_flow), 0); + if (!flow) { + rte_flow_error_set(error, ENOMEM, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Failed to allocate memory"); + return flow; + } + + ret = bnxt_flow_args_validate(attr, pattern, actions, error); + if (ret != 0) { + PMD_DRV_LOG(ERR, "Not a valid flow.\n"); + goto free_flow; + } + + filter = bnxt_get_unused_filter(bp); + if (filter == NULL) { + PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n"); + goto free_flow; + } + + ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr, + error, filter); + if (ret != 0) + goto free_filter; + + ret = bnxt_match_filter(bp, filter); + if (ret == -EEXIST) { + PMD_DRV_LOG(DEBUG, "Flow already exists.\n"); + /* Clear the filter that was created as part of + * validate_and_parse_flow() above + */ + bnxt_hwrm_clear_l2_filter(bp, filter); + goto free_filter; + } else if (ret == -EXDEV) { + PMD_DRV_LOG(DEBUG, "Flow with same pattern exists\n"); + PMD_DRV_LOG(DEBUG, "Updating with different destination\n"); + update_flow = true; + } + + if (filter->filter_type == HWRM_CFA_EM_FILTER) { + filter->enables |= + HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID; + ret = bnxt_hwrm_set_em_filter(bp, filter->dst_id, filter); + } + + if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) { + filter->enables |= + HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID; + ret = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id, filter); + } + + for (i = 0; i < bp->nr_vnics; i++) { + vnic = &bp->vnic_info[i]; + if (filter->dst_id == vnic->fw_vnic_id) + break; + } + + if (!ret) { + flow->filter = filter; + flow->vnic = vnic; + if (update_flow) { + ret = -EXDEV; + goto free_flow; + } + PMD_DRV_LOG(DEBUG, "Successfully created flow.\n"); + STAILQ_INSERT_TAIL(&vnic->flow_list, flow, next); + return flow; + } +free_filter: + bnxt_free_filter(bp, filter); +free_flow: + if (ret == -EEXIST) + rte_flow_error_set(error, ret, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Matching Flow exists."); + else if (ret == -EXDEV) + rte_flow_error_set(error, ret, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Flow with pattern exists, updating destination queue"); + else + rte_flow_error_set(error, -ret, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Failed to create flow."); + rte_free(flow); + flow = NULL; + return flow; +} + +static int +bnxt_flow_destroy(struct rte_eth_dev *dev, + struct rte_flow *flow, + struct rte_flow_error *error) +{ + struct bnxt *bp = (struct bnxt *)dev->data->dev_private; + struct bnxt_filter_info *filter = flow->filter; + struct bnxt_vnic_info *vnic = flow->vnic; + int ret = 0; + + ret = bnxt_match_filter(bp, filter); + if (ret == 0) + PMD_DRV_LOG(ERR, "Could not find matching flow\n"); + if (filter->filter_type == HWRM_CFA_EM_FILTER) + ret = bnxt_hwrm_clear_em_filter(bp, filter); + else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) + ret =
bnxt_hwrm_clear_ntuple_filter(bp, filter); + else + ret = bnxt_hwrm_clear_l2_filter(bp, filter); + if (!ret) { + STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next); + rte_free(flow); + } else { + rte_flow_error_set(error, -ret, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Failed to destroy flow."); + } + + return ret; +} + +static int +bnxt_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error) +{ + struct bnxt *bp = (struct bnxt *)dev->data->dev_private; + struct bnxt_vnic_info *vnic; + struct rte_flow *flow; + unsigned int i; + int ret = 0; + + for (i = 0; i < bp->nr_vnics; i++) { + vnic = &bp->vnic_info[i]; + STAILQ_FOREACH(flow, &vnic->flow_list, next) { + struct bnxt_filter_info *filter = flow->filter; + + if (filter->filter_type == HWRM_CFA_EM_FILTER) + ret = bnxt_hwrm_clear_em_filter(bp, filter); + if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) + ret = bnxt_hwrm_clear_ntuple_filter(bp, filter); + + if (ret) { + rte_flow_error_set + (error, + -ret, + RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, + "Failed to flush flow in HW."); + return -rte_errno; + } + + STAILQ_REMOVE(&vnic->flow_list, flow, + rte_flow, next); + rte_free(flow); + } + } + + return ret; +} + +const struct rte_flow_ops bnxt_flow_ops = { + .validate = bnxt_flow_validate, + .create = bnxt_flow_create, + .destroy = bnxt_flow_destroy, + .flush = bnxt_flow_flush, +}; diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c index d6fdc1b8..c682488a 100644 --- a/drivers/net/bnxt/bnxt_hwrm.c +++ b/drivers/net/bnxt/bnxt_hwrm.c @@ -166,10 +166,26 @@ err_ret: req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \ } while (0) +#define HWRM_CHECK_RESULT_SILENT() do {\ + if (rc) { \ + rte_spinlock_unlock(&bp->hwrm_lock); \ + return rc; \ + } \ + if (resp->error_code) { \ + rc = rte_le_to_cpu_16(resp->error_code); \ + rte_spinlock_unlock(&bp->hwrm_lock); \ + return rc; \ + } \ +} while (0) + #define HWRM_CHECK_RESULT() do {\ if (rc) { \ PMD_DRV_LOG(ERR, "failed rc:%d\n", rc); \ rte_spinlock_unlock(&bp->hwrm_lock); \ + if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \ + rc = -EACCES; \ + else if (rc > 0) \ + rc = -EINVAL; \ return rc; \ } \ if (resp->error_code) { \ @@ -188,6 +204,10 @@ err_ret: PMD_DRV_LOG(ERR, "error %d\n", rc); \ } \ rte_spinlock_unlock(&bp->hwrm_lock); \ + if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \ + rc = -EACCES; \ + else if (rc > 0) \ + rc = -EINVAL; \ return rc; \ } \ } while (0) @@ -376,13 +396,13 @@ int bnxt_hwrm_set_l2_filter(struct bnxt *bp, req.l2_ovlan = filter->l2_ovlan; if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN) - req.l2_ovlan = filter->l2_ivlan; + req.l2_ivlan = filter->l2_ivlan; if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK) req.l2_ovlan_mask = filter->l2_ovlan_mask; if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK) - req.l2_ovlan_mask = filter->l2_ivlan_mask; + req.l2_ivlan_mask = filter->l2_ivlan_mask; if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID) req.src_id = rte_cpu_to_le_32(filter->src_id); if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE) @@ -506,6 +526,7 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) if (BNXT_PF(bp)) { bp->pf.port_id = resp->port_id; bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id); + bp->pf.total_vfs = rte_le_to_cpu_16(resp->max_vfs); new_max_vfs = bp->pdev->max_vfs; if (new_max_vfs != bp->pf.max_vfs) { if (bp->pf.vf_info) @@ -657,9 +678,19 @@ int bnxt_hwrm_func_driver_register(struct bnxt *bp) return rc; } -int 
bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp) +int bnxt_hwrm_check_vf_rings(struct bnxt *bp) +{ + if (!(BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM))) + return 0; + + return bnxt_hwrm_func_reserve_vf_resc(bp, true); +} + +int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp, bool test) { int rc; + uint32_t flags = 0; + uint32_t enables; struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr; struct hwrm_func_vf_cfg_input req = {0}; @@ -670,7 +701,8 @@ int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp) HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_TX_RINGS | HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_STAT_CTXS | HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_CMPL_RINGS | - HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS); + HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS | + HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS); req.num_tx_rings = rte_cpu_to_le_16(bp->tx_nr_rings); req.num_rx_rings = rte_cpu_to_le_16(bp->rx_nr_rings * @@ -679,10 +711,35 @@ int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp) req.num_cmpl_rings = rte_cpu_to_le_16(bp->rx_nr_rings + bp->tx_nr_rings); req.num_hw_ring_grps = rte_cpu_to_le_16(bp->rx_nr_rings); + req.num_vnics = rte_cpu_to_le_16(bp->rx_nr_rings); + if (bp->vf_resv_strategy == + HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC) { + enables = HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS | + HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_L2_CTXS | + HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS; + req.enables |= rte_cpu_to_le_32(enables); + req.num_rsscos_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_RSS_CTX); + req.num_l2_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_L2_CTX); + req.num_vnics = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_VNIC); + } + + if (test) + flags = HWRM_FUNC_VF_CFG_INPUT_FLAGS_TX_ASSETS_TEST | + HWRM_FUNC_VF_CFG_INPUT_FLAGS_RX_ASSETS_TEST | + HWRM_FUNC_VF_CFG_INPUT_FLAGS_CMPL_ASSETS_TEST | + HWRM_FUNC_VF_CFG_INPUT_FLAGS_RING_GRP_ASSETS_TEST | + HWRM_FUNC_VF_CFG_INPUT_FLAGS_STAT_CTX_ASSETS_TEST | + HWRM_FUNC_VF_CFG_INPUT_FLAGS_VNIC_ASSETS_TEST; + + req.flags = rte_cpu_to_le_32(flags); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT(); + if (test) + HWRM_CHECK_RESULT_SILENT(); + else + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); return rc; } @@ -710,6 +767,11 @@ int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp) bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics); bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx); } + bp->vf_resv_strategy = rte_le_to_cpu_16(resp->vf_reservation_strategy); + if (bp->vf_resv_strategy > + HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC) + bp->vf_resv_strategy = + HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MAXIMAL; HWRM_UNLOCK(); return rc; @@ -1265,8 +1327,9 @@ int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic) /* map ring groups to this vnic */ PMD_DRV_LOG(DEBUG, "Alloc VNIC. 
Start %x, End %x\n", vnic->start_grp_id, vnic->end_grp_id); - for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++) + for (i = vnic->start_grp_id, j = 0; i < vnic->end_grp_id; i++, j++) vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id; + vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id; vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE; vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE; @@ -1559,6 +1622,11 @@ int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp, struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr; uint16_t size; + if (vnic->fw_vnic_id == INVALID_HW_RING_ID) { + PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id); + return rc; + } + HWRM_PREP(req, VNIC_PLCMODES_CFG); req.flags = rte_cpu_to_le_32( @@ -1816,8 +1884,7 @@ int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp) return rc; } -static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, - unsigned int idx __rte_unused) +static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr) { struct bnxt_ring *cp_ring = cpr->cp_ring_struct; @@ -1829,17 +1896,52 @@ static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, cpr->cp_raw_cons = 0; } +void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index) +{ + struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index]; + struct bnxt_rx_ring_info *rxr = rxq->rx_ring; + struct bnxt_ring *ring = rxr->rx_ring_struct; + struct bnxt_cp_ring_info *cpr = rxq->cp_ring; + + if (ring->fw_ring_id != INVALID_HW_RING_ID) { + bnxt_hwrm_ring_free(bp, ring, + HWRM_RING_FREE_INPUT_RING_TYPE_RX); + ring->fw_ring_id = INVALID_HW_RING_ID; + bp->grp_info[queue_index].rx_fw_ring_id = INVALID_HW_RING_ID; + memset(rxr->rx_desc_ring, 0, + rxr->rx_ring_struct->ring_size * + sizeof(*rxr->rx_desc_ring)); + memset(rxr->rx_buf_ring, 0, + rxr->rx_ring_struct->ring_size * + sizeof(*rxr->rx_buf_ring)); + rxr->rx_prod = 0; + } + ring = rxr->ag_ring_struct; + if (ring->fw_ring_id != INVALID_HW_RING_ID) { + bnxt_hwrm_ring_free(bp, ring, + HWRM_RING_FREE_INPUT_RING_TYPE_RX); + ring->fw_ring_id = INVALID_HW_RING_ID; + memset(rxr->ag_buf_ring, 0, + rxr->ag_ring_struct->ring_size * + sizeof(*rxr->ag_buf_ring)); + rxr->ag_prod = 0; + bp->grp_info[queue_index].ag_fw_ring_id = INVALID_HW_RING_ID; + } + if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) + bnxt_free_cp_ring(bp, cpr); + + bp->grp_info[queue_index].cp_fw_ring_id = INVALID_HW_RING_ID; +} + int bnxt_free_all_hwrm_rings(struct bnxt *bp) { unsigned int i; - int rc = 0; for (i = 0; i < bp->tx_cp_nr_rings; i++) { struct bnxt_tx_queue *txq = bp->tx_queues[i]; struct bnxt_tx_ring_info *txr = txq->tx_ring; struct bnxt_ring *ring = txr->tx_ring_struct; struct bnxt_cp_ring_info *cpr = txq->cp_ring; - unsigned int idx = bp->rx_cp_nr_rings + i; if (ring->fw_ring_id != INVALID_HW_RING_ID) { bnxt_hwrm_ring_free(bp, ring, @@ -1855,59 +1957,15 @@ int bnxt_free_all_hwrm_rings(struct bnxt *bp) txr->tx_cons = 0; } if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) { - bnxt_free_cp_ring(bp, cpr, idx); - cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID; - } - } - - for (i = 0; i < bp->rx_cp_nr_rings; i++) { - struct bnxt_rx_queue *rxq = bp->rx_queues[i]; - struct bnxt_rx_ring_info *rxr = rxq->rx_ring; - struct bnxt_ring *ring = rxr->rx_ring_struct; - struct bnxt_cp_ring_info *cpr = rxq->cp_ring; - - if (ring->fw_ring_id != INVALID_HW_RING_ID) { - bnxt_hwrm_ring_free(bp, ring, - HWRM_RING_FREE_INPUT_RING_TYPE_RX); - ring->fw_ring_id = INVALID_HW_RING_ID; - 
bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID; - memset(rxr->rx_desc_ring, 0, - rxr->rx_ring_struct->ring_size * - sizeof(*rxr->rx_desc_ring)); - memset(rxr->rx_buf_ring, 0, - rxr->rx_ring_struct->ring_size * - sizeof(*rxr->rx_buf_ring)); - rxr->rx_prod = 0; - } - ring = rxr->ag_ring_struct; - if (ring->fw_ring_id != INVALID_HW_RING_ID) { - bnxt_hwrm_ring_free(bp, ring, - HWRM_RING_FREE_INPUT_RING_TYPE_RX); - ring->fw_ring_id = INVALID_HW_RING_ID; - memset(rxr->ag_buf_ring, 0, - rxr->ag_ring_struct->ring_size * - sizeof(*rxr->ag_buf_ring)); - rxr->ag_prod = 0; - bp->grp_info[i].ag_fw_ring_id = INVALID_HW_RING_ID; - } - if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) { - bnxt_free_cp_ring(bp, cpr, i); - bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID; + bnxt_free_cp_ring(bp, cpr); cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID; } } - /* Default completion ring */ - { - struct bnxt_cp_ring_info *cpr = bp->def_cp_ring; - - if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) { - bnxt_free_cp_ring(bp, cpr, 0); - cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID; - } - } + for (i = 0; i < bp->rx_cp_nr_rings; i++) + bnxt_free_hwrm_rx_ring(bp, i); - return rc; + return 0; } int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp) @@ -1970,6 +2028,7 @@ int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic) rc = bnxt_hwrm_clear_ntuple_filter(bp, filter); else rc = bnxt_hwrm_clear_l2_filter(bp, filter); + STAILQ_REMOVE(&vnic->filter, filter, bnxt_filter_info, next); //if (rc) //break; } @@ -2057,6 +2116,8 @@ void bnxt_free_all_hwrm_resources(struct bnxt *bp) bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false); bnxt_hwrm_vnic_free(bp, vnic); + + rte_free(vnic->fw_grp_ids); } /* Ring resources */ bnxt_free_all_hwrm_rings(bp); @@ -3151,7 +3212,9 @@ int bnxt_hwrm_port_clr_stats(struct bnxt *bp) struct bnxt_pf_info *pf = &bp->pf; int rc; - if (!(bp->flags & BNXT_FLAG_PORT_STATS)) + /* Not allowed on NS2 device, NPAR, MultiHost, VF */ + if (!(bp->flags & BNXT_FLAG_PORT_STATS) || BNXT_VF(bp) || + BNXT_NPAR(bp) || BNXT_MH(bp) || BNXT_TOTAL_VFS(bp)) return 0; HWRM_PREP(req, PORT_CLR_STATS); @@ -3298,13 +3361,12 @@ int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data) req.host_dest_addr = rte_cpu_to_le_64(dma_handle); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT(); - HWRM_UNLOCK(); - if (rc == 0) memcpy(data, buf, len > buflen ? 
buflen : len); rte_free(buf); + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); return rc; } @@ -3336,12 +3398,13 @@ int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index, req.offset = rte_cpu_to_le_32(offset); req.len = rte_cpu_to_le_32(length); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); - HWRM_CHECK_RESULT(); - HWRM_UNLOCK(); if (rc == 0) memcpy(data, buf, length); rte_free(buf); + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); + return rc; } @@ -3372,14 +3435,6 @@ int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type, rte_iova_t dma_handle; uint8_t *buf; - HWRM_PREP(req, NVM_WRITE); - - req.dir_type = rte_cpu_to_le_16(dir_type); - req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal); - req.dir_ext = rte_cpu_to_le_16(dir_ext); - req.dir_attr = rte_cpu_to_le_16(dir_attr); - req.dir_data_length = rte_cpu_to_le_32(data_len); - buf = rte_malloc("nvm_write", data_len, 0); rte_mem_lock_page(buf); if (!buf) @@ -3392,14 +3447,22 @@ int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type, return -ENOMEM; } memcpy(buf, data, data_len); + + HWRM_PREP(req, NVM_WRITE); + + req.dir_type = rte_cpu_to_le_16(dir_type); + req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal); + req.dir_ext = rte_cpu_to_le_16(dir_ext); + req.dir_attr = rte_cpu_to_le_16(dir_attr); + req.dir_data_length = rte_cpu_to_le_32(data_len); req.host_src_addr = rte_cpu_to_le_64(dma_handle); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + rte_free(buf); HWRM_CHECK_RESULT(); HWRM_UNLOCK(); - rte_free(buf); return rc; } @@ -3800,7 +3863,6 @@ int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp, HWRM_UNLOCK(); filter->fw_ntuple_filter_id = UINT64_MAX; - filter->fw_l2_filter_id = UINT64_MAX; return 0; } @@ -3832,3 +3894,54 @@ int bnxt_vnic_rss_configure(struct bnxt *bp, struct bnxt_vnic_info *vnic) } return 0; } + +static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal, + struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req) +{ + uint16_t flags; + + req->num_cmpl_aggr_int = rte_cpu_to_le_16(hw_coal->num_cmpl_aggr_int); + + /* This is a 6-bit value and must not be 0, or we'll get non stop IRQ */ + req->num_cmpl_dma_aggr = rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr); + + /* This is a 6-bit value and must not be 0, or we'll get non stop IRQ */ + req->num_cmpl_dma_aggr_during_int = + rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr_during_int); + + req->int_lat_tmr_max = rte_cpu_to_le_16(hw_coal->int_lat_tmr_max); + + /* min timer set to 1/2 of interrupt timer */ + req->int_lat_tmr_min = rte_cpu_to_le_16(hw_coal->int_lat_tmr_min); + + /* buf timer set to 1/4 of interrupt timer */ + req->cmpl_aggr_dma_tmr = rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr); + + req->cmpl_aggr_dma_tmr_during_int = + rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr_during_int); + + flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET | + HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE; + req->flags = rte_cpu_to_le_16(flags); +} + +int bnxt_hwrm_set_ring_coal(struct bnxt *bp, + struct bnxt_coal *coal, uint16_t ring_id) +{ + struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0}; + struct hwrm_ring_cmpl_ring_cfg_aggint_params_output *resp = + bp->hwrm_cmd_resp_addr; + int rc; + + /* Set ring coalesce parameters only for Stratus 100G NIC */ + if (!bnxt_stratus_device(bp)) + return 0; + + HWRM_PREP(req, RING_CMPL_RING_CFG_AGGINT_PARAMS); + bnxt_hwrm_set_coal_params(coal, &req); + req.ring_id = rte_cpu_to_le_16(ring_id); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); + return 0; +} diff 
--git a/drivers/net/bnxt/bnxt_hwrm.h b/drivers/net/bnxt/bnxt_hwrm.h index 60a4ab16..379aac6e 100644 --- a/drivers/net/bnxt/bnxt_hwrm.h +++ b/drivers/net/bnxt/bnxt_hwrm.h @@ -29,6 +29,9 @@ struct bnxt_cp_ring_info; #define HWRM_QUEUE_SERVICE_PROFILE_LOSSY \ HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_LOSSY +#define HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC \ + HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MINIMAL_STATIC + int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic); int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic, @@ -107,12 +110,13 @@ int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic); int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic); void bnxt_free_all_hwrm_resources(struct bnxt *bp); void bnxt_free_hwrm_resources(struct bnxt *bp); +void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index); int bnxt_alloc_hwrm_resources(struct bnxt *bp); int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link); int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up); int bnxt_hwrm_func_qcfg(struct bnxt *bp); int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp); -int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp); +int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp, bool test); int bnxt_hwrm_allocate_pf_only(struct bnxt *bp); int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs); int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, @@ -167,4 +171,7 @@ int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type, int bnxt_hwrm_ptp_cfg(struct bnxt *bp); int bnxt_vnic_rss_configure(struct bnxt *bp, struct bnxt_vnic_info *vnic); +int bnxt_hwrm_set_ring_coal(struct bnxt *bp, + struct bnxt_coal *coal, uint16_t ring_id); +int bnxt_hwrm_check_vf_rings(struct bnxt *bp); #endif diff --git a/drivers/net/bnxt/bnxt_ring.c b/drivers/net/bnxt/bnxt_ring.c index bb9f6d1c..fcbd6bc6 100644 --- a/drivers/net/bnxt/bnxt_ring.c +++ b/drivers/net/bnxt/bnxt_ring.c @@ -258,6 +258,116 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx, return 0; } +static void bnxt_init_dflt_coal(struct bnxt_coal *coal) +{ + /* Tick values in micro seconds. + * 1 coal_buf x bufs_per_record = 1 completion record. 
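+ * For example, with an interrupt latency timer of T ticks, the
+ * defaults below program int_lat_tmr_min to T/2 and
+ * cmpl_aggr_dma_tmr to roughly T/4, so completions are buffered for
+ * up to a quarter of the interrupt window before being DMAed.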
+ */ + coal->num_cmpl_aggr_int = BNXT_NUM_CMPL_AGGR_INT; + /* This is a 6-bit value and must not be 0, or we'll get non stop IRQ */ + coal->num_cmpl_dma_aggr = BNXT_NUM_CMPL_DMA_AGGR; + /* This is a 6-bit value and must not be 0, or we'll get non stop IRQ */ + coal->num_cmpl_dma_aggr_during_int = BNXT_NUM_CMPL_DMA_AGGR_DURING_INT; + coal->int_lat_tmr_max = BNXT_INT_LAT_TMR_MAX; + /* min timer set to 1/2 of interrupt timer */ + coal->int_lat_tmr_min = BNXT_INT_LAT_TMR_MIN; + /* buf timer set to 1/4 of interrupt timer */ + coal->cmpl_aggr_dma_tmr = BNXT_CMPL_AGGR_DMA_TMR; + coal->cmpl_aggr_dma_tmr_during_int = BNXT_CMPL_AGGR_DMA_TMR_DURING_INT; +} + +int bnxt_alloc_hwrm_rx_ring(struct bnxt *bp, int queue_index) +{ + struct rte_pci_device *pci_dev = bp->pdev; + struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index]; + struct bnxt_cp_ring_info *cpr = rxq->cp_ring; + struct bnxt_ring *cp_ring = cpr->cp_ring_struct; + struct bnxt_rx_ring_info *rxr = rxq->rx_ring; + struct bnxt_ring *ring = rxr->rx_ring_struct; + unsigned int map_idx = queue_index + bp->rx_cp_nr_rings; + int rc = 0; + + bp->grp_info[queue_index].fw_stats_ctx = cpr->hw_stats_ctx_id; + + /* Rx cmpl */ + rc = bnxt_hwrm_ring_alloc(bp, cp_ring, + HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL, + queue_index, HWRM_NA_SIGNATURE, + HWRM_NA_SIGNATURE); + if (rc) + goto err_out; + + cpr->cp_doorbell = (char *)pci_dev->mem_resource[2].addr + + queue_index * BNXT_DB_SIZE; + bp->grp_info[queue_index].cp_fw_ring_id = cp_ring->fw_ring_id; + B_CP_DIS_DB(cpr, cpr->cp_raw_cons); + + if (!queue_index) { + /* + * In order to save completion resources, use the first + * completion ring from PF or VF as the default completion ring + * for async event and HWRM forward response handling. + */ + bp->def_cp_ring = cpr; + rc = bnxt_hwrm_set_async_event_cr(bp); + if (rc) + goto err_out; + } + /* Rx ring */ + rc = bnxt_hwrm_ring_alloc(bp, ring, HWRM_RING_ALLOC_INPUT_RING_TYPE_RX, + queue_index, cpr->hw_stats_ctx_id, + cp_ring->fw_ring_id); + if (rc) + goto err_out; + + rxr->rx_prod = 0; + rxr->rx_doorbell = (char *)pci_dev->mem_resource[2].addr + + queue_index * BNXT_DB_SIZE; + bp->grp_info[queue_index].rx_fw_ring_id = ring->fw_ring_id; + B_RX_DB(rxr->rx_doorbell, rxr->rx_prod); + + ring = rxr->ag_ring_struct; + /* Agg ring */ + if (!ring) + PMD_DRV_LOG(ERR, "Alloc AGG Ring is NULL!\n"); + + rc = bnxt_hwrm_ring_alloc(bp, ring, HWRM_RING_ALLOC_INPUT_RING_TYPE_RX, + map_idx, HWRM_NA_SIGNATURE, + cp_ring->fw_ring_id); + if (rc) + goto err_out; + + PMD_DRV_LOG(DEBUG, "Alloc AGG Done!\n"); + rxr->ag_prod = 0; + rxr->ag_doorbell = (char *)pci_dev->mem_resource[2].addr + + map_idx * BNXT_DB_SIZE; + bp->grp_info[queue_index].ag_fw_ring_id = ring->fw_ring_id; + B_RX_DB(rxr->ag_doorbell, rxr->ag_prod); + + rxq->rx_buf_use_size = BNXT_MAX_MTU + ETHER_HDR_LEN + + ETHER_CRC_LEN + (2 * VLAN_TAG_SIZE); + + if (bp->eth_dev->data->rx_queue_state[queue_index] == + RTE_ETH_QUEUE_STATE_STARTED) { + if (bnxt_init_one_rx_ring(rxq)) { + RTE_LOG(ERR, PMD, + "bnxt_init_one_rx_ring failed!\n"); + bnxt_rx_queue_release_op(rxq); + rc = -ENOMEM; + goto err_out; + } + B_RX_DB(rxr->rx_doorbell, rxr->rx_prod); + B_RX_DB(rxr->ag_doorbell, rxr->ag_prod); + } + rxq->index = queue_index; + PMD_DRV_LOG(INFO, + "queue %d, rx_deferred_start %d, state %d!\n", + queue_index, rxq->rx_deferred_start, + bp->eth_dev->data->rx_queue_state[queue_index]); + +err_out: + return rc; +} /* ring_grp usage: * [0] = default completion ring * [1 -> +rx_cp_nr_rings] = rx_cp, rx rings @@ -265,9 +375,12 @@ int 
bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx, */ int bnxt_alloc_hwrm_rings(struct bnxt *bp) { + struct bnxt_coal coal; unsigned int i; int rc = 0; + bnxt_init_dflt_coal(&coal); + for (i = 0; i < bp->rx_cp_nr_rings; i++) { struct bnxt_rx_queue *rxq = bp->rx_queues[i]; struct bnxt_cp_ring_info *cpr = rxq->cp_ring; @@ -291,6 +404,7 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp) cpr->cp_doorbell = (char *)bp->doorbell_base + i * 0x80; bp->grp_info[i].cp_fw_ring_id = cp_ring->fw_ring_id; B_CP_DIS_DB(cpr, cpr->cp_raw_cons); + bnxt_hwrm_set_ring_coal(bp, &coal, cp_ring->fw_ring_id); if (!i) { /* @@ -379,6 +493,7 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp) txr->tx_doorbell = (char *)bp->doorbell_base + idx * 0x80; txq->index = idx; + bnxt_hwrm_set_ring_coal(bp, &coal, cp_ring->fw_ring_id); } err_out: diff --git a/drivers/net/bnxt/bnxt_ring.h b/drivers/net/bnxt/bnxt_ring.h index 65bf3e2f..1446d784 100644 --- a/drivers/net/bnxt/bnxt_ring.h +++ b/drivers/net/bnxt/bnxt_ring.h @@ -70,6 +70,7 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx, struct bnxt_rx_queue *rxq, struct bnxt_cp_ring_info *cp_ring_info, const char *suffix); +int bnxt_alloc_hwrm_rx_ring(struct bnxt *bp, int queue_index); int bnxt_alloc_hwrm_rings(struct bnxt *bp); #endif diff --git a/drivers/net/bnxt/bnxt_rxq.c b/drivers/net/bnxt/bnxt_rxq.c index c55ddec4..832fc9ec 100644 --- a/drivers/net/bnxt/bnxt_rxq.c +++ b/drivers/net/bnxt/bnxt_rxq.c @@ -199,12 +199,14 @@ err_out: return rc; } -static void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq) +void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq) { struct bnxt_sw_rx_bd *sw_ring; struct bnxt_tpa_info *tpa_info; uint16_t i; + rte_spinlock_lock(&rxq->lock); + if (rxq) { sw_ring = rxq->rx_ring->rx_buf_ring; if (sw_ring) { @@ -239,6 +241,8 @@ static void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq) } } } + + rte_spinlock_unlock(&rxq->lock); } void bnxt_free_rx_mbufs(struct bnxt *bp) @@ -286,6 +290,7 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev, uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads; struct bnxt_rx_queue *rxq; int rc = 0; + uint8_t queue_state; if (queue_idx >= bp->max_rx_rings) { PMD_DRV_LOG(ERR, @@ -326,8 +331,8 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev, rxq->queue_id = queue_idx; rxq->port_id = eth_dev->data->port_id; - rxq->crc_len = rx_offloads & DEV_RX_OFFLOAD_CRC_STRIP ? - 0 : ETHER_CRC_LEN; + rxq->crc_len = rte_eth_dev_must_keep_crc(rx_offloads) ? + ETHER_CRC_LEN : 0; eth_dev->data->rx_queues[queue_idx] = rxq; /* Allocate RX ring hardware descriptors */ @@ -341,6 +346,11 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev, } rte_atomic64_init(&rxq->rx_mbuf_alloc_fail); + rxq->rx_deferred_start = rx_conf->rx_deferred_start; + queue_state = rxq->rx_deferred_start ? 
RTE_ETH_QUEUE_STATE_STOPPED : + RTE_ETH_QUEUE_STATE_STARTED; + eth_dev->data->rx_queue_state[queue_idx] = queue_state; + rte_spinlock_init(&rxq->lock); out: return rc; } @@ -389,6 +399,7 @@ int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; struct bnxt_rx_queue *rxq = bp->rx_queues[rx_queue_id]; struct bnxt_vnic_info *vnic = NULL; + int rc = 0; if (rxq == NULL) { PMD_DRV_LOG(ERR, "Invalid Rx queue %d\n", rx_queue_id); @@ -396,28 +407,47 @@ int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) } dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; - rxq->rx_deferred_start = false; + + bnxt_free_hwrm_rx_ring(bp, rx_queue_id); + bnxt_alloc_hwrm_rx_ring(bp, rx_queue_id); PMD_DRV_LOG(INFO, "Rx queue started %d\n", rx_queue_id); + if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) { vnic = rxq->vnic; + if (vnic->fw_grp_ids[rx_queue_id] != INVALID_HW_RING_ID) return 0; - PMD_DRV_LOG(DEBUG, "vnic = %p fw_grp_id = %d\n", - vnic, bp->grp_info[rx_queue_id + 1].fw_grp_id); + + PMD_DRV_LOG(DEBUG, + "vnic = %p fw_grp_id = %d\n", + vnic, bp->grp_info[rx_queue_id].fw_grp_id); + vnic->fw_grp_ids[rx_queue_id] = - bp->grp_info[rx_queue_id + 1].fw_grp_id; - return bnxt_vnic_rss_configure(bp, vnic); + bp->grp_info[rx_queue_id].fw_grp_id; + rc = bnxt_vnic_rss_configure(bp, vnic); } - return 0; + if (rc == 0) + rxq->rx_deferred_start = false; + + return rc; } int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) { struct bnxt *bp = (struct bnxt *)dev->data->dev_private; struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; - struct bnxt_rx_queue *rxq = bp->rx_queues[rx_queue_id]; struct bnxt_vnic_info *vnic = NULL; + struct bnxt_rx_queue *rxq = NULL; + int rc = 0; + + /* Rx CQ 0 also works as Default CQ for async notifications */ + if (!rx_queue_id) { + PMD_DRV_LOG(ERR, "Cannot stop Rx queue id %d\n", rx_queue_id); + return -EINVAL; + } + + rxq = bp->rx_queues[rx_queue_id]; if (rxq == NULL) { PMD_DRV_LOG(ERR, "Invalid Rx queue %d\n", rx_queue_id); @@ -431,7 +461,11 @@ int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) { vnic = rxq->vnic; vnic->fw_grp_ids[rx_queue_id] = INVALID_HW_RING_ID; - return bnxt_vnic_rss_configure(bp, vnic); + rc = bnxt_vnic_rss_configure(bp, vnic); } - return 0; + + if (rc == 0) + bnxt_rx_queue_release_mbufs(rxq); + + return rc; } diff --git a/drivers/net/bnxt/bnxt_rxq.h b/drivers/net/bnxt/bnxt_rxq.h index 8307f603..e5d6001d 100644 --- a/drivers/net/bnxt/bnxt_rxq.h +++ b/drivers/net/bnxt/bnxt_rxq.h @@ -10,6 +10,9 @@ struct bnxt; struct bnxt_rx_ring_info; struct bnxt_cp_ring_info; struct bnxt_rx_queue { + rte_spinlock_t lock; /* Synchronize between rx_queue_stop + * and fast path + */ struct rte_mempool *mb_pool; /* mbuf pool for RX ring */ struct rte_mbuf *pkt_first_seg; /* 1st seg of pkt */ struct rte_mbuf *pkt_last_seg; /* Last seg of pkt */ @@ -54,4 +57,5 @@ int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id); int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id); +void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq); #endif diff --git a/drivers/net/bnxt/bnxt_rxr.c b/drivers/net/bnxt/bnxt_rxr.c index 9d884292..c7bc8848 100644 --- a/drivers/net/bnxt/bnxt_rxr.c +++ b/drivers/net/bnxt/bnxt_rxr.c @@ -540,8 +540,10 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, int rc = 0; bool evt = false; - /* If Rx Q was stopped 
return */ - if (rxq->rx_deferred_start) + /* If Rx Q was stopped return. RxQ0 cannot be stopped. */ + if (unlikely(((rxq->rx_deferred_start || + !rte_spinlock_trylock(&rxq->lock)) && + rxq->queue_id))) return 0; /* Handle RX burst request */ @@ -572,18 +574,20 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, raw_cons = NEXT_RAW_CMP(raw_cons); if (nb_rx_pkts == nb_pkts || evt) break; + /* Post some Rx buf early in case of larger burst processing */ + if (nb_rx_pkts == BNXT_RX_POST_THRESH) + B_RX_DB(rxr->rx_doorbell, rxr->rx_prod); } cpr->cp_raw_cons = raw_cons; - if ((prod == rxr->rx_prod && ag_prod == rxr->ag_prod) && !evt) { + if (!nb_rx_pkts && !evt) { /* * For PMD, there is no need to keep on pushing to REARM * the doorbell if there are no new completions */ - return nb_rx_pkts; + goto done; } - B_CP_DIS_DB(cpr, cpr->cp_raw_cons); if (prod != rxr->rx_prod) B_RX_DB(rxr->rx_doorbell, rxr->rx_prod); @@ -591,6 +595,8 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, if (ag_prod != rxr->ag_prod) B_RX_DB(rxr->ag_doorbell, rxr->ag_prod); + B_CP_DIS_DB(cpr, cpr->cp_raw_cons); + /* Attempt to alloc Rx buf in case of a previous allocation failure. */ if (rc == -ENOMEM) { int i; @@ -614,16 +620,22 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, } } +done: + rte_spinlock_unlock(&rxq->lock); + return nb_rx_pkts; } void bnxt_free_rx_rings(struct bnxt *bp) { int i; + struct bnxt_rx_queue *rxq; - for (i = 0; i < (int)bp->rx_nr_rings; i++) { - struct bnxt_rx_queue *rxq = bp->rx_queues[i]; + if (!bp->rx_queues) + return; + for (i = 0; i < (int)bp->rx_nr_rings; i++) { + rxq = bp->rx_queues[i]; if (!rxq) continue; diff --git a/drivers/net/bnxt/bnxt_rxr.h b/drivers/net/bnxt/bnxt_rxr.h index 5b28f032..3815a219 100644 --- a/drivers/net/bnxt/bnxt_rxr.h +++ b/drivers/net/bnxt/bnxt_rxr.h @@ -54,6 +54,8 @@ #define RX_CMP_IP_CS_UNKNOWN(rxcmp1) \ !((rxcmp1)->flags2 & RX_CMP_IP_CS_BITS) +#define BNXT_RX_POST_THRESH 32 + enum pkt_hash_types { PKT_HASH_TYPE_NONE, /* Undefined type */ PKT_HASH_TYPE_L2, /* Input: src_MAC, dest_MAC */ diff --git a/drivers/net/bnxt/bnxt_stats.c b/drivers/net/bnxt/bnxt_stats.c index bbd4e78b..a5d3c866 100644 --- a/drivers/net/bnxt/bnxt_stats.c +++ b/drivers/net/bnxt/bnxt_stats.c @@ -278,6 +278,7 @@ int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev, count = 0; for (i = 0; i < RTE_DIM(bnxt_rx_stats_strings); i++) { uint64_t *rx_stats = (uint64_t *)bp->hw_rx_port_stats; + xstats[count].id = count; xstats[count].value = rte_le_to_cpu_64( *(uint64_t *)((char *)rx_stats + bnxt_rx_stats_strings[i].offset)); @@ -286,6 +287,7 @@ int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev, for (i = 0; i < RTE_DIM(bnxt_tx_stats_strings); i++) { uint64_t *tx_stats = (uint64_t *)bp->hw_tx_port_stats; + xstats[count].id = count; xstats[count].value = rte_le_to_cpu_64( *(uint64_t *)((char *)tx_stats + bnxt_tx_stats_strings[i].offset)); @@ -293,6 +295,7 @@ int bnxt_dev_xstats_get_op(struct rte_eth_dev *eth_dev, } /* The Tx drop pkts, aka the anti-spoof counter */ + xstats[count].id = count; xstats[count].value = rte_le_to_cpu_64(tx_drop_pkts); count++; diff --git a/drivers/net/bnxt/bnxt_txq.h b/drivers/net/bnxt/bnxt_txq.h index 720ca90c..f2c712a7 100644 --- a/drivers/net/bnxt/bnxt_txq.h +++ b/drivers/net/bnxt/bnxt_txq.h @@ -24,6 +24,7 @@ struct bnxt_tx_queue { uint8_t wthresh; /* Write-back threshold reg */ uint32_t ctx_curr; /* Hardware context states */ uint8_t tx_deferred_start; /* not in global dev start */ + uint8_t cmpl_next; /* Next BD
to trigger a compl */ struct bnxt *bp; int index; diff --git a/drivers/net/bnxt/bnxt_txr.c b/drivers/net/bnxt/bnxt_txr.c index 470fddd5..67bb35e0 100644 --- a/drivers/net/bnxt/bnxt_txr.c +++ b/drivers/net/bnxt/bnxt_txr.c @@ -114,7 +114,9 @@ static inline uint32_t bnxt_tx_avail(struct bnxt_tx_ring_info *txr) } static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt, - struct bnxt_tx_queue *txq) + struct bnxt_tx_queue *txq, + uint16_t *coal_pkts, + uint16_t *cmpl_next) { struct bnxt_tx_ring_info *txr = txq->tx_ring; struct tx_bd_long *txbd; @@ -133,7 +135,9 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt, if (tx_pkt->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM | PKT_TX_IP_CKSUM | - PKT_TX_VLAN_PKT | PKT_TX_OUTER_IP_CKSUM)) + PKT_TX_VLAN_PKT | PKT_TX_OUTER_IP_CKSUM | + PKT_TX_TUNNEL_GRE | PKT_TX_TUNNEL_VXLAN | + PKT_TX_TUNNEL_GENEVE)) long_bd = true; tx_buf = &txr->tx_buf_ring[txr->tx_prod]; @@ -146,14 +150,21 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt, return -ENOMEM; txbd = &txr->tx_desc_ring[txr->tx_prod]; - txbd->opaque = txr->tx_prod; + txbd->opaque = *coal_pkts; txbd->flags_type = tx_buf->nr_bds << TX_BD_LONG_FLAGS_BD_CNT_SFT; + txbd->flags_type |= TX_BD_SHORT_FLAGS_COAL_NOW; + if (!*cmpl_next) { + txbd->flags_type |= TX_BD_LONG_FLAGS_NO_CMPL; + } else { + *coal_pkts = 0; + *cmpl_next = false; + } txbd->len = tx_pkt->data_len; - if (txbd->len >= 2014) + if (tx_pkt->pkt_len >= 2014) txbd->flags_type |= TX_BD_LONG_FLAGS_LHINT_GTE2K; else - txbd->flags_type |= lhint_arr[txbd->len >> 9]; - txbd->address = rte_cpu_to_le_32(rte_mbuf_data_iova(tx_buf->mbuf)); + txbd->flags_type |= lhint_arr[tx_pkt->pkt_len >> 9]; + txbd->address = rte_cpu_to_le_64(rte_mbuf_data_iova(tx_buf->mbuf)); if (long_bd) { txbd->flags_type |= TX_BD_LONG_TYPE_TX_BD_LONG; @@ -194,16 +205,46 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt, /* Outer IP, Inner IP, Inner TCP/UDP CSO */ txbd1->lflags |= TX_BD_FLG_TIP_IP_TCP_UDP_CHKSUM; txbd1->mss = 0; + } else if ((tx_pkt->ol_flags & PKT_TX_OIP_IIP_TCP_CKSUM) == + PKT_TX_OIP_IIP_TCP_CKSUM) { + /* Outer IP, Inner IP, Inner TCP/UDP CSO */ + txbd1->lflags |= TX_BD_FLG_TIP_IP_TCP_UDP_CHKSUM; + txbd1->mss = 0; + } else if ((tx_pkt->ol_flags & PKT_TX_OIP_IIP_UDP_CKSUM) == + PKT_TX_OIP_IIP_UDP_CKSUM) { + /* Outer IP, Inner IP, Inner TCP/UDP CSO */ + txbd1->lflags |= TX_BD_FLG_TIP_IP_TCP_UDP_CHKSUM; + txbd1->mss = 0; } else if ((tx_pkt->ol_flags & PKT_TX_IIP_TCP_UDP_CKSUM) == PKT_TX_IIP_TCP_UDP_CKSUM) { /* (Inner) IP, (Inner) TCP/UDP CSO */ txbd1->lflags |= TX_BD_FLG_IP_TCP_UDP_CHKSUM; txbd1->mss = 0; + } else if ((tx_pkt->ol_flags & PKT_TX_IIP_UDP_CKSUM) == + PKT_TX_IIP_UDP_CKSUM) { + /* (Inner) IP, (Inner) TCP/UDP CSO */ + txbd1->lflags |= TX_BD_FLG_IP_TCP_UDP_CHKSUM; + txbd1->mss = 0; + } else if ((tx_pkt->ol_flags & PKT_TX_IIP_TCP_CKSUM) == + PKT_TX_IIP_TCP_CKSUM) { + /* (Inner) IP, (Inner) TCP/UDP CSO */ + txbd1->lflags |= TX_BD_FLG_IP_TCP_UDP_CHKSUM; + txbd1->mss = 0; } else if ((tx_pkt->ol_flags & PKT_TX_OIP_TCP_UDP_CKSUM) == PKT_TX_OIP_TCP_UDP_CKSUM) { /* Outer IP, (Inner) TCP/UDP CSO */ txbd1->lflags |= TX_BD_FLG_TIP_TCP_UDP_CHKSUM; txbd1->mss = 0; + } else if ((tx_pkt->ol_flags & PKT_TX_OIP_UDP_CKSUM) == + PKT_TX_OIP_UDP_CKSUM) { + /* Outer IP, (Inner) TCP/UDP CSO */ + txbd1->lflags |= TX_BD_FLG_TIP_TCP_UDP_CHKSUM; + txbd1->mss = 0; + } else if ((tx_pkt->ol_flags & PKT_TX_OIP_TCP_CKSUM) == + PKT_TX_OIP_TCP_CKSUM) { + /* Outer IP, (Inner) TCP/UDP CSO */ + txbd1->lflags |= TX_BD_FLG_TIP_TCP_UDP_CHKSUM; + txbd1->mss = 
0; } else if ((tx_pkt->ol_flags & PKT_TX_OIP_IIP_CKSUM) == PKT_TX_OIP_IIP_CKSUM) { /* Outer IP, Inner IP CSO */ @@ -214,11 +255,23 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt, /* TCP/UDP CSO */ txbd1->lflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM; txbd1->mss = 0; - } else if (tx_pkt->ol_flags & PKT_TX_IP_CKSUM) { + } else if ((tx_pkt->ol_flags & PKT_TX_TCP_CKSUM) == + PKT_TX_TCP_CKSUM) { + /* TCP/UDP CSO */ + txbd1->lflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM; + txbd1->mss = 0; + } else if ((tx_pkt->ol_flags & PKT_TX_UDP_CKSUM) == + PKT_TX_UDP_CKSUM) { + /* TCP/UDP CSO */ + txbd1->lflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM; + txbd1->mss = 0; + } else if ((tx_pkt->ol_flags & PKT_TX_IP_CKSUM) == + PKT_TX_IP_CKSUM) { /* IP CSO */ txbd1->lflags |= TX_BD_LONG_LFLAGS_IP_CHKSUM; txbd1->mss = 0; - } else if (tx_pkt->ol_flags & PKT_TX_OUTER_IP_CKSUM) { + } else if ((tx_pkt->ol_flags & PKT_TX_OUTER_IP_CKSUM) == + PKT_TX_OUTER_IP_CKSUM) { /* IP CSO */ txbd1->lflags |= TX_BD_LONG_LFLAGS_T_IP_CHKSUM; txbd1->mss = 0; @@ -234,14 +287,15 @@ static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt, tx_buf = &txr->tx_buf_ring[txr->tx_prod]; txbd = &txr->tx_desc_ring[txr->tx_prod]; - txbd->address = rte_cpu_to_le_32(rte_mbuf_data_iova(m_seg)); - txbd->flags_type = TX_BD_SHORT_TYPE_TX_BD_SHORT; + txbd->address = rte_cpu_to_le_64(rte_mbuf_data_iova(m_seg)); + txbd->flags_type |= TX_BD_SHORT_TYPE_TX_BD_SHORT; txbd->len = m_seg->data_len; m_seg = m_seg->next; } txbd->flags_type |= TX_BD_LONG_FLAGS_PACKET_END; + txbd1->lflags = rte_cpu_to_le_32(txbd1->lflags); txr->tx_prod = RING_NEXT(txr->tx_ring_struct, txr->tx_prod); @@ -278,35 +332,44 @@ static int bnxt_handle_tx_cp(struct bnxt_tx_queue *txq) struct bnxt_cp_ring_info *cpr = txq->cp_ring; uint32_t raw_cons = cpr->cp_raw_cons; uint32_t cons; - int nb_tx_pkts = 0; + uint32_t nb_tx_pkts = 0; struct tx_cmpl *txcmp; + struct cmpl_base *cp_desc_ring = cpr->cp_desc_ring; + struct bnxt_ring *cp_ring_struct = cpr->cp_ring_struct; + uint32_t ring_mask = cp_ring_struct->ring_mask; + uint32_t opaque = 0; - if ((txq->tx_ring->tx_ring_struct->ring_size - - (bnxt_tx_avail(txq->tx_ring))) > - txq->tx_free_thresh) { - while (1) { - cons = RING_CMP(cpr->cp_ring_struct, raw_cons); - txcmp = (struct tx_cmpl *)&cpr->cp_desc_ring[cons]; - - if (!CMP_VALID(txcmp, raw_cons, cpr->cp_ring_struct)) - break; - cpr->valid = FLIP_VALID(cons, - cpr->cp_ring_struct->ring_mask, - cpr->valid); - - if (CMP_TYPE(txcmp) == TX_CMPL_TYPE_TX_L2) - nb_tx_pkts++; - else - RTE_LOG_DP(DEBUG, PMD, - "Unhandled CMP type %02x\n", - CMP_TYPE(txcmp)); - raw_cons = NEXT_RAW_CMP(raw_cons); - } - if (nb_tx_pkts) - bnxt_tx_cmp(txq, nb_tx_pkts); + if (((txq->tx_ring->tx_prod - txq->tx_ring->tx_cons) & + txq->tx_ring->tx_ring_struct->ring_mask) < txq->tx_free_thresh) + return 0; + + do { + cons = RING_CMPL(ring_mask, raw_cons); + txcmp = (struct tx_cmpl *)&cpr->cp_desc_ring[cons]; + rte_prefetch_non_temporal(&cp_desc_ring[(cons + 2) & + ring_mask]); + + if (!CMPL_VALID(txcmp, cpr->valid)) + break; + opaque = rte_cpu_to_le_32(txcmp->opaque); + NEXT_CMPL(cpr, cons, cpr->valid, 1); + rte_prefetch0(&cp_desc_ring[cons]); + + if (CMP_TYPE(txcmp) == TX_CMPL_TYPE_TX_L2) + nb_tx_pkts += opaque; + else + RTE_LOG_DP(ERR, PMD, + "Unhandled CMP type %02x\n", + CMP_TYPE(txcmp)); + raw_cons = cons; + } while (nb_tx_pkts < ring_mask); + + if (nb_tx_pkts) { + bnxt_tx_cmp(txq, nb_tx_pkts); cpr->cp_raw_cons = raw_cons; - B_CP_DIS_DB(cpr, cpr->cp_raw_cons); + B_CP_DB(cpr, cpr->cp_raw_cons, ring_mask); } + return nb_tx_pkts; 
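+ /*
+ * Worked example: if 32 packets were queued with a completion
+ * requested only on the last BD, that single TX_CMPL_TYPE_TX_L2
+ * completion arrives with opaque == 32, so one pass here recycles
+ * all 32 buffers via bnxt_tx_cmp() and rings the completion
+ * doorbell once.
+ */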
} @@ -315,8 +378,8 @@ uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, { struct bnxt_tx_queue *txq = tx_queue; uint16_t nb_tx_pkts = 0; - uint16_t db_mask = txq->tx_ring->tx_ring_struct->ring_size >> 2; - uint16_t last_db_mask = 0; + uint16_t coal_pkts = 0; + uint16_t cmpl_next = txq->cmpl_next; /* Handle TX completions */ bnxt_handle_tx_cp(txq); @@ -326,16 +389,25 @@ uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, PMD_DRV_LOG(DEBUG, "Tx q stopped;return\n"); return 0; } + + txq->cmpl_next = 0; /* Handle TX burst request */ for (nb_tx_pkts = 0; nb_tx_pkts < nb_pkts; nb_tx_pkts++) { - if (bnxt_start_xmit(tx_pkts[nb_tx_pkts], txq)) { + int rc; + + /* Request a completion on first and last packet */ + cmpl_next |= (nb_pkts == nb_tx_pkts + 1); + coal_pkts++; + rc = bnxt_start_xmit(tx_pkts[nb_tx_pkts], txq, + &coal_pkts, &cmpl_next); + + if (unlikely(rc)) { + /* Request a completion in next cycle */ + txq->cmpl_next = 1; break; - } else if ((nb_tx_pkts & db_mask) != last_db_mask) { - B_TX_DB(txq->tx_ring->tx_doorbell, - txq->tx_ring->tx_prod); - last_db_mask = nb_tx_pkts & db_mask; } } + if (nb_tx_pkts) B_TX_DB(txq->tx_ring->tx_doorbell, txq->tx_ring->tx_prod); diff --git a/drivers/net/bnxt/bnxt_txr.h b/drivers/net/bnxt/bnxt_txr.h index 15c7e5a0..7f3c7cdb 100644 --- a/drivers/net/bnxt/bnxt_txr.h +++ b/drivers/net/bnxt/bnxt_txr.h @@ -45,10 +45,20 @@ int bnxt_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id); #define PKT_TX_OIP_IIP_TCP_UDP_CKSUM (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM | \ PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM) +#define PKT_TX_OIP_IIP_UDP_CKSUM (PKT_TX_UDP_CKSUM | \ + PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM) +#define PKT_TX_OIP_IIP_TCP_CKSUM (PKT_TX_TCP_CKSUM | \ + PKT_TX_IP_CKSUM | PKT_TX_OUTER_IP_CKSUM) #define PKT_TX_IIP_TCP_UDP_CKSUM (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM | \ PKT_TX_IP_CKSUM) +#define PKT_TX_IIP_TCP_CKSUM (PKT_TX_TCP_CKSUM | PKT_TX_IP_CKSUM) +#define PKT_TX_IIP_UDP_CKSUM (PKT_TX_UDP_CKSUM | PKT_TX_IP_CKSUM) #define PKT_TX_OIP_TCP_UDP_CKSUM (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM | \ PKT_TX_OUTER_IP_CKSUM) +#define PKT_TX_OIP_UDP_CKSUM (PKT_TX_UDP_CKSUM | \ + PKT_TX_OUTER_IP_CKSUM) +#define PKT_TX_OIP_TCP_CKSUM (PKT_TX_TCP_CKSUM | \ + PKT_TX_OUTER_IP_CKSUM) #define PKT_TX_OIP_IIP_CKSUM (PKT_TX_IP_CKSUM | \ PKT_TX_OUTER_IP_CKSUM) #define PKT_TX_TCP_UDP_CKSUM (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM) diff --git a/drivers/net/bnxt/bnxt_util.c b/drivers/net/bnxt/bnxt_util.c new file mode 100644 index 00000000..7d334271 --- /dev/null +++ b/drivers/net/bnxt/bnxt_util.c @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Broadcom + * All rights reserved. + */ + +#include <inttypes.h> + +#include "bnxt_util.h" + +int bnxt_check_zero_bytes(const uint8_t *bytes, int len) +{ + int i; + + for (i = 0; i < len; i++) + if (bytes[i] != 0x00) + return 0; + return 1; +} diff --git a/drivers/net/bnxt/bnxt_util.h b/drivers/net/bnxt/bnxt_util.h new file mode 100644 index 00000000..2378833c --- /dev/null +++ b/drivers/net/bnxt/bnxt_util.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Broadcom + * All rights reserved. 
+ */ + +#ifndef _BNXT_UTIL_H_ +#define _BNXT_UTIL_H_ + +int bnxt_check_zero_bytes(const uint8_t *bytes, int len); + +#endif /* _BNXT_UTIL_H_ */ diff --git a/drivers/net/bnxt/bnxt_vnic.c b/drivers/net/bnxt/bnxt_vnic.c index 19d06af5..c0577cd7 100644 --- a/drivers/net/bnxt/bnxt_vnic.c +++ b/drivers/net/bnxt/bnxt_vnic.c @@ -39,7 +39,7 @@ void bnxt_init_vnics(struct bnxt *bp) { struct bnxt_vnic_info *vnic; uint16_t max_vnics; - int i, j; + int i; max_vnics = bp->max_vnics; STAILQ_INIT(&bp->free_vnic_list); @@ -52,9 +52,6 @@ void bnxt_init_vnics(struct bnxt *bp) vnic->hash_mode = HWRM_VNIC_RSS_CFG_INPUT_HASH_MODE_FLAGS_DEFAULT; - for (j = 0; j < MAX_QUEUES_PER_VNIC; j++) - vnic->fw_grp_ids[j] = (uint16_t)HWRM_NA_SIGNATURE; - prandom_bytes(vnic->rss_hash_key, HW_HASH_KEY_SIZE); STAILQ_INIT(&vnic->filter); STAILQ_INIT(&vnic->flow_list); diff --git a/drivers/net/bnxt/bnxt_vnic.h b/drivers/net/bnxt/bnxt_vnic.h index c521d7e5..9029f78c 100644 --- a/drivers/net/bnxt/bnxt_vnic.h +++ b/drivers/net/bnxt/bnxt_vnic.h @@ -15,13 +15,9 @@ struct bnxt_vnic_info { uint16_t fw_vnic_id; /* returned by Chimp during alloc */ uint16_t rss_rule; -#define MAX_NUM_TRAFFIC_CLASSES 8 -#define MAX_NUM_RSS_QUEUES_PER_VNIC 16 -#define MAX_QUEUES_PER_VNIC (MAX_NUM_RSS_QUEUES_PER_VNIC + \ - MAX_NUM_TRAFFIC_CLASSES) uint16_t start_grp_id; uint16_t end_grp_id; - uint16_t fw_grp_ids[MAX_QUEUES_PER_VNIC]; + uint16_t *fw_grp_ids; uint16_t dflt_ring_grp; uint16_t mru; uint16_t hash_type; diff --git a/drivers/net/bnxt/hsi_struct_def_dpdk.h b/drivers/net/bnxt/hsi_struct_def_dpdk.h index fd6d8807..f5c7b422 100644 --- a/drivers/net/bnxt/hsi_struct_def_dpdk.h +++ b/drivers/net/bnxt/hsi_struct_def_dpdk.h @@ -686,8 +686,8 @@ struct hwrm_err_output { #define HWRM_VERSION_MINOR 9 #define HWRM_VERSION_UPDATE 2 /* non-zero means beta version */ -#define HWRM_VERSION_RSVD 6 -#define HWRM_VERSION_STR "1.9.2.6" +#define HWRM_VERSION_RSVD 9 +#define HWRM_VERSION_STR "1.9.2.9" /**************** * hwrm_ver_get * @@ -3183,6 +3183,9 @@ struct hwrm_async_event_cmpl { /* LLFC/PFC Configuration Change */ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LLFC_PFC_CHANGE \ UINT32_C(0x34) + /* Default VNIC Configuration Change */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE \ + UINT32_C(0x35) /* HWRM Error */ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR \ UINT32_C(0xff) @@ -3280,6 +3283,11 @@ struct hwrm_async_event_cmpl_link_status_change { UINT32_C(0xffff0) #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_SFT \ 4 + /* Indicates the physical function this event occured on. */ + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PF_ID_MASK \ + UINT32_C(0xff00000) + #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PF_ID_SFT \ + 20 } __attribute__((packed)); /* hwrm_async_event_cmpl_link_mtu_change (size:128b/16B) */ @@ -4087,6 +4095,10 @@ struct hwrm_async_event_cmpl_vf_flr { #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_MASK \ UINT32_C(0xffff) #define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_SFT 0 + /* Indicates the physical function this event occured on. 
diff --git a/drivers/net/bnxt/hsi_struct_def_dpdk.h b/drivers/net/bnxt/hsi_struct_def_dpdk.h
index fd6d8807..f5c7b422 100644
--- a/drivers/net/bnxt/hsi_struct_def_dpdk.h
+++ b/drivers/net/bnxt/hsi_struct_def_dpdk.h
@@ -686,8 +686,8 @@ struct hwrm_err_output {
 #define HWRM_VERSION_MINOR 9
 #define HWRM_VERSION_UPDATE 2
 /* non-zero means beta version */
-#define HWRM_VERSION_RSVD 6
-#define HWRM_VERSION_STR "1.9.2.6"
+#define HWRM_VERSION_RSVD 9
+#define HWRM_VERSION_STR "1.9.2.9"
 
 /****************
  * hwrm_ver_get *
@@ -3183,6 +3183,9 @@ struct hwrm_async_event_cmpl {
 	/* LLFC/PFC Configuration Change */
 	#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LLFC_PFC_CHANGE \
 		UINT32_C(0x34)
+	/* Default VNIC Configuration Change */
+	#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE \
+		UINT32_C(0x35)
 	/* HWRM Error */
 	#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR \
 		UINT32_C(0xff)
@@ -3280,6 +3283,11 @@ struct hwrm_async_event_cmpl_link_status_change {
 		UINT32_C(0xffff0)
 	#define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_SFT \
 		4
+	/* Indicates the physical function this event occurred on. */
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PF_ID_MASK \
+		UINT32_C(0xff00000)
+	#define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PF_ID_SFT \
+		20
 } __attribute__((packed));
 
 /* hwrm_async_event_cmpl_link_mtu_change (size:128b/16B) */
@@ -4087,6 +4095,10 @@ struct hwrm_async_event_cmpl_vf_flr {
 	#define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_MASK \
 		UINT32_C(0xffff)
 	#define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_SFT 0
+	/* Indicates the physical function this event occurred on. */
+	#define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_PF_ID_MASK \
+		UINT32_C(0xff0000)
+	#define HWRM_ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_PF_ID_SFT 16
 } __attribute__((packed));
 
 /* hwrm_async_event_cmpl_vf_mac_addr_change (size:128b/16B) */
@@ -4354,6 +4366,88 @@ struct hwrm_async_event_cmpl_llfc_pfc_change {
 		5
 } __attribute__((packed));
 
+/* hwrm_async_event_cmpl_default_vnic_change (size:128b/16B) */
+struct hwrm_async_event_cmpl_default_vnic_change {
+	uint16_t	type;
+	/*
+	 * This field indicates the exact type of the completion.
+	 * By convention, the LSB identifies the length of the
+	 * record in 16B units. Even values indicate 16B
+	 * records. Odd values indicate 32B
+	 * records.
+	 */
+	#define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_MASK \
+		UINT32_C(0x3f)
+	#define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_SFT \
+		0
+	/* HWRM Asynchronous Event Information */
+	#define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_HWRM_ASYNC_EVENT \
+		UINT32_C(0x2e)
+	#define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_LAST \
+		HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_TYPE_HWRM_ASYNC_EVENT
+	/* unused1 is 10 b */
+	#define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_UNUSED1_MASK \
+		UINT32_C(0xffc0)
+	#define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_UNUSED1_SFT \
+		6
+	/* Identifiers of events. */
+	uint16_t	event_id;
+	/* Notification of a default VNIC allocation or free */
+	#define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_ID_ALLOC_FREE_NOTIFICATION \
+		UINT32_C(0x35)
+	#define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_ID_LAST \
+		HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_ID_ALLOC_FREE_NOTIFICATION
+	/* Event specific data */
+	uint32_t	event_data2;
+	uint8_t	opaque_v;
+	/*
+	 * This value is written by the NIC such that it will be different
+	 * for each pass through the completion queue. The even passes
+	 * will write 1. The odd passes will write 0.
+	 */
+	#define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_V \
+		UINT32_C(0x1)
+	/* opaque is 7 b */
+	#define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_OPAQUE_MASK \
+		UINT32_C(0xfe)
+	#define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_OPAQUE_SFT 1
+	/* 8-lsb timestamp from POR (100-msec resolution) */
+	uint8_t	timestamp_lo;
+	/* 16-lsb timestamp from POR (100-msec resolution) */
+	uint16_t	timestamp_hi;
+	/* Event specific data */
+	uint32_t	event_data1;
+	/* Indicates default VNIC configuration change */
+	#define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_MASK \
+		UINT32_C(0x3)
+	#define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_SFT \
+		0
+	/*
+	 * If this field is set to 1, then it indicates that
+	 * a default VNIC has been allocated.
+	 */
+	#define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_DEF_VNIC_ALLOC \
+		UINT32_C(0x1)
+	/*
+	 * If this field is set to 2, then it indicates that
+	 * a default VNIC has been freed.
+	 */
+	#define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_DEF_VNIC_FREE \
+		UINT32_C(0x2)
+	#define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_LAST \
+		HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_DEF_VNIC_STATE_DEF_VNIC_FREE
+	/* Indicates the physical function this event occurred on. */
+	#define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_PF_ID_MASK \
+		UINT32_C(0x3fc)
+	#define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_PF_ID_SFT \
+		2
+	/* Indicates the virtual function this event occurred on */
+	#define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_VF_ID_MASK \
+		UINT32_C(0x3fffc00)
+	#define HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_EVENT_DATA1_VF_ID_SFT \
+		10
+} __attribute__((packed));
+
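The DEFAULT_VNIC_CHANGE completion added above packs an alloc/free state, a PF ID, and a VF ID into event_data1. A minimal standalone sketch of unpacking those fields with the masks and shifts just defined; the macro names are shortened and the sample value is made up:

	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Shortened stand-ins for the HWRM_ASYNC_EVENT_CMPL_DEFAULT_VNIC_CHANGE_
	 * EVENT_DATA1_* masks and shifts defined in the hunk above.
	 */
	#define DEF_VNIC_STATE_MASK UINT32_C(0x3)       /* 1 = alloc, 2 = free */
	#define DEF_VNIC_STATE_SFT  0
	#define PF_ID_MASK          UINT32_C(0x3fc)
	#define PF_ID_SFT           2
	#define VF_ID_MASK          UINT32_C(0x3fffc00)
	#define VF_ID_SFT           10

	int main(void)
	{
		/* Made-up sample event: state = 1 (alloc), PF 2, VF 5. */
		uint32_t event_data1 = (UINT32_C(1) << DEF_VNIC_STATE_SFT) |
				       (UINT32_C(2) << PF_ID_SFT) |
				       (UINT32_C(5) << VF_ID_SFT);

		printf("state=%" PRIu32 " pf_id=%" PRIu32 " vf_id=%" PRIu32 "\n",
		       (event_data1 & DEF_VNIC_STATE_MASK) >> DEF_VNIC_STATE_SFT,
		       (event_data1 & PF_ID_MASK) >> PF_ID_SFT,
		       (event_data1 & VF_ID_MASK) >> VF_ID_SFT);
		return 0;
	}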
 /* hwrm_async_event_cmpl_hwrm_error (size:128b/16B) */
 struct hwrm_async_event_cmpl_hwrm_error {
 	uint16_t	type;
@@ -5197,6 +5291,21 @@ struct hwrm_func_qcaps_output {
 	#define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PCIE_STATS_SUPPORTED \
 		UINT32_C(0x10000)
 	/*
+	 * If the query is for a VF, then this flag shall be ignored.
+	 * If this query is for a PF and this flag is set to 1,
+	 * then the PF has the capability to adopt the VFs belonging
+	 * to another PF.
+	 */
+	#define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ADOPTED_PF_SUPPORTED \
+		UINT32_C(0x20000)
+	/*
+	 * If the query is for a VF, then this flag shall be ignored.
+	 * If this query is for a PF and this flag is set to 1,
+	 * then the PF has the capability to administer another PF.
+	 */
+	#define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ADMIN_PF_SUPPORTED \
+		UINT32_C(0x40000)
+	/*
 	 * This value is the current MAC address configured for this
 	 * function. A value of 00-00-00-00-00-00 indicates no
 	 * MAC address is currently configured.
diff --git a/drivers/net/bnxt/meson.build b/drivers/net/bnxt/meson.build
new file mode 100644
index 00000000..e130f271
--- /dev/null
+++ b/drivers/net/bnxt/meson.build
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+install_headers('rte_pmd_bnxt.h')
+version = 2
+sources = files('bnxt_cpr.c',
+	'bnxt_ethdev.c',
+	'bnxt_filter.c',
+	'bnxt_flow.c',
+	'bnxt_hwrm.c',
+	'bnxt_irq.c',
+	'bnxt_ring.c',
+	'bnxt_rxq.c',
+	'bnxt_rxr.c',
+	'bnxt_stats.c',
+	'bnxt_txq.c',
+	'bnxt_txr.c',
+	'bnxt_util.c',
+	'bnxt_vnic.c',
+	'rte_pmd_bnxt.c')
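Closing illustration: the ADOPTED_PF/ADMIN_PF capability bits added to hwrm_func_qcaps_output above are plain flag tests for code inspecting a PF's capabilities. A self-contained sketch; the flags word is made up, and only the two bit values come from the hunk:

	#include <stdint.h>
	#include <stdio.h>

	/* Values copied from the hwrm_func_qcaps_output hunk above. */
	#define FLAGS_ADOPTED_PF_SUPPORTED UINT32_C(0x20000)
	#define FLAGS_ADMIN_PF_SUPPORTED   UINT32_C(0x40000)

	int main(void)
	{
		/* Made-up flags word as a PF might report it in func_qcaps. */
		uint32_t flags = FLAGS_ADOPTED_PF_SUPPORTED | FLAGS_ADMIN_PF_SUPPORTED;

		if (flags & FLAGS_ADOPTED_PF_SUPPORTED)
			printf("PF can adopt VFs belonging to another PF\n");
		if (flags & FLAGS_ADMIN_PF_SUPPORTED)
			printf("PF can administer another PF\n");
		return 0;
	}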